diff --git a/sys/arm/allwinner/if_emac.c b/sys/arm/allwinner/if_emac.c
index ce53e075c196..5752582c3fa2 100644
--- a/sys/arm/allwinner/if_emac.c
+++ b/sys/arm/allwinner/if_emac.c
@@ -1,1199 +1,1194 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2013 Ganbold Tsagaankhuu <ganbold@freebsd.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/* A10/A20 EMAC driver */
#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/bus.h>
#include <sys/lock.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/gpio.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <machine/intr.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_mib.h>
#include <net/ethernet.h>
#include <net/if_vlan_var.h>
#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#endif
#include <net/bpf.h>
#include <net/bpfdesc.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>
#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <arm/allwinner/if_emacreg.h>
#include <arm/allwinner/aw_sid.h>
#include <dev/extres/clk/clk.h>
#include "miibus_if.h"
#include "gpio_if.h"
#include "a10_sramc.h"
struct emac_softc {
struct ifnet *emac_ifp;
device_t emac_dev;
device_t emac_miibus;
bus_space_handle_t emac_handle;
bus_space_tag_t emac_tag;
struct resource *emac_res;
struct resource *emac_irq;
void *emac_intrhand;
clk_t emac_clk;
int emac_if_flags;
struct mtx emac_mtx;
struct callout emac_tick_ch;
int emac_watchdog_timer;
int emac_rx_process_limit;
int emac_link;
uint32_t emac_fifo_mask;
};
static int emac_probe(device_t);
static int emac_attach(device_t);
static int emac_detach(device_t);
static int emac_shutdown(device_t);
static int emac_suspend(device_t);
static int emac_resume(device_t);
static int emac_sys_setup(struct emac_softc *);
static void emac_reset(struct emac_softc *);
static void emac_init_locked(struct emac_softc *);
static void emac_start_locked(struct ifnet *);
static void emac_init(void *);
static void emac_stop_locked(struct emac_softc *);
static void emac_intr(void *);
static int emac_ioctl(struct ifnet *, u_long, caddr_t);
static void emac_rxeof(struct emac_softc *, int);
static void emac_txeof(struct emac_softc *, uint32_t);
static int emac_miibus_readreg(device_t, int, int);
static int emac_miibus_writereg(device_t, int, int, int);
static void emac_miibus_statchg(device_t);
static int emac_ifmedia_upd(struct ifnet *);
static void emac_ifmedia_sts(struct ifnet *, struct ifmediareq *);
static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int);
static int sysctl_hw_emac_proc_limit(SYSCTL_HANDLER_ARGS);
#define EMAC_READ_REG(sc, reg) \
bus_space_read_4(sc->emac_tag, sc->emac_handle, reg)
#define EMAC_WRITE_REG(sc, reg, val) \
bus_space_write_4(sc->emac_tag, sc->emac_handle, reg, val)
static int
emac_sys_setup(struct emac_softc *sc)
{
int error;
/* Activate EMAC clock. */
error = clk_get_by_ofw_index(sc->emac_dev, 0, 0, &sc->emac_clk);
if (error != 0) {
device_printf(sc->emac_dev, "cannot get clock\n");
return (error);
}
error = clk_enable(sc->emac_clk);
if (error != 0) {
device_printf(sc->emac_dev, "cannot enable clock\n");
return (error);
}
/* Map sram. */
a10_map_to_emac();
return (0);
}
static void
emac_get_hwaddr(struct emac_softc *sc, uint8_t *hwaddr)
{
uint32_t val0, val1, rnd;
u_char rootkey[16];
size_t rootkey_size;
/*
 * Try to get the MAC address from the running hardware.
 * If there is something non-zero there, just use it.
 *
 * Otherwise set the address to a convenient locally assigned address,
 * using the SID rootkey.
 * This is what U-Boot does, so we end up with the same MAC as if
 * U-Boot had set it.
 * If we can't get the root key, generate a random one:
 * 'bsd' + 24 random low-order bits. 'b' is 0x62, which has the locally
 * assigned bit set, and the broadcast/multicast bit clear.
 */
val0 = EMAC_READ_REG(sc, EMAC_MAC_A0);
val1 = EMAC_READ_REG(sc, EMAC_MAC_A1);
if ((val0 | val1) != 0 && (val0 | val1) != 0xffffff) {
hwaddr[0] = (val1 >> 16) & 0xff;
hwaddr[1] = (val1 >> 8) & 0xff;
hwaddr[2] = (val1 >> 0) & 0xff;
hwaddr[3] = (val0 >> 16) & 0xff;
hwaddr[4] = (val0 >> 8) & 0xff;
hwaddr[5] = (val0 >> 0) & 0xff;
} else {
rootkey_size = sizeof(rootkey);
if (aw_sid_get_fuse(AW_SID_FUSE_ROOTKEY, rootkey,
&rootkey_size) == 0) {
hwaddr[0] = 0x2;
hwaddr[1] = rootkey[3];
hwaddr[2] = rootkey[12];
hwaddr[3] = rootkey[13];
hwaddr[4] = rootkey[14];
hwaddr[5] = rootkey[15];
} else {
rnd = arc4random() & 0x00ffffff;
hwaddr[0] = 'b';
hwaddr[1] = 's';
hwaddr[2] = 'd';
hwaddr[3] = (rnd >> 16) & 0xff;
hwaddr[4] = (rnd >> 8) & 0xff;
hwaddr[5] = (rnd >> 0) & 0xff;
}
}
if (bootverbose)
printf("MAC address: %s\n", ether_sprintf(hwaddr));
}
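/*
 * Illustrative note (editorial, not in the original source): with the
 * final fallback above, a generated address looks like
 * 62:73:64:xx:yy:zz ('b' 's' 'd' + 24 random bits).  0x62 has bit 1
 * (locally administered) set and bit 0 (multicast) clear, so the
 * result is a valid unicast, locally assigned MAC.
 */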
static u_int
emac_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
{
uint32_t h, *hashes = arg;
h = ether_crc32_be(LLADDR(sdl), ETHER_ADDR_LEN) >> 26;
hashes[h >> 5] |= 1 << (h & 0x1f);
return (1);
}
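/*
 * Worked example (editorial sketch): ether_crc32_be() yields a 32-bit
 * CRC; ">> 26" keeps its top 6 bits, giving h in 0..63.  For, say,
 * h = 43: hashes[43 >> 5] = hashes[1] gets bit (43 & 0x1f) = 11 set,
 * matching the two 32-bit EMAC_RX_HASH0/1 registers written in
 * emac_set_rx_mode() below.
 */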
static void
emac_set_rx_mode(struct emac_softc *sc)
{
struct ifnet *ifp;
uint32_t hashes[2];
uint32_t rcr = 0;
EMAC_ASSERT_LOCKED(sc);
ifp = sc->emac_ifp;
rcr = EMAC_READ_REG(sc, EMAC_RX_CTL);
/* Unicast packet and DA filtering */
rcr |= EMAC_RX_UCAD;
rcr |= EMAC_RX_DAF;
hashes[0] = 0;
hashes[1] = 0;
if (ifp->if_flags & IFF_ALLMULTI) {
hashes[0] = 0xffffffff;
hashes[1] = 0xffffffff;
} else
if_foreach_llmaddr(ifp, emac_hash_maddr, hashes);
rcr |= EMAC_RX_MCO;
rcr |= EMAC_RX_MHF;
EMAC_WRITE_REG(sc, EMAC_RX_HASH0, hashes[0]);
EMAC_WRITE_REG(sc, EMAC_RX_HASH1, hashes[1]);
if (ifp->if_flags & IFF_BROADCAST) {
rcr |= EMAC_RX_BCO;
rcr |= EMAC_RX_MCO;
}
if (ifp->if_flags & IFF_PROMISC)
rcr |= EMAC_RX_PA;
else
rcr |= EMAC_RX_UCAD;
EMAC_WRITE_REG(sc, EMAC_RX_CTL, rcr);
}
static void
emac_reset(struct emac_softc *sc)
{
EMAC_WRITE_REG(sc, EMAC_CTL, 0);
DELAY(200);
EMAC_WRITE_REG(sc, EMAC_CTL, 1);
DELAY(200);
}
static void
emac_drain_rxfifo(struct emac_softc *sc)
{
uint32_t data;
while (EMAC_READ_REG(sc, EMAC_RX_FBC) > 0)
data = EMAC_READ_REG(sc, EMAC_RX_IO_DATA);
}
static void
emac_txeof(struct emac_softc *sc, uint32_t status)
{
struct ifnet *ifp;
EMAC_ASSERT_LOCKED(sc);
ifp = sc->emac_ifp;
status &= (EMAC_TX_FIFO0 | EMAC_TX_FIFO1);
sc->emac_fifo_mask &= ~status;
if (status == (EMAC_TX_FIFO0 | EMAC_TX_FIFO1))
if_inc_counter(ifp, IFCOUNTER_OPACKETS, 2);
else
if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
/* Disarm watchdog timer if no TX is pending */
sc->emac_watchdog_timer = 0;
}
static void
emac_rxeof(struct emac_softc *sc, int count)
{
struct ifnet *ifp;
struct mbuf *m, *m0;
uint32_t reg_val, rxcount;
int16_t len;
uint16_t status;
int i;
ifp = sc->emac_ifp;
for (; count > 0 &&
(ifp->if_drv_flags & IFF_DRV_RUNNING) != 0; count--) {
/*
 * Race warning: the first packet might arrive with
 * the interrupts disabled, but the second will fix it.
 */
rxcount = EMAC_READ_REG(sc, EMAC_RX_FBC);
if (!rxcount) {
/* Had one stuck? */
rxcount = EMAC_READ_REG(sc, EMAC_RX_FBC);
if (!rxcount)
return;
}
/* Check packet header */
reg_val = EMAC_READ_REG(sc, EMAC_RX_IO_DATA);
if (reg_val != EMAC_PACKET_HEADER) {
/* Packet header is wrong */
if (bootverbose)
if_printf(ifp, "wrong packet header\n");
/* Disable RX */
reg_val = EMAC_READ_REG(sc, EMAC_CTL);
reg_val &= ~EMAC_CTL_RX_EN;
EMAC_WRITE_REG(sc, EMAC_CTL, reg_val);
/* Flush RX FIFO */
reg_val = EMAC_READ_REG(sc, EMAC_RX_CTL);
reg_val |= EMAC_RX_FLUSH_FIFO;
EMAC_WRITE_REG(sc, EMAC_RX_CTL, reg_val);
for (i = 100; i > 0; i--) {
DELAY(100);
if ((EMAC_READ_REG(sc, EMAC_RX_CTL) &
EMAC_RX_FLUSH_FIFO) == 0)
break;
}
if (i == 0) {
device_printf(sc->emac_dev,
"flush FIFO timeout\n");
/* Reinitialize controller */
emac_init_locked(sc);
return;
}
/* Enable RX */
reg_val = EMAC_READ_REG(sc, EMAC_CTL);
reg_val |= EMAC_CTL_RX_EN;
EMAC_WRITE_REG(sc, EMAC_CTL, reg_val);
return;
}
/* Get packet size and status */
reg_val = EMAC_READ_REG(sc, EMAC_RX_IO_DATA);
len = reg_val & 0xffff;
status = (reg_val >> 16) & 0xffff;
if (len < 64 || (status & EMAC_PKT_OK) == 0) {
if (bootverbose)
if_printf(ifp,
"bad packet: len = %i status = %i\n",
len, status);
if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
emac_drain_rxfifo(sc);
continue;
}
#if 0
if (status & (EMAC_CRCERR | EMAC_LENERR)) {
good_packet = 0;
if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
if (status & EMAC_CRCERR)
if_printf(ifp, "crc error\n");
if (status & EMAC_LENERR)
if_printf(ifp, "length error\n");
}
#endif
m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
if (m == NULL) {
emac_drain_rxfifo(sc);
return;
}
m->m_len = m->m_pkthdr.len = MCLBYTES;
/* Copy entire frame to mbuf first. */
bus_space_read_multi_4(sc->emac_tag, sc->emac_handle,
EMAC_RX_IO_DATA, mtod(m, uint32_t *), roundup2(len, 4) / 4);
m->m_pkthdr.rcvif = ifp;
m->m_len = m->m_pkthdr.len = len - ETHER_CRC_LEN;
/*
 * The EMAC controller needs strict alignment, so to avoid
 * copying over an entire frame to align, we allocate
 * a new mbuf and copy the ethernet header + IP header to
 * the new mbuf. The new mbuf is prepended to the
 * existing mbuf chain.
 */
if (m->m_len <= (MHLEN - ETHER_HDR_LEN)) {
bcopy(m->m_data, m->m_data + ETHER_HDR_LEN, m->m_len);
m->m_data += ETHER_HDR_LEN;
} else if (m->m_len <= (MCLBYTES - ETHER_HDR_LEN) &&
m->m_len > (MHLEN - ETHER_HDR_LEN)) {
MGETHDR(m0, M_NOWAIT, MT_DATA);
if (m0 != NULL) {
len = ETHER_HDR_LEN + m->m_pkthdr.l2hlen;
bcopy(m->m_data, m0->m_data, len);
m->m_data += len;
m->m_len -= len;
m0->m_len = len;
M_MOVE_PKTHDR(m0, m);
m0->m_next = m;
m = m0;
} else {
if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
m_freem(m);
m = NULL;
continue;
}
} else if (m->m_len > EMAC_MAC_MAXF) {
if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
m_freem(m);
m = NULL;
continue;
}
if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
EMAC_UNLOCK(sc);
(*ifp->if_input)(ifp, m);
EMAC_LOCK(sc);
}
}
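/*
 * Alignment sketch (editorial): mbuf data areas start 4-byte aligned,
 * and the Ethernet header is 14 bytes, so an in-place frame would
 * leave the IP header at offset 14 (misaligned).  Shifting the frame
 * forward by ETHER_HDR_LEN, as emac_rxeof() does for small frames,
 * moves the IP header to offset 14 + 14 = 28, which is 4-byte aligned.
 */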
static void
emac_watchdog(struct emac_softc *sc)
{
struct ifnet *ifp;
EMAC_ASSERT_LOCKED(sc);
if (sc->emac_watchdog_timer == 0 || --sc->emac_watchdog_timer)
return;
ifp = sc->emac_ifp;
if (sc->emac_link == 0) {
if (bootverbose)
if_printf(sc->emac_ifp, "watchdog timeout "
"(missed link)\n");
} else
if_printf(sc->emac_ifp, "watchdog timeout -- resetting\n");
if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
emac_init_locked(sc);
if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
emac_start_locked(ifp);
}
static void
emac_tick(void *arg)
{
struct emac_softc *sc;
struct mii_data *mii;
sc = (struct emac_softc *)arg;
mii = device_get_softc(sc->emac_miibus);
mii_tick(mii);
emac_watchdog(sc);
callout_reset(&sc->emac_tick_ch, hz, emac_tick, sc);
}
static void
emac_init(void *xcs)
{
struct emac_softc *sc;
sc = (struct emac_softc *)xcs;
EMAC_LOCK(sc);
emac_init_locked(sc);
EMAC_UNLOCK(sc);
}
static void
emac_init_locked(struct emac_softc *sc)
{
struct ifnet *ifp;
struct mii_data *mii;
uint32_t reg_val;
uint8_t *eaddr;
EMAC_ASSERT_LOCKED(sc);
ifp = sc->emac_ifp;
if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
return;
/* Flush RX FIFO */
reg_val = EMAC_READ_REG(sc, EMAC_RX_CTL);
reg_val |= EMAC_RX_FLUSH_FIFO;
EMAC_WRITE_REG(sc, EMAC_RX_CTL, reg_val);
DELAY(1);
/* Soft reset MAC */
reg_val = EMAC_READ_REG(sc, EMAC_MAC_CTL0);
reg_val &= (~EMAC_MAC_CTL0_SOFT_RST);
EMAC_WRITE_REG(sc, EMAC_MAC_CTL0, reg_val);
/* Set MII clock */
reg_val = EMAC_READ_REG(sc, EMAC_MAC_MCFG);
reg_val &= (~(0xf << 2));
reg_val |= (0xd << 2);
EMAC_WRITE_REG(sc, EMAC_MAC_MCFG, reg_val);
/* Clear RX counter */
EMAC_WRITE_REG(sc, EMAC_RX_FBC, 0);
/* Disable all interrupt and clear interrupt status */
EMAC_WRITE_REG(sc, EMAC_INT_CTL, 0);
reg_val = EMAC_READ_REG(sc, EMAC_INT_STA);
EMAC_WRITE_REG(sc, EMAC_INT_STA, reg_val);
DELAY(1);
/* Set up TX */
reg_val = EMAC_READ_REG(sc, EMAC_TX_MODE);
reg_val |= EMAC_TX_AB_M;
reg_val &= EMAC_TX_TM;
EMAC_WRITE_REG(sc, EMAC_TX_MODE, reg_val);
/* Set up RX */
reg_val = EMAC_READ_REG(sc, EMAC_RX_CTL);
reg_val |= EMAC_RX_SETUP;
reg_val &= EMAC_RX_TM;
EMAC_WRITE_REG(sc, EMAC_RX_CTL, reg_val);
/* Set up MAC CTL0. */
reg_val = EMAC_READ_REG(sc, EMAC_MAC_CTL0);
reg_val |= EMAC_MAC_CTL0_SETUP;
EMAC_WRITE_REG(sc, EMAC_MAC_CTL0, reg_val);
/* Set up MAC CTL1. */
reg_val = EMAC_READ_REG(sc, EMAC_MAC_CTL1);
reg_val |= EMAC_MAC_CTL1_SETUP;
EMAC_WRITE_REG(sc, EMAC_MAC_CTL1, reg_val);
/* Set up IPGT */
EMAC_WRITE_REG(sc, EMAC_MAC_IPGT, EMAC_MAC_IPGT_FD);
/* Set up IPGR */
EMAC_WRITE_REG(sc, EMAC_MAC_IPGR, EMAC_MAC_NBTB_IPG2 |
(EMAC_MAC_NBTB_IPG1 << 8));
/* Set up collision window */
EMAC_WRITE_REG(sc, EMAC_MAC_CLRT, EMAC_MAC_RM | (EMAC_MAC_CW << 8));
/* Set up Max Frame Length */
EMAC_WRITE_REG(sc, EMAC_MAC_MAXF, EMAC_MAC_MFL);
/* Setup ethernet address */
eaddr = IF_LLADDR(ifp);
EMAC_WRITE_REG(sc, EMAC_MAC_A1, eaddr[0] << 16 |
eaddr[1] << 8 | eaddr[2]);
EMAC_WRITE_REG(sc, EMAC_MAC_A0, eaddr[3] << 16 |
eaddr[4] << 8 | eaddr[5]);
/* Setup rx filter */
emac_set_rx_mode(sc);
/* Enable RX/TX0/RX Hlevel interrupt */
reg_val = EMAC_READ_REG(sc, EMAC_INT_CTL);
reg_val |= EMAC_INT_EN;
EMAC_WRITE_REG(sc, EMAC_INT_CTL, reg_val);
ifp->if_drv_flags |= IFF_DRV_RUNNING;
ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
sc->emac_link = 0;
/* Switch to the current media. */
mii = device_get_softc(sc->emac_miibus);
mii_mediachg(mii);
callout_reset(&sc->emac_tick_ch, hz, emac_tick, sc);
}
static void
emac_start(struct ifnet *ifp)
{
struct emac_softc *sc;
sc = ifp->if_softc;
EMAC_LOCK(sc);
emac_start_locked(ifp);
EMAC_UNLOCK(sc);
}
static void
emac_start_locked(struct ifnet *ifp)
{
struct emac_softc *sc;
struct mbuf *m, *m0;
uint32_t fifo, reg;
sc = ifp->if_softc;
if (ifp->if_drv_flags & IFF_DRV_OACTIVE)
return;
if (sc->emac_fifo_mask == (EMAC_TX_FIFO0 | EMAC_TX_FIFO1))
return;
if (sc->emac_link == 0)
return;
IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
if (m == NULL)
return;
/* Select channel */
if (sc->emac_fifo_mask & EMAC_TX_FIFO0)
fifo = 1;
else
fifo = 0;
sc->emac_fifo_mask |= (1 << fifo);
if (sc->emac_fifo_mask == (EMAC_TX_FIFO0 | EMAC_TX_FIFO1))
ifp->if_drv_flags |= IFF_DRV_OACTIVE;
EMAC_WRITE_REG(sc, EMAC_TX_INS, fifo);
/*
* Emac controller wants 4 byte aligned TX buffers.
* We have to copy pretty much all the time.
*/
if (m->m_next != NULL || (mtod(m, uintptr_t) & 3) != 0) {
m0 = m_defrag(m, M_NOWAIT);
if (m0 == NULL) {
m_freem(m);
m = NULL;
return;
}
m = m0;
}
/* Write data */
bus_space_write_multi_4(sc->emac_tag, sc->emac_handle,
EMAC_TX_IO_DATA, mtod(m, uint32_t *),
roundup2(m->m_len, 4) / 4);
/* Send the data length. */
reg = (fifo == 0) ? EMAC_TX_PL0 : EMAC_TX_PL1;
EMAC_WRITE_REG(sc, reg, m->m_len);
/* Start transmitting from FIFO to PHY. */
reg = (fifo == 0) ? EMAC_TX_CTL0 : EMAC_TX_CTL1;
EMAC_WRITE_REG(sc, reg, EMAC_READ_REG(sc, reg) | 1);
/* Set timeout */
sc->emac_watchdog_timer = 5;
/* Data has been sent to hardware; it is okay to free the mbuf now. */
BPF_MTAP(ifp, m);
m_freem(m);
}
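/*
 * Note (editorial): m_defrag() copies the chain into a single cluster
 * mbuf, which satisfies both constraints checked above at once -- the
 * data becomes contiguous and cluster storage is at least 4-byte
 * aligned, so bus_space_write_multi_4() can stream the whole frame.
 */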
static void
emac_stop_locked(struct emac_softc *sc)
{
struct ifnet *ifp;
uint32_t reg_val;
EMAC_ASSERT_LOCKED(sc);
ifp = sc->emac_ifp;
ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
sc->emac_link = 0;
/* Disable all interrupt and clear interrupt status */
EMAC_WRITE_REG(sc, EMAC_INT_CTL, 0);
reg_val = EMAC_READ_REG(sc, EMAC_INT_STA);
EMAC_WRITE_REG(sc, EMAC_INT_STA, reg_val);
/* Disable RX/TX */
reg_val = EMAC_READ_REG(sc, EMAC_CTL);
reg_val &= ~(EMAC_CTL_RST | EMAC_CTL_TX_EN | EMAC_CTL_RX_EN);
EMAC_WRITE_REG(sc, EMAC_CTL, reg_val);
callout_stop(&sc->emac_tick_ch);
}
static void
emac_intr(void *arg)
{
struct emac_softc *sc;
struct ifnet *ifp;
uint32_t reg_val;
sc = (struct emac_softc *)arg;
EMAC_LOCK(sc);
/* Disable all interrupts */
EMAC_WRITE_REG(sc, EMAC_INT_CTL, 0);
/* Get EMAC interrupt status */
reg_val = EMAC_READ_REG(sc, EMAC_INT_STA);
/* Clear ISR status */
EMAC_WRITE_REG(sc, EMAC_INT_STA, reg_val);
/* Received incoming packet */
if (reg_val & EMAC_INT_STA_RX)
emac_rxeof(sc, sc->emac_rx_process_limit);
/* Transmit Interrupt check */
if (reg_val & EMAC_INT_STA_TX) {
emac_txeof(sc, reg_val);
ifp = sc->emac_ifp;
if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
emac_start_locked(ifp);
}
/* Re-enable interrupt mask */
reg_val = EMAC_READ_REG(sc, EMAC_INT_CTL);
reg_val |= EMAC_INT_EN;
EMAC_WRITE_REG(sc, EMAC_INT_CTL, reg_val);
EMAC_UNLOCK(sc);
}
static int
emac_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
struct emac_softc *sc;
struct mii_data *mii;
struct ifreq *ifr;
int error = 0;
sc = ifp->if_softc;
ifr = (struct ifreq *)data;
switch (command) {
case SIOCSIFFLAGS:
EMAC_LOCK(sc);
if (ifp->if_flags & IFF_UP) {
if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
if ((ifp->if_flags ^ sc->emac_if_flags) &
(IFF_PROMISC | IFF_ALLMULTI))
emac_set_rx_mode(sc);
} else
emac_init_locked(sc);
} else {
if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
emac_stop_locked(sc);
}
sc->emac_if_flags = ifp->if_flags;
EMAC_UNLOCK(sc);
break;
case SIOCADDMULTI:
case SIOCDELMULTI:
EMAC_LOCK(sc);
if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
emac_set_rx_mode(sc);
}
EMAC_UNLOCK(sc);
break;
case SIOCGIFMEDIA:
case SIOCSIFMEDIA:
mii = device_get_softc(sc->emac_miibus);
error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
break;
default:
error = ether_ioctl(ifp, command, data);
break;
}
return (error);
}
static int
emac_probe(device_t dev)
{
if (!ofw_bus_status_okay(dev))
return (ENXIO);
if (!ofw_bus_is_compatible(dev, "allwinner,sun4i-a10-emac"))
return (ENXIO);
device_set_desc(dev, "A10/A20 EMAC ethernet controller");
return (BUS_PROBE_DEFAULT);
}
static int
emac_detach(device_t dev)
{
struct emac_softc *sc;
sc = device_get_softc(dev);
sc->emac_ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
if (device_is_attached(dev)) {
ether_ifdetach(sc->emac_ifp);
EMAC_LOCK(sc);
emac_stop_locked(sc);
EMAC_UNLOCK(sc);
callout_drain(&sc->emac_tick_ch);
}
if (sc->emac_intrhand != NULL)
bus_teardown_intr(sc->emac_dev, sc->emac_irq,
sc->emac_intrhand);
if (sc->emac_miibus != NULL) {
device_delete_child(sc->emac_dev, sc->emac_miibus);
bus_generic_detach(sc->emac_dev);
}
if (sc->emac_clk != NULL)
clk_disable(sc->emac_clk);
if (sc->emac_res != NULL)
bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->emac_res);
if (sc->emac_irq != NULL)
bus_release_resource(dev, SYS_RES_IRQ, 0, sc->emac_irq);
if (sc->emac_ifp != NULL)
if_free(sc->emac_ifp);
if (mtx_initialized(&sc->emac_mtx))
mtx_destroy(&sc->emac_mtx);
return (0);
}
static int
emac_shutdown(device_t dev)
{
return (emac_suspend(dev));
}
static int
emac_suspend(device_t dev)
{
struct emac_softc *sc;
struct ifnet *ifp;
sc = device_get_softc(dev);
EMAC_LOCK(sc);
ifp = sc->emac_ifp;
if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
emac_stop_locked(sc);
EMAC_UNLOCK(sc);
return (0);
}
static int
emac_resume(device_t dev)
{
struct emac_softc *sc;
struct ifnet *ifp;
sc = device_get_softc(dev);
EMAC_LOCK(sc);
ifp = sc->emac_ifp;
if ((ifp->if_flags & IFF_UP) != 0) {
ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
emac_init_locked(sc);
}
EMAC_UNLOCK(sc);
return (0);
}
static int
emac_attach(device_t dev)
{
struct emac_softc *sc;
struct ifnet *ifp;
int error, rid;
uint8_t eaddr[ETHER_ADDR_LEN];
sc = device_get_softc(dev);
sc->emac_dev = dev;
error = 0;
mtx_init(&sc->emac_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
MTX_DEF);
callout_init_mtx(&sc->emac_tick_ch, &sc->emac_mtx, 0);
rid = 0;
sc->emac_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
RF_ACTIVE);
if (sc->emac_res == NULL) {
device_printf(dev, "unable to map memory\n");
error = ENXIO;
goto fail;
}
sc->emac_tag = rman_get_bustag(sc->emac_res);
sc->emac_handle = rman_get_bushandle(sc->emac_res);
rid = 0;
sc->emac_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
RF_SHAREABLE | RF_ACTIVE);
if (sc->emac_irq == NULL) {
device_printf(dev, "cannot allocate IRQ resources.\n");
error = ENXIO;
goto fail;
}
/* Create device sysctl node. */
SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
OID_AUTO, "process_limit",
CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
&sc->emac_rx_process_limit, 0, sysctl_hw_emac_proc_limit, "I",
"max number of Rx events to process");
sc->emac_rx_process_limit = EMAC_PROC_DEFAULT;
error = resource_int_value(device_get_name(dev), device_get_unit(dev),
"process_limit", &sc->emac_rx_process_limit);
if (error == 0) {
if (sc->emac_rx_process_limit < EMAC_PROC_MIN ||
sc->emac_rx_process_limit > EMAC_PROC_MAX) {
device_printf(dev, "process_limit value out of range; "
"using default: %d\n", EMAC_PROC_DEFAULT);
sc->emac_rx_process_limit = EMAC_PROC_DEFAULT;
}
}
/* Setup EMAC */
error = emac_sys_setup(sc);
if (error != 0)
goto fail;
emac_reset(sc);
ifp = sc->emac_ifp = if_alloc(IFT_ETHER);
- if (ifp == NULL) {
- device_printf(dev, "unable to allocate ifp\n");
- error = ENOSPC;
- goto fail;
- }
ifp->if_softc = sc;
/* Setup MII */
error = mii_attach(dev, &sc->emac_miibus, ifp, emac_ifmedia_upd,
emac_ifmedia_sts, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 0);
if (error != 0) {
device_printf(dev, "PHY probe failed\n");
goto fail;
}
if_initname(ifp, device_get_name(dev), device_get_unit(dev));
ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
ifp->if_start = emac_start;
ifp->if_ioctl = emac_ioctl;
ifp->if_init = emac_init;
IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN);
/* Get MAC address */
emac_get_hwaddr(sc, eaddr);
ether_ifattach(ifp, eaddr);
/* VLAN capability setup. */
ifp->if_capabilities |= IFCAP_VLAN_MTU;
ifp->if_capenable = ifp->if_capabilities;
/* Tell the upper layer we support VLAN over-sized frames. */
ifp->if_hdrlen = sizeof(struct ether_vlan_header);
error = bus_setup_intr(dev, sc->emac_irq, INTR_TYPE_NET | INTR_MPSAFE,
NULL, emac_intr, sc, &sc->emac_intrhand);
if (error != 0) {
device_printf(dev, "could not set up interrupt handler.\n");
ether_ifdetach(ifp);
goto fail;
}
fail:
if (error != 0)
emac_detach(dev);
return (error);
}
static boolean_t
emac_miibus_iowait(struct emac_softc *sc)
{
uint32_t timeout;
for (timeout = 100; timeout != 0; --timeout) {
DELAY(100);
if ((EMAC_READ_REG(sc, EMAC_MAC_MIND) & 0x1) == 0)
return (true);
}
return (false);
}
/*
* The MII bus interface
*/
static int
emac_miibus_readreg(device_t dev, int phy, int reg)
{
struct emac_softc *sc;
int rval;
sc = device_get_softc(dev);
/* Issue phy address and reg */
EMAC_WRITE_REG(sc, EMAC_MAC_MADR, (phy << 8) | reg);
/* Pull up the phy io line */
EMAC_WRITE_REG(sc, EMAC_MAC_MCMD, 0x1);
if (!emac_miibus_iowait(sc)) {
device_printf(dev, "timeout waiting for mii read\n");
return (0);
}
/* Push down the phy io line */
EMAC_WRITE_REG(sc, EMAC_MAC_MCMD, 0x0);
/* Read data */
rval = EMAC_READ_REG(sc, EMAC_MAC_MRDD);
return (rval);
}
static int
emac_miibus_writereg(device_t dev, int phy, int reg, int data)
{
struct emac_softc *sc;
sc = device_get_softc(dev);
/* Issue phy address and reg */
EMAC_WRITE_REG(sc, EMAC_MAC_MADR, (phy << 8) | reg);
/* Write data */
EMAC_WRITE_REG(sc, EMAC_MAC_MWTD, data);
/* Pull up the phy io line */
EMAC_WRITE_REG(sc, EMAC_MAC_MCMD, 0x1);
if (!emac_miibus_iowait(sc)) {
device_printf(dev, "timeout waiting for mii write\n");
return (0);
}
/* Push down the phy io line */
EMAC_WRITE_REG(sc, EMAC_MAC_MCMD, 0x0);
return (0);
}
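/*
 * Protocol sketch (editorial): both MII accessors follow the same
 * sequence -- load EMAC_MAC_MADR with (phy << 8) | reg, start the
 * transaction by writing 1 to EMAC_MAC_MCMD, poll the busy bit in
 * EMAC_MAC_MIND via emac_miibus_iowait(), then clear EMAC_MAC_MCMD.
 */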
static void
emac_miibus_statchg(device_t dev)
{
struct emac_softc *sc;
struct mii_data *mii;
struct ifnet *ifp;
uint32_t reg_val;
sc = device_get_softc(dev);
mii = device_get_softc(sc->emac_miibus);
ifp = sc->emac_ifp;
if (mii == NULL || ifp == NULL ||
(ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
return;
sc->emac_link = 0;
if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
(IFM_ACTIVE | IFM_AVALID)) {
switch (IFM_SUBTYPE(mii->mii_media_active)) {
case IFM_10_T:
case IFM_100_TX:
sc->emac_link = 1;
break;
default:
break;
}
}
/* Program MACs with resolved speed/duplex. */
if (sc->emac_link != 0) {
reg_val = EMAC_READ_REG(sc, EMAC_MAC_IPGT);
if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
reg_val &= ~EMAC_MAC_IPGT_HD;
reg_val |= EMAC_MAC_IPGT_FD;
} else {
reg_val &= ~EMAC_MAC_IPGT_FD;
reg_val |= EMAC_MAC_IPGT_HD;
}
EMAC_WRITE_REG(sc, EMAC_MAC_IPGT, reg_val);
/* Enable RX/TX */
reg_val = EMAC_READ_REG(sc, EMAC_CTL);
reg_val |= EMAC_CTL_RST | EMAC_CTL_TX_EN | EMAC_CTL_RX_EN;
EMAC_WRITE_REG(sc, EMAC_CTL, reg_val);
} else {
/* Disable RX/TX */
reg_val = EMAC_READ_REG(sc, EMAC_CTL);
reg_val &= ~(EMAC_CTL_RST | EMAC_CTL_TX_EN | EMAC_CTL_RX_EN);
EMAC_WRITE_REG(sc, EMAC_CTL, reg_val);
}
}
static int
emac_ifmedia_upd(struct ifnet *ifp)
{
struct emac_softc *sc;
struct mii_data *mii;
struct mii_softc *miisc;
int error;
sc = ifp->if_softc;
mii = device_get_softc(sc->emac_miibus);
EMAC_LOCK(sc);
LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
PHY_RESET(miisc);
error = mii_mediachg(mii);
EMAC_UNLOCK(sc);
return (error);
}
static void
emac_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
struct emac_softc *sc;
struct mii_data *mii;
sc = ifp->if_softc;
mii = device_get_softc(sc->emac_miibus);
EMAC_LOCK(sc);
mii_pollstat(mii);
ifmr->ifm_active = mii->mii_media_active;
ifmr->ifm_status = mii->mii_media_status;
EMAC_UNLOCK(sc);
}
static device_method_t emac_methods[] = {
/* Device interface */
DEVMETHOD(device_probe, emac_probe),
DEVMETHOD(device_attach, emac_attach),
DEVMETHOD(device_detach, emac_detach),
DEVMETHOD(device_shutdown, emac_shutdown),
DEVMETHOD(device_suspend, emac_suspend),
DEVMETHOD(device_resume, emac_resume),
/* bus interface, for miibus */
DEVMETHOD(bus_print_child, bus_generic_print_child),
DEVMETHOD(bus_driver_added, bus_generic_driver_added),
/* MII interface */
DEVMETHOD(miibus_readreg, emac_miibus_readreg),
DEVMETHOD(miibus_writereg, emac_miibus_writereg),
DEVMETHOD(miibus_statchg, emac_miibus_statchg),
DEVMETHOD_END
};
static driver_t emac_driver = {
"emac",
emac_methods,
sizeof(struct emac_softc)
};
static devclass_t emac_devclass;
DRIVER_MODULE(emac, simplebus, emac_driver, emac_devclass, 0, 0);
DRIVER_MODULE(miibus, emac, miibus_driver, miibus_devclass, 0, 0);
MODULE_DEPEND(emac, miibus, 1, 1, 1);
MODULE_DEPEND(emac, ether, 1, 1, 1);
static int
sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
{
int error, value;
if (arg1 == NULL)
return (EINVAL);
value = *(int *)arg1;
error = sysctl_handle_int(oidp, &value, 0, req);
if (error || req->newptr == NULL)
return (error);
if (value < low || value > high)
return (EINVAL);
*(int *)arg1 = value;
return (0);
}
static int
sysctl_hw_emac_proc_limit(SYSCTL_HANDLER_ARGS)
{
return (sysctl_int_range(oidp, arg1, arg2, req,
EMAC_PROC_MIN, EMAC_PROC_MAX));
}
diff --git a/sys/arm/ti/cpsw/if_cpsw.c b/sys/arm/ti/cpsw/if_cpsw.c
index 81c2e29e0f8c..9ab73cd62527 100644
--- a/sys/arm/ti/cpsw/if_cpsw.c
+++ b/sys/arm/ti/cpsw/if_cpsw.c
@@ -1,3026 +1,3021 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2012 Damjan Marion <dmarion@Freebsd.org>
* Copyright (c) 2016 Rubicon Communications, LLC (Netgate)
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/*
* TI Common Platform Ethernet Switch (CPSW) Driver
* Found in TI8148 "DaVinci" and AM335x "Sitara" SoCs.
*
* This controller is documented in the AM335x Technical Reference
* Manual, in the TMS320DM814x DaVinci Digital Video Processors TRM
* and in the TMS320C6452 3 Port Switch Ethernet Subsystem TRM.
*
* It is basically a single Ethernet port (port 0) wired internally to
* a 3-port store-and-forward switch connected to two independent
* "sliver" controllers (port 1 and port 2). You can operate the
* controller in a variety of different ways by suitably configuring
* the slivers and the Address Lookup Engine (ALE) that routes packets
* between the ports.
*
* This code was developed and tested on a BeagleBone with
* an AM335x SoC.
*/
#include <sys/cdefs.h>
#include "opt_cpsw.h"
#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <machine/stdarg.h>
#include <net/ethernet.h>
#include <net/bpf.h>
#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <dev/extres/syscon/syscon.h>
#include "syscon_if.h"
#include <arm/ti/am335x/am335x_scm.h>
#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>
#include <dev/fdt/fdt_common.h>
#ifdef CPSW_ETHERSWITCH
#include <dev/etherswitch/etherswitch.h>
#include "etherswitch_if.h"
#endif
#include "if_cpswreg.h"
#include "if_cpswvar.h"
#include "miibus_if.h"
/* Device probe/attach/detach. */
static int cpsw_probe(device_t);
static int cpsw_attach(device_t);
static int cpsw_detach(device_t);
static int cpswp_probe(device_t);
static int cpswp_attach(device_t);
static int cpswp_detach(device_t);
static phandle_t cpsw_get_node(device_t, device_t);
/* Device Init/shutdown. */
static int cpsw_shutdown(device_t);
static void cpswp_init(void *);
static void cpswp_init_locked(void *);
static void cpswp_stop_locked(struct cpswp_softc *);
/* Device Suspend/Resume. */
static int cpsw_suspend(device_t);
static int cpsw_resume(device_t);
/* Ioctl. */
static int cpswp_ioctl(struct ifnet *, u_long command, caddr_t data);
static int cpswp_miibus_readreg(device_t, int phy, int reg);
static int cpswp_miibus_writereg(device_t, int phy, int reg, int value);
static void cpswp_miibus_statchg(device_t);
/* Send/Receive packets. */
static void cpsw_intr_rx(void *arg);
static struct mbuf *cpsw_rx_dequeue(struct cpsw_softc *);
static void cpsw_rx_enqueue(struct cpsw_softc *);
static void cpswp_start(struct ifnet *);
static void cpsw_intr_tx(void *);
static void cpswp_tx_enqueue(struct cpswp_softc *);
static int cpsw_tx_dequeue(struct cpsw_softc *);
/* Misc interrupts and watchdog. */
static void cpsw_intr_rx_thresh(void *);
static void cpsw_intr_misc(void *);
static void cpswp_tick(void *);
static void cpswp_ifmedia_sts(struct ifnet *, struct ifmediareq *);
static int cpswp_ifmedia_upd(struct ifnet *);
static void cpsw_tx_watchdog(void *);
/* ALE support */
static void cpsw_ale_read_entry(struct cpsw_softc *, uint16_t, uint32_t *);
static void cpsw_ale_write_entry(struct cpsw_softc *, uint16_t, uint32_t *);
static int cpsw_ale_mc_entry_set(struct cpsw_softc *, uint8_t, int, uint8_t *);
static void cpsw_ale_dump_table(struct cpsw_softc *);
static int cpsw_ale_update_vlan_table(struct cpsw_softc *, int, int, int, int,
int);
static int cpswp_ale_update_addresses(struct cpswp_softc *, int);
/* Statistics and sysctls. */
static void cpsw_add_sysctls(struct cpsw_softc *);
static void cpsw_stats_collect(struct cpsw_softc *);
static int cpsw_stats_sysctl(SYSCTL_HANDLER_ARGS);
#ifdef CPSW_ETHERSWITCH
static etherswitch_info_t *cpsw_getinfo(device_t);
static int cpsw_getport(device_t, etherswitch_port_t *);
static int cpsw_setport(device_t, etherswitch_port_t *);
static int cpsw_getconf(device_t, etherswitch_conf_t *);
static int cpsw_getvgroup(device_t, etherswitch_vlangroup_t *);
static int cpsw_setvgroup(device_t, etherswitch_vlangroup_t *);
static int cpsw_readreg(device_t, int);
static int cpsw_writereg(device_t, int, int);
static int cpsw_readphy(device_t, int, int);
static int cpsw_writephy(device_t, int, int, int);
#endif
/*
* Arbitrary limit on number of segments in an mbuf to be transmitted.
* Packets with more segments than this will be defragmented before
* they are queued.
*/
#define CPSW_TXFRAGS 16
/* Shared resources. */
static device_method_t cpsw_methods[] = {
/* Device interface */
DEVMETHOD(device_probe, cpsw_probe),
DEVMETHOD(device_attach, cpsw_attach),
DEVMETHOD(device_detach, cpsw_detach),
DEVMETHOD(device_shutdown, cpsw_shutdown),
DEVMETHOD(device_suspend, cpsw_suspend),
DEVMETHOD(device_resume, cpsw_resume),
/* Bus interface */
DEVMETHOD(bus_add_child, device_add_child_ordered),
/* OFW methods */
DEVMETHOD(ofw_bus_get_node, cpsw_get_node),
#ifdef CPSW_ETHERSWITCH
/* etherswitch interface */
DEVMETHOD(etherswitch_getinfo, cpsw_getinfo),
DEVMETHOD(etherswitch_readreg, cpsw_readreg),
DEVMETHOD(etherswitch_writereg, cpsw_writereg),
DEVMETHOD(etherswitch_readphyreg, cpsw_readphy),
DEVMETHOD(etherswitch_writephyreg, cpsw_writephy),
DEVMETHOD(etherswitch_getport, cpsw_getport),
DEVMETHOD(etherswitch_setport, cpsw_setport),
DEVMETHOD(etherswitch_getvgroup, cpsw_getvgroup),
DEVMETHOD(etherswitch_setvgroup, cpsw_setvgroup),
DEVMETHOD(etherswitch_getconf, cpsw_getconf),
#endif
DEVMETHOD_END
};
static driver_t cpsw_driver = {
"cpswss",
cpsw_methods,
sizeof(struct cpsw_softc),
};
static devclass_t cpsw_devclass;
DRIVER_MODULE(cpswss, simplebus, cpsw_driver, cpsw_devclass, 0, 0);
/* Port/Slave resources. */
static device_method_t cpswp_methods[] = {
/* Device interface */
DEVMETHOD(device_probe, cpswp_probe),
DEVMETHOD(device_attach, cpswp_attach),
DEVMETHOD(device_detach, cpswp_detach),
/* MII interface */
DEVMETHOD(miibus_readreg, cpswp_miibus_readreg),
DEVMETHOD(miibus_writereg, cpswp_miibus_writereg),
DEVMETHOD(miibus_statchg, cpswp_miibus_statchg),
DEVMETHOD_END
};
static driver_t cpswp_driver = {
"cpsw",
cpswp_methods,
sizeof(struct cpswp_softc),
};
static devclass_t cpswp_devclass;
#ifdef CPSW_ETHERSWITCH
DRIVER_MODULE(etherswitch, cpswss, etherswitch_driver, etherswitch_devclass, 0, 0);
MODULE_DEPEND(cpswss, etherswitch, 1, 1, 1);
#endif
DRIVER_MODULE(cpsw, cpswss, cpswp_driver, cpswp_devclass, 0, 0);
DRIVER_MODULE(miibus, cpsw, miibus_driver, miibus_devclass, 0, 0);
MODULE_DEPEND(cpsw, ether, 1, 1, 1);
MODULE_DEPEND(cpsw, miibus, 1, 1, 1);
#ifdef CPSW_ETHERSWITCH
static struct cpsw_vlangroups cpsw_vgroups[CPSW_VLANS];
#endif
static uint32_t slave_mdio_addr[] = { 0x4a100200, 0x4a100300 };
static struct resource_spec irq_res_spec[] = {
{ SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE },
{ SYS_RES_IRQ, 1, RF_ACTIVE | RF_SHAREABLE },
{ SYS_RES_IRQ, 2, RF_ACTIVE | RF_SHAREABLE },
{ SYS_RES_IRQ, 3, RF_ACTIVE | RF_SHAREABLE },
{ -1, 0 }
};
static struct {
void (*cb)(void *);
} cpsw_intr_cb[] = {
{ cpsw_intr_rx_thresh },
{ cpsw_intr_rx },
{ cpsw_intr_tx },
{ cpsw_intr_misc },
};
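/*
 * Note (editorial): the callbacks above are listed in the same order
 * as the irq_res_spec[] entries, because cpsw_intr_attach() pairs
 * sc->irq_res[i] with cpsw_intr_cb[i].cb purely by index.
 */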
/* Number of entries here must match size of stats
* array in struct cpswp_softc. */
static struct cpsw_stat {
int reg;
char *oid;
} cpsw_stat_sysctls[CPSW_SYSCTL_COUNT] = {
{0x00, "GoodRxFrames"},
{0x04, "BroadcastRxFrames"},
{0x08, "MulticastRxFrames"},
{0x0C, "PauseRxFrames"},
{0x10, "RxCrcErrors"},
{0x14, "RxAlignErrors"},
{0x18, "OversizeRxFrames"},
{0x1c, "RxJabbers"},
{0x20, "ShortRxFrames"},
{0x24, "RxFragments"},
{0x30, "RxOctets"},
{0x34, "GoodTxFrames"},
{0x38, "BroadcastTxFrames"},
{0x3c, "MulticastTxFrames"},
{0x40, "PauseTxFrames"},
{0x44, "DeferredTxFrames"},
{0x48, "CollisionsTxFrames"},
{0x4c, "SingleCollisionTxFrames"},
{0x50, "MultipleCollisionTxFrames"},
{0x54, "ExcessiveCollisions"},
{0x58, "LateCollisions"},
{0x5c, "TxUnderrun"},
{0x60, "CarrierSenseErrors"},
{0x64, "TxOctets"},
{0x68, "RxTx64OctetFrames"},
{0x6c, "RxTx65to127OctetFrames"},
{0x70, "RxTx128to255OctetFrames"},
{0x74, "RxTx256to511OctetFrames"},
{0x78, "RxTx512to1024OctetFrames"},
{0x7c, "RxTx1024upOctetFrames"},
{0x80, "NetOctets"},
{0x84, "RxStartOfFrameOverruns"},
{0x88, "RxMiddleOfFrameOverruns"},
{0x8c, "RxDmaOverruns"}
};
/*
* Basic debug support.
*/
static void
cpsw_debugf_head(const char *funcname)
{
int t = (int)(time_second % (24 * 60 * 60));
printf("%02d:%02d:%02d %s ", t / (60 * 60), (t / 60) % 60, t % 60, funcname);
}
static void
cpsw_debugf(const char *fmt, ...)
{
va_list ap;
va_start(ap, fmt);
vprintf(fmt, ap);
va_end(ap);
printf("\n");
}
#define CPSW_DEBUGF(_sc, a) do { \
if ((_sc)->debug) { \
cpsw_debugf_head(__func__); \
cpsw_debugf a; \
} \
} while (0)
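/*
 * Usage sketch (editorial): CPSW_DEBUGF(sc, ("sent %d bytes", len))
 * expands to a timestamped "HH:MM:SS funcname sent N bytes" line, and
 * only when sc->debug is nonzero; the extra parentheses let the macro
 * forward a full printf-style argument list.
 */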
/*
* Locking macros
*/
#define CPSW_TX_LOCK(sc) do { \
mtx_assert(&(sc)->rx.lock, MA_NOTOWNED); \
mtx_lock(&(sc)->tx.lock); \
} while (0)
#define CPSW_TX_UNLOCK(sc) mtx_unlock(&(sc)->tx.lock)
#define CPSW_TX_LOCK_ASSERT(sc) mtx_assert(&(sc)->tx.lock, MA_OWNED)
#define CPSW_RX_LOCK(sc) do { \
mtx_assert(&(sc)->tx.lock, MA_NOTOWNED); \
mtx_lock(&(sc)->rx.lock); \
} while (0)
#define CPSW_RX_UNLOCK(sc) mtx_unlock(&(sc)->rx.lock)
#define CPSW_RX_LOCK_ASSERT(sc) mtx_assert(&(sc)->rx.lock, MA_OWNED)
#define CPSW_PORT_LOCK(_sc) do { \
mtx_assert(&(_sc)->lock, MA_NOTOWNED); \
mtx_lock(&(_sc)->lock); \
} while (0)
#define CPSW_PORT_UNLOCK(_sc) mtx_unlock(&(_sc)->lock)
#define CPSW_PORT_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->lock, MA_OWNED)
/*
* Read/Write macros
*/
#define cpsw_read_4(_sc, _reg) bus_read_4((_sc)->mem_res, (_reg))
#define cpsw_write_4(_sc, _reg, _val) \
bus_write_4((_sc)->mem_res, (_reg), (_val))
#define cpsw_cpdma_bd_offset(i) (CPSW_CPPI_RAM_OFFSET + ((i)*16))
#define cpsw_cpdma_bd_paddr(sc, slot) \
BUS_SPACE_PHYSADDR(sc->mem_res, slot->bd_offset)
#define cpsw_cpdma_read_bd(sc, slot, val) \
bus_read_region_4(sc->mem_res, slot->bd_offset, (uint32_t *) val, 4)
#define cpsw_cpdma_write_bd(sc, slot, val) \
bus_write_region_4(sc->mem_res, slot->bd_offset, (uint32_t *) val, 4)
#define cpsw_cpdma_write_bd_next(sc, slot, next_slot) \
cpsw_write_4(sc, slot->bd_offset, cpsw_cpdma_bd_paddr(sc, next_slot))
#define cpsw_cpdma_write_bd_flags(sc, slot, val) \
bus_write_2(sc->mem_res, slot->bd_offset + 14, val)
#define cpsw_cpdma_read_bd_flags(sc, slot) \
bus_read_2(sc->mem_res, slot->bd_offset + 14)
#define cpsw_write_hdp_slot(sc, queue, slot) \
cpsw_write_4(sc, (queue)->hdp_offset, cpsw_cpdma_bd_paddr(sc, slot))
#define CP_OFFSET (CPSW_CPDMA_TX_CP(0) - CPSW_CPDMA_TX_HDP(0))
#define cpsw_read_cp(sc, queue) \
cpsw_read_4(sc, (queue)->hdp_offset + CP_OFFSET)
#define cpsw_write_cp(sc, queue, val) \
cpsw_write_4(sc, (queue)->hdp_offset + CP_OFFSET, (val))
#define cpsw_write_cp_slot(sc, queue, slot) \
cpsw_write_cp(sc, queue, cpsw_cpdma_bd_paddr(sc, slot))
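/*
 * Layout sketch (editorial): each CPDMA buffer descriptor occupies 16
 * bytes of CPPI RAM (hence the "(i)*16" in cpsw_cpdma_bd_offset), and
 * its 16-bit flags word lives at byte offset 14 of the descriptor,
 * which is why the flags accessors above use bus_read_2/bus_write_2
 * at slot->bd_offset + 14.
 */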
#if 0
/* XXX temporary function versions for debugging. */
static void
cpsw_write_hdp_slotX(struct cpsw_softc *sc, struct cpsw_queue *queue, struct cpsw_slot *slot)
{
uint32_t reg = queue->hdp_offset;
uint32_t v = cpsw_cpdma_bd_paddr(sc, slot);
CPSW_DEBUGF(("HDP <=== 0x%08x (was 0x%08x)", v, cpsw_read_4(sc, reg)));
cpsw_write_4(sc, reg, v);
}
static void
cpsw_write_cp_slotX(struct cpsw_softc *sc, struct cpsw_queue *queue, struct cpsw_slot *slot)
{
uint32_t v = cpsw_cpdma_bd_paddr(sc, slot);
CPSW_DEBUGF(("CP <=== 0x%08x (expecting 0x%08x)", v, cpsw_read_cp(sc, queue)));
cpsw_write_cp(sc, queue, v);
}
#endif
/*
* Expanded dump routines for verbose debugging.
*/
static void
cpsw_dump_slot(struct cpsw_softc *sc, struct cpsw_slot *slot)
{
static const char *flags[] = {"SOP", "EOP", "Owner", "EOQ",
"TDownCmplt", "PassCRC", "Long", "Short", "MacCtl", "Overrun",
"PktErr1", "PortEn/PktErr0", "RxVlanEncap", "Port2", "Port1",
"Port0"};
struct cpsw_cpdma_bd bd;
const char *sep;
int i;
cpsw_cpdma_read_bd(sc, slot, &bd);
printf("BD Addr : 0x%08x Next : 0x%08x\n",
cpsw_cpdma_bd_paddr(sc, slot), bd.next);
printf(" BufPtr: 0x%08x BufLen: 0x%08x\n", bd.bufptr, bd.buflen);
printf(" BufOff: 0x%08x PktLen: 0x%08x\n", bd.bufoff, bd.pktlen);
printf(" Flags: ");
sep = "";
for (i = 0; i < 16; ++i) {
if (bd.flags & (1 << (15 - i))) {
printf("%s%s", sep, flags[i]);
sep = ",";
}
}
printf("\n");
if (slot->mbuf) {
printf(" Ether: %14D\n",
(char *)(slot->mbuf->m_data), " ");
printf(" Packet: %16D\n",
(char *)(slot->mbuf->m_data) + 14, " ");
}
}
#define CPSW_DUMP_SLOT(cs, slot) do { \
IF_DEBUG(sc) { \
cpsw_dump_slot(sc, slot); \
} \
} while (0)
static void
cpsw_dump_queue(struct cpsw_softc *sc, struct cpsw_slots *q)
{
struct cpsw_slot *slot;
int i = 0;
int others = 0;
STAILQ_FOREACH(slot, q, next) {
if (i > CPSW_TXFRAGS)
++others;
else
cpsw_dump_slot(sc, slot);
++i;
}
if (others)
printf(" ... and %d more.\n", others);
printf("\n");
}
#define CPSW_DUMP_QUEUE(sc, q) do { \
IF_DEBUG(sc) { \
cpsw_dump_queue(sc, q); \
} \
} while (0)
static void
cpsw_init_slots(struct cpsw_softc *sc)
{
struct cpsw_slot *slot;
int i;
STAILQ_INIT(&sc->avail);
/* Put the slot descriptors onto the global avail list. */
for (i = 0; i < nitems(sc->_slots); i++) {
slot = &sc->_slots[i];
slot->bd_offset = cpsw_cpdma_bd_offset(i);
STAILQ_INSERT_TAIL(&sc->avail, slot, next);
}
}
static int
cpsw_add_slots(struct cpsw_softc *sc, struct cpsw_queue *queue, int requested)
{
const int max_slots = nitems(sc->_slots);
struct cpsw_slot *slot;
int i;
if (requested < 0)
requested = max_slots;
for (i = 0; i < requested; ++i) {
slot = STAILQ_FIRST(&sc->avail);
if (slot == NULL)
return (0);
if (bus_dmamap_create(sc->mbuf_dtag, 0, &slot->dmamap)) {
device_printf(sc->dev, "failed to create dmamap\n");
return (ENOMEM);
}
STAILQ_REMOVE_HEAD(&sc->avail, next);
STAILQ_INSERT_TAIL(&queue->avail, slot, next);
++queue->avail_queue_len;
++queue->queue_slots;
}
return (0);
}
static void
cpsw_free_slot(struct cpsw_softc *sc, struct cpsw_slot *slot)
{
int error;
if (slot->dmamap) {
if (slot->mbuf)
bus_dmamap_unload(sc->mbuf_dtag, slot->dmamap);
error = bus_dmamap_destroy(sc->mbuf_dtag, slot->dmamap);
KASSERT(error == 0, ("Mapping still active"));
slot->dmamap = NULL;
}
if (slot->mbuf) {
m_freem(slot->mbuf);
slot->mbuf = NULL;
}
}
static void
cpsw_reset(struct cpsw_softc *sc)
{
int i;
callout_stop(&sc->watchdog.callout);
/* Reset RMII/RGMII wrapper. */
cpsw_write_4(sc, CPSW_WR_SOFT_RESET, 1);
while (cpsw_read_4(sc, CPSW_WR_SOFT_RESET) & 1)
;
/* Disable TX and RX interrupts for all cores. */
for (i = 0; i < 3; ++i) {
cpsw_write_4(sc, CPSW_WR_C_RX_THRESH_EN(i), 0x00);
cpsw_write_4(sc, CPSW_WR_C_TX_EN(i), 0x00);
cpsw_write_4(sc, CPSW_WR_C_RX_EN(i), 0x00);
cpsw_write_4(sc, CPSW_WR_C_MISC_EN(i), 0x00);
}
/* Reset CPSW subsystem. */
cpsw_write_4(sc, CPSW_SS_SOFT_RESET, 1);
while (cpsw_read_4(sc, CPSW_SS_SOFT_RESET) & 1)
;
/* Reset sliver ports 1 and 2 */
for (i = 0; i < 2; i++) {
/* Reset */
cpsw_write_4(sc, CPSW_SL_SOFT_RESET(i), 1);
while (cpsw_read_4(sc, CPSW_SL_SOFT_RESET(i)) & 1)
;
}
/* Reset DMA controller. */
cpsw_write_4(sc, CPSW_CPDMA_SOFT_RESET, 1);
while (cpsw_read_4(sc, CPSW_CPDMA_SOFT_RESET) & 1)
;
/* Disable TX & RX DMA */
cpsw_write_4(sc, CPSW_CPDMA_TX_CONTROL, 0);
cpsw_write_4(sc, CPSW_CPDMA_RX_CONTROL, 0);
/* Clear all queues. */
for (i = 0; i < 8; i++) {
cpsw_write_4(sc, CPSW_CPDMA_TX_HDP(i), 0);
cpsw_write_4(sc, CPSW_CPDMA_RX_HDP(i), 0);
cpsw_write_4(sc, CPSW_CPDMA_TX_CP(i), 0);
cpsw_write_4(sc, CPSW_CPDMA_RX_CP(i), 0);
}
/* Clear all interrupt Masks */
cpsw_write_4(sc, CPSW_CPDMA_RX_INTMASK_CLEAR, 0xFFFFFFFF);
cpsw_write_4(sc, CPSW_CPDMA_TX_INTMASK_CLEAR, 0xFFFFFFFF);
}
static void
cpsw_init(struct cpsw_softc *sc)
{
struct cpsw_slot *slot;
uint32_t reg;
/* Disable the interrupt pacing. */
reg = cpsw_read_4(sc, CPSW_WR_INT_CONTROL);
reg &= ~(CPSW_WR_INT_PACE_EN | CPSW_WR_INT_PRESCALE_MASK);
cpsw_write_4(sc, CPSW_WR_INT_CONTROL, reg);
/* Clear ALE */
cpsw_write_4(sc, CPSW_ALE_CONTROL, CPSW_ALE_CTL_CLEAR_TBL);
/* Enable ALE */
reg = CPSW_ALE_CTL_ENABLE;
if (sc->dualemac)
reg |= CPSW_ALE_CTL_VLAN_AWARE;
cpsw_write_4(sc, CPSW_ALE_CONTROL, reg);
/* Set Host Port Mapping. */
cpsw_write_4(sc, CPSW_PORT_P0_CPDMA_TX_PRI_MAP, 0x76543210);
cpsw_write_4(sc, CPSW_PORT_P0_CPDMA_RX_CH_MAP, 0);
/* Initialize ALE: set host port to forwarding(3). */
cpsw_write_4(sc, CPSW_ALE_PORTCTL(0),
ALE_PORTCTL_INGRESS | ALE_PORTCTL_FORWARD);
cpsw_write_4(sc, CPSW_SS_PTYPE, 0);
/* Enable statistics for ports 0, 1 and 2 */
cpsw_write_4(sc, CPSW_SS_STAT_PORT_EN, 7);
/* Turn off flow control. */
cpsw_write_4(sc, CPSW_SS_FLOW_CONTROL, 0);
/* Offset RX buffers by 2 bytes so the IP header is 4-byte aligned */
cpsw_write_4(sc, CPSW_CPDMA_RX_BUFFER_OFFSET, 2);
/* Initialize RX Buffer Descriptors */
cpsw_write_4(sc, CPSW_CPDMA_RX_PENDTHRESH(0), 0);
cpsw_write_4(sc, CPSW_CPDMA_RX_FREEBUFFER(0), 0);
/* Enable TX & RX DMA */
cpsw_write_4(sc, CPSW_CPDMA_TX_CONTROL, 1);
cpsw_write_4(sc, CPSW_CPDMA_RX_CONTROL, 1);
/* Enable Interrupts for core 0 */
cpsw_write_4(sc, CPSW_WR_C_RX_THRESH_EN(0), 0xFF);
cpsw_write_4(sc, CPSW_WR_C_RX_EN(0), 0xFF);
cpsw_write_4(sc, CPSW_WR_C_TX_EN(0), 0xFF);
cpsw_write_4(sc, CPSW_WR_C_MISC_EN(0), 0x1F);
/* Enable host Error Interrupt */
cpsw_write_4(sc, CPSW_CPDMA_DMA_INTMASK_SET, 3);
/* Enable interrupts for RX and TX on Channel 0 */
cpsw_write_4(sc, CPSW_CPDMA_RX_INTMASK_SET,
CPSW_CPDMA_RX_INT(0) | CPSW_CPDMA_RX_INT_THRESH(0));
cpsw_write_4(sc, CPSW_CPDMA_TX_INTMASK_SET, 1);
/* Initialize MDIO - ENABLE, PREAMBLE=0, FAULTENB, CLKDIV=0xFF */
/* TODO Calculate MDCLK=CLK/(CLKDIV+1) */
cpsw_write_4(sc, MDIOCONTROL, MDIOCTL_ENABLE | MDIOCTL_FAULTENB | 0xff);
/* Select MII in GMII_SEL, Internal Delay mode */
//ti_scm_reg_write_4(0x650, 0);
/* Initialize active queues. */
slot = STAILQ_FIRST(&sc->tx.active);
if (slot != NULL)
cpsw_write_hdp_slot(sc, &sc->tx, slot);
slot = STAILQ_FIRST(&sc->rx.active);
if (slot != NULL)
cpsw_write_hdp_slot(sc, &sc->rx, slot);
cpsw_rx_enqueue(sc);
cpsw_write_4(sc, CPSW_CPDMA_RX_FREEBUFFER(0), sc->rx.active_queue_len);
cpsw_write_4(sc, CPSW_CPDMA_RX_PENDTHRESH(0), CPSW_TXFRAGS);
/* Activate network interface. */
sc->rx.running = 1;
sc->tx.running = 1;
sc->watchdog.timer = 0;
callout_init(&sc->watchdog.callout, 0);
callout_reset(&sc->watchdog.callout, hz, cpsw_tx_watchdog, sc);
}
/*
*
* Device Probe, Attach, Detach.
*
*/
static int
cpsw_probe(device_t dev)
{
if (!ofw_bus_status_okay(dev))
return (ENXIO);
if (!ofw_bus_is_compatible(dev, "ti,cpsw"))
return (ENXIO);
device_set_desc(dev, "3-port Switch Ethernet Subsystem");
return (BUS_PROBE_DEFAULT);
}
static int
cpsw_intr_attach(struct cpsw_softc *sc)
{
int i;
for (i = 0; i < CPSW_INTR_COUNT; i++) {
if (bus_setup_intr(sc->dev, sc->irq_res[i],
INTR_TYPE_NET | INTR_MPSAFE, NULL,
cpsw_intr_cb[i].cb, sc, &sc->ih_cookie[i]) != 0) {
return (-1);
}
}
return (0);
}
static void
cpsw_intr_detach(struct cpsw_softc *sc)
{
int i;
for (i = 0; i < CPSW_INTR_COUNT; i++) {
if (sc->ih_cookie[i]) {
bus_teardown_intr(sc->dev, sc->irq_res[i],
sc->ih_cookie[i]);
}
}
}
static int
cpsw_get_fdt_data(struct cpsw_softc *sc, int port)
{
char *name;
int len, phy, vlan;
pcell_t phy_id[3], vlan_id;
phandle_t child;
unsigned long mdio_child_addr;
/* Find any slave with phy-handle/phy_id */
phy = -1;
vlan = -1;
for (child = OF_child(sc->node); child != 0; child = OF_peer(child)) {
if (OF_getprop_alloc(child, "name", (void **)&name) < 0)
continue;
if (sscanf(name, "slave@%lx", &mdio_child_addr) != 1) {
OF_prop_free(name);
continue;
}
OF_prop_free(name);
if (mdio_child_addr != slave_mdio_addr[port] &&
mdio_child_addr != (slave_mdio_addr[port] & 0xFFF))
continue;
if (fdt_get_phyaddr(child, NULL, &phy, NULL) != 0) {
/* Users with old DTB will have phy_id instead */
phy = -1;
len = OF_getproplen(child, "phy_id");
if (len / sizeof(pcell_t) == 2) {
/* Get phy address from fdt */
if (OF_getencprop(child, "phy_id", phy_id, len) > 0)
phy = phy_id[1];
}
}
len = OF_getproplen(child, "dual_emac_res_vlan");
if (len / sizeof(pcell_t) == 1) {
/* Get VLAN id from fdt */
if (OF_getencprop(child, "dual_emac_res_vlan",
&vlan_id, len) > 0) {
vlan = vlan_id;
}
}
break;
}
if (phy == -1)
return (ENXIO);
sc->port[port].phy = phy;
sc->port[port].vlan = vlan;
return (0);
}
static int
cpsw_attach(device_t dev)
{
int error, i;
struct cpsw_softc *sc;
uint32_t reg;
sc = device_get_softc(dev);
sc->dev = dev;
sc->node = ofw_bus_get_node(dev);
getbinuptime(&sc->attach_uptime);
if (OF_getencprop(sc->node, "active_slave", &sc->active_slave,
sizeof(sc->active_slave)) <= 0) {
sc->active_slave = 0;
}
if (sc->active_slave > 1)
sc->active_slave = 1;
if (OF_hasprop(sc->node, "dual_emac"))
sc->dualemac = 1;
for (i = 0; i < CPSW_PORTS; i++) {
if (!sc->dualemac && i != sc->active_slave)
continue;
if (cpsw_get_fdt_data(sc, i) != 0) {
device_printf(dev,
"failed to get PHY address from FDT\n");
return (ENXIO);
}
}
/* Initialize mutexes */
mtx_init(&sc->tx.lock, device_get_nameunit(dev),
"cpsw TX lock", MTX_DEF);
mtx_init(&sc->rx.lock, device_get_nameunit(dev),
"cpsw RX lock", MTX_DEF);
/* Allocate IRQ resources */
error = bus_alloc_resources(dev, irq_res_spec, sc->irq_res);
if (error) {
device_printf(dev, "could not allocate IRQ resources\n");
cpsw_detach(dev);
return (ENXIO);
}
sc->mem_rid = 0;
sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
&sc->mem_rid, RF_ACTIVE);
if (sc->mem_res == NULL) {
device_printf(sc->dev, "failed to allocate memory resource\n");
cpsw_detach(dev);
return (ENXIO);
}
reg = cpsw_read_4(sc, CPSW_SS_IDVER);
device_printf(dev, "CPSW SS Version %d.%d (%d)\n", (reg >> 8 & 0x7),
reg & 0xFF, (reg >> 11) & 0x1F);
cpsw_add_sysctls(sc);
/* Allocate a busdma tag and DMA safe memory for mbufs. */
error = bus_dma_tag_create(
bus_get_dma_tag(sc->dev), /* parent */
1, 0, /* alignment, boundary */
BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
BUS_SPACE_MAXADDR, /* highaddr */
NULL, NULL, /* filtfunc, filtfuncarg */
MCLBYTES, CPSW_TXFRAGS, /* maxsize, nsegments */
MCLBYTES, 0, /* maxsegsz, flags */
NULL, NULL, /* lockfunc, lockfuncarg */
&sc->mbuf_dtag); /* dmatag */
if (error) {
device_printf(dev, "bus_dma_tag_create failed\n");
cpsw_detach(dev);
return (error);
}
/* Allocate a NULL buffer for padding. */
sc->nullpad = malloc(ETHER_MIN_LEN, M_DEVBUF, M_WAITOK | M_ZERO);
cpsw_init_slots(sc);
/* Allocate slots to TX and RX queues. */
STAILQ_INIT(&sc->rx.avail);
STAILQ_INIT(&sc->rx.active);
STAILQ_INIT(&sc->tx.avail);
STAILQ_INIT(&sc->tx.active);
// For now: 128 slots to TX, rest to RX.
// XXX TODO: start with 32/64 and grow dynamically based on demand.
if (cpsw_add_slots(sc, &sc->tx, 128) ||
cpsw_add_slots(sc, &sc->rx, -1)) {
device_printf(dev, "failed to allocate dmamaps\n");
cpsw_detach(dev);
return (ENOMEM);
}
device_printf(dev, "Initial queue size TX=%d RX=%d\n",
sc->tx.queue_slots, sc->rx.queue_slots);
sc->tx.hdp_offset = CPSW_CPDMA_TX_HDP(0);
sc->rx.hdp_offset = CPSW_CPDMA_RX_HDP(0);
if (cpsw_intr_attach(sc) == -1) {
device_printf(dev, "failed to setup interrupts\n");
cpsw_detach(dev);
return (ENXIO);
}
#ifdef CPSW_ETHERSWITCH
for (i = 0; i < CPSW_VLANS; i++)
cpsw_vgroups[i].vid = -1;
#endif
/* Reset the controller. */
cpsw_reset(sc);
cpsw_init(sc);
for (i = 0; i < CPSW_PORTS; i++) {
if (!sc->dualemac && i != sc->active_slave)
continue;
sc->port[i].dev = device_add_child(dev, "cpsw", i);
if (sc->port[i].dev == NULL) {
cpsw_detach(dev);
return (ENXIO);
}
}
bus_generic_probe(dev);
bus_generic_attach(dev);
return (0);
}
static int
cpsw_detach(device_t dev)
{
struct cpsw_softc *sc;
int error, i;
bus_generic_detach(dev);
sc = device_get_softc(dev);
for (i = 0; i < CPSW_PORTS; i++) {
if (sc->port[i].dev)
device_delete_child(dev, sc->port[i].dev);
}
if (device_is_attached(dev)) {
callout_stop(&sc->watchdog.callout);
callout_drain(&sc->watchdog.callout);
}
/* Stop and release all interrupts */
cpsw_intr_detach(sc);
/* Free dmamaps and mbufs */
for (i = 0; i < nitems(sc->_slots); ++i)
cpsw_free_slot(sc, &sc->_slots[i]);
/* Free null padding buffer. */
if (sc->nullpad)
free(sc->nullpad, M_DEVBUF);
/* Free DMA tag */
if (sc->mbuf_dtag) {
error = bus_dma_tag_destroy(sc->mbuf_dtag);
KASSERT(error == 0, ("Unable to destroy DMA tag"));
}
/* Free IO memory handler */
if (sc->mem_res != NULL)
bus_release_resource(dev, SYS_RES_MEMORY, sc->mem_rid, sc->mem_res);
bus_release_resources(dev, irq_res_spec, sc->irq_res);
/* Destroy mutexes */
mtx_destroy(&sc->rx.lock);
mtx_destroy(&sc->tx.lock);
/* Detach the switch device, if present. */
error = bus_generic_detach(dev);
if (error != 0)
return (error);
return (device_delete_children(dev));
}
static phandle_t
cpsw_get_node(device_t bus, device_t dev)
{
/* Share controller node with port device. */
return (ofw_bus_get_node(bus));
}
static int
cpswp_probe(device_t dev)
{
if (device_get_unit(dev) > 1) {
device_printf(dev, "Only two ports are supported.\n");
return (ENXIO);
}
device_set_desc(dev, "Ethernet Switch Port");
return (BUS_PROBE_DEFAULT);
}
static int
cpswp_attach(device_t dev)
{
int error;
struct ifnet *ifp;
struct cpswp_softc *sc;
uint32_t reg;
uint8_t mac_addr[ETHER_ADDR_LEN];
phandle_t opp_table;
struct syscon *syscon;
sc = device_get_softc(dev);
sc->dev = dev;
sc->pdev = device_get_parent(dev);
sc->swsc = device_get_softc(sc->pdev);
sc->unit = device_get_unit(dev);
sc->phy = sc->swsc->port[sc->unit].phy;
sc->vlan = sc->swsc->port[sc->unit].vlan;
if (sc->swsc->dualemac && sc->vlan == -1)
sc->vlan = sc->unit + 1;
if (sc->unit == 0) {
sc->physel = MDIOUSERPHYSEL0;
sc->phyaccess = MDIOUSERACCESS0;
} else {
sc->physel = MDIOUSERPHYSEL1;
sc->phyaccess = MDIOUSERACCESS1;
}
mtx_init(&sc->lock, device_get_nameunit(dev), "cpsw port lock",
MTX_DEF);
/* Allocate network interface */
ifp = sc->ifp = if_alloc(IFT_ETHER);
- if (ifp == NULL) {
- cpswp_detach(dev);
- return (ENXIO);
- }
-
if_initname(ifp, device_get_name(sc->dev), sc->unit);
ifp->if_softc = sc;
ifp->if_flags = IFF_SIMPLEX | IFF_MULTICAST | IFF_BROADCAST;
ifp->if_capabilities = IFCAP_VLAN_MTU | IFCAP_HWCSUM; //FIXME VLAN?
ifp->if_capenable = ifp->if_capabilities;
ifp->if_init = cpswp_init;
ifp->if_start = cpswp_start;
ifp->if_ioctl = cpswp_ioctl;
ifp->if_snd.ifq_drv_maxlen = sc->swsc->tx.queue_slots;
IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
IFQ_SET_READY(&ifp->if_snd);
/* FIXME: For now, go and kidnap syscon from opp-table. */
/* ti,cpsw actually has an optional syscon reference, but only for am33xx?? */
opp_table = OF_finddevice("/opp-table");
if (opp_table == -1) {
device_printf(dev, "Cant find /opp-table\n");
cpswp_detach(dev);
return (ENXIO);
}
if (!OF_hasprop(opp_table, "syscon")) {
device_printf(dev, "/opp-table doesnt have required syscon property\n");
cpswp_detach(dev);
return (ENXIO);
}
if (syscon_get_by_ofw_property(dev, opp_table, "syscon", &syscon) != 0) {
device_printf(dev, "Failed to get syscon\n");
cpswp_detach(dev);
return (ENXIO);
}
/* Get high part of MAC address from control module (mac_id[0|1]_hi) */
reg = SYSCON_READ_4(syscon, SCM_MAC_ID0_HI + sc->unit * 8);
mac_addr[0] = reg & 0xFF;
mac_addr[1] = (reg >> 8) & 0xFF;
mac_addr[2] = (reg >> 16) & 0xFF;
mac_addr[3] = (reg >> 24) & 0xFF;
/* Get low part of MAC address from control module (mac_id[0|1]_lo) */
reg = SYSCON_READ_4(syscon, SCM_MAC_ID0_LO + sc->unit * 8);
mac_addr[4] = reg & 0xFF;
mac_addr[5] = (reg >> 8) & 0xFF;
error = mii_attach(dev, &sc->miibus, ifp, cpswp_ifmedia_upd,
cpswp_ifmedia_sts, BMSR_DEFCAPMASK, sc->phy, MII_OFFSET_ANY, 0);
if (error) {
device_printf(dev, "attaching PHYs failed\n");
cpswp_detach(dev);
return (error);
}
sc->mii = device_get_softc(sc->miibus);
/* Select PHY and enable interrupts */
cpsw_write_4(sc->swsc, sc->physel,
MDIO_PHYSEL_LINKINTENB | (sc->phy & 0x1F));
ether_ifattach(sc->ifp, mac_addr);
callout_init(&sc->mii_callout, 0);
return (0);
}
static int
cpswp_detach(device_t dev)
{
struct cpswp_softc *sc;
sc = device_get_softc(dev);
CPSW_DEBUGF(sc->swsc, (""));
if (device_is_attached(dev)) {
ether_ifdetach(sc->ifp);
CPSW_PORT_LOCK(sc);
cpswp_stop_locked(sc);
CPSW_PORT_UNLOCK(sc);
callout_drain(&sc->mii_callout);
}
bus_generic_detach(dev);
if_free(sc->ifp);
mtx_destroy(&sc->lock);
return (0);
}
/*
*
* Init/Shutdown.
*
*/
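/*
 * Return non-zero when no port interface is up.  In single-port mode
 * this is trivially true; in dual EMAC mode both port interfaces must
 * be down before the shared RX/TX queues may be torn down.
 */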
static int
cpsw_ports_down(struct cpsw_softc *sc)
{
struct cpswp_softc *psc;
struct ifnet *ifp1, *ifp2;
if (!sc->dualemac)
return (1);
psc = device_get_softc(sc->port[0].dev);
ifp1 = psc->ifp;
psc = device_get_softc(sc->port[1].dev);
ifp2 = psc->ifp;
if ((ifp1->if_flags & IFF_UP) == 0 && (ifp2->if_flags & IFF_UP) == 0)
return (1);
return (0);
}
static void
cpswp_init(void *arg)
{
struct cpswp_softc *sc = arg;
CPSW_DEBUGF(sc->swsc, (""));
CPSW_PORT_LOCK(sc);
cpswp_init_locked(arg);
CPSW_PORT_UNLOCK(sc);
}
static void
cpswp_init_locked(void *arg)
{
#ifdef CPSW_ETHERSWITCH
int i;
#endif
struct cpswp_softc *sc = arg;
struct ifnet *ifp;
uint32_t reg;
CPSW_DEBUGF(sc->swsc, (""));
CPSW_PORT_LOCK_ASSERT(sc);
ifp = sc->ifp;
if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
return;
getbinuptime(&sc->init_uptime);
if (!sc->swsc->rx.running && !sc->swsc->tx.running) {
/* Reset the controller. */
cpsw_reset(sc->swsc);
cpsw_init(sc->swsc);
}
/* Set Slave Mapping. */
cpsw_write_4(sc->swsc, CPSW_SL_RX_PRI_MAP(sc->unit), 0x76543210);
cpsw_write_4(sc->swsc, CPSW_PORT_P_TX_PRI_MAP(sc->unit + 1),
0x33221100);
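/* Accept frames up to 0x5f2 (1522) bytes, a full-sized VLAN-tagged frame. */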
cpsw_write_4(sc->swsc, CPSW_SL_RX_MAXLEN(sc->unit), 0x5f2);
/* Enable MAC RX/TX modules. */
/* TODO: Docs claim that IFCTL_B and IFCTL_A do the same thing? */
/* Huh? Docs call bit 0 "Loopback" some places, "FullDuplex" others. */
reg = cpsw_read_4(sc->swsc, CPSW_SL_MACCONTROL(sc->unit));
reg |= CPSW_SL_MACTL_GMII_ENABLE;
cpsw_write_4(sc->swsc, CPSW_SL_MACCONTROL(sc->unit), reg);
/* Initialize ALE: set port to forwarding, initialize addrs */
cpsw_write_4(sc->swsc, CPSW_ALE_PORTCTL(sc->unit + 1),
ALE_PORTCTL_INGRESS | ALE_PORTCTL_FORWARD);
cpswp_ale_update_addresses(sc, 1);
if (sc->swsc->dualemac) {
/* Set Port VID. */
cpsw_write_4(sc->swsc, CPSW_PORT_P_VLAN(sc->unit + 1),
sc->vlan & 0xfff);
cpsw_ale_update_vlan_table(sc->swsc, sc->vlan,
(1 << (sc->unit + 1)) | (1 << 0), /* Member list */
(1 << (sc->unit + 1)) | (1 << 0), /* Untagged egress */
(1 << (sc->unit + 1)) | (1 << 0), 0); /* mcast reg flood */
#ifdef CPSW_ETHERSWITCH
for (i = 0; i < CPSW_VLANS; i++) {
if (cpsw_vgroups[i].vid != -1)
continue;
cpsw_vgroups[i].vid = sc->vlan;
break;
}
#endif
}
mii_mediachg(sc->mii);
callout_reset(&sc->mii_callout, hz, cpswp_tick, sc);
ifp->if_drv_flags |= IFF_DRV_RUNNING;
ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
}
static int
cpsw_shutdown(device_t dev)
{
struct cpsw_softc *sc;
struct cpswp_softc *psc;
int i;
sc = device_get_softc(dev);
CPSW_DEBUGF(sc, (""));
for (i = 0; i < CPSW_PORTS; i++) {
if (!sc->dualemac && i != sc->active_slave)
continue;
psc = device_get_softc(sc->port[i].dev);
CPSW_PORT_LOCK(psc);
cpswp_stop_locked(psc);
CPSW_PORT_UNLOCK(psc);
}
return (0);
}
static void
cpsw_rx_teardown(struct cpsw_softc *sc)
{
int i = 0;
CPSW_RX_LOCK(sc);
CPSW_DEBUGF(sc, ("starting RX teardown"));
sc->rx.teardown = 1;
cpsw_write_4(sc, CPSW_CPDMA_RX_TEARDOWN, 0);
CPSW_RX_UNLOCK(sc);
while (sc->rx.running) {
if (++i > 10) {
device_printf(sc->dev,
"Unable to cleanly shutdown receiver\n");
return;
}
DELAY(200);
}
if (!sc->rx.running)
CPSW_DEBUGF(sc, ("finished RX teardown (%d retries)", i));
}
static void
cpsw_tx_teardown(struct cpsw_softc *sc)
{
int i = 0;
CPSW_TX_LOCK(sc);
CPSW_DEBUGF(sc, ("starting TX teardown"));
/* Start the TX queue teardown if queue is not empty. */
if (STAILQ_FIRST(&sc->tx.active) != NULL)
cpsw_write_4(sc, CPSW_CPDMA_TX_TEARDOWN, 0);
else
sc->tx.teardown = 1;
cpsw_tx_dequeue(sc);
while (sc->tx.running && ++i < 10) {
DELAY(200);
cpsw_tx_dequeue(sc);
}
if (sc->tx.running) {
device_printf(sc->dev,
"Unable to cleanly shutdown transmitter\n");
}
CPSW_DEBUGF(sc,
("finished TX teardown (%d retries, %d idle buffers)", i,
sc->tx.active_queue_len));
CPSW_TX_UNLOCK(sc);
}
static void
cpswp_stop_locked(struct cpswp_softc *sc)
{
struct ifnet *ifp;
uint32_t reg;
ifp = sc->ifp;
CPSW_DEBUGF(sc->swsc, (""));
CPSW_PORT_LOCK_ASSERT(sc);
if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
return;
/* Disable interface */
ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
ifp->if_drv_flags |= IFF_DRV_OACTIVE;
/* Stop ticker */
callout_stop(&sc->mii_callout);
/* Tear down the RX/TX queues. */
if (cpsw_ports_down(sc->swsc)) {
cpsw_rx_teardown(sc->swsc);
cpsw_tx_teardown(sc->swsc);
}
/* Stop MAC RX/TX modules. */
reg = cpsw_read_4(sc->swsc, CPSW_SL_MACCONTROL(sc->unit));
reg &= ~CPSW_SL_MACTL_GMII_ENABLE;
cpsw_write_4(sc->swsc, CPSW_SL_MACCONTROL(sc->unit), reg);
if (cpsw_ports_down(sc->swsc)) {
/* Capture stats before we reset controller. */
cpsw_stats_collect(sc->swsc);
cpsw_reset(sc->swsc);
cpsw_init(sc->swsc);
}
}
/*
* Suspend/Resume.
*/
static int
cpsw_suspend(device_t dev)
{
struct cpsw_softc *sc;
struct cpswp_softc *psc;
int i;
sc = device_get_softc(dev);
CPSW_DEBUGF(sc, (""));
for (i = 0; i < CPSW_PORTS; i++) {
if (!sc->dualemac && i != sc->active_slave)
continue;
psc = device_get_softc(sc->port[i].dev);
CPSW_PORT_LOCK(psc);
cpswp_stop_locked(psc);
CPSW_PORT_UNLOCK(psc);
}
return (0);
}
static int
cpsw_resume(device_t dev)
{
struct cpsw_softc *sc;
sc = device_get_softc(dev);
CPSW_DEBUGF(sc, ("UNIMPLEMENTED"));
return (0);
}
/*
*
* IOCTL
*
*/
static void
cpsw_set_promisc(struct cpswp_softc *sc, int set)
{
uint32_t reg;
/*
* Enabling promiscuous mode requires ALE_BYPASS to be enabled.
* That disables the ALE forwarding logic and causes every
* packet to be sent only to the host port. In bypass mode,
* the ALE processes host port transmit packets the same as in
* normal mode.
*/
reg = cpsw_read_4(sc->swsc, CPSW_ALE_CONTROL);
reg &= ~CPSW_ALE_CTL_BYPASS;
if (set)
reg |= CPSW_ALE_CTL_BYPASS;
cpsw_write_4(sc->swsc, CPSW_ALE_CONTROL, reg);
}
static void
cpsw_set_allmulti(struct cpswp_softc *sc, int set)
{
if (set) {
printf("All-multicast mode unimplemented\n");
}
}
static int
cpswp_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
struct cpswp_softc *sc;
struct ifreq *ifr;
int error;
uint32_t changed;
error = 0;
sc = ifp->if_softc;
ifr = (struct ifreq *)data;
switch (command) {
case SIOCSIFCAP:
changed = ifp->if_capenable ^ ifr->ifr_reqcap;
if (changed & IFCAP_HWCSUM) {
if ((ifr->ifr_reqcap & changed) & IFCAP_HWCSUM)
ifp->if_capenable |= IFCAP_HWCSUM;
else
ifp->if_capenable &= ~IFCAP_HWCSUM;
}
error = 0;
break;
case SIOCSIFFLAGS:
CPSW_PORT_LOCK(sc);
if (ifp->if_flags & IFF_UP) {
if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
changed = ifp->if_flags ^ sc->if_flags;
CPSW_DEBUGF(sc->swsc,
("SIOCSIFFLAGS: UP & RUNNING (changed=0x%x)",
changed));
if (changed & IFF_PROMISC)
cpsw_set_promisc(sc,
ifp->if_flags & IFF_PROMISC);
if (changed & IFF_ALLMULTI)
cpsw_set_allmulti(sc,
ifp->if_flags & IFF_ALLMULTI);
} else {
CPSW_DEBUGF(sc->swsc,
("SIOCSIFFLAGS: starting up"));
cpswp_init_locked(sc);
}
} else if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
CPSW_DEBUGF(sc->swsc, ("SIOCSIFFLAGS: shutting down"));
cpswp_stop_locked(sc);
}
sc->if_flags = ifp->if_flags;
CPSW_PORT_UNLOCK(sc);
break;
case SIOCADDMULTI:
cpswp_ale_update_addresses(sc, 0);
break;
case SIOCDELMULTI:
/* Ugh. DELMULTI doesn't provide the specific address
being removed, so the best we can do is remove
everything and rebuild it all. */
cpswp_ale_update_addresses(sc, 1);
break;
case SIOCGIFMEDIA:
case SIOCSIFMEDIA:
error = ifmedia_ioctl(ifp, ifr, &sc->mii->mii_media, command);
break;
default:
error = ether_ioctl(ifp, command, data);
}
return (error);
}
/*
*
* MIIBUS
*
*/
static int
cpswp_miibus_ready(struct cpsw_softc *sc, uint32_t reg)
{
uint32_t r, retries = CPSW_MIIBUS_RETRIES;
while (--retries) {
r = cpsw_read_4(sc, reg);
if ((r & MDIO_PHYACCESS_GO) == 0)
return (1);
DELAY(CPSW_MIIBUS_DELAY);
}
return (0);
}
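/*
 * The MDIOUSERACCESS registers pack the register address into bits
 * [25:21], the PHY address into bits [20:16] and the data into bits
 * [15:0]; the GO flag starts a transaction and ACK reports a
 * successful read.
 */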
static int
cpswp_miibus_readreg(device_t dev, int phy, int reg)
{
struct cpswp_softc *sc;
uint32_t cmd, r;
sc = device_get_softc(dev);
if (!cpswp_miibus_ready(sc->swsc, sc->phyaccess)) {
device_printf(dev, "MDIO not ready to read\n");
return (0);
}
/* Set GO, reg, phy */
cmd = MDIO_PHYACCESS_GO | (reg & 0x1F) << 21 | (phy & 0x1F) << 16;
cpsw_write_4(sc->swsc, sc->phyaccess, cmd);
if (!cpswp_miibus_ready(sc->swsc, sc->phyaccess)) {
device_printf(dev, "MDIO timed out during read\n");
return (0);
}
r = cpsw_read_4(sc->swsc, sc->phyaccess);
if ((r & MDIO_PHYACCESS_ACK) == 0) {
device_printf(dev, "Failed to read from PHY.\n");
r = 0;
}
return (r & 0xFFFF);
}
static int
cpswp_miibus_writereg(device_t dev, int phy, int reg, int value)
{
struct cpswp_softc *sc;
uint32_t cmd;
sc = device_get_softc(dev);
if (!cpswp_miibus_ready(sc->swsc, sc->phyaccess)) {
device_printf(dev, "MDIO not ready to write\n");
return (0);
}
/* Set GO, WRITE, reg, phy, and value */
cmd = MDIO_PHYACCESS_GO | MDIO_PHYACCESS_WRITE |
(reg & 0x1F) << 21 | (phy & 0x1F) << 16 | (value & 0xFFFF);
cpsw_write_4(sc->swsc, sc->phyaccess, cmd);
if (!cpswp_miibus_ready(sc->swsc, sc->phyaccess)) {
device_printf(dev, "MDIO timed out during write\n");
return (0);
}
return (0);
}
static void
cpswp_miibus_statchg(device_t dev)
{
struct cpswp_softc *sc;
uint32_t mac_control, reg;
sc = device_get_softc(dev);
CPSW_DEBUGF(sc->swsc, (""));
reg = CPSW_SL_MACCONTROL(sc->unit);
mac_control = cpsw_read_4(sc->swsc, reg);
mac_control &= ~(CPSW_SL_MACTL_GIG | CPSW_SL_MACTL_IFCTL_A |
CPSW_SL_MACTL_IFCTL_B | CPSW_SL_MACTL_FULLDUPLEX);
switch(IFM_SUBTYPE(sc->mii->mii_media_active)) {
case IFM_1000_SX:
case IFM_1000_LX:
case IFM_1000_CX:
case IFM_1000_T:
mac_control |= CPSW_SL_MACTL_GIG;
break;
case IFM_100_TX:
mac_control |= CPSW_SL_MACTL_IFCTL_A;
break;
}
if (sc->mii->mii_media_active & IFM_FDX)
mac_control |= CPSW_SL_MACTL_FULLDUPLEX;
cpsw_write_4(sc->swsc, reg, mac_control);
}
/*
*
* Transmit/Receive Packets.
*
*/
static void
cpsw_intr_rx(void *arg)
{
struct cpsw_softc *sc;
struct ifnet *ifp;
struct mbuf *received, *next;
sc = (struct cpsw_softc *)arg;
CPSW_RX_LOCK(sc);
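	/*
	 * A pending RX queue teardown is acknowledged by writing the
	 * 0xfffffffc marker to the completion pointer.
	 */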
if (sc->rx.teardown) {
sc->rx.running = 0;
sc->rx.teardown = 0;
cpsw_write_cp(sc, &sc->rx, 0xfffffffc);
}
received = cpsw_rx_dequeue(sc);
cpsw_rx_enqueue(sc);
cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, 1);
CPSW_RX_UNLOCK(sc);
while (received != NULL) {
next = received->m_nextpkt;
received->m_nextpkt = NULL;
ifp = received->m_pkthdr.rcvif;
(*ifp->if_input)(ifp, received);
if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
received = next;
}
}
static struct mbuf *
cpsw_rx_dequeue(struct cpsw_softc *sc)
{
int nsegs, port, removed;
struct cpsw_cpdma_bd bd;
struct cpsw_slot *last, *slot;
struct cpswp_softc *psc;
struct mbuf *m, *m0, *mb_head, *mb_tail;
uint16_t m0_flags;
nsegs = 0;
m0 = NULL;
last = NULL;
mb_head = NULL;
mb_tail = NULL;
removed = 0;
/* Pull completed packets off hardware RX queue. */
while ((slot = STAILQ_FIRST(&sc->rx.active)) != NULL) {
cpsw_cpdma_read_bd(sc, slot, &bd);
/*
* Stop on packets still in use by hardware, but do not stop
* on packets with the teardown complete flag, they will be
* discarded later.
*/
if ((bd.flags & (CPDMA_BD_OWNER | CPDMA_BD_TDOWNCMPLT)) ==
CPDMA_BD_OWNER)
break;
last = slot;
++removed;
STAILQ_REMOVE_HEAD(&sc->rx.active, next);
STAILQ_INSERT_TAIL(&sc->rx.avail, slot, next);
bus_dmamap_sync(sc->mbuf_dtag, slot->dmamap, BUS_DMASYNC_POSTREAD);
bus_dmamap_unload(sc->mbuf_dtag, slot->dmamap);
m = slot->mbuf;
slot->mbuf = NULL;
if (bd.flags & CPDMA_BD_TDOWNCMPLT) {
CPSW_DEBUGF(sc, ("RX teardown is complete"));
m_freem(m);
sc->rx.running = 0;
sc->rx.teardown = 0;
break;
}
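		/*
		 * The descriptor flags record the 1-based source port
		 * number; convert it to the 0-based port index.
		 */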
port = (bd.flags & CPDMA_BD_PORT_MASK) - 1;
KASSERT(port >= 0 && port <= 1,
("patcket received with invalid port: %d", port));
psc = device_get_softc(sc->port[port].dev);
/* Set up mbuf */
m->m_data += bd.bufoff;
m->m_len = bd.buflen;
if (bd.flags & CPDMA_BD_SOP) {
m->m_pkthdr.len = bd.pktlen;
m->m_pkthdr.rcvif = psc->ifp;
m->m_flags |= M_PKTHDR;
m0_flags = bd.flags;
m0 = m;
}
nsegs++;
m->m_next = NULL;
m->m_nextpkt = NULL;
if (bd.flags & CPDMA_BD_EOP && m0 != NULL) {
if (m0_flags & CPDMA_BD_PASS_CRC)
m_adj(m0, -ETHER_CRC_LEN);
m0_flags = 0;
m0 = NULL;
if (nsegs > sc->rx.longest_chain)
sc->rx.longest_chain = nsegs;
nsegs = 0;
}
if ((psc->ifp->if_capenable & IFCAP_RXCSUM) != 0) {
/* check for valid CRC by looking into pkt_err[5:4] */
if ((bd.flags &
(CPDMA_BD_SOP | CPDMA_BD_PKT_ERR_MASK)) ==
CPDMA_BD_SOP) {
m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
m->m_pkthdr.csum_data = 0xffff;
}
}
if (STAILQ_FIRST(&sc->rx.active) != NULL &&
(bd.flags & (CPDMA_BD_EOP | CPDMA_BD_EOQ)) ==
(CPDMA_BD_EOP | CPDMA_BD_EOQ)) {
cpsw_write_hdp_slot(sc, &sc->rx,
STAILQ_FIRST(&sc->rx.active));
sc->rx.queue_restart++;
}
/* Add mbuf to packet list to be returned. */
if (mb_tail != NULL && (bd.flags & CPDMA_BD_SOP)) {
mb_tail->m_nextpkt = m;
} else if (mb_tail != NULL) {
mb_tail->m_next = m;
} else if (mb_tail == NULL && (bd.flags & CPDMA_BD_SOP) == 0) {
if (bootverbose)
printf(
"%s: %s: discanding fragment packet w/o header\n",
__func__, psc->ifp->if_xname);
m_freem(m);
continue;
} else {
mb_head = m;
}
mb_tail = m;
}
if (removed != 0) {
cpsw_write_cp_slot(sc, &sc->rx, last);
sc->rx.queue_removes += removed;
sc->rx.avail_queue_len += removed;
sc->rx.active_queue_len -= removed;
if (sc->rx.avail_queue_len > sc->rx.max_avail_queue_len)
sc->rx.max_avail_queue_len = sc->rx.avail_queue_len;
CPSW_DEBUGF(sc, ("Removed %d received packet(s) from RX queue", removed));
}
return (mb_head);
}
static void
cpsw_rx_enqueue(struct cpsw_softc *sc)
{
bus_dma_segment_t seg[1];
struct cpsw_cpdma_bd bd;
struct cpsw_slot *first_new_slot, *last_old_slot, *next, *slot;
int error, nsegs, added = 0;
/* Register new mbufs with hardware. */
first_new_slot = NULL;
last_old_slot = STAILQ_LAST(&sc->rx.active, cpsw_slot, next);
while ((slot = STAILQ_FIRST(&sc->rx.avail)) != NULL) {
if (first_new_slot == NULL)
first_new_slot = slot;
if (slot->mbuf == NULL) {
slot->mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
if (slot->mbuf == NULL) {
device_printf(sc->dev,
"Unable to fill RX queue\n");
break;
}
slot->mbuf->m_len =
slot->mbuf->m_pkthdr.len =
slot->mbuf->m_ext.ext_size;
}
error = bus_dmamap_load_mbuf_sg(sc->mbuf_dtag, slot->dmamap,
slot->mbuf, seg, &nsegs, BUS_DMA_NOWAIT);
KASSERT(nsegs == 1, ("More than one segment (nsegs=%d)", nsegs));
KASSERT(error == 0, ("DMA error (error=%d)", error));
if (error != 0 || nsegs != 1) {
device_printf(sc->dev,
"%s: Can't prep RX buf for DMA (nsegs=%d, error=%d)\n",
__func__, nsegs, error);
bus_dmamap_unload(sc->mbuf_dtag, slot->dmamap);
m_freem(slot->mbuf);
slot->mbuf = NULL;
break;
}
bus_dmamap_sync(sc->mbuf_dtag, slot->dmamap, BUS_DMASYNC_PREREAD);
/* Create and submit new rx descriptor. */
if ((next = STAILQ_NEXT(slot, next)) != NULL)
bd.next = cpsw_cpdma_bd_paddr(sc, next);
else
bd.next = 0;
bd.bufptr = seg->ds_addr;
bd.bufoff = 0;
bd.buflen = MCLBYTES - 1;
bd.pktlen = bd.buflen;
bd.flags = CPDMA_BD_OWNER;
cpsw_cpdma_write_bd(sc, slot, &bd);
++added;
STAILQ_REMOVE_HEAD(&sc->rx.avail, next);
STAILQ_INSERT_TAIL(&sc->rx.active, slot, next);
}
if (added == 0 || first_new_slot == NULL)
return;
CPSW_DEBUGF(sc, ("Adding %d buffers to RX queue", added));
/* Link new entries to hardware RX queue. */
if (last_old_slot == NULL) {
/* Start a fresh queue. */
cpsw_write_hdp_slot(sc, &sc->rx, first_new_slot);
} else {
/* Add buffers to end of current queue. */
cpsw_cpdma_write_bd_next(sc, last_old_slot, first_new_slot);
}
sc->rx.queue_adds += added;
sc->rx.avail_queue_len -= added;
sc->rx.active_queue_len += added;
cpsw_write_4(sc, CPSW_CPDMA_RX_FREEBUFFER(0), added);
if (sc->rx.active_queue_len > sc->rx.max_active_queue_len)
sc->rx.max_active_queue_len = sc->rx.active_queue_len;
}
static void
cpswp_start(struct ifnet *ifp)
{
struct cpswp_softc *sc;
sc = ifp->if_softc;
if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ||
sc->swsc->tx.running == 0) {
return;
}
CPSW_TX_LOCK(sc->swsc);
cpswp_tx_enqueue(sc);
cpsw_tx_dequeue(sc->swsc);
CPSW_TX_UNLOCK(sc->swsc);
}
static void
cpsw_intr_tx(void *arg)
{
struct cpsw_softc *sc;
sc = (struct cpsw_softc *)arg;
CPSW_TX_LOCK(sc);
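	/*
	 * A completion pointer of 0xfffffffc marks a finished TX queue
	 * teardown; acknowledge it by writing the same marker back.
	 */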
if (cpsw_read_4(sc, CPSW_CPDMA_TX_CP(0)) == 0xfffffffc)
cpsw_write_cp(sc, &sc->tx, 0xfffffffc);
cpsw_tx_dequeue(sc);
cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, 2);
CPSW_TX_UNLOCK(sc);
}
static void
cpswp_tx_enqueue(struct cpswp_softc *sc)
{
bus_dma_segment_t segs[CPSW_TXFRAGS];
struct cpsw_cpdma_bd bd;
struct cpsw_slot *first_new_slot, *last, *last_old_slot, *next, *slot;
struct mbuf *m0;
int error, nsegs, seg, added = 0, padlen;
/* Pull pending packets from IF queue and prep them for DMA. */
last = NULL;
first_new_slot = NULL;
last_old_slot = STAILQ_LAST(&sc->swsc->tx.active, cpsw_slot, next);
while ((slot = STAILQ_FIRST(&sc->swsc->tx.avail)) != NULL) {
IF_DEQUEUE(&sc->ifp->if_snd, m0);
if (m0 == NULL)
break;
slot->mbuf = m0;
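		/*
		 * Pad short frames to the minimum frame size
		 * (ETHER_MIN_LEN less the FCS, which the MAC appends)
		 * using the zero-filled pad buffer.
		 */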
padlen = ETHER_MIN_LEN - ETHER_CRC_LEN - m0->m_pkthdr.len;
if (padlen < 0)
padlen = 0;
else if (padlen > 0)
m_append(slot->mbuf, padlen, sc->swsc->nullpad);
/* Create mapping in DMA memory */
error = bus_dmamap_load_mbuf_sg(sc->swsc->mbuf_dtag,
slot->dmamap, slot->mbuf, segs, &nsegs, BUS_DMA_NOWAIT);
/* If the packet is too fragmented, try to simplify. */
if (error == EFBIG ||
(error == 0 && nsegs > sc->swsc->tx.avail_queue_len)) {
bus_dmamap_unload(sc->swsc->mbuf_dtag, slot->dmamap);
m0 = m_defrag(slot->mbuf, M_NOWAIT);
if (m0 == NULL) {
device_printf(sc->dev,
"Can't defragment packet; dropping\n");
m_freem(slot->mbuf);
} else {
CPSW_DEBUGF(sc->swsc,
("Requeueing defragmented packet"));
IF_PREPEND(&sc->ifp->if_snd, m0);
}
slot->mbuf = NULL;
continue;
}
if (error != 0) {
device_printf(sc->dev,
"%s: Can't setup DMA (error=%d), dropping packet\n",
__func__, error);
bus_dmamap_unload(sc->swsc->mbuf_dtag, slot->dmamap);
m_freem(slot->mbuf);
slot->mbuf = NULL;
break;
}
bus_dmamap_sync(sc->swsc->mbuf_dtag, slot->dmamap,
BUS_DMASYNC_PREWRITE);
CPSW_DEBUGF(sc->swsc,
("Queueing TX packet: %d segments + %d pad bytes",
nsegs, padlen));
if (first_new_slot == NULL)
first_new_slot = slot;
/* Link from the previous descriptor. */
if (last != NULL)
cpsw_cpdma_write_bd_next(sc->swsc, last, slot);
slot->ifp = sc->ifp;
/* If there is only one segment, the for() loop
* gets skipped and the single buffer gets set up
* as both SOP and EOP. */
if (nsegs > 1) {
next = STAILQ_NEXT(slot, next);
bd.next = cpsw_cpdma_bd_paddr(sc->swsc, next);
} else
bd.next = 0;
/* Start by setting up the first buffer. */
bd.bufptr = segs[0].ds_addr;
bd.bufoff = 0;
bd.buflen = segs[0].ds_len;
bd.pktlen = m_length(slot->mbuf, NULL);
bd.flags = CPDMA_BD_SOP | CPDMA_BD_OWNER;
if (sc->swsc->dualemac) {
bd.flags |= CPDMA_BD_TO_PORT;
bd.flags |= ((sc->unit + 1) & CPDMA_BD_PORT_MASK);
}
for (seg = 1; seg < nsegs; ++seg) {
/* Save the previous buffer (which isn't EOP) */
cpsw_cpdma_write_bd(sc->swsc, slot, &bd);
STAILQ_REMOVE_HEAD(&sc->swsc->tx.avail, next);
STAILQ_INSERT_TAIL(&sc->swsc->tx.active, slot, next);
slot = STAILQ_FIRST(&sc->swsc->tx.avail);
/* Setup next buffer (which isn't SOP) */
if (nsegs > seg + 1) {
next = STAILQ_NEXT(slot, next);
bd.next = cpsw_cpdma_bd_paddr(sc->swsc, next);
} else
bd.next = 0;
bd.bufptr = segs[seg].ds_addr;
bd.bufoff = 0;
bd.buflen = segs[seg].ds_len;
bd.pktlen = 0;
bd.flags = CPDMA_BD_OWNER;
}
/* Save the final buffer. */
bd.flags |= CPDMA_BD_EOP;
cpsw_cpdma_write_bd(sc->swsc, slot, &bd);
STAILQ_REMOVE_HEAD(&sc->swsc->tx.avail, next);
STAILQ_INSERT_TAIL(&sc->swsc->tx.active, slot, next);
last = slot;
added += nsegs;
if (nsegs > sc->swsc->tx.longest_chain)
sc->swsc->tx.longest_chain = nsegs;
BPF_MTAP(sc->ifp, m0);
}
if (first_new_slot == NULL)
return;
/* Attach the list of new buffers to the hardware TX queue. */
if (last_old_slot != NULL &&
(cpsw_cpdma_read_bd_flags(sc->swsc, last_old_slot) &
CPDMA_BD_EOQ) == 0) {
/* Add buffers to end of current queue. */
cpsw_cpdma_write_bd_next(sc->swsc, last_old_slot,
first_new_slot);
} else {
/* Start a fresh queue. */
cpsw_write_hdp_slot(sc->swsc, &sc->swsc->tx, first_new_slot);
}
sc->swsc->tx.queue_adds += added;
sc->swsc->tx.avail_queue_len -= added;
sc->swsc->tx.active_queue_len += added;
if (sc->swsc->tx.active_queue_len > sc->swsc->tx.max_active_queue_len) {
sc->swsc->tx.max_active_queue_len = sc->swsc->tx.active_queue_len;
}
CPSW_DEBUGF(sc->swsc, ("Queued %d TX packet(s)", added));
}
static int
cpsw_tx_dequeue(struct cpsw_softc *sc)
{
struct cpsw_slot *slot, *last_removed_slot = NULL;
struct cpsw_cpdma_bd bd;
uint32_t flags, removed = 0;
/* Pull completed buffers off the hardware TX queue. */
slot = STAILQ_FIRST(&sc->tx.active);
while (slot != NULL) {
flags = cpsw_cpdma_read_bd_flags(sc, slot);
/* Teardown complete is only marked on the SOP of the packet. */
if ((flags & (CPDMA_BD_SOP | CPDMA_BD_TDOWNCMPLT)) ==
(CPDMA_BD_SOP | CPDMA_BD_TDOWNCMPLT)) {
sc->tx.teardown = 1;
}
if ((flags & (CPDMA_BD_SOP | CPDMA_BD_OWNER)) ==
(CPDMA_BD_SOP | CPDMA_BD_OWNER) && sc->tx.teardown == 0)
break; /* Hardware is still using this packet. */
bus_dmamap_sync(sc->mbuf_dtag, slot->dmamap, BUS_DMASYNC_POSTWRITE);
bus_dmamap_unload(sc->mbuf_dtag, slot->dmamap);
m_freem(slot->mbuf);
slot->mbuf = NULL;
if (slot->ifp) {
if (sc->tx.teardown == 0)
if_inc_counter(slot->ifp, IFCOUNTER_OPACKETS, 1);
else
if_inc_counter(slot->ifp, IFCOUNTER_OQDROPS, 1);
}
/* Dequeue any additional buffers used by this packet. */
while (slot != NULL && slot->mbuf == NULL) {
STAILQ_REMOVE_HEAD(&sc->tx.active, next);
STAILQ_INSERT_TAIL(&sc->tx.avail, slot, next);
++removed;
last_removed_slot = slot;
slot = STAILQ_FIRST(&sc->tx.active);
}
cpsw_write_cp_slot(sc, &sc->tx, last_removed_slot);
/* Restart the TX queue if necessary. */
cpsw_cpdma_read_bd(sc, last_removed_slot, &bd);
if (slot != NULL && bd.next != 0 && (bd.flags &
(CPDMA_BD_EOP | CPDMA_BD_OWNER | CPDMA_BD_EOQ)) ==
(CPDMA_BD_EOP | CPDMA_BD_EOQ)) {
cpsw_write_hdp_slot(sc, &sc->tx, slot);
sc->tx.queue_restart++;
break;
}
}
if (removed != 0) {
sc->tx.queue_removes += removed;
sc->tx.active_queue_len -= removed;
sc->tx.avail_queue_len += removed;
if (sc->tx.avail_queue_len > sc->tx.max_avail_queue_len)
sc->tx.max_avail_queue_len = sc->tx.avail_queue_len;
CPSW_DEBUGF(sc, ("TX removed %d completed packet(s)", removed));
}
if (sc->tx.teardown && STAILQ_EMPTY(&sc->tx.active)) {
CPSW_DEBUGF(sc, ("TX teardown is complete"));
sc->tx.teardown = 0;
sc->tx.running = 0;
}
return (removed);
}
/*
*
* Miscellaneous interrupts.
*
*/
static void
cpsw_intr_rx_thresh(void *arg)
{
struct cpsw_softc *sc;
struct ifnet *ifp;
struct mbuf *received, *next;
sc = (struct cpsw_softc *)arg;
CPSW_RX_LOCK(sc);
received = cpsw_rx_dequeue(sc);
cpsw_rx_enqueue(sc);
cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, 0);
CPSW_RX_UNLOCK(sc);
while (received != NULL) {
next = received->m_nextpkt;
received->m_nextpkt = NULL;
ifp = received->m_pkthdr.rcvif;
(*ifp->if_input)(ifp, received);
if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
received = next;
}
}
static void
cpsw_intr_misc_host_error(struct cpsw_softc *sc)
{
uint32_t intstat;
uint32_t dmastat;
int txerr, rxerr, txchan, rxchan;
printf("\n\n");
device_printf(sc->dev,
"HOST ERROR: PROGRAMMING ERROR DETECTED BY HARDWARE\n");
printf("\n\n");
intstat = cpsw_read_4(sc, CPSW_CPDMA_DMA_INTSTAT_MASKED);
device_printf(sc->dev, "CPSW_CPDMA_DMA_INTSTAT_MASKED=0x%x\n", intstat);
dmastat = cpsw_read_4(sc, CPSW_CPDMA_DMASTATUS);
device_printf(sc->dev, "CPSW_CPDMA_DMASTATUS=0x%x\n", dmastat);
txerr = (dmastat >> 20) & 15;
txchan = (dmastat >> 16) & 7;
rxerr = (dmastat >> 12) & 15;
rxchan = (dmastat >> 8) & 7;
switch (txerr) {
case 0: break;
case 1: printf("SOP error on TX channel %d\n", txchan);
break;
case 2: printf("Ownership bit not set on SOP buffer on TX channel %d\n", txchan);
break;
case 3: printf("Zero Next Buffer but not EOP on TX channel %d\n", txchan);
break;
case 4: printf("Zero Buffer Pointer on TX channel %d\n", txchan);
break;
case 5: printf("Zero Buffer Length on TX channel %d\n", txchan);
break;
case 6: printf("Packet length error on TX channel %d\n", txchan);
break;
default: printf("Unknown error on TX channel %d\n", txchan);
break;
}
if (txerr != 0) {
printf("CPSW_CPDMA_TX%d_HDP=0x%x\n",
txchan, cpsw_read_4(sc, CPSW_CPDMA_TX_HDP(txchan)));
printf("CPSW_CPDMA_TX%d_CP=0x%x\n",
txchan, cpsw_read_4(sc, CPSW_CPDMA_TX_CP(txchan)));
cpsw_dump_queue(sc, &sc->tx.active);
}
switch (rxerr) {
case 0: break;
case 2: printf("Ownership bit not set on RX channel %d\n", rxchan);
break;
case 4: printf("Zero Buffer Pointer on RX channel %d\n", rxchan);
break;
case 5: printf("Zero Buffer Length on RX channel %d\n", rxchan);
break;
case 6: printf("Buffer offset too big on RX channel %d\n", rxchan);
break;
default: printf("Unknown RX error on RX channel %d\n", rxchan);
break;
}
if (rxerr != 0) {
printf("CPSW_CPDMA_RX%d_HDP=0x%x\n",
rxchan, cpsw_read_4(sc,CPSW_CPDMA_RX_HDP(rxchan)));
printf("CPSW_CPDMA_RX%d_CP=0x%x\n",
rxchan, cpsw_read_4(sc, CPSW_CPDMA_RX_CP(rxchan)));
cpsw_dump_queue(sc, &sc->rx.active);
}
printf("\nALE Table\n");
cpsw_ale_dump_table(sc);
// XXX do something useful here??
panic("CPSW HOST ERROR INTERRUPT");
// Suppress this interrupt in the future.
cpsw_write_4(sc, CPSW_CPDMA_DMA_INTMASK_CLEAR, intstat);
printf("XXX HOST ERROR INTERRUPT SUPPRESSED\n");
// The watchdog will probably reset the controller
// in a little while. It will probably fail again.
}
static void
cpsw_intr_misc(void *arg)
{
struct cpsw_softc *sc = arg;
uint32_t stat = cpsw_read_4(sc, CPSW_WR_C_MISC_STAT(0));
if (stat & CPSW_WR_C_MISC_EVNT_PEND)
CPSW_DEBUGF(sc, ("Time sync event interrupt unimplemented"));
if (stat & CPSW_WR_C_MISC_STAT_PEND)
cpsw_stats_collect(sc);
if (stat & CPSW_WR_C_MISC_HOST_PEND)
cpsw_intr_misc_host_error(sc);
if (stat & CPSW_WR_C_MISC_MDIOLINK) {
cpsw_write_4(sc, MDIOLINKINTMASKED,
cpsw_read_4(sc, MDIOLINKINTMASKED));
}
if (stat & CPSW_WR_C_MISC_MDIOUSER) {
CPSW_DEBUGF(sc,
("MDIO operation completed interrupt unimplemented"));
}
cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, 3);
}
/*
*
* Periodic Checks and Watchdog.
*
*/
static void
cpswp_tick(void *msc)
{
struct cpswp_softc *sc = msc;
/* Check for media type change */
mii_tick(sc->mii);
if (sc->media_status != sc->mii->mii_media.ifm_media) {
printf("%s: media type changed (ifm_media=%x)\n", __func__,
sc->mii->mii_media.ifm_media);
cpswp_ifmedia_upd(sc->ifp);
}
/* Schedule another timeout one second from now */
callout_reset(&sc->mii_callout, hz, cpswp_tick, sc);
}
static void
cpswp_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
struct cpswp_softc *sc;
struct mii_data *mii;
sc = ifp->if_softc;
CPSW_DEBUGF(sc->swsc, (""));
CPSW_PORT_LOCK(sc);
mii = sc->mii;
mii_pollstat(mii);
ifmr->ifm_active = mii->mii_media_active;
ifmr->ifm_status = mii->mii_media_status;
CPSW_PORT_UNLOCK(sc);
}
static int
cpswp_ifmedia_upd(struct ifnet *ifp)
{
struct cpswp_softc *sc;
sc = ifp->if_softc;
CPSW_DEBUGF(sc->swsc, (""));
CPSW_PORT_LOCK(sc);
mii_mediachg(sc->mii);
sc->media_status = sc->mii->mii_media.ifm_media;
CPSW_PORT_UNLOCK(sc);
return (0);
}
static void
cpsw_tx_watchdog_full_reset(struct cpsw_softc *sc)
{
struct cpswp_softc *psc;
int i;
cpsw_debugf_head("CPSW watchdog");
device_printf(sc->dev, "watchdog timeout\n");
printf("CPSW_CPDMA_TX%d_HDP=0x%x\n", 0,
cpsw_read_4(sc, CPSW_CPDMA_TX_HDP(0)));
printf("CPSW_CPDMA_TX%d_CP=0x%x\n", 0,
cpsw_read_4(sc, CPSW_CPDMA_TX_CP(0)));
cpsw_dump_queue(sc, &sc->tx.active);
for (i = 0; i < CPSW_PORTS; i++) {
if (!sc->dualemac && i != sc->active_slave)
continue;
psc = device_get_softc(sc->port[i].dev);
CPSW_PORT_LOCK(psc);
cpswp_stop_locked(psc);
CPSW_PORT_UNLOCK(psc);
}
}
static void
cpsw_tx_watchdog(void *msc)
{
struct cpsw_softc *sc;
sc = msc;
CPSW_TX_LOCK(sc);
if (sc->tx.active_queue_len == 0 || !sc->tx.running) {
sc->watchdog.timer = 0; /* Nothing to do. */
} else if (sc->tx.queue_removes > sc->tx.queue_removes_at_last_tick) {
sc->watchdog.timer = 0; /* Stuff done while we weren't looking. */
} else if (cpsw_tx_dequeue(sc) > 0) {
sc->watchdog.timer = 0; /* We just did something. */
} else {
/* There was something to do but it didn't get done. */
++sc->watchdog.timer;
if (sc->watchdog.timer > 5) {
sc->watchdog.timer = 0;
++sc->watchdog.resets;
cpsw_tx_watchdog_full_reset(sc);
}
}
sc->tx.queue_removes_at_last_tick = sc->tx.queue_removes;
CPSW_TX_UNLOCK(sc);
/* Schedule another timeout one second from now */
callout_reset(&sc->watchdog.callout, hz, cpsw_tx_watchdog, sc);
}
/*
*
* ALE support routines.
*
*/
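/*
 * The ALE table is accessed indirectly: writing an entry index to
 * CPSW_ALE_TBLCTL latches that entry into the three TBLW word
 * registers, and setting the write bit (bit 31) stores them back.
 */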
static void
cpsw_ale_read_entry(struct cpsw_softc *sc, uint16_t idx, uint32_t *ale_entry)
{
cpsw_write_4(sc, CPSW_ALE_TBLCTL, idx & 1023);
ale_entry[0] = cpsw_read_4(sc, CPSW_ALE_TBLW0);
ale_entry[1] = cpsw_read_4(sc, CPSW_ALE_TBLW1);
ale_entry[2] = cpsw_read_4(sc, CPSW_ALE_TBLW2);
}
static void
cpsw_ale_write_entry(struct cpsw_softc *sc, uint16_t idx, uint32_t *ale_entry)
{
cpsw_write_4(sc, CPSW_ALE_TBLW0, ale_entry[0]);
cpsw_write_4(sc, CPSW_ALE_TBLW1, ale_entry[1]);
cpsw_write_4(sc, CPSW_ALE_TBLW2, ale_entry[2]);
cpsw_write_4(sc, CPSW_ALE_TBLCTL, 1 << 31 | (idx & 1023));
}
static void
cpsw_ale_remove_all_mc_entries(struct cpsw_softc *sc)
{
int i;
uint32_t ale_entry[3];
/* First four entries are link address and broadcast. */
for (i = 10; i < CPSW_MAX_ALE_ENTRIES; i++) {
cpsw_ale_read_entry(sc, i, ale_entry);
if ((ALE_TYPE(ale_entry) == ALE_TYPE_ADDR ||
ALE_TYPE(ale_entry) == ALE_TYPE_VLAN_ADDR) &&
ALE_MCAST(ale_entry) == 1) { /* MCast link addr */
ale_entry[0] = ale_entry[1] = ale_entry[2] = 0;
cpsw_ale_write_entry(sc, i, ale_entry);
}
}
}
static int
cpsw_ale_mc_entry_set(struct cpsw_softc *sc, uint8_t portmap, int vlan,
uint8_t *mac)
{
int free_index = -1, matching_index = -1, i;
uint32_t ale_entry[3], ale_type;
/* Find a matching entry or a free entry. */
for (i = 10; i < CPSW_MAX_ALE_ENTRIES; i++) {
cpsw_ale_read_entry(sc, i, ale_entry);
/* Entry Type[61:60] is 0 for free entry */
if (free_index < 0 && ALE_TYPE(ale_entry) == 0)
free_index = i;
if ((((ale_entry[1] >> 8) & 0xFF) == mac[0]) &&
(((ale_entry[1] >> 0) & 0xFF) == mac[1]) &&
(((ale_entry[0] >>24) & 0xFF) == mac[2]) &&
(((ale_entry[0] >>16) & 0xFF) == mac[3]) &&
(((ale_entry[0] >> 8) & 0xFF) == mac[4]) &&
(((ale_entry[0] >> 0) & 0xFF) == mac[5])) {
matching_index = i;
break;
}
}
if (matching_index < 0) {
if (free_index < 0)
return (ENOMEM);
i = free_index;
}
if (vlan != -1)
ale_type = ALE_TYPE_VLAN_ADDR << 28 | vlan << 16;
else
ale_type = ALE_TYPE_ADDR << 28;
/* Set MAC address */
ale_entry[0] = mac[2] << 24 | mac[3] << 16 | mac[4] << 8 | mac[5];
ale_entry[1] = mac[0] << 8 | mac[1];
/* Entry type[61:60] and Mcast fwd state[63:62] is fw(3). */
ale_entry[1] |= ALE_MCAST_FWD | ale_type;
/* Set portmask [68:66] */
ale_entry[2] = (portmap & 7) << 2;
cpsw_ale_write_entry(sc, i, ale_entry);
return (0);
}
static void
cpsw_ale_dump_table(struct cpsw_softc *sc)
{
int i;
uint32_t ale_entry[3];
for (i = 0; i < CPSW_MAX_ALE_ENTRIES; i++) {
cpsw_ale_read_entry(sc, i, ale_entry);
switch (ALE_TYPE(ale_entry)) {
case ALE_TYPE_VLAN:
printf("ALE[%4u] %08x %08x %08x ", i, ale_entry[2],
ale_entry[1], ale_entry[0]);
printf("type: %u ", ALE_TYPE(ale_entry));
printf("vlan: %u ", ALE_VLAN(ale_entry));
printf("untag: %u ", ALE_VLAN_UNTAG(ale_entry));
printf("reg flood: %u ", ALE_VLAN_REGFLOOD(ale_entry));
printf("unreg flood: %u ", ALE_VLAN_UNREGFLOOD(ale_entry));
printf("members: %u ", ALE_VLAN_MEMBERS(ale_entry));
printf("\n");
break;
case ALE_TYPE_ADDR:
case ALE_TYPE_VLAN_ADDR:
printf("ALE[%4u] %08x %08x %08x ", i, ale_entry[2],
ale_entry[1], ale_entry[0]);
printf("type: %u ", ALE_TYPE(ale_entry));
printf("mac: %02x:%02x:%02x:%02x:%02x:%02x ",
(ale_entry[1] >> 8) & 0xFF,
(ale_entry[1] >> 0) & 0xFF,
(ale_entry[0] >>24) & 0xFF,
(ale_entry[0] >>16) & 0xFF,
(ale_entry[0] >> 8) & 0xFF,
(ale_entry[0] >> 0) & 0xFF);
printf(ALE_MCAST(ale_entry) ? "mcast " : "ucast ");
if (ALE_TYPE(ale_entry) == ALE_TYPE_VLAN_ADDR)
printf("vlan: %u ", ALE_VLAN(ale_entry));
printf("port: %u ", ALE_PORTS(ale_entry));
printf("\n");
break;
}
}
printf("\n");
}
static u_int
cpswp_set_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
{
struct cpswp_softc *sc = arg;
uint32_t portmask;
if (sc->swsc->dualemac)
portmask = 1 << (sc->unit + 1) | 1 << 0;
else
portmask = 7;
cpsw_ale_mc_entry_set(sc->swsc, portmask, sc->vlan, LLADDR(sdl));
return (1);
}
static int
cpswp_ale_update_addresses(struct cpswp_softc *sc, int purge)
{
uint8_t *mac;
uint32_t ale_entry[3], ale_type, portmask;
if (sc->swsc->dualemac) {
ale_type = ALE_TYPE_VLAN_ADDR << 28 | sc->vlan << 16;
portmask = 1 << (sc->unit + 1) | 1 << 0;
} else {
ale_type = ALE_TYPE_ADDR << 28;
portmask = 7;
}
/*
* Route incoming packets for our MAC address to Port 0 (host).
* For simplicity, keep this entry at table index 0 for port 1 and
* at index 2 for port 2 in the ALE.
*/
mac = LLADDR((struct sockaddr_dl *)sc->ifp->if_addr->ifa_addr);
ale_entry[0] = mac[2] << 24 | mac[3] << 16 | mac[4] << 8 | mac[5];
ale_entry[1] = ale_type | mac[0] << 8 | mac[1]; /* addr entry + mac */
ale_entry[2] = 0; /* port = 0 */
cpsw_ale_write_entry(sc->swsc, 0 + 2 * sc->unit, ale_entry);
/* Set outgoing MAC Address for slave port. */
cpsw_write_4(sc->swsc, CPSW_PORT_P_SA_HI(sc->unit + 1),
mac[3] << 24 | mac[2] << 16 | mac[1] << 8 | mac[0]);
cpsw_write_4(sc->swsc, CPSW_PORT_P_SA_LO(sc->unit + 1),
mac[5] << 8 | mac[4]);
/* Keep the broadcast address at table entry 1 (or 3). */
ale_entry[0] = 0xffffffff; /* Lower 32 bits of MAC */
/* ALE_MCAST_FWD, Addr type, upper 16 bits of Mac */
ale_entry[1] = ALE_MCAST_FWD | ale_type | 0xffff;
ale_entry[2] = portmask << 2;
cpsw_ale_write_entry(sc->swsc, 1 + 2 * sc->unit, ale_entry);
/* SIOCDELMULTI doesn't specify the particular address
being removed, so we have to remove all and rebuild. */
if (purge)
cpsw_ale_remove_all_mc_entries(sc->swsc);
/* Set other multicast addrs desired. */
if_foreach_llmaddr(sc->ifp, cpswp_set_maddr, sc);
return (0);
}
static int
cpsw_ale_update_vlan_table(struct cpsw_softc *sc, int vlan, int ports,
int untag, int mcregflood, int mcunregflood)
{
int free_index, i, matching_index;
uint32_t ale_entry[3];
free_index = matching_index = -1;
/* Find a matching entry or a free entry. */
for (i = 5; i < CPSW_MAX_ALE_ENTRIES; i++) {
cpsw_ale_read_entry(sc, i, ale_entry);
/* Entry Type[61:60] is 0 for free entry */
if (free_index < 0 && ALE_TYPE(ale_entry) == 0)
free_index = i;
if (ALE_VLAN(ale_entry) == vlan) {
matching_index = i;
break;
}
}
if (matching_index < 0) {
if (free_index < 0)
return (-1);
i = free_index;
}
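	/*
	 * Word 0 packs the per-port masks: force-untagged egress
	 * [26:24], registered multicast flood [18:16], unregistered
	 * multicast flood [10:8] and VLAN membership [2:0].
	 */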
ale_entry[0] = (untag & 7) << 24 | (mcregflood & 7) << 16 |
(mcunregflood & 7) << 8 | (ports & 7);
ale_entry[1] = ALE_TYPE_VLAN << 28 | vlan << 16;
ale_entry[2] = 0;
cpsw_ale_write_entry(sc, i, ale_entry);
return (0);
}
/*
*
* Statistics and Sysctls.
*
*/
#if 0
static void
cpsw_stats_dump(struct cpsw_softc *sc)
{
int i;
uint32_t r;
for (i = 0; i < CPSW_SYSCTL_COUNT; ++i) {
r = cpsw_read_4(sc, CPSW_STATS_OFFSET +
cpsw_stat_sysctls[i].reg);
CPSW_DEBUGF(sc, ("%s: %ju + %u = %ju", cpsw_stat_sysctls[i].oid,
(intmax_t)sc->shadow_stats[i], r,
(intmax_t)sc->shadow_stats[i] + r));
}
}
#endif
static void
cpsw_stats_collect(struct cpsw_softc *sc)
{
int i;
uint32_t r;
CPSW_DEBUGF(sc, ("Controller shadow statistics updated."));
for (i = 0; i < CPSW_SYSCTL_COUNT; ++i) {
r = cpsw_read_4(sc, CPSW_STATS_OFFSET +
cpsw_stat_sysctls[i].reg);
sc->shadow_stats[i] += r;
cpsw_write_4(sc, CPSW_STATS_OFFSET + cpsw_stat_sysctls[i].reg,
r);
}
}
static int
cpsw_stats_sysctl(SYSCTL_HANDLER_ARGS)
{
struct cpsw_softc *sc;
struct cpsw_stat *stat;
uint64_t result;
sc = (struct cpsw_softc *)arg1;
stat = &cpsw_stat_sysctls[oidp->oid_number];
result = sc->shadow_stats[oidp->oid_number];
result += cpsw_read_4(sc, CPSW_STATS_OFFSET + stat->reg);
return (sysctl_handle_64(oidp, &result, 0, req));
}
static int
cpsw_stat_attached(SYSCTL_HANDLER_ARGS)
{
struct cpsw_softc *sc;
struct bintime t;
unsigned result;
sc = (struct cpsw_softc *)arg1;
getbinuptime(&t);
bintime_sub(&t, &sc->attach_uptime);
result = t.sec;
return (sysctl_handle_int(oidp, &result, 0, req));
}
static int
cpsw_intr_coalesce(SYSCTL_HANDLER_ARGS)
{
int error;
struct cpsw_softc *sc;
uint32_t ctrl, intr_per_ms;
sc = (struct cpsw_softc *)arg1;
error = sysctl_handle_int(oidp, &sc->coal_us, 0, req);
if (error != 0 || req->newptr == NULL)
return (error);
ctrl = cpsw_read_4(sc, CPSW_WR_INT_CONTROL);
ctrl &= ~(CPSW_WR_INT_PACE_EN | CPSW_WR_INT_PRESCALE_MASK);
if (sc->coal_us == 0) {
/* Disable the interrupt pace hardware. */
cpsw_write_4(sc, CPSW_WR_INT_CONTROL, ctrl);
cpsw_write_4(sc, CPSW_WR_C_RX_IMAX(0), 0);
cpsw_write_4(sc, CPSW_WR_C_TX_IMAX(0), 0);
return (0);
}
if (sc->coal_us > CPSW_WR_C_IMAX_US_MAX)
sc->coal_us = CPSW_WR_C_IMAX_US_MAX;
if (sc->coal_us < CPSW_WR_C_IMAX_US_MIN)
sc->coal_us = CPSW_WR_C_IMAX_US_MIN;
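	/*
	 * The IMAX registers are programmed in interrupts per
	 * millisecond; convert from the requested minimum number of
	 * microseconds between interrupts.
	 */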
intr_per_ms = 1000 / sc->coal_us;
/* Just to make sure... */
if (intr_per_ms > CPSW_WR_C_IMAX_MAX)
intr_per_ms = CPSW_WR_C_IMAX_MAX;
if (intr_per_ms < CPSW_WR_C_IMAX_MIN)
intr_per_ms = CPSW_WR_C_IMAX_MIN;
/* Set the prescale to produce 4us pulses from the 125 MHz clock. */
ctrl |= (125 * 4) & CPSW_WR_INT_PRESCALE_MASK;
/* Enable the interrupt pace hardware. */
cpsw_write_4(sc, CPSW_WR_C_RX_IMAX(0), intr_per_ms);
cpsw_write_4(sc, CPSW_WR_C_TX_IMAX(0), intr_per_ms);
ctrl |= CPSW_WR_INT_C0_RX_PULSE | CPSW_WR_INT_C0_TX_PULSE;
cpsw_write_4(sc, CPSW_WR_INT_CONTROL, ctrl);
return (0);
}
static int
cpsw_stat_uptime(SYSCTL_HANDLER_ARGS)
{
struct cpsw_softc *swsc;
struct cpswp_softc *sc;
struct bintime t;
unsigned result;
swsc = arg1;
sc = device_get_softc(swsc->port[arg2].dev);
if (sc->ifp->if_drv_flags & IFF_DRV_RUNNING) {
getbinuptime(&t);
bintime_sub(&t, &sc->init_uptime);
result = t.sec;
} else
result = 0;
return (sysctl_handle_int(oidp, &result, 0, req));
}
static void
cpsw_add_queue_sysctls(struct sysctl_ctx_list *ctx, struct sysctl_oid *node,
struct cpsw_queue *queue)
{
struct sysctl_oid_list *parent;
parent = SYSCTL_CHILDREN(node);
SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "totalBuffers",
CTLFLAG_RD, &queue->queue_slots, 0,
"Total buffers currently assigned to this queue");
SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "activeBuffers",
CTLFLAG_RD, &queue->active_queue_len, 0,
"Buffers currently registered with hardware controller");
SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "maxActiveBuffers",
CTLFLAG_RD, &queue->max_active_queue_len, 0,
"Max value of activeBuffers since last driver reset");
SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "availBuffers",
CTLFLAG_RD, &queue->avail_queue_len, 0,
"Buffers allocated to this queue but not currently "
"registered with hardware controller");
SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "maxAvailBuffers",
CTLFLAG_RD, &queue->max_avail_queue_len, 0,
"Max value of availBuffers since last driver reset");
SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "totalEnqueued",
CTLFLAG_RD, &queue->queue_adds, 0,
"Total buffers added to queue");
SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "totalDequeued",
CTLFLAG_RD, &queue->queue_removes, 0,
"Total buffers removed from queue");
SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "queueRestart",
CTLFLAG_RD, &queue->queue_restart, 0,
"Total times the queue has been restarted");
SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "longestChain",
CTLFLAG_RD, &queue->longest_chain, 0,
"Max buffers used for a single packet");
}
static void
cpsw_add_watchdog_sysctls(struct sysctl_ctx_list *ctx, struct sysctl_oid *node,
struct cpsw_softc *sc)
{
struct sysctl_oid_list *parent;
parent = SYSCTL_CHILDREN(node);
SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "resets",
CTLFLAG_RD, &sc->watchdog.resets, 0,
"Total number of watchdog resets");
}
static void
cpsw_add_sysctls(struct cpsw_softc *sc)
{
struct sysctl_ctx_list *ctx;
struct sysctl_oid *stats_node, *queue_node, *node;
struct sysctl_oid_list *parent, *stats_parent, *queue_parent;
struct sysctl_oid_list *ports_parent, *port_parent;
char port[16];
int i;
ctx = device_get_sysctl_ctx(sc->dev);
parent = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "debug",
CTLFLAG_RW, &sc->debug, 0, "Enable switch debug messages");
SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, "attachedSecs",
CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
sc, 0, cpsw_stat_attached, "IU",
"Time since driver attach");
SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, "intr_coalesce_us",
CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
sc, 0, cpsw_intr_coalesce, "IU",
"minimum time between interrupts");
node = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "ports",
CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "CPSW Ports Statistics");
ports_parent = SYSCTL_CHILDREN(node);
for (i = 0; i < CPSW_PORTS; i++) {
if (!sc->dualemac && i != sc->active_slave)
continue;
port[0] = '0' + i;
port[1] = '\0';
node = SYSCTL_ADD_NODE(ctx, ports_parent, OID_AUTO,
port, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
"CPSW Port Statistics");
port_parent = SYSCTL_CHILDREN(node);
SYSCTL_ADD_PROC(ctx, port_parent, OID_AUTO, "uptime",
CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc, i,
cpsw_stat_uptime, "IU", "Seconds since driver init");
}
stats_node = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "stats",
CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "CPSW Statistics");
stats_parent = SYSCTL_CHILDREN(stats_node);
for (i = 0; i < CPSW_SYSCTL_COUNT; ++i) {
SYSCTL_ADD_PROC(ctx, stats_parent, i,
cpsw_stat_sysctls[i].oid,
CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
sc, 0, cpsw_stats_sysctl, "IU",
cpsw_stat_sysctls[i].oid);
}
queue_node = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "queue",
CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "CPSW Queue Statistics");
queue_parent = SYSCTL_CHILDREN(queue_node);
node = SYSCTL_ADD_NODE(ctx, queue_parent, OID_AUTO, "tx",
CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "TX Queue Statistics");
cpsw_add_queue_sysctls(ctx, node, &sc->tx);
node = SYSCTL_ADD_NODE(ctx, queue_parent, OID_AUTO, "rx",
CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "RX Queue Statistics");
cpsw_add_queue_sysctls(ctx, node, &sc->rx);
node = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "watchdog",
CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Watchdog Statistics");
cpsw_add_watchdog_sysctls(ctx, node, sc);
}
#ifdef CPSW_ETHERSWITCH
static etherswitch_info_t etherswitch_info = {
.es_nports = CPSW_PORTS + 1,
.es_nvlangroups = CPSW_VLANS,
.es_name = "TI Common Platform Ethernet Switch (CPSW)",
.es_vlan_caps = ETHERSWITCH_VLAN_DOT1Q,
};
static etherswitch_info_t *
cpsw_getinfo(device_t dev)
{
return (&etherswitch_info);
}
static int
cpsw_getport(device_t dev, etherswitch_port_t *p)
{
int err;
struct cpsw_softc *sc;
struct cpswp_softc *psc;
struct ifmediareq *ifmr;
uint32_t reg;
if (p->es_port < 0 || p->es_port > CPSW_PORTS)
return (ENXIO);
err = 0;
sc = device_get_softc(dev);
if (p->es_port == CPSW_CPU_PORT) {
p->es_flags |= ETHERSWITCH_PORT_CPU;
ifmr = &p->es_ifmr;
ifmr->ifm_current = ifmr->ifm_active =
IFM_ETHER | IFM_1000_T | IFM_FDX;
ifmr->ifm_mask = 0;
ifmr->ifm_status = IFM_ACTIVE | IFM_AVALID;
ifmr->ifm_count = 0;
} else {
psc = device_get_softc(sc->port[p->es_port - 1].dev);
err = ifmedia_ioctl(psc->ifp, &p->es_ifr,
&psc->mii->mii_media, SIOCGIFMEDIA);
}
reg = cpsw_read_4(sc, CPSW_PORT_P_VLAN(p->es_port));
p->es_pvid = reg & ETHERSWITCH_VID_MASK;
reg = cpsw_read_4(sc, CPSW_ALE_PORTCTL(p->es_port));
if (reg & ALE_PORTCTL_DROP_UNTAGGED)
p->es_flags |= ETHERSWITCH_PORT_DROPUNTAGGED;
if (reg & ALE_PORTCTL_INGRESS)
p->es_flags |= ETHERSWITCH_PORT_INGRESS;
return (err);
}
static int
cpsw_setport(device_t dev, etherswitch_port_t *p)
{
struct cpsw_softc *sc;
struct cpswp_softc *psc;
struct ifmedia *ifm;
uint32_t reg;
if (p->es_port < 0 || p->es_port > CPSW_PORTS)
return (ENXIO);
sc = device_get_softc(dev);
if (p->es_pvid != 0) {
cpsw_write_4(sc, CPSW_PORT_P_VLAN(p->es_port),
p->es_pvid & ETHERSWITCH_VID_MASK);
}
reg = cpsw_read_4(sc, CPSW_ALE_PORTCTL(p->es_port));
if (p->es_flags & ETHERSWITCH_PORT_DROPUNTAGGED)
reg |= ALE_PORTCTL_DROP_UNTAGGED;
else
reg &= ~ALE_PORTCTL_DROP_UNTAGGED;
if (p->es_flags & ETHERSWITCH_PORT_INGRESS)
reg |= ALE_PORTCTL_INGRESS;
else
reg &= ~ALE_PORTCTL_INGRESS;
cpsw_write_4(sc, CPSW_ALE_PORTCTL(p->es_port), reg);
/* CPU port does not allow media settings. */
if (p->es_port == CPSW_CPU_PORT)
return (0);
psc = device_get_softc(sc->port[p->es_port - 1].dev);
ifm = &psc->mii->mii_media;
return (ifmedia_ioctl(psc->ifp, &p->es_ifr, ifm, SIOCSIFMEDIA));
}
static int
cpsw_getconf(device_t dev, etherswitch_conf_t *conf)
{
/* Return the VLAN mode. */
conf->cmd = ETHERSWITCH_CONF_VLAN_MODE;
conf->vlan_mode = ETHERSWITCH_VLAN_DOT1Q;
return (0);
}
static int
cpsw_getvgroup(device_t dev, etherswitch_vlangroup_t *vg)
{
int i, vid;
uint32_t ale_entry[3];
struct cpsw_softc *sc;
sc = device_get_softc(dev);
if (vg->es_vlangroup >= CPSW_VLANS)
return (EINVAL);
vg->es_vid = 0;
vid = cpsw_vgroups[vg->es_vlangroup].vid;
if (vid == -1)
return (0);
for (i = 0; i < CPSW_MAX_ALE_ENTRIES; i++) {
cpsw_ale_read_entry(sc, i, ale_entry);
if (ALE_TYPE(ale_entry) != ALE_TYPE_VLAN)
continue;
if (vid != ALE_VLAN(ale_entry))
continue;
vg->es_fid = 0;
vg->es_vid = ALE_VLAN(ale_entry) | ETHERSWITCH_VID_VALID;
vg->es_member_ports = ALE_VLAN_MEMBERS(ale_entry);
vg->es_untagged_ports = ALE_VLAN_UNTAG(ale_entry);
}
return (0);
}
static void
cpsw_remove_vlan(struct cpsw_softc *sc, int vlan)
{
int i;
uint32_t ale_entry[3];
for (i = 0; i < CPSW_MAX_ALE_ENTRIES; i++) {
cpsw_ale_read_entry(sc, i, ale_entry);
if (ALE_TYPE(ale_entry) != ALE_TYPE_VLAN)
continue;
if (vlan != ALE_VLAN(ale_entry))
continue;
ale_entry[0] = ale_entry[1] = ale_entry[2] = 0;
cpsw_ale_write_entry(sc, i, ale_entry);
break;
}
}
static int
cpsw_setvgroup(device_t dev, etherswitch_vlangroup_t *vg)
{
int i;
struct cpsw_softc *sc;
sc = device_get_softc(dev);
for (i = 0; i < CPSW_VLANS; i++) {
/* Is this VLAN ID in use by another vlangroup? */
if (vg->es_vlangroup != i && cpsw_vgroups[i].vid == vg->es_vid)
return (EINVAL);
}
if (vg->es_vid == 0) {
if (cpsw_vgroups[vg->es_vlangroup].vid == -1)
return (0);
cpsw_remove_vlan(sc, cpsw_vgroups[vg->es_vlangroup].vid);
cpsw_vgroups[vg->es_vlangroup].vid = -1;
vg->es_untagged_ports = 0;
vg->es_member_ports = 0;
vg->es_vid = 0;
return (0);
}
vg->es_vid &= ETHERSWITCH_VID_MASK;
vg->es_member_ports &= CPSW_PORTS_MASK;
vg->es_untagged_ports &= CPSW_PORTS_MASK;
if (cpsw_vgroups[vg->es_vlangroup].vid != -1 &&
cpsw_vgroups[vg->es_vlangroup].vid != vg->es_vid)
return (EINVAL);
cpsw_vgroups[vg->es_vlangroup].vid = vg->es_vid;
cpsw_ale_update_vlan_table(sc, vg->es_vid, vg->es_member_ports,
vg->es_untagged_ports, vg->es_member_ports, 0);
return (0);
}
static int
cpsw_readreg(device_t dev, int addr)
{
/* Not supported. */
return (0);
}
static int
cpsw_writereg(device_t dev, int addr, int value)
{
/* Not supported. */
return (0);
}
static int
cpsw_readphy(device_t dev, int phy, int reg)
{
/* Not supported. */
return (0);
}
static int
cpsw_writephy(device_t dev, int phy, int reg, int data)
{
/* Not supported. */
return (0);
}
#endif
diff --git a/sys/dev/ae/if_ae.c b/sys/dev/ae/if_ae.c
index f8e83dc3c563..a85100daa8f1 100644
--- a/sys/dev/ae/if_ae.c
+++ b/sys/dev/ae/if_ae.c
@@ -1,2251 +1,2245 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2008 Stanislav Sedov <stas@FreeBSD.org>.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* Driver for Attansic Technology Corp. L2 FastEthernet adapter.
*
* This driver is heavily based on age(4) Attansic L1 driver by Pyun YongHyeon.
*/
#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/rman.h>
#include <sys/module.h>
#include <sys/queue.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <net/bpf.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <machine/bus.h>
#include "miibus_if.h"
#include "if_aereg.h"
#include "if_aevar.h"
/*
* Devices supported by this driver.
*/
static struct ae_dev {
uint16_t vendorid;
uint16_t deviceid;
const char *name;
} ae_devs[] = {
{ VENDORID_ATTANSIC, DEVICEID_ATTANSIC_L2,
"Attansic Technology Corp, L2 FastEthernet" },
};
#define AE_DEVS_COUNT nitems(ae_devs)
static struct resource_spec ae_res_spec_mem[] = {
{ SYS_RES_MEMORY, PCIR_BAR(0), RF_ACTIVE },
{ -1, 0, 0 }
};
static struct resource_spec ae_res_spec_irq[] = {
{ SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE },
{ -1, 0, 0 }
};
static struct resource_spec ae_res_spec_msi[] = {
{ SYS_RES_IRQ, 1, RF_ACTIVE },
{ -1, 0, 0 }
};
static int ae_probe(device_t dev);
static int ae_attach(device_t dev);
static void ae_pcie_init(ae_softc_t *sc);
static void ae_phy_reset(ae_softc_t *sc);
static void ae_phy_init(ae_softc_t *sc);
static int ae_reset(ae_softc_t *sc);
static void ae_init(void *arg);
static int ae_init_locked(ae_softc_t *sc);
static int ae_detach(device_t dev);
static int ae_miibus_readreg(device_t dev, int phy, int reg);
static int ae_miibus_writereg(device_t dev, int phy, int reg, int val);
static void ae_miibus_statchg(device_t dev);
static void ae_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr);
static int ae_mediachange(struct ifnet *ifp);
static void ae_retrieve_address(ae_softc_t *sc);
static void ae_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs,
int error);
static int ae_alloc_rings(ae_softc_t *sc);
static void ae_dma_free(ae_softc_t *sc);
static int ae_shutdown(device_t dev);
static int ae_suspend(device_t dev);
static void ae_powersave_disable(ae_softc_t *sc);
static void ae_powersave_enable(ae_softc_t *sc);
static int ae_resume(device_t dev);
static unsigned int ae_tx_avail_size(ae_softc_t *sc);
static int ae_encap(ae_softc_t *sc, struct mbuf **m_head);
static void ae_start(struct ifnet *ifp);
static void ae_start_locked(struct ifnet *ifp);
static void ae_link_task(void *arg, int pending);
static void ae_stop_rxmac(ae_softc_t *sc);
static void ae_stop_txmac(ae_softc_t *sc);
static void ae_mac_config(ae_softc_t *sc);
static int ae_intr(void *arg);
static void ae_int_task(void *arg, int pending);
static void ae_tx_intr(ae_softc_t *sc);
static void ae_rxeof(ae_softc_t *sc, ae_rxd_t *rxd);
static void ae_rx_intr(ae_softc_t *sc);
static void ae_watchdog(ae_softc_t *sc);
static void ae_tick(void *arg);
static void ae_rxfilter(ae_softc_t *sc);
static void ae_rxvlan(ae_softc_t *sc);
static int ae_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data);
static void ae_stop(ae_softc_t *sc);
static int ae_check_eeprom_present(ae_softc_t *sc, int *vpdc);
static int ae_vpd_read_word(ae_softc_t *sc, int reg, uint32_t *word);
static int ae_get_vpd_eaddr(ae_softc_t *sc, uint32_t *eaddr);
static int ae_get_reg_eaddr(ae_softc_t *sc, uint32_t *eaddr);
static void ae_update_stats_rx(uint16_t flags, ae_stats_t *stats);
static void ae_update_stats_tx(uint16_t flags, ae_stats_t *stats);
static void ae_init_tunables(ae_softc_t *sc);
static device_method_t ae_methods[] = {
/* Device interface. */
DEVMETHOD(device_probe, ae_probe),
DEVMETHOD(device_attach, ae_attach),
DEVMETHOD(device_detach, ae_detach),
DEVMETHOD(device_shutdown, ae_shutdown),
DEVMETHOD(device_suspend, ae_suspend),
DEVMETHOD(device_resume, ae_resume),
/* MII interface. */
DEVMETHOD(miibus_readreg, ae_miibus_readreg),
DEVMETHOD(miibus_writereg, ae_miibus_writereg),
DEVMETHOD(miibus_statchg, ae_miibus_statchg),
{ NULL, NULL }
};
static driver_t ae_driver = {
"ae",
ae_methods,
sizeof(ae_softc_t)
};
static devclass_t ae_devclass;
DRIVER_MODULE(ae, pci, ae_driver, ae_devclass, 0, 0);
MODULE_PNP_INFO("U16:vendor;U16:device;D:#", pci, ae, ae_devs,
nitems(ae_devs));
DRIVER_MODULE(miibus, ae, miibus_driver, miibus_devclass, 0, 0);
MODULE_DEPEND(ae, pci, 1, 1, 1);
MODULE_DEPEND(ae, ether, 1, 1, 1);
MODULE_DEPEND(ae, miibus, 1, 1, 1);
/*
* Tunables.
*/
static int msi_disable = 0;
TUNABLE_INT("hw.ae.msi_disable", &msi_disable);
#define AE_READ_4(sc, reg) \
bus_read_4((sc)->mem[0], (reg))
#define AE_READ_2(sc, reg) \
bus_read_2((sc)->mem[0], (reg))
#define AE_READ_1(sc, reg) \
bus_read_1((sc)->mem[0], (reg))
#define AE_WRITE_4(sc, reg, val) \
bus_write_4((sc)->mem[0], (reg), (val))
#define AE_WRITE_2(sc, reg, val) \
bus_write_2((sc)->mem[0], (reg), (val))
#define AE_WRITE_1(sc, reg, val) \
bus_write_1((sc)->mem[0], (reg), (val))
#define AE_PHY_READ(sc, reg) \
ae_miibus_readreg(sc->dev, 0, reg)
#define AE_PHY_WRITE(sc, reg, val) \
ae_miibus_writereg(sc->dev, 0, reg, val)
#define AE_CHECK_EADDR_VALID(eaddr) \
((eaddr[0] == 0 && eaddr[1] == 0) || \
(eaddr[0] == 0xffffffff && eaddr[1] == 0xffff))
#define AE_RXD_VLAN(vtag) \
(((vtag) >> 4) | (((vtag) & 0x07) << 13) | (((vtag) & 0x08) << 9))
#define AE_TXD_VLAN(vtag) \
(((vtag) << 4) | (((vtag) >> 13) & 0x07) | (((vtag) >> 9) & 0x08))
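/*
 * The RX/TX descriptors store the 802.1q tag with its bit fields
 * shuffled; these macros translate between the descriptor and host
 * layouts and are exact inverses of each other.
 */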
static int
ae_probe(device_t dev)
{
uint16_t deviceid, vendorid;
int i;
vendorid = pci_get_vendor(dev);
deviceid = pci_get_device(dev);
/*
* Search through the list of supported devs for matching one.
*/
for (i = 0; i < AE_DEVS_COUNT; i++) {
if (vendorid == ae_devs[i].vendorid &&
deviceid == ae_devs[i].deviceid) {
device_set_desc(dev, ae_devs[i].name);
return (BUS_PROBE_DEFAULT);
}
}
return (ENXIO);
}
static int
ae_attach(device_t dev)
{
ae_softc_t *sc;
struct ifnet *ifp;
uint8_t chiprev;
uint32_t pcirev;
int nmsi, pmc;
int error;
sc = device_get_softc(dev); /* Automatically allocated and zeroed
on attach. */
KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__));
sc->dev = dev;
/*
* Initialize mutexes and tasks.
*/
mtx_init(&sc->mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF);
callout_init_mtx(&sc->tick_ch, &sc->mtx, 0);
TASK_INIT(&sc->int_task, 0, ae_int_task, sc);
TASK_INIT(&sc->link_task, 0, ae_link_task, sc);
pci_enable_busmaster(dev); /* Enable bus mastering. */
sc->spec_mem = ae_res_spec_mem;
/*
* Allocate memory-mapped registers.
*/
error = bus_alloc_resources(dev, sc->spec_mem, sc->mem);
if (error != 0) {
device_printf(dev, "could not allocate memory resources.\n");
sc->spec_mem = NULL;
goto fail;
}
/*
* Retrieve PCI and chip revisions.
*/
pcirev = pci_get_revid(dev);
chiprev = (AE_READ_4(sc, AE_MASTER_REG) >> AE_MASTER_REVNUM_SHIFT) &
AE_MASTER_REVNUM_MASK;
if (bootverbose) {
device_printf(dev, "pci device revision: %#04x\n", pcirev);
device_printf(dev, "chip id: %#02x\n", chiprev);
}
nmsi = pci_msi_count(dev);
if (bootverbose)
device_printf(dev, "MSI count: %d.\n", nmsi);
/*
* Allocate interrupt resources.
*/
if (msi_disable == 0 && nmsi == 1) {
error = pci_alloc_msi(dev, &nmsi);
if (error == 0) {
device_printf(dev, "Using MSI messages.\n");
sc->spec_irq = ae_res_spec_msi;
error = bus_alloc_resources(dev, sc->spec_irq, sc->irq);
if (error != 0) {
device_printf(dev, "MSI allocation failed.\n");
sc->spec_irq = NULL;
pci_release_msi(dev);
} else {
sc->flags |= AE_FLAG_MSI;
}
}
}
if (sc->spec_irq == NULL) {
sc->spec_irq = ae_res_spec_irq;
error = bus_alloc_resources(dev, sc->spec_irq, sc->irq);
if (error != 0) {
device_printf(dev, "could not allocate IRQ resources.\n");
sc->spec_irq = NULL;
goto fail;
}
}
ae_init_tunables(sc);
ae_phy_reset(sc); /* Reset PHY. */
error = ae_reset(sc); /* Reset the controller itself. */
if (error != 0)
goto fail;
ae_pcie_init(sc);
ae_retrieve_address(sc); /* Load MAC address. */
error = ae_alloc_rings(sc); /* Allocate ring buffers. */
if (error != 0)
goto fail;
ifp = sc->ifp = if_alloc(IFT_ETHER);
- if (ifp == NULL) {
- device_printf(dev, "could not allocate ifnet structure.\n");
- error = ENXIO;
- goto fail;
- }
-
ifp->if_softc = sc;
if_initname(ifp, device_get_name(dev), device_get_unit(dev));
ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
ifp->if_ioctl = ae_ioctl;
ifp->if_start = ae_start;
ifp->if_init = ae_init;
ifp->if_capabilities = IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING;
ifp->if_hwassist = 0;
ifp->if_snd.ifq_drv_maxlen = ifqmaxlen;
IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
IFQ_SET_READY(&ifp->if_snd);
if (pci_find_cap(dev, PCIY_PMG, &pmc) == 0) {
ifp->if_capabilities |= IFCAP_WOL_MAGIC;
sc->flags |= AE_FLAG_PMG;
}
ifp->if_capenable = ifp->if_capabilities;
/*
* Configure and attach MII bus.
*/
error = mii_attach(dev, &sc->miibus, ifp, ae_mediachange,
ae_mediastatus, BMSR_DEFCAPMASK, AE_PHYADDR_DEFAULT,
MII_OFFSET_ANY, 0);
if (error != 0) {
device_printf(dev, "attaching PHYs failed\n");
goto fail;
}
ether_ifattach(ifp, sc->eaddr);
/* Tell the upper layer(s) we support long frames. */
ifp->if_hdrlen = sizeof(struct ether_vlan_header);
/*
* Create and run all helper tasks.
*/
sc->tq = taskqueue_create_fast("ae_taskq", M_WAITOK,
taskqueue_thread_enqueue, &sc->tq);
taskqueue_start_threads(&sc->tq, 1, PI_NET, "%s taskq",
device_get_nameunit(sc->dev));
/*
* Configure interrupt handlers.
*/
error = bus_setup_intr(dev, sc->irq[0], INTR_TYPE_NET | INTR_MPSAFE,
ae_intr, NULL, sc, &sc->intrhand);
if (error != 0) {
device_printf(dev, "could not set up interrupt handler.\n");
taskqueue_free(sc->tq);
sc->tq = NULL;
ether_ifdetach(ifp);
goto fail;
}
fail:
if (error != 0)
ae_detach(dev);
return (error);
}
#define AE_SYSCTL(ctx, parent, name, desc, ptr) \
SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, name, CTLFLAG_RD, ptr, 0, desc)
static void
ae_init_tunables(ae_softc_t *sc)
{
struct sysctl_ctx_list *ctx;
struct sysctl_oid *root, *stats, *stats_rx, *stats_tx;
struct ae_stats *ae_stats;
KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__));
ae_stats = &sc->stats;
ctx = device_get_sysctl_ctx(sc->dev);
root = device_get_sysctl_tree(sc->dev);
stats = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(root), OID_AUTO, "stats",
CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "ae statistics");
/*
 * Receiver statistics.
*/
stats_rx = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(stats), OID_AUTO, "rx",
CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Rx MAC statistics");
AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_rx), "bcast",
"broadcast frames", &ae_stats->rx_bcast);
AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_rx), "mcast",
"multicast frames", &ae_stats->rx_mcast);
AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_rx), "pause",
"PAUSE frames", &ae_stats->rx_pause);
AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_rx), "control",
"control frames", &ae_stats->rx_ctrl);
AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_rx), "crc_errors",
"frames with CRC errors", &ae_stats->rx_crcerr);
AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_rx), "code_errors",
"frames with invalid opcode", &ae_stats->rx_codeerr);
AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_rx), "runt",
"runt frames", &ae_stats->rx_runt);
AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_rx), "frag",
"fragmented frames", &ae_stats->rx_frag);
AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_rx), "align_errors",
"frames with alignment errors", &ae_stats->rx_align);
AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_rx), "truncated",
"frames truncated due to Rx FIFO inderrun", &ae_stats->rx_trunc);
/*
 * Transmitter statistics.
*/
stats_tx = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(stats), OID_AUTO, "tx",
CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Tx MAC statistics");
AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_tx), "bcast",
"broadcast frames", &ae_stats->tx_bcast);
AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_tx), "mcast",
"multicast frames", &ae_stats->tx_mcast);
AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_tx), "pause",
"PAUSE frames", &ae_stats->tx_pause);
AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_tx), "control",
"control frames", &ae_stats->tx_ctrl);
AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_tx), "defers",
"deferrals occuried", &ae_stats->tx_defer);
AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_tx), "exc_defers",
"excessive deferrals occuried", &ae_stats->tx_excdefer);
AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_tx), "singlecols",
"single collisions occuried", &ae_stats->tx_singlecol);
AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_tx), "multicols",
"multiple collisions occuried", &ae_stats->tx_multicol);
AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_tx), "latecols",
"late collisions occuried", &ae_stats->tx_latecol);
AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_tx), "aborts",
"transmit aborts due collisions", &ae_stats->tx_abortcol);
AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_tx), "underruns",
"Tx FIFO underruns", &ae_stats->tx_underrun);
}
static void
ae_pcie_init(ae_softc_t *sc)
{
AE_WRITE_4(sc, AE_PCIE_LTSSM_TESTMODE_REG, AE_PCIE_LTSSM_TESTMODE_DEFAULT);
AE_WRITE_4(sc, AE_PCIE_DLL_TX_CTRL_REG, AE_PCIE_DLL_TX_CTRL_DEFAULT);
}
static void
ae_phy_reset(ae_softc_t *sc)
{
AE_WRITE_4(sc, AE_PHY_ENABLE_REG, AE_PHY_ENABLE);
DELAY(1000); /* XXX: pause(9) ? */
}
static int
ae_reset(ae_softc_t *sc)
{
int i;
/*
* Issue a soft reset.
*/
AE_WRITE_4(sc, AE_MASTER_REG, AE_MASTER_SOFT_RESET);
bus_barrier(sc->mem[0], AE_MASTER_REG, 4,
BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
/*
* Wait for reset to complete.
*/
for (i = 0; i < AE_RESET_TIMEOUT; i++) {
if ((AE_READ_4(sc, AE_MASTER_REG) & AE_MASTER_SOFT_RESET) == 0)
break;
DELAY(10);
}
if (i == AE_RESET_TIMEOUT) {
device_printf(sc->dev, "reset timeout.\n");
return (ENXIO);
}
/*
* Wait for everything to enter idle state.
*/
for (i = 0; i < AE_IDLE_TIMEOUT; i++) {
if (AE_READ_4(sc, AE_IDLE_REG) == 0)
break;
DELAY(100);
}
if (i == AE_IDLE_TIMEOUT) {
device_printf(sc->dev, "could not enter idle state.\n");
return (ENXIO);
}
return (0);
}
static void
ae_init(void *arg)
{
ae_softc_t *sc;
sc = (ae_softc_t *)arg;
AE_LOCK(sc);
ae_init_locked(sc);
AE_UNLOCK(sc);
}
static void
ae_phy_init(ae_softc_t *sc)
{
/*
* Enable link status change interrupt.
* XXX magic numbers.
*/
#ifdef notyet
AE_PHY_WRITE(sc, 18, 0xc00);
#endif
}
static int
ae_init_locked(ae_softc_t *sc)
{
struct ifnet *ifp;
struct mii_data *mii;
uint8_t eaddr[ETHER_ADDR_LEN];
uint32_t val;
bus_addr_t addr;
AE_LOCK_ASSERT(sc);
ifp = sc->ifp;
if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
return (0);
mii = device_get_softc(sc->miibus);
ae_stop(sc);
ae_reset(sc);
ae_pcie_init(sc); /* Initialize PCIe registers. */
ae_phy_init(sc);
ae_powersave_disable(sc);
/*
* Clear and disable interrupts.
*/
AE_WRITE_4(sc, AE_ISR_REG, 0xffffffff);
/*
* Set the MAC address.
*/
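/*
 * The address is split across two registers: AE_EADDR1_REG takes the
 * two most significant octets, AE_EADDR0_REG the remaining four.
 */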
bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN);
val = eaddr[2] << 24 | eaddr[3] << 16 | eaddr[4] << 8 | eaddr[5];
AE_WRITE_4(sc, AE_EADDR0_REG, val);
val = eaddr[0] << 8 | eaddr[1];
AE_WRITE_4(sc, AE_EADDR1_REG, val);
bzero(sc->rxd_base_dma, AE_RXD_COUNT_DEFAULT * 1536 + AE_RXD_PADDING);
bzero(sc->txd_base, AE_TXD_BUFSIZE_DEFAULT);
bzero(sc->txs_base, AE_TXS_COUNT_DEFAULT * 4);
/*
* Set ring buffers base addresses.
*/
addr = sc->dma_rxd_busaddr;
AE_WRITE_4(sc, AE_DESC_ADDR_HI_REG, BUS_ADDR_HI(addr));
AE_WRITE_4(sc, AE_RXD_ADDR_LO_REG, BUS_ADDR_LO(addr));
addr = sc->dma_txd_busaddr;
AE_WRITE_4(sc, AE_TXD_ADDR_LO_REG, BUS_ADDR_LO(addr));
addr = sc->dma_txs_busaddr;
AE_WRITE_4(sc, AE_TXS_ADDR_LO_REG, BUS_ADDR_LO(addr));
/*
* Configure ring buffers sizes.
*/
AE_WRITE_2(sc, AE_RXD_COUNT_REG, AE_RXD_COUNT_DEFAULT);
AE_WRITE_2(sc, AE_TXD_BUFSIZE_REG, AE_TXD_BUFSIZE_DEFAULT / 4);
AE_WRITE_2(sc, AE_TXS_COUNT_REG, AE_TXS_COUNT_DEFAULT);
/*
* Configure interframe gap parameters.
*/
val = ((AE_IFG_TXIPG_DEFAULT << AE_IFG_TXIPG_SHIFT) &
AE_IFG_TXIPG_MASK) |
((AE_IFG_RXIPG_DEFAULT << AE_IFG_RXIPG_SHIFT) &
AE_IFG_RXIPG_MASK) |
((AE_IFG_IPGR1_DEFAULT << AE_IFG_IPGR1_SHIFT) &
AE_IFG_IPGR1_MASK) |
((AE_IFG_IPGR2_DEFAULT << AE_IFG_IPGR2_SHIFT) &
AE_IFG_IPGR2_MASK);
AE_WRITE_4(sc, AE_IFG_REG, val);
/*
* Configure half-duplex operation.
*/
val = ((AE_HDPX_LCOL_DEFAULT << AE_HDPX_LCOL_SHIFT) &
AE_HDPX_LCOL_MASK) |
((AE_HDPX_RETRY_DEFAULT << AE_HDPX_RETRY_SHIFT) &
AE_HDPX_RETRY_MASK) |
((AE_HDPX_ABEBT_DEFAULT << AE_HDPX_ABEBT_SHIFT) &
AE_HDPX_ABEBT_MASK) |
((AE_HDPX_JAMIPG_DEFAULT << AE_HDPX_JAMIPG_SHIFT) &
AE_HDPX_JAMIPG_MASK) | AE_HDPX_EXC_EN;
AE_WRITE_4(sc, AE_HDPX_REG, val);
/*
* Configure interrupt moderate timer.
*/
AE_WRITE_2(sc, AE_IMT_REG, AE_IMT_DEFAULT);
val = AE_READ_4(sc, AE_MASTER_REG);
val |= AE_MASTER_IMT_EN;
AE_WRITE_4(sc, AE_MASTER_REG, val);
/*
* Configure interrupt clearing timer.
*/
AE_WRITE_2(sc, AE_ICT_REG, AE_ICT_DEFAULT);
/*
* Configure MTU.
*/
val = ifp->if_mtu + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN +
ETHER_CRC_LEN;
AE_WRITE_2(sc, AE_MTU_REG, val);
/*
* Configure cut-through threshold.
*/
AE_WRITE_4(sc, AE_CUT_THRESH_REG, AE_CUT_THRESH_DEFAULT);
/*
* Configure flow control.
*/
AE_WRITE_2(sc, AE_FLOW_THRESH_HI_REG, (AE_RXD_COUNT_DEFAULT / 8) * 7);
AE_WRITE_2(sc, AE_FLOW_THRESH_LO_REG, (AE_RXD_COUNT_MIN / 8) >
(AE_RXD_COUNT_DEFAULT / 12) ? (AE_RXD_COUNT_MIN / 8) :
(AE_RXD_COUNT_DEFAULT / 12));
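/*
 * The conditional above selects the larger of AE_RXD_COUNT_MIN / 8
 * and AE_RXD_COUNT_DEFAULT / 12 as the low watermark; presumably the
 * chip asserts flow control when the ring fills past the high
 * watermark (7/8 of the ring) and releases it at the low one.
 */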
/*
* Init mailboxes.
*/
sc->txd_cur = sc->rxd_cur = 0;
sc->txs_ack = sc->txd_ack = 0;
sc->rxd_cur = 0;
AE_WRITE_2(sc, AE_MB_TXD_IDX_REG, sc->txd_cur);
AE_WRITE_2(sc, AE_MB_RXD_IDX_REG, sc->rxd_cur);
sc->tx_inproc = 0; /* Number of packets the chip is processing now. */
sc->flags |= AE_FLAG_TXAVAIL; /* Free Tx's available. */
/*
* Enable DMA.
*/
AE_WRITE_1(sc, AE_DMAREAD_REG, AE_DMAREAD_EN);
AE_WRITE_1(sc, AE_DMAWRITE_REG, AE_DMAWRITE_EN);
/*
* Check if everything is OK.
*/
val = AE_READ_4(sc, AE_ISR_REG);
if ((val & AE_ISR_PHY_LINKDOWN) != 0) {
device_printf(sc->dev, "Initialization failed.\n");
return (ENXIO);
}
/*
* Clear interrupt status.
*/
AE_WRITE_4(sc, AE_ISR_REG, 0x3fffffff);
AE_WRITE_4(sc, AE_ISR_REG, 0x0);
/*
* Enable interrupts.
*/
val = AE_READ_4(sc, AE_MASTER_REG);
AE_WRITE_4(sc, AE_MASTER_REG, val | AE_MASTER_MANUAL_INT);
AE_WRITE_4(sc, AE_IMR_REG, AE_IMR_DEFAULT);
/*
* Disable WOL.
*/
AE_WRITE_4(sc, AE_WOL_REG, 0);
/*
* Configure MAC.
*/
val = AE_MAC_TX_CRC_EN | AE_MAC_TX_AUTOPAD |
AE_MAC_FULL_DUPLEX | AE_MAC_CLK_PHY |
AE_MAC_TX_FLOW_EN | AE_MAC_RX_FLOW_EN |
((AE_HALFBUF_DEFAULT << AE_HALFBUF_SHIFT) & AE_HALFBUF_MASK) |
((AE_MAC_PREAMBLE_DEFAULT << AE_MAC_PREAMBLE_SHIFT) &
AE_MAC_PREAMBLE_MASK);
AE_WRITE_4(sc, AE_MAC_REG, val);
/*
* Configure Rx MAC.
*/
ae_rxfilter(sc);
ae_rxvlan(sc);
/*
* Enable Tx/Rx.
*/
val = AE_READ_4(sc, AE_MAC_REG);
AE_WRITE_4(sc, AE_MAC_REG, val | AE_MAC_TX_EN | AE_MAC_RX_EN);
sc->flags &= ~AE_FLAG_LINK;
mii_mediachg(mii); /* Switch to the current media. */
callout_reset(&sc->tick_ch, hz, ae_tick, sc);
ifp->if_drv_flags |= IFF_DRV_RUNNING;
ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
#ifdef AE_DEBUG
device_printf(sc->dev, "Initialization complete.\n");
#endif
return (0);
}
static int
ae_detach(device_t dev)
{
struct ae_softc *sc;
struct ifnet *ifp;
sc = device_get_softc(dev);
KASSERT(sc != NULL, ("[ae: %d]: sc is NULL", __LINE__));
ifp = sc->ifp;
if (device_is_attached(dev)) {
AE_LOCK(sc);
sc->flags |= AE_FLAG_DETACH;
ae_stop(sc);
AE_UNLOCK(sc);
callout_drain(&sc->tick_ch);
taskqueue_drain(sc->tq, &sc->int_task);
taskqueue_drain(taskqueue_swi, &sc->link_task);
ether_ifdetach(ifp);
}
if (sc->tq != NULL) {
taskqueue_drain(sc->tq, &sc->int_task);
taskqueue_free(sc->tq);
sc->tq = NULL;
}
if (sc->miibus != NULL) {
device_delete_child(dev, sc->miibus);
sc->miibus = NULL;
}
bus_generic_detach(sc->dev);
ae_dma_free(sc);
if (sc->intrhand != NULL) {
bus_teardown_intr(dev, sc->irq[0], sc->intrhand);
sc->intrhand = NULL;
}
if (ifp != NULL) {
if_free(ifp);
sc->ifp = NULL;
}
if (sc->spec_irq != NULL)
bus_release_resources(dev, sc->spec_irq, sc->irq);
if (sc->spec_mem != NULL)
bus_release_resources(dev, sc->spec_mem, sc->mem);
if ((sc->flags & AE_FLAG_MSI) != 0)
pci_release_msi(dev);
mtx_destroy(&sc->mtx);
return (0);
}
static int
ae_miibus_readreg(device_t dev, int phy, int reg)
{
ae_softc_t *sc;
uint32_t val;
int i;
sc = device_get_softc(dev);
KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__));
/*
* Locking is done in upper layers.
*/
val = ((reg << AE_MDIO_REGADDR_SHIFT) & AE_MDIO_REGADDR_MASK) |
AE_MDIO_START | AE_MDIO_READ | AE_MDIO_SUP_PREAMBLE |
((AE_MDIO_CLK_25_4 << AE_MDIO_CLK_SHIFT) & AE_MDIO_CLK_MASK);
AE_WRITE_4(sc, AE_MDIO_REG, val);
/*
* Wait for operation to complete.
*/
for (i = 0; i < AE_MDIO_TIMEOUT; i++) {
DELAY(2);
val = AE_READ_4(sc, AE_MDIO_REG);
if ((val & (AE_MDIO_START | AE_MDIO_BUSY)) == 0)
break;
}
if (i == AE_MDIO_TIMEOUT) {
device_printf(sc->dev, "phy read timeout: %d.\n", reg);
return (0);
}
return ((val << AE_MDIO_DATA_SHIFT) & AE_MDIO_DATA_MASK);
}
static int
ae_miibus_writereg(device_t dev, int phy, int reg, int val)
{
ae_softc_t *sc;
uint32_t aereg;
int i;
sc = device_get_softc(dev);
KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__));
/*
* Locking is done in upper layers.
*/
aereg = ((reg << AE_MDIO_REGADDR_SHIFT) & AE_MDIO_REGADDR_MASK) |
AE_MDIO_START | AE_MDIO_SUP_PREAMBLE |
((AE_MDIO_CLK_25_4 << AE_MDIO_CLK_SHIFT) & AE_MDIO_CLK_MASK) |
((val << AE_MDIO_DATA_SHIFT) & AE_MDIO_DATA_MASK);
AE_WRITE_4(sc, AE_MDIO_REG, aereg);
/*
* Wait for operation to complete.
*/
for (i = 0; i < AE_MDIO_TIMEOUT; i++) {
DELAY(2);
aereg = AE_READ_4(sc, AE_MDIO_REG);
if ((aereg & (AE_MDIO_START | AE_MDIO_BUSY)) == 0)
break;
}
if (i == AE_MDIO_TIMEOUT) {
device_printf(sc->dev, "phy write timeout: %d.\n", reg);
}
return (0);
}
static void
ae_miibus_statchg(device_t dev)
{
ae_softc_t *sc;
sc = device_get_softc(dev);
taskqueue_enqueue(taskqueue_swi, &sc->link_task);
}
static void
ae_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
ae_softc_t *sc;
struct mii_data *mii;
sc = ifp->if_softc;
KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__));
AE_LOCK(sc);
mii = device_get_softc(sc->miibus);
mii_pollstat(mii);
ifmr->ifm_status = mii->mii_media_status;
ifmr->ifm_active = mii->mii_media_active;
AE_UNLOCK(sc);
}
static int
ae_mediachange(struct ifnet *ifp)
{
ae_softc_t *sc;
struct mii_data *mii;
struct mii_softc *mii_sc;
int error;
/* XXX: check IFF_UP ?? */
sc = ifp->if_softc;
KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__));
AE_LOCK(sc);
mii = device_get_softc(sc->miibus);
LIST_FOREACH(mii_sc, &mii->mii_phys, mii_list)
PHY_RESET(mii_sc);
error = mii_mediachg(mii);
AE_UNLOCK(sc);
return (error);
}
static int
ae_check_eeprom_present(ae_softc_t *sc, int *vpdc)
{
int error;
uint32_t val;
KASSERT(vpdc != NULL, ("[ae, %d]: vpdc is NULL!\n", __LINE__));
/*
* Not sure why, but Linux does this.
*/
val = AE_READ_4(sc, AE_SPICTL_REG);
if ((val & AE_SPICTL_VPD_EN) != 0) {
val &= ~AE_SPICTL_VPD_EN;
AE_WRITE_4(sc, AE_SPICTL_REG, val);
}
error = pci_find_cap(sc->dev, PCIY_VPD, vpdc);
return (error);
}
static int
ae_vpd_read_word(ae_softc_t *sc, int reg, uint32_t *word)
{
uint32_t val;
int i;
AE_WRITE_4(sc, AE_VPD_DATA_REG, 0); /* Clear register value. */
/*
* VPD registers start at offset 0x100. Read them.
*/
val = 0x100 + reg * 4;
AE_WRITE_4(sc, AE_VPD_CAP_REG, (val << AE_VPD_CAP_ADDR_SHIFT) &
AE_VPD_CAP_ADDR_MASK);
for (i = 0; i < AE_VPD_TIMEOUT; i++) {
DELAY(2000);
val = AE_READ_4(sc, AE_VPD_CAP_REG);
if ((val & AE_VPD_CAP_DONE) != 0)
break;
}
if (i == AE_VPD_TIMEOUT) {
device_printf(sc->dev, "timeout reading VPD register %d.\n",
reg);
return (ETIMEDOUT);
}
*word = AE_READ_4(sc, AE_VPD_DATA_REG);
return (0);
}
static int
ae_get_vpd_eaddr(ae_softc_t *sc, uint32_t *eaddr)
{
uint32_t word, reg, val;
int error;
int found;
int vpdc;
int i;
KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__));
KASSERT(eaddr != NULL, ("[ae, %d]: eaddr is NULL", __LINE__));
/*
* Check for EEPROM.
*/
error = ae_check_eeprom_present(sc, &vpdc);
if (error != 0)
return (error);
/*
* Read the VPD configuration space.
 * Each register value is prefixed with a signature word,
 * so we can check whether it is valid.
*/
for (i = 0, found = 0; i < AE_VPD_NREGS; i++) {
error = ae_vpd_read_word(sc, i, &word);
if (error != 0)
break;
/*
* Check signature.
*/
if ((word & AE_VPD_SIG_MASK) != AE_VPD_SIG)
break;
reg = word >> AE_VPD_REG_SHIFT;
i++; /* Move to the next word. */
if (reg != AE_EADDR0_REG && reg != AE_EADDR1_REG)
continue;
error = ae_vpd_read_word(sc, i, &val);
if (error != 0)
break;
if (reg == AE_EADDR0_REG)
eaddr[0] = val;
else
eaddr[1] = val;
found++;
}
if (found < 2)
return (ENOENT);
eaddr[1] &= 0xffff; /* Only last 2 bytes are used. */
if (AE_CHECK_EADDR_VALID(eaddr) != 0) {
if (bootverbose)
device_printf(sc->dev,
"VPD ethernet address registers are invalid.\n");
return (EINVAL);
}
return (0);
}
static int
ae_get_reg_eaddr(ae_softc_t *sc, uint32_t *eaddr)
{
/*
* BIOS is supposed to set this.
*/
eaddr[0] = AE_READ_4(sc, AE_EADDR0_REG);
eaddr[1] = AE_READ_4(sc, AE_EADDR1_REG);
eaddr[1] &= 0xffff; /* Only last 2 bytes are used. */
if (AE_CHECK_EADDR_VALID(eaddr) != 0) {
if (bootverbose)
device_printf(sc->dev,
"Ethernet address registers are invalid.\n");
return (EINVAL);
}
return (0);
}
static void
ae_retrieve_address(ae_softc_t *sc)
{
uint32_t eaddr[2] = {0, 0};
int error;
/*
 * Try the EEPROM/VPD-supplied address first.
 */
error = ae_get_vpd_eaddr(sc, eaddr);
if (error != 0)
error = ae_get_reg_eaddr(sc, eaddr);
if (error != 0) {
if (bootverbose)
device_printf(sc->dev,
"Generating random ethernet address.\n");
eaddr[0] = arc4random();
/*
* Set OUI to ASUSTek COMPUTER INC.
*/
sc->eaddr[0] = 0x02; /* U/L bit set. */
sc->eaddr[1] = 0x1f;
sc->eaddr[2] = 0xc6;
sc->eaddr[3] = (eaddr[0] >> 16) & 0xff;
sc->eaddr[4] = (eaddr[0] >> 8) & 0xff;
sc->eaddr[5] = (eaddr[0] >> 0) & 0xff;
} else {
sc->eaddr[0] = (eaddr[1] >> 8) & 0xff;
sc->eaddr[1] = (eaddr[1] >> 0) & 0xff;
sc->eaddr[2] = (eaddr[0] >> 24) & 0xff;
sc->eaddr[3] = (eaddr[0] >> 16) & 0xff;
sc->eaddr[4] = (eaddr[0] >> 8) & 0xff;
sc->eaddr[5] = (eaddr[0] >> 0) & 0xff;
}
}
static void
ae_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
bus_addr_t *addr = arg;
if (error != 0)
return;
KASSERT(nsegs == 1, ("[ae, %d]: %d segments instead of 1!", __LINE__,
nsegs));
*addr = segs[0].ds_addr;
}
static int
ae_alloc_rings(ae_softc_t *sc)
{
bus_addr_t busaddr;
int error;
/*
* Create parent DMA tag.
*/
error = bus_dma_tag_create(bus_get_dma_tag(sc->dev),
1, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
NULL, NULL, BUS_SPACE_MAXSIZE_32BIT, 0,
BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL,
&sc->dma_parent_tag);
if (error != 0) {
device_printf(sc->dev, "could not creare parent DMA tag.\n");
return (error);
}
/*
* Create DMA tag for TxD.
*/
error = bus_dma_tag_create(sc->dma_parent_tag,
8, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
NULL, NULL, AE_TXD_BUFSIZE_DEFAULT, 1,
AE_TXD_BUFSIZE_DEFAULT, 0, NULL, NULL,
&sc->dma_txd_tag);
if (error != 0) {
device_printf(sc->dev, "could not creare TxD DMA tag.\n");
return (error);
}
/*
* Create DMA tag for TxS.
*/
error = bus_dma_tag_create(sc->dma_parent_tag,
8, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
NULL, NULL, AE_TXS_COUNT_DEFAULT * 4, 1,
AE_TXS_COUNT_DEFAULT * 4, 0, NULL, NULL,
&sc->dma_txs_tag);
if (error != 0) {
device_printf(sc->dev, "could not creare TxS DMA tag.\n");
return (error);
}
/*
* Create DMA tag for RxD.
*/
error = bus_dma_tag_create(sc->dma_parent_tag,
128, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
NULL, NULL, AE_RXD_COUNT_DEFAULT * 1536 + AE_RXD_PADDING, 1,
AE_RXD_COUNT_DEFAULT * 1536 + AE_RXD_PADDING, 0, NULL, NULL,
&sc->dma_rxd_tag);
if (error != 0) {
device_printf(sc->dev, "could not creare TxS DMA tag.\n");
return (error);
}
/*
* Allocate TxD DMA memory.
*/
error = bus_dmamem_alloc(sc->dma_txd_tag, (void **)&sc->txd_base,
BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
&sc->dma_txd_map);
if (error != 0) {
device_printf(sc->dev,
"could not allocate DMA memory for TxD ring.\n");
return (error);
}
error = bus_dmamap_load(sc->dma_txd_tag, sc->dma_txd_map, sc->txd_base,
AE_TXD_BUFSIZE_DEFAULT, ae_dmamap_cb, &busaddr, BUS_DMA_NOWAIT);
if (error != 0 || busaddr == 0) {
device_printf(sc->dev,
"could not load DMA map for TxD ring.\n");
return (error);
}
sc->dma_txd_busaddr = busaddr;
/*
* Allocate TxS DMA memory.
*/
error = bus_dmamem_alloc(sc->dma_txs_tag, (void **)&sc->txs_base,
BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
&sc->dma_txs_map);
if (error != 0) {
device_printf(sc->dev,
"could not allocate DMA memory for TxS ring.\n");
return (error);
}
error = bus_dmamap_load(sc->dma_txs_tag, sc->dma_txs_map, sc->txs_base,
AE_TXS_COUNT_DEFAULT * 4, ae_dmamap_cb, &busaddr, BUS_DMA_NOWAIT);
if (error != 0 || busaddr == 0) {
device_printf(sc->dev,
"could not load DMA map for TxS ring.\n");
return (error);
}
sc->dma_txs_busaddr = busaddr;
/*
* Allocate RxD DMA memory.
*/
error = bus_dmamem_alloc(sc->dma_rxd_tag, (void **)&sc->rxd_base_dma,
BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
&sc->dma_rxd_map);
if (error != 0) {
device_printf(sc->dev,
"could not allocate DMA memory for RxD ring.\n");
return (error);
}
error = bus_dmamap_load(sc->dma_rxd_tag, sc->dma_rxd_map,
sc->rxd_base_dma, AE_RXD_COUNT_DEFAULT * 1536 + AE_RXD_PADDING,
ae_dmamap_cb, &busaddr, BUS_DMA_NOWAIT);
if (error != 0 || busaddr == 0) {
device_printf(sc->dev,
"could not load DMA map for RxD ring.\n");
return (error);
}
sc->dma_rxd_busaddr = busaddr + AE_RXD_PADDING;
sc->rxd_base = (ae_rxd_t *)(sc->rxd_base_dma + AE_RXD_PADDING);
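/*
 * Both the software view of the RxD ring and the bus address
 * programmed into the chip start AE_RXD_PADDING bytes into the
 * 128-byte-aligned DMA block, so hardware and driver agree on the
 * ring layout.
 */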
return (0);
}
static void
ae_dma_free(ae_softc_t *sc)
{
if (sc->dma_txd_tag != NULL) {
if (sc->dma_txd_busaddr != 0)
bus_dmamap_unload(sc->dma_txd_tag, sc->dma_txd_map);
if (sc->txd_base != NULL)
bus_dmamem_free(sc->dma_txd_tag, sc->txd_base,
sc->dma_txd_map);
bus_dma_tag_destroy(sc->dma_txd_tag);
sc->dma_txd_tag = NULL;
sc->txd_base = NULL;
sc->dma_txd_busaddr = 0;
}
if (sc->dma_txs_tag != NULL) {
if (sc->dma_txs_busaddr != 0)
bus_dmamap_unload(sc->dma_txs_tag, sc->dma_txs_map);
if (sc->txs_base != NULL)
bus_dmamem_free(sc->dma_txs_tag, sc->txs_base,
sc->dma_txs_map);
bus_dma_tag_destroy(sc->dma_txs_tag);
sc->dma_txs_tag = NULL;
sc->txs_base = NULL;
sc->dma_txs_busaddr = 0;
}
if (sc->dma_rxd_tag != NULL) {
if (sc->dma_rxd_busaddr != 0)
bus_dmamap_unload(sc->dma_rxd_tag, sc->dma_rxd_map);
if (sc->rxd_base_dma != NULL)
bus_dmamem_free(sc->dma_rxd_tag, sc->rxd_base_dma,
sc->dma_rxd_map);
bus_dma_tag_destroy(sc->dma_rxd_tag);
sc->dma_rxd_tag = NULL;
sc->rxd_base_dma = NULL;
sc->dma_rxd_busaddr = 0;
}
if (sc->dma_parent_tag != NULL) {
bus_dma_tag_destroy(sc->dma_parent_tag);
sc->dma_parent_tag = NULL;
}
}
static int
ae_shutdown(device_t dev)
{
ae_softc_t *sc;
int error;
sc = device_get_softc(dev);
KASSERT(sc != NULL, ("[ae: %d]: sc is NULL", __LINE__));
error = ae_suspend(dev);
AE_LOCK(sc);
ae_powersave_enable(sc);
AE_UNLOCK(sc);
return (error);
}
static void
ae_powersave_disable(ae_softc_t *sc)
{
uint32_t val;
AE_LOCK_ASSERT(sc);
AE_PHY_WRITE(sc, AE_PHY_DBG_ADDR, 0);
val = AE_PHY_READ(sc, AE_PHY_DBG_DATA);
if (val & AE_PHY_DBG_POWERSAVE) {
val &= ~AE_PHY_DBG_POWERSAVE;
AE_PHY_WRITE(sc, AE_PHY_DBG_DATA, val);
DELAY(1000);
}
}
static void
ae_powersave_enable(ae_softc_t *sc)
{
uint32_t val;
AE_LOCK_ASSERT(sc);
/*
* XXX magic numbers.
*/
AE_PHY_WRITE(sc, AE_PHY_DBG_ADDR, 0);
val = AE_PHY_READ(sc, AE_PHY_DBG_DATA);
AE_PHY_WRITE(sc, AE_PHY_DBG_ADDR, val | 0x1000);
AE_PHY_WRITE(sc, AE_PHY_DBG_ADDR, 2);
AE_PHY_WRITE(sc, AE_PHY_DBG_DATA, 0x3000);
AE_PHY_WRITE(sc, AE_PHY_DBG_ADDR, 3);
AE_PHY_WRITE(sc, AE_PHY_DBG_DATA, 0);
}
static void
ae_pm_init(ae_softc_t *sc)
{
struct ifnet *ifp;
uint32_t val;
uint16_t pmstat;
struct mii_data *mii;
int pmc;
AE_LOCK_ASSERT(sc);
ifp = sc->ifp;
if ((sc->flags & AE_FLAG_PMG) == 0) {
/* Disable WOL entirely. */
AE_WRITE_4(sc, AE_WOL_REG, 0);
return;
}
/*
* Configure WOL if enabled.
*/
if ((ifp->if_capenable & IFCAP_WOL) != 0) {
mii = device_get_softc(sc->miibus);
mii_pollstat(mii);
if ((mii->mii_media_status & IFM_AVALID) != 0 &&
(mii->mii_media_status & IFM_ACTIVE) != 0) {
AE_WRITE_4(sc, AE_WOL_REG, AE_WOL_MAGIC |
AE_WOL_MAGIC_PME);
/*
 * Configure MAC.
 */
val = AE_MAC_RX_EN | AE_MAC_CLK_PHY |
AE_MAC_TX_CRC_EN | AE_MAC_TX_AUTOPAD |
((AE_HALFBUF_DEFAULT << AE_HALFBUF_SHIFT) &
AE_HALFBUF_MASK) |
((AE_MAC_PREAMBLE_DEFAULT <<
AE_MAC_PREAMBLE_SHIFT) & AE_MAC_PREAMBLE_MASK) |
AE_MAC_BCAST_EN | AE_MAC_MCAST_EN;
if ((IFM_OPTIONS(mii->mii_media_active) &
IFM_FDX) != 0)
val |= AE_MAC_FULL_DUPLEX;
AE_WRITE_4(sc, AE_MAC_REG, val);
} else { /* No link. */
AE_WRITE_4(sc, AE_WOL_REG, AE_WOL_LNKCHG |
AE_WOL_LNKCHG_PME);
AE_WRITE_4(sc, AE_MAC_REG, 0);
}
} else {
ae_powersave_enable(sc);
}
/*
* PCIE hacks. Magic numbers.
*/
val = AE_READ_4(sc, AE_PCIE_PHYMISC_REG);
val |= AE_PCIE_PHYMISC_FORCE_RCV_DET;
AE_WRITE_4(sc, AE_PCIE_PHYMISC_REG, val);
val = AE_READ_4(sc, AE_PCIE_DLL_TX_CTRL_REG);
val |= AE_PCIE_DLL_TX_CTRL_SEL_NOR_CLK;
AE_WRITE_4(sc, AE_PCIE_DLL_TX_CTRL_REG, val);
/*
* Configure PME.
*/
if (pci_find_cap(sc->dev, PCIY_PMG, &pmc) == 0) {
pmstat = pci_read_config(sc->dev, pmc + PCIR_POWER_STATUS, 2);
pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
if ((ifp->if_capenable & IFCAP_WOL) != 0)
pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
pci_write_config(sc->dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
}
}
static int
ae_suspend(device_t dev)
{
ae_softc_t *sc;
sc = device_get_softc(dev);
AE_LOCK(sc);
ae_stop(sc);
ae_pm_init(sc);
AE_UNLOCK(sc);
return (0);
}
static int
ae_resume(device_t dev)
{
ae_softc_t *sc;
sc = device_get_softc(dev);
KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__));
AE_LOCK(sc);
AE_READ_4(sc, AE_WOL_REG); /* Clear WOL status. */
if ((sc->ifp->if_flags & IFF_UP) != 0)
ae_init_locked(sc);
AE_UNLOCK(sc);
return (0);
}
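/*
 * Return the number of free bytes in the circular TxD buffer, i.e.
 * the distance from the producer index (txd_cur) back around to the
 * consumer index (txd_ack).
 */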
static unsigned int
ae_tx_avail_size(ae_softc_t *sc)
{
unsigned int avail;
if (sc->txd_cur >= sc->txd_ack)
avail = AE_TXD_BUFSIZE_DEFAULT - (sc->txd_cur - sc->txd_ack);
else
avail = sc->txd_ack - sc->txd_cur;
return (avail);
}
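/*
 * Copy-based transmit: the hardware has no scatter/gather Tx
 * descriptors.  Each frame is placed into the TxD byte ring as a
 * small header (ae_txd_t, carrying length and VLAN tag) followed by
 * the frame data rounded up to a 4-byte boundary, wrapping at the
 * end of the ring.  Completions are reported through the separate
 * TxS status ring.
 */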
static int
ae_encap(ae_softc_t *sc, struct mbuf **m_head)
{
struct mbuf *m0;
ae_txd_t *hdr;
unsigned int to_end;
uint16_t len;
AE_LOCK_ASSERT(sc);
m0 = *m_head;
len = m0->m_pkthdr.len;
if ((sc->flags & AE_FLAG_TXAVAIL) == 0 ||
len + sizeof(ae_txd_t) + 3 > ae_tx_avail_size(sc)) {
#ifdef AE_DEBUG
if_printf(sc->ifp, "No free Tx available.\n");
#endif
return ENOBUFS;
}
hdr = (ae_txd_t *)(sc->txd_base + sc->txd_cur);
bzero(hdr, sizeof(*hdr));
/* Skip header size. */
sc->txd_cur = (sc->txd_cur + sizeof(ae_txd_t)) % AE_TXD_BUFSIZE_DEFAULT;
/* Space available to the end of the ring */
to_end = AE_TXD_BUFSIZE_DEFAULT - sc->txd_cur;
if (to_end >= len) {
m_copydata(m0, 0, len, (caddr_t)(sc->txd_base + sc->txd_cur));
} else {
m_copydata(m0, 0, to_end, (caddr_t)(sc->txd_base +
sc->txd_cur));
m_copydata(m0, to_end, len - to_end, (caddr_t)sc->txd_base);
}
/*
* Set TxD flags and parameters.
*/
if ((m0->m_flags & M_VLANTAG) != 0) {
hdr->vlan = htole16(AE_TXD_VLAN(m0->m_pkthdr.ether_vtag));
hdr->len = htole16(len | AE_TXD_INSERT_VTAG);
} else {
hdr->len = htole16(len);
}
/*
* Set current TxD position and round up to a 4-byte boundary.
*/
sc->txd_cur = ((sc->txd_cur + len + 3) & ~3) % AE_TXD_BUFSIZE_DEFAULT;
if (sc->txd_cur == sc->txd_ack)
sc->flags &= ~AE_FLAG_TXAVAIL;
#ifdef AE_DEBUG
if_printf(sc->ifp, "New txd_cur = %d.\n", sc->txd_cur);
#endif
/*
* Update TxS position and check if there are empty TxS available.
*/
sc->txs_base[sc->txs_cur].flags &= ~htole16(AE_TXS_UPDATE);
sc->txs_cur = (sc->txs_cur + 1) % AE_TXS_COUNT_DEFAULT;
if (sc->txs_cur == sc->txs_ack)
sc->flags &= ~AE_FLAG_TXAVAIL;
/*
* Synchronize DMA memory.
*/
bus_dmamap_sync(sc->dma_txd_tag, sc->dma_txd_map, BUS_DMASYNC_PREREAD |
BUS_DMASYNC_PREWRITE);
bus_dmamap_sync(sc->dma_txs_tag, sc->dma_txs_map,
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
return (0);
}
static void
ae_start(struct ifnet *ifp)
{
ae_softc_t *sc;
sc = ifp->if_softc;
AE_LOCK(sc);
ae_start_locked(ifp);
AE_UNLOCK(sc);
}
static void
ae_start_locked(struct ifnet *ifp)
{
ae_softc_t *sc;
unsigned int count;
struct mbuf *m0;
int error;
sc = ifp->if_softc;
KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__));
AE_LOCK_ASSERT(sc);
#ifdef AE_DEBUG
if_printf(ifp, "Start called.\n");
#endif
if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
IFF_DRV_RUNNING || (sc->flags & AE_FLAG_LINK) == 0)
return;
count = 0;
while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
IFQ_DRV_DEQUEUE(&ifp->if_snd, m0);
if (m0 == NULL)
break; /* Nothing to do. */
error = ae_encap(sc, &m0);
if (error != 0) {
if (m0 != NULL) {
IFQ_DRV_PREPEND(&ifp->if_snd, m0);
ifp->if_drv_flags |= IFF_DRV_OACTIVE;
#ifdef AE_DEBUG
if_printf(ifp, "Setting OACTIVE.\n");
#endif
}
break;
}
count++;
sc->tx_inproc++;
/* Bounce a copy of the frame to BPF. */
ETHER_BPF_MTAP(ifp, m0);
m_freem(m0);
}
if (count > 0) { /* Something was dequeued. */
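/*
 * Kick the chip.  The TxD mailbox index appears to be expressed
 * in 4-byte units, hence the division by 4.
 */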
AE_WRITE_2(sc, AE_MB_TXD_IDX_REG, sc->txd_cur / 4);
sc->wd_timer = AE_TX_TIMEOUT; /* Load watchdog. */
#ifdef AE_DEBUG
if_printf(ifp, "%d packets dequeued.\n", count);
if_printf(ifp, "Tx pos now is %d.\n", sc->txd_cur);
#endif
}
}
static void
ae_link_task(void *arg, int pending)
{
ae_softc_t *sc;
struct mii_data *mii;
struct ifnet *ifp;
uint32_t val;
sc = (ae_softc_t *)arg;
KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__));
AE_LOCK(sc);
ifp = sc->ifp;
mii = device_get_softc(sc->miibus);
if (mii == NULL || ifp == NULL ||
(ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
AE_UNLOCK(sc); /* XXX: could happen? */
return;
}
sc->flags &= ~AE_FLAG_LINK;
if ((mii->mii_media_status & (IFM_AVALID | IFM_ACTIVE)) ==
(IFM_AVALID | IFM_ACTIVE)) {
switch(IFM_SUBTYPE(mii->mii_media_active)) {
case IFM_10_T:
case IFM_100_TX:
sc->flags |= AE_FLAG_LINK;
break;
default:
break;
}
}
/*
* Stop Rx/Tx MACs.
*/
ae_stop_rxmac(sc);
ae_stop_txmac(sc);
if ((sc->flags & AE_FLAG_LINK) != 0) {
ae_mac_config(sc);
/*
* Restart DMA engines.
*/
AE_WRITE_1(sc, AE_DMAREAD_REG, AE_DMAREAD_EN);
AE_WRITE_1(sc, AE_DMAWRITE_REG, AE_DMAWRITE_EN);
/*
* Enable Rx and Tx MACs.
*/
val = AE_READ_4(sc, AE_MAC_REG);
val |= AE_MAC_TX_EN | AE_MAC_RX_EN;
AE_WRITE_4(sc, AE_MAC_REG, val);
}
AE_UNLOCK(sc);
}
static void
ae_stop_rxmac(ae_softc_t *sc)
{
uint32_t val;
int i;
AE_LOCK_ASSERT(sc);
/*
* Stop Rx MAC engine.
*/
val = AE_READ_4(sc, AE_MAC_REG);
if ((val & AE_MAC_RX_EN) != 0) {
val &= ~AE_MAC_RX_EN;
AE_WRITE_4(sc, AE_MAC_REG, val);
}
/*
* Stop Rx DMA engine.
*/
if (AE_READ_1(sc, AE_DMAWRITE_REG) == AE_DMAWRITE_EN)
AE_WRITE_1(sc, AE_DMAWRITE_REG, 0);
/*
* Wait for IDLE state.
*/
for (i = 0; i < AE_IDLE_TIMEOUT; i++) {
val = AE_READ_4(sc, AE_IDLE_REG);
if ((val & (AE_IDLE_RXMAC | AE_IDLE_DMAWRITE)) == 0)
break;
DELAY(100);
}
if (i == AE_IDLE_TIMEOUT)
device_printf(sc->dev, "timed out while stopping Rx MAC.\n");
}
static void
ae_stop_txmac(ae_softc_t *sc)
{
uint32_t val;
int i;
AE_LOCK_ASSERT(sc);
/*
* Stop Tx MAC engine.
*/
val = AE_READ_4(sc, AE_MAC_REG);
if ((val & AE_MAC_TX_EN) != 0) {
val &= ~AE_MAC_TX_EN;
AE_WRITE_4(sc, AE_MAC_REG, val);
}
/*
* Stop Tx DMA engine.
*/
if (AE_READ_1(sc, AE_DMAREAD_REG) == AE_DMAREAD_EN)
AE_WRITE_1(sc, AE_DMAREAD_REG, 0);
/*
* Wait for IDLE state.
*/
for (i = 0; i < AE_IDLE_TIMEOUT; i++) {
val = AE_READ_4(sc, AE_IDLE_REG);
if ((val & (AE_IDLE_TXMAC | AE_IDLE_DMAREAD)) == 0)
break;
DELAY(100);
}
if (i == AE_IDLE_TIMEOUT)
device_printf(sc->dev, "timed out while stopping Tx MAC.\n");
}
static void
ae_mac_config(ae_softc_t *sc)
{
struct mii_data *mii;
uint32_t val;
AE_LOCK_ASSERT(sc);
mii = device_get_softc(sc->miibus);
val = AE_READ_4(sc, AE_MAC_REG);
val &= ~AE_MAC_FULL_DUPLEX;
/* XXX disable AE_MAC_TX_FLOW_EN? */
if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0)
val |= AE_MAC_FULL_DUPLEX;
AE_WRITE_4(sc, AE_MAC_REG, val);
}
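/*
 * Interrupt filter: runs in primary interrupt context, masks further
 * interrupts and defers the actual processing to the driver
 * taskqueue.
 */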
static int
ae_intr(void *arg)
{
ae_softc_t *sc;
uint32_t val;
sc = (ae_softc_t *)arg;
KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__));
val = AE_READ_4(sc, AE_ISR_REG);
if (val == 0 || (val & AE_IMR_DEFAULT) == 0)
return (FILTER_STRAY);
/* Disable interrupts. */
AE_WRITE_4(sc, AE_ISR_REG, AE_ISR_DISABLE);
/* Schedule interrupt processing. */
taskqueue_enqueue(sc->tq, &sc->int_task);
return (FILTER_HANDLED);
}
static void
ae_int_task(void *arg, int pending)
{
ae_softc_t *sc;
struct ifnet *ifp;
uint32_t val;
sc = (ae_softc_t *)arg;
AE_LOCK(sc);
ifp = sc->ifp;
val = AE_READ_4(sc, AE_ISR_REG); /* Read interrupt status. */
if (val == 0) {
AE_UNLOCK(sc);
return;
}
/*
* Clear interrupts and disable them.
*/
AE_WRITE_4(sc, AE_ISR_REG, val | AE_ISR_DISABLE);
#ifdef AE_DEBUG
if_printf(ifp, "Interrupt received: 0x%08x\n", val);
#endif
if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
if ((val & (AE_ISR_DMAR_TIMEOUT | AE_ISR_DMAW_TIMEOUT |
AE_ISR_PHY_LINKDOWN)) != 0) {
ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
ae_init_locked(sc);
AE_UNLOCK(sc);
return;
}
if ((val & AE_ISR_TX_EVENT) != 0)
ae_tx_intr(sc);
if ((val & AE_ISR_RX_EVENT) != 0)
ae_rx_intr(sc);
/*
* Re-enable interrupts.
*/
AE_WRITE_4(sc, AE_ISR_REG, 0);
if ((sc->flags & AE_FLAG_TXAVAIL) != 0) {
if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
ae_start_locked(ifp);
}
}
AE_UNLOCK(sc);
}
static void
ae_tx_intr(ae_softc_t *sc)
{
struct ifnet *ifp;
ae_txd_t *txd;
ae_txs_t *txs;
uint16_t flags;
AE_LOCK_ASSERT(sc);
ifp = sc->ifp;
#ifdef AE_DEBUG
if_printf(ifp, "Tx interrupt occuried.\n");
#endif
/*
 * Synchronize DMA buffers.
*/
bus_dmamap_sync(sc->dma_txd_tag, sc->dma_txd_map,
BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
bus_dmamap_sync(sc->dma_txs_tag, sc->dma_txs_map,
BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
for (;;) {
txs = sc->txs_base + sc->txs_ack;
flags = le16toh(txs->flags);
if ((flags & AE_TXS_UPDATE) == 0)
break;
txs->flags = htole16(flags & ~AE_TXS_UPDATE);
/* Update stats. */
ae_update_stats_tx(flags, &sc->stats);
/*
* Update TxS position.
*/
sc->txs_ack = (sc->txs_ack + 1) % AE_TXS_COUNT_DEFAULT;
sc->flags |= AE_FLAG_TXAVAIL;
txd = (ae_txd_t *)(sc->txd_base + sc->txd_ack);
if (txs->len != txd->len)
device_printf(sc->dev, "Size mismatch: TxS:%d TxD:%d\n",
le16toh(txs->len), le16toh(txd->len));
/*
* Move txd ack and align on 4-byte boundary.
*/
sc->txd_ack = ((sc->txd_ack + le16toh(txd->len) +
sizeof(ae_txs_t) + 3) & ~3) % AE_TXD_BUFSIZE_DEFAULT;
if ((flags & AE_TXS_SUCCESS) != 0)
if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
else
if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
sc->tx_inproc--;
}
if ((sc->flags & AE_FLAG_TXAVAIL) != 0)
ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
if (sc->tx_inproc < 0) {
if_printf(ifp, "Received stray Tx interrupt(s).\n");
sc->tx_inproc = 0;
}
if (sc->tx_inproc == 0)
sc->wd_timer = 0; /* Unarm watchdog. */
/*
 * Synchronize DMA buffers.
*/
bus_dmamap_sync(sc->dma_txd_tag, sc->dma_txd_map,
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
bus_dmamap_sync(sc->dma_txs_tag, sc->dma_txs_map,
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}
static void
ae_rxeof(ae_softc_t *sc, ae_rxd_t *rxd)
{
struct ifnet *ifp;
struct mbuf *m;
unsigned int size;
uint16_t flags;
AE_LOCK_ASSERT(sc);
ifp = sc->ifp;
flags = le16toh(rxd->flags);
#ifdef AE_DEBUG
if_printf(ifp, "Rx interrupt occuried.\n");
#endif
size = le16toh(rxd->len) - ETHER_CRC_LEN;
if (size < (ETHER_MIN_LEN - ETHER_CRC_LEN - ETHER_VLAN_ENCAP_LEN)) {
if_printf(ifp, "Runt frame received.");
if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
return;
}
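/*
 * Copy the frame out of the fixed Rx ring into a fresh mbuf chain,
 * offset by ETHER_ALIGN so that the IP header ends up 32-bit
 * aligned.
 */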
m = m_devget(&rxd->data[0], size, ETHER_ALIGN, ifp, NULL);
if (m == NULL) {
if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
return;
}
if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 &&
(flags & AE_RXD_HAS_VLAN) != 0) {
m->m_pkthdr.ether_vtag = AE_RXD_VLAN(le16toh(rxd->vlan));
m->m_flags |= M_VLANTAG;
}
if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
/*
* Pass it through.
*/
AE_UNLOCK(sc);
(*ifp->if_input)(ifp, m);
AE_LOCK(sc);
}
static void
ae_rx_intr(ae_softc_t *sc)
{
ae_rxd_t *rxd;
struct ifnet *ifp;
uint16_t flags;
int count;
KASSERT(sc != NULL, ("[ae, %d]: sc is NULL!", __LINE__));
AE_LOCK_ASSERT(sc);
ifp = sc->ifp;
/*
 * Synchronize DMA buffers.
*/
bus_dmamap_sync(sc->dma_rxd_tag, sc->dma_rxd_map,
BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
for (count = 0;; count++) {
rxd = (ae_rxd_t *)(sc->rxd_base + sc->rxd_cur);
flags = le16toh(rxd->flags);
if ((flags & AE_RXD_UPDATE) == 0)
break;
rxd->flags = htole16(flags & ~AE_RXD_UPDATE);
/* Update stats. */
ae_update_stats_rx(flags, &sc->stats);
/*
* Update position index.
*/
sc->rxd_cur = (sc->rxd_cur + 1) % AE_RXD_COUNT_DEFAULT;
if ((flags & AE_RXD_SUCCESS) != 0)
ae_rxeof(sc, rxd);
else
if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
}
if (count > 0) {
bus_dmamap_sync(sc->dma_rxd_tag, sc->dma_rxd_map,
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
/*
* Update Rx index.
*/
AE_WRITE_2(sc, AE_MB_RXD_IDX_REG, sc->rxd_cur);
}
}
static void
ae_watchdog(ae_softc_t *sc)
{
struct ifnet *ifp;
KASSERT(sc != NULL, ("[ae, %d]: sc is NULL!", __LINE__));
AE_LOCK_ASSERT(sc);
ifp = sc->ifp;
if (sc->wd_timer == 0 || --sc->wd_timer != 0)
return; /* Nothing to do. */
if ((sc->flags & AE_FLAG_LINK) == 0)
if_printf(ifp, "watchdog timeout (missed link).\n");
else
if_printf(ifp, "watchdog timeout - resetting.\n");
if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
ae_init_locked(sc);
if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
ae_start_locked(ifp);
}
static void
ae_tick(void *arg)
{
ae_softc_t *sc;
struct mii_data *mii;
sc = (ae_softc_t *)arg;
KASSERT(sc != NULL, ("[ae, %d]: sc is NULL!", __LINE__));
AE_LOCK_ASSERT(sc);
mii = device_get_softc(sc->miibus);
mii_tick(mii);
ae_watchdog(sc); /* Watchdog check. */
callout_reset(&sc->tick_ch, hz, ae_tick, sc);
}
static void
ae_rxvlan(ae_softc_t *sc)
{
struct ifnet *ifp;
uint32_t val;
AE_LOCK_ASSERT(sc);
ifp = sc->ifp;
val = AE_READ_4(sc, AE_MAC_REG);
val &= ~AE_MAC_RMVLAN_EN;
if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
val |= AE_MAC_RMVLAN_EN;
AE_WRITE_4(sc, AE_MAC_REG, val);
}
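/*
 * Multicast hash: bit 31 of the big-endian CRC of the address
 * selects one of the two 32-bit hash table registers (MHT0/MHT1) and
 * bits 30-26 select the bit within it.
 */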
static u_int
ae_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
{
uint32_t crc, *mchash = arg;
crc = ether_crc32_be(LLADDR(sdl), ETHER_ADDR_LEN);
mchash[crc >> 31] |= 1 << ((crc >> 26) & 0x1f);
return (1);
}
static void
ae_rxfilter(ae_softc_t *sc)
{
struct ifnet *ifp;
uint32_t mchash[2];
uint32_t rxcfg;
KASSERT(sc != NULL, ("[ae, %d]: sc is NULL!", __LINE__));
AE_LOCK_ASSERT(sc);
ifp = sc->ifp;
rxcfg = AE_READ_4(sc, AE_MAC_REG);
rxcfg &= ~(AE_MAC_MCAST_EN | AE_MAC_BCAST_EN | AE_MAC_PROMISC_EN);
if ((ifp->if_flags & IFF_BROADCAST) != 0)
rxcfg |= AE_MAC_BCAST_EN;
if ((ifp->if_flags & IFF_PROMISC) != 0)
rxcfg |= AE_MAC_PROMISC_EN;
if ((ifp->if_flags & IFF_ALLMULTI) != 0)
rxcfg |= AE_MAC_MCAST_EN;
/*
* Wipe old settings.
*/
AE_WRITE_4(sc, AE_REG_MHT0, 0);
AE_WRITE_4(sc, AE_REG_MHT1, 0);
if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
AE_WRITE_4(sc, AE_REG_MHT0, 0xffffffff);
AE_WRITE_4(sc, AE_REG_MHT1, 0xffffffff);
AE_WRITE_4(sc, AE_MAC_REG, rxcfg);
return;
}
/*
* Load multicast tables.
*/
bzero(mchash, sizeof(mchash));
if_foreach_llmaddr(ifp, ae_hash_maddr, &mchash);
AE_WRITE_4(sc, AE_REG_MHT0, mchash[0]);
AE_WRITE_4(sc, AE_REG_MHT1, mchash[1]);
AE_WRITE_4(sc, AE_MAC_REG, rxcfg);
}
static int
ae_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
struct ae_softc *sc;
struct ifreq *ifr;
struct mii_data *mii;
int error, mask;
sc = ifp->if_softc;
ifr = (struct ifreq *)data;
error = 0;
switch (cmd) {
case SIOCSIFMTU:
if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > ETHERMTU)
error = EINVAL;
else if (ifp->if_mtu != ifr->ifr_mtu) {
AE_LOCK(sc);
ifp->if_mtu = ifr->ifr_mtu;
if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
ae_init_locked(sc);
}
AE_UNLOCK(sc);
}
break;
case SIOCSIFFLAGS:
AE_LOCK(sc);
if ((ifp->if_flags & IFF_UP) != 0) {
if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
if (((ifp->if_flags ^ sc->if_flags)
& (IFF_PROMISC | IFF_ALLMULTI)) != 0)
ae_rxfilter(sc);
} else {
if ((sc->flags & AE_FLAG_DETACH) == 0)
ae_init_locked(sc);
}
} else {
if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
ae_stop(sc);
}
sc->if_flags = ifp->if_flags;
AE_UNLOCK(sc);
break;
case SIOCADDMULTI:
case SIOCDELMULTI:
AE_LOCK(sc);
if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
ae_rxfilter(sc);
AE_UNLOCK(sc);
break;
case SIOCSIFMEDIA:
case SIOCGIFMEDIA:
mii = device_get_softc(sc->miibus);
error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
break;
case SIOCSIFCAP:
AE_LOCK(sc);
mask = ifr->ifr_reqcap ^ ifp->if_capenable;
if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
(ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) != 0) {
ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
ae_rxvlan(sc);
}
VLAN_CAPABILITIES(ifp);
AE_UNLOCK(sc);
break;
default:
error = ether_ioctl(ifp, cmd, data);
break;
}
return (error);
}
static void
ae_stop(ae_softc_t *sc)
{
struct ifnet *ifp;
int i;
AE_LOCK_ASSERT(sc);
ifp = sc->ifp;
ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
sc->flags &= ~AE_FLAG_LINK;
sc->wd_timer = 0; /* Cancel watchdog. */
callout_stop(&sc->tick_ch);
/*
* Clear and disable interrupts.
*/
AE_WRITE_4(sc, AE_IMR_REG, 0);
AE_WRITE_4(sc, AE_ISR_REG, 0xffffffff);
/*
* Stop Rx/Tx MACs.
*/
ae_stop_txmac(sc);
ae_stop_rxmac(sc);
/*
* Stop DMA engines.
*/
AE_WRITE_1(sc, AE_DMAREAD_REG, ~AE_DMAREAD_EN);
AE_WRITE_1(sc, AE_DMAWRITE_REG, ~AE_DMAWRITE_EN);
/*
* Wait for everything to enter idle state.
*/
for (i = 0; i < AE_IDLE_TIMEOUT; i++) {
if (AE_READ_4(sc, AE_IDLE_REG) == 0)
break;
DELAY(100);
}
if (i == AE_IDLE_TIMEOUT)
device_printf(sc->dev, "could not enter idle state in stop.\n");
}
static void
ae_update_stats_tx(uint16_t flags, ae_stats_t *stats)
{
if ((flags & AE_TXS_BCAST) != 0)
stats->tx_bcast++;
if ((flags & AE_TXS_MCAST) != 0)
stats->tx_mcast++;
if ((flags & AE_TXS_PAUSE) != 0)
stats->tx_pause++;
if ((flags & AE_TXS_CTRL) != 0)
stats->tx_ctrl++;
if ((flags & AE_TXS_DEFER) != 0)
stats->tx_defer++;
if ((flags & AE_TXS_EXCDEFER) != 0)
stats->tx_excdefer++;
if ((flags & AE_TXS_SINGLECOL) != 0)
stats->tx_singlecol++;
if ((flags & AE_TXS_MULTICOL) != 0)
stats->tx_multicol++;
if ((flags & AE_TXS_LATECOL) != 0)
stats->tx_latecol++;
if ((flags & AE_TXS_ABORTCOL) != 0)
stats->tx_abortcol++;
if ((flags & AE_TXS_UNDERRUN) != 0)
stats->tx_underrun++;
}
static void
ae_update_stats_rx(uint16_t flags, ae_stats_t *stats)
{
if ((flags & AE_RXD_BCAST) != 0)
stats->rx_bcast++;
if ((flags & AE_RXD_MCAST) != 0)
stats->rx_mcast++;
if ((flags & AE_RXD_PAUSE) != 0)
stats->rx_pause++;
if ((flags & AE_RXD_CTRL) != 0)
stats->rx_ctrl++;
if ((flags & AE_RXD_CRCERR) != 0)
stats->rx_crcerr++;
if ((flags & AE_RXD_CODEERR) != 0)
stats->rx_codeerr++;
if ((flags & AE_RXD_RUNT) != 0)
stats->rx_runt++;
if ((flags & AE_RXD_FRAG) != 0)
stats->rx_frag++;
if ((flags & AE_RXD_TRUNC) != 0)
stats->rx_trunc++;
if ((flags & AE_RXD_ALIGN) != 0)
stats->rx_align++;
}
diff --git a/sys/dev/age/if_age.c b/sys/dev/age/if_age.c
index 7bb494d81de2..cd589dcb734d 100644
--- a/sys/dev/age/if_age.c
+++ b/sys/dev/age/if_age.c
@@ -1,3333 +1,3327 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2008, Pyun YongHyeon <yongari@FreeBSD.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice unmodified, this list of conditions, and the following
* disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/* Driver for Attansic Technology Corp. L1 Gigabit Ethernet. */
#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/rman.h>
#include <sys/module.h>
#include <sys/queue.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <net/bpf.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <machine/bus.h>
#include <machine/in_cksum.h>
#include <dev/age/if_agereg.h>
#include <dev/age/if_agevar.h>
/* "device miibus" required. See GENERIC if you get errors here. */
#include "miibus_if.h"
#define AGE_CSUM_FEATURES (CSUM_TCP | CSUM_UDP)
MODULE_DEPEND(age, pci, 1, 1, 1);
MODULE_DEPEND(age, ether, 1, 1, 1);
MODULE_DEPEND(age, miibus, 1, 1, 1);
/* Tunables. */
static int msi_disable = 0;
static int msix_disable = 0;
TUNABLE_INT("hw.age.msi_disable", &msi_disable);
TUNABLE_INT("hw.age.msix_disable", &msix_disable);
/*
* Devices supported by this driver.
*/
static struct age_dev {
uint16_t age_vendorid;
uint16_t age_deviceid;
const char *age_name;
} age_devs[] = {
{ VENDORID_ATTANSIC, DEVICEID_ATTANSIC_L1,
"Attansic Technology Corp, L1 Gigabit Ethernet" },
};
static int age_miibus_readreg(device_t, int, int);
static int age_miibus_writereg(device_t, int, int, int);
static void age_miibus_statchg(device_t);
static void age_mediastatus(struct ifnet *, struct ifmediareq *);
static int age_mediachange(struct ifnet *);
static int age_probe(device_t);
static void age_get_macaddr(struct age_softc *);
static void age_phy_reset(struct age_softc *);
static int age_attach(device_t);
static int age_detach(device_t);
static void age_sysctl_node(struct age_softc *);
static void age_dmamap_cb(void *, bus_dma_segment_t *, int, int);
static int age_check_boundary(struct age_softc *);
static int age_dma_alloc(struct age_softc *);
static void age_dma_free(struct age_softc *);
static int age_shutdown(device_t);
static void age_setwol(struct age_softc *);
static int age_suspend(device_t);
static int age_resume(device_t);
static int age_encap(struct age_softc *, struct mbuf **);
static void age_start(struct ifnet *);
static void age_start_locked(struct ifnet *);
static void age_watchdog(struct age_softc *);
static int age_ioctl(struct ifnet *, u_long, caddr_t);
static void age_mac_config(struct age_softc *);
static void age_link_task(void *, int);
static void age_stats_update(struct age_softc *);
static int age_intr(void *);
static void age_int_task(void *, int);
static void age_txintr(struct age_softc *, int);
static void age_rxeof(struct age_softc *sc, struct rx_rdesc *);
static int age_rxintr(struct age_softc *, int, int);
static void age_tick(void *);
static void age_reset(struct age_softc *);
static void age_init(void *);
static void age_init_locked(struct age_softc *);
static void age_stop(struct age_softc *);
static void age_stop_txmac(struct age_softc *);
static void age_stop_rxmac(struct age_softc *);
static void age_init_tx_ring(struct age_softc *);
static int age_init_rx_ring(struct age_softc *);
static void age_init_rr_ring(struct age_softc *);
static void age_init_cmb_block(struct age_softc *);
static void age_init_smb_block(struct age_softc *);
#ifndef __NO_STRICT_ALIGNMENT
static struct mbuf *age_fixup_rx(struct ifnet *, struct mbuf *);
#endif
static int age_newbuf(struct age_softc *, struct age_rxdesc *);
static void age_rxvlan(struct age_softc *);
static void age_rxfilter(struct age_softc *);
static int sysctl_age_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int);
static int sysctl_hw_age_proc_limit(SYSCTL_HANDLER_ARGS);
static int sysctl_hw_age_int_mod(SYSCTL_HANDLER_ARGS);
static device_method_t age_methods[] = {
/* Device interface. */
DEVMETHOD(device_probe, age_probe),
DEVMETHOD(device_attach, age_attach),
DEVMETHOD(device_detach, age_detach),
DEVMETHOD(device_shutdown, age_shutdown),
DEVMETHOD(device_suspend, age_suspend),
DEVMETHOD(device_resume, age_resume),
/* MII interface. */
DEVMETHOD(miibus_readreg, age_miibus_readreg),
DEVMETHOD(miibus_writereg, age_miibus_writereg),
DEVMETHOD(miibus_statchg, age_miibus_statchg),
{ NULL, NULL }
};
static driver_t age_driver = {
"age",
age_methods,
sizeof(struct age_softc)
};
static devclass_t age_devclass;
DRIVER_MODULE(age, pci, age_driver, age_devclass, 0, 0);
MODULE_PNP_INFO("U16:vendor;U16:device;D:#", pci, age, age_devs,
nitems(age_devs));
DRIVER_MODULE(miibus, age, miibus_driver, miibus_devclass, 0, 0);
static struct resource_spec age_res_spec_mem[] = {
{ SYS_RES_MEMORY, PCIR_BAR(0), RF_ACTIVE },
{ -1, 0, 0 }
};
static struct resource_spec age_irq_spec_legacy[] = {
{ SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE },
{ -1, 0, 0 }
};
static struct resource_spec age_irq_spec_msi[] = {
{ SYS_RES_IRQ, 1, RF_ACTIVE },
{ -1, 0, 0 }
};
static struct resource_spec age_irq_spec_msix[] = {
{ SYS_RES_IRQ, 1, RF_ACTIVE },
{ -1, 0, 0 }
};
/*
* Read a PHY register on the MII of the L1.
*/
static int
age_miibus_readreg(device_t dev, int phy, int reg)
{
struct age_softc *sc;
uint32_t v;
int i;
sc = device_get_softc(dev);
CSR_WRITE_4(sc, AGE_MDIO, MDIO_OP_EXECUTE | MDIO_OP_READ |
MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg));
for (i = AGE_PHY_TIMEOUT; i > 0; i--) {
DELAY(1);
v = CSR_READ_4(sc, AGE_MDIO);
if ((v & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0)
break;
}
if (i == 0) {
device_printf(sc->age_dev, "phy read timeout : %d\n", reg);
return (0);
}
return ((v & MDIO_DATA_MASK) >> MDIO_DATA_SHIFT);
}
/*
* Write a PHY register on the MII of the L1.
*/
static int
age_miibus_writereg(device_t dev, int phy, int reg, int val)
{
struct age_softc *sc;
uint32_t v;
int i;
sc = device_get_softc(dev);
CSR_WRITE_4(sc, AGE_MDIO, MDIO_OP_EXECUTE | MDIO_OP_WRITE |
(val & MDIO_DATA_MASK) << MDIO_DATA_SHIFT |
MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg));
for (i = AGE_PHY_TIMEOUT; i > 0; i--) {
DELAY(1);
v = CSR_READ_4(sc, AGE_MDIO);
if ((v & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0)
break;
}
if (i == 0)
device_printf(sc->age_dev, "phy write timeout : %d\n", reg);
return (0);
}
/*
* Callback from MII layer when media changes.
*/
static void
age_miibus_statchg(device_t dev)
{
struct age_softc *sc;
sc = device_get_softc(dev);
taskqueue_enqueue(taskqueue_swi, &sc->age_link_task);
}
/*
* Get the current interface media status.
*/
static void
age_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
struct age_softc *sc;
struct mii_data *mii;
sc = ifp->if_softc;
AGE_LOCK(sc);
mii = device_get_softc(sc->age_miibus);
mii_pollstat(mii);
ifmr->ifm_status = mii->mii_media_status;
ifmr->ifm_active = mii->mii_media_active;
AGE_UNLOCK(sc);
}
/*
* Set hardware to newly-selected media.
*/
static int
age_mediachange(struct ifnet *ifp)
{
struct age_softc *sc;
struct mii_data *mii;
struct mii_softc *miisc;
int error;
sc = ifp->if_softc;
AGE_LOCK(sc);
mii = device_get_softc(sc->age_miibus);
LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
PHY_RESET(miisc);
error = mii_mediachg(mii);
AGE_UNLOCK(sc);
return (error);
}
static int
age_probe(device_t dev)
{
struct age_dev *sp;
int i;
uint16_t vendor, devid;
vendor = pci_get_vendor(dev);
devid = pci_get_device(dev);
sp = age_devs;
for (i = 0; i < nitems(age_devs); i++, sp++) {
if (vendor == sp->age_vendorid &&
devid == sp->age_deviceid) {
device_set_desc(dev, sp->age_name);
return (BUS_PROBE_DEFAULT);
}
}
return (ENXIO);
}
static void
age_get_macaddr(struct age_softc *sc)
{
uint32_t ea[2], reg;
int i, vpdc;
reg = CSR_READ_4(sc, AGE_SPI_CTRL);
if ((reg & SPI_VPD_ENB) != 0) {
/* Get VPD stored in TWSI EEPROM. */
reg &= ~SPI_VPD_ENB;
CSR_WRITE_4(sc, AGE_SPI_CTRL, reg);
}
if (pci_find_cap(sc->age_dev, PCIY_VPD, &vpdc) == 0) {
/*
* PCI VPD capability found, let TWSI reload EEPROM.
 * This will set the ethernet address of the controller.
*/
CSR_WRITE_4(sc, AGE_TWSI_CTRL, CSR_READ_4(sc, AGE_TWSI_CTRL) |
TWSI_CTRL_SW_LD_START);
for (i = 100; i > 0; i--) {
DELAY(1000);
reg = CSR_READ_4(sc, AGE_TWSI_CTRL);
if ((reg & TWSI_CTRL_SW_LD_START) == 0)
break;
}
if (i == 0)
device_printf(sc->age_dev,
"reloading EEPROM timeout!\n");
} else {
if (bootverbose)
device_printf(sc->age_dev,
"PCI VPD capability not found!\n");
}
ea[0] = CSR_READ_4(sc, AGE_PAR0);
ea[1] = CSR_READ_4(sc, AGE_PAR1);
sc->age_eaddr[0] = (ea[1] >> 8) & 0xFF;
sc->age_eaddr[1] = (ea[1] >> 0) & 0xFF;
sc->age_eaddr[2] = (ea[0] >> 24) & 0xFF;
sc->age_eaddr[3] = (ea[0] >> 16) & 0xFF;
sc->age_eaddr[4] = (ea[0] >> 8) & 0xFF;
sc->age_eaddr[5] = (ea[0] >> 0) & 0xFF;
}
static void
age_phy_reset(struct age_softc *sc)
{
uint16_t reg, pn;
int i, linkup;
/* Reset PHY. */
CSR_WRITE_4(sc, AGE_GPHY_CTRL, GPHY_CTRL_RST);
DELAY(2000);
CSR_WRITE_4(sc, AGE_GPHY_CTRL, GPHY_CTRL_CLR);
DELAY(2000);
#define ATPHY_DBG_ADDR 0x1D
#define ATPHY_DBG_DATA 0x1E
#define ATPHY_CDTC 0x16
#define PHY_CDTC_ENB 0x0001
#define PHY_CDTC_POFF 8
#define ATPHY_CDTS 0x1C
#define PHY_CDTS_STAT_OK 0x0000
#define PHY_CDTS_STAT_SHORT 0x0100
#define PHY_CDTS_STAT_OPEN 0x0200
#define PHY_CDTS_STAT_INVAL 0x0300
#define PHY_CDTS_STAT_MASK 0x0300
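/*
 * The loop below appears to run the PHY's cable-diagnostic test
 * (the CDTC/CDTS registers defined above) on each of the four wire
 * pairs and treats any result other than "open" as an attached
 * cable; the exact register semantics are not documented here.
 */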
/* Check power saving mode. Magic from Linux. */
age_miibus_writereg(sc->age_dev, sc->age_phyaddr, MII_BMCR, BMCR_RESET);
for (linkup = 0, pn = 0; pn < 4; pn++) {
age_miibus_writereg(sc->age_dev, sc->age_phyaddr, ATPHY_CDTC,
(pn << PHY_CDTC_POFF) | PHY_CDTC_ENB);
for (i = 200; i > 0; i--) {
DELAY(1000);
reg = age_miibus_readreg(sc->age_dev, sc->age_phyaddr,
ATPHY_CDTC);
if ((reg & PHY_CDTC_ENB) == 0)
break;
}
DELAY(1000);
reg = age_miibus_readreg(sc->age_dev, sc->age_phyaddr,
ATPHY_CDTS);
if ((reg & PHY_CDTS_STAT_MASK) != PHY_CDTS_STAT_OPEN) {
linkup++;
break;
}
}
age_miibus_writereg(sc->age_dev, sc->age_phyaddr, MII_BMCR,
BMCR_RESET | BMCR_AUTOEN | BMCR_STARTNEG);
if (linkup == 0) {
age_miibus_writereg(sc->age_dev, sc->age_phyaddr,
ATPHY_DBG_ADDR, 0);
age_miibus_writereg(sc->age_dev, sc->age_phyaddr,
ATPHY_DBG_DATA, 0x124E);
age_miibus_writereg(sc->age_dev, sc->age_phyaddr,
ATPHY_DBG_ADDR, 1);
reg = age_miibus_readreg(sc->age_dev, sc->age_phyaddr,
ATPHY_DBG_DATA);
age_miibus_writereg(sc->age_dev, sc->age_phyaddr,
ATPHY_DBG_DATA, reg | 0x03);
/* XXX */
DELAY(1500 * 1000);
age_miibus_writereg(sc->age_dev, sc->age_phyaddr,
ATPHY_DBG_ADDR, 0);
age_miibus_writereg(sc->age_dev, sc->age_phyaddr,
ATPHY_DBG_DATA, 0x024E);
}
#undef ATPHY_DBG_ADDR
#undef ATPHY_DBG_DATA
#undef ATPHY_CDTC
#undef PHY_CDTC_ENB
#undef PHY_CDTC_POFF
#undef ATPHY_CDTS
#undef PHY_CDTS_STAT_OK
#undef PHY_CDTS_STAT_SHORT
#undef PHY_CDTS_STAT_OPEN
#undef PHY_CDTS_STAT_INVAL
#undef PHY_CDTS_STAT_MASK
}
static int
age_attach(device_t dev)
{
struct age_softc *sc;
struct ifnet *ifp;
uint16_t burst;
int error, i, msic, msixc, pmc;
error = 0;
sc = device_get_softc(dev);
sc->age_dev = dev;
mtx_init(&sc->age_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
MTX_DEF);
callout_init_mtx(&sc->age_tick_ch, &sc->age_mtx, 0);
TASK_INIT(&sc->age_int_task, 0, age_int_task, sc);
TASK_INIT(&sc->age_link_task, 0, age_link_task, sc);
/* Map the device. */
pci_enable_busmaster(dev);
sc->age_res_spec = age_res_spec_mem;
sc->age_irq_spec = age_irq_spec_legacy;
error = bus_alloc_resources(dev, sc->age_res_spec, sc->age_res);
if (error != 0) {
device_printf(dev, "cannot allocate memory resources.\n");
goto fail;
}
/* Set PHY address. */
sc->age_phyaddr = AGE_PHY_ADDR;
/* Reset PHY. */
age_phy_reset(sc);
/* Reset the ethernet controller. */
age_reset(sc);
/* Get PCI and chip id/revision. */
sc->age_rev = pci_get_revid(dev);
sc->age_chip_rev = CSR_READ_4(sc, AGE_MASTER_CFG) >>
MASTER_CHIP_REV_SHIFT;
if (bootverbose) {
device_printf(dev, "PCI device revision : 0x%04x\n",
sc->age_rev);
device_printf(dev, "Chip id/revision : 0x%04x\n",
sc->age_chip_rev);
}
/*
* XXX
 * Uninitialized hardware returns an invalid chip id/revision
 * as well as 0xFFFFFFFF for the Tx/Rx FIFO lengths. It seems that
 * an unplugged cable puts the hardware into an automatic power
 * down mode, which in turn makes it return an invalid chip revision.
*/
if (sc->age_chip_rev == 0xFFFF) {
device_printf(dev,"invalid chip revision : 0x%04x -- "
"not initialized?\n", sc->age_chip_rev);
error = ENXIO;
goto fail;
}
device_printf(dev, "%d Tx FIFO, %d Rx FIFO\n",
CSR_READ_4(sc, AGE_SRAM_TX_FIFO_LEN),
CSR_READ_4(sc, AGE_SRAM_RX_FIFO_LEN));
/* Allocate IRQ resources. */
msixc = pci_msix_count(dev);
msic = pci_msi_count(dev);
if (bootverbose) {
device_printf(dev, "MSIX count : %d\n", msixc);
device_printf(dev, "MSI count : %d\n", msic);
}
/* Prefer MSIX over MSI. */
if (msix_disable == 0 || msi_disable == 0) {
if (msix_disable == 0 && msixc == AGE_MSIX_MESSAGES &&
pci_alloc_msix(dev, &msixc) == 0) {
if (msixc == AGE_MSIX_MESSAGES) {
device_printf(dev, "Using %d MSIX messages.\n",
msixc);
sc->age_flags |= AGE_FLAG_MSIX;
sc->age_irq_spec = age_irq_spec_msix;
} else
pci_release_msi(dev);
}
if (msi_disable == 0 && (sc->age_flags & AGE_FLAG_MSIX) == 0 &&
msic == AGE_MSI_MESSAGES &&
pci_alloc_msi(dev, &msic) == 0) {
if (msic == AGE_MSI_MESSAGES) {
device_printf(dev, "Using %d MSI messages.\n",
msic);
sc->age_flags |= AGE_FLAG_MSI;
sc->age_irq_spec = age_irq_spec_msi;
} else
pci_release_msi(dev);
}
}
error = bus_alloc_resources(dev, sc->age_irq_spec, sc->age_irq);
if (error != 0) {
device_printf(dev, "cannot allocate IRQ resources.\n");
goto fail;
}
/* Get DMA parameters from PCIe device control register. */
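/*
 * In the PCIe capability, offset 0x08 is the Device Control
 * register: bits 14:12 hold the maximum read request size and
 * bits 7:5 the maximum payload size, each encoded as 128 << n
 * bytes (e.g. a field value of 2 means 512 bytes).
 */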
if (pci_find_cap(dev, PCIY_EXPRESS, &i) == 0) {
sc->age_flags |= AGE_FLAG_PCIE;
burst = pci_read_config(dev, i + 0x08, 2);
/* Max read request size. */
sc->age_dma_rd_burst = ((burst >> 12) & 0x07) <<
DMA_CFG_RD_BURST_SHIFT;
/* Max payload size. */
sc->age_dma_wr_burst = ((burst >> 5) & 0x07) <<
DMA_CFG_WR_BURST_SHIFT;
if (bootverbose) {
device_printf(dev, "Read request size : %d bytes.\n",
128 << ((burst >> 12) & 0x07));
device_printf(dev, "TLP payload size : %d bytes.\n",
128 << ((burst >> 5) & 0x07));
}
} else {
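/* Not a PCIe device; fall back to conservative 128 byte bursts. */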
sc->age_dma_rd_burst = DMA_CFG_RD_BURST_128;
sc->age_dma_wr_burst = DMA_CFG_WR_BURST_128;
}
/* Create device sysctl node. */
age_sysctl_node(sc);
if ((error = age_dma_alloc(sc)) != 0)
goto fail;
/* Load station address. */
age_get_macaddr(sc);
ifp = sc->age_ifp = if_alloc(IFT_ETHER);
- if (ifp == NULL) {
- device_printf(dev, "cannot allocate ifnet structure.\n");
- error = ENXIO;
- goto fail;
- }
-
ifp->if_softc = sc;
if_initname(ifp, device_get_name(dev), device_get_unit(dev));
ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
ifp->if_ioctl = age_ioctl;
ifp->if_start = age_start;
ifp->if_init = age_init;
ifp->if_snd.ifq_drv_maxlen = AGE_TX_RING_CNT - 1;
IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
IFQ_SET_READY(&ifp->if_snd);
ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_TSO4;
ifp->if_hwassist = AGE_CSUM_FEATURES | CSUM_TSO;
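/*
 * Wake-on-LAN is only advertised when the device also carries the
 * PCI power management capability needed to wake the chip.
 */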
if (pci_find_cap(dev, PCIY_PMG, &pmc) == 0) {
sc->age_flags |= AGE_FLAG_PMCAP;
ifp->if_capabilities |= IFCAP_WOL_MAGIC | IFCAP_WOL_MCAST;
}
ifp->if_capenable = ifp->if_capabilities;
/* Set up MII bus. */
error = mii_attach(dev, &sc->age_miibus, ifp, age_mediachange,
age_mediastatus, BMSR_DEFCAPMASK, sc->age_phyaddr, MII_OFFSET_ANY,
0);
if (error != 0) {
device_printf(dev, "attaching PHYs failed\n");
goto fail;
}
ether_ifattach(ifp, sc->age_eaddr);
/* VLAN capability setup. */
ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING |
IFCAP_VLAN_HWCSUM | IFCAP_VLAN_HWTSO;
ifp->if_capenable = ifp->if_capabilities;
/* Tell the upper layer(s) we support long frames. */
ifp->if_hdrlen = sizeof(struct ether_vlan_header);
/* Create local taskq. */
sc->age_tq = taskqueue_create_fast("age_taskq", M_WAITOK,
taskqueue_thread_enqueue, &sc->age_tq);
taskqueue_start_threads(&sc->age_tq, 1, PI_NET, "%s taskq",
device_get_nameunit(sc->age_dev));
if ((sc->age_flags & AGE_FLAG_MSIX) != 0)
msic = AGE_MSIX_MESSAGES;
else if ((sc->age_flags & AGE_FLAG_MSI) != 0)
msic = AGE_MSI_MESSAGES;
else
msic = 1;
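/* Install one interrupt handler per allocated message (one for INTx). */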
for (i = 0; i < msic; i++) {
error = bus_setup_intr(dev, sc->age_irq[i],
INTR_TYPE_NET | INTR_MPSAFE, age_intr, NULL, sc,
&sc->age_intrhand[i]);
if (error != 0)
break;
}
if (error != 0) {
device_printf(dev, "could not set up interrupt handler.\n");
taskqueue_free(sc->age_tq);
sc->age_tq = NULL;
ether_ifdetach(ifp);
goto fail;
}
fail:
if (error != 0)
age_detach(dev);
return (error);
}
static int
age_detach(device_t dev)
{
struct age_softc *sc;
struct ifnet *ifp;
int i, msic;
sc = device_get_softc(dev);
ifp = sc->age_ifp;
if (device_is_attached(dev)) {
AGE_LOCK(sc);
sc->age_flags |= AGE_FLAG_DETACH;
age_stop(sc);
AGE_UNLOCK(sc);
callout_drain(&sc->age_tick_ch);
taskqueue_drain(sc->age_tq, &sc->age_int_task);
taskqueue_drain(taskqueue_swi, &sc->age_link_task);
ether_ifdetach(ifp);
}
if (sc->age_tq != NULL) {
taskqueue_drain(sc->age_tq, &sc->age_int_task);
taskqueue_free(sc->age_tq);
sc->age_tq = NULL;
}
if (sc->age_miibus != NULL) {
device_delete_child(dev, sc->age_miibus);
sc->age_miibus = NULL;
}
bus_generic_detach(dev);
age_dma_free(sc);
if (ifp != NULL) {
if_free(ifp);
sc->age_ifp = NULL;
}
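/*
 * Recompute the message count the same way age_attach() did so that
 * every installed handler is torn down.
 */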
if ((sc->age_flags & AGE_FLAG_MSIX) != 0)
msic = AGE_MSIX_MESSAGES;
else if ((sc->age_flags & AGE_FLAG_MSI) != 0)
msic = AGE_MSI_MESSAGES;
else
msic = 1;
for (i = 0; i < msic; i++) {
if (sc->age_intrhand[i] != NULL) {
bus_teardown_intr(dev, sc->age_irq[i],
sc->age_intrhand[i]);
sc->age_intrhand[i] = NULL;
}
}
bus_release_resources(dev, sc->age_irq_spec, sc->age_irq);
if ((sc->age_flags & (AGE_FLAG_MSI | AGE_FLAG_MSIX)) != 0)
pci_release_msi(dev);
bus_release_resources(dev, sc->age_res_spec, sc->age_res);
mtx_destroy(&sc->age_mtx);
return (0);
}
static void
age_sysctl_node(struct age_softc *sc)
{
int error;
SYSCTL_ADD_PROC(device_get_sysctl_ctx(sc->age_dev),
SYSCTL_CHILDREN(device_get_sysctl_tree(sc->age_dev)), OID_AUTO,
"stats", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
sc, 0, sysctl_age_stats, "I", "Statistics");
SYSCTL_ADD_PROC(device_get_sysctl_ctx(sc->age_dev),
SYSCTL_CHILDREN(device_get_sysctl_tree(sc->age_dev)), OID_AUTO,
"int_mod", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
&sc->age_int_mod, 0, sysctl_hw_age_int_mod, "I",
"age interrupt moderation");
/* Pull in device tunables. */
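/*
 * resource_int_value() consults per-device hints (e.g. a
 * hint.age.0.int_mod line in loader.conf), so administrator-supplied
 * values are range checked and clamped below.
 */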
sc->age_int_mod = AGE_IM_TIMER_DEFAULT;
error = resource_int_value(device_get_name(sc->age_dev),
device_get_unit(sc->age_dev), "int_mod", &sc->age_int_mod);
if (error == 0) {
if (sc->age_int_mod < AGE_IM_TIMER_MIN ||
sc->age_int_mod > AGE_IM_TIMER_MAX) {
device_printf(sc->age_dev,
"int_mod value out of range; using default: %d\n",
AGE_IM_TIMER_DEFAULT);
sc->age_int_mod = AGE_IM_TIMER_DEFAULT;
}
}
SYSCTL_ADD_PROC(device_get_sysctl_ctx(sc->age_dev),
SYSCTL_CHILDREN(device_get_sysctl_tree(sc->age_dev)), OID_AUTO,
"process_limit", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
&sc->age_process_limit, 0, sysctl_hw_age_proc_limit, "I",
"max number of Rx events to process");
/* Pull in device tunables. */
sc->age_process_limit = AGE_PROC_DEFAULT;
error = resource_int_value(device_get_name(sc->age_dev),
device_get_unit(sc->age_dev), "process_limit",
&sc->age_process_limit);
if (error == 0) {
if (sc->age_process_limit < AGE_PROC_MIN ||
sc->age_process_limit > AGE_PROC_MAX) {
device_printf(sc->age_dev,
"process_limit value out of range; "
"using default: %d\n", AGE_PROC_DEFAULT);
sc->age_process_limit = AGE_PROC_DEFAULT;
}
}
}
struct age_dmamap_arg {
bus_addr_t age_busaddr;
};
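/*
 * bus_dmamap_load() callback: each ring/block is loaded as a single
 * segment, so just record its bus address for the caller.
 */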
static void
age_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
struct age_dmamap_arg *ctx;
if (error != 0)
return;
KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
ctx = (struct age_dmamap_arg *)arg;
ctx->age_busaddr = segs[0].ds_addr;
}
/*
* The Attansic L1 controller has a single register to specify the
* high address part of DMA blocks, so all descriptor structures and
* DMA memory blocks must share the same upper 32 address bits
* (i.e. crossing a 4GB boundary is not allowed).
*/
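/*
 * For example, an 8KB block at 0xFFFFF000 would end at 0x100001000,
 * changing AGE_ADDR_HI() from 0 to 1 and triggering EFBIG below.
 */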
static int
age_check_boundary(struct age_softc *sc)
{
bus_addr_t rx_ring_end, rr_ring_end, tx_ring_end;
bus_addr_t cmb_block_end, smb_block_end;
/* Tx/Rx descriptor queue should reside within 4GB boundary. */
tx_ring_end = sc->age_rdata.age_tx_ring_paddr + AGE_TX_RING_SZ;
rx_ring_end = sc->age_rdata.age_rx_ring_paddr + AGE_RX_RING_SZ;
rr_ring_end = sc->age_rdata.age_rr_ring_paddr + AGE_RR_RING_SZ;
cmb_block_end = sc->age_rdata.age_cmb_block_paddr + AGE_CMB_BLOCK_SZ;
smb_block_end = sc->age_rdata.age_smb_block_paddr + AGE_SMB_BLOCK_SZ;
if ((AGE_ADDR_HI(tx_ring_end) !=
AGE_ADDR_HI(sc->age_rdata.age_tx_ring_paddr)) ||
(AGE_ADDR_HI(rx_ring_end) !=
AGE_ADDR_HI(sc->age_rdata.age_rx_ring_paddr)) ||
(AGE_ADDR_HI(rr_ring_end) !=
AGE_ADDR_HI(sc->age_rdata.age_rr_ring_paddr)) ||
(AGE_ADDR_HI(cmb_block_end) !=
AGE_ADDR_HI(sc->age_rdata.age_cmb_block_paddr)) ||
(AGE_ADDR_HI(smb_block_end) !=
AGE_ADDR_HI(sc->age_rdata.age_smb_block_paddr)))
return (EFBIG);
if ((AGE_ADDR_HI(tx_ring_end) != AGE_ADDR_HI(rx_ring_end)) ||
(AGE_ADDR_HI(tx_ring_end) != AGE_ADDR_HI(rr_ring_end)) ||
(AGE_ADDR_HI(tx_ring_end) != AGE_ADDR_HI(cmb_block_end)) ||
(AGE_ADDR_HI(tx_ring_end) != AGE_ADDR_HI(smb_block_end)))
return (EFBIG);
return (0);
}
static int
age_dma_alloc(struct age_softc *sc)
{
struct age_txdesc *txd;
struct age_rxdesc *rxd;
bus_addr_t lowaddr;
struct age_dmamap_arg ctx;
int error, i;
lowaddr = BUS_SPACE_MAXADDR;
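/*
 * Start with an unrestricted lowaddr; the "again" label below allows
 * the whole allocation to be retried with a tighter address limit if
 * the blocks turn out to cross a 4GB boundary.
 */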
again:
/* Create parent ring/DMA block tag. */
error = bus_dma_tag_create(
bus_get_dma_tag(sc->age_dev), /* parent */
1, 0, /* alignment, boundary */
lowaddr, /* lowaddr */
BUS_SPACE_MAXADDR, /* highaddr */
NULL, NULL, /* filter, filterarg */
BUS_SPACE_MAXSIZE_32BIT, /* maxsize */
0, /* nsegments */
BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
0, /* flags */
NULL, NULL, /* lockfunc, lockarg */
&sc->age_cdata.age_parent_tag);
if (error != 0) {
device_printf(sc->age_dev,
"could not create parent DMA tag.\n");
goto fail;
}
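/*
 * The parent tag only propagates address restrictions to its
 * children; it is never used for mappings itself, hence the
 * nsegments value of 0.
 */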
/* Create tag for Tx ring. */
error = bus_dma_tag_create(
sc->age_cdata.age_parent_tag, /* parent */
AGE_TX_RING_ALIGN, 0, /* alignment, boundary */
BUS_SPACE_MAXADDR, /* lowaddr */
BUS_SPACE_MAXADDR, /* highaddr */
NULL, NULL, /* filter, filterarg */
AGE_TX_RING_SZ, /* maxsize */
1, /* nsegments */