Changeset View
Changeset View
Standalone View
Standalone View
sys/dev/cadence/if_cgem.c
Show First 20 Lines • Show All 80 Lines • ▼ Show 20 Lines | |||||
#include "miibus_if.h" | #include "miibus_if.h" | ||||
#define IF_CGEM_NAME "cgem" | #define IF_CGEM_NAME "cgem" | ||||
#define CGEM_NUM_RX_DESCS 512 /* size of receive descriptor ring */ | #define CGEM_NUM_RX_DESCS 512 /* size of receive descriptor ring */ | ||||
#define CGEM_NUM_TX_DESCS 512 /* size of transmit descriptor ring */ | #define CGEM_NUM_TX_DESCS 512 /* size of transmit descriptor ring */ | ||||
#define MAX_DESC_RING_SIZE (MAX(CGEM_NUM_RX_DESCS*sizeof(struct cgem_rx_desc),\ | |||||
CGEM_NUM_TX_DESCS*sizeof(struct cgem_tx_desc))) | |||||
/* Default for sysctl rxbufs. Must be < CGEM_NUM_RX_DESCS of course. */ | /* Default for sysctl rxbufs. Must be < CGEM_NUM_RX_DESCS of course. */ | ||||
#define DEFAULT_NUM_RX_BUFS 256 /* number of receive bufs to queue. */ | #define DEFAULT_NUM_RX_BUFS 256 /* number of receive bufs to queue. */ | ||||
#define TX_MAX_DMA_SEGS 8 /* maximum segs in a tx mbuf dma */ | #define TX_MAX_DMA_SEGS 8 /* maximum segs in a tx mbuf dma */ | ||||
#define CGEM_CKSUM_ASSIST (CSUM_IP | CSUM_TCP | CSUM_UDP | \ | #define CGEM_CKSUM_ASSIST (CSUM_IP | CSUM_TCP | CSUM_UDP | \ | ||||
CSUM_TCP_IPV6 | CSUM_UDP_IPV6) | CSUM_TCP_IPV6 | CSUM_UDP_IPV6) | ||||
#define HWTYPE_GENERIC_GEM 1 | |||||
#define HWTYPE_ZYNQ 2 | |||||
#define HWTYPE_ZYNQMP 3 | |||||
#define HWTYPE_SIFIVE_FU540 4 | |||||
static struct ofw_compat_data compat_data[] = { | static struct ofw_compat_data compat_data[] = { | ||||
{ "cadence,gem", 1 }, | { "cdns,zynq-gem", HWTYPE_ZYNQ }, | ||||
{ "cdns,macb", 1 }, | { "cdns,zynqmp-gem", HWTYPE_ZYNQMP }, | ||||
{ "sifive,fu540-c000-gem", 1 }, | { "sifive,fu540-c000-gem", HWTYPE_SIFIVE_FU540 }, | ||||
{ NULL, 0 }, | { "cdns,gem", HWTYPE_GENERIC_GEM }, | ||||
{ "cadence,gem", HWTYPE_GENERIC_GEM }, | |||||
{ NULL, 0 } | |||||
}; | }; | ||||
struct cgem_softc { | struct cgem_softc { | ||||
if_t ifp; | if_t ifp; | ||||
struct mtx sc_mtx; | struct mtx sc_mtx; | ||||
device_t dev; | device_t dev; | ||||
device_t miibus; | device_t miibus; | ||||
u_int mii_media_active; /* last active media */ | u_int mii_media_active; /* last active media */ | ||||
int if_old_flags; | int if_old_flags; | ||||
struct resource *mem_res; | struct resource *mem_res; | ||||
struct resource *irq_res; | struct resource *irq_res; | ||||
void *intrhand; | void *intrhand; | ||||
struct callout tick_ch; | struct callout tick_ch; | ||||
uint32_t net_ctl_shadow; | uint32_t net_ctl_shadow; | ||||
uint32_t net_cfg_shadow; | |||||
int ref_clk_num; | int ref_clk_num; | ||||
u_char eaddr[6]; | int descwds; /* descriptor size in 32-bit words */ | ||||
int is64bit; | |||||
int neednullqs; | |||||
bus_dma_tag_t desc_dma_tag; | bus_dma_tag_t desc_dma_tag; | ||||
bus_dma_tag_t mbuf_dma_tag; | bus_dma_tag_t mbuf_dma_tag; | ||||
/* receive descriptor ring */ | /* receive descriptor ring */ | ||||
struct cgem_rx_desc *rxring; | uint32_t *rxring; | ||||
bus_addr_t rxring_physaddr; | bus_addr_t rxring_physaddr; | ||||
struct mbuf *rxring_m[CGEM_NUM_RX_DESCS]; | struct mbuf *rxring_m[CGEM_NUM_RX_DESCS]; | ||||
bus_dmamap_t rxring_m_dmamap[CGEM_NUM_RX_DESCS]; | bus_dmamap_t rxring_m_dmamap[CGEM_NUM_RX_DESCS]; | ||||
int rxring_hd_ptr; /* where to put rcv bufs */ | int rxring_hd_ptr; /* where to put rcv bufs */ | ||||
int rxring_tl_ptr; /* where to get receives */ | int rxring_tl_ptr; /* where to get receives */ | ||||
int rxring_queued; /* how many rcv bufs queued */ | int rxring_queued; /* how many rcv bufs queued */ | ||||
bus_dmamap_t rxring_dma_map; | bus_dmamap_t rxring_dma_map; | ||||
int rxbufs; /* tunable number rcv bufs */ | int rxbufs; /* tunable number rcv bufs */ | ||||
int rxhangwar; /* rx hang work-around */ | int rxhangwar; /* rx hang work-around */ | ||||
u_int rxoverruns; /* rx overruns */ | u_int rxoverruns; /* rx overruns */ | ||||
u_int rxnobufs; /* rx buf ring empty events */ | u_int rxnobufs; /* rx buf ring empty events */ | ||||
u_int rxdmamapfails; /* rx dmamap failures */ | u_int rxdmamapfails; /* rx dmamap failures */ | ||||
uint32_t rx_frames_prev; | uint32_t rx_frames_prev; | ||||
/* transmit descriptor ring */ | /* transmit descriptor ring */ | ||||
struct cgem_tx_desc *txring; | uint32_t *txring; | ||||
bus_addr_t txring_physaddr; | bus_addr_t txring_physaddr; | ||||
struct mbuf *txring_m[CGEM_NUM_TX_DESCS]; | struct mbuf *txring_m[CGEM_NUM_TX_DESCS]; | ||||
bus_dmamap_t txring_m_dmamap[CGEM_NUM_TX_DESCS]; | bus_dmamap_t txring_m_dmamap[CGEM_NUM_TX_DESCS]; | ||||
int txring_hd_ptr; /* where to put next xmits */ | int txring_hd_ptr; /* where to put next xmits */ | ||||
int txring_tl_ptr; /* next xmit mbuf to free */ | int txring_tl_ptr; /* next xmit mbuf to free */ | ||||
int txring_queued; /* num xmits segs queued */ | int txring_queued; /* num xmits segs queued */ | ||||
bus_dmamap_t txring_dma_map; | |||||
u_int txfull; /* tx ring full events */ | u_int txfull; /* tx ring full events */ | ||||
u_int txdefrags; /* tx calls to m_defrag() */ | u_int txdefrags; /* tx calls to m_defrag() */ | ||||
u_int txdefragfails; /* tx m_defrag() failures */ | u_int txdefragfails; /* tx m_defrag() failures */ | ||||
u_int txdmamapfails; /* tx dmamap failures */ | u_int txdmamapfails; /* tx dmamap failures */ | ||||
/* null descriptor rings */ | |||||
uint32_t *null_qs; | |||||
bus_addr_t null_qs_physaddr; | |||||
/* hardware provided statistics */ | /* hardware provided statistics */ | ||||
struct cgem_hw_stats { | struct cgem_hw_stats { | ||||
uint64_t tx_bytes; | uint64_t tx_bytes; | ||||
uint32_t tx_frames; | uint32_t tx_frames; | ||||
uint32_t tx_frames_bcast; | uint32_t tx_frames_bcast; | ||||
uint32_t tx_frames_multi; | uint32_t tx_frames_multi; | ||||
uint32_t tx_frames_pause; | uint32_t tx_frames_pause; | ||||
uint32_t tx_frames_64b; | uint32_t tx_frames_64b; | ||||
▲ Show 20 Lines • Show All 106 Lines • ▼ Show 20 Lines | cgem_get_mac(struct cgem_softc *sc, u_char eaddr[]) | ||||
for (i = 1; i < 4; i++) { | for (i = 1; i < 4; i++) { | ||||
WR4(sc, CGEM_SPEC_ADDR_LOW(i), 0); | WR4(sc, CGEM_SPEC_ADDR_LOW(i), 0); | ||||
WR4(sc, CGEM_SPEC_ADDR_HI(i), 0); | WR4(sc, CGEM_SPEC_ADDR_HI(i), 0); | ||||
} | } | ||||
} | } | ||||
/* | /* | ||||
* cgem_mac_hash(): map 48-bit address to a 6-bit hash. The 6-bit hash | * cgem_mac_hash(): map 48-bit address to a 6-bit hash. The 6-bit hash | ||||
* corresponds to a bit in a 64-bit hash register. Setting that bit in the hash | * corresponds to a bit in a 64-bit hash register. Setting that bit in the | ||||
* register enables reception of all frames with a destination address that | * hash register enables reception of all frames with a destination address | ||||
* hashes to that 6-bit value. | * that hashes to that 6-bit value. | ||||
* | * | ||||
* The hash function is described in sec. 16.2.3 in the Zynq-7000 Tech | * The hash function is described in sec. 16.2.3 in the Zynq-7000 Tech | ||||
* Reference Manual. Bits 0-5 in the hash are the exclusive-or of | * Reference Manual. Bits 0-5 in the hash are the exclusive-or of | ||||
* every sixth bit in the destination address. | * every sixth bit in the destination address. | ||||
*/ | */ | ||||
static int | static int | ||||
cgem_mac_hash(u_char eaddr[]) | cgem_mac_hash(u_char eaddr[]) | ||||
{ | { | ||||
Show All 28 Lines | |||||
* After any change in rx flags or multi-cast addresses, set up hash registers | * After any change in rx flags or multi-cast addresses, set up hash registers | ||||
* and net config register bits. | * and net config register bits. | ||||
*/ | */ | ||||
static void | static void | ||||
cgem_rx_filter(struct cgem_softc *sc) | cgem_rx_filter(struct cgem_softc *sc) | ||||
{ | { | ||||
if_t ifp = sc->ifp; | if_t ifp = sc->ifp; | ||||
uint32_t hashes[2] = { 0, 0 }; | uint32_t hashes[2] = { 0, 0 }; | ||||
uint32_t net_cfg; | |||||
net_cfg = RD4(sc, CGEM_NET_CFG); | sc->net_cfg_shadow &= ~(CGEM_NET_CFG_MULTI_HASH_EN | | ||||
net_cfg &= ~(CGEM_NET_CFG_MULTI_HASH_EN | | |||||
CGEM_NET_CFG_NO_BCAST | CGEM_NET_CFG_COPY_ALL); | CGEM_NET_CFG_NO_BCAST | CGEM_NET_CFG_COPY_ALL); | ||||
if ((if_getflags(ifp) & IFF_PROMISC) != 0) | if ((if_getflags(ifp) & IFF_PROMISC) != 0) | ||||
net_cfg |= CGEM_NET_CFG_COPY_ALL; | sc->net_cfg_shadow |= CGEM_NET_CFG_COPY_ALL; | ||||
else { | else { | ||||
if ((if_getflags(ifp) & IFF_BROADCAST) == 0) | if ((if_getflags(ifp) & IFF_BROADCAST) == 0) | ||||
net_cfg |= CGEM_NET_CFG_NO_BCAST; | sc->net_cfg_shadow |= CGEM_NET_CFG_NO_BCAST; | ||||
if ((if_getflags(ifp) & IFF_ALLMULTI) != 0) { | if ((if_getflags(ifp) & IFF_ALLMULTI) != 0) { | ||||
hashes[0] = 0xffffffff; | hashes[0] = 0xffffffff; | ||||
hashes[1] = 0xffffffff; | hashes[1] = 0xffffffff; | ||||
} else | } else | ||||
if_foreach_llmaddr(ifp, cgem_hash_maddr, hashes); | if_foreach_llmaddr(ifp, cgem_hash_maddr, hashes); | ||||
if (hashes[0] != 0 || hashes[1] != 0) | if (hashes[0] != 0 || hashes[1] != 0) | ||||
net_cfg |= CGEM_NET_CFG_MULTI_HASH_EN; | sc->net_cfg_shadow |= CGEM_NET_CFG_MULTI_HASH_EN; | ||||
} | } | ||||
WR4(sc, CGEM_HASH_TOP, hashes[0]); | WR4(sc, CGEM_HASH_TOP, hashes[0]); | ||||
WR4(sc, CGEM_HASH_BOT, hashes[1]); | WR4(sc, CGEM_HASH_BOT, hashes[1]); | ||||
WR4(sc, CGEM_NET_CFG, net_cfg); | WR4(sc, CGEM_NET_CFG, sc->net_cfg_shadow); | ||||
} | } | ||||
/* For bus_dmamap_load() callback. */ | /* For bus_dmamap_load() callback. */ | ||||
static void | static void | ||||
cgem_getaddr(void *arg, bus_dma_segment_t *segs, int nsegs, int error) | cgem_getaddr(void *arg, bus_dma_segment_t *segs, int nsegs, int error) | ||||
{ | { | ||||
if (nsegs != 1 || error != 0) | if (nsegs != 1 || error != 0) | ||||
return; | return; | ||||
*(bus_addr_t *)arg = segs[0].ds_addr; | *(bus_addr_t *)arg = segs[0].ds_addr; | ||||
} | } | ||||
/* Set up null queues for priority queues we actually can't disable. */ | |||||
static void | |||||
cgem_null_qs(struct cgem_softc *sc) | |||||
{ | |||||
uint32_t *rx_desc; | |||||
uint32_t *tx_desc; | |||||
uint32_t queue_mask; | |||||
int n; | |||||
/* Read design config register 6 to determine number of queues. */ | |||||
queue_mask = (RD4(sc, CGEM_DESIGN_CFG6) & | |||||
CGEM_DESIGN_CFG6_DMA_PRIO_Q_MASK) >> 1; | |||||
if (queue_mask == 0) | |||||
return; | |||||
/* Create empty RX queue and empty TX buf queues. */ | |||||
memset(sc->null_qs, 0, sc->descwds * sizeof(uint32_t)); | |||||
rx_desc = sc->null_qs; | |||||
rx_desc[0] = CGEM_RXDESC_OWN | CGEM_RXDESC_WRAP; | |||||
tx_desc = rx_desc + sc->descwds; | |||||
tx_desc[1] = CGEM_TXDESC_USED | CGEM_TXDESC_WRAP; | |||||
/* Point all valid ring base pointers to the null queues. */ | |||||
for (n = 1; (queue_mask & 1) != 0; n++, queue_mask >>= 1) { | |||||
WR4(sc, CGEM_RX_QN_BAR(n), sc->null_qs_physaddr); | |||||
WR4(sc, CGEM_TX_QN_BAR(n), sc->null_qs_physaddr + | |||||
sc->descwds * sizeof(uint32_t)); | |||||
} | |||||
} | |||||
/* Create DMA'able descriptor rings. */ | /* Create DMA'able descriptor rings. */ | ||||
static int | static int | ||||
cgem_setup_descs(struct cgem_softc *sc) | cgem_setup_descs(struct cgem_softc *sc) | ||||
{ | { | ||||
int i, err; | int i, err; | ||||
int desc_rings_size = (CGEM_NUM_RX_DESCS + CGEM_NUM_TX_DESCS) * | |||||
sc->descwds * sizeof(uint32_t); | |||||
if (sc->neednullqs) | |||||
desc_rings_size += 2 * sc->descwds * sizeof(uint32_t); | |||||
sc->txring = NULL; | sc->txring = NULL; | ||||
sc->rxring = NULL; | sc->rxring = NULL; | ||||
/* Allocate non-cached DMA space for RX and TX descriptors. */ | /* Allocate non-cached DMA space for RX and TX descriptors. */ | ||||
err = bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1, 0, | err = bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1, | ||||
BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, | #if INTPTR_MAX == INT64_MAX | ||||
MAX_DESC_RING_SIZE, 1, MAX_DESC_RING_SIZE, 0, | 1ULL << 32, /* Do not cross a 4G boundary. */ | ||||
busdma_lock_mutex, &sc->sc_mtx, &sc->desc_dma_tag); | #else | ||||
0, | |||||
#endif | |||||
philip: Isn't it clearer to simply always restrict it to `1ULL << 32`? | |||||
Done Inline ActionsThe compiler balks at this in 32-bit mode. skibo: The compiler balks at this in 32-bit mode. | |||||
sc->is64bit ? BUS_SPACE_MAXADDR : BUS_SPACE_MAXADDR_32BIT, | |||||
BUS_SPACE_MAXADDR, NULL, NULL, desc_rings_size, 1, | |||||
desc_rings_size, 0, busdma_lock_mutex, &sc->sc_mtx, | |||||
&sc->desc_dma_tag); | |||||
if (err) | if (err) | ||||
return (err); | return (err); | ||||
/* Set up a bus_dma_tag for mbufs. */ | /* Set up a bus_dma_tag for mbufs. */ | ||||
err = bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1, 0, | err = bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1, 0, | ||||
BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, | sc->is64bit ? BUS_SPACE_MAXADDR : BUS_SPACE_MAXADDR_32BIT, | ||||
MCLBYTES, TX_MAX_DMA_SEGS, MCLBYTES, 0, | BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, TX_MAX_DMA_SEGS, | ||||
busdma_lock_mutex, &sc->sc_mtx, &sc->mbuf_dma_tag); | MCLBYTES, 0, busdma_lock_mutex, &sc->sc_mtx, &sc->mbuf_dma_tag); | ||||
if (err) | if (err) | ||||
return (err); | return (err); | ||||
/* Allocate DMA memory in non-cacheable space. */ | /* | ||||
* Allocate DMA memory in non-cacheable space. We allocate transmit, | |||||
* receive and null descriptor queues all at once because the | |||||
* hardware only provides one register for the upper 32 bits of | |||||
* rx and tx descriptor queues hardware addresses. | |||||
*/ | |||||
err = bus_dmamem_alloc(sc->desc_dma_tag, (void **)&sc->rxring, | err = bus_dmamem_alloc(sc->desc_dma_tag, (void **)&sc->rxring, | ||||
BUS_DMA_NOWAIT | BUS_DMA_COHERENT, &sc->rxring_dma_map); | #ifdef __arm__ | ||||
BUS_DMA_NOWAIT | BUS_DMA_COHERENT | BUS_DMA_ZERO, | |||||
#else | |||||
BUS_DMA_NOWAIT | BUS_DMA_NOCACHE | BUS_DMA_ZERO, | |||||
#endif | |||||
&sc->rxring_dma_map); | |||||
if (err) | if (err) | ||||
return (err); | return (err); | ||||
/* Load descriptor DMA memory. */ | /* Load descriptor DMA memory. */ | ||||
err = bus_dmamap_load(sc->desc_dma_tag, sc->rxring_dma_map, | err = bus_dmamap_load(sc->desc_dma_tag, sc->rxring_dma_map, | ||||
(void *)sc->rxring, CGEM_NUM_RX_DESCS*sizeof(struct cgem_rx_desc), | (void *)sc->rxring, desc_rings_size, | ||||
cgem_getaddr, &sc->rxring_physaddr, BUS_DMA_NOWAIT); | cgem_getaddr, &sc->rxring_physaddr, BUS_DMA_NOWAIT); | ||||
if (err) | if (err) | ||||
return (err); | return (err); | ||||
/* Initialize RX descriptors. */ | /* Initialize RX descriptors. */ | ||||
for (i = 0; i < CGEM_NUM_RX_DESCS; i++) { | for (i = 0; i < CGEM_NUM_RX_DESCS; i++) { | ||||
sc->rxring[i].addr = CGEM_RXDESC_OWN; | sc->rxring[i * sc->descwds] = CGEM_RXDESC_OWN; | ||||
sc->rxring[i].ctl = 0; | |||||
sc->rxring_m[i] = NULL; | sc->rxring_m[i] = NULL; | ||||
sc->rxring_m_dmamap[i] = NULL; | sc->rxring_m_dmamap[i] = NULL; | ||||
} | } | ||||
sc->rxring[CGEM_NUM_RX_DESCS - 1].addr |= CGEM_RXDESC_WRAP; | sc->rxring[sc->descwds * (CGEM_NUM_RX_DESCS - 1)] |= CGEM_RXDESC_WRAP; | ||||
sc->rxring_hd_ptr = 0; | sc->rxring_hd_ptr = 0; | ||||
sc->rxring_tl_ptr = 0; | sc->rxring_tl_ptr = 0; | ||||
sc->rxring_queued = 0; | sc->rxring_queued = 0; | ||||
/* Allocate DMA memory for TX descriptors in non-cacheable space. */ | sc->txring = sc->rxring + CGEM_NUM_RX_DESCS * sc->descwds; | ||||
err = bus_dmamem_alloc(sc->desc_dma_tag, (void **)&sc->txring, | sc->txring_physaddr = sc->rxring_physaddr + CGEM_NUM_RX_DESCS * | ||||
BUS_DMA_NOWAIT | BUS_DMA_COHERENT, &sc->txring_dma_map); | sc->descwds * sizeof(uint32_t); | ||||
if (err) | |||||
return (err); | |||||
/* Load TX descriptor DMA memory. */ | |||||
err = bus_dmamap_load(sc->desc_dma_tag, sc->txring_dma_map, | |||||
(void *)sc->txring, CGEM_NUM_TX_DESCS*sizeof(struct cgem_tx_desc), | |||||
cgem_getaddr, &sc->txring_physaddr, BUS_DMA_NOWAIT); | |||||
if (err) | |||||
return (err); | |||||
/* Initialize TX descriptor ring. */ | /* Initialize TX descriptor ring. */ | ||||
for (i = 0; i < CGEM_NUM_TX_DESCS; i++) { | for (i = 0; i < CGEM_NUM_TX_DESCS; i++) { | ||||
sc->txring[i].addr = 0; | sc->txring[i * sc->descwds + 1] = CGEM_TXDESC_USED; | ||||
sc->txring[i].ctl = CGEM_TXDESC_USED; | |||||
sc->txring_m[i] = NULL; | sc->txring_m[i] = NULL; | ||||
sc->txring_m_dmamap[i] = NULL; | sc->txring_m_dmamap[i] = NULL; | ||||
} | } | ||||
sc->txring[CGEM_NUM_TX_DESCS - 1].ctl |= CGEM_TXDESC_WRAP; | sc->txring[sc->descwds * (CGEM_NUM_TX_DESCS - 1) + 1] |= | ||||
CGEM_TXDESC_WRAP; | |||||
sc->txring_hd_ptr = 0; | sc->txring_hd_ptr = 0; | ||||
sc->txring_tl_ptr = 0; | sc->txring_tl_ptr = 0; | ||||
sc->txring_queued = 0; | sc->txring_queued = 0; | ||||
if (sc->neednullqs) { | |||||
sc->null_qs = sc->txring + CGEM_NUM_TX_DESCS * sc->descwds; | |||||
sc->null_qs_physaddr = sc->txring_physaddr + | |||||
CGEM_NUM_TX_DESCS * sc->descwds * sizeof(uint32_t); | |||||
cgem_null_qs(sc); | |||||
} | |||||
return (0); | return (0); | ||||
} | } | ||||
/* Fill receive descriptor ring with mbufs. */ | /* Fill receive descriptor ring with mbufs. */ | ||||
static void | static void | ||||
cgem_fill_rqueue(struct cgem_softc *sc) | cgem_fill_rqueue(struct cgem_softc *sc) | ||||
{ | { | ||||
struct mbuf *m = NULL; | struct mbuf *m = NULL; | ||||
Show All 32 Lines | while (sc->rxring_queued < sc->rxbufs) { | ||||
sc->rxring_m[sc->rxring_hd_ptr] = m; | sc->rxring_m[sc->rxring_hd_ptr] = m; | ||||
/* Sync cache with receive buffer. */ | /* Sync cache with receive buffer. */ | ||||
bus_dmamap_sync(sc->mbuf_dma_tag, | bus_dmamap_sync(sc->mbuf_dma_tag, | ||||
sc->rxring_m_dmamap[sc->rxring_hd_ptr], | sc->rxring_m_dmamap[sc->rxring_hd_ptr], | ||||
BUS_DMASYNC_PREREAD); | BUS_DMASYNC_PREREAD); | ||||
/* Write rx descriptor and increment head pointer. */ | /* Write rx descriptor and increment head pointer. */ | ||||
sc->rxring[sc->rxring_hd_ptr].ctl = 0; | sc->rxring[sc->rxring_hd_ptr * sc->descwds + 1] = 0; | ||||
#if INTPTR_MAX == INT64_MAX | |||||
if (sc->is64bit) | |||||
sc->rxring[sc->rxring_hd_ptr * sc->descwds + 2] = | |||||
segs[0].ds_addr >> 32; | |||||
#endif | |||||
if (sc->rxring_hd_ptr == CGEM_NUM_RX_DESCS - 1) { | if (sc->rxring_hd_ptr == CGEM_NUM_RX_DESCS - 1) { | ||||
sc->rxring[sc->rxring_hd_ptr].addr = segs[0].ds_addr | | sc->rxring[sc->rxring_hd_ptr * sc->descwds] = | ||||
CGEM_RXDESC_WRAP; | segs[0].ds_addr | CGEM_RXDESC_WRAP; | ||||
sc->rxring_hd_ptr = 0; | sc->rxring_hd_ptr = 0; | ||||
} else | } else | ||||
sc->rxring[sc->rxring_hd_ptr++].addr = segs[0].ds_addr; | sc->rxring[sc->rxring_hd_ptr++ * sc->descwds] = | ||||
segs[0].ds_addr; | |||||
sc->rxring_queued++; | sc->rxring_queued++; | ||||
} | } | ||||
} | } | ||||
/* Pull received packets off of receive descriptor ring. */ | /* Pull received packets off of receive descriptor ring. */ | ||||
static void | static void | ||||
cgem_recv(struct cgem_softc *sc) | cgem_recv(struct cgem_softc *sc) | ||||
{ | { | ||||
if_t ifp = sc->ifp; | if_t ifp = sc->ifp; | ||||
struct mbuf *m, *m_hd, **m_tl; | struct mbuf *m, *m_hd, **m_tl; | ||||
uint32_t ctl; | uint32_t ctl; | ||||
CGEM_ASSERT_LOCKED(sc); | CGEM_ASSERT_LOCKED(sc); | ||||
/* Pick up all packets in which the OWN bit is set. */ | /* Pick up all packets in which the OWN bit is set. */ | ||||
m_hd = NULL; | m_hd = NULL; | ||||
m_tl = &m_hd; | m_tl = &m_hd; | ||||
while (sc->rxring_queued > 0 && | while (sc->rxring_queued > 0 && (sc->rxring[sc->rxring_tl_ptr * | ||||
(sc->rxring[sc->rxring_tl_ptr].addr & CGEM_RXDESC_OWN) != 0) { | sc->descwds] & CGEM_RXDESC_OWN) != 0) { | ||||
ctl = sc->rxring[sc->rxring_tl_ptr].ctl; | ctl = sc->rxring[sc->rxring_tl_ptr * sc->descwds + 1]; | ||||
/* Grab filled mbuf. */ | /* Grab filled mbuf. */ | ||||
m = sc->rxring_m[sc->rxring_tl_ptr]; | m = sc->rxring_m[sc->rxring_tl_ptr]; | ||||
sc->rxring_m[sc->rxring_tl_ptr] = NULL; | sc->rxring_m[sc->rxring_tl_ptr] = NULL; | ||||
/* Sync cache with receive buffer. */ | /* Sync cache with receive buffer. */ | ||||
bus_dmamap_sync(sc->mbuf_dma_tag, | bus_dmamap_sync(sc->mbuf_dma_tag, | ||||
sc->rxring_m_dmamap[sc->rxring_tl_ptr], | sc->rxring_m_dmamap[sc->rxring_tl_ptr], | ||||
▲ Show 20 Lines • Show All 80 Lines • ▼ Show 20 Lines | |||||
{ | { | ||||
struct mbuf *m; | struct mbuf *m; | ||||
uint32_t ctl; | uint32_t ctl; | ||||
CGEM_ASSERT_LOCKED(sc); | CGEM_ASSERT_LOCKED(sc); | ||||
/* free up finished transmits. */ | /* free up finished transmits. */ | ||||
while (sc->txring_queued > 0 && | while (sc->txring_queued > 0 && | ||||
((ctl = sc->txring[sc->txring_tl_ptr].ctl) & | ((ctl = sc->txring[sc->txring_tl_ptr * sc->descwds + 1]) & | ||||
CGEM_TXDESC_USED) != 0) { | CGEM_TXDESC_USED) != 0) { | ||||
/* Sync cache. */ | /* Sync cache. */ | ||||
bus_dmamap_sync(sc->mbuf_dma_tag, | bus_dmamap_sync(sc->mbuf_dma_tag, | ||||
sc->txring_m_dmamap[sc->txring_tl_ptr], | sc->txring_m_dmamap[sc->txring_tl_ptr], | ||||
BUS_DMASYNC_POSTWRITE); | BUS_DMASYNC_POSTWRITE); | ||||
/* Unload and destroy DMA map. */ | /* Unload and destroy DMA map. */ | ||||
bus_dmamap_unload(sc->mbuf_dma_tag, | bus_dmamap_unload(sc->mbuf_dma_tag, | ||||
sc->txring_m_dmamap[sc->txring_tl_ptr]); | sc->txring_m_dmamap[sc->txring_tl_ptr]); | ||||
bus_dmamap_destroy(sc->mbuf_dma_tag, | bus_dmamap_destroy(sc->mbuf_dma_tag, | ||||
sc->txring_m_dmamap[sc->txring_tl_ptr]); | sc->txring_m_dmamap[sc->txring_tl_ptr]); | ||||
sc->txring_m_dmamap[sc->txring_tl_ptr] = NULL; | sc->txring_m_dmamap[sc->txring_tl_ptr] = NULL; | ||||
/* Free up the mbuf. */ | /* Free up the mbuf. */ | ||||
m = sc->txring_m[sc->txring_tl_ptr]; | m = sc->txring_m[sc->txring_tl_ptr]; | ||||
sc->txring_m[sc->txring_tl_ptr] = NULL; | sc->txring_m[sc->txring_tl_ptr] = NULL; | ||||
m_freem(m); | m_freem(m); | ||||
/* Check the status. */ | /* Check the status. */ | ||||
if ((ctl & CGEM_TXDESC_AHB_ERR) != 0) { | if ((ctl & CGEM_TXDESC_AHB_ERR) != 0) { | ||||
/* Serious bus error. log to console. */ | /* Serious bus error. log to console. */ | ||||
#if INTPTR_MAX == INT64_MAX | |||||
device_printf(sc->dev, | device_printf(sc->dev, | ||||
"cgem_clean_tx: AHB error, addr=0x%x%08x\n", | |||||
sc->txring[sc->txring_tl_ptr * sc->descwds + 2], | |||||
sc->txring[sc->txring_tl_ptr * sc->descwds]); | |||||
#else | |||||
device_printf(sc->dev, | |||||
"cgem_clean_tx: AHB error, addr=0x%x\n", | "cgem_clean_tx: AHB error, addr=0x%x\n", | ||||
sc->txring[sc->txring_tl_ptr].addr); | sc->txring[sc->txring_tl_ptr * sc->descwds]); | ||||
#endif | |||||
} else if ((ctl & (CGEM_TXDESC_RETRY_ERR | | } else if ((ctl & (CGEM_TXDESC_RETRY_ERR | | ||||
CGEM_TXDESC_LATE_COLL)) != 0) { | CGEM_TXDESC_LATE_COLL)) != 0) { | ||||
if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, 1); | if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, 1); | ||||
} else | } else | ||||
if_inc_counter(sc->ifp, IFCOUNTER_OPACKETS, 1); | if_inc_counter(sc->ifp, IFCOUNTER_OPACKETS, 1); | ||||
/* | /* | ||||
* If the packet spanned more than one tx descriptor, skip | * If the packet spanned more than one tx descriptor, skip | ||||
* descriptors until we find the end so that only start-of-frame | * descriptors until we find the end so that only | ||||
* descriptors are processed. | * start-of-frame descriptors are processed. | ||||
*/ | */ | ||||
while ((ctl & CGEM_TXDESC_LAST_BUF) == 0) { | while ((ctl & CGEM_TXDESC_LAST_BUF) == 0) { | ||||
if ((ctl & CGEM_TXDESC_WRAP) != 0) | if ((ctl & CGEM_TXDESC_WRAP) != 0) | ||||
sc->txring_tl_ptr = 0; | sc->txring_tl_ptr = 0; | ||||
else | else | ||||
sc->txring_tl_ptr++; | sc->txring_tl_ptr++; | ||||
sc->txring_queued--; | sc->txring_queued--; | ||||
ctl = sc->txring[sc->txring_tl_ptr].ctl; | ctl = sc->txring[sc->txring_tl_ptr * sc->descwds + 1]; | ||||
sc->txring[sc->txring_tl_ptr].ctl = | sc->txring[sc->txring_tl_ptr * sc->descwds + 1] = | ||||
ctl | CGEM_TXDESC_USED; | ctl | CGEM_TXDESC_USED; | ||||
} | } | ||||
/* Next descriptor. */ | /* Next descriptor. */ | ||||
if ((ctl & CGEM_TXDESC_WRAP) != 0) | if ((ctl & CGEM_TXDESC_WRAP) != 0) | ||||
sc->txring_tl_ptr = 0; | sc->txring_tl_ptr = 0; | ||||
else | else | ||||
sc->txring_tl_ptr++; | sc->txring_tl_ptr++; | ||||
▲ Show 20 Lines • Show All 89 Lines • ▼ Show 20 Lines | wrap = sc->txring_hd_ptr + nsegs + TX_MAX_DMA_SEGS >= | ||||
CGEM_NUM_TX_DESCS; | CGEM_NUM_TX_DESCS; | ||||
/* | /* | ||||
* Fill in the TX descriptors back to front so that USED bit in | * Fill in the TX descriptors back to front so that USED bit in | ||||
* first descriptor is cleared last. | * first descriptor is cleared last. | ||||
*/ | */ | ||||
for (i = nsegs - 1; i >= 0; i--) { | for (i = nsegs - 1; i >= 0; i--) { | ||||
/* Descriptor address. */ | /* Descriptor address. */ | ||||
sc->txring[sc->txring_hd_ptr + i].addr = | sc->txring[(sc->txring_hd_ptr + i) * sc->descwds] = | ||||
segs[i].ds_addr; | segs[i].ds_addr; | ||||
#if INTPTR_MAX == INT64_MAX | |||||
if (sc->is64bit) | |||||
sc->txring[(sc->txring_hd_ptr + i) * | |||||
sc->descwds + 2] = segs[i].ds_addr >> 32; | |||||
#endif | |||||
/* Descriptor control word. */ | /* Descriptor control word. */ | ||||
ctl = segs[i].ds_len; | ctl = segs[i].ds_len; | ||||
if (i == nsegs - 1) { | if (i == nsegs - 1) { | ||||
ctl |= CGEM_TXDESC_LAST_BUF; | ctl |= CGEM_TXDESC_LAST_BUF; | ||||
if (wrap) | if (wrap) | ||||
ctl |= CGEM_TXDESC_WRAP; | ctl |= CGEM_TXDESC_WRAP; | ||||
} | } | ||||
sc->txring[sc->txring_hd_ptr + i].ctl = ctl; | sc->txring[(sc->txring_hd_ptr + i) * sc->descwds + 1] = | ||||
ctl; | |||||
if (i != 0) | if (i != 0) | ||||
sc->txring_m[sc->txring_hd_ptr + i] = NULL; | sc->txring_m[sc->txring_hd_ptr + i] = NULL; | ||||
} | } | ||||
if (wrap) | if (wrap) | ||||
sc->txring_hd_ptr = 0; | sc->txring_hd_ptr = 0; | ||||
else | else | ||||
▲ Show 20 Lines • Show All 177 Lines • ▼ Show 20 Lines | |||||
/* Reset hardware. */ | /* Reset hardware. */ | ||||
static void | static void | ||||
cgem_reset(struct cgem_softc *sc) | cgem_reset(struct cgem_softc *sc) | ||||
{ | { | ||||
CGEM_ASSERT_LOCKED(sc); | CGEM_ASSERT_LOCKED(sc); | ||||
/* Determine data bus width from design configuration register. */ | |||||
switch (RD4(sc, CGEM_DESIGN_CFG1) & | |||||
CGEM_DESIGN_CFG1_DMA_BUS_WIDTH_MASK) { | |||||
case CGEM_DESIGN_CFG1_DMA_BUS_WIDTH_64: | |||||
sc->net_cfg_shadow = CGEM_NET_CFG_DBUS_WIDTH_64; | |||||
break; | |||||
case CGEM_DESIGN_CFG1_DMA_BUS_WIDTH_128: | |||||
sc->net_cfg_shadow = CGEM_NET_CFG_DBUS_WIDTH_128; | |||||
break; | |||||
default: | |||||
sc->net_cfg_shadow = CGEM_NET_CFG_DBUS_WIDTH_32; | |||||
} | |||||
WR4(sc, CGEM_NET_CTRL, 0); | WR4(sc, CGEM_NET_CTRL, 0); | ||||
WR4(sc, CGEM_NET_CFG, 0); | WR4(sc, CGEM_NET_CFG, sc->net_cfg_shadow); | ||||
WR4(sc, CGEM_NET_CTRL, CGEM_NET_CTRL_CLR_STAT_REGS); | WR4(sc, CGEM_NET_CTRL, CGEM_NET_CTRL_CLR_STAT_REGS); | ||||
WR4(sc, CGEM_TX_STAT, CGEM_TX_STAT_ALL); | WR4(sc, CGEM_TX_STAT, CGEM_TX_STAT_ALL); | ||||
WR4(sc, CGEM_RX_STAT, CGEM_RX_STAT_ALL); | WR4(sc, CGEM_RX_STAT, CGEM_RX_STAT_ALL); | ||||
WR4(sc, CGEM_INTR_DIS, CGEM_INTR_ALL); | WR4(sc, CGEM_INTR_DIS, CGEM_INTR_ALL); | ||||
WR4(sc, CGEM_HASH_BOT, 0); | WR4(sc, CGEM_HASH_BOT, 0); | ||||
WR4(sc, CGEM_HASH_TOP, 0); | WR4(sc, CGEM_HASH_TOP, 0); | ||||
WR4(sc, CGEM_TX_QBAR, 0); /* manual says do this. */ | WR4(sc, CGEM_TX_QBAR, 0); /* manual says do this. */ | ||||
WR4(sc, CGEM_RX_QBAR, 0); | WR4(sc, CGEM_RX_QBAR, 0); | ||||
/* Get management port running even if interface is down. */ | /* Get management port running even if interface is down. */ | ||||
WR4(sc, CGEM_NET_CFG, CGEM_NET_CFG_DBUS_WIDTH_32 | | sc->net_cfg_shadow |= CGEM_NET_CFG_MDC_CLK_DIV_48; | ||||
CGEM_NET_CFG_MDC_CLK_DIV_64); | WR4(sc, CGEM_NET_CFG, sc->net_cfg_shadow); | ||||
sc->net_ctl_shadow = CGEM_NET_CTRL_MGMT_PORT_EN; | sc->net_ctl_shadow = CGEM_NET_CTRL_MGMT_PORT_EN; | ||||
WR4(sc, CGEM_NET_CTRL, sc->net_ctl_shadow); | WR4(sc, CGEM_NET_CTRL, sc->net_ctl_shadow); | ||||
} | } | ||||
/* Bring up the hardware. */ | /* Bring up the hardware. */ | ||||
static void | static void | ||||
cgem_config(struct cgem_softc *sc) | cgem_config(struct cgem_softc *sc) | ||||
{ | { | ||||
if_t ifp = sc->ifp; | if_t ifp = sc->ifp; | ||||
uint32_t net_cfg; | |||||
uint32_t dma_cfg; | uint32_t dma_cfg; | ||||
u_char *eaddr = if_getlladdr(ifp); | u_char *eaddr = if_getlladdr(ifp); | ||||
CGEM_ASSERT_LOCKED(sc); | CGEM_ASSERT_LOCKED(sc); | ||||
/* Program Net Config Register. */ | /* Program Net Config Register. */ | ||||
net_cfg = CGEM_NET_CFG_DBUS_WIDTH_32 | | sc->net_cfg_shadow &= (CGEM_NET_CFG_MDC_CLK_DIV_MASK | | ||||
CGEM_NET_CFG_MDC_CLK_DIV_64 | | CGEM_NET_CFG_DBUS_WIDTH_MASK); | ||||
CGEM_NET_CFG_FCS_REMOVE | | sc->net_cfg_shadow |= (CGEM_NET_CFG_FCS_REMOVE | | ||||
CGEM_NET_CFG_RX_BUF_OFFSET(ETHER_ALIGN) | | CGEM_NET_CFG_RX_BUF_OFFSET(ETHER_ALIGN) | | ||||
CGEM_NET_CFG_GIGE_EN | | CGEM_NET_CFG_GIGE_EN | CGEM_NET_CFG_1536RXEN | | ||||
CGEM_NET_CFG_1536RXEN | | CGEM_NET_CFG_FULL_DUPLEX | CGEM_NET_CFG_SPEED100); | ||||
CGEM_NET_CFG_FULL_DUPLEX | | |||||
CGEM_NET_CFG_SPEED100; | |||||
/* Enable receive checksum offloading? */ | /* Enable receive checksum offloading? */ | ||||
if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0) | if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0) | ||||
net_cfg |= CGEM_NET_CFG_RX_CHKSUM_OFFLD_EN; | sc->net_cfg_shadow |= CGEM_NET_CFG_RX_CHKSUM_OFFLD_EN; | ||||
WR4(sc, CGEM_NET_CFG, net_cfg); | WR4(sc, CGEM_NET_CFG, sc->net_cfg_shadow); | ||||
/* Program DMA Config Register. */ | /* Program DMA Config Register. */ | ||||
dma_cfg = CGEM_DMA_CFG_RX_BUF_SIZE(MCLBYTES) | | dma_cfg = CGEM_DMA_CFG_RX_BUF_SIZE(MCLBYTES) | | ||||
CGEM_DMA_CFG_RX_PKTBUF_MEMSZ_SEL_8K | | CGEM_DMA_CFG_RX_PKTBUF_MEMSZ_SEL_8K | | ||||
CGEM_DMA_CFG_TX_PKTBUF_MEMSZ_SEL | | CGEM_DMA_CFG_TX_PKTBUF_MEMSZ_SEL | | ||||
CGEM_DMA_CFG_AHB_FIXED_BURST_LEN_16 | | CGEM_DMA_CFG_AHB_FIXED_BURST_LEN_16 | | ||||
(sc->is64bit ? CGEM_DMA_CFG_ADDR_BUS_64 : 0) | | |||||
CGEM_DMA_CFG_DISC_WHEN_NO_AHB; | CGEM_DMA_CFG_DISC_WHEN_NO_AHB; | ||||
/* Enable transmit checksum offloading? */ | /* Enable transmit checksum offloading? */ | ||||
if ((if_getcapenable(ifp) & IFCAP_TXCSUM) != 0) | if ((if_getcapenable(ifp) & IFCAP_TXCSUM) != 0) | ||||
dma_cfg |= CGEM_DMA_CFG_CHKSUM_GEN_OFFLOAD_EN; | dma_cfg |= CGEM_DMA_CFG_CHKSUM_GEN_OFFLOAD_EN; | ||||
WR4(sc, CGEM_DMA_CFG, dma_cfg); | WR4(sc, CGEM_DMA_CFG, dma_cfg); | ||||
/* Write the rx and tx descriptor ring addresses to the QBAR regs. */ | /* Write the rx and tx descriptor ring addresses to the QBAR regs. */ | ||||
WR4(sc, CGEM_RX_QBAR, (uint32_t) sc->rxring_physaddr); | WR4(sc, CGEM_RX_QBAR, (uint32_t)sc->rxring_physaddr); | ||||
WR4(sc, CGEM_TX_QBAR, (uint32_t) sc->txring_physaddr); | WR4(sc, CGEM_TX_QBAR, (uint32_t)sc->txring_physaddr); | ||||
#if INTPTR_MAX == INT64_MAX | |||||
if (sc->is64bit) { | |||||
WR4(sc, CGEM_RX_QBAR_HI, | |||||
(uint32_t)(sc->rxring_physaddr >> 32)); | |||||
WR4(sc, CGEM_TX_QBAR_HI, | |||||
(uint32_t)(sc->txring_physaddr >> 32)); | |||||
} | |||||
#endif | |||||
/* Enable rx and tx. */ | /* Enable rx and tx. */ | ||||
sc->net_ctl_shadow |= (CGEM_NET_CTRL_TX_EN | CGEM_NET_CTRL_RX_EN); | sc->net_ctl_shadow |= (CGEM_NET_CTRL_TX_EN | CGEM_NET_CTRL_RX_EN); | ||||
WR4(sc, CGEM_NET_CTRL, sc->net_ctl_shadow); | WR4(sc, CGEM_NET_CTRL, sc->net_ctl_shadow); | ||||
/* Set receive address in case it changed. */ | /* Set receive address in case it changed. */ | ||||
WR4(sc, CGEM_SPEC_ADDR_LOW(0), (eaddr[3] << 24) | | WR4(sc, CGEM_SPEC_ADDR_LOW(0), (eaddr[3] << 24) | | ||||
(eaddr[2] << 16) | (eaddr[1] << 8) | eaddr[0]); | (eaddr[2] << 16) | (eaddr[1] << 8) | eaddr[0]); | ||||
Show All 16 Lines | cgem_init_locked(struct cgem_softc *sc) | ||||
if ((if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) != 0) | if ((if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) != 0) | ||||
return; | return; | ||||
cgem_config(sc); | cgem_config(sc); | ||||
cgem_fill_rqueue(sc); | cgem_fill_rqueue(sc); | ||||
if_setdrvflagbits(sc->ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE); | if_setdrvflagbits(sc->ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE); | ||||
if (sc->miibus != NULL) { | |||||
mii = device_get_softc(sc->miibus); | mii = device_get_softc(sc->miibus); | ||||
mii_mediachg(mii); | mii_mediachg(mii); | ||||
} | |||||
callout_reset(&sc->tick_ch, hz, cgem_tick, sc); | callout_reset(&sc->tick_ch, hz, cgem_tick, sc); | ||||
} | } | ||||
static void | static void | ||||
cgem_init(void *arg) | cgem_init(void *arg) | ||||
{ | { | ||||
struct cgem_softc *sc = (struct cgem_softc *)arg; | struct cgem_softc *sc = (struct cgem_softc *)arg; | ||||
Show All 12 Lines | cgem_stop(struct cgem_softc *sc) | ||||
CGEM_ASSERT_LOCKED(sc); | CGEM_ASSERT_LOCKED(sc); | ||||
callout_stop(&sc->tick_ch); | callout_stop(&sc->tick_ch); | ||||
/* Shut down hardware. */ | /* Shut down hardware. */ | ||||
cgem_reset(sc); | cgem_reset(sc); | ||||
/* Clear out transmit queue. */ | /* Clear out transmit queue. */ | ||||
memset(sc->txring, 0, CGEM_NUM_TX_DESCS * sc->descwds * | |||||
sizeof(uint32_t)); | |||||
for (i = 0; i < CGEM_NUM_TX_DESCS; i++) { | for (i = 0; i < CGEM_NUM_TX_DESCS; i++) { | ||||
sc->txring[i].ctl = CGEM_TXDESC_USED; | sc->txring[i * sc->descwds + 1] = CGEM_TXDESC_USED; | ||||
sc->txring[i].addr = 0; | |||||
if (sc->txring_m[i]) { | if (sc->txring_m[i]) { | ||||
/* Unload and destroy dmamap. */ | /* Unload and destroy dmamap. */ | ||||
bus_dmamap_unload(sc->mbuf_dma_tag, | bus_dmamap_unload(sc->mbuf_dma_tag, | ||||
sc->txring_m_dmamap[i]); | sc->txring_m_dmamap[i]); | ||||
bus_dmamap_destroy(sc->mbuf_dma_tag, | bus_dmamap_destroy(sc->mbuf_dma_tag, | ||||
sc->txring_m_dmamap[i]); | sc->txring_m_dmamap[i]); | ||||
sc->txring_m_dmamap[i] = NULL; | sc->txring_m_dmamap[i] = NULL; | ||||
m_freem(sc->txring_m[i]); | m_freem(sc->txring_m[i]); | ||||
sc->txring_m[i] = NULL; | sc->txring_m[i] = NULL; | ||||
} | } | ||||
} | } | ||||
sc->txring[CGEM_NUM_TX_DESCS - 1].ctl |= CGEM_TXDESC_WRAP; | sc->txring[(CGEM_NUM_TX_DESCS - 1) * sc->descwds + 1] |= | ||||
CGEM_TXDESC_WRAP; | |||||
sc->txring_hd_ptr = 0; | sc->txring_hd_ptr = 0; | ||||
sc->txring_tl_ptr = 0; | sc->txring_tl_ptr = 0; | ||||
sc->txring_queued = 0; | sc->txring_queued = 0; | ||||
/* Clear out receive queue. */ | /* Clear out receive queue. */ | ||||
memset(sc->rxring, 0, CGEM_NUM_RX_DESCS * sc->descwds * | |||||
sizeof(uint32_t)); | |||||
for (i = 0; i < CGEM_NUM_RX_DESCS; i++) { | for (i = 0; i < CGEM_NUM_RX_DESCS; i++) { | ||||
sc->rxring[i].addr = CGEM_RXDESC_OWN; | sc->rxring[i * sc->descwds] = CGEM_RXDESC_OWN; | ||||
sc->rxring[i].ctl = 0; | |||||
if (sc->rxring_m[i]) { | if (sc->rxring_m[i]) { | ||||
/* Unload and destroy dmamap. */ | /* Unload and destroy dmamap. */ | ||||
bus_dmamap_unload(sc->mbuf_dma_tag, | bus_dmamap_unload(sc->mbuf_dma_tag, | ||||
sc->rxring_m_dmamap[i]); | sc->rxring_m_dmamap[i]); | ||||
bus_dmamap_destroy(sc->mbuf_dma_tag, | bus_dmamap_destroy(sc->mbuf_dma_tag, | ||||
sc->rxring_m_dmamap[i]); | sc->rxring_m_dmamap[i]); | ||||
sc->rxring_m_dmamap[i] = NULL; | sc->rxring_m_dmamap[i] = NULL; | ||||
m_freem(sc->rxring_m[i]); | m_freem(sc->rxring_m[i]); | ||||
sc->rxring_m[i] = NULL; | sc->rxring_m[i] = NULL; | ||||
} | } | ||||
} | } | ||||
sc->rxring[CGEM_NUM_RX_DESCS - 1].addr |= CGEM_RXDESC_WRAP; | sc->rxring[(CGEM_NUM_RX_DESCS - 1) * sc->descwds] |= CGEM_RXDESC_WRAP; | ||||
sc->rxring_hd_ptr = 0; | sc->rxring_hd_ptr = 0; | ||||
sc->rxring_tl_ptr = 0; | sc->rxring_tl_ptr = 0; | ||||
sc->rxring_queued = 0; | sc->rxring_queued = 0; | ||||
/* Force next statchg or linkchg to program net config register. */ | /* Force next statchg or linkchg to program net config register. */ | ||||
sc->mii_media_active = 0; | sc->mii_media_active = 0; | ||||
} | } | ||||
Show All 34 Lines | if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) { | ||||
CGEM_LOCK(sc); | CGEM_LOCK(sc); | ||||
cgem_rx_filter(sc); | cgem_rx_filter(sc); | ||||
CGEM_UNLOCK(sc); | CGEM_UNLOCK(sc); | ||||
} | } | ||||
break; | break; | ||||
case SIOCSIFMEDIA: | case SIOCSIFMEDIA: | ||||
case SIOCGIFMEDIA: | case SIOCGIFMEDIA: | ||||
if (sc->miibus == NULL) | |||||
return (ENXIO); | |||||
mii = device_get_softc(sc->miibus); | mii = device_get_softc(sc->miibus); | ||||
error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd); | error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd); | ||||
break; | break; | ||||
case SIOCSIFCAP: | case SIOCSIFCAP: | ||||
CGEM_LOCK(sc); | CGEM_LOCK(sc); | ||||
mask = if_getcapenable(ifp) ^ ifr->ifr_reqcap; | mask = if_getcapenable(ifp) ^ ifr->ifr_reqcap; | ||||
Show All 18 Lines | if ((mask & IFCAP_TXCSUM) != 0) { | ||||
~CGEM_DMA_CFG_CHKSUM_GEN_OFFLOAD_EN); | ~CGEM_DMA_CFG_CHKSUM_GEN_OFFLOAD_EN); | ||||
} | } | ||||
} | } | ||||
if ((mask & IFCAP_RXCSUM) != 0) { | if ((mask & IFCAP_RXCSUM) != 0) { | ||||
if ((ifr->ifr_reqcap & IFCAP_RXCSUM) != 0) { | if ((ifr->ifr_reqcap & IFCAP_RXCSUM) != 0) { | ||||
/* Turn on RX checksumming. */ | /* Turn on RX checksumming. */ | ||||
if_setcapenablebit(ifp, IFCAP_RXCSUM | | if_setcapenablebit(ifp, IFCAP_RXCSUM | | ||||
IFCAP_RXCSUM_IPV6, 0); | IFCAP_RXCSUM_IPV6, 0); | ||||
WR4(sc, CGEM_NET_CFG, | sc->net_cfg_shadow |= | ||||
RD4(sc, CGEM_NET_CFG) | | CGEM_NET_CFG_RX_CHKSUM_OFFLD_EN; | ||||
CGEM_NET_CFG_RX_CHKSUM_OFFLD_EN); | WR4(sc, CGEM_NET_CFG, sc->net_cfg_shadow); | ||||
} else { | } else { | ||||
/* Turn off RX checksumming. */ | /* Turn off RX checksumming. */ | ||||
if_setcapenablebit(ifp, 0, IFCAP_RXCSUM | | if_setcapenablebit(ifp, 0, IFCAP_RXCSUM | | ||||
IFCAP_RXCSUM_IPV6); | IFCAP_RXCSUM_IPV6); | ||||
WR4(sc, CGEM_NET_CFG, | sc->net_cfg_shadow &= | ||||
RD4(sc, CGEM_NET_CFG) & | ~CGEM_NET_CFG_RX_CHKSUM_OFFLD_EN; | ||||
~CGEM_NET_CFG_RX_CHKSUM_OFFLD_EN); | WR4(sc, CGEM_NET_CFG, sc->net_cfg_shadow); | ||||
} | } | ||||
} | } | ||||
if ((if_getcapenable(ifp) & (IFCAP_RXCSUM | IFCAP_TXCSUM)) == | if ((if_getcapenable(ifp) & (IFCAP_RXCSUM | IFCAP_TXCSUM)) == | ||||
(IFCAP_RXCSUM | IFCAP_TXCSUM)) | (IFCAP_RXCSUM | IFCAP_TXCSUM)) | ||||
if_setcapenablebit(ifp, IFCAP_VLAN_HWCSUM, 0); | if_setcapenablebit(ifp, IFCAP_VLAN_HWCSUM, 0); | ||||
else | else | ||||
if_setcapenablebit(ifp, 0, IFCAP_VLAN_HWCSUM); | if_setcapenablebit(ifp, 0, IFCAP_VLAN_HWCSUM); | ||||
CGEM_UNLOCK(sc); | CGEM_UNLOCK(sc); | ||||
break; | break; | ||||
default: | default: | ||||
error = ether_ioctl(ifp, cmd, data); | error = ether_ioctl(ifp, cmd, data); | ||||
break; | break; | ||||
} | } | ||||
return (error); | return (error); | ||||
} | } | ||||
/* MII bus support routines. | /* MII bus support routines. | ||||
*/ | */ | ||||
static void | |||||
cgem_child_detached(device_t dev, device_t child) | |||||
{ | |||||
struct cgem_softc *sc = device_get_softc(dev); | |||||
if (child == sc->miibus) | |||||
sc->miibus = NULL; | |||||
} | |||||
static int | static int | ||||
cgem_ifmedia_upd(if_t ifp) | cgem_ifmedia_upd(if_t ifp) | ||||
{ | { | ||||
struct cgem_softc *sc = (struct cgem_softc *) if_getsoftc(ifp); | struct cgem_softc *sc = (struct cgem_softc *) if_getsoftc(ifp); | ||||
struct mii_data *mii; | struct mii_data *mii; | ||||
struct mii_softc *miisc; | struct mii_softc *miisc; | ||||
int error = 0; | int error = 0; | ||||
▲ Show 20 Lines • Show All 120 Lines • ▼ Show 20 Lines | cgem_default_set_ref_clk(int unit, int frequency) | ||||
return 0; | return 0; | ||||
} | } | ||||
__weak_reference(cgem_default_set_ref_clk, cgem_set_ref_clk); | __weak_reference(cgem_default_set_ref_clk, cgem_set_ref_clk); | ||||
/* Call to set reference clock and network config bits according to media. */ | /* Call to set reference clock and network config bits according to media. */ | ||||
static void | static void | ||||
cgem_mediachange(struct cgem_softc *sc, struct mii_data *mii) | cgem_mediachange(struct cgem_softc *sc, struct mii_data *mii) | ||||
{ | { | ||||
uint32_t net_cfg; | |||||
int ref_clk_freq; | int ref_clk_freq; | ||||
CGEM_ASSERT_LOCKED(sc); | CGEM_ASSERT_LOCKED(sc); | ||||
/* Update hardware to reflect media. */ | /* Update hardware to reflect media. */ | ||||
net_cfg = RD4(sc, CGEM_NET_CFG); | sc->net_cfg_shadow &= ~(CGEM_NET_CFG_SPEED100 | CGEM_NET_CFG_GIGE_EN | | ||||
net_cfg &= ~(CGEM_NET_CFG_SPEED100 | CGEM_NET_CFG_GIGE_EN | | |||||
CGEM_NET_CFG_FULL_DUPLEX); | CGEM_NET_CFG_FULL_DUPLEX); | ||||
switch (IFM_SUBTYPE(mii->mii_media_active)) { | switch (IFM_SUBTYPE(mii->mii_media_active)) { | ||||
case IFM_1000_T: | case IFM_1000_T: | ||||
net_cfg |= (CGEM_NET_CFG_SPEED100 | | sc->net_cfg_shadow |= (CGEM_NET_CFG_SPEED100 | | ||||
CGEM_NET_CFG_GIGE_EN); | CGEM_NET_CFG_GIGE_EN); | ||||
ref_clk_freq = 125000000; | ref_clk_freq = 125000000; | ||||
break; | break; | ||||
case IFM_100_TX: | case IFM_100_TX: | ||||
net_cfg |= CGEM_NET_CFG_SPEED100; | sc->net_cfg_shadow |= CGEM_NET_CFG_SPEED100; | ||||
ref_clk_freq = 25000000; | ref_clk_freq = 25000000; | ||||
break; | break; | ||||
default: | default: | ||||
ref_clk_freq = 2500000; | ref_clk_freq = 2500000; | ||||
} | } | ||||
if ((mii->mii_media_active & IFM_FDX) != 0) | if ((mii->mii_media_active & IFM_FDX) != 0) | ||||
net_cfg |= CGEM_NET_CFG_FULL_DUPLEX; | sc->net_cfg_shadow |= CGEM_NET_CFG_FULL_DUPLEX; | ||||
WR4(sc, CGEM_NET_CFG, net_cfg); | WR4(sc, CGEM_NET_CFG, sc->net_cfg_shadow); | ||||
/* Set the reference clock if necessary. */ | /* Set the reference clock if necessary. */ | ||||
if (cgem_set_ref_clk(sc->ref_clk_num, ref_clk_freq)) | if (cgem_set_ref_clk(sc->ref_clk_num, ref_clk_freq)) | ||||
device_printf(sc->dev, | device_printf(sc->dev, | ||||
"cgem_mediachange: could not set ref clk%d to %d.\n", | "cgem_mediachange: could not set ref clk%d to %d.\n", | ||||
sc->ref_clk_num, ref_clk_freq); | sc->ref_clk_num, ref_clk_freq); | ||||
sc->mii_media_active = mii->mii_media_active; | sc->mii_media_active = mii->mii_media_active; | ||||
▲ Show 20 Lines • Show All 225 Lines • ▼ Show 20 Lines | cgem_attach(device_t dev) | ||||
phandle_t node; | phandle_t node; | ||||
pcell_t cell; | pcell_t cell; | ||||
int rid, err; | int rid, err; | ||||
u_char eaddr[ETHER_ADDR_LEN]; | u_char eaddr[ETHER_ADDR_LEN]; | ||||
sc->dev = dev; | sc->dev = dev; | ||||
CGEM_LOCK_INIT(sc); | CGEM_LOCK_INIT(sc); | ||||
/* Key off of compatible string and set hardware-specific options. */ | |||||
switch(ofw_bus_search_compatible(dev, compat_data)->ocd_data) { | |||||
case HWTYPE_ZYNQMP: | |||||
sc->is64bit = 1; | |||||
sc->neednullqs = 1; | |||||
break; | |||||
case HWTYPE_SIFIVE_FU540: | |||||
sc->is64bit = 1; | |||||
break; | |||||
default: | |||||
/* Implement receive hang bug work-around. */ | |||||
sc->rxhangwar = 1; | |||||
break; | |||||
} | |||||
sc->descwds = sc->is64bit ? 4 : 2; | |||||
/* Get reference clock number and base divider from fdt. */ | /* Get reference clock number and base divider from fdt. */ | ||||
node = ofw_bus_get_node(dev); | node = ofw_bus_get_node(dev); | ||||
sc->ref_clk_num = 0; | sc->ref_clk_num = 0; | ||||
if (OF_getprop(node, "ref-clock-num", &cell, sizeof(cell)) > 0) | if (OF_getprop(node, "ref-clock-num", &cell, sizeof(cell)) > 0) | ||||
sc->ref_clk_num = fdt32_to_cpu(cell); | sc->ref_clk_num = fdt32_to_cpu(cell); | ||||
/* Get memory resource. */ | /* Get memory resource. */ | ||||
rid = 0; | rid = 0; | ||||
sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, | sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, | ||||
RF_ACTIVE); | RF_ACTIVE); | ||||
if (sc->mem_res == NULL) { | if (sc->mem_res == NULL) { | ||||
device_printf(dev, "could not allocate memory resources.\n"); | device_printf(dev, "could not allocate memory resources.\n"); | ||||
return (ENOMEM); | return (ENOMEM); | ||||
} | } | ||||
/* Get IRQ resource. */ | /* Get IRQ resource. */ | ||||
rid = 0; | rid = 0; | ||||
sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE); | sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, | ||||
RF_ACTIVE); | |||||
if (sc->irq_res == NULL) { | if (sc->irq_res == NULL) { | ||||
device_printf(dev, "could not allocate interrupt resource.\n"); | device_printf(dev, "could not allocate interrupt resource.\n"); | ||||
cgem_detach(dev); | cgem_detach(dev); | ||||
return (ENOMEM); | return (ENOMEM); | ||||
} | } | ||||
/* Set up ifnet structure. */ | /* Set up ifnet structure. */ | ||||
ifp = sc->ifp = if_alloc(IFT_ETHER); | ifp = sc->ifp = if_alloc(IFT_ETHER); | ||||
Show All 15 Lines | cgem_attach(device_t dev) | ||||
/* Disable hardware checksumming by default. */ | /* Disable hardware checksumming by default. */ | ||||
if_sethwassist(ifp, 0); | if_sethwassist(ifp, 0); | ||||
if_setcapenable(ifp, if_getcapabilities(ifp) & | if_setcapenable(ifp, if_getcapabilities(ifp) & | ||||
~(IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6 | IFCAP_VLAN_HWCSUM)); | ~(IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6 | IFCAP_VLAN_HWCSUM)); | ||||
sc->if_old_flags = if_getflags(ifp); | sc->if_old_flags = if_getflags(ifp); | ||||
sc->rxbufs = DEFAULT_NUM_RX_BUFS; | sc->rxbufs = DEFAULT_NUM_RX_BUFS; | ||||
sc->rxhangwar = 1; | |||||
/* Reset hardware. */ | /* Reset hardware. */ | ||||
CGEM_LOCK(sc); | CGEM_LOCK(sc); | ||||
cgem_reset(sc); | cgem_reset(sc); | ||||
CGEM_UNLOCK(sc); | CGEM_UNLOCK(sc); | ||||
/* Attach phy to mii bus. */ | /* Attach phy to mii bus. */ | ||||
err = mii_attach(dev, &sc->miibus, ifp, | err = mii_attach(dev, &sc->miibus, ifp, | ||||
cgem_ifmedia_upd, cgem_ifmedia_sts, BMSR_DEFCAPMASK, | cgem_ifmedia_upd, cgem_ifmedia_sts, BMSR_DEFCAPMASK, | ||||
MII_PHY_ANY, MII_OFFSET_ANY, 0); | MII_PHY_ANY, MII_OFFSET_ANY, 0); | ||||
if (err) { | if (err) | ||||
device_printf(dev, "attaching PHYs failed\n"); | device_printf(dev, "warning: attaching PHYs failed\n"); | ||||
cgem_detach(dev); | |||||
return (err); | |||||
} | |||||
/* Set up TX and RX descriptor area. */ | /* Set up TX and RX descriptor area. */ | ||||
err = cgem_setup_descs(sc); | err = cgem_setup_descs(sc); | ||||
if (err) { | if (err) { | ||||
device_printf(dev, "could not set up dma mem for descs.\n"); | device_printf(dev, "could not set up dma mem for descs.\n"); | ||||
cgem_detach(dev); | cgem_detach(dev); | ||||
return (ENOMEM); | return (ENOMEM); | ||||
} | } | ||||
▲ Show 20 Lines • Show All 58 Lines • ▼ Show 20 Lines | cgem_detach(device_t dev) | ||||
} | } | ||||
/* Release DMA resources. */ | /* Release DMA resources. */ | ||||
if (sc->rxring != NULL) { | if (sc->rxring != NULL) { | ||||
if (sc->rxring_physaddr != 0) { | if (sc->rxring_physaddr != 0) { | ||||
bus_dmamap_unload(sc->desc_dma_tag, | bus_dmamap_unload(sc->desc_dma_tag, | ||||
sc->rxring_dma_map); | sc->rxring_dma_map); | ||||
sc->rxring_physaddr = 0; | sc->rxring_physaddr = 0; | ||||
sc->txring_physaddr = 0; | |||||
sc->null_qs_physaddr = 0; | |||||
} | } | ||||
bus_dmamem_free(sc->desc_dma_tag, sc->rxring, | bus_dmamem_free(sc->desc_dma_tag, sc->rxring, | ||||
sc->rxring_dma_map); | sc->rxring_dma_map); | ||||
sc->rxring = NULL; | sc->rxring = NULL; | ||||
sc->txring = NULL; | |||||
sc->null_qs = NULL; | |||||
for (i = 0; i < CGEM_NUM_RX_DESCS; i++) | for (i = 0; i < CGEM_NUM_RX_DESCS; i++) | ||||
if (sc->rxring_m_dmamap[i] != NULL) { | if (sc->rxring_m_dmamap[i] != NULL) { | ||||
bus_dmamap_destroy(sc->mbuf_dma_tag, | bus_dmamap_destroy(sc->mbuf_dma_tag, | ||||
sc->rxring_m_dmamap[i]); | sc->rxring_m_dmamap[i]); | ||||
sc->rxring_m_dmamap[i] = NULL; | sc->rxring_m_dmamap[i] = NULL; | ||||
} | } | ||||
} | |||||
if (sc->txring != NULL) { | |||||
if (sc->txring_physaddr != 0) { | |||||
bus_dmamap_unload(sc->desc_dma_tag, | |||||
sc->txring_dma_map); | |||||
sc->txring_physaddr = 0; | |||||
} | |||||
bus_dmamem_free(sc->desc_dma_tag, sc->txring, | |||||
sc->txring_dma_map); | |||||
sc->txring = NULL; | |||||
for (i = 0; i < CGEM_NUM_TX_DESCS; i++) | for (i = 0; i < CGEM_NUM_TX_DESCS; i++) | ||||
if (sc->txring_m_dmamap[i] != NULL) { | if (sc->txring_m_dmamap[i] != NULL) { | ||||
bus_dmamap_destroy(sc->mbuf_dma_tag, | bus_dmamap_destroy(sc->mbuf_dma_tag, | ||||
sc->txring_m_dmamap[i]); | sc->txring_m_dmamap[i]); | ||||
sc->txring_m_dmamap[i] = NULL; | sc->txring_m_dmamap[i] = NULL; | ||||
} | } | ||||
} | } | ||||
if (sc->desc_dma_tag != NULL) { | if (sc->desc_dma_tag != NULL) { | ||||
Show All 13 Lines | |||||
} | } | ||||
/*
 * Newbus method table: device lifecycle entry points, the bus
 * child-detached notification (clears the cached miibus pointer — see
 * cgem_child_detached), and the MII register-access/status callbacks
 * used by the PHY layer.
 */
static device_method_t cgem_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		cgem_probe),
	DEVMETHOD(device_attach,	cgem_attach),
	DEVMETHOD(device_detach,	cgem_detach),

	/* Bus interface */
	DEVMETHOD(bus_child_detached,	cgem_child_detached),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	cgem_miibus_readreg),
	DEVMETHOD(miibus_writereg,	cgem_miibus_writereg),
	DEVMETHOD(miibus_statchg,	cgem_miibus_statchg),
	DEVMETHOD(miibus_linkchg,	cgem_miibus_linkchg),

	DEVMETHOD_END
};
/* Driver declaration: driver name, method table, and per-instance softc size. */
static driver_t cgem_driver = {
	"cgem",
	cgem_methods,
	sizeof(struct cgem_softc),
};
/* Register the cgem driver under simplebus and hang a miibus instance off it. */
DRIVER_MODULE(cgem, simplebus, cgem_driver, cgem_devclass, NULL, NULL);
DRIVER_MODULE(miibus, cgem, miibus_driver, miibus_devclass, NULL, NULL);
/* Module dependencies: the MII PHY layer and the ethernet framework. */
MODULE_DEPEND(cgem, miibus, 1, 1, 1);
MODULE_DEPEND(cgem, ether, 1, 1, 1);
Isn't it clearer to simply always restrict it to 1ULL << 32?