D24226: Cadence GEM driver style clean-up.
D24226.diff (59 KB)

Index: head/sys/dev/cadence/if_cgem.c
===================================================================
--- head/sys/dev/cadence/if_cgem.c
+++ head/sys/dev/cadence/if_cgem.c
@@ -112,8 +112,8 @@
device_t miibus;
u_int mii_media_active; /* last active media */
int if_old_flags;
- struct resource *mem_res;
- struct resource *irq_res;
+ struct resource *mem_res;
+ struct resource *irq_res;
void *intrhand;
struct callout tick_ch;
uint32_t net_ctl_shadow;
@@ -131,7 +131,7 @@
int rxring_hd_ptr; /* where to put rcv bufs */
int rxring_tl_ptr; /* where to get receives */
int rxring_queued; /* how many rcv bufs queued */
- bus_dmamap_t rxring_dma_map;
+ bus_dmamap_t rxring_dma_map;
int rxbufs; /* tunable number rcv bufs */
int rxhangwar; /* rx hang work-around */
u_int rxoverruns; /* rx overruns */
@@ -200,16 +200,15 @@
} stats;
};
-#define RD4(sc, off) (bus_read_4((sc)->mem_res, (off)))
-#define WR4(sc, off, val) (bus_write_4((sc)->mem_res, (off), (val)))
+#define RD4(sc, off) (bus_read_4((sc)->mem_res, (off)))
+#define WR4(sc, off, val) (bus_write_4((sc)->mem_res, (off), (val)))
#define BARRIER(sc, off, len, flags) \
(bus_barrier((sc)->mem_res, (off), (len), (flags))
#define CGEM_LOCK(sc) mtx_lock(&(sc)->sc_mtx)
-#define CGEM_UNLOCK(sc) mtx_unlock(&(sc)->sc_mtx)
-#define CGEM_LOCK_INIT(sc) \
- mtx_init(&(sc)->sc_mtx, device_get_nameunit((sc)->dev), \
- MTX_NETWORK_LOCK, MTX_DEF)
+#define CGEM_UNLOCK(sc) mtx_unlock(&(sc)->sc_mtx)
+#define CGEM_LOCK_INIT(sc) mtx_init(&(sc)->sc_mtx, \
+ device_get_nameunit((sc)->dev), MTX_NETWORK_LOCK, MTX_DEF)
#define CGEM_LOCK_DESTROY(sc) mtx_destroy(&(sc)->sc_mtx)
#define CGEM_ASSERT_LOCKED(sc) mtx_assert(&(sc)->sc_mtx, MA_OWNED)
@@ -259,9 +258,8 @@
eaddr[5] = rnd & 0xff;
device_printf(sc->dev, "no mac address found, assigning "
- "random: %02x:%02x:%02x:%02x:%02x:%02x\n",
- eaddr[0], eaddr[1], eaddr[2],
- eaddr[3], eaddr[4], eaddr[5]);
+ "random: %02x:%02x:%02x:%02x:%02x:%02x\n", eaddr[0],
+ eaddr[1], eaddr[2], eaddr[3], eaddr[4], eaddr[5]);
}
/* Move address to first slot and zero out the rest. */
@@ -275,11 +273,11 @@
}
}
-/* cgem_mac_hash(): map 48-bit address to a 6-bit hash.
- * The 6-bit hash corresponds to a bit in a 64-bit hash
- * register. Setting that bit in the hash register enables
- * reception of all frames with a destination address that hashes
- * to that 6-bit value.
+/*
+ * cgem_mac_hash(): map 48-bit address to a 6-bit hash. The 6-bit hash
+ * corresponds to a bit in a 64-bit hash register. Setting that bit in the hash
+ * register enables reception of all frames with a destination address that
+ * hashes to that 6-bit value.
*
* The hash function is described in sec. 16.2.3 in the Zynq-7000 Tech
* Reference Manual. Bits 0-5 in the hash are the exclusive-or of
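The comment above is the only description of the hash algorithm in this diff; the body of cgem_mac_hash() itself is not touched by any hunk. As a minimal sketch of the every-sixth-bit XOR scheme that sec. 16.2.3 of the Zynq-7000 TRM describes (illustrative only; mac_hash6() is a hypothetical name, not the driver's function):

/*
 * Illustrative sketch, not driver code: fold a 48-bit MAC address into a
 * 6-bit hash by XOR-ing every sixth address bit into the same hash bit.
 */
#include <stdint.h>

static unsigned
mac_hash6(const uint8_t eaddr[6])
{
	unsigned hash = 0;
	int i;

	for (i = 0; i < 48; i++) {
		/* Address bit i lands in hash bit (i mod 6). */
		if ((eaddr[i / 8] >> (i % 8)) & 1)
			hash ^= 1U << (i % 6);
	}
	return (hash);	/* 0..63: selects one bit of the 64-bit hash register */
}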
@@ -308,15 +306,16 @@
index = cgem_mac_hash(LLADDR(sdl));
if (index > 31)
- hashes[0] |= (1 << (index - 32));
+ hashes[0] |= (1U << (index - 32));
else
- hashes[1] |= (1 << index);
+ hashes[1] |= (1U << index);
return (1);
}
-/* After any change in rx flags or multi-cast addresses, set up
- * hash registers and net config register bits.
+/*
+ * After any change in rx flags or multi-cast addresses, set up hash registers
+ * and net config register bits.
*/
static void
cgem_rx_filter(struct cgem_softc *sc)
@@ -328,8 +327,7 @@
net_cfg = RD4(sc, CGEM_NET_CFG);
net_cfg &= ~(CGEM_NET_CFG_MULTI_HASH_EN |
- CGEM_NET_CFG_NO_BCAST |
- CGEM_NET_CFG_COPY_ALL);
+ CGEM_NET_CFG_NO_BCAST | CGEM_NET_CFG_COPY_ALL);
if ((if_getflags(ifp) & IFF_PROMISC) != 0)
net_cfg |= CGEM_NET_CFG_COPY_ALL;
@@ -370,51 +368,32 @@
sc->txring = NULL;
sc->rxring = NULL;
- /* Allocate non-cached DMA space for RX and TX descriptors.
- */
+ /* Allocate non-cached DMA space for RX and TX descriptors. */
err = bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1, 0,
- BUS_SPACE_MAXADDR_32BIT,
- BUS_SPACE_MAXADDR,
- NULL, NULL,
- MAX_DESC_RING_SIZE,
- 1,
- MAX_DESC_RING_SIZE,
- 0,
- busdma_lock_mutex,
- &sc->sc_mtx,
- &sc->desc_dma_tag);
+ BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
+ MAX_DESC_RING_SIZE, 1, MAX_DESC_RING_SIZE, 0,
+ busdma_lock_mutex, &sc->sc_mtx, &sc->desc_dma_tag);
if (err)
return (err);
/* Set up a bus_dma_tag for mbufs. */
err = bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1, 0,
- BUS_SPACE_MAXADDR_32BIT,
- BUS_SPACE_MAXADDR,
- NULL, NULL,
- MCLBYTES,
- TX_MAX_DMA_SEGS,
- MCLBYTES,
- 0,
- busdma_lock_mutex,
- &sc->sc_mtx,
- &sc->mbuf_dma_tag);
+ BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
+ MCLBYTES, TX_MAX_DMA_SEGS, MCLBYTES, 0,
+ busdma_lock_mutex, &sc->sc_mtx, &sc->mbuf_dma_tag);
if (err)
return (err);
/* Allocate DMA memory in non-cacheable space. */
- err = bus_dmamem_alloc(sc->desc_dma_tag,
- (void **)&sc->rxring,
- BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
- &sc->rxring_dma_map);
+ err = bus_dmamem_alloc(sc->desc_dma_tag, (void **)&sc->rxring,
+ BUS_DMA_NOWAIT | BUS_DMA_COHERENT, &sc->rxring_dma_map);
if (err)
return (err);
/* Load descriptor DMA memory. */
err = bus_dmamap_load(sc->desc_dma_tag, sc->rxring_dma_map,
- (void *)sc->rxring,
- CGEM_NUM_RX_DESCS*sizeof(struct cgem_rx_desc),
- cgem_getaddr, &sc->rxring_physaddr,
- BUS_DMA_NOWAIT);
+ (void *)sc->rxring, CGEM_NUM_RX_DESCS*sizeof(struct cgem_rx_desc),
+ cgem_getaddr, &sc->rxring_physaddr, BUS_DMA_NOWAIT);
if (err)
return (err);
@@ -432,19 +411,15 @@
sc->rxring_queued = 0;
/* Allocate DMA memory for TX descriptors in non-cacheable space. */
- err = bus_dmamem_alloc(sc->desc_dma_tag,
- (void **)&sc->txring,
- BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
- &sc->txring_dma_map);
+ err = bus_dmamem_alloc(sc->desc_dma_tag, (void **)&sc->txring,
+ BUS_DMA_NOWAIT | BUS_DMA_COHERENT, &sc->txring_dma_map);
if (err)
return (err);
/* Load TX descriptor DMA memory. */
err = bus_dmamap_load(sc->desc_dma_tag, sc->txring_dma_map,
- (void *)sc->txring,
- CGEM_NUM_TX_DESCS*sizeof(struct cgem_tx_desc),
- cgem_getaddr, &sc->txring_physaddr,
- BUS_DMA_NOWAIT);
+ (void *)sc->txring, CGEM_NUM_TX_DESCS*sizeof(struct cgem_tx_desc),
+ cgem_getaddr, &sc->txring_physaddr, BUS_DMA_NOWAIT);
if (err)
return (err);
@@ -486,14 +461,14 @@
/* Load map and plug in physical address. */
if (bus_dmamap_create(sc->mbuf_dma_tag, 0,
- &sc->rxring_m_dmamap[sc->rxring_hd_ptr])) {
+ &sc->rxring_m_dmamap[sc->rxring_hd_ptr])) {
sc->rxdmamapfails++;
m_free(m);
break;
}
- if (bus_dmamap_load_mbuf_sg(sc->mbuf_dma_tag,
- sc->rxring_m_dmamap[sc->rxring_hd_ptr], m,
- segs, &nsegs, BUS_DMA_NOWAIT)) {
+ if (bus_dmamap_load_mbuf_sg(sc->mbuf_dma_tag,
+ sc->rxring_m_dmamap[sc->rxring_hd_ptr], m,
+ segs, &nsegs, BUS_DMA_NOWAIT)) {
sc->rxdmamapfails++;
bus_dmamap_destroy(sc->mbuf_dma_tag,
sc->rxring_m_dmamap[sc->rxring_hd_ptr]);
@@ -505,18 +480,18 @@
/* Sync cache with receive buffer. */
bus_dmamap_sync(sc->mbuf_dma_tag,
- sc->rxring_m_dmamap[sc->rxring_hd_ptr],
- BUS_DMASYNC_PREREAD);
+ sc->rxring_m_dmamap[sc->rxring_hd_ptr],
+ BUS_DMASYNC_PREREAD);
/* Write rx descriptor and increment head pointer. */
sc->rxring[sc->rxring_hd_ptr].ctl = 0;
if (sc->rxring_hd_ptr == CGEM_NUM_RX_DESCS - 1) {
sc->rxring[sc->rxring_hd_ptr].addr = segs[0].ds_addr |
- CGEM_RXDESC_WRAP;
+ CGEM_RXDESC_WRAP;
sc->rxring_hd_ptr = 0;
} else
sc->rxring[sc->rxring_hd_ptr++].addr = segs[0].ds_addr;
-
+
sc->rxring_queued++;
}
}
@@ -535,7 +510,7 @@
m_hd = NULL;
m_tl = &m_hd;
while (sc->rxring_queued > 0 &&
- (sc->rxring[sc->rxring_tl_ptr].addr & CGEM_RXDESC_OWN) != 0) {
+ (sc->rxring[sc->rxring_tl_ptr].addr & CGEM_RXDESC_OWN) != 0) {
ctl = sc->rxring[sc->rxring_tl_ptr].ctl;
@@ -545,14 +520,14 @@
/* Sync cache with receive buffer. */
bus_dmamap_sync(sc->mbuf_dma_tag,
- sc->rxring_m_dmamap[sc->rxring_tl_ptr],
- BUS_DMASYNC_POSTREAD);
+ sc->rxring_m_dmamap[sc->rxring_tl_ptr],
+ BUS_DMASYNC_POSTREAD);
/* Unload and destroy dmamap. */
bus_dmamap_unload(sc->mbuf_dma_tag,
- sc->rxring_m_dmamap[sc->rxring_tl_ptr]);
+ sc->rxring_m_dmamap[sc->rxring_tl_ptr]);
bus_dmamap_destroy(sc->mbuf_dma_tag,
- sc->rxring_m_dmamap[sc->rxring_tl_ptr]);
+ sc->rxring_m_dmamap[sc->rxring_tl_ptr]);
sc->rxring_m_dmamap[sc->rxring_tl_ptr] = NULL;
/* Increment tail pointer. */
@@ -560,13 +535,14 @@
sc->rxring_tl_ptr = 0;
sc->rxring_queued--;
- /* Check FCS and make sure entire packet landed in one mbuf
+ /*
+ * Check FCS and make sure entire packet landed in one mbuf
* cluster (which is much bigger than the largest ethernet
* packet).
*/
if ((ctl & CGEM_RXDESC_BAD_FCS) != 0 ||
(ctl & (CGEM_RXDESC_SOF | CGEM_RXDESC_EOF)) !=
- (CGEM_RXDESC_SOF | CGEM_RXDESC_EOF)) {
+ (CGEM_RXDESC_SOF | CGEM_RXDESC_EOF)) {
/* discard. */
m_free(m);
if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
@@ -579,8 +555,9 @@
m->m_pkthdr.rcvif = ifp;
m->m_pkthdr.len = m->m_len;
- /* Are we using hardware checksumming? Check the
- * status in the receive descriptor.
+ /*
+ * Are we using hardware checksumming? Check the status in the
+ * receive descriptor.
*/
if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0) {
/* TCP or UDP checks out, IP checks out too. */
@@ -589,14 +566,14 @@
(ctl & CGEM_RXDESC_CKSUM_STAT_MASK) ==
CGEM_RXDESC_CKSUM_STAT_UDP_GOOD) {
m->m_pkthdr.csum_flags |=
- CSUM_IP_CHECKED | CSUM_IP_VALID |
- CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
+ CSUM_IP_CHECKED | CSUM_IP_VALID |
+ CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
m->m_pkthdr.csum_data = 0xffff;
} else if ((ctl & CGEM_RXDESC_CKSUM_STAT_MASK) ==
- CGEM_RXDESC_CKSUM_STAT_IP_GOOD) {
+ CGEM_RXDESC_CKSUM_STAT_IP_GOOD) {
/* Only IP checks out. */
m->m_pkthdr.csum_flags |=
- CSUM_IP_CHECKED | CSUM_IP_VALID;
+ CSUM_IP_CHECKED | CSUM_IP_VALID;
m->m_pkthdr.csum_data = 0xffff;
}
}
@@ -632,19 +609,19 @@
/* free up finished transmits. */
while (sc->txring_queued > 0 &&
- ((ctl = sc->txring[sc->txring_tl_ptr].ctl) &
- CGEM_TXDESC_USED) != 0) {
+ ((ctl = sc->txring[sc->txring_tl_ptr].ctl) &
+ CGEM_TXDESC_USED) != 0) {
/* Sync cache. */
bus_dmamap_sync(sc->mbuf_dma_tag,
- sc->txring_m_dmamap[sc->txring_tl_ptr],
- BUS_DMASYNC_POSTWRITE);
+ sc->txring_m_dmamap[sc->txring_tl_ptr],
+ BUS_DMASYNC_POSTWRITE);
/* Unload and destroy DMA map. */
bus_dmamap_unload(sc->mbuf_dma_tag,
- sc->txring_m_dmamap[sc->txring_tl_ptr]);
+ sc->txring_m_dmamap[sc->txring_tl_ptr]);
bus_dmamap_destroy(sc->mbuf_dma_tag,
- sc->txring_m_dmamap[sc->txring_tl_ptr]);
+ sc->txring_m_dmamap[sc->txring_tl_ptr]);
sc->txring_m_dmamap[sc->txring_tl_ptr] = NULL;
/* Free up the mbuf. */
@@ -655,18 +632,19 @@
/* Check the status. */
if ((ctl & CGEM_TXDESC_AHB_ERR) != 0) {
/* Serious bus error. log to console. */
- device_printf(sc->dev, "cgem_clean_tx: Whoa! "
- "AHB error, addr=0x%x\n",
- sc->txring[sc->txring_tl_ptr].addr);
+ device_printf(sc->dev,
+ "cgem_clean_tx: AHB error, addr=0x%x\n",
+ sc->txring[sc->txring_tl_ptr].addr);
} else if ((ctl & (CGEM_TXDESC_RETRY_ERR |
- CGEM_TXDESC_LATE_COLL)) != 0) {
+ CGEM_TXDESC_LATE_COLL)) != 0) {
if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, 1);
} else
if_inc_counter(sc->ifp, IFCOUNTER_OPACKETS, 1);
- /* If the packet spanned more than one tx descriptor,
- * skip descriptors until we find the end so that only
- * start-of-frame descriptors are processed.
+ /*
+ * If the packet spanned more than one tx descriptor, skip
+ * descriptors until we find the end so that only start-of-frame
+ * descriptors are processed.
*/
while ((ctl & CGEM_TXDESC_LAST_BUF) == 0) {
if ((ctl & CGEM_TXDESC_WRAP) != 0)
@@ -678,7 +656,7 @@
ctl = sc->txring[sc->txring_tl_ptr].ctl;
sc->txring[sc->txring_tl_ptr].ctl =
- ctl | CGEM_TXDESC_USED;
+ ctl | CGEM_TXDESC_USED;
}
/* Next descriptor. */
@@ -731,14 +709,14 @@
/* Create and load DMA map. */
if (bus_dmamap_create(sc->mbuf_dma_tag, 0,
- &sc->txring_m_dmamap[sc->txring_hd_ptr])) {
+ &sc->txring_m_dmamap[sc->txring_hd_ptr])) {
m_freem(m);
sc->txdmamapfails++;
continue;
}
err = bus_dmamap_load_mbuf_sg(sc->mbuf_dma_tag,
- sc->txring_m_dmamap[sc->txring_hd_ptr],
- m, segs, &nsegs, BUS_DMA_NOWAIT);
+ sc->txring_m_dmamap[sc->txring_hd_ptr], m, segs, &nsegs,
+ BUS_DMA_NOWAIT);
if (err == EFBIG) {
/* Too many segments! defrag and try again. */
struct mbuf *m2 = m_defrag(m, M_NOWAIT);
@@ -747,21 +725,21 @@
sc->txdefragfails++;
m_freem(m);
bus_dmamap_destroy(sc->mbuf_dma_tag,
- sc->txring_m_dmamap[sc->txring_hd_ptr]);
+ sc->txring_m_dmamap[sc->txring_hd_ptr]);
sc->txring_m_dmamap[sc->txring_hd_ptr] = NULL;
continue;
}
m = m2;
err = bus_dmamap_load_mbuf_sg(sc->mbuf_dma_tag,
- sc->txring_m_dmamap[sc->txring_hd_ptr],
- m, segs, &nsegs, BUS_DMA_NOWAIT);
+ sc->txring_m_dmamap[sc->txring_hd_ptr], m, segs,
+ &nsegs, BUS_DMA_NOWAIT);
sc->txdefrags++;
}
if (err) {
/* Give up. */
m_freem(m);
bus_dmamap_destroy(sc->mbuf_dma_tag,
- sc->txring_m_dmamap[sc->txring_hd_ptr]);
+ sc->txring_m_dmamap[sc->txring_hd_ptr]);
sc->txring_m_dmamap[sc->txring_hd_ptr] = NULL;
sc->txdmamapfails++;
continue;
@@ -770,20 +748,21 @@
/* Sync tx buffer with cache. */
bus_dmamap_sync(sc->mbuf_dma_tag,
- sc->txring_m_dmamap[sc->txring_hd_ptr],
- BUS_DMASYNC_PREWRITE);
+ sc->txring_m_dmamap[sc->txring_hd_ptr],
+ BUS_DMASYNC_PREWRITE);
/* Set wrap flag if next packet might run off end of ring. */
wrap = sc->txring_hd_ptr + nsegs + TX_MAX_DMA_SEGS >=
- CGEM_NUM_TX_DESCS;
+ CGEM_NUM_TX_DESCS;
- /* Fill in the TX descriptors back to front so that USED
- * bit in first descriptor is cleared last.
+ /*
+ * Fill in the TX descriptors back to front so that USED bit in
+ * first descriptor is cleared last.
*/
for (i = nsegs - 1; i >= 0; i--) {
/* Descriptor address. */
sc->txring[sc->txring_hd_ptr + i].addr =
- segs[i].ds_addr;
+ segs[i].ds_addr;
/* Descriptor control word. */
ctl = segs[i].ds_len;
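The two comments in the hunk above (the wrap flag and the back-to-front fill) carry the key ordering rule for handing descriptors to the hardware. Below is a simplified sketch of that pattern, using struct cgem_tx_desc and the CGEM_TXDESC_* flags defined in if_cgem_hw.h later in this diff; fill_tx_descs() is a hypothetical helper and the usual kernel includes are assumed, so this illustrates the ordering rather than reproducing the driver's exact code:

/*
 * Sketch: fill descriptors last-to-first so the start-of-frame
 * descriptor (whose USED bit the hardware checks) is handed over last,
 * and the controller never sees a half-built frame.
 */
static void
fill_tx_descs(struct cgem_tx_desc *ring, int hd, bus_dma_segment_t *segs,
    int nsegs, int wrap)
{
	uint32_t ctl;
	int i;

	for (i = nsegs - 1; i >= 0; i--) {
		ring[hd + i].addr = segs[i].ds_addr;

		ctl = segs[i].ds_len;
		if (i == nsegs - 1) {
			ctl |= CGEM_TXDESC_LAST_BUF;	/* last buffer of frame */
			if (wrap)
				ctl |= CGEM_TXDESC_WRAP;	/* end of ring */
		}
		/* Storing ctl with CGEM_TXDESC_USED clear gives the
		 * descriptor to the hardware; doing index 0 last keeps the
		 * frame hidden until every segment is in place. */
		ring[hd + i].ctl = ctl;
	}
}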
@@ -951,8 +930,9 @@
/* Hresp not ok. Something is very bad with DMA. Try to clear. */
if ((istatus & CGEM_INTR_HRESP_NOT_OK) != 0) {
- device_printf(sc->dev, "cgem_intr: hresp not okay! "
- "rx_status=0x%x\n", RD4(sc, CGEM_RX_STAT));
+ device_printf(sc->dev,
+ "cgem_intr: hresp not okay! rx_status=0x%x\n",
+ RD4(sc, CGEM_RX_STAT));
WR4(sc, CGEM_RX_STAT, CGEM_RX_STAT_HRESP_NOT_OK);
}
@@ -997,8 +977,7 @@
WR4(sc, CGEM_RX_QBAR, 0);
/* Get management port running even if interface is down. */
- WR4(sc, CGEM_NET_CFG,
- CGEM_NET_CFG_DBUS_WIDTH_32 |
+ WR4(sc, CGEM_NET_CFG, CGEM_NET_CFG_DBUS_WIDTH_32 |
CGEM_NET_CFG_MDC_CLK_DIV_64);
sc->net_ctl_shadow = CGEM_NET_CTRL_MGMT_PORT_EN;
@@ -1018,13 +997,13 @@
/* Program Net Config Register. */
net_cfg = CGEM_NET_CFG_DBUS_WIDTH_32 |
- CGEM_NET_CFG_MDC_CLK_DIV_64 |
- CGEM_NET_CFG_FCS_REMOVE |
- CGEM_NET_CFG_RX_BUF_OFFSET(ETHER_ALIGN) |
- CGEM_NET_CFG_GIGE_EN |
- CGEM_NET_CFG_1536RXEN |
- CGEM_NET_CFG_FULL_DUPLEX |
- CGEM_NET_CFG_SPEED100;
+ CGEM_NET_CFG_MDC_CLK_DIV_64 |
+ CGEM_NET_CFG_FCS_REMOVE |
+ CGEM_NET_CFG_RX_BUF_OFFSET(ETHER_ALIGN) |
+ CGEM_NET_CFG_GIGE_EN |
+ CGEM_NET_CFG_1536RXEN |
+ CGEM_NET_CFG_FULL_DUPLEX |
+ CGEM_NET_CFG_SPEED100;
/* Enable receive checksum offloading? */
if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0)
@@ -1034,10 +1013,10 @@
/* Program DMA Config Register. */
dma_cfg = CGEM_DMA_CFG_RX_BUF_SIZE(MCLBYTES) |
- CGEM_DMA_CFG_RX_PKTBUF_MEMSZ_SEL_8K |
- CGEM_DMA_CFG_TX_PKTBUF_MEMSZ_SEL |
- CGEM_DMA_CFG_AHB_FIXED_BURST_LEN_16 |
- CGEM_DMA_CFG_DISC_WHEN_NO_AHB;
+ CGEM_DMA_CFG_RX_PKTBUF_MEMSZ_SEL_8K |
+ CGEM_DMA_CFG_TX_PKTBUF_MEMSZ_SEL |
+ CGEM_DMA_CFG_AHB_FIXED_BURST_LEN_16 |
+ CGEM_DMA_CFG_DISC_WHEN_NO_AHB;
/* Enable transmit checksum offloading? */
if ((if_getcapenable(ifp) & IFCAP_TXCSUM) != 0)
@@ -1048,7 +1027,7 @@
/* Write the rx and tx descriptor ring addresses to the QBAR regs. */
WR4(sc, CGEM_RX_QBAR, (uint32_t) sc->rxring_physaddr);
WR4(sc, CGEM_TX_QBAR, (uint32_t) sc->txring_physaddr);
-
+
/* Enable rx and tx. */
sc->net_ctl_shadow |= (CGEM_NET_CTRL_TX_EN | CGEM_NET_CTRL_RX_EN);
WR4(sc, CGEM_NET_CTRL, sc->net_ctl_shadow);
@@ -1059,8 +1038,7 @@
WR4(sc, CGEM_SPEC_ADDR_HI(0), (eaddr[5] << 8) | eaddr[4]);
/* Set up interrupts. */
- WR4(sc, CGEM_INTR_EN,
- CGEM_INTR_RX_COMPLETE | CGEM_INTR_RX_OVERRUN |
+ WR4(sc, CGEM_INTR_EN, CGEM_INTR_RX_COMPLETE | CGEM_INTR_RX_OVERRUN |
CGEM_INTR_TX_USED_READ | CGEM_INTR_RX_USED_READ |
CGEM_INTR_HRESP_NOT_OK);
}
@@ -1117,9 +1095,9 @@
if (sc->txring_m[i]) {
/* Unload and destroy dmamap. */
bus_dmamap_unload(sc->mbuf_dma_tag,
- sc->txring_m_dmamap[i]);
+ sc->txring_m_dmamap[i]);
bus_dmamap_destroy(sc->mbuf_dma_tag,
- sc->txring_m_dmamap[i]);
+ sc->txring_m_dmamap[i]);
sc->txring_m_dmamap[i] = NULL;
m_freem(sc->txring_m[i]);
sc->txring_m[i] = NULL;
@@ -1138,9 +1116,9 @@
if (sc->rxring_m[i]) {
/* Unload and destroy dmamap. */
bus_dmamap_unload(sc->mbuf_dma_tag,
- sc->rxring_m_dmamap[i]);
+ sc->rxring_m_dmamap[i]);
bus_dmamap_destroy(sc->mbuf_dma_tag,
- sc->rxring_m_dmamap[i]);
+ sc->rxring_m_dmamap[i]);
sc->rxring_m_dmamap[i] = NULL;
m_freem(sc->rxring_m[i]);
@@ -1172,7 +1150,7 @@
if ((if_getflags(ifp) & IFF_UP) != 0) {
if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
if (((if_getflags(ifp) ^ sc->if_old_flags) &
- (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
+ (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
cgem_rx_filter(sc);
}
} else {
@@ -1210,41 +1188,41 @@
if ((ifr->ifr_reqcap & IFCAP_TXCSUM) != 0) {
/* Turn on TX checksumming. */
if_setcapenablebit(ifp, IFCAP_TXCSUM |
- IFCAP_TXCSUM_IPV6, 0);
+ IFCAP_TXCSUM_IPV6, 0);
if_sethwassistbits(ifp, CGEM_CKSUM_ASSIST, 0);
WR4(sc, CGEM_DMA_CFG,
RD4(sc, CGEM_DMA_CFG) |
- CGEM_DMA_CFG_CHKSUM_GEN_OFFLOAD_EN);
+ CGEM_DMA_CFG_CHKSUM_GEN_OFFLOAD_EN);
} else {
/* Turn off TX checksumming. */
if_setcapenablebit(ifp, 0, IFCAP_TXCSUM |
- IFCAP_TXCSUM_IPV6);
+ IFCAP_TXCSUM_IPV6);
if_sethwassistbits(ifp, 0, CGEM_CKSUM_ASSIST);
WR4(sc, CGEM_DMA_CFG,
RD4(sc, CGEM_DMA_CFG) &
- ~CGEM_DMA_CFG_CHKSUM_GEN_OFFLOAD_EN);
+ ~CGEM_DMA_CFG_CHKSUM_GEN_OFFLOAD_EN);
}
}
if ((mask & IFCAP_RXCSUM) != 0) {
if ((ifr->ifr_reqcap & IFCAP_RXCSUM) != 0) {
/* Turn on RX checksumming. */
if_setcapenablebit(ifp, IFCAP_RXCSUM |
- IFCAP_RXCSUM_IPV6, 0);
+ IFCAP_RXCSUM_IPV6, 0);
WR4(sc, CGEM_NET_CFG,
RD4(sc, CGEM_NET_CFG) |
- CGEM_NET_CFG_RX_CHKSUM_OFFLD_EN);
+ CGEM_NET_CFG_RX_CHKSUM_OFFLD_EN);
} else {
/* Turn off RX checksumming. */
if_setcapenablebit(ifp, 0, IFCAP_RXCSUM |
- IFCAP_RXCSUM_IPV6);
+ IFCAP_RXCSUM_IPV6);
WR4(sc, CGEM_NET_CFG,
RD4(sc, CGEM_NET_CFG) &
- ~CGEM_NET_CFG_RX_CHKSUM_OFFLD_EN);
+ ~CGEM_NET_CFG_RX_CHKSUM_OFFLD_EN);
}
}
- if ((if_getcapenable(ifp) & (IFCAP_RXCSUM | IFCAP_TXCSUM)) ==
+ if ((if_getcapenable(ifp) & (IFCAP_RXCSUM | IFCAP_TXCSUM)) ==
(IFCAP_RXCSUM | IFCAP_TXCSUM))
if_setcapenablebit(ifp, IFCAP_VLAN_HWCSUM, 0);
else
@@ -1311,9 +1289,8 @@
struct cgem_softc *sc = device_get_softc(dev);
int tries, val;
- WR4(sc, CGEM_PHY_MAINT,
- CGEM_PHY_MAINT_CLAUSE_22 | CGEM_PHY_MAINT_MUST_10 |
- CGEM_PHY_MAINT_OP_READ |
+ WR4(sc, CGEM_PHY_MAINT, CGEM_PHY_MAINT_CLAUSE_22 |
+ CGEM_PHY_MAINT_MUST_10 | CGEM_PHY_MAINT_OP_READ |
(phy << CGEM_PHY_MAINT_PHY_ADDR_SHIFT) |
(reg << CGEM_PHY_MAINT_REG_ADDR_SHIFT));
@@ -1344,10 +1321,9 @@
{
struct cgem_softc *sc = device_get_softc(dev);
int tries;
-
- WR4(sc, CGEM_PHY_MAINT,
- CGEM_PHY_MAINT_CLAUSE_22 | CGEM_PHY_MAINT_MUST_10 |
- CGEM_PHY_MAINT_OP_WRITE |
+
+ WR4(sc, CGEM_PHY_MAINT, CGEM_PHY_MAINT_CLAUSE_22 |
+ CGEM_PHY_MAINT_MUST_10 | CGEM_PHY_MAINT_OP_WRITE |
(phy << CGEM_PHY_MAINT_PHY_ADDR_SHIFT) |
(reg << CGEM_PHY_MAINT_REG_ADDR_SHIFT) |
(data & CGEM_PHY_MAINT_DATA_MASK));
@@ -1417,12 +1393,12 @@
/* Update hardware to reflect media. */
net_cfg = RD4(sc, CGEM_NET_CFG);
net_cfg &= ~(CGEM_NET_CFG_SPEED100 | CGEM_NET_CFG_GIGE_EN |
- CGEM_NET_CFG_FULL_DUPLEX);
+ CGEM_NET_CFG_FULL_DUPLEX);
switch (IFM_SUBTYPE(mii->mii_media_active)) {
case IFM_1000_T:
net_cfg |= (CGEM_NET_CFG_SPEED100 |
- CGEM_NET_CFG_GIGE_EN);
+ CGEM_NET_CFG_GIGE_EN);
ref_clk_freq = 125000000;
break;
case IFM_100_TX:
@@ -1440,9 +1416,9 @@
/* Set the reference clock if necessary. */
if (cgem_set_ref_clk(sc->ref_clk_num, ref_clk_freq))
- device_printf(sc->dev, "cgem_mediachange: "
- "could not set ref clk%d to %d.\n",
- sc->ref_clk_num, ref_clk_freq);
+ device_printf(sc->dev,
+ "cgem_mediachange: could not set ref clk%d to %d.\n",
+ sc->ref_clk_num, ref_clk_freq);
sc->mii_media_active = mii->mii_media_active;
}
@@ -1459,169 +1435,195 @@
child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
SYSCTL_ADD_INT(ctx, child, OID_AUTO, "rxbufs", CTLFLAG_RW,
- &sc->rxbufs, 0,
- "Number receive buffers to provide");
+ &sc->rxbufs, 0, "Number receive buffers to provide");
SYSCTL_ADD_INT(ctx, child, OID_AUTO, "rxhangwar", CTLFLAG_RW,
- &sc->rxhangwar, 0,
- "Enable receive hang work-around");
+ &sc->rxhangwar, 0, "Enable receive hang work-around");
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "_rxoverruns", CTLFLAG_RD,
- &sc->rxoverruns, 0,
- "Receive overrun events");
+ &sc->rxoverruns, 0, "Receive overrun events");
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "_rxnobufs", CTLFLAG_RD,
- &sc->rxnobufs, 0,
- "Receive buf queue empty events");
+ &sc->rxnobufs, 0, "Receive buf queue empty events");
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "_rxdmamapfails", CTLFLAG_RD,
- &sc->rxdmamapfails, 0,
- "Receive DMA map failures");
+ &sc->rxdmamapfails, 0, "Receive DMA map failures");
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "_txfull", CTLFLAG_RD,
- &sc->txfull, 0,
- "Transmit ring full events");
+ &sc->txfull, 0, "Transmit ring full events");
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "_txdmamapfails", CTLFLAG_RD,
- &sc->txdmamapfails, 0,
- "Transmit DMA map failures");
+ &sc->txdmamapfails, 0, "Transmit DMA map failures");
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "_txdefrags", CTLFLAG_RD,
- &sc->txdefrags, 0,
- "Transmit m_defrag() calls");
+ &sc->txdefrags, 0, "Transmit m_defrag() calls");
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "_txdefragfails", CTLFLAG_RD,
- &sc->txdefragfails, 0,
- "Transmit m_defrag() failures");
+ &sc->txdefragfails, 0, "Transmit m_defrag() failures");
tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats",
CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "GEM statistics");
child = SYSCTL_CHILDREN(tree);
SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_bytes", CTLFLAG_RD,
- &sc->stats.tx_bytes, "Total bytes transmitted");
+ &sc->stats.tx_bytes, "Total bytes transmitted");
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames", CTLFLAG_RD,
- &sc->stats.tx_frames, 0, "Total frames transmitted");
+ &sc->stats.tx_frames, 0, "Total frames transmitted");
+
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_bcast", CTLFLAG_RD,
- &sc->stats.tx_frames_bcast, 0,
- "Number broadcast frames transmitted");
+ &sc->stats.tx_frames_bcast, 0,
+ "Number broadcast frames transmitted");
+
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_multi", CTLFLAG_RD,
- &sc->stats.tx_frames_multi, 0,
- "Number multicast frames transmitted");
+ &sc->stats.tx_frames_multi, 0,
+ "Number multicast frames transmitted");
+
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_pause",
- CTLFLAG_RD, &sc->stats.tx_frames_pause, 0,
- "Number pause frames transmitted");
+ CTLFLAG_RD, &sc->stats.tx_frames_pause, 0,
+ "Number pause frames transmitted");
+
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_64b", CTLFLAG_RD,
- &sc->stats.tx_frames_64b, 0,
- "Number frames transmitted of size 64 bytes or less");
+ &sc->stats.tx_frames_64b, 0,
+ "Number frames transmitted of size 64 bytes or less");
+
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_65to127b", CTLFLAG_RD,
- &sc->stats.tx_frames_65to127b, 0,
- "Number frames transmitted of size 65-127 bytes");
+ &sc->stats.tx_frames_65to127b, 0,
+ "Number frames transmitted of size 65-127 bytes");
+
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_128to255b",
- CTLFLAG_RD, &sc->stats.tx_frames_128to255b, 0,
- "Number frames transmitted of size 128-255 bytes");
+ CTLFLAG_RD, &sc->stats.tx_frames_128to255b, 0,
+ "Number frames transmitted of size 128-255 bytes");
+
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_256to511b",
- CTLFLAG_RD, &sc->stats.tx_frames_256to511b, 0,
- "Number frames transmitted of size 256-511 bytes");
+ CTLFLAG_RD, &sc->stats.tx_frames_256to511b, 0,
+ "Number frames transmitted of size 256-511 bytes");
+
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_512to1023b",
- CTLFLAG_RD, &sc->stats.tx_frames_512to1023b, 0,
- "Number frames transmitted of size 512-1023 bytes");
+ CTLFLAG_RD, &sc->stats.tx_frames_512to1023b, 0,
+ "Number frames transmitted of size 512-1023 bytes");
+
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_1024to1536b",
- CTLFLAG_RD, &sc->stats.tx_frames_1024to1536b, 0,
- "Number frames transmitted of size 1024-1536 bytes");
+ CTLFLAG_RD, &sc->stats.tx_frames_1024to1536b, 0,
+ "Number frames transmitted of size 1024-1536 bytes");
+
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_under_runs",
- CTLFLAG_RD, &sc->stats.tx_under_runs, 0,
- "Number transmit under-run events");
+ CTLFLAG_RD, &sc->stats.tx_under_runs, 0,
+ "Number transmit under-run events");
+
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_single_collisn",
- CTLFLAG_RD, &sc->stats.tx_single_collisn, 0,
- "Number single-collision transmit frames");
+ CTLFLAG_RD, &sc->stats.tx_single_collisn, 0,
+ "Number single-collision transmit frames");
+
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_multi_collisn",
- CTLFLAG_RD, &sc->stats.tx_multi_collisn, 0,
- "Number multi-collision transmit frames");
+ CTLFLAG_RD, &sc->stats.tx_multi_collisn, 0,
+ "Number multi-collision transmit frames");
+
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_excsv_collisn",
- CTLFLAG_RD, &sc->stats.tx_excsv_collisn, 0,
- "Number excessive collision transmit frames");
+ CTLFLAG_RD, &sc->stats.tx_excsv_collisn, 0,
+ "Number excessive collision transmit frames");
+
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_late_collisn",
- CTLFLAG_RD, &sc->stats.tx_late_collisn, 0,
- "Number late-collision transmit frames");
+ CTLFLAG_RD, &sc->stats.tx_late_collisn, 0,
+ "Number late-collision transmit frames");
+
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_deferred_frames",
- CTLFLAG_RD, &sc->stats.tx_deferred_frames, 0,
- "Number deferred transmit frames");
+ CTLFLAG_RD, &sc->stats.tx_deferred_frames, 0,
+ "Number deferred transmit frames");
+
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_carrier_sense_errs",
- CTLFLAG_RD, &sc->stats.tx_carrier_sense_errs, 0,
- "Number carrier sense errors on transmit");
+ CTLFLAG_RD, &sc->stats.tx_carrier_sense_errs, 0,
+ "Number carrier sense errors on transmit");
SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_bytes", CTLFLAG_RD,
- &sc->stats.rx_bytes, "Total bytes received");
+ &sc->stats.rx_bytes, "Total bytes received");
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames", CTLFLAG_RD,
- &sc->stats.rx_frames, 0, "Total frames received");
+ &sc->stats.rx_frames, 0, "Total frames received");
+
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_bcast",
- CTLFLAG_RD, &sc->stats.rx_frames_bcast, 0,
- "Number broadcast frames received");
+ CTLFLAG_RD, &sc->stats.rx_frames_bcast, 0,
+ "Number broadcast frames received");
+
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_multi",
- CTLFLAG_RD, &sc->stats.rx_frames_multi, 0,
- "Number multicast frames received");
+ CTLFLAG_RD, &sc->stats.rx_frames_multi, 0,
+ "Number multicast frames received");
+
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_pause",
- CTLFLAG_RD, &sc->stats.rx_frames_pause, 0,
- "Number pause frames received");
+ CTLFLAG_RD, &sc->stats.rx_frames_pause, 0,
+ "Number pause frames received");
+
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_64b",
- CTLFLAG_RD, &sc->stats.rx_frames_64b, 0,
- "Number frames received of size 64 bytes or less");
+ CTLFLAG_RD, &sc->stats.rx_frames_64b, 0,
+ "Number frames received of size 64 bytes or less");
+
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_65to127b",
- CTLFLAG_RD, &sc->stats.rx_frames_65to127b, 0,
- "Number frames received of size 65-127 bytes");
+ CTLFLAG_RD, &sc->stats.rx_frames_65to127b, 0,
+ "Number frames received of size 65-127 bytes");
+
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_128to255b",
- CTLFLAG_RD, &sc->stats.rx_frames_128to255b, 0,
- "Number frames received of size 128-255 bytes");
+ CTLFLAG_RD, &sc->stats.rx_frames_128to255b, 0,
+ "Number frames received of size 128-255 bytes");
+
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_256to511b",
- CTLFLAG_RD, &sc->stats.rx_frames_256to511b, 0,
- "Number frames received of size 256-511 bytes");
+ CTLFLAG_RD, &sc->stats.rx_frames_256to511b, 0,
+ "Number frames received of size 256-511 bytes");
+
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_512to1023b",
- CTLFLAG_RD, &sc->stats.rx_frames_512to1023b, 0,
- "Number frames received of size 512-1023 bytes");
+ CTLFLAG_RD, &sc->stats.rx_frames_512to1023b, 0,
+ "Number frames received of size 512-1023 bytes");
+
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_1024to1536b",
- CTLFLAG_RD, &sc->stats.rx_frames_1024to1536b, 0,
- "Number frames received of size 1024-1536 bytes");
+ CTLFLAG_RD, &sc->stats.rx_frames_1024to1536b, 0,
+ "Number frames received of size 1024-1536 bytes");
+
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_undersize",
- CTLFLAG_RD, &sc->stats.rx_frames_undersize, 0,
- "Number undersize frames received");
+ CTLFLAG_RD, &sc->stats.rx_frames_undersize, 0,
+ "Number undersize frames received");
+
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_oversize",
- CTLFLAG_RD, &sc->stats.rx_frames_oversize, 0,
- "Number oversize frames received");
+ CTLFLAG_RD, &sc->stats.rx_frames_oversize, 0,
+ "Number oversize frames received");
+
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_jabber",
- CTLFLAG_RD, &sc->stats.rx_frames_jabber, 0,
- "Number jabber frames received");
+ CTLFLAG_RD, &sc->stats.rx_frames_jabber, 0,
+ "Number jabber frames received");
+
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_fcs_errs",
- CTLFLAG_RD, &sc->stats.rx_frames_fcs_errs, 0,
- "Number frames received with FCS errors");
+ CTLFLAG_RD, &sc->stats.rx_frames_fcs_errs, 0,
+ "Number frames received with FCS errors");
+
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_length_errs",
- CTLFLAG_RD, &sc->stats.rx_frames_length_errs, 0,
- "Number frames received with length errors");
+ CTLFLAG_RD, &sc->stats.rx_frames_length_errs, 0,
+ "Number frames received with length errors");
+
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_symbol_errs",
- CTLFLAG_RD, &sc->stats.rx_symbol_errs, 0,
- "Number receive symbol errors");
+ CTLFLAG_RD, &sc->stats.rx_symbol_errs, 0,
+ "Number receive symbol errors");
+
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_align_errs",
- CTLFLAG_RD, &sc->stats.rx_align_errs, 0,
- "Number receive alignment errors");
+ CTLFLAG_RD, &sc->stats.rx_align_errs, 0,
+ "Number receive alignment errors");
+
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_resource_errs",
- CTLFLAG_RD, &sc->stats.rx_resource_errs, 0,
- "Number frames received when no rx buffer available");
+ CTLFLAG_RD, &sc->stats.rx_resource_errs, 0,
+ "Number frames received when no rx buffer available");
+
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_overrun_errs",
- CTLFLAG_RD, &sc->stats.rx_overrun_errs, 0,
- "Number frames received but not copied due to "
- "receive overrun");
+ CTLFLAG_RD, &sc->stats.rx_overrun_errs, 0,
+ "Number frames received but not copied due to receive overrun");
+
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_ip_hdr_csum_errs",
- CTLFLAG_RD, &sc->stats.rx_ip_hdr_csum_errs, 0,
- "Number frames received with IP header checksum "
- "errors");
+ CTLFLAG_RD, &sc->stats.rx_ip_hdr_csum_errs, 0,
+ "Number frames received with IP header checksum errors");
+
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_tcp_csum_errs",
- CTLFLAG_RD, &sc->stats.rx_tcp_csum_errs, 0,
- "Number frames received with TCP checksum errors");
+ CTLFLAG_RD, &sc->stats.rx_tcp_csum_errs, 0,
+ "Number frames received with TCP checksum errors");
+
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_udp_csum_errs",
- CTLFLAG_RD, &sc->stats.rx_udp_csum_errs, 0,
- "Number frames received with UDP checksum errors");
+ CTLFLAG_RD, &sc->stats.rx_udp_csum_errs, 0,
+ "Number frames received with UDP checksum errors");
}
@@ -1661,7 +1663,7 @@
/* Get memory resource. */
rid = 0;
sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
- RF_ACTIVE);
+ RF_ACTIVE);
if (sc->mem_res == NULL) {
device_printf(dev, "could not allocate memory resources.\n");
return (ENOMEM);
@@ -1669,8 +1671,7 @@
/* Get IRQ resource. */
rid = 0;
- sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
- RF_ACTIVE);
+ sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE);
if (sc->irq_res == NULL) {
device_printf(dev, "could not allocate interrupt resource.\n");
cgem_detach(dev);
@@ -1691,14 +1692,14 @@
if_setioctlfn(ifp, cgem_ioctl);
if_setstartfn(ifp, cgem_start);
if_setcapabilitiesbit(ifp, IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6 |
- IFCAP_VLAN_MTU | IFCAP_VLAN_HWCSUM, 0);
+ IFCAP_VLAN_MTU | IFCAP_VLAN_HWCSUM, 0);
if_setsendqlen(ifp, CGEM_NUM_TX_DESCS);
if_setsendqready(ifp);
/* Disable hardware checksumming by default. */
if_sethwassist(ifp, 0);
if_setcapenable(ifp, if_getcapabilities(ifp) &
- ~(IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6 | IFCAP_VLAN_HWCSUM));
+ ~(IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6 | IFCAP_VLAN_HWCSUM));
sc->if_old_flags = if_getflags(ifp);
sc->rxbufs = DEFAULT_NUM_RX_BUFS;
@@ -1711,8 +1712,8 @@
/* Attach phy to mii bus. */
err = mii_attach(dev, &sc->miibus, ifp,
- cgem_ifmedia_upd, cgem_ifmedia_sts,
- BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 0);
+ cgem_ifmedia_upd, cgem_ifmedia_sts, BMSR_DEFCAPMASK,
+ MII_PHY_ANY, MII_OFFSET_ANY, 0);
if (err) {
device_printf(dev, "attaching PHYs failed\n");
cgem_detach(dev);
@@ -1736,7 +1737,7 @@
ether_ifattach(ifp, eaddr);
err = bus_setup_intr(dev, sc->irq_res, INTR_TYPE_NET | INTR_MPSAFE |
- INTR_EXCL, NULL, cgem_intr, sc, &sc->intrhand);
+ INTR_EXCL, NULL, cgem_intr, sc, &sc->intrhand);
if (err) {
device_printf(dev, "could not set interrupt handler.\n");
ether_ifdetach(ifp);
@@ -1775,14 +1776,14 @@
/* Release resources. */
if (sc->mem_res != NULL) {
bus_release_resource(dev, SYS_RES_MEMORY,
- rman_get_rid(sc->mem_res), sc->mem_res);
+ rman_get_rid(sc->mem_res), sc->mem_res);
sc->mem_res = NULL;
}
if (sc->irq_res != NULL) {
if (sc->intrhand)
bus_teardown_intr(dev, sc->irq_res, sc->intrhand);
bus_release_resource(dev, SYS_RES_IRQ,
- rman_get_rid(sc->irq_res), sc->irq_res);
+ rman_get_rid(sc->irq_res), sc->irq_res);
sc->irq_res = NULL;
}
@@ -1790,7 +1791,7 @@
if (sc->rxring != NULL) {
if (sc->rxring_physaddr != 0) {
bus_dmamap_unload(sc->desc_dma_tag,
- sc->rxring_dma_map);
+ sc->rxring_dma_map);
sc->rxring_physaddr = 0;
}
bus_dmamem_free(sc->desc_dma_tag, sc->rxring,
@@ -1799,14 +1800,14 @@
for (i = 0; i < CGEM_NUM_RX_DESCS; i++)
if (sc->rxring_m_dmamap[i] != NULL) {
bus_dmamap_destroy(sc->mbuf_dma_tag,
- sc->rxring_m_dmamap[i]);
+ sc->rxring_m_dmamap[i]);
sc->rxring_m_dmamap[i] = NULL;
}
}
if (sc->txring != NULL) {
if (sc->txring_physaddr != 0) {
bus_dmamap_unload(sc->desc_dma_tag,
- sc->txring_dma_map);
+ sc->txring_dma_map);
sc->txring_physaddr = 0;
}
bus_dmamem_free(sc->desc_dma_tag, sc->txring,
@@ -1815,7 +1816,7 @@
for (i = 0; i < CGEM_NUM_TX_DESCS; i++)
if (sc->txring_m_dmamap[i] != NULL) {
bus_dmamap_destroy(sc->mbuf_dma_tag,
- sc->txring_m_dmamap[i]);
+ sc->txring_m_dmamap[i]);
sc->txring_m_dmamap[i] = NULL;
}
}
Index: head/sys/dev/cadence/if_cgem_hw.h
===================================================================
--- head/sys/dev/cadence/if_cgem_hw.h
+++ head/sys/dev/cadence/if_cgem_hw.h
@@ -42,160 +42,160 @@
/* Cadence GEM hardware register definitions. */
#define CGEM_NET_CTRL 0x000 /* Network Control */
-#define CGEM_NET_CTRL_FLUSH_DPRAM_PKT (1<<18)
-#define CGEM_NET_CTRL_TX_PFC_PRI_PAUSE_FRAME (1<<17)
-#define CGEM_NET_CTRL_EN_PFC_PRI_PAUSE_RX (1<<16)
-#define CGEM_NET_CTRL_STORE_RX_TSTAMP (1<<15)
-#define CGEM_NET_CTRL_TX_ZEROQ_PAUSE_FRAME (1<<12)
-#define CGEM_NET_CTRL_TX_PAUSE_FRAME (1<<11)
-#define CGEM_NET_CTRL_TX_HALT (1<<10)
-#define CGEM_NET_CTRL_START_TX (1<<9)
-#define CGEM_NET_CTRL_BACK_PRESSURE (1<<8)
-#define CGEM_NET_CTRL_WREN_STAT_REGS (1<<7)
-#define CGEM_NET_CTRL_INCR_STAT_REGS (1<<6)
-#define CGEM_NET_CTRL_CLR_STAT_REGS (1<<5)
-#define CGEM_NET_CTRL_MGMT_PORT_EN (1<<4)
-#define CGEM_NET_CTRL_TX_EN (1<<3)
-#define CGEM_NET_CTRL_RX_EN (1<<2)
-#define CGEM_NET_CTRL_LOOP_LOCAL (1<<1)
+#define CGEM_NET_CTRL_FLUSH_DPRAM_PKT (1 << 18)
+#define CGEM_NET_CTRL_TX_PFC_PRI_PAUSE_FRAME (1 << 17)
+#define CGEM_NET_CTRL_EN_PFC_PRI_PAUSE_RX (1 << 16)
+#define CGEM_NET_CTRL_STORE_RX_TSTAMP (1 << 15)
+#define CGEM_NET_CTRL_TX_ZEROQ_PAUSE_FRAME (1 << 12)
+#define CGEM_NET_CTRL_TX_PAUSE_FRAME (1 << 11)
+#define CGEM_NET_CTRL_TX_HALT (1 << 10)
+#define CGEM_NET_CTRL_START_TX (1 << 9)
+#define CGEM_NET_CTRL_BACK_PRESSURE (1 << 8)
+#define CGEM_NET_CTRL_WREN_STAT_REGS (1 << 7)
+#define CGEM_NET_CTRL_INCR_STAT_REGS (1 << 6)
+#define CGEM_NET_CTRL_CLR_STAT_REGS (1 << 5)
+#define CGEM_NET_CTRL_MGMT_PORT_EN (1 << 4)
+#define CGEM_NET_CTRL_TX_EN (1 << 3)
+#define CGEM_NET_CTRL_RX_EN (1 << 2)
+#define CGEM_NET_CTRL_LOOP_LOCAL (1 << 1)
#define CGEM_NET_CFG 0x004 /* Netowrk Configuration */
-#define CGEM_NET_CFG_UNIDIR_EN (1<<31)
-#define CGEM_NET_CFG_IGNORE_IPG_RX_ER (1<<30)
-#define CGEM_NET_CFG_RX_BAD_PREAMBLE (1<<29)
-#define CGEM_NET_CFG_IPG_STRETCH_EN (1<<28)
-#define CGEM_NET_CFG_SGMII_EN (1<<27)
-#define CGEM_NET_CFG_IGNORE_RX_FCS (1<<26)
-#define CGEM_NET_CFG_RX_HD_WHILE_TX (1<<25)
-#define CGEM_NET_CFG_RX_CHKSUM_OFFLD_EN (1<<24)
-#define CGEM_NET_CFG_DIS_CP_PAUSE_FRAME (1<<23)
-#define CGEM_NET_CFG_DBUS_WIDTH_32 (0<<21)
-#define CGEM_NET_CFG_DBUS_WIDTH_64 (1<<21)
-#define CGEM_NET_CFG_DBUS_WIDTH_128 (2<<21)
-#define CGEM_NET_CFG_DBUS_WIDTH_MASK (3<<21)
-#define CGEM_NET_CFG_MDC_CLK_DIV_8 (0<<18)
-#define CGEM_NET_CFG_MDC_CLK_DIV_16 (1<<18)
-#define CGEM_NET_CFG_MDC_CLK_DIV_32 (2<<18)
-#define CGEM_NET_CFG_MDC_CLK_DIV_48 (3<<18)
-#define CGEM_NET_CFG_MDC_CLK_DIV_64 (4<<18)
-#define CGEM_NET_CFG_MDC_CLK_DIV_96 (5<<18)
-#define CGEM_NET_CFG_MDC_CLK_DIV_128 (6<<18)
-#define CGEM_NET_CFG_MDC_CLK_DIV_224 (7<<18)
-#define CGEM_NET_CFG_MDC_CLK_DIV_MASK (7<<18)
-#define CGEM_NET_CFG_FCS_REMOVE (1<<17)
-#define CGEM_NET_CFG_LEN_ERR_FRAME_DISC (1<<16)
+#define CGEM_NET_CFG_UNIDIR_EN (1U << 31)
+#define CGEM_NET_CFG_IGNORE_IPG_RX_ER (1 << 30)
+#define CGEM_NET_CFG_RX_BAD_PREAMBLE (1 << 29)
+#define CGEM_NET_CFG_IPG_STRETCH_EN (1 << 28)
+#define CGEM_NET_CFG_SGMII_EN (1 << 27)
+#define CGEM_NET_CFG_IGNORE_RX_FCS (1 << 26)
+#define CGEM_NET_CFG_RX_HD_WHILE_TX (1 << 25)
+#define CGEM_NET_CFG_RX_CHKSUM_OFFLD_EN (1 << 24)
+#define CGEM_NET_CFG_DIS_CP_PAUSE_FRAME (1 << 23)
+#define CGEM_NET_CFG_DBUS_WIDTH_32 (0 << 21)
+#define CGEM_NET_CFG_DBUS_WIDTH_64 (1 << 21)
+#define CGEM_NET_CFG_DBUS_WIDTH_128 (2 << 21)
+#define CGEM_NET_CFG_DBUS_WIDTH_MASK (3 << 21)
+#define CGEM_NET_CFG_MDC_CLK_DIV_8 (0 << 18)
+#define CGEM_NET_CFG_MDC_CLK_DIV_16 (1 << 18)
+#define CGEM_NET_CFG_MDC_CLK_DIV_32 (2 << 18)
+#define CGEM_NET_CFG_MDC_CLK_DIV_48 (3 << 18)
+#define CGEM_NET_CFG_MDC_CLK_DIV_64 (4 << 18)
+#define CGEM_NET_CFG_MDC_CLK_DIV_96 (5 << 18)
+#define CGEM_NET_CFG_MDC_CLK_DIV_128 (6 << 18)
+#define CGEM_NET_CFG_MDC_CLK_DIV_224 (7 << 18)
+#define CGEM_NET_CFG_MDC_CLK_DIV_MASK (7 << 18)
+#define CGEM_NET_CFG_FCS_REMOVE (1 << 17)
+#define CGEM_NET_CFG_LEN_ERR_FRAME_DISC (1 << 16)
#define CGEM_NET_CFG_RX_BUF_OFFSET_SHFT 14
-#define CGEM_NET_CFG_RX_BUF_OFFSET_MASK (3<<14)
-#define CGEM_NET_CFG_RX_BUF_OFFSET(n) ((n)<<14)
-#define CGEM_NET_CFG_PAUSE_EN (1<<13)
-#define CGEM_NET_CFG_RETRY_TEST (1<<12)
-#define CGEM_NET_CFG_PCS_SEL (1<<11)
-#define CGEM_NET_CFG_GIGE_EN (1<<10)
-#define CGEM_NET_CFG_EXT_ADDR_MATCH_EN (1<<9)
-#define CGEM_NET_CFG_1536RXEN (1<<8)
-#define CGEM_NET_CFG_UNI_HASH_EN (1<<7)
-#define CGEM_NET_CFG_MULTI_HASH_EN (1<<6)
-#define CGEM_NET_CFG_NO_BCAST (1<<5)
-#define CGEM_NET_CFG_COPY_ALL (1<<4)
-#define CGEM_NET_CFG_DISC_NON_VLAN (1<<2)
-#define CGEM_NET_CFG_FULL_DUPLEX (1<<1)
-#define CGEM_NET_CFG_SPEED100 (1<<0)
+#define CGEM_NET_CFG_RX_BUF_OFFSET_MASK (3 << 14)
+#define CGEM_NET_CFG_RX_BUF_OFFSET(n) ((n) << 14)
+#define CGEM_NET_CFG_PAUSE_EN (1 << 13)
+#define CGEM_NET_CFG_RETRY_TEST (1 << 12)
+#define CGEM_NET_CFG_PCS_SEL (1 << 11)
+#define CGEM_NET_CFG_GIGE_EN (1 << 10)
+#define CGEM_NET_CFG_EXT_ADDR_MATCH_EN (1 << 9)
+#define CGEM_NET_CFG_1536RXEN (1 << 8)
+#define CGEM_NET_CFG_UNI_HASH_EN (1 << 7)
+#define CGEM_NET_CFG_MULTI_HASH_EN (1 << 6)
+#define CGEM_NET_CFG_NO_BCAST (1 << 5)
+#define CGEM_NET_CFG_COPY_ALL (1 << 4)
+#define CGEM_NET_CFG_DISC_NON_VLAN (1 << 2)
+#define CGEM_NET_CFG_FULL_DUPLEX (1 << 1)
+#define CGEM_NET_CFG_SPEED100 (1 << 0)
#define CGEM_NET_STAT 0x008 /* Network Status */
-#define CGEM_NET_STAT_PFC_PRI_PAUSE_NEG (1<<6)
-#define CGEM_NET_STAT_PCS_AUTONEG_PAUSE_TX_RES (1<<5)
-#define CGEM_NET_STAT_PCS_AUTONEG_PAUSE_RX_RES (1<<4)
-#define CGEM_NET_STAT_PCS_AUTONEG_DUP_RES (1<<3)
-#define CGEM_NET_STAT_PHY_MGMT_IDLE (1<<2)
-#define CGEM_NET_STAT_MDIO_IN_PIN_STATUS (1<<1)
-#define CGEM_NET_STAT_PCS_LINK_STATE (1<<0)
+#define CGEM_NET_STAT_PFC_PRI_PAUSE_NEG (1 << 6)
+#define CGEM_NET_STAT_PCS_AUTONEG_PAUSE_TX_RES (1 << 5)
+#define CGEM_NET_STAT_PCS_AUTONEG_PAUSE_RX_RES (1 << 4)
+#define CGEM_NET_STAT_PCS_AUTONEG_DUP_RES (1 << 3)
+#define CGEM_NET_STAT_PHY_MGMT_IDLE (1 << 2)
+#define CGEM_NET_STAT_MDIO_IN_PIN_STATUS (1 << 1)
+#define CGEM_NET_STAT_PCS_LINK_STATE (1 << 0)
#define CGEM_USER_IO 0x00C /* User I/O */
#define CGEM_DMA_CFG 0x010 /* DMA Config */
-#define CGEM_DMA_CFG_DISC_WHEN_NO_AHB (1<<24)
+#define CGEM_DMA_CFG_DISC_WHEN_NO_AHB (1 << 24)
#define CGEM_DMA_CFG_RX_BUF_SIZE_SHIFT 16
-#define CGEM_DMA_CFG_RX_BUF_SIZE_MASK (0xff<<16)
-#define CGEM_DMA_CFG_RX_BUF_SIZE(sz) ((((sz) + 63) / 64) << 16)
-#define CGEM_DMA_CFG_CHKSUM_GEN_OFFLOAD_EN (1<<11)
-#define CGEM_DMA_CFG_TX_PKTBUF_MEMSZ_SEL (1<<10)
-#define CGEM_DMA_CFG_RX_PKTBUF_MEMSZ_SEL_1K (0<<8)
-#define CGEM_DMA_CFG_RX_PKTBUF_MEMSZ_SEL_2K (1<<8)
-#define CGEM_DMA_CFG_RX_PKTBUF_MEMSZ_SEL_4K (2<<8)
-#define CGEM_DMA_CFG_RX_PKTBUF_MEMSZ_SEL_8K (3<<8)
-#define CGEM_DMA_CFG_RX_PKTBUF_MEMSZ_SEL_MASK (3<<8)
-#define CGEM_DMA_CFG_AHB_ENDIAN_SWAP_PKT_EN (1<<7)
-#define CGEM_DMA_CFG_AHB_ENDIAN_SWAP_MGMT_EN (1<<6)
-#define CGEM_DMA_CFG_AHB_FIXED_BURST_LEN_1 (1<<0)
-#define CGEM_DMA_CFG_AHB_FIXED_BURST_LEN_4 (4<<0)
-#define CGEM_DMA_CFG_AHB_FIXED_BURST_LEN_8 (8<<0)
-#define CGEM_DMA_CFG_AHB_FIXED_BURST_LEN_16 (16<<0)
-#define CGEM_DMA_CFG_AHB_FIXED_BURST_LEN_MASK (0x1f<<0)
+#define CGEM_DMA_CFG_RX_BUF_SIZE_MASK (0xff << 16)
+#define CGEM_DMA_CFG_RX_BUF_SIZE(sz) ((((sz) + 63) / 64) << 16)
+#define CGEM_DMA_CFG_CHKSUM_GEN_OFFLOAD_EN (1 << 11)
+#define CGEM_DMA_CFG_TX_PKTBUF_MEMSZ_SEL (1 << 10)
+#define CGEM_DMA_CFG_RX_PKTBUF_MEMSZ_SEL_1K (0 << 8)
+#define CGEM_DMA_CFG_RX_PKTBUF_MEMSZ_SEL_2K (1 << 8)
+#define CGEM_DMA_CFG_RX_PKTBUF_MEMSZ_SEL_4K (2 << 8)
+#define CGEM_DMA_CFG_RX_PKTBUF_MEMSZ_SEL_8K (3 << 8)
+#define CGEM_DMA_CFG_RX_PKTBUF_MEMSZ_SEL_MASK (3 << 8)
+#define CGEM_DMA_CFG_AHB_ENDIAN_SWAP_PKT_EN (1 << 7)
+#define CGEM_DMA_CFG_AHB_ENDIAN_SWAP_MGMT_EN (1 << 6)
+#define CGEM_DMA_CFG_AHB_FIXED_BURST_LEN_1 (1 << 0)
+#define CGEM_DMA_CFG_AHB_FIXED_BURST_LEN_4 (4 << 0)
+#define CGEM_DMA_CFG_AHB_FIXED_BURST_LEN_8 (8 << 0)
+#define CGEM_DMA_CFG_AHB_FIXED_BURST_LEN_16 (16 << 0)
+#define CGEM_DMA_CFG_AHB_FIXED_BURST_LEN_MASK (0x1f << 0)
#define CGEM_TX_STAT 0x014 /* Transmit Status */
-#define CGEM_TX_STAT_HRESP_NOT_OK (1<<8)
-#define CGEM_TX_STAT_LATE_COLL (1<<7)
-#define CGEM_TX_STAT_UNDERRUN (1<<6)
-#define CGEM_TX_STAT_COMPLETE (1<<5)
-#define CGEM_TX_STAT_CORRUPT_AHB_ERR (1<<4)
-#define CGEM_TX_STAT_GO (1<<3)
-#define CGEM_TX_STAT_RETRY_LIMIT_EXC (1<<2)
-#define CGEM_TX_STAT_COLLISION (1<<1)
-#define CGEM_TX_STAT_USED_BIT_READ (1<<0)
+#define CGEM_TX_STAT_HRESP_NOT_OK (1 << 8)
+#define CGEM_TX_STAT_LATE_COLL (1 << 7)
+#define CGEM_TX_STAT_UNDERRUN (1 << 6)
+#define CGEM_TX_STAT_COMPLETE (1 << 5)
+#define CGEM_TX_STAT_CORRUPT_AHB_ERR (1 << 4)
+#define CGEM_TX_STAT_GO (1 << 3)
+#define CGEM_TX_STAT_RETRY_LIMIT_EXC (1 << 2)
+#define CGEM_TX_STAT_COLLISION (1 << 1)
+#define CGEM_TX_STAT_USED_BIT_READ (1 << 0)
#define CGEM_TX_STAT_ALL 0x1ff
#define CGEM_RX_QBAR 0x018 /* Receive Buf Q Base Addr */
#define CGEM_TX_QBAR 0x01C /* Transmit Buf Q Base Addr */
#define CGEM_RX_STAT 0x020 /* Receive Status */
-#define CGEM_RX_STAT_HRESP_NOT_OK (1<<3)
-#define CGEM_RX_STAT_OVERRUN (1<<2)
-#define CGEM_RX_STAT_FRAME_RECD (1<<1)
-#define CGEM_RX_STAT_BUF_NOT_AVAIL (1<<0)
+#define CGEM_RX_STAT_HRESP_NOT_OK (1 << 3)
+#define CGEM_RX_STAT_OVERRUN (1 << 2)
+#define CGEM_RX_STAT_FRAME_RECD (1 << 1)
+#define CGEM_RX_STAT_BUF_NOT_AVAIL (1 << 0)
#define CGEM_RX_STAT_ALL 0xf
#define CGEM_INTR_STAT 0x024 /* Interrupt Status */
#define CGEM_INTR_EN 0x028 /* Interrupt Enable */
#define CGEM_INTR_DIS 0x02C /* Interrupt Disable */
#define CGEM_INTR_MASK 0x030 /* Interrupt Mask */
-#define CGEM_INTR_TSU_SEC_INCR (1<<26)
-#define CGEM_INTR_PDELAY_RESP_TX (1<<25)
-#define CGEM_INTR_PDELAY_REQ_TX (1<<24)
-#define CGEM_INTR_PDELAY_RESP_RX (1<<23)
-#define CGEM_INTR_PDELAY_REQ_RX (1<<22)
-#define CGEM_INTR_SYNX_TX (1<<21)
-#define CGEM_INTR_DELAY_REQ_TX (1<<20)
-#define CGEM_INTR_SYNC_RX (1<<19)
-#define CGEM_INTR_DELAY_REQ_RX (1<<18)
-#define CGEM_INTR_PARTNER_PG_RX (1<<17)
-#define CGEM_INTR_AUTONEG_COMPL (1<<16)
-#define CGEM_INTR_EXT_INTR (1<<15)
-#define CGEM_INTR_PAUSE_TX (1<<14)
-#define CGEM_INTR_PAUSE_ZERO (1<<13)
-#define CGEM_INTR_PAUSE_NONZEROQ_RX (1<<12)
-#define CGEM_INTR_HRESP_NOT_OK (1<<11)
-#define CGEM_INTR_RX_OVERRUN (1<<10)
-#define CGEM_INTR_LINK_CHNG (1<<9)
-#define CGEM_INTR_TX_COMPLETE (1<<7)
-#define CGEM_INTR_TX_CORRUPT_AHB_ERR (1<<6)
-#define CGEM_INTR_RETRY_EX_LATE_COLLISION (1<<5)
-#define CGEM_INTR_TX_USED_READ (1<<3)
-#define CGEM_INTR_RX_USED_READ (1<<2)
-#define CGEM_INTR_RX_COMPLETE (1<<1)
-#define CGEM_INTR_MGMT_SENT (1<<0)
+#define CGEM_INTR_TSU_SEC_INCR (1 << 26)
+#define CGEM_INTR_PDELAY_RESP_TX (1 << 25)
+#define CGEM_INTR_PDELAY_REQ_TX (1 << 24)
+#define CGEM_INTR_PDELAY_RESP_RX (1 << 23)
+#define CGEM_INTR_PDELAY_REQ_RX (1 << 22)
+#define CGEM_INTR_SYNX_TX (1 << 21)
+#define CGEM_INTR_DELAY_REQ_TX (1 << 20)
+#define CGEM_INTR_SYNC_RX (1 << 19)
+#define CGEM_INTR_DELAY_REQ_RX (1 << 18)
+#define CGEM_INTR_PARTNER_PG_RX (1 << 17)
+#define CGEM_INTR_AUTONEG_COMPL (1 << 16)
+#define CGEM_INTR_EXT_INTR (1 << 15)
+#define CGEM_INTR_PAUSE_TX (1 << 14)
+#define CGEM_INTR_PAUSE_ZERO (1 << 13)
+#define CGEM_INTR_PAUSE_NONZEROQ_RX (1 << 12)
+#define CGEM_INTR_HRESP_NOT_OK (1 << 11)
+#define CGEM_INTR_RX_OVERRUN (1 << 10)
+#define CGEM_INTR_LINK_CHNG (1 << 9)
+#define CGEM_INTR_TX_COMPLETE (1 << 7)
+#define CGEM_INTR_TX_CORRUPT_AHB_ERR (1 << 6)
+#define CGEM_INTR_RETRY_EX_LATE_COLLISION (1 << 5)
+#define CGEM_INTR_TX_USED_READ (1 << 3)
+#define CGEM_INTR_RX_USED_READ (1 << 2)
+#define CGEM_INTR_RX_COMPLETE (1 << 1)
+#define CGEM_INTR_MGMT_SENT (1 << 0)
#define CGEM_INTR_ALL 0x7FFFEFF
#define CGEM_PHY_MAINT 0x034 /* PHY Maintenenace */
-#define CGEM_PHY_MAINT_CLAUSE_22 (1<<30)
+#define CGEM_PHY_MAINT_CLAUSE_22 (1 << 30)
#define CGEM_PHY_MAINT_OP_SHIFT 28
-#define CGEM_PHY_MAINT_OP_MASK (3<<28)
-#define CGEM_PHY_MAINT_OP_READ (2<<28)
-#define CGEM_PHY_MAINT_OP_WRITE (1<<28)
+#define CGEM_PHY_MAINT_OP_MASK (3 << 28)
+#define CGEM_PHY_MAINT_OP_READ (2 << 28)
+#define CGEM_PHY_MAINT_OP_WRITE (1 << 28)
#define CGEM_PHY_MAINT_PHY_ADDR_SHIFT 23
-#define CGEM_PHY_MAINT_PHY_ADDR_MASK (0x1f<<23)
+#define CGEM_PHY_MAINT_PHY_ADDR_MASK (0x1f << 23)
#define CGEM_PHY_MAINT_REG_ADDR_SHIFT 18
-#define CGEM_PHY_MAINT_REG_ADDR_MASK (0x1f<<18)
-#define CGEM_PHY_MAINT_MUST_10 (2<<16)
+#define CGEM_PHY_MAINT_REG_ADDR_MASK (0x1f << 18)
+#define CGEM_PHY_MAINT_MUST_10 (2 << 16)
#define CGEM_PHY_MAINT_DATA_MASK 0xffff
#define CGEM_RX_PAUSEQ 0x038 /* Received Pause Quantum */
@@ -203,30 +203,30 @@
#define CGEM_HASH_BOT 0x080 /* Hash Reg Bottom [31:0] */
#define CGEM_HASH_TOP 0x084 /* Hash Reg Top [63:32] */
-#define CGEM_SPEC_ADDR_LOW(n) (0x088+(n)*8) /* Specific Addr low */
-#define CGEM_SPEC_ADDR_HI(n) (0x08C+(n)*8) /* Specific Addr hi */
+#define CGEM_SPEC_ADDR_LOW(n) (0x088 + (n) * 8)
+#define CGEM_SPEC_ADDR_HI(n) (0x08C + (n) * 8)
#define CGEM_TYPE_ID_MATCH1 0x0A8 /* Type ID Match 1 */
-#define CGEM_TYPE_ID_MATCH_COPY_EN (1<<31)
+#define CGEM_TYPE_ID_MATCH_COPY_EN (1U << 31)
#define CGEM_TYPE_ID_MATCH2 0x0AC /* Type ID Match 2 */
#define CGEM_TYPE_ID_MATCH3 0x0B0 /* Type ID Match 3 */
#define CGEM_TYPE_ID_MATCH4 0x0B4 /* Type ID Match 4 */
#define CGEM_WAKE_ON_LAN 0x0B8 /* Wake on LAN Register */
-#define CGEM_WOL_MULTI_HASH_EN (1<<19)
-#define CGEM_WOL_SPEC_ADDR1_EN (1<<18)
-#define CGEM_WOL_ARP_REQ_EN (1<<17)
-#define CGEM_WOL_MAGIC_PKT_EN (1<<16)
+#define CGEM_WOL_MULTI_HASH_EN (1 << 19)
+#define CGEM_WOL_SPEC_ADDR1_EN (1 << 18)
+#define CGEM_WOL_ARP_REQ_EN (1 << 17)
+#define CGEM_WOL_MAGIC_PKT_EN (1 << 16)
#define CGEM_WOL_ARP_REQ_IP_ADDR_MASK 0xffff
#define CGEM_IPG_STRETCH /* IPG Stretch Register */
#define CGEM_STACKED_VLAN 0x0C0 /* Stacked VLAN Register */
-#define CGEM_STACKED_VLAN_EN (1<<31)
+#define CGEM_STACKED_VLAN_EN (1U << 31)
#define CGEM_TX_PFC_PAUSE 0x0C4 /* Transmit PFC Pause Reg */
#define CGEM_TX_PFC_PAUSEQ_SEL_SHIFT 8
-#define CGEM_TX_PFC_PAUSEQ_SEL_MASK (0xff<<8)
+#define CGEM_TX_PFC_PAUSEQ_SEL_MASK (0xff << 8)
#define CGEM_TX_PFC_PAUSE_PRI_EN_VEC_VAL_MASK 0xff
#define CGEM_SPEC_ADDR1_MASK_BOT 0x0C8 /* Specific Addr Mask1 [31:0]*/
@@ -269,7 +269,7 @@
#define CGEM_FCS_ERRS 0x190 /* Frame Check Sequence Errs */
#define CGEM_LENGTH_FIELD_ERRS 0x194 /* Length Firled Frame Errs */
#define CGEM_RX_SYMBOL_ERRS 0x198 /* Receive Symbol Errs */
-#define CGEM_ALIGN_ERRS 0x19C /* Alignment Errors */
+#define CGEM_ALIGN_ERRS 0x19C /* Alignment Errors */
#define CGEM_RX_RESOURCE_ERRS 0x1A0 /* Receive Resoure Errors */
#define CGEM_RX_OVERRUN_ERRS 0x1A4 /* Receive Overrun Errors */
#define CGEM_IP_HDR_CKSUM_ERRS 0x1A8 /* IP Hdr Checksum Errors */
@@ -292,92 +292,92 @@
#define CGEM_DESIGN_CFG2 0x284 /* Design Configuration 2 */
#define CGEM_DESIGN_CFG2_TX_PBUF_ADDR_SHIFT 26
-#define CGEM_DESIGN_CFG2_TX_PBUF_ADDR_MASK (0xf<<26)
+#define CGEM_DESIGN_CFG2_TX_PBUF_ADDR_MASK (0xf << 26)
#define CGEM_DESIGN_CFG2_RX_PBUF_ADDR_SHIFT 22
-#define CGEM_DESIGN_CFG2_RX_PBUF_ADDR_MASK (0xf<<22)
-#define CGEM_DESIGN_CFG2_TX_PKT_BUF (1<<21)
-#define CGEM_DESIGN_CFG2_RX_PKT_BUF (1<<20)
+#define CGEM_DESIGN_CFG2_RX_PBUF_ADDR_MASK (0xf << 22)
+#define CGEM_DESIGN_CFG2_TX_PKT_BUF (1 << 21)
+#define CGEM_DESIGN_CFG2_RX_PKT_BUF (1 << 20)
#define CGEM_DESIGN_CFG2_HPROT_VAL_SHIFT 16
-#define CGEM_DESIGN_CFG2_HPROT_VAL_MASK (0xf<<16)
+#define CGEM_DESIGN_CFG2_HPROT_VAL_MASK (0xf << 16)
#define CGEM_DESIGN_CFG2_JUMBO_MAX_LEN_MASK 0xffff
#define CGEM_DESIGN_CFG3 0x288 /* Design Configuration 3 */
-#define CGEM_DESIGN_CFG3_RX_BASE2_FIFO_SZ_MASK (0xffff<<16)
+#define CGEM_DESIGN_CFG3_RX_BASE2_FIFO_SZ_MASK (0xffffU << 16)
#define CGEM_DESIGN_CFG3_RX_BASE2_FIFO_SZ_SHIFT 16
#define CGEM_DESIGN_CFG3_RX_FIFO_SIZE_MASK 0xffff
#define CGEM_DESIGN_CFG4 0x28C /* Design Configuration 4 */
#define CGEM_DESIGN_CFG4_TX_BASE2_FIFO_SZ_SHIFT 16
-#define CGEM_DESIGN_CFG4_TX_BASE2_FIFO_SZ_MASK (0xffff<<16)
+#define CGEM_DESIGN_CFG4_TX_BASE2_FIFO_SZ_MASK (0xffffU << 16)
#define CGEM_DESIGN_CFG4_TX_FIFO_SIZE_MASK 0xffff
#define CGEM_DESIGN_CFG5 0x290 /* Design Configuration 5 */
-#define CGEM_DESIGN_CFG5_TSU_CLK (1<<28)
+#define CGEM_DESIGN_CFG5_TSU_CLK (1 << 28)
#define CGEM_DESIGN_CFG5_RX_BUF_LEN_DEF_SHIFT 20
-#define CGEM_DESIGN_CFG5_RX_BUF_LEN_DEF_MASK (0xff<<20)
-#define CGEM_DESIGN_CFG5_TX_PBUF_SIZE_DEF (1<<19)
+#define CGEM_DESIGN_CFG5_RX_BUF_LEN_DEF_MASK (0xff << 20)
+#define CGEM_DESIGN_CFG5_TX_PBUF_SIZE_DEF (1 << 19)
#define CGEM_DESIGN_CFG5_RX_PBUF_SIZE_DEF_SHIFT 17
-#define CGEM_DESIGN_CFG5_RX_PBUF_SIZE_DEF_MASK (3<<17)
+#define CGEM_DESIGN_CFG5_RX_PBUF_SIZE_DEF_MASK (3 << 17)
#define CGEM_DESIGN_CFG5_ENDIAN_SWAP_DEF_SHIFT 15
-#define CGEM_DESIGN_CFG5_ENDIAN_SWAP_DEF_MASK (3<<15)
+#define CGEM_DESIGN_CFG5_ENDIAN_SWAP_DEF_MASK (3 << 15)
#define CGEM_DESIGN_CFG5_MDC_CLOCK_DIV_SHIFT 12
-#define CGEM_DESIGN_CFG5_MDC_CLOCK_DIV_MASK (7<<12)
+#define CGEM_DESIGN_CFG5_MDC_CLOCK_DIV_MASK (7 << 12)
#define CGEM_DESIGN_CFG5_DMA_BUS_WIDTH_SHIFT 10
-#define CGEM_DESIGN_CFG5_DMA_BUS_WIDTH_MASK (3<<10)
-#define CGEM_DESIGN_CFG5_PHY_IDENT (1<<9)
-#define CGEM_DESIGN_CFG5_TSU (1<<8)
+#define CGEM_DESIGN_CFG5_DMA_BUS_WIDTH_MASK (3 << 10)
+#define CGEM_DESIGN_CFG5_PHY_IDENT (1 << 9)
+#define CGEM_DESIGN_CFG5_TSU (1 << 8)
#define CGEM_DESIGN_CFG5_TX_FIFO_CNT_WIDTH_SHIFT 4
-#define CGEM_DESIGN_CFG5_TX_FIFO_CNT_WIDTH_MASK (0xf<<4)
+#define CGEM_DESIGN_CFG5_TX_FIFO_CNT_WIDTH_MASK (0xf << 4)
#define CGEM_DESIGN_CFG5_RX_FIFO_CNT_WIDTH_MASK 0xf
/* Transmit Descriptors */
struct cgem_tx_desc {
uint32_t addr;
uint32_t ctl;
-#define CGEM_TXDESC_USED (1<<31) /* done transmitting */
-#define CGEM_TXDESC_WRAP (1<<30) /* end of descr ring */
-#define CGEM_TXDESC_RETRY_ERR (1<<29)
-#define CGEM_TXDESC_AHB_ERR (1<<27)
-#define CGEM_TXDESC_LATE_COLL (1<<26)
-#define CGEM_TXDESC_CKSUM_GEN_STAT_MASK (7<<20)
-#define CGEM_TXDESC_CKSUM_GEN_STAT_VLAN_HDR_ERR (1<<20)
-#define CGEM_TXDESC_CKSUM_GEN_STAT_SNAP_HDR_ERR (2<<20)
-#define CGEM_TXDESC_CKSUM_GEN_STAT_IP_HDR_ERR (3<<20)
-#define CGEM_TXDESC_CKSUM_GEN_STAT_UNKNOWN_TYPE (4<<20)
-#define CGEM_TXDESC_CKSUM_GEN_STAT_UNSUPP_FRAG (5<<20)
-#define CGEM_TXDESC_CKSUM_GEN_STAT_NOT_TCPUDP (6<<20)
-#define CGEM_TXDESC_CKSUM_GEN_STAT_SHORT_PKT (7<<20)
-#define CGEM_TXDESC_NO_CRC_APPENDED (1<<16)
-#define CGEM_TXDESC_LAST_BUF (1<<15) /* last buf in frame */
+#define CGEM_TXDESC_USED (1U << 31) /* done txmitting */
+#define CGEM_TXDESC_WRAP (1 << 30) /* end descr ring */
+#define CGEM_TXDESC_RETRY_ERR (1 << 29)
+#define CGEM_TXDESC_AHB_ERR (1 << 27)
+#define CGEM_TXDESC_LATE_COLL (1 << 26)
+#define CGEM_TXDESC_CKSUM_GEN_STAT_MASK (7 << 20)
+#define CGEM_TXDESC_CKSUM_GEN_STAT_VLAN_HDR_ERR (1 << 20)
+#define CGEM_TXDESC_CKSUM_GEN_STAT_SNAP_HDR_ERR (2 << 20)
+#define CGEM_TXDESC_CKSUM_GEN_STAT_IP_HDR_ERR (3 << 20)
+#define CGEM_TXDESC_CKSUM_GEN_STAT_UNKNOWN_TYPE (4 << 20)
+#define CGEM_TXDESC_CKSUM_GEN_STAT_UNSUPP_FRAG (5 << 20)
+#define CGEM_TXDESC_CKSUM_GEN_STAT_NOT_TCPUDP (6 << 20)
+#define CGEM_TXDESC_CKSUM_GEN_STAT_SHORT_PKT (7 << 20)
+#define CGEM_TXDESC_NO_CRC_APPENDED (1 << 16)
+#define CGEM_TXDESC_LAST_BUF (1 << 15) /* last in frame */
#define CGEM_TXDESC_LENGTH_MASK 0x3fff
};
struct cgem_rx_desc {
uint32_t addr;
-#define CGEM_RXDESC_WRAP (1<<1) /* goes in addr! */
-#define CGEM_RXDESC_OWN (1<<0) /* buf filled */
+#define CGEM_RXDESC_WRAP (1 << 1) /* goes in addr! */
+#define CGEM_RXDESC_OWN (1 << 0) /* buf filled */
uint32_t ctl;
-#define CGEM_RXDESC_BCAST (1<<31) /* all 1's broadcast */
-#define CGEM_RXDESC_MULTI_MATCH (1<<30) /* mutlicast match */
-#define CGEM_RXDESC_UNICAST_MATCH (1<<29)
-#define CGEM_RXDESC_EXTERNAL_MATCH (1<<28) /* ext addr match */
+#define CGEM_RXDESC_BCAST (1U << 31)/* all 1's bcast */
+#define CGEM_RXDESC_MULTI_MATCH (1 << 30) /* mutlicast match */
+#define CGEM_RXDESC_UNICAST_MATCH (1 << 29)
+#define CGEM_RXDESC_EXTERNAL_MATCH (1 << 28) /* ext addr match */
#define CGEM_RXDESC_SPEC_MATCH_SHIFT 25
-#define CGEM_RXDESC_SPEC_MATCH_MASK (3<<25)
+#define CGEM_RXDESC_SPEC_MATCH_MASK (3 << 25)
#define CGEM_RXDESC_TYPE_ID_MATCH_SHIFT 22
-#define CGEM_RXDESC_TYPE_ID_MATCH_MASK (3<<22)
-#define CGEM_RXDESC_CKSUM_STAT_MASK (3<<22) /* same field above */
-#define CGEM_RXDESC_CKSUM_STAT_NONE (0<<22)
-#define CGEM_RXDESC_CKSUM_STAT_IP_GOOD (1<<22)
-#define CGEM_RXDESC_CKSUM_STAT_TCP_GOOD (2<<22) /* and ip good */
-#define CGEM_RXDESC_CKSUM_STAT_UDP_GOOD (3<<22) /* and ip good */
-#define CGEM_RXDESC_VLAN_DETECTED (1<<21)
-#define CGEM_RXDESC_PRIO_DETECTED (1<<20)
+#define CGEM_RXDESC_TYPE_ID_MATCH_MASK (3 << 22)
+#define CGEM_RXDESC_CKSUM_STAT_MASK (3 << 22) /* same as above */
+#define CGEM_RXDESC_CKSUM_STAT_NONE (0 << 22)
+#define CGEM_RXDESC_CKSUM_STAT_IP_GOOD (1 << 22)
+#define CGEM_RXDESC_CKSUM_STAT_TCP_GOOD (2 << 22) /* and ip good */
+#define CGEM_RXDESC_CKSUM_STAT_UDP_GOOD (3 << 22) /* and ip good */
+#define CGEM_RXDESC_VLAN_DETECTED (1 << 21)
+#define CGEM_RXDESC_PRIO_DETECTED (1 << 20)
#define CGEM_RXDESC_VLAN_PRIO_SHIFT 17
-#define CGEM_RXDESC_VLAN_PRIO_MASK (7<<17)
-#define CGEM_RXDESC_CFI (1<<16)
-#define CGEM_RXDESC_EOF (1<<15) /* end of frame */
-#define CGEM_RXDESC_SOF (1<<14) /* start of frame */
-#define CGEM_RXDESC_BAD_FCS (1<<13)
+#define CGEM_RXDESC_VLAN_PRIO_MASK (7 << 17)
+#define CGEM_RXDESC_CFI (1 << 16)
+#define CGEM_RXDESC_EOF (1 << 15) /* end of frame */
+#define CGEM_RXDESC_SOF (1 << 14) /* start of frame */
+#define CGEM_RXDESC_BAD_FCS (1 << 13)
#define CGEM_RXDESC_LENGTH_MASK 0x1fff
};
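One recurring substantive change in the register header above is replacing 1<<31 with 1U<<31 (and 0xffff<<16 with 0xffffU<<16) wherever a mask reaches bit 31. A standalone illustration of why, assuming the usual 32-bit int (this program is not part of the diff):

/*
 * With 32-bit int, shifting 1 into the sign bit is undefined behavior in
 * standard C; the unsigned forms used above are well-defined.
 */
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint32_t unidir_en = 1U << 31;		/* 0x80000000, well-defined */
	uint32_t fifo_mask = 0xffffU << 16;	/* 0xffff0000, well-defined */

	/* uint32_t bad = 1 << 31;   signed overflow: undefined behavior */

	printf("0x%08x 0x%08x\n", unidir_en, fifo_mask);
	return (0);
}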