Page Menu
Home
FreeBSD
Search
Configure Global Search
Log In
Files
F156587319
D10706.id29145.diff
No One
Temporary
Actions
View File
Edit File
Delete File
View Transforms
Subscribe
Mute Notifications
Flag For Later
Award Token
Size
145 KB
Referenced Files
None
Subscribers
None
D10706.id29145.diff
View Options
Index: sys/arm/mv/files.mv
===================================================================
--- sys/arm/mv/files.mv
+++ sys/arm/mv/files.mv
@@ -23,6 +23,8 @@
dev/cesa/cesa.c optional cesa
dev/iicbus/twsi/mv_twsi.c optional twsi
dev/mge/if_mge.c optional mge
+dev/neta/if_mvneta_fdt.c optional neta fdt
+dev/neta/if_mvneta.c optional neta mdio mii
dev/nand/nfc_mv.c optional nand
dev/mvs/mvs_soc.c optional mvs
dev/uart/uart_dev_ns8250.c optional uart
Index: sys/arm/mv/mv_common.c
===================================================================
--- sys/arm/mv/mv_common.c
+++ sys/arm/mv/mv_common.c
@@ -151,6 +151,7 @@
static struct soc_node_spec soc_nodes[] = {
{ "mrvl,ge", &decode_win_eth_setup, &decode_win_eth_dump },
+ { "marvell,armada-370-neta", &decode_win_eth_setup, &decode_win_eth_dump },
{ "mrvl,usb-ehci", &decode_win_usb_setup, &decode_win_usb_dump },
{ "marvell,orion-ehci", &decode_win_usb_setup, &decode_win_usb_dump },
{ "marvell,armada-380-xhci", &decode_win_usb3_setup, &decode_win_usb3_dump },
@@ -648,10 +649,17 @@
WIN_REG_BASE_IDX_WR2(win_xor, har, MV_WIN_XOR_REMAP)
WIN_REG_BASE_IDX_WR2(win_xor, ctrl, MV_WIN_XOR_CTRL)
+#if defined(SOC_MV_ARMADA38X)
+WIN_REG_BASE_RD(win_eth, bare, 0x2290)
+WIN_REG_BASE_RD(win_eth, epap, 0x2294)
+WIN_REG_BASE_WR(win_eth, bare, 0x2290)
+WIN_REG_BASE_WR(win_eth, epap, 0x2294)
+#else
WIN_REG_BASE_RD(win_eth, bare, 0x290)
WIN_REG_BASE_RD(win_eth, epap, 0x294)
WIN_REG_BASE_WR(win_eth, bare, 0x290)
WIN_REG_BASE_WR(win_eth, epap, 0x294)
+#endif
WIN_REG_BASE_IDX_RD(win_pcie, cr, MV_WIN_PCIE_CTRL);
WIN_REG_BASE_IDX_RD(win_pcie, br, MV_WIN_PCIE_BASE);
Index: sys/arm/mv/mvwin.h
===================================================================
--- sys/arm/mv/mvwin.h
+++ sys/arm/mv/mvwin.h
@@ -229,10 +229,18 @@
#define MV_WIN_USB3_BASE(n) (0x8 * (n) + 0x4004)
#define MV_WIN_USB3_MAX 8
+#if defined(SOC_MV_ARMADA38X)
+#define MV_WIN_ETH_BASE(n) (0x8 * (n) + 0x2200)
+#define MV_WIN_ETH_SIZE(n) (0x8 * (n) + 0x2204)
+#define MV_WIN_ETH_REMAP(n) (0x4 * (n) + 0x2280)
+#define MV_WIN_ETH_MAX 6
+#else
#define MV_WIN_ETH_BASE(n) (0x8 * (n) + 0x200)
#define MV_WIN_ETH_SIZE(n) (0x8 * (n) + 0x204)
#define MV_WIN_ETH_REMAP(n) (0x4 * (n) + 0x280)
#define MV_WIN_ETH_MAX 6
+#endif
+
#define MV_WIN_IDMA_BASE(n) (0x8 * (n) + 0xa00)
#define MV_WIN_IDMA_SIZE(n) (0x8 * (n) + 0xa04)
Index: sys/conf/options
===================================================================
--- sys/conf/options
+++ sys/conf/options
@@ -872,6 +872,9 @@
MWL_AGGR_SIZE opt_mwl.h
MWL_TX_NODROP opt_mwl.h
+# Options for the Marvell NETA driver
+MVNETA_DMA_COHERENT opt_mvneta.h # I/O coherency tweaks
+
# Options for the Intel 802.11ac wireless driver
IWM_DEBUG opt_iwm.h
Index: sys/dev/neta/if_mvneta.c
===================================================================
--- /dev/null
+++ sys/dev/neta/if_mvneta.c
@@ -0,0 +1,3583 @@
+/*
+ * Copyright (c) 2017 Stormshield.
+ * Copyright (c) 2017 Semihalf.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "opt_platform.h"
+#include "opt_mvneta.h"
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/endian.h>
+#include <sys/mbuf.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+#include <sys/socket.h>
+#include <sys/sysctl.h>
+#include <sys/smp.h>
+#include <sys/taskqueue.h>
+#ifdef MVNETA_KTR
+#include <sys/ktr.h>
+#endif
+
+#include <net/ethernet.h>
+#include <net/bpf.h>
+#include <net/if.h>
+#include <net/if_arp.h>
+#include <net/if_dl.h>
+#include <net/if_media.h>
+#include <net/if_types.h>
+#include <net/if_vlan_var.h>
+
+#include <netinet/in_systm.h>
+#include <netinet/in.h>
+#include <netinet/ip.h>
+#include <netinet/tcp_lro.h>
+
+#include <sys/sockio.h>
+#include <sys/bus.h>
+#include <machine/bus.h>
+#include <sys/rman.h>
+#include <machine/resource.h>
+
+#include <dev/mii/mii.h>
+#include <dev/mii/miivar.h>
+
+#include <dev/fdt/fdt_common.h>
+#include <dev/ofw/openfirm.h>
+#include <dev/ofw/ofw_bus.h>
+
+#include <dev/mdio/mdio.h>
+
+#include <arm/mv/mvreg.h>
+#include <arm/mv/mvvar.h>
+
+#include "if_mvnetareg.h"
+#include "if_mvnetavar.h"
+
+#include "miibus_if.h"
+#include "mdio_if.h"
+
+#ifdef MVNETA_DEBUG
+#define STATIC /* nothing */
+#else
+#define STATIC static
+#endif
+
+#define DASSERT(x) KASSERT((x), (#x))
+
+/* Device Register Initialization */
+STATIC int mvneta_initreg(struct ifnet *);
+
+/* Descriptor Ring Control for each of queues */
+STATIC int mvneta_ring_alloc_rx_queue(struct mvneta_softc *, int);
+STATIC int mvneta_ring_alloc_tx_queue(struct mvneta_softc *, int);
+STATIC void mvneta_ring_dealloc_rx_queue(struct mvneta_softc *, int);
+STATIC void mvneta_ring_dealloc_tx_queue(struct mvneta_softc *, int);
+STATIC int mvneta_ring_init_rx_queue(struct mvneta_softc *, int);
+STATIC int mvneta_ring_init_tx_queue(struct mvneta_softc *, int);
+STATIC void mvneta_ring_flush_rx_queue(struct mvneta_softc *, int);
+STATIC void mvneta_ring_flush_tx_queue(struct mvneta_softc *, int);
+STATIC void mvneta_dmamap_cb(void *, bus_dma_segment_t *, int, int);
+STATIC int mvneta_dma_create(struct mvneta_softc *);
+
+/* Rx/Tx Queue Control */
+STATIC int mvneta_rx_queue_init(struct ifnet *, int);
+STATIC int mvneta_tx_queue_init(struct ifnet *, int);
+STATIC int mvneta_rx_queue_enable(struct ifnet *, int);
+STATIC int mvneta_tx_queue_enable(struct ifnet *, int);
+STATIC void mvneta_rx_lockq(struct mvneta_softc *, int);
+STATIC void mvneta_rx_unlockq(struct mvneta_softc *, int);
+STATIC void mvneta_tx_lockq(struct mvneta_softc *, int);
+STATIC void mvneta_tx_unlockq(struct mvneta_softc *, int);
+
+/* Interrupt Handlers */
+STATIC void mvneta_disable_intr(struct mvneta_softc *);
+STATIC void mvneta_enable_intr(struct mvneta_softc *);
+STATIC void mvneta_rxtxth_intr(void *);
+STATIC int mvneta_misc_intr(struct mvneta_softc *);
+STATIC void mvneta_tick(void *);
+/* struct ifnet and mii callbacks*/
+STATIC int mvneta_xmitfast_locked(struct mvneta_softc *, int, struct mbuf **);
+STATIC int mvneta_xmit_locked(struct mvneta_softc *, int);
+#ifdef MVNETA_MULTIQUEUE
+STATIC int mvneta_transmit(struct ifnet *, struct mbuf *);
+#else /* !MVNETA_MULTIQUEUE */
+STATIC void mvneta_start(struct ifnet *);
+#endif
+STATIC void mvneta_qflush(struct ifnet *);
+STATIC void mvneta_tx_task(void *, int);
+STATIC int mvneta_ioctl(struct ifnet *, u_long, caddr_t);
+STATIC void mvneta_init(void *);
+STATIC void mvneta_init_locked(void *);
+STATIC void mvneta_stop(struct mvneta_softc *);
+STATIC void mvneta_stop_locked(struct mvneta_softc *);
+STATIC int mvneta_mediachange(struct ifnet *);
+STATIC void mvneta_mediastatus(struct ifnet *, struct ifmediareq *);
+STATIC void mvneta_portup(struct mvneta_softc *);
+STATIC void mvneta_portdown(struct mvneta_softc *);
+
+/* Link State Notify */
+STATIC void mvneta_update_autoneg(struct mvneta_softc *, int);
+STATIC int mvneta_update_media(struct mvneta_softc *, int);
+STATIC void mvneta_adjust_link(struct mvneta_softc *);
+STATIC void mvneta_update_eee(struct mvneta_softc *);
+STATIC void mvneta_update_fc(struct mvneta_softc *);
+STATIC void mvneta_link_isr(struct mvneta_softc *);
+STATIC void mvneta_linkupdate(struct mvneta_softc *, boolean_t);
+STATIC void mvneta_linkup(struct mvneta_softc *);
+STATIC void mvneta_linkdown(struct mvneta_softc *);
+STATIC void mvneta_linkreset(struct mvneta_softc *);
+
+/* Tx Subroutines */
+STATIC int mvneta_tx_queue(struct mvneta_softc *, struct mbuf **, int);
+STATIC void mvneta_tx_set_csumflag(struct ifnet *,
+ struct mvneta_tx_desc *, struct mbuf *);
+STATIC void mvneta_tx_queue_complete(struct mvneta_softc *, int);
+STATIC void mvneta_tx_drain(struct mvneta_softc *);
+
+/* Rx Subroutines */
+STATIC int mvneta_rx(struct mvneta_softc *, int, int);
+STATIC void mvneta_rx_queue(struct mvneta_softc *, int, int);
+STATIC void mvneta_rx_queue_refill(struct mvneta_softc *, int);
+STATIC void mvneta_rx_set_csumflag(struct ifnet *,
+ struct mvneta_rx_desc *, struct mbuf *);
+STATIC void mvneta_rx_buf_free(struct mvneta_softc *, struct mvneta_buf *);
+
+/* MAC address filter */
+STATIC void mvneta_filter_setup(struct mvneta_softc *);
+
+/* sysctl(9) */
+STATIC int sysctl_read_mib(SYSCTL_HANDLER_ARGS);
+STATIC int sysctl_clear_mib(SYSCTL_HANDLER_ARGS);
+STATIC int sysctl_set_queue_rxthtime(SYSCTL_HANDLER_ARGS);
+STATIC void sysctl_mvneta_init(struct mvneta_softc *);
+
+/* MIB */
+STATIC void mvneta_clear_mib(struct mvneta_softc *);
+STATIC void mvneta_update_mib(struct mvneta_softc *);
+
+/* Switch */
+STATIC boolean_t mvneta_has_switch(device_t);
+
+#define mvneta_sc_lock(sc) mtx_lock(&sc->mtx)
+#define mvneta_sc_unlock(sc) mtx_unlock(&sc->mtx)
+
+STATIC struct mtx mii_mutex;
+STATIC int mii_init = 0;
+
+/* Device */
+STATIC int mvneta_detach(device_t);
+/* MII */
+STATIC int mvneta_miibus_readreg(device_t, int, int);
+STATIC int mvneta_miibus_writereg(device_t, int, int, int);
+
+static device_method_t mvneta_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_detach, mvneta_detach),
+ /* MII interface */
+ DEVMETHOD(miibus_readreg, mvneta_miibus_readreg),
+ DEVMETHOD(miibus_writereg, mvneta_miibus_writereg),
+ /* MDIO interface */
+ DEVMETHOD(mdio_readreg, mvneta_miibus_readreg),
+ DEVMETHOD(mdio_writereg, mvneta_miibus_writereg),
+
+ /* End */
+ DEVMETHOD_END
+};
+
+DEFINE_CLASS_0(mvneta, mvneta_driver, mvneta_methods, sizeof(struct mvneta_softc));
+
+DRIVER_MODULE(miibus, mvneta, miibus_driver, miibus_devclass, 0, 0);
+DRIVER_MODULE(mdio, mvneta, mdio_driver, mdio_devclass, 0, 0);
+MODULE_DEPEND(mvneta, mdio, 1, 1, 1);
+MODULE_DEPEND(mvneta, ether, 1, 1, 1);
+MODULE_DEPEND(mvneta, miibus, 1, 1, 1);
+MODULE_DEPEND(mvneta, mvxpbm, 1, 1, 1);
+
+/*
+ * List of MIB register and names
+ */
+enum mvneta_mib_idx
+{
+ MVNETA_MIB_RX_GOOD_OCT_IDX,
+ MVNETA_MIB_RX_BAD_OCT_IDX,
+ MVNETA_MIB_TX_MAC_TRNS_ERR_IDX,
+ MVNETA_MIB_RX_GOOD_FRAME_IDX,
+ MVNETA_MIB_RX_BAD_FRAME_IDX,
+ MVNETA_MIB_RX_BCAST_FRAME_IDX,
+ MVNETA_MIB_RX_MCAST_FRAME_IDX,
+ MVNETA_MIB_RX_FRAME64_OCT_IDX,
+ MVNETA_MIB_RX_FRAME127_OCT_IDX,
+ MVNETA_MIB_RX_FRAME255_OCT_IDX,
+ MVNETA_MIB_RX_FRAME511_OCT_IDX,
+ MVNETA_MIB_RX_FRAME1023_OCT_IDX,
+ MVNETA_MIB_RX_FRAMEMAX_OCT_IDX,
+ MVNETA_MIB_TX_GOOD_OCT_IDX,
+ MVNETA_MIB_TX_GOOD_FRAME_IDX,
+ MVNETA_MIB_TX_EXCES_COL_IDX,
+ MVNETA_MIB_TX_MCAST_FRAME_IDX,
+ MVNETA_MIB_TX_BCAST_FRAME_IDX,
+ MVNETA_MIB_TX_MAC_CTL_ERR_IDX,
+ MVNETA_MIB_FC_SENT_IDX,
+ MVNETA_MIB_FC_GOOD_IDX,
+ MVNETA_MIB_FC_BAD_IDX,
+ MVNETA_MIB_PKT_UNDERSIZE_IDX,
+ MVNETA_MIB_PKT_FRAGMENT_IDX,
+ MVNETA_MIB_PKT_OVERSIZE_IDX,
+ MVNETA_MIB_PKT_JABBER_IDX,
+ MVNETA_MIB_MAC_RX_ERR_IDX,
+ MVNETA_MIB_MAC_CRC_ERR_IDX,
+ MVNETA_MIB_MAC_COL_IDX,
+ MVNETA_MIB_MAC_LATE_COL_IDX,
+};
+
+STATIC struct mvneta_mib_def {
+ uint32_t regnum;
+ int reg64;
+ const char *sysctl_name;
+ const char *desc;
+} mvneta_mib_list[] = {
+ [MVNETA_MIB_RX_GOOD_OCT_IDX] = {MVNETA_MIB_RX_GOOD_OCT, 1,
+ "rx_good_oct", "Good Octets Rx"},
+ [MVNETA_MIB_RX_BAD_OCT_IDX] = {MVNETA_MIB_RX_BAD_OCT, 0,
+ "rx_bad_oct", "Bad Octets Rx"},
+ [MVNETA_MIB_TX_MAC_TRNS_ERR_IDX] = {MVNETA_MIB_TX_MAC_TRNS_ERR, 0,
+ "tx_mac_err", "MAC Transmit Error"},
+ [MVNETA_MIB_RX_GOOD_FRAME_IDX] = {MVNETA_MIB_RX_GOOD_FRAME, 0,
+ "rx_good_frame", "Good Frames Rx"},
+ [MVNETA_MIB_RX_BAD_FRAME_IDX] = {MVNETA_MIB_RX_BAD_FRAME, 0,
+ "rx_bad_frame", "Bad Frames Rx"},
+ [MVNETA_MIB_RX_BCAST_FRAME_IDX] = {MVNETA_MIB_RX_BCAST_FRAME, 0,
+ "rx_bcast_frame", "Broadcast Frames Rx"},
+ [MVNETA_MIB_RX_MCAST_FRAME_IDX] = {MVNETA_MIB_RX_MCAST_FRAME, 0,
+ "rx_mcast_frame", "Multicast Frames Rx"},
+ [MVNETA_MIB_RX_FRAME64_OCT_IDX] = {MVNETA_MIB_RX_FRAME64_OCT, 0,
+ "rx_frame_1_64", "Frame Size 1 - 64"},
+ [MVNETA_MIB_RX_FRAME127_OCT_IDX] = {MVNETA_MIB_RX_FRAME127_OCT, 0,
+ "rx_frame_65_127", "Frame Size 65 - 127"},
+ [MVNETA_MIB_RX_FRAME255_OCT_IDX] = {MVNETA_MIB_RX_FRAME255_OCT, 0,
+ "rx_frame_128_255", "Frame Size 128 - 255"},
+ [MVNETA_MIB_RX_FRAME511_OCT_IDX] = {MVNETA_MIB_RX_FRAME511_OCT, 0,
+ "rx_frame_256_511", "Frame Size 256 - 511"},
+ [MVNETA_MIB_RX_FRAME1023_OCT_IDX] = {MVNETA_MIB_RX_FRAME1023_OCT, 0,
+ "rx_frame_512_1023", "Frame Size 512 - 1023"},
+ [MVNETA_MIB_RX_FRAMEMAX_OCT_IDX] = {MVNETA_MIB_RX_FRAMEMAX_OCT, 0,
+ "rx_fame_1024_max", "Frame Size 1024 - Max"},
+ [MVNETA_MIB_TX_GOOD_OCT_IDX] = {MVNETA_MIB_TX_GOOD_OCT, 1,
+ "tx_good_oct", "Good Octets Tx"},
+ [MVNETA_MIB_TX_GOOD_FRAME_IDX] = {MVNETA_MIB_TX_GOOD_FRAME, 0,
+ "tx_good_frame", "Good Frames Tx"},
+ [MVNETA_MIB_TX_EXCES_COL_IDX] = {MVNETA_MIB_TX_EXCES_COL, 0,
+ "tx_exces_collision", "Excessive Collision"},
+ [MVNETA_MIB_TX_MCAST_FRAME_IDX] = {MVNETA_MIB_TX_MCAST_FRAME, 0,
+ "tx_mcast_frame", "Multicast Frames Tx"},
+ [MVNETA_MIB_TX_BCAST_FRAME_IDX] = {MVNETA_MIB_TX_BCAST_FRAME, 0,
+ "tx_bcast_frame", "Broadcast Frames Tx"},
+ [MVNETA_MIB_TX_MAC_CTL_ERR_IDX] = {MVNETA_MIB_TX_MAC_CTL_ERR, 0,
+ "tx_mac_ctl_err", "Unknown MAC Control"},
+ [MVNETA_MIB_FC_SENT_IDX] = {MVNETA_MIB_FC_SENT, 0,
+ "fc_tx", "Flow Control Tx"},
+ [MVNETA_MIB_FC_GOOD_IDX] = {MVNETA_MIB_FC_GOOD, 0,
+ "fc_rx_good", "Good Flow Control Rx"},
+ [MVNETA_MIB_FC_BAD_IDX] = {MVNETA_MIB_FC_BAD, 0,
+ "fc_rx_bad", "Bad Flow Control Rx"},
+ [MVNETA_MIB_PKT_UNDERSIZE_IDX] = {MVNETA_MIB_PKT_UNDERSIZE, 0,
+ "pkt_undersize", "Undersized Packets Rx"},
+ [MVNETA_MIB_PKT_FRAGMENT_IDX] = {MVNETA_MIB_PKT_FRAGMENT, 0,
+ "pkt_fragment", "Fragmented Packets Rx"},
+ [MVNETA_MIB_PKT_OVERSIZE_IDX] = {MVNETA_MIB_PKT_OVERSIZE, 0,
+ "pkt_oversize", "Oversized Packets Rx"},
+ [MVNETA_MIB_PKT_JABBER_IDX] = {MVNETA_MIB_PKT_JABBER, 0,
+ "pkt_jabber", "Jabber Packets Rx"},
+ [MVNETA_MIB_MAC_RX_ERR_IDX] = {MVNETA_MIB_MAC_RX_ERR, 0,
+ "mac_rx_err", "MAC Rx Errors"},
+ [MVNETA_MIB_MAC_CRC_ERR_IDX] = {MVNETA_MIB_MAC_CRC_ERR, 0,
+ "mac_crc_err", "MAC CRC Errors"},
+ [MVNETA_MIB_MAC_COL_IDX] = {MVNETA_MIB_MAC_COL, 0,
+ "mac_collision", "MAC Collision"},
+ [MVNETA_MIB_MAC_LATE_COL_IDX] = {MVNETA_MIB_MAC_LATE_COL, 0,
+ "mac_late_collision", "MAC Late Collision"},
+};
+
/* Bus resources: one register-memory window and one IRQ, both activated. */
static struct resource_spec res_spec[] = {
	{ SYS_RES_MEMORY, 0, RF_ACTIVE },
	{ SYS_RES_IRQ, 0, RF_ACTIVE },
	{ -1, 0}
};
+
/*
 * Interrupt handler table.  Only the single aggregated RxTx-threshold
 * interrupt is registered (see bus_setup_intr() in mvneta_attach()).
 */
static struct {
	driver_intr_t *handler;
	char * description;
} mvneta_intrs[] = {
	{ mvneta_rxtxth_intr, "MVNETA aggregated interrupt" },
};
+
/*
 * Program the unicast MAC address into the port registers:
 * addr[0..3] into MACAH (high), addr[4..5] into MACAL (low).
 * Always returns 0.
 */
static int
mvneta_set_mac_address(struct mvneta_softc *sc, uint8_t *addr)
{
	unsigned int mac_h;
	unsigned int mac_l;

	mac_l = (addr[4] << 8) | (addr[5]);
	mac_h = (addr[0] << 24) | (addr[1] << 16) |
	    (addr[2] << 8) | (addr[3] << 0);

	MVNETA_WRITE(sc, MVNETA_MACAL, mac_l);
	MVNETA_WRITE(sc, MVNETA_MACAH, mac_h);
	return (0);
}
+
+/*
+ * Determine the interface MAC address into addr[6].
+ * Order of preference: mvneta_fdt_mac_address() (FDT builds only), then
+ * whatever is already programmed in the MACAL/MACAH registers, and as a
+ * last resort a pseudo-random address (low byte tied to the unit number).
+ * Always returns 0.
+ */
+static int
+mvneta_get_mac_address(struct mvneta_softc *sc, uint8_t *addr)
+{
+	uint32_t mac_l, mac_h;
+
+#ifdef FDT
+	if (mvneta_fdt_mac_address(sc, addr) == 0)
+		return (0);
+#endif
+	/*
+	 * Fall back -- use the currently programmed address.
+	 */
+	mac_l = MVNETA_READ(sc, MVNETA_MACAL);
+	mac_h = MVNETA_READ(sc, MVNETA_MACAH);
+	if (mac_l == 0 && mac_h == 0) {
+		/*
+		 * Generate pseudo-random MAC.
+		 * Set lower part to random number | unit number.
+		 */
+		mac_l = arc4random() & ~0xff;
+		mac_l |= device_get_unit(sc->dev) & 0xff;
+		mac_h = arc4random();
+		mac_h &= ~(3 << 24); /* Clear multicast and LAA bits */
+		if (bootverbose) {
+			device_printf(sc->dev,
+			    "Could not acquire MAC address. "
+			    "Using randomized one.\n");
+		}
+	}
+
+	/* Unpack registers back into the byte array, network order. */
+	addr[0] = (mac_h & 0xff000000) >> 24;
+	addr[1] = (mac_h & 0x00ff0000) >> 16;
+	addr[2] = (mac_h & 0x0000ff00) >> 8;
+	addr[3] = (mac_h & 0x000000ff);
+	addr[4] = (mac_l & 0x0000ff00) >> 8;
+	addr[5] = (mac_l & 0x000000ff);
+	return (0);
+}
+
+/*
+ * Return TRUE when the device tree's "marvell,dsa" node has a
+ * "dsa,ethernet" phandle pointing at this device, i.e. the port is
+ * wired to a DSA Ethernet switch.
+ */
+STATIC boolean_t
+mvneta_has_switch(device_t self)
+{
+	phandle_t node, switch_node, switch_eth, switch_eth_handle;
+
+	node = ofw_bus_get_node(self);
+	switch_node =
+	    fdt_find_compatible(OF_finddevice("/"), "marvell,dsa", 0);
+	switch_eth = 0;
+	/*
+	 * Must be zero-initialized: OF_getencprop() leaves the buffer
+	 * untouched when the node or property is absent, and the value
+	 * is compared below regardless.
+	 */
+	switch_eth_handle = 0;
+
+	OF_getencprop(switch_node, "dsa,ethernet",
+	    (void*)&switch_eth_handle, sizeof(switch_eth_handle));
+
+	if (switch_eth_handle > 0)
+		switch_eth = OF_node_from_xref(switch_eth_handle);
+
+	/* Return true if dsa,ethernet cell points to us */
+	return (node == switch_eth);
+}
+
+/*
+ * Create the DMA tags for Tx/Rx descriptor rings and mbuf buffers and
+ * allocate the per-queue ring memory.  On any failure mvneta_detach()
+ * is called to undo the partial allocations before returning the error.
+ */
+STATIC int
+mvneta_dma_create(struct mvneta_softc *sc)
+{
+	size_t maxsize, maxsegsz;
+	size_t q;
+	int error;
+
+	/*
+	 * Create Tx DMA
+	 */
+	maxsize = maxsegsz = sizeof(struct mvneta_tx_desc) * MVNETA_TX_RING_CNT;
+
+	error = bus_dma_tag_create(
+	    bus_get_dma_tag(sc->dev),		/* parent */
+	    16, 0,				/* alignment, boundary */
+	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
+	    BUS_SPACE_MAXADDR,			/* highaddr */
+	    NULL, NULL,				/* filtfunc, filtfuncarg */
+	    maxsize,				/* maxsize */
+	    1,					/* nsegments */
+	    maxsegsz,				/* maxsegsz */
+	    0,					/* flags */
+	    NULL, NULL,				/* lockfunc, lockfuncarg */
+	    &sc->tx_dtag);			/* dmat */
+	if (error != 0) {
+		device_printf(sc->dev,
+		    "Failed to create DMA tag for Tx descriptors.\n");
+		goto fail;
+	}
+	error = bus_dma_tag_create(
+	    bus_get_dma_tag(sc->dev),		/* parent */
+	    1, 0,				/* alignment, boundary */
+	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
+	    BUS_SPACE_MAXADDR,			/* highaddr */
+	    NULL, NULL,				/* filtfunc, filtfuncarg */
+	    MVNETA_PACKET_SIZE,			/* maxsize */
+	    MVNETA_TX_SEGLIMIT,			/* nsegments */
+	    MVNETA_PACKET_SIZE,			/* maxsegsz */
+	    BUS_DMA_ALLOCNOW,			/* flags */
+	    NULL, NULL,				/* lockfunc, lockfuncarg */
+	    &sc->txmbuf_dtag);
+	if (error != 0) {
+		device_printf(sc->dev,
+		    "Failed to create DMA tag for Tx mbufs.\n");
+		goto fail;
+	}
+
+	for (q = 0; q < MVNETA_TX_QNUM_MAX; q++) {
+		error = mvneta_ring_alloc_tx_queue(sc, q);
+		if (error != 0) {
+			/* %zu: q is a size_t; %d here would be undefined. */
+			device_printf(sc->dev,
+			    "Failed to allocate DMA safe memory for TxQ: %zu\n", q);
+			goto fail;
+		}
+	}
+
+	/*
+	 * Create Rx DMA.
+	 */
+	/* Create tag for Rx descriptors */
+	error = bus_dma_tag_create(
+	    bus_get_dma_tag(sc->dev),		/* parent */
+	    32, 0,				/* alignment, boundary */
+	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
+	    BUS_SPACE_MAXADDR,			/* highaddr */
+	    NULL, NULL,				/* filtfunc, filtfuncarg */
+	    sizeof(struct mvneta_rx_desc) * MVNETA_RX_RING_CNT, /* maxsize */
+	    1,					/* nsegments */
+	    sizeof(struct mvneta_rx_desc) * MVNETA_RX_RING_CNT, /* maxsegsz */
+	    0,					/* flags */
+	    NULL, NULL,				/* lockfunc, lockfuncarg */
+	    &sc->rx_dtag);			/* dmat */
+	if (error != 0) {
+		device_printf(sc->dev,
+		    "Failed to create DMA tag for Rx descriptors.\n");
+		goto fail;
+	}
+
+	/* Create tag for Rx buffers */
+	error = bus_dma_tag_create(
+	    bus_get_dma_tag(sc->dev),		/* parent */
+	    32, 0,				/* alignment, boundary */
+	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
+	    BUS_SPACE_MAXADDR,			/* highaddr */
+	    NULL, NULL,				/* filtfunc, filtfuncarg */
+	    MVNETA_PACKET_SIZE, 1,		/* maxsize, nsegments */
+	    MVNETA_PACKET_SIZE,			/* maxsegsz */
+	    0,					/* flags */
+	    NULL, NULL,				/* lockfunc, lockfuncarg */
+	    &sc->rxbuf_dtag);			/* dmat */
+	if (error != 0) {
+		device_printf(sc->dev,
+		    "Failed to create DMA tag for Rx buffers.\n");
+		goto fail;
+	}
+
+	for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) {
+		if (mvneta_ring_alloc_rx_queue(sc, q) != 0) {
+			/* %zu: q is a size_t; %d here would be undefined. */
+			device_printf(sc->dev,
+			    "Failed to allocate DMA safe memory for RxQ: %zu\n", q);
+			goto fail;
+		}
+	}
+
+	return (0);
+fail:
+	mvneta_detach(sc->dev);
+
+	return (error);
+}
+
+/* ARGSUSED */
+/*
+ * Device attach: allocate bus resources, reset the DMA engines, set the
+ * MAC address, create the ifnet and DMA rings, then attach either a MII
+ * PHY, in-band (SGMII) link status, or a fixed-link/DSA configuration,
+ * and finally hook up the aggregated interrupt.
+ */
+int
+mvneta_attach(device_t self)
+{
+	struct mvneta_softc *sc;
+	struct ifnet *ifp;
+	device_t child;
+	int ifm_target;
+	int q, error;
+
+	sc = device_get_softc(self);
+	sc->dev = self;
+
+	mtx_init(&sc->mtx, "mvneta_sc", NULL, MTX_DEF);
+
+	error = bus_alloc_resources(self, res_spec, sc->res);
+	if (error) {
+		device_printf(self, "could not allocate resources\n");
+		return (ENXIO);
+	}
+
+	sc->version = MVNETA_READ(sc, MVNETA_PV);
+	device_printf(self, "version is %x\n", sc->version);
+	callout_init(&sc->tick_ch, 0);
+
+#if !defined(MVNETA_DMA_COHERENT)
+	/*
+	 * Disable port snoop for buffers and descriptors
+	 * to avoid L2 caching of both without DRAM copy.
+	 */
+	uint32_t reg;
+	reg = MVNETA_READ(sc, MVNETA_PSNPCFG);
+	reg &= ~MVNETA_PSNPCFG_DESCSNP_MASK;
+	reg &= ~MVNETA_PSNPCFG_BUFSNP_MASK;
+	MVNETA_WRITE(sc, MVNETA_PSNPCFG, reg);
+#endif
+	/*
+	 * make sure DMA engines are in reset state
+	 */
+	MVNETA_WRITE(sc, MVNETA_PRXINIT, 0x00000001);
+	MVNETA_WRITE(sc, MVNETA_PTXINIT, 0x00000001);
+
+	/*
+	 * MAC address
+	 */
+	if (mvneta_get_mac_address(sc, sc->enaddr)) {
+		device_printf(self, "no mac address.\n");
+		/*
+		 * NOTE(review): this early return leaks the resources
+		 * allocated by bus_alloc_resources() above -- confirm and
+		 * release them (or call mvneta_detach()) on this path.
+		 */
+		return (ENXIO);
+	}
+	mvneta_set_mac_address(sc, sc->enaddr);
+
+	mvneta_disable_intr(sc);
+
+	/* Allocate network interface */
+	ifp = sc->ifp = if_alloc(IFT_ETHER);
+	if (ifp == NULL) {
+		device_printf(self, "if_alloc() failed\n");
+		mvneta_detach(self);
+		return (ENOMEM);
+	}
+	if_initname(ifp, device_get_name(self), device_get_unit(self));
+
+	/*
+	 * We can support 802.1Q VLAN-sized frames and jumbo
+	 * Ethernet frames.
+	 */
+	ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_JUMBO_MTU;
+
+	ifp->if_softc = sc;
+	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
+#ifdef MVNETA_MULTIQUEUE
+	ifp->if_transmit = mvneta_transmit;
+	ifp->if_qflush = mvneta_qflush;
+#else /* !MVNETA_MULTIQUEUE */
+	ifp->if_start = mvneta_start;
+	ifp->if_snd.ifq_drv_maxlen = MVNETA_TX_RING_CNT - 1;
+	IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
+	IFQ_SET_READY(&ifp->if_snd);
+#endif
+	ifp->if_init = mvneta_init;
+	ifp->if_ioctl = mvneta_ioctl;
+
+	/*
+	 * We can do IPv4/TCPv4/UDPv4/TCPv6/UDPv6 checksums in hardware.
+	 */
+	ifp->if_capabilities |= IFCAP_HWCSUM;
+
+	/*
+	 * As VLAN hardware tagging is not supported
+	 * but is necessary to perform VLAN hardware checksums,
+	 * it is done in the driver
+	 */
+	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWCSUM;
+#ifdef HWCSUM_IPV6
+	ifp->if_capabilities |= IFCAP_HWCSUM_IPV6;
+#endif
+	ifp->if_capenable = ifp->if_capabilities;
+
+	/*
+	 * Advertised but left disabled by default (if_capenable was
+	 * snapshotted above, before this capability is added):
+	 * - Support for Large Receive Offload
+	 */
+	ifp->if_capabilities |= IFCAP_LRO;
+
+	ifp->if_hwassist = CSUM_IP | CSUM_TCP | CSUM_UDP;
+
+	/*
+	 * Device DMA Buffer allocation.
+	 * Handles resource deallocation in case of failure.
+	 */
+	error = mvneta_dma_create(sc);
+	if (error != 0)
+		return (error);
+
+	/* Initialize queues */
+	for (q = 0; q < MVNETA_TX_QNUM_MAX; q++) {
+		error = mvneta_ring_init_tx_queue(sc, q);
+		if (error != 0) {
+			mvneta_detach(self);
+			return (error);
+		}
+	}
+
+	for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) {
+		error = mvneta_ring_init_rx_queue(sc, q);
+		if (error != 0) {
+			mvneta_detach(self);
+			return (error);
+		}
+	}
+
+	ether_ifattach(ifp, sc->enaddr);
+
+	/*
+	 * Enable DMA engines and Initialize Device Registers.
+	 */
+	MVNETA_WRITE(sc, MVNETA_PRXINIT, 0x00000000);
+	MVNETA_WRITE(sc, MVNETA_PTXINIT, 0x00000000);
+	MVNETA_WRITE(sc, MVNETA_PACC, MVNETA_PACC_ACCELERATIONMODE_EDM);
+	mvneta_sc_lock(sc);
+	mvneta_filter_setup(sc);
+	mvneta_sc_unlock(sc);
+	mvneta_initreg(ifp);
+
+	/*
+	 * Now MAC is working, setup MII.
+	 */
+	if (mii_init == 0) {
+		/*
+		 * MII bus is shared by all MACs and all PHYs in SoC.
+		 * serializing the bus access should be safe.
+		 */
+		mtx_init(&mii_mutex, "mvneta_mii", NULL, MTX_DEF);
+		mii_init = 1;
+	}
+
+	/* Attach PHY(s) */
+	if ((sc->phy_addr != MII_PHY_ANY) && (!sc->use_inband_status)) {
+		error = mii_attach(self, &sc->miibus, ifp, mvneta_mediachange,
+		    mvneta_mediastatus, BMSR_DEFCAPMASK, sc->phy_addr,
+		    MII_OFFSET_ANY, 0);
+		if (error != 0) {
+			if (bootverbose) {
+				device_printf(self,
+				    "MII attach failed, error: %d\n", error);
+			}
+			ether_ifdetach(sc->ifp);
+			mvneta_detach(self);
+			return (error);
+		}
+		sc->mii = device_get_softc(sc->miibus);
+		sc->phy_attached = 1;
+
+		/* Disable auto-negotiation in MAC - rely on PHY layer */
+		mvneta_update_autoneg(sc, FALSE);
+	} else if (sc->use_inband_status == TRUE) {
+		/* In-band link status */
+		ifmedia_init(&sc->mvneta_ifmedia, 0, mvneta_mediachange,
+		    mvneta_mediastatus);
+
+		/* Configure media */
+		ifmedia_add(&sc->mvneta_ifmedia, IFM_ETHER | IFM_1000_T | IFM_FDX,
+		    0, NULL);
+		ifmedia_add(&sc->mvneta_ifmedia, IFM_ETHER | IFM_100_TX, 0, NULL);
+		ifmedia_add(&sc->mvneta_ifmedia, IFM_ETHER | IFM_100_TX | IFM_FDX,
+		    0, NULL);
+		ifmedia_add(&sc->mvneta_ifmedia, IFM_ETHER | IFM_10_T, 0, NULL);
+		ifmedia_add(&sc->mvneta_ifmedia, IFM_ETHER | IFM_10_T | IFM_FDX,
+		    0, NULL);
+		ifmedia_add(&sc->mvneta_ifmedia, IFM_ETHER | IFM_AUTO, 0, NULL);
+		ifmedia_set(&sc->mvneta_ifmedia, IFM_ETHER | IFM_AUTO);
+
+		/* Enable auto-negotiation */
+		mvneta_update_autoneg(sc, TRUE);
+
+		mvneta_sc_lock(sc);
+		if (MVNETA_IS_LINKUP(sc))
+			mvneta_linkup(sc);
+		else
+			mvneta_linkdown(sc);
+		mvneta_sc_unlock(sc);
+
+	} else {
+		/* Fixed-link, use predefined values */
+		ifmedia_init(&sc->mvneta_ifmedia, 0, mvneta_mediachange,
+		    mvneta_mediastatus);
+
+		ifm_target = IFM_ETHER;
+		switch (sc->phy_speed) {
+		case 2500:
+			if (sc->phy_mode != MVNETA_PHY_SGMII &&
+			    sc->phy_mode != MVNETA_PHY_QSGMII) {
+				device_printf(self,
+				    "2.5G speed can work only in (Q)SGMII mode\n");
+				ether_ifdetach(sc->ifp);
+				mvneta_detach(self);
+				return (ENXIO);
+			}
+			ifm_target |= IFM_2500_T;
+			break;
+		case 1000:
+			ifm_target |= IFM_1000_T;
+			break;
+		case 100:
+			ifm_target |= IFM_100_TX;
+			break;
+		case 10:
+			ifm_target |= IFM_10_T;
+			break;
+		default:
+			/* Unsupported fixed-link speed from the FDT. */
+			ether_ifdetach(sc->ifp);
+			mvneta_detach(self);
+			return (ENXIO);
+		}
+
+		if (sc->phy_fdx)
+			ifm_target |= IFM_FDX;
+		else
+			ifm_target |= IFM_HDX;
+
+		ifmedia_add(&sc->mvneta_ifmedia, ifm_target, 0, NULL);
+		ifmedia_set(&sc->mvneta_ifmedia, ifm_target);
+		if_link_state_change(sc->ifp, LINK_STATE_UP);
+
+		/* Port wired to a DSA switch: expose an mdio child bus. */
+		if (mvneta_has_switch(self)) {
+			child = device_add_child(sc->dev, "mdio", -1);
+			if (child == NULL) {
+				ether_ifdetach(sc->ifp);
+				mvneta_detach(self);
+				return (ENXIO);
+			}
+			bus_generic_attach(sc->dev);
+			bus_generic_attach(child);
+		}
+
+		/* Configure MAC media */
+		mvneta_update_media(sc, ifm_target);
+	}
+
+	sysctl_mvneta_init(sc);
+
+	callout_reset(&sc->tick_ch, 0, mvneta_tick, sc);
+
+	error = bus_setup_intr(self, sc->res[1],
+	    INTR_TYPE_NET | INTR_MPSAFE, NULL, mvneta_intrs[0].handler, sc,
+	    &sc->ih_cookie[0]);
+	if (error) {
+		device_printf(self, "could not setup %s\n",
+		    mvneta_intrs[0].description);
+		ether_ifdetach(sc->ifp);
+		mvneta_detach(self);
+		return (error);
+	}
+
+	return (0);
+}
+
+/*
+ * Device detach / attach-failure cleanup: stop the port, free the ifnet,
+ * release per-queue ring memory, DMA tags, and bus resources.
+ * Always returns 0.
+ * NOTE(review): there is no ether_ifdetach()/bus_teardown_intr()/
+ * callout_drain() here -- the attach error paths call ether_ifdetach()
+ * themselves, but a module-unload path would need the rest; confirm.
+ */
+STATIC int
+mvneta_detach(device_t dev)
+{
+	struct mvneta_softc *sc;
+	struct ifnet *ifp;
+	int q;
+
+	sc = device_get_softc(dev);
+	ifp = sc->ifp;
+
+	mvneta_stop(sc);
+	/* Detach network interface */
+	if (sc->ifp)
+		if_free(sc->ifp);
+
+	for (q = 0; q < MVNETA_RX_QNUM_MAX; q++)
+		mvneta_ring_dealloc_rx_queue(sc, q);
+	for (q = 0; q < MVNETA_TX_QNUM_MAX; q++)
+		mvneta_ring_dealloc_tx_queue(sc, q);
+
+	if (sc->tx_dtag != NULL)
+		bus_dma_tag_destroy(sc->tx_dtag);
+	if (sc->rx_dtag != NULL)
+		bus_dma_tag_destroy(sc->rx_dtag);
+	if (sc->txmbuf_dtag != NULL)
+		bus_dma_tag_destroy(sc->txmbuf_dtag);
+
+	bus_release_resources(dev, res_spec, sc->res);
+	return (0);
+}
+
+/*
+ * MII
+ */
+/*
+ * Read PHY register 'reg' of PHY 'phy' over the shared SMI bus.
+ * Access is serialized with the global mii_mutex.  The sequence is:
+ * wait for SMI idle, issue the read opcode, wait for idle again, then
+ * wait for READVALID.  Returns the value masked with
+ * MVNETA_SMI_DATA_MASK, or -1 if any of the three polls times out.
+ */
+STATIC int
+mvneta_miibus_readreg(device_t dev, int phy, int reg)
+{
+	struct mvneta_softc *sc;
+	struct ifnet *ifp;
+	uint32_t smi, val;
+	int i;
+
+	sc = device_get_softc(dev);
+	ifp = sc->ifp;
+
+	mtx_lock(&mii_mutex);
+
+	/* Wait for any previous SMI transaction to finish. */
+	for (i = 0; i < MVNETA_PHY_TIMEOUT; i++) {
+		DELAY(1);
+		if ((MVNETA_READ(sc, MVNETA_SMI) & MVNETA_SMI_BUSY) == 0)
+			break;
+	}
+	if (i == MVNETA_PHY_TIMEOUT) {
+		if_printf(ifp, "SMI busy timeout\n");
+		mtx_unlock(&mii_mutex);
+		return (-1);
+	}
+
+	smi = MVNETA_SMI_PHYAD(phy) |
+	    MVNETA_SMI_REGAD(reg) | MVNETA_SMI_OPCODE_READ;
+	MVNETA_WRITE(sc, MVNETA_SMI, smi);
+
+	/* Wait for the read transaction we just issued to complete. */
+	for (i = 0; i < MVNETA_PHY_TIMEOUT; i++) {
+		DELAY(1);
+		if ((MVNETA_READ(sc, MVNETA_SMI) & MVNETA_SMI_BUSY) == 0)
+			break;
+	}
+
+	if (i == MVNETA_PHY_TIMEOUT) {
+		if_printf(ifp, "SMI busy timeout\n");
+		mtx_unlock(&mii_mutex);
+		return (-1);
+	}
+	/* Wait until the returned data is flagged valid. */
+	for (i = 0; i < MVNETA_PHY_TIMEOUT; i++) {
+		DELAY(1);
+		smi = MVNETA_READ(sc, MVNETA_SMI);
+		if (smi & MVNETA_SMI_READVALID)
+			break;
+	}
+
+	if (i == MVNETA_PHY_TIMEOUT) {
+		if_printf(ifp, "SMI busy timeout\n");
+		mtx_unlock(&mii_mutex);
+		return (-1);
+	}
+
+	mtx_unlock(&mii_mutex);
+
+#ifdef MVNETA_KTR
+	CTR3(KTR_SPARE2, "%s i=%d, timeout=%d\n", ifp->if_xname, i,
+	    MVNETA_PHY_TIMEOUT);
+#endif
+
+	val = smi & MVNETA_SMI_DATA_MASK;
+
+#ifdef MVNETA_KTR
+	CTR4(KTR_SPARE2, "%s phy=%d, reg=%#x, val=%#x\n", ifp->if_xname, phy,
+	    reg, val);
+#endif
+	return (val);
+}
+
+/*
+ * Write 'val' to PHY register 'reg' of PHY 'phy' over the shared SMI
+ * bus, serialized with mii_mutex.  Always returns 0; poll timeouts are
+ * only reported via if_printf().
+ */
+STATIC int
+mvneta_miibus_writereg(device_t dev, int phy, int reg, int val)
+{
+	struct mvneta_softc *sc;
+	struct ifnet *ifp;
+	uint32_t smi;
+	int i;
+
+	sc = device_get_softc(dev);
+	ifp = sc->ifp;
+#ifdef MVNETA_KTR
+	CTR4(KTR_SPARE2, "%s phy=%d, reg=%#x, val=%#x\n", ifp->if_xname,
+	    phy, reg, val);
+#endif
+
+	mtx_lock(&mii_mutex);
+
+	/* Wait for any previous SMI transaction to finish. */
+	for (i = 0; i < MVNETA_PHY_TIMEOUT; i++) {
+		DELAY(1);
+		if (!(MVNETA_READ(sc, MVNETA_SMI) & MVNETA_SMI_BUSY))
+			break;
+	}
+	if (i == MVNETA_PHY_TIMEOUT) {
+		if_printf(ifp, "SMI busy timeout\n");
+		mtx_unlock(&mii_mutex);
+		return (0);
+	}
+
+	smi = MVNETA_SMI_PHYAD(phy) | MVNETA_SMI_REGAD(reg) |
+	    MVNETA_SMI_OPCODE_WRITE | (val & MVNETA_SMI_DATA_MASK);
+	MVNETA_WRITE(sc, MVNETA_SMI, smi);
+
+	/* Wait for the write we just issued to complete. */
+	for (i = 0; i < MVNETA_PHY_TIMEOUT; i++) {
+		DELAY(1);
+		if (!(MVNETA_READ(sc, MVNETA_SMI) & MVNETA_SMI_BUSY))
+			break;
+	}
+
+	mtx_unlock(&mii_mutex);
+
+	if (i == MVNETA_PHY_TIMEOUT)
+		if_printf(ifp, "phy write timed out\n");
+
+	return (0);
+}
+
+/*
+ * Bring the port up: enable every Rx and Tx queue, taking each queue's
+ * lock around the enable.  Called on link-up.
+ */
+STATIC void
+mvneta_portup(struct mvneta_softc *sc)
+{
+	int q;
+
+	for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) {
+		mvneta_rx_lockq(sc, q);
+		mvneta_rx_queue_enable(sc->ifp, q);
+		mvneta_rx_unlockq(sc, q);
+	}
+
+	for (q = 0; q < MVNETA_TX_QNUM_MAX; q++) {
+		mvneta_tx_lockq(sc, q);
+		mvneta_tx_queue_enable(sc->ifp, q);
+		mvneta_tx_unlockq(sc, q);
+	}
+
+}
+
+/*
+ * Quiesce the port: mark all queues disabled, then disable the Rx and
+ * Tx DMA engines and busy-poll until the hardware reports them stopped
+ * and the Tx FIFO drained.  Each poll is bounded by RX_DISABLE_TIMEOUT /
+ * TX_DISABLE_TIMEOUT / TX_FIFO_EMPTY_TIMEOUT; a timeout is logged and
+ * the shutdown continues.
+ */
+STATIC void
+mvneta_portdown(struct mvneta_softc *sc)
+{
+	struct mvneta_rx_ring *rx;
+	struct mvneta_tx_ring *tx;
+	int q, cnt;
+	uint32_t reg;
+
+	for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) {
+		rx = MVNETA_RX_RING(sc, q);
+		mvneta_rx_lockq(sc, q);
+		rx->queue_status = MVNETA_QUEUE_DISABLED;
+		mvneta_rx_unlockq(sc, q);
+	}
+
+	for (q = 0; q < MVNETA_TX_QNUM_MAX; q++) {
+		tx = MVNETA_TX_RING(sc, q);
+		mvneta_tx_lockq(sc, q);
+		tx->queue_status = MVNETA_QUEUE_DISABLED;
+		mvneta_tx_unlockq(sc, q);
+	}
+
+	/* Wait for all Rx activity to terminate. */
+	reg = MVNETA_READ(sc, MVNETA_RQC) & MVNETA_RQC_EN_MASK;
+	reg = MVNETA_RQC_DIS(reg);
+	MVNETA_WRITE(sc, MVNETA_RQC, reg);
+	cnt = 0;
+	do {
+		if (cnt >= RX_DISABLE_TIMEOUT) {
+			if_printf(sc->ifp,
+			    "timeout for RX stopped. rqc 0x%x\n", reg);
+			break;
+		}
+		cnt++;
+		reg = MVNETA_READ(sc, MVNETA_RQC);
+	} while (reg & MVNETA_RQC_EN_MASK);
+
+	/* Wait for all Tx activity to terminate. */
+	/* First mask Tx packet interrupts... */
+	reg = MVNETA_READ(sc, MVNETA_PIE);
+	reg &= ~MVNETA_PIE_TXPKTINTRPTENB_MASK;
+	MVNETA_WRITE(sc, MVNETA_PIE, reg);
+
+	/* ...and the Tx buffer-threshold cause bits. */
+	reg = MVNETA_READ(sc, MVNETA_PRXTXTIM);
+	reg &= ~MVNETA_PRXTXTI_TBTCQ_MASK;
+	MVNETA_WRITE(sc, MVNETA_PRXTXTIM, reg);
+
+	reg = MVNETA_READ(sc, MVNETA_TQC) & MVNETA_TQC_EN_MASK;
+	reg = MVNETA_TQC_DIS(reg);
+	MVNETA_WRITE(sc, MVNETA_TQC, reg);
+	cnt = 0;
+	do {
+		if (cnt >= TX_DISABLE_TIMEOUT) {
+			if_printf(sc->ifp,
+			    "timeout for TX stopped. tqc 0x%x\n", reg);
+			break;
+		}
+		cnt++;
+		reg = MVNETA_READ(sc, MVNETA_TQC);
+	} while (reg & MVNETA_TQC_EN_MASK);
+
+	/* Wait for all Tx FIFO is empty */
+	cnt = 0;
+	do {
+		if (cnt >= TX_FIFO_EMPTY_TIMEOUT) {
+			if_printf(sc->ifp,
+			    "timeout for TX FIFO drained. ps0 0x%x\n", reg);
+			break;
+		}
+		cnt++;
+		reg = MVNETA_READ(sc, MVNETA_PS0);
+	} while (!(reg & MVNETA_PS0_TXFIFOEMP) && (reg & MVNETA_PS0_TXINPROG));
+}
+
+/*
+ * Device Register Initialization
+ * reset device registers to device driver default value.
+ * the device is not enabled here.
+ *
+ * Returns 0 on success or ENOBUFS if a queue could not be initialized.
+ */
+STATIC int
+mvneta_initreg(struct ifnet *ifp)
+{
+	struct mvneta_softc *sc;
+	int q, i;
+	uint32_t reg;
+
+	sc = ifp->if_softc;
+#ifdef MVNETA_KTR
+	CTR1(KTR_SPARE2, "%s initializing device register", ifp->if_xname);
+#endif
+
+	/* Disable Legacy WRR, Disable EJP, Release from reset. */
+	MVNETA_WRITE(sc, MVNETA_TQC_1, 0);
+	/* Enable mbus retry. */
+	MVNETA_WRITE(sc, MVNETA_MBUSRETRY, 0x20);
+
+	/* Init TX/RX Queue Registers */
+	for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) {
+		mvneta_rx_lockq(sc, q);
+		if (mvneta_rx_queue_init(ifp, q) != 0) {
+			device_printf(sc->dev,
+			    "initialization failed: cannot initialize queue\n");
+			mvneta_rx_unlockq(sc, q);
+			return (ENOBUFS);
+		}
+		mvneta_rx_unlockq(sc, q);
+	}
+	for (q = 0; q < MVNETA_TX_QNUM_MAX; q++) {
+		mvneta_tx_lockq(sc, q);
+		if (mvneta_tx_queue_init(ifp, q) != 0) {
+			device_printf(sc->dev,
+			    "initialization failed: cannot initialize queue\n");
+			mvneta_tx_unlockq(sc, q);
+			return (ENOBUFS);
+		}
+		mvneta_tx_unlockq(sc, q);
+	}
+
+	/*
+	 * Ethernet Unit Control - disable automatic PHY management by HW.
+	 * In case the port uses SMI-controlled PHY, poll its status with
+	 * mii_tick() and update MAC settings accordingly.
+	 */
+	reg = MVNETA_READ(sc, MVNETA_EUC);
+	reg &= ~MVNETA_EUC_POLLING;
+	MVNETA_WRITE(sc, MVNETA_EUC, reg);
+
+	/* EEE: Low Power Idle */
+	reg = MVNETA_LPIC0_LILIMIT(MVNETA_LPI_LI);
+	reg |= MVNETA_LPIC0_TSLIMIT(MVNETA_LPI_TS);
+	MVNETA_WRITE(sc, MVNETA_LPIC0, reg);
+
+	reg = MVNETA_LPIC1_TWLIMIT(MVNETA_LPI_TW);
+	MVNETA_WRITE(sc, MVNETA_LPIC1, reg);
+
+	reg = MVNETA_LPIC2_MUSTSET;
+	MVNETA_WRITE(sc, MVNETA_LPIC2, reg);
+
+	/* Port MAC Control set 0 */
+	reg = MVNETA_PMACC0_MUSTSET;	/* must write 0x1 */
+	reg &= ~MVNETA_PMACC0_PORTEN;	/* port is still disabled */
+	reg |= MVNETA_PMACC0_FRAMESIZELIMIT(MVNETA_MAX_FRAME);
+	MVNETA_WRITE(sc, MVNETA_PMACC0, reg);
+
+	/* Port MAC Control set 2: select PCS/RGMII mode per phy_mode. */
+	reg = MVNETA_READ(sc, MVNETA_PMACC2);
+	switch (sc->phy_mode) {
+	case MVNETA_PHY_QSGMII:
+		reg |= (MVNETA_PMACC2_PCSEN | MVNETA_PMACC2_RGMIIEN);
+		MVNETA_WRITE(sc, MVNETA_PSERDESCFG, MVNETA_PSERDESCFG_QSGMII);
+		break;
+	case MVNETA_PHY_SGMII:
+		reg |= (MVNETA_PMACC2_PCSEN | MVNETA_PMACC2_RGMIIEN);
+		MVNETA_WRITE(sc, MVNETA_PSERDESCFG, MVNETA_PSERDESCFG_SGMII);
+		break;
+	case MVNETA_PHY_RGMII:
+	case MVNETA_PHY_RGMII_ID:
+		reg |= MVNETA_PMACC2_RGMIIEN;
+		break;
+	}
+	reg |= MVNETA_PMACC2_MUSTSET;
+	reg &= ~MVNETA_PMACC2_PORTMACRESET;
+	MVNETA_WRITE(sc, MVNETA_PMACC2, reg);
+
+	/* Port Configuration Extended: enable Tx CRC generation */
+	reg = MVNETA_READ(sc, MVNETA_PXCX);
+	reg &= ~MVNETA_PXCX_TXCRCDIS;
+	MVNETA_WRITE(sc, MVNETA_PXCX, reg);
+
+	/* clear MIB counter registers(clear by read) */
+	for (i = 0; i < nitems(mvneta_mib_list); i++) {
+		if (mvneta_mib_list[i].reg64)
+			MVNETA_READ_MIB_8(sc, mvneta_mib_list[i].regnum);
+		else
+			MVNETA_READ_MIB_4(sc, mvneta_mib_list[i].regnum);
+	}
+	MVNETA_READ(sc, MVNETA_PDFC);
+	MVNETA_READ(sc, MVNETA_POFC);
+
+	/* Set SDC register except IPGINT bits */
+	reg = MVNETA_SDC_RXBSZ_16_64BITWORDS;
+	reg |= MVNETA_SDC_TXBSZ_16_64BITWORDS;
+	reg |= MVNETA_SDC_BLMR;
+	reg |= MVNETA_SDC_BLMT;
+	MVNETA_WRITE(sc, MVNETA_SDC, reg);
+
+	return (0);
+}
+
+/*
+ * bus_dmamap_load() callback: store the bus address of the (single)
+ * DMA segment into the caller-supplied bus_addr_t. No-op on error.
+ */
+STATIC void
+mvneta_dmamap_cb(void *arg, bus_dma_segment_t * segs, int nseg, int error)
+{
+
+	if (__predict_false(error))
+		return;
+	*(bus_addr_t *)arg = segs->ds_addr;
+}
+
+/*
+ * Allocate the Rx descriptor ring for queue q: ring mutex, DMA-safe
+ * descriptor memory (loaded to obtain rx->desc_pa), and one busdma map
+ * per ring slot. On any failure everything allocated so far is torn
+ * down via mvneta_ring_dealloc_rx_queue().
+ */
+STATIC int
+mvneta_ring_alloc_rx_queue(struct mvneta_softc *sc, int q)
+{
+	struct mvneta_rx_ring *rx;
+	struct mvneta_buf *rxbuf;
+	bus_dmamap_t dmap;
+	int i, error;
+
+	if (q >= MVNETA_RX_QNUM_MAX)
+		return (EINVAL);
+
+	rx = MVNETA_RX_RING(sc, q);
+	mtx_init(&rx->ring_mtx, "mvneta_rx", NULL, MTX_DEF);
+	/* Allocate DMA memory for Rx descriptors */
+	error = bus_dmamem_alloc(sc->rx_dtag,
+	    (void**)&(rx->desc),
+	    BUS_DMA_NOWAIT | BUS_DMA_ZERO,
+	    &rx->desc_map);
+	if (error != 0 || rx->desc == NULL)
+		goto fail;
+	error = bus_dmamap_load(sc->rx_dtag, rx->desc_map,
+	    rx->desc,
+	    sizeof(struct mvneta_rx_desc) * MVNETA_RX_RING_CNT,
+	    mvneta_dmamap_cb, &rx->desc_pa, BUS_DMA_NOWAIT);
+	if (error != 0)
+		goto fail;
+
+	/* One DMA map per ring entry for the mbufs loaded later. */
+	for (i = 0; i < MVNETA_RX_RING_CNT; i++) {
+		error = bus_dmamap_create(sc->rxbuf_dtag, 0, &dmap);
+		if (error != 0) {
+			device_printf(sc->dev,
+			    "Failed to create DMA map for Rx buffer num: %d\n", i);
+			goto fail;
+		}
+		rxbuf = &rx->rxbuf[i];
+		rxbuf->dmap = dmap;
+		rxbuf->m = NULL;
+	}
+
+	return (0);
+fail:
+	mvneta_ring_dealloc_rx_queue(sc, q);
+	device_printf(sc->dev, "DMA Ring buffer allocation failure.\n");
+	return (error);
+}
+
+/*
+ * Allocate the Tx descriptor ring for queue q: ring mutex, DMA-safe
+ * descriptor memory (loaded to obtain tx->desc_pa) and, with
+ * MVNETA_MULTIQUEUE, the per-queue buf_ring. Cleans up via
+ * mvneta_ring_dealloc_tx_queue() on failure.
+ */
+STATIC int
+mvneta_ring_alloc_tx_queue(struct mvneta_softc *sc, int q)
+{
+	struct mvneta_tx_ring *tx;
+	int error;
+
+	if (q >= MVNETA_TX_QNUM_MAX)
+		return (EINVAL);
+	tx = MVNETA_TX_RING(sc, q);
+	mtx_init(&tx->ring_mtx, "mvneta_tx", NULL, MTX_DEF);
+	error = bus_dmamem_alloc(sc->tx_dtag,
+	    (void**)&(tx->desc),
+	    BUS_DMA_NOWAIT | BUS_DMA_ZERO,
+	    &tx->desc_map);
+	if (error != 0 || tx->desc == NULL)
+		goto fail;
+	error = bus_dmamap_load(sc->tx_dtag, tx->desc_map,
+	    tx->desc,
+	    sizeof(struct mvneta_tx_desc) * MVNETA_TX_RING_CNT,
+	    mvneta_dmamap_cb, &tx->desc_pa, BUS_DMA_NOWAIT);
+	if (error != 0)
+		goto fail;
+
+#ifdef MVNETA_MULTIQUEUE
+	tx->br = buf_ring_alloc(MVNETA_BUFRING_SIZE, M_DEVBUF, M_NOWAIT,
+	    &tx->ring_mtx);
+	if (tx->br == NULL) {
+		device_printf(sc->dev,
+		    "Could not setup buffer ring for TxQ(%d)\n", q);
+		error = ENOMEM;
+		goto fail;
+	}
+#endif
+
+	return (0);
+fail:
+	mvneta_ring_dealloc_tx_queue(sc, q);
+	device_printf(sc->dev, "DMA Ring buffer allocation failure.\n");
+	return (error);
+}
+
+/*
+ * Tear down everything mvneta_ring_alloc_tx_queue()/_init_tx_queue()
+ * set up for queue q: drain the taskqueue task, free the buf_ring,
+ * flush and destroy per-mbuf DMA maps, unload/free the descriptor
+ * memory, and destroy the ring mutex. Safe to call on a partially
+ * constructed queue (each step checks whether its resource exists).
+ */
+STATIC void
+mvneta_ring_dealloc_tx_queue(struct mvneta_softc *sc, int q)
+{
+	struct mvneta_tx_ring *tx;
+	struct mvneta_buf *txbuf;
+	void *kva;
+	int error;
+	int i;
+
+	if (q >= MVNETA_TX_QNUM_MAX)
+		return;
+	tx = MVNETA_TX_RING(sc, q);
+
+	if (tx->taskq != NULL) {
+		/* Remove task */
+		while (taskqueue_cancel(tx->taskq, &tx->task, NULL) != 0)
+			taskqueue_drain(tx->taskq, &tx->task);
+	}
+#ifdef MVNETA_MULTIQUEUE
+	if (tx->br != NULL)
+		drbr_free(tx->br, M_DEVBUF);
+#endif
+
+	if (sc->txmbuf_dtag != NULL) {
+		if (mtx_name(&tx->ring_mtx) != NULL) {
+			/*
+			 * It is assumed that maps are being loaded after mutex
+			 * is initialized. Therefore we can skip unloading maps
+			 * when mutex is empty.
+			 */
+			mvneta_tx_lockq(sc, q);
+			mvneta_ring_flush_tx_queue(sc, q);
+			mvneta_tx_unlockq(sc, q);
+		}
+		for (i = 0; i < MVNETA_TX_RING_CNT; i++) {
+			txbuf = &tx->txbuf[i];
+			if (txbuf->dmap != NULL) {
+				error = bus_dmamap_destroy(sc->txmbuf_dtag,
+				    txbuf->dmap);
+				if (error != 0) {
+					panic("%s: map busy for Tx descriptor (Q%d, %d)",
+					    __func__, q, i);
+				}
+			}
+		}
+	}
+
+	if (tx->desc_pa != 0)
+		bus_dmamap_unload(sc->tx_dtag, tx->desc_map);
+
+	kva = (void *)tx->desc;
+	if (kva != NULL)
+		bus_dmamem_free(sc->tx_dtag, tx->desc, tx->desc_map);
+
+	if (mtx_name(&tx->ring_mtx) != NULL)
+		mtx_destroy(&tx->ring_mtx);
+
+	/* Zero the ring so a later re-alloc sees a clean slate. */
+	memset(tx, 0, sizeof(*tx));
+}
+
+/*
+ * Tear down the Rx resources for queue q: free queued mbufs, unload
+ * and free the descriptor memory, release the LRO context and destroy
+ * the ring mutex. Tolerates partially constructed queues.
+ */
+STATIC void
+mvneta_ring_dealloc_rx_queue(struct mvneta_softc *sc, int q)
+{
+	struct mvneta_rx_ring *rx;
+	struct lro_ctrl *lro;
+	void *kva;
+
+	if (q >= MVNETA_RX_QNUM_MAX)
+		return;
+
+	rx = MVNETA_RX_RING(sc, q);
+
+	mvneta_ring_flush_rx_queue(sc, q);
+
+	if (rx->desc_pa != 0)
+		bus_dmamap_unload(sc->rx_dtag, rx->desc_map);
+
+	kva = (void *)rx->desc;
+	if (kva != NULL)
+		bus_dmamem_free(sc->rx_dtag, rx->desc, rx->desc_map);
+
+	lro = &rx->lro;
+	tcp_lro_free(lro);
+
+	if (mtx_name(&rx->ring_mtx) != NULL)
+		mtx_destroy(&rx->ring_mtx);
+
+	/* Zero the ring so a later re-alloc sees a clean slate. */
+	memset(rx, 0, sizeof(*rx));
+}
+
+/*
+ * Software-side (re)initialization of Rx queue q: reset ring indices,
+ * set interrupt coalescing thresholds and set up LRO if the interface
+ * has IFCAP_LRO enabled. Always returns 0; LRO setup failure is only
+ * reported, not fatal.
+ */
+STATIC int
+mvneta_ring_init_rx_queue(struct mvneta_softc *sc, int q)
+{
+	struct mvneta_rx_ring *rx;
+	struct lro_ctrl *lro;
+	int error;
+
+	if (q >= MVNETA_RX_QNUM_MAX)
+		return (0);
+
+	rx = MVNETA_RX_RING(sc, q);
+	rx->dma = rx->cpu = 0;
+	rx->queue_th_received = MVNETA_RXTH_COUNT;
+	rx->queue_th_time = (get_tclk() / 1000) / 10; /* 0.1 [ms] */
+
+	/* Initialize LRO */
+	rx->lro_enabled = FALSE;
+	if ((sc->ifp->if_capenable & IFCAP_LRO) != 0) {
+		lro = &rx->lro;
+		error = tcp_lro_init(lro);
+		if (error != 0)
+			device_printf(sc->dev, "LRO Initialization failed!\n");
+		else {
+			rx->lro_enabled = TRUE;
+			lro->ifp = sc->ifp;
+		}
+	}
+
+	return (0);
+}
+
+/*
+ * Software-side (re)initialization of Tx queue q: create a busdma map
+ * per ring slot for bus_dmamap_load_mbuf(), reset ring indices and
+ * state, and start the per-queue deferred-transmit taskqueue.
+ * Returns 0 on success or the bus_dmamap_create() error.
+ */
+STATIC int
+mvneta_ring_init_tx_queue(struct mvneta_softc *sc, int q)
+{
+	struct mvneta_tx_ring *tx;
+	struct mvneta_buf *txbuf;
+	int i, error;
+
+	if (q >= MVNETA_TX_QNUM_MAX)
+		return (0);
+
+	tx = MVNETA_TX_RING(sc, q);
+
+	/* Tx handle */
+	for (i = 0; i < MVNETA_TX_RING_CNT; i++) {
+		txbuf = &tx->txbuf[i];
+		txbuf->m = NULL;
+		/* Tx handle needs DMA map for busdma_load_mbuf() */
+		error = bus_dmamap_create(sc->txmbuf_dtag, 0,
+		    &txbuf->dmap);
+		if (error != 0) {
+			device_printf(sc->dev,
+			    "can't create dma map (tx ring %d)\n", i);
+			return (error);
+		}
+	}
+	tx->dma = tx->cpu = 0;
+	tx->used = 0;
+	tx->drv_error = 0;
+	tx->queue_status = MVNETA_QUEUE_DISABLED;
+	tx->queue_hung = FALSE;
+
+	tx->ifp = sc->ifp;
+	tx->qidx = q;
+	TASK_INIT(&tx->task, 0, mvneta_tx_task, tx);
+	tx->taskq = taskqueue_create_fast("mvneta_tx_taskq", M_WAITOK,
+	    taskqueue_thread_enqueue, &tx->taskq);
+	taskqueue_start_threads(&tx->taskq, 1, PI_NET, "%s: tx_taskq(%d)",
+	    device_get_nameunit(sc->dev), q);
+
+	return (0);
+}
+
+/*
+ * Drop every in-flight mbuf on Tx queue q (unload its DMA map and free
+ * it) and reset the ring indices. Caller must hold the Tx ring lock.
+ */
+STATIC void
+mvneta_ring_flush_tx_queue(struct mvneta_softc *sc, int q)
+{
+	struct mvneta_tx_ring *tx;
+	struct mvneta_buf *txbuf;
+	int i;
+
+	tx = MVNETA_TX_RING(sc, q);
+	KASSERT_TX_MTX(sc, q);
+
+	/* Tx handle */
+	for (i = 0; i < MVNETA_TX_RING_CNT; i++) {
+		txbuf = &tx->txbuf[i];
+		bus_dmamap_unload(sc->txmbuf_dtag, txbuf->dmap);
+		if (txbuf->m != NULL) {
+			m_freem(txbuf->m);
+			txbuf->m = NULL;
+		}
+	}
+	tx->dma = tx->cpu = 0;
+	tx->used = 0;
+}
+
+/*
+ * Free every receive buffer on Rx queue q and reset the ring indices.
+ * Caller must hold the Rx ring lock.
+ */
+STATIC void
+mvneta_ring_flush_rx_queue(struct mvneta_softc *sc, int q)
+{
+	struct mvneta_rx_ring *rx;
+	struct mvneta_buf *rxbuf;
+	int i;
+
+	rx = MVNETA_RX_RING(sc, q);
+	KASSERT_RX_MTX(sc, q);
+
+	/* Rx handle */
+	for (i = 0; i < MVNETA_RX_RING_CNT; i++) {
+		rxbuf = &rx->rxbuf[i];
+		mvneta_rx_buf_free(sc, rxbuf);
+	}
+	rx->dma = rx->cpu = 0;
+}
+
+/*
+ * Rx/Tx Queue Control
+ */
+
+/*
+ * Program the hardware registers of Rx queue q: descriptor ring base
+ * address, buffer/ring sizes and packet offset. Caller must hold the
+ * Rx ring lock. Returns 0.
+ */
+STATIC int
+mvneta_rx_queue_init(struct ifnet *ifp, int q)
+{
+	struct mvneta_softc *sc;
+	struct mvneta_rx_ring *rx;
+	uint32_t reg;
+
+	sc = ifp->if_softc;
+	KASSERT_RX_MTX(sc, q);
+	rx =  MVNETA_RX_RING(sc, q);
+	DASSERT(rx->desc_pa != 0);
+
+	/* descriptor address */
+	MVNETA_WRITE(sc, MVNETA_PRXDQA(q), rx->desc_pa);
+
+	/* Rx buffer size and descriptor ring size */
+	reg  = MVNETA_PRXDQS_BUFFERSIZE(MVNETA_PACKET_SIZE >> 3);
+	reg |= MVNETA_PRXDQS_DESCRIPTORSQUEUESIZE(MVNETA_RX_RING_CNT);
+	MVNETA_WRITE(sc, MVNETA_PRXDQS(q), reg);
+#ifdef MVNETA_KTR
+	CTR3(KTR_SPARE2, "%s PRXDQS(%d): %#x", ifp->if_xname, q,
+	    MVNETA_READ(sc, MVNETA_PRXDQS(q)));
+#endif
+	/* Rx packet offset address */
+	reg = MVNETA_PRXC_PACKETOFFSET(MVNETA_PACKET_OFFSET >> 3);
+	MVNETA_WRITE(sc, MVNETA_PRXC(q), reg);
+#ifdef MVNETA_KTR
+	CTR3(KTR_SPARE2, "%s PRXC(%d): %#x", ifp->if_xname, q,
+	    MVNETA_READ(sc, MVNETA_PRXC(q)));
+#endif
+
+	/* if DMA is not working, register is not updated */
+	DASSERT(MVNETA_READ(sc, MVNETA_PRXDQA(q)) == rx->desc_pa);
+	return (0);
+}
+
+/*
+ * Program the hardware registers of Tx queue q: descriptor ring base
+ * address and ring size. Caller must hold the Tx ring lock. Returns 0.
+ */
+STATIC int
+mvneta_tx_queue_init(struct ifnet *ifp, int q)
+{
+	struct mvneta_softc *sc;
+	struct mvneta_tx_ring *tx;
+	uint32_t reg;
+
+	sc = ifp->if_softc;
+	KASSERT_TX_MTX(sc, q);
+	tx = MVNETA_TX_RING(sc, q);
+	DASSERT(tx->desc_pa != 0);
+
+	/* descriptor address */
+	MVNETA_WRITE(sc, MVNETA_PTXDQA(q), tx->desc_pa);
+
+	/* descriptor ring size */
+	reg = MVNETA_PTXDQS_DQS(MVNETA_TX_RING_CNT);
+	MVNETA_WRITE(sc, MVNETA_PTXDQS(q), reg);
+
+	/* if DMA is not working, register is not updated */
+	DASSERT(MVNETA_READ(sc, MVNETA_PTXDQA(q)) == tx->desc_pa);
+	return (0);
+}
+
+/*
+ * Enable Rx queue q in hardware: program interrupt coalescing
+ * thresholds, unmask its RXTX_TH interrupt and set the queue-enable
+ * bit. Caller must hold the Rx ring lock. Returns 0.
+ */
+STATIC int
+mvneta_rx_queue_enable(struct ifnet *ifp, int q)
+{
+	struct mvneta_softc *sc;
+	struct mvneta_rx_ring *rx;
+	uint32_t reg;
+
+	sc = ifp->if_softc;
+	rx = MVNETA_RX_RING(sc, q);
+	KASSERT_RX_MTX(sc, q);
+
+	/* Set Rx interrupt threshold */
+	reg  = MVNETA_PRXDQTH_ODT(rx->queue_th_received);
+	MVNETA_WRITE(sc, MVNETA_PRXDQTH(q), reg);
+
+	reg  = MVNETA_PRXITTH_RITT(rx->queue_th_time);
+	MVNETA_WRITE(sc, MVNETA_PRXITTH(q), reg);
+
+	/* Unmask RXTX_TH Intr. */
+	reg = MVNETA_READ(sc, MVNETA_PRXTXTIM);
+	reg |= MVNETA_PRXTXTI_RBICTAPQ(q); /* Rx Buffer Interrupt Coalesce */
+	MVNETA_WRITE(sc, MVNETA_PRXTXTIM, reg);
+
+	/* Enable Rx queue */
+	reg = MVNETA_READ(sc, MVNETA_RQC) & MVNETA_RQC_EN_MASK;
+	reg |= MVNETA_RQC_ENQ(q);
+	MVNETA_WRITE(sc, MVNETA_RQC, reg);
+
+	rx->queue_status = MVNETA_QUEUE_WORKING;
+	return (0);
+}
+
+/*
+ * Enable Tx queue q in hardware and mark it idle (not yet
+ * transmitting). Caller must hold the Tx ring lock. Returns 0.
+ */
+STATIC int
+mvneta_tx_queue_enable(struct ifnet *ifp, int q)
+{
+	struct mvneta_softc *sc;
+	struct mvneta_tx_ring *tx;
+
+	sc = ifp->if_softc;
+	tx = MVNETA_TX_RING(sc, q);
+	KASSERT_TX_MTX(sc, q);
+
+	/* Enable Tx queue */
+	MVNETA_WRITE(sc, MVNETA_TQC, MVNETA_TQC_ENQ(q));
+
+	tx->queue_status = MVNETA_QUEUE_IDLE;
+	tx->queue_hung = FALSE;
+	return (0);
+}
+
+/* Acquire the ring mutex of Rx queue q. */
+STATIC __inline void
+mvneta_rx_lockq(struct mvneta_softc *sc, int q)
+{
+
+	DASSERT(q >= 0);
+	DASSERT(q < MVNETA_RX_QNUM_MAX);
+	mtx_lock(&sc->rx_ring[q].ring_mtx);
+}
+
+/* Release the ring mutex of Rx queue q. */
+STATIC __inline void
+mvneta_rx_unlockq(struct mvneta_softc *sc, int q)
+{
+
+	DASSERT(q >= 0);
+	DASSERT(q < MVNETA_RX_QNUM_MAX);
+	mtx_unlock(&sc->rx_ring[q].ring_mtx);
+}
+
+/* Try to acquire the ring mutex of Tx queue q; nonzero on success. */
+STATIC __inline int __unused
+mvneta_tx_trylockq(struct mvneta_softc *sc, int q)
+{
+
+	DASSERT(q >= 0);
+	DASSERT(q < MVNETA_TX_QNUM_MAX);
+	return (mtx_trylock(&sc->tx_ring[q].ring_mtx));
+}
+
+/* Acquire the ring mutex of Tx queue q. */
+STATIC __inline void
+mvneta_tx_lockq(struct mvneta_softc *sc, int q)
+{
+
+	DASSERT(q >= 0);
+	DASSERT(q < MVNETA_TX_QNUM_MAX);
+	mtx_lock(&sc->tx_ring[q].ring_mtx);
+}
+
+/* Release the ring mutex of Tx queue q. */
+STATIC __inline void
+mvneta_tx_unlockq(struct mvneta_softc *sc, int q)
+{
+
+	DASSERT(q >= 0);
+	DASSERT(q < MVNETA_TX_QNUM_MAX);
+	mtx_unlock(&sc->tx_ring[q].ring_mtx);
+}
+
+/*
+ * Interrupt Handlers
+ */
+
+/* Mask and clear every interrupt source of the port. */
+STATIC void
+mvneta_disable_intr(struct mvneta_softc *sc)
+{
+
+	MVNETA_WRITE(sc, MVNETA_EUIM, 0);
+	MVNETA_WRITE(sc, MVNETA_EUIC, 0);
+	MVNETA_WRITE(sc, MVNETA_PRXTXTIM, 0);
+	MVNETA_WRITE(sc, MVNETA_PRXTXTIC, 0);
+	MVNETA_WRITE(sc, MVNETA_PRXTXIM, 0);
+	MVNETA_WRITE(sc, MVNETA_PRXTXIC, 0);
+	MVNETA_WRITE(sc, MVNETA_PMIM, 0);
+	MVNETA_WRITE(sc, MVNETA_PMIC, 0);
+	MVNETA_WRITE(sc, MVNETA_PIE, 0);
+}
+
+/*
+ * Unmask the interrupts the driver uses: the RXTX_TH summary bit, the
+ * port MISC (link-state) interrupts when in-band status is in use, and
+ * all per-queue Rx/Tx packet interrupts.
+ */
+STATIC void
+mvneta_enable_intr(struct mvneta_softc *sc)
+{
+	uint32_t reg;
+
+	/* Enable Summary Bit to check all interrupt cause. */
+	reg = MVNETA_READ(sc, MVNETA_PRXTXTIM);
+	reg |= MVNETA_PRXTXTI_PMISCICSUMMARY;
+	MVNETA_WRITE(sc, MVNETA_PRXTXTIM, reg);
+
+	if (sc->use_inband_status) {
+		/* Enable Port MISC Intr. (via RXTX_TH_Summary bit) */
+		MVNETA_WRITE(sc, MVNETA_PMIM, MVNETA_PMI_PHYSTATUSCHNG |
+		    MVNETA_PMI_LINKCHANGE | MVNETA_PMI_PSCSYNCCHANGE);
+	}
+
+	/* Enable All Queue Interrupt */
+	reg = MVNETA_READ(sc, MVNETA_PIE);
+	reg |= MVNETA_PIE_RXPKTINTRPTENB_MASK;
+	reg |= MVNETA_PIE_TXPKTINTRPTENB_MASK;
+	MVNETA_WRITE(sc, MVNETA_PIE, reg);
+}
+
+/*
+ * RXTX_TH interrupt handler: read and acknowledge the cause register,
+ * service any MISC (link) events first, then run the Rx path for the
+ * queues flagged in the cause bits.
+ */
+STATIC void
+mvneta_rxtxth_intr(void *arg)
+{
+	struct mvneta_softc *sc;
+	struct ifnet *ifp;
+	uint32_t ic, queues;
+
+	sc = arg;
+	ifp = sc->ifp;
+#ifdef MVNETA_KTR
+	CTR1(KTR_SPARE2, "%s got RXTX_TH_Intr", ifp->if_xname);
+#endif
+	ic = MVNETA_READ(sc, MVNETA_PRXTXTIC);
+	if (ic == 0)
+		return;
+	/* Write-to-clear: zero only the bits we observed. */
+	MVNETA_WRITE(sc, MVNETA_PRXTXTIC, ~ic);
+
+	/* Ack maintenance interrupt first */
+	if (__predict_false((ic & MVNETA_PRXTXTI_PMISCICSUMMARY) &&
+	    sc->use_inband_status)) {
+		mvneta_sc_lock(sc);
+		mvneta_misc_intr(sc);
+		mvneta_sc_unlock(sc);
+	}
+	if (__predict_false(!(ifp->if_drv_flags & IFF_DRV_RUNNING)))
+		return;
+	/* RxTxTH interrupt */
+	queues = MVNETA_PRXTXTI_GET_RBICTAPQ(ic);
+	if (__predict_true(queues)) {
+#ifdef MVNETA_KTR
+		CTR1(KTR_SPARE2, "%s got PRXTXTIC: +RXEOF", ifp->if_xname);
+#endif
+		/* At the moment the driver support only one RX queue. */
+		DASSERT(MVNETA_IS_QUEUE_SET(queues, 0));
+		mvneta_rx(sc, 0, 0);
+	}
+}
+
+/*
+ * Service port MISC interrupts (PHY status / link / sync changes),
+ * looping until no unmasked cause bits remain. Caller must hold the
+ * softc lock. Returns 1 if any interrupt was handled, else 0.
+ */
+STATIC int
+mvneta_misc_intr(struct mvneta_softc *sc)
+{
+	uint32_t ic;
+	int claimed = 0;
+
+#ifdef MVNETA_KTR
+	CTR1(KTR_SPARE2, "%s got MISC_INTR", sc->ifp->if_xname);
+#endif
+	KASSERT_SC_MTX(sc);
+
+	for (;;) {
+		ic = MVNETA_READ(sc, MVNETA_PMIC);
+		ic &= MVNETA_READ(sc, MVNETA_PMIM);
+		if (ic == 0)
+			break;
+		MVNETA_WRITE(sc, MVNETA_PMIC, ~ic);
+		claimed = 1;
+
+		if (ic & (MVNETA_PMI_PHYSTATUSCHNG |
+		    MVNETA_PMI_LINKCHANGE | MVNETA_PMI_PSCSYNCCHANGE))
+			mvneta_link_isr(sc);
+	}
+	return (claimed);
+}
+
+/*
+ * Periodic (1 Hz) housekeeping callout: reclaim completed Tx mbufs,
+ * update MIB counters, poll the PHY, refill Rx rings that ran dry, and
+ * act as Tx watchdog. On a detected hang the port is stopped and
+ * reinitialized; otherwise the callout reschedules itself.
+ */
+STATIC void
+mvneta_tick(void *arg)
+{
+	struct mvneta_softc *sc;
+	struct mvneta_tx_ring *tx;
+	struct mvneta_rx_ring *rx;
+	int q;
+	uint32_t fc_prev, fc_curr;
+
+	sc = arg;
+
+	/*
+	 * This is done before mib update to get the right stats
+	 * for this tick.
+	 */
+	mvneta_tx_drain(sc);
+
+	/* Extract previous flow-control frame received counter. */
+	fc_prev = sc->sysctl_mib[MVNETA_MIB_FC_GOOD_IDX].counter;
+	/* Read mib registers (clear by read). */
+	mvneta_update_mib(sc);
+	/* Extract current flow-control frame received counter. */
+	fc_curr = sc->sysctl_mib[MVNETA_MIB_FC_GOOD_IDX].counter;
+
+
+	if (sc->phy_attached && sc->ifp->if_flags & IFF_UP) {
+		mvneta_sc_lock(sc);
+		mii_tick(sc->mii);
+
+		/* Adjust MAC settings */
+		mvneta_adjust_link(sc);
+		mvneta_sc_unlock(sc);
+	}
+
+	/*
+	 * We were unable to refill the rx queue and left the rx func, leaving
+	 * the ring without mbuf and no way to call the refill func.
+	 */
+	for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) {
+		rx = MVNETA_RX_RING(sc, q);
+		if (rx->needs_refill == TRUE) {
+			mvneta_rx_lockq(sc, q);
+			mvneta_rx_queue_refill(sc, q);
+			mvneta_rx_unlockq(sc, q);
+		}
+	}
+
+	/*
+	 * Watchdog:
+	 * - check if queue is mark as hung.
+	 * - ignore hung status if we received some pause frame
+	 *   as hardware may have paused packet transmit.
+	 */
+	for (q = 0; q < MVNETA_TX_QNUM_MAX; q++) {
+		/*
+		 * We should take queue lock, but as we only read
+		 * queue status we can do it without lock, we may
+		 * only misdetect queue status for one tick.
+		 */
+		tx = MVNETA_TX_RING(sc, q);
+
+		if (tx->queue_hung && (fc_curr - fc_prev) == 0)
+			goto timeout;
+	}
+
+	callout_schedule(&sc->tick_ch, hz);
+	return;
+
+timeout:
+	if_printf(sc->ifp, "watchdog timeout\n");
+
+	mvneta_sc_lock(sc);
+	sc->counter_watchdog++;
+	sc->counter_watchdog_mib++;
+	/* Trigger reinitialize sequence. */
+	mvneta_stop_locked(sc);
+	mvneta_init_locked(sc);
+	mvneta_sc_unlock(sc);
+}
+
+/*
+ * if_qflush method: in multiqueue mode drop everything queued in each
+ * Tx buf_ring, then flush the generic interface send queue.
+ */
+STATIC void
+mvneta_qflush(struct ifnet *ifp)
+{
+#ifdef MVNETA_MULTIQUEUE
+	struct mvneta_softc *sc;
+	struct mvneta_tx_ring *tx;
+	struct mbuf *m;
+	size_t q;
+
+	sc = ifp->if_softc;
+
+	for (q = 0; q < MVNETA_TX_QNUM_MAX; q++) {
+		tx = MVNETA_TX_RING(sc, q);
+		mvneta_tx_lockq(sc, q);
+		while ((m = buf_ring_dequeue_sc(tx->br)) != NULL)
+			m_freem(m);
+		mvneta_tx_unlockq(sc, q);
+	}
+#endif
+	if_qflush(ifp);
+}
+
+/*
+ * Deferred-transmit taskqueue handler: drain the queue under the ring
+ * lock; on a transient error (anything but ENETDOWN) back off briefly
+ * and requeue itself.
+ */
+STATIC void
+mvneta_tx_task(void *arg, int pending)
+{
+	struct mvneta_softc *sc;
+	struct mvneta_tx_ring *tx;
+	struct ifnet *ifp;
+	int error;
+
+	tx = arg;
+	ifp = tx->ifp;
+	sc = ifp->if_softc;
+
+	mvneta_tx_lockq(sc, tx->qidx);
+	error = mvneta_xmit_locked(sc, tx->qidx);
+	mvneta_tx_unlockq(sc, tx->qidx);
+
+	/* Try again */
+	if (__predict_false(error != 0 && error != ENETDOWN)) {
+		pause("mvneta_tx_task_sleep", 1);
+		taskqueue_enqueue(tx->taskq, &tx->task);
+	}
+}
+
+/*
+ * Fast-path single-packet transmit on queue q. Caller holds the Tx
+ * ring lock. Reclaims completed descriptors when past the threshold,
+ * refuses when the ring is nearly full (ENOBUFS), frees the mbuf and
+ * returns ENETDOWN when the queue is disabled. On success *m was
+ * handed to the hardware and BPF has been tapped.
+ */
+STATIC int
+mvneta_xmitfast_locked(struct mvneta_softc *sc, int q, struct mbuf **m)
+{
+	struct mvneta_tx_ring *tx;
+	struct ifnet *ifp;
+	int error;
+
+	KASSERT_TX_MTX(sc, q);
+	tx = MVNETA_TX_RING(sc, q);
+	error = 0;
+
+	ifp = sc->ifp;
+
+	/* Don't enqueue packet if the queue is disabled. */
+	if (__predict_false(tx->queue_status == MVNETA_QUEUE_DISABLED)) {
+		m_freem(*m);
+		*m = NULL;
+		return (ENETDOWN);
+	}
+
+	/* Reclaim mbuf if above threshold. */
+	if (__predict_true(tx->used > MVNETA_TX_RECLAIM_COUNT))
+		mvneta_tx_queue_complete(sc, q);
+
+	/* Do not call transmit path if queue is already too full. */
+	if (__predict_false(tx->used >
+	    MVNETA_TX_RING_CNT - MVNETA_TX_SEGLIMIT))
+		return (ENOBUFS);
+
+	error = mvneta_tx_queue(sc, m, q);
+	if (__predict_false(error != 0))
+		return (error);
+
+	/* Send a copy of the frame to the BPF listener */
+	ETHER_BPF_MTAP(ifp, *m);
+
+	/* Set watchdog on */
+	tx->watchdog_time = ticks;
+	tx->queue_status = MVNETA_QUEUE_WORKING;
+
+	return (error);
+}
+
+#ifdef MVNETA_MULTIQUEUE
+/*
+ * if_transmit method (multiqueue): pick a Tx queue from the mbuf's
+ * flowid (queue 0 when there is none), try the lockless fast path
+ * when the buf_ring is empty, otherwise enqueue on the buf_ring and
+ * kick the per-queue taskqueue.
+ */
+STATIC int
+mvneta_transmit(struct ifnet *ifp, struct mbuf *m)
+{
+	struct mvneta_softc *sc;
+	struct mvneta_tx_ring *tx;
+	int error;
+	int q;
+
+	sc = ifp->if_softc;
+
+	/* Use default queue if there is no flow id as thread can migrate. */
+	if (__predict_true(M_HASHTYPE_GET(m) != M_HASHTYPE_NONE))
+		q = m->m_pkthdr.flowid % MVNETA_TX_QNUM_MAX;
+	else
+		q = 0;
+
+	tx = MVNETA_TX_RING(sc, q);
+
+	/* If buf_ring is full start transmit immediately. */
+	if (buf_ring_full(tx->br)) {
+		mvneta_tx_lockq(sc, q);
+		mvneta_xmit_locked(sc, q);
+		mvneta_tx_unlockq(sc, q);
+	}
+
+	/*
+	 * If the buf_ring is empty we will not reorder packets.
+	 * If the lock is available transmit without using buf_ring.
+	 */
+	if (buf_ring_empty(tx->br) && mvneta_tx_trylockq(sc, q) != 0) {
+		error = mvneta_xmitfast_locked(sc, q, &m);
+		mvneta_tx_unlockq(sc, q);
+		if (__predict_true(error == 0))
+			return (0);
+
+		/* Transmit can fail in fastpath. */
+		if (__predict_false(m == NULL))
+			return (error);
+	}
+
+	/* Enqueue then schedule taskqueue. */
+	error = drbr_enqueue(ifp, tx->br, m);
+	if (__predict_false(error != 0))
+		return (error);
+
+	taskqueue_enqueue(tx->taskq, &tx->task);
+	return (0);
+}
+
+/*
+ * Drain the buf_ring of queue q through the fast path. Caller holds
+ * the Tx ring lock. Stops on the first error, putting the mbuf back
+ * on the ring unless the fast path already consumed it. Returns the
+ * last error (0 when the ring drained completely).
+ */
+STATIC int
+mvneta_xmit_locked(struct mvneta_softc *sc, int q)
+{
+	struct ifnet *ifp;
+	struct mvneta_tx_ring *tx;
+	struct mbuf *m;
+	int error;
+
+	KASSERT_TX_MTX(sc, q);
+	ifp = sc->ifp;
+	tx = MVNETA_TX_RING(sc, q);
+	error = 0;
+
+	while ((m = drbr_peek(ifp, tx->br)) != NULL) {
+		error = mvneta_xmitfast_locked(sc, q, &m);
+		if (__predict_false(error != 0)) {
+			if (m != NULL)
+				drbr_putback(ifp, tx->br, m);
+			else
+				drbr_advance(ifp, tx->br);
+			break;
+		}
+		drbr_advance(ifp, tx->br);
+	}
+
+	return (error);
+}
+#else /* !MVNETA_MULTIQUEUE */
+/*
+ * if_start method (single queue): drain the interface send queue on
+ * queue 0; on a transient error defer the retry to the taskqueue.
+ */
+STATIC void
+mvneta_start(struct ifnet *ifp)
+{
+	struct mvneta_softc *sc;
+	struct mvneta_tx_ring *tx;
+	int error;
+
+	sc = ifp->if_softc;
+	tx = MVNETA_TX_RING(sc, 0);
+
+	mvneta_tx_lockq(sc, 0);
+	error = mvneta_xmit_locked(sc, 0);
+	mvneta_tx_unlockq(sc, 0);
+	/* Handle retransmit in the background taskq. */
+	if (__predict_false(error != 0 && error != ENETDOWN))
+		taskqueue_enqueue(tx->taskq, &tx->task);
+}
+
+/*
+ * Drain the interface send queue through the fast path on queue q.
+ * Caller holds the Tx ring lock. On error the mbuf is prepended back
+ * to the send queue. Returns the last error (0 if fully drained).
+ *
+ * NOTE(review): 'tx' is fetched with a hard-coded queue 0 (not q) and
+ * is never used afterwards; in this !MVNETA_MULTIQUEUE build the only
+ * caller passes q == 0, so this is harmless but looks like leftover
+ * code — confirm and consider removing.
+ */
+STATIC int
+mvneta_xmit_locked(struct mvneta_softc *sc, int q)
+{
+	struct ifnet *ifp;
+	struct mvneta_tx_ring *tx;
+	struct mbuf *m;
+	int error;
+
+	KASSERT_TX_MTX(sc, q);
+	ifp = sc->ifp;
+	tx = MVNETA_TX_RING(sc, 0);
+	error = 0;
+
+	while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
+		IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
+		if (m == NULL)
+			break;
+
+		error = mvneta_xmitfast_locked(sc, q, &m);
+		if (__predict_false(error != 0)) {
+			if (m != NULL)
+				IFQ_DRV_PREPEND(&ifp->if_snd, m);
+			break;
+		}
+	}
+
+	return (error);
+}
+#endif
+
+/*
+ * if_ioctl method. Handles interface flag changes (promiscuous filter
+ * update or init/stop), capability toggles (TX csum, LRO), media gets/
+ * sets (rejecting half-duplex 1000/2500Base-T; SIOCSIFMEDIA falls
+ * through to the shared ifmedia_ioctl() call) and MTU changes (which
+ * may disable TX csum and trigger a reinit). Everything else goes to
+ * ether_ioctl().
+ */
+STATIC int
+mvneta_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
+{
+	struct mvneta_softc *sc;
+	struct mvneta_rx_ring *rx;
+	struct ifreq *ifr;
+	int error, mask;
+	uint32_t flags;
+	int q;
+
+	error = 0;
+	sc = ifp->if_softc;
+	ifr = (struct ifreq *)data;
+	switch (cmd) {
+	case SIOCSIFFLAGS:
+		mvneta_sc_lock(sc);
+		if (ifp->if_flags & IFF_UP) {
+			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
+				flags = ifp->if_flags ^ sc->mvneta_if_flags;
+
+				if (flags != 0)
+					sc->mvneta_if_flags = ifp->if_flags;
+
+				if ((flags & IFF_PROMISC) != 0)
+					mvneta_filter_setup(sc);
+			} else {
+				mvneta_init_locked(sc);
+				sc->mvneta_if_flags = ifp->if_flags;
+				if (sc->phy_attached)
+					mii_mediachg(sc->mii);
+				mvneta_sc_unlock(sc);
+				break;
+			}
+		} else if (ifp->if_drv_flags & IFF_DRV_RUNNING)
+			mvneta_stop_locked(sc);
+
+		sc->mvneta_if_flags = ifp->if_flags;
+		mvneta_sc_unlock(sc);
+		break;
+	case SIOCSIFCAP:
+		/* TX csum cannot be offered above the csum-capable MTU. */
+		if (ifp->if_mtu > MVNETA_MAX_CSUM_MTU &&
+		    ifr->ifr_reqcap & IFCAP_TXCSUM)
+			ifr->ifr_reqcap &= ~IFCAP_TXCSUM;
+		mask = ifp->if_capenable ^ ifr->ifr_reqcap;
+		if (mask & IFCAP_HWCSUM) {
+			ifp->if_capenable &= ~IFCAP_HWCSUM;
+			ifp->if_capenable |= IFCAP_HWCSUM & ifr->ifr_reqcap;
+			if (ifp->if_capenable & IFCAP_TXCSUM)
+				ifp->if_hwassist = CSUM_IP | CSUM_TCP |
+				    CSUM_UDP;
+			else
+				ifp->if_hwassist = 0;
+		}
+		if (mask & IFCAP_LRO) {
+			mvneta_sc_lock(sc);
+			ifp->if_capenable ^= IFCAP_LRO;
+			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
+				for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) {
+					rx = MVNETA_RX_RING(sc, q);
+					rx->lro_enabled = !rx->lro_enabled;
+				}
+			}
+			mvneta_sc_unlock(sc);
+		}
+		VLAN_CAPABILITIES(ifp);
+		break;
+	case SIOCSIFMEDIA:
+		if ((IFM_SUBTYPE(ifr->ifr_media) == IFM_1000_T ||
+		    IFM_SUBTYPE(ifr->ifr_media) == IFM_2500_T) &&
+		    (ifr->ifr_media & IFM_FDX) == 0) {
+			device_printf(sc->dev,
+			    "%s half-duplex unsupported\n",
+			    IFM_SUBTYPE(ifr->ifr_media) == IFM_1000_T ?
+			    "1000Base-T" :
+			    "2500Base-T");
+			error = EINVAL;
+			break;
+		}
+	case SIOCGIFMEDIA: /* FALLTHROUGH */
+		if (!sc->phy_attached)
+			error = ifmedia_ioctl(ifp, ifr, &sc->mvneta_ifmedia,
+			    cmd);
+		else
+			error = ifmedia_ioctl(ifp, ifr, &sc->mii->mii_media,
+			    cmd);
+		break;
+	case SIOCSIFMTU:
+		if (ifr->ifr_mtu < 68 || ifr->ifr_mtu > MVNETA_MAX_FRAME -
+		    MVNETA_ETHER_SIZE) {
+			error = EINVAL;
+		} else {
+			ifp->if_mtu = ifr->ifr_mtu;
+			mvneta_sc_lock(sc);
+			if (ifp->if_mtu > MVNETA_MAX_CSUM_MTU) {
+				ifp->if_capenable &= ~IFCAP_TXCSUM;
+				ifp->if_hwassist = 0;
+			} else {
+				ifp->if_capenable |= IFCAP_TXCSUM;
+				ifp->if_hwassist = CSUM_IP | CSUM_TCP |
+					CSUM_UDP;
+			}
+
+			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
+				/* Trigger reinitialize sequence */
+				mvneta_stop_locked(sc);
+				mvneta_init_locked(sc);
+			}
+			mvneta_sc_unlock(sc);
+		}
+		break;
+
+	default:
+		error = ether_ioctl(ifp, cmd, data);
+		break;
+	}
+
+	return (error);
+}
+
+/*
+ * Bring the interface up (softc lock held): program the MAC address
+ * and Rx filter, release the DMA engines from reset, enable the port,
+ * open all queues to every CPU, refill the Rx rings, unmask interrupts
+ * and start the tick callout. No-op if not attached or already running.
+ */
+STATIC void
+mvneta_init_locked(void *arg)
+{
+	struct mvneta_softc *sc;
+	struct ifnet *ifp;
+	uint32_t reg;
+	int q, cpu;
+
+	sc = arg;
+	ifp = sc->ifp;
+
+	if (!device_is_attached(sc->dev) ||
+	    (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
+		return;
+
+	mvneta_disable_intr(sc);
+	callout_stop(&sc->tick_ch);
+
+	/* Get the latest mac address */
+	bcopy(IF_LLADDR(ifp), sc->enaddr, ETHER_ADDR_LEN);
+	mvneta_set_mac_address(sc, sc->enaddr);
+	mvneta_filter_setup(sc);
+
+	/* Start DMA Engine */
+	MVNETA_WRITE(sc, MVNETA_PRXINIT, 0x00000000);
+	MVNETA_WRITE(sc, MVNETA_PTXINIT, 0x00000000);
+	MVNETA_WRITE(sc, MVNETA_PACC, MVNETA_PACC_ACCELERATIONMODE_EDM);
+
+	/* Enable port */
+	reg  = MVNETA_READ(sc, MVNETA_PMACC0);
+	reg |= MVNETA_PMACC0_PORTEN;
+	MVNETA_WRITE(sc, MVNETA_PMACC0, reg);
+
+	/* Allow access to each TXQ/RXQ from both CPU's */
+	for (cpu = 0; cpu < mp_ncpus; ++cpu)
+		MVNETA_WRITE(sc, MVNETA_PCP2Q(cpu),
+		    MVNETA_PCP2Q_TXQEN_MASK | MVNETA_PCP2Q_RXQEN_MASK);
+
+	for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) {
+		mvneta_rx_lockq(sc, q);
+		mvneta_rx_queue_refill(sc, q);
+		mvneta_rx_unlockq(sc, q);
+	}
+
+	/* With a fixed link there is no PHY; force link state up. */
+	if (!sc->phy_attached)
+		mvneta_linkup(sc);
+
+	/* Enable interrupt */
+	mvneta_enable_intr(sc);
+
+	/* Set Counter */
+	callout_schedule(&sc->tick_ch, hz);
+
+	ifp->if_drv_flags |= IFF_DRV_RUNNING;
+}
+
+/*
+ * if_init method: locked wrapper around mvneta_init_locked(), kicking
+ * the PHY media selection afterwards when one is attached.
+ */
+STATIC void
+mvneta_init(void *arg)
+{
+	struct mvneta_softc *sc;
+
+	sc = arg;
+	mvneta_sc_lock(sc);
+	mvneta_init_locked(sc);
+	if (sc->phy_attached)
+		mii_mediachg(sc->mii);
+	mvneta_sc_unlock(sc);
+}
+
+/* ARGSUSED */
+/*
+ * Bring the interface down (softc lock held): mask interrupts, stop
+ * the tick callout, drop the link, disable the MAC port, flush every
+ * Rx/Tx ring and hold both DMA engines in reset. No-op when not
+ * running.
+ */
+STATIC void
+mvneta_stop_locked(struct mvneta_softc *sc)
+{
+	struct ifnet *ifp;
+	struct mvneta_rx_ring *rx;
+	struct mvneta_tx_ring *tx;
+	uint32_t reg;
+	int q;
+
+	ifp = sc->ifp;
+	if (ifp == NULL || (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
+		return;
+
+	mvneta_disable_intr(sc);
+
+	callout_stop(&sc->tick_ch);
+
+	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
+
+	/* Link down */
+	if (sc->linkup == TRUE)
+		mvneta_linkdown(sc);
+
+	/* Reset the MAC Port Enable bit */
+	reg = MVNETA_READ(sc, MVNETA_PMACC0);
+	reg &= ~MVNETA_PMACC0_PORTEN;
+	MVNETA_WRITE(sc, MVNETA_PMACC0, reg);
+
+	/* Disable each of queue */
+	for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) {
+		rx = MVNETA_RX_RING(sc, q);
+
+		mvneta_rx_lockq(sc, q);
+		mvneta_ring_flush_rx_queue(sc, q);
+		mvneta_rx_unlockq(sc, q);
+	}
+
+	/*
+	 * Hold Reset state of DMA Engine
+	 * (must write 0x0 to restart it)
+	 */
+	MVNETA_WRITE(sc, MVNETA_PRXINIT, 0x00000001);
+	MVNETA_WRITE(sc, MVNETA_PTXINIT, 0x00000001);
+
+	for (q = 0; q < MVNETA_TX_QNUM_MAX; q++) {
+		tx = MVNETA_TX_RING(sc, q);
+
+		mvneta_tx_lockq(sc, q);
+		mvneta_ring_flush_tx_queue(sc, q);
+		mvneta_tx_unlockq(sc, q);
+	}
+}
+
+/* Locked wrapper around mvneta_stop_locked(). */
+STATIC void
+mvneta_stop(struct mvneta_softc *sc)
+{
+
+	mvneta_sc_lock(sc);
+	mvneta_stop_locked(sc);
+	mvneta_sc_unlock(sc);
+}
+
+/*
+ * ifmedia change callback: refuse in fixed-link mode, reprogram the
+ * MAC from the selected ifmedia word when using in-band status,
+ * otherwise delegate to the PHY via mii_mediachg(). Always returns 0.
+ */
+STATIC int
+mvneta_mediachange(struct ifnet *ifp)
+{
+	struct mvneta_softc *sc;
+
+	sc = ifp->if_softc;
+
+	if (!sc->phy_attached && !sc->use_inband_status) {
+		/* We shouldn't be here */
+		if_printf(ifp, "Cannot change media in fixed-link mode!\n");
+		return (0);
+	}
+
+	if (sc->use_inband_status) {
+		mvneta_update_media(sc, sc->mvneta_ifmedia.ifm_media);
+		return (0);
+	}
+
+	mvneta_sc_lock(sc);
+
+	/* Update PHY */
+	mii_mediachg(sc->mii);
+
+	mvneta_sc_unlock(sc);
+
+	return (0);
+}
+
+/*
+ * Translate the hardware Port Status register into an ifmediareq:
+ * speed (1000/100/10), duplex and link state.
+ */
+STATIC void
+mvneta_get_media(struct mvneta_softc *sc, struct ifmediareq *ifmr)
+{
+	uint32_t psr;
+
+	psr = MVNETA_READ(sc, MVNETA_PSR);
+
+	/* Speed */
+	if (psr & MVNETA_PSR_GMIISPEED)
+		ifmr->ifm_active = IFM_ETHER_SUBTYPE_SET(IFM_1000_T);
+	else if (psr & MVNETA_PSR_MIISPEED)
+		ifmr->ifm_active = IFM_ETHER_SUBTYPE_SET(IFM_100_TX);
+	else if (psr & MVNETA_PSR_LINKUP)
+		ifmr->ifm_active = IFM_ETHER_SUBTYPE_SET(IFM_10_T);
+
+	/* Duplex */
+	if (psr & MVNETA_PSR_FULLDX)
+		ifmr->ifm_active |= IFM_FDX;
+
+	/* Link */
+	ifmr->ifm_status = IFM_AVALID;
+	if (psr & MVNETA_PSR_LINKUP)
+		ifmr->ifm_status |= IFM_ACTIVE;
+}
+
+/*
+ * ifmedia status callback: report always-up for fixed link, hardware
+ * status for in-band mode, or poll the PHY through mii(4) otherwise.
+ */
+STATIC void
+mvneta_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
+{
+	struct mvneta_softc *sc;
+	struct mii_data *mii;
+
+	sc = ifp->if_softc;
+
+	if (!sc->phy_attached && !sc->use_inband_status) {
+		ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE;
+		return;
+	}
+
+	mvneta_sc_lock(sc);
+
+	if (sc->use_inband_status) {
+		mvneta_get_media(sc, ifmr);
+		mvneta_sc_unlock(sc);
+		return;
+	}
+
+	mii = sc->mii;
+	mii_pollstat(mii);
+
+	ifmr->ifm_active = mii->mii_media_active;
+	ifmr->ifm_status = mii->mii_media_status;
+
+	mvneta_sc_unlock(sc);
+}
+
+/*
+ * Link State Notify
+ */
+
+/*
+ * Enable or disable in-band auto-negotiation: toggles the AN bits in
+ * the Port Auto-Negotiation Config register plus the matching PMACC2
+ * in-band mode and PSOMSCD enable bits.
+ */
+STATIC void
+mvneta_update_autoneg(struct mvneta_softc *sc, int enable)
+{
+	int reg;
+
+	if (enable) {
+		reg = MVNETA_READ(sc, MVNETA_PANC);
+		reg &= ~(MVNETA_PANC_FORCELINKFAIL | MVNETA_PANC_FORCELINKPASS |
+		    MVNETA_PANC_ANFCEN);
+		reg |= MVNETA_PANC_ANDUPLEXEN | MVNETA_PANC_ANSPEEDEN |
+		    MVNETA_PANC_INBANDANEN;
+		MVNETA_WRITE(sc, MVNETA_PANC, reg);
+
+		reg = MVNETA_READ(sc, MVNETA_PMACC2);
+		reg |= MVNETA_PMACC2_INBANDANMODE;
+		MVNETA_WRITE(sc, MVNETA_PMACC2, reg);
+
+		reg = MVNETA_READ(sc, MVNETA_PSOMSCD);
+		reg |= MVNETA_PSOMSCD_ENABLE;
+		MVNETA_WRITE(sc, MVNETA_PSOMSCD, reg);
+	} else {
+		reg = MVNETA_READ(sc, MVNETA_PANC);
+		reg &= ~(MVNETA_PANC_FORCELINKFAIL | MVNETA_PANC_FORCELINKPASS |
+		    MVNETA_PANC_ANDUPLEXEN | MVNETA_PANC_ANSPEEDEN |
+		    MVNETA_PANC_INBANDANEN);
+		MVNETA_WRITE(sc, MVNETA_PANC, reg);
+
+		reg = MVNETA_READ(sc, MVNETA_PMACC2);
+		reg &= ~MVNETA_PMACC2_INBANDANMODE;
+		MVNETA_WRITE(sc, MVNETA_PMACC2, reg);
+
+		reg = MVNETA_READ(sc, MVNETA_PSOMSCD);
+		reg &= ~MVNETA_PSOMSCD_ENABLE;
+		MVNETA_WRITE(sc, MVNETA_PSOMSCD, reg);
+	}
+}
+
+/*
+ * Apply a new media selection: reset the PHY, stop the port if running,
+ * reprogram auto-negotiation/EEE/flow-control, and for a fixed media
+ * force the speed/duplex bits in PANC.  Returns 0 or EINVAL (gigabit
+ * half-duplex is not supported by the hardware).
+ */
+STATIC int
+mvneta_update_media(struct mvneta_softc *sc, int media)
+{
+	int reg, err;
+	boolean_t running;
+
+	err = 0;
+
+	mvneta_sc_lock(sc);
+
+	mvneta_linkreset(sc);
+
+	running = (sc->ifp->if_drv_flags & IFF_DRV_RUNNING) != 0;
+	if (running)
+		mvneta_stop_locked(sc);
+
+	sc->autoneg = (IFM_SUBTYPE(media) == IFM_AUTO);
+
+	if (sc->use_inband_status)
+		mvneta_update_autoneg(sc, IFM_SUBTYPE(media) == IFM_AUTO);
+
+	mvneta_update_eee(sc);
+	mvneta_update_fc(sc);
+
+	if (IFM_SUBTYPE(media) != IFM_AUTO) {
+		/* Fixed media: force speed/duplex in the AN control reg. */
+		reg = MVNETA_READ(sc, MVNETA_PANC);
+		reg &= ~(MVNETA_PANC_SETGMIISPEED |
+		    MVNETA_PANC_SETMIISPEED |
+		    MVNETA_PANC_SETFULLDX);
+		if (IFM_SUBTYPE(media) == IFM_1000_T ||
+		    IFM_SUBTYPE(media) == IFM_2500_T) {
+			if ((media & IFM_FDX) == 0) {
+				device_printf(sc->dev,
+				    "%s half-duplex unsupported\n",
+				    IFM_SUBTYPE(media) == IFM_1000_T ?
+				    "1000Base-T" :
+				    "2500Base-T");
+				err = EINVAL;
+				goto out;
+			}
+			reg |= MVNETA_PANC_SETGMIISPEED;
+		} else if (IFM_SUBTYPE(media) == IFM_100_TX)
+			reg |= MVNETA_PANC_SETMIISPEED;
+
+		if (media & IFM_FDX)
+			reg |= MVNETA_PANC_SETFULLDX;
+
+		MVNETA_WRITE(sc, MVNETA_PANC, reg);
+	}
+out:
+	/* Restart the port if it was running before the media change. */
+	if (running)
+		mvneta_init_locked(sc);
+	mvneta_sc_unlock(sc);
+	return (err);
+}
+
+/*
+ * miibus link-change callback helper: refresh EEE/flow-control, notify
+ * on link transitions, and mirror the PHY-negotiated speed/duplex into
+ * the MAC's forced-media bits when the media type changed.
+ */
+STATIC void
+mvneta_adjust_link(struct mvneta_softc *sc)
+{
+	boolean_t phy_linkup;
+	int reg;
+
+	/* Update eee/fc */
+	mvneta_update_eee(sc);
+	mvneta_update_fc(sc);
+
+	/* Check for link change */
+	phy_linkup = (sc->mii->mii_media_status &
+	    (IFM_AVALID | IFM_ACTIVE)) == (IFM_AVALID | IFM_ACTIVE);
+
+	if (sc->linkup != phy_linkup)
+		mvneta_linkupdate(sc, phy_linkup);
+
+	/* Don't update media on disabled link */
+	if (!phy_linkup )
+		return;
+
+	/* Check for media type change */
+	if (sc->mvneta_media != sc->mii->mii_media_active) {
+		sc->mvneta_media = sc->mii->mii_media_active;
+
+		reg = MVNETA_READ(sc, MVNETA_PANC);
+		reg &= ~(MVNETA_PANC_SETGMIISPEED |
+		    MVNETA_PANC_SETMIISPEED |
+		    MVNETA_PANC_SETFULLDX);
+		if (IFM_SUBTYPE(sc->mvneta_media) == IFM_1000_T ||
+		    IFM_SUBTYPE(sc->mvneta_media) == IFM_2500_T) {
+			reg |= MVNETA_PANC_SETGMIISPEED;
+		} else if (IFM_SUBTYPE(sc->mvneta_media) == IFM_100_TX)
+			reg |= MVNETA_PANC_SETMIISPEED;
+
+		if (sc->mvneta_media & IFM_FDX)
+			reg |= MVNETA_PANC_SETFULLDX;
+
+		MVNETA_WRITE(sc, MVNETA_PANC, reg);
+	}
+}
+
+/*
+ * In-band link-change interrupt handler: read the current link state
+ * from the MAC and bring the port up or down on a transition.
+ * Called with the softc mutex held.
+ */
+STATIC void
+mvneta_link_isr(struct mvneta_softc *sc)
+{
+	int linkup;
+
+	KASSERT_SC_MTX(sc);
+
+	linkup = MVNETA_IS_LINKUP(sc) ? TRUE : FALSE;
+	if (sc->linkup == linkup)
+		return;
+
+	if (linkup == TRUE)
+		mvneta_linkup(sc);
+	else
+		mvneta_linkdown(sc);
+
+#ifdef DEBUG
+	/* device_xname() is NetBSD API; FreeBSD uses device_get_nameunit(). */
+	log(LOG_DEBUG,
+	    "%s: link %s\n", device_get_nameunit(sc->dev),
+	    linkup ? "up" : "down");
+#endif
+}
+
+/*
+ * Propagate a PHY-reported link transition into the MAC/ifnet state.
+ * Called with the softc mutex held.
+ */
+STATIC void
+mvneta_linkupdate(struct mvneta_softc *sc, boolean_t linkup)
+{
+
+	KASSERT_SC_MTX(sc);
+
+	if (linkup == TRUE)
+		mvneta_linkup(sc);
+	else
+		mvneta_linkdown(sc);
+
+#ifdef DEBUG
+	/* device_xname() is NetBSD API; FreeBSD uses device_get_nameunit(). */
+	log(LOG_DEBUG,
+	    "%s: link %s\n", device_get_nameunit(sc->dev),
+	    linkup ? "up" : "down");
+#endif
+}
+
+/*
+ * Program Energy-Efficient-Ethernet: enable/disable the Low Power Idle
+ * request bit according to the "lpi" sysctl knob (sc->cf_lpi).
+ */
+STATIC void
+mvneta_update_eee(struct mvneta_softc *sc)
+{
+	uint32_t reg;
+
+	KASSERT_SC_MTX(sc);
+
+	/* set EEE parameters */
+	reg = MVNETA_READ(sc, MVNETA_LPIC1);
+	if (sc->cf_lpi)
+		reg |= MVNETA_LPIC1_LPIRE;
+	else
+		reg &= ~MVNETA_LPIC1_LPIRE;
+	MVNETA_WRITE(sc, MVNETA_LPIC1, reg);
+}
+
+/*
+ * Program pause-frame (flow control) advertisement and negotiation
+ * according to the "flow_control" sysctl knob (sc->cf_fc).
+ */
+STATIC void
+mvneta_update_fc(struct mvneta_softc *sc)
+{
+	uint32_t reg;
+
+	KASSERT_SC_MTX(sc);
+
+	reg = MVNETA_READ(sc, MVNETA_PANC);
+	if (sc->cf_fc) {
+		/* Flow control negotiation */
+		reg |= MVNETA_PANC_PAUSEADV;
+		reg |= MVNETA_PANC_ANFCEN;
+	} else {
+		/* Disable flow control negotiation */
+		reg &= ~MVNETA_PANC_PAUSEADV;
+		reg &= ~MVNETA_PANC_ANFCEN;
+	}
+
+	MVNETA_WRITE(sc, MVNETA_PANC, reg);
+}
+
+/*
+ * Bring the link up: force link-pass in the MAC (unless in-band status
+ * drives it), flush stale TX queues, start the port, and notify the
+ * network stack.  Called with the softc mutex held.
+ */
+STATIC void
+mvneta_linkup(struct mvneta_softc *sc)
+{
+	uint32_t reg;
+
+	KASSERT_SC_MTX(sc);
+
+	if (!sc->use_inband_status) {
+		reg = MVNETA_READ(sc, MVNETA_PANC);
+		reg |= MVNETA_PANC_FORCELINKPASS;
+		reg &= ~MVNETA_PANC_FORCELINKFAIL;
+		MVNETA_WRITE(sc, MVNETA_PANC, reg);
+	}
+
+	mvneta_qflush(sc->ifp);
+	mvneta_portup(sc);
+	sc->linkup = TRUE;
+	if_link_state_change(sc->ifp, LINK_STATE_UP);
+}
+
+/*
+ * Bring the link down: force link-fail in the MAC (unless in-band
+ * status drives it), stop the port, flush pending TX, and notify the
+ * network stack.  Called with the softc mutex held.
+ */
+STATIC void
+mvneta_linkdown(struct mvneta_softc *sc)
+{
+	uint32_t reg;
+
+	KASSERT_SC_MTX(sc);
+
+	if (!sc->use_inband_status) {
+		reg = MVNETA_READ(sc, MVNETA_PANC);
+		reg &= ~MVNETA_PANC_FORCELINKPASS;
+		reg |= MVNETA_PANC_FORCELINKFAIL;
+		MVNETA_WRITE(sc, MVNETA_PANC, reg);
+	}
+
+	/* Port down first, then drop anything still queued for TX. */
+	mvneta_portdown(sc);
+	mvneta_qflush(sc->ifp);
+	sc->linkup = FALSE;
+	if_link_state_change(sc->ifp, LINK_STATE_DOWN);
+}
+
+/*
+ * Force a hardware reset of the attached PHY, if one exists.
+ */
+STATIC void
+mvneta_linkreset(struct mvneta_softc *sc)
+{
+	struct mii_softc *mii_sc;
+
+	if (!sc->phy_attached)
+		return;
+
+	/* Reset the first PHY found on the mii bus. */
+	mii_sc = LIST_FIRST(&sc->mii->mii_phys);
+	if (mii_sc != NULL)
+		mii_phy_reset(mii_sc);
+}
+
+/*
+ * Tx Subroutines
+ */
+/*
+ * Tx Subroutines
+ */
+/*
+ * Enqueue one mbuf (chain) on TX queue q.  Returns 0 on success.
+ * On failure: if *mbufp was set to NULL the mbuf has been consumed
+ * (freed); otherwise the mbuf is intact and the caller may retry
+ * (ENOMEM from bus_dma, or a transiently full ring -> ENOBUFS).
+ * Caller holds the per-queue TX lock.
+ */
+STATIC int
+mvneta_tx_queue(struct mvneta_softc *sc, struct mbuf **mbufp, int q)
+{
+	struct ifnet *ifp;
+	bus_dma_segment_t txsegs[MVNETA_TX_SEGLIMIT];
+	struct mbuf *mtmp, *mbuf;
+	struct mvneta_tx_ring *tx;
+	struct mvneta_buf *txbuf;
+	struct mvneta_tx_desc *t;
+	uint32_t ptxsu;
+	int start, used, error, i, txnsegs;
+
+	mbuf = *mbufp;
+	tx = MVNETA_TX_RING(sc, q);
+	DASSERT(tx->used >= 0);
+	DASSERT(tx->used <= MVNETA_TX_RING_CNT);
+	t = NULL;
+	ifp = sc->ifp;
+
+	/* Software VLAN encapsulation: insert the tag into the frame. */
+	if (__predict_false(mbuf->m_flags & M_VLANTAG)) {
+		mbuf = ether_vlanencap(mbuf, mbuf->m_pkthdr.ether_vtag);
+		if (mbuf == NULL) {
+			tx->drv_error++;
+			*mbufp = NULL;
+			return (ENOBUFS);
+		}
+		mbuf->m_flags &= ~M_VLANTAG;
+		*mbufp = mbuf;
+	}
+
+	/*
+	 * Checksum offload on a multi-buffer chain requires a writable
+	 * mbuf; duplicate read-only chains before handing them to hw.
+	 */
+	if (__predict_false(mbuf->m_next != NULL &&
+	    (mbuf->m_pkthdr.csum_flags &
+	    (CSUM_IP | CSUM_TCP | CSUM_UDP)) != 0)) {
+		if (M_WRITABLE(mbuf) == 0) {
+			mtmp = m_dup(mbuf, M_NOWAIT);
+			m_freem(mbuf);
+			if (mtmp == NULL) {
+				tx->drv_error++;
+				*mbufp = NULL;
+				return (ENOBUFS);
+			}
+			*mbufp = mbuf = mtmp;
+		}
+	}
+
+	/* load mbuf using dmamap of 1st descriptor */
+	txbuf = &tx->txbuf[tx->cpu];
+	error = bus_dmamap_load_mbuf_sg(sc->txmbuf_dtag,
+	    txbuf->dmap, mbuf, txsegs, &txnsegs,
+	    BUS_DMA_NOWAIT);
+	if (__predict_false(error != 0)) {
+#ifdef MVNETA_KTR
+		CTR3(KTR_SPARE2, "%s:%u bus_dmamap_load_mbuf_sg error=%d", ifp->if_xname, q, error);
+#endif
+		/* This is the only recoverable error (except EFBIG). */
+		if (error != ENOMEM) {
+			tx->drv_error++;
+			m_freem(mbuf);
+			*mbufp = NULL;
+			return (ENOBUFS);
+		}
+		return (error);
+	}
+
+	if (__predict_false(txnsegs <= 0
+	    || (txnsegs + tx->used) > MVNETA_TX_RING_CNT)) {
+		/* we have no enough descriptors or mbuf is broken */
+#ifdef MVNETA_KTR
+		CTR3(KTR_SPARE2, "%s:%u not enough descriptors txnsegs=%d",
+		    ifp->if_xname, q, txnsegs);
+#endif
+		bus_dmamap_unload(sc->txmbuf_dtag, txbuf->dmap);
+		return (ENOBUFS);
+	}
+	DASSERT(txbuf->m == NULL);
+
+	/* remember mbuf using 1st descriptor */
+	txbuf->m = mbuf;
+	bus_dmamap_sync(sc->txmbuf_dtag, txbuf->dmap,
+	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
+
+	/* load to tx descriptors */
+	start = tx->cpu;
+	used = 0;
+	for (i = 0; i < txnsegs; i++) {
+		t = &tx->desc[tx->cpu];
+		t->command = 0;
+		t->l4ichk = 0;
+		t->flags = 0;
+		if (__predict_true(i == 0)) {
+			/* 1st descriptor */
+			t->command |= MVNETA_TX_CMD_W_PACKET_OFFSET(0);
+			t->command |= MVNETA_TX_CMD_F;
+			mvneta_tx_set_csumflag(ifp, t, mbuf);
+		}
+		t->bufptr_pa = txsegs[i].ds_addr;
+		t->bytecnt = txsegs[i].ds_len;
+		tx->cpu = tx_counter_adv(tx->cpu, 1);
+
+		tx->used++;
+		used++;
+	}
+	/* t is last descriptor here */
+	DASSERT(t != NULL);
+	t->command |= MVNETA_TX_CMD_L|MVNETA_TX_CMD_PADDING;
+
+	bus_dmamap_sync(sc->tx_dtag, tx->desc_map,
+	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
+
+	/* The PTXSU "written" field is at most 255 wide; chunk the update. */
+	while (__predict_false(used > 255)) {
+		ptxsu = MVNETA_PTXSU_NOWD(255);
+		MVNETA_WRITE(sc, MVNETA_PTXSU(q), ptxsu);
+		used -= 255;
+	}
+	if (__predict_true(used > 0)) {
+		ptxsu = MVNETA_PTXSU_NOWD(used);
+		MVNETA_WRITE(sc, MVNETA_PTXSU(q), ptxsu);
+	}
+	return (0);
+}
+
+/*
+ * Fill the TX descriptor's checksum-offload command bits from the
+ * mbuf's csum_flags: L3 header location/length plus which (if any)
+ * L4 checksum the hardware should generate.
+ */
+STATIC void
+mvneta_tx_set_csumflag(struct ifnet *ifp,
+    struct mvneta_tx_desc *t, struct mbuf *m)
+{
+	struct ether_header *eh;
+	int csum_flags;
+	uint32_t iphl, ipoff;
+	struct ip *ip;
+
+	iphl = ipoff = 0;
+	/* Only offload what both the interface and the packet request. */
+	csum_flags = ifp->if_hwassist & m->m_pkthdr.csum_flags;
+	eh = mtod(m, struct ether_header *);
+	switch (ntohs(eh->ether_type)) {
+	case ETHERTYPE_IP:
+#ifdef HWCSUM_IPV6
+	case ETHERTYPE_IPV6:
+#endif
+		ipoff = ETHER_HDR_LEN;
+		break;
+	case ETHERTYPE_VLAN:
+		/* L3 starts after the 802.1Q tag. */
+		ipoff = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
+		break;
+	}
+
+	if (__predict_true(csum_flags & (CSUM_IP|CSUM_IP_TCP|CSUM_IP_UDP))) {
+		/* NOTE(review): assumes the IP header is contiguous in the
+		 * first mbuf -- the caller dups non-writable chains. */
+		ip = (struct ip *)(m->m_data + ipoff);
+		iphl = ip->ip_hl<<2;
+		t->command |= MVNETA_TX_CMD_L3_IP4;
+	}
+#ifdef HWCSUM_IPV6
+	else if (csum_flags & (CSUM_IP6_TCP|CSUM_IP6_UDP)) {
+		/* XXX
+		iphl = m->m_pkthdr.csum_data>>16;*/
+		t->command |= MVNETA_TX_CMD_L3_IP6;
+	}
+#endif
+	else {
+		t->command |= MVNETA_TX_CMD_L4_CHECKSUM_NONE;
+		return;
+	}
+
+
+	/* L3 */
+	if (csum_flags & CSUM_IP) {
+		t->command |= MVNETA_TX_CMD_IP4_CHECKSUM;
+	}
+
+	/* L4 */
+	if (csum_flags & CSUM_IP_TCP) {
+		t->command |= MVNETA_TX_CMD_L4_CHECKSUM_NOFRAG;
+		t->command |= MVNETA_TX_CMD_L4_TCP;
+	}
+	else if (csum_flags & CSUM_IP_UDP) {
+		t->command |= MVNETA_TX_CMD_L4_CHECKSUM_NOFRAG;
+		t->command |= MVNETA_TX_CMD_L4_UDP;
+	}
+#ifdef HWCSUM_IPV6
+	else if (csum_flags & CSUM_IP6_TCP) {
+		t->command |= MVNETA_TX_CMD_L4_CHECKSUM_NOFRAG;
+		t->command |= MVNETA_TX_CMD_L4_TCP;
+	}
+	else if (csum_flags & CSUM_IP6_UDP) {
+		t->command |= MVNETA_TX_CMD_L4_CHECKSUM_NOFRAG;
+		t->command |= MVNETA_TX_CMD_L4_UDP;
+	}
+#endif
+	else
+		t->command |= MVNETA_TX_CMD_L4_CHECKSUM_NONE;
+
+	t->l4ichk = 0;
+	/* Header length is programmed in 32-bit words, offset in bytes. */
+	t->command |= MVNETA_TX_CMD_IP_HEADER_LEN(iphl >> 2);
+	t->command |= MVNETA_TX_CMD_L3_OFFSET(ipoff);
+}
+
+/*
+ * Reclaim transmitted descriptors on TX queue q: free completed mbufs,
+ * advance the DMA index, and acknowledge the hardware (PTXSU "released
+ * buffers" field is at most 255 wide, so acknowledge in chunks).
+ * Also detects a hung queue via the watchdog timestamp.
+ * Caller holds the per-queue TX lock.
+ */
+STATIC void
+mvneta_tx_queue_complete(struct mvneta_softc *sc, int q)
+{
+	struct mvneta_tx_ring *tx;
+	struct mvneta_buf *txbuf;
+	struct mvneta_tx_desc *t;
+	uint32_t ptxs, ptxsu, ndesc;
+	int i;
+
+	KASSERT_TX_MTX(sc, q);
+
+	tx = MVNETA_TX_RING(sc, q);
+	if (__predict_false(tx->queue_status == MVNETA_QUEUE_DISABLED))
+		return;
+
+	ptxs = MVNETA_READ(sc, MVNETA_PTXS(q));
+	ndesc = MVNETA_PTXS_GET_TBC(ptxs);
+
+	if (__predict_false(ndesc == 0)) {
+		if (tx->used == 0)
+			tx->queue_status = MVNETA_QUEUE_IDLE;
+		else if (tx->queue_status == MVNETA_QUEUE_WORKING &&
+		    ((ticks - tx->watchdog_time) > MVNETA_WATCHDOG))
+			tx->queue_hung = TRUE;
+		return;
+	}
+
+#ifdef MVNETA_KTR
+	CTR3(KTR_SPARE2, "%s:%u tx_complete begin ndesc=%u",
+	    sc->ifp->if_xname, q, ndesc);
+#endif
+
+	bus_dmamap_sync(sc->tx_dtag, tx->desc_map,
+	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
+
+	for (i = 0; i < ndesc; i++) {
+		t = &tx->desc[tx->dma];
+#ifdef MVNETA_KTR
+		if (t->flags & MVNETA_TX_F_ES)
+			CTR3(KTR_SPARE2, "%s tx error queue %d desc %d",
+			    sc->ifp->if_xname, q, tx->dma);
+#endif
+		txbuf = &tx->txbuf[tx->dma];
+		if (__predict_true(txbuf->m != NULL)) {
+			DASSERT((t->command & MVNETA_TX_CMD_F) != 0);
+			bus_dmamap_unload(sc->txmbuf_dtag, txbuf->dmap);
+			m_freem(txbuf->m);
+			txbuf->m = NULL;
+		}
+		else
+			/*
+			 * Fixed: the "first" bit lives in t->command, not
+			 * t->flags (cf. the assertion in the branch above).
+			 */
+			DASSERT((t->command & MVNETA_TX_CMD_F) == 0);
+		tx->dma = tx_counter_adv(tx->dma, 1);
+		tx->used--;
+	}
+	DASSERT(tx->used >= 0);
+	DASSERT(tx->used <= MVNETA_TX_RING_CNT);
+	while (__predict_false(ndesc > 255)) {
+		ptxsu = MVNETA_PTXSU_NORB(255);
+		MVNETA_WRITE(sc, MVNETA_PTXSU(q), ptxsu);
+		ndesc -= 255;
+	}
+	if (__predict_true(ndesc > 0)) {
+		ptxsu = MVNETA_PTXSU_NORB(ndesc);
+		MVNETA_WRITE(sc, MVNETA_PTXSU(q), ptxsu);
+	}
+#ifdef MVNETA_KTR
+	CTR5(KTR_SPARE2, "%s:%u tx_complete tx_cpu=%d tx_dma=%d tx_used=%d",
+	    sc->ifp->if_xname, q, tx->cpu, tx->dma, tx->used);
+#endif
+
+	tx->watchdog_time = ticks;
+
+	if (tx->used == 0)
+		tx->queue_status = MVNETA_QUEUE_IDLE;
+}
+
+/*
+ * Do a final TX complete when TX is idle.
+ */
+/*
+ * Do a final TX complete when TX is idle.
+ */
+STATIC void
+mvneta_tx_drain(struct mvneta_softc *sc)
+{
+	struct mvneta_tx_ring *tx;
+	int q;
+
+	/*
+	 * Handle trailing mbuf on TX queue.
+	 * Check is done lockless to avoid TX path contention; the race is
+	 * benign since the queue lock is taken before acting.
+	 */
+	for (q = 0; q < MVNETA_TX_QNUM_MAX; q++) {
+		tx = MVNETA_TX_RING(sc, q);
+		if ((ticks - tx->watchdog_time) > MVNETA_WATCHDOG_TXCOMP &&
+		    tx->used > 0) {
+			mvneta_tx_lockq(sc, q);
+			mvneta_tx_queue_complete(sc, q);
+			mvneta_tx_unlockq(sc, q);
+		}
+	}
+}
+
+/*
+ * Rx Subroutines
+ */
+/*
+ * Rx Subroutines
+ */
+/*
+ * Process up to "count" received frames on queue q (count <= 0 means
+ * no limit).  Returns nonzero when frames remain pending (caller
+ * should reschedule).
+ */
+STATIC int
+mvneta_rx(struct mvneta_softc *sc, int q, int count)
+{
+	uint32_t prxs, npkt;
+	int more;
+
+	more = 0;
+	mvneta_rx_lockq(sc, q);
+	prxs = MVNETA_READ(sc, MVNETA_PRXS(q));
+	/* Number of occupied (ready) descriptors. */
+	npkt = MVNETA_PRXS_GET_ODC(prxs);
+	if (__predict_false(npkt == 0))
+		goto out;
+
+	if (count > 0 && npkt > count) {
+		more = 1;
+		npkt = count;
+	}
+	mvneta_rx_queue(sc, q, npkt);
+out:
+	mvneta_rx_unlockq(sc, q);
+	return more;
+}
+
+/*
+ * Helper routine for updating PRXSU register of a given queue.
+ * Handles number of processed descriptors bigger than maximum acceptable value.
+ */
+/*
+ * Helper routine for updating PRXSU register of a given queue.
+ * Handles number of processed descriptors bigger than maximum acceptable value.
+ * (The hardware field is 8 bits wide, hence the 255 chunking.)
+ */
+STATIC __inline void
+mvneta_prxsu_update(struct mvneta_softc *sc, int q, int processed)
+{
+	uint32_t prxsu;
+
+	while (__predict_false(processed > 255)) {
+		prxsu = MVNETA_PRXSU_NOOFPROCESSEDDESCRIPTORS(255);
+		MVNETA_WRITE(sc, MVNETA_PRXSU(q), prxsu);
+		processed -= 255;
+	}
+	prxsu = MVNETA_PRXSU_NOOFPROCESSEDDESCRIPTORS(processed);
+	MVNETA_WRITE(sc, MVNETA_PRXSU(q), prxsu);
+}
+
+/*
+ * Hint the CPU to pull the given address into cache ahead of use.
+ */
+static __inline void
+mvneta_prefetch(void *addr)
+{
+
+	__builtin_prefetch(addr);
+}
+
+/*
+ * Receive npkt frames from ring q: unload each buffer's DMA map, hand
+ * the mbuf up the stack (or into LRO), and periodically acknowledge
+ * and refill the ring to avoid stalls.  The RX queue lock is dropped
+ * around if_input() and re-taken afterwards.
+ */
+STATIC void
+mvneta_rx_queue(struct mvneta_softc *sc, int q, int npkt)
+{
+	struct ifnet *ifp;
+	struct mvneta_rx_ring *rx;
+	struct mvneta_rx_desc *r;
+	struct mvneta_buf *rxbuf;
+	struct mbuf *m;
+	struct lro_ctrl *lro;
+	struct lro_entry *queued;
+	void *pktbuf;
+	int i, pktlen, processed, ndma;
+
+	KASSERT_RX_MTX(sc, q);
+
+	ifp = sc->ifp;
+	rx = MVNETA_RX_RING(sc, q);
+	processed = 0;
+
+	if (__predict_false(rx->queue_status == MVNETA_QUEUE_DISABLED))
+		return;
+
+	bus_dmamap_sync(sc->rx_dtag, rx->desc_map,
+	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
+
+	for (i = 0; i < npkt; i++) {
+		/* Prefetch next desc, rxbuf. */
+		ndma = rx_counter_adv(rx->dma, 1);
+		mvneta_prefetch(&rx->desc[ndma]);
+		mvneta_prefetch(&rx->rxbuf[ndma]);
+
+		/* get descriptor and packet */
+		r = &rx->desc[rx->dma];
+		rxbuf = &rx->rxbuf[rx->dma];
+		m = rxbuf->m;
+		rxbuf->m = NULL;
+		DASSERT(m != NULL);
+		bus_dmamap_sync(sc->rxbuf_dtag, rxbuf->dmap,
+		    BUS_DMASYNC_POSTREAD);
+		bus_dmamap_unload(sc->rxbuf_dtag, rxbuf->dmap);
+		/* Prefetch mbuf header. */
+		mvneta_prefetch(m);
+
+		processed++;
+		/* Drop desc with error status or not in a single buffer. */
+		DASSERT((r->status & (MVNETA_RX_F|MVNETA_RX_L)) ==
+		    (MVNETA_RX_F|MVNETA_RX_L));
+		if (__predict_false((r->status & MVNETA_RX_ES) ||
+		    (r->status & (MVNETA_RX_F|MVNETA_RX_L)) !=
+		    (MVNETA_RX_F|MVNETA_RX_L)))
+			goto rx_error;
+
+		/*
+		 * [ OFF | MH | PKT | CRC ]
+		 * bytecnt cover MH, PKT, CRC
+		 */
+		pktlen = r->bytecnt - ETHER_CRC_LEN - MVNETA_HWHEADER_SIZE;
+		pktbuf = (uint8_t *)r->bufptr_va + MVNETA_PACKET_OFFSET +
+		    MVNETA_HWHEADER_SIZE;
+
+		/* Prefetch mbuf data. */
+		mvneta_prefetch(pktbuf);
+
+		/* Write value to mbuf (avoid read). */
+		m->m_data = pktbuf;
+		m->m_len = m->m_pkthdr.len = pktlen;
+		m->m_pkthdr.rcvif = ifp;
+		mvneta_rx_set_csumflag(ifp, r, m);
+
+		/* Increase rx_dma before releasing the lock. */
+		rx->dma = ndma;
+
+		/* Hand TCP segments with valid checksum to LRO. */
+		if (__predict_false(rx->lro_enabled &&
+		    ((r->status & MVNETA_RX_L3_IP) != 0) &&
+		    ((r->status & MVNETA_RX_L4_MASK) == MVNETA_RX_L4_TCP) &&
+		    (m->m_pkthdr.csum_flags &
+		    (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) ==
+		    (CSUM_DATA_VALID | CSUM_PSEUDO_HDR))) {
+			if (rx->lro.lro_cnt != 0) {
+				if (tcp_lro_rx(&rx->lro, m, 0) == 0)
+					goto rx_done;
+			}
+		}
+
+		mvneta_rx_unlockq(sc, q);
+		(*ifp->if_input)(ifp, m);
+		mvneta_rx_lockq(sc, q);
+		/*
+		 * Check whether this queue has been disabled in the
+		 * meantime. If yes, then clear LRO and exit.
+		 */
+		if(__predict_false(rx->queue_status == MVNETA_QUEUE_DISABLED))
+			goto rx_lro;
+rx_done:
+		/* Refresh receive ring to avoid stall and minimize jitter. */
+		if (processed >= MVNETA_RX_REFILL_COUNT) {
+			mvneta_prxsu_update(sc, q, processed);
+			mvneta_rx_queue_refill(sc, q);
+			processed = 0;
+		}
+		continue;
+rx_error:
+		m_freem(m);
+		rx->dma = ndma;
+		/* Refresh receive ring to avoid stall and minimize jitter. */
+		if (processed >= MVNETA_RX_REFILL_COUNT) {
+			mvneta_prxsu_update(sc, q, processed);
+			mvneta_rx_queue_refill(sc, q);
+			processed = 0;
+		}
+	}
+#ifdef MVNETA_KTR
+	CTR3(KTR_SPARE2, "%s:%u %u packets received", ifp->if_xname, q, npkt);
+#endif
+	/* DMA status update */
+	mvneta_prxsu_update(sc, q, processed);
+	/* Refill the rest of buffers if there are any to refill */
+	mvneta_rx_queue_refill(sc, q);
+
+rx_lro:
+	/*
+	 * Flush any outstanding LRO work
+	 */
+	lro = &rx->lro;
+	while (__predict_false((queued = LIST_FIRST(&lro->lro_active)) != NULL)) {
+		LIST_REMOVE(LIST_FIRST((&lro->lro_active)), next);
+		tcp_lro_flush(lro, queued);
+	}
+}
+
+/*
+ * Release one RX buffer: unload its DMA map and free the mbuf chain.
+ */
+STATIC void
+mvneta_rx_buf_free(struct mvneta_softc *sc, struct mvneta_buf *rxbuf)
+{
+
+	bus_dmamap_unload(sc->rxbuf_dtag, rxbuf->dmap);
+	/* This will remove all data at once */
+	m_freem(rxbuf->m);
+}
+
+/*
+ * Replenish RX ring q with fresh mbuf clusters and tell the hardware
+ * about the newly available descriptors (PRXSU field is at most 255
+ * wide, so announce in chunks).  Allocation/DMA-load failures are
+ * non-fatal: stop early and mark the ring for a later full refill if
+ * it ended up completely empty.  Caller holds the RX queue lock.
+ */
+STATIC void
+mvneta_rx_queue_refill(struct mvneta_softc *sc, int q)
+{
+	struct mvneta_rx_ring *rx;
+	struct mvneta_rx_desc *r;
+	struct mvneta_buf *rxbuf;
+	bus_dma_segment_t segs;
+	struct mbuf *m;
+	uint32_t prxs, prxsu, ndesc;
+	int npkt, refill, nsegs, error;
+
+	KASSERT_RX_MTX(sc, q);
+
+	rx = MVNETA_RX_RING(sc, q);
+	prxs = MVNETA_READ(sc, MVNETA_PRXS(q));
+	ndesc = MVNETA_PRXS_GET_NODC(prxs) + MVNETA_PRXS_GET_ODC(prxs);
+	refill = MVNETA_RX_RING_CNT - ndesc;
+#ifdef MVNETA_KTR
+	CTR3(KTR_SPARE2, "%s:%u refill %u packets", sc->ifp->if_xname, q,
+	    refill);
+#endif
+	if (__predict_false(refill <= 0))
+		return;
+
+	for (npkt = 0; npkt < refill; npkt++) {
+		rxbuf = &rx->rxbuf[rx->cpu];
+		m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
+		if (__predict_false(m == NULL))
+			break;
+		m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
+
+		error = bus_dmamap_load_mbuf_sg(sc->rxbuf_dtag, rxbuf->dmap,
+		    m, &segs, &nsegs, BUS_DMA_NOWAIT);
+		if (__predict_false(error != 0 || nsegs != 1)) {
+			/*
+			 * Transient failure under memory pressure; retry
+			 * on a later refill.  (The former
+			 * KASSERT(1, (...)) here was a no-op -- KASSERT
+			 * fires when its expression is FALSE -- and
+			 * panicking on a BUS_DMA_NOWAIT failure would be
+			 * wrong anyway, so it was removed.)
+			 */
+			m_freem(m);
+			break;
+		}
+
+		/* Add the packet to the ring */
+		rxbuf->m = m;
+		r = &rx->desc[rx->cpu];
+		r->bufptr_pa = segs.ds_addr;
+		r->bufptr_va = (uint32_t)m->m_data;
+
+		rx->cpu = rx_counter_adv(rx->cpu, 1);
+	}
+	if (npkt == 0) {
+		/* Ring completely empty and nothing added: flag for retry. */
+		if (refill == MVNETA_RX_RING_CNT)
+			rx->needs_refill = TRUE;
+		return;
+	}
+
+	rx->needs_refill = FALSE;
+	bus_dmamap_sync(sc->rx_dtag, rx->desc_map, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
+
+	while (__predict_false(npkt > 255)) {
+		prxsu = MVNETA_PRXSU_NOOFNEWDESCRIPTORS(255);
+		MVNETA_WRITE(sc, MVNETA_PRXSU(q), prxsu);
+		npkt -= 255;
+	}
+	if (__predict_true(npkt > 0)) {
+		prxsu = MVNETA_PRXSU_NOOFNEWDESCRIPTORS(npkt);
+		MVNETA_WRITE(sc, MVNETA_PRXSU(q), prxsu);
+	}
+}
+
+/*
+ * Translate the RX descriptor's hardware checksum status bits into
+ * mbuf csum_flags (L3 IPv4 header check, L4 TCP/UDP check).
+ */
+STATIC __inline void
+mvneta_rx_set_csumflag(struct ifnet *ifp,
+    struct mvneta_rx_desc *r, struct mbuf *m)
+{
+	uint32_t csum_flags;
+
+	csum_flags = 0;
+	if (__predict_false((r->status &
+	    (MVNETA_RX_IP_HEADER_OK|MVNETA_RX_L3_IP)) == 0))
+		return; /* not a IP packet */
+
+	/* L3 */
+	if (__predict_true((r->status & MVNETA_RX_IP_HEADER_OK) ==
+	    MVNETA_RX_IP_HEADER_OK))
+		csum_flags |= CSUM_L3_CALC|CSUM_L3_VALID;
+
+	if (__predict_true((r->status & (MVNETA_RX_IP_HEADER_OK|MVNETA_RX_L3_IP)) ==
+	    (MVNETA_RX_IP_HEADER_OK|MVNETA_RX_L3_IP))) {
+		/* L4 */
+		switch (r->status & MVNETA_RX_L4_MASK) {
+		case MVNETA_RX_L4_TCP:
+		case MVNETA_RX_L4_UDP:
+			csum_flags |= CSUM_L4_CALC;
+			if (__predict_true((r->status &
+			    MVNETA_RX_L4_CHECKSUM_OK) == MVNETA_RX_L4_CHECKSUM_OK)) {
+				/* Full pseudo-header checksum verified by hw. */
+				csum_flags |= CSUM_L4_VALID;
+				m->m_pkthdr.csum_data = htons(0xffff);
+			}
+			break;
+		case MVNETA_RX_L4_OTH:
+		default:
+			break;
+		}
+	}
+	m->m_pkthdr.csum_flags = csum_flags;
+}
+
+/*
+ * MAC address filter
+ */
+/*
+ * MAC address filter
+ */
+/*
+ * Program the RX address filters: port config (PXC), the unicast filter
+ * table (DFUT) and the multicast tables (DFSMT/DFOMT).  NOTE(review):
+ * the driver forces IFF_ALLMULTI, so all multicast frames always pass;
+ * per-group multicast filtering is not implemented here.
+ */
+STATIC void
+mvneta_filter_setup(struct mvneta_softc *sc)
+{
+	struct ifnet *ifp;
+	uint32_t dfut[MVNETA_NDFUT], dfsmt[MVNETA_NDFSMT], dfomt[MVNETA_NDFOMT];
+	uint32_t pxc;
+	int i;
+
+	KASSERT_SC_MTX(sc);
+
+	memset(dfut, 0, sizeof(dfut));
+	memset(dfsmt, 0, sizeof(dfsmt));
+	memset(dfomt, 0, sizeof(dfomt));
+
+	ifp = sc->ifp;
+	ifp->if_flags |= IFF_ALLMULTI;
+	if (ifp->if_flags & (IFF_ALLMULTI|IFF_PROMISC)) {
+		/* Pass every multicast entry to RX queue 0. */
+		for (i = 0; i < MVNETA_NDFSMT; i++) {
+			dfsmt[i] = dfomt[i] =
+			    MVNETA_DF(0, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS) |
+			    MVNETA_DF(1, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS) |
+			    MVNETA_DF(2, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS) |
+			    MVNETA_DF(3, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS);
+		}
+	}
+
+	pxc = MVNETA_READ(sc, MVNETA_PXC);
+	pxc &= ~(MVNETA_PXC_UPM | MVNETA_PXC_RXQ_MASK | MVNETA_PXC_RXQARP_MASK |
+	    MVNETA_PXC_TCPQ_MASK | MVNETA_PXC_UDPQ_MASK | MVNETA_PXC_BPDUQ_MASK);
+	pxc |= MVNETA_PXC_RXQ(MVNETA_RX_QNUM_MAX-1);
+	pxc |= MVNETA_PXC_RXQARP(MVNETA_RX_QNUM_MAX-1);
+	pxc |= MVNETA_PXC_TCPQ(MVNETA_RX_QNUM_MAX-1);
+	pxc |= MVNETA_PXC_UDPQ(MVNETA_RX_QNUM_MAX-1);
+	pxc |= MVNETA_PXC_BPDUQ(MVNETA_RX_QNUM_MAX-1);
+	/* Reject broadcast by default; re-allow below if IFF_BROADCAST. */
+	pxc |= MVNETA_PXC_RB | MVNETA_PXC_RBIP | MVNETA_PXC_RBARP;
+	if (ifp->if_flags & IFF_BROADCAST) {
+		pxc &= ~(MVNETA_PXC_RB | MVNETA_PXC_RBIP | MVNETA_PXC_RBARP);
+	}
+	if (ifp->if_flags & IFF_PROMISC) {
+		pxc |= MVNETA_PXC_UPM;
+	}
+	MVNETA_WRITE(sc, MVNETA_PXC, pxc);
+
+	/* Set Destination Address Filter Unicast Table */
+	if (ifp->if_flags & IFF_PROMISC) {
+		/* pass all unicast addresses */
+		for (i = 0; i < MVNETA_NDFUT; i++) {
+			dfut[i] =
+			    MVNETA_DF(0, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS) |
+			    MVNETA_DF(1, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS) |
+			    MVNETA_DF(2, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS) |
+			    MVNETA_DF(3, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS);
+		}
+	} else {
+		/* The table is indexed by the low nibble of our MAC. */
+		i = sc->enaddr[5] & 0xf; /* last nibble */
+		dfut[i>>2] = MVNETA_DF(i&3, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS);
+	}
+	MVNETA_WRITE_REGION(sc, MVNETA_DFUT(0), dfut, MVNETA_NDFUT);
+
+	/* Set Destination Address Filter Multicast Tables */
+	MVNETA_WRITE_REGION(sc, MVNETA_DFSMT(0), dfsmt, MVNETA_NDFSMT);
+	MVNETA_WRITE_REGION(sc, MVNETA_DFOMT(0), dfomt, MVNETA_NDFOMT);
+}
+
+/*
+ * sysctl(9)
+ */
+/*
+ * sysctl(9)
+ */
+/*
+ * Sysctl handler: report one accumulated 64-bit MIB counter.
+ * arg1 points at the per-counter mvneta_sysctl_mib descriptor.
+ */
+STATIC int
+sysctl_read_mib(SYSCTL_HANDLER_ARGS)
+{
+	struct mvneta_sysctl_mib *arg;
+	struct mvneta_softc *sc;
+	uint64_t val;
+
+	arg = (struct mvneta_sysctl_mib *)arg1;
+	if (arg == NULL)
+		return (EINVAL);
+
+	sc = arg->sc;
+	if (sc == NULL)
+		return (EINVAL);
+	/* Valid indices are 0 .. MVNETA_PORTMIB_NOCOUNTER-1 (was ">"). */
+	if (arg->index < 0 || arg->index >= MVNETA_PORTMIB_NOCOUNTER)
+		return (EINVAL);
+
+	mvneta_sc_lock(sc);
+	val = arg->counter;
+	mvneta_sc_unlock(sc);
+	return sysctl_handle_64(oidp, &val, 0, req);
+}
+
+
+/*
+ * Sysctl handler: writing 1 resets all MIB counters to zero.
+ * Any value other than 0 or 1 is rejected with EINVAL.
+ */
+STATIC int
+sysctl_clear_mib(SYSCTL_HANDLER_ARGS)
+{
+	struct mvneta_softc *sc;
+	int error, newval;
+
+	sc = (struct mvneta_softc *)arg1;
+	if (sc == NULL)
+		return (EINVAL);
+
+	newval = 0;
+	error = sysctl_handle_int(oidp, &newval, 0, req);
+	if (error != 0)
+		return (error);
+	if (newval != 0 && newval != 1)
+		return (EINVAL);
+	if (newval == 1) {
+		mvneta_sc_lock(sc);
+		mvneta_clear_mib(sc);
+		mvneta_sc_unlock(sc);
+	}
+
+	return (0);
+}
+
+/*
+ * Sysctl handler: read/set the RX interrupt-coalescing threshold timer
+ * of one queue, expressed in microseconds (0..1s); converted to/from
+ * tclk ticks for the PRXITTH register.
+ */
+STATIC int
+sysctl_set_queue_rxthtime(SYSCTL_HANDLER_ARGS)
+{
+	struct mvneta_sysctl_queue *arg;
+	struct mvneta_rx_ring *rx;
+	struct mvneta_softc *sc;
+	uint32_t reg, time_mvtclk;
+	int err, time_us;
+
+	rx = NULL;
+	arg = (struct mvneta_sysctl_queue *)arg1;
+	if (arg == NULL)
+		return (EINVAL);
+	/*
+	 * Validate against the number of RX queues, not the ring size
+	 * (was mistakenly checked against MVNETA_RX_RING_CNT).
+	 */
+	if (arg->queue < 0 || arg->queue >= MVNETA_RX_QNUM_MAX)
+		return (EINVAL);
+	if (arg->rxtx != MVNETA_SYSCTL_RX)
+		return (EINVAL);
+
+	sc = arg->sc;
+	if (sc == NULL)
+		return (EINVAL);
+
+	/* read queue length */
+	mvneta_sc_lock(sc);
+	mvneta_rx_lockq(sc, arg->queue);
+	rx = MVNETA_RX_RING(sc, arg->queue);
+	time_mvtclk = rx->queue_th_time;
+	time_us = ((uint64_t)time_mvtclk * 1000ULL * 1000ULL) / get_tclk();
+	mvneta_rx_unlockq(sc, arg->queue);
+	mvneta_sc_unlock(sc);
+
+	err = sysctl_handle_int(oidp, &time_us, 0, req);
+	if (err != 0)
+		return (err);
+
+	mvneta_sc_lock(sc);
+	mvneta_rx_lockq(sc, arg->queue);
+
+	/* update queue length (0[sec] - 1[sec]) */
+	if (time_us < 0 || time_us > (1000 * 1000)) {
+		mvneta_rx_unlockq(sc, arg->queue);
+		mvneta_sc_unlock(sc);
+		return (EINVAL);
+	}
+	time_mvtclk =
+	    (uint64_t)get_tclk() * (uint64_t)time_us / (1000ULL * 1000ULL);
+	rx->queue_th_time = time_mvtclk;
+	reg = MVNETA_PRXITTH_RITT(rx->queue_th_time);
+	MVNETA_WRITE(sc, MVNETA_PRXITTH(arg->queue), reg);
+	mvneta_rx_unlockq(sc, arg->queue);
+	mvneta_sc_unlock(sc);
+
+	return (0);
+}
+
+/*
+ * Create the per-device sysctl tree: dev.mvneta.N.{flow_control,lpi},
+ * dev.mvneta.N.mib.* (MIB counters plus a reset knob) and
+ * dev.mvneta.N.rx.queueQ.threshold_timer_us (RX coalescing).
+ */
+STATIC void
+sysctl_mvneta_init(struct mvneta_softc *sc)
+{
+	struct sysctl_ctx_list *ctx;
+	struct sysctl_oid_list *children;
+	struct sysctl_oid_list *rxchildren;
+	struct sysctl_oid_list *qchildren, *mchildren;
+	struct sysctl_oid *tree;
+	int i, q;
+	struct mvneta_sysctl_queue *rxarg;
+#define MVNETA_SYSCTL_NAME(num) "queue" # num
+	static const char *sysctl_queue_names[] = {
+		MVNETA_SYSCTL_NAME(0), MVNETA_SYSCTL_NAME(1),
+		MVNETA_SYSCTL_NAME(2), MVNETA_SYSCTL_NAME(3),
+		MVNETA_SYSCTL_NAME(4), MVNETA_SYSCTL_NAME(5),
+		MVNETA_SYSCTL_NAME(6), MVNETA_SYSCTL_NAME(7),
+	};
+#undef MVNETA_SYSCTL_NAME
+
+#define MVNETA_SYSCTL_DESCR(num) "configuration parameters for queue " # num
+	static const char *sysctl_queue_descrs[] = {
+		MVNETA_SYSCTL_DESCR(0), MVNETA_SYSCTL_DESCR(1),
+		MVNETA_SYSCTL_DESCR(2), MVNETA_SYSCTL_DESCR(3),
+		MVNETA_SYSCTL_DESCR(4), MVNETA_SYSCTL_DESCR(5),
+		MVNETA_SYSCTL_DESCR(6), MVNETA_SYSCTL_DESCR(7),
+	};
+#undef MVNETA_SYSCTL_DESCR
+
+
+	ctx = device_get_sysctl_ctx(sc->dev);
+	children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
+
+	tree = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "rx",
+	    CTLFLAG_RD, 0, "NETA RX");
+	rxchildren = SYSCTL_CHILDREN(tree);
+	tree = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "mib",
+	    CTLFLAG_RD, 0, "NETA MIB");
+	mchildren = SYSCTL_CHILDREN(tree);
+
+
+	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "flow_control",
+	    CTLFLAG_RW, &sc->cf_fc, 0, "flow control");
+	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "lpi",
+	    CTLFLAG_RW, &sc->cf_lpi, 0, "Low Power Idle");
+
+	/*
+	 * MIB access
+	 */
+	/* dev.mvneta.[unit].mib.<mibs> */
+	for (i = 0; i < MVNETA_PORTMIB_NOCOUNTER; i++) {
+		const char *name = mvneta_mib_list[i].sysctl_name;
+		const char *desc = mvneta_mib_list[i].desc;
+		struct mvneta_sysctl_mib *mib_arg = &sc->sysctl_mib[i];
+
+		mib_arg->sc = sc;
+		mib_arg->index = i;
+		SYSCTL_ADD_PROC(ctx, mchildren, OID_AUTO, name,
+		    CTLTYPE_U64|CTLFLAG_RD, (void *)mib_arg, 0,
+		    sysctl_read_mib, "I", desc);
+	}
+	SYSCTL_ADD_UQUAD(ctx, mchildren, OID_AUTO, "rx_discard",
+	    CTLFLAG_RD, &sc->counter_pdfc, "Port Rx Discard Frame Counter");
+	SYSCTL_ADD_UQUAD(ctx, mchildren, OID_AUTO, "overrun",
+	    CTLFLAG_RD, &sc->counter_pofc, "Port Overrun Frame Counter");
+	SYSCTL_ADD_UINT(ctx, mchildren, OID_AUTO, "watchdog",
+	    CTLFLAG_RD, &sc->counter_watchdog, 0, "TX Watchdog Counter");
+
+	SYSCTL_ADD_PROC(ctx, mchildren, OID_AUTO, "reset",
+	    CTLTYPE_INT|CTLFLAG_RW, (void *)sc, 0,
+	    sysctl_clear_mib, "I", "Reset MIB counters");
+
+	for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) {
+		rxarg = &sc->sysctl_rx_queue[q];
+
+		rxarg->sc = sc;
+		rxarg->queue = q;
+		rxarg->rxtx = MVNETA_SYSCTL_RX;
+
+		/* hw.mvneta.mvneta[unit].rx.[queue] */
+		tree = SYSCTL_ADD_NODE(ctx, rxchildren, OID_AUTO,
+		    sysctl_queue_names[q], CTLFLAG_RD, 0,
+		    sysctl_queue_descrs[q]);
+		qchildren = SYSCTL_CHILDREN(tree);
+
+		/* hw.mvneta.mvneta[unit].rx.[queue].threshold_timer_us */
+		SYSCTL_ADD_PROC(ctx, qchildren, OID_AUTO, "threshold_timer_us",
+		    CTLTYPE_UINT | CTLFLAG_RW, rxarg, 0,
+		    sysctl_set_queue_rxthtime, "I",
+		    "interrupt coalescing threshold timer [us]");
+	}
+}
+
+/*
+ * MIB
+ */
+/*
+ * MIB
+ */
+/*
+ * Zero all software MIB accumulators.  The hardware registers are read
+ * and the results discarded -- presumably the MIB counters are
+ * clear-on-read; confirm against the NETA datasheet.
+ */
+STATIC void
+mvneta_clear_mib(struct mvneta_softc *sc)
+{
+	int i;
+
+	KASSERT_SC_MTX(sc);
+
+	for (i = 0; i < nitems(mvneta_mib_list); i++) {
+		if (mvneta_mib_list[i].reg64)
+			MVNETA_READ_MIB_8(sc, mvneta_mib_list[i].regnum);
+		else
+			MVNETA_READ_MIB_4(sc, mvneta_mib_list[i].regnum);
+		sc->sysctl_mib[i].counter = 0;
+	}
+	MVNETA_READ(sc, MVNETA_PDFC);
+	sc->counter_pdfc = 0;
+	MVNETA_READ(sc, MVNETA_POFC);
+	sc->counter_pofc = 0;
+	sc->counter_watchdog = 0;
+}
+
+/*
+ * Harvest the hardware MIB counters into the sysctl accumulators and
+ * the generic ifnet counters, fold in the TX watchdog count, and pick
+ * up per-queue driver error counts (read lock-free; see comment below).
+ */
+STATIC void
+mvneta_update_mib(struct mvneta_softc *sc)
+{
+	struct mvneta_tx_ring *tx;
+	int i;
+	uint64_t val;
+	uint32_t reg;
+
+	for (i = 0; i < nitems(mvneta_mib_list); i++) {
+
+		if (mvneta_mib_list[i].reg64)
+			val = MVNETA_READ_MIB_8(sc, mvneta_mib_list[i].regnum);
+		else
+			val = MVNETA_READ_MIB_4(sc, mvneta_mib_list[i].regnum);
+
+		if (val == 0)
+			continue;
+
+		sc->sysctl_mib[i].counter += val;
+		/* Mirror selected counters into the ifnet statistics. */
+		switch (mvneta_mib_list[i].regnum) {
+			case MVNETA_MIB_RX_GOOD_OCT:
+				if_inc_counter(sc->ifp, IFCOUNTER_IBYTES, val);
+				break;
+			case MVNETA_MIB_RX_BAD_FRAME:
+				if_inc_counter(sc->ifp, IFCOUNTER_IERRORS, val);
+				break;
+			case MVNETA_MIB_RX_GOOD_FRAME:
+				if_inc_counter(sc->ifp, IFCOUNTER_IPACKETS, val);
+				break;
+			case MVNETA_MIB_RX_MCAST_FRAME:
+				if_inc_counter(sc->ifp, IFCOUNTER_IMCASTS, val);
+				break;
+			case MVNETA_MIB_TX_GOOD_OCT:
+				if_inc_counter(sc->ifp, IFCOUNTER_OBYTES, val);
+				break;
+			case MVNETA_MIB_TX_GOOD_FRAME:
+				if_inc_counter(sc->ifp, IFCOUNTER_OPACKETS, val);
+				break;
+			case MVNETA_MIB_TX_MCAST_FRAME:
+				if_inc_counter(sc->ifp, IFCOUNTER_OMCASTS, val);
+				break;
+			case MVNETA_MIB_MAC_COL:
+				if_inc_counter(sc->ifp, IFCOUNTER_COLLISIONS, val);
+				break;
+			case MVNETA_MIB_TX_MAC_TRNS_ERR:
+			case MVNETA_MIB_TX_EXCES_COL:
+			case MVNETA_MIB_MAC_LATE_COL:
+				if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, val);
+				break;
+		}
+	}
+
+	reg = MVNETA_READ(sc, MVNETA_PDFC);
+	sc->counter_pdfc += reg;
+	if_inc_counter(sc->ifp, IFCOUNTER_IQDROPS, reg);
+	reg = MVNETA_READ(sc, MVNETA_POFC);
+	sc->counter_pofc += reg;
+	if_inc_counter(sc->ifp, IFCOUNTER_IQDROPS, reg);
+
+	/* TX watchdog. */
+	if (sc->counter_watchdog_mib > 0 ) {
+		if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, sc->counter_watchdog_mib);
+		sc->counter_watchdog_mib = 0;
+	}
+	/*
+	 * TX driver errors:
+	 * We do not take queue locks to not disrupt TX path.
+	 * We may only miss one drv error which will be fixed at
+	 * next mib update. We may also clear counter when TX path
+	 * is incrementing it but we only do it if counter was not zero
+	 * thus we may only lose one error.
+	 */
+	for (i = 0; i < MVNETA_TX_QNUM_MAX; i++) {
+		tx = MVNETA_TX_RING(sc, i);
+
+		if (tx->drv_error > 0) {
+			if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, tx->drv_error);
+			tx->drv_error = 0;
+		}
+	}
+}
Index: sys/dev/neta/if_mvneta_fdt.c
===================================================================
--- /dev/null
+++ sys/dev/neta/if_mvneta_fdt.c
@@ -0,0 +1,225 @@
+/*
+ * Copyright (c) 2017 Stormshield.
+ * Copyright (c) 2017 Semihalf.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "opt_platform.h"
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+#include <sys/bus.h>
+#include <sys/rman.h>
+#include <sys/socket.h>
+#include <sys/taskqueue.h>
+
+#include <net/ethernet.h>
+#include <net/if.h>
+#include <net/if_media.h>
+
+#include <netinet/in.h>
+#include <netinet/ip.h>
+#include <netinet/tcp_lro.h>
+
+#include <machine/bus.h>
+#include <machine/resource.h>
+
+#include <dev/ofw/ofw_bus.h>
+#include <dev/ofw/ofw_bus_subr.h>
+
+#include <dev/mii/mii.h>
+#include <dev/mii/miivar.h>
+
+#include "if_mvnetareg.h"
+#include "if_mvnetavar.h"
+
+#define PHY_MODE_MAXLEN 10
+#define INBAND_STATUS_MAXLEN 16
+
+static int mvneta_fdt_probe(device_t);
+static int mvneta_fdt_attach(device_t);
+
+static device_method_t mvneta_fdt_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, mvneta_fdt_probe),
+ DEVMETHOD(device_attach, mvneta_fdt_attach),
+
+ /* End */
+ DEVMETHOD_END
+};
+
+DEFINE_CLASS_1(mvneta, mvneta_fdt_driver, mvneta_fdt_methods,
+ sizeof(struct mvneta_softc), mvneta_driver);
+
+static devclass_t mvneta_fdt_devclass;
+
+DRIVER_MODULE(mvneta, ofwbus, mvneta_fdt_driver, mvneta_fdt_devclass, 0, 0);
+DRIVER_MODULE(mvneta, simplebus, mvneta_fdt_driver, mvneta_fdt_devclass, 0, 0);
+
+static int mvneta_fdt_phy_acquire(device_t);
+
+static int
+mvneta_fdt_probe(device_t dev)
+{
+
+ if (!ofw_bus_status_okay(dev))
+ return (ENXIO);
+
+ if (!ofw_bus_is_compatible(dev, "marvell,armada-370-neta"))
+ return (ENXIO);
+
+ device_set_desc(dev, "NETA controller");
+ return (BUS_PROBE_DEFAULT);
+}
+
+static int
+mvneta_fdt_attach(device_t dev)
+{
+ int err;
+
+ /* Try to fetch PHY information from FDT */
+ err = mvneta_fdt_phy_acquire(dev);
+ if (err != 0)
+ return (err);
+
+ return (mvneta_attach(dev));
+}
+
+static int
+mvneta_fdt_phy_acquire(device_t dev)
+{
+ struct mvneta_softc *sc;
+ phandle_t node, child, phy_handle;
+ char phymode[PHY_MODE_MAXLEN];
+ char managed[INBAND_STATUS_MAXLEN];
+ char *name;
+
+ sc = device_get_softc(dev);
+ node = ofw_bus_get_node(dev);
+
+ /* PHY mode is crucial */
+ if (OF_getprop(node, "phy-mode", phymode, sizeof(phymode)) <= 0) {
+ device_printf(dev, "Failed to acquire PHY mode from FDT.\n");
+ return (ENXIO);
+ }
+
+ if (strncmp(phymode, "rgmii-id", 8) == 0)
+ sc->phy_mode = MVNETA_PHY_RGMII_ID;
+ else if (strncmp(phymode, "rgmii", 5) == 0)
+ sc->phy_mode = MVNETA_PHY_RGMII;
+ else if (strncmp(phymode, "sgmii", 5) == 0)
+ sc->phy_mode = MVNETA_PHY_SGMII;
+ else if (strncmp(phymode, "qsgmii", 6) == 0)
+ sc->phy_mode = MVNETA_PHY_QSGMII;
+ else
+ sc->phy_mode = MVNETA_PHY_SGMII;
+
+ /* Check if in-band link status will be used */
+ if (OF_getprop(node, "managed", managed, sizeof(managed)) > 0) {
+ if (strncmp(managed, "in-band-status", 14) == 0) {
+ sc->use_inband_status = TRUE;
+ device_printf(dev, "Use in-band link status.\n");
+ return (0);
+ }
+ }
+
+ if (OF_getencprop(node, "phy", (void *)&phy_handle,
+ sizeof(phy_handle)) <= 0) {
+		/* Test for fixed-link (present e.g. in 388-gp) */
+ for (child = OF_child(node); child != 0; child = OF_peer(child)) {
+ if (OF_getprop_alloc(child,
+ "name", 1, (void **)&name) <= 0) {
+ continue;
+ }
+ if (strncmp(name, "fixed-link", 10) == 0) {
+ free(name, M_OFWPROP);
+ if (OF_getencprop(child, "speed",
+ &sc->phy_speed, sizeof(sc->phy_speed)) <= 0) {
+ if (bootverbose) {
+ device_printf(dev,
+ "No PHY information.\n");
+ }
+ return (ENXIO);
+ }
+ if (OF_hasprop(child, "full-duplex"))
+ sc->phy_fdx = TRUE;
+ else
+ sc->phy_fdx = FALSE;
+
+ /* Keep this flag just for the record */
+ sc->phy_addr = MII_PHY_ANY;
+
+ return (0);
+ }
+ free(name, M_OFWPROP);
+ }
+ if (bootverbose) {
+ device_printf(dev,
+ "Could not find PHY information in FDT.\n");
+ }
+ return (ENXIO);
+ } else {
+ phy_handle = OF_instance_to_package(phy_handle);
+ if (OF_getencprop(phy_handle, "reg", &sc->phy_addr,
+ sizeof(sc->phy_addr)) <= 0) {
+ device_printf(dev,
+ "Could not find PHY address in FDT.\n");
+ return (ENXIO);
+ }
+ }
+
+ return (0);
+}
+
+int
+mvneta_fdt_mac_address(struct mvneta_softc *sc, uint8_t *addr)
+{
+ phandle_t node;
+ uint8_t lmac[ETHER_ADDR_LEN];
+ uint8_t zeromac[] = {[0 ... (ETHER_ADDR_LEN - 1)] = 0};
+ int len;
+
+ /*
+ * Retrieve hw address from the device tree.
+ */
+ node = ofw_bus_get_node(sc->dev);
+ if (node == 0)
+ return (ENXIO);
+
+ len = OF_getprop(node, "local-mac-address", (void *)lmac, sizeof(lmac));
+ if (len != ETHER_ADDR_LEN)
+ return (ENOENT);
+
+ if (memcmp(lmac, zeromac, ETHER_ADDR_LEN) == 0) {
+ /* Invalid MAC address (all zeros) */
+ return (EINVAL);
+ }
+ memcpy(addr, lmac, ETHER_ADDR_LEN);
+
+ return (0);
+}
Index: sys/dev/neta/if_mvnetareg.h
===================================================================
--- /dev/null
+++ sys/dev/neta/if_mvnetareg.h
@@ -0,0 +1,923 @@
+/*
+ * Copyright (c) 2017 Stormshield.
+ * Copyright (c) 2017 Semihalf.
+ * Copyright (c) 2015 Internet Initiative Japan Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _IF_MVNETAREG_H_
+#define _IF_MVNETAREG_H_
+
+#if BYTE_ORDER == BIG_ENDIAN
+#error "BIG ENDIAN not supported"
+#endif
+
+#define MVNETA_SIZE 0x4000
+
+#define MVNETA_NWINDOW 6
+#define MVNETA_NREMAP 4
+
+#define MVNETA_MAX_QUEUE_SIZE 8
+#define MVNETA_RX_QNUM_MAX 1
+/* XXX: Currently multi-queue can be used on the Tx side only */
+#ifdef MVNETA_MULTIQUEUE
+#define MVNETA_TX_QNUM_MAX 2
+#else
+#define MVNETA_TX_QNUM_MAX 1
+#endif
+
+#if (MVNETA_TX_QNUM_MAX & (MVNETA_TX_QNUM_MAX - 1)) != 0
+#error "MVNETA_TX_QNUM_MAX Should be a power of 2"
+#endif
+#if (MVNETA_RX_QNUM_MAX & (MVNETA_RX_QNUM_MAX - 1)) != 0
+#error "MVNETA_RX_QNUM_MAX Should be a power of 2"
+#endif
+
+#define MVNETA_QUEUE(n) (1 << (n))
+#define MVNETA_QUEUE_ALL 0xff
+#define MVNETA_TX_QUEUE_ALL ((1<<MVNETA_TX_QNUM_MAX)-1)
+#define MVNETA_RX_QUEUE_ALL ((1<<MVNETA_RX_QNUM_MAX)-1)
+
+/*
+ * Ethernet Unit Registers
+ * GbE0 BASE 0x00007.0000 SIZE 0x4000
+ * GbE1 BASE 0x00007.4000 SIZE 0x4000
+ *
+ * TBD: reasonable bus space submapping....
+ */
+/* Address Decoder Registers */
+#define MVNETA_BASEADDR(n) (0x2200 + ((n) << 3)) /* Base Address */
+#define MVNETA_S(n) (0x2204 + ((n) << 3)) /* Size */
+#define MVNETA_HA(n) (0x2280 + ((n) << 2)) /* High Address Remap */
+#define MVNETA_BARE 0x2290 /* Base Address Enable */
+#define MVNETA_EPAP 0x2294 /* Ethernet Port Access Protect */
+
+/* Global Miscellaneous Registers */
+#define MVNETA_PHYADDR 0x2000
+#define MVNETA_SMI 0x2004
+#define MVNETA_EUDA 0x2008 /* Ethernet Unit Default Address */
+#define MVNETA_EUDID 0x200c /* Ethernet Unit Default ID */
+#define MVNETA_MBUSRETRY 0x2010 /* MBUS retry */
+#define MVNETA_EUIC 0x2080 /* Ethernet Unit Interrupt Cause */
+#define MVNETA_EUIM 0x2084 /* Ethernet Unit Interrupt Mask */
+#define MVNETA_EUEA 0x2094 /* Ethernet Unit Error Address */
+#define MVNETA_EUIAE 0x2098 /* Ethernet Unit Internal Addr Error */
+#define MVNETA_EUC 0x20b0 /* Ethernet Unit Control */
+
+/* Miscellaneous Registers */
+#define MVNETA_SDC 0x241c /* SDMA Configuration */
+
+/* Networking Controller Miscellaneous Registers */
+#define MVNETA_PACC 0x2500 /* Port Acceleration Mode */
+#define MVNETA_PV 0x25bc /* Port Version */
+
+/* Rx DMA Hardware Parser Registers */
+#define MVNETA_EVLANE 0x2410 /* VLAN EtherType */
+#define MVNETA_MACAL 0x2414 /* MAC Address Low */
+#define MVNETA_MACAH 0x2418 /* MAC Address High */
+#define MVNETA_NDSCP 7
+#define MVNETA_DSCP(n) (0x2420 + ((n) << 2))
+#define MVNETA_VPT2P 0x2440 /* VLAN Priority Tag to Priority */
+#define MVNETA_ETP 0x24bc /* Ethernet Type Priority */
+#define MVNETA_NDFSMT 64
+#define MVNETA_DFSMT(n) (0x3400 + ((n) << 2))
+ /* Destination Address Filter Special Multicast Table */
+#define MVNETA_NDFOMT 64
+#define MVNETA_DFOMT(n) (0x3500 + ((n) << 2))
+ /* Destination Address Filter Other Multicast Table */
+#define MVNETA_NDFUT 4
+#define MVNETA_DFUT(n) (0x3600 + ((n) << 2))
+ /* Destination Address Filter Unicast Table */
+
+/* Rx DMA Miscellaneous Registers */
+#define MVNETA_PMFS 0x247c /* Port Rx Minimal Frame Size */
+#define MVNETA_PDFC 0x2484 /* Port Rx Discard Frame Counter */
+#define MVNETA_POFC 0x2488 /* Port Overrun Frame Counter */
+#define MVNETA_RQC 0x2680 /* Receive Queue Command */
+
+/* Rx DMA Networking Controller Miscellaneous Registers */
+#define MVNETA_PRXC(q) (0x1400 + ((q) << 2)) /*Port RX queues Config*/
+#define MVNETA_PRXSNP(q) (0x1420 + ((q) << 2)) /* Port RX queues Snoop */
+#define MVNETA_PRXDQA(q) (0x1480 + ((q) << 2)) /*P RXqueues desc Q Addr*/
+#define MVNETA_PRXDQS(q) (0x14a0 + ((q) << 2)) /*P RXqueues desc Q Size*/
+#define MVNETA_PRXDQTH(q) (0x14c0 + ((q) << 2)) /*P RXqueues desc Q Thrs*/
+#define MVNETA_PRXS(q) (0x14e0 + ((q) << 2)) /*Port RX queues Status */
+#define MVNETA_PRXSU(q) (0x1500 + ((q) << 2)) /*P RXqueues Stat Update*/
+#define MVNETA_PRXDI(q)		(0x1520 + ((q) << 2))	/*P RXqueues Desc Index*/
+#define MVNETA_PRXINIT 0x1cc0 /* Port RX Initialization */
+
+/* Rx DMA Wake on LAN Registers 0x3690 - 0x36b8 */
+
+/* Tx DMA Miscellaneous Registers */
+#define MVNETA_TQC 0x2448 /* Transmit Queue Command */
+#define MVNETA_TQC_1 0x24e4
+#define MVNETA_PXTFTT 0x2478 /* Port Tx FIFO Threshold */
+#define MVNETA_TXBADFCS 0x3cc0 /*Tx Bad FCS Transmitted Pckts Counter*/
+#define MVNETA_TXDROPPED 0x3cc4 /* Tx Dropped Packets Counter */
+
+/* Tx DMA Networking Controller Miscellaneous Registers */
+#define MVNETA_PTXDQA(q) (0x3c00 + ((q) << 2)) /*P TXqueues desc Q Addr*/
+#define MVNETA_PTXDQS(q) (0x3c20 + ((q) << 2)) /*P TXqueues desc Q Size*/
+#define MVNETA_PTXS(q) (0x3c40 + ((q) << 2)) /* Port TX queues Status*/
+#define MVNETA_PTXSU(q) (0x3c60 + ((q) << 2)) /*P TXqueues Stat Update*/
+#define MVNETA_PTXDI(q) (0x3c80 + ((q) << 2)) /* P TXqueues Desc Index*/
+#define MVNETA_TXTBC(q) (0x3ca0 + ((q) << 2)) /* TX Trans-ed Buf Count*/
+#define MVNETA_PTXINIT 0x3cf0 /* Port TX Initialization */
+
+/* Tx DMA Packet Modification Registers */
+#define MVNETA_NMH 15
+#define MVNETA_TXMH(n) (0x3d44 + ((n) << 2))
+#define MVNETA_TXMTU 0x3d88
+
+/* Tx DMA Queue Arbiter Registers (Version 1) */
+#define MVNETA_TQFPC_V1 0x24dc /* Transmit Queue Fixed Priority Cfg */
+#define MVNETA_TQTBC_V1 0x24e0 /* Transmit Queue Token-Bucket Cfg */
+#define MVNETA_MTU_V1 0x24e8 /* MTU */
+#define MVNETA_PMTBS_V1 0x24ec /* Port Max Token-Bucket Size */
+#define MVNETA_TQTBCOUNT_V1(q) (0x2700 + ((q) << 4))
+ /* Transmit Queue Token-Bucket Counter */
+#define MVNETA_TQTBCONFIG_V1(q) (0x2704 + ((q) << 4))
+ /* Transmit Queue Token-Bucket Configuration */
+#define MVNETA_PTTBC_V1		0x2740	/* Port Transmit Bucket Counter */
+
+/* Tx DMA Queue Arbiter Registers (Version 3) */
+#define MVNETA_TQC1_V3 0x3e00 /* Transmit Queue Command1 */
+#define MVNETA_TQFPC_V3 0x3e04 /* Transmit Queue Fixed Priority Cfg */
+#define MVNETA_BRC_V3 0x3e08 /* Basic Refill No of Clocks */
+#define MVNETA_MTU_V3 0x3e0c /* MTU */
+#define MVNETA_PREFILL_V3		0x3e10	/* Port Bucket Refill */
+#define MVNETA_PMTBS_V3 0x3e14 /* Port Max Token-Bucket Size */
+#define MVNETA_QREFILL_V3(q) (0x3e20 + ((q) << 2))
+ /* Transmit Queue Refill */
+#define MVNETA_QMTBS_V3(q) (0x3e40 + ((q) << 2))
+ /* Transmit Queue Max Token-Bucket Size */
+#define MVNETA_QTTBC_V3(q) (0x3e60 + ((q) << 2))
+ /* Transmit Queue Token-Bucket Counter */
+#define MVNETA_TQAC_V3(q) (0x3e80 + ((q) << 2))
+ /* Transmit Queue Arbiter Cfg */
+#define MVNETA_TQIPG_V3(q) (0x3ea0 + ((q) << 2))
+ /* Transmit Queue IPG(valid q=2..3) */
+#define MVNETA_HITKNINLOPKT_V3 0x3eb0 /* High Token in Low Packet */
+#define MVNETA_HITKNINASYNCPKT_V3 0x3eb4 /* High Token in Async Packet */
+#define MVNETA_LOTKNINASYNCPKT_V3 0x3eb8 /* Low Token in Async Packet */
+#define MVNETA_TS_V3 0x3ebc /* Token Speed */
+
+/* RX_TX DMA Registers */
+#define MVNETA_PXC 0x2400 /* Port Configuration */
+#define MVNETA_PXCX 0x2404 /* Port Configuration Extend */
+#define MVNETA_MH 0x2454 /* Marvell Header */
+
+/* Serial(SMI/MII) Registers */
+#define MVNETA_PSC0 0x243c /* Port Serial Control0 */
+#define MVNETA_PS0 0x2444 /* Ethernet Port Status */
+#define MVNETA_PSERDESCFG 0x24a0 /* Serdes Configuration */
+#define MVNETA_PSERDESSTS 0x24a4 /* Serdes Status */
+#define MVNETA_PSOMSCD 0x24f4 /* One mS Clock Divider */
+#define MVNETA_PSPFCCD 0x24f8 /* Periodic Flow Control Clock Divider*/
+
+/* Gigabit Ethernet MAC Serial Parameters Configuration Registers */
+#define MVNETA_PSPC 0x2c14 /* Port Serial Parameters Config */
+#define MVNETA_PSP1C 0x2c94 /* Port Serial Parameters 1 Config */
+
+/* Gigabit Ethernet Auto-Negotiation Configuration Registers */
+#define MVNETA_PANC 0x2c0c /* Port Auto-Negotiation Configuration*/
+
+/* Gigabit Ethernet MAC Control Registers */
+#define MVNETA_PMACC0 0x2c00 /* Port MAC Control 0 */
+#define MVNETA_PMACC1 0x2c04 /* Port MAC Control 1 */
+#define MVNETA_PMACC2 0x2c08 /* Port MAC Control 2 */
+#define MVNETA_PMACC3 0x2c48 /* Port MAC Control 3 */
+#define MVNETA_CCFCPST(p) (0x2c58 + ((p) << 2)) /*CCFC Port Speed Timerp*/
+#define MVNETA_PMACC4 0x2c90 /* Port MAC Control 4 */
+
+/* Gigabit Ethernet MAC Interrupt Registers */
+#define MVNETA_PIC 0x2c20
+#define MVNETA_PIM 0x2c24
+
+/* Gigabit Ethernet Low Power Idle Registers */
+#define MVNETA_LPIC0 0x2cc0 /* LowPowerIdle control 0 */
+#define MVNETA_LPIC1 0x2cc4 /* LPI control 1 */
+#define MVNETA_LPIC2 0x2cc8 /* LPI control 2 */
+#define MVNETA_LPIS 0x2ccc /* LPI status */
+#define MVNETA_LPIC 0x2cd0 /* LPI counter */
+
+/* Gigabit Ethernet MAC PRBS Check Status Registers */
+#define MVNETA_PPRBSS 0x2c38 /* Port PRBS Status */
+#define MVNETA_PPRBSEC 0x2c3c /* Port PRBS Error Counter */
+
+/* Gigabit Ethernet MAC Status Registers */
+#define MVNETA_PSR 0x2c10 /* Port Status Register0 */
+
+/* Networking Controller Interrupt Registers */
+#define MVNETA_PCP2Q(cpu) (0x2540 + ((cpu) << 2))
+#define MVNETA_PRXITTH(q) (0x2580 + ((q) << 2))
+ /* Port Rx Interrupt Threshold */
+#define MVNETA_PRXTXTIC 0x25a0 /*Port RX_TX Threshold Interrupt Cause*/
+#define MVNETA_PRXTXTIM 0x25a4 /*Port RX_TX Threshold Interrupt Mask */
+#define MVNETA_PRXTXIC 0x25a8 /* Port RX_TX Interrupt Cause */
+#define MVNETA_PRXTXIM 0x25ac /* Port RX_TX Interrupt Mask */
+#define MVNETA_PMIC 0x25b0 /* Port Misc Interrupt Cause */
+#define MVNETA_PMIM 0x25b4 /* Port Misc Interrupt Mask */
+#define MVNETA_PIE 0x25b8 /* Port Interrupt Enable */
+
+#define MVNETA_PSNPCFG 0x25e4 /* Port Snoop Config */
+#define MVNETA_PSNPCFG_DESCSNP_MASK (0x3 << 4)
+#define MVNETA_PSNPCFG_BUFSNP_MASK (0x3 << 8)
+
+/* Miscellaneous Interrupt Registers */
+#define MVNETA_PEUIAE 0x2494 /* Port Internal Address Error */
+
+/* SGMII PHY Registers */
+#define MVNETA_PPLLC 0x2e04 /* Power and PLL Control */
+#define MVNETA_TESTC0 0x2e54 /* PHY Test Control 0 */
+#define MVNETA_TESTPRBSEC0 0x2e7c /* PHY Test PRBS Error Counter 0 */
+#define MVNETA_TESTPRBSEC1 0x2e80 /* PHY Test PRBS Error Counter 1 */
+#define MVNETA_TESTOOB0 0x2e84 /* PHY Test OOB 0 */
+#define MVNETA_DLE 0x2e8c /* Digital Loopback Enable */
+#define MVNETA_RCS 0x2f18 /* Reference Clock Select */
+#define MVNETA_COMPHYC 0x2f18 /* COMPHY Control */
+
+/*
+ * Ethernet MAC MIB Registers
+ * GbE0 BASE 0x00007.3000
+ * GbE1 BASE 0x00007.7000
+ */
+/* MAC MIB Counters 0x3000 - 0x307c */
+#define MVNETA_PORTMIB_BASE 0x3000
+#define MVNETA_PORTMIB_SIZE 0x0080
+#define MVNETA_PORTMIB_NOCOUNTER 30
+
+/* Rx */
+#define MVNETA_MIB_RX_GOOD_OCT 0x00 /* 64bit */
+#define MVNETA_MIB_RX_BAD_OCT 0x08
+#define MVNETA_MIB_RX_GOOD_FRAME 0x10
+#define MVNETA_MIB_RX_BAD_FRAME 0x14
+#define MVNETA_MIB_RX_BCAST_FRAME 0x18
+#define MVNETA_MIB_RX_MCAST_FRAME 0x1c
+#define MVNETA_MIB_RX_FRAME64_OCT 0x20
+#define MVNETA_MIB_RX_FRAME127_OCT 0x24
+#define MVNETA_MIB_RX_FRAME255_OCT 0x28
+#define MVNETA_MIB_RX_FRAME511_OCT 0x2c
+#define MVNETA_MIB_RX_FRAME1023_OCT 0x30
+#define MVNETA_MIB_RX_FRAMEMAX_OCT 0x34
+
+/* Tx */
+#define MVNETA_MIB_TX_MAC_TRNS_ERR 0x0c
+#define MVNETA_MIB_TX_GOOD_OCT 0x38 /* 64bit */
+#define MVNETA_MIB_TX_GOOD_FRAME 0x40
+#define MVNETA_MIB_TX_EXCES_COL 0x44
+#define MVNETA_MIB_TX_MCAST_FRAME 0x48
+#define MVNETA_MIB_TX_BCAST_FRAME 0x4c
+#define MVNETA_MIB_TX_MAC_CTL_ERR 0x50
+
+/* Flow Control */
+#define MVNETA_MIB_FC_SENT 0x54
+#define MVNETA_MIB_FC_GOOD 0x58
+#define MVNETA_MIB_FC_BAD 0x5c
+
+/* Packet Processing */
+#define MVNETA_MIB_PKT_UNDERSIZE 0x60
+#define MVNETA_MIB_PKT_FRAGMENT 0x64
+#define MVNETA_MIB_PKT_OVERSIZE 0x68
+#define MVNETA_MIB_PKT_JABBER 0x6c
+
+/* MAC Layer Errors */
+#define MVNETA_MIB_MAC_RX_ERR 0x70
+#define MVNETA_MIB_MAC_CRC_ERR 0x74
+#define MVNETA_MIB_MAC_COL 0x78
+#define MVNETA_MIB_MAC_LATE_COL 0x7c
+
+/* END OF REGISTER NUMBERS */
+
+/*
+ *
+ * Register Formats
+ *
+ */
+/*
+ * Address Decoder Registers
+ */
+/* Base Address (MVNETA_BASEADDR) */
+#define MVNETA_BASEADDR_TARGET(target) ((target) & 0xf)
+#define MVNETA_BASEADDR_ATTR(attr) (((attr) & 0xff) << 8)
+#define MVNETA_BASEADDR_BASE(base) ((base) & 0xffff0000)
+
+/* Size (MVNETA_S) */
+#define MVNETA_S_SIZE(size) (((size) - 1) & 0xffff0000)
+
+/* Base Address Enable (MVNETA_BARE) */
+#define MVNETA_BARE_EN_MASK ((1 << MVNETA_NWINDOW) - 1)
+#define MVNETA_BARE_EN(win) ((1 << (win)) & MVNETA_BARE_EN_MASK)
+
+/* Ethernet Port Access Protect (MVNETA_EPAP) */
+#define MVNETA_EPAP_AC_NAC 0x0 /* No access allowed */
+#define MVNETA_EPAP_AC_RO 0x1 /* Read Only */
+#define MVNETA_EPAP_AC_FA 0x3 /* Full access (r/w) */
+#define MVNETA_EPAP_EPAR(win, ac) ((ac) << ((win) * 2))
+
+/*
+ * Global Miscellaneous Registers
+ */
+/* PHY Address (MVNETA_PHYADDR) */
+#define MVNETA_PHYADDR_PHYAD(phy) ((phy) & 0x1f)
+#define MVNETA_PHYADDR_GET_PHYAD(reg) ((reg) & 0x1f)
+
+/* SMI register fields (MVNETA_SMI) */
+#define MVNETA_SMI_DATA_MASK 0x0000ffff
+#define MVNETA_SMI_PHYAD(phy) (((phy) & 0x1f) << 16)
+#define MVNETA_SMI_REGAD(reg) (((reg) & 0x1f) << 21)
+#define MVNETA_SMI_OPCODE_WRITE (0 << 26)
+#define MVNETA_SMI_OPCODE_READ (1 << 26)
+#define MVNETA_SMI_READVALID (1 << 27)
+#define MVNETA_SMI_BUSY (1 << 28)
+
+/* Ethernet Unit Default ID (MVNETA_EUDID) */
+#define MVNETA_EUDID_DIDR_MASK 0x0000000f
+#define MVNETA_EUDID_DIDR(id) ((id) & 0x0f)
+#define MVNETA_EUDID_DATTR_MASK 0x00000ff0
+#define MVNETA_EUDID_DATTR(attr) (((attr) & 0xff) << 4)
+
+/* Ethernet Unit Interrupt Cause (MVNETA_EUIC) */
+#define MVNETA_EUIC_ETHERINTSUM (1 << 0)
+#define MVNETA_EUIC_PARITY (1 << 1)
+#define MVNETA_EUIC_ADDRVIOL (1 << 2)
+#define MVNETA_EUIC_ADDRVNOMATCH (1 << 3)
+#define MVNETA_EUIC_SMIDONE (1 << 4)
+#define MVNETA_EUIC_COUNTWA (1 << 5)
+#define MVNETA_EUIC_INTADDRERR (1 << 7)
+#define MVNETA_EUIC_PORT0DPERR (1 << 9)
+#define MVNETA_EUIC_TOPDPERR (1 << 12)
+
+/* Ethernet Unit Internal Addr Error (MVNETA_EUIAE) */
+#define MVNETA_EUIAE_INTADDR_MASK 0x000001ff
+#define MVNETA_EUIAE_INTADDR(addr) ((addr) & 0x1ff)
+#define MVNETA_EUIAE_GET_INTADDR(addr) ((addr) & 0x1ff)
+
+/* Ethernet Unit Control (MVNETA_EUC) */
+#define MVNETA_EUC_POLLING (1 << 1)
+#define MVNETA_EUC_PORTRESET (1 << 24)
+#define MVNETA_EUC_RAMSINITIALIZATIONCOMPLETED (1 << 25)
+
+/*
+ * Miscellaneous Registers
+ */
+/* SDMA Configuration (MVNETA_SDC) */
+#define MVNETA_SDC_RXBSZ(x) ((x) << 1)
+#define MVNETA_SDC_RXBSZ_MASK MVNETA_SDC_RXBSZ(7)
+#define MVNETA_SDC_RXBSZ_1_64BITWORDS MVNETA_SDC_RXBSZ(0)
+#define MVNETA_SDC_RXBSZ_2_64BITWORDS MVNETA_SDC_RXBSZ(1)
+#define MVNETA_SDC_RXBSZ_4_64BITWORDS MVNETA_SDC_RXBSZ(2)
+#define MVNETA_SDC_RXBSZ_8_64BITWORDS MVNETA_SDC_RXBSZ(3)
+#define MVNETA_SDC_RXBSZ_16_64BITWORDS MVNETA_SDC_RXBSZ(4)
+#define MVNETA_SDC_BLMR (1 << 4)
+#define MVNETA_SDC_BLMT (1 << 5)
+#define MVNETA_SDC_SWAPMODE (1 << 6)
+#define MVNETA_SDC_TXBSZ(x) ((x) << 22)
+#define MVNETA_SDC_TXBSZ_MASK MVNETA_SDC_TXBSZ(7)
+#define MVNETA_SDC_TXBSZ_1_64BITWORDS MVNETA_SDC_TXBSZ(0)
+#define MVNETA_SDC_TXBSZ_2_64BITWORDS MVNETA_SDC_TXBSZ(1)
+#define MVNETA_SDC_TXBSZ_4_64BITWORDS MVNETA_SDC_TXBSZ(2)
+#define MVNETA_SDC_TXBSZ_8_64BITWORDS MVNETA_SDC_TXBSZ(3)
+#define MVNETA_SDC_TXBSZ_16_64BITWORDS MVNETA_SDC_TXBSZ(4)
+
+/*
+ * Networking Controller Miscellaneous Registers
+ */
+/* Port Acceleration Mode (MVNETA_PACC) */
+#define MVNETA_PACC_ACCELERATIONMODE_MASK 0x7
+#define MVNETA_PACC_ACCELERATIONMODE_EDM 0x1 /* Enhanced Desc Mode */
+
+/* Port Version (MVNETA_PV) */
+#define MVNETA_PV_VERSION_MASK 0xff
+#define MVNETA_PV_VERSION(v) ((v) & 0xff)
+#define MVNETA_PV_GET_VERSION(reg) ((reg) & 0xff)
+
+/*
+ * Rx DMA Hardware Parser Registers
+ */
+/* Ether Type Priority (MVNETA_ETP) */
+#define MVNETA_ETP_ETHERTYPEPRIEN (1 << 0) /* EtherType Prio Ena */
+#define MVNETA_ETP_ETHERTYPEPRIFRSTEN (1 << 1)
+#define MVNETA_ETP_ETHERTYPEPRIQ (0x7 << 2) /*EtherType Prio Queue*/
+#define MVNETA_ETP_ETHERTYPEPRIVAL (0xffff << 5) /*EtherType Prio Value*/
+#define MVNETA_ETP_FORCEUNICSTHIT (1 << 21) /* Force Unicast hit */
+
+/* Destination Address Filter Registers (MVNETA_DF{SM,OM,U}T) */
+#define MVNETA_DF(n, x) ((x) << (8 * (n)))
+#define MVNETA_DF_PASS (1 << 0)
+#define MVNETA_DF_QUEUE(q) ((q) << 1)
+#define MVNETA_DF_QUEUE_ALL ((MVNETA_RX_QNUM_MAX-1) << 1)
+#define MVNETA_DF_QUEUE_MASK ((MVNETA_RX_QNUM_MAX-1) << 1)
+
+/*
+ * Rx DMA Miscellaneous Registers
+ */
+/* Port Rx Minimal Frame Size (MVNETA_PMFS) */
+#define MVNETA_PMFS_RXMFS(rxmfs) (((rxmfs) - 40) & 0x7c)
+
+/* Receive Queue Command (MVNETA_RQC) */
+#define MVNETA_RQC_EN_MASK (0xff << 0) /* Enable Q */
+#define MVNETA_RQC_ENQ(q) (1 << (0 + (q)))
+#define MVNETA_RQC_EN(n) ((n) << 0)
+#define MVNETA_RQC_DIS_MASK (0xff << 8) /* Disable Q */
+#define MVNETA_RQC_DISQ(q) (1 << (8 + (q)))
+#define MVNETA_RQC_DIS(n) ((n) << 8)
+
+/*
+ * Rx DMA Networking Controller Miscellaneous Registers
+ */
+/* Port RX queues Configuration (MVNETA_PRXC) */
+#define MVNETA_PRXC_PACKETOFFSET(o) (((o) & 0xf) << 8)
+
+/* Port RX queues Snoop (MVNETA_PRXSNP) */
+#define MVNETA_PRXSNP_SNOOPNOOFBYTES(b) (((b) & 0x3fff) << 0)
+#define MVNETA_PRXSNP_L2DEPOSITNOOFBYTES(b) (((b) & 0x3fff) << 16)
+
+/* Port RX queues Descriptors Queue Size (MVNETA_PRXDQS) */
+#define MVNETA_PRXDQS_DESCRIPTORSQUEUESIZE(s) (((s) & 0x3fff) << 0)
+#define MVNETA_PRXDQS_BUFFERSIZE(s) (((s) & 0x1fff) << 19)
+
+/* Port RX queues Descriptors Queue Threshold (MVNETA_PRXDQTH) */
+ /* Occupied Descriptors Threshold */
+#define MVNETA_PRXDQTH_ODT(x) (((x) & 0x3fff) << 0)
+ /* Non Occupied Descriptors Threshold */
+#define MVNETA_PRXDQTH_NODT(x) (((x) & 0x3fff) << 16)
+
+/* Port RX queues Status (MVNETA_PRXS) */
+ /* Occupied Descriptors Counter */
+#define MVNETA_PRXS_ODC(x) (((x) & 0x3fff) << 0)
+ /* Non Occupied Descriptors Counter */
+#define MVNETA_PRXS_NODC(x) (((x) & 0x3fff) << 16)
+#define MVNETA_PRXS_GET_ODC(reg) (((reg) >> 0) & 0x3fff)
+#define MVNETA_PRXS_GET_NODC(reg) (((reg) >> 16) & 0x3fff)
+
+/* Port RX queues Status Update (MVNETA_PRXSU) */
+#define MVNETA_PRXSU_NOOFPROCESSEDDESCRIPTORS(x) (((x) & 0xff) << 0)
+#define MVNETA_PRXSU_NOOFNEWDESCRIPTORS(x) (((x) & 0xff) << 16)
+
+/* Port RX Initialization (MVNETA_PRXINIT) */
+#define MVNETA_PRXINIT_RXDMAINIT (1 << 0)
+
+/*
+ * Tx DMA Miscellaneous Registers
+ */
+/* Transmit Queue Command (MVNETA_TQC) */
+#define MVNETA_TQC_EN_MASK (0xff << 0)
+#define MVNETA_TQC_ENQ(q) (1 << ((q) + 0))/* Enable Q */
+#define MVNETA_TQC_EN(n) ((n) << 0)
+#define MVNETA_TQC_DIS_MASK (0xff << 8)
+#define MVNETA_TQC_DISQ(q) (1 << ((q) + 8))/* Disable Q */
+#define MVNETA_TQC_DIS(n) ((n) << 8)
+
+/*
+ * Tx DMA Networking Controller Miscellaneous Registers
+ */
+/* Port TX queues Descriptors Queue Size (MVNETA_PTXDQS) */
+ /* Descriptors Queue Size */
+#define MVNETA_PTXDQS_DQS_MASK (0x3fff << 0)
+#define MVNETA_PTXDQS_DQS(x) (((x) & 0x3fff) << 0)
+ /* Transmitted Buffer Threshold */
+#define MVNETA_PTXDQS_TBT_MASK (0x3fff << 16)
+#define MVNETA_PTXDQS_TBT(x) (((x) & 0x3fff) << 16)
+
+/* Port TX queues Status (MVNETA_PTXS) */
+ /* Transmitted Buffer Counter */
+#define MVNETA_PTXS_TBC(x) (((x) & 0x3fff) << 16)
+
+#define MVNETA_PTXS_GET_TBC(reg) (((reg) >> 16) & 0x3fff)
+ /* Pending Descriptors Counter */
+#define MVNETA_PTXS_PDC(x) ((x) & 0x3fff)
+#define MVNETA_PTXS_GET_PDC(x) ((x) & 0x3fff)
+
+/* Port TX queues Status Update (MVNETA_PTXSU) */
+ /* Number Of Written Descriptors */
+#define MVNETA_PTXSU_NOWD(x) (((x) & 0xff) << 0)
+ /* Number Of Released Buffers */
+#define MVNETA_PTXSU_NORB(x) (((x) & 0xff) << 16)
+
+/* TX Transmitted Buffers Counter (MVNETA_TXTBC) */
+ /* Transmitted Buffers Counter */
+#define MVNETA_TXTBC_TBC(x) (((x) & 0x3fff) << 16)
+
+/* Port TX Initialization (MVNETA_PTXINIT) */
+#define MVNETA_PTXINIT_TXDMAINIT (1 << 0)
+
+/*
+ * Tx DMA Queue Arbiter Registers (Version 1)
+ */
+/* Transmit Queue Fixed Priority Configuration */
+#define MVNETA_TQFPC_EN(q) (1 << (q))
+
+/*
+ * RX_TX DMA Registers
+ */
+/* Port Configuration (MVNETA_PXC) */
+#define MVNETA_PXC_UPM (1 << 0) /* Uni Promisc mode */
+#define MVNETA_PXC_RXQ(q) ((q) << 1)
+#define MVNETA_PXC_RXQ_MASK MVNETA_PXC_RXQ(7)
+#define MVNETA_PXC_RXQARP(q) ((q) << 4)
+#define MVNETA_PXC_RXQARP_MASK MVNETA_PXC_RXQARP(7)
+#define MVNETA_PXC_RB (1 << 7) /* Rej mode of MAC */
+#define MVNETA_PXC_RBIP (1 << 8)
+#define MVNETA_PXC_RBARP (1 << 9)
+#define MVNETA_PXC_AMNOTXES (1 << 12)
+#define MVNETA_PXC_RBARPF (1 << 13)
+#define MVNETA_PXC_TCPCAPEN (1 << 14)
+#define MVNETA_PXC_UDPCAPEN (1 << 15)
+#define MVNETA_PXC_TCPQ(q) ((q) << 16)
+#define MVNETA_PXC_TCPQ_MASK MVNETA_PXC_TCPQ(7)
+#define MVNETA_PXC_UDPQ(q) ((q) << 19)
+#define MVNETA_PXC_UDPQ_MASK MVNETA_PXC_UDPQ(7)
+#define MVNETA_PXC_BPDUQ(q) ((q) << 22)
+#define MVNETA_PXC_BPDUQ_MASK MVNETA_PXC_BPDUQ(7)
+#define MVNETA_PXC_RXCS (1 << 25)
+
+/* Port Configuration Extend (MVNETA_PXCX) */
+#define MVNETA_PXCX_SPAN (1 << 1)
+#define MVNETA_PXCX_TXCRCDIS (1 << 3)
+
+/* Marvell Header (MVNETA_MH) */
+#define MVNETA_MH_MHEN (1 << 0)
+#define MVNETA_MH_DAPREFIX (0x3 << 1)
+#define MVNETA_MH_SPID (0xf << 4)
+#define MVNETA_MH_MHMASK (0x3 << 8)
+#define MVNETA_MH_MHMASK_8QUEUES (0x0 << 8)
+#define MVNETA_MH_MHMASK_4QUEUES (0x1 << 8)
+#define MVNETA_MH_MHMASK_2QUEUES (0x3 << 8)
+#define MVNETA_MH_DSAEN_MASK (0x3 << 10)
+#define MVNETA_MH_DSAEN_DISABLE (0x0 << 10)
+#define MVNETA_MH_DSAEN_NONEXTENDED (0x1 << 10)
+#define MVNETA_MH_DSAEN_EXTENDED (0x2 << 10)
+
+/*
+ * Serial(SMI/MII) Registers
+ */
+#define MVNETA_PSOMSCD_ENABLE (1UL<<31)
+#define MVNETA_PSERDESCFG_QSGMII (0x0667)
+#define MVNETA_PSERDESCFG_SGMII (0x0cc7)
+/* Port Serial Control0 (MVNETA_PSC0) */
+#define MVNETA_PSC0_FORCE_FC_MASK (0x3 << 5)
+#define MVNETA_PSC0_FORCE_FC(fc) (((fc) & 0x3) << 5)
+#define MVNETA_PSC0_FORCE_FC_PAUSE MVNETA_PSC0_FORCE_FC(0x1)
+#define MVNETA_PSC0_FORCE_FC_NO_PAUSE MVNETA_PSC0_FORCE_FC(0x0)
+#define MVNETA_PSC0_FORCE_BP_MASK (0x3 << 7)
+#define MVNETA_PSC0_FORCE_BP(fc) (((fc) & 0x3) << 5)
+#define MVNETA_PSC0_FORCE_BP_JAM MVNETA_PSC0_FORCE_BP(0x1)
+#define MVNETA_PSC0_FORCE_BP_NO_JAM MVNETA_PSC0_FORCE_BP(0x0)
+#define MVNETA_PSC0_DTE_ADV (1 << 14)
+#define MVNETA_PSC0_IGN_RXERR (1 << 28)
+#define MVNETA_PSC0_IGN_COLLISION (1 << 29)
+#define MVNETA_PSC0_IGN_CARRIER (1 << 30)
+
+/* Ethernet Port Status0 (MVNETA_PS0) */
+#define MVNETA_PS0_TXINPROG (1 << 0)
+#define MVNETA_PS0_TXFIFOEMP (1 << 8)
+#define MVNETA_PS0_RXFIFOEMPTY (1 << 16)
+
+/*
+ * Gigabit Ethernet MAC Serial Parameters Configuration Registers
+ */
+#define MVNETA_PSPC_MUST_SET (1 << 3 | 1 << 4 | 1 << 5 | 0x23 << 6)
+#define MVNETA_PSP1C_MUST_SET (1 << 0 | 1 << 1 | 1 << 2)
+
+/*
+ * Gigabit Ethernet Auto-Negotiation Configuration Registers
+ */
+/* Port Auto-Negotiation Configuration (MVNETA_PANC) */
+#define MVNETA_PANC_FORCELINKFAIL (1 << 0)
+#define MVNETA_PANC_FORCELINKPASS (1 << 1)
+#define MVNETA_PANC_INBANDANEN (1 << 2)
+#define MVNETA_PANC_INBANDANBYPASSEN (1 << 3)
+#define MVNETA_PANC_INBANDRESTARTAN (1 << 4)
+#define MVNETA_PANC_SETMIISPEED (1 << 5)
+#define MVNETA_PANC_SETGMIISPEED (1 << 6)
+#define MVNETA_PANC_ANSPEEDEN (1 << 7)
+#define MVNETA_PANC_SETFCEN (1 << 8)
+#define MVNETA_PANC_PAUSEADV (1 << 9)
+#define MVNETA_PANC_ANFCEN (1 << 11)
+#define MVNETA_PANC_SETFULLDX (1 << 12)
+#define MVNETA_PANC_ANDUPLEXEN (1 << 13)
+#define MVNETA_PANC_MUSTSET (1 << 15)
+
+/*
+ * Gigabit Ethernet MAC Control Registers
+ */
+/* Port MAC Control 0 (MVNETA_PMACC0) */
+#define MVNETA_PMACC0_PORTEN (1 << 0)
+#define MVNETA_PMACC0_PORTTYPE (1 << 1)
+#define MVNETA_PMACC0_FRAMESIZELIMIT(x) ((((x) >> 1) << 2) & 0x7ffc)
+#define MVNETA_PMACC0_FRAMESIZELIMIT_MASK (0x7ffc)
+#define MVNETA_PMACC0_MUSTSET (1 << 15)
+
+/* Port MAC Control 1 (MVNETA_PMACC1) */
+#define MVNETA_PMACC1_PCSLB (1 << 6) /* PCS loopback */
+
+/* Port MAC Control 2 (MVNETA_PMACC2) */
+#define MVNETA_PMACC2_INBANDANMODE (1 << 0)
+#define MVNETA_PMACC2_PCSEN (1 << 3)
+#define MVNETA_PMACC2_RGMIIEN (1 << 4)
+#define MVNETA_PMACC2_PADDINGDIS (1 << 5)
+#define MVNETA_PMACC2_PORTMACRESET (1 << 6)
+#define MVNETA_PMACC2_PRBSCHECKEN (1 << 10)
+#define MVNETA_PMACC2_PRBSGENEN (1 << 11)
+#define MVNETA_PMACC2_SDTT_MASK (3 << 12) /* Select Data To Transmit */
+#define MVNETA_PMACC2_SDTT_RM (0 << 12) /* Regular Mode */
+#define MVNETA_PMACC2_SDTT_PRBS (1 << 12) /* PRBS Mode */
+#define MVNETA_PMACC2_SDTT_ZC (2 << 12) /* Zero Constant */
+#define MVNETA_PMACC2_SDTT_OC (3 << 12) /* One Constant */
+#define MVNETA_PMACC2_MUSTSET (3 << 14) /* reserved, must be 1 */
+
+/* Port MAC Control 3 (MVNETA_PMACC3) */
+#define MVNETA_PMACC3_IPG_MASK 0x7f80 /* Inter-Packet Gap, bits 14:7 */
+
+/*
+ * Gigabit Ethernet MAC Interrupt Registers
+ */
+/* Port Interrupt Cause/Mask (MVNETA_PIC/MVNETA_PIM) */
+#define MVNETA_PI_INTSUM (1 << 0)
+#define MVNETA_PI_LSC (1 << 1) /* LinkStatus Change */
+#define MVNETA_PI_ACOP (1 << 2) /* AnCompleted OnPort */
+#define MVNETA_PI_AOOR (1 << 5) /* AddressOut Of Range */
+#define MVNETA_PI_SSC (1 << 6) /* SyncStatus Change */
+#define MVNETA_PI_PRBSEOP (1 << 7) /* PRBS error on port (XXX: same text as bit 10 below — confirm) */
+#define MVNETA_PI_MIBCWA (1 << 15) /* MIB counter wrap around (NB: listed out of bit order) */
+#define MVNETA_PI_QSGMIIPRBSE (1 << 10) /* QSGMII PRBS error */
+#define MVNETA_PI_PCSRXPRLPI (1 << 11) /* PCS Rx path received LPI */
+#define MVNETA_PI_PCSTXPRLPI (1 << 12) /* PCS Tx path received LPI */
+#define MVNETA_PI_MACRXPRLPI (1 << 13) /* MAC Rx path received LPI */
+#define MVNETA_PI_MIBCCD (1 << 14) /* MIB counters copy done */
+
+/*
+ * Gigabit Ethernet MAC Low Power Idle Registers
+ */
+/* LPI Control 0 (MVNETA_LPIC0) */
+#define MVNETA_LPIC0_LILIMIT(x) (((x) & 0xff) << 0)
+#define MVNETA_LPIC0_TSLIMIT(x) (((x) & 0xff) << 8)
+
+/* LPI Control 1 (MVNETA_LPIC1) */
+#define MVNETA_LPIC1_LPIRE (1 << 0) /* LPI request enable */
+#define MVNETA_LPIC1_LPIRF (1 << 1) /* LPI request force */
+#define MVNETA_LPIC1_LPIMM (1 << 2) /* LPI manual mode */
+#define MVNETA_LPIC1_TWLIMIT(x) (((x) & 0xfff) << 4)
+
+/* LPI Control 2 (MVNETA_LPIC2) */
+#define MVNETA_LPIC2_MUSTSET 0x17d /* reserved bits, must be written as-is — confirm vs. spec */
+
+/* LPI Status (MVNETA_LPIS) */
+#define MVNETA_LPIS_PCSRXPLPIS (1 << 0) /* PCS Rx path LPI status */
+#define MVNETA_LPIS_PCSTXPLPIS (1 << 1) /* PCS Tx path LPI status */
+#define MVNETA_LPIS_MACRXPLPIS (1 << 2) /* MAC Rx path LP idle status */
+#define MVNETA_LPIS_MACTXPLPWS (1 << 3) /* MAC Tx path LP wait status */
+#define MVNETA_LPIS_MACTXPLPIS (1 << 4) /* MAC Tx path LP idle status */
+
+/*
+ * Gigabit Ethernet MAC PRBS Check Status Registers
+ */
+/* Port PRBS Status (MVNETA_PPRBSS) */
+#define MVNETA_PPRBSS_PRBSCHECKLOCKED (1 << 0)
+#define MVNETA_PPRBSS_PRBSCHECKRDY (1 << 1)
+
+/*
+ * Gigabit Ethernet MAC Status Registers
+ */
+/* Port Status Register (MVNETA_PSR) */
+#define MVNETA_PSR_LINKUP (1 << 0)
+#define MVNETA_PSR_GMIISPEED (1 << 1)
+#define MVNETA_PSR_MIISPEED (1 << 2)
+#define MVNETA_PSR_FULLDX (1 << 3)
+#define MVNETA_PSR_RXFCEN (1 << 4)
+#define MVNETA_PSR_TXFCEN (1 << 5)
+#define MVNETA_PSR_PRP (1 << 6) /* Port Rx Pause */
+#define MVNETA_PSR_PTP (1 << 7) /* Port Tx Pause */
+#define MVNETA_PSR_PDP (1 << 8) /* Port is Doing Back-Pressure */
+#define MVNETA_PSR_SYNCFAIL10MS (1 << 10)
+#define MVNETA_PSR_ANDONE (1 << 11)
+#define MVNETA_PSR_IBANBA (1 << 12) /* InBand AutoNeg BypassAct */
+#define MVNETA_PSR_SYNCOK (1 << 14)
+
+/*
+ * Networking Controller Interrupt Registers
+ */
+/* Port CPU to Queue */
+#define MVNETA_MAXCPU 2
+#define MVNETA_PCP2Q_TXQEN(q) (1 << ((q) + 8))
+#define MVNETA_PCP2Q_TXQEN_MASK (0xff << 8)
+#define MVNETA_PCP2Q_RXQEN(q) (1 << ((q) + 0))
+#define MVNETA_PCP2Q_RXQEN_MASK (0xff << 0)
+
+/* Port RX_TX Interrupt Threshold */
+#define MVNETA_PRXITTH_RITT(t) ((t) & 0xffffff)
+
+/* Port RX_TX Threshold Interrupt Cause/Mask (MVNETA_PRXTXTIC/MVNETA_PRXTXTIM) */
+#define MVNETA_PRXTXTI_TBTCQ(q) (1 << ((q) + 0))
+#define MVNETA_PRXTXTI_TBTCQ_MASK (0xff << 0)
+#define MVNETA_PRXTXTI_GET_TBTCQ(reg) (((reg) >> 0) & 0xff)
+ /* Tx Buffer Threshold Cross Queue */
+#define MVNETA_PRXTXTI_RBICTAPQ(q) (1 << ((q) + 8))
+#define MVNETA_PRXTXTI_RBICTAPQ_MASK (0xff << 8)
+#define MVNETA_PRXTXTI_GET_RBICTAPQ(reg) (((reg) >> 8) & 0xff)
+ /* Rx Buffer Int. Coalescing Th. Pri. Alrt Q */
+#define MVNETA_PRXTXTI_RDTAQ(q) (1 << ((q) + 16))
+#define MVNETA_PRXTXTI_RDTAQ_MASK (0xff << 16)
+#define MVNETA_PRXTXTI_GET_RDTAQ(reg) (((reg) >> 16) & 0xff)
+ /* Rx Descriptor Threshold Alert Queue */
+#define MVNETA_PRXTXTI_PRXTXICSUMMARY (1 << 29) /* PRXTXI summary */
+#define MVNETA_PRXTXTI_PTXERRORSUMMARY (1 << 30) /* PTEXERROR summary */
+#define MVNETA_PRXTXTI_PMISCICSUMMARY (1UL << 31) /* PMISCIC summary */
+
+/* Port RX_TX Interrupt Cause/Mask (MVNETA_PRXTXIC/MVNETA_PRXTXIM) */
+#define MVNETA_PRXTXI_TBRQ(q) (1 << ((q) + 0))
+#define MVNETA_PRXTXI_TBRQ_MASK (0xff << 0)
+#define MVNETA_PRXTXI_GET_TBRQ(reg) (((reg) >> 0) & 0xff)
+#define MVNETA_PRXTXI_RPQ(q) (1 << ((q) + 8))
+#define MVNETA_PRXTXI_RPQ_MASK (0xff << 8)
+#define MVNETA_PRXTXI_GET_RPQ(reg) (((reg) >> 8) & 0xff)
+#define MVNETA_PRXTXI_RREQ(q) (1 << ((q) + 16))
+#define MVNETA_PRXTXI_RREQ_MASK (0xff << 16)
+#define MVNETA_PRXTXI_GET_RREQ(reg) (((reg) >> 16) & 0xff)
+#define MVNETA_PRXTXI_PRXTXTHICSUMMARY (1 << 29)
+#define MVNETA_PRXTXI_PTXERRORSUMMARY (1 << 30)
+#define MVNETA_PRXTXI_PMISCICSUMMARY (1UL << 31)
+
+/* Port Misc Interrupt Cause/Mask (MVNETA_PMIC/MVNETA_PMIM) */
+#define MVNETA_PMI_PHYSTATUSCHNG (1 << 0)
+#define MVNETA_PMI_LINKCHANGE (1 << 1)
+#define MVNETA_PMI_IAE (1 << 7) /* Internal Address Error */
+#define MVNETA_PMI_RXOVERRUN (1 << 8)
+#define MVNETA_PMI_RXCRCERROR (1 << 9)
+#define MVNETA_PMI_RXLARGEPACKET (1 << 10)
+#define MVNETA_PMI_TXUNDRN (1 << 11)
+#define MVNETA_PMI_PRBSERROR (1 << 12)
+#define MVNETA_PMI_PSCSYNCCHANGE (1 << 13)
+#define MVNETA_PMI_SRSE (1 << 14) /* SerdesRealignSyncError */
+#define MVNETA_PMI_TREQ(q) (1 << ((q) + 24)) /* TxResourceErrorQ */
+#define MVNETA_PMI_TREQ_MASK (0xff << 24) /* TxResourceErrorQ */
+
+/* Port Interrupt Enable (MVNETA_PIE) */
+#define MVNETA_PIE_RXPKTINTRPTENB(q) (1 << ((q) + 0))
+#define MVNETA_PIE_TXPKTINTRPTENB(q) (1 << ((q) + 8))
+#define MVNETA_PIE_RXPKTINTRPTENB_MASK (0xff << 0)
+#define MVNETA_PIE_TXPKTINTRPTENB_MASK (0xff << 8)
+
+/*
+ * Miscellaneous Interrupt Registers
+ */
+#define MVNETA_PEUIAE_ADDR_MASK (0x3fff)
+#define MVNETA_PEUIAE_ADDR(addr) ((addr) & 0x3fff)
+#define MVNETA_PEUIAE_GET_ADDR(reg) ((reg) & 0x3fff)
+
+/*
+ * SGMII PHY Registers
+ */
+/* Power and PLL Control (MVNETA_PPLLC) */
+#define MVNETA_PPLLC_REF_FREF_SEL_MASK (0xf << 0)
+#define MVNETA_PPLLC_PHY_MODE_MASK (7 << 5)
+#define MVNETA_PPLLC_PHY_MODE_SATA (0 << 5)
+#define MVNETA_PPLLC_PHY_MODE_SAS (1 << 5)
+#define MVNETA_PPLLC_PLL_LOCK (1 << 8)
+#define MVNETA_PPLLC_PU_DFE (1 << 10)
+#define MVNETA_PPLLC_PU_TX_INTP (1 << 11)
+#define MVNETA_PPLLC_PU_TX (1 << 12)
+#define MVNETA_PPLLC_PU_RX (1 << 13)
+#define MVNETA_PPLLC_PU_PLL (1 << 14)
+
+/* Digital Loopback Enable (MVNETA_DLE) */
+#define MVNETA_DLE_LOCAL_SEL_BITS_MASK (3 << 10)
+#define MVNETA_DLE_LOCAL_SEL_BITS_10BITS (0 << 10)
+#define MVNETA_DLE_LOCAL_SEL_BITS_20BITS (1 << 10)
+#define MVNETA_DLE_LOCAL_SEL_BITS_40BITS (2 << 10)
+#define MVNETA_DLE_LOCAL_RXPHER_TO_TX_EN (1 << 12)
+#define MVNETA_DLE_LOCAL_ANA_TX2RX_LPBK_EN (1 << 13)
+#define MVNETA_DLE_LOCAL_DIG_TX2RX_LPBK_EN (1 << 14)
+#define MVNETA_DLE_LOCAL_DIG_RX2TX_LPBK_EN (1 << 15)
+
+/* Reference Clock Select (MVNETA_RCS) */
+#define MVNETA_RCS_REFCLK_SEL (1 << 10)
+/*
+ * DMA descriptors
+ *
+ * Layout is fixed by the controller: 32 bytes per descriptor, little-endian.
+ */
+struct mvneta_tx_desc {
+	/* LITTLE_ENDIAN */
+	uint32_t command; /* off 0x00: commands */
+	uint16_t l4ichk; /* initial checksum */
+	uint16_t bytecnt; /* off 0x04: buffer byte count */
+	uint32_t bufptr_pa; /* off 0x08: buffer ptr(PA) */
+	uint32_t flags; /* off 0x0c: flags */
+	uint32_t reserved0; /* off 0x10 */
+	uint32_t reserved1; /* off 0x14 */
+	uint32_t reserved2; /* off 0x18 */
+	uint32_t reserved3; /* off 0x1c */
+};
+
+struct mvneta_rx_desc {
+	/* LITTLE_ENDIAN */
+	uint32_t status; /* off 0x00: status and flags */
+	uint16_t reserved0; /* off 0x04 */
+	uint16_t bytecnt; /* off 0x06: buffer byte count */
+	uint32_t bufptr_pa; /* off 0x08: packet buffer pointer */
+	uint32_t reserved1; /* off 0x0c */
+	uint32_t bufptr_va; /* off 0x10 */
+	uint16_t reserved2; /* off 0x14 */
+	uint16_t l4chk; /* off 0x16: L4 checksum */
+	uint32_t reserved3; /* off 0x18 */
+	uint32_t reserved4; /* off 0x1c */
+};
+
+/*
+ * Received packet command header:
+ * network controller => software
+ * the controller parse the packet and set some flags.
+ */
+#define MVNETA_RX_IPV4_FRAGMENT (1UL << 31) /* Fragment Indicator */
+#define MVNETA_RX_L4_CHECKSUM_OK (1 << 30) /* L4 Checksum */
+/* bit 29 reserved */
+#define MVNETA_RX_U (1 << 28) /* Unknown Destination */
+#define MVNETA_RX_F (1 << 27) /* First buffer */
+#define MVNETA_RX_L (1 << 26) /* Last buffer */
+#define MVNETA_RX_IP_HEADER_OK (1 << 25) /* IP Header is OK */
+#define MVNETA_RX_L3_IP (1 << 24) /* IP Type 0:IP6 1:IP4 */
+#define MVNETA_RX_L2_EV2 (1 << 23) /* Ethernet v2 frame */
+#define MVNETA_RX_L4_MASK (3 << 21) /* L4 Type */
+#define MVNETA_RX_L4_TCP (0x00 << 21)
+#define MVNETA_RX_L4_UDP (0x01 << 21)
+#define MVNETA_RX_L4_OTH (0x10 << 21)
+#define MVNETA_RX_BPDU (1 << 20) /* BPDU frame */
+#define MVNETA_RX_VLAN (1 << 19) /* VLAN tag found */
+#define MVNETA_RX_EC_MASK (3 << 17) /* Error code */
+#define MVNETA_RX_EC_CE (0x00 << 17) /* CRC error */
+#define MVNETA_RX_EC_OR (0x01 << 17) /* FIFO overrun */
+#define MVNETA_RX_EC_MF (0x10 << 17) /* Max. frame len */
+#define MVNETA_RX_EC_RE (0x11 << 17) /* Resource error */
+#define MVNETA_RX_ES (1 << 16) /* Error summary */
+/* bit 15:0 reserved */
+
+/*
+ * Transmit packet command header:
+ * software => network controller
+ */
+#define MVNETA_TX_CMD_L4_CHECKSUM_MASK (0x3 << 30) /* Do L4 Checksum */
+#define MVNETA_TX_CMD_L4_CHECKSUM_FRAG (0x0 << 30)
+#define MVNETA_TX_CMD_L4_CHECKSUM_NOFRAG (0x1 << 30)
+#define MVNETA_TX_CMD_L4_CHECKSUM_NONE (0x2 << 30)
+#define MVNETA_TX_CMD_PACKET_OFFSET_MASK (0x7f << 23) /* Payload offset */
+#define MVNETA_TX_CMD_W_PACKET_OFFSET(v) (((v) & 0x7f) << 23)
+/* bit 22 reserved */
+#define MVNETA_TX_CMD_F (1 << 21) /* First buffer */
+#define MVNETA_TX_CMD_L (1 << 20) /* Last buffer */
+#define MVNETA_TX_CMD_PADDING (1 << 19) /* Pad short frame */
+#define MVNETA_TX_CMD_IP4_CHECKSUM (1 << 18) /* Do IPv4 Checksum */
+#define MVNETA_TX_CMD_L3_IP4 (0 << 17)
+#define MVNETA_TX_CMD_L3_IP6 (1 << 17)
+#define MVNETA_TX_CMD_L4_TCP (0 << 16)
+#define MVNETA_TX_CMD_L4_UDP (1 << 16)
+/* bit 15:13 reserved */
+#define MVNETA_TX_CMD_IP_HEADER_LEN_MASK (0x1f << 8) /* IP header len >> 2 */
+#define MVNETA_TX_CMD_IP_HEADER_LEN(v) (((v) & 0x1f) << 8)
+/* bit 7 reserved */
+#define MVNETA_TX_CMD_L3_OFFSET_MASK (0x7f << 0) /* offset of L3 hdr. */
+#define MVNETA_TX_CMD_L3_OFFSET(v) (((v) & 0x7f) << 0)
+
+/*
+ * Transmit packet extra attributes
+ * and error status returned from network controller.
+ */
+#define MVNETA_TX_F_DSA_TAG (3 << 30) /* DSA Tag */
+/* bit 29:8 reserved */
+#define MVNETA_TX_F_MH_SEL (0xf << 4) /* Marvell Header */
+/* bit 3 reserved */
+#define MVNETA_TX_F_EC_MASK (3 << 1) /* Error code */
+#define MVNETA_TX_F_EC_LC (0x00 << 1) /* Late Collision */
+#define MVNETA_TX_F_EC_UR (0x01 << 1) /* Underrun */
+#define MVNETA_TX_F_EC_RL (0x10 << 1) /* Excess. Collision */
+#define MVNETA_TX_F_EC_RESERVED (0x11 << 1)
+#define MVNETA_TX_F_ES (1 << 0) /* Error summary */
+
+#define MVNETA_ERROR_SUMMARY (1 << 0)
+#define MVNETA_BUFFER_OWNED_MASK (1UL << 31)
+#define MVNETA_BUFFER_OWNED_BY_HOST (0UL << 31)
+#define MVNETA_BUFFER_OWNED_BY_DMA (1UL << 31)
+
+#endif /* _IF_MVNETAREG_H_ */
Index: sys/dev/neta/if_mvnetavar.h
===================================================================
--- /dev/null
+++ sys/dev/neta/if_mvnetavar.h
@@ -0,0 +1,325 @@
+/*
+ * Copyright (c) 2017 Stormshield.
+ * Copyright (c) 2017 Semihalf.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _IF_MVNETAVAR_H_
+#define _IF_MVNETAVAR_H_
+#include <net/if.h>
+
+#define MVNETA_HWHEADER_SIZE 2 /* Marvell Header */
+#define MVNETA_ETHER_SIZE 22 /* Maximum ether size */
+#define MVNETA_MAX_CSUM_MTU 1600 /* Port1,2 hw limit */
+
+/*
+ * Limit support for frame up to hw csum limit
+ * until jumbo frame support is added.
+ */
+#define MVNETA_MAX_FRAME (MVNETA_MAX_CSUM_MTU + MVNETA_ETHER_SIZE)
+
+/*
+ * Default limit of queue length
+ *
+ * queue 0 is lowest priority and queue 7 is highest priority.
+ * IP packets are received on queue 7 by default.
+ */
+#define MVNETA_TX_RING_CNT 512
+#define MVNETA_RX_RING_CNT 256
+
+#define MVNETA_BUFRING_SIZE 1024
+
+#define MVNETA_PACKET_OFFSET 64
+#define MVNETA_PACKET_SIZE MCLBYTES
+
+#define MVNETA_RXTH_COUNT 128
+#define MVNETA_RX_REFILL_COUNT 8
+#define MVNETA_TX_RECLAIM_COUNT 32
+
+/*
+ * Device Register access
+ */
+#define MVNETA_READ(sc, reg) \
+ bus_read_4((sc)->res[0], (reg))
+#define MVNETA_WRITE(sc, reg, val) \
+ bus_write_4((sc)->res[0], (reg), (val))
+
+#define MVNETA_READ_REGION(sc, reg, val, c) \
+ bus_read_region_4((sc)->res[0], (reg), (val), (c))
+#define MVNETA_WRITE_REGION(sc, reg, val, c) \
+ bus_write_region_4((sc)->res[0], (reg), (val), (c))
+
+#define MVNETA_READ_MIB_4(sc, reg) \
+ bus_read_4((sc)->res[0], MVNETA_PORTMIB_BASE + (reg))
+#define MVNETA_READ_MIB_8(sc, reg) \
+ bus_read_8((sc)->res[0], MVNETA_PORTMIB_BASE + (reg))
+
+#define MVNETA_IS_LINKUP(sc) \
+ (MVNETA_READ((sc), MVNETA_PSR) & MVNETA_PSR_LINKUP)
+
+#define MVNETA_IS_QUEUE_SET(queues, q) \
+ ((((queues) >> (q)) & 0x1))
+
+/*
+ * EEE: Low Power Idle config
+ * Default timer is duration of MTU sized frame transmission.
+ * The timer can be negotiated by the LLDP protocol, which we
+ * do not support.
+ */
+#define MVNETA_LPI_TS (ETHERMTU * 8 / 1000) /* [us] */
+#define MVNETA_LPI_TW (ETHERMTU * 8 / 1000) /* [us] */
+#define MVNETA_LPI_LI (ETHERMTU * 8 / 1000) /* [us] */
+
+/*
+ * DMA Descriptor
+ *
+ * the ethernet device has 8 rx/tx DMA queues. each of queue has its own
+ * descriptor list. descriptors are simply indexed by a counter inside
+ * the device.
+ */
+#define MVNETA_TX_SEGLIMIT 32
+
+#define MVNETA_QUEUE_IDLE 1
+#define MVNETA_QUEUE_WORKING 2
+#define MVNETA_QUEUE_DISABLED 3
+
+/* Per-descriptor bookkeeping: mbuf and its DMA map. */
+struct mvneta_buf {
+	struct mbuf * m; /* pointer to related mbuf */
+	bus_dmamap_t dmap; /* DMA map loaded for m */
+};
+
+/* Software state of one receive queue. */
+struct mvneta_rx_ring {
+	int queue_status; /* MVNETA_QUEUE_{IDLE,WORKING,DISABLED} */
+	/* Real descriptors array. shared by RxDMA */
+	struct mvneta_rx_desc *desc;
+	bus_dmamap_t desc_map;
+	bus_addr_t desc_pa;
+
+	/* Management entries for each of descriptors */
+	struct mvneta_buf rxbuf[MVNETA_RX_RING_CNT];
+
+	/* locks */
+	struct mtx ring_mtx;
+
+	/* Index */
+	int dma; /* next descriptor owned by RxDMA */
+	int cpu; /* next descriptor to be processed by CPU */
+
+	/* Limit */
+	int queue_th_received;
+	int queue_th_time; /* [Tclk] */
+
+	/* LRO */
+	struct lro_ctrl lro;
+	boolean_t lro_enabled;
+	/* Is this queue out of mbuf */
+	boolean_t needs_refill;
+} __aligned(CACHE_LINE_SIZE);
+
+/* Software state of one transmit queue. */
+struct mvneta_tx_ring {
+	/* Index of this queue */
+	int qidx;
+	/* IFNET pointer */
+	struct ifnet *ifp;
+	/* Ring buffer for IFNET */
+	struct buf_ring *br;
+	/* Real descriptors array. shared by TxDMA */
+	struct mvneta_tx_desc *desc;
+	bus_dmamap_t desc_map;
+	bus_addr_t desc_pa;
+
+	/* Management entries for each of descriptors */
+	struct mvneta_buf txbuf[MVNETA_TX_RING_CNT];
+
+	/* locks */
+	struct mtx ring_mtx;
+
+	/* Index */
+	int used; /* number of descriptors in flight */
+	int dma; /* next descriptor owned by TxDMA */
+	int cpu; /* next descriptor to be filled by CPU */
+
+	/* watchdog */
+#define MVNETA_WATCHDOG_TXCOMP (hz / 10) /* 100ms */
+#define MVNETA_WATCHDOG (10 * hz) /* 10s */
+	int watchdog_time;
+	int queue_status; /* MVNETA_QUEUE_{IDLE,WORKING,DISABLED} */
+	boolean_t queue_hung;
+
+	/* Task */
+	struct task task;
+	struct taskqueue *taskq;
+
+	/* Stats */
+	uint32_t drv_error;
+} __aligned(CACHE_LINE_SIZE);
+
+/* Advance a TX ring index by n entries, wrapping at MVNETA_TX_RING_CNT. */
+static __inline int
+tx_counter_adv(int ctr, int n)
+{
+	int next = ctr + n;
+
+	/* Wrap without a modulo; the wrap is the uncommon case. */
+	while (__predict_false(next >= MVNETA_TX_RING_CNT))
+		next -= MVNETA_TX_RING_CNT;
+
+	return (next);
+}
+
+/* Advance an RX ring index by n entries, wrapping at MVNETA_RX_RING_CNT. */
+static __inline int
+rx_counter_adv(int ctr, int n)
+{
+	int next = ctr + n;
+
+	/* Wrap without a modulo; the wrap is the uncommon case. */
+	while (__predict_false(next >= MVNETA_RX_RING_CNT))
+		next -= MVNETA_RX_RING_CNT;
+
+	return (next);
+}
+
+/*
+ * Timeout control
+ *
+ * NOTE(review): the three *_TIMEOUT names below lack the MVNETA_ prefix
+ * even though this is a public header — consider namespacing them.
+ */
+#define MVNETA_PHY_TIMEOUT 10000 /* msec */
+#define RX_DISABLE_TIMEOUT 0x1000000 /* times */
+#define TX_DISABLE_TIMEOUT 0x1000000 /* times */
+#define TX_FIFO_EMPTY_TIMEOUT 0x1000000 /* times */
+
+/*
+ * Debug
+ */
+#define KASSERT_SC_MTX(sc) \
+ KASSERT(mtx_owned(&(sc)->mtx), ("SC mutex not owned"))
+#define KASSERT_BM_MTX(sc) \
+ KASSERT(mtx_owned(&(sc)->bm.bm_mtx), ("BM mutex not owned"))
+#define KASSERT_RX_MTX(sc, q) \
+ KASSERT(mtx_owned(&(sc)->rx_ring[(q)].ring_mtx),\
+ ("RX mutex not owned"))
+#define KASSERT_TX_MTX(sc, q) \
+ KASSERT(mtx_owned(&(sc)->tx_ring[(q)].ring_mtx),\
+ ("TX mutex not owned"))
+
+/*
+ * sysctl(9) parameters
+ */
+struct mvneta_sysctl_queue {
+	struct mvneta_softc *sc;
+	int rxtx; /* MVNETA_SYSCTL_RX or MVNETA_SYSCTL_TX */
+	int queue;
+};
+#define MVNETA_SYSCTL_RX 0
+#define MVNETA_SYSCTL_TX 1
+
+struct mvneta_sysctl_mib {
+	struct mvneta_softc *sc;
+	int index; /* index into the MIB counter table */
+	uint64_t counter;
+};
+
+/* PHY connection type, parsed from the FDT "phy-mode" property. */
+enum mvneta_phy_mode {
+	MVNETA_PHY_QSGMII,
+	MVNETA_PHY_SGMII,
+	MVNETA_PHY_RGMII,
+	MVNETA_PHY_RGMII_ID
+};
+
+/*
+ * Ethernet Device main context
+ */
+DECLARE_CLASS(mvneta_driver);
+
+struct mvneta_softc {
+	device_t dev;
+	uint32_t version;
+	/*
+	 * mtx must be held by interface functions to/from
+	 * other frameworks. interrupt handler, sysctl handler,
+	 * ioctl handler, and so on.
+	 */
+	struct mtx mtx;
+	struct resource *res[2]; /* [0]=registers, [1]=interrupt */
+	void *ih_cookie[1];
+
+	struct ifnet *ifp;
+	uint32_t mvneta_if_flags;
+	uint32_t mvneta_media;
+
+	int phy_attached;
+	enum mvneta_phy_mode phy_mode;
+	int phy_addr;
+	int phy_speed; /* PHY speed */
+	boolean_t phy_fdx; /* Full duplex mode */
+	boolean_t autoneg; /* Autonegotiation status */
+	boolean_t use_inband_status; /* In-band link status */
+
+	/*
+	 * Link State control
+	 */
+	boolean_t linkup;
+	device_t miibus;
+	struct mii_data *mii;
+	uint8_t enaddr[ETHER_ADDR_LEN];
+	struct ifmedia mvneta_ifmedia;
+
+	bus_dma_tag_t rx_dtag;
+	bus_dma_tag_t rxbuf_dtag;
+	bus_dma_tag_t tx_dtag;
+	bus_dma_tag_t txmbuf_dtag;
+	struct mvneta_rx_ring rx_ring[MVNETA_RX_QNUM_MAX];
+	struct mvneta_tx_ring tx_ring[MVNETA_TX_QNUM_MAX];
+
+	/*
+	 * Maintenance clock
+	 */
+	struct callout tick_ch;
+
+	int cf_lpi;
+	int cf_fc;
+	int debug;
+
+	/*
+	 * Sysctl interfaces
+	 */
+	struct mvneta_sysctl_queue sysctl_rx_queue[MVNETA_RX_QNUM_MAX];
+	struct mvneta_sysctl_queue sysctl_tx_queue[MVNETA_TX_QNUM_MAX];
+
+	/*
+	 * MIB counter
+	 */
+	struct mvneta_sysctl_mib sysctl_mib[MVNETA_PORTMIB_NOCOUNTER];
+	uint64_t counter_pdfc;
+	uint64_t counter_pofc;
+	uint32_t counter_watchdog; /* manual reset when clearing mib */
+	uint32_t counter_watchdog_mib; /* reset after each mib update */
+};
+#define MVNETA_RX_RING(sc, q) \
+ (&(sc)->rx_ring[(q)])
+#define MVNETA_TX_RING(sc, q) \
+ (&(sc)->tx_ring[(q)])
+
+int mvneta_attach(device_t);
+
+#ifdef FDT
+int mvneta_fdt_mac_address(struct mvneta_softc *, uint8_t *);
+#endif
File Metadata
Details
Attached
Mime Type
text/plain
Expires
Fri, May 15, 9:43 PM (1 h, 33 m)
Storage Engine
blob
Storage Format
Raw Data
Storage Handle
33099927
Default Alt Text
D10706.id29145.diff (145 KB)
Attached To
Mode
D10706: Introduce Armada 38x/XP network controller support
Attached
Detach File
Event Timeline
Log In to Comment