Index: projects/ifnet/sys/dev/bge/if_bge.c
===================================================================
--- projects/ifnet/sys/dev/bge/if_bge.c	(revision 277242)
+++ projects/ifnet/sys/dev/bge/if_bge.c	(revision 277243)
@@ -1,6789 +1,6754 @@
/*-
 * Copyright (c) 2001 Wind River Systems
 * Copyright (c) 1997, 1998, 1999, 2001
 * Bill Paul . All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *    This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include
__FBSDID("$FreeBSD$");

/*
 * Broadcom BCM57xx(x)/BCM590x NetXtreme and NetLink family Ethernet driver
 *
 * The Broadcom BCM5700 is based on technology originally developed by
 * Alteon Networks as part of the Tigon I and Tigon II Gigabit Ethernet
 * MAC chips. The BCM5700, sometimes referred to as the Tigon III, has
 * two on-board MIPS R4000 CPUs and can have as much as 16MB of external
 * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo
 * frames, highly configurable RX filtering, and 16 RX and TX queues
 * (which, along with RX filter rules, can be used for QOS applications).
 * Other features, such as TCP segmentation, may be available as part
 * of value-added firmware updates. Unlike the Tigon I and Tigon II,
 * firmware images can be stored in hardware and need not be compiled
 * into the driver.
 *
 * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will
 * function in a 32-bit/64-bit 33/66Mhz bus, or a 64-bit/133Mhz bus.
 *
 * The BCM5701 is a single-chip solution incorporating both the BCM5700
 * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701
 * does not support external SSRAM.
 *
 * Broadcom also produces a variation of the BCM5700 under the "Altima"
 * brand name, which is functionally similar but lacks PCI-X support.
 *
 * Without external SSRAM, you can only have at most 4 TX rings,
 * and the use of the mini RX ring is disabled. This seems to imply
 * that these features are simply not available on the BCM5701.
As a * result, this driver does not implement any support for the mini RX * ring. */ #ifdef HAVE_KERNEL_OPTION_HEADERS #include "opt_device_polling.h" #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "miidevs.h" #include #ifdef __sparc64__ #include #include #include #include #endif #include #include #include -#define BGE_CSUM_FEATURES (CSUM_IP | CSUM_TCP) #define ETHER_MIN_NOPAD (ETHER_MIN_LEN - ETHER_CRC_LEN) /* i.e., 60 */ MODULE_DEPEND(bge, pci, 1, 1, 1); MODULE_DEPEND(bge, ether, 1, 1, 1); MODULE_DEPEND(bge, miibus, 1, 1, 1); /* "device miibus" required. See GENERIC if you get errors here. */ #include "miibus_if.h" /* * Various supported device vendors/types and their names. Note: the * spec seems to indicate that the hardware still has Alteon's vendor * ID burned into it, though it will always be overriden by the vendor * ID in the EEPROM. Just to be safe, we cover all possibilities. */ static const struct bge_type { uint16_t bge_vid; uint16_t bge_did; } bge_devs[] = { { ALTEON_VENDORID, ALTEON_DEVICEID_BCM5700 }, { ALTEON_VENDORID, ALTEON_DEVICEID_BCM5701 }, { ALTIMA_VENDORID, ALTIMA_DEVICE_AC1000 }, { ALTIMA_VENDORID, ALTIMA_DEVICE_AC1002 }, { ALTIMA_VENDORID, ALTIMA_DEVICE_AC9100 }, { APPLE_VENDORID, APPLE_DEVICE_BCM5701 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5700 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5701 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5702 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5702_ALT }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5702X }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5703 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5703_ALT }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5703X }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5704C }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5704S }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5704S_ALT }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5705 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5705F }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5705K }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5705M }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5705M_ALT }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5714C }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5714S }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5715 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5715S }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5717 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5718 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5719 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5720 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5721 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5722 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5723 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5725 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5727 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5750 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5750M }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5751 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5751F }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5751M }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5752 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5752M }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5753 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5753F }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5753M }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5754 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5754M }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5755 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5755M }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5756 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5761 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5761E }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5761S }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5761SE }, { BCOM_VENDORID, 
BCOM_DEVICEID_BCM5762 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5764 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5780 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5780S }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5781 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5782 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5784 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5785F }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5785G }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5786 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5787 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5787F }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5787M }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5788 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5789 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5901 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5901A2 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5903M }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5906 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5906M }, { BCOM_VENDORID, BCOM_DEVICEID_BCM57760 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM57761 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM57762 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM57764 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM57765 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM57766 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM57767 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM57780 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM57781 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM57782 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM57785 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM57786 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM57787 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM57788 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM57790 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM57791 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM57795 }, { SK_VENDORID, SK_DEVICEID_ALTIMA }, { TC_VENDORID, TC_DEVICEID_3C996 }, { FJTSU_VENDORID, FJTSU_DEVICEID_PW008GE4 }, { FJTSU_VENDORID, FJTSU_DEVICEID_PW008GE5 }, { FJTSU_VENDORID, FJTSU_DEVICEID_PP250450 }, { 0, 0 } }; static const struct bge_vendor { uint16_t v_id; const char *v_name; } bge_vendors[] = { { ALTEON_VENDORID, "Alteon" }, { ALTIMA_VENDORID, "Altima" }, { APPLE_VENDORID, "Apple" }, { BCOM_VENDORID, "Broadcom" }, { SK_VENDORID, "SysKonnect" }, { TC_VENDORID, "3Com" }, { FJTSU_VENDORID, "Fujitsu" }, { 0, NULL } }; static const struct bge_revision { uint32_t br_chipid; const char *br_name; } bge_revisions[] = { { BGE_CHIPID_BCM5700_A0, "BCM5700 A0" }, { BGE_CHIPID_BCM5700_A1, "BCM5700 A1" }, { BGE_CHIPID_BCM5700_B0, "BCM5700 B0" }, { BGE_CHIPID_BCM5700_B1, "BCM5700 B1" }, { BGE_CHIPID_BCM5700_B2, "BCM5700 B2" }, { BGE_CHIPID_BCM5700_B3, "BCM5700 B3" }, { BGE_CHIPID_BCM5700_ALTIMA, "BCM5700 Altima" }, { BGE_CHIPID_BCM5700_C0, "BCM5700 C0" }, { BGE_CHIPID_BCM5701_A0, "BCM5701 A0" }, { BGE_CHIPID_BCM5701_B0, "BCM5701 B0" }, { BGE_CHIPID_BCM5701_B2, "BCM5701 B2" }, { BGE_CHIPID_BCM5701_B5, "BCM5701 B5" }, { BGE_CHIPID_BCM5703_A0, "BCM5703 A0" }, { BGE_CHIPID_BCM5703_A1, "BCM5703 A1" }, { BGE_CHIPID_BCM5703_A2, "BCM5703 A2" }, { BGE_CHIPID_BCM5703_A3, "BCM5703 A3" }, { BGE_CHIPID_BCM5703_B0, "BCM5703 B0" }, { BGE_CHIPID_BCM5704_A0, "BCM5704 A0" }, { BGE_CHIPID_BCM5704_A1, "BCM5704 A1" }, { BGE_CHIPID_BCM5704_A2, "BCM5704 A2" }, { BGE_CHIPID_BCM5704_A3, "BCM5704 A3" }, { BGE_CHIPID_BCM5704_B0, "BCM5704 B0" }, { BGE_CHIPID_BCM5705_A0, "BCM5705 A0" }, { BGE_CHIPID_BCM5705_A1, "BCM5705 A1" }, { BGE_CHIPID_BCM5705_A2, "BCM5705 A2" }, { BGE_CHIPID_BCM5705_A3, "BCM5705 A3" }, { BGE_CHIPID_BCM5750_A0, "BCM5750 A0" }, { BGE_CHIPID_BCM5750_A1, "BCM5750 A1" }, { BGE_CHIPID_BCM5750_A3, "BCM5750 A3" }, { BGE_CHIPID_BCM5750_B0, "BCM5750 B0" }, { BGE_CHIPID_BCM5750_B1, "BCM5750 B1" }, { BGE_CHIPID_BCM5750_C0, "BCM5750 C0" }, { BGE_CHIPID_BCM5750_C1, "BCM5750 C1" }, 
{ BGE_CHIPID_BCM5750_C2, "BCM5750 C2" }, { BGE_CHIPID_BCM5714_A0, "BCM5714 A0" }, { BGE_CHIPID_BCM5752_A0, "BCM5752 A0" }, { BGE_CHIPID_BCM5752_A1, "BCM5752 A1" }, { BGE_CHIPID_BCM5752_A2, "BCM5752 A2" }, { BGE_CHIPID_BCM5714_B0, "BCM5714 B0" }, { BGE_CHIPID_BCM5714_B3, "BCM5714 B3" }, { BGE_CHIPID_BCM5715_A0, "BCM5715 A0" }, { BGE_CHIPID_BCM5715_A1, "BCM5715 A1" }, { BGE_CHIPID_BCM5715_A3, "BCM5715 A3" }, { BGE_CHIPID_BCM5717_A0, "BCM5717 A0" }, { BGE_CHIPID_BCM5717_B0, "BCM5717 B0" }, { BGE_CHIPID_BCM5719_A0, "BCM5719 A0" }, { BGE_CHIPID_BCM5720_A0, "BCM5720 A0" }, { BGE_CHIPID_BCM5755_A0, "BCM5755 A0" }, { BGE_CHIPID_BCM5755_A1, "BCM5755 A1" }, { BGE_CHIPID_BCM5755_A2, "BCM5755 A2" }, { BGE_CHIPID_BCM5722_A0, "BCM5722 A0" }, { BGE_CHIPID_BCM5761_A0, "BCM5761 A0" }, { BGE_CHIPID_BCM5761_A1, "BCM5761 A1" }, { BGE_CHIPID_BCM5762_A0, "BCM5762 A0" }, { BGE_CHIPID_BCM5784_A0, "BCM5784 A0" }, { BGE_CHIPID_BCM5784_A1, "BCM5784 A1" }, /* 5754 and 5787 share the same ASIC ID */ { BGE_CHIPID_BCM5787_A0, "BCM5754/5787 A0" }, { BGE_CHIPID_BCM5787_A1, "BCM5754/5787 A1" }, { BGE_CHIPID_BCM5787_A2, "BCM5754/5787 A2" }, { BGE_CHIPID_BCM5906_A1, "BCM5906 A1" }, { BGE_CHIPID_BCM5906_A2, "BCM5906 A2" }, { BGE_CHIPID_BCM57765_A0, "BCM57765 A0" }, { BGE_CHIPID_BCM57765_B0, "BCM57765 B0" }, { BGE_CHIPID_BCM57780_A0, "BCM57780 A0" }, { BGE_CHIPID_BCM57780_A1, "BCM57780 A1" }, { 0, NULL } }; /* * Some defaults for major revisions, so that newer steppings * that we don't know about have a shot at working. */ static const struct bge_revision bge_majorrevs[] = { { BGE_ASICREV_BCM5700, "unknown BCM5700" }, { BGE_ASICREV_BCM5701, "unknown BCM5701" }, { BGE_ASICREV_BCM5703, "unknown BCM5703" }, { BGE_ASICREV_BCM5704, "unknown BCM5704" }, { BGE_ASICREV_BCM5705, "unknown BCM5705" }, { BGE_ASICREV_BCM5750, "unknown BCM5750" }, { BGE_ASICREV_BCM5714_A0, "unknown BCM5714" }, { BGE_ASICREV_BCM5752, "unknown BCM5752" }, { BGE_ASICREV_BCM5780, "unknown BCM5780" }, { BGE_ASICREV_BCM5714, "unknown BCM5714" }, { BGE_ASICREV_BCM5755, "unknown BCM5755" }, { BGE_ASICREV_BCM5761, "unknown BCM5761" }, { BGE_ASICREV_BCM5784, "unknown BCM5784" }, { BGE_ASICREV_BCM5785, "unknown BCM5785" }, /* 5754 and 5787 share the same ASIC ID */ { BGE_ASICREV_BCM5787, "unknown BCM5754/5787" }, { BGE_ASICREV_BCM5906, "unknown BCM5906" }, { BGE_ASICREV_BCM57765, "unknown BCM57765" }, { BGE_ASICREV_BCM57766, "unknown BCM57766" }, { BGE_ASICREV_BCM57780, "unknown BCM57780" }, { BGE_ASICREV_BCM5717, "unknown BCM5717" }, { BGE_ASICREV_BCM5719, "unknown BCM5719" }, { BGE_ASICREV_BCM5720, "unknown BCM5720" }, { BGE_ASICREV_BCM5762, "unknown BCM5762" }, { 0, NULL } }; #define BGE_IS_JUMBO_CAPABLE(sc) ((sc)->bge_flags & BGE_FLAG_JUMBO) #define BGE_IS_5700_FAMILY(sc) ((sc)->bge_flags & BGE_FLAG_5700_FAMILY) #define BGE_IS_5705_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_5705_PLUS) #define BGE_IS_5714_FAMILY(sc) ((sc)->bge_flags & BGE_FLAG_5714_FAMILY) #define BGE_IS_575X_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_575X_PLUS) #define BGE_IS_5755_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_5755_PLUS) #define BGE_IS_5717_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_5717_PLUS) #define BGE_IS_57765_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_57765_PLUS) static uint32_t bge_chipid(device_t); static const struct bge_vendor * bge_lookup_vendor(uint16_t); static const struct bge_revision * bge_lookup_rev(uint32_t); typedef int (*bge_eaddr_fcn_t)(struct bge_softc *, uint8_t[]); static int bge_probe(device_t); static int bge_attach(device_t); static int bge_detach(device_t); static int 
bge_suspend(device_t); static int bge_resume(device_t); static void bge_release_resources(struct bge_softc *); static void bge_dma_map_addr(void *, bus_dma_segment_t *, int, int); static int bge_dma_alloc(struct bge_softc *); static void bge_dma_free(struct bge_softc *); static int bge_dma_ring_alloc(struct bge_softc *, bus_size_t, bus_size_t, bus_dma_tag_t *, uint8_t **, bus_dmamap_t *, bus_addr_t *, const char *); static void bge_devinfo(struct bge_softc *); static int bge_mbox_reorder(struct bge_softc *); static int bge_get_eaddr_fw(struct bge_softc *sc, uint8_t ether_addr[]); static int bge_get_eaddr_mem(struct bge_softc *, uint8_t[]); static int bge_get_eaddr_nvram(struct bge_softc *, uint8_t[]); static int bge_get_eaddr_eeprom(struct bge_softc *, uint8_t[]); static int bge_get_eaddr(struct bge_softc *, uint8_t[]); static void bge_txeof(struct bge_softc *, uint16_t); static void bge_rxcsum(struct bge_softc *, struct bge_rx_bd *, struct mbuf *); static int bge_rxeof(struct bge_softc *, uint16_t, int); static void bge_asf_driver_up (struct bge_softc *); static void bge_tick(void *); static void bge_stats_clear_regs(struct bge_softc *); static void bge_stats_update(struct bge_softc *); static void bge_stats_update_regs(struct bge_softc *); static struct mbuf *bge_check_short_dma(struct mbuf *); static struct mbuf *bge_setup_tso(struct bge_softc *, struct mbuf *, uint16_t *, uint16_t *); static int bge_encap(struct bge_softc *, struct mbuf **, uint32_t *); static void bge_intr(void *); static int bge_msi_intr(void *); static void bge_intr_task(void *, int); static int bge_start_locked(struct bge_softc *); static int bge_transmit(if_t, struct mbuf *); static int bge_ioctl(if_t, u_long, void *, struct thread *); static void bge_init_locked(struct bge_softc *); static void bge_init(void *); static void bge_stop_block(struct bge_softc *, bus_size_t, uint32_t); static void bge_stop(struct bge_softc *); static void bge_watchdog(struct bge_softc *); static int bge_shutdown(device_t); static int bge_ifmedia_upd_locked(if_t); static int bge_ifmedia_upd(if_t); static void bge_ifmedia_sts(if_t, struct ifmediareq *); static uint64_t bge_get_counter(if_t, ift_counter); static uint8_t bge_nvram_getbyte(struct bge_softc *, int, uint8_t *); static int bge_read_nvram(struct bge_softc *, caddr_t, int, int); static uint8_t bge_eeprom_getbyte(struct bge_softc *, int, uint8_t *); static int bge_read_eeprom(struct bge_softc *, caddr_t, int, int); static void bge_setpromisc(struct bge_softc *); static void bge_setmulti(struct bge_softc *); static void bge_setvlan(struct bge_softc *); static __inline void bge_rxreuse_std(struct bge_softc *, int); static __inline void bge_rxreuse_jumbo(struct bge_softc *, int); static int bge_newbuf_std(struct bge_softc *, int); static int bge_newbuf_jumbo(struct bge_softc *, int); static int bge_init_rx_ring_std(struct bge_softc *); static void bge_free_rx_ring_std(struct bge_softc *); static int bge_init_rx_ring_jumbo(struct bge_softc *); static void bge_free_rx_ring_jumbo(struct bge_softc *); static void bge_free_tx_ring(struct bge_softc *); static int bge_init_tx_ring(struct bge_softc *); static int bge_chipinit(struct bge_softc *); static int bge_blockinit(struct bge_softc *); static uint32_t bge_dma_swap_options(struct bge_softc *); static int bge_has_eaddr(struct bge_softc *); static uint32_t bge_readmem_ind(struct bge_softc *, int); static void bge_writemem_ind(struct bge_softc *, int, int); static void bge_writembx(struct bge_softc *, int, int); #ifdef notdef static 
uint32_t bge_readreg_ind(struct bge_softc *, int); #endif static void bge_writemem_direct(struct bge_softc *, int, int); static void bge_writereg_ind(struct bge_softc *, int, int); static int bge_miibus_readreg(device_t, int, int); static int bge_miibus_writereg(device_t, int, int, int); static void bge_miibus_statchg(device_t); static uint64_t bge_miibus_readvar(device_t, int); #ifdef DEVICE_POLLING static int bge_poll(if_t ifp, enum poll_cmd cmd, int count); #endif #define BGE_RESET_SHUTDOWN 0 #define BGE_RESET_START 1 #define BGE_RESET_SUSPEND 2 static void bge_sig_post_reset(struct bge_softc *, int); static void bge_sig_legacy(struct bge_softc *, int); static void bge_sig_pre_reset(struct bge_softc *, int); static void bge_stop_fw(struct bge_softc *); static int bge_reset(struct bge_softc *); static void bge_link_upd(struct bge_softc *); static void bge_ape_lock_init(struct bge_softc *); static void bge_ape_read_fw_ver(struct bge_softc *); static int bge_ape_lock(struct bge_softc *, int); static void bge_ape_unlock(struct bge_softc *, int); static void bge_ape_send_event(struct bge_softc *, uint32_t); static void bge_ape_driver_state_change(struct bge_softc *, int); /* * The BGE_REGISTER_DEBUG option is only for low-level debugging. It may * leak information to untrusted users. It is also known to cause alignment * traps on certain architectures. */ #ifdef BGE_REGISTER_DEBUG static int bge_sysctl_debug_info(SYSCTL_HANDLER_ARGS); static int bge_sysctl_reg_read(SYSCTL_HANDLER_ARGS); static int bge_sysctl_ape_read(SYSCTL_HANDLER_ARGS); static int bge_sysctl_mem_read(SYSCTL_HANDLER_ARGS); #endif static void bge_add_sysctls(struct bge_softc *); static void bge_add_sysctl_stats_regs(struct bge_softc *, struct sysctl_ctx_list *, struct sysctl_oid_list *); static void bge_add_sysctl_stats(struct bge_softc *, struct sysctl_ctx_list *, struct sysctl_oid_list *); static int bge_sysctl_stats(SYSCTL_HANDLER_ARGS); static device_method_t bge_methods[] = { /* Device interface */ DEVMETHOD(device_probe, bge_probe), DEVMETHOD(device_attach, bge_attach), DEVMETHOD(device_detach, bge_detach), DEVMETHOD(device_shutdown, bge_shutdown), DEVMETHOD(device_suspend, bge_suspend), DEVMETHOD(device_resume, bge_resume), /* MII interface */ DEVMETHOD(miibus_readreg, bge_miibus_readreg), DEVMETHOD(miibus_writereg, bge_miibus_writereg), DEVMETHOD(miibus_statchg, bge_miibus_statchg), DEVMETHOD(miibus_readvar, bge_miibus_readvar), DEVMETHOD_END }; static driver_t bge_driver = { "bge", bge_methods, sizeof(struct bge_softc) }; static struct ifdriver bge_ifdrv = { .ifdrv_ops = { .ifop_origin = IFOP_ORIGIN_DRIVER, .ifop_ioctl = bge_ioctl, .ifop_init = bge_init, .ifop_transmit = bge_transmit, .ifop_get_counter = bge_get_counter, }, .ifdrv_name = "bge", .ifdrv_type = IFT_ETHER, .ifdrv_hdrlen = sizeof(struct ether_vlan_header), .ifdrv_maxqlen = BGE_TX_RING_CNT - 1, }; static devclass_t bge_devclass; DRIVER_MODULE(bge, pci, bge_driver, bge_devclass, 0, 0); DRIVER_MODULE(miibus, bge, miibus_driver, miibus_devclass, 0, 0); static int bge_allow_asf = 1; static SYSCTL_NODE(_hw, OID_AUTO, bge, CTLFLAG_RD, 0, "BGE driver parameters"); SYSCTL_INT(_hw_bge, OID_AUTO, allow_asf, CTLFLAG_RDTUN, &bge_allow_asf, 0, "Allow ASF mode if available"); #define SPARC64_BLADE_1500_MODEL "SUNW,Sun-Blade-1500" #define SPARC64_BLADE_1500_PATH_BGE "/pci@1f,700000/network@2" #define SPARC64_BLADE_2500_MODEL "SUNW,Sun-Blade-2500" #define SPARC64_BLADE_2500_PATH_BGE "/pci@1c,600000/network@3" #define SPARC64_OFW_SUBVENDOR "subsystem-vendor-id" static int 
bge_has_eaddr(struct bge_softc *sc) { #ifdef __sparc64__ char buf[sizeof(SPARC64_BLADE_1500_PATH_BGE)]; device_t dev; uint32_t subvendor; dev = sc->bge_dev; /* * The on-board BGEs found in sun4u machines aren't fitted with * an EEPROM which means that we have to obtain the MAC address * via OFW and that some tests will always fail. We distinguish * such BGEs by the subvendor ID, which also has to be obtained * from OFW instead of the PCI configuration space as the latter * indicates Broadcom as the subvendor of the netboot interface. * For early Blade 1500 and 2500 we even have to check the OFW * device path as the subvendor ID always defaults to Broadcom * there. */ if (OF_getprop(ofw_bus_get_node(dev), SPARC64_OFW_SUBVENDOR, &subvendor, sizeof(subvendor)) == sizeof(subvendor) && (subvendor == FJTSU_VENDORID || subvendor == SUN_VENDORID)) return (0); memset(buf, 0, sizeof(buf)); if (OF_package_to_path(ofw_bus_get_node(dev), buf, sizeof(buf)) > 0) { if (strcmp(sparc64_model, SPARC64_BLADE_1500_MODEL) == 0 && strcmp(buf, SPARC64_BLADE_1500_PATH_BGE) == 0) return (0); if (strcmp(sparc64_model, SPARC64_BLADE_2500_MODEL) == 0 && strcmp(buf, SPARC64_BLADE_2500_PATH_BGE) == 0) return (0); } #endif return (1); } static uint32_t bge_readmem_ind(struct bge_softc *sc, int off) { device_t dev; uint32_t val; if (sc->bge_asicrev == BGE_ASICREV_BCM5906 && off >= BGE_STATS_BLOCK && off < BGE_SEND_RING_1_TO_4) return (0); dev = sc->bge_dev; pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4); val = pci_read_config(dev, BGE_PCI_MEMWIN_DATA, 4); pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4); return (val); } static void bge_writemem_ind(struct bge_softc *sc, int off, int val) { device_t dev; if (sc->bge_asicrev == BGE_ASICREV_BCM5906 && off >= BGE_STATS_BLOCK && off < BGE_SEND_RING_1_TO_4) return; dev = sc->bge_dev; pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4); pci_write_config(dev, BGE_PCI_MEMWIN_DATA, val, 4); pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4); } #ifdef notdef static uint32_t bge_readreg_ind(struct bge_softc *sc, int off) { device_t dev; dev = sc->bge_dev; pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4); return (pci_read_config(dev, BGE_PCI_REG_DATA, 4)); } #endif static void bge_writereg_ind(struct bge_softc *sc, int off, int val) { device_t dev; dev = sc->bge_dev; pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4); pci_write_config(dev, BGE_PCI_REG_DATA, val, 4); } static void bge_writemem_direct(struct bge_softc *sc, int off, int val) { CSR_WRITE_4(sc, off, val); } static void bge_writembx(struct bge_softc *sc, int off, int val) { if (sc->bge_asicrev == BGE_ASICREV_BCM5906) off += BGE_LPMBX_IRQ0_HI - BGE_MBX_IRQ0_HI; CSR_WRITE_4(sc, off, val); if ((sc->bge_flags & BGE_FLAG_MBOX_REORDER) != 0) CSR_READ_4(sc, off); } /* * Clear all stale locks and select the lock for this driver instance. */ static void bge_ape_lock_init(struct bge_softc *sc) { uint32_t bit, regbase; int i; if (sc->bge_asicrev == BGE_ASICREV_BCM5761) regbase = BGE_APE_LOCK_GRANT; else regbase = BGE_APE_PER_LOCK_GRANT; /* Clear any stale locks. */ for (i = BGE_APE_LOCK_PHY0; i <= BGE_APE_LOCK_GPIO; i++) { switch (i) { case BGE_APE_LOCK_PHY0: case BGE_APE_LOCK_PHY1: case BGE_APE_LOCK_PHY2: case BGE_APE_LOCK_PHY3: bit = BGE_APE_LOCK_GRANT_DRIVER0; break; default: if (sc->bge_func_addr == 0) bit = BGE_APE_LOCK_GRANT_DRIVER0; else bit = (1 << sc->bge_func_addr); } APE_WRITE_4(sc, regbase + 4 * i, bit); } /* Select the PHY lock based on the device's function number. 
*/ switch (sc->bge_func_addr) { case 0: sc->bge_phy_ape_lock = BGE_APE_LOCK_PHY0; break; case 1: sc->bge_phy_ape_lock = BGE_APE_LOCK_PHY1; break; case 2: sc->bge_phy_ape_lock = BGE_APE_LOCK_PHY2; break; case 3: sc->bge_phy_ape_lock = BGE_APE_LOCK_PHY3; break; default: device_printf(sc->bge_dev, "PHY lock not supported on this function\n"); } } /* * Check for APE firmware, set flags, and print version info. */ static void bge_ape_read_fw_ver(struct bge_softc *sc) { const char *fwtype; uint32_t apedata, features; /* Check for a valid APE signature in shared memory. */ apedata = APE_READ_4(sc, BGE_APE_SEG_SIG); if (apedata != BGE_APE_SEG_SIG_MAGIC) { sc->bge_mfw_flags &= ~ BGE_MFW_ON_APE; return; } /* Check if APE firmware is running. */ apedata = APE_READ_4(sc, BGE_APE_FW_STATUS); if ((apedata & BGE_APE_FW_STATUS_READY) == 0) { device_printf(sc->bge_dev, "APE signature found " "but FW status not ready! 0x%08x\n", apedata); return; } sc->bge_mfw_flags |= BGE_MFW_ON_APE; /* Fetch the APE firwmare type and version. */ apedata = APE_READ_4(sc, BGE_APE_FW_VERSION); features = APE_READ_4(sc, BGE_APE_FW_FEATURES); if ((features & BGE_APE_FW_FEATURE_NCSI) != 0) { sc->bge_mfw_flags |= BGE_MFW_TYPE_NCSI; fwtype = "NCSI"; } else if ((features & BGE_APE_FW_FEATURE_DASH) != 0) { sc->bge_mfw_flags |= BGE_MFW_TYPE_DASH; fwtype = "DASH"; } else fwtype = "UNKN"; /* Print the APE firmware version. */ device_printf(sc->bge_dev, "APE FW version: %s v%d.%d.%d.%d\n", fwtype, (apedata & BGE_APE_FW_VERSION_MAJMSK) >> BGE_APE_FW_VERSION_MAJSFT, (apedata & BGE_APE_FW_VERSION_MINMSK) >> BGE_APE_FW_VERSION_MINSFT, (apedata & BGE_APE_FW_VERSION_REVMSK) >> BGE_APE_FW_VERSION_REVSFT, (apedata & BGE_APE_FW_VERSION_BLDMSK)); } static int bge_ape_lock(struct bge_softc *sc, int locknum) { uint32_t bit, gnt, req, status; int i, off; if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) == 0) return (0); /* Lock request/grant registers have different bases. */ if (sc->bge_asicrev == BGE_ASICREV_BCM5761) { req = BGE_APE_LOCK_REQ; gnt = BGE_APE_LOCK_GRANT; } else { req = BGE_APE_PER_LOCK_REQ; gnt = BGE_APE_PER_LOCK_GRANT; } off = 4 * locknum; switch (locknum) { case BGE_APE_LOCK_GPIO: /* Lock required when using GPIO. */ if (sc->bge_asicrev == BGE_ASICREV_BCM5761) return (0); if (sc->bge_func_addr == 0) bit = BGE_APE_LOCK_REQ_DRIVER0; else bit = (1 << sc->bge_func_addr); break; case BGE_APE_LOCK_GRC: /* Lock required to reset the device. */ if (sc->bge_func_addr == 0) bit = BGE_APE_LOCK_REQ_DRIVER0; else bit = (1 << sc->bge_func_addr); break; case BGE_APE_LOCK_MEM: /* Lock required when accessing certain APE memory. */ if (sc->bge_func_addr == 0) bit = BGE_APE_LOCK_REQ_DRIVER0; else bit = (1 << sc->bge_func_addr); break; case BGE_APE_LOCK_PHY0: case BGE_APE_LOCK_PHY1: case BGE_APE_LOCK_PHY2: case BGE_APE_LOCK_PHY3: /* Lock required when accessing PHYs. */ bit = BGE_APE_LOCK_REQ_DRIVER0; break; default: return (EINVAL); } /* Request a lock. */ APE_WRITE_4(sc, req + off, bit); /* Wait up to 1 second to acquire lock. */ for (i = 0; i < 20000; i++) { status = APE_READ_4(sc, gnt + off); if (status == bit) break; DELAY(50); } /* Handle any errors. */ if (status != bit) { device_printf(sc->bge_dev, "APE lock %d request failed! " "request = 0x%04x[0x%04x], status = 0x%04x[0x%04x]\n", locknum, req + off, bit & 0xFFFF, gnt + off, status & 0xFFFF); /* Revoke the lock request. 
*/ APE_WRITE_4(sc, gnt + off, bit); return (EBUSY); } return (0); } static void bge_ape_unlock(struct bge_softc *sc, int locknum) { uint32_t bit, gnt; int off; if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) == 0) return; if (sc->bge_asicrev == BGE_ASICREV_BCM5761) gnt = BGE_APE_LOCK_GRANT; else gnt = BGE_APE_PER_LOCK_GRANT; off = 4 * locknum; switch (locknum) { case BGE_APE_LOCK_GPIO: if (sc->bge_asicrev == BGE_ASICREV_BCM5761) return; if (sc->bge_func_addr == 0) bit = BGE_APE_LOCK_GRANT_DRIVER0; else bit = (1 << sc->bge_func_addr); break; case BGE_APE_LOCK_GRC: if (sc->bge_func_addr == 0) bit = BGE_APE_LOCK_GRANT_DRIVER0; else bit = (1 << sc->bge_func_addr); break; case BGE_APE_LOCK_MEM: if (sc->bge_func_addr == 0) bit = BGE_APE_LOCK_GRANT_DRIVER0; else bit = (1 << sc->bge_func_addr); break; case BGE_APE_LOCK_PHY0: case BGE_APE_LOCK_PHY1: case BGE_APE_LOCK_PHY2: case BGE_APE_LOCK_PHY3: bit = BGE_APE_LOCK_GRANT_DRIVER0; break; default: return; } APE_WRITE_4(sc, gnt + off, bit); } /* * Send an event to the APE firmware. */ static void bge_ape_send_event(struct bge_softc *sc, uint32_t event) { uint32_t apedata; int i; /* NCSI does not support APE events. */ if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) == 0) return; /* Wait up to 1ms for APE to service previous event. */ for (i = 10; i > 0; i--) { if (bge_ape_lock(sc, BGE_APE_LOCK_MEM) != 0) break; apedata = APE_READ_4(sc, BGE_APE_EVENT_STATUS); if ((apedata & BGE_APE_EVENT_STATUS_EVENT_PENDING) == 0) { APE_WRITE_4(sc, BGE_APE_EVENT_STATUS, event | BGE_APE_EVENT_STATUS_EVENT_PENDING); bge_ape_unlock(sc, BGE_APE_LOCK_MEM); APE_WRITE_4(sc, BGE_APE_EVENT, BGE_APE_EVENT_1); break; } bge_ape_unlock(sc, BGE_APE_LOCK_MEM); DELAY(100); } if (i == 0) device_printf(sc->bge_dev, "APE event 0x%08x send timed out\n", event); } static void bge_ape_driver_state_change(struct bge_softc *sc, int kind) { uint32_t apedata, event; if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) == 0) return; switch (kind) { case BGE_RESET_START: /* If this is the first load, clear the load counter. */ apedata = APE_READ_4(sc, BGE_APE_HOST_SEG_SIG); if (apedata != BGE_APE_HOST_SEG_SIG_MAGIC) APE_WRITE_4(sc, BGE_APE_HOST_INIT_COUNT, 0); else { apedata = APE_READ_4(sc, BGE_APE_HOST_INIT_COUNT); APE_WRITE_4(sc, BGE_APE_HOST_INIT_COUNT, ++apedata); } APE_WRITE_4(sc, BGE_APE_HOST_SEG_SIG, BGE_APE_HOST_SEG_SIG_MAGIC); APE_WRITE_4(sc, BGE_APE_HOST_SEG_LEN, BGE_APE_HOST_SEG_LEN_MAGIC); /* Add some version info if bge(4) supports it. */ APE_WRITE_4(sc, BGE_APE_HOST_DRIVER_ID, BGE_APE_HOST_DRIVER_ID_MAGIC(1, 0)); APE_WRITE_4(sc, BGE_APE_HOST_BEHAVIOR, BGE_APE_HOST_BEHAV_NO_PHYLOCK); APE_WRITE_4(sc, BGE_APE_HOST_HEARTBEAT_INT_MS, BGE_APE_HOST_HEARTBEAT_INT_DISABLE); APE_WRITE_4(sc, BGE_APE_HOST_DRVR_STATE, BGE_APE_HOST_DRVR_STATE_START); event = BGE_APE_EVENT_STATUS_STATE_START; break; case BGE_RESET_SHUTDOWN: APE_WRITE_4(sc, BGE_APE_HOST_DRVR_STATE, BGE_APE_HOST_DRVR_STATE_UNLOAD); event = BGE_APE_EVENT_STATUS_STATE_UNLOAD; break; case BGE_RESET_SUSPEND: event = BGE_APE_EVENT_STATUS_STATE_SUSPEND; break; default: return; } bge_ape_send_event(sc, event | BGE_APE_EVENT_STATUS_DRIVER_EVNT | BGE_APE_EVENT_STATUS_STATE_CHNGE); } /* * Map a single buffer address. 
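[Editorial note] bge_dma_map_addr() below is the driver's busdma load callback: bus_dmamap_load() reports DMA segments to a callback rather than returning them, and because the ring memory is allocated so that it maps to a single segment, the callback only records that segment's bus address in a caller-supplied struct bge_dmamap_arg. A minimal sketch of the caller side, for orientation; the ring_* names are placeholders and the real users (e.g. bge_dma_ring_alloc()) appear later in the file:

	struct bge_dmamap_arg ctx;
	int error;

	ctx.bge_busaddr = 0;
	error = bus_dmamap_load(ring_tag, ring_map, ring_vaddr, ring_size,
	    bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
	if (error == 0)
		ring_paddr = ctx.bge_busaddr;	/* bus address programmed into the RCB */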
*/ static void bge_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error) { struct bge_dmamap_arg *ctx; if (error) return; KASSERT(nseg == 1, ("%s: %d segments returned!", __func__, nseg)); ctx = arg; ctx->bge_busaddr = segs->ds_addr; } static uint8_t bge_nvram_getbyte(struct bge_softc *sc, int addr, uint8_t *dest) { uint32_t access, byte = 0; int i; /* Lock. */ CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_SET1); for (i = 0; i < 8000; i++) { if (CSR_READ_4(sc, BGE_NVRAM_SWARB) & BGE_NVRAMSWARB_GNT1) break; DELAY(20); } if (i == 8000) return (1); /* Enable access. */ access = CSR_READ_4(sc, BGE_NVRAM_ACCESS); CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access | BGE_NVRAMACC_ENABLE); CSR_WRITE_4(sc, BGE_NVRAM_ADDR, addr & 0xfffffffc); CSR_WRITE_4(sc, BGE_NVRAM_CMD, BGE_NVRAM_READCMD); for (i = 0; i < BGE_TIMEOUT * 10; i++) { DELAY(10); if (CSR_READ_4(sc, BGE_NVRAM_CMD) & BGE_NVRAMCMD_DONE) { DELAY(10); break; } } if (i == BGE_TIMEOUT * 10) { if_printf(sc->bge_ifp, "nvram read timed out\n"); return (1); } /* Get result. */ byte = CSR_READ_4(sc, BGE_NVRAM_RDDATA); *dest = (bswap32(byte) >> ((addr % 4) * 8)) & 0xFF; /* Disable access. */ CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access); /* Unlock. */ CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_CLR1); CSR_READ_4(sc, BGE_NVRAM_SWARB); return (0); } /* * Read a sequence of bytes from NVRAM. */ static int bge_read_nvram(struct bge_softc *sc, caddr_t dest, int off, int cnt) { int err = 0, i; uint8_t byte = 0; if (sc->bge_asicrev != BGE_ASICREV_BCM5906) return (1); for (i = 0; i < cnt; i++) { err = bge_nvram_getbyte(sc, off + i, &byte); if (err) break; *(dest + i) = byte; } return (err ? 1 : 0); } /* * Read a byte of data stored in the EEPROM at address 'addr.' The * BCM570x supports both the traditional bitbang interface and an * auto access interface for reading the EEPROM. We use the auto * access method. */ static uint8_t bge_eeprom_getbyte(struct bge_softc *sc, int addr, uint8_t *dest) { int i; uint32_t byte = 0; /* * Enable use of auto EEPROM access so we can avoid * having to use the bitbang method. */ BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM); /* Reset the EEPROM, load the clock period. */ CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EEADDR_RESET | BGE_EEHALFCLK(BGE_HALFCLK_384SCL)); DELAY(20); /* Issue the read EEPROM command. */ CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr); /* Wait for completion */ for(i = 0; i < BGE_TIMEOUT * 10; i++) { DELAY(10); if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE) break; } if (i == BGE_TIMEOUT * 10) { device_printf(sc->bge_dev, "EEPROM read timed out\n"); return (1); } /* Get result. */ byte = CSR_READ_4(sc, BGE_EE_DATA); *dest = (byte >> ((addr % 4) * 8)) & 0xFF; return (0); } /* * Read a sequence of bytes from the EEPROM. */ static int bge_read_eeprom(struct bge_softc *sc, caddr_t dest, int off, int cnt) { int i, error = 0; uint8_t byte = 0; for (i = 0; i < cnt; i++) { error = bge_eeprom_getbyte(sc, off + i, &byte); if (error) break; *(dest + i) = byte; } return (error ? 1 : 0); } static int bge_miibus_readreg(device_t dev, int phy, int reg) { struct bge_softc *sc; uint32_t val; int i; sc = device_get_softc(dev); if (bge_ape_lock(sc, sc->bge_phy_ape_lock) != 0) return (0); /* Clear the autopoll bit if set, otherwise may trigger PCI errors. 
*/ if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) { CSR_WRITE_4(sc, BGE_MI_MODE, sc->bge_mi_mode & ~BGE_MIMODE_AUTOPOLL); DELAY(80); } CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ | BGE_MICOMM_BUSY | BGE_MIPHY(phy) | BGE_MIREG(reg)); /* Poll for the PHY register access to complete. */ for (i = 0; i < BGE_TIMEOUT; i++) { DELAY(10); val = CSR_READ_4(sc, BGE_MI_COMM); if ((val & BGE_MICOMM_BUSY) == 0) { DELAY(5); val = CSR_READ_4(sc, BGE_MI_COMM); break; } } if (i == BGE_TIMEOUT) { device_printf(sc->bge_dev, "PHY read timed out (phy %d, reg %d, val 0x%08x)\n", phy, reg, val); val = 0; } /* Restore the autopoll bit if necessary. */ if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) { CSR_WRITE_4(sc, BGE_MI_MODE, sc->bge_mi_mode); DELAY(80); } bge_ape_unlock(sc, sc->bge_phy_ape_lock); if (val & BGE_MICOMM_READFAIL) return (0); return (val & 0xFFFF); } static int bge_miibus_writereg(device_t dev, int phy, int reg, int val) { struct bge_softc *sc; int i; sc = device_get_softc(dev); if (sc->bge_asicrev == BGE_ASICREV_BCM5906 && (reg == BRGPHY_MII_1000CTL || reg == BRGPHY_MII_AUXCTL)) return (0); if (bge_ape_lock(sc, sc->bge_phy_ape_lock) != 0) return (0); /* Clear the autopoll bit if set, otherwise may trigger PCI errors. */ if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) { CSR_WRITE_4(sc, BGE_MI_MODE, sc->bge_mi_mode & ~BGE_MIMODE_AUTOPOLL); DELAY(80); } CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE | BGE_MICOMM_BUSY | BGE_MIPHY(phy) | BGE_MIREG(reg) | val); for (i = 0; i < BGE_TIMEOUT; i++) { DELAY(10); if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY)) { DELAY(5); CSR_READ_4(sc, BGE_MI_COMM); /* dummy read */ break; } } /* Restore the autopoll bit if necessary. */ if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) { CSR_WRITE_4(sc, BGE_MI_MODE, sc->bge_mi_mode); DELAY(80); } bge_ape_unlock(sc, sc->bge_phy_ape_lock); if (i == BGE_TIMEOUT) device_printf(sc->bge_dev, "PHY write timed out (phy %d, reg %d, val 0x%04x)\n", phy, reg, val); return (0); } static void bge_miibus_statchg(device_t dev) { struct bge_softc *sc; struct mii_data *mii; uint32_t mac_mode, rx_mode, tx_mode; sc = device_get_softc(dev); if ((sc->bge_flags & BGE_FLAG_RUNNING) == 0) return; mii = device_get_softc(sc->bge_miibus); if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) == (IFM_ACTIVE | IFM_AVALID)) { switch (IFM_SUBTYPE(mii->mii_media_active)) { case IFM_10_T: case IFM_100_TX: sc->bge_link = 1; break; case IFM_1000_T: case IFM_1000_SX: case IFM_2500_SX: if (sc->bge_asicrev != BGE_ASICREV_BCM5906) sc->bge_link = 1; else sc->bge_link = 0; break; default: sc->bge_link = 0; break; } } else sc->bge_link = 0; if (sc->bge_ifp != NULL) { if_set(sc->bge_ifp, IF_BAUDRATE, ifmedia_baudrate(mii->mii_media_active)); if_link_state_change(sc->bge_ifp, ifmedia_link_state(mii->mii_media_status)); } if (sc->bge_link == 0) return; /* * APE firmware touches these registers to keep the MAC * connected to the outside world. Try to keep the * accesses atomic. */ /* Set the port mode (MII/GMII) to match the link speed. */ mac_mode = CSR_READ_4(sc, BGE_MAC_MODE) & ~(BGE_MACMODE_PORTMODE | BGE_MACMODE_HALF_DUPLEX); tx_mode = CSR_READ_4(sc, BGE_TX_MODE); rx_mode = CSR_READ_4(sc, BGE_RX_MODE); if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T || IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX) mac_mode |= BGE_PORTMODE_GMII; else mac_mode |= BGE_PORTMODE_MII; /* Set MAC flow control behavior to match link flow control settings. 
*/ tx_mode &= ~BGE_TXMODE_FLOWCTL_ENABLE; rx_mode &= ~BGE_RXMODE_FLOWCTL_ENABLE; if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) { if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0) tx_mode |= BGE_TXMODE_FLOWCTL_ENABLE; if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0) rx_mode |= BGE_RXMODE_FLOWCTL_ENABLE; } else mac_mode |= BGE_MACMODE_HALF_DUPLEX; CSR_WRITE_4(sc, BGE_MAC_MODE, mac_mode); DELAY(40); CSR_WRITE_4(sc, BGE_TX_MODE, tx_mode); CSR_WRITE_4(sc, BGE_RX_MODE, rx_mode); } static uint64_t bge_miibus_readvar(device_t dev, int var) { struct bge_softc *sc; - if_t ifp; sc = device_get_softc(dev); - ifp = sc->bge_ifp; switch (var) { - case IF_MTU: - return (if_get(ifp, IF_MTU)); + case MIIVAR_MTU: + return (sc->bge_mtu); default: return (0); } } /* * Intialize a standard receive ring descriptor. */ static int bge_newbuf_std(struct bge_softc *sc, int i) { struct mbuf *m; struct bge_rx_bd *r; bus_dma_segment_t segs[1]; bus_dmamap_t map; int error, nsegs; if (sc->bge_flags & BGE_FLAG_JUMBO_STD && - (if_get(sc->bge_ifp, IF_MTU) + ETHER_HDR_LEN + ETHER_CRC_LEN + + (sc->bge_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN > (MCLBYTES - ETHER_ALIGN))) { m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUM9BYTES); if (m == NULL) return (ENOBUFS); m->m_len = m->m_pkthdr.len = MJUM9BYTES; } else { m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); if (m == NULL) return (ENOBUFS); m->m_len = m->m_pkthdr.len = MCLBYTES; } if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0) m_adj(m, ETHER_ALIGN); error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_rx_mtag, sc->bge_cdata.bge_rx_std_sparemap, m, segs, &nsegs, 0); if (error != 0) { m_freem(m); return (error); } if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) { bus_dmamap_sync(sc->bge_cdata.bge_rx_mtag, sc->bge_cdata.bge_rx_std_dmamap[i], BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc->bge_cdata.bge_rx_mtag, sc->bge_cdata.bge_rx_std_dmamap[i]); } map = sc->bge_cdata.bge_rx_std_dmamap[i]; sc->bge_cdata.bge_rx_std_dmamap[i] = sc->bge_cdata.bge_rx_std_sparemap; sc->bge_cdata.bge_rx_std_sparemap = map; sc->bge_cdata.bge_rx_std_chain[i] = m; sc->bge_cdata.bge_rx_std_seglen[i] = segs[0].ds_len; r = &sc->bge_ldata.bge_rx_std_ring[sc->bge_std]; r->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[0].ds_addr); r->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[0].ds_addr); r->bge_flags = BGE_RXBDFLAG_END; r->bge_len = segs[0].ds_len; r->bge_idx = i; bus_dmamap_sync(sc->bge_cdata.bge_rx_mtag, sc->bge_cdata.bge_rx_std_dmamap[i], BUS_DMASYNC_PREREAD); return (0); } /* * Initialize a jumbo receive ring descriptor. This allocates * a jumbo buffer from the pool managed internally by the driver. 
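[Editorial note] The hunks above are the substance of this revision: instead of querying the ifnet layer with if_get(ifp, IF_MTU) in hot paths, the driver now keeps its own copy of the MTU in the softc (sc->bge_mtu) and reports it to miibus through the new MIIVAR_MTU variable; bge_setvlan() and bge_blockinit() below get the same treatment via sc->bge_capenable. A rough sketch of how such a cached field would be refreshed on an MTU change follows; the SIOCSIFMTU handling shown here is an assumption for illustration (the real bge_ioctl() body is outside this excerpt), and only the sc->bge_mtu field itself comes from the diff:

	/* Hypothetical MTU-change path: validate, cache, reinitialize. */
	if (new_mtu < ETHERMIN || new_mtu > BGE_JUMBO_MTU)
		return (EINVAL);
	BGE_LOCK(sc);
	if (sc->bge_mtu != new_mtu) {
		sc->bge_mtu = new_mtu;		/* cached copy used by the data path */
		if (sc->bge_flags & BGE_FLAG_RUNNING)
			bge_init_locked(sc);	/* re-program rings for the new size */
	}
	BGE_UNLOCK(sc);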
*/ static int bge_newbuf_jumbo(struct bge_softc *sc, int i) { bus_dma_segment_t segs[BGE_NSEG_JUMBO]; bus_dmamap_t map; struct bge_extrx_bd *r; struct mbuf *m; int error, nsegs; MGETHDR(m, M_NOWAIT, MT_DATA); if (m == NULL) return (ENOBUFS); if (m_cljget(m, M_NOWAIT, MJUM9BYTES) == NULL) { m_freem(m); return (ENOBUFS); } m->m_len = m->m_pkthdr.len = MJUM9BYTES; if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0) m_adj(m, ETHER_ALIGN); error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_mtag_jumbo, sc->bge_cdata.bge_rx_jumbo_sparemap, m, segs, &nsegs, 0); if (error != 0) { m_freem(m); return (error); } if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) { bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo, sc->bge_cdata.bge_rx_jumbo_dmamap[i], BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo, sc->bge_cdata.bge_rx_jumbo_dmamap[i]); } map = sc->bge_cdata.bge_rx_jumbo_dmamap[i]; sc->bge_cdata.bge_rx_jumbo_dmamap[i] = sc->bge_cdata.bge_rx_jumbo_sparemap; sc->bge_cdata.bge_rx_jumbo_sparemap = map; sc->bge_cdata.bge_rx_jumbo_chain[i] = m; sc->bge_cdata.bge_rx_jumbo_seglen[i][0] = 0; sc->bge_cdata.bge_rx_jumbo_seglen[i][1] = 0; sc->bge_cdata.bge_rx_jumbo_seglen[i][2] = 0; sc->bge_cdata.bge_rx_jumbo_seglen[i][3] = 0; /* * Fill in the extended RX buffer descriptor. */ r = &sc->bge_ldata.bge_rx_jumbo_ring[sc->bge_jumbo]; r->bge_flags = BGE_RXBDFLAG_JUMBO_RING | BGE_RXBDFLAG_END; r->bge_idx = i; r->bge_len3 = r->bge_len2 = r->bge_len1 = 0; switch (nsegs) { case 4: r->bge_addr3.bge_addr_lo = BGE_ADDR_LO(segs[3].ds_addr); r->bge_addr3.bge_addr_hi = BGE_ADDR_HI(segs[3].ds_addr); r->bge_len3 = segs[3].ds_len; sc->bge_cdata.bge_rx_jumbo_seglen[i][3] = segs[3].ds_len; case 3: r->bge_addr2.bge_addr_lo = BGE_ADDR_LO(segs[2].ds_addr); r->bge_addr2.bge_addr_hi = BGE_ADDR_HI(segs[2].ds_addr); r->bge_len2 = segs[2].ds_len; sc->bge_cdata.bge_rx_jumbo_seglen[i][2] = segs[2].ds_len; case 2: r->bge_addr1.bge_addr_lo = BGE_ADDR_LO(segs[1].ds_addr); r->bge_addr1.bge_addr_hi = BGE_ADDR_HI(segs[1].ds_addr); r->bge_len1 = segs[1].ds_len; sc->bge_cdata.bge_rx_jumbo_seglen[i][1] = segs[1].ds_len; case 1: r->bge_addr0.bge_addr_lo = BGE_ADDR_LO(segs[0].ds_addr); r->bge_addr0.bge_addr_hi = BGE_ADDR_HI(segs[0].ds_addr); r->bge_len0 = segs[0].ds_len; sc->bge_cdata.bge_rx_jumbo_seglen[i][0] = segs[0].ds_len; break; default: panic("%s: %d segments\n", __func__, nsegs); } bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo, sc->bge_cdata.bge_rx_jumbo_dmamap[i], BUS_DMASYNC_PREREAD); return (0); } static int bge_init_rx_ring_std(struct bge_softc *sc) { int error, i; bzero(sc->bge_ldata.bge_rx_std_ring, BGE_STD_RX_RING_SZ); sc->bge_std = 0; for (i = 0; i < BGE_STD_RX_RING_CNT; i++) { if ((error = bge_newbuf_std(sc, i)) != 0) return (error); BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT); } bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag, sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREWRITE); sc->bge_std = 0; bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, BGE_STD_RX_RING_CNT - 1); return (0); } static void bge_free_rx_ring_std(struct bge_softc *sc) { int i; for (i = 0; i < BGE_STD_RX_RING_CNT; i++) { if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) { bus_dmamap_sync(sc->bge_cdata.bge_rx_mtag, sc->bge_cdata.bge_rx_std_dmamap[i], BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc->bge_cdata.bge_rx_mtag, sc->bge_cdata.bge_rx_std_dmamap[i]); m_freem(sc->bge_cdata.bge_rx_std_chain[i]); sc->bge_cdata.bge_rx_std_chain[i] = NULL; } bzero((char *)&sc->bge_ldata.bge_rx_std_ring[i], sizeof(struct bge_rx_bd)); } } static int bge_init_rx_ring_jumbo(struct 
bge_softc *sc) { struct bge_rcb *rcb; int error, i; bzero(sc->bge_ldata.bge_rx_jumbo_ring, BGE_JUMBO_RX_RING_SZ); sc->bge_jumbo = 0; for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) { if ((error = bge_newbuf_jumbo(sc, i)) != 0) return (error); BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT); } bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag, sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_PREWRITE); sc->bge_jumbo = 0; /* Enable the jumbo receive producer ring. */ rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb; rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_USE_EXT_RX_BD); CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags); bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, BGE_JUMBO_RX_RING_CNT - 1); return (0); } static void bge_free_rx_ring_jumbo(struct bge_softc *sc) { int i; for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) { if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) { bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo, sc->bge_cdata.bge_rx_jumbo_dmamap[i], BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo, sc->bge_cdata.bge_rx_jumbo_dmamap[i]); m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]); sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL; } bzero((char *)&sc->bge_ldata.bge_rx_jumbo_ring[i], sizeof(struct bge_extrx_bd)); } } static void bge_free_tx_ring(struct bge_softc *sc) { int i; if (sc->bge_ldata.bge_tx_ring == NULL) return; for (i = 0; i < BGE_TX_RING_CNT; i++) { if (sc->bge_cdata.bge_tx_chain[i] != NULL) { bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag, sc->bge_cdata.bge_tx_dmamap[i], BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag, sc->bge_cdata.bge_tx_dmamap[i]); m_freem(sc->bge_cdata.bge_tx_chain[i]); sc->bge_cdata.bge_tx_chain[i] = NULL; } bzero((char *)&sc->bge_ldata.bge_tx_ring[i], sizeof(struct bge_tx_bd)); } } static int bge_init_tx_ring(struct bge_softc *sc) { sc->bge_txcnt = 0; sc->bge_tx_saved_considx = 0; bzero(sc->bge_ldata.bge_tx_ring, BGE_TX_RING_SZ); bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag, sc->bge_cdata.bge_tx_ring_map, BUS_DMASYNC_PREWRITE); /* Initialize transmit producer index for host-memory send ring. */ sc->bge_tx_prodidx = 0; bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx); /* 5700 b2 errata */ if (sc->bge_chiprev == BGE_CHIPREV_5700_BX) bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx); /* NIC-memory send ring not used; initialize to zero. */ bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0); /* 5700 b2 errata */ if (sc->bge_chiprev == BGE_CHIPREV_5700_BX) bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0); return (0); } static void bge_setpromisc(struct bge_softc *sc) { if_t ifp; BGE_LOCK_ASSERT(sc); ifp = sc->bge_ifp; /* Enable or disable promiscuous mode as needed. */ if (if_get(ifp, IF_FLAGS) & IFF_PROMISC) BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC); else BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC); } static void bge_hash_maddr(void *arg, struct sockaddr *maddr) { struct sockaddr_dl *sdl = (struct sockaddr_dl *)maddr; uint32_t *hashes = arg; int h; if (sdl->sdl_family != AF_LINK) return; h = ether_crc32_le(LLADDR(sdl), ETHER_ADDR_LEN) & 0x7F; hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F); } static void bge_setmulti(struct bge_softc *sc) { if_t ifp; uint32_t hashes[4] = { 0, 0, 0, 0 }; int i; BGE_LOCK_ASSERT(sc); ifp = sc->bge_ifp; if (if_get(ifp, IF_FLAGS) & (IFF_ALLMULTI | IFF_PROMISC)) { for (i = 0; i < 4; i++) CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0xFFFFFFFF); return; } /* First, zot all the existing filters. 
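[Editorial note] bge_hash_maddr() above is the whole multicast filter scheme in one line: the low 7 bits of the little-endian CRC-32 of the station address select one of 128 hash bits, spread across the four 32-bit registers at BGE_MAR0..BGE_MAR0+12 that bge_setmulti() programs below. A short worked illustration of the bit slicing (the comment is editorial, the statement itself is the driver's):

	/*
	 * Given h = ether_crc32_le(LLADDR(sdl), ETHER_ADDR_LEN) & 0x7F,
	 * bits 6:5 of h pick the hash word and bits 4:0 pick the bit, e.g.
	 *
	 *     h = 0x6B  ->  word (0x6B & 0x60) >> 5 = 3, bit 0x6B & 0x1F = 11
	 *
	 * so that address sets bit 11 of hashes[3], i.e. register BGE_MAR0 + 12.
	 */
	hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);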
*/ for (i = 0; i < 4; i++) CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0); if_foreach_maddr(ifp, bge_hash_maddr, hashes); for (i = 0; i < 4; i++) CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]); } static void bge_setvlan(struct bge_softc *sc) { if_t ifp; BGE_LOCK_ASSERT(sc); ifp = sc->bge_ifp; /* Enable or disable VLAN tag stripping as needed. */ - if (if_get(ifp, IF_CAPENABLE) & IFCAP_VLAN_HWTAGGING) + if (sc->bge_capenable & IFCAP_VLAN_HWTAGGING) BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_KEEP_VLAN_DIAG); else BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_KEEP_VLAN_DIAG); } static void bge_sig_pre_reset(struct bge_softc *sc, int type) { /* * Some chips don't like this so only do this if ASF is enabled */ if (sc->bge_asf_mode) bge_writemem_ind(sc, BGE_SRAM_FW_MB, BGE_SRAM_FW_MB_MAGIC); if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) { switch (type) { case BGE_RESET_START: bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB, BGE_FW_DRV_STATE_START); break; case BGE_RESET_SHUTDOWN: bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB, BGE_FW_DRV_STATE_UNLOAD); break; case BGE_RESET_SUSPEND: bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB, BGE_FW_DRV_STATE_SUSPEND); break; } } if (type == BGE_RESET_START || type == BGE_RESET_SUSPEND) bge_ape_driver_state_change(sc, type); } static void bge_sig_post_reset(struct bge_softc *sc, int type) { if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) { switch (type) { case BGE_RESET_START: bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB, BGE_FW_DRV_STATE_START_DONE); /* START DONE */ break; case BGE_RESET_SHUTDOWN: bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB, BGE_FW_DRV_STATE_UNLOAD_DONE); break; } } if (type == BGE_RESET_SHUTDOWN) bge_ape_driver_state_change(sc, type); } static void bge_sig_legacy(struct bge_softc *sc, int type) { if (sc->bge_asf_mode) { switch (type) { case BGE_RESET_START: bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB, BGE_FW_DRV_STATE_START); break; case BGE_RESET_SHUTDOWN: bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB, BGE_FW_DRV_STATE_UNLOAD); break; } } } static void bge_stop_fw(struct bge_softc *sc) { int i; if (sc->bge_asf_mode) { bge_writemem_ind(sc, BGE_SRAM_FW_CMD_MB, BGE_FW_CMD_PAUSE); CSR_WRITE_4(sc, BGE_RX_CPU_EVENT, CSR_READ_4(sc, BGE_RX_CPU_EVENT) | BGE_RX_CPU_DRV_EVENT); for (i = 0; i < 100; i++ ) { if (!(CSR_READ_4(sc, BGE_RX_CPU_EVENT) & BGE_RX_CPU_DRV_EVENT)) break; DELAY(10); } } } static uint32_t bge_dma_swap_options(struct bge_softc *sc) { uint32_t dma_options; dma_options = BGE_MODECTL_WORDSWAP_NONFRAME | BGE_MODECTL_BYTESWAP_DATA | BGE_MODECTL_WORDSWAP_DATA; #if BYTE_ORDER == BIG_ENDIAN dma_options |= BGE_MODECTL_BYTESWAP_NONFRAME; #endif return (dma_options); } /* * Do endian, PCI and DMA initialization. */ static int bge_chipinit(struct bge_softc *sc) { uint32_t dma_rw_ctl, misc_ctl, mode_ctl; uint16_t val; int i; /* Set endianness before we access any non-PCI registers. */ misc_ctl = BGE_INIT; if (sc->bge_flags & BGE_FLAG_TAGGED_STATUS) misc_ctl |= BGE_PCIMISCCTL_TAGGED_STATUS; pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL, misc_ctl, 4); /* * Clear the MAC statistics block in the NIC's * internal memory. */ for (i = BGE_STATS_BLOCK; i < BGE_STATS_BLOCK_END + 1; i += sizeof(uint32_t)) BGE_MEMWIN_WRITE(sc, i, 0); for (i = BGE_STATUS_BLOCK; i < BGE_STATUS_BLOCK_END + 1; i += sizeof(uint32_t)) BGE_MEMWIN_WRITE(sc, i, 0); if (sc->bge_chiprev == BGE_CHIPREV_5704_BX) { /* * Fix data corruption caused by non-qword write with WB. * Fix master abort in PCI mode. * Fix PCI latency timer. 
*/ val = pci_read_config(sc->bge_dev, BGE_PCI_MSI_DATA + 2, 2); val |= (1 << 10) | (1 << 12) | (1 << 13); pci_write_config(sc->bge_dev, BGE_PCI_MSI_DATA + 2, val, 2); } if (sc->bge_asicrev == BGE_ASICREV_BCM57765 || sc->bge_asicrev == BGE_ASICREV_BCM57766) { /* * For the 57766 and non Ax versions of 57765, bootcode * needs to setup the PCIE Fast Training Sequence (FTS) * value to prevent transmit hangs. */ if (sc->bge_chiprev != BGE_CHIPREV_57765_AX) { CSR_WRITE_4(sc, BGE_CPMU_PADRNG_CTL, CSR_READ_4(sc, BGE_CPMU_PADRNG_CTL) | BGE_CPMU_PADRNG_CTL_RDIV2); } } /* * Set up the PCI DMA control register. */ dma_rw_ctl = BGE_PCIDMARWCTL_RD_CMD_SHIFT(6) | BGE_PCIDMARWCTL_WR_CMD_SHIFT(7); if (sc->bge_flags & BGE_FLAG_PCIE) { if (sc->bge_mps >= 256) dma_rw_ctl |= BGE_PCIDMARWCTL_WR_WAT_SHIFT(7); else dma_rw_ctl |= BGE_PCIDMARWCTL_WR_WAT_SHIFT(3); } else if (sc->bge_flags & BGE_FLAG_PCIX) { if (BGE_IS_5714_FAMILY(sc)) { /* 256 bytes for read and write. */ dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(2) | BGE_PCIDMARWCTL_WR_WAT_SHIFT(2); dma_rw_ctl |= (sc->bge_asicrev == BGE_ASICREV_BCM5780) ? BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL : BGE_PCIDMARWCTL_ONEDMA_ATONCE_LOCAL; } else if (sc->bge_asicrev == BGE_ASICREV_BCM5703) { /* * In the BCM5703, the DMA read watermark should * be set to less than or equal to the maximum * memory read byte count of the PCI-X command * register. */ dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(4) | BGE_PCIDMARWCTL_WR_WAT_SHIFT(3); } else if (sc->bge_asicrev == BGE_ASICREV_BCM5704) { /* 1536 bytes for read, 384 bytes for write. */ dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(7) | BGE_PCIDMARWCTL_WR_WAT_SHIFT(3); } else { /* 384 bytes for read and write. */ dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(3) | BGE_PCIDMARWCTL_WR_WAT_SHIFT(3) | 0x0F; } if (sc->bge_asicrev == BGE_ASICREV_BCM5703 || sc->bge_asicrev == BGE_ASICREV_BCM5704) { uint32_t tmp; /* Set ONE_DMA_AT_ONCE for hardware workaround. */ tmp = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1F; if (tmp == 6 || tmp == 7) dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL; /* Set PCI-X DMA write workaround. */ dma_rw_ctl |= BGE_PCIDMARWCTL_ASRT_ALL_BE; } } else { /* Conventional PCI bus: 256 bytes for read and write. */ dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(7) | BGE_PCIDMARWCTL_WR_WAT_SHIFT(7); if (sc->bge_asicrev != BGE_ASICREV_BCM5705 && sc->bge_asicrev != BGE_ASICREV_BCM5750) dma_rw_ctl |= 0x0F; } if (sc->bge_asicrev == BGE_ASICREV_BCM5700 || sc->bge_asicrev == BGE_ASICREV_BCM5701) dma_rw_ctl |= BGE_PCIDMARWCTL_USE_MRM | BGE_PCIDMARWCTL_ASRT_ALL_BE; if (sc->bge_asicrev == BGE_ASICREV_BCM5703 || sc->bge_asicrev == BGE_ASICREV_BCM5704) dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA; if (BGE_IS_5717_PLUS(sc)) { dma_rw_ctl &= ~BGE_PCIDMARWCTL_DIS_CACHE_ALIGNMENT; if (sc->bge_chipid == BGE_CHIPID_BCM57765_A0) dma_rw_ctl &= ~BGE_PCIDMARWCTL_CRDRDR_RDMA_MRRS_MSK; /* * Enable HW workaround for controllers that misinterpret * a status tag update and leave interrupts permanently * disabled. */ if (!BGE_IS_57765_PLUS(sc) && sc->bge_asicrev != BGE_ASICREV_BCM5717 && sc->bge_asicrev != BGE_ASICREV_BCM5762) dma_rw_ctl |= BGE_PCIDMARWCTL_TAGGED_STATUS_WA; } pci_write_config(sc->bge_dev, BGE_PCI_DMA_RW_CTL, dma_rw_ctl, 4); /* * Set up general mode register. */ mode_ctl = bge_dma_swap_options(sc); if (sc->bge_asicrev == BGE_ASICREV_BCM5720 || sc->bge_asicrev == BGE_ASICREV_BCM5762) { /* Retain Host-2-BMC settings written by APE firmware. 
*/ mode_ctl |= CSR_READ_4(sc, BGE_MODE_CTL) & (BGE_MODECTL_BYTESWAP_B2HRX_DATA | BGE_MODECTL_WORDSWAP_B2HRX_DATA | BGE_MODECTL_B2HRX_ENABLE | BGE_MODECTL_HTX2B_ENABLE); } mode_ctl |= BGE_MODECTL_MAC_ATTN_INTR | BGE_MODECTL_HOST_SEND_BDS | BGE_MODECTL_TX_NO_PHDR_CSUM; /* * BCM5701 B5 have a bug causing data corruption when using * 64-bit DMA reads, which can be terminated early and then * completed later as 32-bit accesses, in combination with * certain bridges. */ if (sc->bge_asicrev == BGE_ASICREV_BCM5701 && sc->bge_chipid == BGE_CHIPID_BCM5701_B5) mode_ctl |= BGE_MODECTL_FORCE_PCI32; /* * Tell the firmware the driver is running */ if (sc->bge_asf_mode & ASF_STACKUP) mode_ctl |= BGE_MODECTL_STACKUP; CSR_WRITE_4(sc, BGE_MODE_CTL, mode_ctl); /* * Disable memory write invalidate. Apparently it is not supported * properly by these devices. */ PCI_CLRBIT(sc->bge_dev, BGE_PCI_CMD, PCIM_CMD_MWIEN, 4); /* Set the timer prescaler (always 66 MHz). */ CSR_WRITE_4(sc, BGE_MISC_CFG, BGE_32BITTIME_66MHZ); /* XXX: The Linux tg3 driver does this at the start of brgphy_reset. */ if (sc->bge_asicrev == BGE_ASICREV_BCM5906) { DELAY(40); /* XXX */ /* Put PHY into ready state */ BGE_CLRBIT(sc, BGE_MISC_CFG, BGE_MISCCFG_EPHY_IDDQ); CSR_READ_4(sc, BGE_MISC_CFG); /* Flush */ DELAY(40); } return (0); } static int bge_blockinit(struct bge_softc *sc) { struct bge_rcb *rcb; bus_size_t vrcb; bge_hostaddr taddr; uint32_t dmactl, rdmareg, val; int i, limit; /* * Initialize the memory window pointer register so that * we can access the first 32K of internal NIC RAM. This will * allow us to set up the TX send ring RCBs and the RX return * ring RCBs, plus other things which live in NIC memory. */ CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0); /* Note: the BCM5704 has a smaller mbuf space than other chips. 
*/ if (!(BGE_IS_5705_PLUS(sc))) { /* Configure mbuf memory pool */ CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR, BGE_BUFFPOOL_1); if (sc->bge_asicrev == BGE_ASICREV_BCM5704) CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000); else CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000); /* Configure DMA resource pool */ CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR, BGE_DMA_DESCRIPTORS); CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000); } /* Configure mbuf pool watermarks */ if (BGE_IS_5717_PLUS(sc)) { CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0); - if (if_get(sc->bge_ifp, IF_MTU) > ETHERMTU) { + if (sc->bge_mtu > ETHERMTU) { CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x7e); CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0xea); } else { CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x2a); CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0xa0); } } else if (!BGE_IS_5705_PLUS(sc)) { CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50); CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20); CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60); } else if (sc->bge_asicrev == BGE_ASICREV_BCM5906) { CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0); CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x04); CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x10); } else { CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0); CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10); CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60); } /* Configure DMA resource watermarks */ CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5); CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10); /* Enable buffer manager */ val = BGE_BMANMODE_ENABLE | BGE_BMANMODE_LOMBUF_ATTN; /* * Change the arbitration algorithm of TXMBUF read request to * round-robin instead of priority based for BCM5719. When * TXFIFO is almost empty, RDMA will hold its request until * TXFIFO is not almost empty. */ if (sc->bge_asicrev == BGE_ASICREV_BCM5719) val |= BGE_BMANMODE_NO_TX_UNDERRUN; CSR_WRITE_4(sc, BGE_BMAN_MODE, val); /* Poll for buffer manager start indication */ for (i = 0; i < BGE_TIMEOUT; i++) { DELAY(10); if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE) break; } if (i == BGE_TIMEOUT) { device_printf(sc->bge_dev, "buffer manager failed to start\n"); return (ENXIO); } /* Enable flow-through queues */ CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF); CSR_WRITE_4(sc, BGE_FTQ_RESET, 0); /* Wait until queue initialization is complete */ for (i = 0; i < BGE_TIMEOUT; i++) { DELAY(10); if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0) break; } if (i == BGE_TIMEOUT) { device_printf(sc->bge_dev, "flow-through queue init failed\n"); return (ENXIO); } /* * Summary of rings supported by the controller: * * Standard Receive Producer Ring * - This ring is used to feed receive buffers for "standard" * sized frames (typically 1536 bytes) to the controller. * * Jumbo Receive Producer Ring * - This ring is used to feed receive buffers for jumbo sized * frames (i.e. anything bigger than the "standard" frames) * to the controller. * * Mini Receive Producer Ring * - This ring is used to feed receive buffers for "mini" * sized frames to the controller. * - This feature required external memory for the controller * but was never used in a production system. Should always * be disabled. * * Receive Return Ring * - After the controller has placed an incoming frame into a * receive buffer that buffer is moved into a receive return * ring. The driver is then responsible to passing the * buffer up to the stack. Many versions of the controller * support multiple RR rings. 
* * Send Ring * - This ring is used for outgoing frames. Many versions of * the controller support multiple send rings. */ /* Initialize the standard receive producer ring control block. */ rcb = &sc->bge_ldata.bge_info.bge_std_rx_rcb; rcb->bge_hostaddr.bge_addr_lo = BGE_ADDR_LO(sc->bge_ldata.bge_rx_std_ring_paddr); rcb->bge_hostaddr.bge_addr_hi = BGE_ADDR_HI(sc->bge_ldata.bge_rx_std_ring_paddr); bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag, sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREREAD); if (BGE_IS_5717_PLUS(sc)) { /* * Bits 31-16: Programmable ring size (2048, 1024, 512, .., 32) * Bits 15-2 : Maximum RX frame size * Bit 1 : 1 = Ring Disabled, 0 = Ring ENabled * Bit 0 : Reserved */ rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, BGE_MAX_FRAMELEN << 2); } else if (BGE_IS_5705_PLUS(sc)) { /* * Bits 31-16: Programmable ring size (512, 256, 128, 64, 32) * Bits 15-2 : Reserved (should be 0) * Bit 1 : 1 = Ring Disabled, 0 = Ring Enabled * Bit 0 : Reserved */ rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0); } else { /* * Ring size is always XXX entries * Bits 31-16: Maximum RX frame size * Bits 15-2 : Reserved (should be 0) * Bit 1 : 1 = Ring Disabled, 0 = Ring Enabled * Bit 0 : Reserved */ rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0); } if (sc->bge_asicrev == BGE_ASICREV_BCM5717 || sc->bge_asicrev == BGE_ASICREV_BCM5719 || sc->bge_asicrev == BGE_ASICREV_BCM5720) rcb->bge_nicaddr = BGE_STD_RX_RINGS_5717; else rcb->bge_nicaddr = BGE_STD_RX_RINGS; /* Write the standard receive producer ring control block. */ CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi); CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo); CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags); CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr); /* Reset the standard receive producer ring producer index. */ bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, 0); /* * Initialize the jumbo RX producer ring control * block. We set the 'ring disabled' bit in the * flags field until we're actually ready to start * using this ring (i.e. once we set the MTU * high enough to require it). */ if (BGE_IS_JUMBO_CAPABLE(sc)) { rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb; /* Get the jumbo receive producer ring RCB parameters. */ rcb->bge_hostaddr.bge_addr_lo = BGE_ADDR_LO(sc->bge_ldata.bge_rx_jumbo_ring_paddr); rcb->bge_hostaddr.bge_addr_hi = BGE_ADDR_HI(sc->bge_ldata.bge_rx_jumbo_ring_paddr); bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag, sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_PREREAD); rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_USE_EXT_RX_BD | BGE_RCB_FLAG_RING_DISABLED); if (sc->bge_asicrev == BGE_ASICREV_BCM5717 || sc->bge_asicrev == BGE_ASICREV_BCM5719 || sc->bge_asicrev == BGE_ASICREV_BCM5720) rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS_5717; else rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS; CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi); CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo); /* Program the jumbo receive producer ring RCB parameters. */ CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags); CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr); /* Reset the jumbo receive producer ring producer index. */ bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0); } /* Disable the mini receive producer ring RCB. 
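 * Only the 5700 family implements the mini ring at all, so this is
 * skipped on later chips.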
*/ if (BGE_IS_5700_FAMILY(sc)) { rcb = &sc->bge_ldata.bge_info.bge_mini_rx_rcb; rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED); CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags); /* Reset the mini receive producer ring producer index. */ bge_writembx(sc, BGE_MBX_RX_MINI_PROD_LO, 0); } /* Choose de-pipeline mode for BCM5906 A0, A1 and A2. */ if (sc->bge_asicrev == BGE_ASICREV_BCM5906) { if (sc->bge_chipid == BGE_CHIPID_BCM5906_A0 || sc->bge_chipid == BGE_CHIPID_BCM5906_A1 || sc->bge_chipid == BGE_CHIPID_BCM5906_A2) CSR_WRITE_4(sc, BGE_ISO_PKT_TX, (CSR_READ_4(sc, BGE_ISO_PKT_TX) & ~3) | 2); } /* * The BD ring replenish thresholds control how often the * hardware fetches new BD's from the producer rings in host * memory. Setting the value too low on a busy system can * starve the hardware and recue the throughpout. * * Set the BD ring replentish thresholds. The recommended * values are 1/8th the number of descriptors allocated to * each ring. * XXX The 5754 requires a lower threshold, so it might be a * requirement of all 575x family chips. The Linux driver sets * the lower threshold for all 5705 family chips as well, but there * are reports that it might not need to be so strict. * * XXX Linux does some extra fiddling here for the 5906 parts as * well. */ if (BGE_IS_5705_PLUS(sc)) val = 8; else val = BGE_STD_RX_RING_CNT / 8; CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, val); if (BGE_IS_JUMBO_CAPABLE(sc)) CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH, BGE_JUMBO_RX_RING_CNT/8); if (BGE_IS_5717_PLUS(sc)) { CSR_WRITE_4(sc, BGE_STD_REPLENISH_LWM, 32); CSR_WRITE_4(sc, BGE_JMB_REPLENISH_LWM, 16); } /* * Disable all send rings by setting the 'ring disabled' bit * in the flags field of all the TX send ring control blocks, * located in NIC memory. */ if (!BGE_IS_5705_PLUS(sc)) /* 5700 to 5704 had 16 send rings. */ limit = BGE_TX_RINGS_EXTSSRAM_MAX; else if (BGE_IS_57765_PLUS(sc) || sc->bge_asicrev == BGE_ASICREV_BCM5762) limit = 2; else if (BGE_IS_5717_PLUS(sc)) limit = 4; else limit = 1; vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB; for (i = 0; i < limit; i++) { RCB_WRITE_4(sc, vrcb, bge_maxlen_flags, BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED)); RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0); vrcb += sizeof(struct bge_rcb); } /* Configure send ring RCB 0 (we use only the first ring) */ vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB; BGE_HOSTADDR(taddr, sc->bge_ldata.bge_tx_ring_paddr); RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi); RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo); if (sc->bge_asicrev == BGE_ASICREV_BCM5717 || sc->bge_asicrev == BGE_ASICREV_BCM5719 || sc->bge_asicrev == BGE_ASICREV_BCM5720) RCB_WRITE_4(sc, vrcb, bge_nicaddr, BGE_SEND_RING_5717); else RCB_WRITE_4(sc, vrcb, bge_nicaddr, BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT)); RCB_WRITE_4(sc, vrcb, bge_maxlen_flags, BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0)); /* * Disable all receive return rings by setting the * 'ring diabled' bit in the flags field of all the receive * return ring control blocks, located in NIC memory. */ if (sc->bge_asicrev == BGE_ASICREV_BCM5717 || sc->bge_asicrev == BGE_ASICREV_BCM5719 || sc->bge_asicrev == BGE_ASICREV_BCM5720) { /* Should be 17, use 16 until we get an SRAM map. */ limit = 16; } else if (!BGE_IS_5705_PLUS(sc)) limit = BGE_RX_RINGS_MAX; else if (sc->bge_asicrev == BGE_ASICREV_BCM5755 || sc->bge_asicrev == BGE_ASICREV_BCM5762 || BGE_IS_57765_PLUS(sc)) limit = 4; else limit = 1; /* Disable all receive return rings. 
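 * Each ring is disabled by writing BGE_RCB_FLAG_RING_DISABLED into the
 * flags word of its RCB in NIC memory; the loop below also clears the
 * host and NIC address fields and zeroes the corresponding consumer
 * mailbox.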
*/ vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB; for (i = 0; i < limit; i++) { RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, 0); RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, 0); RCB_WRITE_4(sc, vrcb, bge_maxlen_flags, BGE_RCB_FLAG_RING_DISABLED); RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0); bge_writembx(sc, BGE_MBX_RX_CONS0_LO + (i * (sizeof(uint64_t))), 0); vrcb += sizeof(struct bge_rcb); } /* * Set up receive return ring 0. Note that the NIC address * for RX return rings is 0x0. The return rings live entirely * within the host, so the nicaddr field in the RCB isn't used. */ vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB; BGE_HOSTADDR(taddr, sc->bge_ldata.bge_rx_return_ring_paddr); RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi); RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo); RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0); RCB_WRITE_4(sc, vrcb, bge_maxlen_flags, BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0)); /* Set random backoff seed for TX */ CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF, (if_lladdr(sc->bge_ifp)[0] + if_lladdr(sc->bge_ifp)[1] + if_lladdr(sc->bge_ifp)[2] + if_lladdr(sc->bge_ifp)[3] + if_lladdr(sc->bge_ifp)[4] + if_lladdr(sc->bge_ifp)[5]) & BGE_TX_BACKOFF_SEED_MASK); /* Set inter-packet gap */ val = 0x2620; if (sc->bge_asicrev == BGE_ASICREV_BCM5720 || sc->bge_asicrev == BGE_ASICREV_BCM5762) val |= CSR_READ_4(sc, BGE_TX_LENGTHS) & (BGE_TXLEN_JMB_FRM_LEN_MSK | BGE_TXLEN_CNT_DN_VAL_MSK); CSR_WRITE_4(sc, BGE_TX_LENGTHS, val); /* * Specify which ring to use for packets that don't match * any RX rules. */ CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08); /* * Configure number of RX lists. One interrupt distribution * list, sixteen active lists, one bad frames class. */ CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181); /* Inialize RX list placement stats mask. */ CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF); CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1); /* Disable host coalescing until we get it set up */ CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000); /* Poll to make sure it's shut down. */ for (i = 0; i < BGE_TIMEOUT; i++) { DELAY(10); if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE)) break; } if (i == BGE_TIMEOUT) { device_printf(sc->bge_dev, "host coalescing engine failed to idle\n"); return (ENXIO); } /* Set up host coalescing defaults */ CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks); CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks); CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds); CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds); if (!(BGE_IS_5705_PLUS(sc))) { CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0); CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0); } CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 1); CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 1); /* Set up address of statistics block */ if (!(BGE_IS_5705_PLUS(sc))) { CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI, BGE_ADDR_HI(sc->bge_ldata.bge_stats_paddr)); CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO, BGE_ADDR_LO(sc->bge_ldata.bge_stats_paddr)); CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK); CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK); CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks); } /* Set up address of status block */ CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI, BGE_ADDR_HI(sc->bge_ldata.bge_status_block_paddr)); CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO, BGE_ADDR_LO(sc->bge_ldata.bge_status_block_paddr)); /* Set up status block size. 
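 * BCM5700 steppings other than C0 want the full-sized status block;
 * everything else gets by with the minimal 32-byte block.  The block is
 * zeroed below before host coalescing is enabled.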
*/ if (sc->bge_asicrev == BGE_ASICREV_BCM5700 && sc->bge_chipid != BGE_CHIPID_BCM5700_C0) { val = BGE_STATBLKSZ_FULL; bzero(sc->bge_ldata.bge_status_block, BGE_STATUS_BLK_SZ); } else { val = BGE_STATBLKSZ_32BYTE; bzero(sc->bge_ldata.bge_status_block, 32); } bus_dmamap_sync(sc->bge_cdata.bge_status_tag, sc->bge_cdata.bge_status_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); /* Turn on host coalescing state machine */ CSR_WRITE_4(sc, BGE_HCC_MODE, val | BGE_HCCMODE_ENABLE); /* Turn on RX BD completion state machine and enable attentions */ CSR_WRITE_4(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE | BGE_RBDCMODE_ATTN); /* Turn on RX list placement state machine */ CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE); /* Turn on RX list selector state machine. */ if (!(BGE_IS_5705_PLUS(sc))) CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE); /* Turn on DMA, clear stats. */ val = BGE_MACMODE_TXDMA_ENB | BGE_MACMODE_RXDMA_ENB | BGE_MACMODE_RX_STATS_CLEAR | BGE_MACMODE_TX_STATS_CLEAR | BGE_MACMODE_RX_STATS_ENB | BGE_MACMODE_TX_STATS_ENB | BGE_MACMODE_FRMHDR_DMA_ENB; if (sc->bge_flags & BGE_FLAG_TBI) val |= BGE_PORTMODE_TBI; else if (sc->bge_flags & BGE_FLAG_MII_SERDES) val |= BGE_PORTMODE_GMII; else val |= BGE_PORTMODE_MII; /* Allow APE to send/receive frames. */ if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) != 0) val |= BGE_MACMODE_APE_RX_EN | BGE_MACMODE_APE_TX_EN; CSR_WRITE_4(sc, BGE_MAC_MODE, val); DELAY(40); /* Set misc. local control, enable interrupts on attentions */ BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN); #ifdef notdef /* Assert GPIO pins for PHY reset */ BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0 | BGE_MLC_MISCIO_OUT1 | BGE_MLC_MISCIO_OUT2); BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0 | BGE_MLC_MISCIO_OUTEN1 | BGE_MLC_MISCIO_OUTEN2); #endif /* Turn on DMA completion state machine */ if (!(BGE_IS_5705_PLUS(sc))) CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE); val = BGE_WDMAMODE_ENABLE | BGE_WDMAMODE_ALL_ATTNS; /* Enable host coalescing bug fix. */ if (BGE_IS_5755_PLUS(sc)) val |= BGE_WDMAMODE_STATUS_TAG_FIX; /* Request larger DMA burst size to get better performance. */ if (sc->bge_asicrev == BGE_ASICREV_BCM5785) val |= BGE_WDMAMODE_BURST_ALL_DATA; /* Turn on write DMA state machine */ CSR_WRITE_4(sc, BGE_WDMA_MODE, val); DELAY(40); /* Turn on read DMA state machine */ val = BGE_RDMAMODE_ENABLE | BGE_RDMAMODE_ALL_ATTNS; if (sc->bge_asicrev == BGE_ASICREV_BCM5717) val |= BGE_RDMAMODE_MULT_DMA_RD_DIS; if (sc->bge_asicrev == BGE_ASICREV_BCM5784 || sc->bge_asicrev == BGE_ASICREV_BCM5785 || sc->bge_asicrev == BGE_ASICREV_BCM57780) val |= BGE_RDMAMODE_BD_SBD_CRPT_ATTN | BGE_RDMAMODE_MBUF_RBD_CRPT_ATTN | BGE_RDMAMODE_MBUF_SBD_CRPT_ATTN; if (sc->bge_flags & BGE_FLAG_PCIE) val |= BGE_RDMAMODE_FIFO_LONG_BURST; if (sc->bge_flags & (BGE_FLAG_TSO | BGE_FLAG_TSO3)) { val |= BGE_RDMAMODE_TSO4_ENABLE; if (sc->bge_flags & BGE_FLAG_TSO3 || sc->bge_asicrev == BGE_ASICREV_BCM5785 || sc->bge_asicrev == BGE_ASICREV_BCM57780) val |= BGE_RDMAMODE_TSO6_ENABLE; } if (sc->bge_asicrev == BGE_ASICREV_BCM5720 || sc->bge_asicrev == BGE_ASICREV_BCM5762) { val |= CSR_READ_4(sc, BGE_RDMA_MODE) & BGE_RDMAMODE_H2BNC_VLAN_DET; /* * Allow multiple outstanding read requests from * non-LSO read DMA engine. 
*/ val &= ~BGE_RDMAMODE_MULT_DMA_RD_DIS; } if (sc->bge_asicrev == BGE_ASICREV_BCM5761 || sc->bge_asicrev == BGE_ASICREV_BCM5784 || sc->bge_asicrev == BGE_ASICREV_BCM5785 || sc->bge_asicrev == BGE_ASICREV_BCM57780 || BGE_IS_5717_PLUS(sc) || BGE_IS_57765_PLUS(sc)) { if (sc->bge_asicrev == BGE_ASICREV_BCM5762) rdmareg = BGE_RDMA_RSRVCTRL_REG2; else rdmareg = BGE_RDMA_RSRVCTRL; dmactl = CSR_READ_4(sc, rdmareg); /* * Adjust tx margin to prevent TX data corruption and * fix internal FIFO overflow. */ if (sc->bge_chipid == BGE_CHIPID_BCM5719_A0 || sc->bge_asicrev == BGE_ASICREV_BCM5762) { dmactl &= ~(BGE_RDMA_RSRVCTRL_FIFO_LWM_MASK | BGE_RDMA_RSRVCTRL_FIFO_HWM_MASK | BGE_RDMA_RSRVCTRL_TXMRGN_MASK); dmactl |= BGE_RDMA_RSRVCTRL_FIFO_LWM_1_5K | BGE_RDMA_RSRVCTRL_FIFO_HWM_1_5K | BGE_RDMA_RSRVCTRL_TXMRGN_320B; } /* * Enable fix for read DMA FIFO overruns. * The fix is to limit the number of RX BDs * the hardware would fetch at a fime. */ CSR_WRITE_4(sc, rdmareg, dmactl | BGE_RDMA_RSRVCTRL_FIFO_OFLW_FIX); } if (sc->bge_asicrev == BGE_ASICREV_BCM5719) { CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL, CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL) | BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_4K | BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K); } else if (sc->bge_asicrev == BGE_ASICREV_BCM5720) { /* * Allow 4KB burst length reads for non-LSO frames. * Enable 512B burst length reads for buffer descriptors. */ CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL, CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL) | BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_512 | BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K); } else if (sc->bge_asicrev == BGE_ASICREV_BCM5762) { CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL_REG2, CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL_REG2) | BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_4K | BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K); } CSR_WRITE_4(sc, BGE_RDMA_MODE, val); DELAY(40); if (sc->bge_flags & BGE_FLAG_RDMA_BUG) { for (i = 0; i < BGE_NUM_RDMA_CHANNELS / 2; i++) { val = CSR_READ_4(sc, BGE_RDMA_LENGTH + i * 4); if ((val & 0xFFFF) > BGE_FRAMELEN) break; if (((val >> 16) & 0xFFFF) > BGE_FRAMELEN) break; } if (i != BGE_NUM_RDMA_CHANNELS / 2) { val = CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL); if (sc->bge_asicrev == BGE_ASICREV_BCM5719) val |= BGE_RDMA_TX_LENGTH_WA_5719; else val |= BGE_RDMA_TX_LENGTH_WA_5720; CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL, val); } } /* Turn on RX data completion state machine */ CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE); /* Turn on RX BD initiator state machine */ CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE); /* Turn on RX data and RX BD initiator state machine */ CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE); /* Turn on Mbuf cluster free state machine */ if (!(BGE_IS_5705_PLUS(sc))) CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE); /* Turn on send BD completion state machine */ CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE); /* Turn on send data completion state machine */ val = BGE_SDCMODE_ENABLE; if (sc->bge_asicrev == BGE_ASICREV_BCM5761) val |= BGE_SDCMODE_CDELAY; CSR_WRITE_4(sc, BGE_SDC_MODE, val); /* Turn on send data initiator state machine */ if (sc->bge_flags & (BGE_FLAG_TSO | BGE_FLAG_TSO3)) CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE | BGE_SDIMODE_HW_LSO_PRE_DMA); else CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE); /* Turn on send BD initiator state machine */ CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE); /* Turn on send BD selector state machine */ CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE); CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF); CSR_WRITE_4(sc, BGE_SDI_STATS_CTL, 
BGE_SDISTATSCTL_ENABLE | BGE_SDISTATSCTL_FASTER); /* ack/clear link change events */ CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED | BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE | BGE_MACSTAT_LINK_CHANGED); CSR_WRITE_4(sc, BGE_MI_STS, 0); /* * Enable attention when the link has changed state for * devices that use auto polling. */ if (sc->bge_flags & BGE_FLAG_TBI) { CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK); } else { if (sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) { CSR_WRITE_4(sc, BGE_MI_MODE, sc->bge_mi_mode); DELAY(80); } if (sc->bge_asicrev == BGE_ASICREV_BCM5700 && sc->bge_chipid != BGE_CHIPID_BCM5700_B2) CSR_WRITE_4(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_MI_INTERRUPT); } /* * Clear any pending link state attention. * Otherwise some link state change events may be lost until attention * is cleared by bge_intr() -> bge_link_upd() sequence. * It's not necessary on newer BCM chips - perhaps enabling link * state change attentions implies clearing pending attention. */ CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED | BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE | BGE_MACSTAT_LINK_CHANGED); /* Enable link state change attentions. */ BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED); return (0); } static const struct bge_revision * bge_lookup_rev(uint32_t chipid) { const struct bge_revision *br; for (br = bge_revisions; br->br_name != NULL; br++) { if (br->br_chipid == chipid) return (br); } for (br = bge_majorrevs; br->br_name != NULL; br++) { if (br->br_chipid == BGE_ASICREV(chipid)) return (br); } return (NULL); } static const struct bge_vendor * bge_lookup_vendor(uint16_t vid) { const struct bge_vendor *v; for (v = bge_vendors; v->v_name != NULL; v++) if (v->v_id == vid) return (v); return (NULL); } static uint32_t bge_chipid(device_t dev) { uint32_t id; id = pci_read_config(dev, BGE_PCI_MISC_CTL, 4) >> BGE_PCIMISCCTL_ASICREV_SHIFT; if (BGE_ASICREV(id) == BGE_ASICREV_USE_PRODID_REG) { /* * Find the ASCI revision. Different chips use different * registers. */ switch (pci_get_device(dev)) { case BCOM_DEVICEID_BCM5717: case BCOM_DEVICEID_BCM5718: case BCOM_DEVICEID_BCM5719: case BCOM_DEVICEID_BCM5720: case BCOM_DEVICEID_BCM5725: case BCOM_DEVICEID_BCM5727: case BCOM_DEVICEID_BCM5762: case BCOM_DEVICEID_BCM57764: case BCOM_DEVICEID_BCM57767: case BCOM_DEVICEID_BCM57787: id = pci_read_config(dev, BGE_PCI_GEN2_PRODID_ASICREV, 4); break; case BCOM_DEVICEID_BCM57761: case BCOM_DEVICEID_BCM57762: case BCOM_DEVICEID_BCM57765: case BCOM_DEVICEID_BCM57766: case BCOM_DEVICEID_BCM57781: case BCOM_DEVICEID_BCM57782: case BCOM_DEVICEID_BCM57785: case BCOM_DEVICEID_BCM57786: case BCOM_DEVICEID_BCM57791: case BCOM_DEVICEID_BCM57795: id = pci_read_config(dev, BGE_PCI_GEN15_PRODID_ASICREV, 4); break; default: id = pci_read_config(dev, BGE_PCI_PRODID_ASICREV, 4); } } return (id); } /* * Probe for a Broadcom chip. Check the PCI vendor and device IDs * against our list and return its name if we find a match. * * Note that since the Broadcom controller contains VPD support, we * try to get the device name string from the controller itself instead * of the compiled-in string. It guarantees we'll always announce the * right product name. We fall back to the compiled-in string when * VPD is unavailable or corrupt. 
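 * The resulting description takes the form "<model>, ASIC rev. 0x<id>",
 * with the revision marked "unknown" when it is not in our tables.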
*/ static int bge_probe(device_t dev) { char buf[96]; char model[64]; const struct bge_revision *br; const char *pname; struct bge_softc *sc; const struct bge_type *t = bge_devs; const struct bge_vendor *v; uint32_t id; uint16_t did, vid; sc = device_get_softc(dev); sc->bge_dev = dev; vid = pci_get_vendor(dev); did = pci_get_device(dev); while(t->bge_vid != 0) { if ((vid == t->bge_vid) && (did == t->bge_did)) { id = bge_chipid(dev); br = bge_lookup_rev(id); if (bge_has_eaddr(sc) && pci_get_vpd_ident(dev, &pname) == 0) snprintf(model, sizeof(model), "%s", pname); else { v = bge_lookup_vendor(vid); snprintf(model, sizeof(model), "%s %s", v != NULL ? v->v_name : "Unknown", br != NULL ? br->br_name : "NetXtreme/NetLink Ethernet Controller"); } snprintf(buf, sizeof(buf), "%s, %sASIC rev. %#08x", model, br != NULL ? "" : "unknown ", id); device_set_desc_copy(dev, buf); return (BUS_PROBE_DEFAULT); } t++; } return (ENXIO); } static void bge_dma_free(struct bge_softc *sc) { int i; /* Destroy DMA maps for RX buffers. */ for (i = 0; i < BGE_STD_RX_RING_CNT; i++) { if (sc->bge_cdata.bge_rx_std_dmamap[i]) bus_dmamap_destroy(sc->bge_cdata.bge_rx_mtag, sc->bge_cdata.bge_rx_std_dmamap[i]); } if (sc->bge_cdata.bge_rx_std_sparemap) bus_dmamap_destroy(sc->bge_cdata.bge_rx_mtag, sc->bge_cdata.bge_rx_std_sparemap); /* Destroy DMA maps for jumbo RX buffers. */ for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) { if (sc->bge_cdata.bge_rx_jumbo_dmamap[i]) bus_dmamap_destroy(sc->bge_cdata.bge_mtag_jumbo, sc->bge_cdata.bge_rx_jumbo_dmamap[i]); } if (sc->bge_cdata.bge_rx_jumbo_sparemap) bus_dmamap_destroy(sc->bge_cdata.bge_mtag_jumbo, sc->bge_cdata.bge_rx_jumbo_sparemap); /* Destroy DMA maps for TX buffers. */ for (i = 0; i < BGE_TX_RING_CNT; i++) { if (sc->bge_cdata.bge_tx_dmamap[i]) bus_dmamap_destroy(sc->bge_cdata.bge_tx_mtag, sc->bge_cdata.bge_tx_dmamap[i]); } if (sc->bge_cdata.bge_rx_mtag) bus_dma_tag_destroy(sc->bge_cdata.bge_rx_mtag); if (sc->bge_cdata.bge_mtag_jumbo) bus_dma_tag_destroy(sc->bge_cdata.bge_mtag_jumbo); if (sc->bge_cdata.bge_tx_mtag) bus_dma_tag_destroy(sc->bge_cdata.bge_tx_mtag); /* Destroy standard RX ring. */ if (sc->bge_ldata.bge_rx_std_ring_paddr) bus_dmamap_unload(sc->bge_cdata.bge_rx_std_ring_tag, sc->bge_cdata.bge_rx_std_ring_map); if (sc->bge_ldata.bge_rx_std_ring) bus_dmamem_free(sc->bge_cdata.bge_rx_std_ring_tag, sc->bge_ldata.bge_rx_std_ring, sc->bge_cdata.bge_rx_std_ring_map); if (sc->bge_cdata.bge_rx_std_ring_tag) bus_dma_tag_destroy(sc->bge_cdata.bge_rx_std_ring_tag); /* Destroy jumbo RX ring. */ if (sc->bge_ldata.bge_rx_jumbo_ring_paddr) bus_dmamap_unload(sc->bge_cdata.bge_rx_jumbo_ring_tag, sc->bge_cdata.bge_rx_jumbo_ring_map); if (sc->bge_ldata.bge_rx_jumbo_ring) bus_dmamem_free(sc->bge_cdata.bge_rx_jumbo_ring_tag, sc->bge_ldata.bge_rx_jumbo_ring, sc->bge_cdata.bge_rx_jumbo_ring_map); if (sc->bge_cdata.bge_rx_jumbo_ring_tag) bus_dma_tag_destroy(sc->bge_cdata.bge_rx_jumbo_ring_tag); /* Destroy RX return ring. */ if (sc->bge_ldata.bge_rx_return_ring_paddr) bus_dmamap_unload(sc->bge_cdata.bge_rx_return_ring_tag, sc->bge_cdata.bge_rx_return_ring_map); if (sc->bge_ldata.bge_rx_return_ring) bus_dmamem_free(sc->bge_cdata.bge_rx_return_ring_tag, sc->bge_ldata.bge_rx_return_ring, sc->bge_cdata.bge_rx_return_ring_map); if (sc->bge_cdata.bge_rx_return_ring_tag) bus_dma_tag_destroy(sc->bge_cdata.bge_rx_return_ring_tag); /* Destroy TX ring. 
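 * As with the other rings, teardown mirrors allocation in reverse:
 * unload the DMA map if the ring was loaded, free the DMA'able memory,
 * then destroy the tag.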
*/ if (sc->bge_ldata.bge_tx_ring_paddr) bus_dmamap_unload(sc->bge_cdata.bge_tx_ring_tag, sc->bge_cdata.bge_tx_ring_map); if (sc->bge_ldata.bge_tx_ring) bus_dmamem_free(sc->bge_cdata.bge_tx_ring_tag, sc->bge_ldata.bge_tx_ring, sc->bge_cdata.bge_tx_ring_map); if (sc->bge_cdata.bge_tx_ring_tag) bus_dma_tag_destroy(sc->bge_cdata.bge_tx_ring_tag); /* Destroy status block. */ if (sc->bge_ldata.bge_status_block_paddr) bus_dmamap_unload(sc->bge_cdata.bge_status_tag, sc->bge_cdata.bge_status_map); if (sc->bge_ldata.bge_status_block) bus_dmamem_free(sc->bge_cdata.bge_status_tag, sc->bge_ldata.bge_status_block, sc->bge_cdata.bge_status_map); if (sc->bge_cdata.bge_status_tag) bus_dma_tag_destroy(sc->bge_cdata.bge_status_tag); /* Destroy statistics block. */ if (sc->bge_ldata.bge_stats_paddr) bus_dmamap_unload(sc->bge_cdata.bge_stats_tag, sc->bge_cdata.bge_stats_map); if (sc->bge_ldata.bge_stats) bus_dmamem_free(sc->bge_cdata.bge_stats_tag, sc->bge_ldata.bge_stats, sc->bge_cdata.bge_stats_map); if (sc->bge_cdata.bge_stats_tag) bus_dma_tag_destroy(sc->bge_cdata.bge_stats_tag); if (sc->bge_cdata.bge_buffer_tag) bus_dma_tag_destroy(sc->bge_cdata.bge_buffer_tag); /* Destroy the parent tag. */ if (sc->bge_cdata.bge_parent_tag) bus_dma_tag_destroy(sc->bge_cdata.bge_parent_tag); } static int bge_dma_ring_alloc(struct bge_softc *sc, bus_size_t alignment, bus_size_t maxsize, bus_dma_tag_t *tag, uint8_t **ring, bus_dmamap_t *map, bus_addr_t *paddr, const char *msg) { struct bge_dmamap_arg ctx; int error; error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, alignment, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, maxsize, 1, maxsize, 0, NULL, NULL, tag); if (error != 0) { device_printf(sc->bge_dev, "could not create %s dma tag\n", msg); return (ENOMEM); } /* Allocate DMA'able memory for ring. */ error = bus_dmamem_alloc(*tag, (void **)ring, BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, map); if (error != 0) { device_printf(sc->bge_dev, "could not allocate DMA'able memory for %s\n", msg); return (ENOMEM); } /* Load the address of the ring. */ ctx.bge_busaddr = 0; error = bus_dmamap_load(*tag, *map, *ring, maxsize, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT); if (error != 0) { device_printf(sc->bge_dev, "could not load DMA'able memory for %s\n", msg); return (ENOMEM); } *paddr = ctx.bge_busaddr; return (0); } static int bge_dma_alloc(struct bge_softc *sc) { bus_addr_t lowaddr; bus_size_t rxmaxsegsz, sbsz, txsegsz, txmaxsegsz; int i, error; lowaddr = BUS_SPACE_MAXADDR; if ((sc->bge_flags & BGE_FLAG_40BIT_BUG) != 0) lowaddr = BGE_DMA_MAXADDR; /* * Allocate the parent bus DMA tag appropriate for PCI. */ error = bus_dma_tag_create(bus_get_dma_tag(sc->bge_dev), 1, 0, lowaddr, BUS_SPACE_MAXADDR, NULL, NULL, BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL, &sc->bge_cdata.bge_parent_tag); if (error != 0) { device_printf(sc->bge_dev, "could not allocate parent dma tag\n"); return (ENOMEM); } /* Create tag for standard RX ring. */ error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_STD_RX_RING_SZ, &sc->bge_cdata.bge_rx_std_ring_tag, (uint8_t **)&sc->bge_ldata.bge_rx_std_ring, &sc->bge_cdata.bge_rx_std_ring_map, &sc->bge_ldata.bge_rx_std_ring_paddr, "RX ring"); if (error) return (error); /* Create tag for RX return ring. 
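 * bge_dma_ring_alloc() above creates a single-segment tag, allocates
 * zeroed coherent memory and loads it, handing back the bus address via
 * bge_dma_map_addr(); the same helper is reused for every ring and
 * block that follows.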
*/ error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_RX_RTN_RING_SZ(sc), &sc->bge_cdata.bge_rx_return_ring_tag, (uint8_t **)&sc->bge_ldata.bge_rx_return_ring, &sc->bge_cdata.bge_rx_return_ring_map, &sc->bge_ldata.bge_rx_return_ring_paddr, "RX return ring"); if (error) return (error); /* Create tag for TX ring. */ error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_TX_RING_SZ, &sc->bge_cdata.bge_tx_ring_tag, (uint8_t **)&sc->bge_ldata.bge_tx_ring, &sc->bge_cdata.bge_tx_ring_map, &sc->bge_ldata.bge_tx_ring_paddr, "TX ring"); if (error) return (error); /* * Create tag for status block. * Because we only use single Tx/Rx/Rx return ring, use * minimum status block size except BCM5700 AX/BX which * seems to want to see full status block size regardless * of configured number of ring. */ if (sc->bge_asicrev == BGE_ASICREV_BCM5700 && sc->bge_chipid != BGE_CHIPID_BCM5700_C0) sbsz = BGE_STATUS_BLK_SZ; else sbsz = 32; error = bge_dma_ring_alloc(sc, PAGE_SIZE, sbsz, &sc->bge_cdata.bge_status_tag, (uint8_t **)&sc->bge_ldata.bge_status_block, &sc->bge_cdata.bge_status_map, &sc->bge_ldata.bge_status_block_paddr, "status block"); if (error) return (error); /* Create tag for statistics block. */ error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_STATS_SZ, &sc->bge_cdata.bge_stats_tag, (uint8_t **)&sc->bge_ldata.bge_stats, &sc->bge_cdata.bge_stats_map, &sc->bge_ldata.bge_stats_paddr, "statistics block"); if (error) return (error); /* Create tag for jumbo RX ring. */ if (BGE_IS_JUMBO_CAPABLE(sc)) { error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_JUMBO_RX_RING_SZ, &sc->bge_cdata.bge_rx_jumbo_ring_tag, (uint8_t **)&sc->bge_ldata.bge_rx_jumbo_ring, &sc->bge_cdata.bge_rx_jumbo_ring_map, &sc->bge_ldata.bge_rx_jumbo_ring_paddr, "jumbo RX ring"); if (error) return (error); } /* Create parent tag for buffers. */ if ((sc->bge_flags & BGE_FLAG_4G_BNDRY_BUG) != 0) { /* * XXX * watchdog timeout issue was observed on BCM5704 which * lives behind PCI-X bridge(e.g AMD 8131 PCI-X bridge). * Both limiting DMA address space to 32bits and flushing * mailbox write seem to address the issue. */ if (sc->bge_pcixcap != 0) lowaddr = BUS_SPACE_MAXADDR_32BIT; } error = bus_dma_tag_create(bus_get_dma_tag(sc->bge_dev), 1, 0, lowaddr, BUS_SPACE_MAXADDR, NULL, NULL, BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL, &sc->bge_cdata.bge_buffer_tag); if (error != 0) { device_printf(sc->bge_dev, "could not allocate buffer dma tag\n"); return (ENOMEM); } /* Create tag for Tx mbufs. */ if (sc->bge_flags & (BGE_FLAG_TSO | BGE_FLAG_TSO3)) { txsegsz = BGE_TSOSEG_SZ; txmaxsegsz = 65535 + sizeof(struct ether_vlan_header); } else { txsegsz = MCLBYTES; txmaxsegsz = MCLBYTES * BGE_NSEG_NEW; } error = bus_dma_tag_create(sc->bge_cdata.bge_buffer_tag, 1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, txmaxsegsz, BGE_NSEG_NEW, txsegsz, 0, NULL, NULL, &sc->bge_cdata.bge_tx_mtag); if (error) { device_printf(sc->bge_dev, "could not allocate TX dma tag\n"); return (ENOMEM); } /* Create tag for Rx mbufs. */ if (sc->bge_flags & BGE_FLAG_JUMBO_STD) rxmaxsegsz = MJUM9BYTES; else rxmaxsegsz = MCLBYTES; error = bus_dma_tag_create(sc->bge_cdata.bge_buffer_tag, 1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, rxmaxsegsz, 1, rxmaxsegsz, 0, NULL, NULL, &sc->bge_cdata.bge_rx_mtag); if (error) { device_printf(sc->bge_dev, "could not allocate RX dma tag\n"); return (ENOMEM); } /* Create DMA maps for RX buffers. 
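 * One map is created per standard RX descriptor, plus a spare map that
 * is used when swapping in a replacement mbuf for a received buffer.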
*/ error = bus_dmamap_create(sc->bge_cdata.bge_rx_mtag, 0, &sc->bge_cdata.bge_rx_std_sparemap); if (error) { device_printf(sc->bge_dev, "can't create spare DMA map for RX\n"); return (ENOMEM); } for (i = 0; i < BGE_STD_RX_RING_CNT; i++) { error = bus_dmamap_create(sc->bge_cdata.bge_rx_mtag, 0, &sc->bge_cdata.bge_rx_std_dmamap[i]); if (error) { device_printf(sc->bge_dev, "can't create DMA map for RX\n"); return (ENOMEM); } } /* Create DMA maps for TX buffers. */ for (i = 0; i < BGE_TX_RING_CNT; i++) { error = bus_dmamap_create(sc->bge_cdata.bge_tx_mtag, 0, &sc->bge_cdata.bge_tx_dmamap[i]); if (error) { device_printf(sc->bge_dev, "can't create DMA map for TX\n"); return (ENOMEM); } } /* Create tags for jumbo RX buffers. */ if (BGE_IS_JUMBO_CAPABLE(sc)) { error = bus_dma_tag_create(sc->bge_cdata.bge_buffer_tag, 1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MJUM9BYTES, BGE_NSEG_JUMBO, PAGE_SIZE, 0, NULL, NULL, &sc->bge_cdata.bge_mtag_jumbo); if (error) { device_printf(sc->bge_dev, "could not allocate jumbo dma tag\n"); return (ENOMEM); } /* Create DMA maps for jumbo RX buffers. */ error = bus_dmamap_create(sc->bge_cdata.bge_mtag_jumbo, 0, &sc->bge_cdata.bge_rx_jumbo_sparemap); if (error) { device_printf(sc->bge_dev, "can't create spare DMA map for jumbo RX\n"); return (ENOMEM); } for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) { error = bus_dmamap_create(sc->bge_cdata.bge_mtag_jumbo, 0, &sc->bge_cdata.bge_rx_jumbo_dmamap[i]); if (error) { device_printf(sc->bge_dev, "can't create DMA map for jumbo RX\n"); return (ENOMEM); } } } return (0); } /* * Return true if this device has more than one port. */ static int bge_has_multiple_ports(struct bge_softc *sc) { device_t dev = sc->bge_dev; u_int b, d, f, fscan, s; d = pci_get_domain(dev); b = pci_get_bus(dev); s = pci_get_slot(dev); f = pci_get_function(dev); for (fscan = 0; fscan <= PCI_FUNCMAX; fscan++) if (fscan != f && pci_find_dbsf(d, b, s, fscan) != NULL) return (1); return (0); } /* * Return true if MSI can be used with this device. */ static int bge_can_use_msi(struct bge_softc *sc) { int can_use_msi = 0; if (sc->bge_msi == 0) return (0); /* Disable MSI for polling(4). */ #ifdef DEVICE_POLLING return (0); #endif switch (sc->bge_asicrev) { case BGE_ASICREV_BCM5714_A0: case BGE_ASICREV_BCM5714: /* * Apparently, MSI doesn't work when these chips are * configured in single-port mode. */ if (bge_has_multiple_ports(sc)) can_use_msi = 1; break; case BGE_ASICREV_BCM5750: if (sc->bge_chiprev != BGE_CHIPREV_5750_AX && sc->bge_chiprev != BGE_CHIPREV_5750_BX) can_use_msi = 1; break; default: if (BGE_IS_575X_PLUS(sc)) can_use_msi = 1; } return (can_use_msi); } static int bge_mbox_reorder(struct bge_softc *sc) { /* Lists of PCI bridges that are known to reorder mailbox writes. 
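 * bge_mbox_reorder() walks up the parent PCI bridges of the device and
 * returns 1 (enabling the workaround) when any of them matches an entry
 * in this table.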
*/ static const struct mbox_reorder { const uint16_t vendor; const uint16_t device; const char *desc; } mbox_reorder_lists[] = { { 0x1022, 0x7450, "AMD-8131 PCI-X Bridge" }, }; devclass_t pci, pcib; device_t bus, dev; int i; pci = devclass_find("pci"); pcib = devclass_find("pcib"); dev = sc->bge_dev; bus = device_get_parent(dev); for (;;) { dev = device_get_parent(bus); bus = device_get_parent(dev); if (device_get_devclass(dev) != pcib) break; for (i = 0; i < nitems(mbox_reorder_lists); i++) { if (pci_get_vendor(dev) == mbox_reorder_lists[i].vendor && pci_get_device(dev) == mbox_reorder_lists[i].device) { device_printf(sc->bge_dev, "enabling MBOX workaround for %s\n", mbox_reorder_lists[i].desc); return (1); } } if (device_get_devclass(bus) != pci) break; } return (0); } static void bge_devinfo(struct bge_softc *sc) { uint32_t cfg, clk; device_printf(sc->bge_dev, "CHIP ID 0x%08x; ASIC REV 0x%02x; CHIP REV 0x%02x; ", sc->bge_chipid, sc->bge_asicrev, sc->bge_chiprev); if (sc->bge_flags & BGE_FLAG_PCIE) printf("PCI-E\n"); else if (sc->bge_flags & BGE_FLAG_PCIX) { printf("PCI-X "); cfg = CSR_READ_4(sc, BGE_MISC_CFG) & BGE_MISCCFG_BOARD_ID_MASK; if (cfg == BGE_MISCCFG_BOARD_ID_5704CIOBE) clk = 133; else { clk = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1F; switch (clk) { case 0: clk = 33; break; case 2: clk = 50; break; case 4: clk = 66; break; case 6: clk = 100; break; case 7: clk = 133; break; } } printf("%u MHz\n", clk); } else { if (sc->bge_pcixcap != 0) printf("PCI on PCI-X "); else printf("PCI "); cfg = pci_read_config(sc->bge_dev, BGE_PCI_PCISTATE, 4); if (cfg & BGE_PCISTATE_PCI_BUSSPEED) clk = 66; else clk = 33; if (cfg & BGE_PCISTATE_32BIT_BUS) printf("%u MHz; 32bit\n", clk); else printf("%u MHz; 64bit\n", clk); } } static int bge_attach(device_t dev) { struct if_attach_args ifat = { .ifat_version = IF_ATTACH_VERSION, .ifat_drv = &bge_ifdrv, .ifat_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST, .ifat_capabilities = IFCAP_HWCSUM | IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_VLAN_HWCSUM, }; struct bge_softc *sc; uint32_t hwcfg = 0, misccfg, pcistate; u_char eaddr[ETHER_ADDR_LEN]; int capmask, error, reg, rid, trys; sc = device_get_softc(dev); sc->bge_dev = dev; BGE_LOCK_INIT(sc, device_get_nameunit(dev)); TASK_INIT(&sc->bge_intr_task, 0, bge_intr_task, sc); callout_init_mtx(&sc->bge_stat_ch, &sc->bge_mtx, 0); pci_enable_busmaster(dev); /* * Allocate control/status registers. */ rid = PCIR_BAR(0); sc->bge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->bge_res == NULL) { device_printf (sc->bge_dev, "couldn't map BAR0 memory\n"); error = ENXIO; goto fail; } /* Save various chip information. */ sc->bge_func_addr = pci_get_function(dev); sc->bge_chipid = bge_chipid(dev); sc->bge_asicrev = BGE_ASICREV(sc->bge_chipid); sc->bge_chiprev = BGE_CHIPREV(sc->bge_chipid); /* Set default PHY address. */ sc->bge_phy_addr = 1; /* * PHY address mapping for various devices. * * | F0 Cu | F0 Sr | F1 Cu | F1 Sr | * ---------+-------+-------+-------+-------+ * BCM57XX | 1 | X | X | X | * BCM5704 | 1 | X | 1 | X | * BCM5717 | 1 | 8 | 2 | 9 | * BCM5719 | 1 | 8 | 2 | 9 | * BCM5720 | 1 | 8 | 2 | 9 | * * | F2 Cu | F2 Sr | F3 Cu | F3 Sr | * ---------+-------+-------+-------+-------+ * BCM57XX | X | X | X | X | * BCM5704 | X | X | X | X | * BCM5717 | X | X | X | X | * BCM5719 | 3 | 10 | 4 | 11 | * BCM5720 | X | X | X | X | * * Other addresses may respond but they are not * IEEE compliant PHYs and should be ignored. 
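 * The assignments below implement this table as bge_func_addr + 1 for
 * copper and bge_func_addr + 8 for SerDes; e.g. function 2 of a BCM5719
 * maps to PHY address 3 (copper) or 10 (SerDes).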
*/ if (sc->bge_asicrev == BGE_ASICREV_BCM5717 || sc->bge_asicrev == BGE_ASICREV_BCM5719 || sc->bge_asicrev == BGE_ASICREV_BCM5720) { if (sc->bge_chipid != BGE_CHIPID_BCM5717_A0) { if (CSR_READ_4(sc, BGE_SGDIG_STS) & BGE_SGDIGSTS_IS_SERDES) sc->bge_phy_addr = sc->bge_func_addr + 8; else sc->bge_phy_addr = sc->bge_func_addr + 1; } else { if (CSR_READ_4(sc, BGE_CPMU_PHY_STRAP) & BGE_CPMU_PHY_STRAP_IS_SERDES) sc->bge_phy_addr = sc->bge_func_addr + 8; else sc->bge_phy_addr = sc->bge_func_addr + 1; } } if (bge_has_eaddr(sc)) sc->bge_flags |= BGE_FLAG_EADDR; /* Save chipset family. */ switch (sc->bge_asicrev) { case BGE_ASICREV_BCM5762: case BGE_ASICREV_BCM57765: case BGE_ASICREV_BCM57766: sc->bge_flags |= BGE_FLAG_57765_PLUS; /* FALLTHROUGH */ case BGE_ASICREV_BCM5717: case BGE_ASICREV_BCM5719: case BGE_ASICREV_BCM5720: sc->bge_flags |= BGE_FLAG_5717_PLUS | BGE_FLAG_5755_PLUS | BGE_FLAG_575X_PLUS | BGE_FLAG_5705_PLUS | BGE_FLAG_JUMBO | BGE_FLAG_JUMBO_FRAME; if (sc->bge_asicrev == BGE_ASICREV_BCM5719 || sc->bge_asicrev == BGE_ASICREV_BCM5720) { /* * Enable work around for DMA engine miscalculation * of TXMBUF available space. */ sc->bge_flags |= BGE_FLAG_RDMA_BUG; if (sc->bge_asicrev == BGE_ASICREV_BCM5719 && sc->bge_chipid == BGE_CHIPID_BCM5719_A0) { /* Jumbo frame on BCM5719 A0 does not work. */ sc->bge_flags &= ~BGE_FLAG_JUMBO; } } break; case BGE_ASICREV_BCM5755: case BGE_ASICREV_BCM5761: case BGE_ASICREV_BCM5784: case BGE_ASICREV_BCM5785: case BGE_ASICREV_BCM5787: case BGE_ASICREV_BCM57780: sc->bge_flags |= BGE_FLAG_5755_PLUS | BGE_FLAG_575X_PLUS | BGE_FLAG_5705_PLUS; break; case BGE_ASICREV_BCM5700: case BGE_ASICREV_BCM5701: case BGE_ASICREV_BCM5703: case BGE_ASICREV_BCM5704: sc->bge_flags |= BGE_FLAG_5700_FAMILY | BGE_FLAG_JUMBO; break; case BGE_ASICREV_BCM5714_A0: case BGE_ASICREV_BCM5780: case BGE_ASICREV_BCM5714: sc->bge_flags |= BGE_FLAG_5714_FAMILY | BGE_FLAG_JUMBO_STD; /* FALLTHROUGH */ case BGE_ASICREV_BCM5750: case BGE_ASICREV_BCM5752: case BGE_ASICREV_BCM5906: sc->bge_flags |= BGE_FLAG_575X_PLUS; /* FALLTHROUGH */ case BGE_ASICREV_BCM5705: sc->bge_flags |= BGE_FLAG_5705_PLUS; break; } /* Identify chips with APE processor. */ switch (sc->bge_asicrev) { case BGE_ASICREV_BCM5717: case BGE_ASICREV_BCM5719: case BGE_ASICREV_BCM5720: case BGE_ASICREV_BCM5761: case BGE_ASICREV_BCM5762: sc->bge_flags |= BGE_FLAG_APE; break; } /* Chips with APE need BAR2 access for APE registers/memory. */ if ((sc->bge_flags & BGE_FLAG_APE) != 0) { rid = PCIR_BAR(2); sc->bge_res2 = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->bge_res2 == NULL) { device_printf (sc->bge_dev, "couldn't map BAR2 memory\n"); error = ENXIO; goto fail; } /* Enable APE register/memory access by host driver. */ pcistate = pci_read_config(dev, BGE_PCI_PCISTATE, 4); pcistate |= BGE_PCISTATE_ALLOW_APE_CTLSPC_WR | BGE_PCISTATE_ALLOW_APE_SHMEM_WR | BGE_PCISTATE_ALLOW_APE_PSPACE_WR; pci_write_config(dev, BGE_PCI_PCISTATE, pcistate, 4); bge_ape_lock_init(sc); bge_ape_read_fw_ver(sc); } /* Add SYSCTLs, requires the chipset family to be set. */ bge_add_sysctls(sc); /* Identify the chips that use an CPMU. */ if (BGE_IS_5717_PLUS(sc) || sc->bge_asicrev == BGE_ASICREV_BCM5784 || sc->bge_asicrev == BGE_ASICREV_BCM5761 || sc->bge_asicrev == BGE_ASICREV_BCM5785 || sc->bge_asicrev == BGE_ASICREV_BCM57780) sc->bge_flags |= BGE_FLAG_CPMU_PRESENT; if ((sc->bge_flags & BGE_FLAG_CPMU_PRESENT) != 0) sc->bge_mi_mode = BGE_MIMODE_500KHZ_CONST; else sc->bge_mi_mode = BGE_MIMODE_BASE; /* Enable auto polling for BCM570[0-5]. 
*/ if (BGE_IS_5700_FAMILY(sc) || sc->bge_asicrev == BGE_ASICREV_BCM5705) sc->bge_mi_mode |= BGE_MIMODE_AUTOPOLL; /* * All Broadcom controllers have 4GB boundary DMA bug. * Whenever an address crosses a multiple of the 4GB boundary * (including 4GB, 8Gb, 12Gb, etc.) and makes the transition * from 0xX_FFFF_FFFF to 0x(X+1)_0000_0000 an internal DMA * state machine will lockup and cause the device to hang. */ sc->bge_flags |= BGE_FLAG_4G_BNDRY_BUG; /* BCM5755 or higher and BCM5906 have short DMA bug. */ if (BGE_IS_5755_PLUS(sc) || sc->bge_asicrev == BGE_ASICREV_BCM5906) sc->bge_flags |= BGE_FLAG_SHORT_DMA_BUG; /* * BCM5719 cannot handle DMA requests for DMA segments that * have larger than 4KB in size. However the maximum DMA * segment size created in DMA tag is 4KB for TSO, so we * wouldn't encounter the issue here. */ if (sc->bge_asicrev == BGE_ASICREV_BCM5719) sc->bge_flags |= BGE_FLAG_4K_RDMA_BUG; misccfg = CSR_READ_4(sc, BGE_MISC_CFG) & BGE_MISCCFG_BOARD_ID_MASK; if (sc->bge_asicrev == BGE_ASICREV_BCM5705) { if (misccfg == BGE_MISCCFG_BOARD_ID_5788 || misccfg == BGE_MISCCFG_BOARD_ID_5788M) sc->bge_flags |= BGE_FLAG_5788; } capmask = BMSR_DEFCAPMASK; if ((sc->bge_asicrev == BGE_ASICREV_BCM5703 && (misccfg == 0x4000 || misccfg == 0x8000)) || (sc->bge_asicrev == BGE_ASICREV_BCM5705 && pci_get_vendor(dev) == BCOM_VENDORID && (pci_get_device(dev) == BCOM_DEVICEID_BCM5901 || pci_get_device(dev) == BCOM_DEVICEID_BCM5901A2 || pci_get_device(dev) == BCOM_DEVICEID_BCM5705F)) || (pci_get_vendor(dev) == BCOM_VENDORID && (pci_get_device(dev) == BCOM_DEVICEID_BCM5751F || pci_get_device(dev) == BCOM_DEVICEID_BCM5753F || pci_get_device(dev) == BCOM_DEVICEID_BCM5787F)) || pci_get_device(dev) == BCOM_DEVICEID_BCM57790 || pci_get_device(dev) == BCOM_DEVICEID_BCM57791 || pci_get_device(dev) == BCOM_DEVICEID_BCM57795 || sc->bge_asicrev == BGE_ASICREV_BCM5906) { /* These chips are 10/100 only. */ capmask &= ~BMSR_EXTSTAT; sc->bge_phy_flags |= BGE_PHY_NO_WIRESPEED; } /* * Some controllers seem to require a special firmware to use * TSO. But the firmware is not available to FreeBSD and Linux * claims that the TSO performed by the firmware is slower than * hardware based TSO. Moreover the firmware based TSO has one * known bug which can't handle TSO if Ethernet header + IP/TCP * header is greater than 80 bytes. A workaround for the TSO * bug exist but it seems it's too expensive than not using * TSO at all. Some hardwares also have the TSO bug so limit * the TSO to the controllers that are not affected TSO issues * (e.g. 5755 or higher). */ if (BGE_IS_5717_PLUS(sc)) { /* BCM5717 requires different TSO configuration. */ sc->bge_flags |= BGE_FLAG_TSO3; if (sc->bge_asicrev == BGE_ASICREV_BCM5719 && sc->bge_chipid == BGE_CHIPID_BCM5719_A0) { /* TSO on BCM5719 A0 does not work. */ sc->bge_flags &= ~BGE_FLAG_TSO3; } } else if (BGE_IS_5755_PLUS(sc)) { /* * BCM5754 and BCM5787 shares the same ASIC id so * explicit device id check is required. * Due to unknown reason TSO does not work on BCM5755M. */ if (pci_get_device(dev) != BCOM_DEVICEID_BCM5754 && pci_get_device(dev) != BCOM_DEVICEID_BCM5754M && pci_get_device(dev) != BCOM_DEVICEID_BCM5755M) sc->bge_flags |= BGE_FLAG_TSO; } /* * Check if this is a PCI-X or PCI Express device. */ if (pci_find_cap(dev, PCIY_EXPRESS, ®) == 0) { /* * Found a PCI Express capabilities register, this * must be a PCI Express device. */ sc->bge_flags |= BGE_FLAG_PCIE; sc->bge_expcap = reg; /* Extract supported maximum payload size. 
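 * The capability field encodes the payload size as a power of two:
 * payload bytes = 128 << encoding, so an encoding of 2 means 512 bytes.
 * BCM5719/BCM5720 are additionally limited to a 2048 byte maximum read
 * request below.
 */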
*/ sc->bge_mps = pci_read_config(dev, sc->bge_expcap + PCIER_DEVICE_CAP, 2); sc->bge_mps = 128 << (sc->bge_mps & PCIEM_CAP_MAX_PAYLOAD); if (sc->bge_asicrev == BGE_ASICREV_BCM5719 || sc->bge_asicrev == BGE_ASICREV_BCM5720) sc->bge_expmrq = 2048; else sc->bge_expmrq = 4096; pci_set_max_read_req(dev, sc->bge_expmrq); } else { /* * Check if the device is in PCI-X Mode. * (This bit is not valid on PCI Express controllers.) */ if (pci_find_cap(dev, PCIY_PCIX, ®) == 0) sc->bge_pcixcap = reg; if ((pci_read_config(dev, BGE_PCI_PCISTATE, 4) & BGE_PCISTATE_PCI_BUSMODE) == 0) sc->bge_flags |= BGE_FLAG_PCIX; } /* * The 40bit DMA bug applies to the 5714/5715 controllers and is * not actually a MAC controller bug but an issue with the embedded * PCIe to PCI-X bridge in the device. Use 40bit DMA workaround. */ if (BGE_IS_5714_FAMILY(sc) && (sc->bge_flags & BGE_FLAG_PCIX)) sc->bge_flags |= BGE_FLAG_40BIT_BUG; /* * Some PCI-X bridges are known to trigger write reordering to * the mailbox registers. Typical phenomena is watchdog timeouts * caused by out-of-order TX completions. Enable workaround for * PCI-X devices that live behind these bridges. * Note, PCI-X controllers can run in PCI mode so we can't use * BGE_FLAG_PCIX flag to detect PCI-X controllers. */ if (sc->bge_pcixcap != 0 && bge_mbox_reorder(sc) != 0) sc->bge_flags |= BGE_FLAG_MBOX_REORDER; /* * Allocate the interrupt, using MSI if possible. These devices * support 8 MSI messages, but only the first one is used in * normal operation. */ rid = 0; if (pci_find_cap(sc->bge_dev, PCIY_MSI, ®) == 0) { sc->bge_msicap = reg; reg = 1; if (bge_can_use_msi(sc) && pci_alloc_msi(dev, ®) == 0) { rid = 1; sc->bge_flags |= BGE_FLAG_MSI; } } /* * All controllers except BCM5700 supports tagged status but * we use tagged status only for MSI case on BCM5717. Otherwise * MSI on BCM5717 does not work. */ #ifndef DEVICE_POLLING if (sc->bge_flags & BGE_FLAG_MSI && BGE_IS_5717_PLUS(sc)) sc->bge_flags |= BGE_FLAG_TAGGED_STATUS; #endif sc->bge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE | (rid != 0 ? 0 : RF_SHAREABLE)); if (sc->bge_irq == NULL) { device_printf(sc->bge_dev, "couldn't map interrupt\n"); error = ENXIO; goto fail; } bge_devinfo(sc); sc->bge_asf_mode = 0; /* No ASF if APE present. */ if ((sc->bge_flags & BGE_FLAG_APE) == 0) { if (bge_allow_asf && (bge_readmem_ind(sc, BGE_SRAM_DATA_SIG) == BGE_SRAM_DATA_SIG_MAGIC)) { if (bge_readmem_ind(sc, BGE_SRAM_DATA_CFG) & BGE_HWCFG_ASF) { sc->bge_asf_mode |= ASF_ENABLE; sc->bge_asf_mode |= ASF_STACKUP; if (BGE_IS_575X_PLUS(sc)) sc->bge_asf_mode |= ASF_NEW_HANDSHAKE; } } } bge_stop_fw(sc); bge_sig_pre_reset(sc, BGE_RESET_SHUTDOWN); if (bge_reset(sc)) { device_printf(sc->bge_dev, "chip reset failed\n"); error = ENXIO; goto fail; } bge_sig_legacy(sc, BGE_RESET_SHUTDOWN); bge_sig_post_reset(sc, BGE_RESET_SHUTDOWN); if (bge_chipinit(sc)) { device_printf(sc->bge_dev, "chip initialization failed\n"); error = ENXIO; goto fail; } error = bge_get_eaddr(sc, eaddr); if (error) { device_printf(sc->bge_dev, "failed to read station address\n"); error = ENXIO; goto fail; } /* 5705 limits RX return ring to 512 entries. */ if (BGE_IS_5717_PLUS(sc)) sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT; else if (BGE_IS_5705_PLUS(sc)) sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705; else sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT; if (bge_dma_alloc(sc)) { device_printf(sc->bge_dev, "failed to allocate DMA resources\n"); error = ENXIO; goto fail; } /* Set default tuneable values. 
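 * Defaults: 150 RX/TX coalescing ticks, 10 coalesced BDs per direction
 * and one statistics update per BGE_TICKS_PER_SEC interval.  These feed
 * the host coalescing registers programmed in bge_blockinit() and can
 * be changed at runtime through the driver's coalescing sysctls.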
*/ sc->bge_stat_ticks = BGE_TICKS_PER_SEC; sc->bge_rx_coal_ticks = 150; sc->bge_tx_coal_ticks = 150; sc->bge_rx_max_coal_bds = 10; sc->bge_tx_max_coal_bds = 10; /* Initialize checksum features to use. */ - sc->bge_csum_features = BGE_CSUM_FEATURES; + sc->bge_hwassist = (CSUM_IP | CSUM_TCP); if (sc->bge_forced_udpcsum != 0) - sc->bge_csum_features |= CSUM_UDP; + sc->bge_hwassist |= CSUM_UDP; /* * Figure out what sort of media we have by checking the * hardware config word in the first 32k of NIC internal memory, * or fall back to examining the EEPROM if necessary. * Note: on some BCM5700 cards, this value appears to be unset. * If that's the case, we have to rely on identifying the NIC * by its PCI subsystem ID, as we do below for the SysKonnect * SK-9D41. */ if (bge_readmem_ind(sc, BGE_SRAM_DATA_SIG) == BGE_SRAM_DATA_SIG_MAGIC) hwcfg = bge_readmem_ind(sc, BGE_SRAM_DATA_CFG); else if ((sc->bge_flags & BGE_FLAG_EADDR) && (sc->bge_asicrev != BGE_ASICREV_BCM5906)) { if (bge_read_eeprom(sc, (caddr_t)&hwcfg, BGE_EE_HWCFG_OFFSET, sizeof(hwcfg))) { device_printf(sc->bge_dev, "failed to read EEPROM\n"); error = ENXIO; goto fail; } hwcfg = ntohl(hwcfg); } /* The SysKonnect SK-9D41 is a 1000baseSX card. */ if ((pci_read_config(dev, BGE_PCI_SUBSYS, 4) >> 16) == SK_SUBSYSID_9D41 || (hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER) { if (BGE_IS_5705_PLUS(sc)) { sc->bge_flags |= BGE_FLAG_MII_SERDES; sc->bge_phy_flags |= BGE_PHY_NO_WIRESPEED; } else sc->bge_flags |= BGE_FLAG_TBI; } /* Set various PHY bug flags. */ if (sc->bge_chipid == BGE_CHIPID_BCM5701_A0 || sc->bge_chipid == BGE_CHIPID_BCM5701_B0) sc->bge_phy_flags |= BGE_PHY_CRC_BUG; if (sc->bge_chiprev == BGE_CHIPREV_5703_AX || sc->bge_chiprev == BGE_CHIPREV_5704_AX) sc->bge_phy_flags |= BGE_PHY_ADC_BUG; if (sc->bge_chipid == BGE_CHIPID_BCM5704_A0) sc->bge_phy_flags |= BGE_PHY_5704_A0_BUG; if (pci_get_subvendor(dev) == DELL_VENDORID) sc->bge_phy_flags |= BGE_PHY_NO_3LED; if ((BGE_IS_5705_PLUS(sc)) && sc->bge_asicrev != BGE_ASICREV_BCM5906 && sc->bge_asicrev != BGE_ASICREV_BCM5785 && sc->bge_asicrev != BGE_ASICREV_BCM57780 && !BGE_IS_5717_PLUS(sc)) { if (sc->bge_asicrev == BGE_ASICREV_BCM5755 || sc->bge_asicrev == BGE_ASICREV_BCM5761 || sc->bge_asicrev == BGE_ASICREV_BCM5784 || sc->bge_asicrev == BGE_ASICREV_BCM5787) { if (pci_get_device(dev) != BCOM_DEVICEID_BCM5722 && pci_get_device(dev) != BCOM_DEVICEID_BCM5756) sc->bge_phy_flags |= BGE_PHY_JITTER_BUG; if (pci_get_device(dev) == BCOM_DEVICEID_BCM5755M) sc->bge_phy_flags |= BGE_PHY_ADJUST_TRIM; } else sc->bge_phy_flags |= BGE_PHY_BER_BUG; } /* * Don't enable Ethernet@WireSpeed for the 5700 or the * 5705 A0 and A1 chips. */ if (sc->bge_asicrev == BGE_ASICREV_BCM5700 || (sc->bge_asicrev == BGE_ASICREV_BCM5705 && (sc->bge_chipid != BGE_CHIPID_BCM5705_A0 && sc->bge_chipid != BGE_CHIPID_BCM5705_A1))) sc->bge_phy_flags |= BGE_PHY_NO_WIRESPEED; if (sc->bge_flags & BGE_FLAG_TBI) { ifmedia_init(&sc->bge_ifmedia, IFM_IMASK, bge_ifmedia_upd, bge_ifmedia_sts); ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_1000_SX, 0, NULL); ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_1000_SX | IFM_FDX, 0, NULL); ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_AUTO, 0, NULL); ifmedia_set(&sc->bge_ifmedia, IFM_ETHER | IFM_AUTO); sc->bge_ifmedia.ifm_media = sc->bge_ifmedia.ifm_cur->ifm_media; } else { /* * Do transceiver setup and tell the firmware the * driver is down so we can try to get access the * probe if ASF is running. Retry a couple of times * if we get a conflict with the ASF firmware accessing * the PHY. 
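 * In practice the code below allows up to four extra mii_attach()
 * attempts, issuing a BMCR_RESET to the PHY before each retry.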
*/ trys = 0; BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); again: bge_asf_driver_up(sc); error = mii_attach(dev, &sc->bge_miibus, (ifm_change_cb_t)bge_ifmedia_upd, (ifm_stat_cb_t)bge_ifmedia_sts, capmask, sc->bge_phy_addr, MII_OFFSET_ANY, MIIF_DOPAUSE); if (error != 0) { if (trys++ < 4) { device_printf(sc->bge_dev, "Try again\n"); bge_miibus_writereg(sc->bge_dev, sc->bge_phy_addr, MII_BMCR, BMCR_RESET); goto again; } device_printf(sc->bge_dev, "attaching PHYs failed\n"); goto fail; } /* * Now tell the firmware we are going up after probing the PHY */ if (sc->bge_asf_mode & ASF_STACKUP) BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); } /* * When using the BCM5701 in PCI-X mode, data corruption has * been observed in the first few bytes of some received packets. * Aligning the packet buffer in memory eliminates the corruption. * Unfortunately, this misaligns the packet payloads. On platforms * which do not support unaligned accesses, we will realign the * payloads by copying the received packets. */ if (sc->bge_asicrev == BGE_ASICREV_BCM5701 && sc->bge_flags & BGE_FLAG_PCIX) sc->bge_flags |= BGE_FLAG_RX_ALIGNBUG; /* * Hookup IRQ last. */ if (BGE_IS_5755_PLUS(sc) && sc->bge_flags & BGE_FLAG_MSI) { /* Take advantage of single-shot MSI. */ CSR_WRITE_4(sc, BGE_MSI_MODE, CSR_READ_4(sc, BGE_MSI_MODE) & ~BGE_MSIMODE_ONE_SHOT_DISABLE); sc->bge_tq = taskqueue_create_fast("bge_taskq", M_WAITOK, taskqueue_thread_enqueue, &sc->bge_tq); if (sc->bge_tq == NULL) { device_printf(dev, "could not create taskqueue.\n"); error = ENOMEM; goto fail; } error = taskqueue_start_threads(&sc->bge_tq, 1, PI_NET, "%s taskq", device_get_nameunit(sc->bge_dev)); if (error != 0) { device_printf(dev, "could not start threads.\n"); goto fail; } error = bus_setup_intr(dev, sc->bge_irq, INTR_TYPE_NET | INTR_MPSAFE, bge_msi_intr, NULL, sc, &sc->bge_intrhand); } else error = bus_setup_intr(dev, sc->bge_irq, INTR_TYPE_NET | INTR_MPSAFE, NULL, bge_intr, sc, &sc->bge_intrhand); if (error) { device_printf(sc->bge_dev, "couldn't set up irq\n"); goto fail; } /* Attach interface. */ ifat.ifat_softc = sc; ifat.ifat_dunit = device_get_unit(dev); ifat.ifat_lla = eaddr; - ifat.ifat_hwassist = sc->bge_csum_features; + ifat.ifat_hwassist = sc->bge_hwassist; if ((sc->bge_flags & (BGE_FLAG_TSO | BGE_FLAG_TSO3)) != 0) { ifat.ifat_hwassist |= CSUM_TSO; ifat.ifat_capabilities |= IFCAP_TSO4 | IFCAP_VLAN_HWTSO; } ifat.ifat_capenable = ifat.ifat_capabilities; #ifdef DEVICE_POLLING ifat.ifat_capabilities |= IFCAP_POLLING; #endif /* * 5700 B0 chips do not support checksumming correctly due * to hardware bugs. 
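 * For those parts IFCAP_HWCSUM is removed from the capabilities and the
 * hwassist bits are cleared, so the stack falls back to software
 * checksumming.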
*/ if (sc->bge_chipid == BGE_CHIPID_BCM5700_B0) { ifat.ifat_capabilities &= ~IFCAP_HWCSUM; ifat.ifat_capenable &= ~IFCAP_HWCSUM; ifat.ifat_hwassist = 0; } - + sc->bge_capenable = ifat.ifat_capenable; + sc->bge_mtu = ETHERMTU; sc->bge_ifp = if_attach(&ifat); return (0); fail: bge_detach(dev); return (error); } static int bge_detach(device_t dev) { struct bge_softc *sc; if_t ifp; sc = device_get_softc(dev); ifp = sc->bge_ifp; #ifdef DEVICE_POLLING - if (if_get(ifp, IF_CAPENABLE) & IFCAP_POLLING) + if (sc->bge_capenable & IFCAP_POLLING) ether_poll_deregister(ifp); #endif if (device_is_attached(dev)) { if_detach(ifp); BGE_LOCK(sc); bge_stop(sc); BGE_UNLOCK(sc); callout_drain(&sc->bge_stat_ch); } if (sc->bge_tq) taskqueue_drain(sc->bge_tq, &sc->bge_intr_task); if (sc->bge_flags & BGE_FLAG_TBI) ifmedia_removeall(&sc->bge_ifmedia); else if (sc->bge_miibus != NULL) { bus_generic_detach(dev); device_delete_child(dev, sc->bge_miibus); } bge_release_resources(sc); return (0); } static void bge_release_resources(struct bge_softc *sc) { device_t dev; dev = sc->bge_dev; if (sc->bge_tq != NULL) taskqueue_free(sc->bge_tq); if (sc->bge_intrhand != NULL) bus_teardown_intr(dev, sc->bge_irq, sc->bge_intrhand); if (sc->bge_irq != NULL) { bus_release_resource(dev, SYS_RES_IRQ, rman_get_rid(sc->bge_irq), sc->bge_irq); pci_release_msi(dev); } if (sc->bge_res != NULL) bus_release_resource(dev, SYS_RES_MEMORY, rman_get_rid(sc->bge_res), sc->bge_res); if (sc->bge_res2 != NULL) bus_release_resource(dev, SYS_RES_MEMORY, rman_get_rid(sc->bge_res2), sc->bge_res2); bge_dma_free(sc); if (mtx_initialized(&sc->bge_mtx)) /* XXX */ BGE_LOCK_DESTROY(sc); } static int bge_reset(struct bge_softc *sc) { device_t dev; uint32_t cachesize, command, mac_mode, mac_mode_mask, reset, val; void (*write_op)(struct bge_softc *, int, int); uint16_t devctl; int i; dev = sc->bge_dev; mac_mode_mask = BGE_MACMODE_HALF_DUPLEX | BGE_MACMODE_PORTMODE; if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) != 0) mac_mode_mask |= BGE_MACMODE_APE_RX_EN | BGE_MACMODE_APE_TX_EN; mac_mode = CSR_READ_4(sc, BGE_MAC_MODE) & mac_mode_mask; if (BGE_IS_575X_PLUS(sc) && !BGE_IS_5714_FAMILY(sc) && (sc->bge_asicrev != BGE_ASICREV_BCM5906)) { if (sc->bge_flags & BGE_FLAG_PCIE) write_op = bge_writemem_direct; else write_op = bge_writemem_ind; } else write_op = bge_writereg_ind; if (sc->bge_asicrev != BGE_ASICREV_BCM5700 && sc->bge_asicrev != BGE_ASICREV_BCM5701) { CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_SET1); for (i = 0; i < 8000; i++) { if (CSR_READ_4(sc, BGE_NVRAM_SWARB) & BGE_NVRAMSWARB_GNT1) break; DELAY(20); } if (i == 8000) { if (bootverbose) device_printf(dev, "NVRAM lock timedout!\n"); } } /* Take APE lock when performing reset. */ bge_ape_lock(sc, BGE_APE_LOCK_GRC); /* Save some important PCI state. */ cachesize = pci_read_config(dev, BGE_PCI_CACHESZ, 4); command = pci_read_config(dev, BGE_PCI_CMD, 4); pci_write_config(dev, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_INDIRECT_ACCESS | BGE_PCIMISCCTL_MASK_PCI_INTR | BGE_HIF_SWAP_OPTIONS | BGE_PCIMISCCTL_PCISTATE_RW, 4); /* Disable fastboot on controllers that support it. */ if (sc->bge_asicrev == BGE_ASICREV_BCM5752 || BGE_IS_5755_PLUS(sc)) { if (bootverbose) device_printf(dev, "Disabling fastboot\n"); CSR_WRITE_4(sc, BGE_FASTBOOT_PC, 0x0); } /* * Write the magic number to SRAM at offset 0xB50. * When firmware finishes its initialization it will * write ~BGE_SRAM_FW_MB_MAGIC to the same location. 
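 * Later in bge_reset() the driver polls this mailbox and treats the
 * complemented magic value as the firmware-ready handshake; the BCM5906
 * signals completion through BGE_VCPU_STATUS instead.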
*/ bge_writemem_ind(sc, BGE_SRAM_FW_MB, BGE_SRAM_FW_MB_MAGIC); reset = BGE_MISCCFG_RESET_CORE_CLOCKS | BGE_32BITTIME_66MHZ; /* XXX: Broadcom Linux driver. */ if (sc->bge_flags & BGE_FLAG_PCIE) { if (sc->bge_asicrev != BGE_ASICREV_BCM5785 && (sc->bge_flags & BGE_FLAG_5717_PLUS) == 0) { if (CSR_READ_4(sc, 0x7E2C) == 0x60) /* PCIE 1.0 */ CSR_WRITE_4(sc, 0x7E2C, 0x20); } if (sc->bge_chipid != BGE_CHIPID_BCM5750_A0) { /* Prevent PCIE link training during global reset */ CSR_WRITE_4(sc, BGE_MISC_CFG, 1 << 29); reset |= 1 << 29; } } if (sc->bge_asicrev == BGE_ASICREV_BCM5906) { val = CSR_READ_4(sc, BGE_VCPU_STATUS); CSR_WRITE_4(sc, BGE_VCPU_STATUS, val | BGE_VCPU_STATUS_DRV_RESET); val = CSR_READ_4(sc, BGE_VCPU_EXT_CTRL); CSR_WRITE_4(sc, BGE_VCPU_EXT_CTRL, val & ~BGE_VCPU_EXT_CTRL_HALT_CPU); } /* * Set GPHY Power Down Override to leave GPHY * powered up in D0 uninitialized. */ if (BGE_IS_5705_PLUS(sc) && (sc->bge_flags & BGE_FLAG_CPMU_PRESENT) == 0) reset |= BGE_MISCCFG_GPHY_PD_OVERRIDE; /* Issue global reset */ write_op(sc, BGE_MISC_CFG, reset); if (sc->bge_flags & BGE_FLAG_PCIE) DELAY(100 * 1000); else DELAY(1000); /* XXX: Broadcom Linux driver. */ if (sc->bge_flags & BGE_FLAG_PCIE) { if (sc->bge_chipid == BGE_CHIPID_BCM5750_A0) { DELAY(500000); /* wait for link training to complete */ val = pci_read_config(dev, 0xC4, 4); pci_write_config(dev, 0xC4, val | (1 << 15), 4); } devctl = pci_read_config(dev, sc->bge_expcap + PCIER_DEVICE_CTL, 2); /* Clear enable no snoop and disable relaxed ordering. */ devctl &= ~(PCIEM_CTL_RELAXED_ORD_ENABLE | PCIEM_CTL_NOSNOOP_ENABLE); pci_write_config(dev, sc->bge_expcap + PCIER_DEVICE_CTL, devctl, 2); pci_set_max_read_req(dev, sc->bge_expmrq); /* Clear error status. */ pci_write_config(dev, sc->bge_expcap + PCIER_DEVICE_STA, PCIEM_STA_CORRECTABLE_ERROR | PCIEM_STA_NON_FATAL_ERROR | PCIEM_STA_FATAL_ERROR | PCIEM_STA_UNSUPPORTED_REQ, 2); } /* Reset some of the PCI state that got zapped by reset. */ pci_write_config(dev, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_INDIRECT_ACCESS | BGE_PCIMISCCTL_MASK_PCI_INTR | BGE_HIF_SWAP_OPTIONS | BGE_PCIMISCCTL_PCISTATE_RW, 4); val = BGE_PCISTATE_ROM_ENABLE | BGE_PCISTATE_ROM_RETRY_ENABLE; if (sc->bge_chipid == BGE_CHIPID_BCM5704_A0 && (sc->bge_flags & BGE_FLAG_PCIX) != 0) val |= BGE_PCISTATE_RETRY_SAME_DMA; if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) != 0) val |= BGE_PCISTATE_ALLOW_APE_CTLSPC_WR | BGE_PCISTATE_ALLOW_APE_SHMEM_WR | BGE_PCISTATE_ALLOW_APE_PSPACE_WR; pci_write_config(dev, BGE_PCI_PCISTATE, val, 4); pci_write_config(dev, BGE_PCI_CACHESZ, cachesize, 4); pci_write_config(dev, BGE_PCI_CMD, command, 4); /* * Disable PCI-X relaxed ordering to ensure status block update * comes first then packet buffer DMA. Otherwise driver may * read stale status block. */ if (sc->bge_flags & BGE_FLAG_PCIX) { devctl = pci_read_config(dev, sc->bge_pcixcap + PCIXR_COMMAND, 2); devctl &= ~PCIXM_COMMAND_ERO; if (sc->bge_asicrev == BGE_ASICREV_BCM5703) { devctl &= ~PCIXM_COMMAND_MAX_READ; devctl |= PCIXM_COMMAND_MAX_READ_2048; } else if (sc->bge_asicrev == BGE_ASICREV_BCM5704) { devctl &= ~(PCIXM_COMMAND_MAX_SPLITS | PCIXM_COMMAND_MAX_READ); devctl |= PCIXM_COMMAND_MAX_READ_2048; } pci_write_config(dev, sc->bge_pcixcap + PCIXR_COMMAND, devctl, 2); } /* Re-enable MSI, if necessary, and enable the memory arbiter. */ if (BGE_IS_5714_FAMILY(sc)) { /* This chip disables MSI on reset. 
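 * Re-enabling it therefore touches both the standard PCI MSI capability
 * (PCIM_MSICTRL_MSI_ENABLE in the message control register) and the
 * controller's own BGE_MSI_MODE register, which is what the block below
 * does.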
*/ if (sc->bge_flags & BGE_FLAG_MSI) { val = pci_read_config(dev, sc->bge_msicap + PCIR_MSI_CTRL, 2); pci_write_config(dev, sc->bge_msicap + PCIR_MSI_CTRL, val | PCIM_MSICTRL_MSI_ENABLE, 2); val = CSR_READ_4(sc, BGE_MSI_MODE); CSR_WRITE_4(sc, BGE_MSI_MODE, val | BGE_MSIMODE_ENABLE); } val = CSR_READ_4(sc, BGE_MARB_MODE); CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE | val); } else CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE); /* Fix up byte swapping. */ CSR_WRITE_4(sc, BGE_MODE_CTL, bge_dma_swap_options(sc)); val = CSR_READ_4(sc, BGE_MAC_MODE); val = (val & ~mac_mode_mask) | mac_mode; CSR_WRITE_4(sc, BGE_MAC_MODE, val); DELAY(40); bge_ape_unlock(sc, BGE_APE_LOCK_GRC); if (sc->bge_asicrev == BGE_ASICREV_BCM5906) { for (i = 0; i < BGE_TIMEOUT; i++) { val = CSR_READ_4(sc, BGE_VCPU_STATUS); if (val & BGE_VCPU_STATUS_INIT_DONE) break; DELAY(100); } if (i == BGE_TIMEOUT) { device_printf(dev, "reset timed out\n"); return (1); } } else { /* * Poll until we see the 1's complement of the magic number. * This indicates that the firmware initialization is complete. * We expect this to fail if no chip containing the Ethernet * address is fitted though. */ for (i = 0; i < BGE_TIMEOUT; i++) { DELAY(10); val = bge_readmem_ind(sc, BGE_SRAM_FW_MB); if (val == ~BGE_SRAM_FW_MB_MAGIC) break; } if ((sc->bge_flags & BGE_FLAG_EADDR) && i == BGE_TIMEOUT) device_printf(dev, "firmware handshake timed out, found 0x%08x\n", val); /* BCM57765 A0 needs additional time before accessing. */ if (sc->bge_chipid == BGE_CHIPID_BCM57765_A0) DELAY(10 * 1000); /* XXX */ } /* * The 5704 in TBI mode apparently needs some special * adjustment to insure the SERDES drive level is set * to 1.2V. */ if (sc->bge_asicrev == BGE_ASICREV_BCM5704 && sc->bge_flags & BGE_FLAG_TBI) { val = CSR_READ_4(sc, BGE_SERDES_CFG); val = (val & ~0xFFF) | 0x880; CSR_WRITE_4(sc, BGE_SERDES_CFG, val); } /* XXX: Broadcom Linux driver. */ if (sc->bge_flags & BGE_FLAG_PCIE && !BGE_IS_5717_PLUS(sc) && sc->bge_chipid != BGE_CHIPID_BCM5750_A0 && sc->bge_asicrev != BGE_ASICREV_BCM5785) { /* Enable Data FIFO protection. */ val = CSR_READ_4(sc, 0x7C00); CSR_WRITE_4(sc, 0x7C00, val | (1 << 25)); } if (sc->bge_asicrev == BGE_ASICREV_BCM5720) BGE_CLRBIT(sc, BGE_CPMU_CLCK_ORIDE, CPMU_CLCK_ORIDE_MAC_ORIDE_EN); return (0); } static __inline void bge_rxreuse_std(struct bge_softc *sc, int i) { struct bge_rx_bd *r; r = &sc->bge_ldata.bge_rx_std_ring[sc->bge_std]; r->bge_flags = BGE_RXBDFLAG_END; r->bge_len = sc->bge_cdata.bge_rx_std_seglen[i]; r->bge_idx = i; BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT); } static __inline void bge_rxreuse_jumbo(struct bge_softc *sc, int i) { struct bge_extrx_bd *r; r = &sc->bge_ldata.bge_rx_jumbo_ring[sc->bge_jumbo]; r->bge_flags = BGE_RXBDFLAG_JUMBO_RING | BGE_RXBDFLAG_END; r->bge_len0 = sc->bge_cdata.bge_rx_jumbo_seglen[i][0]; r->bge_len1 = sc->bge_cdata.bge_rx_jumbo_seglen[i][1]; r->bge_len2 = sc->bge_cdata.bge_rx_jumbo_seglen[i][2]; r->bge_len3 = sc->bge_cdata.bge_rx_jumbo_seglen[i][3]; r->bge_idx = i; BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT); } /* * Frame reception handling. This is called if there's a frame * on the receive return list. * * Note: we have to be able to handle two possibilities here: * 1) the frame is from the jumbo receive ring * 2) the frame is from the standard receive ring */ static int bge_rxeof(struct bge_softc *sc, uint16_t rx_prod, int holdlck) { if_t ifp; int rx_npkts = 0, stdcnt = 0, jumbocnt = 0; uint16_t rx_cons; rx_cons = sc->bge_rx_saved_considx; /* Nothing to do. 
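 * rx_prod is the RX return ring producer index that the caller
 * snapshotted from the status block; if it still equals our saved
 * consumer index, no new frames have completed and we return at once.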
*/ if (rx_cons == rx_prod) return (rx_npkts); ifp = sc->bge_ifp; bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag, sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_POSTREAD); bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag, sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_POSTWRITE); if (BGE_IS_JUMBO_CAPABLE(sc) && - if_get(ifp, IF_MTU) + ETHER_HDR_LEN + ETHER_CRC_LEN + + sc->bge_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN > (MCLBYTES - ETHER_ALIGN)) bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag, sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_POSTWRITE); while (rx_cons != rx_prod) { struct bge_rx_bd *cur_rx; uint32_t rxidx; struct mbuf *m = NULL; uint16_t vlan_tag = 0; int have_tag = 0; #ifdef DEVICE_POLLING - if (if_get(ifp, IF_CAPENABLE) & IFCAP_POLLING) { + if (sc->bge_capenable & IFCAP_POLLING) { if (sc->rxcycles <= 0) break; sc->rxcycles--; } #endif cur_rx = &sc->bge_ldata.bge_rx_return_ring[rx_cons]; rxidx = cur_rx->bge_idx; BGE_INC(rx_cons, sc->bge_return_ring_cnt); - if (if_get(ifp, IF_CAPENABLE) & IFCAP_VLAN_HWTAGGING && + if (sc->bge_capenable & IFCAP_VLAN_HWTAGGING && cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) { have_tag = 1; vlan_tag = cur_rx->bge_vlan_tag; } if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) { jumbocnt++; m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx]; if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) { bge_rxreuse_jumbo(sc, rxidx); continue; } if (bge_newbuf_jumbo(sc, rxidx) != 0) { bge_rxreuse_jumbo(sc, rxidx); if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1); continue; } BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT); } else { stdcnt++; m = sc->bge_cdata.bge_rx_std_chain[rxidx]; if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) { bge_rxreuse_std(sc, rxidx); continue; } if (bge_newbuf_std(sc, rxidx) != 0) { bge_rxreuse_std(sc, rxidx); if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1); continue; } BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT); } if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); #ifndef __NO_STRICT_ALIGNMENT /* * For architectures with strict alignment we must make sure * the payload is aligned. */ if (sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) { bcopy(m->m_data, m->m_data + ETHER_ALIGN, cur_rx->bge_len); m->m_data += ETHER_ALIGN; } #endif m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN; m->m_pkthdr.rcvif = ifp; - if (if_get(ifp, IF_CAPENABLE) & IFCAP_RXCSUM) + if (sc->bge_capenable & IFCAP_RXCSUM) bge_rxcsum(sc, cur_rx, m); /* * If we received a packet with a vlan tag, * attach that information to the packet. */ if (have_tag) { m->m_pkthdr.ether_vtag = vlan_tag; m->m_flags |= M_VLANTAG; } if (holdlck != 0) { BGE_UNLOCK(sc); if_input(ifp, m); BGE_LOCK(sc); } else if_input(ifp, m); rx_npkts++; if (!(sc->bge_flags & BGE_FLAG_RUNNING)) return (rx_npkts); } bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag, sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_PREREAD); if (stdcnt > 0) bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag, sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREWRITE); if (jumbocnt > 0) bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag, sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_PREWRITE); sc->bge_rx_saved_considx = rx_cons; bge_writembx(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx); if (stdcnt) bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, (sc->bge_std + BGE_STD_RX_RING_CNT - 1) % BGE_STD_RX_RING_CNT); if (jumbocnt) bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, (sc->bge_jumbo + BGE_JUMBO_RX_RING_CNT - 1) % BGE_JUMBO_RX_RING_CNT); #ifdef notyet /* * This register wraps very quickly under heavy packet drops. 
* If you need correct statistics, you can enable this check. */ if (BGE_IS_5705_PLUS(sc)) if_incierrors(ifp, CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS)); #endif return (rx_npkts); } static void bge_rxcsum(struct bge_softc *sc, struct bge_rx_bd *cur_rx, struct mbuf *m) { if (BGE_IS_5717_PLUS(sc)) { if ((cur_rx->bge_flags & BGE_RXBDFLAG_IPV6) == 0) { if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) { m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; if ((cur_rx->bge_error_flag & BGE_RXERRFLAG_IP_CSUM_NOK) == 0) m->m_pkthdr.csum_flags |= CSUM_IP_VALID; } if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM) { m->m_pkthdr.csum_data = cur_rx->bge_tcp_udp_csum; m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR; } } } else { if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) { m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; if ((cur_rx->bge_ip_csum ^ 0xFFFF) == 0) m->m_pkthdr.csum_flags |= CSUM_IP_VALID; } if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM && m->m_pkthdr.len >= ETHER_MIN_NOPAD) { m->m_pkthdr.csum_data = cur_rx->bge_tcp_udp_csum; m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR; } } } static void bge_txeof(struct bge_softc *sc, uint16_t tx_cons) { struct bge_tx_bd *cur_tx; if_t ifp; BGE_LOCK_ASSERT(sc); /* Nothing to do. */ if (sc->bge_tx_saved_considx == tx_cons) return; ifp = sc->bge_ifp; bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag, sc->bge_cdata.bge_tx_ring_map, BUS_DMASYNC_POSTWRITE); /* * Go through our tx ring and free mbufs for those * frames that have been sent. */ while (sc->bge_tx_saved_considx != tx_cons) { uint32_t idx; idx = sc->bge_tx_saved_considx; cur_tx = &sc->bge_ldata.bge_tx_ring[idx]; if (cur_tx->bge_flags & BGE_TXBDFLAG_END) if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1); if (sc->bge_cdata.bge_tx_chain[idx] != NULL) { struct mbuf *m; bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag, sc->bge_cdata.bge_tx_dmamap[idx], BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag, sc->bge_cdata.bge_tx_dmamap[idx]); m = sc->bge_cdata.bge_tx_chain[idx]; sc->bge_cdata.bge_tx_chain[idx] = NULL; if_inc_counter(ifp, IFCOUNTER_OBYTES, m->m_pkthdr.len); m_freem(m); } sc->bge_txcnt--; BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT); } if (sc->bge_txcnt == 0) sc->bge_timer = 0; } #ifdef DEVICE_POLLING static int bge_poll(if_t ifp, enum poll_cmd cmd, int count) { struct bge_softc *sc = if_getsoftc(ifp, IF_DRIVER_SOFTC); uint16_t rx_prod, tx_cons; uint32_t statusword; int rx_npkts = 0; BGE_LOCK(sc); if (!(sc->bge_flags & BGE_FLAG_RUNNING)) { BGE_UNLOCK(sc); return (rx_npkts); } bus_dmamap_sync(sc->bge_cdata.bge_status_tag, sc->bge_cdata.bge_status_map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); /* Fetch updates from the status block. */ rx_prod = sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx; tx_cons = sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx; statusword = sc->bge_ldata.bge_status_block->bge_status; /* Clear the status so the next pass only sees the changes. */ sc->bge_ldata.bge_status_block->bge_status = 0; bus_dmamap_sync(sc->bge_cdata.bge_status_tag, sc->bge_cdata.bge_status_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); /* Note link event. It will be processed by POLL_AND_CHECK_STATUS. 
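 * (The polling framework typically issues POLL_AND_CHECK_STATUS on only
 * a fraction of the polling passes, so the link update is deliberately
 * deferred to such a pass rather than performed on every poll.)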
*/ if (statusword & BGE_STATFLAG_LINKSTATE_CHANGED) sc->bge_link_evt++; if (cmd == POLL_AND_CHECK_STATUS) if ((sc->bge_asicrev == BGE_ASICREV_BCM5700 && sc->bge_chipid != BGE_CHIPID_BCM5700_B2) || sc->bge_link_evt || (sc->bge_flags & BGE_FLAG_TBI)) bge_link_upd(sc); sc->rxcycles = count; rx_npkts = bge_rxeof(sc, rx_prod, 1); if (!(sc->bge_flags & BGE_FLAG_RUNNING)) { BGE_UNLOCK(sc); return (rx_npkts); } bge_txeof(sc, tx_cons); if (if_snd_len(ifp)) bge_start_locked(sc); BGE_UNLOCK(sc); return (rx_npkts); } #endif /* DEVICE_POLLING */ static int bge_msi_intr(void *arg) { struct bge_softc *sc; sc = (struct bge_softc *)arg; /* * This interrupt is not shared and controller already * disabled further interrupt. */ taskqueue_enqueue(sc->bge_tq, &sc->bge_intr_task); return (FILTER_HANDLED); } static void bge_intr_task(void *arg, int pending) { struct bge_softc *sc; if_t ifp; uint32_t status, status_tag; uint16_t rx_prod, tx_cons; sc = (struct bge_softc *)arg; ifp = sc->bge_ifp; BGE_LOCK(sc); if ((sc->bge_flags & BGE_FLAG_RUNNING) == 0) { BGE_UNLOCK(sc); return; } /* Get updated status block. */ bus_dmamap_sync(sc->bge_cdata.bge_status_tag, sc->bge_cdata.bge_status_map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); /* Save producer/consumer indices. */ rx_prod = sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx; tx_cons = sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx; status = sc->bge_ldata.bge_status_block->bge_status; status_tag = sc->bge_ldata.bge_status_block->bge_status_tag << 24; /* Dirty the status flag. */ sc->bge_ldata.bge_status_block->bge_status = 0; bus_dmamap_sync(sc->bge_cdata.bge_status_tag, sc->bge_cdata.bge_status_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); if ((sc->bge_flags & BGE_FLAG_TAGGED_STATUS) == 0) status_tag = 0; if ((status & BGE_STATFLAG_LINKSTATE_CHANGED) != 0) bge_link_upd(sc); /* Let controller work. */ bge_writembx(sc, BGE_MBX_IRQ0_LO, status_tag); if (sc->bge_flags & BGE_FLAG_RUNNING && sc->bge_rx_saved_considx != rx_prod) { /* Check RX return ring producer/consumer. */ BGE_UNLOCK(sc); bge_rxeof(sc, rx_prod, 0); BGE_LOCK(sc); } if (sc->bge_flags & BGE_FLAG_RUNNING) { /* Check TX ring producer/consumer. */ bge_txeof(sc, tx_cons); if (if_snd_len(ifp)) bge_start_locked(sc); } BGE_UNLOCK(sc); } static void bge_intr(void *xsc) { struct bge_softc *sc; if_t ifp; uint32_t statusword; uint16_t rx_prod, tx_cons; sc = xsc; BGE_LOCK(sc); ifp = sc->bge_ifp; #ifdef DEVICE_POLLING - if (if_get(ifp, IF_CAPENABLE) & IFCAP_POLLING) { + if (sc->bge_capenable & IFCAP_POLLING) { BGE_UNLOCK(sc); return; } #endif /* * Ack the interrupt by writing something to BGE_MBX_IRQ0_LO. Don't * disable interrupts by writing nonzero like we used to, since with * our current organization this just gives complications and * pessimizations for re-enabling interrupts. We used to have races * instead of the necessary complications. Disabling interrupts * would just reduce the chance of a status update while we are * running (by switching to the interrupt-mode coalescence * parameters), but this chance is already very low so it is more * efficient to get another interrupt than prevent it. * * We do the ack first to ensure another interrupt if there is a * status update after the ack. 
We don't check for the status * changing later because it is more efficient to get another * interrupt than prevent it, not quite as above (not checking is * a smaller optimization than not toggling the interrupt enable, * since checking doesn't involve PCI accesses and toggling require * the status check). So toggling would probably be a pessimization * even with MSI. It would only be needed for using a task queue. */ bge_writembx(sc, BGE_MBX_IRQ0_LO, 0); /* * Do the mandatory PCI flush as well as get the link status. */ statusword = CSR_READ_4(sc, BGE_MAC_STS) & BGE_MACSTAT_LINK_CHANGED; /* Make sure the descriptor ring indexes are coherent. */ bus_dmamap_sync(sc->bge_cdata.bge_status_tag, sc->bge_cdata.bge_status_map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); rx_prod = sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx; tx_cons = sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx; sc->bge_ldata.bge_status_block->bge_status = 0; bus_dmamap_sync(sc->bge_cdata.bge_status_tag, sc->bge_cdata.bge_status_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); if ((sc->bge_asicrev == BGE_ASICREV_BCM5700 && sc->bge_chipid != BGE_CHIPID_BCM5700_B2) || statusword || sc->bge_link_evt) bge_link_upd(sc); if (sc->bge_flags & BGE_FLAG_RUNNING) { /* Check RX return ring producer/consumer. */ bge_rxeof(sc, rx_prod, 1); } if (sc->bge_flags & BGE_FLAG_RUNNING) { /* Check TX ring producer/consumer. */ bge_txeof(sc, tx_cons); } if (sc->bge_flags & BGE_FLAG_RUNNING && if_snd_len(ifp)) bge_start_locked(sc); BGE_UNLOCK(sc); } static void bge_asf_driver_up(struct bge_softc *sc) { if (sc->bge_asf_mode & ASF_STACKUP) { /* Send ASF heartbeat aprox. every 2s */ if (sc->bge_asf_count) sc->bge_asf_count --; else { sc->bge_asf_count = 2; bge_writemem_ind(sc, BGE_SRAM_FW_CMD_MB, BGE_FW_CMD_DRV_ALIVE); bge_writemem_ind(sc, BGE_SRAM_FW_CMD_LEN_MB, 4); bge_writemem_ind(sc, BGE_SRAM_FW_CMD_DATA_MB, BGE_FW_HB_TIMEOUT_SEC); CSR_WRITE_4(sc, BGE_RX_CPU_EVENT, CSR_READ_4(sc, BGE_RX_CPU_EVENT) | BGE_RX_CPU_DRV_EVENT); } } } static void bge_tick(void *xsc) { struct bge_softc *sc = xsc; struct mii_data *mii = NULL; BGE_LOCK_ASSERT(sc); /* Synchronize with possible callout reset/stop. */ if (callout_pending(&sc->bge_stat_ch) || !callout_active(&sc->bge_stat_ch)) return; if (BGE_IS_5705_PLUS(sc)) bge_stats_update_regs(sc); else bge_stats_update(sc); /* XXX Add APE heartbeat check here? */ if ((sc->bge_flags & BGE_FLAG_TBI) == 0) { mii = device_get_softc(sc->bge_miibus); /* * Do not touch PHY if we have link up. This could break * IPMI/ASF mode or produce extra input errors * (extra errors was reported for bcm5701 & bcm5704). */ if (!sc->bge_link) mii_tick(mii); } else { /* * Since in TBI mode auto-polling can't be used we should poll * link status manually. Here we register pending link event * and trigger interrupt. */ #ifdef DEVICE_POLLING /* In polling mode we poll link state in bge_poll(). 
*/ if (!(if_getcapenable(sc->bge_ifp) & IFCAP_POLLING)) #endif { sc->bge_link_evt++; if (sc->bge_asicrev == BGE_ASICREV_BCM5700 || sc->bge_flags & BGE_FLAG_5788) BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET); else BGE_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW); } } bge_asf_driver_up(sc); bge_watchdog(sc); callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc); } static void bge_stats_update_regs(struct bge_softc *sc) { if_t ifp; struct bge_mac_stats *stats; uint32_t val; ifp = sc->bge_ifp; stats = &sc->bge_mac_stats; stats->ifHCOutOctets += CSR_READ_4(sc, BGE_TX_MAC_STATS_OCTETS); stats->etherStatsCollisions += CSR_READ_4(sc, BGE_TX_MAC_STATS_COLLS); stats->outXonSent += CSR_READ_4(sc, BGE_TX_MAC_STATS_XON_SENT); stats->outXoffSent += CSR_READ_4(sc, BGE_TX_MAC_STATS_XOFF_SENT); stats->dot3StatsInternalMacTransmitErrors += CSR_READ_4(sc, BGE_TX_MAC_STATS_ERRORS); stats->dot3StatsSingleCollisionFrames += CSR_READ_4(sc, BGE_TX_MAC_STATS_SINGLE_COLL); stats->dot3StatsMultipleCollisionFrames += CSR_READ_4(sc, BGE_TX_MAC_STATS_MULTI_COLL); stats->dot3StatsDeferredTransmissions += CSR_READ_4(sc, BGE_TX_MAC_STATS_DEFERRED); stats->dot3StatsExcessiveCollisions += CSR_READ_4(sc, BGE_TX_MAC_STATS_EXCESS_COLL); stats->dot3StatsLateCollisions += CSR_READ_4(sc, BGE_TX_MAC_STATS_LATE_COLL); stats->ifHCOutUcastPkts += CSR_READ_4(sc, BGE_TX_MAC_STATS_UCAST); stats->ifHCOutMulticastPkts += CSR_READ_4(sc, BGE_TX_MAC_STATS_MCAST); stats->ifHCOutBroadcastPkts += CSR_READ_4(sc, BGE_TX_MAC_STATS_BCAST); stats->ifHCInOctets += CSR_READ_4(sc, BGE_RX_MAC_STATS_OCTESTS); stats->etherStatsFragments += CSR_READ_4(sc, BGE_RX_MAC_STATS_FRAGMENTS); stats->ifHCInUcastPkts += CSR_READ_4(sc, BGE_RX_MAC_STATS_UCAST); stats->ifHCInMulticastPkts += CSR_READ_4(sc, BGE_RX_MAC_STATS_MCAST); stats->ifHCInBroadcastPkts += CSR_READ_4(sc, BGE_RX_MAC_STATS_BCAST); stats->dot3StatsFCSErrors += CSR_READ_4(sc, BGE_RX_MAC_STATS_FCS_ERRORS); stats->dot3StatsAlignmentErrors += CSR_READ_4(sc, BGE_RX_MAC_STATS_ALGIN_ERRORS); stats->xonPauseFramesReceived += CSR_READ_4(sc, BGE_RX_MAC_STATS_XON_RCVD); stats->xoffPauseFramesReceived += CSR_READ_4(sc, BGE_RX_MAC_STATS_XOFF_RCVD); stats->macControlFramesReceived += CSR_READ_4(sc, BGE_RX_MAC_STATS_CTRL_RCVD); stats->xoffStateEntered += CSR_READ_4(sc, BGE_RX_MAC_STATS_XOFF_ENTERED); stats->dot3StatsFramesTooLong += CSR_READ_4(sc, BGE_RX_MAC_STATS_FRAME_TOO_LONG); stats->etherStatsJabbers += CSR_READ_4(sc, BGE_RX_MAC_STATS_JABBERS); stats->etherStatsUndersizePkts += CSR_READ_4(sc, BGE_RX_MAC_STATS_UNDERSIZE); stats->FramesDroppedDueToFilters += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_FILTDROP); stats->DmaWriteQueueFull += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_DMA_WRQ_FULL); stats->DmaWriteHighPriQueueFull += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_DMA_HPWRQ_FULL); stats->NoMoreRxBDs += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_OUT_OF_BDS); /* * XXX * Unlike other controllers, BGE_RXLP_LOCSTAT_IFIN_DROPS * counter of BCM5717, BCM5718, BCM5719 A0 and BCM5720 A0 * includes number of unwanted multicast frames. This comes * from silicon bug and known workaround to get rough(not * exact) counter is to enable interrupt on MBUF low water * attention. This can be accomplished by setting * BGE_HCCMODE_ATTN bit of BGE_HCC_MODE, * BGE_BMANMODE_LOMBUF_ATTN bit of BGE_BMAN_MODE and * BGE_MODECTL_FLOWCTL_ATTN_INTR bit of BGE_MODE_CTL. * However that change would generate more interrupts and * there are still possibilities of losing multiple frames * during BGE_MODECTL_FLOWCTL_ATTN_INTR interrupt handling. 
* Given that the workaround still would not get correct * counter I don't think it's worth to implement it. So * ignore reading the counter on controllers that have the * silicon bug. */ if (sc->bge_asicrev != BGE_ASICREV_BCM5717 && sc->bge_chipid != BGE_CHIPID_BCM5719_A0 && sc->bge_chipid != BGE_CHIPID_BCM5720_A0) stats->InputDiscards += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS); stats->InputErrors += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_ERRORS); stats->RecvThresholdHit += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_RXTHRESH_HIT); if (sc->bge_flags & BGE_FLAG_RDMA_BUG) { /* * If controller transmitted more than BGE_NUM_RDMA_CHANNELS * frames, it's safe to disable workaround for DMA engine's * miscalculation of TXMBUF space. */ if (stats->ifHCOutUcastPkts + stats->ifHCOutMulticastPkts + stats->ifHCOutBroadcastPkts > BGE_NUM_RDMA_CHANNELS) { val = CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL); if (sc->bge_asicrev == BGE_ASICREV_BCM5719) val &= ~BGE_RDMA_TX_LENGTH_WA_5719; else val &= ~BGE_RDMA_TX_LENGTH_WA_5720; CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL, val); sc->bge_flags &= ~BGE_FLAG_RDMA_BUG; } } } static void bge_stats_clear_regs(struct bge_softc *sc) { CSR_READ_4(sc, BGE_TX_MAC_STATS_OCTETS); CSR_READ_4(sc, BGE_TX_MAC_STATS_COLLS); CSR_READ_4(sc, BGE_TX_MAC_STATS_XON_SENT); CSR_READ_4(sc, BGE_TX_MAC_STATS_XOFF_SENT); CSR_READ_4(sc, BGE_TX_MAC_STATS_ERRORS); CSR_READ_4(sc, BGE_TX_MAC_STATS_SINGLE_COLL); CSR_READ_4(sc, BGE_TX_MAC_STATS_MULTI_COLL); CSR_READ_4(sc, BGE_TX_MAC_STATS_DEFERRED); CSR_READ_4(sc, BGE_TX_MAC_STATS_EXCESS_COLL); CSR_READ_4(sc, BGE_TX_MAC_STATS_LATE_COLL); CSR_READ_4(sc, BGE_TX_MAC_STATS_UCAST); CSR_READ_4(sc, BGE_TX_MAC_STATS_MCAST); CSR_READ_4(sc, BGE_TX_MAC_STATS_BCAST); CSR_READ_4(sc, BGE_RX_MAC_STATS_OCTESTS); CSR_READ_4(sc, BGE_RX_MAC_STATS_FRAGMENTS); CSR_READ_4(sc, BGE_RX_MAC_STATS_UCAST); CSR_READ_4(sc, BGE_RX_MAC_STATS_MCAST); CSR_READ_4(sc, BGE_RX_MAC_STATS_BCAST); CSR_READ_4(sc, BGE_RX_MAC_STATS_FCS_ERRORS); CSR_READ_4(sc, BGE_RX_MAC_STATS_ALGIN_ERRORS); CSR_READ_4(sc, BGE_RX_MAC_STATS_XON_RCVD); CSR_READ_4(sc, BGE_RX_MAC_STATS_XOFF_RCVD); CSR_READ_4(sc, BGE_RX_MAC_STATS_CTRL_RCVD); CSR_READ_4(sc, BGE_RX_MAC_STATS_XOFF_ENTERED); CSR_READ_4(sc, BGE_RX_MAC_STATS_FRAME_TOO_LONG); CSR_READ_4(sc, BGE_RX_MAC_STATS_JABBERS); CSR_READ_4(sc, BGE_RX_MAC_STATS_UNDERSIZE); CSR_READ_4(sc, BGE_RXLP_LOCSTAT_FILTDROP); CSR_READ_4(sc, BGE_RXLP_LOCSTAT_DMA_WRQ_FULL); CSR_READ_4(sc, BGE_RXLP_LOCSTAT_DMA_HPWRQ_FULL); CSR_READ_4(sc, BGE_RXLP_LOCSTAT_OUT_OF_BDS); CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS); CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_ERRORS); CSR_READ_4(sc, BGE_RXLP_LOCSTAT_RXTHRESH_HIT); } static void bge_stats_update(struct bge_softc *sc) { if_t ifp; bus_size_t stats; uint32_t cnt; /* current register value */ ifp = sc->bge_ifp; stats = BGE_MEMWIN_START + BGE_STATS_BLOCK; #define READ_STAT(sc, stats, stat) \ CSR_READ_4(sc, stats + offsetof(struct bge_stats, stat)) cnt = READ_STAT(sc, stats, txstats.etherStatsCollisions.bge_addr_lo); if_inc_counter(ifp, IFCOUNTER_COLLISIONS, cnt - sc->bge_tx_collisions); sc->bge_tx_collisions = cnt; cnt = READ_STAT(sc, stats, nicNoMoreRxBDs.bge_addr_lo); if_inc_counter(ifp, IFCOUNTER_IERRORS, cnt - sc->bge_rx_nobds); sc->bge_rx_nobds = cnt; cnt = READ_STAT(sc, stats, ifInErrors.bge_addr_lo); if_inc_counter(ifp, IFCOUNTER_IERRORS, cnt - sc->bge_rx_inerrs); sc->bge_rx_inerrs = cnt; cnt = READ_STAT(sc, stats, ifInDiscards.bge_addr_lo); if_inc_counter(ifp, IFCOUNTER_IERRORS, cnt - sc->bge_rx_discards); sc->bge_rx_discards = cnt; cnt = READ_STAT(sc, 
stats, txstats.ifOutDiscards.bge_addr_lo); if_inc_counter(ifp, IFCOUNTER_OERRORS, cnt - sc->bge_tx_discards); sc->bge_tx_discards = cnt; #undef READ_STAT } /* * Pad outbound frame to ETHER_MIN_NOPAD for an unusual reason. * The bge hardware will pad out Tx runts to ETHER_MIN_NOPAD, * but when such padded frames employ the bge IP/TCP checksum offload, * the hardware checksum assist gives incorrect results (possibly * from incorporating its own padding into the UDP/TCP checksum; who knows). * If we pad such runts with zeros, the onboard checksum comes out correct. */ static __inline int bge_cksum_pad(struct mbuf *m) { int padlen = ETHER_MIN_NOPAD - m->m_pkthdr.len; struct mbuf *last; /* If there's only the packet-header and we can pad there, use it. */ if (m->m_pkthdr.len == m->m_len && M_WRITABLE(m) && M_TRAILINGSPACE(m) >= padlen) { last = m; } else { /* * Walk packet chain to find last mbuf. We will either * pad there, or append a new mbuf and pad it. */ for (last = m; last->m_next != NULL; last = last->m_next); if (!(M_WRITABLE(last) && M_TRAILINGSPACE(last) >= padlen)) { /* Allocate new empty mbuf, pad it. Compact later. */ struct mbuf *n; MGET(n, M_NOWAIT, MT_DATA); if (n == NULL) return (ENOBUFS); n->m_len = 0; last->m_next = n; last = n; } } /* Now zero the pad area, to avoid the bge cksum-assist bug. */ memset(mtod(last, caddr_t) + last->m_len, 0, padlen); last->m_len += padlen; m->m_pkthdr.len += padlen; return (0); } static struct mbuf * bge_check_short_dma(struct mbuf *m) { struct mbuf *n; int found; /* * If device receive two back-to-back send BDs with less than * or equal to 8 total bytes then the device may hang. The two * back-to-back send BDs must in the same frame for this failure * to occur. Scan mbuf chains and see whether two back-to-back * send BDs are there. If this is the case, allocate new mbuf * and copy the frame to workaround the silicon bug. */ for (n = m, found = 0; n != NULL; n = n->m_next) { if (n->m_len < 8) { found++; if (found > 1) break; continue; } found = 0; } if (found > 1) { n = m_defrag(m, M_NOWAIT); if (n == NULL) m_freem(m); } else n = m; return (n); } static struct mbuf * bge_setup_tso(struct bge_softc *sc, struct mbuf *m, uint16_t *mss, uint16_t *flags) { struct ip *ip; struct tcphdr *tcp; struct mbuf *n; uint16_t hlen; uint32_t poff; if (M_WRITABLE(m) == 0) { /* Get a writable copy. */ n = m_dup(m, M_NOWAIT); m_freem(m); if (n == NULL) return (NULL); m = n; } m = m_pullup(m, sizeof(struct ether_header) + sizeof(struct ip)); if (m == NULL) return (NULL); ip = (struct ip *)(mtod(m, char *) + sizeof(struct ether_header)); poff = sizeof(struct ether_header) + (ip->ip_hl << 2); m = m_pullup(m, poff + sizeof(struct tcphdr)); if (m == NULL) return (NULL); tcp = (struct tcphdr *)(mtod(m, char *) + poff); m = m_pullup(m, poff + (tcp->th_off << 2)); if (m == NULL) return (NULL); /* * It seems controller doesn't modify IP length and TCP pseudo * checksum. These checksum computed by upper stack should be 0. */ *mss = m->m_pkthdr.tso_segsz; ip = (struct ip *)(mtod(m, char *) + sizeof(struct ether_header)); ip->ip_sum = 0; ip->ip_len = htons(*mss + (ip->ip_hl << 2) + (tcp->th_off << 2)); /* Clear pseudo checksum computed by TCP stack. */ tcp = (struct tcphdr *)(mtod(m, char *) + poff); tcp->th_sum = 0; /* * Broadcom controllers uses different descriptor format for * TSO depending on ASIC revision. Due to TSO-capable firmware * license issue and lower performance of firmware based TSO * we only support hardware based TSO. */ /* Calculate header length, incl. 
TCP/IP options, in 32 bit units. */ hlen = ((ip->ip_hl << 2) + (tcp->th_off << 2)) >> 2; if (sc->bge_flags & BGE_FLAG_TSO3) { /* * For BCM5717 and newer controllers, hardware based TSO * uses the 14 lower bits of the bge_mss field to store the * MSS and the upper 2 bits to store the lowest 2 bits of * the IP/TCP header length. The upper 6 bits of the header * length are stored in the bge_flags[14:10,4] field. Jumbo * frames are supported. */ *mss |= ((hlen & 0x3) << 14); *flags |= ((hlen & 0xF8) << 7) | ((hlen & 0x4) << 2); } else { /* * For BCM5755 and newer controllers, hardware based TSO uses * the lower 11 bits to store the MSS and the upper 5 bits to * store the IP/TCP header length. Jumbo frames are not * supported. */ *mss |= (hlen << 11); } return (m); } /* * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data * pointers to descriptors. */ static int bge_encap(struct bge_softc *sc, struct mbuf **m_head, uint32_t *txidx) { bus_dma_segment_t segs[BGE_NSEG_NEW]; bus_dmamap_t map; struct bge_tx_bd *d; struct mbuf *m = *m_head; uint32_t idx = *txidx; uint16_t csum_flags, mss, vlan_tag; int nsegs, i, error; csum_flags = 0; mss = 0; vlan_tag = 0; if ((sc->bge_flags & BGE_FLAG_SHORT_DMA_BUG) != 0 && m->m_next != NULL) { *m_head = bge_check_short_dma(m); if (*m_head == NULL) return (ENOBUFS); m = *m_head; } if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) { *m_head = m = bge_setup_tso(sc, m, &mss, &csum_flags); if (*m_head == NULL) return (ENOBUFS); csum_flags |= BGE_TXBDFLAG_CPU_PRE_DMA | BGE_TXBDFLAG_CPU_POST_DMA; - } else if ((m->m_pkthdr.csum_flags & sc->bge_csum_features) != 0) { + } else if ((m->m_pkthdr.csum_flags & sc->bge_hwassist) != 0) { if (m->m_pkthdr.csum_flags & CSUM_IP) csum_flags |= BGE_TXBDFLAG_IP_CSUM; if (m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP)) { csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM; if (m->m_pkthdr.len < ETHER_MIN_NOPAD && (error = bge_cksum_pad(m)) != 0) { m_freem(m); *m_head = NULL; return (error); } } } if ((m->m_pkthdr.csum_flags & CSUM_TSO) == 0) { if (sc->bge_flags & BGE_FLAG_JUMBO_FRAME && m->m_pkthdr.len > ETHER_MAX_LEN) csum_flags |= BGE_TXBDFLAG_JUMBO_FRAME; if (sc->bge_forced_collapse > 0 && (sc->bge_flags & BGE_FLAG_PCIE) != 0 && m->m_next != NULL) { /* * Forcedly collapse mbuf chains to overcome hardware * limitation which only support a single outstanding * DMA read operation. */ if (sc->bge_forced_collapse == 1) m = m_defrag(m, M_NOWAIT); else m = m_collapse(m, M_NOWAIT, sc->bge_forced_collapse); if (m == NULL) m = *m_head; *m_head = m; } } map = sc->bge_cdata.bge_tx_dmamap[idx]; error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_tx_mtag, map, m, segs, &nsegs, BUS_DMA_NOWAIT); if (error == EFBIG) { m = m_collapse(m, M_NOWAIT, BGE_NSEG_NEW); if (m == NULL) { m_freem(*m_head); *m_head = NULL; return (ENOBUFS); } *m_head = m; error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_tx_mtag, map, m, segs, &nsegs, BUS_DMA_NOWAIT); if (error) { m_freem(m); *m_head = NULL; return (error); } } else if (error != 0) return (error); /* Check if we have enough free send BDs. 
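 * The >= test below intentionally refuses to consume the very last
 * descriptor, so the ring never becomes completely full and the producer
 * index cannot wrap onto the consumer index.  As a rough example
 * (assuming the usual BGE_TX_RING_CNT of 512): with 500 descriptors
 * already outstanding, a 12-segment chain is rejected with ENOBUFS
 * (500 + 12 >= 512), while an 11-segment chain still fits.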
*/ if (sc->bge_txcnt + nsegs >= BGE_TX_RING_CNT) { bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag, map); return (ENOBUFS); } bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag, map, BUS_DMASYNC_PREWRITE); if (m->m_flags & M_VLANTAG) { csum_flags |= BGE_TXBDFLAG_VLAN_TAG; vlan_tag = m->m_pkthdr.ether_vtag; } if (sc->bge_asicrev == BGE_ASICREV_BCM5762 && (m->m_pkthdr.csum_flags & CSUM_TSO) != 0) { /* * 5725 family of devices corrupts TSO packets when TSO DMA * buffers cross into regions which are within MSS bytes of * a 4GB boundary. If we encounter the condition, drop the * packet. */ for (i = 0; ; i++) { d = &sc->bge_ldata.bge_tx_ring[idx]; d->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[i].ds_addr); d->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[i].ds_addr); d->bge_len = segs[i].ds_len; if (d->bge_addr.bge_addr_lo + segs[i].ds_len + mss < d->bge_addr.bge_addr_lo) break; d->bge_flags = csum_flags; d->bge_vlan_tag = vlan_tag; d->bge_mss = mss; if (i == nsegs - 1) break; BGE_INC(idx, BGE_TX_RING_CNT); } if (i != nsegs - 1) { bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag, map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag, map); m_freem(*m_head); *m_head = NULL; return (EIO); } } else { for (i = 0; ; i++) { d = &sc->bge_ldata.bge_tx_ring[idx]; d->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[i].ds_addr); d->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[i].ds_addr); d->bge_len = segs[i].ds_len; d->bge_flags = csum_flags; d->bge_vlan_tag = vlan_tag; d->bge_mss = mss; if (i == nsegs - 1) break; BGE_INC(idx, BGE_TX_RING_CNT); } } /* Mark the last segment as end of packet... */ d->bge_flags |= BGE_TXBDFLAG_END; /* * Insure that the map for this transmission * is placed at the array index of the last descriptor * in this chain. */ sc->bge_cdata.bge_tx_dmamap[*txidx] = sc->bge_cdata.bge_tx_dmamap[idx]; sc->bge_cdata.bge_tx_dmamap[idx] = map; sc->bge_cdata.bge_tx_chain[idx] = m; sc->bge_txcnt += nsegs; BGE_INC(idx, BGE_TX_RING_CNT); *txidx = idx; return (0); } /* * Main transmit routine. To avoid having to do mbuf copies, we put pointers * to the mbuf data regions directly in the transmit descriptors. */ static int bge_start_locked(struct bge_softc *sc) { if_t ifp; struct mbuf *m; uint32_t prodidx; int error, count; BGE_LOCK_ASSERT(sc); if (!sc->bge_link || (sc->bge_flags & BGE_FLAG_RUNNING) == 0) return (ENETDOWN); ifp = sc->bge_ifp; prodidx = sc->bge_tx_prodidx; error = count = 0; while (sc->bge_txcnt <= BGE_TX_RING_CNT - 16 && (m = if_snd_dequeue(ifp)) != NULL) { /* * Pack the data into the transmit ring. If we * don't have room, set the OACTIVE flag and wait * for the NIC to drain the ring. */ if (bge_encap(sc, &m, &prodidx)) { if (m == NULL) break; if_snd_prepend(ifp, m); break; } ++count; if_mtap(ifp, m, NULL, 0); } if (count > 0) { bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag, sc->bge_cdata.bge_tx_ring_map, BUS_DMASYNC_PREWRITE); /* Transmit. */ bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx); /* 5700 b2 errata */ if (sc->bge_chiprev == BGE_CHIPREV_5700_BX) bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx); sc->bge_tx_prodidx = prodidx; /* * Set a timeout in case the chip goes out to lunch. */ sc->bge_timer = BGE_TX_TIMEOUT; } return (0); } /* * Main transmit routine. To avoid having to do mbuf copies, we put pointers * to the mbuf data regions directly in the transmit descriptors. 
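 * bge_transmit() below first queues the mbuf and only then tries the
 * driver lock; if the trylock fails, whichever thread currently holds
 * the lock is (or shortly will be) running bge_start_locked() and will
 * pick the queued packet up, so returning without transmitting here is
 * harmless.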
*/ static int bge_transmit(if_t ifp, struct mbuf *m) { struct bge_softc *sc; int error; if ((error = if_snd_enqueue(ifp, m)) != 0) return (error); sc = if_getsoftc(ifp, IF_DRIVER_SOFTC); if (BGE_TRYLOCK(sc) == 0) return (0); error = bge_start_locked(sc); BGE_UNLOCK(sc); return (error); } static void bge_init_locked(struct bge_softc *sc) { if_t ifp; uint16_t *m; uint32_t mode; BGE_LOCK_ASSERT(sc); ifp = sc->bge_ifp; if (sc->bge_flags & BGE_FLAG_RUNNING) return; /* Cancel pending I/O and flush buffers. */ bge_stop(sc); bge_stop_fw(sc); bge_sig_pre_reset(sc, BGE_RESET_START); bge_reset(sc); bge_sig_legacy(sc, BGE_RESET_START); bge_sig_post_reset(sc, BGE_RESET_START); bge_chipinit(sc); /* * Init the various state machines, ring * control blocks and firmware. */ if (bge_blockinit(sc)) { device_printf(sc->bge_dev, "initialization failure\n"); return; } ifp = sc->bge_ifp; /* Specify MTU. */ - CSR_WRITE_4(sc, BGE_RX_MTU, if_get(ifp, IF_MTU) + + CSR_WRITE_4(sc, BGE_RX_MTU, sc->bge_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + - (if_get(ifp, IF_CAPENABLE) & IFCAP_VLAN_MTU ? ETHER_VLAN_ENCAP_LEN : 0)); + (sc->bge_capenable & IFCAP_VLAN_MTU ? ETHER_VLAN_ENCAP_LEN : 0)); /* Load our MAC address. */ m = (uint16_t *)if_lladdr(sc->bge_ifp); CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0])); CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2])); /* Program promiscuous mode. */ bge_setpromisc(sc); /* Program multicast filter. */ bge_setmulti(sc); /* Program VLAN tag stripping. */ bge_setvlan(sc); /* Override UDP checksum offloading. */ if (sc->bge_forced_udpcsum == 0) - sc->bge_csum_features &= ~CSUM_UDP; + sc->bge_hwassist &= ~CSUM_UDP; else - sc->bge_csum_features |= CSUM_UDP; - if (if_get(ifp, IF_CAPABILITIES) & IFCAP_TXCSUM && - if_get(ifp, IF_CAPENABLE) & IFCAP_TXCSUM) { - if_clrflags(ifp, IF_CAPABILITIES, BGE_CSUM_FEATURES | CSUM_UDP); - if_addflags(ifp, IF_CAPABILITIES, sc->bge_csum_features); - } + sc->bge_hwassist |= CSUM_UDP; /* Init RX ring. */ if (bge_init_rx_ring_std(sc) != 0) { device_printf(sc->bge_dev, "no memory for std Rx buffers.\n"); bge_stop(sc); return; } /* * Workaround for a bug in 5705 ASIC rev A0. Poll the NIC's * memory to insure that the chip has in fact read the first * entry of the ring. */ if (sc->bge_chipid == BGE_CHIPID_BCM5705_A0) { uint32_t v, i; for (i = 0; i < 10; i++) { DELAY(20); v = bge_readmem_ind(sc, BGE_STD_RX_RINGS + 8); if (v == (MCLBYTES - ETHER_ALIGN)) break; } if (i == 10) device_printf (sc->bge_dev, "5705 A0 chip failed to load RX ring\n"); } /* Init jumbo RX ring. */ if (BGE_IS_JUMBO_CAPABLE(sc) && - if_get(ifp, IF_MTU) + ETHER_HDR_LEN + ETHER_CRC_LEN + + sc->bge_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN > (MCLBYTES - ETHER_ALIGN)) { if (bge_init_rx_ring_jumbo(sc) != 0) { device_printf(sc->bge_dev, "no memory for jumbo Rx buffers.\n"); bge_stop(sc); return; } } /* Init our RX return ring index. */ sc->bge_rx_saved_considx = 0; /* Init our RX/TX stat counters. */ sc->bge_rx_discards = sc->bge_tx_discards = sc->bge_tx_collisions = 0; /* Init TX ring. */ bge_init_tx_ring(sc); /* Enable TX MAC state machine lockup fix. */ mode = CSR_READ_4(sc, BGE_TX_MODE); if (BGE_IS_5755_PLUS(sc) || sc->bge_asicrev == BGE_ASICREV_BCM5906) mode |= BGE_TXMODE_MBUF_LOCKUP_FIX; if (sc->bge_asicrev == BGE_ASICREV_BCM5720 || sc->bge_asicrev == BGE_ASICREV_BCM5762) { mode &= ~(BGE_TXMODE_JMB_FRM_LEN | BGE_TXMODE_CNT_DN_MODE); mode |= CSR_READ_4(sc, BGE_TX_MODE) & (BGE_TXMODE_JMB_FRM_LEN | BGE_TXMODE_CNT_DN_MODE); } /* Turn on transmitter. 
*/ CSR_WRITE_4(sc, BGE_TX_MODE, mode | BGE_TXMODE_ENABLE); DELAY(100); /* Turn on receiver. */ mode = CSR_READ_4(sc, BGE_RX_MODE); if (BGE_IS_5755_PLUS(sc)) mode |= BGE_RXMODE_IPV6_ENABLE; if (sc->bge_asicrev == BGE_ASICREV_BCM5762) mode |= BGE_RXMODE_IPV4_FRAG_FIX; CSR_WRITE_4(sc,BGE_RX_MODE, mode | BGE_RXMODE_ENABLE); DELAY(10); /* * Set the number of good frames to receive after RX MBUF * Low Watermark has been reached. After the RX MAC receives * this number of frames, it will drop subsequent incoming * frames until the MBUF High Watermark is reached. */ if (BGE_IS_57765_PLUS(sc)) CSR_WRITE_4(sc, BGE_MAX_RX_FRAME_LOWAT, 1); else CSR_WRITE_4(sc, BGE_MAX_RX_FRAME_LOWAT, 2); /* Clear MAC statistics. */ if (BGE_IS_5705_PLUS(sc)) bge_stats_clear_regs(sc); /* Tell firmware we're alive. */ BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); #ifdef DEVICE_POLLING /* Disable interrupts if we are polling. */ - if (if_get(ifp, IF_CAPENABLE) & IFCAP_POLLING) { + if (sc->bge_capenable & IFCAP_POLLING) { BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR); bge_writembx(sc, BGE_MBX_IRQ0_LO, 1); } else #endif /* Enable host interrupts. */ { BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA); BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR); bge_writembx(sc, BGE_MBX_IRQ0_LO, 0); } sc->bge_flags |= BGE_FLAG_RUNNING; bge_ifmedia_upd_locked(ifp); callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc); } static void bge_init(void *xsc) { struct bge_softc *sc = xsc; BGE_LOCK(sc); bge_init_locked(sc); BGE_UNLOCK(sc); } /* * Set media options. */ static int bge_ifmedia_upd(if_t ifp) { struct bge_softc *sc = if_getsoftc(ifp, IF_DRIVER_SOFTC); int res; BGE_LOCK(sc); res = bge_ifmedia_upd_locked(ifp); BGE_UNLOCK(sc); return (res); } static int bge_ifmedia_upd_locked(if_t ifp) { struct bge_softc *sc = if_getsoftc(ifp, IF_DRIVER_SOFTC); struct mii_data *mii; struct mii_softc *miisc; struct ifmedia *ifm; BGE_LOCK_ASSERT(sc); ifm = &sc->bge_ifmedia; /* If this is a 1000baseX NIC, enable the TBI port. */ if (sc->bge_flags & BGE_FLAG_TBI) { if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) return (EINVAL); switch(IFM_SUBTYPE(ifm->ifm_media)) { case IFM_AUTO: /* * The BCM5704 ASIC appears to have a special * mechanism for programming the autoneg * advertisement registers in TBI mode. */ if (sc->bge_asicrev == BGE_ASICREV_BCM5704) { uint32_t sgdig; sgdig = CSR_READ_4(sc, BGE_SGDIG_STS); if (sgdig & BGE_SGDIGSTS_DONE) { CSR_WRITE_4(sc, BGE_TX_TBI_AUTONEG, 0); sgdig = CSR_READ_4(sc, BGE_SGDIG_CFG); sgdig |= BGE_SGDIGCFG_AUTO | BGE_SGDIGCFG_PAUSE_CAP | BGE_SGDIGCFG_ASYM_PAUSE; CSR_WRITE_4(sc, BGE_SGDIG_CFG, sgdig | BGE_SGDIGCFG_SEND); DELAY(5); CSR_WRITE_4(sc, BGE_SGDIG_CFG, sgdig); } } break; case IFM_1000_SX: if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) { BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX); } else { BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX); } DELAY(40); break; default: return (EINVAL); } return (0); } sc->bge_link_evt++; mii = device_get_softc(sc->bge_miibus); LIST_FOREACH(miisc, &mii->mii_phys, mii_list) PHY_RESET(miisc); mii_mediachg(mii); /* * Force an interrupt so that we will call bge_link_upd * if needed and clear any pending link state attention. * Without this we are not getting any further interrupts * for link state changes and thus will not UP the link and * not be able to send in bge_start_locked. The only * way to get things working was to receive a packet and * get an RX intr. 
* bge_tick should help for fiber cards and we might not * need to do this here if BGE_FLAG_TBI is set but as * we poll for fiber anyway it should not harm. */ if (sc->bge_asicrev == BGE_ASICREV_BCM5700 || sc->bge_flags & BGE_FLAG_5788) BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET); else BGE_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW); return (0); } /* * Report current media status. */ static void bge_ifmedia_sts(if_t ifp, struct ifmediareq *ifmr) { struct bge_softc *sc = if_getsoftc(ifp, IF_DRIVER_SOFTC); struct mii_data *mii; BGE_LOCK(sc); if ((if_get(ifp, IF_FLAGS) & IFF_UP) == 0) { BGE_UNLOCK(sc); return; } if (sc->bge_flags & BGE_FLAG_TBI) { ifmr->ifm_status = IFM_AVALID; ifmr->ifm_active = IFM_ETHER; if (CSR_READ_4(sc, BGE_MAC_STS) & BGE_MACSTAT_TBI_PCS_SYNCHED) ifmr->ifm_status |= IFM_ACTIVE; else { ifmr->ifm_active |= IFM_NONE; BGE_UNLOCK(sc); return; } ifmr->ifm_active |= IFM_1000_SX; if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX) ifmr->ifm_active |= IFM_HDX; else ifmr->ifm_active |= IFM_FDX; BGE_UNLOCK(sc); return; } mii = device_get_softc(sc->bge_miibus); mii_pollstat(mii); ifmr->ifm_active = mii->mii_media_active; ifmr->ifm_status = mii->mii_media_status; BGE_UNLOCK(sc); } static int bge_ioctl(if_t ifp, u_long command, void *data, struct thread *td) { struct bge_softc *sc = if_getsoftc(ifp, IF_DRIVER_SOFTC); struct ifreq *ifr = (struct ifreq *) data; struct mii_data *mii; int flags, mask, error = 0; switch (command) { case SIOCSIFMTU: if (BGE_IS_JUMBO_CAPABLE(sc) || (sc->bge_flags & BGE_FLAG_JUMBO_STD)) { if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > BGE_JUMBO_MTU) { error = EINVAL; break; } } else if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > ETHERMTU) { error = EINVAL; break; } BGE_LOCK(sc); - if (if_get(ifp, IF_MTU) != ifr->ifr_mtu) { - if_set(ifp, IF_MTU, ifr->ifr_mtu); - if (sc->bge_flags & BGE_FLAG_RUNNING) { - sc->bge_flags &= ~BGE_FLAG_RUNNING; - bge_init_locked(sc); - } + sc->bge_mtu = ifr->ifr_mtu; + if (sc->bge_flags & BGE_FLAG_RUNNING) { + sc->bge_flags &= ~BGE_FLAG_RUNNING; + bge_init_locked(sc); } BGE_UNLOCK(sc); break; case SIOCSIFFLAGS: BGE_LOCK(sc); if (if_get(ifp, IF_FLAGS) & IFF_UP) { /* * If only the state of the PROMISC flag changed, * then just use the 'set promisc mode' command * instead of reinitializing the entire NIC. Doing * a full re-init means reloading the firmware and * waiting for it to start up, which may take a * second or two. Similarly for ALLMULTI. 
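 * The XOR of the new flags with the previously saved bge_if_flags below
 * yields exactly the bits that changed; e.g. if only IFF_PROMISC was
 * toggled, (flags & IFF_PROMISC) is non-zero while (flags & IFF_ALLMULTI)
 * is zero, so only the promiscuous filter is reprogrammed.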
*/ if (sc->bge_flags & BGE_FLAG_RUNNING) { flags = if_get(ifp, IF_FLAGS) ^ sc->bge_if_flags; if (flags & IFF_PROMISC) bge_setpromisc(sc); if (flags & IFF_ALLMULTI) bge_setmulti(sc); } else bge_init_locked(sc); } else { if (sc->bge_flags & BGE_FLAG_RUNNING) { bge_stop(sc); } } sc->bge_if_flags = if_get(ifp, IF_FLAGS); BGE_UNLOCK(sc); error = 0; break; case SIOCADDMULTI: case SIOCDELMULTI: if (sc->bge_flags & BGE_FLAG_RUNNING) { BGE_LOCK(sc); bge_setmulti(sc); BGE_UNLOCK(sc); error = 0; } break; case SIOCSIFMEDIA: case SIOCGIFMEDIA: if (sc->bge_flags & BGE_FLAG_TBI) { error = ifmedia_ioctl(ifp, ifr, &sc->bge_ifmedia, command); } else { mii = device_get_softc(sc->bge_miibus); error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); } break; case SIOCSIFCAP: - mask = ifr->ifr_reqcap ^ if_get(ifp, IF_CAPENABLE); + mask = ifr->ifr_reqcap ^ ifr->ifr_curcap; #ifdef DEVICE_POLLING if (mask & IFCAP_POLLING) { if (ifr->ifr_reqcap & IFCAP_POLLING) { error = ether_poll_register(bge_poll, ifp); if (error) return (error); BGE_LOCK(sc); BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR); bge_writembx(sc, BGE_MBX_IRQ0_LO, 1); - if_setcapenablebit(ifp, IFCAP_POLLING, 0); BGE_UNLOCK(sc); } else { error = ether_poll_deregister(ifp); /* Enable interrupt even in error case */ BGE_LOCK(sc); BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR); bge_writembx(sc, BGE_MBX_IRQ0_LO, 0); - if_setcapenablebit(ifp, 0, IFCAP_POLLING); BGE_UNLOCK(sc); + if (error) + return (error); } } #endif - if ((mask & IFCAP_TXCSUM) != 0 && - (if_get(ifp, IF_CAPABILITIES) & IFCAP_TXCSUM) != 0) { - if_xorflags(ifp, IF_CAPENABLE, IFCAP_TXCSUM); - if ((if_get(ifp, IF_CAPENABLE) & IFCAP_TXCSUM) != 0) - if_addflags(ifp, IF_HWASSIST, - sc->bge_csum_features); - else - if_clrflags(ifp, IF_HWASSIST, - sc->bge_csum_features); - } - - if ((mask & IFCAP_RXCSUM) != 0 && - (if_get(ifp, IF_CAPABILITIES) & IFCAP_RXCSUM) != 0) - if_xorflags(ifp, IF_CAPENABLE, IFCAP_RXCSUM); - - if ((mask & IFCAP_TSO4) != 0 && - (if_get(ifp, IF_CAPABILITIES) & IFCAP_TSO4) != 0) { - if_xorflags(ifp, IF_CAPENABLE, IFCAP_TSO4); - if ((if_get(ifp, IF_CAPENABLE) & IFCAP_TSO4) != 0) - if_addflags(ifp, IF_HWASSIST, CSUM_TSO); - else - if_clrflags(ifp, IF_HWASSIST, CSUM_TSO); - } - + sc->bge_capenable = ifr->ifr_reqcap; + ifr->ifr_hwassist = 0; + if ((sc->bge_capenable & IFCAP_TXCSUM) != 0) + ifr->ifr_hwassist = sc->bge_hwassist; + if ((sc->bge_capenable & IFCAP_TSO4) != 0 && + (sc->bge_flags & (BGE_FLAG_TSO | BGE_FLAG_TSO3)) != 0) + ifr->ifr_hwassist |= CSUM_TSO; if (mask & IFCAP_VLAN_MTU) { - if_xorflags(ifp, IF_CAPENABLE, IFCAP_VLAN_MTU); sc->bge_flags &= ~BGE_FLAG_RUNNING; bge_init(sc); } - - if ((mask & IFCAP_VLAN_HWTSO) != 0 && - (if_get(ifp, IF_CAPABILITIES) & IFCAP_VLAN_HWTSO) != 0) - if_xorflags(ifp, IF_CAPENABLE, IFCAP_VLAN_HWTSO); - if ((mask & IFCAP_VLAN_HWTAGGING) != 0 && - (if_get(ifp, IF_CAPABILITIES) & IFCAP_VLAN_HWTAGGING) != 0) { - if_xorflags(ifp, IF_CAPENABLE, IFCAP_VLAN_HWTAGGING); - if ((if_get(ifp, IF_CAPENABLE) & IFCAP_VLAN_HWTAGGING) == 0) - if_clrflags(ifp, IF_CAPENABLE, IFCAP_VLAN_HWTSO); + if ((mask & IFCAP_VLAN_HWTAGGING) != 0) { BGE_LOCK(sc); bge_setvlan(sc); BGE_UNLOCK(sc); } break; default: error = EOPNOTSUPP; break; } return (error); } static void bge_watchdog(struct bge_softc *sc) { if_t ifp; uint32_t status; BGE_LOCK_ASSERT(sc); if (sc->bge_timer == 0 || --sc->bge_timer) return; /* If pause frames are active then don't reset the hardware. 
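 * A link partner that has XOFFed us legitimately stalls transmit
 * completions, so resetting the controller in that state would be
 * spurious; the checks below simply re-arm the watchdog timer and wait
 * for the XOFF condition to clear.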
*/ if ((CSR_READ_4(sc, BGE_RX_MODE) & BGE_RXMODE_FLOWCTL_ENABLE) != 0) { status = CSR_READ_4(sc, BGE_RX_STS); if ((status & BGE_RXSTAT_REMOTE_XOFFED) != 0) { /* * If link partner has us in XOFF state then wait for * the condition to clear. */ CSR_WRITE_4(sc, BGE_RX_STS, status); sc->bge_timer = BGE_TX_TIMEOUT; return; } else if ((status & BGE_RXSTAT_RCVD_XOFF) != 0 && (status & BGE_RXSTAT_RCVD_XON) != 0) { /* * If link partner has us in XOFF state then wait for * the condition to clear. */ CSR_WRITE_4(sc, BGE_RX_STS, status); sc->bge_timer = BGE_TX_TIMEOUT; return; } /* * Any other condition is unexpected and the controller * should be reset. */ } ifp = sc->bge_ifp; if_printf(ifp, "watchdog timeout -- resetting\n"); sc->bge_flags &= ~BGE_FLAG_RUNNING; bge_init_locked(sc); if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); } static void bge_stop_block(struct bge_softc *sc, bus_size_t reg, uint32_t bit) { int i; BGE_CLRBIT(sc, reg, bit); for (i = 0; i < BGE_TIMEOUT; i++) { if ((CSR_READ_4(sc, reg) & bit) == 0) return; DELAY(100); } } /* * Stop the adapter and free any mbufs allocated to the * RX and TX lists. */ static void bge_stop(struct bge_softc *sc) { if_t ifp; BGE_LOCK_ASSERT(sc); ifp = sc->bge_ifp; callout_stop(&sc->bge_stat_ch); /* Disable host interrupts. */ BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR); bge_writembx(sc, BGE_MBX_IRQ0_LO, 1); /* * Tell firmware we're shutting down. */ bge_stop_fw(sc); bge_sig_pre_reset(sc, BGE_RESET_SHUTDOWN); /* * Disable all of the receiver blocks. */ bge_stop_block(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE); bge_stop_block(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE); bge_stop_block(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE); if (BGE_IS_5700_FAMILY(sc)) bge_stop_block(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE); bge_stop_block(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE); bge_stop_block(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE); bge_stop_block(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE); /* * Disable all of the transmit blocks. */ bge_stop_block(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE); bge_stop_block(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE); bge_stop_block(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE); bge_stop_block(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE); bge_stop_block(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE); if (BGE_IS_5700_FAMILY(sc)) bge_stop_block(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE); bge_stop_block(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE); /* * Shut down all of the memory managers and related * state machines. */ bge_stop_block(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE); bge_stop_block(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE); if (BGE_IS_5700_FAMILY(sc)) bge_stop_block(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE); CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF); CSR_WRITE_4(sc, BGE_FTQ_RESET, 0); if (!(BGE_IS_5705_PLUS(sc))) { BGE_CLRBIT(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE); BGE_CLRBIT(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE); } /* Update MAC statistics. */ if (BGE_IS_5705_PLUS(sc)) bge_stats_update_regs(sc); bge_reset(sc); bge_sig_legacy(sc, BGE_RESET_SHUTDOWN); bge_sig_post_reset(sc, BGE_RESET_SHUTDOWN); /* * Keep the ASF firmware running if up. */ if (sc->bge_asf_mode & ASF_STACKUP) BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); else BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); /* Free the RX lists. */ bge_free_rx_ring_std(sc); /* Free jumbo RX list. */ if (BGE_IS_JUMBO_CAPABLE(sc)) bge_free_rx_ring_jumbo(sc); /* Free TX buffers. */ bge_free_tx_ring(sc); sc->bge_tx_saved_considx = BGE_TXCONS_UNSET; /* Clear MAC's link state (PHY may still have link UP). 
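 * Only the driver's software notion of link is cleared here; the PHY
 * itself is left untouched, presumably so that the ASF/IPMI firmware
 * kept alive above retains its management link.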
*/ if (bootverbose && sc->bge_link) if_printf(sc->bge_ifp, "link DOWN\n"); sc->bge_link = 0; sc->bge_flags &= ~BGE_FLAG_RUNNING; } /* * Stop all chip I/O so that the kernel's probe routines don't * get confused by errant DMAs when rebooting. */ static int bge_shutdown(device_t dev) { struct bge_softc *sc; sc = device_get_softc(dev); BGE_LOCK(sc); bge_stop(sc); BGE_UNLOCK(sc); return (0); } static int bge_suspend(device_t dev) { struct bge_softc *sc; sc = device_get_softc(dev); BGE_LOCK(sc); bge_stop(sc); BGE_UNLOCK(sc); return (0); } static int bge_resume(device_t dev) { struct bge_softc *sc; if_t ifp; sc = device_get_softc(dev); BGE_LOCK(sc); ifp = sc->bge_ifp; if (if_get(ifp, IF_FLAGS) & IFF_UP) { bge_init_locked(sc); if (sc->bge_flags & BGE_FLAG_RUNNING) bge_start_locked(sc); } BGE_UNLOCK(sc); return (0); } static void bge_link_upd(struct bge_softc *sc) { struct mii_data *mii; uint32_t link, status; BGE_LOCK_ASSERT(sc); /* Clear 'pending link event' flag. */ sc->bge_link_evt = 0; /* * Process link state changes. * Grrr. The link status word in the status block does * not work correctly on the BCM5700 rev AX and BX chips, * according to all available information. Hence, we have * to enable MII interrupts in order to properly obtain * async link changes. Unfortunately, this also means that * we have to read the MAC status register to detect link * changes, thereby adding an additional register access to * the interrupt handler. * * XXX: perhaps link state detection procedure used for * BGE_CHIPID_BCM5700_B2 can be used for others BCM5700 revisions. */ if (sc->bge_asicrev == BGE_ASICREV_BCM5700 && sc->bge_chipid != BGE_CHIPID_BCM5700_B2) { status = CSR_READ_4(sc, BGE_MAC_STS); if (status & BGE_MACSTAT_MI_INTERRUPT) { mii = device_get_softc(sc->bge_miibus); mii_pollstat(mii); if (!sc->bge_link && mii->mii_media_status & IFM_ACTIVE && IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) { sc->bge_link++; if (bootverbose) if_printf(sc->bge_ifp, "link UP\n"); } else if (sc->bge_link && (!(mii->mii_media_status & IFM_ACTIVE) || IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) { sc->bge_link = 0; if (bootverbose) if_printf(sc->bge_ifp, "link DOWN\n"); } /* Clear the interrupt. */ CSR_WRITE_4(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_MI_INTERRUPT); bge_miibus_readreg(sc->bge_dev, sc->bge_phy_addr, BRGPHY_MII_ISR); bge_miibus_writereg(sc->bge_dev, sc->bge_phy_addr, BRGPHY_MII_IMR, BRGPHY_INTRS); } return; } if (sc->bge_flags & BGE_FLAG_TBI) { status = CSR_READ_4(sc, BGE_MAC_STS); if (status & BGE_MACSTAT_TBI_PCS_SYNCHED) { if (!sc->bge_link) { sc->bge_link++; if (sc->bge_asicrev == BGE_ASICREV_BCM5704) { BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_TBI_SEND_CFGS); DELAY(40); } CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF); if (bootverbose) if_printf(sc->bge_ifp, "link UP\n"); if_link_state_change(sc->bge_ifp, LINK_STATE_UP); } } else if (sc->bge_link) { sc->bge_link = 0; if (bootverbose) if_printf(sc->bge_ifp, "link DOWN\n"); if_link_state_change(sc->bge_ifp, LINK_STATE_DOWN); } } else if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) { /* * Some broken BCM chips have BGE_STATFLAG_LINKSTATE_CHANGED bit * in status word always set. Workaround this bug by reading * PHY link status directly. */ link = (CSR_READ_4(sc, BGE_MI_STS) & BGE_MISTS_LINK) ? 
1 : 0; if (link != sc->bge_link || sc->bge_asicrev == BGE_ASICREV_BCM5700) { mii = device_get_softc(sc->bge_miibus); mii_pollstat(mii); if (!sc->bge_link && mii->mii_media_status & IFM_ACTIVE && IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) { sc->bge_link++; if (bootverbose) if_printf(sc->bge_ifp, "link UP\n"); } else if (sc->bge_link && (!(mii->mii_media_status & IFM_ACTIVE) || IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) { sc->bge_link = 0; if (bootverbose) if_printf(sc->bge_ifp, "link DOWN\n"); } } } else { /* * For controllers that call mii_tick, we have to poll * link status. */ mii = device_get_softc(sc->bge_miibus); mii_pollstat(mii); bge_miibus_statchg(sc->bge_dev); } /* Disable MAC attention when link is up. */ CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED | BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE | BGE_MACSTAT_LINK_CHANGED); } static void bge_add_sysctls(struct bge_softc *sc) { struct sysctl_ctx_list *ctx; struct sysctl_oid_list *children; int unit; ctx = device_get_sysctl_ctx(sc->bge_dev); children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->bge_dev)); #ifdef BGE_REGISTER_DEBUG SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "debug_info", CTLTYPE_INT | CTLFLAG_RW, sc, 0, bge_sysctl_debug_info, "I", "Debug Information"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "reg_read", CTLTYPE_INT | CTLFLAG_RW, sc, 0, bge_sysctl_reg_read, "I", "MAC Register Read"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "ape_read", CTLTYPE_INT | CTLFLAG_RW, sc, 0, bge_sysctl_ape_read, "I", "APE Register Read"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "mem_read", CTLTYPE_INT | CTLFLAG_RW, sc, 0, bge_sysctl_mem_read, "I", "Memory Read"); #endif unit = device_get_unit(sc->bge_dev); /* * A common design characteristic for many Broadcom client controllers * is that they only support a single outstanding DMA read operation * on the PCIe bus. This means that it will take twice as long to fetch * a TX frame that is split into header and payload buffers as it does * to fetch a single, contiguous TX frame (2 reads vs. 1 read). For * these controllers, coalescing buffers to reduce the number of memory * reads is effective way to get maximum performance(about 940Mbps). * Without collapsing TX buffers the maximum TCP bulk transfer * performance is about 850Mbps. However forcing coalescing mbufs * consumes a lot of CPU cycles, so leave it off by default. */ sc->bge_forced_collapse = 0; SYSCTL_ADD_INT(ctx, children, OID_AUTO, "forced_collapse", CTLFLAG_RWTUN, &sc->bge_forced_collapse, 0, "Number of fragmented TX buffers of a frame allowed before " "forced collapsing"); sc->bge_msi = 1; SYSCTL_ADD_INT(ctx, children, OID_AUTO, "msi", CTLFLAG_RDTUN, &sc->bge_msi, 0, "Enable MSI"); /* * It seems all Broadcom controllers have a bug that can generate UDP * datagrams with checksum value 0 when TX UDP checksum offloading is * enabled. Generating UDP checksum value 0 is RFC 768 violation. * Even though the probability of generating such UDP datagrams is * low, I don't want to see FreeBSD boxes to inject such datagrams * into network so disable UDP checksum offloading by default. Users * still override this behavior by setting a sysctl variable, * dev.bge.0.forced_udpcsum. 
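 * For example, 'sysctl dev.bge.0.forced_udpcsum=1' enables it at
 * runtime; since the knob is created with CTLFLAG_RWTUN below, the same
 * name should also be usable as a loader tunable.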
*/ sc->bge_forced_udpcsum = 0; SYSCTL_ADD_INT(ctx, children, OID_AUTO, "forced_udpcsum", CTLFLAG_RWTUN, &sc->bge_forced_udpcsum, 0, "Enable UDP checksum offloading even if controller can " "generate UDP checksum value 0"); if (BGE_IS_5705_PLUS(sc)) bge_add_sysctl_stats_regs(sc, ctx, children); else bge_add_sysctl_stats(sc, ctx, children); } #define BGE_SYSCTL_STAT(sc, ctx, desc, parent, node, oid) \ SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, oid, CTLTYPE_UINT|CTLFLAG_RD, \ sc, offsetof(struct bge_stats, node), bge_sysctl_stats, "IU", \ desc) static void bge_add_sysctl_stats(struct bge_softc *sc, struct sysctl_ctx_list *ctx, struct sysctl_oid_list *parent) { struct sysctl_oid *tree; struct sysctl_oid_list *children, *schildren; tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "stats", CTLFLAG_RD, NULL, "BGE Statistics"); schildren = children = SYSCTL_CHILDREN(tree); BGE_SYSCTL_STAT(sc, ctx, "Frames Dropped Due To Filters", children, COSFramesDroppedDueToFilters, "FramesDroppedDueToFilters"); BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Write Queue Full", children, nicDmaWriteQueueFull, "DmaWriteQueueFull"); BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Write High Priority Queue Full", children, nicDmaWriteHighPriQueueFull, "DmaWriteHighPriQueueFull"); BGE_SYSCTL_STAT(sc, ctx, "NIC No More RX Buffer Descriptors", children, nicNoMoreRxBDs, "NoMoreRxBDs"); BGE_SYSCTL_STAT(sc, ctx, "Discarded Input Frames", children, ifInDiscards, "InputDiscards"); BGE_SYSCTL_STAT(sc, ctx, "Input Errors", children, ifInErrors, "InputErrors"); BGE_SYSCTL_STAT(sc, ctx, "NIC Recv Threshold Hit", children, nicRecvThresholdHit, "RecvThresholdHit"); BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Read Queue Full", children, nicDmaReadQueueFull, "DmaReadQueueFull"); BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Read High Priority Queue Full", children, nicDmaReadHighPriQueueFull, "DmaReadHighPriQueueFull"); BGE_SYSCTL_STAT(sc, ctx, "NIC Send Data Complete Queue Full", children, nicSendDataCompQueueFull, "SendDataCompQueueFull"); BGE_SYSCTL_STAT(sc, ctx, "NIC Ring Set Send Producer Index", children, nicRingSetSendProdIndex, "RingSetSendProdIndex"); BGE_SYSCTL_STAT(sc, ctx, "NIC Ring Status Update", children, nicRingStatusUpdate, "RingStatusUpdate"); BGE_SYSCTL_STAT(sc, ctx, "NIC Interrupts", children, nicInterrupts, "Interrupts"); BGE_SYSCTL_STAT(sc, ctx, "NIC Avoided Interrupts", children, nicAvoidedInterrupts, "AvoidedInterrupts"); BGE_SYSCTL_STAT(sc, ctx, "NIC Send Threshold Hit", children, nicSendThresholdHit, "SendThresholdHit"); tree = SYSCTL_ADD_NODE(ctx, schildren, OID_AUTO, "rx", CTLFLAG_RD, NULL, "BGE RX Statistics"); children = SYSCTL_CHILDREN(tree); BGE_SYSCTL_STAT(sc, ctx, "Inbound Octets", children, rxstats.ifHCInOctets, "ifHCInOctets"); BGE_SYSCTL_STAT(sc, ctx, "Fragments", children, rxstats.etherStatsFragments, "Fragments"); BGE_SYSCTL_STAT(sc, ctx, "Inbound Unicast Packets", children, rxstats.ifHCInUcastPkts, "UnicastPkts"); BGE_SYSCTL_STAT(sc, ctx, "Inbound Multicast Packets", children, rxstats.ifHCInMulticastPkts, "MulticastPkts"); BGE_SYSCTL_STAT(sc, ctx, "FCS Errors", children, rxstats.dot3StatsFCSErrors, "FCSErrors"); BGE_SYSCTL_STAT(sc, ctx, "Alignment Errors", children, rxstats.dot3StatsAlignmentErrors, "AlignmentErrors"); BGE_SYSCTL_STAT(sc, ctx, "XON Pause Frames Received", children, rxstats.xonPauseFramesReceived, "xonPauseFramesReceived"); BGE_SYSCTL_STAT(sc, ctx, "XOFF Pause Frames Received", children, rxstats.xoffPauseFramesReceived, "xoffPauseFramesReceived"); BGE_SYSCTL_STAT(sc, ctx, "MAC Control Frames Received", children, 
rxstats.macControlFramesReceived, "ControlFramesReceived"); BGE_SYSCTL_STAT(sc, ctx, "XOFF State Entered", children, rxstats.xoffStateEntered, "xoffStateEntered"); BGE_SYSCTL_STAT(sc, ctx, "Frames Too Long", children, rxstats.dot3StatsFramesTooLong, "FramesTooLong"); BGE_SYSCTL_STAT(sc, ctx, "Jabbers", children, rxstats.etherStatsJabbers, "Jabbers"); BGE_SYSCTL_STAT(sc, ctx, "Undersized Packets", children, rxstats.etherStatsUndersizePkts, "UndersizePkts"); BGE_SYSCTL_STAT(sc, ctx, "Inbound Range Length Errors", children, rxstats.inRangeLengthError, "inRangeLengthError"); BGE_SYSCTL_STAT(sc, ctx, "Outbound Range Length Errors", children, rxstats.outRangeLengthError, "outRangeLengthError"); tree = SYSCTL_ADD_NODE(ctx, schildren, OID_AUTO, "tx", CTLFLAG_RD, NULL, "BGE TX Statistics"); children = SYSCTL_CHILDREN(tree); BGE_SYSCTL_STAT(sc, ctx, "Outbound Octets", children, txstats.ifHCOutOctets, "ifHCOutOctets"); BGE_SYSCTL_STAT(sc, ctx, "TX Collisions", children, txstats.etherStatsCollisions, "Collisions"); BGE_SYSCTL_STAT(sc, ctx, "XON Sent", children, txstats.outXonSent, "XonSent"); BGE_SYSCTL_STAT(sc, ctx, "XOFF Sent", children, txstats.outXoffSent, "XoffSent"); BGE_SYSCTL_STAT(sc, ctx, "Flow Control Done", children, txstats.flowControlDone, "flowControlDone"); BGE_SYSCTL_STAT(sc, ctx, "Internal MAC TX errors", children, txstats.dot3StatsInternalMacTransmitErrors, "InternalMacTransmitErrors"); BGE_SYSCTL_STAT(sc, ctx, "Single Collision Frames", children, txstats.dot3StatsSingleCollisionFrames, "SingleCollisionFrames"); BGE_SYSCTL_STAT(sc, ctx, "Multiple Collision Frames", children, txstats.dot3StatsMultipleCollisionFrames, "MultipleCollisionFrames"); BGE_SYSCTL_STAT(sc, ctx, "Deferred Transmissions", children, txstats.dot3StatsDeferredTransmissions, "DeferredTransmissions"); BGE_SYSCTL_STAT(sc, ctx, "Excessive Collisions", children, txstats.dot3StatsExcessiveCollisions, "ExcessiveCollisions"); BGE_SYSCTL_STAT(sc, ctx, "Late Collisions", children, txstats.dot3StatsLateCollisions, "LateCollisions"); BGE_SYSCTL_STAT(sc, ctx, "Outbound Unicast Packets", children, txstats.ifHCOutUcastPkts, "UnicastPkts"); BGE_SYSCTL_STAT(sc, ctx, "Outbound Multicast Packets", children, txstats.ifHCOutMulticastPkts, "MulticastPkts"); BGE_SYSCTL_STAT(sc, ctx, "Outbound Broadcast Packets", children, txstats.ifHCOutBroadcastPkts, "BroadcastPkts"); BGE_SYSCTL_STAT(sc, ctx, "Carrier Sense Errors", children, txstats.dot3StatsCarrierSenseErrors, "CarrierSenseErrors"); BGE_SYSCTL_STAT(sc, ctx, "Outbound Discards", children, txstats.ifOutDiscards, "Discards"); BGE_SYSCTL_STAT(sc, ctx, "Outbound Errors", children, txstats.ifOutErrors, "Errors"); } #undef BGE_SYSCTL_STAT #define BGE_SYSCTL_STAT_ADD64(c, h, n, p, d) \ SYSCTL_ADD_UQUAD(c, h, OID_AUTO, n, CTLFLAG_RD, p, d) static void bge_add_sysctl_stats_regs(struct bge_softc *sc, struct sysctl_ctx_list *ctx, struct sysctl_oid_list *parent) { struct sysctl_oid *tree; struct sysctl_oid_list *child, *schild; struct bge_mac_stats *stats; stats = &sc->bge_mac_stats; tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "stats", CTLFLAG_RD, NULL, "BGE Statistics"); schild = child = SYSCTL_CHILDREN(tree); BGE_SYSCTL_STAT_ADD64(ctx, child, "FramesDroppedDueToFilters", &stats->FramesDroppedDueToFilters, "Frames Dropped Due to Filters"); BGE_SYSCTL_STAT_ADD64(ctx, child, "DmaWriteQueueFull", &stats->DmaWriteQueueFull, "NIC DMA Write Queue Full"); BGE_SYSCTL_STAT_ADD64(ctx, child, "DmaWriteHighPriQueueFull", &stats->DmaWriteHighPriQueueFull, "NIC DMA Write High Priority Queue Full"); 
BGE_SYSCTL_STAT_ADD64(ctx, child, "NoMoreRxBDs", &stats->NoMoreRxBDs, "NIC No More RX Buffer Descriptors"); BGE_SYSCTL_STAT_ADD64(ctx, child, "InputDiscards", &stats->InputDiscards, "Discarded Input Frames"); BGE_SYSCTL_STAT_ADD64(ctx, child, "InputErrors", &stats->InputErrors, "Input Errors"); BGE_SYSCTL_STAT_ADD64(ctx, child, "RecvThresholdHit", &stats->RecvThresholdHit, "NIC Recv Threshold Hit"); tree = SYSCTL_ADD_NODE(ctx, schild, OID_AUTO, "rx", CTLFLAG_RD, NULL, "BGE RX Statistics"); child = SYSCTL_CHILDREN(tree); BGE_SYSCTL_STAT_ADD64(ctx, child, "ifHCInOctets", &stats->ifHCInOctets, "Inbound Octets"); BGE_SYSCTL_STAT_ADD64(ctx, child, "Fragments", &stats->etherStatsFragments, "Fragments"); BGE_SYSCTL_STAT_ADD64(ctx, child, "UnicastPkts", &stats->ifHCInUcastPkts, "Inbound Unicast Packets"); BGE_SYSCTL_STAT_ADD64(ctx, child, "MulticastPkts", &stats->ifHCInMulticastPkts, "Inbound Multicast Packets"); BGE_SYSCTL_STAT_ADD64(ctx, child, "BroadcastPkts", &stats->ifHCInBroadcastPkts, "Inbound Broadcast Packets"); BGE_SYSCTL_STAT_ADD64(ctx, child, "FCSErrors", &stats->dot3StatsFCSErrors, "FCS Errors"); BGE_SYSCTL_STAT_ADD64(ctx, child, "AlignmentErrors", &stats->dot3StatsAlignmentErrors, "Alignment Errors"); BGE_SYSCTL_STAT_ADD64(ctx, child, "xonPauseFramesReceived", &stats->xonPauseFramesReceived, "XON Pause Frames Received"); BGE_SYSCTL_STAT_ADD64(ctx, child, "xoffPauseFramesReceived", &stats->xoffPauseFramesReceived, "XOFF Pause Frames Received"); BGE_SYSCTL_STAT_ADD64(ctx, child, "ControlFramesReceived", &stats->macControlFramesReceived, "MAC Control Frames Received"); BGE_SYSCTL_STAT_ADD64(ctx, child, "xoffStateEntered", &stats->xoffStateEntered, "XOFF State Entered"); BGE_SYSCTL_STAT_ADD64(ctx, child, "FramesTooLong", &stats->dot3StatsFramesTooLong, "Frames Too Long"); BGE_SYSCTL_STAT_ADD64(ctx, child, "Jabbers", &stats->etherStatsJabbers, "Jabbers"); BGE_SYSCTL_STAT_ADD64(ctx, child, "UndersizePkts", &stats->etherStatsUndersizePkts, "Undersized Packets"); tree = SYSCTL_ADD_NODE(ctx, schild, OID_AUTO, "tx", CTLFLAG_RD, NULL, "BGE TX Statistics"); child = SYSCTL_CHILDREN(tree); BGE_SYSCTL_STAT_ADD64(ctx, child, "ifHCOutOctets", &stats->ifHCOutOctets, "Outbound Octets"); BGE_SYSCTL_STAT_ADD64(ctx, child, "Collisions", &stats->etherStatsCollisions, "TX Collisions"); BGE_SYSCTL_STAT_ADD64(ctx, child, "XonSent", &stats->outXonSent, "XON Sent"); BGE_SYSCTL_STAT_ADD64(ctx, child, "XoffSent", &stats->outXoffSent, "XOFF Sent"); BGE_SYSCTL_STAT_ADD64(ctx, child, "InternalMacTransmitErrors", &stats->dot3StatsInternalMacTransmitErrors, "Internal MAC TX Errors"); BGE_SYSCTL_STAT_ADD64(ctx, child, "SingleCollisionFrames", &stats->dot3StatsSingleCollisionFrames, "Single Collision Frames"); BGE_SYSCTL_STAT_ADD64(ctx, child, "MultipleCollisionFrames", &stats->dot3StatsMultipleCollisionFrames, "Multiple Collision Frames"); BGE_SYSCTL_STAT_ADD64(ctx, child, "DeferredTransmissions", &stats->dot3StatsDeferredTransmissions, "Deferred Transmissions"); BGE_SYSCTL_STAT_ADD64(ctx, child, "ExcessiveCollisions", &stats->dot3StatsExcessiveCollisions, "Excessive Collisions"); BGE_SYSCTL_STAT_ADD64(ctx, child, "LateCollisions", &stats->dot3StatsLateCollisions, "Late Collisions"); BGE_SYSCTL_STAT_ADD64(ctx, child, "UnicastPkts", &stats->ifHCOutUcastPkts, "Outbound Unicast Packets"); BGE_SYSCTL_STAT_ADD64(ctx, child, "MulticastPkts", &stats->ifHCOutMulticastPkts, "Outbound Multicast Packets"); BGE_SYSCTL_STAT_ADD64(ctx, child, "BroadcastPkts", &stats->ifHCOutBroadcastPkts, "Outbound Broadcast Packets"); } #undef 
BGE_SYSCTL_STAT_ADD64 static int bge_sysctl_stats(SYSCTL_HANDLER_ARGS) { struct bge_softc *sc; uint32_t result; int offset; sc = (struct bge_softc *)arg1; offset = arg2; result = CSR_READ_4(sc, BGE_MEMWIN_START + BGE_STATS_BLOCK + offset + offsetof(bge_hostaddr, bge_addr_lo)); return (sysctl_handle_int(oidp, &result, 0, req)); } #ifdef BGE_REGISTER_DEBUG static int bge_sysctl_debug_info(SYSCTL_HANDLER_ARGS) { struct bge_softc *sc; uint16_t *sbdata; int error, result, sbsz; int i, j; result = -1; error = sysctl_handle_int(oidp, &result, 0, req); if (error || (req->newptr == NULL)) return (error); if (result == 1) { sc = (struct bge_softc *)arg1; if (sc->bge_asicrev == BGE_ASICREV_BCM5700 && sc->bge_chipid != BGE_CHIPID_BCM5700_C0) sbsz = BGE_STATUS_BLK_SZ; else sbsz = 32; sbdata = (uint16_t *)sc->bge_ldata.bge_status_block; printf("Status Block:\n"); BGE_LOCK(sc); bus_dmamap_sync(sc->bge_cdata.bge_status_tag, sc->bge_cdata.bge_status_map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); for (i = 0x0; i < sbsz / sizeof(uint16_t); ) { printf("%06x:", i); for (j = 0; j < 8; j++) printf(" %04x", sbdata[i++]); printf("\n"); } printf("Registers:\n"); for (i = 0x800; i < 0xA00; ) { printf("%06x:", i); for (j = 0; j < 8; j++) { printf(" %08x", CSR_READ_4(sc, i)); i += 4; } printf("\n"); } BGE_UNLOCK(sc); printf("Hardware Flags:\n"); if (BGE_IS_5717_PLUS(sc)) printf(" - 5717 Plus\n"); if (BGE_IS_5755_PLUS(sc)) printf(" - 5755 Plus\n"); if (BGE_IS_575X_PLUS(sc)) printf(" - 575X Plus\n"); if (BGE_IS_5705_PLUS(sc)) printf(" - 5705 Plus\n"); if (BGE_IS_5714_FAMILY(sc)) printf(" - 5714 Family\n"); if (BGE_IS_5700_FAMILY(sc)) printf(" - 5700 Family\n"); if (sc->bge_flags & BGE_FLAG_JUMBO) printf(" - Supports Jumbo Frames\n"); if (sc->bge_flags & BGE_FLAG_PCIX) printf(" - PCI-X Bus\n"); if (sc->bge_flags & BGE_FLAG_PCIE) printf(" - PCI Express Bus\n"); if (sc->bge_phy_flags & BGE_PHY_NO_3LED) printf(" - No 3 LEDs\n"); if (sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) printf(" - RX Alignment Bug\n"); } return (error); } static int bge_sysctl_reg_read(SYSCTL_HANDLER_ARGS) { struct bge_softc *sc; int error; uint16_t result; uint32_t val; result = -1; error = sysctl_handle_int(oidp, &result, 0, req); if (error || (req->newptr == NULL)) return (error); if (result < 0x8000) { sc = (struct bge_softc *)arg1; val = CSR_READ_4(sc, result); printf("reg 0x%06X = 0x%08X\n", result, val); } return (error); } static int bge_sysctl_ape_read(SYSCTL_HANDLER_ARGS) { struct bge_softc *sc; int error; uint16_t result; uint32_t val; result = -1; error = sysctl_handle_int(oidp, &result, 0, req); if (error || (req->newptr == NULL)) return (error); if (result < 0x8000) { sc = (struct bge_softc *)arg1; val = APE_READ_4(sc, result); printf("reg 0x%06X = 0x%08X\n", result, val); } return (error); } static int bge_sysctl_mem_read(SYSCTL_HANDLER_ARGS) { struct bge_softc *sc; int error; uint16_t result; uint32_t val; result = -1; error = sysctl_handle_int(oidp, &result, 0, req); if (error || (req->newptr == NULL)) return (error); if (result < 0x8000) { sc = (struct bge_softc *)arg1; val = bge_readmem_ind(sc, result); printf("mem 0x%06X = 0x%08X\n", result, val); } return (error); } #endif static int bge_get_eaddr_fw(struct bge_softc *sc, uint8_t ether_addr[]) { if (sc->bge_flags & BGE_FLAG_EADDR) return (1); #ifdef __sparc64__ OF_getetheraddr(sc->bge_dev, ether_addr); return (0); #endif return (1); } static int bge_get_eaddr_mem(struct bge_softc *sc, uint8_t ether_addr[]) { uint32_t mac_addr; mac_addr = bge_readmem_ind(sc, 
BGE_SRAM_MAC_ADDR_HIGH_MB); if ((mac_addr >> 16) == 0x484b) { ether_addr[0] = (uint8_t)(mac_addr >> 8); ether_addr[1] = (uint8_t)mac_addr; mac_addr = bge_readmem_ind(sc, BGE_SRAM_MAC_ADDR_LOW_MB); ether_addr[2] = (uint8_t)(mac_addr >> 24); ether_addr[3] = (uint8_t)(mac_addr >> 16); ether_addr[4] = (uint8_t)(mac_addr >> 8); ether_addr[5] = (uint8_t)mac_addr; return (0); } return (1); } static int bge_get_eaddr_nvram(struct bge_softc *sc, uint8_t ether_addr[]) { int mac_offset = BGE_EE_MAC_OFFSET; if (sc->bge_asicrev == BGE_ASICREV_BCM5906) mac_offset = BGE_EE_MAC_OFFSET_5906; return (bge_read_nvram(sc, ether_addr, mac_offset + 2, ETHER_ADDR_LEN)); } static int bge_get_eaddr_eeprom(struct bge_softc *sc, uint8_t ether_addr[]) { if (sc->bge_asicrev == BGE_ASICREV_BCM5906) return (1); return (bge_read_eeprom(sc, ether_addr, BGE_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN)); } static int bge_get_eaddr(struct bge_softc *sc, uint8_t eaddr[]) { static const bge_eaddr_fcn_t bge_eaddr_funcs[] = { /* NOTE: Order is critical */ bge_get_eaddr_fw, bge_get_eaddr_mem, bge_get_eaddr_nvram, bge_get_eaddr_eeprom, NULL }; const bge_eaddr_fcn_t *func; for (func = bge_eaddr_funcs; *func != NULL; ++func) { if ((*func)(sc, eaddr) == 0) break; } return (*func == NULL ? ENXIO : 0); } static uint64_t bge_get_counter(if_t ifp, ift_counter cnt) { struct bge_softc *sc; struct bge_mac_stats *stats; sc = if_getsoftc(ifp, IF_DRIVER_SOFTC); if (!BGE_IS_5705_PLUS(sc)) return (if_get_counter_default(ifp, cnt)); stats = &sc->bge_mac_stats; switch (cnt) { case IFCOUNTER_IERRORS: return (stats->NoMoreRxBDs + stats->InputDiscards + stats->InputErrors); case IFCOUNTER_COLLISIONS: return (stats->etherStatsCollisions); default: return (if_get_counter_default(ifp, cnt)); } } Index: projects/ifnet/sys/dev/bge/if_bgereg.h =================================================================== --- projects/ifnet/sys/dev/bge/if_bgereg.h (revision 277242) +++ projects/ifnet/sys/dev/bge/if_bgereg.h (revision 277243) @@ -1,3067 +1,3069 @@ /*- * Copyright (c) 2001 Wind River Systems * Copyright (c) 1997, 1998, 1999, 2001 * Bill Paul . All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Bill Paul. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ /* * BCM570x memory map. The internal memory layout varies somewhat * depending on whether or not we have external SSRAM attached. * The BCM5700 can have up to 16MB of external memory. The BCM5701 * is apparently not designed to use external SSRAM. The mappings * up to the first 4 send rings are the same for both internal and * external memory configurations. Note that mini RX ring space is * only available with external SSRAM configurations, which means * the mini RX ring is not supported on the BCM5701. * * The NIC's memory can be accessed by the host in one of 3 ways: * * 1) Indirect register access. The MEMWIN_BASEADDR and MEMWIN_DATA * registers in PCI config space can be used to read any 32-bit * address within the NIC's memory. * * 2) Memory window access. The MEMWIN_BASEADDR register in PCI config * space can be used in conjunction with the memory window in the * device register space at offset 0x8000 to read any 32K chunk * of NIC memory. * * 3) Flat mode. If the 'flat mode' bit in the PCI state register is * set, the device I/O mapping consumes 32MB of host address space, * allowing all of the registers and internal NIC memory to be * accessed directly. NIC memory addresses are offset by 0x01000000. * Flat mode consumes so much host address space that it is not * recommended. 
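 *
 * As a rough illustration of method 1 (a sketch only, assuming the
 * standard pci_read_config()/pci_write_config() config-space accessors),
 * an indirect 32-bit read of NIC address 'off' looks roughly like what
 * the driver's bge_readmem_ind() does:
 *
 *	pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
 *	val = pci_read_config(dev, BGE_PCI_MEMWIN_DATA, 4);
 *	pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4);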
*/ #define BGE_PAGE_ZERO 0x00000000 #define BGE_PAGE_ZERO_END 0x000000FF #define BGE_SEND_RING_RCB 0x00000100 #define BGE_SEND_RING_RCB_END 0x000001FF #define BGE_RX_RETURN_RING_RCB 0x00000200 #define BGE_RX_RETURN_RING_RCB_END 0x000002FF #define BGE_STATS_BLOCK 0x00000300 #define BGE_STATS_BLOCK_END 0x00000AFF #define BGE_STATUS_BLOCK 0x00000B00 #define BGE_STATUS_BLOCK_END 0x00000B4F #define BGE_SRAM_FW_MB 0x00000B50 #define BGE_SRAM_DATA_SIG 0x00000B54 #define BGE_SRAM_DATA_CFG 0x00000B58 #define BGE_SRAM_FW_CMD_MB 0x00000B78 #define BGE_SRAM_FW_CMD_LEN_MB 0x00000B7C #define BGE_SRAM_FW_CMD_DATA_MB 0x00000B80 #define BGE_SRAM_FW_DRV_STATE_MB 0x00000C04 #define BGE_SRAM_MAC_ADDR_HIGH_MB 0x00000C14 #define BGE_SRAM_MAC_ADDR_LOW_MB 0x00000C18 #define BGE_SOFTWARE_GENCOMM_END 0x00000FFF #define BGE_UNMAPPED 0x00001000 #define BGE_UNMAPPED_END 0x00001FFF #define BGE_DMA_DESCRIPTORS 0x00002000 #define BGE_DMA_DESCRIPTORS_END 0x00003FFF #define BGE_SEND_RING_5717 0x00004000 #define BGE_SEND_RING_1_TO_4 0x00004000 #define BGE_SEND_RING_1_TO_4_END 0x00005FFF /* Firmware interface */ #define BGE_SRAM_DATA_SIG_MAGIC 0x4B657654 /* 'KevT' */ #define BGE_FW_CMD_DRV_ALIVE 0x00000001 #define BGE_FW_CMD_PAUSE 0x00000002 #define BGE_FW_CMD_IPV4_ADDR_CHANGE 0x00000003 #define BGE_FW_CMD_IPV6_ADDR_CHANGE 0x00000004 #define BGE_FW_CMD_LINK_UPDATE 0x0000000C #define BGE_FW_CMD_DRV_ALIVE2 0x0000000D #define BGE_FW_CMD_DRV_ALIVE3 0x0000000E #define BGE_FW_HB_TIMEOUT_SEC 3 #define BGE_FW_DRV_STATE_START 0x00000001 #define BGE_FW_DRV_STATE_START_DONE 0x80000001 #define BGE_FW_DRV_STATE_UNLOAD 0x00000002 #define BGE_FW_DRV_STATE_UNLOAD_DONE 0x80000002 #define BGE_FW_DRV_STATE_WOL 0x00000003 #define BGE_FW_DRV_STATE_SUSPEND 0x00000004 /* Mappings for internal memory configuration */ #define BGE_STD_RX_RINGS 0x00006000 #define BGE_STD_RX_RINGS_END 0x00006FFF #define BGE_JUMBO_RX_RINGS 0x00007000 #define BGE_JUMBO_RX_RINGS_END 0x00007FFF #define BGE_BUFFPOOL_1 0x00008000 #define BGE_BUFFPOOL_1_END 0x0000FFFF #define BGE_BUFFPOOL_2 0x00010000 /* or expansion ROM */ #define BGE_BUFFPOOL_2_END 0x00017FFF #define BGE_BUFFPOOL_3 0x00018000 /* or expansion ROM */ #define BGE_BUFFPOOL_3_END 0x0001FFFF #define BGE_STD_RX_RINGS_5717 0x00040000 #define BGE_JUMBO_RX_RINGS_5717 0x00044400 /* Mappings for external SSRAM configurations */ #define BGE_SEND_RING_5_TO_6 0x00006000 #define BGE_SEND_RING_5_TO_6_END 0x00006FFF #define BGE_SEND_RING_7_TO_8 0x00007000 #define BGE_SEND_RING_7_TO_8_END 0x00007FFF #define BGE_SEND_RING_9_TO_16 0x00008000 #define BGE_SEND_RING_9_TO_16_END 0x0000BFFF #define BGE_EXT_STD_RX_RINGS 0x0000C000 #define BGE_EXT_STD_RX_RINGS_END 0x0000CFFF #define BGE_EXT_JUMBO_RX_RINGS 0x0000D000 #define BGE_EXT_JUMBO_RX_RINGS_END 0x0000DFFF #define BGE_MINI_RX_RINGS 0x0000E000 #define BGE_MINI_RX_RINGS_END 0x0000FFFF #define BGE_AVAIL_REGION1 0x00010000 /* or expansion ROM */ #define BGE_AVAIL_REGION1_END 0x00017FFF #define BGE_AVAIL_REGION2 0x00018000 /* or expansion ROM */ #define BGE_AVAIL_REGION2_END 0x0001FFFF #define BGE_EXT_SSRAM 0x00020000 #define BGE_EXT_SSRAM_END 0x000FFFFF /* * BCM570x register offsets. These are memory mapped registers * which can be accessed with the CSR_READ_4()/CSR_WRITE_4() macros. * Each register must be accessed using 32 bit operations. * * All registers are accessed through a 32K shared memory block. * The first group of registers are actually copies of the PCI * configuration space registers. */ /* * PCI registers defined in the PCI 2.2 spec. 
*/ #define BGE_PCI_VID 0x00 #define BGE_PCI_DID 0x02 #define BGE_PCI_CMD 0x04 #define BGE_PCI_STS 0x06 #define BGE_PCI_REV 0x08 #define BGE_PCI_CLASS 0x09 #define BGE_PCI_CACHESZ 0x0C #define BGE_PCI_LATTIMER 0x0D #define BGE_PCI_HDRTYPE 0x0E #define BGE_PCI_BIST 0x0F #define BGE_PCI_BAR0 0x10 #define BGE_PCI_BAR1 0x14 #define BGE_PCI_SUBSYS 0x2C #define BGE_PCI_SUBVID 0x2E #define BGE_PCI_ROMBASE 0x30 #define BGE_PCI_CAPPTR 0x34 #define BGE_PCI_INTLINE 0x3C #define BGE_PCI_INTPIN 0x3D #define BGE_PCI_MINGNT 0x3E #define BGE_PCI_MAXLAT 0x3F #define BGE_PCI_PCIXCAP 0x40 #define BGE_PCI_NEXTPTR_PM 0x41 #define BGE_PCI_PCIX_CMD 0x42 #define BGE_PCI_PCIX_STS 0x44 #define BGE_PCI_PWRMGMT_CAPID 0x48 #define BGE_PCI_NEXTPTR_VPD 0x49 #define BGE_PCI_PWRMGMT_CAPS 0x4A #define BGE_PCI_PWRMGMT_CMD 0x4C #define BGE_PCI_PWRMGMT_STS 0x4D #define BGE_PCI_PWRMGMT_DATA 0x4F #define BGE_PCI_VPD_CAPID 0x50 #define BGE_PCI_NEXTPTR_MSI 0x51 #define BGE_PCI_VPD_ADDR 0x52 #define BGE_PCI_VPD_DATA 0x54 #define BGE_PCI_MSI_CAPID 0x58 #define BGE_PCI_NEXTPTR_NONE 0x59 #define BGE_PCI_MSI_CTL 0x5A #define BGE_PCI_MSI_ADDR_HI 0x5C #define BGE_PCI_MSI_ADDR_LO 0x60 #define BGE_PCI_MSI_DATA 0x64 /* * PCI Express definitions * According to * PCI Express base specification, REV. 1.0a */ /* PCI Express device control, 16bits */ #define BGE_PCIE_DEVCTL 0x08 #define BGE_PCIE_DEVCTL_MAX_READRQ_MASK 0x7000 #define BGE_PCIE_DEVCTL_MAX_READRQ_128 0x0000 #define BGE_PCIE_DEVCTL_MAX_READRQ_256 0x1000 #define BGE_PCIE_DEVCTL_MAX_READRQ_512 0x2000 #define BGE_PCIE_DEVCTL_MAX_READRQ_1024 0x3000 #define BGE_PCIE_DEVCTL_MAX_READRQ_2048 0x4000 #define BGE_PCIE_DEVCTL_MAX_READRQ_4096 0x5000 /* PCI MSI. ??? */ #define BGE_PCIE_CAPID_REG 0xD0 #define BGE_PCIE_CAPID 0x10 /* * PCI registers specific to the BCM570x family. */ #define BGE_PCI_MISC_CTL 0x68 #define BGE_PCI_DMA_RW_CTL 0x6C #define BGE_PCI_PCISTATE 0x70 #define BGE_PCI_CLKCTL 0x74 #define BGE_PCI_REG_BASEADDR 0x78 #define BGE_PCI_MEMWIN_BASEADDR 0x7C #define BGE_PCI_REG_DATA 0x80 #define BGE_PCI_MEMWIN_DATA 0x84 #define BGE_PCI_MODECTL 0x88 #define BGE_PCI_MISC_CFG 0x8C #define BGE_PCI_MISC_LOCALCTL 0x90 #define BGE_PCI_UNDI_RX_STD_PRODIDX_HI 0x98 #define BGE_PCI_UNDI_RX_STD_PRODIDX_LO 0x9C #define BGE_PCI_UNDI_RX_RTN_CONSIDX_HI 0xA0 #define BGE_PCI_UNDI_RX_RTN_CONSIDX_LO 0xA4 #define BGE_PCI_UNDI_TX_BD_PRODIDX_HI 0xA8 #define BGE_PCI_UNDI_TX_BD_PRODIDX_LO 0xAC #define BGE_PCI_ISR_MBX_HI 0xB0 #define BGE_PCI_ISR_MBX_LO 0xB4 #define BGE_PCI_PRODID_ASICREV 0xBC #define BGE_PCI_GEN2_PRODID_ASICREV 0xF4 #define BGE_PCI_GEN15_PRODID_ASICREV 0xFC /* PCI Misc. 
Host control register */ #define BGE_PCIMISCCTL_CLEAR_INTA 0x00000001 #define BGE_PCIMISCCTL_MASK_PCI_INTR 0x00000002 #define BGE_PCIMISCCTL_ENDIAN_BYTESWAP 0x00000004 #define BGE_PCIMISCCTL_ENDIAN_WORDSWAP 0x00000008 #define BGE_PCIMISCCTL_PCISTATE_RW 0x00000010 #define BGE_PCIMISCCTL_CLOCKCTL_RW 0x00000020 #define BGE_PCIMISCCTL_REG_WORDSWAP 0x00000040 #define BGE_PCIMISCCTL_INDIRECT_ACCESS 0x00000080 #define BGE_PCIMISCCTL_TAGGED_STATUS 0x00000200 #define BGE_PCIMISCCTL_ASICREV 0xFFFF0000 #define BGE_PCIMISCCTL_ASICREV_SHIFT 16 #define BGE_HIF_SWAP_OPTIONS (BGE_PCIMISCCTL_ENDIAN_WORDSWAP) #define BGE_INIT \ (BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_CLEAR_INTA| \ BGE_PCIMISCCTL_MASK_PCI_INTR|BGE_PCIMISCCTL_INDIRECT_ACCESS) #define BGE_CHIPID_TIGON_I 0x4000 #define BGE_CHIPID_TIGON_II 0x6000 #define BGE_CHIPID_BCM5700_A0 0x7000 #define BGE_CHIPID_BCM5700_A1 0x7001 #define BGE_CHIPID_BCM5700_B0 0x7100 #define BGE_CHIPID_BCM5700_B1 0x7101 #define BGE_CHIPID_BCM5700_B2 0x7102 #define BGE_CHIPID_BCM5700_B3 0x7103 #define BGE_CHIPID_BCM5700_ALTIMA 0x7104 #define BGE_CHIPID_BCM5700_C0 0x7200 #define BGE_CHIPID_BCM5701_A0 0x0000 /* grrrr */ #define BGE_CHIPID_BCM5701_B0 0x0100 #define BGE_CHIPID_BCM5701_B2 0x0102 #define BGE_CHIPID_BCM5701_B5 0x0105 #define BGE_CHIPID_BCM5703_A0 0x1000 #define BGE_CHIPID_BCM5703_A1 0x1001 #define BGE_CHIPID_BCM5703_A2 0x1002 #define BGE_CHIPID_BCM5703_A3 0x1003 #define BGE_CHIPID_BCM5703_B0 0x1100 #define BGE_CHIPID_BCM5704_A0 0x2000 #define BGE_CHIPID_BCM5704_A1 0x2001 #define BGE_CHIPID_BCM5704_A2 0x2002 #define BGE_CHIPID_BCM5704_A3 0x2003 #define BGE_CHIPID_BCM5704_B0 0x2100 #define BGE_CHIPID_BCM5705_A0 0x3000 #define BGE_CHIPID_BCM5705_A1 0x3001 #define BGE_CHIPID_BCM5705_A2 0x3002 #define BGE_CHIPID_BCM5705_A3 0x3003 #define BGE_CHIPID_BCM5750_A0 0x4000 #define BGE_CHIPID_BCM5750_A1 0x4001 #define BGE_CHIPID_BCM5750_A3 0x4000 #define BGE_CHIPID_BCM5750_B0 0x4100 #define BGE_CHIPID_BCM5750_B1 0x4101 #define BGE_CHIPID_BCM5750_C0 0x4200 #define BGE_CHIPID_BCM5750_C1 0x4201 #define BGE_CHIPID_BCM5750_C2 0x4202 #define BGE_CHIPID_BCM5714_A0 0x5000 #define BGE_CHIPID_BCM5752_A0 0x6000 #define BGE_CHIPID_BCM5752_A1 0x6001 #define BGE_CHIPID_BCM5752_A2 0x6002 #define BGE_CHIPID_BCM5714_B0 0x8000 #define BGE_CHIPID_BCM5714_B3 0x8003 #define BGE_CHIPID_BCM5715_A0 0x9000 #define BGE_CHIPID_BCM5715_A1 0x9001 #define BGE_CHIPID_BCM5715_A3 0x9003 #define BGE_CHIPID_BCM5755_A0 0xa000 #define BGE_CHIPID_BCM5755_A1 0xa001 #define BGE_CHIPID_BCM5755_A2 0xa002 #define BGE_CHIPID_BCM5722_A0 0xa200 #define BGE_CHIPID_BCM5754_A0 0xb000 #define BGE_CHIPID_BCM5754_A1 0xb001 #define BGE_CHIPID_BCM5754_A2 0xb002 #define BGE_CHIPID_BCM5761_A0 0x5761000 #define BGE_CHIPID_BCM5761_A1 0x5761100 #define BGE_CHIPID_BCM5784_A0 0x5784000 #define BGE_CHIPID_BCM5784_A1 0x5784100 #define BGE_CHIPID_BCM5787_A0 0xb000 #define BGE_CHIPID_BCM5787_A1 0xb001 #define BGE_CHIPID_BCM5787_A2 0xb002 #define BGE_CHIPID_BCM5906_A0 0xc000 #define BGE_CHIPID_BCM5906_A1 0xc001 #define BGE_CHIPID_BCM5906_A2 0xc002 #define BGE_CHIPID_BCM57780_A0 0x57780000 #define BGE_CHIPID_BCM57780_A1 0x57780001 #define BGE_CHIPID_BCM5717_A0 0x05717000 #define BGE_CHIPID_BCM5717_B0 0x05717100 #define BGE_CHIPID_BCM5719_A0 0x05719000 #define BGE_CHIPID_BCM5720_A0 0x05720000 #define BGE_CHIPID_BCM5762_A0 0x05762000 #define BGE_CHIPID_BCM57765_A0 0x57785000 #define BGE_CHIPID_BCM57765_B0 0x57785100 /* shorthand one */ #define BGE_ASICREV(x) ((x) >> 12) #define BGE_ASICREV_BCM5701 0x00 #define BGE_ASICREV_BCM5703 0x01 #define 
BGE_ASICREV_BCM5704 0x02 #define BGE_ASICREV_BCM5705 0x03 #define BGE_ASICREV_BCM5750 0x04 #define BGE_ASICREV_BCM5714_A0 0x05 #define BGE_ASICREV_BCM5752 0x06 #define BGE_ASICREV_BCM5700 0x07 #define BGE_ASICREV_BCM5780 0x08 #define BGE_ASICREV_BCM5714 0x09 #define BGE_ASICREV_BCM5755 0x0a #define BGE_ASICREV_BCM5754 0x0b #define BGE_ASICREV_BCM5787 0x0b #define BGE_ASICREV_BCM5906 0x0c /* Should consult BGE_PCI_PRODID_ASICREV for ChipID */ #define BGE_ASICREV_USE_PRODID_REG 0x0f /* BGE_PCI_PRODID_ASICREV ASIC rev. identifiers. */ #define BGE_ASICREV_BCM5717 0x5717 #define BGE_ASICREV_BCM5719 0x5719 #define BGE_ASICREV_BCM5720 0x5720 #define BGE_ASICREV_BCM5761 0x5761 #define BGE_ASICREV_BCM5762 0x5762 #define BGE_ASICREV_BCM5784 0x5784 #define BGE_ASICREV_BCM5785 0x5785 #define BGE_ASICREV_BCM57765 0x57785 #define BGE_ASICREV_BCM57766 0x57766 #define BGE_ASICREV_BCM57780 0x57780 /* chip revisions */ #define BGE_CHIPREV(x) ((x) >> 8) #define BGE_CHIPREV_5700_AX 0x70 #define BGE_CHIPREV_5700_BX 0x71 #define BGE_CHIPREV_5700_CX 0x72 #define BGE_CHIPREV_5701_AX 0x00 #define BGE_CHIPREV_5703_AX 0x10 #define BGE_CHIPREV_5704_AX 0x20 #define BGE_CHIPREV_5704_BX 0x21 #define BGE_CHIPREV_5750_AX 0x40 #define BGE_CHIPREV_5750_BX 0x41 /* BGE_PCI_PRODID_ASICREV chip rev. identifiers. */ #define BGE_CHIPREV_5717_AX 0x57170 #define BGE_CHIPREV_5717_BX 0x57171 #define BGE_CHIPREV_5761_AX 0x57611 #define BGE_CHIPREV_57765_AX 0x577850 #define BGE_CHIPREV_5784_AX 0x57841 /* PCI DMA Read/Write Control register */ #define BGE_PCIDMARWCTL_MINDMA 0x000000FF #define BGE_PCIDMARWCTL_DIS_CACHE_ALIGNMENT 0x00000001 #define BGE_PCIDMARWCTL_RDADRR_BNDRY 0x00000700 #define BGE_PCIDMARWCTL_WRADDR_BNDRY 0x00003800 #define BGE_PCIDMARWCTL_ONEDMA_ATONCE 0x0000C000 #define BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL 0x00004000 #define BGE_PCIDMARWCTL_ONEDMA_ATONCE_LOCAL 0x00008000 #define BGE_PCIDMARWCTL_RD_WAT 0x00070000 #define BGE_PCIDMARWCTL_WR_WAT 0x00380000 #define BGE_PCIDMARWCTL_USE_MRM 0x00400000 #define BGE_PCIDMARWCTL_ASRT_ALL_BE 0x00800000 #define BGE_PCIDMARWCTL_DFLT_PCI_RD_CMD 0x0F000000 #define BGE_PCIDMARWCTL_DFLT_PCI_WR_CMD 0xF0000000 #define BGE_PCIDMARWCTL_RD_WAT_SHIFT(x) ((x) << 16) #define BGE_PCIDMARWCTL_WR_WAT_SHIFT(x) ((x) << 19) #define BGE_PCIDMARWCTL_RD_CMD_SHIFT(x) ((x) << 24) #define BGE_PCIDMARWCTL_WR_CMD_SHIFT(x) ((x) << 28) #define BGE_PCIDMARWCTL_TAGGED_STATUS_WA 0x00000080 #define BGE_PCIDMARWCTL_CRDRDR_RDMA_MRRS_MSK 0x00000380 #define BGE_PCI_READ_BNDRY_DISABLE 0x00000000 #define BGE_PCI_READ_BNDRY_16BYTES 0x00000100 #define BGE_PCI_READ_BNDRY_32BYTES 0x00000200 #define BGE_PCI_READ_BNDRY_64BYTES 0x00000300 #define BGE_PCI_READ_BNDRY_128BYTES 0x00000400 #define BGE_PCI_READ_BNDRY_256BYTES 0x00000500 #define BGE_PCI_READ_BNDRY_512BYTES 0x00000600 #define BGE_PCI_READ_BNDRY_1024BYTES 0x00000700 #define BGE_PCI_WRITE_BNDRY_DISABLE 0x00000000 #define BGE_PCI_WRITE_BNDRY_16BYTES 0x00000800 #define BGE_PCI_WRITE_BNDRY_32BYTES 0x00001000 #define BGE_PCI_WRITE_BNDRY_64BYTES 0x00001800 #define BGE_PCI_WRITE_BNDRY_128BYTES 0x00002000 #define BGE_PCI_WRITE_BNDRY_256BYTES 0x00002800 #define BGE_PCI_WRITE_BNDRY_512BYTES 0x00003000 #define BGE_PCI_WRITE_BNDRY_1024BYTES 0x00003800 /* * PCI state register -- note, this register is read only * unless the PCISTATE_WR bit of the PCI Misc. Host Control * register is set. 
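 * (In this header that bit is BGE_PCIMISCCTL_PCISTATE_RW in
 * BGE_PCI_MISC_CTL.)
 *
 * A minimal sketch of enabling writes, assuming the standard
 * pci_read_config()/pci_write_config() config-space accessors:
 *
 *	misc = pci_read_config(dev, BGE_PCI_MISC_CTL, 4);
 *	pci_write_config(dev, BGE_PCI_MISC_CTL,
 *	    misc | BGE_PCIMISCCTL_PCISTATE_RW, 4);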
*/ #define BGE_PCISTATE_FORCE_RESET 0x00000001 #define BGE_PCISTATE_INTR_STATE 0x00000002 #define BGE_PCISTATE_PCI_BUSMODE 0x00000004 /* 1 = PCI, 0 = PCI-X */ #define BGE_PCISTATE_PCI_BUSSPEED 0x00000008 /* 1 = 66/133, 0 = 33/66 */ #define BGE_PCISTATE_32BIT_BUS 0x00000010 /* 1 = 32bit, 0 = 64bit */ #define BGE_PCISTATE_ROM_ENABLE 0x00000020 #define BGE_PCISTATE_ROM_RETRY_ENABLE 0x00000040 #define BGE_PCISTATE_FLATVIEW_MODE 0x00000100 #define BGE_PCISTATE_PCI_TGT_RETRY_MAX 0x00000E00 #define BGE_PCISTATE_RETRY_SAME_DMA 0x00002000 #define BGE_PCISTATE_ALLOW_APE_CTLSPC_WR 0x00010000 #define BGE_PCISTATE_ALLOW_APE_SHMEM_WR 0x00020000 #define BGE_PCISTATE_ALLOW_APE_PSPACE_WR 0x00040000 /* * PCI Clock Control register -- note, this register is read only * unless the CLOCKCTL_RW bit of the PCI Misc. Host Control * register is set. */ #define BGE_PCICLOCKCTL_DETECTED_SPEED 0x0000000F #define BGE_PCICLOCKCTL_M66EN 0x00000080 #define BGE_PCICLOCKCTL_LOWPWR_CLKMODE 0x00000200 #define BGE_PCICLOCKCTL_RXCPU_CLK_DIS 0x00000400 #define BGE_PCICLOCKCTL_TXCPU_CLK_DIS 0x00000800 #define BGE_PCICLOCKCTL_ALTCLK 0x00001000 #define BGE_PCICLOCKCTL_ALTCLK_SRC 0x00002000 #define BGE_PCICLOCKCTL_PCIPLL_DISABLE 0x00004000 #define BGE_PCICLOCKCTL_SYSPLL_DISABLE 0x00008000 #define BGE_PCICLOCKCTL_BIST_ENABLE 0x00010000 #ifndef PCIM_CMD_MWIEN #define PCIM_CMD_MWIEN 0x0010 #endif #ifndef PCIM_CMD_INTxDIS #define PCIM_CMD_INTxDIS 0x0400 #endif /* BAR0 (MAC) Register Definitions */ /* * High priority mailbox registers * Each mailbox is 64-bits wide, though we only use the * lower 32 bits. To write a 64-bit value, write the upper 32 bits * first. The NIC will load the mailbox after the lower 32 bit word * has been updated. */ #define BGE_MBX_IRQ0_HI 0x0200 #define BGE_MBX_IRQ0_LO 0x0204 #define BGE_MBX_IRQ1_HI 0x0208 #define BGE_MBX_IRQ1_LO 0x020C #define BGE_MBX_IRQ2_HI 0x0210 #define BGE_MBX_IRQ2_LO 0x0214 #define BGE_MBX_IRQ3_HI 0x0218 #define BGE_MBX_IRQ3_LO 0x021C #define BGE_MBX_GEN0_HI 0x0220 #define BGE_MBX_GEN0_LO 0x0224 #define BGE_MBX_GEN1_HI 0x0228 #define BGE_MBX_GEN1_LO 0x022C #define BGE_MBX_GEN2_HI 0x0230 #define BGE_MBX_GEN2_LO 0x0234 #define BGE_MBX_GEN3_HI 0x0228 #define BGE_MBX_GEN3_LO 0x022C #define BGE_MBX_GEN4_HI 0x0240 #define BGE_MBX_GEN4_LO 0x0244 #define BGE_MBX_GEN5_HI 0x0248 #define BGE_MBX_GEN5_LO 0x024C #define BGE_MBX_GEN6_HI 0x0250 #define BGE_MBX_GEN6_LO 0x0254 #define BGE_MBX_GEN7_HI 0x0258 #define BGE_MBX_GEN7_LO 0x025C #define BGE_MBX_RELOAD_STATS_HI 0x0260 #define BGE_MBX_RELOAD_STATS_LO 0x0264 #define BGE_MBX_RX_STD_PROD_HI 0x0268 #define BGE_MBX_RX_STD_PROD_LO 0x026C #define BGE_MBX_RX_JUMBO_PROD_HI 0x0270 #define BGE_MBX_RX_JUMBO_PROD_LO 0x0274 #define BGE_MBX_RX_MINI_PROD_HI 0x0278 #define BGE_MBX_RX_MINI_PROD_LO 0x027C #define BGE_MBX_RX_CONS0_HI 0x0280 #define BGE_MBX_RX_CONS0_LO 0x0284 #define BGE_MBX_RX_CONS1_HI 0x0288 #define BGE_MBX_RX_CONS1_LO 0x028C #define BGE_MBX_RX_CONS2_HI 0x0290 #define BGE_MBX_RX_CONS2_LO 0x0294 #define BGE_MBX_RX_CONS3_HI 0x0298 #define BGE_MBX_RX_CONS3_LO 0x029C #define BGE_MBX_RX_CONS4_HI 0x02A0 #define BGE_MBX_RX_CONS4_LO 0x02A4 #define BGE_MBX_RX_CONS5_HI 0x02A8 #define BGE_MBX_RX_CONS5_LO 0x02AC #define BGE_MBX_RX_CONS6_HI 0x02B0 #define BGE_MBX_RX_CONS6_LO 0x02B4 #define BGE_MBX_RX_CONS7_HI 0x02B8 #define BGE_MBX_RX_CONS7_LO 0x02BC #define BGE_MBX_RX_CONS8_HI 0x02C0 #define BGE_MBX_RX_CONS8_LO 0x02C4 #define BGE_MBX_RX_CONS9_HI 0x02C8 #define BGE_MBX_RX_CONS9_LO 0x02CC #define BGE_MBX_RX_CONS10_HI 0x02D0 #define BGE_MBX_RX_CONS10_LO 0x02D4 
#define BGE_MBX_RX_CONS11_HI 0x02D8 #define BGE_MBX_RX_CONS11_LO 0x02DC #define BGE_MBX_RX_CONS12_HI 0x02E0 #define BGE_MBX_RX_CONS12_LO 0x02E4 #define BGE_MBX_RX_CONS13_HI 0x02E8 #define BGE_MBX_RX_CONS13_LO 0x02EC #define BGE_MBX_RX_CONS14_HI 0x02F0 #define BGE_MBX_RX_CONS14_LO 0x02F4 #define BGE_MBX_RX_CONS15_HI 0x02F8 #define BGE_MBX_RX_CONS15_LO 0x02FC #define BGE_MBX_TX_HOST_PROD0_HI 0x0300 #define BGE_MBX_TX_HOST_PROD0_LO 0x0304 #define BGE_MBX_TX_HOST_PROD1_HI 0x0308 #define BGE_MBX_TX_HOST_PROD1_LO 0x030C #define BGE_MBX_TX_HOST_PROD2_HI 0x0310 #define BGE_MBX_TX_HOST_PROD2_LO 0x0314 #define BGE_MBX_TX_HOST_PROD3_HI 0x0318 #define BGE_MBX_TX_HOST_PROD3_LO 0x031C #define BGE_MBX_TX_HOST_PROD4_HI 0x0320 #define BGE_MBX_TX_HOST_PROD4_LO 0x0324 #define BGE_MBX_TX_HOST_PROD5_HI 0x0328 #define BGE_MBX_TX_HOST_PROD5_LO 0x032C #define BGE_MBX_TX_HOST_PROD6_HI 0x0330 #define BGE_MBX_TX_HOST_PROD6_LO 0x0334 #define BGE_MBX_TX_HOST_PROD7_HI 0x0338 #define BGE_MBX_TX_HOST_PROD7_LO 0x033C #define BGE_MBX_TX_HOST_PROD8_HI 0x0340 #define BGE_MBX_TX_HOST_PROD8_LO 0x0344 #define BGE_MBX_TX_HOST_PROD9_HI 0x0348 #define BGE_MBX_TX_HOST_PROD9_LO 0x034C #define BGE_MBX_TX_HOST_PROD10_HI 0x0350 #define BGE_MBX_TX_HOST_PROD10_LO 0x0354 #define BGE_MBX_TX_HOST_PROD11_HI 0x0358 #define BGE_MBX_TX_HOST_PROD11_LO 0x035C #define BGE_MBX_TX_HOST_PROD12_HI 0x0360 #define BGE_MBX_TX_HOST_PROD12_LO 0x0364 #define BGE_MBX_TX_HOST_PROD13_HI 0x0368 #define BGE_MBX_TX_HOST_PROD13_LO 0x036C #define BGE_MBX_TX_HOST_PROD14_HI 0x0370 #define BGE_MBX_TX_HOST_PROD14_LO 0x0374 #define BGE_MBX_TX_HOST_PROD15_HI 0x0378 #define BGE_MBX_TX_HOST_PROD15_LO 0x037C #define BGE_MBX_TX_NIC_PROD0_HI 0x0380 #define BGE_MBX_TX_NIC_PROD0_LO 0x0384 #define BGE_MBX_TX_NIC_PROD1_HI 0x0388 #define BGE_MBX_TX_NIC_PROD1_LO 0x038C #define BGE_MBX_TX_NIC_PROD2_HI 0x0390 #define BGE_MBX_TX_NIC_PROD2_LO 0x0394 #define BGE_MBX_TX_NIC_PROD3_HI 0x0398 #define BGE_MBX_TX_NIC_PROD3_LO 0x039C #define BGE_MBX_TX_NIC_PROD4_HI 0x03A0 #define BGE_MBX_TX_NIC_PROD4_LO 0x03A4 #define BGE_MBX_TX_NIC_PROD5_HI 0x03A8 #define BGE_MBX_TX_NIC_PROD5_LO 0x03AC #define BGE_MBX_TX_NIC_PROD6_HI 0x03B0 #define BGE_MBX_TX_NIC_PROD6_LO 0x03B4 #define BGE_MBX_TX_NIC_PROD7_HI 0x03B8 #define BGE_MBX_TX_NIC_PROD7_LO 0x03BC #define BGE_MBX_TX_NIC_PROD8_HI 0x03C0 #define BGE_MBX_TX_NIC_PROD8_LO 0x03C4 #define BGE_MBX_TX_NIC_PROD9_HI 0x03C8 #define BGE_MBX_TX_NIC_PROD9_LO 0x03CC #define BGE_MBX_TX_NIC_PROD10_HI 0x03D0 #define BGE_MBX_TX_NIC_PROD10_LO 0x03D4 #define BGE_MBX_TX_NIC_PROD11_HI 0x03D8 #define BGE_MBX_TX_NIC_PROD11_LO 0x03DC #define BGE_MBX_TX_NIC_PROD12_HI 0x03E0 #define BGE_MBX_TX_NIC_PROD12_LO 0x03E4 #define BGE_MBX_TX_NIC_PROD13_HI 0x03E8 #define BGE_MBX_TX_NIC_PROD13_LO 0x03EC #define BGE_MBX_TX_NIC_PROD14_HI 0x03F0 #define BGE_MBX_TX_NIC_PROD14_LO 0x03F4 #define BGE_MBX_TX_NIC_PROD15_HI 0x03F8 #define BGE_MBX_TX_NIC_PROD15_LO 0x03FC #define BGE_TX_RINGS_MAX 4 #define BGE_TX_RINGS_EXTSSRAM_MAX 16 #define BGE_RX_RINGS_MAX 16 #define BGE_RX_RINGS_MAX_5717 17 /* Ethernet MAC control registers */ #define BGE_MAC_MODE 0x0400 #define BGE_MAC_STS 0x0404 #define BGE_MAC_EVT_ENB 0x0408 #define BGE_MAC_LED_CTL 0x040C #define BGE_MAC_ADDR1_LO 0x0410 #define BGE_MAC_ADDR1_HI 0x0414 #define BGE_MAC_ADDR2_LO 0x0418 #define BGE_MAC_ADDR2_HI 0x041C #define BGE_MAC_ADDR3_LO 0x0420 #define BGE_MAC_ADDR3_HI 0x0424 #define BGE_MAC_ADDR4_LO 0x0428 #define BGE_MAC_ADDR4_HI 0x042C #define BGE_WOL_PATPTR 0x0430 #define BGE_WOL_PATCFG 0x0434 #define BGE_TX_RANDOM_BACKOFF 0x0438 #define 
BGE_RX_MTU 0x043C #define BGE_GBIT_PCS_TEST 0x0440 #define BGE_TX_TBI_AUTONEG 0x0444 #define BGE_RX_TBI_AUTONEG 0x0448 #define BGE_MI_COMM 0x044C #define BGE_MI_STS 0x0450 #define BGE_MI_MODE 0x0454 #define BGE_AUTOPOLL_STS 0x0458 #define BGE_TX_MODE 0x045C #define BGE_TX_STS 0x0460 #define BGE_TX_LENGTHS 0x0464 #define BGE_RX_MODE 0x0468 #define BGE_RX_STS 0x046C #define BGE_MAR0 0x0470 #define BGE_MAR1 0x0474 #define BGE_MAR2 0x0478 #define BGE_MAR3 0x047C #define BGE_RX_BD_RULES_CTL0 0x0480 #define BGE_RX_BD_RULES_MASKVAL0 0x0484 #define BGE_RX_BD_RULES_CTL1 0x0488 #define BGE_RX_BD_RULES_MASKVAL1 0x048C #define BGE_RX_BD_RULES_CTL2 0x0490 #define BGE_RX_BD_RULES_MASKVAL2 0x0494 #define BGE_RX_BD_RULES_CTL3 0x0498 #define BGE_RX_BD_RULES_MASKVAL3 0x049C #define BGE_RX_BD_RULES_CTL4 0x04A0 #define BGE_RX_BD_RULES_MASKVAL4 0x04A4 #define BGE_RX_BD_RULES_CTL5 0x04A8 #define BGE_RX_BD_RULES_MASKVAL5 0x04AC #define BGE_RX_BD_RULES_CTL6 0x04B0 #define BGE_RX_BD_RULES_MASKVAL6 0x04B4 #define BGE_RX_BD_RULES_CTL7 0x04B8 #define BGE_RX_BD_RULES_MASKVAL7 0x04BC #define BGE_RX_BD_RULES_CTL8 0x04C0 #define BGE_RX_BD_RULES_MASKVAL8 0x04C4 #define BGE_RX_BD_RULES_CTL9 0x04C8 #define BGE_RX_BD_RULES_MASKVAL9 0x04CC #define BGE_RX_BD_RULES_CTL10 0x04D0 #define BGE_RX_BD_RULES_MASKVAL10 0x04D4 #define BGE_RX_BD_RULES_CTL11 0x04D8 #define BGE_RX_BD_RULES_MASKVAL11 0x04DC #define BGE_RX_BD_RULES_CTL12 0x04E0 #define BGE_RX_BD_RULES_MASKVAL12 0x04E4 #define BGE_RX_BD_RULES_CTL13 0x04E8 #define BGE_RX_BD_RULES_MASKVAL13 0x04EC #define BGE_RX_BD_RULES_CTL14 0x04F0 #define BGE_RX_BD_RULES_MASKVAL14 0x04F4 #define BGE_RX_BD_RULES_CTL15 0x04F8 #define BGE_RX_BD_RULES_MASKVAL15 0x04FC #define BGE_RX_RULES_CFG 0x0500 #define BGE_MAX_RX_FRAME_LOWAT 0x0504 #define BGE_SERDES_CFG 0x0590 #define BGE_SERDES_STS 0x0594 #define BGE_SGDIG_CFG 0x05B0 #define BGE_SGDIG_STS 0x05B4 #define BGE_TX_MAC_STATS_OCTETS 0x0800 #define BGE_TX_MAC_STATS_RESERVE_0 0x0804 #define BGE_TX_MAC_STATS_COLLS 0x0808 #define BGE_TX_MAC_STATS_XON_SENT 0x080C #define BGE_TX_MAC_STATS_XOFF_SENT 0x0810 #define BGE_TX_MAC_STATS_RESERVE_1 0x0814 #define BGE_TX_MAC_STATS_ERRORS 0x0818 #define BGE_TX_MAC_STATS_SINGLE_COLL 0x081C #define BGE_TX_MAC_STATS_MULTI_COLL 0x0820 #define BGE_TX_MAC_STATS_DEFERRED 0x0824 #define BGE_TX_MAC_STATS_RESERVE_2 0x0828 #define BGE_TX_MAC_STATS_EXCESS_COLL 0x082C #define BGE_TX_MAC_STATS_LATE_COLL 0x0830 #define BGE_TX_MAC_STATS_RESERVE_3 0x0834 #define BGE_TX_MAC_STATS_RESERVE_4 0x0838 #define BGE_TX_MAC_STATS_RESERVE_5 0x083C #define BGE_TX_MAC_STATS_RESERVE_6 0x0840 #define BGE_TX_MAC_STATS_RESERVE_7 0x0844 #define BGE_TX_MAC_STATS_RESERVE_8 0x0848 #define BGE_TX_MAC_STATS_RESERVE_9 0x084C #define BGE_TX_MAC_STATS_RESERVE_10 0x0850 #define BGE_TX_MAC_STATS_RESERVE_11 0x0854 #define BGE_TX_MAC_STATS_RESERVE_12 0x0858 #define BGE_TX_MAC_STATS_RESERVE_13 0x085C #define BGE_TX_MAC_STATS_RESERVE_14 0x0860 #define BGE_TX_MAC_STATS_RESERVE_15 0x0864 #define BGE_TX_MAC_STATS_RESERVE_16 0x0868 #define BGE_TX_MAC_STATS_UCAST 0x086C #define BGE_TX_MAC_STATS_MCAST 0x0870 #define BGE_TX_MAC_STATS_BCAST 0x0874 #define BGE_TX_MAC_STATS_RESERVE_17 0x0878 #define BGE_TX_MAC_STATS_RESERVE_18 0x087C #define BGE_RX_MAC_STATS_OCTESTS 0x0880 #define BGE_RX_MAC_STATS_RESERVE_0 0x0884 #define BGE_RX_MAC_STATS_FRAGMENTS 0x0888 #define BGE_RX_MAC_STATS_UCAST 0x088C #define BGE_RX_MAC_STATS_MCAST 0x0890 #define BGE_RX_MAC_STATS_BCAST 0x0894 #define BGE_RX_MAC_STATS_FCS_ERRORS 0x0898 #define BGE_RX_MAC_STATS_ALGIN_ERRORS 0x089C #define 
BGE_RX_MAC_STATS_XON_RCVD 0x08A0 #define BGE_RX_MAC_STATS_XOFF_RCVD 0x08A4 #define BGE_RX_MAC_STATS_CTRL_RCVD 0x08A8 #define BGE_RX_MAC_STATS_XOFF_ENTERED 0x08AC #define BGE_RX_MAC_STATS_FRAME_TOO_LONG 0x08B0 #define BGE_RX_MAC_STATS_JABBERS 0x08B4 #define BGE_RX_MAC_STATS_UNDERSIZE 0x08B8 /* Ethernet MAC Mode register */ #define BGE_MACMODE_RESET 0x00000001 #define BGE_MACMODE_HALF_DUPLEX 0x00000002 #define BGE_MACMODE_PORTMODE 0x0000000C #define BGE_MACMODE_LOOPBACK 0x00000010 #define BGE_MACMODE_RX_TAGGEDPKT 0x00000080 #define BGE_MACMODE_TX_BURST_ENB 0x00000100 #define BGE_MACMODE_MAX_DEFER 0x00000200 #define BGE_MACMODE_LINK_POLARITY 0x00000400 #define BGE_MACMODE_RX_STATS_ENB 0x00000800 #define BGE_MACMODE_RX_STATS_CLEAR 0x00001000 #define BGE_MACMODE_RX_STATS_FLUSH 0x00002000 #define BGE_MACMODE_TX_STATS_ENB 0x00004000 #define BGE_MACMODE_TX_STATS_CLEAR 0x00008000 #define BGE_MACMODE_TX_STATS_FLUSH 0x00010000 #define BGE_MACMODE_TBI_SEND_CFGS 0x00020000 #define BGE_MACMODE_MAGIC_PKT_ENB 0x00040000 #define BGE_MACMODE_ACPI_PWRON_ENB 0x00080000 #define BGE_MACMODE_MIP_ENB 0x00100000 #define BGE_MACMODE_TXDMA_ENB 0x00200000 #define BGE_MACMODE_RXDMA_ENB 0x00400000 #define BGE_MACMODE_FRMHDR_DMA_ENB 0x00800000 #define BGE_MACMODE_APE_RX_EN 0x08000000 #define BGE_MACMODE_APE_TX_EN 0x10000000 #define BGE_PORTMODE_NONE 0x00000000 #define BGE_PORTMODE_MII 0x00000004 #define BGE_PORTMODE_GMII 0x00000008 #define BGE_PORTMODE_TBI 0x0000000C /* MAC Status register */ #define BGE_MACSTAT_TBI_PCS_SYNCHED 0x00000001 #define BGE_MACSTAT_TBI_SIGNAL_DETECT 0x00000002 #define BGE_MACSTAT_RX_CFG 0x00000004 #define BGE_MACSTAT_CFG_CHANGED 0x00000008 #define BGE_MACSTAT_SYNC_CHANGED 0x00000010 #define BGE_MACSTAT_PORT_DECODE_ERROR 0x00000400 #define BGE_MACSTAT_LINK_CHANGED 0x00001000 #define BGE_MACSTAT_MI_COMPLETE 0x00400000 #define BGE_MACSTAT_MI_INTERRUPT 0x00800000 #define BGE_MACSTAT_AUTOPOLL_ERROR 0x01000000 #define BGE_MACSTAT_ODI_ERROR 0x02000000 #define BGE_MACSTAT_RXSTAT_OFLOW 0x04000000 #define BGE_MACSTAT_TXSTAT_OFLOW 0x08000000 /* MAC Event Enable Register */ #define BGE_EVTENB_PORT_DECODE_ERROR 0x00000400 #define BGE_EVTENB_LINK_CHANGED 0x00001000 #define BGE_EVTENB_MI_COMPLETE 0x00400000 #define BGE_EVTENB_MI_INTERRUPT 0x00800000 #define BGE_EVTENB_AUTOPOLL_ERROR 0x01000000 #define BGE_EVTENB_ODI_ERROR 0x02000000 #define BGE_EVTENB_RXSTAT_OFLOW 0x04000000 #define BGE_EVTENB_TXSTAT_OFLOW 0x08000000 /* LED Control Register */ #define BGE_LEDCTL_LINKLED_OVERRIDE 0x00000001 #define BGE_LEDCTL_1000MBPS_LED 0x00000002 #define BGE_LEDCTL_100MBPS_LED 0x00000004 #define BGE_LEDCTL_10MBPS_LED 0x00000008 #define BGE_LEDCTL_TRAFLED_OVERRIDE 0x00000010 #define BGE_LEDCTL_TRAFLED_BLINK 0x00000020 #define BGE_LEDCTL_TRAFLED_BLINK_2 0x00000040 #define BGE_LEDCTL_1000MBPS_STS 0x00000080 #define BGE_LEDCTL_100MBPS_STS 0x00000100 #define BGE_LEDCTL_10MBPS_STS 0x00000200 #define BGE_LEDCTL_TRAFLED_STS 0x00000400 #define BGE_LEDCTL_BLINKPERIOD 0x7FF80000 #define BGE_LEDCTL_BLINKPERIOD_OVERRIDE 0x80000000 /* TX backoff seed register */ #define BGE_TX_BACKOFF_SEED_MASK 0x3FF /* Autopoll status register */ #define BGE_AUTOPOLLSTS_ERROR 0x00000001 /* Transmit MAC mode register */ #define BGE_TXMODE_RESET 0x00000001 #define BGE_TXMODE_ENABLE 0x00000002 #define BGE_TXMODE_FLOWCTL_ENABLE 0x00000010 #define BGE_TXMODE_BIGBACKOFF_ENABLE 0x00000020 #define BGE_TXMODE_LONGPAUSE_ENABLE 0x00000040 #define BGE_TXMODE_MBUF_LOCKUP_FIX 0x00000100 #define BGE_TXMODE_JMB_FRM_LEN 0x00400000 #define BGE_TXMODE_CNT_DN_MODE 
0x00800000 /* Transmit MAC status register */ #define BGE_TXSTAT_RX_XOFFED 0x00000001 #define BGE_TXSTAT_SENT_XOFF 0x00000002 #define BGE_TXSTAT_SENT_XON 0x00000004 #define BGE_TXSTAT_LINK_UP 0x00000008 #define BGE_TXSTAT_ODI_UFLOW 0x00000010 #define BGE_TXSTAT_ODI_OFLOW 0x00000020 /* Transmit MAC lengths register */ #define BGE_TXLEN_SLOTTIME 0x000000FF #define BGE_TXLEN_IPG 0x00000F00 #define BGE_TXLEN_CRS 0x00003000 #define BGE_TXLEN_JMB_FRM_LEN_MSK 0x00FF0000 #define BGE_TXLEN_CNT_DN_VAL_MSK 0xFF000000 /* Receive MAC mode register */ #define BGE_RXMODE_RESET 0x00000001 #define BGE_RXMODE_ENABLE 0x00000002 #define BGE_RXMODE_FLOWCTL_ENABLE 0x00000004 #define BGE_RXMODE_RX_GIANTS 0x00000020 #define BGE_RXMODE_RX_RUNTS 0x00000040 #define BGE_RXMODE_8022_LENCHECK 0x00000080 #define BGE_RXMODE_RX_PROMISC 0x00000100 #define BGE_RXMODE_RX_NO_CRC_CHECK 0x00000200 #define BGE_RXMODE_RX_KEEP_VLAN_DIAG 0x00000400 #define BGE_RXMODE_IPV6_ENABLE 0x01000000 #define BGE_RXMODE_IPV4_FRAG_FIX 0x02000000 /* Receive MAC status register */ #define BGE_RXSTAT_REMOTE_XOFFED 0x00000001 #define BGE_RXSTAT_RCVD_XOFF 0x00000002 #define BGE_RXSTAT_RCVD_XON 0x00000004 /* Receive Rules Control register */ #define BGE_RXRULECTL_OFFSET 0x000000FF #define BGE_RXRULECTL_CLASS 0x00001F00 #define BGE_RXRULECTL_HDRTYPE 0x0000E000 #define BGE_RXRULECTL_COMPARE_OP 0x00030000 #define BGE_RXRULECTL_MAP 0x01000000 #define BGE_RXRULECTL_DISCARD 0x02000000 #define BGE_RXRULECTL_MASK 0x04000000 #define BGE_RXRULECTL_ACTIVATE_PROC3 0x08000000 #define BGE_RXRULECTL_ACTIVATE_PROC2 0x10000000 #define BGE_RXRULECTL_ACTIVATE_PROC1 0x20000000 #define BGE_RXRULECTL_ANDWITHNEXT 0x40000000 /* Receive Rules Mask register */ #define BGE_RXRULEMASK_VALUE 0x0000FFFF #define BGE_RXRULEMASK_MASKVAL 0xFFFF0000 /* SERDES configuration register */ #define BGE_SERDESCFG_RXR 0x00000007 /* phase interpolator */ #define BGE_SERDESCFG_RXG 0x00000018 /* rx gain setting */ #define BGE_SERDESCFG_RXEDGESEL 0x00000040 /* rising/falling egde */ #define BGE_SERDESCFG_TX_BIAS 0x00000380 /* TXDAC bias setting */ #define BGE_SERDESCFG_IBMAX 0x00000400 /* bias current +25% */ #define BGE_SERDESCFG_IBMIN 0x00000800 /* bias current -25% */ #define BGE_SERDESCFG_TXMODE 0x00001000 #define BGE_SERDESCFG_TXEDGESEL 0x00002000 /* rising/falling edge */ #define BGE_SERDESCFG_MODE 0x00004000 /* TXCP/TXCN disabled */ #define BGE_SERDESCFG_PLLTEST 0x00008000 /* PLL test mode */ #define BGE_SERDESCFG_CDET 0x00010000 /* comma detect enable */ #define BGE_SERDESCFG_TBILOOP 0x00020000 /* local loopback */ #define BGE_SERDESCFG_REMLOOP 0x00040000 /* remote loopback */ #define BGE_SERDESCFG_INVPHASE 0x00080000 /* Reverse 125Mhz clock */ #define BGE_SERDESCFG_12REGCTL 0x00300000 /* 1.2v regulator ctl */ #define BGE_SERDESCFG_REGCTL 0x00C00000 /* regulator ctl (2.5v) */ /* SERDES status register */ #define BGE_SERDESSTS_RXSTAT 0x0000000F /* receive status bits */ #define BGE_SERDESSTS_CDET 0x00000010 /* comma code detected */ /* SGDIG config (not documented) */ #define BGE_SGDIGCFG_PAUSE_CAP 0x00000800 #define BGE_SGDIGCFG_ASYM_PAUSE 0x00001000 #define BGE_SGDIGCFG_SEND 0x40000000 #define BGE_SGDIGCFG_AUTO 0x80000000 /* SGDIG status (not documented) */ #define BGE_SGDIGSTS_DONE 0x00000002 #define BGE_SGDIGSTS_IS_SERDES 0x00000100 #define BGE_SGDIGSTS_PAUSE_CAP 0x00080000 #define BGE_SGDIGSTS_ASYM_PAUSE 0x00100000 /* MI communication register */ #define BGE_MICOMM_DATA 0x0000FFFF #define BGE_MICOMM_REG 0x001F0000 #define BGE_MICOMM_PHY 0x03E00000 #define BGE_MICOMM_CMD 0x0C000000 
#define BGE_MICOMM_READFAIL 0x10000000 #define BGE_MICOMM_BUSY 0x20000000 #define BGE_MIREG(x) ((x & 0x1F) << 16) #define BGE_MIPHY(x) ((x & 0x1F) << 21) #define BGE_MICMD_WRITE 0x04000000 #define BGE_MICMD_READ 0x08000000 /* MI status register */ #define BGE_MISTS_LINK 0x00000001 #define BGE_MISTS_10MBPS 0x00000002 #define BGE_MIMODE_CLK_10MHZ 0x00000001 #define BGE_MIMODE_SHORTPREAMBLE 0x00000002 #define BGE_MIMODE_AUTOPOLL 0x00000010 #define BGE_MIMODE_CLKCNT 0x001F0000 #define BGE_MIMODE_500KHZ_CONST 0x00008000 #define BGE_MIMODE_BASE 0x000C0000 /* * Send data initiator control registers. */ #define BGE_SDI_MODE 0x0C00 #define BGE_SDI_STATUS 0x0C04 #define BGE_SDI_STATS_CTL 0x0C08 #define BGE_SDI_STATS_ENABLE_MASK 0x0C0C #define BGE_SDI_STATS_INCREMENT_MASK 0x0C10 #define BGE_ISO_PKT_TX 0x0C20 #define BGE_LOCSTATS_COS0 0x0C80 #define BGE_LOCSTATS_COS1 0x0C84 #define BGE_LOCSTATS_COS2 0x0C88 #define BGE_LOCSTATS_COS3 0x0C8C #define BGE_LOCSTATS_COS4 0x0C90 #define BGE_LOCSTATS_COS5 0x0C84 #define BGE_LOCSTATS_COS6 0x0C98 #define BGE_LOCSTATS_COS7 0x0C9C #define BGE_LOCSTATS_COS8 0x0CA0 #define BGE_LOCSTATS_COS9 0x0CA4 #define BGE_LOCSTATS_COS10 0x0CA8 #define BGE_LOCSTATS_COS11 0x0CAC #define BGE_LOCSTATS_COS12 0x0CB0 #define BGE_LOCSTATS_COS13 0x0CB4 #define BGE_LOCSTATS_COS14 0x0CB8 #define BGE_LOCSTATS_COS15 0x0CBC #define BGE_LOCSTATS_DMA_RQ_FULL 0x0CC0 #define BGE_LOCSTATS_DMA_HIPRIO_RQ_FULL 0x0CC4 #define BGE_LOCSTATS_SDC_QUEUE_FULL 0x0CC8 #define BGE_LOCSTATS_NIC_SENDPROD_SET 0x0CCC #define BGE_LOCSTATS_STATS_UPDATED 0x0CD0 #define BGE_LOCSTATS_IRQS 0x0CD4 #define BGE_LOCSTATS_AVOIDED_IRQS 0x0CD8 #define BGE_LOCSTATS_TX_THRESH_HIT 0x0CDC /* Send Data Initiator mode register */ #define BGE_SDIMODE_RESET 0x00000001 #define BGE_SDIMODE_ENABLE 0x00000002 #define BGE_SDIMODE_STATS_OFLOW_ATTN 0x00000004 #define BGE_SDIMODE_HW_LSO_PRE_DMA 0x00000008 /* Send Data Initiator stats register */ #define BGE_SDISTAT_STATS_OFLOW_ATTN 0x00000004 /* Send Data Initiator stats control register */ #define BGE_SDISTATSCTL_ENABLE 0x00000001 #define BGE_SDISTATSCTL_FASTER 0x00000002 #define BGE_SDISTATSCTL_CLEAR 0x00000004 #define BGE_SDISTATSCTL_FORCEFLUSH 0x00000008 #define BGE_SDISTATSCTL_FORCEZERO 0x00000010 /* * Send Data Completion Control registers */ #define BGE_SDC_MODE 0x1000 #define BGE_SDC_STATUS 0x1004 /* Send Data completion mode register */ #define BGE_SDCMODE_RESET 0x00000001 #define BGE_SDCMODE_ENABLE 0x00000002 #define BGE_SDCMODE_ATTN 0x00000004 #define BGE_SDCMODE_CDELAY 0x00000010 /* Send Data completion status register */ #define BGE_SDCSTAT_ATTN 0x00000004 /* * Send BD Ring Selector Control registers */ #define BGE_SRS_MODE 0x1400 #define BGE_SRS_STATUS 0x1404 #define BGE_SRS_HWDIAG 0x1408 #define BGE_SRS_LOC_NIC_CONS0 0x1440 #define BGE_SRS_LOC_NIC_CONS1 0x1444 #define BGE_SRS_LOC_NIC_CONS2 0x1448 #define BGE_SRS_LOC_NIC_CONS3 0x144C #define BGE_SRS_LOC_NIC_CONS4 0x1450 #define BGE_SRS_LOC_NIC_CONS5 0x1454 #define BGE_SRS_LOC_NIC_CONS6 0x1458 #define BGE_SRS_LOC_NIC_CONS7 0x145C #define BGE_SRS_LOC_NIC_CONS8 0x1460 #define BGE_SRS_LOC_NIC_CONS9 0x1464 #define BGE_SRS_LOC_NIC_CONS10 0x1468 #define BGE_SRS_LOC_NIC_CONS11 0x146C #define BGE_SRS_LOC_NIC_CONS12 0x1470 #define BGE_SRS_LOC_NIC_CONS13 0x1474 #define BGE_SRS_LOC_NIC_CONS14 0x1478 #define BGE_SRS_LOC_NIC_CONS15 0x147C /* Send BD Ring Selector Mode register */ #define BGE_SRSMODE_RESET 0x00000001 #define BGE_SRSMODE_ENABLE 0x00000002 #define BGE_SRSMODE_ATTN 0x00000004 /* Send BD Ring Selector Status register */ #define 
BGE_SRSSTAT_ERROR 0x00000004 /* Send BD Ring Selector HW Diagnostics register */ #define BGE_SRSHWDIAG_STATE 0x0000000F #define BGE_SRSHWDIAG_CURRINGNUM 0x000000F0 #define BGE_SRSHWDIAG_STAGEDRINGNUM 0x00000F00 #define BGE_SRSHWDIAG_RINGNUM_IN_MBX 0x0000F000 /* * Send BD Initiator Selector Control registers */ #define BGE_SBDI_MODE 0x1800 #define BGE_SBDI_STATUS 0x1804 #define BGE_SBDI_LOC_NIC_PROD0 0x1808 #define BGE_SBDI_LOC_NIC_PROD1 0x180C #define BGE_SBDI_LOC_NIC_PROD2 0x1810 #define BGE_SBDI_LOC_NIC_PROD3 0x1814 #define BGE_SBDI_LOC_NIC_PROD4 0x1818 #define BGE_SBDI_LOC_NIC_PROD5 0x181C #define BGE_SBDI_LOC_NIC_PROD6 0x1820 #define BGE_SBDI_LOC_NIC_PROD7 0x1824 #define BGE_SBDI_LOC_NIC_PROD8 0x1828 #define BGE_SBDI_LOC_NIC_PROD9 0x182C #define BGE_SBDI_LOC_NIC_PROD10 0x1830 #define BGE_SBDI_LOC_NIC_PROD11 0x1834 #define BGE_SBDI_LOC_NIC_PROD12 0x1838 #define BGE_SBDI_LOC_NIC_PROD13 0x183C #define BGE_SBDI_LOC_NIC_PROD14 0x1840 #define BGE_SBDI_LOC_NIC_PROD15 0x1844 /* Send BD Initiator Mode register */ #define BGE_SBDIMODE_RESET 0x00000001 #define BGE_SBDIMODE_ENABLE 0x00000002 #define BGE_SBDIMODE_ATTN 0x00000004 /* Send BD Initiator Status register */ #define BGE_SBDISTAT_ERROR 0x00000004 /* * Send BD Completion Control registers */ #define BGE_SBDC_MODE 0x1C00 #define BGE_SBDC_STATUS 0x1C04 /* Send BD Completion Control Mode register */ #define BGE_SBDCMODE_RESET 0x00000001 #define BGE_SBDCMODE_ENABLE 0x00000002 #define BGE_SBDCMODE_ATTN 0x00000004 /* Send BD Completion Control Status register */ #define BGE_SBDCSTAT_ATTN 0x00000004 /* * Receive List Placement Control registers */ #define BGE_RXLP_MODE 0x2000 #define BGE_RXLP_STATUS 0x2004 #define BGE_RXLP_SEL_LIST_LOCK 0x2008 #define BGE_RXLP_SEL_NON_EMPTY_BITS 0x200C #define BGE_RXLP_CFG 0x2010 #define BGE_RXLP_STATS_CTL 0x2014 #define BGE_RXLP_STATS_ENABLE_MASK 0x2018 #define BGE_RXLP_STATS_INCREMENT_MASK 0x201C #define BGE_RXLP_HEAD0 0x2100 #define BGE_RXLP_TAIL0 0x2104 #define BGE_RXLP_COUNT0 0x2108 #define BGE_RXLP_HEAD1 0x2110 #define BGE_RXLP_TAIL1 0x2114 #define BGE_RXLP_COUNT1 0x2118 #define BGE_RXLP_HEAD2 0x2120 #define BGE_RXLP_TAIL2 0x2124 #define BGE_RXLP_COUNT2 0x2128 #define BGE_RXLP_HEAD3 0x2130 #define BGE_RXLP_TAIL3 0x2134 #define BGE_RXLP_COUNT3 0x2138 #define BGE_RXLP_HEAD4 0x2140 #define BGE_RXLP_TAIL4 0x2144 #define BGE_RXLP_COUNT4 0x2148 #define BGE_RXLP_HEAD5 0x2150 #define BGE_RXLP_TAIL5 0x2154 #define BGE_RXLP_COUNT5 0x2158 #define BGE_RXLP_HEAD6 0x2160 #define BGE_RXLP_TAIL6 0x2164 #define BGE_RXLP_COUNT6 0x2168 #define BGE_RXLP_HEAD7 0x2170 #define BGE_RXLP_TAIL7 0x2174 #define BGE_RXLP_COUNT7 0x2178 #define BGE_RXLP_HEAD8 0x2180 #define BGE_RXLP_TAIL8 0x2184 #define BGE_RXLP_COUNT8 0x2188 #define BGE_RXLP_HEAD9 0x2190 #define BGE_RXLP_TAIL9 0x2194 #define BGE_RXLP_COUNT9 0x2198 #define BGE_RXLP_HEAD10 0x21A0 #define BGE_RXLP_TAIL10 0x21A4 #define BGE_RXLP_COUNT10 0x21A8 #define BGE_RXLP_HEAD11 0x21B0 #define BGE_RXLP_TAIL11 0x21B4 #define BGE_RXLP_COUNT11 0x21B8 #define BGE_RXLP_HEAD12 0x21C0 #define BGE_RXLP_TAIL12 0x21C4 #define BGE_RXLP_COUNT12 0x21C8 #define BGE_RXLP_HEAD13 0x21D0 #define BGE_RXLP_TAIL13 0x21D4 #define BGE_RXLP_COUNT13 0x21D8 #define BGE_RXLP_HEAD14 0x21E0 #define BGE_RXLP_TAIL14 0x21E4 #define BGE_RXLP_COUNT14 0x21E8 #define BGE_RXLP_HEAD15 0x21F0 #define BGE_RXLP_TAIL15 0x21F4 #define BGE_RXLP_COUNT15 0x21F8 #define BGE_RXLP_LOCSTAT_COS0 0x2200 #define BGE_RXLP_LOCSTAT_COS1 0x2204 #define BGE_RXLP_LOCSTAT_COS2 0x2208 #define BGE_RXLP_LOCSTAT_COS3 0x220C #define 
BGE_RXLP_LOCSTAT_COS4 0x2210 #define BGE_RXLP_LOCSTAT_COS5 0x2214 #define BGE_RXLP_LOCSTAT_COS6 0x2218 #define BGE_RXLP_LOCSTAT_COS7 0x221C #define BGE_RXLP_LOCSTAT_COS8 0x2220 #define BGE_RXLP_LOCSTAT_COS9 0x2224 #define BGE_RXLP_LOCSTAT_COS10 0x2228 #define BGE_RXLP_LOCSTAT_COS11 0x222C #define BGE_RXLP_LOCSTAT_COS12 0x2230 #define BGE_RXLP_LOCSTAT_COS13 0x2234 #define BGE_RXLP_LOCSTAT_COS14 0x2238 #define BGE_RXLP_LOCSTAT_COS15 0x223C #define BGE_RXLP_LOCSTAT_FILTDROP 0x2240 #define BGE_RXLP_LOCSTAT_DMA_WRQ_FULL 0x2244 #define BGE_RXLP_LOCSTAT_DMA_HPWRQ_FULL 0x2248 #define BGE_RXLP_LOCSTAT_OUT_OF_BDS 0x224C #define BGE_RXLP_LOCSTAT_IFIN_DROPS 0x2250 #define BGE_RXLP_LOCSTAT_IFIN_ERRORS 0x2254 #define BGE_RXLP_LOCSTAT_RXTHRESH_HIT 0x2258 /* Receive List Placement mode register */ #define BGE_RXLPMODE_RESET 0x00000001 #define BGE_RXLPMODE_ENABLE 0x00000002 #define BGE_RXLPMODE_CLASS0_ATTN 0x00000004 #define BGE_RXLPMODE_MAPOUTRANGE_ATTN 0x00000008 #define BGE_RXLPMODE_STATSOFLOW_ATTN 0x00000010 /* Receive List Placement Status register */ #define BGE_RXLPSTAT_CLASS0_ATTN 0x00000004 #define BGE_RXLPSTAT_MAPOUTRANGE_ATTN 0x00000008 #define BGE_RXLPSTAT_STATSOFLOW_ATTN 0x00000010 /* * Receive Data and Receive BD Initiator Control Registers */ #define BGE_RDBDI_MODE 0x2400 #define BGE_RDBDI_STATUS 0x2404 #define BGE_RX_JUMBO_RCB_HADDR_HI 0x2440 #define BGE_RX_JUMBO_RCB_HADDR_LO 0x2444 #define BGE_RX_JUMBO_RCB_MAXLEN_FLAGS 0x2448 #define BGE_RX_JUMBO_RCB_NICADDR 0x244C #define BGE_RX_STD_RCB_HADDR_HI 0x2450 #define BGE_RX_STD_RCB_HADDR_LO 0x2454 #define BGE_RX_STD_RCB_MAXLEN_FLAGS 0x2458 #define BGE_RX_STD_RCB_NICADDR 0x245C #define BGE_RX_MINI_RCB_HADDR_HI 0x2460 #define BGE_RX_MINI_RCB_HADDR_LO 0x2464 #define BGE_RX_MINI_RCB_MAXLEN_FLAGS 0x2468 #define BGE_RX_MINI_RCB_NICADDR 0x246C #define BGE_RDBDI_JUMBO_RX_CONS 0x2470 #define BGE_RDBDI_STD_RX_CONS 0x2474 #define BGE_RDBDI_MINI_RX_CONS 0x2478 #define BGE_RDBDI_RETURN_PROD0 0x2480 #define BGE_RDBDI_RETURN_PROD1 0x2484 #define BGE_RDBDI_RETURN_PROD2 0x2488 #define BGE_RDBDI_RETURN_PROD3 0x248C #define BGE_RDBDI_RETURN_PROD4 0x2490 #define BGE_RDBDI_RETURN_PROD5 0x2494 #define BGE_RDBDI_RETURN_PROD6 0x2498 #define BGE_RDBDI_RETURN_PROD7 0x249C #define BGE_RDBDI_RETURN_PROD8 0x24A0 #define BGE_RDBDI_RETURN_PROD9 0x24A4 #define BGE_RDBDI_RETURN_PROD10 0x24A8 #define BGE_RDBDI_RETURN_PROD11 0x24AC #define BGE_RDBDI_RETURN_PROD12 0x24B0 #define BGE_RDBDI_RETURN_PROD13 0x24B4 #define BGE_RDBDI_RETURN_PROD14 0x24B8 #define BGE_RDBDI_RETURN_PROD15 0x24BC #define BGE_RDBDI_HWDIAG 0x24C0 /* Receive Data and Receive BD Initiator Mode register */ #define BGE_RDBDIMODE_RESET 0x00000001 #define BGE_RDBDIMODE_ENABLE 0x00000002 #define BGE_RDBDIMODE_JUMBO_ATTN 0x00000004 #define BGE_RDBDIMODE_GIANT_ATTN 0x00000008 #define BGE_RDBDIMODE_BADRINGSZ_ATTN 0x00000010 /* Receive Data and Receive BD Initiator Status register */ #define BGE_RDBDISTAT_JUMBO_ATTN 0x00000004 #define BGE_RDBDISTAT_GIANT_ATTN 0x00000008 #define BGE_RDBDISTAT_BADRINGSZ_ATTN 0x00000010 /* * Receive Data Completion Control registers */ #define BGE_RDC_MODE 0x2800 /* Receive Data Completion Mode register */ #define BGE_RDCMODE_RESET 0x00000001 #define BGE_RDCMODE_ENABLE 0x00000002 #define BGE_RDCMODE_ATTN 0x00000004 /* * Receive BD Initiator Control registers */ #define BGE_RBDI_MODE 0x2C00 #define BGE_RBDI_STATUS 0x2C04 #define BGE_RBDI_NIC_JUMBO_BD_PROD 0x2C08 #define BGE_RBDI_NIC_STD_BD_PROD 0x2C0C #define BGE_RBDI_NIC_MINI_BD_PROD 0x2C10 #define BGE_RBDI_MINI_REPL_THRESH 0x2C14 #define 
BGE_RBDI_STD_REPL_THRESH 0x2C18 #define BGE_RBDI_JUMBO_REPL_THRESH 0x2C1C #define BGE_STD_REPLENISH_LWM 0x2D00 #define BGE_JMB_REPLENISH_LWM 0x2D04 /* Receive BD Initiator Mode register */ #define BGE_RBDIMODE_RESET 0x00000001 #define BGE_RBDIMODE_ENABLE 0x00000002 #define BGE_RBDIMODE_ATTN 0x00000004 /* Receive BD Initiator Status register */ #define BGE_RBDISTAT_ATTN 0x00000004 /* * Receive BD Completion Control registers */ #define BGE_RBDC_MODE 0x3000 #define BGE_RBDC_STATUS 0x3004 #define BGE_RBDC_JUMBO_BD_PROD 0x3008 #define BGE_RBDC_STD_BD_PROD 0x300C #define BGE_RBDC_MINI_BD_PROD 0x3010 /* Receive BD completion mode register */ #define BGE_RBDCMODE_RESET 0x00000001 #define BGE_RBDCMODE_ENABLE 0x00000002 #define BGE_RBDCMODE_ATTN 0x00000004 /* Receive BD completion status register */ #define BGE_RBDCSTAT_ERROR 0x00000004 /* * Receive List Selector Control registers */ #define BGE_RXLS_MODE 0x3400 #define BGE_RXLS_STATUS 0x3404 /* Receive List Selector Mode register */ #define BGE_RXLSMODE_RESET 0x00000001 #define BGE_RXLSMODE_ENABLE 0x00000002 #define BGE_RXLSMODE_ATTN 0x00000004 /* Receive List Selector Status register */ #define BGE_RXLSSTAT_ERROR 0x00000004 #define BGE_CPMU_CTRL 0x3600 #define BGE_CPMU_LSPD_10MB_CLK 0x3604 #define BGE_CPMU_LSPD_1000MB_CLK 0x360C #define BGE_CPMU_LNK_AWARE_PWRMD 0x3610 #define BGE_CPMU_HST_ACC 0x361C #define BGE_CPMU_CLCK_ORIDE 0x3624 #define BGE_CPMU_CLCK_STAT 0x3630 #define BGE_CPMU_MUTEX_REQ 0x365C #define BGE_CPMU_MUTEX_GNT 0x3660 #define BGE_CPMU_PHY_STRAP 0x3664 #define BGE_CPMU_PADRNG_CTL 0x3668 /* Central Power Management Unit (CPMU) register */ #define BGE_CPMU_CTRL_LINK_IDLE_MODE 0x00000200 #define BGE_CPMU_CTRL_LINK_AWARE_MODE 0x00000400 #define BGE_CPMU_CTRL_LINK_SPEED_MODE 0x00004000 #define BGE_CPMU_CTRL_GPHY_10MB_RXONLY 0x00010000 /* Link Speed 10MB/No Link Power Mode Clock Policy register */ #define BGE_CPMU_LSPD_10MB_MACCLK_MASK 0x001F0000 #define BGE_CPMU_LSPD_10MB_MACCLK_6_25 0x00130000 /* Link Speed 1000MB Power Mode Clock Policy register */ #define BGE_CPMU_LSPD_1000MB_MACCLK_62_5 0x00000000 #define BGE_CPMU_LSPD_1000MB_MACCLK_12_5 0x00110000 #define BGE_CPMU_LSPD_1000MB_MACCLK_MASK 0x001F0000 /* Link Aware Power Mode Clock Policy register */ #define BGE_CPMU_LNK_AWARE_MACCLK_MASK 0x001F0000 #define BGE_CPMU_LNK_AWARE_MACCLK_6_25 0x00130000 #define BGE_CPMU_HST_ACC_MACCLK_MASK 0x001F0000 #define BGE_CPMU_HST_ACC_MACCLK_6_25 0x00130000 /* Clock Speed Override Policy register */ #define CPMU_CLCK_ORIDE_MAC_ORIDE_EN 0x80000000 /* CPMU Clock Status register */ #define BGE_CPMU_CLCK_STAT_MAC_CLCK_MASK 0x001F0000 #define BGE_CPMU_CLCK_STAT_MAC_CLCK_62_5 0x00000000 #define BGE_CPMU_CLCK_STAT_MAC_CLCK_12_5 0x00110000 #define BGE_CPMU_CLCK_STAT_MAC_CLCK_6_25 0x00130000 /* CPMU Mutex Request register */ #define BGE_CPMU_MUTEX_REQ_DRIVER 0x00001000 #define BGE_CPMU_MUTEX_GNT_DRIVER 0x00001000 /* CPMU GPHY Strap register */ #define BGE_CPMU_PHY_STRAP_IS_SERDES 0x00000020 /* CPMU Padring Control register */ #define BGE_CPMU_PADRNG_CTL_RDIV2 0x00040000 /* * Mbuf Cluster Free registers (has nothing to do with BSD mbufs) */ #define BGE_MBCF_MODE 0x3800 #define BGE_MBCF_STATUS 0x3804 /* Mbuf Cluster Free mode register */ #define BGE_MBCFMODE_RESET 0x00000001 #define BGE_MBCFMODE_ENABLE 0x00000002 #define BGE_MBCFMODE_ATTN 0x00000004 /* Mbuf Cluster Free status register */ #define BGE_MBCFSTAT_ERROR 0x00000004 /* * Host Coalescing Control registers */ #define BGE_HCC_MODE 0x3C00 #define BGE_HCC_STATUS 0x3C04 #define BGE_HCC_RX_COAL_TICKS 0x3C08 
#define BGE_HCC_TX_COAL_TICKS 0x3C0C #define BGE_HCC_RX_MAX_COAL_BDS 0x3C10 #define BGE_HCC_TX_MAX_COAL_BDS 0x3C14 #define BGE_HCC_RX_COAL_TICKS_INT 0x3C18 /* ticks during interrupt */ #define BGE_HCC_TX_COAL_TICKS_INT 0x3C1C /* ticks during interrupt */ #define BGE_HCC_RX_MAX_COAL_BDS_INT 0x3C20 /* BDs during interrupt */ #define BGE_HCC_TX_MAX_COAL_BDS_INT 0x3C24 /* BDs during interrupt */ #define BGE_HCC_STATS_TICKS 0x3C28 #define BGE_HCC_STATS_ADDR_HI 0x3C30 #define BGE_HCC_STATS_ADDR_LO 0x3C34 #define BGE_HCC_STATUSBLK_ADDR_HI 0x3C38 #define BGE_HCC_STATUSBLK_ADDR_LO 0x3C3C #define BGE_HCC_STATS_BASEADDR 0x3C40 /* address in NIC memory */ #define BGE_HCC_STATUSBLK_BASEADDR 0x3C44 /* address in NIC memory */ #define BGE_FLOW_ATTN 0x3C48 #define BGE_HCC_JUMBO_BD_CONS 0x3C50 #define BGE_HCC_STD_BD_CONS 0x3C54 #define BGE_HCC_MINI_BD_CONS 0x3C58 #define BGE_HCC_RX_RETURN_PROD0 0x3C80 #define BGE_HCC_RX_RETURN_PROD1 0x3C84 #define BGE_HCC_RX_RETURN_PROD2 0x3C88 #define BGE_HCC_RX_RETURN_PROD3 0x3C8C #define BGE_HCC_RX_RETURN_PROD4 0x3C90 #define BGE_HCC_RX_RETURN_PROD5 0x3C94 #define BGE_HCC_RX_RETURN_PROD6 0x3C98 #define BGE_HCC_RX_RETURN_PROD7 0x3C9C #define BGE_HCC_RX_RETURN_PROD8 0x3CA0 #define BGE_HCC_RX_RETURN_PROD9 0x3CA4 #define BGE_HCC_RX_RETURN_PROD10 0x3CA8 #define BGE_HCC_RX_RETURN_PROD11 0x3CAC #define BGE_HCC_RX_RETURN_PROD12 0x3CB0 #define BGE_HCC_RX_RETURN_PROD13 0x3CB4 #define BGE_HCC_RX_RETURN_PROD14 0x3CB8 #define BGE_HCC_RX_RETURN_PROD15 0x3CBC #define BGE_HCC_TX_BD_CONS0 0x3CC0 #define BGE_HCC_TX_BD_CONS1 0x3CC4 #define BGE_HCC_TX_BD_CONS2 0x3CC8 #define BGE_HCC_TX_BD_CONS3 0x3CCC #define BGE_HCC_TX_BD_CONS4 0x3CD0 #define BGE_HCC_TX_BD_CONS5 0x3CD4 #define BGE_HCC_TX_BD_CONS6 0x3CD8 #define BGE_HCC_TX_BD_CONS7 0x3CDC #define BGE_HCC_TX_BD_CONS8 0x3CE0 #define BGE_HCC_TX_BD_CONS9 0x3CE4 #define BGE_HCC_TX_BD_CONS10 0x3CE8 #define BGE_HCC_TX_BD_CONS11 0x3CEC #define BGE_HCC_TX_BD_CONS12 0x3CF0 #define BGE_HCC_TX_BD_CONS13 0x3CF4 #define BGE_HCC_TX_BD_CONS14 0x3CF8 #define BGE_HCC_TX_BD_CONS15 0x3CFC /* Host coalescing mode register */ #define BGE_HCCMODE_RESET 0x00000001 #define BGE_HCCMODE_ENABLE 0x00000002 #define BGE_HCCMODE_ATTN 0x00000004 #define BGE_HCCMODE_COAL_NOW 0x00000008 #define BGE_HCCMODE_MSI_BITS 0x00000070 #define BGE_HCCMODE_STATBLK_SIZE 0x00000180 #define BGE_STATBLKSZ_FULL 0x00000000 #define BGE_STATBLKSZ_64BYTE 0x00000080 #define BGE_STATBLKSZ_32BYTE 0x00000100 /* Host coalescing status register */ #define BGE_HCCSTAT_ERROR 0x00000004 /* Flow attention register */ #define BGE_FLOWATTN_MB_LOWAT 0x00000040 #define BGE_FLOWATTN_MEMARB 0x00000080 #define BGE_FLOWATTN_HOSTCOAL 0x00008000 #define BGE_FLOWATTN_DMADONE_DISCARD 0x00010000 #define BGE_FLOWATTN_RCB_INVAL 0x00020000 #define BGE_FLOWATTN_RXDATA_CORRUPT 0x00040000 #define BGE_FLOWATTN_RDBDI 0x00080000 #define BGE_FLOWATTN_RXLS 0x00100000 #define BGE_FLOWATTN_RXLP 0x00200000 #define BGE_FLOWATTN_RBDC 0x00400000 #define BGE_FLOWATTN_RBDI 0x00800000 #define BGE_FLOWATTN_SDC 0x08000000 #define BGE_FLOWATTN_SDI 0x10000000 #define BGE_FLOWATTN_SRS 0x20000000 #define BGE_FLOWATTN_SBDC 0x40000000 #define BGE_FLOWATTN_SBDI 0x80000000 /* * Memory arbiter registers */ #define BGE_MARB_MODE 0x4000 #define BGE_MARB_STATUS 0x4004 #define BGE_MARB_TRAPADDR_HI 0x4008 #define BGE_MARB_TRAPADDR_LO 0x400C /* Memory arbiter mode register */ #define BGE_MARBMODE_RESET 0x00000001 #define BGE_MARBMODE_ENABLE 0x00000002 #define BGE_MARBMODE_TX_ADDR_TRAP 0x00000004 #define BGE_MARBMODE_RX_ADDR_TRAP 0x00000008 #define 
BGE_MARBMODE_DMAW1_TRAP 0x00000010 #define BGE_MARBMODE_DMAR1_TRAP 0x00000020 #define BGE_MARBMODE_RXRISC_TRAP 0x00000040 #define BGE_MARBMODE_TXRISC_TRAP 0x00000080 #define BGE_MARBMODE_PCI_TRAP 0x00000100 #define BGE_MARBMODE_DMAR2_TRAP 0x00000200 #define BGE_MARBMODE_RXQ_TRAP 0x00000400 #define BGE_MARBMODE_RXDI1_TRAP 0x00000800 #define BGE_MARBMODE_RXDI2_TRAP 0x00001000 #define BGE_MARBMODE_DC_GRPMEM_TRAP 0x00002000 #define BGE_MARBMODE_HCOAL_TRAP 0x00004000 #define BGE_MARBMODE_MBUF_TRAP 0x00008000 #define BGE_MARBMODE_TXDI_TRAP 0x00010000 #define BGE_MARBMODE_SDC_DMAC_TRAP 0x00020000 #define BGE_MARBMODE_TXBD_TRAP 0x00040000 #define BGE_MARBMODE_BUFFMAN_TRAP 0x00080000 #define BGE_MARBMODE_DMAW2_TRAP 0x00100000 #define BGE_MARBMODE_XTSSRAM_ROFLO_TRAP 0x00200000 #define BGE_MARBMODE_XTSSRAM_RUFLO_TRAP 0x00400000 #define BGE_MARBMODE_XTSSRAM_WOFLO_TRAP 0x00800000 #define BGE_MARBMODE_XTSSRAM_WUFLO_TRAP 0x01000000 #define BGE_MARBMODE_XTSSRAM_PERR_TRAP 0x02000000 /* Memory arbiter status register */ #define BGE_MARBSTAT_TX_ADDR_TRAP 0x00000004 #define BGE_MARBSTAT_RX_ADDR_TRAP 0x00000008 #define BGE_MARBSTAT_DMAW1_TRAP 0x00000010 #define BGE_MARBSTAT_DMAR1_TRAP 0x00000020 #define BGE_MARBSTAT_RXRISC_TRAP 0x00000040 #define BGE_MARBSTAT_TXRISC_TRAP 0x00000080 #define BGE_MARBSTAT_PCI_TRAP 0x00000100 #define BGE_MARBSTAT_DMAR2_TRAP 0x00000200 #define BGE_MARBSTAT_RXQ_TRAP 0x00000400 #define BGE_MARBSTAT_RXDI1_TRAP 0x00000800 #define BGE_MARBSTAT_RXDI2_TRAP 0x00001000 #define BGE_MARBSTAT_DC_GRPMEM_TRAP 0x00002000 #define BGE_MARBSTAT_HCOAL_TRAP 0x00004000 #define BGE_MARBSTAT_MBUF_TRAP 0x00008000 #define BGE_MARBSTAT_TXDI_TRAP 0x00010000 #define BGE_MARBSTAT_SDC_DMAC_TRAP 0x00020000 #define BGE_MARBSTAT_TXBD_TRAP 0x00040000 #define BGE_MARBSTAT_BUFFMAN_TRAP 0x00080000 #define BGE_MARBSTAT_DMAW2_TRAP 0x00100000 #define BGE_MARBSTAT_XTSSRAM_ROFLO_TRAP 0x00200000 #define BGE_MARBSTAT_XTSSRAM_RUFLO_TRAP 0x00400000 #define BGE_MARBSTAT_XTSSRAM_WOFLO_TRAP 0x00800000 #define BGE_MARBSTAT_XTSSRAM_WUFLO_TRAP 0x01000000 #define BGE_MARBSTAT_XTSSRAM_PERR_TRAP 0x02000000 /* * Buffer manager control registers */ #define BGE_BMAN_MODE 0x4400 #define BGE_BMAN_STATUS 0x4404 #define BGE_BMAN_MBUFPOOL_BASEADDR 0x4408 #define BGE_BMAN_MBUFPOOL_LEN 0x440C #define BGE_BMAN_MBUFPOOL_READDMA_LOWAT 0x4410 #define BGE_BMAN_MBUFPOOL_MACRX_LOWAT 0x4414 #define BGE_BMAN_MBUFPOOL_HIWAT 0x4418 #define BGE_BMAN_RXCPU_MBALLOC_REQ 0x441C #define BGE_BMAN_RXCPU_MBALLOC_RESP 0x4420 #define BGE_BMAN_TXCPU_MBALLOC_REQ 0x4424 #define BGE_BMAN_TXCPU_MBALLOC_RESP 0x4428 #define BGE_BMAN_DMA_DESCPOOL_BASEADDR 0x442C #define BGE_BMAN_DMA_DESCPOOL_LEN 0x4430 #define BGE_BMAN_DMA_DESCPOOL_LOWAT 0x4434 #define BGE_BMAN_DMA_DESCPOOL_HIWAT 0x4438 #define BGE_BMAN_RXCPU_DMAALLOC_REQ 0x443C #define BGE_BMAN_RXCPU_DMAALLOC_RESP 0x4440 #define BGE_BMAN_TXCPU_DMAALLOC_REQ 0x4444 #define BGE_BMAN_TXCPU_DMALLLOC_RESP 0x4448 #define BGE_BMAN_HWDIAG_1 0x444C #define BGE_BMAN_HWDIAG_2 0x4450 #define BGE_BMAN_HWDIAG_3 0x4454 /* Buffer manager mode register */ #define BGE_BMANMODE_RESET 0x00000001 #define BGE_BMANMODE_ENABLE 0x00000002 #define BGE_BMANMODE_ATTN 0x00000004 #define BGE_BMANMODE_TESTMODE 0x00000008 #define BGE_BMANMODE_LOMBUF_ATTN 0x00000010 #define BGE_BMANMODE_NO_TX_UNDERRUN 0x80000000 /* Buffer manager status register */ #define BGE_BMANSTAT_ERRO 0x00000004 #define BGE_BMANSTAT_LOWMBUF_ERROR 0x00000010 /* * Read DMA Control registers */ #define BGE_RDMA_MODE 0x4800 #define BGE_RDMA_STATUS 0x4804 #define BGE_RDMA_RSRVCTRL_REG2 
0x4890 #define BGE_RDMA_LSO_CRPTEN_CTRL_REG2 0x48A0 #define BGE_RDMA_RSRVCTRL 0x4900 #define BGE_RDMA_LSO_CRPTEN_CTRL 0x4910 /* Read DMA mode register */ #define BGE_RDMAMODE_RESET 0x00000001 #define BGE_RDMAMODE_ENABLE 0x00000002 #define BGE_RDMAMODE_PCI_TGT_ABRT_ATTN 0x00000004 #define BGE_RDMAMODE_PCI_MSTR_ABRT_ATTN 0x00000008 #define BGE_RDMAMODE_PCI_PERR_ATTN 0x00000010 #define BGE_RDMAMODE_PCI_ADDROFLOW_ATTN 0x00000020 #define BGE_RDMAMODE_PCI_FIFOOFLOW_ATTN 0x00000040 #define BGE_RDMAMODE_PCI_FIFOUFLOW_ATTN 0x00000080 #define BGE_RDMAMODE_PCI_FIFOOREAD_ATTN 0x00000100 #define BGE_RDMAMODE_LOCWRITE_TOOBIG 0x00000200 #define BGE_RDMAMODE_ALL_ATTNS 0x000003FC #define BGE_RDMAMODE_BD_SBD_CRPT_ATTN 0x00000800 #define BGE_RDMAMODE_MBUF_RBD_CRPT_ATTN 0x00001000 #define BGE_RDMAMODE_MBUF_SBD_CRPT_ATTN 0x00002000 #define BGE_RDMAMODE_FIFO_SIZE_128 0x00020000 #define BGE_RDMAMODE_FIFO_LONG_BURST 0x00030000 #define BGE_RDMAMODE_MULT_DMA_RD_DIS 0x01000000 #define BGE_RDMAMODE_TSO4_ENABLE 0x08000000 #define BGE_RDMAMODE_TSO6_ENABLE 0x10000000 #define BGE_RDMAMODE_H2BNC_VLAN_DET 0x20000000 /* Read DMA status register */ #define BGE_RDMASTAT_PCI_TGT_ABRT_ATTN 0x00000004 #define BGE_RDMASTAT_PCI_MSTR_ABRT_ATTN 0x00000008 #define BGE_RDMASTAT_PCI_PERR_ATTN 0x00000010 #define BGE_RDMASTAT_PCI_ADDROFLOW_ATTN 0x00000020 #define BGE_RDMASTAT_PCI_FIFOOFLOW_ATTN 0x00000040 #define BGE_RDMASTAT_PCI_FIFOUFLOW_ATTN 0x00000080 #define BGE_RDMASTAT_PCI_FIFOOREAD_ATTN 0x00000100 #define BGE_RDMASTAT_LOCWRITE_TOOBIG 0x00000200 /* Read DMA Reserved Control register */ #define BGE_RDMA_RSRVCTRL_FIFO_OFLW_FIX 0x00000004 #define BGE_RDMA_RSRVCTRL_FIFO_LWM_1_5K 0x00000C00 #define BGE_RDMA_RSRVCTRL_FIFO_HWM_1_5K 0x000C0000 #define BGE_RDMA_RSRVCTRL_TXMRGN_320B 0x28000000 #define BGE_RDMA_RSRVCTRL_FIFO_LWM_MASK 0x00000FF0 #define BGE_RDMA_RSRVCTRL_FIFO_HWM_MASK 0x000FF000 #define BGE_RDMA_RSRVCTRL_TXMRGN_MASK 0xFFE00000 #define BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_512 0x00020000 #define BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_4K 0x00030000 #define BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K 0x000C0000 #define BGE_RDMA_TX_LENGTH_WA_5719 0x02000000 #define BGE_RDMA_TX_LENGTH_WA_5720 0x00200000 /* BD Read DMA Mode register */ #define BGE_RDMA_BD_MODE 0x4A00 /* BD Read DMA Mode status register */ #define BGE_RDMA_BD_STATUS 0x4A04 #define BGE_RDMA_BD_MODE_RESET 0x00000001 #define BGE_RDMA_BD_MODE_ENABLE 0x00000002 /* Non-LSO Read DMA Mode register */ #define BGE_RDMA_NON_LSO_MODE 0x4B00 /* Non-LSO Read DMA Mode status register */ #define BGE_RDMA_NON_LSO_STATUS 0x4B04 #define BGE_RDMA_NON_LSO_MODE_RESET 0x00000001 #define BGE_RDMA_NON_LSO_MODE_ENABLE 0x00000002 #define BGE_RDMA_LENGTH 0x4BE0 #define BGE_NUM_RDMA_CHANNELS 4 /* * Write DMA control registers */ #define BGE_WDMA_MODE 0x4C00 #define BGE_WDMA_STATUS 0x4C04 /* Write DMA mode register */ #define BGE_WDMAMODE_RESET 0x00000001 #define BGE_WDMAMODE_ENABLE 0x00000002 #define BGE_WDMAMODE_PCI_TGT_ABRT_ATTN 0x00000004 #define BGE_WDMAMODE_PCI_MSTR_ABRT_ATTN 0x00000008 #define BGE_WDMAMODE_PCI_PERR_ATTN 0x00000010 #define BGE_WDMAMODE_PCI_ADDROFLOW_ATTN 0x00000020 #define BGE_WDMAMODE_PCI_FIFOOFLOW_ATTN 0x00000040 #define BGE_WDMAMODE_PCI_FIFOUFLOW_ATTN 0x00000080 #define BGE_WDMAMODE_PCI_FIFOOREAD_ATTN 0x00000100 #define BGE_WDMAMODE_LOCREAD_TOOBIG 0x00000200 #define BGE_WDMAMODE_ALL_ATTNS 0x000003FC #define BGE_WDMAMODE_STATUS_TAG_FIX 0x20000000 #define BGE_WDMAMODE_BURST_ALL_DATA 0xC0000000 /* Write DMA status register */ #define BGE_WDMASTAT_PCI_TGT_ABRT_ATTN 0x00000004 #define 
BGE_WDMASTAT_PCI_MSTR_ABRT_ATTN 0x00000008 #define BGE_WDMASTAT_PCI_PERR_ATTN 0x00000010 #define BGE_WDMASTAT_PCI_ADDROFLOW_ATTN 0x00000020 #define BGE_WDMASTAT_PCI_FIFOOFLOW_ATTN 0x00000040 #define BGE_WDMASTAT_PCI_FIFOUFLOW_ATTN 0x00000080 #define BGE_WDMASTAT_PCI_FIFOOREAD_ATTN 0x00000100 #define BGE_WDMASTAT_LOCREAD_TOOBIG 0x00000200 /* * RX CPU registers */ #define BGE_RXCPU_MODE 0x5000 #define BGE_RXCPU_STATUS 0x5004 #define BGE_RXCPU_PC 0x501C /* RX CPU mode register */ #define BGE_RXCPUMODE_RESET 0x00000001 #define BGE_RXCPUMODE_SINGLESTEP 0x00000002 #define BGE_RXCPUMODE_P0_DATAHLT_ENB 0x00000004 #define BGE_RXCPUMODE_P0_INSTRHLT_ENB 0x00000008 #define BGE_RXCPUMODE_WR_POSTBUF_ENB 0x00000010 #define BGE_RXCPUMODE_DATACACHE_ENB 0x00000020 #define BGE_RXCPUMODE_ROMFAIL 0x00000040 #define BGE_RXCPUMODE_WATCHDOG_ENB 0x00000080 #define BGE_RXCPUMODE_INSTRCACHE_PRF 0x00000100 #define BGE_RXCPUMODE_INSTRCACHE_FLUSH 0x00000200 #define BGE_RXCPUMODE_HALTCPU 0x00000400 #define BGE_RXCPUMODE_INVDATAHLT_ENB 0x00000800 #define BGE_RXCPUMODE_MADDRTRAPHLT_ENB 0x00001000 #define BGE_RXCPUMODE_RADDRTRAPHLT_ENB 0x00002000 /* RX CPU status register */ #define BGE_RXCPUSTAT_HW_BREAKPOINT 0x00000001 #define BGE_RXCPUSTAT_HLTINSTR_EXECUTED 0x00000002 #define BGE_RXCPUSTAT_INVALID_INSTR 0x00000004 #define BGE_RXCPUSTAT_P0_DATAREF 0x00000008 #define BGE_RXCPUSTAT_P0_INSTRREF 0x00000010 #define BGE_RXCPUSTAT_INVALID_DATAACC 0x00000020 #define BGE_RXCPUSTAT_INVALID_INSTRFTCH 0x00000040 #define BGE_RXCPUSTAT_BAD_MEMALIGN 0x00000080 #define BGE_RXCPUSTAT_MADDR_TRAP 0x00000100 #define BGE_RXCPUSTAT_REGADDR_TRAP 0x00000200 #define BGE_RXCPUSTAT_DATAACC_STALL 0x00001000 #define BGE_RXCPUSTAT_INSTRFETCH_STALL 0x00002000 #define BGE_RXCPUSTAT_MA_WR_FIFOOFLOW 0x08000000 #define BGE_RXCPUSTAT_MA_RD_FIFOOFLOW 0x10000000 #define BGE_RXCPUSTAT_MA_DATAMASK_OFLOW 0x20000000 #define BGE_RXCPUSTAT_MA_REQ_FIFOOFLOW 0x40000000 #define BGE_RXCPUSTAT_BLOCKING_READ 0x80000000 /* * V? 
CPU registers */ #define BGE_VCPU_STATUS 0x5100 #define BGE_VCPU_EXT_CTRL 0x6890 #define BGE_VCPU_STATUS_INIT_DONE 0x04000000 #define BGE_VCPU_STATUS_DRV_RESET 0x08000000 #define BGE_VCPU_EXT_CTRL_HALT_CPU 0x00400000 #define BGE_VCPU_EXT_CTRL_DISABLE_WOL 0x20000000 /* * TX CPU registers */ #define BGE_TXCPU_MODE 0x5400 #define BGE_TXCPU_STATUS 0x5404 #define BGE_TXCPU_PC 0x541C /* TX CPU mode register */ #define BGE_TXCPUMODE_RESET 0x00000001 #define BGE_TXCPUMODE_SINGLESTEP 0x00000002 #define BGE_TXCPUMODE_P0_DATAHLT_ENB 0x00000004 #define BGE_TXCPUMODE_P0_INSTRHLT_ENB 0x00000008 #define BGE_TXCPUMODE_WR_POSTBUF_ENB 0x00000010 #define BGE_TXCPUMODE_DATACACHE_ENB 0x00000020 #define BGE_TXCPUMODE_ROMFAIL 0x00000040 #define BGE_TXCPUMODE_WATCHDOG_ENB 0x00000080 #define BGE_TXCPUMODE_INSTRCACHE_PRF 0x00000100 #define BGE_TXCPUMODE_INSTRCACHE_FLUSH 0x00000200 #define BGE_TXCPUMODE_HALTCPU 0x00000400 #define BGE_TXCPUMODE_INVDATAHLT_ENB 0x00000800 #define BGE_TXCPUMODE_MADDRTRAPHLT_ENB 0x00001000 /* TX CPU status register */ #define BGE_TXCPUSTAT_HW_BREAKPOINT 0x00000001 #define BGE_TXCPUSTAT_HLTINSTR_EXECUTED 0x00000002 #define BGE_TXCPUSTAT_INVALID_INSTR 0x00000004 #define BGE_TXCPUSTAT_P0_DATAREF 0x00000008 #define BGE_TXCPUSTAT_P0_INSTRREF 0x00000010 #define BGE_TXCPUSTAT_INVALID_DATAACC 0x00000020 #define BGE_TXCPUSTAT_INVALID_INSTRFTCH 0x00000040 #define BGE_TXCPUSTAT_BAD_MEMALIGN 0x00000080 #define BGE_TXCPUSTAT_MADDR_TRAP 0x00000100 #define BGE_TXCPUSTAT_REGADDR_TRAP 0x00000200 #define BGE_TXCPUSTAT_DATAACC_STALL 0x00001000 #define BGE_TXCPUSTAT_INSTRFETCH_STALL 0x00002000 #define BGE_TXCPUSTAT_MA_WR_FIFOOFLOW 0x08000000 #define BGE_TXCPUSTAT_MA_RD_FIFOOFLOW 0x10000000 #define BGE_TXCPUSTAT_MA_DATAMASK_OFLOW 0x20000000 #define BGE_TXCPUSTAT_MA_REQ_FIFOOFLOW 0x40000000 #define BGE_TXCPUSTAT_BLOCKING_READ 0x80000000 /* * Low priority mailbox registers */ #define BGE_LPMBX_IRQ0_HI 0x5800 #define BGE_LPMBX_IRQ0_LO 0x5804 #define BGE_LPMBX_IRQ1_HI 0x5808 #define BGE_LPMBX_IRQ1_LO 0x580C #define BGE_LPMBX_IRQ2_HI 0x5810 #define BGE_LPMBX_IRQ2_LO 0x5814 #define BGE_LPMBX_IRQ3_HI 0x5818 #define BGE_LPMBX_IRQ3_LO 0x581C #define BGE_LPMBX_GEN0_HI 0x5820 #define BGE_LPMBX_GEN0_LO 0x5824 #define BGE_LPMBX_GEN1_HI 0x5828 #define BGE_LPMBX_GEN1_LO 0x582C #define BGE_LPMBX_GEN2_HI 0x5830 #define BGE_LPMBX_GEN2_LO 0x5834 #define BGE_LPMBX_GEN3_HI 0x5838 #define BGE_LPMBX_GEN3_LO 0x583C #define BGE_LPMBX_GEN4_HI 0x5840 #define BGE_LPMBX_GEN4_LO 0x5844 #define BGE_LPMBX_GEN5_HI 0x5848 #define BGE_LPMBX_GEN5_LO 0x584C #define BGE_LPMBX_GEN6_HI 0x5850 #define BGE_LPMBX_GEN6_LO 0x5854 #define BGE_LPMBX_GEN7_HI 0x5858 #define BGE_LPMBX_GEN7_LO 0x585C #define BGE_LPMBX_RELOAD_STATS_HI 0x5860 #define BGE_LPMBX_RELOAD_STATS_LO 0x5864 #define BGE_LPMBX_RX_STD_PROD_HI 0x5868 #define BGE_LPMBX_RX_STD_PROD_LO 0x586C #define BGE_LPMBX_RX_JUMBO_PROD_HI 0x5870 #define BGE_LPMBX_RX_JUMBO_PROD_LO 0x5874 #define BGE_LPMBX_RX_MINI_PROD_HI 0x5878 #define BGE_LPMBX_RX_MINI_PROD_LO 0x587C #define BGE_LPMBX_RX_CONS0_HI 0x5880 #define BGE_LPMBX_RX_CONS0_LO 0x5884 #define BGE_LPMBX_RX_CONS1_HI 0x5888 #define BGE_LPMBX_RX_CONS1_LO 0x588C #define BGE_LPMBX_RX_CONS2_HI 0x5890 #define BGE_LPMBX_RX_CONS2_LO 0x5894 #define BGE_LPMBX_RX_CONS3_HI 0x5898 #define BGE_LPMBX_RX_CONS3_LO 0x589C #define BGE_LPMBX_RX_CONS4_HI 0x58A0 #define BGE_LPMBX_RX_CONS4_LO 0x58A4 #define BGE_LPMBX_RX_CONS5_HI 0x58A8 #define BGE_LPMBX_RX_CONS5_LO 0x58AC #define BGE_LPMBX_RX_CONS6_HI 0x58B0 #define BGE_LPMBX_RX_CONS6_LO 0x58B4 #define
BGE_LPMBX_RX_CONS7_HI 0x58B8 #define BGE_LPMBX_RX_CONS7_LO 0x58BC #define BGE_LPMBX_RX_CONS8_HI 0x58C0 #define BGE_LPMBX_RX_CONS8_LO 0x58C4 #define BGE_LPMBX_RX_CONS9_HI 0x58C8 #define BGE_LPMBX_RX_CONS9_LO 0x58CC #define BGE_LPMBX_RX_CONS10_HI 0x58D0 #define BGE_LPMBX_RX_CONS10_LO 0x58D4 #define BGE_LPMBX_RX_CONS11_HI 0x58D8 #define BGE_LPMBX_RX_CONS11_LO 0x58DC #define BGE_LPMBX_RX_CONS12_HI 0x58E0 #define BGE_LPMBX_RX_CONS12_LO 0x58E4 #define BGE_LPMBX_RX_CONS13_HI 0x58E8 #define BGE_LPMBX_RX_CONS13_LO 0x58EC #define BGE_LPMBX_RX_CONS14_HI 0x58F0 #define BGE_LPMBX_RX_CONS14_LO 0x58F4 #define BGE_LPMBX_RX_CONS15_HI 0x58F8 #define BGE_LPMBX_RX_CONS15_LO 0x58FC #define BGE_LPMBX_TX_HOST_PROD0_HI 0x5900 #define BGE_LPMBX_TX_HOST_PROD0_LO 0x5904 #define BGE_LPMBX_TX_HOST_PROD1_HI 0x5908 #define BGE_LPMBX_TX_HOST_PROD1_LO 0x590C #define BGE_LPMBX_TX_HOST_PROD2_HI 0x5910 #define BGE_LPMBX_TX_HOST_PROD2_LO 0x5914 #define BGE_LPMBX_TX_HOST_PROD3_HI 0x5918 #define BGE_LPMBX_TX_HOST_PROD3_LO 0x591C #define BGE_LPMBX_TX_HOST_PROD4_HI 0x5920 #define BGE_LPMBX_TX_HOST_PROD4_LO 0x5924 #define BGE_LPMBX_TX_HOST_PROD5_HI 0x5928 #define BGE_LPMBX_TX_HOST_PROD5_LO 0x592C #define BGE_LPMBX_TX_HOST_PROD6_HI 0x5930 #define BGE_LPMBX_TX_HOST_PROD6_LO 0x5934 #define BGE_LPMBX_TX_HOST_PROD7_HI 0x5938 #define BGE_LPMBX_TX_HOST_PROD7_LO 0x593C #define BGE_LPMBX_TX_HOST_PROD8_HI 0x5940 #define BGE_LPMBX_TX_HOST_PROD8_LO 0x5944 #define BGE_LPMBX_TX_HOST_PROD9_HI 0x5948 #define BGE_LPMBX_TX_HOST_PROD9_LO 0x594C #define BGE_LPMBX_TX_HOST_PROD10_HI 0x5950 #define BGE_LPMBX_TX_HOST_PROD10_LO 0x5954 #define BGE_LPMBX_TX_HOST_PROD11_HI 0x5958 #define BGE_LPMBX_TX_HOST_PROD11_LO 0x595C #define BGE_LPMBX_TX_HOST_PROD12_HI 0x5960 #define BGE_LPMBX_TX_HOST_PROD12_LO 0x5964 #define BGE_LPMBX_TX_HOST_PROD13_HI 0x5968 #define BGE_LPMBX_TX_HOST_PROD13_LO 0x596C #define BGE_LPMBX_TX_HOST_PROD14_HI 0x5970 #define BGE_LPMBX_TX_HOST_PROD14_LO 0x5974 #define BGE_LPMBX_TX_HOST_PROD15_HI 0x5978 #define BGE_LPMBX_TX_HOST_PROD15_LO 0x597C #define BGE_LPMBX_TX_NIC_PROD0_HI 0x5980 #define BGE_LPMBX_TX_NIC_PROD0_LO 0x5984 #define BGE_LPMBX_TX_NIC_PROD1_HI 0x5988 #define BGE_LPMBX_TX_NIC_PROD1_LO 0x598C #define BGE_LPMBX_TX_NIC_PROD2_HI 0x5990 #define BGE_LPMBX_TX_NIC_PROD2_LO 0x5994 #define BGE_LPMBX_TX_NIC_PROD3_HI 0x5998 #define BGE_LPMBX_TX_NIC_PROD3_LO 0x599C #define BGE_LPMBX_TX_NIC_PROD4_HI 0x59A0 #define BGE_LPMBX_TX_NIC_PROD4_LO 0x59A4 #define BGE_LPMBX_TX_NIC_PROD5_HI 0x59A8 #define BGE_LPMBX_TX_NIC_PROD5_LO 0x59AC #define BGE_LPMBX_TX_NIC_PROD6_HI 0x59B0 #define BGE_LPMBX_TX_NIC_PROD6_LO 0x59B4 #define BGE_LPMBX_TX_NIC_PROD7_HI 0x59B8 #define BGE_LPMBX_TX_NIC_PROD7_LO 0x59BC #define BGE_LPMBX_TX_NIC_PROD8_HI 0x59C0 #define BGE_LPMBX_TX_NIC_PROD8_LO 0x59C4 #define BGE_LPMBX_TX_NIC_PROD9_HI 0x59C8 #define BGE_LPMBX_TX_NIC_PROD9_LO 0x59CC #define BGE_LPMBX_TX_NIC_PROD10_HI 0x59D0 #define BGE_LPMBX_TX_NIC_PROD10_LO 0x59D4 #define BGE_LPMBX_TX_NIC_PROD11_HI 0x59D8 #define BGE_LPMBX_TX_NIC_PROD11_LO 0x59DC #define BGE_LPMBX_TX_NIC_PROD12_HI 0x59E0 #define BGE_LPMBX_TX_NIC_PROD12_LO 0x59E4 #define BGE_LPMBX_TX_NIC_PROD13_HI 0x59E8 #define BGE_LPMBX_TX_NIC_PROD13_LO 0x59EC #define BGE_LPMBX_TX_NIC_PROD14_HI 0x59F0 #define BGE_LPMBX_TX_NIC_PROD14_LO 0x59F4 #define BGE_LPMBX_TX_NIC_PROD15_HI 0x59F8 #define BGE_LPMBX_TX_NIC_PROD15_LO 0x59FC /* * Flow throw Queue reset register */ #define BGE_FTQ_RESET 0x5C00 #define BGE_FTQRESET_DMAREAD 0x00000002 #define BGE_FTQRESET_DMAHIPRIO_RD 0x00000004 #define BGE_FTQRESET_DMADONE 0x00000010 #define 
BGE_FTQRESET_SBDC 0x00000020 #define BGE_FTQRESET_SDI 0x00000040 #define BGE_FTQRESET_WDMA 0x00000080 #define BGE_FTQRESET_DMAHIPRIO_WR 0x00000100 #define BGE_FTQRESET_TYPE1_SOFTWARE 0x00000200 #define BGE_FTQRESET_SDC 0x00000400 #define BGE_FTQRESET_HCC 0x00000800 #define BGE_FTQRESET_TXFIFO 0x00001000 #define BGE_FTQRESET_MBC 0x00002000 #define BGE_FTQRESET_RBDC 0x00004000 #define BGE_FTQRESET_RXLP 0x00008000 #define BGE_FTQRESET_RDBDI 0x00010000 #define BGE_FTQRESET_RDC 0x00020000 #define BGE_FTQRESET_TYPE2_SOFTWARE 0x00040000 /* * Message Signaled Interrupt registers */ #define BGE_MSI_MODE 0x6000 #define BGE_MSI_STATUS 0x6004 #define BGE_MSI_FIFOACCESS 0x6008 /* MSI mode register */ #define BGE_MSIMODE_RESET 0x00000001 #define BGE_MSIMODE_ENABLE 0x00000002 #define BGE_MSIMODE_ONE_SHOT_DISABLE 0x00000020 #define BGE_MSIMODE_MULTIVEC_ENABLE 0x00000080 /* MSI status register */ #define BGE_MSISTAT_PCI_TGT_ABRT_ATTN 0x00000004 #define BGE_MSISTAT_PCI_MSTR_ABRT_ATTN 0x00000008 #define BGE_MSISTAT_PCI_PERR_ATTN 0x00000010 #define BGE_MSISTAT_MSI_FIFOUFLOW_ATTN 0x00000020 #define BGE_MSISTAT_MSI_FIFOOFLOW_ATTN 0x00000040 /* * DMA Completion registers */ #define BGE_DMAC_MODE 0x6400 /* DMA Completion mode register */ #define BGE_DMACMODE_RESET 0x00000001 #define BGE_DMACMODE_ENABLE 0x00000002 /* * General control registers. */ #define BGE_MODE_CTL 0x6800 #define BGE_MISC_CFG 0x6804 #define BGE_MISC_LOCAL_CTL 0x6808 #define BGE_RX_CPU_EVENT 0x6810 #define BGE_TX_CPU_EVENT 0x6820 #define BGE_EE_ADDR 0x6838 #define BGE_EE_DATA 0x683C #define BGE_EE_CTL 0x6840 #define BGE_MDI_CTL 0x6844 #define BGE_EE_DELAY 0x6848 #define BGE_FASTBOOT_PC 0x6894 #define BGE_RX_CPU_DRV_EVENT 0x00004000 /* * NVRAM Control registers */ #define BGE_NVRAM_CMD 0x7000 #define BGE_NVRAM_STAT 0x7004 #define BGE_NVRAM_WRDATA 0x7008 #define BGE_NVRAM_ADDR 0x700c #define BGE_NVRAM_RDDATA 0x7010 #define BGE_NVRAM_CFG1 0x7014 #define BGE_NVRAM_CFG2 0x7018 #define BGE_NVRAM_CFG3 0x701c #define BGE_NVRAM_SWARB 0x7020 #define BGE_NVRAM_ACCESS 0x7024 #define BGE_NVRAM_WRITE1 0x7028 #define BGE_NVRAMCMD_RESET 0x00000001 #define BGE_NVRAMCMD_DONE 0x00000008 #define BGE_NVRAMCMD_START 0x00000010 #define BGE_NVRAMCMD_WR 0x00000020 /* 1 = wr, 0 = rd */ #define BGE_NVRAMCMD_ERASE 0x00000040 #define BGE_NVRAMCMD_FIRST 0x00000080 #define BGE_NVRAMCMD_LAST 0x00000100 #define BGE_NVRAM_READCMD \ (BGE_NVRAMCMD_FIRST|BGE_NVRAMCMD_LAST| \ BGE_NVRAMCMD_START|BGE_NVRAMCMD_DONE) #define BGE_NVRAM_WRITECMD \ (BGE_NVRAMCMD_FIRST|BGE_NVRAMCMD_LAST| \ BGE_NVRAMCMD_START|BGE_NVRAMCMD_DONE|BGE_NVRAMCMD_WR) #define BGE_NVRAMSWARB_SET0 0x00000001 #define BGE_NVRAMSWARB_SET1 0x00000002 #define BGE_NVRAMSWARB_SET2 0x00000003 #define BGE_NVRAMSWARB_SET3 0x00000004 #define BGE_NVRAMSWARB_CLR0 0x00000010 #define BGE_NVRAMSWARB_CLR1 0x00000020 #define BGE_NVRAMSWARB_CLR2 0x00000040 #define BGE_NVRAMSWARB_CLR3 0x00000080 #define BGE_NVRAMSWARB_GNT0 0x00000100 #define BGE_NVRAMSWARB_GNT1 0x00000200 #define BGE_NVRAMSWARB_GNT2 0x00000400 #define BGE_NVRAMSWARB_GNT3 0x00000800 #define BGE_NVRAMSWARB_REQ0 0x00001000 #define BGE_NVRAMSWARB_REQ1 0x00002000 #define BGE_NVRAMSWARB_REQ2 0x00004000 #define BGE_NVRAMSWARB_REQ3 0x00008000 #define BGE_NVRAMACC_ENABLE 0x00000001 #define BGE_NVRAMACC_WRENABLE 0x00000002 /* Mode control register */ #define BGE_MODECTL_INT_SNDCOAL_ONLY 0x00000001 #define BGE_MODECTL_BYTESWAP_NONFRAME 0x00000002 #define BGE_MODECTL_WORDSWAP_NONFRAME 0x00000004 #define BGE_MODECTL_BYTESWAP_DATA 0x00000010 #define BGE_MODECTL_WORDSWAP_DATA 
0x00000020 #define BGE_MODECTL_BYTESWAP_B2HRX_DATA 0x00000040 #define BGE_MODECTL_WORDSWAP_B2HRX_DATA 0x00000080 #define BGE_MODECTL_NO_FRAME_CRACKING 0x00000200 #define BGE_MODECTL_NO_RX_CRC 0x00000400 #define BGE_MODECTL_RX_BADFRAMES 0x00000800 #define BGE_MODECTL_NO_TX_INTR 0x00002000 #define BGE_MODECTL_NO_RX_INTR 0x00004000 #define BGE_MODECTL_FORCE_PCI32 0x00008000 #define BGE_MODECTL_B2HRX_ENABLE 0x00008000 #define BGE_MODECTL_STACKUP 0x00010000 #define BGE_MODECTL_HOST_SEND_BDS 0x00020000 #define BGE_MODECTL_HTX2B_ENABLE 0x00040000 #define BGE_MODECTL_TX_NO_PHDR_CSUM 0x00100000 #define BGE_MODECTL_RX_NO_PHDR_CSUM 0x00800000 #define BGE_MODECTL_TX_ATTN_INTR 0x01000000 #define BGE_MODECTL_RX_ATTN_INTR 0x02000000 #define BGE_MODECTL_MAC_ATTN_INTR 0x04000000 #define BGE_MODECTL_DMA_ATTN_INTR 0x08000000 #define BGE_MODECTL_FLOWCTL_ATTN_INTR 0x10000000 #define BGE_MODECTL_4X_SENDRING_SZ 0x20000000 #define BGE_MODECTL_FW_PROCESS_MCASTS 0x40000000 /* Misc. config register */ #define BGE_MISCCFG_RESET_CORE_CLOCKS 0x00000001 #define BGE_MISCCFG_TIMER_PRESCALER 0x000000FE #define BGE_MISCCFG_BOARD_ID_MASK 0x0001E000 #define BGE_MISCCFG_BOARD_ID_5704 0x00000000 #define BGE_MISCCFG_BOARD_ID_5704CIOBE 0x00004000 #define BGE_MISCCFG_BOARD_ID_5788 0x00010000 #define BGE_MISCCFG_BOARD_ID_5788M 0x00018000 #define BGE_MISCCFG_EPHY_IDDQ 0x00200000 #define BGE_MISCCFG_GPHY_PD_OVERRIDE 0x04000000 #define BGE_32BITTIME_66MHZ (0x41 << 1) /* Misc. Local Control */ #define BGE_MLC_INTR_STATE 0x00000001 #define BGE_MLC_INTR_CLR 0x00000002 #define BGE_MLC_INTR_SET 0x00000004 #define BGE_MLC_INTR_ONATTN 0x00000008 #define BGE_MLC_MISCIO_IN0 0x00000100 #define BGE_MLC_MISCIO_IN1 0x00000200 #define BGE_MLC_MISCIO_IN2 0x00000400 #define BGE_MLC_MISCIO_OUTEN0 0x00000800 #define BGE_MLC_MISCIO_OUTEN1 0x00001000 #define BGE_MLC_MISCIO_OUTEN2 0x00002000 #define BGE_MLC_MISCIO_OUT0 0x00004000 #define BGE_MLC_MISCIO_OUT1 0x00008000 #define BGE_MLC_MISCIO_OUT2 0x00010000 #define BGE_MLC_EXTRAM_ENB 0x00020000 #define BGE_MLC_SRAM_SIZE 0x001C0000 #define BGE_MLC_BANK_SEL 0x00200000 /* 0 = 2 banks, 1 == 1 */ #define BGE_MLC_SSRAM_TYPE 0x00400000 /* 1 = ZBT, 0 = standard */ #define BGE_MLC_SSRAM_CYC_DESEL 0x00800000 #define BGE_MLC_AUTO_EEPROM 0x01000000 #define BGE_SSRAMSIZE_256KB 0x00000000 #define BGE_SSRAMSIZE_512KB 0x00040000 #define BGE_SSRAMSIZE_1MB 0x00080000 #define BGE_SSRAMSIZE_2MB 0x000C0000 #define BGE_SSRAMSIZE_4MB 0x00100000 #define BGE_SSRAMSIZE_8MB 0x00140000 #define BGE_SSRAMSIZE_16M 0x00180000 /* EEPROM address register */ #define BGE_EEADDR_ADDRESS 0x0000FFFC #define BGE_EEADDR_HALFCLK 0x01FF0000 #define BGE_EEADDR_START 0x02000000 #define BGE_EEADDR_DEVID 0x1C000000 #define BGE_EEADDR_RESET 0x20000000 #define BGE_EEADDR_DONE 0x40000000 #define BGE_EEADDR_RW 0x80000000 /* 1 = rd, 0 = wr */ #define BGE_EEDEVID(x) ((x & 7) << 26) #define BGE_EEHALFCLK(x) ((x & 0x1FF) << 16) #define BGE_HALFCLK_384SCL 0x60 #define BGE_EE_READCMD \ (BGE_EEHALFCLK(BGE_HALFCLK_384SCL)|BGE_EEDEVID(0)| \ BGE_EEADDR_START|BGE_EEADDR_RW|BGE_EEADDR_DONE) #define BGE_EE_WRCMD \ (BGE_EEHALFCLK(BGE_HALFCLK_384SCL)|BGE_EEDEVID(0)| \ BGE_EEADDR_START|BGE_EEADDR_DONE) /* EEPROM Control register */ #define BGE_EECTL_CLKOUT_TRISTATE 0x00000001 #define BGE_EECTL_CLKOUT 0x00000002 #define BGE_EECTL_CLKIN 0x00000004 #define BGE_EECTL_DATAOUT_TRISTATE 0x00000008 #define BGE_EECTL_DATAOUT 0x00000010 #define BGE_EECTL_DATAIN 0x00000020 /* MDI (MII/GMII) access register */ #define BGE_MDI_DATA 0x00000001 #define BGE_MDI_DIR 0x00000002 #define 
BGE_MDI_SEL 0x00000004 #define BGE_MDI_CLK 0x00000008 #define BGE_MEMWIN_START 0x00008000 #define BGE_MEMWIN_END 0x0000FFFF /* BAR1 (APE) Register Definitions */ #define BGE_APE_GPIO_MSG 0x0008 #define BGE_APE_EVENT 0x000C #define BGE_APE_LOCK_REQ 0x002C #define BGE_APE_LOCK_GRANT 0x004C #define BGE_APE_GPIO_MSG_SHIFT 4 #define BGE_APE_EVENT_1 0x00000001 #define BGE_APE_LOCK_REQ_DRIVER0 0x00001000 #define BGE_APE_LOCK_GRANT_DRIVER0 0x00001000 /* APE Shared Memory block (writable by APE only) */ #define BGE_APE_SEG_SIG 0x4000 #define BGE_APE_FW_STATUS 0x400C #define BGE_APE_FW_FEATURES 0x4010 #define BGE_APE_FW_BEHAVIOR 0x4014 #define BGE_APE_FW_VERSION 0x4018 #define BGE_APE_FW_HEARTBEAT_INTERVAL 0x4024 #define BGE_APE_FW_HEARTBEAT 0x4028 #define BGE_APE_FW_ERROR_FLAGS 0x4074 #define BGE_APE_SEG_SIG_MAGIC 0x41504521 #define BGE_APE_FW_STATUS_READY 0x00000100 #define BGE_APE_FW_FEATURE_DASH 0x00000001 #define BGE_APE_FW_FEATURE_NCSI 0x00000002 #define BGE_APE_FW_VERSION_MAJMSK 0xFF000000 #define BGE_APE_FW_VERSION_MAJSFT 24 #define BGE_APE_FW_VERSION_MINMSK 0x00FF0000 #define BGE_APE_FW_VERSION_MINSFT 16 #define BGE_APE_FW_VERSION_REVMSK 0x0000FF00 #define BGE_APE_FW_VERSION_REVSFT 8 #define BGE_APE_FW_VERSION_BLDMSK 0x000000FF /* Host Shared Memory block (writable by host only) */ #define BGE_APE_HOST_SEG_SIG 0x4200 #define BGE_APE_HOST_SEG_LEN 0x4204 #define BGE_APE_HOST_INIT_COUNT 0x4208 #define BGE_APE_HOST_DRIVER_ID 0x420C #define BGE_APE_HOST_BEHAVIOR 0x4210 #define BGE_APE_HOST_HEARTBEAT_INT_MS 0x4214 #define BGE_APE_HOST_HEARTBEAT_COUNT 0x4218 #define BGE_APE_HOST_DRVR_STATE 0x421C #define BGE_APE_HOST_WOL_SPEED 0x4224 #define BGE_APE_HOST_SEG_SIG_MAGIC 0x484F5354 #define BGE_APE_HOST_SEG_LEN_MAGIC 0x00000020 #define BGE_APE_HOST_DRIVER_ID_FBSD 0xF6000000 #define BGE_APE_HOST_DRIVER_ID_MAGIC(maj, min) \ (BGE_APE_HOST_DRIVER_ID_FBSD | \ ((maj) & 0xff) << 16 | ((min) & 0xff) << 8) #define BGE_APE_HOST_BEHAV_NO_PHYLOCK 0x00000001 #define BGE_APE_HOST_HEARTBEAT_INT_DISABLE 0 #define BGE_APE_HOST_HEARTBEAT_INT_5SEC 5000 #define BGE_APE_HOST_DRVR_STATE_START 0x00000001 #define BGE_APE_HOST_DRVR_STATE_UNLOAD 0x00000002 #define BGE_APE_HOST_DRVR_STATE_WOL 0x00000003 #define BGE_APE_HOST_DRVR_STATE_SUSPEND 0x00000004 #define BGE_APE_HOST_WOL_SPEED_AUTO 0x00008000 #define BGE_APE_EVENT_STATUS 0x4300 #define BGE_APE_EVENT_STATUS_DRIVER_EVNT 0x00000010 #define BGE_APE_EVENT_STATUS_STATE_CHNGE 0x00000500 #define BGE_APE_EVENT_STATUS_STATE_START 0x00010000 #define BGE_APE_EVENT_STATUS_STATE_UNLOAD 0x00020000 #define BGE_APE_EVENT_STATUS_STATE_WOL 0x00030000 #define BGE_APE_EVENT_STATUS_STATE_SUSPEND 0x00040000 #define BGE_APE_EVENT_STATUS_EVENT_PENDING 0x80000000 #define BGE_APE_DEBUG_LOG 0x4E00 #define BGE_APE_DEBUG_LOG_LEN 0x0100 #define BGE_APE_PER_LOCK_REQ 0x8400 #define BGE_APE_PER_LOCK_GRANT 0x8420 #define BGE_APE_LOCK_PER_REQ_DRIVER0 0x00001000 #define BGE_APE_LOCK_PER_REQ_DRIVER1 0x00000002 #define BGE_APE_LOCK_PER_REQ_DRIVER2 0x00000004 #define BGE_APE_LOCK_PER_REQ_DRIVER3 0x00000008 #define BGE_APE_PER_LOCK_GRANT_DRIVER0 0x00001000 #define BGE_APE_PER_LOCK_GRANT_DRIVER1 0x00000002 #define BGE_APE_PER_LOCK_GRANT_DRIVER2 0x00000004 #define BGE_APE_PER_LOCK_GRANT_DRIVER3 0x00000008 /* APE Mutex Resources */ #define BGE_APE_LOCK_PHY0 0 #define BGE_APE_LOCK_GRC 1 #define BGE_APE_LOCK_PHY1 2 #define BGE_APE_LOCK_PHY2 3 #define BGE_APE_LOCK_MEM 4 #define BGE_APE_LOCK_PHY3 5 #define BGE_APE_LOCK_GPIO 7 #define BGE_MEMWIN_READ(sc, x, val) \ do { \ pci_write_config(sc->bge_dev,
BGE_PCI_MEMWIN_BASEADDR, \ (0xFFFF0000 & x), 4); \ val = CSR_READ_4(sc, BGE_MEMWIN_START + (x & 0xFFFF)); \ } while(0) #define BGE_MEMWIN_WRITE(sc, x, val) \ do { \ pci_write_config(sc->bge_dev, BGE_PCI_MEMWIN_BASEADDR, \ (0xFFFF0000 & x), 4); \ CSR_WRITE_4(sc, BGE_MEMWIN_START + (x & 0xFFFF), val); \ } while(0) /* * This magic number is written to the firmware mailbox at 0xb50 * before a software reset is issued. After the internal firmware * has completed its initialization it will write the opposite of * this value, ~BGE_SRAM_FW_MB_MAGIC, to the same location, * allowing the driver to synchronize with the firmware. */ #define BGE_SRAM_FW_MB_MAGIC 0x4B657654 typedef struct { uint32_t bge_addr_hi; uint32_t bge_addr_lo; } bge_hostaddr; #define BGE_HOSTADDR(x, y) \ do { \ (x).bge_addr_lo = ((uint64_t) (y) & 0xffffffff); \ (x).bge_addr_hi = ((uint64_t) (y) >> 32); \ } while(0) #define BGE_ADDR_LO(y) \ ((uint64_t) (y) & 0xFFFFFFFF) #define BGE_ADDR_HI(y) \ ((uint64_t) (y) >> 32) /* Ring control block structure */ struct bge_rcb { bge_hostaddr bge_hostaddr; uint32_t bge_maxlen_flags; uint32_t bge_nicaddr; }; #define RCB_WRITE_4(sc, rcb, offset, val) \ bus_write_4(sc->bge_res, rcb + offsetof(struct bge_rcb, offset), val) #define BGE_RCB_MAXLEN_FLAGS(maxlen, flags) ((maxlen) << 16 | (flags)) #define BGE_RCB_FLAG_USE_EXT_RX_BD 0x0001 #define BGE_RCB_FLAG_RING_DISABLED 0x0002 struct bge_tx_bd { bge_hostaddr bge_addr; #if BYTE_ORDER == LITTLE_ENDIAN uint16_t bge_flags; uint16_t bge_len; uint16_t bge_vlan_tag; uint16_t bge_mss; #else uint16_t bge_len; uint16_t bge_flags; uint16_t bge_mss; uint16_t bge_vlan_tag; #endif }; #define BGE_TXBDFLAG_TCP_UDP_CSUM 0x0001 #define BGE_TXBDFLAG_IP_CSUM 0x0002 #define BGE_TXBDFLAG_END 0x0004 #define BGE_TXBDFLAG_IP_FRAG 0x0008 #define BGE_TXBDFLAG_JUMBO_FRAME 0x0008 /* 5717 */ #define BGE_TXBDFLAG_IP_FRAG_END 0x0010 #define BGE_TXBDFLAG_HDRLEN_BIT2 0x0010 /* 5717 */ #define BGE_TXBDFLAG_SNAP 0x0020 /* 5717 */ #define BGE_TXBDFLAG_VLAN_TAG 0x0040 #define BGE_TXBDFLAG_COAL_NOW 0x0080 #define BGE_TXBDFLAG_CPU_PRE_DMA 0x0100 #define BGE_TXBDFLAG_CPU_POST_DMA 0x0200 #define BGE_TXBDFLAG_HDRLEN_BIT3 0x0400 /* 5717 */ #define BGE_TXBDFLAG_HDRLEN_BIT4 0x0800 /* 5717 */ #define BGE_TXBDFLAG_INSERT_SRC_ADDR 0x1000 #define BGE_TXBDFLAG_HDRLEN_BIT5 0x1000 /* 5717 */ #define BGE_TXBDFLAG_HDRLEN_BIT6 0x2000 /* 5717 */ #define BGE_TXBDFLAG_HDRLEN_BIT7 0x4000 /* 5717 */ #define BGE_TXBDFLAG_CHOOSE_SRC_ADDR 0x6000 #define BGE_TXBDFLAG_NO_CRC 0x8000 #define BGE_TXBDFLAG_MSS_SIZE_MASK 0x3FFF /* 5717 */ /* Bits [1:0] of the MSS header length. 
*/ #define BGE_TXBDFLAG_MSS_HDRLEN_MASK 0xC000 /* 5717 */ #define BGE_NIC_TXRING_ADDR(ringno, size) \ BGE_SEND_RING_1_TO_4 + \ ((ringno * sizeof(struct bge_tx_bd) * size) / 4) struct bge_rx_bd { bge_hostaddr bge_addr; #if BYTE_ORDER == LITTLE_ENDIAN uint16_t bge_len; uint16_t bge_idx; uint16_t bge_flags; uint16_t bge_type; uint16_t bge_tcp_udp_csum; uint16_t bge_ip_csum; uint16_t bge_vlan_tag; uint16_t bge_error_flag; #else uint16_t bge_idx; uint16_t bge_len; uint16_t bge_type; uint16_t bge_flags; uint16_t bge_ip_csum; uint16_t bge_tcp_udp_csum; uint16_t bge_error_flag; uint16_t bge_vlan_tag; #endif uint32_t bge_rsvd; uint32_t bge_opaque; }; struct bge_extrx_bd { bge_hostaddr bge_addr1; bge_hostaddr bge_addr2; bge_hostaddr bge_addr3; #if BYTE_ORDER == LITTLE_ENDIAN uint16_t bge_len2; uint16_t bge_len1; uint16_t bge_rsvd1; uint16_t bge_len3; #else uint16_t bge_len1; uint16_t bge_len2; uint16_t bge_len3; uint16_t bge_rsvd1; #endif bge_hostaddr bge_addr0; #if BYTE_ORDER == LITTLE_ENDIAN uint16_t bge_len0; uint16_t bge_idx; uint16_t bge_flags; uint16_t bge_type; uint16_t bge_tcp_udp_csum; uint16_t bge_ip_csum; uint16_t bge_vlan_tag; uint16_t bge_error_flag; #else uint16_t bge_idx; uint16_t bge_len0; uint16_t bge_type; uint16_t bge_flags; uint16_t bge_ip_csum; uint16_t bge_tcp_udp_csum; uint16_t bge_error_flag; uint16_t bge_vlan_tag; #endif uint32_t bge_rsvd0; uint32_t bge_opaque; }; #define BGE_RXBDFLAG_END 0x0004 #define BGE_RXBDFLAG_JUMBO_RING 0x0020 #define BGE_RXBDFLAG_VLAN_TAG 0x0040 #define BGE_RXBDFLAG_ERROR 0x0400 #define BGE_RXBDFLAG_MINI_RING 0x0800 #define BGE_RXBDFLAG_IP_CSUM 0x1000 #define BGE_RXBDFLAG_TCP_UDP_CSUM 0x2000 #define BGE_RXBDFLAG_TCP_UDP_IS_TCP 0x4000 #define BGE_RXBDFLAG_IPV6 0x8000 #define BGE_RXERRFLAG_BAD_CRC 0x0001 #define BGE_RXERRFLAG_COLL_DETECT 0x0002 #define BGE_RXERRFLAG_LINK_LOST 0x0004 #define BGE_RXERRFLAG_PHY_DECODE_ERR 0x0008 #define BGE_RXERRFLAG_MAC_ABORT 0x0010 #define BGE_RXERRFLAG_RUNT 0x0020 #define BGE_RXERRFLAG_TRUNC_NO_RSRCS 0x0040 #define BGE_RXERRFLAG_GIANT 0x0080 #define BGE_RXERRFLAG_IP_CSUM_NOK 0x1000 /* 5717 */ struct bge_sts_idx { #if BYTE_ORDER == LITTLE_ENDIAN uint16_t bge_rx_prod_idx; uint16_t bge_tx_cons_idx; #else uint16_t bge_tx_cons_idx; uint16_t bge_rx_prod_idx; #endif }; struct bge_status_block { uint32_t bge_status; uint32_t bge_status_tag; #if BYTE_ORDER == LITTLE_ENDIAN uint16_t bge_rx_jumbo_cons_idx; uint16_t bge_rx_std_cons_idx; uint16_t bge_rx_mini_cons_idx; uint16_t bge_rsvd1; #else uint16_t bge_rx_std_cons_idx; uint16_t bge_rx_jumbo_cons_idx; uint16_t bge_rsvd1; uint16_t bge_rx_mini_cons_idx; #endif struct bge_sts_idx bge_idx[16]; }; #define BGE_STATFLAG_UPDATED 0x00000001 #define BGE_STATFLAG_LINKSTATE_CHANGED 0x00000002 #define BGE_STATFLAG_ERROR 0x00000004 /* * Broadcom Vendor ID * (Note: the BCM570x still defaults to the Alteon PCI vendor ID * even though they're now manufactured by Broadcom) */ #define BCOM_VENDORID 0x14E4 #define BCOM_DEVICEID_BCM5700 0x1644 #define BCOM_DEVICEID_BCM5701 0x1645 #define BCOM_DEVICEID_BCM5702 0x1646 #define BCOM_DEVICEID_BCM5702X 0x16A6 #define BCOM_DEVICEID_BCM5702_ALT 0x16C6 #define BCOM_DEVICEID_BCM5703 0x1647 #define BCOM_DEVICEID_BCM5703X 0x16A7 #define BCOM_DEVICEID_BCM5703_ALT 0x16C7 #define BCOM_DEVICEID_BCM5704C 0x1648 #define BCOM_DEVICEID_BCM5704S 0x16A8 #define BCOM_DEVICEID_BCM5704S_ALT 0x1649 #define BCOM_DEVICEID_BCM5705 0x1653 #define BCOM_DEVICEID_BCM5705K 0x1654 #define BCOM_DEVICEID_BCM5705F 0x166E #define BCOM_DEVICEID_BCM5705M 0x165D #define 
BCOM_DEVICEID_BCM5705M_ALT 0x165E #define BCOM_DEVICEID_BCM5714C 0x1668 #define BCOM_DEVICEID_BCM5714S 0x1669 #define BCOM_DEVICEID_BCM5715 0x1678 #define BCOM_DEVICEID_BCM5715S 0x1679 #define BCOM_DEVICEID_BCM5717 0x1655 #define BCOM_DEVICEID_BCM5718 0x1656 #define BCOM_DEVICEID_BCM5719 0x1657 #define BCOM_DEVICEID_BCM5720_PP 0x1658 /* Not released to public. */ #define BCOM_DEVICEID_BCM5720 0x165F #define BCOM_DEVICEID_BCM5721 0x1659 #define BCOM_DEVICEID_BCM5722 0x165A #define BCOM_DEVICEID_BCM5723 0x165B #define BCOM_DEVICEID_BCM5725 0x1643 #define BCOM_DEVICEID_BCM5727 0x16F3 #define BCOM_DEVICEID_BCM5750 0x1676 #define BCOM_DEVICEID_BCM5750M 0x167C #define BCOM_DEVICEID_BCM5751 0x1677 #define BCOM_DEVICEID_BCM5751F 0x167E #define BCOM_DEVICEID_BCM5751M 0x167D #define BCOM_DEVICEID_BCM5752 0x1600 #define BCOM_DEVICEID_BCM5752M 0x1601 #define BCOM_DEVICEID_BCM5753 0x16F7 #define BCOM_DEVICEID_BCM5753F 0x16FE #define BCOM_DEVICEID_BCM5753M 0x16FD #define BCOM_DEVICEID_BCM5754 0x167A #define BCOM_DEVICEID_BCM5754M 0x1672 #define BCOM_DEVICEID_BCM5755 0x167B #define BCOM_DEVICEID_BCM5755M 0x1673 #define BCOM_DEVICEID_BCM5756 0x1674 #define BCOM_DEVICEID_BCM5761 0x1681 #define BCOM_DEVICEID_BCM5761E 0x1680 #define BCOM_DEVICEID_BCM5761S 0x1688 #define BCOM_DEVICEID_BCM5761SE 0x1689 #define BCOM_DEVICEID_BCM5762 0x1687 #define BCOM_DEVICEID_BCM5764 0x1684 #define BCOM_DEVICEID_BCM5780 0x166A #define BCOM_DEVICEID_BCM5780S 0x166B #define BCOM_DEVICEID_BCM5781 0x16DD #define BCOM_DEVICEID_BCM5782 0x1696 #define BCOM_DEVICEID_BCM5784 0x1698 #define BCOM_DEVICEID_BCM5785F 0x16a0 #define BCOM_DEVICEID_BCM5785G 0x1699 #define BCOM_DEVICEID_BCM5786 0x169A #define BCOM_DEVICEID_BCM5787 0x169B #define BCOM_DEVICEID_BCM5787M 0x1693 #define BCOM_DEVICEID_BCM5787F 0x167f #define BCOM_DEVICEID_BCM5788 0x169C #define BCOM_DEVICEID_BCM5789 0x169D #define BCOM_DEVICEID_BCM5901 0x170D #define BCOM_DEVICEID_BCM5901A2 0x170E #define BCOM_DEVICEID_BCM5903M 0x16FF #define BCOM_DEVICEID_BCM5906 0x1712 #define BCOM_DEVICEID_BCM5906M 0x1713 #define BCOM_DEVICEID_BCM57760 0x1690 #define BCOM_DEVICEID_BCM57761 0x16B0 #define BCOM_DEVICEID_BCM57762 0x1682 #define BCOM_DEVICEID_BCM57764 0x1642 #define BCOM_DEVICEID_BCM57765 0x16B4 #define BCOM_DEVICEID_BCM57766 0x1686 #define BCOM_DEVICEID_BCM57767 0x1683 #define BCOM_DEVICEID_BCM57780 0x1692 #define BCOM_DEVICEID_BCM57781 0x16B1 #define BCOM_DEVICEID_BCM57782 0x16B7 #define BCOM_DEVICEID_BCM57785 0x16B5 #define BCOM_DEVICEID_BCM57786 0x16B3 #define BCOM_DEVICEID_BCM57787 0x1641 #define BCOM_DEVICEID_BCM57788 0x1691 #define BCOM_DEVICEID_BCM57790 0x1694 #define BCOM_DEVICEID_BCM57791 0x16B2 #define BCOM_DEVICEID_BCM57795 0x16B6 /* * Alteon AceNIC PCI vendor/device ID. */ #define ALTEON_VENDORID 0x12AE #define ALTEON_DEVICEID_ACENIC 0x0001 #define ALTEON_DEVICEID_ACENIC_COPPER 0x0002 #define ALTEON_DEVICEID_BCM5700 0x0003 #define ALTEON_DEVICEID_BCM5701 0x0004 /* * 3Com 3c996 PCI vendor/device ID. */ #define TC_VENDORID 0x10B7 #define TC_DEVICEID_3C996 0x0003 /* * SysKonnect PCI vendor ID */ #define SK_VENDORID 0x1148 #define SK_DEVICEID_ALTIMA 0x4400 #define SK_SUBSYSID_9D21 0x4421 #define SK_SUBSYSID_9D41 0x4441 /* * Altima PCI vendor/device ID. */ #define ALTIMA_VENDORID 0x173b #define ALTIMA_DEVICE_AC1000 0x03e8 #define ALTIMA_DEVICE_AC1002 0x03e9 #define ALTIMA_DEVICE_AC9100 0x03ea /* * Dell PCI vendor ID */ #define DELL_VENDORID 0x1028 /* * Apple PCI vendor ID. 
*/ #define APPLE_VENDORID 0x106b #define APPLE_DEVICE_BCM5701 0x1645 /* * Sun PCI vendor ID */ #define SUN_VENDORID 0x108e /* * Fujitsu vendor/device IDs */ #define FJTSU_VENDORID 0x10cf #define FJTSU_DEVICEID_PW008GE5 0x11a1 #define FJTSU_DEVICEID_PW008GE4 0x11a2 #define FJTSU_DEVICEID_PP250450 0x11cc /* PRIMEPOWER250/450 LAN */ /* * Offset of MAC address inside EEPROM. */ #define BGE_EE_MAC_OFFSET 0x7C #define BGE_EE_MAC_OFFSET_5906 0x10 #define BGE_EE_HWCFG_OFFSET 0xC8 #define BGE_HWCFG_VOLTAGE 0x00000003 #define BGE_HWCFG_PHYLED_MODE 0x0000000C #define BGE_HWCFG_MEDIA 0x00000030 #define BGE_HWCFG_ASF 0x00000080 #define BGE_VOLTAGE_1POINT3 0x00000000 #define BGE_VOLTAGE_1POINT8 0x00000001 #define BGE_PHYLEDMODE_UNSPEC 0x00000000 #define BGE_PHYLEDMODE_TRIPLELED 0x00000004 #define BGE_PHYLEDMODE_SINGLELED 0x00000008 #define BGE_MEDIA_UNSPEC 0x00000000 #define BGE_MEDIA_COPPER 0x00000010 #define BGE_MEDIA_FIBER 0x00000020 #define BGE_TICKS_PER_SEC 1000000 /* * Ring size constants. */ #define BGE_EVENT_RING_CNT 256 #define BGE_CMD_RING_CNT 64 #define BGE_STD_RX_RING_CNT 512 #define BGE_JUMBO_RX_RING_CNT 256 #define BGE_MINI_RX_RING_CNT 1024 #define BGE_RETURN_RING_CNT 1024 /* 5705 has smaller return ring size */ #define BGE_RETURN_RING_CNT_5705 512 /* * Possible TX ring sizes. */ #define BGE_TX_RING_CNT_128 128 #define BGE_TX_RING_BASE_128 0x3800 #define BGE_TX_RING_CNT_256 256 #define BGE_TX_RING_BASE_256 0x3000 #define BGE_TX_RING_CNT_512 512 #define BGE_TX_RING_BASE_512 0x2000 #define BGE_TX_RING_CNT BGE_TX_RING_CNT_512 #define BGE_TX_RING_BASE BGE_TX_RING_BASE_512 /* * Tigon III statistics counters. */ /* Statistics maintained MAC Receive block. */ struct bge_rx_mac_stats { bge_hostaddr ifHCInOctets; bge_hostaddr Reserved1; bge_hostaddr etherStatsFragments; bge_hostaddr ifHCInUcastPkts; bge_hostaddr ifHCInMulticastPkts; bge_hostaddr ifHCInBroadcastPkts; bge_hostaddr dot3StatsFCSErrors; bge_hostaddr dot3StatsAlignmentErrors; bge_hostaddr xonPauseFramesReceived; bge_hostaddr xoffPauseFramesReceived; bge_hostaddr macControlFramesReceived; bge_hostaddr xoffStateEntered; bge_hostaddr dot3StatsFramesTooLong; bge_hostaddr etherStatsJabbers; bge_hostaddr etherStatsUndersizePkts; bge_hostaddr inRangeLengthError; bge_hostaddr outRangeLengthError; bge_hostaddr etherStatsPkts64Octets; bge_hostaddr etherStatsPkts65Octetsto127Octets; bge_hostaddr etherStatsPkts128Octetsto255Octets; bge_hostaddr etherStatsPkts256Octetsto511Octets; bge_hostaddr etherStatsPkts512Octetsto1023Octets; bge_hostaddr etherStatsPkts1024Octetsto1522Octets; bge_hostaddr etherStatsPkts1523Octetsto2047Octets; bge_hostaddr etherStatsPkts2048Octetsto4095Octets; bge_hostaddr etherStatsPkts4096Octetsto8191Octets; bge_hostaddr etherStatsPkts8192Octetsto9022Octets; }; /* Statistics maintained MAC Transmit block. 
*/ struct bge_tx_mac_stats { bge_hostaddr ifHCOutOctets; bge_hostaddr Reserved2; bge_hostaddr etherStatsCollisions; bge_hostaddr outXonSent; bge_hostaddr outXoffSent; bge_hostaddr flowControlDone; bge_hostaddr dot3StatsInternalMacTransmitErrors; bge_hostaddr dot3StatsSingleCollisionFrames; bge_hostaddr dot3StatsMultipleCollisionFrames; bge_hostaddr dot3StatsDeferredTransmissions; bge_hostaddr Reserved3; bge_hostaddr dot3StatsExcessiveCollisions; bge_hostaddr dot3StatsLateCollisions; bge_hostaddr dot3Collided2Times; bge_hostaddr dot3Collided3Times; bge_hostaddr dot3Collided4Times; bge_hostaddr dot3Collided5Times; bge_hostaddr dot3Collided6Times; bge_hostaddr dot3Collided7Times; bge_hostaddr dot3Collided8Times; bge_hostaddr dot3Collided9Times; bge_hostaddr dot3Collided10Times; bge_hostaddr dot3Collided11Times; bge_hostaddr dot3Collided12Times; bge_hostaddr dot3Collided13Times; bge_hostaddr dot3Collided14Times; bge_hostaddr dot3Collided15Times; bge_hostaddr ifHCOutUcastPkts; bge_hostaddr ifHCOutMulticastPkts; bge_hostaddr ifHCOutBroadcastPkts; bge_hostaddr dot3StatsCarrierSenseErrors; bge_hostaddr ifOutDiscards; bge_hostaddr ifOutErrors; }; /* Stats counters access through registers */ struct bge_mac_stats { /* TX MAC statistics */ uint64_t ifHCOutOctets; uint64_t Reserved0; uint64_t etherStatsCollisions; uint64_t outXonSent; uint64_t outXoffSent; uint64_t Reserved1; uint64_t dot3StatsInternalMacTransmitErrors; uint64_t dot3StatsSingleCollisionFrames; uint64_t dot3StatsMultipleCollisionFrames; uint64_t dot3StatsDeferredTransmissions; uint64_t Reserved2; uint64_t dot3StatsExcessiveCollisions; uint64_t dot3StatsLateCollisions; uint64_t Reserved3[14]; uint64_t ifHCOutUcastPkts; uint64_t ifHCOutMulticastPkts; uint64_t ifHCOutBroadcastPkts; uint64_t Reserved4[2]; /* RX MAC statistics */ uint64_t ifHCInOctets; uint64_t Reserved5; uint64_t etherStatsFragments; uint64_t ifHCInUcastPkts; uint64_t ifHCInMulticastPkts; uint64_t ifHCInBroadcastPkts; uint64_t dot3StatsFCSErrors; uint64_t dot3StatsAlignmentErrors; uint64_t xonPauseFramesReceived; uint64_t xoffPauseFramesReceived; uint64_t macControlFramesReceived; uint64_t xoffStateEntered; uint64_t dot3StatsFramesTooLong; uint64_t etherStatsJabbers; uint64_t etherStatsUndersizePkts; /* Receive List Placement control */ uint64_t FramesDroppedDueToFilters; uint64_t DmaWriteQueueFull; uint64_t DmaWriteHighPriQueueFull; uint64_t NoMoreRxBDs; uint64_t InputDiscards; uint64_t InputErrors; uint64_t RecvThresholdHit; }; struct bge_stats { uint8_t Reserved0[256]; /* Statistics maintained by Receive MAC. */ struct bge_rx_mac_stats rxstats; bge_hostaddr Unused1[37]; /* Statistics maintained by Transmit MAC. */ struct bge_tx_mac_stats txstats; bge_hostaddr Unused2[31]; /* Statistics maintained by Receive List Placement. */ bge_hostaddr COSIfHCInPkts[16]; bge_hostaddr COSFramesDroppedDueToFilters; bge_hostaddr nicDmaWriteQueueFull; bge_hostaddr nicDmaWriteHighPriQueueFull; bge_hostaddr nicNoMoreRxBDs; bge_hostaddr ifInDiscards; bge_hostaddr ifInErrors; bge_hostaddr nicRecvThresholdHit; bge_hostaddr Unused3[9]; /* Statistics maintained by Send Data Initiator. */ bge_hostaddr COSIfHCOutPkts[16]; bge_hostaddr nicDmaReadQueueFull; bge_hostaddr nicDmaReadHighPriQueueFull; bge_hostaddr nicSendDataCompQueueFull; /* Statistics maintained by Host Coalescing. 
*/ bge_hostaddr nicRingSetSendProdIndex; bge_hostaddr nicRingStatusUpdate; bge_hostaddr nicInterrupts; bge_hostaddr nicAvoidedInterrupts; bge_hostaddr nicSendThresholdHit; uint8_t Reserved4[320]; }; /* * Tigon general information block. This resides in host memory * and contains the status counters, ring control blocks and * producer pointers. */ struct bge_gib { struct bge_stats bge_stats; struct bge_rcb bge_tx_rcb[16]; struct bge_rcb bge_std_rx_rcb; struct bge_rcb bge_jumbo_rx_rcb; struct bge_rcb bge_mini_rx_rcb; struct bge_rcb bge_return_rcb; }; #define BGE_FRAMELEN 1518 #define BGE_MAX_FRAMELEN 1536 #define BGE_JUMBO_FRAMELEN 9018 #define BGE_JUMBO_MTU (BGE_JUMBO_FRAMELEN-ETHER_HDR_LEN-ETHER_CRC_LEN) #define BGE_MIN_FRAMELEN 60 /* * Other utility macros. */ #define BGE_INC(x, y) (x) = (x + 1) % y /* * BAR0 MAC register access macros. The Tigon always uses memory mapped register * accesses and all registers must be accessed with 32 bit operations. */ #define CSR_WRITE_4(sc, reg, val) \ bus_write_4(sc->bge_res, reg, val) #define CSR_READ_4(sc, reg) \ bus_read_4(sc->bge_res, reg) #define BGE_SETBIT(sc, reg, x) \ CSR_WRITE_4(sc, reg, (CSR_READ_4(sc, reg) | (x))) #define BGE_CLRBIT(sc, reg, x) \ CSR_WRITE_4(sc, reg, (CSR_READ_4(sc, reg) & ~(x))) /* BAR2 APE register access macros. */ #define APE_WRITE_4(sc, reg, val) \ bus_write_4(sc->bge_res2, reg, val) #define APE_READ_4(sc, reg) \ bus_read_4(sc->bge_res2, reg) #define APE_SETBIT(sc, reg, x) \ APE_WRITE_4(sc, reg, (APE_READ_4(sc, reg) | (x))) #define APE_CLRBIT(sc, reg, x) \ APE_WRITE_4(sc, reg, (APE_READ_4(sc, reg) & ~(x))) #define PCI_SETBIT(dev, reg, x, s) \ pci_write_config(dev, reg, (pci_read_config(dev, reg, s) | (x)), s) #define PCI_CLRBIT(dev, reg, x, s) \ pci_write_config(dev, reg, (pci_read_config(dev, reg, s) & ~(x)), s) /* * Memory management stuff. */ #define BGE_NSEG_JUMBO 4 #define BGE_NSEG_NEW 35 #define BGE_TSOSEG_SZ 4096 /* Maximum DMA address for controllers that have 40bit DMA address bug. */ #if (BUS_SPACE_MAXADDR < 0xFFFFFFFFFF) #define BGE_DMA_MAXADDR BUS_SPACE_MAXADDR #else #define BGE_DMA_MAXADDR 0xFFFFFFFFFF #endif /* * Ring structures. Most of these reside in host memory and we tell * the NIC where they are via the ring control blocks. The exceptions * are the tx and command rings, which live in NIC memory and which * we access via the shared memory window. */ struct bge_ring_data { struct bge_rx_bd *bge_rx_std_ring; bus_addr_t bge_rx_std_ring_paddr; struct bge_extrx_bd *bge_rx_jumbo_ring; bus_addr_t bge_rx_jumbo_ring_paddr; struct bge_rx_bd *bge_rx_return_ring; bus_addr_t bge_rx_return_ring_paddr; struct bge_tx_bd *bge_tx_ring; bus_addr_t bge_tx_ring_paddr; struct bge_status_block *bge_status_block; bus_addr_t bge_status_block_paddr; struct bge_stats *bge_stats; bus_addr_t bge_stats_paddr; struct bge_gib bge_info; }; #define BGE_STD_RX_RING_SZ \ (sizeof(struct bge_rx_bd) * BGE_STD_RX_RING_CNT) #define BGE_JUMBO_RX_RING_SZ \ (sizeof(struct bge_extrx_bd) * BGE_JUMBO_RX_RING_CNT) #define BGE_TX_RING_SZ \ (sizeof(struct bge_tx_bd) * BGE_TX_RING_CNT) #define BGE_RX_RTN_RING_SZ(x) \ (sizeof(struct bge_rx_bd) * x->bge_return_ring_cnt) #define BGE_STATUS_BLK_SZ sizeof (struct bge_status_block) #define BGE_STATS_SZ sizeof (struct bge_stats) /* * Mbuf pointers. We need these to keep track of the virtual addresses * of our mbuf chains since we can only convert from physical to virtual, * not the other way around. 
*/ struct bge_chain_data { bus_dma_tag_t bge_parent_tag; bus_dma_tag_t bge_buffer_tag; bus_dma_tag_t bge_rx_std_ring_tag; bus_dma_tag_t bge_rx_jumbo_ring_tag; bus_dma_tag_t bge_rx_return_ring_tag; bus_dma_tag_t bge_tx_ring_tag; bus_dma_tag_t bge_status_tag; bus_dma_tag_t bge_stats_tag; bus_dma_tag_t bge_rx_mtag; /* Rx mbuf mapping tag */ bus_dma_tag_t bge_tx_mtag; /* Tx mbuf mapping tag */ bus_dma_tag_t bge_mtag_jumbo; /* Jumbo mbuf mapping tag */ bus_dmamap_t bge_tx_dmamap[BGE_TX_RING_CNT]; bus_dmamap_t bge_rx_std_sparemap; bus_dmamap_t bge_rx_std_dmamap[BGE_STD_RX_RING_CNT]; bus_dmamap_t bge_rx_jumbo_sparemap; bus_dmamap_t bge_rx_jumbo_dmamap[BGE_JUMBO_RX_RING_CNT]; bus_dmamap_t bge_rx_std_ring_map; bus_dmamap_t bge_rx_jumbo_ring_map; bus_dmamap_t bge_tx_ring_map; bus_dmamap_t bge_rx_return_ring_map; bus_dmamap_t bge_status_map; bus_dmamap_t bge_stats_map; struct mbuf *bge_tx_chain[BGE_TX_RING_CNT]; struct mbuf *bge_rx_std_chain[BGE_STD_RX_RING_CNT]; struct mbuf *bge_rx_jumbo_chain[BGE_JUMBO_RX_RING_CNT]; int bge_rx_std_seglen[BGE_STD_RX_RING_CNT]; int bge_rx_jumbo_seglen[BGE_JUMBO_RX_RING_CNT][4]; }; struct bge_dmamap_arg { bus_addr_t bge_busaddr; }; #define BGE_HWREV_TIGON 0x01 #define BGE_HWREV_TIGON_II 0x02 #define BGE_TIMEOUT 100000 #define BGE_TXCONS_UNSET 0xFFFF /* impossible value */ #define BGE_TX_TIMEOUT 5 struct bge_bcom_hack { int reg; int val; }; #define ASF_ENABLE 1 #define ASF_NEW_HANDSHAKE 2 #define ASF_STACKUP 4 struct bge_softc { if_t bge_ifp; /* interface info */ device_t bge_dev; struct mtx bge_mtx; device_t bge_miibus; void *bge_intrhand; struct resource *bge_irq; struct resource *bge_res; /* MAC mapped I/O */ struct resource *bge_res2; /* APE mapped I/O */ struct ifmedia bge_ifmedia; /* TBI media info */ int bge_expcap; int bge_expmrq; int bge_msicap; int bge_pcixcap; uint32_t bge_flags; #define BGE_FLAG_TBI 0x00000001 #define BGE_FLAG_JUMBO 0x00000002 #define BGE_FLAG_JUMBO_STD 0x00000004 #define BGE_FLAG_EADDR 0x00000008 #define BGE_FLAG_MII_SERDES 0x00000010 #define BGE_FLAG_CPMU_PRESENT 0x00000020 #define BGE_FLAG_TAGGED_STATUS 0x00000040 #define BGE_FLAG_APE 0x00000080 #define BGE_FLAG_MSI 0x00000100 #define BGE_FLAG_PCIX 0x00000200 #define BGE_FLAG_PCIE 0x00000400 #define BGE_FLAG_TSO 0x00000800 #define BGE_FLAG_TSO3 0x00001000 #define BGE_FLAG_JUMBO_FRAME 0x00002000 #define BGE_FLAG_5700_FAMILY 0x00010000 #define BGE_FLAG_5705_PLUS 0x00020000 #define BGE_FLAG_5714_FAMILY 0x00040000 #define BGE_FLAG_575X_PLUS 0x00080000 #define BGE_FLAG_5755_PLUS 0x00100000 #define BGE_FLAG_5788 0x00200000 #define BGE_FLAG_5717_PLUS 0x00400000 #define BGE_FLAG_57765_PLUS 0x00800000 #define BGE_FLAG_40BIT_BUG 0x01000000 #define BGE_FLAG_4G_BNDRY_BUG 0x02000000 #define BGE_FLAG_RX_ALIGNBUG 0x04000000 #define BGE_FLAG_SHORT_DMA_BUG 0x08000000 #define BGE_FLAG_4K_RDMA_BUG 0x10000000 #define BGE_FLAG_MBOX_REORDER 0x20000000 #define BGE_FLAG_RDMA_BUG 0x40000000 #define BGE_FLAG_RUNNING 0x80000000 uint32_t bge_mfw_flags; /* Management F/W flags */ #define BGE_MFW_ON_RXCPU 0x00000001 #define BGE_MFW_ON_APE 0x00000002 #define BGE_MFW_TYPE_NCSI 0x00000004 #define BGE_MFW_TYPE_DASH 0x00000008 int bge_phy_ape_lock; int bge_func_addr; int bge_phy_addr; uint32_t bge_phy_flags; #define BGE_PHY_NO_WIRESPEED 0x00000001 #define BGE_PHY_ADC_BUG 0x00000002 #define BGE_PHY_5704_A0_BUG 0x00000004 #define BGE_PHY_JITTER_BUG 0x00000008 #define BGE_PHY_BER_BUG 0x00000010 #define BGE_PHY_ADJUST_TRIM 0x00000020 #define BGE_PHY_CRC_BUG 0x00000040 #define BGE_PHY_NO_3LED 0x00000080 uint32_t bge_chipid; 
uint32_t bge_asicrev; uint32_t bge_chiprev; uint8_t bge_asf_mode; uint8_t bge_asf_count; uint16_t bge_mps; struct bge_ring_data bge_ldata; /* rings */ struct bge_chain_data bge_cdata; /* mbufs */ uint16_t bge_tx_saved_considx; uint16_t bge_rx_saved_considx; uint16_t bge_ev_saved_considx; uint16_t bge_return_ring_cnt; uint16_t bge_std; /* current std ring head */ uint16_t bge_jumbo; /* current jumo ring head */ uint32_t bge_stat_ticks; uint32_t bge_rx_coal_ticks; uint32_t bge_tx_coal_ticks; uint32_t bge_tx_prodidx; uint32_t bge_rx_max_coal_bds; uint32_t bge_tx_max_coal_bds; uint32_t bge_mi_mode; int bge_if_flags; int bge_txcnt; int bge_link; /* link state */ int bge_link_evt; /* pending link event */ int bge_timer; int bge_forced_collapse; int bge_forced_udpcsum; int bge_msi; - int bge_csum_features; struct callout bge_stat_ch; uint32_t bge_rx_discards; uint32_t bge_rx_inerrs; uint32_t bge_rx_nobds; uint32_t bge_tx_discards; uint32_t bge_tx_collisions; + uint32_t bge_mtu; + uint32_t bge_capenable; + uint64_t bge_hwassist; #ifdef DEVICE_POLLING int rxcycles; #endif /* DEVICE_POLLING */ struct bge_mac_stats bge_mac_stats; struct task bge_intr_task; struct taskqueue *bge_tq; }; #define BGE_LOCK_INIT(_sc, _name) \ mtx_init(&(_sc)->bge_mtx, _name, MTX_NETWORK_LOCK, MTX_DEF) #define BGE_LOCK(_sc) mtx_lock(&(_sc)->bge_mtx) #define BGE_TRYLOCK(_sc) mtx_trylock(&(_sc)->bge_mtx) #define BGE_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->bge_mtx, MA_OWNED) #define BGE_UNLOCK(_sc) mtx_unlock(&(_sc)->bge_mtx) #define BGE_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->bge_mtx) Index: projects/ifnet/sys/dev/mii/brgphy.c =================================================================== --- projects/ifnet/sys/dev/mii/brgphy.c (revision 277242) +++ projects/ifnet/sys/dev/mii/brgphy.c (revision 277243) @@ -1,1074 +1,1074 @@ /*- * Copyright (c) 2000 * Bill Paul . All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Bill Paul. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); /* * Driver for the Broadcom BCM54xx/57xx 1000baseTX PHY. */ #include #include #include #include /* XXXGL: if_b[cg]ereg.h contamination */ #include /* XXXGL: if_b[cg]ereg.h contamination */ #include #include #include #include #include #include #include #include #include #include "miidevs.h" #include #include #include #include #include #include #include "miibus_if.h" static int brgphy_probe(device_t); static int brgphy_attach(device_t); struct brgphy_softc { struct mii_softc mii_sc; int serdes_flags; /* Keeps track of the serdes type used */ #define BRGPHY_5706S 0x0001 #define BRGPHY_5708S 0x0002 #define BRGPHY_NOANWAIT 0x0004 #define BRGPHY_5709S 0x0008 int bce_phy_flags; /* PHY flags transferred from the MAC driver */ }; static device_method_t brgphy_methods[] = { /* device interface */ DEVMETHOD(device_probe, brgphy_probe), DEVMETHOD(device_attach, brgphy_attach), DEVMETHOD(device_detach, mii_phy_detach), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD_END }; static devclass_t brgphy_devclass; static driver_t brgphy_driver = { "brgphy", brgphy_methods, sizeof(struct brgphy_softc) }; DRIVER_MODULE(brgphy, miibus, brgphy_driver, brgphy_devclass, 0, 0); static int brgphy_service(struct mii_softc *, struct mii_data *, int); static void brgphy_setmedia(struct mii_softc *, int); static void brgphy_status(struct mii_softc *); static void brgphy_mii_phy_auto(struct mii_softc *, int); static void brgphy_reset(struct mii_softc *); static void brgphy_enable_loopback(struct mii_softc *); static void bcm5401_load_dspcode(struct mii_softc *); static void bcm5411_load_dspcode(struct mii_softc *); static void bcm54k2_load_dspcode(struct mii_softc *); static void brgphy_fixup_5704_a0_bug(struct mii_softc *); static void brgphy_fixup_adc_bug(struct mii_softc *); static void brgphy_fixup_adjust_trim(struct mii_softc *); static void brgphy_fixup_ber_bug(struct mii_softc *); static void brgphy_fixup_crc_bug(struct mii_softc *); static void brgphy_fixup_jitter_bug(struct mii_softc *); static void brgphy_ethernet_wirespeed(struct mii_softc *); static void brgphy_jumbo_settings(struct mii_softc *, u_long); static const struct mii_phydesc brgphys[] = { MII_PHY_DESC(BROADCOM, BCM5400), MII_PHY_DESC(BROADCOM, BCM5401), MII_PHY_DESC(BROADCOM, BCM5411), MII_PHY_DESC(BROADCOM, BCM54K2), MII_PHY_DESC(BROADCOM, BCM5701), MII_PHY_DESC(BROADCOM, BCM5703), MII_PHY_DESC(BROADCOM, BCM5704), MII_PHY_DESC(BROADCOM, BCM5705), MII_PHY_DESC(BROADCOM, BCM5706), MII_PHY_DESC(BROADCOM, BCM5714), MII_PHY_DESC(BROADCOM, BCM5421), MII_PHY_DESC(BROADCOM, BCM5750), MII_PHY_DESC(BROADCOM, BCM5752), MII_PHY_DESC(BROADCOM, BCM5780), MII_PHY_DESC(BROADCOM, BCM5708C), MII_PHY_DESC(BROADCOM2, BCM5482), MII_PHY_DESC(BROADCOM2, BCM5708S), MII_PHY_DESC(BROADCOM2, BCM5709C), MII_PHY_DESC(BROADCOM2, BCM5709S), MII_PHY_DESC(BROADCOM2, BCM5709CAX), MII_PHY_DESC(BROADCOM2, BCM5722), MII_PHY_DESC(BROADCOM2, BCM5755), MII_PHY_DESC(BROADCOM2, BCM5754), MII_PHY_DESC(BROADCOM2, BCM5761), MII_PHY_DESC(BROADCOM2, BCM5784), #ifdef notyet /* better handled by ukphy(4) until WARs are implemented */ MII_PHY_DESC(BROADCOM2, BCM5785), #endif MII_PHY_DESC(BROADCOM3, BCM5717C), MII_PHY_DESC(BROADCOM3, BCM5719C), MII_PHY_DESC(BROADCOM3, BCM5720C), MII_PHY_DESC(BROADCOM3, BCM57765), MII_PHY_DESC(BROADCOM3, BCM57780), MII_PHY_DESC(BROADCOM4, BCM5725C), MII_PHY_DESC(xxBROADCOM_ALT1, BCM5906), MII_PHY_END }; static const struct mii_phy_funcs brgphy_funcs = { brgphy_service, brgphy_status, brgphy_reset }; #define 
HS21_PRODUCT_ID "IBM eServer BladeCenter HS21" #define HS21_BCM_CHIPID 0x57081021 static int detect_hs21(struct bce_softc *bce_sc) { char *sysenv; int found; found = 0; if (bce_sc->bce_chipid == HS21_BCM_CHIPID) { sysenv = kern_getenv("smbios.system.product"); if (sysenv != NULL) { if (strncmp(sysenv, HS21_PRODUCT_ID, strlen(HS21_PRODUCT_ID)) == 0) found = 1; freeenv(sysenv); } } return (found); } /* Search for our PHY in the list of known PHYs */ static int brgphy_probe(device_t dev) { return (mii_phy_dev_probe(dev, brgphys, BUS_PROBE_DEFAULT)); } /* Attach the PHY to the MII bus */ static int brgphy_attach(device_t dev) { struct brgphy_softc *bsc; struct bge_softc *bge_sc = NULL; struct bce_softc *bce_sc = NULL; struct mii_softc *sc; bsc = device_get_softc(dev); sc = &bsc->mii_sc; mii_phy_dev_attach(dev, MIIF_NOISOLATE | MIIF_NOMANPAUSE, &brgphy_funcs, 0); bsc->serdes_flags = 0; /* Find the MAC driver associated with this PHY. */ if (mii_dev_mac_match(dev, "bge")) bge_sc = mii_dev_mac_softc(dev); else if (mii_dev_mac_match(dev, "bce")) bce_sc = mii_dev_mac_softc(dev); /* Handle any special cases based on the PHY ID */ switch (sc->mii_mpd_oui) { case MII_OUI_BROADCOM: switch (sc->mii_mpd_model) { case MII_MODEL_BROADCOM_BCM5706: case MII_MODEL_BROADCOM_BCM5714: /* * The 5464 PHY used in the 5706 supports both copper * and fiber interfaces over GMII. Need to check the * shadow registers to see which mode is actually * in effect, and therefore whether we have 5706C or * 5706S. */ PHY_WRITE(sc, BRGPHY_MII_SHADOW_1C, BRGPHY_SHADOW_1C_MODE_CTRL); if (PHY_READ(sc, BRGPHY_MII_SHADOW_1C) & BRGPHY_SHADOW_1C_ENA_1000X) { bsc->serdes_flags |= BRGPHY_5706S; sc->mii_flags |= MIIF_HAVEFIBER; } break; } break; case MII_OUI_BROADCOM2: switch (sc->mii_mpd_model) { case MII_MODEL_BROADCOM2_BCM5708S: bsc->serdes_flags |= BRGPHY_5708S; sc->mii_flags |= MIIF_HAVEFIBER; break; case MII_MODEL_BROADCOM2_BCM5709S: /* * XXX * 5720S and 5709S shares the same PHY id. * Assume 5720S PHY if parent device is bge(4). */ if (bge_sc != NULL) bsc->serdes_flags |= BRGPHY_5708S; else bsc->serdes_flags |= BRGPHY_5709S; sc->mii_flags |= MIIF_HAVEFIBER; break; } break; } PHY_RESET(sc); /* Read the PHY's capabilities. */ sc->mii_capabilities = PHY_READ(sc, MII_BMSR) & sc->mii_capmask; if (sc->mii_capabilities & BMSR_EXTSTAT) sc->mii_extcapabilities = PHY_READ(sc, MII_EXTSR); device_printf(dev, " "); #define ADD(m, c) ifmedia_add(&sc->mii_pdata->mii_media, (m), (c), NULL) /* Add the supported media types */ if ((sc->mii_flags & MIIF_HAVEFIBER) == 0) { mii_phy_add_media(sc); printf("\n"); } else { sc->mii_anegticks = MII_ANEGTICKS_GIGE; ADD(IFM_MAKEWORD(IFM_ETHER, IFM_1000_SX, IFM_FDX, sc->mii_inst), BRGPHY_S1000 | BRGPHY_BMCR_FDX); printf("1000baseSX-FDX, "); /* 2.5G support is a software enabled feature on the 5708S and 5709S. */ if (bce_sc && (bce_sc->bce_phy_flags & BCE_PHY_2_5G_CAPABLE_FLAG)) { ADD(IFM_MAKEWORD(IFM_ETHER, IFM_2500_SX, IFM_FDX, sc->mii_inst), 0); printf("2500baseSX-FDX, "); } else if ((bsc->serdes_flags & BRGPHY_5708S) && bce_sc && (detect_hs21(bce_sc) != 0)) { /* * There appears to be certain silicon revision * in IBM HS21 blades that is having issues with * this driver wating for the auto-negotiation to * complete. This happens with a specific chip id * only and when the 1000baseSX-FDX is the only * mode. Workaround this issue since it's unlikely * to be ever addressed. 
*/ printf("auto-neg workaround, "); bsc->serdes_flags |= BRGPHY_NOANWAIT; } ADD(IFM_MAKEWORD(IFM_ETHER, IFM_AUTO, 0, sc->mii_inst), 0); printf("auto\n"); } #undef ADD MIIBUS_MEDIAINIT(sc->mii_dev); return (0); } static int brgphy_service(struct mii_softc *sc, struct mii_data *mii, int cmd) { struct ifmedia_entry *ife = mii->mii_media.ifm_cur; int val; switch (cmd) { case MII_POLLSTAT: break; case MII_MEDIACHG: /* Todo: Why is this here? Is it really needed? */ PHY_RESET(sc); /* XXX hardware bug work-around */ switch (IFM_SUBTYPE(ife->ifm_media)) { case IFM_AUTO: brgphy_mii_phy_auto(sc, ife->ifm_media); break; case IFM_2500_SX: case IFM_1000_SX: case IFM_1000_T: case IFM_100_TX: case IFM_10_T: brgphy_setmedia(sc, ife->ifm_media); break; default: return (EINVAL); } break; case MII_TICK: /* Bail if autoneg isn't in process. */ if (IFM_SUBTYPE(ife->ifm_media) != IFM_AUTO) { sc->mii_ticks = 0; break; } /* * Check to see if we have link. If we do, we don't * need to restart the autonegotiation process. */ val = PHY_READ(sc, MII_BMSR) | PHY_READ(sc, MII_BMSR); if (val & BMSR_LINK) { sc->mii_ticks = 0; /* Reset autoneg timer. */ break; } /* Announce link loss right after it happens. */ if (sc->mii_ticks++ == 0) break; /* Only retry autonegotiation every mii_anegticks seconds. */ if (sc->mii_ticks <= sc->mii_anegticks) break; /* Retry autonegotiation */ sc->mii_ticks = 0; brgphy_mii_phy_auto(sc, ife->ifm_media); break; } /* Update the media status. */ PHY_STATUS(sc); /* * Callback if something changed. Note that we need to poke * the DSP on the Broadcom PHYs if the media changes. */ if (sc->mii_media_active != mii->mii_media_active || sc->mii_media_status != mii->mii_media_status || cmd == MII_MEDIACHG) { switch (sc->mii_mpd_oui) { case MII_OUI_BROADCOM: switch (sc->mii_mpd_model) { case MII_MODEL_BROADCOM_BCM5400: bcm5401_load_dspcode(sc); break; case MII_MODEL_BROADCOM_BCM5401: if (sc->mii_mpd_rev == 1 || sc->mii_mpd_rev == 3) bcm5401_load_dspcode(sc); break; case MII_MODEL_BROADCOM_BCM5411: bcm5411_load_dspcode(sc); break; case MII_MODEL_BROADCOM_BCM54K2: bcm54k2_load_dspcode(sc); break; } break; } } mii_phy_update(sc, cmd); return (0); } /****************************************************************************/ /* Sets the PHY link speed. */ /* */ /* Returns: */ /* None */ /****************************************************************************/ static void brgphy_setmedia(struct mii_softc *sc, int media) { int bmcr = 0, gig; switch (IFM_SUBTYPE(media)) { case IFM_2500_SX: break; case IFM_1000_SX: case IFM_1000_T: bmcr = BRGPHY_S1000; break; case IFM_100_TX: bmcr = BRGPHY_S100; break; case IFM_10_T: default: bmcr = BRGPHY_S10; break; } if ((media & IFM_FDX) != 0) { bmcr |= BRGPHY_BMCR_FDX; gig = BRGPHY_1000CTL_AFD; } else { gig = BRGPHY_1000CTL_AHD; } /* Force loopback to disconnect PHY from Ethernet medium. */ brgphy_enable_loopback(sc); PHY_WRITE(sc, BRGPHY_MII_1000CTL, 0); PHY_WRITE(sc, BRGPHY_MII_ANAR, BRGPHY_SEL_TYPE); if (IFM_SUBTYPE(media) != IFM_1000_T && IFM_SUBTYPE(media) != IFM_1000_SX) { PHY_WRITE(sc, BRGPHY_MII_BMCR, bmcr); return; } if (IFM_SUBTYPE(media) == IFM_1000_T) { gig |= BRGPHY_1000CTL_MSE; if ((media & IFM_ETH_MASTER) != 0) gig |= BRGPHY_1000CTL_MSC; } PHY_WRITE(sc, BRGPHY_MII_1000CTL, gig); PHY_WRITE(sc, BRGPHY_MII_BMCR, bmcr | BRGPHY_BMCR_AUTOEN | BRGPHY_BMCR_STARTNEG); } /****************************************************************************/ /* Set the media status based on the PHY settings. 
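brgphy_service() above (and brgphy_status() below) reads the BMSR twice and ORs the results before testing BMSR_LINK. The link bit is latching-low: a link drop is remembered by the PHY until the register is read, so a single read can report a failure that has since recovered. The following self-contained toy model shows why the double read works; the names are hypothetical and this is not driver code.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BMSR_LINK 0x0004                /* link status bit, latched low in hardware */

/* Toy model of a latching-low link bit: once a drop has been latched, the
 * bit reads as 0 until the register is read once; the latch then clears
 * and later reads reflect the current link state. */
static bool link_up = true;
static bool drop_latched = true;        /* a past link drop is still latched */

static uint16_t
phy_read_bmsr(void)
{
        uint16_t val = (!drop_latched && link_up) ? BMSR_LINK : 0;

        drop_latched = false;           /* reading clears the latch */
        return (val);
}

int
main(void)
{
        /* Same idiom as the driver: read twice, OR the results. */
        uint16_t bmsr = phy_read_bmsr() | phy_read_bmsr();

        printf("link %s\n", (bmsr & BMSR_LINK) ? "up" : "down");
        return (0);
}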
*/ /* */ /* Returns: */ /* None */ /****************************************************************************/ static void brgphy_status(struct mii_softc *sc) { struct brgphy_softc *bsc = (struct brgphy_softc *)sc; struct mii_data *mii = sc->mii_pdata; int aux, bmcr, bmsr, val, xstat; u_int flowstat; mii->mii_media_status = IFM_AVALID; mii->mii_media_active = IFM_ETHER; bmsr = PHY_READ(sc, BRGPHY_MII_BMSR) | PHY_READ(sc, BRGPHY_MII_BMSR); bmcr = PHY_READ(sc, BRGPHY_MII_BMCR); if (bmcr & BRGPHY_BMCR_LOOP) { mii->mii_media_active |= IFM_LOOP; } if ((bmcr & BRGPHY_BMCR_AUTOEN) && (bmsr & BRGPHY_BMSR_ACOMP) == 0 && (bsc->serdes_flags & BRGPHY_NOANWAIT) == 0) { /* Erg, still trying, I guess... */ mii->mii_media_active |= IFM_NONE; return; } if ((sc->mii_flags & MIIF_HAVEFIBER) == 0) { /* * NB: reading the ANAR, ANLPAR or 1000STS after the AUXSTS * wedges at least the PHY of BCM5704 (but not others). */ flowstat = mii_phy_flowstatus(sc); xstat = PHY_READ(sc, BRGPHY_MII_1000STS); aux = PHY_READ(sc, BRGPHY_MII_AUXSTS); /* If copper link is up, get the negotiated speed/duplex. */ if (aux & BRGPHY_AUXSTS_LINK) { mii->mii_media_status |= IFM_ACTIVE; switch (aux & BRGPHY_AUXSTS_AN_RES) { case BRGPHY_RES_1000FD: mii->mii_media_active |= IFM_1000_T | IFM_FDX; break; case BRGPHY_RES_1000HD: mii->mii_media_active |= IFM_1000_T | IFM_HDX; break; case BRGPHY_RES_100FD: mii->mii_media_active |= IFM_100_TX | IFM_FDX; break; case BRGPHY_RES_100T4: mii->mii_media_active |= IFM_100_T4; break; case BRGPHY_RES_100HD: mii->mii_media_active |= IFM_100_TX | IFM_HDX; break; case BRGPHY_RES_10FD: mii->mii_media_active |= IFM_10_T | IFM_FDX; break; case BRGPHY_RES_10HD: mii->mii_media_active |= IFM_10_T | IFM_HDX; break; default: mii->mii_media_active |= IFM_NONE; break; } if ((mii->mii_media_active & IFM_FDX) != 0) mii->mii_media_active |= flowstat; if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T && (xstat & BRGPHY_1000STS_MSR) != 0) mii->mii_media_active |= IFM_ETH_MASTER; } } else { /* Todo: Add support for flow control. */ /* If serdes link is up, get the negotiated speed/duplex. */ if (bmsr & BRGPHY_BMSR_LINK) { mii->mii_media_status |= IFM_ACTIVE; } /* Check the link speed/duplex based on the PHY type. */ if (bsc->serdes_flags & BRGPHY_5706S) { mii->mii_media_active |= IFM_1000_SX; /* If autoneg enabled, read negotiated duplex settings */ if (bmcr & BRGPHY_BMCR_AUTOEN) { val = PHY_READ(sc, BRGPHY_SERDES_ANAR) & PHY_READ(sc, BRGPHY_SERDES_ANLPAR); if (val & BRGPHY_SERDES_ANAR_FDX) mii->mii_media_active |= IFM_FDX; else mii->mii_media_active |= IFM_HDX; } } else if (bsc->serdes_flags & BRGPHY_5708S) { PHY_WRITE(sc, BRGPHY_5708S_BLOCK_ADDR, BRGPHY_5708S_DIG_PG0); xstat = PHY_READ(sc, BRGPHY_5708S_PG0_1000X_STAT1); /* Check for MRBE auto-negotiated speed results. */ switch (xstat & BRGPHY_5708S_PG0_1000X_STAT1_SPEED_MASK) { case BRGPHY_5708S_PG0_1000X_STAT1_SPEED_10: mii->mii_media_active |= IFM_10_FL; break; case BRGPHY_5708S_PG0_1000X_STAT1_SPEED_100: mii->mii_media_active |= IFM_100_FX; break; case BRGPHY_5708S_PG0_1000X_STAT1_SPEED_1G: mii->mii_media_active |= IFM_1000_SX; break; case BRGPHY_5708S_PG0_1000X_STAT1_SPEED_25G: mii->mii_media_active |= IFM_2500_SX; break; } /* Check for MRBE auto-negotiated duplex results. */ if (xstat & BRGPHY_5708S_PG0_1000X_STAT1_FDX) mii->mii_media_active |= IFM_FDX; else mii->mii_media_active |= IFM_HDX; } else if (bsc->serdes_flags & BRGPHY_5709S) { /* Select GP Status Block of the AN MMD, get autoneg results. 
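The 5708S path above and the 5709S path that follows use banked register access: a block-address register selects which page subsequent reads and writes hit, and the code must restore the COMBO_IEEE0 block before returning because the rest of brgphy(4) assumes it (as the comment below notes). A minimal stand-alone sketch of that select/access/restore pattern, with a toy register file and hypothetical names:

#include <stdint.h>
#include <stdio.h>

#define BLOCK_ADDR_REG  0x1f            /* toy block-select register */
#define BLOCK_IEEE0     0x0000          /* default block assumed by callers */
#define BLOCK_GP_STATUS 0x8120          /* toy stand-in for a status block */

static uint16_t cur_block;
static uint16_t regs[2][32];            /* two toy banks of 32 registers */

static void
phy_write(int reg, uint16_t val)
{
        if (reg == BLOCK_ADDR_REG)
                cur_block = val;
        else
                regs[cur_block == BLOCK_GP_STATUS][reg & 31] = val;
}

static uint16_t
phy_read(int reg)
{
        return (regs[cur_block == BLOCK_GP_STATUS][reg & 31]);
}

/* Read one register from a non-default block, then restore IEEE0. */
static uint16_t
phy_read_banked(uint16_t block, int reg)
{
        uint16_t val;

        phy_write(BLOCK_ADDR_REG, block);
        val = phy_read(reg);
        phy_write(BLOCK_ADDR_REG, BLOCK_IEEE0);     /* callers expect IEEE0 */
        return (val);
}

int
main(void)
{
        phy_write(BLOCK_ADDR_REG, BLOCK_GP_STATUS);
        phy_write(0x15, 0x0002);                    /* pretend autoneg result */
        phy_write(BLOCK_ADDR_REG, BLOCK_IEEE0);

        printf("aneg status: %#x\n",
            (unsigned)phy_read_banked(BLOCK_GP_STATUS, 0x15));
        return (0);
}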
*/ PHY_WRITE(sc, BRGPHY_BLOCK_ADDR, BRGPHY_BLOCK_ADDR_GP_STATUS); xstat = PHY_READ(sc, BRGPHY_GP_STATUS_TOP_ANEG_STATUS); /* Restore IEEE0 block (assumed in all brgphy(4) code). */ PHY_WRITE(sc, BRGPHY_BLOCK_ADDR, BRGPHY_BLOCK_ADDR_COMBO_IEEE0); /* Check for MRBE auto-negotiated speed results. */ switch (xstat & BRGPHY_GP_STATUS_TOP_ANEG_SPEED_MASK) { case BRGPHY_GP_STATUS_TOP_ANEG_SPEED_10: mii->mii_media_active |= IFM_10_FL; break; case BRGPHY_GP_STATUS_TOP_ANEG_SPEED_100: mii->mii_media_active |= IFM_100_FX; break; case BRGPHY_GP_STATUS_TOP_ANEG_SPEED_1G: mii->mii_media_active |= IFM_1000_SX; break; case BRGPHY_GP_STATUS_TOP_ANEG_SPEED_25G: mii->mii_media_active |= IFM_2500_SX; break; } /* Check for MRBE auto-negotiated duplex results. */ if (xstat & BRGPHY_GP_STATUS_TOP_ANEG_FDX) mii->mii_media_active |= IFM_FDX; else mii->mii_media_active |= IFM_HDX; } } } static void brgphy_mii_phy_auto(struct mii_softc *sc, int media) { int anar, ktcr = 0; PHY_RESET(sc); if ((sc->mii_flags & MIIF_HAVEFIBER) == 0) { anar = BMSR_MEDIA_TO_ANAR(sc->mii_capabilities) | ANAR_CSMA; if ((media & IFM_FLOW) != 0 || (sc->mii_flags & MIIF_FORCEPAUSE) != 0) anar |= BRGPHY_ANAR_PC | BRGPHY_ANAR_ASP; PHY_WRITE(sc, BRGPHY_MII_ANAR, anar); ktcr = BRGPHY_1000CTL_AFD | BRGPHY_1000CTL_AHD; if (sc->mii_mpd_model == MII_MODEL_BROADCOM_BCM5701) ktcr |= BRGPHY_1000CTL_MSE | BRGPHY_1000CTL_MSC; PHY_WRITE(sc, BRGPHY_MII_1000CTL, ktcr); PHY_READ(sc, BRGPHY_MII_1000CTL); } else { anar = BRGPHY_SERDES_ANAR_FDX | BRGPHY_SERDES_ANAR_HDX; if ((media & IFM_FLOW) != 0 || (sc->mii_flags & MIIF_FORCEPAUSE) != 0) anar |= BRGPHY_SERDES_ANAR_BOTH_PAUSE; PHY_WRITE(sc, BRGPHY_SERDES_ANAR, anar); } PHY_WRITE(sc, BRGPHY_MII_BMCR, BRGPHY_BMCR_AUTOEN | BRGPHY_BMCR_STARTNEG); PHY_WRITE(sc, BRGPHY_MII_IMR, 0xFF00); } /* Enable loopback to force the link down. */ static void brgphy_enable_loopback(struct mii_softc *sc) { int i; PHY_WRITE(sc, BRGPHY_MII_BMCR, BRGPHY_BMCR_LOOP); for (i = 0; i < 15000; i++) { if (!(PHY_READ(sc, BRGPHY_MII_BMSR) & BRGPHY_BMSR_LINK)) break; DELAY(10); } } /* Turn off tap power management on 5401. 
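The bcm54xx_load_dspcode() and brgphy_fixup_*() helpers that follow all share one table-driven shape: a static array of {register, value} pairs terminated by a {0, 0} sentinel, written out in order via PHY_WRITE(). A self-contained sketch of the pattern, with toy names and a printf standing in for the register write:

#include <stdint.h>
#include <stdio.h>

struct phy_patch {
        int             reg;
        uint16_t        val;
};

/* Toy register write; the driver uses PHY_WRITE() here. */
static void
phy_write(int reg, uint16_t val)
{
        printf("write reg %#x <- %#06x\n", reg, (unsigned)val);
}

/* Walk a sentinel-terminated patch table, as the *_load_dspcode()
 * and brgphy_fixup_*() helpers below do. */
static void
load_patch_table(const struct phy_patch *tbl)
{
        for (int i = 0; tbl[i].reg != 0; i++)
                phy_write(tbl[i].reg, tbl[i].val);
}

int
main(void)
{
        static const struct phy_patch demo[] = {
                { 0x18, 0x0c20 },       /* aux control */
                { 0x17, 0x0012 },       /* DSP address */
                { 0x15, 0x1804 },       /* DSP data */
                { 0, 0 },               /* sentinel */
        };

        load_patch_table(demo);
        return (0);
}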
*/ static void bcm5401_load_dspcode(struct mii_softc *sc) { static const struct { int reg; uint16_t val; } dspcode[] = { { BRGPHY_MII_AUXCTL, 0x0c20 }, { BRGPHY_MII_DSP_ADDR_REG, 0x0012 }, { BRGPHY_MII_DSP_RW_PORT, 0x1804 }, { BRGPHY_MII_DSP_ADDR_REG, 0x0013 }, { BRGPHY_MII_DSP_RW_PORT, 0x1204 }, { BRGPHY_MII_DSP_ADDR_REG, 0x8006 }, { BRGPHY_MII_DSP_RW_PORT, 0x0132 }, { BRGPHY_MII_DSP_ADDR_REG, 0x8006 }, { BRGPHY_MII_DSP_RW_PORT, 0x0232 }, { BRGPHY_MII_DSP_ADDR_REG, 0x201f }, { BRGPHY_MII_DSP_RW_PORT, 0x0a20 }, { 0, 0 }, }; int i; for (i = 0; dspcode[i].reg != 0; i++) PHY_WRITE(sc, dspcode[i].reg, dspcode[i].val); DELAY(40); } static void bcm5411_load_dspcode(struct mii_softc *sc) { static const struct { int reg; uint16_t val; } dspcode[] = { { 0x1c, 0x8c23 }, { 0x1c, 0x8ca3 }, { 0x1c, 0x8c23 }, { 0, 0 }, }; int i; for (i = 0; dspcode[i].reg != 0; i++) PHY_WRITE(sc, dspcode[i].reg, dspcode[i].val); } void bcm54k2_load_dspcode(struct mii_softc *sc) { static const struct { int reg; uint16_t val; } dspcode[] = { { 4, 0x01e1 }, { 9, 0x0300 }, { 0, 0 }, }; int i; for (i = 0; dspcode[i].reg != 0; i++) PHY_WRITE(sc, dspcode[i].reg, dspcode[i].val); } static void brgphy_fixup_5704_a0_bug(struct mii_softc *sc) { static const struct { int reg; uint16_t val; } dspcode[] = { { 0x1c, 0x8d68 }, { 0x1c, 0x8d68 }, { 0, 0 }, }; int i; for (i = 0; dspcode[i].reg != 0; i++) PHY_WRITE(sc, dspcode[i].reg, dspcode[i].val); } static void brgphy_fixup_adc_bug(struct mii_softc *sc) { static const struct { int reg; uint16_t val; } dspcode[] = { { BRGPHY_MII_AUXCTL, 0x0c00 }, { BRGPHY_MII_DSP_ADDR_REG, 0x201f }, { BRGPHY_MII_DSP_RW_PORT, 0x2aaa }, { 0, 0 }, }; int i; for (i = 0; dspcode[i].reg != 0; i++) PHY_WRITE(sc, dspcode[i].reg, dspcode[i].val); } static void brgphy_fixup_adjust_trim(struct mii_softc *sc) { static const struct { int reg; uint16_t val; } dspcode[] = { { BRGPHY_MII_AUXCTL, 0x0c00 }, { BRGPHY_MII_DSP_ADDR_REG, 0x000a }, { BRGPHY_MII_DSP_RW_PORT, 0x110b }, { BRGPHY_MII_TEST1, 0x0014 }, { BRGPHY_MII_AUXCTL, 0x0400 }, { 0, 0 }, }; int i; for (i = 0; dspcode[i].reg != 0; i++) PHY_WRITE(sc, dspcode[i].reg, dspcode[i].val); } static void brgphy_fixup_ber_bug(struct mii_softc *sc) { static const struct { int reg; uint16_t val; } dspcode[] = { { BRGPHY_MII_AUXCTL, 0x0c00 }, { BRGPHY_MII_DSP_ADDR_REG, 0x000a }, { BRGPHY_MII_DSP_RW_PORT, 0x310b }, { BRGPHY_MII_DSP_ADDR_REG, 0x201f }, { BRGPHY_MII_DSP_RW_PORT, 0x9506 }, { BRGPHY_MII_DSP_ADDR_REG, 0x401f }, { BRGPHY_MII_DSP_RW_PORT, 0x14e2 }, { BRGPHY_MII_AUXCTL, 0x0400 }, { 0, 0 }, }; int i; for (i = 0; dspcode[i].reg != 0; i++) PHY_WRITE(sc, dspcode[i].reg, dspcode[i].val); } static void brgphy_fixup_crc_bug(struct mii_softc *sc) { static const struct { int reg; uint16_t val; } dspcode[] = { { BRGPHY_MII_DSP_RW_PORT, 0x0a75 }, { 0x1c, 0x8c68 }, { 0x1c, 0x8d68 }, { 0x1c, 0x8c68 }, { 0, 0 }, }; int i; for (i = 0; dspcode[i].reg != 0; i++) PHY_WRITE(sc, dspcode[i].reg, dspcode[i].val); } static void brgphy_fixup_jitter_bug(struct mii_softc *sc) { static const struct { int reg; uint16_t val; } dspcode[] = { { BRGPHY_MII_AUXCTL, 0x0c00 }, { BRGPHY_MII_DSP_ADDR_REG, 0x000a }, { BRGPHY_MII_DSP_RW_PORT, 0x010b }, { BRGPHY_MII_AUXCTL, 0x0400 }, { 0, 0 }, }; int i; for (i = 0; dspcode[i].reg != 0; i++) PHY_WRITE(sc, dspcode[i].reg, dspcode[i].val); } static void brgphy_fixup_disable_early_dac(struct mii_softc *sc) { uint32_t val; PHY_WRITE(sc, BRGPHY_MII_DSP_ADDR_REG, 0x0f08); val = PHY_READ(sc, BRGPHY_MII_DSP_RW_PORT); val &= ~(1 << 8); PHY_WRITE(sc, 
BRGPHY_MII_DSP_RW_PORT, val); } static void brgphy_ethernet_wirespeed(struct mii_softc *sc) { uint32_t val; /* Enable Ethernet@WireSpeed. */ PHY_WRITE(sc, BRGPHY_MII_AUXCTL, 0x7007); val = PHY_READ(sc, BRGPHY_MII_AUXCTL); PHY_WRITE(sc, BRGPHY_MII_AUXCTL, val | (1 << 15) | (1 << 4)); } static void brgphy_jumbo_settings(struct mii_softc *sc, u_long mtu) { uint32_t val; /* Set or clear jumbo frame settings in the PHY. */ if (mtu > ETHER_MAX_LEN) { if (sc->mii_mpd_model == MII_MODEL_BROADCOM_BCM5401) { /* BCM5401 PHY cannot read-modify-write. */ PHY_WRITE(sc, BRGPHY_MII_AUXCTL, 0x4c20); } else { PHY_WRITE(sc, BRGPHY_MII_AUXCTL, 0x7); val = PHY_READ(sc, BRGPHY_MII_AUXCTL); PHY_WRITE(sc, BRGPHY_MII_AUXCTL, val | BRGPHY_AUXCTL_LONG_PKT); } val = PHY_READ(sc, BRGPHY_MII_PHY_EXTCTL); PHY_WRITE(sc, BRGPHY_MII_PHY_EXTCTL, val | BRGPHY_PHY_EXTCTL_HIGH_LA); } else { PHY_WRITE(sc, BRGPHY_MII_AUXCTL, 0x7); val = PHY_READ(sc, BRGPHY_MII_AUXCTL); PHY_WRITE(sc, BRGPHY_MII_AUXCTL, val & ~(BRGPHY_AUXCTL_LONG_PKT | 0x7)); val = PHY_READ(sc, BRGPHY_MII_PHY_EXTCTL); PHY_WRITE(sc, BRGPHY_MII_PHY_EXTCTL, val & ~BRGPHY_PHY_EXTCTL_HIGH_LA); } } static void brgphy_reset(struct mii_softc *sc) { struct bge_softc *bge_sc = NULL; struct bce_softc *bce_sc = NULL; u_int mtu; int i, val; /* * Perform a reset. Note that at least some Broadcom PHYs default to * being powered down as well as isolated after a reset but don't work * if one or both of these bits are cleared. However, they just work * fine if both bits remain set, so we don't use mii_phy_reset() here. */ PHY_WRITE(sc, BRGPHY_MII_BMCR, BRGPHY_BMCR_RESET); /* Wait 100ms for it to complete. */ for (i = 0; i < 100; i++) { if ((PHY_READ(sc, BRGPHY_MII_BMCR) & BRGPHY_BMCR_RESET) == 0) break; DELAY(1000); } /* Handle any PHY specific procedures following the reset. */ switch (sc->mii_mpd_oui) { case MII_OUI_BROADCOM: switch (sc->mii_mpd_model) { case MII_MODEL_BROADCOM_BCM5400: bcm5401_load_dspcode(sc); break; case MII_MODEL_BROADCOM_BCM5401: if (sc->mii_mpd_rev == 1 || sc->mii_mpd_rev == 3) bcm5401_load_dspcode(sc); break; case MII_MODEL_BROADCOM_BCM5411: bcm5411_load_dspcode(sc); break; case MII_MODEL_BROADCOM_BCM54K2: bcm54k2_load_dspcode(sc); break; } break; case MII_OUI_BROADCOM3: switch (sc->mii_mpd_model) { case MII_MODEL_BROADCOM3_BCM5717C: case MII_MODEL_BROADCOM3_BCM5719C: case MII_MODEL_BROADCOM3_BCM5720C: case MII_MODEL_BROADCOM3_BCM57765: return; } break; case MII_OUI_BROADCOM4: return; } - mtu = MIIBUS_READVAR(sc->mii_dev, IF_MTU); + mtu = MIIBUS_READVAR(sc->mii_dev, MIIVAR_MTU); /* Find the driver associated with this PHY. 
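The one-line hunk above is the substantive brgphy(4) change in this revision: the PHY driver now asks its parent miibus for MIIVAR_MTU (an enumeration added to miivar.h further down in this commit) rather than the former IF_MTU variable. The value feeds brgphy_jumbo_settings(), which compares it against ETHER_MAX_LEN. A hedged sketch of the resulting pattern in a PHY driver; the function name is hypothetical and this is not code from the commit.

/*
 * Sketch only: obtain the parent MAC's MTU through the miibus
 * read-variable interface and choose jumbo vs. standard PHY settings.
 */
static void
examplephy_apply_mtu(struct mii_softc *sc)
{
        u_int mtu;

        mtu = MIIBUS_READVAR(sc->mii_dev, MIIVAR_MTU);
        if (mtu > ETHER_MAX_LEN) {
                /* program long-packet/high-latency bits, as
                 * brgphy_jumbo_settings() does for jumbo frames */
        } else {
                /* restore the standard-frame defaults */
        }
}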
*/ if (mii_phy_mac_match(sc, "bge")) bge_sc = mii_phy_mac_softc(sc); else if (mii_phy_mac_match(sc, "bce")) bce_sc = mii_phy_mac_softc(sc); if (bge_sc) { /* Fix up various bugs */ if (bge_sc->bge_phy_flags & BGE_PHY_5704_A0_BUG) brgphy_fixup_5704_a0_bug(sc); if (bge_sc->bge_phy_flags & BGE_PHY_ADC_BUG) brgphy_fixup_adc_bug(sc); if (bge_sc->bge_phy_flags & BGE_PHY_ADJUST_TRIM) brgphy_fixup_adjust_trim(sc); if (bge_sc->bge_phy_flags & BGE_PHY_BER_BUG) brgphy_fixup_ber_bug(sc); if (bge_sc->bge_phy_flags & BGE_PHY_CRC_BUG) brgphy_fixup_crc_bug(sc); if (bge_sc->bge_phy_flags & BGE_PHY_JITTER_BUG) brgphy_fixup_jitter_bug(sc); if (bge_sc->bge_flags & BGE_FLAG_JUMBO) brgphy_jumbo_settings(sc, mtu); if ((bge_sc->bge_phy_flags & BGE_PHY_NO_WIRESPEED) == 0) brgphy_ethernet_wirespeed(sc); /* Enable Link LED on Dell boxes */ if (bge_sc->bge_phy_flags & BGE_PHY_NO_3LED) { PHY_WRITE(sc, BRGPHY_MII_PHY_EXTCTL, PHY_READ(sc, BRGPHY_MII_PHY_EXTCTL) & ~BRGPHY_PHY_EXTCTL_3_LED); } /* Adjust output voltage (From Linux driver) */ if (bge_sc->bge_asicrev == BGE_ASICREV_BCM5906) PHY_WRITE(sc, BRGPHY_MII_EPHY_PTEST, 0x12); } else if (bce_sc) { if (BCE_CHIP_NUM(bce_sc) == BCE_CHIP_NUM_5708 && (bce_sc->bce_phy_flags & BCE_PHY_SERDES_FLAG)) { /* Store autoneg capabilities/results in digital block (Page 0) */ PHY_WRITE(sc, BRGPHY_5708S_BLOCK_ADDR, BRGPHY_5708S_DIG3_PG2); PHY_WRITE(sc, BRGPHY_5708S_PG2_DIGCTL_3_0, BRGPHY_5708S_PG2_DIGCTL_3_0_USE_IEEE); PHY_WRITE(sc, BRGPHY_5708S_BLOCK_ADDR, BRGPHY_5708S_DIG_PG0); /* Enable fiber mode and autodetection */ PHY_WRITE(sc, BRGPHY_5708S_PG0_1000X_CTL1, PHY_READ(sc, BRGPHY_5708S_PG0_1000X_CTL1) | BRGPHY_5708S_PG0_1000X_CTL1_AUTODET_EN | BRGPHY_5708S_PG0_1000X_CTL1_FIBER_MODE); /* Enable parallel detection */ PHY_WRITE(sc, BRGPHY_5708S_PG0_1000X_CTL2, PHY_READ(sc, BRGPHY_5708S_PG0_1000X_CTL2) | BRGPHY_5708S_PG0_1000X_CTL2_PAR_DET_EN); /* Advertise 2.5G support through next page during autoneg */ if (bce_sc->bce_phy_flags & BCE_PHY_2_5G_CAPABLE_FLAG) PHY_WRITE(sc, BRGPHY_5708S_ANEG_NXT_PG_XMIT1, PHY_READ(sc, BRGPHY_5708S_ANEG_NXT_PG_XMIT1) | BRGPHY_5708S_ANEG_NXT_PG_XMIT1_25G); /* Increase TX signal amplitude */ if ((BCE_CHIP_ID(bce_sc) == BCE_CHIP_ID_5708_A0) || (BCE_CHIP_ID(bce_sc) == BCE_CHIP_ID_5708_B0) || (BCE_CHIP_ID(bce_sc) == BCE_CHIP_ID_5708_B1)) { PHY_WRITE(sc, BRGPHY_5708S_BLOCK_ADDR, BRGPHY_5708S_TX_MISC_PG5); PHY_WRITE(sc, BRGPHY_5708S_PG5_TXACTL1, PHY_READ(sc, BRGPHY_5708S_PG5_TXACTL1) & ~0x30); PHY_WRITE(sc, BRGPHY_5708S_BLOCK_ADDR, BRGPHY_5708S_DIG_PG0); } /* Backplanes use special driver/pre-driver/pre-emphasis values. */ if ((bce_sc->bce_shared_hw_cfg & BCE_SHARED_HW_CFG_PHY_BACKPLANE) && (bce_sc->bce_port_hw_cfg & BCE_PORT_HW_CFG_CFG_TXCTL3_MASK)) { PHY_WRITE(sc, BRGPHY_5708S_BLOCK_ADDR, BRGPHY_5708S_TX_MISC_PG5); PHY_WRITE(sc, BRGPHY_5708S_PG5_TXACTL3, bce_sc->bce_port_hw_cfg & BCE_PORT_HW_CFG_CFG_TXCTL3_MASK); PHY_WRITE(sc, BRGPHY_5708S_BLOCK_ADDR, BRGPHY_5708S_DIG_PG0); } } else if (BCE_CHIP_NUM(bce_sc) == BCE_CHIP_NUM_5709 && (bce_sc->bce_phy_flags & BCE_PHY_SERDES_FLAG)) { /* Select the SerDes Digital block of the AN MMD. */ PHY_WRITE(sc, BRGPHY_BLOCK_ADDR, BRGPHY_BLOCK_ADDR_SERDES_DIG); val = PHY_READ(sc, BRGPHY_SERDES_DIG_1000X_CTL1); val &= ~BRGPHY_SD_DIG_1000X_CTL1_AUTODET; val |= BRGPHY_SD_DIG_1000X_CTL1_FIBER; PHY_WRITE(sc, BRGPHY_SERDES_DIG_1000X_CTL1, val); /* Select the Over 1G block of the AN MMD. */ PHY_WRITE(sc, BRGPHY_BLOCK_ADDR, BRGPHY_BLOCK_ADDR_OVER_1G); /* Enable autoneg "Next Page" to advertise 2.5G support. 
*/ val = PHY_READ(sc, BRGPHY_OVER_1G_UNFORMAT_PG1); if (bce_sc->bce_phy_flags & BCE_PHY_2_5G_CAPABLE_FLAG) val |= BRGPHY_5708S_ANEG_NXT_PG_XMIT1_25G; else val &= ~BRGPHY_5708S_ANEG_NXT_PG_XMIT1_25G; PHY_WRITE(sc, BRGPHY_OVER_1G_UNFORMAT_PG1, val); /* Select the Multi-Rate Backplane Ethernet block of the AN MMD. */ PHY_WRITE(sc, BRGPHY_BLOCK_ADDR, BRGPHY_BLOCK_ADDR_MRBE); /* Enable MRBE speed autoneg. */ val = PHY_READ(sc, BRGPHY_MRBE_MSG_PG5_NP); val |= BRGPHY_MRBE_MSG_PG5_NP_MBRE | BRGPHY_MRBE_MSG_PG5_NP_T2; PHY_WRITE(sc, BRGPHY_MRBE_MSG_PG5_NP, val); /* Select the Clause 73 User B0 block of the AN MMD. */ PHY_WRITE(sc, BRGPHY_BLOCK_ADDR, BRGPHY_BLOCK_ADDR_CL73_USER_B0); /* Enable MRBE speed autoneg. */ PHY_WRITE(sc, BRGPHY_CL73_USER_B0_MBRE_CTL1, BRGPHY_CL73_USER_B0_MBRE_CTL1_NP_AFT_BP | BRGPHY_CL73_USER_B0_MBRE_CTL1_STA_MGR | BRGPHY_CL73_USER_B0_MBRE_CTL1_ANEG); /* Restore IEEE0 block (assumed in all brgphy(4) code). */ PHY_WRITE(sc, BRGPHY_BLOCK_ADDR, BRGPHY_BLOCK_ADDR_COMBO_IEEE0); } else if (BCE_CHIP_NUM(bce_sc) == BCE_CHIP_NUM_5709) { if ((BCE_CHIP_REV(bce_sc) == BCE_CHIP_REV_Ax) || (BCE_CHIP_REV(bce_sc) == BCE_CHIP_REV_Bx)) brgphy_fixup_disable_early_dac(sc); brgphy_jumbo_settings(sc, mtu); brgphy_ethernet_wirespeed(sc); } else { brgphy_fixup_ber_bug(sc); brgphy_jumbo_settings(sc, mtu); brgphy_ethernet_wirespeed(sc); } } } Index: projects/ifnet/sys/dev/mii/miivar.h =================================================================== --- projects/ifnet/sys/dev/mii/miivar.h (revision 277242) +++ projects/ifnet/sys/dev/mii/miivar.h (revision 277243) @@ -1,270 +1,275 @@ /* $NetBSD: miivar.h,v 1.8 1999/04/23 04:24:32 thorpej Exp $ */ /*- * Copyright (c) 1998, 1999 The NetBSD Foundation, Inc. * All rights reserved. * * This code is derived from software contributed to The NetBSD Foundation * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility, * NASA Ames Research Center. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _DEV_MII_MIIVAR_H_ #define _DEV_MII_MIIVAR_H_ #include /* * Media Independent Interface data structure defintions */ struct mii_softc; /* * A network interface driver has one of these structures in its softc. * It is the interface from the network interface driver to the MII * layer. 
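For orientation, a MAC driver hooks into this layer once at attach time through mii_attach(), whose prototype (without the old ifnet argument in this branch) appears further down in this header. A hedged sketch of that call follows; foo_mediachange()/foo_mediastatus() are hypothetical callbacks, and the meaning of the trailing arguments (capability mask, PHY address, offset, flags) is assumed from the stock FreeBSD interface rather than stated by this commit.

/* Sketch, not from this commit: probe any PHY at any offset and register
 * the MAC's media-change/media-status callbacks with miibus. */
error = mii_attach(dev, &sc->miibus, foo_mediachange, foo_mediastatus,
    BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 0);
if (error != 0) {
        device_printf(dev, "attaching PHYs failed\n");
        return (error);
}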
*/ struct mii_data { struct ifmedia mii_media; /* media information */ /* * For network interfaces with multiple PHYs, a list of all * PHYs is required so they can all be notified when a media * request is made. */ LIST_HEAD(mii_listhead, mii_softc) mii_phys; u_int mii_instance; /* * PHY driver fills this in with active media status. */ u_int mii_media_status; u_int mii_media_active; }; typedef struct mii_data mii_data_t; /* * Functions provided by the PHY to perform various functions. */ struct mii_phy_funcs { int (*pf_service)(struct mii_softc *, struct mii_data *, int); void (*pf_status)(struct mii_softc *); void (*pf_reset)(struct mii_softc *); }; /* * Requests that can be made to the downcall. */ #define MII_TICK 1 /* once-per-second tick */ #define MII_MEDIACHG 2 /* user changed media; perform the switch */ #define MII_POLLSTAT 3 /* user requested media status; fill it in */ /* * Each PHY driver's softc has one of these as the first member. * XXX This would be better named "phy_softc", but this is the name * XXX BSDI used, and we would like to have the same interface. */ struct mii_softc { device_t mii_dev; /* generic device glue */ LIST_ENTRY(mii_softc) mii_list; /* entry on parent's PHY list */ uint32_t mii_mpd_oui; /* the PHY's OUI (MII_OUI())*/ uint32_t mii_mpd_model; /* the PHY's model (MII_MODEL())*/ uint32_t mii_mpd_rev; /* the PHY's revision (MII_REV())*/ u_int mii_capmask; /* capability mask for BMSR */ u_int mii_phy; /* our MII address */ u_int mii_offset; /* first PHY, second PHY, etc. */ u_int mii_inst; /* instance for ifmedia */ /* Our PHY functions. */ const struct mii_phy_funcs *mii_funcs; struct mii_data *mii_pdata; /* pointer to parent's mii_data */ u_int mii_flags; /* misc. flags; see below */ u_int mii_capabilities; /* capabilities from BMSR */ u_int mii_extcapabilities; /* extended capabilities */ u_int mii_ticks; /* MII_TICK counter */ u_int mii_anegticks; /* ticks before retrying aneg */ u_int mii_media_active; /* last active media */ u_int mii_media_status; /* last active status */ }; typedef struct mii_softc mii_softc_t; /* mii_flags */ #define MIIF_INITDONE 0x00000001 /* has been initialized (mii_data) */ #define MIIF_NOISOLATE 0x00000002 /* do not isolate the PHY */ #if 0 #define MIIF_NOLOOP 0x00000004 /* no loopback capability */ #endif #define MIIF_DOINGAUTO 0x00000008 /* doing autonegotiation (mii_softc) */ #define MIIF_AUTOTSLEEP 0x00000010 /* use tsleep(), not callout() */ #define MIIF_HAVEFIBER 0x00000020 /* from parent: has fiber interface */ #define MIIF_HAVE_GTCR 0x00000040 /* has 100base-T2/1000base-T CR */ #define MIIF_IS_1000X 0x00000080 /* is a 1000BASE-X device */ #define MIIF_DOPAUSE 0x00000100 /* advertise PAUSE capability */ #define MIIF_IS_HPNA 0x00000200 /* is a HomePNA device */ #define MIIF_FORCEANEG 0x00000400 /* force auto-negotiation */ #define MIIF_NOMANPAUSE 0x00100000 /* no manual PAUSE selection */ #define MIIF_FORCEPAUSE 0x00200000 /* force PAUSE advertisement */ #define MIIF_MACPRIV0 0x01000000 /* private to the MAC driver */ #define MIIF_MACPRIV1 0x02000000 /* private to the MAC driver */ #define MIIF_MACPRIV2 0x04000000 /* private to the MAC driver */ #define MIIF_PHYPRIV0 0x10000000 /* private to the PHY driver */ #define MIIF_PHYPRIV1 0x20000000 /* private to the PHY driver */ #define MIIF_PHYPRIV2 0x40000000 /* private to the PHY driver */ /* Default mii_anegticks values */ #define MII_ANEGTICKS 5 #define MII_ANEGTICKS_GIGE 17 #define MIIF_INHERIT_MASK (MIIF_NOISOLATE|MIIF_NOLOOP|MIIF_AUTOTSLEEP) /* * Special `locators' 
passed to mii_attach(). If one of these is not * an `any' value, we look for *that* PHY and configure it. If both * are not `any', that is an error, and mii_attach() will fail. */ #define MII_OFFSET_ANY -1 #define MII_PHY_ANY -1 /* * Used to attach a PHY to a parent. */ struct mii_attach_args { struct mii_data *mii_data; /* pointer to parent data */ u_int mii_phyno; /* MII address */ u_int mii_offset; /* first PHY, second PHY, etc. */ uint32_t mii_id1; /* PHY ID register 1 */ uint32_t mii_id2; /* PHY ID register 2 */ u_int mii_capmask; /* capability mask for BMSR */ }; typedef struct mii_attach_args mii_attach_args_t; /* * Used to match a PHY. */ struct mii_phydesc { uint32_t mpd_oui; /* the PHY's OUI */ uint32_t mpd_model; /* the PHY's model */ const char *mpd_name; /* the PHY's name */ }; #define MII_PHY_DESC(a, b) { MII_OUI_ ## a, MII_MODEL_ ## a ## _ ## b, \ MII_STR_ ## a ## _ ## b } #define MII_PHY_END { 0, 0, NULL } /* * An array of these structures map MII media types to BMCR/ANAR settings. */ struct mii_media { u_int mm_bmcr; /* BMCR settings for this media */ u_int mm_anar; /* ANAR settings for this media */ u_int mm_gtcr; /* 100base-T2 or 1000base-T CR */ }; #define MII_MEDIA_NONE 0 #define MII_MEDIA_10_T 1 #define MII_MEDIA_10_T_FDX 2 #define MII_MEDIA_100_T4 3 #define MII_MEDIA_100_TX 4 #define MII_MEDIA_100_TX_FDX 5 #define MII_MEDIA_1000_X 6 #define MII_MEDIA_1000_X_FDX 7 #define MII_MEDIA_1000_T 8 #define MII_MEDIA_1000_T_FDX 9 #define MII_NMEDIA 10 #ifdef _KERNEL #define PHY_READ(p, r) \ MIIBUS_READREG((p)->mii_dev, (p)->mii_phy, (r)) #define PHY_WRITE(p, r, v) \ MIIBUS_WRITEREG((p)->mii_dev, (p)->mii_phy, (r), (v)) #define PHY_SERVICE(p, d, o) \ (*(p)->mii_funcs->pf_service)((p), (d), (o)) #define PHY_STATUS(p) \ (*(p)->mii_funcs->pf_status)(p) #define PHY_RESET(p) \ (*(p)->mii_funcs->pf_reset)(p) enum miibus_device_ivars { MIIBUS_IVAR_FLAGS }; /* * Simplified accessors for miibus */ #define MIIBUS_ACCESSOR(var, ivar, type) \ __BUS_ACCESSOR(miibus, var, MIIBUS, ivar, type) MIIBUS_ACCESSOR(flags, FLAGS, u_int) extern devclass_t miibus_devclass; extern driver_t miibus_driver; int mii_attach(device_t, device_t *, ifm_change_cb_t, ifm_stat_cb_t, int, int, int, int); int mii_mediachg(struct mii_data *); void mii_tick(struct mii_data *); void mii_pollstat(struct mii_data *); void mii_phy_add_media(struct mii_softc *); int mii_phy_auto(struct mii_softc *); int mii_phy_detach(device_t dev); u_int mii_phy_flowstatus(struct mii_softc *); void mii_phy_reset(struct mii_softc *); void mii_phy_setmedia(struct mii_softc *sc); void mii_phy_update(struct mii_softc *, int); int mii_phy_tick(struct mii_softc *); int mii_phy_mac_match(struct mii_softc *, const char *); int mii_dev_mac_match(device_t, const char *); void *mii_phy_mac_softc(struct mii_softc *); void *mii_dev_mac_softc(device_t); const struct mii_phydesc * mii_phy_match(const struct mii_attach_args *ma, const struct mii_phydesc *mpd); const struct mii_phydesc * mii_phy_match_gen(const struct mii_attach_args *ma, const struct mii_phydesc *mpd, size_t endlen); int mii_phy_dev_probe(device_t dev, const struct mii_phydesc *mpd, int mrv); void mii_phy_dev_attach(device_t dev, u_int flags, const struct mii_phy_funcs *mpf, int add_media); void ukphy_status(struct mii_softc *); u_int mii_oui(u_int, u_int); #define MII_OUI(id1, id2) mii_oui(id1, id2) #define MII_MODEL(id2) (((id2) & IDR2_MODEL) >> 4) #define MII_REV(id2) ((id2) & IDR2_REV) +/* Arguments for miibus_readvar(). 
*/ +enum { + MIIVAR_MTU, +}; + #endif /* _KERNEL */ #endif /* _DEV_MII_MIIVAR_H_ */ Index: projects/ifnet/sys/dev/mii/truephy.c =================================================================== --- projects/ifnet/sys/dev/mii/truephy.c (revision 277242) +++ projects/ifnet/sys/dev/mii/truephy.c (revision 277243) @@ -1,315 +1,315 @@ /*- * Copyright (c) 2007 The DragonFly Project. All rights reserved. * * This code is derived from software contributed to The DragonFly Project * by Sepherosa Ziehau * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * 3. Neither the name of The DragonFly Project nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific, prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * $DragonFly: src/sys/dev/netif/mii_layer/truephy.c,v 1.3 2008/02/10 07:29:27 sephe Exp $ * $FreeBSD$ */ #include #include #include #include #include #include #include #include #include #include #include #include #include "miidevs.h" #include #include "miibus_if.h" #define TRUEPHY_FRAMELEN(mtu) \ (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN + (mtu) + ETHER_CRC_LEN) static int truephy_service(struct mii_softc *, struct mii_data *, int); static int truephy_attach(device_t); static int truephy_probe(device_t); static void truephy_reset(struct mii_softc *); static void truephy_status(struct mii_softc *); static device_method_t truephy_methods[] = { /* device interface */ DEVMETHOD(device_probe, truephy_probe), DEVMETHOD(device_attach, truephy_attach), DEVMETHOD(device_detach, mii_phy_detach), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD_END }; static const struct mii_phydesc truephys[] = { MII_PHY_DESC(AGERE, ET1011), MII_PHY_DESC(AGERE, ET1011C), MII_PHY_END }; static devclass_t truephy_devclass; static driver_t truephy_driver = { "truephy", truephy_methods, sizeof(struct mii_softc) }; DRIVER_MODULE(truephy, miibus, truephy_driver, truephy_devclass, 0, 0); static const struct mii_phy_funcs truephy_funcs = { truephy_service, truephy_status, truephy_reset }; static const struct truephy_dsp { uint16_t index; uint16_t data; } truephy_dspcode[] = { { 0x880b, 0x0926 }, /* AfeIfCreg4B1000Msbs */ { 0x880c, 0x0926 }, /* AfeIfCreg4B100Msbs */ { 0x880d, 0x0926 }, /* AfeIfCreg4B10Msbs */ { 0x880e, 0xb4d3 }, /* AfeIfCreg4B1000Lsbs */ { 0x880f, 0xb4d3 }, /* AfeIfCreg4B100Lsbs */ { 0x8810, 0xb4d3 }, /* AfeIfCreg4B10Lsbs */ { 0x8805, 0xb03e }, /* AfeIfCreg3B1000Msbs */ { 0x8806, 0xb03e }, /* AfeIfCreg3B100Msbs */ { 0x8807, 0xff00 }, /* AfeIfCreg3B10Msbs */ { 0x8808, 0xe090 }, /* AfeIfCreg3B1000Lsbs */ { 0x8809, 0xe110 }, /* AfeIfCreg3B100Lsbs */ { 0x880a, 0x0000 }, /* AfeIfCreg3B10Lsbs */ { 0x300d, 1 }, /* DisableNorm */ { 0x280c, 0x0180 }, /* LinkHoldEnd */ { 0x1c21, 0x0002 }, /* AlphaM */ { 0x3821, 6 }, /* FfeLkgTx0 */ { 0x381d, 1 }, /* FfeLkg1g4 */ { 0x381e, 1 }, /* FfeLkg1g5 */ { 0x381f, 1 }, /* FfeLkg1g6 */ { 0x3820, 1 }, /* FfeLkg1g7 */ { 0x8402, 0x01f0 }, /* Btinact */ { 0x800e, 20 }, /* LftrainTime */ { 0x800f, 24 }, /* DvguardTime */ { 0x8010, 46 } /* IdlguardTime */ }; static int truephy_probe(device_t dev) { return (mii_phy_dev_probe(dev, truephys, BUS_PROBE_DEFAULT)); } static int truephy_attach(device_t dev) { struct mii_softc *sc; sc = device_get_softc(dev); mii_phy_dev_attach(dev, MIIF_NOISOLATE | MIIF_NOMANPAUSE, &truephy_funcs, 0); PHY_RESET(sc); sc->mii_capabilities = PHY_READ(sc, MII_BMSR) & sc->mii_capmask; if (sc->mii_capabilities & BMSR_EXTSTAT) { sc->mii_extcapabilities = PHY_READ(sc, MII_EXTSR); /* No 1000baseT half-duplex support */ sc->mii_extcapabilities &= ~EXTSR_1000THDX; } device_printf(dev, " "); mii_phy_add_media(sc); printf("\n"); MIIBUS_MEDIAINIT(sc->mii_dev); return (0); } static int truephy_service(struct mii_softc *sc, struct mii_data *mii, int cmd) { struct ifmedia_entry *ife = mii->mii_media.ifm_cur; int bmcr; switch (cmd) { case MII_POLLSTAT: break; case MII_MEDIACHG: if (IFM_SUBTYPE(ife->ifm_media) != IFM_AUTO) { bmcr = PHY_READ(sc, MII_BMCR) & ~BMCR_AUTOEN; PHY_WRITE(sc, MII_BMCR, bmcr); PHY_WRITE(sc, MII_BMCR, bmcr | BMCR_PDOWN); } mii_phy_setmedia(sc); if (IFM_SUBTYPE(ife->ifm_media) != IFM_AUTO) { bmcr = PHY_READ(sc, MII_BMCR) & ~BMCR_PDOWN; PHY_WRITE(sc, MII_BMCR, bmcr); if (IFM_SUBTYPE(ife->ifm_media) == IFM_1000_T) { PHY_WRITE(sc, MII_BMCR, bmcr | 
BMCR_AUTOEN | BMCR_STARTNEG); } } break; case MII_TICK: if (mii_phy_tick(sc) == EJUSTRETURN) return (0); break; } /* Update the media status. */ PHY_STATUS(sc); /* Callback if something changed. */ mii_phy_update(sc, cmd); return (0); } static void truephy_reset(struct mii_softc *sc) { int i; if (sc->mii_mpd_model == MII_MODEL_AGERE_ET1011) { mii_phy_reset(sc); return; } for (i = 0; i < 2; ++i) { PHY_READ(sc, MII_PHYIDR1); PHY_READ(sc, MII_PHYIDR2); PHY_READ(sc, TRUEPHY_CTRL); PHY_WRITE(sc, TRUEPHY_CTRL, TRUEPHY_CTRL_DIAG | TRUEPHY_CTRL_RSV1); PHY_WRITE(sc, TRUEPHY_INDEX, TRUEPHY_INDEX_MAGIC); PHY_READ(sc, TRUEPHY_DATA); PHY_WRITE(sc, TRUEPHY_CTRL, TRUEPHY_CTRL_RSV1); } PHY_READ(sc, MII_BMCR); PHY_READ(sc, TRUEPHY_CTRL); PHY_WRITE(sc, MII_BMCR, BMCR_AUTOEN | BMCR_PDOWN | BMCR_S1000); PHY_WRITE(sc, TRUEPHY_CTRL, TRUEPHY_CTRL_DIAG | TRUEPHY_CTRL_RSV1 | TRUEPHY_CTRL_RSV0); for (i = 0; i < nitems(truephy_dspcode); ++i) { const struct truephy_dsp *dsp = &truephy_dspcode[i]; PHY_WRITE(sc, TRUEPHY_INDEX, dsp->index); PHY_WRITE(sc, TRUEPHY_DATA, dsp->data); PHY_WRITE(sc, TRUEPHY_INDEX, dsp->index); PHY_READ(sc, TRUEPHY_DATA); } PHY_READ(sc, MII_BMCR); PHY_READ(sc, TRUEPHY_CTRL); PHY_WRITE(sc, MII_BMCR, BMCR_AUTOEN | BMCR_S1000); PHY_WRITE(sc, TRUEPHY_CTRL, TRUEPHY_CTRL_RSV1); mii_phy_reset(sc); - if (TRUEPHY_FRAMELEN((MIIBUS_READVAR(sc->mii_dev, IF_MTU)) > 2048)) { + if (TRUEPHY_FRAMELEN((MIIBUS_READVAR(sc->mii_dev, MIIVAR_MTU)) > 2048)) { int conf; conf = PHY_READ(sc, TRUEPHY_CONF); conf &= ~TRUEPHY_CONF_TXFIFO_MASK; conf |= TRUEPHY_CONF_TXFIFO_24; PHY_WRITE(sc, TRUEPHY_CONF, conf); } } static void truephy_status(struct mii_softc *sc) { struct mii_data *mii = sc->mii_pdata; int bmsr, bmcr, sr; mii->mii_media_status = IFM_AVALID; mii->mii_media_active = IFM_ETHER; sr = PHY_READ(sc, TRUEPHY_SR); bmcr = PHY_READ(sc, MII_BMCR); bmsr = PHY_READ(sc, MII_BMSR) | PHY_READ(sc, MII_BMSR); if (bmsr & BMSR_LINK) mii->mii_media_status |= IFM_ACTIVE; if (bmcr & BMCR_AUTOEN) { if ((bmsr & BMSR_ACOMP) == 0) { mii->mii_media_active |= IFM_NONE; return; } } switch (sr & TRUEPHY_SR_SPD_MASK) { case TRUEPHY_SR_SPD_1000T: mii->mii_media_active |= IFM_1000_T; break; case TRUEPHY_SR_SPD_100TX: mii->mii_media_active |= IFM_100_TX; break; case TRUEPHY_SR_SPD_10T: mii->mii_media_active |= IFM_10_T; break; default: /* XXX will this ever happen? */ printf("invalid media SR %#x\n", sr); mii->mii_media_active |= IFM_NONE; return; } if (sr & TRUEPHY_SR_FDX) mii->mii_media_active |= IFM_FDX | mii_phy_flowstatus(sc); else mii->mii_media_active |= IFM_HDX; } Index: projects/ifnet/sys/dev/msk/if_msk.c =================================================================== --- projects/ifnet/sys/dev/msk/if_msk.c (revision 277242) +++ projects/ifnet/sys/dev/msk/if_msk.c (revision 277243) @@ -1,4596 +1,4559 @@ /****************************************************************************** * * Name : sky2.c * Project: Gigabit Ethernet Driver for FreeBSD 5.x/6.x * Version: $Revision: 1.23 $ * Date : $Date: 2005/12/22 09:04:11 $ * Purpose: Main driver source file * *****************************************************************************/ /****************************************************************************** * * LICENSE: * Copyright (C) Marvell International Ltd. 
and/or its affiliates * * The computer program files contained in this folder ("Files") * are provided to you under the BSD-type license terms provided * below, and any use of such Files and any derivative works * thereof created by you shall be governed by the following terms * and conditions: * * - Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * - Neither the name of Marvell nor the names of its contributors * may be used to endorse or promote products derived from this * software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED * OF THE POSSIBILITY OF SUCH DAMAGE. * /LICENSE * *****************************************************************************/ /*- * Copyright (c) 1997, 1998, 1999, 2000 * Bill Paul . All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Bill Paul. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. */ /*- * Copyright (c) 2003 Nathan L. 
Binkert * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ /* * Device driver for the Marvell Yukon II Ethernet controller. * Due to lack of documentation, this driver is based on the code from * sk(4) and Marvell's myk(4) driver for FreeBSD 5.x. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include MODULE_DEPEND(msk, pci, 1, 1, 1); MODULE_DEPEND(msk, ether, 1, 1, 1); MODULE_DEPEND(msk, miibus, 1, 1, 1); /* "device miibus" required. See GENERIC if you get errors here. */ #include "miibus_if.h" /* Tunables. */ static int msi_disable = 0; TUNABLE_INT("hw.msk.msi_disable", &msi_disable); static int legacy_intr = 0; TUNABLE_INT("hw.msk.legacy_intr", &legacy_intr); static int jumbo_disable = 0; TUNABLE_INT("hw.msk.jumbo_disable", &jumbo_disable); #define MSK_CSUM_FEATURES (CSUM_TCP | CSUM_UDP) +#define MSK_DEFAULT_FRAMESIZE \ + (ETHERMTU + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN) /* * Devices supported by this driver. 
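The MSK_DEFAULT_FRAMESIZE macro added above sizes the default receive frame from the standard MTU plus the Ethernet and VLAN headers; the 4-byte CRC is not part of the sum. A quick stand-alone check with the standard header lengths (the constants are restated here only for illustration; in the driver they come from the Ethernet headers):

#include <stdio.h>

/* Standard values, restated for this example. */
#define ETHERMTU                1500
#define ETHER_HDR_LEN           14
#define ETHER_VLAN_ENCAP_LEN    4

#define MSK_DEFAULT_FRAMESIZE \
        (ETHERMTU + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN)

int
main(void)
{
        /* 1500 + 14 + 4 = 1518 bytes for a standard-MTU, VLAN-tagged
         * frame; the frame check sequence is not counted by this macro. */
        printf("default frame size: %d\n", MSK_DEFAULT_FRAMESIZE);
        return (0);
}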
*/ static const struct msk_product { uint16_t msk_vendorid; uint16_t msk_deviceid; const char *msk_name; } msk_products[] = { { VENDORID_SK, DEVICEID_SK_YUKON2, "SK-9Sxx Gigabit Ethernet" }, { VENDORID_SK, DEVICEID_SK_YUKON2_EXPR, "SK-9Exx Gigabit Ethernet"}, { VENDORID_MARVELL, DEVICEID_MRVL_8021CU, "Marvell Yukon 88E8021CU Gigabit Ethernet" }, { VENDORID_MARVELL, DEVICEID_MRVL_8021X, "Marvell Yukon 88E8021 SX/LX Gigabit Ethernet" }, { VENDORID_MARVELL, DEVICEID_MRVL_8022CU, "Marvell Yukon 88E8022CU Gigabit Ethernet" }, { VENDORID_MARVELL, DEVICEID_MRVL_8022X, "Marvell Yukon 88E8022 SX/LX Gigabit Ethernet" }, { VENDORID_MARVELL, DEVICEID_MRVL_8061CU, "Marvell Yukon 88E8061CU Gigabit Ethernet" }, { VENDORID_MARVELL, DEVICEID_MRVL_8061X, "Marvell Yukon 88E8061 SX/LX Gigabit Ethernet" }, { VENDORID_MARVELL, DEVICEID_MRVL_8062CU, "Marvell Yukon 88E8062CU Gigabit Ethernet" }, { VENDORID_MARVELL, DEVICEID_MRVL_8062X, "Marvell Yukon 88E8062 SX/LX Gigabit Ethernet" }, { VENDORID_MARVELL, DEVICEID_MRVL_8035, "Marvell Yukon 88E8035 Fast Ethernet" }, { VENDORID_MARVELL, DEVICEID_MRVL_8036, "Marvell Yukon 88E8036 Fast Ethernet" }, { VENDORID_MARVELL, DEVICEID_MRVL_8038, "Marvell Yukon 88E8038 Fast Ethernet" }, { VENDORID_MARVELL, DEVICEID_MRVL_8039, "Marvell Yukon 88E8039 Fast Ethernet" }, { VENDORID_MARVELL, DEVICEID_MRVL_8040, "Marvell Yukon 88E8040 Fast Ethernet" }, { VENDORID_MARVELL, DEVICEID_MRVL_8040T, "Marvell Yukon 88E8040T Fast Ethernet" }, { VENDORID_MARVELL, DEVICEID_MRVL_8042, "Marvell Yukon 88E8042 Fast Ethernet" }, { VENDORID_MARVELL, DEVICEID_MRVL_8048, "Marvell Yukon 88E8048 Fast Ethernet" }, { VENDORID_MARVELL, DEVICEID_MRVL_4361, "Marvell Yukon 88E8050 Gigabit Ethernet" }, { VENDORID_MARVELL, DEVICEID_MRVL_4360, "Marvell Yukon 88E8052 Gigabit Ethernet" }, { VENDORID_MARVELL, DEVICEID_MRVL_4362, "Marvell Yukon 88E8053 Gigabit Ethernet" }, { VENDORID_MARVELL, DEVICEID_MRVL_4363, "Marvell Yukon 88E8055 Gigabit Ethernet" }, { VENDORID_MARVELL, DEVICEID_MRVL_4364, "Marvell Yukon 88E8056 Gigabit Ethernet" }, { VENDORID_MARVELL, DEVICEID_MRVL_4365, "Marvell Yukon 88E8070 Gigabit Ethernet" }, { VENDORID_MARVELL, DEVICEID_MRVL_436A, "Marvell Yukon 88E8058 Gigabit Ethernet" }, { VENDORID_MARVELL, DEVICEID_MRVL_436B, "Marvell Yukon 88E8071 Gigabit Ethernet" }, { VENDORID_MARVELL, DEVICEID_MRVL_436C, "Marvell Yukon 88E8072 Gigabit Ethernet" }, { VENDORID_MARVELL, DEVICEID_MRVL_436D, "Marvell Yukon 88E8055 Gigabit Ethernet" }, { VENDORID_MARVELL, DEVICEID_MRVL_4370, "Marvell Yukon 88E8075 Gigabit Ethernet" }, { VENDORID_MARVELL, DEVICEID_MRVL_4380, "Marvell Yukon 88E8057 Gigabit Ethernet" }, { VENDORID_MARVELL, DEVICEID_MRVL_4381, "Marvell Yukon 88E8059 Gigabit Ethernet" }, { VENDORID_DLINK, DEVICEID_DLINK_DGE550SX, "D-Link 550SX Gigabit Ethernet" }, { VENDORID_DLINK, DEVICEID_DLINK_DGE560SX, "D-Link 560SX Gigabit Ethernet" }, { VENDORID_DLINK, DEVICEID_DLINK_DGE560T, "D-Link 560T Gigabit Ethernet" } }; static const char *model_name[] = { "Yukon XL", "Yukon EC Ultra", "Yukon EX", "Yukon EC", "Yukon FE", "Yukon FE+", "Yukon Supreme", "Yukon Ultra 2", "Yukon Unknown", "Yukon Optima", }; static int mskc_probe(device_t); static int mskc_attach(device_t); static int mskc_detach(device_t); static int mskc_shutdown(device_t); static int mskc_setup_rambuffer(struct msk_softc *); static int mskc_suspend(device_t); static int mskc_resume(device_t); static bus_dma_tag_t mskc_get_dma_tag(device_t, device_t); static void mskc_reset(struct msk_softc *); static int msk_probe(device_t); static int 
msk_attach(device_t); static int msk_detach(device_t); static void msk_tick(void *); static void msk_intr(void *); static void msk_intr_phy(struct msk_if_softc *); static void msk_intr_gmac(struct msk_if_softc *); static __inline void msk_rxput(struct msk_if_softc *); static int msk_handle_events(struct msk_softc *); static void msk_handle_hwerr(struct msk_if_softc *, uint32_t); static void msk_intr_hwerr(struct msk_softc *); #ifndef __NO_STRICT_ALIGNMENT static __inline void msk_fixup_rx(struct mbuf *); #endif static __inline void msk_rxcsum(struct msk_if_softc *, uint32_t, struct mbuf *); static void msk_rxeof(struct msk_if_softc *, uint32_t, uint32_t, int); static void msk_jumbo_rxeof(struct msk_if_softc *, uint32_t, uint32_t, int); static void msk_txeof(struct msk_if_softc *, int); static int msk_encap(struct msk_if_softc *, struct mbuf **); static int msk_transmit(if_t, struct mbuf *); static int msk_start(struct msk_if_softc *); static int msk_ioctl(if_t, u_long, void *, struct thread *); static void msk_set_prefetch(struct msk_softc *, int, bus_addr_t, uint32_t); static void msk_set_rambuffer(struct msk_if_softc *); static void msk_set_tx_stfwd(struct msk_if_softc *); static void msk_init(void *); static void msk_init_locked(struct msk_if_softc *); static void msk_stop(struct msk_if_softc *); static void msk_watchdog(struct msk_if_softc *); static int msk_mediachange(if_t); static void msk_mediastatus(if_t, struct ifmediareq *); static void msk_phy_power(struct msk_softc *, int); static void msk_dmamap_cb(void *, bus_dma_segment_t *, int, int); static int msk_status_dma_alloc(struct msk_softc *); static void msk_status_dma_free(struct msk_softc *); static int msk_txrx_dma_alloc(struct msk_if_softc *); static int msk_rx_dma_jalloc(struct msk_if_softc *); static void msk_txrx_dma_free(struct msk_if_softc *); static void msk_rx_dma_jfree(struct msk_if_softc *); static int msk_rx_fill(struct msk_if_softc *, int); static int msk_init_rx_ring(struct msk_if_softc *); static int msk_init_jumbo_rx_ring(struct msk_if_softc *); static void msk_init_tx_ring(struct msk_if_softc *); static __inline void msk_discard_rxbuf(struct msk_if_softc *, int); static __inline void msk_discard_jumbo_rxbuf(struct msk_if_softc *, int); static int msk_newbuf(struct msk_if_softc *, int); static int msk_jumbo_newbuf(struct msk_if_softc *, int); static int msk_phy_readreg(struct msk_if_softc *, int, int); static int msk_phy_writereg(struct msk_if_softc *, int, int, int); static int msk_miibus_readreg(device_t, int, int); static int msk_miibus_writereg(device_t, int, int, int); static void msk_miibus_statchg(device_t); static void msk_rxfilter(struct msk_if_softc *); -static void msk_setvlan(struct msk_if_softc *, if_t); +static void msk_setvlan(struct msk_if_softc *); static void msk_stats_clear(struct msk_if_softc *); static void msk_stats_update(struct msk_if_softc *); static int msk_sysctl_stat32(SYSCTL_HANDLER_ARGS); static int msk_sysctl_stat64(SYSCTL_HANDLER_ARGS); static void msk_sysctl_node(struct msk_if_softc *); static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int); static int sysctl_hw_msk_proc_limit(SYSCTL_HANDLER_ARGS); static device_method_t mskc_methods[] = { /* Device interface */ DEVMETHOD(device_probe, mskc_probe), DEVMETHOD(device_attach, mskc_attach), DEVMETHOD(device_detach, mskc_detach), DEVMETHOD(device_suspend, mskc_suspend), DEVMETHOD(device_resume, mskc_resume), DEVMETHOD(device_shutdown, mskc_shutdown), DEVMETHOD(bus_get_dma_tag, mskc_get_dma_tag), DEVMETHOD_END }; static driver_t 
mskc_driver = { "mskc", mskc_methods, sizeof(struct msk_softc) }; static devclass_t mskc_devclass; static device_method_t msk_methods[] = { /* Device interface */ DEVMETHOD(device_probe, msk_probe), DEVMETHOD(device_attach, msk_attach), DEVMETHOD(device_detach, msk_detach), DEVMETHOD(device_shutdown, bus_generic_shutdown), /* MII interface */ DEVMETHOD(miibus_readreg, msk_miibus_readreg), DEVMETHOD(miibus_writereg, msk_miibus_writereg), DEVMETHOD(miibus_statchg, msk_miibus_statchg), DEVMETHOD_END }; static driver_t msk_driver = { "msk", msk_methods, sizeof(struct msk_if_softc) }; static devclass_t msk_devclass; DRIVER_MODULE(mskc, pci, mskc_driver, mskc_devclass, NULL, NULL); DRIVER_MODULE(msk, mskc, msk_driver, msk_devclass, NULL, NULL); DRIVER_MODULE(miibus, msk, miibus_driver, miibus_devclass, NULL, NULL); static struct resource_spec msk_res_spec_io[] = { { SYS_RES_IOPORT, PCIR_BAR(1), RF_ACTIVE }, { -1, 0, 0 } }; static struct resource_spec msk_res_spec_mem[] = { { SYS_RES_MEMORY, PCIR_BAR(0), RF_ACTIVE }, { -1, 0, 0 } }; static struct resource_spec msk_irq_spec_legacy[] = { { SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE }, { -1, 0, 0 } }; static struct resource_spec msk_irq_spec_msi[] = { { SYS_RES_IRQ, 1, RF_ACTIVE }, { -1, 0, 0 } }; static struct ifdriver msk_ifdrv = { .ifdrv_ops = { .ifop_origin = IFOP_ORIGIN_DRIVER, .ifop_ioctl = msk_ioctl, .ifop_init = msk_init, .ifop_transmit = msk_transmit, }, .ifdrv_name = "msk", .ifdrv_type = IFT_ETHER, .ifdrv_hdrlen = sizeof(struct ether_vlan_header), .ifdrv_maxqlen = MSK_TX_RING_CNT - 1, }; static int msk_miibus_readreg(device_t dev, int phy, int reg) { struct msk_if_softc *sc_if; sc_if = device_get_softc(dev); return (msk_phy_readreg(sc_if, phy, reg)); } static int msk_phy_readreg(struct msk_if_softc *sc_if, int phy, int reg) { struct msk_softc *sc; int i, val; sc = sc_if->msk_softc; GMAC_WRITE_2(sc, sc_if->msk_port, GM_SMI_CTRL, GM_SMI_CT_PHY_AD(phy) | GM_SMI_CT_REG_AD(reg) | GM_SMI_CT_OP_RD); for (i = 0; i < MSK_TIMEOUT; i++) { DELAY(1); val = GMAC_READ_2(sc, sc_if->msk_port, GM_SMI_CTRL); if ((val & GM_SMI_CT_RD_VAL) != 0) { val = GMAC_READ_2(sc, sc_if->msk_port, GM_SMI_DATA); break; } } if (i == MSK_TIMEOUT) { if_printf(sc_if->msk_ifp, "phy failed to come ready\n"); val = 0; } return (val); } static int msk_miibus_writereg(device_t dev, int phy, int reg, int val) { struct msk_if_softc *sc_if; sc_if = device_get_softc(dev); return (msk_phy_writereg(sc_if, phy, reg, val)); } static int msk_phy_writereg(struct msk_if_softc *sc_if, int phy, int reg, int val) { struct msk_softc *sc; int i; sc = sc_if->msk_softc; GMAC_WRITE_2(sc, sc_if->msk_port, GM_SMI_DATA, val); GMAC_WRITE_2(sc, sc_if->msk_port, GM_SMI_CTRL, GM_SMI_CT_PHY_AD(phy) | GM_SMI_CT_REG_AD(reg)); for (i = 0; i < MSK_TIMEOUT; i++) { DELAY(1); if ((GMAC_READ_2(sc, sc_if->msk_port, GM_SMI_CTRL) & GM_SMI_CT_BUSY) == 0) break; } if (i == MSK_TIMEOUT) if_printf(sc_if->msk_ifp, "phy write timeout\n"); return (0); } static void msk_miibus_statchg(device_t dev) { struct msk_softc *sc; struct msk_if_softc *sc_if; struct mii_data *mii; if_t ifp; uint32_t gmac; sc_if = device_get_softc(dev); sc = sc_if->msk_softc; MSK_IF_LOCK_ASSERT(sc_if); mii = device_get_softc(sc_if->msk_miibus); ifp = sc_if->msk_ifp; if (mii == NULL || ifp == NULL || (sc_if->msk_flags & MSK_FLAG_RUNNING) == 0) return; sc_if->msk_flags &= ~MSK_FLAG_LINK; if ((mii->mii_media_status & (IFM_AVALID | IFM_ACTIVE)) == (IFM_AVALID | IFM_ACTIVE)) { switch (IFM_SUBTYPE(mii->mii_media_active)) { case IFM_10_T: case IFM_100_TX: 
sc_if->msk_flags |= MSK_FLAG_LINK; break; case IFM_1000_T: case IFM_1000_SX: case IFM_1000_LX: case IFM_1000_CX: if ((sc_if->msk_flags & MSK_FLAG_FASTETHER) == 0) sc_if->msk_flags |= MSK_FLAG_LINK; break; default: break; } } if ((sc_if->msk_flags & MSK_FLAG_LINK) != 0) { /* Enable Tx FIFO Underrun. */ CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_MSK), GM_IS_TX_FF_UR | GM_IS_RX_FF_OR); /* * Because mii(4) notify msk(4) that it detected link status * change, there is no need to enable automatic * speed/flow-control/duplex updates. */ gmac = GM_GPCR_AU_ALL_DIS; switch (IFM_SUBTYPE(mii->mii_media_active)) { case IFM_1000_SX: case IFM_1000_T: gmac |= GM_GPCR_SPEED_1000; break; case IFM_100_TX: gmac |= GM_GPCR_SPEED_100; break; case IFM_10_T: break; } if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) == 0) gmac |= GM_GPCR_FC_RX_DIS; if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) == 0) gmac |= GM_GPCR_FC_TX_DIS; if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) gmac |= GM_GPCR_DUP_FULL; else gmac |= GM_GPCR_FC_RX_DIS | GM_GPCR_FC_TX_DIS; gmac |= GM_GPCR_RX_ENA | GM_GPCR_TX_ENA; GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, gmac); /* Read again to ensure writing. */ GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL); gmac = GMC_PAUSE_OFF; if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) { if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0) gmac = GMC_PAUSE_ON; } CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), gmac); /* Enable PHY interrupt for FIFO underrun/overflow. */ msk_phy_writereg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_MASK, PHY_M_IS_FIFO_ERROR); } else { /* * Link state changed to down. * Disable PHY interrupts. */ msk_phy_writereg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_MASK, 0); /* Disable Rx/Tx MAC. */ gmac = GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL); if ((gmac & (GM_GPCR_RX_ENA | GM_GPCR_TX_ENA)) != 0) { gmac &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA); GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, gmac); /* Read again to ensure writing. */ GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL); } } if_set(ifp, IF_BAUDRATE, ifmedia_baudrate(mii->mii_media_active)); if_link_state_change(ifp, ifmedia_link_state(mii->mii_media_status)); } static void msk_hash_maddr(void *arg, struct sockaddr *maddr) { struct sockaddr_dl *sdl = (struct sockaddr_dl *)maddr; uint32_t *mchash, crc; if (sdl->sdl_family != AF_LINK) return; mchash = arg; crc = ether_crc32_be(LLADDR(sdl), ETHER_ADDR_LEN); /* Just want the 6 least significant bits. */ crc &= 0x3f; /* Set the corresponding bit in the hash table. 
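 * The GMAC multicast filter is a 64-bit hash: the low 6 bits of the
 * big-endian CRC-32 of the link-level address select one bit, kept
 * here in two 32-bit words and later split by msk_rxfilter() into the
 * four 16-bit GM_MC_ADDR_H1..H4 registers.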
*/ mchash[crc >> 5] |= 1 << (crc & 0x1f); } static void msk_rxfilter(struct msk_if_softc *sc_if) { struct msk_softc *sc; if_t ifp; uint32_t mchash[2], flags; uint16_t mode; sc = sc_if->msk_softc; MSK_IF_LOCK_ASSERT(sc_if); ifp = sc_if->msk_ifp; bzero(mchash, sizeof(mchash)); mode = GMAC_READ_2(sc, sc_if->msk_port, GM_RX_CTRL); flags = if_get(ifp, IF_FLAGS); if ((flags & IFF_PROMISC) != 0) mode &= ~(GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA); else if ((flags & IFF_ALLMULTI) != 0) { mode |= GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA; mchash[0] = 0xffff; mchash[1] = 0xffff; } else { mode |= GM_RXCR_UCF_ENA; if_foreach_maddr(ifp, msk_hash_maddr, mchash); if (mchash[0] != 0 || mchash[1] != 0) mode |= GM_RXCR_MCF_ENA; } GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H1, mchash[0] & 0xffff); GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H2, (mchash[0] >> 16) & 0xffff); GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H3, mchash[1] & 0xffff); GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H4, (mchash[1] >> 16) & 0xffff); GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_CTRL, mode); } static void -msk_setvlan(struct msk_if_softc *sc_if, if_t ifp) +msk_setvlan(struct msk_if_softc *sc_if) { struct msk_softc *sc; sc = sc_if->msk_softc; - if (if_get(ifp, IF_CAPENABLE) & IFCAP_VLAN_HWTAGGING) { + if (sc_if->msk_capenable & IFCAP_VLAN_HWTAGGING) { CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), RX_VLAN_STRIP_ON); CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), TX_VLAN_TAG_ON); } else { CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), RX_VLAN_STRIP_OFF); CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), TX_VLAN_TAG_OFF); } } static int msk_rx_fill(struct msk_if_softc *sc_if, int jumbo) { uint16_t idx; int i; if ((sc_if->msk_flags & MSK_FLAG_DESCV2) == 0 && - (if_get(sc_if->msk_ifp, IF_CAPENABLE) & IFCAP_RXCSUM) != 0) { + (sc_if->msk_capenable & IFCAP_RXCSUM) != 0) { /* Wait until controller executes OP_TCPSTART command. */ for (i = 100; i > 0; i--) { DELAY(100); idx = CSR_READ_2(sc_if->msk_softc, Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_GET_IDX_REG)); if (idx != 0) break; } if (i == 0) { device_printf(sc_if->msk_if_dev, "prefetch unit stuck?\n"); return (ETIMEDOUT); } /* * Fill consumed LE with free buffer. This can be done * in Rx handler but we don't want to add special code * in fast handler. */ if (jumbo > 0) { if (msk_jumbo_newbuf(sc_if, 0) != 0) return (ENOBUFS); bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_ring_tag, sc_if->msk_cdata.msk_jumbo_rx_ring_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); } else { if (msk_newbuf(sc_if, 0) != 0) return (ENOBUFS); bus_dmamap_sync(sc_if->msk_cdata.msk_rx_ring_tag, sc_if->msk_cdata.msk_rx_ring_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); } sc_if->msk_cdata.msk_rx_prod = 0; CSR_WRITE_2(sc_if->msk_softc, Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_PUT_IDX_REG), sc_if->msk_cdata.msk_rx_prod); } return (0); } static int msk_init_rx_ring(struct msk_if_softc *sc_if) { struct msk_ring_data *rd; struct msk_rxdesc *rxd; int i, nbuf, prod; MSK_IF_LOCK_ASSERT(sc_if); sc_if->msk_cdata.msk_rx_cons = 0; sc_if->msk_cdata.msk_rx_prod = 0; sc_if->msk_cdata.msk_rx_putwm = MSK_PUT_WM; rd = &sc_if->msk_rdata; bzero(rd->msk_rx_ring, sizeof(struct msk_rx_desc) * MSK_RX_RING_CNT); for (i = prod = 0; i < MSK_RX_RING_CNT; i++) { rxd = &sc_if->msk_cdata.msk_rxdesc[prod]; rxd->rx_m = NULL; rxd->rx_le = &rd->msk_rx_ring[prod]; MSK_INC(prod, MSK_RX_RING_CNT); } nbuf = MSK_RX_BUF_CNT; prod = 0; /* Have controller know how to compute Rx checksum. 
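 * On controllers using the old descriptor format this is done by
 * queueing a dummy OP_TCPSTART list element whose address field holds
 * ETHER_HDR_LEN in both halves, presumably the checksum start offset
 * for each of the controller's two checksum sums.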
*/ if ((sc_if->msk_flags & MSK_FLAG_DESCV2) == 0 && - (if_get(sc_if->msk_ifp, IF_CAPENABLE) & IFCAP_RXCSUM)) { + (sc_if->msk_capenable & IFCAP_RXCSUM)) { #ifdef MSK_64BIT_DMA rxd = &sc_if->msk_cdata.msk_rxdesc[prod]; rxd->rx_m = NULL; rxd->rx_le = &rd->msk_rx_ring[prod]; rxd->rx_le->msk_addr = htole32(ETHER_HDR_LEN << 16 | ETHER_HDR_LEN); rxd->rx_le->msk_control = htole32(OP_TCPSTART | HW_OWNER); MSK_INC(prod, MSK_RX_RING_CNT); MSK_INC(sc_if->msk_cdata.msk_rx_cons, MSK_RX_RING_CNT); #endif rxd = &sc_if->msk_cdata.msk_rxdesc[prod]; rxd->rx_m = NULL; rxd->rx_le = &rd->msk_rx_ring[prod]; rxd->rx_le->msk_addr = htole32(ETHER_HDR_LEN << 16 | ETHER_HDR_LEN); rxd->rx_le->msk_control = htole32(OP_TCPSTART | HW_OWNER); MSK_INC(prod, MSK_RX_RING_CNT); MSK_INC(sc_if->msk_cdata.msk_rx_cons, MSK_RX_RING_CNT); nbuf--; } for (i = 0; i < nbuf; i++) { if (msk_newbuf(sc_if, prod) != 0) return (ENOBUFS); MSK_RX_INC(prod, MSK_RX_RING_CNT); } bus_dmamap_sync(sc_if->msk_cdata.msk_rx_ring_tag, sc_if->msk_cdata.msk_rx_ring_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); /* Update prefetch unit. */ sc_if->msk_cdata.msk_rx_prod = prod; CSR_WRITE_2(sc_if->msk_softc, Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_PUT_IDX_REG), (sc_if->msk_cdata.msk_rx_prod + MSK_RX_RING_CNT - 1) % MSK_RX_RING_CNT); if (msk_rx_fill(sc_if, 0) != 0) return (ENOBUFS); return (0); } static int msk_init_jumbo_rx_ring(struct msk_if_softc *sc_if) { struct msk_ring_data *rd; struct msk_rxdesc *rxd; int i, nbuf, prod; MSK_IF_LOCK_ASSERT(sc_if); sc_if->msk_cdata.msk_rx_cons = 0; sc_if->msk_cdata.msk_rx_prod = 0; sc_if->msk_cdata.msk_rx_putwm = MSK_PUT_WM; rd = &sc_if->msk_rdata; bzero(rd->msk_jumbo_rx_ring, sizeof(struct msk_rx_desc) * MSK_JUMBO_RX_RING_CNT); for (i = prod = 0; i < MSK_JUMBO_RX_RING_CNT; i++) { rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[prod]; rxd->rx_m = NULL; rxd->rx_le = &rd->msk_jumbo_rx_ring[prod]; MSK_INC(prod, MSK_JUMBO_RX_RING_CNT); } nbuf = MSK_RX_BUF_CNT; prod = 0; /* Have controller know how to compute Rx checksum. */ if ((sc_if->msk_flags & MSK_FLAG_DESCV2) == 0 && - (if_get(sc_if->msk_ifp, IF_CAPENABLE) & IFCAP_RXCSUM) != 0) { + (sc_if->msk_capenable & IFCAP_RXCSUM) != 0) { #ifdef MSK_64BIT_DMA rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[prod]; rxd->rx_m = NULL; rxd->rx_le = &rd->msk_jumbo_rx_ring[prod]; rxd->rx_le->msk_addr = htole32(ETHER_HDR_LEN << 16 | ETHER_HDR_LEN); rxd->rx_le->msk_control = htole32(OP_TCPSTART | HW_OWNER); MSK_INC(prod, MSK_JUMBO_RX_RING_CNT); MSK_INC(sc_if->msk_cdata.msk_rx_cons, MSK_JUMBO_RX_RING_CNT); #endif rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[prod]; rxd->rx_m = NULL; rxd->rx_le = &rd->msk_jumbo_rx_ring[prod]; rxd->rx_le->msk_addr = htole32(ETHER_HDR_LEN << 16 | ETHER_HDR_LEN); rxd->rx_le->msk_control = htole32(OP_TCPSTART | HW_OWNER); MSK_INC(prod, MSK_JUMBO_RX_RING_CNT); MSK_INC(sc_if->msk_cdata.msk_rx_cons, MSK_JUMBO_RX_RING_CNT); nbuf--; } for (i = 0; i < nbuf; i++) { if (msk_jumbo_newbuf(sc_if, prod) != 0) return (ENOBUFS); MSK_RX_INC(prod, MSK_JUMBO_RX_RING_CNT); } bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_ring_tag, sc_if->msk_cdata.msk_jumbo_rx_ring_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); /* Update prefetch unit. 
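 * The put index register is written with the index of the last
 * initialized list element so the prefetch unit starts fetching the
 * newly built ring.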
*/ sc_if->msk_cdata.msk_rx_prod = prod; CSR_WRITE_2(sc_if->msk_softc, Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_PUT_IDX_REG), (sc_if->msk_cdata.msk_rx_prod + MSK_JUMBO_RX_RING_CNT - 1) % MSK_JUMBO_RX_RING_CNT); if (msk_rx_fill(sc_if, 1) != 0) return (ENOBUFS); return (0); } static void msk_init_tx_ring(struct msk_if_softc *sc_if) { struct msk_ring_data *rd; struct msk_txdesc *txd; int i; sc_if->msk_cdata.msk_tso_mtu = 0; sc_if->msk_cdata.msk_last_csum = 0; sc_if->msk_cdata.msk_tx_prod = 0; sc_if->msk_cdata.msk_tx_cons = 0; sc_if->msk_cdata.msk_tx_cnt = 0; sc_if->msk_cdata.msk_tx_high_addr = 0; rd = &sc_if->msk_rdata; bzero(rd->msk_tx_ring, sizeof(struct msk_tx_desc) * MSK_TX_RING_CNT); for (i = 0; i < MSK_TX_RING_CNT; i++) { txd = &sc_if->msk_cdata.msk_txdesc[i]; txd->tx_m = NULL; txd->tx_le = &rd->msk_tx_ring[i]; } bus_dmamap_sync(sc_if->msk_cdata.msk_tx_ring_tag, sc_if->msk_cdata.msk_tx_ring_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); } static __inline void msk_discard_rxbuf(struct msk_if_softc *sc_if, int idx) { struct msk_rx_desc *rx_le; struct msk_rxdesc *rxd; struct mbuf *m; #ifdef MSK_64BIT_DMA rxd = &sc_if->msk_cdata.msk_rxdesc[idx]; rx_le = rxd->rx_le; rx_le->msk_control = htole32(OP_ADDR64 | HW_OWNER); MSK_INC(idx, MSK_RX_RING_CNT); #endif rxd = &sc_if->msk_cdata.msk_rxdesc[idx]; m = rxd->rx_m; rx_le = rxd->rx_le; rx_le->msk_control = htole32(m->m_len | OP_PACKET | HW_OWNER); } static __inline void msk_discard_jumbo_rxbuf(struct msk_if_softc *sc_if, int idx) { struct msk_rx_desc *rx_le; struct msk_rxdesc *rxd; struct mbuf *m; #ifdef MSK_64BIT_DMA rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[idx]; rx_le = rxd->rx_le; rx_le->msk_control = htole32(OP_ADDR64 | HW_OWNER); MSK_INC(idx, MSK_JUMBO_RX_RING_CNT); #endif rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[idx]; m = rxd->rx_m; rx_le = rxd->rx_le; rx_le->msk_control = htole32(m->m_len | OP_PACKET | HW_OWNER); } static int msk_newbuf(struct msk_if_softc *sc_if, int idx) { struct msk_rx_desc *rx_le; struct msk_rxdesc *rxd; struct mbuf *m; bus_dma_segment_t segs[1]; bus_dmamap_t map; int nsegs; m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); if (m == NULL) return (ENOBUFS); m->m_len = m->m_pkthdr.len = MCLBYTES; if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) == 0) m_adj(m, ETHER_ALIGN); #ifndef __NO_STRICT_ALIGNMENT else m_adj(m, MSK_RX_BUF_ALIGN); #endif if (bus_dmamap_load_mbuf_sg(sc_if->msk_cdata.msk_rx_tag, sc_if->msk_cdata.msk_rx_sparemap, m, segs, &nsegs, BUS_DMA_NOWAIT) != 0) { m_freem(m); return (ENOBUFS); } KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs)); rxd = &sc_if->msk_cdata.msk_rxdesc[idx]; #ifdef MSK_64BIT_DMA rx_le = rxd->rx_le; rx_le->msk_addr = htole32(MSK_ADDR_HI(segs[0].ds_addr)); rx_le->msk_control = htole32(OP_ADDR64 | HW_OWNER); MSK_INC(idx, MSK_RX_RING_CNT); rxd = &sc_if->msk_cdata.msk_rxdesc[idx]; #endif if (rxd->rx_m != NULL) { bus_dmamap_sync(sc_if->msk_cdata.msk_rx_tag, rxd->rx_dmamap, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc_if->msk_cdata.msk_rx_tag, rxd->rx_dmamap); rxd->rx_m = NULL; } map = rxd->rx_dmamap; rxd->rx_dmamap = sc_if->msk_cdata.msk_rx_sparemap; sc_if->msk_cdata.msk_rx_sparemap = map; bus_dmamap_sync(sc_if->msk_cdata.msk_rx_tag, rxd->rx_dmamap, BUS_DMASYNC_PREREAD); rxd->rx_m = m; rx_le = rxd->rx_le; rx_le->msk_addr = htole32(MSK_ADDR_LO(segs[0].ds_addr)); rx_le->msk_control = htole32(segs[0].ds_len | OP_PACKET | HW_OWNER); return (0); } static int msk_jumbo_newbuf(struct msk_if_softc *sc_if, int idx) { struct msk_rx_desc *rx_le; struct msk_rxdesc *rxd; struct mbuf *m; bus_dma_segment_t 
segs[1]; bus_dmamap_t map; int nsegs; m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUM9BYTES); if (m == NULL) return (ENOBUFS); m->m_len = m->m_pkthdr.len = MJUM9BYTES; if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) == 0) m_adj(m, ETHER_ALIGN); #ifndef __NO_STRICT_ALIGNMENT else m_adj(m, MSK_RX_BUF_ALIGN); #endif if (bus_dmamap_load_mbuf_sg(sc_if->msk_cdata.msk_jumbo_rx_tag, sc_if->msk_cdata.msk_jumbo_rx_sparemap, m, segs, &nsegs, BUS_DMA_NOWAIT) != 0) { m_freem(m); return (ENOBUFS); } KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs)); rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[idx]; #ifdef MSK_64BIT_DMA rx_le = rxd->rx_le; rx_le->msk_addr = htole32(MSK_ADDR_HI(segs[0].ds_addr)); rx_le->msk_control = htole32(OP_ADDR64 | HW_OWNER); MSK_INC(idx, MSK_JUMBO_RX_RING_CNT); rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[idx]; #endif if (rxd->rx_m != NULL) { bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_tag, rxd->rx_dmamap, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_rx_tag, rxd->rx_dmamap); rxd->rx_m = NULL; } map = rxd->rx_dmamap; rxd->rx_dmamap = sc_if->msk_cdata.msk_jumbo_rx_sparemap; sc_if->msk_cdata.msk_jumbo_rx_sparemap = map; bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_tag, rxd->rx_dmamap, BUS_DMASYNC_PREREAD); rxd->rx_m = m; rx_le = rxd->rx_le; rx_le->msk_addr = htole32(MSK_ADDR_LO(segs[0].ds_addr)); rx_le->msk_control = htole32(segs[0].ds_len | OP_PACKET | HW_OWNER); return (0); } /* * Set media options. */ static int msk_mediachange(if_t ifp) { struct msk_if_softc *sc_if; struct mii_data *mii; int error; sc_if = if_getsoftc(ifp, IF_DRIVER_SOFTC); MSK_IF_LOCK(sc_if); mii = device_get_softc(sc_if->msk_miibus); error = mii_mediachg(mii); MSK_IF_UNLOCK(sc_if); return (error); } /* * Report current media status. */ static void msk_mediastatus(if_t ifp, struct ifmediareq *ifmr) { struct msk_if_softc *sc_if; struct mii_data *mii; if ((if_get(ifp, IF_FLAGS) & IFF_UP) == 0) return; sc_if = if_getsoftc(ifp, IF_DRIVER_SOFTC); MSK_IF_LOCK(sc_if); sc_if = if_getsoftc(ifp, IF_DRIVER_SOFTC); mii = device_get_softc(sc_if->msk_miibus); mii_pollstat(mii); ifmr->ifm_active = mii->mii_media_active; ifmr->ifm_status = mii->mii_media_status; MSK_IF_UNLOCK(sc_if); } static int -msk_ioctl(if_t ifp, u_long command, void *data, struct thread *td) +msk_ioctl(if_t ifp, u_long command, void *data, struct thread *td) { struct msk_if_softc *sc_if; struct ifreq *ifr; struct mii_data *mii; int error, reinit, setvlan; - uint32_t flags, capenable, capabilities, mask; - uint64_t hwassist; + uint32_t flags, mask; sc_if = if_getsoftc(ifp, IF_DRIVER_SOFTC); ifr = (struct ifreq *)data; error = 0; switch(command) { case SIOCSIFMTU: if (ifr->ifr_mtu > MSK_JUMBO_MTU || ifr->ifr_mtu < ETHERMIN) { error = EINVAL; break; } - if (if_get(ifp, IF_MTU) == ifr->ifr_mtu) - break; - MSK_IF_LOCK(sc_if); if (ifr->ifr_mtu > ETHERMTU) { if ((sc_if->msk_flags & MSK_FLAG_JUMBO) == 0) { error = EINVAL; MSK_IF_UNLOCK(sc_if); break; } - if ((sc_if->msk_flags & - MSK_FLAG_JUMBO_NOCSUM) != 0) { - if_clrflags(ifp, IF_HWASSIST, - MSK_CSUM_FEATURES | CSUM_TSO); - if_clrflags(ifp, IF_CAPENABLE, - IFCAP_TSO4 | IFCAP_TXCSUM); + if ((sc_if->msk_flags & MSK_FLAG_JUMBO_NOCSUM) != 0) { + struct ifreq tmp; + + MSK_IF_UNLOCK(sc_if); + if_drvioctl(SIOCGIFCAP, ifp, &tmp, td); + tmp.ifr_reqcap = tmp.ifr_curcap & + ~(MSK_CSUM_FEATURES | CSUM_TSO); + if_drvioctl(SIOCSIFCAP, ifp, &tmp, td); + MSK_IF_LOCK(sc_if); } } - if_set(ifp, IF_MTU, ifr->ifr_mtu); + sc_if->msk_framesize = ifr->ifr_mtu + ETHER_HDR_LEN + + ETHER_VLAN_ENCAP_LEN; if 
((sc_if->msk_flags & MSK_FLAG_RUNNING) != 0) { sc_if->msk_flags &= ~MSK_FLAG_RUNNING; msk_init_locked(sc_if); } MSK_IF_UNLOCK(sc_if); break; case SIOCSIFFLAGS: MSK_IF_LOCK(sc_if); flags = if_get(ifp, IF_FLAGS); if ((flags & IFF_UP) != 0) { if ((sc_if->msk_flags & MSK_FLAG_RUNNING) != 0 && ((flags ^ sc_if->msk_if_flags) & (IFF_PROMISC | IFF_ALLMULTI)) != 0) msk_rxfilter(sc_if); else if ((sc_if->msk_flags & MSK_FLAG_DETACH) == 0) msk_init_locked(sc_if); } else if ((sc_if->msk_flags & MSK_FLAG_RUNNING) != 0) msk_stop(sc_if); sc_if->msk_if_flags = flags; MSK_IF_UNLOCK(sc_if); break; case SIOCADDMULTI: case SIOCDELMULTI: MSK_IF_LOCK(sc_if); if ((sc_if->msk_flags & MSK_FLAG_RUNNING) != 0) msk_rxfilter(sc_if); MSK_IF_UNLOCK(sc_if); break; case SIOCGIFMEDIA: case SIOCSIFMEDIA: mii = device_get_softc(sc_if->msk_miibus); error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); break; case SIOCSIFCAP: reinit = 0; setvlan = 0; - MSK_IF_LOCK(sc_if); - capenable = if_get(ifp, IF_CAPENABLE); - capabilities = if_get(ifp, IF_CAPABILITIES); - hwassist = if_get(ifp, IF_HWASSIST); - mask = ifr->ifr_reqcap ^ capenable; - if ((mask & IFCAP_TXCSUM) != 0 && - (IFCAP_TXCSUM & capabilities) != 0) { - capenable ^= IFCAP_TXCSUM; - if ((IFCAP_TXCSUM & capenable) != 0) - hwassist |= MSK_CSUM_FEATURES; - else - hwassist &= ~MSK_CSUM_FEATURES; - } + ifr->ifr_hwassist = 0; + mask = ifr->ifr_reqcap ^ ifr->ifr_curcap; + if ((IFCAP_TXCSUM & ifr->ifr_reqcap) != 0) + ifr->ifr_hwassist |= MSK_CSUM_FEATURES; if ((mask & IFCAP_RXCSUM) != 0 && - (IFCAP_RXCSUM & capabilities) != 0) { - capenable ^= IFCAP_RXCSUM; - if ((sc_if->msk_flags & MSK_FLAG_DESCV2) == 0) + (sc_if->msk_flags & MSK_FLAG_DESCV2) == 0) reinit = 1; - } - if ((mask & IFCAP_VLAN_HWCSUM) != 0 && - (IFCAP_VLAN_HWCSUM & capabilities) != 0) - capenable ^= IFCAP_VLAN_HWCSUM; - if ((mask & IFCAP_TSO4) != 0 && - (IFCAP_TSO4 & capabilities) != 0) { - capenable ^= IFCAP_TSO4; - if ((IFCAP_TSO4 & capenable) != 0) - hwassist |= CSUM_TSO; - else - hwassist &= ~CSUM_TSO; - } - if ((mask & IFCAP_VLAN_HWTSO) != 0 && - (IFCAP_VLAN_HWTSO & capabilities) != 0) - capenable ^= IFCAP_VLAN_HWTSO; - if ((mask & IFCAP_VLAN_HWTAGGING) != 0 && - (IFCAP_VLAN_HWTAGGING & capabilities) != 0) { - capenable ^= IFCAP_VLAN_HWTAGGING; - if ((IFCAP_VLAN_HWTAGGING & capenable) == 0) - capenable &= - ~(IFCAP_VLAN_HWTSO | IFCAP_VLAN_HWCSUM); + if ((IFCAP_TSO4 & ifr->ifr_reqcap) != 0) + ifr->ifr_hwassist |= CSUM_TSO; + if ((mask & IFCAP_VLAN_HWTAGGING) != 0) setvlan = 1; - } - if (if_get(ifp, IF_MTU) > ETHERMTU && + if (sc_if->msk_framesize > MSK_DEFAULT_FRAMESIZE && (sc_if->msk_flags & MSK_FLAG_JUMBO_NOCSUM) != 0) { - hwassist &= ~(MSK_CSUM_FEATURES | CSUM_TSO); - capenable &= ~(IFCAP_TSO4 | IFCAP_TXCSUM); + ifr->ifr_hwassist &= ~(MSK_CSUM_FEATURES | CSUM_TSO); + ifr->ifr_reqcap &= ~(IFCAP_TSO4 | IFCAP_TXCSUM); } - if_set(ifp, IF_HWASSIST, hwassist); - if_set(ifp, IF_CAPENABLE, capenable); + MSK_IF_LOCK(sc_if); + sc_if->msk_capenable = ifr->ifr_reqcap; if (setvlan) - msk_setvlan(sc_if, ifp); - if (reinit > 0 && (sc_if->msk_flags & MSK_FLAG_RUNNING) != 0) { + msk_setvlan(sc_if); + if (reinit && (sc_if->msk_flags & MSK_FLAG_RUNNING) != 0) { sc_if->msk_flags &= ~MSK_FLAG_RUNNING; msk_init_locked(sc_if); } MSK_IF_UNLOCK(sc_if); break; default: error = EOPNOTSUPP; break; } return (error); } static int mskc_probe(device_t dev) { const struct msk_product *mp; uint16_t vendor, devid; int i; vendor = pci_get_vendor(dev); devid = pci_get_device(dev); mp = msk_products; for (i = 0; i < nitems(msk_products); i++, 
mp++) { if (vendor == mp->msk_vendorid && devid == mp->msk_deviceid) { device_set_desc(dev, mp->msk_name); return (BUS_PROBE_DEFAULT); } } return (ENXIO); } static int mskc_setup_rambuffer(struct msk_softc *sc) { int next; int i; /* Get adapter SRAM size. */ sc->msk_ramsize = CSR_READ_1(sc, B2_E_0) * 4; if (bootverbose) device_printf(sc->msk_dev, "RAM buffer size : %dKB\n", sc->msk_ramsize); if (sc->msk_ramsize == 0) return (0); sc->msk_pflags |= MSK_FLAG_RAMBUF; /* * Give receiver 2/3 of memory and round down to the multiple * of 1024. Tx/Rx RAM buffer size of Yukon II should be multiple * of 1024. */ sc->msk_rxqsize = rounddown((sc->msk_ramsize * 1024 * 2) / 3, 1024); sc->msk_txqsize = (sc->msk_ramsize * 1024) - sc->msk_rxqsize; for (i = 0, next = 0; i < sc->msk_num_port; i++) { sc->msk_rxqstart[i] = next; sc->msk_rxqend[i] = next + sc->msk_rxqsize - 1; next = sc->msk_rxqend[i] + 1; sc->msk_txqstart[i] = next; sc->msk_txqend[i] = next + sc->msk_txqsize - 1; next = sc->msk_txqend[i] + 1; if (bootverbose) { device_printf(sc->msk_dev, "Port %d : Rx Queue %dKB(0x%08x:0x%08x)\n", i, sc->msk_rxqsize / 1024, sc->msk_rxqstart[i], sc->msk_rxqend[i]); device_printf(sc->msk_dev, "Port %d : Tx Queue %dKB(0x%08x:0x%08x)\n", i, sc->msk_txqsize / 1024, sc->msk_txqstart[i], sc->msk_txqend[i]); } } return (0); } static void msk_phy_power(struct msk_softc *sc, int mode) { uint32_t our, val; int i; switch (mode) { case MSK_PHY_POWERUP: /* Switch power to VCC (WA for VAUX problem). */ CSR_WRITE_1(sc, B0_POWER_CTRL, PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_OFF | PC_VCC_ON); /* Disable Core Clock Division, set Clock Select to 0. */ CSR_WRITE_4(sc, B2_Y2_CLK_CTRL, Y2_CLK_DIV_DIS); val = 0; if (sc->msk_hw_id == CHIP_ID_YUKON_XL && sc->msk_hw_rev > CHIP_REV_YU_XL_A1) { /* Enable bits are inverted. */ val = Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS | Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS | Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS; } /* * Enable PCI & Core Clock, enable clock gating for both Links. */ CSR_WRITE_1(sc, B2_Y2_CLK_GATE, val); our = CSR_PCI_READ_4(sc, PCI_OUR_REG_1); our &= ~(PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD); if (sc->msk_hw_id == CHIP_ID_YUKON_XL) { if (sc->msk_hw_rev > CHIP_REV_YU_XL_A1) { /* Deassert Low Power for 1st PHY. */ our |= PCI_Y2_PHY1_COMA; if (sc->msk_num_port > 1) our |= PCI_Y2_PHY2_COMA; } } if (sc->msk_hw_id == CHIP_ID_YUKON_EC_U || sc->msk_hw_id == CHIP_ID_YUKON_EX || sc->msk_hw_id >= CHIP_ID_YUKON_FE_P) { val = CSR_PCI_READ_4(sc, PCI_OUR_REG_4); val &= (PCI_FORCE_ASPM_REQUEST | PCI_ASPM_GPHY_LINK_DOWN | PCI_ASPM_INT_FIFO_EMPTY | PCI_ASPM_CLKRUN_REQUEST); /* Set all bits to 0 except bits 15..12. */ CSR_PCI_WRITE_4(sc, PCI_OUR_REG_4, val); val = CSR_PCI_READ_4(sc, PCI_OUR_REG_5); val &= PCI_CTL_TIM_VMAIN_AV_MSK; CSR_PCI_WRITE_4(sc, PCI_OUR_REG_5, val); CSR_PCI_WRITE_4(sc, PCI_CFG_REG_1, 0); CSR_WRITE_2(sc, B0_CTST, Y2_HW_WOL_ON); /* * Disable status race, workaround for * Yukon EC Ultra & Yukon EX. */ val = CSR_READ_4(sc, B2_GP_IO); val |= GLB_GPIO_STAT_RACE_DIS; CSR_WRITE_4(sc, B2_GP_IO, val); CSR_READ_4(sc, B2_GP_IO); } /* Release PHY from PowerDown/COMA mode. 
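 * Writing PCI_OUR_REG_1 back with the power-down bits cleared brings
 * the PHY(s) out of low-power mode; each port's GMAC link control is
 * then pulsed through reset so the link logic starts from a known
 * state.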
*/ CSR_PCI_WRITE_4(sc, PCI_OUR_REG_1, our); for (i = 0; i < sc->msk_num_port; i++) { CSR_WRITE_2(sc, MR_ADDR(i, GMAC_LINK_CTRL), GMLC_RST_SET); CSR_WRITE_2(sc, MR_ADDR(i, GMAC_LINK_CTRL), GMLC_RST_CLR); } break; case MSK_PHY_POWERDOWN: val = CSR_PCI_READ_4(sc, PCI_OUR_REG_1); val |= PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD; if (sc->msk_hw_id == CHIP_ID_YUKON_XL && sc->msk_hw_rev > CHIP_REV_YU_XL_A1) { val &= ~PCI_Y2_PHY1_COMA; if (sc->msk_num_port > 1) val &= ~PCI_Y2_PHY2_COMA; } CSR_PCI_WRITE_4(sc, PCI_OUR_REG_1, val); val = Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS | Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS | Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS; if (sc->msk_hw_id == CHIP_ID_YUKON_XL && sc->msk_hw_rev > CHIP_REV_YU_XL_A1) { /* Enable bits are inverted. */ val = 0; } /* * Disable PCI & Core Clock, disable clock gating for * both Links. */ CSR_WRITE_1(sc, B2_Y2_CLK_GATE, val); CSR_WRITE_1(sc, B0_POWER_CTRL, PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_ON | PC_VCC_OFF); break; default: break; } } static void mskc_reset(struct msk_softc *sc) { bus_addr_t addr; uint16_t status; uint32_t val; int i, initram; /* Disable ASF. */ if (sc->msk_hw_id >= CHIP_ID_YUKON_XL && sc->msk_hw_id <= CHIP_ID_YUKON_SUPR) { if (sc->msk_hw_id == CHIP_ID_YUKON_EX || sc->msk_hw_id == CHIP_ID_YUKON_SUPR) { CSR_WRITE_4(sc, B28_Y2_CPU_WDOG, 0); status = CSR_READ_2(sc, B28_Y2_ASF_HCU_CCSR); /* Clear AHB bridge & microcontroller reset. */ status &= ~(Y2_ASF_HCU_CCSR_AHB_RST | Y2_ASF_HCU_CCSR_CPU_RST_MODE); /* Clear ASF microcontroller state. */ status &= ~Y2_ASF_HCU_CCSR_UC_STATE_MSK; status &= ~Y2_ASF_HCU_CCSR_CPU_CLK_DIVIDE_MSK; CSR_WRITE_2(sc, B28_Y2_ASF_HCU_CCSR, status); CSR_WRITE_4(sc, B28_Y2_CPU_WDOG, 0); } else CSR_WRITE_1(sc, B28_Y2_ASF_STAT_CMD, Y2_ASF_RESET); CSR_WRITE_2(sc, B0_CTST, Y2_ASF_DISABLE); /* * Since we disabled ASF, S/W reset is required for * Power Management. */ CSR_WRITE_2(sc, B0_CTST, CS_RST_SET); CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR); } /* Clear all error bits in the PCI status register. */ status = pci_read_config(sc->msk_dev, PCIR_STATUS, 2); CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON); pci_write_config(sc->msk_dev, PCIR_STATUS, status | PCIM_STATUS_PERR | PCIM_STATUS_SERR | PCIM_STATUS_RMABORT | PCIM_STATUS_RTABORT | PCIM_STATUS_MDPERR, 2); CSR_WRITE_2(sc, B0_CTST, CS_MRST_CLR); switch (sc->msk_bustype) { case MSK_PEX_BUS: /* Clear all PEX errors. */ CSR_PCI_WRITE_4(sc, PEX_UNC_ERR_STAT, 0xffffffff); val = CSR_PCI_READ_4(sc, PEX_UNC_ERR_STAT); if ((val & PEX_RX_OV) != 0) { sc->msk_intrmask &= ~Y2_IS_HW_ERR; sc->msk_intrhwemask &= ~Y2_IS_PCI_EXP; } break; case MSK_PCI_BUS: case MSK_PCIX_BUS: /* Set Cache Line Size to 2(8bytes) if configured to 0. */ val = pci_read_config(sc->msk_dev, PCIR_CACHELNSZ, 1); if (val == 0) pci_write_config(sc->msk_dev, PCIR_CACHELNSZ, 2, 1); if (sc->msk_bustype == MSK_PCIX_BUS) { /* Set Cache Line Size opt. */ val = pci_read_config(sc->msk_dev, PCI_OUR_REG_1, 4); val |= PCI_CLS_OPT; pci_write_config(sc->msk_dev, PCI_OUR_REG_1, val, 4); } break; } /* Set PHY power state. */ msk_phy_power(sc, MSK_PHY_POWERUP); /* Reset GPHY/GMAC Control */ for (i = 0; i < sc->msk_num_port; i++) { /* GPHY Control reset. */ CSR_WRITE_1(sc, MR_ADDR(i, GPHY_CTRL), GPC_RST_SET); CSR_WRITE_1(sc, MR_ADDR(i, GPHY_CTRL), GPC_RST_CLR); /* GMAC Control reset. 
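 * Each MAC control block is pulsed through reset and put back into
 * normal (non-loopback) operation; Yukon Extreme/Supreme parts
 * additionally have their MACsec Rx/Tx blocks bypassed.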
*/ CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), GMC_RST_SET); CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), GMC_RST_CLR); CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), GMC_F_LOOPB_OFF); if (sc->msk_hw_id == CHIP_ID_YUKON_EX || sc->msk_hw_id == CHIP_ID_YUKON_SUPR) CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), GMC_BYP_MACSECRX_ON | GMC_BYP_MACSECTX_ON | GMC_BYP_RETR_ON); } if (sc->msk_hw_id == CHIP_ID_YUKON_SUPR && sc->msk_hw_rev > CHIP_REV_YU_SU_B0) CSR_PCI_WRITE_4(sc, PCI_OUR_REG_3, PCI_CLK_MACSEC_DIS); if (sc->msk_hw_id == CHIP_ID_YUKON_OPT && sc->msk_hw_rev == 0) { /* Disable PCIe PHY powerdown(reg 0x80, bit7). */ CSR_WRITE_4(sc, Y2_PEX_PHY_DATA, (0x0080 << 16) | 0x0080); } CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF); /* LED On. */ CSR_WRITE_2(sc, B0_CTST, Y2_LED_STAT_ON); /* Clear TWSI IRQ. */ CSR_WRITE_4(sc, B2_I2C_IRQ, I2C_CLR_IRQ); /* Turn off hardware timer. */ CSR_WRITE_1(sc, B2_TI_CTRL, TIM_STOP); CSR_WRITE_1(sc, B2_TI_CTRL, TIM_CLR_IRQ); /* Turn off descriptor polling. */ CSR_WRITE_1(sc, B28_DPT_CTRL, DPT_STOP); /* Turn off time stamps. */ CSR_WRITE_1(sc, GMAC_TI_ST_CTRL, GMT_ST_STOP); CSR_WRITE_1(sc, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ); initram = 0; if (sc->msk_hw_id == CHIP_ID_YUKON_XL || sc->msk_hw_id == CHIP_ID_YUKON_EC || sc->msk_hw_id == CHIP_ID_YUKON_FE) initram++; /* Configure timeout values. */ for (i = 0; initram > 0 && i < sc->msk_num_port; i++) { CSR_WRITE_2(sc, SELECT_RAM_BUFFER(i, B3_RI_CTRL), RI_RST_SET); CSR_WRITE_2(sc, SELECT_RAM_BUFFER(i, B3_RI_CTRL), RI_RST_CLR); CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_R1), MSK_RI_TO_53); CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XA1), MSK_RI_TO_53); CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XS1), MSK_RI_TO_53); CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_R1), MSK_RI_TO_53); CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XA1), MSK_RI_TO_53); CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XS1), MSK_RI_TO_53); CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_R2), MSK_RI_TO_53); CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XA2), MSK_RI_TO_53); CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XS2), MSK_RI_TO_53); CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_R2), MSK_RI_TO_53); CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XA2), MSK_RI_TO_53); CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XS2), MSK_RI_TO_53); } /* Disable all interrupts. */ CSR_WRITE_4(sc, B0_HWE_IMSK, 0); CSR_READ_4(sc, B0_HWE_IMSK); CSR_WRITE_4(sc, B0_IMSK, 0); CSR_READ_4(sc, B0_IMSK); /* * On dual port PCI-X card, there is an problem where status * can be received out of order due to split transactions. */ if (sc->msk_pcixcap != 0 && sc->msk_num_port > 1) { uint16_t pcix_cmd; pcix_cmd = pci_read_config(sc->msk_dev, sc->msk_pcixcap + PCIXR_COMMAND, 2); /* Clear Max Outstanding Split Transactions. */ pcix_cmd &= ~PCIXM_COMMAND_MAX_SPLITS; CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON); pci_write_config(sc->msk_dev, sc->msk_pcixcap + PCIXR_COMMAND, pcix_cmd, 2); CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF); } if (sc->msk_expcap != 0) { /* Change Max. Read Request Size to 2048 bytes. */ if (pci_get_max_read_req(sc->msk_dev) == 512) pci_set_max_read_req(sc->msk_dev, 2048); } /* Clear status list. */ bzero(sc->msk_stat_ring, sizeof(struct msk_stat_desc) * sc->msk_stat_count); sc->msk_stat_cons = 0; bus_dmamap_sync(sc->msk_stat_tag, sc->msk_stat_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_RST_SET); CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_RST_CLR); /* Set the status list base address. 
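 * The status ring's bus address is programmed as two 32-bit halves
 * (STAT_LIST_ADDR_LO/HI); the last-index register that follows tells
 * the hardware how many status list elements the ring holds.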
*/ addr = sc->msk_stat_ring_paddr; CSR_WRITE_4(sc, STAT_LIST_ADDR_LO, MSK_ADDR_LO(addr)); CSR_WRITE_4(sc, STAT_LIST_ADDR_HI, MSK_ADDR_HI(addr)); /* Set the status list last index. */ CSR_WRITE_2(sc, STAT_LAST_IDX, sc->msk_stat_count - 1); if (sc->msk_hw_id == CHIP_ID_YUKON_EC && sc->msk_hw_rev == CHIP_REV_YU_EC_A1) { /* WA for dev. #4.3 */ CSR_WRITE_2(sc, STAT_TX_IDX_TH, ST_TXTH_IDX_MASK); /* WA for dev. #4.18 */ CSR_WRITE_1(sc, STAT_FIFO_WM, 0x21); CSR_WRITE_1(sc, STAT_FIFO_ISR_WM, 0x07); } else { CSR_WRITE_2(sc, STAT_TX_IDX_TH, 0x0a); CSR_WRITE_1(sc, STAT_FIFO_WM, 0x10); if (sc->msk_hw_id == CHIP_ID_YUKON_XL && sc->msk_hw_rev == CHIP_REV_YU_XL_A0) CSR_WRITE_1(sc, STAT_FIFO_ISR_WM, 0x04); else CSR_WRITE_1(sc, STAT_FIFO_ISR_WM, 0x10); CSR_WRITE_4(sc, STAT_ISR_TIMER_INI, 0x0190); } /* * Use default value for STAT_ISR_TIMER_INI, STAT_LEV_TIMER_INI. */ CSR_WRITE_4(sc, STAT_TX_TIMER_INI, MSK_USECS(sc, 1000)); /* Enable status unit. */ CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_OP_ON); CSR_WRITE_1(sc, STAT_TX_TIMER_CTRL, TIM_START); CSR_WRITE_1(sc, STAT_LEV_TIMER_CTRL, TIM_START); CSR_WRITE_1(sc, STAT_ISR_TIMER_CTRL, TIM_START); } static int msk_probe(device_t dev) { struct msk_softc *sc; char desc[100]; sc = device_get_softc(device_get_parent(dev)); /* * Not much to do here. We always know there will be * at least one GMAC present, and if there are two, * mskc_attach() will create a second device instance * for us. */ snprintf(desc, sizeof(desc), "Marvell Technology Group Ltd. %s Id 0x%02x Rev 0x%02x", model_name[sc->msk_hw_id - CHIP_ID_YUKON_XL], sc->msk_hw_id, sc->msk_hw_rev); device_set_desc_copy(dev, desc); return (BUS_PROBE_DEFAULT); } static int msk_attach(device_t dev) { struct if_attach_args ifat = { .ifat_version = IF_ATTACH_VERSION, .ifat_drv = &msk_ifdrv, .ifat_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST, .ifat_capabilities = IFCAP_TXCSUM | IFCAP_TSO4 | IFCAP_LINKSTATE, }; struct msk_softc *sc; struct msk_if_softc *sc_if; struct msk_mii_data *mmd; struct mii_data *mii; int i, port, error; uint8_t eaddr[6]; if_t ifp; if (dev == NULL) return (EINVAL); error = 0; sc_if = device_get_softc(dev); sc = device_get_softc(device_get_parent(dev)); mmd = device_get_ivars(dev); port = mmd->port; sc_if->msk_if_dev = dev; sc_if->msk_port = port; sc_if->msk_softc = sc; sc_if->msk_flags = sc->msk_pflags; sc->msk_if[port] = sc_if; /* Setup Tx/Rx queue register offsets. */ if (port == MSK_PORT_A) { sc_if->msk_txq = Q_XA1; sc_if->msk_txsq = Q_XS1; sc_if->msk_rxq = Q_R1; } else { sc_if->msk_txq = Q_XA2; sc_if->msk_txsq = Q_XS2; sc_if->msk_rxq = Q_R2; } callout_init_mtx(&sc_if->msk_tick_ch, &sc_if->msk_softc->msk_mtx, 0); msk_sysctl_node(sc_if); if ((error = msk_txrx_dma_alloc(sc_if) != 0)) goto fail; msk_rx_dma_jalloc(sc_if); /* * Do miibus setup. */ error = mii_attach(dev, &sc_if->msk_miibus, msk_mediachange, msk_mediastatus, BMSR_DEFCAPMASK, PHY_ADDR_MARV, MII_OFFSET_ANY, mmd->mii_flags); if (error) goto fail; mii = device_get_softc(sc_if->msk_miibus); /* * Enable Rx checksum offloading if controller supports * new descriptor formant and controller is not Yukon XL. 
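 * Controllers using the old descriptor format (other than Yukon XL)
 * are also given IFCAP_RXCSUM; for them the checksum start offset is
 * programmed through the dummy OP_TCPSTART descriptor set up in
 * msk_init_rx_ring().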
*/ if ((sc_if->msk_flags & MSK_FLAG_DESCV2) == 0 && sc->msk_hw_id != CHIP_ID_YUKON_XL) ifat.ifat_capabilities |= IFCAP_RXCSUM; if ((sc_if->msk_flags & MSK_FLAG_DESCV2) != 0 && (sc_if->msk_flags & MSK_FLAG_NORX_CSUM) == 0) ifat.ifat_capabilities |= IFCAP_RXCSUM; /* VLAN capability setup */ ifat.ifat_capabilities |= IFCAP_VLAN_MTU; if ((sc_if->msk_flags & MSK_FLAG_NOHWVLAN) == 0) { /* * Due to Tx checksum offload hardware bugs, msk(4) manually * computes checksum for short frames. For VLAN tagged frames * this workaround does not work so disable checksum offload * for VLAN interface. */ ifat.ifat_capabilities |= IFCAP_VLAN_HWTAGGING; ifat.ifat_capabilities |= IFCAP_VLAN_HWTSO; /* * Enable Rx checksum offloading for VLAN tagged frames * if controller support new descriptor format. */ if ((sc_if->msk_flags & MSK_FLAG_DESCV2) != 0 && (sc_if->msk_flags & MSK_FLAG_NORX_CSUM) == 0) ifat.ifat_capabilities |= IFCAP_VLAN_HWCSUM; } ifat.ifat_hwassist = MSK_CSUM_FEATURES | CSUM_TSO; ifat.ifat_capenable = ifat.ifat_capabilities; ifat.ifat_baudrate = ifmedia_baudrate(mii->mii_media_active); /* * Disable RX checksum offloading on controllers that don't use * new descriptor format but give chance to enable it. */ if ((sc_if->msk_flags & MSK_FLAG_DESCV2) == 0) ifat.ifat_capenable &= ~IFCAP_RXCSUM; /* * Get station address for this interface. Note that * dual port cards actually come with three station * addresses: one for each port, plus an extra. The * extra one is used by the SysKonnect driver software * as a 'virtual' station address for when both ports * are operating in failover mode. Currently we don't * use this extra address. */ for (i = 0; i < ETHER_ADDR_LEN; i++) eaddr[i] = CSR_READ_1(sc, B2_MAC_1 + (port * 8) + i); ifat.ifat_lla = eaddr; ifat.ifat_softc = sc_if; ifat.ifat_dunit = device_get_unit(dev); - ifp = sc_if->msk_ifp = if_attach(&ifat); + sc_if->msk_capenable = ifat.ifat_capenable; + sc_if->msk_framesize = MSK_DEFAULT_FRAMESIZE; + return (0); fail: /* Access should be ok even though lock has been dropped */ sc->msk_if[port] = NULL; msk_detach(dev); return (error); } /* * Attach the interface. Allocate softc structures, do ifmedia * setup and ethernet/BPF attach. */ static int mskc_attach(device_t dev) { struct msk_softc *sc; struct msk_mii_data *mmd; int error, msic, msir, reg; sc = device_get_softc(dev); sc->msk_dev = dev; mtx_init(&sc->msk_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF); /* * Map control/status registers. */ pci_enable_busmaster(dev); /* Allocate I/O resource */ #ifdef MSK_USEIOSPACE sc->msk_res_spec = msk_res_spec_io; #else sc->msk_res_spec = msk_res_spec_mem; #endif sc->msk_irq_spec = msk_irq_spec_legacy; error = bus_alloc_resources(dev, sc->msk_res_spec, sc->msk_res); if (error) { if (sc->msk_res_spec == msk_res_spec_mem) sc->msk_res_spec = msk_res_spec_io; else sc->msk_res_spec = msk_res_spec_mem; error = bus_alloc_resources(dev, sc->msk_res_spec, sc->msk_res); if (error) { device_printf(dev, "couldn't allocate %s resources\n", sc->msk_res_spec == msk_res_spec_mem ? "memory" : "I/O"); mtx_destroy(&sc->msk_mtx); return (ENXIO); } } /* Enable all clocks before accessing any registers. */ CSR_PCI_WRITE_4(sc, PCI_OUR_REG_3, 0); CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR); sc->msk_hw_id = CSR_READ_1(sc, B2_CHIP_ID); sc->msk_hw_rev = (CSR_READ_1(sc, B2_MAC_CFG) >> 4) & 0x0f; /* Bail out if chip is not recognized. 
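 * Supported IDs run from Yukon XL through Yukon Optima; anything
 * outside that range (or the explicit "unknown" ID) aborts the attach.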
*/ if (sc->msk_hw_id < CHIP_ID_YUKON_XL || sc->msk_hw_id > CHIP_ID_YUKON_OPT || sc->msk_hw_id == CHIP_ID_YUKON_UNKNOWN) { device_printf(dev, "unknown device: id=0x%02x, rev=0x%02x\n", sc->msk_hw_id, sc->msk_hw_rev); mtx_destroy(&sc->msk_mtx); return (ENXIO); } SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "process_limit", CTLTYPE_INT | CTLFLAG_RW, &sc->msk_process_limit, 0, sysctl_hw_msk_proc_limit, "I", "max number of Rx events to process"); sc->msk_process_limit = MSK_PROC_DEFAULT; error = resource_int_value(device_get_name(dev), device_get_unit(dev), "process_limit", &sc->msk_process_limit); if (error == 0) { if (sc->msk_process_limit < MSK_PROC_MIN || sc->msk_process_limit > MSK_PROC_MAX) { device_printf(dev, "process_limit value out of range; " "using default: %d\n", MSK_PROC_DEFAULT); sc->msk_process_limit = MSK_PROC_DEFAULT; } } sc->msk_int_holdoff = MSK_INT_HOLDOFF_DEFAULT; SYSCTL_ADD_INT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "int_holdoff", CTLFLAG_RW, &sc->msk_int_holdoff, 0, "Maximum number of time to delay interrupts"); resource_int_value(device_get_name(dev), device_get_unit(dev), "int_holdoff", &sc->msk_int_holdoff); sc->msk_pmd = CSR_READ_1(sc, B2_PMD_TYP); /* Check number of MACs. */ sc->msk_num_port = 1; if ((CSR_READ_1(sc, B2_Y2_HW_RES) & CFG_DUAL_MAC_MSK) == CFG_DUAL_MAC_MSK) { if (!(CSR_READ_1(sc, B2_Y2_CLK_GATE) & Y2_STATUS_LNK2_INAC)) sc->msk_num_port++; } /* Check bus type. */ if (pci_find_cap(sc->msk_dev, PCIY_EXPRESS, ®) == 0) { sc->msk_bustype = MSK_PEX_BUS; sc->msk_expcap = reg; } else if (pci_find_cap(sc->msk_dev, PCIY_PCIX, ®) == 0) { sc->msk_bustype = MSK_PCIX_BUS; sc->msk_pcixcap = reg; } else sc->msk_bustype = MSK_PCI_BUS; switch (sc->msk_hw_id) { case CHIP_ID_YUKON_EC: sc->msk_clock = 125; /* 125 MHz */ sc->msk_pflags |= MSK_FLAG_JUMBO; break; case CHIP_ID_YUKON_EC_U: sc->msk_clock = 125; /* 125 MHz */ sc->msk_pflags |= MSK_FLAG_JUMBO | MSK_FLAG_JUMBO_NOCSUM; break; case CHIP_ID_YUKON_EX: sc->msk_clock = 125; /* 125 MHz */ sc->msk_pflags |= MSK_FLAG_JUMBO | MSK_FLAG_DESCV2 | MSK_FLAG_AUTOTX_CSUM; /* * Yukon Extreme seems to have silicon bug for * automatic Tx checksum calculation capability. */ if (sc->msk_hw_rev == CHIP_REV_YU_EX_B0) sc->msk_pflags &= ~MSK_FLAG_AUTOTX_CSUM; /* * Yukon Extreme A0 could not use store-and-forward * for jumbo frames, so disable Tx checksum * offloading for jumbo frames. */ if (sc->msk_hw_rev == CHIP_REV_YU_EX_A0) sc->msk_pflags |= MSK_FLAG_JUMBO_NOCSUM; break; case CHIP_ID_YUKON_FE: sc->msk_clock = 100; /* 100 MHz */ sc->msk_pflags |= MSK_FLAG_FASTETHER; break; case CHIP_ID_YUKON_FE_P: sc->msk_clock = 50; /* 50 MHz */ sc->msk_pflags |= MSK_FLAG_FASTETHER | MSK_FLAG_DESCV2 | MSK_FLAG_AUTOTX_CSUM; if (sc->msk_hw_rev == CHIP_REV_YU_FE_P_A0) { /* * XXX * FE+ A0 has status LE writeback bug so msk(4) * does not rely on status word of received frame * in msk_rxeof() which in turn disables all * hardware assistance bits reported by the status * word as well as validity of the received frame. * Just pass received frames to upper stack with * minimal test and let upper stack handle them. 
*/ sc->msk_pflags |= MSK_FLAG_NOHWVLAN | MSK_FLAG_NORXCHK | MSK_FLAG_NORX_CSUM; } break; case CHIP_ID_YUKON_XL: sc->msk_clock = 156; /* 156 MHz */ sc->msk_pflags |= MSK_FLAG_JUMBO; break; case CHIP_ID_YUKON_SUPR: sc->msk_clock = 125; /* 125 MHz */ sc->msk_pflags |= MSK_FLAG_JUMBO | MSK_FLAG_DESCV2 | MSK_FLAG_AUTOTX_CSUM; break; case CHIP_ID_YUKON_UL_2: sc->msk_clock = 125; /* 125 MHz */ sc->msk_pflags |= MSK_FLAG_JUMBO; break; case CHIP_ID_YUKON_OPT: sc->msk_clock = 125; /* 125 MHz */ sc->msk_pflags |= MSK_FLAG_JUMBO | MSK_FLAG_DESCV2; break; default: sc->msk_clock = 156; /* 156 MHz */ break; } /* Allocate IRQ resources. */ msic = pci_msi_count(dev); if (bootverbose) device_printf(dev, "MSI count : %d\n", msic); if (legacy_intr != 0) msi_disable = 1; if (msi_disable == 0 && msic > 0) { msir = 1; if (pci_alloc_msi(dev, &msir) == 0) { if (msir == 1) { sc->msk_pflags |= MSK_FLAG_MSI; sc->msk_irq_spec = msk_irq_spec_msi; } else pci_release_msi(dev); } } error = bus_alloc_resources(dev, sc->msk_irq_spec, sc->msk_irq); if (error) { device_printf(dev, "couldn't allocate IRQ resources\n"); goto fail; } if ((error = msk_status_dma_alloc(sc)) != 0) goto fail; /* Set base interrupt mask. */ sc->msk_intrmask = Y2_IS_HW_ERR | Y2_IS_STAT_BMU; sc->msk_intrhwemask = Y2_IS_TIST_OV | Y2_IS_MST_ERR | Y2_IS_IRQ_STAT | Y2_IS_PCI_EXP | Y2_IS_PCI_NEXP; /* Reset the adapter. */ mskc_reset(sc); if ((error = mskc_setup_rambuffer(sc)) != 0) goto fail; sc->msk_devs[MSK_PORT_A] = device_add_child(dev, "msk", -1); if (sc->msk_devs[MSK_PORT_A] == NULL) { device_printf(dev, "failed to add child for PORT_A\n"); error = ENXIO; goto fail; } mmd = malloc(sizeof(struct msk_mii_data), M_DEVBUF, M_WAITOK | M_ZERO); if (mmd == NULL) { device_printf(dev, "failed to allocate memory for " "ivars of PORT_A\n"); error = ENXIO; goto fail; } mmd->port = MSK_PORT_A; mmd->pmd = sc->msk_pmd; mmd->mii_flags |= MIIF_DOPAUSE; if (sc->msk_pmd == 'L' || sc->msk_pmd == 'S') mmd->mii_flags |= MIIF_HAVEFIBER; if (sc->msk_pmd == 'P') mmd->mii_flags |= MIIF_HAVEFIBER | MIIF_MACPRIV0; device_set_ivars(sc->msk_devs[MSK_PORT_A], mmd); if (sc->msk_num_port > 1) { sc->msk_devs[MSK_PORT_B] = device_add_child(dev, "msk", -1); if (sc->msk_devs[MSK_PORT_B] == NULL) { device_printf(dev, "failed to add child for PORT_B\n"); error = ENXIO; goto fail; } mmd = malloc(sizeof(struct msk_mii_data), M_DEVBUF, M_WAITOK | M_ZERO); if (mmd == NULL) { device_printf(dev, "failed to allocate memory for " "ivars of PORT_B\n"); error = ENXIO; goto fail; } mmd->port = MSK_PORT_B; mmd->pmd = sc->msk_pmd; if (sc->msk_pmd == 'L' || sc->msk_pmd == 'S') mmd->mii_flags |= MIIF_HAVEFIBER; if (sc->msk_pmd == 'P') mmd->mii_flags |= MIIF_HAVEFIBER | MIIF_MACPRIV0; device_set_ivars(sc->msk_devs[MSK_PORT_B], mmd); } error = bus_generic_attach(dev); if (error) { device_printf(dev, "failed to attach port(s)\n"); goto fail; } /* Hook interrupt last to avoid having to lock softc. */ error = bus_setup_intr(dev, sc->msk_irq[0], INTR_TYPE_NET | INTR_MPSAFE, NULL, msk_intr, sc, &sc->msk_intrhand); if (error != 0) { device_printf(dev, "couldn't set up interrupt handler\n"); goto fail; } fail: if (error != 0) mskc_detach(dev); return (error); } /* * Shutdown hardware and free up resources. This can be called any * time after the mutex has been initialized. It is called in both * the error case in attach and the normal detach case so it needs * to be careful about only freeing resources that have actually been * allocated. 
*/ static int msk_detach(device_t dev) { struct msk_softc *sc; struct msk_if_softc *sc_if; if_t ifp; sc_if = device_get_softc(dev); KASSERT(mtx_initialized(&sc_if->msk_softc->msk_mtx), ("msk mutex not initialized in msk_detach")); MSK_IF_LOCK(sc_if); ifp = sc_if->msk_ifp; if (device_is_attached(dev)) { /* XXX */ sc_if->msk_flags |= MSK_FLAG_DETACH; msk_stop(sc_if); /* Can't hold locks while calling detach. */ MSK_IF_UNLOCK(sc_if); callout_drain(&sc_if->msk_tick_ch); if (ifp) if_detach(ifp); MSK_IF_LOCK(sc_if); } /* * We're generally called from mskc_detach() which is using * device_delete_child() to get to here. It's already trashed * miibus for us, so don't do it here or we'll panic. * * if (sc_if->msk_miibus != NULL) { * device_delete_child(dev, sc_if->msk_miibus); * sc_if->msk_miibus = NULL; * } */ msk_rx_dma_jfree(sc_if); msk_txrx_dma_free(sc_if); bus_generic_detach(dev); sc = sc_if->msk_softc; sc->msk_if[sc_if->msk_port] = NULL; MSK_IF_UNLOCK(sc_if); return (0); } static int mskc_detach(device_t dev) { struct msk_softc *sc; sc = device_get_softc(dev); KASSERT(mtx_initialized(&sc->msk_mtx), ("msk mutex not initialized")); if (device_is_alive(dev)) { if (sc->msk_devs[MSK_PORT_A] != NULL) { free(device_get_ivars(sc->msk_devs[MSK_PORT_A]), M_DEVBUF); device_delete_child(dev, sc->msk_devs[MSK_PORT_A]); } if (sc->msk_devs[MSK_PORT_B] != NULL) { free(device_get_ivars(sc->msk_devs[MSK_PORT_B]), M_DEVBUF); device_delete_child(dev, sc->msk_devs[MSK_PORT_B]); } bus_generic_detach(dev); } /* Disable all interrupts. */ CSR_WRITE_4(sc, B0_IMSK, 0); CSR_READ_4(sc, B0_IMSK); CSR_WRITE_4(sc, B0_HWE_IMSK, 0); CSR_READ_4(sc, B0_HWE_IMSK); /* LED Off. */ CSR_WRITE_2(sc, B0_CTST, Y2_LED_STAT_OFF); /* Put hardware reset. */ CSR_WRITE_2(sc, B0_CTST, CS_RST_SET); msk_status_dma_free(sc); if (sc->msk_intrhand) { bus_teardown_intr(dev, sc->msk_irq[0], sc->msk_intrhand); sc->msk_intrhand = NULL; } bus_release_resources(dev, sc->msk_irq_spec, sc->msk_irq); if ((sc->msk_pflags & MSK_FLAG_MSI) != 0) pci_release_msi(dev); bus_release_resources(dev, sc->msk_res_spec, sc->msk_res); mtx_destroy(&sc->msk_mtx); return (0); } static bus_dma_tag_t mskc_get_dma_tag(device_t bus, device_t child __unused) { return (bus_get_dma_tag(bus)); } struct msk_dmamap_arg { bus_addr_t msk_busaddr; }; static void msk_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) { struct msk_dmamap_arg *ctx; if (error != 0) return; ctx = arg; ctx->msk_busaddr = segs[0].ds_addr; } /* Create status DMA region. */ static int msk_status_dma_alloc(struct msk_softc *sc) { struct msk_dmamap_arg ctx; bus_size_t stat_sz; int count, error; /* * It seems controller requires number of status LE entries * is power of 2 and the maximum number of status LE entries * is 4096. For dual-port controllers, the number of status * LE entries should be large enough to hold both port's * status updates. 
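 * The count computed below (three times the Rx ring size plus the Tx
 * ring size) is therefore rounded up to a multiple of 1024 and capped
 * at 4096 entries.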
*/ count = 3 * MSK_RX_RING_CNT + MSK_TX_RING_CNT; count = imin(4096, roundup2(count, 1024)); sc->msk_stat_count = count; stat_sz = count * sizeof(struct msk_stat_desc); error = bus_dma_tag_create( bus_get_dma_tag(sc->msk_dev), /* parent */ MSK_STAT_ALIGN, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ stat_sz, /* maxsize */ 1, /* nsegments */ stat_sz, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->msk_stat_tag); if (error != 0) { device_printf(sc->msk_dev, "failed to create status DMA tag\n"); return (error); } /* Allocate DMA'able memory and load the DMA map for status ring. */ error = bus_dmamem_alloc(sc->msk_stat_tag, (void **)&sc->msk_stat_ring, BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->msk_stat_map); if (error != 0) { device_printf(sc->msk_dev, "failed to allocate DMA'able memory for status ring\n"); return (error); } ctx.msk_busaddr = 0; error = bus_dmamap_load(sc->msk_stat_tag, sc->msk_stat_map, sc->msk_stat_ring, stat_sz, msk_dmamap_cb, &ctx, BUS_DMA_NOWAIT); if (error != 0) { device_printf(sc->msk_dev, "failed to load DMA'able memory for status ring\n"); return (error); } sc->msk_stat_ring_paddr = ctx.msk_busaddr; return (0); } static void msk_status_dma_free(struct msk_softc *sc) { /* Destroy status block. */ if (sc->msk_stat_tag) { if (sc->msk_stat_ring_paddr) { bus_dmamap_unload(sc->msk_stat_tag, sc->msk_stat_map); sc->msk_stat_ring_paddr = 0; } if (sc->msk_stat_ring) { bus_dmamem_free(sc->msk_stat_tag, sc->msk_stat_ring, sc->msk_stat_map); sc->msk_stat_ring = NULL; } bus_dma_tag_destroy(sc->msk_stat_tag); sc->msk_stat_tag = NULL; } } static int msk_txrx_dma_alloc(struct msk_if_softc *sc_if) { struct msk_dmamap_arg ctx; struct msk_txdesc *txd; struct msk_rxdesc *rxd; bus_size_t rxalign; int error, i; /* Create parent DMA tag. */ error = bus_dma_tag_create( bus_get_dma_tag(sc_if->msk_if_dev), /* parent */ 1, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ BUS_SPACE_MAXSIZE_32BIT, /* maxsize */ 0, /* nsegments */ BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc_if->msk_cdata.msk_parent_tag); if (error != 0) { device_printf(sc_if->msk_if_dev, "failed to create parent DMA tag\n"); goto fail; } /* Create tag for Tx ring. */ error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */ MSK_RING_ALIGN, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ MSK_TX_RING_SZ, /* maxsize */ 1, /* nsegments */ MSK_TX_RING_SZ, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc_if->msk_cdata.msk_tx_ring_tag); if (error != 0) { device_printf(sc_if->msk_if_dev, "failed to create Tx ring DMA tag\n"); goto fail; } /* Create tag for Rx ring. */ error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */ MSK_RING_ALIGN, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ MSK_RX_RING_SZ, /* maxsize */ 1, /* nsegments */ MSK_RX_RING_SZ, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc_if->msk_cdata.msk_rx_ring_tag); if (error != 0) { device_printf(sc_if->msk_if_dev, "failed to create Rx ring DMA tag\n"); goto fail; } /* Create tag for Tx buffers. 
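 * The Tx buffer tag is sized for TSO: up to MSK_TSO_MAXSIZE bytes per
 * packet, split across at most MSK_MAXTXSEGS segments of
 * MSK_TSO_MAXSGSIZE bytes each.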
*/ error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */ 1, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ MSK_TSO_MAXSIZE, /* maxsize */ MSK_MAXTXSEGS, /* nsegments */ MSK_TSO_MAXSGSIZE, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc_if->msk_cdata.msk_tx_tag); if (error != 0) { device_printf(sc_if->msk_if_dev, "failed to create Tx DMA tag\n"); goto fail; } rxalign = 1; /* * Workaround hardware hang which seems to happen when Rx buffer * is not aligned on multiple of FIFO word(8 bytes). */ if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) != 0) rxalign = MSK_RX_BUF_ALIGN; /* Create tag for Rx buffers. */ error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */ rxalign, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ MCLBYTES, /* maxsize */ 1, /* nsegments */ MCLBYTES, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc_if->msk_cdata.msk_rx_tag); if (error != 0) { device_printf(sc_if->msk_if_dev, "failed to create Rx DMA tag\n"); goto fail; } /* Allocate DMA'able memory and load the DMA map for Tx ring. */ error = bus_dmamem_alloc(sc_if->msk_cdata.msk_tx_ring_tag, (void **)&sc_if->msk_rdata.msk_tx_ring, BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc_if->msk_cdata.msk_tx_ring_map); if (error != 0) { device_printf(sc_if->msk_if_dev, "failed to allocate DMA'able memory for Tx ring\n"); goto fail; } ctx.msk_busaddr = 0; error = bus_dmamap_load(sc_if->msk_cdata.msk_tx_ring_tag, sc_if->msk_cdata.msk_tx_ring_map, sc_if->msk_rdata.msk_tx_ring, MSK_TX_RING_SZ, msk_dmamap_cb, &ctx, BUS_DMA_NOWAIT); if (error != 0) { device_printf(sc_if->msk_if_dev, "failed to load DMA'able memory for Tx ring\n"); goto fail; } sc_if->msk_rdata.msk_tx_ring_paddr = ctx.msk_busaddr; /* Allocate DMA'able memory and load the DMA map for Rx ring. */ error = bus_dmamem_alloc(sc_if->msk_cdata.msk_rx_ring_tag, (void **)&sc_if->msk_rdata.msk_rx_ring, BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc_if->msk_cdata.msk_rx_ring_map); if (error != 0) { device_printf(sc_if->msk_if_dev, "failed to allocate DMA'able memory for Rx ring\n"); goto fail; } ctx.msk_busaddr = 0; error = bus_dmamap_load(sc_if->msk_cdata.msk_rx_ring_tag, sc_if->msk_cdata.msk_rx_ring_map, sc_if->msk_rdata.msk_rx_ring, MSK_RX_RING_SZ, msk_dmamap_cb, &ctx, BUS_DMA_NOWAIT); if (error != 0) { device_printf(sc_if->msk_if_dev, "failed to load DMA'able memory for Rx ring\n"); goto fail; } sc_if->msk_rdata.msk_rx_ring_paddr = ctx.msk_busaddr; /* Create DMA maps for Tx buffers. */ for (i = 0; i < MSK_TX_RING_CNT; i++) { txd = &sc_if->msk_cdata.msk_txdesc[i]; txd->tx_m = NULL; txd->tx_dmamap = NULL; error = bus_dmamap_create(sc_if->msk_cdata.msk_tx_tag, 0, &txd->tx_dmamap); if (error != 0) { device_printf(sc_if->msk_if_dev, "failed to create Tx dmamap\n"); goto fail; } } /* Create DMA maps for Rx buffers. 
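 * A spare map is created first, followed by one map per ring entry;
 * msk_newbuf() loads a new mbuf into the spare map and swaps it with
 * the descriptor's map, so a failed load leaves the old buffer intact.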
*/ if ((error = bus_dmamap_create(sc_if->msk_cdata.msk_rx_tag, 0, &sc_if->msk_cdata.msk_rx_sparemap)) != 0) { device_printf(sc_if->msk_if_dev, "failed to create spare Rx dmamap\n"); goto fail; } for (i = 0; i < MSK_RX_RING_CNT; i++) { rxd = &sc_if->msk_cdata.msk_rxdesc[i]; rxd->rx_m = NULL; rxd->rx_dmamap = NULL; error = bus_dmamap_create(sc_if->msk_cdata.msk_rx_tag, 0, &rxd->rx_dmamap); if (error != 0) { device_printf(sc_if->msk_if_dev, "failed to create Rx dmamap\n"); goto fail; } } fail: return (error); } static int msk_rx_dma_jalloc(struct msk_if_softc *sc_if) { struct msk_dmamap_arg ctx; struct msk_rxdesc *jrxd; bus_size_t rxalign; int error, i; if (jumbo_disable != 0 || (sc_if->msk_flags & MSK_FLAG_JUMBO) == 0) { sc_if->msk_flags &= ~MSK_FLAG_JUMBO; device_printf(sc_if->msk_if_dev, "disabling jumbo frame support\n"); return (0); } /* Create tag for jumbo Rx ring. */ error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */ MSK_RING_ALIGN, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ MSK_JUMBO_RX_RING_SZ, /* maxsize */ 1, /* nsegments */ MSK_JUMBO_RX_RING_SZ, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc_if->msk_cdata.msk_jumbo_rx_ring_tag); if (error != 0) { device_printf(sc_if->msk_if_dev, "failed to create jumbo Rx ring DMA tag\n"); goto jumbo_fail; } rxalign = 1; /* * Workaround hardware hang which seems to happen when Rx buffer * is not aligned on multiple of FIFO word(8 bytes). */ if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) != 0) rxalign = MSK_RX_BUF_ALIGN; /* Create tag for jumbo Rx buffers. */ error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */ rxalign, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ MJUM9BYTES, /* maxsize */ 1, /* nsegments */ MJUM9BYTES, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc_if->msk_cdata.msk_jumbo_rx_tag); if (error != 0) { device_printf(sc_if->msk_if_dev, "failed to create jumbo Rx DMA tag\n"); goto jumbo_fail; } /* Allocate DMA'able memory and load the DMA map for jumbo Rx ring. */ error = bus_dmamem_alloc(sc_if->msk_cdata.msk_jumbo_rx_ring_tag, (void **)&sc_if->msk_rdata.msk_jumbo_rx_ring, BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc_if->msk_cdata.msk_jumbo_rx_ring_map); if (error != 0) { device_printf(sc_if->msk_if_dev, "failed to allocate DMA'able memory for jumbo Rx ring\n"); goto jumbo_fail; } ctx.msk_busaddr = 0; error = bus_dmamap_load(sc_if->msk_cdata.msk_jumbo_rx_ring_tag, sc_if->msk_cdata.msk_jumbo_rx_ring_map, sc_if->msk_rdata.msk_jumbo_rx_ring, MSK_JUMBO_RX_RING_SZ, msk_dmamap_cb, &ctx, BUS_DMA_NOWAIT); if (error != 0) { device_printf(sc_if->msk_if_dev, "failed to load DMA'able memory for jumbo Rx ring\n"); goto jumbo_fail; } sc_if->msk_rdata.msk_jumbo_rx_ring_paddr = ctx.msk_busaddr; /* Create DMA maps for jumbo Rx buffers. 
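* Same spare-plus-per-slot arrangement as the standard Rx ring, only backed by MJUM9BYTES clusters. Note the early return above when the jumbo_disable tunable is set or MSK_FLAG_JUMBO is already clear, and that jumbo_fail below releases whatever was allocated, clears MSK_FLAG_JUMBO and logs that jumbo frame support is disabled.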
*/ if ((error = bus_dmamap_create(sc_if->msk_cdata.msk_jumbo_rx_tag, 0, &sc_if->msk_cdata.msk_jumbo_rx_sparemap)) != 0) { device_printf(sc_if->msk_if_dev, "failed to create spare jumbo Rx dmamap\n"); goto jumbo_fail; } for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) { jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[i]; jrxd->rx_m = NULL; jrxd->rx_dmamap = NULL; error = bus_dmamap_create(sc_if->msk_cdata.msk_jumbo_rx_tag, 0, &jrxd->rx_dmamap); if (error != 0) { device_printf(sc_if->msk_if_dev, "failed to create jumbo Rx dmamap\n"); goto jumbo_fail; } } return (0); jumbo_fail: msk_rx_dma_jfree(sc_if); device_printf(sc_if->msk_if_dev, "disabling jumbo frame support " "due to resource shortage\n"); sc_if->msk_flags &= ~MSK_FLAG_JUMBO; return (error); } static void msk_txrx_dma_free(struct msk_if_softc *sc_if) { struct msk_txdesc *txd; struct msk_rxdesc *rxd; int i; /* Tx ring. */ if (sc_if->msk_cdata.msk_tx_ring_tag) { if (sc_if->msk_rdata.msk_tx_ring_paddr) bus_dmamap_unload(sc_if->msk_cdata.msk_tx_ring_tag, sc_if->msk_cdata.msk_tx_ring_map); if (sc_if->msk_rdata.msk_tx_ring) bus_dmamem_free(sc_if->msk_cdata.msk_tx_ring_tag, sc_if->msk_rdata.msk_tx_ring, sc_if->msk_cdata.msk_tx_ring_map); sc_if->msk_rdata.msk_tx_ring = NULL; sc_if->msk_rdata.msk_tx_ring_paddr = 0; bus_dma_tag_destroy(sc_if->msk_cdata.msk_tx_ring_tag); sc_if->msk_cdata.msk_tx_ring_tag = NULL; } /* Rx ring. */ if (sc_if->msk_cdata.msk_rx_ring_tag) { if (sc_if->msk_rdata.msk_rx_ring_paddr) bus_dmamap_unload(sc_if->msk_cdata.msk_rx_ring_tag, sc_if->msk_cdata.msk_rx_ring_map); if (sc_if->msk_rdata.msk_rx_ring) bus_dmamem_free(sc_if->msk_cdata.msk_rx_ring_tag, sc_if->msk_rdata.msk_rx_ring, sc_if->msk_cdata.msk_rx_ring_map); sc_if->msk_rdata.msk_rx_ring = NULL; sc_if->msk_rdata.msk_rx_ring_paddr = 0; bus_dma_tag_destroy(sc_if->msk_cdata.msk_rx_ring_tag); sc_if->msk_cdata.msk_rx_ring_tag = NULL; } /* Tx buffers. */ if (sc_if->msk_cdata.msk_tx_tag) { for (i = 0; i < MSK_TX_RING_CNT; i++) { txd = &sc_if->msk_cdata.msk_txdesc[i]; if (txd->tx_dmamap) { bus_dmamap_destroy(sc_if->msk_cdata.msk_tx_tag, txd->tx_dmamap); txd->tx_dmamap = NULL; } } bus_dma_tag_destroy(sc_if->msk_cdata.msk_tx_tag); sc_if->msk_cdata.msk_tx_tag = NULL; } /* Rx buffers. */ if (sc_if->msk_cdata.msk_rx_tag) { for (i = 0; i < MSK_RX_RING_CNT; i++) { rxd = &sc_if->msk_cdata.msk_rxdesc[i]; if (rxd->rx_dmamap) { bus_dmamap_destroy(sc_if->msk_cdata.msk_rx_tag, rxd->rx_dmamap); rxd->rx_dmamap = NULL; } } if (sc_if->msk_cdata.msk_rx_sparemap) { bus_dmamap_destroy(sc_if->msk_cdata.msk_rx_tag, sc_if->msk_cdata.msk_rx_sparemap); sc_if->msk_cdata.msk_rx_sparemap = 0; } bus_dma_tag_destroy(sc_if->msk_cdata.msk_rx_tag); sc_if->msk_cdata.msk_rx_tag = NULL; } if (sc_if->msk_cdata.msk_parent_tag) { bus_dma_tag_destroy(sc_if->msk_cdata.msk_parent_tag); sc_if->msk_cdata.msk_parent_tag = NULL; } } static void msk_rx_dma_jfree(struct msk_if_softc *sc_if) { struct msk_rxdesc *jrxd; int i; /* Jumbo Rx ring. */ if (sc_if->msk_cdata.msk_jumbo_rx_ring_tag) { if (sc_if->msk_rdata.msk_jumbo_rx_ring_paddr) bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_rx_ring_tag, sc_if->msk_cdata.msk_jumbo_rx_ring_map); if (sc_if->msk_rdata.msk_jumbo_rx_ring) bus_dmamem_free(sc_if->msk_cdata.msk_jumbo_rx_ring_tag, sc_if->msk_rdata.msk_jumbo_rx_ring, sc_if->msk_cdata.msk_jumbo_rx_ring_map); sc_if->msk_rdata.msk_jumbo_rx_ring = NULL; sc_if->msk_rdata.msk_jumbo_rx_ring_paddr = 0; bus_dma_tag_destroy(sc_if->msk_cdata.msk_jumbo_rx_ring_tag); sc_if->msk_cdata.msk_jumbo_rx_ring_tag = NULL; } /* Jumbo Rx buffers. 
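* Teardown mirrors allocation in reverse: the ring is unloaded and its memory freed before its tag is destroyed, and every per-buffer map (including the spare) is destroyed before bus_dma_tag_destroy() is called on the buffer tag, since a tag cannot be destroyed while maps created from it are still outstanding.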
*/ if (sc_if->msk_cdata.msk_jumbo_rx_tag) { for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) { jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[i]; if (jrxd->rx_dmamap) { bus_dmamap_destroy( sc_if->msk_cdata.msk_jumbo_rx_tag, jrxd->rx_dmamap); jrxd->rx_dmamap = NULL; } } if (sc_if->msk_cdata.msk_jumbo_rx_sparemap) { bus_dmamap_destroy(sc_if->msk_cdata.msk_jumbo_rx_tag, sc_if->msk_cdata.msk_jumbo_rx_sparemap); sc_if->msk_cdata.msk_jumbo_rx_sparemap = 0; } bus_dma_tag_destroy(sc_if->msk_cdata.msk_jumbo_rx_tag); sc_if->msk_cdata.msk_jumbo_rx_tag = NULL; } } static int msk_encap(struct msk_if_softc *sc_if, struct mbuf **m_head) { struct msk_txdesc *txd, *txd_last; struct msk_tx_desc *tx_le; struct mbuf *m; bus_dmamap_t map; bus_dma_segment_t txsegs[MSK_MAXTXSEGS]; uint32_t control, csum, prod, si; uint16_t offset, tcp_offset, tso_mtu; int error, i, nseg, tso; MSK_IF_LOCK_ASSERT(sc_if); tcp_offset = offset = 0; m = *m_head; if (((sc_if->msk_flags & MSK_FLAG_AUTOTX_CSUM) == 0 && (m->m_pkthdr.csum_flags & MSK_CSUM_FEATURES) != 0) || ((sc_if->msk_flags & MSK_FLAG_DESCV2) == 0 && (m->m_pkthdr.csum_flags & CSUM_TSO) != 0)) { /* * Since mbuf has no protocol specific structure information * in it we have to inspect protocol information here to * setup TSO and checksum offload. I don't know why Marvell * made a such decision in chip design because other GigE * hardwares normally takes care of all these chores in * hardware. However, TSO performance of Yukon II is very * good such that it's worth to implement it. */ struct ether_header *eh; struct ip *ip; struct tcphdr *tcp; if (M_WRITABLE(m) == 0) { /* Get a writable copy. */ m = m_dup(*m_head, M_NOWAIT); m_freem(*m_head); if (m == NULL) { *m_head = NULL; return (ENOBUFS); } *m_head = m; } offset = sizeof(struct ether_header); m = m_pullup(m, offset); if (m == NULL) { *m_head = NULL; return (ENOBUFS); } eh = mtod(m, struct ether_header *); /* Check if hardware VLAN insertion is off. */ if (eh->ether_type == htons(ETHERTYPE_VLAN)) { offset = sizeof(struct ether_vlan_header); m = m_pullup(m, offset); if (m == NULL) { *m_head = NULL; return (ENOBUFS); } } m = m_pullup(m, offset + sizeof(struct ip)); if (m == NULL) { *m_head = NULL; return (ENOBUFS); } ip = (struct ip *)(mtod(m, char *) + offset); offset += (ip->ip_hl << 2); tcp_offset = offset; if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) { m = m_pullup(m, offset + sizeof(struct tcphdr)); if (m == NULL) { *m_head = NULL; return (ENOBUFS); } tcp = (struct tcphdr *)(mtod(m, char *) + offset); offset += (tcp->th_off << 2); } else if ((sc_if->msk_flags & MSK_FLAG_AUTOTX_CSUM) == 0 && (m->m_pkthdr.len < MSK_MIN_FRAMELEN) && (m->m_pkthdr.csum_flags & CSUM_TCP) != 0) { /* * It seems that Yukon II has Tx checksum offload bug * for small TCP packets that's less than 60 bytes in * size (e.g. TCP window probe packet, pure ACK packet). * Common work around like padding with zeros to make * the frame minimum ethernet frame size didn't work at * all. * Instead of disabling checksum offload completely we * resort to S/W checksum routine when we encounter * short TCP frames. * Short UDP packets appear to be handled correctly by * Yukon II. Also I assume this bug does not happen on * controllers that use newer descriptor format or * automatic Tx checksum calculation. 
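* The workaround below computes the TCP checksum in software: in_cksum_skip() checksums everything past the IP header, csum_data holds the offset of th_sum within the TCP header, so the result is stored directly into the frame and CSUM_TCP is cleared so the hardware leaves it alone.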
*/ m = m_pullup(m, offset + sizeof(struct tcphdr)); if (m == NULL) { *m_head = NULL; return (ENOBUFS); } *(uint16_t *)(m->m_data + offset + m->m_pkthdr.csum_data) = in_cksum_skip(m, m->m_pkthdr.len, offset); m->m_pkthdr.csum_flags &= ~CSUM_TCP; } *m_head = m; } prod = sc_if->msk_cdata.msk_tx_prod; txd = &sc_if->msk_cdata.msk_txdesc[prod]; txd_last = txd; map = txd->tx_dmamap; error = bus_dmamap_load_mbuf_sg(sc_if->msk_cdata.msk_tx_tag, map, *m_head, txsegs, &nseg, BUS_DMA_NOWAIT); if (error == EFBIG) { m = m_collapse(*m_head, M_NOWAIT, MSK_MAXTXSEGS); if (m == NULL) { m_freem(*m_head); *m_head = NULL; return (ENOBUFS); } *m_head = m; error = bus_dmamap_load_mbuf_sg(sc_if->msk_cdata.msk_tx_tag, map, *m_head, txsegs, &nseg, BUS_DMA_NOWAIT); if (error != 0) { m_freem(*m_head); *m_head = NULL; return (error); } } else if (error != 0) return (error); if (nseg == 0) { m_freem(*m_head); *m_head = NULL; return (EIO); } /* Check number of available descriptors. */ if (sc_if->msk_cdata.msk_tx_cnt + nseg >= (MSK_TX_RING_CNT - MSK_RESERVED_TX_DESC_CNT)) { bus_dmamap_unload(sc_if->msk_cdata.msk_tx_tag, map); return (ENOBUFS); } control = 0; tso = 0; tx_le = NULL; /* Check TSO support. */ if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) { if ((sc_if->msk_flags & MSK_FLAG_DESCV2) != 0) tso_mtu = m->m_pkthdr.tso_segsz; else tso_mtu = offset + m->m_pkthdr.tso_segsz; if (tso_mtu != sc_if->msk_cdata.msk_tso_mtu) { tx_le = &sc_if->msk_rdata.msk_tx_ring[prod]; tx_le->msk_addr = htole32(tso_mtu); if ((sc_if->msk_flags & MSK_FLAG_DESCV2) != 0) tx_le->msk_control = htole32(OP_MSS | HW_OWNER); else tx_le->msk_control = htole32(OP_LRGLEN | HW_OWNER); sc_if->msk_cdata.msk_tx_cnt++; MSK_INC(prod, MSK_TX_RING_CNT); sc_if->msk_cdata.msk_tso_mtu = tso_mtu; } tso++; } /* Check if we have a VLAN tag to insert. */ if ((m->m_flags & M_VLANTAG) != 0) { if (tx_le == NULL) { tx_le = &sc_if->msk_rdata.msk_tx_ring[prod]; tx_le->msk_addr = htole32(0); tx_le->msk_control = htole32(OP_VLAN | HW_OWNER | htons(m->m_pkthdr.ether_vtag)); sc_if->msk_cdata.msk_tx_cnt++; MSK_INC(prod, MSK_TX_RING_CNT); } else { tx_le->msk_control |= htole32(OP_VLAN | htons(m->m_pkthdr.ether_vtag)); } control |= INS_VLAN; } /* Check if we have to handle checksum offload. */ if (tso == 0 && (m->m_pkthdr.csum_flags & MSK_CSUM_FEATURES) != 0) { if ((sc_if->msk_flags & MSK_FLAG_AUTOTX_CSUM) != 0) control |= CALSUM; else { control |= CALSUM | WR_SUM | INIT_SUM | LOCK_SUM; if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0) control |= UDPTCP; /* Checksum write position. */ csum = (tcp_offset + m->m_pkthdr.csum_data) & 0xffff; /* Checksum start position. 
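* The checksum LE packs the write position in the low 16 bits and the start position in the high 16 bits; as a rough worked example, a plain Ethernet/IPv4/TCP frame has tcp_offset 34 and csum_data 16, giving (34 << 16) | 50. The LE is emitted only when this packed value differs from msk_last_csum, which saves a descriptor per packet for runs of identically laid out frames.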
*/ csum |= (uint32_t)tcp_offset << 16; if (csum != sc_if->msk_cdata.msk_last_csum) { tx_le = &sc_if->msk_rdata.msk_tx_ring[prod]; tx_le->msk_addr = htole32(csum); tx_le->msk_control = htole32(1 << 16 | (OP_TCPLISW | HW_OWNER)); sc_if->msk_cdata.msk_tx_cnt++; MSK_INC(prod, MSK_TX_RING_CNT); sc_if->msk_cdata.msk_last_csum = csum; } } } #ifdef MSK_64BIT_DMA if (MSK_ADDR_HI(txsegs[0].ds_addr) != sc_if->msk_cdata.msk_tx_high_addr) { sc_if->msk_cdata.msk_tx_high_addr = MSK_ADDR_HI(txsegs[0].ds_addr); tx_le = &sc_if->msk_rdata.msk_tx_ring[prod]; tx_le->msk_addr = htole32(MSK_ADDR_HI(txsegs[0].ds_addr)); tx_le->msk_control = htole32(OP_ADDR64 | HW_OWNER); sc_if->msk_cdata.msk_tx_cnt++; MSK_INC(prod, MSK_TX_RING_CNT); } #endif si = prod; tx_le = &sc_if->msk_rdata.msk_tx_ring[prod]; tx_le->msk_addr = htole32(MSK_ADDR_LO(txsegs[0].ds_addr)); if (tso == 0) tx_le->msk_control = htole32(txsegs[0].ds_len | control | OP_PACKET); else tx_le->msk_control = htole32(txsegs[0].ds_len | control | OP_LARGESEND); sc_if->msk_cdata.msk_tx_cnt++; MSK_INC(prod, MSK_TX_RING_CNT); for (i = 1; i < nseg; i++) { tx_le = &sc_if->msk_rdata.msk_tx_ring[prod]; #ifdef MSK_64BIT_DMA if (MSK_ADDR_HI(txsegs[i].ds_addr) != sc_if->msk_cdata.msk_tx_high_addr) { sc_if->msk_cdata.msk_tx_high_addr = MSK_ADDR_HI(txsegs[i].ds_addr); tx_le = &sc_if->msk_rdata.msk_tx_ring[prod]; tx_le->msk_addr = htole32(MSK_ADDR_HI(txsegs[i].ds_addr)); tx_le->msk_control = htole32(OP_ADDR64 | HW_OWNER); sc_if->msk_cdata.msk_tx_cnt++; MSK_INC(prod, MSK_TX_RING_CNT); tx_le = &sc_if->msk_rdata.msk_tx_ring[prod]; } #endif tx_le->msk_addr = htole32(MSK_ADDR_LO(txsegs[i].ds_addr)); tx_le->msk_control = htole32(txsegs[i].ds_len | control | OP_BUFFER | HW_OWNER); sc_if->msk_cdata.msk_tx_cnt++; MSK_INC(prod, MSK_TX_RING_CNT); } /* Update producer index. */ sc_if->msk_cdata.msk_tx_prod = prod; /* Set EOP on the last descriptor. */ prod = (prod + MSK_TX_RING_CNT - 1) % MSK_TX_RING_CNT; tx_le = &sc_if->msk_rdata.msk_tx_ring[prod]; tx_le->msk_control |= htole32(EOP); /* Turn the first descriptor ownership to hardware. */ tx_le = &sc_if->msk_rdata.msk_tx_ring[si]; tx_le->msk_control |= htole32(HW_OWNER); txd = &sc_if->msk_cdata.msk_txdesc[prod]; map = txd_last->tx_dmamap; txd_last->tx_dmamap = txd->tx_dmamap; txd->tx_dmamap = map; txd->tx_m = m; /* Sync descriptors. */ bus_dmamap_sync(sc_if->msk_cdata.msk_tx_tag, map, BUS_DMASYNC_PREWRITE); bus_dmamap_sync(sc_if->msk_cdata.msk_tx_ring_tag, sc_if->msk_cdata.msk_tx_ring_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); return (0); } static int msk_transmit(if_t ifp, struct mbuf *m) { struct msk_if_softc *sc_if; int error; if ((error = if_snd_enqueue(ifp, m)) != 0) return (error); sc_if = if_getsoftc(ifp, IF_DRIVER_SOFTC); if (MSK_IF_TRYLOCK(sc_if) == 0) return (0); error = msk_start(sc_if); MSK_IF_UNLOCK(sc_if); return (error); } static int msk_start(struct msk_if_softc *sc_if) { struct mbuf *m; int error, enq; MSK_IF_LOCK_ASSERT(sc_if); if ((sc_if->msk_flags & MSK_FLAG_LINK) == 0) return (ENETDOWN); error = enq = 0; while (sc_if->msk_cdata.msk_tx_cnt < (MSK_TX_RING_CNT - MSK_RESERVED_TX_DESC_CNT) && (m = if_snd_dequeue(sc_if->msk_ifp)) != NULL) { if ((error = msk_encap(sc_if, &m)) != 0) { if (m == NULL) break; if_snd_prepend(sc_if->msk_ifp, m); break; } enq++; if_mtap(sc_if->msk_ifp, m, NULL, 0); } if (enq > 0) { /* Transmit */ CSR_WRITE_2(sc_if->msk_softc, Y2_PREF_Q_ADDR(sc_if->msk_txq, PREF_UNIT_PUT_IDX_REG), sc_if->msk_cdata.msk_tx_prod); /* Set a timeout in case the chip goes out to lunch. 
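* msk_watchdog() is driven from the once-per-second msk_tick() callout; it counts this timer down and reinitializes the interface if it expires while transmissions are still outstanding. msk_txeof() zeroes the timer as soon as the Tx ring drains, so the watchdog only fires when completions genuinely stop.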
*/ sc_if->msk_watchdog_timer = MSK_TX_TIMEOUT; } return (0); } static void msk_watchdog(struct msk_if_softc *sc_if) { if_t ifp; MSK_IF_LOCK_ASSERT(sc_if); if (sc_if->msk_watchdog_timer == 0 || --sc_if->msk_watchdog_timer) return; ifp = sc_if->msk_ifp; if ((sc_if->msk_flags & MSK_FLAG_LINK) == 0) { if (bootverbose) if_printf(sc_if->msk_ifp, "watchdog timeout " "(missed link)\n"); if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); sc_if->msk_flags &= ~MSK_FLAG_RUNNING; msk_init_locked(sc_if); return; } if_printf(ifp, "watchdog timeout\n"); if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); sc_if->msk_flags &= ~MSK_FLAG_RUNNING; msk_init_locked(sc_if); msk_start(sc_if); } static int mskc_shutdown(device_t dev) { struct msk_softc *sc; int i; sc = device_get_softc(dev); MSK_LOCK(sc); for (i = 0; i < sc->msk_num_port; i++) { if (sc->msk_if[i] != NULL && ((sc->msk_if[i]->msk_flags & MSK_FLAG_RUNNING) != 0)) msk_stop(sc->msk_if[i]); } MSK_UNLOCK(sc); /* Put hardware reset. */ CSR_WRITE_2(sc, B0_CTST, CS_RST_SET); return (0); } static int mskc_suspend(device_t dev) { struct msk_softc *sc; int i; sc = device_get_softc(dev); MSK_LOCK(sc); for (i = 0; i < sc->msk_num_port; i++) { if (sc->msk_if[i] != NULL && ((sc->msk_if[i]->msk_flags & MSK_FLAG_RUNNING) != 0)) msk_stop(sc->msk_if[i]); } /* Disable all interrupts. */ CSR_WRITE_4(sc, B0_IMSK, 0); CSR_READ_4(sc, B0_IMSK); CSR_WRITE_4(sc, B0_HWE_IMSK, 0); CSR_READ_4(sc, B0_HWE_IMSK); msk_phy_power(sc, MSK_PHY_POWERDOWN); /* Put hardware reset. */ CSR_WRITE_2(sc, B0_CTST, CS_RST_SET); sc->msk_pflags |= MSK_FLAG_SUSPEND; MSK_UNLOCK(sc); return (0); } static int mskc_resume(device_t dev) { struct msk_softc *sc; int i; sc = device_get_softc(dev); MSK_LOCK(sc); CSR_PCI_WRITE_4(sc, PCI_OUR_REG_3, 0); mskc_reset(sc); for (i = 0; i < sc->msk_num_port; i++) { if (sc->msk_if[i] != NULL && sc->msk_if[i]->msk_ifp != NULL && (if_get(sc->msk_if[i]->msk_ifp, IF_FLAGS) & IFF_UP)) { sc->msk_if[i]->msk_flags &= ~MSK_FLAG_RUNNING; msk_init_locked(sc->msk_if[i]); } } sc->msk_pflags &= ~MSK_FLAG_SUSPEND; MSK_UNLOCK(sc); return (0); } #ifndef __NO_STRICT_ALIGNMENT static __inline void msk_fixup_rx(struct mbuf *m) { int i; uint16_t *src, *dst; src = mtod(m, uint16_t *); dst = src - 3; for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++) *dst++ = *src++; m->m_data -= (MSK_RX_BUF_ALIGN - ETHER_ALIGN); } #endif static __inline void msk_rxcsum(struct msk_if_softc *sc_if, uint32_t control, struct mbuf *m) { struct ether_header *eh; struct ip *ip; struct udphdr *uh; int32_t hlen, len, pktlen, temp32; uint16_t csum, *opts; if ((sc_if->msk_flags & MSK_FLAG_DESCV2) != 0) { if ((control & (CSS_IPV4 | CSS_IPFRAG)) == CSS_IPV4) { m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; if ((control & CSS_IPV4_CSUM_OK) != 0) m->m_pkthdr.csum_flags |= CSUM_IP_VALID; if ((control & (CSS_TCP | CSS_UDP)) != 0 && (control & (CSS_TCPUDP_CSUM_OK)) != 0) { m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR; m->m_pkthdr.csum_data = 0xffff; } } return; } /* * Marvell Yukon controllers that support OP_RXCHKS has known * to have various Rx checksum offloading bugs. These * controllers can be configured to compute simple checksum * at two different positions. So we can compute IP and TCP/UDP * checksum at the same time. We intentionally have controller * compute TCP/UDP checksum twice by specifying the same * checksum start position and compare the result. If the value * is different it would indicate the hardware logic was wrong. 
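* The duplicated value arrives in an OP_RXCHKS/OP_RXCHKSVLAN status LE and is stashed in sc_if->msk_csum by msk_handle_events(); the check below compares its two 16-bit halves and, on mismatch, gives up on hardware checksumming for this frame so the stack verifies it in software instead.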
*/ if ((sc_if->msk_csum & 0xFFFF) != (sc_if->msk_csum >> 16)) { if (bootverbose) device_printf(sc_if->msk_if_dev, "Rx checksum value mismatch!\n"); return; } pktlen = m->m_pkthdr.len; if (pktlen < sizeof(struct ether_header) + sizeof(struct ip)) return; eh = mtod(m, struct ether_header *); if (eh->ether_type != htons(ETHERTYPE_IP)) return; ip = (struct ip *)(eh + 1); if (ip->ip_v != IPVERSION) return; hlen = ip->ip_hl << 2; pktlen -= sizeof(struct ether_header); if (hlen < sizeof(struct ip)) return; if (ntohs(ip->ip_len) < hlen) return; if (ntohs(ip->ip_len) != pktlen) return; if (ip->ip_off & htons(IP_MF | IP_OFFMASK)) return; /* can't handle fragmented packet. */ switch (ip->ip_p) { case IPPROTO_TCP: if (pktlen < (hlen + sizeof(struct tcphdr))) return; break; case IPPROTO_UDP: if (pktlen < (hlen + sizeof(struct udphdr))) return; uh = (struct udphdr *)((caddr_t)ip + hlen); if (uh->uh_sum == 0) return; /* no checksum */ break; default: return; } csum = bswap16(sc_if->msk_csum & 0xFFFF); /* Checksum fixup for IP options. */ len = hlen - sizeof(struct ip); if (len > 0) { opts = (uint16_t *)(ip + 1); for (; len > 0; len -= sizeof(uint16_t), opts++) { temp32 = csum - *opts; temp32 = (temp32 >> 16) + (temp32 & 65535); csum = temp32 & 65535; } } m->m_pkthdr.csum_flags |= CSUM_DATA_VALID; m->m_pkthdr.csum_data = csum; } static void msk_rxeof(struct msk_if_softc *sc_if, uint32_t status, uint32_t control, int len) { struct mbuf *m; if_t ifp; struct msk_rxdesc *rxd; int cons, rxlen; ifp = sc_if->msk_ifp; MSK_IF_LOCK_ASSERT(sc_if); cons = sc_if->msk_cdata.msk_rx_cons; do { rxlen = status >> 16; if ((status & GMR_FS_VLAN) != 0 && - (if_get(ifp, IF_CAPENABLE) & IFCAP_VLAN_HWTAGGING) != 0) + (sc_if->msk_capenable & IFCAP_VLAN_HWTAGGING) != 0) rxlen -= ETHER_VLAN_ENCAP_LEN; if ((sc_if->msk_flags & MSK_FLAG_NORXCHK) != 0) { /* * For controllers that returns bogus status code * just do minimal check and let upper stack * handle this frame. */ if (len > MSK_MAX_FRAMELEN || len < ETHER_HDR_LEN) { if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); msk_discard_rxbuf(sc_if, cons); break; } } else if (len > sc_if->msk_framesize || ((status & GMR_FS_ANY_ERR) != 0) || ((status & GMR_FS_RX_OK) == 0) || (rxlen != len)) { /* Don't count flow-control packet as errors. */ if ((status & GMR_FS_GOOD_FC) == 0) if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); msk_discard_rxbuf(sc_if, cons); break; } #ifdef MSK_64BIT_DMA rxd = &sc_if->msk_cdata.msk_rxdesc[(cons + 1) % MSK_RX_RING_CNT]; #else rxd = &sc_if->msk_cdata.msk_rxdesc[cons]; #endif m = rxd->rx_m; if (msk_newbuf(sc_if, cons) != 0) { if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1); /* Reuse old buffer. */ msk_discard_rxbuf(sc_if, cons); break; } m->m_pkthdr.rcvif = ifp; m->m_pkthdr.len = m->m_len = len; #ifndef __NO_STRICT_ALIGNMENT if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) != 0) msk_fixup_rx(m); #endif if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); - if ((if_get(ifp, IF_CAPENABLE) & IFCAP_RXCSUM) != 0) + if ((sc_if->msk_capenable & IFCAP_RXCSUM) != 0) msk_rxcsum(sc_if, control, m); /* Check for VLAN tagged packets. 
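* The stripped tag itself is not carried in this LE: it was delivered earlier by an OP_RXVLAN (or OP_RXCHKSVLAN) status LE and parked in sc_if->msk_vtag by msk_handle_events(); here it is copied into the mbuf packet header and M_VLANTAG is set so the VLAN layer can demultiplex the frame.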
*/ if ((status & GMR_FS_VLAN) != 0 && - (if_get(ifp, IF_CAPENABLE) & IFCAP_VLAN_HWTAGGING) != 0) { + (sc_if->msk_capenable & IFCAP_VLAN_HWTAGGING) != 0) { m->m_pkthdr.ether_vtag = sc_if->msk_vtag; m->m_flags |= M_VLANTAG; } MSK_IF_UNLOCK(sc_if); if_input(ifp, m); MSK_IF_LOCK(sc_if); } while (0); MSK_RX_INC(sc_if->msk_cdata.msk_rx_cons, MSK_RX_RING_CNT); MSK_RX_INC(sc_if->msk_cdata.msk_rx_prod, MSK_RX_RING_CNT); } static void msk_jumbo_rxeof(struct msk_if_softc *sc_if, uint32_t status, uint32_t control, int len) { struct mbuf *m; if_t ifp; struct msk_rxdesc *jrxd; int cons, rxlen; ifp = sc_if->msk_ifp; MSK_IF_LOCK_ASSERT(sc_if); cons = sc_if->msk_cdata.msk_rx_cons; do { rxlen = status >> 16; if ((status & GMR_FS_VLAN) != 0 && - (if_get(ifp, IF_CAPENABLE) & IFCAP_VLAN_HWTAGGING) != 0) + (sc_if->msk_capenable & IFCAP_VLAN_HWTAGGING) != 0) rxlen -= ETHER_VLAN_ENCAP_LEN; if (len > sc_if->msk_framesize || ((status & GMR_FS_ANY_ERR) != 0) || ((status & GMR_FS_RX_OK) == 0) || (rxlen != len)) { /* Don't count flow-control packet as errors. */ if ((status & GMR_FS_GOOD_FC) == 0) if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); msk_discard_jumbo_rxbuf(sc_if, cons); break; } #ifdef MSK_64BIT_DMA jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[(cons + 1) % MSK_JUMBO_RX_RING_CNT]; #else jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[cons]; #endif m = jrxd->rx_m; if (msk_jumbo_newbuf(sc_if, cons) != 0) { if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1); /* Reuse old buffer. */ msk_discard_jumbo_rxbuf(sc_if, cons); break; } m->m_pkthdr.rcvif = ifp; m->m_pkthdr.len = m->m_len = len; #ifndef __NO_STRICT_ALIGNMENT if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) != 0) msk_fixup_rx(m); #endif if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); - if (if_get(ifp, IF_CAPENABLE) & IFCAP_RXCSUM) + if (sc_if->msk_capenable & IFCAP_RXCSUM) msk_rxcsum(sc_if, control, m); /* Check for VLAN tagged packets. */ if ((status & GMR_FS_VLAN) != 0 && - (if_get(ifp, IF_CAPENABLE) & IFCAP_VLAN_HWTAGGING) != 0) { + (sc_if->msk_capenable & IFCAP_VLAN_HWTAGGING) != 0) { m->m_pkthdr.ether_vtag = sc_if->msk_vtag; m->m_flags |= M_VLANTAG; } MSK_IF_UNLOCK(sc_if); if_input(ifp, m); MSK_IF_LOCK(sc_if); } while (0); MSK_RX_INC(sc_if->msk_cdata.msk_rx_cons, MSK_JUMBO_RX_RING_CNT); MSK_RX_INC(sc_if->msk_cdata.msk_rx_prod, MSK_JUMBO_RX_RING_CNT); } static void msk_txeof(struct msk_if_softc *sc_if, int idx) { struct msk_txdesc *txd; struct msk_tx_desc *cur_tx; if_t ifp; uint32_t control; int cons, prog; MSK_IF_LOCK_ASSERT(sc_if); ifp = sc_if->msk_ifp; bus_dmamap_sync(sc_if->msk_cdata.msk_tx_ring_tag, sc_if->msk_cdata.msk_tx_ring_map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); /* * Go through our tx ring and free mbufs for those * frames that have been sent. */ cons = sc_if->msk_cdata.msk_tx_cons; prog = 0; for (; cons != idx; MSK_INC(cons, MSK_TX_RING_CNT)) { if (sc_if->msk_cdata.msk_tx_cnt <= 0) break; prog++; cur_tx = &sc_if->msk_rdata.msk_tx_ring[cons]; control = le32toh(cur_tx->msk_control); sc_if->msk_cdata.msk_tx_cnt--; if ((control & EOP) == 0) continue; txd = &sc_if->msk_cdata.msk_txdesc[cons]; bus_dmamap_sync(sc_if->msk_cdata.msk_tx_tag, txd->tx_dmamap, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc_if->msk_cdata.msk_tx_tag, txd->tx_dmamap); if_inc_txcounters(ifp, txd->tx_m); m_freem(txd->tx_m); txd->tx_m = NULL; } if (prog > 0) { sc_if->msk_cdata.msk_tx_cons = cons; if (sc_if->msk_cdata.msk_tx_cnt == 0) sc_if->msk_watchdog_timer = 0; /* No need to sync LEs as we didn't update LEs. 
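* Only software state changed here (msk_tx_cons, msk_tx_cnt and the watchdog timer); the list elements were read under the POSTREAD/POSTWRITE sync at the top of msk_txeof() and never written back, so skipping a PREWRITE sync on the way out is safe.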
*/ } } static void msk_tick(void *xsc_if) { struct msk_if_softc *sc_if; struct mii_data *mii; sc_if = xsc_if; MSK_IF_LOCK_ASSERT(sc_if); mii = device_get_softc(sc_if->msk_miibus); mii_tick(mii); if ((sc_if->msk_flags & MSK_FLAG_LINK) == 0) msk_miibus_statchg(sc_if->msk_if_dev); msk_handle_events(sc_if->msk_softc); msk_watchdog(sc_if); callout_reset(&sc_if->msk_tick_ch, hz, msk_tick, sc_if); } static void msk_intr_phy(struct msk_if_softc *sc_if) { uint16_t status; msk_phy_readreg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_STAT); status = msk_phy_readreg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_STAT); /* Handle FIFO Underrun/Overflow? */ if ((status & PHY_M_IS_FIFO_ERROR)) device_printf(sc_if->msk_if_dev, "PHY FIFO underrun/overflow.\n"); } static void msk_intr_gmac(struct msk_if_softc *sc_if) { struct msk_softc *sc; uint8_t status; sc = sc_if->msk_softc; status = CSR_READ_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_SRC)); /* GMAC Rx FIFO overrun. */ if ((status & GM_IS_RX_FF_OR) != 0) CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), GMF_CLI_RX_FO); /* GMAC Tx FIFO underrun. */ if ((status & GM_IS_TX_FF_UR) != 0) { CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_CLI_TX_FU); device_printf(sc_if->msk_if_dev, "Tx FIFO underrun!\n"); /* * XXX * In case of Tx underrun, we may need to flush/reset * Tx MAC but that would also require resynchronization * with status LEs. Reinitializing status LEs would * affect other port in dual MAC configuration so it * should be avoided as possible as we can. * Due to lack of documentation it's all vague guess but * it needs more investigation. */ } } static void msk_handle_hwerr(struct msk_if_softc *sc_if, uint32_t status) { struct msk_softc *sc; sc = sc_if->msk_softc; if ((status & Y2_IS_PAR_RD1) != 0) { device_printf(sc_if->msk_if_dev, "RAM buffer read parity error\n"); /* Clear IRQ. */ CSR_WRITE_2(sc, SELECT_RAM_BUFFER(sc_if->msk_port, B3_RI_CTRL), RI_CLR_RD_PERR); } if ((status & Y2_IS_PAR_WR1) != 0) { device_printf(sc_if->msk_if_dev, "RAM buffer write parity error\n"); /* Clear IRQ. */ CSR_WRITE_2(sc, SELECT_RAM_BUFFER(sc_if->msk_port, B3_RI_CTRL), RI_CLR_WR_PERR); } if ((status & Y2_IS_PAR_MAC1) != 0) { device_printf(sc_if->msk_if_dev, "Tx MAC parity error\n"); /* Clear IRQ. */ CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_CLI_TX_PE); } if ((status & Y2_IS_PAR_RX1) != 0) { device_printf(sc_if->msk_if_dev, "Rx parity error\n"); /* Clear IRQ. */ CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_CLR_IRQ_PAR); } if ((status & (Y2_IS_TCP_TXS1 | Y2_IS_TCP_TXA1)) != 0) { device_printf(sc_if->msk_if_dev, "TCP segmentation error\n"); /* Clear IRQ. */ CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_CLR_IRQ_TCP); } } static void msk_intr_hwerr(struct msk_softc *sc) { uint32_t status; uint32_t tlphead[4]; status = CSR_READ_4(sc, B0_HWE_ISRC); /* Time Stamp timer overflow. */ if ((status & Y2_IS_TIST_OV) != 0) CSR_WRITE_1(sc, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ); if ((status & Y2_IS_PCI_NEXP) != 0) { /* * PCI Express Error occured which is not described in PEX * spec. * This error is also mapped either to Master Abort( * Y2_IS_MST_ERR) or Target Abort (Y2_IS_IRQ_STAT) bit and * can only be cleared there. */ device_printf(sc->msk_dev, "PCI Express protocol violation error\n"); } if ((status & (Y2_IS_MST_ERR | Y2_IS_IRQ_STAT)) != 0) { uint16_t v16; if ((status & Y2_IS_MST_ERR) != 0) device_printf(sc->msk_dev, "unexpected IRQ Status error\n"); else device_printf(sc->msk_dev, "unexpected IRQ Master error\n"); /* Reset all bits in the PCI status register. 
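* The error bits in the PCI status register are write-one-to-clear, so reading the register and writing it back with all of the error bits set clears whichever ones were latched. The TST_CFG_WRITE_ON/TST_CFG_WRITE_OFF pair around the write apparently unlocks the chip's shadow of PCI config space for the duration of the update.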
*/ v16 = pci_read_config(sc->msk_dev, PCIR_STATUS, 2); CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON); pci_write_config(sc->msk_dev, PCIR_STATUS, v16 | PCIM_STATUS_PERR | PCIM_STATUS_SERR | PCIM_STATUS_RMABORT | PCIM_STATUS_RTABORT | PCIM_STATUS_MDPERR, 2); CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF); } /* Check for PCI Express Uncorrectable Error. */ if ((status & Y2_IS_PCI_EXP) != 0) { uint32_t v32; /* * On PCI Express bus bridges are called root complexes (RC). * PCI Express errors are recognized by the root complex too, * which requests the system to handle the problem. After * error occurrence it may be that no access to the adapter * may be performed any longer. */ v32 = CSR_PCI_READ_4(sc, PEX_UNC_ERR_STAT); if ((v32 & PEX_UNSUP_REQ) != 0) { /* Ignore unsupported request error. */ device_printf(sc->msk_dev, "Uncorrectable PCI Express error\n"); } if ((v32 & (PEX_FATAL_ERRORS | PEX_POIS_TLP)) != 0) { int i; /* Get TLP header form Log Registers. */ for (i = 0; i < 4; i++) tlphead[i] = CSR_PCI_READ_4(sc, PEX_HEADER_LOG + i * 4); /* Check for vendor defined broadcast message. */ if (!(tlphead[0] == 0x73004001 && tlphead[1] == 0x7f)) { sc->msk_intrhwemask &= ~Y2_IS_PCI_EXP; CSR_WRITE_4(sc, B0_HWE_IMSK, sc->msk_intrhwemask); CSR_READ_4(sc, B0_HWE_IMSK); } } /* Clear the interrupt. */ CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON); CSR_PCI_WRITE_4(sc, PEX_UNC_ERR_STAT, 0xffffffff); CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF); } if ((status & Y2_HWE_L1_MASK) != 0 && sc->msk_if[MSK_PORT_A] != NULL) msk_handle_hwerr(sc->msk_if[MSK_PORT_A], status); if ((status & Y2_HWE_L2_MASK) != 0 && sc->msk_if[MSK_PORT_B] != NULL) msk_handle_hwerr(sc->msk_if[MSK_PORT_B], status >> 8); } static __inline void msk_rxput(struct msk_if_softc *sc_if) { struct msk_softc *sc; sc = sc_if->msk_softc; if (sc_if->msk_framesize > (MCLBYTES - MSK_RX_BUF_ALIGN)) bus_dmamap_sync( sc_if->msk_cdata.msk_jumbo_rx_ring_tag, sc_if->msk_cdata.msk_jumbo_rx_ring_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); else bus_dmamap_sync( sc_if->msk_cdata.msk_rx_ring_tag, sc_if->msk_cdata.msk_rx_ring_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); CSR_WRITE_2(sc, Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_PUT_IDX_REG), sc_if->msk_cdata.msk_rx_prod); } static int msk_handle_events(struct msk_softc *sc) { struct msk_if_softc *sc_if; int rxput[2]; struct msk_stat_desc *sd; uint32_t control, status; int cons, len, port, rxprog; if (sc->msk_stat_cons == CSR_READ_2(sc, STAT_PUT_IDX)) return (0); /* Sync status LEs. 
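* msk_handle_events() consumes status LEs from msk_stat_cons for as long as HW_OWNER is set, clearing the bit and writing it back so an entry is not reprocessed on a later pass; the opcode in the control word then routes each LE to the Rx, Tx-index or checksum/VLAN handling, and Rx prefetch-unit updates are batched through rxput[] until the put watermark is reached.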
*/ bus_dmamap_sync(sc->msk_stat_tag, sc->msk_stat_map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); rxput[MSK_PORT_A] = rxput[MSK_PORT_B] = 0; rxprog = 0; cons = sc->msk_stat_cons; for (;;) { sd = &sc->msk_stat_ring[cons]; control = le32toh(sd->msk_control); if ((control & HW_OWNER) == 0) break; control &= ~HW_OWNER; sd->msk_control = htole32(control); status = le32toh(sd->msk_status); len = control & STLE_LEN_MASK; port = (control >> 16) & 0x01; sc_if = sc->msk_if[port]; if (sc_if == NULL) { device_printf(sc->msk_dev, "invalid port opcode " "0x%08x\n", control & STLE_OP_MASK); continue; } switch (control & STLE_OP_MASK) { case OP_RXVLAN: sc_if->msk_vtag = ntohs(len); break; case OP_RXCHKSVLAN: sc_if->msk_vtag = ntohs(len); /* FALLTHROUGH */ case OP_RXCHKS: sc_if->msk_csum = status; break; case OP_RXSTAT: if (!(sc_if->msk_flags & MSK_FLAG_RUNNING)) break; if (sc_if->msk_framesize > (MCLBYTES - MSK_RX_BUF_ALIGN)) msk_jumbo_rxeof(sc_if, status, control, len); else msk_rxeof(sc_if, status, control, len); rxprog++; /* * Because there is no way to sync single Rx LE * put the DMA sync operation off until the end of * event processing. */ rxput[port]++; /* Update prefetch unit if we've passed water mark. */ if (rxput[port] >= sc_if->msk_cdata.msk_rx_putwm) { msk_rxput(sc_if); rxput[port] = 0; } break; case OP_TXINDEXLE: if (sc->msk_if[MSK_PORT_A] != NULL) msk_txeof(sc->msk_if[MSK_PORT_A], status & STLE_TXA1_MSKL); if (sc->msk_if[MSK_PORT_B] != NULL) msk_txeof(sc->msk_if[MSK_PORT_B], ((status & STLE_TXA2_MSKL) >> STLE_TXA2_SHIFTL) | ((len & STLE_TXA2_MSKH) << STLE_TXA2_SHIFTH)); break; default: device_printf(sc->msk_dev, "unhandled opcode 0x%08x\n", control & STLE_OP_MASK); break; } MSK_INC(cons, sc->msk_stat_count); if (rxprog > sc->msk_process_limit) break; } sc->msk_stat_cons = cons; bus_dmamap_sync(sc->msk_stat_tag, sc->msk_stat_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); if (rxput[MSK_PORT_A] > 0) msk_rxput(sc->msk_if[MSK_PORT_A]); if (rxput[MSK_PORT_B] > 0) msk_rxput(sc->msk_if[MSK_PORT_B]); return (sc->msk_stat_cons != CSR_READ_2(sc, STAT_PUT_IDX)); } static void msk_intr(void *xsc) { struct msk_softc *sc; struct msk_if_softc *sc_if0, *sc_if1; uint32_t status; int domore; sc = xsc; MSK_LOCK(sc); /* Reading B0_Y2_SP_ISRC2 masks further interrupts. 
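* The read doubles as the enter-ISR handshake; writing 2 to B0_Y2_SP_ICR re-arms interrupts, which is why both the spurious-interrupt bail-out just below and the normal exit path finish with that same write.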
*/ status = CSR_READ_4(sc, B0_Y2_SP_ISRC2); if (status == 0 || status == 0xffffffff || (sc->msk_pflags & MSK_FLAG_SUSPEND) != 0 || (status & sc->msk_intrmask) == 0) { CSR_WRITE_4(sc, B0_Y2_SP_ICR, 2); MSK_UNLOCK(sc); return; } sc_if0 = sc->msk_if[MSK_PORT_A]; sc_if1 = sc->msk_if[MSK_PORT_B]; if ((status & Y2_IS_IRQ_PHY1) != 0 && sc_if0 != NULL) msk_intr_phy(sc_if0); if ((status & Y2_IS_IRQ_PHY2) != 0 && sc_if1 != NULL) msk_intr_phy(sc_if1); if ((status & Y2_IS_IRQ_MAC1) != 0 && sc_if0 != NULL) msk_intr_gmac(sc_if0); if ((status & Y2_IS_IRQ_MAC2) != 0 && sc_if1 != NULL) msk_intr_gmac(sc_if1); if ((status & (Y2_IS_CHK_RX1 | Y2_IS_CHK_RX2)) != 0) { device_printf(sc->msk_dev, "Rx descriptor error\n"); sc->msk_intrmask &= ~(Y2_IS_CHK_RX1 | Y2_IS_CHK_RX2); CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask); CSR_READ_4(sc, B0_IMSK); } if ((status & (Y2_IS_CHK_TXA1 | Y2_IS_CHK_TXA2)) != 0) { device_printf(sc->msk_dev, "Tx descriptor error\n"); sc->msk_intrmask &= ~(Y2_IS_CHK_TXA1 | Y2_IS_CHK_TXA2); CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask); CSR_READ_4(sc, B0_IMSK); } if ((status & Y2_IS_HW_ERR) != 0) msk_intr_hwerr(sc); domore = msk_handle_events(sc); if ((status & Y2_IS_STAT_BMU) != 0 && domore == 0) CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_CLR_IRQ); /* Reenable interrupts. */ CSR_WRITE_4(sc, B0_Y2_SP_ICR, 2); if (sc_if0 != NULL && (sc_if0->msk_flags & MSK_FLAG_RUNNING) != 0) msk_start(sc_if0); if (sc_if1 != NULL && (sc_if1->msk_flags & MSK_FLAG_RUNNING) != 0) msk_start(sc_if1); MSK_UNLOCK(sc); } static void msk_set_tx_stfwd(struct msk_if_softc *sc_if) { struct msk_softc *sc; if_t ifp; ifp = sc_if->msk_ifp; sc = sc_if->msk_softc; if ((sc->msk_hw_id == CHIP_ID_YUKON_EX && sc->msk_hw_rev != CHIP_REV_YU_EX_A0) || sc->msk_hw_id >= CHIP_ID_YUKON_SUPR) { CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), TX_STFW_ENA); } else { - if (if_get(ifp, IF_MTU) > ETHERMTU) { + if (sc_if->msk_framesize > MSK_DEFAULT_FRAMESIZE) { /* Set Tx GMAC FIFO Almost Empty Threshold. */ CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_AE_THR), MSK_ECU_JUMBO_WM << 16 | MSK_ECU_AE_THR); /* Disable Store & Forward mode for Tx. */ CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), TX_STFW_DIS); } else { CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), TX_STFW_ENA); } } } static void msk_init(void *xsc) { struct msk_if_softc *sc_if = xsc; MSK_IF_LOCK(sc_if); msk_init_locked(sc_if); MSK_IF_UNLOCK(sc_if); } static void msk_init_locked(struct msk_if_softc *sc_if) { struct msk_softc *sc; if_t ifp; struct mii_data *mii; uint8_t *eaddr; uint16_t gmac; uint32_t reg; int error; MSK_IF_LOCK_ASSERT(sc_if); ifp = sc_if->msk_ifp; sc = sc_if->msk_softc; mii = device_get_softc(sc_if->msk_miibus); if ((sc_if->msk_flags & MSK_FLAG_RUNNING) != 0) return; error = 0; /* Cancel pending I/O and free all Rx/Tx buffers. */ msk_stop(sc_if); - if (if_get(ifp, IF_MTU) < ETHERMTU) - sc_if->msk_framesize = ETHERMTU; - else - sc_if->msk_framesize = if_get(ifp, IF_MTU); - sc_if->msk_framesize += ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; - if (if_get(ifp, IF_MTU) > ETHERMTU && - (sc_if->msk_flags & MSK_FLAG_JUMBO_NOCSUM) != 0) { - if_clrflags(ifp, IF_HWASSIST, MSK_CSUM_FEATURES | CSUM_TSO); - if_clrflags(ifp, IF_CAPENABLE, IFCAP_TSO4 | IFCAP_TXCSUM); - } - /* GMAC Control reset. 
*/ CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), GMC_RST_SET); CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), GMC_RST_CLR); CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), GMC_F_LOOPB_OFF); if (sc->msk_hw_id == CHIP_ID_YUKON_EX || sc->msk_hw_id == CHIP_ID_YUKON_SUPR) CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), GMC_BYP_MACSECRX_ON | GMC_BYP_MACSECTX_ON | GMC_BYP_RETR_ON); /* * Initialize GMAC first such that speed/duplex/flow-control * parameters are renegotiated when interface is brought up. */ GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, 0); /* Dummy read the Interrupt Source Register. */ CSR_READ_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_SRC)); /* Clear MIB stats. */ msk_stats_clear(sc_if); /* Disable FCS. */ GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_CTRL, GM_RXCR_CRC_DIS); /* Setup Transmit Control Register. */ GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_CTRL, TX_COL_THR(TX_COL_DEF)); /* Setup Transmit Flow Control Register. */ GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_FLOW_CTRL, 0xffff); /* Setup Transmit Parameter Register. */ GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_PARAM, TX_JAM_LEN_VAL(TX_JAM_LEN_DEF) | TX_JAM_IPG_VAL(TX_JAM_IPG_DEF) | TX_IPG_JAM_DATA(TX_IPG_JAM_DEF) | TX_BACK_OFF_LIM(TX_BOF_LIM_DEF)); gmac = DATA_BLIND_VAL(DATA_BLIND_DEF) | GM_SMOD_VLAN_ENA | IPG_DATA_VAL(IPG_DATA_DEF); - if (if_get(ifp, IF_MTU) > ETHERMTU) + if (sc_if->msk_framesize > MSK_DEFAULT_FRAMESIZE) gmac |= GM_SMOD_JUMBO_ENA; GMAC_WRITE_2(sc, sc_if->msk_port, GM_SERIAL_MODE, gmac); /* Set station address. */ eaddr = if_lladdr(ifp); GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_1L, eaddr[0] | (eaddr[1] << 8)); GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_1M, eaddr[2] | (eaddr[3] << 8)); GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_1H, eaddr[4] | (eaddr[5] << 8)); GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_2L, eaddr[0] | (eaddr[1] << 8)); GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_2M, eaddr[2] | (eaddr[3] << 8)); GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_2H, eaddr[4] | (eaddr[5] << 8)); /* Disable interrupts for counter overflows. */ GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_IRQ_MSK, 0); GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_IRQ_MSK, 0); GMAC_WRITE_2(sc, sc_if->msk_port, GM_TR_IRQ_MSK, 0); /* Configure Rx MAC FIFO. */ CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), GMF_RST_SET); CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), GMF_RST_CLR); reg = GMF_OPER_ON | GMF_RX_F_FL_ON; if (sc->msk_hw_id == CHIP_ID_YUKON_FE_P || sc->msk_hw_id == CHIP_ID_YUKON_EX) reg |= GMF_RX_OVER_ON; CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), reg); /* Set receive filter. */ msk_rxfilter(sc_if); if (sc->msk_hw_id == CHIP_ID_YUKON_XL) { /* Clear flush mask - HW bug. */ CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_FL_MSK), 0); } else { /* Flush Rx MAC FIFO on any flow control or error. */ CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_FL_MSK), GMR_FS_ANY_ERR); } /* * Set Rx FIFO flush threshold to 64 bytes + 1 FIFO word * due to hardware hang on receipt of pause frames. */ reg = RX_GMF_FL_THR_DEF + 1; /* Another magic for Yukon FE+ - From Linux. */ if (sc->msk_hw_id == CHIP_ID_YUKON_FE_P && sc->msk_hw_rev == CHIP_REV_YU_FE_P_A0) reg = 0x178; CSR_WRITE_2(sc, MR_ADDR(sc_if->msk_port, RX_GMF_FL_THR), reg); /* Configure Tx MAC FIFO. 
*/ CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_RST_SET); CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_RST_CLR); CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_OPER_ON); /* Configure hardware VLAN tag insertion/stripping. */ - msk_setvlan(sc_if, ifp); + msk_setvlan(sc_if); if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) == 0) { /* Set Rx Pause threshold. */ CSR_WRITE_2(sc, MR_ADDR(sc_if->msk_port, RX_GMF_LP_THR), MSK_ECU_LLPP); CSR_WRITE_2(sc, MR_ADDR(sc_if->msk_port, RX_GMF_UP_THR), MSK_ECU_ULPP); /* Configure store-and-forward for Tx. */ msk_set_tx_stfwd(sc_if); } if (sc->msk_hw_id == CHIP_ID_YUKON_FE_P && sc->msk_hw_rev == CHIP_REV_YU_FE_P_A0) { /* Disable dynamic watermark - from Linux. */ reg = CSR_READ_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_EA)); reg &= ~0x03; CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_EA), reg); } /* * Disable Force Sync bit and Alloc bit in Tx RAM interface * arbiter as we don't use Sync Tx queue. */ CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, TXA_CTRL), TXA_DIS_FSYNC | TXA_DIS_ALLOC | TXA_STOP_RC); /* Enable the RAM Interface Arbiter. */ CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, TXA_CTRL), TXA_ENA_ARB); /* Setup RAM buffer. */ msk_set_rambuffer(sc_if); /* Disable Tx sync Queue. */ CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txsq, RB_CTRL), RB_RST_SET); /* Setup Tx Queue Bus Memory Interface. */ CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_CLR_RESET); CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_OPER_INIT); CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_FIFO_OP_ON); CSR_WRITE_2(sc, Q_ADDR(sc_if->msk_txq, Q_WM), MSK_BMU_TX_WM); switch (sc->msk_hw_id) { case CHIP_ID_YUKON_EC_U: if (sc->msk_hw_rev == CHIP_REV_YU_EC_U_A0) { /* Fix for Yukon-EC Ultra: set BMU FIFO level */ CSR_WRITE_2(sc, Q_ADDR(sc_if->msk_txq, Q_AL), MSK_ECU_TXFF_LEV); } break; case CHIP_ID_YUKON_EX: /* * Yukon Extreme seems to have silicon bug for * automatic Tx checksum calculation capability. */ if (sc->msk_hw_rev == CHIP_REV_YU_EX_B0) CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_F), F_TX_CHK_AUTO_OFF); break; } /* Setup Rx Queue Bus Memory Interface. */ CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_CLR_RESET); CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_OPER_INIT); CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_FIFO_OP_ON); CSR_WRITE_2(sc, Q_ADDR(sc_if->msk_rxq, Q_WM), MSK_BMU_RX_WM); if (sc->msk_hw_id == CHIP_ID_YUKON_EC_U && sc->msk_hw_rev >= CHIP_REV_YU_EC_U_A1) { /* MAC Rx RAM Read is controlled by hardware. */ CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_F), F_M_RX_RAM_DIS); } msk_set_prefetch(sc, sc_if->msk_txq, sc_if->msk_rdata.msk_tx_ring_paddr, MSK_TX_RING_CNT - 1); msk_init_tx_ring(sc_if); /* Disable Rx checksum offload and RSS hash. 
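* RSS hashing is always turned off here; the BMU checksum engine (BMU_ENA_RX_CHKSUM) is enabled only for older, non-DESCV2 controllers with IFCAP_RXCSUM on, since the newer descriptor format reports checksum status through the status LE control bits handled in msk_rxcsum() instead.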
*/ reg = BMU_DIS_RX_RSS_HASH; if ((sc_if->msk_flags & MSK_FLAG_DESCV2) == 0 && - (if_get(ifp, IF_CAPENABLE) & IFCAP_RXCSUM) != 0) + (sc_if->msk_capenable & IFCAP_RXCSUM) != 0) reg |= BMU_ENA_RX_CHKSUM; else reg |= BMU_DIS_RX_CHKSUM; CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), reg); if (sc_if->msk_framesize > (MCLBYTES - MSK_RX_BUF_ALIGN)) { msk_set_prefetch(sc, sc_if->msk_rxq, sc_if->msk_rdata.msk_jumbo_rx_ring_paddr, MSK_JUMBO_RX_RING_CNT - 1); error = msk_init_jumbo_rx_ring(sc_if); } else { msk_set_prefetch(sc, sc_if->msk_rxq, sc_if->msk_rdata.msk_rx_ring_paddr, MSK_RX_RING_CNT - 1); error = msk_init_rx_ring(sc_if); } if (error != 0) { device_printf(sc_if->msk_if_dev, "initialization failed: no memory for Rx buffers\n"); msk_stop(sc_if); return; } if (sc->msk_hw_id == CHIP_ID_YUKON_EX || sc->msk_hw_id == CHIP_ID_YUKON_SUPR) { /* Disable flushing of non-ASF packets. */ CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), GMF_RX_MACSEC_FLUSH_OFF); } /* Configure interrupt handling. */ if (sc_if->msk_port == MSK_PORT_A) { sc->msk_intrmask |= Y2_IS_PORT_A; sc->msk_intrhwemask |= Y2_HWE_L1_MASK; } else { sc->msk_intrmask |= Y2_IS_PORT_B; sc->msk_intrhwemask |= Y2_HWE_L2_MASK; } /* Configure IRQ moderation mask. */ CSR_WRITE_4(sc, B2_IRQM_MSK, sc->msk_intrmask); if (sc->msk_int_holdoff > 0) { /* Configure initial IRQ moderation timer value. */ CSR_WRITE_4(sc, B2_IRQM_INI, MSK_USECS(sc, sc->msk_int_holdoff)); CSR_WRITE_4(sc, B2_IRQM_VAL, MSK_USECS(sc, sc->msk_int_holdoff)); /* Start IRQ moderation. */ CSR_WRITE_1(sc, B2_IRQM_CTRL, TIM_START); } CSR_WRITE_4(sc, B0_HWE_IMSK, sc->msk_intrhwemask); CSR_READ_4(sc, B0_HWE_IMSK); CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask); CSR_READ_4(sc, B0_IMSK); sc_if->msk_flags |= MSK_FLAG_RUNNING; sc_if->msk_flags &= ~MSK_FLAG_LINK; mii_mediachg(mii); callout_reset(&sc_if->msk_tick_ch, hz, msk_tick, sc_if); } static void msk_set_rambuffer(struct msk_if_softc *sc_if) { struct msk_softc *sc; int ltpp, utpp; sc = sc_if->msk_softc; if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) == 0) return; /* Setup Rx Queue. */ CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_RST_CLR); CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_START), sc->msk_rxqstart[sc_if->msk_port] / 8); CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_END), sc->msk_rxqend[sc_if->msk_port] / 8); CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_WP), sc->msk_rxqstart[sc_if->msk_port] / 8); CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_RP), sc->msk_rxqstart[sc_if->msk_port] / 8); utpp = (sc->msk_rxqend[sc_if->msk_port] + 1 - sc->msk_rxqstart[sc_if->msk_port] - MSK_RB_ULPP) / 8; ltpp = (sc->msk_rxqend[sc_if->msk_port] + 1 - sc->msk_rxqstart[sc_if->msk_port] - MSK_RB_LLPP_B) / 8; if (sc->msk_rxqsize < MSK_MIN_RXQ_SIZE) ltpp += (MSK_RB_LLPP_B - MSK_RB_LLPP_S) / 8; CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_RX_UTPP), utpp); CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_RX_LTPP), ltpp); /* Set Rx priority(RB_RX_UTHP/RB_RX_LTHP) thresholds? */ CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_ENA_OP_MD); CSR_READ_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL)); /* Setup Tx Queue. */ CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_RST_CLR); CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_START), sc->msk_txqstart[sc_if->msk_port] / 8); CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_END), sc->msk_txqend[sc_if->msk_port] / 8); CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_WP), sc->msk_txqstart[sc_if->msk_port] / 8); CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_RP), sc->msk_txqstart[sc_if->msk_port] / 8); /* Enable Store & Forward for Tx side. 
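* This path only runs for controllers with an external RAM buffer (msk_set_rambuffer() returns early otherwise); the trailing CSR_READ_1() looks like the driver's usual posted-write flush, making sure the control writes reach the chip before the function returns.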
*/ CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_ENA_STFWD); CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_ENA_OP_MD); CSR_READ_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL)); } static void msk_set_prefetch(struct msk_softc *sc, int qaddr, bus_addr_t addr, uint32_t count) { /* Reset the prefetch unit. */ CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG), PREF_UNIT_RST_SET); CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG), PREF_UNIT_RST_CLR); /* Set LE base address. */ CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_ADDR_LOW_REG), MSK_ADDR_LO(addr)); CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_ADDR_HI_REG), MSK_ADDR_HI(addr)); /* Set the list last index. */ CSR_WRITE_2(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_LAST_IDX_REG), count); /* Turn on prefetch unit. */ CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG), PREF_UNIT_OP_ON); /* Dummy read to ensure write. */ CSR_READ_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG)); } static void msk_stop(struct msk_if_softc *sc_if) { struct msk_softc *sc; struct msk_txdesc *txd; struct msk_rxdesc *rxd; struct msk_rxdesc *jrxd; if_t ifp; uint32_t val; int i; MSK_IF_LOCK_ASSERT(sc_if); sc = sc_if->msk_softc; ifp = sc_if->msk_ifp; callout_stop(&sc_if->msk_tick_ch); sc_if->msk_watchdog_timer = 0; /* Disable interrupts. */ if (sc_if->msk_port == MSK_PORT_A) { sc->msk_intrmask &= ~Y2_IS_PORT_A; sc->msk_intrhwemask &= ~Y2_HWE_L1_MASK; } else { sc->msk_intrmask &= ~Y2_IS_PORT_B; sc->msk_intrhwemask &= ~Y2_HWE_L2_MASK; } CSR_WRITE_4(sc, B0_HWE_IMSK, sc->msk_intrhwemask); CSR_READ_4(sc, B0_HWE_IMSK); CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask); CSR_READ_4(sc, B0_IMSK); /* Disable Tx/Rx MAC. */ val = GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL); val &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA); GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, val); /* Read again to ensure writing. */ GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL); /* Update stats and clear counters. */ msk_stats_update(sc_if); /* Stop Tx BMU. */ CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_STOP); val = CSR_READ_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR)); for (i = 0; i < MSK_TIMEOUT; i++) { if ((val & (BMU_STOP | BMU_IDLE)) == 0) { CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_STOP); val = CSR_READ_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR)); } else break; DELAY(1); } if (i == MSK_TIMEOUT) device_printf(sc_if->msk_if_dev, "Tx BMU stop failed\n"); CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_RST_SET | RB_DIS_OP_MD); /* Disable all GMAC interrupt. */ CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_MSK), 0); /* Disable PHY interrupt. */ msk_phy_writereg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_MASK, 0); /* Disable the RAM Interface Arbiter. */ CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, TXA_CTRL), TXA_DIS_ARB); /* Reset the PCI FIFO of the async Tx queue */ CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_RST_SET | BMU_FIFO_RST); /* Reset the Tx prefetch units. */ CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(sc_if->msk_txq, PREF_UNIT_CTRL_REG), PREF_UNIT_RST_SET); /* Reset the RAM Buffer async Tx queue. */ CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_RST_SET); /* Reset Tx MAC FIFO. */ CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_RST_SET); /* Set Pause Off. */ CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), GMC_PAUSE_OFF); /* * The Rx Stop command will not work for Yukon-2 if the BMU does not * reach the end of packet and since we can't make sure that we have * incoming data, we must reset the BMU while it is not during a DMA * transfer. 
Since it is possible that the Rx path is still active, * the Rx RAM buffer will be stopped first, so any possible incoming * data will not trigger a DMA. After the RAM buffer is stopped, the * BMU is polled until any DMA in progress is ended and only then it * will be reset. */ /* Disable the RAM Buffer receive queue. */ CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_DIS_OP_MD); for (i = 0; i < MSK_TIMEOUT; i++) { if (CSR_READ_1(sc, RB_ADDR(sc_if->msk_rxq, Q_RSL)) == CSR_READ_1(sc, RB_ADDR(sc_if->msk_rxq, Q_RL))) break; DELAY(1); } if (i == MSK_TIMEOUT) device_printf(sc_if->msk_if_dev, "Rx BMU stop failed\n"); CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_RST_SET | BMU_FIFO_RST); /* Reset the Rx prefetch unit. */ CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_CTRL_REG), PREF_UNIT_RST_SET); /* Reset the RAM Buffer receive queue. */ CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_RST_SET); /* Reset Rx MAC FIFO. */ CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), GMF_RST_SET); /* Free Rx and Tx mbufs still in the queues. */ for (i = 0; i < MSK_RX_RING_CNT; i++) { rxd = &sc_if->msk_cdata.msk_rxdesc[i]; if (rxd->rx_m != NULL) { bus_dmamap_sync(sc_if->msk_cdata.msk_rx_tag, rxd->rx_dmamap, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc_if->msk_cdata.msk_rx_tag, rxd->rx_dmamap); m_freem(rxd->rx_m); rxd->rx_m = NULL; } } for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) { jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[i]; if (jrxd->rx_m != NULL) { bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_tag, jrxd->rx_dmamap, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_rx_tag, jrxd->rx_dmamap); m_freem(jrxd->rx_m); jrxd->rx_m = NULL; } } for (i = 0; i < MSK_TX_RING_CNT; i++) { txd = &sc_if->msk_cdata.msk_txdesc[i]; if (txd->tx_m != NULL) { bus_dmamap_sync(sc_if->msk_cdata.msk_tx_tag, txd->tx_dmamap, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc_if->msk_cdata.msk_tx_tag, txd->tx_dmamap); m_freem(txd->tx_m); txd->tx_m = NULL; } } /* * Mark the interface down. */ sc_if->msk_flags &= ~MSK_FLAG_RUNNING; sc_if->msk_flags &= ~MSK_FLAG_LINK; } /* * When GM_PAR_MIB_CLR bit of GM_PHY_ADDR is set, reading lower * counter clears high 16 bits of the counter such that accessing * lower 16 bits should be the last operation. */ #define MSK_READ_MIB32(x, y) \ (((uint32_t)GMAC_READ_2(sc, x, (y) + 4)) << 16) + \ (uint32_t)GMAC_READ_2(sc, x, y) #define MSK_READ_MIB64(x, y) \ (((uint64_t)MSK_READ_MIB32(x, (y) + 8)) << 32) + \ (uint64_t)MSK_READ_MIB32(x, y) static void msk_stats_clear(struct msk_if_softc *sc_if) { struct msk_softc *sc; uint32_t reg; uint16_t gmac; int i; MSK_IF_LOCK_ASSERT(sc_if); sc = sc_if->msk_softc; /* Set MIB Clear Counter Mode. */ gmac = GMAC_READ_2(sc, sc_if->msk_port, GM_PHY_ADDR); GMAC_WRITE_2(sc, sc_if->msk_port, GM_PHY_ADDR, gmac | GM_PAR_MIB_CLR); /* Read all MIB Counters with Clear Mode set. */ for (i = GM_RXF_UC_OK; i <= GM_TXE_FIFO_UR; i += sizeof(uint32_t)) reg = MSK_READ_MIB32(sc_if->msk_port, i); /* Clear MIB Clear Counter Mode. */ gmac &= ~GM_PAR_MIB_CLR; GMAC_WRITE_2(sc, sc_if->msk_port, GM_PHY_ADDR, gmac); } static void msk_stats_update(struct msk_if_softc *sc_if) { struct msk_softc *sc; if_t ifp; struct msk_hw_stats *stats; uint16_t gmac; uint32_t reg; MSK_IF_LOCK_ASSERT(sc_if); ifp = sc_if->msk_ifp; if ((sc_if->msk_flags & MSK_FLAG_RUNNING) == 0) return; sc = sc_if->msk_softc; stats = &sc_if->msk_stats; /* Set MIB Clear Counter Mode. 
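* With GM_PAR_MIB_CLR set the MIB counters are cleared as they are read, so each MSK_READ_MIB32/MSK_READ_MIB64 below returns only the delta accumulated since the last update and the += keeps the running totals in msk_stats.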
*/ gmac = GMAC_READ_2(sc, sc_if->msk_port, GM_PHY_ADDR); GMAC_WRITE_2(sc, sc_if->msk_port, GM_PHY_ADDR, gmac | GM_PAR_MIB_CLR); /* Rx stats. */ stats->rx_ucast_frames += MSK_READ_MIB32(sc_if->msk_port, GM_RXF_UC_OK); stats->rx_bcast_frames += MSK_READ_MIB32(sc_if->msk_port, GM_RXF_BC_OK); stats->rx_pause_frames += MSK_READ_MIB32(sc_if->msk_port, GM_RXF_MPAUSE); stats->rx_mcast_frames += MSK_READ_MIB32(sc_if->msk_port, GM_RXF_MC_OK); stats->rx_crc_errs += MSK_READ_MIB32(sc_if->msk_port, GM_RXF_FCS_ERR); reg = MSK_READ_MIB32(sc_if->msk_port, GM_RXF_SPARE1); stats->rx_good_octets += MSK_READ_MIB64(sc_if->msk_port, GM_RXO_OK_LO); stats->rx_bad_octets += MSK_READ_MIB64(sc_if->msk_port, GM_RXO_ERR_LO); stats->rx_runts += MSK_READ_MIB32(sc_if->msk_port, GM_RXF_SHT); stats->rx_runt_errs += MSK_READ_MIB32(sc_if->msk_port, GM_RXE_FRAG); stats->rx_pkts_64 += MSK_READ_MIB32(sc_if->msk_port, GM_RXF_64B); stats->rx_pkts_65_127 += MSK_READ_MIB32(sc_if->msk_port, GM_RXF_127B); stats->rx_pkts_128_255 += MSK_READ_MIB32(sc_if->msk_port, GM_RXF_255B); stats->rx_pkts_256_511 += MSK_READ_MIB32(sc_if->msk_port, GM_RXF_511B); stats->rx_pkts_512_1023 += MSK_READ_MIB32(sc_if->msk_port, GM_RXF_1023B); stats->rx_pkts_1024_1518 += MSK_READ_MIB32(sc_if->msk_port, GM_RXF_1518B); stats->rx_pkts_1519_max += MSK_READ_MIB32(sc_if->msk_port, GM_RXF_MAX_SZ); stats->rx_pkts_too_long += MSK_READ_MIB32(sc_if->msk_port, GM_RXF_LNG_ERR); stats->rx_pkts_jabbers += MSK_READ_MIB32(sc_if->msk_port, GM_RXF_JAB_PKT); reg = MSK_READ_MIB32(sc_if->msk_port, GM_RXF_SPARE2); stats->rx_fifo_oflows += MSK_READ_MIB32(sc_if->msk_port, GM_RXE_FIFO_OV); reg = MSK_READ_MIB32(sc_if->msk_port, GM_RXF_SPARE3); /* Tx stats. */ stats->tx_ucast_frames += MSK_READ_MIB32(sc_if->msk_port, GM_TXF_UC_OK); stats->tx_bcast_frames += MSK_READ_MIB32(sc_if->msk_port, GM_TXF_BC_OK); stats->tx_pause_frames += MSK_READ_MIB32(sc_if->msk_port, GM_TXF_MPAUSE); stats->tx_mcast_frames += MSK_READ_MIB32(sc_if->msk_port, GM_TXF_MC_OK); stats->tx_octets += MSK_READ_MIB64(sc_if->msk_port, GM_TXO_OK_LO); stats->tx_pkts_64 += MSK_READ_MIB32(sc_if->msk_port, GM_TXF_64B); stats->tx_pkts_65_127 += MSK_READ_MIB32(sc_if->msk_port, GM_TXF_127B); stats->tx_pkts_128_255 += MSK_READ_MIB32(sc_if->msk_port, GM_TXF_255B); stats->tx_pkts_256_511 += MSK_READ_MIB32(sc_if->msk_port, GM_TXF_511B); stats->tx_pkts_512_1023 += MSK_READ_MIB32(sc_if->msk_port, GM_TXF_1023B); stats->tx_pkts_1024_1518 += MSK_READ_MIB32(sc_if->msk_port, GM_TXF_1518B); stats->tx_pkts_1519_max += MSK_READ_MIB32(sc_if->msk_port, GM_TXF_MAX_SZ); reg = MSK_READ_MIB32(sc_if->msk_port, GM_TXF_SPARE1); stats->tx_colls += MSK_READ_MIB32(sc_if->msk_port, GM_TXF_COL); stats->tx_late_colls += MSK_READ_MIB32(sc_if->msk_port, GM_TXF_LAT_COL); stats->tx_excess_colls += MSK_READ_MIB32(sc_if->msk_port, GM_TXF_ABO_COL); stats->tx_multi_colls += MSK_READ_MIB32(sc_if->msk_port, GM_TXF_MUL_COL); stats->tx_single_colls += MSK_READ_MIB32(sc_if->msk_port, GM_TXF_SNG_COL); stats->tx_underflows += MSK_READ_MIB32(sc_if->msk_port, GM_TXE_FIFO_UR); /* Clear MIB Clear Counter Mode. 
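* With clear-on-read switched back off, the per-counter sysctl handlers further down can read the live hardware counters without zeroing them; each handler adds that live value to the software total kept by msk_stats_update(), so the reported figure covers both flushed and not-yet-flushed counts.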
*/ gmac &= ~GM_PAR_MIB_CLR; GMAC_WRITE_2(sc, sc_if->msk_port, GM_PHY_ADDR, gmac); } static int msk_sysctl_stat32(SYSCTL_HANDLER_ARGS) { struct msk_softc *sc; struct msk_if_softc *sc_if; uint32_t result, *stat; int off; sc_if = (struct msk_if_softc *)arg1; sc = sc_if->msk_softc; off = arg2; stat = (uint32_t *)((uint8_t *)&sc_if->msk_stats + off); MSK_IF_LOCK(sc_if); result = MSK_READ_MIB32(sc_if->msk_port, GM_MIB_CNT_BASE + off * 2); result += *stat; MSK_IF_UNLOCK(sc_if); return (sysctl_handle_int(oidp, &result, 0, req)); } static int msk_sysctl_stat64(SYSCTL_HANDLER_ARGS) { struct msk_softc *sc; struct msk_if_softc *sc_if; uint64_t result, *stat; int off; sc_if = (struct msk_if_softc *)arg1; sc = sc_if->msk_softc; off = arg2; stat = (uint64_t *)((uint8_t *)&sc_if->msk_stats + off); MSK_IF_LOCK(sc_if); result = MSK_READ_MIB64(sc_if->msk_port, GM_MIB_CNT_BASE + off * 2); result += *stat; MSK_IF_UNLOCK(sc_if); return (sysctl_handle_64(oidp, &result, 0, req)); } #undef MSK_READ_MIB32 #undef MSK_READ_MIB64 #define MSK_SYSCTL_STAT32(sc, c, o, p, n, d) \ SYSCTL_ADD_PROC(c, p, OID_AUTO, o, CTLTYPE_UINT | CTLFLAG_RD, \ sc, offsetof(struct msk_hw_stats, n), msk_sysctl_stat32, \ "IU", d) #define MSK_SYSCTL_STAT64(sc, c, o, p, n, d) \ SYSCTL_ADD_PROC(c, p, OID_AUTO, o, CTLTYPE_U64 | CTLFLAG_RD, \ sc, offsetof(struct msk_hw_stats, n), msk_sysctl_stat64, \ "QU", d) static void msk_sysctl_node(struct msk_if_softc *sc_if) { struct sysctl_ctx_list *ctx; struct sysctl_oid_list *child, *schild; struct sysctl_oid *tree; ctx = device_get_sysctl_ctx(sc_if->msk_if_dev); child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc_if->msk_if_dev)); tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats", CTLFLAG_RD, NULL, "MSK Statistics"); schild = child = SYSCTL_CHILDREN(tree); tree = SYSCTL_ADD_NODE(ctx, schild, OID_AUTO, "rx", CTLFLAG_RD, NULL, "MSK RX Statistics"); child = SYSCTL_CHILDREN(tree); MSK_SYSCTL_STAT32(sc_if, ctx, "ucast_frames", child, rx_ucast_frames, "Good unicast frames"); MSK_SYSCTL_STAT32(sc_if, ctx, "bcast_frames", child, rx_bcast_frames, "Good broadcast frames"); MSK_SYSCTL_STAT32(sc_if, ctx, "pause_frames", child, rx_pause_frames, "Pause frames"); MSK_SYSCTL_STAT32(sc_if, ctx, "mcast_frames", child, rx_mcast_frames, "Multicast frames"); MSK_SYSCTL_STAT32(sc_if, ctx, "crc_errs", child, rx_crc_errs, "CRC errors"); MSK_SYSCTL_STAT64(sc_if, ctx, "good_octets", child, rx_good_octets, "Good octets"); MSK_SYSCTL_STAT64(sc_if, ctx, "bad_octets", child, rx_bad_octets, "Bad octets"); MSK_SYSCTL_STAT32(sc_if, ctx, "frames_64", child, rx_pkts_64, "64 bytes frames"); MSK_SYSCTL_STAT32(sc_if, ctx, "frames_65_127", child, rx_pkts_65_127, "65 to 127 bytes frames"); MSK_SYSCTL_STAT32(sc_if, ctx, "frames_128_255", child, rx_pkts_128_255, "128 to 255 bytes frames"); MSK_SYSCTL_STAT32(sc_if, ctx, "frames_256_511", child, rx_pkts_256_511, "256 to 511 bytes frames"); MSK_SYSCTL_STAT32(sc_if, ctx, "frames_512_1023", child, rx_pkts_512_1023, "512 to 1023 bytes frames"); MSK_SYSCTL_STAT32(sc_if, ctx, "frames_1024_1518", child, rx_pkts_1024_1518, "1024 to 1518 bytes frames"); MSK_SYSCTL_STAT32(sc_if, ctx, "frames_1519_max", child, rx_pkts_1519_max, "1519 to max frames"); MSK_SYSCTL_STAT32(sc_if, ctx, "frames_too_long", child, rx_pkts_too_long, "frames too long"); MSK_SYSCTL_STAT32(sc_if, ctx, "jabbers", child, rx_pkts_jabbers, "Jabber errors"); MSK_SYSCTL_STAT32(sc_if, ctx, "overflows", child, rx_fifo_oflows, "FIFO overflows"); tree = SYSCTL_ADD_NODE(ctx, schild, OID_AUTO, "tx", CTLFLAG_RD, NULL, "MSK TX Statistics"); child = 
SYSCTL_CHILDREN(tree); MSK_SYSCTL_STAT32(sc_if, ctx, "ucast_frames", child, tx_ucast_frames, "Unicast frames"); MSK_SYSCTL_STAT32(sc_if, ctx, "bcast_frames", child, tx_bcast_frames, "Broadcast frames"); MSK_SYSCTL_STAT32(sc_if, ctx, "pause_frames", child, tx_pause_frames, "Pause frames"); MSK_SYSCTL_STAT32(sc_if, ctx, "mcast_frames", child, tx_mcast_frames, "Multicast frames"); MSK_SYSCTL_STAT64(sc_if, ctx, "octets", child, tx_octets, "Octets"); MSK_SYSCTL_STAT32(sc_if, ctx, "frames_64", child, tx_pkts_64, "64 bytes frames"); MSK_SYSCTL_STAT32(sc_if, ctx, "frames_65_127", child, tx_pkts_65_127, "65 to 127 bytes frames"); MSK_SYSCTL_STAT32(sc_if, ctx, "frames_128_255", child, tx_pkts_128_255, "128 to 255 bytes frames"); MSK_SYSCTL_STAT32(sc_if, ctx, "frames_256_511", child, tx_pkts_256_511, "256 to 511 bytes frames"); MSK_SYSCTL_STAT32(sc_if, ctx, "frames_512_1023", child, tx_pkts_512_1023, "512 to 1023 bytes frames"); MSK_SYSCTL_STAT32(sc_if, ctx, "frames_1024_1518", child, tx_pkts_1024_1518, "1024 to 1518 bytes frames"); MSK_SYSCTL_STAT32(sc_if, ctx, "frames_1519_max", child, tx_pkts_1519_max, "1519 to max frames"); MSK_SYSCTL_STAT32(sc_if, ctx, "colls", child, tx_colls, "Collisions"); MSK_SYSCTL_STAT32(sc_if, ctx, "late_colls", child, tx_late_colls, "Late collisions"); MSK_SYSCTL_STAT32(sc_if, ctx, "excess_colls", child, tx_excess_colls, "Excessive collisions"); MSK_SYSCTL_STAT32(sc_if, ctx, "multi_colls", child, tx_multi_colls, "Multiple collisions"); MSK_SYSCTL_STAT32(sc_if, ctx, "single_colls", child, tx_single_colls, "Single collisions"); MSK_SYSCTL_STAT32(sc_if, ctx, "underflows", child, tx_underflows, "FIFO underflows"); } #undef MSK_SYSCTL_STAT32 #undef MSK_SYSCTL_STAT64 static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high) { int error, value; if (!arg1) return (EINVAL); value = *(int *)arg1; error = sysctl_handle_int(oidp, &value, 0, req); if (error || !req->newptr) return (error); if (value < low || value > high) return (EINVAL); *(int *)arg1 = value; return (0); } static int sysctl_hw_msk_proc_limit(SYSCTL_HANDLER_ARGS) { return (sysctl_int_range(oidp, arg1, arg2, req, MSK_PROC_MIN, MSK_PROC_MAX)); } Index: projects/ifnet/sys/dev/msk/if_mskreg.h =================================================================== --- projects/ifnet/sys/dev/msk/if_mskreg.h (revision 277242) +++ projects/ifnet/sys/dev/msk/if_mskreg.h (revision 277243) @@ -1,2598 +1,2599 @@ /****************************************************************************** * * Name: skgehw.h * Project: Gigabit Ethernet Adapters, Common Modules * Version: $Revision: 2.49 $ * Date: $Date: 2005/01/20 13:01:35 $ * Purpose: Defines and Macros for the Gigabit Ethernet Adapter Product Family * ******************************************************************************/ /****************************************************************************** * * LICENSE: * Copyright (C) Marvell International Ltd. and/or its affiliates * * The computer program files contained in this folder ("Files") * are provided to you under the BSD-type license terms provided * below, and any use of such Files and any derivative works * thereof created by you shall be governed by the following terms * and conditions: * * - Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. 
* - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * - Neither the name of Marvell nor the names of its contributors * may be used to endorse or promote products derived from this * software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED * OF THE POSSIBILITY OF SUCH DAMAGE. * /LICENSE * ******************************************************************************/ /*- * Copyright (c) 1997, 1998, 1999, 2000 * Bill Paul . All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Bill Paul. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. */ /*- * Copyright (c) 2003 Nathan L. Binkert * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ /*$FreeBSD$*/ /* * SysKonnect PCI vendor ID */ #define VENDORID_SK 0x1148 /* * Marvell PCI vendor ID */ #define VENDORID_MARVELL 0x11AB /* * D-Link PCI vendor ID */ #define VENDORID_DLINK 0x1186 /* * SysKonnect ethernet device IDs */ #define DEVICEID_SK_YUKON2 0x9000 #define DEVICEID_SK_YUKON2_EXPR 0x9e00 /* * Marvell gigabit ethernet device IDs */ #define DEVICEID_MRVL_8021CU 0x4340 #define DEVICEID_MRVL_8022CU 0x4341 #define DEVICEID_MRVL_8061CU 0x4342 #define DEVICEID_MRVL_8062CU 0x4343 #define DEVICEID_MRVL_8021X 0x4344 #define DEVICEID_MRVL_8022X 0x4345 #define DEVICEID_MRVL_8061X 0x4346 #define DEVICEID_MRVL_8062X 0x4347 #define DEVICEID_MRVL_8035 0x4350 #define DEVICEID_MRVL_8036 0x4351 #define DEVICEID_MRVL_8038 0x4352 #define DEVICEID_MRVL_8039 0x4353 #define DEVICEID_MRVL_8040 0x4354 #define DEVICEID_MRVL_8040T 0x4355 #define DEVICEID_MRVL_8042 0x4357 #define DEVICEID_MRVL_8048 0x435A #define DEVICEID_MRVL_4360 0x4360 #define DEVICEID_MRVL_4361 0x4361 #define DEVICEID_MRVL_4362 0x4362 #define DEVICEID_MRVL_4363 0x4363 #define DEVICEID_MRVL_4364 0x4364 #define DEVICEID_MRVL_4365 0x4365 #define DEVICEID_MRVL_436A 0x436A #define DEVICEID_MRVL_436B 0x436B #define DEVICEID_MRVL_436C 0x436C #define DEVICEID_MRVL_436D 0x436D #define DEVICEID_MRVL_4370 0x4370 #define DEVICEID_MRVL_4380 0x4380 #define DEVICEID_MRVL_4381 0x4381 /* * D-Link gigabit ethernet device ID */ #define DEVICEID_DLINK_DGE550SX 0x4001 #define DEVICEID_DLINK_DGE560SX 0x4002 #define DEVICEID_DLINK_DGE560T 0x4b00 #define BIT_31 (1U << 31) #define BIT_30 (1 << 30) #define BIT_29 (1 << 29) #define BIT_28 (1 << 28) #define BIT_27 (1 << 27) #define BIT_26 (1 << 26) #define BIT_25 (1 << 25) #define BIT_24 (1 << 24) #define BIT_23 (1 << 23) #define BIT_22 (1 << 22) #define BIT_21 (1 << 21) #define BIT_20 (1 << 20) #define BIT_19 (1 << 19) #define BIT_18 (1 << 18) #define BIT_17 (1 << 17) #define BIT_16 (1 << 16) #define BIT_15 (1 << 15) #define BIT_14 (1 << 14) #define BIT_13 (1 << 13) #define BIT_12 (1 << 12) #define BIT_11 (1 << 11) #define BIT_10 (1 << 10) #define BIT_9 (1 << 9) #define BIT_8 (1 << 8) #define BIT_7 (1 << 7) #define BIT_6 (1 << 6) #define BIT_5 (1 << 5) #define BIT_4 (1 << 4) #define BIT_3 (1 << 3) #define BIT_2 (1 << 2) #define BIT_1 (1 << 1) #define BIT_0 (1 << 0) #define SHIFT31(x) ((x) << 31) #define SHIFT30(x) ((x) << 30) #define SHIFT29(x) ((x) << 29) #define SHIFT28(x) ((x) << 28) #define SHIFT27(x) ((x) << 27) #define SHIFT26(x) ((x) << 26) #define SHIFT25(x) ((x) << 25) #define SHIFT24(x) ((x) << 24) #define SHIFT23(x) ((x) << 23) #define SHIFT22(x) ((x) << 22) #define SHIFT21(x) ((x) << 21) #define SHIFT20(x) ((x) << 20) #define SHIFT19(x) ((x) << 19) #define SHIFT18(x) ((x) << 18) #define SHIFT17(x) ((x) << 17) #define SHIFT16(x) ((x) << 16) #define SHIFT15(x) ((x) << 15) #define SHIFT14(x) ((x) << 14) #define SHIFT13(x) ((x) << 13) #define SHIFT12(x) ((x) << 12) #define SHIFT11(x) ((x) << 11) #define SHIFT10(x) ((x) << 10) #define SHIFT9(x) ((x) << 9) #define SHIFT8(x) ((x) << 8) #define SHIFT7(x) ((x) << 7) #define SHIFT6(x) ((x) << 6) #define SHIFT5(x) ((x) << 5) #define SHIFT4(x) ((x) << 4) #define SHIFT3(x) ((x) << 3) #define SHIFT2(x) ((x) << 2) #define SHIFT1(x) 
((x) << 1) #define SHIFT0(x) ((x) << 0) /* * PCI Configuration Space header */ #define PCI_BASE_1ST 0x10 /* 32 bit 1st Base address */ #define PCI_BASE_2ND 0x14 /* 32 bit 2nd Base address */ #define PCI_OUR_REG_1 0x40 /* 32 bit Our Register 1 */ #define PCI_OUR_REG_2 0x44 /* 32 bit Our Register 2 */ #define PCI_OUR_STATUS 0x7c /* 32 bit Adapter Status Register */ #define PCI_OUR_REG_3 0x80 /* 32 bit Our Register 3 */ #define PCI_OUR_REG_4 0x84 /* 32 bit Our Register 4 */ #define PCI_OUR_REG_5 0x88 /* 32 bit Our Register 5 */ #define PCI_CFG_REG_0 0x90 /* 32 bit Config Register 0 */ #define PCI_CFG_REG_1 0x94 /* 32 bit Config Register 1 */ /* PCI Express Capability */ #define PEX_CAP_ID 0xe0 /* 8 bit PEX Capability ID */ #define PEX_NITEM 0xe1 /* 8 bit PEX Next Item Pointer */ #define PEX_CAP_REG 0xe2 /* 16 bit PEX Capability Register */ #define PEX_DEV_CAP 0xe4 /* 32 bit PEX Device Capabilities */ #define PEX_DEV_CTRL 0xe8 /* 16 bit PEX Device Control */ #define PEX_DEV_STAT 0xea /* 16 bit PEX Device Status */ #define PEX_LNK_CAP 0xec /* 32 bit PEX Link Capabilities */ #define PEX_LNK_CTRL 0xf0 /* 16 bit PEX Link Control */ #define PEX_LNK_STAT 0xf2 /* 16 bit PEX Link Status */ /* PCI Express Extended Capabilities */ #define PEX_ADV_ERR_REP 0x100 /* 32 bit PEX Advanced Error Reporting */ #define PEX_UNC_ERR_STAT 0x104 /* 32 bit PEX Uncorr. Errors Status */ #define PEX_UNC_ERR_MASK 0x108 /* 32 bit PEX Uncorr. Errors Mask */ #define PEX_UNC_ERR_SEV 0x10c /* 32 bit PEX Uncorr. Errors Severity */ #define PEX_COR_ERR_STAT 0x110 /* 32 bit PEX Correc. Errors Status */ #define PEX_COR_ERR_MASK 0x114 /* 32 bit PEX Correc. Errors Mask */ #define PEX_ADV_ERR_CAP_C 0x118 /* 32 bit PEX Advanced Error Cap./Ctrl */ #define PEX_HEADER_LOG 0x11c /* 4x32 bit PEX Header Log Register */ /* PCI_OUR_REG_1 32 bit Our Register 1 */ #define PCI_Y2_PIG_ENA BIT_31 /* Enable Plug-in-Go (YUKON-2) */ #define PCI_Y2_DLL_DIS BIT_30 /* Disable PCI DLL (YUKON-2) */ #define PCI_Y2_PHY2_COMA BIT_29 /* Set PHY 2 to Coma Mode (YUKON-2) */ #define PCI_Y2_PHY1_COMA BIT_28 /* Set PHY 1 to Coma Mode (YUKON-2) */ #define PCI_Y2_PHY2_POWD BIT_27 /* Set PHY 2 to Power Down (YUKON-2) */ #define PCI_Y2_PHY1_POWD BIT_26 /* Set PHY 1 to Power Down (YUKON-2) */ #define PCI_DIS_BOOT BIT_24 /* Disable BOOT via ROM */ #define PCI_EN_IO BIT_23 /* Mapping to I/O space */ #define PCI_EN_FPROM BIT_22 /* Enable FLASH mapping to memory */ /* 1 = Map Flash to memory */ /* 0 = Disable addr. dec */ #define PCI_PAGESIZE (3L<<20)/* Bit 21..20: FLASH Page Size */ #define PCI_PAGE_16 (0L<<20)/* 16 k pages */ #define PCI_PAGE_32K (1L<<20)/* 32 k pages */ #define PCI_PAGE_64K (2L<<20)/* 64 k pages */ #define PCI_PAGE_128K (3L<<20)/* 128 k pages */ #define PCI_PAGEREG (7L<<16)/* Bit 18..16: Page Register */ #define PCI_PEX_LEGNAT BIT_15 /* PEX PM legacy/native mode (YUKON-2) */ #define PCI_FORCE_BE BIT_14 /* Assert all BEs on MR */ #define PCI_DIS_MRL BIT_13 /* Disable Mem Read Line */ #define PCI_DIS_MRM BIT_12 /* Disable Mem Read Multiple */ #define PCI_DIS_MWI BIT_11 /* Disable Mem Write & Invalidate */ #define PCI_DISC_CLS BIT_10 /* Disc: cacheLsz bound */ #define PCI_BURST_DIS BIT_9 /* Burst Disable */ #define PCI_DIS_PCI_CLK BIT_8 /* Disable PCI clock driving */ #define PCI_SKEW_DAS (0xfL<<4)/* Bit 7.. 4: Skew Ctrl, DAS Ext */ #define PCI_SKEW_BASE 0xfL /* Bit 3.. 0: Skew Ctrl, Base */ #define PCI_CLS_OPT BIT_3 /* Cache Line Size opt. 
PCI-X (YUKON-2) */ /* PCI_OUR_REG_2 32 bit Our Register 2 */ #define PCI_VPD_WR_THR (0xff<<24) /* Bit 31..24: VPD Write Threshold */ #define PCI_DEV_SEL (0x7f<<17) /* Bit 23..17: EEPROM Device Select */ #define PCI_VPD_ROM_SZ (0x07<<14) /* Bit 16..14: VPD ROM Size */ /* Bit 13..12: reserved */ #define PCI_PATCH_DIR (0x0f<<8) /* Bit 11.. 8: Ext Patches dir 3..0 */ #define PCI_PATCH_DIR_3 BIT_11 #define PCI_PATCH_DIR_2 BIT_10 #define PCI_PATCH_DIR_1 BIT_9 #define PCI_PATCH_DIR_0 BIT_8 #define PCI_EXT_PATCHS (0x0f<<4) /* Bit 7.. 4: Extended Patches 3..0 */ #define PCI_EXT_PATCH_3 BIT_7 #define PCI_EXT_PATCH_2 BIT_6 #define PCI_EXT_PATCH_1 BIT_5 #define PCI_EXT_PATCH_0 BIT_4 #define PCI_EN_DUMMY_RD BIT_3 /* Enable Dummy Read */ #define PCI_REV_DESC BIT_2 /* Reverse Desc. Bytes */ #define PCI_USEDATA64 BIT_0 /* Use 64Bit Data bus ext */ /* PCI_OUR_STATUS 32 bit Adapter Status Register (Yukon-2) */ #define PCI_OS_PCI64B BIT_31 /* Conventional PCI 64 bits Bus */ #define PCI_OS_PCIX BIT_30 /* PCI-X Bus */ #define PCI_OS_MODE_MSK (3<<28) /* Bit 29..28: PCI-X Bus Mode Mask */ #define PCI_OS_PCI66M BIT_27 /* PCI 66 MHz Bus */ #define PCI_OS_PCI_X BIT_26 /* PCI/PCI-X Bus (0 = PEX) */ #define PCI_OS_DLLE_MSK (3<<24) /* Bit 25..24: DLL Status Indication */ #define PCI_OS_DLLR_MSK (0x0f<<20) /* Bit 23..20: DLL Row Counters Values */ #define PCI_OS_DLLC_MSK (0x0f<<16) /* Bit 19..16: DLL Col. Counters Values */ #define PCI_OS_SPEED(val) ((val & PCI_OS_MODE_MSK) >> 28) /* PCI-X Speed */ /* possible values for the speed field of the register */ #define PCI_OS_SPD_PCI 0 /* PCI Conventional Bus */ #define PCI_OS_SPD_X66 1 /* PCI-X 66MHz Bus */ #define PCI_OS_SPD_X100 2 /* PCI-X 100MHz Bus */ #define PCI_OS_SPD_X133 3 /* PCI-X 133MHz Bus */ /* PCI_OUR_REG_3 32 bit Our Register 3 (Yukon-ECU only) */ #define PCI_CLK_MACSEC_DIS BIT_17 /* Disable Clock MACSec. */ /* PCI_OUR_REG_4 32 bit Our Register 4 (Yukon-ECU only) */ #define PCI_TIMER_VALUE_MSK (0xff<<16) /* Bit 23..16: Timer Value Mask */ #define PCI_FORCE_ASPM_REQUEST BIT_15 /* Force ASPM Request (A1 only) */ #define PCI_ASPM_GPHY_LINK_DOWN BIT_14 /* GPHY Link Down (A1 only) */ #define PCI_ASPM_INT_FIFO_EMPTY BIT_13 /* Internal FIFO Empty (A1 only) */ #define PCI_ASPM_CLKRUN_REQUEST BIT_12 /* CLKRUN Request (A1 only) */ #define PCI_ASPM_FORCE_CLKREQ_ENA BIT_4 /* Force CLKREQ Enable (A1b only) */ #define PCI_ASPM_CLKREQ_PAD_CTL BIT_3 /* CLKREQ PAD Control (A1 only) */ #define PCI_ASPM_A1_MODE_SELECT BIT_2 /* A1 Mode Select (A1 only) */ #define PCI_CLK_GATE_PEX_UNIT_ENA BIT_1 /* Enable Gate PEX Unit Clock */ #define PCI_CLK_GATE_ROOT_COR_ENA BIT_0 /* Enable Gate Root Core Clock */ /* PCI_OUR_REG_5 32 bit Our Register 5 (Yukon-ECU only) */ /* Bit 31..27: for A3 & later */ #define PCI_CTL_DIV_CORE_CLK_ENA BIT_31 /* Divide Core Clock Enable */ #define PCI_CTL_SRESET_VMAIN_AV BIT_30 /* Soft Reset for Vmain_av De-Glitch */ #define PCI_CTL_BYPASS_VMAIN_AV BIT_29 /* Bypass En. 
for Vmain_av De-Glitch */ #define PCI_CTL_TIM_VMAIN_AV1 BIT_28 /* Bit 28..27: Timer Vmain_av Mask */ #define PCI_CTL_TIM_VMAIN_AV0 BIT_27 /* Bit 28..27: Timer Vmain_av Mask */ #define PCI_CTL_TIM_VMAIN_AV_MSK (BIT_28 | BIT_27) /* Bit 26..16: Release Clock on Event */ #define PCI_REL_PCIE_RST_DE_ASS BIT_26 /* PCIe Reset De-Asserted */ #define PCI_REL_GPHY_REC_PACKET BIT_25 /* GPHY Received Packet */ #define PCI_REL_INT_FIFO_N_EMPTY BIT_24 /* Internal FIFO Not Empty */ #define PCI_REL_MAIN_PWR_AVAIL BIT_23 /* Main Power Available */ #define PCI_REL_CLKRUN_REQ_REL BIT_22 /* CLKRUN Request Release */ #define PCI_REL_PCIE_RESET_ASS BIT_21 /* PCIe Reset Asserted */ #define PCI_REL_PME_ASSERTED BIT_20 /* PME Asserted */ #define PCI_REL_PCIE_EXIT_L1_ST BIT_19 /* PCIe Exit L1 State */ #define PCI_REL_LOADER_NOT_FIN BIT_18 /* EPROM Loader Not Finished */ #define PCI_REL_PCIE_RX_EX_IDLE BIT_17 /* PCIe Rx Exit Electrical Idle State */ #define PCI_REL_GPHY_LINK_UP BIT_16 /* GPHY Link Up */ /* Bit 10.. 0: Mask for Gate Clock */ #define PCI_GAT_PCIE_RST_ASSERTED BIT_10 /* PCIe Reset Asserted */ #define PCI_GAT_GPHY_N_REC_PACKET BIT_9 /* GPHY Not Received Packet */ #define PCI_GAT_INT_FIFO_EMPTY BIT_8 /* Internal FIFO Empty */ #define PCI_GAT_MAIN_PWR_N_AVAIL BIT_7 /* Main Power Not Available */ #define PCI_GAT_CLKRUN_REQ_REL BIT_6 /* CLKRUN Not Requested */ #define PCI_GAT_PCIE_RESET_ASS BIT_5 /* PCIe Reset Asserted */ #define PCI_GAT_PME_DE_ASSERTED BIT_4 /* PME De-Asserted */ #define PCI_GAT_PCIE_ENTER_L1_ST BIT_3 /* PCIe Enter L1 State */ #define PCI_GAT_LOADER_FINISHED BIT_2 /* EPROM Loader Finished */ #define PCI_GAT_PCIE_RX_EL_IDLE BIT_1 /* PCIe Rx Electrical Idle State */ #define PCI_GAT_GPHY_LINK_DOWN BIT_0 /* GPHY Link Down */ /* PCI_CFG_REG_1 32 bit Config Register 1 */ #define PCI_CF1_DIS_REL_EVT_RST BIT_24 /* Dis. Rel. Event during PCIE reset */ /* Bit 23..21: Release Clock on Event */ #define PCI_CF1_REL_LDR_NOT_FIN BIT_23 /* EEPROM Loader Not Finished */ #define PCI_CF1_REL_VMAIN_AVLBL BIT_22 /* Vmain available */ #define PCI_CF1_REL_PCIE_RESET BIT_21 /* PCI-E reset */ /* Bit 20..18: Gate Clock on Event */ #define PCI_CF1_GAT_LDR_NOT_FIN BIT_20 /* EEPROM Loader Finished */ #define PCI_CF1_GAT_PCIE_RX_IDLE BIT_19 /* PCI-E Rx Electrical idle */ #define PCI_CF1_GAT_PCIE_RESET BIT_18 /* PCI-E Reset */ #define PCI_CF1_PRST_PHY_CLKREQ BIT_17 /* Enable PCI-E rst & PM2PHY gen. CLKREQ */ #define PCI_CF1_PCIE_RST_CLKREQ BIT_16 /* Enable PCI-E rst generate CLKREQ */ #define PCI_CF1_ENA_CFG_LDR_DONE BIT_8 /* Enable core level Config loader done */ #define PCI_CF1_ENA_TXBMU_RD_IDLE BIT_1 /* Enable TX BMU Read IDLE for ASPM */ #define PCI_CF1_ENA_TXBMU_WR_IDLE BIT_0 /* Enable TX BMU Write IDLE for ASPM */ /* PEX_DEV_CTRL 16 bit PEX Device Control (Yukon-2) */ #define PEX_DC_MAX_RRS_MSK (7<<12) /* Bit 14..12: Max. Read Request Size */ #define PEX_DC_EN_NO_SNOOP BIT_11 /* Enable No Snoop */ #define PEX_DC_EN_AUX_POW BIT_10 /* Enable AUX Power */ #define PEX_DC_EN_PHANTOM BIT_9 /* Enable Phantom Functions */ #define PEX_DC_EN_EXT_TAG BIT_8 /* Enable Extended Tag Field */ #define PEX_DC_MAX_PLS_MSK (7<<5) /* Bit 7.. 5: Max. 
Payload Size Mask */ #define PEX_DC_EN_REL_ORD BIT_4 /* Enable Relaxed Ordering */ #define PEX_DC_EN_UNS_RQ_RP BIT_3 /* Enable Unsupported Request Reporting */ #define PEX_DC_EN_FAT_ER_RP BIT_2 /* Enable Fatal Error Reporting */ #define PEX_DC_EN_NFA_ER_RP BIT_1 /* Enable Non-Fatal Error Reporting */ #define PEX_DC_EN_COR_ER_RP BIT_0 /* Enable Correctable Error Reporting */ #define PEX_DC_MAX_RD_RQ_SIZE(x) (SHIFT12(x) & PEX_DC_MAX_RRS_MSK) /* PEX_LNK_STAT 16 bit PEX Link Status (Yukon-2) */ #define PEX_LS_SLOT_CLK_CFG BIT_12 /* Slot Clock Config */ #define PEX_LS_LINK_TRAIN BIT_11 /* Link Training */ #define PEX_LS_TRAIN_ERROR BIT_10 /* Training Error */ #define PEX_LS_LINK_WI_MSK (0x3f<<4) /* Bit 9.. 4: Neg. Link Width Mask */ #define PEX_LS_LINK_SP_MSK 0x0f /* Bit 3.. 0: Link Speed Mask */ /* PEX_UNC_ERR_STAT PEX Uncorrectable Errors Status Register (Yukon-2) */ #define PEX_UNSUP_REQ BIT_20 /* Unsupported Request Error */ #define PEX_MALFOR_TLP BIT_18 /* Malformed TLP */ #define PEX_RX_OV BIT_17 /* Receiver Overflow (not supported) */ #define PEX_UNEXP_COMP BIT_16 /* Unexpected Completion */ #define PEX_COMP_TO BIT_14 /* Completion Timeout */ #define PEX_FLOW_CTRL_P BIT_13 /* Flow Control Protocol Error */ #define PEX_POIS_TLP BIT_12 /* Poisoned TLP */ #define PEX_DATA_LINK_P BIT_4 /* Data Link Protocol Error */ #define PEX_FATAL_ERRORS (PEX_MALFOR_TLP | PEX_FLOW_CTRL_P | PEX_DATA_LINK_P) /* Control Register File (Address Map) */ /* * Bank 0 */ #define B0_RAP 0x0000 /* 8 bit Register Address Port */ #define B0_CTST 0x0004 /* 16 bit Control/Status register */ #define B0_LED 0x0006 /* 8 Bit LED register */ #define B0_POWER_CTRL 0x0007 /* 8 Bit Power Control reg (YUKON only) */ #define B0_ISRC 0x0008 /* 32 bit Interrupt Source Register */ #define B0_IMSK 0x000c /* 32 bit Interrupt Mask Register */ #define B0_HWE_ISRC 0x0010 /* 32 bit HW Error Interrupt Src Reg */ #define B0_HWE_IMSK 0x0014 /* 32 bit HW Error Interrupt Mask Reg */ #define B0_SP_ISRC 0x0018 /* 32 bit Special Interrupt Source Reg 1 */ /* Special ISR registers (Yukon-2 only) */ #define B0_Y2_SP_ISRC2 0x001c /* 32 bit Special Interrupt Source Reg 2 */ #define B0_Y2_SP_ISRC3 0x0020 /* 32 bit Special Interrupt Source Reg 3 */ #define B0_Y2_SP_EISR 0x0024 /* 32 bit Enter ISR Reg */ #define B0_Y2_SP_LISR 0x0028 /* 32 bit Leave ISR Reg */ #define B0_Y2_SP_ICR 0x002c /* 32 bit Interrupt Control Reg */ /* * Bank 1 * - completely empty (this is the RAP Block window) * Note: if RAP = 1 this page is reserved */ /* * Bank 2 */ /* NA reg = 48 bit Network Address Register, 3x16 or 8x8 bit readable */ #define B2_MAC_1 0x0100 /* NA reg MAC Address 1 */ #define B2_MAC_2 0x0108 /* NA reg MAC Address 2 */ #define B2_MAC_3 0x0110 /* NA reg MAC Address 3 */ #define B2_CONN_TYP 0x0118 /* 8 bit Connector type */ #define B2_PMD_TYP 0x0119 /* 8 bit PMD type */ #define B2_MAC_CFG 0x011a /* 8 bit MAC Configuration / Chip Revision */ #define B2_CHIP_ID 0x011b /* 8 bit Chip Identification Number */ #define B2_E_0 0x011c /* 8 bit EPROM Byte 0 (ext. 
SRAM size */ #define B2_Y2_CLK_GATE 0x011d /* 8 bit Clock Gating (Yukon-2) */ #define B2_Y2_HW_RES 0x011e /* 8 bit HW Resources (Yukon-2) */ #define B2_E_3 0x011f /* 8 bit EPROM Byte 3 */ #define B2_Y2_CLK_CTRL 0x0120 /* 32 bit Core Clock Frequency Control */ #define B2_TI_INI 0x0130 /* 32 bit Timer Init Value */ #define B2_TI_VAL 0x0134 /* 32 bit Timer Value */ #define B2_TI_CTRL 0x0138 /* 8 bit Timer Control */ #define B2_TI_TEST 0x0139 /* 8 Bit Timer Test */ #define B2_IRQM_INI 0x0140 /* 32 bit IRQ Moderation Timer Init Reg.*/ #define B2_IRQM_VAL 0x0144 /* 32 bit IRQ Moderation Timer Value */ #define B2_IRQM_CTRL 0x0148 /* 8 bit IRQ Moderation Timer Control */ #define B2_IRQM_TEST 0x0149 /* 8 bit IRQ Moderation Timer Test */ #define B2_IRQM_MSK 0x014c /* 32 bit IRQ Moderation Mask */ #define B2_IRQM_HWE_MSK 0x0150 /* 32 bit IRQ Moderation HW Error Mask */ #define B2_TST_CTRL1 0x0158 /* 8 bit Test Control Register 1 */ #define B2_TST_CTRL2 0x0159 /* 8 bit Test Control Register 2 */ #define B2_GP_IO 0x015c /* 32 bit General Purpose I/O Register */ #define B2_I2C_CTRL 0x0160 /* 32 bit I2C HW Control Register */ #define B2_I2C_DATA 0x0164 /* 32 bit I2C HW Data Register */ #define B2_I2C_IRQ 0x0168 /* 32 bit I2C HW IRQ Register */ #define B2_I2C_SW 0x016c /* 32 bit I2C SW Port Register */ #define Y2_PEX_PHY_DATA 0x0170 /* 16 bit PEX PHY Data Register */ #define Y2_PEX_PHY_ADDR 0x0172 /* 16 bit PEX PHY Address Register */ /* * Bank 3 */ /* RAM Random Registers */ #define B3_RAM_ADDR 0x0180 /* 32 bit RAM Address, to read or write */ #define B3_RAM_DATA_LO 0x0184 /* 32 bit RAM Data Word (low dWord) */ #define B3_RAM_DATA_HI 0x0188 /* 32 bit RAM Data Word (high dWord) */ #define SELECT_RAM_BUFFER(rb, addr) (addr | (rb << 6)) /* Yukon-2 only */ /* RAM Interface Registers */ /* Yukon-2: use SELECT_RAM_BUFFER() to access the RAM buffer */ /* * The HW-Spec. calls these registers Timeout Value 0..11, but these names are * not usable in SW. Note that these are NOT real timeouts; they are * the number of qWords transferred continuously.
*/ #define B3_RI_WTO_R1 0x0190 /* 8 bit WR Timeout Queue R1 (TO0) */ #define B3_RI_WTO_XA1 0x0191 /* 8 bit WR Timeout Queue XA1 (TO1) */ #define B3_RI_WTO_XS1 0x0192 /* 8 bit WR Timeout Queue XS1 (TO2) */ #define B3_RI_RTO_R1 0x0193 /* 8 bit RD Timeout Queue R1 (TO3) */ #define B3_RI_RTO_XA1 0x0194 /* 8 bit RD Timeout Queue XA1 (TO4) */ #define B3_RI_RTO_XS1 0x0195 /* 8 bit RD Timeout Queue XS1 (TO5) */ #define B3_RI_WTO_R2 0x0196 /* 8 bit WR Timeout Queue R2 (TO6) */ #define B3_RI_WTO_XA2 0x0197 /* 8 bit WR Timeout Queue XA2 (TO7) */ #define B3_RI_WTO_XS2 0x0198 /* 8 bit WR Timeout Queue XS2 (TO8) */ #define B3_RI_RTO_R2 0x0199 /* 8 bit RD Timeout Queue R2 (TO9) */ #define B3_RI_RTO_XA2 0x019a /* 8 bit RD Timeout Queue XA2 (TO10)*/ #define B3_RI_RTO_XS2 0x019b /* 8 bit RD Timeout Queue XS2 (TO11)*/ #define B3_RI_TO_VAL 0x019c /* 8 bit Current Timeout Count Val */ #define B3_RI_CTRL 0x01a0 /* 16 bit RAM Interface Control Register */ #define B3_RI_TEST 0x01a2 /* 8 bit RAM Interface Test Register */ /* * Bank 4 - 5 */ /* Transmit Arbiter Registers MAC 1 and 2, use MR_ADDR() to access */ #define TXA_ITI_INI 0x0200 /* 32 bit Tx Arb Interval Timer Init Val*/ #define TXA_ITI_VAL 0x0204 /* 32 bit Tx Arb Interval Timer Value */ #define TXA_LIM_INI 0x0208 /* 32 bit Tx Arb Limit Counter Init Val */ #define TXA_LIM_VAL 0x020c /* 32 bit Tx Arb Limit Counter Value */ #define TXA_CTRL 0x0210 /* 8 bit Tx Arbiter Control Register */ #define TXA_TEST 0x0211 /* 8 bit Tx Arbiter Test Register */ #define TXA_STAT 0x0212 /* 8 bit Tx Arbiter Status Register */ #define MR_ADDR(Mac, Offs) (((Mac) << 7) + (Offs)) /* RSS key registers for Yukon-2 Family */ #define B4_RSS_KEY 0x0220 /* 4x32 bit RSS Key register (Yukon-2) */ /* RSS key register offsets */ #define KEY_IDX_0 0 /* offset for location of KEY 0 */ #define KEY_IDX_1 4 /* offset for location of KEY 1 */ #define KEY_IDX_2 8 /* offset for location of KEY 2 */ #define KEY_IDX_3 12 /* offset for location of KEY 3 */ /* 0x0280 - 0x0292: MAC 2 */ #define RSS_KEY_ADDR(Port, KeyIndex) \ ((B4_RSS_KEY | ( ((Port) == 0) ? 
0 : 0x80)) + (KeyIndex)) /* * Bank 8 - 15 */ /* Receive and Transmit Queue Registers, use Q_ADDR() to access */ #define B8_Q_REGS 0x0400 /* Queue Register Offsets, use Q_ADDR() to access */ #define Q_D 0x00 /* 8*32 bit Current Descriptor */ #define Q_DA_L 0x20 /* 32 bit Current Descriptor Address Low dWord */ #define Q_DONE 0x24 /* 16 bit Done Index */ #define Q_AC_L 0x28 /* 32 bit Current Address Counter Low dWord */ #define Q_AC_H 0x2c /* 32 bit Current Address Counter High dWord */ #define Q_BC 0x30 /* 32 bit Current Byte Counter */ #define Q_CSR 0x34 /* 32 bit BMU Control/Status Register */ #define Q_F 0x38 /* 32 bit Flag Register */ #define Q_T1 0x3c /* 32 bit Test Register 1 */ #define Q_T1_TR 0x3c /* 8 bit Test Register 1 Transfer SM */ #define Q_T1_WR 0x3d /* 8 bit Test Register 1 Write Descriptor SM */ #define Q_T1_RD 0x3e /* 8 bit Test Register 1 Read Descriptor SM */ #define Q_T1_SV 0x3f /* 8 bit Test Register 1 Supervisor SM */ #define Q_WM 0x40 /* 16 bit FIFO Watermark */ #define Q_AL 0x42 /* 8 bit FIFO Alignment */ #define Q_RSP 0x44 /* 16 bit FIFO Read Shadow Pointer */ #define Q_RSL 0x46 /* 8 bit FIFO Read Shadow Level */ #define Q_RP 0x48 /* 8 bit FIFO Read Pointer */ #define Q_RL 0x4a /* 8 bit FIFO Read Level */ #define Q_WP 0x4c /* 8 bit FIFO Write Pointer */ #define Q_WSP 0x4d /* 8 bit FIFO Write Shadow Pointer */ #define Q_WL 0x4e /* 8 bit FIFO Write Level */ #define Q_WSL 0x4f /* 8 bit FIFO Write Shadow Level */ #define Q_ADDR(Queue, Offs) (B8_Q_REGS + (Queue) + (Offs)) /* Queue Prefetch Unit Offsets, use Y2_PREF_Q_ADDR() to address */ #define Y2_B8_PREF_REGS 0x0450 #define PREF_UNIT_CTRL_REG 0x00 /* 32 bit Prefetch Control register */ #define PREF_UNIT_LAST_IDX_REG 0x04 /* 16 bit Last Index */ #define PREF_UNIT_ADDR_LOW_REG 0x08 /* 32 bit List start addr, low part */ #define PREF_UNIT_ADDR_HI_REG 0x0c /* 32 bit List start addr, high part*/ #define PREF_UNIT_GET_IDX_REG 0x10 /* 16 bit Get Index */ #define PREF_UNIT_PUT_IDX_REG 0x14 /* 16 bit Put Index */ #define PREF_UNIT_FIFO_WP_REG 0x20 /* 8 bit FIFO write pointer */ #define PREF_UNIT_FIFO_RP_REG 0x24 /* 8 bit FIFO read pointer */ #define PREF_UNIT_FIFO_WM_REG 0x28 /* 8 bit FIFO watermark */ #define PREF_UNIT_FIFO_LEV_REG 0x2c /* 8 bit FIFO level */ #define PREF_UNIT_MASK_IDX 0x0fff #define Y2_PREF_Q_ADDR(Queue, Offs) (Y2_B8_PREF_REGS + (Queue) + (Offs)) /* * Bank 16 - 23 */ /* RAM Buffer Registers */ #define B16_RAM_REGS 0x0800 /* RAM Buffer Register Offsets, use RB_ADDR() to access */ #define RB_START 0x00 /* 32 bit RAM Buffer Start Address */ #define RB_END 0x04 /* 32 bit RAM Buffer End Address */ #define RB_WP 0x08 /* 32 bit RAM Buffer Write Pointer */ #define RB_RP 0x0c /* 32 bit RAM Buffer Read Pointer */ #define RB_RX_UTPP 0x10 /* 32 bit Rx Upper Threshold, Pause Packet */ #define RB_RX_LTPP 0x14 /* 32 bit Rx Lower Threshold, Pause Packet */ #define RB_RX_UTHP 0x18 /* 32 bit Rx Upper Threshold, High Prio */ #define RB_RX_LTHP 0x1c /* 32 bit Rx Lower Threshold, High Prio */ #define RB_PC 0x20 /* 32 bit RAM Buffer Packet Counter */ #define RB_LEV 0x24 /* 32 bit RAM Buffer Level Register */ #define RB_CTRL 0x28 /* 8 bit RAM Buffer Control Register */ #define RB_TST1 0x29 /* 8 bit RAM Buffer Test Register 1 */ #define RB_TST2 0x2a /* 8 bit RAM Buffer Test Register 2 */ /* * Bank 24 */ /* Receive GMAC FIFO (YUKON and Yukon-2), use MR_ADDR() to access */ #define RX_GMF_EA 0x0c40 /* 32 bit Rx GMAC FIFO End Address */ #define RX_GMF_AF_THR 0x0c44 /* 32 bit Rx GMAC FIFO Almost Full Thresh. 
*/ #define RX_GMF_CTRL_T 0x0c48 /* 32 bit Rx GMAC FIFO Control/Test */ #define RX_GMF_FL_MSK 0x0c4c /* 32 bit Rx GMAC FIFO Flush Mask */ #define RX_GMF_FL_THR 0x0c50 /* 32 bit Rx GMAC FIFO Flush Threshold */ #define RX_GMF_TR_THR 0x0c54 /* 32 bit Rx Truncation Threshold (Yukon-2) */ #define RX_GMF_UP_THR 0x0c58 /* 16 bit Rx Upper Pause Thr (Yukon-EC_U) */ #define RX_GMF_LP_THR 0x0c5a /* 16 bit Rx Lower Pause Thr (Yukon-EC_U) */ #define RX_GMF_VLAN 0x0c5c /* 32 bit Rx VLAN Type Register (Yukon-2) */ #define RX_GMF_WP 0x0c60 /* 32 bit Rx GMAC FIFO Write Pointer */ #define RX_GMF_WLEV 0x0c68 /* 32 bit Rx GMAC FIFO Write Level */ #define RX_GMF_RP 0x0c70 /* 32 bit Rx GMAC FIFO Read Pointer */ #define RX_GMF_RLEV 0x0c78 /* 32 bit Rx GMAC FIFO Read Level */ /* * Bank 25 */ /* 0x0c80 - 0x0cbf: MAC 2 */ /* 0x0cc0 - 0x0cff: reserved */ /* * Bank 26 */ /* Transmit GMAC FIFO (YUKON and Yukon-2), use MR_ADDR() to access */ #define TX_GMF_EA 0x0d40 /* 32 bit Tx GMAC FIFO End Address */ #define TX_GMF_AE_THR 0x0d44 /* 32 bit Tx GMAC FIFO Almost Empty Thresh.*/ #define TX_GMF_CTRL_T 0x0d48 /* 32 bit Tx GMAC FIFO Control/Test */ #define TX_GMF_VLAN 0x0d5c /* 32 bit Tx VLAN Type Register (Yukon-2) */ #define TX_GMF_WP 0x0d60 /* 32 bit Tx GMAC FIFO Write Pointer */ #define TX_GMF_WSP 0x0d64 /* 32 bit Tx GMAC FIFO Write Shadow Pointer */ #define TX_GMF_WLEV 0x0d68 /* 32 bit Tx GMAC FIFO Write Level */ #define TX_GMF_RP 0x0d70 /* 32 bit Tx GMAC FIFO Read Pointer */ #define TX_GMF_RSTP 0x0d74 /* 32 bit Tx GMAC FIFO Restart Pointer */ #define TX_GMF_RLEV 0x0d78 /* 32 bit Tx GMAC FIFO Read Level */ /* * Bank 27 */ /* 0x0d80 - 0x0dbf: MAC 2 */ /* 0x0daa - 0x0dff: reserved */ /* * Bank 28 */ /* Descriptor Poll Timer Registers */ #define B28_DPT_INI 0x0e00 /* 24 bit Descriptor Poll Timer Init Val */ #define B28_DPT_VAL 0x0e04 /* 24 bit Descriptor Poll Timer Curr Val */ #define B28_DPT_CTRL 0x0e08 /* 8 bit Descriptor Poll Timer Ctrl Reg */ #define B28_DPT_TST 0x0e0a /* 8 bit Descriptor Poll Timer Test Reg */ /* Time Stamp Timer Registers (YUKON only) */ #define GMAC_TI_ST_VAL 0x0e14 /* 32 bit Time Stamp Timer Curr Val */ #define GMAC_TI_ST_CTRL 0x0e18 /* 8 bit Time Stamp Timer Ctrl Reg */ #define GMAC_TI_ST_TST 0x0e1a /* 8 bit Time Stamp Timer Test Reg */ /* Polling Unit Registers (Yukon-2 only) */ #define POLL_CTRL 0x0e20 /* 32 bit Polling Unit Control Reg */ #define POLL_LAST_IDX 0x0e24 /* 16 bit Polling Unit List Last Index */ #define POLL_LIST_ADDR_LO 0x0e28 /* 32 bit Poll. List Start Addr (low) */ #define POLL_LIST_ADDR_HI 0x0e2c /* 32 bit Poll. 
List Start Addr (high) */ /* ASF Subsystem Registers (Yukon-2 only) */ #define B28_Y2_SMB_CONFIG 0x0e40 /* 32 bit ASF SMBus Config Register */ #define B28_Y2_SMB_CSD_REG 0x0e44 /* 32 bit ASF SMB Control/Status/Data */ #define B28_Y2_CPU_WDOG 0x0e48 /* 32 bit Watchdog Register */ #define B28_Y2_ASF_IRQ_V_BASE 0x0e60 /* 32 bit ASF IRQ Vector Base */ #define B28_Y2_ASF_STAT_CMD 0x0e68 /* 32 bit ASF Status and Command Reg */ #define B28_Y2_ASF_HCU_CCSR 0x0e68 /* 32 bit ASF HCU CCSR (Yukon EX) */ #define B28_Y2_ASF_HOST_COM 0x0e6c /* 32 bit ASF Host Communication Reg */ #define B28_Y2_DATA_REG_1 0x0e70 /* 32 bit ASF/Host Data Register 1 */ #define B28_Y2_DATA_REG_2 0x0e74 /* 32 bit ASF/Host Data Register 2 */ #define B28_Y2_DATA_REG_3 0x0e78 /* 32 bit ASF/Host Data Register 3 */ #define B28_Y2_DATA_REG_4 0x0e7c /* 32 bit ASF/Host Data Register 4 */ /* * Bank 29 */ /* Status BMU Registers (Yukon-2 only)*/ #define STAT_CTRL 0x0e80 /* 32 bit Status BMU Control Reg */ #define STAT_LAST_IDX 0x0e84 /* 16 bit Status BMU Last Index */ #define STAT_LIST_ADDR_LO 0x0e88 /* 32 bit Status List Start Addr (low) */ #define STAT_LIST_ADDR_HI 0x0e8c /* 32 bit Status List Start Addr (high) */ #define STAT_TXA1_RIDX 0x0e90 /* 16 bit Status TxA1 Report Index Reg */ #define STAT_TXS1_RIDX 0x0e92 /* 16 bit Status TxS1 Report Index Reg */ #define STAT_TXA2_RIDX 0x0e94 /* 16 bit Status TxA2 Report Index Reg */ #define STAT_TXS2_RIDX 0x0e96 /* 16 bit Status TxS2 Report Index Reg */ #define STAT_TX_IDX_TH 0x0e98 /* 16 bit Status Tx Index Threshold Reg */ #define STAT_PUT_IDX 0x0e9c /* 16 bit Status Put Index Reg */ /* FIFO Control/Status Registers (Yukon-2 only)*/ #define STAT_FIFO_WP 0x0ea0 /* 8 bit Status FIFO Write Pointer Reg */ #define STAT_FIFO_RP 0x0ea4 /* 8 bit Status FIFO Read Pointer Reg */ #define STAT_FIFO_RSP 0x0ea6 /* 8 bit Status FIFO Read Shadow Ptr */ #define STAT_FIFO_LEVEL 0x0ea8 /* 8 bit Status FIFO Level Reg */ #define STAT_FIFO_SHLVL 0x0eaa /* 8 bit Status FIFO Shadow Level Reg */ #define STAT_FIFO_WM 0x0eac /* 8 bit Status FIFO Watermark Reg */ #define STAT_FIFO_ISR_WM 0x0ead /* 8 bit Status FIFO ISR Watermark Reg */ /* Level and ISR Timer Registers (Yukon-2 only)*/ #define STAT_LEV_TIMER_INI 0x0eb0 /* 32 bit Level Timer Init. Value Reg */ #define STAT_LEV_TIMER_CNT 0x0eb4 /* 32 bit Level Timer Counter Reg */ #define STAT_LEV_TIMER_CTRL 0x0eb8 /* 8 bit Level Timer Control Reg */ #define STAT_LEV_TIMER_TEST 0x0eb9 /* 8 bit Level Timer Test Reg */ #define STAT_TX_TIMER_INI 0x0ec0 /* 32 bit Tx Timer Init. Value Reg */ #define STAT_TX_TIMER_CNT 0x0ec4 /* 32 bit Tx Timer Counter Reg */ #define STAT_TX_TIMER_CTRL 0x0ec8 /* 8 bit Tx Timer Control Reg */ #define STAT_TX_TIMER_TEST 0x0ec9 /* 8 bit Tx Timer Test Reg */ #define STAT_ISR_TIMER_INI 0x0ed0 /* 32 bit ISR Timer Init. 
Value Reg */ #define STAT_ISR_TIMER_CNT 0x0ed4 /* 32 bit ISR Timer Counter Reg */ #define STAT_ISR_TIMER_CTRL 0x0ed8 /* 8 bit ISR Timer Control Reg */ #define STAT_ISR_TIMER_TEST 0x0ed9 /* 8 bit ISR Timer Test Reg */ #define ST_LAST_IDX_MASK 0x007f /* Last Index Mask */ #define ST_TXRP_IDX_MASK 0x0fff /* Tx Report Index Mask */ #define ST_TXTH_IDX_MASK 0x0fff /* Tx Threshold Index Mask */ #define ST_WM_IDX_MASK 0x3f /* FIFO Watermark Index Mask */ /* * Bank 30 */ /* GMAC and GPHY Control Registers (YUKON only) */ #define GMAC_CTRL 0x0f00 /* 32 bit GMAC Control Reg */ #define GPHY_CTRL 0x0f04 /* 32 bit GPHY Control Reg */ #define GMAC_IRQ_SRC 0x0f08 /* 8 bit GMAC Interrupt Source Reg */ #define GMAC_IRQ_MSK 0x0f0c /* 8 bit GMAC Interrupt Mask Reg */ #define GMAC_LINK_CTRL 0x0f10 /* 16 bit Link Control Reg */ /* Wake-up Frame Pattern Match Control Registers (YUKON only) */ #define WOL_REG_OFFS 0x20 /* HW-Bug: Address is + 0x20 against spec. */ #define WOL_CTRL_STAT 0x0f20 /* 16 bit WOL Control/Status Reg */ #define WOL_MATCH_CTL 0x0f22 /* 8 bit WOL Match Control Reg */ #define WOL_MATCH_RES 0x0f23 /* 8 bit WOL Match Result Reg */ #define WOL_MAC_ADDR_LO 0x0f24 /* 32 bit WOL MAC Address Low */ #define WOL_MAC_ADDR_HI 0x0f28 /* 16 bit WOL MAC Address High */ #define WOL_PATT_PME 0x0f2a /* 8 bit WOL PME Match Enable (Yukon-2) */ #define WOL_PATT_ASFM 0x0f2b /* 8 bit WOL ASF Match Enable (Yukon-2) */ #define WOL_PATT_RPTR 0x0f2c /* 8 bit WOL Pattern Read Pointer */ /* WOL Pattern Length Registers (YUKON only) */ #define WOL_PATT_LEN_LO 0x0f30 /* 32 bit WOL Pattern Length 3..0 */ #define WOL_PATT_LEN_HI 0x0f34 /* 24 bit WOL Pattern Length 6..4 */ /* WOL Pattern Counter Registers (YUKON only) */ #define WOL_PATT_CNT_0 0x0f38 /* 32 bit WOL Pattern Counter 3..0 */ #define WOL_PATT_CNT_4 0x0f3c /* 24 bit WOL Pattern Counter 6..4 */ /* * Bank 32 - 33 */ #define WOL_PATT_RAM_1 0x1000 /* WOL Pattern RAM Link 1 */ #define WOL_PATT_RAM_2 0x1400 /* WOL Pattern RAM Link 2 */ /* offset to configuration space on Yukon-2 */ #define Y2_CFG_SPC 0x1c00 #define BASE_GMAC_1 0x2800 /* GMAC 1 registers */ #define BASE_GMAC_2 0x3800 /* GMAC 2 registers */ /* * Control Register Bit Definitions: */ /* B0_CTST 24 bit Control/Status register */ #define Y2_VMAIN_AVAIL BIT_17 /* VMAIN available (YUKON-2 only) */ #define Y2_VAUX_AVAIL BIT_16 /* VAUX available (YUKON-2 only) */ #define Y2_HW_WOL_ON BIT_15 /* HW WOL On (Yukon-EC Ultra A1 only) */ #define Y2_HW_WOL_OFF BIT_14 /* HW WOL Off (Yukon-EC Ultra A1 only) */ #define Y2_ASF_ENABLE BIT_13 /* ASF Unit Enable (YUKON-2 only) */ #define Y2_ASF_DISABLE BIT_12 /* ASF Unit Disable (YUKON-2 only) */ #define Y2_CLK_RUN_ENA BIT_11 /* CLK_RUN Enable (YUKON-2 only) */ #define Y2_CLK_RUN_DIS BIT_10 /* CLK_RUN Disable (YUKON-2 only) */ #define Y2_LED_STAT_ON BIT_9 /* Status LED On (YUKON-2 only) */ #define Y2_LED_STAT_OFF BIT_8 /* Status LED Off (YUKON-2 only) */ #define CS_ST_SW_IRQ BIT_7 /* Set IRQ SW Request */ #define CS_CL_SW_IRQ BIT_6 /* Clear IRQ SW Request */ #define CS_STOP_DONE BIT_5 /* Stop Master is finished */ #define CS_STOP_MAST BIT_4 /* Command Bit to stop the master */ #define CS_MRST_CLR BIT_3 /* Clear Master Reset */ #define CS_MRST_SET BIT_2 /* Set Master Reset */ #define CS_RST_CLR BIT_1 /* Clear Software Reset */ #define CS_RST_SET BIT_0 /* Set Software Reset */ #define LED_STAT_ON BIT_1 /* Status LED On */ #define LED_STAT_OFF BIT_0 /* Status LED Off */ /* B0_POWER_CTRL 8 Bit Power Control reg (YUKON only) */ #define PC_VAUX_ENA BIT_7 /* Switch VAUX Enable */ 
#define PC_VAUX_DIS BIT_6 /* Switch VAUX Disable */ #define PC_VCC_ENA BIT_5 /* Switch VCC Enable */ #define PC_VCC_DIS BIT_4 /* Switch VCC Disable */ #define PC_VAUX_ON BIT_3 /* Switch VAUX On */ #define PC_VAUX_OFF BIT_2 /* Switch VAUX Off */ #define PC_VCC_ON BIT_1 /* Switch VCC On */ #define PC_VCC_OFF BIT_0 /* Switch VCC Off */ /* B0_ISRC 32 bit Interrupt Source Register */ /* B0_IMSK 32 bit Interrupt Mask Register */ /* B0_SP_ISRC 32 bit Special Interrupt Source Reg */ /* B2_IRQM_MSK 32 bit IRQ Moderation Mask */ /* B0_Y2_SP_ISRC2 32 bit Special Interrupt Source Reg 2 */ /* B0_Y2_SP_ISRC3 32 bit Special Interrupt Source Reg 3 */ /* B0_Y2_SP_EISR 32 bit Enter ISR Reg */ /* B0_Y2_SP_LISR 32 bit Leave ISR Reg */ #define Y2_IS_PORT_MASK(Port, Mask) ((Mask) << (Port*8)) #define Y2_IS_HW_ERR BIT_31 /* Interrupt HW Error */ #define Y2_IS_STAT_BMU BIT_30 /* Status BMU Interrupt */ #define Y2_IS_ASF BIT_29 /* ASF subsystem Interrupt */ #define Y2_IS_POLL_CHK BIT_27 /* Check IRQ from polling unit */ #define Y2_IS_TWSI_RDY BIT_26 /* IRQ on end of TWSI Tx */ #define Y2_IS_IRQ_SW BIT_25 /* SW forced IRQ */ #define Y2_IS_TIMINT BIT_24 /* IRQ from Timer */ #define Y2_IS_IRQ_PHY2 BIT_12 /* Interrupt from PHY 2 */ #define Y2_IS_IRQ_MAC2 BIT_11 /* Interrupt from MAC 2 */ #define Y2_IS_CHK_RX2 BIT_10 /* Descriptor error Rx 2 */ #define Y2_IS_CHK_TXS2 BIT_9 /* Descriptor error TXS 2 */ #define Y2_IS_CHK_TXA2 BIT_8 /* Descriptor error TXA 2 */ #define Y2_IS_PSM_ACK BIT_7 /* PSM Ack (Yukon Optima) */ #define Y2_IS_PTP_TIST BIT_6 /* PTP TIme Stamp (Yukon Optima) */ #define Y2_IS_PHY_QLNK BIT_5 /* PHY Quick Link (Yukon Optima) */ #define Y2_IS_IRQ_PHY1 BIT_4 /* Interrupt from PHY 1 */ #define Y2_IS_IRQ_MAC1 BIT_3 /* Interrupt from MAC 1 */ #define Y2_IS_CHK_RX1 BIT_2 /* Descriptor error Rx 1 */ #define Y2_IS_CHK_TXS1 BIT_1 /* Descriptor error TXS 1 */ #define Y2_IS_CHK_TXA1 BIT_0 /* Descriptor error TXA 1 */ #define Y2_IS_L1_MASK 0x0000001f /* IRQ Mask for port 1 */ #define Y2_IS_L2_MASK 0x00001f00 /* IRQ Mask for port 2 */ #define Y2_IS_ALL_MSK 0xef001f1f /* All Interrupt bits */ #define Y2_IS_PORT_A \ (Y2_IS_IRQ_PHY1 | Y2_IS_IRQ_MAC1 | Y2_IS_CHK_TXA1 | Y2_IS_CHK_RX1) #define Y2_IS_PORT_B \ (Y2_IS_IRQ_PHY2 | Y2_IS_IRQ_MAC2 | Y2_IS_CHK_TXA2 | Y2_IS_CHK_RX2) /* B0_HWE_ISRC 32 bit HW Error Interrupt Src Reg */ /* B0_HWE_IMSK 32 bit HW Error Interrupt Mask Reg */ /* B2_IRQM_HWE_MSK 32 bit IRQ Moderation HW Error Mask */ #define Y2_IS_TIST_OV BIT_29 /* Time Stamp Timer overflow interrupt */ #define Y2_IS_SENSOR BIT_28 /* Sensor interrupt */ #define Y2_IS_MST_ERR BIT_27 /* Master error interrupt */ #define Y2_IS_IRQ_STAT BIT_26 /* Status exception interrupt */ #define Y2_IS_PCI_EXP BIT_25 /* PCI-Express interrupt */ #define Y2_IS_PCI_NEXP BIT_24 /* PCI-Express error similar to PCI error */ #define Y2_IS_PAR_RD2 BIT_13 /* Read RAM parity error interrupt */ #define Y2_IS_PAR_WR2 BIT_12 /* Write RAM parity error interrupt */ #define Y2_IS_PAR_MAC2 BIT_11 /* MAC hardware fault interrupt */ #define Y2_IS_PAR_RX2 BIT_10 /* Parity Error Rx Queue 2 */ #define Y2_IS_TCP_TXS2 BIT_9 /* TCP length mismatch sync Tx queue IRQ */ #define Y2_IS_TCP_TXA2 BIT_8 /* TCP length mismatch async Tx queue IRQ */ #define Y2_IS_PAR_RD1 BIT_5 /* Read RAM parity error interrupt */ #define Y2_IS_PAR_WR1 BIT_4 /* Write RAM parity error interrupt */ #define Y2_IS_PAR_MAC1 BIT_3 /* MAC hardware fault interrupt */ #define Y2_IS_PAR_RX1 BIT_2 /* Parity Error Rx Queue 1 */ #define Y2_IS_TCP_TXS1 BIT_1 /* TCP length mismatch sync Tx queue IRQ */ 
#define Y2_IS_TCP_TXA1 BIT_0 /* TCP length mismatch async Tx queue IRQ */ #define Y2_HWE_L1_MASK (Y2_IS_PAR_RD1 | Y2_IS_PAR_WR1 | Y2_IS_PAR_MAC1 |\ Y2_IS_PAR_RX1 | Y2_IS_TCP_TXS1| Y2_IS_TCP_TXA1) #define Y2_HWE_L2_MASK (Y2_IS_PAR_RD2 | Y2_IS_PAR_WR2 | Y2_IS_PAR_MAC2 |\ Y2_IS_PAR_RX2 | Y2_IS_TCP_TXS2| Y2_IS_TCP_TXA2) #define Y2_HWE_ALL_MSK (Y2_IS_TIST_OV | /* Y2_IS_SENSOR | */ Y2_IS_MST_ERR |\ Y2_IS_IRQ_STAT | Y2_IS_PCI_EXP | Y2_IS_PCI_NEXP |\ Y2_HWE_L1_MASK | Y2_HWE_L2_MASK) /* B2_MAC_CFG 8 bit MAC Configuration / Chip Revision */ #define CFG_CHIP_R_MSK (0x0f<<4) /* Bit 7.. 4: Chip Revision */ #define CFG_DIS_M2_CLK BIT_1 /* Disable Clock for 2nd MAC */ #define CFG_SNG_MAC BIT_0 /* MAC Config: 0 = 2 MACs; 1 = 1 MAC */ /* B2_CHIP_ID 8 bit Chip Identification Number */ #define CHIP_ID_GENESIS 0x0a /* Chip ID for GENESIS */ #define CHIP_ID_YUKON 0xb0 /* Chip ID for YUKON */ #define CHIP_ID_YUKON_LITE 0xb1 /* Chip ID for YUKON-Lite (Rev. A1-A3) */ #define CHIP_ID_YUKON_LP 0xb2 /* Chip ID for YUKON-LP */ #define CHIP_ID_YUKON_XL 0xb3 /* Chip ID for YUKON-2 XL */ #define CHIP_ID_YUKON_EC_U 0xb4 /* Chip ID for YUKON-2 EC Ultra */ #define CHIP_ID_YUKON_EX 0xb5 /* Chip ID for YUKON-2 Extreme */ #define CHIP_ID_YUKON_EC 0xb6 /* Chip ID for YUKON-2 EC */ #define CHIP_ID_YUKON_FE 0xb7 /* Chip ID for YUKON-2 FE */ #define CHIP_ID_YUKON_FE_P 0xb8 /* Chip ID for YUKON-2 FE+ */ #define CHIP_ID_YUKON_SUPR 0xb9 /* Chip ID for YUKON-2 Supreme */ #define CHIP_ID_YUKON_UL_2 0xba /* Chip ID for YUKON-2 Ultra 2 */ #define CHIP_ID_YUKON_UNKNOWN 0xbb #define CHIP_ID_YUKON_OPT 0xbc /* Chip ID for YUKON-2 Optima */ #define CHIP_REV_YU_XL_A0 0 /* Chip Rev. for Yukon-2 A0 */ #define CHIP_REV_YU_XL_A1 1 /* Chip Rev. for Yukon-2 A1 */ #define CHIP_REV_YU_XL_A2 2 /* Chip Rev. for Yukon-2 A2 */ #define CHIP_REV_YU_XL_A3 3 /* Chip Rev. for Yukon-2 A3 */ #define CHIP_REV_YU_EC_A1 0 /* Chip Rev. for Yukon-EC A1/A0 */ #define CHIP_REV_YU_EC_A2 1 /* Chip Rev. for Yukon-EC A2 */ #define CHIP_REV_YU_EC_A3 2 /* Chip Rev. for Yukon-EC A3 */ #define CHIP_REV_YU_EC_U_A0 1 #define CHIP_REV_YU_EC_U_A1 2 #define CHIP_REV_YU_FE_P_A0 0 /* Chip Rev. for Yukon-2 FE+ A0 */ #define CHIP_REV_YU_EX_A0 1 /* Chip Rev. for Yukon-2 EX A0 */ #define CHIP_REV_YU_EX_B0 2 /* Chip Rev. for Yukon-2 EX B0 */ #define CHIP_REV_YU_SU_A0 0 /* Chip Rev. for Yukon-2 SUPR A0 */ #define CHIP_REV_YU_SU_B0 1 /* Chip Rev. for Yukon-2 SUPR B0 */ #define CHIP_REV_YU_SU_B1 3 /* Chip Rev. for Yukon-2 SUPR B1 */ /* B2_Y2_CLK_GATE 8 bit Clock Gating (Yukon-2 only) */ #define Y2_STATUS_LNK2_INAC BIT_7 /* Status Link 2 inactiv (0 = activ) */ #define Y2_CLK_GAT_LNK2_DIS BIT_6 /* Disable clock gating Link 2 */ #define Y2_COR_CLK_LNK2_DIS BIT_5 /* Disable Core clock Link 2 */ #define Y2_PCI_CLK_LNK2_DIS BIT_4 /* Disable PCI clock Link 2 */ #define Y2_STATUS_LNK1_INAC BIT_3 /* Status Link 1 inactiv (0 = activ) */ #define Y2_CLK_GAT_LNK1_DIS BIT_2 /* Disable clock gating Link 1 */ #define Y2_COR_CLK_LNK1_DIS BIT_1 /* Disable Core clock Link 1 */ #define Y2_PCI_CLK_LNK1_DIS BIT_0 /* Disable PCI clock Link 1 */ /* B2_Y2_HW_RES 8 bit HW Resources (Yukon-2 only) */ #define CFG_LED_MODE_MSK (0x07<<2) /* Bit 4.. 
2: LED Mode Mask */ #define CFG_LINK_2_AVAIL BIT_1 /* Link 2 available */ #define CFG_LINK_1_AVAIL BIT_0 /* Link 1 available */ #define CFG_LED_MODE(x) (((x) & CFG_LED_MODE_MSK) >> 2) #define CFG_DUAL_MAC_MSK (CFG_LINK_2_AVAIL | CFG_LINK_1_AVAIL) /* B2_E_3 8 bit lower 4 bits used for HW self test result */ #define B2_E3_RES_MASK 0x0f /* B2_Y2_CLK_CTRL 32 bit Core Clock Frequency Control Register (Yukon-2/EC) */ /* Yukon-EC/FE */ #define Y2_CLK_DIV_VAL_MSK (0xff<<16) /* Bit 23..16: Clock Divisor Value */ #define Y2_CLK_DIV_VAL(x) (SHIFT16(x) & Y2_CLK_DIV_VAL_MSK) /* Yukon-2 */ #define Y2_CLK_DIV_VAL2_MSK (0x07<<21) /* Bit 23..21: Clock Divisor Value */ #define Y2_CLK_SELECT2_MSK (0x1f<<16) /* Bit 20..16: Clock Select */ #define Y2_CLK_DIV_VAL_2(x) (SHIFT21(x) & Y2_CLK_DIV_VAL2_MSK) #define Y2_CLK_SEL_VAL_2(x) (SHIFT16(x) & Y2_CLK_SELECT2_MSK) #define Y2_CLK_DIV_ENA BIT_1 /* Enable Core Clock Division */ #define Y2_CLK_DIV_DIS BIT_0 /* Disable Core Clock Division */ /* B2_TI_CTRL 8 bit Timer control */ /* B2_IRQM_CTRL 8 bit IRQ Moderation Timer Control */ #define TIM_START BIT_2 /* Start Timer */ #define TIM_STOP BIT_1 /* Stop Timer */ #define TIM_CLR_IRQ BIT_0 /* Clear Timer IRQ (!IRQM) */ /* B2_TI_TEST 8 Bit Timer Test */ /* B2_IRQM_TEST 8 bit IRQ Moderation Timer Test */ /* B28_DPT_TST 8 bit Descriptor Poll Timer Test Reg */ #define TIM_T_ON BIT_2 /* Test mode on */ #define TIM_T_OFF BIT_1 /* Test mode off */ #define TIM_T_STEP BIT_0 /* Test step */ /* B28_DPT_INI 32 bit Descriptor Poll Timer Init Val */ /* B28_DPT_VAL 32 bit Descriptor Poll Timer Curr Val */ #define DPT_MSK 0x00ffffff /* Bit 23.. 0: Desc Poll Timer Bits */ /* B28_DPT_CTRL 8 bit Descriptor Poll Timer Ctrl Reg */ #define DPT_START BIT_1 /* Start Descriptor Poll Timer */ #define DPT_STOP BIT_0 /* Stop Descriptor Poll Timer */ /* B2_TST_CTRL1 8 bit Test Control Register 1 */ #define TST_FRC_DPERR_MR BIT_7 /* force DATAPERR on MST RD */ #define TST_FRC_DPERR_MW BIT_6 /* force DATAPERR on MST WR */ #define TST_FRC_DPERR_TR BIT_5 /* force DATAPERR on TRG RD */ #define TST_FRC_DPERR_TW BIT_4 /* force DATAPERR on TRG WR */ #define TST_FRC_APERR_M BIT_3 /* force ADDRPERR on MST */ #define TST_FRC_APERR_T BIT_2 /* force ADDRPERR on TRG */ #define TST_CFG_WRITE_ON BIT_1 /* Enable Config Reg WR */ #define TST_CFG_WRITE_OFF BIT_0 /* Disable Config Reg WR */ /* B2_GP_IO */ #define GLB_GPIO_CLK_DEB_ENA BIT_31 /* Clock Debug Enable */ #define GLB_GPIO_CLK_DBG_MSK 0x3c000000 /* Clock Debug */ #define GLB_GPIO_INT_RST_D3_DIS BIT_15 /* Disable Internal Reset After D3 to D0 */ #define GLB_GPIO_LED_PAD_SPEED_UP BIT_14 /* LED PAD Speed Up */ #define GLB_GPIO_STAT_RACE_DIS BIT_13 /* Status Race Disable */ #define GLB_GPIO_TEST_SEL_MSK 0x00001800 /* Testmode Select */ #define GLB_GPIO_TEST_SEL_BASE BIT_11 #define GLB_GPIO_RAND_ENA BIT_10 /* Random Enable */ #define GLB_GPIO_RAND_BIT_1 BIT_9 /* Random Bit 1 */ /* B2_I2C_CTRL 32 bit I2C HW Control Register */ #define I2C_FLAG BIT_31 /* Start read/write if WR */ #define I2C_ADDR (0x7fff<<16) /* Bit 30..16: Addr to be RD/WR */ #define I2C_DEV_SEL (0x7f<<9) /* Bit 15.. 9: I2C Device Select */ #define I2C_BURST_LEN BIT_4 /* Burst Len, 1/4 bytes */ #define I2C_DEV_SIZE (7<<1) /* Bit 3.. 1: I2C Device Size */ #define I2C_025K_DEV (0<<1) /* 0: 256 Bytes or smal. 
*/ #define I2C_05K_DEV (1<<1) /* 1: 512 Bytes */ #define I2C_1K_DEV (2<<1) /* 2: 1024 Bytes */ #define I2C_2K_DEV (3<<1) /* 3: 2048 Bytes */ #define I2C_4K_DEV (4<<1) /* 4: 4096 Bytes */ #define I2C_8K_DEV (5<<1) /* 5: 8192 Bytes */ #define I2C_16K_DEV (6<<1) /* 6: 16384 Bytes */ #define I2C_32K_DEV (7<<1) /* 7: 32768 Bytes */ #define I2C_STOP BIT_0 /* Interrupt I2C transfer */ /* B2_I2C_IRQ 32 bit I2C HW IRQ Register */ #define I2C_CLR_IRQ BIT_0 /* Clear I2C IRQ */ /* B2_I2C_SW 32 bit (8 bit access) I2C HW SW Port Register */ #define I2C_DATA_DIR BIT_2 /* direction of I2C_DATA */ #define I2C_DATA BIT_1 /* I2C Data Port */ #define I2C_CLK BIT_0 /* I2C Clock Port */ /* I2C Address */ #define I2C_SENS_ADDR LM80_ADDR /* I2C Sensor Address (Volt and Temp) */ /* B2_BSC_CTRL 8 bit Blink Source Counter Control */ #define BSC_START BIT_1 /* Start Blink Source Counter */ #define BSC_STOP BIT_0 /* Stop Blink Source Counter */ /* B2_BSC_STAT 8 bit Blink Source Counter Status */ #define BSC_SRC BIT_0 /* Blink Source, 0=Off / 1=On */ /* B2_BSC_TST 16 bit Blink Source Counter Test Reg */ #define BSC_T_ON BIT_2 /* Test mode on */ #define BSC_T_OFF BIT_1 /* Test mode off */ #define BSC_T_STEP BIT_0 /* Test step */ /* Y2_PEX_PHY_ADDR/DATA PEX PHY address and data reg (Yukon-2 only) */ #define PEX_RD_ACCESS BIT_31 /* Access Mode Read = 1, Write = 0 */ #define PEX_DB_ACCESS BIT_30 /* Access to debug register */ /* B3_RAM_ADDR 32 bit RAM Address, to read or write */ #define RAM_ADR_RAN 0x0007ffff /* Bit 18.. 0: RAM Address Range */ /* RAM Interface Registers */ /* B3_RI_CTRL 16 bit RAM Interface Control Register */ #define RI_CLR_RD_PERR BIT_9 /* Clear IRQ RAM Read Parity Err */ #define RI_CLR_WR_PERR BIT_8 /* Clear IRQ RAM Write Parity Err */ #define RI_RST_CLR BIT_1 /* Clear RAM Interface Reset */ #define RI_RST_SET BIT_0 /* Set RAM Interface Reset */ #define MSK_RI_TO_53 36 /* RAM interface timeout */ /* Transmit Arbiter Registers MAC 1 and 2, use MR_ADDR() to access */ /* TXA_ITI_INI 32 bit Tx Arb Interval Timer Init Val */ /* TXA_ITI_VAL 32 bit Tx Arb Interval Timer Value */ /* TXA_LIM_INI 32 bit Tx Arb Limit Counter Init Val */ /* TXA_LIM_VAL 32 bit Tx Arb Limit Counter Value */ #define TXA_MAX_VAL 0x00ffffff/* Bit 23.. 0: Max TXA Timer/Cnt Val */ /* TXA_CTRL 8 bit Tx Arbiter Control Register */ #define TXA_ENA_FSYNC BIT_7 /* Enable force of sync Tx queue */ #define TXA_DIS_FSYNC BIT_6 /* Disable force of sync Tx queue */ #define TXA_ENA_ALLOC BIT_5 /* Enable alloc of free bandwidth */ #define TXA_DIS_ALLOC BIT_4 /* Disable alloc of free bandwidth */ #define TXA_START_RC BIT_3 /* Start sync Rate Control */ #define TXA_STOP_RC BIT_2 /* Stop sync Rate Control */ #define TXA_ENA_ARB BIT_1 /* Enable Tx Arbiter */ #define TXA_DIS_ARB BIT_0 /* Disable Tx Arbiter */ /* TXA_TEST 8 bit Tx Arbiter Test Register */ #define TXA_INT_T_ON BIT_5 /* Tx Arb Interval Timer Test On */ #define TXA_INT_T_OFF BIT_4 /* Tx Arb Interval Timer Test Off */ #define TXA_INT_T_STEP BIT_3 /* Tx Arb Interval Timer Step */ #define TXA_LIM_T_ON BIT_2 /* Tx Arb Limit Timer Test On */ #define TXA_LIM_T_OFF BIT_1 /* Tx Arb Limit Timer Test Off */ #define TXA_LIM_T_STEP BIT_0 /* Tx Arb Limit Timer Step */ /* TXA_STAT 8 bit Tx Arbiter Status Register */ #define TXA_PRIO_XS BIT_0 /* sync queue has prio to send */ /* Q_BC 32 bit Current Byte Counter */ #define BC_MAX 0xffff /* Bit 15.. 
0: Byte counter */ /* Rx BMU Control / Status Registers (Yukon-2) */ #define BMU_IDLE BIT_31 /* BMU Idle State */ #define BMU_RX_TCP_PKT BIT_30 /* Rx TCP Packet (when RSS Hash enabled) */ #define BMU_RX_IP_PKT BIT_29 /* Rx IP Packet (when RSS Hash enabled) */ #define BMU_ENA_RX_RSS_HASH BIT_15 /* Enable Rx RSS Hash */ #define BMU_DIS_RX_RSS_HASH BIT_14 /* Disable Rx RSS Hash */ #define BMU_ENA_RX_CHKSUM BIT_13 /* Enable Rx TCP/IP Checksum Check */ #define BMU_DIS_RX_CHKSUM BIT_12 /* Disable Rx TCP/IP Checksum Check */ #define BMU_CLR_IRQ_PAR BIT_11 /* Clear IRQ on Parity errors (Rx) */ #define BMU_CLR_IRQ_TCP BIT_11 /* Clear IRQ on TCP segmen. error (Tx) */ #define BMU_CLR_IRQ_CHK BIT_10 /* Clear IRQ Check */ #define BMU_STOP BIT_9 /* Stop Rx/Tx Queue */ #define BMU_START BIT_8 /* Start Rx/Tx Queue */ #define BMU_FIFO_OP_ON BIT_7 /* FIFO Operational On */ #define BMU_FIFO_OP_OFF BIT_6 /* FIFO Operational Off */ #define BMU_FIFO_ENA BIT_5 /* Enable FIFO */ #define BMU_FIFO_RST BIT_4 /* Reset FIFO */ #define BMU_OP_ON BIT_3 /* BMU Operational On */ #define BMU_OP_OFF BIT_2 /* BMU Operational Off */ #define BMU_RST_CLR BIT_1 /* Clear BMU Reset (Enable) */ #define BMU_RST_SET BIT_0 /* Set BMU Reset */ #define BMU_CLR_RESET (BMU_FIFO_RST | BMU_OP_OFF | BMU_RST_CLR) #define BMU_OPER_INIT (BMU_CLR_IRQ_PAR | BMU_CLR_IRQ_CHK | \ BMU_START | BMU_FIFO_ENA | BMU_OP_ON) /* Tx BMU Control / Status Registers (Yukon-2) */ /* Bit 31: same as for Rx */ #define BMU_TX_IPIDINCR_ON BIT_13 /* Enable IP ID Increment */ #define BMU_TX_IPIDINCR_OFF BIT_12 /* Disable IP ID Increment */ #define BMU_TX_CLR_IRQ_TCP BIT_11 /* Clear IRQ on TCP segm. length mism. */ /* Bit 10..0: same as for Rx */ /* Q_F 32 bit Flag Register */ #define F_TX_CHK_AUTO_OFF BIT_31 /* Tx checksum auto-calc Off(Yukon EX)*/ #define F_TX_CHK_AUTO_ON BIT_30 /* Tx checksum auto-calc On(Yukon EX)*/ #define F_ALM_FULL BIT_28 /* Rx FIFO: almost full */ #define F_EMPTY BIT_27 /* Tx FIFO: empty flag */ #define F_FIFO_EOF BIT_26 /* Tag (EOF Flag) bit in FIFO */ #define F_WM_REACHED BIT_25 /* Watermark reached */ #define F_M_RX_RAM_DIS BIT_24 /* MAC Rx RAM Read Port disable */ #define F_FIFO_LEVEL (0x1f<<16) /* Bit 23..16: # of Qwords in FIFO */ #define F_WATER_MARK 0x0007ff/* Bit 10.. 0: Watermark */ /* Queue Prefetch Unit Offsets, use Y2_PREF_Q_ADDR() to address (Yukon-2 only)*/ /* PREF_UNIT_CTRL_REG 32 bit Prefetch Control register */ #define PREF_UNIT_OP_ON BIT_3 /* prefetch unit operational */ #define PREF_UNIT_OP_OFF BIT_2 /* prefetch unit not operational */ #define PREF_UNIT_RST_CLR BIT_1 /* Clear Prefetch Unit Reset */ #define PREF_UNIT_RST_SET BIT_0 /* Set Prefetch Unit Reset */ /* RAM Buffer Register Offsets, use RB_ADDR(Queue, Offs) to access */ /* RB_START 32 bit RAM Buffer Start Address */ /* RB_END 32 bit RAM Buffer End Address */ /* RB_WP 32 bit RAM Buffer Write Pointer */ /* RB_RP 32 bit RAM Buffer Read Pointer */ /* RB_RX_UTPP 32 bit Rx Upper Threshold, Pause Pack */ /* RB_RX_LTPP 32 bit Rx Lower Threshold, Pause Pack */ /* RB_RX_UTHP 32 bit Rx Upper Threshold, High Prio */ /* RB_RX_LTHP 32 bit Rx Lower Threshold, High Prio */ /* RB_PC 32 bit RAM Buffer Packet Counter */ /* RB_LEV 32 bit RAM Buffer Level Register */ #define RB_MSK 0x0007ffff /* Bit 18.. 
0: RAM Buffer Pointer Bits */ /* RB_TST2 8 bit RAM Buffer Test Register 2 */ #define RB_PC_DEC BIT_3 /* Packet Counter Decrement */ #define RB_PC_T_ON BIT_2 /* Packet Counter Test On */ #define RB_PC_T_OFF BIT_1 /* Packet Counter Test Off */ #define RB_PC_INC BIT_0 /* Packet Counter Increment */ /* RB_TST1 8 bit RAM Buffer Test Register 1 */ #define RB_WP_T_ON BIT_6 /* Write Pointer Test On */ #define RB_WP_T_OFF BIT_5 /* Write Pointer Test Off */ #define RB_WP_INC BIT_4 /* Write Pointer Increment */ #define RB_RP_T_ON BIT_2 /* Read Pointer Test On */ #define RB_RP_T_OFF BIT_1 /* Read Pointer Test Off */ #define RB_RP_INC BIT_0 /* Read Pointer Increment */ /* RB_CTRL 8 bit RAM Buffer Control Register */ #define RB_ENA_STFWD BIT_5 /* Enable Store & Forward */ #define RB_DIS_STFWD BIT_4 /* Disable Store & Forward */ #define RB_ENA_OP_MD BIT_3 /* Enable Operation Mode */ #define RB_DIS_OP_MD BIT_2 /* Disable Operation Mode */ #define RB_RST_CLR BIT_1 /* Clear RAM Buf STM Reset */ #define RB_RST_SET BIT_0 /* Set RAM Buf STM Reset */ /* RAM Buffer High Pause Threshold values */ #define MSK_RB_ULPP (8 * 1024) /* Upper Level in kB/8 */ #define MSK_RB_LLPP_S (10 * 1024) /* Lower Level for small Queues */ #define MSK_RB_LLPP_B (16 * 1024) /* Lower Level for big Queues */ /* Threshold values for Yukon-EC Ultra */ #define MSK_ECU_ULPP 0x0080 /* Upper Pause Threshold (multiples of 8) */ #define MSK_ECU_LLPP 0x0060 /* Lower Pause Threshold (multiples of 8) */ #define MSK_ECU_AE_THR 0x0070 /* Almost Empty Threshold */ #define MSK_ECU_TXFF_LEV 0x01a0 /* Tx BMU FIFO Level */ #define MSK_ECU_JUMBO_WM 0x01 #define MSK_BMU_RX_WM 0x600 /* BMU Rx Watermark */ #define MSK_BMU_TX_WM 0x600 /* BMU Tx Watermark */ /* performance sensitive drivers should set this define to 0x80 */ #define MSK_BMU_RX_WM_PEX 0x600 /* BMU Rx Watermark for PEX */ /* Receive and Transmit Queues */ #define Q_R1 0x0000 /* Receive Queue 1 */ #define Q_R2 0x0080 /* Receive Queue 2 */ #define Q_XS1 0x0200 /* Synchronous Transmit Queue 1 */ #define Q_XA1 0x0280 /* Asynchronous Transmit Queue 1 */ #define Q_XS2 0x0300 /* Synchronous Transmit Queue 2 */ #define Q_XA2 0x0380 /* Asynchronous Transmit Queue 2 */ #define Q_ASF_R1 0x100 /* ASF Rx Queue 1 */ #define Q_ASF_R2 0x180 /* ASF Rx Queue 2 */ #define Q_ASF_T1 0x140 /* ASF Tx Queue 1 */ #define Q_ASF_T2 0x1c0 /* ASF Tx Queue 2 */ #define RB_ADDR(Queue, Offs) (B16_RAM_REGS + (Queue) + (Offs)) /* Minimum RAM Buffer Rx Queue Size */ #define MSK_MIN_RXQ_SIZE 10 /* Minimum RAM Buffer Tx Queue Size */ #define MSK_MIN_TXQ_SIZE 10 /* Percentage of queue size from whole memory. 
80 % for receive */ #define MSK_RAM_QUOTA_RX 80 /* WOL_CTRL_STAT 16 bit WOL Control/Status Reg */ #define WOL_CTL_LINK_CHG_OCC BIT_15 #define WOL_CTL_MAGIC_PKT_OCC BIT_14 #define WOL_CTL_PATTERN_OCC BIT_13 #define WOL_CTL_CLEAR_RESULT BIT_12 #define WOL_CTL_ENA_PME_ON_LINK_CHG BIT_11 #define WOL_CTL_DIS_PME_ON_LINK_CHG BIT_10 #define WOL_CTL_ENA_PME_ON_MAGIC_PKT BIT_9 #define WOL_CTL_DIS_PME_ON_MAGIC_PKT BIT_8 #define WOL_CTL_ENA_PME_ON_PATTERN BIT_7 #define WOL_CTL_DIS_PME_ON_PATTERN BIT_6 #define WOL_CTL_ENA_LINK_CHG_UNIT BIT_5 #define WOL_CTL_DIS_LINK_CHG_UNIT BIT_4 #define WOL_CTL_ENA_MAGIC_PKT_UNIT BIT_3 #define WOL_CTL_DIS_MAGIC_PKT_UNIT BIT_2 #define WOL_CTL_ENA_PATTERN_UNIT BIT_1 #define WOL_CTL_DIS_PATTERN_UNIT BIT_0 #define WOL_CTL_DEFAULT \ (WOL_CTL_DIS_PME_ON_LINK_CHG | \ WOL_CTL_DIS_PME_ON_PATTERN | \ WOL_CTL_DIS_PME_ON_MAGIC_PKT | \ WOL_CTL_DIS_LINK_CHG_UNIT | \ WOL_CTL_DIS_PATTERN_UNIT | \ WOL_CTL_DIS_MAGIC_PKT_UNIT) /* WOL_MATCH_CTL 8 bit WOL Match Control Reg */ #define WOL_CTL_PATT_ENA(x) (BIT_0 << (x)) /* WOL_PATT_PME 8 bit WOL PME Match Enable (Yukon-2) */ #define WOL_PATT_FORCE_PME BIT_7 /* Generates a PME */ #define WOL_PATT_MATCH_PME_ALL 0x7f /* * Marvel-PHY Registers, indirect addressed over GMAC */ #define PHY_MARV_CTRL 0x00 /* 16 bit r/w PHY Control Register */ #define PHY_MARV_STAT 0x01 /* 16 bit r/o PHY Status Register */ #define PHY_MARV_ID0 0x02 /* 16 bit r/o PHY ID0 Register */ #define PHY_MARV_ID1 0x03 /* 16 bit r/o PHY ID1 Register */ #define PHY_MARV_AUNE_ADV 0x04 /* 16 bit r/w Auto-Neg. Advertisement */ #define PHY_MARV_AUNE_LP 0x05 /* 16 bit r/o Link Part Ability Reg */ #define PHY_MARV_AUNE_EXP 0x06 /* 16 bit r/o Auto-Neg. Expansion Reg */ #define PHY_MARV_NEPG 0x07 /* 16 bit r/w Next Page Register */ #define PHY_MARV_NEPG_LP 0x08 /* 16 bit r/o Next Page Link Partner */ /* Marvel-specific registers */ #define PHY_MARV_1000T_CTRL 0x09 /* 16 bit r/w 1000Base-T Control Reg */ #define PHY_MARV_1000T_STAT 0x0a /* 16 bit r/o 1000Base-T Status Reg */ /* 0x0b - 0x0e: reserved */ #define PHY_MARV_EXT_STAT 0x0f /* 16 bit r/o Extended Status Reg */ #define PHY_MARV_PHY_CTRL 0x10 /* 16 bit r/w PHY Specific Control Reg */ #define PHY_MARV_PHY_STAT 0x11 /* 16 bit r/o PHY Specific Status Reg */ #define PHY_MARV_INT_MASK 0x12 /* 16 bit r/w Interrupt Mask Reg */ #define PHY_MARV_INT_STAT 0x13 /* 16 bit r/o Interrupt Status Reg */ #define PHY_MARV_EXT_CTRL 0x14 /* 16 bit r/w Ext. PHY Specific Ctrl */ #define PHY_MARV_RXE_CNT 0x15 /* 16 bit r/w Receive Error Counter */ #define PHY_MARV_EXT_ADR 0x16 /* 16 bit r/w Ext. Ad. for Cable Diag. */ #define PHY_MARV_PORT_IRQ 0x17 /* 16 bit r/o Port 0 IRQ (88E1111 only) */ #define PHY_MARV_LED_CTRL 0x18 /* 16 bit r/w LED Control Reg */ #define PHY_MARV_LED_OVER 0x19 /* 16 bit r/w Manual LED Override Reg */ #define PHY_MARV_EXT_CTRL_2 0x1a /* 16 bit r/w Ext. PHY Specific Ctrl 2 */ #define PHY_MARV_EXT_P_STAT 0x1b /* 16 bit r/w Ext. PHY Spec. Stat Reg */ #define PHY_MARV_CABLE_DIAG 0x1c /* 16 bit r/o Cable Diagnostic Reg */ #define PHY_MARV_PAGE_ADDR 0x1d /* 16 bit r/w Extended Page Address Reg */ #define PHY_MARV_PAGE_DATA 0x1e /* 16 bit r/w Extended Page Data Reg */ /* for 10/100 Fast Ethernet PHY (88E3082 only) */ #define PHY_MARV_FE_LED_PAR 0x16 /* 16 bit r/w LED Parallel Select Reg. */ #define PHY_MARV_FE_LED_SER 0x17 /* 16 bit r/w LED Stream Select S. LED */ #define PHY_MARV_FE_VCT_TX 0x1a /* 16 bit r/w VCT Reg. for TXP/N Pins */ #define PHY_MARV_FE_VCT_RX 0x1b /* 16 bit r/o VCT Reg. 
for RXP/N Pins */ #define PHY_MARV_FE_SPEC_2 0x1c /* 16 bit r/w Specific Control Reg. 2 */ #define PHY_CT_RESET (1<<15) /* Bit 15: (sc) clear all PHY related regs */ #define PHY_CT_LOOP (1<<14) /* Bit 14: enable Loopback over PHY */ #define PHY_CT_SPS_LSB (1<<13) /* Bit 13: Speed select, lower bit */ #define PHY_CT_ANE (1<<12) /* Bit 12: Auto-Negotiation Enabled */ #define PHY_CT_PDOWN (1<<11) /* Bit 11: Power Down Mode */ #define PHY_CT_ISOL (1<<10) /* Bit 10: Isolate Mode */ #define PHY_CT_RE_CFG (1<<9) /* Bit 9: (sc) Restart Auto-Negotiation */ #define PHY_CT_DUP_MD (1<<8) /* Bit 8: Duplex Mode */ #define PHY_CT_COL_TST (1<<7) /* Bit 7: Collision Test enabled */ #define PHY_CT_SPS_MSB (1<<6) /* Bit 6: Speed select, upper bit */ #define PHY_CT_SP1000 PHY_CT_SPS_MSB /* enable speed of 1000 Mbps */ #define PHY_CT_SP100 PHY_CT_SPS_LSB /* enable speed of 100 Mbps */ #define PHY_CT_SP10 (0) /* enable speed of 10 Mbps */ #define PHY_ST_EXT_ST (1<<8) /* Bit 8: Extended Status Present */ #define PHY_ST_PRE_SUP (1<<6) /* Bit 6: Preamble Suppression */ #define PHY_ST_AN_OVER (1<<5) /* Bit 5: Auto-Negotiation Over */ #define PHY_ST_REM_FLT (1<<4) /* Bit 4: Remote Fault Condition Occured */ #define PHY_ST_AN_CAP (1<<3) /* Bit 3: Auto-Negotiation Capability */ #define PHY_ST_LSYNC (1<<2) /* Bit 2: Link Synchronized */ #define PHY_ST_JAB_DET (1<<1) /* Bit 1: Jabber Detected */ #define PHY_ST_EXT_REG (1<<0) /* Bit 0: Extended Register available */ #define PHY_I1_OUI_MSK (0x3f<<10) /* Bit 15..10: Organization Unique ID */ #define PHY_I1_MOD_NUM (0x3f<<4) /* Bit 9.. 4: Model Number */ #define PHY_I1_REV_MSK 0xf /* Bit 3.. 0: Revision Number */ /* different Marvell PHY Ids */ #define PHY_MARV_ID0_VAL 0x0141 /* Marvell Unique Identifier */ #define PHY_MARV_ID1_B0 0x0C23 /* Yukon (PHY 88E1011) */ #define PHY_MARV_ID1_B2 0x0C25 /* Yukon-Plus (PHY 88E1011) */ #define PHY_MARV_ID1_C2 0x0CC2 /* Yukon-EC (PHY 88E1111) */ #define PHY_MARV_ID1_Y2 0x0C91 /* Yukon-2 (PHY 88E1112) */ #define PHY_MARV_ID1_FE 0x0C83 /* Yukon-FE (PHY 88E3082 Rev.A1) */ #define PHY_MARV_ID1_ECU 0x0CB0 /* Yukon-2 (PHY 88E1149 Rev.B2?) */ /***** PHY_MARV_1000T_STAT 16 bit r/o 1000Base-T Status Reg *****/ #define PHY_B_1000S_MSF (1<<15) /* Bit 15: Master/Slave Fault */ #define PHY_B_1000S_MSR (1<<14) /* Bit 14: Master/Slave Result */ #define PHY_B_1000S_LRS (1<<13) /* Bit 13: Local Receiver Status */ #define PHY_B_1000S_RRS (1<<12) /* Bit 12: Remote Receiver Status */ #define PHY_B_1000S_LP_FD (1<<11) /* Bit 11: Link Partner can FD */ #define PHY_B_1000S_LP_HD (1<<10) /* Bit 10: Link Partner can HD */ #define PHY_B_1000S_IEC 0xff /* Bit 7..0: Idle Error Count */ /***** PHY_MARV_AUNE_ADV 16 bit r/w Auto-Negotiation Advertisement *****/ /***** PHY_MARV_AUNE_LP 16 bit r/w Link Part Ability Reg *****/ #define PHY_M_AN_NXT_PG BIT_15 /* Request Next Page */ #define PHY_M_AN_ACK BIT_14 /* (ro) Acknowledge Received */ #define PHY_M_AN_RF BIT_13 /* Remote Fault */ #define PHY_M_AN_ASP BIT_11 /* Asymmetric Pause */ #define PHY_M_AN_PC BIT_10 /* MAC Pause implemented */ #define PHY_M_AN_100_T4 BIT_9 /* Not cap. 100Base-T4 (always 0) */ #define PHY_M_AN_100_FD BIT_8 /* Advertise 100Base-TX Full Duplex */ #define PHY_M_AN_100_HD BIT_7 /* Advertise 100Base-TX Half Duplex */ #define PHY_M_AN_10_FD BIT_6 /* Advertise 10Base-TX Full Duplex */ #define PHY_M_AN_10_HD BIT_5 /* Advertise 10Base-TX Half Duplex */ #define PHY_M_AN_SEL_MSK (0x1f<<4) /* Bit 4.. 
0: Selector Field Mask */ /* special defines for FIBER (88E1011S only) */ #define PHY_M_AN_ASP_X BIT_8 /* Asymmetric Pause */ #define PHY_M_AN_PC_X BIT_7 /* MAC Pause implemented */ #define PHY_M_AN_1000X_AHD BIT_6 /* Advertise 10000Base-X Half Duplex */ #define PHY_M_AN_1000X_AFD BIT_5 /* Advertise 10000Base-X Full Duplex */ /* Pause Bits (PHY_M_AN_ASP_X and PHY_M_AN_PC_X) encoding */ #define PHY_M_P_NO_PAUSE_X (0<<7) /* Bit 8.. 7: no Pause Mode */ #define PHY_M_P_SYM_MD_X (1<<7) /* Bit 8.. 7: symmetric Pause Mode */ #define PHY_M_P_ASYM_MD_X (2<<7) /* Bit 8.. 7: asymmetric Pause Mode */ #define PHY_M_P_BOTH_MD_X (3<<7) /* Bit 8.. 7: both Pause Mode */ /***** PHY_MARV_1000T_CTRL 16 bit r/w 1000Base-T Control Reg *****/ #define PHY_M_1000C_TEST (7<<13) /* Bit 15..13: Test Modes */ #define PHY_M_1000C_MSE BIT_12 /* Manual Master/Slave Enable */ #define PHY_M_1000C_MSC BIT_11 /* M/S Configuration (1=Master) */ #define PHY_M_1000C_MPD BIT_10 /* Multi-Port Device */ #define PHY_M_1000C_AFD BIT_9 /* Advertise Full Duplex */ #define PHY_M_1000C_AHD BIT_8 /* Advertise Half Duplex */ /***** PHY_MARV_PHY_CTRL 16 bit r/w PHY Specific Ctrl Reg *****/ #define PHY_M_PC_TX_FFD_MSK (3<<14) /* Bit 15..14: Tx FIFO Depth Mask */ #define PHY_M_PC_RX_FFD_MSK (3<<12) /* Bit 13..12: Rx FIFO Depth Mask */ #define PHY_M_PC_ASS_CRS_TX BIT_11 /* Assert CRS on Transmit */ #define PHY_M_PC_FL_GOOD BIT_10 /* Force Link Good */ #define PHY_M_PC_EN_DET_MSK (3<<8) /* Bit 9.. 8: Energy Detect Mask */ #define PHY_M_PC_ENA_EXT_D BIT_7 /* Enable Ext. Distance (10BT) */ #define PHY_M_PC_MDIX_MSK (3<<5) /* Bit 6.. 5: MDI/MDIX Config. Mask */ #define PHY_M_PC_DIS_125CLK BIT_4 /* Disable 125 CLK */ #define PHY_M_PC_MAC_POW_UP BIT_3 /* MAC Power up */ #define PHY_M_PC_SQE_T_ENA BIT_2 /* SQE Test Enabled */ #define PHY_M_PC_POL_R_DIS BIT_1 /* Polarity Reversal Disabled */ #define PHY_M_PC_DIS_JABBER BIT_0 /* Disable Jabber */ #define PHY_M_PC_EN_DET SHIFT8(2) /* Energy Detect (Mode 1) */ #define PHY_M_PC_EN_DET_PLUS SHIFT8(3) /* Energy Detect Plus (Mode 2) */ #define PHY_M_PC_MDI_XMODE(x) (SHIFT5(x) & PHY_M_PC_MDIX_MSK) #define PHY_M_PC_MAN_MDI 0 /* 00 = Manual MDI configuration */ #define PHY_M_PC_MAN_MDIX 1 /* 01 = Manual MDIX configuration */ #define PHY_M_PC_ENA_AUTO 3 /* 11 = Enable Automatic Crossover */ /* for Yukon-2 Gigabit Ethernet PHY (88E1112 only) */ #define PHY_M_PC_DIS_LINK_P BIT_15 /* Disable Link Pulses */ #define PHY_M_PC_DSC_MSK (7<<12) /* Bit 14..12: Downshift Counter */ #define PHY_M_PC_DOWN_S_ENA BIT_11 /* Downshift Enable */ /* !!! Errata in spec. (1 = disable) */ #define PHY_M_PC_DSC(x) (SHIFT12(x) & PHY_M_PC_DSC_MSK) /* 000=1x; 001=2x; 010=3x; 011=4x */ /* 100=5x; 101=6x; 110=7x; 111=8x */ /* for 10/100 Fast Ethernet PHY (88E3082 only) */ #define PHY_M_PC_ENA_DTE_DT BIT_15 /* Enable Data Terminal Equ. (DTE) Detect */ #define PHY_M_PC_ENA_ENE_DT BIT_14 /* Enable Energy Detect (sense & pulse) */ #define PHY_M_PC_DIS_NLP_CK BIT_13 /* Disable Normal Link Puls (NLP) Check */ #define PHY_M_PC_ENA_LIP_NP BIT_12 /* Enable Link Partner Next Page Reg. */ #define PHY_M_PC_DIS_NLP_GN BIT_11 /* Disable Normal Link Puls Generation */ #define PHY_M_PC_DIS_SCRAMB BIT_9 /* Disable Scrambler */ #define PHY_M_PC_DIS_FEFI BIT_8 /* Disable Far End Fault Indic. (FEFI) */ #define PHY_M_PC_SH_TP_SEL BIT_6 /* Shielded Twisted Pair Select */ #define PHY_M_PC_RX_FD_MSK (3<<2) /* Bit 3.. 
2: Rx FIFO Depth Mask */ /***** PHY_MARV_PHY_STAT 16 bit r/o PHY Specific Status Reg *****/ #define PHY_M_PS_SPEED_MSK (3<<14) /* Bit 15..14: Speed Mask */ #define PHY_M_PS_SPEED_1000 BIT_15 /* 10 = 1000 Mbps */ #define PHY_M_PS_SPEED_100 BIT_14 /* 01 = 100 Mbps */ #define PHY_M_PS_SPEED_10 0 /* 00 = 10 Mbps */ #define PHY_M_PS_FULL_DUP BIT_13 /* Full Duplex */ #define PHY_M_PS_PAGE_REC BIT_12 /* Page Received */ #define PHY_M_PS_SPDUP_RES BIT_11 /* Speed & Duplex Resolved */ #define PHY_M_PS_LINK_UP BIT_10 /* Link Up */ #define PHY_M_PS_CABLE_MSK (7<<7) /* Bit 9.. 7: Cable Length Mask */ #define PHY_M_PS_MDI_X_STAT BIT_6 /* MDI Crossover Stat (1=MDIX) */ #define PHY_M_PS_DOWNS_STAT BIT_5 /* Downshift Status (1=downsh.) */ #define PHY_M_PS_ENDET_STAT BIT_4 /* Energy Detect Status (1=act) */ #define PHY_M_PS_TX_P_EN BIT_3 /* Tx Pause Enabled */ #define PHY_M_PS_RX_P_EN BIT_2 /* Rx Pause Enabled */ #define PHY_M_PS_POL_REV BIT_1 /* Polarity Reversed */ #define PHY_M_PS_JABBER BIT_0 /* Jabber */ #define PHY_M_PS_PAUSE_MSK (PHY_M_PS_TX_P_EN | PHY_M_PS_RX_P_EN) /* for 10/100 Fast Ethernet PHY (88E3082 only) */ #define PHY_M_PS_DTE_DETECT BIT_15 /* Data Terminal Equipment (DTE) Detected */ #define PHY_M_PS_RES_SPEED BIT_14 /* Resolved Speed (1=100 Mbps, 0=10 Mbps */ /***** PHY_MARV_INT_MASK 16 bit r/w Interrupt Mask Reg *****/ /***** PHY_MARV_INT_STAT 16 bit r/o Interrupt Status Reg *****/ #define PHY_M_IS_AN_ERROR BIT_15 /* Auto-Negotiation Error */ #define PHY_M_IS_LSP_CHANGE BIT_14 /* Link Speed Changed */ #define PHY_M_IS_DUP_CHANGE BIT_13 /* Duplex Mode Changed */ #define PHY_M_IS_AN_PR BIT_12 /* Page Received */ #define PHY_M_IS_AN_COMPL BIT_11 /* Auto-Negotiation Completed */ #define PHY_M_IS_LST_CHANGE BIT_10 /* Link Status Changed */ #define PHY_M_IS_SYMB_ERROR BIT_9 /* Symbol Error */ #define PHY_M_IS_FALSE_CARR BIT_8 /* False Carrier */ #define PHY_M_IS_FIFO_ERROR BIT_7 /* FIFO Overflow/Underrun Error */ #define PHY_M_IS_MDI_CHANGE BIT_6 /* MDI Crossover Changed */ #define PHY_M_IS_DOWNSH_DET BIT_5 /* Downshift Detected */ #define PHY_M_IS_END_CHANGE BIT_4 /* Energy Detect Changed */ #define PHY_M_IS_DTE_CHANGE BIT_2 /* DTE Power Det. Status Changed */ #define PHY_M_IS_POL_CHANGE BIT_1 /* Polarity Changed */ #define PHY_M_IS_JABBER BIT_0 /* Jabber */ #define PHY_M_DEF_MSK (PHY_M_IS_AN_ERROR | PHY_M_IS_AN_PR | \ PHY_M_IS_LST_CHANGE | PHY_M_IS_FIFO_ERROR) /***** PHY_MARV_EXT_CTRL 16 bit r/w Ext. PHY Specific Ctrl *****/ #define PHY_M_EC_ENA_BC_EXT BIT_15 /* Enable Block Carr. Ext. (88E1111 only) */ #define PHY_M_EC_ENA_LIN_LB BIT_14 /* Enable Line Loopback (88E1111 only) */ #define PHY_M_EC_DIS_LINK_P BIT_12 /* Disable Link Pulses (88E1111 only) */ #define PHY_M_EC_M_DSC_MSK (3<<10) /* Bit 11..10: Master Downshift Counter */ /* (88E1011 only) */ #define PHY_M_EC_S_DSC_MSK (3<<8) /* Bit 9.. 8: Slave Downshift Counter */ /* (88E1011 only) */ #define PHY_M_EC_DSC_MSK_2 (7<<9) /* Bit 11.. 9: Downshift Counter */ /* (88E1111 only) */ #define PHY_M_EC_DOWN_S_ENA BIT_8 /* Downshift Enable (88E1111 only) */ /* !!! Errata in spec. (1 = disable) */ #define PHY_M_EC_RX_TIM_CT BIT_7 /* RGMII Rx Timing Control*/ #define PHY_M_EC_MAC_S_MSK (7<<4) /* Bit 6.. 4: Def. MAC interface speed */ #define PHY_M_EC_FIB_AN_ENA BIT_3 /* Fiber Auto-Neg. 
Enable (88E1011S only) */ #define PHY_M_EC_DTE_D_ENA BIT_2 /* DTE Detect Enable (88E1111 only) */ #define PHY_M_EC_TX_TIM_CT BIT_1 /* RGMII Tx Timing Control */ #define PHY_M_EC_TRANS_DIS BIT_0 /* Transmitter Disable (88E1111 only) */ #define PHY_M_EC_M_DSC(x) (SHIFT10(x) & PHY_M_EC_M_DSC_MSK) /* 00=1x; 01=2x; 10=3x; 11=4x */ #define PHY_M_EC_S_DSC(x) (SHIFT8(x) & PHY_M_EC_S_DSC_MSK) /* 00=dis; 01=1x; 10=2x; 11=3x */ #define PHY_M_EC_MAC_S(x) (SHIFT4(x) & PHY_M_EC_MAC_S_MSK) /* 01X=0; 110=2.5; 111=25 (MHz) */ #define PHY_M_EC_DSC_2(x) (SHIFT9(x) & PHY_M_EC_DSC_MSK_2) /* 000=1x; 001=2x; 010=3x; 011=4x */ /* 100=5x; 101=6x; 110=7x; 111=8x */ #define MAC_TX_CLK_0_MHZ 2 #define MAC_TX_CLK_2_5_MHZ 6 #define MAC_TX_CLK_25_MHZ 7 /***** PHY_MARV_LED_CTRL 16 bit r/w LED Control Reg *****/ #define PHY_M_LEDC_DIS_LED BIT_15 /* Disable LED */ #define PHY_M_LEDC_PULS_MSK (7<<12) /* Bit 14..12: Pulse Stretch Mask */ #define PHY_M_LEDC_F_INT BIT_11 /* Force Interrupt */ #define PHY_M_LEDC_BL_R_MSK (7<<8) /* Bit 10.. 8: Blink Rate Mask */ #define PHY_M_LEDC_DP_C_LSB BIT_7 /* Duplex Control (LSB, 88E1111 only) */ #define PHY_M_LEDC_TX_C_LSB BIT_6 /* Tx Control (LSB, 88E1111 only) */ #define PHY_M_LEDC_LK_C_MSK (7<<3) /* Bit 5.. 3: Link Control Mask */ /* (88E1111 only) */ #define PHY_M_LEDC_LINK_MSK (3<<3) /* Bit 4.. 3: Link Control Mask */ /* (88E1011 only) */ #define PHY_M_LEDC_DP_CTRL BIT_2 /* Duplex Control */ #define PHY_M_LEDC_DP_C_MSB BIT_2 /* Duplex Control (MSB, 88E1111 only) */ #define PHY_M_LEDC_RX_CTRL BIT_1 /* Rx Activity / Link */ #define PHY_M_LEDC_TX_CTRL BIT_0 /* Tx Activity / Link */ #define PHY_M_LEDC_TX_C_MSB BIT_0 /* Tx Control (MSB, 88E1111 only) */ #define PHY_M_LED_PULS_DUR(x) (SHIFT12(x) & PHY_M_LEDC_PULS_MSK) #define PULS_NO_STR 0 /* no pulse stretching */ #define PULS_21MS 1 /* 21 ms to 42 ms */ #define PULS_42MS 2 /* 42 ms to 84 ms */ #define PULS_84MS 3 /* 84 ms to 170 ms */ #define PULS_170MS 4 /* 170 ms to 340 ms */ #define PULS_340MS 5 /* 340 ms to 670 ms */ #define PULS_670MS 6 /* 670 ms to 1.3 s */ #define PULS_1300MS 7 /* 1.3 s to 2.7 s */ #define PHY_M_LED_BLINK_RT(x) (SHIFT8(x) & PHY_M_LEDC_BL_R_MSK) #define BLINK_42MS 0 /* 42 ms */ #define BLINK_84MS 1 /* 84 ms */ #define BLINK_170MS 2 /* 170 ms */ #define BLINK_340MS 3 /* 340 ms */ #define BLINK_670MS 4 /* 670 ms */ /***** PHY_MARV_LED_OVER 16 bit r/w Manual LED Override Reg *****/ #define PHY_M_LED_MO_SGMII(x) SHIFT14(x) /* Bit 15..14: SGMII AN Timer */ #define PHY_M_LED_MO_DUP(x) SHIFT10(x) /* Bit 11..10: Duplex */ #define PHY_M_LED_MO_10(x) SHIFT8(x) /* Bit 9.. 8: Link 10 */ #define PHY_M_LED_MO_100(x) SHIFT6(x) /* Bit 7.. 6: Link 100 */ #define PHY_M_LED_MO_1000(x) SHIFT4(x) /* Bit 5.. 4: Link 1000 */ #define PHY_M_LED_MO_RX(x) SHIFT2(x) /* Bit 3.. 2: Rx */ #define PHY_M_LED_MO_TX(x) SHIFT0(x) /* Bit 1.. 0: Tx */ #define MO_LED_NORM 0 #define MO_LED_BLINK 1 #define MO_LED_OFF 2 #define MO_LED_ON 3 /***** PHY_MARV_EXT_CTRL_2 16 bit r/w Ext. PHY Specific Ctrl 2 *****/ #define PHY_M_EC2_FI_IMPED BIT_6 /* Fiber Input Impedance */ #define PHY_M_EC2_FO_IMPED BIT_5 /* Fiber Output Impedance */ #define PHY_M_EC2_FO_M_CLK BIT_4 /* Fiber Mode Clock Enable */ #define PHY_M_EC2_FO_BOOST BIT_3 /* Fiber Output Boost */ #define PHY_M_EC2_FO_AM_MSK 7 /* Bit 2.. 0: Fiber Output Amplitude */ /***** PHY_MARV_EXT_P_STAT 16 bit r/w Ext. PHY Specific Status *****/ #define PHY_M_FC_AUTO_SEL BIT_15 /* Fiber/Copper Auto Sel. Dis. */ #define PHY_M_FC_AN_REG_ACC BIT_14 /* Fiber/Copper AN Reg. 
Access */ #define PHY_M_FC_RESOLUTION BIT_13 /* Fiber/Copper Resolution */ #define PHY_M_SER_IF_AN_BP BIT_12 /* Ser. IF AN Bypass Enable */ #define PHY_M_SER_IF_BP_ST BIT_11 /* Ser. IF AN Bypass Status */ #define PHY_M_IRQ_POLARITY BIT_10 /* IRQ polarity */ #define PHY_M_DIS_AUT_MED BIT_9 /* Disable Aut. Medium Reg. Selection */ /* (88E1111 only) */ #define PHY_M_UNDOC1 BIT_7 /* undocumented bit !! */ #define PHY_M_DTE_POW_STAT BIT_4 /* DTE Power Status (88E1111 only) */ #define PHY_M_MODE_MASK 0xf /* Bit 3.. 0: copy of HWCFG MODE[3:0] */ /***** PHY_MARV_CABLE_DIAG 16 bit r/o Cable Diagnostic Reg *****/ #define PHY_M_CABD_ENA_TEST BIT_15 /* Enable Test (Page 0) */ #define PHY_M_CABD_DIS_WAIT BIT_15 /* Disable Waiting Period (Page 1) */ /* (88E1111 only) */ #define PHY_M_CABD_STAT_MSK (3<<13) /* Bit 14..13: Status Mask */ #define PHY_M_CABD_AMPL_MSK (0x1f<<8) /* Bit 12.. 8: Amplitude Mask */ /* (88E1111 only) */ #define PHY_M_CABD_DIST_MSK 0xff /* Bit 7.. 0: Distance Mask */ /* values for Cable Diagnostic Status (11=fail; 00=OK; 10=open; 01=short) */ #define CABD_STAT_NORMAL 0 #define CABD_STAT_SHORT 1 #define CABD_STAT_OPEN 2 #define CABD_STAT_FAIL 3 /* for 10/100 Fast Ethernet PHY (88E3082 only) */ /***** PHY_MARV_FE_LED_PAR 16 bit r/w LED Parallel Select Reg. *****/ #define PHY_M_FELP_LED2_MSK (0xf<<8) /* Bit 11.. 8: LED2 Mask (LINK) */ #define PHY_M_FELP_LED1_MSK (0xf<<4) /* Bit 7.. 4: LED1 Mask (ACT) */ #define PHY_M_FELP_LED0_MSK 0xf /* Bit 3.. 0: LED0 Mask (SPEED) */ #define PHY_M_FELP_LED2_CTRL(x) (SHIFT8(x) & PHY_M_FELP_LED2_MSK) #define PHY_M_FELP_LED1_CTRL(x) (SHIFT4(x) & PHY_M_FELP_LED1_MSK) #define PHY_M_FELP_LED0_CTRL(x) (SHIFT0(x) & PHY_M_FELP_LED0_MSK) #define LED_PAR_CTRL_COLX 0x00 #define LED_PAR_CTRL_ERROR 0x01 #define LED_PAR_CTRL_DUPLEX 0x02 #define LED_PAR_CTRL_DP_COL 0x03 #define LED_PAR_CTRL_SPEED 0x04 #define LED_PAR_CTRL_LINK 0x05 #define LED_PAR_CTRL_TX 0x06 #define LED_PAR_CTRL_RX 0x07 #define LED_PAR_CTRL_ACT 0x08 #define LED_PAR_CTRL_LNK_RX 0x09 #define LED_PAR_CTRL_LNK_AC 0x0a #define LED_PAR_CTRL_ACT_BL 0x0b #define LED_PAR_CTRL_TX_BL 0x0c #define LED_PAR_CTRL_RX_BL 0x0d #define LED_PAR_CTRL_COL_BL 0x0e #define LED_PAR_CTRL_INACT 0x0f /***** PHY_MARV_FE_SPEC_2 16 bit r/w Specific Control Reg. 2 *****/ #define PHY_M_FESC_DIS_WAIT BIT_2 /* Disable TDR Waiting Period */ #define PHY_M_FESC_ENA_MCLK BIT_1 /* Enable MAC Rx Clock in sleep mode */ #define PHY_M_FESC_SEL_CL_A BIT_0 /* Select Class A driver (100B-TX) */ /* for Yukon-2 Gigabit Ethernet PHY (88E1112 only) */ /***** PHY_MARV_PHY_CTRL (page 1) 16 bit r/w Fiber Specific Ctrl *****/ #define PHY_M_FIB_FORCE_LNK BIT_10 /* Force Link Good */ #define PHY_M_FIB_SIGD_POL BIT_9 /* SIGDET Polarity */ #define PHY_M_FIB_TX_DIS BIT_3 /* Transmitter Disable */ /***** PHY_MARV_PHY_CTRL (page 2) 16 bit r/w MAC Specific Ctrl *****/ #define PHY_M_MAC_MD_MSK (7<<7) /* Bit 9.. 7: Mode Select Mask */ #define PHY_M_MAC_MD_AUTO 3 /* Auto Copper/1000Base-X */ #define PHY_M_MAC_MD_COPPER 5 /* Copper only */ #define PHY_M_MAC_MD_1000BX 7 /* 1000Base-X only */ #define PHY_M_MAC_MODE_SEL(x) (SHIFT7(x) & PHY_M_MAC_MD_MSK) /***** PHY_MARV_PHY_CTRL (page 3) 16 bit r/w LED Control Reg. *****/ #define PHY_M_LEDC_LOS_MSK (0xf<<12) /* Bit 15..12: LOS LED Ctrl. Mask */ #define PHY_M_LEDC_INIT_MSK (0xf<<8) /* Bit 11.. 8: INIT LED Ctrl. Mask */ #define PHY_M_LEDC_STA1_MSK (0xf<<4) /* Bit 7.. 4: STAT1 LED Ctrl. Mask */ #define PHY_M_LEDC_STA0_MSK 0xf /* Bit 3.. 0: STAT0 LED Ctrl. 
Mask */ #define PHY_M_LEDC_LOS_CTRL(x) (SHIFT12(x) & PHY_M_LEDC_LOS_MSK) #define PHY_M_LEDC_INIT_CTRL(x) (SHIFT8(x) & PHY_M_LEDC_INIT_MSK) #define PHY_M_LEDC_STA1_CTRL(x) (SHIFT4(x) & PHY_M_LEDC_STA1_MSK) #define PHY_M_LEDC_STA0_CTRL(x) (SHIFT0(x) & PHY_M_LEDC_STA0_MSK) /***** PHY_MARV_PHY_STAT (page 3) 16 bit r/w Polarity Control Reg. *****/ #define PHY_M_POLC_LS1M_MSK (0xf<<12) /* Bit 15..12: LOS,STAT1 Mix % Mask */ #define PHY_M_POLC_IS0M_MSK (0xf<<8) /* Bit 11.. 8: INIT,STAT0 Mix % Mask */ #define PHY_M_POLC_LOS_MSK (0x3<<6) /* Bit 7.. 6: LOS Pol. Ctrl. Mask */ #define PHY_M_POLC_INIT_MSK (0x3<<4) /* Bit 5.. 4: INIT Pol. Ctrl. Mask */ #define PHY_M_POLC_STA1_MSK (0x3<<2) /* Bit 3.. 2: STAT1 Pol. Ctrl. Mask */ #define PHY_M_POLC_STA0_MSK 0x3 /* Bit 1.. 0: STAT0 Pol. Ctrl. Mask */ #define PHY_M_POLC_LS1_P_MIX(x) (SHIFT12(x) & PHY_M_POLC_LS1M_MSK) #define PHY_M_POLC_IS0_P_MIX(x) (SHIFT8(x) & PHY_M_POLC_IS0M_MSK) #define PHY_M_POLC_LOS_CTRL(x) (SHIFT6(x) & PHY_M_POLC_LOS_MSK) #define PHY_M_POLC_INIT_CTRL(x) (SHIFT4(x) & PHY_M_POLC_INIT_MSK) #define PHY_M_POLC_STA1_CTRL(x) (SHIFT2(x) & PHY_M_POLC_STA1_MSK) #define PHY_M_POLC_STA0_CTRL(x) (SHIFT0(x) & PHY_M_POLC_STA0_MSK) /* * GMAC registers * * The GMAC registers are 16 or 32 bits wide. * The GMACs host processor interface is 16 bits wide, * therefore ALL registers will be addressed with 16 bit accesses. * * Note: NA reg = Network Address e.g DA, SA etc. */ /* Port Registers */ #define GM_GP_STAT 0x0000 /* 16 bit r/o General Purpose Status */ #define GM_GP_CTRL 0x0004 /* 16 bit r/w General Purpose Control */ #define GM_TX_CTRL 0x0008 /* 16 bit r/w Transmit Control Reg. */ #define GM_RX_CTRL 0x000c /* 16 bit r/w Receive Control Reg. */ #define GM_TX_FLOW_CTRL 0x0010 /* 16 bit r/w Transmit Flow-Control */ #define GM_TX_PARAM 0x0014 /* 16 bit r/w Transmit Parameter Reg. */ #define GM_SERIAL_MODE 0x0018 /* 16 bit r/w Serial Mode Register */ /* Source Address Registers */ #define GM_SRC_ADDR_1L 0x001c /* 16 bit r/w Source Address 1 (low) */ #define GM_SRC_ADDR_1M 0x0020 /* 16 bit r/w Source Address 1 (middle) */ #define GM_SRC_ADDR_1H 0x0024 /* 16 bit r/w Source Address 1 (high) */ #define GM_SRC_ADDR_2L 0x0028 /* 16 bit r/w Source Address 2 (low) */ #define GM_SRC_ADDR_2M 0x002c /* 16 bit r/w Source Address 2 (middle) */ #define GM_SRC_ADDR_2H 0x0030 /* 16 bit r/w Source Address 2 (high) */ /* Multicast Address Hash Registers */ #define GM_MC_ADDR_H1 0x0034 /* 16 bit r/w Multicast Address Hash 1 */ #define GM_MC_ADDR_H2 0x0038 /* 16 bit r/w Multicast Address Hash 2 */ #define GM_MC_ADDR_H3 0x003c /* 16 bit r/w Multicast Address Hash 3 */ #define GM_MC_ADDR_H4 0x0040 /* 16 bit r/w Multicast Address Hash 4 */ /* Interrupt Source Registers */ #define GM_TX_IRQ_SRC 0x0044 /* 16 bit r/o Tx Overflow IRQ Source */ #define GM_RX_IRQ_SRC 0x0048 /* 16 bit r/o Rx Overflow IRQ Source */ #define GM_TR_IRQ_SRC 0x004c /* 16 bit r/o Tx/Rx Over. IRQ Source */ /* Interrupt Mask Registers */ #define GM_TX_IRQ_MSK 0x0050 /* 16 bit r/w Tx Overflow IRQ Mask */ #define GM_RX_IRQ_MSK 0x0054 /* 16 bit r/w Rx Overflow IRQ Mask */ #define GM_TR_IRQ_MSK 0x0058 /* 16 bit r/w Tx/Rx Over. 
IRQ Mask */ /* Serial Management Interface (SMI) Registers */ #define GM_SMI_CTRL 0x0080 /* 16 bit r/w SMI Control Register */ #define GM_SMI_DATA 0x0084 /* 16 bit r/w SMI Data Register */ #define GM_PHY_ADDR 0x0088 /* 16 bit r/w GPHY Address Register */ /* MIB Counters */ #define GM_MIB_CNT_BASE 0x0100 /* Base Address of MIB Counters */ #define GM_MIB_CNT_SIZE 44 /* Number of MIB Counters */ /* * MIB Counters base address definitions (low word) - * use offset 4 for access to high word (32 bit r/o) */ #define GM_RXF_UC_OK \ (GM_MIB_CNT_BASE + 0) /* Unicast Frames Received OK */ #define GM_RXF_BC_OK \ (GM_MIB_CNT_BASE + 8) /* Broadcast Frames Received OK */ #define GM_RXF_MPAUSE \ (GM_MIB_CNT_BASE + 16) /* Pause MAC Ctrl Frames Received */ #define GM_RXF_MC_OK \ (GM_MIB_CNT_BASE + 24) /* Multicast Frames Received OK */ #define GM_RXF_FCS_ERR \ (GM_MIB_CNT_BASE + 32) /* Rx Frame Check Seq. Error */ #define GM_RXF_SPARE1 \ (GM_MIB_CNT_BASE + 40) /* Rx spare 1 */ #define GM_RXO_OK_LO \ (GM_MIB_CNT_BASE + 48) /* Octets Received OK Low */ #define GM_RXO_OK_HI \ (GM_MIB_CNT_BASE + 56) /* Octets Received OK High */ #define GM_RXO_ERR_LO \ (GM_MIB_CNT_BASE + 64) /* Octets Received Invalid Low */ #define GM_RXO_ERR_HI \ (GM_MIB_CNT_BASE + 72) /* Octets Received Invalid High */ #define GM_RXF_SHT \ (GM_MIB_CNT_BASE + 80) /* Frames <64 Byte Received OK */ #define GM_RXE_FRAG \ (GM_MIB_CNT_BASE + 88) /* Frames <64 Byte Received with FCS Err */ #define GM_RXF_64B \ (GM_MIB_CNT_BASE + 96) /* 64 Byte Rx Frame */ #define GM_RXF_127B \ (GM_MIB_CNT_BASE + 104) /* 65-127 Byte Rx Frame */ #define GM_RXF_255B \ (GM_MIB_CNT_BASE + 112) /* 128-255 Byte Rx Frame */ #define GM_RXF_511B \ (GM_MIB_CNT_BASE + 120) /* 256-511 Byte Rx Frame */ #define GM_RXF_1023B \ (GM_MIB_CNT_BASE + 128) /* 512-1023 Byte Rx Frame */ #define GM_RXF_1518B \ (GM_MIB_CNT_BASE + 136) /* 1024-1518 Byte Rx Frame */ #define GM_RXF_MAX_SZ \ (GM_MIB_CNT_BASE + 144) /* 1519-MaxSize Byte Rx Frame */ #define GM_RXF_LNG_ERR \ (GM_MIB_CNT_BASE + 152) /* Rx Frame too Long Error */ #define GM_RXF_JAB_PKT \ (GM_MIB_CNT_BASE + 160) /* Rx Jabber Packet Frame */ #define GM_RXF_SPARE2 \ (GM_MIB_CNT_BASE + 168) /* Rx spare 2 */ #define GM_RXE_FIFO_OV \ (GM_MIB_CNT_BASE + 176) /* Rx FIFO overflow Event */ #define GM_RXF_SPARE3 \ (GM_MIB_CNT_BASE + 184) /* Rx spare 3 */ #define GM_TXF_UC_OK \ (GM_MIB_CNT_BASE + 192) /* Unicast Frames Xmitted OK */ #define GM_TXF_BC_OK \ (GM_MIB_CNT_BASE + 200) /* Broadcast Frames Xmitted OK */ #define GM_TXF_MPAUSE \ (GM_MIB_CNT_BASE + 208) /* Pause MAC Ctrl Frames Xmitted */ #define GM_TXF_MC_OK \ (GM_MIB_CNT_BASE + 216) /* Multicast Frames Xmitted OK */ #define GM_TXO_OK_LO \ (GM_MIB_CNT_BASE + 224) /* Octets Transmitted OK Low */ #define GM_TXO_OK_HI \ (GM_MIB_CNT_BASE + 232) /* Octets Transmitted OK High */ #define GM_TXF_64B \ (GM_MIB_CNT_BASE + 240) /* 64 Byte Tx Frame */ #define GM_TXF_127B \ (GM_MIB_CNT_BASE + 248) /* 65-127 Byte Tx Frame */ #define GM_TXF_255B \ (GM_MIB_CNT_BASE + 256) /* 128-255 Byte Tx Frame */ #define GM_TXF_511B \ (GM_MIB_CNT_BASE + 264) /* 256-511 Byte Tx Frame */ #define GM_TXF_1023B \ (GM_MIB_CNT_BASE + 272) /* 512-1023 Byte Tx Frame */ #define GM_TXF_1518B \ (GM_MIB_CNT_BASE + 280) /* 1024-1518 Byte Tx Frame */ #define GM_TXF_MAX_SZ \ (GM_MIB_CNT_BASE + 288) /* 1519-MaxSize Byte Tx Frame */ #define GM_TXF_SPARE1 \ (GM_MIB_CNT_BASE + 296) /* Tx spare 1 */ #define GM_TXF_COL \ (GM_MIB_CNT_BASE + 304) /* Tx Collision */ #define GM_TXF_LAT_COL \ (GM_MIB_CNT_BASE + 312) /* Tx Late Collision */ 
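/*
 * Illustrative sketch, not part of the original header: reading the MIB
 * counters defined above.  Per the note at GM_MIB_CNT_BASE, each counter's
 * low 16-bit word sits at its base offset and the high word at offset 4,
 * so a 32-bit value takes two 16-bit accesses; the 64-bit octet counters
 * are additionally split into _LO/_HI halves 8 bytes apart.  The helper
 * names below are hypothetical, they rely on the GMAC_READ_2() accessor
 * defined further down in this file, and they would live in if_msk.c
 * (after the softc declarations), not in this header.
 */
static __inline uint32_t
example_mib_read32(struct msk_softc *sc, int port, int off)
{
	uint32_t val;

	/* The GMAC host interface is 16 bits wide: two reads per counter. */
	val = (uint32_t)GMAC_READ_2(sc, port, off + 4) << 16;
	val |= (uint32_t)GMAC_READ_2(sc, port, off);
	return (val);
}

static __inline uint64_t
example_mib_read64(struct msk_softc *sc, int port, int off)
{
	uint64_t val;

	/* e.g. off = GM_RXO_OK_LO; the high 32 bits live 8 bytes above. */
	val = (uint64_t)example_mib_read32(sc, port, off + 8) << 32;
	val |= (uint64_t)example_mib_read32(sc, port, off);
	return (val);
}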
#define GM_TXF_ABO_COL \ (GM_MIB_CNT_BASE + 320) /* Tx aborted due to Exces. Col. */ #define GM_TXF_MUL_COL \ (GM_MIB_CNT_BASE + 328) /* Tx Multiple Collision */ #define GM_TXF_SNG_COL \ (GM_MIB_CNT_BASE + 336) /* Tx Single Collision */ #define GM_TXE_FIFO_UR \ (GM_MIB_CNT_BASE + 344) /* Tx FIFO Underrun Event */ /*----------------------------------------------------------------------------*/ /* * GMAC Bit Definitions * * If the bit access behaviour differs from the register access behaviour * (r/w, r/o) this is documented after the bit number. * The following bit access behaviours are used: * (sc) self clearing * (r/o) read only */ /* GM_GP_STAT 16 bit r/o General Purpose Status Register */ #define GM_GPSR_SPEED BIT_15 /* Port Speed (1 = 100 Mbps) */ #define GM_GPSR_DUPLEX BIT_14 /* Duplex Mode (1 = Full) */ #define GM_GPSR_FC_TX_DIS BIT_13 /* Tx Flow-Control Mode Disabled */ #define GM_GPSR_LINK_UP BIT_12 /* Link Up Status */ #define GM_GPSR_PAUSE BIT_11 /* Pause State */ #define GM_GPSR_TX_ACTIVE BIT_10 /* Tx in Progress */ #define GM_GPSR_EXC_COL BIT_9 /* Excessive Collisions Occured */ #define GM_GPSR_LAT_COL BIT_8 /* Late Collisions Occured */ #define GM_GPSR_PHY_ST_CH BIT_5 /* PHY Status Change */ #define GM_GPSR_GIG_SPEED BIT_4 /* Gigabit Speed (1 = 1000 Mbps) */ #define GM_GPSR_PART_MODE BIT_3 /* Partition mode */ #define GM_GPSR_FC_RX_DIS BIT_2 /* Rx Flow-Control Mode Disabled */ /* GM_GP_CTRL 16 bit r/w General Purpose Control Register */ #define GM_GPCR_RMII_PH_ENA BIT_15 /* Enable RMII for PHY (Yukon-FE only) */ #define GM_GPCR_RMII_LB_ENA BIT_14 /* Enable RMII Loopback (Yukon-FE only) */ #define GM_GPCR_FC_TX_DIS BIT_13 /* Disable Tx Flow-Control Mode */ #define GM_GPCR_TX_ENA BIT_12 /* Enable Transmit */ #define GM_GPCR_RX_ENA BIT_11 /* Enable Receive */ #define GM_GPCR_LOOP_ENA BIT_9 /* Enable MAC Loopback Mode */ #define GM_GPCR_PART_ENA BIT_8 /* Enable Partition Mode */ #define GM_GPCR_GIGS_ENA BIT_7 /* Gigabit Speed (1000 Mbps) */ #define GM_GPCR_FL_PASS BIT_6 /* Force Link Pass */ #define GM_GPCR_DUP_FULL BIT_5 /* Full Duplex Mode */ #define GM_GPCR_FC_RX_DIS BIT_4 /* Disable Rx Flow-Control Mode */ #define GM_GPCR_SPEED_100 BIT_3 /* Port Speed 100 Mbps */ #define GM_GPCR_AU_DUP_DIS BIT_2 /* Disable Auto-Update Duplex */ #define GM_GPCR_AU_FCT_DIS BIT_1 /* Disable Auto-Update Flow-C. */ #define GM_GPCR_AU_SPD_DIS BIT_0 /* Disable Auto-Update Speed */ #define GM_GPCR_SPEED_1000 (GM_GPCR_GIGS_ENA | GM_GPCR_SPEED_100) #define GM_GPCR_AU_ALL_DIS (GM_GPCR_AU_DUP_DIS | GM_GPCR_AU_FCT_DIS |\ GM_GPCR_AU_SPD_DIS) /* GM_TX_CTRL 16 bit r/w Transmit Control Register */ #define GM_TXCR_FORCE_JAM BIT_15 /* Force Jam / Flow-Control */ #define GM_TXCR_CRC_DIS BIT_14 /* Disable insertion of CRC */ #define GM_TXCR_PAD_DIS BIT_13 /* Disable padding of packets */ #define GM_TXCR_COL_THR_MSK (7<<10) /* Bit 12..10: Collision Threshold Mask */ #define GM_TXCR_PAD_PAT_MSK 0xff /* Bit 7.. 0: Padding Pattern Mask */ /* (Yukon-2 only) */ #define TX_COL_THR(x) (SHIFT10(x) & GM_TXCR_COL_THR_MSK) #define TX_COL_DEF 0x04 /* GM_RX_CTRL 16 bit r/w Receive Control Register */ #define GM_RXCR_UCF_ENA BIT_15 /* Enable Unicast filtering */ #define GM_RXCR_MCF_ENA BIT_14 /* Enable Multicast filtering */ #define GM_RXCR_CRC_DIS BIT_13 /* Remove 4-byte CRC */ #define GM_RXCR_PASS_FC BIT_12 /* Pass FC packets to FIFO (Yukon-1 only) */ /* GM_TX_PARAM 16 bit r/w Transmit Parameter Register */ #define GM_TXPA_JAMLEN_MSK (3<<14) /* Bit 15..14: Jam Length Mask */ #define GM_TXPA_JAMIPG_MSK (0x1f<<9) /* Bit 13.. 
9: Jam IPG Mask */ #define GM_TXPA_JAMDAT_MSK (0x1f<<4) /* Bit 8.. 4: IPG Jam to Data Mask */ #define GM_TXPA_BO_LIM_MSK 0x0f /* Bit 3.. 0: Backoff Limit Mask */ /* (Yukon-2 only) */ #define TX_JAM_LEN_VAL(x) (SHIFT14(x) & GM_TXPA_JAMLEN_MSK) #define TX_JAM_IPG_VAL(x) (SHIFT9(x) & GM_TXPA_JAMIPG_MSK) #define TX_IPG_JAM_DATA(x) (SHIFT4(x) & GM_TXPA_JAMDAT_MSK) #define TX_BACK_OFF_LIM(x) ((x) & GM_TXPA_BO_LIM_MSK) #define TX_JAM_LEN_DEF 0x03 #define TX_JAM_IPG_DEF 0x0b #define TX_IPG_JAM_DEF 0x1c #define TX_BOF_LIM_DEF 0x04 /* GM_SERIAL_MODE 16 bit r/w Serial Mode Register */ #define GM_SMOD_DATABL_MSK (0x1f<<11) /* Bit 15..11: Data Blinder */ /* r/o on Yukon, r/w on Yukon-EC */ #define GM_SMOD_LIMIT_4 BIT_10 /* 4 consecutive Tx trials */ #define GM_SMOD_VLAN_ENA BIT_9 /* Enable VLAN (Max. Frame Len) */ #define GM_SMOD_JUMBO_ENA BIT_8 /* Enable Jumbo (Max. Frame Len) */ #define GM_SMOD_IPG_MSK 0x1f /* Bit 4.. 0: Inter-Packet Gap (IPG) */ #define DATA_BLIND_VAL(x) (SHIFT11(x) & GM_SMOD_DATABL_MSK) #define IPG_DATA_VAL(x) ((x) & GM_SMOD_IPG_MSK) #define DATA_BLIND_DEF 0x04 #define IPG_DATA_DEF 0x1e /* GM_SMI_CTRL 16 bit r/w SMI Control Register */ #define GM_SMI_CT_PHY_A_MSK (0x1f<<11) /* Bit 15..11: PHY Device Address */ #define GM_SMI_CT_REG_A_MSK (0x1f<<6) /* Bit 10.. 6: PHY Register Address */ #define GM_SMI_CT_OP_RD BIT_5 /* OpCode Read (0=Write)*/ #define GM_SMI_CT_RD_VAL BIT_4 /* Read Valid (Read completed) */ #define GM_SMI_CT_BUSY BIT_3 /* Busy (Operation in progress) */ #define GM_SMI_CT_PHY_AD(x) (SHIFT11(x) & GM_SMI_CT_PHY_A_MSK) #define GM_SMI_CT_REG_AD(x) (SHIFT6(x) & GM_SMI_CT_REG_A_MSK) /* GM_PHY_ADDR 16 bit r/w GPHY Address Register */ #define GM_PAR_MIB_CLR BIT_5 /* Set MIB Clear Counter Mode */ #define GM_PAR_MIB_TST BIT_4 /* MIB Load Counter (Test Mode) */ /* Receive Frame Status Encoding */ #define GMR_FS_LEN_MSK (0xffff<<16) /* Bit 31..16: Rx Frame Length */ #define GMR_FS_VLAN BIT_13 /* VLAN Packet */ #define GMR_FS_JABBER BIT_12 /* Jabber Packet */ #define GMR_FS_UN_SIZE BIT_11 /* Undersize Packet */ #define GMR_FS_MC BIT_10 /* Multicast Packet */ #define GMR_FS_BC BIT_9 /* Broadcast Packet */ #define GMR_FS_RX_OK BIT_8 /* Receive OK (Good Packet) */ #define GMR_FS_GOOD_FC BIT_7 /* Good Flow-Control Packet */ #define GMR_FS_BAD_FC BIT_6 /* Bad Flow-Control Packet */ #define GMR_FS_MII_ERR BIT_5 /* MII Error */ #define GMR_FS_LONG_ERR BIT_4 /* Too Long Packet */ #define GMR_FS_FRAGMENT BIT_3 /* Fragment */ #define GMR_FS_CRC_ERR BIT_1 /* CRC Error */ #define GMR_FS_RX_FF_OV BIT_0 /* Rx FIFO Overflow */ #define GMR_FS_LEN_SHIFT 16 #define GMR_FS_ANY_ERR ( \ GMR_FS_RX_FF_OV | \ GMR_FS_CRC_ERR | \ GMR_FS_FRAGMENT | \ GMR_FS_LONG_ERR | \ GMR_FS_MII_ERR | \ GMR_FS_BAD_FC | \ GMR_FS_GOOD_FC | \ GMR_FS_UN_SIZE | \ GMR_FS_JABBER) /* Rx GMAC FIFO Flush Mask (default) */ #define RX_FF_FL_DEF_MSK GMR_FS_ANY_ERR /* Receive and Transmit GMAC FIFO Registers (YUKON only) */ /* RX_GMF_EA 32 bit Rx GMAC FIFO End Address */ /* RX_GMF_AF_THR 32 bit Rx GMAC FIFO Almost Full Thresh. 
*/ /* RX_GMF_WP 32 bit Rx GMAC FIFO Write Pointer */ /* RX_GMF_WLEV 32 bit Rx GMAC FIFO Write Level */ /* RX_GMF_RP 32 bit Rx GMAC FIFO Read Pointer */ /* RX_GMF_RLEV 32 bit Rx GMAC FIFO Read Level */ /* TX_GMF_EA 32 bit Tx GMAC FIFO End Address */ /* TX_GMF_AE_THR 32 bit Tx GMAC FIFO Almost Empty Thresh.*/ /* TX_GMF_WP 32 bit Tx GMAC FIFO Write Pointer */ /* TX_GMF_WSP 32 bit Tx GMAC FIFO Write Shadow Pointer */ /* TX_GMF_WLEV 32 bit Tx GMAC FIFO Write Level */ /* TX_GMF_RP 32 bit Tx GMAC FIFO Read Pointer */ /* TX_GMF_RSTP 32 bit Tx GMAC FIFO Restart Pointer */ /* TX_GMF_RLEV 32 bit Tx GMAC FIFO Read Level */ /* RX_GMF_CTRL_T 32 bit Rx GMAC FIFO Control/Test */ #define RX_TRUNC_ON BIT_27 /* enable packet truncation */ #define RX_TRUNC_OFF BIT_26 /* disable packet truncation */ #define RX_VLAN_STRIP_ON BIT_25 /* enable VLAN stripping */ #define RX_VLAN_STRIP_OFF BIT_24 /* disable VLAN stripping */ #define GMF_RX_MACSEC_FLUSH_ON BIT_23 #define GMF_RX_MACSEC_FLUSH_OFF BIT_22 #define GMF_RX_OVER_ON BIT_19 /* enable flushing on receive overrun */ #define GMF_RX_OVER_OFF BIT_18 /* disable flushing on receive overrun */ #define GMF_ASF_RX_OVER_ON BIT_17 /* enable flushing of ASF when overrun */ #define GMF_ASF_RX_OVER_OFF BIT_16 /* disable flushing of ASF when overrun */ #define GMF_WP_TST_ON BIT_14 /* Write Pointer Test On */ #define GMF_WP_TST_OFF BIT_13 /* Write Pointer Test Off */ #define GMF_WP_STEP BIT_12 /* Write Pointer Step/Increment */ #define GMF_RP_TST_ON BIT_10 /* Read Pointer Test On */ #define GMF_RP_TST_OFF BIT_9 /* Read Pointer Test Off */ #define GMF_RP_STEP BIT_8 /* Read Pointer Step/Increment */ #define GMF_RX_F_FL_ON BIT_7 /* Rx FIFO Flush Mode On */ #define GMF_RX_F_FL_OFF BIT_6 /* Rx FIFO Flush Mode Off */ #define GMF_CLI_RX_FO BIT_5 /* Clear IRQ Rx FIFO Overrun */ #define GMF_CLI_RX_FC BIT_4 /* Clear IRQ Rx Frame Complete */ #define GMF_OPER_ON BIT_3 /* Operational Mode On */ #define GMF_OPER_OFF BIT_2 /* Operational Mode Off */ #define GMF_RST_CLR BIT_1 /* Clear GMAC FIFO Reset */ #define GMF_RST_SET BIT_0 /* Set GMAC FIFO Reset */ /* TX_GMF_CTRL_T 32 bit Tx GMAC FIFO Control/Test (YUKON and Yukon-2) */ #define TX_STFW_DIS BIT_31 /* Disable Store & Forward (Yukon-EC Ultra) */ #define TX_STFW_ENA BIT_30 /* Enable Store & Forward (Yukon-EC Ultra) */ #define TX_VLAN_TAG_ON BIT_25 /* enable VLAN tagging */ #define TX_VLAN_TAG_OFF BIT_24 /* disable VLAN tagging */ #define TX_JUMBO_ENA BIT_23 /* Enable Jumbo Mode (Yukon-EC Ultra) */ #define TX_JUMBO_DIS BIT_22 /* Disable Jumbo Mode (Yukon-EC Ultra) */ #define GMF_WSP_TST_ON BIT_18 /* Write Shadow Pointer Test On */ #define GMF_WSP_TST_OFF BIT_17 /* Write Shadow Pointer Test Off */ #define GMF_WSP_STEP BIT_16 /* Write Shadow Pointer Step/Increment */ /* Bits 15..8: same as for RX_GMF_CTRL_T */ #define GMF_CLI_TX_FU BIT_6 /* Clear IRQ Tx FIFO Underrun */ #define GMF_CLI_TX_FC BIT_5 /* Clear IRQ Tx Frame Complete */ #define GMF_CLI_TX_PE BIT_4 /* Clear IRQ Tx Parity Error */ /* Bits 3..0: same as for RX_GMF_CTRL_T */ #define GMF_RX_CTRL_DEF (GMF_OPER_ON | GMF_RX_F_FL_ON) #define GMF_TX_CTRL_DEF GMF_OPER_ON #define RX_GMF_AF_THR_MIN 0x0c /* Rx GMAC FIFO Almost Full Thresh. min. 
*/ #define RX_GMF_FL_THR_DEF 0x0a /* Rx GMAC FIFO Flush Threshold default */ /* GMAC_TI_ST_CTRL 8 bit Time Stamp Timer Ctrl Reg (YUKON only) */ #define GMT_ST_START BIT_2 /* Start Time Stamp Timer */ #define GMT_ST_STOP BIT_1 /* Stop Time Stamp Timer */ #define GMT_ST_CLR_IRQ BIT_0 /* Clear Time Stamp Timer IRQ */ /* POLL_CTRL 32 bit Polling Unit control register (Yukon-2 only) */ #define PC_CLR_IRQ_CHK BIT_5 /* Clear IRQ Check */ #define PC_POLL_RQ BIT_4 /* Poll Request Start */ #define PC_POLL_OP_ON BIT_3 /* Operational Mode On */ #define PC_POLL_OP_OFF BIT_2 /* Operational Mode Off */ #define PC_POLL_RST_CLR BIT_1 /* Clear Polling Unit Reset (Enable) */ #define PC_POLL_RST_SET BIT_0 /* Set Polling Unit Reset */ /* B28_Y2_ASF_STAT_CMD 32 bit ASF Status and Command Reg */ /* This register is used by the host driver software */ #define Y2_ASF_OS_PRES BIT_4 /* ASF operation system present */ #define Y2_ASF_RESET BIT_3 /* ASF system in reset state */ #define Y2_ASF_RUNNING BIT_2 /* ASF system operational */ #define Y2_ASF_CLR_HSTI BIT_1 /* Clear ASF IRQ */ #define Y2_ASF_IRQ BIT_0 /* Issue an IRQ to ASF system */ #define Y2_ASF_UC_STATE (3<<2) /* ASF uC State */ #define Y2_ASF_CLK_HALT 0 /* ASF system clock stopped */ /* B28_Y2_ASF_HCU_CCSR 32bit CPU Control and Status Register (Yukon EX) */ #define Y2_ASF_HCU_CCSR_SMBALERT_MONITOR BIT_27 /* SMBALERT pin monitor */ #define Y2_ASF_HCU_CCSR_CPU_SLEEP BIT_26 /* CPU sleep status */ #define Y2_ASF_HCU_CCSR_CS_TO BIT_25 /* Clock Stretching Timeout */ #define Y2_ASF_HCU_CCSR_WDOG BIT_24 /* Watchdog Reset */ #define Y2_ASF_HCU_CCSR_CLR_IRQ_HOST BIT_17 /* Clear IRQ_HOST */ #define Y2_ASF_HCU_CCSR_SET_IRQ_HCU BIT_16 /* Set IRQ_HCU */ #define Y2_ASF_HCU_CCSR_AHB_RST BIT_9 /* Reset AHB bridge */ #define Y2_ASF_HCU_CCSR_CPU_RST_MODE BIT_8 /* CPU Reset Mode */ #define Y2_ASF_HCU_CCSR_SET_SYNC_CPU BIT_5 #define Y2_ASF_HCU_CCSR_CPU_CLK_DIVIDE1 BIT_4 #define Y2_ASF_HCU_CCSR_CPU_CLK_DIVIDE0 BIT_3 #define Y2_ASF_HCU_CCSR_CPU_CLK_DIVIDE_MSK (BIT_4 | BIT_3) /* CPU Clock Divide */ #define Y2_ASF_HCU_CCSR_CPU_CLK_DIVIDE_BASE BIT_3 #define Y2_ASF_HCU_CCSR_OS_PRSNT BIT_2 /* ASF OS Present */ /* Microcontroller State */ #define Y2_ASF_HCU_CCSR_UC_STATE_MSK 3 #define Y2_ASF_HCU_CCSR_UC_STATE_BASE BIT_0 #define Y2_ASF_HCU_CCSR_ASF_RESET 0 #define Y2_ASF_HCU_CCSR_ASF_HALTED BIT_1 #define Y2_ASF_HCU_CCSR_ASF_RUNNING BIT_0 /* B28_Y2_ASF_HOST_COM 32 bit ASF Host Communication Reg */ /* This register is used by the ASF firmware */ #define Y2_ASF_CLR_ASFI BIT_1 /* Clear host IRQ */ #define Y2_ASF_HOST_IRQ BIT_0 /* Issue an IRQ to HOST system */ /* STAT_CTRL 32 bit Status BMU control register (Yukon-2 only) */ #define SC_STAT_CLR_IRQ BIT_4 /* Status Burst IRQ clear */ #define SC_STAT_OP_ON BIT_3 /* Operational Mode On */ #define SC_STAT_OP_OFF BIT_2 /* Operational Mode Off */ #define SC_STAT_RST_CLR BIT_1 /* Clear Status Unit Reset (Enable) */ #define SC_STAT_RST_SET BIT_0 /* Set Status Unit Reset */ /* GMAC_CTRL 32 bit GMAC Control Reg (YUKON only) */ #define GMC_SEC_RST BIT_15 /* MAC SEC RST */ #define GMC_SEC_RST_OFF BIT_14 /* MAC SEC RST Off */ #define GMC_BYP_MACSECRX_ON BIT_13 /* Bypass MAC SEC RX */ #define GMC_BYP_MACSECRX_OFF BIT_12 /* Bypass MAC SEC RX Off */ #define GMC_BYP_MACSECTX_ON BIT_11 /* Bypass MAC SEC TX */ #define GMC_BYP_MACSECTX_OFF BIT_10 /* Bypass MAC SEC TX Off */ #define GMC_BYP_RETR_ON BIT_9 /* Bypass MAC retransmit FIFO On */ #define GMC_BYP_RETR_OFF BIT_8 /* Bypass MAC retransmit FIFO Off */ #define GMC_H_BURST_ON BIT_7 /* Half Duplex Burst Mode 
On */ #define GMC_H_BURST_OFF BIT_6 /* Half Duplex Burst Mode Off */ #define GMC_F_LOOPB_ON BIT_5 /* FIFO Loopback On */ #define GMC_F_LOOPB_OFF BIT_4 /* FIFO Loopback Off */ #define GMC_PAUSE_ON BIT_3 /* Pause On */ #define GMC_PAUSE_OFF BIT_2 /* Pause Off */ #define GMC_RST_CLR BIT_1 /* Clear GMAC Reset */ #define GMC_RST_SET BIT_0 /* Set GMAC Reset */ /* GPHY_CTRL 32 bit GPHY Control Reg (YUKON only) */ #define GPC_SEL_BDT BIT_28 /* Select Bi-Dir. Transfer for MDC/MDIO */ #define GPC_INT_POL BIT_27 /* IRQ Polarity is Active Low */ #define GPC_75_OHM BIT_26 /* Use 75 Ohm Termination instead of 50 */ #define GPC_DIS_FC BIT_25 /* Disable Automatic Fiber/Copper Detection */ #define GPC_DIS_SLEEP BIT_24 /* Disable Energy Detect */ #define GPC_HWCFG_M_3 BIT_23 /* HWCFG_MODE[3] */ #define GPC_HWCFG_M_2 BIT_22 /* HWCFG_MODE[2] */ #define GPC_HWCFG_M_1 BIT_21 /* HWCFG_MODE[1] */ #define GPC_HWCFG_M_0 BIT_20 /* HWCFG_MODE[0] */ #define GPC_ANEG_0 BIT_19 /* ANEG[0] */ #define GPC_ENA_XC BIT_18 /* Enable MDI crossover */ #define GPC_DIS_125 BIT_17 /* Disable 125 MHz clock */ #define GPC_ANEG_3 BIT_16 /* ANEG[3] */ #define GPC_ANEG_2 BIT_15 /* ANEG[2] */ #define GPC_ANEG_1 BIT_14 /* ANEG[1] */ #define GPC_ENA_PAUSE BIT_13 /* Enable Pause (SYM_OR_REM) */ #define GPC_PHYADDR_4 BIT_12 /* Bit 4 of Phy Addr */ #define GPC_PHYADDR_3 BIT_11 /* Bit 3 of Phy Addr */ #define GPC_PHYADDR_2 BIT_10 /* Bit 2 of Phy Addr */ #define GPC_PHYADDR_1 BIT_9 /* Bit 1 of Phy Addr */ #define GPC_PHYADDR_0 BIT_8 /* Bit 0 of Phy Addr */ #define GPC_RST_CLR BIT_1 /* Clear GPHY Reset */ #define GPC_RST_SET BIT_0 /* Set GPHY Reset */ /* GMAC_IRQ_SRC 8 bit GMAC Interrupt Source Reg (YUKON only) */ /* GMAC_IRQ_MSK 8 bit GMAC Interrupt Mask Reg (YUKON only) */ #define GM_IS_RX_CO_OV BIT_5 /* Receive Counter Overflow IRQ */ #define GM_IS_TX_CO_OV BIT_4 /* Transmit Counter Overflow IRQ */ #define GM_IS_TX_FF_UR BIT_3 /* Transmit FIFO Underrun */ #define GM_IS_TX_COMPL BIT_2 /* Frame Transmission Complete */ #define GM_IS_RX_FF_OR BIT_1 /* Receive FIFO Overrun */ #define GM_IS_RX_COMPL BIT_0 /* Frame Reception Complete */ #define GMAC_DEF_MSK (GM_IS_RX_CO_OV | GM_IS_TX_CO_OV | GM_IS_TX_FF_UR) /* GMAC_LINK_CTRL 16 bit GMAC Link Control Reg (YUKON only) */ #define GMLC_RST_CLR BIT_1 /* Clear GMAC Link Reset */ #define GMLC_RST_SET BIT_0 /* Set GMAC Link Reset */ #define MSK_PORT_A 0 #define MSK_PORT_B 1 /* Register access macros */ #define CSR_WRITE_4(sc, reg, val) \ bus_write_4((sc)->msk_res[0], (reg), (val)) #define CSR_WRITE_2(sc, reg, val) \ bus_write_2((sc)->msk_res[0], (reg), (val)) #define CSR_WRITE_1(sc, reg, val) \ bus_write_1((sc)->msk_res[0], (reg), (val)) #define CSR_READ_4(sc, reg) \ bus_read_4((sc)->msk_res[0], (reg)) #define CSR_READ_2(sc, reg) \ bus_read_2((sc)->msk_res[0], (reg)) #define CSR_READ_1(sc, reg) \ bus_read_1((sc)->msk_res[0], (reg)) #define CSR_PCI_WRITE_4(sc, reg, val) \ bus_write_4((sc)->msk_res[0], Y2_CFG_SPC + (reg), (val)) #define CSR_PCI_WRITE_2(sc, reg, val) \ bus_write_2((sc)->msk_res[0], Y2_CFG_SPC + (reg), (val)) #define CSR_PCI_WRITE_1(sc, reg, val) \ bus_write_1((sc)->msk_res[0], Y2_CFG_SPC + (reg), (val)) #define CSR_PCI_READ_4(sc, reg) \ bus_read_4((sc)->msk_res[0], Y2_CFG_SPC + (reg)) #define CSR_PCI_READ_2(sc, reg) \ bus_read_2((sc)->msk_res[0], Y2_CFG_SPC + (reg)) #define CSR_PCI_READ_1(sc, reg) \ bus_read_1((sc)->msk_res[0], Y2_CFG_SPC + (reg)) #define MSK_IF_READ_4(sc_if, reg) \ CSR_READ_4((sc_if)->msk_softc, (reg)) #define MSK_IF_READ_2(sc_if, reg) \ CSR_READ_2((sc_if)->msk_softc, (reg)) 
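/*
 * Usage sketch, not part of the original header: how the access macros
 * above fit together.  The CSR_* macros hit the memory-mapped
 * control/status space directly, the CSR_PCI_* variants reach PCI
 * configuration space through the Y2_CFG_SPC window, and the MSK_IF_*
 * wrappers let per-port code forward accesses to the shared controller
 * softc.  PCIR_COMMAND is the standard offset from <dev/pci/pcireg.h>;
 * B0_ISRC is assumed to be the global interrupt source register defined
 * earlier in this header (not shown in this excerpt).  Code like this
 * would live in if_msk.c, after the softc declarations below.
 */
static __inline void
example_csr_usage(struct msk_softc *sc)
{
	uint32_t isrc;
	uint16_t cmd;

	/* Global CSR read: pending interrupt sources. */
	isrc = CSR_READ_4(sc, B0_ISRC);

	/* PCI command register, read through the config-space window. */
	cmd = CSR_PCI_READ_2(sc, PCIR_COMMAND);

	(void)isrc;
	(void)cmd;
}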
#define MSK_IF_READ_1(sc_if, reg) \ CSR_READ_1((sc_if)->msk_softc, (reg)) #define MSK_IF_WRITE_4(sc_if, reg, val) \ CSR_WRITE_4((sc_if)->msk_softc, (reg), (val)) #define MSK_IF_WRITE_2(sc_if, reg, val) \ CSR_WRITE_2((sc_if)->msk_softc, (reg), (val)) #define MSK_IF_WRITE_1(sc_if, reg, val) \ CSR_WRITE_1((sc_if)->msk_softc, (reg), (val)) #define GMAC_REG(port, reg) \ ((BASE_GMAC_1 + (port) * (BASE_GMAC_2 - BASE_GMAC_1)) | (reg)) #define GMAC_WRITE_2(sc, port, reg, val) \ CSR_WRITE_2((sc), GMAC_REG((port), (reg)), (val)) #define GMAC_READ_2(sc, port, reg) \ CSR_READ_2((sc), GMAC_REG((port), (reg))) /* GPHY address (bits 15..11 of SMI control reg) */ #define PHY_ADDR_MARV 0 #define MSK_ADDR_LO(x) ((uint64_t) (x) & 0xffffffffUL) #define MSK_ADDR_HI(x) ((uint64_t) (x) >> 32) /* * At first I guessed 8 bytes, the size of a single descriptor, would be * required alignment constraints. But, it seems that Yukon II have 4096 * bytes boundary alignment constraints. */ #define MSK_RING_ALIGN 4096 #define MSK_STAT_ALIGN 4096 /* Rx descriptor data structure */ struct msk_rx_desc { uint32_t msk_addr; uint32_t msk_control; }; /* Tx descriptor data structure */ struct msk_tx_desc { uint32_t msk_addr; uint32_t msk_control; }; /* Status descriptor data structure */ struct msk_stat_desc { uint32_t msk_status; uint32_t msk_control; }; /* mask and shift value to get Tx async queue status for port 1 */ #define STLE_TXA1_MSKL 0x00000fff #define STLE_TXA1_SHIFTL 0 /* mask and shift value to get Tx sync queue status for port 1 */ #define STLE_TXS1_MSKL 0x00fff000 #define STLE_TXS1_SHIFTL 12 /* mask and shift value to get Tx async queue status for port 2 */ #define STLE_TXA2_MSKL 0xff000000 #define STLE_TXA2_SHIFTL 24 #define STLE_TXA2_MSKH 0x000f /* this one shifts up */ #define STLE_TXA2_SHIFTH 8 /* mask and shift value to get Tx sync queue status for port 2 */ #define STLE_TXS2_MSKL 0x00000000 #define STLE_TXS2_SHIFTL 0 #define STLE_TXS2_MSKH 0xfff0 #define STLE_TXS2_SHIFTH 4 /* YUKON-2 bit values */ #define HW_OWNER 0x80000000 #define SW_OWNER 0x00000000 #define PU_PUTIDX_VALID 0x10000000 /* YUKON-2 Control flags */ #define UDPTCP 0x00010000 #define CALSUM 0x00020000 #define WR_SUM 0x00040000 #define INIT_SUM 0x00080000 #define LOCK_SUM 0x00100000 #define INS_VLAN 0x00200000 #define FRC_STAT 0x00400000 #define EOP 0x00800000 #define TX_LOCK 0x01000000 #define BUF_SEND 0x02000000 #define PACKET_SEND 0x04000000 #define NO_WARNING 0x40000000 #define NO_UPDATE 0x80000000 /* YUKON-2 Rx/Tx opcodes defines */ #define OP_TCPWRITE 0x11000000 #define OP_TCPSTART 0x12000000 #define OP_TCPINIT 0x14000000 #define OP_TCPLCK 0x18000000 #define OP_TCPCHKSUM OP_TCPSTART #define OP_TCPIS (OP_TCPINIT | OP_TCPSTART) #define OP_TCPLW (OP_TCPLCK | OP_TCPWRITE) #define OP_TCPLSW (OP_TCPLCK | OP_TCPSTART | OP_TCPWRITE) #define OP_TCPLISW (OP_TCPLCK | OP_TCPINIT | OP_TCPSTART | OP_TCPWRITE) #define OP_ADDR64 0x21000000 #define OP_VLAN 0x22000000 #define OP_ADDR64VLAN (OP_ADDR64 | OP_VLAN) #define OP_LRGLEN 0x24000000 #define OP_LRGLENVLAN (OP_LRGLEN | OP_VLAN) #define OP_MSS 0x28000000 #define OP_MSSVLAN (OP_MSS | OP_VLAN) #define OP_BUFFER 0x40000000 #define OP_PACKET 0x41000000 #define OP_LARGESEND 0x43000000 /* YUKON-2 STATUS opcodes defines */ #define OP_RXSTAT 0x60000000 #define OP_RXTIMESTAMP 0x61000000 #define OP_RXVLAN 0x62000000 #define OP_RXCHKS 0x64000000 #define OP_RXCHKSVLAN (OP_RXCHKS | OP_RXVLAN) #define OP_RXTIMEVLAN (OP_RXTIMESTAMP | OP_RXVLAN) #define OP_RSS_HASH 0x65000000 #define OP_TXINDEXLE 0x68000000 /* YUKON-2 
SPECIAL opcodes defines */ #define OP_PUTIDX 0x70000000 #define STLE_OP_MASK 0xff000000 #define STLE_CSS_MASK 0x00ff0000 #define STLE_LEN_MASK 0x0000ffff /* CSS defined in status LE(valid for descriptor V2 format). */ #define CSS_TCPUDP_CSUM_OK 0x00800000 #define CSS_UDP 0x00400000 #define CSS_TCP 0x00200000 #define CSS_IPFRAG 0x00100000 #define CSS_IPV6 0x00080000 #define CSS_IPV4_CSUM_OK 0x00040000 #define CSS_IPV4 0x00020000 #define CSS_PORT 0x00010000 /* Descriptor Bit Definition */ /* TxCtrl Transmit Buffer Control Field */ /* RxCtrl Receive Buffer Control Field */ #define BMU_OWN BIT_31 /* OWN bit: 0=host/1=BMU */ #define BMU_STF BIT_30 /* Start of Frame */ #define BMU_EOF BIT_29 /* End of Frame */ #define BMU_IRQ_EOB BIT_28 /* Req "End of Buffer" IRQ */ #define BMU_IRQ_EOF BIT_27 /* Req "End of Frame" IRQ */ /* TxCtrl specific bits */ #define BMU_STFWD BIT_26 /* (Tx) Store & Forward Frame */ #define BMU_NO_FCS BIT_25 /* (Tx) Disable MAC FCS (CRC) generation */ #define BMU_SW BIT_24 /* (Tx) 1 bit res. for SW use */ /* RxCtrl specific bits */ #define BMU_DEV_0 BIT_26 /* (Rx) Transfer data to Dev0 */ #define BMU_STAT_VAL BIT_25 /* (Rx) Rx Status Valid */ #define BMU_TIST_VAL BIT_24 /* (Rx) Rx TimeStamp Valid */ /* Bit 23..16: BMU Check Opcodes */ #define BMU_CHECK (0x55<<16) /* Default BMU check */ #define BMU_TCP_CHECK (0x56<<16) /* Descr with TCP ext */ #define BMU_UDP_CHECK (0x57<<16) /* Descr with UDP ext (YUKON only) */ #define BMU_BBC 0xffff /* Bit 15.. 0: Buffer Byte Counter */ /* * Controller requires an additional LE op code for 64bit DMA operation. * Driver uses fixed number of RX buffers such that this limitation * reduces number of available RX buffers with 64bit DMA so double * number of RX buffers on platforms that support 64bit DMA. For TX * side, controller requires an additional OP_ADDR64 op code if a TX * buffer uses different high address value than previously used one. * Driver monitors high DMA address change in TX and inserts an * OP_ADDR64 op code if the high DMA address is changed. Driver * allocates 50% more total TX buffers on platforms that support 64bit * DMA. */ #if (BUS_SPACE_MAXADDR > 0xFFFFFFFF) #define MSK_64BIT_DMA #define MSK_TX_RING_CNT 384 #define MSK_RX_RING_CNT 512 #else #undef MSK_64BIT_DMA #define MSK_TX_RING_CNT 256 #define MSK_RX_RING_CNT 256 #endif #define MSK_RX_BUF_ALIGN 8 #define MSK_JUMBO_RX_RING_CNT MSK_RX_RING_CNT #define MSK_MAXTXSEGS 35 #define MSK_TSO_MAXSGSIZE 4096 #define MSK_TSO_MAXSIZE (65535 + sizeof(struct ether_vlan_header)) /* * It seems that the hardware requires extra descriptors(LEs) to offload * TCP/UDP checksum, VLAN hardware tag insertion and TSO. * * 1 descriptor for TCP/UDP checksum offload. * 1 descriptor VLAN hardware tag insertion. 
* 1 descriptor for TSO(TCP Segmentation Offload) * 1 descriptor for each 64bits DMA transfers */ #ifdef MSK_64BIT_DMA #define MSK_RESERVED_TX_DESC_CNT (MSK_MAXTXSEGS + 3) #else #define MSK_RESERVED_TX_DESC_CNT 3 #endif #define MSK_JUMBO_FRAMELEN 9022 #define MSK_JUMBO_MTU (MSK_JUMBO_FRAMELEN-ETHER_HDR_LEN-ETHER_CRC_LEN) #define MSK_MAX_FRAMELEN \ (ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN - ETHER_CRC_LEN) #define MSK_MIN_FRAMELEN (ETHER_MIN_LEN - ETHER_CRC_LEN) struct msk_txdesc { struct mbuf *tx_m; bus_dmamap_t tx_dmamap; struct msk_tx_desc *tx_le; }; struct msk_rxdesc { struct mbuf *rx_m; bus_dmamap_t rx_dmamap; struct msk_rx_desc *rx_le; }; struct msk_chain_data { bus_dma_tag_t msk_parent_tag; bus_dma_tag_t msk_tx_tag; struct msk_txdesc msk_txdesc[MSK_TX_RING_CNT]; bus_dma_tag_t msk_rx_tag; struct msk_rxdesc msk_rxdesc[MSK_RX_RING_CNT]; bus_dma_tag_t msk_tx_ring_tag; bus_dma_tag_t msk_rx_ring_tag; bus_dmamap_t msk_tx_ring_map; bus_dmamap_t msk_rx_ring_map; bus_dmamap_t msk_rx_sparemap; bus_dma_tag_t msk_jumbo_rx_tag; struct msk_rxdesc msk_jumbo_rxdesc[MSK_JUMBO_RX_RING_CNT]; bus_dma_tag_t msk_jumbo_rx_ring_tag; bus_dmamap_t msk_jumbo_rx_ring_map; bus_dmamap_t msk_jumbo_rx_sparemap; uint16_t msk_tso_mtu; uint32_t msk_last_csum; uint32_t msk_tx_high_addr; int msk_tx_prod; int msk_tx_cons; int msk_tx_cnt; int msk_tx_put; int msk_rx_cons; int msk_rx_prod; int msk_rx_putwm; }; struct msk_ring_data { struct msk_tx_desc *msk_tx_ring; bus_addr_t msk_tx_ring_paddr; struct msk_rx_desc *msk_rx_ring; bus_addr_t msk_rx_ring_paddr; struct msk_rx_desc *msk_jumbo_rx_ring; bus_addr_t msk_jumbo_rx_ring_paddr; }; #define MSK_TX_RING_ADDR(sc, i) \ ((sc)->msk_rdata.msk_tx_ring_paddr + sizeof(struct msk_tx_desc) * (i)) #define MSK_RX_RING_ADDR(sc, i) \ ((sc)->msk_rdata.msk_rx_ring_paddr + sizeof(struct msk_rx_desc) * (i)) #define MSK_JUMBO_RX_RING_ADDR(sc, i) \ ((sc)->msk_rdata.msk_jumbo_rx_ring_paddr + sizeof(struct msk_rx_desc) * (i)) #define MSK_TX_RING_SZ \ (sizeof(struct msk_tx_desc) * MSK_TX_RING_CNT) #define MSK_RX_RING_SZ \ (sizeof(struct msk_rx_desc) * MSK_RX_RING_CNT) #define MSK_JUMBO_RX_RING_SZ \ (sizeof(struct msk_rx_desc) * MSK_JUMBO_RX_RING_CNT) #define MSK_INC(x, y) (x) = (x + 1) % y #ifdef MSK_64BIT_DMA #define MSK_RX_INC(x, y) (x) = (x + 2) % y #define MSK_RX_BUF_CNT (MSK_RX_RING_CNT / 2) #define MSK_JUMBO_RX_BUF_CNT (MSK_JUMBO_RX_RING_CNT / 2) #else #define MSK_RX_INC(x, y) (x) = (x + 1) % y #define MSK_RX_BUF_CNT MSK_RX_RING_CNT #define MSK_JUMBO_RX_BUF_CNT MSK_JUMBO_RX_RING_CNT #endif #define MSK_PCI_BUS 0 #define MSK_PCIX_BUS 1 #define MSK_PEX_BUS 2 #define MSK_PROC_DEFAULT (MSK_RX_RING_CNT / 2) #define MSK_PROC_MIN 30 #define MSK_PROC_MAX (MSK_RX_RING_CNT - 1) #define MSK_INT_HOLDOFF_DEFAULT 100 #define MSK_TX_TIMEOUT 5 #define MSK_PUT_WM 10 struct msk_mii_data { int port; uint32_t pmd; int mii_flags; }; /* Forward decl. */ struct msk_if_softc; struct msk_hw_stats { /* Rx stats. */ uint32_t rx_ucast_frames; uint32_t rx_bcast_frames; uint32_t rx_pause_frames; uint32_t rx_mcast_frames; uint32_t rx_crc_errs; uint32_t rx_spare1; uint64_t rx_good_octets; uint64_t rx_bad_octets; uint32_t rx_runts; uint32_t rx_runt_errs; uint32_t rx_pkts_64; uint32_t rx_pkts_65_127; uint32_t rx_pkts_128_255; uint32_t rx_pkts_256_511; uint32_t rx_pkts_512_1023; uint32_t rx_pkts_1024_1518; uint32_t rx_pkts_1519_max; uint32_t rx_pkts_too_long; uint32_t rx_pkts_jabbers; uint32_t rx_spare2; uint32_t rx_fifo_oflows; uint32_t rx_spare3; /* Tx stats. 
*/ uint32_t tx_ucast_frames; uint32_t tx_bcast_frames; uint32_t tx_pause_frames; uint32_t tx_mcast_frames; uint64_t tx_octets; uint32_t tx_pkts_64; uint32_t tx_pkts_65_127; uint32_t tx_pkts_128_255; uint32_t tx_pkts_256_511; uint32_t tx_pkts_512_1023; uint32_t tx_pkts_1024_1518; uint32_t tx_pkts_1519_max; uint32_t tx_spare1; uint32_t tx_colls; uint32_t tx_late_colls; uint32_t tx_excess_colls; uint32_t tx_multi_colls; uint32_t tx_single_colls; uint32_t tx_underflows; }; /* Softc for the Marvell Yukon II controller. */ struct msk_softc { struct resource *msk_res[1]; /* I/O resource */ struct resource_spec *msk_res_spec; struct resource *msk_irq[1]; /* IRQ resources */ struct resource_spec *msk_irq_spec; void *msk_intrhand; /* irq handler handle */ device_t msk_dev; uint8_t msk_hw_id; uint8_t msk_hw_rev; uint8_t msk_bustype; uint8_t msk_num_port; int msk_expcap; int msk_pcixcap; int msk_ramsize; /* amount of SRAM on NIC */ uint32_t msk_pmd; /* physical media type */ uint32_t msk_intrmask; uint32_t msk_intrhwemask; uint32_t msk_pflags; int msk_clock; struct msk_if_softc *msk_if[2]; device_t msk_devs[2]; int msk_txqsize; int msk_rxqsize; int msk_txqstart[2]; int msk_txqend[2]; int msk_rxqstart[2]; int msk_rxqend[2]; bus_dma_tag_t msk_stat_tag; bus_dmamap_t msk_stat_map; struct msk_stat_desc *msk_stat_ring; bus_addr_t msk_stat_ring_paddr; int msk_int_holdoff; int msk_process_limit; int msk_stat_cons; int msk_stat_count; struct mtx msk_mtx; }; #define MSK_LOCK(_sc) mtx_lock(&(_sc)->msk_mtx) #define MSK_TRYLOCK(_sc) mtx_trylock(&(_sc)->msk_mtx) #define MSK_UNLOCK(_sc) mtx_unlock(&(_sc)->msk_mtx) #define MSK_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->msk_mtx, MA_OWNED) #define MSK_IF_LOCK(_sc) MSK_LOCK((_sc)->msk_softc) #define MSK_IF_TRYLOCK(_sc) MSK_TRYLOCK((_sc)->msk_softc) #define MSK_IF_UNLOCK(_sc) MSK_UNLOCK((_sc)->msk_softc) #define MSK_IF_LOCK_ASSERT(_sc) MSK_LOCK_ASSERT((_sc)->msk_softc) #define MSK_USECS(sc, us) ((sc)->msk_clock * (us)) /* Softc for each logical interface. */ struct msk_if_softc { if_t msk_ifp; /* interface info */ device_t msk_miibus; device_t msk_if_dev; int32_t msk_port; /* port # on controller */ - int msk_framesize; int msk_phytype; int msk_phyaddr; + uint32_t msk_framesize; + uint32_t msk_capenable; uint32_t msk_flags; #define MSK_FLAG_MSI 0x00000001 #define MSK_FLAG_FASTETHER 0x00000004 #define MSK_FLAG_JUMBO 0x00000008 #define MSK_FLAG_JUMBO_NOCSUM 0x00000010 #define MSK_FLAG_RAMBUF 0x00000020 #define MSK_FLAG_DESCV2 0x00000040 #define MSK_FLAG_AUTOTX_CSUM 0x00000080 #define MSK_FLAG_NOHWVLAN 0x00000100 #define MSK_FLAG_NORXCHK 0x00000200 #define MSK_FLAG_NORX_CSUM 0x00000400 #define MSK_FLAG_SUSPEND 0x00002000 #define MSK_FLAG_DETACH 0x00004000 #define MSK_FLAG_LINK 0x00008000 #define MSK_FLAG_RUNNING 0x00010000 struct callout msk_tick_ch; int msk_watchdog_timer; uint32_t msk_txq; /* Tx. Async Queue offset */ uint32_t msk_txsq; /* Tx. Syn Queue offset */ uint32_t msk_rxq; /* Rx. Qeueue offset */ struct msk_chain_data msk_cdata; struct msk_ring_data msk_rdata; struct msk_softc *msk_softc; /* parent controller */ struct msk_hw_stats msk_stats; int msk_if_flags; uint16_t msk_vtag; /* VLAN tag id. 
*/ uint32_t msk_csum; }; #define MSK_TIMEOUT 1000 #define MSK_PHY_POWERUP 1 #define MSK_PHY_POWERDOWN 0 Index: projects/ifnet/sys/dev/virtio/network/if_vtnet.c =================================================================== --- projects/ifnet/sys/dev/virtio/network/if_vtnet.c (revision 277242) +++ projects/ifnet/sys/dev/virtio/network/if_vtnet.c (revision 277243) @@ -1,3847 +1,3801 @@ /*- * Copyright (c) 2011, Bryan Venteicher * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* Driver for VirtIO network devices. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef DEV_NETMAP #include #endif /* DEV_NETMAP */ #include "virtio_if.h" #include "opt_inet.h" #include "opt_inet6.h" static int vtnet_modevent(module_t, int, void *); static int vtnet_probe(device_t); static int vtnet_attach(device_t); static int vtnet_detach(device_t); static int vtnet_suspend(device_t); static int vtnet_resume(device_t); static int vtnet_shutdown(device_t); static int vtnet_attach_completed(device_t); static int vtnet_config_change(device_t); static void vtnet_negotiate_features(struct vtnet_softc *); static void vtnet_setup_features(struct vtnet_softc *); static int vtnet_init_rxq(struct vtnet_softc *, int); static int vtnet_init_txq(struct vtnet_softc *, int); static int vtnet_alloc_rxtx_queues(struct vtnet_softc *); static void vtnet_free_rxtx_queues(struct vtnet_softc *); static int vtnet_alloc_rx_filters(struct vtnet_softc *); static void vtnet_free_rx_filters(struct vtnet_softc *); static int vtnet_alloc_virtqueues(struct vtnet_softc *); static void vtnet_setup_interface(struct vtnet_softc *); static int vtnet_change_mtu(struct vtnet_softc *, int); static int vtnet_ioctl(if_t, u_long, void *, struct thread *); static uint64_t vtnet_get_counter(if_t, ift_counter); static int vtnet_rxq_populate(struct vtnet_rxq *); static void vtnet_rxq_free_mbufs(struct vtnet_rxq *); static struct mbuf * vtnet_rx_alloc_buf(struct vtnet_softc *, int , struct mbuf **); static int 
vtnet_rxq_replace_lro_nomgr_buf(struct vtnet_rxq *, struct mbuf *, int); static int vtnet_rxq_replace_buf(struct vtnet_rxq *, struct mbuf *, int); static int vtnet_rxq_enqueue_buf(struct vtnet_rxq *, struct mbuf *); static int vtnet_rxq_new_buf(struct vtnet_rxq *); static int vtnet_rxq_csum(struct vtnet_rxq *, struct mbuf *, struct virtio_net_hdr *); static void vtnet_rxq_discard_merged_bufs(struct vtnet_rxq *, int); static void vtnet_rxq_discard_buf(struct vtnet_rxq *, struct mbuf *); static int vtnet_rxq_merged_eof(struct vtnet_rxq *, struct mbuf *, int); static void vtnet_rxq_input(struct vtnet_rxq *, struct mbuf *, struct virtio_net_hdr *); static int vtnet_rxq_eof(struct vtnet_rxq *); static void vtnet_rx_vq_intr(void *); static void vtnet_rxq_tq_intr(void *, int); static int vtnet_txq_below_threshold(struct vtnet_txq *); static int vtnet_txq_notify(struct vtnet_txq *); static void vtnet_txq_free_mbufs(struct vtnet_txq *); static int vtnet_txq_offload_ctx(struct vtnet_txq *, struct mbuf *, int *, int *, int *); static int vtnet_txq_offload_tso(struct vtnet_txq *, struct mbuf *, int, int, struct virtio_net_hdr *); static struct mbuf * vtnet_txq_offload(struct vtnet_txq *, struct mbuf *, struct virtio_net_hdr *); static int vtnet_txq_enqueue_buf(struct vtnet_txq *, struct mbuf **, struct vtnet_tx_header *); static int vtnet_txq_encap(struct vtnet_txq *, struct mbuf **); static int vtnet_txq_mq_start_locked(struct vtnet_txq *, struct mbuf *); static int vtnet_txq_mq_start(if_t, struct mbuf *); static void vtnet_txq_tq_deferred(void *, int); static void vtnet_txq_start(struct vtnet_txq *); static void vtnet_txq_tq_intr(void *, int); static int vtnet_txq_eof(struct vtnet_txq *); static void vtnet_tx_vq_intr(void *); static void vtnet_tx_start_all(struct vtnet_softc *); static void vtnet_qflush(if_t); static int vtnet_watchdog(struct vtnet_txq *); static void vtnet_accum_stats(struct vtnet_softc *, struct vtnet_rxq_stats *, struct vtnet_txq_stats *); static void vtnet_tick(void *); static void vtnet_start_taskqueues(struct vtnet_softc *); static void vtnet_free_taskqueues(struct vtnet_softc *); static void vtnet_drain_taskqueues(struct vtnet_softc *); static void vtnet_drain_rxtx_queues(struct vtnet_softc *); static void vtnet_stop_rendezvous(struct vtnet_softc *); static void vtnet_stop(struct vtnet_softc *); static int vtnet_virtio_reinit(struct vtnet_softc *); static void vtnet_init_rx_filters(struct vtnet_softc *); static int vtnet_init_rx_queues(struct vtnet_softc *); static int vtnet_init_tx_queues(struct vtnet_softc *); static int vtnet_init_rxtx_queues(struct vtnet_softc *); static void vtnet_set_active_vq_pairs(struct vtnet_softc *); static int vtnet_reinit(struct vtnet_softc *); static void vtnet_init_locked(struct vtnet_softc *); static void vtnet_init(void *); static void vtnet_free_ctrl_vq(struct vtnet_softc *); static void vtnet_exec_ctrl_cmd(struct vtnet_softc *, void *, struct sglist *, int, int); static int vtnet_ctrl_mac_cmd(struct vtnet_softc *, uint8_t *); static int vtnet_ctrl_mq_cmd(struct vtnet_softc *, uint16_t); static int vtnet_ctrl_rx_cmd(struct vtnet_softc *, int, int); static int vtnet_set_promisc(struct vtnet_softc *, int); static int vtnet_set_allmulti(struct vtnet_softc *, int); static void vtnet_attach_disable_promisc(struct vtnet_softc *); static void vtnet_rx_filter(struct vtnet_softc *); static void vtnet_rx_filter_mac(struct vtnet_softc *); static int vtnet_exec_vlan_filter(struct vtnet_softc *, int, uint16_t); static void vtnet_rx_filter_vlan(struct 
vtnet_softc *); static void vtnet_update_vlan_filter(struct vtnet_softc *, int, uint16_t); static void vtnet_register_vlan(void *, if_t, uint16_t); static void vtnet_unregister_vlan(void *, if_t, uint16_t); static int vtnet_is_link_up(struct vtnet_softc *); static void vtnet_update_link_status(struct vtnet_softc *); static int vtnet_ifmedia_upd(if_t); static void vtnet_ifmedia_sts(if_t, struct ifmediareq *); static void vtnet_get_hwaddr(struct vtnet_softc *); static void vtnet_set_hwaddr(struct vtnet_softc *); static void vtnet_vlan_tag_remove(struct mbuf *); static void vtnet_set_rx_process_limit(struct vtnet_softc *); static void vtnet_set_tx_intr_threshold(struct vtnet_softc *); static void vtnet_setup_rxq_sysctl(struct sysctl_ctx_list *, struct sysctl_oid_list *, struct vtnet_rxq *); static void vtnet_setup_txq_sysctl(struct sysctl_ctx_list *, struct sysctl_oid_list *, struct vtnet_txq *); static void vtnet_setup_queue_sysctl(struct vtnet_softc *); static void vtnet_setup_sysctl(struct vtnet_softc *); static int vtnet_rxq_enable_intr(struct vtnet_rxq *); static void vtnet_rxq_disable_intr(struct vtnet_rxq *); static int vtnet_txq_enable_intr(struct vtnet_txq *); static void vtnet_txq_disable_intr(struct vtnet_txq *); static void vtnet_enable_rx_interrupts(struct vtnet_softc *); static void vtnet_enable_tx_interrupts(struct vtnet_softc *); static void vtnet_enable_interrupts(struct vtnet_softc *); static void vtnet_disable_rx_interrupts(struct vtnet_softc *); static void vtnet_disable_tx_interrupts(struct vtnet_softc *); static void vtnet_disable_interrupts(struct vtnet_softc *); static int vtnet_tunable_int(struct vtnet_softc *, const char *, int); /* Tunables. */ static int vtnet_csum_disable = 0; TUNABLE_INT("hw.vtnet.csum_disable", &vtnet_csum_disable); static int vtnet_tso_disable = 0; TUNABLE_INT("hw.vtnet.tso_disable", &vtnet_tso_disable); static int vtnet_lro_disable = 0; TUNABLE_INT("hw.vtnet.lro_disable", &vtnet_lro_disable); static int vtnet_mq_disable = 0; TUNABLE_INT("hw.vtnet.mq_disable", &vtnet_mq_disable); static int vtnet_mq_max_pairs = 0; TUNABLE_INT("hw.vtnet.mq_max_pairs", &vtnet_mq_max_pairs); static int vtnet_rx_process_limit = 512; TUNABLE_INT("hw.vtnet.rx_process_limit", &vtnet_rx_process_limit); static uma_zone_t vtnet_tx_header_zone; static struct virtio_feature_desc vtnet_feature_desc[] = { { VIRTIO_NET_F_CSUM, "TxChecksum" }, { VIRTIO_NET_F_GUEST_CSUM, "RxChecksum" }, { VIRTIO_NET_F_MAC, "MacAddress" }, { VIRTIO_NET_F_GSO, "TxAllGSO" }, { VIRTIO_NET_F_GUEST_TSO4, "RxTSOv4" }, { VIRTIO_NET_F_GUEST_TSO6, "RxTSOv6" }, { VIRTIO_NET_F_GUEST_ECN, "RxECN" }, { VIRTIO_NET_F_GUEST_UFO, "RxUFO" }, { VIRTIO_NET_F_HOST_TSO4, "TxTSOv4" }, { VIRTIO_NET_F_HOST_TSO6, "TxTSOv6" }, { VIRTIO_NET_F_HOST_ECN, "TxTSOECN" }, { VIRTIO_NET_F_HOST_UFO, "TxUFO" }, { VIRTIO_NET_F_MRG_RXBUF, "MrgRxBuf" }, { VIRTIO_NET_F_STATUS, "Status" }, { VIRTIO_NET_F_CTRL_VQ, "ControlVq" }, { VIRTIO_NET_F_CTRL_RX, "RxMode" }, { VIRTIO_NET_F_CTRL_VLAN, "VLanFilter" }, { VIRTIO_NET_F_CTRL_RX_EXTRA, "RxModeExtra" }, { VIRTIO_NET_F_GUEST_ANNOUNCE, "GuestAnnounce" }, { VIRTIO_NET_F_MQ, "Multiqueue" }, { VIRTIO_NET_F_CTRL_MAC_ADDR, "SetMacAddress" }, { 0, NULL } }; static device_method_t vtnet_methods[] = { /* Device methods. */ DEVMETHOD(device_probe, vtnet_probe), DEVMETHOD(device_attach, vtnet_attach), DEVMETHOD(device_detach, vtnet_detach), DEVMETHOD(device_suspend, vtnet_suspend), DEVMETHOD(device_resume, vtnet_resume), DEVMETHOD(device_shutdown, vtnet_shutdown), /* VirtIO methods. 
*/ DEVMETHOD(virtio_attach_completed, vtnet_attach_completed), DEVMETHOD(virtio_config_change, vtnet_config_change), DEVMETHOD_END }; static driver_t vtnet_driver = { "vtnet", vtnet_methods, sizeof(struct vtnet_softc) }; static devclass_t vtnet_devclass; DRIVER_MODULE(vtnet, virtio_mmio, vtnet_driver, vtnet_devclass, vtnet_modevent, 0); DRIVER_MODULE(vtnet, virtio_pci, vtnet_driver, vtnet_devclass, vtnet_modevent, 0); MODULE_VERSION(vtnet, 1); MODULE_DEPEND(vtnet, virtio, 1, 1, 1); static struct ifdriver vtnet_ifdrv = { .ifdrv_ops = { .ifop_origin = IFOP_ORIGIN_DRIVER, .ifop_ioctl = vtnet_ioctl, .ifop_init = vtnet_init, .ifop_get_counter = vtnet_get_counter, .ifop_transmit = vtnet_txq_mq_start, .ifop_qflush = vtnet_qflush, }, .ifdrv_name = "vtnet", .ifdrv_type = IFT_ETHER, .ifdrv_hdrlen = sizeof(struct ether_vlan_header), }; static int vtnet_modevent(module_t mod, int type, void *unused) { int error; error = 0; switch (type) { case MOD_LOAD: vtnet_tx_header_zone = uma_zcreate("vtnet_tx_hdr", sizeof(struct vtnet_tx_header), NULL, NULL, NULL, NULL, 0, 0); break; case MOD_QUIESCE: case MOD_UNLOAD: if (uma_zone_get_cur(vtnet_tx_header_zone) > 0) error = EBUSY; else if (type == MOD_UNLOAD) { uma_zdestroy(vtnet_tx_header_zone); vtnet_tx_header_zone = NULL; } break; case MOD_SHUTDOWN: break; default: error = EOPNOTSUPP; break; } return (error); } static int vtnet_probe(device_t dev) { if (virtio_get_device_type(dev) != VIRTIO_ID_NETWORK) return (ENXIO); device_set_desc(dev, "VirtIO Networking Adapter"); return (BUS_PROBE_DEFAULT); } static int vtnet_attach(device_t dev) { struct vtnet_softc *sc; int error; sc = device_get_softc(dev); sc->vtnet_dev = dev; /* Register our feature descriptions. */ virtio_set_feature_desc(dev, vtnet_feature_desc); VTNET_CORE_LOCK_INIT(sc); callout_init_mtx(&sc->vtnet_tick_ch, VTNET_CORE_MTX(sc), 0); vtnet_setup_sysctl(sc); vtnet_setup_features(sc); error = vtnet_alloc_rx_filters(sc); if (error) { device_printf(dev, "cannot allocate Rx filters\n"); goto fail; } error = vtnet_alloc_rxtx_queues(sc); if (error) { device_printf(dev, "cannot allocate queues\n"); goto fail; } error = vtnet_alloc_virtqueues(sc); if (error) { device_printf(dev, "cannot allocate virtqueues\n"); goto fail; } error = virtio_setup_intr(dev, INTR_TYPE_NET); if (error) { device_printf(dev, "cannot setup virtqueue interrupts\n"); goto fail; } vtnet_setup_interface(sc); #ifdef DEV_NETMAP vtnet_netmap_attach(sc); #endif /* DEV_NETMAP */ vtnet_start_taskqueues(sc); fail: if (error) vtnet_detach(dev); return (error); } static int vtnet_detach(device_t dev) { struct vtnet_softc *sc; sc = device_get_softc(dev); if (device_is_attached(dev)) { VTNET_CORE_LOCK(sc); vtnet_stop(sc); VTNET_CORE_UNLOCK(sc); callout_drain(&sc->vtnet_tick_ch); vtnet_drain_taskqueues(sc); #ifdef DEV_NETMAP netmap_detach(sc->vtnet_ifp); #endif /* DEV_NETMAP */ if_detach(sc->vtnet_ifp); } vtnet_free_taskqueues(sc); if (sc->vtnet_vlan_attach != NULL) { EVENTHANDLER_DEREGISTER(vlan_config, sc->vtnet_vlan_attach); sc->vtnet_vlan_attach = NULL; } if (sc->vtnet_vlan_detach != NULL) { EVENTHANDLER_DEREGISTER(vlan_unconfig, sc->vtnet_vlan_detach); sc->vtnet_vlan_detach = NULL; } ifmedia_removeall(&sc->vtnet_media); vtnet_free_rxtx_queues(sc); vtnet_free_rx_filters(sc); if (sc->vtnet_ctrl_vq != NULL) vtnet_free_ctrl_vq(sc); VTNET_CORE_LOCK_DESTROY(sc); return (0); } static int vtnet_suspend(device_t dev) { struct vtnet_softc *sc; sc = device_get_softc(dev); VTNET_CORE_LOCK(sc); vtnet_stop(sc); sc->vtnet_flags |= VTNET_FLAG_SUSPENDED;
VTNET_CORE_UNLOCK(sc); return (0); } static int vtnet_resume(device_t dev) { struct vtnet_softc *sc; if_t ifp; sc = device_get_softc(dev); ifp = sc->vtnet_ifp; VTNET_CORE_LOCK(sc); if (if_get(ifp, IF_FLAGS) & IFF_UP) vtnet_init_locked(sc); sc->vtnet_flags &= ~VTNET_FLAG_SUSPENDED; VTNET_CORE_UNLOCK(sc); return (0); } static int vtnet_shutdown(device_t dev) { /* * Suspend already does all of what we need to * do here; we just never expect to be resumed. */ return (vtnet_suspend(dev)); } static int vtnet_attach_completed(device_t dev) { vtnet_attach_disable_promisc(device_get_softc(dev)); return (0); } static int vtnet_config_change(device_t dev) { struct vtnet_softc *sc; sc = device_get_softc(dev); VTNET_CORE_LOCK(sc); vtnet_update_link_status(sc); if (sc->vtnet_link_active != 0) vtnet_tx_start_all(sc); VTNET_CORE_UNLOCK(sc); return (0); } static void vtnet_negotiate_features(struct vtnet_softc *sc) { device_t dev; uint64_t mask, features; dev = sc->vtnet_dev; mask = 0; /* * TSO and LRO are only available when their corresponding checksum * offload feature is also negotiated. */ if (vtnet_tunable_int(sc, "csum_disable", vtnet_csum_disable)) { mask |= VIRTIO_NET_F_CSUM | VIRTIO_NET_F_GUEST_CSUM; mask |= VTNET_TSO_FEATURES | VTNET_LRO_FEATURES; } if (vtnet_tunable_int(sc, "tso_disable", vtnet_tso_disable)) mask |= VTNET_TSO_FEATURES; if (vtnet_tunable_int(sc, "lro_disable", vtnet_lro_disable)) mask |= VTNET_LRO_FEATURES; if (vtnet_tunable_int(sc, "mq_disable", vtnet_mq_disable)) mask |= VIRTIO_NET_F_MQ; features = VTNET_FEATURES & ~mask; sc->vtnet_features = virtio_negotiate_features(dev, features); if (virtio_with_feature(dev, VTNET_LRO_FEATURES) && virtio_with_feature(dev, VIRTIO_NET_F_MRG_RXBUF) == 0) { /* * LRO without mergeable buffers requires special care. This * is not ideal because every receive buffer must be large * enough to hold the maximum TCP packet, the Ethernet header, * and the header. This requires up to 34 descriptors with * MCLBYTES clusters. If we do not have indirect descriptors, * LRO is disabled since the virtqueue will not contain very * many receive buffers. */ if (!virtio_with_feature(dev, VIRTIO_RING_F_INDIRECT_DESC)) { device_printf(dev, "LRO disabled due to both mergeable buffers and " "indirect descriptors not negotiated\n"); features &= ~VTNET_LRO_FEATURES; sc->vtnet_features = virtio_negotiate_features(dev, features); } else sc->vtnet_flags |= VTNET_FLAG_LRO_NOMRG; } } static void vtnet_setup_features(struct vtnet_softc *sc) { device_t dev; int max_pairs, max; dev = sc->vtnet_dev; vtnet_negotiate_features(sc); if (virtio_with_feature(dev, VIRTIO_RING_F_INDIRECT_DESC)) sc->vtnet_flags |= VTNET_FLAG_INDIRECT; if (virtio_with_feature(dev, VIRTIO_RING_F_EVENT_IDX)) sc->vtnet_flags |= VTNET_FLAG_EVENT_IDX; if (virtio_with_feature(dev, VIRTIO_NET_F_MAC)) { /* This feature should always be negotiated. 
*/ sc->vtnet_flags |= VTNET_FLAG_MAC; } if (virtio_with_feature(dev, VIRTIO_NET_F_MRG_RXBUF)) { sc->vtnet_flags |= VTNET_FLAG_MRG_RXBUFS; sc->vtnet_hdr_size = sizeof(struct virtio_net_hdr_mrg_rxbuf); } else sc->vtnet_hdr_size = sizeof(struct virtio_net_hdr); if (sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) sc->vtnet_rx_nsegs = VTNET_MRG_RX_SEGS; else if (sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG) sc->vtnet_rx_nsegs = VTNET_MAX_RX_SEGS; else sc->vtnet_rx_nsegs = VTNET_MIN_RX_SEGS; if (virtio_with_feature(dev, VIRTIO_NET_F_GSO) || virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO4) || virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO6)) sc->vtnet_tx_nsegs = VTNET_MAX_TX_SEGS; else sc->vtnet_tx_nsegs = VTNET_MIN_TX_SEGS; if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_VQ)) { sc->vtnet_flags |= VTNET_FLAG_CTRL_VQ; if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_RX)) sc->vtnet_flags |= VTNET_FLAG_CTRL_RX; if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_VLAN)) sc->vtnet_flags |= VTNET_FLAG_VLAN_FILTER; if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_MAC_ADDR)) sc->vtnet_flags |= VTNET_FLAG_CTRL_MAC; } if (virtio_with_feature(dev, VIRTIO_NET_F_MQ) && sc->vtnet_flags & VTNET_FLAG_CTRL_VQ) { max_pairs = virtio_read_dev_config_2(dev, offsetof(struct virtio_net_config, max_virtqueue_pairs)); if (max_pairs < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN || max_pairs > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX) max_pairs = 1; } else max_pairs = 1; if (max_pairs > 1) { /* * Limit the maximum number of queue pairs to the number of * CPUs or the configured maximum. The actual number of * queues that get used may be less. */ max = vtnet_tunable_int(sc, "mq_max_pairs", vtnet_mq_max_pairs); if (max > 0 && max_pairs > max) max_pairs = max; if (max_pairs > mp_ncpus) max_pairs = mp_ncpus; if (max_pairs > VTNET_MAX_QUEUE_PAIRS) max_pairs = VTNET_MAX_QUEUE_PAIRS; if (max_pairs > 1) sc->vtnet_flags |= VTNET_FLAG_MULTIQ; } sc->vtnet_max_vq_pairs = max_pairs; } static int vtnet_init_rxq(struct vtnet_softc *sc, int id) { struct vtnet_rxq *rxq; rxq = &sc->vtnet_rxqs[id]; snprintf(rxq->vtnrx_name, sizeof(rxq->vtnrx_name), "%s-rx%d", device_get_nameunit(sc->vtnet_dev), id); mtx_init(&rxq->vtnrx_mtx, rxq->vtnrx_name, NULL, MTX_DEF); rxq->vtnrx_sc = sc; rxq->vtnrx_id = id; rxq->vtnrx_sg = sglist_alloc(sc->vtnet_rx_nsegs, M_NOWAIT); if (rxq->vtnrx_sg == NULL) return (ENOMEM); TASK_INIT(&rxq->vtnrx_intrtask, 0, vtnet_rxq_tq_intr, rxq); rxq->vtnrx_tq = taskqueue_create(rxq->vtnrx_name, M_NOWAIT, taskqueue_thread_enqueue, &rxq->vtnrx_tq); return (rxq->vtnrx_tq == NULL ? 
ENOMEM : 0); } static int vtnet_init_txq(struct vtnet_softc *sc, int id) { struct vtnet_txq *txq; txq = &sc->vtnet_txqs[id]; snprintf(txq->vtntx_name, sizeof(txq->vtntx_name), "%s-tx%d", device_get_nameunit(sc->vtnet_dev), id); mtx_init(&txq->vtntx_mtx, txq->vtntx_name, NULL, MTX_DEF); txq->vtntx_sc = sc; txq->vtntx_id = id; txq->vtntx_sg = sglist_alloc(sc->vtnet_tx_nsegs, M_NOWAIT); if (txq->vtntx_sg == NULL) return (ENOMEM); txq->vtntx_br = buf_ring_alloc(VTNET_DEFAULT_BUFRING_SIZE, M_DEVBUF, M_NOWAIT, &txq->vtntx_mtx); if (txq->vtntx_br == NULL) return (ENOMEM); TASK_INIT(&txq->vtntx_defrtask, 0, vtnet_txq_tq_deferred, txq); TASK_INIT(&txq->vtntx_intrtask, 0, vtnet_txq_tq_intr, txq); txq->vtntx_tq = taskqueue_create(txq->vtntx_name, M_NOWAIT, taskqueue_thread_enqueue, &txq->vtntx_tq); if (txq->vtntx_tq == NULL) return (ENOMEM); return (0); } static int vtnet_alloc_rxtx_queues(struct vtnet_softc *sc) { int i, npairs, error; npairs = sc->vtnet_max_vq_pairs; sc->vtnet_rxqs = malloc(sizeof(struct vtnet_rxq) * npairs, M_DEVBUF, M_NOWAIT | M_ZERO); sc->vtnet_txqs = malloc(sizeof(struct vtnet_txq) * npairs, M_DEVBUF, M_NOWAIT | M_ZERO); if (sc->vtnet_rxqs == NULL || sc->vtnet_txqs == NULL) return (ENOMEM); for (i = 0; i < npairs; i++) { error = vtnet_init_rxq(sc, i); if (error) return (error); error = vtnet_init_txq(sc, i); if (error) return (error); } vtnet_setup_queue_sysctl(sc); return (0); } static void vtnet_destroy_rxq(struct vtnet_rxq *rxq) { rxq->vtnrx_sc = NULL; rxq->vtnrx_id = -1; if (rxq->vtnrx_sg != NULL) { sglist_free(rxq->vtnrx_sg); rxq->vtnrx_sg = NULL; } if (mtx_initialized(&rxq->vtnrx_mtx) != 0) mtx_destroy(&rxq->vtnrx_mtx); } static void vtnet_destroy_txq(struct vtnet_txq *txq) { txq->vtntx_sc = NULL; txq->vtntx_id = -1; if (txq->vtntx_sg != NULL) { sglist_free(txq->vtntx_sg); txq->vtntx_sg = NULL; } if (txq->vtntx_br != NULL) { buf_ring_free(txq->vtntx_br, M_DEVBUF); txq->vtntx_br = NULL; } if (mtx_initialized(&txq->vtntx_mtx) != 0) mtx_destroy(&txq->vtntx_mtx); } static void vtnet_free_rxtx_queues(struct vtnet_softc *sc) { int i; if (sc->vtnet_rxqs != NULL) { for (i = 0; i < sc->vtnet_max_vq_pairs; i++) vtnet_destroy_rxq(&sc->vtnet_rxqs[i]); free(sc->vtnet_rxqs, M_DEVBUF); sc->vtnet_rxqs = NULL; } if (sc->vtnet_txqs != NULL) { for (i = 0; i < sc->vtnet_max_vq_pairs; i++) vtnet_destroy_txq(&sc->vtnet_txqs[i]); free(sc->vtnet_txqs, M_DEVBUF); sc->vtnet_txqs = NULL; } } static int vtnet_alloc_rx_filters(struct vtnet_softc *sc) { if (sc->vtnet_flags & VTNET_FLAG_CTRL_RX) { sc->vtnet_mac_filter = malloc(sizeof(struct vtnet_mac_filter), M_DEVBUF, M_NOWAIT | M_ZERO); if (sc->vtnet_mac_filter == NULL) return (ENOMEM); } if (sc->vtnet_flags & VTNET_FLAG_VLAN_FILTER) { sc->vtnet_vlan_filter = malloc(sizeof(uint32_t) * VTNET_VLAN_FILTER_NWORDS, M_DEVBUF, M_NOWAIT | M_ZERO); if (sc->vtnet_vlan_filter == NULL) return (ENOMEM); } return (0); } static void vtnet_free_rx_filters(struct vtnet_softc *sc) { if (sc->vtnet_mac_filter != NULL) { free(sc->vtnet_mac_filter, M_DEVBUF); sc->vtnet_mac_filter = NULL; } if (sc->vtnet_vlan_filter != NULL) { free(sc->vtnet_vlan_filter, M_DEVBUF); sc->vtnet_vlan_filter = NULL; } } static int vtnet_alloc_virtqueues(struct vtnet_softc *sc) { device_t dev; struct vq_alloc_info *info; struct vtnet_rxq *rxq; struct vtnet_txq *txq; int i, idx, flags, nvqs, error; dev = sc->vtnet_dev; flags = 0; nvqs = sc->vtnet_max_vq_pairs * 2; if (sc->vtnet_flags & VTNET_FLAG_CTRL_VQ) nvqs++; info = malloc(sizeof(struct vq_alloc_info) * nvqs, M_TEMP, M_NOWAIT); if (info == 
NULL) return (ENOMEM); for (i = 0, idx = 0; i < sc->vtnet_max_vq_pairs; i++, idx+=2) { rxq = &sc->vtnet_rxqs[i]; VQ_ALLOC_INFO_INIT(&info[idx], sc->vtnet_rx_nsegs, vtnet_rx_vq_intr, rxq, &rxq->vtnrx_vq, "%s-%d rx", device_get_nameunit(dev), rxq->vtnrx_id); txq = &sc->vtnet_txqs[i]; VQ_ALLOC_INFO_INIT(&info[idx+1], sc->vtnet_tx_nsegs, vtnet_tx_vq_intr, txq, &txq->vtntx_vq, "%s-%d tx", device_get_nameunit(dev), txq->vtntx_id); } if (sc->vtnet_flags & VTNET_FLAG_CTRL_VQ) { VQ_ALLOC_INFO_INIT(&info[idx], 0, NULL, NULL, &sc->vtnet_ctrl_vq, "%s ctrl", device_get_nameunit(dev)); } /* * Enable interrupt binding if this is multiqueue. This only matters * when per-vq MSIX is available. */ if (sc->vtnet_flags & VTNET_FLAG_MULTIQ) flags |= 0; error = virtio_alloc_virtqueues(dev, flags, nvqs, info); free(info, M_TEMP); return (error); } static void vtnet_setup_interface(struct vtnet_softc *sc) { struct if_attach_args ifat = { .ifat_version = IF_ATTACH_VERSION, .ifat_drv = &vtnet_ifdrv, .ifat_softc = sc, .ifat_baudrate = IF_Gbps(10), /* Approx. */ .ifat_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST, .ifat_capabilities = IFCAP_JUMBO_MTU | IFCAP_VLAN_MTU, }; device_t dev; dev = sc->vtnet_dev; /* Read (or generate) the MAC address for the adapter. */ vtnet_get_hwaddr(sc); ifat.ifat_dunit = device_get_unit(dev); ifat.ifat_lla = sc->vtnet_hwaddr; if (virtio_with_feature(dev, VIRTIO_NET_F_STATUS)) ifat.ifat_capabilities |= IFCAP_LINKSTATE; if (virtio_with_feature(dev, VIRTIO_NET_F_CSUM)) { ifat.ifat_capabilities |= IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6; if (virtio_with_feature(dev, VIRTIO_NET_F_GSO)) { ifat.ifat_capabilities |= IFCAP_TSO4 | IFCAP_TSO6; sc->vtnet_flags |= VTNET_FLAG_TSO_ECN; } else { if (virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO4)) ifat.ifat_capabilities |= IFCAP_TSO4; if (virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO6)) ifat.ifat_capabilities |= IFCAP_TSO6; if (virtio_with_feature(dev, VIRTIO_NET_F_HOST_ECN)) sc->vtnet_flags |= VTNET_FLAG_TSO_ECN; } if (ifat.ifat_capabilities & IFCAP_TSO) ifat.ifat_capabilities |= IFCAP_VLAN_HWTSO; } if (virtio_with_feature(dev, VIRTIO_NET_F_GUEST_CSUM)) { ifat.ifat_capabilities |= IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6; if (virtio_with_feature(dev, VIRTIO_NET_F_GUEST_TSO4) || virtio_with_feature(dev, VIRTIO_NET_F_GUEST_TSO6)) ifat.ifat_capabilities |= IFCAP_LRO; } if (ifat.ifat_capabilities & IFCAP_HWCSUM) { /* * VirtIO does not support VLAN tagging, but we can fake * it by inserting and removing the 802.1Q header during * transmit and receive. We are then able to do checksum * offloading of VLAN frames. */ ifat.ifat_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWCSUM; } ifat.ifat_capenable = ifat.ifat_capabilities; /* * Capabilities after here are not enabled by default. 
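 *
 * (For illustration only: IFCAP_VLAN_HWFILTER below is one such
 * capability.  It is advertised in if_capabilities but stays off until
 * the administrator enables it explicitly, e.g. with something like
 *
 *	# ifconfig vtnet0 vlanhwfilter
 *
 * at which point the SIOCSIFCAP path in vtnet_ioctl() renegotiates the
 * host features if the interface is running.)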
*/ if (sc->vtnet_flags & VTNET_FLAG_VLAN_FILTER) { ifat.ifat_capabilities |= IFCAP_VLAN_HWFILTER; sc->vtnet_vlan_attach = EVENTHANDLER_REGISTER(vlan_config, vtnet_register_vlan, sc, EVENTHANDLER_PRI_FIRST); sc->vtnet_vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig, vtnet_unregister_vlan, sc, EVENTHANDLER_PRI_FIRST); } vtnet_set_rx_process_limit(sc); vtnet_set_tx_intr_threshold(sc); ifmedia_init(&sc->vtnet_media, IFM_IMASK, vtnet_ifmedia_upd, vtnet_ifmedia_sts); ifmedia_add(&sc->vtnet_media, VTNET_MEDIATYPE, 0, NULL); ifmedia_set(&sc->vtnet_media, VTNET_MEDIATYPE); sc->vtnet_ifp = if_attach(&ifat); } static int vtnet_change_mtu(struct vtnet_softc *sc, int new_mtu) { if_t ifp; int frame_size, clsize; ifp = sc->vtnet_ifp; if (new_mtu < ETHERMIN || new_mtu > VTNET_MAX_MTU) return (EINVAL); frame_size = sc->vtnet_hdr_size + sizeof(struct ether_vlan_header) + new_mtu; /* * Based on the new MTU (and hence frame size) determine which * cluster size is most appropriate for the receive queues. */ if (frame_size <= MCLBYTES) { clsize = MCLBYTES; } else if ((sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) == 0) { /* Avoid going past 9K jumbos. */ if (frame_size > MJUM9BYTES) return (EINVAL); clsize = MJUM9BYTES; } else clsize = MJUMPAGESIZE; if_set(ifp, IF_MTU, new_mtu); sc->vtnet_rx_new_clsize = clsize; if (sc->vtnet_flags & VTNET_FLAG_RUNNING) { sc->vtnet_flags &= ~VTNET_FLAG_RUNNING; vtnet_init_locked(sc); } return (0); } static int vtnet_ioctl(if_t ifp, u_long cmd, void *data, struct thread *td) { struct vtnet_softc *sc; struct ifreq *ifr; - int reinit, capenable, mask, error; + int error; sc = if_getsoftc(ifp, IF_DRIVER_SOFTC); ifr = (struct ifreq *) data; error = 0; switch (cmd) { case SIOCSIFMTU: - if (if_get(ifp, IF_MTU) != ifr->ifr_mtu) { - VTNET_CORE_LOCK(sc); - error = vtnet_change_mtu(sc, ifr->ifr_mtu); - VTNET_CORE_UNLOCK(sc); - } + VTNET_CORE_LOCK(sc); + error = vtnet_change_mtu(sc, ifr->ifr_mtu); + VTNET_CORE_UNLOCK(sc); break; case SIOCSIFFLAGS: VTNET_CORE_LOCK(sc); if ((if_get(ifp, IF_FLAGS) & IFF_UP) == 0) { if (sc->vtnet_flags & VTNET_FLAG_RUNNING) vtnet_stop(sc); } else if (sc->vtnet_flags & VTNET_FLAG_RUNNING) { if ((if_get(ifp, IF_FLAGS) ^ sc->vtnet_if_flags) & (IFF_PROMISC | IFF_ALLMULTI)) { if (sc->vtnet_flags & VTNET_FLAG_CTRL_RX) vtnet_rx_filter(sc); else error = ENOTSUP; } } else vtnet_init_locked(sc); if (error == 0) sc->vtnet_if_flags = if_get(ifp, IF_FLAGS); VTNET_CORE_UNLOCK(sc); break; case SIOCADDMULTI: case SIOCDELMULTI: if ((sc->vtnet_flags & VTNET_FLAG_CTRL_RX) == 0) break; VTNET_CORE_LOCK(sc); if (sc->vtnet_flags & VTNET_FLAG_RUNNING) vtnet_rx_filter_mac(sc); VTNET_CORE_UNLOCK(sc); break; case SIOCSIFMEDIA: case SIOCGIFMEDIA: error = ifmedia_ioctl(ifp, ifr, &sc->vtnet_media, cmd); break; case SIOCSIFCAP: - VTNET_CORE_LOCK(sc); - capenable = if_get(ifp, IF_CAPENABLE); - mask = ifr->ifr_reqcap ^ capenable; - - if (mask & IFCAP_TXCSUM) - capenable ^= IFCAP_TXCSUM; - if (mask & IFCAP_TXCSUM_IPV6) - capenable ^= IFCAP_TXCSUM_IPV6; - if (mask & IFCAP_TSO4) - capenable ^= IFCAP_TSO4; - if (mask & IFCAP_TSO6) - capenable ^= IFCAP_TSO6; - - if (mask & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6 | IFCAP_LRO | - IFCAP_VLAN_HWFILTER)) { - /* These Rx features require us to renegotiate. 
*/ - reinit = 1; - - if (mask & IFCAP_RXCSUM) - capenable ^= IFCAP_RXCSUM; - if (mask & IFCAP_RXCSUM_IPV6) - capenable ^= IFCAP_RXCSUM_IPV6; - if (mask & IFCAP_LRO) - capenable ^= IFCAP_LRO; - if (mask & IFCAP_VLAN_HWFILTER) - capenable ^= IFCAP_VLAN_HWFILTER; - } else - reinit = 0; - - if (mask & IFCAP_VLAN_HWTSO) - capenable ^= IFCAP_VLAN_HWTSO; - if (mask & IFCAP_VLAN_HWTAGGING) - capenable ^= IFCAP_VLAN_HWTAGGING; - - if (reinit && (sc->vtnet_flags & VTNET_FLAG_RUNNING)) { + sc->vtnet_capenable = ifr->ifr_reqcap; + /* These Rx features require us to renegotiate. */ + if ((ifr->ifr_reqcap ^ ifr->ifr_curcap) & + (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6 | IFCAP_LRO | + IFCAP_VLAN_HWFILTER) && + (sc->vtnet_flags & VTNET_FLAG_RUNNING)) { + VTNET_CORE_LOCK(sc); sc->vtnet_flags &= ~VTNET_FLAG_RUNNING; vtnet_init_locked(sc); + VTNET_CORE_UNLOCK(sc); } - - VTNET_CORE_UNLOCK(sc); - - if_set(ifp, IF_CAPENABLE, capenable); - + ifr->ifr_hwassist = 0; + if (ifr->ifr_reqcap & IFCAP_TXCSUM) + ifr->ifr_hwassist |= VTNET_CSUM_OFFLOAD; + if (ifr->ifr_reqcap & IFCAP_TXCSUM_IPV6) + ifr->ifr_hwassist |= VTNET_CSUM_OFFLOAD_IPV6; + if (ifr->ifr_reqcap & IFCAP_TSO4) + ifr->ifr_hwassist |= CSUM_TSO; + if (ifr->ifr_reqcap & IFCAP_TSO6) + ifr->ifr_hwassist |= CSUM_IP6_TSO; break; default: error = EOPNOTSUPP; break; } VTNET_CORE_LOCK_ASSERT_NOTOWNED(sc); return (error); } static int vtnet_rxq_populate(struct vtnet_rxq *rxq) { struct virtqueue *vq; int nbufs, error; vq = rxq->vtnrx_vq; error = ENOSPC; for (nbufs = 0; !virtqueue_full(vq); nbufs++) { error = vtnet_rxq_new_buf(rxq); if (error) break; } if (nbufs > 0) { virtqueue_notify(vq); /* * EMSGSIZE signifies the virtqueue did not have enough * entries available to hold the last mbuf. This is not * an error. */ if (error == EMSGSIZE) error = 0; } return (error); } static void vtnet_rxq_free_mbufs(struct vtnet_rxq *rxq) { struct virtqueue *vq; struct mbuf *m; int last; vq = rxq->vtnrx_vq; last = 0; while ((m = virtqueue_drain(vq, &last)) != NULL) m_freem(m); KASSERT(virtqueue_empty(vq), ("%s: mbufs remaining in rx queue %p", __func__, rxq)); } static struct mbuf * vtnet_rx_alloc_buf(struct vtnet_softc *sc, int nbufs, struct mbuf **m_tailp) { struct mbuf *m_head, *m_tail, *m; int i, clsize; clsize = sc->vtnet_rx_clsize; KASSERT(nbufs == 1 || sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG, ("%s: chained mbuf %d request without LRO_NOMRG", __func__, nbufs)); m_head = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, clsize); if (m_head == NULL) goto fail; m_head->m_len = clsize; m_tail = m_head; /* Allocate the rest of the chain. */ for (i = 1; i < nbufs; i++) { m = m_getjcl(M_NOWAIT, MT_DATA, 0, clsize); if (m == NULL) goto fail; m->m_len = clsize; m_tail->m_next = m; m_tail = m; } if (m_tailp != NULL) *m_tailp = m_tail; return (m_head); fail: sc->vtnet_stats.mbuf_alloc_failed++; m_freem(m_head); return (NULL); } /* * Slow path for when LRO without mergeable buffers is negotiated. */ static int vtnet_rxq_replace_lro_nomgr_buf(struct vtnet_rxq *rxq, struct mbuf *m0, int len0) { struct vtnet_softc *sc; struct mbuf *m, *m_prev; struct mbuf *m_new, *m_tail; int len, clsize, nreplace, error; sc = rxq->vtnrx_sc; clsize = sc->vtnet_rx_clsize; m_prev = NULL; m_tail = NULL; nreplace = 0; m = m0; len = len0; /* * Since these mbuf chains are so large, we avoid allocating an * entire replacement chain if possible. When the received frame * did not consume the entire chain, the unused mbufs are moved * to the replacement chain. 
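 *
 * (Rough numbers for illustration, assuming stock 2KB MCLBYTES
 * clusters: without mergeable buffers each chain must be able to hold
 * a maximum-sized LRO aggregate -- on the order of 64KB of TCP data
 * plus the Ethernet and VirtIO headers -- which works out to roughly
 * 33 data mbufs per chain, hence the effort below to reuse the tail of
 * the old chain instead of reallocating all of it.)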
*/ while (len > 0) { /* * Something is seriously wrong if we received a frame * larger than the chain. Drop it. */ if (m == NULL) { sc->vtnet_stats.rx_frame_too_large++; return (EMSGSIZE); } /* We always allocate the same cluster size. */ KASSERT(m->m_len == clsize, ("%s: mbuf size %d is not the cluster size %d", __func__, m->m_len, clsize)); m->m_len = MIN(m->m_len, len); len -= m->m_len; m_prev = m; m = m->m_next; nreplace++; } KASSERT(nreplace <= sc->vtnet_rx_nmbufs, ("%s: too many replacement mbufs %d max %d", __func__, nreplace, sc->vtnet_rx_nmbufs)); m_new = vtnet_rx_alloc_buf(sc, nreplace, &m_tail); if (m_new == NULL) { m_prev->m_len = clsize; return (ENOBUFS); } /* * Move any unused mbufs from the received chain onto the end * of the new chain. */ if (m_prev->m_next != NULL) { m_tail->m_next = m_prev->m_next; m_prev->m_next = NULL; } error = vtnet_rxq_enqueue_buf(rxq, m_new); if (error) { /* * BAD! We could not enqueue the replacement mbuf chain. We * must restore the m0 chain to the original state if it was * modified so we can subsequently discard it. * * NOTE: The replacement is suppose to be an identical copy * to the one just dequeued so this is an unexpected error. */ sc->vtnet_stats.rx_enq_replacement_failed++; if (m_tail->m_next != NULL) { m_prev->m_next = m_tail->m_next; m_tail->m_next = NULL; } m_prev->m_len = clsize; m_freem(m_new); } return (error); } static int vtnet_rxq_replace_buf(struct vtnet_rxq *rxq, struct mbuf *m, int len) { struct vtnet_softc *sc; struct mbuf *m_new; int error; sc = rxq->vtnrx_sc; KASSERT(sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG || m->m_next == NULL, ("%s: chained mbuf without LRO_NOMRG", __func__)); if (m->m_next == NULL) { /* Fast-path for the common case of just one mbuf. */ if (m->m_len < len) return (EINVAL); m_new = vtnet_rx_alloc_buf(sc, 1, NULL); if (m_new == NULL) return (ENOBUFS); error = vtnet_rxq_enqueue_buf(rxq, m_new); if (error) { /* * The new mbuf is suppose to be an identical * copy of the one just dequeued so this is an * unexpected error. 
*/ m_freem(m_new); sc->vtnet_stats.rx_enq_replacement_failed++; } else m->m_len = len; } else error = vtnet_rxq_replace_lro_nomgr_buf(rxq, m, len); return (error); } static int vtnet_rxq_enqueue_buf(struct vtnet_rxq *rxq, struct mbuf *m) { struct vtnet_softc *sc; struct sglist *sg; struct vtnet_rx_header *rxhdr; uint8_t *mdata; int offset, error; sc = rxq->vtnrx_sc; sg = rxq->vtnrx_sg; mdata = mtod(m, uint8_t *); VTNET_RXQ_LOCK_ASSERT(rxq); KASSERT(sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG || m->m_next == NULL, ("%s: chained mbuf without LRO_NOMRG", __func__)); KASSERT(m->m_len == sc->vtnet_rx_clsize, ("%s: unexpected cluster size %d/%d", __func__, m->m_len, sc->vtnet_rx_clsize)); sglist_reset(sg); if ((sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) == 0) { MPASS(sc->vtnet_hdr_size == sizeof(struct virtio_net_hdr)); rxhdr = (struct vtnet_rx_header *) mdata; sglist_append(sg, &rxhdr->vrh_hdr, sc->vtnet_hdr_size); offset = sizeof(struct vtnet_rx_header); } else offset = 0; sglist_append(sg, mdata + offset, m->m_len - offset); if (m->m_next != NULL) { error = sglist_append_mbuf(sg, m->m_next); MPASS(error == 0); } error = virtqueue_enqueue(rxq->vtnrx_vq, m, sg, 0, sg->sg_nseg); return (error); } static int vtnet_rxq_new_buf(struct vtnet_rxq *rxq) { struct vtnet_softc *sc; struct mbuf *m; int error; sc = rxq->vtnrx_sc; m = vtnet_rx_alloc_buf(sc, sc->vtnet_rx_nmbufs, NULL); if (m == NULL) return (ENOBUFS); error = vtnet_rxq_enqueue_buf(rxq, m); if (error) m_freem(m); return (error); } /* * Use the checksum offset in the VirtIO header to set the * correct CSUM_* flags. */ static int vtnet_rxq_csum_by_offset(struct vtnet_rxq *rxq, struct mbuf *m, uint16_t eth_type, int ip_start, struct virtio_net_hdr *hdr) { struct vtnet_softc *sc; #if defined(INET) || defined(INET6) int offset = hdr->csum_start + hdr->csum_offset; #endif sc = rxq->vtnrx_sc; /* Only do a basic sanity check on the offset. */ switch (eth_type) { #if defined(INET) case ETHERTYPE_IP: if (__predict_false(offset < ip_start + sizeof(struct ip))) return (1); break; #endif #if defined(INET6) case ETHERTYPE_IPV6: if (__predict_false(offset < ip_start + sizeof(struct ip6_hdr))) return (1); break; #endif default: sc->vtnet_stats.rx_csum_bad_ethtype++; return (1); } /* * Use the offset to determine the appropriate CSUM_* flags. This is * a bit dirty, but we can get by with it since the checksum offsets * happen to be different. We assume the host host does not do IPv4 * header checksum offloading. 
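 *
 * (A short sketch of why the offset alone is enough, using the
 * standard protocol header layouts rather than anything specific to
 * this driver:
 *
 *	offsetof(struct udphdr, uh_sum)    == 6
 *	offsetof(struct tcphdr, th_sum)    == 16
 *	offsetof(struct sctphdr, checksum) == 8
 *
 * so the three cases below cannot collide.  For a plain IPv4/TCP frame
 * the host would typically hand us csum_start 34 and csum_offset 16.)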
*/ switch (hdr->csum_offset) { case offsetof(struct udphdr, uh_sum): case offsetof(struct tcphdr, th_sum): m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR; m->m_pkthdr.csum_data = 0xFFFF; break; case offsetof(struct sctphdr, checksum): m->m_pkthdr.csum_flags |= CSUM_SCTP_VALID; break; default: sc->vtnet_stats.rx_csum_bad_offset++; return (1); } return (0); } static int vtnet_rxq_csum_by_parse(struct vtnet_rxq *rxq, struct mbuf *m, uint16_t eth_type, int ip_start, struct virtio_net_hdr *hdr) { struct vtnet_softc *sc; int offset, proto; sc = rxq->vtnrx_sc; switch (eth_type) { #if defined(INET) case ETHERTYPE_IP: { struct ip *ip; if (__predict_false(m->m_len < ip_start + sizeof(struct ip))) return (1); ip = (struct ip *)(m->m_data + ip_start); proto = ip->ip_p; offset = ip_start + (ip->ip_hl << 2); break; } #endif #if defined(INET6) case ETHERTYPE_IPV6: if (__predict_false(m->m_len < ip_start + sizeof(struct ip6_hdr))) return (1); offset = ip6_lasthdr(m, ip_start, IPPROTO_IPV6, &proto); if (__predict_false(offset < 0)) return (1); break; #endif default: sc->vtnet_stats.rx_csum_bad_ethtype++; return (1); } switch (proto) { case IPPROTO_TCP: if (__predict_false(m->m_len < offset + sizeof(struct tcphdr))) return (1); m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR; m->m_pkthdr.csum_data = 0xFFFF; break; case IPPROTO_UDP: if (__predict_false(m->m_len < offset + sizeof(struct udphdr))) return (1); m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR; m->m_pkthdr.csum_data = 0xFFFF; break; case IPPROTO_SCTP: if (__predict_false(m->m_len < offset + sizeof(struct sctphdr))) return (1); m->m_pkthdr.csum_flags |= CSUM_SCTP_VALID; break; default: /* * For the remaining protocols, FreeBSD does not support * checksum offloading, so the checksum will be recomputed. */ #if 0 if_printf(sc->vtnet_ifp, "cksum offload of unsupported " "protocol eth_type=%#x proto=%d csum_start=%d " "csum_offset=%d\n", __func__, eth_type, proto, hdr->csum_start, hdr->csum_offset); #endif break; } return (0); } /* * Set the appropriate CSUM_* flags. Unfortunately, the information * provided is not directly useful to us. The VirtIO header gives the * offset of the checksum, which is all Linux needs, but this is not * how FreeBSD does things. We are forced to peek inside the packet * a bit. * * It would be nice if VirtIO gave us the L4 protocol or if FreeBSD * could accept the offsets and let the stack figure it out. */ static int vtnet_rxq_csum(struct vtnet_rxq *rxq, struct mbuf *m, struct virtio_net_hdr *hdr) { struct ether_header *eh; struct ether_vlan_header *evh; uint16_t eth_type; int offset, error; eh = mtod(m, struct ether_header *); eth_type = ntohs(eh->ether_type); if (eth_type == ETHERTYPE_VLAN) { /* BMV: We should handle nested VLAN tags too. */ evh = mtod(m, struct ether_vlan_header *); eth_type = ntohs(evh->evl_proto); offset = sizeof(struct ether_vlan_header); } else offset = sizeof(struct ether_header); if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) error = vtnet_rxq_csum_by_offset(rxq, m, eth_type, offset, hdr); else error = vtnet_rxq_csum_by_parse(rxq, m, eth_type, offset, hdr); return (error); } static void vtnet_rxq_discard_merged_bufs(struct vtnet_rxq *rxq, int nbufs) { struct mbuf *m; while (--nbufs > 0) { m = virtqueue_dequeue(rxq->vtnrx_vq, NULL); if (m == NULL) break; vtnet_rxq_discard_buf(rxq, m); } } static void vtnet_rxq_discard_buf(struct vtnet_rxq *rxq, struct mbuf *m) { int error; /* * Requeue the discarded mbuf. 
This should always be successful * since it was just dequeued. */ error = vtnet_rxq_enqueue_buf(rxq, m); KASSERT(error == 0, ("%s: cannot requeue discarded mbuf %d", __func__, error)); } static int vtnet_rxq_merged_eof(struct vtnet_rxq *rxq, struct mbuf *m_head, int nbufs) { struct vtnet_softc *sc; struct virtqueue *vq; struct mbuf *m, *m_tail; int len; sc = rxq->vtnrx_sc; vq = rxq->vtnrx_vq; m_tail = m_head; while (--nbufs > 0) { m = virtqueue_dequeue(vq, &len); if (m == NULL) { rxq->vtnrx_stats.vrxs_ierrors++; goto fail; } if (vtnet_rxq_new_buf(rxq) != 0) { rxq->vtnrx_stats.vrxs_iqdrops++; vtnet_rxq_discard_buf(rxq, m); if (nbufs > 1) vtnet_rxq_discard_merged_bufs(rxq, nbufs); goto fail; } if (m->m_len < len) len = m->m_len; m->m_len = len; m->m_flags &= ~M_PKTHDR; m_head->m_pkthdr.len += len; m_tail->m_next = m; m_tail = m; } return (0); fail: sc->vtnet_stats.rx_mergeable_failed++; m_freem(m_head); return (1); } static void vtnet_rxq_input(struct vtnet_rxq *rxq, struct mbuf *m, struct virtio_net_hdr *hdr) { struct vtnet_softc *sc; if_t ifp; struct ether_header *eh; sc = rxq->vtnrx_sc; ifp = sc->vtnet_ifp; - if (if_get(ifp, IF_CAPENABLE) & IFCAP_VLAN_HWTAGGING) { + if (sc->vtnet_capenable & IFCAP_VLAN_HWTAGGING) { eh = mtod(m, struct ether_header *); if (eh->ether_type == htons(ETHERTYPE_VLAN)) { vtnet_vlan_tag_remove(m); /* * With the 802.1Q header removed, update the * checksum starting location accordingly. */ if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) hdr->csum_start -= ETHER_VLAN_ENCAP_LEN; } } m->m_pkthdr.flowid = rxq->vtnrx_id; M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE); /* * BMV: FreeBSD does not have the UNNECESSARY and PARTIAL checksum * distinction that Linux does. Need to reevaluate if performing * offloading for the NEEDS_CSUM case is really appropriate. */ if (hdr->flags & (VIRTIO_NET_HDR_F_NEEDS_CSUM | VIRTIO_NET_HDR_F_DATA_VALID)) { if (vtnet_rxq_csum(rxq, m, hdr) == 0) rxq->vtnrx_stats.vrxs_csum++; else rxq->vtnrx_stats.vrxs_csum_failed++; } rxq->vtnrx_stats.vrxs_ipackets++; rxq->vtnrx_stats.vrxs_ibytes += m->m_pkthdr.len; VTNET_RXQ_UNLOCK(rxq); if_input(ifp, m); VTNET_RXQ_LOCK(rxq); } static int vtnet_rxq_eof(struct vtnet_rxq *rxq) { struct virtio_net_hdr lhdr, *hdr; struct vtnet_softc *sc; if_t ifp; struct virtqueue *vq; struct mbuf *m; struct virtio_net_hdr_mrg_rxbuf *mhdr; int len, deq, nbufs, adjsz, count; sc = rxq->vtnrx_sc; vq = rxq->vtnrx_vq; ifp = sc->vtnet_ifp; hdr = &lhdr; deq = 0; count = sc->vtnet_rx_process_limit; VTNET_RXQ_LOCK_ASSERT(rxq); #ifdef DEV_NETMAP if (netmap_rx_irq(ifp, 0, &deq)) { return (FALSE); } #endif /* DEV_NETMAP */ while (count-- > 0) { m = virtqueue_dequeue(vq, &len); if (m == NULL) break; deq++; if (len < sc->vtnet_hdr_size + ETHER_HDR_LEN) { rxq->vtnrx_stats.vrxs_ierrors++; vtnet_rxq_discard_buf(rxq, m); continue; } if ((sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) == 0) { nbufs = 1; adjsz = sizeof(struct vtnet_rx_header); /* * Account for our pad inserted between the header * and the actual start of the frame. */ len += VTNET_RX_HEADER_PAD; } else { mhdr = mtod(m, struct virtio_net_hdr_mrg_rxbuf *); nbufs = mhdr->num_buffers; adjsz = sizeof(struct virtio_net_hdr_mrg_rxbuf); } if (vtnet_rxq_replace_buf(rxq, m, len) != 0) { rxq->vtnrx_stats.vrxs_iqdrops++; vtnet_rxq_discard_buf(rxq, m); if (nbufs > 1) vtnet_rxq_discard_merged_bufs(rxq, nbufs); continue; } m->m_pkthdr.len = len; m->m_pkthdr.rcvif = ifp; m->m_pkthdr.csum_flags = 0; if (nbufs > 1) { /* Dequeue the rest of chain. 
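 * (Illustrative only, assuming 2KB clusters with mergeable receive
 * buffers: a 3000-byte LRO frame arrives with mhdr->num_buffers == 2,
 * so one extra descriptor is dequeued and stitched onto the head mbuf
 * by vtnet_rxq_merged_eof() below.)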
*/ if (vtnet_rxq_merged_eof(rxq, m, nbufs) != 0) continue; } /* * Save copy of header before we strip it. For both mergeable * and non-mergeable, the header is at the beginning of the * mbuf data. We no longer need num_buffers, so always use a * regular header. * * BMV: Is this memcpy() expensive? We know the mbuf data is * still valid even after the m_adj(). */ memcpy(hdr, mtod(m, void *), sizeof(struct virtio_net_hdr)); m_adj(m, adjsz); vtnet_rxq_input(rxq, m, hdr); /* Must recheck after dropping the Rx lock. */ if ((sc->vtnet_flags & VTNET_FLAG_RUNNING) == 0) break; } if (deq > 0) virtqueue_notify(vq); return (count > 0 ? 0 : EAGAIN); } static void vtnet_rx_vq_intr(void *xrxq) { struct vtnet_softc *sc; struct vtnet_rxq *rxq; if_t ifp; int tries, more; rxq = xrxq; sc = rxq->vtnrx_sc; ifp = sc->vtnet_ifp; tries = 0; if (__predict_false(rxq->vtnrx_id >= sc->vtnet_act_vq_pairs)) { /* * Ignore this interrupt. Either this is a spurious interrupt * or multiqueue without per-VQ MSIX so every queue needs to * be polled (a brain dead configuration we could try harder * to avoid). */ vtnet_rxq_disable_intr(rxq); return; } VTNET_RXQ_LOCK(rxq); again: if ((sc->vtnet_flags & VTNET_FLAG_RUNNING) == 0) { VTNET_RXQ_UNLOCK(rxq); return; } more = vtnet_rxq_eof(rxq); if (more || vtnet_rxq_enable_intr(rxq) != 0) { if (!more) vtnet_rxq_disable_intr(rxq); /* * This is an occasional condition or race (when !more), * so retry a few times before scheduling the taskqueue. */ if (tries++ < VTNET_INTR_DISABLE_RETRIES) goto again; VTNET_RXQ_UNLOCK(rxq); rxq->vtnrx_stats.vrxs_rescheduled++; taskqueue_enqueue(rxq->vtnrx_tq, &rxq->vtnrx_intrtask); } else VTNET_RXQ_UNLOCK(rxq); } static void vtnet_rxq_tq_intr(void *xrxq, int pending) { struct vtnet_softc *sc; struct vtnet_rxq *rxq; if_t ifp; int more; rxq = xrxq; sc = rxq->vtnrx_sc; ifp = sc->vtnet_ifp; VTNET_RXQ_LOCK(rxq); if ((sc->vtnet_flags & VTNET_FLAG_RUNNING) == 0) { VTNET_RXQ_UNLOCK(rxq); return; } more = vtnet_rxq_eof(rxq); if (more || vtnet_rxq_enable_intr(rxq) != 0) { if (!more) vtnet_rxq_disable_intr(rxq); rxq->vtnrx_stats.vrxs_rescheduled++; taskqueue_enqueue(rxq->vtnrx_tq, &rxq->vtnrx_intrtask); } VTNET_RXQ_UNLOCK(rxq); } static int vtnet_txq_below_threshold(struct vtnet_txq *txq) { struct vtnet_softc *sc; struct virtqueue *vq; sc = txq->vtntx_sc; vq = txq->vtntx_vq; return (virtqueue_nfree(vq) <= sc->vtnet_tx_intr_thresh); } static int vtnet_txq_notify(struct vtnet_txq *txq) { struct virtqueue *vq; vq = txq->vtntx_vq; txq->vtntx_watchdog = VTNET_TX_TIMEOUT; virtqueue_notify(vq); if (vtnet_txq_enable_intr(txq) == 0) return (0); /* * Drain frames that were completed since last checked. If this * causes the queue to go above the threshold, the caller should * continue transmitting. */ if (vtnet_txq_eof(txq) != 0 && vtnet_txq_below_threshold(txq) == 0) { virtqueue_disable_intr(vq); return (1); } return (0); } static void vtnet_txq_free_mbufs(struct vtnet_txq *txq) { struct virtqueue *vq; struct vtnet_tx_header *txhdr; int last; vq = txq->vtntx_vq; last = 0; while ((txhdr = virtqueue_drain(vq, &last)) != NULL) { m_freem(txhdr->vth_mbuf); uma_zfree(vtnet_tx_header_zone, txhdr); } KASSERT(virtqueue_empty(vq), ("%s: mbufs remaining in tx queue %p", __func__, txq)); } /* * BMV: Much of this can go away once we finally have offsets in * the mbuf packet header. Bug andre@. 
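 *
 * (Concretely, vtnet_txq_offload_ctx() below re-derives what the stack
 * already knew when it built the packet: for an untagged IPv4/TCP mbuf
 * it would typically return etype 0x0800 and start = 14 + (ip_hl << 2),
 * i.e. 34 for an IP header without options.)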
*/ static int vtnet_txq_offload_ctx(struct vtnet_txq *txq, struct mbuf *m, int *etype, int *proto, int *start) { struct vtnet_softc *sc; struct ether_vlan_header *evh; int offset; sc = txq->vtntx_sc; evh = mtod(m, struct ether_vlan_header *); if (evh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { /* BMV: We should handle nested VLAN tags too. */ *etype = ntohs(evh->evl_proto); offset = sizeof(struct ether_vlan_header); } else { *etype = ntohs(evh->evl_encap_proto); offset = sizeof(struct ether_header); } switch (*etype) { #if defined(INET) case ETHERTYPE_IP: { struct ip *ip, iphdr; if (__predict_false(m->m_len < offset + sizeof(struct ip))) { m_copydata(m, offset, sizeof(struct ip), (caddr_t) &iphdr); ip = &iphdr; } else ip = (struct ip *)(m->m_data + offset); *proto = ip->ip_p; *start = offset + (ip->ip_hl << 2); break; } #endif #if defined(INET6) case ETHERTYPE_IPV6: *proto = -1; *start = ip6_lasthdr(m, offset, IPPROTO_IPV6, proto); /* Assert the network stack sent us a valid packet. */ KASSERT(*start > offset, ("%s: mbuf %p start %d offset %d proto %d", __func__, m, *start, offset, *proto)); break; #endif default: sc->vtnet_stats.tx_csum_bad_ethtype++; return (EINVAL); } return (0); } static int vtnet_txq_offload_tso(struct vtnet_txq *txq, struct mbuf *m, int eth_type, int offset, struct virtio_net_hdr *hdr) { static struct timeval lastecn; static int curecn; struct vtnet_softc *sc; struct tcphdr *tcp, tcphdr; sc = txq->vtntx_sc; if (__predict_false(m->m_len < offset + sizeof(struct tcphdr))) { m_copydata(m, offset, sizeof(struct tcphdr), (caddr_t) &tcphdr); tcp = &tcphdr; } else tcp = (struct tcphdr *)(m->m_data + offset); hdr->hdr_len = offset + (tcp->th_off << 2); hdr->gso_size = m->m_pkthdr.tso_segsz; hdr->gso_type = eth_type == ETHERTYPE_IP ? VIRTIO_NET_HDR_GSO_TCPV4 : VIRTIO_NET_HDR_GSO_TCPV6; if (tcp->th_flags & TH_CWR) { /* * Drop if VIRTIO_NET_F_HOST_ECN was not negotiated. In FreeBSD, * ECN support is not on a per-interface basis, but globally via * the net.inet.tcp.ecn.enable sysctl knob. The default is off. */ if ((sc->vtnet_flags & VTNET_FLAG_TSO_ECN) == 0) { if (ppsratecheck(&lastecn, &curecn, 1)) if_printf(sc->vtnet_ifp, "TSO with ECN not negotiated with host\n"); return (ENOTSUP); } hdr->gso_type |= VIRTIO_NET_HDR_GSO_ECN; } txq->vtntx_stats.vtxs_tso++; return (0); } static struct mbuf * vtnet_txq_offload(struct vtnet_txq *txq, struct mbuf *m, struct virtio_net_hdr *hdr) { struct vtnet_softc *sc; int flags, etype, csum_start, proto, error; sc = txq->vtntx_sc; flags = m->m_pkthdr.csum_flags; error = vtnet_txq_offload_ctx(txq, m, &etype, &proto, &csum_start); if (error) goto drop; if ((etype == ETHERTYPE_IP && flags & VTNET_CSUM_OFFLOAD) || (etype == ETHERTYPE_IPV6 && flags & VTNET_CSUM_OFFLOAD_IPV6)) { /* * We could compare the IP protocol vs the CSUM_ flag too, * but that really should not be necessary. */ hdr->flags |= VIRTIO_NET_HDR_F_NEEDS_CSUM; hdr->csum_start = csum_start; hdr->csum_offset = m->m_pkthdr.csum_data; txq->vtntx_stats.vtxs_csum++; } if (flags & CSUM_TSO) { if (__predict_false(proto != IPPROTO_TCP)) { /* Likely failed to correctly parse the mbuf. 
*/ sc->vtnet_stats.tx_tso_not_tcp++; goto drop; } KASSERT(hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM, ("%s: mbuf %p TSO without checksum offload %#x", __func__, m, flags)); error = vtnet_txq_offload_tso(txq, m, etype, csum_start, hdr); if (error) goto drop; } return (m); drop: m_freem(m); return (NULL); } static int vtnet_txq_enqueue_buf(struct vtnet_txq *txq, struct mbuf **m_head, struct vtnet_tx_header *txhdr) { struct vtnet_softc *sc; struct virtqueue *vq; struct sglist *sg; struct mbuf *m; int error; sc = txq->vtntx_sc; vq = txq->vtntx_vq; sg = txq->vtntx_sg; m = *m_head; sglist_reset(sg); error = sglist_append(sg, &txhdr->vth_uhdr, sc->vtnet_hdr_size); KASSERT(error == 0 && sg->sg_nseg == 1, ("%s: error %d adding header to sglist", __func__, error)); error = sglist_append_mbuf(sg, m); if (error) { m = m_defrag(m, M_NOWAIT); if (m == NULL) goto fail; *m_head = m; sc->vtnet_stats.tx_defragged++; error = sglist_append_mbuf(sg, m); if (error) goto fail; } txhdr->vth_mbuf = m; error = virtqueue_enqueue(vq, txhdr, sg, sg->sg_nseg, 0); return (error); fail: sc->vtnet_stats.tx_defrag_failed++; m_freem(*m_head); *m_head = NULL; return (ENOBUFS); } static int vtnet_txq_encap(struct vtnet_txq *txq, struct mbuf **m_head) { struct vtnet_tx_header *txhdr; struct virtio_net_hdr *hdr; struct mbuf *m; int error; m = *m_head; M_ASSERTPKTHDR(m); txhdr = uma_zalloc(vtnet_tx_header_zone, M_NOWAIT | M_ZERO); if (txhdr == NULL) { m_freem(m); *m_head = NULL; return (ENOMEM); } /* * Always use the non-mergeable header, regardless if the feature * was negotiated. For transmit, num_buffers is always zero. The * vtnet_hdr_size is used to enqueue the correct header size. */ hdr = &txhdr->vth_uhdr.hdr; if (m->m_flags & M_VLANTAG) { m = ether_vlanencap(m, m->m_pkthdr.ether_vtag); if ((*m_head = m) == NULL) { error = ENOBUFS; goto fail; } m->m_flags &= ~M_VLANTAG; } if (m->m_pkthdr.csum_flags & VTNET_CSUM_ALL_OFFLOAD) { m = vtnet_txq_offload(txq, m, hdr); if ((*m_head = m) == NULL) { error = ENOBUFS; goto fail; } } error = vtnet_txq_enqueue_buf(txq, m_head, txhdr); if (error == 0) return (0); fail: uma_zfree(vtnet_tx_header_zone, txhdr); return (error); } static int vtnet_txq_mq_start_locked(struct vtnet_txq *txq, struct mbuf *m) { struct vtnet_softc *sc; struct virtqueue *vq; struct buf_ring *br; if_t ifp; int enq, tries, error; sc = txq->vtntx_sc; vq = txq->vtntx_vq; br = txq->vtntx_br; ifp = sc->vtnet_ifp; tries = 0; error = 0; VTNET_TXQ_LOCK_ASSERT(txq); if ((sc->vtnet_flags & VTNET_FLAG_RUNNING) == 0 || sc->vtnet_link_active == 0) { if (m != NULL) error = buf_ring_enqueue(br, m); return (error); } if (m != NULL) { error = buf_ring_enqueue(br, m); if (error) return (error); } vtnet_txq_eof(txq); again: enq = 0; while ((m = buf_ring_peek(br)) != NULL) { if (virtqueue_full(vq)) { buf_ring_putback_sc(br, m); break; } if (vtnet_txq_encap(txq, &m) != 0) { if (m != NULL) buf_ring_putback_sc(br, m); else buf_ring_advance_sc(br); break; } buf_ring_advance_sc(br); enq++; if_mtap(ifp, m, NULL, 0); } if (enq > 0 && vtnet_txq_notify(txq) != 0) { if (tries++ < VTNET_NOTIFY_RETRIES) goto again; txq->vtntx_stats.vtxs_rescheduled++; taskqueue_enqueue(txq->vtntx_tq, &txq->vtntx_intrtask); } return (0); } static int vtnet_txq_mq_start(if_t ifp, struct mbuf *m) { struct vtnet_softc *sc; struct vtnet_txq *txq; int i, npairs, error; sc = if_getsoftc(ifp, IF_DRIVER_SOFTC); npairs = sc->vtnet_act_vq_pairs; /* check if flowid is set */ if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) i = m->m_pkthdr.flowid % npairs; else i = curcpu % npairs; 
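	/*
	 * (Hypothetical numbers for illustration: with four active queue
	 * pairs, a flow hashed to flowid 0x2a is always steered to
	 * txq 0x2a % 4 == 2, which keeps one flow ordered on a single
	 * virtqueue, while unhashed traffic simply follows the
	 * submitting CPU.)
	 */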
txq = &sc->vtnet_txqs[i]; if (VTNET_TXQ_TRYLOCK(txq) != 0) { error = vtnet_txq_mq_start_locked(txq, m); VTNET_TXQ_UNLOCK(txq); } else { error = buf_ring_enqueue(txq->vtntx_br, m); taskqueue_enqueue(txq->vtntx_tq, &txq->vtntx_defrtask); } return (error); } static void vtnet_txq_tq_deferred(void *xtxq, int pending) { struct vtnet_softc *sc; struct vtnet_txq *txq; txq = xtxq; sc = txq->vtntx_sc; VTNET_TXQ_LOCK(txq); if (!buf_ring_empty(txq->vtntx_br)) vtnet_txq_mq_start_locked(txq, NULL); VTNET_TXQ_UNLOCK(txq); } static void vtnet_txq_start(struct vtnet_txq *txq) { struct vtnet_softc *sc; sc = txq->vtntx_sc; if (!buf_ring_empty(txq->vtntx_br)) vtnet_txq_mq_start_locked(txq, NULL); } static void vtnet_txq_tq_intr(void *xtxq, int pending) { struct vtnet_softc *sc; struct vtnet_txq *txq; if_t ifp; txq = xtxq; sc = txq->vtntx_sc; ifp = sc->vtnet_ifp; VTNET_TXQ_LOCK(txq); if ((sc->vtnet_flags & VTNET_FLAG_RUNNING) == 0) { VTNET_TXQ_UNLOCK(txq); return; } vtnet_txq_eof(txq); vtnet_txq_start(txq); VTNET_TXQ_UNLOCK(txq); } static int vtnet_txq_eof(struct vtnet_txq *txq) { struct virtqueue *vq; struct vtnet_tx_header *txhdr; struct mbuf *m; int deq; vq = txq->vtntx_vq; deq = 0; VTNET_TXQ_LOCK_ASSERT(txq); #ifdef DEV_NETMAP if (netmap_tx_irq(txq->vtntx_sc->vtnet_ifp, txq->vtntx_id)) { virtqueue_disable_intr(vq); // XXX luigi return 0; // XXX or 1 ? } #endif /* DEV_NETMAP */ while ((txhdr = virtqueue_dequeue(vq, NULL)) != NULL) { m = txhdr->vth_mbuf; deq++; txq->vtntx_stats.vtxs_opackets++; txq->vtntx_stats.vtxs_obytes += m->m_pkthdr.len; if (m->m_flags & M_MCAST) txq->vtntx_stats.vtxs_omcasts++; m_freem(m); uma_zfree(vtnet_tx_header_zone, txhdr); } if (virtqueue_empty(vq)) txq->vtntx_watchdog = 0; return (deq); } static void vtnet_tx_vq_intr(void *xtxq) { struct vtnet_softc *sc; struct vtnet_txq *txq; if_t ifp; txq = xtxq; sc = txq->vtntx_sc; ifp = sc->vtnet_ifp; if (__predict_false(txq->vtntx_id >= sc->vtnet_act_vq_pairs)) { /* * Ignore this interrupt. Either this is a spurious interrupt * or multiqueue without per-VQ MSIX so every queue needs to * be polled (a brain dead configuration we could try harder * to avoid). */ vtnet_txq_disable_intr(txq); return; } VTNET_TXQ_LOCK(txq); if ((sc->vtnet_flags & VTNET_FLAG_RUNNING) == 0) { VTNET_TXQ_UNLOCK(txq); return; } vtnet_txq_eof(txq); vtnet_txq_start(txq); VTNET_TXQ_UNLOCK(txq); } static void vtnet_tx_start_all(struct vtnet_softc *sc) { struct vtnet_txq *txq; int i; VTNET_CORE_LOCK_ASSERT(sc); for (i = 0; i < sc->vtnet_act_vq_pairs; i++) { txq = &sc->vtnet_txqs[i]; VTNET_TXQ_LOCK(txq); vtnet_txq_start(txq); VTNET_TXQ_UNLOCK(txq); } } static void vtnet_qflush(if_t ifp) { struct vtnet_softc *sc; struct vtnet_txq *txq; struct mbuf *m; int i; sc = if_getsoftc(ifp, IF_DRIVER_SOFTC); for (i = 0; i < sc->vtnet_act_vq_pairs; i++) { txq = &sc->vtnet_txqs[i]; VTNET_TXQ_LOCK(txq); while ((m = buf_ring_dequeue_sc(txq->vtntx_br)) != NULL) m_freem(m); VTNET_TXQ_UNLOCK(txq); } } static int vtnet_watchdog(struct vtnet_txq *txq) { struct ifnet *ifp; ifp = txq->vtntx_sc->vtnet_ifp; VTNET_TXQ_LOCK(txq); if (txq->vtntx_watchdog == 1) { /* * Only drain completed frames if the watchdog is about to * expire. If any frames were drained, there may be enough * free descriptors now available to transmit queued frames. * In that case, the timer will immediately be decremented * below, but the timeout is generous enough that should not * be a problem. 
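 *
 * (For scale: the counter is re-armed to VTNET_TX_TIMEOUT -- nominally
 * 5, the constant lives elsewhere in the driver -- whenever
 * vtnet_txq_notify() queues work, and vtnet_tick() calls in here once
 * per second, so a queue has to stall for several seconds before
 * vtnet_tick() reinitializes the interface.)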
*/ if (vtnet_txq_eof(txq) != 0) vtnet_txq_start(txq); } if (txq->vtntx_watchdog == 0 || --txq->vtntx_watchdog) { VTNET_TXQ_UNLOCK(txq); return (0); } VTNET_TXQ_UNLOCK(txq); if_printf(ifp, "watchdog timeout on queue %d\n", txq->vtntx_id); return (1); } static void vtnet_accum_stats(struct vtnet_softc *sc, struct vtnet_rxq_stats *rxacc, struct vtnet_txq_stats *txacc) { bzero(rxacc, sizeof(struct vtnet_rxq_stats)); bzero(txacc, sizeof(struct vtnet_txq_stats)); for (int i = 0; i < sc->vtnet_max_vq_pairs; i++) { struct vtnet_rxq_stats *rxst; struct vtnet_txq_stats *txst; rxst = &sc->vtnet_rxqs[i].vtnrx_stats; rxacc->vrxs_ipackets += rxst->vrxs_ipackets; rxacc->vrxs_ibytes += rxst->vrxs_ibytes; rxacc->vrxs_iqdrops += rxst->vrxs_iqdrops; rxacc->vrxs_csum += rxst->vrxs_csum; rxacc->vrxs_csum_failed += rxst->vrxs_csum_failed; rxacc->vrxs_rescheduled += rxst->vrxs_rescheduled; txst = &sc->vtnet_txqs[i].vtntx_stats; txacc->vtxs_opackets += txst->vtxs_opackets; txacc->vtxs_obytes += txst->vtxs_obytes; txacc->vtxs_csum += txst->vtxs_csum; txacc->vtxs_tso += txst->vtxs_tso; txacc->vtxs_rescheduled += txst->vtxs_rescheduled; } } static uint64_t vtnet_get_counter(if_t ifp, ift_counter cnt) { struct vtnet_softc *sc; struct vtnet_rxq_stats rxaccum; struct vtnet_txq_stats txaccum; sc = if_getsoftc(ifp, IF_DRIVER_SOFTC); vtnet_accum_stats(sc, &rxaccum, &txaccum); switch (cnt) { case IFCOUNTER_IPACKETS: return (rxaccum.vrxs_ipackets); case IFCOUNTER_IQDROPS: return (rxaccum.vrxs_iqdrops); case IFCOUNTER_IERRORS: return (rxaccum.vrxs_ierrors); case IFCOUNTER_OPACKETS: return (txaccum.vtxs_opackets); case IFCOUNTER_OBYTES: return (txaccum.vtxs_obytes); case IFCOUNTER_OMCASTS: return (txaccum.vtxs_omcasts); default: return (if_get_counter_default(ifp, cnt)); } } static void vtnet_tick(void *xsc) { struct vtnet_softc *sc; struct ifnet *ifp; int i, timedout; sc = xsc; ifp = sc->vtnet_ifp; timedout = 0; VTNET_CORE_LOCK_ASSERT(sc); for (i = 0; i < sc->vtnet_act_vq_pairs; i++) timedout |= vtnet_watchdog(&sc->vtnet_txqs[i]); if (timedout != 0) { sc->vtnet_flags &= ~VTNET_FLAG_RUNNING; vtnet_init_locked(sc); } else callout_schedule(&sc->vtnet_tick_ch, hz); } static void vtnet_start_taskqueues(struct vtnet_softc *sc) { device_t dev; struct vtnet_rxq *rxq; struct vtnet_txq *txq; int i, error; dev = sc->vtnet_dev; /* * Errors here are very difficult to recover from - we cannot * easily fail because, if this is during boot, we will hang * when freeing any successfully started taskqueues because * the scheduler isn't up yet. * * Most drivers just ignore the return value - it only fails * with ENOMEM so an error is not likely. 
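	 * Here we simply log any failure and continue.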
*/ for (i = 0; i < sc->vtnet_max_vq_pairs; i++) { rxq = &sc->vtnet_rxqs[i]; error = taskqueue_start_threads(&rxq->vtnrx_tq, 1, PI_NET, "%s rxq %d", device_get_nameunit(dev), rxq->vtnrx_id); if (error) { device_printf(dev, "failed to start rx taskq %d\n", rxq->vtnrx_id); } txq = &sc->vtnet_txqs[i]; error = taskqueue_start_threads(&txq->vtntx_tq, 1, PI_NET, "%s txq %d", device_get_nameunit(dev), txq->vtntx_id); if (error) { device_printf(dev, "failed to start tx taskq %d\n", txq->vtntx_id); } } } static void vtnet_free_taskqueues(struct vtnet_softc *sc) { struct vtnet_rxq *rxq; struct vtnet_txq *txq; int i; for (i = 0; i < sc->vtnet_max_vq_pairs; i++) { rxq = &sc->vtnet_rxqs[i]; if (rxq->vtnrx_tq != NULL) { taskqueue_free(rxq->vtnrx_tq); rxq->vtnrx_vq = NULL; } txq = &sc->vtnet_txqs[i]; if (txq->vtntx_tq != NULL) { taskqueue_free(txq->vtntx_tq); txq->vtntx_tq = NULL; } } } static void vtnet_drain_taskqueues(struct vtnet_softc *sc) { struct vtnet_rxq *rxq; struct vtnet_txq *txq; int i; for (i = 0; i < sc->vtnet_max_vq_pairs; i++) { rxq = &sc->vtnet_rxqs[i]; if (rxq->vtnrx_tq != NULL) taskqueue_drain(rxq->vtnrx_tq, &rxq->vtnrx_intrtask); txq = &sc->vtnet_txqs[i]; if (txq->vtntx_tq != NULL) { taskqueue_drain(txq->vtntx_tq, &txq->vtntx_intrtask); taskqueue_drain(txq->vtntx_tq, &txq->vtntx_defrtask); } } } static void vtnet_drain_rxtx_queues(struct vtnet_softc *sc) { struct vtnet_rxq *rxq; struct vtnet_txq *txq; int i; for (i = 0; i < sc->vtnet_act_vq_pairs; i++) { rxq = &sc->vtnet_rxqs[i]; vtnet_rxq_free_mbufs(rxq); txq = &sc->vtnet_txqs[i]; vtnet_txq_free_mbufs(txq); } } static void vtnet_stop_rendezvous(struct vtnet_softc *sc) { struct vtnet_rxq *rxq; struct vtnet_txq *txq; int i; /* * Lock and unlock the per-queue mutex so we known the stop * state is visible. Doing only the active queues should be * sufficient, but it does not cost much extra to do all the * queues. Note we hold the core mutex here too. */ for (i = 0; i < sc->vtnet_max_vq_pairs; i++) { rxq = &sc->vtnet_rxqs[i]; VTNET_RXQ_LOCK(rxq); VTNET_RXQ_UNLOCK(rxq); txq = &sc->vtnet_txqs[i]; VTNET_TXQ_LOCK(txq); VTNET_TXQ_UNLOCK(txq); } } static void vtnet_stop(struct vtnet_softc *sc) { device_t dev; struct ifnet *ifp; dev = sc->vtnet_dev; ifp = sc->vtnet_ifp; VTNET_CORE_LOCK_ASSERT(sc); sc->vtnet_flags &= ~VTNET_FLAG_RUNNING; sc->vtnet_link_active = 0; callout_stop(&sc->vtnet_tick_ch); /* Only advisory. */ vtnet_disable_interrupts(sc); /* * Stop the host adapter. This resets it to the pre-initialized * state. It will not generate any interrupts until after it is * reinitialized. */ virtio_stop(dev); vtnet_stop_rendezvous(sc); /* Free any mbufs left in the virtqueues. */ vtnet_drain_rxtx_queues(sc); } static int vtnet_virtio_reinit(struct vtnet_softc *sc) { device_t dev; struct ifnet *ifp; uint64_t features; - uint32_t caps, capenable, mask; + uint32_t mask; int error; dev = sc->vtnet_dev; ifp = sc->vtnet_ifp; features = sc->vtnet_features; mask = 0; #if defined(INET) mask |= IFCAP_RXCSUM; #endif #if defined (INET6) mask |= IFCAP_RXCSUM_IPV6; #endif /* * Re-negotiate with the host, removing any disabled receive * features. Transmit features are disabled only on our side - * via IF_CAPEANBLE and IF_HWASSIST. + * via if_capenable and if_hwassist. + * + * We require both IPv4 and IPv6 offloading to be enabled + * in order to negotiated it: VirtIO does not distinguish + * between the two. 
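+ * That is, both IFCAP_RXCSUM and IFCAP_RXCSUM_IPV6 must be set in
+ * vtnet_capenable for VIRTIO_NET_F_GUEST_CSUM to be retained.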
*/ - caps = if_get(ifp, IF_CAPABILITIES); - capenable = if_get(ifp, IF_CAPENABLE); - if (caps & mask) { - /* - * We require both IPv4 and IPv6 offloading to be enabled - * in order to negotiated it: VirtIO does not distinguish - * between the two. - */ - if ((capenable & mask) != mask) - features &= ~VIRTIO_NET_F_GUEST_CSUM; - } + if ((sc->vtnet_capenable & mask) != mask) + features &= ~VIRTIO_NET_F_GUEST_CSUM; - if (caps & IFCAP_LRO) { - if ((capenable & IFCAP_LRO) == 0) - features &= ~VTNET_LRO_FEATURES; - } + if ((sc->vtnet_capenable & IFCAP_LRO) == 0) + features &= ~VTNET_LRO_FEATURES; - if (caps & IFCAP_VLAN_HWFILTER) { - if ((capenable & IFCAP_VLAN_HWFILTER) == 0) - features &= ~VIRTIO_NET_F_CTRL_VLAN; - } + if ((sc->vtnet_capenable & IFCAP_VLAN_HWFILTER) == 0) + features &= ~VIRTIO_NET_F_CTRL_VLAN; error = virtio_reinit(dev, features); if (error) device_printf(dev, "virtio reinit error %d\n", error); return (error); } static void vtnet_init_rx_filters(struct vtnet_softc *sc) { struct ifnet *ifp; ifp = sc->vtnet_ifp; if (sc->vtnet_flags & VTNET_FLAG_CTRL_RX) { /* Restore promiscuous and all-multicast modes. */ vtnet_rx_filter(sc); /* Restore filtered MAC addresses. */ vtnet_rx_filter_mac(sc); } - if (if_get(ifp, IF_CAPENABLE) & IFCAP_VLAN_HWFILTER) + if (sc->vtnet_capenable & IFCAP_VLAN_HWFILTER) vtnet_rx_filter_vlan(sc); } static int vtnet_init_rx_queues(struct vtnet_softc *sc) { device_t dev; struct vtnet_rxq *rxq; int i, clsize, error; dev = sc->vtnet_dev; /* * Use the new cluster size if one has been set (via a MTU * change). Otherwise, use the standard 2K clusters. * * BMV: It might make sense to use page sized clusters as * the default (depending on the features negotiated). */ if (sc->vtnet_rx_new_clsize != 0) { clsize = sc->vtnet_rx_new_clsize; sc->vtnet_rx_new_clsize = 0; } else clsize = MCLBYTES; sc->vtnet_rx_clsize = clsize; sc->vtnet_rx_nmbufs = VTNET_NEEDED_RX_MBUFS(sc, clsize); KASSERT(sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS || sc->vtnet_rx_nmbufs < sc->vtnet_rx_nsegs, ("%s: too many rx mbufs %d for %d segments", __func__, sc->vtnet_rx_nmbufs, sc->vtnet_rx_nsegs)); #ifdef DEV_NETMAP if (vtnet_netmap_init_rx_buffers(sc)) return 0; #endif /* DEV_NETMAP */ for (i = 0; i < sc->vtnet_act_vq_pairs; i++) { rxq = &sc->vtnet_rxqs[i]; /* Hold the lock to satisfy asserts. */ VTNET_RXQ_LOCK(rxq); error = vtnet_rxq_populate(rxq); VTNET_RXQ_UNLOCK(rxq); if (error) { device_printf(dev, "cannot allocate mbufs for Rx queue %d\n", i); return (error); } } return (0); } static int vtnet_init_tx_queues(struct vtnet_softc *sc) { struct vtnet_txq *txq; int i; for (i = 0; i < sc->vtnet_act_vq_pairs; i++) { txq = &sc->vtnet_txqs[i]; txq->vtntx_watchdog = 0; } return (0); } static int vtnet_init_rxtx_queues(struct vtnet_softc *sc) { int error; error = vtnet_init_rx_queues(sc); if (error) return (error); error = vtnet_init_tx_queues(sc); if (error) return (error); return (0); } static void vtnet_set_active_vq_pairs(struct vtnet_softc *sc) { device_t dev; int npairs; dev = sc->vtnet_dev; if ((sc->vtnet_flags & VTNET_FLAG_MULTIQ) == 0) { MPASS(sc->vtnet_max_vq_pairs == 1); sc->vtnet_act_vq_pairs = 1; return; } /* BMV: Just use the maximum configured for now. 
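	 * If the host rejects the MQ control command below, fall back to a
	 * single active queue pair.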
*/ npairs = sc->vtnet_max_vq_pairs; if (vtnet_ctrl_mq_cmd(sc, npairs) != 0) { device_printf(dev, "cannot set active queue pairs to %d\n", npairs); npairs = 1; } sc->vtnet_act_vq_pairs = npairs; } static int vtnet_reinit(struct vtnet_softc *sc) { struct ifnet *ifp; - uint64_t hwassist; int error; ifp = sc->vtnet_ifp; /* Use the current MAC address. */ bcopy(if_lladdr(ifp), sc->vtnet_hwaddr, ETHER_ADDR_LEN); vtnet_set_hwaddr(sc); vtnet_set_active_vq_pairs(sc); - hwassist = 0; - if (if_get(ifp, IF_CAPENABLE) & IFCAP_TXCSUM) - hwassist |= VTNET_CSUM_OFFLOAD; - if (if_get(ifp, IF_CAPENABLE) & IFCAP_TXCSUM_IPV6) - hwassist |= VTNET_CSUM_OFFLOAD_IPV6; - if (if_get(ifp, IF_CAPENABLE) & IFCAP_TSO4) - hwassist |= CSUM_TSO; - if (if_get(ifp, IF_CAPENABLE) & IFCAP_TSO6) - hwassist |= CSUM_IP6_TSO; - if_set(ifp, IF_HWASSIST, hwassist); - if (sc->vtnet_flags & VTNET_FLAG_CTRL_VQ) vtnet_init_rx_filters(sc); error = vtnet_init_rxtx_queues(sc); if (error) return (error); vtnet_enable_interrupts(sc); sc->vtnet_flags |= VTNET_FLAG_RUNNING; return (0); } static void vtnet_init_locked(struct vtnet_softc *sc) { device_t dev; struct ifnet *ifp; dev = sc->vtnet_dev; ifp = sc->vtnet_ifp; VTNET_CORE_LOCK_ASSERT(sc); if (sc->vtnet_flags & VTNET_FLAG_RUNNING) return; vtnet_stop(sc); /* Reinitialize with the host. */ if (vtnet_virtio_reinit(sc) != 0) goto fail; if (vtnet_reinit(sc) != 0) goto fail; virtio_reinit_complete(dev); vtnet_update_link_status(sc); callout_reset(&sc->vtnet_tick_ch, hz, vtnet_tick, sc); return; fail: vtnet_stop(sc); } static void vtnet_init(void *xsc) { struct vtnet_softc *sc; sc = xsc; #ifdef DEV_NETMAP if (!NA(sc->vtnet_ifp)) { D("try to attach again"); vtnet_netmap_attach(sc); } #endif /* DEV_NETMAP */ VTNET_CORE_LOCK(sc); vtnet_init_locked(sc); VTNET_CORE_UNLOCK(sc); } static void vtnet_free_ctrl_vq(struct vtnet_softc *sc) { struct virtqueue *vq; vq = sc->vtnet_ctrl_vq; /* * The control virtqueue is only polled and therefore it should * already be empty. */ KASSERT(virtqueue_empty(vq), ("%s: ctrl vq %p not empty", __func__, vq)); } static void vtnet_exec_ctrl_cmd(struct vtnet_softc *sc, void *cookie, struct sglist *sg, int readable, int writable) { struct virtqueue *vq; vq = sc->vtnet_ctrl_vq; VTNET_CORE_LOCK_ASSERT(sc); KASSERT(sc->vtnet_flags & VTNET_FLAG_CTRL_VQ, ("%s: CTRL_VQ feature not negotiated", __func__)); if (!virtqueue_empty(vq)) return; if (virtqueue_enqueue(vq, cookie, sg, readable, writable) != 0) return; /* * Poll for the response, but the command is likely already * done when we return from the notify. */ virtqueue_notify(vq); virtqueue_poll(vq, NULL); } static int vtnet_ctrl_mac_cmd(struct vtnet_softc *sc, uint8_t *hwaddr) { struct virtio_net_ctrl_hdr hdr __aligned(2); struct sglist_seg segs[3]; struct sglist sg; uint8_t ack; int error; hdr.class = VIRTIO_NET_CTRL_MAC; hdr.cmd = VIRTIO_NET_CTRL_MAC_ADDR_SET; ack = VIRTIO_NET_ERR; sglist_init(&sg, 3, segs); error = 0; error |= sglist_append(&sg, &hdr, sizeof(struct virtio_net_ctrl_hdr)); error |= sglist_append(&sg, hwaddr, ETHER_ADDR_LEN); error |= sglist_append(&sg, &ack, sizeof(uint8_t)); KASSERT(error == 0 && sg.sg_nseg == 3, ("%s: error %d adding set MAC msg to sglist", __func__, error)); vtnet_exec_ctrl_cmd(sc, &ack, &sg, sg.sg_nseg - 1, 1); return (ack == VIRTIO_NET_OK ? 
0 : EIO); } static int vtnet_ctrl_mq_cmd(struct vtnet_softc *sc, uint16_t npairs) { struct sglist_seg segs[3]; struct sglist sg; struct { struct virtio_net_ctrl_hdr hdr; uint8_t pad1; struct virtio_net_ctrl_mq mq; uint8_t pad2; uint8_t ack; } s __aligned(2); int error; s.hdr.class = VIRTIO_NET_CTRL_MQ; s.hdr.cmd = VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET; s.mq.virtqueue_pairs = npairs; s.ack = VIRTIO_NET_ERR; sglist_init(&sg, 3, segs); error = 0; error |= sglist_append(&sg, &s.hdr, sizeof(struct virtio_net_ctrl_hdr)); error |= sglist_append(&sg, &s.mq, sizeof(struct virtio_net_ctrl_mq)); error |= sglist_append(&sg, &s.ack, sizeof(uint8_t)); KASSERT(error == 0 && sg.sg_nseg == 3, ("%s: error %d adding MQ message to sglist", __func__, error)); vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1); return (s.ack == VIRTIO_NET_OK ? 0 : EIO); } static int vtnet_ctrl_rx_cmd(struct vtnet_softc *sc, int cmd, int on) { struct sglist_seg segs[3]; struct sglist sg; struct { struct virtio_net_ctrl_hdr hdr; uint8_t pad1; uint8_t onoff; uint8_t pad2; uint8_t ack; } s __aligned(2); int error; KASSERT(sc->vtnet_flags & VTNET_FLAG_CTRL_RX, ("%s: CTRL_RX feature not negotiated", __func__)); s.hdr.class = VIRTIO_NET_CTRL_RX; s.hdr.cmd = cmd; s.onoff = !!on; s.ack = VIRTIO_NET_ERR; sglist_init(&sg, 3, segs); error = 0; error |= sglist_append(&sg, &s.hdr, sizeof(struct virtio_net_ctrl_hdr)); error |= sglist_append(&sg, &s.onoff, sizeof(uint8_t)); error |= sglist_append(&sg, &s.ack, sizeof(uint8_t)); KASSERT(error == 0 && sg.sg_nseg == 3, ("%s: error %d adding Rx message to sglist", __func__, error)); vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1); return (s.ack == VIRTIO_NET_OK ? 0 : EIO); } static int vtnet_set_promisc(struct vtnet_softc *sc, int on) { return (vtnet_ctrl_rx_cmd(sc, VIRTIO_NET_CTRL_RX_PROMISC, on)); } static int vtnet_set_allmulti(struct vtnet_softc *sc, int on) { return (vtnet_ctrl_rx_cmd(sc, VIRTIO_NET_CTRL_RX_ALLMULTI, on)); } /* * The device defaults to promiscuous mode for backwards compatibility. * Turn it off at attach time if possible. */ static void vtnet_attach_disable_promisc(struct vtnet_softc *sc) { struct ifnet *ifp; ifp = sc->vtnet_ifp; VTNET_CORE_LOCK(sc); if ((sc->vtnet_flags & VTNET_FLAG_CTRL_RX) == 0) { if_addflags(ifp, IF_FLAGS, IFF_PROMISC); } else if (vtnet_set_promisc(sc, 0) != 0) { if_addflags(ifp, IF_FLAGS, IFF_PROMISC); device_printf(sc->vtnet_dev, "cannot disable default promiscuous mode\n"); } VTNET_CORE_UNLOCK(sc); } static void vtnet_rx_filter(struct vtnet_softc *sc) { device_t dev; struct ifnet *ifp; dev = sc->vtnet_dev; ifp = sc->vtnet_ifp; VTNET_CORE_LOCK_ASSERT(sc); if (vtnet_set_promisc(sc, if_get(ifp, IF_FLAGS) & IFF_PROMISC) != 0) device_printf(dev, "cannot %s promiscuous mode\n", if_get(ifp, IF_FLAGS) & IFF_PROMISC ? "enable" : "disable"); if (vtnet_set_allmulti(sc, if_get(ifp, IF_FLAGS) & IFF_ALLMULTI) != 0) device_printf(dev, "cannot %s all-multicast mode\n", if_get(ifp, IF_FLAGS) & IFF_ALLMULTI ? 
"enable" : "disable"); } static void vtnet_copy_unicast_mac(void *arg, struct sockaddr *addr, struct sockaddr *dstaddr, struct sockaddr *mask) { struct vtnet_softc *sc = arg; struct vtnet_mac_filter *filter = sc->vtnet_mac_filter; struct sockaddr_dl *sdl = (struct sockaddr_dl *)addr; if (addr->sa_family != AF_LINK) return; if (memcmp(LLADDR(sdl), sc->vtnet_hwaddr, ETHER_ADDR_LEN) == 0) return; if (filter->vmf_unicast.nentries == VTNET_MAX_MAC_ENTRIES) { filter->vmf_unicast.nentries++; return; } bcopy(LLADDR(sdl), &filter->vmf_unicast.macs[filter->vmf_unicast.nentries++], ETHER_ADDR_LEN); } static void vtnet_copy_multicast_mac(void *arg, struct sockaddr *maddr) { struct vtnet_softc *sc = arg; struct vtnet_mac_filter *filter = sc->vtnet_mac_filter; struct sockaddr_dl *sdl = (struct sockaddr_dl *)maddr; if (maddr->sa_family != AF_LINK) return; if (filter->vmf_multicast.nentries == VTNET_MAX_MAC_ENTRIES) { filter->vmf_multicast.nentries++; return; } bcopy(LLADDR(sdl), &filter->vmf_multicast.macs[filter->vmf_multicast.nentries++], ETHER_ADDR_LEN); } static void vtnet_rx_filter_mac(struct vtnet_softc *sc) { struct virtio_net_ctrl_hdr hdr __aligned(2); struct vtnet_mac_filter *filter; struct sglist_seg segs[4]; struct sglist sg; struct ifnet *ifp; int promisc, allmulti, error; uint8_t ack; ifp = sc->vtnet_ifp; filter = sc->vtnet_mac_filter; promisc = 0; allmulti = 0; VTNET_CORE_LOCK_ASSERT(sc); KASSERT(sc->vtnet_flags & VTNET_FLAG_CTRL_RX, ("%s: CTRL_RX feature not negotiated", __func__)); /* Unicast MAC addresses: */ if_foreach_addr(ifp, vtnet_copy_unicast_mac, sc); if (filter->vmf_unicast.nentries > VTNET_MAX_MAC_ENTRIES) { promisc = 1; filter->vmf_unicast.nentries = 0; if_printf(ifp, "more than %d MAC addresses assigned, " "falling back to promiscuous mode\n", VTNET_MAX_MAC_ENTRIES); } /* Multicast MAC addresses: */ if_foreach_maddr(ifp, vtnet_copy_multicast_mac, sc); if (filter->vmf_multicast.nentries > VTNET_MAX_MAC_ENTRIES) { allmulti = 1; filter->vmf_multicast.nentries = 0; if_printf(ifp, "more than %d multicast MAC addresses " "assigned, falling back to all-multicast mode\n", VTNET_MAX_MAC_ENTRIES); } if (promisc != 0 && allmulti != 0) goto out; hdr.class = VIRTIO_NET_CTRL_MAC; hdr.cmd = VIRTIO_NET_CTRL_MAC_TABLE_SET; ack = VIRTIO_NET_ERR; sglist_init(&sg, 4, segs); error = 0; error |= sglist_append(&sg, &hdr, sizeof(struct virtio_net_ctrl_hdr)); error |= sglist_append(&sg, &filter->vmf_unicast, sizeof(uint32_t) + filter->vmf_unicast.nentries * ETHER_ADDR_LEN); error |= sglist_append(&sg, &filter->vmf_multicast, sizeof(uint32_t) + filter->vmf_multicast.nentries * ETHER_ADDR_LEN); error |= sglist_append(&sg, &ack, sizeof(uint8_t)); KASSERT(error == 0 && sg.sg_nseg == 4, ("%s: error %d adding MAC filter msg to sglist", __func__, error)); vtnet_exec_ctrl_cmd(sc, &ack, &sg, sg.sg_nseg - 1, 1); if (ack != VIRTIO_NET_OK) if_printf(ifp, "error setting host MAC filter table\n"); out: if (promisc != 0 && vtnet_set_promisc(sc, 1) != 0) if_printf(ifp, "cannot enable promiscuous mode\n"); if (allmulti != 0 && vtnet_set_allmulti(sc, 1) != 0) if_printf(ifp, "cannot enable all-multicast mode\n"); } static int vtnet_exec_vlan_filter(struct vtnet_softc *sc, int add, uint16_t tag) { struct sglist_seg segs[3]; struct sglist sg; struct { struct virtio_net_ctrl_hdr hdr; uint8_t pad1; uint16_t tag; uint8_t pad2; uint8_t ack; } s __aligned(2); int error; s.hdr.class = VIRTIO_NET_CTRL_VLAN; s.hdr.cmd = add ? 
VIRTIO_NET_CTRL_VLAN_ADD : VIRTIO_NET_CTRL_VLAN_DEL; s.tag = tag; s.ack = VIRTIO_NET_ERR; sglist_init(&sg, 3, segs); error = 0; error |= sglist_append(&sg, &s.hdr, sizeof(struct virtio_net_ctrl_hdr)); error |= sglist_append(&sg, &s.tag, sizeof(uint16_t)); error |= sglist_append(&sg, &s.ack, sizeof(uint8_t)); KASSERT(error == 0 && sg.sg_nseg == 3, ("%s: error %d adding VLAN message to sglist", __func__, error)); vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1); return (s.ack == VIRTIO_NET_OK ? 0 : EIO); } static void vtnet_rx_filter_vlan(struct vtnet_softc *sc) { uint32_t w; uint16_t tag; int i, bit; VTNET_CORE_LOCK_ASSERT(sc); KASSERT(sc->vtnet_flags & VTNET_FLAG_VLAN_FILTER, ("%s: VLAN_FILTER feature not negotiated", __func__)); /* Enable the filter for each configured VLAN. */ for (i = 0; i < VTNET_VLAN_FILTER_NWORDS; i++) { w = sc->vtnet_vlan_filter[i]; while ((bit = ffs(w) - 1) != -1) { w &= ~(1 << bit); tag = sizeof(w) * CHAR_BIT * i + bit; if (vtnet_exec_vlan_filter(sc, 1, tag) != 0) { device_printf(sc->vtnet_dev, "cannot enable VLAN %d filter\n", tag); } } } } static void vtnet_update_vlan_filter(struct vtnet_softc *sc, int add, uint16_t tag) { struct ifnet *ifp; int idx, bit; ifp = sc->vtnet_ifp; idx = (tag >> 5) & 0x7F; bit = tag & 0x1F; if (tag == 0 || tag > 4095) return; VTNET_CORE_LOCK(sc); if (add) sc->vtnet_vlan_filter[idx] |= (1 << bit); else sc->vtnet_vlan_filter[idx] &= ~(1 << bit); - if ((if_get(ifp, IF_CAPENABLE) & IFCAP_VLAN_HWFILTER) && + if ((sc->vtnet_capenable & IFCAP_VLAN_HWFILTER) && vtnet_exec_vlan_filter(sc, add, tag) != 0) { device_printf(sc->vtnet_dev, "cannot %s VLAN %d %s the host filter table\n", add ? "add" : "remove", tag, add ? "to" : "from"); } VTNET_CORE_UNLOCK(sc); } static void vtnet_register_vlan(void *arg, struct ifnet *ifp, uint16_t tag) { if (if_getsoftc(ifp, IF_DRIVER_SOFTC) != arg) return; vtnet_update_vlan_filter(arg, 1, tag); } static void vtnet_unregister_vlan(void *arg, struct ifnet *ifp, uint16_t tag) { if (if_getsoftc(ifp, IF_DRIVER_SOFTC) != arg) return; vtnet_update_vlan_filter(arg, 0, tag); } static int vtnet_is_link_up(struct vtnet_softc *sc) { device_t dev; struct ifnet *ifp; uint16_t status; dev = sc->vtnet_dev; ifp = sc->vtnet_ifp; - if ((if_get(ifp, IF_CAPABILITIES) & IFCAP_LINKSTATE) == 0) + if (!virtio_with_feature(dev, VIRTIO_NET_F_STATUS)) status = VIRTIO_NET_S_LINK_UP; else status = virtio_read_dev_config_2(dev, offsetof(struct virtio_net_config, status)); return ((status & VIRTIO_NET_S_LINK_UP) != 0); } static void vtnet_update_link_status(struct vtnet_softc *sc) { struct ifnet *ifp; int link; ifp = sc->vtnet_ifp; VTNET_CORE_LOCK_ASSERT(sc); link = vtnet_is_link_up(sc); /* Notify if the link status has changed. 
*/ if (link != 0 && sc->vtnet_link_active == 0) { sc->vtnet_link_active = 1; if_link_state_change(ifp, LINK_STATE_UP); } else if (link == 0 && sc->vtnet_link_active != 0) { sc->vtnet_link_active = 0; if_link_state_change(ifp, LINK_STATE_DOWN); } } static int vtnet_ifmedia_upd(struct ifnet *ifp) { struct vtnet_softc *sc; struct ifmedia *ifm; sc = if_getsoftc(ifp, IF_DRIVER_SOFTC); ifm = &sc->vtnet_media; if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) return (EINVAL); return (0); } static void vtnet_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) { struct vtnet_softc *sc; sc = if_getsoftc(ifp, IF_DRIVER_SOFTC); ifmr->ifm_status = IFM_AVALID; ifmr->ifm_active = IFM_ETHER; VTNET_CORE_LOCK(sc); if (vtnet_is_link_up(sc) != 0) { ifmr->ifm_status |= IFM_ACTIVE; ifmr->ifm_active |= VTNET_MEDIATYPE; } else ifmr->ifm_active |= IFM_NONE; VTNET_CORE_UNLOCK(sc); } static void vtnet_set_hwaddr(struct vtnet_softc *sc) { device_t dev; int i; dev = sc->vtnet_dev; if (sc->vtnet_flags & VTNET_FLAG_CTRL_MAC) { if (vtnet_ctrl_mac_cmd(sc, sc->vtnet_hwaddr) != 0) device_printf(dev, "unable to set MAC address\n"); } else if (sc->vtnet_flags & VTNET_FLAG_MAC) { for (i = 0; i < ETHER_ADDR_LEN; i++) { virtio_write_dev_config_1(dev, offsetof(struct virtio_net_config, mac) + i, sc->vtnet_hwaddr[i]); } } } static void vtnet_get_hwaddr(struct vtnet_softc *sc) { device_t dev; int i; dev = sc->vtnet_dev; if ((sc->vtnet_flags & VTNET_FLAG_MAC) == 0) { /* * Generate a random locally administered unicast address. * * It would be nice to generate the same MAC address across * reboots, but it seems all the hosts currently available * support the MAC feature, so this isn't too important. */ sc->vtnet_hwaddr[0] = 0xB2; arc4rand(&sc->vtnet_hwaddr[1], ETHER_ADDR_LEN - 1, 0); vtnet_set_hwaddr(sc); return; } for (i = 0; i < ETHER_ADDR_LEN; i++) { sc->vtnet_hwaddr[i] = virtio_read_dev_config_1(dev, offsetof(struct virtio_net_config, mac) + i); } } static void vtnet_vlan_tag_remove(struct mbuf *m) { struct ether_vlan_header *evh; evh = mtod(m, struct ether_vlan_header *); m->m_pkthdr.ether_vtag = ntohs(evh->evl_tag); m->m_flags |= M_VLANTAG; /* Strip the 802.1Q header. */ bcopy((char *) evh, (char *) evh + ETHER_VLAN_ENCAP_LEN, ETHER_HDR_LEN - ETHER_TYPE_LEN); m_adj(m, ETHER_VLAN_ENCAP_LEN); } static void vtnet_set_rx_process_limit(struct vtnet_softc *sc) { int limit; limit = vtnet_tunable_int(sc, "rx_process_limit", vtnet_rx_process_limit); if (limit < 0) limit = INT_MAX; sc->vtnet_rx_process_limit = limit; } static void vtnet_set_tx_intr_threshold(struct vtnet_softc *sc) { device_t dev; int size, thresh; dev = sc->vtnet_dev; size = virtqueue_size(sc->vtnet_txqs[0].vtntx_vq); /* * The Tx interrupt is disabled until the queue free count falls * below our threshold. Completed frames are drained from the Tx * virtqueue before transmitting new frames and in the watchdog * callout, so the frequency of Tx interrupts is greatly reduced, * at the cost of not freeing mbufs as quickly as they otherwise * would be. * * N.B. We assume all the Tx queues are the same size. */ thresh = size / 4; /* * Without indirect descriptors, leave enough room for the most * segments we handle. 
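	 * This way the interrupt is requested before the ring becomes too
	 * full to hold even one maximally fragmented frame.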
*/ if ((sc->vtnet_flags & VTNET_FLAG_INDIRECT) == 0 && thresh < sc->vtnet_tx_nsegs) thresh = sc->vtnet_tx_nsegs; sc->vtnet_tx_intr_thresh = thresh; } static void vtnet_setup_rxq_sysctl(struct sysctl_ctx_list *ctx, struct sysctl_oid_list *child, struct vtnet_rxq *rxq) { struct sysctl_oid *node; struct sysctl_oid_list *list; struct vtnet_rxq_stats *stats; char namebuf[16]; snprintf(namebuf, sizeof(namebuf), "rxq%d", rxq->vtnrx_id); node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf, CTLFLAG_RD, NULL, "Receive Queue"); list = SYSCTL_CHILDREN(node); stats = &rxq->vtnrx_stats; SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "ipackets", CTLFLAG_RD, &stats->vrxs_ipackets, "Receive packets"); SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "ibytes", CTLFLAG_RD, &stats->vrxs_ibytes, "Receive bytes"); SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "iqdrops", CTLFLAG_RD, &stats->vrxs_iqdrops, "Receive drops"); SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "ierrors", CTLFLAG_RD, &stats->vrxs_ierrors, "Receive errors"); SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "csum", CTLFLAG_RD, &stats->vrxs_csum, "Receive checksum offloaded"); SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "csum_failed", CTLFLAG_RD, &stats->vrxs_csum_failed, "Receive checksum offload failed"); SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "rescheduled", CTLFLAG_RD, &stats->vrxs_rescheduled, "Receive interrupt handler rescheduled"); } static void vtnet_setup_txq_sysctl(struct sysctl_ctx_list *ctx, struct sysctl_oid_list *child, struct vtnet_txq *txq) { struct sysctl_oid *node; struct sysctl_oid_list *list; struct vtnet_txq_stats *stats; char namebuf[16]; snprintf(namebuf, sizeof(namebuf), "txq%d", txq->vtntx_id); node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf, CTLFLAG_RD, NULL, "Transmit Queue"); list = SYSCTL_CHILDREN(node); stats = &txq->vtntx_stats; SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "opackets", CTLFLAG_RD, &stats->vtxs_opackets, "Transmit packets"); SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "obytes", CTLFLAG_RD, &stats->vtxs_obytes, "Transmit bytes"); SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "omcasts", CTLFLAG_RD, &stats->vtxs_omcasts, "Transmit multicasts"); SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "csum", CTLFLAG_RD, &stats->vtxs_csum, "Transmit checksum offloaded"); SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "tso", CTLFLAG_RD, &stats->vtxs_tso, "Transmit segmentation offloaded"); SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "rescheduled", CTLFLAG_RD, &stats->vtxs_rescheduled, "Transmit interrupt handler rescheduled"); } static void vtnet_setup_queue_sysctl(struct vtnet_softc *sc) { device_t dev; struct sysctl_ctx_list *ctx; struct sysctl_oid *tree; struct sysctl_oid_list *child; int i; dev = sc->vtnet_dev; ctx = device_get_sysctl_ctx(dev); tree = device_get_sysctl_tree(dev); child = SYSCTL_CHILDREN(tree); for (i = 0; i < sc->vtnet_max_vq_pairs; i++) { vtnet_setup_rxq_sysctl(ctx, child, &sc->vtnet_rxqs[i]); vtnet_setup_txq_sysctl(ctx, child, &sc->vtnet_txqs[i]); } } static void vtnet_setup_stat_sysctl(struct sysctl_ctx_list *ctx, struct sysctl_oid_list *child, struct vtnet_softc *sc) { struct vtnet_statistics *stats; struct vtnet_rxq_stats rxaccum; struct vtnet_txq_stats txaccum; vtnet_accum_stats(sc, &rxaccum, &txaccum); stats = &sc->vtnet_stats; stats->rx_csum_offloaded = rxaccum.vrxs_csum; stats->rx_csum_failed = rxaccum.vrxs_csum_failed; stats->rx_task_rescheduled = rxaccum.vrxs_rescheduled; stats->tx_csum_offloaded = txaccum.vtxs_csum; stats->tx_tso_offloaded = txaccum.vtxs_tso; stats->tx_task_rescheduled = txaccum.vtxs_rescheduled; SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, 
"mbuf_alloc_failed", CTLFLAG_RD, &stats->mbuf_alloc_failed, "Mbuf cluster allocation failures"); SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_frame_too_large", CTLFLAG_RD, &stats->rx_frame_too_large, "Received frame larger than the mbuf chain"); SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_enq_replacement_failed", CTLFLAG_RD, &stats->rx_enq_replacement_failed, "Enqueuing the replacement receive mbuf failed"); SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_mergeable_failed", CTLFLAG_RD, &stats->rx_mergeable_failed, "Mergeable buffers receive failures"); SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_bad_ethtype", CTLFLAG_RD, &stats->rx_csum_bad_ethtype, "Received checksum offloaded buffer with unsupported " "Ethernet type"); SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_bad_ipproto", CTLFLAG_RD, &stats->rx_csum_bad_ipproto, "Received checksum offloaded buffer with incorrect IP protocol"); SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_bad_offset", CTLFLAG_RD, &stats->rx_csum_bad_offset, "Received checksum offloaded buffer with incorrect offset"); SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_bad_proto", CTLFLAG_RD, &stats->rx_csum_bad_proto, "Received checksum offloaded buffer with incorrect protocol"); SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_failed", CTLFLAG_RD, &stats->rx_csum_failed, "Received buffer checksum offload failed"); SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_offloaded", CTLFLAG_RD, &stats->rx_csum_offloaded, "Received buffer checksum offload succeeded"); SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_task_rescheduled", CTLFLAG_RD, &stats->rx_task_rescheduled, "Times the receive interrupt task rescheduled itself"); SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_csum_bad_ethtype", CTLFLAG_RD, &stats->tx_csum_bad_ethtype, "Aborted transmit of checksum offloaded buffer with unknown " "Ethernet type"); SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_tso_bad_ethtype", CTLFLAG_RD, &stats->tx_tso_bad_ethtype, "Aborted transmit of TSO buffer with unknown Ethernet type"); SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_tso_not_tcp", CTLFLAG_RD, &stats->tx_tso_not_tcp, "Aborted transmit of TSO buffer with non TCP protocol"); SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_defragged", CTLFLAG_RD, &stats->tx_defragged, "Transmit mbufs defragged"); SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_defrag_failed", CTLFLAG_RD, &stats->tx_defrag_failed, "Aborted transmit of buffer because defrag failed"); SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_csum_offloaded", CTLFLAG_RD, &stats->tx_csum_offloaded, "Offloaded checksum of transmitted buffer"); SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_tso_offloaded", CTLFLAG_RD, &stats->tx_tso_offloaded, "Segmentation offload of transmitted buffer"); SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_task_rescheduled", CTLFLAG_RD, &stats->tx_task_rescheduled, "Times the transmit interrupt task rescheduled itself"); } static void vtnet_setup_sysctl(struct vtnet_softc *sc) { device_t dev; struct sysctl_ctx_list *ctx; struct sysctl_oid *tree; struct sysctl_oid_list *child; dev = sc->vtnet_dev; ctx = device_get_sysctl_ctx(dev); tree = device_get_sysctl_tree(dev); child = SYSCTL_CHILDREN(tree); SYSCTL_ADD_INT(ctx, child, OID_AUTO, "max_vq_pairs", CTLFLAG_RD, &sc->vtnet_max_vq_pairs, 0, "Maximum number of supported virtqueue pairs"); SYSCTL_ADD_INT(ctx, child, OID_AUTO, "act_vq_pairs", CTLFLAG_RD, &sc->vtnet_act_vq_pairs, 0, "Number of active virtqueue pairs"); vtnet_setup_stat_sysctl(ctx, child, sc); } static int vtnet_rxq_enable_intr(struct vtnet_rxq *rxq) { return 
(virtqueue_enable_intr(rxq->vtnrx_vq)); } static void vtnet_rxq_disable_intr(struct vtnet_rxq *rxq) { virtqueue_disable_intr(rxq->vtnrx_vq); } static int vtnet_txq_enable_intr(struct vtnet_txq *txq) { struct virtqueue *vq; vq = txq->vtntx_vq; if (vtnet_txq_below_threshold(txq) != 0) return (virtqueue_postpone_intr(vq, VQ_POSTPONE_LONG)); /* * The free count is above our threshold. Keep the Tx interrupt * disabled until the queue is fuller. */ return (0); } static void vtnet_txq_disable_intr(struct vtnet_txq *txq) { virtqueue_disable_intr(txq->vtntx_vq); } static void vtnet_enable_rx_interrupts(struct vtnet_softc *sc) { int i; for (i = 0; i < sc->vtnet_act_vq_pairs; i++) vtnet_rxq_enable_intr(&sc->vtnet_rxqs[i]); } static void vtnet_enable_tx_interrupts(struct vtnet_softc *sc) { int i; for (i = 0; i < sc->vtnet_act_vq_pairs; i++) vtnet_txq_enable_intr(&sc->vtnet_txqs[i]); } static void vtnet_enable_interrupts(struct vtnet_softc *sc) { vtnet_enable_rx_interrupts(sc); vtnet_enable_tx_interrupts(sc); } static void vtnet_disable_rx_interrupts(struct vtnet_softc *sc) { int i; for (i = 0; i < sc->vtnet_act_vq_pairs; i++) vtnet_rxq_disable_intr(&sc->vtnet_rxqs[i]); } static void vtnet_disable_tx_interrupts(struct vtnet_softc *sc) { int i; for (i = 0; i < sc->vtnet_act_vq_pairs; i++) vtnet_txq_disable_intr(&sc->vtnet_txqs[i]); } static void vtnet_disable_interrupts(struct vtnet_softc *sc) { vtnet_disable_rx_interrupts(sc); vtnet_disable_tx_interrupts(sc); } static int vtnet_tunable_int(struct vtnet_softc *sc, const char *knob, int def) { char path[64]; snprintf(path, sizeof(path), "hw.vtnet.%d.%s", device_get_unit(sc->vtnet_dev), knob); TUNABLE_INT_FETCH(path, &def); return (def); } Index: projects/ifnet/sys/dev/virtio/network/if_vtnetvar.h =================================================================== --- projects/ifnet/sys/dev/virtio/network/if_vtnetvar.h (revision 277242) +++ projects/ifnet/sys/dev/virtio/network/if_vtnetvar.h (revision 277243) @@ -1,359 +1,361 @@ /*- * Copyright (c) 2011, Bryan Venteicher * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* * $FreeBSD$ */ #ifndef _IF_VTNETVAR_H #define _IF_VTNETVAR_H struct vtnet_softc; struct vtnet_statistics { uint64_t mbuf_alloc_failed; uint64_t rx_frame_too_large; uint64_t rx_enq_replacement_failed; uint64_t rx_mergeable_failed; uint64_t rx_csum_bad_ethtype; uint64_t rx_csum_bad_ipproto; uint64_t rx_csum_bad_offset; uint64_t rx_csum_bad_proto; uint64_t tx_csum_bad_ethtype; uint64_t tx_tso_bad_ethtype; uint64_t tx_tso_not_tcp; uint64_t tx_defragged; uint64_t tx_defrag_failed; /* * These are accumulated from each Rx/Tx queue. */ uint64_t rx_csum_failed; uint64_t rx_csum_offloaded; uint64_t rx_task_rescheduled; uint64_t tx_csum_offloaded; uint64_t tx_tso_offloaded; uint64_t tx_task_rescheduled; }; struct vtnet_rxq_stats { uint64_t vrxs_ipackets; /* if_ipackets */ uint64_t vrxs_ibytes; /* if_ibytes */ uint64_t vrxs_iqdrops; /* if_iqdrops */ uint64_t vrxs_ierrors; /* if_ierrors */ uint64_t vrxs_csum; uint64_t vrxs_csum_failed; uint64_t vrxs_rescheduled; }; struct vtnet_rxq { struct mtx vtnrx_mtx; struct vtnet_softc *vtnrx_sc; struct virtqueue *vtnrx_vq; struct sglist *vtnrx_sg; int vtnrx_id; struct vtnet_rxq_stats vtnrx_stats; struct taskqueue *vtnrx_tq; struct task vtnrx_intrtask; char vtnrx_name[16]; } __aligned(CACHE_LINE_SIZE); #define VTNET_RXQ_LOCK(_rxq) mtx_lock(&(_rxq)->vtnrx_mtx) #define VTNET_RXQ_UNLOCK(_rxq) mtx_unlock(&(_rxq)->vtnrx_mtx) #define VTNET_RXQ_LOCK_ASSERT(_rxq) \ mtx_assert(&(_rxq)->vtnrx_mtx, MA_OWNED) #define VTNET_RXQ_LOCK_ASSERT_NOTOWNED(_rxq) \ mtx_assert(&(_rxq)->vtnrx_mtx, MA_NOTOWNED) struct vtnet_txq_stats { uint64_t vtxs_opackets; /* if_opackets */ uint64_t vtxs_obytes; /* if_obytes */ uint64_t vtxs_omcasts; /* if_omcasts */ uint64_t vtxs_csum; uint64_t vtxs_tso; uint64_t vtxs_rescheduled; }; struct vtnet_txq { struct mtx vtntx_mtx; struct vtnet_softc *vtntx_sc; struct virtqueue *vtntx_vq; struct sglist *vtntx_sg; #ifndef VTNET_LEGACY_TX struct buf_ring *vtntx_br; #endif int vtntx_id; int vtntx_watchdog; struct vtnet_txq_stats vtntx_stats; struct taskqueue *vtntx_tq; struct task vtntx_intrtask; #ifndef VTNET_LEGACY_TX struct task vtntx_defrtask; #endif char vtntx_name[16]; } __aligned(CACHE_LINE_SIZE); #define VTNET_TXQ_LOCK(_txq) mtx_lock(&(_txq)->vtntx_mtx) #define VTNET_TXQ_TRYLOCK(_txq) mtx_trylock(&(_txq)->vtntx_mtx) #define VTNET_TXQ_UNLOCK(_txq) mtx_unlock(&(_txq)->vtntx_mtx) #define VTNET_TXQ_LOCK_ASSERT(_txq) \ mtx_assert(&(_txq)->vtntx_mtx, MA_OWNED) #define VTNET_TXQ_LOCK_ASSERT_NOTOWNED(_txq) \ mtx_assert(&(_txq)->vtntx_mtx, MA_NOTOWNED) struct vtnet_softc { device_t vtnet_dev; if_t vtnet_ifp; struct vtnet_rxq *vtnet_rxqs; struct vtnet_txq *vtnet_txqs; + uint64_t vtnet_hwassist; + uint32_t vtnet_capenable; uint32_t vtnet_flags; #define VTNET_FLAG_SUSPENDED 0x0001 #define VTNET_FLAG_MAC 0x0002 #define VTNET_FLAG_CTRL_VQ 0x0004 #define VTNET_FLAG_CTRL_RX 0x0008 #define VTNET_FLAG_CTRL_MAC 0x0010 #define VTNET_FLAG_VLAN_FILTER 0x0020 #define VTNET_FLAG_TSO_ECN 0x0040 #define VTNET_FLAG_MRG_RXBUFS 0x0080 #define VTNET_FLAG_LRO_NOMRG 0x0100 #define VTNET_FLAG_MULTIQ 0x0200 #define VTNET_FLAG_INDIRECT 0x0400 #define VTNET_FLAG_EVENT_IDX 0x0800 #define VTNET_FLAG_RUNNING 0x1000 int vtnet_link_active; int vtnet_hdr_size; int vtnet_rx_process_limit; int vtnet_rx_nsegs; int vtnet_rx_nmbufs; int vtnet_rx_clsize; int vtnet_rx_new_clsize; int vtnet_tx_intr_thresh; int vtnet_tx_nsegs; int vtnet_if_flags; int vtnet_act_vq_pairs; int vtnet_max_vq_pairs; struct virtqueue *vtnet_ctrl_vq; struct vtnet_mac_filter *vtnet_mac_filter; uint32_t *vtnet_vlan_filter; 
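	/* VirtIO feature bits negotiated with the host. */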
uint64_t vtnet_features; struct vtnet_statistics vtnet_stats; struct callout vtnet_tick_ch; struct ifmedia vtnet_media; eventhandler_tag vtnet_vlan_attach; eventhandler_tag vtnet_vlan_detach; struct mtx vtnet_mtx; char vtnet_mtx_name[16]; char vtnet_hwaddr[ETHER_ADDR_LEN]; }; /* * Maximum number of queue pairs we will autoconfigure to. */ #define VTNET_MAX_QUEUE_PAIRS 8 /* * Additional completed entries can appear in a virtqueue before we can * reenable interrupts. Number of times to retry before scheduling the * taskqueue to process the completed entries. */ #define VTNET_INTR_DISABLE_RETRIES 4 /* * Similarly, additional completed entries can appear in a virtqueue * between when lasted checked and before notifying the host. Number * of times to retry before scheduling the taskqueue to process the * queue. */ #define VTNET_NOTIFY_RETRIES 4 /* * Fake the media type. The host does not provide us with any real media * information. */ #define VTNET_MEDIATYPE (IFM_ETHER | IFM_10G_T | IFM_FDX) /* * Number of words to allocate for the VLAN shadow table. There is one * bit for each VLAN. */ #define VTNET_VLAN_FILTER_NWORDS (4096 / 32) /* * When mergeable buffers are not negotiated, the vtnet_rx_header structure * below is placed at the beginning of the mbuf data. Use 4 bytes of pad to * both keep the VirtIO header and the data non-contiguous and to keep the * frame's payload 4 byte aligned. * * When mergeable buffers are negotiated, the host puts the VirtIO header in * the beginning of the first mbuf's data. */ #define VTNET_RX_HEADER_PAD 4 struct vtnet_rx_header { struct virtio_net_hdr vrh_hdr; char vrh_pad[VTNET_RX_HEADER_PAD]; } __packed; /* * For each outgoing frame, the vtnet_tx_header below is allocated from * the vtnet_tx_header_zone. */ struct vtnet_tx_header { union { struct virtio_net_hdr hdr; struct virtio_net_hdr_mrg_rxbuf mhdr; } vth_uhdr; struct mbuf *vth_mbuf; }; /* * The VirtIO specification does not place a limit on the number of MAC * addresses the guest driver may request to be filtered. In practice, * the host is constrained by available resources. To simplify this driver, * impose a reasonably high limit of MAC addresses we will filter before * falling back to promiscuous or all-multicast modes. */ #define VTNET_MAX_MAC_ENTRIES 128 struct vtnet_mac_table { uint32_t nentries; uint8_t macs[VTNET_MAX_MAC_ENTRIES][ETHER_ADDR_LEN]; } __packed; struct vtnet_mac_filter { struct vtnet_mac_table vmf_unicast; uint32_t vmf_pad; /* Make tables non-contiguous. */ struct vtnet_mac_table vmf_multicast; }; /* * The MAC filter table is malloc(9)'d when needed. Ensure it will * always fit in one segment. */ CTASSERT(sizeof(struct vtnet_mac_filter) <= PAGE_SIZE); #define VTNET_TX_TIMEOUT 5 #define VTNET_CSUM_OFFLOAD (CSUM_TCP | CSUM_UDP | CSUM_SCTP) #define VTNET_CSUM_OFFLOAD_IPV6 (CSUM_TCP_IPV6 | CSUM_UDP_IPV6 | CSUM_SCTP_IPV6) #define VTNET_CSUM_ALL_OFFLOAD \ (VTNET_CSUM_OFFLOAD | VTNET_CSUM_OFFLOAD_IPV6 | CSUM_TSO) /* Features desired/implemented by this driver. 
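 * Only the subset of these that the host also offers is actually
 * negotiated at attach time.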
*/ #define VTNET_FEATURES \ (VIRTIO_NET_F_MAC | \ VIRTIO_NET_F_STATUS | \ VIRTIO_NET_F_CTRL_VQ | \ VIRTIO_NET_F_CTRL_RX | \ VIRTIO_NET_F_CTRL_MAC_ADDR | \ VIRTIO_NET_F_CTRL_VLAN | \ VIRTIO_NET_F_CSUM | \ VIRTIO_NET_F_GSO | \ VIRTIO_NET_F_HOST_TSO4 | \ VIRTIO_NET_F_HOST_TSO6 | \ VIRTIO_NET_F_HOST_ECN | \ VIRTIO_NET_F_GUEST_CSUM | \ VIRTIO_NET_F_GUEST_TSO4 | \ VIRTIO_NET_F_GUEST_TSO6 | \ VIRTIO_NET_F_GUEST_ECN | \ VIRTIO_NET_F_MRG_RXBUF | \ VIRTIO_NET_F_MQ | \ VIRTIO_RING_F_EVENT_IDX | \ VIRTIO_RING_F_INDIRECT_DESC) /* * The VIRTIO_NET_F_HOST_TSO[46] features permit us to send the host * frames larger than 1514 bytes. */ #define VTNET_TSO_FEATURES (VIRTIO_NET_F_GSO | VIRTIO_NET_F_HOST_TSO4 | \ VIRTIO_NET_F_HOST_TSO6 | VIRTIO_NET_F_HOST_ECN) /* * The VIRTIO_NET_F_GUEST_TSO[46] features permit the host to send us * frames larger than 1514 bytes. We do not yet support software LRO * via tcp_lro_rx(). */ #define VTNET_LRO_FEATURES (VIRTIO_NET_F_GUEST_TSO4 | \ VIRTIO_NET_F_GUEST_TSO6 | VIRTIO_NET_F_GUEST_ECN) #define VTNET_MAX_MTU 65536 #define VTNET_MAX_RX_SIZE 65550 /* * Used to preallocate the Vq indirect descriptors. The first segment * is reserved for the header, except for mergeable buffers since the * header is placed inline with the data. */ #define VTNET_MRG_RX_SEGS 1 #define VTNET_MIN_RX_SEGS 2 #define VTNET_MAX_RX_SEGS 34 #define VTNET_MIN_TX_SEGS 4 #define VTNET_MAX_TX_SEGS 64 /* * Assert we can receive and transmit the maximum with regular * size clusters. */ CTASSERT(((VTNET_MAX_RX_SEGS - 1) * MCLBYTES) >= VTNET_MAX_RX_SIZE); CTASSERT(((VTNET_MAX_TX_SEGS - 1) * MCLBYTES) >= VTNET_MAX_MTU); /* * Number of slots in the Tx bufrings. This value matches most other * multiqueue drivers. */ #define VTNET_DEFAULT_BUFRING_SIZE 4096 /* * Determine how many mbufs are in each receive buffer. For LRO without * mergeable buffers, we must allocate an mbuf chain large enough to * hold both the vtnet_rx_header and the maximum receivable data. */ #define VTNET_NEEDED_RX_MBUFS(_sc, _clsize) \ ((_sc)->vtnet_flags & VTNET_FLAG_LRO_NOMRG) == 0 ? 1 : \ howmany(sizeof(struct vtnet_rx_header) + VTNET_MAX_RX_SIZE, \ (_clsize)) #define VTNET_CORE_MTX(_sc) &(_sc)->vtnet_mtx #define VTNET_CORE_LOCK(_sc) mtx_lock(VTNET_CORE_MTX((_sc))) #define VTNET_CORE_UNLOCK(_sc) mtx_unlock(VTNET_CORE_MTX((_sc))) #define VTNET_CORE_LOCK_DESTROY(_sc) mtx_destroy(VTNET_CORE_MTX((_sc))) #define VTNET_CORE_LOCK_ASSERT(_sc) \ mtx_assert(VTNET_CORE_MTX((_sc)), MA_OWNED) #define VTNET_CORE_LOCK_ASSERT_NOTOWNED(_sc) \ mtx_assert(VTNET_CORE_MTX((_sc)), MA_NOTOWNED) #define VTNET_CORE_LOCK_INIT(_sc) do { \ snprintf((_sc)->vtnet_mtx_name, sizeof((_sc)->vtnet_mtx_name), \ "%s", device_get_nameunit((_sc)->vtnet_dev)); \ mtx_init(VTNET_CORE_MTX((_sc)), (_sc)->vtnet_mtx_name, \ "VTNET Core Lock", MTX_DEF); \ } while (0) #endif /* _IF_VTNETVAR_H */ Index: projects/ifnet/sys/dev/xl/if_xl.c =================================================================== --- projects/ifnet/sys/dev/xl/if_xl.c (revision 277242) +++ projects/ifnet/sys/dev/xl/if_xl.c (revision 277243) @@ -1,3285 +1,3270 @@ /*- * Copyright (c) 1997, 1998, 1999 * Bill Paul . All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Bill Paul. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * 3Com 3c90x Etherlink XL PCI NIC driver * * Supports the 3Com "boomerang", "cyclone" and "hurricane" PCI * bus-master chips (3c90x cards and embedded controllers) including * the following: * * 3Com 3c900-TPO 10Mbps/RJ-45 * 3Com 3c900-COMBO 10Mbps/RJ-45,AUI,BNC * 3Com 3c905-TX 10/100Mbps/RJ-45 * 3Com 3c905-T4 10/100Mbps/RJ-45 * 3Com 3c900B-TPO 10Mbps/RJ-45 * 3Com 3c900B-COMBO 10Mbps/RJ-45,AUI,BNC * 3Com 3c900B-TPC 10Mbps/RJ-45,BNC * 3Com 3c900B-FL 10Mbps/Fiber-optic * 3Com 3c905B-COMBO 10/100Mbps/RJ-45,AUI,BNC * 3Com 3c905B-TX 10/100Mbps/RJ-45 * 3Com 3c905B-FL/FX 10/100Mbps/Fiber-optic * 3Com 3c905C-TX 10/100Mbps/RJ-45 (Tornado ASIC) * 3Com 3c980-TX 10/100Mbps server adapter (Hurricane ASIC) * 3Com 3c980C-TX 10/100Mbps server adapter (Tornado ASIC) * 3Com 3cSOHO100-TX 10/100Mbps/RJ-45 (Hurricane ASIC) * 3Com 3c450-TX 10/100Mbps/RJ-45 (Tornado ASIC) * 3Com 3c555 10/100Mbps/RJ-45 (MiniPCI, Laptop Hurricane) * 3Com 3c556 10/100Mbps/RJ-45 (MiniPCI, Hurricane ASIC) * 3Com 3c556B 10/100Mbps/RJ-45 (MiniPCI, Hurricane ASIC) * 3Com 3c575TX 10/100Mbps/RJ-45 (Cardbus, Hurricane ASIC) * 3Com 3c575B 10/100Mbps/RJ-45 (Cardbus, Hurricane ASIC) * 3Com 3c575C 10/100Mbps/RJ-45 (Cardbus, Hurricane ASIC) * 3Com 3cxfem656 10/100Mbps/RJ-45 (Cardbus, Hurricane ASIC) * 3Com 3cxfem656b 10/100Mbps/RJ-45 (Cardbus, Hurricane ASIC) * 3Com 3cxfem656c 10/100Mbps/RJ-45 (Cardbus, Tornado ASIC) * Dell Optiplex GX1 on-board 3c918 10/100Mbps/RJ-45 * Dell on-board 3c920 10/100Mbps/RJ-45 * Dell Precision on-board 3c905B 10/100Mbps/RJ-45 * Dell Latitude laptop docking station embedded 3c905-TX * * Written by Bill Paul * Electrical Engineering Department * Columbia University, New York City */ /* * The 3c90x series chips use a bus-master DMA interface for transfering * packets to and from the controller chip. Some of the "vortex" cards * (3c59x) also supported a bus master mode, however for those chips * you could only DMA packets to/from a contiguous memory buffer. For * transmission this would mean copying the contents of the queued mbuf * chain into an mbuf cluster and then DMAing the cluster. 
This extra * copy would sort of defeat the purpose of the bus master support for * any packet that doesn't fit into a single mbuf. * * By contrast, the 3c90x cards support a fragment-based bus master * mode where mbuf chains can be encapsulated using TX descriptors. * This is similar to other PCI chips such as the Texas Instruments * ThunderLAN and the Intel 82557/82558. * * The "vortex" driver (if_vx.c) happens to work for the "boomerang" * bus master chips because they maintain the old PIO interface for * backwards compatibility, but starting with the 3c905B and the * "cyclone" chips, the compatibility interface has been dropped. * Since using bus master DMA is a big win, we use this driver to * support the PCI "boomerang" chips even though they work with the * "vortex" driver in order to obtain better performance. */ #ifdef HAVE_KERNEL_OPTION_HEADERS #include "opt_device_polling.h" #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include MODULE_DEPEND(xl, pci, 1, 1, 1); MODULE_DEPEND(xl, ether, 1, 1, 1); MODULE_DEPEND(xl, miibus, 1, 1, 1); /* "device miibus" required. See GENERIC if you get errors here. */ #include "miibus_if.h" #include /* * TX Checksumming is disabled by default for two reasons: * - TX Checksumming will occasionally produce corrupt packets * - TX Checksumming seems to reduce performance * * Only 905B/C cards were reported to have this problem, it is possible * that later chips _may_ be immune. */ #define XL905B_TXCSUM_BROKEN 1 #ifdef XL905B_TXCSUM_BROKEN #define XL905B_CSUM_FEATURES 0 #else #define XL905B_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP) #endif /* * Various supported device vendors/types and their names. 
*/ static const struct xl_type xl_devs[] = { { TC_VENDORID, TC_DEVICEID_BOOMERANG_10BT, "3Com 3c900-TPO Etherlink XL" }, { TC_VENDORID, TC_DEVICEID_BOOMERANG_10BT_COMBO, "3Com 3c900-COMBO Etherlink XL" }, { TC_VENDORID, TC_DEVICEID_BOOMERANG_10_100BT, "3Com 3c905-TX Fast Etherlink XL" }, { TC_VENDORID, TC_DEVICEID_BOOMERANG_100BT4, "3Com 3c905-T4 Fast Etherlink XL" }, { TC_VENDORID, TC_DEVICEID_KRAKATOA_10BT, "3Com 3c900B-TPO Etherlink XL" }, { TC_VENDORID, TC_DEVICEID_KRAKATOA_10BT_COMBO, "3Com 3c900B-COMBO Etherlink XL" }, { TC_VENDORID, TC_DEVICEID_KRAKATOA_10BT_TPC, "3Com 3c900B-TPC Etherlink XL" }, { TC_VENDORID, TC_DEVICEID_CYCLONE_10FL, "3Com 3c900B-FL Etherlink XL" }, { TC_VENDORID, TC_DEVICEID_HURRICANE_10_100BT, "3Com 3c905B-TX Fast Etherlink XL" }, { TC_VENDORID, TC_DEVICEID_CYCLONE_10_100BT4, "3Com 3c905B-T4 Fast Etherlink XL" }, { TC_VENDORID, TC_DEVICEID_CYCLONE_10_100FX, "3Com 3c905B-FX/SC Fast Etherlink XL" }, { TC_VENDORID, TC_DEVICEID_CYCLONE_10_100_COMBO, "3Com 3c905B-COMBO Fast Etherlink XL" }, { TC_VENDORID, TC_DEVICEID_TORNADO_10_100BT, "3Com 3c905C-TX Fast Etherlink XL" }, { TC_VENDORID, TC_DEVICEID_TORNADO_10_100BT_920B, "3Com 3c920B-EMB Integrated Fast Etherlink XL" }, { TC_VENDORID, TC_DEVICEID_TORNADO_10_100BT_920B_WNM, "3Com 3c920B-EMB-WNM Integrated Fast Etherlink XL" }, { TC_VENDORID, TC_DEVICEID_HURRICANE_10_100BT_SERV, "3Com 3c980 Fast Etherlink XL" }, { TC_VENDORID, TC_DEVICEID_TORNADO_10_100BT_SERV, "3Com 3c980C Fast Etherlink XL" }, { TC_VENDORID, TC_DEVICEID_HURRICANE_SOHO100TX, "3Com 3cSOHO100-TX OfficeConnect" }, { TC_VENDORID, TC_DEVICEID_TORNADO_HOMECONNECT, "3Com 3c450-TX HomeConnect" }, { TC_VENDORID, TC_DEVICEID_HURRICANE_555, "3Com 3c555 Fast Etherlink XL" }, { TC_VENDORID, TC_DEVICEID_HURRICANE_556, "3Com 3c556 Fast Etherlink XL" }, { TC_VENDORID, TC_DEVICEID_HURRICANE_556B, "3Com 3c556B Fast Etherlink XL" }, { TC_VENDORID, TC_DEVICEID_HURRICANE_575A, "3Com 3c575TX Fast Etherlink XL" }, { TC_VENDORID, TC_DEVICEID_HURRICANE_575B, "3Com 3c575B Fast Etherlink XL" }, { TC_VENDORID, TC_DEVICEID_HURRICANE_575C, "3Com 3c575C Fast Etherlink XL" }, { TC_VENDORID, TC_DEVICEID_HURRICANE_656, "3Com 3c656 Fast Etherlink XL" }, { TC_VENDORID, TC_DEVICEID_HURRICANE_656B, "3Com 3c656B Fast Etherlink XL" }, { TC_VENDORID, TC_DEVICEID_TORNADO_656C, "3Com 3c656C Fast Etherlink XL" }, { 0, 0, NULL } }; static int xl_probe(device_t); static int xl_attach(device_t); static int xl_detach(device_t); static int xl_newbuf(struct xl_softc *, struct xl_chain_onefrag *); static void xl_tick(void *); static void xl_stats_update(struct xl_softc *); static int xl_encap(struct xl_softc *, struct xl_chain *, struct mbuf **); static int xl_rxeof(struct xl_softc *); static void xl_rxeof_task(void *, int); static int xl_rx_resync(struct xl_softc *); static void xl_txeof(struct xl_softc *); static void xl_txeof_90xB(struct xl_softc *); static void xl_txeoc(struct xl_softc *); static void xl_intr(void *); static int xl_transmit(if_t, struct mbuf *); static void xl_start_locked(struct xl_softc *); static void xl_start_90xB_locked(struct xl_softc *); static int xl_ioctl(if_t, u_long, void *, struct thread *); static void xl_init(void *); static void xl_init_locked(struct xl_softc *); static void xl_stop(struct xl_softc *); static int xl_watchdog(struct xl_softc *); static int xl_shutdown(device_t); static int xl_suspend(device_t); static int xl_resume(device_t); static void xl_setwol(struct xl_softc *); #ifdef DEVICE_POLLING static int xl_poll(if_t, enum poll_cmd cmd, int count); 
static int xl_poll_locked(if_t, enum poll_cmd cmd, int count); #endif static int xl_ifmedia_upd(if_t); static void xl_ifmedia_sts(if_t, struct ifmediareq *); static int xl_eeprom_wait(struct xl_softc *); static int xl_read_eeprom(struct xl_softc *, caddr_t, int, int, int); static void xl_rxfilter(struct xl_softc *); static void xl_rxfilter_90x(struct xl_softc *); static void xl_rxfilter_90xB(struct xl_softc *); static void xl_setcfg(struct xl_softc *); static void xl_setmode(struct xl_softc *, int); static void xl_reset(struct xl_softc *); static int xl_list_rx_init(struct xl_softc *); static int xl_list_tx_init(struct xl_softc *); static int xl_list_tx_init_90xB(struct xl_softc *); static void xl_wait(struct xl_softc *); static void xl_mediacheck(struct xl_softc *); static void xl_choose_media(struct xl_softc *sc, int *media); static void xl_choose_xcvr(struct xl_softc *, int); static void xl_dma_map_addr(void *, bus_dma_segment_t *, int, int); #ifdef notdef static void xl_testpacket(struct xl_softc *); #endif static int xl_miibus_readreg(device_t, int, int); static int xl_miibus_writereg(device_t, int, int, int); static void xl_miibus_statchg(device_t); static void xl_miibus_mediainit(device_t); /* * MII bit-bang glue */ static uint32_t xl_mii_bitbang_read(device_t); static void xl_mii_bitbang_write(device_t, uint32_t); static const struct mii_bitbang_ops xl_mii_bitbang_ops = { xl_mii_bitbang_read, xl_mii_bitbang_write, { XL_MII_DATA, /* MII_BIT_MDO */ XL_MII_DATA, /* MII_BIT_MDI */ XL_MII_CLK, /* MII_BIT_MDC */ XL_MII_DIR, /* MII_BIT_DIR_HOST_PHY */ 0, /* MII_BIT_DIR_PHY_HOST */ } }; static device_method_t xl_methods[] = { /* Device interface */ DEVMETHOD(device_probe, xl_probe), DEVMETHOD(device_attach, xl_attach), DEVMETHOD(device_detach, xl_detach), DEVMETHOD(device_shutdown, xl_shutdown), DEVMETHOD(device_suspend, xl_suspend), DEVMETHOD(device_resume, xl_resume), /* MII interface */ DEVMETHOD(miibus_readreg, xl_miibus_readreg), DEVMETHOD(miibus_writereg, xl_miibus_writereg), DEVMETHOD(miibus_statchg, xl_miibus_statchg), DEVMETHOD(miibus_mediainit, xl_miibus_mediainit), DEVMETHOD_END }; static driver_t xl_driver = { "xl", xl_methods, sizeof(struct xl_softc) }; static devclass_t xl_devclass; DRIVER_MODULE_ORDERED(xl, pci, xl_driver, xl_devclass, NULL, NULL, SI_ORDER_ANY); DRIVER_MODULE(miibus, xl, miibus_driver, miibus_devclass, NULL, NULL); static struct ifdriver xl_ifdrv = { .ifdrv_ops = { .ifop_origin = IFOP_ORIGIN_DRIVER, .ifop_ioctl = xl_ioctl, .ifop_transmit = xl_transmit, .ifop_init = xl_init, }, .ifdrv_name = "xl", .ifdrv_type = IFT_ETHER, .ifdrv_maxqlen = XL_TX_LIST_CNT - 1, }; static void xl_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error) { u_int32_t *paddr; paddr = arg; *paddr = segs->ds_addr; } /* * Murphy's law says that it's possible the chip can wedge and * the 'command in progress' bit may never clear. Hence, we wait * only a finite amount of time to avoid getting caught in an * infinite loop. Normally this delay routine would be a macro, * but it isn't called during normal operation so we can afford * to make it a function. Suppress warning when card gone. 
*/ static void xl_wait(struct xl_softc *sc) { register int i; for (i = 0; i < XL_TIMEOUT; i++) { if ((CSR_READ_2(sc, XL_STATUS) & XL_STAT_CMDBUSY) == 0) break; } if (i == XL_TIMEOUT && bus_child_present(sc->xl_dev)) device_printf(sc->xl_dev, "command never completed!\n"); } /* * MII access routines are provided for adapters with external * PHYs (3c905-TX, 3c905-T4, 3c905B-T4) and those with built-in * autoneg logic that's faked up to look like a PHY (3c905B-TX). * Note: if you don't perform the MDIO operations just right, * it's possible to end up with code that works correctly with * some chips/CPUs/processor speeds/bus speeds/etc but not * with others. */ /* * Read the MII serial port for the MII bit-bang module. */ static uint32_t xl_mii_bitbang_read(device_t dev) { struct xl_softc *sc; uint32_t val; sc = device_get_softc(dev); /* We're already in window 4. */ val = CSR_READ_2(sc, XL_W4_PHY_MGMT); CSR_BARRIER(sc, XL_W4_PHY_MGMT, 2, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); return (val); } /* * Write the MII serial port for the MII bit-bang module. */ static void xl_mii_bitbang_write(device_t dev, uint32_t val) { struct xl_softc *sc; sc = device_get_softc(dev); /* We're already in window 4. */ CSR_WRITE_2(sc, XL_W4_PHY_MGMT, val); CSR_BARRIER(sc, XL_W4_PHY_MGMT, 2, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); } static int xl_miibus_readreg(device_t dev, int phy, int reg) { struct xl_softc *sc; sc = device_get_softc(dev); /* Select the window 4. */ XL_SEL_WIN(4); return (mii_bitbang_readreg(dev, &xl_mii_bitbang_ops, phy, reg)); } static int xl_miibus_writereg(device_t dev, int phy, int reg, int data) { struct xl_softc *sc; sc = device_get_softc(dev); /* Select the window 4. */ XL_SEL_WIN(4); mii_bitbang_writereg(dev, &xl_mii_bitbang_ops, phy, reg, data); return (0); } static void xl_miibus_statchg(device_t dev) { struct xl_softc *sc; struct mii_data *mii; uint8_t macctl; sc = device_get_softc(dev); mii = device_get_softc(sc->xl_miibus); xl_setcfg(sc); /* Set ASIC's duplex mode to match the PHY. */ XL_SEL_WIN(3); macctl = CSR_READ_1(sc, XL_W3_MAC_CTRL); if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) { macctl |= XL_MACCTRL_DUPLEX; if (sc->xl_type == XL_TYPE_905B) { if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0) macctl |= XL_MACCTRL_FLOW_CONTROL_ENB; else macctl &= ~XL_MACCTRL_FLOW_CONTROL_ENB; } } else { macctl &= ~XL_MACCTRL_DUPLEX; if (sc->xl_type == XL_TYPE_905B) macctl &= ~XL_MACCTRL_FLOW_CONTROL_ENB; } CSR_WRITE_1(sc, XL_W3_MAC_CTRL, macctl); if (sc->xl_ifp != NULL) { if_set(sc->xl_ifp, IF_BAUDRATE, ifmedia_baudrate(mii->mii_media_active)); if_link_state_change(sc->xl_ifp, ifmedia_link_state(mii->mii_media_status)); } } /* * Special support for the 3c905B-COMBO. This card has 10/100 support * plus BNC and AUI ports. This means we will have both a miibus attached * and some non-MII media settings. In order to allow this, we have to * add the extra media to the miibus's ifmedia struct, but we can't do * that during xl_attach() because the miibus hasn't been attached yet. * So instead, we wait until the miibus probe/attach is done, at which * point we will get a callback telling us that it's safe to add our * extra media. */ static void xl_miibus_mediainit(device_t dev) { struct xl_softc *sc; struct mii_data *mii; struct ifmedia *ifm; sc = device_get_softc(dev); mii = device_get_softc(sc->xl_miibus); ifm = &mii->mii_media; if (sc->xl_media & (XL_MEDIAOPT_AUI | XL_MEDIAOPT_10FL)) { /* * Check for a 10baseFL board in disguise.
*/ if (sc->xl_type == XL_TYPE_905B && sc->xl_media == XL_MEDIAOPT_10FL) { if (bootverbose) device_printf(sc->xl_dev, "found 10baseFL\n"); ifmedia_add(ifm, IFM_ETHER | IFM_10_FL, 0, NULL); ifmedia_add(ifm, IFM_ETHER | IFM_10_FL|IFM_HDX, 0, NULL); if (sc->xl_caps & XL_CAPS_FULL_DUPLEX) ifmedia_add(ifm, IFM_ETHER | IFM_10_FL | IFM_FDX, 0, NULL); } else { if (bootverbose) device_printf(sc->xl_dev, "found AUI\n"); ifmedia_add(ifm, IFM_ETHER | IFM_10_5, 0, NULL); } } if (sc->xl_media & XL_MEDIAOPT_BNC) { if (bootverbose) device_printf(sc->xl_dev, "found BNC\n"); ifmedia_add(ifm, IFM_ETHER | IFM_10_2, 0, NULL); } } /* * The EEPROM is slow: give it time to come ready after issuing * it a command. */ static int xl_eeprom_wait(struct xl_softc *sc) { int i; for (i = 0; i < 100; i++) { if (CSR_READ_2(sc, XL_W0_EE_CMD) & XL_EE_BUSY) DELAY(162); else break; } if (i == 100) { device_printf(sc->xl_dev, "eeprom failed to come ready\n"); return (1); } return (0); } /* * Read a sequence of words from the EEPROM. Note that ethernet address * data is stored in the EEPROM in network byte order. */ static int xl_read_eeprom(struct xl_softc *sc, caddr_t dest, int off, int cnt, int swap) { int err = 0, i; u_int16_t word = 0, *ptr; #define EEPROM_5BIT_OFFSET(A) ((((A) << 2) & 0x7F00) | ((A) & 0x003F)) #define EEPROM_8BIT_OFFSET(A) ((A) & 0x003F) /* * XXX: WARNING! DANGER! * It's easy to accidentally overwrite the rom content! * Note: the 3c575 uses 8bit EEPROM offsets. */ XL_SEL_WIN(0); if (xl_eeprom_wait(sc)) return (1); if (sc->xl_flags & XL_FLAG_EEPROM_OFFSET_30) off += 0x30; for (i = 0; i < cnt; i++) { if (sc->xl_flags & XL_FLAG_8BITROM) CSR_WRITE_2(sc, XL_W0_EE_CMD, XL_EE_8BIT_READ | EEPROM_8BIT_OFFSET(off + i)); else CSR_WRITE_2(sc, XL_W0_EE_CMD, XL_EE_READ | EEPROM_5BIT_OFFSET(off + i)); err = xl_eeprom_wait(sc); if (err) break; word = CSR_READ_2(sc, XL_W0_EE_DATA); ptr = (u_int16_t *)(dest + (i * 2)); if (swap) *ptr = ntohs(word); else *ptr = word; } return (err ? 1 : 0); } static void xl_rxfilter(struct xl_softc *sc) { if (sc->xl_type == XL_TYPE_905B) xl_rxfilter_90xB(sc); else xl_rxfilter_90x(sc); } /* * NICs older than the 3c905B have only one multicast option, which * is to enable reception of all multicast frames. */ static void xl_check_maddr_90x(void *arg, struct sockaddr *maddr) { struct sockaddr *sa = (struct sockaddr *)maddr; uint8_t *rxfilt = arg; if (sa->sa_family == AF_LINK) *rxfilt |= XL_RXFILTER_ALLMULTI; } static void xl_rxfilter_90x(struct xl_softc *sc) { if_t ifp; uint32_t flags; uint8_t rxfilt; XL_LOCK_ASSERT(sc); ifp = sc->xl_ifp; flags = if_get(ifp, IF_FLAGS); XL_SEL_WIN(5); rxfilt = CSR_READ_1(sc, XL_W5_RX_FILTER); rxfilt &= ~(XL_RXFILTER_ALLFRAMES | XL_RXFILTER_ALLMULTI | XL_RXFILTER_BROADCAST | XL_RXFILTER_INDIVIDUAL); /* Set the individual bit to receive frames for this host only. */ rxfilt |= XL_RXFILTER_INDIVIDUAL; /* Set capture broadcast bit to capture broadcast frames. */ if (flags & IFF_BROADCAST) rxfilt |= XL_RXFILTER_BROADCAST; /* If we want promiscuous mode, set the allframes bit. */ if (flags & (IFF_PROMISC | IFF_ALLMULTI)) { if (flags & IFF_PROMISC) rxfilt |= XL_RXFILTER_ALLFRAMES; if (flags & IFF_ALLMULTI) rxfilt |= XL_RXFILTER_ALLMULTI; } else if_foreach_maddr(ifp, xl_check_maddr_90x, &rxfilt); CSR_WRITE_2(sc, XL_COMMAND, rxfilt | XL_CMD_RX_SET_FILT); XL_SEL_WIN(7); } /* * 3c905B adapters have a hash filter that we can program. 
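 * Roughly, each joined multicast address is folded down to a filter bit
 * index by taking the low 8 bits of a big-endian CRC-32 of the 6-byte
 * address (see xl_check_maddr_90xB() below), and that bit is then turned
 * on with the XL_CMD_RX_SET_HASH command.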
* Note: the 3c905B currently only supports a 64-bit hash table, which means * we really only need 6 bits, but the manual indicates that future chip * revisions will have a 256-bit hash table, hence the routine is set up to * calculate 8 bits of position info in case we need it some day. * Note II, The Sequel: _CURRENT_ versions of the 3c905B have a 256-bit hash * table. This means we have to use all 8 bits regardless. On older cards, * the upper 2 bits will be ignored. Grrrr.... */ static void xl_check_maddr_90xB(void *arg, struct sockaddr *maddr) { struct sockaddr_dl *sdl = (struct sockaddr_dl *)maddr; struct xl_softc *sc = arg; uint16_t h; if (sdl->sdl_family != AF_LINK) return; h = ether_crc32_be(LLADDR(sdl), ETHER_ADDR_LEN) & 0xFF; CSR_WRITE_2(sc, XL_COMMAND, h | XL_CMD_RX_SET_HASH | XL_HASH_SET); } static void xl_rxfilter_90xB(struct xl_softc *sc) { if_t ifp; uint32_t flags; uint8_t rxfilt; XL_LOCK_ASSERT(sc); ifp = sc->xl_ifp; flags = if_get(ifp, IF_FLAGS); XL_SEL_WIN(5); rxfilt = CSR_READ_1(sc, XL_W5_RX_FILTER); rxfilt &= ~(XL_RXFILTER_ALLFRAMES | XL_RXFILTER_ALLMULTI | XL_RXFILTER_BROADCAST | XL_RXFILTER_INDIVIDUAL | XL_RXFILTER_MULTIHASH); /* Set the individual bit to receive frames for this host only. */ rxfilt |= XL_RXFILTER_INDIVIDUAL; /* Set capture broadcast bit to capture broadcast frames. */ if (flags & IFF_BROADCAST) rxfilt |= XL_RXFILTER_BROADCAST; /* If we want promiscuous mode, set the allframes bit. */ if (flags & (IFF_PROMISC | IFF_ALLMULTI)) { if (flags & IFF_PROMISC) rxfilt |= XL_RXFILTER_ALLFRAMES; if (flags & IFF_ALLMULTI) rxfilt |= XL_RXFILTER_ALLMULTI; } else { /* First, zot all the existing hash bits. */ for (int i = 0; i < XL_HASHFILT_SIZE; i++) CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_HASH | i); /* Now program new ones. */ if_foreach_maddr(ifp, xl_check_maddr_90xB, sc); /* * XXXGL: a bit dirty, but easier than making a context * containing softc and rxfilt.
*/ if_foreach_maddr(ifp, xl_check_maddr_90x, &rxfilt); } CSR_WRITE_2(sc, XL_COMMAND, rxfilt | XL_CMD_RX_SET_FILT); XL_SEL_WIN(7); } static void xl_setcfg(struct xl_softc *sc) { u_int32_t icfg; /*XL_LOCK_ASSERT(sc);*/ XL_SEL_WIN(3); icfg = CSR_READ_4(sc, XL_W3_INTERNAL_CFG); icfg &= ~XL_ICFG_CONNECTOR_MASK; if (sc->xl_media & XL_MEDIAOPT_MII || sc->xl_media & XL_MEDIAOPT_BT4) icfg |= (XL_XCVR_MII << XL_ICFG_CONNECTOR_BITS); if (sc->xl_media & XL_MEDIAOPT_BTX) icfg |= (XL_XCVR_AUTO << XL_ICFG_CONNECTOR_BITS); CSR_WRITE_4(sc, XL_W3_INTERNAL_CFG, icfg); CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_STOP); } static void xl_setmode(struct xl_softc *sc, int media) { u_int32_t icfg; u_int16_t mediastat; char *pmsg = "", *dmsg = ""; XL_LOCK_ASSERT(sc); XL_SEL_WIN(4); mediastat = CSR_READ_2(sc, XL_W4_MEDIA_STATUS); XL_SEL_WIN(3); icfg = CSR_READ_4(sc, XL_W3_INTERNAL_CFG); if (sc->xl_media & XL_MEDIAOPT_BT) { if (IFM_SUBTYPE(media) == IFM_10_T) { pmsg = "10baseT transceiver"; sc->xl_xcvr = XL_XCVR_10BT; icfg &= ~XL_ICFG_CONNECTOR_MASK; icfg |= (XL_XCVR_10BT << XL_ICFG_CONNECTOR_BITS); mediastat |= XL_MEDIASTAT_LINKBEAT | XL_MEDIASTAT_JABGUARD; mediastat &= ~XL_MEDIASTAT_SQEENB; } } if (sc->xl_media & XL_MEDIAOPT_BFX) { if (IFM_SUBTYPE(media) == IFM_100_FX) { pmsg = "100baseFX port"; sc->xl_xcvr = XL_XCVR_100BFX; icfg &= ~XL_ICFG_CONNECTOR_MASK; icfg |= (XL_XCVR_100BFX << XL_ICFG_CONNECTOR_BITS); mediastat |= XL_MEDIASTAT_LINKBEAT; mediastat &= ~XL_MEDIASTAT_SQEENB; } } if (sc->xl_media & (XL_MEDIAOPT_AUI|XL_MEDIAOPT_10FL)) { if (IFM_SUBTYPE(media) == IFM_10_5) { pmsg = "AUI port"; sc->xl_xcvr = XL_XCVR_AUI; icfg &= ~XL_ICFG_CONNECTOR_MASK; icfg |= (XL_XCVR_AUI << XL_ICFG_CONNECTOR_BITS); mediastat &= ~(XL_MEDIASTAT_LINKBEAT | XL_MEDIASTAT_JABGUARD); mediastat |= ~XL_MEDIASTAT_SQEENB; } if (IFM_SUBTYPE(media) == IFM_10_FL) { pmsg = "10baseFL transceiver"; sc->xl_xcvr = XL_XCVR_AUI; icfg &= ~XL_ICFG_CONNECTOR_MASK; icfg |= (XL_XCVR_AUI << XL_ICFG_CONNECTOR_BITS); mediastat &= ~(XL_MEDIASTAT_LINKBEAT | XL_MEDIASTAT_JABGUARD); mediastat |= ~XL_MEDIASTAT_SQEENB; } } if (sc->xl_media & XL_MEDIAOPT_BNC) { if (IFM_SUBTYPE(media) == IFM_10_2) { pmsg = "AUI port"; sc->xl_xcvr = XL_XCVR_COAX; icfg &= ~XL_ICFG_CONNECTOR_MASK; icfg |= (XL_XCVR_COAX << XL_ICFG_CONNECTOR_BITS); mediastat &= ~(XL_MEDIASTAT_LINKBEAT | XL_MEDIASTAT_JABGUARD | XL_MEDIASTAT_SQEENB); } } if ((media & IFM_GMASK) == IFM_FDX || IFM_SUBTYPE(media) == IFM_100_FX) { dmsg = "full"; XL_SEL_WIN(3); CSR_WRITE_1(sc, XL_W3_MAC_CTRL, XL_MACCTRL_DUPLEX); } else { dmsg = "half"; XL_SEL_WIN(3); CSR_WRITE_1(sc, XL_W3_MAC_CTRL, (CSR_READ_1(sc, XL_W3_MAC_CTRL) & ~XL_MACCTRL_DUPLEX)); } if (IFM_SUBTYPE(media) == IFM_10_2) CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_START); else CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_STOP); CSR_WRITE_4(sc, XL_W3_INTERNAL_CFG, icfg); XL_SEL_WIN(4); CSR_WRITE_2(sc, XL_W4_MEDIA_STATUS, mediastat); DELAY(800); XL_SEL_WIN(7); device_printf(sc->xl_dev, "selecting %s, %s duplex\n", pmsg, dmsg); } static void xl_reset(struct xl_softc *sc) { register int i; XL_LOCK_ASSERT(sc); XL_SEL_WIN(0); CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RESET | ((sc->xl_flags & XL_FLAG_WEIRDRESET) ? XL_RESETOPT_DISADVFD:0)); /* * If we're using memory mapped register mode, pause briefly * after issuing the reset command before trying to access any * other registers. With my 3c575C CardBus card, failing to do * this results in the system locking up while trying to poll * the command busy bit in the status register. 
*/ if (sc->xl_flags & XL_FLAG_USE_MMIO) DELAY(100000); for (i = 0; i < XL_TIMEOUT; i++) { DELAY(10); if (!(CSR_READ_2(sc, XL_STATUS) & XL_STAT_CMDBUSY)) break; } if (i == XL_TIMEOUT) device_printf(sc->xl_dev, "reset didn't complete\n"); /* Reset TX and RX. */ /* Note: the RX reset takes an absurd amount of time * on newer versions of the Tornado chips such as those * on the 3c905CX and newer 3c908C cards. We wait an * extra amount of time so that xl_wait() doesn't complain * and annoy the users. */ CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_RESET); DELAY(100000); xl_wait(sc); CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET); xl_wait(sc); if (sc->xl_flags & XL_FLAG_INVERT_LED_PWR || sc->xl_flags & XL_FLAG_INVERT_MII_PWR) { XL_SEL_WIN(2); CSR_WRITE_2(sc, XL_W2_RESET_OPTIONS, CSR_READ_2(sc, XL_W2_RESET_OPTIONS) | ((sc->xl_flags & XL_FLAG_INVERT_LED_PWR) ? XL_RESETOPT_INVERT_LED : 0) | ((sc->xl_flags & XL_FLAG_INVERT_MII_PWR) ? XL_RESETOPT_INVERT_MII : 0)); } /* Wait a little while for the chip to get its brains in order. */ DELAY(100000); } /* * Probe for a 3Com Etherlink XL chip. Check the PCI vendor and device * IDs against our list and return a device name if we find a match. */ static int xl_probe(device_t dev) { const struct xl_type *t; t = xl_devs; while (t->xl_name != NULL) { if ((pci_get_vendor(dev) == t->xl_vid) && (pci_get_device(dev) == t->xl_did)) { device_set_desc(dev, t->xl_name); return (BUS_PROBE_DEFAULT); } t++; } return (ENXIO); } /* * This routine is a kludge to work around possible hardware faults * or manufacturing defects that can cause the media options register * (or reset options register, as it's called for the first generation * 3c90x adapters) to return an incorrect result. I have encountered * one Dell Latitude laptop docking station with an integrated 3c905-TX * which doesn't have any of the 'mediaopt' bits set. This screws up * the attach routine pretty badly because it doesn't know what media * to look for. If we find ourselves in this predicament, this routine * will try to guess the media options values and warn the user of a * possible manufacturing defect with his adapter/system/whatever. */ static void xl_mediacheck(struct xl_softc *sc) { /* * If some of the media options bits are set, assume they are * correct. If not, try to figure it out down below. * XXX I should check for 10baseFL, but I don't have an adapter * to test with. */ if (sc->xl_media & (XL_MEDIAOPT_MASK & ~XL_MEDIAOPT_VCO)) { /* * Check the XCVR value. If it's not in the normal range * of values, we need to fake it up here. */ if (sc->xl_xcvr <= XL_XCVR_AUTO) return; else { device_printf(sc->xl_dev, "bogus xcvr value in EEPROM (%x)\n", sc->xl_xcvr); device_printf(sc->xl_dev, "choosing new default based on card type\n"); } } else { if (sc->xl_type == XL_TYPE_905B && sc->xl_media & XL_MEDIAOPT_10FL) return; device_printf(sc->xl_dev, "WARNING: no media options bits set in the media options register!!\n"); device_printf(sc->xl_dev, "this could be a manufacturing defect in your adapter or system\n"); device_printf(sc->xl_dev, "attempting to guess media type; you should probably consult your vendor\n"); } xl_choose_xcvr(sc, 1); } static void xl_choose_xcvr(struct xl_softc *sc, int verbose) { u_int16_t devid; /* * Read the device ID from the EEPROM. * This is what's loaded into the PCI device ID register, so it has * to be correct otherwise we wouldn't have gotten this far. 
*/ xl_read_eeprom(sc, (caddr_t)&devid, XL_EE_PRODID, 1, 0); switch (devid) { case TC_DEVICEID_BOOMERANG_10BT: /* 3c900-TPO */ case TC_DEVICEID_KRAKATOA_10BT: /* 3c900B-TPO */ sc->xl_media = XL_MEDIAOPT_BT; sc->xl_xcvr = XL_XCVR_10BT; if (verbose) device_printf(sc->xl_dev, "guessing 10BaseT transceiver\n"); break; case TC_DEVICEID_BOOMERANG_10BT_COMBO: /* 3c900-COMBO */ case TC_DEVICEID_KRAKATOA_10BT_COMBO: /* 3c900B-COMBO */ sc->xl_media = XL_MEDIAOPT_BT|XL_MEDIAOPT_BNC|XL_MEDIAOPT_AUI; sc->xl_xcvr = XL_XCVR_10BT; if (verbose) device_printf(sc->xl_dev, "guessing COMBO (AUI/BNC/TP)\n"); break; case TC_DEVICEID_KRAKATOA_10BT_TPC: /* 3c900B-TPC */ sc->xl_media = XL_MEDIAOPT_BT|XL_MEDIAOPT_BNC; sc->xl_xcvr = XL_XCVR_10BT; if (verbose) device_printf(sc->xl_dev, "guessing TPC (BNC/TP)\n"); break; case TC_DEVICEID_CYCLONE_10FL: /* 3c900B-FL */ sc->xl_media = XL_MEDIAOPT_10FL; sc->xl_xcvr = XL_XCVR_AUI; if (verbose) device_printf(sc->xl_dev, "guessing 10baseFL\n"); break; case TC_DEVICEID_BOOMERANG_10_100BT: /* 3c905-TX */ case TC_DEVICEID_HURRICANE_555: /* 3c555 */ case TC_DEVICEID_HURRICANE_556: /* 3c556 */ case TC_DEVICEID_HURRICANE_556B: /* 3c556B */ case TC_DEVICEID_HURRICANE_575A: /* 3c575TX */ case TC_DEVICEID_HURRICANE_575B: /* 3c575B */ case TC_DEVICEID_HURRICANE_575C: /* 3c575C */ case TC_DEVICEID_HURRICANE_656: /* 3c656 */ case TC_DEVICEID_HURRICANE_656B: /* 3c656B */ case TC_DEVICEID_TORNADO_656C: /* 3c656C */ case TC_DEVICEID_TORNADO_10_100BT_920B: /* 3c920B-EMB */ case TC_DEVICEID_TORNADO_10_100BT_920B_WNM: /* 3c920B-EMB-WNM */ sc->xl_media = XL_MEDIAOPT_MII; sc->xl_xcvr = XL_XCVR_MII; if (verbose) device_printf(sc->xl_dev, "guessing MII\n"); break; case TC_DEVICEID_BOOMERANG_100BT4: /* 3c905-T4 */ case TC_DEVICEID_CYCLONE_10_100BT4: /* 3c905B-T4 */ sc->xl_media = XL_MEDIAOPT_BT4; sc->xl_xcvr = XL_XCVR_MII; if (verbose) device_printf(sc->xl_dev, "guessing 100baseT4/MII\n"); break; case TC_DEVICEID_HURRICANE_10_100BT: /* 3c905B-TX */ case TC_DEVICEID_HURRICANE_10_100BT_SERV:/*3c980-TX */ case TC_DEVICEID_TORNADO_10_100BT_SERV: /* 3c980C-TX */ case TC_DEVICEID_HURRICANE_SOHO100TX: /* 3cSOHO100-TX */ case TC_DEVICEID_TORNADO_10_100BT: /* 3c905C-TX */ case TC_DEVICEID_TORNADO_HOMECONNECT: /* 3c450-TX */ sc->xl_media = XL_MEDIAOPT_BTX; sc->xl_xcvr = XL_XCVR_AUTO; if (verbose) device_printf(sc->xl_dev, "guessing 10/100 internal\n"); break; case TC_DEVICEID_CYCLONE_10_100_COMBO: /* 3c905B-COMBO */ sc->xl_media = XL_MEDIAOPT_BTX|XL_MEDIAOPT_BNC|XL_MEDIAOPT_AUI; sc->xl_xcvr = XL_XCVR_AUTO; if (verbose) device_printf(sc->xl_dev, "guessing 10/100 plus BNC/AUI\n"); break; default: device_printf(sc->xl_dev, "unknown device ID: %x -- defaulting to 10baseT\n", devid); sc->xl_media = XL_MEDIAOPT_BT; break; } } /* * Attach the interface. Allocate softc structures, do ifmedia * setup and ethernet/BPF attach. 
*/ static int xl_attach(device_t dev) { struct if_attach_args ifat = { .ifat_version = IF_ATTACH_VERSION, .ifat_drv = &xl_ifdrv, .ifat_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST, .ifat_capabilities = IFCAP_VLAN_MTU | IFCAP_LINKSTATE, }; u_char eaddr[ETHER_ADDR_LEN]; u_int16_t sinfo2, xcvr[2]; struct xl_softc *sc; if_t ifp; int media, pmcap; int error = 0, rid, res, unit; uint16_t did; sc = device_get_softc(dev); sc->xl_dev = dev; unit = device_get_unit(dev); mtx_init(&sc->xl_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF); ifmedia_init(&sc->ifmedia, 0, xl_ifmedia_upd, xl_ifmedia_sts); did = pci_get_device(dev); sc->xl_flags = 0; if (did == TC_DEVICEID_HURRICANE_555) sc->xl_flags |= XL_FLAG_EEPROM_OFFSET_30 | XL_FLAG_PHYOK; if (did == TC_DEVICEID_HURRICANE_556 || did == TC_DEVICEID_HURRICANE_556B) sc->xl_flags |= XL_FLAG_FUNCREG | XL_FLAG_PHYOK | XL_FLAG_EEPROM_OFFSET_30 | XL_FLAG_WEIRDRESET | XL_FLAG_INVERT_LED_PWR | XL_FLAG_INVERT_MII_PWR; if (did == TC_DEVICEID_HURRICANE_555 || did == TC_DEVICEID_HURRICANE_556) sc->xl_flags |= XL_FLAG_8BITROM; if (did == TC_DEVICEID_HURRICANE_556B) sc->xl_flags |= XL_FLAG_NO_XCVR_PWR; if (did == TC_DEVICEID_HURRICANE_575B || did == TC_DEVICEID_HURRICANE_575C || did == TC_DEVICEID_HURRICANE_656B || did == TC_DEVICEID_TORNADO_656C) sc->xl_flags |= XL_FLAG_FUNCREG; if (did == TC_DEVICEID_HURRICANE_575A || did == TC_DEVICEID_HURRICANE_575B || did == TC_DEVICEID_HURRICANE_575C || did == TC_DEVICEID_HURRICANE_656B || did == TC_DEVICEID_TORNADO_656C) sc->xl_flags |= XL_FLAG_PHYOK | XL_FLAG_EEPROM_OFFSET_30 | XL_FLAG_8BITROM; if (did == TC_DEVICEID_HURRICANE_656) sc->xl_flags |= XL_FLAG_FUNCREG | XL_FLAG_PHYOK; if (did == TC_DEVICEID_HURRICANE_575B) sc->xl_flags |= XL_FLAG_INVERT_LED_PWR; if (did == TC_DEVICEID_HURRICANE_575C) sc->xl_flags |= XL_FLAG_INVERT_MII_PWR; if (did == TC_DEVICEID_TORNADO_656C) sc->xl_flags |= XL_FLAG_INVERT_MII_PWR; if (did == TC_DEVICEID_HURRICANE_656 || did == TC_DEVICEID_HURRICANE_656B) sc->xl_flags |= XL_FLAG_INVERT_MII_PWR | XL_FLAG_INVERT_LED_PWR; if (did == TC_DEVICEID_TORNADO_10_100BT_920B || did == TC_DEVICEID_TORNADO_10_100BT_920B_WNM) sc->xl_flags |= XL_FLAG_PHYOK; switch (did) { case TC_DEVICEID_BOOMERANG_10_100BT: /* 3c905-TX */ case TC_DEVICEID_HURRICANE_575A: case TC_DEVICEID_HURRICANE_575B: case TC_DEVICEID_HURRICANE_575C: sc->xl_flags |= XL_FLAG_NO_MMIO; break; default: break; } /* * Map control/status registers. 
*/ pci_enable_busmaster(dev); if ((sc->xl_flags & XL_FLAG_NO_MMIO) == 0) { rid = XL_PCI_LOMEM; res = SYS_RES_MEMORY; sc->xl_res = bus_alloc_resource_any(dev, res, &rid, RF_ACTIVE); } if (sc->xl_res != NULL) { sc->xl_flags |= XL_FLAG_USE_MMIO; if (bootverbose) device_printf(dev, "using memory mapped I/O\n"); } else { rid = XL_PCI_LOIO; res = SYS_RES_IOPORT; sc->xl_res = bus_alloc_resource_any(dev, res, &rid, RF_ACTIVE); if (sc->xl_res == NULL) { device_printf(dev, "couldn't map ports/memory\n"); error = ENXIO; goto fail; } if (bootverbose) device_printf(dev, "using port I/O\n"); } sc->xl_btag = rman_get_bustag(sc->xl_res); sc->xl_bhandle = rman_get_bushandle(sc->xl_res); if (sc->xl_flags & XL_FLAG_FUNCREG) { rid = XL_PCI_FUNCMEM; sc->xl_fres = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->xl_fres == NULL) { device_printf(dev, "couldn't map funcreg memory\n"); error = ENXIO; goto fail; } sc->xl_ftag = rman_get_bustag(sc->xl_fres); sc->xl_fhandle = rman_get_bushandle(sc->xl_fres); } /* Allocate interrupt */ rid = 0; sc->xl_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); if (sc->xl_irq == NULL) { device_printf(dev, "couldn't map interrupt\n"); error = ENXIO; goto fail; } /* Reset the adapter. */ XL_LOCK(sc); xl_reset(sc); XL_UNLOCK(sc); /* * Get station address from the EEPROM. */ if (xl_read_eeprom(sc, (caddr_t)&eaddr, XL_EE_OEM_ADR0, 3, 1)) { device_printf(dev, "failed to read station address\n"); error = ENXIO; goto fail; } callout_init_mtx(&sc->xl_tick_callout, &sc->xl_mtx, 0); TASK_INIT(&sc->xl_task, 0, xl_rxeof_task, sc); /* * Now allocate a tag for the DMA descriptor lists and a chunk * of DMA-able memory based on the tag. Also obtain the DMA * addresses of the RX and TX ring, which we'll need later. * All of our lists are allocated as a contiguous block * of memory. 
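 * Each ring is carved out of a single bus_dmamem_alloc() chunk
 * (XL_RX_LIST_SZ or XL_TX_LIST_SZ bytes), 8-byte aligned and limited to
 * 32-bit bus addresses; the xl_dma_map_addr() callback above simply
 * records the physical base address of each ring for later use.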
*/ error = bus_dma_tag_create(bus_get_dma_tag(dev), 8, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, XL_RX_LIST_SZ, 1, XL_RX_LIST_SZ, 0, NULL, NULL, &sc->xl_ldata.xl_rx_tag); if (error) { device_printf(dev, "failed to allocate rx dma tag\n"); goto fail; } error = bus_dmamem_alloc(sc->xl_ldata.xl_rx_tag, (void **)&sc->xl_ldata.xl_rx_list, BUS_DMA_NOWAIT | BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->xl_ldata.xl_rx_dmamap); if (error) { device_printf(dev, "no memory for rx list buffers!\n"); bus_dma_tag_destroy(sc->xl_ldata.xl_rx_tag); sc->xl_ldata.xl_rx_tag = NULL; goto fail; } error = bus_dmamap_load(sc->xl_ldata.xl_rx_tag, sc->xl_ldata.xl_rx_dmamap, sc->xl_ldata.xl_rx_list, XL_RX_LIST_SZ, xl_dma_map_addr, &sc->xl_ldata.xl_rx_dmaaddr, BUS_DMA_NOWAIT); if (error) { device_printf(dev, "cannot get dma address of the rx ring!\n"); bus_dmamem_free(sc->xl_ldata.xl_rx_tag, sc->xl_ldata.xl_rx_list, sc->xl_ldata.xl_rx_dmamap); bus_dma_tag_destroy(sc->xl_ldata.xl_rx_tag); sc->xl_ldata.xl_rx_tag = NULL; goto fail; } error = bus_dma_tag_create(bus_get_dma_tag(dev), 8, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, XL_TX_LIST_SZ, 1, XL_TX_LIST_SZ, 0, NULL, NULL, &sc->xl_ldata.xl_tx_tag); if (error) { device_printf(dev, "failed to allocate tx dma tag\n"); goto fail; } error = bus_dmamem_alloc(sc->xl_ldata.xl_tx_tag, (void **)&sc->xl_ldata.xl_tx_list, BUS_DMA_NOWAIT | BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->xl_ldata.xl_tx_dmamap); if (error) { device_printf(dev, "no memory for list buffers!\n"); bus_dma_tag_destroy(sc->xl_ldata.xl_tx_tag); sc->xl_ldata.xl_tx_tag = NULL; goto fail; } error = bus_dmamap_load(sc->xl_ldata.xl_tx_tag, sc->xl_ldata.xl_tx_dmamap, sc->xl_ldata.xl_tx_list, XL_TX_LIST_SZ, xl_dma_map_addr, &sc->xl_ldata.xl_tx_dmaaddr, BUS_DMA_NOWAIT); if (error) { device_printf(dev, "cannot get dma address of the tx ring!\n"); bus_dmamem_free(sc->xl_ldata.xl_tx_tag, sc->xl_ldata.xl_tx_list, sc->xl_ldata.xl_tx_dmamap); bus_dma_tag_destroy(sc->xl_ldata.xl_tx_tag); sc->xl_ldata.xl_tx_tag = NULL; goto fail; } /* * Allocate a DMA tag for the mapping of mbufs. */ error = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES * XL_MAXFRAGS, XL_MAXFRAGS, MCLBYTES, 0, NULL, NULL, &sc->xl_mtag); if (error) { device_printf(dev, "failed to allocate mbuf dma tag\n"); goto fail; } /* We need a spare DMA map for the RX ring. */ error = bus_dmamap_create(sc->xl_mtag, 0, &sc->xl_tmpmap); if (error) goto fail; /* * Figure out the card type. 3c905B adapters have the * 'supportsNoTxLength' bit set in the capabilities * word in the EEPROM. * Note: my 3c575C CardBus card lies. It returns a value * of 0x1578 for its capabilities word, which is somewhat * nonsensical. Another way to distinguish a 3c90x chip * from a 3c90xB/C chip is to check for the 'supportsLargePackets' * bit. This will only be set for 3c90x boomerage chips. */ xl_read_eeprom(sc, (caddr_t)&sc->xl_caps, XL_EE_CAPS, 1, 0); if (sc->xl_caps & XL_CAPS_NO_TXLENGTH || !(sc->xl_caps & XL_CAPS_LARGE_PKTS)) sc->xl_type = XL_TYPE_905B; else sc->xl_type = XL_TYPE_90X; /* Check availability of WOL. */ if ((sc->xl_caps & XL_CAPS_PWRMGMT) != 0 && pci_find_cap(dev, PCIY_PMG, &pmcap) == 0) { sc->xl_pmcap = pmcap; sc->xl_flags |= XL_FLAG_WOL; sinfo2 = 0; xl_read_eeprom(sc, (caddr_t)&sinfo2, XL_EE_SOFTINFO2, 1, 0); if ((sinfo2 & XL_SINFO2_AUX_WOL_CON) == 0 && bootverbose) device_printf(dev, "No auxiliary remote wakeup connector!\n"); } /* Set the TX start threshold for best performance. 
*/ sc->xl_tx_thresh = XL_MIN_FRAMELEN; /* * Now we have to see what sort of media we have. * This includes probing for an MII interace and a * possible PHY. */ XL_SEL_WIN(3); sc->xl_media = CSR_READ_2(sc, XL_W3_MEDIA_OPT); if (bootverbose) device_printf(dev, "media options word: %x\n", sc->xl_media); xl_read_eeprom(sc, (char *)&xcvr, XL_EE_ICFG_0, 2, 0); sc->xl_xcvr = xcvr[0] | xcvr[1] << 16; sc->xl_xcvr &= XL_ICFG_CONNECTOR_MASK; sc->xl_xcvr >>= XL_ICFG_CONNECTOR_BITS; xl_mediacheck(sc); if (sc->xl_media & XL_MEDIAOPT_MII || sc->xl_media & XL_MEDIAOPT_BTX || sc->xl_media & XL_MEDIAOPT_BT4) { struct mii_data *mii; int phy; if (bootverbose) device_printf(dev, "found MII/AUTO\n"); xl_setcfg(sc); /* * Attach PHYs only at MII address 24 if !XL_FLAG_PHYOK. * This is to guard against problems with certain 3Com ASIC * revisions that incorrectly map the internal transceiver * control registers at all MII addresses. */ phy = MII_PHY_ANY; if ((sc->xl_flags & XL_FLAG_PHYOK) == 0) phy = 24; error = mii_attach(dev, &sc->xl_miibus, xl_ifmedia_upd, xl_ifmedia_sts, BMSR_DEFCAPMASK, phy, MII_OFFSET_ANY, sc->xl_type == XL_TYPE_905B ? MIIF_DOPAUSE : 0); if (error != 0) { device_printf(dev, "attaching PHYs failed\n"); goto fail; } mii = device_get_softc(sc->xl_miibus); ifat.ifat_baudrate = ifmedia_baudrate(mii->mii_media_active); goto media_done; } /* * Sanity check. If the user has selected "auto" and this isn't * a 10/100 card of some kind, we need to force the transceiver * type to something sane. */ if (sc->xl_xcvr == XL_XCVR_AUTO) xl_choose_xcvr(sc, bootverbose); /* * Do ifmedia setup. */ if (sc->xl_media & XL_MEDIAOPT_BT) { if (bootverbose) device_printf(dev, "found 10baseT\n"); ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T, 0, NULL); ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T|IFM_HDX, 0, NULL); if (sc->xl_caps & XL_CAPS_FULL_DUPLEX) ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL); } if (sc->xl_media & (XL_MEDIAOPT_AUI|XL_MEDIAOPT_10FL)) { /* * Check for a 10baseFL board in disguise. */ if (sc->xl_type == XL_TYPE_905B && sc->xl_media == XL_MEDIAOPT_10FL) { if (bootverbose) device_printf(dev, "found 10baseFL\n"); ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_FL, 0, NULL); ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_FL|IFM_HDX, 0, NULL); if (sc->xl_caps & XL_CAPS_FULL_DUPLEX) ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_FL|IFM_FDX, 0, NULL); } else { if (bootverbose) device_printf(dev, "found AUI\n"); ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_5, 0, NULL); } } if (sc->xl_media & XL_MEDIAOPT_BNC) { if (bootverbose) device_printf(dev, "found BNC\n"); ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_2, 0, NULL); } if (sc->xl_media & XL_MEDIAOPT_BFX) { if (bootverbose) device_printf(dev, "found 100baseFX\n"); ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_100_FX, 0, NULL); } media = IFM_ETHER|IFM_100_TX|IFM_FDX; xl_choose_media(sc, &media); if (sc->xl_miibus == NULL) ifmedia_set(&sc->ifmedia, media); ifat.ifat_baudrate = ifmedia_baudrate(sc->ifmedia.ifm_media); media_done: if (sc->xl_flags & XL_FLAG_NO_XCVR_PWR) { XL_SEL_WIN(0); CSR_WRITE_2(sc, XL_W0_MFG_ID, XL_NO_XCVR_PWR_MAGICBITS); } error = bus_setup_intr(dev, sc->xl_irq, INTR_TYPE_NET | INTR_MPSAFE, NULL, xl_intr, sc, &sc->xl_intrhand); if (error) { device_printf(dev, "couldn't set up irq\n"); goto fail; } /* Initialize interface. 
*/ ifat.ifat_softc = sc; ifat.ifat_dunit = device_get_unit(dev); ifat.ifat_lla = eaddr; if (sc->xl_type == XL_TYPE_905B) { ifat.ifat_hwassist = XL905B_CSUM_FEATURES; #ifdef XL905B_TXCSUM_BROKEN ifat.ifat_capabilities |= IFCAP_RXCSUM; #else ifat.ifat_capabilities |= IFCAP_HWCSUM; #endif } if ((sc->xl_flags & XL_FLAG_WOL) != 0) ifat.ifat_capabilities |= IFCAP_WOL_MAGIC; ifat.ifat_capenable = ifat.ifat_capabilities; #ifdef DEVICE_POLLING ifat.ifat_capabilities |= IFCAP_POLLING; #endif ifp = sc->xl_ifp = if_attach(&ifat); return (0); fail: xl_detach(dev); return (error); } /* * Choose a default media. * XXX This is a leaf function only called by xl_attach() and * acquires/releases the non-recursible driver mutex to * satisfy lock assertions. */ static void xl_choose_media(struct xl_softc *sc, int *media) { XL_LOCK(sc); switch (sc->xl_xcvr) { case XL_XCVR_10BT: *media = IFM_ETHER|IFM_10_T; xl_setmode(sc, *media); break; case XL_XCVR_AUI: if (sc->xl_type == XL_TYPE_905B && sc->xl_media == XL_MEDIAOPT_10FL) { *media = IFM_ETHER|IFM_10_FL; xl_setmode(sc, *media); } else { *media = IFM_ETHER|IFM_10_5; xl_setmode(sc, *media); } break; case XL_XCVR_COAX: *media = IFM_ETHER|IFM_10_2; xl_setmode(sc, *media); break; case XL_XCVR_AUTO: case XL_XCVR_100BTX: case XL_XCVR_MII: /* Chosen by miibus */ break; case XL_XCVR_100BFX: *media = IFM_ETHER|IFM_100_FX; break; default: device_printf(sc->xl_dev, "unknown XCVR type: %d\n", sc->xl_xcvr); /* * This will probably be wrong, but it prevents * the ifmedia code from panicking. */ *media = IFM_ETHER|IFM_10_T; break; } XL_UNLOCK(sc); } /* * Shutdown hardware and free up resources. This can be called any * time after the mutex has been initialized. It is called in both * the error case in attach and the normal detach case so it needs * to be careful about only freeing resources that have actually been * allocated. 
*/ static int xl_detach(device_t dev) { struct xl_softc *sc; if_t ifp; int rid, res; sc = device_get_softc(dev); ifp = sc->xl_ifp; KASSERT(mtx_initialized(&sc->xl_mtx), ("xl mutex not initialized")); #ifdef DEVICE_POLLING if (ifp && ifp->if_capenable & IFCAP_POLLING) ether_poll_deregister(ifp); #endif if (sc->xl_flags & XL_FLAG_USE_MMIO) { rid = XL_PCI_LOMEM; res = SYS_RES_MEMORY; } else { rid = XL_PCI_LOIO; res = SYS_RES_IOPORT; } /* These should only be active if attach succeeded */ if (device_is_attached(dev)) { XL_LOCK(sc); xl_stop(sc); XL_UNLOCK(sc); taskqueue_drain(taskqueue_swi, &sc->xl_task); callout_drain(&sc->xl_tick_callout); if_detach(ifp); } if (sc->xl_miibus) device_delete_child(dev, sc->xl_miibus); bus_generic_detach(dev); ifmedia_removeall(&sc->ifmedia); if (sc->xl_intrhand) bus_teardown_intr(dev, sc->xl_irq, sc->xl_intrhand); if (sc->xl_irq) bus_release_resource(dev, SYS_RES_IRQ, 0, sc->xl_irq); if (sc->xl_fres != NULL) bus_release_resource(dev, SYS_RES_MEMORY, XL_PCI_FUNCMEM, sc->xl_fres); if (sc->xl_res) bus_release_resource(dev, res, rid, sc->xl_res); if (sc->xl_mtag) { bus_dmamap_destroy(sc->xl_mtag, sc->xl_tmpmap); bus_dma_tag_destroy(sc->xl_mtag); } if (sc->xl_ldata.xl_rx_tag) { bus_dmamap_unload(sc->xl_ldata.xl_rx_tag, sc->xl_ldata.xl_rx_dmamap); bus_dmamem_free(sc->xl_ldata.xl_rx_tag, sc->xl_ldata.xl_rx_list, sc->xl_ldata.xl_rx_dmamap); bus_dma_tag_destroy(sc->xl_ldata.xl_rx_tag); } if (sc->xl_ldata.xl_tx_tag) { bus_dmamap_unload(sc->xl_ldata.xl_tx_tag, sc->xl_ldata.xl_tx_dmamap); bus_dmamem_free(sc->xl_ldata.xl_tx_tag, sc->xl_ldata.xl_tx_list, sc->xl_ldata.xl_tx_dmamap); bus_dma_tag_destroy(sc->xl_ldata.xl_tx_tag); } mtx_destroy(&sc->xl_mtx); return (0); } /* * Initialize the transmit descriptors. */ static int xl_list_tx_init(struct xl_softc *sc) { struct xl_chain_data *cd; struct xl_list_data *ld; int error, i; XL_LOCK_ASSERT(sc); cd = &sc->xl_cdata; ld = &sc->xl_ldata; for (i = 0; i < XL_TX_LIST_CNT; i++) { cd->xl_tx_chain[i].xl_ptr = &ld->xl_tx_list[i]; error = bus_dmamap_create(sc->xl_mtag, 0, &cd->xl_tx_chain[i].xl_map); if (error) return (error); cd->xl_tx_chain[i].xl_phys = ld->xl_tx_dmaaddr + i * sizeof(struct xl_list); if (i == (XL_TX_LIST_CNT - 1)) cd->xl_tx_chain[i].xl_next = NULL; else cd->xl_tx_chain[i].xl_next = &cd->xl_tx_chain[i + 1]; } cd->xl_tx_free = &cd->xl_tx_chain[0]; cd->xl_tx_tail = cd->xl_tx_head = NULL; bus_dmamap_sync(ld->xl_tx_tag, ld->xl_tx_dmamap, BUS_DMASYNC_PREWRITE); return (0); } /* * Initialize the transmit descriptors. */ static int xl_list_tx_init_90xB(struct xl_softc *sc) { struct xl_chain_data *cd; struct xl_list_data *ld; int error, i; XL_LOCK_ASSERT(sc); cd = &sc->xl_cdata; ld = &sc->xl_ldata; for (i = 0; i < XL_TX_LIST_CNT; i++) { cd->xl_tx_chain[i].xl_ptr = &ld->xl_tx_list[i]; error = bus_dmamap_create(sc->xl_mtag, 0, &cd->xl_tx_chain[i].xl_map); if (error) return (error); cd->xl_tx_chain[i].xl_phys = ld->xl_tx_dmaaddr + i * sizeof(struct xl_list); if (i == (XL_TX_LIST_CNT - 1)) cd->xl_tx_chain[i].xl_next = &cd->xl_tx_chain[0]; else cd->xl_tx_chain[i].xl_next = &cd->xl_tx_chain[i + 1]; if (i == 0) cd->xl_tx_chain[i].xl_prev = &cd->xl_tx_chain[XL_TX_LIST_CNT - 1]; else cd->xl_tx_chain[i].xl_prev = &cd->xl_tx_chain[i - 1]; } bzero(ld->xl_tx_list, XL_TX_LIST_SZ); ld->xl_tx_list[0].xl_status = htole32(XL_TXSTAT_EMPTY); cd->xl_tx_prod = 1; cd->xl_tx_cons = 1; cd->xl_tx_cnt = 0; bus_dmamap_sync(ld->xl_tx_tag, ld->xl_tx_dmamap, BUS_DMASYNC_PREWRITE); return (0); } /* * Initialize the RX descriptors and allocate mbufs for them. 
Note that * we arrange the descriptors in a closed ring, so that the last descriptor * points back to the first. */ static int xl_list_rx_init(struct xl_softc *sc) { struct xl_chain_data *cd; struct xl_list_data *ld; int error, i, next; u_int32_t nextptr; XL_LOCK_ASSERT(sc); cd = &sc->xl_cdata; ld = &sc->xl_ldata; for (i = 0; i < XL_RX_LIST_CNT; i++) { cd->xl_rx_chain[i].xl_ptr = &ld->xl_rx_list[i]; error = bus_dmamap_create(sc->xl_mtag, 0, &cd->xl_rx_chain[i].xl_map); if (error) return (error); error = xl_newbuf(sc, &cd->xl_rx_chain[i]); if (error) return (error); if (i == (XL_RX_LIST_CNT - 1)) next = 0; else next = i + 1; nextptr = ld->xl_rx_dmaaddr + next * sizeof(struct xl_list_onefrag); cd->xl_rx_chain[i].xl_next = &cd->xl_rx_chain[next]; ld->xl_rx_list[i].xl_next = htole32(nextptr); } bus_dmamap_sync(ld->xl_rx_tag, ld->xl_rx_dmamap, BUS_DMASYNC_PREWRITE); cd->xl_rx_head = &cd->xl_rx_chain[0]; return (0); } /* * Initialize an RX descriptor and attach an MBUF cluster. * If we fail to do so, we need to leave the old mbuf and * the old DMA map untouched so that it can be reused. */ static int xl_newbuf(struct xl_softc *sc, struct xl_chain_onefrag *c) { struct mbuf *m_new = NULL; bus_dmamap_t map; bus_dma_segment_t segs[1]; int error, nseg; XL_LOCK_ASSERT(sc); m_new = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); if (m_new == NULL) return (ENOBUFS); m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; /* Force longword alignment for packet payload. */ m_adj(m_new, ETHER_ALIGN); error = bus_dmamap_load_mbuf_sg(sc->xl_mtag, sc->xl_tmpmap, m_new, segs, &nseg, BUS_DMA_NOWAIT); if (error) { m_freem(m_new); device_printf(sc->xl_dev, "can't map mbuf (error %d)\n", error); return (error); } KASSERT(nseg == 1, ("%s: too many DMA segments (%d)", __func__, nseg)); bus_dmamap_unload(sc->xl_mtag, c->xl_map); map = c->xl_map; c->xl_map = sc->xl_tmpmap; sc->xl_tmpmap = map; c->xl_mbuf = m_new; c->xl_ptr->xl_frag.xl_len = htole32(m_new->m_len | XL_LAST_FRAG); c->xl_ptr->xl_frag.xl_addr = htole32(segs->ds_addr); c->xl_ptr->xl_status = 0; bus_dmamap_sync(sc->xl_mtag, c->xl_map, BUS_DMASYNC_PREREAD); return (0); } static int xl_rx_resync(struct xl_softc *sc) { struct xl_chain_onefrag *pos; int i; XL_LOCK_ASSERT(sc); pos = sc->xl_cdata.xl_rx_head; for (i = 0; i < XL_RX_LIST_CNT; i++) { if (pos->xl_ptr->xl_status) break; pos = pos->xl_next; } if (i == XL_RX_LIST_CNT) return (0); sc->xl_cdata.xl_rx_head = pos; return (EAGAIN); } /* * A frame has been uploaded: pass the resulting mbuf chain up to * the higher level protocols. */ static int xl_rxeof(struct xl_softc *sc) { struct mbuf *m; if_t ifp = sc->xl_ifp; struct xl_chain_onefrag *cur_rx; int total_len; int rx_npkts = 0; u_int32_t rxstat; XL_LOCK_ASSERT(sc); again: bus_dmamap_sync(sc->xl_ldata.xl_rx_tag, sc->xl_ldata.xl_rx_dmamap, BUS_DMASYNC_POSTREAD); while ((rxstat = le32toh(sc->xl_cdata.xl_rx_head->xl_ptr->xl_status))) { #ifdef DEVICE_POLLING if (ifp->if_capenable & IFCAP_POLLING) { if (sc->rxcycles <= 0) break; sc->rxcycles--; } #endif cur_rx = sc->xl_cdata.xl_rx_head; sc->xl_cdata.xl_rx_head = cur_rx->xl_next; total_len = rxstat & XL_RXSTAT_LENMASK; rx_npkts++; /* * Since we have told the chip to allow large frames, * we need to trap giant frame errors in software. We allow * a little more than the normal frame size to account for * frames with VLAN tags. 
*/ if (total_len > XL_MAX_FRAMELEN) rxstat |= (XL_RXSTAT_UP_ERROR|XL_RXSTAT_OVERSIZE); /* * If an error occurs, update stats, clear the * status word and leave the mbuf cluster in place: * it should simply get re-used next time this descriptor * comes up in the ring. */ if (rxstat & XL_RXSTAT_UP_ERROR) { if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); cur_rx->xl_ptr->xl_status = 0; bus_dmamap_sync(sc->xl_ldata.xl_rx_tag, sc->xl_ldata.xl_rx_dmamap, BUS_DMASYNC_PREWRITE); continue; } /* * If the error bit was not set, the upload complete * bit should be set which means we have a valid packet. * If not, something truly strange has happened. */ if (!(rxstat & XL_RXSTAT_UP_CMPLT)) { device_printf(sc->xl_dev, "bad receive status -- packet dropped\n"); if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); cur_rx->xl_ptr->xl_status = 0; bus_dmamap_sync(sc->xl_ldata.xl_rx_tag, sc->xl_ldata.xl_rx_dmamap, BUS_DMASYNC_PREWRITE); continue; } /* No errors; receive the packet. */ bus_dmamap_sync(sc->xl_mtag, cur_rx->xl_map, BUS_DMASYNC_POSTREAD); m = cur_rx->xl_mbuf; /* * Try to conjure up a new mbuf cluster. If that * fails, it means we have an out of memory condition and * should leave the buffer in place and continue. This will * result in a lost packet, but there's little else we * can do in this situation. */ if (xl_newbuf(sc, cur_rx)) { if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); cur_rx->xl_ptr->xl_status = 0; bus_dmamap_sync(sc->xl_ldata.xl_rx_tag, sc->xl_ldata.xl_rx_dmamap, BUS_DMASYNC_PREWRITE); continue; } bus_dmamap_sync(sc->xl_ldata.xl_rx_tag, sc->xl_ldata.xl_rx_dmamap, BUS_DMASYNC_PREWRITE); if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); m->m_pkthdr.rcvif = ifp; m->m_pkthdr.len = m->m_len = total_len; - if (if_get(ifp, IF_CAPENABLE) & IFCAP_RXCSUM) { + if (sc->xl_capenable & IFCAP_RXCSUM) { /* Do IP checksum checking. */ if (rxstat & XL_RXSTAT_IPCKOK) m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; if (!(rxstat & XL_RXSTAT_IPCKERR)) m->m_pkthdr.csum_flags |= CSUM_IP_VALID; if ((rxstat & XL_RXSTAT_TCPCOK && !(rxstat & XL_RXSTAT_TCPCKERR)) || (rxstat & XL_RXSTAT_UDPCKOK && !(rxstat & XL_RXSTAT_UDPCKERR))) { m->m_pkthdr.csum_flags |= CSUM_DATA_VALID|CSUM_PSEUDO_HDR; m->m_pkthdr.csum_data = 0xffff; } } XL_UNLOCK(sc); if_input(ifp, m); XL_LOCK(sc); /* * If we are running from the taskqueue, the interface * might have been stopped while we were passing the last * packet up the network stack. */ if (!(sc->xl_flags & XL_FLAG_RUNNING)) return (rx_npkts); } /* * Handle the 'end of channel' condition. When the upload * engine hits the end of the RX ring, it will stall. This * is our cue to flush the RX ring, reload the uplist pointer * register and unstall the engine. * XXX This is actually a little goofy. With the ThunderLAN * chip, you get an interrupt when the receiver hits the end * of the receive ring, which tells you exactly when you * you need to reload the ring pointer. Here we have to * fake it. I'm mad at myself for not being clever enough * to avoid the use of a goto here. */ if (CSR_READ_4(sc, XL_UPLIST_PTR) == 0 || CSR_READ_4(sc, XL_UPLIST_STATUS) & XL_PKTSTAT_UP_STALLED) { CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_UP_STALL); xl_wait(sc); CSR_WRITE_4(sc, XL_UPLIST_PTR, sc->xl_ldata.xl_rx_dmaaddr); sc->xl_cdata.xl_rx_head = &sc->xl_cdata.xl_rx_chain[0]; CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_UP_UNSTALL); goto again; } return (rx_npkts); } /* * Taskqueue wrapper for xl_rxeof(). 
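 * The task is queued from the tail of xl_start_locked() so that the RX
 * ring can be flushed while the chip is still DMAing the TX descriptors
 * we just handed it (see the comment there); the handler rechecks
 * XL_FLAG_RUNNING under the driver lock before touching the ring.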
*/ static void xl_rxeof_task(void *arg, int pending) { struct xl_softc *sc = (struct xl_softc *)arg; XL_LOCK(sc); if (sc->xl_flags & XL_FLAG_RUNNING) xl_rxeof(sc); XL_UNLOCK(sc); } /* * A frame was downloaded to the chip. It's safe for us to clean up * the list buffers. */ static void xl_txeof(struct xl_softc *sc) { struct xl_chain *cur_tx; XL_LOCK_ASSERT(sc); /* * Go through our tx list and free mbufs for those * frames that have been uploaded. Note: the 3c905B * sets a special bit in the status word to let us * know that a frame has been downloaded, but the * original 3c900/3c905 adapters don't do that. * Consequently, we have to use a different test if * xl_type != XL_TYPE_905B. */ while (sc->xl_cdata.xl_tx_head != NULL) { cur_tx = sc->xl_cdata.xl_tx_head; if (CSR_READ_4(sc, XL_DOWNLIST_PTR)) break; sc->xl_cdata.xl_tx_head = cur_tx->xl_next; bus_dmamap_sync(sc->xl_mtag, cur_tx->xl_map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->xl_mtag, cur_tx->xl_map); if_inc_txcounters(sc->xl_ifp, cur_tx->xl_mbuf); m_freem(cur_tx->xl_mbuf); cur_tx->xl_mbuf = NULL; cur_tx->xl_next = sc->xl_cdata.xl_tx_free; sc->xl_cdata.xl_tx_free = cur_tx; } if (sc->xl_cdata.xl_tx_head == NULL) { sc->xl_wdog_timer = 0; sc->xl_cdata.xl_tx_tail = NULL; } else { if (CSR_READ_4(sc, XL_DMACTL) & XL_DMACTL_DOWN_STALLED || !CSR_READ_4(sc, XL_DOWNLIST_PTR)) { CSR_WRITE_4(sc, XL_DOWNLIST_PTR, sc->xl_cdata.xl_tx_head->xl_phys); CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL); } } } static void xl_txeof_90xB(struct xl_softc *sc) { struct xl_chain *cur_tx = NULL; int idx; XL_LOCK_ASSERT(sc); bus_dmamap_sync(sc->xl_ldata.xl_tx_tag, sc->xl_ldata.xl_tx_dmamap, BUS_DMASYNC_POSTREAD); idx = sc->xl_cdata.xl_tx_cons; while (idx != sc->xl_cdata.xl_tx_prod) { cur_tx = &sc->xl_cdata.xl_tx_chain[idx]; if (!(le32toh(cur_tx->xl_ptr->xl_status) & XL_TXSTAT_DL_COMPLETE)) break; if (cur_tx->xl_mbuf != NULL) { bus_dmamap_sync(sc->xl_mtag, cur_tx->xl_map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->xl_mtag, cur_tx->xl_map); if_inc_txcounters(sc->xl_ifp, cur_tx->xl_mbuf); m_freem(cur_tx->xl_mbuf); cur_tx->xl_mbuf = NULL; } sc->xl_cdata.xl_tx_cnt--; XL_INC(idx, XL_TX_LIST_CNT); } if (sc->xl_cdata.xl_tx_cnt == 0) sc->xl_wdog_timer = 0; sc->xl_cdata.xl_tx_cons = idx; } /* * TX 'end of channel' interrupt handler. Actually, we should * only get a 'TX complete' interrupt if there's a transmit error, * so this is really TX error handler. */ static void xl_txeoc(struct xl_softc *sc) { u_int8_t txstat; XL_LOCK_ASSERT(sc); while ((txstat = CSR_READ_1(sc, XL_TX_STATUS))) { if (txstat & XL_TXSTATUS_UNDERRUN || txstat & XL_TXSTATUS_JABBER || txstat & XL_TXSTATUS_RECLAIM) { device_printf(sc->xl_dev, "transmission error: 0x%02x\n", txstat); CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET); xl_wait(sc); if (sc->xl_type == XL_TYPE_905B) { if (sc->xl_cdata.xl_tx_cnt) { int i; struct xl_chain *c; i = sc->xl_cdata.xl_tx_cons; c = &sc->xl_cdata.xl_tx_chain[i]; CSR_WRITE_4(sc, XL_DOWNLIST_PTR, c->xl_phys); CSR_WRITE_1(sc, XL_DOWN_POLL, 64); sc->xl_wdog_timer = 5; } } else { if (sc->xl_cdata.xl_tx_head != NULL) { CSR_WRITE_4(sc, XL_DOWNLIST_PTR, sc->xl_cdata.xl_tx_head->xl_phys); sc->xl_wdog_timer = 5; } } /* * Remember to set this for the * first generation 3c90X chips. 
*/ CSR_WRITE_1(sc, XL_TX_FREETHRESH, XL_PACKET_SIZE >> 8); if (txstat & XL_TXSTATUS_UNDERRUN && sc->xl_tx_thresh < XL_PACKET_SIZE) { sc->xl_tx_thresh += XL_MIN_FRAMELEN; device_printf(sc->xl_dev, "tx underrun, increasing tx start threshold to %d bytes\n", sc->xl_tx_thresh); } CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_SET_START|sc->xl_tx_thresh); if (sc->xl_type == XL_TYPE_905B) { CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_SET_TX_RECLAIM|(XL_PACKET_SIZE >> 4)); } CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_ENABLE); CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL); } else { CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_ENABLE); CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL); } /* * Write an arbitrary byte to the TX_STATUS register * to clear this interrupt/error and advance to the next. */ CSR_WRITE_1(sc, XL_TX_STATUS, 0x01); } } static void xl_intr(void *arg) { struct xl_softc *sc = arg; if_t ifp = sc->xl_ifp; u_int16_t status; XL_LOCK(sc); #ifdef DEVICE_POLLING if (ifp->if_capenable & IFCAP_POLLING) { XL_UNLOCK(sc); return; } #endif for (;;) { status = CSR_READ_2(sc, XL_STATUS); if ((status & XL_INTRS) == 0 || status == 0xFFFF) break; CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ACK|(status & XL_INTRS)); if ((sc->xl_flags & XL_FLAG_RUNNING) == 0) break; if (status & XL_STAT_UP_COMPLETE) { if (xl_rxeof(sc) == 0) { while (xl_rx_resync(sc)) xl_rxeof(sc); } } if (status & XL_STAT_DOWN_COMPLETE) { if (sc->xl_type == XL_TYPE_905B) xl_txeof_90xB(sc); else xl_txeof(sc); } if (status & XL_STAT_TX_COMPLETE) { if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); xl_txeoc(sc); } if (status & XL_STAT_ADFAIL) { sc->xl_flags &= ~XL_FLAG_RUNNING; xl_init_locked(sc); break; } if (status & XL_STAT_STATSOFLOW) xl_stats_update(sc); } if (if_snd_len(ifp) && sc->xl_flags & XL_FLAG_RUNNING) { if (sc->xl_type == XL_TYPE_905B) xl_start_90xB_locked(sc); else xl_start_locked(sc); } XL_UNLOCK(sc); } #ifdef DEVICE_POLLING static int xl_poll(if_t ifp, enum poll_cmd cmd, int count) { struct xl_softc *sc = ifp->if_softc; int rx_npkts = 0; XL_LOCK(sc); if (sc->xl_flags & XL_FLAG_RUNNING) rx_npkts = xl_poll_locked(ifp, cmd, count); XL_UNLOCK(sc); return (rx_npkts); } static int xl_poll_locked(if_t ifp, enum poll_cmd cmd, int count) { struct xl_softc *sc = ifp->if_softc; int rx_npkts; XL_LOCK_ASSERT(sc); sc->rxcycles = count; rx_npkts = xl_rxeof(sc); if (sc->xl_type == XL_TYPE_905B) xl_txeof_90xB(sc); else xl_txeof(sc); if (if_snd_len(ifp)) { if (sc->xl_type == XL_TYPE_905B) xl_start_90xB_locked(ifp); else xl_start_locked(ifp); } if (cmd == POLL_AND_CHECK_STATUS) { u_int16_t status; status = CSR_READ_2(sc, XL_STATUS); if (status & XL_INTRS && status != 0xFFFF) { CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ACK|(status & XL_INTRS)); if (status & XL_STAT_TX_COMPLETE) { if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); xl_txeoc(sc); } if (status & XL_STAT_ADFAIL) { sc->xl_flags &= ~XL_FLAG_RUNNING; xl_init_locked(sc); } if (status & XL_STAT_STATSOFLOW) xl_stats_update(sc); } } return (rx_npkts); } #endif /* DEVICE_POLLING */ static void xl_tick(void *xsc) { struct xl_softc *sc = xsc; struct mii_data *mii; XL_LOCK_ASSERT(sc); if (sc->xl_miibus != NULL) { mii = device_get_softc(sc->xl_miibus); mii_tick(mii); } xl_stats_update(sc); if (xl_watchdog(sc) == EJUSTRETURN) return; callout_reset(&sc->xl_tick_callout, hz, xl_tick, sc); } static void xl_stats_update(struct xl_softc *sc) { if_t ifp = sc->xl_ifp; struct xl_stats xl_stats; u_int8_t *p; int i; XL_LOCK_ASSERT(sc); bzero((char *)&xl_stats, sizeof(struct xl_stats)); p = (u_int8_t *)&xl_stats; /* Read all the stats registers. 
*/ XL_SEL_WIN(6); for (i = 0; i < 16; i++) *p++ = CSR_READ_1(sc, XL_W6_CARRIER_LOST + i); if_inc_counter(ifp, IFCOUNTER_IERRORS, xl_stats.xl_rx_overrun); if_inc_counter(ifp, IFCOUNTER_COLLISIONS, xl_stats.xl_tx_multi_collision + xl_stats.xl_tx_single_collision + xl_stats.xl_tx_late_collision); /* * Boomerang and cyclone chips have an extra stats counter * in window 4 (BadSSD). We have to read this too in order * to clear out all the stats registers and avoid a statsoflow * interrupt. */ XL_SEL_WIN(4); CSR_READ_1(sc, XL_W4_BADSSD); XL_SEL_WIN(7); } /* * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data * pointers to the fragment pointers. */ static int xl_encap(struct xl_softc *sc, struct xl_chain *c, struct mbuf **m_head) { struct mbuf *m_new; if_t ifp = sc->xl_ifp; int error, i, nseg, total_len; u_int32_t status; XL_LOCK_ASSERT(sc); error = bus_dmamap_load_mbuf_sg(sc->xl_mtag, c->xl_map, *m_head, sc->xl_cdata.xl_tx_segs, &nseg, BUS_DMA_NOWAIT); if (error && error != EFBIG) { if_printf(ifp, "can't map mbuf (error %d)\n", error); return (error); } /* * Handle special case: we used up all 63 fragments, * but we have more mbufs left in the chain. Copy the * data into an mbuf cluster. Note that we don't * bother clearing the values in the other fragment * pointers/counters; it wouldn't gain us anything, * and would waste cycles. */ if (error) { m_new = m_collapse(*m_head, M_NOWAIT, XL_MAXFRAGS); if (m_new == NULL) { m_freem(*m_head); *m_head = NULL; return (ENOBUFS); } *m_head = m_new; error = bus_dmamap_load_mbuf_sg(sc->xl_mtag, c->xl_map, *m_head, sc->xl_cdata.xl_tx_segs, &nseg, BUS_DMA_NOWAIT); if (error) { m_freem(*m_head); *m_head = NULL; if_printf(ifp, "can't map mbuf (error %d)\n", error); return (error); } } KASSERT(nseg <= XL_MAXFRAGS, ("%s: too many DMA segments (%d)", __func__, nseg)); if (nseg == 0) { m_freem(*m_head); *m_head = NULL; return (EIO); } bus_dmamap_sync(sc->xl_mtag, c->xl_map, BUS_DMASYNC_PREWRITE); total_len = 0; for (i = 0; i < nseg; i++) { KASSERT(sc->xl_cdata.xl_tx_segs[i].ds_len <= MCLBYTES, ("segment size too large")); c->xl_ptr->xl_frag[i].xl_addr = htole32(sc->xl_cdata.xl_tx_segs[i].ds_addr); c->xl_ptr->xl_frag[i].xl_len = htole32(sc->xl_cdata.xl_tx_segs[i].ds_len); total_len += sc->xl_cdata.xl_tx_segs[i].ds_len; } c->xl_ptr->xl_frag[nseg - 1].xl_len |= htole32(XL_LAST_FRAG); if (sc->xl_type == XL_TYPE_905B) { status = XL_TXSTAT_RND_DEFEAT; #ifndef XL905B_TXCSUM_BROKEN if ((*m_head)->m_pkthdr.csum_flags) { if ((*m_head)->m_pkthdr.csum_flags & CSUM_IP) status |= XL_TXSTAT_IPCKSUM; if ((*m_head)->m_pkthdr.csum_flags & CSUM_TCP) status |= XL_TXSTAT_TCPCKSUM; if ((*m_head)->m_pkthdr.csum_flags & CSUM_UDP) status |= XL_TXSTAT_UDPCKSUM; } #endif } else status = total_len; c->xl_ptr->xl_status = htole32(status); c->xl_ptr->xl_next = 0; c->xl_mbuf = *m_head; return (0); } /* * Main transmit routine. To avoid having to do mbuf copies, we put pointers * to the mbuf data regions directly in the transmit lists. We also save a * copy of the pointers since the transmit list fragment pointers are * physical addresses. 
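 * With the ifnet conversion there is no separate if_start entry point:
 * xl_transmit() below places the mbuf on the interface send queue with
 * if_snd_enqueue() and then, if it can take the driver lock without
 * blocking, drains the queue through the chip-specific start routine;
 * otherwise the current lock holder (e.g. xl_intr()) is expected to
 * drain it before dropping the lock.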
*/ static int xl_transmit(if_t ifp, struct mbuf *m) { struct xl_softc *sc; int error; if ((error = if_snd_enqueue(ifp, m)) != 0) return (error); sc = if_getsoftc(ifp, IF_DRIVER_SOFTC); if (XL_TRY_LOCK(sc) == 0) return (0); if (sc->xl_type == XL_TYPE_905B) xl_start_90xB_locked(sc); else xl_start_locked(sc); XL_UNLOCK(sc); return (0); } static void xl_start_locked(struct xl_softc *sc) { struct mbuf *m_head; struct xl_chain *prev = NULL, *cur_tx = NULL, *start_tx; struct xl_chain *prev_tx; int error; XL_LOCK_ASSERT(sc); if ((sc->xl_flags & XL_FLAG_RUNNING) != XL_FLAG_RUNNING) return; /* * Check for an available queue slot. If there are none, * punt. */ if (sc->xl_cdata.xl_tx_free == NULL) { xl_txeoc(sc); xl_txeof(sc); if (sc->xl_cdata.xl_tx_free == NULL) return; } start_tx = sc->xl_cdata.xl_tx_free; while (sc->xl_cdata.xl_tx_free != NULL && ((m_head = if_snd_dequeue(sc->xl_ifp)) != NULL)) { /* Pick a descriptor off the free list. */ prev_tx = cur_tx; cur_tx = sc->xl_cdata.xl_tx_free; /* Pack the data into the descriptor. */ error = xl_encap(sc, cur_tx, &m_head); if (error) { cur_tx = prev_tx; if (m_head == NULL) break; if_snd_prepend(sc->xl_ifp, m_head); break; } sc->xl_cdata.xl_tx_free = cur_tx->xl_next; cur_tx->xl_next = NULL; /* Chain it together. */ if (prev != NULL) { prev->xl_next = cur_tx; prev->xl_ptr->xl_next = htole32(cur_tx->xl_phys); } prev = cur_tx; /* * If there's a BPF listener, bounce a copy of this frame * to him. */ if_mtap(sc->xl_ifp, cur_tx->xl_mbuf, NULL, 0); } /* * If there are no packets queued, bail. */ if (cur_tx == NULL) return; /* * Place the request for the upload interrupt * in the last descriptor in the chain. This way, if * we're chaining several packets at once, we'll only * get an interrupt once for the whole chain rather than * once for each packet. */ cur_tx->xl_ptr->xl_status |= htole32(XL_TXSTAT_DL_INTR); /* * Queue the packets. If the TX channel is clear, update * the downlist pointer register. */ CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_STALL); xl_wait(sc); if (sc->xl_cdata.xl_tx_head != NULL) { sc->xl_cdata.xl_tx_tail->xl_next = start_tx; sc->xl_cdata.xl_tx_tail->xl_ptr->xl_next = htole32(start_tx->xl_phys); sc->xl_cdata.xl_tx_tail->xl_ptr->xl_status &= htole32(~XL_TXSTAT_DL_INTR); sc->xl_cdata.xl_tx_tail = cur_tx; } else { sc->xl_cdata.xl_tx_head = start_tx; sc->xl_cdata.xl_tx_tail = cur_tx; } bus_dmamap_sync(sc->xl_ldata.xl_tx_tag, sc->xl_ldata.xl_tx_dmamap, BUS_DMASYNC_PREWRITE); if (!CSR_READ_4(sc, XL_DOWNLIST_PTR)) CSR_WRITE_4(sc, XL_DOWNLIST_PTR, start_tx->xl_phys); CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL); XL_SEL_WIN(7); /* * Set a timeout in case the chip goes out to lunch. */ sc->xl_wdog_timer = 5; /* * XXX Under certain conditions, usually on slower machines * where interrupts may be dropped, it's possible for the * adapter to chew up all the buffers in the receive ring * and stall, without us being able to do anything about it. * To guard against this, we need to make a pass over the * RX queue to make sure there aren't any packets pending. * Doing it here means we can flush the receive ring at the * same time the chip is DMAing the transmit descriptors we * just gave it. * * 3Com goes to some lengths to emphasize the Parallel Tasking (tm) * nature of their chips in all their marketing literature; * we may as well take advantage of it. 
:) */ taskqueue_enqueue(taskqueue_swi, &sc->xl_task); } static void xl_start_90xB_locked(struct xl_softc *sc) { struct mbuf *m_head; struct xl_chain *prev = NULL, *cur_tx = NULL, *start_tx; struct xl_chain *prev_tx; int error, idx; XL_LOCK_ASSERT(sc); if ((sc->xl_flags & XL_FLAG_RUNNING) != XL_FLAG_RUNNING) return; idx = sc->xl_cdata.xl_tx_prod; start_tx = &sc->xl_cdata.xl_tx_chain[idx]; while (sc->xl_cdata.xl_tx_chain[idx].xl_mbuf == NULL && ((XL_TX_LIST_CNT - sc->xl_cdata.xl_tx_cnt) >= 3) && ((m_head = if_snd_dequeue(sc->xl_ifp)) != NULL)) { prev_tx = cur_tx; cur_tx = &sc->xl_cdata.xl_tx_chain[idx]; /* Pack the data into the descriptor. */ error = xl_encap(sc, cur_tx, &m_head); if (error) { cur_tx = prev_tx; if (m_head == NULL) break; if_snd_prepend(sc->xl_ifp, m_head); break; } /* Chain it together. */ if (prev != NULL) prev->xl_ptr->xl_next = htole32(cur_tx->xl_phys); prev = cur_tx; /* * If there's a BPF listener, bounce a copy of this frame * to him. */ if_mtap(sc->xl_ifp, cur_tx->xl_mbuf, NULL, 0); XL_INC(idx, XL_TX_LIST_CNT); sc->xl_cdata.xl_tx_cnt++; } /* * If there are no packets queued, bail. */ if (cur_tx == NULL) return; /* * Place the request for the upload interrupt * in the last descriptor in the chain. This way, if * we're chaining several packets at once, we'll only * get an interrupt once for the whole chain rather than * once for each packet. */ cur_tx->xl_ptr->xl_status |= htole32(XL_TXSTAT_DL_INTR); /* Start transmission */ sc->xl_cdata.xl_tx_prod = idx; start_tx->xl_prev->xl_ptr->xl_next = htole32(start_tx->xl_phys); bus_dmamap_sync(sc->xl_ldata.xl_tx_tag, sc->xl_ldata.xl_tx_dmamap, BUS_DMASYNC_PREWRITE); /* * Set a timeout in case the chip goes out to lunch. */ sc->xl_wdog_timer = 5; } static void xl_init(void *xsc) { struct xl_softc *sc = xsc; XL_LOCK(sc); xl_init_locked(sc); XL_UNLOCK(sc); } static void xl_init_locked(struct xl_softc *sc) { int error, i; struct mii_data *mii = NULL; XL_LOCK_ASSERT(sc); if ((sc->xl_flags & XL_FLAG_RUNNING) != 0) return; /* * Cancel pending I/O and free all RX/TX buffers. */ xl_stop(sc); /* Reset the chip to a known state. */ xl_reset(sc); if (sc->xl_miibus == NULL) { CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_RESET); xl_wait(sc); } CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET); xl_wait(sc); DELAY(10000); if (sc->xl_miibus != NULL) mii = device_get_softc(sc->xl_miibus); /* * Clear WOL status and disable all WOL feature as WOL * would interfere Rx operation under normal environments. */ if ((sc->xl_flags & XL_FLAG_WOL) != 0) { XL_SEL_WIN(7); CSR_READ_2(sc, XL_W7_BM_PME); CSR_WRITE_2(sc, XL_W7_BM_PME, 0); } /* Init our MAC address */ XL_SEL_WIN(2); for (i = 0; i < ETHER_ADDR_LEN; i++) { CSR_WRITE_1(sc, XL_W2_STATION_ADDR_LO + i, if_lladdr(sc->xl_ifp)[i]); } /* Clear the station mask. */ for (i = 0; i < 3; i++) CSR_WRITE_2(sc, XL_W2_STATION_MASK_LO + (i * 2), 0); #ifdef notdef /* Reset TX and RX. */ CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_RESET); xl_wait(sc); CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET); xl_wait(sc); #endif /* Init circular RX list. */ error = xl_list_rx_init(sc); if (error) { device_printf(sc->xl_dev, "initialization of the rx ring failed (%d)\n", error); xl_stop(sc); return; } /* Init TX descriptors. */ if (sc->xl_type == XL_TYPE_905B) error = xl_list_tx_init_90xB(sc); else error = xl_list_tx_init(sc); if (error) { device_printf(sc->xl_dev, "initialization of the tx ring failed (%d)\n", error); xl_stop(sc); return; } /* * Set the TX freethresh value. 
* Note that this has no effect on 3c905B "cyclone" * cards but is required for 3c900/3c905 "boomerang" * cards in order to enable the download engine. */ CSR_WRITE_1(sc, XL_TX_FREETHRESH, XL_PACKET_SIZE >> 8); /* Set the TX start threshold for best performance. */ CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_SET_START|sc->xl_tx_thresh); /* * If this is a 3c905B, also set the tx reclaim threshold. * This helps cut down on the number of tx reclaim errors * that could happen on a busy network. The chip multiplies * the register value by 16 to obtain the actual threshold * in bytes, so we divide by 16 when setting the value here. * The existing threshold value can be examined by reading * the register at offset 9 in window 5. */ if (sc->xl_type == XL_TYPE_905B) { CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_SET_TX_RECLAIM|(XL_PACKET_SIZE >> 4)); } /* Set RX filter bits. */ xl_rxfilter(sc); /* * Load the address of the RX list. We have to * stall the upload engine before we can manipulate * the uplist pointer register, then unstall it when * we're finished. We also have to wait for the * stall command to complete before proceeding. * Note that we have to do this after any RX resets * have completed since the uplist register is cleared * by a reset. */ CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_UP_STALL); xl_wait(sc); CSR_WRITE_4(sc, XL_UPLIST_PTR, sc->xl_ldata.xl_rx_dmaaddr); CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_UP_UNSTALL); xl_wait(sc); if (sc->xl_type == XL_TYPE_905B) { /* Set polling interval */ CSR_WRITE_1(sc, XL_DOWN_POLL, 64); /* Load the address of the TX list */ CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_STALL); xl_wait(sc); CSR_WRITE_4(sc, XL_DOWNLIST_PTR, sc->xl_cdata.xl_tx_chain[0].xl_phys); CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL); xl_wait(sc); } /* * If the coax transceiver is on, make sure to enable * the DC-DC converter. */ XL_SEL_WIN(3); if (sc->xl_xcvr == XL_XCVR_COAX) CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_START); else CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_STOP); /* * increase packet size to allow reception of 802.1q or ISL packets. * For the 3c90x chip, set the 'allow large packets' bit in the MAC * control register. For 3c90xB/C chips, use the RX packet size * register. */ if (sc->xl_type == XL_TYPE_905B) CSR_WRITE_2(sc, XL_W3_MAXPKTSIZE, XL_PACKET_SIZE); else { u_int8_t macctl; macctl = CSR_READ_1(sc, XL_W3_MAC_CTRL); macctl |= XL_MACCTRL_ALLOW_LARGE_PACK; CSR_WRITE_1(sc, XL_W3_MAC_CTRL, macctl); } /* Clear out the stats counters. */ CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STATS_DISABLE); xl_stats_update(sc); XL_SEL_WIN(4); CSR_WRITE_2(sc, XL_W4_NET_DIAG, XL_NETDIAG_UPPER_BYTES_ENABLE); CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STATS_ENABLE); /* * Enable interrupts. */ CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ACK|0xFF); CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STAT_ENB|XL_INTRS); #ifdef DEVICE_POLLING /* Disable interrupts if we are polling. */ if (ifp->if_capenable & IFCAP_POLLING) CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB|0); else #endif CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB|XL_INTRS); if (sc->xl_flags & XL_FLAG_FUNCREG) bus_space_write_4(sc->xl_ftag, sc->xl_fhandle, 4, 0x8000); /* Set the RX early threshold */ CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_THRESH|(XL_PACKET_SIZE >>2)); CSR_WRITE_4(sc, XL_DMACTL, XL_DMACTL_UP_RX_EARLY); /* Enable receiver and transmitter. */ CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_ENABLE); xl_wait(sc); CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_ENABLE); xl_wait(sc); /* XXX Downcall to miibus. */ if (mii != NULL) mii_mediachg(mii); /* Select window 7 for normal operations. 
*/ XL_SEL_WIN(7); sc->xl_flags |= XL_FLAG_RUNNING; sc->xl_wdog_timer = 0; callout_reset(&sc->xl_tick_callout, hz, xl_tick, sc); } /* * Set media options. */ static int xl_ifmedia_upd(if_t ifp) { struct xl_softc *sc; struct ifmedia *ifm = NULL; struct mii_data *mii = NULL; sc = if_getsoftc(ifp, IF_DRIVER_SOFTC); XL_LOCK(sc); if (sc->xl_miibus != NULL) mii = device_get_softc(sc->xl_miibus); if (mii == NULL) ifm = &sc->ifmedia; else ifm = &mii->mii_media; switch (IFM_SUBTYPE(ifm->ifm_media)) { case IFM_100_FX: case IFM_10_FL: case IFM_10_2: case IFM_10_5: xl_setmode(sc, ifm->ifm_media); XL_UNLOCK(sc); return (0); } if (sc->xl_media & XL_MEDIAOPT_MII || sc->xl_media & XL_MEDIAOPT_BTX || sc->xl_media & XL_MEDIAOPT_BT4) { sc->xl_flags &= ~XL_FLAG_RUNNING; xl_init_locked(sc); } else { xl_setmode(sc, ifm->ifm_media); } XL_UNLOCK(sc); return (0); } /* * Report current media status. */ static void xl_ifmedia_sts(if_t ifp, struct ifmediareq *ifmr) { struct xl_softc *sc; u_int32_t icfg; u_int16_t status = 0; struct mii_data *mii = NULL; sc = if_getsoftc(ifp, IF_DRIVER_SOFTC); XL_LOCK(sc); if (sc->xl_miibus != NULL) mii = device_get_softc(sc->xl_miibus); XL_SEL_WIN(4); status = CSR_READ_2(sc, XL_W4_MEDIA_STATUS); XL_SEL_WIN(3); icfg = CSR_READ_4(sc, XL_W3_INTERNAL_CFG) & XL_ICFG_CONNECTOR_MASK; icfg >>= XL_ICFG_CONNECTOR_BITS; ifmr->ifm_active = IFM_ETHER; ifmr->ifm_status = IFM_AVALID; if ((status & XL_MEDIASTAT_CARRIER) == 0) ifmr->ifm_status |= IFM_ACTIVE; switch (icfg) { case XL_XCVR_10BT: ifmr->ifm_active = IFM_ETHER|IFM_10_T; if (CSR_READ_1(sc, XL_W3_MAC_CTRL) & XL_MACCTRL_DUPLEX) ifmr->ifm_active |= IFM_FDX; else ifmr->ifm_active |= IFM_HDX; break; case XL_XCVR_AUI: if (sc->xl_type == XL_TYPE_905B && sc->xl_media == XL_MEDIAOPT_10FL) { ifmr->ifm_active = IFM_ETHER|IFM_10_FL; if (CSR_READ_1(sc, XL_W3_MAC_CTRL) & XL_MACCTRL_DUPLEX) ifmr->ifm_active |= IFM_FDX; else ifmr->ifm_active |= IFM_HDX; } else ifmr->ifm_active = IFM_ETHER|IFM_10_5; break; case XL_XCVR_COAX: ifmr->ifm_active = IFM_ETHER|IFM_10_2; break; /* * XXX MII and BTX/AUTO should be separate cases. */ case XL_XCVR_100BTX: case XL_XCVR_AUTO: case XL_XCVR_MII: if (mii != NULL) { mii_pollstat(mii); ifmr->ifm_active = mii->mii_media_active; ifmr->ifm_status = mii->mii_media_status; } break; case XL_XCVR_100BFX: ifmr->ifm_active = IFM_ETHER|IFM_100_FX; break; default: if_printf(ifp, "unknown XCVR type: %d\n", icfg); break; } XL_UNLOCK(sc); } static int xl_ioctl(if_t ifp, u_long command, void *data, struct thread *td) { struct xl_softc *sc; struct ifreq *ifr = (struct ifreq *) data; - uint32_t flags, mask; + uint32_t flags; int error = 0; struct mii_data *mii = NULL; sc = if_getsoftc(ifp, IF_DRIVER_SOFTC); switch (command) { case SIOCSIFFLAGS: XL_LOCK(sc); flags = if_get(ifp, IF_FLAGS); if (flags & IFF_UP) { if (sc->xl_flags & XL_FLAG_RUNNING && (flags ^ sc->xl_if_flags) & (IFF_PROMISC | IFF_ALLMULTI)) xl_rxfilter(sc); else xl_init_locked(sc); } else { if (sc->xl_flags & XL_FLAG_RUNNING) xl_stop(sc); } sc->xl_if_flags = flags; XL_UNLOCK(sc); break; case SIOCADDMULTI: case SIOCDELMULTI: /* XXX Downcall from if_addmulti() possibly with locks held. 
*/ XL_LOCK(sc); if (sc->xl_flags & XL_FLAG_RUNNING) xl_rxfilter(sc); XL_UNLOCK(sc); break; case SIOCGIFMEDIA: case SIOCSIFMEDIA: if (sc->xl_miibus != NULL) mii = device_get_softc(sc->xl_miibus); if (mii == NULL) error = ifmedia_ioctl(ifp, ifr, &sc->ifmedia, command); else error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); break; case SIOCSIFCAP: - mask = ifr->ifr_reqcap ^ if_get(ifp, IF_CAPENABLE); #ifdef DEVICE_POLLING - if ((mask & IFCAP_POLLING) != 0 && - (if_get(ifp, IF_CAPABILITIES) & IFCAP_POLLING) != 0) { - if_xorflags(ifp, IF_CAPENABLE, IFCAP_POLLING); - if ((if_get(ifp, IF_CAPENABLE) & IFCAP_POLLING) != 0) { + if (((ifr->ifr_reqcap ^ ifr->ifr_curcap) & IFCAP_POLLING)) { + if ((ifr->ifr_reqcap & IFCAP_POLLING) != 0) { error = ether_poll_register(xl_poll, ifp); if (error) break; XL_LOCK(sc); /* Disable interrupts */ CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB|0); - if_addflags(ifp, IF_CAPENABLE, IFCAP_POLLING); XL_UNLOCK(sc); } else { error = ether_poll_deregister(ifp); /* Enable interrupts. */ XL_LOCK(sc); CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ACK | 0xFF); CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB | XL_INTRS); if (sc->xl_flags & XL_FLAG_FUNCREG) bus_space_write_4(sc->xl_ftag, sc->xl_fhandle, 4, 0x8000); XL_UNLOCK(sc); + if (error) + break; } } #endif /* DEVICE_POLLING */ - XL_LOCK(sc); - if ((mask & IFCAP_TXCSUM) != 0 && - (if_get(ifp, IF_CAPABILITIES) & IFCAP_TXCSUM) != 0) { - if_xorflags(ifp, IF_CAPENABLE, IFCAP_TXCSUM); - if ((if_get(ifp, IF_CAPENABLE) & IFCAP_TXCSUM) != 0) - if_addflags(ifp, IF_HWASSIST, - XL905B_CSUM_FEATURES); - else - if_clrflags(ifp, IF_HWASSIST, - XL905B_CSUM_FEATURES); - } - if ((mask & IFCAP_RXCSUM) != 0 && - (if_get(ifp, IF_CAPABILITIES) & IFCAP_RXCSUM) != 0) - if_xorflags(ifp, IF_CAPENABLE, IFCAP_RXCSUM); - if ((mask & IFCAP_WOL_MAGIC) != 0 && - (if_get(ifp, IF_CAPABILITIES) & IFCAP_WOL_MAGIC) != 0) - if_xorflags(ifp, IF_CAPENABLE, IFCAP_WOL_MAGIC); - XL_UNLOCK(sc); + sc->xl_capenable = ifr->ifr_reqcap; + if ((ifr->ifr_reqcap & IFCAP_TXCSUM) != 0) + ifr->ifr_hwassist = XL905B_CSUM_FEATURES; + else + ifr->ifr_hwassist = 0; break; default: error = EOPNOTSUPP; break; } return (error); } static int xl_watchdog(struct xl_softc *sc) { if_t ifp = sc->xl_ifp; u_int16_t status = 0; int misintr; XL_LOCK_ASSERT(sc); if (sc->xl_wdog_timer == 0 || --sc->xl_wdog_timer != 0) return (0); xl_rxeof(sc); xl_txeoc(sc); misintr = 0; if (sc->xl_type == XL_TYPE_905B) { xl_txeof_90xB(sc); if (sc->xl_cdata.xl_tx_cnt == 0) misintr++; } else { xl_txeof(sc); if (sc->xl_cdata.xl_tx_head == NULL) misintr++; } if (misintr != 0) { device_printf(sc->xl_dev, "watchdog timeout (missed Tx interrupts) -- recovering\n"); return (0); } if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); XL_SEL_WIN(4); status = CSR_READ_2(sc, XL_W4_MEDIA_STATUS); device_printf(sc->xl_dev, "watchdog timeout\n"); if (status & XL_MEDIASTAT_CARRIER) device_printf(sc->xl_dev, "no carrier - transceiver cable problem?\n"); sc->xl_flags &= ~XL_FLAG_RUNNING; xl_init_locked(sc); if (if_snd_len(ifp)) { if (sc->xl_type == XL_TYPE_905B) xl_start_90xB_locked(sc); else xl_start_locked(sc); } return (EJUSTRETURN); } /* * Stop the adapter and free any mbufs allocated to the * RX and TX lists. 
*/ static void xl_stop(struct xl_softc *sc) { register int i; XL_LOCK_ASSERT(sc); sc->xl_wdog_timer = 0; CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_DISABLE); CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STATS_DISABLE); CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB); CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_DISCARD); xl_wait(sc); CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_DISABLE); CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_STOP); DELAY(800); #ifdef foo CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_RESET); xl_wait(sc); CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET); xl_wait(sc); #endif CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ACK|XL_STAT_INTLATCH); CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STAT_ENB|0); CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB|0); if (sc->xl_flags & XL_FLAG_FUNCREG) bus_space_write_4(sc->xl_ftag, sc->xl_fhandle, 4, 0x8000); /* Stop the stats updater. */ callout_stop(&sc->xl_tick_callout); /* * Free data in the RX lists. */ for (i = 0; i < XL_RX_LIST_CNT; i++) { if (sc->xl_cdata.xl_rx_chain[i].xl_mbuf != NULL) { bus_dmamap_unload(sc->xl_mtag, sc->xl_cdata.xl_rx_chain[i].xl_map); bus_dmamap_destroy(sc->xl_mtag, sc->xl_cdata.xl_rx_chain[i].xl_map); m_freem(sc->xl_cdata.xl_rx_chain[i].xl_mbuf); sc->xl_cdata.xl_rx_chain[i].xl_mbuf = NULL; } } if (sc->xl_ldata.xl_rx_list != NULL) bzero(sc->xl_ldata.xl_rx_list, XL_RX_LIST_SZ); /* * Free the TX list buffers. */ for (i = 0; i < XL_TX_LIST_CNT; i++) { if (sc->xl_cdata.xl_tx_chain[i].xl_mbuf != NULL) { bus_dmamap_unload(sc->xl_mtag, sc->xl_cdata.xl_tx_chain[i].xl_map); bus_dmamap_destroy(sc->xl_mtag, sc->xl_cdata.xl_tx_chain[i].xl_map); m_freem(sc->xl_cdata.xl_tx_chain[i].xl_mbuf); sc->xl_cdata.xl_tx_chain[i].xl_mbuf = NULL; } } if (sc->xl_ldata.xl_tx_list != NULL) bzero(sc->xl_ldata.xl_tx_list, XL_TX_LIST_SZ); sc->xl_flags &= ~XL_FLAG_RUNNING; } /* * Stop all chip I/O so that the kernel's probe routines don't * get confused by errant DMAs when rebooting. */ static int xl_shutdown(device_t dev) { return (xl_suspend(dev)); } static int xl_suspend(device_t dev) { struct xl_softc *sc; sc = device_get_softc(dev); XL_LOCK(sc); xl_stop(sc); xl_setwol(sc); XL_UNLOCK(sc); return (0); } static int xl_resume(device_t dev) { struct xl_softc *sc; if_t ifp; sc = device_get_softc(dev); ifp = sc->xl_ifp; XL_LOCK(sc); if (if_get(ifp, IF_FLAGS) & IFF_UP) { sc->xl_flags &= ~XL_FLAG_RUNNING; xl_init_locked(sc); } XL_UNLOCK(sc); return (0); } static void xl_setwol(struct xl_softc *sc) { if_t ifp; u_int16_t cfg, pmstat; if ((sc->xl_flags & XL_FLAG_WOL) == 0) return; ifp = sc->xl_ifp; XL_SEL_WIN(7); /* Clear any pending PME events. */ CSR_READ_2(sc, XL_W7_BM_PME); cfg = 0; - if ((if_get(ifp, IF_CAPENABLE) & IFCAP_WOL_MAGIC) != 0) + if ((sc->xl_capenable & IFCAP_WOL_MAGIC) != 0) cfg |= XL_BM_PME_MAGIC; CSR_WRITE_2(sc, XL_W7_BM_PME, cfg); /* Enable RX. */ - if ((if_get(ifp, IF_CAPENABLE) & IFCAP_WOL_MAGIC) != 0) + if ((sc->xl_capenable & IFCAP_WOL_MAGIC) != 0) CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_ENABLE); /* Request PME. 
*/ pmstat = pci_read_config(sc->xl_dev, sc->xl_pmcap + PCIR_POWER_STATUS, 2); - if ((if_get(ifp, IF_CAPENABLE) & IFCAP_WOL_MAGIC) != 0) + if ((sc->xl_capenable & IFCAP_WOL_MAGIC) != 0) pmstat |= PCIM_PSTAT_PMEENABLE; else pmstat &= ~PCIM_PSTAT_PMEENABLE; pci_write_config(sc->xl_dev, sc->xl_pmcap + PCIR_POWER_STATUS, pmstat, 2); } Index: projects/ifnet/sys/dev/xl/if_xlreg.h =================================================================== --- projects/ifnet/sys/dev/xl/if_xlreg.h (revision 277242) +++ projects/ifnet/sys/dev/xl/if_xlreg.h (revision 277243) @@ -1,759 +1,760 @@ /*- * Copyright (c) 1997, 1998 * Bill Paul . All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Bill Paul. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. 
* * $FreeBSD$ */ #define XL_EE_READ 0x0080 /* read, 5 bit address */ #define XL_EE_WRITE 0x0040 /* write, 5 bit address */ #define XL_EE_ERASE 0x00c0 /* erase, 5 bit address */ #define XL_EE_EWEN 0x0030 /* erase, no data needed */ #define XL_EE_8BIT_READ 0x0200 /* read, 8 bit address */ #define XL_EE_BUSY 0x8000 #define XL_EE_EADDR0 0x00 /* station address, first word */ #define XL_EE_EADDR1 0x01 /* station address, next word, */ #define XL_EE_EADDR2 0x02 /* station address, last word */ #define XL_EE_PRODID 0x03 /* product ID code */ #define XL_EE_MDATA_DATE 0x04 /* manufacturing data, date */ #define XL_EE_MDATA_DIV 0x05 /* manufacturing data, division */ #define XL_EE_MDATA_PCODE 0x06 /* manufacturing data, product code */ #define XL_EE_MFG_ID 0x07 #define XL_EE_PCI_PARM 0x08 #define XL_EE_ROM_ONFO 0x09 #define XL_EE_OEM_ADR0 0x0A #define XL_EE_OEM_ADR1 0x0B #define XL_EE_OEM_ADR2 0x0C #define XL_EE_SOFTINFO1 0x0D #define XL_EE_COMPAT 0x0E #define XL_EE_SOFTINFO2 0x0F #define XL_EE_CAPS 0x10 /* capabilities word */ #define XL_EE_RSVD0 0x11 #define XL_EE_ICFG_0 0x12 #define XL_EE_ICFG_1 0x13 #define XL_EE_RSVD1 0x14 #define XL_EE_SOFTINFO3 0x15 #define XL_EE_RSVD_2 0x16 /* * Bits in the capabilities word */ #define XL_CAPS_PNP 0x0001 #define XL_CAPS_FULL_DUPLEX 0x0002 #define XL_CAPS_LARGE_PKTS 0x0004 #define XL_CAPS_SLAVE_DMA 0x0008 #define XL_CAPS_SECOND_DMA 0x0010 #define XL_CAPS_FULL_BM 0x0020 #define XL_CAPS_FRAG_BM 0x0040 #define XL_CAPS_CRC_PASSTHRU 0x0080 #define XL_CAPS_TXDONE 0x0100 #define XL_CAPS_NO_TXLENGTH 0x0200 #define XL_CAPS_RX_REPEAT 0x0400 #define XL_CAPS_SNOOPING 0x0800 #define XL_CAPS_100MBPS 0x1000 #define XL_CAPS_PWRMGMT 0x2000 /* * Bits in the software information 2 word */ #define XL_SINFO2_FIXED_BCAST_RX_BUG 0x0002 #define XL_SINFO2_FIXED_ENDEC_LOOP_BUG 0x0004 #define XL_SINFO2_AUX_WOL_CON 0x0008 #define XL_SINFO2_PME_PULSED 0x0010 #define XL_SINFO2_FIXED_MWI_BUG 0x0020 #define XL_SINFO2_WOL_AFTER_PWR_LOSS 0x0040 #define XL_SINFO2_AUTO_RST_TO_D0 0x0080 #define XL_PACKET_SIZE 1540 #define XL_MAX_FRAMELEN (ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN) /* * Register layouts. */ #define XL_COMMAND 0x0E #define XL_STATUS 0x0E #define XL_TX_STATUS 0x1B #define XL_TX_FREE 0x1C #define XL_DMACTL 0x20 #define XL_DOWNLIST_PTR 0x24 #define XL_DOWN_POLL 0x2D /* 3c90xB only */ #define XL_TX_FREETHRESH 0x2F #define XL_UPLIST_PTR 0x38 #define XL_UPLIST_STATUS 0x30 #define XL_UP_POLL 0x3D /* 3c90xB only */ #define XL_PKTSTAT_UP_STALLED 0x00002000 #define XL_PKTSTAT_UP_ERROR 0x00004000 #define XL_PKTSTAT_UP_CMPLT 0x00008000 #define XL_DMACTL_DN_CMPLT_REQ 0x00000002 #define XL_DMACTL_DOWN_STALLED 0x00000004 #define XL_DMACTL_UP_CMPLT 0x00000008 #define XL_DMACTL_DOWN_CMPLT 0x00000010 #define XL_DMACTL_UP_RX_EARLY 0x00000020 #define XL_DMACTL_ARM_COUNTDOWN 0x00000040 #define XL_DMACTL_DOWN_INPROG 0x00000080 #define XL_DMACTL_COUNTER_SPEED 0x00000100 #define XL_DMACTL_DOWNDOWN_MODE 0x00000200 #define XL_DMACTL_UP_ALTSEQ_DIS 0x00010000 /* 3c90xB/3c90xC */ #define XL_DMACTL_DOWN_ALTSEQ_DIS 0x00020000 /* 3c90xC only */ #define XL_DMACTL_DEFEAT_MWI 0x00100000 /* 3c90xB/3c90xC */ #define XL_DMACTL_DEFEAT_MRL 0x00100000 /* 3c90xB/3c90xC */ #define XL_DMACTL_UP_OVERRUN_DISC_DIS 0x00200000 /* 3c90xB/3c90xC */ #define XL_DMACTL_TARGET_ABORT 0x40000000 #define XL_DMACTL_MASTER_ABORT 0x80000000 /* * Command codes. Some command codes require that we wait for * the CMD_BUSY flag to clear. Those codes are marked as 'mustwait.' 
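 *
 * (Illustrative sketch only: a 'mustwait' command is followed by a call to
 * the driver's xl_wait() helper, which polls the busy bit roughly as shown,
 * bounded by XL_TIMEOUT.)
 *
 *	for (i = 0; i < XL_TIMEOUT; i++)
 *		if ((CSR_READ_2(sc, XL_STATUS) & XL_STAT_CMDBUSY) == 0)
 *			break;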
*/ #define XL_CMD_RESET 0x0000 /* mustwait */ #define XL_CMD_WINSEL 0x0800 #define XL_CMD_COAX_START 0x1000 #define XL_CMD_RX_DISABLE 0x1800 #define XL_CMD_RX_ENABLE 0x2000 #define XL_CMD_RX_RESET 0x2800 /* mustwait */ #define XL_CMD_UP_STALL 0x3000 /* mustwait */ #define XL_CMD_UP_UNSTALL 0x3001 #define XL_CMD_DOWN_STALL 0x3002 /* mustwait */ #define XL_CMD_DOWN_UNSTALL 0x3003 #define XL_CMD_RX_DISCARD 0x4000 #define XL_CMD_TX_ENABLE 0x4800 #define XL_CMD_TX_DISABLE 0x5000 #define XL_CMD_TX_RESET 0x5800 /* mustwait */ #define XL_CMD_INTR_FAKE 0x6000 #define XL_CMD_INTR_ACK 0x6800 #define XL_CMD_INTR_ENB 0x7000 #define XL_CMD_STAT_ENB 0x7800 #define XL_CMD_RX_SET_FILT 0x8000 #define XL_CMD_RX_SET_THRESH 0x8800 #define XL_CMD_TX_SET_THRESH 0x9000 #define XL_CMD_TX_SET_START 0x9800 #define XL_CMD_DMA_UP 0xA000 #define XL_CMD_DMA_STOP 0xA001 #define XL_CMD_STATS_ENABLE 0xA800 #define XL_CMD_STATS_DISABLE 0xB000 #define XL_CMD_COAX_STOP 0xB800 #define XL_CMD_SET_TX_RECLAIM 0xC000 /* 3c905B only */ #define XL_CMD_RX_SET_HASH 0xC800 /* 3c905B only */ #define XL_HASH_SET 0x0400 #define XL_HASHFILT_SIZE 256 /* * status codes * Note that bits 15 to 13 indicate the currently visible register window * which may be anything from 0 to 7. */ #define XL_STAT_INTLATCH 0x0001 /* 0 */ #define XL_STAT_ADFAIL 0x0002 /* 1 */ #define XL_STAT_TX_COMPLETE 0x0004 /* 2 */ #define XL_STAT_TX_AVAIL 0x0008 /* 3 first generation */ #define XL_STAT_RX_COMPLETE 0x0010 /* 4 */ #define XL_STAT_RX_EARLY 0x0020 /* 5 */ #define XL_STAT_INTREQ 0x0040 /* 6 */ #define XL_STAT_STATSOFLOW 0x0080 /* 7 */ #define XL_STAT_DMADONE 0x0100 /* 8 first generation */ #define XL_STAT_LINKSTAT 0x0100 /* 8 3c509B */ #define XL_STAT_DOWN_COMPLETE 0x0200 /* 9 */ #define XL_STAT_UP_COMPLETE 0x0400 /* 10 */ #define XL_STAT_DMABUSY 0x0800 /* 11 first generation */ #define XL_STAT_CMDBUSY 0x1000 /* 12 */ /* * Interrupts we normally want enabled. 
*/ #define XL_INTRS \ (XL_STAT_UP_COMPLETE|XL_STAT_STATSOFLOW|XL_STAT_ADFAIL| \ XL_STAT_DOWN_COMPLETE|XL_STAT_TX_COMPLETE|XL_STAT_INTLATCH) /* * Window 0 registers */ #define XL_W0_EE_DATA 0x0C #define XL_W0_EE_CMD 0x0A #define XL_W0_RSRC_CFG 0x08 #define XL_W0_ADDR_CFG 0x06 #define XL_W0_CFG_CTRL 0x04 #define XL_W0_PROD_ID 0x02 #define XL_W0_MFG_ID 0x00 /* * Window 1 */ #define XL_W1_TX_FIFO 0x10 #define XL_W1_FREE_TX 0x0C #define XL_W1_TX_STATUS 0x0B #define XL_W1_TX_TIMER 0x0A #define XL_W1_RX_STATUS 0x08 #define XL_W1_RX_FIFO 0x00 /* * RX status codes */ #define XL_RXSTATUS_OVERRUN 0x01 #define XL_RXSTATUS_RUNT 0x02 #define XL_RXSTATUS_ALIGN 0x04 #define XL_RXSTATUS_CRC 0x08 #define XL_RXSTATUS_OVERSIZE 0x10 #define XL_RXSTATUS_DRIBBLE 0x20 /* * TX status codes */ #define XL_TXSTATUS_RECLAIM 0x02 /* 3c905B only */ #define XL_TXSTATUS_OVERFLOW 0x04 #define XL_TXSTATUS_MAXCOLS 0x08 #define XL_TXSTATUS_UNDERRUN 0x10 #define XL_TXSTATUS_JABBER 0x20 #define XL_TXSTATUS_INTREQ 0x40 #define XL_TXSTATUS_COMPLETE 0x80 /* * Window 2 */ #define XL_W2_RESET_OPTIONS 0x0C /* 3c905B only */ #define XL_W2_STATION_MASK_HI 0x0A #define XL_W2_STATION_MASK_MID 0x08 #define XL_W2_STATION_MASK_LO 0x06 #define XL_W2_STATION_ADDR_HI 0x04 #define XL_W2_STATION_ADDR_MID 0x02 #define XL_W2_STATION_ADDR_LO 0x00 #define XL_RESETOPT_FEATUREMASK (0x0001 | 0x0002 | 0x004) #define XL_RESETOPT_D3RESETDIS 0x0008 #define XL_RESETOPT_DISADVFD 0x0010 #define XL_RESETOPT_DISADV100 0x0020 #define XL_RESETOPT_DISAUTONEG 0x0040 #define XL_RESETOPT_DEBUGMODE 0x0080 #define XL_RESETOPT_FASTAUTO 0x0100 #define XL_RESETOPT_FASTEE 0x0200 #define XL_RESETOPT_FORCEDCONF 0x0400 #define XL_RESETOPT_TESTPDTPDR 0x0800 #define XL_RESETOPT_TEST100TX 0x1000 #define XL_RESETOPT_TEST100RX 0x2000 #define XL_RESETOPT_INVERT_LED 0x0010 #define XL_RESETOPT_INVERT_MII 0x4000 /* * Window 3 (fifo management) */ #define XL_W3_INTERNAL_CFG 0x00 #define XL_W3_MAXPKTSIZE 0x04 /* 3c905B only */ #define XL_W3_RESET_OPT 0x08 #define XL_W3_FREE_TX 0x0C #define XL_W3_FREE_RX 0x0A #define XL_W3_MAC_CTRL 0x06 #define XL_ICFG_CONNECTOR_MASK 0x00F00000 #define XL_ICFG_CONNECTOR_BITS 20 #define XL_ICFG_RAMSIZE_MASK 0x00000007 #define XL_ICFG_RAMWIDTH 0x00000008 #define XL_ICFG_ROMSIZE_MASK (0x00000040 | 0x00000080) #define XL_ICFG_DISABLE_BASSD 0x00000100 #define XL_ICFG_RAMLOC 0x00000200 #define XL_ICFG_RAMPART (0x00010000 | 0x00020000) #define XL_ICFG_XCVRSEL (0x00100000 | 0x00200000 | 0x00400000) #define XL_ICFG_AUTOSEL 0x01000000 #define XL_XCVR_10BT 0x00 #define XL_XCVR_AUI 0x01 #define XL_XCVR_RSVD_0 0x02 #define XL_XCVR_COAX 0x03 #define XL_XCVR_100BTX 0x04 #define XL_XCVR_100BFX 0x05 #define XL_XCVR_MII 0x06 #define XL_XCVR_RSVD_1 0x07 #define XL_XCVR_AUTO 0x08 /* 3c905B only */ #define XL_MACCTRL_DEFER_EXT_END 0x0001 #define XL_MACCTRL_DEFER_0 0x0002 #define XL_MACCTRL_DEFER_1 0x0004 #define XL_MACCTRL_DEFER_2 0x0008 #define XL_MACCTRL_DEFER_3 0x0010 #define XL_MACCTRL_DUPLEX 0x0020 #define XL_MACCTRL_ALLOW_LARGE_PACK 0x0040 #define XL_MACCTRL_EXTEND_AFTER_COL 0x0080 /* 3c905B only */ #define XL_MACCTRL_FLOW_CONTROL_ENB 0x0100 /* 3c905B only */ #define XL_MACCTRL_VLT_END 0x0200 /* 3c905B only */ /* * The 'reset options' register contains power-on reset values * loaded from the EEPROM. This includes the supported media * types on the card. It is also known as the media options register. 
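 *
 * (Hedged illustration, not part of this change: a probe routine can read
 * the word through window 3 and test the XL_MEDIAOPT_* bits defined below,
 * for example)
 *
 *	XL_SEL_WIN(3);
 *	media = CSR_READ_2(sc, XL_W3_MEDIA_OPT);
 *	if (media & XL_MEDIAOPT_MII)
 *		(probe for an MII PHY)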
*/ #define XL_W3_MEDIA_OPT 0x08 #define XL_MEDIAOPT_BT4 0x0001 /* MII */ #define XL_MEDIAOPT_BTX 0x0002 /* on-chip */ #define XL_MEDIAOPT_BFX 0x0004 /* on-chip */ #define XL_MEDIAOPT_BT 0x0008 /* on-chip */ #define XL_MEDIAOPT_BNC 0x0010 /* on-chip */ #define XL_MEDIAOPT_AUI 0x0020 /* on-chip */ #define XL_MEDIAOPT_MII 0x0040 /* MII */ #define XL_MEDIAOPT_VCO 0x0100 /* 1st gen chip only */ #define XL_MEDIAOPT_10FL 0x0100 /* 3x905B only, on-chip */ #define XL_MEDIAOPT_MASK 0x01FF /* * Window 4 (diagnostics) */ #define XL_W4_UPPERBYTESOK 0x0D #define XL_W4_BADSSD 0x0C #define XL_W4_MEDIA_STATUS 0x0A #define XL_W4_PHY_MGMT 0x08 #define XL_W4_NET_DIAG 0x06 #define XL_W4_FIFO_DIAG 0x04 #define XL_W4_VCO_DIAG 0x02 #define XL_W4_CTRLR_STAT 0x08 #define XL_W4_TX_DIAG 0x00 #define XL_MII_CLK 0x01 #define XL_MII_DATA 0x02 #define XL_MII_DIR 0x04 #define XL_MEDIA_SQE 0x0008 #define XL_MEDIA_10TP 0x00C0 #define XL_MEDIA_LNK 0x0080 #define XL_MEDIA_LNKBEAT 0x0800 #define XL_MEDIASTAT_CRCSTRIP 0x0004 #define XL_MEDIASTAT_SQEENB 0x0008 #define XL_MEDIASTAT_COLDET 0x0010 #define XL_MEDIASTAT_CARRIER 0x0020 #define XL_MEDIASTAT_JABGUARD 0x0040 #define XL_MEDIASTAT_LINKBEAT 0x0080 #define XL_MEDIASTAT_JABDETECT 0x0200 #define XL_MEDIASTAT_POLREVERS 0x0400 #define XL_MEDIASTAT_LINKDETECT 0x0800 #define XL_MEDIASTAT_TXINPROG 0x1000 #define XL_MEDIASTAT_DCENB 0x4000 #define XL_MEDIASTAT_AUIDIS 0x8000 #define XL_NETDIAG_TEST_LOWVOLT 0x0001 #define XL_NETDIAG_ASIC_REVMASK \ (0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020) #define XL_NETDIAG_UPPER_BYTES_ENABLE 0x0040 #define XL_NETDIAG_STATS_ENABLED 0x0080 #define XL_NETDIAG_TX_FATALERR 0x0100 #define XL_NETDIAG_TRANSMITTING 0x0200 #define XL_NETDIAG_RX_ENABLED 0x0400 #define XL_NETDIAG_TX_ENABLED 0x0800 #define XL_NETDIAG_FIFO_LOOPBACK 0x1000 #define XL_NETDIAG_MAC_LOOPBACK 0x2000 #define XL_NETDIAG_ENDEC_LOOPBACK 0x4000 #define XL_NETDIAG_EXTERNAL_LOOP 0x8000 /* * Window 5 */ #define XL_W5_STAT_ENB 0x0C #define XL_W5_INTR_ENB 0x0A #define XL_W5_RECLAIM_THRESH 0x09 /* 3c905B only */ #define XL_W5_RX_FILTER 0x08 #define XL_W5_RX_EARLYTHRESH 0x06 #define XL_W5_TX_AVAILTHRESH 0x02 #define XL_W5_TX_STARTTHRESH 0x00 /* * RX filter bits */ #define XL_RXFILTER_INDIVIDUAL 0x01 #define XL_RXFILTER_ALLMULTI 0x02 #define XL_RXFILTER_BROADCAST 0x04 #define XL_RXFILTER_ALLFRAMES 0x08 #define XL_RXFILTER_MULTIHASH 0x10 /* 3c905B only */ /* * Window 6 (stats) */ #define XL_W6_TX_BYTES_OK 0x0C #define XL_W6_RX_BYTES_OK 0x0A #define XL_W6_UPPER_FRAMES_OK 0x09 #define XL_W6_DEFERRED 0x08 #define XL_W6_RX_OK 0x07 #define XL_W6_TX_OK 0x06 #define XL_W6_RX_OVERRUN 0x05 #define XL_W6_COL_LATE 0x04 #define XL_W6_COL_SINGLE 0x03 #define XL_W6_COL_MULTIPLE 0x02 #define XL_W6_SQE_ERRORS 0x01 #define XL_W6_CARRIER_LOST 0x00 /* * Window 7 (bus master control) */ #define XL_W7_BM_ADDR 0x00 #define XL_W7_BM_LEN 0x06 #define XL_W7_BM_STATUS 0x0B #define XL_W7_BM_TIMEr 0x0A #define XL_W7_BM_PME 0x0C #define XL_BM_PME_WAKE 0x0001 #define XL_BM_PME_MAGIC 0x0002 #define XL_BM_PME_LINKCHG 0x0004 #define XL_BM_PME_WAKETIMER 0x0008 /* * bus master control registers */ #define XL_BM_PKTSTAT 0x20 #define XL_BM_DOWNLISTPTR 0x24 #define XL_BM_FRAGADDR 0x28 #define XL_BM_FRAGLEN 0x2C #define XL_BM_TXFREETHRESH 0x2F #define XL_BM_UPPKTSTAT 0x30 #define XL_BM_UPLISTPTR 0x38 #define XL_LAST_FRAG 0x80000000 #define XL_MAXFRAGS 63 #define XL_RX_LIST_CNT 128 #define XL_TX_LIST_CNT 256 #define XL_RX_LIST_SZ \ (XL_RX_LIST_CNT * sizeof(struct xl_list_onefrag)) #define XL_TX_LIST_SZ \ (XL_TX_LIST_CNT * sizeof(struct 
xl_list)) #define XL_MIN_FRAMELEN 60 #define ETHER_ALIGN 2 #define XL_INC(x, y) (x) = (x + 1) % y /* * Boomerang/Cyclone TX/RX list structure. * For the TX lists, bits 0 to 12 of the status word indicate * length. * This looks suspiciously like the ThunderLAN, doesn't it. */ struct xl_frag { u_int32_t xl_addr; /* 63 addr/len pairs */ u_int32_t xl_len; }; struct xl_list { u_int32_t xl_next; /* final entry has 0 nextptr */ u_int32_t xl_status; struct xl_frag xl_frag[XL_MAXFRAGS]; }; struct xl_list_onefrag { u_int32_t xl_next; /* final entry has 0 nextptr */ volatile u_int32_t xl_status; volatile struct xl_frag xl_frag; }; struct xl_list_data { struct xl_list_onefrag *xl_rx_list; struct xl_list *xl_tx_list; u_int32_t xl_rx_dmaaddr; bus_dma_tag_t xl_rx_tag; bus_dmamap_t xl_rx_dmamap; u_int32_t xl_tx_dmaaddr; bus_dma_tag_t xl_tx_tag; bus_dmamap_t xl_tx_dmamap; }; struct xl_chain { struct xl_list *xl_ptr; struct mbuf *xl_mbuf; struct xl_chain *xl_next; struct xl_chain *xl_prev; u_int32_t xl_phys; bus_dmamap_t xl_map; }; struct xl_chain_onefrag { struct xl_list_onefrag *xl_ptr; struct mbuf *xl_mbuf; struct xl_chain_onefrag *xl_next; bus_dmamap_t xl_map; }; struct xl_chain_data { struct xl_chain_onefrag xl_rx_chain[XL_RX_LIST_CNT]; struct xl_chain xl_tx_chain[XL_TX_LIST_CNT]; bus_dma_segment_t xl_tx_segs[XL_MAXFRAGS]; struct xl_chain_onefrag *xl_rx_head; /* 3c90x "boomerang" queuing stuff */ struct xl_chain *xl_tx_head; struct xl_chain *xl_tx_tail; struct xl_chain *xl_tx_free; /* 3c90xB "cyclone/hurricane/tornado" stuff */ int xl_tx_prod; int xl_tx_cons; int xl_tx_cnt; }; #define XL_RXSTAT_LENMASK 0x00001FFF #define XL_RXSTAT_UP_ERROR 0x00004000 #define XL_RXSTAT_UP_CMPLT 0x00008000 #define XL_RXSTAT_UP_OVERRUN 0x00010000 #define XL_RXSTAT_RUNT 0x00020000 #define XL_RXSTAT_ALIGN 0x00040000 #define XL_RXSTAT_CRC 0x00080000 #define XL_RXSTAT_OVERSIZE 0x00100000 #define XL_RXSTAT_DRIBBLE 0x00800000 #define XL_RXSTAT_UP_OFLOW 0x01000000 #define XL_RXSTAT_IPCKERR 0x02000000 /* 3c905B only */ #define XL_RXSTAT_TCPCKERR 0x04000000 /* 3c905B only */ #define XL_RXSTAT_UDPCKERR 0x08000000 /* 3c905B only */ #define XL_RXSTAT_BUFEN 0x10000000 /* 3c905B only */ #define XL_RXSTAT_IPCKOK 0x20000000 /* 3c905B only */ #define XL_RXSTAT_TCPCOK 0x40000000 /* 3c905B only */ #define XL_RXSTAT_UDPCKOK 0x80000000 /* 3c905B only */ #define XL_TXSTAT_LENMASK 0x00001FFF #define XL_TXSTAT_CRCDIS 0x00002000 #define XL_TXSTAT_TX_INTR 0x00008000 #define XL_TXSTAT_DL_COMPLETE 0x00010000 #define XL_TXSTAT_IPCKSUM 0x02000000 /* 3c905B only */ #define XL_TXSTAT_TCPCKSUM 0x04000000 /* 3c905B only */ #define XL_TXSTAT_UDPCKSUM 0x08000000 /* 3c905B only */ #define XL_TXSTAT_RND_DEFEAT 0x10000000 /* 3c905B only */ #define XL_TXSTAT_EMPTY 0x20000000 /* 3c905B only */ #define XL_TXSTAT_DL_INTR 0x80000000 #define XL_CAPABILITY_BM 0x20 struct xl_type { u_int16_t xl_vid; u_int16_t xl_did; const char *xl_name; }; /* * The 3C905B adapters implement a few features that we want to * take advantage of, namely the multicast hash filter. With older * chips, you only have the option of turning on reception of all * multicast frames, which is kind of lame. * * We also use this to decide on a transmit strategy. For the 3c90xB * cards, we can use polled descriptor mode, which reduces CPU overhead. 
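 *
 * (Illustrative only; the command and constants come from this header, the
 * usage is assumed: enabling reception of one multicast group on a 90xB
 * hashes the address and sets a single bit of the 256-entry filter.)
 *
 *	h = ether_crc32_be(addr, ETHER_ADDR_LEN) & (XL_HASHFILT_SIZE - 1);
 *	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_HASH | XL_HASH_SET | h);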
*/ #define XL_TYPE_905B 1 #define XL_TYPE_90X 2 #define XL_FLAG_FUNCREG 0x0001 #define XL_FLAG_PHYOK 0x0002 #define XL_FLAG_EEPROM_OFFSET_30 0x0004 #define XL_FLAG_WEIRDRESET 0x0008 #define XL_FLAG_8BITROM 0x0010 #define XL_FLAG_INVERT_LED_PWR 0x0020 #define XL_FLAG_INVERT_MII_PWR 0x0040 #define XL_FLAG_NO_XCVR_PWR 0x0080 #define XL_FLAG_USE_MMIO 0x0100 #define XL_FLAG_NO_MMIO 0x0200 #define XL_FLAG_WOL 0x0400 #define XL_FLAG_RUNNING 0x0800 #define XL_NO_XCVR_PWR_MAGICBITS 0x0900 struct xl_softc { if_t xl_ifp; /* interface info */ device_t xl_dev; /* device info */ struct ifmedia ifmedia; /* media info */ bus_space_handle_t xl_bhandle; bus_space_tag_t xl_btag; void *xl_intrhand; struct resource *xl_irq; struct resource *xl_res; device_t xl_miibus; const struct xl_type *xl_info; /* 3Com adapter info */ bus_dma_tag_t xl_mtag; bus_dmamap_t xl_tmpmap; /* spare DMA map */ u_int8_t xl_type; u_int32_t xl_xcvr; u_int16_t xl_media; u_int16_t xl_caps; u_int16_t xl_tx_thresh; int xl_pmcap; int xl_if_flags; struct xl_list_data xl_ldata; struct xl_chain_data xl_cdata; struct callout xl_tick_callout; int xl_wdog_timer; + uint32_t xl_capenable; int xl_flags; struct resource *xl_fres; bus_space_handle_t xl_fhandle; bus_space_tag_t xl_ftag; struct mtx xl_mtx; struct task xl_task; #ifdef DEVICE_POLLING int rxcycles; #endif }; #define XL_LOCK(_sc) mtx_lock(&(_sc)->xl_mtx) #define XL_TRY_LOCK(_sc) mtx_trylock(&(_sc)->xl_mtx) #define XL_UNLOCK(_sc) mtx_unlock(&(_sc)->xl_mtx) #define XL_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->xl_mtx, MA_OWNED) #define xl_rx_goodframes(x) \ ((x.xl_upper_frames_ok & 0x03) << 8) | x.xl_rx_frames_ok #define xl_tx_goodframes(x) \ ((x.xl_upper_frames_ok & 0x30) << 4) | x.xl_tx_frames_ok struct xl_stats { u_int8_t xl_carrier_lost; u_int8_t xl_sqe_errs; u_int8_t xl_tx_multi_collision; u_int8_t xl_tx_single_collision; u_int8_t xl_tx_late_collision; u_int8_t xl_rx_overrun; u_int8_t xl_tx_frames_ok; u_int8_t xl_rx_frames_ok; u_int8_t xl_tx_deferred; u_int8_t xl_upper_frames_ok; u_int16_t xl_rx_bytes_ok; u_int16_t xl_tx_bytes_ok; u_int16_t status; }; /* * register space access macros */ #define CSR_WRITE_4(sc, reg, val) \ bus_space_write_4(sc->xl_btag, sc->xl_bhandle, reg, val) #define CSR_WRITE_2(sc, reg, val) \ bus_space_write_2(sc->xl_btag, sc->xl_bhandle, reg, val) #define CSR_WRITE_1(sc, reg, val) \ bus_space_write_1(sc->xl_btag, sc->xl_bhandle, reg, val) #define CSR_READ_4(sc, reg) \ bus_space_read_4(sc->xl_btag, sc->xl_bhandle, reg) #define CSR_READ_2(sc, reg) \ bus_space_read_2(sc->xl_btag, sc->xl_bhandle, reg) #define CSR_READ_1(sc, reg) \ bus_space_read_1(sc->xl_btag, sc->xl_bhandle, reg) #define CSR_BARRIER(sc, reg, length, flags) \ bus_space_barrier(sc->xl_btag, sc->xl_bhandle, reg, length, flags) #define XL_SEL_WIN(x) do { \ CSR_BARRIER(sc, XL_COMMAND, 2, \ BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); \ CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_WINSEL | x); \ CSR_BARRIER(sc, XL_COMMAND, 2, \ BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); \ } while (0) #define XL_TIMEOUT 1000 /* * General constants that are fun to know. * * 3Com PCI vendor ID */ #define TC_VENDORID 0x10B7 /* * 3Com chip device IDs. 
*/ #define TC_DEVICEID_BOOMERANG_10BT 0x9000 #define TC_DEVICEID_BOOMERANG_10BT_COMBO 0x9001 #define TC_DEVICEID_BOOMERANG_10_100BT 0x9050 #define TC_DEVICEID_BOOMERANG_100BT4 0x9051 #define TC_DEVICEID_KRAKATOA_10BT 0x9004 #define TC_DEVICEID_KRAKATOA_10BT_COMBO 0x9005 #define TC_DEVICEID_KRAKATOA_10BT_TPC 0x9006 #define TC_DEVICEID_CYCLONE_10FL 0x900A #define TC_DEVICEID_HURRICANE_10_100BT 0x9055 #define TC_DEVICEID_CYCLONE_10_100BT4 0x9056 #define TC_DEVICEID_CYCLONE_10_100_COMBO 0x9058 #define TC_DEVICEID_CYCLONE_10_100FX 0x905A #define TC_DEVICEID_TORNADO_10_100BT 0x9200 #define TC_DEVICEID_TORNADO_10_100BT_920B 0x9201 #define TC_DEVICEID_TORNADO_10_100BT_920B_WNM 0x9202 #define TC_DEVICEID_HURRICANE_10_100BT_SERV 0x9800 #define TC_DEVICEID_TORNADO_10_100BT_SERV 0x9805 #define TC_DEVICEID_HURRICANE_SOHO100TX 0x7646 #define TC_DEVICEID_TORNADO_HOMECONNECT 0x4500 #define TC_DEVICEID_HURRICANE_555 0x5055 #define TC_DEVICEID_HURRICANE_556 0x6055 #define TC_DEVICEID_HURRICANE_556B 0x6056 #define TC_DEVICEID_HURRICANE_575A 0x5057 #define TC_DEVICEID_HURRICANE_575B 0x5157 #define TC_DEVICEID_HURRICANE_575C 0x5257 #define TC_DEVICEID_HURRICANE_656 0x6560 #define TC_DEVICEID_HURRICANE_656B 0x6562 #define TC_DEVICEID_TORNADO_656C 0x6564 /* * PCI low memory base and low I/O base register, and * other PCI registers. Note: some are only available on * the 3c905B, in particular those that related to power management. */ #define XL_PCI_VENDOR_ID 0x00 #define XL_PCI_DEVICE_ID 0x02 #define XL_PCI_COMMAND 0x04 #define XL_PCI_STATUS 0x06 #define XL_PCI_CLASSCODE 0x09 #define XL_PCI_LATENCY_TIMER 0x0D #define XL_PCI_HEADER_TYPE 0x0E #define XL_PCI_LOIO 0x10 #define XL_PCI_LOMEM 0x14 #define XL_PCI_FUNCMEM 0x18 #define XL_PCI_BIOSROM 0x30 #define XL_PCI_INTLINE 0x3C #define XL_PCI_INTPIN 0x3D #define XL_PCI_MINGNT 0x3E #define XL_PCI_MINLAT 0x0F #define XL_PCI_RESETOPT 0x48 #define XL_PCI_EEPROM_DATA 0x4C /* 3c905B-only registers */ #define XL_PCI_CAPID 0xDC /* 8 bits */ #define XL_PCI_NEXTPTR 0xDD /* 8 bits */ #define XL_PCI_PWRMGMTCAP 0xDE /* 16 bits */ #define XL_PCI_PWRMGMTCTRL 0xE0 /* 16 bits */ #define XL_PSTATE_MASK 0x0003 #define XL_PSTATE_D0 0x0000 #define XL_PSTATE_D1 0x0002 #define XL_PSTATE_D2 0x0002 #define XL_PSTATE_D3 0x0003 #define XL_PME_EN 0x0010 #define XL_PME_STATUS 0x8000 #ifndef IFM_10_FL #define IFM_10_FL 13 /* 10baseFL - Fiber */ #endif Index: projects/ifnet/sys/net/if.c =================================================================== --- projects/ifnet/sys/net/if.c (revision 277242) +++ projects/ifnet/sys/net/if.c (revision 277243) @@ -1,3763 +1,3727 @@ /*- * Copyright (c) 1980, 1986, 1993 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. 
* * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * @(#)if.c 8.5 (Berkeley) 1/9/95 * $FreeBSD$ */ #include "opt_compat.h" #include "opt_inet6.h" #include "opt_inet.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #if defined(INET) || defined(INET6) #include #include #include #include #include #ifdef INET #include #endif /* INET */ #ifdef INET6 #include #include #endif /* INET6 */ #endif /* INET || INET6 */ #include #ifdef COMPAT_FREEBSD32 #include #include #endif SYSCTL_NODE(_net, PF_LINK, link, CTLFLAG_RW, 0, "Link layers"); SYSCTL_NODE(_net_link, 0, generic, CTLFLAG_RW, 0, "Generic link-management"); int ifqmaxlen = IFQ_MAXLEN; SYSCTL_INT(_net_link, OID_AUTO, ifqmaxlen, CTLFLAG_RDTUN, &ifqmaxlen, 0, "max send queue size"); /* Log link state change events */ static int log_link_state_change = 1; SYSCTL_INT(_net_link, OID_AUTO, log_link_state_change, CTLFLAG_RW, &log_link_state_change, 0, "log interface link state change events"); /* Interface description */ static unsigned int ifdescr_maxlen = 1024; SYSCTL_UINT(_net, OID_AUTO, ifdescr_maxlen, CTLFLAG_RW, &ifdescr_maxlen, 0, "administrative maximum length for interface description"); static MALLOC_DEFINE(M_IFDESCR, "ifdescr", "ifnet descriptions"); /* global sx for non-critical path ifdescr */ static struct sx ifdescr_sx; SX_SYSINIT(ifdescr_sx, &ifdescr_sx, "ifnet descr"); void (*bridge_linkstate_p)(struct ifnet *ifp); void (*ng_ether_link_state_p)(struct ifnet *ifp, int state); void (*lagg_linkstate_p)(struct ifnet *ifp, int state); /* These are external hooks for CARP. */ void (*carp_linkstate_p)(struct ifnet *ifp); void (*carp_demote_adj_p)(int, char *); int (*carp_master_p)(struct ifaddr *); #if defined(INET) || defined(INET6) int (*carp_forus_p)(struct ifnet *ifp, u_char *dhost); int (*carp_output_p)(struct ifnet *ifp, struct mbuf *m, const struct sockaddr *sa); int (*carp_ioctl_p)(struct ifreq *, u_long, struct thread *); int (*carp_attach_p)(struct ifaddr *, int); void (*carp_detach_p)(struct ifaddr *); #endif #ifdef INET int (*carp_iamatch_p)(struct ifaddr *, uint8_t **); #endif #ifdef INET6 struct ifaddr *(*carp_iamatch6_p)(struct ifnet *ifp, struct in6_addr *taddr6); caddr_t (*carp_macmatch6_p)(struct ifnet *ifp, struct mbuf *m, const struct in6_addr *taddr); #endif struct mbuf *(*tbr_dequeue_ptr)(struct ifaltq *, int) = NULL; /* * XXX: Style; these should be sorted alphabetically, and unprototyped * static functions should be prototyped. Currently they are sorted by * declaration order. 
*/ static void if_attachdomain(void *); static void if_attachdomain1(struct ifnet *); static int ifconf(u_long, caddr_t); static void if_freemulti(struct ifmultiaddr *); static void if_grow(void); static void if_route(struct ifnet *, int flag, int fam); static int if_setflag(struct ifnet *, int, int, int *, int); static void if_unroute(struct ifnet *, int flag, int fam); static void link_rtrequest(int, struct rtentry *, struct rt_addrinfo *); static int if_rtdel(struct radix_node *, void *); static int if_delmulti_locked(struct ifnet *, struct ifmultiaddr *, int); static void do_link_state_change(void *, int); static int if_getgroup(struct ifgroupreq *, struct ifnet *); static int if_getgroupmembers(struct ifgroupreq *); static void if_delgroups(struct ifnet *); static void if_attach_internal(struct ifnet *, int); static void if_detach_internal(struct ifnet *, int); static struct ifqueue * if_snd_alloc(int); static void if_snd_free(struct ifqueue *); static void if_snd_qflush(if_t); #ifdef INET6 /* * XXX: declare here to avoid to include many inet6 related files.. * should be more generalized? */ extern void nd6_setmtu(struct ifnet *); #endif VNET_DEFINE(int, if_index); VNET_DEFINE(struct ifnethead, ifnet); /* depend on static init XXX */ VNET_DEFINE(struct ifgrouphead, ifg_head); static VNET_DEFINE(int, if_indexlim) = 8; /* Table of ifnet by index. */ VNET_DEFINE(struct ifnet **, ifindex_table); #define V_if_indexlim VNET(if_indexlim) #define V_ifindex_table VNET(ifindex_table) static struct iftsomax default_tsomax = { /* * The TSO defaults need to be such that an NFS mbuf list of 35 * mbufs totalling just below 64K works and that a chain of mbufs * can be defragged into at most 32 segments. */ .tsomax_bytes = MIN(IP_MAXPACKET, (32 * MCLBYTES) - (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN)), .tsomax_segcount = 35, .tsomax_segsize = 2048, }; /* * The global network interface list (V_ifnet) and related state (such as * if_index, if_indexlim, and ifindex_table) are protected by an sxlock and * an rwlock. Either may be acquired shared to stablize the list, but both * must be acquired writable to modify the list. This model allows us to * both stablize the interface list during interrupt thread processing, but * also to stablize it over long-running ioctls, without introducing priority * inversions and deadlocks. */ struct rwlock ifnet_rwlock; RW_SYSINIT_FLAGS(ifnet_rw, &ifnet_rwlock, "ifnet_rw", RW_RECURSE); struct sx ifnet_sxlock; SX_SYSINIT_FLAGS(ifnet_sx, &ifnet_sxlock, "ifnet_sx", SX_RECURSE); /* * The allocation of network interfaces is a rather non-atomic affair; we * need to select an index before we are ready to expose the interface for * use, so will use this pointer value to indicate reservation. 
*/ #define IFNET_HOLD (void *)(uintptr_t)(-1) static MALLOC_DEFINE(M_IFNET, "ifnet", "interface internals"); MALLOC_DEFINE(M_IFADDR, "ifaddr", "interface address"); MALLOC_DEFINE(M_IFMADDR, "ether_multi", "link-level multicast address"); static struct ifops ifdead_ops; struct ifnet * ifnet_byindex_locked(u_short idx) { if (idx > V_if_index) return (NULL); if (V_ifindex_table[idx] == IFNET_HOLD) return (NULL); return (V_ifindex_table[idx]); } struct ifnet * ifnet_byindex(u_short idx) { struct ifnet *ifp; IFNET_RLOCK_NOSLEEP(); ifp = ifnet_byindex_locked(idx); IFNET_RUNLOCK_NOSLEEP(); return (ifp); } struct ifnet * ifnet_byindex_ref(u_short idx) { struct ifnet *ifp; IFNET_RLOCK_NOSLEEP(); ifp = ifnet_byindex_locked(idx); if (ifp == NULL || (ifp->if_flags & IFF_DYING)) { IFNET_RUNLOCK_NOSLEEP(); return (NULL); } if_ref(ifp); IFNET_RUNLOCK_NOSLEEP(); return (ifp); } /* * Allocate an ifindex array entry. */ static void ifindex_alloc(struct ifnet *ifp) { u_short idx; IFNET_WLOCK(); retry: /* * Try to find an empty slot below V_if_index. If we fail, take the * next slot. */ for (idx = 1; idx <= V_if_index; idx++) { if (V_ifindex_table[idx] == NULL) break; } /* Catch if_index overflow. */ if (idx >= V_if_indexlim) { if_grow(); goto retry; } if (idx > V_if_index) V_if_index = idx; V_ifindex_table[idx] = ifp; ifp->if_index = idx; IFNET_WUNLOCK(); } static void ifindex_free(u_short idx) { IFNET_WLOCK_ASSERT(); V_ifindex_table[idx] = NULL; while (V_if_index > 0 && V_ifindex_table[V_if_index] == NULL) V_if_index--; } struct ifaddr * ifaddr_byindex(u_short idx) { struct ifaddr *ifa; IFNET_RLOCK_NOSLEEP(); ifa = ifnet_byindex_locked(idx)->if_addr; if (ifa != NULL) ifa_ref(ifa); IFNET_RUNLOCK_NOSLEEP(); return (ifa); } /* * Network interface utility routines. * * Routines with ifa_ifwith* names take sockaddr *'s as * parameters. */ static void vnet_if_init(const void *unused __unused) { TAILQ_INIT(&V_ifnet); TAILQ_INIT(&V_ifg_head); IFNET_WLOCK(); if_grow(); /* create initial table */ IFNET_WUNLOCK(); vnet_if_clone_init(); } VNET_SYSINIT(vnet_if_init, SI_SUB_INIT_IF, SI_ORDER_SECOND, vnet_if_init, NULL); #ifdef VIMAGE static void vnet_if_uninit(const void *unused __unused) { VNET_ASSERT(TAILQ_EMPTY(&V_ifnet), ("%s:%d tailq &V_ifnet=%p " "not empty", __func__, __LINE__, &V_ifnet)); VNET_ASSERT(TAILQ_EMPTY(&V_ifg_head), ("%s:%d tailq &V_ifg_head=%p " "not empty", __func__, __LINE__, &V_ifg_head)); free((caddr_t)V_ifindex_table, M_IFNET); } VNET_SYSUNINIT(vnet_if_uninit, SI_SUB_INIT_IF, SI_ORDER_FIRST, vnet_if_uninit, NULL); #endif static void if_grow(void) { int oldlim; u_int n; struct ifnet **e; IFNET_WLOCK_ASSERT(); oldlim = V_if_indexlim; IFNET_WUNLOCK(); n = (oldlim << 1) * sizeof(*e); e = malloc(n, M_IFNET, M_WAITOK | M_ZERO); IFNET_WLOCK(); if (V_if_indexlim != oldlim) { free(e, M_IFNET); return; } if (V_ifindex_table != NULL) { memcpy((caddr_t)e, (caddr_t)V_ifindex_table, n/2); free((caddr_t)V_ifindex_table, M_IFNET); } V_if_indexlim <<= 1; V_ifindex_table = e; } /* * Registration/deregistration of interface types. A type can carry * common methods. Certain drivers depend on types to be loaded. 
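 *
 * (Hypothetical user, for illustration only; the field names follow the
 * accessors used below, the initializer values are assumptions.)
 *
 *	static struct iftype ether_iftype = {
 *		.ift_type = IFT_ETHER,
 *		.ift_attach = ether_ifattach,
 *		.ift_detach = ether_ifdetach,
 *	};
 *
 *	iftype_register(&ether_iftype);		(at module load)
 *	iftype_unregister(&ether_iftype);	(at module unload)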
*/ static SLIST_HEAD(, iftype) iftypehead = SLIST_HEAD_INITIALIZER(iftypehead); void iftype_register(struct iftype *ift) { IFNET_WLOCK(); SLIST_INSERT_HEAD(&iftypehead, ift, ift_next); IFNET_WUNLOCK(); } void iftype_unregister(struct iftype *ift) { IFNET_WLOCK(); SLIST_REMOVE(&iftypehead, ift, iftype, ift_next); IFNET_WUNLOCK(); } static struct iftype * iftype_find(ifType type) { struct iftype *ift; IFNET_RLOCK(); SLIST_FOREACH(ift, &iftypehead, ift_next) if (ift->ift_type == type) break; IFNET_RUNLOCK(); return (ift); } #define ifdrv_flags __ifdrv_stack_owned #define IFDRV_BLESSED 0x00000001 static void ifdriver_bless(struct ifdriver *ifdrv, struct iftype *ift) { if (ift != NULL) { #define COPY(op) if (ifdrv->ifdrv_ops.op == NULL) \ ifdrv->ifdrv_ops.op = ift->ift_ops.op COPY(ifop_input); COPY(ifop_transmit); COPY(ifop_output); COPY(ifop_ioctl); COPY(ifop_get_counter); COPY(ifop_init); COPY(ifop_qflush); COPY(ifop_resolvemulti); COPY(ifop_reassign); #undef COPY #define COPY(f) if (ifdrv->ifdrv_ ## f == 0) \ ifdrv->ifdrv_ ## f = ift->ift_ ## f COPY(hdrlen); COPY(addrlen); COPY(dlt); COPY(dlt_hdrlen); #undef COPY } /* * If driver has ifdrv_maxqlen defined, then it opts-in * for * generic software queue, and thus for default * ifop_qflush. */ if (ifdrv->ifdrv_maxqlen > 0) { KASSERT(ifdrv->ifdrv_ops.ifop_qflush == NULL, ("%s: fdrv_maxqlen > 0 and ifop_qflush", ifdrv->ifdrv_name)); ifdrv->ifdrv_ops.ifop_qflush = if_snd_qflush; } if (ifdrv->ifdrv_ops.ifop_get_counter == NULL) ifdrv->ifdrv_ops.ifop_get_counter = if_get_counter_default; #if defined(INET) || defined(INET6) /* Use defaults for TSO, if nothing is set. */ if (ifdrv->ifdrv_tsomax == NULL) ifdrv->ifdrv_tsomax = &default_tsomax; else KASSERT(ifdrv->ifdrv_tsomax->tsomax_bytes == 0 || ifdrv->ifdrv_tsomax->tsomax_bytes >= (IP_MAXPACKET / 8), ("%s: tsomax_bytes is outside of range", ifdrv->ifdrv_name)); #endif ifdrv->ifdrv_flags |= IFDRV_BLESSED; } /* * Allocate a struct ifnet and an index for an interface. A layer 2 * common structure will also be allocated if an allocation routine is * registered for the passed type. * * The only reason for this function to fail is failure to allocate a * unit number, which is possible only if driver does cloning. */ if_t if_attach(struct if_attach_args *ifat) { struct ifdriver *ifdrv; struct iftype *ift; struct ifnet *ifp; struct ifaddr *ifa; struct sockaddr_dl *sdl; int socksize, ifasize, namelen, masklen; KASSERT(ifat->ifat_version == IF_ATTACH_VERSION, ("%s: version %d, expected %d", __func__, ifat->ifat_version, IF_ATTACH_VERSION)); ifdrv = ifat->ifat_drv; ift = iftype_find(ifdrv->ifdrv_type); if ((ifdrv->ifdrv_flags & IFDRV_BLESSED) == 0) ifdriver_bless(ifdrv, ift); if (ifdrv->ifdrv_clone != NULL) { int error; error = ifc_alloc_unit(ifdrv->ifdrv_clone, &ifat->ifat_dunit); if (error) { log(LOG_WARNING, "%s unit allocation failure: %d\n", ifdrv->ifdrv_name, error); ifat->ifat_error = error; return (NULL); } } ifp = malloc(sizeof(struct ifnet), M_IFNET, M_WAITOK | M_ZERO); for (int i = 0; i < IFCOUNTERS; i++) ifp->if_counters[i] = counter_u64_alloc(M_WAITOK); #ifdef MAC mac_ifnet_init(ifp); mac_ifnet_create(ifp); #endif ifp->if_ops = &ifdrv->ifdrv_ops; ifp->if_drv = ifdrv; ifp->if_type = ift; #define COPY(f) ifp->if_ ## f = ifat->ifat_ ## f COPY(softc); COPY(mtu); COPY(flags); COPY(capabilities); COPY(capenable); COPY(hwassist); COPY(baudrate); #undef COPY if (ifat->ifat_tsomax) { /* * Driver wants dynamic tsomax on this interface, we * will allocate one and are responsible for freeing * it on detach. 
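 *
 * (Driver-side sketch, illustrative only: a driver that wants to change its
 * TSO limits after attach passes a template here and later calls
 * if_tsomax_update(); the numbers below are assumptions.)
 *
 *	struct iftsomax tso = {
 *		.tsomax_bytes = 65536,
 *		.tsomax_segcount = 32,
 *		.tsomax_segsize = 4096,
 *	};
 *	ifat.ifat_tsomax = &tso;	(copied into if_tsomax here)
 *	...
 *	if_tsomax_update(ifp, &tso);	(whenever the limits change)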
*/ KASSERT(ifat->ifat_tsomax->tsomax_bytes == 0 || ifat->ifat_tsomax->tsomax_bytes >= (IP_MAXPACKET / 8), ("%s: tsomax_bytes is outside of range", ifdrv->ifdrv_name)); ifp->if_tsomax = malloc(sizeof(struct iftsomax), M_IFNET, M_WAITOK); bcopy(ifat->ifat_tsomax, ifp->if_tsomax, sizeof(struct iftsomax)); } else ifp->if_tsomax = ifdrv->ifdrv_tsomax; if (ifdrv->ifdrv_maxqlen > 0) ifp->if_snd = if_snd_alloc(ifdrv->ifdrv_maxqlen); IF_ADDR_LOCK_INIT(ifp); IF_AFDATA_LOCK_INIT(ifp); TASK_INIT(&ifp->if_linktask, 0, do_link_state_change, ifp); TAILQ_INIT(&ifp->if_addrhead); TAILQ_INIT(&ifp->if_multiaddrs); TAILQ_INIT(&ifp->if_groups); /* XXXGL: there is no check that name is unique. */ ifp->if_dunit = ifat->ifat_dunit; if (ifat->ifat_name) strlcpy(ifp->if_xname, ifat->ifat_name, IFNAMSIZ); else if (ifat->ifat_dunit != IFAT_DUNIT_NONE) snprintf(ifp->if_xname, IFNAMSIZ, "%s%d", ifdrv->ifdrv_name, ifat->ifat_dunit); else strlcpy(ifp->if_xname, ifdrv->ifdrv_name, IFNAMSIZ); ifindex_alloc(ifp); refcount_init(&ifp->if_refcount, 1); /* * Allocate ifaddr to store link level address and name for this * interface. Always save enough space for any possiable name so * we can do a rename in place later. */ namelen = strlen(ifp->if_xname); masklen = offsetof(struct sockaddr_dl, sdl_data[0]) + IFNAMSIZ; socksize = masklen + ifdrv->ifdrv_addrlen; if (socksize < sizeof(*sdl)) socksize = sizeof(*sdl); socksize = roundup2(socksize, sizeof(long)); ifasize = sizeof(*ifa) + 2 * socksize; ifa = ifa_alloc(ifasize, M_WAITOK); sdl = (struct sockaddr_dl *)(ifa + 1); sdl->sdl_len = socksize; sdl->sdl_family = AF_LINK; bcopy(ifp->if_xname, sdl->sdl_data, namelen); sdl->sdl_nlen = namelen; sdl->sdl_index = ifp->if_index; sdl->sdl_type = ifdrv->ifdrv_type; ifp->if_addr = ifa; ifa->ifa_ifp = ifp; ifa->ifa_rtrequest = link_rtrequest; ifa->ifa_addr = (struct sockaddr *)sdl; sdl = (struct sockaddr_dl *)(socksize + (char *)sdl); ifa->ifa_netmask = (struct sockaddr *)sdl; sdl->sdl_len = masklen; while (namelen != 0) sdl->sdl_data[--namelen] = 0xff; TAILQ_INSERT_HEAD(&ifp->if_addrhead, ifa, ifa_link); if (ift) ift->ift_attach(ifp, ifat); bpfattach(ifp, ifdrv->ifdrv_dlt, ifdrv->ifdrv_dlt_hdrlen); if_attach_internal(ifp, 0); return (ifp); } /* * Do the actual work of freeing a struct ifnet, and layer 2 common * structure. This call is made when the last reference to an * interface is released. */ static void if_free_internal(struct ifnet *ifp) { KASSERT((ifp->if_flags & IFF_DYING), ("if_free_internal: interface not dying")); #ifdef MAC mac_ifnet_destroy(ifp); #endif /* MAC */ if (ifp->if_description != NULL) free(ifp->if_description, M_IFDESCR); IF_AFDATA_DESTROY(ifp); IF_ADDR_LOCK_DESTROY(ifp); if (ifp->if_snd) if_snd_free(ifp->if_snd); for (int i = 0; i < IFCOUNTERS; i++) counter_u64_free(ifp->if_counters[i]); if (ifp->if_tsomax != ifp->if_drv->ifdrv_tsomax) free(ifp->if_tsomax, M_IFNET); free(ifp, M_IFNET); } void if_mtap(if_t ifp, struct mbuf *m, void *data, u_int dlen) { if (!bpf_peers_present(ifp->if_bpf)) return; if (dlen == 0) { if (m->m_flags & M_VLANTAG) ether_vlan_mtap(ifp->if_bpf, m, NULL, 0); else bpf_mtap(ifp->if_bpf, m); } else bpf_mtap2(ifp->if_bpf, data, dlen, m); } /* * Interfaces to keep an ifnet type-stable despite the possibility of the * driver calling if_free(). If there are additional references, we defer * freeing the underlying data structure. */ void if_ref(struct ifnet *ifp) { /* We don't assert the ifnet list lock here, but arguably should. 
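 *
 * (Typical usage, shown only as an illustration: a caller that must keep
 * the ifnet across a sleep takes a reference and drops it later.)
 *
 *	ifp = ifnet_byindex_ref(idx);	(acquires a reference, or NULL)
 *	...
 *	if_rele(ifp);			(releases it; may free the ifnet)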
*/ refcount_acquire(&ifp->if_refcount); } void if_rele(struct ifnet *ifp) { if (!refcount_release(&ifp->if_refcount)) return; if_free_internal(ifp); } /* * Compute the least common TSO limit. */ void if_tsomax_common(const struct iftsomax *from, struct iftsomax *to) { /* * 1) If there is no limit currently, take the limit from * the network adapter. * * 2) If the network adapter has a limit below the current * limit, apply it. */ if (to->tsomax_bytes == 0 || (from->tsomax_bytes != 0 && from->tsomax_bytes < to->tsomax_bytes)) { to->tsomax_bytes = from->tsomax_bytes; } if (to->tsomax_segcount == 0 || (from->tsomax_segcount != 0 && from->tsomax_segcount < to->tsomax_segcount)) { to->tsomax_segcount = from->tsomax_segcount; } if (to->tsomax_segsize == 0 || (from->tsomax_segsize != 0 && from->tsomax_segsize < to->tsomax_segsize)) { to->tsomax_segsize = from->tsomax_segsize; } } /* * Update TSO limit of a network adapter. * * Returns zero if no change. Else non-zero. */ int if_tsomax_update(if_t ifp, const struct iftsomax *new) { int retval = 0; KASSERT(ifp->if_tsomax != ifp->if_drv->ifdrv_tsomax, ("%s: interface %s (driver %s) has static if_tsomax", __func__, ifp->if_xname, ifp->if_drv->ifdrv_name)); if (ifp->if_tsomax->tsomax_bytes != new->tsomax_bytes) { ifp->if_tsomax->tsomax_bytes = new->tsomax_bytes; retval++; } if (ifp->if_tsomax->tsomax_segsize != new->tsomax_segsize) { ifp->if_tsomax->tsomax_segsize = new->tsomax_segsize; retval++; } if (ifp->if_tsomax->tsomax_segcount != new->tsomax_segcount) { ifp->if_tsomax->tsomax_segcount = new->tsomax_segcount; retval++; } KASSERT(ifp->if_tsomax->tsomax_bytes == 0 || ifp->if_tsomax->tsomax_bytes >= (IP_MAXPACKET / 8), ("%s: tsomax_bytes is outside of range", ifp->if_xname)); return (retval); } static void if_attach_internal(struct ifnet *ifp, int vmove) { if (ifp->if_index == 0 || ifp != ifnet_byindex(ifp->if_index)) panic ("%s: BUG: if_attach called without if_alloc'd input()\n", ifp->if_xname); #ifdef VIMAGE ifp->if_vnet = curvnet; if (ifp->if_home_vnet == NULL) ifp->if_home_vnet = curvnet; #endif if_addgroup(ifp, IFG_ALL); getmicrotime(&ifp->if_lastchange); ifp->if_epoch = time_uptime; #ifdef VIMAGE /* * Update the interface index in the link layer address * of the interface. */ for (ifa = ifp->if_addr; ifa != NULL; ifa = TAILQ_NEXT(ifa, ifa_link)) { if (ifa->ifa_addr->sa_family == AF_LINK) { sdl = (struct sockaddr_dl *)ifa->ifa_addr; sdl->sdl_index = ifp->if_index; } } #endif IFNET_WLOCK(); TAILQ_INSERT_TAIL(&V_ifnet, ifp, if_link); #ifdef VIMAGE curvnet->vnet_ifcnt++; #endif IFNET_WUNLOCK(); if (domain_init_status >= 2) if_attachdomain1(ifp); EVENTHANDLER_INVOKE(ifnet_arrival_event, ifp); if (IS_DEFAULT_VNET(curvnet)) devctl_notify("IFNET", ifp->if_xname, "ATTACH", NULL); /* Announce the interface. */ rt_ifannouncemsg(ifp, IFAN_ARRIVAL); } static void if_attachdomain(void *dummy) { struct ifnet *ifp; TAILQ_FOREACH(ifp, &V_ifnet, if_link) if_attachdomain1(ifp); } SYSINIT(domainifattach, SI_SUB_PROTO_IFATTACHDOMAIN, SI_ORDER_SECOND, if_attachdomain, NULL); static void if_attachdomain1(struct ifnet *ifp) { struct domain *dp; /* * Since dp->dom_ifattach calls malloc() with M_WAITOK, we * cannot lock ifp->if_afdata initialization, entirely. 
*/ if (IF_AFDATA_TRYLOCK(ifp) == 0) return; if (ifp->if_afdata_initialized >= domain_init_status) { IF_AFDATA_UNLOCK(ifp); log(LOG_WARNING, "%s called more than once on %s\n", __func__, ifp->if_xname); return; } ifp->if_afdata_initialized = domain_init_status; IF_AFDATA_UNLOCK(ifp); /* address family dependent data region */ bzero(ifp->if_afdata, sizeof(ifp->if_afdata)); for (dp = domains; dp; dp = dp->dom_next) { if (dp->dom_ifattach) ifp->if_afdata[dp->dom_family] = (*dp->dom_ifattach)(ifp); } } /* * Remove any unicast or broadcast network addresses from an interface. */ void if_purgeaddrs(struct ifnet *ifp) { struct ifaddr *ifa, *next; TAILQ_FOREACH_SAFE(ifa, &ifp->if_addrhead, ifa_link, next) { if (ifa->ifa_addr->sa_family == AF_LINK) continue; #ifdef INET /* XXX: Ugly!! ad hoc just for INET */ if (ifa->ifa_addr->sa_family == AF_INET) { struct ifaliasreq ifr; bzero(&ifr, sizeof(ifr)); ifr.ifra_addr = *ifa->ifa_addr; if (ifa->ifa_dstaddr) ifr.ifra_broadaddr = *ifa->ifa_dstaddr; if (in_control(NULL, SIOCDIFADDR, (caddr_t)&ifr, ifp, NULL) == 0) continue; } #endif /* INET */ #ifdef INET6 if (ifa->ifa_addr->sa_family == AF_INET6) { in6_purgeaddr(ifa); /* ifp_addrhead is already updated */ continue; } #endif /* INET6 */ TAILQ_REMOVE(&ifp->if_addrhead, ifa, ifa_link); ifa_free(ifa); } } /* * Remove any multicast network addresses from an interface when an ifnet * is going away. */ static void if_purgemaddrs(struct ifnet *ifp) { struct ifmultiaddr *ifma; struct ifmultiaddr *next; IF_ADDR_WLOCK(ifp); TAILQ_FOREACH_SAFE(ifma, &ifp->if_multiaddrs, ifma_link, next) if_delmulti_locked(ifp, ifma, 1); IF_ADDR_WUNLOCK(ifp); } /* * Detach an interface, removing it from the list of "active" interfaces. * If vmove flag is set on entry to if_detach_internal(), perform only a * limited subset of cleanup tasks, given that we are moving an ifnet from * one vnet to another, where it must be fully operational. * * XXXRW: There are some significant questions about event ordering, and * how to prevent things from starting to use the interface during detach. */ void if_detach(if_t ifp) { ifp->if_flags |= IFF_DYING; /* XXX: Locking */ bpfdetach(ifp); CURVNET_SET_QUIET(ifp->if_vnet); if_detach_internal(ifp, 0); IFNET_WLOCK(); KASSERT(ifp == ifnet_byindex_locked(ifp->if_index), ("%s: freeing unallocated ifnet", ifp->if_xname)); ifindex_free(ifp->if_index); IFNET_WUNLOCK(); if (ifp->if_drv->ifdrv_clone != NULL) ifc_free_unit(ifp->if_drv->ifdrv_clone, ifp->if_dunit); if (refcount_release(&ifp->if_refcount)) if_free_internal(ifp); CURVNET_RESTORE(); } static void if_detach_internal(struct ifnet *ifp, int vmove) { struct ifaddr *ifa; struct radix_node_head *rnh; int i, j; struct domain *dp; struct ifnet *iter; int found = 0; IFNET_WLOCK(); TAILQ_FOREACH(iter, &V_ifnet, if_link) if (iter == ifp) { TAILQ_REMOVE(&V_ifnet, ifp, if_link); found = 1; break; } #ifdef VIMAGE if (found) curvnet->vnet_ifcnt--; #endif IFNET_WUNLOCK(); if (!found) { if (vmove) panic("%s: ifp=%p not on the ifnet tailq %p", __func__, ifp, &V_ifnet); else return; /* XXX this should panic as well? */ } /* * Remove/wait for pending events. */ taskqueue_drain(taskqueue_swi, &ifp->if_linktask); /* * Remove routes and flush queues. */ if_down(ifp); #ifdef ALTQ if (ALTQ_IS_ENABLED(&ifp->if_snd)) altq_disable(&ifp->if_snd); if (ALTQ_IS_ATTACHED(&ifp->if_snd)) altq_detach(&ifp->if_snd); #endif if_purgeaddrs(ifp); #ifdef INET in_ifdetach(ifp); #endif #ifdef INET6 /* * Remove all IPv6 kernel structs related to ifp. 
This should be done * before removing routing entries below, since IPv6 interface direct * routes are expected to be removed by the IPv6-specific kernel API. * Otherwise, the kernel will detect some inconsistency and bark it. */ in6_ifdetach(ifp); #endif if_purgemaddrs(ifp); /* Announce that the interface is gone. */ rt_ifannouncemsg(ifp, IFAN_DEPARTURE); EVENTHANDLER_INVOKE(ifnet_departure_event, ifp); if (IS_DEFAULT_VNET(curvnet)) devctl_notify("IFNET", ifp->if_xname, "DETACH", NULL); if (!vmove) { struct iftype *ift = ifp->if_type; if (ift != NULL && ift->ift_detach != NULL) ift->ift_detach(ifp); /* * Prevent further calls into the device driver via ifnet. */ ifp->if_ops = &ifdead_ops; /* * Remove link ifaddr pointer and maybe decrement if_index. * Clean up all addresses. */ ifp->if_addr = NULL; /* We can now free link ifaddr. */ if (!TAILQ_EMPTY(&ifp->if_addrhead)) { ifa = TAILQ_FIRST(&ifp->if_addrhead); TAILQ_REMOVE(&ifp->if_addrhead, ifa, ifa_link); ifa_free(ifa); } } /* * Delete all remaining routes using this interface * Unfortuneatly the only way to do this is to slog through * the entire routing table looking for routes which point * to this interface...oh well... */ for (i = 1; i <= AF_MAX; i++) { for (j = 0; j < rt_numfibs; j++) { rnh = rt_tables_get_rnh(j, i); if (rnh == NULL) continue; RADIX_NODE_HEAD_LOCK(rnh); (void) rnh->rnh_walktree(rnh, if_rtdel, ifp); RADIX_NODE_HEAD_UNLOCK(rnh); } } if_delgroups(ifp); /* * We cannot hold the lock over dom_ifdetach calls as they might * sleep, for example trying to drain a callout, thus open up the * theoretical race with re-attaching. */ IF_AFDATA_LOCK(ifp); i = ifp->if_afdata_initialized; ifp->if_afdata_initialized = 0; IF_AFDATA_UNLOCK(ifp); for (dp = domains; i > 0 && dp; dp = dp->dom_next) { if (dp->dom_ifdetach && ifp->if_afdata[dp->dom_family]) (*dp->dom_ifdetach)(ifp, ifp->if_afdata[dp->dom_family]); } } #ifdef VIMAGE /* * if_vmove() performs a limited version of if_detach() in current * vnet and if_attach()es the ifnet to the vnet specified as 2nd arg. * An attempt is made to shrink if_index in current vnet, find an * unused if_index in target vnet and calls if_grow() if necessary, * and finally find an unused if_xname for the target vnet. */ void if_vmove(struct ifnet *ifp, struct vnet *new_vnet) { /* * Detach from current vnet, but preserve LLADDR info, do not * mark as dead etc. so that the ifnet can be reattached later. */ if_detach_internal(ifp, 1); /* * Unlink the ifnet from ifindex_table[] in current vnet, and shrink * the if_index for that vnet if possible. * * NOTE: IFNET_WLOCK/IFNET_WUNLOCK() are assumed to be unvirtualized, * or we'd lock on one vnet and unlock on another. */ IFNET_WLOCK(); ifindex_free(ifp->if_index); IFNET_WUNLOCK(); /* * Perform interface-specific reassignment tasks, if provided by * the driver. */ if (ifp->if_reassign != NULL) ifp->if_reassign(ifp, new_vnet, NULL); /* * Switch to the context of the target vnet. */ CURVNET_SET_QUIET(new_vnet); IFNET_WLOCK(); ifp->if_index = ifindex_alloc(); ifnet_setbyindex_locked(ifp->if_index, ifp); IFNET_WUNLOCK(); if_attach_internal(ifp, 1); CURVNET_RESTORE(); } /* * Move an ifnet to or from another child prison/vnet, specified by the jail id. */ static int if_vmove_loan(struct thread *td, struct ifnet *ifp, char *ifname, int jid) { struct prison *pr; struct ifnet *difp; /* Try to find the prison within our visibility. 
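 * Judging from the calls below, prison_find_child() returns the prison
 * with its mutex held, which is why an extra hold is taken and pr_mtx is
 * dropped before anything that may sleep (an observation from this code,
 * not a statement of the jail KPI contract).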
*/ sx_slock(&allprison_lock); pr = prison_find_child(td->td_ucred->cr_prison, jid); sx_sunlock(&allprison_lock); if (pr == NULL) return (ENXIO); prison_hold_locked(pr); mtx_unlock(&pr->pr_mtx); /* Do not try to move the iface from and to the same prison. */ if (pr->pr_vnet == ifp->if_vnet) { prison_free(pr); return (EEXIST); } /* Make sure the named iface does not exists in the dst. prison/vnet. */ /* XXX Lock interfaces to avoid races. */ CURVNET_SET_QUIET(pr->pr_vnet); difp = ifunit(ifname); CURVNET_RESTORE(); if (difp != NULL) { prison_free(pr); return (EEXIST); } /* Move the interface into the child jail/vnet. */ if_vmove(ifp, pr->pr_vnet); /* Report the new if_xname back to the userland. */ sprintf(ifname, "%s", ifp->if_xname); prison_free(pr); return (0); } static int if_vmove_reclaim(struct thread *td, char *ifname, int jid) { struct prison *pr; struct vnet *vnet_dst; struct ifnet *ifp; /* Try to find the prison within our visibility. */ sx_slock(&allprison_lock); pr = prison_find_child(td->td_ucred->cr_prison, jid); sx_sunlock(&allprison_lock); if (pr == NULL) return (ENXIO); prison_hold_locked(pr); mtx_unlock(&pr->pr_mtx); /* Make sure the named iface exists in the source prison/vnet. */ CURVNET_SET(pr->pr_vnet); ifp = ifunit(ifname); /* XXX Lock to avoid races. */ if (ifp == NULL) { CURVNET_RESTORE(); prison_free(pr); return (ENXIO); } /* Do not try to move the iface from and to the same prison. */ vnet_dst = TD_TO_VNET(td); if (vnet_dst == ifp->if_vnet) { CURVNET_RESTORE(); prison_free(pr); return (EEXIST); } /* Get interface back from child jail/vnet. */ if_vmove(ifp, vnet_dst); CURVNET_RESTORE(); /* Report the new if_xname back to the userland. */ sprintf(ifname, "%s", ifp->if_xname); prison_free(pr); return (0); } #endif /* VIMAGE */ /* * Add a group to an interface */ int if_addgroup(struct ifnet *ifp, const char *groupname) { struct ifg_list *ifgl; struct ifg_group *ifg = NULL; struct ifg_member *ifgm; int new = 0; if (groupname[0] && groupname[strlen(groupname) - 1] >= '0' && groupname[strlen(groupname) - 1] <= '9') return (EINVAL); IFNET_WLOCK(); TAILQ_FOREACH(ifgl, &ifp->if_groups, ifgl_next) if (!strcmp(ifgl->ifgl_group->ifg_group, groupname)) { IFNET_WUNLOCK(); return (EEXIST); } if ((ifgl = (struct ifg_list *)malloc(sizeof(struct ifg_list), M_TEMP, M_NOWAIT)) == NULL) { IFNET_WUNLOCK(); return (ENOMEM); } if ((ifgm = (struct ifg_member *)malloc(sizeof(struct ifg_member), M_TEMP, M_NOWAIT)) == NULL) { free(ifgl, M_TEMP); IFNET_WUNLOCK(); return (ENOMEM); } TAILQ_FOREACH(ifg, &V_ifg_head, ifg_next) if (!strcmp(ifg->ifg_group, groupname)) break; if (ifg == NULL) { if ((ifg = (struct ifg_group *)malloc(sizeof(struct ifg_group), M_TEMP, M_NOWAIT)) == NULL) { free(ifgl, M_TEMP); free(ifgm, M_TEMP); IFNET_WUNLOCK(); return (ENOMEM); } strlcpy(ifg->ifg_group, groupname, sizeof(ifg->ifg_group)); ifg->ifg_refcnt = 0; TAILQ_INIT(&ifg->ifg_members); TAILQ_INSERT_TAIL(&V_ifg_head, ifg, ifg_next); new = 1; } ifg->ifg_refcnt++; ifgl->ifgl_group = ifg; ifgm->ifgm_ifp = ifp; IF_ADDR_WLOCK(ifp); TAILQ_INSERT_TAIL(&ifg->ifg_members, ifgm, ifgm_next); TAILQ_INSERT_TAIL(&ifp->if_groups, ifgl, ifgl_next); IF_ADDR_WUNLOCK(ifp); IFNET_WUNLOCK(); if (new) EVENTHANDLER_INVOKE(group_attach_event, ifg); EVENTHANDLER_INVOKE(group_change_event, groupname); return (0); } /* * Remove a group from an interface */ int if_delgroup(struct ifnet *ifp, const char *groupname) { struct ifg_list *ifgl; struct ifg_member *ifgm; IFNET_WLOCK(); TAILQ_FOREACH(ifgl, &ifp->if_groups, ifgl_next) if 
(!strcmp(ifgl->ifgl_group->ifg_group, groupname)) break; if (ifgl == NULL) { IFNET_WUNLOCK(); return (ENOENT); } IF_ADDR_WLOCK(ifp); TAILQ_REMOVE(&ifp->if_groups, ifgl, ifgl_next); IF_ADDR_WUNLOCK(ifp); TAILQ_FOREACH(ifgm, &ifgl->ifgl_group->ifg_members, ifgm_next) if (ifgm->ifgm_ifp == ifp) break; if (ifgm != NULL) { TAILQ_REMOVE(&ifgl->ifgl_group->ifg_members, ifgm, ifgm_next); free(ifgm, M_TEMP); } if (--ifgl->ifgl_group->ifg_refcnt == 0) { TAILQ_REMOVE(&V_ifg_head, ifgl->ifgl_group, ifg_next); IFNET_WUNLOCK(); EVENTHANDLER_INVOKE(group_detach_event, ifgl->ifgl_group); free(ifgl->ifgl_group, M_TEMP); } else IFNET_WUNLOCK(); free(ifgl, M_TEMP); EVENTHANDLER_INVOKE(group_change_event, groupname); return (0); } /* * Remove an interface from all groups */ static void if_delgroups(struct ifnet *ifp) { struct ifg_list *ifgl; struct ifg_member *ifgm; char groupname[IFNAMSIZ]; IFNET_WLOCK(); while (!TAILQ_EMPTY(&ifp->if_groups)) { ifgl = TAILQ_FIRST(&ifp->if_groups); strlcpy(groupname, ifgl->ifgl_group->ifg_group, IFNAMSIZ); IF_ADDR_WLOCK(ifp); TAILQ_REMOVE(&ifp->if_groups, ifgl, ifgl_next); IF_ADDR_WUNLOCK(ifp); TAILQ_FOREACH(ifgm, &ifgl->ifgl_group->ifg_members, ifgm_next) if (ifgm->ifgm_ifp == ifp) break; if (ifgm != NULL) { TAILQ_REMOVE(&ifgl->ifgl_group->ifg_members, ifgm, ifgm_next); free(ifgm, M_TEMP); } if (--ifgl->ifgl_group->ifg_refcnt == 0) { TAILQ_REMOVE(&V_ifg_head, ifgl->ifgl_group, ifg_next); IFNET_WUNLOCK(); EVENTHANDLER_INVOKE(group_detach_event, ifgl->ifgl_group); free(ifgl->ifgl_group, M_TEMP); } else IFNET_WUNLOCK(); free(ifgl, M_TEMP); EVENTHANDLER_INVOKE(group_change_event, groupname); IFNET_WLOCK(); } IFNET_WUNLOCK(); } /* * Stores all groups from an interface in memory pointed * to by data */ static int if_getgroup(struct ifgroupreq *data, struct ifnet *ifp) { int len, error; struct ifg_list *ifgl; struct ifg_req ifgrq, *ifgp; struct ifgroupreq *ifgr = data; if (ifgr->ifgr_len == 0) { IF_ADDR_RLOCK(ifp); TAILQ_FOREACH(ifgl, &ifp->if_groups, ifgl_next) ifgr->ifgr_len += sizeof(struct ifg_req); IF_ADDR_RUNLOCK(ifp); return (0); } len = ifgr->ifgr_len; ifgp = ifgr->ifgr_groups; /* XXX: wire */ IF_ADDR_RLOCK(ifp); TAILQ_FOREACH(ifgl, &ifp->if_groups, ifgl_next) { if (len < sizeof(ifgrq)) { IF_ADDR_RUNLOCK(ifp); return (EINVAL); } bzero(&ifgrq, sizeof ifgrq); strlcpy(ifgrq.ifgrq_group, ifgl->ifgl_group->ifg_group, sizeof(ifgrq.ifgrq_group)); if ((error = copyout(&ifgrq, ifgp, sizeof(struct ifg_req)))) { IF_ADDR_RUNLOCK(ifp); return (error); } len -= sizeof(ifgrq); ifgp++; } IF_ADDR_RUNLOCK(ifp); return (0); } /* * Stores all members of a group in memory pointed to by data */ static int if_getgroupmembers(struct ifgroupreq *data) { struct ifgroupreq *ifgr = data; struct ifg_group *ifg; struct ifg_member *ifgm; struct ifg_req ifgrq, *ifgp; int len, error; IFNET_RLOCK(); TAILQ_FOREACH(ifg, &V_ifg_head, ifg_next) if (!strcmp(ifg->ifg_group, ifgr->ifgr_name)) break; if (ifg == NULL) { IFNET_RUNLOCK(); return (ENOENT); } if (ifgr->ifgr_len == 0) { TAILQ_FOREACH(ifgm, &ifg->ifg_members, ifgm_next) ifgr->ifgr_len += sizeof(ifgrq); IFNET_RUNLOCK(); return (0); } len = ifgr->ifgr_len; ifgp = ifgr->ifgr_groups; TAILQ_FOREACH(ifgm, &ifg->ifg_members, ifgm_next) { if (len < sizeof(ifgrq)) { IFNET_RUNLOCK(); return (EINVAL); } bzero(&ifgrq, sizeof ifgrq); strlcpy(ifgrq.ifgrq_member, ifgm->ifgm_ifp->if_xname, sizeof(ifgrq.ifgrq_member)); if ((error = copyout(&ifgrq, ifgp, sizeof(struct ifg_req)))) { IFNET_RUNLOCK(); return (error); } len -= sizeof(ifgrq); ifgp++; } IFNET_RUNLOCK(); return 
(0); } /* * Delete Routes for a Network Interface * * Called for each routing entry via the rnh->rnh_walktree() call above * to delete all route entries referencing a detaching network interface. * * Arguments: * rn pointer to node in the routing table * arg argument passed to rnh->rnh_walktree() - detaching interface * * Returns: * 0 successful * errno failed - reason indicated * */ static int if_rtdel(struct radix_node *rn, void *arg) { struct rtentry *rt = (struct rtentry *)rn; struct ifnet *ifp = arg; int err; if (rt->rt_ifp == ifp) { /* * Protect (sorta) against walktree recursion problems * with cloned routes */ if ((rt->rt_flags & RTF_UP) == 0) return (0); err = rtrequest_fib(RTM_DELETE, rt_key(rt), rt->rt_gateway, rt_mask(rt), rt->rt_flags|RTF_RNH_LOCKED|RTF_PINNED, (struct rtentry **) NULL, rt->rt_fibnum); if (err) { log(LOG_WARNING, "if_rtdel: error %d\n", err); } } return (0); } /* * Managing different integer values and bitmasks of an ifnet. */ static void if_getfeature(if_t ifp, ift_feature f, uint32_t **f32, uint64_t **f64, void **ptr) { if (f32) *f32 = NULL; if (f64) *f64 = NULL; if (ptr) *ptr = NULL; switch (f) { case IF_FLAGS: *f32 = &ifp->if_flags; break; - case IF_CAPABILITIES: - *f32 = &ifp->if_capabilities; - break; - case IF_CAPENABLE: - *f32 = &ifp->if_capenable; - break; - case IF_MTU: - *f32 = &ifp->if_mtu; - break; case IF_FIB: *f32 = &ifp->if_fib; break; - case IF_HWASSIST: - *f64 = &ifp->if_hwassist; - break; case IF_BAUDRATE: *f64 = &ifp->if_baudrate; break; case IF_DRIVER_SOFTC: *ptr = ifp->if_softc; break; case IF_LLADDR: *ptr = LLADDR((struct sockaddr_dl *)(ifp->if_addr->ifa_addr)); break; case IF_BPF: *ptr = ifp->if_bpf; break; case IF_NAME: *ptr = ifp->if_xname; break; default: panic("%s: unknown feature %d", __func__, f); }; } -/* Changing some flags may require some actions. */ -static void -if_set_special(if_t ifp, ift_feature f) -{ - - switch (f) { - case IF_CAPENABLE: - /* - * Modifying if_capenable may require extra actions, e.g. - * reconfiguring capenable on vlans. 
- */ - if (ifp->if_vlantrunk != NULL) - (*vlan_trunk_cap_p)(ifp); - break; - default: - break; - } -} - void if_set(if_t ifp, ift_feature f, uint64_t set) { uint64_t *f64; uint32_t *f32; if_getfeature(ifp, f, &f32, &f64, NULL); KASSERT(f32 != NULL || f64 != NULL, ("%s: no feature %d", __func__, f)); if (f32 != NULL) { KASSERT(set <= UINT32_MAX, ("%s: value of 0x%jx for feature %d", __func__, (uintmax_t )set, f)); *f32 = set; } else { *f64 = set; } - if_set_special(ifp, f); } uint64_t if_flagbits(if_t ifp, ift_feature f, uint64_t set, uint64_t clr, uint64_t xor) { - uint64_t *f64, rv, old; + uint64_t *f64; uint32_t *f32; if_getfeature(ifp, f, &f32, &f64, NULL); if (f32 != NULL) { KASSERT(set <= UINT32_MAX, ("%s: value of 0x%jx for feature %d", __func__, (uintmax_t )set, f)); KASSERT(clr <= UINT32_MAX, ("%s: value of 0x%jx for feature %d", __func__, (uintmax_t )clr, f)); KASSERT(xor <= UINT32_MAX, ("%s: value of 0x%jx for feature %d", __func__, (uintmax_t )xor, f)); - old = *f32; *f32 |= set; *f32 &= ~clr; *f32 ^= xor; - rv = *f32; + return (*f32); } else { - old = *f64; *f64 |= set; *f64 &= ~clr; *f64 ^= xor; - rv = *f64; + return (*f64); } - - if (rv != old) - if_set_special(ifp, f); - - return (rv); } uint64_t if_get(if_t ifp, ift_feature f) { uint64_t *f64; uint32_t *f32; if_getfeature(ifp, f, &f32, &f64, NULL); KASSERT(f32 != NULL || f64 != NULL, ("%s: no feature %d", __func__, f)); if (f64 != NULL) return (*f64); if (f32 != NULL) return (*f32); return (EDOOFUS); } void * if_getsoftc(if_t ifp, ift_feature f) { void *ptr; if_getfeature(ifp, f, NULL, NULL, &ptr); return (ptr); } /* * Return counter values from counter(9)s stored in ifnet. */ uint64_t if_get_counter_default(struct ifnet *ifp, ift_counter cnt) { KASSERT(cnt < IFCOUNTERS, ("%s: invalid cnt %d", __func__, cnt)); return (counter_u64_fetch(ifp->if_counters[cnt])); } /* * Increase an ifnet counter. Usually used for counters shared * between the stack and a driver, but function supports them all. */ void if_inc_counter(struct ifnet *ifp, ift_counter cnt, int64_t inc) { KASSERT(cnt < IFCOUNTERS, ("%s: invalid cnt %d", __func__, cnt)); counter_u64_add(ifp->if_counters[cnt], inc); } /* * Account successful transmission of an mbuf. */ void if_inc_txcounters(struct ifnet *ifp, struct mbuf *m) { counter_u64_add(ifp->if_counters[IFCOUNTER_OBYTES], m->m_pkthdr.len); counter_u64_add(ifp->if_counters[IFCOUNTER_OPACKETS], 1); if (m->m_flags & M_MCAST) counter_u64_add(ifp->if_counters[IFCOUNTER_OMCASTS], 1); } /* * Copy data from ifnet to userland API structure if_data. 
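 *
 * A hedged sketch of a typical consumer (illustrative only; 'uptr'
 * stands for some caller-supplied userland buffer):
 *
 *	struct if_data ifd;
 *
 *	if_data_copy(ifp, &ifd);
 *	error = copyout(&ifd, uptr, sizeof(ifd));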
*/ void if_data_copy(struct ifnet *ifp, struct if_data *ifd) { ifd->ifi_type = if_type(ifp); ifd->ifi_physical = 0; ifd->ifi_addrlen = if_addrlen(ifp); ifd->ifi_hdrlen = ifp->if_drv->ifdrv_hdrlen; ifd->ifi_link_state = ifp->if_link_state; ifd->ifi_vhid = 0; ifd->ifi_datalen = sizeof(struct if_data); ifd->ifi_mtu = ifp->if_mtu; ifd->ifi_metric = ifp->if_metric; ifd->ifi_baudrate = ifp->if_baudrate; ifd->ifi_hwassist = ifp->if_hwassist; ifd->ifi_epoch = ifp->if_epoch; ifd->ifi_lastchange = ifp->if_lastchange; ifd->ifi_ipackets = if_get_counter(ifp, IFCOUNTER_IPACKETS); ifd->ifi_ierrors = if_get_counter(ifp, IFCOUNTER_IERRORS); ifd->ifi_opackets = if_get_counter(ifp, IFCOUNTER_OPACKETS); ifd->ifi_oerrors = if_get_counter(ifp, IFCOUNTER_OERRORS); ifd->ifi_collisions = if_get_counter(ifp, IFCOUNTER_COLLISIONS); ifd->ifi_ibytes = if_get_counter(ifp, IFCOUNTER_IBYTES); ifd->ifi_obytes = if_get_counter(ifp, IFCOUNTER_OBYTES); ifd->ifi_imcasts = if_get_counter(ifp, IFCOUNTER_IMCASTS); ifd->ifi_omcasts = if_get_counter(ifp, IFCOUNTER_OMCASTS); ifd->ifi_iqdrops = if_get_counter(ifp, IFCOUNTER_IQDROPS); ifd->ifi_oqdrops = if_get_counter(ifp, IFCOUNTER_OQDROPS); ifd->ifi_noproto = if_get_counter(ifp, IFCOUNTER_NOPROTO); } /* * Initialization, destruction and refcounting functions for ifaddrs. */ struct ifaddr * ifa_alloc(size_t size, int flags) { struct ifaddr *ifa; KASSERT(size >= sizeof(struct ifaddr), ("%s: invalid size %zu", __func__, size)); ifa = malloc(size, M_IFADDR, M_ZERO | flags); if (ifa == NULL) return (NULL); if ((ifa->ifa_opackets = counter_u64_alloc(flags)) == NULL) goto fail; if ((ifa->ifa_ipackets = counter_u64_alloc(flags)) == NULL) goto fail; if ((ifa->ifa_obytes = counter_u64_alloc(flags)) == NULL) goto fail; if ((ifa->ifa_ibytes = counter_u64_alloc(flags)) == NULL) goto fail; refcount_init(&ifa->ifa_refcnt, 1); return (ifa); fail: /* free(NULL) is okay */ counter_u64_free(ifa->ifa_opackets); counter_u64_free(ifa->ifa_ipackets); counter_u64_free(ifa->ifa_obytes); counter_u64_free(ifa->ifa_ibytes); free(ifa, M_IFADDR); return (NULL); } void ifa_ref(struct ifaddr *ifa) { refcount_acquire(&ifa->ifa_refcnt); } void ifa_free(struct ifaddr *ifa) { if (refcount_release(&ifa->ifa_refcnt)) { counter_u64_free(ifa->ifa_opackets); counter_u64_free(ifa->ifa_ipackets); counter_u64_free(ifa->ifa_obytes); counter_u64_free(ifa->ifa_ibytes); free(ifa, M_IFADDR); } } int ifa_add_loopback_route(struct ifaddr *ifa, struct sockaddr *ia) { int error = 0; struct rtentry *rt = NULL; struct rt_addrinfo info; static struct sockaddr_dl null_sdl = {sizeof(null_sdl), AF_LINK}; bzero(&info, sizeof(info)); info.rti_ifp = V_loif; info.rti_flags = ifa->ifa_flags | RTF_HOST | RTF_STATIC; info.rti_info[RTAX_DST] = ia; info.rti_info[RTAX_GATEWAY] = (struct sockaddr *)&null_sdl; error = rtrequest1_fib(RTM_ADD, &info, &rt, ifa->ifa_ifp->if_fib); if (error == 0 && rt != NULL) { RT_LOCK(rt); ((struct sockaddr_dl *)rt->rt_gateway)->sdl_type = if_type(ifa->ifa_ifp); ((struct sockaddr_dl *)rt->rt_gateway)->sdl_index = ifa->ifa_ifp->if_index; RT_REMREF(rt); RT_UNLOCK(rt); } else if (error != 0) log(LOG_DEBUG, "%s: insertion failed: %u\n", __func__, error); return (error); } int ifa_del_loopback_route(struct ifaddr *ifa, struct sockaddr *ia) { int error = 0; struct rt_addrinfo info; struct sockaddr_dl null_sdl; bzero(&null_sdl, sizeof(null_sdl)); null_sdl.sdl_len = sizeof(null_sdl); null_sdl.sdl_family = AF_LINK; null_sdl.sdl_type = if_type(ifa->ifa_ifp); null_sdl.sdl_index = ifa->ifa_ifp->if_index; bzero(&info, sizeof(info)); 
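	/*
	 * Describe the host route being removed: destination 'ia' reached
	 * through an AF_LINK gateway on this interface, mirroring the setup
	 * done in ifa_add_loopback_route() above.
	 */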
info.rti_flags = ifa->ifa_flags | RTF_HOST | RTF_STATIC; info.rti_info[RTAX_DST] = ia; info.rti_info[RTAX_GATEWAY] = (struct sockaddr *)&null_sdl; error = rtrequest1_fib(RTM_DELETE, &info, NULL, ifa->ifa_ifp->if_fib); if (error != 0) log(LOG_DEBUG, "%s: deletion failed: %u\n", __func__, error); return (error); } int ifa_switch_loopback_route(struct ifaddr *ifa, struct sockaddr *sa, int fib) { struct rtentry *rt; rt = rtalloc1_fib(sa, 0, 0, fib); if (rt == NULL) { log(LOG_DEBUG, "%s: fail", __func__); return (EHOSTUNREACH); } ((struct sockaddr_dl *)rt->rt_gateway)->sdl_type = if_type(ifa->ifa_ifp); ((struct sockaddr_dl *)rt->rt_gateway)->sdl_index = ifa->ifa_ifp->if_index; RTFREE_LOCKED(rt); return (0); } /* * XXX: Because sockaddr_dl has deeper structure than the sockaddr * structs used to represent other address families, it is necessary * to perform a different comparison. */ #define sa_dl_equal(a1, a2) \ ((((struct sockaddr_dl *)(a1))->sdl_len == \ ((struct sockaddr_dl *)(a2))->sdl_len) && \ (bcmp(LLADDR((struct sockaddr_dl *)(a1)), \ LLADDR((struct sockaddr_dl *)(a2)), \ ((struct sockaddr_dl *)(a1))->sdl_alen) == 0)) /* * Locate an interface based on a complete address. */ /*ARGSUSED*/ static struct ifaddr * ifa_ifwithaddr_internal(struct sockaddr *addr, int getref) { struct ifnet *ifp; struct ifaddr *ifa; IFNET_RLOCK_NOSLEEP(); TAILQ_FOREACH(ifp, &V_ifnet, if_link) { IF_ADDR_RLOCK(ifp); TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) { if (ifa->ifa_addr->sa_family != addr->sa_family) continue; if (sa_equal(addr, ifa->ifa_addr)) { if (getref) ifa_ref(ifa); IF_ADDR_RUNLOCK(ifp); goto done; } /* IP6 doesn't have broadcast */ if ((ifp->if_flags & IFF_BROADCAST) && ifa->ifa_broadaddr && ifa->ifa_broadaddr->sa_len != 0 && sa_equal(ifa->ifa_broadaddr, addr)) { if (getref) ifa_ref(ifa); IF_ADDR_RUNLOCK(ifp); goto done; } } IF_ADDR_RUNLOCK(ifp); } ifa = NULL; done: IFNET_RUNLOCK_NOSLEEP(); return (ifa); } struct ifaddr * ifa_ifwithaddr(struct sockaddr *addr) { return (ifa_ifwithaddr_internal(addr, 1)); } int ifa_ifwithaddr_check(struct sockaddr *addr) { return (ifa_ifwithaddr_internal(addr, 0) != NULL); } /* * Locate an interface based on the broadcast address. */ /* ARGSUSED */ struct ifaddr * ifa_ifwithbroadaddr(struct sockaddr *addr, int fibnum) { struct ifnet *ifp; struct ifaddr *ifa; IFNET_RLOCK_NOSLEEP(); TAILQ_FOREACH(ifp, &V_ifnet, if_link) { if ((fibnum != RT_ALL_FIBS) && (ifp->if_fib != fibnum)) continue; IF_ADDR_RLOCK(ifp); TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) { if (ifa->ifa_addr->sa_family != addr->sa_family) continue; if ((ifp->if_flags & IFF_BROADCAST) && ifa->ifa_broadaddr && ifa->ifa_broadaddr->sa_len != 0 && sa_equal(ifa->ifa_broadaddr, addr)) { ifa_ref(ifa); IF_ADDR_RUNLOCK(ifp); goto done; } } IF_ADDR_RUNLOCK(ifp); } ifa = NULL; done: IFNET_RUNLOCK_NOSLEEP(); return (ifa); } /* * Locate the point to point interface with a given destination address. 
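 *
 * A non-NULL result is returned with a reference held; a hedged sketch of
 * the caller obligation (illustrative only):
 *
 *	ifa = ifa_ifwithdstaddr(dst, RT_ALL_FIBS);
 *	if (ifa != NULL) {
 *		... use ifa ...
 *		ifa_free(ifa);
 *	}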
*/ /*ARGSUSED*/ struct ifaddr * ifa_ifwithdstaddr(struct sockaddr *addr, int fibnum) { struct ifnet *ifp; struct ifaddr *ifa; IFNET_RLOCK_NOSLEEP(); TAILQ_FOREACH(ifp, &V_ifnet, if_link) { if ((ifp->if_flags & IFF_POINTOPOINT) == 0) continue; if ((fibnum != RT_ALL_FIBS) && (ifp->if_fib != fibnum)) continue; IF_ADDR_RLOCK(ifp); TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) { if (ifa->ifa_addr->sa_family != addr->sa_family) continue; if (ifa->ifa_dstaddr != NULL && sa_equal(addr, ifa->ifa_dstaddr)) { ifa_ref(ifa); IF_ADDR_RUNLOCK(ifp); goto done; } } IF_ADDR_RUNLOCK(ifp); } ifa = NULL; done: IFNET_RUNLOCK_NOSLEEP(); return (ifa); } /* * Find an interface on a specific network. If many, choice * is most specific found. */ struct ifaddr * ifa_ifwithnet(struct sockaddr *addr, int ignore_ptp, int fibnum) { struct ifnet *ifp; struct ifaddr *ifa; struct ifaddr *ifa_maybe = NULL; u_int af = addr->sa_family; char *addr_data = addr->sa_data, *cplim; /* * AF_LINK addresses can be looked up directly by their index number, * so do that if we can. */ if (af == AF_LINK) { struct sockaddr_dl *sdl = (struct sockaddr_dl *)addr; if (sdl->sdl_index && sdl->sdl_index <= V_if_index) return (ifaddr_byindex(sdl->sdl_index)); } /* * Scan though each interface, looking for ones that have addresses * in this address family and the requested fib. Maintain a reference * on ifa_maybe once we find one, as we release the IF_ADDR_RLOCK() that * kept it stable when we move onto the next interface. */ IFNET_RLOCK_NOSLEEP(); TAILQ_FOREACH(ifp, &V_ifnet, if_link) { if ((fibnum != RT_ALL_FIBS) && (ifp->if_fib != fibnum)) continue; IF_ADDR_RLOCK(ifp); TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) { char *cp, *cp2, *cp3; if (ifa->ifa_addr->sa_family != af) next: continue; if (af == AF_INET && ifp->if_flags & IFF_POINTOPOINT && !ignore_ptp) { /* * This is a bit broken as it doesn't * take into account that the remote end may * be a single node in the network we are * looking for. * The trouble is that we don't know the * netmask for the remote end. */ if (ifa->ifa_dstaddr != NULL && sa_equal(addr, ifa->ifa_dstaddr)) { ifa_ref(ifa); IF_ADDR_RUNLOCK(ifp); goto done; } } else { /* * Scan all the bits in the ifa's address. * If a bit dissagrees with what we are * looking for, mask it with the netmask * to see if it really matters. * (A byte at a time) */ if (ifa->ifa_netmask == 0) continue; cp = addr_data; cp2 = ifa->ifa_addr->sa_data; cp3 = ifa->ifa_netmask->sa_data; cplim = ifa->ifa_netmask->sa_len + (char *)ifa->ifa_netmask; while (cp3 < cplim) if ((*cp++ ^ *cp2++) & *cp3++) goto next; /* next address! */ /* * If the netmask of what we just found * is more specific than what we had before * (if we had one), or if the virtual status * of new prefix is better than of the old one, * then remember the new one before continuing * to search for an even better one. */ if (ifa_maybe == NULL || ifa_preferred(ifa_maybe, ifa) || rn_refines((caddr_t)ifa->ifa_netmask, (caddr_t)ifa_maybe->ifa_netmask)) { if (ifa_maybe != NULL) ifa_free(ifa_maybe); ifa_maybe = ifa; ifa_ref(ifa_maybe); } } } IF_ADDR_RUNLOCK(ifp); } ifa = ifa_maybe; ifa_maybe = NULL; done: IFNET_RUNLOCK_NOSLEEP(); if (ifa_maybe != NULL) ifa_free(ifa_maybe); return (ifa); } /* * Find an interface address specific to an interface best matching * a given address. 
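 *
 * Unlike ifa_ifwithnet() above, the search is confined to the single
 * interface 'ifp'.  As with the other lookup routines, a non-NULL result
 * is returned with a reference held and is released with ifa_free().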
*/ struct ifaddr * ifaof_ifpforaddr(struct sockaddr *addr, struct ifnet *ifp) { struct ifaddr *ifa; char *cp, *cp2, *cp3; char *cplim; struct ifaddr *ifa_maybe = NULL; u_int af = addr->sa_family; if (af >= AF_MAX) return (NULL); IF_ADDR_RLOCK(ifp); TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) { if (ifa->ifa_addr->sa_family != af) continue; if (ifa_maybe == NULL) ifa_maybe = ifa; if (ifa->ifa_netmask == 0) { if (sa_equal(addr, ifa->ifa_addr) || (ifa->ifa_dstaddr && sa_equal(addr, ifa->ifa_dstaddr))) goto done; continue; } if (ifp->if_flags & IFF_POINTOPOINT) { if (sa_equal(addr, ifa->ifa_dstaddr)) goto done; } else { cp = addr->sa_data; cp2 = ifa->ifa_addr->sa_data; cp3 = ifa->ifa_netmask->sa_data; cplim = ifa->ifa_netmask->sa_len + (char *)ifa->ifa_netmask; for (; cp3 < cplim; cp3++) if ((*cp++ ^ *cp2++) & *cp3) break; if (cp3 == cplim) goto done; } } ifa = ifa_maybe; done: if (ifa != NULL) ifa_ref(ifa); IF_ADDR_RUNLOCK(ifp); return (ifa); } /* * See whether new ifa is better than current one: * 1) A non-virtual one is preferred over virtual. * 2) A virtual in master state preferred over any other state. * * Used in several address selecting functions. */ int ifa_preferred(struct ifaddr *cur, struct ifaddr *next) { return (cur->ifa_carp && (!next->ifa_carp || ((*carp_master_p)(next) && !(*carp_master_p)(cur)))); } #include /* * Default action when installing a route with a Link Level gateway. * Lookup an appropriate real ifa to point to. * This should be moved to /sys/net/link.c eventually. */ static void link_rtrequest(int cmd, struct rtentry *rt, struct rt_addrinfo *info) { struct ifaddr *ifa, *oifa; struct sockaddr *dst; struct ifnet *ifp; if (cmd != RTM_ADD || ((ifa = rt->rt_ifa) == 0) || ((ifp = ifa->ifa_ifp) == 0) || ((dst = rt_key(rt)) == 0)) return; ifa = ifaof_ifpforaddr(dst, ifp); if (ifa) { oifa = rt->rt_ifa; rt->rt_ifa = ifa; ifa_free(oifa); if (ifa->ifa_rtrequest && ifa->ifa_rtrequest != link_rtrequest) ifa->ifa_rtrequest(cmd, rt, info); } } struct sockaddr_dl * link_alloc_sdl(size_t size, int flags) { return (malloc(size, M_TEMP, flags)); } void link_free_sdl(struct sockaddr *sa) { free(sa, M_TEMP); } /* * Fills in given sdl with interface basic info. * Returns pointer to filled sdl. */ struct sockaddr_dl * link_init_sdl(struct ifnet *ifp, struct sockaddr *paddr, u_char iftype) { struct sockaddr_dl *sdl; sdl = (struct sockaddr_dl *)paddr; memset(sdl, 0, sizeof(struct sockaddr_dl)); sdl->sdl_len = sizeof(struct sockaddr_dl); sdl->sdl_family = AF_LINK; sdl->sdl_index = ifp->if_index; sdl->sdl_type = iftype; return (sdl); } /* * Mark an interface down and notify protocols of * the transition. */ static void if_unroute(struct ifnet *ifp, int flag, int fam) { struct ifaddr *ifa; KASSERT(flag == IFF_UP, ("if_unroute: flag != IFF_UP")); ifp->if_flags &= ~flag; getmicrotime(&ifp->if_lastchange); TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) if (fam == PF_UNSPEC || (fam == ifa->ifa_addr->sa_family)) pfctlinput(PRC_IFDOWN, ifa->ifa_addr); if_qflush(ifp); if (ifp->if_carp) (*carp_linkstate_p)(ifp); rt_ifmsg(ifp); } /* * Mark an interface up and notify protocols of * the transition. 
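 *
 * This is normally reached through the public if_up() wrapper further
 * below; the matching down path is if_unroute() above, via if_down().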
*/ static void if_route(struct ifnet *ifp, int flag, int fam) { struct ifaddr *ifa; KASSERT(flag == IFF_UP, ("if_route: flag != IFF_UP")); ifp->if_flags |= flag; getmicrotime(&ifp->if_lastchange); TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) if (fam == PF_UNSPEC || (fam == ifa->ifa_addr->sa_family)) pfctlinput(PRC_IFUP, ifa->ifa_addr); if (ifp->if_carp) (*carp_linkstate_p)(ifp); rt_ifmsg(ifp); #ifdef INET6 in6_if_up(ifp); #endif } void (*vlan_link_state_p)(struct ifnet *); /* XXX: private from if_vlan */ void (*vlan_trunk_cap_p)(struct ifnet *); /* XXX: private from if_vlan */ struct ifnet *(*vlan_trunkdev_p)(struct ifnet *); struct ifnet *(*vlan_devat_p)(struct ifnet *, uint16_t); int (*vlan_tag_p)(struct ifnet *, uint16_t *); int (*vlan_setcookie_p)(struct ifnet *, void *); void *(*vlan_cookie_p)(struct ifnet *); /* * Handle a change in the interface link state. To avoid LORs * between driver lock and upper layer locks, as well as possible * recursions, we post event to taskqueue, and all job * is done in static do_link_state_change(). */ void if_link_state_change(struct ifnet *ifp, int link_state) { /* Return if state hasn't changed. */ if (ifp->if_link_state == link_state) return; ifp->if_link_state = link_state; taskqueue_enqueue(taskqueue_swi, &ifp->if_linktask); } static void do_link_state_change(void *arg, int pending) { struct ifnet *ifp = (struct ifnet *)arg; int link_state = ifp->if_link_state; CURVNET_SET(ifp->if_vnet); /* Notify that the link state has changed. */ rt_ifmsg(ifp); if (ifp->if_vlantrunk != NULL) (*vlan_link_state_p)(ifp); /* XXXGL: make ng_ether softc pointer */ if ((if_type(ifp) == IFT_ETHER || if_type(ifp) == IFT_L2VLAN) && ifp->if_l2com != NULL) (*ng_ether_link_state_p)(ifp, link_state); if (ifp->if_carp) (*carp_linkstate_p)(ifp); if (ifp->if_bridge) (*bridge_linkstate_p)(ifp); if (ifp->if_lagg) (*lagg_linkstate_p)(ifp, link_state); if (IS_DEFAULT_VNET(curvnet)) devctl_notify("IFNET", ifp->if_xname, (link_state == LINK_STATE_UP) ? "LINK_UP" : "LINK_DOWN", NULL); if (pending > 1) if_printf(ifp, "%d link states coalesced\n", pending); if (log_link_state_change) log(LOG_NOTICE, "%s: link state changed to %s\n", ifp->if_xname, (link_state == LINK_STATE_UP) ? "UP" : "DOWN" ); EVENTHANDLER_INVOKE(ifnet_link_event, ifp, ifp->if_link_state); CURVNET_RESTORE(); } /* * Mark an interface down and notify protocols of * the transition. */ void if_down(struct ifnet *ifp) { if_unroute(ifp, IFF_UP, AF_UNSPEC); } /* * Mark an interface up and notify protocols of * the transition. */ void if_up(struct ifnet *ifp) { if_route(ifp, IFF_UP, AF_UNSPEC); } /* * Map interface name to interface structure pointer, with or without * returning a reference. */ struct ifnet * ifunit_ref(const char *name) { struct ifnet *ifp; IFNET_RLOCK_NOSLEEP(); TAILQ_FOREACH(ifp, &V_ifnet, if_link) { if (strncmp(name, ifp->if_xname, IFNAMSIZ) == 0 && !(ifp->if_flags & IFF_DYING)) break; } if (ifp != NULL) if_ref(ifp); IFNET_RUNLOCK_NOSLEEP(); return (ifp); } struct ifnet * ifunit(const char *name) { struct ifnet *ifp; IFNET_RLOCK_NOSLEEP(); TAILQ_FOREACH(ifp, &V_ifnet, if_link) { if (strncmp(name, ifp->if_xname, IFNAMSIZ) == 0) break; } IFNET_RUNLOCK_NOSLEEP(); return (ifp); } /* * Hardware specific interface ioctls. 
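 *
 * Requests not recognized here return ENOIOCTL, allowing ifioctl() to
 * fall through to the protocol and finally to the driver; a hedged sketch
 * of that dispatch, mirroring the caller below (illustrative only):
 *
 *	error = if_drvioctl(cmd, ifp, data, td);
 *	if (error != ENOIOCTL)
 *		return (error);
 *	... otherwise try pru_control() and then if_ioctl() ...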
*/ int if_drvioctl(u_long cmd, struct ifnet *ifp, void *data, struct thread *td) { struct ifreq *ifr; int error = 0; int new_flags, temp_flags; size_t namelen, onamelen; size_t descrlen; char *descrbuf, *odescrbuf; char new_name[IFNAMSIZ]; struct ifaddr *ifa; struct sockaddr_dl *sdl; ifr = (struct ifreq *)data; switch (cmd) { case SIOCGIFINDEX: ifr->ifr_index = ifp->if_index; break; case SIOCGIFFLAGS: temp_flags = ifp->if_flags; ifr->ifr_flags = temp_flags & 0xffff; ifr->ifr_flagshigh = temp_flags >> 16; break; case SIOCGIFCAP: ifr->ifr_reqcap = ifp->if_capabilities; ifr->ifr_curcap = ifp->if_capenable; break; #ifdef MAC case SIOCGIFMAC: error = mac_ifnet_ioctl_get(td->td_ucred, ifr, ifp); break; #endif case SIOCGIFMETRIC: ifr->ifr_metric = ifp->if_metric; break; case SIOCGIFMTU: ifr->ifr_mtu = ifp->if_mtu; break; case SIOCGIFPHYS: /* XXXGL: did this ever worked? */ ifr->ifr_phys = 0; break; case SIOCGIFDESCR: error = 0; sx_slock(&ifdescr_sx); if (ifp->if_description == NULL) error = ENOMSG; else { /* space for terminating nul */ descrlen = strlen(ifp->if_description) + 1; if (ifr->ifr_buffer.length < descrlen) ifr->ifr_buffer.buffer = NULL; else error = copyout(ifp->if_description, ifr->ifr_buffer.buffer, descrlen); ifr->ifr_buffer.length = descrlen; } sx_sunlock(&ifdescr_sx); break; case SIOCSIFDESCR: error = priv_check(td, PRIV_NET_SETIFDESCR); if (error) return (error); /* * Copy only (length-1) bytes to make sure that * if_description is always nul terminated. The * length parameter is supposed to count the * terminating nul in. */ if (ifr->ifr_buffer.length > ifdescr_maxlen) return (ENAMETOOLONG); else if (ifr->ifr_buffer.length == 0) descrbuf = NULL; else { descrbuf = malloc(ifr->ifr_buffer.length, M_IFDESCR, M_WAITOK | M_ZERO); error = copyin(ifr->ifr_buffer.buffer, descrbuf, ifr->ifr_buffer.length - 1); if (error) { free(descrbuf, M_IFDESCR); break; } } sx_xlock(&ifdescr_sx); odescrbuf = ifp->if_description; ifp->if_description = descrbuf; sx_xunlock(&ifdescr_sx); getmicrotime(&ifp->if_lastchange); free(odescrbuf, M_IFDESCR); break; case SIOCGIFFIB: ifr->ifr_fib = ifp->if_fib; break; case SIOCSIFFIB: error = priv_check(td, PRIV_NET_SETIFFIB); if (error) return (error); if (ifr->ifr_fib >= rt_numfibs) return (EINVAL); ifp->if_fib = ifr->ifr_fib; break; case SIOCSIFFLAGS: error = priv_check(td, PRIV_NET_SETIFFLAGS); if (error) return (error); /* * Currently, no driver owned flags pass the IFF_CANTCHANGE * check, so we don't need special handling here yet. */ new_flags = (ifr->ifr_flags & 0xffff) | (ifr->ifr_flagshigh << 16); if (ifp->if_flags & IFF_UP && (new_flags & IFF_UP) == 0) { if_down(ifp); } else if (new_flags & IFF_UP && (ifp->if_flags & IFF_UP) == 0) { if_up(ifp); } /* See if permanently promiscuous mode bit is about to flip */ if ((ifp->if_flags ^ new_flags) & IFF_PPROMISC) { if (new_flags & IFF_PPROMISC) ifp->if_flags |= IFF_PROMISC; else if (ifp->if_pcount == 0) ifp->if_flags &= ~IFF_PROMISC; log(LOG_INFO, "%s: permanently promiscuous mode %s\n", ifp->if_xname, (new_flags & IFF_PPROMISC) ? 
"enabled" : "disabled"); } ifp->if_flags = (ifp->if_flags & IFF_CANTCHANGE) | (new_flags &~ IFF_CANTCHANGE); if_ioctl(ifp, cmd, data, td); getmicrotime(&ifp->if_lastchange); break; case SIOCSIFCAP: error = priv_check(td, PRIV_NET_SETIFCAP); if (error) return (error); + if ((ifr->ifr_reqcap & IFCAP_VLAN_HWTSO) != 0) + ifr->ifr_reqcap |= IFCAP_VLAN_HWTAGGING; if (ifr->ifr_reqcap & ~ifp->if_capabilities) return (EINVAL); + if (ifr->ifr_reqcap == ifp->if_capenable) + return (0); + ifr->ifr_curcap = ifp->if_capenable; error = if_ioctl(ifp, cmd, data, td); - if (error == 0) + if (error == 0) { + ifp->if_capenable = ifr->ifr_reqcap; + ifp->if_hwassist = ifr->ifr_hwassist; getmicrotime(&ifp->if_lastchange); + if (ifp->if_vlantrunk != NULL) + (*vlan_trunk_cap_p)(ifp); + } break; - #ifdef MAC case SIOCSIFMAC: error = mac_ifnet_ioctl_set(td->td_ucred, ifr, ifp); break; #endif case SIOCSIFNAME: error = priv_check(td, PRIV_NET_SETIFNAME); if (error) return (error); error = copyinstr(ifr->ifr_data, new_name, IFNAMSIZ, NULL); if (error != 0) return (error); if (new_name[0] == '\0') return (EINVAL); if (ifunit(new_name) != NULL) return (EEXIST); /* * XXX: Locking. Nothing else seems to lock if_flags, * and there are numerous other races with the * ifunit() checks not being atomic with namespace * changes (renames, vmoves, if_attach, etc). */ ifp->if_flags |= IFF_RENAMING; /* Announce the departure of the interface. */ rt_ifannouncemsg(ifp, IFAN_DEPARTURE); EVENTHANDLER_INVOKE(ifnet_departure_event, ifp); log(LOG_INFO, "%s: changing name to '%s'\n", ifp->if_xname, new_name); IF_ADDR_WLOCK(ifp); strlcpy(ifp->if_xname, new_name, sizeof(ifp->if_xname)); ifa = ifp->if_addr; sdl = (struct sockaddr_dl *)ifa->ifa_addr; namelen = strlen(new_name); onamelen = sdl->sdl_nlen; /* * Move the address if needed. This is safe because we * allocate space for a name of length IFNAMSIZ when we * create this in if_attach(). */ if (namelen != onamelen) { bcopy(sdl->sdl_data + onamelen, sdl->sdl_data + namelen, sdl->sdl_alen); } bcopy(new_name, sdl->sdl_data, namelen); sdl->sdl_nlen = namelen; sdl = (struct sockaddr_dl *)ifa->ifa_netmask; bzero(sdl->sdl_data, onamelen); while (namelen != 0) sdl->sdl_data[--namelen] = 0xff; IF_ADDR_WUNLOCK(ifp); EVENTHANDLER_INVOKE(ifnet_arrival_event, ifp); /* Announce the return of the interface. */ rt_ifannouncemsg(ifp, IFAN_ARRIVAL); ifp->if_flags &= ~IFF_RENAMING; break; #ifdef VIMAGE case SIOCSIFVNET: error = priv_check(td, PRIV_NET_SETIFVNET); if (error) return (error); error = if_vmove_loan(td, ifp, ifr->ifr_name, ifr->ifr_jid); break; #endif case SIOCSIFMETRIC: error = priv_check(td, PRIV_NET_SETIFMETRIC); if (error) return (error); ifp->if_metric = ifr->ifr_metric; getmicrotime(&ifp->if_lastchange); break; case SIOCSIFPHYS: error = priv_check(td, PRIV_NET_SETIFPHYS); if (error) return (error); error = if_ioctl(ifp, cmd, data, td); if (error == 0) getmicrotime(&ifp->if_lastchange); break; case SIOCSIFMTU: - { - u_long oldmtu = ifp->if_mtu; - error = priv_check(td, PRIV_NET_SETIFMTU); if (error) return (error); if (ifr->ifr_mtu < IF_MINMTU || ifr->ifr_mtu > IF_MAXMTU) return (EINVAL); + if (ifr->ifr_mtu == ifp->if_mtu) + return (0); error = if_ioctl(ifp, cmd, data, td); if (error == 0) { + ifp->if_mtu = ifr->ifr_mtu; getmicrotime(&ifp->if_lastchange); rt_ifmsg(ifp); - } - /* - * If the link MTU changed, do network layer specific procedure. 
- */ - if (ifp->if_mtu != oldmtu) { #ifdef INET6 nd6_setmtu(ifp); #endif rt_updatemtu(ifp); } break; - } case SIOCADDMULTI: case SIOCDELMULTI: if (cmd == SIOCADDMULTI) error = priv_check(td, PRIV_NET_ADDMULTI); else error = priv_check(td, PRIV_NET_DELMULTI); if (error) return (error); /* Don't allow group membership on non-multicast interfaces. */ if ((ifp->if_flags & IFF_MULTICAST) == 0) return (EOPNOTSUPP); /* Don't let users screw up protocols' entries. */ if (ifr->ifr_addr.sa_family != AF_LINK) return (EINVAL); if (cmd == SIOCADDMULTI) { struct ifmultiaddr *ifma; /* * Userland is only permitted to join groups once * via the if_addmulti() KPI, because it cannot hold * struct ifmultiaddr * between calls. It may also * lose a race while we check if the membership * already exists. */ IF_ADDR_RLOCK(ifp); ifma = if_findmulti(ifp, &ifr->ifr_addr); IF_ADDR_RUNLOCK(ifp); if (ifma != NULL) error = EADDRINUSE; else error = if_addmulti(ifp, &ifr->ifr_addr, &ifma); } else { error = if_delmulti(ifp, &ifr->ifr_addr); } if (error == 0) getmicrotime(&ifp->if_lastchange); break; case SIOCSIFPHYADDR: case SIOCDIFPHYADDR: #ifdef INET6 case SIOCSIFPHYADDR_IN6: #endif case SIOCSIFMEDIA: case SIOCSIFGENERIC: error = priv_check(td, PRIV_NET_HWIOCTL); if (error) return (error); error = if_ioctl(ifp, cmd, data, td); if (error == 0) getmicrotime(&ifp->if_lastchange); break; case SIOCGIFSTATUS: case SIOCGIFPSRCADDR: case SIOCGIFPDSTADDR: case SIOCGIFMEDIA: case SIOCGIFGENERIC: error = if_ioctl(ifp, cmd, data, td); break; case SIOCSIFLLADDR: error = priv_check(td, PRIV_NET_SETLLADDR); if (error) return (error); error = if_setlladdr(ifp, ifr->ifr_addr.sa_data, ifr->ifr_addr.sa_len); EVENTHANDLER_INVOKE(iflladdr_event, ifp); break; case SIOCAIFGROUP: { struct ifgroupreq *ifgr = (struct ifgroupreq *)ifr; error = priv_check(td, PRIV_NET_ADDIFGROUP); if (error) return (error); if ((error = if_addgroup(ifp, ifgr->ifgr_group))) return (error); break; } case SIOCGIFGROUP: if ((error = if_getgroup((struct ifgroupreq *)ifr, ifp))) return (error); break; case SIOCDIFGROUP: { struct ifgroupreq *ifgr = (struct ifgroupreq *)ifr; error = priv_check(td, PRIV_NET_DELIFGROUP); if (error) return (error); if ((error = if_delgroup(ifp, ifgr->ifgr_group))) return (error); break; } default: error = ENOIOCTL; break; } return (error); } #ifdef COMPAT_FREEBSD32 struct ifconf32 { int32_t ifc_len; union { uint32_t ifcu_buf; uint32_t ifcu_req; } ifc_ifcu; }; #define SIOCGIFCONF32 _IOWR('i', 36, struct ifconf32) #endif /* * Interface ioctls. */ int ifioctl(struct socket *so, u_long cmd, caddr_t data, struct thread *td) { struct ifnet *ifp; struct ifreq *ifr; int error; int oif_flags; CURVNET_SET(so->so_vnet); switch (cmd) { case SIOCGIFCONF: error = ifconf(cmd, data); CURVNET_RESTORE(); return (error); #ifdef COMPAT_FREEBSD32 case SIOCGIFCONF32: { struct ifconf32 *ifc32; struct ifconf ifc; ifc32 = (struct ifconf32 *)data; ifc.ifc_len = ifc32->ifc_len; ifc.ifc_buf = PTRIN(ifc32->ifc_buf); error = ifconf(SIOCGIFCONF, (void *)&ifc); CURVNET_RESTORE(); if (error == 0) ifc32->ifc_len = ifc.ifc_len; return (error); } #endif } ifr = (struct ifreq *)data; switch (cmd) { #ifdef VIMAGE case SIOCSIFRVNET: error = priv_check(td, PRIV_NET_SETIFVNET); if (error == 0) error = if_vmove_reclaim(td, ifr->ifr_name, ifr->ifr_jid); CURVNET_RESTORE(); return (error); #endif case SIOCIFCREATE: case SIOCIFCREATE2: error = priv_check(td, PRIV_NET_IFCREATE); if (error == 0) error = if_clone_create(ifr->ifr_name, sizeof(ifr->ifr_name), cmd == SIOCIFCREATE2 ? 
ifr->ifr_data : NULL); CURVNET_RESTORE(); return (error); case SIOCIFDESTROY: error = priv_check(td, PRIV_NET_IFDESTROY); if (error == 0) error = if_clone_destroy(ifr->ifr_name); CURVNET_RESTORE(); return (error); case SIOCIFGCLONERS: error = if_clone_list((struct if_clonereq *)data); CURVNET_RESTORE(); return (error); case SIOCGIFGMEMB: error = if_getgroupmembers((struct ifgroupreq *)data); CURVNET_RESTORE(); return (error); #if defined(INET) || defined(INET6) case SIOCSVH: case SIOCGVH: if (carp_ioctl_p == NULL) error = EPROTONOSUPPORT; else error = (*carp_ioctl_p)(ifr, cmd, td); CURVNET_RESTORE(); return (error); #endif } ifp = ifunit_ref(ifr->ifr_name); if (ifp == NULL) { CURVNET_RESTORE(); return (ENXIO); } error = if_drvioctl(cmd, ifp, data, td); if (error != ENOIOCTL) { if_rele(ifp); CURVNET_RESTORE(); return (error); } oif_flags = ifp->if_flags; if (so->so_proto == NULL) { if_rele(ifp); CURVNET_RESTORE(); return (EOPNOTSUPP); } /* * Pass the request on to the socket control method, and if the * latter returns EOPNOTSUPP, directly to the interface. * * Make an exception for the legacy SIOCSIF* requests. Drivers * trust SIOCSIFADDR et al to come from an already privileged * layer, and do not perform any credentials checks or input * validation. */ error = ((*so->so_proto->pr_usrreqs->pru_control)(so, cmd, data, ifp, td)); if (error == EOPNOTSUPP && ifp != NULL && cmd != SIOCSIFADDR && cmd != SIOCSIFBRDADDR && cmd != SIOCSIFDSTADDR && cmd != SIOCSIFNETMASK) error = if_ioctl(ifp, cmd, data, td); if ((oif_flags ^ ifp->if_flags) & IFF_UP) { #ifdef INET6 if (ifp->if_flags & IFF_UP) in6_if_up(ifp); #endif } if_rele(ifp); CURVNET_RESTORE(); return (error); } /* * The code common to handling reference counted flags, * e.g., in ifpromisc() and if_allmulti(). * The "pflag" argument can specify a permanent mode flag to check, * such as IFF_PPROMISC for promiscuous mode; should be 0 if none. * * Only to be used on stack-owned flags, not driver-owned flags. */ static int if_setflag(struct ifnet *ifp, int flag, int pflag, int *refcount, int onswitch) { struct ifreq ifr; int error; int oldflags, oldcount; if (onswitch) KASSERT(*refcount >= 0, ("%s: increment negative refcount %d for flag %d", __func__, *refcount, flag)); else KASSERT(*refcount > 0, ("%s: decrement non-positive refcount %d for flag %d", __func__, *refcount, flag)); /* In case this mode is permanent, just touch refcount */ if (ifp->if_flags & pflag) { *refcount += onswitch ? 1 : -1; return (0); } /* Save ifnet parameters for if_ioctl() may fail */ oldcount = *refcount; oldflags = ifp->if_flags; /* * See if we aren't the only and touching refcount is enough. * Actually toggle interface flag if we are the first or last. */ if (onswitch) { if ((*refcount)++) return (0); ifp->if_flags |= flag; } else { if (--(*refcount)) return (0); ifp->if_flags &= ~flag; } /* Call down the driver since we've changed interface flags */ ifr.ifr_flags = ifp->if_flags & 0xffff; ifr.ifr_flagshigh = ifp->if_flags >> 16; error = if_ioctl(ifp, SIOCSIFFLAGS, (caddr_t)&ifr, curthread); if (error) goto recover; /* Notify userland that interface flags have changed */ rt_ifmsg(ifp); return (0); recover: /* Recover after driver error */ *refcount = oldcount; ifp->if_flags = oldflags; return (error); } /* * Set/clear promiscuous mode on interface ifp based on the truth value * of pswitch. The calls are reference counted so that only the first * "on" request actually has an effect, as does the final "off" request. 
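 *
 * A hedged sketch of the expected pairing (illustrative only):
 *
 *	error = ifpromisc(ifp, 1);
 *	...
 *	error = ifpromisc(ifp, 0);
 *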
* Results are undefined if the "off" and "on" requests are not matched. */ int ifpromisc(struct ifnet *ifp, int pswitch) { int error; int oldflags = ifp->if_flags; error = if_setflag(ifp, IFF_PROMISC, IFF_PPROMISC, &ifp->if_pcount, pswitch); /* If promiscuous mode status has changed, log a message */ if (error == 0 && ((ifp->if_flags ^ oldflags) & IFF_PROMISC)) log(LOG_INFO, "%s: promiscuous mode %s\n", ifp->if_xname, (ifp->if_flags & IFF_PROMISC) ? "enabled" : "disabled"); return (error); } /* * Return interface configuration * of system. List may be used * in later ioctl's (above) to get * other information. */ /*ARGSUSED*/ static int ifconf(u_long cmd, caddr_t data) { struct ifconf *ifc = (struct ifconf *)data; struct ifnet *ifp; struct ifaddr *ifa; struct ifreq ifr; struct sbuf *sb; int error, full = 0, valid_len, max_len; /* Limit initial buffer size to MAXPHYS to avoid DoS from userspace. */ max_len = MAXPHYS - 1; /* Prevent hostile input from being able to crash the system */ if (ifc->ifc_len <= 0) return (EINVAL); again: if (ifc->ifc_len <= max_len) { max_len = ifc->ifc_len; full = 1; } sb = sbuf_new(NULL, NULL, max_len + 1, SBUF_FIXEDLEN); max_len = 0; valid_len = 0; IFNET_RLOCK(); TAILQ_FOREACH(ifp, &V_ifnet, if_link) { int addrs; /* * Zero the ifr_name buffer to make sure we don't * disclose the contents of the stack. */ memset(ifr.ifr_name, 0, sizeof(ifr.ifr_name)); if (strlcpy(ifr.ifr_name, ifp->if_xname, sizeof(ifr.ifr_name)) >= sizeof(ifr.ifr_name)) { sbuf_delete(sb); IFNET_RUNLOCK(); return (ENAMETOOLONG); } addrs = 0; IF_ADDR_RLOCK(ifp); TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) { struct sockaddr *sa = ifa->ifa_addr; if (prison_if(curthread->td_ucred, sa) != 0) continue; addrs++; if (sa->sa_len <= sizeof(*sa)) { ifr.ifr_addr = *sa; sbuf_bcat(sb, &ifr, sizeof(ifr)); max_len += sizeof(ifr); } else { sbuf_bcat(sb, &ifr, offsetof(struct ifreq, ifr_addr)); max_len += offsetof(struct ifreq, ifr_addr); sbuf_bcat(sb, sa, sa->sa_len); max_len += sa->sa_len; } if (sbuf_error(sb) == 0) valid_len = sbuf_len(sb); } IF_ADDR_RUNLOCK(ifp); if (addrs == 0) { bzero((caddr_t)&ifr.ifr_addr, sizeof(ifr.ifr_addr)); sbuf_bcat(sb, &ifr, sizeof(ifr)); max_len += sizeof(ifr); if (sbuf_error(sb) == 0) valid_len = sbuf_len(sb); } } IFNET_RUNLOCK(); /* * If we didn't allocate enough space (uncommon), try again. If * we have already allocated as much space as we are allowed, * return what we've got. */ if (valid_len != max_len && !full) { sbuf_delete(sb); goto again; } ifc->ifc_len = valid_len; sbuf_finish(sb); error = copyout(sbuf_data(sb), ifc->ifc_req, ifc->ifc_len); sbuf_delete(sb); return (error); } /* * Just like ifpromisc(), but for all-multicast-reception mode. */ int if_allmulti(struct ifnet *ifp, int onswitch) { return (if_setflag(ifp, IFF_ALLMULTI, 0, &ifp->if_amcount, onswitch)); } struct ifmultiaddr * if_findmulti(struct ifnet *ifp, struct sockaddr *sa) { struct ifmultiaddr *ifma; IF_ADDR_LOCK_ASSERT(ifp); TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (sa->sa_family == AF_LINK) { if (sa_dl_equal(ifma->ifma_addr, sa)) break; } else { if (sa_equal(ifma->ifma_addr, sa)) break; } } return ifma; } /* * Allocate a new ifmultiaddr and initialize based on passed arguments. We * make copies of passed sockaddrs. The ifmultiaddr will not be added to * the ifnet multicast address list here, so the caller must do that and * other setup work (such as notifying the device driver). The reference * count is initialized to 1. 
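 *
 * This is an internal helper; protocols add and remove memberships
 * through the public if_addmulti()/if_delmulti() entry points below,
 * which handle list insertion, reference counting and driver notification.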
*/ static struct ifmultiaddr * if_allocmulti(struct ifnet *ifp, struct sockaddr *sa, struct sockaddr *llsa, int mflags) { struct ifmultiaddr *ifma; struct sockaddr *dupsa; ifma = malloc(sizeof *ifma, M_IFMADDR, mflags | M_ZERO); if (ifma == NULL) return (NULL); dupsa = malloc(sa->sa_len, M_IFMADDR, mflags); if (dupsa == NULL) { free(ifma, M_IFMADDR); return (NULL); } bcopy(sa, dupsa, sa->sa_len); ifma->ifma_addr = dupsa; ifma->ifma_ifp = ifp; ifma->ifma_refcount = 1; ifma->ifma_protospec = NULL; if (llsa == NULL) { ifma->ifma_lladdr = NULL; return (ifma); } dupsa = malloc(llsa->sa_len, M_IFMADDR, mflags); if (dupsa == NULL) { free(ifma->ifma_addr, M_IFMADDR); free(ifma, M_IFMADDR); return (NULL); } bcopy(llsa, dupsa, llsa->sa_len); ifma->ifma_lladdr = dupsa; return (ifma); } /* * if_freemulti: free ifmultiaddr structure and possibly attached related * addresses. The caller is responsible for implementing reference * counting, notifying the driver, handling routing messages, and releasing * any dependent link layer state. */ static void if_freemulti(struct ifmultiaddr *ifma) { KASSERT(ifma->ifma_refcount == 0, ("if_freemulti: refcount %d", ifma->ifma_refcount)); if (ifma->ifma_lladdr != NULL) free(ifma->ifma_lladdr, M_IFMADDR); free(ifma->ifma_addr, M_IFMADDR); free(ifma, M_IFMADDR); } /* * Register an additional multicast address with a network interface. * * - If the address is already present, bump the reference count on the * address and return. * - If the address is not link-layer, look up a link layer address. * - Allocate address structures for one or both addresses, and attach to the * multicast address list on the interface. If automatically adding a link * layer address, the protocol address will own a reference to the link * layer address, to be freed when it is freed. * - Notify the network device driver of an addition to the multicast address * list. * * 'sa' points to caller-owned memory with the desired multicast address. * * 'retifma' will be used to return a pointer to the resulting multicast * address reference, if desired. */ int if_addmulti(struct ifnet *ifp, struct sockaddr *sa, struct ifmultiaddr **retifma) { struct ifmultiaddr *ifma, *ll_ifma; struct sockaddr *llsa; struct sockaddr_dl sdl; int error; /* * If the address is already present, return a new reference to it; * otherwise, allocate storage and set up a new address. */ IF_ADDR_WLOCK(ifp); ifma = if_findmulti(ifp, sa); if (ifma != NULL) { ifma->ifma_refcount++; if (retifma != NULL) *retifma = ifma; IF_ADDR_WUNLOCK(ifp); return (0); } /* * The address isn't already present; resolve the protocol address * into a link layer address, and then look that up, bump its * refcount or allocate an ifma for that also. * Most link layer resolving functions returns address data which * fits inside default sockaddr_dl structure. However callback * can allocate another sockaddr structure, in that case we need to * free it later. */ sdl.sdl_len = sizeof(sdl); llsa = (struct sockaddr *)&sdl; error = if_resolvemulti(ifp, &llsa, sa); if (error == EOPNOTSUPP) llsa = NULL; else if (error) goto unlock_out; /* * Allocate the new address. Don't hook it up yet, as we may also * need to allocate a link layer multicast address. */ ifma = if_allocmulti(ifp, sa, llsa, M_NOWAIT); if (ifma == NULL) { error = ENOMEM; goto free_llsa_out; } /* * If a link layer address is found, we'll need to see if it's * already present in the address list, or allocate is as well. * When this block finishes, the link layer address will be on the * list. 
*/ if (llsa != NULL) { ll_ifma = if_findmulti(ifp, llsa); if (ll_ifma == NULL) { ll_ifma = if_allocmulti(ifp, llsa, NULL, M_NOWAIT); if (ll_ifma == NULL) { --ifma->ifma_refcount; if_freemulti(ifma); error = ENOMEM; goto free_llsa_out; } TAILQ_INSERT_HEAD(&ifp->if_multiaddrs, ll_ifma, ifma_link); } else ll_ifma->ifma_refcount++; ifma->ifma_llifma = ll_ifma; } /* * We now have a new multicast address, ifma, and possibly a new or * referenced link layer address. Add the primary address to the * ifnet address list. */ TAILQ_INSERT_HEAD(&ifp->if_multiaddrs, ifma, ifma_link); if (retifma != NULL) *retifma = ifma; /* * Must generate the message while holding the lock so that 'ifma' * pointer is still valid. */ rt_newmaddrmsg(RTM_NEWMADDR, ifma); IF_ADDR_WUNLOCK(ifp); /* * We are certain we have added something, so call down to the * interface to let them know about it. */ if_ioctl(ifp, SIOCADDMULTI, 0, curthread); if ((llsa != NULL) && (llsa != (struct sockaddr *)&sdl)) link_free_sdl(llsa); return (0); free_llsa_out: if ((llsa != NULL) && (llsa != (struct sockaddr *)&sdl)) link_free_sdl(llsa); unlock_out: IF_ADDR_WUNLOCK(ifp); return (error); } /* * Delete a multicast group membership by network-layer group address. * * Returns ENOENT if the entry could not be found. If ifp no longer * exists, results are undefined. This entry point should only be used * from subsystems which do appropriate locking to hold ifp for the * duration of the call. * Network-layer protocol domains must use if_delmulti_ifma(). */ int if_delmulti(struct ifnet *ifp, struct sockaddr *sa) { struct ifmultiaddr *ifma; int lastref; #ifdef INVARIANTS struct ifnet *oifp; IFNET_RLOCK_NOSLEEP(); TAILQ_FOREACH(oifp, &V_ifnet, if_link) if (ifp == oifp) break; if (ifp != oifp) ifp = NULL; IFNET_RUNLOCK_NOSLEEP(); KASSERT(ifp != NULL, ("%s: ifnet went away", __func__)); #endif if (ifp == NULL) return (ENOENT); IF_ADDR_WLOCK(ifp); lastref = 0; ifma = if_findmulti(ifp, sa); if (ifma != NULL) lastref = if_delmulti_locked(ifp, ifma, 0); IF_ADDR_WUNLOCK(ifp); if (ifma == NULL) return (ENOENT); if (lastref) if_ioctl(ifp, SIOCDELMULTI, 0, curthread); return (0); } /* * Delete all multicast group membership for an interface. * Should be used to quickly flush all multicast filters. */ void if_delallmulti(struct ifnet *ifp) { struct ifmultiaddr *ifma; struct ifmultiaddr *next; IF_ADDR_WLOCK(ifp); TAILQ_FOREACH_SAFE(ifma, &ifp->if_multiaddrs, ifma_link, next) if_delmulti_locked(ifp, ifma, 0); IF_ADDR_WUNLOCK(ifp); } /* * Delete a multicast group membership by group membership pointer. * Network-layer protocol domains must use this routine. * * It is safe to call this routine if the ifp disappeared. */ void if_delmulti_ifma(struct ifmultiaddr *ifma) { struct ifnet *ifp; int lastref; ifp = ifma->ifma_ifp; #ifdef DIAGNOSTIC if (ifp == NULL) { printf("%s: ifma_ifp seems to be detached\n", __func__); } else { struct ifnet *oifp; IFNET_RLOCK_NOSLEEP(); TAILQ_FOREACH(oifp, &V_ifnet, if_link) if (ifp == oifp) break; if (ifp != oifp) { printf("%s: ifnet %p disappeared\n", __func__, ifp); ifp = NULL; } IFNET_RUNLOCK_NOSLEEP(); } #endif /* * If and only if the ifnet instance exists: Acquire the address lock. */ if (ifp != NULL) IF_ADDR_WLOCK(ifp); lastref = if_delmulti_locked(ifp, ifma, 0); if (ifp != NULL) { /* * If and only if the ifnet instance exists: * Release the address lock. * If the group was left: update the hardware hash filter. 
*/ IF_ADDR_WUNLOCK(ifp); if (lastref) if_ioctl(ifp, SIOCDELMULTI, 0, curthread); } } /* * Perform deletion of network-layer and/or link-layer multicast address. * * Return 0 if the reference count was decremented. * Return 1 if the final reference was released, indicating that the * hardware hash filter should be reprogrammed. */ static int if_delmulti_locked(struct ifnet *ifp, struct ifmultiaddr *ifma, int detaching) { struct ifmultiaddr *ll_ifma; if (ifp != NULL && ifma->ifma_ifp != NULL) { KASSERT(ifma->ifma_ifp == ifp, ("%s: inconsistent ifp %p", __func__, ifp)); IF_ADDR_WLOCK_ASSERT(ifp); } ifp = ifma->ifma_ifp; /* * If the ifnet is detaching, null out references to ifnet, * so that upper protocol layers will notice, and not attempt * to obtain locks for an ifnet which no longer exists. The * routing socket announcement must happen before the ifnet * instance is detached from the system. */ if (detaching) { #ifdef DIAGNOSTIC printf("%s: detaching ifnet instance %p\n", __func__, ifp); #endif /* * ifp may already be nulled out if we are being reentered * to delete the ll_ifma. */ if (ifp != NULL) { rt_newmaddrmsg(RTM_DELMADDR, ifma); ifma->ifma_ifp = NULL; } } if (--ifma->ifma_refcount > 0) return 0; /* * If this ifma is a network-layer ifma, a link-layer ifma may * have been associated with it. Release it first if so. */ ll_ifma = ifma->ifma_llifma; if (ll_ifma != NULL) { KASSERT(ifma->ifma_lladdr != NULL, ("%s: llifma w/o lladdr", __func__)); if (detaching) ll_ifma->ifma_ifp = NULL; /* XXX */ if (--ll_ifma->ifma_refcount == 0) { if (ifp != NULL) { TAILQ_REMOVE(&ifp->if_multiaddrs, ll_ifma, ifma_link); } if_freemulti(ll_ifma); } } if (ifp != NULL) TAILQ_REMOVE(&ifp->if_multiaddrs, ifma, ifma_link); if_freemulti(ifma); /* * The last reference to this instance of struct ifmultiaddr * was released; the hardware should be notified of this change. */ return 1; } /* * Set the link layer address on an interface. * * At this time we only support certain types of interfaces, * and we don't allow the length of the address to change. */ int if_setlladdr(struct ifnet *ifp, const u_char *lladdr, int len) { struct sockaddr_dl *sdl; struct ifaddr *ifa; struct ifreq ifr; IF_ADDR_RLOCK(ifp); ifa = ifp->if_addr; if (ifa == NULL) { IF_ADDR_RUNLOCK(ifp); return (EINVAL); } ifa_ref(ifa); IF_ADDR_RUNLOCK(ifp); sdl = (struct sockaddr_dl *)ifa->ifa_addr; if (sdl == NULL) { ifa_free(ifa); return (EINVAL); } if (len != sdl->sdl_alen) { /* don't allow length to change */ ifa_free(ifa); return (EINVAL); } switch (if_type(ifp)) { case IFT_ETHER: case IFT_FDDI: case IFT_XETHER: case IFT_ISO88025: case IFT_L2VLAN: case IFT_BRIDGE: case IFT_ARCNET: case IFT_IEEE8023ADLAG: case IFT_IEEE80211: bcopy(lladdr, LLADDR(sdl), len); ifa_free(ifa); break; default: ifa_free(ifa); return (ENODEV); } /* * If the interface is already up, we need * to re-init it in order to reprogram its * address filter. */ if ((ifp->if_flags & IFF_UP) != 0) { ifp->if_flags &= ~IFF_UP; ifr.ifr_flags = ifp->if_flags & 0xffff; ifr.ifr_flagshigh = ifp->if_flags >> 16; if_ioctl(ifp, SIOCSIFFLAGS, &ifr, curthread); ifp->if_flags |= IFF_UP; ifr.ifr_flags = ifp->if_flags & 0xffff; ifr.ifr_flagshigh = ifp->if_flags >> 16; if_ioctl(ifp, SIOCSIFFLAGS, &ifr, curthread); #ifdef INET /* * Also send gratuitous ARPs to notify other nodes about * the address change. */ TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) { if (ifa->ifa_addr->sa_family == AF_INET) arp_ifinit(ifp, ifa); } #endif } return (0); } int if_printf(struct ifnet *ifp, const char * fmt, ...) 
{ va_list ap; int retval; retval = printf("%s: ", ifp->if_xname); va_start(ap, fmt); retval += vprintf(fmt, ap); va_end(ap); return (retval); } int if_getmtu_family(if_t ifp, int family) { struct domain *dp; for (dp = domains; dp; dp = dp->dom_next) if (dp->dom_family == family && dp->dom_ifmtu != NULL) return (dp->dom_ifmtu(ifp)); return (ifp->if_mtu); } /* * Methods for drivers to access interface unicast and multicast * addresses. Driver do not know 'struct ifaddr' neither 'struct ifmultiaddr'. */ void if_foreach_addr(if_t ifp, ifaddr_cb_t cb, void *cb_arg) { struct ifaddr *ifa; IF_ADDR_RLOCK(ifp); TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) (*cb)(cb_arg, ifa->ifa_addr, ifa->ifa_dstaddr, ifa->ifa_netmask); IF_ADDR_RUNLOCK(ifp); } void if_foreach_maddr(if_t ifp, ifmaddr_cb_t cb, void *cb_arg) { struct ifmultiaddr *ifma; IF_ADDR_RLOCK(ifp); TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) (*cb)(cb_arg, ifma->ifma_addr); IF_ADDR_RUNLOCK(ifp); } /* * Generic software queue, that many non-high-end drivers use. For now * it is minimalistic version of classic BSD ifqueue, but we can swap it * to any other implementation later. */ struct ifqueue { struct mbufq ifq_mbq; struct mtx ifq_mtx; }; static struct ifqueue * if_snd_alloc(int maxlen) { struct ifqueue *ifq; ifq = malloc(sizeof(struct ifqueue), M_IFNET, M_ZERO | M_WAITOK); mbufq_init(&ifq->ifq_mbq, maxlen); mtx_init(&ifq->ifq_mtx, "ifqueue", NULL, MTX_DEF); return (ifq); } static void if_snd_free(struct ifqueue *ifq) { mtx_destroy(&ifq->ifq_mtx); free(ifq, M_IFNET); } /* * Flush software interface queue. */ static void if_snd_qflush(if_t ifp) { struct ifqueue *ifq; struct mbuf *m, *n; ifq = ifp->if_snd; mtx_lock(&ifq->ifq_mtx); n = mbufq_flush(&ifq->ifq_mbq); mtx_unlock(&ifq->ifq_mtx); while ((m = n) != NULL) { n = m->m_nextpkt; m_freem(m); } } int if_snd_len(if_t ifp) { struct ifqueue *ifq = ifp->if_snd; return (mbufq_len(&ifq->ifq_mbq)); } int if_snd_enqueue(struct ifnet *ifp, struct mbuf *m) { struct ifqueue *ifq = ifp->if_snd; int error; mtx_lock(&ifq->ifq_mtx); error = mbufq_enqueue(&ifq->ifq_mbq, m); mtx_unlock(&ifq->ifq_mtx); if (error) { m_freem(m); if_inc_counter(ifp, IFCOUNTER_OQDROPS, 1); } return (error); } struct mbuf * if_snd_dequeue(if_t ifp) { struct ifqueue *ifq = ifp->if_snd; struct mbuf *m; mtx_lock(&ifq->ifq_mtx); m = mbufq_dequeue(&ifq->ifq_mbq); mtx_unlock(&ifq->ifq_mtx); return (m); } void if_snd_prepend(if_t ifp, struct mbuf *m) { struct ifqueue *ifq = ifp->if_snd; mtx_lock(&ifq->ifq_mtx); mbufq_prepend(&ifq->ifq_mbq, m); mtx_unlock(&ifq->ifq_mtx); } /* * Implementation of if ops, that can be called from drivers. */ void if_input(if_t ifp, struct mbuf *m) { return (ifp->if_ops->ifop_input(ifp, m)); } Index: projects/ifnet/sys/net/if.h =================================================================== --- projects/ifnet/sys/net/if.h (revision 277242) +++ projects/ifnet/sys/net/if.h (revision 277243) @@ -1,778 +1,779 @@ /*- * Copyright (c) 1982, 1986, 1989, 1993 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * @(#)if.h 8.1 (Berkeley) 6/10/93 * $FreeBSD$ */ #ifndef _NET_IF_H_ #define _NET_IF_H_ #include #if __BSD_VISIBLE /* * does not depend on on most other systems. This * helps userland compatibility. (struct timeval ifi_lastchange) * The same holds for . (struct sockaddr ifru_addr) */ #ifndef _KERNEL #include #include #endif #endif /* * Length of interface external name, including terminating '\0'. * Note: this is the same size as a generic device's external name. */ #define IF_NAMESIZE 16 #if __BSD_VISIBLE #define IFNAMSIZ IF_NAMESIZE #define IF_MAXUNIT 0x7fff /* historical value */ #endif #if __BSD_VISIBLE /* * Structure used to query names of interface cloners. */ struct if_clonereq { int ifcr_total; /* total cloners (out) */ int ifcr_count; /* room for this many in user buffer */ char *ifcr_buffer; /* buffer for cloner names */ }; /* * Structure describing information about an interface * which may be of interest to management entities. */ struct if_data { /* generic interface information */ uint8_t ifi_type; /* ethernet, tokenring, etc */ uint8_t ifi_physical; /* e.g., AUI, Thinnet, 10base-T, etc */ uint8_t ifi_addrlen; /* media address length */ uint8_t ifi_hdrlen; /* media header length */ uint8_t ifi_link_state; /* current link state */ uint8_t ifi_vhid; /* carp vhid */ uint16_t ifi_datalen; /* length of this data struct */ uint32_t ifi_mtu; /* maximum transmission unit */ uint32_t ifi_metric; /* routing metric (external only) */ uint64_t ifi_baudrate; /* linespeed */ /* volatile statistics */ uint64_t ifi_ipackets; /* packets received on interface */ uint64_t ifi_ierrors; /* input errors on interface */ uint64_t ifi_opackets; /* packets sent on interface */ uint64_t ifi_oerrors; /* output errors on interface */ uint64_t ifi_collisions; /* collisions on csma interfaces */ uint64_t ifi_ibytes; /* total number of octets received */ uint64_t ifi_obytes; /* total number of octets sent */ uint64_t ifi_imcasts; /* packets received via multicast */ uint64_t ifi_omcasts; /* packets sent via multicast */ uint64_t ifi_iqdrops; /* dropped on input */ uint64_t ifi_oqdrops; /* dropped on output */ uint64_t ifi_noproto; /* destined for unsupported protocol */ uint64_t ifi_hwassist; /* HW offload capabilities, see IFCAP */ /* Unions are here to make sizes MI. 
*/ union { /* uptime at attach or stat reset */ time_t tt; uint64_t ph; } __ifi_epoch; #define ifi_epoch __ifi_epoch.tt union { /* time of last administrative change */ struct timeval tv; struct { uint64_t ph1; uint64_t ph2; } ph; } __ifi_lastchange; #define ifi_lastchange __ifi_lastchange.tv }; /*- * Interface flags are of two types: network stack owned flags, and driver * owned flags. Historically, these values were stored in the same ifnet * flags field, but with the advent of fine-grained locking, they have been * broken out such that the network stack is responsible for synchronizing * the stack-owned fields, and the device driver the device-owned fields. * Both halves can perform lockless reads of the other half's field, subject * to accepting the involved races. * * Both sets of flags come from the same number space, and should not be * permitted to conflict, as they are exposed to user space via a single * field. * * The following symbols identify read and write requirements for fields: * * (i) if_flags field set by device driver before attach, read-only there * after. * (n) if_flags field written only by the network stack, read by either the * stack or driver. * (d) if_drv_flags field written only by the device driver, read by either * the stack or driver. */ #define IFF_UP 0x1 /* (n) interface is up */ #define IFF_BROADCAST 0x2 /* (i) broadcast address valid */ #define IFF_DEBUG 0x4 /* (n) turn on debugging */ #define IFF_LOOPBACK 0x8 /* (i) is a loopback net */ #define IFF_POINTOPOINT 0x10 /* (i) is a point-to-point link */ /* 0x20 was IFF_SMART */ #define IFF_RUNNING 0x40 /* (d) resources allocated */ #define IFF_NOARP 0x80 /* (n) no address resolution protocol */ #define IFF_PROMISC 0x100 /* (n) receive all packets */ #define IFF_ALLMULTI 0x200 /* (n) receive all multicast packets */ #define IFF_OACTIVE 0x400 /* (d) tx hardware queue is full */ #define IFF_SIMPLEX 0x800 /* (i) can't hear own transmissions */ #define IFF_LINK0 0x1000 /* per link layer defined bit */ #define IFF_LINK1 0x2000 /* per link layer defined bit */ #define IFF_LINK2 0x4000 /* per link layer defined bit */ #define IFF_ALTPHYS IFF_LINK2 /* use alternate physical connection */ #define IFF_MULTICAST 0x8000 /* (i) supports multicast */ #define IFF_CANTCONFIG 0x10000 /* (i) unconfigurable using ioctl(2) */ #define IFF_PPROMISC 0x20000 /* (n) user-requested promisc mode */ #define IFF_MONITOR 0x40000 /* (n) user-requested monitor mode */ #define IFF_STATICARP 0x80000 /* (n) static ARP */ #define IFF_DYING 0x200000 /* (n) interface is winding down */ #define IFF_RENAMING 0x400000 /* (n) interface is being renamed */ /* flags set internally only: */ #define IFF_CANTCHANGE \ (IFF_BROADCAST|IFF_POINTOPOINT|IFF_RUNNING|IFF_OACTIVE|\ IFF_SIMPLEX|IFF_MULTICAST|IFF_ALLMULTI|IFF_PROMISC|\ IFF_DYING|IFF_CANTCONFIG) /* * Values for if_link_state. */ enum { LINK_STATE_UNKNOWN = 0, /* link invalid/unknown */ LINK_STATE_DOWN, /* link is down */ LINK_STATE_UP, /* link is up */ }; /* * Some convenience macros used for setting ifi_baudrate. * XXX 1000 vs. 1024? --thorpej@netbsd.org */ #define IF_Kbps(x) ((uintmax_t)(x) * 1000) /* kilobits/sec. */ #define IF_Mbps(x) (IF_Kbps((x) * 1000)) /* megabits/sec. */ #define IF_Gbps(x) (IF_Mbps((x) * 1000)) /* gigabits/sec. */ /* * Capabilities that interfaces can advertise. * * struct ifnet.if_capabilities * contains the optional features & capabilities a particular interface * supports (not only the driver but also the detected hw revision). * Capabilities are defined by IFCAP_* below. 
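 */

/*
 * Userland sketch, not part of this change: the if_data counters and the
 * IFF_* flags above as seen through getifaddrs(3), whose AF_LINK entry
 * carries a struct if_data in ifa_data.  The helper name is hypothetical.
 */
#include <sys/types.h>
#include <sys/socket.h>
#include <net/if.h>
#include <ifaddrs.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void
example_print_stats(const char *name)
{
	struct ifaddrs *ifap, *ifa;
	struct if_data *ifd;

	if (getifaddrs(&ifap) != 0)
		return;
	for (ifa = ifap; ifa != NULL; ifa = ifa->ifa_next) {
		if (strcmp(ifa->ifa_name, name) != 0 ||
		    ifa->ifa_addr == NULL ||
		    ifa->ifa_addr->sa_family != AF_LINK)
			continue;
		ifd = ifa->ifa_data;
		printf("%s: %s, %ju packets in, %ju packets out\n", name,
		    (ifa->ifa_flags & IFF_UP) ? "up" : "down",
		    (uintmax_t)ifd->ifi_ipackets,
		    (uintmax_t)ifd->ifi_opackets);
		break;
	}
	freeifaddrs(ifap);
}

/*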
* struct ifnet.if_capenable * contains the enabled (either by default or through ifconfig) optional * features & capabilities on this interface. * Capabilities are defined by IFCAP_* below. * struct if_data.ifi_hwassist in mbuf CSUM_ flag form, controlled by above * contains the enabled optional feature & capabilites that can be used * individually per packet and are specified in the mbuf pkthdr.csum_flags * field. IFCAP_* and CSUM_* do not match one to one and CSUM_* may be * more detailed or differenciated than IFCAP_*. * Hwassist features are defined CSUM_* in sys/mbuf.h * * Capabilities that cannot be arbitrarily changed with ifconfig/ioctl * are listed in IFCAP_CANTCHANGE, similar to IFF_CANTCHANGE. * This is not strictly necessary because the common code never * changes capabilities, and it is left to the individual driver * to do the right thing. However, having the filter here * avoids replication of the same code in all individual drivers. */ #define IFCAP_RXCSUM 0x00001 /* can offload checksum on RX */ #define IFCAP_TXCSUM 0x00002 /* can offload checksum on TX */ #define IFCAP_NETCONS 0x00004 /* can be a network console */ #define IFCAP_VLAN_MTU 0x00008 /* VLAN-compatible MTU */ #define IFCAP_VLAN_HWTAGGING 0x00010 /* hardware VLAN tag support */ #define IFCAP_JUMBO_MTU 0x00020 /* 9000 byte MTU supported */ #define IFCAP_POLLING 0x00040 /* driver supports polling */ #define IFCAP_VLAN_HWCSUM 0x00080 /* can do IFCAP_HWCSUM on VLANs */ #define IFCAP_TSO4 0x00100 /* can do TCP Segmentation Offload */ #define IFCAP_TSO6 0x00200 /* can do TCP6 Segmentation Offload */ #define IFCAP_LRO 0x00400 /* can do Large Receive Offload */ #define IFCAP_WOL_UCAST 0x00800 /* wake on any unicast frame */ #define IFCAP_WOL_MCAST 0x01000 /* wake on any multicast frame */ #define IFCAP_WOL_MAGIC 0x02000 /* wake on any Magic Packet */ #define IFCAP_TOE4 0x04000 /* interface can offload TCP */ #define IFCAP_TOE6 0x08000 /* interface can offload TCP6 */ #define IFCAP_VLAN_HWFILTER 0x10000 /* interface hw can filter vlan tag */ #define IFCAP_POLLING_NOCOUNT 0x20000 /* polling ticks cannot be fragmented */ #define IFCAP_VLAN_HWTSO 0x40000 /* can do IFCAP_TSO on VLANs */ #define IFCAP_LINKSTATE 0x80000 /* the runtime link state is dynamic */ #define IFCAP_NETMAP 0x100000 /* netmap mode supported/enabled */ #define IFCAP_RXCSUM_IPV6 0x200000 /* can offload checksum on IPv6 RX */ #define IFCAP_TXCSUM_IPV6 0x400000 /* can offload checksum on IPv6 TX */ #define IFCAP_HWSTATS 0x800000 /* manages counters internally */ #define IFCAP_HWCSUM_IPV6 (IFCAP_RXCSUM_IPV6 | IFCAP_TXCSUM_IPV6) #define IFCAP_HWCSUM (IFCAP_RXCSUM | IFCAP_TXCSUM) #define IFCAP_TSO (IFCAP_TSO4 | IFCAP_TSO6) #define IFCAP_WOL (IFCAP_WOL_UCAST | IFCAP_WOL_MCAST | IFCAP_WOL_MAGIC) #define IFCAP_TOE (IFCAP_TOE4 | IFCAP_TOE6) #define IFCAP_CANTCHANGE (IFCAP_NETMAP) #define IFQ_MAXLEN 50 #define IFNET_SLOWHZ 1 /* granularity is 1 second */ /* * Message format for use in obtaining information about interfaces * from getkerninfo and the routing socket * For the new, extensible interface see struct if_msghdrl below. */ struct if_msghdr { u_short ifm_msglen; /* to skip over non-understood messages */ u_char ifm_version; /* future binary compatibility */ u_char ifm_type; /* message type */ int ifm_addrs; /* like rtm_addrs */ int ifm_flags; /* value of if_flags */ u_short ifm_index; /* index for associated ifp */ struct if_data ifm_data;/* statistics and other data about if */ }; /* * The 'l' version shall be used by new interfaces, like NET_RT_IFLISTL. 
It is * extensible after ifm_data_off or within ifm_data. Both the if_msghdr and * if_data now have a member field detailing the struct length in addition to * the routing message length. Macros are provided to find the start of * ifm_data and the start of the socket address strucutres immediately following * struct if_msghdrl given a pointer to struct if_msghdrl. */ #define IF_MSGHDRL_IFM_DATA(_l) \ (struct if_data *)((char *)(_l) + (_l)->ifm_data_off) #define IF_MSGHDRL_RTA(_l) \ (void *)((uintptr_t)(_l) + (_l)->ifm_len) struct if_msghdrl { u_short ifm_msglen; /* to skip over non-understood messages */ u_char ifm_version; /* future binary compatibility */ u_char ifm_type; /* message type */ int ifm_addrs; /* like rtm_addrs */ int ifm_flags; /* value of if_flags */ u_short ifm_index; /* index for associated ifp */ u_short _ifm_spare1; /* spare space to grow if_index, see if_var.h */ u_short ifm_len; /* length of if_msghdrl incl. if_data */ u_short ifm_data_off; /* offset of if_data from beginning */ struct if_data ifm_data;/* statistics and other data about if */ }; /* * Message format for use in obtaining information about interface addresses * from getkerninfo and the routing socket * For the new, extensible interface see struct ifa_msghdrl below. */ struct ifa_msghdr { u_short ifam_msglen; /* to skip over non-understood messages */ u_char ifam_version; /* future binary compatibility */ u_char ifam_type; /* message type */ int ifam_addrs; /* like rtm_addrs */ int ifam_flags; /* value of ifa_flags */ u_short ifam_index; /* index for associated ifp */ int ifam_metric; /* value of ifa_ifp->if_metric */ }; /* * The 'l' version shall be used by new interfaces, like NET_RT_IFLISTL. It is * extensible after ifam_metric or within ifam_data. Both the ifa_msghdrl and * if_data now have a member field detailing the struct length in addition to * the routing message length. Macros are provided to find the start of * ifm_data and the start of the socket address strucutres immediately following * struct ifa_msghdrl given a pointer to struct ifa_msghdrl. */ #define IFA_MSGHDRL_IFAM_DATA(_l) \ (struct if_data *)((char *)(_l) + (_l)->ifam_data_off) #define IFA_MSGHDRL_RTA(_l) \ (void *)((uintptr_t)(_l) + (_l)->ifam_len) struct ifa_msghdrl { u_short ifam_msglen; /* to skip over non-understood messages */ u_char ifam_version; /* future binary compatibility */ u_char ifam_type; /* message type */ int ifam_addrs; /* like rtm_addrs */ int ifam_flags; /* value of ifa_flags */ u_short ifam_index; /* index for associated ifp */ u_short _ifam_spare1; /* spare space to grow if_index, see if_var.h */ u_short ifam_len; /* length of ifa_msghdrl incl. if_data */ u_short ifam_data_off; /* offset of if_data from beginning */ int ifam_metric; /* value of ifa_ifp->if_metric */ struct if_data ifam_data;/* statistics and other data about if or * address */ }; /* * Message format for use in obtaining information about multicast addresses * from the routing socket */ struct ifma_msghdr { u_short ifmam_msglen; /* to skip over non-understood messages */ u_char ifmam_version; /* future binary compatibility */ u_char ifmam_type; /* message type */ int ifmam_addrs; /* like rtm_addrs */ int ifmam_flags; /* value of ifa_flags */ u_short ifmam_index; /* index for associated ifp */ }; /* * Message format announcing the arrival or departure of a network interface. 
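 */

/*
 * Userland sketch, not part of this change: walking a NET_RT_IFLISTL
 * sysctl dump with the IF_MSGHDRL_IFM_DATA() accessor defined above,
 * much as getifaddrs(3) consumes the extensible 'l' messages.  The
 * helper name is hypothetical and error handling is minimal.
 */
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <net/if.h>
#include <net/route.h>
#include <stdio.h>
#include <stdlib.h>

static int
example_dump_iflistl(void)
{
	int mib[6] = { CTL_NET, PF_ROUTE, 0, 0, NET_RT_IFLISTL, 0 };
	struct if_msghdrl *ifm;
	struct if_data *ifd;
	size_t len;
	char *buf, *next;

	if (sysctl(mib, 6, NULL, &len, NULL, 0) != 0)
		return (-1);
	if ((buf = malloc(len)) == NULL)
		return (-1);
	if (sysctl(mib, 6, buf, &len, NULL, 0) != 0) {
		free(buf);
		return (-1);
	}
	for (next = buf; next < buf + len; next += ifm->ifm_msglen) {
		ifm = (struct if_msghdrl *)(void *)next;
		if (ifm->ifm_type != RTM_IFINFO)
			continue;
		ifd = IF_MSGHDRL_IFM_DATA(ifm);
		printf("ifindex %u: mtu %u\n", ifm->ifm_index, ifd->ifi_mtu);
	}
	free(buf);
	return (0);
}

/*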
*/ struct if_announcemsghdr { u_short ifan_msglen; /* to skip over non-understood messages */ u_char ifan_version; /* future binary compatibility */ u_char ifan_type; /* message type */ u_short ifan_index; /* index for associated ifp */ char ifan_name[IFNAMSIZ]; /* if name, e.g. "en0" */ u_short ifan_what; /* what type of announcement */ }; #define IFAN_ARRIVAL 0 /* interface arrival */ #define IFAN_DEPARTURE 1 /* interface departure */ /* * Buffer with length to be used in SIOCGIFDESCR/SIOCSIFDESCR requests */ struct ifreq_buffer { size_t length; void *buffer; }; /* * Interface request structure used for socket * ioctl's. All interface ioctl's must have parameter * definitions which begin with ifr_name. The * remainder may be interface specific. */ struct ifreq { char ifr_name[IFNAMSIZ]; /* if name, e.g. "en0" */ union { struct sockaddr ifru_addr; struct sockaddr ifru_dstaddr; struct sockaddr ifru_broadaddr; struct ifreq_buffer ifru_buffer; + struct { + uint32_t ifrucap_reqcap; /* requested/returned */ + uint32_t ifrucap_curcap; /* current values */ + uint64_t ifrucap_hwassist; /* returned hwassist */ + } ifru_cap; short ifru_flags[2]; short ifru_index; int ifru_jid; int ifru_metric; int ifru_mtu; int ifru_phys; int ifru_media; caddr_t ifru_data; - int ifru_cap[2]; u_int ifru_fib; } ifr_ifru; #define ifr_addr ifr_ifru.ifru_addr /* address */ #define ifr_dstaddr ifr_ifru.ifru_dstaddr /* other end of p-to-p link */ #define ifr_broadaddr ifr_ifru.ifru_broadaddr /* broadcast address */ #define ifr_buffer ifr_ifru.ifru_buffer /* user supplied buffer with its length */ #define ifr_flags ifr_ifru.ifru_flags[0] /* flags (low 16 bits) */ #define ifr_flagshigh ifr_ifru.ifru_flags[1] /* flags (high 16 bits) */ #define ifr_jid ifr_ifru.ifru_jid /* jail/vnet */ #define ifr_metric ifr_ifru.ifru_metric /* metric */ #define ifr_mtu ifr_ifru.ifru_mtu /* mtu */ #define ifr_phys ifr_ifru.ifru_phys /* physical wire */ #define ifr_media ifr_ifru.ifru_media /* physical media */ #define ifr_data ifr_ifru.ifru_data /* for use by interface */ -#define ifr_reqcap ifr_ifru.ifru_cap[0] /* requested capabilities */ -#define ifr_curcap ifr_ifru.ifru_cap[1] /* current capabilities */ +#define ifr_reqcap ifr_ifru.ifru_cap.ifrucap_reqcap +#define ifr_curcap ifr_ifru.ifru_cap.ifrucap_curcap +#define ifr_hwassist ifr_ifru.ifru_cap.ifrucap_hwassist #define ifr_index ifr_ifru.ifru_index /* interface index */ #define ifr_fib ifr_ifru.ifru_fib /* interface fib */ }; #define _SIZEOF_ADDR_IFREQ(ifr) \ ((ifr).ifr_addr.sa_len > sizeof(struct sockaddr) ? \ (sizeof(struct ifreq) - sizeof(struct sockaddr) + \ (ifr).ifr_addr.sa_len) : sizeof(struct ifreq)) struct ifaliasreq { char ifra_name[IFNAMSIZ]; /* if name, e.g. "en0" */ struct sockaddr ifra_addr; struct sockaddr ifra_broadaddr; struct sockaddr ifra_mask; int ifra_vhid; }; /* 9.x compat */ struct oifaliasreq { char ifra_name[IFNAMSIZ]; struct sockaddr ifra_addr; struct sockaddr ifra_broadaddr; struct sockaddr ifra_mask; }; struct ifmediareq { char ifm_name[IFNAMSIZ]; /* if name, e.g. "en0" */ int ifm_current; /* current media options */ int ifm_mask; /* don't care mask */ int ifm_status; /* media status */ int ifm_active; /* active options */ int ifm_count; /* # entries in ifm_ulist array */ int *ifm_ulist; /* media words */ }; struct ifdrv { char ifd_name[IFNAMSIZ]; /* if name, e.g. "en0" */ unsigned long ifd_cmd; size_t ifd_len; void *ifd_data; }; /* * Structure used to retrieve aux status data from interfaces. 
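 */

/*
 * Userland sketch, not part of this change: the classic SIOCGIFCAP/
 * SIOCSIFCAP dance using ifr_reqcap (supported bits) and ifr_curcap
 * (currently enabled bits), which keep their meaning under the new
 * ifru_cap layout above.  Whether SIOCGIFCAP also fills the new
 * ifr_hwassist field is an assumption and is not relied on here; the
 * helper name and interface-name argument are hypothetical.
 */
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <net/if.h>
#include <string.h>
#include <unistd.h>

static int
example_enable_txcsum(const char *name)
{
	struct ifreq ifr;
	int error, s;

	if ((s = socket(AF_INET, SOCK_DGRAM, 0)) == -1)
		return (-1);
	memset(&ifr, 0, sizeof(ifr));
	strlcpy(ifr.ifr_name, name, sizeof(ifr.ifr_name));

	error = ioctl(s, SIOCGIFCAP, &ifr);
	if (error == 0 && (ifr.ifr_reqcap & IFCAP_TXCSUM) != 0) {
		ifr.ifr_reqcap = ifr.ifr_curcap | IFCAP_TXCSUM;
		error = ioctl(s, SIOCSIFCAP, &ifr);
	}
	close(s);
	return (error);
}

/*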
* Kernel suppliers to this interface should respect the formatting * needed by ifconfig(8): each line starts with a TAB and ends with * a newline. The canonical example to copy and paste is in if_tun.c. */ #define IFSTATMAX 800 /* 10 lines of text */ struct ifstat { char ifs_name[IFNAMSIZ]; /* if name, e.g. "en0" */ char ascii[IFSTATMAX + 1]; }; /* * Structure used in SIOCGIFCONF request. * Used to retrieve interface configuration * for machine (useful for programs which * must know all networks accessible). */ struct ifconf { int ifc_len; /* size of associated buffer */ union { caddr_t ifcu_buf; struct ifreq *ifcu_req; } ifc_ifcu; #define ifc_buf ifc_ifcu.ifcu_buf /* buffer address */ #define ifc_req ifc_ifcu.ifcu_req /* array of structures returned */ }; /* * interface groups */ #define IFG_ALL "all" /* group contains all interfaces */ /* XXX: will we implement this? */ #define IFG_EGRESS "egress" /* if(s) default route(s) point to */ struct ifg_req { union { char ifgrqu_group[IFNAMSIZ]; char ifgrqu_member[IFNAMSIZ]; } ifgrq_ifgrqu; #define ifgrq_group ifgrq_ifgrqu.ifgrqu_group #define ifgrq_member ifgrq_ifgrqu.ifgrqu_member }; /* * Used to lookup groups for an interface */ struct ifgroupreq { char ifgr_name[IFNAMSIZ]; u_int ifgr_len; union { char ifgru_group[IFNAMSIZ]; struct ifg_req *ifgru_groups; } ifgr_ifgru; #define ifgr_group ifgr_ifgru.ifgru_group #define ifgr_groups ifgr_ifgru.ifgru_groups }; /* * Structure used to request i2c data * from interface transceivers. */ struct ifi2creq { uint8_t dev_addr; /* i2c address (0xA0, 0xA2) */ uint8_t offset; /* read offset */ uint8_t len; /* read length */ uint8_t spare0; uint32_t spare1; uint8_t data[8]; /* read buffer */ }; #endif /* __BSD_VISIBLE */ #ifndef _KERNEL struct if_nameindex { unsigned int if_index; /* 1, 2, ... */ char *if_name; /* null terminated name: "le0", ... */ }; __BEGIN_DECLS void if_freenameindex(struct if_nameindex *); char *if_indextoname(unsigned int, char *); struct if_nameindex *if_nameindex(void); unsigned int if_nametoindex(const char *); __END_DECLS #endif #ifdef _KERNEL #include /* * Under _KERNEL there live declarations from net/if.c, that are public * and available to network device drivers. Declarations that are protected * from drivers, but available to the stack live in if_var.h. */ /* Some forward declarations are required. */ struct mbuf; /* if_input, if_output, if_transmit */ struct route; /* if_output */ struct vnet; /* if_reassign */ #ifdef MALLOC_DECLARE MALLOC_DECLARE(M_IFADDR); MALLOC_DECLARE(M_IFMADDR); #endif typedef enum { IFCOUNTER_IPACKETS = 0, IFCOUNTER_IERRORS, IFCOUNTER_OPACKETS, IFCOUNTER_OERRORS, IFCOUNTER_COLLISIONS, IFCOUNTER_IBYTES, IFCOUNTER_OBYTES, IFCOUNTER_IMCASTS, IFCOUNTER_OMCASTS, IFCOUNTER_IQDROPS, IFCOUNTER_OQDROPS, IFCOUNTER_NOPROTO, IFCOUNTERS /* Array size (used internally). 
*/ } ift_counter; typedef enum { /* uint32_t */ IF_FLAGS, - IF_CAPABILITIES, - IF_CAPENABLE, - IF_MTU, IF_FIB, /* uint64_t */ - IF_HWASSIST, IF_BAUDRATE, /* pointers */ IF_DRIVER_SOFTC, IF_LLADDR, IF_BPF, IF_NAME, } ift_feature; typedef struct ifnet * if_t; typedef void (*if_init_t)(void *); typedef void (*if_input_t)(if_t, struct mbuf *); typedef int (*if_transmit_t)(if_t, struct mbuf *); typedef int (*if_output_t)(if_t, struct mbuf *, const struct sockaddr *, struct route *); typedef int (*if_ioctl_t)(if_t, u_long, void *, struct thread *); typedef uint64_t (*if_get_counter_t)(if_t, ift_counter); typedef void (*if_qflush_t)(if_t); typedef int (*if_resolvemulti_t)(if_t, struct sockaddr **, struct sockaddr *); typedef void (*if_reassign_t)(if_t, struct vnet *); /* * Interface methods. Usually stored in ifdriver definition, however * some subsystems like lagg(4) or altq(4) may put a shim ifops before * native ones. */ struct ifops { if_input_t ifop_input; /* input routine (from h/w driver) */ if_transmit_t ifop_transmit; /* initiate output routine */ if_output_t ifop_output; if_ioctl_t ifop_ioctl; /* ioctl routine */ if_get_counter_t ifop_get_counter; /* get counter values */ if_init_t ifop_init; /* init routine */ if_qflush_t ifop_qflush; /* flush any queue */ if_resolvemulti_t ifop_resolvemulti; /* validate/resolve multicast */ if_reassign_t ifop_reassign; /* reassign to vnet routine */ struct ifops *ifop_next; uint8_t ifop_origin; }; enum { IFOP_ORIGIN_DRIVER = 1, IFOP_ORIGIN_IFTYPE = 2, }; /* * Structure describing TSO properties of an interface. Known both to ifnet * layer and TCP. Most interfaces point to a static tsomax in ifdriver * definition. However, vlan(4) and lagg(4) require a dynamic tsomax. */ struct iftsomax { uint32_t tsomax_bytes; /* TSO total burst length limit in bytes */ uint32_t tsomax_segcount; /* TSO maximum segment count */ uint32_t tsomax_segsize; /* TSO maximum segment size in bytes */ }; /* * Driver description. All instances of a driver share common properties * that are stable during runtime. The stack can bless them, which * means modify, when attaching the first instance of given * driver. */ struct ifdriver { struct ifops ifdrv_ops; struct iftsomax *ifdrv_tsomax; /* * The ifdrv_name must be a pointer to storage which will last as * long as any interface does. For physical devices, the result of * device_get_name(dev) is a good choice and for pseudo-devices a * static string works well. */ const char * ifdrv_name; struct if_clone *ifdrv_clone; ifType ifdrv_type; /* from if_types.h */ uint8_t ifdrv_hdrlen; /* media header length */ uint8_t ifdrv_addrlen; /* media address length */ uint32_t ifdrv_dlt; /* from net/bpf.h */ uint32_t ifdrv_dlt_hdrlen; uint32_t ifdrv_maxqlen; /* max queue length for if_snd */ /* * Owned by stack. Drivers shouldn't initialize these! */ uint32_t __ifdrv_stack_owned; }; /* * Arguments for if_attach(). Usually stored on stack of device_attach * function in driver. In future this structure will probably have * different versions, so that we can support older ABIs for drivers. */ struct if_attach_args { uint8_t ifat_version; /* must be IF_ATTACH_VERSION */ #define IF_ATTACH_VERSION 1 uint8_t ifat_spare8; uint16_t ifat_spare16; uint32_t ifat_spare32; int ifat_error; /* Filled on return. */ struct ifdriver *ifat_drv; void *ifat_softc; /* Driver private softc. */ const uint8_t *ifat_lla; /* Link-level address. */ int32_t ifat_dunit; /* Specific unit or a hint. 
*/ #define IFAT_DUNIT_NONE (-1) char * ifat_name; /* If driver wants a specific name. */ /* * Variables that may differ between two instances of a same * driver, but are constant within instance lifetime. */ uint64_t ifat_capabilities; /* * MTU, flags, capabilities at attach time. Driver * can change them later. */ uint32_t ifat_mtu; uint64_t ifat_flags; uint64_t ifat_capenable; uint64_t ifat_hwassist; uint64_t ifat_baudrate; /* * If ifat_tsomax pointer is non-zero, then an interface will * have dynamically allocated ifdrv_tsomax, that can be changed * later. Otherwise it inherits static iftsomax from ifdriver. */ struct iftsomax *ifat_tsomax; }; /* * Interface manipulating functions that are available for drivers. */ if_t if_attach(struct if_attach_args *); void if_detach(if_t); void if_input(if_t, struct mbuf *); void if_mtap(if_t, struct mbuf *, void *, u_int); void if_inc_counter(if_t, ift_counter, int64_t); void if_inc_txcounters(if_t, struct mbuf *); void if_link_state_change(if_t, int); void if_set(if_t, ift_feature, uint64_t); void * if_getsoftc(if_t, ift_feature); int if_printf(if_t, const char *, ...) __printflike(2, 3); int if_drvioctl(u_long, struct ifnet *, void *, struct thread *); uint64_t if_get(if_t, ift_feature); uint64_t if_flagbits(if_t, ift_feature, uint64_t, uint64_t, uint64_t); uint64_t if_get_counter_default(if_t, ift_counter); /* * Traversing through interface address lists. */ typedef void ifaddr_cb_t(void *, struct sockaddr *, struct sockaddr *, struct sockaddr *); typedef void ifmaddr_cb_t(void *, struct sockaddr *); void if_foreach_addr(if_t, ifaddr_cb_t, void *); void if_foreach_maddr(if_t, ifmaddr_cb_t, void *); /* * Generic software send queue manipulation. */ int if_snd_len(if_t); int if_snd_enqueue(if_t, struct mbuf *); struct mbuf * if_snd_dequeue(if_t); void if_snd_prepend(if_t, struct mbuf *); /* * Type-enforcing inliners over declared above functions. */ static inline uint64_t if_addflags(if_t ifp, ift_feature f, uint64_t add) { return (if_flagbits(ifp, f, add, 0, 0)); } static inline uint64_t if_clrflags(if_t ifp, ift_feature f, uint64_t clr) { return (if_flagbits(ifp, f, 0, clr, 0)); } static inline uint64_t if_xorflags(if_t ifp, ift_feature f, uint64_t xor) { return (if_flagbits(ifp, f, 0, 0, xor)); } static inline char * if_lladdr(if_t ifp) { return ((char *)(if_getsoftc(ifp, IF_LLADDR))); } static inline const char * if_name(if_t ifp) { return ((char *)(if_getsoftc(ifp, IF_NAME))); } #endif /* _KERNEL */ #endif /* !_NET_IF_H_ */ Index: projects/ifnet/sys/net/if_var.h =================================================================== --- projects/ifnet/sys/net/if_var.h (revision 277242) +++ projects/ifnet/sys/net/if_var.h (revision 277243) @@ -1,526 +1,526 @@ /*- * Copyright (c) 1982, 1986, 1989, 1993 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. 
* * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * From: @(#)if.h 8.1 (Berkeley) 6/10/93 * $FreeBSD$ */ #ifndef _NET_IF_VAR_H_ #define _NET_IF_VAR_H_ struct rtentry; /* ifa_rtrequest */ struct rt_addrinfo; /* ifa_rtrequest */ struct socket; struct carp_if; struct carp_softc; struct ifvlantrunk; struct ifmedia; struct netmap_adapter; #ifdef _KERNEL #include /* ifqueue only? */ #include #include #endif /* _KERNEL */ #include #include /* XXX */ #include /* struct ifqueue */ #include /* XXX */ #include /* XXX */ #include /* if_link_task */ #include TAILQ_HEAD(ifnethead, ifnet); /* we use TAILQs so that the order of */ TAILQ_HEAD(ifaddrhead, ifaddr); /* instantiation is preserved in the list */ TAILQ_HEAD(ifmultihead, ifmultiaddr); TAILQ_HEAD(ifgrouphead, ifg_group); #ifdef _KERNEL VNET_DECLARE(struct pfil_head, link_pfil_hook); /* packet filter hooks */ #define V_link_pfil_hook VNET(link_pfil_hook) #endif /* _KERNEL */ typedef void (*iftype_attach_t)(if_t ifp, struct if_attach_args *args); typedef void (*iftype_detach_t)(if_t ifp); struct iftype { const ifType ift_type; SLIST_ENTRY(iftype) ift_next; iftype_attach_t ift_attach; iftype_detach_t ift_detach; uint8_t ift_hdrlen; uint8_t ift_addrlen; uint32_t ift_dlt; uint32_t ift_dlt_hdrlen; struct ifops ift_ops; }; /* * Structure defining a network interface. * * (Would like to call this struct ``if'', but C isn't PL/1.) */ struct ifnet { struct ifops *if_ops; /* driver ops (or overridden) */ void *if_softc; /* driver soft state */ struct ifdriver *if_drv; /* driver static definition */ struct iftype *if_type; /* if type static def (optional)*/ struct iftsomax *if_tsomax; /* TSO limits */ /* General book keeping of interface lists. */ TAILQ_ENTRY(ifnet) if_link; /* all struct ifnets are chained */ LIST_ENTRY(ifnet) if_clones; /* interfaces of a cloner */ TAILQ_HEAD(, ifg_list) if_groups; /* linked list of groups per if */ /* protected by if_addr_lock */ void *if_llsoftc; /* link layer softc */ void *if_l2com; /* pointer to protocol bits */ int if_dunit; /* unit or IF_DUNIT_NONE */ u_short if_index; /* numeric abbreviation for this if */ short if_index_reserved; /* spare space to grow if_index */ char if_xname[IFNAMSIZ]; /* external name (name + unit) */ char *if_description; /* interface description */ /* Variable fields that are touched by the stack and drivers. */ uint32_t if_flags; /* up/down, broadcast, etc. 
*/ uint32_t if_capabilities;/* interface features & capabilities */ uint32_t if_capenable; /* enabled features & capabilities */ void *if_linkmib; /* link-type-specific MIB data */ size_t if_linkmiblen; /* length of above data */ u_int if_refcount; /* reference count */ u_int if_fib; /* interface FIB */ uint8_t if_link_state; /* current link state */ uint32_t if_mtu; /* maximum transmission unit */ uint32_t if_metric; /* routing metric (external only) */ uint64_t if_baudrate; /* linespeed */ uint64_t if_hwassist; /* HW offload capabilities, see IFCAP */ time_t if_epoch; /* uptime at attach or stat reset */ struct timeval if_lastchange; /* time of last administrative change */ struct task if_linktask; /* task for link change events */ /* Addresses of different protocol families assigned to this if. */ struct rwlock if_addr_lock; /* lock to protect address lists */ /* * if_addrhead is the list of all addresses associated to * an interface. * Some code in the kernel assumes that first element * of the list has type AF_LINK, and contains sockaddr_dl * addresses which store the link-level address and the name * of the interface. * However, access to the AF_LINK address through this * field is deprecated. Use if_addr or ifaddr_byindex() instead. */ struct ifaddrhead if_addrhead; /* linked list of addresses per if */ struct ifmultihead if_multiaddrs; /* multicast addresses configured */ int if_amcount; /* number of all-multicast requests */ struct ifaddr *if_addr; /* pointer to link-level address */ const u_int8_t *if_broadcastaddr; /* linklevel broadcast bytestring */ struct rwlock if_afdata_lock; void *if_afdata[AF_MAX]; int if_afdata_initialized; /* Additional features hung off the interface. */ struct ifqueue *if_snd; /* software send queue */ struct vnet *if_vnet; /* pointer to network stack instance */ struct vnet *if_home_vnet; /* where this ifnet originates from */ struct ifvlantrunk *if_vlantrunk; /* pointer to 802.1q data */ struct bpf_if *if_bpf; /* packet filter structure */ int if_pcount; /* number of promiscuous listeners */ void *if_bridge; /* bridge glue */ void *if_lagg; /* lagg glue */ void *if_pf_kif; /* pf glue */ struct carp_if *if_carp; /* carp interface structure */ struct label *if_label; /* interface MAC label */ struct netmap_adapter *if_netmap; /* netmap(4) softc */ counter_u64_t if_counters[IFCOUNTERS]; /* Statistics */ /* * Spare fields to be added before branching a stable branch, so * that structure can be enhanced without changing the kernel * binary interface. */ }; /* * Locks for address lists on the network interface. 
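 * The lock is an rwlock: the write lock must be held to modify
 * if_addrhead or if_multiaddrs (see if_addmulti() and if_delmulti_locked()
 * in if.c), while the read lock suffices for traversal, as done by
 * if_foreach_addr() and if_foreach_maddr().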
*/ #define IF_ADDR_LOCK_INIT(if) rw_init(&(if)->if_addr_lock, "if_addr_lock") #define IF_ADDR_LOCK_DESTROY(if) rw_destroy(&(if)->if_addr_lock) #define IF_ADDR_WLOCK(if) rw_wlock(&(if)->if_addr_lock) #define IF_ADDR_WUNLOCK(if) rw_wunlock(&(if)->if_addr_lock) #define IF_ADDR_RLOCK(if) rw_rlock(&(if)->if_addr_lock) #define IF_ADDR_RUNLOCK(if) rw_runlock(&(if)->if_addr_lock) #define IF_ADDR_LOCK_ASSERT(if) rw_assert(&(if)->if_addr_lock, RA_LOCKED) #define IF_ADDR_WLOCK_ASSERT(if) rw_assert(&(if)->if_addr_lock, RA_WLOCKED) #ifdef _KERNEL #ifdef _SYS_EVENTHANDLER_H_ /* interface link layer address change event */ typedef void (*iflladdr_event_handler_t)(void *, struct ifnet *); EVENTHANDLER_DECLARE(iflladdr_event, iflladdr_event_handler_t); /* interface address change event */ typedef void (*ifaddr_event_handler_t)(void *, struct ifnet *); EVENTHANDLER_DECLARE(ifaddr_event, ifaddr_event_handler_t); /* new interface arrival event */ typedef void (*ifnet_arrival_event_handler_t)(void *, struct ifnet *); EVENTHANDLER_DECLARE(ifnet_arrival_event, ifnet_arrival_event_handler_t); /* interface departure event */ typedef void (*ifnet_departure_event_handler_t)(void *, struct ifnet *); EVENTHANDLER_DECLARE(ifnet_departure_event, ifnet_departure_event_handler_t); /* Interface link state change event */ typedef void (*ifnet_link_event_handler_t)(void *, struct ifnet *, int); EVENTHANDLER_DECLARE(ifnet_link_event, ifnet_link_event_handler_t); #endif /* _SYS_EVENTHANDLER_H_ */ /* * interface groups */ struct ifg_group { char ifg_group[IFNAMSIZ]; u_int ifg_refcnt; void *ifg_pf_kif; TAILQ_HEAD(, ifg_member) ifg_members; TAILQ_ENTRY(ifg_group) ifg_next; }; struct ifg_member { TAILQ_ENTRY(ifg_member) ifgm_next; struct ifnet *ifgm_ifp; }; struct ifg_list { struct ifg_group *ifgl_group; TAILQ_ENTRY(ifg_list) ifgl_next; }; #ifdef _SYS_EVENTHANDLER_H_ /* group attach event */ typedef void (*group_attach_event_handler_t)(void *, struct ifg_group *); EVENTHANDLER_DECLARE(group_attach_event, group_attach_event_handler_t); /* group detach event */ typedef void (*group_detach_event_handler_t)(void *, struct ifg_group *); EVENTHANDLER_DECLARE(group_detach_event, group_detach_event_handler_t); /* group change event */ typedef void (*group_change_event_handler_t)(void *, const char *); EVENTHANDLER_DECLARE(group_change_event, group_change_event_handler_t); #endif /* _SYS_EVENTHANDLER_H_ */ #define IF_AFDATA_LOCK_INIT(ifp) \ rw_init(&(ifp)->if_afdata_lock, "if_afdata") #define IF_AFDATA_WLOCK(ifp) rw_wlock(&(ifp)->if_afdata_lock) #define IF_AFDATA_RLOCK(ifp) rw_rlock(&(ifp)->if_afdata_lock) #define IF_AFDATA_WUNLOCK(ifp) rw_wunlock(&(ifp)->if_afdata_lock) #define IF_AFDATA_RUNLOCK(ifp) rw_runlock(&(ifp)->if_afdata_lock) #define IF_AFDATA_LOCK(ifp) IF_AFDATA_WLOCK(ifp) #define IF_AFDATA_UNLOCK(ifp) IF_AFDATA_WUNLOCK(ifp) #define IF_AFDATA_TRYLOCK(ifp) rw_try_wlock(&(ifp)->if_afdata_lock) #define IF_AFDATA_DESTROY(ifp) rw_destroy(&(ifp)->if_afdata_lock) #define IF_AFDATA_LOCK_ASSERT(ifp) rw_assert(&(ifp)->if_afdata_lock, RA_LOCKED) #define IF_AFDATA_RLOCK_ASSERT(ifp) rw_assert(&(ifp)->if_afdata_lock, RA_RLOCKED) #define IF_AFDATA_WLOCK_ASSERT(ifp) rw_assert(&(ifp)->if_afdata_lock, RA_WLOCKED) #define IF_AFDATA_UNLOCK_ASSERT(ifp) rw_assert(&(ifp)->if_afdata_lock, RA_UNLOCKED) /* * 72 was chosen below because it is the size of a TCP/IP * header (40) + the minimum mss (32). 
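 */

/*
 * Kernel-side sketch, not part of this change: consuming the
 * ifnet_link_event declared above from another subsystem.  The
 * example_* names are hypothetical; assumes <sys/eventhandler.h> and
 * <sys/kernel.h> are available.
 */
static eventhandler_tag example_link_tag;

static void
example_link_event(void *arg, struct ifnet *ifp, int link_state)
{

	if (link_state == LINK_STATE_UP)
		printf("%s: link up\n", if_name(ifp));
}

static void
example_link_init(void *dummy)
{

	example_link_tag = EVENTHANDLER_REGISTER(ifnet_link_event,
	    example_link_event, NULL, EVENTHANDLER_PRI_ANY);
}
SYSINIT(example_link, SI_SUB_PSEUDO, SI_ORDER_ANY, example_link_init, NULL);

/*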
*/ #define IF_MINMTU 72 #define IF_MAXMTU 65535 #define TOEDEV(ifp) ((ifp)->if_llsoftc) #endif /* _KERNEL */ /* * The ifaddr structure contains information about one address * of an interface. They are maintained by the different address families, * are allocated and attached when an address is set, and are linked * together so all addresses for an interface can be located. * * NOTE: a 'struct ifaddr' is always at the beginning of a larger * chunk of malloc'ed memory, where we store the three addresses * (ifa_addr, ifa_dstaddr and ifa_netmask) referenced here. */ #if defined(_KERNEL) || defined(_WANT_IFADDR) struct ifaddr { struct sockaddr *ifa_addr; /* address of interface */ struct sockaddr *ifa_dstaddr; /* other end of p-to-p link */ #define ifa_broadaddr ifa_dstaddr /* broadcast address interface */ struct sockaddr *ifa_netmask; /* used to determine subnet */ struct ifnet *ifa_ifp; /* back-pointer to interface */ struct carp_softc *ifa_carp; /* pointer to CARP data */ TAILQ_ENTRY(ifaddr) ifa_link; /* queue macro glue */ void (*ifa_rtrequest) /* check or clean routes (+ or -)'d */ (int, struct rtentry *, struct rt_addrinfo *); u_short ifa_flags; /* mostly rt_flags for cloning */ u_int ifa_refcnt; /* references to this structure */ counter_u64_t ifa_ipackets; counter_u64_t ifa_opackets; counter_u64_t ifa_ibytes; counter_u64_t ifa_obytes; }; #endif #ifdef _KERNEL #define IFA_ROUTE RTF_UP /* route installed */ #define IFA_RTSELF RTF_HOST /* loopback route to self installed */ /* For compatibility with other BSDs. SCTP uses it. */ #define ifa_list ifa_link struct ifaddr * ifa_alloc(size_t size, int flags); void ifa_free(struct ifaddr *ifa); void ifa_ref(struct ifaddr *ifa); #endif /* _KERNEL */ /* * Multicast address structure. This is analogous to the ifaddr * structure except that it keeps track of multicast addresses. */ struct ifmultiaddr { TAILQ_ENTRY(ifmultiaddr) ifma_link; /* queue macro glue */ struct sockaddr *ifma_addr; /* address this membership is for */ struct sockaddr *ifma_lladdr; /* link-layer translation, if any */ struct ifnet *ifma_ifp; /* back-pointer to interface */ u_int ifma_refcount; /* reference count */ void *ifma_protospec; /* protocol-specific state, if any */ struct ifmultiaddr *ifma_llifma; /* pointer to ifma for ifma_lladdr */ }; #ifdef _KERNEL extern struct rwlock ifnet_rwlock; extern struct sx ifnet_sxlock; #define IFNET_WLOCK() do { \ sx_xlock(&ifnet_sxlock); \ rw_wlock(&ifnet_rwlock); \ } while (0) #define IFNET_WUNLOCK() do { \ rw_wunlock(&ifnet_rwlock); \ sx_xunlock(&ifnet_sxlock); \ } while (0) /* * To assert the ifnet lock, you must know not only whether it's for read or * write, but also whether it was acquired with sleep support or not. */ #define IFNET_RLOCK_ASSERT() sx_assert(&ifnet_sxlock, SA_SLOCKED) #define IFNET_RLOCK_NOSLEEP_ASSERT() rw_assert(&ifnet_rwlock, RA_RLOCKED) #define IFNET_WLOCK_ASSERT() do { \ sx_assert(&ifnet_sxlock, SA_XLOCKED); \ rw_assert(&ifnet_rwlock, RA_WLOCKED); \ } while (0) #define IFNET_RLOCK() sx_slock(&ifnet_sxlock) #define IFNET_RLOCK_NOSLEEP() rw_rlock(&ifnet_rwlock) #define IFNET_RUNLOCK() sx_sunlock(&ifnet_sxlock) #define IFNET_RUNLOCK_NOSLEEP() rw_runlock(&ifnet_rwlock) /* * Look up an ifnet given its index; the _ref variant also acquires a * reference that must be freed using if_rele(). It is almost always a bug * to call ifnet_byindex() instead if ifnet_byindex_ref(). 
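 */

/*
 * Kernel-side sketch, not part of this change: the reference-safe lookup
 * pattern the comment above asks for.  The helper name is hypothetical.
 */
static int
example_mtu_byindex(u_short idx, uint32_t *mtup)
{
	struct ifnet *ifp;

	if ((ifp = ifnet_byindex_ref(idx)) == NULL)
		return (ENXIO);
	*mtup = ifp->if_mtu;		/* reference held across the access */
	if_rele(ifp);
	return (0);
}

/*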
*/ struct ifnet *ifnet_byindex(u_short idx); struct ifnet *ifnet_byindex_locked(u_short idx); struct ifnet *ifnet_byindex_ref(u_short idx); /* * Given the index, ifaddr_byindex() returns the one and only * link-level ifaddr for the interface. You are not supposed to use * it to traverse the list of addresses associated to the interface. */ struct ifaddr *ifaddr_byindex(u_short idx); VNET_DECLARE(struct ifnethead, ifnet); VNET_DECLARE(struct ifgrouphead, ifg_head); VNET_DECLARE(int, if_index); VNET_DECLARE(struct ifnet *, loif); /* first loopback interface */ #define V_ifnet VNET(ifnet) #define V_ifg_head VNET(ifg_head) #define V_if_index VNET(if_index) #define V_loif VNET(loif) int if_addgroup(struct ifnet *, const char *); int if_delgroup(struct ifnet *, const char *); int if_addmulti(struct ifnet *, struct sockaddr *, struct ifmultiaddr **); int if_allmulti(struct ifnet *, int); int if_delmulti(struct ifnet *, struct sockaddr *); void if_delmulti_ifma(struct ifmultiaddr *); void if_vmove(struct ifnet *, struct vnet *); void if_purgeaddrs(struct ifnet *); void if_delallmulti(struct ifnet *); void if_down(struct ifnet *); struct ifmultiaddr * if_findmulti(struct ifnet *, struct sockaddr *); void if_ref(struct ifnet *); void if_rele(struct ifnet *); int if_setlladdr(struct ifnet *, const u_char *, int); void if_up(struct ifnet *); int ifioctl(struct socket *, u_long, caddr_t, struct thread *); int ifpromisc(struct ifnet *, int); struct ifnet *ifunit(const char *); struct ifnet *ifunit_ref(const char *); void iftype_register(struct iftype *); void iftype_unregister(struct iftype *); int ifa_add_loopback_route(struct ifaddr *, struct sockaddr *); int ifa_del_loopback_route(struct ifaddr *, struct sockaddr *); int ifa_switch_loopback_route(struct ifaddr *, struct sockaddr *, int fib); struct ifaddr *ifa_ifwithaddr(struct sockaddr *); int ifa_ifwithaddr_check(struct sockaddr *); struct ifaddr *ifa_ifwithbroadaddr(struct sockaddr *, int); struct ifaddr *ifa_ifwithdstaddr(struct sockaddr *, int); struct ifaddr *ifa_ifwithnet(struct sockaddr *, int, int); struct ifaddr *ifa_ifwithroute(int, struct sockaddr *, struct sockaddr *, u_int); struct ifaddr *ifaof_ifpforaddr(struct sockaddr *, struct ifnet *); int ifa_preferred(struct ifaddr *, struct ifaddr *); int if_simloop(struct ifnet *ifp, struct mbuf *m, int af, int hlen); void if_data_copy(struct ifnet *, struct if_data *); int if_getmtu_family(if_t ifp, int family); int if_setupmultiaddr(if_t ifp, void *mta, int *cnt, int max); int if_multiaddr_array(if_t ifp, void *mta, int *cnt, int max); int if_multiaddr_count(if_t ifp, int max); /* TSO */ void if_tsomax_common(const struct iftsomax *, struct iftsomax *); int if_tsomax_update(if_t ifp, const struct iftsomax *); #ifdef DEVICE_POLLING enum poll_cmd { POLL_ONLY, POLL_AND_CHECK_STATUS }; typedef int poll_handler_t(if_t ifp, enum poll_cmd cmd, int count); int ether_poll_register(poll_handler_t *h, if_t ifp); int ether_poll_deregister(if_t ifp); #endif /* DEVICE_POLLING */ /* * Wrappers around ifops. Some ops are optional and can be NULL, * others are mandatory. Those wrappers that driver can invoke * theirselves are not inlined, but implemented in if.c. 
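 */

/*
 * Driver-side sketch, not part of this change: a minimal ifop_transmit
 * built on the generic if_snd queue from if.h.  example_start() is a
 * hypothetical stand-in for a routine that programs the hardware TX ring;
 * here it only drains and frees the queued frames.
 */
static void
example_start(if_t ifp)
{
	struct mbuf *m;

	while ((m = if_snd_dequeue(ifp)) != NULL) {
		/*
		 * A real driver would map the mbuf into its TX ring here and
		 * could push it back with if_snd_prepend() if the ring is
		 * full.
		 */
		if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
		m_freem(m);
	}
}

static int
example_transmit(if_t ifp, struct mbuf *m)
{
	int error;

	/* On failure if_snd_enqueue() frees the mbuf and counts the drop. */
	if ((error = if_snd_enqueue(ifp, m)) != 0)
		return (error);
	example_start(ifp);
	return (0);
}

/*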
*/ static inline void if_init(if_t ifp, void *sc) { if (ifp->if_ops->ifop_init != NULL) return (ifp->if_ops->ifop_init(sc)); } static inline int if_transmit(if_t ifp, struct mbuf *m) { return (ifp->if_ops->ifop_transmit(ifp, m)); } static inline void if_qflush(if_t ifp) { if (ifp->if_ops->ifop_qflush != NULL) ifp->if_ops->ifop_qflush(ifp); } static inline int if_output(if_t ifp, struct mbuf *m, const struct sockaddr *dst, struct route *ro) { return (ifp->if_ops->ifop_output(ifp, m, dst, ro)); } static inline int if_ioctl(if_t ifp, u_long cmd, void *data, struct thread *td) { - struct iftype *ift = ifp->if_type; int error = EOPNOTSUPP; if (ifp->if_ops->ifop_ioctl != NULL) error = ifp->if_ops->ifop_ioctl(ifp, cmd, data, td); - if (error == EOPNOTSUPP && ift->ift_ops.ifop_ioctl != NULL) - error = ift->ift_ops.ifop_ioctl(ifp, cmd, data, td); + if (error == EOPNOTSUPP && ifp->if_type != NULL && + ifp->if_type->ift_ops.ifop_ioctl != NULL) + error = ifp->if_type->ift_ops.ifop_ioctl(ifp, cmd, data, td); return (error); } static inline uint64_t if_get_counter(const if_t ifp, ift_counter cnt) { return (ifp->if_ops->ifop_get_counter(ifp, cnt)); } static inline int if_resolvemulti(if_t ifp, struct sockaddr **llsa, struct sockaddr *sa) { if (ifp->if_ops->ifop_resolvemulti != NULL) return (ifp->if_ops->ifop_resolvemulti(ifp, llsa, sa)); else return (EOPNOTSUPP); } static inline void if_reassign(if_t ifp, struct vnet *new) { return (ifp->if_ops->ifop_reassign(ifp, new)); } /* * Inliners to shorten code, and make protocols more ifnet-agnostic. */ static inline ifType if_type(const if_t ifp) { return (ifp->if_drv->ifdrv_type); } static inline uint8_t if_addrlen(const if_t ifp) { return (ifp->if_drv->ifdrv_addrlen); } #endif /* _KERNEL */ #endif /* !_NET_IF_VAR_H_ */ Index: projects/ifnet/sys/netgraph/ng_iface.c =================================================================== --- projects/ifnet/sys/netgraph/ng_iface.c (revision 277242) +++ projects/ifnet/sys/netgraph/ng_iface.c (revision 277243) @@ -1,730 +1,722 @@ /* * ng_iface.c */ /*- * Copyright (c) 1996-1999 Whistle Communications, Inc. * All rights reserved. * * Subject to the following obligations and disclaimer of warranty, use and * redistribution of this software, in source or object code forms, with or * without modifications are expressly permitted by Whistle Communications; * provided, however, that: * 1. Any and all reproductions of the source or object code must include the * copyright notice above and the following disclaimer of warranties; and * 2. No rights are granted, in any manner or form, to use Whistle * Communications, Inc. trademarks, including the mark "WHISTLE * COMMUNICATIONS" on advertising, endorsements, or otherwise except as * such appears in the above copyright notice or in the software. * * THIS SOFTWARE IS BEING PROVIDED BY WHISTLE COMMUNICATIONS "AS IS", AND * TO THE MAXIMUM EXTENT PERMITTED BY LAW, WHISTLE COMMUNICATIONS MAKES NO * REPRESENTATIONS OR WARRANTIES, EXPRESS OR IMPLIED, REGARDING THIS SOFTWARE, * INCLUDING WITHOUT LIMITATION, ANY AND ALL IMPLIED WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT. * WHISTLE COMMUNICATIONS DOES NOT WARRANT, GUARANTEE, OR MAKE ANY * REPRESENTATIONS REGARDING THE USE OF, OR THE RESULTS OF THE USE OF THIS * SOFTWARE IN TERMS OF ITS CORRECTNESS, ACCURACY, RELIABILITY OR OTHERWISE. 
* IN NO EVENT SHALL WHISTLE COMMUNICATIONS BE LIABLE FOR ANY DAMAGES * RESULTING FROM OR ARISING OUT OF ANY USE OF THIS SOFTWARE, INCLUDING * WITHOUT LIMITATION, ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, * PUNITIVE, OR CONSEQUENTIAL DAMAGES, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES, LOSS OF USE, DATA OR PROFITS, HOWEVER CAUSED AND UNDER ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF WHISTLE COMMUNICATIONS IS ADVISED OF THE POSSIBILITY * OF SUCH DAMAGE. * * Author: Archie Cobbs * * $FreeBSD$ * $Whistle: ng_iface.c,v 1.33 1999/11/01 09:24:51 julian Exp $ */ /* * This node is also a system networking interface. It has * a hook for each protocol (IP, AppleTalk, etc). Packets * are simply relayed between the interface and the hooks. * * Interfaces are named ng0, ng1, etc. New nodes take the * first available interface name. * * This node also includes Berkeley packet filter support. */ #include "opt_inet.h" #include "opt_inet6.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* XXXGL: for if_transmit() and if_index. */ #include #include #include #include #include #include #include #include #include #ifdef NG_SEPARATE_MALLOC static MALLOC_DEFINE(M_NETGRAPH_IFACE, "netgraph_iface", "netgraph iface node"); #else #define M_NETGRAPH_IFACE M_NETGRAPH #endif /* This struct describes one address family */ struct iffam { sa_family_t family; /* Address family */ const char *hookname; /* Name for hook */ }; typedef const struct iffam *iffam_p; /* List of address families supported by our interface */ const static struct iffam gFamilies[] = { { AF_INET, NG_IFACE_HOOK_INET }, { AF_INET6, NG_IFACE_HOOK_INET6 }, { AF_ATM, NG_IFACE_HOOK_ATM }, { AF_NATM, NG_IFACE_HOOK_NATM }, }; #define NUM_FAMILIES (sizeof(gFamilies) / sizeof(*gFamilies)) /* Node private data */ struct ng_iface_private { struct ifnet *ifp; /* Our interface */ int unit; /* Interface unit number */ node_p node; /* Our netgraph node */ hook_p hooks[NUM_FAMILIES]; /* Hook for each address family */ }; typedef struct ng_iface_private *priv_p; /* Interface methods */ static int ng_iface_transmit(if_t, struct mbuf *); static int ng_iface_ioctl(if_t, u_long, void *, struct thread *); static int ng_iface_output(if_t, struct mbuf *m0, const struct sockaddr *dst, struct route *ro); static int ng_iface_send(if_t, struct mbuf *m, sa_family_t sa); #ifdef DEBUG static void ng_iface_print_ioctl(if_t, int cmd, caddr_t data); #endif /* Netgraph methods */ static int ng_iface_mod_event(module_t, int, void *); static ng_constructor_t ng_iface_constructor; static ng_rcvmsg_t ng_iface_rcvmsg; static ng_shutdown_t ng_iface_shutdown; static ng_newhook_t ng_iface_newhook; static ng_rcvdata_t ng_iface_rcvdata; static ng_disconnect_t ng_iface_disconnect; /* Helper stuff */ static iffam_p get_iffam_from_af(sa_family_t family); static iffam_p get_iffam_from_hook(priv_p priv, hook_p hook); static iffam_p get_iffam_from_name(const char *name); static hook_p *get_hook_from_iffam(priv_p priv, iffam_p iffam); /* List of commands and how to convert arguments to/from ASCII */ static const struct ng_cmdlist ng_iface_cmds[] = { { NGM_IFACE_COOKIE, NGM_IFACE_GET_IFNAME, "getifname", NULL, &ng_parse_string_type }, { NGM_IFACE_COOKIE, NGM_IFACE_POINT2POINT, "point2point", NULL, NULL }, { NGM_IFACE_COOKIE, NGM_IFACE_BROADCAST, "broadcast", NULL, 
NULL }, { NGM_IFACE_COOKIE, NGM_IFACE_GET_IFINDEX, "getifindex", NULL, &ng_parse_uint32_type }, { 0 } }; /* Node type descriptor */ static struct ng_type typestruct = { .version = NG_ABI_VERSION, .name = NG_IFACE_NODE_TYPE, .mod_event = ng_iface_mod_event, .constructor = ng_iface_constructor, .rcvmsg = ng_iface_rcvmsg, .shutdown = ng_iface_shutdown, .newhook = ng_iface_newhook, .rcvdata = ng_iface_rcvdata, .disconnect = ng_iface_disconnect, .cmdlist = ng_iface_cmds, }; NETGRAPH_INIT(iface, &typestruct); static VNET_DEFINE(struct unrhdr *, ng_iface_unit); #define V_ng_iface_unit VNET(ng_iface_unit) static struct ifdriver ng_ifdrv = { .ifdrv_ops = { .ifop_origin = IFOP_ORIGIN_DRIVER, .ifop_output = ng_iface_output, .ifop_transmit = ng_iface_transmit, .ifop_ioctl = ng_iface_ioctl, }, .ifdrv_name = NG_IFACE_IFACE_NAME, .ifdrv_type = IFT_PROPVIRTUAL, .ifdrv_dlt = DLT_NULL, .ifdrv_dlt_hdrlen = sizeof(uint32_t), }; /************************************************************************ HELPER STUFF ************************************************************************/ /* * Get the family descriptor from the family ID */ static __inline iffam_p get_iffam_from_af(sa_family_t family) { iffam_p iffam; int k; for (k = 0; k < NUM_FAMILIES; k++) { iffam = &gFamilies[k]; if (iffam->family == family) return (iffam); } return (NULL); } /* * Get the family descriptor from the hook */ static __inline iffam_p get_iffam_from_hook(priv_p priv, hook_p hook) { int k; for (k = 0; k < NUM_FAMILIES; k++) if (priv->hooks[k] == hook) return (&gFamilies[k]); return (NULL); } /* * Get the hook from the iffam descriptor */ static __inline hook_p * get_hook_from_iffam(priv_p priv, iffam_p iffam) { return (&priv->hooks[iffam - gFamilies]); } /* * Get the iffam descriptor from the name */ static __inline iffam_p get_iffam_from_name(const char *name) { iffam_p iffam; int k; for (k = 0; k < NUM_FAMILIES; k++) { iffam = &gFamilies[k]; if (!strcmp(iffam->hookname, name)) return (iffam); } return (NULL); } /************************************************************************ INTERFACE STUFF ************************************************************************/ /* * Process an ioctl for the virtual interface */ static int ng_iface_ioctl(if_t ifp, u_long command, void *data, struct thread *td) { struct ifreq *const ifr = (struct ifreq *) data; int error = 0; #ifdef DEBUG ng_iface_print_ioctl(ifp, command, data); #endif switch (command) { /* These two are mostly handled at a higher layer */ case SIOCSIFADDR: if_addflags(ifp, IF_FLAGS, IFF_UP); break; - case SIOCGIFADDR: - break; - case SIOCSIFFLAGS: - break; /* Set the interface MTU */ case SIOCSIFMTU: if (ifr->ifr_mtu > NG_IFACE_MTU_MAX || ifr->ifr_mtu < NG_IFACE_MTU_MIN) error = EINVAL; - else - if_set(ifp, IF_MTU, ifr->ifr_mtu); break; /* Stuff that's not supported */ + case SIOCGIFADDR: + case SIOCSIFFLAGS: case SIOCADDMULTI: case SIOCDELMULTI: - error = 0; break; - case SIOCSIFPHYS: - error = EOPNOTSUPP; - break; default: - error = EINVAL; + error = EOPNOTSUPP; break; } return (error); } /* * This routine is called to deliver a packet out the interface. * We simply look at the address family and relay the packet to * the corresponding hook, if it exists and is connected. */ static int ng_iface_output(if_t ifp, struct mbuf *m, const struct sockaddr *dst, struct route *ro) { struct m_tag *mtag; uint32_t af; /* Check interface flags */ if ((if_get(ifp, IF_FLAGS) & IFF_UP) == 0) { m_freem(m); return (ENETDOWN); } /* Protect from deadly infinite recursion. 
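 * Every ng_iface an outbound packet has already been sent through prepends
 * an MTAG_NGIF tag carrying its own ifp; finding our ifp among those tags
 * below means the packet has looped back to this node, so it is dropped
 * with EDEADLK instead of being re-queued forever.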
/*
 * This routine is called to deliver a packet out the interface.
 * We simply look at the address family and relay the packet to
 * the corresponding hook, if it exists and is connected.
 */
static int
ng_iface_output(if_t ifp, struct mbuf *m, const struct sockaddr *dst,
    struct route *ro)
{
	struct m_tag *mtag;
	uint32_t af;

	/* Check interface flags */
	if ((if_get(ifp, IF_FLAGS) & IFF_UP) == 0) {
		m_freem(m);
		return (ENETDOWN);
	}

	/* Protect from deadly infinite recursion. */
	mtag = NULL;
	while ((mtag = m_tag_locate(m, MTAG_NGIF, MTAG_NGIF_CALLED, mtag))) {
		if (*(if_t *)(mtag + 1) == ifp) {
			log(LOG_NOTICE, "Loop detected on %s\n",
			    if_name(ifp));
			m_freem(m);
			return (EDEADLK);
		}
	}
	mtag = m_tag_alloc(MTAG_NGIF, MTAG_NGIF_CALLED, sizeof(if_t),
	    M_NOWAIT);
	if (mtag == NULL) {
		m_freem(m);
		return (ENOMEM);
	}
	*(if_t *)(mtag + 1) = ifp;
	m_tag_prepend(m, mtag);

	/* BPF writes need to be handled specially. */
	if (dst->sa_family == AF_UNSPEC)
		bcopy(dst->sa_data, &af, sizeof(af));
	else
		af = dst->sa_family;

	M_PREPEND(m, sizeof(sa_family_t), M_NOWAIT);
	if (m == NULL) {
		if_inc_counter(ifp, IFCOUNTER_OQDROPS, 1);
		return (ENOBUFS);
	}
	*(sa_family_t *)m->m_data = af;

	return (if_transmit(ifp, m));
}

/*
 * Transmit method.  Strip the address family that ng_iface_output()
 * prepended, feed BPF, and hand the packet off to netgraph.
 */
static int
ng_iface_transmit(if_t ifp, struct mbuf *m)
{
	sa_family_t af;

	af = *mtod(m, sa_family_t *);
	KASSERT(af != AF_UNSPEC, ("%s: af = AF_UNSPEC", __func__));
	m_adj(m, sizeof(sa_family_t));
	if_mtap(ifp, m, &af, sizeof(af));

	return (ng_iface_send(ifp, m, af));
}
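/*
 * ng_iface_output() above defends against netgraph configurations that
 * route a packet back into the interface it left from: each pass tags
 * the mbuf with this ifp (MTAG_NGIF/MTAG_NGIF_CALLED), and a packet
 * already carrying a tag for this ifp is dropped with EDEADLK.  The
 * fragment below is a small self-contained userland model of that idea
 * only (an array of visited pointers stands in for the m_tag chain);
 * it is not kernel code.
 */
#if 0
#include <stdio.h>
#include <string.h>

#define MODEL_MAX_TAGS	8

struct model_pkt {
	const void *visited[MODEL_MAX_TAGS];	/* models the tag chain */
	int ntags;
};

/* Return -1 if this "ifp" was already seen (loop), else remember it. */
static int
model_loop_guard(struct model_pkt *p, const void *ifp)
{
	for (int i = 0; i < p->ntags; i++)
		if (p->visited[i] == ifp)
			return (-1);
	if (p->ntags < MODEL_MAX_TAGS)
		p->visited[p->ntags++] = ifp;
	return (0);
}

int
main(void)
{
	struct model_pkt p;
	int if_a, if_b;		/* addresses stand in for two ifnets */

	memset(&p, 0, sizeof(p));
	printf("first pass through A:  %d\n", model_loop_guard(&p, &if_a));
	printf("pass through B:        %d\n", model_loop_guard(&p, &if_b));
	printf("second pass through A: %d (loop)\n",
	    model_loop_guard(&p, &if_a));
	return (0);
}
#endif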
/*
 * This routine does actual delivery of the packet into the
 * netgraph(4).  It is called from ng_iface_transmit().
 */
static int
ng_iface_send(if_t ifp, struct mbuf *m, sa_family_t sa)
{
	const priv_p priv = if_getsoftc(ifp, IF_DRIVER_SOFTC);
	const iffam_p iffam = get_iffam_from_af(sa);
	int error;
	int len;

	/* Check address family to determine hook (if known) */
	if (iffam == NULL) {
		m_freem(m);
		log(LOG_WARNING, "%s: can't handle af%d\n",
		    if_name(ifp), sa);
		return (EAFNOSUPPORT);
	}

	/* Copy length before the mbuf gets invalidated. */
	len = m->m_pkthdr.len;

	/* Send packet.  If hook is not connected, mbuf will get freed. */
	NG_OUTBOUND_THREAD_REF();
	NG_SEND_DATA_ONLY(error, *get_hook_from_iffam(priv, iffam), m);
	NG_OUTBOUND_THREAD_UNREF();

	/* Update stats. */
	if (error == 0) {
		if_inc_counter(ifp, IFCOUNTER_OBYTES, len);
		if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
	} else
		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);

	return (error);
}

#ifdef DEBUG
/*
 * Display an ioctl to the virtual interface
 */
static void
ng_iface_print_ioctl(if_t ifp, int command, caddr_t data)
{
	char *str;

	switch (command & IOC_DIRMASK) {
	case IOC_VOID:
		str = "IO";
		break;
	case IOC_OUT:
		str = "IOR";
		break;
	case IOC_IN:
		str = "IOW";
		break;
	case IOC_INOUT:
		str = "IORW";
		break;
	default:
		str = "IO??";
	}
	log(LOG_DEBUG, "%s: %s('%c', %d, char[%d])\n",
	    if_name(ifp), str, IOCGROUP(command), command & 0xff,
	    IOCPARM_LEN(command));
}
#endif /* DEBUG */

/************************************************************************
			NETGRAPH NODE STUFF
 ************************************************************************/

/*
 * Constructor for a node
 */
static int
ng_iface_constructor(node_p node)
{
	struct if_attach_args ifat = {
		.ifat_version = IF_ATTACH_VERSION,
		.ifat_drv = &ng_ifdrv,
		.ifat_mtu = NG_IFACE_MTU_DEFAULT,
		.ifat_flags = IFF_SIMPLEX | IFF_POINTOPOINT | IFF_NOARP |
		    IFF_MULTICAST,
		.ifat_baudrate = 64000,	/* XXX */
	};
	priv_p priv;

	priv = malloc(sizeof(*priv), M_NETGRAPH_IFACE, M_WAITOK | M_ZERO);
	NG_NODE_SET_PRIVATE(node, priv);
	priv->node = node;
	ifat.ifat_softc = priv;
	ifat.ifat_dunit = priv->unit = alloc_unr(V_ng_iface_unit);
	priv->ifp = if_attach(&ifat);

	/* Give this node the same name as the interface (if possible) */
	if (ng_name_node(node, if_name(priv->ifp)) != 0)
		log(LOG_WARNING, "%s: can't acquire netgraph name\n",
		    if_name(priv->ifp));

	return (0);
}

/*
 * Give our ok for a hook to be added
 */
static int
ng_iface_newhook(node_p node, hook_p hook, const char *name)
{
	const iffam_p iffam = get_iffam_from_name(name);
	hook_p *hookptr;

	if (iffam == NULL)
		return (EPFNOSUPPORT);
	hookptr = get_hook_from_iffam(NG_NODE_PRIVATE(node), iffam);
	if (*hookptr != NULL)
		return (EISCONN);
	*hookptr = hook;
	NG_HOOK_HI_STACK(hook);
	NG_HOOK_SET_TO_INBOUND(hook);
	return (0);
}

/*
 * Receive a control message
 */
static int
ng_iface_rcvmsg(node_p node, item_p item, hook_p lasthook)
{
	const priv_p priv = NG_NODE_PRIVATE(node);
	const if_t ifp = priv->ifp;
	struct ng_mesg *resp = NULL;
	int error = 0;
	struct ng_mesg *msg;

	NGI_GET_MSG(item, msg);
	switch (msg->header.typecookie) {
	case NGM_IFACE_COOKIE:
		switch (msg->header.cmd) {
		case NGM_IFACE_GET_IFNAME:
			NG_MKRESPONSE(resp, msg, IFNAMSIZ, M_NOWAIT);
			if (resp == NULL) {
				error = ENOMEM;
				break;
			}
			strlcpy(resp->data, if_name(ifp), IFNAMSIZ);
			break;

		case NGM_IFACE_POINT2POINT:
		case NGM_IFACE_BROADCAST:
		    {
			/* Deny request if interface is UP */
			if ((if_get(ifp, IF_FLAGS) & IFF_UP) != 0)
				return (EBUSY);

			/* Change flags */
			switch (msg->header.cmd) {
			case NGM_IFACE_POINT2POINT:
				if_addflags(ifp, IF_FLAGS, IFF_POINTOPOINT);
				if_clrflags(ifp, IF_FLAGS, IFF_BROADCAST);
				break;
			case NGM_IFACE_BROADCAST:
				if_clrflags(ifp, IF_FLAGS, IFF_POINTOPOINT);
				if_addflags(ifp, IF_FLAGS, IFF_BROADCAST);
				break;
			}
			break;
		    }

		case NGM_IFACE_GET_IFINDEX:
			NG_MKRESPONSE(resp, msg, sizeof(uint32_t), M_NOWAIT);
			if (resp == NULL) {
				error = ENOMEM;
				break;
			}
			*((uint32_t *)resp->data) = ifp->if_index;
			break;

		default:
			error = EINVAL;
			break;
		}
		break;
	case NGM_FLOW_COOKIE:
		switch (msg->header.cmd) {
		case NGM_LINK_IS_UP:
			if_link_state_change(ifp, LINK_STATE_UP);
			break;
		case NGM_LINK_IS_DOWN:
			if_link_state_change(ifp, LINK_STATE_DOWN);
			break;
		default:
			break;
		}
		break;
	default:
		error = EINVAL;
		break;
	}
	NG_RESPOND_MSG(error, node, item, resp);
	NG_FREE_MSG(msg);
	return (error);
}
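/*
 * Putting the constructor and control-message handler above together:
 * a node of this type is usually created by a mkpeer from some other
 * node, after which it attaches ngN and (if possible) takes that name.
 * The userland sketch below creates one from a temporary socket node
 * and then flips it to broadcast mode; the hook name "x" and the use
 * of a socket node are assumptions (roughly "ngctl mkpeer . iface x
 * inet" followed by "ngctl msg .:x broadcast").  Note the handler
 * refuses the mode change with EBUSY once the interface is UP.
 */
#if 0
#include <sys/types.h>
#include <err.h>
#include <string.h>
#include <netgraph.h>
#include <netgraph/ng_message.h>
#include <netgraph/ng_iface.h>

int
main(void)
{
	struct ngm_mkpeer mkp;
	int cs, ds;

	if (NgMkSockNode(NULL, &cs, &ds) < 0)
		err(1, "NgMkSockNode");

	/* Create the iface node; its constructor attaches a new ngN. */
	memset(&mkp, 0, sizeof(mkp));
	strlcpy(mkp.type, NG_IFACE_NODE_TYPE, sizeof(mkp.type));
	strlcpy(mkp.ourhook, "x", sizeof(mkp.ourhook));
	strlcpy(mkp.peerhook, NG_IFACE_HOOK_INET, sizeof(mkp.peerhook));
	if (NgSendMsg(cs, ".:", NGM_GENERIC_COOKIE, NGM_MKPEER,
	    &mkp, sizeof(mkp)) < 0)
		err(1, "mkpeer");

	/* Switch the (still down) interface to broadcast mode. */
	if (NgSendMsg(cs, ".:x", NGM_IFACE_COOKIE, NGM_IFACE_BROADCAST,
	    NULL, 0) < 0)
		err(1, "broadcast");
	return (0);
}
#endif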
/*
 * Receive data from a hook.  Pass the packet to the correct input routine.
 */
static int
ng_iface_rcvdata(hook_p hook, item_p item)
{
	const priv_p priv = NG_NODE_PRIVATE(NG_HOOK_NODE(hook));
	const iffam_p iffam = get_iffam_from_hook(priv, hook);
	const if_t ifp = priv->ifp;
	struct mbuf *m;
	sa_family_t af;
	int isr;

	NGI_GET_M(item, m);
	NG_FREE_ITEM(item);
	/* Sanity checks */
	KASSERT(iffam != NULL, ("%s: iffam", __func__));
	M_ASSERTPKTHDR(m);
	if ((if_get(ifp, IF_FLAGS) & IFF_UP) == 0) {
		NG_FREE_M(m);
		return (ENETDOWN);
	}

	/* Update interface stats */
	if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
	if_inc_counter(ifp, IFCOUNTER_IBYTES, m->m_pkthdr.len);

	/* Note receiving interface */
	m->m_pkthdr.rcvif = ifp;

	/* Berkeley packet filter */
	af = iffam->family;
	if_mtap(ifp, m, &af, sizeof(af));

	/* Send packet */
	switch (iffam->family) {
#ifdef INET
	case AF_INET:
		isr = NETISR_IP;
		break;
#endif
#ifdef INET6
	case AF_INET6:
		isr = NETISR_IPV6;
		break;
#endif
	default:
		m_freem(m);
		return (EAFNOSUPPORT);
	}
	random_harvest(&(m->m_data), 12, 2, RANDOM_NET_NG);
	M_SETFIB(m, if_get(ifp, IF_FIB));
	netisr_dispatch(isr, m);
	return (0);
}

/*
 * Shutdown and remove the node and its associated interface.
 */
static int
ng_iface_shutdown(node_p node)
{
	const priv_p priv = NG_NODE_PRIVATE(node);

	/*
	 * The ifnet may be in a different vnet than the netgraph node,
	 * hence we have to change the current vnet context here.
	 */
	CURVNET_SET_QUIET(priv->ifp->if_vnet);
	if_detach(priv->ifp);
	CURVNET_RESTORE();
	priv->ifp = NULL;
	free_unr(V_ng_iface_unit, priv->unit);
	free(priv, M_NETGRAPH_IFACE);
	NG_NODE_SET_PRIVATE(node, NULL);
	NG_NODE_UNREF(node);
	return (0);
}

/*
 * Hook disconnection.  Note that we do *not* shutdown when all
 * hooks have been disconnected.
 */
static int
ng_iface_disconnect(hook_p hook)
{
	const priv_p priv = NG_NODE_PRIVATE(NG_HOOK_NODE(hook));
	const iffam_p iffam = get_iffam_from_hook(priv, hook);

	if (iffam == NULL)
		panic("%s", __func__);
	*get_hook_from_iffam(priv, iffam) = NULL;
	return (0);
}

/*
 * Handle loading and unloading for this node type.
 */
static int
ng_iface_mod_event(module_t mod, int event, void *data)
{
	int error = 0;

	switch (event) {
	case MOD_LOAD:
	case MOD_UNLOAD:
		break;
	default:
		error = EOPNOTSUPP;
		break;
	}
	return (error);
}

static void
vnet_ng_iface_init(const void *unused)
{

	V_ng_iface_unit = new_unrhdr(0, 0xffff, NULL);
}
VNET_SYSINIT(vnet_ng_iface_init, SI_SUB_PSEUDO, SI_ORDER_ANY,
    vnet_ng_iface_init, NULL);

static void
vnet_ng_iface_uninit(const void *unused)
{

	delete_unrhdr(V_ng_iface_unit);
}
VNET_SYSUNINIT(vnet_ng_iface_uninit, SI_SUB_PSEUDO, SI_ORDER_ANY,
    vnet_ng_iface_uninit, NULL);
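/*
 * Companion sketch for ng_iface_rcvdata() above: once a data socket's
 * hook is wired to this node's "inet" hook, any frame written to that
 * hook is counted, fed to BPF and dispatched to the IP netisr as if it
 * had arrived on ngN (the interface must be UP, or ENETDOWN is
 * returned).  The payload below is a mostly zeroed IPv4 header used
 * purely to exercise the path; ip_input() will discard it.  The hook
 * name "x" and the rest of the setup are assumptions.
 */
#if 0
#include <sys/types.h>
#include <err.h>
#include <string.h>
#include <netgraph.h>
#include <netgraph/ng_message.h>
#include <netgraph/ng_iface.h>

int
main(void)
{
	struct ngm_mkpeer mkp;
	u_char pkt[20];
	int cs, ds;

	if (NgMkSockNode(NULL, &cs, &ds) < 0)
		err(1, "NgMkSockNode");

	/* Hang a fresh iface node off our data hook "x". */
	memset(&mkp, 0, sizeof(mkp));
	strlcpy(mkp.type, NG_IFACE_NODE_TYPE, sizeof(mkp.type));
	strlcpy(mkp.ourhook, "x", sizeof(mkp.ourhook));
	strlcpy(mkp.peerhook, NG_IFACE_HOOK_INET, sizeof(mkp.peerhook));
	if (NgSendMsg(cs, ".:", NGM_GENERIC_COOKIE, NGM_MKPEER,
	    &mkp, sizeof(mkp)) < 0)
		err(1, "mkpeer");

	/* A bare IPv4 header: enough to reach ng_iface_rcvdata(). */
	memset(pkt, 0, sizeof(pkt));
	pkt[0] = 0x45;			/* version 4, 5-word header */
	pkt[3] = sizeof(pkt);		/* total length (low byte) */
	pkt[8] = 64;			/* TTL */

	if (NgSendData(ds, "x", pkt, sizeof(pkt)) < 0)
		err(1, "NgSendData");
	return (0);
}
#endif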