Index: share/man/man4/Makefile
===================================================================
--- share/man/man4/Makefile
+++ share/man/man4/Makefile
@@ -180,7 +180,6 @@
 	gre.4 \
 	h_ertt.4 \
 	hifn.4 \
-	hme.4 \
 	hpet.4 \
 	${_hpt27xx.4} \
 	${_hptiop.4} \
@@ -665,7 +664,6 @@
 MLINKS+=gpioths.4 dht11.4
 MLINKS+=gpioths.4 dht22.4
 MLINKS+=gre.4 if_gre.4
-MLINKS+=hme.4 if_hme.4
 MLINKS+=hpet.4 acpi_hpet.4
 MLINKS+=${_hptrr.4} ${_rr232x.4}
 MLINKS+=${_attimer.4} ${_i8254.4}
Index: share/man/man4/altq.4
===================================================================
--- share/man/man4/altq.4
+++ share/man/man4/altq.4
@@ -148,7 +148,6 @@
 .Xr et 4 ,
 .Xr fxp 4 ,
 .Xr gem 4 ,
-.Xr hme 4 ,
 .Xr igb 4 ,
 .Xr ixgbe 4 ,
 .Xr jme 4 ,
Index: share/man/man4/hme.4
===================================================================
--- share/man/man4/hme.4
+++ /dev/null
@@ -1,135 +0,0 @@
-.\" $NetBSD: hme.4,v 1.4 2003/02/14 15:20:18 grant Exp $
-.\"
-.\" Copyright (c) 2001 The NetBSD Foundation, Inc.
-.\" All rights reserved.
-.\"
-.\" This code is derived from software contributed to The NetBSD Foundation
-.\" by Klaus Klein.
-.\"
-.\" Redistribution and use in source and binary forms, with or without
-.\" modification, are permitted provided that the following conditions
-.\" are met:
-.\" 1. Redistributions of source code must retain the above copyright
-.\" notice, this list of conditions and the following disclaimer.
-.\" 2. Redistributions in binary form must reproduce the above copyright
-.\" notice, this list of conditions and the following disclaimer in the
-.\" documentation and/or other materials provided with the distribution.
-.\"
-.\" THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
-.\" ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
-.\" TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-.\" PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
-.\" BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-.\" CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-.\" SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-.\" INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-.\" CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-.\" ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-.\" POSSIBILITY OF SUCH DAMAGE.
-.\"
-.\" $FreeBSD$
-.\"
-.Dd February 12, 2020
-.Dt HME 4
-.Os
-.Sh NAME
-.Nm hme
-.Nd "Sun Microelectronics STP2002-STQ Ethernet interfaces device driver"
-.Sh SYNOPSIS
-To compile this driver into the kernel,
-place the following lines in your
-kernel configuration file:
-.Bd -ragged -offset indent
-.Cd "device miibus"
-.Cd "device hme"
-.Ed
-.Pp
-Alternatively, to load the driver as a
-module at boot time, place the following line in
-.Xr loader.conf 5 :
-.Bd -literal -offset indent
-if_hme_load="YES"
-.Ed
-.Sh DEPRECATION NOTICE
-The
-.Nm
-driver is not present in
-.Fx 13.0
-and later.
-See https://github.com/freebsd/fcp/blob/master/fcp-0101.md for more
-information.
-.Sh DESCRIPTION
-The
-.Nm
-driver supports Sun Microelectronics STP2002-STQ
-.Dq Happy Meal Ethernet
-Fast Ethernet interfaces.
-.Pp
-All controllers supported by the
-.Nm
-driver have TCP checksum offload capability for both receive and transmit,
-support for the reception and transmission of extended frames for
-.Xr vlan 4
-and a 128-bit multicast hash filter.
-.Sh HARDWARE
-The
-.Nm
-driver supports the on-board Ethernet interfaces of many
-Sun
-.Tn UltraSPARC
-workstation and server models.
-.Pp
-Cards supported by the
-.Nm
-driver include:
-.Pp
-.Bl -bullet -compact
-.It
-Sun PCI SunSwift Adapter
-.Pq Dq SUNW,hme
-.It
-Sun SBus SunSwift Adapter
-.Dq ( hme
-and
-.Dq SUNW,hme )
-.It
-Sun PCI Sun100BaseT Adapter 2.0
-.Pq Dq SUNW,hme
-.It
-Sun SBus Sun100BaseT 2.0
-.Pq Dq SUNW,hme
-.It
-Sun PCI Quad FastEthernet Controller
-.Pq Dq SUNW,qfe
-.It
-Sun SBus Quad FastEthernet Controller
-.Pq Dq SUNW,qfe
-.El
-.Sh SEE ALSO
-.Xr altq 4 ,
-.Xr intro 4 ,
-.Xr miibus 4 ,
-.Xr netintro 4 ,
-.Xr vlan 4 ,
-.Xr eeprom 8 ,
-.Xr ifconfig 8
-.Rs
-.%T "STP2002QFP Fast Ethernet, Parallel Port, SCSI (FEPS) User's Guide"
-.%D April 1996
-.%A Sun Microelectronics
-.%U http://mediacast.sun.com/users/Barton808/media/STP2002QFP-FEPs_UG.pdf
-.Re
-.Sh HISTORY
-The
-.Nm
-driver first appeared in
-.Nx 1.5 .
-The first
-.Fx
-version to include it was
-.Fx 5.0 .
-.Sh AUTHORS
-The
-.Nm
-driver was written by
-.An Paul Kranenburg Aq Mt pk@NetBSD.org .
Index: share/man/man4/miibus.4
===================================================================
--- share/man/man4/miibus.4
+++ share/man/man4/miibus.4
@@ -77,8 +77,6 @@
 Intel EtherExpress PRO/100B
 .It Xr gem 4
 Sun ERI, Sun GEM and Apple GMAC Ethernet
-.It Xr hme 4
-Sun HME Ethernet
 .It Xr jme 4
 JMicron JMC250 Gigabit/JMC260 Fast Ethernet
 .It Xr lge 4
@@ -157,7 +155,6 @@
 .Xr et 4 ,
 .Xr fxp 4 ,
 .Xr gem 4 ,
-.Xr hme 4 ,
 .Xr jme 4 ,
 .Xr lge 4 ,
 .Xr msk 4 ,
Index: share/man/man4/vlan.4
===================================================================
--- share/man/man4/vlan.4
+++ share/man/man4/vlan.4
@@ -172,7 +172,6 @@
 .Xr fwe 4 ,
 .Xr fxp 4 ,
 .Xr gem 4 ,
-.Xr hme 4 ,
 .Xr le 4 ,
 .Xr nfe 4 ,
 .Xr rl 4 ,
Index: sys/amd64/conf/GENERIC
===================================================================
--- sys/amd64/conf/GENERIC
+++ sys/amd64/conf/GENERIC
@@ -271,7 +271,6 @@
 device	et	# Agere ET1310 10/100/Gigabit Ethernet
 device	fxp	# Intel EtherExpress PRO/100B (82557, 82558)
 device	gem	# Sun GEM/Sun ERI/Apple GMAC
-device	hme	# Sun HME (Happy Meal Ethernet)
 device	jme	# JMicron JMC250 Gigabit/JMC260 Fast Ethernet
 device	lge	# Level 1 LXT1001 gigabit Ethernet
 device	msk	# Marvell/SysKonnect Yukon II Gigabit Ethernet
Index: sys/conf/NOTES
===================================================================
--- sys/conf/NOTES
+++ sys/conf/NOTES
@@ -1867,7 +1867,6 @@
 # fxp: Intel EtherExpress Pro/100B
 #	(hint of prefer_iomap can be done to prefer I/O instead of Mem mapping)
 # gem: Apple GMAC/Sun ERI/Sun GEM
-# hme: Sun HME (Happy Meal Ethernet)
 # jme: JMicron JMC260 Fast Ethernet/JMC250 Gigabit Ethernet based adapters.
# le: AMD Am7900 LANCE and Am79C9xx PCnet # lge: Support for PCI gigabit ethernet adapters based on the Level 1 @@ -1956,7 +1955,6 @@ device fxp # Intel EtherExpress PRO/100B (82557, 82558) envvar hint.fxp.0.prefer_iomap="0" device gem # Apple GMAC/Sun ERI/Sun GEM -device hme # Sun HME (Happy Meal Ethernet) device jme # JMicron JMC250 Gigabit/JMC260 Fast Ethernet device lge # Level 1 LXT1001 gigabit Ethernet device mlxfw # Mellanox firmware update module Index: sys/conf/files =================================================================== --- sys/conf/files +++ sys/conf/files @@ -1816,8 +1816,6 @@ dev/gpio/gpiopps.c optional gpiopps fdt dev/gpio/ofw_gpiobus.c optional fdt gpio dev/hifn/hifn7751.c optional hifn -dev/hme/if_hme.c optional hme -dev/hme/if_hme_pci.c optional hme pci dev/hptiop/hptiop.c optional hptiop scbus dev/hwpmc/hwpmc_logging.c optional hwpmc dev/hwpmc/hwpmc_mod.c optional hwpmc Index: sys/dev/hme/if_hme.c =================================================================== --- sys/dev/hme/if_hme.c +++ /dev/null @@ -1,1750 +0,0 @@ -/*- - * SPDX-License-Identifier: BSD-2-Clause-FreeBSD - * - * Copyright (c) 1999 The NetBSD Foundation, Inc. - * Copyright (c) 2001-2003 Thomas Moestl . - * All rights reserved. - * - * This code is derived from software contributed to The NetBSD Foundation - * by Paul Kranenburg. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. All advertising materials mentioning features or use of this software - * must display the following acknowledgement: - * This product includes software developed by the NetBSD - * Foundation, Inc. and its contributors. - * 4. Neither the name of The NetBSD Foundation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS - * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED - * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS - * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - * - * from: NetBSD: hme.c,v 1.45 2005/02/18 00:22:11 heas Exp - */ - -#include -__FBSDID("$FreeBSD$"); - -/* - * HME Ethernet module driver. - * - * The HME is e.g. part of the PCIO PCI multi function device. - * It supports TX gathering and TX and RX checksum offloading. - * RX buffers must be aligned at a programmable offset modulo 16. 
We choose 2 - * for this offset: mbuf clusters are usually on about 2^11 boundaries, 2 bytes - * are skipped to make sure the header after the ethernet header is aligned on a - * natural boundary, so this ensures minimal wastage in the most common case. - * - * Also, apparently, the buffers must extend to a DMA burst boundary beyond the - * maximum packet size (this is not verified). Buffers starting on odd - * boundaries must be mapped so that the burst can start on a natural boundary. - * - * STP2002QFP-UG says that Ethernet hardware supports TCP checksum offloading. - * In reality, we can do the same technique for UDP datagram too. However, - * the hardware doesn't compensate the checksum for UDP datagram which can yield - * to 0x0. As a safe guard, UDP checksum offload is disabled by default. It - * can be reactivated by setting special link option link0 with ifconfig(8). - */ -#define HME_CSUM_FEATURES (CSUM_TCP) -#if 0 -#define HMEDEBUG -#endif -#define KTR_HME KTR_SPARE2 /* XXX */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include - -#include -#include - -#include - -#include -#include - -CTASSERT(powerof2(HME_NRXDESC) && HME_NRXDESC >= 32 && HME_NRXDESC <= 256); -CTASSERT(HME_NTXDESC % 16 == 0 && HME_NTXDESC >= 16 && HME_NTXDESC <= 256); - -static void hme_start(struct ifnet *); -static void hme_start_locked(struct ifnet *); -static void hme_stop(struct hme_softc *); -static int hme_ioctl(struct ifnet *, u_long, caddr_t); -static void hme_tick(void *); -static int hme_watchdog(struct hme_softc *); -static void hme_init(void *); -static void hme_init_locked(struct hme_softc *); -static int hme_add_rxbuf(struct hme_softc *, unsigned int, int); -static int hme_meminit(struct hme_softc *); -static int hme_mac_bitflip(struct hme_softc *, u_int32_t, u_int32_t, - u_int32_t, u_int32_t); -static void hme_mifinit(struct hme_softc *); -static void hme_setladrf(struct hme_softc *, int); - -static int hme_mediachange(struct ifnet *); -static int hme_mediachange_locked(struct hme_softc *); -static void hme_mediastatus(struct ifnet *, struct ifmediareq *); - -static int hme_load_txmbuf(struct hme_softc *, struct mbuf **); -static void hme_read(struct hme_softc *, int, int, u_int32_t); -static void hme_eint(struct hme_softc *, u_int); -static void hme_rint(struct hme_softc *); -static void hme_tint(struct hme_softc *); -static void hme_rxcksum(struct mbuf *, u_int32_t); - -static void hme_cdma_callback(void *, bus_dma_segment_t *, int, int); - -devclass_t hme_devclass; - -static int hme_nerr; - -DRIVER_MODULE(miibus, hme, miibus_driver, miibus_devclass, 0, 0); -MODULE_DEPEND(hme, miibus, 1, 1, 1); - -#define HME_SPC_READ_4(spc, sc, offs) \ - bus_space_read_4((sc)->sc_ ## spc ## t, (sc)->sc_ ## spc ## h, \ - (offs)) -#define HME_SPC_WRITE_4(spc, sc, offs, v) \ - bus_space_write_4((sc)->sc_ ## spc ## t, (sc)->sc_ ## spc ## h, \ - (offs), (v)) -#define HME_SPC_BARRIER(spc, sc, offs, l, f) \ - bus_space_barrier((sc)->sc_ ## spc ## t, (sc)->sc_ ## spc ## h, \ - (offs), (l), (f)) - -#define HME_SEB_READ_4(sc, offs) HME_SPC_READ_4(seb, (sc), (offs)) -#define HME_SEB_WRITE_4(sc, offs, v) HME_SPC_WRITE_4(seb, (sc), (offs), (v)) -#define HME_SEB_BARRIER(sc, offs, l, f) \ - HME_SPC_BARRIER(seb, (sc), (offs), (l), (f)) -#define HME_ERX_READ_4(sc, offs) HME_SPC_READ_4(erx, (sc), (offs)) -#define 
HME_ERX_WRITE_4(sc, offs, v) HME_SPC_WRITE_4(erx, (sc), (offs), (v)) -#define HME_ERX_BARRIER(sc, offs, l, f) \ - HME_SPC_BARRIER(erx, (sc), (offs), (l), (f)) -#define HME_ETX_READ_4(sc, offs) HME_SPC_READ_4(etx, (sc), (offs)) -#define HME_ETX_WRITE_4(sc, offs, v) HME_SPC_WRITE_4(etx, (sc), (offs), (v)) -#define HME_ETX_BARRIER(sc, offs, l, f) \ - HME_SPC_BARRIER(etx, (sc), (offs), (l), (f)) -#define HME_MAC_READ_4(sc, offs) HME_SPC_READ_4(mac, (sc), (offs)) -#define HME_MAC_WRITE_4(sc, offs, v) HME_SPC_WRITE_4(mac, (sc), (offs), (v)) -#define HME_MAC_BARRIER(sc, offs, l, f) \ - HME_SPC_BARRIER(mac, (sc), (offs), (l), (f)) -#define HME_MIF_READ_4(sc, offs) HME_SPC_READ_4(mif, (sc), (offs)) -#define HME_MIF_WRITE_4(sc, offs, v) HME_SPC_WRITE_4(mif, (sc), (offs), (v)) -#define HME_MIF_BARRIER(sc, offs, l, f) \ - HME_SPC_BARRIER(mif, (sc), (offs), (l), (f)) - -#define HME_MAXERR 5 -#define HME_WHINE(dev, ...) do { \ - if (hme_nerr++ < HME_MAXERR) \ - device_printf(dev, __VA_ARGS__); \ - if (hme_nerr == HME_MAXERR) { \ - device_printf(dev, "too many errors; not reporting " \ - "any more\n"); \ - } \ -} while(0) - -/* Support oversized VLAN frames. */ -#define HME_MAX_FRAMESIZE (ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN) - -int -hme_config(struct hme_softc *sc) -{ - struct ifnet *ifp; - struct mii_softc *child; - bus_size_t size; - int error, rdesc, tdesc, i; - - ifp = sc->sc_ifp = if_alloc(IFT_ETHER); - if (ifp == NULL) - return (ENOSPC); - - /* - * HME common initialization. - * - * hme_softc fields that must be initialized by the front-end: - * - * the DMA bus tag: - * sc_dmatag - * - * the bus handles, tags and offsets (splitted for SBus compatibility): - * sc_seb{t,h,o} (Shared Ethernet Block registers) - * sc_erx{t,h,o} (Receiver Unit registers) - * sc_etx{t,h,o} (Transmitter Unit registers) - * sc_mac{t,h,o} (MAC registers) - * sc_mif{t,h,o} (Management Interface registers) - * - * the maximum bus burst size: - * sc_burst - * - */ - - callout_init_mtx(&sc->sc_tick_ch, &sc->sc_lock, 0); - - /* Make sure the chip is stopped. */ - HME_LOCK(sc); - hme_stop(sc); - HME_UNLOCK(sc); - - error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0, - BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, - BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 0, - NULL, NULL, &sc->sc_pdmatag); - if (error) - goto fail_ifnet; - - /* - * Create control, RX and TX mbuf DMA tags. - * Buffer descriptors must be aligned on a 2048 byte boundary; - * take this into account when calculating the size. Note that - * the maximum number of descriptors (256) occupies 2048 bytes, - * so we allocate that much regardless of HME_N*DESC. - */ - size = 4096; - error = bus_dma_tag_create(sc->sc_pdmatag, 2048, 0, - BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size, - 1, size, 0, busdma_lock_mutex, &sc->sc_lock, &sc->sc_cdmatag); - if (error) - goto fail_ptag; - - error = bus_dma_tag_create(sc->sc_pdmatag, max(0x10, sc->sc_burst), 0, - BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, - 1, MCLBYTES, BUS_DMA_ALLOCNOW, NULL, NULL, &sc->sc_rdmatag); - if (error) - goto fail_ctag; - - error = bus_dma_tag_create(sc->sc_pdmatag, max(0x10, sc->sc_burst), 0, - BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, - MCLBYTES * HME_NTXSEGS, HME_NTXSEGS, MCLBYTES, BUS_DMA_ALLOCNOW, - NULL, NULL, &sc->sc_tdmatag); - if (error) - goto fail_rtag; - - /* Allocate the control DMA buffer. 
*/ - error = bus_dmamem_alloc(sc->sc_cdmatag, (void **)&sc->sc_rb.rb_membase, - BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->sc_cdmamap); - if (error != 0) { - device_printf(sc->sc_dev, "DMA buffer alloc error %d\n", error); - goto fail_ttag; - } - - /* Load the control DMA buffer. */ - sc->sc_rb.rb_dmabase = 0; - if ((error = bus_dmamap_load(sc->sc_cdmatag, sc->sc_cdmamap, - sc->sc_rb.rb_membase, size, hme_cdma_callback, sc, 0)) != 0 || - sc->sc_rb.rb_dmabase == 0) { - device_printf(sc->sc_dev, "DMA buffer map load error %d\n", - error); - goto fail_free; - } - CTR2(KTR_HME, "hme_config: dma va %p, pa %#lx", sc->sc_rb.rb_membase, - sc->sc_rb.rb_dmabase); - - /* - * Prepare the RX descriptors. rdesc serves as marker for the last - * processed descriptor and may be used later on. - */ - for (rdesc = 0; rdesc < HME_NRXDESC; rdesc++) { - sc->sc_rb.rb_rxdesc[rdesc].hrx_m = NULL; - error = bus_dmamap_create(sc->sc_rdmatag, 0, - &sc->sc_rb.rb_rxdesc[rdesc].hrx_dmamap); - if (error != 0) - goto fail_rxdesc; - } - error = bus_dmamap_create(sc->sc_rdmatag, 0, - &sc->sc_rb.rb_spare_dmamap); - if (error != 0) - goto fail_rxdesc; - /* Same for the TX descs. */ - for (tdesc = 0; tdesc < HME_NTXQ; tdesc++) { - sc->sc_rb.rb_txdesc[tdesc].htx_m = NULL; - error = bus_dmamap_create(sc->sc_tdmatag, 0, - &sc->sc_rb.rb_txdesc[tdesc].htx_dmamap); - if (error != 0) - goto fail_txdesc; - } - - sc->sc_csum_features = HME_CSUM_FEATURES; - /* Initialize ifnet structure. */ - ifp->if_softc = sc; - if_initname(ifp, device_get_name(sc->sc_dev), - device_get_unit(sc->sc_dev)); - ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; - ifp->if_start = hme_start; - ifp->if_ioctl = hme_ioctl; - ifp->if_init = hme_init; - IFQ_SET_MAXLEN(&ifp->if_snd, HME_NTXQ); - ifp->if_snd.ifq_drv_maxlen = HME_NTXQ; - IFQ_SET_READY(&ifp->if_snd); - - hme_mifinit(sc); - - /* - * DP83840A used with HME chips don't advertise their media - * capabilities themselves properly so force writing the ANAR - * according to the BMSR in mii_phy_setmedia(). - */ - error = mii_attach(sc->sc_dev, &sc->sc_miibus, ifp, hme_mediachange, - hme_mediastatus, BMSR_DEFCAPMASK, HME_PHYAD_EXTERNAL, - MII_OFFSET_ANY, MIIF_FORCEANEG); - i = mii_attach(sc->sc_dev, &sc->sc_miibus, ifp, hme_mediachange, - hme_mediastatus, BMSR_DEFCAPMASK, HME_PHYAD_INTERNAL, - MII_OFFSET_ANY, MIIF_FORCEANEG); - if (error != 0 && i != 0) { - error = ENXIO; - device_printf(sc->sc_dev, "attaching PHYs failed\n"); - goto fail_rxdesc; - } - sc->sc_mii = device_get_softc(sc->sc_miibus); - - /* - * Walk along the list of attached MII devices and - * establish an `MII instance' to `PHY number' - * mapping. We'll use this mapping to enable the MII - * drivers of the external transceiver according to - * the currently selected media. - */ - sc->sc_phys[0] = sc->sc_phys[1] = -1; - LIST_FOREACH(child, &sc->sc_mii->mii_phys, mii_list) { - /* - * Note: we support just two PHYs: the built-in - * internal device and an external on the MII - * connector. - */ - if ((child->mii_phy != HME_PHYAD_EXTERNAL && - child->mii_phy != HME_PHYAD_INTERNAL) || - child->mii_inst > 1) { - device_printf(sc->sc_dev, "cannot accommodate " - "MII device %s at phy %d, instance %d\n", - device_get_name(child->mii_dev), - child->mii_phy, child->mii_inst); - continue; - } - - sc->sc_phys[child->mii_inst] = child->mii_phy; - } - - /* Attach the interface. */ - ether_ifattach(ifp, sc->sc_enaddr); - - /* - * Tell the upper layer(s) we support long frames/checksum offloads. 
- */ - ifp->if_hdrlen = sizeof(struct ether_vlan_header); - ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_HWCSUM; - ifp->if_hwassist |= sc->sc_csum_features; - ifp->if_capenable |= IFCAP_VLAN_MTU | IFCAP_HWCSUM; - - gone_in_dev(sc->sc_dev, 13, "10/100 NIC almost exclusively for sparc64"); - return (0); - -fail_txdesc: - for (i = 0; i < tdesc; i++) { - bus_dmamap_destroy(sc->sc_tdmatag, - sc->sc_rb.rb_txdesc[i].htx_dmamap); - } - bus_dmamap_destroy(sc->sc_rdmatag, sc->sc_rb.rb_spare_dmamap); -fail_rxdesc: - for (i = 0; i < rdesc; i++) { - bus_dmamap_destroy(sc->sc_rdmatag, - sc->sc_rb.rb_rxdesc[i].hrx_dmamap); - } - bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cdmamap); -fail_free: - bus_dmamem_free(sc->sc_cdmatag, sc->sc_rb.rb_membase, sc->sc_cdmamap); -fail_ttag: - bus_dma_tag_destroy(sc->sc_tdmatag); -fail_rtag: - bus_dma_tag_destroy(sc->sc_rdmatag); -fail_ctag: - bus_dma_tag_destroy(sc->sc_cdmatag); -fail_ptag: - bus_dma_tag_destroy(sc->sc_pdmatag); -fail_ifnet: - if_free(ifp); - return (error); -} - -void -hme_detach(struct hme_softc *sc) -{ - struct ifnet *ifp = sc->sc_ifp; - int i; - - HME_LOCK(sc); - hme_stop(sc); - HME_UNLOCK(sc); - callout_drain(&sc->sc_tick_ch); - ether_ifdetach(ifp); - if_free(ifp); - device_delete_child(sc->sc_dev, sc->sc_miibus); - - for (i = 0; i < HME_NTXQ; i++) { - bus_dmamap_destroy(sc->sc_tdmatag, - sc->sc_rb.rb_txdesc[i].htx_dmamap); - } - bus_dmamap_destroy(sc->sc_rdmatag, sc->sc_rb.rb_spare_dmamap); - for (i = 0; i < HME_NRXDESC; i++) { - bus_dmamap_destroy(sc->sc_rdmatag, - sc->sc_rb.rb_rxdesc[i].hrx_dmamap); - } - bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap, - BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); - bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cdmamap); - bus_dmamem_free(sc->sc_cdmatag, sc->sc_rb.rb_membase, sc->sc_cdmamap); - bus_dma_tag_destroy(sc->sc_tdmatag); - bus_dma_tag_destroy(sc->sc_rdmatag); - bus_dma_tag_destroy(sc->sc_cdmatag); - bus_dma_tag_destroy(sc->sc_pdmatag); -} - -void -hme_suspend(struct hme_softc *sc) -{ - - HME_LOCK(sc); - hme_stop(sc); - HME_UNLOCK(sc); -} - -void -hme_resume(struct hme_softc *sc) -{ - struct ifnet *ifp = sc->sc_ifp; - - HME_LOCK(sc); - if ((ifp->if_flags & IFF_UP) != 0) - hme_init_locked(sc); - HME_UNLOCK(sc); -} - -static void -hme_cdma_callback(void *xsc, bus_dma_segment_t *segs, int nsegs, int error) -{ - struct hme_softc *sc = (struct hme_softc *)xsc; - - if (error != 0) - return; - KASSERT(nsegs == 1, - ("%s: too many DMA segments (%d)", __func__, nsegs)); - sc->sc_rb.rb_dmabase = segs[0].ds_addr; -} - -static void -hme_tick(void *arg) -{ - struct hme_softc *sc = arg; - struct ifnet *ifp; - - HME_LOCK_ASSERT(sc, MA_OWNED); - - ifp = sc->sc_ifp; - /* - * Unload collision counters - */ - if_inc_counter(ifp, IFCOUNTER_COLLISIONS, - HME_MAC_READ_4(sc, HME_MACI_NCCNT) + - HME_MAC_READ_4(sc, HME_MACI_FCCNT) + - HME_MAC_READ_4(sc, HME_MACI_EXCNT) + - HME_MAC_READ_4(sc, HME_MACI_LTCNT)); - - /* - * then clear the hardware counters. 
- */ - HME_MAC_WRITE_4(sc, HME_MACI_NCCNT, 0); - HME_MAC_WRITE_4(sc, HME_MACI_FCCNT, 0); - HME_MAC_WRITE_4(sc, HME_MACI_EXCNT, 0); - HME_MAC_WRITE_4(sc, HME_MACI_LTCNT, 0); - - mii_tick(sc->sc_mii); - - if (hme_watchdog(sc) == EJUSTRETURN) - return; - - callout_reset(&sc->sc_tick_ch, hz, hme_tick, sc); -} - -static void -hme_stop(struct hme_softc *sc) -{ - u_int32_t v; - int n; - - callout_stop(&sc->sc_tick_ch); - sc->sc_wdog_timer = 0; - sc->sc_ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); - sc->sc_flags &= ~HME_LINK; - - /* Mask all interrupts */ - HME_SEB_WRITE_4(sc, HME_SEBI_IMASK, 0xffffffff); - - /* Reset transmitter and receiver */ - HME_SEB_WRITE_4(sc, HME_SEBI_RESET, HME_SEB_RESET_ETX | - HME_SEB_RESET_ERX); - HME_SEB_BARRIER(sc, HME_SEBI_RESET, 4, - BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); - for (n = 0; n < 20; n++) { - v = HME_SEB_READ_4(sc, HME_SEBI_RESET); - if ((v & (HME_SEB_RESET_ETX | HME_SEB_RESET_ERX)) == 0) - return; - DELAY(20); - } - - device_printf(sc->sc_dev, "hme_stop: reset failed\n"); -} - -/* - * Discard the contents of an mbuf in the RX ring, freeing the buffer in the - * ring for subsequent use. - */ -static __inline void -hme_discard_rxbuf(struct hme_softc *sc, int ix) -{ - - /* - * Dropped a packet, reinitialize the descriptor and turn the - * ownership back to the hardware. - */ - HME_XD_SETFLAGS(sc->sc_flags & HME_PCI, sc->sc_rb.rb_rxd, - ix, HME_XD_OWN | HME_XD_ENCODE_RSIZE(HME_DESC_RXLEN(sc, - &sc->sc_rb.rb_rxdesc[ix]))); -} - -static int -hme_add_rxbuf(struct hme_softc *sc, unsigned int ri, int keepold) -{ - struct hme_rxdesc *rd; - struct mbuf *m; - bus_dma_segment_t segs[1]; - bus_dmamap_t map; - uintptr_t b; - int a, unmap, nsegs; - - rd = &sc->sc_rb.rb_rxdesc[ri]; - unmap = rd->hrx_m != NULL; - if (unmap && keepold) { - /* - * Reinitialize the descriptor flags, as they may have been - * altered by the hardware. - */ - hme_discard_rxbuf(sc, ri); - return (0); - } - if ((m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR)) == NULL) - return (ENOBUFS); - m->m_len = m->m_pkthdr.len = m->m_ext.ext_size; - b = mtod(m, uintptr_t); - /* - * Required alignment boundary. At least 16 is needed, but since - * the mapping must be done in a way that a burst can start on a - * natural boundary we might need to extend this. - */ - a = imax(HME_MINRXALIGN, sc->sc_burst); - /* - * Make sure the buffer suitably aligned. The 2 byte offset is removed - * when the mbuf is handed up. XXX: this ensures at least 16 byte - * alignment of the header adjacent to the ethernet header, which - * should be sufficient in all cases. Nevertheless, this second-guesses - * ALIGN(). - */ - m_adj(m, roundup2(b, a) - b); - if (bus_dmamap_load_mbuf_sg(sc->sc_rdmatag, sc->sc_rb.rb_spare_dmamap, - m, segs, &nsegs, 0) != 0) { - m_freem(m); - return (ENOBUFS); - } - /* If nsegs is wrong then the stack is corrupt. 
*/ - KASSERT(nsegs == 1, - ("%s: too many DMA segments (%d)", __func__, nsegs)); - if (unmap) { - bus_dmamap_sync(sc->sc_rdmatag, rd->hrx_dmamap, - BUS_DMASYNC_POSTREAD); - bus_dmamap_unload(sc->sc_rdmatag, rd->hrx_dmamap); - } - map = rd->hrx_dmamap; - rd->hrx_dmamap = sc->sc_rb.rb_spare_dmamap; - sc->sc_rb.rb_spare_dmamap = map; - bus_dmamap_sync(sc->sc_rdmatag, rd->hrx_dmamap, BUS_DMASYNC_PREREAD); - HME_XD_SETADDR(sc->sc_flags & HME_PCI, sc->sc_rb.rb_rxd, ri, - segs[0].ds_addr); - rd->hrx_m = m; - HME_XD_SETFLAGS(sc->sc_flags & HME_PCI, sc->sc_rb.rb_rxd, ri, - HME_XD_OWN | HME_XD_ENCODE_RSIZE(HME_DESC_RXLEN(sc, rd))); - return (0); -} - -static int -hme_meminit(struct hme_softc *sc) -{ - struct hme_ring *hr = &sc->sc_rb; - struct hme_txdesc *td; - bus_addr_t dma; - caddr_t p; - unsigned int i; - int error; - - p = hr->rb_membase; - dma = hr->rb_dmabase; - - /* - * Allocate transmit descriptors - */ - hr->rb_txd = p; - hr->rb_txddma = dma; - p += HME_NTXDESC * HME_XD_SIZE; - dma += HME_NTXDESC * HME_XD_SIZE; - /* - * We have reserved descriptor space until the next 2048 byte - * boundary. - */ - dma = (bus_addr_t)roundup((u_long)dma, 2048); - p = (caddr_t)roundup((u_long)p, 2048); - - /* - * Allocate receive descriptors - */ - hr->rb_rxd = p; - hr->rb_rxddma = dma; - p += HME_NRXDESC * HME_XD_SIZE; - dma += HME_NRXDESC * HME_XD_SIZE; - /* Again move forward to the next 2048 byte boundary.*/ - dma = (bus_addr_t)roundup((u_long)dma, 2048); - p = (caddr_t)roundup((u_long)p, 2048); - - /* - * Initialize transmit buffer descriptors - */ - for (i = 0; i < HME_NTXDESC; i++) { - HME_XD_SETADDR(sc->sc_flags & HME_PCI, hr->rb_txd, i, 0); - HME_XD_SETFLAGS(sc->sc_flags & HME_PCI, hr->rb_txd, i, 0); - } - - STAILQ_INIT(&sc->sc_rb.rb_txfreeq); - STAILQ_INIT(&sc->sc_rb.rb_txbusyq); - for (i = 0; i < HME_NTXQ; i++) { - td = &sc->sc_rb.rb_txdesc[i]; - if (td->htx_m != NULL) { - bus_dmamap_sync(sc->sc_tdmatag, td->htx_dmamap, - BUS_DMASYNC_POSTWRITE); - bus_dmamap_unload(sc->sc_tdmatag, td->htx_dmamap); - m_freem(td->htx_m); - td->htx_m = NULL; - } - STAILQ_INSERT_TAIL(&sc->sc_rb.rb_txfreeq, td, htx_q); - } - - /* - * Initialize receive buffer descriptors - */ - for (i = 0; i < HME_NRXDESC; i++) { - error = hme_add_rxbuf(sc, i, 1); - if (error != 0) - return (error); - } - - bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap, - BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); - - hr->rb_tdhead = hr->rb_tdtail = 0; - hr->rb_td_nbusy = 0; - hr->rb_rdtail = 0; - CTR2(KTR_HME, "hme_meminit: tx ring va %p, pa %#lx", hr->rb_txd, - hr->rb_txddma); - CTR2(KTR_HME, "hme_meminit: rx ring va %p, pa %#lx", hr->rb_rxd, - hr->rb_rxddma); - CTR2(KTR_HME, "rx entry 1: flags %x, address %x", - *(u_int32_t *)hr->rb_rxd, *(u_int32_t *)(hr->rb_rxd + 4)); - CTR2(KTR_HME, "tx entry 1: flags %x, address %x", - *(u_int32_t *)hr->rb_txd, *(u_int32_t *)(hr->rb_txd + 4)); - return (0); -} - -static int -hme_mac_bitflip(struct hme_softc *sc, u_int32_t reg, u_int32_t val, - u_int32_t clr, u_int32_t set) -{ - int i = 0; - - val &= ~clr; - val |= set; - HME_MAC_WRITE_4(sc, reg, val); - HME_MAC_BARRIER(sc, reg, 4, - BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); - if (clr == 0 && set == 0) - return (1); /* just write, no bits to wait for */ - do { - DELAY(100); - i++; - val = HME_MAC_READ_4(sc, reg); - if (i > 40) { - /* After 3.5ms, we should have been done. 
*/ - device_printf(sc->sc_dev, "timeout while writing to " - "MAC configuration register\n"); - return (0); - } - } while ((val & clr) != 0 && (val & set) != set); - return (1); -} - -/* - * Initialization of interface; set up initialization block - * and transmit/receive descriptor rings. - */ -static void -hme_init(void *xsc) -{ - struct hme_softc *sc = (struct hme_softc *)xsc; - - HME_LOCK(sc); - hme_init_locked(sc); - HME_UNLOCK(sc); -} - -static void -hme_init_locked(struct hme_softc *sc) -{ - struct ifnet *ifp = sc->sc_ifp; - u_int8_t *ea; - u_int32_t n, v; - - HME_LOCK_ASSERT(sc, MA_OWNED); - - if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) - return; - - /* - * Initialization sequence. The numbered steps below correspond - * to the sequence outlined in section 6.3.5.1 in the Ethernet - * Channel Engine manual (part of the PCIO manual). - * See also the STP2002-STQ document from Sun Microsystems. - */ - - /* step 1 & 2. Reset the Ethernet Channel */ - hme_stop(sc); - - /* Re-initialize the MIF */ - hme_mifinit(sc); - -#if 0 - /* Mask all MIF interrupts, just in case */ - HME_MIF_WRITE_4(sc, HME_MIFI_IMASK, 0xffff); -#endif - - /* step 3. Setup data structures in host memory */ - if (hme_meminit(sc) != 0) { - device_printf(sc->sc_dev, "out of buffers; init aborted."); - return; - } - - /* step 4. TX MAC registers & counters */ - HME_MAC_WRITE_4(sc, HME_MACI_NCCNT, 0); - HME_MAC_WRITE_4(sc, HME_MACI_FCCNT, 0); - HME_MAC_WRITE_4(sc, HME_MACI_EXCNT, 0); - HME_MAC_WRITE_4(sc, HME_MACI_LTCNT, 0); - HME_MAC_WRITE_4(sc, HME_MACI_TXSIZE, HME_MAX_FRAMESIZE); - - /* Load station MAC address */ - ea = IF_LLADDR(ifp); - HME_MAC_WRITE_4(sc, HME_MACI_MACADDR0, (ea[0] << 8) | ea[1]); - HME_MAC_WRITE_4(sc, HME_MACI_MACADDR1, (ea[2] << 8) | ea[3]); - HME_MAC_WRITE_4(sc, HME_MACI_MACADDR2, (ea[4] << 8) | ea[5]); - - /* - * Init seed for backoff - * (source suggested by manual: low 10 bits of MAC address) - */ - v = ((ea[4] << 8) | ea[5]) & 0x3fff; - HME_MAC_WRITE_4(sc, HME_MACI_RANDSEED, v); - - /* Note: Accepting power-on default for other MAC registers here.. */ - - /* step 5. RX MAC registers & counters */ - hme_setladrf(sc, 0); - - /* step 6 & 7. Program Descriptor Ring Base Addresses */ - HME_ETX_WRITE_4(sc, HME_ETXI_RING, sc->sc_rb.rb_txddma); - /* Transmit Descriptor ring size: in increments of 16 */ - HME_ETX_WRITE_4(sc, HME_ETXI_RSIZE, HME_NTXDESC / 16 - 1); - - HME_ERX_WRITE_4(sc, HME_ERXI_RING, sc->sc_rb.rb_rxddma); - HME_MAC_WRITE_4(sc, HME_MACI_RXSIZE, HME_MAX_FRAMESIZE); - - /* step 8. Global Configuration & Interrupt Mask */ - HME_SEB_WRITE_4(sc, HME_SEBI_IMASK, - ~(/*HME_SEB_STAT_GOTFRAME | HME_SEB_STAT_SENTFRAME |*/ - HME_SEB_STAT_HOSTTOTX | - HME_SEB_STAT_RXTOHOST | - HME_SEB_STAT_TXALL | - HME_SEB_STAT_TXPERR | - HME_SEB_STAT_RCNTEXP | - HME_SEB_STAT_ALL_ERRORS )); - - switch (sc->sc_burst) { - default: - v = 0; - break; - case 16: - v = HME_SEB_CFG_BURST16; - break; - case 32: - v = HME_SEB_CFG_BURST32; - break; - case 64: - v = HME_SEB_CFG_BURST64; - break; - } - /* - * Blindly setting 64bit transfers may hang PCI cards(Cheerio?). - * Allowing 64bit transfers breaks TX checksum offload as well. - * Don't know this comes from hardware bug or driver's DMAing - * scheme. - * - * if (sc->sc_flags & HME_PCI == 0) - * v |= HME_SEB_CFG_64BIT; - */ - HME_SEB_WRITE_4(sc, HME_SEBI_CFG, v); - - /* step 9. 
ETX Configuration: use mostly default values */ - - /* Enable DMA */ - v = HME_ETX_READ_4(sc, HME_ETXI_CFG); - v |= HME_ETX_CFG_DMAENABLE; - HME_ETX_WRITE_4(sc, HME_ETXI_CFG, v); - - /* step 10. ERX Configuration */ - v = HME_ERX_READ_4(sc, HME_ERXI_CFG); - - /* Encode Receive Descriptor ring size: four possible values */ - v &= ~HME_ERX_CFG_RINGSIZEMSK; - switch (HME_NRXDESC) { - case 32: - v |= HME_ERX_CFG_RINGSIZE32; - break; - case 64: - v |= HME_ERX_CFG_RINGSIZE64; - break; - case 128: - v |= HME_ERX_CFG_RINGSIZE128; - break; - case 256: - v |= HME_ERX_CFG_RINGSIZE256; - break; - default: - printf("hme: invalid Receive Descriptor ring size\n"); - break; - } - - /* Enable DMA, fix RX first byte offset. */ - v &= ~HME_ERX_CFG_FBO_MASK; - v |= HME_ERX_CFG_DMAENABLE | (HME_RXOFFS << HME_ERX_CFG_FBO_SHIFT); - /* RX TCP/UDP checksum offset */ - n = (ETHER_HDR_LEN + sizeof(struct ip)) / 2; - n = (n << HME_ERX_CFG_CSUMSTART_SHIFT) & HME_ERX_CFG_CSUMSTART_MASK; - v |= n; - CTR1(KTR_HME, "hme_init: programming ERX_CFG to %x", (u_int)v); - HME_ERX_WRITE_4(sc, HME_ERXI_CFG, v); - - /* step 11. XIF Configuration */ - v = HME_MAC_READ_4(sc, HME_MACI_XIF); - v |= HME_MAC_XIF_OE; - CTR1(KTR_HME, "hme_init: programming XIF to %x", (u_int)v); - HME_MAC_WRITE_4(sc, HME_MACI_XIF, v); - - /* step 12. RX_MAC Configuration Register */ - v = HME_MAC_READ_4(sc, HME_MACI_RXCFG); - v |= HME_MAC_RXCFG_ENABLE; - v &= ~(HME_MAC_RXCFG_DCRCS); - CTR1(KTR_HME, "hme_init: programming RX_MAC to %x", (u_int)v); - HME_MAC_WRITE_4(sc, HME_MACI_RXCFG, v); - - /* step 13. TX_MAC Configuration Register */ - v = HME_MAC_READ_4(sc, HME_MACI_TXCFG); - v |= (HME_MAC_TXCFG_ENABLE | HME_MAC_TXCFG_DGIVEUP); - CTR1(KTR_HME, "hme_init: programming TX_MAC to %x", (u_int)v); - HME_MAC_WRITE_4(sc, HME_MACI_TXCFG, v); - - /* step 14. Issue Transmit Pending command */ - -#ifdef HMEDEBUG - /* Debug: double-check. */ - CTR4(KTR_HME, "hme_init: tx ring %#x, rsz %#x, rx ring %#x, " - "rxsize %#x", HME_ETX_READ_4(sc, HME_ETXI_RING), - HME_ETX_READ_4(sc, HME_ETXI_RSIZE), - HME_ERX_READ_4(sc, HME_ERXI_RING), - HME_MAC_READ_4(sc, HME_MACI_RXSIZE)); - CTR3(KTR_HME, "hme_init: intr mask %#x, erx cfg %#x, etx cfg %#x", - HME_SEB_READ_4(sc, HME_SEBI_IMASK), - HME_ERX_READ_4(sc, HME_ERXI_CFG), - HME_ETX_READ_4(sc, HME_ETXI_CFG)); - CTR2(KTR_HME, "hme_init: mac rxcfg %#x, maci txcfg %#x", - HME_MAC_READ_4(sc, HME_MACI_RXCFG), - HME_MAC_READ_4(sc, HME_MACI_TXCFG)); -#endif - - ifp->if_drv_flags |= IFF_DRV_RUNNING; - ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; - - /* Set the current media. */ - hme_mediachange_locked(sc); - - /* Start the one second timer. */ - sc->sc_wdog_timer = 0; - callout_reset(&sc->sc_tick_ch, hz, hme_tick, sc); -} - -/* - * Routine to DMA map an mbuf chain, set up the descriptor rings - * accordingly and start the transmission. - * Returns 0 on success, -1 if there were not enough free descriptors - * to map the packet, or an errno otherwise. - * - * XXX: this relies on the fact that segments returned by - * bus_dmamap_load_mbuf_sg() are readable from the nearest burst - * boundary on (i.e. potentially before ds_addr) to the first - * boundary beyond the end. This is usually a safe assumption to - * make, but is not documented. 
- */ -static int -hme_load_txmbuf(struct hme_softc *sc, struct mbuf **m0) -{ - bus_dma_segment_t segs[HME_NTXSEGS]; - struct hme_txdesc *htx; - struct ip *ip; - struct mbuf *m; - caddr_t txd; - int error, i, nsegs, pci, ri, si; - uint32_t cflags, flags; - - if ((htx = STAILQ_FIRST(&sc->sc_rb.rb_txfreeq)) == NULL) - return (ENOBUFS); - - cflags = 0; - if (((*m0)->m_pkthdr.csum_flags & sc->sc_csum_features) != 0) { - if (M_WRITABLE(*m0) == 0) { - m = m_dup(*m0, M_NOWAIT); - m_freem(*m0); - *m0 = m; - if (m == NULL) - return (ENOBUFS); - } - i = sizeof(struct ether_header); - m = m_pullup(*m0, i + sizeof(struct ip)); - if (m == NULL) { - *m0 = NULL; - return (ENOBUFS); - } - ip = (struct ip *)(mtod(m, caddr_t) + i); - i += (ip->ip_hl << 2); - cflags = i << HME_XD_TXCKSUM_SSHIFT | - ((i + m->m_pkthdr.csum_data) << HME_XD_TXCKSUM_OSHIFT) | - HME_XD_TXCKSUM; - *m0 = m; - } - - error = bus_dmamap_load_mbuf_sg(sc->sc_tdmatag, htx->htx_dmamap, - *m0, segs, &nsegs, 0); - if (error == EFBIG) { - m = m_collapse(*m0, M_NOWAIT, HME_NTXSEGS); - if (m == NULL) { - m_freem(*m0); - *m0 = NULL; - return (ENOMEM); - } - *m0 = m; - error = bus_dmamap_load_mbuf_sg(sc->sc_tdmatag, htx->htx_dmamap, - *m0, segs, &nsegs, 0); - if (error != 0) { - m_freem(*m0); - *m0 = NULL; - return (error); - } - } else if (error != 0) - return (error); - /* If nsegs is wrong then the stack is corrupt. */ - KASSERT(nsegs <= HME_NTXSEGS, - ("%s: too many DMA segments (%d)", __func__, nsegs)); - if (nsegs == 0) { - m_freem(*m0); - *m0 = NULL; - return (EIO); - } - if (sc->sc_rb.rb_td_nbusy + nsegs >= HME_NTXDESC) { - bus_dmamap_unload(sc->sc_tdmatag, htx->htx_dmamap); - /* Retry with m_collapse(9)? */ - return (ENOBUFS); - } - bus_dmamap_sync(sc->sc_tdmatag, htx->htx_dmamap, BUS_DMASYNC_PREWRITE); - - si = ri = sc->sc_rb.rb_tdhead; - txd = sc->sc_rb.rb_txd; - pci = sc->sc_flags & HME_PCI; - CTR2(KTR_HME, "hme_load_mbuf: next desc is %d (%#x)", ri, - HME_XD_GETFLAGS(pci, txd, ri)); - for (i = 0; i < nsegs; i++) { - /* Fill the ring entry. */ - flags = HME_XD_ENCODE_TSIZE(segs[i].ds_len); - if (i == 0) - flags |= HME_XD_SOP | cflags; - else - flags |= HME_XD_OWN | cflags; - CTR3(KTR_HME, "hme_load_mbuf: activating ri %d, si %d (%#x)", - ri, si, flags); - HME_XD_SETADDR(pci, txd, ri, segs[i].ds_addr); - HME_XD_SETFLAGS(pci, txd, ri, flags); - sc->sc_rb.rb_td_nbusy++; - htx->htx_lastdesc = ri; - ri = (ri + 1) % HME_NTXDESC; - } - sc->sc_rb.rb_tdhead = ri; - - /* set EOP on the last descriptor */ - ri = (ri + HME_NTXDESC - 1) % HME_NTXDESC; - flags = HME_XD_GETFLAGS(pci, txd, ri); - flags |= HME_XD_EOP; - CTR3(KTR_HME, "hme_load_mbuf: setting EOP ri %d, si %d (%#x)", ri, si, - flags); - HME_XD_SETFLAGS(pci, txd, ri, flags); - - /* Turn the first descriptor ownership to the hme */ - flags = HME_XD_GETFLAGS(pci, txd, si); - flags |= HME_XD_OWN; - CTR2(KTR_HME, "hme_load_mbuf: setting OWN for 1st desc ri %d, (%#x)", - ri, flags); - HME_XD_SETFLAGS(pci, txd, si, flags); - - STAILQ_REMOVE_HEAD(&sc->sc_rb.rb_txfreeq, htx_q); - STAILQ_INSERT_TAIL(&sc->sc_rb.rb_txbusyq, htx, htx_q); - htx->htx_m = *m0; - - /* start the transmission. */ - HME_ETX_WRITE_4(sc, HME_ETXI_PENDING, HME_ETX_TP_DMAWAKEUP); - - return (0); -} - -/* - * Pass a packet to the higher levels. 
- */ -static void -hme_read(struct hme_softc *sc, int ix, int len, u_int32_t flags) -{ - struct ifnet *ifp = sc->sc_ifp; - struct mbuf *m; - - if (len <= sizeof(struct ether_header) || - len > HME_MAX_FRAMESIZE) { -#ifdef HMEDEBUG - HME_WHINE(sc->sc_dev, "invalid packet size %d; dropping\n", - len); -#endif - if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); - hme_discard_rxbuf(sc, ix); - return; - } - - m = sc->sc_rb.rb_rxdesc[ix].hrx_m; - CTR1(KTR_HME, "hme_read: len %d", len); - - if (hme_add_rxbuf(sc, ix, 0) != 0) { - /* - * hme_add_rxbuf will leave the old buffer in the ring until - * it is sure that a new buffer can be mapped. If it can not, - * drop the packet, but leave the interface up. - */ - if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1); - hme_discard_rxbuf(sc, ix); - return; - } - - if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); - - m->m_pkthdr.rcvif = ifp; - m->m_pkthdr.len = m->m_len = len + HME_RXOFFS; - m_adj(m, HME_RXOFFS); - /* RX TCP/UDP checksum */ - if (ifp->if_capenable & IFCAP_RXCSUM) - hme_rxcksum(m, flags); - /* Pass the packet up. */ - HME_UNLOCK(sc); - (*ifp->if_input)(ifp, m); - HME_LOCK(sc); -} - -static void -hme_start(struct ifnet *ifp) -{ - struct hme_softc *sc = ifp->if_softc; - - HME_LOCK(sc); - hme_start_locked(ifp); - HME_UNLOCK(sc); -} - -static void -hme_start_locked(struct ifnet *ifp) -{ - struct hme_softc *sc = (struct hme_softc *)ifp->if_softc; - struct mbuf *m; - int error, enq = 0; - - if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != - IFF_DRV_RUNNING || (sc->sc_flags & HME_LINK) == 0) - return; - - for (; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) && - sc->sc_rb.rb_td_nbusy < HME_NTXDESC - 1;) { - IFQ_DRV_DEQUEUE(&ifp->if_snd, m); - if (m == NULL) - break; - - error = hme_load_txmbuf(sc, &m); - if (error != 0) { - if (m == NULL) - break; - ifp->if_drv_flags |= IFF_DRV_OACTIVE; - IFQ_DRV_PREPEND(&ifp->if_snd, m); - break; - } - enq++; - BPF_MTAP(ifp, m); - } - - if (enq > 0) { - bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap, - BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); - sc->sc_wdog_timer = 5; - } -} - -/* - * Transmit interrupt. - */ -static void -hme_tint(struct hme_softc *sc) -{ - caddr_t txd; - struct ifnet *ifp = sc->sc_ifp; - struct hme_txdesc *htx; - unsigned int ri, txflags; - - txd = sc->sc_rb.rb_txd; - htx = STAILQ_FIRST(&sc->sc_rb.rb_txbusyq); - bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap, BUS_DMASYNC_POSTREAD); - /* Fetch current position in the transmit ring */ - for (ri = sc->sc_rb.rb_tdtail;; ri = (ri + 1) % HME_NTXDESC) { - if (sc->sc_rb.rb_td_nbusy <= 0) { - CTR0(KTR_HME, "hme_tint: not busy!"); - break; - } - - txflags = HME_XD_GETFLAGS(sc->sc_flags & HME_PCI, txd, ri); - CTR2(KTR_HME, "hme_tint: index %d, flags %#x", ri, txflags); - - if ((txflags & HME_XD_OWN) != 0) - break; - - CTR0(KTR_HME, "hme_tint: not owned"); - --sc->sc_rb.rb_td_nbusy; - ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; - - /* Complete packet transmitted? */ - if ((txflags & HME_XD_EOP) == 0) - continue; - - KASSERT(htx->htx_lastdesc == ri, - ("%s: ring indices skewed: %d != %d!", - __func__, htx->htx_lastdesc, ri)); - bus_dmamap_sync(sc->sc_tdmatag, htx->htx_dmamap, - BUS_DMASYNC_POSTWRITE); - bus_dmamap_unload(sc->sc_tdmatag, htx->htx_dmamap); - - if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1); - m_freem(htx->htx_m); - htx->htx_m = NULL; - STAILQ_REMOVE_HEAD(&sc->sc_rb.rb_txbusyq, htx_q); - STAILQ_INSERT_TAIL(&sc->sc_rb.rb_txfreeq, htx, htx_q); - htx = STAILQ_FIRST(&sc->sc_rb.rb_txbusyq); - } - sc->sc_wdog_timer = sc->sc_rb.rb_td_nbusy > 0 ? 
5 : 0; - - /* Update ring */ - sc->sc_rb.rb_tdtail = ri; - - hme_start_locked(ifp); -} - -/* - * RX TCP/UDP checksum - */ -static void -hme_rxcksum(struct mbuf *m, u_int32_t flags) -{ - struct ether_header *eh; - struct ip *ip; - struct udphdr *uh; - int32_t hlen, len, pktlen; - u_int16_t cksum, *opts; - u_int32_t temp32; - - pktlen = m->m_pkthdr.len; - if (pktlen < sizeof(struct ether_header) + sizeof(struct ip)) - return; - eh = mtod(m, struct ether_header *); - if (eh->ether_type != htons(ETHERTYPE_IP)) - return; - ip = (struct ip *)(eh + 1); - if (ip->ip_v != IPVERSION) - return; - - hlen = ip->ip_hl << 2; - pktlen -= sizeof(struct ether_header); - if (hlen < sizeof(struct ip)) - return; - if (ntohs(ip->ip_len) < hlen) - return; - if (ntohs(ip->ip_len) != pktlen) - return; - if (ip->ip_off & htons(IP_MF | IP_OFFMASK)) - return; /* can't handle fragmented packet */ - - switch (ip->ip_p) { - case IPPROTO_TCP: - if (pktlen < (hlen + sizeof(struct tcphdr))) - return; - break; - case IPPROTO_UDP: - if (pktlen < (hlen + sizeof(struct udphdr))) - return; - uh = (struct udphdr *)((caddr_t)ip + hlen); - if (uh->uh_sum == 0) - return; /* no checksum */ - break; - default: - return; - } - - cksum = ~(flags & HME_XD_RXCKSUM); - /* checksum fixup for IP options */ - len = hlen - sizeof(struct ip); - if (len > 0) { - opts = (u_int16_t *)(ip + 1); - for (; len > 0; len -= sizeof(u_int16_t), opts++) { - temp32 = cksum - *opts; - temp32 = (temp32 >> 16) + (temp32 & 65535); - cksum = temp32 & 65535; - } - } - m->m_pkthdr.csum_flags |= CSUM_DATA_VALID; - m->m_pkthdr.csum_data = cksum; -} - -/* - * Receive interrupt. - */ -static void -hme_rint(struct hme_softc *sc) -{ - caddr_t xdr = sc->sc_rb.rb_rxd; - struct ifnet *ifp = sc->sc_ifp; - unsigned int ri, len; - int progress = 0; - u_int32_t flags; - - /* - * Process all buffers with valid data. 
- */ - bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap, BUS_DMASYNC_POSTREAD); - for (ri = sc->sc_rb.rb_rdtail;; ri = (ri + 1) % HME_NRXDESC) { - flags = HME_XD_GETFLAGS(sc->sc_flags & HME_PCI, xdr, ri); - CTR2(KTR_HME, "hme_rint: index %d, flags %#x", ri, flags); - if ((flags & HME_XD_OWN) != 0) - break; - - progress++; - if ((flags & HME_XD_OFL) != 0) { - device_printf(sc->sc_dev, "buffer overflow, ri=%d; " - "flags=0x%x\n", ri, flags); - if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); - hme_discard_rxbuf(sc, ri); - } else { - len = HME_XD_DECODE_RSIZE(flags); - hme_read(sc, ri, len, flags); - } - } - if (progress) { - bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap, - BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); - } - sc->sc_rb.rb_rdtail = ri; -} - -static void -hme_eint(struct hme_softc *sc, u_int status) -{ - - if ((status & HME_SEB_STAT_MIFIRQ) != 0) { - device_printf(sc->sc_dev, "XXXlink status changed: " - "cfg=%#x, stat=%#x, sm=%#x\n", - HME_MIF_READ_4(sc, HME_MIFI_CFG), - HME_MIF_READ_4(sc, HME_MIFI_STAT), - HME_MIF_READ_4(sc, HME_MIFI_SM)); - return; - } - - /* check for fatal errors that needs reset to unfreeze DMA engine */ - if ((status & HME_SEB_STAT_FATAL_ERRORS) != 0) { - HME_WHINE(sc->sc_dev, "error signaled, status=%#x\n", status); - sc->sc_ifp->if_drv_flags &= ~IFF_DRV_RUNNING; - hme_init_locked(sc); - } -} - -void -hme_intr(void *v) -{ - struct hme_softc *sc = (struct hme_softc *)v; - u_int32_t status; - - HME_LOCK(sc); - status = HME_SEB_READ_4(sc, HME_SEBI_STAT); - CTR1(KTR_HME, "hme_intr: status %#x", (u_int)status); - - if ((status & HME_SEB_STAT_ALL_ERRORS) != 0) - hme_eint(sc, status); - - if ((status & HME_SEB_STAT_RXTOHOST) != 0) - hme_rint(sc); - - if ((status & (HME_SEB_STAT_TXALL | HME_SEB_STAT_HOSTTOTX)) != 0) - hme_tint(sc); - HME_UNLOCK(sc); -} - -static int -hme_watchdog(struct hme_softc *sc) -{ - struct ifnet *ifp = sc->sc_ifp; - - HME_LOCK_ASSERT(sc, MA_OWNED); - -#ifdef HMEDEBUG - CTR1(KTR_HME, "hme_watchdog: status %x", - (u_int)HME_SEB_READ_4(sc, HME_SEBI_STAT)); -#endif - - if (sc->sc_wdog_timer == 0 || --sc->sc_wdog_timer != 0) - return (0); - - if ((sc->sc_flags & HME_LINK) != 0) - device_printf(sc->sc_dev, "device timeout\n"); - else if (bootverbose) - device_printf(sc->sc_dev, "device timeout (no link)\n"); - if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); - - ifp->if_drv_flags &= ~IFF_DRV_RUNNING; - hme_init_locked(sc); - hme_start_locked(ifp); - return (EJUSTRETURN); -} - -/* - * Initialize the MII Management Interface - */ -static void -hme_mifinit(struct hme_softc *sc) -{ - u_int32_t v; - - /* - * Configure the MIF in frame mode, polling disabled, internal PHY - * selected. - */ - HME_MIF_WRITE_4(sc, HME_MIFI_CFG, 0); - - /* - * If the currently selected media uses the external transceiver, - * enable its MII drivers (which basically isolates the internal - * one and vice versa). In case the current media hasn't been set, - * yet, we default to the internal transceiver. 
- */ - v = HME_MAC_READ_4(sc, HME_MACI_XIF); - if (sc->sc_mii != NULL && sc->sc_mii->mii_media.ifm_cur != NULL && - sc->sc_phys[IFM_INST(sc->sc_mii->mii_media.ifm_cur->ifm_media)] == - HME_PHYAD_EXTERNAL) - v |= HME_MAC_XIF_MIIENABLE; - else - v &= ~HME_MAC_XIF_MIIENABLE; - HME_MAC_WRITE_4(sc, HME_MACI_XIF, v); -} - -/* - * MII interface - */ -int -hme_mii_readreg(device_t dev, int phy, int reg) -{ - struct hme_softc *sc; - int n; - u_int32_t v; - - sc = device_get_softc(dev); - /* Select the desired PHY in the MIF configuration register */ - v = HME_MIF_READ_4(sc, HME_MIFI_CFG); - if (phy == HME_PHYAD_EXTERNAL) - v |= HME_MIF_CFG_PHY; - else - v &= ~HME_MIF_CFG_PHY; - HME_MIF_WRITE_4(sc, HME_MIFI_CFG, v); - - /* Construct the frame command */ - v = (MII_COMMAND_START << HME_MIF_FO_ST_SHIFT) | - HME_MIF_FO_TAMSB | - (MII_COMMAND_READ << HME_MIF_FO_OPC_SHIFT) | - (phy << HME_MIF_FO_PHYAD_SHIFT) | - (reg << HME_MIF_FO_REGAD_SHIFT); - - HME_MIF_WRITE_4(sc, HME_MIFI_FO, v); - HME_MIF_BARRIER(sc, HME_MIFI_FO, 4, - BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); - for (n = 0; n < 100; n++) { - DELAY(1); - v = HME_MIF_READ_4(sc, HME_MIFI_FO); - if (v & HME_MIF_FO_TALSB) - return (v & HME_MIF_FO_DATA); - } - - device_printf(sc->sc_dev, "mii_read timeout\n"); - return (0); -} - -int -hme_mii_writereg(device_t dev, int phy, int reg, int val) -{ - struct hme_softc *sc; - int n; - u_int32_t v; - - sc = device_get_softc(dev); - /* Select the desired PHY in the MIF configuration register */ - v = HME_MIF_READ_4(sc, HME_MIFI_CFG); - if (phy == HME_PHYAD_EXTERNAL) - v |= HME_MIF_CFG_PHY; - else - v &= ~HME_MIF_CFG_PHY; - HME_MIF_WRITE_4(sc, HME_MIFI_CFG, v); - - /* Construct the frame command */ - v = (MII_COMMAND_START << HME_MIF_FO_ST_SHIFT) | - HME_MIF_FO_TAMSB | - (MII_COMMAND_WRITE << HME_MIF_FO_OPC_SHIFT) | - (phy << HME_MIF_FO_PHYAD_SHIFT) | - (reg << HME_MIF_FO_REGAD_SHIFT) | - (val & HME_MIF_FO_DATA); - - HME_MIF_WRITE_4(sc, HME_MIFI_FO, v); - HME_MIF_BARRIER(sc, HME_MIFI_FO, 4, - BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); - for (n = 0; n < 100; n++) { - DELAY(1); - v = HME_MIF_READ_4(sc, HME_MIFI_FO); - if (v & HME_MIF_FO_TALSB) - return (1); - } - - device_printf(sc->sc_dev, "mii_write timeout\n"); - return (0); -} - -void -hme_mii_statchg(device_t dev) -{ - struct hme_softc *sc; - uint32_t rxcfg, txcfg; - - sc = device_get_softc(dev); - -#ifdef HMEDEBUG - if ((sc->sc_ifp->if_flags & IFF_DEBUG) != 0) - device_printf(sc->sc_dev, "hme_mii_statchg: status change\n"); -#endif - - if ((sc->sc_mii->mii_media_status & IFM_ACTIVE) != 0 && - IFM_SUBTYPE(sc->sc_mii->mii_media_active) != IFM_NONE) - sc->sc_flags |= HME_LINK; - else - sc->sc_flags &= ~HME_LINK; - - txcfg = HME_MAC_READ_4(sc, HME_MACI_TXCFG); - if (!hme_mac_bitflip(sc, HME_MACI_TXCFG, txcfg, - HME_MAC_TXCFG_ENABLE, 0)) - device_printf(sc->sc_dev, "cannot disable TX MAC\n"); - rxcfg = HME_MAC_READ_4(sc, HME_MACI_RXCFG); - if (!hme_mac_bitflip(sc, HME_MACI_RXCFG, rxcfg, - HME_MAC_RXCFG_ENABLE, 0)) - device_printf(sc->sc_dev, "cannot disable RX MAC\n"); - - /* Set the MAC Full Duplex bit appropriately. 
*/ - if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) != 0) - txcfg |= HME_MAC_TXCFG_FULLDPLX; - else - txcfg &= ~HME_MAC_TXCFG_FULLDPLX; - HME_MAC_WRITE_4(sc, HME_MACI_TXCFG, txcfg); - - if ((sc->sc_ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 && - (sc->sc_flags & HME_LINK) != 0) { - if (!hme_mac_bitflip(sc, HME_MACI_TXCFG, txcfg, 0, - HME_MAC_TXCFG_ENABLE)) - device_printf(sc->sc_dev, "cannot enable TX MAC\n"); - if (!hme_mac_bitflip(sc, HME_MACI_RXCFG, rxcfg, 0, - HME_MAC_RXCFG_ENABLE)) - device_printf(sc->sc_dev, "cannot enable RX MAC\n"); - } -} - -static int -hme_mediachange(struct ifnet *ifp) -{ - struct hme_softc *sc = ifp->if_softc; - int error; - - HME_LOCK(sc); - error = hme_mediachange_locked(sc); - HME_UNLOCK(sc); - return (error); -} - -static int -hme_mediachange_locked(struct hme_softc *sc) -{ - struct mii_softc *child; - - HME_LOCK_ASSERT(sc, MA_OWNED); - -#ifdef HMEDEBUG - if ((sc->sc_ifp->if_flags & IFF_DEBUG) != 0) - device_printf(sc->sc_dev, "hme_mediachange_locked"); -#endif - - hme_mifinit(sc); - - /* - * If both PHYs are present reset them. This is required for - * unisolating the previously isolated PHY when switching PHYs. - * As the above hme_mifinit() call will set the MII drivers in - * the XIF configuration register according to the currently - * selected media, there should be no window during which the - * data paths of both transceivers are open at the same time, - * even if the PHY device drivers use MIIF_NOISOLATE. - */ - if (sc->sc_phys[0] != -1 && sc->sc_phys[1] != -1) - LIST_FOREACH(child, &sc->sc_mii->mii_phys, mii_list) - PHY_RESET(child); - return (mii_mediachg(sc->sc_mii)); -} - -static void -hme_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr) -{ - struct hme_softc *sc = ifp->if_softc; - - HME_LOCK(sc); - if ((ifp->if_flags & IFF_UP) == 0) { - HME_UNLOCK(sc); - return; - } - - mii_pollstat(sc->sc_mii); - ifmr->ifm_active = sc->sc_mii->mii_media_active; - ifmr->ifm_status = sc->sc_mii->mii_media_status; - HME_UNLOCK(sc); -} - -/* - * Process an ioctl request. 
- */ -static int -hme_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) -{ - struct hme_softc *sc = ifp->if_softc; - struct ifreq *ifr = (struct ifreq *)data; - int error = 0; - - switch (cmd) { - case SIOCSIFFLAGS: - HME_LOCK(sc); - if ((ifp->if_flags & IFF_UP) != 0) { - if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 && - ((ifp->if_flags ^ sc->sc_ifflags) & - (IFF_ALLMULTI | IFF_PROMISC)) != 0) - hme_setladrf(sc, 1); - else - hme_init_locked(sc); - } else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) - hme_stop(sc); - if ((ifp->if_flags & IFF_LINK0) != 0) - sc->sc_csum_features |= CSUM_UDP; - else - sc->sc_csum_features &= ~CSUM_UDP; - if ((ifp->if_capenable & IFCAP_TXCSUM) != 0) - ifp->if_hwassist = sc->sc_csum_features; - sc->sc_ifflags = ifp->if_flags; - HME_UNLOCK(sc); - break; - - case SIOCADDMULTI: - case SIOCDELMULTI: - HME_LOCK(sc); - hme_setladrf(sc, 1); - HME_UNLOCK(sc); - error = 0; - break; - case SIOCGIFMEDIA: - case SIOCSIFMEDIA: - error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii->mii_media, cmd); - break; - case SIOCSIFCAP: - HME_LOCK(sc); - ifp->if_capenable = ifr->ifr_reqcap; - if ((ifp->if_capenable & IFCAP_TXCSUM) != 0) - ifp->if_hwassist = sc->sc_csum_features; - else - ifp->if_hwassist = 0; - HME_UNLOCK(sc); - break; - default: - error = ether_ioctl(ifp, cmd, data); - break; - } - - return (error); -} - -static u_int -hme_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt) -{ - uint32_t crc, *hash = arg; - - crc = ether_crc32_le(LLADDR(sdl), ETHER_ADDR_LEN); - /* Just want the 6 most significant bits. */ - crc >>= 26; - /* Set the corresponding bit in the filter. */ - hash[crc >> 4] |= 1 << (crc & 0xf); - - return (1); -} - -/* - * Set up the logical address filter. - */ -static void -hme_setladrf(struct hme_softc *sc, int reenable) -{ - struct ifnet *ifp = sc->sc_ifp; - u_int32_t hash[4]; - u_int32_t macc; - - HME_LOCK_ASSERT(sc, MA_OWNED); - /* Clear the hash table. */ - hash[3] = hash[2] = hash[1] = hash[0] = 0; - - /* Get the current RX configuration. */ - macc = HME_MAC_READ_4(sc, HME_MACI_RXCFG); - - /* - * Turn off promiscuous mode, promiscuous group mode (all multicast), - * and hash filter. Depending on the case, the right bit will be - * enabled. - */ - macc &= ~(HME_MAC_RXCFG_PGRP | HME_MAC_RXCFG_PMISC); - - /* - * Disable the receiver while changing it's state as the documentation - * mandates. - * We then must wait until the bit clears in the register. This should - * take at most 3.5ms. - */ - if (!hme_mac_bitflip(sc, HME_MACI_RXCFG, macc, - HME_MAC_RXCFG_ENABLE, 0)) - device_printf(sc->sc_dev, "cannot disable RX MAC\n"); - /* Disable the hash filter before writing to the filter registers. */ - if (!hme_mac_bitflip(sc, HME_MACI_RXCFG, macc, - HME_MAC_RXCFG_HENABLE, 0)) - device_printf(sc->sc_dev, "cannot disable hash filter\n"); - - /* Make the RX MAC really SIMPLEX. */ - macc |= HME_MAC_RXCFG_ME; - if (reenable) - macc |= HME_MAC_RXCFG_ENABLE; - else - macc &= ~HME_MAC_RXCFG_ENABLE; - - if ((ifp->if_flags & IFF_PROMISC) != 0) { - macc |= HME_MAC_RXCFG_PMISC; - goto chipit; - } - if ((ifp->if_flags & IFF_ALLMULTI) != 0) { - macc |= HME_MAC_RXCFG_PGRP; - goto chipit; - } - - macc |= HME_MAC_RXCFG_HENABLE; - - /* - * Set up multicast address filter by passing all multicast addresses - * through a crc generator, and then using the high order 6 bits as an - * index into the 64 bit logical address filter. The high order bit - * selects the word, while the rest of the bits select the bit within - * the word. 
- */ - if_foreach_llmaddr(ifp, hme_hash_maddr, &hash); - -chipit: - /* Now load the hash table into the chip */ - HME_MAC_WRITE_4(sc, HME_MACI_HASHTAB0, hash[0]); - HME_MAC_WRITE_4(sc, HME_MACI_HASHTAB1, hash[1]); - HME_MAC_WRITE_4(sc, HME_MACI_HASHTAB2, hash[2]); - HME_MAC_WRITE_4(sc, HME_MACI_HASHTAB3, hash[3]); - if (!hme_mac_bitflip(sc, HME_MACI_RXCFG, macc, 0, - macc & (HME_MAC_RXCFG_ENABLE | HME_MAC_RXCFG_HENABLE | - HME_MAC_RXCFG_ME))) - device_printf(sc->sc_dev, "cannot configure RX MAC\n"); -} Index: sys/dev/hme/if_hme_pci.c =================================================================== --- sys/dev/hme/if_hme_pci.c +++ /dev/null @@ -1,415 +0,0 @@ -/*- - * SPDX-License-Identifier: BSD-2-Clause-FreeBSD - * - * Copyright (c) 2000 Matthew R. Green - * Copyright (c) 2007 Marius Strobl - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. The name of the author may not be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, - * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED - * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, - * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - * - * from: NetBSD: if_hme_pci.c,v 1.14 2004/03/17 08:58:23 martin Exp - */ - -#include -__FBSDID("$FreeBSD$"); - -/* - * PCI front-end device driver for the HME ethernet device. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#if defined(__powerpc__) -#include -#include -#endif -#include - -#include - -#include -#include -#include -#include -#include - -#include -#include - -#include -#include - -#include -#include - -#include "miibus_if.h" - -struct hme_pci_softc { - struct hme_softc hsc_hme; /* HME device */ - struct resource *hsc_sres; - struct resource *hsc_ires; - void *hsc_ih; -}; - -static int hme_pci_probe(device_t); -static int hme_pci_attach(device_t); -static int hme_pci_detach(device_t); -static int hme_pci_suspend(device_t); -static int hme_pci_resume(device_t); - -static device_method_t hme_pci_methods[] = { - /* Device interface */ - DEVMETHOD(device_probe, hme_pci_probe), - DEVMETHOD(device_attach, hme_pci_attach), - DEVMETHOD(device_detach, hme_pci_detach), - DEVMETHOD(device_suspend, hme_pci_suspend), - DEVMETHOD(device_resume, hme_pci_resume), - /* Can just use the suspend method here. 
*/ - DEVMETHOD(device_shutdown, hme_pci_suspend), - - /* MII interface */ - DEVMETHOD(miibus_readreg, hme_mii_readreg), - DEVMETHOD(miibus_writereg, hme_mii_writereg), - DEVMETHOD(miibus_statchg, hme_mii_statchg), - - DEVMETHOD_END -}; - -static driver_t hme_pci_driver = { - "hme", - hme_pci_methods, - sizeof(struct hme_pci_softc) -}; - -DRIVER_MODULE(hme, pci, hme_pci_driver, hme_devclass, 0, 0); -MODULE_DEPEND(hme, pci, 1, 1, 1); -MODULE_DEPEND(hme, ether, 1, 1, 1); - -#define PCI_VENDOR_SUN 0x108e -#define PCI_PRODUCT_SUN_EBUS 0x1000 -#define PCI_PRODUCT_SUN_HMENETWORK 0x1001 - -int -hme_pci_probe(device_t dev) -{ - - if (pci_get_vendor(dev) == PCI_VENDOR_SUN && - pci_get_device(dev) == PCI_PRODUCT_SUN_HMENETWORK) { - device_set_desc(dev, "Sun HME 10/100 Ethernet"); - return (BUS_PROBE_DEFAULT); - } - return (ENXIO); -} - -int -hme_pci_attach(device_t dev) -{ - struct hme_pci_softc *hsc; - struct hme_softc *sc; - bus_space_tag_t memt; - bus_space_handle_t memh; - int i, error = 0; -#if !defined(__powerpc__) - device_t *children, ebus_dev; - struct resource *ebus_rres; - int j, slot; -#endif - - pci_enable_busmaster(dev); - /* - * Some Sun HMEs do have their intpin register bogusly set to 0, - * although it should be 1. Correct that. - */ - if (pci_get_intpin(dev) == 0) - pci_set_intpin(dev, 1); - - hsc = device_get_softc(dev); - sc = &hsc->hsc_hme; - sc->sc_dev = dev; - sc->sc_flags |= HME_PCI; - mtx_init(&sc->sc_lock, device_get_nameunit(dev), MTX_NETWORK_LOCK, - MTX_DEF); - - /* - * Map five register banks: - * - * bank 0: HME SEB registers: +0x0000 - * bank 1: HME ETX registers: +0x2000 - * bank 2: HME ERX registers: +0x4000 - * bank 3: HME MAC registers: +0x6000 - * bank 4: HME MIF registers: +0x7000 - * - */ - i = PCIR_BAR(0); - hsc->hsc_sres = bus_alloc_resource_any(dev, SYS_RES_MEMORY, - &i, RF_ACTIVE); - if (hsc->hsc_sres == NULL) { - device_printf(dev, "could not map device registers\n"); - error = ENXIO; - goto fail_mtx; - } - i = 0; - hsc->hsc_ires = bus_alloc_resource_any(dev, SYS_RES_IRQ, - &i, RF_SHAREABLE | RF_ACTIVE); - if (hsc->hsc_ires == NULL) { - device_printf(dev, "could not allocate interrupt\n"); - error = ENXIO; - goto fail_sres; - } - memt = rman_get_bustag(hsc->hsc_sres); - memh = rman_get_bushandle(hsc->hsc_sres); - sc->sc_sebt = sc->sc_etxt = sc->sc_erxt = sc->sc_mact = sc->sc_mift = - memt; - bus_space_subregion(memt, memh, 0x0000, 0x1000, &sc->sc_sebh); - bus_space_subregion(memt, memh, 0x2000, 0x1000, &sc->sc_etxh); - bus_space_subregion(memt, memh, 0x4000, 0x1000, &sc->sc_erxh); - bus_space_subregion(memt, memh, 0x6000, 0x1000, &sc->sc_mach); - bus_space_subregion(memt, memh, 0x7000, 0x1000, &sc->sc_mifh); - -#if defined(__powerpc__) - OF_getetheraddr(dev, sc->sc_enaddr); -#else - /* - * Dig out VPD (vital product data) and read NA (network address). - * - * The PCI HME is a PCIO chip, which is composed of two functions: - * function 0: PCI-EBus2 bridge, and - * function 1: HappyMeal Ethernet controller. - * - * The VPD of HME resides in the Boot PROM (PCI FCode) attached - * to the EBus bridge and can't be accessed via the PCI capability - * pointer. - * ``Writing FCode 3.x Programs'' (newer ones, dated 1997 and later) - * chapter 2 describes the data structure. - * - * We don't have a MI EBus driver since no EBus device exists - * (besides the FCode PROM) on add-on HME boards. The ``no driver - * attached'' message for function 0 therefore is what is expected. 
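
The address extraction implemented below can be summarized by a simplified, self-contained sketch that walks a plain copy of the ROM image rather than using bus_space accessors; the offsets correspond to the PCI_ROMHDR_*, PCI_ROM_* and PCI_VPD* definitions that follow, error handling and the SUNW,qfe multi-port adjustment are omitted, and hme_na_from_rom() is purely illustrative.

#include <stdint.h>
#include <stddef.h>
#include <string.h>

static uint16_t
rom16(const uint8_t *rom, size_t off)
{
	return (rom[off] | rom[off + 1] << 8);	/* little endian */
}

/*
 * Sketch only: walk the PCI expansion ROM header to the PCI data
 * structure, follow its VPD pointer, and copy the 6-byte network
 * address out of the Sun "NA" VPD-R record.
 */
static int
hme_na_from_rom(const uint8_t *rom, uint8_t mac[6])
{
	size_t data, vpd;

	if (rom16(rom, 0x00) != 0xaa55)		/* ROM header signature */
		return (-1);
	data = rom16(rom, 0x18);		/* -> PCI data structure */
	if (memcmp(rom + data, "PCIR", 4) != 0)
		return (-1);
	vpd = rom16(rom, data + 0x08);		/* -> VPD area */
	/* Large VPD-R resource: tag, 16-bit length, key "NA", length 6, data. */
	if ((rom[vpd] & 0x80) == 0 ||
	    rom[vpd + 3] != 'N' || rom[vpd + 4] != 'A' || rom[vpd + 5] != 6)
		return (-1);
	memcpy(mac, rom + vpd + 6, 6);
	return (0);
}
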
- */ - -#define PCI_ROMHDR_SIZE 0x1c -#define PCI_ROMHDR_SIG 0x00 -#define PCI_ROMHDR_SIG_MAGIC 0xaa55 /* little endian */ -#define PCI_ROMHDR_PTR_DATA 0x18 -#define PCI_ROM_SIZE 0x18 -#define PCI_ROM_SIG 0x00 -#define PCI_ROM_SIG_MAGIC 0x52494350 /* "PCIR", endian */ - /* reversed */ -#define PCI_ROM_VENDOR 0x04 -#define PCI_ROM_DEVICE 0x06 -#define PCI_ROM_PTR_VPD 0x08 -#define PCI_VPDRES_BYTE0 0x00 -#define PCI_VPDRES_ISLARGE(x) ((x) & 0x80) -#define PCI_VPDRES_LARGE_NAME(x) ((x) & 0x7f) -#define PCI_VPDRES_TYPE_VPD 0x10 /* large */ -#define PCI_VPDRES_LARGE_LEN_LSB 0x01 -#define PCI_VPDRES_LARGE_LEN_MSB 0x02 -#define PCI_VPDRES_LARGE_DATA 0x03 -#define PCI_VPD_SIZE 0x03 -#define PCI_VPD_KEY0 0x00 -#define PCI_VPD_KEY1 0x01 -#define PCI_VPD_LEN 0x02 -#define PCI_VPD_DATA 0x03 - -#define HME_ROM_READ_N(n, offs) bus_space_read_ ## n (memt, memh, (offs)) -#define HME_ROM_READ_1(offs) HME_ROM_READ_N(1, (offs)) -#define HME_ROM_READ_2(offs) HME_ROM_READ_N(2, (offs)) -#define HME_ROM_READ_4(offs) HME_ROM_READ_N(4, (offs)) - - /* Search accompanying EBus bridge. */ - slot = pci_get_slot(dev); - if (device_get_children(device_get_parent(dev), &children, &i) != 0) { - device_printf(dev, "could not get children\n"); - error = ENXIO; - goto fail_sres; - } - ebus_dev = NULL; - for (j = 0; j < i; j++) { - if (pci_get_class(children[j]) == PCIC_BRIDGE && - pci_get_vendor(children[j]) == PCI_VENDOR_SUN && - pci_get_device(children[j]) == PCI_PRODUCT_SUN_EBUS && - pci_get_slot(children[j]) == slot) { - ebus_dev = children[j]; - break; - } - } - if (ebus_dev == NULL) { - device_printf(dev, "could not find EBus bridge\n"); - error = ENXIO; - goto fail_children; - } - - /* Map EBus bridge PROM registers. */ - i = PCIR_BAR(0); - if ((ebus_rres = bus_alloc_resource_any(ebus_dev, SYS_RES_MEMORY, - &i, RF_ACTIVE)) == NULL) { - device_printf(dev, "could not map PROM registers\n"); - error = ENXIO; - goto fail_children; - } - memt = rman_get_bustag(ebus_rres); - memh = rman_get_bushandle(ebus_rres); - - /* Read PCI Expansion ROM header. */ - if (HME_ROM_READ_2(PCI_ROMHDR_SIG) != PCI_ROMHDR_SIG_MAGIC || - (i = HME_ROM_READ_2(PCI_ROMHDR_PTR_DATA)) < PCI_ROMHDR_SIZE) { - device_printf(dev, "unexpected PCI Expansion ROM header\n"); - error = ENXIO; - goto fail_rres; - } - - /* Read PCI Expansion ROM data. */ - if (HME_ROM_READ_4(i + PCI_ROM_SIG) != PCI_ROM_SIG_MAGIC || - HME_ROM_READ_2(i + PCI_ROM_VENDOR) != pci_get_vendor(dev) || - HME_ROM_READ_2(i + PCI_ROM_DEVICE) != pci_get_device(dev) || - (j = HME_ROM_READ_2(i + PCI_ROM_PTR_VPD)) < i + PCI_ROM_SIZE) { - device_printf(dev, "unexpected PCI Expansion ROM data\n"); - error = ENXIO; - goto fail_rres; - } - - /* - * Read PCI VPD. - * SUNW,hme cards have a single large resource VPD-R tag - * containing one NA. SUNW,qfe cards have four large resource - * VPD-R tags containing one NA each (all four HME chips share - * the same PROM). - * The VPD used on both cards is not in PCI 2.2 standard format - * however. The length in the resource header is in big endian - * and the end tag is non-standard (0x79) and followed by an - * all-zero "checksum" byte. Sun calls this a "Fresh Choice - * Ethernet" VPD... - */ - /* Look at the end tag to determine whether this is a VPD with 4 NAs. */ - if (HME_ROM_READ_1(j + PCI_VPDRES_LARGE_DATA + PCI_VPD_SIZE + - ETHER_ADDR_LEN) != 0x79 && - HME_ROM_READ_1(j + 4 * (PCI_VPDRES_LARGE_DATA + PCI_VPD_SIZE + - ETHER_ADDR_LEN)) == 0x79) - /* Use the Nth NA for the Nth HME on this SUNW,qfe. 
*/ - j += slot * (PCI_VPDRES_LARGE_DATA + PCI_VPD_SIZE + - ETHER_ADDR_LEN); - if (PCI_VPDRES_ISLARGE(HME_ROM_READ_1(j + PCI_VPDRES_BYTE0)) == 0 || - PCI_VPDRES_LARGE_NAME(HME_ROM_READ_1(j + PCI_VPDRES_BYTE0)) != - PCI_VPDRES_TYPE_VPD || - (HME_ROM_READ_1(j + PCI_VPDRES_LARGE_LEN_LSB) << 8 | - HME_ROM_READ_1(j + PCI_VPDRES_LARGE_LEN_MSB)) != - PCI_VPD_SIZE + ETHER_ADDR_LEN || - HME_ROM_READ_1(j + PCI_VPDRES_LARGE_DATA + PCI_VPD_KEY0) != - 0x4e /* N */ || - HME_ROM_READ_1(j + PCI_VPDRES_LARGE_DATA + PCI_VPD_KEY1) != - 0x41 /* A */ || - HME_ROM_READ_1(j + PCI_VPDRES_LARGE_DATA + PCI_VPD_LEN) != - ETHER_ADDR_LEN) { - device_printf(dev, "unexpected PCI VPD\n"); - error = ENXIO; - goto fail_rres; - } - bus_space_read_region_1(memt, memh, j + PCI_VPDRES_LARGE_DATA + - PCI_VPD_DATA, sc->sc_enaddr, ETHER_ADDR_LEN); - -fail_rres: - bus_release_resource(ebus_dev, SYS_RES_MEMORY, - rman_get_rid(ebus_rres), ebus_rres); -fail_children: - free(children, M_TEMP); - if (error != 0) - goto fail_sres; -#endif - - sc->sc_burst = 64; /* XXX */ - - /* - * call the main configure - */ - if ((error = hme_config(sc)) != 0) { - device_printf(dev, "could not be configured\n"); - goto fail_ires; - } - - if ((error = bus_setup_intr(dev, hsc->hsc_ires, INTR_TYPE_NET | - INTR_MPSAFE, NULL, hme_intr, sc, &hsc->hsc_ih)) != 0) { - device_printf(dev, "couldn't establish interrupt\n"); - hme_detach(sc); - goto fail_ires; - } - return (0); - -fail_ires: - bus_release_resource(dev, SYS_RES_IRQ, - rman_get_rid(hsc->hsc_ires), hsc->hsc_ires); -fail_sres: - bus_release_resource(dev, SYS_RES_MEMORY, - rman_get_rid(hsc->hsc_sres), hsc->hsc_sres); -fail_mtx: - mtx_destroy(&sc->sc_lock); - return (error); -} - -static int -hme_pci_detach(device_t dev) -{ - struct hme_pci_softc *hsc; - struct hme_softc *sc; - - hsc = device_get_softc(dev); - sc = &hsc->hsc_hme; - bus_teardown_intr(dev, hsc->hsc_ires, hsc->hsc_ih); - hme_detach(sc); - bus_release_resource(dev, SYS_RES_IRQ, - rman_get_rid(hsc->hsc_ires), hsc->hsc_ires); - bus_release_resource(dev, SYS_RES_MEMORY, - rman_get_rid(hsc->hsc_sres), hsc->hsc_sres); - mtx_destroy(&sc->sc_lock); - return (0); -} - -static int -hme_pci_suspend(device_t dev) -{ - struct hme_pci_softc *hsc; - - hsc = device_get_softc(dev); - hme_suspend(&hsc->hsc_hme); - return (0); -} - -static int -hme_pci_resume(device_t dev) -{ - struct hme_pci_softc *hsc; - - hsc = device_get_softc(dev); - hme_resume(&hsc->hsc_hme); - return (0); -} Index: sys/dev/hme/if_hmereg.h =================================================================== --- sys/dev/hme/if_hmereg.h +++ /dev/null @@ -1,311 +0,0 @@ -/*- - * SPDX-License-Identifier: BSD-2-Clause-FreeBSD - * - * Copyright (c) 1999 The NetBSD Foundation, Inc. - * All rights reserved. - * - * This code is derived from software contributed to The NetBSD Foundation - * by Paul Kranenburg. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. 
AND CONTRIBUTORS - * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED - * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS - * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - * - * from: NetBSD: hmereg.h,v 1.16 2003/11/02 11:07:45 wiz Exp - * - * $FreeBSD$ - */ - -/* - * HME Shared Ethernet Block register offsets - */ -#define HME_SEBI_RESET (0*4) -#define HME_SEBI_CFG (1*4) -#define HME_SEBI_STAT (64*4) -#define HME_SEBI_IMASK (65*4) - -/* HME SEB bits. */ -#define HME_SEB_RESET_ETX 0x00000001 /* reset external transmitter */ -#define HME_SEB_RESET_ERX 0x00000002 /* reset external receiver */ - -#define HME_SEB_CFG_BURSTMASK 0x00000003 /* covers all burst bits */ -#define HME_SEB_CFG_BURST16 0x00000000 /* 16 byte bursts */ -#define HME_SEB_CFG_BURST32 0x00000001 /* 32 byte bursts */ -#define HME_SEB_CFG_BURST64 0x00000002 /* 64 byte bursts */ -#define HME_SEB_CFG_64BIT 0x00000004 /* extended transfer mode */ -#define HME_SEB_CFG_PARITY 0x00000008 /* parity check for DVMA/PIO */ - -#define HME_SEB_STAT_GOTFRAME 0x00000001 /* frame received */ -#define HME_SEB_STAT_RCNTEXP 0x00000002 /* rx frame count expired */ -#define HME_SEB_STAT_ACNTEXP 0x00000004 /* align error count expired */ -#define HME_SEB_STAT_CCNTEXP 0x00000008 /* crc error count expired */ -#define HME_SEB_STAT_LCNTEXP 0x00000010 /* length error count expired */ -#define HME_SEB_STAT_RFIFOVF 0x00000020 /* rx fifo overflow */ -#define HME_SEB_STAT_CVCNTEXP 0x00000040 /* code violation counter exp */ -#define HME_SEB_STAT_STSTERR 0x00000080 /* xif sqe test failed */ -#define HME_SEB_STAT_SENTFRAME 0x00000100 /* frame sent */ -#define HME_SEB_STAT_TFIFO_UND 0x00000200 /* tx fifo underrun */ -#define HME_SEB_STAT_MAXPKTERR 0x00000400 /* max-packet size error */ -#define HME_SEB_STAT_NCNTEXP 0x00000800 /* normal collision count exp */ -#define HME_SEB_STAT_ECNTEXP 0x00001000 /* excess collision count exp */ -#define HME_SEB_STAT_LCCNTEXP 0x00002000 /* late collision count exp */ -#define HME_SEB_STAT_FCNTEXP 0x00004000 /* first collision count exp */ -#define HME_SEB_STAT_DTIMEXP 0x00008000 /* defer timer expired */ -#define HME_SEB_STAT_RXTOHOST 0x00010000 /* pkt moved from rx fifo->memory */ -#define HME_SEB_STAT_NORXD 0x00020000 /* out of receive descriptors */ -#define HME_SEB_STAT_RXERR 0x00040000 /* rx DMA error */ -#define HME_SEB_STAT_RXLATERR 0x00080000 /* late error during rx DMA */ -#define HME_SEB_STAT_RXPERR 0x00100000 /* parity error during rx DMA */ -#define HME_SEB_STAT_RXTERR 0x00200000 /* tag error during rx DMA */ -#define HME_SEB_STAT_EOPERR 0x00400000 /* tx descriptor did not set EOP */ -#define HME_SEB_STAT_MIFIRQ 0x00800000 /* mif needs attention */ -#define HME_SEB_STAT_HOSTTOTX 0x01000000 /* pkt moved from memory->tx fifo */ -#define HME_SEB_STAT_TXALL 0x02000000 /* all pkts in fifo transmitted */ -#define HME_SEB_STAT_TXEACK 0x04000000 /* error during tx DMA */ -#define HME_SEB_STAT_TXLERR 0x08000000 /* late error during tx DMA */ -#define 
HME_SEB_STAT_TXPERR 0x10000000 /* parity error during tx DMA */ -#define HME_SEB_STAT_TXTERR 0x20000000 /* tag error during tx DMA */ -#define HME_SEB_STAT_SLVERR 0x40000000 /* pio access error */ -#define HME_SEB_STAT_SLVPERR 0x80000000 /* pio access parity error */ -#define HME_SEB_STAT_BITS "\177\020" \ - "b\0GOTFRAME\0b\1RCNTEXP\0b\2ACNTEXP\0" \ - "b\3CCNTEXP\0b\4LCNTEXP\0b\5RFIFOVF\0" \ - "b\6CVCNTEXP\0b\7STSTERR\0b\10SENTFRAME\0" \ - "b\11TFIFO_UND\0b\12MAXPKTERR\0b\13NCNTEXP\0" \ - "b\14ECNTEXP\0b\15LCCNTEXP\0b\16FCNTEXP\0" \ - "b\17DTIMEXP\0b\20RXTOHOST\0b\21NORXD\0" \ - "b\22RXERR\0b\23RXLATERR\0b\24RXPERR\0" \ - "b\25RXTERR\0b\26EOPERR\0b\27MIFIRQ\0" \ - "b\30HOSTTOTX\0b\31TXALL\0b\32XTEACK\0" \ - "b\33TXLERR\0b\34TXPERR\0b\35TXTERR\0" \ - "b\36SLVERR\0b\37SLVPERR\0\0" - -#define HME_SEB_STAT_ALL_ERRORS \ - (HME_SEB_STAT_SLVPERR | HME_SEB_STAT_SLVERR | HME_SEB_STAT_TXTERR |\ - HME_SEB_STAT_TXPERR | HME_SEB_STAT_TXLERR | HME_SEB_STAT_TXEACK |\ - HME_SEB_STAT_EOPERR | HME_SEB_STAT_RXTERR | HME_SEB_STAT_RXPERR |\ - HME_SEB_STAT_RXLATERR | HME_SEB_STAT_RXERR | HME_SEB_STAT_NORXD |\ - HME_SEB_STAT_MAXPKTERR| HME_SEB_STAT_TFIFO_UND| HME_SEB_STAT_STSTERR |\ - HME_SEB_STAT_RFIFOVF) - -#define HME_SEB_STAT_VLAN_ERRORS \ - (HME_SEB_STAT_SLVPERR | HME_SEB_STAT_SLVERR | HME_SEB_STAT_TXTERR |\ - HME_SEB_STAT_TXPERR | HME_SEB_STAT_TXLERR | HME_SEB_STAT_TXEACK |\ - HME_SEB_STAT_EOPERR | HME_SEB_STAT_RXTERR | HME_SEB_STAT_RXPERR |\ - HME_SEB_STAT_RXLATERR | HME_SEB_STAT_RXERR | HME_SEB_STAT_NORXD |\ - HME_SEB_STAT_TFIFO_UND| HME_SEB_STAT_STSTERR | HME_SEB_STAT_RFIFOVF) - -#define HME_SEB_STAT_FATAL_ERRORS \ - (HME_SEB_STAT_SLVPERR | HME_SEB_STAT_SLVERR | HME_SEB_STAT_TXTERR |\ - HME_SEB_STAT_TXPERR | HME_SEB_STAT_TXLERR | HME_SEB_STAT_TXEACK |\ - HME_SEB_STAT_RXTERR | HME_SEB_STAT_RXPERR | HME_SEB_STAT_RXLATERR |\ - HME_SEB_STAT_RXERR) - -/* - * HME Transmitter register offsets - */ -#define HME_ETXI_PENDING (0*4) /* Pending/wakeup */ -#define HME_ETXI_CFG (1*4) -#define HME_ETXI_RING (2*4) /* Descriptor Ring pointer */ -#define HME_ETXI_BBASE (3*4) /* Buffer base address (ro) */ -#define HME_ETXI_BDISP (4*4) /* Buffer displacement (ro) */ -#define HME_ETXI_FIFO_WPTR (5*4) /* FIFO write pointer */ -#define HME_ETXI_FIFO_SWPTR (6*4) /* FIFO shadow write pointer */ -#define HME_ETXI_FIFO_RPTR (7*4) /* FIFO read pointer */ -#define HME_ETXI_FIFO_SRPTR (8*4) /* FIFO shadow read pointer */ -#define HME_ETXI_FIFO_PKTCNT (9*4) /* FIFO packet counter */ -#define HME_ETXI_STATEMACHINE (10*4) /* State machine */ -#define HME_ETXI_RSIZE (11*4) /* Ring size */ -#define HME_ETXI_BPTR (12*4) /* Buffer pointer */ - - -/* TXI_PENDING bits */ -#define HME_ETX_TP_DMAWAKEUP 0x00000001 /* Start tx (rw, auto-clear) */ - -/* TXI_CFG bits */ -#define HME_ETX_CFG_DMAENABLE 0x00000001 /* Enable TX DMA */ -#define HME_ETX_CFG_FIFOTHRESH 0x000003fe /* TX fifo threshold */ -#define HME_ETX_CFG_IRQDAFTER 0x00000400 /* Intr after tx-fifo empty */ -#define HME_ETX_CFG_IRQDBEFORE 0x00000000 /* Intr before tx-fifo empty */ - - -/* - * HME Receiver register offsets - */ -#define HME_ERXI_CFG (0*4) -#define HME_ERXI_RING (1*4) /* Descriptor Ring pointer */ -#define HME_ERXI_BPTR (2*4) /* Data Buffer pointer (ro) */ -#define HME_ERXI_FIFO_WPTR (3*4) /* FIFO write pointer */ -#define HME_ERXI_FIFO_SWPTR (4*4) /* FIFO shadow write pointer */ -#define HME_ERXI_FIFO_RPTR (5*4) /* FIFO read pointer */ -#define HME_ERXI_FIFO_PKTCNT (6*4) /* FIFO packet counter */ -#define HME_ERXI_STATEMACHINE (7*4) /* State machine */ - -/* RXI_CFG 
bits */ -#define HME_ERX_CFG_DMAENABLE 0x00000001 /* Enable RX DMA */ -#define HME_ERX_CFG_FBO_MASK 0x00000038 /* RX first byte offset */ -#define HME_ERX_CFG_FBO_SHIFT 0x00000003 -#define HME_ERX_CFG_RINGSIZE32 0x00000000 /* Descriptor ring size: 32 */ -#define HME_ERX_CFG_RINGSIZE64 0x00000200 /* Descriptor ring size: 64 */ -#define HME_ERX_CFG_RINGSIZE128 0x00000400 /* Descriptor ring size: 128 */ -#define HME_ERX_CFG_RINGSIZE256 0x00000600 /* Descriptor ring size: 256 */ -#define HME_ERX_CFG_RINGSIZEMSK 0x00000600 /* Descriptor ring size: 256 */ -#define HME_ERX_CFG_CSUMSTART_MASK 0x007f0000 /* cksum offset mask */ -#define HME_ERX_CFG_CSUMSTART_SHIFT 16 - -/* - * HME MAC-core register offsets - */ -#define HME_MACI_XIF (0*4) -#define HME_MACI_TXSWRST (130*4) /* TX reset */ -#define HME_MACI_TXCFG (131*4) /* TX config */ -#define HME_MACI_JSIZE (139*4) /* TX jam size */ -#define HME_MACI_TXSIZE (140*4) /* TX max size */ -#define HME_MACI_NCCNT (144*4) /* TX normal collision cnt */ -#define HME_MACI_FCCNT (145*4) /* TX first collision cnt */ -#define HME_MACI_EXCNT (146*4) /* TX excess collision cnt */ -#define HME_MACI_LTCNT (147*4) /* TX late collision cnt */ -#define HME_MACI_RANDSEED (148*4) /* */ -#define HME_MACI_RXSWRST (194*4) /* RX reset */ -#define HME_MACI_RXCFG (195*4) /* RX config */ -#define HME_MACI_RXSIZE (196*4) /* RX max size */ -#define HME_MACI_MACADDR2 (198*4) /* MAC address */ -#define HME_MACI_MACADDR1 (199*4) -#define HME_MACI_MACADDR0 (200*4) -#define HME_MACI_HASHTAB3 (208*4) /* Address hash table */ -#define HME_MACI_HASHTAB2 (209*4) -#define HME_MACI_HASHTAB1 (210*4) -#define HME_MACI_HASHTAB0 (211*4) -#define HME_MACI_AFILTER2 (212*4) /* Address filter */ -#define HME_MACI_AFILTER1 (213*4) -#define HME_MACI_AFILTER0 (214*4) -#define HME_MACI_AFILTER_MASK (215*4) - -/* XIF config register. */ -#define HME_MAC_XIF_OE 0x00000001 /* Output driver enable */ -#define HME_MAC_XIF_XLBACK 0x00000002 /* Loopback-mode XIF enable */ -#define HME_MAC_XIF_MLBACK 0x00000004 /* Loopback-mode MII enable */ -#define HME_MAC_XIF_MIIENABLE 0x00000008 /* MII receive buffer enable */ -#define HME_MAC_XIF_SQENABLE 0x00000010 /* SQE test enable */ -#define HME_MAC_XIF_SQETWIN 0x000003e0 /* SQE time window */ -#define HME_MAC_XIF_LANCE 0x00000010 /* Lance mode enable */ -#define HME_MAC_XIF_LIPG0 0x000003e0 /* Lance mode IPG0 */ - -/* Transmit config register. */ -#define HME_MAC_TXCFG_ENABLE 0x00000001 /* Enable the transmitter */ -#define HME_MAC_TXCFG_SMODE 0x00000020 /* Enable slow transmit mode */ -#define HME_MAC_TXCFG_CIGN 0x00000040 /* Ignore transmit collisions */ -#define HME_MAC_TXCFG_FCSOFF 0x00000080 /* Do not emit FCS */ -#define HME_MAC_TXCFG_DBACKOFF 0x00000100 /* Disable backoff */ -#define HME_MAC_TXCFG_FULLDPLX 0x00000200 /* Enable full-duplex */ -#define HME_MAC_TXCFG_DGIVEUP 0x00000400 /* Don't give up on transmits */ - -/* Receive config register. 
*/ -#define HME_MAC_RXCFG_ENABLE 0x00000001 /* Enable the receiver */ -#define HME_MAC_RXCFG_PSTRIP 0x00000020 /* Pad byte strip enable */ -#define HME_MAC_RXCFG_PMISC 0x00000040 /* Enable promiscuous mode */ -#define HME_MAC_RXCFG_DERR 0x00000080 /* Disable error checking */ -#define HME_MAC_RXCFG_DCRCS 0x00000100 /* Disable CRC stripping */ -#define HME_MAC_RXCFG_ME 0x00000200 /* Receive packets addressed to me */ -#define HME_MAC_RXCFG_PGRP 0x00000400 /* Enable promisc group mode */ -#define HME_MAC_RXCFG_HENABLE 0x00000800 /* Enable the hash filter */ -#define HME_MAC_RXCFG_AENABLE 0x00001000 /* Enable the address filter */ - -/* - * HME MIF register offsets - */ -#define HME_MIFI_BB_CLK (0*4) /* bit-bang clock */ -#define HME_MIFI_BB_DATA (1*4) /* bit-bang data */ -#define HME_MIFI_BB_OE (2*4) /* bit-bang output enable */ -#define HME_MIFI_FO (3*4) /* frame output */ -#define HME_MIFI_CFG (4*4) /* */ -#define HME_MIFI_IMASK (5*4) /* Interrupt mask for status change */ -#define HME_MIFI_STAT (6*4) /* Status (ro, auto-clear) */ -#define HME_MIFI_SM (7*4) /* State machine (ro) */ - -/* MIF Configuration register */ -#define HME_MIF_CFG_PHY 0x00000001 /* PHY select */ -#define HME_MIF_CFG_PE 0x00000002 /* Poll enable */ -#define HME_MIF_CFG_BBMODE 0x00000004 /* Bit-bang mode */ -#define HME_MIF_CFG_PRADDR 0x000000f8 /* Poll register address */ -#define HME_MIF_CFG_MDI0 0x00000100 /* MDI_0 (ro) */ -#define HME_MIF_CFG_MDI1 0x00000200 /* MDI_1 (ro) */ -#define HME_MIF_CFG_PPADDR 0x00007c00 /* Poll phy address */ - -/* MIF Frame/Output register */ -#define HME_MIF_FO_ST 0xc0000000 /* Start of frame */ -#define HME_MIF_FO_ST_SHIFT 30 /* */ -#define HME_MIF_FO_OPC 0x30000000 /* Opcode */ -#define HME_MIF_FO_OPC_SHIFT 28 /* */ -#define HME_MIF_FO_PHYAD 0x0f800000 /* PHY Address */ -#define HME_MIF_FO_PHYAD_SHIFT 23 /* */ -#define HME_MIF_FO_REGAD 0x007c0000 /* Register Address */ -#define HME_MIF_FO_REGAD_SHIFT 18 /* */ -#define HME_MIF_FO_TAMSB 0x00020000 /* Turn-around MSB */ -#define HME_MIF_FO_TALSB 0x00010000 /* Turn-around LSB */ -#define HME_MIF_FO_DATA 0x0000ffff /* data to read or write */ - -/* Wired HME PHY addresses */ -#define HME_PHYAD_INTERNAL 1 -#define HME_PHYAD_EXTERNAL 0 - -/* - * Buffer Descriptors. - */ -#define HME_XD_SIZE 8 -#define HME_XD_FLAGS(base, index) ((base) + ((index) * HME_XD_SIZE) + 0) -#define HME_XD_ADDR(base, index) ((base) + ((index) * HME_XD_SIZE) + 4) -#define HME_XD_GETFLAGS(p, b, i) \ - ((p) ? le32toh(*((u_int32_t *)HME_XD_FLAGS(b,i))) : \ - (*((u_int32_t *)HME_XD_FLAGS(b,i)))) -#define HME_XD_SETFLAGS(p, b, i, f) do { \ - *((u_int32_t *)HME_XD_FLAGS(b,i)) = ((p) ? htole32((f)) : (f)); \ -} while(/* CONSTCOND */ 0) -#define HME_XD_SETADDR(p, b, i, a) do { \ - *((u_int32_t *)HME_XD_ADDR(b,i)) = ((p) ? 
htole32((a)) : (a)); \ -} while(/* CONSTCOND */ 0) - -/* Descriptor flag values */ -#define HME_XD_OWN 0x80000000 /* ownership: 1=hw, 0=sw */ -#define HME_XD_SOP 0x40000000 /* start of packet marker (tx) */ -#define HME_XD_OFL 0x40000000 /* buffer overflow (rx) */ -#define HME_XD_EOP 0x20000000 /* end of packet marker (tx) */ -#define HME_XD_TXCKSUM 0x10000000 /* checksum enable (tx) */ -#define HME_XD_RXLENMSK 0x3fff0000 /* packet length mask (rx) */ -#define HME_XD_RXLENSHIFT 16 -#define HME_XD_TXLENMSK 0x00003fff /* packet length mask (tx) */ -#define HME_XD_TXCKSUM_SSHIFT 14 -#define HME_XD_TXCKSUM_OSHIFT 20 -#define HME_XD_RXCKSUM 0x0000ffff /* packet checksum (rx) */ - -/* Macros to encode/decode the receive buffer size from the flags field */ -#define HME_XD_ENCODE_RSIZE(sz) \ - (((sz) << HME_XD_RXLENSHIFT) & HME_XD_RXLENMSK) -#define HME_XD_DECODE_RSIZE(flags) \ - (((flags) & HME_XD_RXLENMSK) >> HME_XD_RXLENSHIFT) - -/* Provide encode/decode macros for the transmit buffers for symmetry */ -#define HME_XD_ENCODE_TSIZE(sz) \ - (((sz) << 0) & HME_XD_TXLENMSK) -#define HME_XD_DECODE_TSIZE(flags) \ - (((flags) & HME_XD_TXLENMSK) >> 0) - -#define HME_MINRXALIGN 0x10 -#define HME_RXOFFS 2 Index: sys/dev/hme/if_hmevar.h =================================================================== --- sys/dev/hme/if_hmevar.h +++ /dev/null @@ -1,160 +0,0 @@ -/*- - * SPDX-License-Identifier: BSD-2-Clause-FreeBSD - * - * Copyright (c) 1999 The NetBSD Foundation, Inc. - * All rights reserved. - * - * This code is derived from software contributed to The NetBSD Foundation - * by Paul Kranenburg. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS - * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED - * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS - * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - * - * from: NetBSD: hmevar.h,v 1.5 2000/06/25 01:10:04 eeh Exp - * - * $FreeBSD$ - */ - -#include - -/* - * Number of receive and transmit descriptors. For each receive descriptor, - * an mbuf cluster is allocated and set up to receive a packet, and a dma map - * is created. Therefore, this number should not be too high to not waste - * memory. - * TX descriptors have no static cost, except for the memory directly allocated - * for them. TX queue elements (the number of which is fixed by HME_NTXQ) hold - * the software state for a transmit job; each has a dmamap allocated for it. 
- * There may be multiple descriptors allocated to a single queue element. - * HME_NTXQ and HME_NTXSEGS are completely arbitrary. - */ -#define HME_NRXDESC 128 -#define HME_NTXDESC 256 -#define HME_NTXQ 64 -#define HME_NTXSEGS 16 - -/* Maximum size of a mapped RX buffer. */ -#define HME_BUFSZ 1600 - -/* - * RX DMA descriptor. The descriptors are preallocated; the dma map is - * reused. - */ -struct hme_rxdesc { - struct mbuf *hrx_m; - bus_dmamap_t hrx_dmamap; -}; - -/* Lazily leave at least one burst size grace space. */ -#define HME_DESC_RXLEN(sc, d) \ - ulmin(HME_BUFSZ, (d)->hrx_m->m_len - (sc)->sc_burst) - -struct hme_txdesc { - struct mbuf *htx_m; - bus_dmamap_t htx_dmamap; - int htx_lastdesc; - STAILQ_ENTRY(hme_txdesc) htx_q; -}; - -STAILQ_HEAD(hme_txdq, hme_txdesc); - -struct hme_ring { - /* Ring Descriptors */ - caddr_t rb_membase; /* Packet buffer: CPU address */ - bus_addr_t rb_dmabase; /* Packet buffer: DMA address */ - caddr_t rb_txd; /* Transmit descriptors */ - bus_addr_t rb_txddma; /* DMA address of same */ - caddr_t rb_rxd; /* Receive descriptors */ - bus_addr_t rb_rxddma; /* DMA address of same */ - - /* Ring Descriptor state */ - int rb_tdhead, rb_tdtail; - int rb_rdtail; - int rb_td_nbusy; - - /* Descriptors */ - struct hme_rxdesc rb_rxdesc[HME_NRXDESC]; - struct hme_txdesc rb_txdesc[HME_NTXQ]; - - struct hme_txdq rb_txfreeq; - struct hme_txdq rb_txbusyq; - - bus_dmamap_t rb_spare_dmamap; -}; - -struct hme_softc { - struct ifnet *sc_ifp; - struct ifmedia sc_ifmedia; - device_t sc_dev; - device_t sc_miibus; - struct mii_data *sc_mii; /* MII media control */ - u_char sc_enaddr[ETHER_ADDR_LEN]; - struct callout sc_tick_ch; /* tick callout */ - int sc_wdog_timer; /* watchdog timer */ - - /* The following bus handles are to be provided by the bus front-end */ - bus_dma_tag_t sc_pdmatag; /* bus dma parent tag */ - bus_dma_tag_t sc_cdmatag; /* control bus dma tag */ - bus_dmamap_t sc_cdmamap; /* control bus dma handle */ - bus_dma_tag_t sc_rdmatag; /* RX bus dma tag */ - bus_dma_tag_t sc_tdmatag; /* RX bus dma tag */ - bus_space_handle_t sc_sebh; /* HME Global registers */ - bus_space_handle_t sc_erxh; /* HME ERX registers */ - bus_space_handle_t sc_etxh; /* HME ETX registers */ - bus_space_handle_t sc_mach; /* HME MAC registers */ - bus_space_handle_t sc_mifh; /* HME MIF registers */ - bus_space_tag_t sc_sebt; /* HME Global registers */ - bus_space_tag_t sc_erxt; /* HME ERX registers */ - bus_space_tag_t sc_etxt; /* HME ETX registers */ - bus_space_tag_t sc_mact; /* HME MAC registers */ - bus_space_tag_t sc_mift; /* HME MIF registers */ - int sc_burst; /* DVMA burst size in effect */ - int sc_phys[2]; /* MII instance -> PHY map */ - - u_int sc_flags; -#define HME_LINK (1 << 0) /* link is up */ -#define HME_PCI (1 << 1) /* PCI busses are little-endian */ - - int sc_ifflags; - int sc_csum_features; - - /* Ring descriptor */ - struct hme_ring sc_rb; - - struct mtx sc_lock; -}; - -#define HME_LOCK(_sc) mtx_lock(&(_sc)->sc_lock) -#define HME_UNLOCK(_sc) mtx_unlock(&(_sc)->sc_lock) -#define HME_LOCK_ASSERT(_sc, _what) mtx_assert(&(_sc)->sc_lock, (_what)) - -extern devclass_t hme_devclass; - -int hme_config(struct hme_softc *); -void hme_detach(struct hme_softc *); -void hme_suspend(struct hme_softc *); -void hme_resume(struct hme_softc *); -void hme_intr(void *); - -/* MII methods & callbacks */ -int hme_mii_readreg(device_t, int, int); -int hme_mii_writereg(device_t, int, int, int); -void hme_mii_statchg(device_t); Index: sys/dev/sk/if_sk.c 
=================================================================== --- sys/dev/sk/if_sk.c +++ sys/dev/sk/if_sk.c @@ -272,9 +272,7 @@ * UDP packets in Tx as the hardware can't differenciate UDP packets from * TCP packets. 0 chcecksum value for UDP packet is an invalid one as it * means sender didn't perforam checksum computation. For the safety I - * disabled UDP checksum offload capability at the moment. Alternatively - * we can intrduce a LINK0/LINK1 flag as hme(4) did in its Tx checksum - * offload routine. + * disabled UDP checksum offload capability at the moment. */ #define SK_CSUM_FEATURES (CSUM_TCP) Index: sys/i386/conf/GENERIC =================================================================== --- sys/i386/conf/GENERIC +++ sys/i386/conf/GENERIC @@ -241,7 +241,6 @@ device et # Agere ET1310 10/100/Gigabit Ethernet device fxp # Intel EtherExpress PRO/100B (82557, 82558) device gem # Sun GEM/Sun ERI/Apple GMAC -device hme # Sun HME (Happy Meal Ethernet) device jme # JMicron JMC250 Gigabit/JMC260 Fast Ethernet device lge # Level 1 LXT1001 gigabit Ethernet device msk # Marvell/SysKonnect Yukon II Gigabit Ethernet Index: sys/modules/Makefile =================================================================== --- sys/modules/Makefile +++ sys/modules/Makefile @@ -130,7 +130,6 @@ ${_glxsb} \ gpio \ hifn \ - hme \ ${_hpt27xx} \ ${_hptiop} \ ${_hptmv} \ Index: sys/modules/hme/Makefile =================================================================== --- sys/modules/hme/Makefile +++ /dev/null @@ -1,9 +0,0 @@ -# $FreeBSD$ - -.PATH: ${SRCTOP}/sys/dev/hme - -KMOD= if_hme -SRCS= bus_if.h device_if.h if_hme.c if_hme_pci.c miibus_if.h -SRCS+= pci_if.h - -.include Index: sys/netpfil/pf/pf.c =================================================================== --- sys/netpfil/pf/pf.c +++ sys/netpfil/pf/pf.c @@ -5666,7 +5666,7 @@ * CSUM_DATA_VALID : * network driver performed cksum, needs to additional pseudo header * cksum computation with partial csum_data(i.e. lack of H/W support for - * pseudo header, for instance hme(4), sk(4) and possibly gem(4)) + * pseudo header, for instance sk(4) and possibly gem(4)) * * After validating the cksum of packet, set both flag CSUM_DATA_VALID and * CSUM_PSEUDO_HDR in order to avoid recomputation of the cksum in upper
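
Where the surrounding comment mentions the additional pseudo-header computation for a partial csum_data, the stack's handling can be sketched roughly as follows; finish_rx_cksum() is an illustrative name, while in_pseudo() and the mbuf csum fields are the standard FreeBSD interfaces involved, and a return value of 0 indicates the checksum verified.

#include <sys/param.h>
#include <sys/mbuf.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <machine/in_cksum.h>

/*
 * Sketch only: finish a receive checksum when the driver set
 * CSUM_DATA_VALID but not CSUM_PSEUDO_HDR, i.e. csum_data covers the
 * TCP header and payload but not the pseudo header.
 */
static uint16_t
finish_rx_cksum(struct mbuf *m, struct ip *ip, int tlen)
{
	uint16_t sum;

	if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR)
		sum = m->m_pkthdr.csum_data;	/* hardware covered everything */
	else
		sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
		    htonl(m->m_pkthdr.csum_data + tlen + IPPROTO_TCP));
	return (sum ^ 0xffff);
}
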