Index: stable/7/sys/contrib/pf =================================================================== --- stable/7/sys/contrib/pf (revision 190246) +++ stable/7/sys/contrib/pf (revision 190247) Property changes on: stable/7/sys/contrib/pf ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head/sys/contrib/pf:r189714 Index: stable/7/sys/dev/ath/ath_hal =================================================================== --- stable/7/sys/dev/ath/ath_hal (revision 190246) +++ stable/7/sys/dev/ath/ath_hal (revision 190247) Property changes on: stable/7/sys/dev/ath/ath_hal ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head/sys/dev/ath/ath_hal:r189714 Index: stable/7/sys/dev/cxgb =================================================================== --- stable/7/sys/dev/cxgb (revision 190246) +++ stable/7/sys/dev/cxgb (revision 190247) Property changes on: stable/7/sys/dev/cxgb ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head/sys/dev/cxgb:r189714 Index: stable/7/sys/dev/txp/if_txp.c =================================================================== --- stable/7/sys/dev/txp/if_txp.c (revision 190246) +++ stable/7/sys/dev/txp/if_txp.c (revision 190247) @@ -1,1834 +1,2983 @@ /* $OpenBSD: if_txp.c,v 1.48 2001/06/27 06:34:50 kjc Exp $ */ /*- * Copyright (c) 2001 * Jason L. Wright , Theo de Raadt, and * Aaron Campbell . All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Jason L. Wright, * Theo de Raadt and Aaron Campbell. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); /* * Driver for 3c990 (Typhoon) Ethernet ASIC */ #include #include -#include -#include -#include +#include +#include #include +#include +#include +#include #include +#include +#include +#include #include +#include +#include +#include +#include #include #include #include #include +#include #include #include #include #include -#include #include -#include -#include -#include - -#include - -#include /* for vtophys */ -#include /* for vtophys */ -#include -#include -#include -#include - #include -#include + #include #include -#define TXP_USEIOSPACE -#define __STRICT_ALIGNMENT +#include +#include #include #include -#ifndef lint -static const char rcsid[] = - "$FreeBSD$"; -#endif +MODULE_DEPEND(txp, pci, 1, 1, 1); +MODULE_DEPEND(txp, ether, 1, 1, 1); /* + * XXX Known Typhoon firmware issues. + * + * 1. The firmware seems to have a Tx TCP/UDP checksum offloading bug; + * it hangs when told to compute a TCP/UDP checksum. Whether the + * firmware requires special alignment to do checksum offloading + * is unknown; the datasheet says nothing about it. + * 2. The datasheet says nothing about the maximum number of fragment + * descriptors supported. Experimentation shows that up to 16 + * fragment descriptors are supported by the firmware. For TSO the + * upper stack can send a 64KB IP datagram plus link header + * (ethernet header + VLAN tag), but the controller can handle at + * most a 64KB frame given that PAGE_SIZE is 4KB (i.e. 16 * + * PAGE_SIZE). Because frames that need hardware TSO can be larger + * than 64KB, the TSO capability is disabled. TSO for frames that + * fit in 16 or fewer fragment descriptors works without problems, + * though. + * 3. VLAN hardware tag stripping is always enabled in the firmware, + * even when it is explicitly told not to strip the tag. It would + * be possible to re-insert the tag in the Rx handler when VLAN + * hardware tagging is not active, but that would be a layering + * violation. + * 4. TXP_CMD_RECV_BUFFER_CONTROL does not work as the datasheet + * describes, so the driver has to handle the alignment restriction + * itself by copying each received frame onto a 32bit boundary on + * strict-alignment architectures. This adds a lot of CPU burden + * and effectively reduces Rx performance on strict-alignment + * architectures (e.g. sparc64, arm, mips and ia64). + * + * Unfortunately 3Com no longer seems interested in releasing fixed + * firmware, so we may have to live with these bugs. + */ + +#define TXP_CSUM_FEATURES (CSUM_IP) + +/* * Various supported device vendors/types and their names.
*/ static struct txp_type txp_devs[] = { { TXP_VENDORID_3COM, TXP_DEVICEID_3CR990_TX_95, "3Com 3cR990-TX-95 Etherlink with 3XP Processor" }, { TXP_VENDORID_3COM, TXP_DEVICEID_3CR990_TX_97, "3Com 3cR990-TX-97 Etherlink with 3XP Processor" }, { TXP_VENDORID_3COM, TXP_DEVICEID_3CR990B_TXM, "3Com 3cR990B-TXM Etherlink with 3XP Processor" }, { TXP_VENDORID_3COM, TXP_DEVICEID_3CR990_SRV_95, "3Com 3cR990-SRV-95 Etherlink Server with 3XP Processor" }, { TXP_VENDORID_3COM, TXP_DEVICEID_3CR990_SRV_97, "3Com 3cR990-SRV-97 Etherlink Server with 3XP Processor" }, { TXP_VENDORID_3COM, TXP_DEVICEID_3CR990B_SRV, "3Com 3cR990B-SRV Etherlink Server with 3XP Processor" }, { 0, 0, NULL } }; static int txp_probe(device_t); static int txp_attach(device_t); static int txp_detach(device_t); -static void txp_intr(void *); -static void txp_tick(void *); static int txp_shutdown(device_t); +static int txp_suspend(device_t); +static int txp_resume(device_t); +static int txp_intr(void *); +static void txp_int_task(void *, int); +static void txp_tick(void *); static int txp_ioctl(struct ifnet *, u_long, caddr_t); static void txp_start(struct ifnet *); static void txp_start_locked(struct ifnet *); +static int txp_encap(struct txp_softc *, struct txp_tx_ring *, struct mbuf **); static void txp_stop(struct txp_softc *); static void txp_init(void *); static void txp_init_locked(struct txp_softc *); -static void txp_watchdog(struct ifnet *); +static void txp_watchdog(struct txp_softc *); -static void txp_release_resources(struct txp_softc *); -static int txp_chip_init(struct txp_softc *); -static int txp_reset_adapter(struct txp_softc *); +static int txp_reset(struct txp_softc *); +static int txp_boot(struct txp_softc *, uint32_t); +static int txp_sleep(struct txp_softc *, int); +static int txp_wait(struct txp_softc *, uint32_t); static int txp_download_fw(struct txp_softc *); static int txp_download_fw_wait(struct txp_softc *); static int txp_download_fw_section(struct txp_softc *, struct txp_fw_section_header *, int); static int txp_alloc_rings(struct txp_softc *); +static void txp_init_rings(struct txp_softc *); +static int txp_dma_alloc(struct txp_softc *, char *, bus_dma_tag_t *, + bus_size_t, bus_size_t, bus_dmamap_t *, void **, bus_size_t, bus_addr_t *); +static void txp_dma_free(struct txp_softc *, bus_dma_tag_t *, bus_dmamap_t *, + void **); +static void txp_free_rings(struct txp_softc *); static int txp_rxring_fill(struct txp_softc *); static void txp_rxring_empty(struct txp_softc *); static void txp_set_filter(struct txp_softc *); static int txp_cmd_desc_numfree(struct txp_softc *); static int txp_command(struct txp_softc *, uint16_t, uint16_t, uint32_t, uint32_t, uint16_t *, uint32_t *, uint32_t *, int); -static int txp_command2(struct txp_softc *, uint16_t, uint16_t, +static int txp_ext_command(struct txp_softc *, uint16_t, uint16_t, uint32_t, uint32_t, struct txp_ext_desc *, uint8_t, struct txp_rsp_desc **, int); -static int txp_response(struct txp_softc *, uint32_t, uint16_t, uint16_t, +static int txp_response(struct txp_softc *, uint16_t, uint16_t, struct txp_rsp_desc **); static void txp_rsp_fixup(struct txp_softc *, struct txp_rsp_desc *, struct txp_rsp_desc *); -static void txp_capabilities(struct txp_softc *); +static int txp_set_capabilities(struct txp_softc *); static void txp_ifmedia_sts(struct ifnet *, struct ifmediareq *); static int txp_ifmedia_upd(struct ifnet *); #ifdef TXP_DEBUG static void txp_show_descriptor(void *); #endif static void txp_tx_reclaim(struct txp_softc *, struct txp_tx_ring *); 
static void txp_rxbuf_reclaim(struct txp_softc *); -static void txp_rx_reclaim(struct txp_softc *, struct txp_rx_ring *); - -#ifdef TXP_USEIOSPACE -#define TXP_RES SYS_RES_IOPORT -#define TXP_RID TXP_PCI_LOIO -#else -#define TXP_RES SYS_RES_MEMORY -#define TXP_RID TXP_PCI_LOMEM +#ifndef __NO_STRICT_ALIGNMENT +static __inline void txp_fixup_rx(struct mbuf *); #endif +static int txp_rx_reclaim(struct txp_softc *, struct txp_rx_ring *, int); +static void txp_stats_save(struct txp_softc *); +static void txp_stats_update(struct txp_softc *, struct txp_rsp_desc *); +static void txp_sysctl_node(struct txp_softc *); +static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int); +static int sysctl_hw_txp_proc_limit(SYSCTL_HANDLER_ARGS); +static int prefer_iomap = 0; +TUNABLE_INT("hw.txp.prefer_iomap", &prefer_iomap); + static device_method_t txp_methods[] = { /* Device interface */ DEVMETHOD(device_probe, txp_probe), DEVMETHOD(device_attach, txp_attach), DEVMETHOD(device_detach, txp_detach), DEVMETHOD(device_shutdown, txp_shutdown), - { 0, 0 } + DEVMETHOD(device_suspend, txp_suspend), + DEVMETHOD(device_resume, txp_resume), + + { NULL, NULL } }; static driver_t txp_driver = { "txp", txp_methods, sizeof(struct txp_softc) }; static devclass_t txp_devclass; DRIVER_MODULE(txp, pci, txp_driver, txp_devclass, 0, 0); -MODULE_DEPEND(txp, pci, 1, 1, 1); -MODULE_DEPEND(txp, ether, 1, 1, 1); static int txp_probe(device_t dev) { struct txp_type *t; t = txp_devs; while (t->txp_name != NULL) { if ((pci_get_vendor(dev) == t->txp_vid) && (pci_get_device(dev) == t->txp_did)) { device_set_desc(dev, t->txp_name); return (BUS_PROBE_DEFAULT); } t++; } return (ENXIO); } static int txp_attach(device_t dev) { struct txp_softc *sc; struct ifnet *ifp; + struct txp_rsp_desc *rsp; uint16_t p1; - uint32_t p2; - int error = 0, rid; - u_char eaddr[6]; + uint32_t p2, reg; + int error = 0, pmc, rid; + uint8_t eaddr[ETHER_ADDR_LEN], *ver; sc = device_get_softc(dev); sc->sc_dev = dev; - sc->sc_cold = 1; mtx_init(&sc->sc_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF); callout_init_mtx(&sc->sc_tick, &sc->sc_mtx, 0); + TASK_INIT(&sc->sc_int_task, 0, txp_int_task, sc); + TAILQ_INIT(&sc->sc_busy_list); + TAILQ_INIT(&sc->sc_free_list); - /* - * Map control/status registers. - */ - pci_enable_busmaster(dev); + ifmedia_init(&sc->sc_ifmedia, 0, txp_ifmedia_upd, txp_ifmedia_sts); + ifmedia_add(&sc->sc_ifmedia, IFM_ETHER | IFM_10_T, 0, NULL); + ifmedia_add(&sc->sc_ifmedia, IFM_ETHER | IFM_10_T | IFM_HDX, 0, NULL); + ifmedia_add(&sc->sc_ifmedia, IFM_ETHER | IFM_10_T | IFM_FDX, 0, NULL); + ifmedia_add(&sc->sc_ifmedia, IFM_ETHER | IFM_100_TX, 0, NULL); + ifmedia_add(&sc->sc_ifmedia, IFM_ETHER | IFM_100_TX | IFM_HDX, 0, NULL); + ifmedia_add(&sc->sc_ifmedia, IFM_ETHER | IFM_100_TX | IFM_FDX, 0, NULL); + ifmedia_add(&sc->sc_ifmedia, IFM_ETHER | IFM_AUTO, 0, NULL); - rid = TXP_RID; - sc->sc_res = bus_alloc_resource_any(dev, TXP_RES, &rid, - RF_ACTIVE); - + pci_enable_busmaster(dev); + /* Prefer memory space register mapping over IO space. 
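+ * The default can be overridden before boot through the loader tunable declared above, e.g. in /boot/loader.conf: hw.txp.prefer_iomap="1".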
*/ + if (prefer_iomap == 0) { + sc->sc_res_id = PCIR_BAR(1); + sc->sc_res_type = SYS_RES_MEMORY; + } else { + sc->sc_res_id = PCIR_BAR(0); + sc->sc_res_type = SYS_RES_IOPORT; + } + sc->sc_res = bus_alloc_resource_any(dev, sc->sc_res_type, + &sc->sc_res_id, RF_ACTIVE); + if (sc->sc_res == NULL && prefer_iomap == 0) { + sc->sc_res_id = PCIR_BAR(0); + sc->sc_res_type = SYS_RES_IOPORT; + sc->sc_res = bus_alloc_resource_any(dev, sc->sc_res_type, + &sc->sc_res_id, RF_ACTIVE); + } if (sc->sc_res == NULL) { device_printf(dev, "couldn't map ports/memory\n"); - error = ENXIO; - goto fail; + ifmedia_removeall(&sc->sc_ifmedia); + mtx_destroy(&sc->sc_mtx); + return (ENXIO); } - sc->sc_bt = rman_get_bustag(sc->sc_res); - sc->sc_bh = rman_get_bushandle(sc->sc_res); + /* Enable MWI. */ + reg = pci_read_config(dev, PCIR_COMMAND, 2); + reg |= PCIM_CMD_MWRICEN; + pci_write_config(dev, PCIR_COMMAND, reg, 2); + /* Check cache line size. */ + reg = pci_read_config(dev, PCIR_CACHELNSZ, 1); + reg <<= 4; + if (reg == 0 || (reg % 16) != 0) + device_printf(sc->sc_dev, + "invalid cache line size : %u\n", reg); /* Allocate interrupt */ rid = 0; sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); if (sc->sc_irq == NULL) { device_printf(dev, "couldn't map interrupt\n"); error = ENXIO; goto fail; } - if (txp_chip_init(sc)) { - error = ENXIO; + if ((error = txp_alloc_rings(sc)) != 0) goto fail; - } - - sc->sc_fwbuf = contigmalloc(32768, M_DEVBUF, - M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0); - if (sc->sc_fwbuf == NULL) { - device_printf(dev, "no memory for firmware\n"); + txp_init_rings(sc); + txp_sysctl_node(sc); + /* Reset controller and make it reload sleep image. */ + if (txp_reset(sc) != 0) { error = ENXIO; goto fail; } - error = txp_download_fw(sc); - contigfree(sc->sc_fwbuf, 32768, M_DEVBUF); - sc->sc_fwbuf = NULL; - if (error) - goto fail; - - sc->sc_ldata = contigmalloc(sizeof(struct txp_ldata), M_DEVBUF, - M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0); - if (sc->sc_ldata == NULL) { - device_printf(dev, "no memory for descriptor ring\n"); + /* Let controller boot from sleep image. */ + if (txp_boot(sc, STAT_WAITING_FOR_HOST_REQUEST) != 0) { + device_printf(sc->sc_dev, "could not boot sleep image\n"); error = ENXIO; goto fail; } - bzero(sc->sc_ldata, sizeof(struct txp_ldata)); - if (txp_alloc_rings(sc)) { - error = ENXIO; - goto fail; - } - - if (txp_command(sc, TXP_CMD_MAX_PKT_SIZE_WRITE, TXP_MAX_PKTLEN, 0, 0, - NULL, NULL, NULL, 1)) { - error = ENXIO; - goto fail; - } - + /* Get station address. 
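+ * The first two octets of the address arrive in p1 and the remaining four in p2; both are byte-swapped into eaddr[] below.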
*/ if (txp_command(sc, TXP_CMD_STATION_ADDRESS_READ, 0, 0, 0, - &p1, &p2, NULL, 1)) { + &p1, &p2, NULL, TXP_CMD_WAIT)) { error = ENXIO; goto fail; } + p1 = le16toh(p1); eaddr[0] = ((uint8_t *)&p1)[1]; eaddr[1] = ((uint8_t *)&p1)[0]; + p2 = le32toh(p2); eaddr[2] = ((uint8_t *)&p2)[3]; eaddr[3] = ((uint8_t *)&p2)[2]; eaddr[4] = ((uint8_t *)&p2)[1]; eaddr[5] = ((uint8_t *)&p2)[0]; - sc->sc_cold = 0; - - ifmedia_init(&sc->sc_ifmedia, 0, txp_ifmedia_upd, txp_ifmedia_sts); - ifmedia_add(&sc->sc_ifmedia, IFM_ETHER|IFM_10_T, 0, NULL); - ifmedia_add(&sc->sc_ifmedia, IFM_ETHER|IFM_10_T|IFM_HDX, 0, NULL); - ifmedia_add(&sc->sc_ifmedia, IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL); - ifmedia_add(&sc->sc_ifmedia, IFM_ETHER|IFM_100_TX, 0, NULL); - ifmedia_add(&sc->sc_ifmedia, IFM_ETHER|IFM_100_TX|IFM_HDX, 0, NULL); - ifmedia_add(&sc->sc_ifmedia, IFM_ETHER|IFM_100_TX|IFM_FDX, 0, NULL); - ifmedia_add(&sc->sc_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL); - - sc->sc_xcvr = TXP_XCVR_AUTO; - txp_command(sc, TXP_CMD_XCVR_SELECT, TXP_XCVR_AUTO, 0, 0, - NULL, NULL, NULL, 0); - ifmedia_set(&sc->sc_ifmedia, IFM_ETHER|IFM_AUTO); - ifp = sc->sc_ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { - device_printf(dev, "can not if_alloc()\n"); + device_printf(dev, "can not allocate ifnet structure\n"); error = ENOSPC; goto fail; } + + /* + * Show sleep image version information which may help to + * diagnose sleep image specific issues. + */ + rsp = NULL; + if (txp_ext_command(sc, TXP_CMD_READ_VERSION, 0, 0, 0, NULL, 0, + &rsp, TXP_CMD_WAIT)) { + device_printf(dev, "can not read sleep image version\n"); + error = ENXIO; + goto fail; + } + if (rsp->rsp_numdesc == 0) { + p2 = le32toh(rsp->rsp_par2) & 0xFFFF; + device_printf(dev, "Typhoon 1.0 sleep image (2000/%02u/%02u)\n", + p2 >> 8, p2 & 0xFF); + } else if (rsp->rsp_numdesc == 2) { + p2 = le32toh(rsp->rsp_par2); + ver = (uint8_t *)(rsp + 1); + /* + * Even though the datasheet says the command returns a + * NUL terminated version string, terminate the string + * explicitly; given the firmware's many bugs, even this + * simple guarantee cannot be trusted. + */ + ver[25] = '\0'; + device_printf(dev, + "Typhoon 1.1+ sleep image %02u.%03u.%03u %s\n", + p2 >> 24, (p2 >> 12) & 0xFFF, p2 & 0xFFF, ver); + } else { + p2 = le32toh(rsp->rsp_par2); + device_printf(dev, + "Unknown Typhoon sleep image version: %u:0x%08x\n", + rsp->rsp_numdesc, p2); + } + if (rsp != NULL) + free(rsp, M_DEVBUF); + + sc->sc_xcvr = TXP_XCVR_AUTO; + txp_command(sc, TXP_CMD_XCVR_SELECT, TXP_XCVR_AUTO, 0, 0, + NULL, NULL, NULL, TXP_CMD_NOWAIT); + ifmedia_set(&sc->sc_ifmedia, IFM_ETHER | IFM_AUTO); + ifp->if_softc = sc; if_initname(ifp, device_get_name(dev), device_get_unit(dev)); - ifp->if_mtu = ETHERMTU; ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_ioctl = txp_ioctl; ifp->if_start = txp_start; - ifp->if_watchdog = txp_watchdog; ifp->if_init = txp_init; - ifp->if_baudrate = 100000000; - ifp->if_snd.ifq_maxlen = TX_ENTRIES; - ifp->if_hwassist = 0; - txp_capabilities(sc); - + ifp->if_snd.ifq_drv_maxlen = TX_ENTRIES - 1; + IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen); + IFQ_SET_READY(&ifp->if_snd); /* - * Attach us everywhere + * It's possible to read the firmware's offload capabilities, but + * the firmware has not been downloaded yet, so announce the + * known-working capabilities here. We are not interested in the + * IPSec capability, and because of the many firmware bugs we + * cannot advertise the whole capability set anyway.
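+ * + * The capabilities announced here can still be toggled per interface at run time with ifconfig(8), e.g. "ifconfig txp0 -txcsum".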
*/ + ifp->if_capabilities = IFCAP_RXCSUM | IFCAP_TXCSUM; + if (pci_find_extcap(dev, PCIY_PMG, &pmc) == 0) + ifp->if_capabilities |= IFCAP_WOL_MAGIC; + /* Enable all capabilities. */ + ifp->if_capenable = ifp->if_capabilities; + ether_ifattach(ifp, eaddr); + /* VLAN capability setup. */ + ifp->if_capabilities |= IFCAP_VLAN_MTU; + ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWCSUM; + ifp->if_capenable = ifp->if_capabilities; + /* Tell the upper layer(s) we support long frames. */ + ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header); + + WRITE_REG(sc, TXP_IER, TXP_INTR_NONE); + WRITE_REG(sc, TXP_IMR, TXP_INTR_ALL); + + /* Create local taskq. */ + sc->sc_tq = taskqueue_create_fast("txp_taskq", M_WAITOK, + taskqueue_thread_enqueue, &sc->sc_tq); + if (sc->sc_tq == NULL) { + device_printf(dev, "could not create taskqueue.\n"); + ether_ifdetach(ifp); + error = ENXIO; + goto fail; + } + taskqueue_start_threads(&sc->sc_tq, 1, PI_NET, "%s taskq", + device_get_nameunit(sc->sc_dev)); + + /* Put controller into sleep. */ + if (txp_sleep(sc, 0) != 0) { + ether_ifdetach(ifp); + error = ENXIO; + goto fail; + } + error = bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE, - NULL, txp_intr, sc, &sc->sc_intrhand); + txp_intr, NULL, sc, &sc->sc_intrhand); - if (error) { + if (error != 0) { ether_ifdetach(ifp); - device_printf(dev, "couldn't set up irq\n"); + device_printf(dev, "couldn't set up interrupt handler.\n"); goto fail; } return (0); fail: - txp_release_resources(sc); - mtx_destroy(&sc->sc_mtx); + if (error != 0) + txp_detach(dev); return (error); } static int txp_detach(device_t dev) { struct txp_softc *sc; struct ifnet *ifp; - int i; sc = device_get_softc(dev); + ifp = sc->sc_ifp; + if (device_is_attached(dev)) { + TXP_LOCK(sc); + sc->sc_flags |= TXP_FLAG_DETACH; + txp_stop(sc); + TXP_UNLOCK(sc); + callout_drain(&sc->sc_tick); + taskqueue_drain(sc->sc_tq, &sc->sc_int_task); + ether_ifdetach(ifp); + } + WRITE_REG(sc, TXP_IMR, TXP_INTR_ALL); - TXP_LOCK(sc); - txp_stop(sc); - TXP_UNLOCK(sc); - txp_shutdown(dev); - callout_drain(&sc->sc_tick); - ifmedia_removeall(&sc->sc_ifmedia); - ether_ifdetach(ifp); - - for (i = 0; i < RXBUF_ENTRIES; i++) - free(sc->sc_rxbufs[i].rb_sd, M_DEVBUF); - - txp_release_resources(sc); - - mtx_destroy(&sc->sc_mtx); - return (0); -} - -static void -txp_release_resources(struct txp_softc *sc) -{ - device_t dev; - - dev = sc->sc_dev; - if (sc->sc_intrhand != NULL) bus_teardown_intr(dev, sc->sc_irq, sc->sc_intrhand); - if (sc->sc_irq != NULL) bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq); - if (sc->sc_res != NULL) - bus_release_resource(dev, TXP_RES, TXP_RID, sc->sc_res); - - if (sc->sc_ldata != NULL) - contigfree(sc->sc_ldata, sizeof(struct txp_ldata), M_DEVBUF); - - if (sc->sc_ifp) + bus_release_resource(dev, sc->sc_res_type, sc->sc_res_id, + sc->sc_res); + if (sc->sc_ifp != NULL) { if_free(sc->sc_ifp); -} + sc->sc_ifp = NULL; + } + txp_free_rings(sc); + mtx_destroy(&sc->sc_mtx); -static int -txp_chip_init(struct txp_softc *sc) -{ - /* disable interrupts */ - WRITE_REG(sc, TXP_IER, 0); - WRITE_REG(sc, TXP_IMR, - TXP_INT_SELF | TXP_INT_PCI_TABORT | TXP_INT_PCI_MABORT | - TXP_INT_DMA3 | TXP_INT_DMA2 | TXP_INT_DMA1 | TXP_INT_DMA0 | - TXP_INT_LATCH); - - /* ack all interrupts */ - WRITE_REG(sc, TXP_ISR, TXP_INT_RESERVED | TXP_INT_LATCH | - TXP_INT_A2H_7 | TXP_INT_A2H_6 | TXP_INT_A2H_5 | TXP_INT_A2H_4 | - TXP_INT_SELF | TXP_INT_PCI_TABORT | TXP_INT_PCI_MABORT | - TXP_INT_DMA3 | TXP_INT_DMA2 | TXP_INT_DMA1 | TXP_INT_DMA0 | - TXP_INT_A2H_3 | 
TXP_INT_A2H_2 | TXP_INT_A2H_1 | TXP_INT_A2H_0); - - if (txp_reset_adapter(sc)) - return (-1); - - /* disable interrupts */ - WRITE_REG(sc, TXP_IER, 0); - WRITE_REG(sc, TXP_IMR, - TXP_INT_SELF | TXP_INT_PCI_TABORT | TXP_INT_PCI_MABORT | - TXP_INT_DMA3 | TXP_INT_DMA2 | TXP_INT_DMA1 | TXP_INT_DMA0 | - TXP_INT_LATCH); - - /* ack all interrupts */ - WRITE_REG(sc, TXP_ISR, TXP_INT_RESERVED | TXP_INT_LATCH | - TXP_INT_A2H_7 | TXP_INT_A2H_6 | TXP_INT_A2H_5 | TXP_INT_A2H_4 | - TXP_INT_SELF | TXP_INT_PCI_TABORT | TXP_INT_PCI_MABORT | - TXP_INT_DMA3 | TXP_INT_DMA2 | TXP_INT_DMA1 | TXP_INT_DMA0 | - TXP_INT_A2H_3 | TXP_INT_A2H_2 | TXP_INT_A2H_1 | TXP_INT_A2H_0); - return (0); } static int -txp_reset_adapter(struct txp_softc *sc) +txp_reset(struct txp_softc *sc) { uint32_t r; int i; + /* Disable interrupts. */ + WRITE_REG(sc, TXP_IER, TXP_INTR_NONE); + WRITE_REG(sc, TXP_IMR, TXP_INTR_ALL); + /* Ack all pending interrupts. */ + WRITE_REG(sc, TXP_ISR, TXP_INTR_ALL); + r = 0; WRITE_REG(sc, TXP_SRR, TXP_SRR_ALL); DELAY(1000); WRITE_REG(sc, TXP_SRR, 0); - /* Should wait max 6 seconds */ + /* Should wait max 6 seconds. */ for (i = 0; i < 6000; i++) { r = READ_REG(sc, TXP_A2H_0); if (r == STAT_WAITING_FOR_HOST_REQUEST) break; DELAY(1000); } - if (r != STAT_WAITING_FOR_HOST_REQUEST) { + if (r != STAT_WAITING_FOR_HOST_REQUEST) device_printf(sc->sc_dev, "reset hung\n"); - return (-1); + + WRITE_REG(sc, TXP_IER, TXP_INTR_NONE); + WRITE_REG(sc, TXP_IMR, TXP_INTR_ALL); + WRITE_REG(sc, TXP_ISR, TXP_INTR_ALL); + + /* + * Give more time to complete loading sleep image before + * trying to boot from sleep image. + */ + DELAY(5000); + + return (0); +} + +static int +txp_boot(struct txp_softc *sc, uint32_t state) +{ + + /* See if it's waiting for boot, and try to boot it. */ + if (txp_wait(sc, state) != 0) { + device_printf(sc->sc_dev, "not waiting for boot\n"); + return (ENXIO); } + WRITE_REG(sc, TXP_H2A_2, TXP_ADDR_HI(sc->sc_ldata.txp_boot_paddr)); + TXP_BARRIER(sc, TXP_H2A_2, 4, BUS_SPACE_BARRIER_WRITE); + WRITE_REG(sc, TXP_H2A_1, TXP_ADDR_LO(sc->sc_ldata.txp_boot_paddr)); + TXP_BARRIER(sc, TXP_H2A_1, 4, BUS_SPACE_BARRIER_WRITE); + WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_REGISTER_BOOT_RECORD); + TXP_BARRIER(sc, TXP_H2A_0, 4, BUS_SPACE_BARRIER_WRITE); + + /* See if it booted. */ + if (txp_wait(sc, STAT_RUNNING) != 0) { + device_printf(sc->sc_dev, "firmware not running\n"); + return (ENXIO); + } + + /* Clear TX and CMD ring write registers. 
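+ * As elsewhere, TXP_H2A_0 carries the command word and is written last, after the parameter registers, since the boot firmware acts on TXP_H2A_0.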
*/ + WRITE_REG(sc, TXP_H2A_1, TXP_BOOTCMD_NULL); + TXP_BARRIER(sc, TXP_H2A_1, 4, BUS_SPACE_BARRIER_WRITE); + WRITE_REG(sc, TXP_H2A_2, TXP_BOOTCMD_NULL); + TXP_BARRIER(sc, TXP_H2A_2, 4, BUS_SPACE_BARRIER_WRITE); + WRITE_REG(sc, TXP_H2A_3, TXP_BOOTCMD_NULL); + TXP_BARRIER(sc, TXP_H2A_3, 4, BUS_SPACE_BARRIER_WRITE); + WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_NULL); + TXP_BARRIER(sc, TXP_H2A_0, 4, BUS_SPACE_BARRIER_WRITE); + return (0); } static int txp_download_fw(struct txp_softc *sc) { struct txp_fw_file_header *fileheader; struct txp_fw_section_header *secthead; - int error, sect; - uint32_t r, i, ier, imr; + int sect; + uint32_t error, ier, imr; - r = 0; + TXP_LOCK_ASSERT(sc); + error = 0; ier = READ_REG(sc, TXP_IER); WRITE_REG(sc, TXP_IER, ier | TXP_INT_A2H_0); imr = READ_REG(sc, TXP_IMR); WRITE_REG(sc, TXP_IMR, imr | TXP_INT_A2H_0); - for (i = 0; i < 10000; i++) { - r = READ_REG(sc, TXP_A2H_0); - if (r == STAT_WAITING_FOR_HOST_REQUEST) - break; - DELAY(50); - } - if (r != STAT_WAITING_FOR_HOST_REQUEST) { + if (txp_wait(sc, STAT_WAITING_FOR_HOST_REQUEST) != 0) { device_printf(sc->sc_dev, "not waiting for host request\n"); - error = -1; + error = ETIMEDOUT; goto fail; } - /* Ack the status */ + /* Ack the status. */ WRITE_REG(sc, TXP_ISR, TXP_INT_A2H_0); fileheader = (struct txp_fw_file_header *)tc990image; if (bcmp("TYPHOON", fileheader->magicid, sizeof(fileheader->magicid))) { - device_printf(sc->sc_dev, "fw invalid magic\n"); - error = -1; + device_printf(sc->sc_dev, "firmware invalid magic\n"); goto fail; } - /* Tell boot firmware to get ready for image */ - WRITE_REG(sc, TXP_H2A_1, fileheader->addr); - WRITE_REG(sc, TXP_H2A_2, fileheader->hmac[0]); - WRITE_REG(sc, TXP_H2A_3, fileheader->hmac[1]); - WRITE_REG(sc, TXP_H2A_4, fileheader->hmac[2]); - WRITE_REG(sc, TXP_H2A_5, fileheader->hmac[3]); - WRITE_REG(sc, TXP_H2A_6, fileheader->hmac[4]); + /* Tell boot firmware to get ready for image. 
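+ * The image header fields are stored little-endian, hence the le32toh() conversions before the register writes below.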
*/ + WRITE_REG(sc, TXP_H2A_1, le32toh(fileheader->addr)); + TXP_BARRIER(sc, TXP_H2A_1, 4, BUS_SPACE_BARRIER_WRITE); + WRITE_REG(sc, TXP_H2A_2, le32toh(fileheader->hmac[0])); + TXP_BARRIER(sc, TXP_H2A_2, 4, BUS_SPACE_BARRIER_WRITE); + WRITE_REG(sc, TXP_H2A_3, le32toh(fileheader->hmac[1])); + TXP_BARRIER(sc, TXP_H2A_3, 4, BUS_SPACE_BARRIER_WRITE); + WRITE_REG(sc, TXP_H2A_4, le32toh(fileheader->hmac[2])); + TXP_BARRIER(sc, TXP_H2A_4, 4, BUS_SPACE_BARRIER_WRITE); + WRITE_REG(sc, TXP_H2A_5, le32toh(fileheader->hmac[3])); + TXP_BARRIER(sc, TXP_H2A_5, 4, BUS_SPACE_BARRIER_WRITE); + WRITE_REG(sc, TXP_H2A_6, le32toh(fileheader->hmac[4])); + TXP_BARRIER(sc, TXP_H2A_6, 4, BUS_SPACE_BARRIER_WRITE); WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_RUNTIME_IMAGE); + TXP_BARRIER(sc, TXP_H2A_0, 4, BUS_SPACE_BARRIER_WRITE); if (txp_download_fw_wait(sc)) { - device_printf(sc->sc_dev, "fw wait failed, initial\n"); - error = -1; + device_printf(sc->sc_dev, "firmware wait failed, initial\n"); + error = ETIMEDOUT; goto fail; } secthead = (struct txp_fw_section_header *)(((uint8_t *)tc990image) + sizeof(struct txp_fw_file_header)); - for (sect = 0; sect < fileheader->nsections; sect++) { - - if (txp_download_fw_section(sc, secthead, sect)) { - error = -1; + for (sect = 0; sect < le32toh(fileheader->nsections); sect++) { + if ((error = txp_download_fw_section(sc, secthead, sect)) != 0) goto fail; - } secthead = (struct txp_fw_section_header *) - (((uint8_t *)secthead) + secthead->nbytes + + (((uint8_t *)secthead) + le32toh(secthead->nbytes) + sizeof(*secthead)); } WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_DOWNLOAD_COMPLETE); + TXP_BARRIER(sc, TXP_H2A_0, 4, BUS_SPACE_BARRIER_WRITE); - for (i = 0; i < 10000; i++) { - r = READ_REG(sc, TXP_A2H_0); - if (r == STAT_WAITING_FOR_BOOT) - break; - DELAY(50); - } - if (r != STAT_WAITING_FOR_BOOT) { + if (txp_wait(sc, STAT_WAITING_FOR_BOOT) != 0) { device_printf(sc->sc_dev, "not waiting for boot\n"); - error = -1; + error = ETIMEDOUT; goto fail; } fail: WRITE_REG(sc, TXP_IER, ier); WRITE_REG(sc, TXP_IMR, imr); return (error); } static int txp_download_fw_wait(struct txp_softc *sc) { - uint32_t i, r; + uint32_t i; - r = 0; - for (i = 0; i < 10000; i++) { - r = READ_REG(sc, TXP_ISR); - if (r & TXP_INT_A2H_0) + TXP_LOCK_ASSERT(sc); + + for (i = 0; i < TXP_TIMEOUT; i++) { + if ((READ_REG(sc, TXP_ISR) & TXP_INT_A2H_0) != 0) break; DELAY(50); } - if (!(r & TXP_INT_A2H_0)) { - device_printf(sc->sc_dev, "fw wait failed comm0\n"); - return (-1); + if (i == TXP_TIMEOUT) { + device_printf(sc->sc_dev, "firmware wait failed comm0\n"); + return (ETIMEDOUT); } WRITE_REG(sc, TXP_ISR, TXP_INT_A2H_0); - r = READ_REG(sc, TXP_A2H_0); - if (r != STAT_WAITING_FOR_SEGMENT) { - device_printf(sc->sc_dev, "fw not waiting for segment\n"); - return (-1); + if (READ_REG(sc, TXP_A2H_0) != STAT_WAITING_FOR_SEGMENT) { + device_printf(sc->sc_dev, "firmware not waiting for segment\n"); + return (ETIMEDOUT); } return (0); } static int txp_download_fw_section(struct txp_softc *sc, struct txp_fw_section_header *sect, int sectnum) { - vm_offset_t dma; + bus_dma_tag_t sec_tag; + bus_dmamap_t sec_map; + bus_addr_t sec_paddr; + uint8_t *sec_buf; int rseg, err = 0; struct mbuf m; uint16_t csum; - /* Skip zero length sections */ - if (sect->nbytes == 0) + TXP_LOCK_ASSERT(sc); + + /* Skip zero length sections. */ + if (le32toh(sect->nbytes) == 0) return (0); - /* Make sure we aren't past the end of the image */ + /* Make sure we aren't past the end of the image. 
*/ rseg = ((uint8_t *)sect) - ((uint8_t *)tc990image); if (rseg >= sizeof(tc990image)) { - device_printf(sc->sc_dev, "fw invalid section address, " - "section %d\n", sectnum); - return (-1); + device_printf(sc->sc_dev, + "firmware invalid section address, section %d\n", sectnum); + return (EIO); } - /* Make sure this section doesn't go past the end */ - rseg += sect->nbytes; + /* Make sure this section doesn't go past the end. */ + rseg += le32toh(sect->nbytes); if (rseg >= sizeof(tc990image)) { - device_printf(sc->sc_dev, "fw truncated section %d\n", + device_printf(sc->sc_dev, "firmware truncated section %d\n", sectnum); - return (-1); + return (EIO); } - bcopy(((uint8_t *)sect) + sizeof(*sect), sc->sc_fwbuf, sect->nbytes); - dma = vtophys(sc->sc_fwbuf); + sec_tag = NULL; + sec_map = NULL; + sec_buf = NULL; + /* XXX */ + TXP_UNLOCK(sc); + err = txp_dma_alloc(sc, "firmware sections", &sec_tag, sizeof(uint32_t), + 0, &sec_map, (void **)&sec_buf, le32toh(sect->nbytes), &sec_paddr); + TXP_LOCK(sc); + if (err != 0) + goto bail; + bcopy(((uint8_t *)sect) + sizeof(*sect), sec_buf, + le32toh(sect->nbytes)); /* * dummy up mbuf and verify section checksum */ m.m_type = MT_DATA; m.m_next = m.m_nextpkt = NULL; - m.m_len = sect->nbytes; - m.m_data = sc->sc_fwbuf; + m.m_len = le32toh(sect->nbytes); + m.m_data = sec_buf; m.m_flags = 0; - csum = in_cksum(&m, sect->nbytes); + csum = in_cksum(&m, le32toh(sect->nbytes)); if (csum != sect->cksum) { - device_printf(sc->sc_dev, "fw section %d, bad " - "cksum (expected 0x%x got 0x%x)\n", - sectnum, sect->cksum, csum); - err = -1; + device_printf(sc->sc_dev, + "firmware section %d, bad cksum (expected 0x%x got 0x%x)\n", + sectnum, le16toh(sect->cksum), csum); + err = EIO; goto bail; } - WRITE_REG(sc, TXP_H2A_1, sect->nbytes); - WRITE_REG(sc, TXP_H2A_2, sect->cksum); - WRITE_REG(sc, TXP_H2A_3, sect->addr); - WRITE_REG(sc, TXP_H2A_4, 0); - WRITE_REG(sc, TXP_H2A_5, dma & 0xffffffff); + bus_dmamap_sync(sec_tag, sec_map, BUS_DMASYNC_PREWRITE); + + WRITE_REG(sc, TXP_H2A_1, le32toh(sect->nbytes)); + TXP_BARRIER(sc, TXP_H2A_1, 4, BUS_SPACE_BARRIER_WRITE); + WRITE_REG(sc, TXP_H2A_2, le16toh(sect->cksum)); + TXP_BARRIER(sc, TXP_H2A_2, 4, BUS_SPACE_BARRIER_WRITE); + WRITE_REG(sc, TXP_H2A_3, le32toh(sect->addr)); + TXP_BARRIER(sc, TXP_H2A_3, 4, BUS_SPACE_BARRIER_WRITE); + WRITE_REG(sc, TXP_H2A_4, TXP_ADDR_HI(sec_paddr)); + TXP_BARRIER(sc, TXP_H2A_4, 4, BUS_SPACE_BARRIER_WRITE); + WRITE_REG(sc, TXP_H2A_5, TXP_ADDR_LO(sec_paddr)); + TXP_BARRIER(sc, TXP_H2A_5, 4, BUS_SPACE_BARRIER_WRITE); WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_SEGMENT_AVAILABLE); + TXP_BARRIER(sc, TXP_H2A_0, 4, BUS_SPACE_BARRIER_WRITE); if (txp_download_fw_wait(sc)) { - device_printf(sc->sc_dev, "fw wait failed, " - "section %d\n", sectnum); - err = -1; + device_printf(sc->sc_dev, + "firmware wait failed, section %d\n", sectnum); + err = ETIMEDOUT; } + bus_dmamap_sync(sec_tag, sec_map, BUS_DMASYNC_POSTWRITE); bail: + txp_dma_free(sc, &sec_tag, &sec_map, (void **)&sec_buf); return (err); } -static void +static int txp_intr(void *vsc) { - struct txp_softc *sc = vsc; - struct txp_hostvar *hv = sc->sc_hostvar; + struct txp_softc *sc; + uint32_t status; + + sc = vsc; + status = READ_REG(sc, TXP_ISR); + if ((status & TXP_INT_LATCH) == 0) + return (FILTER_STRAY); + WRITE_REG(sc, TXP_ISR, status); + WRITE_REG(sc, TXP_IMR, TXP_INTR_ALL); + taskqueue_enqueue(sc->sc_tq, &sc->sc_int_task); + + return (FILTER_HANDLED); +} + +static void +txp_int_task(void *arg, int pending) +{ + struct txp_softc *sc; + struct ifnet *ifp; + 
struct txp_hostvar *hv; uint32_t isr; + int more; - /* mask all interrupts */ - TXP_LOCK(sc); - WRITE_REG(sc, TXP_IMR, TXP_INT_RESERVED | TXP_INT_SELF | - TXP_INT_A2H_7 | TXP_INT_A2H_6 | TXP_INT_A2H_5 | TXP_INT_A2H_4 | - TXP_INT_A2H_2 | TXP_INT_A2H_1 | TXP_INT_A2H_0 | - TXP_INT_DMA3 | TXP_INT_DMA2 | TXP_INT_DMA1 | TXP_INT_DMA0 | - TXP_INT_PCI_TABORT | TXP_INT_PCI_MABORT | TXP_INT_LATCH); + sc = (struct txp_softc *)arg; + TXP_LOCK(sc); + ifp = sc->sc_ifp; + hv = sc->sc_hostvar; isr = READ_REG(sc, TXP_ISR); - while (isr) { + if ((isr & TXP_INT_LATCH) != 0) WRITE_REG(sc, TXP_ISR, isr); + if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { + bus_dmamap_sync(sc->sc_cdata.txp_hostvar_tag, + sc->sc_cdata.txp_hostvar_map, + BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); + more = 0; if ((*sc->sc_rxhir.r_roff) != (*sc->sc_rxhir.r_woff)) - txp_rx_reclaim(sc, &sc->sc_rxhir); + more += txp_rx_reclaim(sc, &sc->sc_rxhir, + sc->sc_process_limit); if ((*sc->sc_rxlor.r_roff) != (*sc->sc_rxlor.r_woff)) - txp_rx_reclaim(sc, &sc->sc_rxlor); - + more += txp_rx_reclaim(sc, &sc->sc_rxlor, + sc->sc_process_limit); + /* + * XXX + * The controller does not seem smart enough to handle + * FIFO overflow conditions under heavy network load; + * no matter how quickly new Rx buffers were passed to + * the controller the situation did not change. Flow + * control might be the only way to mitigate the issue, + * but the firmware has no commands to control the + * threshold for emitting pause frames. + */ if (hv->hv_rx_buf_write_idx == hv->hv_rx_buf_read_idx) txp_rxbuf_reclaim(sc); - if (sc->sc_txhir.r_cnt && (sc->sc_txhir.r_cons != - TXP_OFFSET2IDX(*(sc->sc_txhir.r_off)))) + TXP_OFFSET2IDX(le32toh(*(sc->sc_txhir.r_off))))) txp_tx_reclaim(sc, &sc->sc_txhir); - if (sc->sc_txlor.r_cnt && (sc->sc_txlor.r_cons != - TXP_OFFSET2IDX(*(sc->sc_txlor.r_off)))) + TXP_OFFSET2IDX(le32toh(*(sc->sc_txlor.r_off))))) txp_tx_reclaim(sc, &sc->sc_txlor); - - isr = READ_REG(sc, TXP_ISR); + bus_dmamap_sync(sc->sc_cdata.txp_hostvar_tag, + sc->sc_cdata.txp_hostvar_map, + BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); + if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) + txp_start_locked(sc->sc_ifp); + if (more != 0 || (READ_REG(sc, TXP_ISR) & TXP_INT_LATCH) != 0) { + taskqueue_enqueue(sc->sc_tq, &sc->sc_int_task); + TXP_UNLOCK(sc); + return; + } } - /* unmask all interrupts */ - WRITE_REG(sc, TXP_IMR, TXP_INT_A2H_3); - - txp_start_locked(sc->sc_ifp); + /* Re-enable interrupts.
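+ * txp_intr() masked all interrupts (TXP_IMR = TXP_INTR_ALL) before queueing this task; unmask them again now that ring processing is done.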
*/ + WRITE_REG(sc, TXP_IMR, TXP_INTR_NONE); TXP_UNLOCK(sc); } -static void -txp_rx_reclaim(struct txp_softc *sc, struct txp_rx_ring *r) +#ifndef __NO_STRICT_ALIGNMENT +static __inline void +txp_fixup_rx(struct mbuf *m) { - struct ifnet *ifp = sc->sc_ifp; + int i; + uint16_t *src, *dst; + + src = mtod(m, uint16_t *); + dst = src - (TXP_RXBUF_ALIGN - ETHER_ALIGN) / sizeof *src; + + for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++) + *dst++ = *src++; + + m->m_data -= TXP_RXBUF_ALIGN - ETHER_ALIGN; +} +#endif + +static int +txp_rx_reclaim(struct txp_softc *sc, struct txp_rx_ring *r, int count) +{ + struct ifnet *ifp; struct txp_rx_desc *rxd; struct mbuf *m; - struct txp_swdesc *sd = NULL; - uint32_t roff, woff; + struct txp_rx_swdesc *sd; + uint32_t roff, woff, rx_stat, prog; TXP_LOCK_ASSERT(sc); - roff = *r->r_roff; - woff = *r->r_woff; - rxd = r->r_desc + (roff / sizeof(struct txp_rx_desc)); - while (roff != woff) { + ifp = sc->sc_ifp; - if (rxd->rx_flags & RX_FLAGS_ERROR) { - device_printf(sc->sc_dev, "error 0x%x\n", - rxd->rx_stat); - ifp->if_ierrors++; - goto next; - } + bus_dmamap_sync(r->r_tag, r->r_map, BUS_DMASYNC_POSTREAD | + BUS_DMASYNC_POSTWRITE); - /* retrieve stashed pointer */ - sd = rxd->rx_sd; - + roff = le32toh(*r->r_roff); + woff = le32toh(*r->r_woff); + rxd = r->r_desc + roff / sizeof(struct txp_rx_desc); + for (prog = 0; roff != woff; prog++, count--) { + if (count <= 0) + break; + bcopy((u_long *)&rxd->rx_vaddrlo, &sd, sizeof(sd)); + KASSERT(sd != NULL, ("%s: Rx desc ring corrupted", __func__)); + bus_dmamap_sync(sc->sc_cdata.txp_rx_tag, sd->sd_map, + BUS_DMASYNC_POSTREAD); + bus_dmamap_unload(sc->sc_cdata.txp_rx_tag, sd->sd_map); m = sd->sd_mbuf; + KASSERT(m != NULL, ("%s: Rx buffer ring corrupted", __func__)); sd->sd_mbuf = NULL; - - m->m_pkthdr.len = m->m_len = rxd->rx_len; - -#ifdef __STRICT_ALIGNMENT - { - /* - * XXX Nice chip, except it won't accept "off by 2" - * buffers, so we're force to copy. Supposedly - * this will be fixed in a newer firmware rev - * and this will be temporary. 
- */ - struct mbuf *mnew; - - mnew = m_devget(mtod(m, caddr_t), rxd->rx_len, - ETHER_ALIGN, ifp, NULL); + TAILQ_REMOVE(&sc->sc_busy_list, sd, sd_next); + TAILQ_INSERT_TAIL(&sc->sc_free_list, sd, sd_next); + if ((rxd->rx_flags & RX_FLAGS_ERROR) != 0) { + if (bootverbose) + device_printf(sc->sc_dev, "Rx error %u\n", + le32toh(rxd->rx_stat) & RX_ERROR_MASK); m_freem(m); - if (mnew == NULL) { - ifp->if_ierrors++; - goto next; - } - m = mnew; + goto next; } + + m->m_pkthdr.len = m->m_len = le16toh(rxd->rx_len); + m->m_pkthdr.rcvif = ifp; +#ifndef __NO_STRICT_ALIGNMENT + txp_fixup_rx(m); #endif + rx_stat = le32toh(rxd->rx_stat); + if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) { + if ((rx_stat & RX_STAT_IPCKSUMBAD) != 0) + m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; + else if ((rx_stat & RX_STAT_IPCKSUMGOOD) != 0) + m->m_pkthdr.csum_flags |= + CSUM_IP_CHECKED|CSUM_IP_VALID; - if (rxd->rx_stat & RX_STAT_IPCKSUMBAD) - m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; - else if (rxd->rx_stat & RX_STAT_IPCKSUMGOOD) - m->m_pkthdr.csum_flags |= - CSUM_IP_CHECKED|CSUM_IP_VALID; - - if ((rxd->rx_stat & RX_STAT_TCPCKSUMGOOD) || - (rxd->rx_stat & RX_STAT_UDPCKSUMGOOD)) { - m->m_pkthdr.csum_flags |= - CSUM_DATA_VALID|CSUM_PSEUDO_HDR; - m->m_pkthdr.csum_data = 0xffff; + if ((rx_stat & RX_STAT_TCPCKSUMGOOD) != 0 || + (rx_stat & RX_STAT_UDPCKSUMGOOD) != 0) { + m->m_pkthdr.csum_flags |= + CSUM_DATA_VALID | CSUM_PSEUDO_HDR; + m->m_pkthdr.csum_data = 0xffff; + } } - if (rxd->rx_stat & RX_STAT_VLAN) { - m->m_pkthdr.ether_vtag = htons(rxd->rx_vlan >> 16); + /* + * XXX + * Typhoon has a firmware bug that VLAN tag is always + * stripped out even if it is told to not remove the tag. + * Therefore don't check if_capenable here. + */ + if (/* (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 && */ + (rx_stat & RX_STAT_VLAN) != 0) { + m->m_pkthdr.ether_vtag = + bswap16((le32toh(rxd->rx_vlan) >> 16)); m->m_flags |= M_VLANTAG; } TXP_UNLOCK(sc); (*ifp->if_input)(ifp, m); TXP_LOCK(sc); next: - roff += sizeof(struct txp_rx_desc); if (roff == (RX_ENTRIES * sizeof(struct txp_rx_desc))) { roff = 0; rxd = r->r_desc; } else rxd++; - woff = *r->r_woff; + prog++; } - *r->r_roff = woff; + if (prog == 0) + return (0); + + bus_dmamap_sync(r->r_tag, r->r_map, BUS_DMASYNC_PREREAD | + BUS_DMASYNC_PREWRITE); + *r->r_roff = le32toh(roff); + + return (count > 0 ? 
0 : EAGAIN); } static void txp_rxbuf_reclaim(struct txp_softc *sc) { - struct ifnet *ifp = sc->sc_ifp; - struct txp_hostvar *hv = sc->sc_hostvar; + struct txp_hostvar *hv; struct txp_rxbuf_desc *rbd; - struct txp_swdesc *sd; - uint32_t i; + struct txp_rx_swdesc *sd; + bus_dma_segment_t segs[1]; + int nsegs, prod, prog; + uint32_t cons; TXP_LOCK_ASSERT(sc); - if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) + + hv = sc->sc_hostvar; + cons = TXP_OFFSET2IDX(le32toh(hv->hv_rx_buf_read_idx)); + prod = sc->sc_rxbufprod; + TXP_DESC_INC(prod, RXBUF_ENTRIES); + if (prod == cons) return; - i = sc->sc_rxbufprod; - rbd = sc->sc_rxbufs + i; + bus_dmamap_sync(sc->sc_cdata.txp_rxbufs_tag, + sc->sc_cdata.txp_rxbufs_map, + BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); - while (1) { - sd = rbd->rb_sd; - if (sd->sd_mbuf != NULL) + for (prog = 0; prod != cons; prog++) { + sd = TAILQ_FIRST(&sc->sc_free_list); + if (sd == NULL) break; - + rbd = sc->sc_rxbufs + prod; + bcopy((u_long *)&rbd->rb_vaddrlo, &sd, sizeof(sd)); sd->sd_mbuf = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); if (sd->sd_mbuf == NULL) - return; - sd->sd_mbuf->m_pkthdr.rcvif = ifp; + break; sd->sd_mbuf->m_pkthdr.len = sd->sd_mbuf->m_len = MCLBYTES; - - rbd->rb_paddrlo = vtophys(mtod(sd->sd_mbuf, vm_offset_t)) - & 0xffffffff; - rbd->rb_paddrhi = 0; - - hv->hv_rx_buf_write_idx = TXP_IDX2OFFSET(i); - - if (++i == RXBUF_ENTRIES) { - i = 0; - rbd = sc->sc_rxbufs; - } else - rbd++; +#ifndef __NO_STRICT_ALIGNMENT + m_adj(sd->sd_mbuf, TXP_RXBUF_ALIGN); +#endif + if (bus_dmamap_load_mbuf_sg(sc->sc_cdata.txp_rx_tag, + sd->sd_map, sd->sd_mbuf, segs, &nsegs, 0) != 0) { + m_freem(sd->sd_mbuf); + sd->sd_mbuf = NULL; + break; + } + KASSERT(nsegs == 1, ("%s : %d segments returned!", __func__, + nsegs)); + TAILQ_REMOVE(&sc->sc_free_list, sd, sd_next); + TAILQ_INSERT_TAIL(&sc->sc_busy_list, sd, sd_next); + bus_dmamap_sync(sc->sc_cdata.txp_rx_tag, sd->sd_map, + BUS_DMASYNC_PREREAD); + rbd->rb_paddrlo = htole32(TXP_ADDR_LO(segs[0].ds_addr)); + rbd->rb_paddrhi = htole32(TXP_ADDR_HI(segs[0].ds_addr)); + TXP_DESC_INC(prod, RXBUF_ENTRIES); } - sc->sc_rxbufprod = i; + if (prog == 0) + return; + bus_dmamap_sync(sc->sc_cdata.txp_rxbufs_tag, + sc->sc_cdata.txp_rxbufs_map, + BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); + prod = (prod + RXBUF_ENTRIES - 1) % RXBUF_ENTRIES; + sc->sc_rxbufprod = prod; + hv->hv_rx_buf_write_idx = htole32(TXP_IDX2OFFSET(prod)); } /* * Reclaim mbufs and entries from a transmit ring. 
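* Descriptors up to the firmware's read index (*r->r_off) are complete; data descriptors have their DMA maps unloaded and mbufs freed.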
*/ static void txp_tx_reclaim(struct txp_softc *sc, struct txp_tx_ring *r) { - struct ifnet *ifp = sc->sc_ifp; - uint32_t idx = TXP_OFFSET2IDX(*(r->r_off)); - uint32_t cons = r->r_cons, cnt = r->r_cnt; - struct txp_tx_desc *txd = r->r_desc + cons; - struct txp_swdesc *sd = sc->sc_txd + cons; - struct mbuf *m; + struct ifnet *ifp; + uint32_t idx; + uint32_t cons, cnt; + struct txp_tx_desc *txd; + struct txp_swdesc *sd; TXP_LOCK_ASSERT(sc); - while (cons != idx) { - if (cnt == 0) - break; - if ((txd->tx_flags & TX_FLAGS_TYPE_M) == - TX_FLAGS_TYPE_DATA) { - m = sd->sd_mbuf; - if (m != NULL) { - m_freem(m); + bus_dmamap_sync(r->r_tag, r->r_map, BUS_DMASYNC_POSTREAD | + BUS_DMASYNC_POSTWRITE); + ifp = sc->sc_ifp; + idx = TXP_OFFSET2IDX(le32toh(*(r->r_off))); + cons = r->r_cons; + cnt = r->r_cnt; + txd = r->r_desc + cons; + sd = sc->sc_txd + cons; + + for (cnt = r->r_cnt; cons != idx && cnt > 0; cnt--) { + if ((txd->tx_flags & TX_FLAGS_TYPE_M) == TX_FLAGS_TYPE_DATA) { + if (sd->sd_mbuf != NULL) { + bus_dmamap_sync(sc->sc_cdata.txp_tx_tag, + sd->sd_map, BUS_DMASYNC_POSTWRITE); + bus_dmamap_unload(sc->sc_cdata.txp_tx_tag, + sd->sd_map); + m_freem(sd->sd_mbuf); + sd->sd_mbuf = NULL; txd->tx_addrlo = 0; txd->tx_addrhi = 0; - ifp->if_opackets++; + txd->tx_flags = 0; } } ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; if (++cons == TX_ENTRIES) { txd = r->r_desc; cons = 0; sd = sc->sc_txd; } else { txd++; sd++; } - - cnt--; } + bus_dmamap_sync(r->r_tag, r->r_map, BUS_DMASYNC_PREREAD | + BUS_DMASYNC_PREWRITE); r->r_cons = cons; r->r_cnt = cnt; if (cnt == 0) - ifp->if_timer = 0; + sc->sc_watchdog_timer = 0; } static int txp_shutdown(device_t dev) { + + return (txp_suspend(dev)); +} + +static int +txp_suspend(device_t dev) +{ struct txp_softc *sc; + struct ifnet *ifp; + uint8_t *eaddr; + uint16_t p1; + uint32_t p2; + int pmc; + uint16_t pmstat; sc = device_get_softc(dev); TXP_LOCK(sc); + ifp = sc->sc_ifp; + txp_stop(sc); + txp_init_rings(sc); + /* Reset controller and make it reload sleep image. */ + txp_reset(sc); + /* Let controller boot from sleep image. */ + if (txp_boot(sc, STAT_WAITING_FOR_HOST_REQUEST) != 0) + device_printf(sc->sc_dev, "couldn't boot sleep image\n"); - /* mask all interrupts */ - WRITE_REG(sc, TXP_IMR, - TXP_INT_SELF | TXP_INT_PCI_TABORT | TXP_INT_PCI_MABORT | - TXP_INT_DMA3 | TXP_INT_DMA2 | TXP_INT_DMA1 | TXP_INT_DMA0 | - TXP_INT_LATCH); + /* Set station address. */ + eaddr = IF_LLADDR(sc->sc_ifp); + p1 = 0; + ((uint8_t *)&p1)[1] = eaddr[0]; + ((uint8_t *)&p1)[0] = eaddr[1]; + p1 = le16toh(p1); + ((uint8_t *)&p2)[3] = eaddr[2]; + ((uint8_t *)&p2)[2] = eaddr[3]; + ((uint8_t *)&p2)[1] = eaddr[4]; + ((uint8_t *)&p2)[0] = eaddr[5]; + p2 = le32toh(p2); + txp_command(sc, TXP_CMD_STATION_ADDRESS_WRITE, p1, p2, 0, NULL, NULL, + NULL, TXP_CMD_WAIT); + txp_set_filter(sc); + WRITE_REG(sc, TXP_IER, TXP_INTR_NONE); + WRITE_REG(sc, TXP_IMR, TXP_INTR_ALL); + txp_sleep(sc, sc->sc_ifp->if_capenable); + if (pci_find_extcap(sc->sc_dev, PCIY_PMG, &pmc) == 0) { + /* Request PME. 
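+ * PME is armed below only when WOL is enabled; txp_resume() clears PCIM_PSTAT_PMEENABLE again on wakeup.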
*/ + pmstat = pci_read_config(sc->sc_dev, + pmc + PCIR_POWER_STATUS, 2); + pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE); + if ((ifp->if_capenable & IFCAP_WOL) != 0) + pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE; + pci_write_config(sc->sc_dev, + pmc + PCIR_POWER_STATUS, pmstat, 2); + } + TXP_UNLOCK(sc); - txp_command(sc, TXP_CMD_TX_DISABLE, 0, 0, 0, NULL, NULL, NULL, 0); - txp_command(sc, TXP_CMD_RX_DISABLE, 0, 0, 0, NULL, NULL, NULL, 0); - txp_command(sc, TXP_CMD_HALT, 0, 0, 0, NULL, NULL, NULL, 0); + return (0); +} + +static int +txp_resume(device_t dev) +{ + struct txp_softc *sc; + int pmc; + uint16_t pmstat; + + sc = device_get_softc(dev); + + TXP_LOCK(sc); + if (pci_find_extcap(sc->sc_dev, PCIY_PMG, &pmc) == 0) { + /* Disable PME and clear PME status. */ + pmstat = pci_read_config(sc->sc_dev, + pmc + PCIR_POWER_STATUS, 2); + if ((pmstat & PCIM_PSTAT_PMEENABLE) != 0) { + pmstat &= ~PCIM_PSTAT_PMEENABLE; + pci_write_config(sc->sc_dev, + pmc + PCIR_POWER_STATUS, pmstat, 2); + } + } + if ((sc->sc_ifp->if_flags & IFF_UP) != 0) + txp_init_locked(sc); TXP_UNLOCK(sc); return (0); } +struct txp_dmamap_arg { + bus_addr_t txp_busaddr; +}; + +static void +txp_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error) +{ + struct txp_dmamap_arg *ctx; + + if (error != 0) + return; + + KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs)); + + ctx = (struct txp_dmamap_arg *)arg; + ctx->txp_busaddr = segs[0].ds_addr; +} + static int +txp_dma_alloc(struct txp_softc *sc, char *type, bus_dma_tag_t *tag, + bus_size_t alignment, bus_size_t boundary, bus_dmamap_t *map, void **buf, + bus_size_t size, bus_addr_t *paddr) +{ + struct txp_dmamap_arg ctx; + int error; + + /* Create DMA block tag. */ + error = bus_dma_tag_create( + sc->sc_cdata.txp_parent_tag, /* parent */ + alignment, boundary, /* algnmnt, boundary */ + BUS_SPACE_MAXADDR, /* lowaddr */ + BUS_SPACE_MAXADDR, /* highaddr */ + NULL, NULL, /* filter, filterarg */ + size, /* maxsize */ + 1, /* nsegments */ + size, /* maxsegsize */ + 0, /* flags */ + NULL, NULL, /* lockfunc, lockarg */ + tag); + if (error != 0) { + device_printf(sc->sc_dev, + "could not create DMA tag for %s.\n", type); + return (error); + } + + *paddr = 0; + /* Allocate DMA'able memory and load the DMA map. 
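+ * BUS_DMA_COHERENT requests cache-coherent memory where the platform supports it, keeping the subsequent bus_dmamap_sync() operations cheap.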
*/ + error = bus_dmamem_alloc(*tag, buf, BUS_DMA_WAITOK | BUS_DMA_ZERO | + BUS_DMA_COHERENT, map); + if (error != 0) { + device_printf(sc->sc_dev, + "could not allocate DMA'able memory for %s.\n", type); + return (error); + } + + ctx.txp_busaddr = 0; + error = bus_dmamap_load(*tag, *map, *(uint8_t **)buf, + size, txp_dmamap_cb, &ctx, BUS_DMA_NOWAIT); + if (error != 0 || ctx.txp_busaddr == 0) { + device_printf(sc->sc_dev, + "could not load DMA'able memory for %s.\n", type); + return (error); + } + *paddr = ctx.txp_busaddr; + + return (0); +} + +static void +txp_dma_free(struct txp_softc *sc, bus_dma_tag_t *tag, bus_dmamap_t *map, + void **buf) +{ + + if (*tag != NULL) { + if (*map != NULL) + bus_dmamap_unload(*tag, *map); + if (*map != NULL && buf != NULL) + bus_dmamem_free(*tag, *(uint8_t **)buf, *map); + *(uint8_t **)buf = NULL; + *map = NULL; + bus_dma_tag_destroy(*tag); + *tag = NULL; + } +} + +static int txp_alloc_rings(struct txp_softc *sc) { struct txp_boot_record *boot; struct txp_ldata *ld; - uint32_t r; - int i; + struct txp_swdesc *txd; + struct txp_rxbuf_desc *rbd; + struct txp_rx_swdesc *sd; + int error, i; - r = 0; - ld = sc->sc_ldata; - boot = &ld->txp_boot; + ld = &sc->sc_ldata; + boot = ld->txp_boot; /* boot record */ sc->sc_boot = boot; - /* host variables */ - bzero(&ld->txp_hostvar, sizeof(struct txp_hostvar)); - boot->br_hostvar_lo = vtophys(&ld->txp_hostvar); - boot->br_hostvar_hi = 0; - sc->sc_hostvar = (struct txp_hostvar *)&ld->txp_hostvar; + /* + * Create parent ring/DMA block tag. + * Datasheet says that all ring addresses and descriptors + * support 64bits addressing. However the controller is + * known to have no support DAC so limit DMA address space + * to 32bits. + */ + error = bus_dma_tag_create( + bus_get_dma_tag(sc->sc_dev), /* parent */ + 1, 0, /* algnmnt, boundary */ + BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ + BUS_SPACE_MAXADDR, /* highaddr */ + NULL, NULL, /* filter, filterarg */ + BUS_SPACE_MAXSIZE_32BIT, /* maxsize */ + 0, /* nsegments */ + BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ + 0, /* flags */ + NULL, NULL, /* lockfunc, lockarg */ + &sc->sc_cdata.txp_parent_tag); + if (error != 0) { + device_printf(sc->sc_dev, "could not create parent DMA tag.\n"); + return (error); + } - /* hi priority tx ring */ - boot->br_txhipri_lo = vtophys(&ld->txp_txhiring);; - boot->br_txhipri_hi = 0; - boot->br_txhipri_siz = TX_ENTRIES * sizeof(struct txp_tx_desc); + /* Boot record. */ + error = txp_dma_alloc(sc, "boot record", + &sc->sc_cdata.txp_boot_tag, sizeof(uint32_t), 0, + &sc->sc_cdata.txp_boot_map, (void **)&sc->sc_ldata.txp_boot, + sizeof(struct txp_boot_record), + &sc->sc_ldata.txp_boot_paddr); + if (error != 0) + return (error); + boot = sc->sc_ldata.txp_boot; + sc->sc_boot = boot; + + /* Host variables. */ + error = txp_dma_alloc(sc, "host variables", + &sc->sc_cdata.txp_hostvar_tag, sizeof(uint32_t), 0, + &sc->sc_cdata.txp_hostvar_map, (void **)&sc->sc_ldata.txp_hostvar, + sizeof(struct txp_hostvar), + &sc->sc_ldata.txp_hostvar_paddr); + if (error != 0) + return (error); + boot->br_hostvar_lo = + htole32(TXP_ADDR_LO(sc->sc_ldata.txp_hostvar_paddr)); + boot->br_hostvar_hi = + htole32(TXP_ADDR_HI(sc->sc_ldata.txp_hostvar_paddr)); + sc->sc_hostvar = sc->sc_ldata.txp_hostvar; + + /* Hi priority tx ring. 
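+ * This ring is serviced through TXP_H2A_1 (r_reg below); the low priority Tx ring uses TXP_H2A_3.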
*/ + error = txp_dma_alloc(sc, "hi priority tx ring", + &sc->sc_cdata.txp_txhiring_tag, sizeof(struct txp_tx_desc), 0, + &sc->sc_cdata.txp_txhiring_map, (void **)&sc->sc_ldata.txp_txhiring, + sizeof(struct txp_tx_desc) * TX_ENTRIES, + &sc->sc_ldata.txp_txhiring_paddr); + if (error != 0) + return (error); + boot->br_txhipri_lo = + htole32(TXP_ADDR_LO(sc->sc_ldata.txp_txhiring_paddr)); + boot->br_txhipri_hi = + htole32(TXP_ADDR_HI(sc->sc_ldata.txp_txhiring_paddr)); + boot->br_txhipri_siz = + htole32(TX_ENTRIES * sizeof(struct txp_tx_desc)); + sc->sc_txhir.r_tag = sc->sc_cdata.txp_txhiring_tag; + sc->sc_txhir.r_map = sc->sc_cdata.txp_txhiring_map; sc->sc_txhir.r_reg = TXP_H2A_1; - sc->sc_txhir.r_desc = (struct txp_tx_desc *)&ld->txp_txhiring; + sc->sc_txhir.r_desc = sc->sc_ldata.txp_txhiring; sc->sc_txhir.r_cons = sc->sc_txhir.r_prod = sc->sc_txhir.r_cnt = 0; sc->sc_txhir.r_off = &sc->sc_hostvar->hv_tx_hi_desc_read_idx; - /* lo priority tx ring */ - boot->br_txlopri_lo = vtophys(&ld->txp_txloring); - boot->br_txlopri_hi = 0; - boot->br_txlopri_siz = TX_ENTRIES * sizeof(struct txp_tx_desc); + /* Low priority tx ring. */ + error = txp_dma_alloc(sc, "low priority tx ring", + &sc->sc_cdata.txp_txloring_tag, sizeof(struct txp_tx_desc), 0, + &sc->sc_cdata.txp_txloring_map, (void **)&sc->sc_ldata.txp_txloring, + sizeof(struct txp_tx_desc) * TX_ENTRIES, + &sc->sc_ldata.txp_txloring_paddr); + if (error != 0) + return (error); + boot->br_txlopri_lo = + htole32(TXP_ADDR_LO(sc->sc_ldata.txp_txloring_paddr)); + boot->br_txlopri_hi = + htole32(TXP_ADDR_HI(sc->sc_ldata.txp_txloring_paddr)); + boot->br_txlopri_siz = + htole32(TX_ENTRIES * sizeof(struct txp_tx_desc)); + sc->sc_txlor.r_tag = sc->sc_cdata.txp_txloring_tag; + sc->sc_txlor.r_map = sc->sc_cdata.txp_txloring_map; sc->sc_txlor.r_reg = TXP_H2A_3; - sc->sc_txlor.r_desc = (struct txp_tx_desc *)&ld->txp_txloring; + sc->sc_txlor.r_desc = sc->sc_ldata.txp_txloring; sc->sc_txlor.r_cons = sc->sc_txlor.r_prod = sc->sc_txlor.r_cnt = 0; sc->sc_txlor.r_off = &sc->sc_hostvar->hv_tx_lo_desc_read_idx; - /* high priority rx ring */ - boot->br_rxhipri_lo = vtophys(&ld->txp_rxhiring); - boot->br_rxhipri_hi = 0; - boot->br_rxhipri_siz = RX_ENTRIES * sizeof(struct txp_rx_desc); - sc->sc_rxhir.r_desc = (struct txp_rx_desc *)&ld->txp_rxhiring; + /* High priority rx ring. */ + error = txp_dma_alloc(sc, "hi priority rx ring", + &sc->sc_cdata.txp_rxhiring_tag, sizeof(struct txp_rx_desc), 0, + &sc->sc_cdata.txp_rxhiring_map, (void **)&sc->sc_ldata.txp_rxhiring, + sizeof(struct txp_rx_desc) * RX_ENTRIES, + &sc->sc_ldata.txp_rxhiring_paddr); + if (error != 0) + return (error); + boot->br_rxhipri_lo = + htole32(TXP_ADDR_LO(sc->sc_ldata.txp_rxhiring_paddr)); + boot->br_rxhipri_hi = + htole32(TXP_ADDR_HI(sc->sc_ldata.txp_rxhiring_paddr)); + boot->br_rxhipri_siz = + htole32(RX_ENTRIES * sizeof(struct txp_rx_desc)); + sc->sc_rxhir.r_tag = sc->sc_cdata.txp_rxhiring_tag; + sc->sc_rxhir.r_map = sc->sc_cdata.txp_rxhiring_map; + sc->sc_rxhir.r_desc = sc->sc_ldata.txp_rxhiring; sc->sc_rxhir.r_roff = &sc->sc_hostvar->hv_rx_hi_read_idx; sc->sc_rxhir.r_woff = &sc->sc_hostvar->hv_rx_hi_write_idx; - /* low priority rx ring */ - boot->br_rxlopri_lo = vtophys(&ld->txp_rxloring); - boot->br_rxlopri_hi = 0; - boot->br_rxlopri_siz = RX_ENTRIES * sizeof(struct txp_rx_desc); - sc->sc_rxlor.r_desc = (struct txp_rx_desc *)&ld->txp_rxloring; + /* Low priority rx ring. 
*/ + error = txp_dma_alloc(sc, "low priority rx ring", + &sc->sc_cdata.txp_rxloring_tag, sizeof(struct txp_rx_desc), 0, + &sc->sc_cdata.txp_rxloring_map, (void **)&sc->sc_ldata.txp_rxloring, + sizeof(struct txp_rx_desc) * RX_ENTRIES, + &sc->sc_ldata.txp_rxloring_paddr); + if (error != 0) + return (error); + boot->br_rxlopri_lo = + htole32(TXP_ADDR_LO(sc->sc_ldata.txp_rxloring_paddr)); + boot->br_rxlopri_hi = + htole32(TXP_ADDR_HI(sc->sc_ldata.txp_rxloring_paddr)); + boot->br_rxlopri_siz = + htole32(RX_ENTRIES * sizeof(struct txp_rx_desc)); + sc->sc_rxlor.r_tag = sc->sc_cdata.txp_rxloring_tag; + sc->sc_rxlor.r_map = sc->sc_cdata.txp_rxloring_map; + sc->sc_rxlor.r_desc = sc->sc_ldata.txp_rxloring; sc->sc_rxlor.r_roff = &sc->sc_hostvar->hv_rx_lo_read_idx; sc->sc_rxlor.r_woff = &sc->sc_hostvar->hv_rx_lo_write_idx; - /* command ring */ - bzero(&ld->txp_cmdring, sizeof(struct txp_cmd_desc) * CMD_ENTRIES); - boot->br_cmd_lo = vtophys(&ld->txp_cmdring); - boot->br_cmd_hi = 0; - boot->br_cmd_siz = CMD_ENTRIES * sizeof(struct txp_cmd_desc); - sc->sc_cmdring.base = (struct txp_cmd_desc *)&ld->txp_cmdring; + /* Command ring. */ + error = txp_dma_alloc(sc, "command ring", + &sc->sc_cdata.txp_cmdring_tag, sizeof(struct txp_cmd_desc), 0, + &sc->sc_cdata.txp_cmdring_map, (void **)&sc->sc_ldata.txp_cmdring, + sizeof(struct txp_cmd_desc) * CMD_ENTRIES, + &sc->sc_ldata.txp_cmdring_paddr); + if (error != 0) + return (error); + boot->br_cmd_lo = htole32(TXP_ADDR_LO(sc->sc_ldata.txp_cmdring_paddr)); + boot->br_cmd_hi = htole32(TXP_ADDR_HI(sc->sc_ldata.txp_cmdring_paddr)); + boot->br_cmd_siz = htole32(CMD_ENTRIES * sizeof(struct txp_cmd_desc)); + sc->sc_cmdring.base = sc->sc_ldata.txp_cmdring; sc->sc_cmdring.size = CMD_ENTRIES * sizeof(struct txp_cmd_desc); sc->sc_cmdring.lastwrite = 0; - /* response ring */ - bzero(&ld->txp_rspring, sizeof(struct txp_rsp_desc) * RSP_ENTRIES); - boot->br_resp_lo = vtophys(&ld->txp_rspring); - boot->br_resp_hi = 0; - boot->br_resp_siz = CMD_ENTRIES * sizeof(struct txp_rsp_desc); - sc->sc_rspring.base = (struct txp_rsp_desc *)&ld->txp_rspring; + /* Response ring. */ + error = txp_dma_alloc(sc, "response ring", + &sc->sc_cdata.txp_rspring_tag, sizeof(struct txp_rsp_desc), 0, + &sc->sc_cdata.txp_rspring_map, (void **)&sc->sc_ldata.txp_rspring, + sizeof(struct txp_rsp_desc) * RSP_ENTRIES, + &sc->sc_ldata.txp_rspring_paddr); + if (error != 0) + return (error); + boot->br_resp_lo = htole32(TXP_ADDR_LO(sc->sc_ldata.txp_rspring_paddr)); + boot->br_resp_hi = htole32(TXP_ADDR_HI(sc->sc_ldata.txp_rspring_paddr)); + boot->br_resp_siz = htole32(RSP_ENTRIES * sizeof(struct txp_rsp_desc)); + sc->sc_rspring.base = sc->sc_ldata.txp_rspring; sc->sc_rspring.size = RSP_ENTRIES * sizeof(struct txp_rsp_desc); sc->sc_rspring.lastwrite = 0; - /* receive buffer ring */ - boot->br_rxbuf_lo = vtophys(&ld->txp_rxbufs); - boot->br_rxbuf_hi = 0; - boot->br_rxbuf_siz = RXBUF_ENTRIES * sizeof(struct txp_rxbuf_desc); - sc->sc_rxbufs = (struct txp_rxbuf_desc *)&ld->txp_rxbufs; + /* Receive buffer ring. 
*/ + error = txp_dma_alloc(sc, "receive buffer ring", + &sc->sc_cdata.txp_rxbufs_tag, sizeof(struct txp_rxbuf_desc), 0, + &sc->sc_cdata.txp_rxbufs_map, (void **)&sc->sc_ldata.txp_rxbufs, + sizeof(struct txp_rxbuf_desc) * RXBUF_ENTRIES, + &sc->sc_ldata.txp_rxbufs_paddr); + if (error != 0) + return (error); + boot->br_rxbuf_lo = + htole32(TXP_ADDR_LO(sc->sc_ldata.txp_rxbufs_paddr)); + boot->br_rxbuf_hi = + htole32(TXP_ADDR_HI(sc->sc_ldata.txp_rxbufs_paddr)); + boot->br_rxbuf_siz = + htole32(RXBUF_ENTRIES * sizeof(struct txp_rxbuf_desc)); + sc->sc_rxbufs = sc->sc_ldata.txp_rxbufs; + /* Zero ring. */ + error = txp_dma_alloc(sc, "zero buffer", + &sc->sc_cdata.txp_zero_tag, sizeof(uint32_t), 0, + &sc->sc_cdata.txp_zero_map, (void **)&sc->sc_ldata.txp_zero, + sizeof(uint32_t), &sc->sc_ldata.txp_zero_paddr); + if (error != 0) + return (error); + boot->br_zero_lo = htole32(TXP_ADDR_LO(sc->sc_ldata.txp_zero_paddr)); + boot->br_zero_hi = htole32(TXP_ADDR_HI(sc->sc_ldata.txp_zero_paddr)); + + bus_dmamap_sync(sc->sc_cdata.txp_boot_tag, sc->sc_cdata.txp_boot_map, + BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); + + /* Create Tx buffers. */ + error = bus_dma_tag_create( + sc->sc_cdata.txp_parent_tag, /* parent */ + 1, 0, /* algnmnt, boundary */ + BUS_SPACE_MAXADDR, /* lowaddr */ + BUS_SPACE_MAXADDR, /* highaddr */ + NULL, NULL, /* filter, filterarg */ + MCLBYTES * TXP_MAXTXSEGS, /* maxsize */ + TXP_MAXTXSEGS, /* nsegments */ + MCLBYTES, /* maxsegsize */ + 0, /* flags */ + NULL, NULL, /* lockfunc, lockarg */ + &sc->sc_cdata.txp_tx_tag); + if (error != 0) { + device_printf(sc->sc_dev, "could not create Tx DMA tag.\n"); + goto fail; + } + + /* Create tag for Rx buffers. */ + error = bus_dma_tag_create( + sc->sc_cdata.txp_parent_tag, /* parent */ + TXP_RXBUF_ALIGN, 0, /* algnmnt, boundary */ + BUS_SPACE_MAXADDR, /* lowaddr */ + BUS_SPACE_MAXADDR, /* highaddr */ + NULL, NULL, /* filter, filterarg */ + MCLBYTES, /* maxsize */ + 1, /* nsegments */ + MCLBYTES, /* maxsegsize */ + 0, /* flags */ + NULL, NULL, /* lockfunc, lockarg */ + &sc->sc_cdata.txp_rx_tag); + if (error != 0) { + device_printf(sc->sc_dev, "could not create Rx DMA tag.\n"); + goto fail; + } + + /* Create DMA maps for Tx buffers. */ + for (i = 0; i < TX_ENTRIES; i++) { + txd = &sc->sc_txd[i]; + txd->sd_mbuf = NULL; + txd->sd_map = NULL; + error = bus_dmamap_create(sc->sc_cdata.txp_tx_tag, 0, + &txd->sd_map); + if (error != 0) { + device_printf(sc->sc_dev, + "could not create Tx dmamap.\n"); + goto fail; + } + } + + /* Create DMA maps for Rx buffers. */ for (i = 0; i < RXBUF_ENTRIES; i++) { - struct txp_swdesc *sd; - if (sc->sc_rxbufs[i].rb_sd != NULL) - continue; - sc->sc_rxbufs[i].rb_sd = malloc(sizeof(struct txp_swdesc), - M_DEVBUF, M_NOWAIT); - if (sc->sc_rxbufs[i].rb_sd == NULL) - return (ENOBUFS); - sd = sc->sc_rxbufs[i].rb_sd; + sd = malloc(sizeof(struct txp_rx_swdesc), M_DEVBUF, + M_NOWAIT | M_ZERO); + if (sd == NULL) { + error = ENOMEM; + goto fail; + } + /* + * The virtual address part of the descriptor is not used + * by hardware so use that to save a ring entry. We + * need bcopy here; otherwise the address wouldn't be + * valid on big-endian architectures.
+ */ + rbd = sc->sc_rxbufs + i; + bcopy(&sd, (u_long *)&rbd->rb_vaddrlo, sizeof(sd)); sd->sd_mbuf = NULL; + sd->sd_map = NULL; + error = bus_dmamap_create(sc->sc_cdata.txp_rx_tag, 0, + &sd->sd_map); + if (error != 0) { + device_printf(sc->sc_dev, + "could not create Rx dmamap.\n"); + goto fail; + } + TAILQ_INSERT_TAIL(&sc->sc_free_list, sd, sd_next); } + +fail: + return (error); +} + +static void +txp_init_rings(struct txp_softc *sc) +{ + + bzero(sc->sc_ldata.txp_hostvar, sizeof(struct txp_hostvar)); + bzero(sc->sc_ldata.txp_zero, sizeof(uint32_t)); + sc->sc_txhir.r_cons = 0; + sc->sc_txhir.r_prod = 0; + sc->sc_txhir.r_cnt = 0; + sc->sc_txlor.r_cons = 0; + sc->sc_txlor.r_prod = 0; + sc->sc_txlor.r_cnt = 0; + sc->sc_cmdring.lastwrite = 0; + sc->sc_rspring.lastwrite = 0; sc->sc_rxbufprod = 0; + bus_dmamap_sync(sc->sc_cdata.txp_hostvar_tag, + sc->sc_cdata.txp_hostvar_map, + BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); +} - /* zero dma */ - bzero(&ld->txp_zero, sizeof(uint32_t)); - boot->br_zero_lo = vtophys(&ld->txp_zero); - boot->br_zero_hi = 0; +static int +txp_wait(struct txp_softc *sc, uint32_t state) +{ + uint32_t reg; + int i; - /* See if it's waiting for boot, and try to boot it */ - for (i = 0; i < 10000; i++) { - r = READ_REG(sc, TXP_A2H_0); - if (r == STAT_WAITING_FOR_BOOT) + for (i = 0; i < TXP_TIMEOUT; i++) { + reg = READ_REG(sc, TXP_A2H_0); + if (reg == state) break; DELAY(50); } - if (r != STAT_WAITING_FOR_BOOT) { - device_printf(sc->sc_dev, "not waiting for boot\n"); - return (ENXIO); - } + return (i == TXP_TIMEOUT ? ETIMEDOUT : 0); +} - WRITE_REG(sc, TXP_H2A_2, 0); - WRITE_REG(sc, TXP_H2A_1, vtophys(sc->sc_boot)); - WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_REGISTER_BOOT_RECORD); +static void +txp_free_rings(struct txp_softc *sc) +{ + struct txp_swdesc *txd; + struct txp_rx_swdesc *sd; + int i; - /* See if it booted */ - for (i = 0; i < 10000; i++) { - r = READ_REG(sc, TXP_A2H_0); - if (r == STAT_RUNNING) - break; - DELAY(50); + /* Tx buffers. */ + if (sc->sc_cdata.txp_tx_tag != NULL) { + for (i = 0; i < TX_ENTRIES; i++) { + txd = &sc->sc_txd[i]; + if (txd->sd_map != NULL) { + bus_dmamap_destroy(sc->sc_cdata.txp_tx_tag, + txd->sd_map); + txd->sd_map = NULL; + } + } + bus_dma_tag_destroy(sc->sc_cdata.txp_tx_tag); + sc->sc_cdata.txp_tx_tag = NULL; } - if (r != STAT_RUNNING) { - device_printf(sc->sc_dev, "fw not running\n"); - return (ENXIO); + /* Rx buffers. */ + if (sc->sc_cdata.txp_rx_tag != NULL) { + if (sc->sc_rxbufs != NULL) { + KASSERT(TAILQ_FIRST(&sc->sc_busy_list) == NULL, + ("%s : still have busy Rx buffers", __func__)); + while ((sd = TAILQ_FIRST(&sc->sc_free_list)) != NULL) { + TAILQ_REMOVE(&sc->sc_free_list, sd, sd_next); + if (sd->sd_map != NULL) { + bus_dmamap_destroy( + sc->sc_cdata.txp_rx_tag, + sd->sd_map); + sd->sd_map = NULL; + } + free(sd, M_DEVBUF); + } + } + bus_dma_tag_destroy(sc->sc_cdata.txp_rx_tag); + sc->sc_cdata.txp_rx_tag = NULL; } - /* Clear TX and CMD ring write registers */ - WRITE_REG(sc, TXP_H2A_1, TXP_BOOTCMD_NULL); - WRITE_REG(sc, TXP_H2A_2, TXP_BOOTCMD_NULL); - WRITE_REG(sc, TXP_H2A_3, TXP_BOOTCMD_NULL); - WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_NULL); + /* Hi priority Tx ring. */ + txp_dma_free(sc, &sc->sc_cdata.txp_txhiring_tag, + &sc->sc_cdata.txp_txhiring_map, + (void **)&sc->sc_ldata.txp_txhiring); + /* Low priority Tx ring. */ + txp_dma_free(sc, &sc->sc_cdata.txp_txloring_tag, + &sc->sc_cdata.txp_txloring_map, + (void **)&sc->sc_ldata.txp_txloring); + /* Hi priority Rx ring. 
*/ + txp_dma_free(sc, &sc->sc_cdata.txp_rxhiring_tag, + &sc->sc_cdata.txp_rxhiring_map, + (void **)&sc->sc_ldata.txp_rxhiring); + /* Low priority Rx ring. */ + txp_dma_free(sc, &sc->sc_cdata.txp_rxloring_tag, + &sc->sc_cdata.txp_rxloring_map, + (void **)&sc->sc_ldata.txp_rxloring); + /* Receive buffer ring. */ + txp_dma_free(sc, &sc->sc_cdata.txp_rxbufs_tag, + &sc->sc_cdata.txp_rxbufs_map, (void **)&sc->sc_ldata.txp_rxbufs); + /* Command ring. */ + txp_dma_free(sc, &sc->sc_cdata.txp_cmdring_tag, + &sc->sc_cdata.txp_cmdring_map, (void **)&sc->sc_ldata.txp_cmdring); + /* Response ring. */ + txp_dma_free(sc, &sc->sc_cdata.txp_rspring_tag, + &sc->sc_cdata.txp_rspring_map, (void **)&sc->sc_ldata.txp_rspring); + /* Zero ring. */ + txp_dma_free(sc, &sc->sc_cdata.txp_zero_tag, + &sc->sc_cdata.txp_zero_map, (void **)&sc->sc_ldata.txp_zero); + /* Host variables. */ + txp_dma_free(sc, &sc->sc_cdata.txp_hostvar_tag, + &sc->sc_cdata.txp_hostvar_map, (void **)&sc->sc_ldata.txp_hostvar); + /* Boot record. */ + txp_dma_free(sc, &sc->sc_cdata.txp_boot_tag, + &sc->sc_cdata.txp_boot_map, (void **)&sc->sc_ldata.txp_boot); - return (0); + if (sc->sc_cdata.txp_parent_tag != NULL) { + bus_dma_tag_destroy(sc->sc_cdata.txp_parent_tag); + sc->sc_cdata.txp_parent_tag = NULL; + } + } static int txp_ioctl(struct ifnet *ifp, u_long command, caddr_t data) { struct txp_softc *sc = ifp->if_softc; struct ifreq *ifr = (struct ifreq *)data; - int error = 0; + int capenable, error = 0, mask; - switch (command) { + switch(command) { case SIOCSIFFLAGS: TXP_LOCK(sc); - if (ifp->if_flags & IFF_UP) { - txp_init_locked(sc); + if ((ifp->if_flags & IFF_UP) != 0) { + if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { + if (((ifp->if_flags ^ sc->sc_if_flags) + & (IFF_PROMISC | IFF_ALLMULTI)) != 0) + txp_set_filter(sc); + } else { + if ((sc->sc_flags & TXP_FLAG_DETACH) == 0) + txp_init_locked(sc); + } } else { - if (ifp->if_drv_flags & IFF_DRV_RUNNING) + if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) txp_stop(sc); } + sc->sc_if_flags = ifp->if_flags; TXP_UNLOCK(sc); break; case SIOCADDMULTI: case SIOCDELMULTI: /* * Multicast list has changed; set the hardware * filter accordingly. 
*/ TXP_LOCK(sc); - txp_set_filter(sc); + if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) + txp_set_filter(sc); TXP_UNLOCK(sc); - error = 0; break; + case SIOCSIFCAP: + TXP_LOCK(sc); + capenable = ifp->if_capenable; + mask = ifr->ifr_reqcap ^ ifp->if_capenable; + if ((mask & IFCAP_TXCSUM) != 0 && + (ifp->if_capabilities & IFCAP_TXCSUM) != 0) { + ifp->if_capenable ^= IFCAP_TXCSUM; + if ((ifp->if_capenable & IFCAP_TXCSUM) != 0) + ifp->if_hwassist |= TXP_CSUM_FEATURES; + else + ifp->if_hwassist &= ~TXP_CSUM_FEATURES; + } + if ((mask & IFCAP_RXCSUM) != 0 && + (ifp->if_capabilities & IFCAP_RXCSUM) != 0) + ifp->if_capenable ^= IFCAP_RXCSUM; + if ((mask & IFCAP_WOL_MAGIC) != 0 && + (ifp->if_capabilities & IFCAP_WOL_MAGIC) != 0) + ifp->if_capenable ^= IFCAP_WOL_MAGIC; + if ((mask & IFCAP_VLAN_HWTAGGING) != 0 && + (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) != 0) + ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; + if ((mask & IFCAP_VLAN_HWCSUM) != 0 && + (ifp->if_capabilities & IFCAP_VLAN_HWCSUM) != 0) + ifp->if_capenable ^= IFCAP_VLAN_HWCSUM; + if ((ifp->if_capenable & IFCAP_TXCSUM) == 0) + ifp->if_capenable &= ~IFCAP_VLAN_HWCSUM; + if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0) + ifp->if_capenable &= ~IFCAP_VLAN_HWCSUM; + if (capenable != ifp->if_capenable) + txp_set_capabilities(sc); + TXP_UNLOCK(sc); + VLAN_CAPABILITIES(ifp); + break; case SIOCGIFMEDIA: case SIOCSIFMEDIA: error = ifmedia_ioctl(ifp, ifr, &sc->sc_ifmedia, command); break; default: error = ether_ioctl(ifp, command, data); break; } return (error); } static int txp_rxring_fill(struct txp_softc *sc) { - int i; - struct ifnet *ifp; - struct txp_swdesc *sd; + struct txp_rxbuf_desc *rbd; + struct txp_rx_swdesc *sd; + bus_dma_segment_t segs[1]; + int error, i, nsegs; TXP_LOCK_ASSERT(sc); - ifp = sc->sc_ifp; + bus_dmamap_sync(sc->sc_cdata.txp_rxbufs_tag, + sc->sc_cdata.txp_rxbufs_map, + BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); + for (i = 0; i < RXBUF_ENTRIES; i++) { - sd = sc->sc_rxbufs[i].rb_sd; + sd = TAILQ_FIRST(&sc->sc_free_list); + if (sd == NULL) + return (ENOMEM); + rbd = sc->sc_rxbufs + i; + bcopy(&sd, (u_long *)&rbd->rb_vaddrlo, sizeof(sd)); + KASSERT(sd->sd_mbuf == NULL, + ("%s : Rx buffer ring corrupted", __func__)); sd->sd_mbuf = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); if (sd->sd_mbuf == NULL) - return (ENOBUFS); - + return (ENOMEM); sd->sd_mbuf->m_pkthdr.len = sd->sd_mbuf->m_len = MCLBYTES; - sd->sd_mbuf->m_pkthdr.rcvif = ifp; - - sc->sc_rxbufs[i].rb_paddrlo = - vtophys(mtod(sd->sd_mbuf, vm_offset_t)); - sc->sc_rxbufs[i].rb_paddrhi = 0; +#ifndef __NO_STRICT_ALIGNMENT + m_adj(sd->sd_mbuf, TXP_RXBUF_ALIGN); +#endif + if ((error = bus_dmamap_load_mbuf_sg(sc->sc_cdata.txp_rx_tag, + sd->sd_map, sd->sd_mbuf, segs, &nsegs, 0)) != 0) { + m_freem(sd->sd_mbuf); + sd->sd_mbuf = NULL; + return (error); + } + KASSERT(nsegs == 1, ("%s : %d segments returned!", __func__, + nsegs)); + TAILQ_REMOVE(&sc->sc_free_list, sd, sd_next); + TAILQ_INSERT_TAIL(&sc->sc_busy_list, sd, sd_next); + bus_dmamap_sync(sc->sc_cdata.txp_rx_tag, sd->sd_map, + BUS_DMASYNC_PREREAD); + rbd->rb_paddrlo = htole32(TXP_ADDR_LO(segs[0].ds_addr)); + rbd->rb_paddrhi = htole32(TXP_ADDR_HI(segs[0].ds_addr)); } - sc->sc_hostvar->hv_rx_buf_write_idx = (RXBUF_ENTRIES - 1) * - sizeof(struct txp_rxbuf_desc); + bus_dmamap_sync(sc->sc_cdata.txp_rxbufs_tag, + sc->sc_cdata.txp_rxbufs_map, + BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); + sc->sc_rxbufprod = RXBUF_ENTRIES - 1; + sc->sc_hostvar->hv_rx_buf_write_idx = + htole32(TXP_IDX2OFFSET(RXBUF_ENTRIES - 1)); return (0); } 
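The fill loop above depends on a detail noted earlier in the attach path: the rb_vaddrlo/rb_vaddrhi words of a receive buffer descriptor are never interpreted by the firmware, so the host-side software descriptor pointer can ride along inside the shared descriptor and be recovered when the buffer completes. A minimal sketch of that round trip (rxd_stash/rxd_recover are illustrative names, not functions in the driver); bcopy'ing the pointer object byte-for-byte, rather than assigning truncated 32-bit halves, is what keeps the value intact for any pointer size and byte order:

static __inline void
rxd_stash(struct txp_rxbuf_desc *rbd, struct txp_rx_swdesc *sd)
{
	/* Hide the host pointer in descriptor words the NIC ignores. */
	bcopy(&sd, (u_long *)&rbd->rb_vaddrlo, sizeof(sd));
}

static __inline struct txp_rx_swdesc *
rxd_recover(struct txp_rxbuf_desc *rbd)
{
	struct txp_rx_swdesc *sd;

	/* Read the pointer bytes back exactly as they were stored. */
	bcopy((u_long *)&rbd->rb_vaddrlo, &sd, sizeof(sd));
	return (sd);
}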
static void txp_rxring_empty(struct txp_softc *sc) { - int i; - struct txp_swdesc *sd; + struct txp_rx_swdesc *sd; + int cnt; TXP_LOCK_ASSERT(sc); + if (sc->sc_rxbufs == NULL) return; + bus_dmamap_sync(sc->sc_cdata.txp_hostvar_tag, + sc->sc_cdata.txp_hostvar_map, + BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); - for (i = 0; i < RXBUF_ENTRIES; i++) { - if (&sc->sc_rxbufs[i] == NULL) - continue; - sd = sc->sc_rxbufs[i].rb_sd; - if (sd == NULL) - continue; - if (sd->sd_mbuf != NULL) { - m_freem(sd->sd_mbuf); - sd->sd_mbuf = NULL; - } + /* Release allocated Rx buffers. */ + cnt = 0; + while ((sd = TAILQ_FIRST(&sc->sc_busy_list)) != NULL) { + TAILQ_REMOVE(&sc->sc_busy_list, sd, sd_next); + KASSERT(sd->sd_mbuf != NULL, + ("%s : Rx buffer ring corrupted", __func__)); + bus_dmamap_sync(sc->sc_cdata.txp_rx_tag, sd->sd_map, + BUS_DMASYNC_POSTREAD); + bus_dmamap_unload(sc->sc_cdata.txp_rx_tag, sd->sd_map); + m_freem(sd->sd_mbuf); + sd->sd_mbuf = NULL; + TAILQ_INSERT_TAIL(&sc->sc_free_list, sd, sd_next); + cnt++; } } static void txp_init(void *xsc) { struct txp_softc *sc; sc = xsc; TXP_LOCK(sc); txp_init_locked(sc); TXP_UNLOCK(sc); } static void txp_init_locked(struct txp_softc *sc) { struct ifnet *ifp; + uint8_t *eaddr; uint16_t p1; uint32_t p2; + int error; TXP_LOCK_ASSERT(sc); ifp = sc->sc_ifp; - if (ifp->if_drv_flags & IFF_DRV_RUNNING) + if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) return; - txp_stop(sc); + /* Initialize ring structure. */ + txp_init_rings(sc); + /* Wakeup controller. */ + WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_WAKEUP); + TXP_BARRIER(sc, TXP_H2A_0, 4, BUS_SPACE_BARRIER_WRITE); + /* + * It seems that an earlier NV image can come back online from + * the wakeup command but newer ones require a controller reset. + * So just reset the controller again. + */ + if (txp_reset(sc) != 0) + goto init_fail; + /* Download firmware. */ + error = txp_download_fw(sc); + if (error != 0) { + device_printf(sc->sc_dev, "could not download firmware.\n"); + goto init_fail; + } + bus_dmamap_sync(sc->sc_cdata.txp_hostvar_tag, + sc->sc_cdata.txp_hostvar_map, + BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); + if ((error = txp_rxring_fill(sc)) != 0) { + device_printf(sc->sc_dev, "no memory for Rx buffers.\n"); + goto init_fail; + } + bus_dmamap_sync(sc->sc_cdata.txp_hostvar_tag, + sc->sc_cdata.txp_hostvar_map, + BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); + if (txp_boot(sc, STAT_WAITING_FOR_BOOT) != 0) { + device_printf(sc->sc_dev, "could not boot firmware.\n"); + goto init_fail; + } - txp_command(sc, TXP_CMD_MAX_PKT_SIZE_WRITE, TXP_MAX_PKTLEN, 0, 0, - NULL, NULL, NULL, 1); + /* + * Quite contrary to the Typhoon T2 software functional specification, + * it seems that the TXP_CMD_RECV_BUFFER_CONTROL command is not + * implemented in the firmware. This means the driver has to + * handle misaligned frames on strict-alignment architectures. AFAIK this + * is the only controller manufactured by 3Com that has this stupid + * bug. 3Com should fix this. + */ + if (txp_command(sc, TXP_CMD_MAX_PKT_SIZE_WRITE, TXP_MAX_PKTLEN, 0, 0, + NULL, NULL, NULL, TXP_CMD_NOWAIT) != 0) + goto init_fail; + /* Undocumented command (interrupt coalescing disable?) - from Linux. */ + if (txp_command(sc, TXP_CMD_FILTER_DEFINE, 0, 0, 0, NULL, NULL, NULL, + TXP_CMD_NOWAIT) != 0) + goto init_fail; /* Set station address.
*/ - ((uint8_t *)&p1)[1] = IF_LLADDR(sc->sc_ifp)[0]; - ((uint8_t *)&p1)[0] = IF_LLADDR(sc->sc_ifp)[1]; - ((uint8_t *)&p2)[3] = IF_LLADDR(sc->sc_ifp)[2]; - ((uint8_t *)&p2)[2] = IF_LLADDR(sc->sc_ifp)[3]; - ((uint8_t *)&p2)[1] = IF_LLADDR(sc->sc_ifp)[4]; - ((uint8_t *)&p2)[0] = IF_LLADDR(sc->sc_ifp)[5]; - txp_command(sc, TXP_CMD_STATION_ADDRESS_WRITE, p1, p2, 0, - NULL, NULL, NULL, 1); + eaddr = IF_LLADDR(sc->sc_ifp); + p1 = 0; + ((uint8_t *)&p1)[1] = eaddr[0]; + ((uint8_t *)&p1)[0] = eaddr[1]; + p1 = le16toh(p1); + ((uint8_t *)&p2)[3] = eaddr[2]; + ((uint8_t *)&p2)[2] = eaddr[3]; + ((uint8_t *)&p2)[1] = eaddr[4]; + ((uint8_t *)&p2)[0] = eaddr[5]; + p2 = le32toh(p2); + if (txp_command(sc, TXP_CMD_STATION_ADDRESS_WRITE, p1, p2, 0, + NULL, NULL, NULL, TXP_CMD_NOWAIT) != 0) + goto init_fail; txp_set_filter(sc); + txp_set_capabilities(sc); - txp_rxring_fill(sc); + if (txp_command(sc, TXP_CMD_CLEAR_STATISTICS, 0, 0, 0, + NULL, NULL, NULL, TXP_CMD_NOWAIT)) + goto init_fail; + if (txp_command(sc, TXP_CMD_XCVR_SELECT, sc->sc_xcvr, 0, 0, + NULL, NULL, NULL, TXP_CMD_NOWAIT) != 0) + goto init_fail; + if (txp_command(sc, TXP_CMD_TX_ENABLE, 0, 0, 0, NULL, NULL, NULL, + TXP_CMD_NOWAIT) != 0) + goto init_fail; + if (txp_command(sc, TXP_CMD_RX_ENABLE, 0, 0, 0, NULL, NULL, NULL, + TXP_CMD_NOWAIT) != 0) + goto init_fail; - txp_command(sc, TXP_CMD_TX_ENABLE, 0, 0, 0, NULL, NULL, NULL, 1); - txp_command(sc, TXP_CMD_RX_ENABLE, 0, 0, 0, NULL, NULL, NULL, 1); + /* Ack all pending interrupts and enable interrupts. */ + WRITE_REG(sc, TXP_ISR, TXP_INTR_ALL); + WRITE_REG(sc, TXP_IER, TXP_INTRS); + WRITE_REG(sc, TXP_IMR, TXP_INTR_NONE); - WRITE_REG(sc, TXP_IER, TXP_INT_RESERVED | TXP_INT_SELF | - TXP_INT_A2H_7 | TXP_INT_A2H_6 | TXP_INT_A2H_5 | TXP_INT_A2H_4 | - TXP_INT_A2H_2 | TXP_INT_A2H_1 | TXP_INT_A2H_0 | - TXP_INT_DMA3 | TXP_INT_DMA2 | TXP_INT_DMA1 | TXP_INT_DMA0 | - TXP_INT_PCI_TABORT | TXP_INT_PCI_MABORT | TXP_INT_LATCH); - WRITE_REG(sc, TXP_IMR, TXP_INT_A2H_3); - ifp->if_drv_flags |= IFF_DRV_RUNNING; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; - ifp->if_timer = 0; callout_reset(&sc->sc_tick, hz, txp_tick, sc); + return; + +init_fail: + txp_rxring_empty(sc); + txp_init_rings(sc); + txp_reset(sc); + WRITE_REG(sc, TXP_IMR, TXP_INTR_ALL); } static void txp_tick(void *vsc) { - struct txp_softc *sc = vsc; - struct ifnet *ifp = sc->sc_ifp; - struct txp_rsp_desc *rsp = NULL; + struct txp_softc *sc; + struct ifnet *ifp; + struct txp_rsp_desc *rsp; struct txp_ext_desc *ext; + int link; + sc = vsc; TXP_LOCK_ASSERT(sc); + bus_dmamap_sync(sc->sc_cdata.txp_hostvar_tag, + sc->sc_cdata.txp_hostvar_map, + BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); txp_rxbuf_reclaim(sc); + bus_dmamap_sync(sc->sc_cdata.txp_hostvar_tag, + sc->sc_cdata.txp_hostvar_map, + BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); - if (txp_command2(sc, TXP_CMD_READ_STATISTICS, 0, 0, 0, NULL, 0, - &rsp, 1)) + ifp = sc->sc_ifp; + rsp = NULL; + + link = sc->sc_flags & TXP_FLAG_LINK; + if (txp_ext_command(sc, TXP_CMD_READ_STATISTICS, 0, 0, 0, NULL, 0, + &rsp, TXP_CMD_WAIT)) goto out; if (rsp->rsp_numdesc != 6) goto out; - if (txp_command(sc, TXP_CMD_CLEAR_STATISTICS, 0, 0, 0, - NULL, NULL, NULL, 1)) - goto out; - ext = (struct txp_ext_desc *)(rsp + 1); + txp_stats_update(sc, rsp); + if (link == 0 && (sc->sc_flags & TXP_FLAG_LINK) != 0) { + ext = (struct txp_ext_desc *)(rsp + 1); + /* Update baudrate with resolved speed. 
*/ + if ((ext[5].ext_2 & 0x02) != 0) + ifp->if_baudrate = IF_Mbps(100); + else + ifp->if_baudrate = IF_Mbps(10); + } - ifp->if_ierrors += ext[3].ext_2 + ext[3].ext_3 + ext[3].ext_4 + - ext[4].ext_1 + ext[4].ext_4; - ifp->if_oerrors += ext[0].ext_1 + ext[1].ext_1 + ext[1].ext_4 + - ext[2].ext_1; - ifp->if_collisions += ext[0].ext_2 + ext[0].ext_3 + ext[1].ext_2 + - ext[1].ext_3; - ifp->if_opackets += rsp->rsp_par2; - ifp->if_ipackets += ext[2].ext_3; - out: if (rsp != NULL) free(rsp, M_DEVBUF); - + txp_watchdog(sc); callout_reset(&sc->sc_tick, hz, txp_tick, sc); } static void txp_start(struct ifnet *ifp) { struct txp_softc *sc; sc = ifp->if_softc; TXP_LOCK(sc); txp_start_locked(ifp); TXP_UNLOCK(sc); } static void txp_start_locked(struct ifnet *ifp) { - struct txp_softc *sc = ifp->if_softc; - struct txp_tx_ring *r = &sc->sc_txhir; - struct txp_tx_desc *txd; - struct txp_frag_desc *fxd; - struct mbuf *m, *m0; - struct txp_swdesc *sd; - uint32_t firstprod, firstcnt, prod, cnt; + struct txp_softc *sc; + struct mbuf *m_head; + int enq; + sc = ifp->if_softc; TXP_LOCK_ASSERT(sc); + if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != - IFF_DRV_RUNNING) + IFF_DRV_RUNNING || (sc->sc_flags & TXP_FLAG_LINK) == 0) return; - prod = r->r_prod; - cnt = r->r_cnt; - - while (1) { - IF_DEQUEUE(&ifp->if_snd, m); - if (m == NULL) + for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd); ) { + IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head); + if (m_head == NULL) break; + /* + * Pack the data into the transmit ring. If we + * don't have room, set the OACTIVE flag and wait + * for the NIC to drain the ring. + * ATM only the Hi-ring is used. + */ + if (txp_encap(sc, &sc->sc_txhir, &m_head)) { + if (m_head == NULL) + break; + IFQ_DRV_PREPEND(&ifp->if_snd, m_head); + ifp->if_drv_flags |= IFF_DRV_OACTIVE; + break; + } - firstprod = prod; - firstcnt = cnt; + enq++; + /* + * If there's a BPF listener, bounce a copy of this frame + * to him. + */ + ETHER_BPF_MTAP(ifp, m_head); - sd = sc->sc_txd + prod; - sd->sd_mbuf = m; + /* Send queued frame. */ + WRITE_REG(sc, sc->sc_txhir.r_reg, + TXP_IDX2OFFSET(sc->sc_txhir.r_prod)); + } - if ((TX_ENTRIES - cnt) < 4) - goto oactive; + if (enq > 0) { + /* Set a timeout in case the chip goes out to lunch. */ + sc->sc_watchdog_timer = TXP_TX_TIMEOUT; + } +} - txd = r->r_desc + prod;
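Note that txp_start_locked above kicks the chip with a byte offset, not an element index: the producer posts TXP_IDX2OFFSET(r_prod) to the mailbox register, and the reclaim paths convert the firmware's read offset back with TXP_OFFSET2IDX. A plausible shape for these helpers, assuming the uniform 16-byte descriptor size the rings use (the authoritative definitions live in if_txpreg.h and are not shown in this hunk):

/* Hypothetical reconstruction -- see if_txpreg.h for the real macros. */
#define TXP_IDX2OFFSET(i)	((i) * sizeof(struct txp_tx_desc))
#define TXP_OFFSET2IDX(o)	((o) / sizeof(struct txp_tx_desc))
/* Advance a ring index with wrap-around, as used in txp_encap below. */
#define TXP_DESC_INC(x, y)	((x) = ((x) + 1) % (y))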
+static int +txp_encap(struct txp_softc *sc, struct txp_tx_ring *r, struct mbuf **m_head) +{ + struct txp_tx_desc *first_txd; + struct txp_frag_desc *fxd; + struct txp_swdesc *sd; + struct mbuf *m; + bus_dma_segment_t txsegs[TXP_MAXTXSEGS]; + int error, i, nsegs; - txd->tx_flags = TX_FLAGS_TYPE_DATA; - txd->tx_numdesc = 0; - txd->tx_addrlo = 0; - txd->tx_addrhi = 0; - txd->tx_totlen = 0; - txd->tx_pflags = 0; + TXP_LOCK_ASSERT(sc); - if (++prod == TX_ENTRIES) - prod = 0; + M_ASSERTPKTHDR((*m_head)); - if (++cnt >= (TX_ENTRIES - 4)) - goto oactive; + m = *m_head; + first_txd = r->r_desc + r->r_prod; + sd = sc->sc_txd + r->r_prod; - if (m->m_flags & M_VLANTAG) { - txd->tx_pflags = TX_PFLAGS_VLAN | - (htons(m->m_pkthdr.ether_vtag) << TX_PFLAGS_VLANTAG_S); + error = bus_dmamap_load_mbuf_sg(sc->sc_cdata.txp_tx_tag, sd->sd_map, + *m_head, txsegs, &nsegs, 0); + if (error == EFBIG) { + m = m_collapse(*m_head, M_DONTWAIT, TXP_MAXTXSEGS); + if (m == NULL) { + m_freem(*m_head); + *m_head = NULL; + return (ENOMEM); } + *m_head = m; + error = bus_dmamap_load_mbuf_sg(sc->sc_cdata.txp_tx_tag, + sd->sd_map, *m_head, txsegs, &nsegs, 0); + if (error != 0) { + m_freem(*m_head); + *m_head = NULL; + return (error); + } + } else if (error != 0) + return (error); + if (nsegs == 0) { + m_freem(*m_head); + *m_head = NULL; + return (EIO); + } - if (m->m_pkthdr.csum_flags & CSUM_IP) - txd->tx_pflags |= TX_PFLAGS_IPCKSUM; + /* Check descriptor overrun. */ + if (r->r_cnt + nsegs >= TX_ENTRIES - TXP_TXD_RESERVED) { + bus_dmamap_unload(sc->sc_cdata.txp_tx_tag, sd->sd_map); + return (ENOBUFS); + } + bus_dmamap_sync(sc->sc_cdata.txp_tx_tag, sd->sd_map, + BUS_DMASYNC_PREWRITE); + sd->sd_mbuf = m; -#if 0 - if (m->m_pkthdr.csum_flags & CSUM_TCP) - txd->tx_pflags |= TX_PFLAGS_TCPCKSUM; - if (m->m_pkthdr.csum_flags & CSUM_UDP) - txd->tx_pflags |= TX_PFLAGS_UDPCKSUM; + first_txd->tx_flags = TX_FLAGS_TYPE_DATA; + first_txd->tx_numdesc = 0; + first_txd->tx_addrlo = 0; + first_txd->tx_addrhi = 0; + first_txd->tx_totlen = 0; + first_txd->tx_pflags = 0; + r->r_cnt++; + TXP_DESC_INC(r->r_prod, TX_ENTRIES); + + /* Configure Tx IP/TCP/UDP checksum offload. */ + if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0) + first_txd->tx_pflags |= htole32(TX_PFLAGS_IPCKSUM); +#ifdef notyet + /* XXX firmware bug. */ + if ((m->m_pkthdr.csum_flags & CSUM_TCP) != 0) + first_txd->tx_pflags |= htole32(TX_PFLAGS_TCPCKSUM); + if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0) + first_txd->tx_pflags |= htole32(TX_PFLAGS_UDPCKSUM); #endif - fxd = (struct txp_frag_desc *)(r->r_desc + prod); - for (m0 = m; m0 != NULL; m0 = m0->m_next) { - if (m0->m_len == 0) - continue; - if (++cnt >= (TX_ENTRIES - 4)) - goto oactive; + /* Configure VLAN hardware tag insertion.
*/ + if ((m->m_flags & M_VLANTAG) != 0) + first_txd->tx_pflags |= + htole32(TX_PFLAGS_VLAN | TX_PFLAGS_PRIO | + (bswap16(m->m_pkthdr.ether_vtag) << TX_PFLAGS_VLANTAG_S)); - txd->tx_numdesc++; - - fxd->frag_flags = FRAG_FLAGS_TYPE_FRAG; - fxd->frag_rsvd1 = 0; - fxd->frag_len = m0->m_len; - fxd->frag_addrlo = vtophys(mtod(m0, vm_offset_t)); - fxd->frag_addrhi = 0; - fxd->frag_rsvd2 = 0; - - if (++prod == TX_ENTRIES) { - fxd = (struct txp_frag_desc *)r->r_desc; - prod = 0; - } else - fxd++; - - } - - ifp->if_timer = 5; - - ETHER_BPF_MTAP(ifp, m); - WRITE_REG(sc, r->r_reg, TXP_IDX2OFFSET(prod)); + for (i = 0; i < nsegs; i++) { + fxd = (struct txp_frag_desc *)(r->r_desc + r->r_prod); + fxd->frag_flags = FRAG_FLAGS_TYPE_FRAG | TX_FLAGS_VALID; + fxd->frag_rsvd1 = 0; + fxd->frag_len = htole16(txsegs[i].ds_len); + fxd->frag_addrhi = htole32(TXP_ADDR_HI(txsegs[i].ds_addr)); + fxd->frag_addrlo = htole32(TXP_ADDR_LO(txsegs[i].ds_addr)); + fxd->frag_rsvd2 = 0; + first_txd->tx_numdesc++; + r->r_cnt++; + TXP_DESC_INC(r->r_prod, TX_ENTRIES); } - r->r_prod = prod; - r->r_cnt = cnt; - return; + /* Lastly set valid flag. */ + first_txd->tx_flags |= TX_FLAGS_VALID; -oactive: - ifp->if_drv_flags |= IFF_DRV_OACTIVE; - r->r_prod = firstprod; - r->r_cnt = firstcnt; - IF_PREPEND(&ifp->if_snd, m); + /* Sync descriptors. */ + bus_dmamap_sync(r->r_tag, r->r_map, + BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); + + return (0); } /* * Handle simple commands sent to the typhoon */ static int txp_command(struct txp_softc *sc, uint16_t id, uint16_t in1, uint32_t in2, uint32_t in3, uint16_t *out1, uint32_t *out2, uint32_t *out3, int wait) { - struct txp_rsp_desc *rsp = NULL; + struct txp_rsp_desc *rsp; - if (txp_command2(sc, id, in1, in2, in3, NULL, 0, &rsp, wait)) + rsp = NULL; + if (txp_ext_command(sc, id, in1, in2, in3, NULL, 0, &rsp, wait) != 0) { + device_printf(sc->sc_dev, "command 0x%02x failed\n", id); return (-1); + } - if (!wait) + if (wait == TXP_CMD_NOWAIT) return (0); + KASSERT(rsp != NULL, ("rsp is NULL!\n")); if (out1 != NULL) - *out1 = rsp->rsp_par1; + *out1 = le16toh(rsp->rsp_par1); if (out2 != NULL) - *out2 = rsp->rsp_par2; + *out2 = le32toh(rsp->rsp_par2); if (out3 != NULL) - *out3 = rsp->rsp_par3; + *out3 = le32toh(rsp->rsp_par3); free(rsp, M_DEVBUF); return (0); } static int -txp_command2(struct txp_softc *sc, uint16_t id, uint16_t in1, uint32_t in2, +txp_ext_command(struct txp_softc *sc, uint16_t id, uint16_t in1, uint32_t in2, uint32_t in3, struct txp_ext_desc *in_extp, uint8_t in_extn, struct txp_rsp_desc **rspp, int wait) { - struct txp_hostvar *hv = sc->sc_hostvar; + struct txp_hostvar *hv; struct txp_cmd_desc *cmd; struct txp_ext_desc *ext; uint32_t idx, i; uint16_t seq; + int error; + error = 0; + hv = sc->sc_hostvar; if (txp_cmd_desc_numfree(sc) < (in_extn + 1)) { - device_printf(sc->sc_dev, "no free cmd descriptors\n"); - return (-1); + device_printf(sc->sc_dev, + "%s : out of free cmd descriptors for command 0x%02x\n", + __func__, id); + return (ENOBUFS); } + bus_dmamap_sync(sc->sc_cdata.txp_cmdring_tag, + sc->sc_cdata.txp_cmdring_map, BUS_DMASYNC_POSTWRITE); idx = sc->sc_cmdring.lastwrite; cmd = (struct txp_cmd_desc *)(((uint8_t *)sc->sc_cmdring.base) + idx); bzero(cmd, sizeof(*cmd)); cmd->cmd_numdesc = in_extn; - cmd->cmd_seq = seq = sc->sc_seq++; - cmd->cmd_id = id; - cmd->cmd_par1 = in1; - cmd->cmd_par2 = in2; - cmd->cmd_par3 = in3; + seq = sc->sc_seq++; + cmd->cmd_seq = htole16(seq); + cmd->cmd_id = htole16(id); + cmd->cmd_par1 = htole16(in1); + cmd->cmd_par2 = htole32(in2); + 
cmd->cmd_par3 = htole32(in3); cmd->cmd_flags = CMD_FLAGS_TYPE_CMD | - (wait ? CMD_FLAGS_RESP : 0) | CMD_FLAGS_VALID; + (wait == TXP_CMD_WAIT ? CMD_FLAGS_RESP : 0) | CMD_FLAGS_VALID; idx += sizeof(struct txp_cmd_desc); if (idx == sc->sc_cmdring.size) idx = 0; for (i = 0; i < in_extn; i++) { ext = (struct txp_ext_desc *)(((uint8_t *)sc->sc_cmdring.base) + idx); bcopy(in_extp, ext, sizeof(struct txp_ext_desc)); in_extp++; idx += sizeof(struct txp_cmd_desc); if (idx == sc->sc_cmdring.size) idx = 0; } sc->sc_cmdring.lastwrite = idx; - + bus_dmamap_sync(sc->sc_cdata.txp_cmdring_tag, + sc->sc_cdata.txp_cmdring_map, BUS_DMASYNC_PREWRITE); + bus_dmamap_sync(sc->sc_cdata.txp_hostvar_tag, + sc->sc_cdata.txp_hostvar_map, BUS_DMASYNC_PREREAD | + BUS_DMASYNC_PREWRITE); WRITE_REG(sc, TXP_H2A_2, sc->sc_cmdring.lastwrite); + TXP_BARRIER(sc, TXP_H2A_2, 4, BUS_SPACE_BARRIER_WRITE); - if (!wait) + if (wait == TXP_CMD_NOWAIT) return (0); - for (i = 0; i < 10000; i++) { - idx = hv->hv_resp_read_idx; - if (idx != hv->hv_resp_write_idx) { - *rspp = NULL; - if (txp_response(sc, idx, id, seq, rspp)) - return (-1); - if (*rspp != NULL) + for (i = 0; i < TXP_TIMEOUT; i++) { + bus_dmamap_sync(sc->sc_cdata.txp_hostvar_tag, + sc->sc_cdata.txp_hostvar_map, BUS_DMASYNC_POSTREAD | + BUS_DMASYNC_POSTWRITE); + if (le32toh(hv->hv_resp_read_idx) != + le32toh(hv->hv_resp_write_idx)) { + error = txp_response(sc, id, seq, rspp); + bus_dmamap_sync(sc->sc_cdata.txp_hostvar_tag, + sc->sc_cdata.txp_hostvar_map, + BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); + if (error != 0) + return (error); + if (*rspp != NULL) break; } DELAY(50); } - if (i == 1000 || (*rspp) == NULL) { - device_printf(sc->sc_dev, "0x%x command failed\n", id); - return (-1); + if (i == TXP_TIMEOUT) { + device_printf(sc->sc_dev, "command 0x%02x timed out\n", id); + error = ETIMEDOUT; } - return (0); + return (error); } static int -txp_response(struct txp_softc *sc, uint32_t ridx, uint16_t id, uint16_t seq, +txp_response(struct txp_softc *sc, uint16_t id, uint16_t seq, struct txp_rsp_desc **rspp) { - struct txp_hostvar *hv = sc->sc_hostvar; + struct txp_hostvar *hv; struct txp_rsp_desc *rsp; + uint32_t ridx; - while (ridx != hv->hv_resp_write_idx) { + bus_dmamap_sync(sc->sc_cdata.txp_rspring_tag, + sc->sc_cdata.txp_rspring_map, BUS_DMASYNC_POSTREAD); + hv = sc->sc_hostvar; + ridx = le32toh(hv->hv_resp_read_idx); + while (ridx != le32toh(hv->hv_resp_write_idx)) { rsp = (struct txp_rsp_desc *)(((uint8_t *)sc->sc_rspring.base) + ridx); - if (id == rsp->rsp_id && rsp->rsp_seq == seq) { + if (id == le16toh(rsp->rsp_id) && + le16toh(rsp->rsp_seq) == seq) { *rspp = (struct txp_rsp_desc *)malloc( sizeof(struct txp_rsp_desc) * (rsp->rsp_numdesc + 1), M_DEVBUF, M_NOWAIT); - if ((*rspp) == NULL) - return (-1); + if (*rspp == NULL) { + device_printf(sc->sc_dev, "%s : command 0x%02x " + "memory allocation failure\n", + __func__, id); + return (ENOMEM); + } txp_rsp_fixup(sc, rsp, *rspp); return (0); } - if (rsp->rsp_flags & RSP_FLAGS_ERROR) { - device_printf(sc->sc_dev, "response error!\n"); + if ((rsp->rsp_flags & RSP_FLAGS_ERROR) != 0) { + device_printf(sc->sc_dev, + "%s : command 0x%02x response error!\n", __func__, + le16toh(rsp->rsp_id)); txp_rsp_fixup(sc, rsp, NULL); - ridx = hv->hv_resp_read_idx; + ridx = le32toh(hv->hv_resp_read_idx); continue; } - switch (rsp->rsp_id) { + /* + * The following unsolicited responses are handled during + * processing of TXP_CMD_READ_STATISTICS which requires a + * response. The driver abuses the command to detect media + * status changes.
+ * TXP_CMD_FILTER_DEFINE is not an unsolicited response + * but we don't process the response ring in the interrupt + * handler, so we have to ignore this command here; otherwise + * an unknown command message would be printed. + */ + switch (le16toh(rsp->rsp_id)) { case TXP_CMD_CYCLE_STATISTICS: + case TXP_CMD_FILTER_DEFINE: + break; case TXP_CMD_MEDIA_STATUS_READ: + if ((le16toh(rsp->rsp_par1) & 0x0800) == 0) { + sc->sc_flags |= TXP_FLAG_LINK; + if_link_state_change(sc->sc_ifp, + LINK_STATE_UP); + } else { + sc->sc_flags &= ~TXP_FLAG_LINK; + if_link_state_change(sc->sc_ifp, + LINK_STATE_DOWN); + } break; case TXP_CMD_HELLO_RESPONSE: - device_printf(sc->sc_dev, "hello\n"); + /* + * The driver should respond to the hello message, but + * TXP_CMD_READ_STATISTICS is issued every hz tick, + * so there is no need to send an + * explicit command here. + */ + device_printf(sc->sc_dev, "%s : hello\n", __func__); break; default: - device_printf(sc->sc_dev, "unknown id(0x%x)\n", - rsp->rsp_id); + device_printf(sc->sc_dev, + "%s : unknown command 0x%02x\n", __func__, + le16toh(rsp->rsp_id)); } - txp_rsp_fixup(sc, rsp, NULL); - ridx = hv->hv_resp_read_idx; - hv->hv_resp_read_idx = ridx; + ridx = le32toh(hv->hv_resp_read_idx); } return (0); } static void txp_rsp_fixup(struct txp_softc *sc, struct txp_rsp_desc *rsp, struct txp_rsp_desc *dst) { - struct txp_rsp_desc *src = rsp; - struct txp_hostvar *hv = sc->sc_hostvar; + struct txp_rsp_desc *src; + struct txp_hostvar *hv; uint32_t i, ridx; - ridx = hv->hv_resp_read_idx; + src = rsp; + hv = sc->sc_hostvar; + ridx = le32toh(hv->hv_resp_read_idx); for (i = 0; i < rsp->rsp_numdesc + 1; i++) { if (dst != NULL) bcopy(src, dst++, sizeof(struct txp_rsp_desc)); ridx += sizeof(struct txp_rsp_desc); if (ridx == sc->sc_rspring.size) { src = sc->sc_rspring.base; ridx = 0; } else src++; - sc->sc_rspring.lastwrite = hv->hv_resp_read_idx = ridx; + sc->sc_rspring.lastwrite = ridx; } - hv->hv_resp_read_idx = ridx; + hv->hv_resp_read_idx = htole32(ridx); } static int txp_cmd_desc_numfree(struct txp_softc *sc) { - struct txp_hostvar *hv = sc->sc_hostvar; - struct txp_boot_record *br = sc->sc_boot; + struct txp_hostvar *hv; + struct txp_boot_record *br; uint32_t widx, ridx, nfree; + bus_dmamap_sync(sc->sc_cdata.txp_hostvar_tag, + sc->sc_cdata.txp_hostvar_map, + BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); + hv = sc->sc_hostvar; + br = sc->sc_boot; widx = sc->sc_cmdring.lastwrite; - ridx = hv->hv_cmd_read_idx; + ridx = le32toh(hv->hv_cmd_read_idx); if (widx == ridx) { /* Ring is completely free */ - nfree = br->br_cmd_siz - sizeof(struct txp_cmd_desc); + nfree = le32toh(br->br_cmd_siz) - sizeof(struct txp_cmd_desc); } else { if (widx > ridx) - nfree = br->br_cmd_siz - + nfree = le32toh(br->br_cmd_siz) - (widx - ridx + sizeof(struct txp_cmd_desc)); else nfree = ridx - widx - sizeof(struct txp_cmd_desc); } return (nfree / sizeof(struct txp_cmd_desc)); }
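txp_cmd_desc_numfree above is the standard one-slot-reserved circular ring computation, carried out in bytes because the firmware reports its read position as a byte offset: one descriptor is always left unused, so a full ring can never be confused with an empty one and widx == ridx unambiguously means "empty". The same arithmetic reduced to entry counts, as a sketch (ring_free is illustrative, not a driver function):

static u_int
ring_free(u_int widx, u_int ridx, u_int size)
{
	/* All arguments are in entries; one slot stays reserved. */
	if (widx == ridx)
		return (size - 1);		/* completely free */
	if (widx > ridx)
		return (size - (widx - ridx) - 1);
	return (ridx - widx - 1);
}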
+static int +txp_sleep(struct txp_softc *sc, int capenable) +{ + uint16_t events; + int error; + + events = 0; + if ((capenable & IFCAP_WOL_MAGIC) != 0) + events |= 0x01; + error = txp_command(sc, TXP_CMD_ENABLE_WAKEUP_EVENTS, events, 0, 0, + NULL, NULL, NULL, TXP_CMD_NOWAIT); + if (error == 0) { + /* Go to sleep. */ + error = txp_command(sc, TXP_CMD_GOTO_SLEEP, 0, 0, 0, NULL, + NULL, NULL, TXP_CMD_NOWAIT); + if (error == 0) { + error = txp_wait(sc, STAT_SLEEPING); + if (error != 0) + device_printf(sc->sc_dev, + "unable to enter sleep state\n"); + } + } + + return (error); +} + static void txp_stop(struct txp_softc *sc) { struct ifnet *ifp; TXP_LOCK_ASSERT(sc); ifp = sc->sc_ifp; + if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) + return; + + WRITE_REG(sc, TXP_IER, TXP_INTR_NONE); + WRITE_REG(sc, TXP_ISR, TXP_INTR_ALL); + ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); + sc->sc_flags &= ~TXP_FLAG_LINK; callout_stop(&sc->sc_tick); - txp_command(sc, TXP_CMD_TX_DISABLE, 0, 0, 0, NULL, NULL, NULL, 1); - txp_command(sc, TXP_CMD_RX_DISABLE, 0, 0, 0, NULL, NULL, NULL, 1); + txp_command(sc, TXP_CMD_TX_DISABLE, 0, 0, 0, NULL, NULL, NULL, + TXP_CMD_NOWAIT); + txp_command(sc, TXP_CMD_RX_DISABLE, 0, 0, 0, NULL, NULL, NULL, + TXP_CMD_NOWAIT); + /* Save statistics for later use. */ + txp_stats_save(sc); + /* Halt controller. */ + txp_command(sc, TXP_CMD_HALT, 0, 0, 0, NULL, NULL, NULL, + TXP_CMD_NOWAIT); + if (txp_wait(sc, STAT_HALTED) != 0) + device_printf(sc->sc_dev, "controller halt timed out!\n"); + /* Reclaim Tx/Rx buffers. */ + if (sc->sc_txhir.r_cnt && (sc->sc_txhir.r_cons != + TXP_OFFSET2IDX(le32toh(*(sc->sc_txhir.r_off))))) + txp_tx_reclaim(sc, &sc->sc_txhir); + if (sc->sc_txlor.r_cnt && (sc->sc_txlor.r_cons != + TXP_OFFSET2IDX(le32toh(*(sc->sc_txlor.r_off))))) + txp_tx_reclaim(sc, &sc->sc_txlor); txp_rxring_empty(sc); + + txp_init_rings(sc); + /* Reset controller and make it reload sleep image. */ + txp_reset(sc); + /* Let controller boot from sleep image. */ + if (txp_boot(sc, STAT_WAITING_FOR_HOST_REQUEST) != 0) + device_printf(sc->sc_dev, "could not boot sleep image\n"); + txp_sleep(sc, 0); } static void -txp_watchdog(struct ifnet *ifp) +txp_watchdog(struct txp_softc *sc) { + struct ifnet *ifp; + TXP_LOCK_ASSERT(sc); + + if (sc->sc_watchdog_timer == 0 || --sc->sc_watchdog_timer) + return; + + ifp = sc->sc_ifp; + if_printf(ifp, "watchdog timeout -- resetting\n"); + ifp->if_oerrors++; + txp_stop(sc); + txp_init_locked(sc); } static int txp_ifmedia_upd(struct ifnet *ifp) { struct txp_softc *sc = ifp->if_softc; struct ifmedia *ifm = &sc->sc_ifmedia; uint16_t new_xcvr; TXP_LOCK(sc); if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) { TXP_UNLOCK(sc); return (EINVAL); } if (IFM_SUBTYPE(ifm->ifm_media) == IFM_10_T) { if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) new_xcvr = TXP_XCVR_10_FDX; else new_xcvr = TXP_XCVR_10_HDX; } else if (IFM_SUBTYPE(ifm->ifm_media) == IFM_100_TX) { if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) new_xcvr = TXP_XCVR_100_FDX; else new_xcvr = TXP_XCVR_100_HDX; } else if (IFM_SUBTYPE(ifm->ifm_media) == IFM_AUTO) { new_xcvr = TXP_XCVR_AUTO; } else { TXP_UNLOCK(sc); return (EINVAL); } /* nothing to do */ if (sc->sc_xcvr == new_xcvr) { TXP_UNLOCK(sc); return (0); } txp_command(sc, TXP_CMD_XCVR_SELECT, new_xcvr, 0, 0, - NULL, NULL, NULL, 0); + NULL, NULL, NULL, TXP_CMD_NOWAIT); sc->sc_xcvr = new_xcvr; TXP_UNLOCK(sc); return (0); } static void txp_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) { struct txp_softc *sc = ifp->if_softc; struct ifmedia *ifm = &sc->sc_ifmedia; uint16_t bmsr, bmcr, anar, anlpar; ifmr->ifm_status = IFM_AVALID; ifmr->ifm_active = IFM_ETHER; TXP_LOCK(sc); + /* Check whether firmware is running.
*/ + if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) + goto bail; if (txp_command(sc, TXP_CMD_PHY_MGMT_READ, 0, MII_BMSR, 0, - &bmsr, NULL, NULL, 1)) + &bmsr, NULL, NULL, TXP_CMD_WAIT)) goto bail; if (txp_command(sc, TXP_CMD_PHY_MGMT_READ, 0, MII_BMSR, 0, - &bmsr, NULL, NULL, 1)) + &bmsr, NULL, NULL, TXP_CMD_WAIT)) goto bail; if (txp_command(sc, TXP_CMD_PHY_MGMT_READ, 0, MII_BMCR, 0, - &bmcr, NULL, NULL, 1)) + &bmcr, NULL, NULL, TXP_CMD_WAIT)) goto bail; if (txp_command(sc, TXP_CMD_PHY_MGMT_READ, 0, MII_ANLPAR, 0, - &anlpar, NULL, NULL, 1)) + &anlpar, NULL, NULL, TXP_CMD_WAIT)) goto bail; if (txp_command(sc, TXP_CMD_PHY_MGMT_READ, 0, MII_ANAR, 0, - &anar, NULL, NULL, 1)) + &anar, NULL, NULL, TXP_CMD_WAIT)) goto bail; TXP_UNLOCK(sc); if (bmsr & BMSR_LINK) ifmr->ifm_status |= IFM_ACTIVE; if (bmcr & BMCR_ISO) { ifmr->ifm_active |= IFM_NONE; ifmr->ifm_status = 0; return; } if (bmcr & BMCR_LOOP) ifmr->ifm_active |= IFM_LOOP; if (bmcr & BMCR_AUTOEN) { if ((bmsr & BMSR_ACOMP) == 0) { ifmr->ifm_active |= IFM_NONE; return; } anlpar &= anar; if (anlpar & ANLPAR_TX_FD) ifmr->ifm_active |= IFM_100_TX|IFM_FDX; else if (anlpar & ANLPAR_T4) ifmr->ifm_active |= IFM_100_T4; else if (anlpar & ANLPAR_TX) ifmr->ifm_active |= IFM_100_TX; else if (anlpar & ANLPAR_10_FD) ifmr->ifm_active |= IFM_10_T|IFM_FDX; else if (anlpar & ANLPAR_10) ifmr->ifm_active |= IFM_10_T; else ifmr->ifm_active |= IFM_NONE; } else ifmr->ifm_active = ifm->ifm_cur->ifm_media; return; bail: TXP_UNLOCK(sc); ifmr->ifm_active |= IFM_NONE; ifmr->ifm_status &= ~IFM_AVALID; } #ifdef TXP_DEBUG static void txp_show_descriptor(void *d) { struct txp_cmd_desc *cmd = d; struct txp_rsp_desc *rsp = d; struct txp_tx_desc *txd = d; struct txp_frag_desc *frgd = d; switch (cmd->cmd_flags & CMD_FLAGS_TYPE_M) { case CMD_FLAGS_TYPE_CMD: /* command descriptor */ printf("[cmd flags 0x%x num %d id %d seq %d par1 0x%x par2 0x%x par3 0x%x]\n", - cmd->cmd_flags, cmd->cmd_numdesc, cmd->cmd_id, cmd->cmd_seq, - cmd->cmd_par1, cmd->cmd_par2, cmd->cmd_par3); + cmd->cmd_flags, cmd->cmd_numdesc, le16toh(cmd->cmd_id), + le16toh(cmd->cmd_seq), le16toh(cmd->cmd_par1), + le32toh(cmd->cmd_par2), le32toh(cmd->cmd_par3)); break; case CMD_FLAGS_TYPE_RESP: /* response descriptor */ printf("[rsp flags 0x%x num %d id %d seq %d par1 0x%x par2 0x%x par3 0x%x]\n", - rsp->rsp_flags, rsp->rsp_numdesc, rsp->rsp_id, rsp->rsp_seq, - rsp->rsp_par1, rsp->rsp_par2, rsp->rsp_par3); + rsp->rsp_flags, rsp->rsp_numdesc, le16toh(rsp->rsp_id), + le16toh(rsp->rsp_seq), le16toh(rsp->rsp_par1), + le32toh(rsp->rsp_par2), le32toh(rsp->rsp_par3)); break; case CMD_FLAGS_TYPE_DATA: /* data header (assuming tx for now) */ printf("[data flags 0x%x num %d totlen %d addr 0x%x/0x%x pflags 0x%x]", - txd->tx_flags, txd->tx_numdesc, txd->tx_totlen, - txd->tx_addrlo, txd->tx_addrhi, txd->tx_pflags); + txd->tx_flags, txd->tx_numdesc, le16toh(txd->tx_totlen), + le32toh(txd->tx_addrlo), le32toh(txd->tx_addrhi), + le32toh(txd->tx_pflags)); break; case CMD_FLAGS_TYPE_FRAG: /* fragment descriptor */ printf("[frag flags 0x%x rsvd1 0x%x len %d addr 0x%x/0x%x rsvd2 0x%x]", - frgd->frag_flags, frgd->frag_rsvd1, frgd->frag_len, - frgd->frag_addrlo, frgd->frag_addrhi, frgd->frag_rsvd2); + frgd->frag_flags, frgd->frag_rsvd1, le16toh(frgd->frag_len), + le32toh(frgd->frag_addrlo), le32toh(frgd->frag_addrhi), + le32toh(frgd->frag_rsvd2)); break; default: printf("[unknown(%x) flags 0x%x num %d id %d seq %d par1 0x%x par2 0x%x par3 0x%x]\n", cmd->cmd_flags & CMD_FLAGS_TYPE_M, - cmd->cmd_flags, cmd->cmd_numdesc, cmd->cmd_id, 
cmd->cmd_seq, - cmd->cmd_par1, cmd->cmd_par2, cmd->cmd_par3); + cmd->cmd_flags, cmd->cmd_numdesc, le16toh(cmd->cmd_id), + le16toh(cmd->cmd_seq), le16toh(cmd->cmd_par1), + le32toh(cmd->cmd_par2), le32toh(cmd->cmd_par3)); break; } } #endif static void txp_set_filter(struct txp_softc *sc) { struct ifnet *ifp; uint32_t crc, mchash[2]; uint16_t filter; struct ifmultiaddr *ifma; int mcnt; TXP_LOCK_ASSERT(sc); ifp = sc->sc_ifp; filter = TXP_RXFILT_DIRECT; if ((ifp->if_flags & IFF_BROADCAST) != 0) filter |= TXP_RXFILT_BROADCAST; if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) { if ((ifp->if_flags & IFF_ALLMULTI) != 0) filter |= TXP_RXFILT_ALLMULTI; if ((ifp->if_flags & IFF_PROMISC) != 0) filter = TXP_RXFILT_PROMISC; goto setit; } mchash[0] = mchash[1] = 0; mcnt = 0; IF_ADDR_LOCK(ifp); TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; crc = ether_crc32_be(LLADDR((struct sockaddr_dl *) ifma->ifma_addr), ETHER_ADDR_LEN); crc &= 0x3f; mchash[crc >> 5] |= 1 << (crc & 0x1f); mcnt++; } IF_ADDR_UNLOCK(ifp); if (mcnt > 0) { filter |= TXP_RXFILT_HASHMULTI; txp_command(sc, TXP_CMD_MCAST_HASH_MASK_WRITE, 2, mchash[0], - mchash[1], NULL, NULL, NULL, 0); + mchash[1], NULL, NULL, NULL, TXP_CMD_NOWAIT); } setit: - txp_command(sc, TXP_CMD_RX_FILTER_WRITE, filter, 0, 0, - NULL, NULL, NULL, 1); + NULL, NULL, NULL, TXP_CMD_NOWAIT); } +static int +txp_set_capabilities(struct txp_softc *sc) +{ + struct ifnet *ifp; + uint32_t rxcap, txcap; + + TXP_LOCK_ASSERT(sc); + + rxcap = txcap = 0; + ifp = sc->sc_ifp; + if ((ifp->if_capenable & IFCAP_TXCSUM) != 0) { + if ((ifp->if_hwassist & CSUM_IP) != 0) + txcap |= OFFLOAD_IPCKSUM; + if ((ifp->if_hwassist & CSUM_TCP) != 0) + txcap |= OFFLOAD_TCPCKSUM; + if ((ifp->if_hwassist & CSUM_UDP) != 0) + txcap |= OFFLOAD_UDPCKSUM; + rxcap = txcap; + } + if ((ifp->if_capenable & IFCAP_RXCSUM) == 0) + rxcap &= ~(OFFLOAD_IPCKSUM | OFFLOAD_TCPCKSUM | + OFFLOAD_UDPCKSUM); + if ((ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) != 0) { + rxcap |= OFFLOAD_VLAN; + txcap |= OFFLOAD_VLAN; + } + + /* Tell firmware new offload configuration. 
*/ + return (txp_command(sc, TXP_CMD_OFFLOAD_WRITE, 0, txcap, rxcap, NULL, + NULL, NULL, TXP_CMD_NOWAIT)); +} + static void -txp_capabilities(struct txp_softc *sc) +txp_stats_save(struct txp_softc *sc) { - struct ifnet *ifp = sc->sc_ifp; - struct txp_rsp_desc *rsp = NULL; - struct txp_ext_desc *ext; + struct txp_rsp_desc *rsp; - if (txp_command2(sc, TXP_CMD_OFFLOAD_READ, 0, 0, 0, NULL, 0, &rsp, 1)) - goto out; + TXP_LOCK_ASSERT(sc); - if (rsp->rsp_numdesc != 1) + rsp = NULL; + if (txp_ext_command(sc, TXP_CMD_READ_STATISTICS, 0, 0, 0, NULL, 0, + &rsp, TXP_CMD_WAIT)) goto out; + if (rsp->rsp_numdesc != 6) + goto out; + txp_stats_update(sc, rsp); +out: + if (rsp != NULL) + free(rsp, M_DEVBUF); + bcopy(&sc->sc_stats, &sc->sc_ostats, sizeof(struct txp_hw_stats)); +} + +static void +txp_stats_update(struct txp_softc *sc, struct txp_rsp_desc *rsp) +{ + struct ifnet *ifp; + struct txp_hw_stats *ostats, *stats; + struct txp_ext_desc *ext; + + TXP_LOCK_ASSERT(sc); + + ifp = sc->sc_ifp; ext = (struct txp_ext_desc *)(rsp + 1); + ostats = &sc->sc_ostats; + stats = &sc->sc_stats; + stats->tx_frames = ostats->tx_frames + le32toh(rsp->rsp_par2); + stats->tx_bytes = ostats->tx_bytes + (uint64_t)le32toh(rsp->rsp_par3) + + ((uint64_t)le32toh(ext[0].ext_1) << 32); + stats->tx_deferred = ostats->tx_deferred + le32toh(ext[0].ext_2); + stats->tx_late_colls = ostats->tx_late_colls + le32toh(ext[0].ext_3); + stats->tx_colls = ostats->tx_colls + le32toh(ext[0].ext_4); + stats->tx_carrier_lost = ostats->tx_carrier_lost + + le32toh(ext[1].ext_1); + stats->tx_multi_colls = ostats->tx_multi_colls + + le32toh(ext[1].ext_2); + stats->tx_excess_colls = ostats->tx_excess_colls + + le32toh(ext[1].ext_3); + stats->tx_fifo_underruns = ostats->tx_fifo_underruns + + le32toh(ext[1].ext_4); + stats->tx_mcast_oflows = ostats->tx_mcast_oflows + + le32toh(ext[2].ext_1); + stats->tx_filtered = ostats->tx_filtered + le32toh(ext[2].ext_2); + stats->rx_frames = ostats->rx_frames + le32toh(ext[2].ext_3); + stats->rx_bytes = ostats->rx_bytes + (uint64_t)le32toh(ext[2].ext_4) + + ((uint64_t)le32toh(ext[3].ext_1) << 32); + stats->rx_fifo_oflows = ostats->rx_fifo_oflows + le32toh(ext[3].ext_2); + stats->rx_badssd = ostats->rx_badssd + le32toh(ext[3].ext_3); + stats->rx_crcerrs = ostats->rx_crcerrs + le32toh(ext[3].ext_4); + stats->rx_lenerrs = ostats->rx_lenerrs + le32toh(ext[4].ext_1); + stats->rx_bcast_frames = ostats->rx_bcast_frames + + le32toh(ext[4].ext_2); + stats->rx_mcast_frames = ostats->rx_mcast_frames + + le32toh(ext[4].ext_3); + stats->rx_oflows = ostats->rx_oflows + le32toh(ext[4].ext_4); + stats->rx_filtered = ostats->rx_filtered + le32toh(ext[5].ext_1); - sc->sc_tx_capability = ext->ext_1 & OFFLOAD_MASK; - sc->sc_rx_capability = ext->ext_2 & OFFLOAD_MASK; - ifp->if_capabilities = 0; + ifp->if_ierrors = stats->rx_fifo_oflows + stats->rx_badssd + + stats->rx_crcerrs + stats->rx_lenerrs + stats->rx_oflows; + ifp->if_oerrors = stats->tx_deferred + stats->tx_carrier_lost + + stats->tx_fifo_underruns + stats->tx_mcast_oflows; + ifp->if_collisions = stats->tx_late_colls + stats->tx_multi_colls + + stats->tx_excess_colls; + ifp->if_opackets = stats->tx_frames; + ifp->if_ipackets = stats->rx_frames; +} - if (rsp->rsp_par2 & rsp->rsp_par3 & OFFLOAD_VLAN) { - sc->sc_tx_capability |= OFFLOAD_VLAN; - sc->sc_rx_capability |= OFFLOAD_VLAN; - ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING; - } +#define TXP_SYSCTL_STAT_ADD32(c, h, n, p, d) \ + SYSCTL_ADD_UINT(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d) -#if 0 - /* not ready yet */ - if 
(rsp->rsp_par2 & rsp->rsp_par3 & OFFLOAD_IPSEC) { - sc->sc_tx_capability |= OFFLOAD_IPSEC; - sc->sc_rx_capability |= OFFLOAD_IPSEC; - ifp->if_capabilities |= IFCAP_IPSEC; - } +#if __FreeBSD_version > 800000 +#define TXP_SYSCTL_STAT_ADD64(c, h, n, p, d) \ + SYSCTL_ADD_QUAD(c, h, OID_AUTO, n, CTLFLAG_RD, p, d) +#else +#define TXP_SYSCTL_STAT_ADD64(c, h, n, p, d) \ + SYSCTL_ADD_ULONG(c, h, OID_AUTO, n, CTLFLAG_RD, p, d) #endif - if (rsp->rsp_par2 & rsp->rsp_par3 & OFFLOAD_IPCKSUM) { - sc->sc_tx_capability |= OFFLOAD_IPCKSUM; - sc->sc_rx_capability |= OFFLOAD_IPCKSUM; - ifp->if_capabilities |= IFCAP_HWCSUM; - ifp->if_hwassist |= CSUM_IP; - } +static void +txp_sysctl_node(struct txp_softc *sc) +{ + struct sysctl_ctx_list *ctx; + struct sysctl_oid_list *child, *parent; + struct sysctl_oid *tree; + struct txp_hw_stats *stats; + int error; - if (rsp->rsp_par2 & rsp->rsp_par3 & OFFLOAD_TCPCKSUM) { -#if 0 - sc->sc_tx_capability |= OFFLOAD_TCPCKSUM; -#endif - sc->sc_rx_capability |= OFFLOAD_TCPCKSUM; - ifp->if_capabilities |= IFCAP_HWCSUM; + stats = &sc->sc_stats; + ctx = device_get_sysctl_ctx(sc->sc_dev); + child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->sc_dev)); + SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "process_limit", + CTLTYPE_INT | CTLFLAG_RW, &sc->sc_process_limit, 0, + sysctl_hw_txp_proc_limit, "I", + "max number of Rx events to process"); + /* Pull in device tunables. */ + sc->sc_process_limit = TXP_PROC_DEFAULT; + error = resource_int_value(device_get_name(sc->sc_dev), + device_get_unit(sc->sc_dev), "process_limit", + &sc->sc_process_limit); + if (error == 0) { + if (sc->sc_process_limit < TXP_PROC_MIN || + sc->sc_process_limit > TXP_PROC_MAX) { + device_printf(sc->sc_dev, + "process_limit value out of range; " + "using default: %d\n", TXP_PROC_DEFAULT); + sc->sc_process_limit = TXP_PROC_DEFAULT; + } } + tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats", CTLFLAG_RD, + NULL, "TXP statistics"); + parent = SYSCTL_CHILDREN(tree); - if (rsp->rsp_par2 & rsp->rsp_par3 & OFFLOAD_UDPCKSUM) { -#if 0 - sc->sc_tx_capability |= OFFLOAD_UDPCKSUM; -#endif - sc->sc_rx_capability |= OFFLOAD_UDPCKSUM; - ifp->if_capabilities |= IFCAP_HWCSUM; - } - ifp->if_capenable = ifp->if_capabilities; + /* Tx statistics. 
*/ + tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "tx", CTLFLAG_RD, + NULL, "Tx MAC statistics"); + child = SYSCTL_CHILDREN(tree); - if (txp_command(sc, TXP_CMD_OFFLOAD_WRITE, 0, - sc->sc_tx_capability, sc->sc_rx_capability, NULL, NULL, NULL, 1)) - goto out; + TXP_SYSCTL_STAT_ADD32(ctx, child, "frames", + &stats->tx_frames, "Frames"); + TXP_SYSCTL_STAT_ADD64(ctx, child, "octets", + &stats->tx_bytes, "Octets"); + TXP_SYSCTL_STAT_ADD32(ctx, child, "deferred", + &stats->tx_deferred, "Deferred frames"); + TXP_SYSCTL_STAT_ADD32(ctx, child, "late_colls", + &stats->tx_late_colls, "Late collisions"); + TXP_SYSCTL_STAT_ADD32(ctx, child, "colls", + &stats->tx_colls, "Collisions"); + TXP_SYSCTL_STAT_ADD32(ctx, child, "carrier_lost", + &stats->tx_carrier_lost, "Carrier lost"); + TXP_SYSCTL_STAT_ADD32(ctx, child, "multi_colls", + &stats->tx_multi_colls, "Multiple collisions"); + TXP_SYSCTL_STAT_ADD32(ctx, child, "excess_colls", + &stats->tx_excess_colls, "Excessive collisions"); + TXP_SYSCTL_STAT_ADD32(ctx, child, "fifo_underruns", + &stats->tx_fifo_underruns, "FIFO underruns"); + TXP_SYSCTL_STAT_ADD32(ctx, child, "mcast_oflows", + &stats->tx_mcast_oflows, "Multicast overflows"); + TXP_SYSCTL_STAT_ADD32(ctx, child, "filtered", + &stats->tx_filtered, "Filtered frames"); -out: - if (rsp != NULL) - free(rsp, M_DEVBUF); + /* Rx statistics. */ + tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "rx", CTLFLAG_RD, + NULL, "Rx MAC statistics"); + child = SYSCTL_CHILDREN(tree); + + TXP_SYSCTL_STAT_ADD32(ctx, child, "frames", + &stats->rx_frames, "Frames"); + TXP_SYSCTL_STAT_ADD64(ctx, child, "octets", + &stats->rx_bytes, "Octets"); + TXP_SYSCTL_STAT_ADD32(ctx, child, "fifo_oflows", + &stats->rx_fifo_oflows, "FIFO overflows"); + TXP_SYSCTL_STAT_ADD32(ctx, child, "badssd", + &stats->rx_badssd, "Bad SSD"); + TXP_SYSCTL_STAT_ADD32(ctx, child, "crcerrs", + &stats->rx_crcerrs, "CRC errors"); + TXP_SYSCTL_STAT_ADD32(ctx, child, "lenerrs", + &stats->rx_lenerrs, "Length errors"); + TXP_SYSCTL_STAT_ADD32(ctx, child, "bcast_frames", + &stats->rx_bcast_frames, "Broadcast frames"); + TXP_SYSCTL_STAT_ADD32(ctx, child, "mcast_frames", + &stats->rx_mcast_frames, "Multicast frames"); + TXP_SYSCTL_STAT_ADD32(ctx, child, "oflows", + &stats->rx_oflows, "Overflows"); + TXP_SYSCTL_STAT_ADD32(ctx, child, "filtered", + &stats->rx_filtered, "Filtered frames"); +} + +#undef TXP_SYSCTL_STAT_ADD32 +#undef TXP_SYSCTL_STAT_ADD64 + +static int +sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high) +{ + int error, value; + + if (arg1 == NULL) + return (EINVAL); + value = *(int *)arg1; + error = sysctl_handle_int(oidp, &value, 0, req); + if (error || req->newptr == NULL) + return (error); + if (value < low || value > high) + return (EINVAL); + *(int *)arg1 = value; + + return (0); +} + +static int +sysctl_hw_txp_proc_limit(SYSCTL_HANDLER_ARGS) +{ + return (sysctl_int_range(oidp, arg1, arg2, req, + TXP_PROC_MIN, TXP_PROC_MAX)); } Index: stable/7/sys/dev/txp/if_txpreg.h =================================================================== --- stable/7/sys/dev/txp/if_txpreg.h (revision 190246) +++ stable/7/sys/dev/txp/if_txpreg.h (revision 190247) @@ -1,663 +1,768 @@ /* $OpenBSD: if_txpreg.h,v 1.30 2001/06/23 04:18:02 jason Exp $ */ /* $FreeBSD$ */ /*- * Copyright (c) 2001 Aaron Campbell . * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Aaron Campbell. * 4. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR OR HIS RELATIVES BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF MIND, USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. */ -#define TXP_PCI_LOMEM PCIR_BAR(1) /* memory map BAR */ -#define TXP_PCI_LOIO PCIR_BAR(0) /* IO map BAR */ - /* * Typhoon registers. */ #define TXP_SRR 0x00 /* soft reset register */ #define TXP_ISR 0x04 /* interrupt status register */ #define TXP_IER 0x08 /* interrupt enable register */ #define TXP_IMR 0x0c /* interrupt mask register */ #define TXP_SIR 0x10 /* self interrupt register */ #define TXP_H2A_7 0x14 /* host->arm comm 7 */ #define TXP_H2A_6 0x18 /* host->arm comm 6 */ #define TXP_H2A_5 0x1c /* host->arm comm 5 */ #define TXP_H2A_4 0x20 /* host->arm comm 4 */ #define TXP_H2A_3 0x24 /* host->arm comm 3 */ #define TXP_H2A_2 0x28 /* host->arm comm 2 */ #define TXP_H2A_1 0x2c /* host->arm comm 1 */ #define TXP_H2A_0 0x30 /* host->arm comm 0 */ #define TXP_A2H_3 0x34 /* arm->host comm 3 */ #define TXP_A2H_2 0x38 /* arm->host comm 2 */ #define TXP_A2H_1 0x3c /* arm->host comm 1 */ #define TXP_A2H_0 0x40 /* arm->host comm 0 */ /* * interrupt bits (IMR, ISR, IER) */ #define TXP_INT_RESERVED 0xffff0000 #define TXP_INT_A2H_7 0x00008000 /* arm->host comm 7 */ #define TXP_INT_A2H_6 0x00004000 /* arm->host comm 6 */ #define TXP_INT_A2H_5 0x00002000 /* arm->host comm 5 */ #define TXP_INT_A2H_4 0x00001000 /* arm->host comm 4 */ #define TXP_INT_SELF 0x00000800 /* self interrupt */ #define TXP_INT_PCI_TABORT 0x00000400 /* pci target abort */ #define TXP_INT_PCI_MABORT 0x00000200 /* pci master abort */ #define TXP_INT_DMA3 0x00000100 /* dma3 done */ #define TXP_INT_DMA2 0x00000080 /* dma2 done */ #define TXP_INT_DMA1 0x00000040 /* dma1 done */ #define TXP_INT_DMA0 0x00000020 /* dma0 done */ #define TXP_INT_A2H_3 0x00000010 /* arm->host comm 3 */ #define TXP_INT_A2H_2 0x00000008 /* arm->host comm 2 */ #define TXP_INT_A2H_1 0x00000004 /* arm->host comm 1 */ #define TXP_INT_A2H_0 0x00000002 /* arm->host comm 0 */ #define TXP_INT_LATCH 0x00000001 /* interrupt latch */ /* + * Controller periodically generates TXP_INT_A2H_3 interrupt so + * we don't want to see them in interrupt handler. 
+ */ +#define TXP_INTRS 0xFFFFFFEF +#define TXP_INTR_ALL 0xFFFFFFFF +#define TXP_INTR_NONE 0x00000000 + +/* * soft reset register (SRR) */ #define TXP_SRR_ALL 0x0000007f /* full reset */ /* * Typhoon boot commands. */ #define TXP_BOOTCMD_NULL 0x00 +#define TXP_BOOTCMD_WAKEUP 0xfa #define TXP_BOOTCMD_DOWNLOAD_COMPLETE 0xfb #define TXP_BOOTCMD_SEGMENT_AVAILABLE 0xfc #define TXP_BOOTCMD_RUNTIME_IMAGE 0xfd #define TXP_BOOTCMD_REGISTER_BOOT_RECORD 0xff /* * Typhoon runtime commands. */ #define TXP_CMD_GLOBAL_RESET 0x00 #define TXP_CMD_TX_ENABLE 0x01 #define TXP_CMD_TX_DISABLE 0x02 #define TXP_CMD_RX_ENABLE 0x03 #define TXP_CMD_RX_DISABLE 0x04 #define TXP_CMD_RX_FILTER_WRITE 0x05 #define TXP_CMD_RX_FILTER_READ 0x06 #define TXP_CMD_READ_STATISTICS 0x07 #define TXP_CMD_CYCLE_STATISTICS 0x08 #define TXP_CMD_CLEAR_STATISTICS 0x09 #define TXP_CMD_MEMORY_READ 0x0a #define TXP_CMD_MEMORY_WRITE_SINGLE 0x0b #define TXP_CMD_VARIABLE_SECTION_READ 0x0c #define TXP_CMD_VARIABLE_SECTION_WRITE 0x0d #define TXP_CMD_STATIC_SECTION_READ 0x0e #define TXP_CMD_STATIC_SECTION_WRITE 0x0f #define TXP_CMD_IMAGE_SECTION_PROGRAM 0x10 #define TXP_CMD_NVRAM_PAGE_READ 0x11 #define TXP_CMD_NVRAM_PAGE_WRITE 0x12 #define TXP_CMD_XCVR_SELECT 0x13 #define TXP_CMD_TEST_MUX 0x14 #define TXP_CMD_PHYLOOPBACK_ENABLE 0x15 #define TXP_CMD_PHYLOOPBACK_DISABLE 0x16 #define TXP_CMD_MAC_CONTROL_READ 0x17 #define TXP_CMD_MAC_CONTROL_WRITE 0x18 #define TXP_CMD_MAX_PKT_SIZE_READ 0x19 #define TXP_CMD_MAX_PKT_SIZE_WRITE 0x1a #define TXP_CMD_MEDIA_STATUS_READ 0x1b #define TXP_CMD_MEDIA_STATUS_WRITE 0x1c #define TXP_CMD_NETWORK_DIAGS_READ 0x1d #define TXP_CMD_NETWORK_DIAGS_WRITE 0x1e #define TXP_CMD_PHY_MGMT_READ 0x1f #define TXP_CMD_PHY_MGMT_WRITE 0x20 #define TXP_CMD_VARIABLE_PARAMETER_READ 0x21 #define TXP_CMD_VARIABLE_PARAMETER_WRITE 0x22 #define TXP_CMD_GOTO_SLEEP 0x23 #define TXP_CMD_FIREWALL_CONTROL 0x24 #define TXP_CMD_MCAST_HASH_MASK_WRITE 0x25 #define TXP_CMD_STATION_ADDRESS_WRITE 0x26 #define TXP_CMD_STATION_ADDRESS_READ 0x27 #define TXP_CMD_STATION_MASK_WRITE 0x28 #define TXP_CMD_STATION_MASK_READ 0x29 #define TXP_CMD_VLAN_ETHER_TYPE_READ 0x2a #define TXP_CMD_VLAN_ETHER_TYPE_WRITE 0x2b #define TXP_CMD_VLAN_MASK_READ 0x2c #define TXP_CMD_VLAN_MASK_WRITE 0x2d #define TXP_CMD_BCAST_THROTTLE_WRITE 0x2e #define TXP_CMD_BCAST_THROTTLE_READ 0x2f #define TXP_CMD_DHCP_PREVENT_WRITE 0x30 #define TXP_CMD_DHCP_PREVENT_READ 0x31 #define TXP_CMD_RECV_BUFFER_CONTROL 0x32 #define TXP_CMD_SOFTWARE_RESET 0x33 #define TXP_CMD_CREATE_SA 0x34 #define TXP_CMD_DELETE_SA 0x35 #define TXP_CMD_ENABLE_RX_IP_OPTION 0x36 #define TXP_CMD_RANDOM_NUMBER_CONTROL 0x37 #define TXP_CMD_RANDOM_NUMBER_READ 0x38 #define TXP_CMD_MATRIX_TABLE_MODE_WRITE 0x39 #define TXP_CMD_MATRIX_DETAIL_READ 0x3a #define TXP_CMD_FILTER_ARRAY_READ 0x3b #define TXP_CMD_FILTER_DETAIL_READ 0x3c #define TXP_CMD_FILTER_TABLE_MODE_WRITE 0x3d #define TXP_CMD_FILTER_TCL_WRITE 0x3e #define TXP_CMD_FILTER_TBL_READ 0x3f +#define TXP_CMD_READ_VERSION 0x43 #define TXP_CMD_FILTER_DEFINE 0x45 #define TXP_CMD_ADD_WAKEUP_PKT 0x46 #define TXP_CMD_ADD_SLEEP_PKT 0x47 #define TXP_CMD_ENABLE_SLEEP_EVENTS 0x48 #define TXP_CMD_ENABLE_WAKEUP_EVENTS 0x49 #define TXP_CMD_GET_IP_ADDRESS 0x4a #define TXP_CMD_READ_PCI_REG 0x4c #define TXP_CMD_WRITE_PCI_REG 0x4d #define TXP_CMD_OFFLOAD_READ 0x4e #define TXP_CMD_OFFLOAD_WRITE 0x4f #define TXP_CMD_HELLO_RESPONSE 0x57 #define TXP_CMD_ENABLE_RX_FILTER 0x58 #define TXP_CMD_RX_FILTER_CAPABILITY 0x59 #define TXP_CMD_HALT 0x5d #define TXP_CMD_READ_IPSEC_INFO 0x54 #define 
TXP_CMD_GET_IPSEC_ENABLE 0x67 #define TXP_CMD_INVALID 0xffff #define TXP_FRAGMENT 0x0000 #define TXP_TXFRAME 0x0001 #define TXP_COMMAND 0x0002 #define TXP_OPTION 0x0003 #define TXP_RECEIVE 0x0004 #define TXP_RESPONSE 0x0005 #define TXP_TYPE_IPSEC 0x0000 #define TXP_TYPE_TCPSEGMENT 0x0001 #define TXP_PFLAG_NOCRC 0x0000 #define TXP_PFLAG_IPCKSUM 0x0001 #define TXP_PFLAG_TCPCKSUM 0x0002 #define TXP_PFLAG_TCPSEGMENT 0x0004 #define TXP_PFLAG_INSERTVLAN 0x0008 #define TXP_PFLAG_IPSEC 0x0010 #define TXP_PFLAG_PRIORITY 0x0020 #define TXP_PFLAG_UDPCKSUM 0x0040 #define TXP_PFLAG_PADFRAME 0x0080 #define TXP_MISC_FIRSTDESC 0x0000 #define TXP_MISC_LASTDESC 0x0001 #define TXP_ERR_INTERNAL 0x0000 #define TXP_ERR_FIFOUNDERRUN 0x0001 #define TXP_ERR_BADSSD 0x0002 #define TXP_ERR_RUNT 0x0003 #define TXP_ERR_CRC 0x0004 #define TXP_ERR_OVERSIZE 0x0005 #define TXP_ERR_ALIGNMENT 0x0006 #define TXP_ERR_DRIBBLEBIT 0x0007 #define TXP_PROTO_UNKNOWN 0x0000 #define TXP_PROTO_IP 0x0001 #define TXP_PROTO_IPX 0x0002 #define TXP_PROTO_RESERVED 0x0003 #define TXP_STAT_PROTO 0x0001 #define TXP_STAT_VLAN 0x0002 #define TXP_STAT_IPFRAGMENT 0x0004 #define TXP_STAT_IPSEC 0x0008 #define TXP_STAT_IPCKSUMBAD 0x0010 #define TXP_STAT_TCPCKSUMBAD 0x0020 #define TXP_STAT_UDPCKSUMBAD 0x0040 #define TXP_STAT_IPCKSUMGOOD 0x0080 #define TXP_STAT_TCPCKSUMGOOD 0x0100 #define TXP_STAT_UDPCKSUMGOOD 0x0200 struct txp_tx_desc { - volatile uint8_t tx_flags; /* type/descriptor flags */ - volatile uint8_t tx_numdesc; /* number of descriptors */ - volatile uint16_t tx_totlen; /* total packet length */ - volatile uint32_t tx_addrlo; /* virt addr low word */ - volatile uint32_t tx_addrhi; /* virt addr high word */ - volatile uint32_t tx_pflags; /* processing flags */ + uint8_t tx_flags; /* type/descriptor flags */ + uint8_t tx_numdesc; /* number of descriptors */ + uint16_t tx_totlen; /* total packet length */ + uint32_t tx_addrlo; /* virt addr low word */ + uint32_t tx_addrhi; /* virt addr high word */ + uint32_t tx_pflags; /* processing flags */ }; #define TX_FLAGS_TYPE_M 0x07 /* type mask */ #define TX_FLAGS_TYPE_FRAG 0x00 /* type: fragment */ #define TX_FLAGS_TYPE_DATA 0x01 /* type: data frame */ #define TX_FLAGS_TYPE_CMD 0x02 /* type: command frame */ #define TX_FLAGS_TYPE_OPT 0x03 /* type: options */ #define TX_FLAGS_TYPE_RX 0x04 /* type: command */ #define TX_FLAGS_TYPE_RESP 0x05 /* type: response */ #define TX_FLAGS_RESP 0x40 /* response requested */ #define TX_FLAGS_VALID 0x80 /* valid descriptor */ #define TX_PFLAGS_DNAC 0x00000001 /* do not add crc */ #define TX_PFLAGS_IPCKSUM 0x00000002 /* ip checksum */ #define TX_PFLAGS_TCPCKSUM 0x00000004 /* tcp checksum */ #define TX_PFLAGS_TCPSEG 0x00000008 /* tcp segmentation */ #define TX_PFLAGS_VLAN 0x00000010 /* insert vlan */ #define TX_PFLAGS_IPSEC 0x00000020 /* perform ipsec */ #define TX_PFLAGS_PRIO 0x00000040 /* priority field valid */ #define TX_PFLAGS_UDPCKSUM 0x00000080 /* udp checksum */ #define TX_PFLAGS_PADFRAME 0x00000100 /* pad frame */ #define TX_PFLAGS_VLANTAG_M 0x0ffff000 /* vlan tag mask */ #define TX_PFLAGS_VLANPRI_M 0x00700000 /* vlan priority mask */ #define TX_PFLAGS_VLANTAG_S 12 /* amount to shift tag */ struct txp_rx_desc { - volatile uint8_t rx_flags; /* type/descriptor flags */ - volatile uint8_t rx_numdesc; /* number of descriptors */ - volatile uint16_t rx_len; /* frame length */ -#ifdef notdef - volatile uint32_t rx_vaddrlo; /* virtual address, lo word */ - volatile uint32_t rx_vaddrhi; /* virtual address, hi word */ -#endif - union { - struct txp_swdesc *rx_sd; - 
uint64_t rx_dummy; - } txp_rx_u; - volatile uint32_t rx_stat; /* status */ - volatile uint16_t rx_filter; /* filter status */ - volatile uint16_t rx_hash; /* hash status */ - volatile uint32_t rx_vlan; /* vlan tag/priority */ + uint8_t rx_flags; /* type/descriptor flags */ + uint8_t rx_numdesc; /* number of descriptors */ + uint16_t rx_len; /* frame length */ + uint32_t rx_vaddrlo; /* virtual address, lo word */ + uint32_t rx_vaddrhi; /* virtual address, hi word */ + uint32_t rx_stat; /* status */ + uint16_t rx_filter; /* filter status */ + uint16_t rx_hash; /* hash status */ + uint32_t rx_vlan; /* vlan tag/priority */ }; -#define rx_sd txp_rx_u.rx_sd - /* txp_rx_desc.rx_flags */ #define RX_FLAGS_TYPE_M 0x07 /* type mask */ #define RX_FLAGS_TYPE_FRAG 0x00 /* type: fragment */ #define RX_FLAGS_TYPE_DATA 0x01 /* type: data frame */ #define RX_FLAGS_TYPE_CMD 0x02 /* type: command frame */ #define RX_FLAGS_TYPE_OPT 0x03 /* type: options */ #define RX_FLAGS_TYPE_RX 0x04 /* type: command */ #define RX_FLAGS_TYPE_RESP 0x05 /* type: response */ #define RX_FLAGS_RCV_TYPE_M 0x18 /* rcvtype mask */ #define RX_FLAGS_RCV_TYPE_RX 0x00 /* rcvtype: receive */ #define RX_FLAGS_RCV_TYPE_RSP 0x08 /* rcvtype: response */ #define RX_FLAGS_ERROR 0x40 /* error in packet */ /* txp_rx_desc.rx_stat (if rx_flags & RX_FLAGS_ERROR bit set) */ #define RX_ERROR_ADAPTER 0x00000000 /* adapter internal error */ #define RX_ERROR_FIFO 0x00000001 /* fifo underrun */ #define RX_ERROR_BADSSD 0x00000002 /* bad ssd */ #define RX_ERROR_RUNT 0x00000003 /* runt packet */ #define RX_ERROR_CRC 0x00000004 /* bad crc */ #define RX_ERROR_OVERSIZE 0x00000005 /* oversized packet */ #define RX_ERROR_ALIGN 0x00000006 /* alignment error */ #define RX_ERROR_DRIBBLE 0x00000007 /* dribble bit */ +#define RX_ERROR_MASK 0x07 /* txp_rx_desc.rx_stat (if rx_flags & RX_FLAGS_ERROR bit not set) */ #define RX_STAT_PROTO_M 0x00000003 /* protocol mask */ #define RX_STAT_PROTO_UK 0x00000000 /* unknown protocol */ #define RX_STAT_PROTO_IPX 0x00000001 /* IPX */ #define RX_STAT_PROTO_IP 0x00000002 /* IP */ #define RX_STAT_PROTO_RSV 0x00000003 /* reserved */ #define RX_STAT_VLAN 0x00000004 /* vlan tag (in rxd) */ #define RX_STAT_IPFRAG 0x00000008 /* fragment, ipsec not done */ #define RX_STAT_IPSEC 0x00000010 /* ipsec decoded packet */ #define RX_STAT_IPCKSUMBAD 0x00000020 /* ip checksum failed */ #define RX_STAT_UDPCKSUMBAD 0x00000040 /* udp checksum failed */ #define RX_STAT_TCPCKSUMBAD 0x00000080 /* tcp checksum failed */ #define RX_STAT_IPCKSUMGOOD 0x00000100 /* ip checksum succeeded */ #define RX_STAT_UDPCKSUMGOOD 0x00000200 /* udp checksum succeeded */ #define RX_STAT_TCPCKSUMGOOD 0x00000400 /* tcp checksum succeeded */ struct txp_rxbuf_desc { - volatile uint32_t rb_paddrlo; - volatile uint32_t rb_paddrhi; -#ifdef notdef - volatile uint32_t rb_vaddrlo; - volatile uint32_t rb_vaddrhi; -#endif - union { - struct txp_swdesc *rb_sd; - uint64_t rb_dummy; - } txp_rb_u; + uint32_t rb_paddrlo; + uint32_t rb_paddrhi; + uint32_t rb_vaddrlo; + uint32_t rb_vaddrhi; }; -#define rb_sd txp_rb_u.rb_sd - /* Extension descriptor */ struct txp_ext_desc { - volatile uint32_t ext_1; - volatile uint32_t ext_2; - volatile uint32_t ext_3; - volatile uint32_t ext_4; + uint32_t ext_1; + uint32_t ext_2; + uint32_t ext_3; + uint32_t ext_4; }; struct txp_cmd_desc { - volatile uint8_t cmd_flags; - volatile uint8_t cmd_numdesc; - volatile uint16_t cmd_id; - volatile uint16_t cmd_seq; - volatile uint16_t cmd_par1; - volatile uint32_t cmd_par2; - volatile uint32_t cmd_par3; + uint8_t
cmd_flags; + uint8_t cmd_numdesc; + uint16_t cmd_id; + uint16_t cmd_seq; + uint16_t cmd_par1; + uint32_t cmd_par2; + uint32_t cmd_par3; }; #define CMD_FLAGS_TYPE_M 0x07 /* type mask */ #define CMD_FLAGS_TYPE_FRAG 0x00 /* type: fragment */ #define CMD_FLAGS_TYPE_DATA 0x01 /* type: data frame */ #define CMD_FLAGS_TYPE_CMD 0x02 /* type: command frame */ #define CMD_FLAGS_TYPE_OPT 0x03 /* type: options */ #define CMD_FLAGS_TYPE_RX 0x04 /* type: command */ #define CMD_FLAGS_TYPE_RESP 0x05 /* type: response */ #define CMD_FLAGS_RESP 0x40 /* response requested */ #define CMD_FLAGS_VALID 0x80 /* valid descriptor */ struct txp_rsp_desc { - volatile uint8_t rsp_flags; - volatile uint8_t rsp_numdesc; - volatile uint16_t rsp_id; - volatile uint16_t rsp_seq; - volatile uint16_t rsp_par1; - volatile uint32_t rsp_par2; - volatile uint32_t rsp_par3; + uint8_t rsp_flags; + uint8_t rsp_numdesc; + uint16_t rsp_id; + uint16_t rsp_seq; + uint16_t rsp_par1; + uint32_t rsp_par2; + uint32_t rsp_par3; }; #define RSP_FLAGS_TYPE_M 0x07 /* type mask */ #define RSP_FLAGS_TYPE_FRAG 0x00 /* type: fragment */ #define RSP_FLAGS_TYPE_DATA 0x01 /* type: data frame */ #define RSP_FLAGS_TYPE_CMD 0x02 /* type: command frame */ #define RSP_FLAGS_TYPE_OPT 0x03 /* type: options */ #define RSP_FLAGS_TYPE_RX 0x04 /* type: command */ #define RSP_FLAGS_TYPE_RESP 0x05 /* type: response */ #define RSP_FLAGS_ERROR 0x40 /* response error */ struct txp_frag_desc { - volatile uint8_t frag_flags; /* type/descriptor flags */ - volatile uint8_t frag_rsvd1; - volatile uint16_t frag_len; /* bytes in this fragment */ - volatile uint32_t frag_addrlo; /* phys addr low word */ - volatile uint32_t frag_addrhi; /* phys addr high word */ - volatile uint32_t frag_rsvd2; + uint8_t frag_flags; /* type/descriptor flags */ + uint8_t frag_rsvd1; + uint16_t frag_len; /* bytes in this fragment */ + uint32_t frag_addrlo; /* phys addr low word */ + uint32_t frag_addrhi; /* phys addr high word */ + uint32_t frag_rsvd2; }; #define FRAG_FLAGS_TYPE_M 0x07 /* type mask */ #define FRAG_FLAGS_TYPE_FRAG 0x00 /* type: fragment */ #define FRAG_FLAGS_TYPE_DATA 0x01 /* type: data frame */ #define FRAG_FLAGS_TYPE_CMD 0x02 /* type: command frame */ #define FRAG_FLAGS_TYPE_OPT 0x03 /* type: options */ #define FRAG_FLAGS_TYPE_RX 0x04 /* type: command */ #define FRAG_FLAGS_TYPE_RESP 0x05 /* type: response */ struct txp_opt_desc { uint8_t opt_desctype:3, opt_rsvd:1, opt_type:4; uint8_t opt_num; uint16_t opt_dep1; uint32_t opt_dep2; uint32_t opt_dep3; uint32_t opt_dep4; }; struct txp_ipsec_desc { uint8_t ipsec_desctpe:3, ipsec_rsvd:1, ipsec_type:4; uint8_t ipsec_num; uint16_t ipsec_flags; uint16_t ipsec_ah1; uint16_t ipsec_esp1; uint16_t ipsec_ah2; uint16_t ipsec_esp2; uint32_t ipsec_rsvd1; }; struct txp_tcpseg_desc { - uint8_t tcpseg_desctype:3, - tcpseg_rsvd:1, - tcpseg_type:4; - + uint8_t tcpseg_type; uint8_t tcpseg_num; - - uint16_t tcpseg_mss:12, - tcpseg_misc:4; - + uint16_t tcpseg_mss; uint32_t tcpseg_respaddr; uint32_t tcpseg_txbytes; uint32_t tcpseg_lss; }; +#define TCPSEG_DESC_TYPE_M 0x07 /* type mask */ +#define TCPSEG_DESC_TYPE_FRAG 0x00 /* type: fragment */ +#define TCPSEG_DESC_TYPE_DATA 0x01 /* type: data frame */ +#define TCPSEG_DESC_TYPE_CMD 0x02 /* type: command frame */ +#define TCPSEG_DESC_TYPE_OPT 0x03 /* type: options */ +#define TCPSEG_DESC_TYPE_RX 0x04 /* type: command */ +#define TCPSEG_DESC_TYPE_RESP 0x05 /* type: response */ +#define TCPSEG_OPT_IPSEC 0x00 +#define TCPSEG_OPT_TSO 0x10 +#define TCPSEG_MSS_MASK 0x0FFF +#define TCPSEG_MSS_FIRST 0x1000 
+#define TCPSEG_MSS_LAST 0x2000 /* * Transceiver types */ #define TXP_XCVR_10_HDX 0 #define TXP_XCVR_10_FDX 1 #define TXP_XCVR_100_HDX 2 #define TXP_XCVR_100_FDX 3 #define TXP_XCVR_AUTO 4 #define TXP_MEDIA_CRC 0x0004 /* crc strip disable */ #define TXP_MEDIA_CD 0x0010 /* collision detection */ #define TXP_MEDIA_CS 0x0020 /* carrier sense */ #define TXP_MEDIA_POL 0x0400 /* polarity reversed */ #define TXP_MEDIA_NOLINK 0x0800 /* 0 = link, 1 = no link */ /* * receive filter bits (par1 to TXP_CMD_RX_FILTER_{READ|WRITE}) */ #define TXP_RXFILT_DIRECT 0x0001 /* directed packets */ #define TXP_RXFILT_ALLMULTI 0x0002 /* all multicast packets */ #define TXP_RXFILT_BROADCAST 0x0004 /* broadcast packets */ #define TXP_RXFILT_PROMISC 0x0008 /* promiscuous mode */ #define TXP_RXFILT_HASHMULTI 0x0010 /* use multicast filter */ /* * boot record (pointers to rings) */ struct txp_boot_record { - volatile uint32_t br_hostvar_lo; /* host ring pointer */ - volatile uint32_t br_hostvar_hi; - volatile uint32_t br_txlopri_lo; /* tx low pri ring */ - volatile uint32_t br_txlopri_hi; - volatile uint32_t br_txlopri_siz; - volatile uint32_t br_txhipri_lo; /* tx high pri ring */ - volatile uint32_t br_txhipri_hi; - volatile uint32_t br_txhipri_siz; - volatile uint32_t br_rxlopri_lo; /* rx low pri ring */ - volatile uint32_t br_rxlopri_hi; - volatile uint32_t br_rxlopri_siz; - volatile uint32_t br_rxbuf_lo; /* rx buffer ring */ - volatile uint32_t br_rxbuf_hi; - volatile uint32_t br_rxbuf_siz; - volatile uint32_t br_cmd_lo; /* command ring */ - volatile uint32_t br_cmd_hi; - volatile uint32_t br_cmd_siz; - volatile uint32_t br_resp_lo; /* response ring */ - volatile uint32_t br_resp_hi; - volatile uint32_t br_resp_siz; - volatile uint32_t br_zero_lo; /* zero word */ - volatile uint32_t br_zero_hi; - volatile uint32_t br_rxhipri_lo; /* rx high pri ring */ - volatile uint32_t br_rxhipri_hi; - volatile uint32_t br_rxhipri_siz; + uint32_t br_hostvar_lo; /* host ring pointer */ + uint32_t br_hostvar_hi; + uint32_t br_txlopri_lo; /* tx low pri ring */ + uint32_t br_txlopri_hi; + uint32_t br_txlopri_siz; + uint32_t br_txhipri_lo; /* tx high pri ring */ + uint32_t br_txhipri_hi; + uint32_t br_txhipri_siz; + uint32_t br_rxlopri_lo; /* rx low pri ring */ + uint32_t br_rxlopri_hi; + uint32_t br_rxlopri_siz; + uint32_t br_rxbuf_lo; /* rx buffer ring */ + uint32_t br_rxbuf_hi; + uint32_t br_rxbuf_siz; + uint32_t br_cmd_lo; /* command ring */ + uint32_t br_cmd_hi; + uint32_t br_cmd_siz; + uint32_t br_resp_lo; /* response ring */ + uint32_t br_resp_hi; + uint32_t br_resp_siz; + uint32_t br_zero_lo; /* zero word */ + uint32_t br_zero_hi; + uint32_t br_rxhipri_lo; /* rx high pri ring */ + uint32_t br_rxhipri_hi; + uint32_t br_rxhipri_siz; }; /* * hostvar structure (shared with typhoon) */ struct txp_hostvar { - volatile uint32_t hv_rx_hi_read_idx; /* host->arm */ - volatile uint32_t hv_rx_lo_read_idx; /* host->arm */ - volatile uint32_t hv_rx_buf_write_idx; /* host->arm */ - volatile uint32_t hv_resp_read_idx; /* host->arm */ - volatile uint32_t hv_tx_lo_desc_read_idx; /* arm->host */ - volatile uint32_t hv_tx_hi_desc_read_idx; /* arm->host */ - volatile uint32_t hv_rx_lo_write_idx; /* arm->host */ - volatile uint32_t hv_rx_buf_read_idx; /* arm->host */ - volatile uint32_t hv_cmd_read_idx; /* arm->host */ - volatile uint32_t hv_resp_write_idx; /* arm->host */ - volatile uint32_t hv_rx_hi_write_idx; /* arm->host */ + uint32_t hv_rx_hi_read_idx; /* host->arm */ + uint32_t hv_rx_lo_read_idx; /* host->arm */ + uint32_t hv_rx_buf_write_idx;
/* host->arm */ + uint32_t hv_resp_read_idx; /* host->arm */ + uint32_t hv_tx_lo_desc_read_idx; /* arm->host */ + uint32_t hv_tx_hi_desc_read_idx; /* arm->host */ + uint32_t hv_rx_lo_write_idx; /* arm->host */ + uint32_t hv_rx_buf_read_idx; /* arm->host */ + uint32_t hv_cmd_read_idx; /* arm->host */ + uint32_t hv_resp_write_idx; /* arm->host */ + uint32_t hv_rx_hi_write_idx; /* arm->host */ }; /* * TYPHOON status register state (in TXP_A2H_0) */ #define STAT_ROM_CODE 0x00000001 #define STAT_ROM_EEPROM_LOAD 0x00000002 #define STAT_WAITING_FOR_BOOT 0x00000007 #define STAT_RUNNING 0x00000009 #define STAT_WAITING_FOR_HOST_REQUEST 0x0000000d #define STAT_WAITING_FOR_SEGMENT 0x00000010 #define STAT_SLEEPING 0x00000011 #define STAT_HALTED 0x00000014 #define TX_ENTRIES 256 #define RX_ENTRIES 128 #define RXBUF_ENTRIES 256 #define CMD_ENTRIES 32 #define RSP_ENTRIES 32 #define OFFLOAD_TCPCKSUM 0x00000002 /* tcp checksum */ #define OFFLOAD_UDPCKSUM 0x00000004 /* udp checksum */ #define OFFLOAD_IPCKSUM 0x00000008 /* ip checksum */ #define OFFLOAD_IPSEC 0x00000010 /* ipsec enable */ #define OFFLOAD_BCAST 0x00000020 /* broadcast throttle */ #define OFFLOAD_DHCP 0x00000040 /* dhcp prevention */ #define OFFLOAD_VLAN 0x00000080 /* vlan enable */ #define OFFLOAD_FILTER 0x00000100 /* filter enable */ #define OFFLOAD_TCPSEG 0x00000200 /* tcp segmentation */ #define OFFLOAD_MASK 0xfffffffe /* mask off low bit */ /* * Macros for converting array indices to offsets within the descriptor * arrays. The chip operates on offsets, but it's much easier for us * to operate on indices. Assumes descriptor entries are 16 bytes. */ #define TXP_IDX2OFFSET(idx) ((idx) << 4) #define TXP_OFFSET2IDX(off) ((off) >> 4) struct txp_cmd_ring { struct txp_cmd_desc *base; uint32_t lastwrite; uint32_t size; }; struct txp_rsp_ring { struct txp_rsp_desc *base; uint32_t lastwrite; uint32_t size; }; struct txp_tx_ring { struct txp_tx_desc *r_desc; /* base address of descs */ + bus_dma_tag_t r_tag; + bus_dmamap_t r_map; uint32_t r_reg; /* register to activate */ uint32_t r_prod; /* producer */ uint32_t r_cons; /* consumer */ uint32_t r_cnt; /* # descs in use */ - volatile uint32_t *r_off; /* hostvar index pointer */ + uint32_t *r_off; /* hostvar index pointer */ }; struct txp_swdesc { - struct mbuf * sd_mbuf; + struct mbuf *sd_mbuf; bus_dmamap_t sd_map; }; +struct txp_rx_swdesc { + TAILQ_ENTRY(txp_rx_swdesc) sd_next; + struct mbuf *sd_mbuf; + bus_dmamap_t sd_map; +}; + struct txp_rx_ring { struct txp_rx_desc *r_desc; /* base address of descs */ - volatile uint32_t *r_roff; /* hv read offset ptr */ - volatile uint32_t *r_woff; /* hv write offset ptr */ + bus_dma_tag_t r_tag; + bus_dmamap_t r_map; + uint32_t *r_roff; /* hv read offset ptr */ + uint32_t *r_woff; /* hv write offset ptr */ }; struct txp_ldata { - struct txp_boot_record txp_boot; - struct txp_hostvar txp_hostvar; - struct txp_tx_desc txp_txhiring[TX_ENTRIES]; - struct txp_tx_desc txp_txloring[TX_ENTRIES]; - struct txp_rxbuf_desc txp_rxbufs[RXBUF_ENTRIES]; - struct txp_rx_desc txp_rxhiring[RX_ENTRIES]; - struct txp_rx_desc txp_rxloring[RX_ENTRIES]; - struct txp_cmd_desc txp_cmdring[CMD_ENTRIES]; - struct txp_rsp_desc txp_rspring[RSP_ENTRIES]; - uint32_t txp_zero; + struct txp_boot_record *txp_boot; + bus_addr_t txp_boot_paddr; + struct txp_hostvar *txp_hostvar; + bus_addr_t txp_hostvar_paddr; + struct txp_tx_desc *txp_txhiring; + bus_addr_t txp_txhiring_paddr; + struct txp_tx_desc *txp_txloring; + bus_addr_t txp_txloring_paddr; + struct txp_rxbuf_desc *txp_rxbufs; + bus_addr_t 
txp_rxbufs_paddr; + struct txp_rx_desc *txp_rxhiring; + bus_addr_t txp_rxhiring_paddr; + struct txp_rx_desc *txp_rxloring; + bus_addr_t txp_rxloring_paddr; + struct txp_cmd_desc *txp_cmdring; + bus_addr_t txp_cmdring_paddr; + struct txp_rsp_desc *txp_rspring; + bus_addr_t txp_rspring_paddr; + uint32_t *txp_zero; + bus_addr_t txp_zero_paddr; }; +struct txp_chain_data { + bus_dma_tag_t txp_parent_tag; + bus_dma_tag_t txp_boot_tag; + bus_dmamap_t txp_boot_map; + bus_dma_tag_t txp_hostvar_tag; + bus_dmamap_t txp_hostvar_map; + bus_dma_tag_t txp_txhiring_tag; + bus_dmamap_t txp_txhiring_map; + bus_dma_tag_t txp_txloring_tag; + bus_dmamap_t txp_txloring_map; + bus_dma_tag_t txp_tx_tag; + bus_dma_tag_t txp_rx_tag; + bus_dma_tag_t txp_rxbufs_tag; + bus_dmamap_t txp_rxbufs_map; + bus_dma_tag_t txp_rxhiring_tag; + bus_dmamap_t txp_rxhiring_map; + bus_dma_tag_t txp_rxloring_tag; + bus_dmamap_t txp_rxloring_map; + bus_dma_tag_t txp_cmdring_tag; + bus_dmamap_t txp_cmdring_map; + bus_dma_tag_t txp_rspring_tag; + bus_dmamap_t txp_rspring_map; + bus_dma_tag_t txp_zero_tag; + bus_dmamap_t txp_zero_map; +}; + +struct txp_hw_stats { + uint32_t tx_frames; + uint64_t tx_bytes; + uint32_t tx_deferred; + uint32_t tx_late_colls; + uint32_t tx_colls; + uint32_t tx_carrier_lost; + uint32_t tx_multi_colls; + uint32_t tx_excess_colls; + uint32_t tx_fifo_underruns; + uint32_t tx_mcast_oflows; + uint32_t tx_filtered; + uint32_t rx_frames; + uint64_t rx_bytes; + uint32_t rx_fifo_oflows; + uint32_t rx_badssd; + uint32_t rx_crcerrs; + uint32_t rx_lenerrs; + uint32_t rx_bcast_frames; + uint32_t rx_mcast_frames; + uint32_t rx_oflows; + uint32_t rx_filtered; +}; + struct txp_softc { struct ifnet *sc_ifp; device_t sc_dev; struct txp_hostvar *sc_hostvar; struct txp_boot_record *sc_boot; - bus_space_handle_t sc_bh; /* bus handle (regs) */ - bus_space_tag_t sc_bt; /* bus tag (regs) */ struct resource *sc_res; + int sc_res_id; + int sc_res_type; struct resource *sc_irq; void *sc_intrhand; - struct mtx sc_mtx; - struct txp_ldata *sc_ldata; - void *sc_fwbuf; + struct txp_chain_data sc_cdata; + struct txp_ldata sc_ldata; int sc_rxbufprod; + int sc_process_limit; struct txp_cmd_ring sc_cmdring; struct txp_rsp_ring sc_rspring; - struct txp_swdesc sc_txd[TX_ENTRIES]; struct callout sc_tick; struct ifmedia sc_ifmedia; + struct txp_hw_stats sc_ostats; + struct txp_hw_stats sc_stats; struct txp_tx_ring sc_txhir, sc_txlor; + struct txp_swdesc sc_txd[TX_ENTRIES]; struct txp_rxbuf_desc *sc_rxbufs; struct txp_rx_ring sc_rxhir, sc_rxlor; uint16_t sc_xcvr; uint16_t sc_seq; - int sc_cold; - uint32_t sc_rx_capability, sc_tx_capability; + int sc_watchdog_timer; + int sc_if_flags; + int sc_flags; +#define TXP_FLAG_DETACH 0x4000 +#define TXP_FLAG_LINK 0x8000 + TAILQ_HEAD(, txp_rx_swdesc) sc_free_list; + TAILQ_HEAD(, txp_rx_swdesc) sc_busy_list; + struct task sc_int_task; + struct taskqueue *sc_tq; + struct mtx sc_mtx; }; struct txp_fw_file_header { uint8_t magicid[8]; /* TYPHOON\0 */ uint32_t version; uint32_t nsections; uint32_t addr; uint32_t hmac[5]; }; struct txp_fw_section_header { uint32_t nbytes; uint16_t cksum; uint16_t reserved; uint32_t addr; }; #define TXP_MAX_SEGLEN 0xffff -#define TXP_MAX_PKTLEN 0x0800 +#define TXP_MAX_PKTLEN (ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN) -#define WRITE_REG(sc,reg,val) \ - bus_space_write_4((sc)->sc_bt, (sc)->sc_bh, reg, val) -#define READ_REG(sc,reg) \ - bus_space_read_4((sc)->sc_bt, (sc)->sc_bh, reg) +#define WRITE_REG(sc, reg, val) bus_write_4((sc)->sc_res, reg, val) +#define READ_REG(sc, reg) 
bus_read_4((sc)->sc_res, reg) +#define TXP_BARRIER(sc, o, l, f) bus_barrier((sc)->sc_res, (o), (l), (f)) #define TXP_LOCK(sc) mtx_lock(&(sc)->sc_mtx) #define TXP_UNLOCK(sc) mtx_unlock(&(sc)->sc_mtx) #define TXP_LOCK_ASSERT(sc) mtx_assert(&(sc)->sc_mtx, MA_OWNED) +#define TXP_MAXTXSEGS 16 +#define TXP_RXBUF_ALIGN (sizeof(uint32_t)) + +#define TXP_PROC_MIN 16 +#define TXP_PROC_MAX RX_ENTRIES +#define TXP_PROC_DEFAULT (RX_ENTRIES / 2) + +#define TXP_ADDR_HI(x) ((uint64_t)(x) >> 32) +#define TXP_ADDR_LO(x) ((uint64_t)(x) & 0xffffffff) + /* * 3Com PCI vendor ID. */ #define TXP_VENDORID_3COM 0x10B7 /* * 3cR990 device IDs */ #define TXP_DEVICEID_3CR990_TX_95 0x9902 #define TXP_DEVICEID_3CR990_TX_97 0x9903 #define TXP_DEVICEID_3CR990B_TXM 0x9904 #define TXP_DEVICEID_3CR990_SRV_95 0x9908 #define TXP_DEVICEID_3CR990_SRV_97 0x9909 #define TXP_DEVICEID_3CR990B_SRV 0x990A struct txp_type { uint16_t txp_vid; uint16_t txp_did; char *txp_name; }; + +#define TXP_TIMEOUT 10000 +#define TXP_CMD_NOWAIT 0 +#define TXP_CMD_WAIT 1 +#define TXP_TX_TIMEOUT 5 + +/* + * Each frame requires one frame descriptor and one or more + * fragment descriptors. If TSO is used, the frame descriptor block + * requires one or two option frame descriptors, depending on the + * number of fragments. Therefore we will consume at most three + * additional descriptors to use TSO for a frame, plus one reserved + * descriptor in order not to fill the Tx descriptor ring completely. + */ +#define TXP_TXD_RESERVED 4 + +#define TXP_DESC_INC(x, y) ((x) = ((x) + 1) % (y)) Index: stable/7/sys =================================================================== --- stable/7/sys (revision 190246) +++ stable/7/sys (revision 190247) Property changes on: stable/7/sys ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head/sys:r189714
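
A note on the interrupt masks defined near the top of this header: TXP_INTRS is simply TXP_INTR_ALL with the TXP_INT_A2H_3 bit (0x00000010) cleared, because the firmware raises that source periodically. The following is a minimal sketch of how the IER/IMR pair is meant to be programmed with these masks, using the WRITE_REG() accessor; it is an illustration, not code taken from this commit:

	/* Enable every interrupt source except the periodic A2H_3,
	 * then clear the mask register so enabled sources can fire. */
	WRITE_REG(sc, TXP_IER, TXP_INTRS);
	WRITE_REG(sc, TXP_IMR, TXP_INTR_NONE);

	/* Conversely, to quiesce the controller (e.g. on detach): */
	WRITE_REG(sc, TXP_IER, TXP_INTR_NONE);
	WRITE_REG(sc, TXP_IMR, TXP_INTR_ALL);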
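
The command/response machinery pairs a txp_cmd_desc written into the command ring with a txp_rsp_desc the firmware posts back; the two are matched through the sequence number (cmd_seq/rsp_seq, tracked as sc_seq in the softc). A hedged sketch of filling one command descriptor under that model; the htole16() byte-order conversions are an assumption about the firmware, not something this diff states:

	struct txp_cmd_desc cmd;

	bzero(&cmd, sizeof(cmd));
	cmd.cmd_flags = CMD_FLAGS_TYPE_CMD | CMD_FLAGS_VALID;
	if (wait == TXP_CMD_WAIT)
		cmd.cmd_flags |= CMD_FLAGS_RESP; /* firmware posts a txp_rsp_desc */
	cmd.cmd_numdesc = 0;			 /* no extension descriptors */
	cmd.cmd_id = htole16(TXP_CMD_READ_VERSION);
	cmd.cmd_seq = htole16(sc->sc_seq++);	 /* matched against rsp_seq */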
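
The chip addresses its rings by byte offset while the host code thinks in array indices; TXP_IDX2OFFSET()/TXP_OFFSET2IDX() convert between the two (valid only because every descriptor is exactly 16 bytes), and TXP_DESC_INC() advances an index with wrap-around. A small self-contained userland check of that arithmetic:

	#include <assert.h>
	#include <stdint.h>
	#include <stdio.h>

	#define TX_ENTRIES		256
	#define TXP_IDX2OFFSET(idx)	((idx) << 4)
	#define TXP_OFFSET2IDX(off)	((off) >> 4)
	#define TXP_DESC_INC(x, y)	((x) = ((x) + 1) % (y))

	int
	main(void)
	{
		uint32_t prod;

		/* Index 3 lives 48 bytes into the descriptor array. */
		assert(TXP_IDX2OFFSET(3) == 48);
		assert(TXP_OFFSET2IDX(48) == 3);

		/* Advancing the producer past the end wraps to slot 0. */
		prod = TX_ENTRIES - 1;
		TXP_DESC_INC(prod, TX_ENTRIES);
		printf("producer wrapped to %u\n", prod);
		return (0);
	}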
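
The txp_hostvar block is the shared mailbox between host and ARM: the host publishes its read indices and the firmware publishes its write and read progress. Each txp_tx_ring caches a pointer (r_off) to its arm->host read-index word, so Tx reclamation reduces to chasing that index. A hedged sketch of the pattern, with the per-descriptor cleanup step elided; this paraphrases the idea, not the driver's exact code:

	uint32_t cons;

	cons = TXP_OFFSET2IDX(le32toh(*r->r_off)); /* firmware's read index */
	while (r->r_cons != cons) {
		/* ...free the mbuf/DMA map tied to descriptor r->r_cons... */
		TXP_DESC_INC(r->r_cons, TX_ENTRIES);
		r->r_cnt--;
	}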
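
Finally, TXP_ADDR_HI()/TXP_ADDR_LO() split a 64-bit bus address into the lo/hi word pairs used throughout the boot record and descriptors, and TXP_TXD_RESERVED encodes the worst case from the comment above (up to three extra descriptors for a TSO frame plus one slot deliberately kept free). A hedged sketch of both; txp_set_addr() and nsegs are hypothetical names for illustration:

	/* Hypothetical helper: store a DMA address into a lo/hi pair. */
	static void
	txp_set_addr(uint32_t *lo, uint32_t *hi, bus_addr_t paddr)
	{
		*lo = htole32(TXP_ADDR_LO(paddr));
		*hi = htole32(TXP_ADDR_HI(paddr));
	}

	/* Before queueing a frame mapped into nsegs fragments, keep
	 * TXP_TXD_RESERVED descriptors in hand so the ring never fills. */
	if (r->r_cnt + nsegs + TXP_TXD_RESERVED > TX_ENTRIES)
		return (ENOBUFS);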