Index: head/sys/dev/hatm/if_hatm.c =================================================================== --- head/sys/dev/hatm/if_hatm.c (revision 121674) +++ head/sys/dev/hatm/if_hatm.c (revision 121675) @@ -1,2409 +1,2398 @@ /* * Copyright (c) 2001-2003 * Fraunhofer Institute for Open Communication Systems (FhG Fokus). * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * Author: Hartmut Brandt * * ForeHE driver. * * This file contains the module and driver infrastructure stuff as well * as a couple of utility functions and the entire initialisation. 
*/ #include __FBSDID("$FreeBSD$"); #include "opt_inet.h" #include "opt_natm.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef ENABLE_BPF #include #endif #include #include #include #include #include #include #include #include #include #include #include #include static const struct { uint16_t vid; uint16_t did; const char *name; } hatm_devs[] = { { 0x1127, 0x400, "FORE HE" }, { 0, 0, NULL } }; SYSCTL_DECL(_hw_atm); MODULE_DEPEND(hatm, utopia, 1, 1, 1); MODULE_DEPEND(hatm, pci, 1, 1, 1); MODULE_DEPEND(hatm, atm, 1, 1, 1); #define EEPROM_DELAY 400 /* microseconds */ /* Read from EEPROM 0000 0011b */ static const uint32_t readtab[] = { HE_REGM_HOST_PROM_SEL | HE_REGM_HOST_PROM_CLOCK, 0, HE_REGM_HOST_PROM_CLOCK, 0, /* 0 */ HE_REGM_HOST_PROM_CLOCK, 0, /* 0 */ HE_REGM_HOST_PROM_CLOCK, 0, /* 0 */ HE_REGM_HOST_PROM_CLOCK, 0, /* 0 */ HE_REGM_HOST_PROM_CLOCK, 0, /* 0 */ HE_REGM_HOST_PROM_CLOCK, HE_REGM_HOST_PROM_DATA_IN, /* 0 */ HE_REGM_HOST_PROM_CLOCK | HE_REGM_HOST_PROM_DATA_IN, HE_REGM_HOST_PROM_DATA_IN, /* 1 */ HE_REGM_HOST_PROM_CLOCK | HE_REGM_HOST_PROM_DATA_IN, HE_REGM_HOST_PROM_DATA_IN, /* 1 */ }; static const uint32_t clocktab[] = { 0, HE_REGM_HOST_PROM_CLOCK, 0, HE_REGM_HOST_PROM_CLOCK, 0, HE_REGM_HOST_PROM_CLOCK, 0, HE_REGM_HOST_PROM_CLOCK, 0, HE_REGM_HOST_PROM_CLOCK, 0, HE_REGM_HOST_PROM_CLOCK, 0, HE_REGM_HOST_PROM_CLOCK, 0, HE_REGM_HOST_PROM_CLOCK, 0 }; /* * Convert cell rate to ATM Forum format */ u_int hatm_cps2atmf(uint32_t pcr) { u_int e; if (pcr == 0) return (0); pcr <<= 9; e = 0; while (pcr > (1024 - 1)) { e++; pcr >>= 1; } return ((1 << 14) | (e << 9) | (pcr & 0x1ff)); } u_int hatm_atmf2cps(uint32_t fcr) { fcr &= 0x7fff; return ((1 << ((fcr >> 9) & 0x1f)) * (512 + (fcr & 0x1ff)) / 512 * (fcr >> 14)); } /************************************************************ * * Initialisation */ /* * Probe for a HE controller */ static int hatm_probe(device_t dev) { int i; for (i = 0; hatm_devs[i].name; i++) if (pci_get_vendor(dev) == hatm_devs[i].vid && pci_get_device(dev) == hatm_devs[i].did) { device_set_desc(dev, hatm_devs[i].name); return (0); } return (ENXIO); } /* * Allocate and map DMA-able memory. We support only contiguous mappings. */ static void dmaload_helper(void *arg, bus_dma_segment_t *segs, int nsegs, int error) { if (error) return; KASSERT(nsegs == 1, ("too many segments for DMA: %d", nsegs)); KASSERT(segs[0].ds_addr <= 0xffffffffUL, ("phys addr too large %lx", (u_long)segs[0].ds_addr)); *(bus_addr_t *)arg = segs[0].ds_addr; } static int hatm_alloc_dmamem(struct hatm_softc *sc, const char *what, struct dmamem *mem) { int error; mem->base = NULL; /* * Alignement does not work in the bus_dmamem_alloc function below * on FreeBSD. malloc seems to align objects at least to the object * size so increase the size to the alignment if the size is lesser * than the alignemnt. * XXX on sparc64 this is (probably) not needed. 
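The two rate-conversion routines earlier in this hunk implement the ATM Forum cell-rate format: bit 14 flags a non-zero rate, bits 13..9 hold a 5-bit exponent e, and bits 8..0 hold a 9-bit mantissa m with an implied leading bit, so a value decodes to (1 << e) * (512 + m) / 512 cells per second. A stand-alone round trip using the same math as hatm_cps2atmf()/hatm_atmf2cps() (the sample rate is illustrative):

#include <stdint.h>
#include <stdio.h>

/* Same math as hatm_cps2atmf(): normalize the rate to a 10-bit value,
 * counting shifts into the exponent, then drop the implied leading bit. */
static unsigned
cps2atmf(uint32_t pcr)
{
	unsigned e = 0;

	if (pcr == 0)
		return (0);
	pcr <<= 9;
	while (pcr > 1023) {
		e++;
		pcr >>= 1;
	}
	return ((1u << 14) | (e << 9) | (pcr & 0x1ff));
}

/* Same math as hatm_atmf2cps(); the final (fcr >> 14) zeroes the result
 * when the non-zero flag is clear. */
static unsigned
atmf2cps(uint32_t fcr)
{
	fcr &= 0x7fff;
	return ((1u << ((fcr >> 9) & 0x1f)) * (512 + (fcr & 0x1ff)) / 512 *
	    (fcr >> 14));
}

int
main(void)
{
	uint32_t pcr = 353208;		/* roughly the OC-3c cell rate */
	unsigned fcr = cps2atmf(pcr);

	/* prints: 353208 cps -> 0x64b1 -> 352768 cps */
	printf("%u cps -> 0x%04x -> %u cps\n", pcr, fcr, atmf2cps(fcr));
	return (0);
}

The round trip comes back slightly low because the 9-bit mantissa truncates.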
*/ if (mem->size < mem->align) mem->size = mem->align; error = bus_dma_tag_create(sc->parent_tag, mem->align, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, mem->size, 1, BUS_SPACE_MAXSIZE_32BIT, BUS_DMA_ALLOCNOW, NULL, NULL, &mem->tag); if (error) { if_printf(&sc->ifatm.ifnet, "DMA tag create (%s)\n", what); return (error); } error = bus_dmamem_alloc(mem->tag, &mem->base, 0, &mem->map); if (error) { if_printf(&sc->ifatm.ifnet, "DMA mem alloc (%s): %d\n", what, error); bus_dma_tag_destroy(mem->tag); mem->base = NULL; return (error); } error = bus_dmamap_load(mem->tag, mem->map, mem->base, mem->size, dmaload_helper, &mem->paddr, BUS_DMA_NOWAIT); if (error) { if_printf(&sc->ifatm.ifnet, "DMA map load (%s): %d\n", what, error); bus_dmamem_free(mem->tag, mem->base, mem->map); bus_dma_tag_destroy(mem->tag); mem->base = NULL; return (error); } DBG(sc, DMA, ("%s S/A/V/P 0x%x 0x%x %p 0x%lx", what, mem->size, mem->align, mem->base, (u_long)mem->paddr)); return (0); } /* * Destroy all the resources of an DMA-able memory region. */ static void hatm_destroy_dmamem(struct dmamem *mem) { if (mem->base != NULL) { bus_dmamap_unload(mem->tag, mem->map); bus_dmamem_free(mem->tag, mem->base, mem->map); (void)bus_dma_tag_destroy(mem->tag); mem->base = NULL; } } /* * Initialize/destroy DMA maps for the large pool 0 */ static void hatm_destroy_rmaps(struct hatm_softc *sc) { u_int b; DBG(sc, ATTACH, ("destroying rmaps and lbuf pointers...")); if (sc->rmaps != NULL) { for (b = 0; b < sc->lbufs_size; b++) bus_dmamap_destroy(sc->mbuf_tag, sc->rmaps[b]); free(sc->rmaps, M_DEVBUF); } if (sc->lbufs != NULL) free(sc->lbufs, M_DEVBUF); } static void hatm_init_rmaps(struct hatm_softc *sc) { u_int b; int err; DBG(sc, ATTACH, ("allocating rmaps and lbuf pointers...")); sc->lbufs = malloc(sizeof(sc->lbufs[0]) * sc->lbufs_size, M_DEVBUF, M_ZERO | M_WAITOK); /* allocate and create the DMA maps for the large pool */ sc->rmaps = malloc(sizeof(sc->rmaps[0]) * sc->lbufs_size, M_DEVBUF, M_WAITOK); for (b = 0; b < sc->lbufs_size; b++) { err = bus_dmamap_create(sc->mbuf_tag, 0, &sc->rmaps[b]); if (err != 0) panic("bus_dmamap_create: %d\n", err); } } /* * Initialize and destroy small mbuf page pointers and pages */ static void hatm_destroy_smbufs(struct hatm_softc *sc) { u_int i, b; struct mbuf_page *pg; if (sc->mbuf_pages != NULL) { for (i = 0; i < sc->mbuf_npages; i++) { pg = sc->mbuf_pages[i]; for (b = 0; b < pg->hdr.nchunks; b++) { if (MBUF_TST_BIT(pg->hdr.card, b)) if_printf(&sc->ifatm.ifnet, "%s -- mbuf page=%u card buf %u\n", __func__, i, b); - if (MBUF_TST_BIT(pg->hdr.used, b)) - if_printf(&sc->ifatm.ifnet, - "%s -- mbuf page=%u used buf %u\n", - __func__, i, b); } bus_dmamap_unload(sc->mbuf_tag, pg->hdr.map); bus_dmamap_destroy(sc->mbuf_tag, pg->hdr.map); free(pg, M_DEVBUF); } free(sc->mbuf_pages, M_DEVBUF); } } static void hatm_init_smbufs(struct hatm_softc *sc) { sc->mbuf_pages = malloc(sizeof(sc->mbuf_pages[0]) * HE_CONFIG_MAX_MBUF_PAGES, M_DEVBUF, M_WAITOK); sc->mbuf_npages = 0; } /* * Initialize/destroy TPDs. This is called from attach/detach. 
*/ static void hatm_destroy_tpds(struct hatm_softc *sc) { struct tpd *t; if (sc->tpds.base == NULL) return; DBG(sc, ATTACH, ("releasing TPDs ...")); if (sc->tpd_nfree != sc->tpd_total) if_printf(&sc->ifatm.ifnet, "%u tpds still in use from %u\n", sc->tpd_total - sc->tpd_nfree, sc->tpd_total); while ((t = SLIST_FIRST(&sc->tpd_free)) != NULL) { SLIST_REMOVE_HEAD(&sc->tpd_free, link); bus_dmamap_destroy(sc->tx_tag, t->map); } hatm_destroy_dmamem(&sc->tpds); free(sc->tpd_used, M_DEVBUF); DBG(sc, ATTACH, ("... done")); } static int hatm_init_tpds(struct hatm_softc *sc) { int error; u_int i; struct tpd *t; DBG(sc, ATTACH, ("allocating %u TPDs and maps ...", sc->tpd_total)); error = hatm_alloc_dmamem(sc, "TPD memory", &sc->tpds); if (error != 0) { DBG(sc, ATTACH, ("... dmamem error=%d", error)); return (error); } /* put all the TPDs on the free list and allocate DMA maps */ for (i = 0; i < sc->tpd_total; i++) { t = TPD_ADDR(sc, i); t->no = i; t->mbuf = NULL; error = bus_dmamap_create(sc->tx_tag, 0, &t->map); if (error != 0) { DBG(sc, ATTACH, ("... dmamap error=%d", error)); while ((t = SLIST_FIRST(&sc->tpd_free)) != NULL) { SLIST_REMOVE_HEAD(&sc->tpd_free, link); bus_dmamap_destroy(sc->tx_tag, t->map); } hatm_destroy_dmamem(&sc->tpds); return (error); } SLIST_INSERT_HEAD(&sc->tpd_free, t, link); } /* allocate and zero bitmap */ sc->tpd_used = malloc(sizeof(uint8_t) * (sc->tpd_total + 7) / 8, M_DEVBUF, M_ZERO | M_WAITOK); sc->tpd_nfree = sc->tpd_total; DBG(sc, ATTACH, ("... done")); return (0); } /* * Free all the TPDs that were given to the card. * An mbuf chain may be attached to a TPD - free it also and * unload its associated DMA map. */ static void hatm_stop_tpds(struct hatm_softc *sc) { u_int i; struct tpd *t; DBG(sc, ATTACH, ("free TPDs ...")); for (i = 0; i < sc->tpd_total; i++) { if (TPD_TST_USED(sc, i)) { t = TPD_ADDR(sc, i); if (t->mbuf) { m_freem(t->mbuf); t->mbuf = NULL; bus_dmamap_unload(sc->tx_tag, t->map); } TPD_CLR_USED(sc, i); SLIST_INSERT_HEAD(&sc->tpd_free, t, link); sc->tpd_nfree++; } } } /* * This frees ALL resources of this interface and leaves the structure * in an indeterminate state. This is called just before detaching or * on a failed attach. No lock should be held. */ static void hatm_destroy(struct hatm_softc *sc) { u_int cid; bus_teardown_intr(sc->dev, sc->irqres, sc->ih); hatm_destroy_rmaps(sc); hatm_destroy_smbufs(sc); hatm_destroy_tpds(sc); if (sc->vcc_zone != NULL) { for (cid = 0; cid < HE_MAX_VCCS; cid++) if (sc->vccs[cid] != NULL) uma_zfree(sc->vcc_zone, sc->vccs[cid]); uma_zdestroy(sc->vcc_zone); } /* * Release all memory allocated to the various queues and * Status pages. These have their own flag which shows whether * they are really allocated.
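The TPD bookkeeping in this hunk packs one status bit per descriptor: hatm_init_tpds() sizes tpd_used as (tpd_total + 7) / 8 bytes, and TPD_TST_USED()/TPD_CLR_USED() (like the MBUF_*_BIT() macros used for the small-buffer pages) test and clear bits in that array. The macros live in the driver's header; a plausible stand-alone equivalent, with invented names, is:

#include <stdint.h>

/* One status bit per descriptor, eight to a byte, matching the
 * (n + 7) / 8 allocation above. Names are invented, not the driver's. */
static inline void
bit_set(uint8_t *map, unsigned n)
{
	map[n >> 3] |= (uint8_t)(1u << (n & 7));
}

static inline void
bit_clr(uint8_t *map, unsigned n)
{
	map[n >> 3] &= (uint8_t)~(1u << (n & 7));
}

static inline int
bit_tst(const uint8_t *map, unsigned n)
{
	return ((map[n >> 3] >> (n & 7)) & 1);
}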
*/ hatm_destroy_dmamem(&sc->irq_0.mem); hatm_destroy_dmamem(&sc->rbp_s0.mem); hatm_destroy_dmamem(&sc->rbp_l0.mem); hatm_destroy_dmamem(&sc->rbp_s1.mem); hatm_destroy_dmamem(&sc->rbrq_0.mem); hatm_destroy_dmamem(&sc->rbrq_1.mem); hatm_destroy_dmamem(&sc->tbrq.mem); hatm_destroy_dmamem(&sc->tpdrq.mem); hatm_destroy_dmamem(&sc->hsp_mem); if (sc->irqres != NULL) bus_release_resource(sc->dev, SYS_RES_IRQ, sc->irqid, sc->irqres); if (sc->tx_tag != NULL) if (bus_dma_tag_destroy(sc->tx_tag)) if_printf(&sc->ifatm.ifnet, "mbuf DMA tag busy\n"); if (sc->mbuf_tag != NULL) if (bus_dma_tag_destroy(sc->mbuf_tag)) if_printf(&sc->ifatm.ifnet, "mbuf DMA tag busy\n"); if (sc->parent_tag != NULL) if (bus_dma_tag_destroy(sc->parent_tag)) if_printf(&sc->ifatm.ifnet, "parent DMA tag busy\n"); if (sc->memres != NULL) bus_release_resource(sc->dev, SYS_RES_MEMORY, sc->memid, sc->memres); sysctl_ctx_free(&sc->sysctl_ctx); cv_destroy(&sc->cv_rcclose); cv_destroy(&sc->vcc_cv); - mtx_destroy(&sc->mbuf0_mtx); - mtx_destroy(&sc->mbuf1_mtx); mtx_destroy(&sc->mtx); } /* * 4.4 Card reset */ static int hatm_reset(struct hatm_softc *sc) { u_int v, count; WRITE4(sc, HE_REGO_RESET_CNTL, 0x00); BARRIER_W(sc); WRITE4(sc, HE_REGO_RESET_CNTL, 0xff); BARRIER_RW(sc); count = 0; while (((v = READ4(sc, HE_REGO_RESET_CNTL)) & HE_REGM_RESET_STATE) == 0) { BARRIER_R(sc); if (++count == 100) { if_printf(&sc->ifatm.ifnet, "reset failed\n"); return (ENXIO); } DELAY(1000); } return (0); } /* * 4.5 Set Bus Width */ static void hatm_init_bus_width(struct hatm_softc *sc) { uint32_t v, v1; v = READ4(sc, HE_REGO_HOST_CNTL); BARRIER_R(sc); if (v & HE_REGM_HOST_BUS64) { sc->pci64 = 1; v1 = pci_read_config(sc->dev, HE_PCIR_GEN_CNTL_0, 4); v1 |= HE_PCIM_CTL0_64BIT; pci_write_config(sc->dev, HE_PCIR_GEN_CNTL_0, v1, 4); v |= HE_REGM_HOST_DESC_RD64 | HE_REGM_HOST_DATA_RD64 | HE_REGM_HOST_DATA_WR64; WRITE4(sc, HE_REGO_HOST_CNTL, v); BARRIER_W(sc); } else { sc->pci64 = 0; v = pci_read_config(sc->dev, HE_PCIR_GEN_CNTL_0, 4); v &= ~HE_PCIM_CTL0_64BIT; pci_write_config(sc->dev, HE_PCIR_GEN_CNTL_0, v, 4); } } /* * 4.6 Set Host Endianess */ static void hatm_init_endianess(struct hatm_softc *sc) { uint32_t v; v = READ4(sc, HE_REGO_LB_SWAP); BARRIER_R(sc); #if BYTE_ORDER == BIG_ENDIAN v |= HE_REGM_LBSWAP_INTR_SWAP | HE_REGM_LBSWAP_DESC_WR_SWAP | HE_REGM_LBSWAP_BIG_ENDIAN; v &= ~(HE_REGM_LBSWAP_DATA_WR_SWAP | HE_REGM_LBSWAP_DESC_RD_SWAP | HE_REGM_LBSWAP_DATA_RD_SWAP); #else v &= ~(HE_REGM_LBSWAP_DATA_WR_SWAP | HE_REGM_LBSWAP_DESC_RD_SWAP | HE_REGM_LBSWAP_DATA_RD_SWAP | HE_REGM_LBSWAP_INTR_SWAP | HE_REGM_LBSWAP_DESC_WR_SWAP | HE_REGM_LBSWAP_BIG_ENDIAN); #endif if (sc->he622) v |= HE_REGM_LBSWAP_XFER_SIZE; WRITE4(sc, HE_REGO_LB_SWAP, v); BARRIER_W(sc); } /* * 4.7 Read EEPROM */ static uint8_t hatm_read_prom_byte(struct hatm_softc *sc, u_int addr) { uint32_t val, tmp_read, byte_read; u_int i, j; int n; val = READ4(sc, HE_REGO_HOST_CNTL); val &= HE_REGM_HOST_PROM_BITS; BARRIER_R(sc); val |= HE_REGM_HOST_PROM_WREN; WRITE4(sc, HE_REGO_HOST_CNTL, val); BARRIER_W(sc); /* send READ */ for (i = 0; i < sizeof(readtab) / sizeof(readtab[0]); i++) { WRITE4(sc, HE_REGO_HOST_CNTL, val | readtab[i]); BARRIER_W(sc); DELAY(EEPROM_DELAY); } /* send ADDRESS */ for (n = 7, j = 0; n >= 0; n--) { WRITE4(sc, HE_REGO_HOST_CNTL, val | clocktab[j++] | (((addr >> n) & 1 ) << HE_REGS_HOST_PROM_DATA_IN)); BARRIER_W(sc); DELAY(EEPROM_DELAY); WRITE4(sc, HE_REGO_HOST_CNTL, val | clocktab[j++] | (((addr >> n) & 1 ) << HE_REGS_HOST_PROM_DATA_IN)); BARRIER_W(sc); DELAY(EEPROM_DELAY); } val &= 
~HE_REGM_HOST_PROM_WREN; WRITE4(sc, HE_REGO_HOST_CNTL, val); BARRIER_W(sc); /* read DATA */ byte_read = 0; for (n = 7, j = 0; n >= 0; n--) { WRITE4(sc, HE_REGO_HOST_CNTL, val | clocktab[j++]); BARRIER_W(sc); DELAY(EEPROM_DELAY); tmp_read = READ4(sc, HE_REGO_HOST_CNTL); byte_read |= (uint8_t)(((tmp_read & HE_REGM_HOST_PROM_DATA_OUT) >> HE_REGS_HOST_PROM_DATA_OUT) << n); WRITE4(sc, HE_REGO_HOST_CNTL, val | clocktab[j++]); BARRIER_W(sc); DELAY(EEPROM_DELAY); } WRITE4(sc, HE_REGO_HOST_CNTL, val | clocktab[j++]); BARRIER_W(sc); DELAY(EEPROM_DELAY); return (byte_read); } static void hatm_init_read_eeprom(struct hatm_softc *sc) { u_int n, count; u_char byte; uint32_t v; for (n = count = 0; count < HE_EEPROM_PROD_ID_LEN; count++) { byte = hatm_read_prom_byte(sc, HE_EEPROM_PROD_ID + count); if (n > 0 || byte != ' ') sc->prod_id[n++] = byte; } while (n > 0 && sc->prod_id[n-1] == ' ') n--; sc->prod_id[n] = '\0'; for (n = count = 0; count < HE_EEPROM_REV_LEN; count++) { byte = hatm_read_prom_byte(sc, HE_EEPROM_REV + count); if (n > 0 || byte != ' ') sc->rev[n++] = byte; } while (n > 0 && sc->rev[n-1] == ' ') n--; sc->rev[n] = '\0'; sc->ifatm.mib.hw_version = sc->rev[0]; sc->ifatm.mib.serial = hatm_read_prom_byte(sc, HE_EEPROM_M_SN + 0) << 0; sc->ifatm.mib.serial |= hatm_read_prom_byte(sc, HE_EEPROM_M_SN + 1) << 8; sc->ifatm.mib.serial |= hatm_read_prom_byte(sc, HE_EEPROM_M_SN + 2) << 16; sc->ifatm.mib.serial |= hatm_read_prom_byte(sc, HE_EEPROM_M_SN + 3) << 24; v = hatm_read_prom_byte(sc, HE_EEPROM_MEDIA + 0) << 0; v |= hatm_read_prom_byte(sc, HE_EEPROM_MEDIA + 1) << 8; v |= hatm_read_prom_byte(sc, HE_EEPROM_MEDIA + 2) << 16; v |= hatm_read_prom_byte(sc, HE_EEPROM_MEDIA + 3) << 24; switch (v) { case HE_MEDIA_UTP155: sc->ifatm.mib.media = IFM_ATM_UTP_155; sc->ifatm.mib.pcr = ATM_RATE_155M; break; case HE_MEDIA_MMF155: sc->ifatm.mib.media = IFM_ATM_MM_155; sc->ifatm.mib.pcr = ATM_RATE_155M; break; case HE_MEDIA_MMF622: sc->ifatm.mib.media = IFM_ATM_MM_622; sc->ifatm.mib.device = ATM_DEVICE_HE622; sc->ifatm.mib.pcr = ATM_RATE_622M; sc->he622 = 1; break; case HE_MEDIA_SMF155: sc->ifatm.mib.media = IFM_ATM_SM_155; sc->ifatm.mib.pcr = ATM_RATE_155M; break; case HE_MEDIA_SMF622: sc->ifatm.mib.media = IFM_ATM_SM_622; sc->ifatm.mib.device = ATM_DEVICE_HE622; sc->ifatm.mib.pcr = ATM_RATE_622M; sc->he622 = 1; break; } sc->ifatm.mib.esi[0] = hatm_read_prom_byte(sc, HE_EEPROM_MAC + 0); sc->ifatm.mib.esi[1] = hatm_read_prom_byte(sc, HE_EEPROM_MAC + 1); sc->ifatm.mib.esi[2] = hatm_read_prom_byte(sc, HE_EEPROM_MAC + 2); sc->ifatm.mib.esi[3] = hatm_read_prom_byte(sc, HE_EEPROM_MAC + 3); sc->ifatm.mib.esi[4] = hatm_read_prom_byte(sc, HE_EEPROM_MAC + 4); sc->ifatm.mib.esi[5] = hatm_read_prom_byte(sc, HE_EEPROM_MAC + 5); } /* * Clear unused interrupt queue */ static void hatm_clear_irq(struct hatm_softc *sc, u_int group) { WRITE4(sc, HE_REGO_IRQ_BASE(group), 0); WRITE4(sc, HE_REGO_IRQ_HEAD(group), 0); WRITE4(sc, HE_REGO_IRQ_CNTL(group), 0); WRITE4(sc, HE_REGO_IRQ_DATA(group), 0); } /* * 4.10 Initialize interrupt queues */ static void hatm_init_irq(struct hatm_softc *sc, struct heirq *q, u_int group) { u_int i; if (q->size == 0) { hatm_clear_irq(sc, group); return; } q->group = group; q->sc = sc; q->irq = q->mem.base; q->head = 0; q->tailp = q->irq + (q->size - 1); *q->tailp = 0; for (i = 0; i < q->size; i++) q->irq[i] = HE_REGM_ITYPE_INVALID; WRITE4(sc, HE_REGO_IRQ_BASE(group), q->mem.paddr); WRITE4(sc, HE_REGO_IRQ_HEAD(group), ((q->size - 1) << HE_REGS_IRQ_HEAD_SIZE) | (q->thresh << HE_REGS_IRQ_HEAD_THRESH)); WRITE4(sc, 
HE_REGO_IRQ_CNTL(group), q->line); WRITE4(sc, HE_REGO_IRQ_DATA(group), 0); } /* * 5.1.3 Initialize connection memory */ static void hatm_init_cm(struct hatm_softc *sc) { u_int rsra, mlbm, rabr, numbuffs; u_int tsra, tabr, mtpd; u_int n; for (n = 0; n < HE_CONFIG_TXMEM; n++) WRITE_TCM4(sc, n, 0); for (n = 0; n < HE_CONFIG_RXMEM; n++) WRITE_RCM4(sc, n, 0); numbuffs = sc->r0_numbuffs + sc->r1_numbuffs + sc->tx_numbuffs; rsra = 0; mlbm = ((rsra + sc->ifatm.mib.max_vccs * 8) + 0x7ff) & ~0x7ff; rabr = ((mlbm + numbuffs * 2) + 0x7ff) & ~0x7ff; sc->rsrb = ((rabr + 2048) + (2 * sc->ifatm.mib.max_vccs - 1)) & ~(2 * sc->ifatm.mib.max_vccs - 1); tsra = 0; sc->tsrb = tsra + sc->ifatm.mib.max_vccs * 8; sc->tsrc = sc->tsrb + sc->ifatm.mib.max_vccs * 4; sc->tsrd = sc->tsrc + sc->ifatm.mib.max_vccs * 2; tabr = sc->tsrd + sc->ifatm.mib.max_vccs * 1; mtpd = ((tabr + 1024) + (16 * sc->ifatm.mib.max_vccs - 1)) & ~(16 * sc->ifatm.mib.max_vccs - 1); DBG(sc, ATTACH, ("rsra=%x mlbm=%x rabr=%x rsrb=%x", rsra, mlbm, rabr, sc->rsrb)); DBG(sc, ATTACH, ("tsra=%x tsrb=%x tsrc=%x tsrd=%x tabr=%x mtpd=%x", tsra, sc->tsrb, sc->tsrc, sc->tsrd, tabr, mtpd)); WRITE4(sc, HE_REGO_TSRB_BA, sc->tsrb); WRITE4(sc, HE_REGO_TSRC_BA, sc->tsrc); WRITE4(sc, HE_REGO_TSRD_BA, sc->tsrd); WRITE4(sc, HE_REGO_TMABR_BA, tabr); WRITE4(sc, HE_REGO_TPD_BA, mtpd); WRITE4(sc, HE_REGO_RCMRSRB_BA, sc->rsrb); WRITE4(sc, HE_REGO_RCMLBM_BA, mlbm); WRITE4(sc, HE_REGO_RCMABR_BA, rabr); BARRIER_W(sc); } /* * 5.1.4 Initialize Local buffer Pools */ static void hatm_init_rx_buffer_pool(struct hatm_softc *sc, u_int num, /* bank */ u_int start, /* start row */ u_int numbuffs /* number of entries */ ) { u_int row_size; /* bytes per row */ uint32_t row_addr; /* start address of this row */ u_int lbuf_size; /* bytes per lbuf */ u_int lbufs_per_row; /* number of lbufs per memory row */ uint32_t lbufd_index; /* index of lbuf descriptor */ uint32_t lbufd_addr; /* address of lbuf descriptor */ u_int lbuf_row_cnt; /* current lbuf in current row */ uint32_t lbuf_addr; /* address of current buffer */ u_int i; row_size = sc->bytes_per_row; row_addr = start * row_size; lbuf_size = sc->cells_per_lbuf * 48; lbufs_per_row = sc->cells_per_row / sc->cells_per_lbuf; /* descriptor index */ lbufd_index = num; /* 2 words per entry */ lbufd_addr = READ4(sc, HE_REGO_RCMLBM_BA) + lbufd_index * 2; /* write head of queue */ WRITE4(sc, HE_REGO_RLBF_H(num), lbufd_index); lbuf_row_cnt = 0; for (i = 0; i < numbuffs; i++) { lbuf_addr = (row_addr + lbuf_row_cnt * lbuf_size) / 32; WRITE_RCM4(sc, lbufd_addr, lbuf_addr); lbufd_index += 2; WRITE_RCM4(sc, lbufd_addr + 1, lbufd_index); if (++lbuf_row_cnt == lbufs_per_row) { lbuf_row_cnt = 0; row_addr += row_size; } lbufd_addr += 2 * 2; } WRITE4(sc, HE_REGO_RLBF_T(num), lbufd_index - 2); WRITE4(sc, HE_REGO_RLBF_C(num), numbuffs); BARRIER_W(sc); } static void hatm_init_tx_buffer_pool(struct hatm_softc *sc, u_int start, /* start row */ u_int numbuffs /* number of entries */ ) { u_int row_size; /* bytes per row */ uint32_t row_addr; /* start address of this row */ u_int lbuf_size; /* bytes per lbuf */ u_int lbufs_per_row; /* number of lbufs per memory row */ uint32_t lbufd_index; /* index of lbuf descriptor */ uint32_t lbufd_addr; /* address of lbuf descriptor */ u_int lbuf_row_cnt; /* current lbuf in current row */ uint32_t lbuf_addr; /* address of current buffer */ u_int i; row_size = sc->bytes_per_row; row_addr = start * row_size; lbuf_size = sc->cells_per_lbuf * 48; lbufs_per_row = sc->cells_per_row / sc->cells_per_lbuf; /* descriptor index */
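To make the descriptor arithmetic in hatm_init_rx_buffer_pool() concrete: each lbuf descriptor occupies two connection-memory words {buffer address in 32-byte units, index of the next descriptor}, and the receive pools step the descriptor index by two per buffer (the transmit pool, continued below, steps by one). A stand-alone model of the chain that gets written; all geometry values here are invented:

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	unsigned cells_per_row = 256, cells_per_lbuf = 2;
	unsigned bytes_per_row = 16384, numbuffs = 8;
	unsigned lbuf_size = cells_per_lbuf * 48;	/* bytes per lbuf */
	unsigned lbufs_per_row = cells_per_row / cells_per_lbuf;
	uint32_t row_addr = 0;		/* start row 0 */
	uint32_t idx = 0;		/* pool 0: head descriptor index */
	unsigned row_cnt = 0, i;

	for (i = 0; i < numbuffs; i++) {
		uint32_t buf = (row_addr + row_cnt * lbuf_size) / 32;

		idx += 2;		/* receive pools skip every other slot */
		printf("desc %u: buf=0x%x next=%u\n", i, buf, idx);
		if (++row_cnt == lbufs_per_row) {
			row_cnt = 0;
			row_addr += bytes_per_row;
		}
	}
	printf("tail index: %u\n", idx - 2);	/* what goes into RLBF_T */
	return (0);
}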
lbufd_index = sc->r0_numbuffs + sc->r1_numbuffs; /* 2 words per entry */ lbufd_addr = READ4(sc, HE_REGO_RCMLBM_BA) + lbufd_index * 2; /* write head of queue */ WRITE4(sc, HE_REGO_TLBF_H, lbufd_index); lbuf_row_cnt = 0; for (i = 0; i < numbuffs; i++) { lbuf_addr = (row_addr + lbuf_row_cnt * lbuf_size) / 32; WRITE_RCM4(sc, lbufd_addr, lbuf_addr); lbufd_index++; WRITE_RCM4(sc, lbufd_addr + 1, lbufd_index); if (++lbuf_row_cnt == lbufs_per_row) { lbuf_row_cnt = 0; row_addr += row_size; } lbufd_addr += 2; } WRITE4(sc, HE_REGO_TLBF_T, lbufd_index - 1); BARRIER_W(sc); } /* * 5.1.5 Initialize Intermediate Receive Queues */ static void hatm_init_imed_queues(struct hatm_softc *sc) { u_int n; if (sc->he622) { for (n = 0; n < 8; n++) { WRITE4(sc, HE_REGO_INMQ_S(n), 0x10*n+0x000f); WRITE4(sc, HE_REGO_INMQ_L(n), 0x10*n+0x200f); } } else { for (n = 0; n < 8; n++) { WRITE4(sc, HE_REGO_INMQ_S(n), n); WRITE4(sc, HE_REGO_INMQ_L(n), n+0x8); } } } /* * 5.1.7 Init CS block */ static void hatm_init_cs_block(struct hatm_softc *sc) { u_int n, i; u_int clkfreg, cellrate, decr, tmp; static const uint32_t erthr[2][5][3] = HE_REGT_CS_ERTHR; static const uint32_t erctl[2][3] = HE_REGT_CS_ERCTL; static const uint32_t erstat[2][2] = HE_REGT_CS_ERSTAT; static const uint32_t rtfwr[2] = HE_REGT_CS_RTFWR; static const uint32_t rtatr[2] = HE_REGT_CS_RTATR; static const uint32_t bwalloc[2][6] = HE_REGT_CS_BWALLOC; static const uint32_t orcf[2][2] = HE_REGT_CS_ORCF; /* Clear Rate Controller Start Times and Occupied Flags */ for (n = 0; n < 32; n++) WRITE_MBOX4(sc, HE_REGO_CS_STTIM(n), 0); clkfreg = sc->he622 ? HE_622_CLOCK : HE_155_CLOCK; cellrate = sc->he622 ? ATM_RATE_622M : ATM_RATE_155M; decr = cellrate / 32; for (n = 0; n < 16; n++) { tmp = clkfreg / cellrate; WRITE_MBOX4(sc, HE_REGO_CS_TGRLD(n), tmp - 1); cellrate -= decr; } i = (sc->cells_per_lbuf == 2) ? 0 :(sc->cells_per_lbuf == 4) ? 
1 : 2; /* table 5.2 */ WRITE_MBOX4(sc, HE_REGO_CS_ERTHR0, erthr[sc->he622][0][i]); WRITE_MBOX4(sc, HE_REGO_CS_ERTHR1, erthr[sc->he622][1][i]); WRITE_MBOX4(sc, HE_REGO_CS_ERTHR2, erthr[sc->he622][2][i]); WRITE_MBOX4(sc, HE_REGO_CS_ERTHR3, erthr[sc->he622][3][i]); WRITE_MBOX4(sc, HE_REGO_CS_ERTHR4, erthr[sc->he622][4][i]); WRITE_MBOX4(sc, HE_REGO_CS_ERCTL0, erctl[sc->he622][0]); WRITE_MBOX4(sc, HE_REGO_CS_ERCTL1, erctl[sc->he622][1]); WRITE_MBOX4(sc, HE_REGO_CS_ERCTL2, erctl[sc->he622][2]); WRITE_MBOX4(sc, HE_REGO_CS_ERSTAT0, erstat[sc->he622][0]); WRITE_MBOX4(sc, HE_REGO_CS_ERSTAT1, erstat[sc->he622][1]); WRITE_MBOX4(sc, HE_REGO_CS_RTFWR, rtfwr[sc->he622]); WRITE_MBOX4(sc, HE_REGO_CS_RTATR, rtatr[sc->he622]); WRITE_MBOX4(sc, HE_REGO_CS_TFBSET, bwalloc[sc->he622][0]); WRITE_MBOX4(sc, HE_REGO_CS_WCRMAX, bwalloc[sc->he622][1]); WRITE_MBOX4(sc, HE_REGO_CS_WCRMIN, bwalloc[sc->he622][2]); WRITE_MBOX4(sc, HE_REGO_CS_WCRINC, bwalloc[sc->he622][3]); WRITE_MBOX4(sc, HE_REGO_CS_WCRDEC, bwalloc[sc->he622][4]); WRITE_MBOX4(sc, HE_REGO_CS_WCRCEIL, bwalloc[sc->he622][5]); WRITE_MBOX4(sc, HE_REGO_CS_OTPPER, orcf[sc->he622][0]); WRITE_MBOX4(sc, HE_REGO_CS_OTWPER, orcf[sc->he622][1]); WRITE_MBOX4(sc, HE_REGO_CS_OTTLIM, 8); for (n = 0; n < 8; n++) WRITE_MBOX4(sc, HE_REGO_CS_HGRRT(n), 0); } /* * 5.1.8 CS Block Connection Memory Initialisation */ static void hatm_init_cs_block_cm(struct hatm_softc *sc) { u_int n, i; u_int expt, mant, etrm, wcr, ttnrm, tnrm; uint32_t rate; uint32_t clkfreq, cellrate, decr; uint32_t *rg, rtg, val = 0; uint64_t drate; u_int buf, buf_limit; uint32_t base = READ4(sc, HE_REGO_RCMABR_BA); for (n = 0; n < HE_REGL_CM_GQTBL; n++) WRITE_RCM4(sc, base + HE_REGO_CM_GQTBL + n, 0); for (n = 0; n < HE_REGL_CM_RGTBL; n++) WRITE_RCM4(sc, base + HE_REGO_CM_RGTBL + n, 0); tnrm = 0; for (n = 0; n < HE_REGL_CM_TNRMTBL * 4; n++) { expt = (n >> 5) & 0x1f; mant = ((n & 0x18) << 4) | 0x7f; wcr = (1 << expt) * (mant + 512) / 512; etrm = n & 0x7; ttnrm = wcr / 10 / (1 << etrm); if (ttnrm > 255) ttnrm = 255; else if(ttnrm < 2) ttnrm = 2; tnrm = (tnrm << 8) | (ttnrm & 0xff); if (n % 4 == 0) WRITE_RCM4(sc, base + HE_REGO_CM_TNRMTBL + (n/4), tnrm); } clkfreq = sc->he622 ? HE_622_CLOCK : HE_155_CLOCK; buf_limit = 4; cellrate = sc->he622 ? ATM_RATE_622M : ATM_RATE_155M; decr = cellrate / 32; /* compute GRID top row in 1000 * cps */ for (n = 0; n < 16; n++) { u_int interval = clkfreq / cellrate; sc->rate_grid[0][n] = (u_int64_t)clkfreq * 1000 / interval; cellrate -= decr; } /* compute the other rows according to 2.4 */ for (i = 1; i < 16; i++) for (n = 0; n < 16; n++) sc->rate_grid[i][n] = sc->rate_grid[i-1][n] / ((i < 14) ? 2 : 4); /* first entry is line rate */ n = hatm_cps2atmf(sc->he622 ? ATM_RATE_622M : ATM_RATE_155M); expt = (n >> 9) & 0x1f; mant = n & 0x1f0; sc->rate_grid[0][0] = (u_int64_t)(1<he622 ? 
ATM_RATE_622M : ATM_RATE_155M; rg = &sc->rate_grid[15][15]; for (rate = 0; rate < 2 * HE_REGL_CM_RTGTBL; rate++) { /* unpack the ATMF rate */ expt = rate >> 5; mant = (rate & 0x1f) << 4; /* get the cell rate - minimum is 10 per second */ drate = (uint64_t)(1 << expt) * 1000 * (mant + 512) / 512; if (drate < 10 * 1000) drate = 10 * 1000; /* now look up the grid index */ while (drate >= *rg && rg-- > &sc->rate_grid[0][0]) ; rg++; rtg = rg - &sc->rate_grid[0][0]; /* now compute the buffer limit */ buf = drate * sc->tx_numbuffs / (cellrate * 2) / 1000; if (buf == 0) buf = 1; else if (buf > buf_limit) buf = buf_limit; /* make value */ val = (val << 16) | (rtg << 8) | buf; /* write */ if (rate % 2 == 1) WRITE_RCM4(sc, base + HE_REGO_CM_RTGTBL + rate/2, val); } } /* * Clear an unused receive group buffer pool */ static void hatm_clear_rpool(struct hatm_softc *sc, u_int group, u_int large) { WRITE4(sc, HE_REGO_RBP_S(large, group), 0); WRITE4(sc, HE_REGO_RBP_T(large, group), 0); WRITE4(sc, HE_REGO_RBP_QI(large, group), 1); WRITE4(sc, HE_REGO_RBP_BL(large, group), 0); } /* * Initialize a receive group buffer pool */ static void hatm_init_rpool(struct hatm_softc *sc, struct herbp *q, u_int group, u_int large) { if (q->size == 0) { hatm_clear_rpool(sc, group, large); return; } bzero(q->mem.base, q->mem.size); q->rbp = q->mem.base; q->head = q->tail = 0; DBG(sc, ATTACH, ("RBP%u%c=0x%lx", group, "SL"[large], (u_long)q->mem.paddr)); WRITE4(sc, HE_REGO_RBP_S(large, group), q->mem.paddr); WRITE4(sc, HE_REGO_RBP_T(large, group), 0); WRITE4(sc, HE_REGO_RBP_QI(large, group), ((q->size - 1) << HE_REGS_RBP_SIZE) | HE_REGM_RBP_INTR_ENB | (q->thresh << HE_REGS_RBP_THRESH)); WRITE4(sc, HE_REGO_RBP_BL(large, group), (q->bsize >> 2) & ~1); } /* * Clear an unused receive buffer return queue */ static void hatm_clear_rbrq(struct hatm_softc *sc, u_int group) { WRITE4(sc, HE_REGO_RBRQ_ST(group), 0); WRITE4(sc, HE_REGO_RBRQ_H(group), 0); WRITE4(sc, HE_REGO_RBRQ_Q(group), (1 << HE_REGS_RBRQ_THRESH)); WRITE4(sc, HE_REGO_RBRQ_I(group), 0); } /* * Initialize receive buffer return queue */ static void hatm_init_rbrq(struct hatm_softc *sc, struct herbrq *rq, u_int group) { if (rq->size == 0) { hatm_clear_rbrq(sc, group); return; } rq->rbrq = rq->mem.base; rq->head = 0; DBG(sc, ATTACH, ("RBRQ%u=0x%lx", group, (u_long)rq->mem.paddr)); WRITE4(sc, HE_REGO_RBRQ_ST(group), rq->mem.paddr); WRITE4(sc, HE_REGO_RBRQ_H(group), 0); WRITE4(sc, HE_REGO_RBRQ_Q(group), (rq->thresh << HE_REGS_RBRQ_THRESH) | ((rq->size - 1) << HE_REGS_RBRQ_SIZE)); WRITE4(sc, HE_REGO_RBRQ_I(group), (rq->tout << HE_REGS_RBRQ_TIME) | (rq->pcnt << HE_REGS_RBRQ_COUNT)); } /* * Clear an unused transmit buffer return queue N */ static void hatm_clear_tbrq(struct hatm_softc *sc, u_int group) { WRITE4(sc, HE_REGO_TBRQ_B_T(group), 0); WRITE4(sc, HE_REGO_TBRQ_H(group), 0); WRITE4(sc, HE_REGO_TBRQ_S(group), 0); WRITE4(sc, HE_REGO_TBRQ_THRESH(group), 1); } /* * Initialize transmit buffer return queue N */ static void hatm_init_tbrq(struct hatm_softc *sc, struct hetbrq *tq, u_int group) { if (tq->size == 0) { hatm_clear_tbrq(sc, group); return; } tq->tbrq = tq->mem.base; tq->head = 0; DBG(sc, ATTACH, ("TBRQ%u=0x%lx", group, (u_long)tq->mem.paddr)); WRITE4(sc, HE_REGO_TBRQ_B_T(group), tq->mem.paddr); WRITE4(sc, HE_REGO_TBRQ_H(group), 0); WRITE4(sc, HE_REGO_TBRQ_S(group), tq->size - 1); WRITE4(sc, HE_REGO_TBRQ_THRESH(group), tq->thresh); } /* * Initialize TPDRQ */ static void hatm_init_tpdrq(struct hatm_softc *sc) { struct hetpdrq *tq; tq = &sc->tpdrq; tq->tpdrq = 
tq->mem.base; tq->tail = tq->head = 0; DBG(sc, ATTACH, ("TPDRQ=0x%lx", (u_long)tq->mem.paddr)); WRITE4(sc, HE_REGO_TPDRQ_H, tq->mem.paddr); WRITE4(sc, HE_REGO_TPDRQ_T, 0); WRITE4(sc, HE_REGO_TPDRQ_S, tq->size - 1); } /* * Function can be called by the infrastructure to start the card. */ static void hatm_init(void *p) { struct hatm_softc *sc = p; mtx_lock(&sc->mtx); hatm_stop(sc); hatm_initialize(sc); mtx_unlock(&sc->mtx); } enum { CTL_ISTATS, }; /* * Sysctl handler */ static int hatm_sysctl(SYSCTL_HANDLER_ARGS) { struct hatm_softc *sc = arg1; uint32_t *ret; int error; size_t len; switch (arg2) { case CTL_ISTATS: len = sizeof(sc->istats); break; default: panic("bad control code"); } ret = malloc(len, M_TEMP, M_WAITOK); mtx_lock(&sc->mtx); switch (arg2) { case CTL_ISTATS: sc->istats.mcc += READ4(sc, HE_REGO_MCC); sc->istats.oec += READ4(sc, HE_REGO_OEC); sc->istats.dcc += READ4(sc, HE_REGO_DCC); sc->istats.cec += READ4(sc, HE_REGO_CEC); bcopy(&sc->istats, ret, sizeof(sc->istats)); break; } mtx_unlock(&sc->mtx); error = SYSCTL_OUT(req, ret, len); free(ret, M_TEMP); return (error); } static int kenv_getuint(struct hatm_softc *sc, const char *var, u_int *ptr, u_int def, int rw) { char full[IFNAMSIZ + 3 + 20]; char *val, *end; u_int u; *ptr = def; if (SYSCTL_ADD_UINT(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree), OID_AUTO, var, rw ? CTLFLAG_RW : CTLFLAG_RD, ptr, 0, "") == NULL) return (ENOMEM); snprintf(full, sizeof(full), "hw.%s.%s", device_get_nameunit(sc->dev), var); if ((val = getenv(full)) == NULL) return (0); u = strtoul(val, &end, 0); if (end == val || *end != '\0') { freeenv(val); return (EINVAL); } if (bootverbose) if_printf(&sc->ifatm.ifnet, "%s=%u\n", full, u); *ptr = u; return (0); } /* * Set configurable parameters. Many of these are configurable via * kenv. 
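kenv_getuint() above accepts a tunable only when strtoul() consumed at least one character and stopped at the terminating NUL. The same validation idiom in stand-alone form (the errno check is an addition; the driver as quoted omits it):

#include <errno.h>
#include <stdlib.h>

/* Accept the string only if it is a complete unsigned number in base
 * 0 (decimal, octal, or hex), mirroring the check in kenv_getuint(). */
static int
parse_uint(const char *val, unsigned *out)
{
	char *end;
	unsigned long u;

	errno = 0;
	u = strtoul(val, &end, 0);
	if (end == val || *end != '\0' || errno != 0)
		return (-1);
	*out = (unsigned)u;
	return (0);
}

Note that, as quoted, the driver's success path never calls freeenv(val); only the EINVAL path does.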
*/ static int hatm_configure(struct hatm_softc *sc) { /* Receive buffer pool 0 small */ kenv_getuint(sc, "rbps0_size", &sc->rbp_s0.size, HE_CONFIG_RBPS0_SIZE, 0); kenv_getuint(sc, "rbps0_thresh", &sc->rbp_s0.thresh, HE_CONFIG_RBPS0_THRESH, 0); sc->rbp_s0.bsize = MBUF0_SIZE; /* Receive buffer pool 0 large */ kenv_getuint(sc, "rbpl0_size", &sc->rbp_l0.size, HE_CONFIG_RBPL0_SIZE, 0); kenv_getuint(sc, "rbpl0_thresh", &sc->rbp_l0.thresh, HE_CONFIG_RBPL0_THRESH, 0); sc->rbp_l0.bsize = MCLBYTES - MBUFL_OFFSET; /* Receive buffer return queue 0 */ kenv_getuint(sc, "rbrq0_size", &sc->rbrq_0.size, HE_CONFIG_RBRQ0_SIZE, 0); kenv_getuint(sc, "rbrq0_thresh", &sc->rbrq_0.thresh, HE_CONFIG_RBRQ0_THRESH, 0); kenv_getuint(sc, "rbrq0_tout", &sc->rbrq_0.tout, HE_CONFIG_RBRQ0_TOUT, 0); kenv_getuint(sc, "rbrq0_pcnt", &sc->rbrq_0.pcnt, HE_CONFIG_RBRQ0_PCNT, 0); /* Receive buffer pool 1 small */ kenv_getuint(sc, "rbps1_size", &sc->rbp_s1.size, HE_CONFIG_RBPS1_SIZE, 0); kenv_getuint(sc, "rbps1_thresh", &sc->rbp_s1.thresh, HE_CONFIG_RBPS1_THRESH, 0); sc->rbp_s1.bsize = MBUF1_SIZE; /* Receive buffer return queue 1 */ kenv_getuint(sc, "rbrq1_size", &sc->rbrq_1.size, HE_CONFIG_RBRQ1_SIZE, 0); kenv_getuint(sc, "rbrq1_thresh", &sc->rbrq_1.thresh, HE_CONFIG_RBRQ1_THRESH, 0); kenv_getuint(sc, "rbrq1_tout", &sc->rbrq_1.tout, HE_CONFIG_RBRQ1_TOUT, 0); kenv_getuint(sc, "rbrq1_pcnt", &sc->rbrq_1.pcnt, HE_CONFIG_RBRQ1_PCNT, 0); /* Interrupt queue 0 */ kenv_getuint(sc, "irq0_size", &sc->irq_0.size, HE_CONFIG_IRQ0_SIZE, 0); kenv_getuint(sc, "irq0_thresh", &sc->irq_0.thresh, HE_CONFIG_IRQ0_THRESH, 0); sc->irq_0.line = HE_CONFIG_IRQ0_LINE; /* Transmit buffer return queue 0 */ kenv_getuint(sc, "tbrq0_size", &sc->tbrq.size, HE_CONFIG_TBRQ_SIZE, 0); kenv_getuint(sc, "tbrq0_thresh", &sc->tbrq.thresh, HE_CONFIG_TBRQ_THRESH, 0); /* Transmit buffer ready queue */ kenv_getuint(sc, "tpdrq_size", &sc->tpdrq.size, HE_CONFIG_TPDRQ_SIZE, 0); /* Max TPDs per VCC */ kenv_getuint(sc, "tpdmax", &sc->max_tpd, HE_CONFIG_TPD_MAXCC, 0); return (0); } #ifdef HATM_DEBUG /* * Get TSRs from connection memory */ static int hatm_sysctl_tsr(SYSCTL_HANDLER_ARGS) { struct hatm_softc *sc = arg1; int error, i, j; uint32_t *val; val = malloc(sizeof(uint32_t) * HE_MAX_VCCS * 15, M_TEMP, M_WAITOK); mtx_lock(&sc->mtx); for (i = 0; i < HE_MAX_VCCS; i++) for (j = 0; j <= 14; j++) val[15 * i + j] = READ_TSR(sc, i, j); mtx_unlock(&sc->mtx); error = SYSCTL_OUT(req, val, sizeof(uint32_t) * HE_MAX_VCCS * 15); free(val, M_TEMP); if (error != 0 || req->newptr == NULL) return (error); return (EPERM); } /* * Get TPDs from connection memory */ static int hatm_sysctl_tpd(SYSCTL_HANDLER_ARGS) { struct hatm_softc *sc = arg1; int error, i, j; uint32_t *val; val = malloc(sizeof(uint32_t) * HE_MAX_VCCS * 16, M_TEMP, M_WAITOK); mtx_lock(&sc->mtx); for (i = 0; i < HE_MAX_VCCS; i++) for (j = 0; j < 16; j++) val[16 * i + j] = READ_TCM4(sc, 16 * i + j); mtx_unlock(&sc->mtx); error = SYSCTL_OUT(req, val, sizeof(uint32_t) * HE_MAX_VCCS * 16); free(val, M_TEMP); if (error != 0 || req->newptr == NULL) return (error); return (EPERM); } /* * Get mbox registers */ static int hatm_sysctl_mbox(SYSCTL_HANDLER_ARGS) { struct hatm_softc *sc = arg1; int error, i; uint32_t *val; val = malloc(sizeof(uint32_t) * HE_REGO_CS_END, M_TEMP, M_WAITOK); mtx_lock(&sc->mtx); for (i = 0; i < HE_REGO_CS_END; i++) val[i] = READ_MBOX4(sc, i); mtx_unlock(&sc->mtx); error = SYSCTL_OUT(req, val, sizeof(uint32_t) * HE_REGO_CS_END); free(val, M_TEMP); if (error != 0 || req->newptr == NULL) return (error); return (EPERM); 
} /* * Get connection memory */ static int hatm_sysctl_cm(SYSCTL_HANDLER_ARGS) { struct hatm_softc *sc = arg1; int error, i; uint32_t *val; val = malloc(sizeof(uint32_t) * (HE_CONFIG_RXMEM + 1), M_TEMP, M_WAITOK); mtx_lock(&sc->mtx); val[0] = READ4(sc, HE_REGO_RCMABR_BA); for (i = 0; i < HE_CONFIG_RXMEM; i++) val[i + 1] = READ_RCM4(sc, i); mtx_unlock(&sc->mtx); error = SYSCTL_OUT(req, val, sizeof(uint32_t) * (HE_CONFIG_RXMEM + 1)); free(val, M_TEMP); if (error != 0 || req->newptr == NULL) return (error); return (EPERM); } /* * Get local buffer memory */ static int hatm_sysctl_lbmem(SYSCTL_HANDLER_ARGS) { struct hatm_softc *sc = arg1; int error, i; uint32_t *val; u_int bytes = (1 << 21); val = malloc(bytes, M_TEMP, M_WAITOK); mtx_lock(&sc->mtx); for (i = 0; i < bytes / 4; i++) val[i] = READ_LB4(sc, i); mtx_unlock(&sc->mtx); error = SYSCTL_OUT(req, val, bytes); free(val, M_TEMP); if (error != 0 || req->newptr == NULL) return (error); return (EPERM); } /* * Get all card registers */ static int hatm_sysctl_heregs(SYSCTL_HANDLER_ARGS) { struct hatm_softc *sc = arg1; int error, i; uint32_t *val; val = malloc(HE_REGO_END, M_TEMP, M_WAITOK); mtx_lock(&sc->mtx); for (i = 0; i < HE_REGO_END; i += 4) val[i / 4] = READ4(sc, i); mtx_unlock(&sc->mtx); error = SYSCTL_OUT(req, val, HE_REGO_END); free(val, M_TEMP); if (error != 0 || req->newptr == NULL) return (error); return (EPERM); } #endif /* * Suni register access */ /* * read at most n SUNI registers starting at reg into val */ static int hatm_utopia_readregs(struct ifatm *ifatm, u_int reg, uint8_t *val, u_int *n) { u_int i; struct hatm_softc *sc = (struct hatm_softc *)ifatm; if (reg >= (HE_REGO_SUNI_END - HE_REGO_SUNI) / 4) return (EINVAL); if (reg + *n > (HE_REGO_SUNI_END - HE_REGO_SUNI) / 4) *n = reg - (HE_REGO_SUNI_END - HE_REGO_SUNI) / 4; mtx_assert(&sc->mtx, MA_OWNED); for (i = 0; i < *n; i++) val[i] = READ4(sc, HE_REGO_SUNI + 4 * (reg + i)); return (0); } /* * change the bits given by mask to them in val in register reg */ static int hatm_utopia_writereg(struct ifatm *ifatm, u_int reg, u_int mask, u_int val) { uint32_t regval; struct hatm_softc *sc = (struct hatm_softc *)ifatm; if (reg >= (HE_REGO_SUNI_END - HE_REGO_SUNI) / 4) return (EINVAL); mtx_assert(&sc->mtx, MA_OWNED); regval = READ4(sc, HE_REGO_SUNI + 4 * reg); regval = (regval & ~mask) | (val & mask); WRITE4(sc, HE_REGO_SUNI + 4 * reg, regval); return (0); } static struct utopia_methods hatm_utopia_methods = { hatm_utopia_readregs, hatm_utopia_writereg, }; /* * Detach - if it is running, stop. Destroy. */ static int hatm_detach(device_t dev) { struct hatm_softc *sc = (struct hatm_softc *)device_get_softc(dev); mtx_lock(&sc->mtx); hatm_stop(sc); if (sc->utopia.state & UTP_ST_ATTACHED) { utopia_stop(&sc->utopia); utopia_detach(&sc->utopia); } mtx_unlock(&sc->mtx); atm_ifdetach(&sc->ifatm.ifnet); hatm_destroy(sc); return (0); } /* * Attach to the device. Assume that no locking is needed here. * All resource we allocate here are freed by calling hatm_destroy. 
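All of the sysctl handlers in this hunk follow one pattern: allocate a buffer, take the softc mutex, snapshot device state into the buffer, drop the lock, and only then call SYSCTL_OUT(), which may sleep and so must not run under the mutex. A condensed sketch of that pattern; foo_softc and foo_stats are placeholders, not driver types:

/* Snapshot-then-copyout sketch for a FreeBSD sysctl handler. */
static int
foo_sysctl_stats(SYSCTL_HANDLER_ARGS)
{
	struct foo_softc *sc = arg1;
	struct foo_stats *snap;
	int error;

	snap = malloc(sizeof(*snap), M_TEMP, M_WAITOK);	/* may sleep: before the lock */
	mtx_lock(&sc->mtx);
	*snap = sc->stats;		/* consistent snapshot under the lock */
	mtx_unlock(&sc->mtx);
	error = SYSCTL_OUT(req, snap, sizeof(*snap));	/* may sleep: after unlock */
	free(snap, M_TEMP);
	return (error);
}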
*/ static int hatm_attach(device_t dev) { struct hatm_softc *sc; int unit; int error; uint32_t v; struct ifnet *ifp; sc = device_get_softc(dev); unit = device_get_unit(dev); sc->dev = dev; sc->ifatm.mib.device = ATM_DEVICE_HE155; sc->ifatm.mib.serial = 0; sc->ifatm.mib.hw_version = 0; sc->ifatm.mib.sw_version = 0; sc->ifatm.mib.vpi_bits = HE_CONFIG_VPI_BITS; sc->ifatm.mib.vci_bits = HE_CONFIG_VCI_BITS; sc->ifatm.mib.max_vpcs = 0; sc->ifatm.mib.max_vccs = HE_MAX_VCCS; sc->ifatm.mib.media = IFM_ATM_UNKNOWN; sc->he622 = 0; sc->ifatm.phy = &sc->utopia; - SLIST_INIT(&sc->mbuf0_list); - SLIST_INIT(&sc->mbuf1_list); SLIST_INIT(&sc->tpd_free); mtx_init(&sc->mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF); - mtx_init(&sc->mbuf0_mtx, device_get_nameunit(dev), "HEb0", MTX_DEF); - mtx_init(&sc->mbuf1_mtx, device_get_nameunit(dev), "HEb1", MTX_DEF); cv_init(&sc->vcc_cv, "HEVCCcv"); cv_init(&sc->cv_rcclose, "RCClose"); sysctl_ctx_init(&sc->sysctl_ctx); /* * 4.2 BIOS Configuration */ v = pci_read_config(dev, PCIR_COMMAND, 2); v |= PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN | PCIM_CMD_MWRICEN; pci_write_config(dev, PCIR_COMMAND, v, 2); /* * 4.3 PCI Bus Controller-Specific Initialisation */ v = pci_read_config(dev, HE_PCIR_GEN_CNTL_0, 4); v |= HE_PCIM_CTL0_MRL | HE_PCIM_CTL0_MRM | HE_PCIM_CTL0_IGNORE_TIMEOUT; #if BYTE_ORDER == BIG_ENDIAN && 0 v |= HE_PCIM_CTL0_BIGENDIAN; #endif pci_write_config(dev, HE_PCIR_GEN_CNTL_0, v, 4); /* * Map memory */ v = pci_read_config(dev, PCIR_COMMAND, 2); if (!(v & PCIM_CMD_MEMEN)) { device_printf(dev, "failed to enable memory\n"); error = ENXIO; goto failed; } sc->memid = PCIR_BAR(0); sc->memres = bus_alloc_resource(dev, SYS_RES_MEMORY, &sc->memid, 0, ~0, 1, RF_ACTIVE); if (sc->memres == NULL) { device_printf(dev, "could not map memory\n"); error = ENXIO; goto failed; } sc->memh = rman_get_bushandle(sc->memres); sc->memt = rman_get_bustag(sc->memres); /* * ALlocate a DMA tag for subsequent allocations */ if (bus_dma_tag_create(NULL, 1, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, BUS_SPACE_MAXSIZE_32BIT, 1, BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL, &sc->parent_tag)) { device_printf(dev, "could not allocate DMA tag\n"); error = ENOMEM; goto failed; } if (bus_dma_tag_create(sc->parent_tag, 1, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MBUF_ALLOC_SIZE, 1, MBUF_ALLOC_SIZE, 0, NULL, NULL, &sc->mbuf_tag)) { device_printf(dev, "could not allocate mbuf DMA tag\n"); error = ENOMEM; goto failed; } /* * Allocate a DMA tag for packets to send. Here we have a problem with * the specification of the maximum number of segments. Theoretically * this would be the size of the transmit ring - 1 multiplied by 3, * but this would not work. So make the maximum number of TPDs * occupied by one packet a configuration parameter. 
*/ if (bus_dma_tag_create(NULL, 1, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, HE_MAX_PDU, 3 * HE_CONFIG_MAX_TPD_PER_PACKET, HE_MAX_PDU, 0, NULL, NULL, &sc->tx_tag)) { device_printf(dev, "could not allocate TX tag\n"); error = ENOMEM; goto failed; } /* * Setup the interrupt */ sc->irqid = 0; sc->irqres = bus_alloc_resource(dev, SYS_RES_IRQ, &sc->irqid, 0, ~0, 1, RF_SHAREABLE | RF_ACTIVE); if (sc->irqres == 0) { device_printf(dev, "could not allocate irq\n"); error = ENXIO; goto failed; } ifp = &sc->ifatm.ifnet; ifp->if_softc = sc; ifp->if_unit = unit; ifp->if_name = "hatm"; /* * Make the sysctl tree */ error = ENOMEM; if ((sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctx, SYSCTL_STATIC_CHILDREN(_hw_atm), OID_AUTO, device_get_nameunit(dev), CTLFLAG_RD, 0, "")) == NULL) goto failed; if (SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree), OID_AUTO, "istats", CTLFLAG_RD | CTLTYPE_OPAQUE, sc, CTL_ISTATS, hatm_sysctl, "LU", "internal statistics") == NULL) goto failed; #ifdef HATM_DEBUG if (SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree), OID_AUTO, "tsr", CTLFLAG_RD | CTLTYPE_OPAQUE, sc, 0, hatm_sysctl_tsr, "S", "transmission status registers") == NULL) goto failed; if (SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree), OID_AUTO, "tpd", CTLFLAG_RD | CTLTYPE_OPAQUE, sc, 0, hatm_sysctl_tpd, "S", "transmission packet descriptors") == NULL) goto failed; if (SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree), OID_AUTO, "mbox", CTLFLAG_RD | CTLTYPE_OPAQUE, sc, 0, hatm_sysctl_mbox, "S", "mbox registers") == NULL) goto failed; if (SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree), OID_AUTO, "cm", CTLFLAG_RD | CTLTYPE_OPAQUE, sc, 0, hatm_sysctl_cm, "S", "connection memory") == NULL) goto failed; if (SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree), OID_AUTO, "heregs", CTLFLAG_RD | CTLTYPE_OPAQUE, sc, 0, hatm_sysctl_heregs, "S", "card registers") == NULL) goto failed; if (SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree), OID_AUTO, "lbmem", CTLFLAG_RD | CTLTYPE_OPAQUE, sc, 0, hatm_sysctl_lbmem, "S", "local memory") == NULL) goto failed; kenv_getuint(sc, "debug", &sc->debug, 0, 1); #endif /* * Configure */ if ((error = hatm_configure(sc)) != 0) goto failed; /* * Compute memory parameters */ if (sc->rbp_s0.size != 0) { sc->rbp_s0.mask = (sc->rbp_s0.size - 1) << 3; sc->rbp_s0.mem.size = sc->rbp_s0.size * 8; sc->rbp_s0.mem.align = sc->rbp_s0.mem.size; } if (sc->rbp_l0.size != 0) { sc->rbp_l0.mask = (sc->rbp_l0.size - 1) << 3; sc->rbp_l0.mem.size = sc->rbp_l0.size * 8; sc->rbp_l0.mem.align = sc->rbp_l0.mem.size; } if (sc->rbp_s1.size != 0) { sc->rbp_s1.mask = (sc->rbp_s1.size - 1) << 3; sc->rbp_s1.mem.size = sc->rbp_s1.size * 8; sc->rbp_s1.mem.align = sc->rbp_s1.mem.size; } if (sc->rbrq_0.size != 0) { sc->rbrq_0.mem.size = sc->rbrq_0.size * 8; sc->rbrq_0.mem.align = sc->rbrq_0.mem.size; } if (sc->rbrq_1.size != 0) { sc->rbrq_1.mem.size = sc->rbrq_1.size * 8; sc->rbrq_1.mem.align = sc->rbrq_1.mem.size; } sc->irq_0.mem.size = sc->irq_0.size * sizeof(uint32_t); sc->irq_0.mem.align = 4 * 1024; sc->tbrq.mem.size = sc->tbrq.size * 4; sc->tbrq.mem.align = 2 * sc->tbrq.mem.size; /* ZZZ */ sc->tpdrq.mem.size = sc->tpdrq.size * 8; sc->tpdrq.mem.align = sc->tpdrq.mem.size; sc->hsp_mem.size = sizeof(struct he_hsp); sc->hsp_mem.align = 1024; sc->lbufs_size = sc->rbp_l0.size + sc->rbrq_0.size; sc->tpd_total = sc->tbrq.size + sc->tpdrq.size; sc->tpds.align = 64; sc->tpds.size = sc->tpd_total * 
HE_TPD_SIZE; hatm_init_rmaps(sc); hatm_init_smbufs(sc); if ((error = hatm_init_tpds(sc)) != 0) goto failed; /* * Allocate memory */ if ((error = hatm_alloc_dmamem(sc, "IRQ", &sc->irq_0.mem)) != 0 || (error = hatm_alloc_dmamem(sc, "TBRQ0", &sc->tbrq.mem)) != 0 || (error = hatm_alloc_dmamem(sc, "TPDRQ", &sc->tpdrq.mem)) != 0 || (error = hatm_alloc_dmamem(sc, "HSP", &sc->hsp_mem)) != 0) goto failed; if (sc->rbp_s0.mem.size != 0 && (error = hatm_alloc_dmamem(sc, "RBPS0", &sc->rbp_s0.mem))) goto failed; if (sc->rbp_l0.mem.size != 0 && (error = hatm_alloc_dmamem(sc, "RBPL0", &sc->rbp_l0.mem))) goto failed; if (sc->rbp_s1.mem.size != 0 && (error = hatm_alloc_dmamem(sc, "RBPS1", &sc->rbp_s1.mem))) goto failed; if (sc->rbrq_0.mem.size != 0 && (error = hatm_alloc_dmamem(sc, "RBRQ0", &sc->rbrq_0.mem))) goto failed; if (sc->rbrq_1.mem.size != 0 && (error = hatm_alloc_dmamem(sc, "RBRQ1", &sc->rbrq_1.mem))) goto failed; if ((sc->vcc_zone = uma_zcreate("HE vccs", sizeof(struct hevcc), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0)) == NULL) { device_printf(dev, "cannot allocate zone for vccs\n"); goto failed; } /* * 4.4 Reset the card. */ if ((error = hatm_reset(sc)) != 0) goto failed; /* * Read the prom. */ hatm_init_bus_width(sc); hatm_init_read_eeprom(sc); hatm_init_endianess(sc); /* * Initialize interface */ ifp->if_flags = IFF_SIMPLEX; ifp->if_ioctl = hatm_ioctl; ifp->if_start = hatm_start; ifp->if_watchdog = NULL; ifp->if_init = hatm_init; utopia_attach(&sc->utopia, &sc->ifatm, &sc->media, &sc->mtx, &sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree), &hatm_utopia_methods); utopia_init_media(&sc->utopia); /* these two SUNI routines need the lock */ mtx_lock(&sc->mtx); /* poll while we are not running */ sc->utopia.flags |= UTP_FL_POLL_CARRIER; utopia_start(&sc->utopia); utopia_reset(&sc->utopia); mtx_unlock(&sc->mtx); atm_ifattach(ifp); #ifdef ENABLE_BPF bpfattach(ifp, DLT_ATM_RFC1483, sizeof(struct atmllc)); #endif error = bus_setup_intr(dev, sc->irqres, INTR_TYPE_NET, hatm_intr, &sc->irq_0, &sc->ih); if (error != 0) { device_printf(dev, "could not setup interrupt\n"); hatm_detach(dev); return (error); } return (0); failed: hatm_destroy(sc); return (error); } /* * Start the interface. Assume a state as from attach(). */ void hatm_initialize(struct hatm_softc *sc) { uint32_t v; u_int cid; static const u_int layout[2][7] = HE_CONFIG_MEM_LAYOUT; if (sc->ifatm.ifnet.if_flags & IFF_RUNNING) return; hatm_init_bus_width(sc); hatm_init_endianess(sc); if_printf(&sc->ifatm.ifnet, "%s, Rev. %s, S/N %u, " "MAC=%02x:%02x:%02x:%02x:%02x:%02x (%ubit PCI)\n", sc->prod_id, sc->rev, sc->ifatm.mib.serial, sc->ifatm.mib.esi[0], sc->ifatm.mib.esi[1], sc->ifatm.mib.esi[2], sc->ifatm.mib.esi[3], sc->ifatm.mib.esi[4], sc->ifatm.mib.esi[5], sc->pci64 ? 
64 : 32); /* * 4.8 SDRAM Controller Initialisation * 4.9 Initialize RNUM value */ if (sc->he622) WRITE4(sc, HE_REGO_SDRAM_CNTL, HE_REGM_SDRAM_64BIT); else WRITE4(sc, HE_REGO_SDRAM_CNTL, 0); BARRIER_W(sc); v = READ4(sc, HE_REGO_LB_SWAP); BARRIER_R(sc); v |= 0xf << HE_REGS_LBSWAP_RNUM; WRITE4(sc, HE_REGO_LB_SWAP, v); BARRIER_W(sc); hatm_init_irq(sc, &sc->irq_0, 0); hatm_clear_irq(sc, 1); hatm_clear_irq(sc, 2); hatm_clear_irq(sc, 3); WRITE4(sc, HE_REGO_GRP_1_0_MAP, 0); WRITE4(sc, HE_REGO_GRP_3_2_MAP, 0); WRITE4(sc, HE_REGO_GRP_5_4_MAP, 0); WRITE4(sc, HE_REGO_GRP_7_6_MAP, 0); BARRIER_W(sc); /* * 4.11 Enable PCI Bus Controller State Machine */ v = READ4(sc, HE_REGO_HOST_CNTL); BARRIER_R(sc); v |= HE_REGM_HOST_OUTFF_ENB | HE_REGM_HOST_CMDFF_ENB | HE_REGM_HOST_QUICK_RD | HE_REGM_HOST_QUICK_WR; WRITE4(sc, HE_REGO_HOST_CNTL, v); BARRIER_W(sc); /* * 5.1.1 Generic configuration state */ sc->cells_per_row = layout[sc->he622][0]; sc->bytes_per_row = layout[sc->he622][1]; sc->r0_numrows = layout[sc->he622][2]; sc->tx_numrows = layout[sc->he622][3]; sc->r1_numrows = layout[sc->he622][4]; sc->r0_startrow = layout[sc->he622][5]; sc->tx_startrow = sc->r0_startrow + sc->r0_numrows; sc->r1_startrow = sc->tx_startrow + sc->tx_numrows; sc->cells_per_lbuf = layout[sc->he622][6]; sc->r0_numbuffs = sc->r0_numrows * (sc->cells_per_row / sc->cells_per_lbuf); sc->r1_numbuffs = sc->r1_numrows * (sc->cells_per_row / sc->cells_per_lbuf); sc->tx_numbuffs = sc->tx_numrows * (sc->cells_per_row / sc->cells_per_lbuf); if (sc->r0_numbuffs > 2560) sc->r0_numbuffs = 2560; if (sc->r1_numbuffs > 2560) sc->r1_numbuffs = 2560; if (sc->tx_numbuffs > 5120) sc->tx_numbuffs = 5120; DBG(sc, ATTACH, ("cells_per_row=%u bytes_per_row=%u r0_numrows=%u " "tx_numrows=%u r1_numrows=%u r0_startrow=%u tx_startrow=%u " "r1_startrow=%u cells_per_lbuf=%u\nr0_numbuffs=%u r1_numbuffs=%u " "tx_numbuffs=%u\n", sc->cells_per_row, sc->bytes_per_row, sc->r0_numrows, sc->tx_numrows, sc->r1_numrows, sc->r0_startrow, sc->tx_startrow, sc->r1_startrow, sc->cells_per_lbuf, sc->r0_numbuffs, sc->r1_numbuffs, sc->tx_numbuffs)); /* * 5.1.2 Configure Hardware dependend registers */ if (sc->he622) { WRITE4(sc, HE_REGO_LBARB, (0x2 << HE_REGS_LBARB_SLICE) | (0xf << HE_REGS_LBARB_RNUM) | (0x3 << HE_REGS_LBARB_THPRI) | (0x3 << HE_REGS_LBARB_RHPRI) | (0x2 << HE_REGS_LBARB_TLPRI) | (0x1 << HE_REGS_LBARB_RLPRI) | (0x28 << HE_REGS_LBARB_BUS_MULT) | (0x50 << HE_REGS_LBARB_NET_PREF)); BARRIER_W(sc); WRITE4(sc, HE_REGO_SDRAMCON, /* HW bug: don't use banking */ /* HE_REGM_SDRAMCON_BANK | */ HE_REGM_SDRAMCON_WIDE | (0x384 << HE_REGS_SDRAMCON_REF)); BARRIER_W(sc); WRITE4(sc, HE_REGO_RCMCONFIG, (0x1 << HE_REGS_RCMCONFIG_BANK_WAIT) | (0x1 << HE_REGS_RCMCONFIG_RW_WAIT) | (0x0 << HE_REGS_RCMCONFIG_TYPE)); WRITE4(sc, HE_REGO_TCMCONFIG, (0x2 << HE_REGS_TCMCONFIG_BANK_WAIT) | (0x1 << HE_REGS_TCMCONFIG_RW_WAIT) | (0x0 << HE_REGS_TCMCONFIG_TYPE)); } else { WRITE4(sc, HE_REGO_LBARB, (0x2 << HE_REGS_LBARB_SLICE) | (0xf << HE_REGS_LBARB_RNUM) | (0x3 << HE_REGS_LBARB_THPRI) | (0x3 << HE_REGS_LBARB_RHPRI) | (0x2 << HE_REGS_LBARB_TLPRI) | (0x1 << HE_REGS_LBARB_RLPRI) | (0x46 << HE_REGS_LBARB_BUS_MULT) | (0x8C << HE_REGS_LBARB_NET_PREF)); BARRIER_W(sc); WRITE4(sc, HE_REGO_SDRAMCON, /* HW bug: don't use banking */ /* HE_REGM_SDRAMCON_BANK | */ (0x150 << HE_REGS_SDRAMCON_REF)); BARRIER_W(sc); WRITE4(sc, HE_REGO_RCMCONFIG, (0x0 << HE_REGS_RCMCONFIG_BANK_WAIT) | (0x1 << HE_REGS_RCMCONFIG_RW_WAIT) | (0x0 << HE_REGS_RCMCONFIG_TYPE)); WRITE4(sc, HE_REGO_TCMCONFIG, (0x1 << HE_REGS_TCMCONFIG_BANK_WAIT) | 
(0x1 << HE_REGS_TCMCONFIG_RW_WAIT) | (0x0 << HE_REGS_TCMCONFIG_TYPE)); } WRITE4(sc, HE_REGO_LBCONFIG, (sc->cells_per_lbuf * 48)); WRITE4(sc, HE_REGO_RLBC_H, 0); WRITE4(sc, HE_REGO_RLBC_T, 0); WRITE4(sc, HE_REGO_RLBC_H2, 0); WRITE4(sc, HE_REGO_RXTHRSH, 512); WRITE4(sc, HE_REGO_LITHRSH, 256); WRITE4(sc, HE_REGO_RLBF0_C, sc->r0_numbuffs); WRITE4(sc, HE_REGO_RLBF1_C, sc->r1_numbuffs); if (sc->he622) { WRITE4(sc, HE_REGO_RCCONFIG, (8 << HE_REGS_RCCONFIG_UTDELAY) | (sc->ifatm.mib.vpi_bits << HE_REGS_RCCONFIG_VP) | (sc->ifatm.mib.vci_bits << HE_REGS_RCCONFIG_VC)); WRITE4(sc, HE_REGO_TXCONFIG, (32 << HE_REGS_TXCONFIG_THRESH) | (sc->ifatm.mib.vci_bits << HE_REGS_TXCONFIG_VCI_MASK) | (sc->tx_numbuffs << HE_REGS_TXCONFIG_LBFREE)); } else { WRITE4(sc, HE_REGO_RCCONFIG, (0 << HE_REGS_RCCONFIG_UTDELAY) | HE_REGM_RCCONFIG_UT_MODE | (sc->ifatm.mib.vpi_bits << HE_REGS_RCCONFIG_VP) | (sc->ifatm.mib.vci_bits << HE_REGS_RCCONFIG_VC)); WRITE4(sc, HE_REGO_TXCONFIG, (32 << HE_REGS_TXCONFIG_THRESH) | HE_REGM_TXCONFIG_UTMODE | (sc->ifatm.mib.vci_bits << HE_REGS_TXCONFIG_VCI_MASK) | (sc->tx_numbuffs << HE_REGS_TXCONFIG_LBFREE)); } WRITE4(sc, HE_REGO_TXAAL5_PROTO, 0); if (sc->rbp_s1.size != 0) { WRITE4(sc, HE_REGO_RHCONFIG, HE_REGM_RHCONFIG_PHYENB | ((sc->he622 ? 0x41 : 0x31) << HE_REGS_RHCONFIG_PTMR_PRE) | (1 << HE_REGS_RHCONFIG_OAM_GID)); } else { WRITE4(sc, HE_REGO_RHCONFIG, HE_REGM_RHCONFIG_PHYENB | ((sc->he622 ? 0x41 : 0x31) << HE_REGS_RHCONFIG_PTMR_PRE) | (0 << HE_REGS_RHCONFIG_OAM_GID)); } BARRIER_W(sc); hatm_init_cm(sc); hatm_init_rx_buffer_pool(sc, 0, sc->r0_startrow, sc->r0_numbuffs); hatm_init_rx_buffer_pool(sc, 1, sc->r1_startrow, sc->r1_numbuffs); hatm_init_tx_buffer_pool(sc, sc->tx_startrow, sc->tx_numbuffs); hatm_init_imed_queues(sc); /* * 5.1.6 Application tunable Parameters */ WRITE4(sc, HE_REGO_MCC, 0); WRITE4(sc, HE_REGO_OEC, 0); WRITE4(sc, HE_REGO_DCC, 0); WRITE4(sc, HE_REGO_CEC, 0); hatm_init_cs_block(sc); hatm_init_cs_block_cm(sc); hatm_init_rpool(sc, &sc->rbp_s0, 0, 0); hatm_init_rpool(sc, &sc->rbp_l0, 0, 1); hatm_init_rpool(sc, &sc->rbp_s1, 1, 0); hatm_clear_rpool(sc, 1, 1); hatm_clear_rpool(sc, 2, 0); hatm_clear_rpool(sc, 2, 1); hatm_clear_rpool(sc, 3, 0); hatm_clear_rpool(sc, 3, 1); hatm_clear_rpool(sc, 4, 0); hatm_clear_rpool(sc, 4, 1); hatm_clear_rpool(sc, 5, 0); hatm_clear_rpool(sc, 5, 1); hatm_clear_rpool(sc, 6, 0); hatm_clear_rpool(sc, 6, 1); hatm_clear_rpool(sc, 7, 0); hatm_clear_rpool(sc, 7, 1); hatm_init_rbrq(sc, &sc->rbrq_0, 0); hatm_init_rbrq(sc, &sc->rbrq_1, 1); hatm_clear_rbrq(sc, 2); hatm_clear_rbrq(sc, 3); hatm_clear_rbrq(sc, 4); hatm_clear_rbrq(sc, 5); hatm_clear_rbrq(sc, 6); hatm_clear_rbrq(sc, 7); sc->lbufs_next = 0; bzero(sc->lbufs, sizeof(sc->lbufs[0]) * sc->lbufs_size); hatm_init_tbrq(sc, &sc->tbrq, 0); hatm_clear_tbrq(sc, 1); hatm_clear_tbrq(sc, 2); hatm_clear_tbrq(sc, 3); hatm_clear_tbrq(sc, 4); hatm_clear_tbrq(sc, 5); hatm_clear_tbrq(sc, 6); hatm_clear_tbrq(sc, 7); hatm_init_tpdrq(sc); WRITE4(sc, HE_REGO_UBUFF_BA, (sc->he622 ? 
0x104780 : 0x800)); /* * Initialize HSP */ bzero(sc->hsp_mem.base, sc->hsp_mem.size); sc->hsp = sc->hsp_mem.base; WRITE4(sc, HE_REGO_HSP_BA, sc->hsp_mem.paddr); /* * 5.1.12 Enable transmit and receive * Enable bus master and interrupts */ v = READ_MBOX4(sc, HE_REGO_CS_ERCTL0); v |= 0x18000000; WRITE_MBOX4(sc, HE_REGO_CS_ERCTL0, v); v = READ4(sc, HE_REGO_RCCONFIG); v |= HE_REGM_RCCONFIG_RXENB; WRITE4(sc, HE_REGO_RCCONFIG, v); v = pci_read_config(sc->dev, HE_PCIR_GEN_CNTL_0, 4); v |= HE_PCIM_CTL0_INIT_ENB | HE_PCIM_CTL0_INT_PROC_ENB; pci_write_config(sc->dev, HE_PCIR_GEN_CNTL_0, v, 4); sc->ifatm.ifnet.if_flags |= IFF_RUNNING; sc->ifatm.ifnet.if_baudrate = 53 * 8 * sc->ifatm.mib.pcr; sc->utopia.flags &= ~UTP_FL_POLL_CARRIER; /* reopen vccs */ for (cid = 0; cid < HE_MAX_VCCS; cid++) if (sc->vccs[cid] != NULL) hatm_load_vc(sc, cid, 1); ATMEV_SEND_IFSTATE_CHANGED(&sc->ifatm, sc->utopia.carrier == UTP_CARR_OK); } /* * This functions stops the card and frees all resources allocated after * the attach. Must have the global lock. */ void hatm_stop(struct hatm_softc *sc) { uint32_t v; u_int i, p, cid; struct mbuf_chunk_hdr *ch; struct mbuf_page *pg; mtx_assert(&sc->mtx, MA_OWNED); if (!(sc->ifatm.ifnet.if_flags & IFF_RUNNING)) return; sc->ifatm.ifnet.if_flags &= ~IFF_RUNNING; ATMEV_SEND_IFSTATE_CHANGED(&sc->ifatm, sc->utopia.carrier == UTP_CARR_OK); sc->utopia.flags |= UTP_FL_POLL_CARRIER; /* * Stop and reset the hardware so that everything remains * stable. */ v = READ_MBOX4(sc, HE_REGO_CS_ERCTL0); v &= ~0x18000000; WRITE_MBOX4(sc, HE_REGO_CS_ERCTL0, v); v = READ4(sc, HE_REGO_RCCONFIG); v &= ~HE_REGM_RCCONFIG_RXENB; WRITE4(sc, HE_REGO_RCCONFIG, v); WRITE4(sc, HE_REGO_RHCONFIG, (0x2 << HE_REGS_RHCONFIG_PTMR_PRE)); BARRIER_W(sc); v = READ4(sc, HE_REGO_HOST_CNTL); BARRIER_R(sc); v &= ~(HE_REGM_HOST_OUTFF_ENB | HE_REGM_HOST_CMDFF_ENB); WRITE4(sc, HE_REGO_HOST_CNTL, v); BARRIER_W(sc); /* * Disable bust master and interrupts */ v = pci_read_config(sc->dev, HE_PCIR_GEN_CNTL_0, 4); v &= ~(HE_PCIM_CTL0_INIT_ENB | HE_PCIM_CTL0_INT_PROC_ENB); pci_write_config(sc->dev, HE_PCIR_GEN_CNTL_0, v, 4); (void)hatm_reset(sc); /* * Card resets the SUNI when resetted, so re-initialize it */ utopia_reset(&sc->utopia); /* * Give any waiters on closing a VCC a chance. They will stop * to wait if they see that IFF_RUNNING disappeared. */ while (!(cv_waitq_empty(&sc->vcc_cv))) { cv_broadcast(&sc->vcc_cv); DELAY(100); } while (!(cv_waitq_empty(&sc->cv_rcclose))) { cv_broadcast(&sc->cv_rcclose); } /* * Now free all resources. */ /* * Free the large mbufs that are given to the card. */ for (i = 0 ; i < sc->lbufs_size; i++) { if (sc->lbufs[i] != NULL) { bus_dmamap_unload(sc->mbuf_tag, sc->rmaps[i]); m_freem(sc->lbufs[i]); sc->lbufs[i] = NULL; } } /* * Free small buffers */ for (p = 0; p < sc->mbuf_npages; p++) { pg = sc->mbuf_pages[p]; for (i = 0; i < pg->hdr.nchunks; i++) { if (MBUF_TST_BIT(pg->hdr.card, i)) { MBUF_CLR_BIT(pg->hdr.card, i); - MBUF_CLR_BIT(pg->hdr.used, i); ch = (struct mbuf_chunk_hdr *) ((char *)pg + i * pg->hdr.chunksize + pg->hdr.hdroff); m_freem(ch->mbuf); } } } hatm_stop_tpds(sc); /* * Free all partial reassembled PDUs on any VCC. 
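The broadcast loops in hatm_stop() above poke the condition variables until no waiters remain; the waiters re-check IFF_RUNNING and give up once it is clear. A user-space analogue of that stop handshake (a minimal pthreads sketch, with a single flag standing in for IFF_RUNNING):

#include <pthread.h>
#include <stdbool.h>

struct gate {
	pthread_mutex_t	mtx;
	pthread_cond_t	cv;
	bool		running;
};

/* Waiter: blocks only while the device is running. */
static void
gate_wait(struct gate *g)
{
	pthread_mutex_lock(&g->mtx);
	while (g->running)
		pthread_cond_wait(&g->cv, &g->mtx);
	pthread_mutex_unlock(&g->mtx);
}

/* Stopper: clears the flag and wakes everyone; waiters re-check the
 * flag instead of waiting forever. */
static void
gate_stop(struct gate *g)
{
	pthread_mutex_lock(&g->mtx);
	g->running = false;
	pthread_cond_broadcast(&g->cv);
	pthread_mutex_unlock(&g->mtx);
}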
*/ for (cid = 0; cid < HE_MAX_VCCS; cid++) { if (sc->vccs[cid] != NULL) { if (sc->vccs[cid]->chain != NULL) { m_freem(sc->vccs[cid]->chain); sc->vccs[cid]->chain = NULL; sc->vccs[cid]->last = NULL; } if (!(sc->vccs[cid]->vflags & (HE_VCC_RX_OPEN | HE_VCC_TX_OPEN))) { hatm_tx_vcc_closed(sc, cid); uma_zfree(sc->vcc_zone, sc->vccs[cid]); sc->vccs[cid] = NULL; sc->open_vccs--; } else { sc->vccs[cid]->vflags = 0; sc->vccs[cid]->ntpds = 0; } } } if (sc->rbp_s0.size != 0) bzero(sc->rbp_s0.mem.base, sc->rbp_s0.mem.size); if (sc->rbp_l0.size != 0) bzero(sc->rbp_l0.mem.base, sc->rbp_l0.mem.size); if (sc->rbp_s1.size != 0) bzero(sc->rbp_s1.mem.base, sc->rbp_s1.mem.size); if (sc->rbrq_0.size != 0) bzero(sc->rbrq_0.mem.base, sc->rbrq_0.mem.size); if (sc->rbrq_1.size != 0) bzero(sc->rbrq_1.mem.base, sc->rbrq_1.mem.size); bzero(sc->tbrq.mem.base, sc->tbrq.mem.size); bzero(sc->tpdrq.mem.base, sc->tpdrq.mem.size); bzero(sc->hsp_mem.base, sc->hsp_mem.size); } /************************************************************ * * Driver infrastructure */ devclass_t hatm_devclass; static device_method_t hatm_methods[] = { DEVMETHOD(device_probe, hatm_probe), DEVMETHOD(device_attach, hatm_attach), DEVMETHOD(device_detach, hatm_detach), {0,0} }; static driver_t hatm_driver = { "hatm", hatm_methods, sizeof(struct hatm_softc), }; DRIVER_MODULE(hatm, pci, hatm_driver, hatm_devclass, NULL, 0); Index: head/sys/dev/hatm/if_hatm_intr.c =================================================================== --- head/sys/dev/hatm/if_hatm_intr.c (revision 121674) +++ head/sys/dev/hatm/if_hatm_intr.c (revision 121675) @@ -1,679 +1,694 @@ /* * Copyright (c) 2001-2003 * Fraunhofer Institute for Open Communication Systems (FhG Fokus). * All rights reserved. * Author: Hartmut Brandt * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * ForeHE driver. * * Interrupt handler. 
*/ #include "opt_inet.h" #include "opt_natm.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include CTASSERT(sizeof(struct mbuf_page) == MBUF_ALLOC_SIZE); CTASSERT(sizeof(struct mbuf0_chunk) == MBUF0_CHUNK); CTASSERT(sizeof(struct mbuf1_chunk) == MBUF1_CHUNK); CTASSERT(sizeof(((struct mbuf0_chunk *)NULL)->storage) >= MBUF0_SIZE); CTASSERT(sizeof(((struct mbuf1_chunk *)NULL)->storage) >= MBUF1_SIZE); CTASSERT(sizeof(struct tpd) <= HE_TPD_SIZE); +static void hatm_mbuf_page_alloc(struct hatm_softc *sc, u_int group); + /* + * Free an external mbuf to a list. We use atomic functions so that + * we don't need a mutex for the list. + */ +static __inline void +hatm_ext_free(struct mbufx_free **list, struct mbufx_free *buf) +{ + for (;;) { + buf->link = *list; + if (atomic_cmpset_ptr(list, buf->link, buf)) + break; + } +} + +static __inline struct mbufx_free * +hatm_ext_alloc(struct hatm_softc *sc, u_int g) +{ + struct mbufx_free *buf; + + for (;;) { + if ((buf = sc->mbuf_list[g]) == NULL) + break; + if (atomic_cmpset_ptr(&sc->mbuf_list[g], buf, buf->link)) + break; + } + if (buf == NULL) { + hatm_mbuf_page_alloc(sc, g); + for (;;) { + if ((buf = sc->mbuf_list[g]) == NULL) + break; + if (atomic_cmpset_ptr(&sc->mbuf_list[g], buf, buf->link)) + break; + } + } + return (buf); +} + +/* * Either the queue threshold was crossed or a TPD with the INTR bit set * was transmitted. */ static void he_intr_tbrq(struct hatm_softc *sc, struct hetbrq *q, u_int group) { uint32_t *tailp = &sc->hsp->group[group].tbrq_tail; u_int no; while (q->head != (*tailp >> 2)) { no = (q->tbrq[q->head].addr & HE_REGM_TBRQ_ADDR) >> HE_REGS_TPD_ADDR; hatm_tx_complete(sc, TPD_ADDR(sc, no), (q->tbrq[q->head].addr & HE_REGM_TBRQ_FLAGS)); if (++q->head == q->size) q->head = 0; } WRITE4(sc, HE_REGO_TBRQ_H(group), q->head << 2); } /* * DMA loader function for external mbuf page. */ static void hatm_extbuf_helper(void *arg, bus_dma_segment_t *segs, int nsegs, int error) { if (error) { printf("%s: mapping error %d\n", __func__, error); return; } KASSERT(nsegs == 1, ("too many segments for DMA: %d", nsegs)); KASSERT(segs[0].ds_addr <= 0xffffffffUL, ("phys addr too large %lx", (u_long)segs[0].ds_addr)); *(uint32_t *)arg = segs[0].ds_addr; } /* * Allocate a page of external mbuf storage for the small pools. * Create a DMA map and load it. Put all the chunks onto the right * free list.
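 *
 * As a minimal illustrative sketch (not driver code; lifo_push() is a
 * hypothetical name), the push half of the lock-free LIFO implemented
 * by hatm_ext_free() above reduces to the classic compare-and-swap
 * retry loop:
 *
 *	static void
 *	lifo_push(struct mbufx_free **list, struct mbufx_free *buf)
 *	{
 *		do
 *			buf->link = *list;
 *		while (!atomic_cmpset_ptr(list, buf->link, buf));
 *	}
 *
 * The pop half in hatm_ext_alloc() retries the same way. A CAS-based
 * pop is normally exposed to the ABA problem; here a stale link can at
 * least never point into unmapped memory, because chunk pages are kept
 * around once allocated.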
*/ static void hatm_mbuf_page_alloc(struct hatm_softc *sc, u_int group) { struct mbuf_page *pg; int err; u_int i; if (sc->mbuf_npages == HE_CONFIG_MAX_MBUF_PAGES) return; if ((pg = malloc(MBUF_ALLOC_SIZE, M_DEVBUF, M_NOWAIT)) == NULL) return; bzero(pg->hdr.card, sizeof(pg->hdr.card)); - bzero(pg->hdr.used, sizeof(pg->hdr.used)); err = bus_dmamap_create(sc->mbuf_tag, 0, &pg->hdr.map); if (err != 0) { if_printf(&sc->ifatm.ifnet, "%s -- bus_dmamap_create: %d\n", __func__, err); free(pg, M_DEVBUF); return; } err = bus_dmamap_load(sc->mbuf_tag, pg->hdr.map, pg, MBUF_ALLOC_SIZE, hatm_extbuf_helper, &pg->hdr.phys, BUS_DMA_NOWAIT); if (err != 0) { if_printf(&sc->ifatm.ifnet, "%s -- mbuf mapping failed %d\n", __func__, err); bus_dmamap_destroy(sc->mbuf_tag, pg->hdr.map); free(pg, M_DEVBUF); return; } sc->mbuf_pages[sc->mbuf_npages] = pg; if (group == 0) { struct mbuf0_chunk *c; pg->hdr.nchunks = MBUF0_PER_PAGE; pg->hdr.chunksize = MBUF0_CHUNK; pg->hdr.hdroff = sizeof(c->storage); c = (struct mbuf0_chunk *)pg; for (i = 0; i < MBUF0_PER_PAGE; i++, c++) { c->hdr.pageno = sc->mbuf_npages; c->hdr.chunkno = i; - SLIST_INSERT_HEAD(&sc->mbuf0_list, - (struct mbufx_free *)c, link); + hatm_ext_free(&sc->mbuf_list[0], + (struct mbufx_free *)c); } } else { struct mbuf1_chunk *c; pg->hdr.nchunks = MBUF1_PER_PAGE; pg->hdr.chunksize = MBUF1_CHUNK; pg->hdr.hdroff = sizeof(c->storage); c = (struct mbuf1_chunk *)pg; for (i = 0; i < MBUF1_PER_PAGE; i++, c++) { c->hdr.pageno = sc->mbuf_npages; c->hdr.chunkno = i; - SLIST_INSERT_HEAD(&sc->mbuf1_list, - (struct mbufx_free *)c, link); + hatm_ext_free(&sc->mbuf_list[1], + (struct mbufx_free *)c); } } sc->mbuf_npages++; } /* * Free an external mbuf chunk and put it back onto the free list. */ static void hatm_mbuf0_free(void *buf, void *args) { struct hatm_softc *sc = args; struct mbuf0_chunk *c = buf; - mtx_lock(&sc->mbuf0_mtx); - SLIST_INSERT_HEAD(&sc->mbuf0_list, (struct mbufx_free *)c, link); - MBUF_CLR_BIT(sc->mbuf_pages[c->hdr.pageno]->hdr.used, c->hdr.chunkno); - mtx_unlock(&sc->mbuf0_mtx); + hatm_ext_free(&sc->mbuf_list[0], (struct mbufx_free *)c); } static void hatm_mbuf1_free(void *buf, void *args) { struct hatm_softc *sc = args; struct mbuf1_chunk *c = buf; - mtx_lock(&sc->mbuf1_mtx); - SLIST_INSERT_HEAD(&sc->mbuf1_list, (struct mbufx_free *)c, link); - MBUF_CLR_BIT(sc->mbuf_pages[c->hdr.pageno]->hdr.used, c->hdr.chunkno); - mtx_unlock(&sc->mbuf1_mtx); + hatm_ext_free(&sc->mbuf_list[1], (struct mbufx_free *)c); } /* * Allocate external mbuf storage. */ static int hatm_mbuf_alloc(struct hatm_softc *sc, u_int group, struct mbuf *m, uint32_t *phys, uint32_t *handle) { struct mbufx_free *cf; struct mbuf_page *pg; if (group == 0) { struct mbuf0_chunk *buf0; - mtx_lock(&sc->mbuf0_mtx); - if ((cf = SLIST_FIRST(&sc->mbuf0_list)) == NULL) { - hatm_mbuf_page_alloc(sc, group); - if ((cf = SLIST_FIRST(&sc->mbuf0_list)) == NULL) { - mtx_unlock(&sc->mbuf0_mtx); - return (-1); - } - } - SLIST_REMOVE_HEAD(&sc->mbuf0_list, link); + if ((cf = hatm_ext_alloc(sc, 0)) == NULL) + return (-1); buf0 = (struct mbuf0_chunk *)cf; pg = sc->mbuf_pages[buf0->hdr.pageno]; MBUF_SET_BIT(pg->hdr.card, buf0->hdr.chunkno); - mtx_unlock(&sc->mbuf0_mtx); m_extadd(m, (caddr_t)buf0, MBUF0_SIZE, hatm_mbuf0_free, sc, M_PKTHDR, EXT_NET_DRV); m->m_data += MBUF0_OFFSET; buf0->hdr.mbuf = m; *handle = MBUF_MAKE_HANDLE(buf0->hdr.pageno, buf0->hdr.chunkno); } else if (group == 1) { struct mbuf1_chunk *buf1; - mtx_lock(&sc->mbuf1_mtx); - if ((cf = SLIST_FIRST(&sc->mbuf1_list)) == NULL) { - hatm_mbuf_page_alloc(sc, group); - if 
((cf = SLIST_FIRST(&sc->mbuf1_list)) == NULL) { - mtx_unlock(&sc->mbuf1_mtx); - return (-1); - } - } - SLIST_REMOVE_HEAD(&sc->mbuf1_list, link); + if ((cf = hatm_ext_alloc(sc, 1)) == NULL) + return (-1); buf1 = (struct mbuf1_chunk *)cf; pg = sc->mbuf_pages[buf1->hdr.pageno]; MBUF_SET_BIT(pg->hdr.card, buf1->hdr.chunkno); - mtx_unlock(&sc->mbuf1_mtx); m_extadd(m, (caddr_t)buf1, MBUF1_SIZE, hatm_mbuf1_free, sc, M_PKTHDR, EXT_NET_DRV); m->m_data += MBUF1_OFFSET; buf1->hdr.mbuf = m; *handle = MBUF_MAKE_HANDLE(buf1->hdr.pageno, buf1->hdr.chunkno); } else return (-1); *phys = pg->hdr.phys + (mtod(m, char *) - (char *)pg); bus_dmamap_sync(sc->mbuf_tag, pg->hdr.map, BUS_DMASYNC_PREREAD); return (0); } static void hatm_mbuf_helper(void *arg, bus_dma_segment_t *segs, int nsegs, int error) { uint32_t *ptr = (uint32_t *)arg; if (nsegs == 0) { printf("%s: error=%d\n", __func__, error); return; } KASSERT(nsegs == 1, ("too many segments for mbuf: %d", nsegs)); KASSERT(segs[0].ds_addr <= 0xffffffffUL, ("phys addr too large %lx", (u_long)segs[0].ds_addr)); *ptr = segs[0].ds_addr; } /* * Receive buffer pool interrupt. This means the number of entries in the * queue has dropped below the threshold. Try to supply new buffers. */ static void he_intr_rbp(struct hatm_softc *sc, struct herbp *rbp, u_int large, u_int group) { u_int ntail; struct mbuf *m; int error; DBG(sc, INTR, ("%s buffer supply threshold crossed for group %u", large ? "large" : "small", group)); rbp->head = (READ4(sc, HE_REGO_RBP_S(large, group)) >> HE_REGS_RBP_HEAD) & (rbp->size - 1); for (;;) { if ((ntail = rbp->tail + 1) == rbp->size) ntail = 0; if (ntail == rbp->head) break; /* allocate the MBUF */ if (large) { if ((m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR)) == NULL) { if_printf(&sc->ifatm.ifnet, "no mbuf clusters\n"); break; } m->m_data += MBUFL_OFFSET; if (sc->lbufs[sc->lbufs_next] != NULL) panic("hatm: lbufs full %u", sc->lbufs_next); sc->lbufs[sc->lbufs_next] = m; if ((error = bus_dmamap_load(sc->mbuf_tag, sc->rmaps[sc->lbufs_next], m->m_data, rbp->bsize, hatm_mbuf_helper, &rbp->rbp[rbp->tail].phys, BUS_DMA_NOWAIT)) != 0) panic("hatm: mbuf mapping failed %d", error); bus_dmamap_sync(sc->mbuf_tag, sc->rmaps[sc->lbufs_next], BUS_DMASYNC_PREREAD); rbp->rbp[rbp->tail].handle = sc->lbufs_next | MBUF_LARGE_FLAG; if (++sc->lbufs_next == sc->lbufs_size) sc->lbufs_next = 0; } else { MGETHDR(m, M_DONTWAIT, MT_DATA); if (m == NULL) { if_printf(&sc->ifatm.ifnet, "no mbufs\n"); break; } if (hatm_mbuf_alloc(sc, group, m, &rbp->rbp[rbp->tail].phys, &rbp->rbp[rbp->tail].handle)) { m_freem(m); break; } } DBG(sc, DMA, ("MBUF loaded: handle=%x m=%p phys=%x", rbp->rbp[rbp->tail].handle, m, rbp->rbp[rbp->tail].phys)); rbp->rbp[rbp->tail].handle <<= HE_REGS_RBRQ_ADDR; rbp->tail = ntail; } WRITE4(sc, HE_REGO_RBP_T(large, group), (rbp->tail << HE_REGS_RBP_TAIL)); } /* * Extract the buffer and hand it to the receive routine. */ static struct mbuf * hatm_rx_buffer(struct hatm_softc *sc, u_int group, u_int handle) { u_int pageno; u_int chunkno; struct mbuf *m; if (handle & MBUF_LARGE_FLAG) { /* large buffer - sync and unload */ handle &= ~MBUF_LARGE_FLAG; DBG(sc, RX, ("RX large handle=%x", handle)); bus_dmamap_sync(sc->mbuf_tag, sc->rmaps[handle], BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc->mbuf_tag, sc->rmaps[handle]); m = sc->lbufs[handle]; sc->lbufs[handle] = NULL; return (m); } MBUF_PARSE_HANDLE(handle, pageno, chunkno); DBG(sc, RX, ("RX group=%u handle=%x page=%u chunk=%u", group, handle, pageno, chunkno)); if (group == 0) { struct mbuf0_chunk *c0; c0 = 
(struct mbuf0_chunk *)sc->mbuf_pages[pageno] + chunkno; KASSERT(c0->hdr.pageno == pageno, ("pageno = %u/%u", c0->hdr.pageno, pageno)); KASSERT(c0->hdr.chunkno == chunkno, ("chunkno = %u/%u", c0->hdr.chunkno, chunkno)); m = c0->hdr.mbuf; } else { struct mbuf1_chunk *c1; c1 = (struct mbuf1_chunk *)sc->mbuf_pages[pageno] + chunkno; KASSERT(c1->hdr.pageno == pageno, ("pageno = %u/%u", c1->hdr.pageno, pageno)); KASSERT(c1->hdr.chunkno == chunkno, ("chunkno = %u/%u", c1->hdr.chunkno, chunkno)); m = c1->hdr.mbuf; } MBUF_CLR_BIT(sc->mbuf_pages[pageno]->hdr.card, chunkno); - MBUF_SET_BIT(sc->mbuf_pages[pageno]->hdr.used, chunkno); bus_dmamap_sync(sc->mbuf_tag, sc->mbuf_pages[pageno]->hdr.map, BUS_DMASYNC_POSTREAD); return (m); } /* * Interrupt because a receive buffer was returned. */ static void he_intr_rbrq(struct hatm_softc *sc, struct herbrq *rq, u_int group) { struct he_rbrqen *e; uint32_t flags, tail; u_int cid, len; struct mbuf *m; for (;;) { tail = sc->hsp->group[group].rbrq_tail >> 3; if (rq->head == tail) break; e = &rq->rbrq[rq->head]; flags = e->addr & HE_REGM_RBRQ_FLAGS; if (!(flags & HE_REGM_RBRQ_HBUF_ERROR)) m = hatm_rx_buffer(sc, group, (e->addr & HE_REGM_RBRQ_ADDR) >> HE_REGS_RBRQ_ADDR); else m = NULL; cid = (e->len & HE_REGM_RBRQ_CID) >> HE_REGS_RBRQ_CID; len = 4 * (e->len & HE_REGM_RBRQ_LEN); hatm_rx(sc, cid, flags, m, len); if (++rq->head == rq->size) rq->head = 0; } WRITE4(sc, HE_REGO_RBRQ_H(group), rq->head << 3); } void hatm_intr(void *p) { struct heirq *q = p; struct hatm_softc *sc = q->sc; u_int status; u_int tail; /* if we have a stray interrupt with an uninitialized card, * we cannot even lock before looking at the flag */ if (!(sc->ifatm.ifnet.if_flags & IFF_RUNNING)) return; mtx_lock(&sc->mtx); (void)READ4(sc, HE_REGO_INT_FIFO); tail = *q->tailp; if (q->head == tail) { /* workaround for tail pointer not updated bug (8.1.1) */ DBG(sc, INTR, ("hatm: intr tailq not updated bug triggered")); /* read the tail pointer from the card */ tail = READ4(sc, HE_REGO_IRQ_BASE(q->group)) & HE_REGM_IRQ_BASE_TAIL; BARRIER_R(sc); sc->istats.bug_no_irq_upd++; } /* clear the interrupt */ WRITE4(sc, HE_REGO_INT_FIFO, HE_REGM_INT_FIFO_CLRA); BARRIER_W(sc); while (q->head != tail) { status = q->irq[q->head]; q->irq[q->head] = HE_REGM_ITYPE_INVALID; if (++q->head == (q->size - 1)) q->head = 0; switch (status & HE_REGM_ITYPE) { case HE_REGM_ITYPE_TBRQ: DBG(sc, INTR, ("TBRQ threshold %u", status & HE_REGM_IGROUP)); sc->istats.itype_tbrq++; he_intr_tbrq(sc, &sc->tbrq, status & HE_REGM_IGROUP); break; case HE_REGM_ITYPE_TPD: DBG(sc, INTR, ("TPD ready %u", status & HE_REGM_IGROUP)); sc->istats.itype_tpd++; he_intr_tbrq(sc, &sc->tbrq, status & HE_REGM_IGROUP); break; case HE_REGM_ITYPE_RBPS: sc->istats.itype_rbps++; switch (status & HE_REGM_IGROUP) { case 0: he_intr_rbp(sc, &sc->rbp_s0, 0, 0); break; case 1: he_intr_rbp(sc, &sc->rbp_s1, 0, 1); break; default: if_printf(&sc->ifatm.ifnet, "bad INTR RBPS%u\n", status & HE_REGM_IGROUP); break; } break; case HE_REGM_ITYPE_RBPL: sc->istats.itype_rbpl++; switch (status & HE_REGM_IGROUP) { case 0: he_intr_rbp(sc, &sc->rbp_l0, 1, 0); break; default: if_printf(&sc->ifatm.ifnet, "bad INTR RBPL%u\n", status & HE_REGM_IGROUP); break; } break; case HE_REGM_ITYPE_RBRQ: DBG(sc, INTR, ("INTERRUPT RBRQ %u", status & HE_REGM_IGROUP)); sc->istats.itype_rbrq++; switch (status & HE_REGM_IGROUP) { case 0: he_intr_rbrq(sc, &sc->rbrq_0, 0); break; case 1: if (sc->rbrq_1.size > 0) { he_intr_rbrq(sc, &sc->rbrq_1, 1); break; } /* FALLTHRU */ default: if_printf(&sc->ifatm.ifnet, 
"bad INTR RBRQ%u\n", status & HE_REGM_IGROUP); break; } break; case HE_REGM_ITYPE_RBRQT: DBG(sc, INTR, ("INTERRUPT RBRQT %u", status & HE_REGM_IGROUP)); sc->istats.itype_rbrqt++; switch (status & HE_REGM_IGROUP) { case 0: he_intr_rbrq(sc, &sc->rbrq_0, 0); break; case 1: if (sc->rbrq_1.size > 0) { he_intr_rbrq(sc, &sc->rbrq_1, 1); break; } /* FALLTHRU */ default: if_printf(&sc->ifatm.ifnet, "bad INTR RBRQT%u\n", status & HE_REGM_IGROUP); break; } break; case HE_REGM_ITYPE_PHYS: sc->istats.itype_phys++; utopia_intr(&sc->utopia); break; #if HE_REGM_ITYPE_UNKNOWN != HE_REGM_ITYPE_INVALID case HE_REGM_ITYPE_UNKNOWN: sc->istats.itype_unknown++; if_printf(&sc->ifatm.ifnet, "bad interrupt\n"); break; #endif case HE_REGM_ITYPE_ERR: sc->istats.itype_err++; switch (status) { case HE_REGM_ITYPE_PERR: if_printf(&sc->ifatm.ifnet, "parity error\n"); break; case HE_REGM_ITYPE_ABORT: if_printf(&sc->ifatm.ifnet, "abort interrupt " "addr=0x%08x\n", READ4(sc, HE_REGO_ABORT_ADDR)); break; default: if_printf(&sc->ifatm.ifnet, "bad interrupt type %08x\n", status); break; } break; case HE_REGM_ITYPE_INVALID: /* this is the documented fix for the ISW bug 8.1.1 * Note, that the documented fix is partly wrong: * the ISWs should be intialized to 0xf8 not 0xff */ sc->istats.bug_bad_isw++; DBG(sc, INTR, ("hatm: invalid ISW bug triggered")); he_intr_tbrq(sc, &sc->tbrq, 0); he_intr_rbp(sc, &sc->rbp_s0, 0, 0); he_intr_rbp(sc, &sc->rbp_l0, 1, 0); he_intr_rbp(sc, &sc->rbp_s1, 0, 1); he_intr_rbrq(sc, &sc->rbrq_0, 0); he_intr_rbrq(sc, &sc->rbrq_1, 1); utopia_intr(&sc->utopia); break; default: if_printf(&sc->ifatm.ifnet, "bad interrupt type %08x\n", status); break; } } /* write back head to clear queue */ WRITE4(sc, HE_REGO_IRQ_HEAD(0), ((q->size - 1) << HE_REGS_IRQ_HEAD_SIZE) | (q->thresh << HE_REGS_IRQ_HEAD_THRESH) | (q->head << HE_REGS_IRQ_HEAD_HEAD)); BARRIER_W(sc); /* workaround the back-to-back irq access problem (8.1.2) */ (void)READ4(sc, HE_REGO_INT_FIFO); BARRIER_R(sc); mtx_unlock(&sc->mtx); } Index: head/sys/dev/hatm/if_hatmvar.h =================================================================== --- head/sys/dev/hatm/if_hatmvar.h (revision 121674) +++ head/sys/dev/hatm/if_hatmvar.h (revision 121675) @@ -1,621 +1,616 @@ /* * Copyright (c) 2001-2003 * Fraunhofer Institute for Open Communication Systems (FhG Fokus). * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * Author: Hartmut Brandt * * $FreeBSD$ * * Fore HE driver for NATM */ /* * Debug statistics of the HE driver */ struct istats { uint32_t tdprq_full; uint32_t hbuf_error; uint32_t crc_error; uint32_t len_error; uint32_t flow_closed; uint32_t flow_drop; uint32_t tpd_no_mem; uint32_t rx_seg; uint32_t empty_hbuf; uint32_t short_aal5; uint32_t badlen_aal5; uint32_t bug_bad_isw; uint32_t bug_no_irq_upd; uint32_t itype_tbrq; uint32_t itype_tpd; uint32_t itype_rbps; uint32_t itype_rbpl; uint32_t itype_rbrq; uint32_t itype_rbrqt; uint32_t itype_unknown; uint32_t itype_phys; uint32_t itype_err; uint32_t defrag; uint32_t mcc; uint32_t oec; uint32_t dcc; uint32_t cec; }; /* Card memory layout parameters */ #define HE_CONFIG_MEM_LAYOUT { \ { /* 155 */ \ 20, /* cells_per_row */ \ 1024, /* bytes_per_row */ \ 512, /* r0_numrows */ \ 1018, /* tx_numrows */ \ 512, /* r1_numrows */ \ 6, /* r0_startrow */ \ 2 /* cells_per_lbuf */ \ }, { /* 622 */ \ 40, /* cells_per_row */ \ 2048, /* bytes_per_row */ \ 256, /* r0_numrows */ \ 512, /* tx_numrows */ \ 256, /* r1_numrows */ \ 0, /* r0_startrow */ \ 4 /* cells_per_lbuf */ \ } \ } /*********************************************************************/ struct hatm_softc; /* * A chunk of DMA-able memory */ struct dmamem { u_int size; /* in bytes */ u_int align; /* alignment */ bus_dma_tag_t tag; /* DMA tag */ void *base; /* the memory */ bus_addr_t paddr; /* physical address */ bus_dmamap_t map; /* the MAP */ }; /* * RBP (Receive Buffer Pool) queue entry and queue. */ struct herbp { u_int size; /* RBP number of entries (power of two) */ u_int thresh; /* interrupt threshold */ uint32_t bsize; /* buffer size in bytes */ u_int offset; /* free space at start for small bufs */ uint32_t mask; /* mask for index */ struct dmamem mem; /* the queue area */ struct he_rbpen *rbp; uint32_t head, tail; /* head and tail */ }; /* * RBRQ (Receive Buffer Return Queue) entry and queue. */ struct herbrq { u_int size; /* number of entries */ u_int thresh; /* interrupt threshold */ u_int tout; /* timeout value */ u_int pcnt; /* packet count threshold */ struct dmamem mem; /* memory */ struct he_rbrqen *rbrq; uint32_t head; /* driver end */ }; /* * TPDRQ (Transmit Packet Descriptor Ready Queue) entry and queue */ struct hetpdrq { u_int size; /* number of entries */ struct dmamem mem; /* memory */ struct he_tpdrqen *tpdrq; u_int head; /* head (copy of adapter) */ u_int tail; /* written back to adapter */ }; /* * TBRQ (Transmit Buffer Return Queue) entry and queue */ struct hetbrq { u_int size; /* number of entries */ u_int thresh; /* interrupt threshold */ struct dmamem mem; /* memory */ struct he_tbrqen *tbrq; u_int head; /* adapter end */ }; /*==================================================================*/ /* * TPDs are 32 bytes and must be aligned on 64 byte boundaries. That means * that half of the space is free. We use this space to plug in a link for * the list of free TPDs. Note that the m_act member of the mbufs contains * a pointer to the dmamap. 
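 *
 * Because TPDs are laid out with a fixed HE_TPD_SIZE stride, TPD number
 * and TPD address convert both ways with simple arithmetic. A sketch of
 * the two directions (illustrative only; the forward direction mirrors
 * the TPD_ADDR() macro below):
 *
 *	struct tpd *t = (struct tpd *)((char *)sc->tpds.base +
 *	    no * HE_TPD_SIZE);
 *	u_int no = ((char *)t - (char *)sc->tpds.base) / HE_TPD_SIZE;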
* * The maximum number of TPDs is the size of the common transmit packet * descriptor ready queue plus the sizes of the transmit buffer return queues * (currently only queue 0). We allocate and map these TPDs when initializing * the card. We also allocate one DMA map for each TPD. Only the map in the * last TPD of a packet is used when a packet is transmitted. * This is signalled by having the mbuf member of this TPD non-zero and * pointing to the mbuf. */ #define HE_TPD_SIZE 64 struct tpd { struct he_tpd tpd; /* at beginning */ SLIST_ENTRY(tpd) link; /* free cid list link */ struct mbuf *mbuf; /* the buf chain */ bus_dmamap_t map; /* map */ uint32_t cid; /* CID */ uint16_t no; /* number of this tpd */ }; SLIST_HEAD(tpd_list, tpd); #define TPD_SET_USED(SC, I) do { \ (SC)->tpd_used[(I) / 8] |= (1 << ((I) % 8)); \ } while (0) #define TPD_CLR_USED(SC, I) do { \ (SC)->tpd_used[(I) / 8] &= ~(1 << ((I) % 8)); \ } while (0) #define TPD_TST_USED(SC, I) ((SC)->tpd_used[(I) / 8] & (1 << ((I) % 8))) #define TPD_ADDR(SC, I) ((struct tpd *)((char *)sc->tpds.base + \ (I) * HE_TPD_SIZE)) /*==================================================================*/ /* * External MBUFs. The card needs a lot of mbufs in the pools for high * performance. The problem with using mbufs directly is that we would need * a dmamap for each of the mbufs. This can exhaust iommu space on the sparc * and it also eats a lot of processing time. So we use external mbufs * for the small buffers and clusters for the large buffers. * For receive group 0 we use 5 ATM cells, for group 1 one (52 byte) ATM * cell. The mbuf storage is allocated pagewise and one dmamap is used per * page. * * The handle we give to the card for the small buffers is a word combined * of the page number and the number of the chunk in the page. This restricts * the number of chunks per page to 1024 and the number of pages to 1024 * (10 bits each, see MBUF_MAKE_HANDLE() below). * * A chunk may be in one of three states: free, on the card, and floating around * in the system. If it is free, it is on one of the two free lists and * starts with a struct mbufx_free. Each page has a bitmap that tracks which of * its chunks are on the card. * * For large buffers we use mbuf clusters. Here we have two problems: we need * to track the buffers on the card (in the case we want to stop it) and * we need to map the 64-bit mbuf address to a 26-bit handle on 64-bit machines. * The card uses the buffers in the order we give them to the card. Therefore * we can use a private array holding pointers to the mbufs as a circular * queue for both tasks. This is done with the lbufs member of softc. The * handle for these buffers is the lbufs index ORed with a flag. */ #define MBUF0_SIZE (5 * 48) /* 240 */ #define MBUF1_SIZE (52) #define MBUF0_CHUNK 256 /* 16 free bytes */ #define MBUF1_CHUNK 96 /* 44 free bytes */ #ifdef XXX #define MBUF0_OFFSET (MBUF0_CHUNK - sizeof(struct mbuf_chunk_hdr) \ - MBUF0_SIZE) #else #define MBUF0_OFFSET 0 #endif #define MBUF1_OFFSET (MBUF1_CHUNK - sizeof(struct mbuf_chunk_hdr) \ - MBUF1_SIZE) #define MBUFL_OFFSET 16 /* two pointers for HARP */ #define MBUF_ALLOC_SIZE (PAGE_SIZE) /* Each allocated page has one of these structures at its very end. 
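 *
 * A worked example of the small-buffer handle encoding (illustrative
 * only, using the MBUF_MAKE_HANDLE()/MBUF_PARSE_HANDLE() macros defined
 * below): for page 3, chunk 7 the card is handed (3 << 10) | 7 == 0xc07;
 * parsing masks the low 10 bits to recover chunk 7 and shifts right by
 * 10 to recover page 3.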
*/ struct mbuf_page_hdr { uint8_t card[32]; /* bitmap for on-card */ - uint8_t used[32]; /* bitmap for used but not on-card */ uint16_t nchunks; /* chunks on this page */ bus_dmamap_t map; /* the DMA MAP */ uint32_t phys; /* physical base address */ uint32_t hdroff; /* chunk header offset */ uint32_t chunksize; /* chunk size */ }; struct mbuf_page { char storage[MBUF_ALLOC_SIZE - sizeof(struct mbuf_page_hdr)]; struct mbuf_page_hdr hdr; }; /* numbers per page */ #define MBUF0_PER_PAGE ((MBUF_ALLOC_SIZE - sizeof(struct mbuf_page_hdr)) / \ MBUF0_CHUNK) #define MBUF1_PER_PAGE ((MBUF_ALLOC_SIZE - sizeof(struct mbuf_page_hdr)) / \ MBUF1_CHUNK) #define MBUF_CLR_BIT(ARRAY, BIT) ((ARRAY)[(BIT) / 8] &= ~(1 << ((BIT) % 8))) #define MBUF_SET_BIT(ARRAY, BIT) ((ARRAY)[(BIT) / 8] |= (1 << ((BIT) % 8))) #define MBUF_TST_BIT(ARRAY, BIT) ((ARRAY)[(BIT) / 8] & (1 << ((BIT) % 8))) #define MBUF_MAKE_HANDLE(PAGENO, CHUNKNO) \ (((PAGENO) << 10) | (CHUNKNO)) #define MBUF_PARSE_HANDLE(HANDLE, PAGENO, CHUNKNO) do { \ (CHUNKNO) = (HANDLE) & 0x3ff; \ (PAGENO) = ((HANDLE) >> 10) & 0x3ff; \ } while (0) #define MBUF_LARGE_FLAG (1 << 20) /* chunks have the following structure at the end */ struct mbuf_chunk_hdr { struct mbuf *mbuf; uint16_t pageno; uint16_t chunkno; }; #define MBUFX_STORAGE_SIZE(X) (MBUF##X##_CHUNK \ - sizeof(struct mbuf_chunk_hdr)) struct mbuf0_chunk { char storage[MBUFX_STORAGE_SIZE(0)]; struct mbuf_chunk_hdr hdr; }; struct mbuf1_chunk { char storage[MBUFX_STORAGE_SIZE(1)]; struct mbuf_chunk_hdr hdr; }; struct mbufx_free { - SLIST_ENTRY(mbufx_free) link; + struct mbufx_free *link; }; -SLIST_HEAD(mbufx_free_list, mbufx_free); /*==================================================================*/ /* * Interrupt queue */ struct heirq { u_int size; /* number of entries */ u_int thresh; /* re-interrupt threshold */ u_int line; /* interrupt line to use */ struct dmamem mem; /* interrupt queues */ uint32_t * irq; /* interrupt queue */ uint32_t head; /* head index */ uint32_t * tailp; /* pointer to tail */ struct hatm_softc *sc; /* back pointer */ u_int group; /* interrupt group */ }; /* * This structure describes all information for a VCC open on the card. * The array of these structures is indexed by the compressed connection ID * (CID). This structure must begin with the atmio_vcc. */ struct hevcc { struct atmio_vcc param; /* traffic parameters */ void * rxhand; /* NATM protocol block */ u_int vflags; /* private flags */ uint32_t ipackets; uint32_t opackets; uint32_t ibytes; uint32_t obytes; u_int rc; /* rate control group for CBR */ struct mbuf * chain; /* partially received PDU */ struct mbuf * last; /* last mbuf in chain */ u_int ntpds; /* number of active TPDs */ }; #define HE_VCC_OPEN 0x000f0000 #define HE_VCC_RX_OPEN 0x00010000 #define HE_VCC_RX_CLOSING 0x00020000 #define HE_VCC_TX_OPEN 0x00040000 #define HE_VCC_TX_CLOSING 0x00080000 #define HE_VCC_FLOW_CTRL 0x00100000 /* * CBR rate groups */ struct herg { u_int refcnt; /* how many connections reference this group */ u_int rate; /* the value */ }; /* * Softc */ struct hatm_softc { struct ifatm ifatm; /* common ATM stuff */ struct mtx mtx; /* lock */ struct ifmedia media; /* media */ device_t dev; /* device */ int memid; /* resource id for memory */ struct resource * memres; /* memory resource */ bus_space_handle_t memh; /* handle */ bus_space_tag_t memt; /* ... 
and tag */ bus_dma_tag_t parent_tag; /* global restriction */ struct cv vcc_cv; /* condition variable */ int irqid; /* resource id */ struct resource * irqres; /* resource */ void * ih; /* interrupt handle */ struct utopia utopia; /* utopia state */ /* rest has to be reset by stop */ int he622; /* this is a HE622 */ int pci64; /* 64bit bus */ char prod_id[HE_EEPROM_PROD_ID_LEN + 1]; char rev[HE_EEPROM_REV_LEN + 1]; struct heirq irq_0; /* interrupt queue 0 */ /* generic network controller state */ u_int cells_per_row; u_int bytes_per_row; u_int r0_numrows; u_int tx_numrows; u_int r1_numrows; u_int r0_startrow; u_int tx_startrow; u_int r1_startrow; u_int cells_per_lbuf; u_int r0_numbuffs; u_int r1_numbuffs; u_int tx_numbuffs; /* HSP */ struct he_hsp *hsp; struct dmamem hsp_mem; /*** TX ***/ struct hetbrq tbrq; /* TBRQ 0 */ struct hetpdrq tpdrq; /* TPDRQ */ struct tpd_list tpd_free; /* Free TPDs */ u_int tpd_nfree; /* number of free TPDs */ u_int tpd_total; /* total TPDs */ uint8_t *tpd_used; /* bitmap of used TPDs */ struct dmamem tpds; /* TPD memory */ bus_dma_tag_t tx_tag; /* DMA tag for all tx mbufs */ /*** RX ***/ /* receive/transmit groups */ struct herbp rbp_s0; /* RBPS0 */ struct herbp rbp_l0; /* RBPL0 */ struct herbp rbp_s1; /* RBPS1 */ struct herbrq rbrq_0; /* RBRQ0 */ struct herbrq rbrq_1; /* RBRQ1 */ /* list of external mbuf storage */ bus_dma_tag_t mbuf_tag; struct mbuf_page **mbuf_pages; u_int mbuf_npages; - struct mtx mbuf0_mtx; - struct mbufx_free_list mbuf0_list; - struct mtx mbuf1_mtx; - struct mbufx_free_list mbuf1_list; + struct mbufx_free *mbuf_list[2]; /* mbuf cluster tracking and mapping for group 0 */ struct mbuf **lbufs; /* mbufs */ bus_dmamap_t *rmaps; /* DMA maps */ u_int lbufs_size; u_int lbufs_next; /* VCCs */ struct hevcc *vccs[HE_MAX_VCCS]; u_int cbr_bw; /* BW allocated to CBR */ u_int max_tpd; /* per VCC */ u_int open_vccs; uma_zone_t vcc_zone; /* rate groups */ struct herg rate_ctrl[HE_REGN_CS_STPER]; /* memory offsets */ u_int tsrb, tsrc, tsrd; u_int rsrb; struct cv cv_rcclose; /* condition variable */ uint32_t rate_grid[16][16]; /* our copy */ /* sysctl support */ struct sysctl_ctx_list sysctl_ctx; struct sysctl_oid *sysctl_tree; /* internal statistics */ struct istats istats; #ifdef HATM_DEBUG /* debugging */ u_int debug; #endif }; #define READ4(SC,OFF) bus_space_read_4(SC->memt, SC->memh, (OFF)) #define READ2(SC,OFF) bus_space_read_2(SC->memt, SC->memh, (OFF)) #define READ1(SC,OFF) bus_space_read_1(SC->memt, SC->memh, (OFF)) #define WRITE4(SC,OFF,VAL) bus_space_write_4(SC->memt, SC->memh, (OFF), (VAL)) #define WRITE2(SC,OFF,VAL) bus_space_write_2(SC->memt, SC->memh, (OFF), (VAL)) #define WRITE1(SC,OFF,VAL) bus_space_write_1(SC->memt, SC->memh, (OFF), (VAL)) #define BARRIER_R(SC) bus_space_barrier(SC->memt, SC->memh, 0, HE_REGO_END, \ BUS_SPACE_BARRIER_READ) #define BARRIER_W(SC) bus_space_barrier(SC->memt, SC->memh, 0, HE_REGO_END, \ BUS_SPACE_BARRIER_WRITE) #define BARRIER_RW(SC) bus_space_barrier(SC->memt, SC->memh, 0, HE_REGO_END, \ BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE) #define READ_SUNI(SC,OFF) READ4(SC, HE_REGO_SUNI + 4 * (OFF)) #define WRITE_SUNI(SC,OFF,VAL) WRITE4(SC, HE_REGO_SUNI + 4 * (OFF), (VAL)) #define READ_LB4(SC,OFF) \ ({ \ WRITE4(SC, HE_REGO_LB_MEM_ADDR, (OFF)); \ WRITE4(SC, HE_REGO_LB_MEM_ACCESS, \ (HE_REGM_LB_MEM_HNDSHK | HE_REGM_LB_MEM_READ)); \ while((READ4(SC, HE_REGO_LB_MEM_ACCESS) & HE_REGM_LB_MEM_HNDSHK))\ ; \ READ4(SC, HE_REGO_LB_MEM_DATA); \ }) #define WRITE_LB4(SC,OFF,VAL) \ do { \ WRITE4(SC, HE_REGO_LB_MEM_ADDR, 
(OFF)); \ WRITE4(SC, HE_REGO_LB_MEM_DATA, (VAL)); \ WRITE4(SC, HE_REGO_LB_MEM_ACCESS, \ (HE_REGM_LB_MEM_HNDSHK | HE_REGM_LB_MEM_WRITE)); \ while((READ4(SC, HE_REGO_LB_MEM_ACCESS) & HE_REGM_LB_MEM_HNDSHK))\ ; \ } while(0) #define WRITE_MEM4(SC,OFF,VAL,SPACE) \ do { \ WRITE4(SC, HE_REGO_CON_DAT, (VAL)); \ WRITE4(SC, HE_REGO_CON_CTL, \ (SPACE | HE_REGM_CON_WE | HE_REGM_CON_STATUS | (OFF))); \ while((READ4(SC, HE_REGO_CON_CTL) & HE_REGM_CON_STATUS) != 0) \ ; \ } while(0) #define READ_MEM4(SC,OFF,SPACE) \ ({ \ WRITE4(SC, HE_REGO_CON_CTL, \ (SPACE | HE_REGM_CON_STATUS | (OFF))); \ while((READ4(SC, HE_REGO_CON_CTL) & HE_REGM_CON_STATUS) != 0) \ ; \ READ4(SC, HE_REGO_CON_DAT); \ }) #define WRITE_TCM4(SC,OFF,VAL) WRITE_MEM4(SC,(OFF),(VAL),HE_REGM_CON_TCM) #define WRITE_RCM4(SC,OFF,VAL) WRITE_MEM4(SC,(OFF),(VAL),HE_REGM_CON_RCM) #define WRITE_MBOX4(SC,OFF,VAL) WRITE_MEM4(SC,(OFF),(VAL),HE_REGM_CON_MBOX) #define READ_TCM4(SC,OFF) READ_MEM4(SC,(OFF),HE_REGM_CON_TCM) #define READ_RCM4(SC,OFF) READ_MEM4(SC,(OFF),HE_REGM_CON_RCM) #define READ_MBOX4(SC,OFF) READ_MEM4(SC,(OFF),HE_REGM_CON_MBOX) #define WRITE_TCM(SC,OFF,BYTES,VAL) \ WRITE_MEM4(SC,(OFF) | ((~(BYTES) & 0xf) << HE_REGS_CON_DIS), \ (VAL), HE_REGM_CON_TCM) #define WRITE_RCM(SC,OFF,BYTES,VAL) \ WRITE_MEM4(SC,(OFF) | ((~(BYTES) & 0xf) << HE_REGS_CON_DIS), \ (VAL), HE_REGM_CON_RCM) #define READ_TSR(SC,CID,NR) \ ({ \ uint32_t _v; \ if((NR) <= 7) { \ _v = READ_TCM4(SC, HE_REGO_TSRA(0,CID,NR)); \ } else if((NR) <= 11) { \ _v = READ_TCM4(SC, HE_REGO_TSRB((SC)->tsrb,CID,(NR-8)));\ } else if((NR) <= 13) { \ _v = READ_TCM4(SC, HE_REGO_TSRC((SC)->tsrc,CID,(NR-12)));\ } else { \ _v = READ_TCM4(SC, HE_REGO_TSRD((SC)->tsrd,CID)); \ } \ _v; \ }) #define WRITE_TSR(SC,CID,NR,BEN,VAL) \ do { \ if((NR) <= 7) { \ WRITE_TCM(SC, HE_REGO_TSRA(0,CID,NR),BEN,VAL); \ } else if((NR) <= 11) { \ WRITE_TCM(SC, HE_REGO_TSRB((SC)->tsrb,CID,(NR-8)),BEN,VAL);\ } else if((NR) <= 13) { \ WRITE_TCM(SC, HE_REGO_TSRC((SC)->tsrc,CID,(NR-12)),BEN,VAL);\ } else { \ WRITE_TCM(SC, HE_REGO_TSRD((SC)->tsrd,CID),BEN,VAL); \ } \ } while(0) #define READ_RSR(SC,CID,NR) \ ({ \ uint32_t _v; \ if((NR) <= 7) { \ _v = READ_RCM4(SC, HE_REGO_RSRA(0,CID,NR)); \ } else { \ _v = READ_RCM4(SC, HE_REGO_RSRB((SC)->rsrb,CID,(NR-8)));\ } \ _v; \ }) #define WRITE_RSR(SC,CID,NR,BEN,VAL) \ do { \ if((NR) <= 7) { \ WRITE_RCM(SC, HE_REGO_RSRA(0,CID,NR),BEN,VAL); \ } else { \ WRITE_RCM(SC, HE_REGO_RSRB((SC)->rsrb,CID,(NR-8)),BEN,VAL);\ } \ } while(0) #ifdef HATM_DEBUG #define DBG(SC, FL, PRINT) do { \ if((SC)->debug & DBG_##FL) { \ if_printf(&(SC)->ifatm.ifnet, "%s: ", __func__); \ printf PRINT; \ printf("\n"); \ } \ } while (0) enum { DBG_RX = 0x0001, DBG_TX = 0x0002, DBG_VCC = 0x0004, DBG_IOCTL = 0x0008, DBG_ATTACH = 0x0010, DBG_INTR = 0x0020, DBG_DMA = 0x0040, DBG_DMAH = 0x0080, DBG_ALL = 0x00ff }; #else #define DBG(SC, FL, PRINT) #endif u_int hatm_cps2atmf(uint32_t); u_int hatm_atmf2cps(uint32_t); void hatm_intr(void *); int hatm_ioctl(struct ifnet *, u_long, caddr_t); void hatm_initialize(struct hatm_softc *); void hatm_stop(struct hatm_softc *sc); void hatm_start(struct ifnet *); void hatm_rx(struct hatm_softc *sc, u_int cid, u_int flags, struct mbuf *m, u_int len); void hatm_tx_complete(struct hatm_softc *sc, struct tpd *tpd, uint32_t); int hatm_tx_vcc_can_open(struct hatm_softc *sc, u_int cid, struct hevcc *); void hatm_tx_vcc_open(struct hatm_softc *sc, u_int cid); void hatm_rx_vcc_open(struct hatm_softc *sc, u_int cid); void hatm_tx_vcc_close(struct hatm_softc *sc, u_int cid); void 
hatm_rx_vcc_close(struct hatm_softc *sc, u_int cid); void hatm_tx_vcc_closed(struct hatm_softc *sc, u_int cid); void hatm_vcc_closed(struct hatm_softc *sc, u_int cid); void hatm_load_vc(struct hatm_softc *sc, u_int cid, int reopen);
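
/*
 * A minimal, self-contained sketch (illustrative only, not part of the
 * driver) of the bitmap idiom shared by the TPD_*_USED() and
 * MBUF_*_BIT() macros above: bit I lives in byte I / 8 at bit position
 * I % 8 of the array.
 */
#include <assert.h>
#include <stdint.h>

int
main(void)
{
	uint8_t map[16] = { 0 };		/* room for 128 bits */

	map[42 / 8] |= 1 << (42 % 8);		/* set bit 42 */
	assert(map[42 / 8] & (1 << (42 % 8)));	/* test bit 42 */
	map[42 / 8] &= ~(1 << (42 % 8));	/* clear bit 42 */
	assert(map[42 / 8] == 0);
	return (0);
}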