Index: head/sys/dev/fatm/if_fatm.c =================================================================== --- head/sys/dev/fatm/if_fatm.c (revision 147720) +++ head/sys/dev/fatm/if_fatm.c (revision 147721) @@ -1,3109 +1,3109 @@ /*- * Copyright (c) 2001-2003 * Fraunhofer Institute for Open Communication Systems (FhG Fokus). * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * Author: Hartmut Brandt * * Fore PCA200E driver for NATM */ #include __FBSDID("$FreeBSD$"); #include "opt_inet.h" #include "opt_natm.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef ENABLE_BPF #include #endif #ifdef INET #include #include #endif #include #include #include #include #include #include #include #include #include #include devclass_t fatm_devclass; static const struct { uint16_t vid; uint16_t did; const char *name; } fatm_devs[] = { { 0x1127, 0x300, "FORE PCA200E" }, { 0, 0, NULL } }; static const struct rate { uint32_t ratio; uint32_t cell_rate; } rate_table[] = { #include }; #define RATE_TABLE_SIZE (sizeof(rate_table) / sizeof(rate_table[0])) SYSCTL_DECL(_hw_atm); MODULE_DEPEND(fatm, utopia, 1, 1, 1); static int fatm_utopia_readregs(struct ifatm *, u_int, uint8_t *, u_int *); static int fatm_utopia_writereg(struct ifatm *, u_int, u_int, u_int); static const struct utopia_methods fatm_utopia_methods = { fatm_utopia_readregs, fatm_utopia_writereg }; #define VC_OK(SC, VPI, VCI) \ (((VPI) & ~((1 << IFP2IFATM((SC)->ifp)->mib.vpi_bits) - 1)) == 0 && \ (VCI) != 0 && ((VCI) & ~((1 << IFP2IFATM((SC)->ifp)->mib.vci_bits) - 1)) == 0) static int fatm_load_vc(struct fatm_softc *sc, struct card_vcc *vc); /* * Probing is easy: step through the list of known vendor and device * ids and compare. If one is found, it's ours. */ static int fatm_probe(device_t dev) { int i; for (i = 0; fatm_devs[i].name; i++) if (pci_get_vendor(dev) == fatm_devs[i].vid && pci_get_device(dev) == fatm_devs[i].did) { device_set_desc(dev, fatm_devs[i].name); return (BUS_PROBE_DEFAULT); } return (ENXIO); } /* * Function called at completion of a SUNI writeregs/readregs command. * This is called from the interrupt handler while holding the softc lock.
* We use the queue entry as the rendezvous point. */ static void fatm_utopia_writeregs_complete(struct fatm_softc *sc, struct cmdqueue *q) { H_SYNCSTAT_POSTREAD(sc, q->q.statp); if(H_GETSTAT(q->q.statp) & FATM_STAT_ERROR) { sc->istats.suni_reg_errors++; q->error = EIO; } wakeup(q); } /* * Write a SUNI register. The bits that are 1 in mask are written from val * into register reg. We wait for the command to complete by sleeping on * the register memory. * * We assume that we already hold the softc mutex. */ static int fatm_utopia_writereg(struct ifatm *ifatm, u_int reg, u_int mask, u_int val) { int error; struct cmdqueue *q; struct fatm_softc *sc; sc = ifatm->ifp->if_softc; FATM_CHECKLOCK(sc); if (!(ifatm->ifp->if_flags & IFF_RUNNING)) return (EIO); /* get queue element and fill it */ q = GET_QUEUE(sc->cmdqueue, struct cmdqueue, sc->cmdqueue.head); H_SYNCSTAT_POSTREAD(sc, q->q.statp); if (!(H_GETSTAT(q->q.statp) & FATM_STAT_FREE)) { sc->istats.cmd_queue_full++; return (EIO); } NEXT_QUEUE_ENTRY(sc->cmdqueue.head, FATM_CMD_QLEN); q->error = 0; q->cb = fatm_utopia_writeregs_complete; H_SETSTAT(q->q.statp, FATM_STAT_PENDING); H_SYNCSTAT_PREWRITE(sc, q->q.statp); WRITE4(sc, q->q.card + FATMOC_GETOC3_BUF, 0); BARRIER_W(sc); WRITE4(sc, q->q.card + FATMOC_OP, FATM_MAKE_SETOC3(reg, val, mask) | FATM_OP_INTERRUPT_SEL); BARRIER_W(sc); /* * Wait for the command to complete */ error = msleep(q, &sc->mtx, PZERO | PCATCH, "fatm_setreg", hz); switch(error) { case EWOULDBLOCK: error = EIO; break; case ERESTART: error = EINTR; break; case 0: error = q->error; break; } return (error); } /* * Function called at completion of a SUNI readregs command. * This is called from the interrupt handler while holding the softc lock. * We use reg_mem as the rendezvous point. */ static void fatm_utopia_readregs_complete(struct fatm_softc *sc, struct cmdqueue *q) { H_SYNCSTAT_POSTREAD(sc, q->q.statp); if (H_GETSTAT(q->q.statp) & FATM_STAT_ERROR) { sc->istats.suni_reg_errors++; q->error = EIO; } wakeup(&sc->reg_mem); } /* * Read SUNI registers * * We use a preallocated buffer to read the registers. Therefore we need * to protect against multiple threads trying to read registers. We do this * with a condition variable and a flag. We wait for the command to complete by sleeping on * the register memory. * * We assume that we already hold the softc mutex.
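 *
 * A minimal sketch of that ownership pattern (the names are the ones
 * used in the code below; this is an illustration, not extra driver code):
 *
 *	while (sc->flags & FATM_REGS_INUSE)	/* wait for the buffer */
 *		cv_wait(&sc->cv_regs, &sc->mtx);
 *	sc->flags |= FATM_REGS_INUSE;		/* take ownership */
 *	... issue the command, msleep() on sc->reg_mem ...
 *	sc->flags &= ~FATM_REGS_INUSE;		/* release ownership */
 *	cv_signal(&sc->cv_regs);		/* wake one waiter */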
*/ static int fatm_utopia_readregs_internal(struct fatm_softc *sc) { int error, i; uint32_t *ptr; struct cmdqueue *q; /* get the buffer */ for (;;) { if (!(sc->ifp->if_flags & IFF_RUNNING)) return (EIO); if (!(sc->flags & FATM_REGS_INUSE)) break; cv_wait(&sc->cv_regs, &sc->mtx); } sc->flags |= FATM_REGS_INUSE; q = GET_QUEUE(sc->cmdqueue, struct cmdqueue, sc->cmdqueue.head); H_SYNCSTAT_POSTREAD(sc, q->q.statp); if (!(H_GETSTAT(q->q.statp) & FATM_STAT_FREE)) { sc->istats.cmd_queue_full++; return (EIO); } NEXT_QUEUE_ENTRY(sc->cmdqueue.head, FATM_CMD_QLEN); q->error = 0; q->cb = fatm_utopia_readregs_complete; H_SETSTAT(q->q.statp, FATM_STAT_PENDING); H_SYNCSTAT_PREWRITE(sc, q->q.statp); bus_dmamap_sync(sc->reg_mem.dmat, sc->reg_mem.map, BUS_DMASYNC_PREREAD); WRITE4(sc, q->q.card + FATMOC_GETOC3_BUF, sc->reg_mem.paddr); BARRIER_W(sc); WRITE4(sc, q->q.card + FATMOC_OP, FATM_OP_OC3_GET_REG | FATM_OP_INTERRUPT_SEL); BARRIER_W(sc); /* * Wait for the command to complete */ error = msleep(&sc->reg_mem, &sc->mtx, PZERO | PCATCH, "fatm_getreg", hz); switch(error) { case EWOULDBLOCK: error = EIO; break; case ERESTART: error = EINTR; break; case 0: bus_dmamap_sync(sc->reg_mem.dmat, sc->reg_mem.map, BUS_DMASYNC_POSTREAD); error = q->error; break; } if (error != 0) { /* declare buffer to be free */ sc->flags &= ~FATM_REGS_INUSE; cv_signal(&sc->cv_regs); return (error); } /* swap if needed */ ptr = (uint32_t *)sc->reg_mem.mem; for (i = 0; i < FATM_NREGS; i++) ptr[i] = le32toh(ptr[i]) & 0xff; return (0); } /* * Read SUNI registers for the SUNI module. * * We assume that we already hold the mutex. */ static int fatm_utopia_readregs(struct ifatm *ifatm, u_int reg, uint8_t *valp, u_int *np) { int err; int i; struct fatm_softc *sc; if (reg >= FATM_NREGS) return (EINVAL); if (reg + *np > FATM_NREGS) *np = FATM_NREGS - reg; sc = ifatm->ifp->if_softc; FATM_CHECKLOCK(sc); err = fatm_utopia_readregs_internal(sc); if (err != 0) return (err); for (i = 0; i < *np; i++) valp[i] = ((uint32_t *)sc->reg_mem.mem)[reg + i]; /* declare buffer to be free */ sc->flags &= ~FATM_REGS_INUSE; cv_signal(&sc->cv_regs); return (0); } /* * Check whether the heart is beating. We remember the last heart beat and * compare it to the current one. If it appears stuck 10 times in a row, we * have a problem. * * Assume we hold the lock. */ static void fatm_check_heartbeat(struct fatm_softc *sc) { uint32_t h; FATM_CHECKLOCK(sc); h = READ4(sc, FATMO_HEARTBEAT); DBG(sc, BEAT, ("heartbeat %08x", h)); if (sc->stop_cnt == 10) return; if (h == sc->heartbeat) { if (++sc->stop_cnt == 10) { log(LOG_ERR, "i960 stopped???\n"); WRITE4(sc, FATMO_HIMR, 1); } return; } sc->stop_cnt = 0; sc->heartbeat = h; } /* * Ensure that the heart is still beating. */ static void fatm_watchdog(struct ifnet *ifp) { struct fatm_softc *sc = ifp->if_softc; FATM_LOCK(sc); if (ifp->if_flags & IFF_RUNNING) { fatm_check_heartbeat(sc); ifp->if_timer = 5; } FATM_UNLOCK(sc); } /* * Hard reset the i960 on the board. This is done by initializing registers, * clearing interrupts and waiting for the selftest to finish. Not sure * whether all these barriers are actually needed. * * Assumes that we hold the lock.
*/ static int fatm_reset(struct fatm_softc *sc) { int w; uint32_t val; FATM_CHECKLOCK(sc); WRITE4(sc, FATMO_APP_BASE, FATMO_COMMON_ORIGIN); BARRIER_W(sc); WRITE4(sc, FATMO_UART_TO_960, XMIT_READY); BARRIER_W(sc); WRITE4(sc, FATMO_UART_TO_HOST, XMIT_READY); BARRIER_W(sc); WRITE4(sc, FATMO_BOOT_STATUS, COLD_START); BARRIER_W(sc); WRITE1(sc, FATMO_HCR, FATM_HCR_RESET); BARRIER_W(sc); DELAY(1000); WRITE1(sc, FATMO_HCR, 0); BARRIER_RW(sc); DELAY(1000); for (w = 100; w; w--) { BARRIER_R(sc); val = READ4(sc, FATMO_BOOT_STATUS); switch (val) { case SELF_TEST_OK: return (0); case SELF_TEST_FAIL: return (EIO); } DELAY(1000); } return (EIO); } /* * Stop the card. Must be called WITH the lock held. * Reset, free transmit and receive buffers. Wake up everybody who may sleep. */ static void fatm_stop(struct fatm_softc *sc) { int i; struct cmdqueue *q; struct rbuf *rb; struct txqueue *tx; uint32_t stat; FATM_CHECKLOCK(sc); /* Stop the board */ utopia_stop(&sc->utopia); (void)fatm_reset(sc); /* stop watchdog */ sc->ifp->if_timer = 0; if (sc->ifp->if_flags & IFF_RUNNING) { sc->ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); ATMEV_SEND_IFSTATE_CHANGED(IFP2IFATM(sc->ifp), sc->utopia.carrier == UTP_CARR_OK); /* * Collect transmit mbufs, partial receive mbufs and * supplied mbufs */ for (i = 0; i < FATM_TX_QLEN; i++) { tx = GET_QUEUE(sc->txqueue, struct txqueue, i); if (tx->m) { bus_dmamap_unload(sc->tx_tag, tx->map); m_freem(tx->m); tx->m = NULL; } } /* Collect supplied mbufs */ while ((rb = LIST_FIRST(&sc->rbuf_used)) != NULL) { LIST_REMOVE(rb, link); bus_dmamap_unload(sc->rbuf_tag, rb->map); m_free(rb->m); rb->m = NULL; LIST_INSERT_HEAD(&sc->rbuf_free, rb, link); } /* Wake up any waiters */ wakeup(&sc->sadi_mem); /* wakeup all threads waiting for STAT or REG buffers */ cv_broadcast(&sc->cv_stat); cv_broadcast(&sc->cv_regs); sc->flags &= ~(FATM_STAT_INUSE | FATM_REGS_INUSE); /* wakeup all threads waiting on commands */ for (i = 0; i < FATM_CMD_QLEN; i++) { q = GET_QUEUE(sc->cmdqueue, struct cmdqueue, i); H_SYNCSTAT_POSTREAD(sc, q->q.statp); if ((stat = H_GETSTAT(q->q.statp)) != FATM_STAT_FREE) { H_SETSTAT(q->q.statp, stat | FATM_STAT_ERROR); H_SYNCSTAT_PREWRITE(sc, q->q.statp); wakeup(q); } } utopia_reset_media(&sc->utopia); } sc->small_cnt = sc->large_cnt = 0; /* Reset vcc info */ if (sc->vccs != NULL) { sc->open_vccs = 0; for (i = 0; i < FORE_MAX_VCC + 1; i++) { if (sc->vccs[i] != NULL) { if ((sc->vccs[i]->vflags & (FATM_VCC_OPEN | FATM_VCC_TRY_OPEN)) == 0) { uma_zfree(sc->vcc_zone, sc->vccs[i]); sc->vccs[i] = NULL; } else { sc->vccs[i]->vflags = 0; sc->open_vccs++; } } } } } /* * Load the firmware into the board and save the entry point. */ static uint32_t firmware_load(struct fatm_softc *sc) { struct firmware *fw = (struct firmware *)firmware; DBG(sc, INIT, ("loading - entry=%x", fw->entry)); bus_space_write_region_4(sc->memt, sc->memh, fw->offset, firmware, sizeof(firmware) / sizeof(firmware[0])); BARRIER_RW(sc); return (fw->entry); } /* * Read a character from the virtual UART. The availability of a character * is signalled by a non-zero value of the 32 bit register. We signal that * we have eaten the character by setting that register back to zero. */ static int rx_getc(struct fatm_softc *sc) { int w = 50; int c; while (w--) { c = READ4(sc, FATMO_UART_TO_HOST); BARRIER_RW(sc); if (c != 0) { WRITE4(sc, FATMO_UART_TO_HOST, 0); DBGC(sc, UART, ("%c", c & 0xff)); return (c & 0xff); } DELAY(1000); } return (-1); } /* * Eat up characters from the board and stuff them in the bit-bucket.
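 *
 * For reference, the register handshake implemented by rx_getc() above and
 * tx_putc() below works like this (a sketch, not additional driver code):
 *
 *	c = READ4(sc, FATMO_UART_TO_HOST);	/* non-zero: char available */
 *	WRITE4(sc, FATMO_UART_TO_HOST, 0);	/* acknowledge (eat) it */
 *
 *	if (READ4(sc, FATMO_UART_TO_960) == 0)	/* zero: UART ready */
 *		WRITE4(sc, FATMO_UART_TO_960, c | CHAR_AVAIL);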
*/ static void rx_flush(struct fatm_softc *sc) { int w = 10000; while (w-- && rx_getc(sc) >= 0) ; } /* * Write a character to the card. The UART is available if the register * is zero. */ static int tx_putc(struct fatm_softc *sc, u_char c) { int w = 10; int c1; while (w--) { c1 = READ4(sc, FATMO_UART_TO_960); BARRIER_RW(sc); if (c1 == 0) { WRITE4(sc, FATMO_UART_TO_960, c | CHAR_AVAIL); DBGC(sc, UART, ("%c", c & 0xff)); return (0); } DELAY(1000); } return (-1); } /* * Start the firmware. This is done by issuing a 'go' command with * the hex entry address of the firmware. Then we wait for the self-test to * succeed. */ static int fatm_start_firmware(struct fatm_softc *sc, uint32_t start) { static char hex[] = "0123456789abcdef"; u_int w, val; DBG(sc, INIT, ("starting")); rx_flush(sc); tx_putc(sc, '\r'); DELAY(1000); rx_flush(sc); tx_putc(sc, 'g'); (void)rx_getc(sc); tx_putc(sc, 'o'); (void)rx_getc(sc); tx_putc(sc, ' '); (void)rx_getc(sc); tx_putc(sc, hex[(start >> 12) & 0xf]); (void)rx_getc(sc); tx_putc(sc, hex[(start >> 8) & 0xf]); (void)rx_getc(sc); tx_putc(sc, hex[(start >> 4) & 0xf]); (void)rx_getc(sc); tx_putc(sc, hex[(start >> 0) & 0xf]); (void)rx_getc(sc); tx_putc(sc, '\r'); rx_flush(sc); for (w = 100; w; w--) { BARRIER_R(sc); val = READ4(sc, FATMO_BOOT_STATUS); switch (val) { case CP_RUNNING: return (0); case SELF_TEST_FAIL: return (EIO); } DELAY(1000); } return (EIO); } /* * Initialize one card and host queue. */ static void init_card_queue(struct fatm_softc *sc, struct fqueue *queue, int qlen, size_t qel_size, size_t desc_size, cardoff_t off, u_char **statpp, uint32_t *cardstat, u_char *descp, uint32_t carddesc) { struct fqelem *el = queue->chunk; while (qlen--) { el->card = off; off += 8; /* size of card entry */ el->statp = (uint32_t *)(*statpp); (*statpp) += sizeof(uint32_t); H_SETSTAT(el->statp, FATM_STAT_FREE); H_SYNCSTAT_PREWRITE(sc, el->statp); WRITE4(sc, el->card + FATMOS_STATP, (*cardstat)); (*cardstat) += sizeof(uint32_t); el->ioblk = descp; descp += desc_size; el->card_ioblk = carddesc; carddesc += desc_size; el = (struct fqelem *)((u_char *)el + qel_size); } queue->tail = queue->head = 0; } /* * Issue the initialize operation to the card, wait for completion and * initialize the on-board and host queue structures with offsets and * addresses.
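 *
 * Completion is detected by busy-waiting on the status word of the
 * initialize OP; the loop below is essentially (sketch):
 *
 *	for (w = 100; w > 0; w--) {
 *		c = READ4(sc, FATMO_INIT + FATMOI_STATUS);
 *		if (c & FATM_STAT_COMPLETE)
 *			break;
 *		DELAY(1000);	/* 1 ms per iteration, ~100 ms total */
 *	}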
*/ static int fatm_init_cmd(struct fatm_softc *sc) { int w, c; u_char *statp; uint32_t card_stat; u_int cnt; struct fqelem *el; cardoff_t off; DBG(sc, INIT, ("command")); WRITE4(sc, FATMO_ISTAT, 0); WRITE4(sc, FATMO_IMASK, 1); WRITE4(sc, FATMO_HLOGGER, 0); WRITE4(sc, FATMO_INIT + FATMOI_RECEIVE_TRESHOLD, 0); WRITE4(sc, FATMO_INIT + FATMOI_NUM_CONNECT, FORE_MAX_VCC); WRITE4(sc, FATMO_INIT + FATMOI_CQUEUE_LEN, FATM_CMD_QLEN); WRITE4(sc, FATMO_INIT + FATMOI_TQUEUE_LEN, FATM_TX_QLEN); WRITE4(sc, FATMO_INIT + FATMOI_RQUEUE_LEN, FATM_RX_QLEN); WRITE4(sc, FATMO_INIT + FATMOI_RPD_EXTENSION, RPD_EXTENSIONS); WRITE4(sc, FATMO_INIT + FATMOI_TPD_EXTENSION, TPD_EXTENSIONS); /* * initialize buffer descriptors */ WRITE4(sc, FATMO_INIT + FATMOI_SMALL_B1 + FATMOB_QUEUE_LENGTH, SMALL_SUPPLY_QLEN); WRITE4(sc, FATMO_INIT + FATMOI_SMALL_B1 + FATMOB_BUFFER_SIZE, SMALL_BUFFER_LEN); WRITE4(sc, FATMO_INIT + FATMOI_SMALL_B1 + FATMOB_POOL_SIZE, SMALL_POOL_SIZE); WRITE4(sc, FATMO_INIT + FATMOI_SMALL_B1 + FATMOB_SUPPLY_BLKSIZE, SMALL_SUPPLY_BLKSIZE); WRITE4(sc, FATMO_INIT + FATMOI_LARGE_B1 + FATMOB_QUEUE_LENGTH, LARGE_SUPPLY_QLEN); WRITE4(sc, FATMO_INIT + FATMOI_LARGE_B1 + FATMOB_BUFFER_SIZE, LARGE_BUFFER_LEN); WRITE4(sc, FATMO_INIT + FATMOI_LARGE_B1 + FATMOB_POOL_SIZE, LARGE_POOL_SIZE); WRITE4(sc, FATMO_INIT + FATMOI_LARGE_B1 + FATMOB_SUPPLY_BLKSIZE, LARGE_SUPPLY_BLKSIZE); WRITE4(sc, FATMO_INIT + FATMOI_SMALL_B2 + FATMOB_QUEUE_LENGTH, 0); WRITE4(sc, FATMO_INIT + FATMOI_SMALL_B2 + FATMOB_BUFFER_SIZE, 0); WRITE4(sc, FATMO_INIT + FATMOI_SMALL_B2 + FATMOB_POOL_SIZE, 0); WRITE4(sc, FATMO_INIT + FATMOI_SMALL_B2 + FATMOB_SUPPLY_BLKSIZE, 0); WRITE4(sc, FATMO_INIT + FATMOI_LARGE_B2 + FATMOB_QUEUE_LENGTH, 0); WRITE4(sc, FATMO_INIT + FATMOI_LARGE_B2 + FATMOB_BUFFER_SIZE, 0); WRITE4(sc, FATMO_INIT + FATMOI_LARGE_B2 + FATMOB_POOL_SIZE, 0); WRITE4(sc, FATMO_INIT + FATMOI_LARGE_B2 + FATMOB_SUPPLY_BLKSIZE, 0); /* * Start the command */ BARRIER_W(sc); WRITE4(sc, FATMO_INIT + FATMOI_STATUS, FATM_STAT_PENDING); BARRIER_W(sc); WRITE4(sc, FATMO_INIT + FATMOI_OP, FATM_OP_INITIALIZE); BARRIER_W(sc); /* * Busy wait for completion */ w = 100; while (w--) { c = READ4(sc, FATMO_INIT + FATMOI_STATUS); BARRIER_R(sc); if (c & FATM_STAT_COMPLETE) break; DELAY(1000); } if (c & FATM_STAT_ERROR) return (EIO); /* * Initialize the queues */ statp = sc->stat_mem.mem; card_stat = sc->stat_mem.paddr; /* * Command queue. This is special in that it's on the card. */ el = sc->cmdqueue.chunk; off = READ4(sc, FATMO_COMMAND_QUEUE); DBG(sc, INIT, ("cmd queue=%x", off)); for (cnt = 0; cnt < FATM_CMD_QLEN; cnt++) { el = &((struct cmdqueue *)sc->cmdqueue.chunk + cnt)->q; el->card = off; off += 32; /* size of card structure */ el->statp = (uint32_t *)statp; statp += sizeof(uint32_t); H_SETSTAT(el->statp, FATM_STAT_FREE); H_SYNCSTAT_PREWRITE(sc, el->statp); WRITE4(sc, el->card + FATMOC_STATP, card_stat); card_stat += sizeof(uint32_t); } sc->cmdqueue.tail = sc->cmdqueue.head = 0; /* * Now the other queues. 
These are in memory. */ init_card_queue(sc, &sc->txqueue, FATM_TX_QLEN, sizeof(struct txqueue), TPD_SIZE, READ4(sc, FATMO_TRANSMIT_QUEUE), &statp, &card_stat, sc->txq_mem.mem, sc->txq_mem.paddr); init_card_queue(sc, &sc->rxqueue, FATM_RX_QLEN, sizeof(struct rxqueue), RPD_SIZE, READ4(sc, FATMO_RECEIVE_QUEUE), &statp, &card_stat, sc->rxq_mem.mem, sc->rxq_mem.paddr); init_card_queue(sc, &sc->s1queue, SMALL_SUPPLY_QLEN, sizeof(struct supqueue), BSUP_BLK2SIZE(SMALL_SUPPLY_BLKSIZE), READ4(sc, FATMO_SMALL_B1_QUEUE), &statp, &card_stat, sc->s1q_mem.mem, sc->s1q_mem.paddr); init_card_queue(sc, &sc->l1queue, LARGE_SUPPLY_QLEN, sizeof(struct supqueue), BSUP_BLK2SIZE(LARGE_SUPPLY_BLKSIZE), READ4(sc, FATMO_LARGE_B1_QUEUE), &statp, &card_stat, sc->l1q_mem.mem, sc->l1q_mem.paddr); sc->txcnt = 0; return (0); } /* * Read PROM. Called only from attach code. Here we spin because the interrupt * handler is not yet set up. */ static int fatm_getprom(struct fatm_softc *sc) { int i; struct prom *prom; struct cmdqueue *q; DBG(sc, INIT, ("reading prom")); q = GET_QUEUE(sc->cmdqueue, struct cmdqueue, sc->cmdqueue.head); NEXT_QUEUE_ENTRY(sc->cmdqueue.head, FATM_CMD_QLEN); q->error = 0; q->cb = NULL; H_SETSTAT(q->q.statp, FATM_STAT_PENDING); H_SYNCSTAT_PREWRITE(sc, q->q.statp); bus_dmamap_sync(sc->prom_mem.dmat, sc->prom_mem.map, BUS_DMASYNC_PREREAD); WRITE4(sc, q->q.card + FATMOC_GPROM_BUF, sc->prom_mem.paddr); BARRIER_W(sc); WRITE4(sc, q->q.card + FATMOC_OP, FATM_OP_GET_PROM_DATA); BARRIER_W(sc); for (i = 0; i < 1000; i++) { H_SYNCSTAT_POSTREAD(sc, q->q.statp); if (H_GETSTAT(q->q.statp) & (FATM_STAT_COMPLETE | FATM_STAT_ERROR)) break; DELAY(1000); } if (i == 1000) { if_printf(sc->ifp, "getprom timeout\n"); return (EIO); } H_SYNCSTAT_POSTREAD(sc, q->q.statp); if (H_GETSTAT(q->q.statp) & FATM_STAT_ERROR) { if_printf(sc->ifp, "getprom error\n"); return (EIO); } H_SETSTAT(q->q.statp, FATM_STAT_FREE); H_SYNCSTAT_PREWRITE(sc, q->q.statp); NEXT_QUEUE_ENTRY(sc->cmdqueue.tail, FATM_CMD_QLEN); bus_dmamap_sync(sc->prom_mem.dmat, sc->prom_mem.map, BUS_DMASYNC_POSTREAD); #ifdef notdef { u_int i; u_char *ptr = (u_char *)sc->prom_mem.mem; printf("PROM: "); for (i = 0; i < sizeof(struct prom); i++) printf("%02x ", *ptr++); printf("\n"); } #endif prom = (struct prom *)sc->prom_mem.mem; bcopy(prom->mac + 2, IFP2IFATM(sc->ifp)->mib.esi, 6); IFP2IFATM(sc->ifp)->mib.serial = le32toh(prom->serial); IFP2IFATM(sc->ifp)->mib.hw_version = le32toh(prom->version); IFP2IFATM(sc->ifp)->mib.sw_version = READ4(sc, FATMO_FIRMWARE_RELEASE); if_printf(sc->ifp, "ESI=%02x:%02x:%02x:%02x:%02x:%02x " "serial=%u hw=0x%x sw=0x%x\n", IFP2IFATM(sc->ifp)->mib.esi[0], IFP2IFATM(sc->ifp)->mib.esi[1], IFP2IFATM(sc->ifp)->mib.esi[2], IFP2IFATM(sc->ifp)->mib.esi[3], IFP2IFATM(sc->ifp)->mib.esi[4], IFP2IFATM(sc->ifp)->mib.esi[5], IFP2IFATM(sc->ifp)->mib.serial, IFP2IFATM(sc->ifp)->mib.hw_version, IFP2IFATM(sc->ifp)->mib.sw_version); return (0); } /* * This is the callback function for bus_dmamap_load. We assume that we * have a 32-bit bus and so always have one segment. */ static void dmaload_helper(void *arg, bus_dma_segment_t *segs, int nsegs, int error) { bus_addr_t *ptr = (bus_addr_t *)arg; if (error != 0) { printf("%s: error=%d\n", __func__, error); return; } KASSERT(nsegs == 1, ("too many DMA segments")); KASSERT(segs[0].ds_addr <= 0xffffffff, ("DMA address too large %lx", (u_long)segs[0].ds_addr)); *ptr = segs[0].ds_addr; } /* * Allocate a chunk of DMA-able memory and map it.
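 *
 * The standard busdma sequence used here is: create a tag describing the
 * constraints, allocate memory against it and load the map to learn the
 * bus address. A sketch (error handling unwinds in reverse order):
 *
 *	bus_dma_tag_create(parent, align, ..., &mem->dmat);
 *	bus_dmamem_alloc(mem->dmat, &mem->mem, 0, &mem->map);
 *	bus_dmamap_load(mem->dmat, mem->map, mem->mem, mem->size,
 *	    dmaload_helper, &mem->paddr, BUS_DMA_NOWAIT);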
*/ static int alloc_dma_memory(struct fatm_softc *sc, const char *nm, struct fatm_mem *mem) { int error; mem->mem = NULL; if (bus_dma_tag_create(sc->parent_dmat, mem->align, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, mem->size, 1, BUS_SPACE_MAXSIZE_32BIT, BUS_DMA_ALLOCNOW, NULL, NULL, &mem->dmat)) { if_printf(sc->ifp, "could not allocate %s DMA tag\n", nm); return (ENOMEM); } error = bus_dmamem_alloc(mem->dmat, &mem->mem, 0, &mem->map); if (error) { if_printf(sc->ifp, "could not allocate %s DMA memory: " "%d\n", nm, error); bus_dma_tag_destroy(mem->dmat); mem->mem = NULL; return (error); } error = bus_dmamap_load(mem->dmat, mem->map, mem->mem, mem->size, dmaload_helper, &mem->paddr, BUS_DMA_NOWAIT); if (error) { if_printf(sc->ifp, "could not load %s DMA memory: " "%d\n", nm, error); bus_dmamem_free(mem->dmat, mem->mem, mem->map); bus_dma_tag_destroy(mem->dmat); mem->mem = NULL; return (error); } DBG(sc, DMA, ("DMA %s V/P/S/Z %p/%lx/%x/%x", nm, mem->mem, (u_long)mem->paddr, mem->size, mem->align)); return (0); } #ifdef TEST_DMA_SYNC static int alloc_dma_memoryX(struct fatm_softc *sc, const char *nm, struct fatm_mem *mem) { int error; mem->mem = NULL; if (bus_dma_tag_create(NULL, mem->align, 0, BUS_SPACE_MAXADDR_24BIT, BUS_SPACE_MAXADDR, NULL, NULL, mem->size, 1, mem->size, BUS_DMA_ALLOCNOW, NULL, NULL, &mem->dmat)) { if_printf(sc->ifp, "could not allocate %s DMA tag\n", nm); return (ENOMEM); } mem->mem = contigmalloc(mem->size, M_DEVBUF, M_WAITOK, BUS_SPACE_MAXADDR_24BIT, BUS_SPACE_MAXADDR_32BIT, mem->align, 0); error = bus_dmamap_create(mem->dmat, 0, &mem->map); if (error) { if_printf(sc->ifp, "could not allocate %s DMA map: " "%d\n", nm, error); contigfree(mem->mem, mem->size, M_DEVBUF); bus_dma_tag_destroy(mem->dmat); mem->mem = NULL; return (error); } error = bus_dmamap_load(mem->dmat, mem->map, mem->mem, mem->size, dmaload_helper, &mem->paddr, BUS_DMA_NOWAIT); if (error) { if_printf(sc->ifp, "could not load %s DMA memory: " "%d\n", nm, error); bus_dmamap_destroy(mem->dmat, mem->map); contigfree(mem->mem, mem->size, M_DEVBUF); bus_dma_tag_destroy(mem->dmat); mem->mem = NULL; return (error); } DBG(sc, DMA, ("DMAX %s V/P/S/Z %p/%lx/%x/%x", nm, mem->mem, (u_long)mem->paddr, mem->size, mem->align)); printf("DMAX: %s V/P/S/Z %p/%lx/%x/%x", nm, mem->mem, (u_long)mem->paddr, mem->size, mem->align); return (0); } #endif /* TEST_DMA_SYNC */ /* * Destroy all resources of a DMA-able memory chunk. */ static void destroy_dma_memory(struct fatm_mem *mem) { if (mem->mem != NULL) { bus_dmamap_unload(mem->dmat, mem->map); bus_dmamem_free(mem->dmat, mem->mem, mem->map); bus_dma_tag_destroy(mem->dmat); mem->mem = NULL; } } #ifdef TEST_DMA_SYNC static void destroy_dma_memoryX(struct fatm_mem *mem) { if (mem->mem != NULL) { bus_dmamap_unload(mem->dmat, mem->map); bus_dmamap_destroy(mem->dmat, mem->map); contigfree(mem->mem, mem->size, M_DEVBUF); bus_dma_tag_destroy(mem->dmat); mem->mem = NULL; } } #endif /* TEST_DMA_SYNC */ /* * Try to supply buffers to the card if there are free entries in the queues. */ static void fatm_supply_small_buffers(struct fatm_softc *sc) { int nblocks, nbufs; struct supqueue *q; struct rbd *bd; int i, j, error, cnt; struct mbuf *m; struct rbuf *rb; bus_addr_t phys; nbufs = max(4 * sc->open_vccs, 32); nbufs = min(nbufs, SMALL_POOL_SIZE); nbufs -= sc->small_cnt; nblocks = (nbufs + SMALL_SUPPLY_BLKSIZE - 1) / SMALL_SUPPLY_BLKSIZE; for (cnt = 0; cnt < nblocks; cnt++) { q = GET_QUEUE(sc->s1queue, struct supqueue, sc->s1queue.head); H_SYNCSTAT_POSTREAD(sc, q->q.statp);
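/* A slot is ours only while its host-visible status word reads FATM_STAT_FREE; otherwise the card still owns it and we must stop supplying. */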
if (H_GETSTAT(q->q.statp) != FATM_STAT_FREE) break; bd = (struct rbd *)q->q.ioblk; for (i = 0; i < SMALL_SUPPLY_BLKSIZE; i++) { if ((rb = LIST_FIRST(&sc->rbuf_free)) == NULL) { if_printf(sc->ifp, "out of rbufs\n"); break; } MGETHDR(m, M_DONTWAIT, MT_DATA); if (m == NULL) { LIST_INSERT_HEAD(&sc->rbuf_free, rb, link); break; } MH_ALIGN(m, SMALL_BUFFER_LEN); error = bus_dmamap_load(sc->rbuf_tag, rb->map, m->m_data, SMALL_BUFFER_LEN, dmaload_helper, &phys, BUS_DMA_NOWAIT); if (error) { if_printf(sc->ifp, "dmamap_load mbuf failed %d", error); m_freem(m); LIST_INSERT_HEAD(&sc->rbuf_free, rb, link); break; } bus_dmamap_sync(sc->rbuf_tag, rb->map, BUS_DMASYNC_PREREAD); LIST_REMOVE(rb, link); LIST_INSERT_HEAD(&sc->rbuf_used, rb, link); rb->m = m; bd[i].handle = rb - sc->rbufs; H_SETDESC(bd[i].buffer, phys); } if (i < SMALL_SUPPLY_BLKSIZE) { for (j = 0; j < i; j++) { rb = sc->rbufs + bd[j].handle; bus_dmamap_unload(sc->rbuf_tag, rb->map); m_free(rb->m); rb->m = NULL; LIST_REMOVE(rb, link); LIST_INSERT_HEAD(&sc->rbuf_free, rb, link); } break; } H_SYNCQ_PREWRITE(&sc->s1q_mem, bd, sizeof(struct rbd) * SMALL_SUPPLY_BLKSIZE); H_SETSTAT(q->q.statp, FATM_STAT_PENDING); H_SYNCSTAT_PREWRITE(sc, q->q.statp); WRITE4(sc, q->q.card, q->q.card_ioblk); BARRIER_W(sc); sc->small_cnt += SMALL_SUPPLY_BLKSIZE; NEXT_QUEUE_ENTRY(sc->s1queue.head, SMALL_SUPPLY_QLEN); } } /* * Try to supply buffers to the card if there are free entries in the queues * We assume that all buffers are within the address space accessible by the * card (32-bit), so we don't need bounce buffers. */ static void fatm_supply_large_buffers(struct fatm_softc *sc) { int nbufs, nblocks, cnt; struct supqueue *q; struct rbd *bd; int i, j, error; struct mbuf *m; struct rbuf *rb; bus_addr_t phys; nbufs = max(4 * sc->open_vccs, 32); nbufs = min(nbufs, LARGE_POOL_SIZE); nbufs -= sc->large_cnt; nblocks = (nbufs + LARGE_SUPPLY_BLKSIZE - 1) / LARGE_SUPPLY_BLKSIZE; for (cnt = 0; cnt < nblocks; cnt++) { q = GET_QUEUE(sc->l1queue, struct supqueue, sc->l1queue.head); H_SYNCSTAT_POSTREAD(sc, q->q.statp); if (H_GETSTAT(q->q.statp) != FATM_STAT_FREE) break; bd = (struct rbd *)q->q.ioblk; for (i = 0; i < LARGE_SUPPLY_BLKSIZE; i++) { if ((rb = LIST_FIRST(&sc->rbuf_free)) == NULL) { if_printf(sc->ifp, "out of rbufs\n"); break; } if ((m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR)) == NULL) { LIST_INSERT_HEAD(&sc->rbuf_free, rb, link); break; } /* No MEXT_ALIGN */ m->m_data += MCLBYTES - LARGE_BUFFER_LEN; error = bus_dmamap_load(sc->rbuf_tag, rb->map, m->m_data, LARGE_BUFFER_LEN, dmaload_helper, &phys, BUS_DMA_NOWAIT); if (error) { if_printf(sc->ifp, "dmamap_load mbuf failed %d", error); m_freem(m); LIST_INSERT_HEAD(&sc->rbuf_free, rb, link); break; } bus_dmamap_sync(sc->rbuf_tag, rb->map, BUS_DMASYNC_PREREAD); LIST_REMOVE(rb, link); LIST_INSERT_HEAD(&sc->rbuf_used, rb, link); rb->m = m; bd[i].handle = rb - sc->rbufs; H_SETDESC(bd[i].buffer, phys); } if (i < LARGE_SUPPLY_BLKSIZE) { for (j = 0; j < i; j++) { rb = sc->rbufs + bd[j].handle; bus_dmamap_unload(sc->rbuf_tag, rb->map); m_free(rb->m); rb->m = NULL; LIST_REMOVE(rb, link); LIST_INSERT_HEAD(&sc->rbuf_free, rb, link); } break; } H_SYNCQ_PREWRITE(&sc->l1q_mem, bd, sizeof(struct rbd) * LARGE_SUPPLY_BLKSIZE); H_SETSTAT(q->q.statp, FATM_STAT_PENDING); H_SYNCSTAT_PREWRITE(sc, q->q.statp); WRITE4(sc, q->q.card, q->q.card_ioblk); BARRIER_W(sc); sc->large_cnt += LARGE_SUPPLY_BLKSIZE; NEXT_QUEUE_ENTRY(sc->l1queue.head, LARGE_SUPPLY_QLEN); } } /* * Actually start the card. The lock must be held here. 
* Reset, load the firmware, start it, initialize queues, read the PROM * and supply receive buffers to the card. */ static void fatm_init_locked(struct fatm_softc *sc) { struct rxqueue *q; int i, c, error; uint32_t start; DBG(sc, INIT, ("initialize")); if (sc->ifp->if_flags & IFF_RUNNING) fatm_stop(sc); /* * Hard reset the board */ if (fatm_reset(sc)) return; start = firmware_load(sc); if (fatm_start_firmware(sc, start) || fatm_init_cmd(sc) || fatm_getprom(sc)) { fatm_reset(sc); return; } /* * Handle media */ c = READ4(sc, FATMO_MEDIA_TYPE); switch (c) { case FORE_MT_TAXI_100: IFP2IFATM(sc->ifp)->mib.media = IFM_ATM_TAXI_100; IFP2IFATM(sc->ifp)->mib.pcr = 227273; break; case FORE_MT_TAXI_140: IFP2IFATM(sc->ifp)->mib.media = IFM_ATM_TAXI_140; IFP2IFATM(sc->ifp)->mib.pcr = 318181; break; case FORE_MT_UTP_SONET: IFP2IFATM(sc->ifp)->mib.media = IFM_ATM_UTP_155; IFP2IFATM(sc->ifp)->mib.pcr = 353207; break; case FORE_MT_MM_OC3_ST: case FORE_MT_MM_OC3_SC: IFP2IFATM(sc->ifp)->mib.media = IFM_ATM_MM_155; IFP2IFATM(sc->ifp)->mib.pcr = 353207; break; case FORE_MT_SM_OC3_ST: case FORE_MT_SM_OC3_SC: IFP2IFATM(sc->ifp)->mib.media = IFM_ATM_SM_155; IFP2IFATM(sc->ifp)->mib.pcr = 353207; break; default: log(LOG_ERR, "fatm: unknown media type %d\n", c); IFP2IFATM(sc->ifp)->mib.media = IFM_ATM_UNKNOWN; IFP2IFATM(sc->ifp)->mib.pcr = 353207; break; } sc->ifp->if_baudrate = 53 * 8 * IFP2IFATM(sc->ifp)->mib.pcr; utopia_init_media(&sc->utopia); /* * Initialize the RBDs */ for (i = 0; i < FATM_RX_QLEN; i++) { q = GET_QUEUE(sc->rxqueue, struct rxqueue, i); WRITE4(sc, q->q.card + 0, q->q.card_ioblk); } BARRIER_W(sc); /* * Supply buffers to the card */ fatm_supply_small_buffers(sc); fatm_supply_large_buffers(sc); /* * Now set flags, that we are ready */ sc->ifp->if_flags |= IFF_RUNNING; /* * Start the watchdog timer */ sc->ifp->if_timer = 5; /* start SUNI */ utopia_start(&sc->utopia); ATMEV_SEND_IFSTATE_CHANGED(IFP2IFATM(sc->ifp), sc->utopia.carrier == UTP_CARR_OK); /* start all channels */ for (i = 0; i < FORE_MAX_VCC + 1; i++) if (sc->vccs[i] != NULL) { sc->vccs[i]->vflags |= FATM_VCC_REOPEN; error = fatm_load_vc(sc, sc->vccs[i]); if (error != 0) { if_printf(sc->ifp, "reopening %u " "failed: %d\n", i, error); sc->vccs[i]->vflags &= ~FATM_VCC_REOPEN; } } DBG(sc, INIT, ("done")); } /* * This is the exported initialisation function. */ static void fatm_init(void *p) { struct fatm_softc *sc = p; FATM_LOCK(sc); fatm_init_locked(sc); FATM_UNLOCK(sc); } /************************************************************/ /* * The INTERRUPT handling */ /* * Check the command queue. If a command was completed, call the completion * function for that command. */ static void fatm_intr_drain_cmd(struct fatm_softc *sc) { struct cmdqueue *q; int stat; /* * Drain command queue */ for (;;) { q = GET_QUEUE(sc->cmdqueue, struct cmdqueue, sc->cmdqueue.tail); H_SYNCSTAT_POSTREAD(sc, q->q.statp); stat = H_GETSTAT(q->q.statp); if (stat != FATM_STAT_COMPLETE && stat != (FATM_STAT_COMPLETE | FATM_STAT_ERROR) && stat != FATM_STAT_ERROR) break; (*q->cb)(sc, q); H_SETSTAT(q->q.statp, FATM_STAT_FREE); H_SYNCSTAT_PREWRITE(sc, q->q.statp); NEXT_QUEUE_ENTRY(sc->cmdqueue.tail, FATM_CMD_QLEN); } } /* * Drain the small buffer supply queue.
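 *
 * Like all queue drains in this driver this chases the tail pointer:
 * look at the status word of the tail entry, stop if the card has not
 * completed it yet, otherwise hand the entry back (sketch):
 *
 *	stat = H_GETSTAT(q->q.statp);
 *	if ((stat & FATM_STAT_COMPLETE) == 0)
 *		break;			/* card still owns the entry */
 *	H_SETSTAT(q->q.statp, FATM_STAT_FREE);
 *	NEXT_QUEUE_ENTRY(...tail, ...QLEN);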
*/ static void fatm_intr_drain_small_buffers(struct fatm_softc *sc) { struct supqueue *q; int stat; for (;;) { q = GET_QUEUE(sc->s1queue, struct supqueue, sc->s1queue.tail); H_SYNCSTAT_POSTREAD(sc, q->q.statp); stat = H_GETSTAT(q->q.statp); if ((stat & FATM_STAT_COMPLETE) == 0) break; if (stat & FATM_STAT_ERROR) log(LOG_ERR, "%s: status %x\n", __func__, stat); H_SETSTAT(q->q.statp, FATM_STAT_FREE); H_SYNCSTAT_PREWRITE(sc, q->q.statp); NEXT_QUEUE_ENTRY(sc->s1queue.tail, SMALL_SUPPLY_QLEN); } } /* * Drain the large buffer supply queue. */ static void fatm_intr_drain_large_buffers(struct fatm_softc *sc) { struct supqueue *q; int stat; for (;;) { q = GET_QUEUE(sc->l1queue, struct supqueue, sc->l1queue.tail); H_SYNCSTAT_POSTREAD(sc, q->q.statp); stat = H_GETSTAT(q->q.statp); if ((stat & FATM_STAT_COMPLETE) == 0) break; if (stat & FATM_STAT_ERROR) log(LOG_ERR, "%s status %x\n", __func__, stat); H_SETSTAT(q->q.statp, FATM_STAT_FREE); H_SYNCSTAT_PREWRITE(sc, q->q.statp); NEXT_QUEUE_ENTRY(sc->l1queue.tail, LARGE_SUPPLY_QLEN); } } /* * Check the receive queue. Send any received PDU up the protocol stack * (except when there was an error or the VCI appears to be closed. In this * case discard the PDU). */ static void fatm_intr_drain_rx(struct fatm_softc *sc) { struct rxqueue *q; int stat, mlen; u_int i; uint32_t h; struct mbuf *last, *m0; struct rpd *rpd; struct rbuf *rb; u_int vci, vpi, pt; struct atm_pseudohdr aph; struct ifnet *ifp; struct card_vcc *vc; for (;;) { q = GET_QUEUE(sc->rxqueue, struct rxqueue, sc->rxqueue.tail); H_SYNCSTAT_POSTREAD(sc, q->q.statp); stat = H_GETSTAT(q->q.statp); if ((stat & FATM_STAT_COMPLETE) == 0) break; rpd = (struct rpd *)q->q.ioblk; H_SYNCQ_POSTREAD(&sc->rxq_mem, rpd, RPD_SIZE); rpd->nseg = le32toh(rpd->nseg); mlen = 0; m0 = last = 0; for (i = 0; i < rpd->nseg; i++) { rb = sc->rbufs + rpd->segment[i].handle; if (m0 == NULL) { m0 = last = rb->m; } else { last->m_next = rb->m; last = rb->m; } last->m_next = NULL; if (last->m_flags & M_EXT) sc->large_cnt--; else sc->small_cnt--; bus_dmamap_sync(sc->rbuf_tag, rb->map, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc->rbuf_tag, rb->map); rb->m = NULL; LIST_REMOVE(rb, link); LIST_INSERT_HEAD(&sc->rbuf_free, rb, link); last->m_len = le32toh(rpd->segment[i].length); mlen += last->m_len; } m0->m_pkthdr.len = mlen; m0->m_pkthdr.rcvif = sc->ifp; h = le32toh(rpd->atm_header); vpi = (h >> 20) & 0xff; vci = (h >> 4 ) & 0xffff; pt = (h >> 1 ) & 0x7; /* * Locate the VCC this packet belongs to */ if (!VC_OK(sc, vpi, vci)) vc = NULL; else if ((vc = sc->vccs[vci]) == NULL || !(sc->vccs[vci]->vflags & FATM_VCC_OPEN)) { sc->istats.rx_closed++; vc = NULL; } DBG(sc, RCV, ("RCV: vc=%u.%u pt=%u mlen=%d %s", vpi, vci, pt, mlen, vc == NULL ? "dropped" : "")); if (vc == NULL) { m_freem(m0); } else { #ifdef ENABLE_BPF if (!(vc->param.flags & ATMIO_FLAG_NG) && vc->param.aal == ATMIO_AAL_5 && (vc->param.flags & ATM_PH_LLCSNAP)) BPF_MTAP(sc->ifp, m0); #endif ATM_PH_FLAGS(&aph) = vc->param.flags; ATM_PH_VPI(&aph) = vpi; ATM_PH_SETVCI(&aph, vci); ifp = sc->ifp; ifp->if_ipackets++; vc->ipackets++; vc->ibytes += m0->m_pkthdr.len; atm_input(ifp, &aph, m0, vc->rxhand); } H_SETSTAT(q->q.statp, FATM_STAT_FREE); H_SYNCSTAT_PREWRITE(sc, q->q.statp); WRITE4(sc, q->q.card, q->q.card_ioblk); BARRIER_W(sc); NEXT_QUEUE_ENTRY(sc->rxqueue.tail, FATM_RX_QLEN); } } /* * Check the transmit queue. Free the mbuf chains that we were transmitting. 
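 *
 * The completion test is the same as for the command queue: an entry is
 * done once its status is FATM_STAT_COMPLETE, FATM_STAT_COMPLETE |
 * FATM_STAT_ERROR or plain FATM_STAT_ERROR; anything else means the card
 * is still working on it.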
*/ static void fatm_intr_drain_tx(struct fatm_softc *sc) { struct txqueue *q; int stat; /* * Drain tx queue */ for (;;) { q = GET_QUEUE(sc->txqueue, struct txqueue, sc->txqueue.tail); H_SYNCSTAT_POSTREAD(sc, q->q.statp); stat = H_GETSTAT(q->q.statp); if (stat != FATM_STAT_COMPLETE && stat != (FATM_STAT_COMPLETE | FATM_STAT_ERROR) && stat != FATM_STAT_ERROR) break; H_SETSTAT(q->q.statp, FATM_STAT_FREE); H_SYNCSTAT_PREWRITE(sc, q->q.statp); bus_dmamap_sync(sc->tx_tag, q->map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->tx_tag, q->map); m_freem(q->m); q->m = NULL; sc->txcnt--; NEXT_QUEUE_ENTRY(sc->txqueue.tail, FATM_TX_QLEN); } } /* * Interrupt handler */ static void fatm_intr(void *p) { struct fatm_softc *sc = (struct fatm_softc *)p; FATM_LOCK(sc); if (!READ4(sc, FATMO_PSR)) { FATM_UNLOCK(sc); return; } WRITE4(sc, FATMO_HCR, FATM_HCR_CLRIRQ); if (!(sc->ifp->if_flags & IFF_RUNNING)) { FATM_UNLOCK(sc); return; } fatm_intr_drain_cmd(sc); fatm_intr_drain_rx(sc); fatm_intr_drain_tx(sc); fatm_intr_drain_small_buffers(sc); fatm_intr_drain_large_buffers(sc); fatm_supply_small_buffers(sc); fatm_supply_large_buffers(sc); FATM_UNLOCK(sc); if (sc->retry_tx && _IF_QLEN(&sc->ifp->if_snd)) (*sc->ifp->if_start)(sc->ifp); } /* * Get device statistics. This must be called with the softc locked. * We use a preallocated buffer, so we need to protect this buffer. * We do this by using a condition variable and a flag. If the flag is set * the buffer is in use by one thread (one thread is executing a GETSTAT * card command). In this case all other threads that are trying to get * statistics block on that condition variable. When the thread finishes * using the buffer it resets the flag and signals the condition variable. This * will wake up the next thread that is waiting for the buffer. If the interface * is stopped the stopping function will broadcast the cv. All threads will * find that the interface has been stopped and return. * * Acquiring of the buffer is done by the fatm_getstat() function. The freeing * must be done by the caller when it has finished using the buffer.
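 *
 * A sketch of the calling convention (it mirrors fatm_sysctl_stats() below):
 *
 *	FATM_LOCK(sc);
 *	if ((error = fatm_getstat(sc)) == 0)
 *		... copy results from sc->sadi_mem.mem ...
 *	sc->flags &= ~FATM_STAT_INUSE;	/* caller frees the buffer */
 *	cv_signal(&sc->cv_stat);
 *	FATM_UNLOCK(sc);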
*/ static void fatm_getstat_complete(struct fatm_softc *sc, struct cmdqueue *q) { H_SYNCSTAT_POSTREAD(sc, q->q.statp); if (H_GETSTAT(q->q.statp) & FATM_STAT_ERROR) { sc->istats.get_stat_errors++; q->error = EIO; } wakeup(&sc->sadi_mem); } static int fatm_getstat(struct fatm_softc *sc) { int error; struct cmdqueue *q; /* * Wait until either the interface is stopped or we can get the * statistics buffer */ for (;;) { if (!(sc->ifp->if_flags & IFF_RUNNING)) return (EIO); if (!(sc->flags & FATM_STAT_INUSE)) break; cv_wait(&sc->cv_stat, &sc->mtx); } sc->flags |= FATM_STAT_INUSE; q = GET_QUEUE(sc->cmdqueue, struct cmdqueue, sc->cmdqueue.head); H_SYNCSTAT_POSTREAD(sc, q->q.statp); if (!(H_GETSTAT(q->q.statp) & FATM_STAT_FREE)) { sc->istats.cmd_queue_full++; return (EIO); } NEXT_QUEUE_ENTRY(sc->cmdqueue.head, FATM_CMD_QLEN); q->error = 0; q->cb = fatm_getstat_complete; H_SETSTAT(q->q.statp, FATM_STAT_PENDING); H_SYNCSTAT_PREWRITE(sc, q->q.statp); bus_dmamap_sync(sc->sadi_mem.dmat, sc->sadi_mem.map, BUS_DMASYNC_PREREAD); WRITE4(sc, q->q.card + FATMOC_GSTAT_BUF, sc->sadi_mem.paddr); BARRIER_W(sc); WRITE4(sc, q->q.card + FATMOC_OP, FATM_OP_REQUEST_STATS | FATM_OP_INTERRUPT_SEL); BARRIER_W(sc); /* * Wait for the command to complete */ error = msleep(&sc->sadi_mem, &sc->mtx, PZERO | PCATCH, "fatm_stat", hz); switch (error) { case EWOULDBLOCK: error = EIO; break; case ERESTART: error = EINTR; break; case 0: bus_dmamap_sync(sc->sadi_mem.dmat, sc->sadi_mem.map, BUS_DMASYNC_POSTREAD); error = q->error; break; } /* * Swap statistics */ if (q->error == 0) { u_int i; uint32_t *p = (uint32_t *)sc->sadi_mem.mem; for (i = 0; i < sizeof(struct fatm_stats) / sizeof(uint32_t); i++, p++) *p = be32toh(*p); } return (error); } /* * Create a copy of a single mbuf. It can have either internal or * external data, it may have a packet header. External data is really * copied, so the new buffer is writeable. */ static struct mbuf * copy_mbuf(struct mbuf *m) { struct mbuf *new; MGET(new, M_DONTWAIT, MT_DATA); if (new == NULL) return (NULL); if (m->m_flags & M_PKTHDR) { M_MOVE_PKTHDR(new, m); if (m->m_len > MHLEN) { MCLGET(new, M_TRYWAIT); if ((new->m_flags & M_EXT) == 0) { m_free(new); return (NULL); } } } else { if (m->m_len > MLEN) { MCLGET(new, M_TRYWAIT); if ((new->m_flags & M_EXT) == 0) { m_free(new); return (NULL); } } } bcopy(m->m_data, new->m_data, m->m_len); new->m_len = m->m_len; new->m_flags &= ~M_RDONLY; return (new); } /* * All segments must have a four byte aligned buffer address and a four * byte aligned length. Step through an mbuf chain and check these conditions. * If the buffer address is not aligned and this is a normal mbuf, move * the data down. Else make a copy of the mbuf with aligned data. * If the buffer length is not aligned steal data from the next mbuf. * We don't need to check whether this has more than one external reference, * because stealing data doesn't change the external cluster. * If the last mbuf is not aligned, fill with zeroes. * * Return packet length (well we should have this in the packet header), * but be careful not to count the zero fill at the end. * * If fixing fails, free the chain and zero the pointer. * * We assume that aligning the virtual address also aligns the mapped bus * address.
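 *
 * A worked example: an mbuf with data at offset 2 and length 7, followed
 * by another mbuf. The data is first moved down 2 bytes (normal mbuf) or
 * the mbuf is copied (external storage), then 1 byte is stolen from the
 * following mbuf to round the length up to 8. Only a trailing mbuf may be
 * padded with zero bytes, and those are not counted in the returned length.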
*/ static u_int fatm_fix_chain(struct fatm_softc *sc, struct mbuf **mp) { struct mbuf *m = *mp, *prev = NULL, *next, *new; u_int mlen = 0, fill = 0; int first, off; u_char *d, *cp; do { next = m->m_next; if ((uintptr_t)mtod(m, void *) % 4 != 0 || (m->m_len % 4 != 0 && next)) { /* * Needs fixing */ first = (m == *mp); d = mtod(m, u_char *); if ((off = (uintptr_t)(void *)d % 4) != 0) { if (!(m->m_flags & M_EXT) || !MEXT_IS_REF(m)) { sc->istats.fix_addr_copy++; bcopy(d, d - off, m->m_len); m->m_data = (caddr_t)(d - off); } else { if ((new = copy_mbuf(m)) == NULL) { sc->istats.fix_addr_noext++; goto fail; } sc->istats.fix_addr_ext++; if (prev) prev->m_next = new; new->m_next = next; m_free(m); m = new; } } if ((off = m->m_len % 4) != 0) { if ((m->m_flags & M_EXT) && MEXT_IS_REF(m)) { if ((new = copy_mbuf(m)) == NULL) { sc->istats.fix_len_noext++; goto fail; } sc->istats.fix_len_copy++; if (prev) prev->m_next = new; new->m_next = next; m_free(m); m = new; } else sc->istats.fix_len++; d = mtod(m, u_char *) + m->m_len; off = 4 - off; while (off) { if (next == NULL) { *d++ = 0; fill++; } else if (next->m_len == 0) { sc->istats.fix_empty++; next = m_free(next); continue; } else { cp = mtod(next, u_char *); *d++ = *cp++; next->m_len--; next->m_data = (caddr_t)cp; } off--; m->m_len++; } } if (first) *mp = m; } mlen += m->m_len; prev = m; } while ((m = next) != NULL); return (mlen - fill); fail: m_freem(*mp); *mp = NULL; return (0); } /* * The helper function is used to load the computed physical addresses * into the transmit descriptor. */ static void fatm_tpd_load(void *varg, bus_dma_segment_t *segs, int nsegs, bus_size_t mapsize, int error) { struct tpd *tpd = varg; if (error) return; KASSERT(nsegs <= TPD_EXTENSIONS + TXD_FIXED, ("too many segments")); tpd->spec = 0; while (nsegs--) { H_SETDESC(tpd->segment[tpd->spec].buffer, segs->ds_addr); H_SETDESC(tpd->segment[tpd->spec].length, segs->ds_len); tpd->spec++; segs++; } } /* * Start output. * * Note, that we update the internal statistics without the lock here. */ static int fatm_tx(struct fatm_softc *sc, struct mbuf *m, struct card_vcc *vc, u_int mlen) { struct txqueue *q; u_int nblks; int error, aal, nsegs; struct tpd *tpd; /* * Get a queue element. * If there isn't one - try to drain the transmit queue * We used to sleep here if that doesn't help, but we * should not sleep here, because we are called with locks. */ q = GET_QUEUE(sc->txqueue, struct txqueue, sc->txqueue.head); H_SYNCSTAT_POSTREAD(sc, q->q.statp); if (H_GETSTAT(q->q.statp) != FATM_STAT_FREE) { fatm_intr_drain_tx(sc); H_SYNCSTAT_POSTREAD(sc, q->q.statp); if (H_GETSTAT(q->q.statp) != FATM_STAT_FREE) { if (sc->retry_tx) { sc->istats.tx_retry++; IF_PREPEND(&sc->ifp->if_snd, m); return (1); } sc->istats.tx_queue_full++; m_freem(m); return (0); } sc->istats.tx_queue_almost_full++; } tpd = q->q.ioblk; m->m_data += sizeof(struct atm_pseudohdr); m->m_len -= sizeof(struct atm_pseudohdr); #ifdef ENABLE_BPF if (!(vc->param.flags & ATMIO_FLAG_NG) && vc->param.aal == ATMIO_AAL_5 && (vc->param.flags & ATM_PH_LLCSNAP)) BPF_MTAP(sc->ifp, m); #endif /* map the mbuf */ error = bus_dmamap_load_mbuf(sc->tx_tag, q->map, m, fatm_tpd_load, tpd, BUS_DMA_NOWAIT); if(error) { sc->ifp->if_oerrors++; if_printf(sc->ifp, "mbuf loaded error=%d\n", error); m_freem(m); return (0); } nsegs = tpd->spec; bus_dmamap_sync(sc->tx_tag, q->map, BUS_DMASYNC_PREWRITE); /* * OK. Now go and do it. */ aal = (vc->param.aal == ATMIO_AAL_5) ? 
5 : 0; H_SETSTAT(q->q.statp, FATM_STAT_PENDING); H_SYNCSTAT_PREWRITE(sc, q->q.statp); q->m = m; /* * If the transmit queue is almost full, schedule a * transmit interrupt so that transmit descriptors can * be recycled. */ H_SETDESC(tpd->spec, TDX_MKSPEC((sc->txcnt >= (4 * FATM_TX_QLEN) / 5), aal, nsegs, mlen)); H_SETDESC(tpd->atm_header, TDX_MKHDR(vc->param.vpi, vc->param.vci, 0, 0)); if (vc->param.traffic == ATMIO_TRAFFIC_UBR) H_SETDESC(tpd->stream, 0); else { u_int i; for (i = 0; i < RATE_TABLE_SIZE; i++) if (rate_table[i].cell_rate < vc->param.tparam.pcr) break; if (i > 0) i--; H_SETDESC(tpd->stream, rate_table[i].ratio); } H_SYNCQ_PREWRITE(&sc->txq_mem, tpd, TPD_SIZE); nblks = TDX_SEGS2BLKS(nsegs); DBG(sc, XMIT, ("XMIT: mlen=%d spec=0x%x nsegs=%d blocks=%d", mlen, le32toh(tpd->spec), nsegs, nblks)); WRITE4(sc, q->q.card + 0, q->q.card_ioblk | nblks); BARRIER_W(sc); sc->txcnt++; sc->ifp->if_opackets++; vc->obytes += m->m_pkthdr.len; vc->opackets++; NEXT_QUEUE_ENTRY(sc->txqueue.head, FATM_TX_QLEN); return (0); } static void fatm_start(struct ifnet *ifp) { struct atm_pseudohdr aph; struct fatm_softc *sc; struct mbuf *m; u_int mlen, vpi, vci; struct card_vcc *vc; - sc = (struct fatm_softc *)ifp->if_softc; + sc = ifp->if_softc; while (1) { IF_DEQUEUE(&ifp->if_snd, m); if (m == NULL) break; /* * Loop through the mbuf chain and compute the total length * of the packet. Check that all data pointers are * 4 byte aligned. If they are not, call fatm_fix_chain to * fix that problem. This comes more or less from the * en driver. */ mlen = fatm_fix_chain(sc, &m); if (m == NULL) continue; if (m->m_len < sizeof(struct atm_pseudohdr) && (m = m_pullup(m, sizeof(struct atm_pseudohdr))) == NULL) continue; aph = *mtod(m, struct atm_pseudohdr *); mlen -= sizeof(struct atm_pseudohdr); if (mlen == 0) { m_freem(m); continue; } if (mlen > FATM_MAXPDU) { sc->istats.tx_pdu2big++; m_freem(m); continue; } vci = ATM_PH_VCI(&aph); vpi = ATM_PH_VPI(&aph); /* * From here on we need the softc */ FATM_LOCK(sc); if (!(ifp->if_flags & IFF_RUNNING)) { FATM_UNLOCK(sc); m_freem(m); break; } if (!VC_OK(sc, vpi, vci) || (vc = sc->vccs[vci]) == NULL || !(vc->vflags & FATM_VCC_OPEN)) { FATM_UNLOCK(sc); m_freem(m); continue; } if (fatm_tx(sc, m, vc, mlen)) { FATM_UNLOCK(sc); break; } FATM_UNLOCK(sc); } } /* * VCC management * * This may seem complicated. The reason for this is that we need an * asynchronous open/close for the NATM VCCs because our ioctl handler * is called with the radix node head of the routing table locked. Therefore * we cannot sleep there and wait for the open/close to succeed. For this * reason we just initiate the operation from the ioctl. */ /* * Command the card to open/close a VC. * Return the queue entry for waiting if we are successful.
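 *
 * The OP word encodes everything the card needs: the activate/deactivate
 * opcode, interrupt selection and (for opens) the buffer strategy and AAL
 * in the upper bits, roughly (sketch, see fatm_load_vc() below):
 *
 *	cmd = FATM_OP_ACTIVATE_VCIN | FATM_OP_INTERRUPT_SEL | (0 << 16);
 *	cmd |= (aal == ATMIO_AAL_0) ? (0 << 8) : (5 << 8);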
*/ static struct cmdqueue * fatm_start_vcc(struct fatm_softc *sc, u_int vpi, u_int vci, uint32_t cmd, u_int mtu, void (*func)(struct fatm_softc *, struct cmdqueue *)) { struct cmdqueue *q; q = GET_QUEUE(sc->cmdqueue, struct cmdqueue, sc->cmdqueue.head); H_SYNCSTAT_POSTREAD(sc, q->q.statp); if (!(H_GETSTAT(q->q.statp) & FATM_STAT_FREE)) { sc->istats.cmd_queue_full++; return (NULL); } NEXT_QUEUE_ENTRY(sc->cmdqueue.head, FATM_CMD_QLEN); q->error = 0; q->cb = func; H_SETSTAT(q->q.statp, FATM_STAT_PENDING); H_SYNCSTAT_PREWRITE(sc, q->q.statp); WRITE4(sc, q->q.card + FATMOC_ACTIN_VPVC, MKVPVC(vpi, vci)); BARRIER_W(sc); WRITE4(sc, q->q.card + FATMOC_ACTIN_MTU, mtu); BARRIER_W(sc); WRITE4(sc, q->q.card + FATMOC_OP, cmd); BARRIER_W(sc); return (q); } /* * The VC has been opened/closed and somebody has been waiting for this. * Wake them up. */ static void fatm_cmd_complete(struct fatm_softc *sc, struct cmdqueue *q) { H_SYNCSTAT_POSTREAD(sc, q->q.statp); if (H_GETSTAT(q->q.statp) & FATM_STAT_ERROR) { sc->istats.get_stat_errors++; q->error = EIO; } wakeup(q); } /* * Open complete */ static void fatm_open_finish(struct fatm_softc *sc, struct card_vcc *vc) { vc->vflags &= ~FATM_VCC_TRY_OPEN; vc->vflags |= FATM_VCC_OPEN; if (vc->vflags & FATM_VCC_REOPEN) { vc->vflags &= ~FATM_VCC_REOPEN; return; } /* inform management if this is not an NG * VCC or it's an NG PVC. */ if (!(vc->param.flags & ATMIO_FLAG_NG) || (vc->param.flags & ATMIO_FLAG_PVC)) ATMEV_SEND_VCC_CHANGED(IFP2IFATM(sc->ifp), 0, vc->param.vci, 1); } /* * The VC that we have tried to open asynchronously has been opened. */ static void fatm_open_complete(struct fatm_softc *sc, struct cmdqueue *q) { u_int vci; struct card_vcc *vc; vci = GETVCI(READ4(sc, q->q.card + FATMOC_ACTIN_VPVC)); vc = sc->vccs[vci]; H_SYNCSTAT_POSTREAD(sc, q->q.statp); if (H_GETSTAT(q->q.statp) & FATM_STAT_ERROR) { sc->istats.get_stat_errors++; sc->vccs[vci] = NULL; uma_zfree(sc->vcc_zone, vc); if_printf(sc->ifp, "opening VCI %u failed\n", vci); return; } fatm_open_finish(sc, vc); } /* * Wait on the queue entry until the VCC is opened/closed. */ static int fatm_waitvcc(struct fatm_softc *sc, struct cmdqueue *q) { int error; /* * Wait for the command to complete */ error = msleep(q, &sc->mtx, PZERO | PCATCH, "fatm_vci", hz); if (error != 0) return (error); return (q->error); } /* * Start to open a VCC. This just initiates the operation.
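 *
 * A minimal sketch of how a caller would fill the request (field names
 * from struct atmio_openvcc as used below; the actual values are made up):
 *
 *	struct atmio_openvcc op;
 *
 *	bzero(&op, sizeof(op));
 *	op.param.vpi = 0;
 *	op.param.vci = 42;			/* example VCI */
 *	op.param.aal = ATMIO_AAL_5;
 *	op.param.traffic = ATMIO_TRAFFIC_UBR;
 *	op.rxhand = rxhand;			/* protocol receive handle */
 *	error = fatm_ioctl(ifp, SIOCATMOPENVCC, (caddr_t)&op);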
*/ static int fatm_open_vcc(struct fatm_softc *sc, struct atmio_openvcc *op) { int error; struct card_vcc *vc; /* * Check parameters */ if ((op->param.flags & ATMIO_FLAG_NOTX) && (op->param.flags & ATMIO_FLAG_NORX)) return (EINVAL); if (!VC_OK(sc, op->param.vpi, op->param.vci)) return (EINVAL); if (op->param.aal != ATMIO_AAL_0 && op->param.aal != ATMIO_AAL_5) return (EINVAL); vc = uma_zalloc(sc->vcc_zone, M_NOWAIT | M_ZERO); if (vc == NULL) return (ENOMEM); error = 0; FATM_LOCK(sc); if (!(sc->ifp->if_flags & IFF_RUNNING)) { error = EIO; goto done; } if (sc->vccs[op->param.vci] != NULL) { error = EBUSY; goto done; } vc->param = op->param; vc->rxhand = op->rxhand; switch (op->param.traffic) { case ATMIO_TRAFFIC_UBR: break; case ATMIO_TRAFFIC_CBR: if (op->param.tparam.pcr == 0 || op->param.tparam.pcr > IFP2IFATM(sc->ifp)->mib.pcr) { error = EINVAL; goto done; } break; default: error = EINVAL; goto done; } vc->ibytes = vc->obytes = 0; vc->ipackets = vc->opackets = 0; vc->vflags = FATM_VCC_TRY_OPEN; sc->vccs[op->param.vci] = vc; sc->open_vccs++; error = fatm_load_vc(sc, vc); if (error != 0) { sc->vccs[op->param.vci] = NULL; sc->open_vccs--; goto done; } /* don't free below */ vc = NULL; done: FATM_UNLOCK(sc); if (vc != NULL) uma_zfree(sc->vcc_zone, vc); return (error); } /* * Try to initialize the given VC */ static int fatm_load_vc(struct fatm_softc *sc, struct card_vcc *vc) { uint32_t cmd; struct cmdqueue *q; int error; /* Command and buffer strategy */ cmd = FATM_OP_ACTIVATE_VCIN | FATM_OP_INTERRUPT_SEL | (0 << 16); if (vc->param.aal == ATMIO_AAL_0) cmd |= (0 << 8); else cmd |= (5 << 8); q = fatm_start_vcc(sc, vc->param.vpi, vc->param.vci, cmd, 1, (vc->param.flags & ATMIO_FLAG_ASYNC) ? fatm_open_complete : fatm_cmd_complete); if (q == NULL) return (EIO); if (!(vc->param.flags & ATMIO_FLAG_ASYNC)) { error = fatm_waitvcc(sc, q); if (error != 0) return (error); fatm_open_finish(sc, vc); } return (0); } /* * Finish close */ static void fatm_close_finish(struct fatm_softc *sc, struct card_vcc *vc) { /* inform management if this is not an NG * VCC or it's an NG PVC. */ if (!(vc->param.flags & ATMIO_FLAG_NG) || (vc->param.flags & ATMIO_FLAG_PVC)) ATMEV_SEND_VCC_CHANGED(IFP2IFATM(sc->ifp), 0, vc->param.vci, 0); sc->vccs[vc->param.vci] = NULL; sc->open_vccs--; uma_zfree(sc->vcc_zone, vc); } /* * The VC has been closed. */ static void fatm_close_complete(struct fatm_softc *sc, struct cmdqueue *q) { u_int vci; struct card_vcc *vc; vci = GETVCI(READ4(sc, q->q.card + FATMOC_ACTIN_VPVC)); vc = sc->vccs[vci]; H_SYNCSTAT_POSTREAD(sc, q->q.statp); if (H_GETSTAT(q->q.statp) & FATM_STAT_ERROR) { sc->istats.get_stat_errors++; /* keep the VCC in that state */ if_printf(sc->ifp, "closing VCI %u failed\n", vci); return; } fatm_close_finish(sc, vc); } /* * Initiate closing a VCC */ static int fatm_close_vcc(struct fatm_softc *sc, struct atmio_closevcc *cl) { int error; struct cmdqueue *q; struct card_vcc *vc; if (!VC_OK(sc, cl->vpi, cl->vci)) return (EINVAL); error = 0; FATM_LOCK(sc); if (!(sc->ifp->if_flags & IFF_RUNNING)) { error = EIO; goto done; } vc = sc->vccs[cl->vci]; if (vc == NULL || !(vc->vflags & (FATM_VCC_OPEN | FATM_VCC_TRY_OPEN))) { error = ENOENT; goto done; } q = fatm_start_vcc(sc, cl->vpi, cl->vci, FATM_OP_DEACTIVATE_VCIN | FATM_OP_INTERRUPT_SEL, 1, (vc->param.flags & ATMIO_FLAG_ASYNC) ?
fatm_close_complete : fatm_cmd_complete); if (q == NULL) { error = EIO; goto done; } vc->vflags &= ~(FATM_VCC_OPEN | FATM_VCC_TRY_OPEN); vc->vflags |= FATM_VCC_TRY_CLOSE; if (!(vc->param.flags & ATMIO_FLAG_ASYNC)) { error = fatm_waitvcc(sc, q); if (error != 0) goto done; fatm_close_finish(sc, vc); } done: FATM_UNLOCK(sc); return (error); } /* * IOCTL handler */ static int fatm_ioctl(struct ifnet *ifp, u_long cmd, caddr_t arg) { int error; struct fatm_softc *sc = ifp->if_softc; struct ifaddr *ifa = (struct ifaddr *)arg; struct ifreq *ifr = (struct ifreq *)arg; struct atmio_closevcc *cl = (struct atmio_closevcc *)arg; struct atmio_openvcc *op = (struct atmio_openvcc *)arg; struct atmio_vcctable *vtab; error = 0; switch (cmd) { case SIOCATMOPENVCC: /* kernel internal use */ error = fatm_open_vcc(sc, op); break; case SIOCATMCLOSEVCC: /* kernel internal use */ error = fatm_close_vcc(sc, cl); break; case SIOCSIFADDR: FATM_LOCK(sc); ifp->if_flags |= IFF_UP; if (!(ifp->if_flags & IFF_RUNNING)) fatm_init_locked(sc); switch (ifa->ifa_addr->sa_family) { #ifdef INET case AF_INET: case AF_INET6: ifa->ifa_rtrequest = atm_rtrequest; break; #endif default: break; } FATM_UNLOCK(sc); break; case SIOCSIFFLAGS: FATM_LOCK(sc); if (ifp->if_flags & IFF_UP) { if (!(ifp->if_flags & IFF_RUNNING)) { fatm_init_locked(sc); } } else { if (ifp->if_flags & IFF_RUNNING) { fatm_stop(sc); } } FATM_UNLOCK(sc); break; case SIOCGIFMEDIA: case SIOCSIFMEDIA: if (ifp->if_flags & IFF_RUNNING) error = ifmedia_ioctl(ifp, ifr, &sc->media, cmd); else error = EINVAL; break; case SIOCATMGVCCS: /* return vcc table */ vtab = atm_getvccs((struct atmio_vcc **)sc->vccs, FORE_MAX_VCC + 1, sc->open_vccs, &sc->mtx, 1); error = copyout(vtab, ifr->ifr_data, sizeof(*vtab) + vtab->count * sizeof(vtab->vccs[0])); free(vtab, M_DEVBUF); break; case SIOCATMGETVCCS: /* internal netgraph use */ vtab = atm_getvccs((struct atmio_vcc **)sc->vccs, FORE_MAX_VCC + 1, sc->open_vccs, &sc->mtx, 0); if (vtab == NULL) { error = ENOMEM; break; } *(void **)arg = vtab; break; default: DBG(sc, IOCTL, ("+++ cmd=%08lx arg=%p", cmd, arg)); error = EINVAL; break; } return (error); } /* * Detach from the interface and free all resources allocated during * initialisation and later. 
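 *
 * Teardown happens roughly in reverse order of attach: stop the card and
 * detach the PHY, tear down the interrupt, unload and free the receive
 * buffers and queue memory, destroy the DMA maps and tags, release the
 * bus resources and finally destroy the condition variables and mutex.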
*/ static int fatm_detach(device_t dev) { u_int i; struct rbuf *rb; struct fatm_softc *sc; struct txqueue *tx; - sc = (struct fatm_softc *)device_get_softc(dev); + sc = device_get_softc(dev); if (device_is_alive(dev)) { FATM_LOCK(sc); fatm_stop(sc); utopia_detach(&sc->utopia); FATM_UNLOCK(sc); atm_ifdetach(sc->ifp); /* XXX race */ } if (sc->ih != NULL) bus_teardown_intr(dev, sc->irqres, sc->ih); while ((rb = LIST_FIRST(&sc->rbuf_used)) != NULL) { if_printf(sc->ifp, "rbuf %p still in use!\n", rb); bus_dmamap_unload(sc->rbuf_tag, rb->map); m_freem(rb->m); LIST_REMOVE(rb, link); LIST_INSERT_HEAD(&sc->rbuf_free, rb, link); } if (sc->txqueue.chunk != NULL) { for (i = 0; i < FATM_TX_QLEN; i++) { tx = GET_QUEUE(sc->txqueue, struct txqueue, i); bus_dmamap_destroy(sc->tx_tag, tx->map); } } while ((rb = LIST_FIRST(&sc->rbuf_free)) != NULL) { bus_dmamap_destroy(sc->rbuf_tag, rb->map); LIST_REMOVE(rb, link); } if (sc->rbufs != NULL) free(sc->rbufs, M_DEVBUF); if (sc->vccs != NULL) { for (i = 0; i < FORE_MAX_VCC + 1; i++) if (sc->vccs[i] != NULL) { uma_zfree(sc->vcc_zone, sc->vccs[i]); sc->vccs[i] = NULL; } free(sc->vccs, M_DEVBUF); } if (sc->vcc_zone != NULL) uma_zdestroy(sc->vcc_zone); if (sc->l1queue.chunk != NULL) free(sc->l1queue.chunk, M_DEVBUF); if (sc->s1queue.chunk != NULL) free(sc->s1queue.chunk, M_DEVBUF); if (sc->rxqueue.chunk != NULL) free(sc->rxqueue.chunk, M_DEVBUF); if (sc->txqueue.chunk != NULL) free(sc->txqueue.chunk, M_DEVBUF); if (sc->cmdqueue.chunk != NULL) free(sc->cmdqueue.chunk, M_DEVBUF); destroy_dma_memory(&sc->reg_mem); destroy_dma_memory(&sc->sadi_mem); destroy_dma_memory(&sc->prom_mem); #ifdef TEST_DMA_SYNC destroy_dma_memoryX(&sc->s1q_mem); destroy_dma_memoryX(&sc->l1q_mem); destroy_dma_memoryX(&sc->rxq_mem); destroy_dma_memoryX(&sc->txq_mem); destroy_dma_memoryX(&sc->stat_mem); #endif if (sc->tx_tag != NULL) if (bus_dma_tag_destroy(sc->tx_tag)) printf("tx DMA tag busy!\n"); if (sc->rbuf_tag != NULL) if (bus_dma_tag_destroy(sc->rbuf_tag)) printf("rbuf DMA tag busy!\n"); if (sc->parent_dmat != NULL) if (bus_dma_tag_destroy(sc->parent_dmat)) printf("parent DMA tag busy!\n"); if (sc->irqres != NULL) bus_release_resource(dev, SYS_RES_IRQ, sc->irqid, sc->irqres); if (sc->memres != NULL) bus_release_resource(dev, SYS_RES_MEMORY, sc->memid, sc->memres); (void)sysctl_ctx_free(&sc->sysctl_ctx); cv_destroy(&sc->cv_stat); cv_destroy(&sc->cv_regs); mtx_destroy(&sc->mtx); if_free(sc->ifp); return (0); } /* * Sysctl handler */ static int fatm_sysctl_istats(SYSCTL_HANDLER_ARGS) { struct fatm_softc *sc = arg1; u_long *ret; int error; ret = malloc(sizeof(sc->istats), M_TEMP, M_WAITOK); FATM_LOCK(sc); bcopy(&sc->istats, ret, sizeof(sc->istats)); FATM_UNLOCK(sc); error = SYSCTL_OUT(req, ret, sizeof(sc->istats)); free(ret, M_TEMP); return (error); } /* * Sysctl handler for card statistics. * This is disabled because it destroys the PHY statistics.
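 *
 * The statistics appear under the per-device sysctl tree created in
 * fatm_attach(), e.g. (assuming the first card is fatm0):
 *
 *	sysctl hw.atm.fatm0.istats
 *	sysctl hw.atm.fatm0.stats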
*/ static int fatm_sysctl_stats(SYSCTL_HANDLER_ARGS) { struct fatm_softc *sc = arg1; int error; const struct fatm_stats *s; u_long *ret; u_int i; ret = malloc(sizeof(u_long) * FATM_NSTATS, M_TEMP, M_WAITOK); FATM_LOCK(sc); if ((error = fatm_getstat(sc)) == 0) { s = sc->sadi_mem.mem; i = 0; ret[i++] = s->phy_4b5b.crc_header_errors; ret[i++] = s->phy_4b5b.framing_errors; ret[i++] = s->phy_oc3.section_bip8_errors; ret[i++] = s->phy_oc3.path_bip8_errors; ret[i++] = s->phy_oc3.line_bip24_errors; ret[i++] = s->phy_oc3.line_febe_errors; ret[i++] = s->phy_oc3.path_febe_errors; ret[i++] = s->phy_oc3.corr_hcs_errors; ret[i++] = s->phy_oc3.ucorr_hcs_errors; ret[i++] = s->atm.cells_transmitted; ret[i++] = s->atm.cells_received; ret[i++] = s->atm.vpi_bad_range; ret[i++] = s->atm.vpi_no_conn; ret[i++] = s->atm.vci_bad_range; ret[i++] = s->atm.vci_no_conn; ret[i++] = s->aal0.cells_transmitted; ret[i++] = s->aal0.cells_received; ret[i++] = s->aal0.cells_dropped; ret[i++] = s->aal4.cells_transmitted; ret[i++] = s->aal4.cells_received; ret[i++] = s->aal4.cells_crc_errors; ret[i++] = s->aal4.cels_protocol_errors; ret[i++] = s->aal4.cells_dropped; ret[i++] = s->aal4.cspdus_transmitted; ret[i++] = s->aal4.cspdus_received; ret[i++] = s->aal4.cspdus_protocol_errors; ret[i++] = s->aal4.cspdus_dropped; ret[i++] = s->aal5.cells_transmitted; ret[i++] = s->aal5.cells_received; ret[i++] = s->aal5.congestion_experienced; ret[i++] = s->aal5.cells_dropped; ret[i++] = s->aal5.cspdus_transmitted; ret[i++] = s->aal5.cspdus_received; ret[i++] = s->aal5.cspdus_crc_errors; ret[i++] = s->aal5.cspdus_protocol_errors; ret[i++] = s->aal5.cspdus_dropped; ret[i++] = s->aux.small_b1_failed; ret[i++] = s->aux.large_b1_failed; ret[i++] = s->aux.small_b2_failed; ret[i++] = s->aux.large_b2_failed; ret[i++] = s->aux.rpd_alloc_failed; ret[i++] = s->aux.receive_carrier; } /* declare the buffer free */ sc->flags &= ~FATM_STAT_INUSE; cv_signal(&sc->cv_stat); FATM_UNLOCK(sc); if (error == 0) error = SYSCTL_OUT(req, ret, sizeof(u_long) * FATM_NSTATS); free(ret, M_TEMP); return (error); } #define MAXDMASEGS 32 /* maximum number of receive descriptors */ /* * Attach to the device. * * We assume, that there is a global lock (Giant in this case) that protects * multiple threads from entering this function. This makes sense, doesn't it? */ static int fatm_attach(device_t dev) { struct ifnet *ifp; struct fatm_softc *sc; int unit; uint16_t cfg; int error = 0; struct rbuf *rb; u_int i; struct txqueue *tx; sc = device_get_softc(dev); unit = device_get_unit(dev); ifp = sc->ifp = if_alloc(IFT_ATM); if (ifp == NULL) { error = ENOSPC; goto fail; } IFP2IFATM(sc->ifp)->mib.device = ATM_DEVICE_PCA200E; IFP2IFATM(sc->ifp)->mib.serial = 0; IFP2IFATM(sc->ifp)->mib.hw_version = 0; IFP2IFATM(sc->ifp)->mib.sw_version = 0; IFP2IFATM(sc->ifp)->mib.vpi_bits = 0; IFP2IFATM(sc->ifp)->mib.vci_bits = FORE_VCIBITS; IFP2IFATM(sc->ifp)->mib.max_vpcs = 0; IFP2IFATM(sc->ifp)->mib.max_vccs = FORE_MAX_VCC; IFP2IFATM(sc->ifp)->mib.media = IFM_ATM_UNKNOWN; IFP2IFATM(sc->ifp)->phy = &sc->utopia; LIST_INIT(&sc->rbuf_free); LIST_INIT(&sc->rbuf_used); /* * Initialize mutex and condition variables. 
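 *
 * The mutex protects the softc as a whole; cv_stat serializes use of the
 * single statistics DMA buffer (see FATM_STAT_INUSE in the stats handler
 * above) and cv_regs is assumed to play the same role for the SUNI
 * register buffer.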
*/ mtx_init(&sc->mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF); cv_init(&sc->cv_stat, "fatm_stat"); cv_init(&sc->cv_regs, "fatm_regs"); sysctl_ctx_init(&sc->sysctl_ctx); /* * Make the sysctl tree */ if ((sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctx, SYSCTL_STATIC_CHILDREN(_hw_atm), OID_AUTO, device_get_nameunit(dev), CTLFLAG_RD, 0, "")) == NULL) goto fail; if (SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree), OID_AUTO, "istats", CTLFLAG_RD, sc, 0, fatm_sysctl_istats, "LU", "internal statistics") == NULL) goto fail; if (SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree), OID_AUTO, "stats", CTLFLAG_RD, sc, 0, fatm_sysctl_stats, "LU", "card statistics") == NULL) goto fail; if (SYSCTL_ADD_INT(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree), OID_AUTO, "retry_tx", CTLFLAG_RW, &sc->retry_tx, 0, "retry flag") == NULL) goto fail; #ifdef FATM_DEBUG if (SYSCTL_ADD_UINT(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree), OID_AUTO, "debug", CTLFLAG_RW, &sc->debug, 0, "debug flags") == NULL) goto fail; sc->debug = FATM_DEBUG; #endif /* * Network subsystem stuff */ ifp->if_softc = sc; if_initname(ifp, device_get_name(dev), device_get_unit(dev)); ifp->if_flags = IFF_SIMPLEX; ifp->if_ioctl = fatm_ioctl; ifp->if_start = fatm_start; ifp->if_watchdog = fatm_watchdog; ifp->if_init = fatm_init; ifp->if_linkmib = &IFP2IFATM(sc->ifp)->mib; ifp->if_linkmiblen = sizeof(IFP2IFATM(sc->ifp)->mib); /* * Enable memory and busmaster */ cfg = pci_read_config(dev, PCIR_COMMAND, 2); cfg |= PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN; pci_write_config(dev, PCIR_COMMAND, cfg, 2); /* * Map memory */ cfg = pci_read_config(dev, PCIR_COMMAND, 2); if (!(cfg & PCIM_CMD_MEMEN)) { if_printf(ifp, "failed to enable memory mapping\n"); error = ENXIO; goto fail; } sc->memid = 0x10; sc->memres = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->memid, RF_ACTIVE); if (sc->memres == NULL) { if_printf(ifp, "could not map memory\n"); error = ENXIO; goto fail; } sc->memh = rman_get_bushandle(sc->memres); sc->memt = rman_get_bustag(sc->memres); /* * Convert endianness of slave access */ cfg = pci_read_config(dev, FATM_PCIR_MCTL, 1); cfg |= FATM_PCIM_SWAB; pci_write_config(dev, FATM_PCIR_MCTL, cfg, 1); /* * Allocate interrupt (activate at the end) */ sc->irqid = 0; sc->irqres = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->irqid, RF_SHAREABLE | RF_ACTIVE); if (sc->irqres == NULL) { if_printf(ifp, "could not allocate irq\n"); error = ENXIO; goto fail; } /* * Allocate the parent DMA tag. This is used simply to hold overall * restrictions for the controller (and PCI bus) and is never used * to do anything. */ if (bus_dma_tag_create(NULL, 1, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, BUS_SPACE_MAXSIZE_32BIT, MAXDMASEGS, BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL, &sc->parent_dmat)) { if_printf(ifp, "could not allocate parent DMA tag\n"); error = ENOMEM; goto fail; } /* * Allocate the receive buffer DMA tag. This tag must map a maximum of * a mbuf cluster. */ if (bus_dma_tag_create(sc->parent_dmat, 1, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1, MCLBYTES, 0, NULL, NULL, &sc->rbuf_tag)) { if_printf(ifp, "could not allocate rbuf DMA tag\n"); error = ENOMEM; goto fail; } /* * Allocate the transmission DMA tag. Must add 1 because the * rounded-up PDU will be 65536 bytes long.
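 *
 * Worked example (FATM_MAXPDU assumed to be 65535): after rounding up,
 * the largest PDU occupies 65536 bytes, one more than FATM_MAXPDU itself,
 * which is why the tag's maxsize below is FATM_MAXPDU + 1 rather than
 * FATM_MAXPDU.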
*/ if (bus_dma_tag_create(sc->parent_dmat, 1, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, FATM_MAXPDU + 1, TPD_EXTENSIONS + TXD_FIXED, MCLBYTES, 0, NULL, NULL, &sc->tx_tag)) { if_printf(ifp, "could not allocate tx DMA tag\n"); error = ENOMEM; goto fail; } /* * Allocate DMAable memory. */ sc->stat_mem.size = sizeof(uint32_t) * (FATM_CMD_QLEN + FATM_TX_QLEN + FATM_RX_QLEN + SMALL_SUPPLY_QLEN + LARGE_SUPPLY_QLEN); sc->stat_mem.align = 4; sc->txq_mem.size = FATM_TX_QLEN * TPD_SIZE; sc->txq_mem.align = 32; sc->rxq_mem.size = FATM_RX_QLEN * RPD_SIZE; sc->rxq_mem.align = 32; sc->s1q_mem.size = SMALL_SUPPLY_QLEN * BSUP_BLK2SIZE(SMALL_SUPPLY_BLKSIZE); sc->s1q_mem.align = 32; sc->l1q_mem.size = LARGE_SUPPLY_QLEN * BSUP_BLK2SIZE(LARGE_SUPPLY_BLKSIZE); sc->l1q_mem.align = 32; #ifdef TEST_DMA_SYNC if ((error = alloc_dma_memoryX(sc, "STATUS", &sc->stat_mem)) != 0 || (error = alloc_dma_memoryX(sc, "TXQ", &sc->txq_mem)) != 0 || (error = alloc_dma_memoryX(sc, "RXQ", &sc->rxq_mem)) != 0 || (error = alloc_dma_memoryX(sc, "S1Q", &sc->s1q_mem)) != 0 || (error = alloc_dma_memoryX(sc, "L1Q", &sc->l1q_mem)) != 0) goto fail; #else if ((error = alloc_dma_memory(sc, "STATUS", &sc->stat_mem)) != 0 || (error = alloc_dma_memory(sc, "TXQ", &sc->txq_mem)) != 0 || (error = alloc_dma_memory(sc, "RXQ", &sc->rxq_mem)) != 0 || (error = alloc_dma_memory(sc, "S1Q", &sc->s1q_mem)) != 0 || (error = alloc_dma_memory(sc, "L1Q", &sc->l1q_mem)) != 0) goto fail; #endif sc->prom_mem.size = sizeof(struct prom); sc->prom_mem.align = 32; if ((error = alloc_dma_memory(sc, "PROM", &sc->prom_mem)) != 0) goto fail; sc->sadi_mem.size = sizeof(struct fatm_stats); sc->sadi_mem.align = 32; if ((error = alloc_dma_memory(sc, "STATISTICS", &sc->sadi_mem)) != 0) goto fail; sc->reg_mem.size = sizeof(uint32_t) * FATM_NREGS; sc->reg_mem.align = 32; if ((error = alloc_dma_memory(sc, "REGISTERS", &sc->reg_mem)) != 0) goto fail; /* * Allocate queues */ sc->cmdqueue.chunk = malloc(FATM_CMD_QLEN * sizeof(struct cmdqueue), M_DEVBUF, M_ZERO | M_WAITOK); sc->txqueue.chunk = malloc(FATM_TX_QLEN * sizeof(struct txqueue), M_DEVBUF, M_ZERO | M_WAITOK); sc->rxqueue.chunk = malloc(FATM_RX_QLEN * sizeof(struct rxqueue), M_DEVBUF, M_ZERO | M_WAITOK); sc->s1queue.chunk = malloc(SMALL_SUPPLY_QLEN * sizeof(struct supqueue), M_DEVBUF, M_ZERO | M_WAITOK); sc->l1queue.chunk = malloc(LARGE_SUPPLY_QLEN * sizeof(struct supqueue), M_DEVBUF, M_ZERO | M_WAITOK); sc->vccs = malloc((FORE_MAX_VCC + 1) * sizeof(sc->vccs[0]), M_DEVBUF, M_ZERO | M_WAITOK); sc->vcc_zone = uma_zcreate("FATM vccs", sizeof(struct card_vcc), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); if (sc->vcc_zone == NULL) { error = ENOMEM; goto fail; } /* * Allocate memory for the receive buffer headers. The total number * of headers should probably also include the maximum number of * buffers on the receive queue. */ sc->rbuf_total = SMALL_POOL_SIZE + LARGE_POOL_SIZE; sc->rbufs = malloc(sc->rbuf_total * sizeof(struct rbuf), M_DEVBUF, M_ZERO | M_WAITOK); /* * Put all rbuf headers on the free list and create DMA maps. */ for (rb = sc->rbufs, i = 0; i < sc->rbuf_total; i++, rb++) { if ((error = bus_dmamap_create(sc->rbuf_tag, 0, &rb->map))) { if_printf(sc->ifp, "creating rx map: %d\n", error); goto fail; } LIST_INSERT_HEAD(&sc->rbuf_free, rb, link); } /* * Create dma maps for transmission. In case of an error, free the * allocated DMA maps, because on some architectures maps are NULL * and we cannot distinguish between a failure and a NULL map in * the detach routine. 
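 *
 * That is: on such architectures a NULL map in the queue could mean either
 * "never created" or "created as NULL", so the error path below unwinds
 * the maps it created itself instead of leaving the job to fatm_detach().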
*/ for (i = 0; i < FATM_TX_QLEN; i++) { tx = GET_QUEUE(sc->txqueue, struct txqueue, i); if ((error = bus_dmamap_create(sc->tx_tag, 0, &tx->map))) { if_printf(sc->ifp, "creating tx map: %d\n", error); while (i > 0) { tx = GET_QUEUE(sc->txqueue, struct txqueue, i - 1); bus_dmamap_destroy(sc->tx_tag, tx->map); i--; } goto fail; } } utopia_attach(&sc->utopia, IFP2IFATM(sc->ifp), &sc->media, &sc->mtx, &sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree), &fatm_utopia_methods); sc->utopia.flags |= UTP_FL_NORESET | UTP_FL_POLL_CARRIER; /* * Attach the interface */ atm_ifattach(ifp); ifp->if_snd.ifq_maxlen = 512; #ifdef ENABLE_BPF bpfattach(ifp, DLT_ATM_RFC1483, sizeof(struct atmllc)); #endif error = bus_setup_intr(dev, sc->irqres, INTR_TYPE_NET, fatm_intr, sc, &sc->ih); if (error) { if_printf(ifp, "couldn't setup irq\n"); goto fail; } fail: if (error) fatm_detach(dev); return (error); } #if defined(FATM_DEBUG) && 0 static void dump_s1_queue(struct fatm_softc *sc) { int i; struct supqueue *q; for(i = 0; i < SMALL_SUPPLY_QLEN; i++) { q = GET_QUEUE(sc->s1queue, struct supqueue, i); printf("%2d: card=%x(%x,%x) stat=%x\n", i, q->q.card, READ4(sc, q->q.card), READ4(sc, q->q.card + 4), *q->q.statp); } } #endif /* * Driver infrastructure. */ static device_method_t fatm_methods[] = { DEVMETHOD(device_probe, fatm_probe), DEVMETHOD(device_attach, fatm_attach), DEVMETHOD(device_detach, fatm_detach), { 0, 0 } }; static driver_t fatm_driver = { "fatm", fatm_methods, sizeof(struct fatm_softc), }; DRIVER_MODULE(fatm, pci, fatm_driver, fatm_devclass, 0, 0); Index: head/sys/dev/hatm/if_hatm.c =================================================================== --- head/sys/dev/hatm/if_hatm.c (revision 147720) +++ head/sys/dev/hatm/if_hatm.c (revision 147721) @@ -1,2417 +1,2417 @@ /*- * Copyright (c) 2001-2003 * Fraunhofer Institute for Open Communication Systems (FhG Fokus). * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * Author: Hartmut Brandt * * ForeHE driver. * * This file contains the module and driver infrastructure stuff as well * as a couple of utility functions and the entire initialisation. 
*/ #include __FBSDID("$FreeBSD$"); #include "opt_inet.h" #include "opt_natm.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef ENABLE_BPF #include #endif #include #include #include #include #include #include #include #include #include #include #include #include static const struct { uint16_t vid; uint16_t did; const char *name; } hatm_devs[] = { { 0x1127, 0x400, "FORE HE" }, { 0, 0, NULL } }; SYSCTL_DECL(_hw_atm); MODULE_DEPEND(hatm, utopia, 1, 1, 1); MODULE_DEPEND(hatm, pci, 1, 1, 1); MODULE_DEPEND(hatm, atm, 1, 1, 1); #define EEPROM_DELAY 400 /* microseconds */ /* Read from EEPROM 0000 0011b */ static const uint32_t readtab[] = { HE_REGM_HOST_PROM_SEL | HE_REGM_HOST_PROM_CLOCK, 0, HE_REGM_HOST_PROM_CLOCK, 0, /* 0 */ HE_REGM_HOST_PROM_CLOCK, 0, /* 0 */ HE_REGM_HOST_PROM_CLOCK, 0, /* 0 */ HE_REGM_HOST_PROM_CLOCK, 0, /* 0 */ HE_REGM_HOST_PROM_CLOCK, 0, /* 0 */ HE_REGM_HOST_PROM_CLOCK, HE_REGM_HOST_PROM_DATA_IN, /* 0 */ HE_REGM_HOST_PROM_CLOCK | HE_REGM_HOST_PROM_DATA_IN, HE_REGM_HOST_PROM_DATA_IN, /* 1 */ HE_REGM_HOST_PROM_CLOCK | HE_REGM_HOST_PROM_DATA_IN, HE_REGM_HOST_PROM_DATA_IN, /* 1 */ }; static const uint32_t clocktab[] = { 0, HE_REGM_HOST_PROM_CLOCK, 0, HE_REGM_HOST_PROM_CLOCK, 0, HE_REGM_HOST_PROM_CLOCK, 0, HE_REGM_HOST_PROM_CLOCK, 0, HE_REGM_HOST_PROM_CLOCK, 0, HE_REGM_HOST_PROM_CLOCK, 0, HE_REGM_HOST_PROM_CLOCK, 0, HE_REGM_HOST_PROM_CLOCK, 0 }; /* * Convert cell rate to ATM Forum format */ u_int hatm_cps2atmf(uint32_t pcr) { u_int e; if (pcr == 0) return (0); pcr <<= 9; e = 0; while (pcr > (1024 - 1)) { e++; pcr >>= 1; } return ((1 << 14) | (e << 9) | (pcr & 0x1ff)); } u_int hatm_atmf2cps(uint32_t fcr) { fcr &= 0x7fff; return ((1 << ((fcr >> 9) & 0x1f)) * (512 + (fcr & 0x1ff)) / 512 * (fcr >> 14)); } /************************************************************ * * Initialisation */ /* * Probe for a HE controller */ static int hatm_probe(device_t dev) { int i; for (i = 0; hatm_devs[i].name; i++) if (pci_get_vendor(dev) == hatm_devs[i].vid && pci_get_device(dev) == hatm_devs[i].did) { device_set_desc(dev, hatm_devs[i].name); return (BUS_PROBE_DEFAULT); } return (ENXIO); } /* * Allocate and map DMA-able memory. We support only contiguous mappings. */ static void dmaload_helper(void *arg, bus_dma_segment_t *segs, int nsegs, int error) { if (error) return; KASSERT(nsegs == 1, ("too many segments for DMA: %d", nsegs)); KASSERT(segs[0].ds_addr <= 0xffffffffUL, ("phys addr too large %lx", (u_long)segs[0].ds_addr)); *(bus_addr_t *)arg = segs[0].ds_addr; } static int hatm_alloc_dmamem(struct hatm_softc *sc, const char *what, struct dmamem *mem) { int error; mem->base = NULL; /* * Alignement does not work in the bus_dmamem_alloc function below * on FreeBSD. malloc seems to align objects at least to the object * size so increase the size to the alignment if the size is lesser * than the alignemnt. * XXX on sparc64 this is (probably) not needed. 
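 *
 * Illustration of the workaround (numbers assumed): a request with
 * size 1024 and align 4096 is bumped to size 4096 below, so an allocator
 * that aligns only to the object size still returns a suitably aligned
 * buffer.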
*/ if (mem->size < mem->align) mem->size = mem->align; error = bus_dma_tag_create(sc->parent_tag, mem->align, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, mem->size, 1, BUS_SPACE_MAXSIZE_32BIT, BUS_DMA_ALLOCNOW, NULL, NULL, &mem->tag); if (error) { if_printf(sc->ifp, "DMA tag create (%s)\n", what); return (error); } error = bus_dmamem_alloc(mem->tag, &mem->base, 0, &mem->map); if (error) { if_printf(sc->ifp, "DMA mem alloc (%s): %d\n", what, error); bus_dma_tag_destroy(mem->tag); mem->base = NULL; return (error); } error = bus_dmamap_load(mem->tag, mem->map, mem->base, mem->size, dmaload_helper, &mem->paddr, BUS_DMA_NOWAIT); if (error) { if_printf(sc->ifp, "DMA map load (%s): %d\n", what, error); bus_dmamem_free(mem->tag, mem->base, mem->map); bus_dma_tag_destroy(mem->tag); mem->base = NULL; return (error); } DBG(sc, DMA, ("%s S/A/V/P 0x%x 0x%x %p 0x%lx", what, mem->size, mem->align, mem->base, (u_long)mem->paddr)); return (0); } /* * Destroy all the resources of an DMA-able memory region. */ static void hatm_destroy_dmamem(struct dmamem *mem) { if (mem->base != NULL) { bus_dmamap_unload(mem->tag, mem->map); bus_dmamem_free(mem->tag, mem->base, mem->map); (void)bus_dma_tag_destroy(mem->tag); mem->base = NULL; } } /* * Initialize/destroy DMA maps for the large pool 0 */ static void hatm_destroy_rmaps(struct hatm_softc *sc) { u_int b; DBG(sc, ATTACH, ("destroying rmaps and lbuf pointers...")); if (sc->rmaps != NULL) { for (b = 0; b < sc->lbufs_size; b++) bus_dmamap_destroy(sc->mbuf_tag, sc->rmaps[b]); free(sc->rmaps, M_DEVBUF); } if (sc->lbufs != NULL) free(sc->lbufs, M_DEVBUF); } static void hatm_init_rmaps(struct hatm_softc *sc) { u_int b; int err; DBG(sc, ATTACH, ("allocating rmaps and lbuf pointers...")); sc->lbufs = malloc(sizeof(sc->lbufs[0]) * sc->lbufs_size, M_DEVBUF, M_ZERO | M_WAITOK); /* allocate and create the DMA maps for the large pool */ sc->rmaps = malloc(sizeof(sc->rmaps[0]) * sc->lbufs_size, M_DEVBUF, M_WAITOK); for (b = 0; b < sc->lbufs_size; b++) { err = bus_dmamap_create(sc->mbuf_tag, 0, &sc->rmaps[b]); if (err != 0) panic("bus_dmamap_create: %d\n", err); } } /* * Initialize and destroy small mbuf page pointers and pages */ static void hatm_destroy_smbufs(struct hatm_softc *sc) { u_int i, b; struct mbuf_page *pg; struct mbuf_chunk_hdr *h; if (sc->mbuf_pages != NULL) { for (i = 0; i < sc->mbuf_npages; i++) { pg = sc->mbuf_pages[i]; for (b = 0; b < pg->hdr.nchunks; b++) { h = (struct mbuf_chunk_hdr *) ((char *)pg + b * pg->hdr.chunksize + pg->hdr.hdroff); if (h->flags & MBUF_CARD) if_printf(sc->ifp, "%s -- mbuf page=%u card buf %u\n", __func__, i, b); if (h->flags & MBUF_USED) if_printf(sc->ifp, "%s -- mbuf page=%u used buf %u\n", __func__, i, b); } bus_dmamap_unload(sc->mbuf_tag, pg->hdr.map); bus_dmamap_destroy(sc->mbuf_tag, pg->hdr.map); free(pg, M_DEVBUF); } free(sc->mbuf_pages, M_DEVBUF); } } static void hatm_init_smbufs(struct hatm_softc *sc) { sc->mbuf_pages = malloc(sizeof(sc->mbuf_pages[0]) * sc->mbuf_max_pages, M_DEVBUF, M_WAITOK); sc->mbuf_npages = 0; } /* * Initialize/destroy TPDs. This is called from attach/detach. 
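 *
 * Bookkeeping sketch: all TPDs live in one DMA chunk (sc->tpds); free ones
 * sit on the tpd_free SLIST, while the tpd_used bitmap marks those handed
 * to the card so that hatm_stop_tpds() can reclaim them.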
*/ static void hatm_destroy_tpds(struct hatm_softc *sc) { struct tpd *t; if (sc->tpds.base == NULL) return; DBG(sc, ATTACH, ("releasing TPDs ...")); if (sc->tpd_nfree != sc->tpd_total) if_printf(sc->ifp, "%u tpds still in use from %u\n", sc->tpd_total - sc->tpd_nfree, sc->tpd_total); while ((t = SLIST_FIRST(&sc->tpd_free)) != NULL) { SLIST_REMOVE_HEAD(&sc->tpd_free, link); bus_dmamap_destroy(sc->tx_tag, t->map); } hatm_destroy_dmamem(&sc->tpds); free(sc->tpd_used, M_DEVBUF); DBG(sc, ATTACH, ("... done")); } static int hatm_init_tpds(struct hatm_softc *sc) { int error; u_int i; struct tpd *t; DBG(sc, ATTACH, ("allocating %u TPDs and maps ...", sc->tpd_total)); error = hatm_alloc_dmamem(sc, "TPD memory", &sc->tpds); if (error != 0) { DBG(sc, ATTACH, ("... dmamem error=%d", error)); return (error); } /* put all the TPDs on the free list and allocate DMA maps */ for (i = 0; i < sc->tpd_total; i++) { t = TPD_ADDR(sc, i); t->no = i; t->mbuf = NULL; error = bus_dmamap_create(sc->tx_tag, 0, &t->map); if (error != 0) { DBG(sc, ATTACH, ("... dmamap error=%d", error)); while ((t = SLIST_FIRST(&sc->tpd_free)) != NULL) { SLIST_REMOVE_HEAD(&sc->tpd_free, link); bus_dmamap_destroy(sc->tx_tag, t->map); } hatm_destroy_dmamem(&sc->tpds); return (error); } SLIST_INSERT_HEAD(&sc->tpd_free, t, link); } /* allocate and zero bitmap */ sc->tpd_used = malloc(sizeof(uint8_t) * (sc->tpd_total + 7) / 8, M_DEVBUF, M_ZERO | M_WAITOK); sc->tpd_nfree = sc->tpd_total; DBG(sc, ATTACH, ("... done")); return (0); } /* * Free all the TPDs that were given to the card. * An mbuf chain may be attached to a TPD - free it also and * unload its associated DMA map. */ static void hatm_stop_tpds(struct hatm_softc *sc) { u_int i; struct tpd *t; DBG(sc, ATTACH, ("free TPDs ...")); for (i = 0; i < sc->tpd_total; i++) { if (TPD_TST_USED(sc, i)) { t = TPD_ADDR(sc, i); if (t->mbuf) { m_freem(t->mbuf); t->mbuf = NULL; bus_dmamap_unload(sc->tx_tag, t->map); } TPD_CLR_USED(sc, i); SLIST_INSERT_HEAD(&sc->tpd_free, t, link); sc->tpd_nfree++; } } } /* * This frees ALL resources of this interface and leaves the structure * in an indeterminate state. This is called just before detaching or * on a failed attach. No lock should be held. */ static void hatm_destroy(struct hatm_softc *sc) { u_int cid; bus_teardown_intr(sc->dev, sc->irqres, sc->ih); hatm_destroy_rmaps(sc); hatm_destroy_smbufs(sc); hatm_destroy_tpds(sc); if (sc->vcc_zone != NULL) { for (cid = 0; cid < HE_MAX_VCCS; cid++) if (sc->vccs[cid] != NULL) uma_zfree(sc->vcc_zone, sc->vccs[cid]); uma_zdestroy(sc->vcc_zone); } /* * Release all memory allocated to the various queues and * Status pages. These have their own flag which shows whether * they are really allocated.
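 *
 * hatm_destroy_dmamem() may safely be called for a region that was never
 * set up: mem->base doubles as that flag and is NULL whenever the region
 * is not allocated.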
*/ hatm_destroy_dmamem(&sc->irq_0.mem); hatm_destroy_dmamem(&sc->rbp_s0.mem); hatm_destroy_dmamem(&sc->rbp_l0.mem); hatm_destroy_dmamem(&sc->rbp_s1.mem); hatm_destroy_dmamem(&sc->rbrq_0.mem); hatm_destroy_dmamem(&sc->rbrq_1.mem); hatm_destroy_dmamem(&sc->tbrq.mem); hatm_destroy_dmamem(&sc->tpdrq.mem); hatm_destroy_dmamem(&sc->hsp_mem); if (sc->irqres != NULL) bus_release_resource(sc->dev, SYS_RES_IRQ, sc->irqid, sc->irqres); if (sc->tx_tag != NULL) if (bus_dma_tag_destroy(sc->tx_tag)) if_printf(sc->ifp, "mbuf DMA tag busy\n"); if (sc->mbuf_tag != NULL) if (bus_dma_tag_destroy(sc->mbuf_tag)) if_printf(sc->ifp, "mbuf DMA tag busy\n"); if (sc->parent_tag != NULL) if (bus_dma_tag_destroy(sc->parent_tag)) if_printf(sc->ifp, "parent DMA tag busy\n"); if (sc->memres != NULL) bus_release_resource(sc->dev, SYS_RES_MEMORY, sc->memid, sc->memres); sysctl_ctx_free(&sc->sysctl_ctx); cv_destroy(&sc->cv_rcclose); cv_destroy(&sc->vcc_cv); mtx_destroy(&sc->mtx); } /* * 4.4 Card reset */ static int hatm_reset(struct hatm_softc *sc) { u_int v, count; WRITE4(sc, HE_REGO_RESET_CNTL, 0x00); BARRIER_W(sc); WRITE4(sc, HE_REGO_RESET_CNTL, 0xff); BARRIER_RW(sc); count = 0; while (((v = READ4(sc, HE_REGO_RESET_CNTL)) & HE_REGM_RESET_STATE) == 0) { BARRIER_R(sc); if (++count == 100) { if_printf(sc->ifp, "reset failed\n"); return (ENXIO); } DELAY(1000); } return (0); } /* * 4.5 Set Bus Width */ static void hatm_init_bus_width(struct hatm_softc *sc) { uint32_t v, v1; v = READ4(sc, HE_REGO_HOST_CNTL); BARRIER_R(sc); if (v & HE_REGM_HOST_BUS64) { sc->pci64 = 1; v1 = pci_read_config(sc->dev, HE_PCIR_GEN_CNTL_0, 4); v1 |= HE_PCIM_CTL0_64BIT; pci_write_config(sc->dev, HE_PCIR_GEN_CNTL_0, v1, 4); v |= HE_REGM_HOST_DESC_RD64 | HE_REGM_HOST_DATA_RD64 | HE_REGM_HOST_DATA_WR64; WRITE4(sc, HE_REGO_HOST_CNTL, v); BARRIER_W(sc); } else { sc->pci64 = 0; v = pci_read_config(sc->dev, HE_PCIR_GEN_CNTL_0, 4); v &= ~HE_PCIM_CTL0_64BIT; pci_write_config(sc->dev, HE_PCIR_GEN_CNTL_0, v, 4); } } /* * 4.6 Set Host Endianess */ static void hatm_init_endianess(struct hatm_softc *sc) { uint32_t v; v = READ4(sc, HE_REGO_LB_SWAP); BARRIER_R(sc); #if BYTE_ORDER == BIG_ENDIAN v |= HE_REGM_LBSWAP_INTR_SWAP | HE_REGM_LBSWAP_DESC_WR_SWAP | HE_REGM_LBSWAP_BIG_ENDIAN; v &= ~(HE_REGM_LBSWAP_DATA_WR_SWAP | HE_REGM_LBSWAP_DESC_RD_SWAP | HE_REGM_LBSWAP_DATA_RD_SWAP); #else v &= ~(HE_REGM_LBSWAP_DATA_WR_SWAP | HE_REGM_LBSWAP_DESC_RD_SWAP | HE_REGM_LBSWAP_DATA_RD_SWAP | HE_REGM_LBSWAP_INTR_SWAP | HE_REGM_LBSWAP_DESC_WR_SWAP | HE_REGM_LBSWAP_BIG_ENDIAN); #endif if (sc->he622) v |= HE_REGM_LBSWAP_XFER_SIZE; WRITE4(sc, HE_REGO_LB_SWAP, v); BARRIER_W(sc); } /* * 4.7 Read EEPROM */ static uint8_t hatm_read_prom_byte(struct hatm_softc *sc, u_int addr) { uint32_t val, tmp_read, byte_read; u_int i, j; int n; val = READ4(sc, HE_REGO_HOST_CNTL); val &= HE_REGM_HOST_PROM_BITS; BARRIER_R(sc); val |= HE_REGM_HOST_PROM_WREN; WRITE4(sc, HE_REGO_HOST_CNTL, val); BARRIER_W(sc); /* send READ */ for (i = 0; i < sizeof(readtab) / sizeof(readtab[0]); i++) { WRITE4(sc, HE_REGO_HOST_CNTL, val | readtab[i]); BARRIER_W(sc); DELAY(EEPROM_DELAY); } /* send ADDRESS */ for (n = 7, j = 0; n >= 0; n--) { WRITE4(sc, HE_REGO_HOST_CNTL, val | clocktab[j++] | (((addr >> n) & 1 ) << HE_REGS_HOST_PROM_DATA_IN)); BARRIER_W(sc); DELAY(EEPROM_DELAY); WRITE4(sc, HE_REGO_HOST_CNTL, val | clocktab[j++] | (((addr >> n) & 1 ) << HE_REGS_HOST_PROM_DATA_IN)); BARRIER_W(sc); DELAY(EEPROM_DELAY); } val &= ~HE_REGM_HOST_PROM_WREN; WRITE4(sc, HE_REGO_HOST_CNTL, val); BARRIER_W(sc); /* read DATA */ byte_read = 
0; for (n = 7, j = 0; n >= 0; n--) { WRITE4(sc, HE_REGO_HOST_CNTL, val | clocktab[j++]); BARRIER_W(sc); DELAY(EEPROM_DELAY); tmp_read = READ4(sc, HE_REGO_HOST_CNTL); byte_read |= (uint8_t)(((tmp_read & HE_REGM_HOST_PROM_DATA_OUT) >> HE_REGS_HOST_PROM_DATA_OUT) << n); WRITE4(sc, HE_REGO_HOST_CNTL, val | clocktab[j++]); BARRIER_W(sc); DELAY(EEPROM_DELAY); } WRITE4(sc, HE_REGO_HOST_CNTL, val | clocktab[j++]); BARRIER_W(sc); DELAY(EEPROM_DELAY); return (byte_read); } static void hatm_init_read_eeprom(struct hatm_softc *sc) { u_int n, count; u_char byte; uint32_t v; for (n = count = 0; count < HE_EEPROM_PROD_ID_LEN; count++) { byte = hatm_read_prom_byte(sc, HE_EEPROM_PROD_ID + count); if (n > 0 || byte != ' ') sc->prod_id[n++] = byte; } while (n > 0 && sc->prod_id[n-1] == ' ') n--; sc->prod_id[n] = '\0'; for (n = count = 0; count < HE_EEPROM_REV_LEN; count++) { byte = hatm_read_prom_byte(sc, HE_EEPROM_REV + count); if (n > 0 || byte != ' ') sc->rev[n++] = byte; } while (n > 0 && sc->rev[n-1] == ' ') n--; sc->rev[n] = '\0'; IFP2IFATM(sc->ifp)->mib.hw_version = sc->rev[0]; IFP2IFATM(sc->ifp)->mib.serial = hatm_read_prom_byte(sc, HE_EEPROM_M_SN + 0) << 0; IFP2IFATM(sc->ifp)->mib.serial |= hatm_read_prom_byte(sc, HE_EEPROM_M_SN + 1) << 8; IFP2IFATM(sc->ifp)->mib.serial |= hatm_read_prom_byte(sc, HE_EEPROM_M_SN + 2) << 16; IFP2IFATM(sc->ifp)->mib.serial |= hatm_read_prom_byte(sc, HE_EEPROM_M_SN + 3) << 24; v = hatm_read_prom_byte(sc, HE_EEPROM_MEDIA + 0) << 0; v |= hatm_read_prom_byte(sc, HE_EEPROM_MEDIA + 1) << 8; v |= hatm_read_prom_byte(sc, HE_EEPROM_MEDIA + 2) << 16; v |= hatm_read_prom_byte(sc, HE_EEPROM_MEDIA + 3) << 24; switch (v) { case HE_MEDIA_UTP155: IFP2IFATM(sc->ifp)->mib.media = IFM_ATM_UTP_155; IFP2IFATM(sc->ifp)->mib.pcr = ATM_RATE_155M; break; case HE_MEDIA_MMF155: IFP2IFATM(sc->ifp)->mib.media = IFM_ATM_MM_155; IFP2IFATM(sc->ifp)->mib.pcr = ATM_RATE_155M; break; case HE_MEDIA_MMF622: IFP2IFATM(sc->ifp)->mib.media = IFM_ATM_MM_622; IFP2IFATM(sc->ifp)->mib.device = ATM_DEVICE_HE622; IFP2IFATM(sc->ifp)->mib.pcr = ATM_RATE_622M; sc->he622 = 1; break; case HE_MEDIA_SMF155: IFP2IFATM(sc->ifp)->mib.media = IFM_ATM_SM_155; IFP2IFATM(sc->ifp)->mib.pcr = ATM_RATE_155M; break; case HE_MEDIA_SMF622: IFP2IFATM(sc->ifp)->mib.media = IFM_ATM_SM_622; IFP2IFATM(sc->ifp)->mib.device = ATM_DEVICE_HE622; IFP2IFATM(sc->ifp)->mib.pcr = ATM_RATE_622M; sc->he622 = 1; break; } IFP2IFATM(sc->ifp)->mib.esi[0] = hatm_read_prom_byte(sc, HE_EEPROM_MAC + 0); IFP2IFATM(sc->ifp)->mib.esi[1] = hatm_read_prom_byte(sc, HE_EEPROM_MAC + 1); IFP2IFATM(sc->ifp)->mib.esi[2] = hatm_read_prom_byte(sc, HE_EEPROM_MAC + 2); IFP2IFATM(sc->ifp)->mib.esi[3] = hatm_read_prom_byte(sc, HE_EEPROM_MAC + 3); IFP2IFATM(sc->ifp)->mib.esi[4] = hatm_read_prom_byte(sc, HE_EEPROM_MAC + 4); IFP2IFATM(sc->ifp)->mib.esi[5] = hatm_read_prom_byte(sc, HE_EEPROM_MAC + 5); } /* * Clear unused interrupt queue */ static void hatm_clear_irq(struct hatm_softc *sc, u_int group) { WRITE4(sc, HE_REGO_IRQ_BASE(group), 0); WRITE4(sc, HE_REGO_IRQ_HEAD(group), 0); WRITE4(sc, HE_REGO_IRQ_CNTL(group), 0); WRITE4(sc, HE_REGO_IRQ_DATA(group), 0); } /* * 4.10 Initialize interrupt queues */ static void hatm_init_irq(struct hatm_softc *sc, struct heirq *q, u_int group) { u_int i; if (q->size == 0) { hatm_clear_irq(sc, group); return; } q->group = group; q->sc = sc; q->irq = q->mem.base; q->head = 0; q->tailp = q->irq + (q->size - 1); *q->tailp = 0; for (i = 0; i < q->size; i++) q->irq[i] = HE_REGM_ITYPE_INVALID; WRITE4(sc, HE_REGO_IRQ_BASE(group), q->mem.paddr); 
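/*
	 * The HEAD register below packs the ring geometry and the interrupt
	 * threshold into a single word, e.g. (values assumed, not from the
	 * source): a 256-entry queue with a threshold of 224 yields
	 * ((256 - 1) << HE_REGS_IRQ_HEAD_SIZE) |
	 * (224 << HE_REGS_IRQ_HEAD_THRESH).
	 */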
WRITE4(sc, HE_REGO_IRQ_HEAD(group), ((q->size - 1) << HE_REGS_IRQ_HEAD_SIZE) | (q->thresh << HE_REGS_IRQ_HEAD_THRESH)); WRITE4(sc, HE_REGO_IRQ_CNTL(group), q->line); WRITE4(sc, HE_REGO_IRQ_DATA(group), 0); } /* * 5.1.3 Initialize connection memory */ static void hatm_init_cm(struct hatm_softc *sc) { u_int rsra, mlbm, rabr, numbuffs; u_int tsra, tabr, mtpd; u_int n; for (n = 0; n < HE_CONFIG_TXMEM; n++) WRITE_TCM4(sc, n, 0); for (n = 0; n < HE_CONFIG_RXMEM; n++) WRITE_RCM4(sc, n, 0); numbuffs = sc->r0_numbuffs + sc->r1_numbuffs + sc->tx_numbuffs; rsra = 0; mlbm = ((rsra + IFP2IFATM(sc->ifp)->mib.max_vccs * 8) + 0x7ff) & ~0x7ff; rabr = ((mlbm + numbuffs * 2) + 0x7ff) & ~0x7ff; sc->rsrb = ((rabr + 2048) + (2 * IFP2IFATM(sc->ifp)->mib.max_vccs - 1)) & ~(2 * IFP2IFATM(sc->ifp)->mib.max_vccs - 1); tsra = 0; sc->tsrb = tsra + IFP2IFATM(sc->ifp)->mib.max_vccs * 8; sc->tsrc = sc->tsrb + IFP2IFATM(sc->ifp)->mib.max_vccs * 4; sc->tsrd = sc->tsrc + IFP2IFATM(sc->ifp)->mib.max_vccs * 2; tabr = sc->tsrd + IFP2IFATM(sc->ifp)->mib.max_vccs * 1; mtpd = ((tabr + 1024) + (16 * IFP2IFATM(sc->ifp)->mib.max_vccs - 1)) & ~(16 * IFP2IFATM(sc->ifp)->mib.max_vccs - 1); DBG(sc, ATTACH, ("rsra=%x mlbm=%x rabr=%x rsrb=%x", rsra, mlbm, rabr, sc->rsrb)); DBG(sc, ATTACH, ("tsra=%x tsrb=%x tsrc=%x tsrd=%x tabr=%x mtpd=%x", tsra, sc->tsrb, sc->tsrc, sc->tsrd, tabr, mtpd)); WRITE4(sc, HE_REGO_TSRB_BA, sc->tsrb); WRITE4(sc, HE_REGO_TSRC_BA, sc->tsrc); WRITE4(sc, HE_REGO_TSRD_BA, sc->tsrd); WRITE4(sc, HE_REGO_TMABR_BA, tabr); WRITE4(sc, HE_REGO_TPD_BA, mtpd); WRITE4(sc, HE_REGO_RCMRSRB_BA, sc->rsrb); WRITE4(sc, HE_REGO_RCMLBM_BA, mlbm); WRITE4(sc, HE_REGO_RCMABR_BA, rabr); BARRIER_W(sc); } /* * 5.1.4 Initialize Local buffer Pools */ static void hatm_init_rx_buffer_pool(struct hatm_softc *sc, u_int num, /* bank */ u_int start, /* start row */ u_int numbuffs /* number of entries */ ) { u_int row_size; /* bytes per row */ uint32_t row_addr; /* start address of this row */ u_int lbuf_size; /* bytes per lbuf */ u_int lbufs_per_row; /* number of lbufs per memory row */ uint32_t lbufd_index; /* index of lbuf descriptor */ uint32_t lbufd_addr; /* address of lbuf descriptor */ u_int lbuf_row_cnt; /* current lbuf in current row */ uint32_t lbuf_addr; /* address of current buffer */ u_int i; row_size = sc->bytes_per_row;; row_addr = start * row_size; lbuf_size = sc->cells_per_lbuf * 48; lbufs_per_row = sc->cells_per_row / sc->cells_per_lbuf; /* descriptor index */ lbufd_index = num; /* 2 words per entry */ lbufd_addr = READ4(sc, HE_REGO_RCMLBM_BA) + lbufd_index * 2; /* write head of queue */ WRITE4(sc, HE_REGO_RLBF_H(num), lbufd_index); lbuf_row_cnt = 0; for (i = 0; i < numbuffs; i++) { lbuf_addr = (row_addr + lbuf_row_cnt * lbuf_size) / 32; WRITE_RCM4(sc, lbufd_addr, lbuf_addr); lbufd_index += 2; WRITE_RCM4(sc, lbufd_addr + 1, lbufd_index); if (++lbuf_row_cnt == lbufs_per_row) { lbuf_row_cnt = 0; row_addr += row_size; } lbufd_addr += 2 * 2; } WRITE4(sc, HE_REGO_RLBF_T(num), lbufd_index - 2); WRITE4(sc, HE_REGO_RLBF_C(num), numbuffs); BARRIER_W(sc); } static void hatm_init_tx_buffer_pool(struct hatm_softc *sc, u_int start, /* start row */ u_int numbuffs /* number of entries */ ) { u_int row_size; /* bytes per row */ uint32_t row_addr; /* start address of this row */ u_int lbuf_size; /* bytes per lbuf */ u_int lbufs_per_row; /* number of lbufs per memory row */ uint32_t lbufd_index; /* index of lbuf descriptor */ uint32_t lbufd_addr; /* address of lbuf descriptor */ u_int lbuf_row_cnt; /* current lbuf in current row */ uint32_t 
lbuf_addr; /* address of current buffer */ u_int i; row_size = sc->bytes_per_row;; row_addr = start * row_size; lbuf_size = sc->cells_per_lbuf * 48; lbufs_per_row = sc->cells_per_row / sc->cells_per_lbuf; /* descriptor index */ lbufd_index = sc->r0_numbuffs + sc->r1_numbuffs; /* 2 words per entry */ lbufd_addr = READ4(sc, HE_REGO_RCMLBM_BA) + lbufd_index * 2; /* write head of queue */ WRITE4(sc, HE_REGO_TLBF_H, lbufd_index); lbuf_row_cnt = 0; for (i = 0; i < numbuffs; i++) { lbuf_addr = (row_addr + lbuf_row_cnt * lbuf_size) / 32; WRITE_RCM4(sc, lbufd_addr, lbuf_addr); lbufd_index++; WRITE_RCM4(sc, lbufd_addr + 1, lbufd_index); if (++lbuf_row_cnt == lbufs_per_row) { lbuf_row_cnt = 0; row_addr += row_size; } lbufd_addr += 2; } WRITE4(sc, HE_REGO_TLBF_T, lbufd_index - 1); BARRIER_W(sc); } /* * 5.1.5 Initialize Intermediate Receive Queues */ static void hatm_init_imed_queues(struct hatm_softc *sc) { u_int n; if (sc->he622) { for (n = 0; n < 8; n++) { WRITE4(sc, HE_REGO_INMQ_S(n), 0x10*n+0x000f); WRITE4(sc, HE_REGO_INMQ_L(n), 0x10*n+0x200f); } } else { for (n = 0; n < 8; n++) { WRITE4(sc, HE_REGO_INMQ_S(n), n); WRITE4(sc, HE_REGO_INMQ_L(n), n+0x8); } } } /* * 5.1.7 Init CS block */ static void hatm_init_cs_block(struct hatm_softc *sc) { u_int n, i; u_int clkfreg, cellrate, decr, tmp; static const uint32_t erthr[2][5][3] = HE_REGT_CS_ERTHR; static const uint32_t erctl[2][3] = HE_REGT_CS_ERCTL; static const uint32_t erstat[2][2] = HE_REGT_CS_ERSTAT; static const uint32_t rtfwr[2] = HE_REGT_CS_RTFWR; static const uint32_t rtatr[2] = HE_REGT_CS_RTATR; static const uint32_t bwalloc[2][6] = HE_REGT_CS_BWALLOC; static const uint32_t orcf[2][2] = HE_REGT_CS_ORCF; /* Clear Rate Controller Start Times and Occupied Flags */ for (n = 0; n < 32; n++) WRITE_MBOX4(sc, HE_REGO_CS_STTIM(n), 0); clkfreg = sc->he622 ? HE_622_CLOCK : HE_155_CLOCK; cellrate = sc->he622 ? ATM_RATE_622M : ATM_RATE_155M; decr = cellrate / 32; for (n = 0; n < 16; n++) { tmp = clkfreg / cellrate; WRITE_MBOX4(sc, HE_REGO_CS_TGRLD(n), tmp - 1); cellrate -= decr; } i = (sc->cells_per_lbuf == 2) ? 0 :(sc->cells_per_lbuf == 4) ? 
1 : 2; /* table 5.2 */ WRITE_MBOX4(sc, HE_REGO_CS_ERTHR0, erthr[sc->he622][0][i]); WRITE_MBOX4(sc, HE_REGO_CS_ERTHR1, erthr[sc->he622][1][i]); WRITE_MBOX4(sc, HE_REGO_CS_ERTHR2, erthr[sc->he622][2][i]); WRITE_MBOX4(sc, HE_REGO_CS_ERTHR3, erthr[sc->he622][3][i]); WRITE_MBOX4(sc, HE_REGO_CS_ERTHR4, erthr[sc->he622][4][i]); WRITE_MBOX4(sc, HE_REGO_CS_ERCTL0, erctl[sc->he622][0]); WRITE_MBOX4(sc, HE_REGO_CS_ERCTL1, erctl[sc->he622][1]); WRITE_MBOX4(sc, HE_REGO_CS_ERCTL2, erctl[sc->he622][2]); WRITE_MBOX4(sc, HE_REGO_CS_ERSTAT0, erstat[sc->he622][0]); WRITE_MBOX4(sc, HE_REGO_CS_ERSTAT1, erstat[sc->he622][1]); WRITE_MBOX4(sc, HE_REGO_CS_RTFWR, rtfwr[sc->he622]); WRITE_MBOX4(sc, HE_REGO_CS_RTATR, rtatr[sc->he622]); WRITE_MBOX4(sc, HE_REGO_CS_TFBSET, bwalloc[sc->he622][0]); WRITE_MBOX4(sc, HE_REGO_CS_WCRMAX, bwalloc[sc->he622][1]); WRITE_MBOX4(sc, HE_REGO_CS_WCRMIN, bwalloc[sc->he622][2]); WRITE_MBOX4(sc, HE_REGO_CS_WCRINC, bwalloc[sc->he622][3]); WRITE_MBOX4(sc, HE_REGO_CS_WCRDEC, bwalloc[sc->he622][4]); WRITE_MBOX4(sc, HE_REGO_CS_WCRCEIL, bwalloc[sc->he622][5]); WRITE_MBOX4(sc, HE_REGO_CS_OTPPER, orcf[sc->he622][0]); WRITE_MBOX4(sc, HE_REGO_CS_OTWPER, orcf[sc->he622][1]); WRITE_MBOX4(sc, HE_REGO_CS_OTTLIM, 8); for (n = 0; n < 8; n++) WRITE_MBOX4(sc, HE_REGO_CS_HGRRT(n), 0); } /* * 5.1.8 CS Block Connection Memory Initialisation */ static void hatm_init_cs_block_cm(struct hatm_softc *sc) { u_int n, i; u_int expt, mant, etrm, wcr, ttnrm, tnrm; uint32_t rate; uint32_t clkfreq, cellrate, decr; uint32_t *rg, rtg, val = 0; uint64_t drate; u_int buf, buf_limit; uint32_t base = READ4(sc, HE_REGO_RCMABR_BA); for (n = 0; n < HE_REGL_CM_GQTBL; n++) WRITE_RCM4(sc, base + HE_REGO_CM_GQTBL + n, 0); for (n = 0; n < HE_REGL_CM_RGTBL; n++) WRITE_RCM4(sc, base + HE_REGO_CM_RGTBL + n, 0); tnrm = 0; for (n = 0; n < HE_REGL_CM_TNRMTBL * 4; n++) { expt = (n >> 5) & 0x1f; mant = ((n & 0x18) << 4) | 0x7f; wcr = (1 << expt) * (mant + 512) / 512; etrm = n & 0x7; ttnrm = wcr / 10 / (1 << etrm); if (ttnrm > 255) ttnrm = 255; else if(ttnrm < 2) ttnrm = 2; tnrm = (tnrm << 8) | (ttnrm & 0xff); if (n % 4 == 0) WRITE_RCM4(sc, base + HE_REGO_CM_TNRMTBL + (n/4), tnrm); } clkfreq = sc->he622 ? HE_622_CLOCK : HE_155_CLOCK; buf_limit = 4; cellrate = sc->he622 ? ATM_RATE_622M : ATM_RATE_155M; decr = cellrate / 32; /* compute GRID top row in 1000 * cps */ for (n = 0; n < 16; n++) { u_int interval = clkfreq / cellrate; sc->rate_grid[0][n] = (u_int64_t)clkfreq * 1000 / interval; cellrate -= decr; } /* compute the other rows according to 2.4 */ for (i = 1; i < 16; i++) for (n = 0; n < 16; n++) sc->rate_grid[i][n] = sc->rate_grid[i-1][n] / ((i < 14) ? 2 : 4); /* first entry is line rate */ n = hatm_cps2atmf(sc->he622 ? ATM_RATE_622M : ATM_RATE_155M); expt = (n >> 9) & 0x1f; mant = n & 0x1f0; sc->rate_grid[0][0] = (u_int64_t)(1 << expt) * 1000 * (mant + 512) / 512; cellrate = sc->he622 ?
ATM_RATE_622M : ATM_RATE_155M; rg = &sc->rate_grid[15][15]; for (rate = 0; rate < 2 * HE_REGL_CM_RTGTBL; rate++) { /* unpack the ATMF rate */ expt = rate >> 5; mant = (rate & 0x1f) << 4; /* get the cell rate - minimum is 10 per second */ drate = (uint64_t)(1 << expt) * 1000 * (mant + 512) / 512; if (drate < 10 * 1000) drate = 10 * 1000; /* now look up the grid index */ while (drate >= *rg && rg-- > &sc->rate_grid[0][0]) ; rg++; rtg = rg - &sc->rate_grid[0][0]; /* now compute the buffer limit */ buf = drate * sc->tx_numbuffs / (cellrate * 2) / 1000; if (buf == 0) buf = 1; else if (buf > buf_limit) buf = buf_limit; /* make value */ val = (val << 16) | (rtg << 8) | buf; /* write */ if (rate % 2 == 1) WRITE_RCM4(sc, base + HE_REGO_CM_RTGTBL + rate/2, val); } } /* * Clear an unused receive group buffer pool */ static void hatm_clear_rpool(struct hatm_softc *sc, u_int group, u_int large) { WRITE4(sc, HE_REGO_RBP_S(large, group), 0); WRITE4(sc, HE_REGO_RBP_T(large, group), 0); WRITE4(sc, HE_REGO_RBP_QI(large, group), 1); WRITE4(sc, HE_REGO_RBP_BL(large, group), 0); } /* * Initialize a receive group buffer pool */ static void hatm_init_rpool(struct hatm_softc *sc, struct herbp *q, u_int group, u_int large) { if (q->size == 0) { hatm_clear_rpool(sc, group, large); return; } bzero(q->mem.base, q->mem.size); q->rbp = q->mem.base; q->head = q->tail = 0; DBG(sc, ATTACH, ("RBP%u%c=0x%lx", group, "SL"[large], (u_long)q->mem.paddr)); WRITE4(sc, HE_REGO_RBP_S(large, group), q->mem.paddr); WRITE4(sc, HE_REGO_RBP_T(large, group), 0); WRITE4(sc, HE_REGO_RBP_QI(large, group), ((q->size - 1) << HE_REGS_RBP_SIZE) | HE_REGM_RBP_INTR_ENB | (q->thresh << HE_REGS_RBP_THRESH)); WRITE4(sc, HE_REGO_RBP_BL(large, group), (q->bsize >> 2) & ~1); } /* * Clear an unused receive buffer return queue */ static void hatm_clear_rbrq(struct hatm_softc *sc, u_int group) { WRITE4(sc, HE_REGO_RBRQ_ST(group), 0); WRITE4(sc, HE_REGO_RBRQ_H(group), 0); WRITE4(sc, HE_REGO_RBRQ_Q(group), (1 << HE_REGS_RBRQ_THRESH)); WRITE4(sc, HE_REGO_RBRQ_I(group), 0); } /* * Initialize receive buffer return queue */ static void hatm_init_rbrq(struct hatm_softc *sc, struct herbrq *rq, u_int group) { if (rq->size == 0) { hatm_clear_rbrq(sc, group); return; } rq->rbrq = rq->mem.base; rq->head = 0; DBG(sc, ATTACH, ("RBRQ%u=0x%lx", group, (u_long)rq->mem.paddr)); WRITE4(sc, HE_REGO_RBRQ_ST(group), rq->mem.paddr); WRITE4(sc, HE_REGO_RBRQ_H(group), 0); WRITE4(sc, HE_REGO_RBRQ_Q(group), (rq->thresh << HE_REGS_RBRQ_THRESH) | ((rq->size - 1) << HE_REGS_RBRQ_SIZE)); WRITE4(sc, HE_REGO_RBRQ_I(group), (rq->tout << HE_REGS_RBRQ_TIME) | (rq->pcnt << HE_REGS_RBRQ_COUNT)); } /* * Clear an unused transmit buffer return queue N */ static void hatm_clear_tbrq(struct hatm_softc *sc, u_int group) { WRITE4(sc, HE_REGO_TBRQ_B_T(group), 0); WRITE4(sc, HE_REGO_TBRQ_H(group), 0); WRITE4(sc, HE_REGO_TBRQ_S(group), 0); WRITE4(sc, HE_REGO_TBRQ_THRESH(group), 1); } /* * Initialize transmit buffer return queue N */ static void hatm_init_tbrq(struct hatm_softc *sc, struct hetbrq *tq, u_int group) { if (tq->size == 0) { hatm_clear_tbrq(sc, group); return; } tq->tbrq = tq->mem.base; tq->head = 0; DBG(sc, ATTACH, ("TBRQ%u=0x%lx", group, (u_long)tq->mem.paddr)); WRITE4(sc, HE_REGO_TBRQ_B_T(group), tq->mem.paddr); WRITE4(sc, HE_REGO_TBRQ_H(group), 0); WRITE4(sc, HE_REGO_TBRQ_S(group), tq->size - 1); WRITE4(sc, HE_REGO_TBRQ_THRESH(group), tq->thresh); } /* * Initialize TPDRQ */ static void hatm_init_tpdrq(struct hatm_softc *sc) { struct hetpdrq *tq; tq = &sc->tpdrq; tq->tpdrq = 
tq->mem.base; tq->tail = tq->head = 0; DBG(sc, ATTACH, ("TPDRQ=0x%lx", (u_long)tq->mem.paddr)); WRITE4(sc, HE_REGO_TPDRQ_H, tq->mem.paddr); WRITE4(sc, HE_REGO_TPDRQ_T, 0); WRITE4(sc, HE_REGO_TPDRQ_S, tq->size - 1); } /* * Function can be called by the infrastructure to start the card. */ static void hatm_init(void *p) { struct hatm_softc *sc = p; mtx_lock(&sc->mtx); hatm_stop(sc); hatm_initialize(sc); mtx_unlock(&sc->mtx); } enum { CTL_ISTATS, }; /* * Sysctl handler */ static int hatm_sysctl(SYSCTL_HANDLER_ARGS) { struct hatm_softc *sc = arg1; uint32_t *ret; int error; size_t len; switch (arg2) { case CTL_ISTATS: len = sizeof(sc->istats); break; default: panic("bad control code"); } ret = malloc(len, M_TEMP, M_WAITOK); mtx_lock(&sc->mtx); switch (arg2) { case CTL_ISTATS: sc->istats.mcc += READ4(sc, HE_REGO_MCC); sc->istats.oec += READ4(sc, HE_REGO_OEC); sc->istats.dcc += READ4(sc, HE_REGO_DCC); sc->istats.cec += READ4(sc, HE_REGO_CEC); bcopy(&sc->istats, ret, sizeof(sc->istats)); break; } mtx_unlock(&sc->mtx); error = SYSCTL_OUT(req, ret, len); free(ret, M_TEMP); return (error); } static int kenv_getuint(struct hatm_softc *sc, const char *var, u_int *ptr, u_int def, int rw) { char full[IFNAMSIZ + 3 + 20]; char *val, *end; u_int u; *ptr = def; if (SYSCTL_ADD_UINT(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree), OID_AUTO, var, rw ? CTLFLAG_RW : CTLFLAG_RD, ptr, 0, "") == NULL) return (ENOMEM); snprintf(full, sizeof(full), "hw.%s.%s", device_get_nameunit(sc->dev), var); if ((val = getenv(full)) == NULL) return (0); u = strtoul(val, &end, 0); if (end == val || *end != '\0') { freeenv(val); return (EINVAL); } if (bootverbose) if_printf(sc->ifp, "%s=%u\n", full, u); *ptr = u; return (0); } /* * Set configurable parameters. Many of these are configurable via * kenv. 
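 *
 * kenv_getuint() looks the value up as hw.<nameunit>.<var>, so each default
 * below can be overridden as a tunable, e.g. in loader.conf (variable
 * chosen for illustration):
 *
 *   hw.hatm0.rbps0_size="2048"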
*/ static int hatm_configure(struct hatm_softc *sc) { /* Receive buffer pool 0 small */ kenv_getuint(sc, "rbps0_size", &sc->rbp_s0.size, HE_CONFIG_RBPS0_SIZE, 0); kenv_getuint(sc, "rbps0_thresh", &sc->rbp_s0.thresh, HE_CONFIG_RBPS0_THRESH, 0); sc->rbp_s0.bsize = MBUF0_SIZE; /* Receive buffer pool 0 large */ kenv_getuint(sc, "rbpl0_size", &sc->rbp_l0.size, HE_CONFIG_RBPL0_SIZE, 0); kenv_getuint(sc, "rbpl0_thresh", &sc->rbp_l0.thresh, HE_CONFIG_RBPL0_THRESH, 0); sc->rbp_l0.bsize = MCLBYTES - MBUFL_OFFSET; /* Receive buffer return queue 0 */ kenv_getuint(sc, "rbrq0_size", &sc->rbrq_0.size, HE_CONFIG_RBRQ0_SIZE, 0); kenv_getuint(sc, "rbrq0_thresh", &sc->rbrq_0.thresh, HE_CONFIG_RBRQ0_THRESH, 0); kenv_getuint(sc, "rbrq0_tout", &sc->rbrq_0.tout, HE_CONFIG_RBRQ0_TOUT, 0); kenv_getuint(sc, "rbrq0_pcnt", &sc->rbrq_0.pcnt, HE_CONFIG_RBRQ0_PCNT, 0); /* Receive buffer pool 1 small */ kenv_getuint(sc, "rbps1_size", &sc->rbp_s1.size, HE_CONFIG_RBPS1_SIZE, 0); kenv_getuint(sc, "rbps1_thresh", &sc->rbp_s1.thresh, HE_CONFIG_RBPS1_THRESH, 0); sc->rbp_s1.bsize = MBUF1_SIZE; /* Receive buffer return queue 1 */ kenv_getuint(sc, "rbrq1_size", &sc->rbrq_1.size, HE_CONFIG_RBRQ1_SIZE, 0); kenv_getuint(sc, "rbrq1_thresh", &sc->rbrq_1.thresh, HE_CONFIG_RBRQ1_THRESH, 0); kenv_getuint(sc, "rbrq1_tout", &sc->rbrq_1.tout, HE_CONFIG_RBRQ1_TOUT, 0); kenv_getuint(sc, "rbrq1_pcnt", &sc->rbrq_1.pcnt, HE_CONFIG_RBRQ1_PCNT, 0); /* Interrupt queue 0 */ kenv_getuint(sc, "irq0_size", &sc->irq_0.size, HE_CONFIG_IRQ0_SIZE, 0); kenv_getuint(sc, "irq0_thresh", &sc->irq_0.thresh, HE_CONFIG_IRQ0_THRESH, 0); sc->irq_0.line = HE_CONFIG_IRQ0_LINE; /* Transmit buffer return queue 0 */ kenv_getuint(sc, "tbrq0_size", &sc->tbrq.size, HE_CONFIG_TBRQ_SIZE, 0); kenv_getuint(sc, "tbrq0_thresh", &sc->tbrq.thresh, HE_CONFIG_TBRQ_THRESH, 0); /* Transmit buffer ready queue */ kenv_getuint(sc, "tpdrq_size", &sc->tpdrq.size, HE_CONFIG_TPDRQ_SIZE, 0); /* Max TPDs per VCC */ kenv_getuint(sc, "tpdmax", &sc->max_tpd, HE_CONFIG_TPD_MAXCC, 0); /* external mbuf pages */ kenv_getuint(sc, "max_mbuf_pages", &sc->mbuf_max_pages, HE_CONFIG_MAX_MBUF_PAGES, 0); /* mpsafe */ kenv_getuint(sc, "mpsafe", &sc->mpsafe, 0, 0); if (sc->mpsafe != 0) sc->mpsafe = INTR_MPSAFE; return (0); } #ifdef HATM_DEBUG /* * Get TSRs from connection memory */ static int hatm_sysctl_tsr(SYSCTL_HANDLER_ARGS) { struct hatm_softc *sc = arg1; int error, i, j; uint32_t *val; val = malloc(sizeof(uint32_t) * HE_MAX_VCCS * 15, M_TEMP, M_WAITOK); mtx_lock(&sc->mtx); for (i = 0; i < HE_MAX_VCCS; i++) for (j = 0; j <= 14; j++) val[15 * i + j] = READ_TSR(sc, i, j); mtx_unlock(&sc->mtx); error = SYSCTL_OUT(req, val, sizeof(uint32_t) * HE_MAX_VCCS * 15); free(val, M_TEMP); if (error != 0 || req->newptr == NULL) return (error); return (EPERM); } /* * Get TPDs from connection memory */ static int hatm_sysctl_tpd(SYSCTL_HANDLER_ARGS) { struct hatm_softc *sc = arg1; int error, i, j; uint32_t *val; val = malloc(sizeof(uint32_t) * HE_MAX_VCCS * 16, M_TEMP, M_WAITOK); mtx_lock(&sc->mtx); for (i = 0; i < HE_MAX_VCCS; i++) for (j = 0; j < 16; j++) val[16 * i + j] = READ_TCM4(sc, 16 * i + j); mtx_unlock(&sc->mtx); error = SYSCTL_OUT(req, val, sizeof(uint32_t) * HE_MAX_VCCS * 16); free(val, M_TEMP); if (error != 0 || req->newptr == NULL) return (error); return (EPERM); } /* * Get mbox registers */ static int hatm_sysctl_mbox(SYSCTL_HANDLER_ARGS) { struct hatm_softc *sc = arg1; int error, i; uint32_t *val; val = malloc(sizeof(uint32_t) * HE_REGO_CS_END, M_TEMP, M_WAITOK); mtx_lock(&sc->mtx); for (i = 0; i < 
HE_REGO_CS_END; i++) val[i] = READ_MBOX4(sc, i); mtx_unlock(&sc->mtx); error = SYSCTL_OUT(req, val, sizeof(uint32_t) * HE_REGO_CS_END); free(val, M_TEMP); if (error != 0 || req->newptr == NULL) return (error); return (EPERM); } /* * Get connection memory */ static int hatm_sysctl_cm(SYSCTL_HANDLER_ARGS) { struct hatm_softc *sc = arg1; int error, i; uint32_t *val; val = malloc(sizeof(uint32_t) * (HE_CONFIG_RXMEM + 1), M_TEMP, M_WAITOK); mtx_lock(&sc->mtx); val[0] = READ4(sc, HE_REGO_RCMABR_BA); for (i = 0; i < HE_CONFIG_RXMEM; i++) val[i + 1] = READ_RCM4(sc, i); mtx_unlock(&sc->mtx); error = SYSCTL_OUT(req, val, sizeof(uint32_t) * (HE_CONFIG_RXMEM + 1)); free(val, M_TEMP); if (error != 0 || req->newptr == NULL) return (error); return (EPERM); } /* * Get local buffer memory */ static int hatm_sysctl_lbmem(SYSCTL_HANDLER_ARGS) { struct hatm_softc *sc = arg1; int error, i; uint32_t *val; u_int bytes = (1 << 21); val = malloc(bytes, M_TEMP, M_WAITOK); mtx_lock(&sc->mtx); for (i = 0; i < bytes / 4; i++) val[i] = READ_LB4(sc, i); mtx_unlock(&sc->mtx); error = SYSCTL_OUT(req, val, bytes); free(val, M_TEMP); if (error != 0 || req->newptr == NULL) return (error); return (EPERM); } /* * Get all card registers */ static int hatm_sysctl_heregs(SYSCTL_HANDLER_ARGS) { struct hatm_softc *sc = arg1; int error, i; uint32_t *val; val = malloc(HE_REGO_END, M_TEMP, M_WAITOK); mtx_lock(&sc->mtx); for (i = 0; i < HE_REGO_END; i += 4) val[i / 4] = READ4(sc, i); mtx_unlock(&sc->mtx); error = SYSCTL_OUT(req, val, HE_REGO_END); free(val, M_TEMP); if (error != 0 || req->newptr == NULL) return (error); return (EPERM); } #endif /* * Suni register access */ /* * read at most n SUNI registers starting at reg into val */ static int hatm_utopia_readregs(struct ifatm *ifatm, u_int reg, uint8_t *val, u_int *n) { u_int i; - struct hatm_softc *sc = (struct hatm_softc *)ifatm; + struct hatm_softc *sc = ifatm->ifp->if_softc; if (reg >= (HE_REGO_SUNI_END - HE_REGO_SUNI) / 4) return (EINVAL); if (reg + *n > (HE_REGO_SUNI_END - HE_REGO_SUNI) / 4) *n = reg - (HE_REGO_SUNI_END - HE_REGO_SUNI) / 4; mtx_assert(&sc->mtx, MA_OWNED); for (i = 0; i < *n; i++) val[i] = READ4(sc, HE_REGO_SUNI + 4 * (reg + i)); return (0); } /* * change the bits given by mask to them in val in register reg */ static int hatm_utopia_writereg(struct ifatm *ifatm, u_int reg, u_int mask, u_int val) { uint32_t regval; - struct hatm_softc *sc = (struct hatm_softc *)ifatm; + struct hatm_softc *sc = ifatm->ifp->if_softc; if (reg >= (HE_REGO_SUNI_END - HE_REGO_SUNI) / 4) return (EINVAL); mtx_assert(&sc->mtx, MA_OWNED); regval = READ4(sc, HE_REGO_SUNI + 4 * reg); regval = (regval & ~mask) | (val & mask); WRITE4(sc, HE_REGO_SUNI + 4 * reg, regval); return (0); } static struct utopia_methods hatm_utopia_methods = { hatm_utopia_readregs, hatm_utopia_writereg, }; /* * Detach - if it is running, stop. Destroy. */ static int hatm_detach(device_t dev) { - struct hatm_softc *sc = (struct hatm_softc *)device_get_softc(dev); + struct hatm_softc *sc = device_get_softc(dev); mtx_lock(&sc->mtx); hatm_stop(sc); if (sc->utopia.state & UTP_ST_ATTACHED) { utopia_stop(&sc->utopia); utopia_detach(&sc->utopia); } mtx_unlock(&sc->mtx); atm_ifdetach(sc->ifp); if_free(sc->ifp); hatm_destroy(sc); return (0); } /* * Attach to the device. Assume that no locking is needed here. * All resource we allocate here are freed by calling hatm_destroy. 
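 *
 * Because of that, every error path below can simply 'goto failed':
 * hatm_destroy() checks each pointer and flag and releases only what has
 * actually been set up.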
*/ static int hatm_attach(device_t dev) { struct hatm_softc *sc; int error; uint32_t v; struct ifnet *ifp; sc = device_get_softc(dev); ifp = sc->ifp = if_alloc(IFT_ATM); if (ifp == NULL) { device_printf(dev, "could not if_alloc()\n"); error = ENOSPC; goto failed; } sc->dev = dev; IFP2IFATM(sc->ifp)->mib.device = ATM_DEVICE_HE155; IFP2IFATM(sc->ifp)->mib.serial = 0; IFP2IFATM(sc->ifp)->mib.hw_version = 0; IFP2IFATM(sc->ifp)->mib.sw_version = 0; IFP2IFATM(sc->ifp)->mib.vpi_bits = HE_CONFIG_VPI_BITS; IFP2IFATM(sc->ifp)->mib.vci_bits = HE_CONFIG_VCI_BITS; IFP2IFATM(sc->ifp)->mib.max_vpcs = 0; IFP2IFATM(sc->ifp)->mib.max_vccs = HE_MAX_VCCS; IFP2IFATM(sc->ifp)->mib.media = IFM_ATM_UNKNOWN; sc->he622 = 0; IFP2IFATM(sc->ifp)->phy = &sc->utopia; SLIST_INIT(&sc->tpd_free); mtx_init(&sc->mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF); cv_init(&sc->vcc_cv, "HEVCCcv"); cv_init(&sc->cv_rcclose, "RCClose"); sysctl_ctx_init(&sc->sysctl_ctx); /* * 4.2 BIOS Configuration */ v = pci_read_config(dev, PCIR_COMMAND, 2); v |= PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN | PCIM_CMD_MWRICEN; pci_write_config(dev, PCIR_COMMAND, v, 2); /* * 4.3 PCI Bus Controller-Specific Initialisation */ v = pci_read_config(dev, HE_PCIR_GEN_CNTL_0, 4); v |= HE_PCIM_CTL0_MRL | HE_PCIM_CTL0_MRM | HE_PCIM_CTL0_IGNORE_TIMEOUT; #if BYTE_ORDER == BIG_ENDIAN && 0 v |= HE_PCIM_CTL0_BIGENDIAN; #endif pci_write_config(dev, HE_PCIR_GEN_CNTL_0, v, 4); /* * Map memory */ v = pci_read_config(dev, PCIR_COMMAND, 2); if (!(v & PCIM_CMD_MEMEN)) { device_printf(dev, "failed to enable memory\n"); error = ENXIO; goto failed; } sc->memid = PCIR_BAR(0); sc->memres = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->memid, RF_ACTIVE); if (sc->memres == NULL) { device_printf(dev, "could not map memory\n"); error = ENXIO; goto failed; } sc->memh = rman_get_bushandle(sc->memres); sc->memt = rman_get_bustag(sc->memres); /* * Allocate a DMA tag for subsequent allocations */ if (bus_dma_tag_create(NULL, 1, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, BUS_SPACE_MAXSIZE_32BIT, 1, BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL, &sc->parent_tag)) { device_printf(dev, "could not allocate DMA tag\n"); error = ENOMEM; goto failed; } if (bus_dma_tag_create(sc->parent_tag, 1, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MBUF_ALLOC_SIZE, 1, MBUF_ALLOC_SIZE, 0, NULL, NULL, &sc->mbuf_tag)) { device_printf(dev, "could not allocate mbuf DMA tag\n"); error = ENOMEM; goto failed; } /* * Allocate a DMA tag for packets to send. Here we have a problem with * the specification of the maximum number of segments. Theoretically * this would be the size of the transmit ring - 1 multiplied by 3, * but this would not work. So make the maximum number of TPDs * occupied by one packet a configuration parameter.
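 *
 * Each TPD is assumed to carry three buffer descriptors, which gives the
 * segment count of 3 * HE_CONFIG_MAX_TPD_PER_PACKET used below; e.g. a
 * limit of 16 TPDs per packet would allow up to 48 DMA segments.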
*/ if (bus_dma_tag_create(NULL, 1, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, HE_MAX_PDU, 3 * HE_CONFIG_MAX_TPD_PER_PACKET, HE_MAX_PDU, 0, NULL, NULL, &sc->tx_tag)) { device_printf(dev, "could not allocate TX tag\n"); error = ENOMEM; goto failed; } /* * Setup the interrupt */ sc->irqid = 0; sc->irqres = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->irqid, RF_SHAREABLE | RF_ACTIVE); if (sc->irqres == 0) { device_printf(dev, "could not allocate irq\n"); error = ENXIO; goto failed; } ifp->if_softc = sc; if_initname(ifp, device_get_name(dev), device_get_unit(dev)); /* * Make the sysctl tree */ error = ENOMEM; if ((sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctx, SYSCTL_STATIC_CHILDREN(_hw_atm), OID_AUTO, device_get_nameunit(dev), CTLFLAG_RD, 0, "")) == NULL) goto failed; if (SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree), OID_AUTO, "istats", CTLFLAG_RD | CTLTYPE_OPAQUE, sc, CTL_ISTATS, hatm_sysctl, "LU", "internal statistics") == NULL) goto failed; #ifdef HATM_DEBUG if (SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree), OID_AUTO, "tsr", CTLFLAG_RD | CTLTYPE_OPAQUE, sc, 0, hatm_sysctl_tsr, "S", "transmission status registers") == NULL) goto failed; if (SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree), OID_AUTO, "tpd", CTLFLAG_RD | CTLTYPE_OPAQUE, sc, 0, hatm_sysctl_tpd, "S", "transmission packet descriptors") == NULL) goto failed; if (SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree), OID_AUTO, "mbox", CTLFLAG_RD | CTLTYPE_OPAQUE, sc, 0, hatm_sysctl_mbox, "S", "mbox registers") == NULL) goto failed; if (SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree), OID_AUTO, "cm", CTLFLAG_RD | CTLTYPE_OPAQUE, sc, 0, hatm_sysctl_cm, "S", "connection memory") == NULL) goto failed; if (SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree), OID_AUTO, "heregs", CTLFLAG_RD | CTLTYPE_OPAQUE, sc, 0, hatm_sysctl_heregs, "S", "card registers") == NULL) goto failed; if (SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree), OID_AUTO, "lbmem", CTLFLAG_RD | CTLTYPE_OPAQUE, sc, 0, hatm_sysctl_lbmem, "S", "local memory") == NULL) goto failed; kenv_getuint(sc, "debug", &sc->debug, HATM_DEBUG, 1); #endif /* * Configure */ if ((error = hatm_configure(sc)) != 0) goto failed; /* * Compute memory parameters */ if (sc->rbp_s0.size != 0) { sc->rbp_s0.mask = (sc->rbp_s0.size - 1) << 3; sc->rbp_s0.mem.size = sc->rbp_s0.size * 8; sc->rbp_s0.mem.align = sc->rbp_s0.mem.size; } if (sc->rbp_l0.size != 0) { sc->rbp_l0.mask = (sc->rbp_l0.size - 1) << 3; sc->rbp_l0.mem.size = sc->rbp_l0.size * 8; sc->rbp_l0.mem.align = sc->rbp_l0.mem.size; } if (sc->rbp_s1.size != 0) { sc->rbp_s1.mask = (sc->rbp_s1.size - 1) << 3; sc->rbp_s1.mem.size = sc->rbp_s1.size * 8; sc->rbp_s1.mem.align = sc->rbp_s1.mem.size; } if (sc->rbrq_0.size != 0) { sc->rbrq_0.mem.size = sc->rbrq_0.size * 8; sc->rbrq_0.mem.align = sc->rbrq_0.mem.size; } if (sc->rbrq_1.size != 0) { sc->rbrq_1.mem.size = sc->rbrq_1.size * 8; sc->rbrq_1.mem.align = sc->rbrq_1.mem.size; } sc->irq_0.mem.size = sc->irq_0.size * sizeof(uint32_t); sc->irq_0.mem.align = 4 * 1024; sc->tbrq.mem.size = sc->tbrq.size * 4; sc->tbrq.mem.align = 2 * sc->tbrq.mem.size; /* ZZZ */ sc->tpdrq.mem.size = sc->tpdrq.size * 8; sc->tpdrq.mem.align = sc->tpdrq.mem.size; sc->hsp_mem.size = sizeof(struct he_hsp); sc->hsp_mem.align = 1024; sc->lbufs_size = sc->rbp_l0.size + sc->rbrq_0.size; sc->tpd_total = sc->tbrq.size + sc->tpdrq.size; sc->tpds.align = 64; sc->tpds.size = sc->tpd_total * 
HE_TPD_SIZE; hatm_init_rmaps(sc); hatm_init_smbufs(sc); if ((error = hatm_init_tpds(sc)) != 0) goto failed; /* * Allocate memory */ if ((error = hatm_alloc_dmamem(sc, "IRQ", &sc->irq_0.mem)) != 0 || (error = hatm_alloc_dmamem(sc, "TBRQ0", &sc->tbrq.mem)) != 0 || (error = hatm_alloc_dmamem(sc, "TPDRQ", &sc->tpdrq.mem)) != 0 || (error = hatm_alloc_dmamem(sc, "HSP", &sc->hsp_mem)) != 0) goto failed; if (sc->rbp_s0.mem.size != 0 && (error = hatm_alloc_dmamem(sc, "RBPS0", &sc->rbp_s0.mem))) goto failed; if (sc->rbp_l0.mem.size != 0 && (error = hatm_alloc_dmamem(sc, "RBPL0", &sc->rbp_l0.mem))) goto failed; if (sc->rbp_s1.mem.size != 0 && (error = hatm_alloc_dmamem(sc, "RBPS1", &sc->rbp_s1.mem))) goto failed; if (sc->rbrq_0.mem.size != 0 && (error = hatm_alloc_dmamem(sc, "RBRQ0", &sc->rbrq_0.mem))) goto failed; if (sc->rbrq_1.mem.size != 0 && (error = hatm_alloc_dmamem(sc, "RBRQ1", &sc->rbrq_1.mem))) goto failed; if ((sc->vcc_zone = uma_zcreate("HE vccs", sizeof(struct hevcc), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0)) == NULL) { device_printf(dev, "cannot allocate zone for vccs\n"); goto failed; } /* * 4.4 Reset the card. */ if ((error = hatm_reset(sc)) != 0) goto failed; /* * Read the prom. */ hatm_init_bus_width(sc); hatm_init_read_eeprom(sc); hatm_init_endianess(sc); /* * Initialize interface */ ifp->if_flags = IFF_SIMPLEX; ifp->if_ioctl = hatm_ioctl; ifp->if_start = hatm_start; ifp->if_watchdog = NULL; ifp->if_init = hatm_init; utopia_attach(&sc->utopia, IFP2IFATM(sc->ifp), &sc->media, &sc->mtx, &sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree), &hatm_utopia_methods); utopia_init_media(&sc->utopia); /* these two SUNI routines need the lock */ mtx_lock(&sc->mtx); /* poll while we are not running */ sc->utopia.flags |= UTP_FL_POLL_CARRIER; utopia_start(&sc->utopia); utopia_reset(&sc->utopia); mtx_unlock(&sc->mtx); atm_ifattach(ifp); #ifdef ENABLE_BPF bpfattach(ifp, DLT_ATM_RFC1483, sizeof(struct atmllc)); #endif error = bus_setup_intr(dev, sc->irqres, sc->mpsafe | INTR_TYPE_NET, hatm_intr, &sc->irq_0, &sc->ih); if (error != 0) { device_printf(dev, "could not setup interrupt\n"); hatm_detach(dev); return (error); } return (0); failed: hatm_destroy(sc); return (error); } /* * Start the interface. Assume a state as from attach(). */ void hatm_initialize(struct hatm_softc *sc) { uint32_t v; u_int cid; static const u_int layout[2][7] = HE_CONFIG_MEM_LAYOUT; if (sc->ifp->if_flags & IFF_RUNNING) return; hatm_init_bus_width(sc); hatm_init_endianess(sc); if_printf(sc->ifp, "%s, Rev. %s, S/N %u, " "MAC=%02x:%02x:%02x:%02x:%02x:%02x (%ubit PCI)\n", sc->prod_id, sc->rev, IFP2IFATM(sc->ifp)->mib.serial, IFP2IFATM(sc->ifp)->mib.esi[0], IFP2IFATM(sc->ifp)->mib.esi[1], IFP2IFATM(sc->ifp)->mib.esi[2], IFP2IFATM(sc->ifp)->mib.esi[3], IFP2IFATM(sc->ifp)->mib.esi[4], IFP2IFATM(sc->ifp)->mib.esi[5], sc->pci64 ? 
	    64 : 32);

	/*
	 * 4.8 SDRAM Controller Initialisation
	 * 4.9 Initialize RNUM value
	 */
	if (sc->he622)
		WRITE4(sc, HE_REGO_SDRAM_CNTL, HE_REGM_SDRAM_64BIT);
	else
		WRITE4(sc, HE_REGO_SDRAM_CNTL, 0);
	BARRIER_W(sc);

	v = READ4(sc, HE_REGO_LB_SWAP);
	BARRIER_R(sc);
	v |= 0xf << HE_REGS_LBSWAP_RNUM;
	WRITE4(sc, HE_REGO_LB_SWAP, v);
	BARRIER_W(sc);

	hatm_init_irq(sc, &sc->irq_0, 0);
	hatm_clear_irq(sc, 1);
	hatm_clear_irq(sc, 2);
	hatm_clear_irq(sc, 3);

	WRITE4(sc, HE_REGO_GRP_1_0_MAP, 0);
	WRITE4(sc, HE_REGO_GRP_3_2_MAP, 0);
	WRITE4(sc, HE_REGO_GRP_5_4_MAP, 0);
	WRITE4(sc, HE_REGO_GRP_7_6_MAP, 0);
	BARRIER_W(sc);

	/*
	 * 4.11 Enable PCI Bus Controller State Machine
	 */
	v = READ4(sc, HE_REGO_HOST_CNTL);
	BARRIER_R(sc);
	v |= HE_REGM_HOST_OUTFF_ENB | HE_REGM_HOST_CMDFF_ENB |
	    HE_REGM_HOST_QUICK_RD | HE_REGM_HOST_QUICK_WR;
	WRITE4(sc, HE_REGO_HOST_CNTL, v);
	BARRIER_W(sc);

	/*
	 * 5.1.1 Generic configuration state
	 */
	sc->cells_per_row = layout[sc->he622][0];
	sc->bytes_per_row = layout[sc->he622][1];
	sc->r0_numrows = layout[sc->he622][2];
	sc->tx_numrows = layout[sc->he622][3];
	sc->r1_numrows = layout[sc->he622][4];
	sc->r0_startrow = layout[sc->he622][5];
	sc->tx_startrow = sc->r0_startrow + sc->r0_numrows;
	sc->r1_startrow = sc->tx_startrow + sc->tx_numrows;
	sc->cells_per_lbuf = layout[sc->he622][6];

	sc->r0_numbuffs = sc->r0_numrows *
	    (sc->cells_per_row / sc->cells_per_lbuf);
	sc->r1_numbuffs = sc->r1_numrows *
	    (sc->cells_per_row / sc->cells_per_lbuf);
	sc->tx_numbuffs = sc->tx_numrows *
	    (sc->cells_per_row / sc->cells_per_lbuf);

	if (sc->r0_numbuffs > 2560)
		sc->r0_numbuffs = 2560;
	if (sc->r1_numbuffs > 2560)
		sc->r1_numbuffs = 2560;
	if (sc->tx_numbuffs > 5120)
		sc->tx_numbuffs = 5120;

	DBG(sc, ATTACH, ("cells_per_row=%u bytes_per_row=%u r0_numrows=%u "
	    "tx_numrows=%u r1_numrows=%u r0_startrow=%u tx_startrow=%u "
	    "r1_startrow=%u cells_per_lbuf=%u\nr0_numbuffs=%u r1_numbuffs=%u "
	    "tx_numbuffs=%u\n", sc->cells_per_row, sc->bytes_per_row,
	    sc->r0_numrows, sc->tx_numrows, sc->r1_numrows, sc->r0_startrow,
	    sc->tx_startrow, sc->r1_startrow, sc->cells_per_lbuf,
	    sc->r0_numbuffs, sc->r1_numbuffs, sc->tx_numbuffs));

	/*
	 * 5.1.2 Configure hardware-dependent registers
	 */
	if (sc->he622) {
		WRITE4(sc, HE_REGO_LBARB,
		    (0x2 << HE_REGS_LBARB_SLICE) |
		    (0xf << HE_REGS_LBARB_RNUM) |
		    (0x3 << HE_REGS_LBARB_THPRI) |
		    (0x3 << HE_REGS_LBARB_RHPRI) |
		    (0x2 << HE_REGS_LBARB_TLPRI) |
		    (0x1 << HE_REGS_LBARB_RLPRI) |
		    (0x28 << HE_REGS_LBARB_BUS_MULT) |
		    (0x50 << HE_REGS_LBARB_NET_PREF));
		BARRIER_W(sc);
		WRITE4(sc, HE_REGO_SDRAMCON,
		    /* HW bug: don't use banking */
		    /* HE_REGM_SDRAMCON_BANK | */
		    HE_REGM_SDRAMCON_WIDE |
		    (0x384 << HE_REGS_SDRAMCON_REF));
		BARRIER_W(sc);
		WRITE4(sc, HE_REGO_RCMCONFIG,
		    (0x1 << HE_REGS_RCMCONFIG_BANK_WAIT) |
		    (0x1 << HE_REGS_RCMCONFIG_RW_WAIT) |
		    (0x0 << HE_REGS_RCMCONFIG_TYPE));
		WRITE4(sc, HE_REGO_TCMCONFIG,
		    (0x2 << HE_REGS_TCMCONFIG_BANK_WAIT) |
		    (0x1 << HE_REGS_TCMCONFIG_RW_WAIT) |
		    (0x0 << HE_REGS_TCMCONFIG_TYPE));
	} else {
		WRITE4(sc, HE_REGO_LBARB,
		    (0x2 << HE_REGS_LBARB_SLICE) |
		    (0xf << HE_REGS_LBARB_RNUM) |
		    (0x3 << HE_REGS_LBARB_THPRI) |
		    (0x3 << HE_REGS_LBARB_RHPRI) |
		    (0x2 << HE_REGS_LBARB_TLPRI) |
		    (0x1 << HE_REGS_LBARB_RLPRI) |
		    (0x46 << HE_REGS_LBARB_BUS_MULT) |
		    (0x8C << HE_REGS_LBARB_NET_PREF));
		BARRIER_W(sc);
		WRITE4(sc, HE_REGO_SDRAMCON,
		    /* HW bug: don't use banking */
		    /* HE_REGM_SDRAMCON_BANK | */
		    (0x150 << HE_REGS_SDRAMCON_REF));
		BARRIER_W(sc);
		WRITE4(sc, HE_REGO_RCMCONFIG,
		    (0x0 << HE_REGS_RCMCONFIG_BANK_WAIT) |
		    (0x1 << HE_REGS_RCMCONFIG_RW_WAIT) |
		    (0x0 << HE_REGS_RCMCONFIG_TYPE));
		WRITE4(sc, HE_REGO_TCMCONFIG,
		    (0x1 << HE_REGS_TCMCONFIG_BANK_WAIT) |
(0x1 << HE_REGS_TCMCONFIG_RW_WAIT) | (0x0 << HE_REGS_TCMCONFIG_TYPE)); } WRITE4(sc, HE_REGO_LBCONFIG, (sc->cells_per_lbuf * 48)); WRITE4(sc, HE_REGO_RLBC_H, 0); WRITE4(sc, HE_REGO_RLBC_T, 0); WRITE4(sc, HE_REGO_RLBC_H2, 0); WRITE4(sc, HE_REGO_RXTHRSH, 512); WRITE4(sc, HE_REGO_LITHRSH, 256); WRITE4(sc, HE_REGO_RLBF0_C, sc->r0_numbuffs); WRITE4(sc, HE_REGO_RLBF1_C, sc->r1_numbuffs); if (sc->he622) { WRITE4(sc, HE_REGO_RCCONFIG, (8 << HE_REGS_RCCONFIG_UTDELAY) | (IFP2IFATM(sc->ifp)->mib.vpi_bits << HE_REGS_RCCONFIG_VP) | (IFP2IFATM(sc->ifp)->mib.vci_bits << HE_REGS_RCCONFIG_VC)); WRITE4(sc, HE_REGO_TXCONFIG, (32 << HE_REGS_TXCONFIG_THRESH) | (IFP2IFATM(sc->ifp)->mib.vci_bits << HE_REGS_TXCONFIG_VCI_MASK) | (sc->tx_numbuffs << HE_REGS_TXCONFIG_LBFREE)); } else { WRITE4(sc, HE_REGO_RCCONFIG, (0 << HE_REGS_RCCONFIG_UTDELAY) | HE_REGM_RCCONFIG_UT_MODE | (IFP2IFATM(sc->ifp)->mib.vpi_bits << HE_REGS_RCCONFIG_VP) | (IFP2IFATM(sc->ifp)->mib.vci_bits << HE_REGS_RCCONFIG_VC)); WRITE4(sc, HE_REGO_TXCONFIG, (32 << HE_REGS_TXCONFIG_THRESH) | HE_REGM_TXCONFIG_UTMODE | (IFP2IFATM(sc->ifp)->mib.vci_bits << HE_REGS_TXCONFIG_VCI_MASK) | (sc->tx_numbuffs << HE_REGS_TXCONFIG_LBFREE)); } WRITE4(sc, HE_REGO_TXAAL5_PROTO, 0); if (sc->rbp_s1.size != 0) { WRITE4(sc, HE_REGO_RHCONFIG, HE_REGM_RHCONFIG_PHYENB | ((sc->he622 ? 0x41 : 0x31) << HE_REGS_RHCONFIG_PTMR_PRE) | (1 << HE_REGS_RHCONFIG_OAM_GID)); } else { WRITE4(sc, HE_REGO_RHCONFIG, HE_REGM_RHCONFIG_PHYENB | ((sc->he622 ? 0x41 : 0x31) << HE_REGS_RHCONFIG_PTMR_PRE) | (0 << HE_REGS_RHCONFIG_OAM_GID)); } BARRIER_W(sc); hatm_init_cm(sc); hatm_init_rx_buffer_pool(sc, 0, sc->r0_startrow, sc->r0_numbuffs); hatm_init_rx_buffer_pool(sc, 1, sc->r1_startrow, sc->r1_numbuffs); hatm_init_tx_buffer_pool(sc, sc->tx_startrow, sc->tx_numbuffs); hatm_init_imed_queues(sc); /* * 5.1.6 Application tunable Parameters */ WRITE4(sc, HE_REGO_MCC, 0); WRITE4(sc, HE_REGO_OEC, 0); WRITE4(sc, HE_REGO_DCC, 0); WRITE4(sc, HE_REGO_CEC, 0); hatm_init_cs_block(sc); hatm_init_cs_block_cm(sc); hatm_init_rpool(sc, &sc->rbp_s0, 0, 0); hatm_init_rpool(sc, &sc->rbp_l0, 0, 1); hatm_init_rpool(sc, &sc->rbp_s1, 1, 0); hatm_clear_rpool(sc, 1, 1); hatm_clear_rpool(sc, 2, 0); hatm_clear_rpool(sc, 2, 1); hatm_clear_rpool(sc, 3, 0); hatm_clear_rpool(sc, 3, 1); hatm_clear_rpool(sc, 4, 0); hatm_clear_rpool(sc, 4, 1); hatm_clear_rpool(sc, 5, 0); hatm_clear_rpool(sc, 5, 1); hatm_clear_rpool(sc, 6, 0); hatm_clear_rpool(sc, 6, 1); hatm_clear_rpool(sc, 7, 0); hatm_clear_rpool(sc, 7, 1); hatm_init_rbrq(sc, &sc->rbrq_0, 0); hatm_init_rbrq(sc, &sc->rbrq_1, 1); hatm_clear_rbrq(sc, 2); hatm_clear_rbrq(sc, 3); hatm_clear_rbrq(sc, 4); hatm_clear_rbrq(sc, 5); hatm_clear_rbrq(sc, 6); hatm_clear_rbrq(sc, 7); sc->lbufs_next = 0; bzero(sc->lbufs, sizeof(sc->lbufs[0]) * sc->lbufs_size); hatm_init_tbrq(sc, &sc->tbrq, 0); hatm_clear_tbrq(sc, 1); hatm_clear_tbrq(sc, 2); hatm_clear_tbrq(sc, 3); hatm_clear_tbrq(sc, 4); hatm_clear_tbrq(sc, 5); hatm_clear_tbrq(sc, 6); hatm_clear_tbrq(sc, 7); hatm_init_tpdrq(sc); WRITE4(sc, HE_REGO_UBUFF_BA, (sc->he622 ? 
	    0x104780 : 0x800));

	/*
	 * Initialize HSP
	 */
	bzero(sc->hsp_mem.base, sc->hsp_mem.size);
	sc->hsp = sc->hsp_mem.base;
	WRITE4(sc, HE_REGO_HSP_BA, sc->hsp_mem.paddr);

	/*
	 * 5.1.12 Enable transmit and receive
	 * Enable bus master and interrupts
	 */
	v = READ_MBOX4(sc, HE_REGO_CS_ERCTL0);
	v |= 0x18000000;
	WRITE_MBOX4(sc, HE_REGO_CS_ERCTL0, v);

	v = READ4(sc, HE_REGO_RCCONFIG);
	v |= HE_REGM_RCCONFIG_RXENB;
	WRITE4(sc, HE_REGO_RCCONFIG, v);

	v = pci_read_config(sc->dev, HE_PCIR_GEN_CNTL_0, 4);
	v |= HE_PCIM_CTL0_INIT_ENB | HE_PCIM_CTL0_INT_PROC_ENB;
	pci_write_config(sc->dev, HE_PCIR_GEN_CNTL_0, v, 4);

	sc->ifp->if_flags |= IFF_RUNNING;
	sc->ifp->if_baudrate = 53 * 8 * IFP2IFATM(sc->ifp)->mib.pcr;

	sc->utopia.flags &= ~UTP_FL_POLL_CARRIER;

	/* reopen vccs */
	for (cid = 0; cid < HE_MAX_VCCS; cid++)
		if (sc->vccs[cid] != NULL)
			hatm_load_vc(sc, cid, 1);

	ATMEV_SEND_IFSTATE_CHANGED(IFP2IFATM(sc->ifp),
	    sc->utopia.carrier == UTP_CARR_OK);
}

/*
 * This function stops the card and frees all resources allocated after
 * the attach. Must have the global lock.
 */
void
hatm_stop(struct hatm_softc *sc)
{
	uint32_t v;
	u_int i, p, cid;
	struct mbuf_chunk_hdr *ch;
	struct mbuf_page *pg;

	mtx_assert(&sc->mtx, MA_OWNED);
	if (!(sc->ifp->if_flags & IFF_RUNNING))
		return;
	sc->ifp->if_flags &= ~IFF_RUNNING;

	ATMEV_SEND_IFSTATE_CHANGED(IFP2IFATM(sc->ifp),
	    sc->utopia.carrier == UTP_CARR_OK);

	sc->utopia.flags |= UTP_FL_POLL_CARRIER;

	/*
	 * Stop and reset the hardware so that everything remains
	 * stable.
	 */
	v = READ_MBOX4(sc, HE_REGO_CS_ERCTL0);
	v &= ~0x18000000;
	WRITE_MBOX4(sc, HE_REGO_CS_ERCTL0, v);

	v = READ4(sc, HE_REGO_RCCONFIG);
	v &= ~HE_REGM_RCCONFIG_RXENB;
	WRITE4(sc, HE_REGO_RCCONFIG, v);

	WRITE4(sc, HE_REGO_RHCONFIG, (0x2 << HE_REGS_RHCONFIG_PTMR_PRE));
	BARRIER_W(sc);

	v = READ4(sc, HE_REGO_HOST_CNTL);
	BARRIER_R(sc);
	v &= ~(HE_REGM_HOST_OUTFF_ENB | HE_REGM_HOST_CMDFF_ENB);
	WRITE4(sc, HE_REGO_HOST_CNTL, v);
	BARRIER_W(sc);

	/*
	 * Disable bus master and interrupts
	 */
	v = pci_read_config(sc->dev, HE_PCIR_GEN_CNTL_0, 4);
	v &= ~(HE_PCIM_CTL0_INIT_ENB | HE_PCIM_CTL0_INT_PROC_ENB);
	pci_write_config(sc->dev, HE_PCIR_GEN_CNTL_0, v, 4);

	(void)hatm_reset(sc);

	/*
	 * The card resets the SUNI when it is reset, so re-initialize it
	 */
	utopia_reset(&sc->utopia);

	/*
	 * Give any waiters on closing a VCC a chance. They will stop
	 * waiting when they see that IFF_RUNNING has disappeared.
	 */
	cv_broadcast(&sc->vcc_cv);
	cv_broadcast(&sc->cv_rcclose);

	/*
	 * Now free all resources.
	 */

	/*
	 * Free the large mbufs that are given to the card.
	 */
	for (i = 0; i < sc->lbufs_size; i++) {
		if (sc->lbufs[i] != NULL) {
			bus_dmamap_unload(sc->mbuf_tag, sc->rmaps[i]);
			m_freem(sc->lbufs[i]);
			sc->lbufs[i] = NULL;
		}
	}

	/*
	 * Free small buffers
	 */
	for (p = 0; p < sc->mbuf_npages; p++) {
		pg = sc->mbuf_pages[p];
		for (i = 0; i < pg->hdr.nchunks; i++) {
			ch = (struct mbuf_chunk_hdr *)((char *)pg +
			    i * pg->hdr.chunksize + pg->hdr.hdroff);
			if (ch->flags & MBUF_CARD) {
				ch->flags &= ~MBUF_CARD;
				ch->flags |= MBUF_USED;
				hatm_ext_free(&sc->mbuf_list[pg->hdr.pool],
				    (struct mbufx_free *)((u_char *)ch -
				    pg->hdr.hdroff));
			}
		}
	}

	hatm_stop_tpds(sc);

	/*
	 * Free all partially reassembled PDUs on any VCC.
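 * A VCC whose RX and TX sides are both closed was only waiting for the
 * close to complete and is freed here; otherwise just the per-VCC
 * software state is reset so that hatm_initialize() can reload the
 * connection later.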
*/ for (cid = 0; cid < HE_MAX_VCCS; cid++) { if (sc->vccs[cid] != NULL) { if (sc->vccs[cid]->chain != NULL) { m_freem(sc->vccs[cid]->chain); sc->vccs[cid]->chain = NULL; sc->vccs[cid]->last = NULL; } if (!(sc->vccs[cid]->vflags & (HE_VCC_RX_OPEN | HE_VCC_TX_OPEN))) { hatm_tx_vcc_closed(sc, cid); uma_zfree(sc->vcc_zone, sc->vccs[cid]); sc->vccs[cid] = NULL; sc->open_vccs--; } else { sc->vccs[cid]->vflags = 0; sc->vccs[cid]->ntpds = 0; } } } if (sc->rbp_s0.size != 0) bzero(sc->rbp_s0.mem.base, sc->rbp_s0.mem.size); if (sc->rbp_l0.size != 0) bzero(sc->rbp_l0.mem.base, sc->rbp_l0.mem.size); if (sc->rbp_s1.size != 0) bzero(sc->rbp_s1.mem.base, sc->rbp_s1.mem.size); if (sc->rbrq_0.size != 0) bzero(sc->rbrq_0.mem.base, sc->rbrq_0.mem.size); if (sc->rbrq_1.size != 0) bzero(sc->rbrq_1.mem.base, sc->rbrq_1.mem.size); bzero(sc->tbrq.mem.base, sc->tbrq.mem.size); bzero(sc->tpdrq.mem.base, sc->tpdrq.mem.size); bzero(sc->hsp_mem.base, sc->hsp_mem.size); } /************************************************************ * * Driver infrastructure */ devclass_t hatm_devclass; static device_method_t hatm_methods[] = { DEVMETHOD(device_probe, hatm_probe), DEVMETHOD(device_attach, hatm_attach), DEVMETHOD(device_detach, hatm_detach), {0,0} }; static driver_t hatm_driver = { "hatm", hatm_methods, sizeof(struct hatm_softc), }; DRIVER_MODULE(hatm, pci, hatm_driver, hatm_devclass, NULL, 0); Index: head/sys/dev/hatm/if_hatm_ioctl.c =================================================================== --- head/sys/dev/hatm/if_hatm_ioctl.c (revision 147720) +++ head/sys/dev/hatm/if_hatm_ioctl.c (revision 147721) @@ -1,384 +1,384 @@ /*- * Copyright (c) 2001-2003 * Fraunhofer Institute for Open Communication Systems (FhG Fokus). * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * Author: Hartmut Brandt * * ForeHE driver. * * Ioctl handler. 
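 * Besides the generic interface ioctls (addresses, flags, media, MTU)
 * this handles the ATM-specific requests: returning the VCC table and
 * the kernel-internal VCC open and close calls.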
*/ #include __FBSDID("$FreeBSD$"); #include "opt_inet.h" #include "opt_natm.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include static u_int hatm_natm_traffic = ATMIO_TRAFFIC_UBR; static u_int hatm_natm_pcr = 0; static int hatm_sysctl_natm_traffic(SYSCTL_HANDLER_ARGS); SYSCTL_DECL(_hw_atm); SYSCTL_PROC(_hw_atm, OID_AUTO, natm_traffic, CTLTYPE_UINT | CTLFLAG_RW, &hatm_natm_traffic, sizeof(hatm_natm_traffic), hatm_sysctl_natm_traffic, "IU", "traffic type for NATM connections"); SYSCTL_UINT(_hw_atm, OID_AUTO, natm_pcr, CTLFLAG_RW, &hatm_natm_pcr, 0, "PCR for NATM connections"); /* * Try to open the given VCC. */ static int hatm_open_vcc(struct hatm_softc *sc, struct atmio_openvcc *arg) { u_int cid; struct hevcc *vcc; int error = 0; DBG(sc, VCC, ("Open VCC: %u.%u flags=%#x", arg->param.vpi, arg->param.vci, arg->param.flags)); if ((arg->param.vpi & ~HE_VPI_MASK) || (arg->param.vci & ~HE_VCI_MASK) || (arg->param.vci == 0)) return (EINVAL); cid = HE_CID(arg->param.vpi, arg->param.vci); if ((arg->param.flags & ATMIO_FLAG_NOTX) && (arg->param.flags & ATMIO_FLAG_NORX)) return (EINVAL); vcc = uma_zalloc(sc->vcc_zone, M_NOWAIT | M_ZERO); if (vcc == NULL) return (ENOMEM); mtx_lock(&sc->mtx); if (!(sc->ifp->if_flags & IFF_RUNNING)) { error = EIO; goto done; } if (sc->vccs[cid] != NULL) { error = EBUSY; goto done; } vcc->param = arg->param; vcc->rxhand = arg->rxhand; switch (vcc->param.aal) { case ATMIO_AAL_0: case ATMIO_AAL_5: case ATMIO_AAL_RAW: break; default: error = EINVAL; goto done; } switch (vcc->param.traffic) { case ATMIO_TRAFFIC_UBR: case ATMIO_TRAFFIC_CBR: case ATMIO_TRAFFIC_ABR: break; default: error = EINVAL; goto done; } vcc->ntpds = 0; vcc->chain = vcc->last = NULL; vcc->ibytes = vcc->ipackets = 0; vcc->obytes = vcc->opackets = 0; if (!(vcc->param.flags & ATMIO_FLAG_NOTX) && (error = hatm_tx_vcc_can_open(sc, cid, vcc)) != 0) goto done; /* ok - go ahead */ sc->vccs[cid] = vcc; hatm_load_vc(sc, cid, 0); /* don't free below */ vcc = NULL; sc->open_vccs++; done: mtx_unlock(&sc->mtx); if (vcc != NULL) uma_zfree(sc->vcc_zone, vcc); return (error); } void hatm_load_vc(struct hatm_softc *sc, u_int cid, int reopen) { struct hevcc *vcc = sc->vccs[cid]; if (!(vcc->param.flags & ATMIO_FLAG_NOTX)) hatm_tx_vcc_open(sc, cid); if (!(vcc->param.flags & ATMIO_FLAG_NORX)) hatm_rx_vcc_open(sc, cid); if (reopen) return; /* inform management about non-NG and NG-PVCs */ if (!(vcc->param.flags & ATMIO_FLAG_NG) || (vcc->param.flags & ATMIO_FLAG_PVC)) ATMEV_SEND_VCC_CHANGED(IFP2IFATM(sc->ifp), vcc->param.vpi, vcc->param.vci, 1); } /* * VCC has been finally closed. 
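 * Both the receive and the transmit side are down at this point, so the
 * table slot can be freed and reused. Management is notified for
 * non-netgraph connections and for PVCs, matching the notification that
 * was sent when the VCC was opened.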
*/ void hatm_vcc_closed(struct hatm_softc *sc, u_int cid) { struct hevcc *vcc = sc->vccs[cid]; /* inform management about non-NG and NG-PVCs */ if (!(vcc->param.flags & ATMIO_FLAG_NG) || (vcc->param.flags & ATMIO_FLAG_PVC)) ATMEV_SEND_VCC_CHANGED(IFP2IFATM(sc->ifp), HE_VPI(cid), HE_VCI(cid), 0); sc->open_vccs--; uma_zfree(sc->vcc_zone, vcc); sc->vccs[cid] = NULL; } /* * Try to close the given VCC */ static int hatm_close_vcc(struct hatm_softc *sc, struct atmio_closevcc *arg) { u_int cid; struct hevcc *vcc; int error = 0; DBG(sc, VCC, ("Close VCC: %u.%u", arg->vpi, arg->vci)); if((arg->vpi & ~HE_VPI_MASK) || (arg->vci & ~HE_VCI_MASK) || (arg->vci == 0)) return (EINVAL); cid = HE_CID(arg->vpi, arg->vci); mtx_lock(&sc->mtx); vcc = sc->vccs[cid]; if (!(sc->ifp->if_flags & IFF_RUNNING)) { error = EIO; goto done; } if (vcc == NULL || !(vcc->vflags & HE_VCC_OPEN)) { error = ENOENT; goto done; } if (vcc->vflags & HE_VCC_TX_OPEN) hatm_tx_vcc_close(sc, cid); if (vcc->vflags & HE_VCC_RX_OPEN) hatm_rx_vcc_close(sc, cid); if (vcc->param.flags & ATMIO_FLAG_ASYNC) goto done; while ((sc->ifp->if_flags & IFF_RUNNING) && (vcc->vflags & (HE_VCC_TX_CLOSING | HE_VCC_RX_CLOSING))) cv_wait(&sc->vcc_cv, &sc->mtx); if (!(sc->ifp->if_flags & IFF_RUNNING)) { error = EIO; goto done; } if (!(vcc->vflags & ATMIO_FLAG_NOTX)) hatm_tx_vcc_closed(sc, cid); hatm_vcc_closed(sc, cid); done: mtx_unlock(&sc->mtx); return (error); } /* * IOCTL handler */ int hatm_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) { struct ifreq *ifr = (struct ifreq *)data; struct ifaddr *ifa = (struct ifaddr *)data; - struct hatm_softc *sc = (struct hatm_softc *)ifp->if_softc; + struct hatm_softc *sc = ifp->if_softc; struct atmio_vcctable *vtab; int error = 0; switch (cmd) { case SIOCSIFADDR: mtx_lock(&sc->mtx); ifp->if_flags |= IFF_UP; if (!(ifp->if_flags & IFF_RUNNING)) hatm_initialize(sc); switch (ifa->ifa_addr->sa_family) { #ifdef INET case AF_INET: case AF_INET6: ifa->ifa_rtrequest = atm_rtrequest; break; #endif default: break; } mtx_unlock(&sc->mtx); break; case SIOCSIFFLAGS: mtx_lock(&sc->mtx); if (ifp->if_flags & IFF_UP) { if (!(ifp->if_flags & IFF_RUNNING)) { hatm_initialize(sc); } } else { if (ifp->if_flags & IFF_RUNNING) { hatm_stop(sc); } } mtx_unlock(&sc->mtx); break; case SIOCGIFMEDIA: case SIOCSIFMEDIA: error = ifmedia_ioctl(ifp, ifr, &sc->media, cmd); break; case SIOCSIFMTU: /* * Set the interface MTU. 
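 * Values up to ATMMTU are accepted; anything larger is rejected
 * with EINVAL.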
*/ if (ifr->ifr_mtu > ATMMTU) error = EINVAL; else ifp->if_mtu = ifr->ifr_mtu; break; case SIOCATMGVCCS: /* return vcc table */ vtab = atm_getvccs((struct atmio_vcc **)sc->vccs, HE_MAX_VCCS, sc->open_vccs, &sc->mtx, 1); error = copyout(vtab, ifr->ifr_data, sizeof(*vtab) + vtab->count * sizeof(vtab->vccs[0])); free(vtab, M_DEVBUF); break; case SIOCATMGETVCCS: /* netgraph internal use */ vtab = atm_getvccs((struct atmio_vcc **)sc->vccs, HE_MAX_VCCS, sc->open_vccs, &sc->mtx, 0); if (vtab == NULL) { error = ENOMEM; break; } *(void **)data = vtab; break; case SIOCATMOPENVCC: /* kernel internal use */ error = hatm_open_vcc(sc, (struct atmio_openvcc *)data); break; case SIOCATMCLOSEVCC: /* kernel internal use */ error = hatm_close_vcc(sc, (struct atmio_closevcc *)data); break; default: DBG(sc, IOCTL, ("cmd=%08lx arg=%p", cmd, data)); error = EINVAL; break; } return (error); } static int hatm_sysctl_natm_traffic(SYSCTL_HANDLER_ARGS) { int error; int tmp; tmp = hatm_natm_traffic; error = sysctl_handle_int(oidp, &tmp, 0, req); if (error != 0 || req->newptr == NULL) return (error); if (tmp != ATMIO_TRAFFIC_UBR && tmp != ATMIO_TRAFFIC_CBR) return (EINVAL); hatm_natm_traffic = tmp; return (0); } Index: head/sys/dev/hatm/if_hatm_tx.c =================================================================== --- head/sys/dev/hatm/if_hatm_tx.c (revision 147720) +++ head/sys/dev/hatm/if_hatm_tx.c (revision 147721) @@ -1,826 +1,826 @@ /*- * Copyright (c) 2001-2003 * Fraunhofer Institute for Open Communication Systems (FhG Fokus). * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * Author: Hartmut Brandt * * ForeHE driver. * * Transmission. */ #include __FBSDID("$FreeBSD$"); #include "opt_inet.h" #include "opt_natm.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef ENABLE_BPF #include #endif #include #include #include #include #include #include #include #include #include #include #include #include /* * These macros are used to trace the flow of transmit mbufs and to * detect transmit mbuf leaks in the driver. 
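 * hatm_get_txmbuf() is called for every mbuf the driver takes over and
 * hatm_free_txmbuf() for every mbuf it disposes of, so sc->txmbuf
 * counts the mbufs currently owned by the transmit path; a count that
 * drops below zero indicates a double free. Outside of HATM_DEBUG both
 * macros expand to nothing.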
 */
#ifdef HATM_DEBUG
#define hatm_free_txmbuf(SC) \
	do { \
		if (--sc->txmbuf < 0) \
			DBG(sc, TX, ("txmbuf below 0!")); \
		else if (sc->txmbuf == 0) \
			DBG(sc, TX, ("txmbuf now 0")); \
	} while (0)

#define hatm_get_txmbuf(SC) \
	do { \
		if (++sc->txmbuf > 20000) \
			DBG(sc, TX, ("txmbuf %u", sc->txmbuf)); \
		else if (sc->txmbuf == 1) \
			DBG(sc, TX, ("txmbuf leaves 0")); \
	} while (0)
#else
#define hatm_free_txmbuf(SC) do { } while (0)
#define hatm_get_txmbuf(SC) do { } while (0)
#endif

/*
 * Allocate a new TPD, zero the TPD part. Cannot return NULL if
 * flag is 0. The TPD is removed from the free list and its used
 * bit is set.
 */
static struct tpd *
hatm_alloc_tpd(struct hatm_softc *sc, u_int flags)
{
	struct tpd *t;

	/* if we allocate a transmit TPD check for the reserve */
	if (flags & M_NOWAIT) {
		if (sc->tpd_nfree <= HE_CONFIG_TPD_RESERVE)
			return (NULL);
	} else {
		if (sc->tpd_nfree == 0)
			return (NULL);
	}

	/* mark it as being used */
	t = SLIST_FIRST(&sc->tpd_free);
	KASSERT(t != NULL, ("tpd botch"));
	SLIST_REMOVE_HEAD(&sc->tpd_free, link);
	TPD_SET_USED(sc, t->no);
	sc->tpd_nfree--;

	/* initialize */
	t->mbuf = NULL;
	t->cid = 0;
	bzero(&t->tpd, sizeof(t->tpd));
	t->tpd.addr = t->no << HE_REGS_TPD_ADDR;

	return (t);
}

/*
 * Free a TPD. If the mbuf pointer in that TPD is not zero, it is assumed
 * that the DMA map of this TPD was used to load this mbuf. The map is
 * unloaded and the mbuf is freed. The TPD is put back onto the free list
 * and its used bit is cleared.
 */
static void
hatm_free_tpd(struct hatm_softc *sc, struct tpd *tpd)
{
	if (tpd->mbuf != NULL) {
		bus_dmamap_unload(sc->tx_tag, tpd->map);
		hatm_free_txmbuf(sc);
		m_freem(tpd->mbuf);
		tpd->mbuf = NULL;
	}

	/* insert TPD into free list */
	SLIST_INSERT_HEAD(&sc->tpd_free, tpd, link);
	TPD_CLR_USED(sc, tpd->no);
	sc->tpd_nfree++;
}

/*
 * Queue a number of TPDs. If there is not enough space none of the TPDs
 * is queued and an error code is returned.
 */
static int
hatm_queue_tpds(struct hatm_softc *sc, u_int count, struct tpd **list,
    u_int cid)
{
	u_int space;
	u_int i;

	if (count >= sc->tpdrq.size) {
		sc->istats.tdprq_full++;
		return (EBUSY);
	}

	if (sc->tpdrq.tail < sc->tpdrq.head)
		space = sc->tpdrq.head - sc->tpdrq.tail;
	else
		space = sc->tpdrq.head - sc->tpdrq.tail + sc->tpdrq.size;

	if (space <= count) {
		sc->tpdrq.head = (READ4(sc, HE_REGO_TPDRQ_H) >>
		    HE_REGS_TPDRQ_H_H) & (sc->tpdrq.size - 1);
		if (sc->tpdrq.tail < sc->tpdrq.head)
			space = sc->tpdrq.head - sc->tpdrq.tail;
		else
			space = sc->tpdrq.head - sc->tpdrq.tail +
			    sc->tpdrq.size;
		if (space <= count) {
			if_printf(sc->ifp, "TPDRQ full\n");
			sc->istats.tdprq_full++;
			return (EBUSY);
		}
	}

	/* we are going to write to the TPD queue space */
	bus_dmamap_sync(sc->tpdrq.mem.tag, sc->tpdrq.mem.map,
	    BUS_DMASYNC_PREWRITE);

	/* put the entries into the TPD space */
	for (i = 0; i < count; i++) {
		/* we are going to 'write' the TPD to the device */
		bus_dmamap_sync(sc->tpds.tag, sc->tpds.map,
		    BUS_DMASYNC_PREWRITE);

		sc->tpdrq.tpdrq[sc->tpdrq.tail].tpd =
		    sc->tpds.paddr + HE_TPD_SIZE * list[i]->no;
		sc->tpdrq.tpdrq[sc->tpdrq.tail].cid = cid;

		if (++sc->tpdrq.tail == sc->tpdrq.size)
			sc->tpdrq.tail = 0;
	}

	/* update tail pointer */
	WRITE4(sc, HE_REGO_TPDRQ_T, (sc->tpdrq.tail << HE_REGS_TPDRQ_T_T));

	return (0);
}

/*
 * Helper struct for communication with the DMA load helper.
 */
struct load_txbuf_arg {
	struct hatm_softc *sc;
	struct tpd *first;
	struct mbuf *mbuf;
	struct hevcc *vcc;
	int error;
	u_int pti;
	u_int vpi, vci;
};

/*
 * Loader callback for the mbuf. This function allocates the TPDs and
 * fills them.
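 * Each TPD carries up to three buffer descriptors, so a packet that
 * maps to nseg DMA segments needs (nseg + 2) / 3 TPDs in total; a
 * 7-segment packet, for example, occupies 3 TPDs, the first of which
 * has already been allocated by the caller.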
 * It puts the dmamap and the mbuf pointer into the last TPD and then
 * tries to queue all the TPDs. If anything fails, all TPDs allocated
 * by this function are freed and the error flag is set in the argument
 * structure. The first TPD must then be freed by the caller.
 */
static void
hatm_load_txbuf(void *uarg, bus_dma_segment_t *segs, int nseg,
    bus_size_t mapsize, int error)
{
	struct load_txbuf_arg *arg = uarg;
	u_int tpds_needed, i, n, tpd_cnt;
	int need_intr;
	struct tpd *tpd;
	struct tpd *tpd_list[HE_CONFIG_MAX_TPD_PER_PACKET];

	if (error != 0) {
		DBG(arg->sc, DMA, ("%s -- error=%d plen=%d\n",
		    __func__, error, arg->mbuf->m_pkthdr.len));
		return;
	}

	/* ensure we have enough TPDs (remember, we already have one) */
	tpds_needed = (nseg + 2) / 3;
	if (HE_CONFIG_TPD_RESERVE + tpds_needed - 1 > arg->sc->tpd_nfree) {
		if_printf(arg->sc->ifp, "%s -- out of TPDs (need %d, "
		    "have %u)\n", __func__, tpds_needed - 1,
		    arg->sc->tpd_nfree + 1);
		arg->error = 1;
		return;
	}

	/*
	 * Check for the maximum number of TPDs on the connection.
	 */
	need_intr = 0;
	if (arg->sc->max_tpd > 0) {
		if (arg->vcc->ntpds + tpds_needed > arg->sc->max_tpd) {
			arg->sc->istats.flow_closed++;
			arg->vcc->vflags |= HE_VCC_FLOW_CTRL;
			ATMEV_SEND_FLOW_CONTROL(IFP2IFATM(arg->sc->ifp),
			    arg->vpi, arg->vci, 1);
			arg->error = 1;
			return;
		}
		if (arg->vcc->ntpds + tpds_needed >
		    (9 * arg->sc->max_tpd) / 10)
			need_intr = 1;
	}

	tpd = arg->first;
	tpd_cnt = 0;
	tpd_list[tpd_cnt++] = tpd;
	for (i = n = 0; i < nseg; i++, n++) {
		if (n == 3) {
			if ((tpd = hatm_alloc_tpd(arg->sc, M_NOWAIT)) == NULL)
				/* may not fail (see check above) */
				panic("%s: out of TPDs", __func__);
			tpd->cid = arg->first->cid;
			tpd->tpd.addr |= arg->pti;
			tpd_list[tpd_cnt++] = tpd;
			n = 0;
		}
		KASSERT(segs[i].ds_addr <= 0xffffffffLU,
		    ("phys addr too large %lx", (u_long)segs[i].ds_addr));

		DBG(arg->sc, DMA, ("DMA loaded: %lx/%lu",
		    (u_long)segs[i].ds_addr, (u_long)segs[i].ds_len));

		tpd->tpd.bufs[n].addr = segs[i].ds_addr;
		tpd->tpd.bufs[n].len = segs[i].ds_len;

		DBG(arg->sc, TX, ("seg[%u]=tpd[%u,%u]=%x/%u", i, tpd_cnt, n,
		    tpd->tpd.bufs[n].addr, tpd->tpd.bufs[n].len));

		if (i == nseg - 1)
			tpd->tpd.bufs[n].len |= HE_REGM_TPD_LST;
	}

	/*
	 * Swap the MAP in the first and the last TPD and set the mbuf
	 * pointer into the last TPD. We use the map in the last TPD, because
	 * the map must stay valid until the last TPD is processed by the card.
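 * hatm_free_tpd() always unloads the DMA map of the TPD that carries
 * the mbuf pointer, so after this swap the unload operates on the map
 * that was actually loaded with the packet.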
*/ if (tpd_cnt > 1) { bus_dmamap_t tmp; tmp = arg->first->map; arg->first->map = tpd_list[tpd_cnt - 1]->map; tpd_list[tpd_cnt - 1]->map = tmp; } tpd_list[tpd_cnt - 1]->mbuf = arg->mbuf; if (need_intr) tpd_list[tpd_cnt - 1]->tpd.addr |= HE_REGM_TPD_INTR; /* queue the TPDs */ if (hatm_queue_tpds(arg->sc, tpd_cnt, tpd_list, arg->first->cid)) { /* free all, except the first TPD */ for (i = 1; i < tpd_cnt; i++) hatm_free_tpd(arg->sc, tpd_list[i]); arg->error = 1; return; } arg->vcc->ntpds += tpd_cnt; } /* * Start output on the interface */ void hatm_start(struct ifnet *ifp) { - struct hatm_softc *sc = (struct hatm_softc *)ifp->if_softc; + struct hatm_softc *sc = ifp->if_softc; struct mbuf *m; struct atm_pseudohdr *aph; u_int cid; struct tpd *tpd; struct load_txbuf_arg arg; u_int len; int error; if (!(ifp->if_flags & IFF_RUNNING)) return; mtx_lock(&sc->mtx); arg.sc = sc; while (1) { IF_DEQUEUE(&ifp->if_snd, m); if (m == NULL) break; hatm_get_txmbuf(sc); if (m->m_len < sizeof(*aph)) if ((m = m_pullup(m, sizeof(*aph))) == NULL) { hatm_free_txmbuf(sc); continue; } aph = mtod(m, struct atm_pseudohdr *); arg.vci = ATM_PH_VCI(aph); arg.vpi = ATM_PH_VPI(aph); m_adj(m, sizeof(*aph)); if ((len = m->m_pkthdr.len) == 0) { hatm_free_txmbuf(sc); m_freem(m); continue; } if ((arg.vpi & ~HE_VPI_MASK) || (arg.vci & ~HE_VCI_MASK) || (arg.vci == 0)) { hatm_free_txmbuf(sc); m_freem(m); continue; } cid = HE_CID(arg.vpi, arg.vci); arg.vcc = sc->vccs[cid]; if (arg.vcc == NULL || !(arg.vcc->vflags & HE_VCC_OPEN)) { hatm_free_txmbuf(sc); m_freem(m); continue; } if (arg.vcc->vflags & HE_VCC_FLOW_CTRL) { hatm_free_txmbuf(sc); m_freem(m); sc->istats.flow_drop++; continue; } arg.pti = 0; if (arg.vcc->param.aal == ATMIO_AAL_RAW) { if (len < 52) { /* too short */ hatm_free_txmbuf(sc); m_freem(m); continue; } /* * Get the header and ignore except * payload type and CLP. */ if (m->m_len < 4 && (m = m_pullup(m, 4)) == NULL) { hatm_free_txmbuf(sc); continue; } arg.pti = mtod(m, u_char *)[3] & 0xf; arg.pti = ((arg.pti & 0xe) << 2) | ((arg.pti & 1) << 1); m_adj(m, 4); len -= 4; if (len % 48 != 0) { m_adj(m, -((int)(len % 48))); len -= len % 48; } } #ifdef ENABLE_BPF if (!(arg.vcc->param.flags & ATMIO_FLAG_NG) && (arg.vcc->param.aal == ATMIO_AAL_5) && (arg.vcc->param.flags & ATM_PH_LLCSNAP)) BPF_MTAP(ifp, m); #endif /* Now load a DMA map with the packet. Allocate the first * TPD to get a map. Additional TPDs may be allocated by the * callback. 
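 * Note that mbuf loads with BUS_DMA_NOWAIT are never deferred: the
 * callback has run (or the load has failed) by the time
 * bus_dmamap_load_mbuf() returns, which is why arg can live on the
 * stack and arg.error can be inspected right after the call.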
*/ if ((tpd = hatm_alloc_tpd(sc, M_NOWAIT)) == NULL) { hatm_free_txmbuf(sc); m_freem(m); sc->ifp->if_oerrors++; continue; } tpd->cid = cid; tpd->tpd.addr |= arg.pti; arg.first = tpd; arg.error = 0; arg.mbuf = m; error = bus_dmamap_load_mbuf(sc->tx_tag, tpd->map, m, hatm_load_txbuf, &arg, BUS_DMA_NOWAIT); if (error == EFBIG) { /* try to defragment the packet */ sc->istats.defrag++; m = m_defrag(m, M_DONTWAIT); if (m == NULL) { tpd->mbuf = NULL; hatm_free_txmbuf(sc); hatm_free_tpd(sc, tpd); sc->ifp->if_oerrors++; continue; } arg.mbuf = m; error = bus_dmamap_load_mbuf(sc->tx_tag, tpd->map, m, hatm_load_txbuf, &arg, BUS_DMA_NOWAIT); } if (error != 0) { if_printf(sc->ifp, "mbuf loaded error=%d\n", error); hatm_free_tpd(sc, tpd); sc->ifp->if_oerrors++; continue; } if (arg.error) { hatm_free_tpd(sc, tpd); sc->ifp->if_oerrors++; continue; } arg.vcc->opackets++; arg.vcc->obytes += len; sc->ifp->if_opackets++; } mtx_unlock(&sc->mtx); } void hatm_tx_complete(struct hatm_softc *sc, struct tpd *tpd, uint32_t flags) { struct hevcc *vcc = sc->vccs[tpd->cid]; DBG(sc, TX, ("tx_complete cid=%#x flags=%#x", tpd->cid, flags)); if (vcc == NULL) return; if ((flags & HE_REGM_TBRQ_EOS) && (vcc->vflags & HE_VCC_TX_CLOSING)) { vcc->vflags &= ~HE_VCC_TX_CLOSING; if (vcc->param.flags & ATMIO_FLAG_ASYNC) { hatm_tx_vcc_closed(sc, tpd->cid); if (!(vcc->vflags & HE_VCC_OPEN)) { hatm_vcc_closed(sc, tpd->cid); vcc = NULL; } } else cv_signal(&sc->vcc_cv); } hatm_free_tpd(sc, tpd); if (vcc == NULL) return; vcc->ntpds--; if ((vcc->vflags & HE_VCC_FLOW_CTRL) && vcc->ntpds <= HE_CONFIG_TPD_FLOW_ENB) { vcc->vflags &= ~HE_VCC_FLOW_CTRL; ATMEV_SEND_FLOW_CONTROL(IFP2IFATM(sc->ifp), HE_VPI(tpd->cid), HE_VCI(tpd->cid), 0); } } /* * Convert CPS to Rate for a rate group */ static u_int cps_to_rate(struct hatm_softc *sc, uint32_t cps) { u_int clk = sc->he622 ? HE_622_CLOCK : HE_155_CLOCK; u_int period, rate; /* how many double ticks between two cells */ period = (clk + 2 * cps - 1) / (2 * cps); rate = hatm_cps2atmf(period); if (hatm_atmf2cps(rate) < period) rate++; return (rate); } /* * Check whether the VCC is really closed on the hardware and available for * open. Check that we have enough resources. If this function returns ok, * a later actual open must succeed. Assume, that we are locked between this * function and the next one, so that nothing does change. For CBR this * assigns the rate group and set the rate group's parameter. */ int hatm_tx_vcc_can_open(struct hatm_softc *sc, u_int cid, struct hevcc *vcc) { uint32_t v, line_rate; u_int rc, idx, free_idx; struct atmio_tparam *t = &vcc->param.tparam; /* verify that connection is closed */ #if 0 v = READ_TSR(sc, cid, 4); if(!(v & HE_REGM_TSR4_SESS_END)) { if_printf(sc->ifp, "cid=%#x not closed (TSR4)\n", cid); return (EBUSY); } #endif v = READ_TSR(sc, cid, 0); if((v & HE_REGM_TSR0_CONN_STATE) != 0) { if_printf(sc->ifp, "cid=%#x not closed (TSR0=%#x)\n", cid, v); return (EBUSY); } /* check traffic parameters */ line_rate = sc->he622 ? 
ATM_RATE_622M : ATM_RATE_155M; switch (vcc->param.traffic) { case ATMIO_TRAFFIC_UBR: if (t->pcr == 0 || t->pcr > line_rate) t->pcr = line_rate; if (t->mcr != 0 || t->icr != 0 || t->tbe != 0 || t->nrm != 0 || t->trm != 0 || t->adtf != 0 || t->rif != 0 || t->rdf != 0 || t->cdf != 0) return (EINVAL); break; case ATMIO_TRAFFIC_CBR: /* * Compute rate group index */ if (t->pcr < 10) t->pcr = 10; if (sc->cbr_bw + t->pcr > line_rate) return (EINVAL); if (t->mcr != 0 || t->icr != 0 || t->tbe != 0 || t->nrm != 0 || t->trm != 0 || t->adtf != 0 || t->rif != 0 || t->rdf != 0 || t->cdf != 0) return (EINVAL); rc = cps_to_rate(sc, t->pcr); free_idx = HE_REGN_CS_STPER; for (idx = 0; idx < HE_REGN_CS_STPER; idx++) { if (sc->rate_ctrl[idx].refcnt == 0) { if (free_idx == HE_REGN_CS_STPER) free_idx = idx; } else { if (sc->rate_ctrl[idx].rate == rc) break; } } if (idx == HE_REGN_CS_STPER) { if ((idx = free_idx) == HE_REGN_CS_STPER) return (EBUSY); sc->rate_ctrl[idx].rate = rc; } vcc->rc = idx; /* commit */ sc->rate_ctrl[idx].refcnt++; sc->cbr_bw += t->pcr; break; case ATMIO_TRAFFIC_ABR: if (t->pcr > line_rate) t->pcr = line_rate; if (t->mcr > line_rate) t->mcr = line_rate; if (t->icr > line_rate) t->icr = line_rate; if (t->tbe == 0 || t->tbe >= 1 << 24 || t->nrm > 7 || t->trm > 7 || t->adtf >= 1 << 10 || t->rif > 15 || t->rdf > 15 || t->cdf > 7) return (EINVAL); break; default: return (EINVAL); } return (0); } #define NRM_CODE2VAL(CODE) (2 * (1 << (CODE))) /* * Actually open the transmit VCC */ void hatm_tx_vcc_open(struct hatm_softc *sc, u_int cid) { struct hevcc *vcc = sc->vccs[cid]; uint32_t tsr0, tsr4, atmf, crm; const struct atmio_tparam *t = &vcc->param.tparam; if (vcc->param.aal == ATMIO_AAL_5) { tsr0 = HE_REGM_TSR0_AAL_5 << HE_REGS_TSR0_AAL; tsr4 = HE_REGM_TSR4_AAL_5 << HE_REGS_TSR4_AAL; } else { tsr0 = HE_REGM_TSR0_AAL_0 << HE_REGS_TSR0_AAL; tsr4 = HE_REGM_TSR4_AAL_0 << HE_REGS_TSR4_AAL; } tsr4 |= 1; switch (vcc->param.traffic) { case ATMIO_TRAFFIC_UBR: atmf = hatm_cps2atmf(t->pcr); tsr0 |= HE_REGM_TSR0_TRAFFIC_UBR << HE_REGS_TSR0_TRAFFIC; tsr0 |= HE_REGM_TSR0_USE_WMIN | HE_REGM_TSR0_UPDATE_GER; WRITE_TSR(sc, cid, 0, 0xf, tsr0); WRITE_TSR(sc, cid, 4, 0xf, tsr4); WRITE_TSR(sc, cid, 1, 0xf, (atmf << HE_REGS_TSR1_PCR)); WRITE_TSR(sc, cid, 2, 0xf, (atmf << HE_REGS_TSR2_ACR)); WRITE_TSR(sc, cid, 9, 0xf, HE_REGM_TSR9_INIT); WRITE_TSR(sc, cid, 3, 0xf, 0); WRITE_TSR(sc, cid, 5, 0xf, 0); WRITE_TSR(sc, cid, 6, 0xf, 0); WRITE_TSR(sc, cid, 7, 0xf, 0); WRITE_TSR(sc, cid, 8, 0xf, 0); WRITE_TSR(sc, cid, 10, 0xf, 0); WRITE_TSR(sc, cid, 11, 0xf, 0); WRITE_TSR(sc, cid, 12, 0xf, 0); WRITE_TSR(sc, cid, 13, 0xf, 0); WRITE_TSR(sc, cid, 14, 0xf, 0); break; case ATMIO_TRAFFIC_CBR: atmf = hatm_cps2atmf(t->pcr); if (sc->rate_ctrl[vcc->rc].refcnt == 1) WRITE_MBOX4(sc, HE_REGO_CS_STPER(vcc->rc), sc->rate_ctrl[vcc->rc].rate); tsr0 |= HE_REGM_TSR0_TRAFFIC_CBR << HE_REGS_TSR0_TRAFFIC; tsr0 |= vcc->rc; WRITE_TSR(sc, cid, 1, 0xf, (atmf << HE_REGS_TSR1_PCR)); WRITE_TSR(sc, cid, 2, 0xf, (atmf << HE_REGS_TSR2_ACR)); WRITE_TSR(sc, cid, 3, 0xf, 0); WRITE_TSR(sc, cid, 5, 0xf, 0); WRITE_TSR(sc, cid, 6, 0xf, 0); WRITE_TSR(sc, cid, 7, 0xf, 0); WRITE_TSR(sc, cid, 8, 0xf, 0); WRITE_TSR(sc, cid, 10, 0xf, 0); WRITE_TSR(sc, cid, 11, 0xf, 0); WRITE_TSR(sc, cid, 12, 0xf, 0); WRITE_TSR(sc, cid, 13, 0xf, 0); WRITE_TSR(sc, cid, 14, 0xf, 0); WRITE_TSR(sc, cid, 4, 0xf, tsr4); WRITE_TSR(sc, cid, 9, 0xf, HE_REGM_TSR9_INIT); WRITE_TSR(sc, cid, 0, 0xf, tsr0); break; case ATMIO_TRAFFIC_ABR: if ((crm = t->tbe / NRM_CODE2VAL(t->nrm)) > 0xffff) crm = 0xffff; tsr0 
|= HE_REGM_TSR0_TRAFFIC_ABR << HE_REGS_TSR0_TRAFFIC; tsr0 |= HE_REGM_TSR0_USE_WMIN | HE_REGM_TSR0_UPDATE_GER; WRITE_TSR(sc, cid, 0, 0xf, tsr0); WRITE_TSR(sc, cid, 4, 0xf, tsr4); WRITE_TSR(sc, cid, 1, 0xf, ((hatm_cps2atmf(t->pcr) << HE_REGS_TSR1_PCR) | (hatm_cps2atmf(t->mcr) << HE_REGS_TSR1_MCR))); WRITE_TSR(sc, cid, 2, 0xf, (hatm_cps2atmf(t->icr) << HE_REGS_TSR2_ACR)); WRITE_TSR(sc, cid, 3, 0xf, ((NRM_CODE2VAL(t->nrm) - 1) << HE_REGS_TSR3_NRM) | (crm << HE_REGS_TSR3_CRM)); WRITE_TSR(sc, cid, 5, 0xf, 0); WRITE_TSR(sc, cid, 6, 0xf, 0); WRITE_TSR(sc, cid, 7, 0xf, 0); WRITE_TSR(sc, cid, 8, 0xf, 0); WRITE_TSR(sc, cid, 10, 0xf, 0); WRITE_TSR(sc, cid, 12, 0xf, 0); WRITE_TSR(sc, cid, 14, 0xf, 0); WRITE_TSR(sc, cid, 9, 0xf, HE_REGM_TSR9_INIT); WRITE_TSR(sc, cid, 11, 0xf, (hatm_cps2atmf(t->icr) << HE_REGS_TSR11_ICR) | (t->trm << HE_REGS_TSR11_TRM) | (t->nrm << HE_REGS_TSR11_NRM) | (t->adtf << HE_REGS_TSR11_ADTF)); WRITE_TSR(sc, cid, 13, 0xf, (t->rdf << HE_REGS_TSR13_RDF) | (t->rif << HE_REGS_TSR13_RIF) | (t->cdf << HE_REGS_TSR13_CDF) | (crm << HE_REGS_TSR13_CRM)); break; default: return; } vcc->vflags |= HE_VCC_TX_OPEN; } /* * Close the TX side of a VCC. Set the CLOSING flag. */ void hatm_tx_vcc_close(struct hatm_softc *sc, u_int cid) { struct hevcc *vcc = sc->vccs[cid]; struct tpd *tpd_list[1]; u_int i, pcr = 0; WRITE_TSR(sc, cid, 4, 0x8, HE_REGM_TSR4_FLUSH); switch (vcc->param.traffic) { case ATMIO_TRAFFIC_CBR: WRITE_TSR(sc, cid, 14, 0x8, HE_REGM_TSR14_CBR_DELETE); break; case ATMIO_TRAFFIC_ABR: WRITE_TSR(sc, cid, 14, 0x4, HE_REGM_TSR14_ABR_CLOSE); pcr = vcc->param.tparam.pcr; /* FALL THROUGH */ case ATMIO_TRAFFIC_UBR: WRITE_TSR(sc, cid, 1, 0xf, hatm_cps2atmf(HE_CONFIG_FLUSH_RATE) << HE_REGS_TSR1_MCR | hatm_cps2atmf(pcr) << HE_REGS_TSR1_PCR); break; } tpd_list[0] = hatm_alloc_tpd(sc, 0); tpd_list[0]->tpd.addr |= HE_REGM_TPD_EOS | HE_REGM_TPD_INTR; tpd_list[0]->cid = cid; vcc->vflags |= HE_VCC_TX_CLOSING; vcc->vflags &= ~HE_VCC_TX_OPEN; i = 0; while (hatm_queue_tpds(sc, 1, tpd_list, cid) != 0) { if (++i == 1000) panic("TPDRQ permanently full"); DELAY(1000); } } void hatm_tx_vcc_closed(struct hatm_softc *sc, u_int cid) { if (sc->vccs[cid]->param.traffic == ATMIO_TRAFFIC_CBR) { sc->cbr_bw -= sc->vccs[cid]->param.tparam.pcr; sc->rate_ctrl[sc->vccs[cid]->rc].refcnt--; } } Index: head/sys/dev/patm/if_patm_attach.c =================================================================== --- head/sys/dev/patm/if_patm_attach.c (revision 147720) +++ head/sys/dev/patm/if_patm_attach.c (revision 147721) @@ -1,1074 +1,1074 @@ /*- * Copyright (c) 2003 * Fraunhofer Institute for Open Communication Systems (FhG Fokus). * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * Author: Hartmut Brandt * * Driver for IDT77252 based cards like ProSum's. */ #include __FBSDID("$FreeBSD$"); #include "opt_inet.h" #include "opt_natm.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef ENABLE_BPF #include #endif #include #include #include #include #include #include #include #include #include #include #include #include MODULE_DEPEND(patm, utopia, 1, 1, 1); MODULE_DEPEND(patm, pci, 1, 1, 1); MODULE_DEPEND(patm, atm, 1, 1, 1); MODULE_DEPEND(patm, libmbpool, 1, 1, 1); devclass_t patm_devclass; static int patm_probe(device_t dev); static int patm_attach(device_t dev); static int patm_detach(device_t dev); static device_method_t patm_methods[] = { DEVMETHOD(device_probe, patm_probe), DEVMETHOD(device_attach, patm_attach), DEVMETHOD(device_detach, patm_detach), {0,0} }; static driver_t patm_driver = { "patm", patm_methods, sizeof(struct patm_softc), }; DRIVER_MODULE(patm, pci, patm_driver, patm_devclass, NULL, 0); static const struct { u_int devid; const char *desc; } devs[] = { { PCI_DEVICE_IDT77252, "NICStAR (77222/77252) ATM adapter" }, { PCI_DEVICE_IDT77v252, "NICStAR (77v252) ATM adapter" }, { PCI_DEVICE_IDT77v222, "NICStAR (77v222) ATM adapter" }, { 0, NULL } }; SYSCTL_DECL(_hw_atm); static int patm_phy_readregs(struct ifatm *, u_int, uint8_t *, u_int *); static int patm_phy_writereg(struct ifatm *, u_int, u_int, u_int); static const struct utopia_methods patm_utopia_methods = { patm_phy_readregs, patm_phy_writereg }; static void patm_destroy(struct patm_softc *sc); static int patm_sysctl_istats(SYSCTL_HANDLER_ARGS); static int patm_sysctl_eeprom(SYSCTL_HANDLER_ARGS); static void patm_read_eeprom(struct patm_softc *sc); static int patm_sq_init(struct patm_softc *sc); static int patm_rbuf_init(struct patm_softc *sc); static int patm_txmap_init(struct patm_softc *sc); static void patm_env_getuint(struct patm_softc *, u_int *, const char *); #ifdef PATM_DEBUG static int patm_sysctl_regs(SYSCTL_HANDLER_ARGS); static int patm_sysctl_tsq(SYSCTL_HANDLER_ARGS); int patm_dump_vc(u_int unit, u_int vc) __unused; int patm_dump_regs(u_int unit) __unused; int patm_dump_sram(u_int unit, u_int from, u_int words) __unused; #endif /* * Probe for a IDT77252 controller */ static int patm_probe(device_t dev) { u_int i; if (pci_get_vendor(dev) == PCI_VENDOR_IDT) { for (i = 0; devs[i].desc != NULL; i++) if (pci_get_device(dev) == devs[i].devid) { device_set_desc(dev, devs[i].desc); return (BUS_PROBE_DEFAULT); } } return (ENXIO); } /* * Attach */ static int patm_attach(device_t dev) { struct patm_softc *sc; int error; struct ifnet *ifp; int rid; u_int a; static const struct idt_mmap idt_mmap[4] = IDT_MMAP; sc = device_get_softc(dev); sc->dev = dev; #ifdef IATM_DEBUG sc->debug = IATM_DEBUG; #endif ifp = sc->ifp = if_alloc(IFT_ATM); if (ifp == NULL) { return (ENOSPC); } IFP2IFATM(sc->ifp)->mib.device = ATM_DEVICE_IDTABR25; 
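/*
 * Preliminary MIB defaults: the device type, media and ESI are
 * corrected once the EEPROM has been analyzed below; vci_bits and
 * max_vccs are filled in after the SRAM size has been probed.
 */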
IFP2IFATM(sc->ifp)->mib.serial = 0; IFP2IFATM(sc->ifp)->mib.hw_version = 0; IFP2IFATM(sc->ifp)->mib.sw_version = 0; IFP2IFATM(sc->ifp)->mib.vpi_bits = PATM_VPI_BITS; IFP2IFATM(sc->ifp)->mib.vci_bits = 0; /* set below */; IFP2IFATM(sc->ifp)->mib.max_vpcs = 0; IFP2IFATM(sc->ifp)->mib.max_vccs = 0; /* set below */ IFP2IFATM(sc->ifp)->mib.media = IFM_ATM_UNKNOWN; IFP2IFATM(sc->ifp)->phy = &sc->utopia; ifp->if_softc = sc; if_initname(ifp, device_get_name(dev), device_get_unit(dev)); ifp->if_flags = IFF_SIMPLEX; ifp->if_watchdog = NULL; ifp->if_init = patm_init; ifp->if_ioctl = patm_ioctl; ifp->if_start = patm_start; ifp->if_watchdog = NULL; /* do this early so we can destroy unconditionally */ mtx_init(&sc->mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF); mtx_init(&sc->tst_lock, "tst lock", NULL, MTX_DEF); cv_init(&sc->vcc_cv, "vcc_close"); callout_init(&sc->tst_callout, CALLOUT_MPSAFE); sysctl_ctx_init(&sc->sysctl_ctx); /* * Get revision */ sc->revision = pci_read_config(dev, PCIR_REVID, 4) & 0xf; /* * Enable PCI bus master and memory */ pci_enable_busmaster(dev); rid = IDT_PCI_REG_MEMBASE; sc->memres = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->memres == NULL) { patm_printf(sc, "could not map memory\n"); error = ENXIO; goto fail; } sc->memh = rman_get_bushandle(sc->memres); sc->memt = rman_get_bustag(sc->memres); /* * Allocate the interrupt (enable it later) */ sc->irqid = 0; sc->irqres = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->irqid, RF_SHAREABLE | RF_ACTIVE); if (sc->irqres == 0) { patm_printf(sc, "could not allocate irq\n"); error = ENXIO; goto fail; } /* * Construct the sysctl tree */ error = ENOMEM; if ((sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctx, SYSCTL_STATIC_CHILDREN(_hw_atm), OID_AUTO, device_get_nameunit(dev), CTLFLAG_RD, 0, "")) == NULL) goto fail; if (SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree), OID_AUTO, "istats", CTLFLAG_RD, sc, 0, patm_sysctl_istats, "S", "internal statistics") == NULL) goto fail; if (SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree), OID_AUTO, "eeprom", CTLFLAG_RD, sc, 0, patm_sysctl_eeprom, "S", "EEPROM contents") == NULL) goto fail; if (SYSCTL_ADD_UINT(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree), OID_AUTO, "lbuf_max", CTLFLAG_RD, &sc->lbuf_max, 0, "maximum number of large receive buffers") == NULL) goto fail; patm_env_getuint(sc, &sc->lbuf_max, "lbuf_max"); if (SYSCTL_ADD_UINT(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree), OID_AUTO, "max_txmaps", CTLFLAG_RW, &sc->tx_maxmaps, 0, "maximum number of TX DMA maps") == NULL) goto fail; patm_env_getuint(sc, &sc->tx_maxmaps, "tx_maxmaps"); #ifdef PATM_DEBUG if (SYSCTL_ADD_UINT(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree), OID_AUTO, "debug", CTLFLAG_RW, &sc->debug, 0, "debug flags") == NULL) goto fail; sc->debug = PATM_DEBUG; patm_env_getuint(sc, &sc->debug, "debug"); if (SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree), OID_AUTO, "regs", CTLFLAG_RD, sc, 0, patm_sysctl_regs, "S", "registers") == NULL) goto fail; if (SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree), OID_AUTO, "tsq", CTLFLAG_RD, sc, 0, patm_sysctl_tsq, "S", "TSQ") == NULL) goto fail; #endif patm_reset(sc); /* * Detect and attach the phy. 
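 * The utopia module is started early because the attach code needs the
 * result of the PHY autodetection: an IDT77105 PHY marks a 25.6 MBit
 * card, any other PHY is treated as a 155 MBit interface.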
*/ patm_debug(sc, ATTACH, "attaching utopia"); IFP2IFATM(sc->ifp)->phy = &sc->utopia; utopia_attach(&sc->utopia, IFP2IFATM(sc->ifp), &sc->media, &sc->mtx, &sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree), &patm_utopia_methods); /* * Start the PHY because we need the autodetection */ patm_debug(sc, ATTACH, "starting utopia"); mtx_lock(&sc->mtx); utopia_start(&sc->utopia); utopia_reset(&sc->utopia); mtx_unlock(&sc->mtx); /* Read EEPROM */ patm_read_eeprom(sc); /* analyze it */ if (strncmp(sc->eeprom + PATM_PROATM_NAME_OFFSET, PATM_PROATM_NAME, strlen(PATM_PROATM_NAME)) == 0) { if (sc->utopia.chip->type == UTP_TYPE_IDT77105) { IFP2IFATM(sc->ifp)->mib.device = ATM_DEVICE_PROATM25; IFP2IFATM(sc->ifp)->mib.pcr = ATM_RATE_25_6M; IFP2IFATM(sc->ifp)->mib.media = IFM_ATM_UTP_25; sc->flags |= PATM_25M; patm_printf(sc, "ProATM 25 interface; "); } else { /* cannot really know which media */ IFP2IFATM(sc->ifp)->mib.device = ATM_DEVICE_PROATM155; IFP2IFATM(sc->ifp)->mib.pcr = ATM_RATE_155M; IFP2IFATM(sc->ifp)->mib.media = IFM_ATM_MM_155; patm_printf(sc, "ProATM 155 interface; "); } bcopy(sc->eeprom + PATM_PROATM_MAC_OFFSET, IFP2IFATM(sc->ifp)->mib.esi, sizeof(IFP2IFATM(sc->ifp)->mib.esi)); } else { if (sc->utopia.chip->type == UTP_TYPE_IDT77105) { IFP2IFATM(sc->ifp)->mib.device = ATM_DEVICE_IDTABR25; IFP2IFATM(sc->ifp)->mib.pcr = ATM_RATE_25_6M; IFP2IFATM(sc->ifp)->mib.media = IFM_ATM_UTP_25; sc->flags |= PATM_25M; patm_printf(sc, "IDT77252 25MBit interface; "); } else { /* cannot really know which media */ IFP2IFATM(sc->ifp)->mib.device = ATM_DEVICE_IDTABR155; IFP2IFATM(sc->ifp)->mib.pcr = ATM_RATE_155M; IFP2IFATM(sc->ifp)->mib.media = IFM_ATM_MM_155; patm_printf(sc, "IDT77252 155MBit interface; "); } bcopy(sc->eeprom + PATM_IDT_MAC_OFFSET, IFP2IFATM(sc->ifp)->mib.esi, sizeof(IFP2IFATM(sc->ifp)->mib.esi)); } printf("idt77252 Rev. 
%c; %s PHY\n", 'A' + sc->revision, sc->utopia.chip->name); utopia_reset_media(&sc->utopia); utopia_init_media(&sc->utopia); /* * Determine RAM size */ for (a = 0; a < 0x20000; a++) patm_sram_write(sc, a, 0); patm_sram_write(sc, 0, 0xdeadbeef); if (patm_sram_read(sc, 0x4004) == 0xdeadbeef) sc->mmap = &idt_mmap[0]; else if (patm_sram_read(sc, 0x8000) == 0xdeadbeef) sc->mmap = &idt_mmap[1]; else if (patm_sram_read(sc, 0x20000) == 0xdeadbeef) sc->mmap = &idt_mmap[2]; else sc->mmap = &idt_mmap[3]; IFP2IFATM(sc->ifp)->mib.vci_bits = sc->mmap->vcbits - IFP2IFATM(sc->ifp)->mib.vpi_bits; IFP2IFATM(sc->ifp)->mib.max_vccs = sc->mmap->max_conn; patm_sram_write(sc, 0, 0); patm_printf(sc, "%uK x 32 SRAM; %u connections\n", sc->mmap->sram, sc->mmap->max_conn); /* initialize status queues */ error = patm_sq_init(sc); if (error != 0) goto fail; /* get TST */ sc->tst_soft = malloc(sizeof(uint32_t) * sc->mmap->tst_size, M_DEVBUF, M_WAITOK); /* allocate all the receive buffer stuff */ error = patm_rbuf_init(sc); if (error != 0) goto fail; /* * Allocate SCD tag * * Don't use BUS_DMA_ALLOCNOW, because we never need bouncing with * bus_dmamem_alloc() */ error = bus_dma_tag_create(NULL, PAGE_SIZE, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, sizeof(struct patm_scd), 1, sizeof(struct patm_scd), 0, NULL, NULL, &sc->scd_tag); if (error) { patm_printf(sc, "SCD DMA tag create %d\n", error); goto fail; } LIST_INIT(&sc->scd_list); /* allocate VCC zone and pointers */ if ((sc->vcc_zone = uma_zcreate("PATM vccs", sizeof(struct patm_vcc), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0)) == NULL) { patm_printf(sc, "cannot allocate zone for vccs\n"); goto fail; } sc->vccs = malloc(sizeof(sc->vccs[0]) * sc->mmap->max_conn, M_DEVBUF, M_WAITOK | M_ZERO); /* allocate transmission resources */ error = patm_txmap_init(sc); if (error != 0) goto fail; /* poll while we are not running */ sc->utopia.flags |= UTP_FL_POLL_CARRIER; patm_debug(sc, ATTACH, "attaching interface"); atm_ifattach(ifp); #ifdef ENABLE_BPF bpfattach(ifp, DLT_ATM_RFC1483, sizeof(struct atmllc)); #endif patm_debug(sc, ATTACH, "attaching interrupt handler"); error = bus_setup_intr(dev, sc->irqres, INTR_TYPE_NET, patm_intr, sc, &sc->ih); if (error != 0) { patm_printf(sc, "could not setup interrupt\n"); atm_ifdetach(sc->ifp); if_free(sc->ifp); goto fail; } return (0); fail: patm_destroy(sc); return (error); } /* * Detach */ static int patm_detach(device_t dev) { struct patm_softc *sc; - sc = (struct patm_softc *)device_get_softc(dev); + sc = device_get_softc(dev); mtx_lock(&sc->mtx); patm_stop(sc); if (sc->utopia.state & UTP_ST_ATTACHED) { patm_debug(sc, ATTACH, "detaching utopia"); utopia_stop(&sc->utopia); utopia_detach(&sc->utopia); } mtx_unlock(&sc->mtx); atm_ifdetach(sc->ifp); if_free(sc->ifp); patm_destroy(sc); return (0); } /* * Destroy everything. Assume we are stopped. 
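 * Every resource pointer is checked before it is released, so this is
 * also safe to call from the attach failure path where only a part of
 * the resources has been allocated.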
 */
static void
patm_destroy(struct patm_softc *sc)
{
	u_int i;
	struct patm_txmap *map;

	if (sc->ih != NULL)
		bus_teardown_intr(sc->dev, sc->irqres, sc->ih);

	if (sc->tx_mapzone != NULL) {
		/* all maps must be free */
		while ((map = SLIST_FIRST(&sc->tx_maps_free)) != NULL) {
			bus_dmamap_destroy(sc->tx_tag, map->map);
			SLIST_REMOVE_HEAD(&sc->tx_maps_free, link);
			uma_zfree(sc->tx_mapzone, map);
		}
		uma_zdestroy(sc->tx_mapzone);
	}

	if (sc->scd_tag != NULL)
		bus_dma_tag_destroy(sc->scd_tag);
	if (sc->tx_tag != NULL)
		bus_dma_tag_destroy(sc->tx_tag);

	if (sc->vccs != NULL) {
		for (i = 0; i < sc->mmap->max_conn; i++)
			if (sc->vccs[i] != NULL)
				uma_zfree(sc->vcc_zone, sc->vccs[i]);
		free(sc->vccs, M_DEVBUF);
	}
	if (sc->vcc_zone != NULL)
		uma_zdestroy(sc->vcc_zone);

	if (sc->lbufs != NULL) {
		for (i = 0; i < sc->lbuf_max; i++)
			bus_dmamap_destroy(sc->lbuf_tag, sc->lbufs[i].map);
		free(sc->lbufs, M_DEVBUF);
	}
	if (sc->lbuf_tag != NULL)
		bus_dma_tag_destroy(sc->lbuf_tag);

	if (sc->sbuf_pool != NULL)
		mbp_destroy(sc->sbuf_pool);
	if (sc->vbuf_pool != NULL)
		mbp_destroy(sc->vbuf_pool);
	if (sc->sbuf_tag != NULL)
		bus_dma_tag_destroy(sc->sbuf_tag);

	if (sc->tst_soft != NULL)
		free(sc->tst_soft, M_DEVBUF);

	/*
	 * Free all status queue memory resources
	 */
	if (sc->tsq != NULL) {
		bus_dmamap_unload(sc->sq_tag, sc->sq_map);
		bus_dmamem_free(sc->sq_tag, sc->tsq, sc->sq_map);
		bus_dma_tag_destroy(sc->sq_tag);
	}

	if (sc->irqres != NULL)
		bus_release_resource(sc->dev, SYS_RES_IRQ,
		    sc->irqid, sc->irqres);
	if (sc->memres != NULL)
		bus_release_resource(sc->dev, SYS_RES_MEMORY,
		    IDT_PCI_REG_MEMBASE, sc->memres);

	/* this was initialized unconditionally */
	sysctl_ctx_free(&sc->sysctl_ctx);
	cv_destroy(&sc->vcc_cv);
	mtx_destroy(&sc->tst_lock);
	mtx_destroy(&sc->mtx);
}

/*
 * Try to find a variable in the environment and parse it as an unsigned
 * integer.
 */
static void
patm_env_getuint(struct patm_softc *sc, u_int *var, const char *name)
{
	char full[IFNAMSIZ + 3 + 20];
	char *val, *end;
	u_long u;

	snprintf(full, sizeof(full), "hw.%s.%s",
	    device_get_nameunit(sc->dev), name);

	if ((val = getenv(full)) != NULL) {
		u = strtoul(val, &end, 0);
		if (end > val && *end == '\0') {
			if (bootverbose)
				patm_printf(sc, "%s=%lu\n", full, u);
			*var = u;
		}
		freeenv(val);
	}
}

/*
 * Sysctl handler for internal statistics
 *
 * LOCK: unlocked, needed
 */
static int
patm_sysctl_istats(SYSCTL_HANDLER_ARGS)
{
	struct patm_softc *sc = arg1;
	uint32_t *ret;
	int error;

	ret = malloc(sizeof(sc->stats), M_TEMP, M_WAITOK);

	mtx_lock(&sc->mtx);
	bcopy(&sc->stats, ret, sizeof(sc->stats));
	mtx_unlock(&sc->mtx);

	error = SYSCTL_OUT(req, ret, sizeof(sc->stats));
	free(ret, M_TEMP);

	return (error);
}

/*
 * Sysctl handler for EEPROM
 *
 * LOCK: unlocked, needed
 */
static int
patm_sysctl_eeprom(SYSCTL_HANDLER_ARGS)
{
	struct patm_softc *sc = arg1;
	void *ret;
	int error;

	ret = malloc(sizeof(sc->eeprom), M_TEMP, M_WAITOK);

	mtx_lock(&sc->mtx);
	bcopy(sc->eeprom, ret, sizeof(sc->eeprom));
	mtx_unlock(&sc->mtx);

	error = SYSCTL_OUT(req, ret, sizeof(sc->eeprom));
	free(ret, M_TEMP);

	return (error);
}

/*
 * Read the EEPROM.
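 * The serial EEPROM is bit-banged through the general purpose register:
 * a CS transition resets the chip, the read command for address 0 is
 * clocked out bit by bit, and then 256 bytes are clocked in MSB first
 * by toggling EESCLK for each bit.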
We assume that this is a XIRCOM 25020 */ static void patm_read_eeprom(struct patm_softc *sc) { u_int gp; uint8_t byte; int i, addr; static const uint32_t tab[] = { /* CS transition to reset the chip */ IDT_GP_EECS | IDT_GP_EESCLK, 0, /* read command 0x03 */ IDT_GP_EESCLK, 0, IDT_GP_EESCLK, 0, IDT_GP_EESCLK, 0, IDT_GP_EESCLK, 0, IDT_GP_EESCLK, 0, IDT_GP_EESCLK, IDT_GP_EEDO, IDT_GP_EESCLK | IDT_GP_EEDO, IDT_GP_EEDO, IDT_GP_EESCLK | IDT_GP_EEDO, 0, /* address 0x00 */ IDT_GP_EESCLK, 0, IDT_GP_EESCLK, 0, IDT_GP_EESCLK, 0, IDT_GP_EESCLK, 0, IDT_GP_EESCLK, 0, IDT_GP_EESCLK, 0, IDT_GP_EESCLK, 0, IDT_GP_EESCLK, 0, }; /* go to a known state (chip enabled) */ gp = patm_nor_read(sc, IDT_NOR_GP); gp &= ~(IDT_GP_EESCLK | IDT_GP_EECS | IDT_GP_EEDO); for (i = 0; i < sizeof(tab) / sizeof(tab[0]); i++) { patm_nor_write(sc, IDT_NOR_GP, gp | tab[i]); DELAY(40); } /* read out the prom */ for (addr = 0; addr < 256; addr++) { byte = 0; for (i = 0; i < 8; i++) { byte <<= 1; if (patm_nor_read(sc, IDT_NOR_GP) & IDT_GP_EEDI) byte |= 1; /* rising CLK */ patm_nor_write(sc, IDT_NOR_GP, gp | IDT_GP_EESCLK); DELAY(40); /* falling clock */ patm_nor_write(sc, IDT_NOR_GP, gp); DELAY(40); } sc->eeprom[addr] = byte; } } /* * PHY access read */ static int patm_phy_readregs(struct ifatm *ifatm, u_int reg, uint8_t *val, u_int *n) { struct patm_softc *sc = ifatm->ifp->if_softc; u_int cnt = *n; if (reg >= 0x100) return (EINVAL); patm_cmd_wait(sc); while (reg < 0x100 && cnt > 0) { patm_nor_write(sc, IDT_NOR_CMD, IDT_MKCMD_RUTIL(1, 0, reg)); patm_cmd_wait(sc); *val = patm_nor_read(sc, IDT_NOR_D0); patm_debug(sc, PHY, "phy(%02x)=%02x", reg, *val); val++; reg++; cnt--; } *n = *n - cnt; return (0); } /* * Write PHY reg */ static int patm_phy_writereg(struct ifatm *ifatm, u_int reg, u_int mask, u_int val) { struct patm_softc *sc = ifatm->ifp->if_softc; u_int old, new; if (reg >= 0x100) return (EINVAL); patm_cmd_wait(sc); patm_nor_write(sc, IDT_NOR_CMD, IDT_MKCMD_RUTIL(1, 0, reg)); patm_cmd_wait(sc); old = patm_nor_read(sc, IDT_NOR_D0); new = (old & ~mask) | (val & mask); patm_debug(sc, PHY, "phy(%02x) %02x -> %02x", reg, old, new); patm_nor_write(sc, IDT_NOR_D0, new); patm_nor_write(sc, IDT_NOR_CMD, IDT_MKCMD_WUTIL(1, 0, reg)); patm_cmd_wait(sc); return (0); } /* * Allocate a large chunk of DMA able memory for the transmit * and receive status queues. We align this to a page boundary * to ensure the alignment. 
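 * The single allocation is laid out as the TSQ (IDT_TSQ_SIZE *
 * IDT_TSQE_SIZE bytes) followed by the RSQ (PATM_RSQ_SIZE *
 * IDT_RSQE_SIZE bytes) followed by the raw cell handle area
 * (IDT_RAWHND_SIZE bytes); the virtual and physical addresses of the
 * parts are derived from the same offsets.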
*/ static int patm_sq_init(struct patm_softc *sc) { int error; void *p; /* compute size of the two queues */ sc->sq_size = IDT_TSQ_SIZE * IDT_TSQE_SIZE + PATM_RSQ_SIZE * IDT_RSQE_SIZE + IDT_RAWHND_SIZE; patm_debug(sc, ATTACH, "allocating status queues (%zu) ...", sc->sq_size); /* * allocate tag * Don't use BUS_DMA_ALLOCNOW, because we never need bouncing with * bus_dmamem_alloc() */ error = bus_dma_tag_create(NULL, PATM_SQ_ALIGNMENT, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, sc->sq_size, 1, sc->sq_size, 0, NULL, NULL, &sc->sq_tag); if (error) { patm_printf(sc, "memory DMA tag create %d\n", error); return (error); } /* allocate memory */ error = bus_dmamem_alloc(sc->sq_tag, &p, 0, &sc->sq_map); if (error) { patm_printf(sc, "memory DMA alloc %d\n", error); bus_dma_tag_destroy(sc->sq_tag); return (error); } /* map it */ sc->tsq_phy = 0x1fff; error = bus_dmamap_load(sc->sq_tag, sc->sq_map, p, sc->sq_size, patm_load_callback, &sc->tsq_phy, BUS_DMA_NOWAIT); if (error) { patm_printf(sc, "memory DMA map load %d\n", error); bus_dmamem_free(sc->sq_tag, p, sc->sq_map); bus_dma_tag_destroy(sc->sq_tag); return (error); } /* set queue start */ sc->tsq = p; sc->rsq = (void *)((char *)p + IDT_TSQ_SIZE * IDT_TSQE_SIZE); sc->rsq_phy = sc->tsq_phy + IDT_TSQ_SIZE * IDT_TSQE_SIZE; sc->rawhnd = (void *)((char *)sc->rsq + PATM_RSQ_SIZE * IDT_RSQE_SIZE); sc->rawhnd_phy = sc->rsq_phy + PATM_RSQ_SIZE * IDT_RSQE_SIZE; return (0); } /* * Initialize all receive buffer stuff */ static int patm_rbuf_init(struct patm_softc *sc) { u_int i; int error; patm_debug(sc, ATTACH, "allocating Rx buffer resources ..."); /* * Create a tag for small buffers. We allocate these page wise. * Don't use BUS_DMA_ALLOCNOW, because we never need bouncing with * bus_dmamem_alloc() */ if ((error = bus_dma_tag_create(NULL, PAGE_SIZE, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, SMBUF_PAGE_SIZE, 1, SMBUF_PAGE_SIZE, 0, NULL, NULL, &sc->sbuf_tag)) != 0) { patm_printf(sc, "sbuf DMA tag create %d\n", error); return (error); } error = mbp_create(&sc->sbuf_pool, "patm sbufs", sc->sbuf_tag, SMBUF_MAX_PAGES, SMBUF_PAGE_SIZE, SMBUF_CHUNK_SIZE); if (error != 0) { patm_printf(sc, "smbuf pool create %d\n", error); return (error); } error = mbp_create(&sc->vbuf_pool, "patm vbufs", sc->sbuf_tag, VMBUF_MAX_PAGES, SMBUF_PAGE_SIZE, VMBUF_CHUNK_SIZE); if (error != 0) { patm_printf(sc, "vmbuf pool create %d\n", error); return (error); } /* * Create a tag for large buffers. * Don't use BUS_DMA_ALLOCNOW, because it makes no sense with multiple * maps using one tag. Rather use BUS_DMA_NOWAIT when loading the map * to prevent EINPROGRESS. */ if ((error = bus_dma_tag_create(NULL, 4, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1, MCLBYTES, 0, NULL, NULL, &sc->lbuf_tag)) != 0) { patm_printf(sc, "lbuf DMA tag create %d\n", error); return (error); } if (sc->lbuf_max < IDT_FBQ_SIZE) sc->lbuf_max = LMBUF_MAX; sc->lbufs = malloc(sizeof(sc->lbufs[0]) * sc->lbuf_max, M_DEVBUF, M_ZERO | M_WAITOK); SLIST_INIT(&sc->lbuf_free_list); for (i = 0; i < sc->lbuf_max; i++) { struct lmbuf *b = &sc->lbufs[i]; error = bus_dmamap_create(sc->lbuf_tag, 0, &b->map); if (error) { /* must deallocate here, because a test for NULL * does not work on most archs */ while (i-- > 0) bus_dmamap_destroy(sc->lbuf_tag, sc->lbufs[i].map); free(sc->lbufs, M_DEVBUF); sc->lbufs = NULL; return (error); } b->handle = i; SLIST_INSERT_HEAD(&sc->lbuf_free_list, b, link); } return (0); } /* * Allocate everything needed for the transmission maps. 
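 * The map structures themselves come from a UMA zone;
 * PATM_CFG_TXMAPS_INIT maps are created up front and kept on the
 * tx_maps_free list from which the transmit path takes them.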
/* * Allocate everything needed for the transmission maps. */ static int patm_txmap_init(struct patm_softc *sc) { int error; struct patm_txmap *map; /* get transmission tag */ error = bus_dma_tag_create(NULL, 1, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, 65536, IDT_SCQ_SIZE - 1, 65536, 0, NULL, NULL, &sc->tx_tag); if (error) { patm_printf(sc, "cannot allocate TX tag %d\n", error); return (error); } if ((sc->tx_mapzone = uma_zcreate("PATM tx maps", sizeof(struct patm_txmap), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0)) == NULL) return (ENOMEM); if (sc->tx_maxmaps < PATM_CFG_TXMAPS_MAX) sc->tx_maxmaps = PATM_CFG_TXMAPS_MAX; for (sc->tx_nmaps = 0; sc->tx_nmaps < PATM_CFG_TXMAPS_INIT; sc->tx_nmaps++) { map = uma_zalloc(sc->tx_mapzone, M_WAITOK); error = bus_dmamap_create(sc->tx_tag, 0, &map->map); if (error) { uma_zfree(sc->tx_mapzone, map); return (ENOMEM); } SLIST_INSERT_HEAD(&sc->tx_maps_free, map, link); } return (0); } #ifdef PATM_DEBUG /* * Sysctl handler for REGS * * LOCK: unlocked, needed */ static int patm_sysctl_regs(SYSCTL_HANDLER_ARGS) { struct patm_softc *sc = arg1; uint32_t *ret; int error, i; ret = malloc(IDT_NOR_END, M_TEMP, M_WAITOK); mtx_lock(&sc->mtx); for (i = 0; i < IDT_NOR_END; i += 4) ret[i / 4] = patm_nor_read(sc, i); mtx_unlock(&sc->mtx); error = SYSCTL_OUT(req, ret, IDT_NOR_END); free(ret, M_TEMP); return (error); } /* * Sysctl handler for TSQ * * LOCK: unlocked, needed */ static int patm_sysctl_tsq(SYSCTL_HANDLER_ARGS) { struct patm_softc *sc = arg1; void *ret; int error; ret = malloc(IDT_TSQ_SIZE * IDT_TSQE_SIZE, M_TEMP, M_WAITOK); mtx_lock(&sc->mtx); memcpy(ret, sc->tsq, IDT_TSQ_SIZE * IDT_TSQE_SIZE); mtx_unlock(&sc->mtx); error = SYSCTL_OUT(req, ret, IDT_TSQ_SIZE * IDT_TSQE_SIZE); free(ret, M_TEMP); return (error); } /* * debugging */ static struct patm_softc * patm_dump_unit(u_int unit) { devclass_t dc; struct patm_softc *sc; dc = devclass_find("patm"); if (dc == NULL) { printf("%s: can't find devclass\n", __func__); return (NULL); } sc = devclass_get_softc(dc, unit); if (sc == NULL) { printf("%s: invalid unit number: %u\n", __func__, unit); return (NULL); } return (sc); } int patm_dump_vc(u_int unit, u_int vc) { struct patm_softc *sc; uint32_t tct[8]; uint32_t rct[4]; uint32_t scd[12]; u_int i; if ((sc = patm_dump_unit(unit)) == NULL) return (0); for (i = 0; i < 8; i++) tct[i] = patm_sram_read(sc, vc * 8 + i); for (i = 0; i < 4; i++) rct[i] = patm_sram_read(sc, sc->mmap->rct + vc * 4 + i); for (i = 0; i < 12; i++) scd[i] = patm_sram_read(sc, (tct[0] & 0x7ffff) + i); printf("TCT%3u: %08x %08x %08x %08x %08x %08x %08x %08x\n", vc, tct[0], tct[1], tct[2], tct[3], tct[4], tct[5], tct[6], tct[7]); printf("RCT%3u: %08x %08x %08x %08x\n", vc, rct[0], rct[1], rct[2], rct[3]); printf("SCD%3u: %08x %08x %08x %08x %08x %08x %08x %08x\n", vc, scd[0], scd[1], scd[2], scd[3], scd[4], scd[5], scd[6], scd[7]); printf(" %08x %08x %08x %08x\n", scd[8], scd[9], scd[10], scd[11]); return (0); } int patm_dump_regs(u_int unit) { struct patm_softc *sc; u_int i; if ((sc = patm_dump_unit(unit)) == NULL) return (0); for (i = 0; i <= IDT_NOR_DNOW; i += 4) printf("%x: %08x\n", i, patm_nor_read(sc, i)); return (0); } int patm_dump_sram(u_int unit, u_int from, u_int words) { struct patm_softc *sc; u_int i; if ((sc = patm_dump_unit(unit)) == NULL) return (0); for (i = 0; i < words; i++) { if (i % 8 == 0) printf("%05x:", from + i); printf(" %08x", patm_sram_read(sc, from + i)); if (i % 8 == 7) printf("\n"); } if (i % 8 != 0) printf("\n"); return (0); } #endif Index: head/sys/dev/patm/if_patm_tx.c
=================================================================== --- head/sys/dev/patm/if_patm_tx.c (revision 147720) +++ head/sys/dev/patm/if_patm_tx.c (revision 147721) @@ -1,1276 +1,1276 @@ /*- * Copyright (c) 2003 * Fraunhofer Institute for Open Communication Systems (FhG Fokus). * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * The TST allocation algorithm is from the IDT driver which is: * * Copyright (c) 2000, 2001 Richard Hodges and Matriplex, inc. * All rights reserved. * * Copyright (c) 1996, 1997, 1998, 1999 Mark Tinguely * All rights reserved. * * Author: Hartmut Brandt * * Driver for IDT77252 based cards like ProSum's. 
*/ #include __FBSDID("$FreeBSD$"); #include "opt_inet.h" #include "opt_natm.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef ENABLE_BPF #include #endif #include #include #include #include #include #include #include #include #include #include static struct mbuf *patm_tx_pad(struct patm_softc *sc, struct mbuf *m0); static void patm_launch(struct patm_softc *sc, struct patm_scd *scd); static struct patm_txmap *patm_txmap_get(struct patm_softc *); static void patm_load_txbuf(void *, bus_dma_segment_t *, int, bus_size_t, int); static void patm_tst_alloc(struct patm_softc *sc, struct patm_vcc *vcc); static void patm_tst_free(struct patm_softc *sc, struct patm_vcc *vcc); static void patm_tst_timer(void *p); static void patm_tst_update(struct patm_softc *); static void patm_tct_start(struct patm_softc *sc, struct patm_vcc *); static const char *dump_scd(struct patm_softc *sc, struct patm_scd *scd) __unused; static void patm_tct_print(struct patm_softc *sc, u_int cid) __unused; /* * Structure for communication with the loader function for transmission */ struct txarg { struct patm_softc *sc; struct patm_scd *scd; /* scheduling channel */ struct patm_vcc *vcc; /* the VCC of this PDU */ struct mbuf *mbuf; u_int hdr; /* cell header */ }; static __inline u_int cbr2slots(struct patm_softc *sc, struct patm_vcc *vcc) { /* compute the number of slots we need, make sure to get at least * the specified PCR */ return ((u_int)(((uint64_t)(sc->mmap->tst_size - 1) * vcc->vcc.tparam.pcr + IFP2IFATM(sc->ifp)->mib.pcr - 1) / IFP2IFATM(sc->ifp)->mib.pcr)); } static __inline u_int slots2cr(struct patm_softc *sc, u_int slots) { return ((slots * IFP2IFATM(sc->ifp)->mib.pcr + sc->mmap->tst_size - 2) / (sc->mmap->tst_size - 1)); } /* check if we can open this one */ int patm_tx_vcc_can_open(struct patm_softc *sc, struct patm_vcc *vcc) { /* check resources */ switch (vcc->vcc.traffic) { case ATMIO_TRAFFIC_CBR: { u_int slots = cbr2slots(sc, vcc); if (slots > sc->tst_free + sc->tst_reserve) return (EINVAL); break; } case ATMIO_TRAFFIC_VBR: if (vcc->vcc.tparam.scr > sc->bwrem) return (EINVAL); if (vcc->vcc.tparam.pcr > IFP2IFATM(sc->ifp)->mib.pcr) return (EINVAL); if (vcc->vcc.tparam.scr > vcc->vcc.tparam.pcr || vcc->vcc.tparam.mbs == 0) return (EINVAL); break; case ATMIO_TRAFFIC_ABR: if (vcc->vcc.tparam.tbe == 0 || vcc->vcc.tparam.nrm == 0) /* needed to compute CRM */ return (EINVAL); if (vcc->vcc.tparam.pcr > IFP2IFATM(sc->ifp)->mib.pcr || vcc->vcc.tparam.icr > vcc->vcc.tparam.pcr || vcc->vcc.tparam.mcr > vcc->vcc.tparam.icr) return (EINVAL); if (vcc->vcc.tparam.mcr > sc->bwrem || vcc->vcc.tparam.icr > sc->bwrem) return (EINVAL); break; } return (0); } #define NEXT_TAG(T) do { \ (T) = ((T) + 1) % IDT_TSQE_TAG_SPACE; \ } while (0) /* * open it */ void patm_tx_vcc_open(struct patm_softc *sc, struct patm_vcc *vcc) { struct patm_scd *scd; if (vcc->vcc.traffic == ATMIO_TRAFFIC_UBR) { /* we use UBR0 */ vcc->scd = sc->scd0; vcc->vflags |= PATM_VCC_TX_OPEN; return; } /* get an SCD */ scd = patm_scd_alloc(sc); if (scd == NULL) { /* should not happen */ patm_printf(sc, "out of SCDs\n"); return; } vcc->scd = scd; patm_scd_setup(sc, scd); patm_tct_setup(sc, scd, vcc); if (vcc->vcc.traffic != ATMIO_TRAFFIC_CBR) patm_tct_start(sc, vcc); vcc->vflags |= PATM_VCC_TX_OPEN; } /* * close the given vcc for transmission */ void patm_tx_vcc_close(struct patm_softc *sc, struct 
patm_vcc *vcc) { struct patm_scd *scd; struct mbuf *m; vcc->vflags |= PATM_VCC_TX_CLOSING; if (vcc->vcc.traffic == ATMIO_TRAFFIC_UBR) { /* let the queued PDUs go out */ vcc->scd = NULL; vcc->vflags &= ~(PATM_VCC_TX_OPEN | PATM_VCC_TX_CLOSING); return; } scd = vcc->scd; /* empty the waitq */ for (;;) { _IF_DEQUEUE(&scd->q, m); if (m == NULL) break; m_freem(m); } if (scd->num_on_card == 0) { /* we are idle */ vcc->vflags &= ~PATM_VCC_TX_OPEN; if (vcc->vcc.traffic == ATMIO_TRAFFIC_CBR) patm_tst_free(sc, vcc); patm_sram_write4(sc, scd->sram + 0, 0, 0, 0, 0); patm_sram_write4(sc, scd->sram + 4, 0, 0, 0, 0); patm_scd_free(sc, scd); vcc->scd = NULL; vcc->vflags &= ~PATM_VCC_TX_CLOSING; return; } /* speed up transmission */ patm_nor_write(sc, IDT_NOR_TCMDQ, IDT_TCMDQ_UIER(vcc->cid, 0xff)); patm_nor_write(sc, IDT_NOR_TCMDQ, IDT_TCMDQ_ULACR(vcc->cid, 0xff)); /* wait for the interrupt to drop the number to 0 */ patm_debug(sc, VCC, "%u buffers still on card", scd->num_on_card); } /* transmission side finally closed */ void patm_tx_vcc_closed(struct patm_softc *sc, struct patm_vcc *vcc) { patm_debug(sc, VCC, "%u.%u TX closed", vcc->vcc.vpi, vcc->vcc.vci); if (vcc->vcc.traffic == ATMIO_TRAFFIC_VBR) sc->bwrem += vcc->vcc.tparam.scr; }
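/*
 * Illustrative sketch, not part of the driver: patm_start() below maps a
 * (VPI, VCI) pair to a flat connection id before indexing sc->vccs. The
 * real PATM_CID/LEGAL_VPI/LEGAL_VCI macros are not visible in this diff,
 * so the layout used here - VPI in the high bits, VCI in the low bits -
 * is only an assumption for illustration.
 */
#include <stdio.h>

#define VPI_BITS 2	/* assumed geometry for the demo */
#define VCI_BITS 10

static int
legal_vpi(unsigned vpi)
{
	return (vpi < (1U << VPI_BITS));
}

static int
legal_vci(unsigned vci)
{
	return (vci < (1U << VCI_BITS));
}

static unsigned
cid(unsigned vpi, unsigned vci)
{
	return ((vpi << VCI_BITS) | vci);	/* hypothetical flat index */
}

int
main(void)
{
	unsigned vpi = 1, vci = 32;

	if (!legal_vpi(vpi) || !legal_vci(vci) || vci == 0) {
		puts("illegal VCC");
		return (1);
	}
	printf("vpi=%u vci=%u -> cid=%u of %u\n", vpi, vci, cid(vpi, vci),
	    1U << (VPI_BITS + VCI_BITS));
	return (0);
}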
/* * Pull off packets from the interface queue and try to transmit them. * If the transmission fails because of a full transmit channel, we drop * packets for CBR and queue them for other channels up to a limit. * This limit should depend on the CDVT for VBR and ABR, but it doesn't. */ void patm_start(struct ifnet *ifp) { - struct patm_softc *sc = (struct patm_softc *)ifp->if_softc; + struct patm_softc *sc = ifp->if_softc; struct mbuf *m; struct atm_pseudohdr *aph; u_int vpi, vci, cid; struct patm_vcc *vcc; mtx_lock(&sc->mtx); if (!(ifp->if_flags & IFF_RUNNING)) { mtx_unlock(&sc->mtx); return; } while (1) { /* get a new mbuf */ IF_DEQUEUE(&ifp->if_snd, m); if (m == NULL) break; /* split off the pseudo header */ if (m->m_len < sizeof(*aph) && (m = m_pullup(m, sizeof(*aph))) == NULL) { sc->ifp->if_oerrors++; continue; } aph = mtod(m, struct atm_pseudohdr *); vci = ATM_PH_VCI(aph); vpi = ATM_PH_VPI(aph); m_adj(m, sizeof(*aph)); /* reject empty packets */ if (m->m_pkthdr.len == 0) { m_freem(m); sc->ifp->if_oerrors++; continue; } /* check whether this is a legal vcc */ if (!LEGAL_VPI(sc, vpi) || !LEGAL_VCI(sc, vci) || vci == 0) { m_freem(m); sc->ifp->if_oerrors++; continue; } cid = PATM_CID(sc, vpi, vci); vcc = sc->vccs[cid]; if (vcc == NULL) { m_freem(m); sc->ifp->if_oerrors++; continue; } /* must be multiple of 48 if not AAL5 */ if (vcc->vcc.aal == ATMIO_AAL_0 || vcc->vcc.aal == ATMIO_AAL_34) { /* XXX AAL3/4 format? */ if (m->m_pkthdr.len % 48 != 0 && (m = patm_tx_pad(sc, m)) == NULL) { sc->ifp->if_oerrors++; continue; } } else if (vcc->vcc.aal == ATMIO_AAL_RAW) { switch (vcc->vflags & PATM_RAW_FORMAT) { default: case PATM_RAW_CELL: if (m->m_pkthdr.len != 53) { sc->ifp->if_oerrors++; m_freem(m); continue; } break; case PATM_RAW_NOHEC: if (m->m_pkthdr.len != 52) { sc->ifp->if_oerrors++; m_freem(m); continue; } break; case PATM_RAW_CS: if (m->m_pkthdr.len != 64) { sc->ifp->if_oerrors++; m_freem(m); continue; } break; } } /* save data */ m->m_pkthdr.header = vcc; /* try to put it on the channel's queue */ if (_IF_QFULL(&vcc->scd->q)) { sc->ifp->if_oerrors++; sc->stats.tx_qfull++; m_freem(m); continue; } _IF_ENQUEUE(&vcc->scd->q, m); #ifdef ENABLE_BPF if (!(vcc->vcc.flags & ATMIO_FLAG_NG) && (vcc->vcc.aal == ATMIO_AAL_5) && (vcc->vcc.flags & ATM_PH_LLCSNAP)) BPF_MTAP(ifp, m); #endif /* kick the channel to life */ patm_launch(sc, vcc->scd); } mtx_unlock(&sc->mtx); } /* * Pad a non-AAL5 packet to a multiple of 48 bytes. * We assume AAL0 only. We still have to decide on the format of AAL3/4. */ static struct mbuf * patm_tx_pad(struct patm_softc *sc, struct mbuf *m0) { struct mbuf *last, *m; u_int plen, pad, space; plen = m_length(m0, &last); if (plen != m0->m_pkthdr.len) { patm_printf(sc, "%s: mbuf length mismatch %d %u\n", __func__, m0->m_pkthdr.len, plen); m0->m_pkthdr.len = plen; if (plen == 0) { m_freem(m0); sc->ifp->if_oerrors++; return (NULL); } if (plen % 48 == 0) return (m0); } pad = 48 - plen % 48; m0->m_pkthdr.len += pad; if (M_WRITABLE(last)) { if (M_TRAILINGSPACE(last) >= pad) { bzero(last->m_data + last->m_len, pad); last->m_len += pad; return (m0); } space = M_LEADINGSPACE(last); if (space + M_TRAILINGSPACE(last) >= pad) { bcopy(last->m_data, last->m_data + space, last->m_len); last->m_data -= space; bzero(last->m_data + last->m_len, pad); last->m_len += pad; return (m0); } } MGET(m, M_DONTWAIT, MT_DATA); if (m == NULL) { m_freem(m0); sc->ifp->if_oerrors++; return (NULL); } bzero(mtod(m, u_char *), pad); m->m_len = pad; last->m_next = m; return (m0); }
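/*
 * Illustrative sketch, not part of the driver: the arithmetic behind
 * patm_tx_pad() above. A non-AAL5 PDU must be a whole number of 48-byte
 * cell payloads, so the pad is the distance to the next multiple of 48;
 * all the mbuf handling is elided here.
 */
#include <stdio.h>

static unsigned
pad48(unsigned len)
{
	/* distance from len to the next multiple of 48 (multiples stay) */
	return (len % 48 == 0 ? 0 : 48 - len % 48);
}

int
main(void)
{
	static const unsigned lens[] = { 40, 48, 53, 96, 100 };
	unsigned i, p;

	for (i = 0; i < sizeof(lens) / sizeof(lens[0]); i++) {
		p = pad48(lens[i]);
		printf("len %3u -> pad %2u -> total %3u\n",
		    lens[i], p, lens[i] + p);
	}
	return (0);
}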
/* * Try to put as many packets as possible from the channel's queue onto the channel */ static void patm_launch(struct patm_softc *sc, struct patm_scd *scd) { struct txarg a; struct mbuf *m, *tmp; u_int segs; struct patm_txmap *map; int error; a.sc = sc; a.scd = scd; /* limit the number of outstanding packets to the tag space */ while (scd->num_on_card < IDT_TSQE_TAG_SPACE) { /* get the next packet */ _IF_DEQUEUE(&scd->q, m); if (m == NULL) break; a.vcc = m->m_pkthdr.header; /* we must know the number of segments beforehand - counting * may actually give a wrong number of segments for AAL_RAW * where we still need to remove the cell header */ segs = 0; for (tmp = m; tmp != NULL; tmp = tmp->m_next) if (tmp->m_len != 0) segs++; /* check whether there is space in the queue */ if (segs >= scd->space) { /* put back */ _IF_PREPEND(&scd->q, m); sc->stats.tx_out_of_tbds++; break; } /* get a DMA map */ if ((map = patm_txmap_get(sc)) == NULL) { _IF_PREPEND(&scd->q, m); sc->stats.tx_out_of_maps++; break; } /* load the map */ m->m_pkthdr.header = map; a.mbuf = m; /* handle AAL_RAW */ if (a.vcc->vcc.aal == ATMIO_AAL_RAW) { u_char hdr[4]; m_copydata(m, 0, 4, hdr); a.hdr = (hdr[0] << 24) | (hdr[1] << 16) | (hdr[2] << 8) | hdr[3]; switch (a.vcc->vflags & PATM_RAW_FORMAT) { default: case PATM_RAW_CELL: m_adj(m, 5); break; case PATM_RAW_NOHEC: m_adj(m, 4); break; case PATM_RAW_CS: m_adj(m, 16); break; } } else a.hdr = IDT_TBD_HDR(a.vcc->vcc.vpi, a.vcc->vcc.vci, 0, 0); error = bus_dmamap_load_mbuf(sc->tx_tag, map->map, m, patm_load_txbuf, &a, BUS_DMA_NOWAIT); if (error == EFBIG) { if ((m = m_defrag(m, M_DONTWAIT)) == NULL) { sc->ifp->if_oerrors++; continue; } error = bus_dmamap_load_mbuf(sc->tx_tag, map->map, m, patm_load_txbuf, &a, BUS_DMA_NOWAIT); } if (error != 0) { sc->stats.tx_load_err++; sc->ifp->if_oerrors++; SLIST_INSERT_HEAD(&sc->tx_maps_free, map, link); m_freem(m); continue; } sc->ifp->if_opackets++; } } /* * Load the DMA segments into the scheduling channel */ static void patm_load_txbuf(void *uarg, bus_dma_segment_t *segs, int nseg, bus_size_t mapsize, int error) { struct txarg *a = uarg; struct patm_scd *scd = a->scd; u_int w1, w3, cnt; struct idt_tbd *tbd = NULL; u_int rest = mapsize; if (error != 0) return; cnt = 0; while (nseg > 0) { if (segs->ds_len == 0) { /* transmit buffer length must be > 0 */ nseg--; segs++; continue; } /* rest after this buffer */ rest -= segs->ds_len; /* put together status word */ w1 = 0; if (rest < 48 /* && a->vcc->vcc.aal != ATMIO_AAL_5 */) /* last cell is in this buffer */ w1 |= IDT_TBD_EPDU; if (a->vcc->vcc.aal == ATMIO_AAL_5) w1 |= IDT_TBD_AAL5; else if (a->vcc->vcc.aal == ATMIO_AAL_34) w1 |= IDT_TBD_AAL34; else w1 |= IDT_TBD_AAL0; w1 |= segs->ds_len; /* AAL5 PDU length (unpadded) */ if (a->vcc->vcc.aal == ATMIO_AAL_5) w3 = mapsize; else w3 = 0; if (rest == 0) w1 |= IDT_TBD_TSIF | IDT_TBD_GTSI | (scd->tag << IDT_TBD_TAG_SHIFT); tbd = &scd->scq[scd->tail]; tbd->flags = htole32(w1); tbd->addr = htole32(segs->ds_addr); tbd->aal5 = htole32(w3); tbd->hdr = htole32(a->hdr); patm_debug(a->sc, TX, "TBD(%u): %08x %08x %08x %08x", scd->tail, w1, segs->ds_addr, w3, a->hdr); /* go to next entry */ if (++scd->tail == IDT_SCQ_SIZE) scd->tail = 0; cnt++; nseg--; segs++; } scd->space -= cnt; scd->num_on_card++; KASSERT(rest == 0, ("bad mbuf")); KASSERT(cnt > 0, ("no segs")); KASSERT(scd->space > 0, ("scq full")); KASSERT(scd->on_card[scd->tag] == NULL, ("scd on_card wedged %u%s", scd->tag, dump_scd(a->sc, scd))); scd->on_card[scd->tag] = a->mbuf; a->mbuf->m_pkthdr.csum_data = cnt; NEXT_TAG(scd->tag); patm_debug(a->sc, TX, "SCD tail %u (%lx:%lx)", scd->tail, (u_long)scd->phy, (u_long)scd->phy + (scd->tail << IDT_TBD_SHIFT)); patm_sram_write(a->sc, scd->sram, scd->phy + (scd->tail << IDT_TBD_SHIFT)); if (patm_sram_read(a->sc, a->vcc->cid * 8 + 3) & IDT_TCT_IDLE) { /* * if the connection is idle, start it. We cannot rely * on a flag set by patm_tx_idle() here, because sometimes * the card seems to place an idle TSI into the TSQ but * forgets to raise an interrupt. */ patm_nor_write(a->sc, IDT_NOR_TCMDQ, IDT_TCMDQ_START(a->vcc->cid)); } }
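/*
 * Illustrative sketch, not part of the driver: patm_tx() below walks the
 * tag ring from the entry after last_tag up to and including the tag
 * reported in the status word, wrapping modulo IDT_TSQE_TAG_SPACE (this
 * is the errata 12 behaviour mentioned there). Reduced model with a tag
 * space of 8:
 */
#include <stdio.h>

#define TAG_SPACE 8	/* stand-in for IDT_TSQE_TAG_SPACE */
#define NEXT_TAG(T) do { (T) = ((T) + 1) % TAG_SPACE; } while (0)

int
main(void)
{
	unsigned last = 6;	/* tag of the last completion we saw */
	unsigned tag = 2;	/* tag just reported by the card */

	do {
		NEXT_TAG(last);
		/* prints 7, 0, 1, 2: everything up to 'tag' completes */
		printf("complete entry with tag %u\n", last);
	} while (last != tag);
	return (0);
}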
/* * packet transmitted */ void patm_tx(struct patm_softc *sc, u_int stamp, u_int status) { u_int cid, tag, last; struct mbuf *m; struct patm_vcc *vcc; struct patm_scd *scd; struct patm_txmap *map; /* get the connection */ cid = PATM_CID(sc, IDT_TBD_VPI(status), IDT_TBD_VCI(status)); if ((vcc = sc->vccs[cid]) == NULL) { /* closed UBR connection */ return; } scd = vcc->scd; tag = IDT_TSQE_TAG(stamp); last = scd->last_tag; if (tag == last) { patm_printf(sc, "same tag %u\n", tag); return; } /* Errata 12 requests us to free all entries up to the one * with the given tag. */ do { /* next tag to try */ NEXT_TAG(last); m = scd->on_card[last]; KASSERT(m != NULL, ("%stag=%u", dump_scd(sc, scd), tag)); scd->on_card[last] = NULL; patm_debug(sc, TX, "ok tag=%x", last); map = m->m_pkthdr.header; scd->space += m->m_pkthdr.csum_data; bus_dmamap_sync(sc->tx_tag, map->map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->tx_tag, map->map); m_freem(m); SLIST_INSERT_HEAD(&sc->tx_maps_free, map, link); scd->num_on_card--; if (vcc->vflags & PATM_VCC_TX_CLOSING) { if (scd->num_on_card == 0) { /* done with this VCC */ if (vcc->vcc.traffic == ATMIO_TRAFFIC_CBR) patm_tst_free(sc, vcc); patm_sram_write4(sc, scd->sram + 0, 0, 0, 0, 0); patm_sram_write4(sc, scd->sram + 4, 0, 0, 0, 0); patm_scd_free(sc, scd); vcc->scd = NULL; vcc->vflags &= ~PATM_VCC_TX_CLOSING; if (vcc->vcc.flags & ATMIO_FLAG_ASYNC) { patm_tx_vcc_closed(sc, vcc); if (!(vcc->vflags & PATM_VCC_OPEN)) patm_vcc_closed(sc, vcc); } else cv_signal(&sc->vcc_cv); return; } patm_debug(sc, VCC, "%u buffers still on card", scd->num_on_card); if (vcc->vcc.traffic == ATMIO_TRAFFIC_ABR) { /* insist on speeding up transmission for ABR */ patm_nor_write(sc, IDT_NOR_TCMDQ, IDT_TCMDQ_UIER(vcc->cid, 0xff)); patm_nor_write(sc, IDT_NOR_TCMDQ, IDT_TCMDQ_ULACR(vcc->cid, 0xff)); } } } while (last != tag); scd->last_tag = tag; if (vcc->vcc.traffic == ATMIO_TRAFFIC_ABR) { u_int acri, cps; acri = (patm_sram_read(sc, 8 * cid + 2) >> IDT_TCT_ACRI_SHIFT) & 0x3fff; cps = IFP2IFATM(sc->ifp)->mib.pcr * 32 / ((1 << (acri >> 10)) * (acri & 0x3ff)); if (cps != vcc->cps) { patm_debug(sc, VCC, "ACRI=%04x CPS=%u", acri, cps); ATMEV_SEND_ACR_CHANGED(IFP2IFATM(sc->ifp), vcc->vcc.vpi, vcc->vcc.vci, cps); vcc->cps = cps; } } patm_launch(sc, scd); } /* * VBR/ABR connection went idle * Either restart it or set the idle flag. */ void patm_tx_idle(struct patm_softc *sc, u_int cid) { struct patm_vcc *vcc; patm_debug(sc, VCC, "idle %u", cid); if ((vcc = sc->vccs[cid]) != NULL && (vcc->vflags & (PATM_VCC_TX_OPEN | PATM_VCC_TX_CLOSING)) != 0 && vcc->scd != NULL && (vcc->scd->num_on_card != 0 || _IF_QLEN(&vcc->scd->q) != 0)) { /* * If there is any packet outstanding in the SCD re-activate * the channel and kick it. */ patm_nor_write(sc, IDT_NOR_TCMDQ, IDT_TCMDQ_START(vcc->cid)); patm_launch(sc, vcc->scd); } } /* * Convert a (24-bit) rate to the ATM-Forum form * Our rate is never larger than 19 bits. */ static u_int cps2atmf(u_int cps) { u_int e; if (cps == 0) return (0); cps <<= 9; e = 0; while (cps > (1024 - 1)) { e++; cps >>= 1; } return ((1 << 14) | (e << 9) | (cps & 0x1ff)); } /* * Do a binary search on the log2rate table to convert the rate * to its log form. This assumes that the ATM-Forum form is monotonically * increasing with the plain cell rate. */ static u_int rate2log(struct patm_softc *sc, u_int rate) { const uint32_t *tbl; u_int lower, upper, mid, done, val, afr; afr = cps2atmf(rate); if (sc->flags & PATM_25M) tbl = patm_rtables25; else tbl = patm_rtables155; lower = 0; upper = 255; done = 0; while (!done) { mid = (lower + upper) / 2; val = tbl[mid] >> 17; if (val == afr || upper == lower) break; if (afr > val) lower = mid + 1; else upper = mid - 1; } if (val > afr && mid > 0) mid--; return (mid); }
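/*
 * Illustrative sketch, not part of the driver: cps2atmf() above packs a
 * cell rate into the 16-bit ATM-Forum floating point form - a non-zero
 * flag in bit 14, a 5-bit exponent and a 9-bit mantissa, so that
 * rate ~ 2^e * (512 + m) / 512. The decoder below mirrors what the
 * AFR_TO_INT() macro further down does.
 */
#include <stdio.h>

static unsigned
cps2atmf(unsigned cps)
{
	unsigned e;

	if (cps == 0)
		return (0);
	cps <<= 9;
	e = 0;
	while (cps > (1024 - 1)) {
		e++;
		cps >>= 1;
	}
	return ((1 << 14) | (e << 9) | (cps & 0x1ff));
}

static unsigned
atmf2cps(unsigned afr)
{
	/* 2^e * (512 + mantissa) / 512; zero when the flag is clear */
	if (!(afr & (1 << 14)))
		return (0);
	return ((1U << ((afr >> 9) & 0x1f)) * (512 + (afr & 0x1ff)) / 512);
}

int
main(void)
{
	unsigned rate = 353208;	/* OC-3c cell rate */
	unsigned afr = cps2atmf(rate);

	/* prints 353208 -> afr 0x64b1 -> 352768; the form rounds down */
	printf("%u -> afr 0x%04x -> %u\n", rate, afr, atmf2cps(afr));
	return (0);
}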
/* * Return the table index for an increase table. The increase table * must be selected not by the RIF itself, but by PCR/2^RIF. Each table * represents an additive increase of a cell rate that can be computed * from the first table entry (the value in this entry will not be clamped * by the link rate). */ static u_int get_air_table(struct patm_softc *sc, u_int rif, u_int pcr) { const uint32_t *tbl; u_int increase, base, lair0, ret, t, cps; #define GET_ENTRY(TAB, IDX) (0xffff & ((IDX & 1) ? \ (tbl[512 + (IDX / 2) + 128 * (TAB)] >> 16) : \ (tbl[512 + (IDX / 2) + 128 * (TAB)]))) #define MANT_BITS 10 #define FRAC_BITS 16 #define DIFF_TO_FP(D) (((D) & ((1 << MANT_BITS) - 1)) << ((D) >> MANT_BITS)) #define AFR_TO_INT(A) ((1 << (((A) >> 9) & 0x1f)) * \ (512 + ((A) & 0x1ff)) / 512 * ((A) >> 14)) if (sc->flags & PATM_25M) tbl = patm_rtables25; else tbl = patm_rtables155; if (rif >= patm_rtables_ntab) rif = patm_rtables_ntab - 1; increase = pcr >> rif; ret = 0; for (t = 0; t < patm_rtables_ntab; t++) { /* get base rate of this table */ base = GET_ENTRY(t, 0); /* convert this to fixed point */ lair0 = DIFF_TO_FP(base) >> FRAC_BITS; /* get the CPS from the log2rate table */ cps = AFR_TO_INT(tbl[lair0] >> 17) - 10; if (increase >= cps) break; ret = t; } return (ret + 4); } /* * Setup the TCT */ void patm_tct_setup(struct patm_softc *sc, struct patm_scd *scd, struct patm_vcc *vcc) { uint32_t tct[8]; u_int sram; u_int mbs, token; u_int tmp, crm, rdf, cdf, air, mcr; bzero(tct, sizeof(tct)); if (vcc == NULL) { /* special case for UBR0 */ sram = 0; tct[0] = IDT_TCT_UBR | scd->sram; tct[7] = IDT_TCT_UBR_FLG; } else { sram = vcc->cid * 8; switch (vcc->vcc.traffic) { case ATMIO_TRAFFIC_CBR: patm_tst_alloc(sc, vcc); tct[0] = IDT_TCT_CBR | scd->sram; /* must account for what was really allocated */ break; case ATMIO_TRAFFIC_VBR: /* compute parameters for the TCT */ scd->init_er = rate2log(sc, vcc->vcc.tparam.pcr); scd->lacr = rate2log(sc, vcc->vcc.tparam.scr); /* get the 16-bit fraction of SCR/PCR, * both are 24 bits. Do it the simple way. */ token = ((uint64_t)vcc->vcc.tparam.scr << 16) / vcc->vcc.tparam.pcr; patm_debug(sc, VCC, "VBR: init_er=%u lacr=%u " "token=0x%04x\n", scd->init_er, scd->lacr, token); tct[0] = IDT_TCT_VBR | scd->sram; tct[2] = IDT_TCT_TSIF; tct[3] = IDT_TCT_IDLE | IDT_TCT_HALT; tct[4] = IDT_TCT_MAXIDLE; tct[5] = 0x01000000; if ((mbs = vcc->vcc.tparam.mbs) > 0xff) mbs = 0xff; tct[6] = (mbs << 16) | token; sc->bwrem -= vcc->vcc.tparam.scr; break; case ATMIO_TRAFFIC_ABR: scd->init_er = rate2log(sc, vcc->vcc.tparam.pcr); scd->lacr = rate2log(sc, vcc->vcc.tparam.icr); mcr = rate2log(sc, vcc->vcc.tparam.mcr); /* compute CRM */ tmp = vcc->vcc.tparam.tbe / vcc->vcc.tparam.nrm; if (tmp * vcc->vcc.tparam.nrm < vcc->vcc.tparam.tbe) tmp++; for (crm = 1; tmp > (1 << crm); crm++) ; if (crm > 0x7) crm = 7; air = get_air_table(sc, vcc->vcc.tparam.rif, vcc->vcc.tparam.pcr); if ((rdf = vcc->vcc.tparam.rdf) >= patm_rtables_ntab) rdf = patm_rtables_ntab - 1; rdf += patm_rtables_ntab + 4; if ((cdf = vcc->vcc.tparam.cdf) >= patm_rtables_ntab) cdf = patm_rtables_ntab - 1; cdf += patm_rtables_ntab + 4; patm_debug(sc, VCC, "ABR: init_er=%u lacr=%u mcr=%u " "crm=%u air=%u rdf=%u cdf=%u\n", scd->init_er, scd->lacr, mcr, crm, air, rdf, cdf); tct[0] = IDT_TCT_ABR | scd->sram; tct[1] = crm << IDT_TCT_CRM_SHIFT; tct[3] = IDT_TCT_HALT | IDT_TCT_IDLE | (4 << IDT_TCT_NAGE_SHIFT); tct[4] = mcr << IDT_TCT_LMCR_SHIFT; tct[5] = (cdf << IDT_TCT_CDF_SHIFT) | (rdf << IDT_TCT_RDF_SHIFT) | (air << IDT_TCT_AIR_SHIFT); sc->bwrem -= vcc->vcc.tparam.mcr; break; } } patm_sram_write4(sc, sram + 0, tct[0], tct[1], tct[2], tct[3]); patm_sram_write4(sc, sram + 4, tct[4], tct[5], tct[6], tct[7]); patm_debug(sc, VCC, "TCT[%u]: %08x %08x %08x %08x %08x %08x %08x %08x", sram / 8, patm_sram_read(sc, sram + 0), patm_sram_read(sc, sram + 1), patm_sram_read(sc, sram + 2), patm_sram_read(sc, sram + 3), patm_sram_read(sc, sram + 4), patm_sram_read(sc, sram + 5), patm_sram_read(sc, sram + 6), patm_sram_read(sc, sram + 7)); }
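/*
 * Illustrative sketch, not part of the driver: the VBR case of
 * patm_tct_setup() above stores the SCR/PCR ratio as a 16-bit fixed
 * point fraction ("token"). Both rates can be up to 24 bits wide, so
 * the numerator has to be widened to 64 bits before the shift - that is
 * what the cast in the driver code is for.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t
fraction16(uint32_t scr, uint32_t pcr)
{
	/* 16-bit fixed point scr/pcr; scr <= pcr, both up to 24 bits */
	return (((uint64_t)scr << 16) / pcr);
}

int
main(void)
{
	uint32_t scr = 100000, pcr = 353208;
	uint32_t token = fraction16(scr, pcr);

	/* prints token=0x487a, i.e. roughly 0.283 of the peak rate */
	printf("token=0x%04x (%u/65536 = %.3f)\n", token, token,
	    token / 65536.0);
	return (0);
}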
/* * Start a channel */ static void patm_tct_start(struct patm_softc *sc, struct patm_vcc *vcc) { patm_nor_write(sc, IDT_NOR_TCMDQ, IDT_TCMDQ_UIER(vcc->cid, vcc->scd->init_er)); patm_nor_write(sc, IDT_NOR_TCMDQ, IDT_TCMDQ_SLACR(vcc->cid, vcc->scd->lacr)); } static void patm_tct_print(struct patm_softc *sc, u_int cid) { #ifdef PATM_DEBUG u_int sram = cid * 8; #endif patm_debug(sc, VCC, "TCT[%u]: %08x %08x %08x %08x %08x %08x %08x %08x", sram / 8, patm_sram_read(sc, sram + 0), patm_sram_read(sc, sram + 1), patm_sram_read(sc, sram + 2), patm_sram_read(sc, sram + 3), patm_sram_read(sc, sram + 4), patm_sram_read(sc, sram + 5), patm_sram_read(sc, sram + 6), patm_sram_read(sc, sram + 7)); } /* * Setup the SCD */ void patm_scd_setup(struct patm_softc *sc, struct patm_scd *scd) { patm_sram_write4(sc, scd->sram + 0, scd->phy, 0, 0xffffffff, 0); patm_sram_write4(sc, scd->sram + 4, 0, 0, 0, 0); patm_debug(sc, VCC, "SCD(%x): %08x %08x %08x %08x %08x %08x %08x %08x", scd->sram, patm_sram_read(sc, scd->sram + 0), patm_sram_read(sc, scd->sram + 1), patm_sram_read(sc, scd->sram + 2), patm_sram_read(sc, scd->sram + 3), patm_sram_read(sc, scd->sram + 4), patm_sram_read(sc, scd->sram + 5), patm_sram_read(sc, scd->sram + 6), patm_sram_read(sc, scd->sram + 7)); } /* * Grow the TX map table if possible */ static void patm_txmaps_grow(struct patm_softc *sc) { u_int i; struct patm_txmap *map; int err; if (sc->tx_nmaps >= sc->tx_maxmaps) return; for (i = sc->tx_nmaps; i < sc->tx_nmaps + PATM_CFG_TXMAPS_STEP; i++) { map = uma_zalloc(sc->tx_mapzone, M_NOWAIT); if (map == NULL) /* zone allocation may fail with M_NOWAIT */ break; err = bus_dmamap_create(sc->tx_tag, 0, &map->map); if (err) { uma_zfree(sc->tx_mapzone, map); break; } SLIST_INSERT_HEAD(&sc->tx_maps_free, map, link); } sc->tx_nmaps = i; } /* * Allocate a transmission map */ static struct patm_txmap * patm_txmap_get(struct patm_softc *sc) { struct patm_txmap *map; if ((map = SLIST_FIRST(&sc->tx_maps_free)) == NULL) { patm_txmaps_grow(sc); if ((map = SLIST_FIRST(&sc->tx_maps_free)) == NULL) return (NULL); } SLIST_REMOVE_HEAD(&sc->tx_maps_free, link); return (map); }
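/*
 * Illustrative sketch, not part of the driver: the allocate-or-grow
 * pattern of patm_txmap_get()/patm_txmaps_grow() above, built on the
 * same <sys/queue.h> SLIST macros. malloc() stands in for uma_zalloc()
 * plus bus_dmamap_create(), and MAX plays the role of sc->tx_maxmaps.
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/queue.h>

#define STEP 4	/* grow step, like PATM_CFG_TXMAPS_STEP */
#define MAX 16	/* hard cap, like sc->tx_maxmaps */

struct map {
	int id;
	SLIST_ENTRY(map) link;
};

static SLIST_HEAD(, map) free_maps = SLIST_HEAD_INITIALIZER(free_maps);
static int nmaps;

static void
grow(void)
{
	struct map *m;
	int i;

	for (i = nmaps; i < nmaps + STEP && i < MAX; i++) {
		if ((m = malloc(sizeof(*m))) == NULL)
			break;
		m->id = i;
		SLIST_INSERT_HEAD(&free_maps, m, link);
	}
	nmaps = i;
}

static struct map *
get_map(void)
{
	struct map *m;

	if ((m = SLIST_FIRST(&free_maps)) == NULL) {
		grow();		/* try to refill the free list */
		if ((m = SLIST_FIRST(&free_maps)) == NULL)
			return (NULL);	/* at the cap or out of memory */
	}
	SLIST_REMOVE_HEAD(&free_maps, link);
	return (m);
}

int
main(void)
{
	int n = 0;

	while (get_map() != NULL)
		n++;
	printf("handed out %d maps (cap %d)\n", n, MAX);
	return (0);
}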
/* * Check whether we are in the process of updating the TST on the chip. * If we are, set the flag that we need another update. * If we are not, start the update. */ static __inline void patm_tst_start(struct patm_softc *sc) { if (!(sc->tst_state & TST_PENDING)) { sc->tst_state |= TST_PENDING; if (!(sc->tst_state & TST_WAIT)) { /* timer not running */ patm_tst_update(sc); } } } /* * Allocate TST entries to a CBR connection */ static void patm_tst_alloc(struct patm_softc *sc, struct patm_vcc *vcc) { u_int slots; u_int qptr, pptr; u_int qmax, pmax; u_int pspc, last; mtx_lock(&sc->tst_lock); /* compute the number of slots we need; make sure to get at least * the specified PCR */ slots = cbr2slots(sc, vcc); vcc->scd->slots = slots; sc->bwrem -= slots2cr(sc, slots); patm_debug(sc, TST, "tst_alloc: cbr=%u link=%u tst=%u slots=%u", vcc->vcc.tparam.pcr, IFP2IFATM(sc->ifp)->mib.pcr, sc->mmap->tst_size, slots); qmax = sc->mmap->tst_size - 1; pmax = qmax << 8; pspc = pmax / slots; pptr = pspc >> 1; /* starting point */ qptr = pptr >> 8; last = qptr; while (slots > 0) { if (qptr >= qmax) qptr -= qmax; if (sc->tst_soft[qptr] != IDT_TST_VBR) { /* used - try next */ qptr++; continue; } patm_debug(sc, TST, "slot[%u] = %u.%u diff=%d", qptr, vcc->vcc.vpi, vcc->vcc.vci, (int)qptr - (int)last); last = qptr; sc->tst_soft[qptr] = IDT_TST_CBR | vcc->cid | TST_BOTH; sc->tst_free--; if ((pptr += pspc) >= pmax) pptr -= pmax; qptr = pptr >> 8; slots--; } patm_tst_start(sc); mtx_unlock(&sc->tst_lock); } /* * Free a CBR connection's TST entries */ static void patm_tst_free(struct patm_softc *sc, struct patm_vcc *vcc) { u_int i; mtx_lock(&sc->tst_lock); for (i = 0; i < sc->mmap->tst_size - 1; i++) { if ((sc->tst_soft[i] & IDT_TST_MASK) == vcc->cid) { sc->tst_soft[i] = IDT_TST_VBR | TST_BOTH; sc->tst_free++; } } sc->bwrem += slots2cr(sc, vcc->scd->slots); patm_tst_start(sc); mtx_unlock(&sc->tst_lock); } /* * Write the soft TST into the idle incore TST and start the wait timer. * We assume that we hold the tst lock. */ static void patm_tst_update(struct patm_softc *sc) { u_int flag; /* flag to clear from soft TST */ u_int idle; /* the idle TST */ u_int act; /* the active TST */ u_int i; if (sc->tst_state & TST_ACT1) { act = 1; idle = 0; flag = TST_CH0; } else { act = 0; idle = 1; flag = TST_CH1; } /* update the idle one */ for (i = 0; i < sc->mmap->tst_size - 1; i++) if (sc->tst_soft[i] & flag) { patm_sram_write(sc, sc->tst_base[idle] + i, sc->tst_soft[i] & ~TST_BOTH); sc->tst_soft[i] &= ~flag; } /* the active one jumps to the idle one */ patm_sram_write(sc, sc->tst_jump[act], IDT_TST_BR | (sc->tst_base[idle] << 2)); /* wait for the chip to jump */ sc->tst_state &= ~TST_PENDING; sc->tst_state |= TST_WAIT; callout_reset(&sc->tst_callout, 1, patm_tst_timer, sc); }
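/*
 * Illustrative sketch, not part of the driver: the double-buffered table
 * switch performed by patm_tst_update() above. The card always runs one
 * of two TST copies; updates go into the idle copy, the active copy's
 * final branch is re-pointed at the idle one, and the roles swap once
 * the chip has taken the branch (the driver polls for that with the
 * timer below - here the switch is simply assumed to have happened).
 */
#include <stdio.h>

#define SLOTS 4

static unsigned tst[2][SLOTS + 1];	/* two tables; last slot = branch */
static int active;			/* table the "card" is running */

static void
tst_update(const unsigned *soft)
{
	int idle = !active;
	int i;

	/* write the new schedule into the idle table */
	for (i = 0; i < SLOTS; i++)
		tst[idle][i] = soft[i];
	/* re-point the active table's branch at the idle table */
	tst[active][SLOTS] = idle;
	/* assume the card took the branch; swap roles */
	active = idle;
	tst[active][SLOTS] = active;	/* branch back to itself */
}

int
main(void)
{
	unsigned soft[SLOTS] = { 1, 0, 2, 0 };
	int i;

	tst_update(soft);
	printf("active table %d:", active);
	for (i = 0; i < SLOTS; i++)
		printf(" %u", tst[active][i]);
	printf("\n");
	return (0);
}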
/* * Timer for TST updates */ static void patm_tst_timer(void *p) { struct patm_softc *sc = p; u_int act; /* active TST */ u_int now; /* current place in TST */ mtx_lock(&sc->tst_lock); if (sc->tst_state & TST_WAIT) { /* ignore the PENDING state while we are waiting for * the chip to switch tables. Once the switch is done, * we will again look at PENDING */ act = (sc->tst_state & TST_ACT1) ? 1 : 0; now = patm_nor_read(sc, IDT_NOR_NOW) >> 2; if (now >= sc->tst_base[act] && now <= sc->tst_jump[act]) { /* not yet */ callout_reset(&sc->tst_callout, 1, patm_tst_timer, sc); goto done; } sc->tst_state &= ~TST_WAIT; /* change back jump */ patm_sram_write(sc, sc->tst_jump[act], IDT_TST_BR | (sc->tst_base[act] << 2)); /* switch */ sc->tst_state ^= TST_ACT1; } if (sc->tst_state & TST_PENDING) /* we got another update request while the timer was running */ patm_tst_update(sc); done: mtx_unlock(&sc->tst_lock); } static const char * dump_scd(struct patm_softc *sc, struct patm_scd *scd) { u_int i; for (i = 0; i < IDT_TSQE_TAG_SPACE; i++) printf("on_card[%u] = %p\n", i, scd->on_card[i]); printf("space=%u tag=%u num_on_card=%u last_tag=%u\n", scd->space, scd->tag, scd->num_on_card, scd->last_tag); return (""); }
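/*
 * Illustrative sketch, not part of the driver: the fixed point stepping
 * that patm_tst_alloc() above uses to spread a CBR connection's slots
 * evenly over the schedule table. Positions carry 8 fraction bits
 * (pmax = qmax << 8) and advance by pspc = pmax / slots, so rounding
 * errors do not accumulate; the collision handling of the real code is
 * elided.
 */
#include <stdio.h>

int
main(void)
{
	unsigned qmax = 16;		/* demo stand-in for tst_size - 1 */
	unsigned slots = 5;		/* slots to allocate */
	unsigned pmax = qmax << 8;	/* table length, 8 fraction bits */
	unsigned pspc = pmax / slots;	/* spacing between slots */
	unsigned pptr = pspc >> 1;	/* start half a spacing in */
	unsigned qptr;

	/* prints slots 1, 4, 7, 11, 14 - evenly spread over 0..15 */
	while (slots > 0) {
		qptr = pptr >> 8;
		if (qptr >= qmax)
			qptr -= qmax;
		printf("slot %u\n", qptr);
		if ((pptr += pspc) >= pmax)
			pptr -= pmax;
		slots--;
	}
	return (0);
}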