Index: head/sys/dev/sec/sec.c =================================================================== --- head/sys/dev/sec/sec.c (revision 293038) +++ head/sys/dev/sec/sec.c (revision 293039) @@ -1,1875 +1,1876 @@ /*- * Copyright (C) 2008-2009 Semihalf, Piotr Ziecik * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * Freescale integrated Security Engine (SEC) driver. Currently SEC 2.0 and * 3.0 are supported. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include +#include #include #include #include #include "cryptodev_if.h" #include #include static int sec_probe(device_t dev); static int sec_attach(device_t dev); static int sec_detach(device_t dev); static int sec_suspend(device_t dev); static int sec_resume(device_t dev); static int sec_shutdown(device_t dev); static void sec_primary_intr(void *arg); static void sec_secondary_intr(void *arg); static int sec_setup_intr(struct sec_softc *sc, struct resource **ires, void **ihand, int *irid, driver_intr_t handler, const char *iname); static void sec_release_intr(struct sec_softc *sc, struct resource *ires, void *ihand, int irid, const char *iname); static int sec_controller_reset(struct sec_softc *sc); static int sec_channel_reset(struct sec_softc *sc, int channel, int full); static int sec_init(struct sec_softc *sc); static int sec_alloc_dma_mem(struct sec_softc *sc, struct sec_dma_mem *dma_mem, bus_size_t size); static int sec_desc_map_dma(struct sec_softc *sc, struct sec_dma_mem *dma_mem, void *mem, bus_size_t size, int type, struct sec_desc_map_info *sdmi); static void sec_free_dma_mem(struct sec_dma_mem *dma_mem); static void sec_enqueue(struct sec_softc *sc); static int sec_enqueue_desc(struct sec_softc *sc, struct sec_desc *desc, int channel); static int sec_eu_channel(struct sec_softc *sc, int eu); static int sec_make_pointer(struct sec_softc *sc, struct sec_desc *desc, u_int n, void *data, bus_size_t doffset, bus_size_t dsize, int dtype); static int sec_make_pointer_direct(struct sec_softc *sc, struct sec_desc *desc, u_int n, bus_addr_t data, bus_size_t dsize); static int sec_alloc_session(struct sec_softc *sc); static int sec_newsession(device_t dev, u_int32_t *sidp, struct cryptoini *cri); static int sec_freesession(device_t dev, uint64_t tid); static int sec_process(device_t dev, struct cryptop *crp, int hint); static int 
sec_split_cri(struct cryptoini *cri, struct cryptoini **enc, struct cryptoini **mac); static int sec_split_crp(struct cryptop *crp, struct cryptodesc **enc, struct cryptodesc **mac); static int sec_build_common_ns_desc(struct sec_softc *sc, struct sec_desc *desc, struct sec_session *ses, struct cryptop *crp, struct cryptodesc *enc, int buftype); static int sec_build_common_s_desc(struct sec_softc *sc, struct sec_desc *desc, struct sec_session *ses, struct cryptop *crp, struct cryptodesc *enc, struct cryptodesc *mac, int buftype); static struct sec_session *sec_get_session(struct sec_softc *sc, u_int sid); static struct sec_desc *sec_find_desc(struct sec_softc *sc, bus_addr_t paddr); /* AESU */ static int sec_aesu_newsession(struct sec_softc *sc, struct sec_session *ses, struct cryptoini *enc, struct cryptoini *mac); static int sec_aesu_make_desc(struct sec_softc *sc, struct sec_session *ses, struct sec_desc *desc, struct cryptop *crp, int buftype); /* DEU */ static int sec_deu_newsession(struct sec_softc *sc, struct sec_session *ses, struct cryptoini *enc, struct cryptoini *mac); static int sec_deu_make_desc(struct sec_softc *sc, struct sec_session *ses, struct sec_desc *desc, struct cryptop *crp, int buftype); /* MDEU */ static int sec_mdeu_can_handle(u_int alg); static int sec_mdeu_config(struct cryptodesc *crd, u_int *eu, u_int *mode, u_int *hashlen); static int sec_mdeu_newsession(struct sec_softc *sc, struct sec_session *ses, struct cryptoini *enc, struct cryptoini *mac); static int sec_mdeu_make_desc(struct sec_softc *sc, struct sec_session *ses, struct sec_desc *desc, struct cryptop *crp, int buftype); static device_method_t sec_methods[] = { /* Device interface */ DEVMETHOD(device_probe, sec_probe), DEVMETHOD(device_attach, sec_attach), DEVMETHOD(device_detach, sec_detach), DEVMETHOD(device_suspend, sec_suspend), DEVMETHOD(device_resume, sec_resume), DEVMETHOD(device_shutdown, sec_shutdown), /* Crypto methods */ DEVMETHOD(cryptodev_newsession, 
sec_newsession), DEVMETHOD(cryptodev_freesession,sec_freesession), DEVMETHOD(cryptodev_process, sec_process), DEVMETHOD_END }; static driver_t sec_driver = { "sec", sec_methods, sizeof(struct sec_softc), }; static devclass_t sec_devclass; DRIVER_MODULE(sec, simplebus, sec_driver, sec_devclass, 0, 0); MODULE_DEPEND(sec, crypto, 1, 1, 1); static struct sec_eu_methods sec_eus[] = { { sec_aesu_newsession, sec_aesu_make_desc, }, { sec_deu_newsession, sec_deu_make_desc, }, { sec_mdeu_newsession, sec_mdeu_make_desc, }, { NULL, NULL } }; static inline void sec_sync_dma_mem(struct sec_dma_mem *dma_mem, bus_dmasync_op_t op) { /* Sync only if dma memory is valid */ if (dma_mem->dma_vaddr != NULL) bus_dmamap_sync(dma_mem->dma_tag, dma_mem->dma_map, op); } static inline void sec_free_session(struct sec_softc *sc, struct sec_session *ses) { SEC_LOCK(sc, sessions); ses->ss_used = 0; SEC_UNLOCK(sc, sessions); } static inline void * sec_get_pointer_data(struct sec_desc *desc, u_int n) { return (desc->sd_ptr_dmem[n].dma_vaddr); } static int sec_probe(device_t dev) { struct sec_softc *sc; uint64_t id; if (!ofw_bus_status_okay(dev)) return (ENXIO); if (!ofw_bus_is_compatible(dev, "fsl,sec2.0")) return (ENXIO); sc = device_get_softc(dev); sc->sc_rrid = 0; sc->sc_rres = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->sc_rrid, RF_ACTIVE); if (sc->sc_rres == NULL) return (ENXIO); sc->sc_bas.bsh = rman_get_bushandle(sc->sc_rres); sc->sc_bas.bst = rman_get_bustag(sc->sc_rres); id = SEC_READ(sc, SEC_ID); bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_rrid, sc->sc_rres); switch (id) { case SEC_20_ID: device_set_desc(dev, "Freescale Security Engine 2.0"); sc->sc_version = 2; break; case SEC_30_ID: device_set_desc(dev, "Freescale Security Engine 3.0"); sc->sc_version = 3; break; case SEC_31_ID: device_set_desc(dev, "Freescale Security Engine 3.1"); sc->sc_version = 3; break; default: - device_printf(dev, "unknown SEC ID 0x%016llx!\n", id); + device_printf(dev, "unknown SEC ID 
0x%16"PRIx64"!\n", id); return (ENXIO); } return (0); } static int sec_attach(device_t dev) { struct sec_softc *sc; struct sec_hw_lt *lt; int error = 0; int i; sc = device_get_softc(dev); sc->sc_dev = dev; sc->sc_blocked = 0; sc->sc_shutdown = 0; sc->sc_cid = crypto_get_driverid(dev, CRYPTOCAP_F_HARDWARE); if (sc->sc_cid < 0) { device_printf(dev, "could not get crypto driver ID!\n"); return (ENXIO); } /* Init locks */ mtx_init(&sc->sc_controller_lock, device_get_nameunit(dev), "SEC Controller lock", MTX_DEF); mtx_init(&sc->sc_descriptors_lock, device_get_nameunit(dev), "SEC Descriptors lock", MTX_DEF); mtx_init(&sc->sc_sessions_lock, device_get_nameunit(dev), "SEC Sessions lock", MTX_DEF); /* Allocate I/O memory for SEC registers */ sc->sc_rrid = 0; sc->sc_rres = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->sc_rrid, RF_ACTIVE); if (sc->sc_rres == NULL) { device_printf(dev, "could not allocate I/O memory!\n"); goto fail1; } sc->sc_bas.bsh = rman_get_bushandle(sc->sc_rres); sc->sc_bas.bst = rman_get_bustag(sc->sc_rres); /* Setup interrupts */ sc->sc_pri_irid = 0; error = sec_setup_intr(sc, &sc->sc_pri_ires, &sc->sc_pri_ihand, &sc->sc_pri_irid, sec_primary_intr, "primary"); if (error) goto fail2; if (sc->sc_version == 3) { sc->sc_sec_irid = 1; error = sec_setup_intr(sc, &sc->sc_sec_ires, &sc->sc_sec_ihand, &sc->sc_sec_irid, sec_secondary_intr, "secondary"); if (error) goto fail3; } /* Alloc DMA memory for descriptors and link tables */ error = sec_alloc_dma_mem(sc, &(sc->sc_desc_dmem), SEC_DESCRIPTORS * sizeof(struct sec_hw_desc)); if (error) goto fail4; error = sec_alloc_dma_mem(sc, &(sc->sc_lt_dmem), (SEC_LT_ENTRIES + 1) * sizeof(struct sec_hw_lt)); if (error) goto fail5; /* Fill in descriptors and link tables */ for (i = 0; i < SEC_DESCRIPTORS; i++) { sc->sc_desc[i].sd_desc = (struct sec_hw_desc*)(sc->sc_desc_dmem.dma_vaddr) + i; sc->sc_desc[i].sd_desc_paddr = sc->sc_desc_dmem.dma_paddr + (i * sizeof(struct sec_hw_desc)); } for (i = 0; i < SEC_LT_ENTRIES + 1; 
i++) { sc->sc_lt[i].sl_lt = (struct sec_hw_lt*)(sc->sc_lt_dmem.dma_vaddr) + i; sc->sc_lt[i].sl_lt_paddr = sc->sc_lt_dmem.dma_paddr + (i * sizeof(struct sec_hw_lt)); } /* Last entry in link table is used to create a circle */ lt = sc->sc_lt[SEC_LT_ENTRIES].sl_lt; lt->shl_length = 0; lt->shl_r = 0; lt->shl_n = 1; lt->shl_ptr = sc->sc_lt[0].sl_lt_paddr; /* Init descriptor and link table queues pointers */ SEC_CNT_INIT(sc, sc_free_desc_get_cnt, SEC_DESCRIPTORS); SEC_CNT_INIT(sc, sc_free_desc_put_cnt, SEC_DESCRIPTORS); SEC_CNT_INIT(sc, sc_ready_desc_get_cnt, SEC_DESCRIPTORS); SEC_CNT_INIT(sc, sc_ready_desc_put_cnt, SEC_DESCRIPTORS); SEC_CNT_INIT(sc, sc_queued_desc_get_cnt, SEC_DESCRIPTORS); SEC_CNT_INIT(sc, sc_queued_desc_put_cnt, SEC_DESCRIPTORS); SEC_CNT_INIT(sc, sc_lt_alloc_cnt, SEC_LT_ENTRIES); SEC_CNT_INIT(sc, sc_lt_free_cnt, SEC_LT_ENTRIES); /* Create masks for fast checks */ sc->sc_int_error_mask = 0; for (i = 0; i < SEC_CHANNELS; i++) sc->sc_int_error_mask |= (~0ULL & SEC_INT_CH_ERR(i)); switch (sc->sc_version) { case 2: sc->sc_channel_idle_mask = (SEC_CHAN_CSR2_FFLVL_M << SEC_CHAN_CSR2_FFLVL_S) | (SEC_CHAN_CSR2_MSTATE_M << SEC_CHAN_CSR2_MSTATE_S) | (SEC_CHAN_CSR2_PSTATE_M << SEC_CHAN_CSR2_PSTATE_S) | (SEC_CHAN_CSR2_GSTATE_M << SEC_CHAN_CSR2_GSTATE_S); break; case 3: sc->sc_channel_idle_mask = (SEC_CHAN_CSR3_FFLVL_M << SEC_CHAN_CSR3_FFLVL_S) | (SEC_CHAN_CSR3_MSTATE_M << SEC_CHAN_CSR3_MSTATE_S) | (SEC_CHAN_CSR3_PSTATE_M << SEC_CHAN_CSR3_PSTATE_S) | (SEC_CHAN_CSR3_GSTATE_M << SEC_CHAN_CSR3_GSTATE_S); break; } /* Init hardware */ error = sec_init(sc); if (error) goto fail6; /* Register in OCF (AESU) */ crypto_register(sc->sc_cid, CRYPTO_AES_CBC, 0, 0); /* Register in OCF (DEU) */ crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0); crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0); /* Register in OCF (MDEU) */ crypto_register(sc->sc_cid, CRYPTO_MD5, 0, 0); crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC, 0, 0); crypto_register(sc->sc_cid, CRYPTO_SHA1, 0, 0); 
crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0); crypto_register(sc->sc_cid, CRYPTO_SHA2_256_HMAC, 0, 0); if (sc->sc_version >= 3) { crypto_register(sc->sc_cid, CRYPTO_SHA2_384_HMAC, 0, 0); crypto_register(sc->sc_cid, CRYPTO_SHA2_512_HMAC, 0, 0); } return (0); fail6: sec_free_dma_mem(&(sc->sc_lt_dmem)); fail5: sec_free_dma_mem(&(sc->sc_desc_dmem)); fail4: sec_release_intr(sc, sc->sc_sec_ires, sc->sc_sec_ihand, sc->sc_sec_irid, "secondary"); fail3: sec_release_intr(sc, sc->sc_pri_ires, sc->sc_pri_ihand, sc->sc_pri_irid, "primary"); fail2: bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_rrid, sc->sc_rres); fail1: mtx_destroy(&sc->sc_controller_lock); mtx_destroy(&sc->sc_descriptors_lock); mtx_destroy(&sc->sc_sessions_lock); return (ENXIO); } static int sec_detach(device_t dev) { struct sec_softc *sc = device_get_softc(dev); int i, error, timeout = SEC_TIMEOUT; /* Prepare driver to shutdown */ SEC_LOCK(sc, descriptors); sc->sc_shutdown = 1; SEC_UNLOCK(sc, descriptors); /* Wait until all queued processing finishes */ while (1) { SEC_LOCK(sc, descriptors); i = SEC_READY_DESC_CNT(sc) + SEC_QUEUED_DESC_CNT(sc); SEC_UNLOCK(sc, descriptors); if (i == 0) break; if (timeout < 0) { device_printf(dev, "queue flush timeout!\n"); /* DMA can be still active - stop it */ for (i = 0; i < SEC_CHANNELS; i++) sec_channel_reset(sc, i, 1); break; } timeout -= 1000; DELAY(1000); } /* Disable interrupts */ SEC_WRITE(sc, SEC_IER, 0); /* Unregister from OCF */ crypto_unregister_all(sc->sc_cid); /* Free DMA memory */ for (i = 0; i < SEC_DESCRIPTORS; i++) SEC_DESC_FREE_POINTERS(&(sc->sc_desc[i])); sec_free_dma_mem(&(sc->sc_lt_dmem)); sec_free_dma_mem(&(sc->sc_desc_dmem)); /* Release interrupts */ sec_release_intr(sc, sc->sc_pri_ires, sc->sc_pri_ihand, sc->sc_pri_irid, "primary"); sec_release_intr(sc, sc->sc_sec_ires, sc->sc_sec_ihand, sc->sc_sec_irid, "secondary"); /* Release memory */ if (sc->sc_rres) { error = bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_rrid, sc->sc_rres); if 
(error) device_printf(dev, "bus_release_resource() failed for" " I/O memory, error %d\n", error); sc->sc_rres = NULL; } mtx_destroy(&sc->sc_controller_lock); mtx_destroy(&sc->sc_descriptors_lock); mtx_destroy(&sc->sc_sessions_lock); return (0); } static int sec_suspend(device_t dev) { return (0); } static int sec_resume(device_t dev) { return (0); } static int sec_shutdown(device_t dev) { return (0); } static int sec_setup_intr(struct sec_softc *sc, struct resource **ires, void **ihand, int *irid, driver_intr_t handler, const char *iname) { int error; (*ires) = bus_alloc_resource_any(sc->sc_dev, SYS_RES_IRQ, irid, RF_ACTIVE); if ((*ires) == NULL) { device_printf(sc->sc_dev, "could not allocate %s IRQ\n", iname); return (ENXIO); } error = bus_setup_intr(sc->sc_dev, *ires, INTR_MPSAFE | INTR_TYPE_NET, NULL, handler, sc, ihand); if (error) { device_printf(sc->sc_dev, "failed to set up %s IRQ\n", iname); if (bus_release_resource(sc->sc_dev, SYS_RES_IRQ, *irid, *ires)) device_printf(sc->sc_dev, "could not release %s IRQ\n", iname); (*ires) = NULL; return (error); } return (0); } static void sec_release_intr(struct sec_softc *sc, struct resource *ires, void *ihand, int irid, const char *iname) { int error; if (ires == NULL) return; error = bus_teardown_intr(sc->sc_dev, ires, ihand); if (error) device_printf(sc->sc_dev, "bus_teardown_intr() failed for %s" " IRQ, error %d\n", iname, error); error = bus_release_resource(sc->sc_dev, SYS_RES_IRQ, irid, ires); if (error) device_printf(sc->sc_dev, "bus_release_resource() failed for %s" " IRQ, error %d\n", iname, error); } static void sec_primary_intr(void *arg) { struct sec_softc *sc = arg; struct sec_desc *desc; uint64_t isr; int i, wakeup = 0; SEC_LOCK(sc, controller); /* Check for errors */ isr = SEC_READ(sc, SEC_ISR); if (isr & sc->sc_int_error_mask) { /* Check each channel for error */ for (i = 0; i < SEC_CHANNELS; i++) { if ((isr & SEC_INT_CH_ERR(i)) == 0) continue; device_printf(sc->sc_dev, "I/O error on channel %i!\n", 
i); /* Find and mark problematic descriptor */ desc = sec_find_desc(sc, SEC_READ(sc, SEC_CHAN_CDPR(i))); if (desc != NULL) desc->sd_error = EIO; /* Do partial channel reset */ sec_channel_reset(sc, i, 0); } } /* ACK interrupt */ SEC_WRITE(sc, SEC_ICR, 0xFFFFFFFFFFFFFFFFULL); SEC_UNLOCK(sc, controller); SEC_LOCK(sc, descriptors); /* Handle processed descriptors */ SEC_DESC_SYNC(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); while (SEC_QUEUED_DESC_CNT(sc) > 0) { desc = SEC_GET_QUEUED_DESC(sc); if (desc->sd_desc->shd_done != 0xFF && desc->sd_error == 0) { SEC_PUT_BACK_QUEUED_DESC(sc); break; } SEC_DESC_SYNC_POINTERS(desc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); desc->sd_crp->crp_etype = desc->sd_error; crypto_done(desc->sd_crp); SEC_DESC_FREE_POINTERS(desc); SEC_DESC_FREE_LT(sc, desc); SEC_DESC_QUEUED2FREE(sc); } SEC_DESC_SYNC(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); if (!sc->sc_shutdown) { wakeup = sc->sc_blocked; sc->sc_blocked = 0; } SEC_UNLOCK(sc, descriptors); /* Enqueue ready descriptors in hardware */ sec_enqueue(sc); if (wakeup) crypto_unblock(sc->sc_cid, wakeup); } static void sec_secondary_intr(void *arg) { struct sec_softc *sc = arg; device_printf(sc->sc_dev, "spurious secondary interrupt!\n"); sec_primary_intr(arg); } static int sec_controller_reset(struct sec_softc *sc) { int timeout = SEC_TIMEOUT; /* Reset Controller */ SEC_WRITE(sc, SEC_MCR, SEC_MCR_SWR); while (SEC_READ(sc, SEC_MCR) & SEC_MCR_SWR) { DELAY(1000); timeout -= 1000; if (timeout < 0) { device_printf(sc->sc_dev, "timeout while waiting for " "device reset!\n"); return (ETIMEDOUT); } } return (0); } static int sec_channel_reset(struct sec_softc *sc, int channel, int full) { int timeout = SEC_TIMEOUT; uint64_t bit = (full) ? 
/*
 * busdma load callback: translate the physical segments of a mapped
 * crypto buffer into SEC link table entries.
 *
 * The first sdmi_offset bytes of the mapping are skipped and sdmi_size
 * bytes are linked.  On return sdmi_lt_first/sdmi_lt_last bracket the
 * entries consumed and sdmi_lt_used counts them; sdmi_offset and
 * sdmi_size are consumed (mutated in place).  On a load error the
 * callback returns without touching the link table.
 */
static void
sec_dma_map_desc_cb(void *arg, bus_dma_segment_t *segs, int nseg,
    int error)
{
	struct sec_desc_map_info *sdmi = arg;
	struct sec_softc *sc = sdmi->sdmi_sc;
	struct sec_lt *lt = NULL;
	bus_addr_t addr;
	bus_size_t size;
	int i;

	SEC_LOCK_ASSERT(sc, descriptors);

	if (error)
		return;

	for (i = 0; i < nseg; i++) {
		addr = segs[i].ds_addr;
		size = segs[i].ds_len;

		/* Skip requested offset */
		if (sdmi->sdmi_offset >= size) {
			sdmi->sdmi_offset -= size;
			continue;
		}

		/* Offset falls inside this segment: start linking here. */
		addr += sdmi->sdmi_offset;
		size -= sdmi->sdmi_offset;
		sdmi->sdmi_offset = 0;

		/* Do not link more than requested */
		if (sdmi->sdmi_size < size)
			size = sdmi->sdmi_size;

		/*
		 * shl_r/shl_n are cleared here; the caller marks the
		 * final entry by setting shl_r = 1 on sdmi_lt_last
		 * (see sec_make_pointer()).
		 */
		lt = SEC_ALLOC_LT_ENTRY(sc);
		lt->sl_lt->shl_length = size;
		lt->sl_lt->shl_r = 0;
		lt->sl_lt->shl_n = 0;
		lt->sl_lt->shl_ptr = addr;

		if (sdmi->sdmi_lt_first == NULL)
			sdmi->sdmi_lt_first = lt;

		sdmi->sdmi_lt_used += 1;

		/* Stop once the requested byte count has been linked. */
		if ((sdmi->sdmi_size -= size) == 0)
			break;
	}

	sdmi->sdmi_lt_last = lt;
}
(EBUSY); switch (type) { case SEC_MEMORY: break; case SEC_UIO: size = SEC_FREE_LT_CNT(sc) * SEC_MAX_DMA_BLOCK_SIZE; break; case SEC_MBUF: size = m_length((struct mbuf*)mem, NULL); break; default: return (EINVAL); } error = bus_dma_tag_create(NULL, /* parent */ SEC_DMA_ALIGNMENT, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filtfunc, filtfuncarg */ size, /* maxsize */ SEC_FREE_LT_CNT(sc), /* nsegments */ SEC_MAX_DMA_BLOCK_SIZE, 0, /* maxsegsz, flags */ NULL, NULL, /* lockfunc, lockfuncarg */ &(dma_mem->dma_tag)); /* dmat */ if (error) { device_printf(sc->sc_dev, "failed to allocate busdma tag, error" " %i!\n", error); dma_mem->dma_vaddr = NULL; return (error); } error = bus_dmamap_create(dma_mem->dma_tag, 0, &(dma_mem->dma_map)); if (error) { device_printf(sc->sc_dev, "failed to create DMA map, error %i!" "\n", error); bus_dma_tag_destroy(dma_mem->dma_tag); return (error); } switch (type) { case SEC_MEMORY: error = bus_dmamap_load(dma_mem->dma_tag, dma_mem->dma_map, mem, size, sec_dma_map_desc_cb, sdmi, BUS_DMA_NOWAIT); break; case SEC_UIO: error = bus_dmamap_load_uio(dma_mem->dma_tag, dma_mem->dma_map, mem, sec_dma_map_desc_cb2, sdmi, BUS_DMA_NOWAIT); break; case SEC_MBUF: error = bus_dmamap_load_mbuf(dma_mem->dma_tag, dma_mem->dma_map, mem, sec_dma_map_desc_cb2, sdmi, BUS_DMA_NOWAIT); break; } if (error) { device_printf(sc->sc_dev, "cannot get address of the DMA" " memory, error %i!\n", error); bus_dmamap_destroy(dma_mem->dma_tag, dma_mem->dma_map); bus_dma_tag_destroy(dma_mem->dma_tag); return (error); } dma_mem->dma_is_map = 1; dma_mem->dma_vaddr = mem; return (0); } static void sec_free_dma_mem(struct sec_dma_mem *dma_mem) { /* Check for double free */ if (dma_mem->dma_vaddr == NULL) return; bus_dmamap_unload(dma_mem->dma_tag, dma_mem->dma_map); if (dma_mem->dma_is_map) bus_dmamap_destroy(dma_mem->dma_tag, dma_mem->dma_map); else bus_dmamem_free(dma_mem->dma_tag, dma_mem->dma_vaddr, 
dma_mem->dma_map); bus_dma_tag_destroy(dma_mem->dma_tag); dma_mem->dma_vaddr = NULL; } static int sec_eu_channel(struct sec_softc *sc, int eu) { uint64_t reg; int channel = 0; SEC_LOCK_ASSERT(sc, controller); reg = SEC_READ(sc, SEC_EUASR); switch (eu) { case SEC_EU_AFEU: channel = SEC_EUASR_AFEU(reg); break; case SEC_EU_DEU: channel = SEC_EUASR_DEU(reg); break; case SEC_EU_MDEU_A: case SEC_EU_MDEU_B: channel = SEC_EUASR_MDEU(reg); break; case SEC_EU_RNGU: channel = SEC_EUASR_RNGU(reg); break; case SEC_EU_PKEU: channel = SEC_EUASR_PKEU(reg); break; case SEC_EU_AESU: channel = SEC_EUASR_AESU(reg); break; case SEC_EU_KEU: channel = SEC_EUASR_KEU(reg); break; case SEC_EU_CRCU: channel = SEC_EUASR_CRCU(reg); break; } return (channel - 1); } static int sec_enqueue_desc(struct sec_softc *sc, struct sec_desc *desc, int channel) { u_int fflvl = SEC_MAX_FIFO_LEVEL; uint64_t reg; int i; SEC_LOCK_ASSERT(sc, controller); /* Find free channel if have not got one */ if (channel < 0) { for (i = 0; i < SEC_CHANNELS; i++) { reg = SEC_READ(sc, SEC_CHAN_CSR(channel)); if ((reg & sc->sc_channel_idle_mask) == 0) { channel = i; break; } } } /* There is no free channel */ if (channel < 0) return (-1); /* Check FIFO level on selected channel */ reg = SEC_READ(sc, SEC_CHAN_CSR(channel)); switch(sc->sc_version) { case 2: fflvl = (reg >> SEC_CHAN_CSR2_FFLVL_S) & SEC_CHAN_CSR2_FFLVL_M; break; case 3: fflvl = (reg >> SEC_CHAN_CSR3_FFLVL_S) & SEC_CHAN_CSR3_FFLVL_M; break; } if (fflvl >= SEC_MAX_FIFO_LEVEL) return (-1); /* Enqueue descriptor in channel */ SEC_WRITE(sc, SEC_CHAN_FF(channel), desc->sd_desc_paddr); return (channel); } static void sec_enqueue(struct sec_softc *sc) { struct sec_desc *desc; int ch0, ch1; SEC_LOCK(sc, descriptors); SEC_LOCK(sc, controller); while (SEC_READY_DESC_CNT(sc) > 0) { desc = SEC_GET_READY_DESC(sc); ch0 = sec_eu_channel(sc, desc->sd_desc->shd_eu_sel0); ch1 = sec_eu_channel(sc, desc->sd_desc->shd_eu_sel1); /* * Both EU are used by the same channel. 
* Enqueue descriptor in channel used by busy EUs. */ if (ch0 >= 0 && ch0 == ch1) { if (sec_enqueue_desc(sc, desc, ch0) >= 0) { SEC_DESC_READY2QUEUED(sc); continue; } } /* * Only one EU is free. * Enqueue descriptor in channel used by busy EU. */ if ((ch0 >= 0 && ch1 < 0) || (ch1 >= 0 && ch0 < 0)) { if (sec_enqueue_desc(sc, desc, (ch0 >= 0) ? ch0 : ch1) >= 0) { SEC_DESC_READY2QUEUED(sc); continue; } } /* * Both EU are free. * Enqueue descriptor in first free channel. */ if (ch0 < 0 && ch1 < 0) { if (sec_enqueue_desc(sc, desc, -1) >= 0) { SEC_DESC_READY2QUEUED(sc); continue; } } /* Current descriptor can not be queued at the moment */ SEC_PUT_BACK_READY_DESC(sc); break; } SEC_UNLOCK(sc, controller); SEC_UNLOCK(sc, descriptors); } static struct sec_desc * sec_find_desc(struct sec_softc *sc, bus_addr_t paddr) { struct sec_desc *desc = NULL; int i; SEC_LOCK_ASSERT(sc, descriptors); for (i = 0; i < SEC_CHANNELS; i++) { if (sc->sc_desc[i].sd_desc_paddr == paddr) { desc = &(sc->sc_desc[i]); break; } } return (desc); } static int sec_make_pointer_direct(struct sec_softc *sc, struct sec_desc *desc, u_int n, bus_addr_t data, bus_size_t dsize) { struct sec_hw_desc_ptr *ptr; SEC_LOCK_ASSERT(sc, descriptors); ptr = &(desc->sd_desc->shd_pointer[n]); ptr->shdp_length = dsize; ptr->shdp_extent = 0; ptr->shdp_j = 0; ptr->shdp_ptr = data; return (0); } static int sec_make_pointer(struct sec_softc *sc, struct sec_desc *desc, u_int n, void *data, bus_size_t doffset, bus_size_t dsize, int dtype) { struct sec_desc_map_info sdmi = { sc, dsize, doffset, NULL, NULL, 0 }; struct sec_hw_desc_ptr *ptr; int error; SEC_LOCK_ASSERT(sc, descriptors); /* For flat memory map only requested region */ if (dtype == SEC_MEMORY) { data = (uint8_t*)(data) + doffset; sdmi.sdmi_offset = 0; } error = sec_desc_map_dma(sc, &(desc->sd_ptr_dmem[n]), data, dsize, dtype, &sdmi); if (error) return (error); sdmi.sdmi_lt_last->sl_lt->shl_r = 1; desc->sd_lt_used += sdmi.sdmi_lt_used; ptr = 
&(desc->sd_desc->shd_pointer[n]); ptr->shdp_length = dsize; ptr->shdp_extent = 0; ptr->shdp_j = 1; ptr->shdp_ptr = sdmi.sdmi_lt_first->sl_lt_paddr; return (0); } static int sec_split_cri(struct cryptoini *cri, struct cryptoini **enc, struct cryptoini **mac) { struct cryptoini *e, *m; e = cri; m = cri->cri_next; /* We can haldle only two operations */ if (m && m->cri_next) return (EINVAL); if (sec_mdeu_can_handle(e->cri_alg)) { cri = m; m = e; e = cri; } if (m && !sec_mdeu_can_handle(m->cri_alg)) return (EINVAL); *enc = e; *mac = m; return (0); } static int sec_split_crp(struct cryptop *crp, struct cryptodesc **enc, struct cryptodesc **mac) { struct cryptodesc *e, *m, *t; e = crp->crp_desc; m = e->crd_next; /* We can haldle only two operations */ if (m && m->crd_next) return (EINVAL); if (sec_mdeu_can_handle(e->crd_alg)) { t = m; m = e; e = t; } if (m && !sec_mdeu_can_handle(m->crd_alg)) return (EINVAL); *enc = e; *mac = m; return (0); } static int sec_alloc_session(struct sec_softc *sc) { struct sec_session *ses = NULL; int sid = -1; u_int i; SEC_LOCK(sc, sessions); for (i = 0; i < SEC_MAX_SESSIONS; i++) { if (sc->sc_sessions[i].ss_used == 0) { ses = &(sc->sc_sessions[i]); ses->ss_used = 1; ses->ss_ivlen = 0; ses->ss_klen = 0; ses->ss_mklen = 0; sid = i; break; } } SEC_UNLOCK(sc, sessions); return (sid); } static struct sec_session * sec_get_session(struct sec_softc *sc, u_int sid) { struct sec_session *ses; if (sid >= SEC_MAX_SESSIONS) return (NULL); SEC_LOCK(sc, sessions); ses = &(sc->sc_sessions[sid]); if (ses->ss_used == 0) ses = NULL; SEC_UNLOCK(sc, sessions); return (ses); } static int sec_newsession(device_t dev, u_int32_t *sidp, struct cryptoini *cri) { struct sec_softc *sc = device_get_softc(dev); struct sec_eu_methods *eu = sec_eus; struct cryptoini *enc = NULL; struct cryptoini *mac = NULL; struct sec_session *ses; int error = -1; int sid; error = sec_split_cri(cri, &enc, &mac); if (error) return (error); /* Check key lengths */ if (enc && enc->cri_key 
&& (enc->cri_klen / 8) > SEC_MAX_KEY_LEN) return (E2BIG); if (mac && mac->cri_key && (mac->cri_klen / 8) > SEC_MAX_KEY_LEN) return (E2BIG); /* Only SEC 3.0 supports digests larger than 256 bits */ if (sc->sc_version < 3 && mac && mac->cri_klen > 256) return (E2BIG); sid = sec_alloc_session(sc); if (sid < 0) return (ENOMEM); ses = sec_get_session(sc, sid); /* Find EU for this session */ while (eu->sem_make_desc != NULL) { error = eu->sem_newsession(sc, ses, enc, mac); if (error >= 0) break; eu++; } /* If not found, return EINVAL */ if (error < 0) { sec_free_session(sc, ses); return (EINVAL); } /* Save cipher key */ if (enc && enc->cri_key) { ses->ss_klen = enc->cri_klen / 8; memcpy(ses->ss_key, enc->cri_key, ses->ss_klen); } /* Save digest key */ if (mac && mac->cri_key) { ses->ss_mklen = mac->cri_klen / 8; memcpy(ses->ss_mkey, mac->cri_key, ses->ss_mklen); } ses->ss_eu = eu; *sidp = sid; return (0); } static int sec_freesession(device_t dev, uint64_t tid) { struct sec_softc *sc = device_get_softc(dev); struct sec_session *ses; int error = 0; ses = sec_get_session(sc, CRYPTO_SESID2LID(tid)); if (ses == NULL) return (EINVAL); sec_free_session(sc, ses); return (error); } static int sec_process(device_t dev, struct cryptop *crp, int hint) { struct sec_softc *sc = device_get_softc(dev); struct sec_desc *desc = NULL; struct cryptodesc *mac, *enc; struct sec_session *ses; int buftype, error = 0; /* Check Session ID */ ses = sec_get_session(sc, CRYPTO_SESID2LID(crp->crp_sid)); if (ses == NULL) { crp->crp_etype = EINVAL; crypto_done(crp); return (0); } /* Check for input length */ if (crp->crp_ilen > SEC_MAX_DMA_BLOCK_SIZE) { crp->crp_etype = E2BIG; crypto_done(crp); return (0); } /* Get descriptors */ if (sec_split_crp(crp, &enc, &mac)) { crp->crp_etype = EINVAL; crypto_done(crp); return (0); } SEC_LOCK(sc, descriptors); SEC_DESC_SYNC(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); /* Block driver if there is no free descriptors or we are going down */ if 
(SEC_FREE_DESC_CNT(sc) == 0 || sc->sc_shutdown) { sc->sc_blocked |= CRYPTO_SYMQ; SEC_UNLOCK(sc, descriptors); return (ERESTART); } /* Prepare descriptor */ desc = SEC_GET_FREE_DESC(sc); desc->sd_lt_used = 0; desc->sd_error = 0; desc->sd_crp = crp; if (crp->crp_flags & CRYPTO_F_IOV) buftype = SEC_UIO; else if (crp->crp_flags & CRYPTO_F_IMBUF) buftype = SEC_MBUF; else buftype = SEC_MEMORY; if (enc && enc->crd_flags & CRD_F_ENCRYPT) { if (enc->crd_flags & CRD_F_IV_EXPLICIT) memcpy(desc->sd_desc->shd_iv, enc->crd_iv, ses->ss_ivlen); else arc4rand(desc->sd_desc->shd_iv, ses->ss_ivlen, 0); if ((enc->crd_flags & CRD_F_IV_PRESENT) == 0) crypto_copyback(crp->crp_flags, crp->crp_buf, enc->crd_inject, ses->ss_ivlen, desc->sd_desc->shd_iv); } else if (enc) { if (enc->crd_flags & CRD_F_IV_EXPLICIT) memcpy(desc->sd_desc->shd_iv, enc->crd_iv, ses->ss_ivlen); else crypto_copydata(crp->crp_flags, crp->crp_buf, enc->crd_inject, ses->ss_ivlen, desc->sd_desc->shd_iv); } if (enc && enc->crd_flags & CRD_F_KEY_EXPLICIT) { if ((enc->crd_klen / 8) <= SEC_MAX_KEY_LEN) { ses->ss_klen = enc->crd_klen / 8; memcpy(ses->ss_key, enc->crd_key, ses->ss_klen); } else error = E2BIG; } if (!error && mac && mac->crd_flags & CRD_F_KEY_EXPLICIT) { if ((mac->crd_klen / 8) <= SEC_MAX_KEY_LEN) { ses->ss_mklen = mac->crd_klen / 8; memcpy(ses->ss_mkey, mac->crd_key, ses->ss_mklen); } else error = E2BIG; } if (!error) { memcpy(desc->sd_desc->shd_key, ses->ss_key, ses->ss_klen); memcpy(desc->sd_desc->shd_mkey, ses->ss_mkey, ses->ss_mklen); error = ses->ss_eu->sem_make_desc(sc, ses, desc, crp, buftype); } if (error) { SEC_DESC_FREE_POINTERS(desc); SEC_DESC_PUT_BACK_LT(sc, desc); SEC_PUT_BACK_FREE_DESC(sc); SEC_UNLOCK(sc, descriptors); crp->crp_etype = error; crypto_done(crp); return (0); } /* * Skip DONE interrupt if this is not last request in burst, but only * if we are running on SEC 3.X. On SEC 2.X we have to enable DONE * signaling on each descriptor. 
*/
	if ((hint & CRYPTO_HINT_MORE) && sc->sc_version == 3)
		desc->sd_desc->shd_dn = 0;
	else
		desc->sd_desc->shd_dn = 1;

	/* Sync descriptor memory before handing it to the hardware */
	SEC_DESC_SYNC(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	SEC_DESC_SYNC_POINTERS(desc, BUS_DMASYNC_POSTREAD |
	    BUS_DMASYNC_POSTWRITE);
	SEC_DESC_FREE2READY(sc);
	SEC_UNLOCK(sc, descriptors);

	/* Enqueue ready descriptors in hardware */
	sec_enqueue(sc);

	return (0);
}

/*
 * Build a common non-snooping (cipher-only) hardware descriptor.
 *
 * Fills the seven descriptor pointer slots for an encrypt/decrypt request
 * without a hash: the IV and cipher key live inside the hardware
 * descriptor itself, and data in/out both reference the request buffer
 * (in-place operation).  Returns 0 or an errno from the pointer helpers.
 */
static int
sec_build_common_ns_desc(struct sec_softc *sc, struct sec_desc *desc,
    struct sec_session *ses, struct cryptop *crp, struct cryptodesc *enc,
    int buftype)
{
	struct sec_hw_desc *hd = desc->sd_desc;
	int error;

	hd->shd_desc_type = SEC_DT_COMMON_NONSNOOP;
	hd->shd_eu_sel1 = SEC_EU_NONE;
	hd->shd_mode1 = 0;

	/* Pointer 0: NULL */
	error = sec_make_pointer_direct(sc, desc, 0, 0, 0);
	if (error)
		return (error);

	/* Pointer 1: IV IN */
	error = sec_make_pointer_direct(sc, desc, 1, desc->sd_desc_paddr +
	    offsetof(struct sec_hw_desc, shd_iv), ses->ss_ivlen);
	if (error)
		return (error);

	/* Pointer 2: Cipher Key */
	error = sec_make_pointer_direct(sc, desc, 2, desc->sd_desc_paddr +
	    offsetof(struct sec_hw_desc, shd_key), ses->ss_klen);
	if (error)
		return (error);

	/* Pointer 3: Data IN */
	error = sec_make_pointer(sc, desc, 3, crp->crp_buf, enc->crd_skip,
	    enc->crd_len, buftype);
	if (error)
		return (error);

	/* Pointer 4: Data OUT */
	error = sec_make_pointer(sc, desc, 4, crp->crp_buf, enc->crd_skip,
	    enc->crd_len, buftype);
	if (error)
		return (error);

	/* Pointer 5: IV OUT (Not used: NULL) */
	error = sec_make_pointer_direct(sc, desc, 5, 0, 0);
	if (error)
		return (error);

	/* Pointer 6: NULL */
	error = sec_make_pointer_direct(sc, desc, 6, 0, 0);

	return (error);
}

/*
 * Build a common HMAC-snooping hardware descriptor for combined
 * cipher + HMAC requests.  The MAC region must contain the cipher
 * region and both must end at the same offset (validated below).
 */
static int
sec_build_common_s_desc(struct sec_softc *sc, struct sec_desc *desc,
    struct sec_session *ses, struct cryptop *crp, struct cryptodesc *enc,
    struct cryptodesc *mac, int buftype)
{
	struct sec_hw_desc *hd = desc->sd_desc;
	u_int eu, mode, hashlen;
	int error;

	/* Hash region must cover at least the encrypted region ... */
	if (mac->crd_len < enc->crd_len)
		return (EINVAL);

	/* ... and both regions must end at the same place */
	if (mac->crd_skip + mac->crd_len !=
enc->crd_skip + enc->crd_len) return (EINVAL); error = sec_mdeu_config(mac, &eu, &mode, &hashlen); if (error) return (error); hd->shd_desc_type = SEC_DT_HMAC_SNOOP; hd->shd_eu_sel1 = eu; hd->shd_mode1 = mode; /* Pointer 0: HMAC Key */ error = sec_make_pointer_direct(sc, desc, 0, desc->sd_desc_paddr + offsetof(struct sec_hw_desc, shd_mkey), ses->ss_mklen); if (error) return (error); /* Pointer 1: HMAC-Only Data IN */ error = sec_make_pointer(sc, desc, 1, crp->crp_buf, mac->crd_skip, mac->crd_len - enc->crd_len, buftype); if (error) return (error); /* Pointer 2: Cipher Key */ error = sec_make_pointer_direct(sc, desc, 2, desc->sd_desc_paddr + offsetof(struct sec_hw_desc, shd_key), ses->ss_klen); if (error) return (error); /* Pointer 3: IV IN */ error = sec_make_pointer_direct(sc, desc, 3, desc->sd_desc_paddr + offsetof(struct sec_hw_desc, shd_iv), ses->ss_ivlen); if (error) return (error); /* Pointer 4: Data IN */ error = sec_make_pointer(sc, desc, 4, crp->crp_buf, enc->crd_skip, enc->crd_len, buftype); if (error) return (error); /* Pointer 5: Data OUT */ error = sec_make_pointer(sc, desc, 5, crp->crp_buf, enc->crd_skip, enc->crd_len, buftype); if (error) return (error); /* Pointer 6: HMAC OUT */ error = sec_make_pointer(sc, desc, 6, crp->crp_buf, mac->crd_inject, hashlen, buftype); return (error); } /* AESU */ static int sec_aesu_newsession(struct sec_softc *sc, struct sec_session *ses, struct cryptoini *enc, struct cryptoini *mac) { if (enc == NULL) return (-1); if (enc->cri_alg != CRYPTO_AES_CBC) return (-1); ses->ss_ivlen = AES_BLOCK_LEN; return (0); } static int sec_aesu_make_desc(struct sec_softc *sc, struct sec_session *ses, struct sec_desc *desc, struct cryptop *crp, int buftype) { struct sec_hw_desc *hd = desc->sd_desc; struct cryptodesc *enc, *mac; int error; error = sec_split_crp(crp, &enc, &mac); if (error) return (error); if (!enc) return (EINVAL); hd->shd_eu_sel0 = SEC_EU_AESU; hd->shd_mode0 = SEC_AESU_MODE_CBC; if (enc->crd_alg != CRYPTO_AES_CBC) return 
(EINVAL); if (enc->crd_flags & CRD_F_ENCRYPT) { hd->shd_mode0 |= SEC_AESU_MODE_ED; hd->shd_dir = 0; } else hd->shd_dir = 1; if (mac) error = sec_build_common_s_desc(sc, desc, ses, crp, enc, mac, buftype); else error = sec_build_common_ns_desc(sc, desc, ses, crp, enc, buftype); return (error); } /* DEU */ static int sec_deu_newsession(struct sec_softc *sc, struct sec_session *ses, struct cryptoini *enc, struct cryptoini *mac) { if (enc == NULL) return (-1); switch (enc->cri_alg) { case CRYPTO_DES_CBC: case CRYPTO_3DES_CBC: break; default: return (-1); } ses->ss_ivlen = DES_BLOCK_LEN; return (0); } static int sec_deu_make_desc(struct sec_softc *sc, struct sec_session *ses, struct sec_desc *desc, struct cryptop *crp, int buftype) { struct sec_hw_desc *hd = desc->sd_desc; struct cryptodesc *enc, *mac; int error; error = sec_split_crp(crp, &enc, &mac); if (error) return (error); if (!enc) return (EINVAL); hd->shd_eu_sel0 = SEC_EU_DEU; hd->shd_mode0 = SEC_DEU_MODE_CBC; switch (enc->crd_alg) { case CRYPTO_3DES_CBC: hd->shd_mode0 |= SEC_DEU_MODE_TS; break; case CRYPTO_DES_CBC: break; default: return (EINVAL); } if (enc->crd_flags & CRD_F_ENCRYPT) { hd->shd_mode0 |= SEC_DEU_MODE_ED; hd->shd_dir = 0; } else hd->shd_dir = 1; if (mac) error = sec_build_common_s_desc(sc, desc, ses, crp, enc, mac, buftype); else error = sec_build_common_ns_desc(sc, desc, ses, crp, enc, buftype); return (error); } /* MDEU */ static int sec_mdeu_can_handle(u_int alg) { switch (alg) { case CRYPTO_MD5: case CRYPTO_SHA1: case CRYPTO_MD5_HMAC: case CRYPTO_SHA1_HMAC: case CRYPTO_SHA2_256_HMAC: case CRYPTO_SHA2_384_HMAC: case CRYPTO_SHA2_512_HMAC: return (1); default: return (0); } } static int sec_mdeu_config(struct cryptodesc *crd, u_int *eu, u_int *mode, u_int *hashlen) { *mode = SEC_MDEU_MODE_PD | SEC_MDEU_MODE_INIT; *eu = SEC_EU_NONE; switch (crd->crd_alg) { case CRYPTO_MD5_HMAC: *mode |= SEC_MDEU_MODE_HMAC; /* FALLTHROUGH */ case CRYPTO_MD5: *eu = SEC_EU_MDEU_A; *mode |= SEC_MDEU_MODE_MD5; 
*hashlen = MD5_HASH_LEN; break; case CRYPTO_SHA1_HMAC: *mode |= SEC_MDEU_MODE_HMAC; /* FALLTHROUGH */ case CRYPTO_SHA1: *eu = SEC_EU_MDEU_A; *mode |= SEC_MDEU_MODE_SHA1; *hashlen = SHA1_HASH_LEN; break; case CRYPTO_SHA2_256_HMAC: *mode |= SEC_MDEU_MODE_HMAC | SEC_MDEU_MODE_SHA256; *eu = SEC_EU_MDEU_A; break; case CRYPTO_SHA2_384_HMAC: *mode |= SEC_MDEU_MODE_HMAC | SEC_MDEU_MODE_SHA384; *eu = SEC_EU_MDEU_B; break; case CRYPTO_SHA2_512_HMAC: *mode |= SEC_MDEU_MODE_HMAC | SEC_MDEU_MODE_SHA512; *eu = SEC_EU_MDEU_B; break; default: return (EINVAL); } if (*mode & SEC_MDEU_MODE_HMAC) *hashlen = SEC_HMAC_HASH_LEN; return (0); } static int sec_mdeu_newsession(struct sec_softc *sc, struct sec_session *ses, struct cryptoini *enc, struct cryptoini *mac) { if (mac && sec_mdeu_can_handle(mac->cri_alg)) return (0); return (-1); } static int sec_mdeu_make_desc(struct sec_softc *sc, struct sec_session *ses, struct sec_desc *desc, struct cryptop *crp, int buftype) { struct cryptodesc *enc, *mac; struct sec_hw_desc *hd = desc->sd_desc; u_int eu, mode, hashlen; int error; error = sec_split_crp(crp, &enc, &mac); if (error) return (error); if (enc) return (EINVAL); error = sec_mdeu_config(mac, &eu, &mode, &hashlen); if (error) return (error); hd->shd_desc_type = SEC_DT_COMMON_NONSNOOP; hd->shd_eu_sel0 = eu; hd->shd_mode0 = mode; hd->shd_eu_sel1 = SEC_EU_NONE; hd->shd_mode1 = 0; /* Pointer 0: NULL */ error = sec_make_pointer_direct(sc, desc, 0, 0, 0); if (error) return (error); /* Pointer 1: Context In (Not used: NULL) */ error = sec_make_pointer_direct(sc, desc, 1, 0, 0); if (error) return (error); /* Pointer 2: HMAC Key (or NULL, depending on digest type) */ if (hd->shd_mode0 & SEC_MDEU_MODE_HMAC) error = sec_make_pointer_direct(sc, desc, 2, desc->sd_desc_paddr + offsetof(struct sec_hw_desc, shd_mkey), ses->ss_mklen); else error = sec_make_pointer_direct(sc, desc, 2, 0, 0); if (error) return (error); /* Pointer 3: Input Data */ error = sec_make_pointer(sc, desc, 3, crp->crp_buf, 
mac->crd_skip, mac->crd_len, buftype); if (error) return (error); /* Pointer 4: NULL */ error = sec_make_pointer_direct(sc, desc, 4, 0, 0); if (error) return (error); /* Pointer 5: Hash out */ error = sec_make_pointer(sc, desc, 5, crp->crp_buf, mac->crd_inject, hashlen, buftype); if (error) return (error); /* Pointer 6: NULL */ error = sec_make_pointer_direct(sc, desc, 6, 0, 0); return (0); } Index: head/sys/dev/tsec/if_tsec.c =================================================================== --- head/sys/dev/tsec/if_tsec.c (revision 293038) +++ head/sys/dev/tsec/if_tsec.c (revision 293039) @@ -1,1940 +1,1940 @@ /*- * Copyright (C) 2007-2008 Semihalf, Rafal Jaworowski * Copyright (C) 2006-2007 Semihalf, Piotr Kruszynski * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ /* * Freescale integrated Three-Speed Ethernet Controller (TSEC) driver. */ #include __FBSDID("$FreeBSD$"); #ifdef HAVE_KERNEL_OPTION_HEADERS #include "opt_device_polling.h" #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include static int tsec_alloc_dma_desc(device_t dev, bus_dma_tag_t *dtag, bus_dmamap_t *dmap, bus_size_t dsize, void **vaddr, void *raddr, const char *dname); static void tsec_dma_ctl(struct tsec_softc *sc, int state); static int tsec_encap(struct tsec_softc *sc, struct mbuf *m_head, int fcb_inserted); static void tsec_free_dma(struct tsec_softc *sc); static void tsec_free_dma_desc(bus_dma_tag_t dtag, bus_dmamap_t dmap, void *vaddr); static int tsec_ifmedia_upd(struct ifnet *ifp); static void tsec_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr); static int tsec_new_rxbuf(bus_dma_tag_t tag, bus_dmamap_t map, struct mbuf **mbufp, uint32_t *paddr); static void tsec_map_dma_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error); static void tsec_intrs_ctl(struct tsec_softc *sc, int state); static void tsec_init(void *xsc); static void tsec_init_locked(struct tsec_softc *sc); static int tsec_ioctl(struct ifnet *ifp, u_long command, caddr_t data); static void tsec_reset_mac(struct tsec_softc *sc); static void tsec_setfilter(struct tsec_softc *sc); static void tsec_set_mac_address(struct tsec_softc *sc); static void tsec_start(struct ifnet *ifp); static void tsec_start_locked(struct ifnet *ifp); static void tsec_stop(struct tsec_softc *sc); static void tsec_tick(void *arg); static void tsec_watchdog(struct tsec_softc *sc); static void tsec_add_sysctls(struct tsec_softc *sc); static int tsec_sysctl_ic_time(SYSCTL_HANDLER_ARGS); static int tsec_sysctl_ic_count(SYSCTL_HANDLER_ARGS); static void tsec_set_rxic(struct tsec_softc *sc); static void 
tsec_set_txic(struct tsec_softc *sc); static int tsec_receive_intr_locked(struct tsec_softc *sc, int count); static void tsec_transmit_intr_locked(struct tsec_softc *sc); static void tsec_error_intr_locked(struct tsec_softc *sc, int count); static void tsec_offload_setup(struct tsec_softc *sc); static void tsec_offload_process_frame(struct tsec_softc *sc, struct mbuf *m); static void tsec_setup_multicast(struct tsec_softc *sc); static int tsec_set_mtu(struct tsec_softc *sc, unsigned int mtu); devclass_t tsec_devclass; DRIVER_MODULE(miibus, tsec, miibus_driver, miibus_devclass, 0, 0); MODULE_DEPEND(tsec, ether, 1, 1, 1); MODULE_DEPEND(tsec, miibus, 1, 1, 1); struct mtx tsec_phy_mtx; int tsec_attach(struct tsec_softc *sc) { uint8_t hwaddr[ETHER_ADDR_LEN]; struct ifnet *ifp; bus_dmamap_t *map_ptr; bus_dmamap_t **map_pptr; int error = 0; int i; /* Initialize global (because potentially shared) MII lock */ if (!mtx_initialized(&tsec_phy_mtx)) mtx_init(&tsec_phy_mtx, "tsec mii", NULL, MTX_DEF); /* Reset all TSEC counters */ TSEC_TX_RX_COUNTERS_INIT(sc); /* Stop DMA engine if enabled by firmware */ tsec_dma_ctl(sc, 0); /* Reset MAC */ tsec_reset_mac(sc); /* Disable interrupts for now */ tsec_intrs_ctl(sc, 0); /* Configure defaults for interrupts coalescing */ sc->rx_ic_time = 768; sc->rx_ic_count = 16; sc->tx_ic_time = 768; sc->tx_ic_count = 16; tsec_set_rxic(sc); tsec_set_txic(sc); tsec_add_sysctls(sc); /* Allocate a busdma tag and DMA safe memory for TX descriptors. */ error = tsec_alloc_dma_desc(sc->dev, &sc->tsec_tx_dtag, &sc->tsec_tx_dmap, sizeof(*sc->tsec_tx_vaddr) * TSEC_TX_NUM_DESC, (void **)&sc->tsec_tx_vaddr, &sc->tsec_tx_raddr, "TX"); if (error) { tsec_detach(sc); return (ENXIO); } /* Allocate a busdma tag and DMA safe memory for RX descriptors. 
*/ error = tsec_alloc_dma_desc(sc->dev, &sc->tsec_rx_dtag, &sc->tsec_rx_dmap, sizeof(*sc->tsec_rx_vaddr) * TSEC_RX_NUM_DESC, (void **)&sc->tsec_rx_vaddr, &sc->tsec_rx_raddr, "RX"); if (error) { tsec_detach(sc); return (ENXIO); } /* Allocate a busdma tag for TX mbufs. */ error = bus_dma_tag_create(NULL, /* parent */ TSEC_TXBUFFER_ALIGNMENT, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filtfunc, filtfuncarg */ MCLBYTES * (TSEC_TX_NUM_DESC - 1), /* maxsize */ TSEC_TX_NUM_DESC - 1, /* nsegments */ MCLBYTES, 0, /* maxsegsz, flags */ NULL, NULL, /* lockfunc, lockfuncarg */ &sc->tsec_tx_mtag); /* dmat */ if (error) { device_printf(sc->dev, "failed to allocate busdma tag " "(tx mbufs)\n"); tsec_detach(sc); return (ENXIO); } /* Allocate a busdma tag for RX mbufs. */ error = bus_dma_tag_create(NULL, /* parent */ TSEC_RXBUFFER_ALIGNMENT, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filtfunc, filtfuncarg */ MCLBYTES, /* maxsize */ 1, /* nsegments */ MCLBYTES, 0, /* maxsegsz, flags */ NULL, NULL, /* lockfunc, lockfuncarg */ &sc->tsec_rx_mtag); /* dmat */ if (error) { device_printf(sc->dev, "failed to allocate busdma tag " "(rx mbufs)\n"); tsec_detach(sc); return (ENXIO); } /* Create TX busdma maps */ map_ptr = sc->tx_map_data; map_pptr = sc->tx_map_unused_data; for (i = 0; i < TSEC_TX_NUM_DESC; i++) { map_pptr[i] = &map_ptr[i]; error = bus_dmamap_create(sc->tsec_tx_mtag, 0, map_pptr[i]); if (error) { device_printf(sc->dev, "failed to init TX ring\n"); tsec_detach(sc); return (ENXIO); } } /* Create RX busdma maps and zero mbuf handlers */ for (i = 0; i < TSEC_RX_NUM_DESC; i++) { error = bus_dmamap_create(sc->tsec_rx_mtag, 0, &sc->rx_data[i].map); if (error) { device_printf(sc->dev, "failed to init RX ring\n"); tsec_detach(sc); return (ENXIO); } sc->rx_data[i].mbuf = NULL; } /* Create mbufs for RX buffers */ for (i = 0; i < 
TSEC_RX_NUM_DESC; i++) { error = tsec_new_rxbuf(sc->tsec_rx_mtag, sc->rx_data[i].map, &sc->rx_data[i].mbuf, &sc->rx_data[i].paddr); if (error) { device_printf(sc->dev, "can't load rx DMA map %d, " "error = %d\n", i, error); tsec_detach(sc); return (error); } } /* Create network interface for upper layers */ ifp = sc->tsec_ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { device_printf(sc->dev, "if_alloc() failed\n"); tsec_detach(sc); return (ENOMEM); } ifp->if_softc = sc; if_initname(ifp, device_get_name(sc->dev), device_get_unit(sc->dev)); ifp->if_flags = IFF_SIMPLEX | IFF_MULTICAST | IFF_BROADCAST; ifp->if_init = tsec_init; ifp->if_start = tsec_start; ifp->if_ioctl = tsec_ioctl; IFQ_SET_MAXLEN(&ifp->if_snd, TSEC_TX_NUM_DESC - 1); ifp->if_snd.ifq_drv_maxlen = TSEC_TX_NUM_DESC - 1; IFQ_SET_READY(&ifp->if_snd); ifp->if_capabilities = IFCAP_VLAN_MTU; if (sc->is_etsec) ifp->if_capabilities |= IFCAP_HWCSUM; ifp->if_capenable = ifp->if_capabilities; #ifdef DEVICE_POLLING /* Advertise that polling is supported */ ifp->if_capabilities |= IFCAP_POLLING; #endif /* Attach PHY(s) */ error = mii_attach(sc->dev, &sc->tsec_miibus, ifp, tsec_ifmedia_upd, tsec_ifmedia_sts, BMSR_DEFCAPMASK, sc->phyaddr, MII_OFFSET_ANY, 0); if (error) { device_printf(sc->dev, "attaching PHYs failed\n"); if_free(ifp); sc->tsec_ifp = NULL; tsec_detach(sc); return (error); } sc->tsec_mii = device_get_softc(sc->tsec_miibus); /* Set MAC address */ tsec_get_hwaddr(sc, hwaddr); ether_ifattach(ifp, hwaddr); return (0); } int tsec_detach(struct tsec_softc *sc) { if (sc->tsec_ifp != NULL) { #ifdef DEVICE_POLLING if (sc->tsec_ifp->if_capenable & IFCAP_POLLING) ether_poll_deregister(sc->tsec_ifp); #endif /* Stop TSEC controller and free TX queue */ if (sc->sc_rres) tsec_shutdown(sc->dev); /* Detach network interface */ ether_ifdetach(sc->tsec_ifp); if_free(sc->tsec_ifp); sc->tsec_ifp = NULL; } /* Free DMA resources */ tsec_free_dma(sc); return (0); } int tsec_shutdown(device_t dev) { struct tsec_softc *sc; sc = 
device_get_softc(dev); TSEC_GLOBAL_LOCK(sc); tsec_stop(sc); TSEC_GLOBAL_UNLOCK(sc); return (0); } int tsec_suspend(device_t dev) { /* TODO not implemented! */ return (0); } int tsec_resume(device_t dev) { /* TODO not implemented! */ return (0); } static void tsec_init(void *xsc) { struct tsec_softc *sc = xsc; TSEC_GLOBAL_LOCK(sc); tsec_init_locked(sc); TSEC_GLOBAL_UNLOCK(sc); } static void tsec_init_locked(struct tsec_softc *sc) { struct tsec_desc *tx_desc = sc->tsec_tx_vaddr; struct tsec_desc *rx_desc = sc->tsec_rx_vaddr; struct ifnet *ifp = sc->tsec_ifp; uint32_t timeout, val, i; if (ifp->if_drv_flags & IFF_DRV_RUNNING) return; TSEC_GLOBAL_LOCK_ASSERT(sc); tsec_stop(sc); /* * These steps are according to the MPC8555E PowerQUICCIII RM: * 14.7 Initialization/Application Information */ /* Step 1: soft reset MAC */ tsec_reset_mac(sc); /* Step 2: Initialize MACCFG2 */ TSEC_WRITE(sc, TSEC_REG_MACCFG2, TSEC_MACCFG2_FULLDUPLEX | /* Full Duplex = 1 */ TSEC_MACCFG2_PADCRC | /* PAD/CRC append */ TSEC_MACCFG2_GMII | /* I/F Mode bit */ TSEC_MACCFG2_PRECNT /* Preamble count = 7 */ ); /* Step 3: Initialize ECNTRL * While the documentation states that R100M is ignored if RPM is * not set, it does seem to be needed to get the orange boxes to * work (which have a Marvell 88E1111 PHY). Go figure. */ /* * XXX kludge - use circumstancial evidence to program ECNTRL * correctly. Ideally we need some board information to guide * us here. */ i = TSEC_READ(sc, TSEC_REG_ID2); val = (i & 0xffff) ? 
(TSEC_ECNTRL_TBIM | TSEC_ECNTRL_SGMIIM) /* Sumatra */ : TSEC_ECNTRL_R100M; /* Orange + CDS */ TSEC_WRITE(sc, TSEC_REG_ECNTRL, TSEC_ECNTRL_STEN | val); /* Step 4: Initialize MAC station address */ tsec_set_mac_address(sc); /* * Step 5: Assign a Physical address to the TBI so as to not conflict * with the external PHY physical address */ TSEC_WRITE(sc, TSEC_REG_TBIPA, 5); TSEC_PHY_LOCK(sc); /* Step 6: Reset the management interface */ TSEC_PHY_WRITE(sc, TSEC_REG_MIIMCFG, TSEC_MIIMCFG_RESETMGMT); /* Step 7: Setup the MII Mgmt clock speed */ TSEC_PHY_WRITE(sc, TSEC_REG_MIIMCFG, TSEC_MIIMCFG_CLKDIV28); /* Step 8: Read MII Mgmt indicator register and check for Busy = 0 */ timeout = TSEC_READ_RETRY; while (--timeout && (TSEC_PHY_READ(sc, TSEC_REG_MIIMIND) & TSEC_MIIMIND_BUSY)) DELAY(TSEC_READ_DELAY); if (timeout == 0) { if_printf(ifp, "tsec_init_locked(): Mgmt busy timeout\n"); return; } TSEC_PHY_UNLOCK(sc); /* Step 9: Setup the MII Mgmt */ mii_mediachg(sc->tsec_mii); /* Step 10: Clear IEVENT register */ TSEC_WRITE(sc, TSEC_REG_IEVENT, 0xffffffff); /* Step 11: Enable interrupts */ #ifdef DEVICE_POLLING /* * ...only if polling is not turned on. Disable interrupts explicitly * if polling is enabled. 
*/ if (ifp->if_capenable & IFCAP_POLLING ) tsec_intrs_ctl(sc, 0); else #endif /* DEVICE_POLLING */ tsec_intrs_ctl(sc, 1); /* Step 12: Initialize IADDRn */ TSEC_WRITE(sc, TSEC_REG_IADDR0, 0); TSEC_WRITE(sc, TSEC_REG_IADDR1, 0); TSEC_WRITE(sc, TSEC_REG_IADDR2, 0); TSEC_WRITE(sc, TSEC_REG_IADDR3, 0); TSEC_WRITE(sc, TSEC_REG_IADDR4, 0); TSEC_WRITE(sc, TSEC_REG_IADDR5, 0); TSEC_WRITE(sc, TSEC_REG_IADDR6, 0); TSEC_WRITE(sc, TSEC_REG_IADDR7, 0); /* Step 13: Initialize GADDRn */ TSEC_WRITE(sc, TSEC_REG_GADDR0, 0); TSEC_WRITE(sc, TSEC_REG_GADDR1, 0); TSEC_WRITE(sc, TSEC_REG_GADDR2, 0); TSEC_WRITE(sc, TSEC_REG_GADDR3, 0); TSEC_WRITE(sc, TSEC_REG_GADDR4, 0); TSEC_WRITE(sc, TSEC_REG_GADDR5, 0); TSEC_WRITE(sc, TSEC_REG_GADDR6, 0); TSEC_WRITE(sc, TSEC_REG_GADDR7, 0); /* Step 14: Initialize RCTRL */ TSEC_WRITE(sc, TSEC_REG_RCTRL, 0); /* Step 15: Initialize DMACTRL */ tsec_dma_ctl(sc, 1); /* Step 16: Initialize FIFO_PAUSE_CTRL */ TSEC_WRITE(sc, TSEC_REG_FIFO_PAUSE_CTRL, TSEC_FIFO_PAUSE_CTRL_EN); /* * Step 17: Initialize transmit/receive descriptor rings. * Initialize TBASE and RBASE. */ TSEC_WRITE(sc, TSEC_REG_TBASE, sc->tsec_tx_raddr); TSEC_WRITE(sc, TSEC_REG_RBASE, sc->tsec_rx_raddr); for (i = 0; i < TSEC_TX_NUM_DESC; i++) { tx_desc[i].bufptr = 0; tx_desc[i].length = 0; tx_desc[i].flags = ((i == TSEC_TX_NUM_DESC - 1) ? TSEC_TXBD_W : 0); } bus_dmamap_sync(sc->tsec_tx_dtag, sc->tsec_tx_dmap, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); for (i = 0; i < TSEC_RX_NUM_DESC; i++) { rx_desc[i].bufptr = sc->rx_data[i].paddr; rx_desc[i].length = 0; rx_desc[i].flags = TSEC_RXBD_E | TSEC_RXBD_I | ((i == TSEC_RX_NUM_DESC - 1) ? 
TSEC_RXBD_W : 0); } bus_dmamap_sync(sc->tsec_rx_dtag, sc->tsec_rx_dmap, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); /* Step 18: Initialize the maximum receive buffer length */ TSEC_WRITE(sc, TSEC_REG_MRBLR, MCLBYTES); /* Step 19: Configure ethernet frame sizes */ TSEC_WRITE(sc, TSEC_REG_MINFLR, TSEC_MIN_FRAME_SIZE); tsec_set_mtu(sc, ifp->if_mtu); /* Step 20: Enable Rx and RxBD sdata snooping */ TSEC_WRITE(sc, TSEC_REG_ATTR, TSEC_ATTR_RDSEN | TSEC_ATTR_RBDSEN); TSEC_WRITE(sc, TSEC_REG_ATTRELI, 0); /* Step 21: Reset collision counters in hardware */ TSEC_WRITE(sc, TSEC_REG_MON_TSCL, 0); TSEC_WRITE(sc, TSEC_REG_MON_TMCL, 0); TSEC_WRITE(sc, TSEC_REG_MON_TLCL, 0); TSEC_WRITE(sc, TSEC_REG_MON_TXCL, 0); TSEC_WRITE(sc, TSEC_REG_MON_TNCL, 0); /* Step 22: Mask all CAM interrupts */ TSEC_WRITE(sc, TSEC_REG_MON_CAM1, 0xffffffff); TSEC_WRITE(sc, TSEC_REG_MON_CAM2, 0xffffffff); /* Step 23: Enable Rx and Tx */ val = TSEC_READ(sc, TSEC_REG_MACCFG1); val |= (TSEC_MACCFG1_RX_EN | TSEC_MACCFG1_TX_EN); TSEC_WRITE(sc, TSEC_REG_MACCFG1, val); /* Step 24: Reset TSEC counters for Tx and Rx rings */ TSEC_TX_RX_COUNTERS_INIT(sc); /* Step 25: Setup TCP/IP Off-Load engine */ if (sc->is_etsec) tsec_offload_setup(sc); /* Step 26: Setup multicast filters */ tsec_setup_multicast(sc); /* Step 27: Activate network interface */ ifp->if_drv_flags |= IFF_DRV_RUNNING; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; sc->tsec_if_flags = ifp->if_flags; sc->tsec_watchdog = 0; /* Schedule watchdog timeout */ callout_reset(&sc->tsec_callout, hz, tsec_tick, sc); } static void tsec_set_mac_address(struct tsec_softc *sc) { uint32_t macbuf[2] = { 0, 0 }; char *macbufp, *curmac; int i; TSEC_GLOBAL_LOCK_ASSERT(sc); KASSERT((ETHER_ADDR_LEN <= sizeof(macbuf)), - ("tsec_set_mac_address: (%d <= %d", ETHER_ADDR_LEN, + ("tsec_set_mac_address: (%d <= %zd", ETHER_ADDR_LEN, sizeof(macbuf))); macbufp = (char *)macbuf; curmac = (char *)IF_LLADDR(sc->tsec_ifp); /* Correct order of MAC address bytes */ for (i = 1; i <= ETHER_ADDR_LEN; 
i++) macbufp[ETHER_ADDR_LEN-i] = curmac[i-1]; /* Initialize MAC station address MACSTNADDR2 and MACSTNADDR1 */ TSEC_WRITE(sc, TSEC_REG_MACSTNADDR2, macbuf[1]); TSEC_WRITE(sc, TSEC_REG_MACSTNADDR1, macbuf[0]); } /* * DMA control function, if argument state is: * 0 - DMA engine will be disabled * 1 - DMA engine will be enabled */ static void tsec_dma_ctl(struct tsec_softc *sc, int state) { device_t dev; uint32_t dma_flags, timeout; dev = sc->dev; dma_flags = TSEC_READ(sc, TSEC_REG_DMACTRL); switch (state) { case 0: /* Temporarily clear stop graceful stop bits. */ tsec_dma_ctl(sc, 1000); /* Set it again */ dma_flags |= (TSEC_DMACTRL_GRS | TSEC_DMACTRL_GTS); break; case 1000: case 1: /* Set write with response (WWR), wait (WOP) and snoop bits */ dma_flags |= (TSEC_DMACTRL_TDSEN | TSEC_DMACTRL_TBDSEN | DMACTRL_WWR | DMACTRL_WOP); /* Clear graceful stop bits */ dma_flags &= ~(TSEC_DMACTRL_GRS | TSEC_DMACTRL_GTS); break; default: device_printf(dev, "tsec_dma_ctl(): unknown state value: %d\n", state); } TSEC_WRITE(sc, TSEC_REG_DMACTRL, dma_flags); switch (state) { case 0: /* Wait for DMA stop */ timeout = TSEC_READ_RETRY; while (--timeout && (!(TSEC_READ(sc, TSEC_REG_IEVENT) & (TSEC_IEVENT_GRSC | TSEC_IEVENT_GTSC)))) DELAY(TSEC_READ_DELAY); if (timeout == 0) device_printf(dev, "tsec_dma_ctl(): timeout!\n"); break; case 1: /* Restart transmission function */ TSEC_WRITE(sc, TSEC_REG_TSTAT, TSEC_TSTAT_THLT); } } /* * Interrupts control function, if argument state is: * 0 - all TSEC interrupts will be masked * 1 - all TSEC interrupts will be unmasked */ static void tsec_intrs_ctl(struct tsec_softc *sc, int state) { device_t dev; dev = sc->dev; switch (state) { case 0: TSEC_WRITE(sc, TSEC_REG_IMASK, 0); break; case 1: TSEC_WRITE(sc, TSEC_REG_IMASK, TSEC_IMASK_BREN | TSEC_IMASK_RXCEN | TSEC_IMASK_BSYEN | TSEC_IMASK_EBERREN | TSEC_IMASK_BTEN | TSEC_IMASK_TXEEN | TSEC_IMASK_TXBEN | TSEC_IMASK_TXFEN | TSEC_IMASK_XFUNEN | TSEC_IMASK_RXFEN); break; default: device_printf(dev, 
"tsec_intrs_ctl(): unknown state value: %d\n", state); } } static void tsec_reset_mac(struct tsec_softc *sc) { uint32_t maccfg1_flags; /* Set soft reset bit */ maccfg1_flags = TSEC_READ(sc, TSEC_REG_MACCFG1); maccfg1_flags |= TSEC_MACCFG1_SOFT_RESET; TSEC_WRITE(sc, TSEC_REG_MACCFG1, maccfg1_flags); /* Clear soft reset bit */ maccfg1_flags = TSEC_READ(sc, TSEC_REG_MACCFG1); maccfg1_flags &= ~TSEC_MACCFG1_SOFT_RESET; TSEC_WRITE(sc, TSEC_REG_MACCFG1, maccfg1_flags); } static void tsec_watchdog(struct tsec_softc *sc) { struct ifnet *ifp; TSEC_GLOBAL_LOCK_ASSERT(sc); if (sc->tsec_watchdog == 0 || --sc->tsec_watchdog > 0) return; ifp = sc->tsec_ifp; if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); if_printf(ifp, "watchdog timeout\n"); tsec_stop(sc); tsec_init_locked(sc); } static void tsec_start(struct ifnet *ifp) { struct tsec_softc *sc = ifp->if_softc; TSEC_TRANSMIT_LOCK(sc); tsec_start_locked(ifp); TSEC_TRANSMIT_UNLOCK(sc); } static void tsec_start_locked(struct ifnet *ifp) { struct tsec_softc *sc; struct mbuf *m0, *mtmp; struct tsec_tx_fcb *tx_fcb; unsigned int queued = 0; int csum_flags, fcb_inserted = 0; sc = ifp->if_softc; TSEC_TRANSMIT_LOCK_ASSERT(sc); if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != IFF_DRV_RUNNING) return; if (sc->tsec_link == 0) return; bus_dmamap_sync(sc->tsec_tx_dtag, sc->tsec_tx_dmap, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) { /* Get packet from the queue */ IFQ_DRV_DEQUEUE(&ifp->if_snd, m0); if (m0 == NULL) break; /* Insert TCP/IP Off-load frame control block */ csum_flags = m0->m_pkthdr.csum_flags; if (csum_flags) { M_PREPEND(m0, sizeof(struct tsec_tx_fcb), M_NOWAIT); if (m0 == NULL) break; tx_fcb = mtod(m0, struct tsec_tx_fcb *); tx_fcb->flags = 0; tx_fcb->l3_offset = ETHER_HDR_LEN; tx_fcb->l4_offset = sizeof(struct ip); if (csum_flags & CSUM_IP) tx_fcb->flags |= TSEC_TX_FCB_IP4 | TSEC_TX_FCB_CSUM_IP; if (csum_flags & CSUM_TCP) tx_fcb->flags |= TSEC_TX_FCB_TCP | 
TSEC_TX_FCB_CSUM_TCP_UDP; if (csum_flags & CSUM_UDP) tx_fcb->flags |= TSEC_TX_FCB_UDP | TSEC_TX_FCB_CSUM_TCP_UDP; fcb_inserted = 1; } mtmp = m_defrag(m0, M_NOWAIT); if (mtmp) m0 = mtmp; if (tsec_encap(sc, m0, fcb_inserted)) { IFQ_DRV_PREPEND(&ifp->if_snd, m0); ifp->if_drv_flags |= IFF_DRV_OACTIVE; break; } queued++; BPF_MTAP(ifp, m0); } bus_dmamap_sync(sc->tsec_tx_dtag, sc->tsec_tx_dmap, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); if (queued) { /* Enable transmitter and watchdog timer */ TSEC_WRITE(sc, TSEC_REG_TSTAT, TSEC_TSTAT_THLT); sc->tsec_watchdog = 5; } } static int tsec_encap(struct tsec_softc *sc, struct mbuf *m0, int fcb_inserted) { struct tsec_desc *tx_desc = NULL; struct ifnet *ifp; bus_dma_segment_t segs[TSEC_TX_NUM_DESC]; bus_dmamap_t *mapp; int csum_flag = 0, error, seg, nsegs; TSEC_TRANSMIT_LOCK_ASSERT(sc); ifp = sc->tsec_ifp; if (TSEC_FREE_TX_DESC(sc) == 0) { /* No free descriptors */ return (-1); } /* Fetch unused map */ mapp = TSEC_ALLOC_TX_MAP(sc); /* Create mapping in DMA memory */ error = bus_dmamap_load_mbuf_sg(sc->tsec_tx_mtag, *mapp, m0, segs, &nsegs, BUS_DMA_NOWAIT); if (error != 0 || nsegs > TSEC_FREE_TX_DESC(sc) || nsegs <= 0) { bus_dmamap_unload(sc->tsec_tx_mtag, *mapp); TSEC_FREE_TX_MAP(sc, mapp); return ((error != 0) ? error : -1); } bus_dmamap_sync(sc->tsec_tx_mtag, *mapp, BUS_DMASYNC_PREWRITE); if ((ifp->if_flags & IFF_DEBUG) && (nsegs > 1)) if_printf(ifp, "TX buffer has %d segments\n", nsegs); if (fcb_inserted) csum_flag = TSEC_TXBD_TOE; /* Everything is ok, now we can send buffers */ for (seg = 0; seg < nsegs; seg++) { tx_desc = TSEC_GET_CUR_TX_DESC(sc); tx_desc->length = segs[seg].ds_len; tx_desc->bufptr = segs[seg].ds_addr; /* * Set flags: * - wrap * - checksum * - ready to send * - transmit the CRC sequence after the last data byte * - interrupt after the last buffer */ tx_desc->flags = (tx_desc->flags & TSEC_TXBD_W) | ((seg == 0) ? csum_flag : 0) | TSEC_TXBD_R | TSEC_TXBD_TC | ((seg == nsegs - 1) ? 
TSEC_TXBD_L | TSEC_TXBD_I : 0); } /* Save mbuf and DMA mapping for release at later stage */ TSEC_PUT_TX_MBUF(sc, m0); TSEC_PUT_TX_MAP(sc, mapp); return (0); } static void tsec_setfilter(struct tsec_softc *sc) { struct ifnet *ifp; uint32_t flags; ifp = sc->tsec_ifp; flags = TSEC_READ(sc, TSEC_REG_RCTRL); /* Promiscuous mode */ if (ifp->if_flags & IFF_PROMISC) flags |= TSEC_RCTRL_PROM; else flags &= ~TSEC_RCTRL_PROM; TSEC_WRITE(sc, TSEC_REG_RCTRL, flags); } #ifdef DEVICE_POLLING static poll_handler_t tsec_poll; static int tsec_poll(struct ifnet *ifp, enum poll_cmd cmd, int count) { uint32_t ie; struct tsec_softc *sc = ifp->if_softc; int rx_npkts; rx_npkts = 0; TSEC_GLOBAL_LOCK(sc); if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { TSEC_GLOBAL_UNLOCK(sc); return (rx_npkts); } if (cmd == POLL_AND_CHECK_STATUS) { tsec_error_intr_locked(sc, count); /* Clear all events reported */ ie = TSEC_READ(sc, TSEC_REG_IEVENT); TSEC_WRITE(sc, TSEC_REG_IEVENT, ie); } tsec_transmit_intr_locked(sc); TSEC_GLOBAL_TO_RECEIVE_LOCK(sc); rx_npkts = tsec_receive_intr_locked(sc, count); TSEC_RECEIVE_UNLOCK(sc); return (rx_npkts); } #endif /* DEVICE_POLLING */ static int tsec_ioctl(struct ifnet *ifp, u_long command, caddr_t data) { struct tsec_softc *sc = ifp->if_softc; struct ifreq *ifr = (struct ifreq *)data; device_t dev; int mask, error = 0; dev = sc->dev; switch (command) { case SIOCSIFMTU: TSEC_GLOBAL_LOCK(sc); if (tsec_set_mtu(sc, ifr->ifr_mtu)) ifp->if_mtu = ifr->ifr_mtu; else error = EINVAL; TSEC_GLOBAL_UNLOCK(sc); break; case SIOCSIFFLAGS: TSEC_GLOBAL_LOCK(sc); if (ifp->if_flags & IFF_UP) { if (ifp->if_drv_flags & IFF_DRV_RUNNING) { if ((sc->tsec_if_flags ^ ifp->if_flags) & IFF_PROMISC) tsec_setfilter(sc); if ((sc->tsec_if_flags ^ ifp->if_flags) & IFF_ALLMULTI) tsec_setup_multicast(sc); } else tsec_init_locked(sc); } else if (ifp->if_drv_flags & IFF_DRV_RUNNING) tsec_stop(sc); sc->tsec_if_flags = ifp->if_flags; TSEC_GLOBAL_UNLOCK(sc); break; case SIOCADDMULTI: case SIOCDELMULTI: if 
(ifp->if_drv_flags & IFF_DRV_RUNNING) { TSEC_GLOBAL_LOCK(sc); tsec_setup_multicast(sc); TSEC_GLOBAL_UNLOCK(sc); } case SIOCGIFMEDIA: case SIOCSIFMEDIA: error = ifmedia_ioctl(ifp, ifr, &sc->tsec_mii->mii_media, command); break; case SIOCSIFCAP: mask = ifp->if_capenable ^ ifr->ifr_reqcap; if ((mask & IFCAP_HWCSUM) && sc->is_etsec) { TSEC_GLOBAL_LOCK(sc); ifp->if_capenable &= ~IFCAP_HWCSUM; ifp->if_capenable |= IFCAP_HWCSUM & ifr->ifr_reqcap; tsec_offload_setup(sc); TSEC_GLOBAL_UNLOCK(sc); } #ifdef DEVICE_POLLING if (mask & IFCAP_POLLING) { if (ifr->ifr_reqcap & IFCAP_POLLING) { error = ether_poll_register(tsec_poll, ifp); if (error) return (error); TSEC_GLOBAL_LOCK(sc); /* Disable interrupts */ tsec_intrs_ctl(sc, 0); ifp->if_capenable |= IFCAP_POLLING; TSEC_GLOBAL_UNLOCK(sc); } else { error = ether_poll_deregister(ifp); TSEC_GLOBAL_LOCK(sc); /* Enable interrupts */ tsec_intrs_ctl(sc, 1); ifp->if_capenable &= ~IFCAP_POLLING; TSEC_GLOBAL_UNLOCK(sc); } } #endif break; default: error = ether_ioctl(ifp, command, data); } /* Flush buffers if not empty */ if (ifp->if_flags & IFF_UP) tsec_start(ifp); return (error); } static int tsec_ifmedia_upd(struct ifnet *ifp) { struct tsec_softc *sc = ifp->if_softc; struct mii_data *mii; TSEC_TRANSMIT_LOCK(sc); mii = sc->tsec_mii; mii_mediachg(mii); TSEC_TRANSMIT_UNLOCK(sc); return (0); } static void tsec_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) { struct tsec_softc *sc = ifp->if_softc; struct mii_data *mii; TSEC_TRANSMIT_LOCK(sc); mii = sc->tsec_mii; mii_pollstat(mii); ifmr->ifm_active = mii->mii_media_active; ifmr->ifm_status = mii->mii_media_status; TSEC_TRANSMIT_UNLOCK(sc); } static int tsec_new_rxbuf(bus_dma_tag_t tag, bus_dmamap_t map, struct mbuf **mbufp, uint32_t *paddr) { struct mbuf *new_mbuf; bus_dma_segment_t seg[1]; int error, nsegs; KASSERT(mbufp != NULL, ("NULL mbuf pointer!")); new_mbuf = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MCLBYTES); if (new_mbuf == NULL) return (ENOBUFS); new_mbuf->m_len = 
new_mbuf->m_pkthdr.len = new_mbuf->m_ext.ext_size;

	if (*mbufp) {
		bus_dmamap_sync(tag, map, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(tag, map);
	}

	error = bus_dmamap_load_mbuf_sg(tag, map, new_mbuf, seg, &nsegs,
	    BUS_DMA_NOWAIT);
	KASSERT(nsegs == 1, ("Too many segments returned!"));
	/* A multi-segment or failed load is treated as fatal here */
	if (nsegs != 1 || error)
		panic("tsec_new_rxbuf(): nsegs(%d), error(%d)",
		    nsegs, error);

#if 0
	if (error) {
		printf("tsec: bus_dmamap_load_mbuf_sg() returned: %d!\n",
			error);
		m_freem(new_mbuf);
		return (ENOBUFS);
	}
#endif

#if 0
	KASSERT(((seg->ds_addr) & (TSEC_RXBUFFER_ALIGNMENT-1)) == 0,
		("Wrong alignment of RX buffer!"));
#endif
	bus_dmamap_sync(tag, map, BUS_DMASYNC_PREREAD);

	(*mbufp) = new_mbuf;
	(*paddr) = seg->ds_addr;

	return (0);
}

/*
 * bus_dmamap_load() callback: store the single segment's bus address
 * into the uint32_t pointed to by 'arg'.
 */
static void
tsec_map_dma_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	u_int32_t *paddr;

	KASSERT(nseg == 1, ("wrong number of segments, should be 1"));
	paddr = arg;
	*paddr = segs->ds_addr;
}

/*
 * Create a DMA tag, allocate zeroed DMA-safe memory for a TX/RX
 * descriptor ring and load it, returning the KVA through *vaddr and
 * the bus address through the tsec_map_dma_addr() callback into raddr.
 * On any failure everything already acquired is torn down, *vaddr is
 * set to NULL and ENXIO is returned; returns 0 on success.
 */
static int
tsec_alloc_dma_desc(device_t dev, bus_dma_tag_t *dtag, bus_dmamap_t *dmap,
    bus_size_t dsize, void **vaddr, void *raddr, const char *dname)
{
	int error;

	/* Allocate a busdma tag and DMA safe memory for TX/RX descriptors. */
	error = bus_dma_tag_create(NULL,	/* parent */
	    PAGE_SIZE, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filtfunc, filtfuncarg */
	    dsize, 1,				/* maxsize, nsegments */
	    dsize, 0,				/* maxsegsz, flags */
	    NULL, NULL,				/* lockfunc, lockfuncarg */
	    dtag);				/* dmat */

	if (error) {
		device_printf(dev, "failed to allocate busdma %s tag\n",
		    dname);
		(*vaddr) = NULL;
		return (ENXIO);
	}

	error = bus_dmamem_alloc(*dtag, vaddr, BUS_DMA_NOWAIT | BUS_DMA_ZERO,
	    dmap);
	if (error) {
		device_printf(dev, "failed to allocate %s DMA safe memory\n",
		    dname);
		bus_dma_tag_destroy(*dtag);
		(*vaddr) = NULL;
		return (ENXIO);
	}

	error = bus_dmamap_load(*dtag, *dmap, *vaddr, dsize,
	    tsec_map_dma_addr, raddr, BUS_DMA_NOWAIT);
	if (error) {
		device_printf(dev, "cannot get address of the %s "
		    "descriptors\n", dname);
		bus_dmamem_free(*dtag, *vaddr, *dmap);
		bus_dma_tag_destroy(*dtag);
		(*vaddr) = NULL;
		return (ENXIO);
	}

	return (0);
}

/*
 * Reverse of tsec_alloc_dma_desc(): sync, unload, free and destroy a
 * descriptor ring.  No-op when vaddr is NULL (allocation never happened).
 */
static void
tsec_free_dma_desc(bus_dma_tag_t dtag, bus_dmamap_t dmap, void *vaddr)
{

	if (vaddr == NULL)
		return;

	/* Unmap descriptors from DMA memory */
	bus_dmamap_sync(dtag, dmap, BUS_DMASYNC_POSTREAD |
	    BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(dtag, dmap);

	/* Free descriptors memory */
	bus_dmamem_free(dtag, vaddr, dmap);

	/* Destroy descriptors tag */
	bus_dma_tag_destroy(dtag);
}

/*
 * Release all DMA resources owned by the driver: TX maps and tag, RX
 * mbufs/maps and tag, and both descriptor rings.
 */
static void
tsec_free_dma(struct tsec_softc *sc)
{
	int i;

	/* Free TX maps */
	for (i = 0; i < TSEC_TX_NUM_DESC; i++)
		if (sc->tx_map_data[i] != NULL)
			bus_dmamap_destroy(sc->tsec_tx_mtag,
			    sc->tx_map_data[i]);
	/* Destroy tag for TX mbufs */
	bus_dma_tag_destroy(sc->tsec_tx_mtag);

	/* Free RX mbufs and maps */
	for (i = 0; i < TSEC_RX_NUM_DESC; i++) {
		if (sc->rx_data[i].mbuf) {
			/* Unload buffer from DMA */
			bus_dmamap_sync(sc->tsec_rx_mtag, sc->rx_data[i].map,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->tsec_rx_mtag,
			    sc->rx_data[i].map);

			/* Free buffer */
			m_freem(sc->rx_data[i].mbuf);
		}
		/* Destroy map for this buffer */
		if (sc->rx_data[i].map != NULL)
bus_dmamap_destroy(sc->tsec_rx_mtag,
			    sc->rx_data[i].map);
	}
	/* Destroy tag for RX mbufs */
	bus_dma_tag_destroy(sc->tsec_rx_mtag);

	/* Unload TX/RX descriptors */
	tsec_free_dma_desc(sc->tsec_tx_dtag, sc->tsec_tx_dmap,
	    sc->tsec_tx_vaddr);
	tsec_free_dma_desc(sc->tsec_rx_dtag, sc->tsec_rx_dmap,
	    sc->tsec_rx_vaddr);
}

/*
 * Stop the interface: cancel the watchdog callout, mask interrupts,
 * stop DMA, drop all queued TX mbufs and disable the MAC RX/TX engines.
 * Requires the global (TX+RX) lock.
 */
static void
tsec_stop(struct tsec_softc *sc)
{
	struct ifnet *ifp;
	struct mbuf *m0;
	bus_dmamap_t *mapp;
	uint32_t tmpval;

	TSEC_GLOBAL_LOCK_ASSERT(sc);

	ifp = sc->tsec_ifp;

	/* Disable interface and watchdog timer */
	callout_stop(&sc->tsec_callout);
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
	sc->tsec_watchdog = 0;

	/* Disable all interrupts and stop DMA */
	tsec_intrs_ctl(sc, 0);
	tsec_dma_ctl(sc, 0);

	/* Remove pending data from TX queue */
	while (!TSEC_EMPTYQ_TX_MBUF(sc)) {
		m0 = TSEC_GET_TX_MBUF(sc);
		mapp = TSEC_GET_TX_MAP(sc);

		bus_dmamap_sync(sc->tsec_tx_mtag, *mapp,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->tsec_tx_mtag, *mapp);
		TSEC_FREE_TX_MAP(sc, mapp);
		m_freem(m0);
	}

	/* Disable RX and TX */
	tmpval = TSEC_READ(sc, TSEC_REG_MACCFG1);
	tmpval &= ~(TSEC_MACCFG1_RX_EN | TSEC_MACCFG1_TX_EN);
	TSEC_WRITE(sc, TSEC_REG_MACCFG1, tmpval);
	DELAY(10);
}

/*
 * Once-per-second callout: run the watchdog, tick the PHY state
 * machine, and kick the TX path if the link just came up while
 * packets are waiting.
 */
static void
tsec_tick(void *arg)
{
	struct tsec_softc *sc = arg;
	struct ifnet *ifp;
	int link;

	TSEC_GLOBAL_LOCK(sc);

	tsec_watchdog(sc);

	ifp = sc->tsec_ifp;
	link = sc->tsec_link;

	mii_tick(sc->tsec_mii);

	/* Restart transmission if the link transitioned 0 -> 1 */
	if (link == 0 && sc->tsec_link == 1 &&
	    (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)))
		tsec_start_locked(ifp);

	/* Schedule another timeout one second from now. */
	callout_reset(&sc->tsec_callout, hz, tsec_tick, sc);

	TSEC_GLOBAL_UNLOCK(sc);
}

/*
 * This is the core RX routine. It replenishes mbufs in the descriptor and
 * sends data which have been dma'ed into host memory to upper layer.
 *
 * Loops at most count times if count is > 0, or until done if count < 0.
 */
static int
tsec_receive_intr_locked(struct tsec_softc *sc, int count)
{
	struct tsec_desc *rx_desc;
	struct ifnet *ifp;
	struct rx_data_type *rx_data;
	struct mbuf *m;
	device_t dev;
	uint32_t i;
	int c, rx_npkts;
	uint16_t flags;

	TSEC_RECEIVE_LOCK_ASSERT(sc);

	ifp = sc->tsec_ifp;
	rx_data = sc->rx_data;
	dev = sc->dev;
	rx_npkts = 0;

	bus_dmamap_sync(sc->tsec_rx_dtag, sc->tsec_rx_dmap,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	for (c = 0; ; c++) {
		/* count < 0 means "until done"; count-- only fires when >= 0 */
		if (count >= 0 && count-- == 0)
			break;

		rx_desc = TSEC_GET_CUR_RX_DESC(sc);
		flags = rx_desc->flags;

		/* Check if there is anything to receive */
		if ((flags & TSEC_RXBD_E) || (c >= TSEC_RX_NUM_DESC)) {
			/*
			 * Avoid generating another interrupt
			 */
			if (flags & TSEC_RXBD_E)
				TSEC_WRITE(sc, TSEC_REG_IEVENT,
				    TSEC_IEVENT_RXB | TSEC_IEVENT_RXF);
			/*
			 * We didn't consume current descriptor and have to
			 * return it to the queue
			 */
			TSEC_BACK_CUR_RX_DESC(sc);
			break;
		}

		/* Error frame: recycle the descriptor and drop any partial frame */
		if (flags & (TSEC_RXBD_LG | TSEC_RXBD_SH | TSEC_RXBD_NO |
		    TSEC_RXBD_CR | TSEC_RXBD_OV | TSEC_RXBD_TR)) {
			rx_desc->length = 0;
			rx_desc->flags = (rx_desc->flags &
			    ~TSEC_RXBD_ZEROONINIT) | TSEC_RXBD_E |
			    TSEC_RXBD_I;

			if (sc->frame != NULL) {
				m_free(sc->frame);
				sc->frame = NULL;
			}

			continue;
		}

		/* Ok... process frame */
		i = TSEC_GET_CUR_RX_DESC_CNT(sc);
		m = rx_data[i].mbuf;
		m->m_len = rx_desc->length;

		/* Chain multi-descriptor frames together in sc->frame */
		if (sc->frame != NULL) {
			/* Last descriptor's length covers the whole frame */
			if ((flags & TSEC_RXBD_L) != 0)
				m->m_len -= m_length(sc->frame, NULL);

			m->m_flags &= ~M_PKTHDR;
			m_cat(sc->frame, m);
		} else {
			sc->frame = m;
		}

		m = NULL;

		if ((flags & TSEC_RXBD_L) != 0) {
			m = sc->frame;
			sc->frame = NULL;
		}

		if (tsec_new_rxbuf(sc->tsec_rx_mtag, rx_data[i].map,
		    &rx_data[i].mbuf, &rx_data[i].paddr)) {
			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
			/*
			 * We ran out of mbufs; didn't consume current
			 * descriptor and have to return it to the queue.
			 */
			TSEC_BACK_CUR_RX_DESC(sc);
			break;
		}

		/* Attach new buffer to descriptor and clear flags */
		rx_desc->bufptr = rx_data[i].paddr;
		rx_desc->length = 0;
		rx_desc->flags = (rx_desc->flags & ~TSEC_RXBD_ZEROONINIT) |
		    TSEC_RXBD_E | TSEC_RXBD_I;

		if (m != NULL) {
			m->m_pkthdr.rcvif = ifp;

			m_fixhdr(m);
			m_adj(m, -ETHER_CRC_LEN);

			if (sc->is_etsec)
				tsec_offload_process_frame(sc, m);

			/* Drop the lock around the upper-layer input call */
			TSEC_RECEIVE_UNLOCK(sc);
			(*ifp->if_input)(ifp, m);
			TSEC_RECEIVE_LOCK(sc);
			rx_npkts++;
		}
	}

	bus_dmamap_sync(sc->tsec_rx_dtag, sc->tsec_rx_dmap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/*
	 * Make sure TSEC receiver is not halted.
	 *
	 * Various conditions can stop the TSEC receiver, but not all are
	 * signaled and handled by error interrupt, so make sure the receiver
	 * is running. Writing to TSEC_REG_RSTAT restarts the receiver when
	 * halted, and is harmless if already running.
	 */
	TSEC_WRITE(sc, TSEC_REG_RSTAT, TSEC_RSTAT_QHLT);

	return (rx_npkts);
}

/*
 * RX interrupt handler: acknowledge the RX events and drain the ring
 * without a packet limit.  Defers to the poll handler when polling is on.
 */
void
tsec_receive_intr(void *arg)
{
	struct tsec_softc *sc = arg;

	TSEC_RECEIVE_LOCK(sc);

#ifdef DEVICE_POLLING
	if (sc->tsec_ifp->if_capenable & IFCAP_POLLING) {
		TSEC_RECEIVE_UNLOCK(sc);
		return;
	}
#endif

	/* Confirm the interrupt was received by driver */
	TSEC_WRITE(sc, TSEC_REG_IEVENT, TSEC_IEVENT_RXB | TSEC_IEVENT_RXF);
	tsec_receive_intr_locked(sc, -1);

	TSEC_RECEIVE_UNLOCK(sc);
}

/*
 * Reap completed TX descriptors: update collision stats, free the mbufs
 * and DMA maps of fully transmitted packets, then restart transmission
 * of anything still queued.  Requires the transmit lock.
 */
static void
tsec_transmit_intr_locked(struct tsec_softc *sc)
{
	struct tsec_desc *tx_desc;
	struct ifnet *ifp;
	struct mbuf *m0;
	bus_dmamap_t *mapp;
	int send = 0;

	TSEC_TRANSMIT_LOCK_ASSERT(sc);

	ifp = sc->tsec_ifp;

	/* Update collision statistics */
	if_inc_counter(ifp, IFCOUNTER_COLLISIONS,
	    TSEC_READ(sc, TSEC_REG_MON_TNCL));

	/* Reset collision counters in hardware */
	TSEC_WRITE(sc, TSEC_REG_MON_TSCL, 0);
	TSEC_WRITE(sc, TSEC_REG_MON_TMCL, 0);
	TSEC_WRITE(sc, TSEC_REG_MON_TLCL, 0);
	TSEC_WRITE(sc, TSEC_REG_MON_TXCL, 0);
	TSEC_WRITE(sc, TSEC_REG_MON_TNCL, 0);

	bus_dmamap_sync(sc->tsec_tx_dtag, sc->tsec_tx_dmap,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	while (TSEC_CUR_DIFF_DIRTY_TX_DESC(sc))
{
		tx_desc = TSEC_GET_DIRTY_TX_DESC(sc);
		/* Hardware still owns this descriptor - stop reaping */
		if (tx_desc->flags & TSEC_TXBD_R) {
			TSEC_BACK_DIRTY_TX_DESC(sc);
			break;
		}

		if ((tx_desc->flags & TSEC_TXBD_L) == 0)
			continue;

		/*
		 * This is the last buf in this packet, so unmap and free it.
		 */
		m0 = TSEC_GET_TX_MBUF(sc);
		mapp = TSEC_GET_TX_MAP(sc);

		bus_dmamap_sync(sc->tsec_tx_mtag, *mapp,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->tsec_tx_mtag, *mapp);
		TSEC_FREE_TX_MAP(sc, mapp);
		m_freem(m0);

		if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
		send = 1;
	}

	bus_dmamap_sync(sc->tsec_tx_dtag, sc->tsec_tx_dmap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	if (send) {
		/* Now send anything that was pending */
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		tsec_start_locked(ifp);

		/* Stop watchdog if all sent */
		if (TSEC_EMPTYQ_TX_MBUF(sc))
			sc->tsec_watchdog = 0;
	}
}

/*
 * TX interrupt handler: acknowledge the TX events and reap completed
 * descriptors.  Defers to the poll handler when polling is enabled.
 */
void
tsec_transmit_intr(void *arg)
{
	struct tsec_softc *sc = arg;

	TSEC_TRANSMIT_LOCK(sc);

#ifdef DEVICE_POLLING
	if (sc->tsec_ifp->if_capenable & IFCAP_POLLING) {
		TSEC_TRANSMIT_UNLOCK(sc);
		return;
	}
#endif

	/* Confirm the interrupt was received by driver */
	TSEC_WRITE(sc, TSEC_REG_IEVENT, TSEC_IEVENT_TXB | TSEC_IEVENT_TXF);
	tsec_transmit_intr_locked(sc);

	TSEC_TRANSMIT_UNLOCK(sc);
}

/*
 * Handle error events (IEVENT): restart a halted transmitter on TXE,
 * drain the RX ring on BSY (busy/overrun), and fully re-initialize the
 * controller on a system bus error (EBERR).  Requires the global lock.
 */
static void
tsec_error_intr_locked(struct tsec_softc *sc, int count)
{
	struct ifnet *ifp;
	uint32_t eflags;

	TSEC_GLOBAL_LOCK_ASSERT(sc);

	ifp = sc->tsec_ifp;

	eflags = TSEC_READ(sc, TSEC_REG_IEVENT);

	/*
	 * Clear events bits in hardware
	 * (NOTE(review): TSEC_IEVENT_BABR is tested below but is not part
	 * of this clear mask - confirm whether that is intentional.)
	 */
	TSEC_WRITE(sc, TSEC_REG_IEVENT, TSEC_IEVENT_RXC | TSEC_IEVENT_BSY |
	    TSEC_IEVENT_EBERR | TSEC_IEVENT_MSRO | TSEC_IEVENT_BABT |
	    TSEC_IEVENT_TXC | TSEC_IEVENT_TXE | TSEC_IEVENT_LC |
	    TSEC_IEVENT_CRL | TSEC_IEVENT_XFUN);

	/* Check transmitter errors */
	if (eflags & TSEC_IEVENT_TXE) {
		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);

		if (eflags & TSEC_IEVENT_LC)
			if_inc_counter(ifp, IFCOUNTER_COLLISIONS, 1);

		/* Clear the transmitter-halted condition */
		TSEC_WRITE(sc, TSEC_REG_TSTAT, TSEC_TSTAT_THLT);
	}

	/* Check receiver errors */
	if (eflags & TSEC_IEVENT_BSY) {
		if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
		if_inc_counter(ifp, IFCOUNTER_IQDROPS,
1); /* Get data from RX buffers */ tsec_receive_intr_locked(sc, count); } if (ifp->if_flags & IFF_DEBUG) if_printf(ifp, "tsec_error_intr(): event flags: 0x%x\n", eflags); if (eflags & TSEC_IEVENT_EBERR) { if_printf(ifp, "System bus error occurred during" "DMA transaction (flags: 0x%x)\n", eflags); tsec_init_locked(sc); } if (eflags & TSEC_IEVENT_BABT) if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); if (eflags & TSEC_IEVENT_BABR) if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); } void tsec_error_intr(void *arg) { struct tsec_softc *sc = arg; TSEC_GLOBAL_LOCK(sc); tsec_error_intr_locked(sc, -1); TSEC_GLOBAL_UNLOCK(sc); } int tsec_miibus_readreg(device_t dev, int phy, int reg) { struct tsec_softc *sc; uint32_t timeout; int rv; sc = device_get_softc(dev); TSEC_PHY_LOCK(); TSEC_PHY_WRITE(sc, TSEC_REG_MIIMADD, (phy << 8) | reg); TSEC_PHY_WRITE(sc, TSEC_REG_MIIMCOM, 0); TSEC_PHY_WRITE(sc, TSEC_REG_MIIMCOM, TSEC_MIIMCOM_READCYCLE); timeout = TSEC_READ_RETRY; while (--timeout && TSEC_PHY_READ(sc, TSEC_REG_MIIMIND) & (TSEC_MIIMIND_NOTVALID | TSEC_MIIMIND_BUSY)) DELAY(TSEC_READ_DELAY); if (timeout == 0) device_printf(dev, "Timeout while reading from PHY!\n"); rv = TSEC_PHY_READ(sc, TSEC_REG_MIIMSTAT); TSEC_PHY_UNLOCK(); return (rv); } int tsec_miibus_writereg(device_t dev, int phy, int reg, int value) { struct tsec_softc *sc; uint32_t timeout; sc = device_get_softc(dev); TSEC_PHY_LOCK(); TSEC_PHY_WRITE(sc, TSEC_REG_MIIMADD, (phy << 8) | reg); TSEC_PHY_WRITE(sc, TSEC_REG_MIIMCON, value); timeout = TSEC_READ_RETRY; while (--timeout && (TSEC_READ(sc, TSEC_REG_MIIMIND) & TSEC_MIIMIND_BUSY)) DELAY(TSEC_READ_DELAY); TSEC_PHY_UNLOCK(); if (timeout == 0) device_printf(dev, "Timeout while writing to PHY!\n"); return (0); } void tsec_miibus_statchg(device_t dev) { struct tsec_softc *sc; struct mii_data *mii; uint32_t ecntrl, id, tmp; int link; sc = device_get_softc(dev); mii = sc->tsec_mii; link = ((mii->mii_media_status & IFM_ACTIVE) ? 
1 : 0);

	tmp = TSEC_READ(sc, TSEC_REG_MACCFG2) & ~TSEC_MACCFG2_IF;

	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX)
		tmp |= TSEC_MACCFG2_FULLDUPLEX;
	else
		tmp &= ~TSEC_MACCFG2_FULLDUPLEX;

	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_1000_T:
	case IFM_1000_SX:
		tmp |= TSEC_MACCFG2_GMII;
		sc->tsec_link = link;
		break;
	case IFM_100_TX:
	case IFM_10_T:
		tmp |= TSEC_MACCFG2_MII;
		sc->tsec_link = link;
		break;
	case IFM_NONE:
		if (link)
			device_printf(dev, "No speed selected but link "
			    "active!\n");
		sc->tsec_link = 0;
		return;
	default:
		sc->tsec_link = 0;
		device_printf(dev, "Unknown speed (%d), link %s!\n",
		    IFM_SUBTYPE(mii->mii_media_active),
		    ((link) ? "up" : "down"));
		return;
	}
	TSEC_WRITE(sc, TSEC_REG_MACCFG2, tmp);

	/* XXX kludge - use circumstantial evidence for reduced mode. */
	id = TSEC_READ(sc, TSEC_REG_ID2);
	if (id & 0xffff) {
		ecntrl = TSEC_READ(sc, TSEC_REG_ECNTRL) & ~TSEC_ECNTRL_R100M;
		ecntrl |= (tmp & TSEC_MACCFG2_MII) ? TSEC_ECNTRL_R100M : 0;
		TSEC_WRITE(sc, TSEC_REG_ECNTRL, ecntrl);
	}
}

/*
 * Register the dev.tsec.<unit>.int_coal.* sysctl tree that controls the
 * interrupt coalescing thresholds (see the block comment below).
 */
static void
tsec_add_sysctls(struct tsec_softc *sc)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid_list *children;
	struct sysctl_oid *tree;

	ctx = device_get_sysctl_ctx(sc->dev);
	children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
	tree = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "int_coal",
	    CTLFLAG_RD, 0, "TSEC Interrupts coalescing");
	children = SYSCTL_CHILDREN(tree);

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_time",
	    CTLTYPE_UINT | CTLFLAG_RW, sc, TSEC_IC_RX, tsec_sysctl_ic_time,
	    "I", "IC RX time threshold (0-65535)");
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_count",
	    CTLTYPE_UINT | CTLFLAG_RW, sc, TSEC_IC_RX, tsec_sysctl_ic_count,
	    "I", "IC RX frame count threshold (0-255)");
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_time",
	    CTLTYPE_UINT | CTLFLAG_RW, sc, TSEC_IC_TX, tsec_sysctl_ic_time,
	    "I", "IC TX time threshold (0-65535)");
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_count",
	    CTLTYPE_UINT | CTLFLAG_RW, sc, TSEC_IC_TX, tsec_sysctl_ic_count,
	    "I", "IC TX frame count threshold (0-255)");
}

/*
 * With Interrupt Coalescing (IC) active, a transmit/receive frame
 * interrupt is raised either upon:
 *
 * - threshold-defined period of time elapsed, or
 * - threshold-defined number of frames is received/transmitted,
 *   whichever occurs first.
 *
 * The following sysctls regulate IC behaviour (for TX/RX separately):
 *
 * dev.tsec.<unit>.int_coal.rx_time
 * dev.tsec.<unit>.int_coal.rx_count
 * dev.tsec.<unit>.int_coal.tx_time
 * dev.tsec.<unit>.int_coal.tx_count
 *
 * Values:
 *
 * - 0 for either time or count disables IC on the given TX/RX path
 *
 * - count: 1-255 (expresses frame count number; note that value of 1 is
 *   effectively IC off)
 *
 * - time: 1-65535 (value corresponds to a real time period and is
 *   expressed in units equivalent to 64 TSEC interface clocks, i.e. one timer
 *   threshold unit is 26.5 us, 2.56 us, or 512 ns, corresponding to 10 Mbps,
 *   100 Mbps, or 1Gbps, respectively. For detailed discussion consult the
 *   TSEC reference manual.
 */
static int
tsec_sysctl_ic_time(SYSCTL_HANDLER_ARGS)
{
	int error;
	uint32_t time;
	struct tsec_softc *sc = (struct tsec_softc *)arg1;

	/* arg2 selects the RX or TX coalescing pair */
	time = (arg2 == TSEC_IC_RX) ? sc->rx_ic_time : sc->tx_ic_time;

	error = sysctl_handle_int(oidp, &time, 0, req);
	if (error != 0)
		return (error);

	if (time > 65535)
		return (EINVAL);

	TSEC_IC_LOCK(sc);
	if (arg2 == TSEC_IC_RX) {
		sc->rx_ic_time = time;
		tsec_set_rxic(sc);
	} else {
		sc->tx_ic_time = time;
		tsec_set_txic(sc);
	}
	TSEC_IC_UNLOCK(sc);

	return (0);
}

/* Sysctl handler for the IC frame-count thresholds (0-255). */
static int
tsec_sysctl_ic_count(SYSCTL_HANDLER_ARGS)
{
	int error;
	uint32_t count;
	struct tsec_softc *sc = (struct tsec_softc *)arg1;

	count = (arg2 == TSEC_IC_RX) ?
sc->rx_ic_count : sc->tx_ic_count;

	error = sysctl_handle_int(oidp, &count, 0, req);
	if (error != 0)
		return (error);

	if (count > 255)
		return (EINVAL);

	TSEC_IC_LOCK(sc);
	if (arg2 == TSEC_IC_RX) {
		sc->rx_ic_count = count;
		tsec_set_rxic(sc);
	} else {
		sc->tx_ic_count = count;
		tsec_set_txic(sc);
	}
	TSEC_IC_UNLOCK(sc);

	return (0);
}

/*
 * Program the RX interrupt-coalescing register from sc->rx_ic_count /
 * sc->rx_ic_time; a zero in either threshold disables coalescing.
 */
static void
tsec_set_rxic(struct tsec_softc *sc)
{
	uint32_t rxic_val;

	if (sc->rx_ic_count == 0 || sc->rx_ic_time == 0)
		/* Disable RX IC */
		rxic_val = 0;
	else {
		/* Bit 31 enables IC; count in bits 21+, time in low bits */
		rxic_val = 0x80000000;
		rxic_val |= (sc->rx_ic_count << 21);
		rxic_val |= sc->rx_ic_time;
	}

	TSEC_WRITE(sc, TSEC_REG_RXIC, rxic_val);
}

/*
 * Program the TX interrupt-coalescing register from sc->tx_ic_count /
 * sc->tx_ic_time; a zero in either threshold disables coalescing.
 */
static void
tsec_set_txic(struct tsec_softc *sc)
{
	uint32_t txic_val;

	if (sc->tx_ic_count == 0 || sc->tx_ic_time == 0)
		/* Disable TX IC */
		txic_val = 0;
	else {
		txic_val = 0x80000000;
		txic_val |= (sc->tx_ic_count << 21);
		txic_val |= sc->tx_ic_time;
	}

	TSEC_WRITE(sc, TSEC_REG_TXIC, txic_val);
}

/*
 * Enable/disable hardware checksum offload in TCTRL/RCTRL to match the
 * interface's capability flags, and set if_hwassist accordingly.
 * Requires the global lock.
 */
static void
tsec_offload_setup(struct tsec_softc *sc)
{
	struct ifnet *ifp = sc->tsec_ifp;
	uint32_t reg;

	TSEC_GLOBAL_LOCK_ASSERT(sc);

	reg = TSEC_READ(sc, TSEC_REG_TCTRL);
	reg |= TSEC_TCTRL_IPCSEN | TSEC_TCTRL_TUCSEN;

	if (ifp->if_capenable & IFCAP_TXCSUM)
		ifp->if_hwassist = TSEC_CHECKSUM_FEATURES;
	else
		ifp->if_hwassist = 0;

	TSEC_WRITE(sc, TSEC_REG_TCTRL, reg);

	reg = TSEC_READ(sc, TSEC_REG_RCTRL);
	reg &= ~(TSEC_RCTRL_IPCSEN | TSEC_RCTRL_TUCSEN | TSEC_RCTRL_PRSDEP);
	reg |= TSEC_RCTRL_PRSDEP_PARSE_L2 | TSEC_RCTRL_VLEX;

	if (ifp->if_capenable & IFCAP_RXCSUM)
		reg |= TSEC_RCTRL_IPCSEN | TSEC_RCTRL_TUCSEN |
		    TSEC_RCTRL_PRSDEP_PARSE_L234;

	TSEC_WRITE(sc, TSEC_REG_RCTRL, reg);
}

/*
 * Translate the frame control block (FCB) the eTSEC prepends to each
 * received frame into mbuf checksum/VLAN metadata, then strip the FCB.
 * Requires the receive lock.
 */
static void
tsec_offload_process_frame(struct tsec_softc *sc, struct mbuf *m)
{
	struct tsec_rx_fcb rx_fcb;
	int csum_flags = 0;
	int protocol, flags;

	TSEC_RECEIVE_LOCK_ASSERT(sc);

	m_copydata(m, 0, sizeof(struct tsec_rx_fcb), (caddr_t)(&rx_fcb));

	flags = rx_fcb.flags;
	protocol = rx_fcb.protocol;

	if (TSEC_RX_FCB_IP_CSUM_CHECKED(flags)) {
		csum_flags |= CSUM_IP_CHECKED;

		if ((flags & TSEC_RX_FCB_IP_CSUM_ERROR) == 0)
csum_flags |= CSUM_IP_VALID;
	}

	if ((protocol == IPPROTO_TCP || protocol == IPPROTO_UDP) &&
	    TSEC_RX_FCB_TCP_UDP_CSUM_CHECKED(flags) &&
	    (flags & TSEC_RX_FCB_TCP_UDP_CSUM_ERROR) == 0) {
		csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
		m->m_pkthdr.csum_data = 0xFFFF;
	}

	m->m_pkthdr.csum_flags = csum_flags;

	/* Hardware-extracted VLAN tag, if present */
	if (flags & TSEC_RX_FCB_VLAN) {
		m->m_pkthdr.ether_vtag = rx_fcb.vlan;
		m->m_flags |= M_VLANTAG;
	}

	/* Strip the FCB header before handing the frame up */
	m_adj(m, sizeof(struct tsec_rx_fcb));
}

/*
 * Program the group-address hash table (GADDR0-7) from the interface's
 * multicast list; IFF_ALLMULTI sets every hash bit.  Requires the
 * global lock.
 */
static void
tsec_setup_multicast(struct tsec_softc *sc)
{
	uint32_t hashtable[8] = { 0, 0, 0, 0, 0, 0, 0, 0 };
	struct ifnet *ifp = sc->tsec_ifp;
	struct ifmultiaddr *ifma;
	uint32_t h;
	int i;

	TSEC_GLOBAL_LOCK_ASSERT(sc);

	if (ifp->if_flags & IFF_ALLMULTI) {
		for (i = 0; i < 8; i++)
			TSEC_WRITE(sc, TSEC_REG_GADDR(i), 0xFFFFFFFF);

		return;
	}

	if_maddr_rlock(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;

		/* Top 8 bits of the big-endian CRC select the hash bit */
		h = (ether_crc32_be(LLADDR((struct sockaddr_dl *)
		    ifma->ifma_addr), ETHER_ADDR_LEN) >> 24) & 0xFF;

		hashtable[(h >> 5)] |= 1 << (0x1F - (h & 0x1F));
	}
	if_maddr_runlock(ifp);

	for (i = 0; i < 8; i++)
		TSEC_WRITE(sc, TSEC_REG_GADDR(i), hashtable[i]);
}

/*
 * Validate and program a new MTU.  The argument is the payload MTU; the
 * L2 header, VLAN tag and CRC are added before checking it against the
 * hardware frame-size limits.  Returns the programmed frame size on
 * success (non-zero, treated as "true" by the caller) or 0 on failure.
 */
static int
tsec_set_mtu(struct tsec_softc *sc, unsigned int mtu)
{

	mtu += ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN + ETHER_CRC_LEN;

	TSEC_GLOBAL_LOCK_ASSERT(sc);

	if (mtu >= TSEC_MIN_FRAME_SIZE && mtu <= TSEC_MAX_FRAME_SIZE) {
		TSEC_WRITE(sc, TSEC_REG_MAXFRM, mtu);
		return (mtu);
	}

	return (0);
}