Index: head/sys/arm/allwinner/a10_mmc.c =================================================================== --- head/sys/arm/allwinner/a10_mmc.c (revision 292179) +++ head/sys/arm/allwinner/a10_mmc.c (revision 292180) @@ -1,885 +1,886 @@ /*- * Copyright (c) 2013 Alexander Fedorov * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define A10_MMC_MEMRES 0 #define A10_MMC_IRQRES 1 #define A10_MMC_RESSZ 2 #define A10_MMC_DMA_SEGS 16 #define A10_MMC_DMA_MAX_SIZE 0x2000 #define A10_MMC_DMA_FTRGLEVEL 0x20070008 static int a10_mmc_pio_mode = 0; TUNABLE_INT("hw.a10.mmc.pio_mode", &a10_mmc_pio_mode); struct a10_mmc_softc { bus_space_handle_t a10_bsh; bus_space_tag_t a10_bst; device_t a10_dev; int a10_bus_busy; int a10_id; int a10_resid; int a10_timeout; struct callout a10_timeoutc; struct mmc_host a10_host; struct mmc_request * a10_req; struct mtx a10_mtx; struct resource * a10_res[A10_MMC_RESSZ]; uint32_t a10_intr; uint32_t a10_intr_wait; void * a10_intrhand; /* Fields required for DMA access. */ bus_addr_t a10_dma_desc_phys; bus_dmamap_t a10_dma_map; bus_dma_tag_t a10_dma_tag; void * a10_dma_desc; bus_dmamap_t a10_dma_buf_map; bus_dma_tag_t a10_dma_buf_tag; int a10_dma_inuse; int a10_dma_map_err; }; static struct resource_spec a10_mmc_res_spec[] = { { SYS_RES_MEMORY, 0, RF_ACTIVE }, { SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE }, { -1, 0, 0 } }; static int a10_mmc_probe(device_t); static int a10_mmc_attach(device_t); static int a10_mmc_detach(device_t); static int a10_mmc_setup_dma(struct a10_mmc_softc *); static int a10_mmc_reset(struct a10_mmc_softc *); static void a10_mmc_intr(void *); static int a10_mmc_update_clock(struct a10_mmc_softc *); static int a10_mmc_update_ios(device_t, device_t); static int a10_mmc_request(device_t, device_t, struct mmc_request *); static int a10_mmc_get_ro(device_t, device_t); static int a10_mmc_acquire_host(device_t, device_t); static int a10_mmc_release_host(device_t, device_t); #define A10_MMC_LOCK(_sc) mtx_lock(&(_sc)->a10_mtx) #define A10_MMC_UNLOCK(_sc) mtx_unlock(&(_sc)->a10_mtx) #define A10_MMC_READ_4(_sc, _reg) \ bus_space_read_4((_sc)->a10_bst, 
(_sc)->a10_bsh, _reg) #define A10_MMC_WRITE_4(_sc, _reg, _value) \ bus_space_write_4((_sc)->a10_bst, (_sc)->a10_bsh, _reg, _value) static int a10_mmc_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (!ofw_bus_is_compatible(dev, "allwinner,sun4i-a10-mmc")) return (ENXIO); device_set_desc(dev, "Allwinner Integrated MMC/SD controller"); return (BUS_PROBE_DEFAULT); } static int a10_mmc_attach(device_t dev) { device_t child; struct a10_mmc_softc *sc; struct sysctl_ctx_list *ctx; struct sysctl_oid_list *tree; sc = device_get_softc(dev); sc->a10_dev = dev; sc->a10_req = NULL; sc->a10_id = device_get_unit(dev); if (sc->a10_id > 3) { device_printf(dev, "only 4 hosts are supported (0-3)\n"); return (ENXIO); } if (bus_alloc_resources(dev, a10_mmc_res_spec, sc->a10_res) != 0) { device_printf(dev, "cannot allocate device resources\n"); return (ENXIO); } sc->a10_bst = rman_get_bustag(sc->a10_res[A10_MMC_MEMRES]); sc->a10_bsh = rman_get_bushandle(sc->a10_res[A10_MMC_MEMRES]); if (bus_setup_intr(dev, sc->a10_res[A10_MMC_IRQRES], INTR_TYPE_MISC | INTR_MPSAFE, NULL, a10_mmc_intr, sc, &sc->a10_intrhand)) { bus_release_resources(dev, a10_mmc_res_spec, sc->a10_res); device_printf(dev, "cannot setup interrupt handler\n"); return (ENXIO); } /* Activate the module clock. */ if (a10_clk_mmc_activate(sc->a10_id) != 0) { bus_teardown_intr(dev, sc->a10_res[A10_MMC_IRQRES], sc->a10_intrhand); bus_release_resources(dev, a10_mmc_res_spec, sc->a10_res); device_printf(dev, "cannot activate mmc clock\n"); return (ENXIO); } sc->a10_timeout = 10; ctx = device_get_sysctl_ctx(dev); tree = SYSCTL_CHILDREN(device_get_sysctl_tree(dev)); SYSCTL_ADD_INT(ctx, tree, OID_AUTO, "req_timeout", CTLFLAG_RW, &sc->a10_timeout, 0, "Request timeout in seconds"); mtx_init(&sc->a10_mtx, device_get_nameunit(sc->a10_dev), "a10_mmc", MTX_DEF); callout_init_mtx(&sc->a10_timeoutc, &sc->a10_mtx, 0); /* Reset controller. 
*/ if (a10_mmc_reset(sc) != 0) { device_printf(dev, "cannot reset the controller\n"); goto fail; } if (a10_mmc_pio_mode == 0 && a10_mmc_setup_dma(sc) != 0) { device_printf(sc->a10_dev, "Couldn't setup DMA!\n"); a10_mmc_pio_mode = 1; } if (bootverbose) device_printf(sc->a10_dev, "DMA status: %s\n", a10_mmc_pio_mode ? "disabled" : "enabled"); sc->a10_host.f_min = 400000; sc->a10_host.f_max = 52000000; sc->a10_host.host_ocr = MMC_OCR_320_330 | MMC_OCR_330_340; sc->a10_host.caps = MMC_CAP_4_BIT_DATA | MMC_CAP_HSPEED; sc->a10_host.mode = mode_sd; child = device_add_child(dev, "mmc", -1); if (child == NULL) { device_printf(dev, "attaching MMC bus failed!\n"); goto fail; } if (device_probe_and_attach(child) != 0) { device_printf(dev, "attaching MMC child failed!\n"); device_delete_child(dev, child); goto fail; } return (0); fail: callout_drain(&sc->a10_timeoutc); mtx_destroy(&sc->a10_mtx); bus_teardown_intr(dev, sc->a10_res[A10_MMC_IRQRES], sc->a10_intrhand); bus_release_resources(dev, a10_mmc_res_spec, sc->a10_res); return (ENXIO); } static int a10_mmc_detach(device_t dev) { return (EBUSY); } static void a10_dma_desc_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int err) { struct a10_mmc_softc *sc; sc = (struct a10_mmc_softc *)arg; if (err) { sc->a10_dma_map_err = err; return; } sc->a10_dma_desc_phys = segs[0].ds_addr; } static int a10_mmc_setup_dma(struct a10_mmc_softc *sc) { int dma_desc_size, error; /* Allocate the DMA descriptor memory. 
*/ dma_desc_size = sizeof(struct a10_mmc_dma_desc) * A10_MMC_DMA_SEGS; error = bus_dma_tag_create(bus_get_dma_tag(sc->a10_dev), 1, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, dma_desc_size, 1, dma_desc_size, 0, NULL, NULL, &sc->a10_dma_tag); if (error) return (error); error = bus_dmamem_alloc(sc->a10_dma_tag, &sc->a10_dma_desc, BUS_DMA_WAITOK | BUS_DMA_ZERO, &sc->a10_dma_map); if (error) return (error); error = bus_dmamap_load(sc->a10_dma_tag, sc->a10_dma_map, sc->a10_dma_desc, dma_desc_size, a10_dma_desc_cb, sc, 0); if (error) return (error); if (sc->a10_dma_map_err) return (sc->a10_dma_map_err); /* Create the DMA map for data transfers. */ error = bus_dma_tag_create(bus_get_dma_tag(sc->a10_dev), 1, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, A10_MMC_DMA_MAX_SIZE * A10_MMC_DMA_SEGS, A10_MMC_DMA_SEGS, A10_MMC_DMA_MAX_SIZE, BUS_DMA_ALLOCNOW, NULL, NULL, &sc->a10_dma_buf_tag); if (error) return (error); error = bus_dmamap_create(sc->a10_dma_buf_tag, 0, &sc->a10_dma_buf_map); if (error) return (error); return (0); } static void a10_dma_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int err) { int i; struct a10_mmc_dma_desc *dma_desc; struct a10_mmc_softc *sc; sc = (struct a10_mmc_softc *)arg; sc->a10_dma_map_err = err; dma_desc = sc->a10_dma_desc; /* Note nsegs is guaranteed to be zero if err is non-zero. 
*/ for (i = 0; i < nsegs; i++) { dma_desc[i].buf_size = segs[i].ds_len; dma_desc[i].buf_addr = segs[i].ds_addr; dma_desc[i].config = A10_MMC_DMA_CONFIG_CH | A10_MMC_DMA_CONFIG_OWN; if (i == 0) dma_desc[i].config |= A10_MMC_DMA_CONFIG_FD; if (i < (nsegs - 1)) { dma_desc[i].config |= A10_MMC_DMA_CONFIG_DIC; dma_desc[i].next = sc->a10_dma_desc_phys + ((i + 1) * sizeof(struct a10_mmc_dma_desc)); } else { dma_desc[i].config |= A10_MMC_DMA_CONFIG_LD | A10_MMC_DMA_CONFIG_ER; dma_desc[i].next = 0; } } } static int a10_mmc_prepare_dma(struct a10_mmc_softc *sc) { bus_dmasync_op_t sync_op; int error; struct mmc_command *cmd; uint32_t val; cmd = sc->a10_req->cmd; if (cmd->data->len > A10_MMC_DMA_MAX_SIZE * A10_MMC_DMA_SEGS) return (EFBIG); error = bus_dmamap_load(sc->a10_dma_buf_tag, sc->a10_dma_buf_map, cmd->data->data, cmd->data->len, a10_dma_cb, sc, BUS_DMA_NOWAIT); if (error) return (error); if (sc->a10_dma_map_err) return (sc->a10_dma_map_err); sc->a10_dma_inuse = 1; if (cmd->data->flags & MMC_DATA_WRITE) sync_op = BUS_DMASYNC_PREWRITE; else sync_op = BUS_DMASYNC_PREREAD; bus_dmamap_sync(sc->a10_dma_buf_tag, sc->a10_dma_buf_map, sync_op); bus_dmamap_sync(sc->a10_dma_tag, sc->a10_dma_map, BUS_DMASYNC_PREWRITE); val = A10_MMC_READ_4(sc, A10_MMC_IMASK); val &= ~(A10_MMC_RX_DATA_REQ | A10_MMC_TX_DATA_REQ); A10_MMC_WRITE_4(sc, A10_MMC_IMASK, val); val = A10_MMC_READ_4(sc, A10_MMC_GCTRL); val &= ~A10_MMC_ACCESS_BY_AHB; val |= A10_MMC_DMA_ENABLE; A10_MMC_WRITE_4(sc, A10_MMC_GCTRL, val); val |= A10_MMC_DMA_RESET; A10_MMC_WRITE_4(sc, A10_MMC_GCTRL, val); A10_MMC_WRITE_4(sc, A10_MMC_DMAC, A10_MMC_IDMAC_SOFT_RST); A10_MMC_WRITE_4(sc, A10_MMC_DMAC, A10_MMC_IDMAC_IDMA_ON | A10_MMC_IDMAC_FIX_BURST); val = A10_MMC_READ_4(sc, A10_MMC_IDIE); val &= ~(A10_MMC_IDMAC_RECEIVE_INT | A10_MMC_IDMAC_TRANSMIT_INT); if (cmd->data->flags & MMC_DATA_WRITE) val |= A10_MMC_IDMAC_TRANSMIT_INT; else val |= A10_MMC_IDMAC_RECEIVE_INT; A10_MMC_WRITE_4(sc, A10_MMC_IDIE, val); A10_MMC_WRITE_4(sc, 
A10_MMC_DLBA, sc->a10_dma_desc_phys); A10_MMC_WRITE_4(sc, A10_MMC_FTRGL, A10_MMC_DMA_FTRGLEVEL); return (0); } static int a10_mmc_reset(struct a10_mmc_softc *sc) { int timeout; A10_MMC_WRITE_4(sc, A10_MMC_GCTRL, A10_MMC_READ_4(sc, A10_MMC_GCTRL) | A10_MMC_RESET); timeout = 1000; while (--timeout > 0) { if ((A10_MMC_READ_4(sc, A10_MMC_GCTRL) & A10_MMC_RESET) == 0) break; DELAY(100); } if (timeout == 0) return (ETIMEDOUT); /* Set the timeout. */ A10_MMC_WRITE_4(sc, A10_MMC_TIMEOUT, 0xffffffff); /* Clear pending interrupts. */ A10_MMC_WRITE_4(sc, A10_MMC_RINTR, 0xffffffff); A10_MMC_WRITE_4(sc, A10_MMC_IDST, 0xffffffff); /* Unmask interrupts. */ A10_MMC_WRITE_4(sc, A10_MMC_IMASK, A10_MMC_CMD_DONE | A10_MMC_INT_ERR_BIT | A10_MMC_DATA_OVER | A10_MMC_AUTOCMD_DONE); /* Enable interrupts and AHB access. */ A10_MMC_WRITE_4(sc, A10_MMC_GCTRL, A10_MMC_READ_4(sc, A10_MMC_GCTRL) | A10_MMC_INT_ENABLE); return (0); } static void a10_mmc_req_done(struct a10_mmc_softc *sc) { struct mmc_command *cmd; struct mmc_request *req; cmd = sc->a10_req->cmd; if (cmd->error != MMC_ERR_NONE) { /* Reset the controller. */ a10_mmc_reset(sc); a10_mmc_update_clock(sc); } if (sc->a10_dma_inuse == 0) { /* Reset the FIFO. 
*/ A10_MMC_WRITE_4(sc, A10_MMC_GCTRL, A10_MMC_READ_4(sc, A10_MMC_GCTRL) | A10_MMC_FIFO_RESET); } req = sc->a10_req; callout_stop(&sc->a10_timeoutc); sc->a10_req = NULL; sc->a10_intr = 0; sc->a10_resid = 0; sc->a10_dma_inuse = 0; sc->a10_dma_map_err = 0; sc->a10_intr_wait = 0; req->done(req); } static void a10_mmc_req_ok(struct a10_mmc_softc *sc) { int timeout; struct mmc_command *cmd; uint32_t status; timeout = 1000; while (--timeout > 0) { status = A10_MMC_READ_4(sc, A10_MMC_STAS); if ((status & A10_MMC_CARD_DATA_BUSY) == 0) break; DELAY(1000); } cmd = sc->a10_req->cmd; if (timeout == 0) { cmd->error = MMC_ERR_FAILED; a10_mmc_req_done(sc); return; } if (cmd->flags & MMC_RSP_PRESENT) { if (cmd->flags & MMC_RSP_136) { cmd->resp[0] = A10_MMC_READ_4(sc, A10_MMC_RESP3); cmd->resp[1] = A10_MMC_READ_4(sc, A10_MMC_RESP2); cmd->resp[2] = A10_MMC_READ_4(sc, A10_MMC_RESP1); cmd->resp[3] = A10_MMC_READ_4(sc, A10_MMC_RESP0); } else cmd->resp[0] = A10_MMC_READ_4(sc, A10_MMC_RESP0); } /* All data has been transferred ? */ if (cmd->data != NULL && (sc->a10_resid << 2) < cmd->data->len) cmd->error = MMC_ERR_FAILED; a10_mmc_req_done(sc); } static void a10_mmc_timeout(void *arg) { struct a10_mmc_softc *sc; sc = (struct a10_mmc_softc *)arg; if (sc->a10_req != NULL) { device_printf(sc->a10_dev, "controller timeout\n"); sc->a10_req->cmd->error = MMC_ERR_TIMEOUT; a10_mmc_req_done(sc); } else device_printf(sc->a10_dev, "Spurious timeout - no active request\n"); } static int a10_mmc_pio_transfer(struct a10_mmc_softc *sc, struct mmc_data *data) { int i, write; uint32_t bit, *buf; buf = (uint32_t *)data->data; write = (data->flags & MMC_DATA_WRITE) ? 1 : 0; bit = write ? 
A10_MMC_FIFO_FULL : A10_MMC_FIFO_EMPTY; for (i = sc->a10_resid; i < (data->len >> 2); i++) { if ((A10_MMC_READ_4(sc, A10_MMC_STAS) & bit)) return (1); if (write) A10_MMC_WRITE_4(sc, A10_MMC_FIFO, buf[i]); else buf[i] = A10_MMC_READ_4(sc, A10_MMC_FIFO); sc->a10_resid = i + 1; } return (0); } static void a10_mmc_intr(void *arg) { bus_dmasync_op_t sync_op; struct a10_mmc_softc *sc; struct mmc_data *data; uint32_t idst, imask, rint; sc = (struct a10_mmc_softc *)arg; A10_MMC_LOCK(sc); rint = A10_MMC_READ_4(sc, A10_MMC_RINTR); idst = A10_MMC_READ_4(sc, A10_MMC_IDST); imask = A10_MMC_READ_4(sc, A10_MMC_IMASK); if (idst == 0 && imask == 0 && rint == 0) { A10_MMC_UNLOCK(sc); return; } #ifdef DEBUG device_printf(sc->a10_dev, "idst: %#x, imask: %#x, rint: %#x\n", idst, imask, rint); #endif if (sc->a10_req == NULL) { device_printf(sc->a10_dev, "Spurious interrupt - no active request, rint: 0x%08X\n", rint); goto end; } if (rint & A10_MMC_INT_ERR_BIT) { device_printf(sc->a10_dev, "error rint: 0x%08X\n", rint); if (rint & A10_MMC_RESP_TIMEOUT) sc->a10_req->cmd->error = MMC_ERR_TIMEOUT; else sc->a10_req->cmd->error = MMC_ERR_FAILED; a10_mmc_req_done(sc); goto end; } if (idst & A10_MMC_IDMAC_ERROR) { device_printf(sc->a10_dev, "error idst: 0x%08x\n", idst); sc->a10_req->cmd->error = MMC_ERR_FAILED; a10_mmc_req_done(sc); goto end; } sc->a10_intr |= rint; data = sc->a10_req->cmd->data; if (data != NULL && sc->a10_dma_inuse == 1 && (idst & A10_MMC_IDMAC_COMPLETE)) { if (data->flags & MMC_DATA_WRITE) sync_op = BUS_DMASYNC_POSTWRITE; else sync_op = BUS_DMASYNC_POSTREAD; bus_dmamap_sync(sc->a10_dma_buf_tag, sc->a10_dma_buf_map, sync_op); bus_dmamap_sync(sc->a10_dma_tag, sc->a10_dma_map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->a10_dma_buf_tag, sc->a10_dma_buf_map); sc->a10_resid = data->len >> 2; } else if (data != NULL && sc->a10_dma_inuse == 0 && (rint & (A10_MMC_DATA_OVER | A10_MMC_RX_DATA_REQ | A10_MMC_TX_DATA_REQ)) != 0) a10_mmc_pio_transfer(sc, data); if ((sc->a10_intr & 
sc->a10_intr_wait) == sc->a10_intr_wait) a10_mmc_req_ok(sc); end: A10_MMC_WRITE_4(sc, A10_MMC_IDST, idst); A10_MMC_WRITE_4(sc, A10_MMC_RINTR, rint); A10_MMC_UNLOCK(sc); } static int a10_mmc_request(device_t bus, device_t child, struct mmc_request *req) { int blksz; struct a10_mmc_softc *sc; struct mmc_command *cmd; uint32_t cmdreg, val; sc = device_get_softc(bus); A10_MMC_LOCK(sc); if (sc->a10_req) { A10_MMC_UNLOCK(sc); return (EBUSY); } sc->a10_req = req; cmd = req->cmd; cmdreg = A10_MMC_START; if (cmd->opcode == MMC_GO_IDLE_STATE) cmdreg |= A10_MMC_SEND_INIT_SEQ; if (cmd->flags & MMC_RSP_PRESENT) cmdreg |= A10_MMC_RESP_EXP; if (cmd->flags & MMC_RSP_136) cmdreg |= A10_MMC_LONG_RESP; if (cmd->flags & MMC_RSP_CRC) cmdreg |= A10_MMC_CHECK_RESP_CRC; sc->a10_intr = 0; sc->a10_resid = 0; sc->a10_intr_wait = A10_MMC_CMD_DONE; cmd->error = MMC_ERR_NONE; if (cmd->data != NULL) { sc->a10_intr_wait |= A10_MMC_DATA_OVER; cmdreg |= A10_MMC_DATA_EXP | A10_MMC_WAIT_PREOVER; if (cmd->data->flags & MMC_DATA_MULTI) { cmdreg |= A10_MMC_SEND_AUTOSTOP; sc->a10_intr_wait |= A10_MMC_AUTOCMD_DONE; } if (cmd->data->flags & MMC_DATA_WRITE) cmdreg |= A10_MMC_WRITE; blksz = min(cmd->data->len, MMC_SECTOR_SIZE); A10_MMC_WRITE_4(sc, A10_MMC_BLKSZ, blksz); A10_MMC_WRITE_4(sc, A10_MMC_BCNTR, cmd->data->len); if (a10_mmc_pio_mode == 0) a10_mmc_prepare_dma(sc); /* Enable PIO access if sc->a10_dma_inuse is not set. 
*/ if (sc->a10_dma_inuse == 0) { val = A10_MMC_READ_4(sc, A10_MMC_GCTRL); val &= ~A10_MMC_DMA_ENABLE; val |= A10_MMC_ACCESS_BY_AHB; A10_MMC_WRITE_4(sc, A10_MMC_GCTRL, val); val = A10_MMC_READ_4(sc, A10_MMC_IMASK); val |= A10_MMC_RX_DATA_REQ | A10_MMC_TX_DATA_REQ; A10_MMC_WRITE_4(sc, A10_MMC_IMASK, val); } } A10_MMC_WRITE_4(sc, A10_MMC_CARG, cmd->arg); A10_MMC_WRITE_4(sc, A10_MMC_CMDR, cmdreg | cmd->opcode); callout_reset(&sc->a10_timeoutc, sc->a10_timeout * hz, a10_mmc_timeout, sc); A10_MMC_UNLOCK(sc); return (0); } static int a10_mmc_read_ivar(device_t bus, device_t child, int which, uintptr_t *result) { struct a10_mmc_softc *sc; sc = device_get_softc(bus); switch (which) { default: return (EINVAL); case MMCBR_IVAR_BUS_MODE: *(int *)result = sc->a10_host.ios.bus_mode; break; case MMCBR_IVAR_BUS_WIDTH: *(int *)result = sc->a10_host.ios.bus_width; break; case MMCBR_IVAR_CHIP_SELECT: *(int *)result = sc->a10_host.ios.chip_select; break; case MMCBR_IVAR_CLOCK: *(int *)result = sc->a10_host.ios.clock; break; case MMCBR_IVAR_F_MIN: *(int *)result = sc->a10_host.f_min; break; case MMCBR_IVAR_F_MAX: *(int *)result = sc->a10_host.f_max; break; case MMCBR_IVAR_HOST_OCR: *(int *)result = sc->a10_host.host_ocr; break; case MMCBR_IVAR_MODE: *(int *)result = sc->a10_host.mode; break; case MMCBR_IVAR_OCR: *(int *)result = sc->a10_host.ocr; break; case MMCBR_IVAR_POWER_MODE: *(int *)result = sc->a10_host.ios.power_mode; break; case MMCBR_IVAR_VDD: *(int *)result = sc->a10_host.ios.vdd; break; case MMCBR_IVAR_CAPS: *(int *)result = sc->a10_host.caps; break; case MMCBR_IVAR_MAX_DATA: *(int *)result = 65535; break; } return (0); } static int a10_mmc_write_ivar(device_t bus, device_t child, int which, uintptr_t value) { struct a10_mmc_softc *sc; sc = device_get_softc(bus); switch (which) { default: return (EINVAL); case MMCBR_IVAR_BUS_MODE: sc->a10_host.ios.bus_mode = value; break; case MMCBR_IVAR_BUS_WIDTH: sc->a10_host.ios.bus_width = value; break; case MMCBR_IVAR_CHIP_SELECT: 
sc->a10_host.ios.chip_select = value; break; case MMCBR_IVAR_CLOCK: sc->a10_host.ios.clock = value; break; case MMCBR_IVAR_MODE: sc->a10_host.mode = value; break; case MMCBR_IVAR_OCR: sc->a10_host.ocr = value; break; case MMCBR_IVAR_POWER_MODE: sc->a10_host.ios.power_mode = value; break; case MMCBR_IVAR_VDD: sc->a10_host.ios.vdd = value; break; /* These are read-only */ case MMCBR_IVAR_CAPS: case MMCBR_IVAR_HOST_OCR: case MMCBR_IVAR_F_MIN: case MMCBR_IVAR_F_MAX: case MMCBR_IVAR_MAX_DATA: return (EINVAL); } return (0); } static int a10_mmc_update_clock(struct a10_mmc_softc *sc) { uint32_t cmdreg; int retry; cmdreg = A10_MMC_START | A10_MMC_UPCLK_ONLY | A10_MMC_WAIT_PREOVER; A10_MMC_WRITE_4(sc, A10_MMC_CMDR, cmdreg); retry = 0xfffff; while (--retry > 0) { if ((A10_MMC_READ_4(sc, A10_MMC_CMDR) & A10_MMC_START) == 0) { A10_MMC_WRITE_4(sc, A10_MMC_RINTR, 0xffffffff); return (0); } DELAY(10); } A10_MMC_WRITE_4(sc, A10_MMC_RINTR, 0xffffffff); device_printf(sc->a10_dev, "timeout updating clock\n"); return (ETIMEDOUT); } static int a10_mmc_update_ios(device_t bus, device_t child) { int error; struct a10_mmc_softc *sc; struct mmc_ios *ios; uint32_t clkcr; sc = device_get_softc(bus); clkcr = A10_MMC_READ_4(sc, A10_MMC_CLKCR); if (clkcr & A10_MMC_CARD_CLK_ON) { /* Disable clock. */ clkcr &= ~A10_MMC_CARD_CLK_ON; A10_MMC_WRITE_4(sc, A10_MMC_CLKCR, clkcr); error = a10_mmc_update_clock(sc); if (error != 0) return (error); } ios = &sc->a10_host.ios; if (ios->clock) { /* Reset the divider. */ clkcr &= ~A10_MMC_CLKCR_DIV; A10_MMC_WRITE_4(sc, A10_MMC_CLKCR, clkcr); error = a10_mmc_update_clock(sc); if (error != 0) return (error); /* Set the MMC clock. */ error = a10_clk_mmc_cfg(sc->a10_id, ios->clock); if (error != 0) return (error); /* Enable clock. */ clkcr |= A10_MMC_CARD_CLK_ON; A10_MMC_WRITE_4(sc, A10_MMC_CLKCR, clkcr); error = a10_mmc_update_clock(sc); if (error != 0) return (error); } /* Set the bus width. 
*/ switch (ios->bus_width) { case bus_width_1: A10_MMC_WRITE_4(sc, A10_MMC_WIDTH, A10_MMC_WIDTH1); break; case bus_width_4: A10_MMC_WRITE_4(sc, A10_MMC_WIDTH, A10_MMC_WIDTH4); break; case bus_width_8: A10_MMC_WRITE_4(sc, A10_MMC_WIDTH, A10_MMC_WIDTH8); break; } return (0); } static int a10_mmc_get_ro(device_t bus, device_t child) { return (0); } static int a10_mmc_acquire_host(device_t bus, device_t child) { struct a10_mmc_softc *sc; int error; sc = device_get_softc(bus); A10_MMC_LOCK(sc); while (sc->a10_bus_busy) { error = msleep(sc, &sc->a10_mtx, PCATCH, "mmchw", 0); if (error != 0) { A10_MMC_UNLOCK(sc); return (error); } } sc->a10_bus_busy++; A10_MMC_UNLOCK(sc); return (0); } static int a10_mmc_release_host(device_t bus, device_t child) { struct a10_mmc_softc *sc; sc = device_get_softc(bus); A10_MMC_LOCK(sc); sc->a10_bus_busy--; wakeup(sc); A10_MMC_UNLOCK(sc); return (0); } static device_method_t a10_mmc_methods[] = { /* Device interface */ DEVMETHOD(device_probe, a10_mmc_probe), DEVMETHOD(device_attach, a10_mmc_attach), DEVMETHOD(device_detach, a10_mmc_detach), /* Bus interface */ DEVMETHOD(bus_read_ivar, a10_mmc_read_ivar), DEVMETHOD(bus_write_ivar, a10_mmc_write_ivar), DEVMETHOD(bus_print_child, bus_generic_print_child), /* MMC bridge interface */ DEVMETHOD(mmcbr_update_ios, a10_mmc_update_ios), DEVMETHOD(mmcbr_request, a10_mmc_request), DEVMETHOD(mmcbr_get_ro, a10_mmc_get_ro), DEVMETHOD(mmcbr_acquire_host, a10_mmc_acquire_host), DEVMETHOD(mmcbr_release_host, a10_mmc_release_host), DEVMETHOD_END }; static devclass_t a10_mmc_devclass; static driver_t a10_mmc_driver = { "a10_mmc", a10_mmc_methods, sizeof(struct a10_mmc_softc), }; DRIVER_MODULE(a10_mmc, simplebus, a10_mmc_driver, a10_mmc_devclass, 0, 0); +DRIVER_MODULE(mmc, a10_mmc, mmc_driver, mmc_devclass, NULL, NULL); Index: head/sys/arm/amlogic/aml8726/aml8726_mmc.c =================================================================== --- head/sys/arm/amlogic/aml8726/aml8726_mmc.c (revision 292179) +++ 
head/sys/arm/amlogic/aml8726/aml8726_mmc.c (revision 292180) @@ -1,1100 +1,1101 @@ /*- * Copyright 2013-2015 John Wehle * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * Amlogic aml8726 MMC host controller driver. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "gpio_if.h" #include "mmcbr_if.h" struct aml8726_mmc_gpio { device_t dev; uint32_t pin; uint32_t pol; }; struct aml8726_mmc_softc { device_t dev; struct resource *res[2]; struct mtx mtx; struct callout ch; uint32_t port; unsigned int ref_freq; struct aml8726_mmc_gpio pwr_en; int voltages[2]; struct aml8726_mmc_gpio vselect; bus_dma_tag_t dmatag; bus_dmamap_t dmamap; void *ih_cookie; struct mmc_host host; int bus_busy; struct mmc_command *cmd; uint32_t stop_timeout; }; static struct resource_spec aml8726_mmc_spec[] = { { SYS_RES_MEMORY, 0, RF_ACTIVE }, { SYS_RES_IRQ, 0, RF_ACTIVE }, { -1, 0 } }; #define AML_MMC_LOCK(sc) mtx_lock(&(sc)->mtx) #define AML_MMC_UNLOCK(sc) mtx_unlock(&(sc)->mtx) #define AML_MMC_LOCK_ASSERT(sc) mtx_assert(&(sc)->mtx, MA_OWNED) #define AML_MMC_LOCK_INIT(sc) \ mtx_init(&(sc)->mtx, device_get_nameunit((sc)->dev), \ "mmc", MTX_DEF) #define AML_MMC_LOCK_DESTROY(sc) mtx_destroy(&(sc)->mtx); #define CSR_WRITE_4(sc, reg, val) bus_write_4((sc)->res[0], reg, (val)) #define CSR_READ_4(sc, reg) bus_read_4((sc)->res[0], reg) #define CSR_BARRIER(sc, reg) bus_barrier((sc)->res[0], reg, 4, \ (BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE)) #define PWR_ON_FLAG(pol) ((pol) == 0 ? GPIO_PIN_LOW : \ GPIO_PIN_HIGH) #define PWR_OFF_FLAG(pol) ((pol) == 0 ? 
GPIO_PIN_HIGH : \ GPIO_PIN_LOW) #define MSECS_TO_TICKS(ms) (((ms)*hz)/1000 + 1) static void aml8726_mmc_timeout(void *arg); static unsigned int aml8726_mmc_clk(phandle_t node) { pcell_t prop; ssize_t len; phandle_t clk_node; len = OF_getencprop(node, "clocks", &prop, sizeof(prop)); if ((len / sizeof(prop)) != 1 || prop == 0 || (clk_node = OF_node_from_xref(prop)) == 0) return (0); len = OF_getencprop(clk_node, "clock-frequency", &prop, sizeof(prop)); if ((len / sizeof(prop)) != 1 || prop == 0) return (0); return ((unsigned int)prop); } static uint32_t aml8726_mmc_freq(struct aml8726_mmc_softc *sc, uint32_t divisor) { return (sc->ref_freq / ((divisor + 1) * 2)); } static uint32_t aml8726_mmc_div(struct aml8726_mmc_softc *sc, uint32_t desired_freq) { uint32_t divisor; divisor = sc->ref_freq / (desired_freq * 2); if (divisor == 0) divisor = 1; divisor -= 1; if (aml8726_mmc_freq(sc, divisor) > desired_freq) divisor += 1; if (divisor > (AML_MMC_CONFIG_CMD_CLK_DIV_MASK >> AML_MMC_CONFIG_CMD_CLK_DIV_SHIFT)) { divisor = AML_MMC_CONFIG_CMD_CLK_DIV_MASK >> AML_MMC_CONFIG_CMD_CLK_DIV_SHIFT; } return (divisor); } static void aml8726_mmc_mapmem(void *arg, bus_dma_segment_t *segs, int nseg, int error) { bus_addr_t *busaddrp; /* * There should only be one bus space address since * bus_dma_tag_create was called with nsegments = 1. 
*/ busaddrp = (bus_addr_t *)arg; *busaddrp = segs->ds_addr; } static int aml8726_mmc_power_off(struct aml8726_mmc_softc *sc) { if (sc->pwr_en.dev == NULL) return (0); return (GPIO_PIN_SET(sc->pwr_en.dev, sc->pwr_en.pin, PWR_OFF_FLAG(sc->pwr_en.pol))); } static int aml8726_mmc_power_on(struct aml8726_mmc_softc *sc) { if (sc->pwr_en.dev == NULL) return (0); return (GPIO_PIN_SET(sc->pwr_en.dev, sc->pwr_en.pin, PWR_ON_FLAG(sc->pwr_en.pol))); } static void aml8726_mmc_soft_reset(struct aml8726_mmc_softc *sc, boolean_t enable_irq) { uint32_t icr; icr = AML_MMC_IRQ_CONFIG_SOFT_RESET; if (enable_irq == true) icr |= AML_MMC_IRQ_CONFIG_CMD_DONE_EN; CSR_WRITE_4(sc, AML_MMC_IRQ_CONFIG_REG, icr); CSR_BARRIER(sc, AML_MMC_IRQ_CONFIG_REG); } static int aml8726_mmc_start_command(struct aml8726_mmc_softc *sc, struct mmc_command *cmd) { struct mmc_ios *ios = &sc->host.ios; bus_addr_t baddr; uint32_t block_size; uint32_t bus_width; uint32_t cmdr; uint32_t extr; uint32_t mcfgr; uint32_t nbits_per_pkg; uint32_t timeout; int error; struct mmc_data *data; if (cmd->opcode > 0x3f) return (MMC_ERR_INVALID); /* * Ensure the hardware state machine is in a known state. */ aml8726_mmc_soft_reset(sc, true); /* * Start and transmission bits are per section 4.7.2 of the: * * SD Specifications Part 1 * Physicaly Layer Simplified Specification * Version 4.10 */ cmdr = AML_MMC_CMD_START_BIT | AML_MMC_CMD_TRANS_BIT_HOST | cmd->opcode; baddr = 0; extr = 0; mcfgr = sc->port; timeout = AML_MMC_CMD_TIMEOUT; /* * If this is a linked command, then use the previous timeout. 
*/ if (cmd == cmd->mrq->stop && sc->stop_timeout) timeout = sc->stop_timeout; sc->stop_timeout = 0; if ((cmd->flags & MMC_RSP_136) != 0) { cmdr |= AML_MMC_CMD_RESP_CRC7_FROM_8; cmdr |= (133 << AML_MMC_CMD_RESP_BITS_SHIFT); } else if ((cmd->flags & MMC_RSP_PRESENT) != 0) cmdr |= (45 << AML_MMC_CMD_RESP_BITS_SHIFT); if ((cmd->flags & MMC_RSP_CRC) == 0) cmdr |= AML_MMC_CMD_RESP_NO_CRC7; if ((cmd->flags & MMC_RSP_BUSY) != 0) cmdr |= AML_MMC_CMD_CHECK_DAT0_BUSY; data = cmd->data; if (data && data->len && (data->flags & (MMC_DATA_READ | MMC_DATA_WRITE)) != 0) { block_size = data->len; if ((data->flags & MMC_DATA_MULTI) != 0) { block_size = MMC_SECTOR_SIZE; if ((data->len % block_size) != 0) return (MMC_ERR_INVALID); } cmdr |= (((data->len / block_size) - 1) << AML_MMC_CMD_REP_PKG_CNT_SHIFT); mcfgr |= (data->flags & MMC_DATA_STREAM) ? AML_MMC_MULT_CONFIG_STREAM_EN : 0; /* * The number of bits per package equals the number * of data bits + the number of CRC bits. There are * 16 bits of CRC calculate per bus line. * * A completed package appears to be detected by when * a counter decremented by the width underflows, thus * a value of zero always transfers 1 (or 4 bits depending * on the mode) which is why bus_width is subtracted. */ bus_width = (ios->bus_width == bus_width_4) ? 
4 : 1; nbits_per_pkg = block_size * 8 + 16 * bus_width - bus_width; if (nbits_per_pkg > 0x3fff) return (MMC_ERR_INVALID); extr |= (nbits_per_pkg << AML_MMC_EXTENSION_PKT_SIZE_SHIFT); error = bus_dmamap_load(sc->dmatag, sc->dmamap, data->data, data->len, aml8726_mmc_mapmem, &baddr, BUS_DMA_NOWAIT); if (error) return (MMC_ERR_NO_MEMORY); if ((data->flags & MMC_DATA_READ) != 0) { cmdr |= AML_MMC_CMD_RESP_HAS_DATA; bus_dmamap_sync(sc->dmatag, sc->dmamap, BUS_DMASYNC_PREREAD); timeout = AML_MMC_READ_TIMEOUT * (data->len / block_size); } else { cmdr |= AML_MMC_CMD_CMD_HAS_DATA; bus_dmamap_sync(sc->dmatag, sc->dmamap, BUS_DMASYNC_PREWRITE); timeout = AML_MMC_WRITE_TIMEOUT * (data->len / block_size); } /* * Stop terminates a multiblock read / write and thus * can take as long to execute as an actual read / write. */ if (cmd->mrq->stop != NULL) sc->stop_timeout = timeout; } sc->cmd = cmd; cmd->error = MMC_ERR_NONE; if (timeout > AML_MMC_MAX_TIMEOUT) timeout = AML_MMC_MAX_TIMEOUT; callout_reset(&sc->ch, MSECS_TO_TICKS(timeout), aml8726_mmc_timeout, sc); CSR_WRITE_4(sc, AML_MMC_CMD_ARGUMENT_REG, cmd->arg); CSR_WRITE_4(sc, AML_MMC_MULT_CONFIG_REG, mcfgr); CSR_WRITE_4(sc, AML_MMC_EXTENSION_REG, extr); CSR_WRITE_4(sc, AML_MMC_DMA_ADDR_REG, (uint32_t)baddr); CSR_WRITE_4(sc, AML_MMC_CMD_SEND_REG, cmdr); CSR_BARRIER(sc, AML_MMC_CMD_SEND_REG); return (MMC_ERR_NONE); } static void aml8726_mmc_finish_command(struct aml8726_mmc_softc *sc, int mmc_error) { int mmc_stop_error; struct mmc_command *cmd; struct mmc_command *stop_cmd; struct mmc_data *data; AML_MMC_LOCK_ASSERT(sc); /* Clear all interrupts since the request is no longer in flight. */ CSR_WRITE_4(sc, AML_MMC_IRQ_STATUS_REG, AML_MMC_IRQ_STATUS_CLEAR_IRQ); CSR_BARRIER(sc, AML_MMC_IRQ_STATUS_REG); /* In some cases (e.g. finish called via timeout) this is a NOP. 
*/ callout_stop(&sc->ch); cmd = sc->cmd; sc->cmd = NULL; cmd->error = mmc_error; data = cmd->data; if (data && data->len && (data->flags & (MMC_DATA_READ | MMC_DATA_WRITE)) != 0) { if ((data->flags & MMC_DATA_READ) != 0) bus_dmamap_sync(sc->dmatag, sc->dmamap, BUS_DMASYNC_POSTREAD); else bus_dmamap_sync(sc->dmatag, sc->dmamap, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->dmatag, sc->dmamap); } /* * If there's a linked stop command, then start the stop command. * In order to establish a known state attempt the stop command * even if the original request encountered an error. */ stop_cmd = (cmd->mrq->stop != cmd) ? cmd->mrq->stop : NULL; if (stop_cmd != NULL) { mmc_stop_error = aml8726_mmc_start_command(sc, stop_cmd); if (mmc_stop_error == MMC_ERR_NONE) { AML_MMC_UNLOCK(sc); return; } stop_cmd->error = mmc_stop_error; } AML_MMC_UNLOCK(sc); /* Execute the callback after dropping the lock. */ if (cmd->mrq) cmd->mrq->done(cmd->mrq); } static void aml8726_mmc_timeout(void *arg) { struct aml8726_mmc_softc *sc = (struct aml8726_mmc_softc *)arg; /* * The command failed to complete in time so forcefully * terminate it. */ aml8726_mmc_soft_reset(sc, false); /* * Ensure the command has terminated before continuing on * to things such as bus_dmamap_sync / bus_dmamap_unload. */ while ((CSR_READ_4(sc, AML_MMC_IRQ_STATUS_REG) & AML_MMC_IRQ_STATUS_CMD_BUSY) != 0) cpu_spinwait(); aml8726_mmc_finish_command(sc, MMC_ERR_TIMEOUT); } static void aml8726_mmc_intr(void *arg) { struct aml8726_mmc_softc *sc = (struct aml8726_mmc_softc *)arg; uint32_t cmdr; uint32_t isr; uint32_t mcfgr; uint32_t previous_byte; uint32_t resp; int mmc_error; unsigned int i; AML_MMC_LOCK(sc); isr = CSR_READ_4(sc, AML_MMC_IRQ_STATUS_REG); cmdr = CSR_READ_4(sc, AML_MMC_CMD_SEND_REG); if (sc->cmd == NULL) goto spurious; mmc_error = MMC_ERR_NONE; if ((isr & AML_MMC_IRQ_STATUS_CMD_DONE_IRQ) != 0) { /* Check for CRC errors if the command has completed. 
*/ if ((cmdr & AML_MMC_CMD_RESP_NO_CRC7) == 0 && (isr & AML_MMC_IRQ_STATUS_RESP_CRC7_OK) == 0) mmc_error = MMC_ERR_BADCRC; if ((cmdr & AML_MMC_CMD_RESP_HAS_DATA) != 0 && (isr & AML_MMC_IRQ_STATUS_RD_CRC16_OK) == 0) mmc_error = MMC_ERR_BADCRC; if ((cmdr & AML_MMC_CMD_CMD_HAS_DATA) != 0 && (isr & AML_MMC_IRQ_STATUS_WR_CRC16_OK) == 0) mmc_error = MMC_ERR_BADCRC; } else { spurious: /* * Clear spurious interrupts while leaving intacted any * interrupts that may have occurred after we read the * interrupt status register. */ CSR_WRITE_4(sc, AML_MMC_IRQ_STATUS_REG, (AML_MMC_IRQ_STATUS_CLEAR_IRQ & isr)); CSR_BARRIER(sc, AML_MMC_IRQ_STATUS_REG); AML_MMC_UNLOCK(sc); return; } if ((cmdr & AML_MMC_CMD_RESP_BITS_MASK) != 0) { mcfgr = sc->port; mcfgr |= AML_MMC_MULT_CONFIG_RESP_READOUT_EN; CSR_WRITE_4(sc, AML_MMC_MULT_CONFIG_REG, mcfgr); if ((cmdr & AML_MMC_CMD_RESP_CRC7_FROM_8) != 0) { /* * Controller supplies 135:8 instead of * 127:0 so discard the leading 8 bits * and provide a trailing 8 zero bits * where the CRC belongs. */ previous_byte = 0; for (i = 0; i < 4; i++) { resp = CSR_READ_4(sc, AML_MMC_CMD_ARGUMENT_REG); sc->cmd->resp[3 - i] = (resp << 8) | previous_byte; previous_byte = (resp >> 24) & 0xff; } } else sc->cmd->resp[0] = CSR_READ_4(sc, AML_MMC_CMD_ARGUMENT_REG); } if ((isr & AML_MMC_IRQ_STATUS_CMD_BUSY) != 0 && /* * A multiblock operation may keep the hardware * busy until stop transmission is executed. */ (isr & AML_MMC_IRQ_STATUS_CMD_DONE_IRQ) == 0) { if (mmc_error == MMC_ERR_NONE) mmc_error = MMC_ERR_FAILED; /* * Issue a soft reset to terminate the command. * * Ensure the command has terminated before continuing on * to things such as bus_dmamap_sync / bus_dmamap_unload. 
*/ aml8726_mmc_soft_reset(sc, false); while ((CSR_READ_4(sc, AML_MMC_IRQ_STATUS_REG) & AML_MMC_IRQ_STATUS_CMD_BUSY) != 0) cpu_spinwait(); } aml8726_mmc_finish_command(sc, mmc_error); } static int aml8726_mmc_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (!ofw_bus_is_compatible(dev, "amlogic,aml8726-mmc")) return (ENXIO); device_set_desc(dev, "Amlogic aml8726 MMC"); return (BUS_PROBE_DEFAULT); } static int aml8726_mmc_attach(device_t dev) { struct aml8726_mmc_softc *sc = device_get_softc(dev); char *function_name; char *voltages; char *voltage; int error; int nvoltages; pcell_t prop[3]; phandle_t node; ssize_t len; device_t child; sc->dev = dev; node = ofw_bus_get_node(dev); sc->ref_freq = aml8726_mmc_clk(node); if (sc->ref_freq == 0) { device_printf(dev, "missing clocks attribute in FDT\n"); return (ENXIO); } /* * The pins must be specified as part of the device in order * to know which port to used. */ len = OF_getencprop(node, "pinctrl-0", prop, sizeof(prop)); if ((len / sizeof(prop[0])) != 1 || prop[0] == 0) { device_printf(dev, "missing pinctrl-0 attribute in FDT\n"); return (ENXIO); } len = OF_getprop_alloc(OF_node_from_xref(prop[0]), "amlogic,function", sizeof(char), (void **)&function_name); if (len < 0) { device_printf(dev, "missing amlogic,function attribute in FDT\n"); return (ENXIO); } if (strncmp("sdio-a", function_name, len) == 0) sc->port = AML_MMC_MULT_CONFIG_PORT_A; else if (strncmp("sdio-b", function_name, len) == 0) sc->port = AML_MMC_MULT_CONFIG_PORT_B; else if (strncmp("sdio-c", function_name, len) == 0) sc->port = AML_MMC_MULT_CONFIG_PORT_C; else { device_printf(dev, "unknown function attribute %.*s in FDT\n", len, function_name); free(function_name, M_OFWPROP); return (ENXIO); } free(function_name, M_OFWPROP); sc->pwr_en.dev = NULL; len = OF_getencprop(node, "mmc-pwr-en", prop, sizeof(prop)); if (len > 0) { if ((len / sizeof(prop[0])) == 3) { sc->pwr_en.dev = OF_device_from_xref(prop[0]); sc->pwr_en.pin = prop[1]; 
sc->pwr_en.pol = prop[2]; } if (sc->pwr_en.dev == NULL) { device_printf(dev, "unable to process mmc-pwr-en attribute in FDT\n"); return (ENXIO); } /* Turn off power and then configure the output driver. */ if (aml8726_mmc_power_off(sc) != 0 || GPIO_PIN_SETFLAGS(sc->pwr_en.dev, sc->pwr_en.pin, GPIO_PIN_OUTPUT) != 0) { device_printf(dev, "could not use gpio to control power\n"); return (ENXIO); } } len = OF_getprop_alloc(node, "mmc-voltages", sizeof(char), (void **)&voltages); if (len < 0) { device_printf(dev, "missing mmc-voltages attribute in FDT\n"); return (ENXIO); } sc->voltages[0] = 0; sc->voltages[1] = 0; voltage = voltages; nvoltages = 0; while (len && nvoltages < 2) { if (strncmp("1.8", voltage, len) == 0) sc->voltages[nvoltages] = MMC_OCR_LOW_VOLTAGE; else if (strncmp("3.3", voltage, len) == 0) sc->voltages[nvoltages] = MMC_OCR_320_330 | MMC_OCR_330_340; else { device_printf(dev, "unknown voltage attribute %.*s in FDT\n", len, voltage); free(voltages, M_OFWPROP); return (ENXIO); } nvoltages++; /* queue up next string */ while (*voltage && len) { voltage++; len--; } if (len) { voltage++; len--; } } free(voltages, M_OFWPROP); sc->vselect.dev = NULL; len = OF_getencprop(node, "mmc-vselect", prop, sizeof(prop)); if (len > 0) { if ((len / sizeof(prop[0])) == 2) { sc->vselect.dev = OF_device_from_xref(prop[0]); sc->vselect.pin = prop[1]; sc->vselect.pol = 1; } if (sc->vselect.dev == NULL) { device_printf(dev, "unable to process mmc-vselect attribute in FDT\n"); return (ENXIO); } /* * With the power off select voltage 0 and then * configure the output driver. 
*/ if (GPIO_PIN_SET(sc->vselect.dev, sc->vselect.pin, 0) != 0 || GPIO_PIN_SETFLAGS(sc->vselect.dev, sc->vselect.pin, GPIO_PIN_OUTPUT) != 0) { device_printf(dev, "could not use gpio to set voltage\n"); return (ENXIO); } } if (nvoltages == 0) { device_printf(dev, "no voltages in FDT\n"); return (ENXIO); } else if (nvoltages == 1 && sc->vselect.dev != NULL) { device_printf(dev, "only one voltage in FDT\n"); return (ENXIO); } else if (nvoltages == 2 && sc->vselect.dev == NULL) { device_printf(dev, "too many voltages in FDT\n"); return (ENXIO); } if (bus_alloc_resources(dev, aml8726_mmc_spec, sc->res)) { device_printf(dev, "could not allocate resources for device\n"); return (ENXIO); } AML_MMC_LOCK_INIT(sc); error = bus_dma_tag_create(bus_get_dma_tag(dev), AML_MMC_ALIGN_DMA, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, AML_MMC_MAX_DMA, 1, AML_MMC_MAX_DMA, 0, NULL, NULL, &sc->dmatag); if (error) goto fail; error = bus_dmamap_create(sc->dmatag, 0, &sc->dmamap); if (error) goto fail; error = bus_setup_intr(dev, sc->res[1], INTR_TYPE_MISC | INTR_MPSAFE, NULL, aml8726_mmc_intr, sc, &sc->ih_cookie); if (error) { device_printf(dev, "could not setup interrupt handler\n"); goto fail; } callout_init_mtx(&sc->ch, &sc->mtx, CALLOUT_RETURNUNLOCKED); sc->host.f_min = aml8726_mmc_freq(sc, aml8726_mmc_div(sc, 200000)); sc->host.f_max = aml8726_mmc_freq(sc, aml8726_mmc_div(sc, 50000000)); sc->host.host_ocr = sc->voltages[0] | sc->voltages[1]; sc->host.caps = MMC_CAP_4_BIT_DATA | MMC_CAP_HSPEED; child = device_add_child(dev, "mmc", -1); if (!child) { device_printf(dev, "could not add mmc\n"); error = ENXIO; goto fail; } error = device_probe_and_attach(child); if (error) { device_printf(dev, "could not attach mmc\n"); goto fail; } return (0); fail: if (sc->ih_cookie) bus_teardown_intr(dev, sc->res[1], sc->ih_cookie); if (sc->dmamap) bus_dmamap_destroy(sc->dmatag, sc->dmamap); if (sc->dmatag) bus_dma_tag_destroy(sc->dmatag); AML_MMC_LOCK_DESTROY(sc); 
aml8726_mmc_power_off(sc); bus_release_resources(dev, aml8726_mmc_spec, sc->res); return (error); } static int aml8726_mmc_detach(device_t dev) { struct aml8726_mmc_softc *sc = device_get_softc(dev); AML_MMC_LOCK(sc); if (sc->cmd != NULL) { AML_MMC_UNLOCK(sc); return (EBUSY); } /* * Turn off the power, reset the hardware state machine, * disable the interrupts, and clear the interrupts. */ (void)aml8726_mmc_power_off(sc); aml8726_mmc_soft_reset(sc, false); CSR_WRITE_4(sc, AML_MMC_IRQ_STATUS_REG, AML_MMC_IRQ_STATUS_CLEAR_IRQ); /* This should be a NOP since no command was in flight. */ callout_stop(&sc->ch); AML_MMC_UNLOCK(sc); bus_generic_detach(dev); bus_teardown_intr(dev, sc->res[1], sc->ih_cookie); bus_dmamap_destroy(sc->dmatag, sc->dmamap); bus_dma_tag_destroy(sc->dmatag); AML_MMC_LOCK_DESTROY(sc); bus_release_resources(dev, aml8726_mmc_spec, sc->res); return (0); } static int aml8726_mmc_shutdown(device_t dev) { struct aml8726_mmc_softc *sc = device_get_softc(dev); /* * Turn off the power, reset the hardware state machine, * disable the interrupts, and clear the interrupts. 
*/ (void)aml8726_mmc_power_off(sc); aml8726_mmc_soft_reset(sc, false); CSR_WRITE_4(sc, AML_MMC_IRQ_STATUS_REG, AML_MMC_IRQ_STATUS_CLEAR_IRQ); return (0); } static int aml8726_mmc_update_ios(device_t bus, device_t child) { struct aml8726_mmc_softc *sc = device_get_softc(bus); struct mmc_ios *ios = &sc->host.ios; int error; int i; uint32_t cfgr; cfgr = (2 << AML_MMC_CONFIG_WR_CRC_STAT_SHIFT) | (2 << AML_MMC_CONFIG_WR_DELAY_SHIFT) | AML_MMC_CONFIG_DMA_ENDIAN_SBW | (39 << AML_MMC_CONFIG_CMD_ARG_BITS_SHIFT); switch (ios->bus_width) { case bus_width_4: cfgr |= AML_MMC_CONFIG_BUS_WIDTH_4; break; case bus_width_1: cfgr |= AML_MMC_CONFIG_BUS_WIDTH_1; break; default: return (EINVAL); } cfgr |= aml8726_mmc_div(sc, ios->clock) << AML_MMC_CONFIG_CMD_CLK_DIV_SHIFT; CSR_WRITE_4(sc, AML_MMC_CONFIG_REG, cfgr); error = 0; switch (ios->power_mode) { case power_up: /* * Configure and power on the regulator so that the * voltage stabilizes prior to powering on the card. */ if (sc->vselect.dev != NULL) { for (i = 0; i < 2; i++) if ((sc->voltages[i] & (1 << ios->vdd)) != 0) break; if (i >= 2) return (EINVAL); error = GPIO_PIN_SET(sc->vselect.dev, sc->vselect.pin, i); } break; case power_on: error = aml8726_mmc_power_on(sc); break; case power_off: error = aml8726_mmc_power_off(sc); break; default: return (EINVAL); } return (error); } static int aml8726_mmc_request(device_t bus, device_t child, struct mmc_request *req) { struct aml8726_mmc_softc *sc = device_get_softc(bus); int mmc_error; AML_MMC_LOCK(sc); if (sc->cmd != NULL) { AML_MMC_UNLOCK(sc); return (EBUSY); } mmc_error = aml8726_mmc_start_command(sc, req->cmd); AML_MMC_UNLOCK(sc); /* Execute the callback after dropping the lock. 
*/ if (mmc_error != MMC_ERR_NONE) { req->cmd->error = mmc_error; req->done(req); } return (0); } static int aml8726_mmc_read_ivar(device_t bus, device_t child, int which, uintptr_t *result) { struct aml8726_mmc_softc *sc = device_get_softc(bus); switch (which) { case MMCBR_IVAR_BUS_MODE: *(int *)result = sc->host.ios.bus_mode; break; case MMCBR_IVAR_BUS_WIDTH: *(int *)result = sc->host.ios.bus_width; break; case MMCBR_IVAR_CHIP_SELECT: *(int *)result = sc->host.ios.chip_select; break; case MMCBR_IVAR_CLOCK: *(int *)result = sc->host.ios.clock; break; case MMCBR_IVAR_F_MIN: *(int *)result = sc->host.f_min; break; case MMCBR_IVAR_F_MAX: *(int *)result = sc->host.f_max; break; case MMCBR_IVAR_HOST_OCR: *(int *)result = sc->host.host_ocr; break; case MMCBR_IVAR_MODE: *(int *)result = sc->host.mode; break; case MMCBR_IVAR_OCR: *(int *)result = sc->host.ocr; break; case MMCBR_IVAR_POWER_MODE: *(int *)result = sc->host.ios.power_mode; break; case MMCBR_IVAR_VDD: *(int *)result = sc->host.ios.vdd; break; case MMCBR_IVAR_CAPS: *(int *)result = sc->host.caps; break; case MMCBR_IVAR_MAX_DATA: *(int *)result = AML_MMC_MAX_DMA / MMC_SECTOR_SIZE; break; default: return (EINVAL); } return (0); } static int aml8726_mmc_write_ivar(device_t bus, device_t child, int which, uintptr_t value) { struct aml8726_mmc_softc *sc = device_get_softc(bus); switch (which) { case MMCBR_IVAR_BUS_MODE: sc->host.ios.bus_mode = value; break; case MMCBR_IVAR_BUS_WIDTH: sc->host.ios.bus_width = value; break; case MMCBR_IVAR_CHIP_SELECT: sc->host.ios.chip_select = value; break; case MMCBR_IVAR_CLOCK: sc->host.ios.clock = value; break; case MMCBR_IVAR_MODE: sc->host.mode = value; break; case MMCBR_IVAR_OCR: sc->host.ocr = value; break; case MMCBR_IVAR_POWER_MODE: sc->host.ios.power_mode = value; break; case MMCBR_IVAR_VDD: sc->host.ios.vdd = value; break; /* These are read-only */ case MMCBR_IVAR_CAPS: case MMCBR_IVAR_HOST_OCR: case MMCBR_IVAR_F_MIN: case MMCBR_IVAR_F_MAX: case MMCBR_IVAR_MAX_DATA: 
default: return (EINVAL); } return (0); } static int aml8726_mmc_get_ro(device_t bus, device_t child) { return (0); } static int aml8726_mmc_acquire_host(device_t bus, device_t child) { struct aml8726_mmc_softc *sc = device_get_softc(bus); AML_MMC_LOCK(sc); while (sc->bus_busy) mtx_sleep(sc, &sc->mtx, PZERO, "mmc", hz / 5); sc->bus_busy++; AML_MMC_UNLOCK(sc); return (0); } static int aml8726_mmc_release_host(device_t bus, device_t child) { struct aml8726_mmc_softc *sc = device_get_softc(bus); AML_MMC_LOCK(sc); sc->bus_busy--; wakeup(sc); AML_MMC_UNLOCK(sc); return (0); } static device_method_t aml8726_mmc_methods[] = { /* Device interface */ DEVMETHOD(device_probe, aml8726_mmc_probe), DEVMETHOD(device_attach, aml8726_mmc_attach), DEVMETHOD(device_detach, aml8726_mmc_detach), DEVMETHOD(device_shutdown, aml8726_mmc_shutdown), /* Bus interface */ DEVMETHOD(bus_read_ivar, aml8726_mmc_read_ivar), DEVMETHOD(bus_write_ivar, aml8726_mmc_write_ivar), /* MMC bridge interface */ DEVMETHOD(mmcbr_update_ios, aml8726_mmc_update_ios), DEVMETHOD(mmcbr_request, aml8726_mmc_request), DEVMETHOD(mmcbr_get_ro, aml8726_mmc_get_ro), DEVMETHOD(mmcbr_acquire_host, aml8726_mmc_acquire_host), DEVMETHOD(mmcbr_release_host, aml8726_mmc_release_host), DEVMETHOD_END }; static driver_t aml8726_mmc_driver = { "aml8726_mmc", aml8726_mmc_methods, sizeof(struct aml8726_mmc_softc), }; static devclass_t aml8726_mmc_devclass; DRIVER_MODULE(aml8726_mmc, simplebus, aml8726_mmc_driver, aml8726_mmc_devclass, 0, 0); MODULE_DEPEND(aml8726_mmc, aml8726_gpio, 1, 1, 1); +DRIVER_MODULE(mmc, aml8726_mmc, mmc_driver, mmc_devclass, NULL, NULL); Index: head/sys/arm/amlogic/aml8726/aml8726_sdxc-m8.c =================================================================== --- head/sys/arm/amlogic/aml8726/aml8726_sdxc-m8.c (revision 292179) +++ head/sys/arm/amlogic/aml8726/aml8726_sdxc-m8.c (revision 292180) @@ -1,1379 +1,1380 @@ /*- * Copyright 2015 John Wehle * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * Amlogic aml8726-m8 (and later) SDXC host controller driver. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "gpio_if.h" #include "mmcbr_if.h" /* * The table is sorted from highest to lowest and * last entry in the table is mark by freq == 0. 
*/ struct { uint32_t voltage; uint32_t freq; uint32_t rx_phase; } aml8726_sdxc_clk_phases[] = { { MMC_OCR_LOW_VOLTAGE | MMC_OCR_320_330 | MMC_OCR_330_340, 100000000, 1 }, { MMC_OCR_320_330 | MMC_OCR_330_340, 45000000, 15 }, { MMC_OCR_LOW_VOLTAGE, 45000000, 11 }, { MMC_OCR_LOW_VOLTAGE | MMC_OCR_320_330 | MMC_OCR_330_340, 24999999, 15 }, { MMC_OCR_LOW_VOLTAGE | MMC_OCR_320_330 | MMC_OCR_330_340, 5000000, 23 }, { MMC_OCR_LOW_VOLTAGE | MMC_OCR_320_330 | MMC_OCR_330_340, 1000000, 55 }, { MMC_OCR_LOW_VOLTAGE | MMC_OCR_320_330 | MMC_OCR_330_340, 0, 1061 }, }; struct aml8726_sdxc_gpio { device_t dev; uint32_t pin; uint32_t pol; }; struct aml8726_sdxc_softc { device_t dev; boolean_t auto_fill_flush; struct resource *res[2]; struct mtx mtx; struct callout ch; unsigned int ref_freq; struct aml8726_sdxc_gpio pwr_en; int voltages[2]; struct aml8726_sdxc_gpio vselect; struct aml8726_sdxc_gpio card_rst; bus_dma_tag_t dmatag; bus_dmamap_t dmamap; void *ih_cookie; struct mmc_host host; int bus_busy; struct { uint32_t time; uint32_t error; } busy; struct mmc_command *cmd; }; static struct resource_spec aml8726_sdxc_spec[] = { { SYS_RES_MEMORY, 0, RF_ACTIVE }, { SYS_RES_IRQ, 0, RF_ACTIVE }, { -1, 0 } }; #define AML_SDXC_LOCK(sc) mtx_lock(&(sc)->mtx) #define AML_SDXC_UNLOCK(sc) mtx_unlock(&(sc)->mtx) #define AML_SDXC_LOCK_ASSERT(sc) mtx_assert(&(sc)->mtx, MA_OWNED) #define AML_SDXC_LOCK_INIT(sc) \ mtx_init(&(sc)->mtx, device_get_nameunit((sc)->dev), \ "sdxc", MTX_DEF) #define AML_SDXC_LOCK_DESTROY(sc) mtx_destroy(&(sc)->mtx); #define CSR_WRITE_4(sc, reg, val) bus_write_4((sc)->res[0], reg, (val)) #define CSR_READ_4(sc, reg) bus_read_4((sc)->res[0], reg) #define CSR_BARRIER(sc, reg) bus_barrier((sc)->res[0], reg, 4, \ (BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE)) #define PIN_ON_FLAG(pol) ((pol) == 0 ? \ GPIO_PIN_LOW : GPIO_PIN_HIGH) #define PIN_OFF_FLAG(pol) ((pol) == 0 ? 
\ GPIO_PIN_HIGH : GPIO_PIN_LOW) #define msecs_to_ticks(ms) (((ms)*hz)/1000 + 1) static void aml8726_sdxc_timeout(void *arg); static void aml8726_sdxc_mapmem(void *arg, bus_dma_segment_t *segs, int nseg, int error) { bus_addr_t *busaddrp; /* * There should only be one bus space address since * bus_dma_tag_create was called with nsegments = 1. */ busaddrp = (bus_addr_t *)arg; *busaddrp = segs->ds_addr; } static int aml8726_sdxc_power_off(struct aml8726_sdxc_softc *sc) { if (sc->pwr_en.dev == NULL) return (0); return (GPIO_PIN_SET(sc->pwr_en.dev, sc->pwr_en.pin, PIN_OFF_FLAG(sc->pwr_en.pol))); } static int aml8726_sdxc_power_on(struct aml8726_sdxc_softc *sc) { if (sc->pwr_en.dev == NULL) return (0); return (GPIO_PIN_SET(sc->pwr_en.dev, sc->pwr_en.pin, PIN_ON_FLAG(sc->pwr_en.pol))); } static void aml8726_sdxc_soft_reset(struct aml8726_sdxc_softc *sc) { CSR_WRITE_4(sc, AML_SDXC_SOFT_RESET_REG, AML_SDXC_SOFT_RESET); CSR_BARRIER(sc, AML_SDXC_SOFT_RESET_REG); DELAY(5); } static void aml8726_sdxc_engage_dma(struct aml8726_sdxc_softc *sc) { int i; uint32_t pdmar; uint32_t sr; struct mmc_data *data; data = sc->cmd->data; if (data == NULL || data->len == 0) return; /* * Engaging the DMA hardware is recommended before writing * to AML_SDXC_SEND_REG so that the FIFOs are ready to go. * * Presumably AML_SDXC_CNTRL_REG and AML_SDXC_DMA_ADDR_REG * must be set up prior to this happening. */ pdmar = CSR_READ_4(sc, AML_SDXC_PDMA_REG); pdmar &= ~AML_SDXC_PDMA_RX_FLUSH_MODE_SW; pdmar |= AML_SDXC_PDMA_DMA_EN; if (sc->auto_fill_flush == true) { CSR_WRITE_4(sc, AML_SDXC_PDMA_REG, pdmar); CSR_BARRIER(sc, AML_SDXC_PDMA_REG); return; } if ((data->flags & MMC_DATA_READ) != 0) { pdmar |= AML_SDXC_PDMA_RX_FLUSH_MODE_SW; CSR_WRITE_4(sc, AML_SDXC_PDMA_REG, pdmar); CSR_BARRIER(sc, AML_SDXC_PDMA_REG); } else { pdmar |= AML_SDXC_PDMA_TX_FILL; CSR_WRITE_4(sc, AML_SDXC_PDMA_REG, pdmar); CSR_BARRIER(sc, AML_SDXC_PDMA_REG); /* * Wait up to 100us for data to show up. 
*/ for (i = 0; i < 100; i++) { sr = CSR_READ_4(sc, AML_SDXC_STATUS_REG); if ((sr & AML_SDXC_STATUS_TX_CNT_MASK) != 0) break; DELAY(1); } if (i >= 100) device_printf(sc->dev, "TX FIFO fill timeout\n"); } } static void aml8726_sdxc_disengage_dma(struct aml8726_sdxc_softc *sc) { int i; uint32_t pdmar; uint32_t sr; struct mmc_data *data; data = sc->cmd->data; if (data == NULL || data->len == 0) return; pdmar = CSR_READ_4(sc, AML_SDXC_PDMA_REG); if (sc->auto_fill_flush == true) { pdmar &= ~AML_SDXC_PDMA_DMA_EN; CSR_WRITE_4(sc, AML_SDXC_PDMA_REG, pdmar); CSR_BARRIER(sc, AML_SDXC_PDMA_REG); return; } if ((data->flags & MMC_DATA_READ) != 0) { pdmar |= AML_SDXC_PDMA_RX_FLUSH_NOW; CSR_WRITE_4(sc, AML_SDXC_PDMA_REG, pdmar); CSR_BARRIER(sc, AML_SDXC_PDMA_REG); /* * Wait up to 100us for data to drain. */ for (i = 0; i < 100; i++) { sr = CSR_READ_4(sc, AML_SDXC_STATUS_REG); if ((sr & AML_SDXC_STATUS_RX_CNT_MASK) == 0) break; DELAY(1); } if (i >= 100) device_printf(sc->dev, "RX FIFO drain timeout\n"); } pdmar &= ~(AML_SDXC_PDMA_DMA_EN | AML_SDXC_PDMA_RX_FLUSH_MODE_SW); CSR_WRITE_4(sc, AML_SDXC_PDMA_REG, pdmar); CSR_BARRIER(sc, AML_SDXC_PDMA_REG); } static int aml8726_sdxc_start_command(struct aml8726_sdxc_softc *sc, struct mmc_command *cmd) { bus_addr_t baddr; uint32_t block_size; uint32_t ctlr; uint32_t ier; uint32_t sndr; uint32_t timeout; int error; struct mmc_data *data; AML_SDXC_LOCK_ASSERT(sc); if (cmd->opcode > 0x3f) return (MMC_ERR_INVALID); /* * Ensure the hardware state machine is in a known state. */ aml8726_sdxc_soft_reset(sc); sndr = cmd->opcode; if ((cmd->flags & MMC_RSP_136) != 0) { sndr |= AML_SDXC_SEND_CMD_HAS_RESP; sndr |= AML_SDXC_SEND_RESP_136; /* * According to the SD spec the 136 bit response is * used for getting the CID or CSD in which case the * CRC7 is embedded in the contents rather than being * calculated over the entire response (the controller * always checks the CRC7 over the entire response). 
*/ sndr |= AML_SDXC_SEND_RESP_NO_CRC7_CHECK; } else if ((cmd->flags & MMC_RSP_PRESENT) != 0) sndr |= AML_SDXC_SEND_CMD_HAS_RESP; if ((cmd->flags & MMC_RSP_CRC) == 0) sndr |= AML_SDXC_SEND_RESP_NO_CRC7_CHECK; if (cmd->opcode == MMC_STOP_TRANSMISSION) sndr |= AML_SDXC_SEND_DATA_STOP; data = cmd->data; baddr = 0; ctlr = CSR_READ_4(sc, AML_SDXC_CNTRL_REG); ier = AML_SDXC_IRQ_ENABLE_STANDARD; timeout = AML_SDXC_CMD_TIMEOUT; ctlr &= ~AML_SDXC_CNTRL_PKG_LEN_MASK; if (data && data->len && (data->flags & (MMC_DATA_READ | MMC_DATA_WRITE)) != 0) { block_size = data->len; if ((data->flags & MMC_DATA_MULTI) != 0) { block_size = MMC_SECTOR_SIZE; if ((data->len % block_size) != 0) return (MMC_ERR_INVALID); } if (block_size > 512) return (MMC_ERR_INVALID); sndr |= AML_SDXC_SEND_CMD_HAS_DATA; sndr |= ((data->flags & MMC_DATA_WRITE) != 0) ? AML_SDXC_SEND_DATA_WRITE : 0; sndr |= (((data->len / block_size) - 1) << AML_SDXC_SEND_REP_PKG_CNT_SHIFT); ctlr |= ((block_size < 512) ? block_size : 0) << AML_SDXC_CNTRL_PKG_LEN_SHIFT; ier &= ~AML_SDXC_IRQ_ENABLE_RESP_OK; ier |= (sc->auto_fill_flush == true || (data->flags & MMC_DATA_WRITE) != 0) ? 
AML_SDXC_IRQ_ENABLE_DMA_DONE : AML_SDXC_IRQ_ENABLE_TRANSFER_DONE_OK; error = bus_dmamap_load(sc->dmatag, sc->dmamap, data->data, data->len, aml8726_sdxc_mapmem, &baddr, BUS_DMA_NOWAIT); if (error) return (MMC_ERR_NO_MEMORY); if ((data->flags & MMC_DATA_READ) != 0) { bus_dmamap_sync(sc->dmatag, sc->dmamap, BUS_DMASYNC_PREREAD); timeout = AML_SDXC_READ_TIMEOUT * (data->len / block_size); } else { bus_dmamap_sync(sc->dmatag, sc->dmamap, BUS_DMASYNC_PREWRITE); timeout = AML_SDXC_WRITE_TIMEOUT * (data->len / block_size); } } sc->cmd = cmd; cmd->error = MMC_ERR_NONE; sc->busy.time = 0; sc->busy.error = MMC_ERR_NONE; if (timeout > AML_SDXC_MAX_TIMEOUT) timeout = AML_SDXC_MAX_TIMEOUT; callout_reset(&sc->ch, msecs_to_ticks(timeout), aml8726_sdxc_timeout, sc); CSR_WRITE_4(sc, AML_SDXC_IRQ_ENABLE_REG, ier); CSR_WRITE_4(sc, AML_SDXC_CNTRL_REG, ctlr); CSR_WRITE_4(sc, AML_SDXC_DMA_ADDR_REG, (uint32_t)baddr); CSR_WRITE_4(sc, AML_SDXC_CMD_ARGUMENT_REG, cmd->arg); aml8726_sdxc_engage_dma(sc); CSR_WRITE_4(sc, AML_SDXC_SEND_REG, sndr); CSR_BARRIER(sc, AML_SDXC_SEND_REG); return (MMC_ERR_NONE); } static void aml8726_sdxc_finish_command(struct aml8726_sdxc_softc *sc, int mmc_error) { int mmc_stop_error; struct mmc_command *cmd; struct mmc_command *stop_cmd; struct mmc_data *data; AML_SDXC_LOCK_ASSERT(sc); /* Clear all interrupts since the request is no longer in flight. */ CSR_WRITE_4(sc, AML_SDXC_IRQ_STATUS_REG, AML_SDXC_IRQ_STATUS_CLEAR); CSR_BARRIER(sc, AML_SDXC_IRQ_STATUS_REG); /* In some cases (e.g. finish called via timeout) this is a NOP. 
*/ callout_stop(&sc->ch); cmd = sc->cmd; sc->cmd = NULL; cmd->error = mmc_error; data = cmd->data; if (data && data->len && (data->flags & (MMC_DATA_READ | MMC_DATA_WRITE)) != 0) { if ((data->flags & MMC_DATA_READ) != 0) bus_dmamap_sync(sc->dmatag, sc->dmamap, BUS_DMASYNC_POSTREAD); else bus_dmamap_sync(sc->dmatag, sc->dmamap, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->dmatag, sc->dmamap); } /* * If there's a linked stop command, then start the stop command. * In order to establish a known state attempt the stop command * even if the original request encountered an error. */ stop_cmd = (cmd->mrq->stop != cmd) ? cmd->mrq->stop : NULL; if (stop_cmd != NULL) { /* * If the original command executed successfuly, then * the hardware will also have automatically executed * a stop command so don't bother with the one supplied * with the original request. */ if (mmc_error == MMC_ERR_NONE) { stop_cmd->error = MMC_ERR_NONE; stop_cmd->resp[0] = cmd->resp[0]; stop_cmd->resp[1] = cmd->resp[1]; stop_cmd->resp[2] = cmd->resp[2]; stop_cmd->resp[3] = cmd->resp[3]; } else { mmc_stop_error = aml8726_sdxc_start_command(sc, stop_cmd); if (mmc_stop_error == MMC_ERR_NONE) { AML_SDXC_UNLOCK(sc); return; } stop_cmd->error = mmc_stop_error; } } AML_SDXC_UNLOCK(sc); /* Execute the callback after dropping the lock. */ if (cmd->mrq != NULL) cmd->mrq->done(cmd->mrq); } static void aml8726_sdxc_timeout(void *arg) { struct aml8726_sdxc_softc *sc = (struct aml8726_sdxc_softc *)arg; /* * The command failed to complete in time so forcefully * terminate it. */ aml8726_sdxc_soft_reset(sc); /* * Ensure the command has terminated before continuing on * to things such as bus_dmamap_sync / bus_dmamap_unload. 
*/ while ((CSR_READ_4(sc, AML_SDXC_STATUS_REG) & AML_SDXC_STATUS_BUSY) != 0) cpu_spinwait(); aml8726_sdxc_finish_command(sc, MMC_ERR_TIMEOUT); } static void aml8726_sdxc_busy_check(void *arg) { struct aml8726_sdxc_softc *sc = (struct aml8726_sdxc_softc *)arg; uint32_t sr; sc->busy.time += AML_SDXC_BUSY_POLL_INTVL; sr = CSR_READ_4(sc, AML_SDXC_STATUS_REG); if ((sr & AML_SDXC_STATUS_DAT0) == 0) { if (sc->busy.time < AML_SDXC_BUSY_TIMEOUT) { callout_reset(&sc->ch, msecs_to_ticks(AML_SDXC_BUSY_POLL_INTVL), aml8726_sdxc_busy_check, sc); AML_SDXC_UNLOCK(sc); return; } if (sc->busy.error == MMC_ERR_NONE) sc->busy.error = MMC_ERR_TIMEOUT; } aml8726_sdxc_finish_command(sc, sc->busy.error); } static void aml8726_sdxc_intr(void *arg) { struct aml8726_sdxc_softc *sc = (struct aml8726_sdxc_softc *)arg; uint32_t isr; uint32_t pdmar; uint32_t sndr; uint32_t sr; int i; int mmc_error; int start; int stop; AML_SDXC_LOCK(sc); isr = CSR_READ_4(sc, AML_SDXC_IRQ_STATUS_REG); sndr = CSR_READ_4(sc, AML_SDXC_SEND_REG); sr = CSR_READ_4(sc, AML_SDXC_STATUS_REG); if (sc->cmd == NULL) goto spurious; mmc_error = MMC_ERR_NONE; if ((isr & (AML_SDXC_IRQ_STATUS_TX_FIFO_EMPTY | AML_SDXC_IRQ_STATUS_RX_FIFO_FULL)) != 0) mmc_error = MMC_ERR_FIFO; else if ((isr & (AML_SDXC_IRQ_ENABLE_A_PKG_CRC_ERR | AML_SDXC_IRQ_ENABLE_RESP_CRC_ERR)) != 0) mmc_error = MMC_ERR_BADCRC; else if ((isr & (AML_SDXC_IRQ_ENABLE_A_PKG_TIMEOUT_ERR | AML_SDXC_IRQ_ENABLE_RESP_TIMEOUT_ERR)) != 0) mmc_error = MMC_ERR_TIMEOUT; else if ((isr & (AML_SDXC_IRQ_STATUS_RESP_OK | AML_SDXC_IRQ_STATUS_DMA_DONE | AML_SDXC_IRQ_STATUS_TRANSFER_DONE_OK)) != 0) { ; } else { spurious: /* * Clear spurious interrupts while leaving intacted any * interrupts that may have occurred after we read the * interrupt status register. 
*/ CSR_WRITE_4(sc, AML_SDXC_IRQ_STATUS_REG, (AML_SDXC_IRQ_STATUS_CLEAR & isr)); CSR_BARRIER(sc, AML_SDXC_IRQ_STATUS_REG); AML_SDXC_UNLOCK(sc); return; } aml8726_sdxc_disengage_dma(sc); if ((sndr & AML_SDXC_SEND_CMD_HAS_RESP) != 0) { start = 0; stop = 1; if ((sndr & AML_SDXC_SEND_RESP_136) != 0) { start = 1; stop = start + 4;; } for (i = start; i < stop; i++) { pdmar = CSR_READ_4(sc, AML_SDXC_PDMA_REG); pdmar &= ~(AML_SDXC_PDMA_DMA_EN | AML_SDXC_PDMA_RESP_INDEX_MASK); pdmar |= i << AML_SDXC_PDMA_RESP_INDEX_SHIFT; CSR_WRITE_4(sc, AML_SDXC_PDMA_REG, pdmar); sc->cmd->resp[(stop - 1) - i] = CSR_READ_4(sc, AML_SDXC_CMD_ARGUMENT_REG); } } if ((sr & AML_SDXC_STATUS_BUSY) != 0 && /* * A multiblock operation may keep the hardware * busy until stop transmission is executed. */ (isr & (AML_SDXC_IRQ_STATUS_DMA_DONE | AML_SDXC_IRQ_STATUS_TRANSFER_DONE_OK)) == 0) { if (mmc_error == MMC_ERR_NONE) mmc_error = MMC_ERR_FAILED; /* * Issue a soft reset to terminate the command. * * Ensure the command has terminated before continuing on * to things such as bus_dmamap_sync / bus_dmamap_unload. */ aml8726_sdxc_soft_reset(sc); while ((CSR_READ_4(sc, AML_SDXC_STATUS_REG) & AML_SDXC_STATUS_BUSY) != 0) cpu_spinwait(); } /* * The stop command can be generated either manually or * automatically by the hardware if MISC_MANUAL_STOP_MODE * has not been set. In either case check for busy. 
*/ if (((sc->cmd->flags & MMC_RSP_BUSY) != 0 || (sndr & AML_SDXC_SEND_INDEX_MASK) == MMC_STOP_TRANSMISSION) && (sr & AML_SDXC_STATUS_DAT0) == 0) { sc->busy.error = mmc_error; callout_reset(&sc->ch, msecs_to_ticks(AML_SDXC_BUSY_POLL_INTVL), aml8726_sdxc_busy_check, sc); CSR_WRITE_4(sc, AML_SDXC_IRQ_STATUS_REG, (AML_SDXC_IRQ_STATUS_CLEAR & isr)); CSR_BARRIER(sc, AML_SDXC_IRQ_STATUS_REG); AML_SDXC_UNLOCK(sc); return; } aml8726_sdxc_finish_command(sc, mmc_error); } static int aml8726_sdxc_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (!ofw_bus_is_compatible(dev, "amlogic,aml8726-sdxc-m8")) return (ENXIO); device_set_desc(dev, "Amlogic aml8726-m8 SDXC"); return (BUS_PROBE_DEFAULT); } static int aml8726_sdxc_attach(device_t dev) { struct aml8726_sdxc_softc *sc = device_get_softc(dev); char *voltages; char *voltage; int error; int nvoltages; pcell_t prop[3]; phandle_t node; ssize_t len; device_t child; uint32_t ectlr; uint32_t miscr; uint32_t pdmar; sc->dev = dev; sc->auto_fill_flush = false; pdmar = AML_SDXC_PDMA_DMA_URGENT | (49 << AML_SDXC_PDMA_TX_THOLD_SHIFT) | (7 << AML_SDXC_PDMA_RX_THOLD_SHIFT) | (15 << AML_SDXC_PDMA_RD_BURST_SHIFT) | (7 << AML_SDXC_PDMA_WR_BURST_SHIFT); miscr = (2 << AML_SDXC_MISC_WCRC_OK_PAT_SHIFT) | (5 << AML_SDXC_MISC_WCRC_ERR_PAT_SHIFT); ectlr = (12 << AML_SDXC_ENH_CNTRL_SDIO_IRQ_PERIOD_SHIFT); /* * Certain bitfields are dependent on the hardware revision. 
*/ switch (aml8726_soc_hw_rev) { case AML_SOC_HW_REV_M8: switch (aml8726_soc_metal_rev) { case AML_SOC_M8_METAL_REV_M2_A: sc->auto_fill_flush = true; miscr |= (6 << AML_SDXC_MISC_TXSTART_THOLD_SHIFT); ectlr |= (64 << AML_SDXC_ENH_CNTRL_RX_FULL_THOLD_SHIFT) | AML_SDXC_ENH_CNTRL_WR_RESP_MODE_SKIP_M8M2; break; default: miscr |= (7 << AML_SDXC_MISC_TXSTART_THOLD_SHIFT); ectlr |= (63 << AML_SDXC_ENH_CNTRL_RX_FULL_THOLD_SHIFT) | AML_SDXC_ENH_CNTRL_DMA_NO_WR_RESP_CHECK_M8 | (255 << AML_SDXC_ENH_CNTRL_RX_TIMEOUT_SHIFT_M8); break; } break; case AML_SOC_HW_REV_M8B: miscr |= (7 << AML_SDXC_MISC_TXSTART_THOLD_SHIFT); ectlr |= (63 << AML_SDXC_ENH_CNTRL_RX_FULL_THOLD_SHIFT) | AML_SDXC_ENH_CNTRL_DMA_NO_WR_RESP_CHECK_M8 | (255 << AML_SDXC_ENH_CNTRL_RX_TIMEOUT_SHIFT_M8); break; default: device_printf(dev, "unsupported SoC\n"); return (ENXIO); /* NOTREACHED */ } node = ofw_bus_get_node(dev); len = OF_getencprop(node, "clock-frequency", prop, sizeof(prop)); if ((len / sizeof(prop[0])) != 1 || prop[0] == 0) { device_printf(dev, "missing clock-frequency attribute in FDT\n"); return (ENXIO); } sc->ref_freq = prop[0]; sc->pwr_en.dev = NULL; len = OF_getencprop(node, "mmc-pwr-en", prop, sizeof(prop)); if (len > 0) { if ((len / sizeof(prop[0])) == 3) { sc->pwr_en.dev = OF_device_from_xref(prop[0]); sc->pwr_en.pin = prop[1]; sc->pwr_en.pol = prop[2]; } if (sc->pwr_en.dev == NULL) { device_printf(dev, "unable to process mmc-pwr-en attribute in FDT\n"); return (ENXIO); } /* Turn off power and then configure the output driver. 
*/ if (aml8726_sdxc_power_off(sc) != 0 || GPIO_PIN_SETFLAGS(sc->pwr_en.dev, sc->pwr_en.pin, GPIO_PIN_OUTPUT) != 0) { device_printf(dev, "could not use gpio to control power\n"); return (ENXIO); } } len = OF_getprop_alloc(node, "mmc-voltages", sizeof(char), (void **)&voltages); if (len < 0) { device_printf(dev, "missing mmc-voltages attribute in FDT\n"); return (ENXIO); } sc->voltages[0] = 0; sc->voltages[1] = 0; voltage = voltages; nvoltages = 0; while (len && nvoltages < 2) { if (strncmp("1.8", voltage, len) == 0) sc->voltages[nvoltages] = MMC_OCR_LOW_VOLTAGE; else if (strncmp("3.3", voltage, len) == 0) sc->voltages[nvoltages] = MMC_OCR_320_330 | MMC_OCR_330_340; else { device_printf(dev, "unknown voltage attribute %.*s in FDT\n", len, voltage); free(voltages, M_OFWPROP); return (ENXIO); } nvoltages++; /* queue up next string */ while (*voltage && len) { voltage++; len--; } if (len) { voltage++; len--; } } free(voltages, M_OFWPROP); sc->vselect.dev = NULL; len = OF_getencprop(node, "mmc-vselect", prop, sizeof(prop)); if (len > 0) { if ((len / sizeof(prop[0])) == 2) { sc->vselect.dev = OF_device_from_xref(prop[0]); sc->vselect.pin = prop[1]; sc->vselect.pol = 1; } if (sc->vselect.dev == NULL) { device_printf(dev, "unable to process mmc-vselect attribute in FDT\n"); return (ENXIO); } /* * With the power off select voltage 0 and then * configure the output driver. 
*/ if (GPIO_PIN_SET(sc->vselect.dev, sc->vselect.pin, 0) != 0 || GPIO_PIN_SETFLAGS(sc->vselect.dev, sc->vselect.pin, GPIO_PIN_OUTPUT) != 0) { device_printf(dev, "could not use gpio to set voltage\n"); return (ENXIO); } } if (nvoltages == 0) { device_printf(dev, "no voltages in FDT\n"); return (ENXIO); } else if (nvoltages == 1 && sc->vselect.dev != NULL) { device_printf(dev, "only one voltage in FDT\n"); return (ENXIO); } else if (nvoltages == 2 && sc->vselect.dev == NULL) { device_printf(dev, "too many voltages in FDT\n"); return (ENXIO); } sc->card_rst.dev = NULL; len = OF_getencprop(node, "mmc-rst", prop, sizeof(prop)); if (len > 0) { if ((len / sizeof(prop[0])) == 3) { sc->card_rst.dev = OF_device_from_xref(prop[0]); sc->card_rst.pin = prop[1]; sc->card_rst.pol = prop[2]; } if (sc->card_rst.dev == NULL) { device_printf(dev, "unable to process mmc-rst attribute in FDT\n"); return (ENXIO); } } if (bus_alloc_resources(dev, aml8726_sdxc_spec, sc->res)) { device_printf(dev, "could not allocate resources for device\n"); return (ENXIO); } AML_SDXC_LOCK_INIT(sc); error = bus_dma_tag_create(bus_get_dma_tag(dev), AML_SDXC_ALIGN_DMA, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, AML_SDXC_MAX_DMA, 1, AML_SDXC_MAX_DMA, 0, NULL, NULL, &sc->dmatag); if (error) goto fail; error = bus_dmamap_create(sc->dmatag, 0, &sc->dmamap); if (error) goto fail; error = bus_setup_intr(dev, sc->res[1], INTR_TYPE_MISC | INTR_MPSAFE, NULL, aml8726_sdxc_intr, sc, &sc->ih_cookie); if (error) { device_printf(dev, "could not setup interrupt handler\n"); goto fail; } callout_init_mtx(&sc->ch, &sc->mtx, CALLOUT_RETURNUNLOCKED); sc->host.f_min = 200000; sc->host.f_max = 100000000; sc->host.host_ocr = sc->voltages[0] | sc->voltages[1]; sc->host.caps = MMC_CAP_8_BIT_DATA | MMC_CAP_4_BIT_DATA | MMC_CAP_HSPEED; aml8726_sdxc_soft_reset(sc); CSR_WRITE_4(sc, AML_SDXC_PDMA_REG, pdmar); CSR_WRITE_4(sc, AML_SDXC_MISC_REG, miscr); CSR_WRITE_4(sc, AML_SDXC_ENH_CNTRL_REG, ectlr); child = 
device_add_child(dev, "mmc", -1); /* continued from "child =" on the previous line */
	if (!child) {
		device_printf(dev, "could not add mmc\n");
		error = ENXIO;
		goto fail;
	}

	error = device_probe_and_attach(child);
	if (error) {
		device_printf(dev, "could not attach mmc\n");
		goto fail;
	}

	return (0);

fail:
	/*
	 * Unwind only what was actually set up; each resource is
	 * guarded so this path is safe for partial-attach failures.
	 */
	if (sc->ih_cookie)
		bus_teardown_intr(dev, sc->res[1], sc->ih_cookie);

	if (sc->dmamap)
		bus_dmamap_destroy(sc->dmatag, sc->dmamap);

	if (sc->dmatag)
		bus_dma_tag_destroy(sc->dmatag);

	AML_SDXC_LOCK_DESTROY(sc);

	/* Best effort: power state is irrelevant once attach has failed. */
	(void)aml8726_sdxc_power_off(sc);

	bus_release_resources(dev, aml8726_sdxc_spec, sc->res);

	return (error);
}

/*
 * Detach method: quiesce and release the controller.
 *
 * Returns EBUSY (without touching the hardware) if a command is still
 * in flight; otherwise powers off the slot, resets the state machine,
 * masks interrupts, and frees every resource acquired in attach.
 */
static int
aml8726_sdxc_detach(device_t dev)
{
	struct aml8726_sdxc_softc *sc = device_get_softc(dev);

	AML_SDXC_LOCK(sc);

	/* Refuse to detach while a request is being processed. */
	if (sc->cmd != NULL) {
		AML_SDXC_UNLOCK(sc);
		return (EBUSY);
	}

	/*
	 * Turn off the power, reset the hardware state machine,
	 * and disable the interrupts.
	 */
	aml8726_sdxc_power_off(sc);
	aml8726_sdxc_soft_reset(sc);
	CSR_WRITE_4(sc, AML_SDXC_IRQ_ENABLE_REG, 0);

	AML_SDXC_UNLOCK(sc);

	/* Detach the mmc child before tearing down the interrupt and DMA. */
	bus_generic_detach(dev);

	bus_teardown_intr(dev, sc->res[1], sc->ih_cookie);
	bus_dmamap_destroy(sc->dmatag, sc->dmamap);
	bus_dma_tag_destroy(sc->dmatag);
	AML_SDXC_LOCK_DESTROY(sc);
	bus_release_resources(dev, aml8726_sdxc_spec, sc->res);

	return (0);
}

/*
 * Shutdown method: leave the controller powered down and quiet
 * so it cannot raise interrupts or drive the bus after shutdown.
 */
static int
aml8726_sdxc_shutdown(device_t dev)
{
	struct aml8726_sdxc_softc *sc = device_get_softc(dev);

	/*
	 * Turn off the power, reset the hardware state machine,
	 * and disable the interrupts.
*/
	aml8726_sdxc_power_off(sc);
	aml8726_sdxc_soft_reset(sc);
	CSR_WRITE_4(sc, AML_SDXC_IRQ_ENABLE_REG, 0);

	return (0);
}

/*
 * mmcbr update_ios method: program the controller to match the ios
 * settings chosen by the mmc layer (bus width, clock, power mode).
 *
 * Returns EINVAL for an unsupported bus width or power mode; other
 * errors come from the GPIO power/voltage-select calls.
 */
static int
aml8726_sdxc_update_ios(device_t bus, device_t child)
{
	struct aml8726_sdxc_softc *sc = device_get_softc(bus);
	struct mmc_ios *ios = &sc->host.ios;
	unsigned int divisor;
	int error;
	int i;
	uint32_t cctlr;
	uint32_t clk2r;
	uint32_t ctlr;
	uint32_t freq;

	/* Fixed endian / RX period / RX timeout settings plus bus width. */
	ctlr = (7 << AML_SDXC_CNTRL_TX_ENDIAN_SHIFT) |
	    (7 << AML_SDXC_CNTRL_RX_ENDIAN_SHIFT) |
	    (0xf << AML_SDXC_CNTRL_RX_PERIOD_SHIFT) |
	    (0x7f << AML_SDXC_CNTRL_RX_TIMEOUT_SHIFT);

	switch (ios->bus_width) {
	case bus_width_8:
		ctlr |= AML_SDXC_CNTRL_BUS_WIDTH_8;
		break;
	case bus_width_4:
		ctlr |= AML_SDXC_CNTRL_BUS_WIDTH_4;
		break;
	case bus_width_1:
		ctlr |= AML_SDXC_CNTRL_BUS_WIDTH_1;
		break;
	default:
		return (EINVAL);
	}

	CSR_WRITE_4(sc, AML_SDXC_CNTRL_REG, ctlr);

	/*
	 * Disable clocks and then clock module prior to setting desired values.
	 */
	cctlr = CSR_READ_4(sc, AML_SDXC_CLK_CNTRL_REG);
	cctlr &= ~(AML_SDXC_CLK_CNTRL_TX_CLK_EN | AML_SDXC_CLK_CNTRL_RX_CLK_EN |
	    AML_SDXC_CLK_CNTRL_SD_CLK_EN);
	CSR_WRITE_4(sc, AML_SDXC_CLK_CNTRL_REG, cctlr);
	CSR_BARRIER(sc, AML_SDXC_CLK_CNTRL_REG);
	cctlr &= ~AML_SDXC_CLK_CNTRL_CLK_MODULE_EN;
	CSR_WRITE_4(sc, AML_SDXC_CLK_CNTRL_REG, cctlr);
	CSR_BARRIER(sc, AML_SDXC_CLK_CNTRL_REG);

	/*
	 * aml8726-m8
	 *
	 * Clock select 1 fclk_div2 (1.275 GHz)
	 */
	cctlr &= ~AML_SDXC_CLK_CNTRL_CLK_SEL_MASK;
	cctlr |= (1 << AML_SDXC_CLK_CNTRL_CLK_SEL_SHIFT);

	/*
	 * Pick the largest divisor that does not exceed ios->clock,
	 * clamped to the width of the CLK_DIV field.
	 *
	 * NOTE(review): this divides by ios->clock, so it assumes the mmc
	 * layer never calls update_ios with ios->clock == 0 outside the
	 * power-off path — confirm against the mmc bridge contract.
	 * divisor is unsigned; the "== -1" compare catches the wrap that
	 * occurs when ref_freq < ios->clock.
	 */
	divisor = sc->ref_freq / ios->clock - 1;
	if (divisor == 0 || divisor == -1)
		divisor = 1;
	if ((sc->ref_freq / (divisor + 1)) > ios->clock)
		divisor += 1;
	if (divisor > (AML_SDXC_CLK_CNTRL_CLK_DIV_MASK >>
	    AML_SDXC_CLK_CNTRL_CLK_DIV_SHIFT))
		divisor = AML_SDXC_CLK_CNTRL_CLK_DIV_MASK >>
		    AML_SDXC_CLK_CNTRL_CLK_DIV_SHIFT;
	cctlr &= ~AML_SDXC_CLK_CNTRL_CLK_DIV_MASK;
	cctlr |= divisor << AML_SDXC_CLK_CNTRL_CLK_DIV_SHIFT;

	cctlr &= ~AML_SDXC_CLK_CNTRL_MEM_PWR_MASK;
	cctlr |= AML_SDXC_CLK_CNTRL_MEM_PWR_ON;

	CSR_WRITE_4(sc, AML_SDXC_CLK_CNTRL_REG, cctlr);
	CSR_BARRIER(sc,
AML_SDXC_CLK_CNTRL_REG); /* * Enable clock module and then clocks after setting desired values. */ cctlr |= AML_SDXC_CLK_CNTRL_CLK_MODULE_EN; CSR_WRITE_4(sc, AML_SDXC_CLK_CNTRL_REG, cctlr); CSR_BARRIER(sc, AML_SDXC_CLK_CNTRL_REG); cctlr |= AML_SDXC_CLK_CNTRL_TX_CLK_EN | AML_SDXC_CLK_CNTRL_RX_CLK_EN | AML_SDXC_CLK_CNTRL_SD_CLK_EN; CSR_WRITE_4(sc, AML_SDXC_CLK_CNTRL_REG, cctlr); CSR_BARRIER(sc, AML_SDXC_CLK_CNTRL_REG); freq = sc->ref_freq / divisor; for (i = 0; aml8726_sdxc_clk_phases[i].voltage; i++) { if ((aml8726_sdxc_clk_phases[i].voltage & (1 << ios->vdd)) != 0 && freq > aml8726_sdxc_clk_phases[i].freq) break; if (aml8726_sdxc_clk_phases[i].freq == 0) break; } clk2r = (1 << AML_SDXC_CLK2_SD_PHASE_SHIFT) | (aml8726_sdxc_clk_phases[i].rx_phase << AML_SDXC_CLK2_RX_PHASE_SHIFT); CSR_WRITE_4(sc, AML_SDXC_CLK2_REG, clk2r); CSR_BARRIER(sc, AML_SDXC_CLK2_REG); error = 0; switch (ios->power_mode) { case power_up: /* * Configure and power on the regulator so that the * voltage stabilizes prior to powering on the card. 
*/ if (sc->vselect.dev != NULL) { for (i = 0; i < 2; i++) if ((sc->voltages[i] & (1 << ios->vdd)) != 0) break; if (i >= 2) return (EINVAL); error = GPIO_PIN_SET(sc->vselect.dev, sc->vselect.pin, i); } break; case power_on: error = aml8726_sdxc_power_on(sc); if (error) break; if (sc->card_rst.dev != NULL) { if (GPIO_PIN_SET(sc->card_rst.dev, sc->card_rst.pin, PIN_ON_FLAG(sc->card_rst.pol)) != 0 || GPIO_PIN_SETFLAGS(sc->card_rst.dev, sc->card_rst.pin, GPIO_PIN_OUTPUT) != 0) error = ENXIO; DELAY(5); if (GPIO_PIN_SET(sc->card_rst.dev, sc->card_rst.pin, PIN_OFF_FLAG(sc->card_rst.pol)) != 0) error = ENXIO; DELAY(5); if (error) { device_printf(sc->dev, "could not use gpio to reset card\n"); break; } } break; case power_off: error = aml8726_sdxc_power_off(sc); break; default: return (EINVAL); } return (error); } static int aml8726_sdxc_request(device_t bus, device_t child, struct mmc_request *req) { struct aml8726_sdxc_softc *sc = device_get_softc(bus); int mmc_error; AML_SDXC_LOCK(sc); if (sc->cmd != NULL) { AML_SDXC_UNLOCK(sc); return (EBUSY); } mmc_error = aml8726_sdxc_start_command(sc, req->cmd); AML_SDXC_UNLOCK(sc); /* Execute the callback after dropping the lock. 
*/ if (mmc_error != MMC_ERR_NONE) { req->cmd->error = mmc_error; req->done(req); } return (0); } static int aml8726_sdxc_read_ivar(device_t bus, device_t child, int which, uintptr_t *result) { struct aml8726_sdxc_softc *sc = device_get_softc(bus); switch (which) { case MMCBR_IVAR_BUS_MODE: *(int *)result = sc->host.ios.bus_mode; break; case MMCBR_IVAR_BUS_WIDTH: *(int *)result = sc->host.ios.bus_width; break; case MMCBR_IVAR_CHIP_SELECT: *(int *)result = sc->host.ios.chip_select; break; case MMCBR_IVAR_CLOCK: *(int *)result = sc->host.ios.clock; break; case MMCBR_IVAR_F_MIN: *(int *)result = sc->host.f_min; break; case MMCBR_IVAR_F_MAX: *(int *)result = sc->host.f_max; break; case MMCBR_IVAR_HOST_OCR: *(int *)result = sc->host.host_ocr; break; case MMCBR_IVAR_MODE: *(int *)result = sc->host.mode; break; case MMCBR_IVAR_OCR: *(int *)result = sc->host.ocr; break; case MMCBR_IVAR_POWER_MODE: *(int *)result = sc->host.ios.power_mode; break; case MMCBR_IVAR_VDD: *(int *)result = sc->host.ios.vdd; break; case MMCBR_IVAR_CAPS: *(int *)result = sc->host.caps; break; case MMCBR_IVAR_MAX_DATA: *(int *)result = AML_SDXC_MAX_DMA / MMC_SECTOR_SIZE; break; default: return (EINVAL); } return (0); } static int aml8726_sdxc_write_ivar(device_t bus, device_t child, int which, uintptr_t value) { struct aml8726_sdxc_softc *sc = device_get_softc(bus); switch (which) { case MMCBR_IVAR_BUS_MODE: sc->host.ios.bus_mode = value; break; case MMCBR_IVAR_BUS_WIDTH: sc->host.ios.bus_width = value; break; case MMCBR_IVAR_CHIP_SELECT: sc->host.ios.chip_select = value; break; case MMCBR_IVAR_CLOCK: sc->host.ios.clock = value; break; case MMCBR_IVAR_MODE: sc->host.mode = value; break; case MMCBR_IVAR_OCR: sc->host.ocr = value; break; case MMCBR_IVAR_POWER_MODE: sc->host.ios.power_mode = value; break; case MMCBR_IVAR_VDD: sc->host.ios.vdd = value; break; /* These are read-only */ case MMCBR_IVAR_CAPS: case MMCBR_IVAR_HOST_OCR: case MMCBR_IVAR_F_MIN: case MMCBR_IVAR_F_MAX: case MMCBR_IVAR_MAX_DATA: 
default: return (EINVAL); } return (0); } static int aml8726_sdxc_get_ro(device_t bus, device_t child) { return (0); } static int aml8726_sdxc_acquire_host(device_t bus, device_t child) { struct aml8726_sdxc_softc *sc = device_get_softc(bus); AML_SDXC_LOCK(sc); while (sc->bus_busy) mtx_sleep(sc, &sc->mtx, PZERO, "sdxc", hz / 5); sc->bus_busy++; AML_SDXC_UNLOCK(sc); return (0); } static int aml8726_sdxc_release_host(device_t bus, device_t child) { struct aml8726_sdxc_softc *sc = device_get_softc(bus); AML_SDXC_LOCK(sc); sc->bus_busy--; wakeup(sc); AML_SDXC_UNLOCK(sc); return (0); } static device_method_t aml8726_sdxc_methods[] = { /* Device interface */ DEVMETHOD(device_probe, aml8726_sdxc_probe), DEVMETHOD(device_attach, aml8726_sdxc_attach), DEVMETHOD(device_detach, aml8726_sdxc_detach), DEVMETHOD(device_shutdown, aml8726_sdxc_shutdown), /* Bus interface */ DEVMETHOD(bus_read_ivar, aml8726_sdxc_read_ivar), DEVMETHOD(bus_write_ivar, aml8726_sdxc_write_ivar), /* MMC bridge interface */ DEVMETHOD(mmcbr_update_ios, aml8726_sdxc_update_ios), DEVMETHOD(mmcbr_request, aml8726_sdxc_request), DEVMETHOD(mmcbr_get_ro, aml8726_sdxc_get_ro), DEVMETHOD(mmcbr_acquire_host, aml8726_sdxc_acquire_host), DEVMETHOD(mmcbr_release_host, aml8726_sdxc_release_host), DEVMETHOD_END }; static driver_t aml8726_sdxc_driver = { "aml8726_sdxc", aml8726_sdxc_methods, sizeof(struct aml8726_sdxc_softc), }; static devclass_t aml8726_sdxc_devclass; DRIVER_MODULE(aml8726_sdxc, simplebus, aml8726_sdxc_driver, aml8726_sdxc_devclass, 0, 0); MODULE_DEPEND(aml8726_sdxc, aml8726_gpio, 1, 1, 1); +DRIVER_MODULE(mmc, aml8726_sdxc, mmc_driver, mmc_devclass, NULL, NULL); Index: head/sys/arm/at91/at91_mci.c =================================================================== --- head/sys/arm/at91/at91_mci.c (revision 292179) +++ head/sys/arm/at91/at91_mci.c (revision 292180) @@ -1,1414 +1,1415 @@ /*- * Copyright (c) 2006 Bernd Walter. All rights reserved. * Copyright (c) 2006 M. Warner Losh. All rights reserved. 
* Copyright (c) 2010 Greg Ansley. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "opt_platform.h" #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef FDT #include #include #include #endif #include "mmcbr_if.h" #include "opt_at91.h" /* * About running the MCI bus above 25MHz * * Historically, the MCI bus has been run at 30MHz on systems with a 60MHz * master clock, in part due to a bug in dev/mmc.c making always request * 30MHz, and in part over clocking the bus because 15MHz was too slow. 
* Fixing that bug causes the mmc driver to request a 25MHz clock (as it
* should) and the logic in at91_mci_update_ios() picks the highest speed that
* doesn't exceed that limit.  With a 60MHz MCK that would be 15MHz, and
* that's a real performance buzzkill when you've been getting away with 30MHz
* all along.
*
* By defining AT91_MCI_ALLOW_OVERCLOCK (or setting the allow_overclock=1
* device hint or sysctl) you can enable logic in at91_mci_update_ios() to
* overclock the SD bus a little by running it at MCK / 2 when the requested
* speed is 25MHz and the next highest speed is 15MHz or less.  This appears
* to work on virtually all SD cards, since it is what this driver has been
* doing prior to the introduction of this option, where the overclocking vs
* underclocking decision was automatically "overclock".  Modern SD cards can
* run at 45mhz/1-bit in standard mode (high speed mode enable commands not
* sent) without problems.
*
* Speaking of high-speed mode, the rm9200 manual says the MCI device supports
* the SD v1.0 specification and can run up to 50MHz.  This is interesting in
* that the SD v1.0 spec caps the speed at 25MHz; high speed mode was added in
* the v1.10 spec.  Furthermore, high speed mode doesn't just crank up the
* clock, it alters the signal timing.  The rm9200 MCI device doesn't support
* these altered timings.  So while speeds over 25MHz may work, they only work
* in what the SD spec calls "default" speed mode, and it amounts to violating
* the spec by overclocking the bus.
*
* If you also enable 4-wire mode it's possible transfers faster than 25MHz
* will fail.  On the AT91RM9200, due to bugs in the bus contention logic, if
* you have the USB host device and OHCI driver enabled, transfers will fail.
* Even underclocking to 15MHz, intermittent overrun and underrun errors
* occur.  Note that you don't even need to have usb devices attached to the
* system, the errors begin to occur as soon as the OHCI driver sets the
* register bit to enable periodic transfers.
It appears (based on brief investigation)
* that the usb host controller uses so much ASB bandwidth that sometimes the
* DMA for MCI transfers doesn't get a bus grant in time and data gets
* dropped.  Adding even a modicum of network activity changes the symptom
* from intermittent to very frequent.  Members of the AT91SAM9 family have
* corrected this problem, or are at least better about their use of the bus.
*/

#ifndef AT91_MCI_ALLOW_OVERCLOCK
#define AT91_MCI_ALLOW_OVERCLOCK 1
#endif

/*
* Allocate 2 bounce buffers we'll use to endian-swap the data due to the rm9200
* erratum.  We use a pair of buffers because when reading that lets us begin
* endian-swapping the data in the first buffer while the DMA is reading into
* the second buffer.  (We can't use the same trick for writing because we might
* not get all the data in the 2nd buffer swapped before the hardware needs it;
* dealing with that would add complexity to the driver.)
*
* The buffers are sized at 16K each due to the way the busdma cache sync
* operations work on arm.  A dcache_inv_range() operation on a range larger
* than 16K gets turned into a dcache_wbinv_all().  That needlessly flushes the
* entire data cache, impacting overall system performance.
*/ #define BBCOUNT 2 #define BBSIZE (16*1024) #define MAX_BLOCKS ((BBSIZE*BBCOUNT)/512) static int mci_debug; struct at91_mci_softc { void *intrhand; /* Interrupt handle */ device_t dev; int sc_cap; #define CAP_HAS_4WIRE 1 /* Has 4 wire bus */ #define CAP_NEEDS_BYTESWAP 2 /* broken hardware needing bounce */ #define CAP_MCI1_REV2XX 4 /* MCI 1 rev 2.x */ int flags; #define PENDING_CMD 0x01 #define PENDING_STOP 0x02 #define CMD_MULTIREAD 0x10 #define CMD_MULTIWRITE 0x20 int has_4wire; int allow_overclock; struct resource *irq_res; /* IRQ resource */ struct resource *mem_res; /* Memory resource */ struct mtx sc_mtx; bus_dma_tag_t dmatag; struct mmc_host host; int bus_busy; struct mmc_request *req; struct mmc_command *curcmd; bus_dmamap_t bbuf_map[BBCOUNT]; char * bbuf_vaddr[BBCOUNT]; /* bounce bufs in KVA space */ uint32_t bbuf_len[BBCOUNT]; /* len currently queued for bounce buf */ uint32_t bbuf_curidx; /* which bbuf is the active DMA buffer */ uint32_t xfer_offset; /* offset so far into caller's buf */ }; /* bus entry points */ static int at91_mci_probe(device_t dev); static int at91_mci_attach(device_t dev); static int at91_mci_detach(device_t dev); static void at91_mci_intr(void *); /* helper routines */ static int at91_mci_activate(device_t dev); static void at91_mci_deactivate(device_t dev); static int at91_mci_is_mci1rev2xx(void); #define AT91_MCI_LOCK(_sc) mtx_lock(&(_sc)->sc_mtx) #define AT91_MCI_UNLOCK(_sc) mtx_unlock(&(_sc)->sc_mtx) #define AT91_MCI_LOCK_INIT(_sc) \ mtx_init(&_sc->sc_mtx, device_get_nameunit(_sc->dev), \ "mci", MTX_DEF) #define AT91_MCI_LOCK_DESTROY(_sc) mtx_destroy(&_sc->sc_mtx); #define AT91_MCI_ASSERT_LOCKED(_sc) mtx_assert(&_sc->sc_mtx, MA_OWNED); #define AT91_MCI_ASSERT_UNLOCKED(_sc) mtx_assert(&_sc->sc_mtx, MA_NOTOWNED); static inline uint32_t RD4(struct at91_mci_softc *sc, bus_size_t off) { return (bus_read_4(sc->mem_res, off)); } static inline void WR4(struct at91_mci_softc *sc, bus_size_t off, uint32_t val) { 
bus_write_4(sc->mem_res, off, val); } static void at91_bswap_buf(struct at91_mci_softc *sc, void * dptr, void * sptr, uint32_t memsize) { uint32_t * dst = (uint32_t *)dptr; uint32_t * src = (uint32_t *)sptr; uint32_t i; /* * If the hardware doesn't need byte-swapping, let bcopy() do the * work. Use bounce buffer even if we don't need byteswap, since * buffer may straddle a page boundry, and we don't handle * multi-segment transfers in hardware. Seen from 'bsdlabel -w' which * uses raw geom access to the volume. Greg Ansley (gja (at) * ansley.com) */ if (!(sc->sc_cap & CAP_NEEDS_BYTESWAP)) { memcpy(dptr, sptr, memsize); return; } /* * Nice performance boost for slightly unrolling this loop. * (But very little extra boost for further unrolling it.) */ for (i = 0; i < memsize; i += 16) { *dst++ = bswap32(*src++); *dst++ = bswap32(*src++); *dst++ = bswap32(*src++); *dst++ = bswap32(*src++); } /* Mop up the last 1-3 words, if any. */ for (i = 0; i < (memsize & 0x0F); i += 4) { *dst++ = bswap32(*src++); } } static void at91_mci_getaddr(void *arg, bus_dma_segment_t *segs, int nsegs, int error) { if (error != 0) return; *(bus_addr_t *)arg = segs[0].ds_addr; } static void at91_mci_pdc_disable(struct at91_mci_softc *sc) { WR4(sc, PDC_PTCR, PDC_PTCR_TXTDIS | PDC_PTCR_RXTDIS); WR4(sc, PDC_RPR, 0); WR4(sc, PDC_RCR, 0); WR4(sc, PDC_RNPR, 0); WR4(sc, PDC_RNCR, 0); WR4(sc, PDC_TPR, 0); WR4(sc, PDC_TCR, 0); WR4(sc, PDC_TNPR, 0); WR4(sc, PDC_TNCR, 0); } /* * Reset the controller, then restore most of the current state. * * This is called after detecting an error. It's also called after stopping a * multi-block write, to un-wedge the device so that it will handle the NOTBUSY * signal correctly. See comments in at91_mci_stop_done() for more details. 
*/
/*
 * Reset the controller while preserving its programmed state.
 *
 * Saves the interrupt mask, the low 15 bits of the mode register, the
 * SD card register, and the data-timeout register; performs a software
 * reset; then restores them.  Called after detecting an error and after
 * stopping a multi-block write (see at91_mci_stop_done()).
 */
static void
at91_mci_reset(struct at91_mci_softc *sc)
{
	uint32_t mr;
	uint32_t sdcr;
	uint32_t dtor;
	uint32_t imr;

	at91_mci_pdc_disable(sc);

	/* save current state */
	imr = RD4(sc, MCI_IMR);
	mr = RD4(sc, MCI_MR) & 0x7fff;
	sdcr = RD4(sc, MCI_SDCR);
	dtor = RD4(sc, MCI_DTOR);

	/* reset the controller */
	WR4(sc, MCI_IDR, 0xffffffff);
	WR4(sc, MCI_CR, MCI_CR_MCIDIS | MCI_CR_SWRST);

	/* restore state */
	WR4(sc, MCI_CR, MCI_CR_MCIEN|MCI_CR_PWSEN);
	WR4(sc, MCI_MR, mr);
	WR4(sc, MCI_SDCR, sdcr);
	WR4(sc, MCI_DTOR, dtor);
	WR4(sc, MCI_IER, imr);

	/*
	 * Make sure sdio interrupts will fire.  Not sure why reading
	 * SR ensures that, but this is in the linux driver.
	 */
	RD4(sc, MCI_SR);
}

/*
 * Put the controller into a known initial state: reset it, mask all
 * interrupts, set the data timeout, select PDC (DMA) mode with a safe
 * slow clock, pick the card slot, and finally enable the controller
 * with power-save.
 */
static void
at91_mci_init(device_t dev)
{
	struct at91_mci_softc *sc = device_get_softc(dev);
	uint32_t val;

	WR4(sc, MCI_CR, MCI_CR_MCIDIS | MCI_CR_SWRST); /* device into reset */
	WR4(sc, MCI_IDR, 0xffffffff);		/* Turn off interrupts */
	WR4(sc, MCI_DTOR, MCI_DTOR_DTOMUL_1M | 1);
	val = MCI_MR_PDCMODE;
	val |= 0x34a;		/* PWSDIV = 3; CLKDIV = 74 */
//	if (sc->sc_cap & CAP_MCI1_REV2XX)
//		val |= MCI_MR_RDPROOF | MCI_MR_WRPROOF;
	WR4(sc, MCI_MR, val);
#ifndef AT91_MCI_SLOT_B
	WR4(sc, MCI_SDCR, 0);			/* SLOT A, 1 bit bus */
#else
	/*
	 * XXX Really should add second "unit" but nobody we know of is
	 * using a two slot card. XXX
	 */
	WR4(sc, MCI_SDCR, 1);			/* SLOT B, 1 bit bus */
#endif
	/*
	 * Enable controller, including power-save.  The slower clock
	 * of the power-save mode is only in effect when there is no
	 * transfer in progress, so it can be left in this mode all
	 * the time.
*/ WR4(sc, MCI_CR, MCI_CR_MCIEN|MCI_CR_PWSEN); } static void at91_mci_fini(device_t dev) { struct at91_mci_softc *sc = device_get_softc(dev); WR4(sc, MCI_IDR, 0xffffffff); /* Turn off interrupts */ at91_mci_pdc_disable(sc); WR4(sc, MCI_CR, MCI_CR_MCIDIS | MCI_CR_SWRST); /* device into reset */ } static int at91_mci_probe(device_t dev) { #ifdef FDT if (!ofw_bus_is_compatible(dev, "atmel,hsmci")) return (ENXIO); #endif device_set_desc(dev, "MCI mmc/sd host bridge"); return (0); } static int at91_mci_attach(device_t dev) { struct at91_mci_softc *sc = device_get_softc(dev); struct sysctl_ctx_list *sctx; struct sysctl_oid *soid; device_t child; int err, i; sctx = device_get_sysctl_ctx(dev); soid = device_get_sysctl_tree(dev); sc->dev = dev; sc->sc_cap = 0; if (at91_is_rm92()) sc->sc_cap |= CAP_NEEDS_BYTESWAP; /* * MCI1 Rev 2 controllers need some workarounds, flag if so. */ if (at91_mci_is_mci1rev2xx()) sc->sc_cap |= CAP_MCI1_REV2XX; err = at91_mci_activate(dev); if (err) goto out; AT91_MCI_LOCK_INIT(sc); at91_mci_fini(dev); at91_mci_init(dev); /* * Allocate DMA tags and maps and bounce buffers. * * The parms in the tag_create call cause the dmamem_alloc call to * create each bounce buffer as a single contiguous buffer of BBSIZE * bytes aligned to a 4096 byte boundary. * * Do not use DMA_COHERENT for these buffers because that maps the * memory as non-cachable, which prevents cache line burst fills/writes, * which is something we need since we're trying to overlap the * byte-swapping with the DMA operations. 
*/ err = bus_dma_tag_create(bus_get_dma_tag(dev), 4096, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, BBSIZE, 1, BBSIZE, 0, NULL, NULL, &sc->dmatag); if (err != 0) goto out; for (i = 0; i < BBCOUNT; ++i) { err = bus_dmamem_alloc(sc->dmatag, (void **)&sc->bbuf_vaddr[i], BUS_DMA_NOWAIT, &sc->bbuf_map[i]); if (err != 0) goto out; } /* * Activate the interrupt */ err = bus_setup_intr(dev, sc->irq_res, INTR_TYPE_MISC | INTR_MPSAFE, NULL, at91_mci_intr, sc, &sc->intrhand); if (err) { AT91_MCI_LOCK_DESTROY(sc); goto out; } /* * Allow 4-wire to be initially set via #define. * Allow a device hint to override that. * Allow a sysctl to override that. */ #if defined(AT91_MCI_HAS_4WIRE) && AT91_MCI_HAS_4WIRE != 0 sc->has_4wire = 1; #endif resource_int_value(device_get_name(dev), device_get_unit(dev), "4wire", &sc->has_4wire); SYSCTL_ADD_UINT(sctx, SYSCTL_CHILDREN(soid), OID_AUTO, "4wire", CTLFLAG_RW, &sc->has_4wire, 0, "has 4 wire SD Card bus"); if (sc->has_4wire) sc->sc_cap |= CAP_HAS_4WIRE; sc->allow_overclock = AT91_MCI_ALLOW_OVERCLOCK; resource_int_value(device_get_name(dev), device_get_unit(dev), "allow_overclock", &sc->allow_overclock); SYSCTL_ADD_UINT(sctx, SYSCTL_CHILDREN(soid), OID_AUTO, "allow_overclock", CTLFLAG_RW, &sc->allow_overclock, 0, "Allow up to 30MHz clock for 25MHz request when next highest speed 15MHz or less."); /* * Our real min freq is master_clock/512, but upper driver layers are * going to set the min speed during card discovery, and the right speed * for that is 400kHz, so advertise a safe value just under that. * * For max speed, while the rm9200 manual says the max is 50mhz, it also * says it supports only the SD v1.0 spec, which means the real limit is * 25mhz. On the other hand, historical use has been to slightly violate * the standard by running the bus at 30MHz. For more information on * that, see the comments at the top of this file. 
*/ sc->host.f_min = 375000; sc->host.f_max = at91_master_clock / 2; if (sc->host.f_max > 25000000) sc->host.f_max = 25000000; sc->host.host_ocr = MMC_OCR_320_330 | MMC_OCR_330_340; sc->host.caps = 0; if (sc->sc_cap & CAP_HAS_4WIRE) sc->host.caps |= MMC_CAP_4_BIT_DATA; child = device_add_child(dev, "mmc", 0); device_set_ivars(dev, &sc->host); err = bus_generic_attach(dev); out: if (err) at91_mci_deactivate(dev); return (err); } static int at91_mci_detach(device_t dev) { struct at91_mci_softc *sc = device_get_softc(dev); at91_mci_fini(dev); at91_mci_deactivate(dev); bus_dmamem_free(sc->dmatag, sc->bbuf_vaddr[0], sc->bbuf_map[0]); bus_dmamem_free(sc->dmatag, sc->bbuf_vaddr[1], sc->bbuf_map[1]); bus_dma_tag_destroy(sc->dmatag); return (EBUSY); /* XXX */ } static int at91_mci_activate(device_t dev) { struct at91_mci_softc *sc; int rid; sc = device_get_softc(dev); rid = 0; sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->mem_res == NULL) goto errout; rid = 0; sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE); if (sc->irq_res == NULL) goto errout; return (0); errout: at91_mci_deactivate(dev); return (ENOMEM); } static void at91_mci_deactivate(device_t dev) { struct at91_mci_softc *sc; sc = device_get_softc(dev); if (sc->intrhand) bus_teardown_intr(dev, sc->irq_res, sc->intrhand); sc->intrhand = 0; bus_generic_detach(sc->dev); if (sc->mem_res) bus_release_resource(dev, SYS_RES_MEMORY, rman_get_rid(sc->mem_res), sc->mem_res); sc->mem_res = 0; if (sc->irq_res) bus_release_resource(dev, SYS_RES_IRQ, rman_get_rid(sc->irq_res), sc->irq_res); sc->irq_res = 0; return; } static int at91_mci_is_mci1rev2xx(void) { switch (soc_info.type) { case AT91_T_SAM9260: case AT91_T_SAM9263: case AT91_T_CAP9: case AT91_T_SAM9G10: case AT91_T_SAM9G20: case AT91_T_SAM9RL: return(1); default: return (0); } } static int at91_mci_update_ios(device_t brdev, device_t reqdev) { struct at91_mci_softc *sc; struct mmc_ios *ios; uint32_t clkdiv; 
uint32_t freq; sc = device_get_softc(brdev); ios = &sc->host.ios; /* * Calculate our closest available clock speed that doesn't exceed the * requested speed. * * When overclocking is allowed, the requested clock is 25MHz, the * computed frequency is 15MHz or smaller and clockdiv is 1, use * clockdiv of 0 to double that. If less than 12.5MHz, double * regardless of the overclocking setting. * * Whatever we come up with, store it back into ios->clock so that the * upper layer drivers can report the actual speed of the bus. */ if (ios->clock == 0) { WR4(sc, MCI_CR, MCI_CR_MCIDIS); clkdiv = 0; } else { WR4(sc, MCI_CR, MCI_CR_MCIEN|MCI_CR_PWSEN); if ((at91_master_clock % (ios->clock * 2)) == 0) clkdiv = ((at91_master_clock / ios->clock) / 2) - 1; else clkdiv = (at91_master_clock / ios->clock) / 2; freq = at91_master_clock / ((clkdiv+1) * 2); if (clkdiv == 1 && ios->clock == 25000000 && freq <= 15000000) { if (sc->allow_overclock || freq <= 12500000) { clkdiv = 0; freq = at91_master_clock / ((clkdiv+1) * 2); } } ios->clock = freq; } if (ios->bus_width == bus_width_4) WR4(sc, MCI_SDCR, RD4(sc, MCI_SDCR) | MCI_SDCR_SDCBUS); else WR4(sc, MCI_SDCR, RD4(sc, MCI_SDCR) & ~MCI_SDCR_SDCBUS); WR4(sc, MCI_MR, (RD4(sc, MCI_MR) & ~MCI_MR_CLKDIV) | clkdiv); /* Do we need a settle time here? */ /* XXX We need to turn the device on/off here with a GPIO pin */ return (0); } static void at91_mci_start_cmd(struct at91_mci_softc *sc, struct mmc_command *cmd) { uint32_t cmdr, mr; struct mmc_data *data; sc->curcmd = cmd; data = cmd->data; /* XXX Upper layers don't always set this */ cmd->mrq = sc->req; /* Begin setting up command register. */ cmdr = cmd->opcode; if (sc->host.ios.bus_mode == opendrain) cmdr |= MCI_CMDR_OPDCMD; /* Set up response handling. Allow max timeout for responses. 
*/ if (MMC_RSP(cmd->flags) == MMC_RSP_NONE) cmdr |= MCI_CMDR_RSPTYP_NO; else { cmdr |= MCI_CMDR_MAXLAT; if (cmd->flags & MMC_RSP_136) cmdr |= MCI_CMDR_RSPTYP_136; else cmdr |= MCI_CMDR_RSPTYP_48; } /* * If there is no data transfer, just set up the right interrupt mask * and start the command. * * The interrupt mask needs to be CMDRDY plus all non-data-transfer * errors. It's important to leave the transfer-related errors out, to * avoid spurious timeout or crc errors on a STOP command following a * multiblock read. When a multiblock read is in progress, sending a * STOP in the middle of a block occasionally triggers such errors, but * we're totally disinterested in them because we've already gotten all * the data we wanted without error before sending the STOP command. */ if (data == NULL) { uint32_t ier = MCI_SR_CMDRDY | MCI_SR_RTOE | MCI_SR_RENDE | MCI_SR_RCRCE | MCI_SR_RDIRE | MCI_SR_RINDE; at91_mci_pdc_disable(sc); if (cmd->opcode == MMC_STOP_TRANSMISSION) cmdr |= MCI_CMDR_TRCMD_STOP; /* Ignore response CRC on CMD2 and ACMD41, per standard. */ if (cmd->opcode == MMC_SEND_OP_COND || cmd->opcode == ACMD_SD_SEND_OP_COND) ier &= ~MCI_SR_RCRCE; if (mci_debug) printf("CMDR %x (opcode %d) ARGR %x no data\n", cmdr, cmd->opcode, cmd->arg); WR4(sc, MCI_ARGR, cmd->arg); WR4(sc, MCI_CMDR, cmdr); WR4(sc, MCI_IDR, 0xffffffff); WR4(sc, MCI_IER, ier); return; } /* There is data, set up the transfer-related parts of the command. */ if (data->flags & MMC_DATA_READ) cmdr |= MCI_CMDR_TRDIR; if (data->flags & (MMC_DATA_READ | MMC_DATA_WRITE)) cmdr |= MCI_CMDR_TRCMD_START; if (data->flags & MMC_DATA_STREAM) cmdr |= MCI_CMDR_TRTYP_STREAM; else if (data->flags & MMC_DATA_MULTI) { cmdr |= MCI_CMDR_TRTYP_MULTIPLE; sc->flags |= (data->flags & MMC_DATA_READ) ? CMD_MULTIREAD : CMD_MULTIWRITE; } /* * Disable PDC until we're ready. * * Set block size and turn on PDC mode for dma xfer. * Note that the block size is the smaller of the amount of data to be * transferred, or 512 bytes. 
The 512 size is fixed by the standard; * smaller blocks are possible, but never larger. */ WR4(sc, PDC_PTCR, PDC_PTCR_RXTDIS | PDC_PTCR_TXTDIS); mr = RD4(sc,MCI_MR) & ~MCI_MR_BLKLEN; mr |= min(data->len, 512) << 16; WR4(sc, MCI_MR, mr | MCI_MR_PDCMODE|MCI_MR_PDCPADV); /* * Set up DMA. * * Use bounce buffers even if we don't need to byteswap, because doing * multi-block IO with large DMA buffers is way fast (compared to * single-block IO), even after incurring the overhead of also copying * from/to the caller's buffers (which may be in non-contiguous physical * pages). * * In an ideal non-byteswap world we could create a dma tag that allows * for discontiguous segments and do the IO directly from/to the * caller's buffer(s), using ENDRX/ENDTX interrupts to chain the * discontiguous buffers through the PDC. Someday. * * If a read is bigger than 2k, split it in half so that we can start * byte-swapping the first half while the second half is on the wire. * It would be best if we could split it into 8k chunks, but we can't * always keep up with the byte-swapping due to other system activity, * and if an RXBUFF interrupt happens while we're still handling the * byte-swap from the prior buffer (IE, we haven't returned from * handling the prior interrupt yet), then data will get dropped on the * floor and we can't easily recover from that. The right fix for that * would be to have the interrupt handling only keep the DMA flowing and * enqueue filled buffers to be byte-swapped in a non-interrupt context. * Even that won't work on the write side of things though; in that * context we have to have all the data ready to go before starting the * dma. * * XXX what about stream transfers? 
*/ sc->xfer_offset = 0; sc->bbuf_curidx = 0; if (data->flags & (MMC_DATA_READ | MMC_DATA_WRITE)) { uint32_t len; uint32_t remaining = data->len; bus_addr_t paddr; int err; if (remaining > (BBCOUNT*BBSIZE)) panic("IO read size exceeds MAXDATA\n"); if (data->flags & MMC_DATA_READ) { if (remaining > 2048) // XXX len = remaining / 2; else len = remaining; err = bus_dmamap_load(sc->dmatag, sc->bbuf_map[0], sc->bbuf_vaddr[0], len, at91_mci_getaddr, &paddr, BUS_DMA_NOWAIT); if (err != 0) panic("IO read dmamap_load failed\n"); bus_dmamap_sync(sc->dmatag, sc->bbuf_map[0], BUS_DMASYNC_PREREAD); WR4(sc, PDC_RPR, paddr); WR4(sc, PDC_RCR, len / 4); sc->bbuf_len[0] = len; remaining -= len; if (remaining == 0) { sc->bbuf_len[1] = 0; } else { len = remaining; err = bus_dmamap_load(sc->dmatag, sc->bbuf_map[1], sc->bbuf_vaddr[1], len, at91_mci_getaddr, &paddr, BUS_DMA_NOWAIT); if (err != 0) panic("IO read dmamap_load failed\n"); bus_dmamap_sync(sc->dmatag, sc->bbuf_map[1], BUS_DMASYNC_PREREAD); WR4(sc, PDC_RNPR, paddr); WR4(sc, PDC_RNCR, len / 4); sc->bbuf_len[1] = len; remaining -= len; } WR4(sc, PDC_PTCR, PDC_PTCR_RXTEN); } else { len = min(BBSIZE, remaining); /* * If this is MCI1 revision 2xx controller, apply * a work-around for the "Data Write Operation and * number of bytes" erratum. 
*/ if ((sc->sc_cap & CAP_MCI1_REV2XX) && len < 12) { len = 12; memset(sc->bbuf_vaddr[0], 0, 12); } at91_bswap_buf(sc, sc->bbuf_vaddr[0], data->data, len); err = bus_dmamap_load(sc->dmatag, sc->bbuf_map[0], sc->bbuf_vaddr[0], len, at91_mci_getaddr, &paddr, BUS_DMA_NOWAIT); if (err != 0) panic("IO write dmamap_load failed\n"); bus_dmamap_sync(sc->dmatag, sc->bbuf_map[0], BUS_DMASYNC_PREWRITE); WR4(sc, PDC_TPR,paddr); WR4(sc, PDC_TCR, len / 4); sc->bbuf_len[0] = len; remaining -= len; if (remaining == 0) { sc->bbuf_len[1] = 0; } else { len = remaining; at91_bswap_buf(sc, sc->bbuf_vaddr[1], ((char *)data->data)+BBSIZE, len); err = bus_dmamap_load(sc->dmatag, sc->bbuf_map[1], sc->bbuf_vaddr[1], len, at91_mci_getaddr, &paddr, BUS_DMA_NOWAIT); if (err != 0) panic("IO write dmamap_load failed\n"); bus_dmamap_sync(sc->dmatag, sc->bbuf_map[1], BUS_DMASYNC_PREWRITE); WR4(sc, PDC_TNPR, paddr); WR4(sc, PDC_TNCR, len / 4); sc->bbuf_len[1] = len; remaining -= len; } /* do not enable PDC xfer until CMDRDY asserted */ } data->xfer_len = 0; /* XXX what's this? appears to be unused. 
*/
	}

	/* Debug trace of the data-bearing command we are about to start. */
	if (mci_debug)
		printf("CMDR %x (opcode %d) ARGR %x with data len %d\n",
		    cmdr, cmd->opcode, cmd->arg, cmd->data->len);

	/*
	 * Latch the argument, start the command (the write to MCI_CMDR
	 * initiates it), and interrupt on command-ready or any error.
	 */
	WR4(sc, MCI_ARGR, cmd->arg);
	WR4(sc, MCI_CMDR, cmdr);
	WR4(sc, MCI_IER, MCI_SR_ERROR | MCI_SR_CMDRDY);
}

/*
 * Advance the current mmc request to its next step: first the command
 * itself, then the stop command if the request has one, and finally
 * completion of the request via its done() callback (with all MCI
 * interrupts masked and the softc request state cleared).
 */
static void
at91_mci_next_operation(struct at91_mci_softc *sc)
{
	struct mmc_request *req;

	req = sc->req;
	if (req == NULL)
		return;

	if (sc->flags & PENDING_CMD) {
		sc->flags &= ~PENDING_CMD;
		at91_mci_start_cmd(sc, req->cmd);
		return;
	} else if (sc->flags & PENDING_STOP) {
		sc->flags &= ~PENDING_STOP;
		at91_mci_start_cmd(sc, req->stop);
		return;
	}

	/* Nothing pending; the request is complete. */
	WR4(sc, MCI_IDR, 0xffffffff);
	sc->req = NULL;
	sc->curcmd = NULL;
	//printf("req done\n");
	req->done(req);
}

/*
 * mmcbr_request method: accept a new request if no other is outstanding,
 * note whether a stop command will follow, and kick off processing.
 * Returns EBUSY if a request is already in progress, 0 otherwise.
 */
static int
at91_mci_request(device_t brdev, device_t reqdev, struct mmc_request *req)
{
	struct at91_mci_softc *sc = device_get_softc(brdev);

	AT91_MCI_LOCK(sc);
	if (sc->req != NULL) {
		AT91_MCI_UNLOCK(sc);
		return (EBUSY);
	}
	//printf("new req\n");
	sc->req = req;
	sc->flags = PENDING_CMD;
	if (sc->req->stop)
		sc->flags |= PENDING_STOP;
	at91_mci_next_operation(sc);
	AT91_MCI_UNLOCK(sc);
	return (0);
}

/*
 * mmcbr_get_ro method: no write-protect detection is implemented here,
 * so the card is always reported writable.
 */
static int
at91_mci_get_ro(device_t brdev, device_t reqdev)
{
	return (0);
}

/*
 * mmcbr_acquire_host method: sleep until the bus is free, then claim it.
 * Always returns 0.
 */
static int
at91_mci_acquire_host(device_t brdev, device_t reqdev)
{
	struct at91_mci_softc *sc = device_get_softc(brdev);
	int err = 0;

	AT91_MCI_LOCK(sc);
	while (sc->bus_busy)
		msleep(sc, &sc->sc_mtx, PZERO, "mciah", hz / 5);
	sc->bus_busy++;
	AT91_MCI_UNLOCK(sc);
	return (err);
}

/*
 * mmcbr_release_host method: drop our claim on the bus and wake any
 * thread sleeping in at91_mci_acquire_host().
 */
static int
at91_mci_release_host(device_t brdev, device_t reqdev)
{
	struct at91_mci_softc *sc = device_get_softc(brdev);

	AT91_MCI_LOCK(sc);
	sc->bus_busy--;
	wakeup(sc);
	AT91_MCI_UNLOCK(sc);
	return (0);
}

static void
at91_mci_read_done(struct at91_mci_softc *sc, uint32_t sr)
{
	struct mmc_command *cmd = sc->curcmd;
	char * dataptr = (char *)cmd->data->data;
	uint32_t curidx = sc->bbuf_curidx;
	uint32_t len = sc->bbuf_len[curidx];

	/*
	 * We arrive here when a DMA transfer for a read is done, whether it's
	 * a single or multi-block read.
*
	 * We byte-swap the buffer that just completed, and if that is the
	 * last buffer that's part of this read then we move on to the next
	 * operation, otherwise we wait for another ENDRX for the next buffer.
	 */
	bus_dmamap_sync(sc->dmatag, sc->bbuf_map[curidx],
	    BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(sc->dmatag, sc->bbuf_map[curidx]);

	/* Copy (byte-swapping) from the bounce buffer to the caller's data. */
	at91_bswap_buf(sc, dataptr + sc->xfer_offset, sc->bbuf_vaddr[curidx],
	    len);

	if (mci_debug) {
		printf("read done sr %x curidx %d len %d xfer_offset %d\n",
		    sr, curidx, len, sc->xfer_offset);
	}

	sc->xfer_offset += len;
	sc->bbuf_curidx = !curidx; /* swap buffers */

	/*
	 * If we've transferred all the data, move on to the next operation.
	 *
	 * If we're still transferring the last buffer, RNCR is already zero but
	 * we have to write a zero anyway to clear the ENDRX status so we don't
	 * re-interrupt until the last buffer is done.
	 */
	if (sc->xfer_offset == cmd->data->len) {
		WR4(sc, PDC_PTCR, PDC_PTCR_RXTDIS | PDC_PTCR_TXTDIS);
		cmd->error = MMC_ERR_NONE;
		at91_mci_next_operation(sc);
	} else {
		WR4(sc, PDC_RNCR, 0);
		WR4(sc, MCI_IER, MCI_SR_ERROR | MCI_SR_ENDRX);
	}
}

static void
at91_mci_write_done(struct at91_mci_softc *sc, uint32_t sr)
{
	struct mmc_command *cmd = sc->curcmd;

	/*
	 * We arrive here when the entire DMA transfer for a write is done,
	 * whether it's a single or multi-block write.  If it's multi-block we
	 * have to immediately move on to the next operation which is to send
	 * the stop command.  If it's a single-block transfer we need to wait
	 * for NOTBUSY, but if that's already asserted we can avoid another
	 * interrupt and just move on to completing the request right away.
*/
	WR4(sc, PDC_PTCR, PDC_PTCR_RXTDIS | PDC_PTCR_TXTDIS);

	bus_dmamap_sync(sc->dmatag, sc->bbuf_map[sc->bbuf_curidx],
	    BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->dmatag, sc->bbuf_map[sc->bbuf_curidx]);

	if ((cmd->data->flags & MMC_DATA_MULTI) || (sr & MCI_SR_NOTBUSY)) {
		cmd->error = MMC_ERR_NONE;
		at91_mci_next_operation(sc);
	} else {
		/* Single-block write and the card is still busy: wait. */
		WR4(sc, MCI_IER, MCI_SR_ERROR | MCI_SR_NOTBUSY);
	}
}

static void
at91_mci_notbusy(struct at91_mci_softc *sc)
{
	struct mmc_command *cmd = sc->curcmd;

	/*
	 * We arrive here by either completion of a single-block write, or
	 * completion of the stop command that ended a multi-block write (and,
	 * I suppose, after a card-select or erase, but I haven't tested
	 * those).  Anyway, we're done and it's time to move on to the next
	 * command.
	 */
	cmd->error = MMC_ERR_NONE;
	at91_mci_next_operation(sc);
}

static void
at91_mci_stop_done(struct at91_mci_softc *sc, uint32_t sr)
{
	struct mmc_command *cmd = sc->curcmd;

	/*
	 * We arrive here after receiving CMDRDY for a MMC_STOP_TRANSMISSION
	 * command.  Depending on the operation being stopped, we may have to
	 * do some unusual things to work around hardware bugs.
	 */

	/*
	 * This is known to be true of at91rm9200 hardware; it may or may not
	 * apply to more recent chips:
	 *
	 * After stopping a multi-block write, the NOTBUSY bit in MCI_SR does
	 * not properly reflect the actual busy state of the card as signaled
	 * on the DAT0 line; it always claims the card is not-busy.  If we
	 * believe that and let operations continue, following commands will
	 * fail with response timeouts (except of course MMC_SEND_STATUS -- it
	 * indicates the card is busy in the PRG state, which was the smoking
	 * gun that showed MCI_SR NOTBUSY was not tracking DAT0 correctly).
	 *
	 * The atmel docs are emphatic: "This flag [NOTBUSY] must be used only
	 * for Write Operations."  I guess technically since we sent a stop
	 * it's not a write operation anymore.
But then just what did they
	 * think it meant for the stop command to have "...an optional busy
	 * signal transmitted on the data line" according to the SD spec?
	 *
	 * I tried a variety of things to un-wedge the MCI and get the status
	 * register to reflect NOTBUSY correctly again, but the only thing
	 * that worked was a full device reset.  It feels like an awfully big
	 * hammer, but doing a full reset after every multiblock write is
	 * still faster than doing single-block IO (by almost two orders of
	 * magnitude: 20KB/sec improves to about 1.8MB/sec best case).
	 *
	 * After doing the reset, wait for a NOTBUSY interrupt before
	 * continuing with the next operation.
	 *
	 * This workaround breaks multiwrite on the rev2xx parts, but some other
	 * workaround is needed.
	 */
	if ((sc->flags & CMD_MULTIWRITE) && (sc->sc_cap & CAP_NEEDS_BYTESWAP)) {
		at91_mci_reset(sc);
		WR4(sc, MCI_IER, MCI_SR_ERROR | MCI_SR_NOTBUSY);
		return;
	}

	/*
	 * This is known to be true of at91rm9200 hardware; it may or may not
	 * apply to more recent chips:
	 *
	 * After stopping a multi-block read, loop to read and discard any
	 * data that coasts in after we sent the stop command.  The docs don't
	 * say anything about it, but empirical testing shows that 1-3
	 * additional words of data get buffered up in some unmentioned
	 * internal fifo and if we don't read and discard them here they end
	 * up on the front of the next read DMA transfer we do.
	 *
	 * This appears to be unnecessary for rev2xx parts.
	 */
	if ((sc->flags & CMD_MULTIREAD) && (sc->sc_cap & CAP_NEEDS_BYTESWAP)) {
		/*
		 * NOTE(review): this local 'sr' shadows the function
		 * parameter, and 'count' is tallied but never read --
		 * presumably leftover debug instrumentation.
		 */
		uint32_t sr;
		int count = 0;

		/* Drain any residual words from the receive path. */
		do {
			sr = RD4(sc, MCI_SR);
			if (sr & MCI_SR_RXRDY) {
				RD4(sc, MCI_RDR);
				++count;
			}
		} while (sr & MCI_SR_RXRDY);
		at91_mci_reset(sc);
	}

	cmd->error = MMC_ERR_NONE;
	at91_mci_next_operation(sc);
}

static void
at91_mci_cmdrdy(struct at91_mci_softc *sc, uint32_t sr)
{
	struct mmc_command *cmd = sc->curcmd;
	int i;

	if (cmd == NULL)
		return;

	/*
	 * We get here at the end of EVERY command.
We retrieve the command
	 * response (if any) then decide what to do next based on the command.
	 */
	if (cmd->flags & MMC_RSP_PRESENT) {
		/* A 136-bit (R2) response occupies four 32-bit registers. */
		for (i = 0; i < ((cmd->flags & MMC_RSP_136) ? 4 : 1); i++) {
			cmd->resp[i] = RD4(sc, MCI_RSPR + i * 4);
			if (mci_debug)
				printf("RSPR[%d] = %x sr=%x\n", i,
				    cmd->resp[i], sr);
		}
	}

	/*
	 * If this was a stop command, go handle the various special
	 * conditions (read: bugs) that have to be dealt with following a stop.
	 */
	if (cmd->opcode == MMC_STOP_TRANSMISSION) {
		at91_mci_stop_done(sc, sr);
		return;
	}

	/*
	 * If this command can continue to assert BUSY beyond the response then
	 * we need to wait for NOTBUSY before the command is really done.
	 *
	 * Note that this may not work properly on the at91rm9200.  It certainly
	 * doesn't work for the STOP command that follows a multi-block write,
	 * so post-stop CMDRDY is handled separately; see the special handling
	 * in at91_mci_stop_done().
	 *
	 * Beside STOP, there are other R1B-type commands that use the busy
	 * signal after CMDRDY: CMD7 (card select), CMD28-29 (write protect),
	 * CMD38 (erase).  I haven't tested any of them, but I rather expect
	 * them all to have the same sort of problem with MCI_SR not actually
	 * reflecting the state of the DAT0-line busy indicator.  So this code
	 * may need to grow some sort of special handling for them too.  (This
	 * just in: CMD7 isn't a problem right now because dev/mmc.c incorrectly
	 * sets the response flags to R1 rather than R1B.) XXX
	 */
	if ((cmd->flags & MMC_RSP_BUSY)) {
		WR4(sc, MCI_IER, MCI_SR_ERROR | MCI_SR_NOTBUSY);
		return;
	}

	/*
	 * If there is a data transfer with this command, then...
	 * - If it's a read, we need to wait for ENDRX.
	 * - If it's a write, now is the time to enable the PDC, and we need
	 *   to wait for a BLKE that follows a TXBUFE, because if we're doing
	 *   a split transfer we get a BLKE after the first half (when TPR/TCR
	 *   get loaded from TNPR/TNCR).  So first we wait for the TXBUFE, and
	 *   the handling for that interrupt will then invoke the wait for the
	 *   subsequent BLKE which indicates actual completion.
	 */
	if (cmd->data) {
		uint32_t ier;
		if (cmd->data->flags & MMC_DATA_READ) {
			ier = MCI_SR_ENDRX;
		} else {
			ier = MCI_SR_TXBUFE;
			WR4(sc, PDC_PTCR, PDC_PTCR_TXTEN);
		}
		WR4(sc, MCI_IER, MCI_SR_ERROR | ier);
		return;
	}

	/*
	 * If we made it to here, we don't need to wait for anything more for
	 * the current command, move on to the next command (will complete the
	 * request if there is no next command).
	 */
	cmd->error = MMC_ERR_NONE;
	at91_mci_next_operation(sc);
}

/*
 * Interrupt handler.  Reads the status register, masks it down to the
 * events we enabled, disables those (all interrupts here are one-shot),
 * and dispatches to the per-event completion handlers.
 */
static void
at91_mci_intr(void *arg)
{
	struct at91_mci_softc *sc = (struct at91_mci_softc*)arg;
	struct mmc_command *cmd = sc->curcmd;
	uint32_t sr, isr;

	AT91_MCI_LOCK(sc);

	sr = RD4(sc, MCI_SR);
	isr = sr & RD4(sc, MCI_IMR);

	if (mci_debug)
		printf("i 0x%x sr 0x%x\n", isr, sr);

	/*
	 * All interrupts are one-shot; disable it now.
	 * The next operation will re-enable whatever interrupts it wants.
	 */
	WR4(sc, MCI_IDR, isr);
	if (isr & MCI_SR_ERROR) {
		/* Translate the hardware error bits to an mmc error code. */
		if (isr & (MCI_SR_RTOE | MCI_SR_DTOE))
			cmd->error = MMC_ERR_TIMEOUT;
		else if (isr & (MCI_SR_RCRCE | MCI_SR_DCRCE))
			cmd->error = MMC_ERR_BADCRC;
		else if (isr & (MCI_SR_OVRE | MCI_SR_UNRE))
			cmd->error = MMC_ERR_FIFO;
		else
			cmd->error = MMC_ERR_FAILED;
		/*
		 * CMD8 is used to probe for SDHC cards, a standard SD card
		 * will get a response timeout; don't report it because it's a
		 * normal and expected condition.  One might argue that all
		 * error reporting should be left to higher levels, but when
		 * they report at all it's always EIO, which isn't very
		 * helpful.  XXX bootverbose?
		 */
		if (cmd->opcode != 8) {
			device_printf(sc->dev,
			    "IO error; status MCI_SR = 0x%b cmd opcode = %d%s\n",
			    sr, MCI_SR_BITSTRING, cmd->opcode,
			    (cmd->opcode != 12) ? "" :
			    (sc->flags & CMD_MULTIREAD) ?
" after read" : " after write"); /* XXX not sure RTOE needs a full reset, just a retry */ at91_mci_reset(sc); } at91_mci_next_operation(sc); } else { if (isr & MCI_SR_TXBUFE) { // printf("TXBUFE\n"); /* * We need to wait for a BLKE that follows TXBUFE * (intermediate BLKEs might happen after ENDTXes if * we're chaining multiple buffers). If BLKE is also * asserted at the time we get TXBUFE, we can avoid * another interrupt and process it right away, below. */ if (sr & MCI_SR_BLKE) isr |= MCI_SR_BLKE; else WR4(sc, MCI_IER, MCI_SR_BLKE); } if (isr & MCI_SR_RXBUFF) { // printf("RXBUFF\n"); } if (isr & MCI_SR_ENDTX) { // printf("ENDTX\n"); } if (isr & MCI_SR_ENDRX) { // printf("ENDRX\n"); at91_mci_read_done(sc, sr); } if (isr & MCI_SR_NOTBUSY) { // printf("NOTBUSY\n"); at91_mci_notbusy(sc); } if (isr & MCI_SR_DTIP) { // printf("Data transfer in progress\n"); } if (isr & MCI_SR_BLKE) { // printf("Block transfer end\n"); at91_mci_write_done(sc, sr); } if (isr & MCI_SR_TXRDY) { // printf("Ready to transmit\n"); } if (isr & MCI_SR_RXRDY) { // printf("Ready to receive\n"); } if (isr & MCI_SR_CMDRDY) { // printf("Command ready\n"); at91_mci_cmdrdy(sc, sr); } } AT91_MCI_UNLOCK(sc); } static int at91_mci_read_ivar(device_t bus, device_t child, int which, uintptr_t *result) { struct at91_mci_softc *sc = device_get_softc(bus); switch (which) { default: return (EINVAL); case MMCBR_IVAR_BUS_MODE: *(int *)result = sc->host.ios.bus_mode; break; case MMCBR_IVAR_BUS_WIDTH: *(int *)result = sc->host.ios.bus_width; break; case MMCBR_IVAR_CHIP_SELECT: *(int *)result = sc->host.ios.chip_select; break; case MMCBR_IVAR_CLOCK: *(int *)result = sc->host.ios.clock; break; case MMCBR_IVAR_F_MIN: *(int *)result = sc->host.f_min; break; case MMCBR_IVAR_F_MAX: *(int *)result = sc->host.f_max; break; case MMCBR_IVAR_HOST_OCR: *(int *)result = sc->host.host_ocr; break; case MMCBR_IVAR_MODE: *(int *)result = sc->host.mode; break; case MMCBR_IVAR_OCR: *(int *)result = sc->host.ocr; break; case 
MMCBR_IVAR_POWER_MODE: *(int *)result = sc->host.ios.power_mode; break; case MMCBR_IVAR_VDD: *(int *)result = sc->host.ios.vdd; break; case MMCBR_IVAR_CAPS: if (sc->has_4wire) { sc->sc_cap |= CAP_HAS_4WIRE; sc->host.caps |= MMC_CAP_4_BIT_DATA; } else { sc->sc_cap &= ~CAP_HAS_4WIRE; sc->host.caps &= ~MMC_CAP_4_BIT_DATA; } *(int *)result = sc->host.caps; break; case MMCBR_IVAR_MAX_DATA: /* * Something is wrong with the 2x parts and multiblock, so * just do 1 block at a time for now, which really kills * performance. */ if (sc->sc_cap & CAP_MCI1_REV2XX) *(int *)result = 1; else *(int *)result = MAX_BLOCKS; break; } return (0); } static int at91_mci_write_ivar(device_t bus, device_t child, int which, uintptr_t value) { struct at91_mci_softc *sc = device_get_softc(bus); switch (which) { default: return (EINVAL); case MMCBR_IVAR_BUS_MODE: sc->host.ios.bus_mode = value; break; case MMCBR_IVAR_BUS_WIDTH: sc->host.ios.bus_width = value; break; case MMCBR_IVAR_CHIP_SELECT: sc->host.ios.chip_select = value; break; case MMCBR_IVAR_CLOCK: sc->host.ios.clock = value; break; case MMCBR_IVAR_MODE: sc->host.mode = value; break; case MMCBR_IVAR_OCR: sc->host.ocr = value; break; case MMCBR_IVAR_POWER_MODE: sc->host.ios.power_mode = value; break; case MMCBR_IVAR_VDD: sc->host.ios.vdd = value; break; /* These are read-only */ case MMCBR_IVAR_CAPS: case MMCBR_IVAR_HOST_OCR: case MMCBR_IVAR_F_MIN: case MMCBR_IVAR_F_MAX: case MMCBR_IVAR_MAX_DATA: return (EINVAL); } return (0); } static device_method_t at91_mci_methods[] = { /* device_if */ DEVMETHOD(device_probe, at91_mci_probe), DEVMETHOD(device_attach, at91_mci_attach), DEVMETHOD(device_detach, at91_mci_detach), /* Bus interface */ DEVMETHOD(bus_read_ivar, at91_mci_read_ivar), DEVMETHOD(bus_write_ivar, at91_mci_write_ivar), /* mmcbr_if */ DEVMETHOD(mmcbr_update_ios, at91_mci_update_ios), DEVMETHOD(mmcbr_request, at91_mci_request), DEVMETHOD(mmcbr_get_ro, at91_mci_get_ro), DEVMETHOD(mmcbr_acquire_host, at91_mci_acquire_host), 
DEVMETHOD(mmcbr_release_host, at91_mci_release_host), DEVMETHOD_END }; static driver_t at91_mci_driver = { "at91_mci", at91_mci_methods, sizeof(struct at91_mci_softc), }; static devclass_t at91_mci_devclass; #ifdef FDT DRIVER_MODULE(at91_mci, simplebus, at91_mci_driver, at91_mci_devclass, NULL, NULL); #else DRIVER_MODULE(at91_mci, atmelarm, at91_mci_driver, at91_mci_devclass, NULL, NULL); #endif +DRIVER_MODULE(mmc, at91_mci, mmc_driver, mmc_devclass, NULL, NULL); Index: head/sys/arm/broadcom/bcm2835/bcm2835_sdhci.c =================================================================== --- head/sys/arm/broadcom/bcm2835/bcm2835_sdhci.c (revision 292179) +++ head/sys/arm/broadcom/bcm2835/bcm2835_sdhci.c (revision 292180) @@ -1,677 +1,678 @@ /*- * Copyright (c) 2012 Oleksandr Tymoshenko * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "sdhci_if.h" #include "bcm2835_dma.h" #include #include "bcm2835_vcbus.h" #define BCM2835_DEFAULT_SDHCI_FREQ 50 #define BCM_SDHCI_BUFFER_SIZE 512 #define NUM_DMA_SEGS 2 #ifdef DEBUG #define dprintf(fmt, args...) do { printf("%s(): ", __func__); \ printf(fmt,##args); } while (0) #else #define dprintf(fmt, args...) 
#endif static int bcm2835_sdhci_hs = 1; static int bcm2835_sdhci_pio_mode = 0; TUNABLE_INT("hw.bcm2835.sdhci.hs", &bcm2835_sdhci_hs); TUNABLE_INT("hw.bcm2835.sdhci.pio_mode", &bcm2835_sdhci_pio_mode); struct bcm_sdhci_softc { device_t sc_dev; struct resource * sc_mem_res; struct resource * sc_irq_res; bus_space_tag_t sc_bst; bus_space_handle_t sc_bsh; void * sc_intrhand; struct mmc_request * sc_req; struct sdhci_slot sc_slot; int sc_dma_ch; bus_dma_tag_t sc_dma_tag; bus_dmamap_t sc_dma_map; vm_paddr_t sc_sdhci_buffer_phys; uint32_t cmd_and_mode; bus_addr_t dmamap_seg_addrs[NUM_DMA_SEGS]; bus_size_t dmamap_seg_sizes[NUM_DMA_SEGS]; int dmamap_seg_count; int dmamap_seg_index; int dmamap_status; }; static int bcm_sdhci_probe(device_t); static int bcm_sdhci_attach(device_t); static int bcm_sdhci_detach(device_t); static void bcm_sdhci_intr(void *); static int bcm_sdhci_get_ro(device_t, device_t); static void bcm_sdhci_dma_intr(int ch, void *arg); static void bcm_sdhci_dmacb(void *arg, bus_dma_segment_t *segs, int nseg, int err) { struct bcm_sdhci_softc *sc = arg; int i; sc->dmamap_status = err; sc->dmamap_seg_count = nseg; /* Note nseg is guaranteed to be zero if err is non-zero. 
*/ for (i = 0; i < nseg; i++) { sc->dmamap_seg_addrs[i] = segs[i].ds_addr; sc->dmamap_seg_sizes[i] = segs[i].ds_len; } } static int bcm_sdhci_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (!ofw_bus_is_compatible(dev, "broadcom,bcm2835-sdhci")) return (ENXIO); device_set_desc(dev, "Broadcom 2708 SDHCI controller"); return (BUS_PROBE_DEFAULT); } static int bcm_sdhci_attach(device_t dev) { struct bcm_sdhci_softc *sc = device_get_softc(dev); int rid, err; phandle_t node; pcell_t cell; u_int default_freq; sc->sc_dev = dev; sc->sc_req = NULL; err = bcm2835_mbox_set_power_state(BCM2835_MBOX_POWER_ID_EMMC, TRUE); if (err != 0) { if (bootverbose) device_printf(dev, "Unable to enable the power\n"); return (err); } default_freq = 0; err = bcm2835_mbox_get_clock_rate(BCM2835_MBOX_CLOCK_ID_EMMC, &default_freq); if (err == 0) { /* Convert to MHz */ default_freq /= 1000000; } if (default_freq == 0) { node = ofw_bus_get_node(sc->sc_dev); if ((OF_getencprop(node, "clock-frequency", &cell, sizeof(cell))) > 0) default_freq = cell / 1000000; } if (default_freq == 0) default_freq = BCM2835_DEFAULT_SDHCI_FREQ; if (bootverbose) device_printf(dev, "SDHCI frequency: %dMHz\n", default_freq); rid = 0; sc->sc_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (!sc->sc_mem_res) { device_printf(dev, "cannot allocate memory window\n"); err = ENXIO; goto fail; } sc->sc_bst = rman_get_bustag(sc->sc_mem_res); sc->sc_bsh = rman_get_bushandle(sc->sc_mem_res); rid = 0; sc->sc_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE); if (!sc->sc_irq_res) { device_printf(dev, "cannot allocate interrupt\n"); err = ENXIO; goto fail; } if (bus_setup_intr(dev, sc->sc_irq_res, INTR_TYPE_BIO | INTR_MPSAFE, NULL, bcm_sdhci_intr, sc, &sc->sc_intrhand)) { device_printf(dev, "cannot setup interrupt handler\n"); err = ENXIO; goto fail; } if (!bcm2835_sdhci_pio_mode) sc->sc_slot.opt = SDHCI_PLATFORM_TRANSFER; sc->sc_slot.caps = SDHCI_CAN_VDD_330 | 
SDHCI_CAN_VDD_180; if (bcm2835_sdhci_hs) sc->sc_slot.caps |= SDHCI_CAN_DO_HISPD; sc->sc_slot.caps |= (default_freq << SDHCI_CLOCK_BASE_SHIFT); sc->sc_slot.quirks = SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK | SDHCI_QUIRK_BROKEN_TIMEOUT_VAL | SDHCI_QUIRK_DONT_SET_HISPD_BIT | SDHCI_QUIRK_MISSING_CAPS; sdhci_init_slot(dev, &sc->sc_slot, 0); sc->sc_dma_ch = bcm_dma_allocate(BCM_DMA_CH_FAST1); if (sc->sc_dma_ch == BCM_DMA_CH_INVALID) sc->sc_dma_ch = bcm_dma_allocate(BCM_DMA_CH_FAST2); if (sc->sc_dma_ch == BCM_DMA_CH_INVALID) sc->sc_dma_ch = bcm_dma_allocate(BCM_DMA_CH_ANY); if (sc->sc_dma_ch == BCM_DMA_CH_INVALID) goto fail; bcm_dma_setup_intr(sc->sc_dma_ch, bcm_sdhci_dma_intr, sc); /* Allocate bus_dma resources. */ err = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, BCM_SDHCI_BUFFER_SIZE, NUM_DMA_SEGS, BCM_SDHCI_BUFFER_SIZE, BUS_DMA_ALLOCNOW, NULL, NULL, &sc->sc_dma_tag); if (err) { device_printf(dev, "failed allocate DMA tag"); goto fail; } err = bus_dmamap_create(sc->sc_dma_tag, 0, &sc->sc_dma_map); if (err) { device_printf(dev, "bus_dmamap_create failed\n"); goto fail; } sc->sc_sdhci_buffer_phys = BUS_SPACE_PHYSADDR(sc->sc_mem_res, SDHCI_BUFFER); bus_generic_probe(dev); bus_generic_attach(dev); sdhci_start_slot(&sc->sc_slot); return (0); fail: if (sc->sc_intrhand) bus_teardown_intr(dev, sc->sc_irq_res, sc->sc_intrhand); if (sc->sc_irq_res) bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq_res); if (sc->sc_mem_res) bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->sc_mem_res); return (err); } static int bcm_sdhci_detach(device_t dev) { return (EBUSY); } static void bcm_sdhci_intr(void *arg) { struct bcm_sdhci_softc *sc = arg; sdhci_generic_intr(&sc->sc_slot); } static int bcm_sdhci_get_ro(device_t bus, device_t child) { return (0); } static inline uint32_t RD4(struct bcm_sdhci_softc *sc, bus_size_t off) { uint32_t val = bus_space_read_4(sc->sc_bst, sc->sc_bsh, off); return val; } static inline void WR4(struct 
bcm_sdhci_softc *sc, bus_size_t off, uint32_t val) { bus_space_write_4(sc->sc_bst, sc->sc_bsh, off, val); /* * The Arasan HC has a bug where it may lose the content of * consecutive writes to registers that are within two SD-card * clock cycles of each other (a clock domain crossing problem). */ if (sc->sc_slot.clock > 0) DELAY(((2 * 1000000) / sc->sc_slot.clock) + 1); } static uint8_t bcm_sdhci_read_1(device_t dev, struct sdhci_slot *slot, bus_size_t off) { struct bcm_sdhci_softc *sc = device_get_softc(dev); uint32_t val = RD4(sc, off & ~3); return ((val >> (off & 3)*8) & 0xff); } static uint16_t bcm_sdhci_read_2(device_t dev, struct sdhci_slot *slot, bus_size_t off) { struct bcm_sdhci_softc *sc = device_get_softc(dev); uint32_t val = RD4(sc, off & ~3); /* * Standard 32-bit handling of command and transfer mode. */ if (off == SDHCI_TRANSFER_MODE) { return (sc->cmd_and_mode >> 16); } else if (off == SDHCI_COMMAND_FLAGS) { return (sc->cmd_and_mode & 0x0000ffff); } return ((val >> (off & 3)*8) & 0xffff); } static uint32_t bcm_sdhci_read_4(device_t dev, struct sdhci_slot *slot, bus_size_t off) { struct bcm_sdhci_softc *sc = device_get_softc(dev); return RD4(sc, off); } static void bcm_sdhci_read_multi_4(device_t dev, struct sdhci_slot *slot, bus_size_t off, uint32_t *data, bus_size_t count) { struct bcm_sdhci_softc *sc = device_get_softc(dev); bus_space_read_multi_4(sc->sc_bst, sc->sc_bsh, off, data, count); } static void bcm_sdhci_write_1(device_t dev, struct sdhci_slot *slot, bus_size_t off, uint8_t val) { struct bcm_sdhci_softc *sc = device_get_softc(dev); uint32_t val32 = RD4(sc, off & ~3); val32 &= ~(0xff << (off & 3)*8); val32 |= (val << (off & 3)*8); WR4(sc, off & ~3, val32); } static void bcm_sdhci_write_2(device_t dev, struct sdhci_slot *slot, bus_size_t off, uint16_t val) { struct bcm_sdhci_softc *sc = device_get_softc(dev); uint32_t val32; if (off == SDHCI_COMMAND_FLAGS) val32 = sc->cmd_and_mode; else val32 = RD4(sc, off & ~3); val32 &= ~(0xffff << (off & 
3)*8); val32 |= (val << (off & 3)*8); if (off == SDHCI_TRANSFER_MODE) sc->cmd_and_mode = val32; else { WR4(sc, off & ~3, val32); if (off == SDHCI_COMMAND_FLAGS) sc->cmd_and_mode = val32; } } static void bcm_sdhci_write_4(device_t dev, struct sdhci_slot *slot, bus_size_t off, uint32_t val) { struct bcm_sdhci_softc *sc = device_get_softc(dev); WR4(sc, off, val); } static void bcm_sdhci_write_multi_4(device_t dev, struct sdhci_slot *slot, bus_size_t off, uint32_t *data, bus_size_t count) { struct bcm_sdhci_softc *sc = device_get_softc(dev); bus_space_write_multi_4(sc->sc_bst, sc->sc_bsh, off, data, count); } static void bcm_sdhci_start_dma_seg(struct bcm_sdhci_softc *sc) { struct sdhci_slot *slot; vm_paddr_t pdst, psrc; int err, idx, len, sync_op; slot = &sc->sc_slot; idx = sc->dmamap_seg_index++; len = sc->dmamap_seg_sizes[idx]; slot->offset += len; if (slot->curcmd->data->flags & MMC_DATA_READ) { bcm_dma_setup_src(sc->sc_dma_ch, BCM_DMA_DREQ_EMMC, BCM_DMA_SAME_ADDR, BCM_DMA_32BIT); bcm_dma_setup_dst(sc->sc_dma_ch, BCM_DMA_DREQ_NONE, BCM_DMA_INC_ADDR, (len & 0xf) ? BCM_DMA_32BIT : BCM_DMA_128BIT); psrc = sc->sc_sdhci_buffer_phys; pdst = sc->dmamap_seg_addrs[idx]; sync_op = BUS_DMASYNC_PREREAD; } else { bcm_dma_setup_src(sc->sc_dma_ch, BCM_DMA_DREQ_NONE, BCM_DMA_INC_ADDR, (len & 0xf) ? BCM_DMA_32BIT : BCM_DMA_128BIT); bcm_dma_setup_dst(sc->sc_dma_ch, BCM_DMA_DREQ_EMMC, BCM_DMA_SAME_ADDR, BCM_DMA_32BIT); psrc = sc->dmamap_seg_addrs[idx]; pdst = sc->sc_sdhci_buffer_phys; sync_op = BUS_DMASYNC_PREWRITE; } /* * When starting a new DMA operation do the busdma sync operation, and * disable SDCHI data interrrupts because we'll be driven by DMA * interrupts (or SDHCI error interrupts) until the IO is done. 
*/
	if (idx == 0) {
		bus_dmamap_sync(sc->sc_dma_tag, sc->sc_dma_map, sync_op);

		slot->intmask &= ~(SDHCI_INT_DATA_AVAIL |
		    SDHCI_INT_SPACE_AVAIL | SDHCI_INT_DATA_END);
		bcm_sdhci_write_4(sc->sc_dev, &sc->sc_slot,
		    SDHCI_SIGNAL_ENABLE, slot->intmask);
	}

	/*
	 * Start the DMA transfer.  Only programming errors (like failing to
	 * allocate a channel) cause a non-zero return from bcm_dma_start().
	 */
	err = bcm_dma_start(sc->sc_dma_ch, psrc, pdst, len);
	KASSERT((err == 0), ("bcm2835_sdhci: failed DMA start"));
}

/*
 * DMA-channel completion callback, invoked by the bcm283x DMA engine code
 * when the current segment transfer finishes.
 */
static void
bcm_sdhci_dma_intr(int ch, void *arg)
{
	struct bcm_sdhci_softc *sc = (struct bcm_sdhci_softc *)arg;
	struct sdhci_slot *slot = &sc->sc_slot;
	uint32_t reg, mask;
	int left, sync_op;

	mtx_lock(&slot->mtx);

	/*
	 * If there are more segments for the current dma, start the next one.
	 * Otherwise unload the dma map and decide what to do next based on the
	 * status of the sdhci controller and whether there's more data left.
	 */
	if (sc->dmamap_seg_index < sc->dmamap_seg_count) {
		bcm_sdhci_start_dma_seg(sc);
		mtx_unlock(&slot->mtx);
		return;
	}

	if (slot->curcmd->data->flags & MMC_DATA_READ) {
		sync_op = BUS_DMASYNC_POSTREAD;
		mask = SDHCI_INT_DATA_AVAIL;
	} else {
		sync_op = BUS_DMASYNC_POSTWRITE;
		mask = SDHCI_INT_SPACE_AVAIL;
	}
	bus_dmamap_sync(sc->sc_dma_tag, sc->sc_dma_map, sync_op);
	bus_dmamap_unload(sc->sc_dma_tag, sc->sc_dma_map);

	sc->dmamap_seg_count = 0;
	sc->dmamap_seg_index = 0;

	/* Bytes remaining in this IO, capped at one buffer's worth. */
	left = min(BCM_SDHCI_BUFFER_SIZE,
	    slot->curcmd->data->len - slot->offset);

	/* DATA END? */
	reg = bcm_sdhci_read_4(slot->bus, slot, SDHCI_INT_STATUS);

	if (reg & SDHCI_INT_DATA_END) {
		/* ACK for all outstanding interrupts */
		bcm_sdhci_write_4(slot->bus, slot, SDHCI_INT_STATUS, reg);

		/* enable INT */
		slot->intmask |= SDHCI_INT_DATA_AVAIL |
		    SDHCI_INT_SPACE_AVAIL | SDHCI_INT_DATA_END;
		bcm_sdhci_write_4(slot->bus, slot, SDHCI_SIGNAL_ENABLE,
		    slot->intmask);

		/* finish this data */
		sdhci_finish_data(slot);
	} else {
		/* already available? */
		if (reg & mask) {
			/* ACK for DATA_AVAIL or SPACE_AVAIL */
			bcm_sdhci_write_4(slot->bus, slot,
			    SDHCI_INT_STATUS, mask);

			/* continue next DMA transfer */
			if (bus_dmamap_load(sc->sc_dma_tag, sc->sc_dma_map,
			    (uint8_t *)slot->curcmd->data->data +
			    slot->offset, left, bcm_sdhci_dmacb, sc,
			    BUS_DMA_NOWAIT) != 0 ||
			    sc->dmamap_status != 0) {
				slot->curcmd->error = MMC_ERR_NO_MEMORY;
				sdhci_finish_data(slot);
			} else {
				bcm_sdhci_start_dma_seg(sc);
			}
		} else {
			/* wait for next data by INT */

			/* enable INT */
			slot->intmask |= SDHCI_INT_DATA_AVAIL |
			    SDHCI_INT_SPACE_AVAIL | SDHCI_INT_DATA_END;
			bcm_sdhci_write_4(slot->bus, slot,
			    SDHCI_SIGNAL_ENABLE, slot->intmask);
		}
	}
	mtx_unlock(&slot->mtx);
}

/*
 * Platform read path: load the next chunk of the caller's buffer into the
 * DMA map and start the first segment.  Bails out (leaving the slot to
 * fall back to its error handling) if a previous DMA is still in flight
 * or the map load fails.
 */
static void
bcm_sdhci_read_dma(device_t dev, struct sdhci_slot *slot)
{
	struct bcm_sdhci_softc *sc = device_get_softc(slot->bus);
	size_t left;

	if (sc->dmamap_seg_count != 0) {
		device_printf(sc->sc_dev, "DMA in use\n");
		return;
	}

	left = min(BCM_SDHCI_BUFFER_SIZE,
	    slot->curcmd->data->len - slot->offset);

	KASSERT((left & 3) == 0,
	    ("%s: len = %d, not word-aligned", __func__, left));

	if (bus_dmamap_load(sc->sc_dma_tag, sc->sc_dma_map,
	    (uint8_t *)slot->curcmd->data->data + slot->offset, left,
	    bcm_sdhci_dmacb, sc, BUS_DMA_NOWAIT) != 0 ||
	    sc->dmamap_status != 0) {
		slot->curcmd->error = MMC_ERR_NO_MEMORY;
		return;
	}

	/* DMA start */
	bcm_sdhci_start_dma_seg(sc);
}

/*
 * Platform write path: mirror of bcm_sdhci_read_dma() for the outbound
 * direction.
 */
static void
bcm_sdhci_write_dma(device_t dev, struct sdhci_slot *slot)
{
	struct bcm_sdhci_softc *sc = device_get_softc(slot->bus);
	size_t left;

	if (sc->dmamap_seg_count != 0) {
		device_printf(sc->sc_dev, "DMA in use\n");
		return;
	}

	left = min(BCM_SDHCI_BUFFER_SIZE,
	    slot->curcmd->data->len - slot->offset);

	KASSERT((left & 3) == 0,
	    ("%s: len = %d, not word-aligned", __func__, left));

	if (bus_dmamap_load(sc->sc_dma_tag, sc->sc_dma_map,
	    (uint8_t *)slot->curcmd->data->data + slot->offset, left,
	    bcm_sdhci_dmacb, sc, BUS_DMA_NOWAIT) != 0 ||
	    sc->dmamap_status != 0) {
		slot->curcmd->error = MMC_ERR_NO_MEMORY;
		return;
	}

	/* DMA start */
/* Tail of bcm_sdhci_write_dma(): the map load succeeded, start DMA. */
	bcm_sdhci_start_dma_seg(sc);
}

/*
 * sdhci_platform_will_handle method: decide whether this driver's DMA
 * engine should carry out the transfer for the current command.
 * Returns 1 to claim the transfer for DMA, 0 to let the core fall back
 * to PIO.
 */
static int
bcm_sdhci_will_handle_transfer(device_t dev, struct sdhci_slot *slot)
{
	size_t left;

	/*
	 * Do not use DMA for transfers less than block size or with a length
	 * that is not a multiple of four.
	 */
	left = min(BCM_DMA_BLOCK_SIZE,
	    slot->curcmd->data->len - slot->offset);
	if (left < BCM_DMA_BLOCK_SIZE)
		return (0);
	if (left & 0x03)
		return (0);
	return (1);
}

/*
 * sdhci_platform_start_transfer method: dispatch to the read or write
 * DMA helper based on the data direction of the current command.
 */
static void
bcm_sdhci_start_transfer(device_t dev, struct sdhci_slot *slot,
    uint32_t *intmask)
{

	/* DMA transfer FIFO 1KB */
	if (slot->curcmd->data->flags & MMC_DATA_READ)
		bcm_sdhci_read_dma(dev, slot);
	else
		bcm_sdhci_write_dma(dev, slot);
}

/*
 * sdhci_platform_finish_transfer method: hand data-phase completion
 * back to the generic sdhci layer.
 */
static void
bcm_sdhci_finish_transfer(device_t dev, struct sdhci_slot *slot)
{

	sdhci_finish_data(slot);
}

static device_method_t bcm_sdhci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		bcm_sdhci_probe),
	DEVMETHOD(device_attach,	bcm_sdhci_attach),
	DEVMETHOD(device_detach,	bcm_sdhci_detach),

	/* Bus interface */
	DEVMETHOD(bus_read_ivar,	sdhci_generic_read_ivar),
	DEVMETHOD(bus_write_ivar,	sdhci_generic_write_ivar),
	DEVMETHOD(bus_print_child,	bus_generic_print_child),

	/* MMC bridge interface */
	DEVMETHOD(mmcbr_update_ios,	sdhci_generic_update_ios),
	DEVMETHOD(mmcbr_request,	sdhci_generic_request),
	DEVMETHOD(mmcbr_get_ro,		bcm_sdhci_get_ro),
	DEVMETHOD(mmcbr_acquire_host,	sdhci_generic_acquire_host),
	DEVMETHOD(mmcbr_release_host,	sdhci_generic_release_host),

	/* Platform transfer methods */
	DEVMETHOD(sdhci_platform_will_handle,	bcm_sdhci_will_handle_transfer),
	DEVMETHOD(sdhci_platform_start_transfer, bcm_sdhci_start_transfer),
	DEVMETHOD(sdhci_platform_finish_transfer, bcm_sdhci_finish_transfer),

	/* SDHCI registers accessors */
	DEVMETHOD(sdhci_read_1,		bcm_sdhci_read_1),
	DEVMETHOD(sdhci_read_2,		bcm_sdhci_read_2),
	DEVMETHOD(sdhci_read_4,		bcm_sdhci_read_4),
	DEVMETHOD(sdhci_read_multi_4,	bcm_sdhci_read_multi_4),
	DEVMETHOD(sdhci_write_1,	bcm_sdhci_write_1),
	DEVMETHOD(sdhci_write_2,	bcm_sdhci_write_2),
	DEVMETHOD(sdhci_write_4,
bcm_sdhci_write_4), DEVMETHOD(sdhci_write_multi_4, bcm_sdhci_write_multi_4), { 0, 0 } }; static devclass_t bcm_sdhci_devclass; static driver_t bcm_sdhci_driver = { "sdhci_bcm", bcm_sdhci_methods, sizeof(struct bcm_sdhci_softc), }; DRIVER_MODULE(sdhci_bcm, simplebus, bcm_sdhci_driver, bcm_sdhci_devclass, 0, 0); MODULE_DEPEND(sdhci_bcm, sdhci, 1, 1, 1); +DRIVER_MODULE(mmc, sdhci_bcm, mmc_driver, mmc_devclass, NULL, NULL); Index: head/sys/arm/freescale/imx/imx_sdhci.c =================================================================== --- head/sys/arm/freescale/imx/imx_sdhci.c (revision 292179) +++ head/sys/arm/freescale/imx/imx_sdhci.c (revision 292180) @@ -1,830 +1,830 @@ /*- * Copyright (c) 2013 Ian Lepore * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* */ #include __FBSDID("$FreeBSD$"); /* * SDHCI driver glue for Freescale i.MX SoC family. * * This supports both eSDHC (earlier SoCs) and uSDHC (more recent SoCs). */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "sdhci_if.h" struct imx_sdhci_softc { device_t dev; struct resource * mem_res; struct resource * irq_res; void * intr_cookie; struct sdhci_slot slot; struct callout r1bfix_callout; sbintime_t r1bfix_timeout_at; uint32_t baseclk_hz; uint32_t sdclockreg_freq_bits; uint32_t cmd_and_mode; uint32_t r1bfix_intmask; uint8_t r1bfix_type; uint8_t hwtype; boolean_t force_card_present; }; #define R1BFIX_NONE 0 /* No fix needed at next interrupt. */ #define R1BFIX_NODATA 1 /* Synthesize DATA_END for R1B w/o data. */ #define R1BFIX_AC12 2 /* Wait for busy after auto command 12. */ #define HWTYPE_NONE 0 /* Hardware not recognized/supported. */ #define HWTYPE_ESDHC 1 /* imx5x and earlier. */ #define HWTYPE_USDHC 2 /* imx6. */ #define SDHC_WTMK_LVL 0x44 /* Watermark Level register. */ #define USDHC_MIX_CONTROL 0x48 /* Mix(ed) Control register. */ #define SDHC_VEND_SPEC 0xC0 /* Vendor-specific register. 
*/ #define SDHC_VEND_FRC_SDCLK_ON (1 << 8) #define SDHC_VEND_IPGEN (1 << 11) #define SDHC_VEND_HCKEN (1 << 12) #define SDHC_VEND_PEREN (1 << 13) #define SDHC_PRES_STATE 0x24 #define SDHC_PRES_CIHB (1 << 0) #define SDHC_PRES_CDIHB (1 << 1) #define SDHC_PRES_DLA (1 << 2) #define SDHC_PRES_SDSTB (1 << 3) #define SDHC_PRES_IPGOFF (1 << 4) #define SDHC_PRES_HCKOFF (1 << 5) #define SDHC_PRES_PEROFF (1 << 6) #define SDHC_PRES_SDOFF (1 << 7) #define SDHC_PRES_WTA (1 << 8) #define SDHC_PRES_RTA (1 << 9) #define SDHC_PRES_BWEN (1 << 10) #define SDHC_PRES_BREN (1 << 11) #define SDHC_PRES_RTR (1 << 12) #define SDHC_PRES_CINST (1 << 16) #define SDHC_PRES_CDPL (1 << 18) #define SDHC_PRES_WPSPL (1 << 19) #define SDHC_PRES_CLSL (1 << 23) #define SDHC_PRES_DLSL_SHIFT 24 #define SDHC_PRES_DLSL_MASK (0xffU << SDHC_PRES_DLSL_SHIFT) #define SDHC_PROT_CTRL 0x28 #define SDHC_PROT_LED (1 << 0) #define SDHC_PROT_WIDTH_1BIT (0 << 1) #define SDHC_PROT_WIDTH_4BIT (1 << 1) #define SDHC_PROT_WIDTH_8BIT (2 << 1) #define SDHC_PROT_WIDTH_MASK (3 << 1) #define SDHC_PROT_D3CD (1 << 3) #define SDHC_PROT_EMODE_BIG (0 << 4) #define SDHC_PROT_EMODE_HALF (1 << 4) #define SDHC_PROT_EMODE_LITTLE (2 << 4) #define SDHC_PROT_EMODE_MASK (3 << 4) #define SDHC_PROT_SDMA (0 << 8) #define SDHC_PROT_ADMA1 (1 << 8) #define SDHC_PROT_ADMA2 (2 << 8) #define SDHC_PROT_ADMA264 (3 << 8) #define SDHC_PROT_DMA_MASK (3 << 8) #define SDHC_PROT_CDTL (1 << 6) #define SDHC_PROT_CDSS (1 << 7) #define SDHC_INT_STATUS 0x30 #define SDHC_CLK_IPGEN (1 << 0) #define SDHC_CLK_HCKEN (1 << 1) #define SDHC_CLK_PEREN (1 << 2) #define SDHC_CLK_DIVISOR_MASK 0x000000f0 #define SDHC_CLK_DIVISOR_SHIFT 4 #define SDHC_CLK_PRESCALE_MASK 0x0000ff00 #define SDHC_CLK_PRESCALE_SHIFT 8 static struct ofw_compat_data compat_data[] = { {"fsl,imx6q-usdhc", HWTYPE_USDHC}, {"fsl,imx6sl-usdhc", HWTYPE_USDHC}, {"fsl,imx53-esdhc", HWTYPE_ESDHC}, {"fsl,imx51-esdhc", HWTYPE_ESDHC}, {NULL, HWTYPE_NONE}, }; static void imx_sdhc_set_clock(struct imx_sdhci_softc *sc, 
int enable); static void imx_sdhci_r1bfix_func(void *arg); static inline uint32_t RD4(struct imx_sdhci_softc *sc, bus_size_t off) { return (bus_read_4(sc->mem_res, off)); } static inline void WR4(struct imx_sdhci_softc *sc, bus_size_t off, uint32_t val) { bus_write_4(sc->mem_res, off, val); } static uint8_t imx_sdhci_read_1(device_t dev, struct sdhci_slot *slot, bus_size_t off) { struct imx_sdhci_softc *sc = device_get_softc(dev); uint32_t val32, wrk32; /* * Most of the things in the standard host control register are in the * hardware's wider protocol control register, but some of the bits are * moved around. */ if (off == SDHCI_HOST_CONTROL) { wrk32 = RD4(sc, SDHC_PROT_CTRL); val32 = wrk32 & (SDHCI_CTRL_LED | SDHCI_CTRL_CARD_DET | SDHCI_CTRL_FORCE_CARD); switch (wrk32 & SDHC_PROT_WIDTH_MASK) { case SDHC_PROT_WIDTH_1BIT: /* Value is already 0. */ break; case SDHC_PROT_WIDTH_4BIT: val32 |= SDHCI_CTRL_4BITBUS; break; case SDHC_PROT_WIDTH_8BIT: val32 |= SDHCI_CTRL_8BITBUS; break; } switch (wrk32 & SDHC_PROT_DMA_MASK) { case SDHC_PROT_SDMA: /* Value is already 0. */ break; case SDHC_PROT_ADMA1: /* This value is deprecated, should never appear. */ break; case SDHC_PROT_ADMA2: val32 |= SDHCI_CTRL_ADMA2; break; case SDHC_PROT_ADMA264: val32 |= SDHCI_CTRL_ADMA264; break; } return val32; } /* * XXX can't find the bus power on/off knob. For now we have to say the * power is always on and always set to the same voltage. */ if (off == SDHCI_POWER_CONTROL) { return (SDHCI_POWER_ON | SDHCI_POWER_300); } return ((RD4(sc, off & ~3) >> (off & 3) * 8) & 0xff); } static uint16_t imx_sdhci_read_2(device_t dev, struct sdhci_slot *slot, bus_size_t off) { struct imx_sdhci_softc *sc = device_get_softc(dev); uint32_t val32, wrk32; if (sc->hwtype == HWTYPE_USDHC) { /* * The USDHC hardware has nothing in the version register, but * it's v3 compatible with all our translation code. 
*/ if (off == SDHCI_HOST_VERSION) { return (SDHCI_SPEC_300 << SDHCI_SPEC_VER_SHIFT); } /* * The USDHC hardware moved the transfer mode bits to the mixed * control register, fetch them from there. */ if (off == SDHCI_TRANSFER_MODE) return (RD4(sc, USDHC_MIX_CONTROL) & 0x37); } else if (sc->hwtype == HWTYPE_ESDHC) { /* * The ESDHC hardware has the typical 32-bit combined "command * and mode" register that we have to cache so that command * isn't written until after mode. On a read, just retrieve the * cached values last written. */ if (off == SDHCI_TRANSFER_MODE) { return (sc->cmd_and_mode >> 16); } else if (off == SDHCI_COMMAND_FLAGS) { return (sc->cmd_and_mode & 0x0000ffff); } } /* * This hardware only manages one slot. Synthesize a slot interrupt * status register... if there are any enabled interrupts active they * must be coming from our one and only slot. */ if (off == SDHCI_SLOT_INT_STATUS) { val32 = RD4(sc, SDHCI_INT_STATUS); val32 &= RD4(sc, SDHCI_SIGNAL_ENABLE); return (val32 ? 1 : 0); } /* * The clock enable bit is in the vendor register and the clock-stable * bit is in the present state register. Transcribe them as if they * were in the clock control register where they should be. * XXX Is it important that we distinguish between "internal" and "card" * clocks? Probably not; transcribe the card clock status to both bits. 
*/ if (off == SDHCI_CLOCK_CONTROL) { val32 = 0; wrk32 = RD4(sc, SDHC_VEND_SPEC); if (wrk32 & SDHC_VEND_FRC_SDCLK_ON) val32 |= SDHCI_CLOCK_INT_EN | SDHCI_CLOCK_CARD_EN; wrk32 = RD4(sc, SDHC_PRES_STATE); if (wrk32 & SDHC_PRES_SDSTB) val32 |= SDHCI_CLOCK_INT_STABLE; val32 |= sc->sdclockreg_freq_bits; return (val32); } return ((RD4(sc, off & ~3) >> (off & 3) * 8) & 0xffff); } static uint32_t imx_sdhci_read_4(device_t dev, struct sdhci_slot *slot, bus_size_t off) { struct imx_sdhci_softc *sc = device_get_softc(dev); uint32_t val32, wrk32; val32 = RD4(sc, off); /* * The hardware leaves the base clock frequency out of the capabilities * register; fill it in. The timeout clock is the same as the active * output sdclock; we indicate that with a quirk setting so don't * populate the timeout frequency bits. * * XXX Turn off (for now) features the hardware can do but this driver * doesn't yet handle (1.8v, suspend/resume, etc). */ if (off == SDHCI_CAPABILITIES) { val32 &= ~SDHCI_CAN_VDD_180; val32 &= ~SDHCI_CAN_DO_SUSPEND; val32 |= SDHCI_CAN_DO_8BITBUS; val32 |= (sc->baseclk_hz / 1000000) << SDHCI_CLOCK_BASE_SHIFT; return (val32); } /* * The hardware moves bits around in the present state register to make * room for all 8 data line state bits. To translate, mask out all the * bits which are not in the same position in both registers (this also * masks out some freescale-specific bits in locations defined as * reserved by sdhci), then shift the data line and retune request bits * down to their standard locations. */ if (off == SDHCI_PRESENT_STATE) { wrk32 = val32; val32 &= 0x000F0F07; val32 |= (wrk32 >> 4) & SDHCI_STATE_DAT_MASK; val32 |= (wrk32 >> 9) & SDHCI_RETUNE_REQUEST; if (sc->force_card_present) val32 |= SDHCI_CARD_PRESENT; return (val32); } /* * imx_sdhci_intr() can synthesize a DATA_END interrupt following a * command with an R1B response, mix it into the hardware status. 
*/ if (off == SDHCI_INT_STATUS) { return (val32 | sc->r1bfix_intmask); } return val32; } static void imx_sdhci_read_multi_4(device_t dev, struct sdhci_slot *slot, bus_size_t off, uint32_t *data, bus_size_t count) { struct imx_sdhci_softc *sc = device_get_softc(dev); bus_read_multi_4(sc->mem_res, off, data, count); } static void imx_sdhci_write_1(device_t dev, struct sdhci_slot *slot, bus_size_t off, uint8_t val) { struct imx_sdhci_softc *sc = device_get_softc(dev); uint32_t val32; /* * Most of the things in the standard host control register are in the * hardware's wider protocol control register, but some of the bits are * moved around. */ if (off == SDHCI_HOST_CONTROL) { val32 = RD4(sc, SDHC_PROT_CTRL); val32 &= ~(SDHC_PROT_LED | SDHC_PROT_DMA_MASK | SDHC_PROT_WIDTH_MASK | SDHC_PROT_CDTL | SDHC_PROT_CDSS); val32 |= (val & SDHCI_CTRL_LED); if (val & SDHCI_CTRL_8BITBUS) val32 |= SDHC_PROT_WIDTH_8BIT; else val32 |= (val & SDHCI_CTRL_4BITBUS); val32 |= (val & (SDHCI_CTRL_SDMA | SDHCI_CTRL_ADMA2)) << 4; val32 |= (val & (SDHCI_CTRL_CARD_DET | SDHCI_CTRL_FORCE_CARD)); WR4(sc, SDHC_PROT_CTRL, val32); return; } /* XXX I can't find the bus power on/off knob; do nothing. */ if (off == SDHCI_POWER_CONTROL) { return; } val32 = RD4(sc, off & ~3); val32 &= ~(0xff << (off & 3) * 8); val32 |= (val << (off & 3) * 8); WR4(sc, off & ~3, val32); } static void imx_sdhci_write_2(device_t dev, struct sdhci_slot *slot, bus_size_t off, uint16_t val) { struct imx_sdhci_softc *sc = device_get_softc(dev); uint32_t val32; /* The USDHC hardware moved the transfer mode bits to mixed control. */ if (sc->hwtype == HWTYPE_USDHC) { if (off == SDHCI_TRANSFER_MODE) { val32 = RD4(sc, USDHC_MIX_CONTROL); val32 &= ~0x3f; val32 |= val & 0x37; // XXX acmd23 not supported here (or by sdhci driver) WR4(sc, USDHC_MIX_CONTROL, val32); return; } } /* * The clock control stuff is complex enough to have its own routine * that can both change speeds and en/disable the clock output. 
Also, * save the register bits in SDHCI format so that we can play them back * in the read2 routine without complex decoding. */ if (off == SDHCI_CLOCK_CONTROL) { sc->sdclockreg_freq_bits = val & 0xffc0; if (val & SDHCI_CLOCK_CARD_EN) { imx_sdhc_set_clock(sc, true); } else { imx_sdhc_set_clock(sc, false); } } /* * Figure out whether we need to check the DAT0 line for busy status at * interrupt time. The controller should be doing this, but for some * reason it doesn't. There are two cases: * - R1B response with no data transfer should generate a DATA_END (aka * TRANSFER_COMPLETE) interrupt after waiting for busy, but if * there's no data transfer there's no DATA_END interrupt. This is * documented; they seem to think it's a feature. * - R1B response after Auto-CMD12 appears to not work, even though * there's a control bit for it (bit 3) in the vendor register. * When we're starting a command that needs a manual DAT0 line check at * interrupt time, we leave ourselves a note in r1bfix_type so that we * can do the extra work in imx_sdhci_intr(). */ if (off == SDHCI_COMMAND_FLAGS) { if (val & SDHCI_CMD_DATA) { const uint32_t MBAUTOCMD = SDHCI_TRNS_ACMD12 | SDHCI_TRNS_MULTI; val32 = RD4(sc, USDHC_MIX_CONTROL); if ((val32 & MBAUTOCMD) == MBAUTOCMD) sc->r1bfix_type = R1BFIX_AC12; } else { if ((val & SDHCI_CMD_RESP_MASK) == SDHCI_CMD_RESP_SHORT_BUSY) { WR4(sc, SDHCI_INT_ENABLE, slot->intmask | SDHCI_INT_RESPONSE); WR4(sc, SDHCI_SIGNAL_ENABLE, slot->intmask | SDHCI_INT_RESPONSE); sc->r1bfix_type = R1BFIX_NODATA; } } } val32 = RD4(sc, off & ~3); val32 &= ~(0xffff << (off & 3) * 8); val32 |= ((val & 0xffff) << (off & 3) * 8); WR4(sc, off & ~3, val32); } static void imx_sdhci_write_4(device_t dev, struct sdhci_slot *slot, bus_size_t off, uint32_t val) { struct imx_sdhci_softc *sc = device_get_softc(dev); /* Clear synthesized interrupts, then pass the value to the hardware. 
*/ if (off == SDHCI_INT_STATUS) { sc->r1bfix_intmask &= ~val; } WR4(sc, off, val); } static void imx_sdhci_write_multi_4(device_t dev, struct sdhci_slot *slot, bus_size_t off, uint32_t *data, bus_size_t count) { struct imx_sdhci_softc *sc = device_get_softc(dev); bus_write_multi_4(sc->mem_res, off, data, count); } static void imx_sdhc_set_clock(struct imx_sdhci_softc *sc, int enable) { uint32_t divisor, enable_bits, enable_reg, freq, prescale, val32; if (sc->hwtype == HWTYPE_ESDHC) { divisor = (sc->sdclockreg_freq_bits >> SDHCI_DIVIDER_SHIFT) & SDHCI_DIVIDER_MASK; enable_reg = SDHCI_CLOCK_CONTROL; enable_bits = SDHC_CLK_IPGEN | SDHC_CLK_HCKEN | SDHC_CLK_PEREN; } else { divisor = (sc->sdclockreg_freq_bits >> SDHCI_DIVIDER_SHIFT) & SDHCI_DIVIDER_MASK; divisor |= ((sc->sdclockreg_freq_bits >> SDHCI_DIVIDER_HI_SHIFT) & SDHCI_DIVIDER_HI_MASK) << SDHCI_DIVIDER_MASK_LEN; enable_reg = SDHCI_CLOCK_CONTROL; enable_bits = SDHC_VEND_IPGEN | SDHC_VEND_HCKEN | SDHC_VEND_PEREN; } WR4(sc, SDHC_VEND_SPEC, RD4(sc, SDHC_VEND_SPEC) & ~SDHC_VEND_FRC_SDCLK_ON); WR4(sc, enable_reg, RD4(sc, enable_reg) & ~enable_bits); if (!enable) return; if (divisor == 0) freq = sc->baseclk_hz; else freq = sc->baseclk_hz / (2 * divisor); for (prescale = 2; prescale < freq / prescale / 16;) prescale <<= 1; for (divisor = 1; freq < freq / prescale / divisor;) ++divisor; prescale >>= 1; divisor -= 1; val32 = RD4(sc, SDHCI_CLOCK_CONTROL); val32 &= ~SDHC_CLK_DIVISOR_MASK; val32 |= divisor << SDHC_CLK_DIVISOR_SHIFT; val32 &= ~SDHC_CLK_PRESCALE_MASK; val32 |= prescale << SDHC_CLK_PRESCALE_SHIFT; WR4(sc, SDHCI_CLOCK_CONTROL, val32); WR4(sc, enable_reg, RD4(sc, enable_reg) | enable_bits); WR4(sc, SDHC_VEND_SPEC, RD4(sc, SDHC_VEND_SPEC) | SDHC_VEND_FRC_SDCLK_ON); } static boolean_t imx_sdhci_r1bfix_is_wait_done(struct imx_sdhci_softc *sc) { uint32_t inhibit; mtx_assert(&sc->slot.mtx, MA_OWNED); /* * Check the DAT0 line status using both the DLA (data line active) and * CDIHB (data inhibit) bits in the present 
state register. In theory * just DLA should do the trick, but in practice it takes both. If the * DAT0 line is still being held and we're not yet beyond the timeout * point, just schedule another callout to check again later. */ inhibit = RD4(sc, SDHC_PRES_STATE) & (SDHC_PRES_DLA | SDHC_PRES_CDIHB); if (inhibit && getsbinuptime() < sc->r1bfix_timeout_at) { callout_reset_sbt(&sc->r1bfix_callout, SBT_1MS, 0, imx_sdhci_r1bfix_func, sc, 0); return (false); } /* * If we reach this point with the inhibit bits still set, we've got a * timeout, synthesize a DATA_TIMEOUT interrupt. Otherwise the DAT0 * line has been released, and we synthesize a DATA_END, and if the type * of fix needed was on a command-without-data we also now add in the * original INT_RESPONSE that we suppressed earlier. */ if (inhibit) sc->r1bfix_intmask |= SDHCI_INT_DATA_TIMEOUT; else { sc->r1bfix_intmask |= SDHCI_INT_DATA_END; if (sc->r1bfix_type == R1BFIX_NODATA) sc->r1bfix_intmask |= SDHCI_INT_RESPONSE; } sc->r1bfix_type = R1BFIX_NONE; return (true); } static void imx_sdhci_r1bfix_func(void * arg) { struct imx_sdhci_softc *sc = arg; boolean_t r1bwait_done; mtx_lock(&sc->slot.mtx); r1bwait_done = imx_sdhci_r1bfix_is_wait_done(sc); mtx_unlock(&sc->slot.mtx); if (r1bwait_done) sdhci_generic_intr(&sc->slot); } static void imx_sdhci_intr(void *arg) { struct imx_sdhci_softc *sc = arg; uint32_t intmask; mtx_lock(&sc->slot.mtx); /* * Manually check the DAT0 line for R1B response types that the * controller fails to handle properly. The controller asserts the done * interrupt while the card is still asserting busy with the DAT0 line. * * We check DAT0 immediately because most of the time, especially on a * read, the card will actually be done by time we get here. If it's * not, then the wait_done routine will schedule a callout to re-check * periodically until it is done. In that case we clear the interrupt * out of the hardware now so that we can present it later when the DAT0 * line is released. 
 *
 * If we need to wait for the DAT0 line to be released, we set up a
 * timeout point 250ms in the future. This number comes from the SD
 * spec, which allows a command to take that long. In the real world,
 * cards tend to take 10-20ms for a long-running command such as a write
 * or erase that spans two pages.
 */
	switch (sc->r1bfix_type) {
	case R1BFIX_NODATA:
		/* R1B command with no data phase: watch for RESPONSE. */
		intmask = RD4(sc, SDHC_INT_STATUS) & SDHCI_INT_RESPONSE;
		break;
	case R1BFIX_AC12:
		/* R1B after Auto-CMD12: watch for DATA_END. */
		intmask = RD4(sc, SDHC_INT_STATUS) & SDHCI_INT_DATA_END;
		break;
	default:
		intmask = 0;
		break;
	}
	if (intmask) {
		sc->r1bfix_timeout_at = getsbinuptime() + 250 * SBT_1MS;
		if (!imx_sdhci_r1bfix_is_wait_done(sc)) {
			/*
			 * DAT0 is still busy: clear the interrupt from the
			 * hardware now; it is re-presented later from
			 * r1bfix_intmask once the line is released.
			 */
			WR4(sc, SDHC_INT_STATUS, intmask);
			bus_barrier(sc->mem_res, SDHC_INT_STATUS, 4,
			    BUS_SPACE_BARRIER_WRITE);
		}
	}
	mtx_unlock(&sc->slot.mtx);
	sdhci_generic_intr(&sc->slot);
}

/*
 * mmcbr_get_ro method: no write-protect detection here; always report
 * the medium as writable.
 */
static int
imx_sdhci_get_ro(device_t bus, device_t child)
{

	return (false);
}

/* Detach is not supported; always report the device as busy. */
static int
imx_sdhci_detach(device_t dev)
{

	return (EBUSY);
}

/*
 * device_attach method: allocate register and interrupt resources, hook
 * up the interrupt handler, and configure the generic sdhci slot.
 */
static int
imx_sdhci_attach(device_t dev)
{
	struct imx_sdhci_softc *sc = device_get_softc(dev);
	int rid, err;
	phandle_t node;

	sc->dev = dev;

	/* Probe already matched, so a miss here is a programming error. */
	sc->hwtype = ofw_bus_search_compatible(dev, compat_data)->ocd_data;
	if (sc->hwtype == HWTYPE_NONE)
		panic("Impossible: not compatible in imx_sdhci_attach()");

	rid = 0;
	sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (!sc->mem_res) {
		device_printf(dev, "cannot allocate memory window\n");
		err = ENXIO;
		goto fail;
	}

	rid = 0;
	sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_ACTIVE);
	if (!sc->irq_res) {
		device_printf(dev, "cannot allocate interrupt\n");
		err = ENXIO;
		goto fail;
	}

	if (bus_setup_intr(dev, sc->irq_res, INTR_TYPE_BIO | INTR_MPSAFE,
	    NULL, imx_sdhci_intr, sc, &sc->intr_cookie)) {
		device_printf(dev, "cannot setup interrupt handler\n");
		err = ENXIO;
		goto fail;
	}

	/* The timeout clock is the same as the active output sdclock. */
	sc->slot.quirks |= SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK;

	/*
	 * DMA is not really broken, I just haven't implemented it yet.
*/ sc->slot.quirks |= SDHCI_QUIRK_BROKEN_DMA; /* * Set the buffer watermark level to 128 words (512 bytes) for both read * and write. The hardware has a restriction that when the read or * write ready status is asserted, that means you can read exactly the * number of words set in the watermark register before you have to * re-check the status and potentially wait for more data. The main * sdhci driver provides no hook for doing status checking on less than * a full block boundary, so we set the watermark level to be a full * block. Reads and writes where the block size is less than the * watermark size will work correctly too, no need to change the * watermark for different size blocks. However, 128 is the maximum * allowed for the watermark, so PIO is limitted to 512 byte blocks * (which works fine for SD cards, may be a problem for SDIO some day). * * XXX need named constants for this stuff. */ WR4(sc, SDHC_WTMK_LVL, 0x08800880); sc->baseclk_hz = imx_ccm_sdhci_hz(); /* * If the slot is flagged with the non-removable property, set our flag * to always force the SDHCI_CARD_PRESENT bit on. * * XXX Workaround for gpio-based card detect... * * We don't have gpio support yet. If there's a cd-gpios property just * force the SDHCI_CARD_PRESENT bit on for now. If there isn't really a * card there it will fail to probe at the mmc layer and nothing bad * happens except instantiating an mmcN device for an empty slot. */ node = ofw_bus_get_node(dev); if (OF_hasprop(node, "non-removable")) sc->force_card_present = true; else if (OF_hasprop(node, "cd-gpios")) { /* XXX put real gpio hookup here. 
	 */
		sc->force_card_present = true;
	}

	callout_init(&sc->r1bfix_callout, 1);

	/* Register the slot with the generic sdhci layer and go live. */
	sdhci_init_slot(dev, &sc->slot, 0);

	bus_generic_probe(dev);
	bus_generic_attach(dev);

	sdhci_start_slot(&sc->slot);

	return (0);

fail:
	/* Unwind whatever resources were acquired before the failure. */
	if (sc->intr_cookie)
		bus_teardown_intr(dev, sc->irq_res, sc->intr_cookie);
	if (sc->irq_res)
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->irq_res);
	if (sc->mem_res)
		bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->mem_res);
	return (err);
}

/*
 * device_probe method: match against the FDT compatible table and set a
 * description for the flavor of controller found (eSDHC vs uSDHC).
 */
static int
imx_sdhci_probe(device_t dev)
{

	if (!ofw_bus_status_okay(dev))
		return (ENXIO);

	switch (ofw_bus_search_compatible(dev, compat_data)->ocd_data) {
	case HWTYPE_ESDHC:
		device_set_desc(dev, "Freescale eSDHC controller");
		return (BUS_PROBE_DEFAULT);
	case HWTYPE_USDHC:
		device_set_desc(dev, "Freescale uSDHC controller");
		return (BUS_PROBE_DEFAULT);
	default:
		break;
	}
	return (ENXIO);
}

static device_method_t imx_sdhci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		imx_sdhci_probe),
	DEVMETHOD(device_attach,	imx_sdhci_attach),
	DEVMETHOD(device_detach,	imx_sdhci_detach),

	/* Bus interface */
	DEVMETHOD(bus_read_ivar,	sdhci_generic_read_ivar),
	DEVMETHOD(bus_write_ivar,	sdhci_generic_write_ivar),
	DEVMETHOD(bus_print_child,	bus_generic_print_child),

	/* MMC bridge interface */
	DEVMETHOD(mmcbr_update_ios,	sdhci_generic_update_ios),
	DEVMETHOD(mmcbr_request,	sdhci_generic_request),
	DEVMETHOD(mmcbr_get_ro,		imx_sdhci_get_ro),
	DEVMETHOD(mmcbr_acquire_host,	sdhci_generic_acquire_host),
	DEVMETHOD(mmcbr_release_host,	sdhci_generic_release_host),

	/* SDHCI registers accessors */
	DEVMETHOD(sdhci_read_1,		imx_sdhci_read_1),
	DEVMETHOD(sdhci_read_2,		imx_sdhci_read_2),
	DEVMETHOD(sdhci_read_4,		imx_sdhci_read_4),
	DEVMETHOD(sdhci_read_multi_4,	imx_sdhci_read_multi_4),
	DEVMETHOD(sdhci_write_1,	imx_sdhci_write_1),
	DEVMETHOD(sdhci_write_2,	imx_sdhci_write_2),
	DEVMETHOD(sdhci_write_4,	imx_sdhci_write_4),
	DEVMETHOD(sdhci_write_multi_4,	imx_sdhci_write_multi_4),

	{ 0, 0 }
};

static devclass_t imx_sdhci_devclass;

static driver_t imx_sdhci_driver = {
"sdhci_imx", imx_sdhci_methods, sizeof(struct imx_sdhci_softc), }; DRIVER_MODULE(sdhci_imx, simplebus, imx_sdhci_driver, imx_sdhci_devclass, 0, 0); MODULE_DEPEND(sdhci_imx, sdhci, 1, 1, 1); - +DRIVER_MODULE(mmc, sdhci_imx, mmc_driver, mmc_devclass, NULL, NULL); Index: head/sys/arm/lpc/lpc_mmc.c =================================================================== --- head/sys/arm/lpc/lpc_mmc.c (revision 292179) +++ head/sys/arm/lpc/lpc_mmc.c (revision 292180) @@ -1,777 +1,778 @@ /*- * Copyright (c) 2011 Jakub Wojciech Klama * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef DEBUG #define debugf(fmt, args...) do { printf("%s(): ", __func__); \ printf(fmt,##args); } while (0) #else #define debugf(fmt, args...) #endif struct lpc_mmc_dmamap_arg { bus_addr_t lm_dma_busaddr; }; struct lpc_mmc_softc { device_t lm_dev; struct mtx lm_mtx; struct resource * lm_mem_res; struct resource * lm_irq_res; bus_space_tag_t lm_bst; bus_space_handle_t lm_bsh; void * lm_intrhand; struct mmc_host lm_host; struct mmc_request * lm_req; struct mmc_data * lm_data; uint32_t lm_flags; #define LPC_SD_FLAGS_IGNORECRC (1 << 0) int lm_xfer_direction; #define DIRECTION_READ 0 #define DIRECTION_WRITE 1 int lm_xfer_done; int lm_bus_busy; bus_dma_tag_t lm_dma_tag; bus_dmamap_t lm_dma_map; bus_addr_t lm_buffer_phys; void * lm_buffer; }; #define LPC_SD_MAX_BLOCKSIZE 1024 /* XXX */ #define LPC_MMC_DMACH_READ 1 #define LPC_MMC_DMACH_WRITE 0 static int lpc_mmc_probe(device_t); static int lpc_mmc_attach(device_t); static int lpc_mmc_detach(device_t); static void lpc_mmc_intr(void *); static void lpc_mmc_cmd(struct lpc_mmc_softc *, struct mmc_command *); static void lpc_mmc_setup_xfer(struct lpc_mmc_softc *, struct mmc_data *); static int lpc_mmc_update_ios(device_t, device_t); static int lpc_mmc_request(device_t, device_t, struct mmc_request *); static int lpc_mmc_get_ro(device_t, device_t); static int lpc_mmc_acquire_host(device_t, device_t); static int lpc_mmc_release_host(device_t, device_t); static void lpc_mmc_dma_rxfinish(void *); static void lpc_mmc_dma_rxerror(void *); static void lpc_mmc_dma_txfinish(void *); static void lpc_mmc_dma_txerror(void *); static void lpc_mmc_dmamap_cb(void *, bus_dma_segment_t *, int, int); #define lpc_mmc_lock(_sc) \ 
mtx_lock(&_sc->lm_mtx); #define lpc_mmc_unlock(_sc) \ mtx_unlock(&_sc->lm_mtx); #define lpc_mmc_read_4(_sc, _reg) \ bus_space_read_4(_sc->lm_bst, _sc->lm_bsh, _reg) #define lpc_mmc_write_4(_sc, _reg, _value) \ bus_space_write_4(_sc->lm_bst, _sc->lm_bsh, _reg, _value) static struct lpc_dmac_channel_config lpc_mmc_dma_rxconf = { .ldc_fcntl = LPC_DMAC_FLOW_D_P2M, .ldc_src_periph = LPC_DMAC_SD_ID, .ldc_src_width = LPC_DMAC_CH_CONTROL_WIDTH_4, .ldc_src_incr = 0, .ldc_src_burst = LPC_DMAC_CH_CONTROL_BURST_8, .ldc_dst_periph = LPC_DMAC_SD_ID, .ldc_dst_width = LPC_DMAC_CH_CONTROL_WIDTH_4, .ldc_dst_incr = 1, .ldc_dst_burst = LPC_DMAC_CH_CONTROL_BURST_8, .ldc_success_handler = lpc_mmc_dma_rxfinish, .ldc_error_handler = lpc_mmc_dma_rxerror, }; static struct lpc_dmac_channel_config lpc_mmc_dma_txconf = { .ldc_fcntl = LPC_DMAC_FLOW_P_M2P, .ldc_src_periph = LPC_DMAC_SD_ID, .ldc_src_width = LPC_DMAC_CH_CONTROL_WIDTH_4, .ldc_src_incr = 1, .ldc_src_burst = LPC_DMAC_CH_CONTROL_BURST_8, .ldc_dst_periph = LPC_DMAC_SD_ID, .ldc_dst_width = LPC_DMAC_CH_CONTROL_WIDTH_4, .ldc_dst_incr = 0, .ldc_dst_burst = LPC_DMAC_CH_CONTROL_BURST_8, .ldc_success_handler = lpc_mmc_dma_txfinish, .ldc_error_handler = lpc_mmc_dma_txerror, }; static int lpc_mmc_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (!ofw_bus_is_compatible(dev, "lpc,mmc")) return (ENXIO); device_set_desc(dev, "LPC32x0 MMC/SD controller"); return (BUS_PROBE_DEFAULT); } static int lpc_mmc_attach(device_t dev) { struct lpc_mmc_softc *sc = device_get_softc(dev); struct lpc_mmc_dmamap_arg ctx; device_t child; int rid, err; sc->lm_dev = dev; sc->lm_req = NULL; mtx_init(&sc->lm_mtx, "lpcmmc", "mmc", MTX_DEF); rid = 0; sc->lm_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (!sc->lm_mem_res) { device_printf(dev, "cannot allocate memory window\n"); return (ENXIO); } sc->lm_bst = rman_get_bustag(sc->lm_mem_res); sc->lm_bsh = rman_get_bushandle(sc->lm_mem_res); debugf("virtual register space: 
0x%08lx\n", sc->lm_bsh); rid = 0; sc->lm_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE); if (!sc->lm_irq_res) { device_printf(dev, "cannot allocate interrupt\n"); bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->lm_mem_res); return (ENXIO); } if (bus_setup_intr(dev, sc->lm_irq_res, INTR_TYPE_MISC | INTR_MPSAFE, NULL, lpc_mmc_intr, sc, &sc->lm_intrhand)) { bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->lm_mem_res); bus_release_resource(dev, SYS_RES_IRQ, 0, sc->lm_irq_res); device_printf(dev, "cannot setup interrupt handler\n"); return (ENXIO); } sc->lm_host.f_min = 312500; sc->lm_host.f_max = 2500000; sc->lm_host.host_ocr = MMC_OCR_300_310 | MMC_OCR_310_320 | MMC_OCR_320_330 | MMC_OCR_330_340; #if 0 sc->lm_host.caps = MMC_CAP_4_BIT_DATA; #endif lpc_pwr_write(dev, LPC_CLKPWR_MS_CTRL, LPC_CLKPWR_MS_CTRL_CLOCK_EN | LPC_CLKPWR_MS_CTRL_SD_CLOCK | 1); lpc_mmc_write_4(sc, LPC_SD_POWER, LPC_SD_POWER_CTRL_ON); device_set_ivars(dev, &sc->lm_host); child = device_add_child(dev, "mmc", -1); if (!child) { device_printf(dev, "attaching MMC bus failed!\n"); bus_teardown_intr(dev, sc->lm_irq_res, sc->lm_intrhand); bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->lm_mem_res); bus_release_resource(dev, SYS_RES_IRQ, 0, sc->lm_irq_res); return (ENXIO); } /* Alloc DMA memory */ err = bus_dma_tag_create( bus_get_dma_tag(sc->lm_dev), 4, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ LPC_SD_MAX_BLOCKSIZE, 1, /* maxsize, nsegments */ LPC_SD_MAX_BLOCKSIZE, 0, /* maxsegsize, flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->lm_dma_tag); err = bus_dmamem_alloc(sc->lm_dma_tag, (void **)&sc->lm_buffer, 0, &sc->lm_dma_map); if (err) { device_printf(dev, "cannot allocate framebuffer\n"); goto fail; } err = bus_dmamap_load(sc->lm_dma_tag, sc->lm_dma_map, sc->lm_buffer, LPC_SD_MAX_BLOCKSIZE, lpc_mmc_dmamap_cb, &ctx, BUS_DMA_NOWAIT); if (err) { device_printf(dev, "cannot load DMA map\n"); 
goto fail; } sc->lm_buffer_phys = ctx.lm_dma_busaddr; lpc_mmc_dma_rxconf.ldc_handler_arg = (void *)sc; err = lpc_dmac_config_channel(dev, LPC_MMC_DMACH_READ, &lpc_mmc_dma_rxconf); if (err) { device_printf(dev, "cannot allocate RX DMA channel\n"); goto fail; } lpc_mmc_dma_txconf.ldc_handler_arg = (void *)sc; err = lpc_dmac_config_channel(dev, LPC_MMC_DMACH_WRITE, &lpc_mmc_dma_txconf); if (err) { device_printf(dev, "cannot allocate TX DMA channel\n"); goto fail; } bus_generic_probe(dev); bus_generic_attach(dev); return (0); fail: if (sc->lm_intrhand) bus_teardown_intr(dev, sc->lm_irq_res, sc->lm_intrhand); if (sc->lm_irq_res) bus_release_resource(dev, SYS_RES_IRQ, 0, sc->lm_irq_res); if (sc->lm_mem_res) bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->lm_mem_res); return (err); } static int lpc_mmc_detach(device_t dev) { return (EBUSY); } static void lpc_mmc_intr(void *arg) { struct lpc_mmc_softc *sc = (struct lpc_mmc_softc *)arg; struct mmc_command *cmd; uint32_t status; status = lpc_mmc_read_4(sc, LPC_SD_STATUS); debugf("interrupt: 0x%08x\n", status); if (status & LPC_SD_STATUS_CMDCRCFAIL) { cmd = sc->lm_req->cmd; cmd->error = sc->lm_flags & LPC_SD_FLAGS_IGNORECRC ? 
MMC_ERR_NONE : MMC_ERR_BADCRC; cmd->resp[0] = lpc_mmc_read_4(sc, LPC_SD_RESP0); sc->lm_req->done(sc->lm_req); sc->lm_req = NULL; lpc_mmc_write_4(sc, LPC_SD_CLEAR, LPC_SD_STATUS_CMDCRCFAIL); } if (status & LPC_SD_STATUS_CMDACTIVE) { debugf("command active\n"); cmd = sc->lm_req->cmd; cmd->resp[0] = lpc_mmc_read_4(sc, LPC_SD_RESP0); sc->lm_req->done(sc->lm_req); sc->lm_req = NULL; } if (status & LPC_SD_STATUS_DATATIMEOUT) { device_printf(sc->lm_dev, "data timeout\n"); lpc_mmc_write_4(sc, LPC_SD_CLEAR, LPC_SD_STATUS_DATATIMEOUT); } if (status & LPC_SD_STATUS_TXUNDERRUN) { device_printf(sc->lm_dev, "TX underrun\n"); lpc_mmc_write_4(sc, LPC_SD_CLEAR, LPC_SD_STATUS_TXUNDERRUN); } if (status & LPC_SD_STATUS_CMDRESPEND) { debugf("command response\n"); cmd = sc->lm_req->cmd; if (cmd->flags & MMC_RSP_136) { cmd->resp[3] = lpc_mmc_read_4(sc, LPC_SD_RESP3); cmd->resp[2] = lpc_mmc_read_4(sc, LPC_SD_RESP2); cmd->resp[1] = lpc_mmc_read_4(sc, LPC_SD_RESP1); } cmd->resp[0] = lpc_mmc_read_4(sc, LPC_SD_RESP0); cmd->error = MMC_ERR_NONE; if (cmd->data && (cmd->data->flags & MMC_DATA_WRITE)) lpc_mmc_setup_xfer(sc, sc->lm_req->cmd->data); if (!cmd->data) { sc->lm_req->done(sc->lm_req); sc->lm_req = NULL; } lpc_mmc_write_4(sc, LPC_SD_CLEAR, LPC_SD_STATUS_CMDRESPEND); } if (status & LPC_SD_STATUS_CMDSENT) { debugf("command sent\n"); cmd = sc->lm_req->cmd; cmd->error = MMC_ERR_NONE; sc->lm_req->done(sc->lm_req); sc->lm_req = NULL; lpc_mmc_write_4(sc, LPC_SD_CLEAR, LPC_SD_STATUS_CMDSENT); } if (status & LPC_SD_STATUS_DATAEND) { if (sc->lm_xfer_direction == DIRECTION_READ) lpc_dmac_start_burst(sc->lm_dev, LPC_DMAC_SD_ID); lpc_mmc_write_4(sc, LPC_SD_CLEAR, LPC_SD_STATUS_DATAEND); } if (status & LPC_SD_STATUS_CMDTIMEOUT) { device_printf(sc->lm_dev, "command response timeout\n"); cmd = sc->lm_req->cmd; cmd->error = MMC_ERR_TIMEOUT; sc->lm_req->done(sc->lm_req); sc->lm_req = NULL; lpc_mmc_write_4(sc, LPC_SD_CLEAR, LPC_SD_STATUS_CMDTIMEOUT); return; } if (status & LPC_SD_STATUS_STARTBITERR) { 
device_printf(sc->lm_dev, "start bit error\n"); lpc_mmc_write_4(sc, LPC_SD_CLEAR, LPC_SD_STATUS_STARTBITERR); } if (status & LPC_SD_STATUS_DATACRCFAIL) { device_printf(sc->lm_dev, "data CRC error\n"); debugf("data buffer: %p\n", sc->lm_buffer); cmd = sc->lm_req->cmd; cmd->error = MMC_ERR_BADCRC; sc->lm_req->done(sc->lm_req); sc->lm_req = NULL; if (sc->lm_xfer_direction == DIRECTION_READ) lpc_dmac_start_burst(sc->lm_dev, LPC_DMAC_SD_ID); lpc_mmc_write_4(sc, LPC_SD_CLEAR, LPC_SD_STATUS_DATACRCFAIL); } if (status & LPC_SD_STATUS_DATABLOCKEND) { debugf("data block end\n"); if (sc->lm_xfer_direction == DIRECTION_READ) memcpy(sc->lm_data->data, sc->lm_buffer, sc->lm_data->len); if (sc->lm_xfer_direction == DIRECTION_WRITE) { lpc_dmac_disable_channel(sc->lm_dev, LPC_MMC_DMACH_WRITE); lpc_mmc_write_4(sc, LPC_SD_DATACTRL, 0); } sc->lm_req->done(sc->lm_req); sc->lm_req = NULL; lpc_mmc_write_4(sc, LPC_SD_CLEAR, LPC_SD_STATUS_DATABLOCKEND); } debugf("done\n"); } static int lpc_mmc_request(device_t bus, device_t child, struct mmc_request *req) { struct lpc_mmc_softc *sc = device_get_softc(bus); debugf("request: %p\n", req); lpc_mmc_lock(sc); if (sc->lm_req) return (EBUSY); sc->lm_req = req; if (req->cmd->data && req->cmd->data->flags & MMC_DATA_WRITE) { memcpy(sc->lm_buffer, req->cmd->data->data, req->cmd->data->len); lpc_mmc_cmd(sc, req->cmd); lpc_mmc_unlock(sc); return (0); } if (req->cmd->data) lpc_mmc_setup_xfer(sc, req->cmd->data); lpc_mmc_cmd(sc, req->cmd); lpc_mmc_unlock(sc); return (0); } static void lpc_mmc_cmd(struct lpc_mmc_softc *sc, struct mmc_command *cmd) { uint32_t cmdreg = 0; debugf("cmd: %d arg: 0x%08x\n", cmd->opcode, cmd->arg); if (lpc_mmc_read_4(sc, LPC_SD_COMMAND) & LPC_SD_COMMAND_ENABLE) { lpc_mmc_write_4(sc, LPC_SD_COMMAND, 0); DELAY(1000); } sc->lm_flags &= ~LPC_SD_FLAGS_IGNORECRC; if (cmd->flags & MMC_RSP_PRESENT) cmdreg |= LPC_SD_COMMAND_RESPONSE; if (MMC_RSP(cmd->flags) == MMC_RSP_R2) cmdreg |= LPC_SD_COMMAND_LONGRSP; if (MMC_RSP(cmd->flags) == 
MMC_RSP_R3) sc->lm_flags |= LPC_SD_FLAGS_IGNORECRC; cmdreg |= LPC_SD_COMMAND_ENABLE; cmdreg |= (cmd->opcode & LPC_SD_COMMAND_CMDINDEXMASK); lpc_mmc_write_4(sc, LPC_SD_MASK0, 0xffffffff); lpc_mmc_write_4(sc, LPC_SD_MASK1, 0xffffffff); lpc_mmc_write_4(sc, LPC_SD_ARGUMENT, cmd->arg); lpc_mmc_write_4(sc, LPC_SD_COMMAND, cmdreg); } static void lpc_mmc_setup_xfer(struct lpc_mmc_softc *sc, struct mmc_data *data) { uint32_t datactrl = 0; int data_words = data->len / 4; sc->lm_data = data; sc->lm_xfer_done = 0; debugf("data: %p, len: %d, %s\n", data, data->len, (data->flags & MMC_DATA_READ) ? "read" : "write"); if (data->flags & MMC_DATA_READ) { sc->lm_xfer_direction = DIRECTION_READ; lpc_dmac_setup_transfer(sc->lm_dev, LPC_MMC_DMACH_READ, LPC_SD_PHYS_BASE + LPC_SD_FIFO, sc->lm_buffer_phys, data_words, 0); } if (data->flags & MMC_DATA_WRITE) { sc->lm_xfer_direction = DIRECTION_WRITE; lpc_dmac_setup_transfer(sc->lm_dev, LPC_MMC_DMACH_WRITE, sc->lm_buffer_phys, LPC_SD_PHYS_BASE + LPC_SD_FIFO, data_words, 0); } datactrl |= (sc->lm_xfer_direction ? 
LPC_SD_DATACTRL_WRITE : LPC_SD_DATACTRL_READ); datactrl |= LPC_SD_DATACTRL_DMAENABLE | LPC_SD_DATACTRL_ENABLE; datactrl |= (ffs(data->len) - 1) << 4; debugf("datactrl: 0x%08x\n", datactrl); lpc_mmc_write_4(sc, LPC_SD_DATATIMER, 0xFFFF0000); lpc_mmc_write_4(sc, LPC_SD_DATALENGTH, data->len); lpc_mmc_write_4(sc, LPC_SD_DATACTRL, datactrl); } static int lpc_mmc_read_ivar(device_t bus, device_t child, int which, uintptr_t *result) { struct lpc_mmc_softc *sc = device_get_softc(bus); switch (which) { default: return (EINVAL); case MMCBR_IVAR_BUS_MODE: *(int *)result = sc->lm_host.ios.bus_mode; break; case MMCBR_IVAR_BUS_WIDTH: *(int *)result = sc->lm_host.ios.bus_width; break; case MMCBR_IVAR_CHIP_SELECT: *(int *)result = sc->lm_host.ios.chip_select; break; case MMCBR_IVAR_CLOCK: *(int *)result = sc->lm_host.ios.clock; break; case MMCBR_IVAR_F_MIN: *(int *)result = sc->lm_host.f_min; break; case MMCBR_IVAR_F_MAX: *(int *)result = sc->lm_host.f_max; break; case MMCBR_IVAR_HOST_OCR: *(int *)result = sc->lm_host.host_ocr; break; case MMCBR_IVAR_MODE: *(int *)result = sc->lm_host.mode; break; case MMCBR_IVAR_OCR: *(int *)result = sc->lm_host.ocr; break; case MMCBR_IVAR_POWER_MODE: *(int *)result = sc->lm_host.ios.power_mode; break; case MMCBR_IVAR_VDD: *(int *)result = sc->lm_host.ios.vdd; break; case MMCBR_IVAR_CAPS: *(int *)result = sc->lm_host.caps; break; case MMCBR_IVAR_MAX_DATA: *(int *)result = 1; break; } return (0); } static int lpc_mmc_write_ivar(device_t bus, device_t child, int which, uintptr_t value) { struct lpc_mmc_softc *sc = device_get_softc(bus); switch (which) { default: return (EINVAL); case MMCBR_IVAR_BUS_MODE: sc->lm_host.ios.bus_mode = value; break; case MMCBR_IVAR_BUS_WIDTH: sc->lm_host.ios.bus_width = value; break; case MMCBR_IVAR_CHIP_SELECT: sc->lm_host.ios.chip_select = value; break; case MMCBR_IVAR_CLOCK: sc->lm_host.ios.clock = value; break; case MMCBR_IVAR_MODE: sc->lm_host.mode = value; break; case MMCBR_IVAR_OCR: sc->lm_host.ocr = value; 
break; case MMCBR_IVAR_POWER_MODE: sc->lm_host.ios.power_mode = value; break; case MMCBR_IVAR_VDD: sc->lm_host.ios.vdd = value; break; /* These are read-only */ case MMCBR_IVAR_CAPS: case MMCBR_IVAR_HOST_OCR: case MMCBR_IVAR_F_MIN: case MMCBR_IVAR_F_MAX: case MMCBR_IVAR_MAX_DATA: return (EINVAL); } return (0); } static int lpc_mmc_update_ios(device_t bus, device_t child) { struct lpc_mmc_softc *sc = device_get_softc(bus); struct mmc_ios *ios = &sc->lm_host.ios; uint32_t clkdiv = 0, pwr = 0; if (ios->bus_width == bus_width_4) clkdiv |= LPC_SD_CLOCK_WIDEBUS; /* Calculate clock divider */ clkdiv = (LPC_SD_CLK / (2 * ios->clock)) - 1; /* Clock rate should not exceed rate requested in ios */ if ((LPC_SD_CLK / (2 * (clkdiv + 1))) > ios->clock) clkdiv++; debugf("clock: %dHz, clkdiv: %d\n", ios->clock, clkdiv); if (ios->bus_width == bus_width_4) { debugf("using wide bus mode\n"); clkdiv |= LPC_SD_CLOCK_WIDEBUS; } lpc_mmc_write_4(sc, LPC_SD_CLOCK, clkdiv | LPC_SD_CLOCK_ENABLE); switch (ios->power_mode) { case power_off: pwr |= LPC_SD_POWER_CTRL_OFF; break; case power_up: pwr |= LPC_SD_POWER_CTRL_UP; break; case power_on: pwr |= LPC_SD_POWER_CTRL_ON; break; } if (ios->bus_mode == opendrain) pwr |= LPC_SD_POWER_OPENDRAIN; lpc_mmc_write_4(sc, LPC_SD_POWER, pwr); return (0); } static int lpc_mmc_get_ro(device_t bus, device_t child) { return (0); } static int lpc_mmc_acquire_host(device_t bus, device_t child) { struct lpc_mmc_softc *sc = device_get_softc(bus); int error = 0; lpc_mmc_lock(sc); while (sc->lm_bus_busy) error = mtx_sleep(sc, &sc->lm_mtx, PZERO, "mmcah", 0); sc->lm_bus_busy++; lpc_mmc_unlock(sc); return (error); } static int lpc_mmc_release_host(device_t bus, device_t child) { struct lpc_mmc_softc *sc = device_get_softc(bus); lpc_mmc_lock(sc); sc->lm_bus_busy--; wakeup(sc); lpc_mmc_unlock(sc); return (0); } static void lpc_mmc_dma_rxfinish(void *arg) { } static void lpc_mmc_dma_rxerror(void *arg) { struct lpc_mmc_softc *sc = (struct lpc_mmc_softc *)arg; 
device_printf(sc->lm_dev, "DMA RX error\n"); } static void lpc_mmc_dma_txfinish(void *arg) { } static void lpc_mmc_dma_txerror(void *arg) { struct lpc_mmc_softc *sc = (struct lpc_mmc_softc *)arg; device_printf(sc->lm_dev, "DMA TX error\n"); } static void lpc_mmc_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int err) { struct lpc_mmc_dmamap_arg *ctx; if (err) return; ctx = (struct lpc_mmc_dmamap_arg *)arg; ctx->lm_dma_busaddr = segs[0].ds_addr; } static device_method_t lpc_mmc_methods[] = { /* Device interface */ DEVMETHOD(device_probe, lpc_mmc_probe), DEVMETHOD(device_attach, lpc_mmc_attach), DEVMETHOD(device_detach, lpc_mmc_detach), /* Bus interface */ DEVMETHOD(bus_read_ivar, lpc_mmc_read_ivar), DEVMETHOD(bus_write_ivar, lpc_mmc_write_ivar), DEVMETHOD(bus_print_child, bus_generic_print_child), /* MMC bridge interface */ DEVMETHOD(mmcbr_update_ios, lpc_mmc_update_ios), DEVMETHOD(mmcbr_request, lpc_mmc_request), DEVMETHOD(mmcbr_get_ro, lpc_mmc_get_ro), DEVMETHOD(mmcbr_acquire_host, lpc_mmc_acquire_host), DEVMETHOD(mmcbr_release_host, lpc_mmc_release_host), { 0, 0 } }; static devclass_t lpc_mmc_devclass; static driver_t lpc_mmc_driver = { "lpcmmc", lpc_mmc_methods, sizeof(struct lpc_mmc_softc), }; DRIVER_MODULE(lpcmmc, simplebus, lpc_mmc_driver, lpc_mmc_devclass, 0, 0); +DRIVER_MODULE(mmc, lpcmmc, mmc_driver, mmc_devclass, NULL, NULL); Index: head/sys/arm/ti/ti_sdhci.c =================================================================== --- head/sys/arm/ti/ti_sdhci.c (revision 292179) +++ head/sys/arm/ti/ti_sdhci.c (revision 292180) @@ -1,723 +1,724 @@ /*- * Copyright (c) 2013 Ian Lepore * Copyright (c) 2011 Ben Gray . * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "sdhci_if.h" #include #include #include #include "gpio_if.h" struct ti_sdhci_softc { device_t dev; device_t gpio_dev; struct resource * mem_res; struct resource * irq_res; void * intr_cookie; struct sdhci_slot slot; clk_ident_t mmchs_clk_id; uint32_t mmchs_reg_off; uint32_t sdhci_reg_off; uint32_t baseclk_hz; uint32_t wp_gpio_pin; uint32_t cmd_and_mode; uint32_t sdhci_clkdiv; boolean_t disable_highspeed; boolean_t force_card_present; }; /* * Table of supported FDT compat strings. * * Note that "ti,mmchs" is our own invention, and should be phased out in favor * of the documented names. * * Note that vendor Beaglebone dtsi files use "ti,omap3-hsmmc" for the am335x. 
*/ static struct ofw_compat_data compat_data[] = { {"ti,omap3-hsmmc", 1}, {"ti,omap4-hsmmc", 1}, {"ti,mmchs", 1}, {NULL, 0}, }; /* * The MMCHS hardware has a few control and status registers at the beginning of * the device's memory map, followed by the standard sdhci register block. * Different SoCs have the register blocks at different offsets from the * beginning of the device. Define some constants to map out the registers we * access, and the various per-SoC offsets. The SDHCI_REG_OFFSET is how far * beyond the MMCHS block the SDHCI block is found; it's the same on all SoCs. */ #define OMAP3_MMCHS_REG_OFFSET 0x000 #define OMAP4_MMCHS_REG_OFFSET 0x100 #define AM335X_MMCHS_REG_OFFSET 0x100 #define SDHCI_REG_OFFSET 0x100 #define MMCHS_SYSCONFIG 0x010 #define MMCHS_SYSCONFIG_RESET (1 << 1) #define MMCHS_SYSSTATUS 0x014 #define MMCHS_SYSSTATUS_RESETDONE (1 << 0) #define MMCHS_CON 0x02C #define MMCHS_CON_DW8 (1 << 5) #define MMCHS_CON_DVAL_8_4MS (3 << 9) #define MMCHS_CON_OD (1 << 0) #define MMCHS_SYSCTL 0x12C #define MMCHS_SYSCTL_CLKD_MASK 0x3FF #define MMCHS_SYSCTL_CLKD_SHIFT 6 #define MMCHS_SD_CAPA 0x140 #define MMCHS_SD_CAPA_VS18 (1 << 26) #define MMCHS_SD_CAPA_VS30 (1 << 25) #define MMCHS_SD_CAPA_VS33 (1 << 24) static inline uint32_t ti_mmchs_read_4(struct ti_sdhci_softc *sc, bus_size_t off) { return (bus_read_4(sc->mem_res, off + sc->mmchs_reg_off)); } static inline void ti_mmchs_write_4(struct ti_sdhci_softc *sc, bus_size_t off, uint32_t val) { bus_write_4(sc->mem_res, off + sc->mmchs_reg_off, val); } static inline uint32_t RD4(struct ti_sdhci_softc *sc, bus_size_t off) { return (bus_read_4(sc->mem_res, off + sc->sdhci_reg_off)); } static inline void WR4(struct ti_sdhci_softc *sc, bus_size_t off, uint32_t val) { bus_write_4(sc->mem_res, off + sc->sdhci_reg_off, val); } static uint8_t ti_sdhci_read_1(device_t dev, struct sdhci_slot *slot, bus_size_t off) { struct ti_sdhci_softc *sc = device_get_softc(dev); return ((RD4(sc, off & ~3) >> (off & 3) * 8) & 0xff); 
} static uint16_t ti_sdhci_read_2(device_t dev, struct sdhci_slot *slot, bus_size_t off) { struct ti_sdhci_softc *sc = device_get_softc(dev); uint32_t clkdiv, val32; /* * The MMCHS hardware has a non-standard interpretation of the sdclock * divisor bits. It uses the same bit positions as SDHCI 3.0 (15..6) * but doesn't split them into low:high fields. Instead they're a * single number in the range 0..1023 and the number is exactly the * clock divisor (with 0 and 1 both meaning divide by 1). The SDHCI * driver code expects a v2.0 or v3.0 divisor. The shifting and masking * here extracts the MMCHS representation from the hardware word, cleans * those bits out, applies the 2N adjustment, and plugs the result into * the bit positions for the 2.0 or 3.0 divisor in the returned register * value. The ti_sdhci_write_2() routine performs the opposite * transformation when the SDHCI driver writes to the register. */ if (off == SDHCI_CLOCK_CONTROL) { val32 = RD4(sc, SDHCI_CLOCK_CONTROL); clkdiv = ((val32 >> MMCHS_SYSCTL_CLKD_SHIFT) & MMCHS_SYSCTL_CLKD_MASK) / 2; val32 &= ~(MMCHS_SYSCTL_CLKD_MASK << MMCHS_SYSCTL_CLKD_SHIFT); val32 |= (clkdiv & SDHCI_DIVIDER_MASK) << SDHCI_DIVIDER_SHIFT; if (slot->version >= SDHCI_SPEC_300) val32 |= ((clkdiv >> SDHCI_DIVIDER_MASK_LEN) & SDHCI_DIVIDER_HI_MASK) << SDHCI_DIVIDER_HI_SHIFT; return (val32 & 0xffff); } /* * Standard 32-bit handling of command and transfer mode. */ if (off == SDHCI_TRANSFER_MODE) { return (sc->cmd_and_mode >> 16); } else if (off == SDHCI_COMMAND_FLAGS) { return (sc->cmd_and_mode & 0x0000ffff); } return ((RD4(sc, off & ~3) >> (off & 3) * 8) & 0xffff); } static uint32_t ti_sdhci_read_4(device_t dev, struct sdhci_slot *slot, bus_size_t off) { struct ti_sdhci_softc *sc = device_get_softc(dev); uint32_t val32; val32 = RD4(sc, off); /* * If we need to disallow highspeed mode due to the OMAP4 erratum, strip * that flag from the returned capabilities. 
*/ if (off == SDHCI_CAPABILITIES && sc->disable_highspeed) val32 &= ~SDHCI_CAN_DO_HISPD; /* * Force the card-present state if necessary. */ if (off == SDHCI_PRESENT_STATE && sc->force_card_present) val32 |= SDHCI_CARD_PRESENT; return (val32); } static void ti_sdhci_read_multi_4(device_t dev, struct sdhci_slot *slot, bus_size_t off, uint32_t *data, bus_size_t count) { struct ti_sdhci_softc *sc = device_get_softc(dev); bus_read_multi_4(sc->mem_res, off + sc->sdhci_reg_off, data, count); } static void ti_sdhci_write_1(device_t dev, struct sdhci_slot *slot, bus_size_t off, uint8_t val) { struct ti_sdhci_softc *sc = device_get_softc(dev); uint32_t val32; val32 = RD4(sc, off & ~3); val32 &= ~(0xff << (off & 3) * 8); val32 |= (val << (off & 3) * 8); WR4(sc, off & ~3, val32); } static void ti_sdhci_write_2(device_t dev, struct sdhci_slot *slot, bus_size_t off, uint16_t val) { struct ti_sdhci_softc *sc = device_get_softc(dev); uint32_t clkdiv, val32; /* * Translate between the hardware and SDHCI 2.0 or 3.0 representations * of the clock divisor. See the comments in ti_sdhci_read_2() for * details. */ if (off == SDHCI_CLOCK_CONTROL) { clkdiv = (val >> SDHCI_DIVIDER_SHIFT) & SDHCI_DIVIDER_MASK; if (slot->version >= SDHCI_SPEC_300) clkdiv |= ((val >> SDHCI_DIVIDER_HI_SHIFT) & SDHCI_DIVIDER_HI_MASK) << SDHCI_DIVIDER_MASK_LEN; clkdiv *= 2; if (clkdiv > MMCHS_SYSCTL_CLKD_MASK) clkdiv = MMCHS_SYSCTL_CLKD_MASK; val32 = RD4(sc, SDHCI_CLOCK_CONTROL); val32 &= 0xffff0000; val32 |= val & ~(MMCHS_SYSCTL_CLKD_MASK << MMCHS_SYSCTL_CLKD_SHIFT); val32 |= clkdiv << MMCHS_SYSCTL_CLKD_SHIFT; WR4(sc, SDHCI_CLOCK_CONTROL, val32); return; } /* * Standard 32-bit handling of command and transfer mode. 
*/ if (off == SDHCI_TRANSFER_MODE) { sc->cmd_and_mode = (sc->cmd_and_mode & 0xffff0000) | ((uint32_t)val & 0x0000ffff); return; } else if (off == SDHCI_COMMAND_FLAGS) { sc->cmd_and_mode = (sc->cmd_and_mode & 0x0000ffff) | ((uint32_t)val << 16); WR4(sc, SDHCI_TRANSFER_MODE, sc->cmd_and_mode); return; } val32 = RD4(sc, off & ~3); val32 &= ~(0xffff << (off & 3) * 8); val32 |= ((val & 0xffff) << (off & 3) * 8); WR4(sc, off & ~3, val32); } static void ti_sdhci_write_4(device_t dev, struct sdhci_slot *slot, bus_size_t off, uint32_t val) { struct ti_sdhci_softc *sc = device_get_softc(dev); WR4(sc, off, val); } static void ti_sdhci_write_multi_4(device_t dev, struct sdhci_slot *slot, bus_size_t off, uint32_t *data, bus_size_t count) { struct ti_sdhci_softc *sc = device_get_softc(dev); bus_write_multi_4(sc->mem_res, off + sc->sdhci_reg_off, data, count); } static void ti_sdhci_intr(void *arg) { struct ti_sdhci_softc *sc = arg; sdhci_generic_intr(&sc->slot); } static int ti_sdhci_update_ios(device_t brdev, device_t reqdev) { struct ti_sdhci_softc *sc = device_get_softc(brdev); struct sdhci_slot *slot; struct mmc_ios *ios; uint32_t val32, newval32; slot = device_get_ivars(reqdev); ios = &slot->host.ios; /* * There is an 8-bit-bus bit in the MMCHS control register which, when * set, overrides the 1 vs 4 bit setting in the standard SDHCI * registers. Set that bit first according to whether an 8-bit bus is * requested, then let the standard driver handle everything else. 
*/ val32 = ti_mmchs_read_4(sc, MMCHS_CON); newval32 = val32; if (ios->bus_width == bus_width_8) newval32 |= MMCHS_CON_DW8; else newval32 &= ~MMCHS_CON_DW8; if (ios->bus_mode == opendrain) newval32 |= MMCHS_CON_OD; else /* if (ios->bus_mode == pushpull) */ newval32 &= ~MMCHS_CON_OD; if (newval32 != val32) ti_mmchs_write_4(sc, MMCHS_CON, newval32); return (sdhci_generic_update_ios(brdev, reqdev)); } static int ti_sdhci_get_ro(device_t brdev, device_t reqdev) { struct ti_sdhci_softc *sc = device_get_softc(brdev); unsigned int readonly = 0; /* If a gpio pin is configured, read it. */ if (sc->gpio_dev != NULL) { GPIO_PIN_GET(sc->gpio_dev, sc->wp_gpio_pin, &readonly); } return (readonly); } static int ti_sdhci_detach(device_t dev) { return (EBUSY); } static void ti_sdhci_hw_init(device_t dev) { struct ti_sdhci_softc *sc = device_get_softc(dev); uint32_t regval; unsigned long timeout; /* Enable the controller and interface/functional clocks */ if (ti_prcm_clk_enable(sc->mmchs_clk_id) != 0) { device_printf(dev, "Error: failed to enable MMC clock\n"); return; } /* Get the frequency of the source clock */ if (ti_prcm_clk_get_source_freq(sc->mmchs_clk_id, &sc->baseclk_hz) != 0) { device_printf(dev, "Error: failed to get source clock freq\n"); return; } /* Issue a softreset to the controller */ ti_mmchs_write_4(sc, MMCHS_SYSCONFIG, MMCHS_SYSCONFIG_RESET); timeout = 1000; while (!(ti_mmchs_read_4(sc, MMCHS_SYSSTATUS) & MMCHS_SYSSTATUS_RESETDONE)) { if (--timeout == 0) { device_printf(dev, "Error: Controller reset operation timed out\n"); break; } DELAY(100); } /* * Reset the command and data state machines and also other aspects of * the controller such as bus clock and power. * * If we read the software reset register too fast after writing it we * can get back a zero that means the reset hasn't started yet rather * than that the reset is complete. Per TI recommendations, work around * it by reading until we see the reset bit asserted, then read until * it's clear. 
We also set the SDHCI_QUIRK_WAITFOR_RESET_ASSERTED quirk * so that the main sdhci driver uses this same logic in its resets. */ ti_sdhci_write_1(dev, NULL, SDHCI_SOFTWARE_RESET, SDHCI_RESET_ALL); timeout = 10000; while ((ti_sdhci_read_1(dev, NULL, SDHCI_SOFTWARE_RESET) & SDHCI_RESET_ALL) != SDHCI_RESET_ALL) { if (--timeout == 0) { break; } DELAY(1); } timeout = 10000; while ((ti_sdhci_read_1(dev, NULL, SDHCI_SOFTWARE_RESET) & SDHCI_RESET_ALL)) { if (--timeout == 0) { device_printf(dev, "Error: Software reset operation timed out\n"); break; } DELAY(100); } /* * The attach() routine has examined fdt data and set flags in * slot.host.caps to reflect what voltages we can handle. Set those * values in the CAPA register. The manual says that these values can * only be set once, "before initialization" whatever that means, and * that they survive a reset. So maybe doing this will be a no-op if * u-boot has already initialized the hardware. */ regval = ti_mmchs_read_4(sc, MMCHS_SD_CAPA); if (sc->slot.host.caps & MMC_OCR_LOW_VOLTAGE) regval |= MMCHS_SD_CAPA_VS18; if (sc->slot.host.caps & (MMC_OCR_290_300 | MMC_OCR_300_310)) regval |= MMCHS_SD_CAPA_VS30; ti_mmchs_write_4(sc, MMCHS_SD_CAPA, regval); /* Set initial host configuration (1-bit, std speed, pwr off). */ ti_sdhci_write_1(dev, NULL, SDHCI_HOST_CONTROL, 0); ti_sdhci_write_1(dev, NULL, SDHCI_POWER_CONTROL, 0); /* Set the initial controller configuration. */ ti_mmchs_write_4(sc, MMCHS_CON, MMCHS_CON_DVAL_8_4MS); } static int ti_sdhci_attach(device_t dev) { struct ti_sdhci_softc *sc = device_get_softc(dev); int rid, err; pcell_t prop; phandle_t node; sc->dev = dev; /* * Get the MMCHS device id from FDT. If it's not there use the newbus * unit number (which will work as long as the devices are in order and * none are skipped in the fdt). Note that this is a property we made * up and added in freebsd, it doesn't exist in the published bindings. 
*/ node = ofw_bus_get_node(dev); sc->mmchs_clk_id = ti_hwmods_get_clock(dev); if (sc->mmchs_clk_id == INVALID_CLK_IDENT) { device_printf(dev, "failed to get clock based on hwmods property\n"); } /* * The hardware can inherently do dual-voltage (1p8v, 3p0v) on the first * device, and only 1p8v on other devices unless an external transceiver * is used. The only way we could know about a transceiver is fdt data. * Note that we have to do this before calling ti_sdhci_hw_init() so * that it can set the right values in the CAPA register, which can only * be done once and never reset. */ sc->slot.host.caps |= MMC_OCR_LOW_VOLTAGE; if (sc->mmchs_clk_id == MMC1_CLK || OF_hasprop(node, "ti,dual-volt")) { sc->slot.host.caps |= MMC_OCR_290_300 | MMC_OCR_300_310; } /* * See if we've got a GPIO-based write detect pin. This is not the * standard documented property for this, we added it in freebsd. */ if ((OF_getprop(node, "mmchs-wp-gpio-pin", &prop, sizeof(prop))) <= 0) sc->wp_gpio_pin = 0xffffffff; else sc->wp_gpio_pin = fdt32_to_cpu(prop); if (sc->wp_gpio_pin != 0xffffffff) { sc->gpio_dev = devclass_get_device(devclass_find("gpio"), 0); if (sc->gpio_dev == NULL) device_printf(dev, "Error: No GPIO device, " "Write Protect pin will not function\n"); else GPIO_PIN_SETFLAGS(sc->gpio_dev, sc->wp_gpio_pin, GPIO_PIN_INPUT); } /* * Set the offset from the device's memory start to the MMCHS registers. * Also for OMAP4 disable high speed mode due to erratum ID i626. */ switch (ti_chip()) { #ifdef SOC_OMAP4 case CHIP_OMAP_4: sc->mmchs_reg_off = OMAP4_MMCHS_REG_OFFSET; sc->disable_highspeed = true; break; #endif #ifdef SOC_TI_AM335X case CHIP_AM335X: sc->mmchs_reg_off = AM335X_MMCHS_REG_OFFSET; break; #endif default: panic("Unknown OMAP device\n"); } /* * The standard SDHCI registers are at a fixed offset (the same on all * SoCs) beyond the MMCHS registers. */ sc->sdhci_reg_off = sc->mmchs_reg_off + SDHCI_REG_OFFSET; /* Resource setup. 
*/ rid = 0; sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (!sc->mem_res) { device_printf(dev, "cannot allocate memory window\n"); err = ENXIO; goto fail; } rid = 0; sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE); if (!sc->irq_res) { device_printf(dev, "cannot allocate interrupt\n"); err = ENXIO; goto fail; } if (bus_setup_intr(dev, sc->irq_res, INTR_TYPE_BIO | INTR_MPSAFE, NULL, ti_sdhci_intr, sc, &sc->intr_cookie)) { device_printf(dev, "cannot setup interrupt handler\n"); err = ENXIO; goto fail; } /* Initialise the MMCHS hardware. */ ti_sdhci_hw_init(dev); /* * The capabilities register can only express base clock frequencies in * the range of 0-63MHz for a v2.0 controller. Since our clock runs * faster than that, the hardware sets the frequency to zero in the * register. When the register contains zero, the sdhci driver expects * slot.max_clk to already have the right value in it. */ sc->slot.max_clk = sc->baseclk_hz; /* * The MMCHS timeout counter is based on the output sdclock. Tell the * sdhci driver to recalculate the timeout clock whenever the output * sdclock frequency changes. */ sc->slot.quirks |= SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK; /* * The MMCHS hardware shifts the 136-bit response data (in violation of * the spec), so tell the sdhci driver not to do the same in software. */ sc->slot.quirks |= SDHCI_QUIRK_DONT_SHIFT_RESPONSE; /* * Reset bits are broken, have to wait to see the bits asserted * before waiting to see them de-asserted. */ sc->slot.quirks |= SDHCI_QUIRK_WAITFOR_RESET_ASSERTED; /* * DMA is not really broken, I just haven't implemented it yet. */ sc->slot.quirks |= SDHCI_QUIRK_BROKEN_DMA; /* * Set up the hardware and go. Note that this sets many of the * slot.host.* fields, so we have to do this before overriding any of * those values based on fdt data, below. 
*/ sdhci_init_slot(dev, &sc->slot, 0); /* * The SDHCI controller doesn't realize it, but we can support 8-bit * even though we're not a v3.0 controller. If there's an fdt bus-width * property, honor it. */ if (OF_getencprop(node, "bus-width", &prop, sizeof(prop)) > 0) { sc->slot.host.caps &= ~(MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA); switch (prop) { case 8: sc->slot.host.caps |= MMC_CAP_8_BIT_DATA; /* FALLTHROUGH */ case 4: sc->slot.host.caps |= MMC_CAP_4_BIT_DATA; break; case 1: break; default: device_printf(dev, "Bad bus-width value %u\n", prop); break; } } /* * If the slot is flagged with the non-removable property, set our flag * to always force the SDHCI_CARD_PRESENT bit on. */ node = ofw_bus_get_node(dev); if (OF_hasprop(node, "non-removable")) sc->force_card_present = true; bus_generic_probe(dev); bus_generic_attach(dev); sdhci_start_slot(&sc->slot); return (0); fail: if (sc->intr_cookie) bus_teardown_intr(dev, sc->irq_res, sc->intr_cookie); if (sc->irq_res) bus_release_resource(dev, SYS_RES_IRQ, 0, sc->irq_res); if (sc->mem_res) bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->mem_res); return (err); } static int ti_sdhci_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (ofw_bus_search_compatible(dev, compat_data)->ocd_data != 0) { device_set_desc(dev, "TI MMCHS (SDHCI 2.0)"); return (BUS_PROBE_DEFAULT); } return (ENXIO); } static device_method_t ti_sdhci_methods[] = { /* Device interface */ DEVMETHOD(device_probe, ti_sdhci_probe), DEVMETHOD(device_attach, ti_sdhci_attach), DEVMETHOD(device_detach, ti_sdhci_detach), /* Bus interface */ DEVMETHOD(bus_read_ivar, sdhci_generic_read_ivar), DEVMETHOD(bus_write_ivar, sdhci_generic_write_ivar), DEVMETHOD(bus_print_child, bus_generic_print_child), /* MMC bridge interface */ DEVMETHOD(mmcbr_update_ios, ti_sdhci_update_ios), DEVMETHOD(mmcbr_request, sdhci_generic_request), DEVMETHOD(mmcbr_get_ro, ti_sdhci_get_ro), DEVMETHOD(mmcbr_acquire_host, sdhci_generic_acquire_host), 
DEVMETHOD(mmcbr_release_host, sdhci_generic_release_host), /* SDHCI registers accessors */ DEVMETHOD(sdhci_read_1, ti_sdhci_read_1), DEVMETHOD(sdhci_read_2, ti_sdhci_read_2), DEVMETHOD(sdhci_read_4, ti_sdhci_read_4), DEVMETHOD(sdhci_read_multi_4, ti_sdhci_read_multi_4), DEVMETHOD(sdhci_write_1, ti_sdhci_write_1), DEVMETHOD(sdhci_write_2, ti_sdhci_write_2), DEVMETHOD(sdhci_write_4, ti_sdhci_write_4), DEVMETHOD(sdhci_write_multi_4, ti_sdhci_write_multi_4), DEVMETHOD_END }; static devclass_t ti_sdhci_devclass; static driver_t ti_sdhci_driver = { "sdhci_ti", ti_sdhci_methods, sizeof(struct ti_sdhci_softc), }; DRIVER_MODULE(sdhci_ti, simplebus, ti_sdhci_driver, ti_sdhci_devclass, 0, 0); MODULE_DEPEND(sdhci_ti, sdhci, 1, 1, 1); +DRIVER_MODULE(mmc, sdhci_ti, mmc_driver, mmc_devclass, NULL, NULL); Index: head/sys/dev/mmc/bridge.h =================================================================== --- head/sys/dev/mmc/bridge.h (revision 292179) +++ head/sys/dev/mmc/bridge.h (revision 292180) @@ -1,138 +1,143 @@ /*- * Copyright (c) 2006 M. Warner Losh. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * Portions of this software may have been developed with reference to * the SD Simplified Specification. The following disclaimer may apply: * * The following conditions apply to the release of the simplified * specification ("Simplified Specification") by the SD Card Association and * the SD Group. The Simplified Specification is a subset of the complete SD * Specification which is owned by the SD Card Association and the SD * Group. This Simplified Specification is provided on a non-confidential * basis subject to the disclaimers below. Any implementation of the * Simplified Specification may require a license from the SD Card * Association, SD Group, SD-3C LLC or other third parties. * * Disclaimers: * * The information contained in the Simplified Specification is presented only * as a standard specification for SD Cards and SD Host/Ancillary products and * is provided "AS-IS" without any representations or warranties of any * kind. No responsibility is assumed by the SD Group, SD-3C LLC or the SD * Card Association for any damages, any infringements of patents or other * right of the SD Group, SD-3C LLC, the SD Card Association or any third * parties, which may result from its use. No license is granted by * implication, estoppel or otherwise under any patent or other rights of the * SD Group, SD-3C LLC, the SD Card Association or any third party. 
Nothing * herein shall be construed as an obligation by the SD Group, the SD-3C LLC * or the SD Card Association to disclose or distribute any technical * information, know-how or other confidential information to any third party. * * $FreeBSD$ */ #ifndef DEV_MMC_BRIDGE_H #define DEV_MMC_BRIDGE_H +#include + /* * This file defines interfaces for the mmc bridge. The names chosen * are similar to or the same as the names used in Linux to allow for * easy porting of what Linux calls mmc host drivers. I use the * FreeBSD terminology of bridge and bus for consistancy with other * drivers in the system. This file corresponds roughly to the Linux * linux/mmc/host.h file. * * A mmc bridge is a chipset that can have one or more mmc and/or sd * cards attached to it. mmc cards are attached on a bus topology, * while sd and sdio cards are attached using a star topology (meaning * in practice each sd card has its own, independent slot). Each * mmcbr is assumed to be derived from the mmcbr. This is done to * allow for easier addition of bridges (as each bridge does not need * to be added to the mmcbus file). * * Attached to the mmc bridge is an mmcbus. The mmcbus is described * in dev/mmc/bus.h. */ /* * mmc_ios is a structure that is used to store the state of the mmc/sd * bus configuration. This include the bus' clock speed, its voltage, * the bus mode for command output, the SPI chip select, some power * states and the bus width. 
*/ enum mmc_vdd { vdd_150 = 0, vdd_155, vdd_160, vdd_165, vdd_170, vdd_180, vdd_190, vdd_200, vdd_210, vdd_220, vdd_230, vdd_240, vdd_250, vdd_260, vdd_270, vdd_280, vdd_290, vdd_300, vdd_310, vdd_320, vdd_330, vdd_340, vdd_350, vdd_360 }; enum mmc_power_mode { power_off = 0, power_up, power_on }; enum mmc_bus_mode { opendrain = 1, pushpull }; enum mmc_chip_select { cs_dontcare = 0, cs_high, cs_low }; enum mmc_bus_width { bus_width_1 = 0, bus_width_4 = 2, bus_width_8 = 3 }; enum mmc_bus_timing { bus_timing_normal = 0, bus_timing_hs }; struct mmc_ios { uint32_t clock; /* Speed of the clock in Hz to move data */ enum mmc_vdd vdd; /* Voltage to apply to the power pins/ */ enum mmc_bus_mode bus_mode; enum mmc_chip_select chip_select; enum mmc_bus_width bus_width; enum mmc_power_mode power_mode; enum mmc_bus_timing timing; }; enum mmc_card_mode { mode_mmc, mode_sd }; struct mmc_host { int f_min; int f_max; uint32_t host_ocr; uint32_t ocr; uint32_t caps; #define MMC_CAP_4_BIT_DATA (1 << 0) /* Can do 4-bit data transfers */ #define MMC_CAP_8_BIT_DATA (1 << 1) /* Can do 8-bit data transfers */ #define MMC_CAP_HSPEED (1 << 2) /* Can do High Speed transfers */ enum mmc_card_mode mode; struct mmc_ios ios; /* Current state of the host */ }; + +extern driver_t mmc_driver; +extern devclass_t mmc_devclass; #endif /* DEV_MMC_BRIDGE_H */ Index: head/sys/dev/mmc/host/dwmmc.c =================================================================== --- head/sys/dev/mmc/host/dwmmc.c (revision 292179) +++ head/sys/dev/mmc/host/dwmmc.c (revision 292180) @@ -1,1180 +1,1180 @@ /*- * Copyright (c) 2014 Ruslan Bukin * All rights reserved. * * This software was developed by SRI International and the University of * Cambridge Computer Laboratory under DARPA/AFRL contract (FA8750-10-C-0237) * ("CTSRD"), as part of the DARPA CRASH research programme. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * Synopsys DesignWare Mobile Storage Host Controller * Chapter 14, Altera Cyclone V Device Handbook (CV-5V2 2014.07.22) */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "mmcbr_if.h" #define dprintf(x, arg...) 
#define READ4(_sc, _reg) \ bus_read_4((_sc)->res[0], _reg) #define WRITE4(_sc, _reg, _val) \ bus_write_4((_sc)->res[0], _reg, _val) #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d)) #define DWMMC_LOCK(_sc) mtx_lock(&(_sc)->sc_mtx) #define DWMMC_UNLOCK(_sc) mtx_unlock(&(_sc)->sc_mtx) #define DWMMC_LOCK_INIT(_sc) \ mtx_init(&_sc->sc_mtx, device_get_nameunit(_sc->dev), \ "dwmmc", MTX_DEF) #define DWMMC_LOCK_DESTROY(_sc) mtx_destroy(&_sc->sc_mtx); #define DWMMC_ASSERT_LOCKED(_sc) mtx_assert(&_sc->sc_mtx, MA_OWNED); #define DWMMC_ASSERT_UNLOCKED(_sc) mtx_assert(&_sc->sc_mtx, MA_NOTOWNED); #define PENDING_CMD 0x01 #define PENDING_STOP 0x02 #define CARD_INIT_DONE 0x04 #define DWMMC_DATA_ERR_FLAGS (SDMMC_INTMASK_DRT | SDMMC_INTMASK_DCRC \ |SDMMC_INTMASK_HTO | SDMMC_INTMASK_SBE \ |SDMMC_INTMASK_EBE) #define DWMMC_CMD_ERR_FLAGS (SDMMC_INTMASK_RTO | SDMMC_INTMASK_RCRC \ |SDMMC_INTMASK_RE) #define DWMMC_ERR_FLAGS (DWMMC_DATA_ERR_FLAGS | DWMMC_CMD_ERR_FLAGS \ |SDMMC_INTMASK_HLE) #define DES0_DIC (1 << 1) #define DES0_LD (1 << 2) #define DES0_FS (1 << 3) #define DES0_CH (1 << 4) #define DES0_ER (1 << 5) #define DES0_CES (1 << 30) #define DES0_OWN (1 << 31) #define DES1_BS1_MASK 0xfff #define DES1_BS1_SHIFT 0 struct idmac_desc { uint32_t des0; /* control */ uint32_t des1; /* bufsize */ uint32_t des2; /* buf1 phys addr */ uint32_t des3; /* buf2 phys addr or next descr */ }; #define DESC_MAX 256 #define DESC_SIZE (sizeof(struct idmac_desc) * DESC_MAX) #define DEF_MSIZE 0x2 /* Burst size of multiple transaction */ static void dwmmc_next_operation(struct dwmmc_softc *); static int dwmmc_setup_bus(struct dwmmc_softc *, int); static int dma_done(struct dwmmc_softc *, struct mmc_command *); static int dma_stop(struct dwmmc_softc *); static void pio_read(struct dwmmc_softc *, struct mmc_command *); static void pio_write(struct dwmmc_softc *, struct mmc_command *); static struct resource_spec dwmmc_spec[] = { { SYS_RES_MEMORY, 0, RF_ACTIVE }, { SYS_RES_IRQ, 0, RF_ACTIVE }, { -1, 0 } }; 
#define HWTYPE_MASK (0x0000ffff) #define HWFLAG_MASK (0xffff << 16) static struct ofw_compat_data compat_data[] = { {"altr,socfpga-dw-mshc", HWTYPE_ALTERA}, {"samsung,exynos5420-dw-mshc", HWTYPE_EXYNOS}, {"rockchip,rk2928-dw-mshc", HWTYPE_ROCKCHIP}, {NULL, HWTYPE_NONE}, }; static void dwmmc_get1paddr(void *arg, bus_dma_segment_t *segs, int nsegs, int error) { if (error != 0) return; *(bus_addr_t *)arg = segs[0].ds_addr; } static void dwmmc_ring_setup(void *arg, bus_dma_segment_t *segs, int nsegs, int error) { struct dwmmc_softc *sc; int idx; if (error != 0) return; sc = arg; dprintf("nsegs %d seg0len %lu\n", nsegs, segs[0].ds_len); for (idx = 0; idx < nsegs; idx++) { sc->desc_ring[idx].des0 = (DES0_OWN | DES0_DIC | DES0_CH); sc->desc_ring[idx].des1 = segs[idx].ds_len; sc->desc_ring[idx].des2 = segs[idx].ds_addr; if (idx == 0) sc->desc_ring[idx].des0 |= DES0_FS; if (idx == (nsegs - 1)) { sc->desc_ring[idx].des0 &= ~(DES0_DIC | DES0_CH); sc->desc_ring[idx].des0 |= DES0_LD; } } } static int dwmmc_ctrl_reset(struct dwmmc_softc *sc, int reset_bits) { int reg; int i; reg = READ4(sc, SDMMC_CTRL); reg |= (reset_bits); WRITE4(sc, SDMMC_CTRL, reg); /* Wait reset done */ for (i = 0; i < 100; i++) { if (!(READ4(sc, SDMMC_CTRL) & reset_bits)) return (0); DELAY(10); }; device_printf(sc->dev, "Reset failed\n"); return (1); } static int dma_setup(struct dwmmc_softc *sc) { int error; int nidx; int idx; /* * Set up TX descriptor ring, descriptors, and dma maps. */ error = bus_dma_tag_create( bus_get_dma_tag(sc->dev), /* Parent tag. 
*/ 4096, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ DESC_SIZE, 1, /* maxsize, nsegments */ DESC_SIZE, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->desc_tag); if (error != 0) { device_printf(sc->dev, "could not create ring DMA tag.\n"); return (1); } error = bus_dmamem_alloc(sc->desc_tag, (void**)&sc->desc_ring, BUS_DMA_COHERENT | BUS_DMA_WAITOK | BUS_DMA_ZERO, &sc->desc_map); if (error != 0) { device_printf(sc->dev, "could not allocate descriptor ring.\n"); return (1); } error = bus_dmamap_load(sc->desc_tag, sc->desc_map, sc->desc_ring, DESC_SIZE, dwmmc_get1paddr, &sc->desc_ring_paddr, 0); if (error != 0) { device_printf(sc->dev, "could not load descriptor ring map.\n"); return (1); } for (idx = 0; idx < sc->desc_count; idx++) { sc->desc_ring[idx].des0 = DES0_CH; sc->desc_ring[idx].des1 = 0; nidx = (idx + 1) % sc->desc_count; sc->desc_ring[idx].des3 = sc->desc_ring_paddr + \ (nidx * sizeof(struct idmac_desc)); } error = bus_dma_tag_create( bus_get_dma_tag(sc->dev), /* Parent tag. 
*/ 4096, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ sc->desc_count * MMC_SECTOR_SIZE, /* maxsize */ sc->desc_count, /* nsegments */ MMC_SECTOR_SIZE, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->buf_tag); if (error != 0) { device_printf(sc->dev, "could not create ring DMA tag.\n"); return (1); } error = bus_dmamap_create(sc->buf_tag, 0, &sc->buf_map); if (error != 0) { device_printf(sc->dev, "could not create TX buffer DMA map.\n"); return (1); } return (0); } static void dwmmc_cmd_done(struct dwmmc_softc *sc) { struct mmc_command *cmd; cmd = sc->curcmd; if (cmd == NULL) return; if (cmd->flags & MMC_RSP_PRESENT) { if (cmd->flags & MMC_RSP_136) { cmd->resp[3] = READ4(sc, SDMMC_RESP0); cmd->resp[2] = READ4(sc, SDMMC_RESP1); cmd->resp[1] = READ4(sc, SDMMC_RESP2); cmd->resp[0] = READ4(sc, SDMMC_RESP3); } else { cmd->resp[3] = 0; cmd->resp[2] = 0; cmd->resp[1] = 0; cmd->resp[0] = READ4(sc, SDMMC_RESP0); } } } static void dwmmc_tasklet(struct dwmmc_softc *sc) { struct mmc_command *cmd; cmd = sc->curcmd; if (cmd == NULL) return; if (!sc->cmd_done) return; if (cmd->error != MMC_ERR_NONE || !cmd->data) { dwmmc_next_operation(sc); } else if (cmd->data && sc->dto_rcvd) { if ((cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK || cmd->opcode == MMC_READ_MULTIPLE_BLOCK) && sc->use_auto_stop) { if (sc->acd_rcvd) dwmmc_next_operation(sc); } else { dwmmc_next_operation(sc); } } } static void dwmmc_intr(void *arg) { struct mmc_command *cmd; struct dwmmc_softc *sc; uint32_t reg; sc = arg; DWMMC_LOCK(sc); cmd = sc->curcmd; /* First handle SDMMC controller interrupts */ reg = READ4(sc, SDMMC_MINTSTS); if (reg) { dprintf("%s 0x%08x\n", __func__, reg); if (reg & DWMMC_CMD_ERR_FLAGS) { WRITE4(sc, SDMMC_RINTSTS, DWMMC_CMD_ERR_FLAGS); dprintf("cmd err 0x%08x cmd 0x%08x\n", reg, cmd->opcode); cmd->error = MMC_ERR_TIMEOUT; } if (reg & DWMMC_DATA_ERR_FLAGS) { WRITE4(sc, 
SDMMC_RINTSTS, DWMMC_DATA_ERR_FLAGS); dprintf("data err 0x%08x cmd 0x%08x\n", reg, cmd->opcode); cmd->error = MMC_ERR_FAILED; if (!sc->use_pio) { dma_done(sc, cmd); dma_stop(sc); } } if (reg & SDMMC_INTMASK_CMD_DONE) { dwmmc_cmd_done(sc); sc->cmd_done = 1; WRITE4(sc, SDMMC_RINTSTS, SDMMC_INTMASK_CMD_DONE); } if (reg & SDMMC_INTMASK_ACD) { sc->acd_rcvd = 1; WRITE4(sc, SDMMC_RINTSTS, SDMMC_INTMASK_ACD); } if (reg & SDMMC_INTMASK_DTO) { sc->dto_rcvd = 1; WRITE4(sc, SDMMC_RINTSTS, SDMMC_INTMASK_DTO); } if (reg & SDMMC_INTMASK_CD) { /* XXX: Handle card detect */ WRITE4(sc, SDMMC_RINTSTS, SDMMC_INTMASK_CD); } } if (sc->use_pio) { if (reg & (SDMMC_INTMASK_RXDR|SDMMC_INTMASK_DTO)) { pio_read(sc, cmd); } if (reg & (SDMMC_INTMASK_TXDR|SDMMC_INTMASK_DTO)) { pio_write(sc, cmd); } } else { /* Now handle DMA interrupts */ reg = READ4(sc, SDMMC_IDSTS); if (reg) { dprintf("dma intr 0x%08x\n", reg); if (reg & (SDMMC_IDINTEN_TI | SDMMC_IDINTEN_RI)) { WRITE4(sc, SDMMC_IDSTS, (SDMMC_IDINTEN_TI | SDMMC_IDINTEN_RI)); WRITE4(sc, SDMMC_IDSTS, SDMMC_IDINTEN_NI); dma_done(sc, cmd); } } } dwmmc_tasklet(sc); DWMMC_UNLOCK(sc); } static int parse_fdt(struct dwmmc_softc *sc) { pcell_t dts_value[3]; phandle_t node; int len; if ((node = ofw_bus_get_node(sc->dev)) == -1) return (ENXIO); /* fifo-depth */ if ((len = OF_getproplen(node, "fifo-depth")) > 0) { OF_getencprop(node, "fifo-depth", dts_value, len); sc->fifo_depth = dts_value[0]; } /* num-slots */ sc->num_slots = 1; if ((len = OF_getproplen(node, "num-slots")) > 0) { OF_getencprop(node, "num-slots", dts_value, len); sc->num_slots = dts_value[0]; } /* * We need some platform-specific code to know * what the clock is supplied for our device. * For now rely on the value specified in FDT. 
*/ if (sc->bus_hz == 0) { if ((len = OF_getproplen(node, "bus-frequency")) <= 0) return (ENXIO); OF_getencprop(node, "bus-frequency", dts_value, len); sc->bus_hz = dts_value[0]; } /* * Platform-specific stuff * XXX: Move to separate file */ if ((sc->hwtype & HWTYPE_MASK) != HWTYPE_EXYNOS) return (0); if ((len = OF_getproplen(node, "samsung,dw-mshc-ciu-div")) <= 0) return (ENXIO); OF_getencprop(node, "samsung,dw-mshc-ciu-div", dts_value, len); sc->sdr_timing = (dts_value[0] << SDMMC_CLKSEL_DIVIDER_SHIFT); sc->ddr_timing = (dts_value[0] << SDMMC_CLKSEL_DIVIDER_SHIFT); if ((len = OF_getproplen(node, "samsung,dw-mshc-sdr-timing")) <= 0) return (ENXIO); OF_getencprop(node, "samsung,dw-mshc-sdr-timing", dts_value, len); sc->sdr_timing |= ((dts_value[0] << SDMMC_CLKSEL_SAMPLE_SHIFT) | (dts_value[1] << SDMMC_CLKSEL_DRIVE_SHIFT)); if ((len = OF_getproplen(node, "samsung,dw-mshc-ddr-timing")) <= 0) return (ENXIO); OF_getencprop(node, "samsung,dw-mshc-ddr-timing", dts_value, len); sc->ddr_timing |= ((dts_value[0] << SDMMC_CLKSEL_SAMPLE_SHIFT) | (dts_value[1] << SDMMC_CLKSEL_DRIVE_SHIFT)); return (0); } static int dwmmc_probe(device_t dev) { uintptr_t hwtype; if (!ofw_bus_status_okay(dev)) return (ENXIO); hwtype = ofw_bus_search_compatible(dev, compat_data)->ocd_data; if (hwtype == HWTYPE_NONE) return (ENXIO); device_set_desc(dev, "Synopsys DesignWare Mobile " "Storage Host Controller"); return (BUS_PROBE_DEFAULT); } int dwmmc_attach(device_t dev) { struct dwmmc_softc *sc; int error; int slot; sc = device_get_softc(dev); sc->dev = dev; if (sc->hwtype == HWTYPE_NONE) { sc->hwtype = ofw_bus_search_compatible(dev, compat_data)->ocd_data; } /* Why not to use Auto Stop? 
It save a hundred of irq per second */ sc->use_auto_stop = 1; error = parse_fdt(sc); if (error != 0) { device_printf(dev, "Can't get FDT property.\n"); return (ENXIO); } DWMMC_LOCK_INIT(sc); if (bus_alloc_resources(dev, dwmmc_spec, sc->res)) { device_printf(dev, "could not allocate resources\n"); return (ENXIO); } /* Setup interrupt handler. */ error = bus_setup_intr(dev, sc->res[1], INTR_TYPE_NET | INTR_MPSAFE, NULL, dwmmc_intr, sc, &sc->intr_cookie); if (error != 0) { device_printf(dev, "could not setup interrupt handler.\n"); return (ENXIO); } device_printf(dev, "Hardware version ID is %04x\n", READ4(sc, SDMMC_VERID) & 0xffff); if (sc->desc_count == 0) sc->desc_count = DESC_MAX; if ((sc->hwtype & HWTYPE_MASK) == HWTYPE_ROCKCHIP) { sc->use_pio = 1; sc->pwren_inverted = 1; } else if ((sc->hwtype & HWTYPE_MASK) == HWTYPE_EXYNOS) { WRITE4(sc, EMMCP_MPSBEGIN0, 0); WRITE4(sc, EMMCP_SEND0, 0); WRITE4(sc, EMMCP_CTRL0, (MPSCTRL_SECURE_READ_BIT | MPSCTRL_SECURE_WRITE_BIT | MPSCTRL_NON_SECURE_READ_BIT | MPSCTRL_NON_SECURE_WRITE_BIT | MPSCTRL_VALID)); } /* XXX: we support operation for slot index 0 only */ slot = 0; if (sc->pwren_inverted) { WRITE4(sc, SDMMC_PWREN, (0 << slot)); } else { WRITE4(sc, SDMMC_PWREN, (1 << slot)); } /* Reset all */ if (dwmmc_ctrl_reset(sc, (SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET | SDMMC_CTRL_DMA_RESET))) return (ENXIO); dwmmc_setup_bus(sc, sc->host.f_min); if (sc->fifo_depth == 0) { sc->fifo_depth = 1 + ((READ4(sc, SDMMC_FIFOTH) >> SDMMC_FIFOTH_RXWMARK_S) & 0xfff); device_printf(dev, "No fifo-depth, using FIFOTH %x\n", sc->fifo_depth); } if (!sc->use_pio) { if (dma_setup(sc)) return (ENXIO); /* Install desc base */ WRITE4(sc, SDMMC_DBADDR, sc->desc_ring_paddr); /* Enable DMA interrupts */ WRITE4(sc, SDMMC_IDSTS, SDMMC_IDINTEN_MASK); WRITE4(sc, SDMMC_IDINTEN, (SDMMC_IDINTEN_NI | SDMMC_IDINTEN_RI | SDMMC_IDINTEN_TI)); } /* Clear and disable interrups for a while */ WRITE4(sc, SDMMC_RINTSTS, 0xffffffff); WRITE4(sc, SDMMC_INTMASK, 0); /* Maximum 
timeout */ WRITE4(sc, SDMMC_TMOUT, 0xffffffff); /* Enable interrupts */ WRITE4(sc, SDMMC_RINTSTS, 0xffffffff); WRITE4(sc, SDMMC_INTMASK, (SDMMC_INTMASK_CMD_DONE | SDMMC_INTMASK_DTO | SDMMC_INTMASK_ACD | SDMMC_INTMASK_TXDR | SDMMC_INTMASK_RXDR | DWMMC_ERR_FLAGS | SDMMC_INTMASK_CD)); WRITE4(sc, SDMMC_CTRL, SDMMC_CTRL_INT_ENABLE); sc->host.f_min = 400000; sc->host.f_max = min(200000000, sc->bus_hz); sc->host.host_ocr = MMC_OCR_320_330 | MMC_OCR_330_340; sc->host.caps = MMC_CAP_4_BIT_DATA; device_add_child(dev, "mmc", -1); return (bus_generic_attach(dev)); } static int dwmmc_setup_bus(struct dwmmc_softc *sc, int freq) { int tout; int div; if (freq == 0) { WRITE4(sc, SDMMC_CLKENA, 0); WRITE4(sc, SDMMC_CMD, (SDMMC_CMD_WAIT_PRVDATA | SDMMC_CMD_UPD_CLK_ONLY | SDMMC_CMD_START)); tout = 1000; do { if (tout-- < 0) { device_printf(sc->dev, "Failed update clk\n"); return (1); } } while (READ4(sc, SDMMC_CMD) & SDMMC_CMD_START); return (0); } WRITE4(sc, SDMMC_CLKENA, 0); WRITE4(sc, SDMMC_CLKSRC, 0); div = (sc->bus_hz != freq) ? 
DIV_ROUND_UP(sc->bus_hz, 2 * freq) : 0; WRITE4(sc, SDMMC_CLKDIV, div); WRITE4(sc, SDMMC_CMD, (SDMMC_CMD_WAIT_PRVDATA | SDMMC_CMD_UPD_CLK_ONLY | SDMMC_CMD_START)); tout = 1000; do { if (tout-- < 0) { device_printf(sc->dev, "Failed to update clk"); return (1); } } while (READ4(sc, SDMMC_CMD) & SDMMC_CMD_START); WRITE4(sc, SDMMC_CLKENA, (SDMMC_CLKENA_CCLK_EN | SDMMC_CLKENA_LP)); WRITE4(sc, SDMMC_CMD, SDMMC_CMD_WAIT_PRVDATA | SDMMC_CMD_UPD_CLK_ONLY | SDMMC_CMD_START); tout = 1000; do { if (tout-- < 0) { device_printf(sc->dev, "Failed to enable clk\n"); return (1); } } while (READ4(sc, SDMMC_CMD) & SDMMC_CMD_START); return (0); } static int dwmmc_update_ios(device_t brdev, device_t reqdev) { struct dwmmc_softc *sc; struct mmc_ios *ios; sc = device_get_softc(brdev); ios = &sc->host.ios; dprintf("Setting up clk %u bus_width %d\n", ios->clock, ios->bus_width); dwmmc_setup_bus(sc, ios->clock); if (ios->bus_width == bus_width_8) WRITE4(sc, SDMMC_CTYPE, SDMMC_CTYPE_8BIT); else if (ios->bus_width == bus_width_4) WRITE4(sc, SDMMC_CTYPE, SDMMC_CTYPE_4BIT); else WRITE4(sc, SDMMC_CTYPE, 0); if ((sc->hwtype & HWTYPE_MASK) == HWTYPE_EXYNOS) { /* XXX: take care about DDR or SDR use here */ WRITE4(sc, SDMMC_CLKSEL, sc->sdr_timing); } /* * XXX: take care about DDR bit * * reg = READ4(sc, SDMMC_UHS_REG); * reg |= (SDMMC_UHS_REG_DDR); * WRITE4(sc, SDMMC_UHS_REG, reg); */ return (0); } static int dma_done(struct dwmmc_softc *sc, struct mmc_command *cmd) { struct mmc_data *data; data = cmd->data; if (data->flags & MMC_DATA_WRITE) bus_dmamap_sync(sc->buf_tag, sc->buf_map, BUS_DMASYNC_POSTWRITE); else bus_dmamap_sync(sc->buf_tag, sc->buf_map, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc->buf_tag, sc->buf_map); return (0); } static int dma_stop(struct dwmmc_softc *sc) { int reg; reg = READ4(sc, SDMMC_CTRL); reg &= ~(SDMMC_CTRL_USE_IDMAC); reg |= (SDMMC_CTRL_DMA_RESET); WRITE4(sc, SDMMC_CTRL, reg); reg = READ4(sc, SDMMC_BMOD); reg &= ~(SDMMC_BMOD_DE | SDMMC_BMOD_FB); reg |= (SDMMC_BMOD_SWR); 
WRITE4(sc, SDMMC_BMOD, reg); return (0); } static int dma_prepare(struct dwmmc_softc *sc, struct mmc_command *cmd) { struct mmc_data *data; int len; int err; int reg; data = cmd->data; len = data->len; reg = READ4(sc, SDMMC_INTMASK); reg &= ~(SDMMC_INTMASK_TXDR | SDMMC_INTMASK_RXDR); WRITE4(sc, SDMMC_INTMASK, reg); err = bus_dmamap_load(sc->buf_tag, sc->buf_map, data->data, data->len, dwmmc_ring_setup, sc, BUS_DMA_NOWAIT); if (err != 0) panic("dmamap_load failed\n"); if (data->flags & MMC_DATA_WRITE) bus_dmamap_sync(sc->buf_tag, sc->buf_map, BUS_DMASYNC_PREWRITE); else bus_dmamap_sync(sc->buf_tag, sc->buf_map, BUS_DMASYNC_PREREAD); reg = (DEF_MSIZE << SDMMC_FIFOTH_MSIZE_S); reg |= ((sc->fifo_depth / 2) - 1) << SDMMC_FIFOTH_RXWMARK_S; reg |= (sc->fifo_depth / 2) << SDMMC_FIFOTH_TXWMARK_S; WRITE4(sc, SDMMC_FIFOTH, reg); wmb(); reg = READ4(sc, SDMMC_CTRL); reg |= (SDMMC_CTRL_USE_IDMAC | SDMMC_CTRL_DMA_ENABLE); WRITE4(sc, SDMMC_CTRL, reg); wmb(); reg = READ4(sc, SDMMC_BMOD); reg |= (SDMMC_BMOD_DE | SDMMC_BMOD_FB); WRITE4(sc, SDMMC_BMOD, reg); /* Start */ WRITE4(sc, SDMMC_PLDMND, 1); return (0); } static int pio_prepare(struct dwmmc_softc *sc, struct mmc_command *cmd) { struct mmc_data *data; int reg; data = cmd->data; data->xfer_len = 0; reg = (DEF_MSIZE << SDMMC_FIFOTH_MSIZE_S); reg |= ((sc->fifo_depth / 2) - 1) << SDMMC_FIFOTH_RXWMARK_S; reg |= (sc->fifo_depth / 2) << SDMMC_FIFOTH_TXWMARK_S; WRITE4(sc, SDMMC_FIFOTH, reg); wmb(); return (0); } static void pio_read(struct dwmmc_softc *sc, struct mmc_command *cmd) { struct mmc_data *data; uint32_t *p, status; if (cmd == NULL || cmd->data == NULL) return; data = cmd->data; if ((data->flags & MMC_DATA_READ) == 0) return; KASSERT((data->xfer_len & 3) == 0, ("xfer_len not aligned")); p = (uint32_t *)data->data + (data->xfer_len >> 2); while (data->xfer_len < data->len) { status = READ4(sc, SDMMC_STATUS); if (status & SDMMC_STATUS_FIFO_EMPTY) break; *p++ = READ4(sc, SDMMC_DATA); data->xfer_len += 4; } WRITE4(sc, 
SDMMC_RINTSTS, SDMMC_INTMASK_RXDR); } static void pio_write(struct dwmmc_softc *sc, struct mmc_command *cmd) { struct mmc_data *data; uint32_t *p, status; if (cmd == NULL || cmd->data == NULL) return; data = cmd->data; if ((data->flags & MMC_DATA_WRITE) == 0) return; KASSERT((data->xfer_len & 3) == 0, ("xfer_len not aligned")); p = (uint32_t *)data->data + (data->xfer_len >> 2); while (data->xfer_len < data->len) { status = READ4(sc, SDMMC_STATUS); if (status & SDMMC_STATUS_FIFO_FULL) break; WRITE4(sc, SDMMC_DATA, *p++); data->xfer_len += 4; } WRITE4(sc, SDMMC_RINTSTS, SDMMC_INTMASK_TXDR); } static void dwmmc_start_cmd(struct dwmmc_softc *sc, struct mmc_command *cmd) { struct mmc_data *data; uint32_t blksz; uint32_t cmdr; sc->curcmd = cmd; data = cmd->data; if ((sc->hwtype & HWTYPE_MASK) == HWTYPE_ROCKCHIP) dwmmc_setup_bus(sc, sc->host.ios.clock); /* XXX Upper layers don't always set this */ cmd->mrq = sc->req; /* Begin setting up command register. */ cmdr = cmd->opcode; dprintf("cmd->opcode 0x%08x\n", cmd->opcode); if (cmd->opcode == MMC_STOP_TRANSMISSION || cmd->opcode == MMC_GO_IDLE_STATE || cmd->opcode == MMC_GO_INACTIVE_STATE) cmdr |= SDMMC_CMD_STOP_ABORT; else if (cmd->opcode != MMC_SEND_STATUS && data) cmdr |= SDMMC_CMD_WAIT_PRVDATA; /* Set up response handling. */ if (MMC_RSP(cmd->flags) != MMC_RSP_NONE) { cmdr |= SDMMC_CMD_RESP_EXP; if (cmd->flags & MMC_RSP_136) cmdr |= SDMMC_CMD_RESP_LONG; } if (cmd->flags & MMC_RSP_CRC) cmdr |= SDMMC_CMD_RESP_CRC; /* * XXX: Not all platforms want this. 
*/ cmdr |= SDMMC_CMD_USE_HOLD_REG; if ((sc->flags & CARD_INIT_DONE) == 0) { sc->flags |= (CARD_INIT_DONE); cmdr |= SDMMC_CMD_SEND_INIT; } if (data) { if ((cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK || cmd->opcode == MMC_READ_MULTIPLE_BLOCK) && sc->use_auto_stop) cmdr |= SDMMC_CMD_SEND_ASTOP; cmdr |= SDMMC_CMD_DATA_EXP; if (data->flags & MMC_DATA_STREAM) cmdr |= SDMMC_CMD_MODE_STREAM; if (data->flags & MMC_DATA_WRITE) cmdr |= SDMMC_CMD_DATA_WRITE; WRITE4(sc, SDMMC_TMOUT, 0xffffffff); WRITE4(sc, SDMMC_BYTCNT, data->len); blksz = (data->len < MMC_SECTOR_SIZE) ? \ data->len : MMC_SECTOR_SIZE; WRITE4(sc, SDMMC_BLKSIZ, blksz); if (sc->use_pio) { pio_prepare(sc, cmd); } else { dma_prepare(sc, cmd); } wmb(); } dprintf("cmdr 0x%08x\n", cmdr); WRITE4(sc, SDMMC_CMDARG, cmd->arg); wmb(); WRITE4(sc, SDMMC_CMD, cmdr | SDMMC_CMD_START); }; static void dwmmc_next_operation(struct dwmmc_softc *sc) { struct mmc_request *req; req = sc->req; if (req == NULL) return; sc->acd_rcvd = 0; sc->dto_rcvd = 0; sc->cmd_done = 0; /* * XXX: Wait until card is still busy. * We do need this to prevent data timeouts, * mostly caused by multi-block write command * followed by single-read. 
*/ while(READ4(sc, SDMMC_STATUS) & (SDMMC_STATUS_DATA_BUSY)) continue; if (sc->flags & PENDING_CMD) { sc->flags &= ~PENDING_CMD; dwmmc_start_cmd(sc, req->cmd); return; } else if (sc->flags & PENDING_STOP && !sc->use_auto_stop) { sc->flags &= ~PENDING_STOP; dwmmc_start_cmd(sc, req->stop); return; } sc->req = NULL; sc->curcmd = NULL; req->done(req); } static int dwmmc_request(device_t brdev, device_t reqdev, struct mmc_request *req) { struct dwmmc_softc *sc; sc = device_get_softc(brdev); dprintf("%s\n", __func__); DWMMC_LOCK(sc); if (sc->req != NULL) { DWMMC_UNLOCK(sc); return (EBUSY); } sc->req = req; sc->flags |= PENDING_CMD; if (sc->req->stop) sc->flags |= PENDING_STOP; dwmmc_next_operation(sc); DWMMC_UNLOCK(sc); return (0); } static int dwmmc_get_ro(device_t brdev, device_t reqdev) { dprintf("%s\n", __func__); return (0); } static int dwmmc_acquire_host(device_t brdev, device_t reqdev) { struct dwmmc_softc *sc; sc = device_get_softc(brdev); DWMMC_LOCK(sc); while (sc->bus_busy) msleep(sc, &sc->sc_mtx, PZERO, "dwmmcah", hz / 5); sc->bus_busy++; DWMMC_UNLOCK(sc); return (0); } static int dwmmc_release_host(device_t brdev, device_t reqdev) { struct dwmmc_softc *sc; sc = device_get_softc(brdev); DWMMC_LOCK(sc); sc->bus_busy--; wakeup(sc); DWMMC_UNLOCK(sc); return (0); } static int dwmmc_read_ivar(device_t bus, device_t child, int which, uintptr_t *result) { struct dwmmc_softc *sc; sc = device_get_softc(bus); switch (which) { default: return (EINVAL); case MMCBR_IVAR_BUS_MODE: *(int *)result = sc->host.ios.bus_mode; break; case MMCBR_IVAR_BUS_WIDTH: *(int *)result = sc->host.ios.bus_width; break; case MMCBR_IVAR_CHIP_SELECT: *(int *)result = sc->host.ios.chip_select; break; case MMCBR_IVAR_CLOCK: *(int *)result = sc->host.ios.clock; break; case MMCBR_IVAR_F_MIN: *(int *)result = sc->host.f_min; break; case MMCBR_IVAR_F_MAX: *(int *)result = sc->host.f_max; break; case MMCBR_IVAR_HOST_OCR: *(int *)result = sc->host.host_ocr; break; case MMCBR_IVAR_MODE: *(int *)result = 
sc->host.mode; break; case MMCBR_IVAR_OCR: *(int *)result = sc->host.ocr; break; case MMCBR_IVAR_POWER_MODE: *(int *)result = sc->host.ios.power_mode; break; case MMCBR_IVAR_VDD: *(int *)result = sc->host.ios.vdd; break; case MMCBR_IVAR_CAPS: sc->host.caps |= MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA; *(int *)result = sc->host.caps; break; case MMCBR_IVAR_MAX_DATA: *(int *)result = sc->desc_count; } return (0); } static int dwmmc_write_ivar(device_t bus, device_t child, int which, uintptr_t value) { struct dwmmc_softc *sc; sc = device_get_softc(bus); switch (which) { default: return (EINVAL); case MMCBR_IVAR_BUS_MODE: sc->host.ios.bus_mode = value; break; case MMCBR_IVAR_BUS_WIDTH: sc->host.ios.bus_width = value; break; case MMCBR_IVAR_CHIP_SELECT: sc->host.ios.chip_select = value; break; case MMCBR_IVAR_CLOCK: sc->host.ios.clock = value; break; case MMCBR_IVAR_MODE: sc->host.mode = value; break; case MMCBR_IVAR_OCR: sc->host.ocr = value; break; case MMCBR_IVAR_POWER_MODE: sc->host.ios.power_mode = value; break; case MMCBR_IVAR_VDD: sc->host.ios.vdd = value; break; /* These are read-only */ case MMCBR_IVAR_CAPS: case MMCBR_IVAR_HOST_OCR: case MMCBR_IVAR_F_MIN: case MMCBR_IVAR_F_MAX: case MMCBR_IVAR_MAX_DATA: return (EINVAL); } return (0); } static device_method_t dwmmc_methods[] = { DEVMETHOD(device_probe, dwmmc_probe), DEVMETHOD(device_attach, dwmmc_attach), /* Bus interface */ DEVMETHOD(bus_read_ivar, dwmmc_read_ivar), DEVMETHOD(bus_write_ivar, dwmmc_write_ivar), /* mmcbr_if */ DEVMETHOD(mmcbr_update_ios, dwmmc_update_ios), DEVMETHOD(mmcbr_request, dwmmc_request), DEVMETHOD(mmcbr_get_ro, dwmmc_get_ro), DEVMETHOD(mmcbr_acquire_host, dwmmc_acquire_host), DEVMETHOD(mmcbr_release_host, dwmmc_release_host), DEVMETHOD_END }; driver_t dwmmc_driver = { "dwmmc", dwmmc_methods, sizeof(struct dwmmc_softc), }; static devclass_t dwmmc_devclass; DRIVER_MODULE(dwmmc, simplebus, dwmmc_driver, dwmmc_devclass, 0, 0); DRIVER_MODULE(dwmmc, ofwbus, dwmmc_driver, dwmmc_devclass, 0, 0); 
- +DRIVER_MODULE(mmc, dwmmc, mmc_driver, mmc_devclass, NULL, NULL); Index: head/sys/dev/mmc/mmc.c =================================================================== --- head/sys/dev/mmc/mmc.c (revision 292179) +++ head/sys/dev/mmc/mmc.c (revision 292180) @@ -1,1826 +1,1813 @@ /*- * Copyright (c) 2006 Bernd Walter. All rights reserved. * Copyright (c) 2006 M. Warner Losh. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * Portions of this software may have been developed with reference to * the SD Simplified Specification. The following disclaimer may apply: * * The following conditions apply to the release of the simplified * specification ("Simplified Specification") by the SD Card Association and * the SD Group. 
The Simplified Specification is a subset of the complete SD * Specification which is owned by the SD Card Association and the SD * Group. This Simplified Specification is provided on a non-confidential * basis subject to the disclaimers below. Any implementation of the * Simplified Specification may require a license from the SD Card * Association, SD Group, SD-3C LLC or other third parties. * * Disclaimers: * * The information contained in the Simplified Specification is presented only * as a standard specification for SD Cards and SD Host/Ancillary products and * is provided "AS-IS" without any representations or warranties of any * kind. No responsibility is assumed by the SD Group, SD-3C LLC or the SD * Card Association for any damages, any infringements of patents or other * right of the SD Group, SD-3C LLC, the SD Card Association or any third * parties, which may result from its use. No license is granted by * implication, estoppel or otherwise under any patent or other rights of the * SD Group, SD-3C LLC, the SD Card Association or any third party. Nothing * herein shall be construed as an obligation by the SD Group, the SD-3C LLC * or the SD Card Association to disclose or distribute any technical * information, know-how or other confidential information to any third party. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "mmcbr_if.h" #include "mmcbus_if.h" struct mmc_softc { device_t dev; struct mtx sc_mtx; struct intr_config_hook config_intrhook; device_t owner; uint32_t last_rca; int squelched; /* suppress reporting of (expected) errors */ int log_count; struct timeval log_time; }; #define LOG_PPS 5 /* Log no more than 5 errors per second. 
*/ /* * Per-card data */ struct mmc_ivars { uint32_t raw_cid[4]; /* Raw bits of the CID */ uint32_t raw_csd[4]; /* Raw bits of the CSD */ uint32_t raw_scr[2]; /* Raw bits of the SCR */ uint8_t raw_ext_csd[512]; /* Raw bits of the EXT_CSD */ uint32_t raw_sd_status[16]; /* Raw bits of the SD_STATUS */ uint16_t rca; enum mmc_card_mode mode; struct mmc_cid cid; /* cid decoded */ struct mmc_csd csd; /* csd decoded */ struct mmc_scr scr; /* scr decoded */ struct mmc_sd_status sd_status; /* SD_STATUS decoded */ u_char read_only; /* True when the device is read-only */ u_char bus_width; /* Bus width to use */ u_char timing; /* Bus timing support */ u_char high_cap; /* High Capacity card (block addressed) */ uint32_t sec_count; /* Card capacity in 512byte blocks */ uint32_t tran_speed; /* Max speed in normal mode */ uint32_t hs_tran_speed; /* Max speed in high speed mode */ uint32_t erase_sector; /* Card native erase sector size */ char card_id_string[64];/* Formatted CID info (serial, MFG, etc) */ char card_sn_string[16];/* Formatted serial # for disk->d_ident */ }; #define CMD_RETRIES 3 #define CARD_ID_FREQUENCY 400000 /* Spec requires 400kHz max during ID phase. 
*/ static SYSCTL_NODE(_hw, OID_AUTO, mmc, CTLFLAG_RD, NULL, "mmc driver"); static int mmc_debug; SYSCTL_INT(_hw_mmc, OID_AUTO, debug, CTLFLAG_RW, &mmc_debug, 0, "Debug level"); /* bus entry points */ static int mmc_acquire_bus(device_t busdev, device_t dev); static int mmc_attach(device_t dev); static int mmc_child_location_str(device_t dev, device_t child, char *buf, size_t buflen); static int mmc_detach(device_t dev); static int mmc_probe(device_t dev); static int mmc_read_ivar(device_t bus, device_t child, int which, uintptr_t *result); static int mmc_release_bus(device_t busdev, device_t dev); static int mmc_resume(device_t dev); static int mmc_suspend(device_t dev); static int mmc_wait_for_request(device_t brdev, device_t reqdev, struct mmc_request *req); static int mmc_write_ivar(device_t bus, device_t child, int which, uintptr_t value); #define MMC_LOCK(_sc) mtx_lock(&(_sc)->sc_mtx) #define MMC_UNLOCK(_sc) mtx_unlock(&(_sc)->sc_mtx) #define MMC_LOCK_INIT(_sc) \ mtx_init(&_sc->sc_mtx, device_get_nameunit(_sc->dev), \ "mmc", MTX_DEF) #define MMC_LOCK_DESTROY(_sc) mtx_destroy(&_sc->sc_mtx); #define MMC_ASSERT_LOCKED(_sc) mtx_assert(&_sc->sc_mtx, MA_OWNED); #define MMC_ASSERT_UNLOCKED(_sc) mtx_assert(&_sc->sc_mtx, MA_NOTOWNED); static int mmc_all_send_cid(struct mmc_softc *sc, uint32_t *rawcid); static void mmc_app_decode_scr(uint32_t *raw_scr, struct mmc_scr *scr); static void mmc_app_decode_sd_status(uint32_t *raw_sd_status, struct mmc_sd_status *sd_status); static int mmc_app_sd_status(struct mmc_softc *sc, uint16_t rca, uint32_t *rawsdstatus); static int mmc_app_send_scr(struct mmc_softc *sc, uint16_t rca, uint32_t *rawscr); static int mmc_calculate_clock(struct mmc_softc *sc); static void mmc_decode_cid_mmc(uint32_t *raw_cid, struct mmc_cid *cid); static void mmc_decode_cid_sd(uint32_t *raw_cid, struct mmc_cid *cid); static void mmc_decode_csd_mmc(uint32_t *raw_csd, struct mmc_csd *csd); static void mmc_decode_csd_sd(uint32_t *raw_csd, struct mmc_csd *csd); 
static void mmc_delayed_attach(void *xsc); static int mmc_delete_cards(struct mmc_softc *sc); static void mmc_discover_cards(struct mmc_softc *sc); static void mmc_format_card_id_string(struct mmc_ivars *ivar); static void mmc_go_discovery(struct mmc_softc *sc); static uint32_t mmc_get_bits(uint32_t *bits, int bit_len, int start, int size); static int mmc_highest_voltage(uint32_t ocr); static void mmc_idle_cards(struct mmc_softc *sc); static void mmc_ms_delay(int ms); static void mmc_log_card(device_t dev, struct mmc_ivars *ivar, int newcard); static void mmc_power_down(struct mmc_softc *sc); static void mmc_power_up(struct mmc_softc *sc); static void mmc_rescan_cards(struct mmc_softc *sc); static void mmc_scan(struct mmc_softc *sc); static int mmc_sd_switch(struct mmc_softc *sc, uint8_t mode, uint8_t grp, uint8_t value, uint8_t *res); static int mmc_select_card(struct mmc_softc *sc, uint16_t rca); static uint32_t mmc_select_vdd(struct mmc_softc *sc, uint32_t ocr); static int mmc_send_app_op_cond(struct mmc_softc *sc, uint32_t ocr, uint32_t *rocr); static int mmc_send_csd(struct mmc_softc *sc, uint16_t rca, uint32_t *rawcsd); static int mmc_send_ext_csd(struct mmc_softc *sc, uint8_t *rawextcsd); static int mmc_send_if_cond(struct mmc_softc *sc, uint8_t vhs); static int mmc_send_op_cond(struct mmc_softc *sc, uint32_t ocr, uint32_t *rocr); static int mmc_send_relative_addr(struct mmc_softc *sc, uint32_t *resp); static int mmc_send_status(struct mmc_softc *sc, uint16_t rca, uint32_t *status); static int mmc_set_blocklen(struct mmc_softc *sc, uint32_t len); static int mmc_set_card_bus_width(struct mmc_softc *sc, uint16_t rca, int width); static int mmc_set_relative_addr(struct mmc_softc *sc, uint16_t resp); static int mmc_set_timing(struct mmc_softc *sc, int timing); static int mmc_switch(struct mmc_softc *sc, uint8_t set, uint8_t index, uint8_t value); static int mmc_test_bus_width(struct mmc_softc *sc); static int mmc_wait_for_app_cmd(struct mmc_softc *sc, uint32_t 
rca, struct mmc_command *cmd, int retries); static int mmc_wait_for_cmd(struct mmc_softc *sc, struct mmc_command *cmd, int retries); static int mmc_wait_for_command(struct mmc_softc *sc, uint32_t opcode, uint32_t arg, uint32_t flags, uint32_t *resp, int retries); static int mmc_wait_for_req(struct mmc_softc *sc, struct mmc_request *req); static void mmc_wakeup(struct mmc_request *req); static void mmc_ms_delay(int ms) { DELAY(1000 * ms); /* XXX BAD */ } static int mmc_probe(device_t dev) { device_set_desc(dev, "MMC/SD bus"); return (0); } static int mmc_attach(device_t dev) { struct mmc_softc *sc; sc = device_get_softc(dev); sc->dev = dev; MMC_LOCK_INIT(sc); /* We'll probe and attach our children later, but before / mount */ sc->config_intrhook.ich_func = mmc_delayed_attach; sc->config_intrhook.ich_arg = sc; if (config_intrhook_establish(&sc->config_intrhook) != 0) device_printf(dev, "config_intrhook_establish failed\n"); return (0); } static int mmc_detach(device_t dev) { struct mmc_softc *sc = device_get_softc(dev); int err; if ((err = mmc_delete_cards(sc)) != 0) return (err); mmc_power_down(sc); MMC_LOCK_DESTROY(sc); return (0); } static int mmc_suspend(device_t dev) { struct mmc_softc *sc = device_get_softc(dev); int err; err = bus_generic_suspend(dev); if (err) return (err); mmc_power_down(sc); return (0); } static int mmc_resume(device_t dev) { struct mmc_softc *sc = device_get_softc(dev); mmc_scan(sc); return (bus_generic_resume(dev)); } static int mmc_acquire_bus(device_t busdev, device_t dev) { struct mmc_softc *sc; struct mmc_ivars *ivar; int err; int rca; err = MMCBR_ACQUIRE_HOST(device_get_parent(busdev), busdev); if (err) return (err); sc = device_get_softc(busdev); MMC_LOCK(sc); if (sc->owner) panic("mmc: host bridge didn't serialize us."); sc->owner = dev; MMC_UNLOCK(sc); if (busdev != dev) { /* * Keep track of the last rca that we've selected. If * we're asked to do it again, don't. 
We never * unselect unless the bus code itself wants the mmc * bus, and constantly reselecting causes problems. */ rca = mmc_get_rca(dev); if (sc->last_rca != rca) { mmc_select_card(sc, rca); sc->last_rca = rca; /* Prepare bus width for the new card. */ ivar = device_get_ivars(dev); if (bootverbose || mmc_debug) { device_printf(busdev, "setting bus width to %d bits\n", (ivar->bus_width == bus_width_4) ? 4 : (ivar->bus_width == bus_width_8) ? 8 : 1); } mmc_set_card_bus_width(sc, rca, ivar->bus_width); mmcbr_set_bus_width(busdev, ivar->bus_width); mmcbr_update_ios(busdev); } } else { /* * If there's a card selected, stand down. */ if (sc->last_rca != 0) { mmc_select_card(sc, 0); sc->last_rca = 0; } } return (0); } static int mmc_release_bus(device_t busdev, device_t dev) { struct mmc_softc *sc; int err; sc = device_get_softc(busdev); MMC_LOCK(sc); if (!sc->owner) panic("mmc: releasing unowned bus."); if (sc->owner != dev) panic("mmc: you don't own the bus. game over."); MMC_UNLOCK(sc); err = MMCBR_RELEASE_HOST(device_get_parent(busdev), busdev); if (err) return (err); MMC_LOCK(sc); sc->owner = NULL; MMC_UNLOCK(sc); return (0); } static uint32_t mmc_select_vdd(struct mmc_softc *sc, uint32_t ocr) { return (ocr & MMC_OCR_VOLTAGE); } static int mmc_highest_voltage(uint32_t ocr) { int i; for (i = MMC_OCR_MAX_VOLTAGE_SHIFT; i >= MMC_OCR_MIN_VOLTAGE_SHIFT; i--) if (ocr & (1 << i)) return (i); return (-1); } static void mmc_wakeup(struct mmc_request *req) { struct mmc_softc *sc; sc = (struct mmc_softc *)req->done_data; MMC_LOCK(sc); req->flags |= MMC_REQ_DONE; MMC_UNLOCK(sc); wakeup(req); } static int mmc_wait_for_req(struct mmc_softc *sc, struct mmc_request *req) { req->done = mmc_wakeup; req->done_data = sc; if (mmc_debug > 1) { device_printf(sc->dev, "REQUEST: CMD%d arg %#x flags %#x", req->cmd->opcode, req->cmd->arg, req->cmd->flags); if (req->cmd->data) { printf(" data %d\n", (int)req->cmd->data->len); } else printf("\n"); } MMCBR_REQUEST(device_get_parent(sc->dev), 
sc->dev, req); MMC_LOCK(sc); while ((req->flags & MMC_REQ_DONE) == 0) msleep(req, &sc->sc_mtx, 0, "mmcreq", 0); MMC_UNLOCK(sc); if (mmc_debug > 2 || (mmc_debug > 0 && req->cmd->error != MMC_ERR_NONE)) device_printf(sc->dev, "CMD%d RESULT: %d\n", req->cmd->opcode, req->cmd->error); return (0); } static int mmc_wait_for_request(device_t brdev, device_t reqdev, struct mmc_request *req) { struct mmc_softc *sc = device_get_softc(brdev); return (mmc_wait_for_req(sc, req)); } static int mmc_wait_for_cmd(struct mmc_softc *sc, struct mmc_command *cmd, int retries) { struct mmc_request mreq; int err; do { memset(&mreq, 0, sizeof(mreq)); memset(cmd->resp, 0, sizeof(cmd->resp)); cmd->retries = 0; /* Retries done here, not in hardware. */ cmd->mrq = &mreq; mreq.cmd = cmd; if (mmc_wait_for_req(sc, &mreq) != 0) err = MMC_ERR_FAILED; else err = cmd->error; } while (err != MMC_ERR_NONE && retries-- > 0); if (err != MMC_ERR_NONE && sc->squelched == 0) { if (ppsratecheck(&sc->log_time, &sc->log_count, LOG_PPS)) { device_printf(sc->dev, "CMD%d failed, RESULT: %d\n", cmd->opcode, err); } } return (err); } static int mmc_wait_for_app_cmd(struct mmc_softc *sc, uint32_t rca, struct mmc_command *cmd, int retries) { struct mmc_command appcmd; int err; /* Squelch error reporting at lower levels, we report below. 
*/ sc->squelched++; do { memset(&appcmd, 0, sizeof(appcmd)); appcmd.opcode = MMC_APP_CMD; appcmd.arg = rca << 16; appcmd.flags = MMC_RSP_R1 | MMC_CMD_AC; appcmd.data = NULL; if (mmc_wait_for_cmd(sc, &appcmd, 0) != 0) err = MMC_ERR_FAILED; else err = appcmd.error; if (err == MMC_ERR_NONE) { if (!(appcmd.resp[0] & R1_APP_CMD)) err = MMC_ERR_FAILED; else if (mmc_wait_for_cmd(sc, cmd, 0) != 0) err = MMC_ERR_FAILED; else err = cmd->error; } } while (err != MMC_ERR_NONE && retries-- > 0); sc->squelched--; if (err != MMC_ERR_NONE && sc->squelched == 0) { if (ppsratecheck(&sc->log_time, &sc->log_count, LOG_PPS)) { device_printf(sc->dev, "ACMD%d failed, RESULT: %d\n", cmd->opcode, err); } } return (err); } static int mmc_wait_for_command(struct mmc_softc *sc, uint32_t opcode, uint32_t arg, uint32_t flags, uint32_t *resp, int retries) { struct mmc_command cmd; int err; memset(&cmd, 0, sizeof(cmd)); cmd.opcode = opcode; cmd.arg = arg; cmd.flags = flags; cmd.data = NULL; err = mmc_wait_for_cmd(sc, &cmd, retries); if (err) return (err); if (resp) { if (flags & MMC_RSP_136) memcpy(resp, cmd.resp, 4 * sizeof(uint32_t)); else *resp = cmd.resp[0]; } return (0); } static void mmc_idle_cards(struct mmc_softc *sc) { device_t dev; struct mmc_command cmd; dev = sc->dev; mmcbr_set_chip_select(dev, cs_high); mmcbr_update_ios(dev); mmc_ms_delay(1); memset(&cmd, 0, sizeof(cmd)); cmd.opcode = MMC_GO_IDLE_STATE; cmd.arg = 0; cmd.flags = MMC_RSP_NONE | MMC_CMD_BC; cmd.data = NULL; mmc_wait_for_cmd(sc, &cmd, CMD_RETRIES); mmc_ms_delay(1); mmcbr_set_chip_select(dev, cs_dontcare); mmcbr_update_ios(dev); mmc_ms_delay(1); } static int mmc_send_app_op_cond(struct mmc_softc *sc, uint32_t ocr, uint32_t *rocr) { struct mmc_command cmd; int err = MMC_ERR_NONE, i; memset(&cmd, 0, sizeof(cmd)); cmd.opcode = ACMD_SD_SEND_OP_COND; cmd.arg = ocr; cmd.flags = MMC_RSP_R3 | MMC_CMD_BCR; cmd.data = NULL; for (i = 0; i < 1000; i++) { err = mmc_wait_for_app_cmd(sc, 0, &cmd, CMD_RETRIES); if (err != MMC_ERR_NONE) 
break; if ((cmd.resp[0] & MMC_OCR_CARD_BUSY) || (ocr & MMC_OCR_VOLTAGE) == 0) break; err = MMC_ERR_TIMEOUT; mmc_ms_delay(10); } if (rocr && err == MMC_ERR_NONE) *rocr = cmd.resp[0]; return (err); } static int mmc_send_op_cond(struct mmc_softc *sc, uint32_t ocr, uint32_t *rocr) { struct mmc_command cmd; int err = MMC_ERR_NONE, i; memset(&cmd, 0, sizeof(cmd)); cmd.opcode = MMC_SEND_OP_COND; cmd.arg = ocr; cmd.flags = MMC_RSP_R3 | MMC_CMD_BCR; cmd.data = NULL; for (i = 0; i < 1000; i++) { err = mmc_wait_for_cmd(sc, &cmd, CMD_RETRIES); if (err != MMC_ERR_NONE) break; if ((cmd.resp[0] & MMC_OCR_CARD_BUSY) || (ocr & MMC_OCR_VOLTAGE) == 0) break; err = MMC_ERR_TIMEOUT; mmc_ms_delay(10); } if (rocr && err == MMC_ERR_NONE) *rocr = cmd.resp[0]; return (err); } static int mmc_send_if_cond(struct mmc_softc *sc, uint8_t vhs) { struct mmc_command cmd; int err; memset(&cmd, 0, sizeof(cmd)); cmd.opcode = SD_SEND_IF_COND; cmd.arg = (vhs << 8) + 0xAA; cmd.flags = MMC_RSP_R7 | MMC_CMD_BCR; cmd.data = NULL; err = mmc_wait_for_cmd(sc, &cmd, CMD_RETRIES); return (err); } static void mmc_power_up(struct mmc_softc *sc) { device_t dev; dev = sc->dev; mmcbr_set_vdd(dev, mmc_highest_voltage(mmcbr_get_host_ocr(dev))); mmcbr_set_bus_mode(dev, opendrain); mmcbr_set_chip_select(dev, cs_dontcare); mmcbr_set_bus_width(dev, bus_width_1); mmcbr_set_power_mode(dev, power_up); mmcbr_set_clock(dev, 0); mmcbr_update_ios(dev); mmc_ms_delay(1); mmcbr_set_clock(dev, CARD_ID_FREQUENCY); mmcbr_set_timing(dev, bus_timing_normal); mmcbr_set_power_mode(dev, power_on); mmcbr_update_ios(dev); mmc_ms_delay(2); } static void mmc_power_down(struct mmc_softc *sc) { device_t dev = sc->dev; mmcbr_set_bus_mode(dev, opendrain); mmcbr_set_chip_select(dev, cs_dontcare); mmcbr_set_bus_width(dev, bus_width_1); mmcbr_set_power_mode(dev, power_off); mmcbr_set_clock(dev, 0); mmcbr_set_timing(dev, bus_timing_normal); mmcbr_update_ios(dev); } static int mmc_select_card(struct mmc_softc *sc, uint16_t rca) { int flags; flags = (rca 
? MMC_RSP_R1B : MMC_RSP_NONE) | MMC_CMD_AC; return (mmc_wait_for_command(sc, MMC_SELECT_CARD, (uint32_t)rca << 16, flags, NULL, CMD_RETRIES)); } static int mmc_switch(struct mmc_softc *sc, uint8_t set, uint8_t index, uint8_t value) { struct mmc_command cmd; int err; memset(&cmd, 0, sizeof(cmd)); cmd.opcode = MMC_SWITCH_FUNC; cmd.arg = (MMC_SWITCH_FUNC_WR << 24) | (index << 16) | (value << 8) | set; cmd.flags = MMC_RSP_R1B | MMC_CMD_AC; cmd.data = NULL; err = mmc_wait_for_cmd(sc, &cmd, CMD_RETRIES); return (err); } static int mmc_sd_switch(struct mmc_softc *sc, uint8_t mode, uint8_t grp, uint8_t value, uint8_t *res) { int err; struct mmc_command cmd; struct mmc_data data; memset(&cmd, 0, sizeof(cmd)); memset(&data, 0, sizeof(data)); memset(res, 0, 64); cmd.opcode = SD_SWITCH_FUNC; cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC; cmd.arg = mode << 31; /* 0 - check, 1 - set */ cmd.arg |= 0x00FFFFFF; cmd.arg &= ~(0xF << (grp * 4)); cmd.arg |= value << (grp * 4); cmd.data = &data; data.data = res; data.len = 64; data.flags = MMC_DATA_READ; err = mmc_wait_for_cmd(sc, &cmd, CMD_RETRIES); return (err); } static int mmc_set_card_bus_width(struct mmc_softc *sc, uint16_t rca, int width) { struct mmc_command cmd; int err; uint8_t value; if (mmcbr_get_mode(sc->dev) == mode_sd) { memset(&cmd, 0, sizeof(cmd)); cmd.opcode = ACMD_SET_CLR_CARD_DETECT; cmd.flags = MMC_RSP_R1 | MMC_CMD_AC; cmd.arg = SD_CLR_CARD_DETECT; err = mmc_wait_for_app_cmd(sc, rca, &cmd, CMD_RETRIES); if (err != 0) return (err); memset(&cmd, 0, sizeof(cmd)); cmd.opcode = ACMD_SET_BUS_WIDTH; cmd.flags = MMC_RSP_R1 | MMC_CMD_AC; switch (width) { case bus_width_1: cmd.arg = SD_BUS_WIDTH_1; break; case bus_width_4: cmd.arg = SD_BUS_WIDTH_4; break; default: return (MMC_ERR_INVALID); } err = mmc_wait_for_app_cmd(sc, rca, &cmd, CMD_RETRIES); } else { switch (width) { case bus_width_1: value = EXT_CSD_BUS_WIDTH_1; break; case bus_width_4: value = EXT_CSD_BUS_WIDTH_4; break; case bus_width_8: value = EXT_CSD_BUS_WIDTH_8; break; 
default: return (MMC_ERR_INVALID); } err = mmc_switch(sc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BUS_WIDTH, value); } return (err); } static int mmc_set_timing(struct mmc_softc *sc, int timing) { int err; uint8_t value; u_char switch_res[64]; switch (timing) { case bus_timing_normal: value = 0; break; case bus_timing_hs: value = 1; break; default: return (MMC_ERR_INVALID); } if (mmcbr_get_mode(sc->dev) == mode_sd) err = mmc_sd_switch(sc, SD_SWITCH_MODE_SET, SD_SWITCH_GROUP1, value, switch_res); else err = mmc_switch(sc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING, value); return (err); } static int mmc_test_bus_width(struct mmc_softc *sc) { struct mmc_command cmd; struct mmc_data data; int err; uint8_t buf[8]; uint8_t p8[8] = { 0x55, 0xAA, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; uint8_t p8ok[8] = { 0xAA, 0x55, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; uint8_t p4[4] = { 0x5A, 0x00, 0x00, 0x00, }; uint8_t p4ok[4] = { 0xA5, 0x00, 0x00, 0x00, }; if (mmcbr_get_caps(sc->dev) & MMC_CAP_8_BIT_DATA) { mmcbr_set_bus_width(sc->dev, bus_width_8); mmcbr_update_ios(sc->dev); sc->squelched++; /* Errors are expected, squelch reporting. */ memset(&cmd, 0, sizeof(cmd)); memset(&data, 0, sizeof(data)); cmd.opcode = MMC_BUSTEST_W; cmd.arg = 0; cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC; cmd.data = &data; data.data = p8; data.len = 8; data.flags = MMC_DATA_WRITE; mmc_wait_for_cmd(sc, &cmd, 0); memset(&cmd, 0, sizeof(cmd)); memset(&data, 0, sizeof(data)); cmd.opcode = MMC_BUSTEST_R; cmd.arg = 0; cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC; cmd.data = &data; data.data = buf; data.len = 8; data.flags = MMC_DATA_READ; err = mmc_wait_for_cmd(sc, &cmd, 0); sc->squelched--; mmcbr_set_bus_width(sc->dev, bus_width_1); mmcbr_update_ios(sc->dev); if (err == MMC_ERR_NONE && memcmp(buf, p8ok, 8) == 0) return (bus_width_8); } if (mmcbr_get_caps(sc->dev) & MMC_CAP_4_BIT_DATA) { mmcbr_set_bus_width(sc->dev, bus_width_4); mmcbr_update_ios(sc->dev); sc->squelched++; /* Errors are expected, squelch reporting. 
*/ memset(&cmd, 0, sizeof(cmd)); memset(&data, 0, sizeof(data)); cmd.opcode = MMC_BUSTEST_W; cmd.arg = 0; cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC; cmd.data = &data; data.data = p4; data.len = 4; data.flags = MMC_DATA_WRITE; mmc_wait_for_cmd(sc, &cmd, 0); memset(&cmd, 0, sizeof(cmd)); memset(&data, 0, sizeof(data)); cmd.opcode = MMC_BUSTEST_R; cmd.arg = 0; cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC; cmd.data = &data; data.data = buf; data.len = 4; data.flags = MMC_DATA_READ; err = mmc_wait_for_cmd(sc, &cmd, 0); sc->squelched--; mmcbr_set_bus_width(sc->dev, bus_width_1); mmcbr_update_ios(sc->dev); if (err == MMC_ERR_NONE && memcmp(buf, p4ok, 4) == 0) return (bus_width_4); } return (bus_width_1); } static uint32_t mmc_get_bits(uint32_t *bits, int bit_len, int start, int size) { const int i = (bit_len / 32) - (start / 32) - 1; const int shift = start & 31; uint32_t retval = bits[i] >> shift; if (size + shift > 32) retval |= bits[i - 1] << (32 - shift); return (retval & ((1llu << size) - 1)); } static void mmc_decode_cid_sd(uint32_t *raw_cid, struct mmc_cid *cid) { int i; /* There's no version info, so we take it on faith */ memset(cid, 0, sizeof(*cid)); cid->mid = mmc_get_bits(raw_cid, 128, 120, 8); cid->oid = mmc_get_bits(raw_cid, 128, 104, 16); for (i = 0; i < 5; i++) cid->pnm[i] = mmc_get_bits(raw_cid, 128, 96 - i * 8, 8); cid->pnm[5] = 0; cid->prv = mmc_get_bits(raw_cid, 128, 56, 8); cid->psn = mmc_get_bits(raw_cid, 128, 24, 32); cid->mdt_year = mmc_get_bits(raw_cid, 128, 12, 8) + 2000; cid->mdt_month = mmc_get_bits(raw_cid, 128, 8, 4); } static void mmc_decode_cid_mmc(uint32_t *raw_cid, struct mmc_cid *cid) { int i; /* There's no version info, so we take it on faith */ memset(cid, 0, sizeof(*cid)); cid->mid = mmc_get_bits(raw_cid, 128, 120, 8); cid->oid = mmc_get_bits(raw_cid, 128, 104, 8); for (i = 0; i < 6; i++) cid->pnm[i] = mmc_get_bits(raw_cid, 128, 96 - i * 8, 8); cid->pnm[6] = 0; cid->prv = mmc_get_bits(raw_cid, 128, 48, 8); cid->psn = mmc_get_bits(raw_cid, 128, 
16, 32); cid->mdt_month = mmc_get_bits(raw_cid, 128, 12, 4); cid->mdt_year = mmc_get_bits(raw_cid, 128, 8, 4) + 1997; } static void mmc_format_card_id_string(struct mmc_ivars *ivar) { char oidstr[8]; uint8_t c1; uint8_t c2; /* * Format a card ID string for use by the mmcsd driver, it's what * appears between the <> in the following: * mmcsd0: 968MB at mmc0 * 22.5MHz/4bit/128-block * * Also format just the card serial number, which the mmcsd driver will * use as the disk->d_ident string. * * The card_id_string in mmc_ivars is currently allocated as 64 bytes, * and our max formatted length is currently 55 bytes if every field * contains the largest value. * * Sometimes the oid is two printable ascii chars; when it's not, * format it as 0xnnnn instead. */ c1 = (ivar->cid.oid >> 8) & 0x0ff; c2 = ivar->cid.oid & 0x0ff; if (c1 > 0x1f && c1 < 0x7f && c2 > 0x1f && c2 < 0x7f) snprintf(oidstr, sizeof(oidstr), "%c%c", c1, c2); else snprintf(oidstr, sizeof(oidstr), "0x%04x", ivar->cid.oid); snprintf(ivar->card_sn_string, sizeof(ivar->card_sn_string), "%08X", ivar->cid.psn); snprintf(ivar->card_id_string, sizeof(ivar->card_id_string), "%s%s %s %d.%d SN %08X MFG %02d/%04d by %d %s", ivar->mode == mode_sd ? "SD" : "MMC", ivar->high_cap ? 
"HC" : "", ivar->cid.pnm,
	    ivar->cid.prv >> 4, ivar->cid.prv & 0x0f, ivar->cid.psn,
	    ivar->cid.mdt_month, ivar->cid.mdt_year, ivar->cid.mid, oidstr);
}

/*
 * CSD conversion tables (SD Physical Layer spec / MMC spec encodings).
 *
 * exp[]/mant[] decode the exponent/mantissa pairs used by the TAAC and
 * TRAN_SPEED fields; mant[] values are the spec's 1.0..8.0 steps scaled
 * by 10 so integer arithmetic can be used.
 */
static const int exp[8] = {
	1, 10, 100, 1000, 10000, 100000, 1000000, 10000000
};

static const int mant[16] = {
	0, 10, 12, 13, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60, 70, 80
};

/* VDD_R/W_CURR_MIN codes 0-7, converted to microamps. */
static const int cur_min[8] = {
	500, 1000, 5000, 10000, 25000, 35000, 60000, 100000
};

/*
 * VDD_R/W_CURR_MAX codes 0-7, converted to microamps.  Code 6 is 80mA
 * per the SD specification; this table previously said 800000 (800mA),
 * an order-of-magnitude typo.  The values are informational only (they
 * feed struct mmc_csd), so fixing it only corrects reported currents.
 */
static const int cur_max[8] = {
	1000, 5000, 10000, 25000, 35000, 45000, 80000, 200000
};

/*
 * Decode the raw 128-bit SD CSD register into *csd.
 *
 * Supports CSD structure version 0 (standard capacity; capacity is
 * computed from C_SIZE/C_SIZE_MULT/READ_BL_LEN) and version 1 (high
 * capacity; capacity is (C_SIZE + 1) * 512KB).  Panics on any other
 * version, since the layout would be unknown.
 */
static void
mmc_decode_csd_sd(uint32_t *raw_csd, struct mmc_csd *csd)
{
	int v;
	int m;
	int e;

	memset(csd, 0, sizeof(*csd));
	csd->csd_structure = v = mmc_get_bits(raw_csd, 128, 126, 2);
	if (v == 0) {
		m = mmc_get_bits(raw_csd, 128, 115, 4);
		e = mmc_get_bits(raw_csd, 128, 112, 3);
		/* Round the TAAC product up when scaling down by 10. */
		csd->tacc = (exp[e] * mant[m] + 9) / 10;
		csd->nsac = mmc_get_bits(raw_csd, 128, 104, 8) * 100;
		m = mmc_get_bits(raw_csd, 128, 99, 4);
		e = mmc_get_bits(raw_csd, 128, 96, 3);
		csd->tran_speed = exp[e] * 10000 * mant[m];
		csd->ccc = mmc_get_bits(raw_csd, 128, 84, 12);
		csd->read_bl_len = 1 << mmc_get_bits(raw_csd, 128, 80, 4);
		csd->read_bl_partial = mmc_get_bits(raw_csd, 128, 79, 1);
		csd->write_blk_misalign = mmc_get_bits(raw_csd, 128, 78, 1);
		csd->read_blk_misalign = mmc_get_bits(raw_csd, 128, 77, 1);
		csd->dsr_imp = mmc_get_bits(raw_csd, 128, 76, 1);
		csd->vdd_r_curr_min =
		    cur_min[mmc_get_bits(raw_csd, 128, 59, 3)];
		csd->vdd_r_curr_max =
		    cur_max[mmc_get_bits(raw_csd, 128, 56, 3)];
		csd->vdd_w_curr_min =
		    cur_min[mmc_get_bits(raw_csd, 128, 53, 3)];
		csd->vdd_w_curr_max =
		    cur_max[mmc_get_bits(raw_csd, 128, 50, 3)];
		m = mmc_get_bits(raw_csd, 128, 62, 12);
		e = mmc_get_bits(raw_csd, 128, 47, 3);
		csd->capacity = ((1 + m) << (e + 2)) * csd->read_bl_len;
		csd->erase_blk_en = mmc_get_bits(raw_csd, 128, 46, 1);
		csd->erase_sector = mmc_get_bits(raw_csd, 128, 39, 7) + 1;
		csd->wp_grp_size = mmc_get_bits(raw_csd, 128, 32, 7);
		csd->wp_grp_enable = mmc_get_bits(raw_csd, 128, 31, 1);
		csd->r2w_factor = 1 <<
mmc_get_bits(raw_csd, 128, 26, 3); csd->write_bl_len = 1 << mmc_get_bits(raw_csd, 128, 22, 4); csd->write_bl_partial = mmc_get_bits(raw_csd, 128, 21, 1); } else if (v == 1) { m = mmc_get_bits(raw_csd, 128, 115, 4); e = mmc_get_bits(raw_csd, 128, 112, 3); csd->tacc = (exp[e] * mant[m] + 9) / 10; csd->nsac = mmc_get_bits(raw_csd, 128, 104, 8) * 100; m = mmc_get_bits(raw_csd, 128, 99, 4); e = mmc_get_bits(raw_csd, 128, 96, 3); csd->tran_speed = exp[e] * 10000 * mant[m]; csd->ccc = mmc_get_bits(raw_csd, 128, 84, 12); csd->read_bl_len = 1 << mmc_get_bits(raw_csd, 128, 80, 4); csd->read_bl_partial = mmc_get_bits(raw_csd, 128, 79, 1); csd->write_blk_misalign = mmc_get_bits(raw_csd, 128, 78, 1); csd->read_blk_misalign = mmc_get_bits(raw_csd, 128, 77, 1); csd->dsr_imp = mmc_get_bits(raw_csd, 128, 76, 1); csd->capacity = ((uint64_t)mmc_get_bits(raw_csd, 128, 48, 22) + 1) * 512 * 1024; csd->erase_blk_en = mmc_get_bits(raw_csd, 128, 46, 1); csd->erase_sector = mmc_get_bits(raw_csd, 128, 39, 7) + 1; csd->wp_grp_size = mmc_get_bits(raw_csd, 128, 32, 7); csd->wp_grp_enable = mmc_get_bits(raw_csd, 128, 31, 1); csd->r2w_factor = 1 << mmc_get_bits(raw_csd, 128, 26, 3); csd->write_bl_len = 1 << mmc_get_bits(raw_csd, 128, 22, 4); csd->write_bl_partial = mmc_get_bits(raw_csd, 128, 21, 1); } else panic("unknown SD CSD version"); } static void mmc_decode_csd_mmc(uint32_t *raw_csd, struct mmc_csd *csd) { int m; int e; memset(csd, 0, sizeof(*csd)); csd->csd_structure = mmc_get_bits(raw_csd, 128, 126, 2); csd->spec_vers = mmc_get_bits(raw_csd, 128, 122, 4); m = mmc_get_bits(raw_csd, 128, 115, 4); e = mmc_get_bits(raw_csd, 128, 112, 3); csd->tacc = exp[e] * mant[m] + 9 / 10; csd->nsac = mmc_get_bits(raw_csd, 128, 104, 8) * 100; m = mmc_get_bits(raw_csd, 128, 99, 4); e = mmc_get_bits(raw_csd, 128, 96, 3); csd->tran_speed = exp[e] * 10000 * mant[m]; csd->ccc = mmc_get_bits(raw_csd, 128, 84, 12); csd->read_bl_len = 1 << mmc_get_bits(raw_csd, 128, 80, 4); csd->read_bl_partial = 
mmc_get_bits(raw_csd, 128, 79, 1); csd->write_blk_misalign = mmc_get_bits(raw_csd, 128, 78, 1); csd->read_blk_misalign = mmc_get_bits(raw_csd, 128, 77, 1); csd->dsr_imp = mmc_get_bits(raw_csd, 128, 76, 1); csd->vdd_r_curr_min = cur_min[mmc_get_bits(raw_csd, 128, 59, 3)]; csd->vdd_r_curr_max = cur_max[mmc_get_bits(raw_csd, 128, 56, 3)]; csd->vdd_w_curr_min = cur_min[mmc_get_bits(raw_csd, 128, 53, 3)]; csd->vdd_w_curr_max = cur_max[mmc_get_bits(raw_csd, 128, 50, 3)]; m = mmc_get_bits(raw_csd, 128, 62, 12); e = mmc_get_bits(raw_csd, 128, 47, 3); csd->capacity = ((1 + m) << (e + 2)) * csd->read_bl_len; csd->erase_blk_en = 0; csd->erase_sector = (mmc_get_bits(raw_csd, 128, 42, 5) + 1) * (mmc_get_bits(raw_csd, 128, 37, 5) + 1); csd->wp_grp_size = mmc_get_bits(raw_csd, 128, 32, 5); csd->wp_grp_enable = mmc_get_bits(raw_csd, 128, 31, 1); csd->r2w_factor = 1 << mmc_get_bits(raw_csd, 128, 26, 3); csd->write_bl_len = 1 << mmc_get_bits(raw_csd, 128, 22, 4); csd->write_bl_partial = mmc_get_bits(raw_csd, 128, 21, 1); } static void mmc_app_decode_scr(uint32_t *raw_scr, struct mmc_scr *scr) { unsigned int scr_struct; memset(scr, 0, sizeof(*scr)); scr_struct = mmc_get_bits(raw_scr, 64, 60, 4); if (scr_struct != 0) { printf("Unrecognised SCR structure version %d\n", scr_struct); return; } scr->sda_vsn = mmc_get_bits(raw_scr, 64, 56, 4); scr->bus_widths = mmc_get_bits(raw_scr, 64, 48, 4); } static void mmc_app_decode_sd_status(uint32_t *raw_sd_status, struct mmc_sd_status *sd_status) { memset(sd_status, 0, sizeof(*sd_status)); sd_status->bus_width = mmc_get_bits(raw_sd_status, 512, 510, 2); sd_status->secured_mode = mmc_get_bits(raw_sd_status, 512, 509, 1); sd_status->card_type = mmc_get_bits(raw_sd_status, 512, 480, 16); sd_status->prot_area = mmc_get_bits(raw_sd_status, 512, 448, 12); sd_status->speed_class = mmc_get_bits(raw_sd_status, 512, 440, 8); sd_status->perf_move = mmc_get_bits(raw_sd_status, 512, 432, 8); sd_status->au_size = mmc_get_bits(raw_sd_status, 512, 428, 4); 
sd_status->erase_size = mmc_get_bits(raw_sd_status, 512, 408, 16); sd_status->erase_timeout = mmc_get_bits(raw_sd_status, 512, 402, 6); sd_status->erase_offset = mmc_get_bits(raw_sd_status, 512, 400, 2); } static int mmc_all_send_cid(struct mmc_softc *sc, uint32_t *rawcid) { struct mmc_command cmd; int err; memset(&cmd, 0, sizeof(cmd)); cmd.opcode = MMC_ALL_SEND_CID; cmd.arg = 0; cmd.flags = MMC_RSP_R2 | MMC_CMD_BCR; cmd.data = NULL; err = mmc_wait_for_cmd(sc, &cmd, CMD_RETRIES); memcpy(rawcid, cmd.resp, 4 * sizeof(uint32_t)); return (err); } static int mmc_send_csd(struct mmc_softc *sc, uint16_t rca, uint32_t *rawcsd) { struct mmc_command cmd; int err; memset(&cmd, 0, sizeof(cmd)); cmd.opcode = MMC_SEND_CSD; cmd.arg = rca << 16; cmd.flags = MMC_RSP_R2 | MMC_CMD_BCR; cmd.data = NULL; err = mmc_wait_for_cmd(sc, &cmd, CMD_RETRIES); memcpy(rawcsd, cmd.resp, 4 * sizeof(uint32_t)); return (err); } static int mmc_app_send_scr(struct mmc_softc *sc, uint16_t rca, uint32_t *rawscr) { int err; struct mmc_command cmd; struct mmc_data data; memset(&cmd, 0, sizeof(cmd)); memset(&data, 0, sizeof(data)); memset(rawscr, 0, 8); cmd.opcode = ACMD_SEND_SCR; cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC; cmd.arg = 0; cmd.data = &data; data.data = rawscr; data.len = 8; data.flags = MMC_DATA_READ; err = mmc_wait_for_app_cmd(sc, rca, &cmd, CMD_RETRIES); rawscr[0] = be32toh(rawscr[0]); rawscr[1] = be32toh(rawscr[1]); return (err); } static int mmc_send_ext_csd(struct mmc_softc *sc, uint8_t *rawextcsd) { int err; struct mmc_command cmd; struct mmc_data data; memset(&cmd, 0, sizeof(cmd)); memset(&data, 0, sizeof(data)); memset(rawextcsd, 0, 512); cmd.opcode = MMC_SEND_EXT_CSD; cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC; cmd.arg = 0; cmd.data = &data; data.data = rawextcsd; data.len = 512; data.flags = MMC_DATA_READ; err = mmc_wait_for_cmd(sc, &cmd, CMD_RETRIES); return (err); } static int mmc_app_sd_status(struct mmc_softc *sc, uint16_t rca, uint32_t *rawsdstatus) { int err, i; struct mmc_command cmd; 
struct mmc_data data; memset(&cmd, 0, sizeof(cmd)); memset(&data, 0, sizeof(data)); memset(rawsdstatus, 0, 64); cmd.opcode = ACMD_SD_STATUS; cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC; cmd.arg = 0; cmd.data = &data; data.data = rawsdstatus; data.len = 64; data.flags = MMC_DATA_READ; err = mmc_wait_for_app_cmd(sc, rca, &cmd, CMD_RETRIES); for (i = 0; i < 16; i++) rawsdstatus[i] = be32toh(rawsdstatus[i]); return (err); } static int mmc_set_relative_addr(struct mmc_softc *sc, uint16_t resp) { struct mmc_command cmd; int err; memset(&cmd, 0, sizeof(cmd)); cmd.opcode = MMC_SET_RELATIVE_ADDR; cmd.arg = resp << 16; cmd.flags = MMC_RSP_R6 | MMC_CMD_BCR; cmd.data = NULL; err = mmc_wait_for_cmd(sc, &cmd, CMD_RETRIES); return (err); } static int mmc_send_relative_addr(struct mmc_softc *sc, uint32_t *resp) { struct mmc_command cmd; int err; memset(&cmd, 0, sizeof(cmd)); cmd.opcode = SD_SEND_RELATIVE_ADDR; cmd.arg = 0; cmd.flags = MMC_RSP_R6 | MMC_CMD_BCR; cmd.data = NULL; err = mmc_wait_for_cmd(sc, &cmd, CMD_RETRIES); *resp = cmd.resp[0]; return (err); } static int mmc_send_status(struct mmc_softc *sc, uint16_t rca, uint32_t *status) { struct mmc_command cmd; int err; memset(&cmd, 0, sizeof(cmd)); cmd.opcode = MMC_SEND_STATUS; cmd.arg = rca << 16; cmd.flags = MMC_RSP_R1 | MMC_CMD_AC; cmd.data = NULL; err = mmc_wait_for_cmd(sc, &cmd, CMD_RETRIES); *status = cmd.resp[0]; return (err); } static int mmc_set_blocklen(struct mmc_softc *sc, uint32_t len) { struct mmc_command cmd; int err; memset(&cmd, 0, sizeof(cmd)); cmd.opcode = MMC_SET_BLOCKLEN; cmd.arg = len; cmd.flags = MMC_RSP_R1 | MMC_CMD_AC; cmd.data = NULL; err = mmc_wait_for_cmd(sc, &cmd, CMD_RETRIES); return (err); } static void mmc_log_card(device_t dev, struct mmc_ivars *ivar, int newcard) { device_printf(dev, "Card at relative address 0x%04x%s:\n", ivar->rca, newcard ? " added" : ""); device_printf(dev, " card: %s\n", ivar->card_id_string); device_printf(dev, " bus: %ubit, %uMHz%s\n", (ivar->bus_width == bus_width_1 ? 
1 : (ivar->bus_width == bus_width_4 ? 4 : 8)), (ivar->timing == bus_timing_hs ? ivar->hs_tran_speed : ivar->tran_speed) / 1000000, ivar->timing == bus_timing_hs ? ", high speed timing" : ""); device_printf(dev, " memory: %u blocks, erase sector %u blocks%s\n", ivar->sec_count, ivar->erase_sector, ivar->read_only ? ", read-only" : ""); } static void mmc_discover_cards(struct mmc_softc *sc) { struct mmc_ivars *ivar = NULL; device_t *devlist; int err, i, devcount, newcard; uint32_t raw_cid[4], resp, sec_count, status; device_t child; uint16_t rca = 2; u_char switch_res[64]; if (bootverbose || mmc_debug) device_printf(sc->dev, "Probing cards\n"); while (1) { sc->squelched++; /* Errors are expected, squelch reporting. */ err = mmc_all_send_cid(sc, raw_cid); sc->squelched--; if (err == MMC_ERR_TIMEOUT) break; if (err != MMC_ERR_NONE) { device_printf(sc->dev, "Error reading CID %d\n", err); break; } newcard = 1; if ((err = device_get_children(sc->dev, &devlist, &devcount)) != 0) return; for (i = 0; i < devcount; i++) { ivar = device_get_ivars(devlist[i]); if (memcmp(ivar->raw_cid, raw_cid, sizeof(raw_cid)) == 0) { newcard = 0; break; } } free(devlist, M_TEMP); if (bootverbose || mmc_debug) { device_printf(sc->dev, "%sard detected (CID %08x%08x%08x%08x)\n", newcard ? "New c" : "C", raw_cid[0], raw_cid[1], raw_cid[2], raw_cid[3]); } if (newcard) { ivar = malloc(sizeof(struct mmc_ivars), M_DEVBUF, M_WAITOK | M_ZERO); memcpy(ivar->raw_cid, raw_cid, sizeof(raw_cid)); } if (mmcbr_get_ro(sc->dev)) ivar->read_only = 1; ivar->bus_width = bus_width_1; ivar->timing = bus_timing_normal; ivar->mode = mmcbr_get_mode(sc->dev); if (ivar->mode == mode_sd) { mmc_decode_cid_sd(ivar->raw_cid, &ivar->cid); mmc_send_relative_addr(sc, &resp); ivar->rca = resp >> 16; /* Get card CSD. */ mmc_send_csd(sc, ivar->rca, ivar->raw_csd); if (bootverbose || mmc_debug) device_printf(sc->dev, "%sard detected (CSD %08x%08x%08x%08x)\n", newcard ? 
"New c" : "C", ivar->raw_csd[0], ivar->raw_csd[1], ivar->raw_csd[2], ivar->raw_csd[3]); mmc_decode_csd_sd(ivar->raw_csd, &ivar->csd); ivar->sec_count = ivar->csd.capacity / MMC_SECTOR_SIZE; if (ivar->csd.csd_structure > 0) ivar->high_cap = 1; ivar->tran_speed = ivar->csd.tran_speed; ivar->erase_sector = ivar->csd.erase_sector * ivar->csd.write_bl_len / MMC_SECTOR_SIZE; err = mmc_send_status(sc, ivar->rca, &status); if (err != MMC_ERR_NONE) { device_printf(sc->dev, "Error reading card status %d\n", err); break; } if ((status & R1_CARD_IS_LOCKED) != 0) { device_printf(sc->dev, "Card is password protected, skipping.\n"); break; } /* Get card SCR. Card must be selected to fetch it. */ mmc_select_card(sc, ivar->rca); mmc_app_send_scr(sc, ivar->rca, ivar->raw_scr); mmc_app_decode_scr(ivar->raw_scr, &ivar->scr); /* Get card switch capabilities (command class 10). */ if ((ivar->scr.sda_vsn >= 1) && (ivar->csd.ccc & (1<<10))) { mmc_sd_switch(sc, SD_SWITCH_MODE_CHECK, SD_SWITCH_GROUP1, SD_SWITCH_NOCHANGE, switch_res); if (switch_res[13] & 2) { ivar->timing = bus_timing_hs; ivar->hs_tran_speed = SD_MAX_HS; } } /* * We deselect then reselect the card here. Some cards * become unselected and timeout with the above two * commands, although the state tables / diagrams in the * standard suggest they go back to the transfer state. * Other cards don't become deselected, and if we * atttempt to blindly re-select them, we get timeout * errors from some controllers. So we deselect then * reselect to handle all situations. The only thing we * use from the sd_status is the erase sector size, but * it is still nice to get that right. */ mmc_select_card(sc, 0); mmc_select_card(sc, ivar->rca); mmc_app_sd_status(sc, ivar->rca, ivar->raw_sd_status); mmc_app_decode_sd_status(ivar->raw_sd_status, &ivar->sd_status); if (ivar->sd_status.au_size != 0) { ivar->erase_sector = 16 << ivar->sd_status.au_size; } /* Find max supported bus width. 
*/ if ((mmcbr_get_caps(sc->dev) & MMC_CAP_4_BIT_DATA) && (ivar->scr.bus_widths & SD_SCR_BUS_WIDTH_4)) ivar->bus_width = bus_width_4; /* * Some cards that report maximum I/O block sizes * greater than 512 require the block length to be * set to 512, even though that is supposed to be * the default. Example: * * Transcend 2GB SDSC card, CID: * mid=0x1b oid=0x534d pnm="00000" prv=1.0 mdt=00.2000 */ if (ivar->csd.read_bl_len != MMC_SECTOR_SIZE || ivar->csd.write_bl_len != MMC_SECTOR_SIZE) mmc_set_blocklen(sc, MMC_SECTOR_SIZE); mmc_format_card_id_string(ivar); if (bootverbose || mmc_debug) mmc_log_card(sc->dev, ivar, newcard); if (newcard) { /* Add device. */ child = device_add_child(sc->dev, NULL, -1); device_set_ivars(child, ivar); } mmc_select_card(sc, 0); return; } mmc_decode_cid_mmc(ivar->raw_cid, &ivar->cid); ivar->rca = rca++; mmc_set_relative_addr(sc, ivar->rca); /* Get card CSD. */ mmc_send_csd(sc, ivar->rca, ivar->raw_csd); if (bootverbose || mmc_debug) device_printf(sc->dev, "%sard detected (CSD %08x%08x%08x%08x)\n", newcard ? "New c" : "C", ivar->raw_csd[0], ivar->raw_csd[1], ivar->raw_csd[2], ivar->raw_csd[3]); mmc_decode_csd_mmc(ivar->raw_csd, &ivar->csd); ivar->sec_count = ivar->csd.capacity / MMC_SECTOR_SIZE; ivar->tran_speed = ivar->csd.tran_speed; ivar->erase_sector = ivar->csd.erase_sector * ivar->csd.write_bl_len / MMC_SECTOR_SIZE; err = mmc_send_status(sc, ivar->rca, &status); if (err != MMC_ERR_NONE) { device_printf(sc->dev, "Error reading card status %d\n", err); break; } if ((status & R1_CARD_IS_LOCKED) != 0) { device_printf(sc->dev, "Card is password protected, skipping.\n"); break; } mmc_select_card(sc, ivar->rca); /* Only MMC >= 4.x cards support EXT_CSD. 
*/ if (ivar->csd.spec_vers >= 4) { mmc_send_ext_csd(sc, ivar->raw_ext_csd); /* Handle extended capacity from EXT_CSD */ sec_count = ivar->raw_ext_csd[EXT_CSD_SEC_CNT] + (ivar->raw_ext_csd[EXT_CSD_SEC_CNT + 1] << 8) + (ivar->raw_ext_csd[EXT_CSD_SEC_CNT + 2] << 16) + (ivar->raw_ext_csd[EXT_CSD_SEC_CNT + 3] << 24); if (sec_count != 0) { ivar->sec_count = sec_count; ivar->high_cap = 1; } /* Get card speed in high speed mode. */ ivar->timing = bus_timing_hs; if (ivar->raw_ext_csd[EXT_CSD_CARD_TYPE] & EXT_CSD_CARD_TYPE_52) ivar->hs_tran_speed = MMC_TYPE_52_MAX_HS; else if (ivar->raw_ext_csd[EXT_CSD_CARD_TYPE] & EXT_CSD_CARD_TYPE_26) ivar->hs_tran_speed = MMC_TYPE_26_MAX_HS; else ivar->hs_tran_speed = ivar->tran_speed; /* Find max supported bus width. */ ivar->bus_width = mmc_test_bus_width(sc); /* Handle HC erase sector size. */ if (ivar->raw_ext_csd[EXT_CSD_ERASE_GRP_SIZE] != 0) { ivar->erase_sector = 1024 * ivar->raw_ext_csd[EXT_CSD_ERASE_GRP_SIZE]; mmc_switch(sc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_ERASE_GRP_DEF, 1); } } else { ivar->bus_width = bus_width_1; ivar->timing = bus_timing_normal; } /* * Some cards that report maximum I/O block sizes greater * than 512 require the block length to be set to 512, even * though that is supposed to be the default. Example: * * Transcend 2GB SDSC card, CID: * mid=0x1b oid=0x534d pnm="00000" prv=1.0 mdt=00.2000 */ if (ivar->csd.read_bl_len != MMC_SECTOR_SIZE || ivar->csd.write_bl_len != MMC_SECTOR_SIZE) mmc_set_blocklen(sc, MMC_SECTOR_SIZE); mmc_format_card_id_string(ivar); if (bootverbose || mmc_debug) mmc_log_card(sc->dev, ivar, newcard); if (newcard) { /* Add device. 
*/ child = device_add_child(sc->dev, NULL, -1); device_set_ivars(child, ivar); } mmc_select_card(sc, 0); } } static void mmc_rescan_cards(struct mmc_softc *sc) { struct mmc_ivars *ivar = NULL; device_t *devlist; int err, i, devcount; if ((err = device_get_children(sc->dev, &devlist, &devcount)) != 0) return; for (i = 0; i < devcount; i++) { ivar = device_get_ivars(devlist[i]); if (mmc_select_card(sc, ivar->rca)) { if (bootverbose || mmc_debug) device_printf(sc->dev, "Card at relative address %d lost.\n", ivar->rca); device_delete_child(sc->dev, devlist[i]); free(ivar, M_DEVBUF); } } free(devlist, M_TEMP); mmc_select_card(sc, 0); } static int mmc_delete_cards(struct mmc_softc *sc) { struct mmc_ivars *ivar; device_t *devlist; int err, i, devcount; if ((err = device_get_children(sc->dev, &devlist, &devcount)) != 0) return (err); for (i = 0; i < devcount; i++) { ivar = device_get_ivars(devlist[i]); if (bootverbose || mmc_debug) device_printf(sc->dev, "Card at relative address %d deleted.\n", ivar->rca); device_delete_child(sc->dev, devlist[i]); free(ivar, M_DEVBUF); } free(devlist, M_TEMP); return (0); } static void mmc_go_discovery(struct mmc_softc *sc) { uint32_t ocr; device_t dev; int err; dev = sc->dev; if (mmcbr_get_power_mode(dev) != power_on) { /* * First, try SD modes */ sc->squelched++; /* Errors are expected, squelch reporting. 
*/ mmcbr_set_mode(dev, mode_sd); mmc_power_up(sc); mmcbr_set_bus_mode(dev, pushpull); if (bootverbose || mmc_debug) device_printf(sc->dev, "Probing bus\n"); mmc_idle_cards(sc); err = mmc_send_if_cond(sc, 1); if ((bootverbose || mmc_debug) && err == 0) device_printf(sc->dev, "SD 2.0 interface conditions: OK\n"); if (mmc_send_app_op_cond(sc, 0, &ocr) != MMC_ERR_NONE) { if (bootverbose || mmc_debug) device_printf(sc->dev, "SD probe: failed\n"); /* * Failed, try MMC */ mmcbr_set_mode(dev, mode_mmc); if (mmc_send_op_cond(sc, 0, &ocr) != MMC_ERR_NONE) { if (bootverbose || mmc_debug) device_printf(sc->dev, "MMC probe: failed\n"); ocr = 0; /* Failed both, powerdown. */ } else if (bootverbose || mmc_debug) device_printf(sc->dev, "MMC probe: OK (OCR: 0x%08x)\n", ocr); } else if (bootverbose || mmc_debug) device_printf(sc->dev, "SD probe: OK (OCR: 0x%08x)\n", ocr); sc->squelched--; mmcbr_set_ocr(dev, mmc_select_vdd(sc, ocr)); if (mmcbr_get_ocr(dev) != 0) mmc_idle_cards(sc); } else { mmcbr_set_bus_mode(dev, opendrain); mmcbr_set_clock(dev, CARD_ID_FREQUENCY); mmcbr_update_ios(dev); /* XXX recompute vdd based on new cards? */ } /* * Make sure that we have a mutually agreeable voltage to at least * one card on the bus. */ if (bootverbose || mmc_debug) device_printf(sc->dev, "Current OCR: 0x%08x\n", mmcbr_get_ocr(dev)); if (mmcbr_get_ocr(dev) == 0) { device_printf(sc->dev, "No compatible cards found on bus\n"); mmc_delete_cards(sc); mmc_power_down(sc); return; } /* * Reselect the cards after we've idled them above. */ if (mmcbr_get_mode(dev) == mode_sd) { err = mmc_send_if_cond(sc, 1); mmc_send_app_op_cond(sc, (err ? 
0 : MMC_OCR_CCS) | mmcbr_get_ocr(dev), NULL); } else mmc_send_op_cond(sc, MMC_OCR_CCS | mmcbr_get_ocr(dev), NULL); mmc_discover_cards(sc); mmc_rescan_cards(sc); mmcbr_set_bus_mode(dev, pushpull); mmcbr_update_ios(dev); mmc_calculate_clock(sc); bus_generic_attach(dev); /* mmc_update_children_sysctl(dev);*/ } static int mmc_calculate_clock(struct mmc_softc *sc) { int max_dtr, max_hs_dtr, max_timing; int nkid, i, f_max; device_t *kids; struct mmc_ivars *ivar; f_max = mmcbr_get_f_max(sc->dev); max_dtr = max_hs_dtr = f_max; if ((mmcbr_get_caps(sc->dev) & MMC_CAP_HSPEED)) max_timing = bus_timing_hs; else max_timing = bus_timing_normal; if (device_get_children(sc->dev, &kids, &nkid) != 0) panic("can't get children"); for (i = 0; i < nkid; i++) { ivar = device_get_ivars(kids[i]); if (ivar->timing < max_timing) max_timing = ivar->timing; if (ivar->tran_speed < max_dtr) max_dtr = ivar->tran_speed; if (ivar->hs_tran_speed < max_hs_dtr) max_hs_dtr = ivar->hs_tran_speed; } for (i = 0; i < nkid; i++) { ivar = device_get_ivars(kids[i]); if (ivar->timing == bus_timing_normal) continue; mmc_select_card(sc, ivar->rca); mmc_set_timing(sc, max_timing); } mmc_select_card(sc, 0); free(kids, M_TEMP); if (max_timing == bus_timing_hs) max_dtr = max_hs_dtr; if (bootverbose || mmc_debug) { device_printf(sc->dev, "setting transfer rate to %d.%03dMHz%s\n", max_dtr / 1000000, (max_dtr / 1000) % 1000, max_timing == bus_timing_hs ? 
" (high speed timing)" : ""); } mmcbr_set_timing(sc->dev, max_timing); mmcbr_set_clock(sc->dev, max_dtr); mmcbr_update_ios(sc->dev); return max_dtr; } static void mmc_scan(struct mmc_softc *sc) { device_t dev = sc->dev; mmc_acquire_bus(dev, dev); mmc_go_discovery(sc); mmc_release_bus(dev, dev); } static int mmc_read_ivar(device_t bus, device_t child, int which, uintptr_t *result) { struct mmc_ivars *ivar = device_get_ivars(child); switch (which) { default: return (EINVAL); case MMC_IVAR_DSR_IMP: *result = ivar->csd.dsr_imp; break; case MMC_IVAR_MEDIA_SIZE: *result = ivar->sec_count; break; case MMC_IVAR_RCA: *result = ivar->rca; break; case MMC_IVAR_SECTOR_SIZE: *result = MMC_SECTOR_SIZE; break; case MMC_IVAR_TRAN_SPEED: *result = mmcbr_get_clock(bus); break; case MMC_IVAR_READ_ONLY: *result = ivar->read_only; break; case MMC_IVAR_HIGH_CAP: *result = ivar->high_cap; break; case MMC_IVAR_CARD_TYPE: *result = ivar->mode; break; case MMC_IVAR_BUS_WIDTH: *result = ivar->bus_width; break; case MMC_IVAR_ERASE_SECTOR: *result = ivar->erase_sector; break; case MMC_IVAR_MAX_DATA: *result = mmcbr_get_max_data(bus); break; case MMC_IVAR_CARD_ID_STRING: *(char **)result = ivar->card_id_string; break; case MMC_IVAR_CARD_SN_STRING: *(char **)result = ivar->card_sn_string; break; } return (0); } static int mmc_write_ivar(device_t bus, device_t child, int which, uintptr_t value) { /* * None are writable ATM */ return (EINVAL); } static void mmc_delayed_attach(void *xsc) { struct mmc_softc *sc = xsc; mmc_scan(sc); config_intrhook_disestablish(&sc->config_intrhook); } static int mmc_child_location_str(device_t dev, device_t child, char *buf, size_t buflen) { snprintf(buf, buflen, "rca=0x%04x", mmc_get_rca(child)); return (0); } static device_method_t mmc_methods[] = { /* device_if */ DEVMETHOD(device_probe, mmc_probe), DEVMETHOD(device_attach, mmc_attach), DEVMETHOD(device_detach, mmc_detach), DEVMETHOD(device_suspend, mmc_suspend), DEVMETHOD(device_resume, mmc_resume), /* Bus 
interface */ DEVMETHOD(bus_read_ivar, mmc_read_ivar), DEVMETHOD(bus_write_ivar, mmc_write_ivar), DEVMETHOD(bus_child_location_str, mmc_child_location_str), /* MMC Bus interface */ DEVMETHOD(mmcbus_wait_for_request, mmc_wait_for_request), DEVMETHOD(mmcbus_acquire_bus, mmc_acquire_bus), DEVMETHOD(mmcbus_release_bus, mmc_release_bus), DEVMETHOD_END }; -static driver_t mmc_driver = { +driver_t mmc_driver = { "mmc", mmc_methods, sizeof(struct mmc_softc), }; -static devclass_t mmc_devclass; - -DRIVER_MODULE(mmc, a10_mmc, mmc_driver, mmc_devclass, NULL, NULL); -DRIVER_MODULE(mmc, aml8726_mmc, mmc_driver, mmc_devclass, NULL, NULL); -DRIVER_MODULE(mmc, aml8726_sdxc, mmc_driver, mmc_devclass, NULL, NULL); -DRIVER_MODULE(mmc, at91_mci, mmc_driver, mmc_devclass, NULL, NULL); -DRIVER_MODULE(mmc, sdhci_bcm, mmc_driver, mmc_devclass, NULL, NULL); -DRIVER_MODULE(mmc, sdhci_fdt, mmc_driver, mmc_devclass, NULL, NULL); -DRIVER_MODULE(mmc, sdhci_fsl, mmc_driver, mmc_devclass, NULL, NULL); -DRIVER_MODULE(mmc, sdhci_imx, mmc_driver, mmc_devclass, NULL, NULL); -DRIVER_MODULE(mmc, sdhci_pci, mmc_driver, mmc_devclass, NULL, NULL); -DRIVER_MODULE(mmc, sdhci_ti, mmc_driver, mmc_devclass, NULL, NULL); -DRIVER_MODULE(mmc, ti_mmchs, mmc_driver, mmc_devclass, NULL, NULL); -DRIVER_MODULE(mmc, dwmmc, mmc_driver, mmc_devclass, NULL, NULL); +devclass_t mmc_devclass; Index: head/sys/dev/sdhci/sdhci_fdt.c =================================================================== --- head/sys/dev/sdhci/sdhci_fdt.c (revision 292179) +++ head/sys/dev/sdhci/sdhci_fdt.c (revision 292180) @@ -1,309 +1,310 @@ /*- * Copyright (c) 2012 Thomas Skibo * Copyright (c) 2008 Alexander Motin * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* Generic driver to attach sdhci controllers on simplebus. * Derived mainly from sdhci_pci.c */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "mmcbr_if.h" #include "sdhci_if.h" #define MAX_SLOTS 6 struct sdhci_fdt_softc { device_t dev; /* Controller device */ u_int quirks; /* Chip specific quirks */ u_int caps; /* If we override SDHCI_CAPABILITIES */ uint32_t max_clk; /* Max possible freq */ struct resource *irq_res; /* IRQ resource */ void *intrhand; /* Interrupt handle */ int num_slots; /* Number of slots on this controller*/ struct sdhci_slot slots[MAX_SLOTS]; struct resource *mem_res[MAX_SLOTS]; /* Memory resource */ }; static uint8_t sdhci_fdt_read_1(device_t dev, struct sdhci_slot *slot, bus_size_t off) { struct sdhci_fdt_softc *sc = device_get_softc(dev); return (bus_read_1(sc->mem_res[slot->num], off)); } static void sdhci_fdt_write_1(device_t dev, 
struct sdhci_slot *slot, bus_size_t off, uint8_t val) { struct sdhci_fdt_softc *sc = device_get_softc(dev); bus_write_1(sc->mem_res[slot->num], off, val); } static uint16_t sdhci_fdt_read_2(device_t dev, struct sdhci_slot *slot, bus_size_t off) { struct sdhci_fdt_softc *sc = device_get_softc(dev); return (bus_read_2(sc->mem_res[slot->num], off)); } static void sdhci_fdt_write_2(device_t dev, struct sdhci_slot *slot, bus_size_t off, uint16_t val) { struct sdhci_fdt_softc *sc = device_get_softc(dev); bus_write_2(sc->mem_res[slot->num], off, val); } static uint32_t sdhci_fdt_read_4(device_t dev, struct sdhci_slot *slot, bus_size_t off) { struct sdhci_fdt_softc *sc = device_get_softc(dev); return (bus_read_4(sc->mem_res[slot->num], off)); } static void sdhci_fdt_write_4(device_t dev, struct sdhci_slot *slot, bus_size_t off, uint32_t val) { struct sdhci_fdt_softc *sc = device_get_softc(dev); bus_write_4(sc->mem_res[slot->num], off, val); } static void sdhci_fdt_read_multi_4(device_t dev, struct sdhci_slot *slot, bus_size_t off, uint32_t *data, bus_size_t count) { struct sdhci_fdt_softc *sc = device_get_softc(dev); bus_read_multi_4(sc->mem_res[slot->num], off, data, count); } static void sdhci_fdt_write_multi_4(device_t dev, struct sdhci_slot *slot, bus_size_t off, uint32_t *data, bus_size_t count) { struct sdhci_fdt_softc *sc = device_get_softc(dev); bus_write_multi_4(sc->mem_res[slot->num], off, data, count); } static void sdhci_fdt_intr(void *arg) { struct sdhci_fdt_softc *sc = (struct sdhci_fdt_softc *)arg; int i; for (i = 0; i < sc->num_slots; i++) { struct sdhci_slot *slot = &sc->slots[i]; sdhci_generic_intr(slot); } } static int sdhci_fdt_probe(device_t dev) { struct sdhci_fdt_softc *sc = device_get_softc(dev); phandle_t node; pcell_t cid; sc->quirks = 0; sc->num_slots = 1; sc->max_clk = 0; if (!ofw_bus_status_okay(dev)) return (ENXIO); if (ofw_bus_is_compatible(dev, "sdhci_generic")) { device_set_desc(dev, "generic fdt SDHCI controller"); } else if 
(ofw_bus_is_compatible(dev, "xlnx,zy7_sdhci")) { sc->quirks = SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK; device_set_desc(dev, "Zynq-7000 generic fdt SDHCI controller"); } else return (ENXIO); node = ofw_bus_get_node(dev); /* Allow dts to patch quirks, slots, and max-frequency. */ if ((OF_getencprop(node, "quirks", &cid, sizeof(cid))) > 0) sc->quirks = cid; if ((OF_getencprop(node, "num-slots", &cid, sizeof(cid))) > 0) sc->num_slots = cid; if ((OF_getencprop(node, "max-frequency", &cid, sizeof(cid))) > 0) sc->max_clk = cid; return (0); } static int sdhci_fdt_attach(device_t dev) { struct sdhci_fdt_softc *sc = device_get_softc(dev); int err, slots, rid, i; sc->dev = dev; /* Allocate IRQ. */ rid = 0; sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE); if (sc->irq_res == NULL) { device_printf(dev, "Can't allocate IRQ\n"); return (ENOMEM); } /* Scan all slots. */ slots = sc->num_slots; /* number of slots determined in probe(). */ sc->num_slots = 0; for (i = 0; i < slots; i++) { struct sdhci_slot *slot = &sc->slots[sc->num_slots]; /* Allocate memory. */ rid = 0; sc->mem_res[i] = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->mem_res[i] == NULL) { device_printf(dev, "Can't allocate memory for " "slot %d\n", i); continue; } slot->quirks = sc->quirks; slot->caps = sc->caps; slot->max_clk = sc->max_clk; if (sdhci_init_slot(dev, slot, i) != 0) continue; sc->num_slots++; } device_printf(dev, "%d slot(s) allocated\n", sc->num_slots); /* Activate the interrupt */ err = bus_setup_intr(dev, sc->irq_res, INTR_TYPE_MISC | INTR_MPSAFE, NULL, sdhci_fdt_intr, sc, &sc->intrhand); if (err) { device_printf(dev, "Cannot setup IRQ\n"); return (err); } /* Process cards detection. 
*/ for (i = 0; i < sc->num_slots; i++) { struct sdhci_slot *slot = &sc->slots[i]; sdhci_start_slot(slot); } return (0); } static int sdhci_fdt_detach(device_t dev) { struct sdhci_fdt_softc *sc = device_get_softc(dev); int i; bus_generic_detach(dev); bus_teardown_intr(dev, sc->irq_res, sc->intrhand); bus_release_resource(dev, SYS_RES_IRQ, rman_get_rid(sc->irq_res), sc->irq_res); for (i = 0; i < sc->num_slots; i++) { struct sdhci_slot *slot = &sc->slots[i]; sdhci_cleanup_slot(slot); bus_release_resource(dev, SYS_RES_MEMORY, rman_get_rid(sc->mem_res[i]), sc->mem_res[i]); } return (0); } static device_method_t sdhci_fdt_methods[] = { /* device_if */ DEVMETHOD(device_probe, sdhci_fdt_probe), DEVMETHOD(device_attach, sdhci_fdt_attach), DEVMETHOD(device_detach, sdhci_fdt_detach), /* Bus interface */ DEVMETHOD(bus_read_ivar, sdhci_generic_read_ivar), DEVMETHOD(bus_write_ivar, sdhci_generic_write_ivar), /* mmcbr_if */ DEVMETHOD(mmcbr_update_ios, sdhci_generic_update_ios), DEVMETHOD(mmcbr_request, sdhci_generic_request), DEVMETHOD(mmcbr_get_ro, sdhci_generic_get_ro), DEVMETHOD(mmcbr_acquire_host, sdhci_generic_acquire_host), DEVMETHOD(mmcbr_release_host, sdhci_generic_release_host), /* SDHCI registers accessors */ DEVMETHOD(sdhci_read_1, sdhci_fdt_read_1), DEVMETHOD(sdhci_read_2, sdhci_fdt_read_2), DEVMETHOD(sdhci_read_4, sdhci_fdt_read_4), DEVMETHOD(sdhci_read_multi_4, sdhci_fdt_read_multi_4), DEVMETHOD(sdhci_write_1, sdhci_fdt_write_1), DEVMETHOD(sdhci_write_2, sdhci_fdt_write_2), DEVMETHOD(sdhci_write_4, sdhci_fdt_write_4), DEVMETHOD(sdhci_write_multi_4, sdhci_fdt_write_multi_4), DEVMETHOD_END }; static driver_t sdhci_fdt_driver = { "sdhci_fdt", sdhci_fdt_methods, sizeof(struct sdhci_fdt_softc), }; static devclass_t sdhci_fdt_devclass; DRIVER_MODULE(sdhci_fdt, simplebus, sdhci_fdt_driver, sdhci_fdt_devclass, NULL, NULL); MODULE_DEPEND(sdhci_fdt, sdhci, 1, 1, 1); +DRIVER_MODULE(mmc, sdhci_fdt, mmc_driver, mmc_devclass, NULL, NULL); Index: head/sys/dev/sdhci/sdhci_pci.c 
=================================================================== --- head/sys/dev/sdhci/sdhci_pci.c (revision 292179) +++ head/sys/dev/sdhci/sdhci_pci.c (revision 292180) @@ -1,476 +1,477 @@ /*- * Copyright (c) 2008 Alexander Motin * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "sdhci.h" #include "mmcbr_if.h" #include "sdhci_if.h" /* * PCI registers */ #define PCI_SDHCI_IFPIO 0x00 #define PCI_SDHCI_IFDMA 0x01 #define PCI_SDHCI_IFVENDOR 0x02 #define PCI_SLOT_INFO 0x40 /* 8 bits */ #define PCI_SLOT_INFO_SLOTS(x) (((x >> 4) & 7) + 1) #define PCI_SLOT_INFO_FIRST_BAR(x) ((x) & 7) /* * RICOH specific PCI registers */ #define SDHC_PCI_MODE_KEY 0xf9 #define SDHC_PCI_MODE 0x150 #define SDHC_PCI_MODE_SD20 0x10 #define SDHC_PCI_BASE_FREQ_KEY 0xfc #define SDHC_PCI_BASE_FREQ 0xe1 static const struct sdhci_device { uint32_t model; uint16_t subvendor; const char *desc; u_int quirks; } sdhci_devices[] = { { 0x08221180, 0xffff, "RICOH R5C822 SD", SDHCI_QUIRK_FORCE_DMA }, { 0xe8221180, 0xffff, "RICOH R5CE822 SD", SDHCI_QUIRK_FORCE_DMA | SDHCI_QUIRK_LOWER_FREQUENCY }, { 0xe8231180, 0xffff, "RICOH R5CE823 SD", SDHCI_QUIRK_LOWER_FREQUENCY }, { 0x8034104c, 0xffff, "TI XX21/XX11 SD", SDHCI_QUIRK_FORCE_DMA }, { 0x05501524, 0xffff, "ENE CB712 SD", SDHCI_QUIRK_BROKEN_TIMINGS }, { 0x05511524, 0xffff, "ENE CB712 SD 2", SDHCI_QUIRK_BROKEN_TIMINGS }, { 0x07501524, 0xffff, "ENE CB714 SD", SDHCI_QUIRK_RESET_ON_IOS | SDHCI_QUIRK_BROKEN_TIMINGS }, { 0x07511524, 0xffff, "ENE CB714 SD 2", SDHCI_QUIRK_RESET_ON_IOS | SDHCI_QUIRK_BROKEN_TIMINGS }, { 0x410111ab, 0xffff, "Marvell CaFe SD", SDHCI_QUIRK_INCR_TIMEOUT_CONTROL }, { 0x2381197B, 0xffff, "JMicron JMB38X SD", SDHCI_QUIRK_32BIT_DMA_SIZE | SDHCI_QUIRK_RESET_AFTER_REQUEST }, { 0x16bc14e4, 0xffff, "Broadcom BCM577xx SDXC/MMC Card Reader", SDHCI_QUIRK_BCM577XX_400KHZ_CLKSRC }, { 0, 0xffff, NULL, 0 } }; struct sdhci_pci_softc { u_int quirks; /* Chip specific quirks */ struct resource *irq_res; /* IRQ resource */ void *intrhand; /* Interrupt handle */ int num_slots; /* Number of slots on this controller */ 
struct sdhci_slot slots[6]; struct resource *mem_res[6]; /* Memory resource */ uint8_t cfg_freq; /* Saved mode */ uint8_t cfg_mode; /* Saved frequency */ }; static int sdhci_enable_msi = 1; SYSCTL_INT(_hw_sdhci, OID_AUTO, enable_msi, CTLFLAG_RDTUN, &sdhci_enable_msi, 0, "Enable MSI interrupts"); static uint8_t sdhci_pci_read_1(device_t dev, struct sdhci_slot *slot, bus_size_t off) { struct sdhci_pci_softc *sc = device_get_softc(dev); bus_barrier(sc->mem_res[slot->num], 0, 0xFF, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); return bus_read_1(sc->mem_res[slot->num], off); } static void sdhci_pci_write_1(device_t dev, struct sdhci_slot *slot, bus_size_t off, uint8_t val) { struct sdhci_pci_softc *sc = device_get_softc(dev); bus_barrier(sc->mem_res[slot->num], 0, 0xFF, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); bus_write_1(sc->mem_res[slot->num], off, val); } static uint16_t sdhci_pci_read_2(device_t dev, struct sdhci_slot *slot, bus_size_t off) { struct sdhci_pci_softc *sc = device_get_softc(dev); bus_barrier(sc->mem_res[slot->num], 0, 0xFF, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); return bus_read_2(sc->mem_res[slot->num], off); } static void sdhci_pci_write_2(device_t dev, struct sdhci_slot *slot, bus_size_t off, uint16_t val) { struct sdhci_pci_softc *sc = device_get_softc(dev); bus_barrier(sc->mem_res[slot->num], 0, 0xFF, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); bus_write_2(sc->mem_res[slot->num], off, val); } static uint32_t sdhci_pci_read_4(device_t dev, struct sdhci_slot *slot, bus_size_t off) { struct sdhci_pci_softc *sc = device_get_softc(dev); bus_barrier(sc->mem_res[slot->num], 0, 0xFF, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); return bus_read_4(sc->mem_res[slot->num], off); } static void sdhci_pci_write_4(device_t dev, struct sdhci_slot *slot, bus_size_t off, uint32_t val) { struct sdhci_pci_softc *sc = device_get_softc(dev); bus_barrier(sc->mem_res[slot->num], 0, 0xFF, BUS_SPACE_BARRIER_READ | 
BUS_SPACE_BARRIER_WRITE); bus_write_4(sc->mem_res[slot->num], off, val); } static void sdhci_pci_read_multi_4(device_t dev, struct sdhci_slot *slot, bus_size_t off, uint32_t *data, bus_size_t count) { struct sdhci_pci_softc *sc = device_get_softc(dev); bus_read_multi_stream_4(sc->mem_res[slot->num], off, data, count); } static void sdhci_pci_write_multi_4(device_t dev, struct sdhci_slot *slot, bus_size_t off, uint32_t *data, bus_size_t count) { struct sdhci_pci_softc *sc = device_get_softc(dev); bus_write_multi_stream_4(sc->mem_res[slot->num], off, data, count); } static void sdhci_pci_intr(void *arg); static void sdhci_lower_frequency(device_t dev) { struct sdhci_pci_softc *sc = device_get_softc(dev); /* * Enable SD2.0 mode. * NB: for RICOH R5CE823, this changes the PCI device ID to 0xe822. */ pci_write_config(dev, SDHC_PCI_MODE_KEY, 0xfc, 1); sc->cfg_mode = pci_read_config(dev, SDHC_PCI_MODE, 1); pci_write_config(dev, SDHC_PCI_MODE, SDHC_PCI_MODE_SD20, 1); pci_write_config(dev, SDHC_PCI_MODE_KEY, 0x00, 1); /* * Some SD/MMC cards don't work with the default base * clock frequency of 200 MHz. Lower it to 50 MHz. */ pci_write_config(dev, SDHC_PCI_BASE_FREQ_KEY, 0x01, 1); sc->cfg_freq = pci_read_config(dev, SDHC_PCI_BASE_FREQ, 1); pci_write_config(dev, SDHC_PCI_BASE_FREQ, 50, 1); pci_write_config(dev, SDHC_PCI_BASE_FREQ_KEY, 0x00, 1); } static void sdhci_restore_frequency(device_t dev) { struct sdhci_pci_softc *sc = device_get_softc(dev); /* Restore mode. */ pci_write_config(dev, SDHC_PCI_MODE_KEY, 0xfc, 1); pci_write_config(dev, SDHC_PCI_MODE, sc->cfg_mode, 1); pci_write_config(dev, SDHC_PCI_MODE_KEY, 0x00, 1); /* Restore frequency. 
*/ pci_write_config(dev, SDHC_PCI_BASE_FREQ_KEY, 0x01, 1); pci_write_config(dev, SDHC_PCI_BASE_FREQ, sc->cfg_freq, 1); pci_write_config(dev, SDHC_PCI_BASE_FREQ_KEY, 0x00, 1); } static int sdhci_pci_probe(device_t dev) { uint32_t model; uint16_t subvendor; uint8_t class, subclass; int i, result; model = (uint32_t)pci_get_device(dev) << 16; model |= (uint32_t)pci_get_vendor(dev) & 0x0000ffff; subvendor = pci_get_subvendor(dev); class = pci_get_class(dev); subclass = pci_get_subclass(dev); result = ENXIO; for (i = 0; sdhci_devices[i].model != 0; i++) { if (sdhci_devices[i].model == model && (sdhci_devices[i].subvendor == 0xffff || sdhci_devices[i].subvendor == subvendor)) { device_set_desc(dev, sdhci_devices[i].desc); result = BUS_PROBE_DEFAULT; break; } } if (result == ENXIO && class == PCIC_BASEPERIPH && subclass == PCIS_BASEPERIPH_SDHC) { device_set_desc(dev, "Generic SD HCI"); result = BUS_PROBE_GENERIC; } return (result); } static int sdhci_pci_attach(device_t dev) { struct sdhci_pci_softc *sc = device_get_softc(dev); uint32_t model; uint16_t subvendor; int bar, err, rid, slots, i; model = (uint32_t)pci_get_device(dev) << 16; model |= (uint32_t)pci_get_vendor(dev) & 0x0000ffff; subvendor = pci_get_subvendor(dev); /* Apply chip specific quirks. */ for (i = 0; sdhci_devices[i].model != 0; i++) { if (sdhci_devices[i].model == model && (sdhci_devices[i].subvendor == 0xffff || sdhci_devices[i].subvendor == subvendor)) { sc->quirks = sdhci_devices[i].quirks; break; } } /* Some controllers need to be bumped into the right mode. */ if (sc->quirks & SDHCI_QUIRK_LOWER_FREQUENCY) sdhci_lower_frequency(dev); /* Read slots info from PCI registers. */ slots = pci_read_config(dev, PCI_SLOT_INFO, 1); bar = PCI_SLOT_INFO_FIRST_BAR(slots); slots = PCI_SLOT_INFO_SLOTS(slots); if (slots > 6 || bar > 5) { device_printf(dev, "Incorrect slots information (%d, %d).\n", slots, bar); return (EINVAL); } /* Allocate IRQ. 
*/ i = 1; rid = 0; if (sdhci_enable_msi != 0 && pci_alloc_msi(dev, &i) == 0) rid = 1; sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE | (rid != 0 ? 0 : RF_SHAREABLE)); if (sc->irq_res == NULL) { device_printf(dev, "Can't allocate IRQ\n"); pci_release_msi(dev); return (ENOMEM); } /* Scan all slots. */ for (i = 0; i < slots; i++) { struct sdhci_slot *slot = &sc->slots[sc->num_slots]; /* Allocate memory. */ rid = PCIR_BAR(bar + i); sc->mem_res[i] = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid, 0ul, ~0ul, 0x100, RF_ACTIVE); if (sc->mem_res[i] == NULL) { device_printf(dev, "Can't allocate memory for slot %d\n", i); continue; } slot->quirks = sc->quirks; if (sdhci_init_slot(dev, slot, i) != 0) continue; sc->num_slots++; } device_printf(dev, "%d slot(s) allocated\n", sc->num_slots); /* Activate the interrupt */ err = bus_setup_intr(dev, sc->irq_res, INTR_TYPE_MISC | INTR_MPSAFE, NULL, sdhci_pci_intr, sc, &sc->intrhand); if (err) device_printf(dev, "Can't setup IRQ\n"); pci_enable_busmaster(dev); /* Process cards detection. 
*/ for (i = 0; i < sc->num_slots; i++) { struct sdhci_slot *slot = &sc->slots[i]; sdhci_start_slot(slot); } return (0); } static int sdhci_pci_detach(device_t dev) { struct sdhci_pci_softc *sc = device_get_softc(dev); int i; bus_teardown_intr(dev, sc->irq_res, sc->intrhand); bus_release_resource(dev, SYS_RES_IRQ, rman_get_rid(sc->irq_res), sc->irq_res); pci_release_msi(dev); for (i = 0; i < sc->num_slots; i++) { struct sdhci_slot *slot = &sc->slots[i]; sdhci_cleanup_slot(slot); bus_release_resource(dev, SYS_RES_MEMORY, rman_get_rid(sc->mem_res[i]), sc->mem_res[i]); } if (sc->quirks & SDHCI_QUIRK_LOWER_FREQUENCY) sdhci_restore_frequency(dev); return (0); } static int sdhci_pci_shutdown(device_t dev) { struct sdhci_pci_softc *sc = device_get_softc(dev); if (sc->quirks & SDHCI_QUIRK_LOWER_FREQUENCY) sdhci_restore_frequency(dev); return (0); } static int sdhci_pci_suspend(device_t dev) { struct sdhci_pci_softc *sc = device_get_softc(dev); int i, err; err = bus_generic_suspend(dev); if (err) return (err); for (i = 0; i < sc->num_slots; i++) sdhci_generic_suspend(&sc->slots[i]); return (0); } static int sdhci_pci_resume(device_t dev) { struct sdhci_pci_softc *sc = device_get_softc(dev); int i; for (i = 0; i < sc->num_slots; i++) sdhci_generic_resume(&sc->slots[i]); return (bus_generic_resume(dev)); } static void sdhci_pci_intr(void *arg) { struct sdhci_pci_softc *sc = (struct sdhci_pci_softc *)arg; int i; for (i = 0; i < sc->num_slots; i++) { struct sdhci_slot *slot = &sc->slots[i]; sdhci_generic_intr(slot); } } static device_method_t sdhci_methods[] = { /* device_if */ DEVMETHOD(device_probe, sdhci_pci_probe), DEVMETHOD(device_attach, sdhci_pci_attach), DEVMETHOD(device_detach, sdhci_pci_detach), DEVMETHOD(device_shutdown, sdhci_pci_shutdown), DEVMETHOD(device_suspend, sdhci_pci_suspend), DEVMETHOD(device_resume, sdhci_pci_resume), /* Bus interface */ DEVMETHOD(bus_read_ivar, sdhci_generic_read_ivar), DEVMETHOD(bus_write_ivar, sdhci_generic_write_ivar), /* mmcbr_if */ 
DEVMETHOD(mmcbr_update_ios, sdhci_generic_update_ios), DEVMETHOD(mmcbr_request, sdhci_generic_request), DEVMETHOD(mmcbr_get_ro, sdhci_generic_get_ro), DEVMETHOD(mmcbr_acquire_host, sdhci_generic_acquire_host), DEVMETHOD(mmcbr_release_host, sdhci_generic_release_host), /* SDHCI registers accessors */ DEVMETHOD(sdhci_read_1, sdhci_pci_read_1), DEVMETHOD(sdhci_read_2, sdhci_pci_read_2), DEVMETHOD(sdhci_read_4, sdhci_pci_read_4), DEVMETHOD(sdhci_read_multi_4, sdhci_pci_read_multi_4), DEVMETHOD(sdhci_write_1, sdhci_pci_write_1), DEVMETHOD(sdhci_write_2, sdhci_pci_write_2), DEVMETHOD(sdhci_write_4, sdhci_pci_write_4), DEVMETHOD(sdhci_write_multi_4, sdhci_pci_write_multi_4), DEVMETHOD_END }; static driver_t sdhci_pci_driver = { "sdhci_pci", sdhci_methods, sizeof(struct sdhci_pci_softc), }; static devclass_t sdhci_pci_devclass; DRIVER_MODULE(sdhci_pci, pci, sdhci_pci_driver, sdhci_pci_devclass, NULL, NULL); MODULE_DEPEND(sdhci_pci, sdhci, 1, 1, 1); +DRIVER_MODULE(mmc, sdhci_pci, mmc_driver, mmc_devclass, NULL, NULL); Index: head/sys/powerpc/mpc85xx/fsl_sdhc.c =================================================================== --- head/sys/powerpc/mpc85xx/fsl_sdhc.c (revision 292179) +++ head/sys/powerpc/mpc85xx/fsl_sdhc.c (revision 292180) @@ -1,1302 +1,1303 @@ /*- * Copyright (c) 2011-2012 Semihalf * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * Driver for Freescale integrated eSDHC controller. * Limitations: * - No support for multi-block transfers. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "opt_platform.h" #include "mmcbr_if.h" #include "fsl_sdhc.h" #ifdef DEBUG #define DPRINTF(fmt, arg...) printf("DEBUG %s(): " fmt, __FUNCTION__, ##arg) #else #define DPRINTF(fmt, arg...) 
#endif /***************************************************************************** * Register the driver *****************************************************************************/ /* Forward declarations */ static int fsl_sdhc_probe(device_t); static int fsl_sdhc_attach(device_t); static int fsl_sdhc_detach(device_t); static int fsl_sdhc_read_ivar(device_t, device_t, int, uintptr_t *); static int fsl_sdhc_write_ivar(device_t, device_t, int, uintptr_t); static int fsl_sdhc_update_ios(device_t, device_t); static int fsl_sdhc_request(device_t, device_t, struct mmc_request *); static int fsl_sdhc_get_ro(device_t, device_t); static int fsl_sdhc_acquire_host(device_t, device_t); static int fsl_sdhc_release_host(device_t, device_t); static device_method_t fsl_sdhc_methods[] = { /* device_if */ DEVMETHOD(device_probe, fsl_sdhc_probe), DEVMETHOD(device_attach, fsl_sdhc_attach), DEVMETHOD(device_detach, fsl_sdhc_detach), /* Bus interface */ DEVMETHOD(bus_read_ivar, fsl_sdhc_read_ivar), DEVMETHOD(bus_write_ivar, fsl_sdhc_write_ivar), /* OFW bus interface */ DEVMETHOD(ofw_bus_get_compat, ofw_bus_gen_get_compat), DEVMETHOD(ofw_bus_get_model, ofw_bus_gen_get_model), DEVMETHOD(ofw_bus_get_name, ofw_bus_gen_get_name), DEVMETHOD(ofw_bus_get_node, ofw_bus_gen_get_node), DEVMETHOD(ofw_bus_get_type, ofw_bus_gen_get_type), /* mmcbr_if */ DEVMETHOD(mmcbr_update_ios, fsl_sdhc_update_ios), DEVMETHOD(mmcbr_request, fsl_sdhc_request), DEVMETHOD(mmcbr_get_ro, fsl_sdhc_get_ro), DEVMETHOD(mmcbr_acquire_host, fsl_sdhc_acquire_host), DEVMETHOD(mmcbr_release_host, fsl_sdhc_release_host), {0, 0}, }; /* kobj_class definition */ static driver_t fsl_sdhc_driver = { "sdhci_fsl", fsl_sdhc_methods, sizeof(struct fsl_sdhc_softc) }; static devclass_t fsl_sdhc_devclass; DRIVER_MODULE(sdhci_fsl, simplebus, fsl_sdhc_driver, fsl_sdhc_devclass, 0, 0); +DRIVER_MODULE(mmc, sdhci_fsl, mmc_driver, mmc_devclass, NULL, NULL); /***************************************************************************** * 
Private methods *****************************************************************************/ static inline int read4(struct fsl_sdhc_softc *sc, unsigned int offset) { return bus_space_read_4(sc->bst, sc->bsh, offset); } static inline void write4(struct fsl_sdhc_softc *sc, unsigned int offset, int value) { bus_space_write_4(sc->bst, sc->bsh, offset, value); } static inline void set_bit(struct fsl_sdhc_softc *sc, uint32_t offset, uint32_t mask) { uint32_t x = read4(sc, offset); write4(sc, offset, x | mask); } static inline void clear_bit(struct fsl_sdhc_softc *sc, uint32_t offset, uint32_t mask) { uint32_t x = read4(sc, offset); write4(sc, offset, x & ~mask); } static int wait_for_bit_clear(struct fsl_sdhc_softc *sc, enum sdhc_reg_off reg, uint32_t bit) { uint32_t timeout = 10; uint32_t stat; stat = read4(sc, reg); while (stat & bit) { if (timeout == 0) { return (-1); } --timeout; DELAY(1000); stat = read4(sc, reg); } return (0); } static int wait_for_free_line(struct fsl_sdhc_softc *sc, enum sdhc_line line) { uint32_t timeout = 100; uint32_t stat; stat = read4(sc, SDHC_PRSSTAT); while (stat & line) { if (timeout == 0) { return (-1); } --timeout; DELAY(1000); stat = read4(sc, SDHC_PRSSTAT); } return (0); } static uint32_t get_platform_clock(struct fsl_sdhc_softc *sc) { device_t self, parent; phandle_t node; uint32_t clock; self = sc->self; node = ofw_bus_get_node(self); /* Get sdhci node properties */ if((OF_getprop(node, "clock-frequency", (void *)&clock, sizeof(clock)) <= 0) || (clock == 0)) { /* * Trying to get clock from parent device (soc) if correct * clock cannot be acquired from sdhci node. 
*/ parent = device_get_parent(self); node = ofw_bus_get_node(parent); /* Get soc properties */ if ((OF_getprop(node, "bus-frequency", (void *)&clock, sizeof(clock)) <= 0) || (clock == 0)) { device_printf(self,"Cannot acquire correct sdhci " "frequency from DTS.\n"); return (0); } } DPRINTF("Acquired clock: %d from DTS\n", clock); return (clock); } /** * Set clock driving card. * @param sc * @param clock Desired clock frequency in Hz */ static void set_clock(struct fsl_sdhc_softc *sc, uint32_t clock) { uint32_t base_clock; uint32_t divisor, prescaler = 1; uint32_t round = 0; if (clock == sc->slot.clock) return; if (clock == 0) { clear_bit(sc, SDHC_SYSCTL, MASK_CLOCK_CONTROL | SYSCTL_PEREN | SYSCTL_HCKEN | SYSCTL_IPGEN); return; } base_clock = sc->platform_clock; round = base_clock & 0x2; base_clock >>= 2; base_clock += round; round = 0; /* SD specification 1.1 doesn't allow frequences above 50 MHz */ if (clock > FSL_SDHC_MAX_CLOCK) clock = FSL_SDHC_MAX_CLOCK; /* * divisor = ceil(base_clock / clock) * TODO: Reconsider symmetric rounding here instead of ceiling. */ divisor = (base_clock + clock - 1) / clock; while (divisor > 16) { round = divisor & 0x1; divisor >>= 1; prescaler <<= 1; } divisor += round - 1; /* Turn off the clock. */ clear_bit(sc, SDHC_SYSCTL, MASK_CLOCK_CONTROL); /* Write clock settings. */ set_bit(sc, SDHC_SYSCTL, (prescaler << SHIFT_SDCLKFS) | (divisor << SHIFT_DVS)); /* * Turn on clocks. * TODO: This actually disables clock automatic gating off feature of * the controller which eventually should be enabled but as for now * it prevents controller from generating card insertion/removal * interrupts correctly. 
*/ set_bit(sc, SDHC_SYSCTL, SYSCTL_PEREN | SYSCTL_HCKEN | SYSCTL_IPGEN); sc->slot.clock = clock; DPRINTF("given clock = %d, computed clock = %d\n", clock, (base_clock / prescaler) / (divisor + 1)); } static inline void send_80_clock_ticks(struct fsl_sdhc_softc *sc) { int err; err = wait_for_free_line(sc, SDHC_CMD_LINE | SDHC_DAT_LINE); if (err != 0) { device_printf(sc->self, "Can't acquire data/cmd lines\n"); return; } set_bit(sc, SDHC_SYSCTL, SYSCTL_INITA); err = wait_for_bit_clear(sc, SDHC_SYSCTL, SYSCTL_INITA); if (err != 0) { device_printf(sc->self, "Can't send 80 clocks to the card.\n"); } } static void set_bus_width(struct fsl_sdhc_softc *sc, enum mmc_bus_width width) { DPRINTF("setting bus width to %d\n", width); switch (width) { case bus_width_1: set_bit(sc, SDHC_PROCTL, DTW_1); break; case bus_width_4: set_bit(sc, SDHC_PROCTL, DTW_4); break; case bus_width_8: set_bit(sc, SDHC_PROCTL, DTW_8); break; default: device_printf(sc->self, "Unsupported bus width\n"); } } static void reset_controller_all(struct fsl_sdhc_softc *sc) { uint32_t count = 5; set_bit(sc, SDHC_SYSCTL, SYSCTL_RSTA); while (read4(sc, SDHC_SYSCTL) & SYSCTL_RSTA) { DELAY(FSL_SDHC_RESET_DELAY); --count; if (count == 0) { device_printf(sc->self, "Can't reset the controller\n"); return; } } } static void reset_controller_dat_cmd(struct fsl_sdhc_softc *sc) { int err; set_bit(sc, SDHC_SYSCTL, SYSCTL_RSTD | SYSCTL_RSTC); err = wait_for_bit_clear(sc, SDHC_SYSCTL, SYSCTL_RSTD | SYSCTL_RSTC); if (err != 0) { device_printf(sc->self, "Can't reset data & command part!\n"); return; } } static void init_controller(struct fsl_sdhc_softc *sc) { /* Enable interrupts. 
*/ #ifdef FSL_SDHC_NO_DMA write4(sc, SDHC_IRQSTATEN, MASK_IRQ_ALL & ~IRQ_DINT & ~IRQ_DMAE); write4(sc, SDHC_IRQSIGEN, MASK_IRQ_ALL & ~IRQ_DINT & ~IRQ_DMAE); #else write4(sc, SDHC_IRQSTATEN, MASK_IRQ_ALL & ~IRQ_BRR & ~IRQ_BWR); write4(sc, SDHC_IRQSIGEN, MASK_IRQ_ALL & ~IRQ_BRR & ~IRQ_BWR); /* Write DMA address */ write4(sc, SDHC_DSADDR, sc->dma_phys); /* Enable snooping and fix for AHB2MAG bypass. */ write4(sc, SDHC_DCR, DCR_SNOOP | DCR_AHB2MAG_BYPASS); #endif /* Set data timeout. */ set_bit(sc, SDHC_SYSCTL, 0xe << SHIFT_DTOCV); /* Set water-mark levels (FIFO buffer size). */ write4(sc, SDHC_WML, (FSL_SDHC_FIFO_BUF_WORDS << 16) | FSL_SDHC_FIFO_BUF_WORDS); } static void init_mmc_host_struct(struct fsl_sdhc_softc *sc) { struct mmc_host *host = &sc->mmc_host; /* Clear host structure. */ bzero(host, sizeof(struct mmc_host)); /* Calculate minimum and maximum operating frequencies. */ host->f_min = sc->platform_clock / FSL_SDHC_MAX_DIV; host->f_max = FSL_SDHC_MAX_CLOCK; /* Set operation conditions (voltage). */ host->host_ocr = MMC_OCR_320_330 | MMC_OCR_330_340; /* Set additional host controller capabilities. */ host->caps = MMC_CAP_4_BIT_DATA; /* Set mode. */ host->mode = mode_sd; } static void card_detect_task(void *arg, int pending) { struct fsl_sdhc_softc *sc = (struct fsl_sdhc_softc *)arg; int err; int insert; insert = read4(sc, SDHC_PRSSTAT) & PRSSTAT_CINS; mtx_lock(&sc->mtx); if (insert) { if (sc->child != NULL) { mtx_unlock(&sc->mtx); return; } sc->child = device_add_child(sc->self, "mmc", -1); if (sc->child == NULL) { device_printf(sc->self, "Couldn't add MMC bus!\n"); mtx_unlock(&sc->mtx); return; } /* Initialize MMC bus host structure. */ init_mmc_host_struct(sc); device_set_ivars(sc->child, &sc->mmc_host); } else { if (sc->child == NULL) { mtx_unlock(&sc->mtx); return; } } mtx_unlock(&sc->mtx); if (insert) { if ((err = device_probe_and_attach(sc->child)) != 0) { device_printf(sc->self, "MMC bus failed on probe " "and attach! 
error %d\n", err); device_delete_child(sc->self, sc->child); sc->child = NULL; } } else { if (device_delete_child(sc->self, sc->child) != 0) device_printf(sc->self, "Could not delete MMC bus!\n"); sc->child = NULL; } } static void card_detect_delay(void *arg) { struct fsl_sdhc_softc *sc = arg; taskqueue_enqueue(taskqueue_swi_giant, &sc->card_detect_task); } static void finalize_request(struct fsl_sdhc_softc *sc) { DPRINTF("finishing request %p\n", sc->request); sc->request->done(sc->request); sc->request = NULL; } /** * Read response from card. * @todo Implement Auto-CMD responses being held in R3 for multi-block xfers. * @param sc */ static void get_response(struct fsl_sdhc_softc *sc) { struct mmc_command *cmd = sc->request->cmd; int i; uint32_t val; uint8_t ext = 0; if (cmd->flags & MMC_RSP_136) { /* CRC is stripped, need to shift one byte left. */ for (i = 0; i < 4; i++) { val = read4(sc, SDHC_CMDRSP0 + i * 4); cmd->resp[3 - i] = (val << 8) + ext; ext = val >> 24; } } else { cmd->resp[0] = read4(sc, SDHC_CMDRSP0); } } #ifdef FSL_SDHC_NO_DMA /** * Read all content of a fifo buffer. * @warning Assumes data buffer is 32-bit aligned. * @param sc */ static void read_block_pio(struct fsl_sdhc_softc *sc) { struct mmc_data *data = sc->request->cmd->data; size_t left = min(FSL_SDHC_FIFO_BUF_SIZE, data->len); uint8_t *buf = data->data; uint32_t word; buf += sc->data_offset; bus_space_read_multi_4(sc->bst, sc->bsh, SDHC_DATPORT, (uint32_t *)buf, left >> 2); sc->data_offset += left; /* Handle 32-bit unaligned size case. */ left &= 0x3; if (left > 0) { buf = (uint8_t *)data->data + (sc->data_offset & ~0x3); word = read4(sc, SDHC_DATPORT); while (left > 0) { *(buf++) = word; word >>= 8; --left; } } } /** * Write a fifo buffer. * @warning Assumes data buffer size is 32-bit aligned. 
* @param sc */ static void write_block_pio(struct fsl_sdhc_softc *sc) { struct mmc_data *data = sc->request->cmd->data; size_t left = min(FSL_SDHC_FIFO_BUF_SIZE, data->len); uint8_t *buf = data->data; uint32_t word = 0; DPRINTF("sc->data_offset %d\n", sc->data_offset); buf += sc->data_offset; bus_space_write_multi_4(sc->bst, sc->bsh, SDHC_DATPORT, (uint32_t *)buf, left >> 2); sc->data_offset += left; /* Handle 32-bit unaligned size case. */ left &= 0x3; if (left > 0) { buf = (uint8_t *)data->data + (sc->data_offset & ~0x3); while (left > 0) { word += *(buf++); word <<= 8; --left; } write4(sc, SDHC_DATPORT, word); } } static void pio_read_transfer(struct fsl_sdhc_softc *sc) { while (read4(sc, SDHC_PRSSTAT) & PRSSTAT_BREN) { read_block_pio(sc); /* * TODO: should we check here whether data_offset >= data->len? */ } } static void pio_write_transfer(struct fsl_sdhc_softc *sc) { while (read4(sc, SDHC_PRSSTAT) & PRSSTAT_BWEN) { write_block_pio(sc); /* * TODO: should we check here whether data_offset >= data->len? */ } } #endif /* FSL_SDHC_USE_DMA */ static inline void handle_command_intr(struct fsl_sdhc_softc *sc, uint32_t irq_stat) { struct mmc_command *cmd = sc->request->cmd; /* Handle errors. */ if (irq_stat & IRQ_CTOE) { cmd->error = MMC_ERR_TIMEOUT; } else if (irq_stat & IRQ_CCE) { cmd->error = MMC_ERR_BADCRC; } else if (irq_stat & (IRQ_CEBE | IRQ_CIE)) { cmd->error = MMC_ERR_FIFO; } if (cmd->error) { device_printf(sc->self, "Error interrupt occured\n"); reset_controller_dat_cmd(sc); return; } if (sc->command_done) return; if (irq_stat & IRQ_CC) { sc->command_done = 1; if (cmd->flags & MMC_RSP_PRESENT) get_response(sc); } } static inline void handle_data_intr(struct fsl_sdhc_softc *sc, uint32_t irq_stat) { struct mmc_command *cmd = sc->request->cmd; /* Handle errors. 
*/ if (irq_stat & IRQ_DTOE) { cmd->error = MMC_ERR_TIMEOUT; } else if (irq_stat & (IRQ_DCE | IRQ_DEBE)) { cmd->error = MMC_ERR_BADCRC; } else if (irq_stat & IRQ_ERROR_DATA_MASK) { cmd->error = MMC_ERR_FAILED; } if (cmd->error) { device_printf(sc->self, "Error interrupt occured\n"); sc->data_done = 1; reset_controller_dat_cmd(sc); return; } if (sc->data_done) return; #ifdef FSL_SDHC_NO_DMA if (irq_stat & IRQ_BRR) { pio_read_transfer(sc); } if (irq_stat & IRQ_BWR) { pio_write_transfer(sc); } #else if (irq_stat & IRQ_DINT) { struct mmc_data *data = sc->request->cmd->data; /* Synchronize DMA. */ if (data->flags & MMC_DATA_READ) { bus_dmamap_sync(sc->dma_tag, sc->dma_map, BUS_DMASYNC_POSTREAD); memcpy(data->data, sc->dma_mem, data->len); } else { bus_dmamap_sync(sc->dma_tag, sc->dma_map, BUS_DMASYNC_POSTWRITE); } /* * TODO: For multiple block transfers, address of dma memory * in DSADDR register should be set to the beginning of the * segment here. Also offset to data pointer should be handled. */ } #endif if (irq_stat & IRQ_TC) sc->data_done = 1; } static void interrupt_handler(void *arg) { struct fsl_sdhc_softc *sc = (struct fsl_sdhc_softc *)arg; uint32_t irq_stat; mtx_lock(&sc->mtx); irq_stat = read4(sc, SDHC_IRQSTAT); /* Card interrupt. */ if (irq_stat & IRQ_CINT) { DPRINTF("Card interrupt recievied\n"); } /* Card insertion interrupt. */ if (irq_stat & IRQ_CINS) { clear_bit(sc, SDHC_IRQSIGEN, IRQ_CINS); clear_bit(sc, SDHC_IRQSTATEN, IRQ_CINS); set_bit(sc, SDHC_IRQSIGEN, IRQ_CRM); set_bit(sc, SDHC_IRQSTATEN, IRQ_CRM); callout_reset(&sc->card_detect_callout, hz / 2, card_detect_delay, sc); } /* Card removal interrupt. */ if (irq_stat & IRQ_CRM) { clear_bit(sc, SDHC_IRQSIGEN, IRQ_CRM); clear_bit(sc, SDHC_IRQSTATEN, IRQ_CRM); set_bit(sc, SDHC_IRQSIGEN, IRQ_CINS); set_bit(sc, SDHC_IRQSTATEN, IRQ_CINS); callout_stop(&sc->card_detect_callout); taskqueue_enqueue(taskqueue_swi_giant, &sc->card_detect_task); } /* Handle request interrupts. 
*/ if (sc->request) { handle_command_intr(sc, irq_stat); handle_data_intr(sc, irq_stat); /* * Finalize request when transfer is done successfully * or was interrupted due to error. */ if ((sc->data_done && sc->command_done) || (sc->request->cmd->error)) finalize_request(sc); } /* Clear status register. */ write4(sc, SDHC_IRQSTAT, irq_stat); mtx_unlock(&sc->mtx); } #ifndef FSL_SDHC_NO_DMA static void dma_get_phys_addr(void *arg, bus_dma_segment_t *segs, int nsegs, int error) { if (error != 0) return; /* Get first segment's physical address. */ *(bus_addr_t *)arg = segs->ds_addr; } static int init_dma(struct fsl_sdhc_softc *sc) { device_t self = sc->self; int err; err = bus_dma_tag_create(bus_get_dma_tag(self), FSL_SDHC_DMA_BLOCK_SIZE, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, FSL_SDHC_DMA_BLOCK_SIZE, 1, FSL_SDHC_DMA_BLOCK_SIZE, BUS_DMA_ALLOCNOW, NULL, NULL, &sc->dma_tag); if (err) { device_printf(self, "Could not create DMA tag!\n"); return (-1); } err = bus_dmamem_alloc(sc->dma_tag, (void **)&(sc->dma_mem), BUS_DMA_NOWAIT | BUS_DMA_NOCACHE, &sc->dma_map); if (err) { device_printf(self, "Could not allocate DMA memory!\n"); goto fail1; } err = bus_dmamap_load(sc->dma_tag, sc->dma_map, (void *)sc->dma_mem, FSL_SDHC_DMA_BLOCK_SIZE, dma_get_phys_addr, &sc->dma_phys, 0); if (err) { device_printf(self, "Could not load DMA map!\n"); goto fail2; } return (0); fail2: bus_dmamem_free(sc->dma_tag, sc->dma_mem, sc->dma_map); fail1: bus_dma_tag_destroy(sc->dma_tag); return (-1); } #endif /* FSL_SDHC_NO_DMA */ static uint32_t set_xfertyp_register(const struct mmc_command *cmd) { uint32_t xfertyp = 0; /* Set command index. */ xfertyp |= cmd->opcode << CMDINX_SHIFT; /* Set command type. */ if (cmd->opcode == MMC_STOP_TRANSMISSION) xfertyp |= CMDTYP_ABORT; /* Set data preset select. */ if (cmd->data) { xfertyp |= XFERTYP_DPSEL; /* Set transfer direction. */ if (cmd->data->flags & MMC_DATA_READ) xfertyp |= XFERTYP_DTDSEL; } /* Set command index check. 
*/ if (cmd->flags & MMC_RSP_OPCODE) xfertyp |= XFERTYP_CICEN; /* Set command CRC check. */ if (cmd->flags & MMC_RSP_CRC) xfertyp |= XFERTYP_CCCEN; /* Set response type */ if (!(cmd->flags & MMC_RSP_PRESENT)) xfertyp |= RSPTYP_NONE; else if (cmd->flags & MMC_RSP_136) xfertyp |= RSPTYP_136; else if (cmd->flags & MMC_RSP_BUSY) xfertyp |= RSPTYP_48_BUSY; else xfertyp |= RSPTYP_48; #ifndef FSL_SDHC_NO_DMA /* Enable DMA */ xfertyp |= XFERTYP_DMAEN; #endif return (xfertyp); } static uint32_t set_blkattr_register(const struct mmc_data *data) { if (data->len <= FSL_SDHC_MAX_BLOCK_SIZE) { /* One block transfer. */ return (BLKATTR_BLOCK_COUNT(1) | ((data->len) & BLKATTR_BLKSZE)); } /* TODO: Write code here for multi-block transfers. */ return (0); } /** * Initiate data transfer. Interrupt handler will finalize it. * @todo Implement multi-block transfers. * @param sc * @param cmd */ static int start_data(struct fsl_sdhc_softc *sc, struct mmc_data *data) { uint32_t reg; if ((uint32_t)data->data & 0x3) { device_printf(sc->self, "32-bit unaligned data pointer in " "request\n"); return (-1); } sc->data_done = 0; #ifdef FSL_SDHC_NO_DMA sc->data_ptr = data->data; sc->data_offset = 0; #else /* Write DMA address register. */ write4(sc, SDHC_DSADDR, sc->dma_phys); /* Synchronize DMA. */ if (data->flags & MMC_DATA_READ) { bus_dmamap_sync(sc->dma_tag, sc->dma_map, BUS_DMASYNC_PREREAD); } else { memcpy(sc->dma_mem, data->data, data->len); bus_dmamap_sync(sc->dma_tag, sc->dma_map, BUS_DMASYNC_PREWRITE); } #endif /* Set block size and count. 
*/ reg = set_blkattr_register(data); if (reg == 0) { device_printf(sc->self, "Requested unsupported multi-block " "transfer.\n"); return (-1); } write4(sc, SDHC_BLKATTR, reg); return (0); } static int start_command(struct fsl_sdhc_softc *sc, struct mmc_command *cmd) { struct mmc_request *req = sc->request; uint32_t mask; uint32_t xfertyp; int err; DPRINTF("opcode %d, flags 0x%08x\n", cmd->opcode, cmd->flags); DPRINTF("PRSSTAT = 0x%08x\n", read4(sc, SDHC_PRSSTAT)); sc->command_done = 0; cmd->error = MMC_ERR_NONE; /* TODO: should we check here for card presence and clock settings? */ /* Always wait for free CMD line. */ mask = SDHC_CMD_LINE; /* Wait for free DAT if we have data or busy signal. */ if (cmd->data || (cmd->flags & MMC_RSP_BUSY)) mask |= SDHC_DAT_LINE; /* We shouldn't wait for DAT for stop commands. */ if (cmd == req->stop) mask &= ~SDHC_DAT_LINE; err = wait_for_free_line(sc, mask); if (err != 0) { device_printf(sc->self, "Controller never released inhibit " "bit(s).\n"); reset_controller_dat_cmd(sc); cmd->error = MMC_ERR_FAILED; sc->request = NULL; req->done(req); return (-1); } xfertyp = set_xfertyp_register(cmd); if (cmd->data != NULL) { err = start_data(sc, cmd->data); if (err != 0) { device_printf(sc->self, "Data transfer request failed\n"); reset_controller_dat_cmd(sc); cmd->error = MMC_ERR_FAILED; sc->request = NULL; req->done(req); return (-1); } } write4(sc, SDHC_CMDARG, cmd->arg); write4(sc, SDHC_XFERTYP, xfertyp); DPRINTF("XFERTYP = 0x%08x\n", xfertyp); DPRINTF("CMDARG = 0x%08x\n", cmd->arg); return (0); } #ifdef DEBUG static void dump_registers(struct fsl_sdhc_softc *sc) { printf("PRSSTAT = 0x%08x\n", read4(sc, SDHC_PRSSTAT)); printf("PROCTL = 0x%08x\n", read4(sc, SDHC_PROCTL)); printf("HOSTCAPBLT = 0x%08x\n", read4(sc, SDHC_HOSTCAPBLT)); printf("IRQSTAT = 0x%08x\n", read4(sc, SDHC_IRQSTAT)); printf("IRQSTATEN = 0x%08x\n", read4(sc, SDHC_IRQSTATEN)); printf("IRQSIGEN = 0x%08x\n", read4(sc, SDHC_IRQSIGEN)); printf("WML = 0x%08x\n", read4(sc, 
SDHC_WML)); printf("DSADDR = 0x%08x\n", read4(sc, SDHC_DSADDR)); printf("XFERTYP = 0x%08x\n", read4(sc, SDHC_XFERTYP)); printf("DCR = 0x%08x\n", read4(sc, SDHC_DCR)); } #endif /***************************************************************************** * Public methods *****************************************************************************/ /* * Device interface methods. */ static int fsl_sdhc_probe(device_t self) { static const char *desc = "Freescale Enhanced Secure Digital Host Controller"; if (!ofw_bus_is_compatible(self, "fsl,p2020-esdhc") && !ofw_bus_is_compatible(self, "fsl,esdhc")) return (ENXIO); device_set_desc(self, desc); return (BUS_PROBE_VENDOR); } static int fsl_sdhc_attach(device_t self) { struct fsl_sdhc_softc *sc; sc = device_get_softc(self); sc->self = self; mtx_init(&sc->mtx, device_get_nameunit(self), NULL, MTX_DEF); /* Setup memory resource */ sc->mem_rid = 0; sc->mem_resource = bus_alloc_resource_any(self, SYS_RES_MEMORY, &sc->mem_rid, RF_ACTIVE); if (sc->mem_resource == NULL) { device_printf(self, "Could not allocate memory.\n"); goto fail; } sc->bst = rman_get_bustag(sc->mem_resource); sc->bsh = rman_get_bushandle(sc->mem_resource); /* Setup interrupt resource. */ sc->irq_rid = 0; sc->irq_resource = bus_alloc_resource_any(self, SYS_RES_IRQ, &sc->irq_rid, RF_ACTIVE); if (sc->irq_resource == NULL) { device_printf(self, "Could not allocate interrupt.\n"); goto fail; } if (bus_setup_intr(self, sc->irq_resource, INTR_TYPE_MISC | INTR_MPSAFE, NULL, interrupt_handler, sc, &sc->ihl) != 0) { device_printf(self, "Could not setup interrupt.\n"); goto fail; } /* Setup DMA. */ #ifndef FSL_SDHC_NO_DMA if (init_dma(sc) != 0) { device_printf(self, "Could not setup DMA\n"); } #endif sc->bus_busy = 0; sc->platform_clock = get_platform_clock(sc); if (sc->platform_clock == 0) { device_printf(self, "Could not get platform clock.\n"); goto fail; } sc->command_done = 1; sc->data_done = 1; /* Init card detection task. 
	 */
	TASK_INIT(&sc->card_detect_task, 0, card_detect_task, sc);
	callout_init(&sc->card_detect_callout, 1);

	/* Bring the controller to a clean state and start the ID clock. */
	reset_controller_all(sc);
	init_controller(sc);
	set_clock(sc, 400000);
	send_80_clock_ticks(sc);

#ifdef DEBUG
	dump_registers(sc);
#endif

	return (0);

fail:
	/*
	 * NOTE(review): detach doubles as the error-unwind path, so it must
	 * tolerate partially-initialized state.  If we get here before
	 * TASK_INIT above ran, detach still calls taskqueue_drain() on the
	 * uninitialized task — verify this is safe.
	 */
	fsl_sdhc_detach(self);
	return (ENXIO);
}

/*
 * Newbus detach: tear down in reverse order of attach — child bus, card
 * detection task, DMA resources, interrupt handler and resources, memory
 * resource, and finally the softc mutex.  Returns the first error from a
 * failed release (leaving later resources still held) or 0 on success.
 */
static int
fsl_sdhc_detach(device_t self)
{
	struct fsl_sdhc_softc *sc = device_get_softc(self);
	int err;

	if (sc->child)
		device_delete_child(self, sc->child);

	taskqueue_drain(taskqueue_swi_giant, &sc->card_detect_task);

#ifndef FSL_SDHC_NO_DMA
	bus_dmamap_unload(sc->dma_tag, sc->dma_map);
	bus_dmamem_free(sc->dma_tag, sc->dma_mem, sc->dma_map);
	bus_dma_tag_destroy(sc->dma_tag);
#endif

	if (sc->ihl != NULL) {
		err = bus_teardown_intr(self, sc->irq_resource, sc->ihl);
		if (err)
			return (err);
	}
	if (sc->irq_resource != NULL) {
		err = bus_release_resource(self, SYS_RES_IRQ, sc->irq_rid,
		    sc->irq_resource);
		if (err)
			return (err);
	}
	if (sc->mem_resource != NULL) {
		err = bus_release_resource(self, SYS_RES_MEMORY, sc->mem_rid,
		    sc->mem_resource);
		if (err)
			return (err);
	}

	mtx_destroy(&sc->mtx);

	return (0);
}

/*
 * Bus interface methods.
*/ static int fsl_sdhc_read_ivar(device_t self, device_t child, int index, uintptr_t *result) { struct mmc_host *host = device_get_ivars(child); switch (index) { case MMCBR_IVAR_BUS_MODE: *(int *)result = host->ios.bus_mode; break; case MMCBR_IVAR_BUS_WIDTH: *(int *)result = host->ios.bus_width; break; case MMCBR_IVAR_CHIP_SELECT: *(int *)result = host->ios.chip_select; break; case MMCBR_IVAR_CLOCK: *(int *)result = host->ios.clock; break; case MMCBR_IVAR_F_MIN: *(int *)result = host->f_min; break; case MMCBR_IVAR_F_MAX: *(int *)result = host->f_max; break; case MMCBR_IVAR_HOST_OCR: *(int *)result = host->host_ocr; break; case MMCBR_IVAR_MODE: *(int *)result = host->mode; break; case MMCBR_IVAR_OCR: *(int *)result = host->ocr; break; case MMCBR_IVAR_POWER_MODE: *(int *)result = host->ios.power_mode; break; case MMCBR_IVAR_VDD: *(int *)result = host->ios.vdd; break; default: return (EINVAL); } return (0); } static int fsl_sdhc_write_ivar(device_t self, device_t child, int index, uintptr_t value) { struct mmc_host *host = device_get_ivars(child); switch (index) { case MMCBR_IVAR_BUS_MODE: host->ios.bus_mode = value; break; case MMCBR_IVAR_BUS_WIDTH: host->ios.bus_width = value; break; case MMCBR_IVAR_CHIP_SELECT: host->ios.chip_select = value; break; case MMCBR_IVAR_CLOCK: host->ios.clock = value; break; case MMCBR_IVAR_MODE: host->mode = value; break; case MMCBR_IVAR_OCR: host->ocr = value; break; case MMCBR_IVAR_POWER_MODE: host->ios.power_mode = value; break; case MMCBR_IVAR_VDD: host->ios.vdd = value; break; case MMCBR_IVAR_HOST_OCR: case MMCBR_IVAR_F_MIN: case MMCBR_IVAR_F_MAX: default: /* Instance variable not writable. */ return (EINVAL); } return (0); } /* * MMC bridge methods. */ static int fsl_sdhc_update_ios(device_t self, device_t reqdev) { struct fsl_sdhc_softc *sc = device_get_softc(self); struct mmc_host *host = device_get_ivars(reqdev); struct mmc_ios *ios = &host->ios; mtx_lock(&sc->mtx); /* Full reset on bus power down to clear from any state. 
*/ if (ios->power_mode == power_off) { reset_controller_all(sc); init_controller(sc); } set_clock(sc, ios->clock); set_bus_width(sc, ios->bus_width); mtx_unlock(&sc->mtx); return (0); } static int fsl_sdhc_request(device_t self, device_t reqdev, struct mmc_request *req) { struct fsl_sdhc_softc *sc = device_get_softc(self); int err; mtx_lock(&sc->mtx); sc->request = req; err = start_command(sc, req->cmd); mtx_unlock(&sc->mtx); return (err); } static int fsl_sdhc_get_ro(device_t self, device_t reqdev) { struct fsl_sdhc_softc *sc = device_get_softc(self); /* Wouldn't it be faster using branching (if {}) ?? */ return (((read4(sc, SDHC_PRSSTAT) & PRSSTAT_WPSPL) >> 19) ^ 0x1); } static int fsl_sdhc_acquire_host(device_t self, device_t reqdev) { struct fsl_sdhc_softc *sc = device_get_softc(self); int retval = 0; mtx_lock(&sc->mtx); while (sc->bus_busy) retval = mtx_sleep(sc, &sc->mtx, PZERO, "sdhcah", 0); ++(sc->bus_busy); mtx_unlock(&sc->mtx); return (retval); } static int fsl_sdhc_release_host(device_t self, device_t reqdev) { struct fsl_sdhc_softc *sc = device_get_softc(self); mtx_lock(&sc->mtx); --(sc->bus_busy); mtx_unlock(&sc->mtx); wakeup(sc); return (0); }