Index: head/sys/arm/allwinner/aw_mmc.c
===================================================================
--- head/sys/arm/allwinner/aw_mmc.c	(revision 345774)
+++ head/sys/arm/allwinner/aw_mmc.c	(revision 345775)
@@ -1,1524 +1,1526 @@
/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2018 Emmanuel Vadot
 * Copyright (c) 2013 Alexander Fedorov
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include
__FBSDID("$FreeBSD$");

#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

#include "opt_mmccam.h"

#ifdef MMCCAM
#include
#include
#include
#include
#include
#endif

#define AW_MMC_MEMRES 0
#define AW_MMC_IRQRES 1
#define AW_MMC_RESSZ 2
#define AW_MMC_DMA_SEGS (PAGE_SIZE / sizeof(struct aw_mmc_dma_desc))
#define AW_MMC_DMA_DESC_SIZE (sizeof(struct aw_mmc_dma_desc) * AW_MMC_DMA_SEGS)
#define AW_MMC_DMA_FTRGLEVEL 0x20070008

#define AW_MMC_RESET_RETRY 1000

#define CARD_ID_FREQUENCY 400000

struct aw_mmc_conf {
	uint32_t dma_xferlen;
	bool mask_data0;
	bool can_calibrate;
	bool new_timing;
};

static const struct aw_mmc_conf a10_mmc_conf = {
	.dma_xferlen = 0x2000,
};

static const struct aw_mmc_conf a13_mmc_conf = {
	.dma_xferlen = 0x10000,
};

static const struct aw_mmc_conf a64_mmc_conf = {
	.dma_xferlen = 0x10000,
	.mask_data0 = true,
	.can_calibrate = true,
	.new_timing = true,
};

static const struct aw_mmc_conf a64_emmc_conf = {
	.dma_xferlen = 0x2000,
	.can_calibrate = true,
};

static struct ofw_compat_data compat_data[] = {
	{"allwinner,sun4i-a10-mmc", (uintptr_t)&a10_mmc_conf},
	{"allwinner,sun5i-a13-mmc", (uintptr_t)&a13_mmc_conf},
	{"allwinner,sun7i-a20-mmc", (uintptr_t)&a13_mmc_conf},
	{"allwinner,sun50i-a64-mmc", (uintptr_t)&a64_mmc_conf},
	{"allwinner,sun50i-a64-emmc", (uintptr_t)&a64_emmc_conf},
	{NULL, 0}
};

struct aw_mmc_softc {
	device_t aw_dev;
	clk_t aw_clk_ahb;
	clk_t aw_clk_mmc;
	hwreset_t aw_rst_ahb;
	int aw_bus_busy;
	int aw_resid;
	int aw_timeout;
	struct callout aw_timeoutc;
	struct mmc_host aw_host;
#ifdef MMCCAM
	union ccb * ccb;
	struct cam_devq * devq;
	struct cam_sim * sim;
	struct mtx sim_mtx;
#else
	struct mmc_request * aw_req;
#endif
	struct mtx aw_mtx;
	struct resource * aw_res[AW_MMC_RESSZ];
	struct aw_mmc_conf * aw_mmc_conf;
	uint32_t aw_intr;
uint32_t aw_intr_wait; void * aw_intrhand; regulator_t aw_reg_vmmc; regulator_t aw_reg_vqmmc; unsigned int aw_clock; /* Fields required for DMA access. */ bus_addr_t aw_dma_desc_phys; bus_dmamap_t aw_dma_map; bus_dma_tag_t aw_dma_tag; void * aw_dma_desc; bus_dmamap_t aw_dma_buf_map; bus_dma_tag_t aw_dma_buf_tag; int aw_dma_map_err; }; static struct resource_spec aw_mmc_res_spec[] = { { SYS_RES_MEMORY, 0, RF_ACTIVE }, { SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE }, { -1, 0, 0 } }; static int aw_mmc_probe(device_t); static int aw_mmc_attach(device_t); static int aw_mmc_detach(device_t); static int aw_mmc_setup_dma(struct aw_mmc_softc *); static int aw_mmc_reset(struct aw_mmc_softc *); static int aw_mmc_init(struct aw_mmc_softc *); static void aw_mmc_intr(void *); static int aw_mmc_update_clock(struct aw_mmc_softc *, uint32_t); static void aw_mmc_print_error(uint32_t); static int aw_mmc_update_ios(device_t, device_t); static int aw_mmc_request(device_t, device_t, struct mmc_request *); static int aw_mmc_get_ro(device_t, device_t); static int aw_mmc_acquire_host(device_t, device_t); static int aw_mmc_release_host(device_t, device_t); #ifdef MMCCAM static void aw_mmc_cam_action(struct cam_sim *, union ccb *); static void aw_mmc_cam_poll(struct cam_sim *); static int aw_mmc_cam_settran_settings(struct aw_mmc_softc *, union ccb *); static int aw_mmc_cam_request(struct aw_mmc_softc *, union ccb *); static void aw_mmc_cam_handle_mmcio(struct cam_sim *, union ccb *); #endif #define AW_MMC_LOCK(_sc) mtx_lock(&(_sc)->aw_mtx) #define AW_MMC_UNLOCK(_sc) mtx_unlock(&(_sc)->aw_mtx) #define AW_MMC_READ_4(_sc, _reg) \ bus_read_4((_sc)->aw_res[AW_MMC_MEMRES], _reg) #define AW_MMC_WRITE_4(_sc, _reg, _value) \ bus_write_4((_sc)->aw_res[AW_MMC_MEMRES], _reg, _value) #ifdef MMCCAM static void aw_mmc_cam_handle_mmcio(struct cam_sim *sim, union ccb *ccb) { struct aw_mmc_softc *sc; sc = cam_sim_softc(sim); aw_mmc_cam_request(sc, ccb); } static void aw_mmc_cam_action(struct cam_sim *sim, union ccb *ccb) { struct aw_mmc_softc *sc; sc = cam_sim_softc(sim); if (sc == NULL) { ccb->ccb_h.status = CAM_SEL_TIMEOUT; xpt_done(ccb); return; } mtx_assert(&sc->sim_mtx, MA_OWNED); switch (ccb->ccb_h.func_code) { case XPT_PATH_INQ: { struct ccb_pathinq *cpi; cpi = &ccb->cpi; cpi->version_num = 1; cpi->hba_inquiry = 0; cpi->target_sprt = 0; cpi->hba_misc = PIM_NOBUSRESET | PIM_SEQSCAN; cpi->hba_eng_cnt = 0; cpi->max_target = 0; cpi->max_lun = 0; cpi->initiator_id = 1; cpi->maxio = (sc->aw_mmc_conf->dma_xferlen * AW_MMC_DMA_SEGS) / MMC_SECTOR_SIZE; strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); strncpy(cpi->hba_vid, "Deglitch Networks", HBA_IDLEN); strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); cpi->unit_number = cam_sim_unit(sim); cpi->bus_id = cam_sim_bus(sim); cpi->protocol = PROTO_MMCSD; cpi->protocol_version = SCSI_REV_0; cpi->transport = XPORT_MMCSD; cpi->transport_version = 1; cpi->ccb_h.status = CAM_REQ_CMP; break; } case XPT_GET_TRAN_SETTINGS: { struct ccb_trans_settings *cts = &ccb->cts; if (bootverbose) device_printf(sc->aw_dev, "Got XPT_GET_TRAN_SETTINGS\n"); cts->protocol = PROTO_MMCSD; cts->protocol_version = 1; cts->transport = XPORT_MMCSD; cts->transport_version = 1; cts->xport_specific.valid = 0; cts->proto_specific.mmc.host_ocr = sc->aw_host.host_ocr; cts->proto_specific.mmc.host_f_min = sc->aw_host.f_min; cts->proto_specific.mmc.host_f_max = sc->aw_host.f_max; cts->proto_specific.mmc.host_caps = sc->aw_host.caps; + cts->proto_specific.mmc.host_max_data = (sc->aw_mmc_conf->dma_xferlen * + AW_MMC_DMA_SEGS) / 
MMC_SECTOR_SIZE; memcpy(&cts->proto_specific.mmc.ios, &sc->aw_host.ios, sizeof(struct mmc_ios)); ccb->ccb_h.status = CAM_REQ_CMP; break; } case XPT_SET_TRAN_SETTINGS: { if (bootverbose) device_printf(sc->aw_dev, "Got XPT_SET_TRAN_SETTINGS\n"); aw_mmc_cam_settran_settings(sc, ccb); ccb->ccb_h.status = CAM_REQ_CMP; break; } case XPT_RESET_BUS: if (bootverbose) device_printf(sc->aw_dev, "Got XPT_RESET_BUS, ACK it...\n"); ccb->ccb_h.status = CAM_REQ_CMP; break; case XPT_MMC_IO: /* * Here is the HW-dependent part of * sending the command to the underlying h/w * At some point in the future an interrupt comes. * Then the request will be marked as completed. */ ccb->ccb_h.status = CAM_REQ_INPROG; aw_mmc_cam_handle_mmcio(sim, ccb); return; /* NOTREACHED */ break; default: ccb->ccb_h.status = CAM_REQ_INVALID; break; } xpt_done(ccb); return; } static void aw_mmc_cam_poll(struct cam_sim *sim) { return; } static int aw_mmc_cam_settran_settings(struct aw_mmc_softc *sc, union ccb *ccb) { struct mmc_ios *ios; struct mmc_ios *new_ios; struct ccb_trans_settings_mmc *cts; ios = &sc->aw_host.ios; cts = &ccb->cts.proto_specific.mmc; new_ios = &cts->ios; /* Update only requested fields */ if (cts->ios_valid & MMC_CLK) { ios->clock = new_ios->clock; device_printf(sc->aw_dev, "Clock => %d\n", ios->clock); } if (cts->ios_valid & MMC_VDD) { ios->vdd = new_ios->vdd; device_printf(sc->aw_dev, "VDD => %d\n", ios->vdd); } if (cts->ios_valid & MMC_CS) { ios->chip_select = new_ios->chip_select; device_printf(sc->aw_dev, "CS => %d\n", ios->chip_select); } if (cts->ios_valid & MMC_BW) { ios->bus_width = new_ios->bus_width; device_printf(sc->aw_dev, "Bus width => %d\n", ios->bus_width); } if (cts->ios_valid & MMC_PM) { ios->power_mode = new_ios->power_mode; device_printf(sc->aw_dev, "Power mode => %d\n", ios->power_mode); } if (cts->ios_valid & MMC_BT) { ios->timing = new_ios->timing; device_printf(sc->aw_dev, "Timing => %d\n", ios->timing); } if (cts->ios_valid & MMC_BM) { ios->bus_mode = new_ios->bus_mode; device_printf(sc->aw_dev, "Bus mode => %d\n", ios->bus_mode); } return (aw_mmc_update_ios(sc->aw_dev, NULL)); } static int aw_mmc_cam_request(struct aw_mmc_softc *sc, union ccb *ccb) { struct ccb_mmcio *mmcio; mmcio = &ccb->mmcio; AW_MMC_LOCK(sc); #ifdef DEBUG if (__predict_false(bootverbose)) { device_printf(sc->aw_dev, "CMD%u arg %#x flags %#x dlen %u dflags %#x\n", mmcio->cmd.opcode, mmcio->cmd.arg, mmcio->cmd.flags, mmcio->cmd.data != NULL ? (unsigned int) mmcio->cmd.data->len : 0, mmcio->cmd.data != NULL ? 
mmcio->cmd.data->flags: 0); } #endif if (mmcio->cmd.data != NULL) { if (mmcio->cmd.data->len == 0 || mmcio->cmd.data->flags == 0) panic("data->len = %d, data->flags = %d -- something is b0rked", (int)mmcio->cmd.data->len, mmcio->cmd.data->flags); } if (sc->ccb != NULL) { device_printf(sc->aw_dev, "Controller still has an active command\n"); return (EBUSY); } sc->ccb = ccb; /* aw_mmc_request locks again */ AW_MMC_UNLOCK(sc); aw_mmc_request(sc->aw_dev, NULL, NULL); return (0); } #endif /* MMCCAM */ static int aw_mmc_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0) return (ENXIO); device_set_desc(dev, "Allwinner Integrated MMC/SD controller"); return (BUS_PROBE_DEFAULT); } static int aw_mmc_attach(device_t dev) { device_t child; struct aw_mmc_softc *sc; struct sysctl_ctx_list *ctx; struct sysctl_oid_list *tree; uint32_t bus_width, max_freq; phandle_t node; int error; node = ofw_bus_get_node(dev); sc = device_get_softc(dev); sc->aw_dev = dev; sc->aw_mmc_conf = (struct aw_mmc_conf *)ofw_bus_search_compatible(dev, compat_data)->ocd_data; #ifndef MMCCAM sc->aw_req = NULL; #endif if (bus_alloc_resources(dev, aw_mmc_res_spec, sc->aw_res) != 0) { device_printf(dev, "cannot allocate device resources\n"); return (ENXIO); } if (bus_setup_intr(dev, sc->aw_res[AW_MMC_IRQRES], INTR_TYPE_MISC | INTR_MPSAFE, NULL, aw_mmc_intr, sc, &sc->aw_intrhand)) { bus_release_resources(dev, aw_mmc_res_spec, sc->aw_res); device_printf(dev, "cannot setup interrupt handler\n"); return (ENXIO); } mtx_init(&sc->aw_mtx, device_get_nameunit(sc->aw_dev), "aw_mmc", MTX_DEF); callout_init_mtx(&sc->aw_timeoutc, &sc->aw_mtx, 0); /* De-assert reset */ if (hwreset_get_by_ofw_name(dev, 0, "ahb", &sc->aw_rst_ahb) == 0) { error = hwreset_deassert(sc->aw_rst_ahb); if (error != 0) { device_printf(dev, "cannot de-assert reset\n"); goto fail; } } /* Activate the module clock. */ error = clk_get_by_ofw_name(dev, 0, "ahb", &sc->aw_clk_ahb); if (error != 0) { device_printf(dev, "cannot get ahb clock\n"); goto fail; } error = clk_enable(sc->aw_clk_ahb); if (error != 0) { device_printf(dev, "cannot enable ahb clock\n"); goto fail; } error = clk_get_by_ofw_name(dev, 0, "mmc", &sc->aw_clk_mmc); if (error != 0) { device_printf(dev, "cannot get mmc clock\n"); goto fail; } error = clk_set_freq(sc->aw_clk_mmc, CARD_ID_FREQUENCY, CLK_SET_ROUND_DOWN); if (error != 0) { device_printf(dev, "cannot init mmc clock\n"); goto fail; } error = clk_enable(sc->aw_clk_mmc); if (error != 0) { device_printf(dev, "cannot enable mmc clock\n"); goto fail; } sc->aw_timeout = 10; ctx = device_get_sysctl_ctx(dev); tree = SYSCTL_CHILDREN(device_get_sysctl_tree(dev)); SYSCTL_ADD_INT(ctx, tree, OID_AUTO, "req_timeout", CTLFLAG_RW, &sc->aw_timeout, 0, "Request timeout in seconds"); /* Soft Reset controller. 
*/ if (aw_mmc_reset(sc) != 0) { device_printf(dev, "cannot reset the controller\n"); goto fail; } if (aw_mmc_setup_dma(sc) != 0) { device_printf(sc->aw_dev, "Couldn't setup DMA!\n"); goto fail; } if (OF_getencprop(node, "bus-width", &bus_width, sizeof(uint32_t)) <= 0) bus_width = 4; if (regulator_get_by_ofw_property(dev, 0, "vmmc-supply", &sc->aw_reg_vmmc) == 0) { if (bootverbose) device_printf(dev, "vmmc-supply regulator found\n"); } if (regulator_get_by_ofw_property(dev, 0, "vqmmc-supply", &sc->aw_reg_vqmmc) == 0 && bootverbose) { if (bootverbose) device_printf(dev, "vqmmc-supply regulator found\n"); } sc->aw_host.f_min = 400000; if (OF_getencprop(node, "max-frequency", &max_freq, sizeof(uint32_t)) <= 0) max_freq = 52000000; sc->aw_host.f_max = max_freq; sc->aw_host.host_ocr = MMC_OCR_320_330 | MMC_OCR_330_340; sc->aw_host.caps = MMC_CAP_HSPEED | MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 | MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_DDR50 | MMC_CAP_MMC_DDR52; sc->aw_host.caps |= MMC_CAP_SIGNALING_330 | MMC_CAP_SIGNALING_180; if (bus_width >= 4) sc->aw_host.caps |= MMC_CAP_4_BIT_DATA; if (bus_width >= 8) sc->aw_host.caps |= MMC_CAP_8_BIT_DATA; #ifdef MMCCAM child = NULL; /* Not used by MMCCAM, need to silence compiler warnings */ sc->ccb = NULL; if ((sc->devq = cam_simq_alloc(1)) == NULL) { goto fail; } mtx_init(&sc->sim_mtx, "awmmcsim", NULL, MTX_DEF); sc->sim = cam_sim_alloc(aw_mmc_cam_action, aw_mmc_cam_poll, "aw_mmc_sim", sc, device_get_unit(dev), &sc->sim_mtx, 1, 1, sc->devq); if (sc->sim == NULL) { cam_simq_free(sc->devq); device_printf(dev, "cannot allocate CAM SIM\n"); goto fail; } mtx_lock(&sc->sim_mtx); if (xpt_bus_register(sc->sim, sc->aw_dev, 0) != 0) { device_printf(dev, "cannot register SCSI pass-through bus\n"); cam_sim_free(sc->sim, FALSE); cam_simq_free(sc->devq); mtx_unlock(&sc->sim_mtx); goto fail; } mtx_unlock(&sc->sim_mtx); #else /* !MMCCAM */ child = device_add_child(dev, "mmc", -1); if (child == NULL) { device_printf(dev, "attaching MMC bus failed!\n"); goto fail; } if (device_probe_and_attach(child) != 0) { device_printf(dev, "attaching MMC child failed!\n"); device_delete_child(dev, child); goto fail; } #endif /* MMCCAM */ return (0); fail: callout_drain(&sc->aw_timeoutc); mtx_destroy(&sc->aw_mtx); bus_teardown_intr(dev, sc->aw_res[AW_MMC_IRQRES], sc->aw_intrhand); bus_release_resources(dev, aw_mmc_res_spec, sc->aw_res); #ifdef MMCCAM if (sc->sim != NULL) { mtx_lock(&sc->sim_mtx); xpt_bus_deregister(cam_sim_path(sc->sim)); cam_sim_free(sc->sim, FALSE); mtx_unlock(&sc->sim_mtx); } if (sc->devq != NULL) cam_simq_free(sc->devq); #endif return (ENXIO); } static int aw_mmc_detach(device_t dev) { return (EBUSY); } static void aw_dma_desc_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int err) { struct aw_mmc_softc *sc; sc = (struct aw_mmc_softc *)arg; if (err) { sc->aw_dma_map_err = err; return; } sc->aw_dma_desc_phys = segs[0].ds_addr; } static int aw_mmc_setup_dma(struct aw_mmc_softc *sc) { int error; /* Allocate the DMA descriptor memory. 
*/ error = bus_dma_tag_create( bus_get_dma_tag(sc->aw_dev), /* parent */ AW_MMC_DMA_ALIGN, 0, /* align, boundary */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg*/ AW_MMC_DMA_DESC_SIZE, 1, /* maxsize, nsegment */ AW_MMC_DMA_DESC_SIZE, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lock, lockarg*/ &sc->aw_dma_tag); if (error) return (error); error = bus_dmamem_alloc(sc->aw_dma_tag, &sc->aw_dma_desc, BUS_DMA_COHERENT | BUS_DMA_WAITOK | BUS_DMA_ZERO, &sc->aw_dma_map); if (error) return (error); error = bus_dmamap_load(sc->aw_dma_tag, sc->aw_dma_map, sc->aw_dma_desc, AW_MMC_DMA_DESC_SIZE, aw_dma_desc_cb, sc, 0); if (error) return (error); if (sc->aw_dma_map_err) return (sc->aw_dma_map_err); /* Create the DMA map for data transfers. */ error = bus_dma_tag_create( bus_get_dma_tag(sc->aw_dev), /* parent */ AW_MMC_DMA_ALIGN, 0, /* align, boundary */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg*/ sc->aw_mmc_conf->dma_xferlen * AW_MMC_DMA_SEGS, AW_MMC_DMA_SEGS, /* maxsize, nsegments */ sc->aw_mmc_conf->dma_xferlen, /* maxsegsize */ BUS_DMA_ALLOCNOW, /* flags */ NULL, NULL, /* lock, lockarg*/ &sc->aw_dma_buf_tag); if (error) return (error); error = bus_dmamap_create(sc->aw_dma_buf_tag, 0, &sc->aw_dma_buf_map); if (error) return (error); return (0); } static void aw_dma_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int err) { int i; struct aw_mmc_dma_desc *dma_desc; struct aw_mmc_softc *sc; sc = (struct aw_mmc_softc *)arg; sc->aw_dma_map_err = err; if (err) return; dma_desc = sc->aw_dma_desc; for (i = 0; i < nsegs; i++) { if (segs[i].ds_len == sc->aw_mmc_conf->dma_xferlen) dma_desc[i].buf_size = 0; /* Size of 0 indicate max len */ else dma_desc[i].buf_size = segs[i].ds_len; dma_desc[i].buf_addr = segs[i].ds_addr; dma_desc[i].config = AW_MMC_DMA_CONFIG_CH | AW_MMC_DMA_CONFIG_OWN | AW_MMC_DMA_CONFIG_DIC; dma_desc[i].next = sc->aw_dma_desc_phys + ((i + 1) * sizeof(struct aw_mmc_dma_desc)); } dma_desc[0].config |= AW_MMC_DMA_CONFIG_FD; dma_desc[nsegs - 1].config |= AW_MMC_DMA_CONFIG_LD | AW_MMC_DMA_CONFIG_ER; dma_desc[nsegs - 1].config &= ~AW_MMC_DMA_CONFIG_DIC; dma_desc[nsegs - 1].next = 0; } static int aw_mmc_prepare_dma(struct aw_mmc_softc *sc) { bus_dmasync_op_t sync_op; int error; struct mmc_command *cmd; uint32_t val; #ifdef MMCCAM cmd = &sc->ccb->mmcio.cmd; #else cmd = sc->aw_req->cmd; #endif if (cmd->data->len > (sc->aw_mmc_conf->dma_xferlen * AW_MMC_DMA_SEGS)) return (EFBIG); error = bus_dmamap_load(sc->aw_dma_buf_tag, sc->aw_dma_buf_map, cmd->data->data, cmd->data->len, aw_dma_cb, sc, 0); if (error) return (error); if (sc->aw_dma_map_err) return (sc->aw_dma_map_err); if (cmd->data->flags & MMC_DATA_WRITE) sync_op = BUS_DMASYNC_PREWRITE; else sync_op = BUS_DMASYNC_PREREAD; bus_dmamap_sync(sc->aw_dma_buf_tag, sc->aw_dma_buf_map, sync_op); bus_dmamap_sync(sc->aw_dma_tag, sc->aw_dma_map, BUS_DMASYNC_PREWRITE); /* Enable DMA */ val = AW_MMC_READ_4(sc, AW_MMC_GCTL); val &= ~AW_MMC_GCTL_FIFO_AC_MOD; val |= AW_MMC_GCTL_DMA_ENB; AW_MMC_WRITE_4(sc, AW_MMC_GCTL, val); /* Reset DMA */ val |= AW_MMC_GCTL_DMA_RST; AW_MMC_WRITE_4(sc, AW_MMC_GCTL, val); AW_MMC_WRITE_4(sc, AW_MMC_DMAC, AW_MMC_DMAC_IDMAC_SOFT_RST); AW_MMC_WRITE_4(sc, AW_MMC_DMAC, AW_MMC_DMAC_IDMAC_IDMA_ON | AW_MMC_DMAC_IDMAC_FIX_BURST); /* Enable RX or TX DMA interrupt */ val = AW_MMC_READ_4(sc, AW_MMC_IDIE); if (cmd->data->flags & MMC_DATA_WRITE) val |= AW_MMC_IDST_TX_INT; else val |= AW_MMC_IDST_RX_INT; AW_MMC_WRITE_4(sc, 
AW_MMC_IDIE, val); /* Set DMA descritptor list address */ AW_MMC_WRITE_4(sc, AW_MMC_DLBA, sc->aw_dma_desc_phys); /* FIFO trigger level */ AW_MMC_WRITE_4(sc, AW_MMC_FWLR, AW_MMC_DMA_FTRGLEVEL); return (0); } static int aw_mmc_reset(struct aw_mmc_softc *sc) { uint32_t reg; int timeout; reg = AW_MMC_READ_4(sc, AW_MMC_GCTL); reg |= AW_MMC_GCTL_RESET; AW_MMC_WRITE_4(sc, AW_MMC_GCTL, reg); timeout = AW_MMC_RESET_RETRY; while (--timeout > 0) { if ((AW_MMC_READ_4(sc, AW_MMC_GCTL) & AW_MMC_GCTL_RESET) == 0) break; DELAY(100); } if (timeout == 0) return (ETIMEDOUT); return (0); } static int aw_mmc_init(struct aw_mmc_softc *sc) { uint32_t reg; int ret; ret = aw_mmc_reset(sc); if (ret != 0) return (ret); /* Set the timeout. */ AW_MMC_WRITE_4(sc, AW_MMC_TMOR, AW_MMC_TMOR_DTO_LMT_SHIFT(AW_MMC_TMOR_DTO_LMT_MASK) | AW_MMC_TMOR_RTO_LMT_SHIFT(AW_MMC_TMOR_RTO_LMT_MASK)); /* Unmask interrupts. */ AW_MMC_WRITE_4(sc, AW_MMC_IMKR, 0); /* Clear pending interrupts. */ AW_MMC_WRITE_4(sc, AW_MMC_RISR, 0xffffffff); /* Debug register, undocumented */ AW_MMC_WRITE_4(sc, AW_MMC_DBGC, 0xdeb); /* Function select register */ AW_MMC_WRITE_4(sc, AW_MMC_FUNS, 0xceaa0000); AW_MMC_WRITE_4(sc, AW_MMC_IDST, 0xffffffff); /* Enable interrupts and disable AHB access. */ reg = AW_MMC_READ_4(sc, AW_MMC_GCTL); reg |= AW_MMC_GCTL_INT_ENB; reg &= ~AW_MMC_GCTL_FIFO_AC_MOD; reg &= ~AW_MMC_GCTL_WAIT_MEM_ACCESS; AW_MMC_WRITE_4(sc, AW_MMC_GCTL, reg); return (0); } static void aw_mmc_req_done(struct aw_mmc_softc *sc) { struct mmc_command *cmd; #ifdef MMCCAM union ccb *ccb; #else struct mmc_request *req; #endif uint32_t val, mask; int retry; #ifdef MMCCAM ccb = sc->ccb; cmd = &ccb->mmcio.cmd; #else cmd = sc->aw_req->cmd; #endif #ifdef DEBUG if (bootverbose) { device_printf(sc->aw_dev, "%s: cmd %d err %d\n", __func__, cmd->opcode, cmd->error); } #endif if (cmd->error != MMC_ERR_NONE) { /* Reset the FIFO and DMA engines. */ mask = AW_MMC_GCTL_FIFO_RST | AW_MMC_GCTL_DMA_RST; val = AW_MMC_READ_4(sc, AW_MMC_GCTL); AW_MMC_WRITE_4(sc, AW_MMC_GCTL, val | mask); retry = AW_MMC_RESET_RETRY; while (--retry > 0) { if ((AW_MMC_READ_4(sc, AW_MMC_GCTL) & AW_MMC_GCTL_RESET) == 0) break; DELAY(100); } if (retry == 0) device_printf(sc->aw_dev, "timeout resetting DMA/FIFO\n"); aw_mmc_update_clock(sc, 1); } callout_stop(&sc->aw_timeoutc); sc->aw_intr = 0; sc->aw_resid = 0; sc->aw_dma_map_err = 0; sc->aw_intr_wait = 0; #ifdef MMCCAM sc->ccb = NULL; ccb->ccb_h.status = (ccb->mmcio.cmd.error == 0 ? CAM_REQ_CMP : CAM_REQ_CMP_ERR); xpt_done(ccb); #else req = sc->aw_req; sc->aw_req = NULL; req->done(req); #endif } static void aw_mmc_req_ok(struct aw_mmc_softc *sc) { int timeout; struct mmc_command *cmd; uint32_t status; timeout = 1000; while (--timeout > 0) { status = AW_MMC_READ_4(sc, AW_MMC_STAR); if ((status & AW_MMC_STAR_CARD_BUSY) == 0) break; DELAY(1000); } #ifdef MMCCAM cmd = &sc->ccb->mmcio.cmd; #else cmd = sc->aw_req->cmd; #endif if (timeout == 0) { cmd->error = MMC_ERR_FAILED; aw_mmc_req_done(sc); return; } if (cmd->flags & MMC_RSP_PRESENT) { if (cmd->flags & MMC_RSP_136) { cmd->resp[0] = AW_MMC_READ_4(sc, AW_MMC_RESP3); cmd->resp[1] = AW_MMC_READ_4(sc, AW_MMC_RESP2); cmd->resp[2] = AW_MMC_READ_4(sc, AW_MMC_RESP1); cmd->resp[3] = AW_MMC_READ_4(sc, AW_MMC_RESP0); } else cmd->resp[0] = AW_MMC_READ_4(sc, AW_MMC_RESP0); } /* All data has been transferred ? 
*/ if (cmd->data != NULL && (sc->aw_resid << 2) < cmd->data->len) cmd->error = MMC_ERR_FAILED; aw_mmc_req_done(sc); } static inline void set_mmc_error(struct aw_mmc_softc *sc, int error_code) { #ifdef MMCCAM sc->ccb->mmcio.cmd.error = error_code; #else sc->aw_req->cmd->error = error_code; #endif } static void aw_mmc_timeout(void *arg) { struct aw_mmc_softc *sc; sc = (struct aw_mmc_softc *)arg; #ifdef MMCCAM if (sc->ccb != NULL) { #else if (sc->aw_req != NULL) { #endif device_printf(sc->aw_dev, "controller timeout\n"); set_mmc_error(sc, MMC_ERR_TIMEOUT); aw_mmc_req_done(sc); } else device_printf(sc->aw_dev, "Spurious timeout - no active request\n"); } static void aw_mmc_print_error(uint32_t err) { if(err & AW_MMC_INT_RESP_ERR) printf("AW_MMC_INT_RESP_ERR "); if (err & AW_MMC_INT_RESP_CRC_ERR) printf("AW_MMC_INT_RESP_CRC_ERR "); if (err & AW_MMC_INT_DATA_CRC_ERR) printf("AW_MMC_INT_DATA_CRC_ERR "); if (err & AW_MMC_INT_RESP_TIMEOUT) printf("AW_MMC_INT_RESP_TIMEOUT "); if (err & AW_MMC_INT_FIFO_RUN_ERR) printf("AW_MMC_INT_FIFO_RUN_ERR "); if (err & AW_MMC_INT_CMD_BUSY) printf("AW_MMC_INT_CMD_BUSY "); if (err & AW_MMC_INT_DATA_START_ERR) printf("AW_MMC_INT_DATA_START_ERR "); if (err & AW_MMC_INT_DATA_END_BIT_ERR) printf("AW_MMC_INT_DATA_END_BIT_ERR"); printf("\n"); } static void aw_mmc_intr(void *arg) { bus_dmasync_op_t sync_op; struct aw_mmc_softc *sc; struct mmc_data *data; uint32_t idst, imask, rint; sc = (struct aw_mmc_softc *)arg; AW_MMC_LOCK(sc); rint = AW_MMC_READ_4(sc, AW_MMC_RISR); idst = AW_MMC_READ_4(sc, AW_MMC_IDST); imask = AW_MMC_READ_4(sc, AW_MMC_IMKR); if (idst == 0 && imask == 0 && rint == 0) { AW_MMC_UNLOCK(sc); return; } #ifdef DEBUG device_printf(sc->aw_dev, "idst: %#x, imask: %#x, rint: %#x\n", idst, imask, rint); #endif #ifdef MMCCAM if (sc->ccb == NULL) { #else if (sc->aw_req == NULL) { #endif device_printf(sc->aw_dev, "Spurious interrupt - no active request, rint: 0x%08X\n", rint); aw_mmc_print_error(rint); goto end; } if (rint & AW_MMC_INT_ERR_BIT) { if (bootverbose) device_printf(sc->aw_dev, "error rint: 0x%08X\n", rint); aw_mmc_print_error(rint); if (rint & AW_MMC_INT_RESP_TIMEOUT) set_mmc_error(sc, MMC_ERR_TIMEOUT); else set_mmc_error(sc, MMC_ERR_FAILED); aw_mmc_req_done(sc); goto end; } if (idst & AW_MMC_IDST_ERROR) { device_printf(sc->aw_dev, "error idst: 0x%08x\n", idst); set_mmc_error(sc, MMC_ERR_FAILED); aw_mmc_req_done(sc); goto end; } sc->aw_intr |= rint; #ifdef MMCCAM data = sc->ccb->mmcio.cmd.data; #else data = sc->aw_req->cmd->data; #endif if (data != NULL && (idst & AW_MMC_IDST_COMPLETE) != 0) { if (data->flags & MMC_DATA_WRITE) sync_op = BUS_DMASYNC_POSTWRITE; else sync_op = BUS_DMASYNC_POSTREAD; bus_dmamap_sync(sc->aw_dma_buf_tag, sc->aw_dma_buf_map, sync_op); bus_dmamap_sync(sc->aw_dma_tag, sc->aw_dma_map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->aw_dma_buf_tag, sc->aw_dma_buf_map); sc->aw_resid = data->len >> 2; } if ((sc->aw_intr & sc->aw_intr_wait) == sc->aw_intr_wait) aw_mmc_req_ok(sc); end: AW_MMC_WRITE_4(sc, AW_MMC_IDST, idst); AW_MMC_WRITE_4(sc, AW_MMC_RISR, rint); AW_MMC_UNLOCK(sc); } static int aw_mmc_request(device_t bus, device_t child, struct mmc_request *req) { int blksz; struct aw_mmc_softc *sc; struct mmc_command *cmd; uint32_t cmdreg, imask; int err; sc = device_get_softc(bus); AW_MMC_LOCK(sc); #ifdef MMCCAM KASSERT(req == NULL, ("req should be NULL in MMCCAM case!")); /* * For MMCCAM, sc->ccb has been NULL-checked and populated * by aw_mmc_cam_request() already. 
*/ cmd = &sc->ccb->mmcio.cmd; #else if (sc->aw_req) { AW_MMC_UNLOCK(sc); return (EBUSY); } sc->aw_req = req; cmd = req->cmd; #ifdef DEBUG if (bootverbose) device_printf(sc->aw_dev, "CMD%u arg %#x flags %#x dlen %u dflags %#x\n", cmd->opcode, cmd->arg, cmd->flags, cmd->data != NULL ? (unsigned int)cmd->data->len : 0, cmd->data != NULL ? cmd->data->flags: 0); #endif #endif cmdreg = AW_MMC_CMDR_LOAD; imask = AW_MMC_INT_ERR_BIT; sc->aw_intr_wait = 0; sc->aw_intr = 0; sc->aw_resid = 0; cmd->error = MMC_ERR_NONE; if (cmd->opcode == MMC_GO_IDLE_STATE) cmdreg |= AW_MMC_CMDR_SEND_INIT_SEQ; if (cmd->flags & MMC_RSP_PRESENT) cmdreg |= AW_MMC_CMDR_RESP_RCV; if (cmd->flags & MMC_RSP_136) cmdreg |= AW_MMC_CMDR_LONG_RESP; if (cmd->flags & MMC_RSP_CRC) cmdreg |= AW_MMC_CMDR_CHK_RESP_CRC; if (cmd->data) { cmdreg |= AW_MMC_CMDR_DATA_TRANS | AW_MMC_CMDR_WAIT_PRE_OVER; if (cmd->data->flags & MMC_DATA_MULTI) { cmdreg |= AW_MMC_CMDR_STOP_CMD_FLAG; imask |= AW_MMC_INT_AUTO_STOP_DONE; sc->aw_intr_wait |= AW_MMC_INT_AUTO_STOP_DONE; } else { sc->aw_intr_wait |= AW_MMC_INT_DATA_OVER; imask |= AW_MMC_INT_DATA_OVER; } if (cmd->data->flags & MMC_DATA_WRITE) cmdreg |= AW_MMC_CMDR_DIR_WRITE; blksz = min(cmd->data->len, MMC_SECTOR_SIZE); AW_MMC_WRITE_4(sc, AW_MMC_BKSR, blksz); AW_MMC_WRITE_4(sc, AW_MMC_BYCR, cmd->data->len); } else { imask |= AW_MMC_INT_CMD_DONE; } /* Enable the interrupts we are interested in */ AW_MMC_WRITE_4(sc, AW_MMC_IMKR, imask); AW_MMC_WRITE_4(sc, AW_MMC_RISR, 0xffffffff); /* Enable auto stop if needed */ AW_MMC_WRITE_4(sc, AW_MMC_A12A, cmdreg & AW_MMC_CMDR_STOP_CMD_FLAG ? 0 : 0xffff); /* Write the command argument */ AW_MMC_WRITE_4(sc, AW_MMC_CAGR, cmd->arg); /* * If we don't have data start the request * if we do prepare the dma request and start the request */ if (cmd->data == NULL) { AW_MMC_WRITE_4(sc, AW_MMC_CMDR, cmdreg | cmd->opcode); } else { err = aw_mmc_prepare_dma(sc); if (err != 0) device_printf(sc->aw_dev, "prepare_dma failed: %d\n", err); AW_MMC_WRITE_4(sc, AW_MMC_CMDR, cmdreg | cmd->opcode); } callout_reset(&sc->aw_timeoutc, sc->aw_timeout * hz, aw_mmc_timeout, sc); AW_MMC_UNLOCK(sc); return (0); } static int aw_mmc_read_ivar(device_t bus, device_t child, int which, uintptr_t *result) { struct aw_mmc_softc *sc; sc = device_get_softc(bus); switch (which) { default: return (EINVAL); case MMCBR_IVAR_BUS_MODE: *(int *)result = sc->aw_host.ios.bus_mode; break; case MMCBR_IVAR_BUS_WIDTH: *(int *)result = sc->aw_host.ios.bus_width; break; case MMCBR_IVAR_CHIP_SELECT: *(int *)result = sc->aw_host.ios.chip_select; break; case MMCBR_IVAR_CLOCK: *(int *)result = sc->aw_host.ios.clock; break; case MMCBR_IVAR_F_MIN: *(int *)result = sc->aw_host.f_min; break; case MMCBR_IVAR_F_MAX: *(int *)result = sc->aw_host.f_max; break; case MMCBR_IVAR_HOST_OCR: *(int *)result = sc->aw_host.host_ocr; break; case MMCBR_IVAR_MODE: *(int *)result = sc->aw_host.mode; break; case MMCBR_IVAR_OCR: *(int *)result = sc->aw_host.ocr; break; case MMCBR_IVAR_POWER_MODE: *(int *)result = sc->aw_host.ios.power_mode; break; case MMCBR_IVAR_VDD: *(int *)result = sc->aw_host.ios.vdd; break; case MMCBR_IVAR_VCCQ: *(int *)result = sc->aw_host.ios.vccq; break; case MMCBR_IVAR_CAPS: *(int *)result = sc->aw_host.caps; break; case MMCBR_IVAR_TIMING: *(int *)result = sc->aw_host.ios.timing; break; case MMCBR_IVAR_MAX_DATA: *(int *)result = (sc->aw_mmc_conf->dma_xferlen * AW_MMC_DMA_SEGS) / MMC_SECTOR_SIZE; break; case MMCBR_IVAR_RETUNE_REQ: *(int *)result = retune_req_none; break; } return (0); } static int aw_mmc_write_ivar(device_t 
bus, device_t child, int which, uintptr_t value) { struct aw_mmc_softc *sc; sc = device_get_softc(bus); switch (which) { default: return (EINVAL); case MMCBR_IVAR_BUS_MODE: sc->aw_host.ios.bus_mode = value; break; case MMCBR_IVAR_BUS_WIDTH: sc->aw_host.ios.bus_width = value; break; case MMCBR_IVAR_CHIP_SELECT: sc->aw_host.ios.chip_select = value; break; case MMCBR_IVAR_CLOCK: sc->aw_host.ios.clock = value; break; case MMCBR_IVAR_MODE: sc->aw_host.mode = value; break; case MMCBR_IVAR_OCR: sc->aw_host.ocr = value; break; case MMCBR_IVAR_POWER_MODE: sc->aw_host.ios.power_mode = value; break; case MMCBR_IVAR_VDD: sc->aw_host.ios.vdd = value; break; case MMCBR_IVAR_VCCQ: sc->aw_host.ios.vccq = value; break; case MMCBR_IVAR_TIMING: sc->aw_host.ios.timing = value; break; /* These are read-only */ case MMCBR_IVAR_CAPS: case MMCBR_IVAR_HOST_OCR: case MMCBR_IVAR_F_MIN: case MMCBR_IVAR_F_MAX: case MMCBR_IVAR_MAX_DATA: return (EINVAL); } return (0); } static int aw_mmc_update_clock(struct aw_mmc_softc *sc, uint32_t clkon) { uint32_t reg; int retry; reg = AW_MMC_READ_4(sc, AW_MMC_CKCR); reg &= ~(AW_MMC_CKCR_ENB | AW_MMC_CKCR_LOW_POWER | AW_MMC_CKCR_MASK_DATA0); if (clkon) reg |= AW_MMC_CKCR_ENB; if (sc->aw_mmc_conf->mask_data0) reg |= AW_MMC_CKCR_MASK_DATA0; AW_MMC_WRITE_4(sc, AW_MMC_CKCR, reg); reg = AW_MMC_CMDR_LOAD | AW_MMC_CMDR_PRG_CLK | AW_MMC_CMDR_WAIT_PRE_OVER; AW_MMC_WRITE_4(sc, AW_MMC_CMDR, reg); retry = 0xfffff; while (reg & AW_MMC_CMDR_LOAD && --retry > 0) { reg = AW_MMC_READ_4(sc, AW_MMC_CMDR); DELAY(10); } AW_MMC_WRITE_4(sc, AW_MMC_RISR, 0xffffffff); if (reg & AW_MMC_CMDR_LOAD) { device_printf(sc->aw_dev, "timeout updating clock\n"); return (ETIMEDOUT); } if (sc->aw_mmc_conf->mask_data0) { reg = AW_MMC_READ_4(sc, AW_MMC_CKCR); reg &= ~AW_MMC_CKCR_MASK_DATA0; AW_MMC_WRITE_4(sc, AW_MMC_CKCR, reg); } return (0); } static int aw_mmc_switch_vccq(device_t bus, device_t child) { struct aw_mmc_softc *sc; int uvolt, err; sc = device_get_softc(bus); if (sc->aw_reg_vqmmc == NULL) return EOPNOTSUPP; switch (sc->aw_host.ios.vccq) { case vccq_180: uvolt = 1800000; break; case vccq_330: uvolt = 3300000; break; default: return EINVAL; } err = regulator_set_voltage(sc->aw_reg_vqmmc, uvolt, uvolt); if (err != 0) { device_printf(sc->aw_dev, "Cannot set vqmmc to %d<->%d\n", uvolt, uvolt); return (err); } return (0); } static int aw_mmc_update_ios(device_t bus, device_t child) { int error; struct aw_mmc_softc *sc; struct mmc_ios *ios; unsigned int clock; uint32_t reg, div = 1; sc = device_get_softc(bus); ios = &sc->aw_host.ios; /* Set the bus width. 
*/ switch (ios->bus_width) { case bus_width_1: AW_MMC_WRITE_4(sc, AW_MMC_BWDR, AW_MMC_BWDR1); break; case bus_width_4: AW_MMC_WRITE_4(sc, AW_MMC_BWDR, AW_MMC_BWDR4); break; case bus_width_8: AW_MMC_WRITE_4(sc, AW_MMC_BWDR, AW_MMC_BWDR8); break; } switch (ios->power_mode) { case power_on: break; case power_off: if (bootverbose) device_printf(sc->aw_dev, "Powering down sd/mmc\n"); if (sc->aw_reg_vmmc) regulator_disable(sc->aw_reg_vmmc); if (sc->aw_reg_vqmmc) regulator_disable(sc->aw_reg_vqmmc); aw_mmc_reset(sc); break; case power_up: if (bootverbose) device_printf(sc->aw_dev, "Powering up sd/mmc\n"); if (sc->aw_reg_vmmc) regulator_enable(sc->aw_reg_vmmc); if (sc->aw_reg_vqmmc) regulator_enable(sc->aw_reg_vqmmc); aw_mmc_init(sc); break; }; /* Enable ddr mode if needed */ reg = AW_MMC_READ_4(sc, AW_MMC_GCTL); if (ios->timing == bus_timing_uhs_ddr50 || ios->timing == bus_timing_mmc_ddr52) reg |= AW_MMC_GCTL_DDR_MOD_SEL; else reg &= ~AW_MMC_GCTL_DDR_MOD_SEL; AW_MMC_WRITE_4(sc, AW_MMC_GCTL, reg); if (ios->clock && ios->clock != sc->aw_clock) { sc->aw_clock = clock = ios->clock; /* Disable clock */ error = aw_mmc_update_clock(sc, 0); if (error != 0) return (error); if (ios->timing == bus_timing_mmc_ddr52 && (sc->aw_mmc_conf->new_timing || ios->bus_width == bus_width_8)) { div = 2; clock <<= 1; } /* Reset the divider. */ reg = AW_MMC_READ_4(sc, AW_MMC_CKCR); reg &= ~AW_MMC_CKCR_DIV; reg |= div - 1; AW_MMC_WRITE_4(sc, AW_MMC_CKCR, reg); /* New timing mode if needed */ if (sc->aw_mmc_conf->new_timing) { reg = AW_MMC_READ_4(sc, AW_MMC_NTSR); reg |= AW_MMC_NTSR_MODE_SELECT; AW_MMC_WRITE_4(sc, AW_MMC_NTSR, reg); } /* Set the MMC clock. */ error = clk_set_freq(sc->aw_clk_mmc, clock, CLK_SET_ROUND_DOWN); if (error != 0) { device_printf(sc->aw_dev, "failed to set frequency to %u Hz: %d\n", clock, error); return (error); } if (sc->aw_mmc_conf->can_calibrate) AW_MMC_WRITE_4(sc, AW_MMC_SAMP_DL, AW_MMC_SAMP_DL_SW_EN); /* Enable clock. 
*/ error = aw_mmc_update_clock(sc, 1); if (error != 0) return (error); } return (0); } static int aw_mmc_get_ro(device_t bus, device_t child) { return (0); } static int aw_mmc_acquire_host(device_t bus, device_t child) { struct aw_mmc_softc *sc; int error; sc = device_get_softc(bus); AW_MMC_LOCK(sc); while (sc->aw_bus_busy) { error = msleep(sc, &sc->aw_mtx, PCATCH, "mmchw", 0); if (error != 0) { AW_MMC_UNLOCK(sc); return (error); } } sc->aw_bus_busy++; AW_MMC_UNLOCK(sc); return (0); } static int aw_mmc_release_host(device_t bus, device_t child) { struct aw_mmc_softc *sc; sc = device_get_softc(bus); AW_MMC_LOCK(sc); sc->aw_bus_busy--; wakeup(sc); AW_MMC_UNLOCK(sc); return (0); } static device_method_t aw_mmc_methods[] = { /* Device interface */ DEVMETHOD(device_probe, aw_mmc_probe), DEVMETHOD(device_attach, aw_mmc_attach), DEVMETHOD(device_detach, aw_mmc_detach), /* Bus interface */ DEVMETHOD(bus_read_ivar, aw_mmc_read_ivar), DEVMETHOD(bus_write_ivar, aw_mmc_write_ivar), /* MMC bridge interface */ DEVMETHOD(mmcbr_update_ios, aw_mmc_update_ios), DEVMETHOD(mmcbr_request, aw_mmc_request), DEVMETHOD(mmcbr_get_ro, aw_mmc_get_ro), DEVMETHOD(mmcbr_switch_vccq, aw_mmc_switch_vccq), DEVMETHOD(mmcbr_acquire_host, aw_mmc_acquire_host), DEVMETHOD(mmcbr_release_host, aw_mmc_release_host), DEVMETHOD_END }; static devclass_t aw_mmc_devclass; static driver_t aw_mmc_driver = { "aw_mmc", aw_mmc_methods, sizeof(struct aw_mmc_softc), }; DRIVER_MODULE(aw_mmc, simplebus, aw_mmc_driver, aw_mmc_devclass, NULL, NULL); #ifndef MMCCAM MMC_DECLARE_BRIDGE(aw_mmc); #endif Index: head/sys/cam/cam_ccb.h =================================================================== --- head/sys/cam/cam_ccb.h (revision 345774) +++ head/sys/cam/cam_ccb.h (revision 345775) @@ -1,1508 +1,1509 @@ /*- * Data structures and definitions for CAM Control Blocks (CCBs). * * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 1997, 1998 Justin T. Gibbs. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * $FreeBSD$ */ #ifndef _CAM_CAM_CCB_H #define _CAM_CAM_CCB_H 1 #include #include #include #include #ifndef _KERNEL #include #endif #include #include #include #include #include /* General allocation length definitions for CCB structures */ #define IOCDBLEN CAM_MAX_CDBLEN /* Space for CDB bytes/pointer */ #define VUHBALEN 14 /* Vendor Unique HBA length */ #define SIM_IDLEN 16 /* ASCII string len for SIM ID */ #define HBA_IDLEN 16 /* ASCII string len for HBA ID */ #define DEV_IDLEN 16 /* ASCII string len for device names */ #define CCB_PERIPH_PRIV_SIZE 2 /* size of peripheral private area */ #define CCB_SIM_PRIV_SIZE 2 /* size of sim private area */ /* Struct definitions for CAM control blocks */ /* Common CCB header */ /* CAM CCB flags */ typedef enum { CAM_CDB_POINTER = 0x00000001,/* The CDB field is a pointer */ CAM_QUEUE_ENABLE = 0x00000002,/* SIM queue actions are enabled */ CAM_CDB_LINKED = 0x00000004,/* CCB contains a linked CDB */ CAM_NEGOTIATE = 0x00000008,/* * Perform transport negotiation * with this command. */ CAM_DATA_ISPHYS = 0x00000010,/* Data type with physical addrs */ CAM_DIS_AUTOSENSE = 0x00000020,/* Disable autosense feature */ CAM_DIR_BOTH = 0x00000000,/* Data direction (00:IN/OUT) */ CAM_DIR_IN = 0x00000040,/* Data direction (01:DATA IN) */ CAM_DIR_OUT = 0x00000080,/* Data direction (10:DATA OUT) */ CAM_DIR_NONE = 0x000000C0,/* Data direction (11:no data) */ CAM_DIR_MASK = 0x000000C0,/* Data direction Mask */ CAM_DATA_VADDR = 0x00000000,/* Data type (000:Virtual) */ CAM_DATA_PADDR = 0x00000010,/* Data type (001:Physical) */ CAM_DATA_SG = 0x00040000,/* Data type (010:sglist) */ CAM_DATA_SG_PADDR = 0x00040010,/* Data type (011:sglist phys) */ CAM_DATA_BIO = 0x00200000,/* Data type (100:bio) */ CAM_DATA_MASK = 0x00240010,/* Data type mask */ CAM_SOFT_RST_OP = 0x00000100,/* Use Soft reset alternative */ CAM_ENG_SYNC = 0x00000200,/* Flush resid bytes on complete */ CAM_DEV_QFRZDIS = 0x00000400,/* Disable DEV Q freezing */ CAM_DEV_QFREEZE = 0x00000800,/* Freeze DEV Q on execution */ CAM_HIGH_POWER = 0x00001000,/* Command takes a lot of power */ CAM_SENSE_PTR = 0x00002000,/* Sense data is a pointer */ CAM_SENSE_PHYS = 0x00004000,/* Sense pointer is physical addr*/ CAM_TAG_ACTION_VALID = 0x00008000,/* Use the tag action in this ccb*/ CAM_PASS_ERR_RECOVER = 0x00010000,/* Pass driver does err. recovery*/ CAM_DIS_DISCONNECT = 0x00020000,/* Disable disconnect */ CAM_MSG_BUF_PHYS = 0x00080000,/* Message buffer ptr is physical*/ CAM_SNS_BUF_PHYS = 0x00100000,/* Autosense data ptr is physical*/ CAM_CDB_PHYS = 0x00400000,/* CDB poiner is physical */ CAM_ENG_SGLIST = 0x00800000,/* SG list is for the HBA engine */ /* Phase cognizant mode flags */ CAM_DIS_AUTOSRP = 0x01000000,/* Disable autosave/restore ptrs */ CAM_DIS_AUTODISC = 0x02000000,/* Disable auto disconnect */ CAM_TGT_CCB_AVAIL = 0x04000000,/* Target CCB available */ CAM_TGT_PHASE_MODE = 0x08000000,/* The SIM runs in phase mode */ CAM_MSGB_VALID = 0x10000000,/* Message buffer valid */ CAM_STATUS_VALID = 0x20000000,/* Status buffer valid */ CAM_DATAB_VALID = 0x40000000,/* Data buffer valid */ /* Host target Mode flags */ CAM_SEND_SENSE = 0x08000000,/* Send sense data with status */ CAM_TERM_IO = 0x10000000,/* Terminate I/O Message sup. */ CAM_DISCONNECT = 0x20000000,/* Disconnects are mandatory */ CAM_SEND_STATUS = 0x40000000,/* Send status after data phase */ CAM_UNLOCKED = 0x80000000 /* Call callback without lock. 
*/ } ccb_flags; typedef enum { CAM_USER_DATA_ADDR = 0x00000002,/* Userspace data pointers */ CAM_SG_FORMAT_IOVEC = 0x00000004,/* iovec instead of busdma S/G*/ CAM_UNMAPPED_BUF = 0x00000008 /* use unmapped I/O */ } ccb_xflags; /* XPT Opcodes for xpt_action */ typedef enum { /* Function code flags are bits greater than 0xff */ XPT_FC_QUEUED = 0x100, /* Non-immediate function code */ XPT_FC_USER_CCB = 0x200, XPT_FC_XPT_ONLY = 0x400, /* Only for the transport layer device */ XPT_FC_DEV_QUEUED = 0x800 | XPT_FC_QUEUED, /* Passes through the device queues */ /* Common function commands: 0x00->0x0F */ XPT_NOOP = 0x00, /* Execute Nothing */ XPT_SCSI_IO = 0x01 | XPT_FC_DEV_QUEUED, /* Execute the requested I/O operation */ XPT_GDEV_TYPE = 0x02, /* Get type information for specified device */ XPT_GDEVLIST = 0x03, /* Get a list of peripheral devices */ XPT_PATH_INQ = 0x04, /* Path routing inquiry */ XPT_REL_SIMQ = 0x05, /* Release a frozen device queue */ XPT_SASYNC_CB = 0x06, /* Set Asynchronous Callback Parameters */ XPT_SDEV_TYPE = 0x07, /* Set device type information */ XPT_SCAN_BUS = 0x08 | XPT_FC_QUEUED | XPT_FC_USER_CCB | XPT_FC_XPT_ONLY, /* (Re)Scan the SCSI Bus */ XPT_DEV_MATCH = 0x09 | XPT_FC_XPT_ONLY, /* Get EDT entries matching the given pattern */ XPT_DEBUG = 0x0a, /* Turn on debugging for a bus, target or lun */ XPT_PATH_STATS = 0x0b, /* Path statistics (error counts, etc.) */ XPT_GDEV_STATS = 0x0c, /* Device statistics (error counts, etc.) */ XPT_DEV_ADVINFO = 0x0e, /* Get/Set Device advanced information */ XPT_ASYNC = 0x0f | XPT_FC_QUEUED | XPT_FC_USER_CCB | XPT_FC_XPT_ONLY, /* Asynchronous event */ /* SCSI Control Functions: 0x10->0x1F */ XPT_ABORT = 0x10, /* Abort the specified CCB */ XPT_RESET_BUS = 0x11 | XPT_FC_XPT_ONLY, /* Reset the specified SCSI bus */ XPT_RESET_DEV = 0x12 | XPT_FC_DEV_QUEUED, /* Bus Device Reset the specified SCSI device */ XPT_TERM_IO = 0x13, /* Terminate the I/O process */ XPT_SCAN_LUN = 0x14 | XPT_FC_QUEUED | XPT_FC_USER_CCB | XPT_FC_XPT_ONLY, /* Scan Logical Unit */ XPT_GET_TRAN_SETTINGS = 0x15, /* * Get default/user transfer settings * for the target */ XPT_SET_TRAN_SETTINGS = 0x16, /* * Set transfer rate/width * negotiation settings */ XPT_CALC_GEOMETRY = 0x17, /* * Calculate the geometry parameters for * a device give the sector size and * volume size. */ XPT_ATA_IO = 0x18 | XPT_FC_DEV_QUEUED, /* Execute the requested ATA I/O operation */ XPT_GET_SIM_KNOB_OLD = 0x18, /* Compat only */ XPT_SET_SIM_KNOB = 0x19, /* * Set SIM specific knob values. */ XPT_GET_SIM_KNOB = 0x1a, /* * Get SIM specific knob values. 
*/ XPT_SMP_IO = 0x1b | XPT_FC_DEV_QUEUED, /* Serial Management Protocol */ XPT_NVME_IO = 0x1c | XPT_FC_DEV_QUEUED, /* Execute the requested NVMe I/O operation */ XPT_MMC_IO = 0x1d | XPT_FC_DEV_QUEUED, /* Placeholder for MMC / SD / SDIO I/O stuff */ XPT_SCAN_TGT = 0x1e | XPT_FC_QUEUED | XPT_FC_USER_CCB | XPT_FC_XPT_ONLY, /* Scan Target */ XPT_NVME_ADMIN = 0x1f | XPT_FC_DEV_QUEUED, /* Execute the requested NVMe Admin operation */ /* HBA engine commands 0x20->0x2F */ XPT_ENG_INQ = 0x20 | XPT_FC_XPT_ONLY, /* HBA engine feature inquiry */ XPT_ENG_EXEC = 0x21 | XPT_FC_DEV_QUEUED, /* HBA execute engine request */ /* Target mode commands: 0x30->0x3F */ XPT_EN_LUN = 0x30, /* Enable LUN as a target */ XPT_TARGET_IO = 0x31 | XPT_FC_DEV_QUEUED, /* Execute target I/O request */ XPT_ACCEPT_TARGET_IO = 0x32 | XPT_FC_QUEUED | XPT_FC_USER_CCB, /* Accept Host Target Mode CDB */ XPT_CONT_TARGET_IO = 0x33 | XPT_FC_DEV_QUEUED, /* Continue Host Target I/O Connection */ XPT_IMMED_NOTIFY = 0x34 | XPT_FC_QUEUED | XPT_FC_USER_CCB, /* Notify Host Target driver of event (obsolete) */ XPT_NOTIFY_ACK = 0x35, /* Acknowledgement of event (obsolete) */ XPT_IMMEDIATE_NOTIFY = 0x36 | XPT_FC_QUEUED | XPT_FC_USER_CCB, /* Notify Host Target driver of event */ XPT_NOTIFY_ACKNOWLEDGE = 0x37 | XPT_FC_QUEUED | XPT_FC_USER_CCB, /* Acknowledgement of event */ XPT_REPROBE_LUN = 0x38 | XPT_FC_QUEUED | XPT_FC_USER_CCB, /* Query device capacity and notify GEOM */ /* Vendor Unique codes: 0x80->0x8F */ XPT_VUNIQUE = 0x80 } xpt_opcode; #define XPT_FC_GROUP_MASK 0xF0 #define XPT_FC_GROUP(op) ((op) & XPT_FC_GROUP_MASK) #define XPT_FC_GROUP_COMMON 0x00 #define XPT_FC_GROUP_SCSI_CONTROL 0x10 #define XPT_FC_GROUP_HBA_ENGINE 0x20 #define XPT_FC_GROUP_TMODE 0x30 #define XPT_FC_GROUP_VENDOR_UNIQUE 0x80 #define XPT_FC_IS_DEV_QUEUED(ccb) \ (((ccb)->ccb_h.func_code & XPT_FC_DEV_QUEUED) == XPT_FC_DEV_QUEUED) #define XPT_FC_IS_QUEUED(ccb) \ (((ccb)->ccb_h.func_code & XPT_FC_QUEUED) != 0) typedef enum { PROTO_UNKNOWN, PROTO_UNSPECIFIED, PROTO_SCSI, /* Small Computer System Interface */ PROTO_ATA, /* AT Attachment */ PROTO_ATAPI, /* AT Attachment Packetized Interface */ PROTO_SATAPM, /* SATA Port Multiplier */ PROTO_SEMB, /* SATA Enclosure Management Bridge */ PROTO_NVME, /* NVME */ PROTO_MMCSD, /* MMC, SD, SDIO */ } cam_proto; typedef enum { XPORT_UNKNOWN, XPORT_UNSPECIFIED, XPORT_SPI, /* SCSI Parallel Interface */ XPORT_FC, /* Fiber Channel */ XPORT_SSA, /* Serial Storage Architecture */ XPORT_USB, /* Universal Serial Bus */ XPORT_PPB, /* Parallel Port Bus */ XPORT_ATA, /* AT Attachment */ XPORT_SAS, /* Serial Attached SCSI */ XPORT_SATA, /* Serial AT Attachment */ XPORT_ISCSI, /* iSCSI */ XPORT_SRP, /* SCSI RDMA Protocol */ XPORT_NVME, /* NVMe over PCIe */ XPORT_MMCSD, /* MMC, SD, SDIO card */ } cam_xport; #define XPORT_IS_NVME(t) ((t) == XPORT_NVME) #define XPORT_IS_ATA(t) ((t) == XPORT_ATA || (t) == XPORT_SATA) #define XPORT_IS_SCSI(t) ((t) != XPORT_UNKNOWN && \ (t) != XPORT_UNSPECIFIED && \ !XPORT_IS_ATA(t) && !XPORT_IS_NVME(t)) #define XPORT_DEVSTAT_TYPE(t) (XPORT_IS_ATA(t) ? DEVSTAT_TYPE_IF_IDE : \ XPORT_IS_SCSI(t) ? 
DEVSTAT_TYPE_IF_SCSI : \ DEVSTAT_TYPE_IF_OTHER) #define PROTO_VERSION_UNKNOWN (UINT_MAX - 1) #define PROTO_VERSION_UNSPECIFIED UINT_MAX #define XPORT_VERSION_UNKNOWN (UINT_MAX - 1) #define XPORT_VERSION_UNSPECIFIED UINT_MAX typedef union { LIST_ENTRY(ccb_hdr) le; SLIST_ENTRY(ccb_hdr) sle; TAILQ_ENTRY(ccb_hdr) tqe; STAILQ_ENTRY(ccb_hdr) stqe; } camq_entry; typedef union { void *ptr; u_long field; u_int8_t bytes[sizeof(uintptr_t)]; } ccb_priv_entry; typedef union { ccb_priv_entry entries[CCB_PERIPH_PRIV_SIZE]; u_int8_t bytes[CCB_PERIPH_PRIV_SIZE * sizeof(ccb_priv_entry)]; } ccb_ppriv_area; typedef union { ccb_priv_entry entries[CCB_SIM_PRIV_SIZE]; u_int8_t bytes[CCB_SIM_PRIV_SIZE * sizeof(ccb_priv_entry)]; } ccb_spriv_area; typedef struct { struct timeval *etime; uintptr_t sim_data; uintptr_t periph_data; } ccb_qos_area; struct ccb_hdr { cam_pinfo pinfo; /* Info for priority scheduling */ camq_entry xpt_links; /* For chaining in the XPT layer */ camq_entry sim_links; /* For chaining in the SIM layer */ camq_entry periph_links; /* For chaining in the type driver */ u_int32_t retry_count; void (*cbfcnp)(struct cam_periph *, union ccb *); /* Callback on completion function */ xpt_opcode func_code; /* XPT function code */ u_int32_t status; /* Status returned by CAM subsystem */ struct cam_path *path; /* Compiled path for this ccb */ path_id_t path_id; /* Path ID for the request */ target_id_t target_id; /* Target device ID */ lun_id_t target_lun; /* Target LUN number */ u_int32_t flags; /* ccb_flags */ u_int32_t xflags; /* Extended flags */ ccb_ppriv_area periph_priv; ccb_spriv_area sim_priv; ccb_qos_area qos; u_int32_t timeout; /* Hard timeout value in mseconds */ struct timeval softtimeout; /* Soft timeout value in sec + usec */ }; /* Get Device Information CCB */ struct ccb_getdev { struct ccb_hdr ccb_h; cam_proto protocol; struct scsi_inquiry_data inq_data; struct ata_params ident_data; u_int8_t serial_num[252]; u_int8_t inq_flags; u_int8_t serial_num_len; void *padding[2]; }; /* Device Statistics CCB */ struct ccb_getdevstats { struct ccb_hdr ccb_h; int dev_openings; /* Space left for more work on device*/ int dev_active; /* Transactions running on the device */ int allocated; /* CCBs allocated for the device */ int queued; /* CCBs queued to be sent to the device */ int held; /* * CCBs held by peripheral drivers * for this device */ int maxtags; /* * Boundary conditions for number of * tagged operations */ int mintags; struct timeval last_reset; /* Time of last bus reset/loop init */ }; typedef enum { CAM_GDEVLIST_LAST_DEVICE, CAM_GDEVLIST_LIST_CHANGED, CAM_GDEVLIST_MORE_DEVS, CAM_GDEVLIST_ERROR } ccb_getdevlist_status_e; struct ccb_getdevlist { struct ccb_hdr ccb_h; char periph_name[DEV_IDLEN]; u_int32_t unit_number; unsigned int generation; u_int32_t index; ccb_getdevlist_status_e status; }; typedef enum { PERIPH_MATCH_NONE = 0x000, PERIPH_MATCH_PATH = 0x001, PERIPH_MATCH_TARGET = 0x002, PERIPH_MATCH_LUN = 0x004, PERIPH_MATCH_NAME = 0x008, PERIPH_MATCH_UNIT = 0x010, PERIPH_MATCH_ANY = 0x01f } periph_pattern_flags; struct periph_match_pattern { char periph_name[DEV_IDLEN]; u_int32_t unit_number; path_id_t path_id; target_id_t target_id; lun_id_t target_lun; periph_pattern_flags flags; }; typedef enum { DEV_MATCH_NONE = 0x000, DEV_MATCH_PATH = 0x001, DEV_MATCH_TARGET = 0x002, DEV_MATCH_LUN = 0x004, DEV_MATCH_INQUIRY = 0x008, DEV_MATCH_DEVID = 0x010, DEV_MATCH_ANY = 0x00f } dev_pattern_flags; struct device_id_match_pattern { uint8_t id_len; uint8_t id[256]; }; struct device_match_pattern { 
path_id_t path_id; target_id_t target_id; lun_id_t target_lun; dev_pattern_flags flags; union { struct scsi_static_inquiry_pattern inq_pat; struct device_id_match_pattern devid_pat; } data; }; typedef enum { BUS_MATCH_NONE = 0x000, BUS_MATCH_PATH = 0x001, BUS_MATCH_NAME = 0x002, BUS_MATCH_UNIT = 0x004, BUS_MATCH_BUS_ID = 0x008, BUS_MATCH_ANY = 0x00f } bus_pattern_flags; struct bus_match_pattern { path_id_t path_id; char dev_name[DEV_IDLEN]; u_int32_t unit_number; u_int32_t bus_id; bus_pattern_flags flags; }; union match_pattern { struct periph_match_pattern periph_pattern; struct device_match_pattern device_pattern; struct bus_match_pattern bus_pattern; }; typedef enum { DEV_MATCH_PERIPH, DEV_MATCH_DEVICE, DEV_MATCH_BUS } dev_match_type; struct dev_match_pattern { dev_match_type type; union match_pattern pattern; }; struct periph_match_result { char periph_name[DEV_IDLEN]; u_int32_t unit_number; path_id_t path_id; target_id_t target_id; lun_id_t target_lun; }; typedef enum { DEV_RESULT_NOFLAG = 0x00, DEV_RESULT_UNCONFIGURED = 0x01 } dev_result_flags; struct device_match_result { path_id_t path_id; target_id_t target_id; lun_id_t target_lun; cam_proto protocol; struct scsi_inquiry_data inq_data; struct ata_params ident_data; dev_result_flags flags; }; struct bus_match_result { path_id_t path_id; char dev_name[DEV_IDLEN]; u_int32_t unit_number; u_int32_t bus_id; }; union match_result { struct periph_match_result periph_result; struct device_match_result device_result; struct bus_match_result bus_result; }; struct dev_match_result { dev_match_type type; union match_result result; }; typedef enum { CAM_DEV_MATCH_LAST, CAM_DEV_MATCH_MORE, CAM_DEV_MATCH_LIST_CHANGED, CAM_DEV_MATCH_SIZE_ERROR, CAM_DEV_MATCH_ERROR } ccb_dev_match_status; typedef enum { CAM_DEV_POS_NONE = 0x000, CAM_DEV_POS_BUS = 0x001, CAM_DEV_POS_TARGET = 0x002, CAM_DEV_POS_DEVICE = 0x004, CAM_DEV_POS_PERIPH = 0x008, CAM_DEV_POS_PDPTR = 0x010, CAM_DEV_POS_TYPEMASK = 0xf00, CAM_DEV_POS_EDT = 0x100, CAM_DEV_POS_PDRV = 0x200 } dev_pos_type; struct ccb_dm_cookie { void *bus; void *target; void *device; void *periph; void *pdrv; }; struct ccb_dev_position { u_int generations[4]; #define CAM_BUS_GENERATION 0x00 #define CAM_TARGET_GENERATION 0x01 #define CAM_DEV_GENERATION 0x02 #define CAM_PERIPH_GENERATION 0x03 dev_pos_type position_type; struct ccb_dm_cookie cookie; }; struct ccb_dev_match { struct ccb_hdr ccb_h; ccb_dev_match_status status; u_int32_t num_patterns; u_int32_t pattern_buf_len; struct dev_match_pattern *patterns; u_int32_t num_matches; u_int32_t match_buf_len; struct dev_match_result *matches; struct ccb_dev_position pos; }; /* * Definitions for the path inquiry CCB fields. */ #define CAM_VERSION 0x19 /* Hex value for current version */ typedef enum { PI_MDP_ABLE = 0x80, /* Supports MDP message */ PI_WIDE_32 = 0x40, /* Supports 32 bit wide SCSI */ PI_WIDE_16 = 0x20, /* Supports 16 bit wide SCSI */ PI_SDTR_ABLE = 0x10, /* Supports SDTR message */ PI_LINKED_CDB = 0x08, /* Supports linked CDBs */ PI_SATAPM = 0x04, /* Supports SATA PM */ PI_TAG_ABLE = 0x02, /* Supports tag queue messages */ PI_SOFT_RST = 0x01 /* Supports soft reset alternative */ } pi_inqflag; typedef enum { PIT_PROCESSOR = 0x80, /* Target mode processor mode */ PIT_PHASE = 0x40, /* Target mode phase cog. 
mode */ PIT_DISCONNECT = 0x20, /* Disconnects supported in target mode */ PIT_TERM_IO = 0x10, /* Terminate I/O message supported in TM */ PIT_GRP_6 = 0x08, /* Group 6 commands supported */ PIT_GRP_7 = 0x04 /* Group 7 commands supported */ } pi_tmflag; typedef enum { PIM_ATA_EXT = 0x200,/* ATA requests can understand ata_ext requests */ PIM_EXTLUNS = 0x100,/* 64bit extended LUNs supported */ PIM_SCANHILO = 0x80, /* Bus scans from high ID to low ID */ PIM_NOREMOVE = 0x40, /* Removeable devices not included in scan */ PIM_NOINITIATOR = 0x20, /* Initiator role not supported. */ PIM_NOBUSRESET = 0x10, /* User has disabled initial BUS RESET */ PIM_NO_6_BYTE = 0x08, /* Do not send 6-byte commands */ PIM_SEQSCAN = 0x04, /* Do bus scans sequentially, not in parallel */ PIM_UNMAPPED = 0x02, PIM_NOSCAN = 0x01 /* SIM does its own scanning */ } pi_miscflag; /* Path Inquiry CCB */ struct ccb_pathinq_settings_spi { u_int8_t ppr_options; }; struct ccb_pathinq_settings_fc { u_int64_t wwnn; /* world wide node name */ u_int64_t wwpn; /* world wide port name */ u_int32_t port; /* 24 bit port id, if known */ u_int32_t bitrate; /* Mbps */ }; struct ccb_pathinq_settings_sas { u_int32_t bitrate; /* Mbps */ }; struct ccb_pathinq_settings_nvme { uint32_t nsid; /* Namespace ID for this path */ uint32_t domain; uint8_t bus; uint8_t slot; uint8_t function; uint8_t extra; }; #define PATHINQ_SETTINGS_SIZE 128 struct ccb_pathinq { struct ccb_hdr ccb_h; u_int8_t version_num; /* Version number for the SIM/HBA */ u_int8_t hba_inquiry; /* Mimic of INQ byte 7 for the HBA */ u_int16_t target_sprt; /* Flags for target mode support */ u_int32_t hba_misc; /* Misc HBA features */ u_int16_t hba_eng_cnt; /* HBA engine count */ /* Vendor Unique capabilities */ u_int8_t vuhba_flags[VUHBALEN]; u_int32_t max_target; /* Maximum supported Target */ u_int32_t max_lun; /* Maximum supported Lun */ u_int32_t async_flags; /* Installed Async handlers */ path_id_t hpath_id; /* Highest Path ID in the subsystem */ target_id_t initiator_id; /* ID of the HBA on the SCSI bus */ char sim_vid[SIM_IDLEN]; /* Vendor ID of the SIM */ char hba_vid[HBA_IDLEN]; /* Vendor ID of the HBA */ char dev_name[DEV_IDLEN];/* Device name for SIM */ u_int32_t unit_number; /* Unit number for SIM */ u_int32_t bus_id; /* Bus ID for SIM */ u_int32_t base_transfer_speed;/* Base bus speed in KB/sec */ cam_proto protocol; u_int protocol_version; cam_xport transport; u_int transport_version; union { struct ccb_pathinq_settings_spi spi; struct ccb_pathinq_settings_fc fc; struct ccb_pathinq_settings_sas sas; struct ccb_pathinq_settings_nvme nvme; char ccb_pathinq_settings_opaque[PATHINQ_SETTINGS_SIZE]; } xport_specific; u_int maxio; /* Max supported I/O size, in bytes. */ u_int16_t hba_vendor; /* HBA vendor ID */ u_int16_t hba_device; /* HBA device ID */ u_int16_t hba_subvendor; /* HBA subvendor ID */ u_int16_t hba_subdevice; /* HBA subdevice ID */ }; /* Path Statistics CCB */ struct ccb_pathstats { struct ccb_hdr ccb_h; struct timeval last_reset; /* Time of last bus reset/loop init */ }; typedef enum { SMP_FLAG_NONE = 0x00, SMP_FLAG_REQ_SG = 0x01, SMP_FLAG_RSP_SG = 0x02 } ccb_smp_pass_flags; /* * Serial Management Protocol CCB * XXX Currently the semantics for this CCB are that it is executed either * by the addressed device, or that device's parent (i.e. an expander for * any device on an expander) if the addressed device doesn't support SMP. 
* Later, once we have the ability to probe SMP-only devices and put them * in CAM's topology, the CCB will only be executed by the addressed device * if possible. */ struct ccb_smpio { struct ccb_hdr ccb_h; uint8_t *smp_request; int smp_request_len; uint16_t smp_request_sglist_cnt; uint8_t *smp_response; int smp_response_len; uint16_t smp_response_sglist_cnt; ccb_smp_pass_flags flags; }; typedef union { u_int8_t *sense_ptr; /* * Pointer to storage * for sense information */ /* Storage Area for sense information */ struct scsi_sense_data sense_buf; } sense_t; typedef union { u_int8_t *cdb_ptr; /* Pointer to the CDB bytes to send */ /* Area for the CDB send */ u_int8_t cdb_bytes[IOCDBLEN]; } cdb_t; /* * SCSI I/O Request CCB used for the XPT_SCSI_IO and XPT_CONT_TARGET_IO * function codes. */ struct ccb_scsiio { struct ccb_hdr ccb_h; union ccb *next_ccb; /* Ptr for next CCB for action */ u_int8_t *req_map; /* Ptr to mapping info */ u_int8_t *data_ptr; /* Ptr to the data buf/SG list */ u_int32_t dxfer_len; /* Data transfer length */ /* Autosense storage */ struct scsi_sense_data sense_data; u_int8_t sense_len; /* Number of bytes to autosense */ u_int8_t cdb_len; /* Number of bytes for the CDB */ u_int16_t sglist_cnt; /* Number of SG list entries */ u_int8_t scsi_status; /* Returned SCSI status */ u_int8_t sense_resid; /* Autosense resid length: 2's comp */ u_int32_t resid; /* Transfer residual length: 2's comp */ cdb_t cdb_io; /* Union for CDB bytes/pointer */ u_int8_t *msg_ptr; /* Pointer to the message buffer */ u_int16_t msg_len; /* Number of bytes for the Message */ u_int8_t tag_action; /* What to do for tag queueing */ /* * The tag action should be either the define below (to send a * non-tagged transaction) or one of the defined scsi tag messages * from scsi_message.h. */ #define CAM_TAG_ACTION_NONE 0x00 u_int tag_id; /* tag id from initator (target mode) */ u_int init_id; /* initiator id of who selected */ #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING) struct bio *bio; /* Associated bio */ #endif }; static __inline uint8_t * scsiio_cdb_ptr(struct ccb_scsiio *ccb) { return ((ccb->ccb_h.flags & CAM_CDB_POINTER) ? ccb->cdb_io.cdb_ptr : ccb->cdb_io.cdb_bytes); } /* * ATA I/O Request CCB used for the XPT_ATA_IO function code. */ struct ccb_ataio { struct ccb_hdr ccb_h; union ccb *next_ccb; /* Ptr for next CCB for action */ struct ata_cmd cmd; /* ATA command register set */ struct ata_res res; /* ATA result register set */ u_int8_t *data_ptr; /* Ptr to the data buf/SG list */ u_int32_t dxfer_len; /* Data transfer length */ u_int32_t resid; /* Transfer residual length: 2's comp */ u_int8_t ata_flags; /* Flags for the rest of the buffer */ #define ATA_FLAG_AUX 0x1 uint32_t aux; uint32_t unused; }; /* * MMC I/O Request CCB used for the XPT_MMC_IO function code. */ struct ccb_mmcio { struct ccb_hdr ccb_h; union ccb *next_ccb; /* Ptr for next CCB for action */ struct mmc_command cmd; struct mmc_command stop; }; struct ccb_accept_tio { struct ccb_hdr ccb_h; cdb_t cdb_io; /* Union for CDB bytes/pointer */ u_int8_t cdb_len; /* Number of bytes for the CDB */ u_int8_t tag_action; /* What to do for tag queueing */ u_int8_t sense_len; /* Number of bytes of Sense Data */ u_int tag_id; /* tag id from initator (target mode) */ u_int init_id; /* initiator id of who selected */ struct scsi_sense_data sense_data; }; static __inline uint8_t * atio_cdb_ptr(struct ccb_accept_tio *ccb) { return ((ccb->ccb_h.flags & CAM_CDB_POINTER) ? 
ccb->cdb_io.cdb_ptr : ccb->cdb_io.cdb_bytes); } /* Release SIM Queue */ struct ccb_relsim { struct ccb_hdr ccb_h; u_int32_t release_flags; #define RELSIM_ADJUST_OPENINGS 0x01 #define RELSIM_RELEASE_AFTER_TIMEOUT 0x02 #define RELSIM_RELEASE_AFTER_CMDCMPLT 0x04 #define RELSIM_RELEASE_AFTER_QEMPTY 0x08 u_int32_t openings; u_int32_t release_timeout; /* Abstract argument. */ u_int32_t qfrozen_cnt; }; /* * NVMe I/O Request CCB used for the XPT_NVME_IO and XPT_NVME_ADMIN function codes. */ struct ccb_nvmeio { struct ccb_hdr ccb_h; union ccb *next_ccb; /* Ptr for next CCB for action */ struct nvme_command cmd; /* NVME command, per NVME standard */ struct nvme_completion cpl; /* NVME completion, per NVME standard */ uint8_t *data_ptr; /* Ptr to the data buf/SG list */ uint32_t dxfer_len; /* Data transfer length */ uint16_t sglist_cnt; /* Number of SG list entries */ uint16_t unused; /* padding for removed uint32_t */ }; /* * Definitions for the asynchronous callback CCB fields. */ typedef enum { AC_UNIT_ATTENTION = 0x4000,/* Device reported UNIT ATTENTION */ AC_ADVINFO_CHANGED = 0x2000,/* Advance info might have changes */ AC_CONTRACT = 0x1000,/* A contractual callback */ AC_GETDEV_CHANGED = 0x800,/* Getdev info might have changed */ AC_INQ_CHANGED = 0x400,/* Inquiry info might have changed */ AC_TRANSFER_NEG = 0x200,/* New transfer settings in effect */ AC_LOST_DEVICE = 0x100,/* A device went away */ AC_FOUND_DEVICE = 0x080,/* A new device was found */ AC_PATH_DEREGISTERED = 0x040,/* A path has de-registered */ AC_PATH_REGISTERED = 0x020,/* A new path has been registered */ AC_SENT_BDR = 0x010,/* A BDR message was sent to target */ AC_SCSI_AEN = 0x008,/* A SCSI AEN has been received */ AC_UNSOL_RESEL = 0x002,/* Unsolicited reselection occurred */ AC_BUS_RESET = 0x001 /* A SCSI bus reset occurred */ } ac_code; typedef void ac_callback_t (void *softc, u_int32_t code, struct cam_path *path, void *args); /* * Generic Asynchronous callbacks. * * Generic arguments passed bac which are then interpreted between a per-system * contract number. 
*/ #define AC_CONTRACT_DATA_MAX (128 - sizeof (u_int64_t)) struct ac_contract { u_int64_t contract_number; u_int8_t contract_data[AC_CONTRACT_DATA_MAX]; }; #define AC_CONTRACT_DEV_CHG 1 struct ac_device_changed { u_int64_t wwpn; u_int32_t port; target_id_t target; u_int8_t arrived; }; /* Set Asynchronous Callback CCB */ struct ccb_setasync { struct ccb_hdr ccb_h; u_int32_t event_enable; /* Async Event enables */ ac_callback_t *callback; void *callback_arg; }; /* Set Device Type CCB */ struct ccb_setdev { struct ccb_hdr ccb_h; u_int8_t dev_type; /* Value for dev type field in EDT */ }; /* SCSI Control Functions */ /* Abort XPT request CCB */ struct ccb_abort { struct ccb_hdr ccb_h; union ccb *abort_ccb; /* Pointer to CCB to abort */ }; /* Reset SCSI Bus CCB */ struct ccb_resetbus { struct ccb_hdr ccb_h; }; /* Reset SCSI Device CCB */ struct ccb_resetdev { struct ccb_hdr ccb_h; }; /* Terminate I/O Process Request CCB */ struct ccb_termio { struct ccb_hdr ccb_h; union ccb *termio_ccb; /* Pointer to CCB to terminate */ }; typedef enum { CTS_TYPE_CURRENT_SETTINGS, CTS_TYPE_USER_SETTINGS } cts_type; struct ccb_trans_settings_scsi { u_int valid; /* Which fields to honor */ #define CTS_SCSI_VALID_TQ 0x01 u_int flags; #define CTS_SCSI_FLAGS_TAG_ENB 0x01 }; struct ccb_trans_settings_ata { u_int valid; /* Which fields to honor */ #define CTS_ATA_VALID_TQ 0x01 u_int flags; #define CTS_ATA_FLAGS_TAG_ENB 0x01 }; struct ccb_trans_settings_spi { u_int valid; /* Which fields to honor */ #define CTS_SPI_VALID_SYNC_RATE 0x01 #define CTS_SPI_VALID_SYNC_OFFSET 0x02 #define CTS_SPI_VALID_BUS_WIDTH 0x04 #define CTS_SPI_VALID_DISC 0x08 #define CTS_SPI_VALID_PPR_OPTIONS 0x10 u_int flags; #define CTS_SPI_FLAGS_DISC_ENB 0x01 u_int sync_period; u_int sync_offset; u_int bus_width; u_int ppr_options; }; struct ccb_trans_settings_fc { u_int valid; /* Which fields to honor */ #define CTS_FC_VALID_WWNN 0x8000 #define CTS_FC_VALID_WWPN 0x4000 #define CTS_FC_VALID_PORT 0x2000 #define CTS_FC_VALID_SPEED 0x1000 u_int64_t wwnn; /* world wide node name */ u_int64_t wwpn; /* world wide port name */ u_int32_t port; /* 24 bit port id, if known */ u_int32_t bitrate; /* Mbps */ }; struct ccb_trans_settings_sas { u_int valid; /* Which fields to honor */ #define CTS_SAS_VALID_SPEED 0x1000 u_int32_t bitrate; /* Mbps */ }; struct ccb_trans_settings_pata { u_int valid; /* Which fields to honor */ #define CTS_ATA_VALID_MODE 0x01 #define CTS_ATA_VALID_BYTECOUNT 0x02 #define CTS_ATA_VALID_ATAPI 0x20 #define CTS_ATA_VALID_CAPS 0x40 int mode; /* Mode */ u_int bytecount; /* Length of PIO transaction */ u_int atapi; /* Length of ATAPI CDB */ u_int caps; /* Device and host SATA caps. */ #define CTS_ATA_CAPS_H 0x0000ffff #define CTS_ATA_CAPS_H_DMA48 0x00000001 /* 48-bit DMA */ #define CTS_ATA_CAPS_D 0xffff0000 }; struct ccb_trans_settings_sata { u_int valid; /* Which fields to honor */ #define CTS_SATA_VALID_MODE 0x01 #define CTS_SATA_VALID_BYTECOUNT 0x02 #define CTS_SATA_VALID_REVISION 0x04 #define CTS_SATA_VALID_PM 0x08 #define CTS_SATA_VALID_TAGS 0x10 #define CTS_SATA_VALID_ATAPI 0x20 #define CTS_SATA_VALID_CAPS 0x40 int mode; /* Legacy PATA mode */ u_int bytecount; /* Length of PIO transaction */ int revision; /* SATA revision */ u_int pm_present; /* PM is present (XPT->SIM) */ u_int tags; /* Number of allowed tags */ u_int atapi; /* Length of ATAPI CDB */ u_int caps; /* Device and host SATA caps. 
*/ #define CTS_SATA_CAPS_H 0x0000ffff #define CTS_SATA_CAPS_H_PMREQ 0x00000001 #define CTS_SATA_CAPS_H_APST 0x00000002 #define CTS_SATA_CAPS_H_DMAAA 0x00000010 /* Auto-activation */ #define CTS_SATA_CAPS_H_AN 0x00000020 /* Async. notification */ #define CTS_SATA_CAPS_D 0xffff0000 #define CTS_SATA_CAPS_D_PMREQ 0x00010000 #define CTS_SATA_CAPS_D_APST 0x00020000 }; struct ccb_trans_settings_nvme { u_int valid; /* Which fields to honor */ #define CTS_NVME_VALID_SPEC 0x01 #define CTS_NVME_VALID_CAPS 0x02 #define CTS_NVME_VALID_LINK 0x04 uint32_t spec; /* NVMe spec implemented -- same as vs register */ uint32_t max_xfer; /* Max transfer size (0 -> unlimited */ uint32_t caps; uint8_t lanes; /* Number of PCIe lanes */ uint8_t speed; /* PCIe generation for each lane */ uint8_t max_lanes; /* Number of PCIe lanes */ uint8_t max_speed; /* PCIe generation for each lane */ }; #include struct ccb_trans_settings_mmc { struct mmc_ios ios; #define MMC_CLK (1 << 1) #define MMC_VDD (1 << 2) #define MMC_CS (1 << 3) #define MMC_BW (1 << 4) #define MMC_PM (1 << 5) #define MMC_BT (1 << 6) #define MMC_BM (1 << 7) uint32_t ios_valid; /* The folowing is used only for GET_TRAN_SETTINGS */ uint32_t host_ocr; int host_f_min; int host_f_max; #define MMC_CAP_4_BIT_DATA (1 << 0) /* Can do 4-bit data transfers */ #define MMC_CAP_8_BIT_DATA (1 << 1) /* Can do 8-bit data transfers */ #define MMC_CAP_HSPEED (1 << 2) /* Can do High Speed transfers */ uint32_t host_caps; + uint32_t host_max_data; }; /* Get/Set transfer rate/width/disconnection/tag queueing settings */ struct ccb_trans_settings { struct ccb_hdr ccb_h; cts_type type; /* Current or User settings */ cam_proto protocol; u_int protocol_version; cam_xport transport; u_int transport_version; union { u_int valid; /* Which fields to honor */ struct ccb_trans_settings_ata ata; struct ccb_trans_settings_scsi scsi; struct ccb_trans_settings_nvme nvme; struct ccb_trans_settings_mmc mmc; } proto_specific; union { u_int valid; /* Which fields to honor */ struct ccb_trans_settings_spi spi; struct ccb_trans_settings_fc fc; struct ccb_trans_settings_sas sas; struct ccb_trans_settings_pata ata; struct ccb_trans_settings_sata sata; struct ccb_trans_settings_nvme nvme; } xport_specific; }; /* * Calculate the geometry parameters for a device * give the block size and volume size in blocks. */ struct ccb_calc_geometry { struct ccb_hdr ccb_h; u_int32_t block_size; u_int64_t volume_size; u_int32_t cylinders; u_int8_t heads; u_int8_t secs_per_track; }; /* * Set or get SIM (and transport) specific knobs */ #define KNOB_VALID_ADDRESS 0x1 #define KNOB_VALID_ROLE 0x2 #define KNOB_ROLE_NONE 0x0 #define KNOB_ROLE_INITIATOR 0x1 #define KNOB_ROLE_TARGET 0x2 #define KNOB_ROLE_BOTH 0x3 struct ccb_sim_knob_settings_spi { u_int valid; u_int initiator_id; u_int role; }; struct ccb_sim_knob_settings_fc { u_int valid; u_int64_t wwnn; /* world wide node name */ u_int64_t wwpn; /* world wide port name */ u_int role; }; struct ccb_sim_knob_settings_sas { u_int valid; u_int64_t wwnn; /* world wide node name */ u_int role; }; #define KNOB_SETTINGS_SIZE 128 struct ccb_sim_knob { struct ccb_hdr ccb_h; union { u_int valid; /* Which fields to honor */ struct ccb_sim_knob_settings_spi spi; struct ccb_sim_knob_settings_fc fc; struct ccb_sim_knob_settings_sas sas; char pad[KNOB_SETTINGS_SIZE]; } xport_specific; }; /* * Rescan the given bus, or bus/target/lun */ struct ccb_rescan { struct ccb_hdr ccb_h; cam_flags flags; }; /* * Turn on debugging for the given bus, bus/target, or bus/target/lun. 
*/ struct ccb_debug { struct ccb_hdr ccb_h; cam_debug_flags flags; }; /* Target mode structures. */ struct ccb_en_lun { struct ccb_hdr ccb_h; u_int16_t grp6_len; /* Group 6 VU CDB length */ u_int16_t grp7_len; /* Group 7 VU CDB length */ u_int8_t enable; }; /* old, barely used immediate notify, binary compatibility */ struct ccb_immed_notify { struct ccb_hdr ccb_h; struct scsi_sense_data sense_data; u_int8_t sense_len; /* Number of bytes in sense buffer */ u_int8_t initiator_id; /* Id of initiator that selected */ u_int8_t message_args[7]; /* Message Arguments */ }; struct ccb_notify_ack { struct ccb_hdr ccb_h; u_int16_t seq_id; /* Sequence identifier */ u_int8_t event; /* Event flags */ }; struct ccb_immediate_notify { struct ccb_hdr ccb_h; u_int tag_id; /* Tag for immediate notify */ u_int seq_id; /* Tag for target of notify */ u_int initiator_id; /* Initiator Identifier */ u_int arg; /* Function specific */ }; struct ccb_notify_acknowledge { struct ccb_hdr ccb_h; u_int tag_id; /* Tag for immediate notify */ u_int seq_id; /* Tar for target of notify */ u_int initiator_id; /* Initiator Identifier */ u_int arg; /* Response information */ /* * Lower byte of arg is one of RESPONSE CODE values defined below * (subset of response codes from SPL-4 and FCP-4 specifications), * upper 3 bytes is code-specific ADDITIONAL RESPONSE INFORMATION. */ #define CAM_RSP_TMF_COMPLETE 0x00 #define CAM_RSP_TMF_REJECTED 0x04 #define CAM_RSP_TMF_FAILED 0x05 #define CAM_RSP_TMF_SUCCEEDED 0x08 #define CAM_RSP_TMF_INCORRECT_LUN 0x09 }; /* HBA engine structures. */ typedef enum { EIT_BUFFER, /* Engine type: buffer memory */ EIT_LOSSLESS, /* Engine type: lossless compression */ EIT_LOSSY, /* Engine type: lossy compression */ EIT_ENCRYPT /* Engine type: encryption */ } ei_type; typedef enum { EAD_VUNIQUE, /* Engine algorithm ID: vendor unique */ EAD_LZ1V1, /* Engine algorithm ID: LZ1 var.1 */ EAD_LZ2V1, /* Engine algorithm ID: LZ2 var.1 */ EAD_LZ2V2 /* Engine algorithm ID: LZ2 var.2 */ } ei_algo; struct ccb_eng_inq { struct ccb_hdr ccb_h; u_int16_t eng_num; /* The engine number for this inquiry */ ei_type eng_type; /* Returned engine type */ ei_algo eng_algo; /* Returned engine algorithm type */ u_int32_t eng_memeory; /* Returned engine memory size */ }; struct ccb_eng_exec { /* This structure must match SCSIIO size */ struct ccb_hdr ccb_h; u_int8_t *pdrv_ptr; /* Ptr used by the peripheral driver */ u_int8_t *req_map; /* Ptr for mapping info on the req. */ u_int8_t *data_ptr; /* Pointer to the data buf/SG list */ u_int32_t dxfer_len; /* Data transfer length */ u_int8_t *engdata_ptr; /* Pointer to the engine buffer data */ u_int16_t sglist_cnt; /* Num of scatter gather list entries */ u_int32_t dmax_len; /* Destination data maximum length */ u_int32_t dest_len; /* Destination data length */ int32_t src_resid; /* Source residual length: 2's comp */ u_int32_t timeout; /* Timeout value */ u_int16_t eng_num; /* Engine number for this request */ u_int16_t vu_flags; /* Vendor Unique flags */ }; /* * Definitions for the timeout field in the SCSI I/O CCB. */ #define CAM_TIME_DEFAULT 0x00000000 /* Use SIM default value */ #define CAM_TIME_INFINITY 0xFFFFFFFF /* Infinite timeout */ #define CAM_SUCCESS 0 /* For signaling general success */ #define CAM_FAILURE 1 /* For signaling general failure */ #define CAM_FALSE 0 #define CAM_TRUE 1 #define XPT_CCB_INVALID -1 /* for signaling a bad CCB to free */ /* * CCB for working with advanced device information. This operates in a fashion * similar to XPT_GDEV_TYPE. 
Specify the target in ccb_h, the buffer * type requested, and provide a buffer size/buffer to write to. If the * buffer is too small, provsiz will be larger than bufsiz. */ struct ccb_dev_advinfo { struct ccb_hdr ccb_h; uint32_t flags; #define CDAI_FLAG_NONE 0x0 /* No flags set */ #define CDAI_FLAG_STORE 0x1 /* If set, action becomes store */ uint32_t buftype; /* IN: Type of data being requested */ /* NB: buftype is interpreted on a per-transport basis */ #define CDAI_TYPE_SCSI_DEVID 1 #define CDAI_TYPE_SERIAL_NUM 2 #define CDAI_TYPE_PHYS_PATH 3 #define CDAI_TYPE_RCAPLONG 4 #define CDAI_TYPE_EXT_INQ 5 #define CDAI_TYPE_NVME_CNTRL 6 /* NVMe Identify Controller data */ #define CDAI_TYPE_NVME_NS 7 /* NVMe Identify Namespace data */ #define CDAI_TYPE_MMC_PARAMS 8 /* MMC/SD ident */ off_t bufsiz; /* IN: Size of external buffer */ #define CAM_SCSI_DEVID_MAXLEN 65536 /* length in buffer is an uint16_t */ off_t provsiz; /* OUT: Size required/used */ uint8_t *buf; /* IN/OUT: Buffer for requested data */ }; /* * CCB for sending async events */ struct ccb_async { struct ccb_hdr ccb_h; uint32_t async_code; off_t async_arg_size; void *async_arg_ptr; }; /* * Union of all CCB types for kernel space allocation. This union should * never be used for manipulating CCBs - its only use is for the allocation * and deallocation of raw CCB space and is the return type of xpt_ccb_alloc * and the argument to xpt_ccb_free. */ union ccb { struct ccb_hdr ccb_h; /* For convenience */ struct ccb_scsiio csio; struct ccb_getdev cgd; struct ccb_getdevlist cgdl; struct ccb_pathinq cpi; struct ccb_relsim crs; struct ccb_setasync csa; struct ccb_setdev csd; struct ccb_pathstats cpis; struct ccb_getdevstats cgds; struct ccb_dev_match cdm; struct ccb_trans_settings cts; struct ccb_calc_geometry ccg; struct ccb_sim_knob knob; struct ccb_abort cab; struct ccb_resetbus crb; struct ccb_resetdev crd; struct ccb_termio tio; struct ccb_accept_tio atio; struct ccb_scsiio ctio; struct ccb_en_lun cel; struct ccb_immed_notify cin; struct ccb_notify_ack cna; struct ccb_immediate_notify cin1; struct ccb_notify_acknowledge cna2; struct ccb_eng_inq cei; struct ccb_eng_exec cee; struct ccb_smpio smpio; struct ccb_rescan crcn; struct ccb_debug cdbg; struct ccb_ataio ataio; struct ccb_dev_advinfo cdai; struct ccb_async casync; struct ccb_nvmeio nvmeio; struct ccb_mmcio mmcio; }; #define CCB_CLEAR_ALL_EXCEPT_HDR(ccbp) \ bzero((char *)(ccbp) + sizeof((ccbp)->ccb_h), \ sizeof(*(ccbp)) - sizeof((ccbp)->ccb_h)) __BEGIN_DECLS static __inline void cam_fill_csio(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int32_t flags, u_int8_t tag_action, u_int8_t *data_ptr, u_int32_t dxfer_len, u_int8_t sense_len, u_int8_t cdb_len, u_int32_t timeout) { csio->ccb_h.func_code = XPT_SCSI_IO; csio->ccb_h.flags = flags; csio->ccb_h.xflags = 0; csio->ccb_h.retry_count = retries; csio->ccb_h.cbfcnp = cbfcnp; csio->ccb_h.timeout = timeout; csio->data_ptr = data_ptr; csio->dxfer_len = dxfer_len; csio->sense_len = sense_len; csio->cdb_len = cdb_len; csio->tag_action = tag_action; #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING) csio->bio = NULL; #endif } static __inline void cam_fill_ctio(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int32_t flags, u_int tag_action, u_int tag_id, u_int init_id, u_int scsi_status, u_int8_t *data_ptr, u_int32_t dxfer_len, u_int32_t timeout) { csio->ccb_h.func_code = XPT_CONT_TARGET_IO; csio->ccb_h.flags = flags; csio->ccb_h.xflags = 0; 
csio->ccb_h.retry_count = retries; csio->ccb_h.cbfcnp = cbfcnp; csio->ccb_h.timeout = timeout; csio->data_ptr = data_ptr; csio->dxfer_len = dxfer_len; csio->scsi_status = scsi_status; csio->tag_action = tag_action; csio->tag_id = tag_id; csio->init_id = init_id; } static __inline void cam_fill_ataio(struct ccb_ataio *ataio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int32_t flags, u_int tag_action __unused, u_int8_t *data_ptr, u_int32_t dxfer_len, u_int32_t timeout) { ataio->ccb_h.func_code = XPT_ATA_IO; ataio->ccb_h.flags = flags; ataio->ccb_h.retry_count = retries; ataio->ccb_h.cbfcnp = cbfcnp; ataio->ccb_h.timeout = timeout; ataio->data_ptr = data_ptr; ataio->dxfer_len = dxfer_len; ataio->ata_flags = 0; } static __inline void cam_fill_smpio(struct ccb_smpio *smpio, uint32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), uint32_t flags, uint8_t *smp_request, int smp_request_len, uint8_t *smp_response, int smp_response_len, uint32_t timeout) { #ifdef _KERNEL KASSERT((flags & CAM_DIR_MASK) == CAM_DIR_BOTH, ("direction != CAM_DIR_BOTH")); KASSERT((smp_request != NULL) && (smp_response != NULL), ("need valid request and response buffers")); KASSERT((smp_request_len != 0) && (smp_response_len != 0), ("need non-zero request and response lengths")); #endif /*_KERNEL*/ smpio->ccb_h.func_code = XPT_SMP_IO; smpio->ccb_h.flags = flags; smpio->ccb_h.retry_count = retries; smpio->ccb_h.cbfcnp = cbfcnp; smpio->ccb_h.timeout = timeout; smpio->smp_request = smp_request; smpio->smp_request_len = smp_request_len; smpio->smp_response = smp_response; smpio->smp_response_len = smp_response_len; } static __inline void cam_fill_mmcio(struct ccb_mmcio *mmcio, uint32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), uint32_t flags, uint32_t mmc_opcode, uint32_t mmc_arg, uint32_t mmc_flags, struct mmc_data *mmc_d, uint32_t timeout) { mmcio->ccb_h.func_code = XPT_MMC_IO; mmcio->ccb_h.flags = flags; mmcio->ccb_h.retry_count = retries; mmcio->ccb_h.cbfcnp = cbfcnp; mmcio->ccb_h.timeout = timeout; mmcio->cmd.opcode = mmc_opcode; mmcio->cmd.arg = mmc_arg; mmcio->cmd.flags = mmc_flags; mmcio->stop.opcode = 0; mmcio->stop.arg = 0; mmcio->stop.flags = 0; if (mmc_d != NULL) { mmcio->cmd.data = mmc_d; } else mmcio->cmd.data = NULL; mmcio->cmd.resp[0] = 0; mmcio->cmd.resp[1] = 0; mmcio->cmd.resp[2] = 0; mmcio->cmd.resp[3] = 0; } static __inline void cam_set_ccbstatus(union ccb *ccb, cam_status status) { ccb->ccb_h.status &= ~CAM_STATUS_MASK; ccb->ccb_h.status |= status; } static __inline cam_status cam_ccb_status(union ccb *ccb) { return ((cam_status)(ccb->ccb_h.status & CAM_STATUS_MASK)); } void cam_calc_geometry(struct ccb_calc_geometry *ccg, int extended); static __inline void cam_fill_nvmeio(struct ccb_nvmeio *nvmeio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int32_t flags, u_int8_t *data_ptr, u_int32_t dxfer_len, u_int32_t timeout) { nvmeio->ccb_h.func_code = XPT_NVME_IO; nvmeio->ccb_h.flags = flags; nvmeio->ccb_h.retry_count = retries; nvmeio->ccb_h.cbfcnp = cbfcnp; nvmeio->ccb_h.timeout = timeout; nvmeio->data_ptr = data_ptr; nvmeio->dxfer_len = dxfer_len; } static __inline void cam_fill_nvmeadmin(struct ccb_nvmeio *nvmeio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int32_t flags, u_int8_t *data_ptr, u_int32_t dxfer_len, u_int32_t timeout) { nvmeio->ccb_h.func_code = XPT_NVME_ADMIN; nvmeio->ccb_h.flags = flags; nvmeio->ccb_h.retry_count = retries; nvmeio->ccb_h.cbfcnp = cbfcnp; nvmeio->ccb_h.timeout = timeout; 
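/*
 * Illustrative sketch of how a peripheral driver might use the
 * cam_fill_mmcio() helper above to build a single-block read; "buf" and
 * "blockno" are placeholders and the 5000 ms timeout is arbitrary, so
 * treat this as a usage outline rather than code taken from any driver:
 *
 *	struct mmc_data d;
 *
 *	memset(&d, 0, sizeof(d));
 *	d.data = buf;
 *	d.len = 512;
 *	d.flags = MMC_DATA_READ;
 *	cam_fill_mmcio(&ccb->mmcio, 0, NULL, CAM_DIR_IN,
 *	    MMC_READ_SINGLE_BLOCK, blockno,
 *	    MMC_RSP_R1 | MMC_CMD_ADTC, &d, 5000);
 *
 * The filled CCB is then dispatched through xpt_action() or
 * cam_periph_runccb() as usual.
 */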
nvmeio->data_ptr = data_ptr; nvmeio->dxfer_len = dxfer_len; } __END_DECLS #endif /* _CAM_CAM_CCB_H */ Index: head/sys/cam/mmc/mmc_da.c =================================================================== --- head/sys/cam/mmc/mmc_da.c (revision 345774) +++ head/sys/cam/mmc/mmc_da.c (revision 345775) @@ -1,1900 +1,1917 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2006 Bernd Walter * Copyright (c) 2006 M. Warner Losh * Copyright (c) 2009 Alexander Motin * Copyright (c) 2015-2017 Ilya Bakulin * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * Some code derived from the sys/dev/mmc and sys/cam/ata * Thanks to Warner Losh , Alexander Motin * Bernd Walter , and other authors. */ #include __FBSDID("$FreeBSD$"); //#include "opt_sdda.h" #include #ifdef _KERNEL #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* for PRIu64 */ #endif /* _KERNEL */ #ifndef _KERNEL #include #include #endif /* _KERNEL */ #include #include #include #include #include #include #include #include #include #include #include #include /* geometry translation */ #ifdef _KERNEL typedef enum { SDDA_FLAG_OPEN = 0x0002, SDDA_FLAG_DIRTY = 0x0004 } sdda_flags; typedef enum { SDDA_STATE_INIT, SDDA_STATE_INVALID, SDDA_STATE_NORMAL, SDDA_STATE_PART_SWITCH, } sdda_state; #define SDDA_FMT_BOOT "sdda%dboot" #define SDDA_FMT_GP "sdda%dgp" #define SDDA_FMT_RPMB "sdda%drpmb" #define SDDA_LABEL_ENH "enh" #define SDDA_PART_NAMELEN (16 + 1) struct sdda_softc; struct sdda_part { struct disk *disk; struct bio_queue_head bio_queue; sdda_flags flags; struct sdda_softc *sc; u_int cnt; u_int type; bool ro; char name[SDDA_PART_NAMELEN]; }; struct sdda_softc { int outstanding_cmds; /* Number of active commands */ int refcount; /* Active xpt_action() calls */ sdda_state state; struct mmc_data *mmcdata; struct cam_periph *periph; // sdda_quirks quirks; struct task start_init_task; uint32_t raw_csd[4]; uint8_t raw_ext_csd[512]; /* MMC only? 
*/ struct mmc_csd csd; struct mmc_cid cid; struct mmc_scr scr; /* Calculated from CSD */ uint64_t sector_count; uint64_t mediasize; /* Calculated from CID */ char card_id_string[64];/* Formatted CID info (serial, MFG, etc) */ char card_sn_string[16];/* Formatted serial # for disk->d_ident */ /* Determined from CSD + is highspeed card*/ uint32_t card_f_max; /* Generic switch timeout */ uint32_t cmd6_time; /* MMC partitions support */ struct sdda_part *part[MMC_PART_MAX]; uint8_t part_curr; /* Partition currently switched to */ uint8_t part_requested; /* What partition we're currently switching to */ uint32_t part_time; /* Partition switch timeout [us] */ off_t enh_base; /* Enhanced user data area slice base ... */ off_t enh_size; /* ... and size [bytes] */ int log_count; struct timeval log_time; }; #define ccb_bp ppriv_ptr1 static disk_strategy_t sddastrategy; static periph_init_t sddainit; static void sddaasync(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg); static periph_ctor_t sddaregister; static periph_dtor_t sddacleanup; static periph_start_t sddastart; static periph_oninv_t sddaoninvalidate; static void sddadone(struct cam_periph *periph, union ccb *done_ccb); static int sddaerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags); static uint16_t get_rca(struct cam_periph *periph); static void sdda_start_init(void *context, union ccb *start_ccb); static void sdda_start_init_task(void *context, int pending); static void sdda_process_mmc_partitions(struct cam_periph *periph, union ccb *start_ccb); static uint32_t sdda_get_host_caps(struct cam_periph *periph, union ccb *ccb); static void sdda_init_switch_part(struct cam_periph *periph, union ccb *start_ccb, u_int part); static int mmc_select_card(struct cam_periph *periph, union ccb *ccb, uint32_t rca); static inline uint32_t mmc_get_sector_size(struct cam_periph *periph) {return MMC_SECTOR_SIZE;} /* TODO: actually issue GET_TRAN_SETTINGS to get R/O status */ static inline bool sdda_get_read_only(struct cam_periph *periph, union ccb *start_ccb) { return (false); } static uint32_t mmc_get_spec_vers(struct cam_periph *periph); static uint64_t mmc_get_media_size(struct cam_periph *periph); static uint32_t mmc_get_cmd6_timeout(struct cam_periph *periph); static void sdda_add_part(struct cam_periph *periph, u_int type, const char *name, u_int cnt, off_t media_size, bool ro); static struct periph_driver sddadriver = { sddainit, "sdda", TAILQ_HEAD_INITIALIZER(sddadriver.units), /* generation */ 0 }; PERIPHDRIVER_DECLARE(sdda, sddadriver); static MALLOC_DEFINE(M_SDDA, "sd_da", "sd_da buffers"); static const int exp[8] = { 1, 10, 100, 1000, 10000, 100000, 1000000, 10000000 }; static const int mant[16] = { 0, 10, 12, 13, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60, 70, 80 }; static const int cur_min[8] = { 500, 1000, 5000, 10000, 25000, 35000, 60000, 100000 }; static const int cur_max[8] = { 1000, 5000, 10000, 25000, 35000, 45000, 800000, 200000 }; static uint16_t get_rca(struct cam_periph *periph) { return periph->path->device->mmc_ident_data.card_rca; } static uint32_t mmc_get_bits(uint32_t *bits, int bit_len, int start, int size) { const int i = (bit_len / 32) - (start / 32) - 1; const int shift = start & 31; uint32_t retval = bits[i] >> shift; if (size + shift > 32) retval |= bits[i - 1] << (32 - shift); return (retval & ((1llu << size) - 1)); } static void mmc_decode_csd_sd(uint32_t *raw_csd, struct mmc_csd *csd) { int v; int m; int e; memset(csd, 0, sizeof(*csd)); csd->csd_structure = v = mmc_get_bits(raw_csd, 
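/*
 * Worked example of the mmc_get_bits() helper above: the 2-bit
 * CSD_STRUCTURE field sits at CSD bits [127:126], so the call being built
 * here, mmc_get_bits(raw_csd, 128, 126, 2), computes
 * i = 128/32 - 126/32 - 1 = 0 and shift = 126 & 31 = 30, returning
 * (raw_csd[0] >> 30) & 0x3; raw_csd[0] is thus the most-significant word
 * of the register.  A field that straddles a word boundary, such as the
 * 12-bit C_SIZE read below with mmc_get_bits(raw_csd, 128, 62, 12),
 * additionally ORs in bits from raw_csd[i - 1] shifted left by
 * (32 - shift), since size + shift exceeds 32 there.
 */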
128, 126, 2); if (v == 0) { m = mmc_get_bits(raw_csd, 128, 115, 4); e = mmc_get_bits(raw_csd, 128, 112, 3); csd->tacc = (exp[e] * mant[m] + 9) / 10; csd->nsac = mmc_get_bits(raw_csd, 128, 104, 8) * 100; m = mmc_get_bits(raw_csd, 128, 99, 4); e = mmc_get_bits(raw_csd, 128, 96, 3); csd->tran_speed = exp[e] * 10000 * mant[m]; csd->ccc = mmc_get_bits(raw_csd, 128, 84, 12); csd->read_bl_len = 1 << mmc_get_bits(raw_csd, 128, 80, 4); csd->read_bl_partial = mmc_get_bits(raw_csd, 128, 79, 1); csd->write_blk_misalign = mmc_get_bits(raw_csd, 128, 78, 1); csd->read_blk_misalign = mmc_get_bits(raw_csd, 128, 77, 1); csd->dsr_imp = mmc_get_bits(raw_csd, 128, 76, 1); csd->vdd_r_curr_min = cur_min[mmc_get_bits(raw_csd, 128, 59, 3)]; csd->vdd_r_curr_max = cur_max[mmc_get_bits(raw_csd, 128, 56, 3)]; csd->vdd_w_curr_min = cur_min[mmc_get_bits(raw_csd, 128, 53, 3)]; csd->vdd_w_curr_max = cur_max[mmc_get_bits(raw_csd, 128, 50, 3)]; m = mmc_get_bits(raw_csd, 128, 62, 12); e = mmc_get_bits(raw_csd, 128, 47, 3); csd->capacity = ((1 + m) << (e + 2)) * csd->read_bl_len; csd->erase_blk_en = mmc_get_bits(raw_csd, 128, 46, 1); csd->erase_sector = mmc_get_bits(raw_csd, 128, 39, 7) + 1; csd->wp_grp_size = mmc_get_bits(raw_csd, 128, 32, 7); csd->wp_grp_enable = mmc_get_bits(raw_csd, 128, 31, 1); csd->r2w_factor = 1 << mmc_get_bits(raw_csd, 128, 26, 3); csd->write_bl_len = 1 << mmc_get_bits(raw_csd, 128, 22, 4); csd->write_bl_partial = mmc_get_bits(raw_csd, 128, 21, 1); } else if (v == 1) { m = mmc_get_bits(raw_csd, 128, 115, 4); e = mmc_get_bits(raw_csd, 128, 112, 3); csd->tacc = (exp[e] * mant[m] + 9) / 10; csd->nsac = mmc_get_bits(raw_csd, 128, 104, 8) * 100; m = mmc_get_bits(raw_csd, 128, 99, 4); e = mmc_get_bits(raw_csd, 128, 96, 3); csd->tran_speed = exp[e] * 10000 * mant[m]; csd->ccc = mmc_get_bits(raw_csd, 128, 84, 12); csd->read_bl_len = 1 << mmc_get_bits(raw_csd, 128, 80, 4); csd->read_bl_partial = mmc_get_bits(raw_csd, 128, 79, 1); csd->write_blk_misalign = mmc_get_bits(raw_csd, 128, 78, 1); csd->read_blk_misalign = mmc_get_bits(raw_csd, 128, 77, 1); csd->dsr_imp = mmc_get_bits(raw_csd, 128, 76, 1); csd->capacity = ((uint64_t)mmc_get_bits(raw_csd, 128, 48, 22) + 1) * 512 * 1024; csd->erase_blk_en = mmc_get_bits(raw_csd, 128, 46, 1); csd->erase_sector = mmc_get_bits(raw_csd, 128, 39, 7) + 1; csd->wp_grp_size = mmc_get_bits(raw_csd, 128, 32, 7); csd->wp_grp_enable = mmc_get_bits(raw_csd, 128, 31, 1); csd->r2w_factor = 1 << mmc_get_bits(raw_csd, 128, 26, 3); csd->write_bl_len = 1 << mmc_get_bits(raw_csd, 128, 22, 4); csd->write_bl_partial = mmc_get_bits(raw_csd, 128, 21, 1); } else panic("unknown SD CSD version"); } static void mmc_decode_csd_mmc(uint32_t *raw_csd, struct mmc_csd *csd) { int m; int e; memset(csd, 0, sizeof(*csd)); csd->csd_structure = mmc_get_bits(raw_csd, 128, 126, 2); csd->spec_vers = mmc_get_bits(raw_csd, 128, 122, 4); m = mmc_get_bits(raw_csd, 128, 115, 4); e = mmc_get_bits(raw_csd, 128, 112, 3); csd->tacc = exp[e] * mant[m] + 9 / 10; csd->nsac = mmc_get_bits(raw_csd, 128, 104, 8) * 100; m = mmc_get_bits(raw_csd, 128, 99, 4); e = mmc_get_bits(raw_csd, 128, 96, 3); csd->tran_speed = exp[e] * 10000 * mant[m]; csd->ccc = mmc_get_bits(raw_csd, 128, 84, 12); csd->read_bl_len = 1 << mmc_get_bits(raw_csd, 128, 80, 4); csd->read_bl_partial = mmc_get_bits(raw_csd, 128, 79, 1); csd->write_blk_misalign = mmc_get_bits(raw_csd, 128, 78, 1); csd->read_blk_misalign = mmc_get_bits(raw_csd, 128, 77, 1); csd->dsr_imp = mmc_get_bits(raw_csd, 128, 76, 1); csd->vdd_r_curr_min = cur_min[mmc_get_bits(raw_csd, 128, 
59, 3)]; csd->vdd_r_curr_max = cur_max[mmc_get_bits(raw_csd, 128, 56, 3)]; csd->vdd_w_curr_min = cur_min[mmc_get_bits(raw_csd, 128, 53, 3)]; csd->vdd_w_curr_max = cur_max[mmc_get_bits(raw_csd, 128, 50, 3)]; m = mmc_get_bits(raw_csd, 128, 62, 12); e = mmc_get_bits(raw_csd, 128, 47, 3); csd->capacity = ((1 + m) << (e + 2)) * csd->read_bl_len; csd->erase_blk_en = 0; csd->erase_sector = (mmc_get_bits(raw_csd, 128, 42, 5) + 1) * (mmc_get_bits(raw_csd, 128, 37, 5) + 1); csd->wp_grp_size = mmc_get_bits(raw_csd, 128, 32, 5); csd->wp_grp_enable = mmc_get_bits(raw_csd, 128, 31, 1); csd->r2w_factor = 1 << mmc_get_bits(raw_csd, 128, 26, 3); csd->write_bl_len = 1 << mmc_get_bits(raw_csd, 128, 22, 4); csd->write_bl_partial = mmc_get_bits(raw_csd, 128, 21, 1); } static void mmc_decode_cid_sd(uint32_t *raw_cid, struct mmc_cid *cid) { int i; /* There's no version info, so we take it on faith */ memset(cid, 0, sizeof(*cid)); cid->mid = mmc_get_bits(raw_cid, 128, 120, 8); cid->oid = mmc_get_bits(raw_cid, 128, 104, 16); for (i = 0; i < 5; i++) cid->pnm[i] = mmc_get_bits(raw_cid, 128, 96 - i * 8, 8); cid->pnm[5] = 0; cid->prv = mmc_get_bits(raw_cid, 128, 56, 8); cid->psn = mmc_get_bits(raw_cid, 128, 24, 32); cid->mdt_year = mmc_get_bits(raw_cid, 128, 12, 8) + 2000; cid->mdt_month = mmc_get_bits(raw_cid, 128, 8, 4); } static void mmc_decode_cid_mmc(uint32_t *raw_cid, struct mmc_cid *cid) { int i; /* There's no version info, so we take it on faith */ memset(cid, 0, sizeof(*cid)); cid->mid = mmc_get_bits(raw_cid, 128, 120, 8); cid->oid = mmc_get_bits(raw_cid, 128, 104, 8); for (i = 0; i < 6; i++) cid->pnm[i] = mmc_get_bits(raw_cid, 128, 96 - i * 8, 8); cid->pnm[6] = 0; cid->prv = mmc_get_bits(raw_cid, 128, 48, 8); cid->psn = mmc_get_bits(raw_cid, 128, 16, 32); cid->mdt_month = mmc_get_bits(raw_cid, 128, 12, 4); cid->mdt_year = mmc_get_bits(raw_cid, 128, 8, 4) + 1997; } static void mmc_format_card_id_string(struct sdda_softc *sc, struct mmc_params *mmcp) { char oidstr[8]; uint8_t c1; uint8_t c2; /* * Format a card ID string for use by the mmcsd driver, it's what * appears between the <> in the following: * mmcsd0: 968MB at mmc0 * 22.5MHz/4bit/128-block * * Also format just the card serial number, which the mmcsd driver will * use as the disk->d_ident string. * * The card_id_string in mmc_ivars is currently allocated as 64 bytes, * and our max formatted length is currently 55 bytes if every field * contains the largest value. * * Sometimes the oid is two printable ascii chars; when it's not, * format it as 0xnnnn instead. */ c1 = (sc->cid.oid >> 8) & 0x0ff; c2 = sc->cid.oid & 0x0ff; if (c1 > 0x1f && c1 < 0x7f && c2 > 0x1f && c2 < 0x7f) snprintf(oidstr, sizeof(oidstr), "%c%c", c1, c2); else snprintf(oidstr, sizeof(oidstr), "0x%04x", sc->cid.oid); snprintf(sc->card_sn_string, sizeof(sc->card_sn_string), "%08X", sc->cid.psn); snprintf(sc->card_id_string, sizeof(sc->card_id_string), "%s%s %s %d.%d SN %08X MFG %02d/%04d by %d %s", mmcp->card_features & CARD_FEATURE_MMC ? "MMC" : "SD", mmcp->card_features & CARD_FEATURE_SDHC ? 
"HC" : "", sc->cid.pnm, sc->cid.prv >> 4, sc->cid.prv & 0x0f, sc->cid.psn, sc->cid.mdt_month, sc->cid.mdt_year, sc->cid.mid, oidstr); } static int sddaopen(struct disk *dp) { struct sdda_part *part; struct cam_periph *periph; struct sdda_softc *softc; int error; part = (struct sdda_part *)dp->d_drv1; softc = part->sc; periph = softc->periph; if (cam_periph_acquire(periph) != 0) { return(ENXIO); } cam_periph_lock(periph); if ((error = cam_periph_hold(periph, PRIBIO|PCATCH)) != 0) { cam_periph_unlock(periph); cam_periph_release(periph); return (error); } CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("sddaopen\n")); part->flags |= SDDA_FLAG_OPEN; cam_periph_unhold(periph); cam_periph_unlock(periph); return (0); } static int sddaclose(struct disk *dp) { struct sdda_part *part; struct cam_periph *periph; struct sdda_softc *softc; part = (struct sdda_part *)dp->d_drv1; softc = part->sc; periph = softc->periph; part->flags &= ~SDDA_FLAG_OPEN; cam_periph_lock(periph); CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("sddaclose\n")); while (softc->refcount != 0) cam_periph_sleep(periph, &softc->refcount, PRIBIO, "sddaclose", 1); cam_periph_unlock(periph); cam_periph_release(periph); return (0); } static void sddaschedule(struct cam_periph *periph) { struct sdda_softc *softc = (struct sdda_softc *)periph->softc; struct sdda_part *part; struct bio *bp; int i; /* Check if we have more work to do. */ /* Find partition that has outstanding commands. Prefer current partition. */ bp = bioq_first(&softc->part[softc->part_curr]->bio_queue); if (bp == NULL) { for (i = 0; i < MMC_PART_MAX; i++) { if ((part = softc->part[i]) != NULL && (bp = bioq_first(&softc->part[i]->bio_queue)) != NULL) break; } } if (bp != NULL) { xpt_schedule(periph, CAM_PRIORITY_NORMAL); } } /* * Actually translate the requested transfer into one the physical driver * can understand. The transfer is described by a buf and will include * only one physical transfer. */ static void sddastrategy(struct bio *bp) { struct cam_periph *periph; struct sdda_part *part; struct sdda_softc *softc; part = (struct sdda_part *)bp->bio_disk->d_drv1; softc = part->sc; periph = softc->periph; cam_periph_lock(periph); CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("sddastrategy(%p)\n", bp)); /* * If the device has been made invalid, error out */ if ((periph->flags & CAM_PERIPH_INVALID) != 0) { cam_periph_unlock(periph); biofinish(bp, NULL, ENXIO); return; } /* * Place it in the queue of disk activities for this disk */ bioq_disksort(&part->bio_queue, bp); /* * Schedule ourselves for performing the work. */ sddaschedule(periph); cam_periph_unlock(periph); return; } static void sddainit(void) { cam_status status; /* * Install a global async callback. This callback will * receive async callbacks like "new device found". */ status = xpt_register_async(AC_FOUND_DEVICE, sddaasync, NULL, NULL); if (status != CAM_REQ_CMP) { printf("sdda: Failed to attach master async callback " "due to status 0x%x!\n", status); } } /* * Callback from GEOM, called when it has finished cleaning up its * resources. 
*/ static void sddadiskgonecb(struct disk *dp) { struct cam_periph *periph; struct sdda_part *part; part = (struct sdda_part *)dp->d_drv1; periph = part->sc->periph; CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("sddadiskgonecb\n")); cam_periph_release(periph); } static void sddaoninvalidate(struct cam_periph *periph) { struct sdda_softc *softc; struct sdda_part *part; softc = (struct sdda_softc *)periph->softc; CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("sddaoninvalidate\n")); /* * De-register any async callbacks. */ xpt_register_async(0, sddaasync, periph, periph->path); /* * Return all queued I/O with ENXIO. * XXX Handle any transactions queued to the card * with XPT_ABORT_CCB. */ CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("bioq_flush start\n")); for (int i = 0; i < MMC_PART_MAX; i++) { if ((part = softc->part[i]) != NULL) { bioq_flush(&part->bio_queue, NULL, ENXIO); disk_gone(part->disk); } } CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("bioq_flush end\n")); } static void sddacleanup(struct cam_periph *periph) { struct sdda_softc *softc; struct sdda_part *part; int i; CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("sddacleanup\n")); softc = (struct sdda_softc *)periph->softc; cam_periph_unlock(periph); for (i = 0; i < MMC_PART_MAX; i++) { if ((part = softc->part[i]) != NULL) { disk_destroy(part->disk); free(part, M_DEVBUF); softc->part[i] = NULL; } } free(softc, M_DEVBUF); cam_periph_lock(periph); } static void sddaasync(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg) { struct ccb_getdev cgd; struct cam_periph *periph; struct sdda_softc *softc; periph = (struct cam_periph *)callback_arg; CAM_DEBUG(path, CAM_DEBUG_TRACE, ("sddaasync(code=%d)\n", code)); switch (code) { case AC_FOUND_DEVICE: { CAM_DEBUG(path, CAM_DEBUG_TRACE, ("=> AC_FOUND_DEVICE\n")); struct ccb_getdev *cgd; cam_status status; cgd = (struct ccb_getdev *)arg; if (cgd == NULL) break; if (cgd->protocol != PROTO_MMCSD) break; if (!(path->device->mmc_ident_data.card_features & CARD_FEATURE_MEMORY)) { CAM_DEBUG(path, CAM_DEBUG_TRACE, ("No memory on the card!\n")); break; } /* * Allocate a peripheral instance for * this device and start the probe * process. 
*/ status = cam_periph_alloc(sddaregister, sddaoninvalidate, sddacleanup, sddastart, "sdda", CAM_PERIPH_BIO, path, sddaasync, AC_FOUND_DEVICE, cgd); if (status != CAM_REQ_CMP && status != CAM_REQ_INPROG) printf("sddaasync: Unable to attach to new device " "due to status 0x%x\n", status); break; } case AC_GETDEV_CHANGED: { CAM_DEBUG(path, CAM_DEBUG_TRACE, ("=> AC_GETDEV_CHANGED\n")); softc = (struct sdda_softc *)periph->softc; xpt_setup_ccb(&cgd.ccb_h, periph->path, CAM_PRIORITY_NORMAL); cgd.ccb_h.func_code = XPT_GDEV_TYPE; xpt_action((union ccb *)&cgd); cam_periph_async(periph, code, path, arg); break; } case AC_ADVINFO_CHANGED: { uintptr_t buftype; int i; CAM_DEBUG(path, CAM_DEBUG_TRACE, ("=> AC_ADVINFO_CHANGED\n")); buftype = (uintptr_t)arg; if (buftype == CDAI_TYPE_PHYS_PATH) { struct sdda_softc *softc; struct sdda_part *part; softc = periph->softc; for (i = 0; i < MMC_PART_MAX; i++) { if ((part = softc->part[i]) != NULL) { disk_attr_changed(part->disk, "GEOM::physpath", M_NOWAIT); } } } break; } default: CAM_DEBUG(path, CAM_DEBUG_TRACE, ("=> default?!\n")); cam_periph_async(periph, code, path, arg); break; } } static int sddagetattr(struct bio *bp) { struct cam_periph *periph; struct sdda_softc *softc; struct sdda_part *part; int ret; part = (struct sdda_part *)bp->bio_disk->d_drv1; softc = part->sc; periph = softc->periph; cam_periph_lock(periph); ret = xpt_getattr(bp->bio_data, bp->bio_length, bp->bio_attribute, periph->path); cam_periph_unlock(periph); if (ret == 0) bp->bio_completed = bp->bio_length; return (ret); } static cam_status sddaregister(struct cam_periph *periph, void *arg) { struct sdda_softc *softc; struct ccb_getdev *cgd; union ccb *request_ccb; /* CCB representing the probe request */ CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("sddaregister\n")); cgd = (struct ccb_getdev *)arg; if (cgd == NULL) { printf("sddaregister: no getdev CCB, can't register device\n"); return (CAM_REQ_CMP_ERR); } softc = (struct sdda_softc *)malloc(sizeof(*softc), M_DEVBUF, M_NOWAIT|M_ZERO); if (softc == NULL) { printf("sddaregister: Unable to probe new device. 
" "Unable to allocate softc\n"); return (CAM_REQ_CMP_ERR); } softc->state = SDDA_STATE_INIT; softc->mmcdata = (struct mmc_data *)malloc(sizeof(struct mmc_data), M_DEVBUF, M_NOWAIT|M_ZERO); periph->softc = softc; softc->periph = periph; request_ccb = (union ccb*) arg; xpt_schedule(periph, CAM_PRIORITY_XPT); TASK_INIT(&softc->start_init_task, 0, sdda_start_init_task, periph); taskqueue_enqueue(taskqueue_thread, &softc->start_init_task); return (CAM_REQ_CMP); } static int mmc_exec_app_cmd(struct cam_periph *periph, union ccb *ccb, struct mmc_command *cmd) { int err; /* Send APP_CMD first */ memset(&ccb->mmcio.cmd, 0, sizeof(struct mmc_command)); memset(&ccb->mmcio.stop, 0, sizeof(struct mmc_command)); cam_fill_mmcio(&ccb->mmcio, /*retries*/ 0, /*cbfcnp*/ NULL, /*flags*/ CAM_DIR_NONE, /*mmc_opcode*/ MMC_APP_CMD, /*mmc_arg*/ get_rca(periph) << 16, /*mmc_flags*/ MMC_RSP_R1 | MMC_CMD_AC, /*mmc_data*/ NULL, /*timeout*/ 0); err = cam_periph_runccb(ccb, sddaerror, CAM_FLAG_NONE, /*sense_flags*/0, NULL); if (err != 0) return err; if (!(ccb->mmcio.cmd.resp[0] & R1_APP_CMD)) return MMC_ERR_FAILED; /* Now exec actual command */ int flags = 0; if (cmd->data != NULL) { ccb->mmcio.cmd.data = cmd->data; if (cmd->data->flags & MMC_DATA_READ) flags |= CAM_DIR_IN; if (cmd->data->flags & MMC_DATA_WRITE) flags |= CAM_DIR_OUT; } else flags = CAM_DIR_NONE; cam_fill_mmcio(&ccb->mmcio, /*retries*/ 0, /*cbfcnp*/ NULL, /*flags*/ flags, /*mmc_opcode*/ cmd->opcode, /*mmc_arg*/ cmd->arg, /*mmc_flags*/ cmd->flags, /*mmc_data*/ cmd->data, /*timeout*/ 0); err = cam_periph_runccb(ccb, sddaerror, CAM_FLAG_NONE, /*sense_flags*/0, NULL); memcpy(cmd->resp, ccb->mmcio.cmd.resp, sizeof(cmd->resp)); cmd->error = ccb->mmcio.cmd.error; if (err != 0) return err; return 0; } static int mmc_app_get_scr(struct cam_periph *periph, union ccb *ccb, uint32_t *rawscr) { int err; struct mmc_command cmd; struct mmc_data d; memset(&cmd, 0, sizeof(cmd)); memset(&d, 0, sizeof(d)); memset(rawscr, 0, 8); cmd.opcode = ACMD_SEND_SCR; cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC; cmd.arg = 0; d.data = rawscr; d.len = 8; d.flags = MMC_DATA_READ; cmd.data = &d; err = mmc_exec_app_cmd(periph, ccb, &cmd); rawscr[0] = be32toh(rawscr[0]); rawscr[1] = be32toh(rawscr[1]); return (err); } static int mmc_send_ext_csd(struct cam_periph *periph, union ccb *ccb, uint8_t *rawextcsd, size_t buf_len) { int err; struct mmc_data d; KASSERT(buf_len == 512, ("Buffer for ext csd must be 512 bytes")); d.data = rawextcsd; d.len = buf_len; d.flags = MMC_DATA_READ; memset(d.data, 0, d.len); cam_fill_mmcio(&ccb->mmcio, /*retries*/ 0, /*cbfcnp*/ NULL, /*flags*/ CAM_DIR_IN, /*mmc_opcode*/ MMC_SEND_EXT_CSD, /*mmc_arg*/ 0, /*mmc_flags*/ MMC_RSP_R1 | MMC_CMD_ADTC, /*mmc_data*/ &d, /*timeout*/ 0); err = cam_periph_runccb(ccb, sddaerror, CAM_FLAG_NONE, /*sense_flags*/0, NULL); if (err != 0) return (err); return (MMC_ERR_NONE); } static void mmc_app_decode_scr(uint32_t *raw_scr, struct mmc_scr *scr) { unsigned int scr_struct; memset(scr, 0, sizeof(*scr)); scr_struct = mmc_get_bits(raw_scr, 64, 60, 4); if (scr_struct != 0) { printf("Unrecognised SCR structure version %d\n", scr_struct); return; } scr->sda_vsn = mmc_get_bits(raw_scr, 64, 56, 4); scr->bus_widths = mmc_get_bits(raw_scr, 64, 48, 4); } static inline void mmc_switch_fill_mmcio(union ccb *ccb, uint8_t set, uint8_t index, uint8_t value, u_int timeout) { int arg = (MMC_SWITCH_FUNC_WR << 24) | (index << 16) | (value << 8) | set; cam_fill_mmcio(&ccb->mmcio, /*retries*/ 0, /*cbfcnp*/ NULL, /*flags*/ CAM_DIR_NONE, /*mmc_opcode*/ 
MMC_SWITCH_FUNC, /*mmc_arg*/ arg, /*mmc_flags*/ MMC_RSP_R1B | MMC_CMD_AC, /*mmc_data*/ NULL, /*timeout*/ timeout); } static int mmc_select_card(struct cam_periph *periph, union ccb *ccb, uint32_t rca) { int flags; flags = (rca ? MMC_RSP_R1B : MMC_RSP_NONE) | MMC_CMD_AC; cam_fill_mmcio(&ccb->mmcio, /*retries*/ 0, /*cbfcnp*/ NULL, /*flags*/ CAM_DIR_IN, /*mmc_opcode*/ MMC_SELECT_CARD, /*mmc_arg*/ rca << 16, /*mmc_flags*/ flags, /*mmc_data*/ NULL, /*timeout*/ 0); cam_periph_runccb(ccb, sddaerror, CAM_FLAG_NONE, /*sense_flags*/0, NULL); if (((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP)) { if (ccb->mmcio.cmd.error != 0) { CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_PERIPH, ("%s: MMC_SELECT command failed", __func__)); return EIO; } return 0; /* Normal return */ } else { CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_PERIPH, ("%s: CAM request failed\n", __func__)); return EIO; } } static int mmc_switch(struct cam_periph *periph, union ccb *ccb, uint8_t set, uint8_t index, uint8_t value, u_int timeout) { mmc_switch_fill_mmcio(ccb, set, index, value, timeout); cam_periph_runccb(ccb, sddaerror, CAM_FLAG_NONE, /*sense_flags*/0, NULL); if (((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP)) { if (ccb->mmcio.cmd.error != 0) { CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_PERIPH, ("%s: MMC command failed", __func__)); return (EIO); } return (0); /* Normal return */ } else { CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_PERIPH, ("%s: CAM request failed\n", __func__)); return (EIO); } } static uint32_t mmc_get_spec_vers(struct cam_periph *periph) { struct sdda_softc *softc = (struct sdda_softc *)periph->softc; return (softc->csd.spec_vers); } static uint64_t mmc_get_media_size(struct cam_periph *periph) { struct sdda_softc *softc = (struct sdda_softc *)periph->softc; return (softc->mediasize); } static uint32_t mmc_get_cmd6_timeout(struct cam_periph *periph) { struct sdda_softc *softc = (struct sdda_softc *)periph->softc; if (mmc_get_spec_vers(periph) >= 6) return (softc->raw_ext_csd[EXT_CSD_GEN_CMD6_TIME] * 10); return (500 * 1000); } static int mmc_sd_switch(struct cam_periph *periph, union ccb *ccb, uint8_t mode, uint8_t grp, uint8_t value, uint8_t *res) { struct mmc_data mmc_d; uint32_t arg; memset(res, 0, 64); mmc_d.len = 64; mmc_d.data = res; mmc_d.flags = MMC_DATA_READ; arg = mode << 31; /* 0 - check, 1 - set */ arg |= 0x00FFFFFF; arg &= ~(0xF << (grp * 4)); arg |= value << (grp * 4); cam_fill_mmcio(&ccb->mmcio, /*retries*/ 0, /*cbfcnp*/ NULL, /*flags*/ CAM_DIR_IN, /*mmc_opcode*/ SD_SWITCH_FUNC, /*mmc_arg*/ arg, /*mmc_flags*/ MMC_RSP_R1 | MMC_CMD_ADTC, /*mmc_data*/ &mmc_d, /*timeout*/ 0); cam_periph_runccb(ccb, sddaerror, CAM_FLAG_NONE, /*sense_flags*/0, NULL); if (((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP)) { if (ccb->mmcio.cmd.error != 0) { CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_PERIPH, ("%s: MMC command failed", __func__)); return EIO; } return 0; /* Normal return */ } else { CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_PERIPH, ("%s: CAM request failed\n", __func__)); return EIO; } } static int mmc_set_timing(struct cam_periph *periph, union ccb *ccb, enum mmc_bus_timing timing) { u_char switch_res[64]; int err; uint8_t value; struct sdda_softc *softc = (struct sdda_softc *)periph->softc; struct mmc_params *mmcp = &periph->path->device->mmc_ident_data; CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("mmc_set_timing(timing=%d)", timing)); switch (timing) { case bus_timing_normal: value = 0; break; case bus_timing_hs: value = 1; break; default: return (MMC_ERR_INVALID); } if (mmcp->card_features & CARD_FEATURE_MMC) { 
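/*
 * For reference, the two SWITCH argument encodings used by this function
 * when switching to high-speed timing (value = 1): the MMC branch directly
 * below calls mmc_switch() with EXT_CSD_CMD_SET_NORMAL / EXT_CSD_HS_TIMING,
 * which mmc_switch_fill_mmcio() packs as
 * (MMC_SWITCH_FUNC_WR << 24) | (EXT_CSD_HS_TIMING << 16) | (1 << 8) |
 * EXT_CSD_CMD_SET_NORMAL.  The SD else-branch calls mmc_sd_switch() with
 * SD_SWITCH_MODE_SET, group 1 and value 1; assuming SD_SWITCH_GROUP1 is
 * group index 0, that yields arg = (1u << 31) | 0x00FFFFF1, i.e. every
 * function group left at 0xf ("no change") except group 1, which selects
 * the high-speed function.
 */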
err = mmc_switch(periph, ccb, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING, value, softc->cmd6_time); } else { err = mmc_sd_switch(periph, ccb, SD_SWITCH_MODE_SET, SD_SWITCH_GROUP1, value, switch_res); } /* Set high-speed timing on the host */ struct ccb_trans_settings_mmc *cts; cts = &ccb->cts.proto_specific.mmc; ccb->ccb_h.func_code = XPT_SET_TRAN_SETTINGS; ccb->ccb_h.flags = CAM_DIR_NONE; ccb->ccb_h.retry_count = 0; ccb->ccb_h.timeout = 100; ccb->ccb_h.cbfcnp = NULL; cts->ios.timing = timing; cts->ios_valid = MMC_BT; xpt_action(ccb); return (err); } static void sdda_start_init_task(void *context, int pending) { union ccb *new_ccb; struct cam_periph *periph; periph = (struct cam_periph *)context; CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("sdda_start_init_task\n")); new_ccb = xpt_alloc_ccb(); xpt_setup_ccb(&new_ccb->ccb_h, periph->path, CAM_PRIORITY_NONE); cam_periph_lock(periph); sdda_start_init(context, new_ccb); cam_periph_unlock(periph); xpt_free_ccb(new_ccb); } static void sdda_set_bus_width(struct cam_periph *periph, union ccb *ccb, int width) { struct sdda_softc *softc = (struct sdda_softc *)periph->softc; struct mmc_params *mmcp = &periph->path->device->mmc_ident_data; int err; CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("sdda_set_bus_width\n")); /* First set for the card, then for the host */ if (mmcp->card_features & CARD_FEATURE_MMC) { uint8_t value; switch (width) { case bus_width_1: value = EXT_CSD_BUS_WIDTH_1; break; case bus_width_4: value = EXT_CSD_BUS_WIDTH_4; break; case bus_width_8: value = EXT_CSD_BUS_WIDTH_8; break; default: panic("Invalid bus width %d", width); } err = mmc_switch(periph, ccb, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BUS_WIDTH, value, softc->cmd6_time); } else { /* For SD cards we send ACMD6 with the required bus width in arg */ struct mmc_command cmd; memset(&cmd, 0, sizeof(struct mmc_command)); cmd.opcode = ACMD_SET_BUS_WIDTH; cmd.arg = width; cmd.flags = MMC_RSP_R1 | MMC_CMD_AC; err = mmc_exec_app_cmd(periph, ccb, &cmd); } if (err != MMC_ERR_NONE) { CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH, ("Error %d when setting bus width on the card\n", err)); return; } /* Now card is done, set the host to the same width */ struct ccb_trans_settings_mmc *cts; cts = &ccb->cts.proto_specific.mmc; ccb->ccb_h.func_code = XPT_SET_TRAN_SETTINGS; ccb->ccb_h.flags = CAM_DIR_NONE; ccb->ccb_h.retry_count = 0; ccb->ccb_h.timeout = 100; ccb->ccb_h.cbfcnp = NULL; cts->ios.bus_width = width; cts->ios_valid = MMC_BW; xpt_action(ccb); } static inline const char *part_type(u_int type) { switch (type) { case EXT_CSD_PART_CONFIG_ACC_RPMB: return ("RPMB"); case EXT_CSD_PART_CONFIG_ACC_DEFAULT: return ("default"); case EXT_CSD_PART_CONFIG_ACC_BOOT0: return ("boot0"); case EXT_CSD_PART_CONFIG_ACC_BOOT1: return ("boot1"); case EXT_CSD_PART_CONFIG_ACC_GP0: case EXT_CSD_PART_CONFIG_ACC_GP1: case EXT_CSD_PART_CONFIG_ACC_GP2: case EXT_CSD_PART_CONFIG_ACC_GP3: return ("general purpose"); default: return ("(unknown type)"); } } static inline const char *bus_width_str(enum mmc_bus_width w) { switch (w) { case bus_width_1: return ("1-bit"); case bus_width_4: return ("4-bit"); case bus_width_8: return ("8-bit"); } } static uint32_t sdda_get_host_caps(struct cam_periph *periph, union ccb *ccb) { struct ccb_trans_settings_mmc *cts; cts = &ccb->cts.proto_specific.mmc; ccb->ccb_h.func_code = XPT_GET_TRAN_SETTINGS; ccb->ccb_h.flags = CAM_DIR_NONE; ccb->ccb_h.retry_count = 0; ccb->ccb_h.timeout = 100; ccb->ccb_h.cbfcnp = NULL; xpt_action(ccb); if (ccb->ccb_h.status != CAM_REQ_CMP) panic("Cannot get host caps"); 
return (cts->host_caps); } +static uint32_t +sdda_get_max_data(struct cam_periph *periph, union ccb *ccb) +{ + struct ccb_trans_settings_mmc *cts; + + cts = &ccb->cts.proto_specific.mmc; + memset(cts, 0, sizeof(struct ccb_trans_settings_mmc)); + + ccb->ccb_h.func_code = XPT_GET_TRAN_SETTINGS; + ccb->ccb_h.flags = CAM_DIR_NONE; + ccb->ccb_h.retry_count = 0; + ccb->ccb_h.timeout = 100; + ccb->ccb_h.cbfcnp = NULL; + xpt_action(ccb); + + if (ccb->ccb_h.status != CAM_REQ_CMP) + panic("Cannot get host max data"); + KASSERT(cts->host_max_data != 0, ("host_max_data == 0?!")); + return (cts->host_max_data); +} + static void sdda_start_init(void *context, union ccb *start_ccb) { struct cam_periph *periph = (struct cam_periph *)context; struct ccb_trans_settings_mmc *cts; uint32_t host_caps; uint32_t sec_count; int err; int host_f_max; CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("sdda_start_init\n")); /* periph was held for us when this task was enqueued */ if ((periph->flags & CAM_PERIPH_INVALID) != 0) { cam_periph_release(periph); return; } struct sdda_softc *softc = (struct sdda_softc *)periph->softc; //struct ccb_mmcio *mmcio = &start_ccb->mmcio; struct mmc_params *mmcp = &periph->path->device->mmc_ident_data; struct cam_ed *device = periph->path->device; if (mmcp->card_features & CARD_FEATURE_MMC) { mmc_decode_csd_mmc(mmcp->card_csd, &softc->csd); mmc_decode_cid_mmc(mmcp->card_cid, &softc->cid); if (mmc_get_spec_vers(periph) >= 4) { err = mmc_send_ext_csd(periph, start_ccb, (uint8_t *)&softc->raw_ext_csd, sizeof(softc->raw_ext_csd)); if (err != 0) { CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH, ("Cannot read EXT_CSD, err %d", err)); return; } } } else { mmc_decode_csd_sd(mmcp->card_csd, &softc->csd); mmc_decode_cid_sd(mmcp->card_cid, &softc->cid); } softc->sector_count = softc->csd.capacity / 512; softc->mediasize = softc->csd.capacity; softc->cmd6_time = mmc_get_cmd6_timeout(periph); /* MMC >= 4.x have EXT_CSD that has its own opinion about capacity */ if (mmc_get_spec_vers(periph) >= 4) { sec_count = softc->raw_ext_csd[EXT_CSD_SEC_CNT] + (softc->raw_ext_csd[EXT_CSD_SEC_CNT + 1] << 8) + (softc->raw_ext_csd[EXT_CSD_SEC_CNT + 2] << 16) + (softc->raw_ext_csd[EXT_CSD_SEC_CNT + 3] << 24); if (sec_count != 0) { softc->sector_count = sec_count; softc->mediasize = softc->sector_count * 512; /* FIXME: there should be a better name for this option...*/ mmcp->card_features |= CARD_FEATURE_SDHC; } } CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH, ("Capacity: %"PRIu64", sectors: %"PRIu64"\n", softc->mediasize, softc->sector_count)); mmc_format_card_id_string(softc, mmcp); /* Update info for CAM */ device->serial_num_len = strlen(softc->card_sn_string); device->serial_num = (u_int8_t *)malloc((device->serial_num_len + 1), M_CAMXPT, M_NOWAIT); strlcpy(device->serial_num, softc->card_sn_string, device->serial_num_len); device->device_id_len = strlen(softc->card_id_string); device->device_id = (u_int8_t *)malloc((device->device_id_len + 1), M_CAMXPT, M_NOWAIT); strlcpy(device->device_id, softc->card_id_string, device->device_id_len); strlcpy(mmcp->model, softc->card_id_string, sizeof(mmcp->model)); /* Set the clock frequency that the card can handle */ cts = &start_ccb->cts.proto_specific.mmc; /* First, get the host's max freq */ start_ccb->ccb_h.func_code = XPT_GET_TRAN_SETTINGS; start_ccb->ccb_h.flags = CAM_DIR_NONE; start_ccb->ccb_h.retry_count = 0; start_ccb->ccb_h.timeout = 100; start_ccb->ccb_h.cbfcnp = NULL; xpt_action(start_ccb); if (start_ccb->ccb_h.status != CAM_REQ_CMP) panic("Cannot get max host freq"); 
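/*
 * sdda_get_max_data() reports the SIM's per-request limit in sectors;
 * sdda_add_part() multiplies it by the sector size before clamping
 * against MAXPHYS.  A hypothetical worked example: a controller whose DMA
 * engine can chain 16384 sectors per request would return 16384 here, and
 * d_maxsize would become MIN(MAXPHYS, 16384 * 512), i.e. MAXPHYS (128 KiB
 * by default), so the reported limit only ends up mattering for hosts
 * that cannot reach MAXPHYS in a single transfer.
 */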
host_f_max = cts->host_f_max; host_caps = cts->host_caps; if (cts->ios.bus_width != bus_width_1) panic("Bus width in ios is not 1-bit"); /* Now check if the card supports High-speed */ softc->card_f_max = softc->csd.tran_speed; if (host_caps & MMC_CAP_HSPEED) { /* Find out if the card supports High speed timing */ if (mmcp->card_features & CARD_FEATURE_SD20) { /* Get and decode SCR */ uint32_t rawscr[2]; uint8_t res[64]; if (mmc_app_get_scr(periph, start_ccb, rawscr)) { CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH, ("Cannot get SCR\n")); goto finish_hs_tests; } mmc_app_decode_scr(rawscr, &softc->scr); if ((softc->scr.sda_vsn >= 1) && (softc->csd.ccc & (1<<10))) { mmc_sd_switch(periph, start_ccb, SD_SWITCH_MODE_CHECK, SD_SWITCH_GROUP1, SD_SWITCH_NOCHANGE, res); if (res[13] & 2) { CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH, ("Card supports HS\n")); softc->card_f_max = SD_HS_MAX; } /* * We deselect then reselect the card here. Some cards * become unselected and timeout with the above two * commands, although the state tables / diagrams in the * standard suggest they go back to the transfer state. * Other cards don't become deselected, and if we * attempt to blindly re-select them, we get timeout * errors from some controllers. So we deselect then * reselect to handle all situations. */ mmc_select_card(periph, start_ccb, 0); mmc_select_card(periph, start_ccb, get_rca(periph)); } else { CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH, ("Not trying the switch\n")); goto finish_hs_tests; } } if (mmcp->card_features & CARD_FEATURE_MMC && mmc_get_spec_vers(periph) >= 4) { if (softc->raw_ext_csd[EXT_CSD_CARD_TYPE] & EXT_CSD_CARD_TYPE_HS_52) softc->card_f_max = MMC_TYPE_HS_52_MAX; else if (softc->raw_ext_csd[EXT_CSD_CARD_TYPE] & EXT_CSD_CARD_TYPE_HS_26) softc->card_f_max = MMC_TYPE_HS_26_MAX; } } int f_max; finish_hs_tests: f_max = min(host_f_max, softc->card_f_max); CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH, ("Set SD freq to %d MHz (min out of host f=%d MHz and card f=%d MHz)\n", f_max / 1000000, host_f_max / 1000000, softc->card_f_max / 1000000)); /* Enable high-speed timing on the card */ if (f_max > 25000000) { err = mmc_set_timing(periph, start_ccb, bus_timing_hs); if (err != MMC_ERR_NONE) { CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("Cannot switch card to high-speed mode")); f_max = 25000000; } } /* Set frequency on the controller */ start_ccb->ccb_h.func_code = XPT_SET_TRAN_SETTINGS; start_ccb->ccb_h.flags = CAM_DIR_NONE; start_ccb->ccb_h.retry_count = 0; start_ccb->ccb_h.timeout = 100; start_ccb->ccb_h.cbfcnp = NULL; cts->ios.clock = f_max; cts->ios_valid = MMC_CLK; xpt_action(start_ccb); /* Set bus width */ enum mmc_bus_width desired_bus_width = bus_width_1; enum mmc_bus_width max_host_bus_width = (host_caps & MMC_CAP_8_BIT_DATA ? bus_width_8 : host_caps & MMC_CAP_4_BIT_DATA ? bus_width_4 : bus_width_1); enum mmc_bus_width max_card_bus_width = bus_width_1; if (mmcp->card_features & CARD_FEATURE_SD20 && softc->scr.bus_widths & SD_SCR_BUS_WIDTH_4) max_card_bus_width = bus_width_4; /* * Unlike SD, MMC cards don't have any information about supported bus width... * So we need to perform read/write test to find out the width. 
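 * For now, the selection below simply takes the smaller of the two sides:
 * an eMMC device (treated as 8-bit capable for the moment) behind a host
 * that only advertises MMC_CAP_4_BIT_DATA ends up at
 * min(bus_width_4, bus_width_8) = bus_width_4, while an SD card whose SCR
 * does not set SD_SCR_BUS_WIDTH_4 stays at bus_width_1 no matter what the
 * host supports.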
*/ /* TODO: figure out bus width for MMC; use 8-bit for now (to test on BBB) */ if (mmcp->card_features & CARD_FEATURE_MMC) max_card_bus_width = bus_width_8; desired_bus_width = min(max_host_bus_width, max_card_bus_width); CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH, ("Set bus width to %s (min of host %s and card %s)\n", bus_width_str(desired_bus_width), bus_width_str(max_host_bus_width), bus_width_str(max_card_bus_width))); sdda_set_bus_width(periph, start_ccb, desired_bus_width); softc->state = SDDA_STATE_NORMAL; /* MMC partitions support */ if (mmcp->card_features & CARD_FEATURE_MMC && mmc_get_spec_vers(periph) >= 4) { sdda_process_mmc_partitions(periph, start_ccb); } else if (mmcp->card_features & CARD_FEATURE_SD20) { /* For SD[HC] cards, just add one partition that is the whole card */ sdda_add_part(periph, 0, "sdda", periph->unit_number, mmc_get_media_size(periph), sdda_get_read_only(periph, start_ccb)); softc->part_curr = 0; } xpt_announce_periph(periph, softc->card_id_string); /* * Add async callbacks for bus reset and bus device reset calls. * I don't bother checking if this fails as, in most cases, * the system will function just fine without them and the only * alternative would be to not attach the device on failure. */ xpt_register_async(AC_LOST_DEVICE | AC_GETDEV_CHANGED | AC_ADVINFO_CHANGED, sddaasync, periph, periph->path); } static void sdda_add_part(struct cam_periph *periph, u_int type, const char *name, u_int cnt, off_t media_size, bool ro) { struct sdda_softc *sc = (struct sdda_softc *)periph->softc; struct sdda_part *part; struct ccb_pathinq cpi; - u_int maxio; CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH, ("Partition type '%s', size %ju %s\n", part_type(type), media_size, ro ? "(read-only)" : "")); part = sc->part[type] = malloc(sizeof(*part), M_DEVBUF, M_WAITOK | M_ZERO); part->cnt = cnt; part->type = type; part->ro = ro; part->sc = sc; snprintf(part->name, sizeof(part->name), name, periph->unit_number); /* * Due to the nature of RPMB partition it doesn't make much sense * to add it as a disk. It would be more appropriate to create a * userland tool to operate on the partition or leverage the existing * tools from sysutils/mmc-utils. 
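 * (RPMB reads and writes have to be wrapped in authenticated data frames that
 * carry a write counter, a nonce and an HMAC SHA-256 MAC, so plain block I/O
 * via GEOM could not work for this partition anyway.)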
*/ if (type == EXT_CSD_PART_CONFIG_ACC_RPMB) { /* TODO: Create device, assign IOCTL handler */ CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH, ("Don't know what to do with RPMB partitions yet\n")); return; } bioq_init(&part->bio_queue); bzero(&cpi, sizeof(cpi)); xpt_setup_ccb(&cpi.ccb_h, periph->path, CAM_PRIORITY_NONE); cpi.ccb_h.func_code = XPT_PATH_INQ; xpt_action((union ccb *)&cpi); /* * Register this media as a disk */ (void)cam_periph_hold(periph, PRIBIO); cam_periph_unlock(periph); part->disk = disk_alloc(); part->disk->d_rotation_rate = DISK_RR_NON_ROTATING; part->disk->d_devstat = devstat_new_entry(part->name, cnt, 512, DEVSTAT_ALL_SUPPORTED, DEVSTAT_TYPE_DIRECT | XPORT_DEVSTAT_TYPE(cpi.transport), DEVSTAT_PRIORITY_DISK); part->disk->d_open = sddaopen; part->disk->d_close = sddaclose; part->disk->d_strategy = sddastrategy; part->disk->d_getattr = sddagetattr; // sc->disk->d_dump = sddadump; part->disk->d_gone = sddadiskgonecb; part->disk->d_name = part->name; part->disk->d_drv1 = part; - maxio = cpi.maxio; /* Honor max I/O size of SIM */ - if (maxio == 0) - maxio = DFLTPHYS; /* traditional default */ - else if (maxio > MAXPHYS) - maxio = MAXPHYS; /* for safety */ - part->disk->d_maxsize = maxio; + part->disk->d_maxsize = + MIN(MAXPHYS, sdda_get_max_data(periph, + (union ccb *)&cpi) * mmc_get_sector_size(periph)); part->disk->d_unit = cnt; part->disk->d_flags = 0; strlcpy(part->disk->d_descr, sc->card_id_string, MIN(sizeof(part->disk->d_descr), sizeof(sc->card_id_string))); strlcpy(part->disk->d_ident, sc->card_sn_string, MIN(sizeof(part->disk->d_ident), sizeof(sc->card_sn_string))); part->disk->d_hba_vendor = cpi.hba_vendor; part->disk->d_hba_device = cpi.hba_device; part->disk->d_hba_subvendor = cpi.hba_subvendor; part->disk->d_hba_subdevice = cpi.hba_subdevice; part->disk->d_sectorsize = mmc_get_sector_size(periph); part->disk->d_mediasize = media_size; part->disk->d_stripesize = 0; part->disk->d_fwsectors = 0; part->disk->d_fwheads = 0; /* * Acquire a reference to the periph before we register with GEOM. * We'll release this reference once GEOM calls us back (via * sddadiskgonecb()) telling us that our provider has been freed. */ if (cam_periph_acquire(periph) != 0) { xpt_print(periph->path, "%s: lost periph during " "registration!\n", __func__); cam_periph_lock(periph); return; } disk_create(part->disk, DISK_VERSION); cam_periph_lock(periph); cam_periph_unhold(periph); } /* * For MMC cards, process EXT_CSD and add partitions that are supported by * this device. */ static void sdda_process_mmc_partitions(struct cam_periph *periph, union ccb *ccb) { struct sdda_softc *sc = (struct sdda_softc *)periph->softc; struct mmc_params *mmcp = &periph->path->device->mmc_ident_data; off_t erase_size, sector_size, size, wp_size; int i; const uint8_t *ext_csd; uint8_t rev; bool comp, ro; ext_csd = sc->raw_ext_csd; /* * Enhanced user data area and general purpose partitions are only * supported in revision 1.4 (EXT_CSD_REV == 4) and later, the RPMB * partition in revision 1.5 (MMC v4.41, EXT_CSD_REV == 5) and later. */ rev = ext_csd[EXT_CSD_REV]; /* * Ignore user-creatable enhanced user data area and general purpose * partitions as long as partitioning hasn't been finished. */ comp = (ext_csd[EXT_CSD_PART_SET] & EXT_CSD_PART_SET_COMPLETED) != 0; /* * Add enhanced user data area slice, unless it spans the entirety of * the user data area.
The enhanced area is of a multiple of high * capacity write protect groups ((ERASE_GRP_SIZE + HC_WP_GRP_SIZE) * * 512 KB) and its offset given in either sectors or bytes, depending * on whether it's a high capacity device or not. * NB: The slicer and its slices need to be registered before adding * the disk for the corresponding user data area as re-tasting is * racy. */ sector_size = mmc_get_sector_size(periph); size = ext_csd[EXT_CSD_ENH_SIZE_MULT] + (ext_csd[EXT_CSD_ENH_SIZE_MULT + 1] << 8) + (ext_csd[EXT_CSD_ENH_SIZE_MULT + 2] << 16); if (rev >= 4 && comp == TRUE && size > 0 && (ext_csd[EXT_CSD_PART_SUPPORT] & EXT_CSD_PART_SUPPORT_ENH_ATTR_EN) != 0 && (ext_csd[EXT_CSD_PART_ATTR] & (EXT_CSD_PART_ATTR_ENH_USR)) != 0) { erase_size = ext_csd[EXT_CSD_ERASE_GRP_SIZE] * 1024 * MMC_SECTOR_SIZE; wp_size = ext_csd[EXT_CSD_HC_WP_GRP_SIZE]; size *= erase_size * wp_size; if (size != mmc_get_media_size(periph) * sector_size) { sc->enh_size = size; sc->enh_base = (ext_csd[EXT_CSD_ENH_START_ADDR] + (ext_csd[EXT_CSD_ENH_START_ADDR + 1] << 8) + (ext_csd[EXT_CSD_ENH_START_ADDR + 2] << 16) + (ext_csd[EXT_CSD_ENH_START_ADDR + 3] << 24)) * ((mmcp->card_features & CARD_FEATURE_SDHC) ? 1: MMC_SECTOR_SIZE); } else CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH, ("enhanced user data area spans entire device")); } /* * Add default partition. This may be the only one or the user * data area in case partitions are supported. */ ro = sdda_get_read_only(periph, ccb); sdda_add_part(periph, EXT_CSD_PART_CONFIG_ACC_DEFAULT, "sdda", periph->unit_number, mmc_get_media_size(periph), ro); sc->part_curr = EXT_CSD_PART_CONFIG_ACC_DEFAULT; if (mmc_get_spec_vers(periph) < 3) return; /* Belatedly announce enhanced user data slice. */ if (sc->enh_size != 0) { CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH, ("enhanced user data area off 0x%jx size %ju bytes\n", sc->enh_base, sc->enh_size)); } /* * Determine partition switch timeout (provided in units of 10 ms) * and ensure it's at least 300 ms as some eMMC chips lie. */ sc->part_time = max(ext_csd[EXT_CSD_PART_SWITCH_TO] * 10 * 1000, 300 * 1000); /* Add boot partitions, which are of a fixed multiple of 128 KB. */ size = ext_csd[EXT_CSD_BOOT_SIZE_MULT] * MMC_BOOT_RPMB_BLOCK_SIZE; if (size > 0 && (sdda_get_host_caps(periph, ccb) & MMC_CAP_BOOT_NOACC) == 0) { sdda_add_part(periph, EXT_CSD_PART_CONFIG_ACC_BOOT0, SDDA_FMT_BOOT, 0, size, ro | ((ext_csd[EXT_CSD_BOOT_WP_STATUS] & EXT_CSD_BOOT_WP_STATUS_BOOT0_MASK) != 0)); sdda_add_part(periph, EXT_CSD_PART_CONFIG_ACC_BOOT1, SDDA_FMT_BOOT, 1, size, ro | ((ext_csd[EXT_CSD_BOOT_WP_STATUS] & EXT_CSD_BOOT_WP_STATUS_BOOT1_MASK) != 0)); } /* Add RPMB partition, which also is of a fixed multiple of 128 KB. */ size = ext_csd[EXT_CSD_RPMB_MULT] * MMC_BOOT_RPMB_BLOCK_SIZE; if (rev >= 5 && size > 0) sdda_add_part(periph, EXT_CSD_PART_CONFIG_ACC_RPMB, SDDA_FMT_RPMB, 0, size, ro); if (rev <= 3 || comp == FALSE) return; /* * Add general purpose partitions, which are of a multiple of high * capacity write protect groups, too. 
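 * E.g. with ext_csd[EXT_CSD_ERASE_GRP_SIZE] == 1 and
 * ext_csd[EXT_CSD_HC_WP_GRP_SIZE] == 16, one write protect group is
 * 1 * 512 KB * 16 = 8 MB, so a GP_SIZE_MULT of 4 below yields a 32 MB
 * partition.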
*/ if ((ext_csd[EXT_CSD_PART_SUPPORT] & EXT_CSD_PART_SUPPORT_EN) != 0) { erase_size = ext_csd[EXT_CSD_ERASE_GRP_SIZE] * 1024 * MMC_SECTOR_SIZE; wp_size = ext_csd[EXT_CSD_HC_WP_GRP_SIZE]; for (i = 0; i < MMC_PART_GP_MAX; i++) { size = ext_csd[EXT_CSD_GP_SIZE_MULT + i * 3] + (ext_csd[EXT_CSD_GP_SIZE_MULT + i * 3 + 1] << 8) + (ext_csd[EXT_CSD_GP_SIZE_MULT + i * 3 + 2] << 16); if (size == 0) continue; sdda_add_part(periph, EXT_CSD_PART_CONFIG_ACC_GP0 + i, SDDA_FMT_GP, i, size * erase_size * wp_size, ro); } } } /* * We cannot just call mmc_switch() since it will sleep, and we are in * GEOM context and cannot sleep. Instead, create an MMCIO request to switch * partitions and send it to h/w, and upon completion resume processing * the I/O queue. * This function cannot fail, instead check switch errors in sddadone(). */ static void sdda_init_switch_part(struct cam_periph *periph, union ccb *start_ccb, u_int part) { struct sdda_softc *sc = (struct sdda_softc *)periph->softc; uint8_t value; sc->part_requested = part; value = (sc->raw_ext_csd[EXT_CSD_PART_CONFIG] & ~EXT_CSD_PART_CONFIG_ACC_MASK) | part; mmc_switch_fill_mmcio(start_ccb, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_PART_CONFIG, value, sc->part_time); start_ccb->ccb_h.cbfcnp = sddadone; sc->outstanding_cmds++; cam_periph_unlock(periph); xpt_action(start_ccb); cam_periph_lock(periph); } /* Called with periph lock held! */ static void sddastart(struct cam_periph *periph, union ccb *start_ccb) { struct bio *bp; struct sdda_softc *softc = (struct sdda_softc *)periph->softc; struct sdda_part *part; struct mmc_params *mmcp = &periph->path->device->mmc_ident_data; int part_index; CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("sddastart\n")); if (softc->state != SDDA_STATE_NORMAL) { CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("device is not in SDDA_STATE_NORMAL yet\n")); xpt_release_ccb(start_ccb); return; } /* Find partition that has outstanding commands. Prefer current partition. */ part = softc->part[softc->part_curr]; bp = bioq_first(&part->bio_queue); if (bp == NULL) { for (part_index = 0; part_index < MMC_PART_MAX; part_index++) { if ((part = softc->part[part_index]) != NULL && (bp = bioq_first(&softc->part[part_index]->bio_queue)) != NULL) break; } } if (bp == NULL) { xpt_release_ccb(start_ccb); return; } if (part_index != softc->part_curr) { CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH, ("Partition %d -> %d\n", softc->part_curr, part_index)); /* * According to section "6.2.2 Command restrictions" of the eMMC * specification v5.1, CMD19/CMD21 aren't allowed to be used with * RPMB partitions. So we pause re-tuning along with triggering * it up-front to decrease the likelihood of re-tuning becoming * necessary while accessing an RPMB partition. Consequently, an * RPMB partition should immediately be switched away from again * after an access in order to allow for re-tuning to take place * anew. 
*/ /* TODO: pause retune if switching to RPMB partition */ softc->state = SDDA_STATE_PART_SWITCH; sdda_init_switch_part(periph, start_ccb, part_index); return; } bioq_remove(&part->bio_queue, bp); switch (bp->bio_cmd) { case BIO_WRITE: CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("BIO_WRITE\n")); part->flags |= SDDA_FLAG_DIRTY; /* FALLTHROUGH */ case BIO_READ: { struct ccb_mmcio *mmcio; uint64_t blockno = bp->bio_pblkno; uint16_t count = bp->bio_bcount / 512; uint16_t opcode; if (bp->bio_cmd == BIO_READ) CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("BIO_READ\n")); CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("Block %"PRIu64" cnt %u\n", blockno, count)); /* Construct new MMC command */ if (bp->bio_cmd == BIO_READ) { if (count > 1) opcode = MMC_READ_MULTIPLE_BLOCK; else opcode = MMC_READ_SINGLE_BLOCK; } else { if (count > 1) opcode = MMC_WRITE_MULTIPLE_BLOCK; else opcode = MMC_WRITE_BLOCK; } start_ccb->ccb_h.func_code = XPT_MMC_IO; start_ccb->ccb_h.flags = (bp->bio_cmd == BIO_READ ? CAM_DIR_IN : CAM_DIR_OUT); start_ccb->ccb_h.retry_count = 0; start_ccb->ccb_h.timeout = 15 * 1000; start_ccb->ccb_h.cbfcnp = sddadone; mmcio = &start_ccb->mmcio; mmcio->cmd.opcode = opcode; mmcio->cmd.arg = blockno; if (!(mmcp->card_features & CARD_FEATURE_SDHC)) mmcio->cmd.arg <<= 9; mmcio->cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC; mmcio->cmd.data = softc->mmcdata; mmcio->cmd.data->data = bp->bio_data; mmcio->cmd.data->len = 512 * count; mmcio->cmd.data->flags = (bp->bio_cmd == BIO_READ ? MMC_DATA_READ : MMC_DATA_WRITE); /* Direct h/w to issue CMD12 upon completion */ if (count > 1) { mmcio->cmd.data->flags |= MMC_DATA_MULTI; mmcio->stop.opcode = MMC_STOP_TRANSMISSION; mmcio->stop.flags = MMC_RSP_R1B | MMC_CMD_AC; mmcio->stop.arg = 0; } break; } case BIO_FLUSH: CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("BIO_FLUSH\n")); sddaschedule(periph); break; case BIO_DELETE: CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("BIO_DELETE\n")); sddaschedule(periph); break; } start_ccb->ccb_h.ccb_bp = bp; softc->outstanding_cmds++; softc->refcount++; cam_periph_unlock(periph); xpt_action(start_ccb); cam_periph_lock(periph); /* May have more work to do, so ensure we stay scheduled */ sddaschedule(periph); } static void sddadone(struct cam_periph *periph, union ccb *done_ccb) { struct bio *bp; struct sdda_softc *softc; struct ccb_mmcio *mmcio; struct cam_path *path; uint32_t card_status; int error = 0; softc = (struct sdda_softc *)periph->softc; mmcio = &done_ccb->mmcio; path = done_ccb->ccb_h.path; CAM_DEBUG(path, CAM_DEBUG_TRACE, ("sddadone\n")); // cam_periph_lock(periph); if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { CAM_DEBUG(path, CAM_DEBUG_TRACE, ("Error!!!\n")); if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) cam_release_devq(path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); error = 5; /* EIO */ } else { if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) panic("REQ_CMP with QFRZN"); error = 0; } card_status = mmcio->cmd.resp[0]; CAM_DEBUG(path, CAM_DEBUG_TRACE, ("Card status: %08x\n", R1_STATUS(card_status))); CAM_DEBUG(path, CAM_DEBUG_TRACE, ("Current state: %d\n", R1_CURRENT_STATE(card_status))); /* Process result of switching MMC partitions */ if (softc->state == SDDA_STATE_PART_SWITCH) { CAM_DEBUG(path, CAM_DEBUG_TRACE, ("Completing partition switch to %d\n", softc->part_requested)); softc->outstanding_cmds--; /* Complete partition switch */ softc->state = SDDA_STATE_NORMAL; if (error != MMC_ERR_NONE) { /* TODO: Unpause retune if accessing RPMB */ xpt_release_ccb(done_ccb);
xpt_schedule(periph, CAM_PRIORITY_NORMAL); return; } softc->raw_ext_csd[EXT_CSD_PART_CONFIG] = (softc->raw_ext_csd[EXT_CSD_PART_CONFIG] & ~EXT_CSD_PART_CONFIG_ACC_MASK) | softc->part_requested; /* TODO: Unpause retune if accessing RPMB */ softc->part_curr = softc->part_requested; xpt_release_ccb(done_ccb); /* Return to processing BIO requests */ xpt_schedule(periph, CAM_PRIORITY_NORMAL); return; } bp = (struct bio *)done_ccb->ccb_h.ccb_bp; bp->bio_error = error; if (error != 0) { bp->bio_resid = bp->bio_bcount; bp->bio_flags |= BIO_ERROR; } else { /* XXX: How many bytes remaining? */ bp->bio_resid = 0; if (bp->bio_resid > 0) bp->bio_flags |= BIO_ERROR; } softc->outstanding_cmds--; xpt_release_ccb(done_ccb); /* * Release the periph refcount taken in sddastart() for each CCB. */ KASSERT(softc->refcount >= 1, ("sddadone softc %p refcount %d", softc, softc->refcount)); softc->refcount--; biodone(bp); } static int sddaerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags) { return(cam_periph_error(ccb, cam_flags, sense_flags)); } #endif /* _KERNEL */ Index: head/sys/dev/sdhci/sdhci.c =================================================================== --- head/sys/dev/sdhci/sdhci.c (revision 345774) +++ head/sys/dev/sdhci/sdhci.c (revision 345775) @@ -1,2794 +1,2808 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2008 Alexander Motin * Copyright (c) 2017 Marius Strobl * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "mmcbr_if.h" #include "sdhci_if.h" #include "opt_mmccam.h" SYSCTL_NODE(_hw, OID_AUTO, sdhci, CTLFLAG_RD, 0, "sdhci driver"); static int sdhci_debug = 0; SYSCTL_INT(_hw_sdhci, OID_AUTO, debug, CTLFLAG_RWTUN, &sdhci_debug, 0, "Debug level"); u_int sdhci_quirk_clear = 0; SYSCTL_INT(_hw_sdhci, OID_AUTO, quirk_clear, CTLFLAG_RWTUN, &sdhci_quirk_clear, 0, "Mask of quirks to clear"); u_int sdhci_quirk_set = 0; SYSCTL_INT(_hw_sdhci, OID_AUTO, quirk_set, CTLFLAG_RWTUN, &sdhci_quirk_set, 0, "Mask of quirks to set"); #define RD1(slot, off) SDHCI_READ_1((slot)->bus, (slot), (off)) #define RD2(slot, off) SDHCI_READ_2((slot)->bus, (slot), (off)) #define RD4(slot, off) SDHCI_READ_4((slot)->bus, (slot), (off)) #define RD_MULTI_4(slot, off, ptr, count) \ SDHCI_READ_MULTI_4((slot)->bus, (slot), (off), (ptr), (count)) #define WR1(slot, off, val) SDHCI_WRITE_1((slot)->bus, (slot), (off), (val)) #define WR2(slot, off, val) SDHCI_WRITE_2((slot)->bus, (slot), (off), (val)) #define WR4(slot, off, val) SDHCI_WRITE_4((slot)->bus, (slot), (off), (val)) #define WR_MULTI_4(slot, off, ptr, count) \ SDHCI_WRITE_MULTI_4((slot)->bus, (slot), (off), (ptr), (count)) static void sdhci_acmd_irq(struct sdhci_slot *slot, uint16_t acmd_err); static void sdhci_card_poll(void *arg); static void sdhci_card_task(void *arg, int pending); static void sdhci_cmd_irq(struct sdhci_slot *slot, uint32_t intmask); static void sdhci_data_irq(struct sdhci_slot *slot, uint32_t intmask); static int sdhci_exec_tuning(struct sdhci_slot *slot, bool reset); static void sdhci_handle_card_present_locked(struct sdhci_slot *slot, bool is_present); static void sdhci_finish_command(struct sdhci_slot *slot); static void sdhci_init(struct sdhci_slot *slot); static void sdhci_read_block_pio(struct sdhci_slot *slot); static void sdhci_req_done(struct sdhci_slot *slot); static void sdhci_req_wakeup(struct mmc_request *req); static void sdhci_reset(struct sdhci_slot *slot, uint8_t mask); static void sdhci_retune(void *arg); static void sdhci_set_clock(struct sdhci_slot *slot, uint32_t clock); static void sdhci_set_power(struct sdhci_slot *slot, u_char power); static void sdhci_set_transfer_mode(struct sdhci_slot *slot, const struct mmc_data *data); static void sdhci_start(struct sdhci_slot *slot); static void sdhci_timeout(void *arg); static void sdhci_start_command(struct sdhci_slot *slot, struct mmc_command *cmd); static void sdhci_start_data(struct sdhci_slot *slot, const struct mmc_data *data); static void sdhci_write_block_pio(struct sdhci_slot *slot); static void sdhci_transfer_pio(struct sdhci_slot *slot); #ifdef MMCCAM /* CAM-related */ static void sdhci_cam_action(struct cam_sim *sim, union ccb *ccb); static int sdhci_cam_get_possible_host_clock(const struct sdhci_slot *slot, int proposed_clock); static void sdhci_cam_handle_mmcio(struct cam_sim *sim, union ccb *ccb); static void sdhci_cam_poll(struct cam_sim *sim); static int sdhci_cam_request(struct sdhci_slot *slot, union ccb *ccb); static int sdhci_cam_settran_settings(struct sdhci_slot *slot, union ccb *ccb); static int sdhci_cam_update_ios(struct sdhci_slot *slot); #endif /* helper routines */ static int sdhci_dma_alloc(struct sdhci_slot *slot); static void sdhci_dma_free(struct sdhci_slot *slot); static 
void sdhci_dumpregs(struct sdhci_slot *slot); static void sdhci_getaddr(void *arg, bus_dma_segment_t *segs, int nsegs, int error); static int slot_printf(const struct sdhci_slot *slot, const char * fmt, ...) __printflike(2, 3); static uint32_t sdhci_tuning_intmask(const struct sdhci_slot *slot); #define SDHCI_LOCK(_slot) mtx_lock(&(_slot)->mtx) #define SDHCI_UNLOCK(_slot) mtx_unlock(&(_slot)->mtx) #define SDHCI_LOCK_INIT(_slot) \ mtx_init(&_slot->mtx, "SD slot mtx", "sdhci", MTX_DEF) #define SDHCI_LOCK_DESTROY(_slot) mtx_destroy(&_slot->mtx); #define SDHCI_ASSERT_LOCKED(_slot) mtx_assert(&_slot->mtx, MA_OWNED); #define SDHCI_ASSERT_UNLOCKED(_slot) mtx_assert(&_slot->mtx, MA_NOTOWNED); #define SDHCI_DEFAULT_MAX_FREQ 50 #define SDHCI_200_MAX_DIVIDER 256 #define SDHCI_300_MAX_DIVIDER 2046 #define SDHCI_CARD_PRESENT_TICKS (hz / 5) #define SDHCI_INSERT_DELAY_TICKS (hz / 2) /* * Broadcom BCM577xx Controller Constants */ /* Maximum divider supported by the default clock source. */ #define BCM577XX_DEFAULT_MAX_DIVIDER 256 /* Alternative clock's base frequency. */ #define BCM577XX_ALT_CLOCK_BASE 63000000 #define BCM577XX_HOST_CONTROL 0x198 #define BCM577XX_CTRL_CLKSEL_MASK 0xFFFFCFFF #define BCM577XX_CTRL_CLKSEL_SHIFT 12 #define BCM577XX_CTRL_CLKSEL_DEFAULT 0x0 #define BCM577XX_CTRL_CLKSEL_64MHZ 0x3 static void sdhci_getaddr(void *arg, bus_dma_segment_t *segs, int nsegs, int error) { if (error != 0) { printf("getaddr: error %d\n", error); return; } *(bus_addr_t *)arg = segs[0].ds_addr; } static int slot_printf(const struct sdhci_slot *slot, const char * fmt, ...) { va_list ap; int retval; retval = printf("%s-slot%d: ", device_get_nameunit(slot->bus), slot->num); va_start(ap, fmt); retval += vprintf(fmt, ap); va_end(ap); return (retval); } static void sdhci_dumpregs(struct sdhci_slot *slot) { slot_printf(slot, "============== REGISTER DUMP ==============\n"); slot_printf(slot, "Sys addr: 0x%08x | Version: 0x%08x\n", RD4(slot, SDHCI_DMA_ADDRESS), RD2(slot, SDHCI_HOST_VERSION)); slot_printf(slot, "Blk size: 0x%08x | Blk cnt: 0x%08x\n", RD2(slot, SDHCI_BLOCK_SIZE), RD2(slot, SDHCI_BLOCK_COUNT)); slot_printf(slot, "Argument: 0x%08x | Trn mode: 0x%08x\n", RD4(slot, SDHCI_ARGUMENT), RD2(slot, SDHCI_TRANSFER_MODE)); slot_printf(slot, "Present: 0x%08x | Host ctl: 0x%08x\n", RD4(slot, SDHCI_PRESENT_STATE), RD1(slot, SDHCI_HOST_CONTROL)); slot_printf(slot, "Power: 0x%08x | Blk gap: 0x%08x\n", RD1(slot, SDHCI_POWER_CONTROL), RD1(slot, SDHCI_BLOCK_GAP_CONTROL)); slot_printf(slot, "Wake-up: 0x%08x | Clock: 0x%08x\n", RD1(slot, SDHCI_WAKE_UP_CONTROL), RD2(slot, SDHCI_CLOCK_CONTROL)); slot_printf(slot, "Timeout: 0x%08x | Int stat: 0x%08x\n", RD1(slot, SDHCI_TIMEOUT_CONTROL), RD4(slot, SDHCI_INT_STATUS)); slot_printf(slot, "Int enab: 0x%08x | Sig enab: 0x%08x\n", RD4(slot, SDHCI_INT_ENABLE), RD4(slot, SDHCI_SIGNAL_ENABLE)); slot_printf(slot, "AC12 err: 0x%08x | Host ctl2:0x%08x\n", RD2(slot, SDHCI_ACMD12_ERR), RD2(slot, SDHCI_HOST_CONTROL2)); slot_printf(slot, "Caps: 0x%08x | Caps2: 0x%08x\n", RD4(slot, SDHCI_CAPABILITIES), RD4(slot, SDHCI_CAPABILITIES2)); slot_printf(slot, "Max curr: 0x%08x | ADMA err: 0x%08x\n", RD4(slot, SDHCI_MAX_CURRENT), RD1(slot, SDHCI_ADMA_ERR)); slot_printf(slot, "ADMA addr:0x%08x | Slot int: 0x%08x\n", RD4(slot, SDHCI_ADMA_ADDRESS_LO), RD2(slot, SDHCI_SLOT_INT_STATUS)); slot_printf(slot, "===========================================\n"); } static void sdhci_reset(struct sdhci_slot *slot, uint8_t mask) { int timeout; uint32_t clock; if (slot->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) { if 
(!SDHCI_GET_CARD_PRESENT(slot->bus, slot)) return; } /* Some controllers need this kick or reset won't work. */ if ((mask & SDHCI_RESET_ALL) == 0 && (slot->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET)) { /* This is to force an update */ clock = slot->clock; slot->clock = 0; sdhci_set_clock(slot, clock); } if (mask & SDHCI_RESET_ALL) { slot->clock = 0; slot->power = 0; } WR1(slot, SDHCI_SOFTWARE_RESET, mask); if (slot->quirks & SDHCI_QUIRK_WAITFOR_RESET_ASSERTED) { /* * Resets on TI OMAPs and AM335x are incompatible with SDHCI * specification. The reset bit has internal propagation delay, * so a fast read after write returns 0 even if reset process is * in progress. The workaround is to poll for 1 before polling * for 0. In the worst case, if we miss seeing it asserted the * time we spent waiting is enough to ensure the reset finishes. */ timeout = 10000; while ((RD1(slot, SDHCI_SOFTWARE_RESET) & mask) != mask) { if (timeout <= 0) break; timeout--; DELAY(1); } } /* Wait max 100 ms */ timeout = 10000; /* Controller clears the bits when it's done */ while (RD1(slot, SDHCI_SOFTWARE_RESET) & mask) { if (timeout <= 0) { slot_printf(slot, "Reset 0x%x never completed.\n", mask); sdhci_dumpregs(slot); return; } timeout--; DELAY(10); } } static uint32_t sdhci_tuning_intmask(const struct sdhci_slot *slot) { uint32_t intmask; intmask = 0; if (slot->opt & SDHCI_TUNING_ENABLED) { intmask |= SDHCI_INT_TUNEERR; if (slot->retune_mode == SDHCI_RETUNE_MODE_2 || slot->retune_mode == SDHCI_RETUNE_MODE_3) intmask |= SDHCI_INT_RETUNE; } return (intmask); } static void sdhci_init(struct sdhci_slot *slot) { sdhci_reset(slot, SDHCI_RESET_ALL); /* Enable interrupts. */ slot->intmask = SDHCI_INT_BUS_POWER | SDHCI_INT_DATA_END_BIT | SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT | SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC | SDHCI_INT_TIMEOUT | SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL | SDHCI_INT_DMA_END | SDHCI_INT_DATA_END | SDHCI_INT_RESPONSE | SDHCI_INT_ACMD12ERR; if (!(slot->quirks & SDHCI_QUIRK_POLL_CARD_PRESENT) && !(slot->opt & SDHCI_NON_REMOVABLE)) { slot->intmask |= SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT; } WR4(slot, SDHCI_INT_ENABLE, slot->intmask); WR4(slot, SDHCI_SIGNAL_ENABLE, slot->intmask); } static void sdhci_set_clock(struct sdhci_slot *slot, uint32_t clock) { uint32_t clk_base; uint32_t clk_sel; uint32_t res; uint16_t clk; uint16_t div; int timeout; if (clock == slot->clock) return; slot->clock = clock; /* Turn off the clock. */ clk = RD2(slot, SDHCI_CLOCK_CONTROL); WR2(slot, SDHCI_CLOCK_CONTROL, clk & ~SDHCI_CLOCK_CARD_EN); /* If no clock requested - leave it so. */ if (clock == 0) return; /* Determine the clock base frequency */ clk_base = slot->max_clk; if (slot->quirks & SDHCI_QUIRK_BCM577XX_400KHZ_CLKSRC) { clk_sel = RD2(slot, BCM577XX_HOST_CONTROL) & BCM577XX_CTRL_CLKSEL_MASK; /* * Select clock source appropriate for the requested frequency. */ if ((clk_base / BCM577XX_DEFAULT_MAX_DIVIDER) > clock) { clk_base = BCM577XX_ALT_CLOCK_BASE; clk_sel |= (BCM577XX_CTRL_CLKSEL_64MHZ << BCM577XX_CTRL_CLKSEL_SHIFT); } else { clk_sel |= (BCM577XX_CTRL_CLKSEL_DEFAULT << BCM577XX_CTRL_CLKSEL_SHIFT); } WR2(slot, BCM577XX_HOST_CONTROL, clk_sel); } /* Recalculate timeout clock frequency based on the new sd clock. */ if (slot->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK) slot->timeout_clk = slot->clock / 1000; if (slot->version < SDHCI_SPEC_300) { /* Looking for highest freq <= clock. 
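 * E.g. for a 50 MHz base clock and a 400 kHz request the loop below stops at
 * div = 128 (res = 390.625 kHz); the subsequent shift turns that into the
 * value 64 (0x40) written to the divider field.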
*/ res = clk_base; for (div = 1; div < SDHCI_200_MAX_DIVIDER; div <<= 1) { if (res <= clock) break; res >>= 1; } /* Divider 1:1 is 0x00, 2:1 is 0x01, 256:1 is 0x80 ... */ div >>= 1; } else { /* Version 3.0 divisors are multiples of two up to 1023 * 2 */ if (clock >= clk_base) div = 0; else { for (div = 2; div < SDHCI_300_MAX_DIVIDER; div += 2) { if ((clk_base / div) <= clock) break; } } div >>= 1; } if (bootverbose || sdhci_debug) slot_printf(slot, "Divider %d for freq %d (base %d)\n", div, clock, clk_base); /* Now we have got divider, set it. */ clk = (div & SDHCI_DIVIDER_MASK) << SDHCI_DIVIDER_SHIFT; clk |= ((div >> SDHCI_DIVIDER_MASK_LEN) & SDHCI_DIVIDER_HI_MASK) << SDHCI_DIVIDER_HI_SHIFT; WR2(slot, SDHCI_CLOCK_CONTROL, clk); /* Enable clock. */ clk |= SDHCI_CLOCK_INT_EN; WR2(slot, SDHCI_CLOCK_CONTROL, clk); /* Wait up to 10 ms until it stabilize. */ timeout = 10; while (!((clk = RD2(slot, SDHCI_CLOCK_CONTROL)) & SDHCI_CLOCK_INT_STABLE)) { if (timeout == 0) { slot_printf(slot, "Internal clock never stabilised.\n"); sdhci_dumpregs(slot); return; } timeout--; DELAY(1000); } /* Pass clock signal to the bus. */ clk |= SDHCI_CLOCK_CARD_EN; WR2(slot, SDHCI_CLOCK_CONTROL, clk); } static void sdhci_set_power(struct sdhci_slot *slot, u_char power) { int i; uint8_t pwr; if (slot->power == power) return; slot->power = power; /* Turn off the power. */ pwr = 0; WR1(slot, SDHCI_POWER_CONTROL, pwr); /* If power down requested - leave it so. */ if (power == 0) return; /* Set voltage. */ switch (1 << power) { case MMC_OCR_LOW_VOLTAGE: pwr |= SDHCI_POWER_180; break; case MMC_OCR_290_300: case MMC_OCR_300_310: pwr |= SDHCI_POWER_300; break; case MMC_OCR_320_330: case MMC_OCR_330_340: pwr |= SDHCI_POWER_330; break; } WR1(slot, SDHCI_POWER_CONTROL, pwr); /* * Turn on VDD1 power. Note that at least some Intel controllers can * fail to enable bus power on the first try after transiting from D3 * to D0, so we give them up to 2 ms. */ pwr |= SDHCI_POWER_ON; for (i = 0; i < 20; i++) { WR1(slot, SDHCI_POWER_CONTROL, pwr); if (RD1(slot, SDHCI_POWER_CONTROL) & SDHCI_POWER_ON) break; DELAY(100); } if (!(RD1(slot, SDHCI_POWER_CONTROL) & SDHCI_POWER_ON)) slot_printf(slot, "Bus power failed to enable"); if (slot->quirks & SDHCI_QUIRK_INTEL_POWER_UP_RESET) { WR1(slot, SDHCI_POWER_CONTROL, pwr | 0x10); DELAY(10); WR1(slot, SDHCI_POWER_CONTROL, pwr); DELAY(300); } } static void sdhci_read_block_pio(struct sdhci_slot *slot) { uint32_t data; char *buffer; size_t left; buffer = slot->curcmd->data->data; buffer += slot->offset; /* Transfer one block at a time. */ left = min(512, slot->curcmd->data->len - slot->offset); slot->offset += left; /* If we are too fast, broken controllers return zeroes. */ if (slot->quirks & SDHCI_QUIRK_BROKEN_TIMINGS) DELAY(10); /* Handle unaligned and aligned buffer cases. */ if ((intptr_t)buffer & 3) { while (left > 3) { data = RD4(slot, SDHCI_BUFFER); buffer[0] = data; buffer[1] = (data >> 8); buffer[2] = (data >> 16); buffer[3] = (data >> 24); buffer += 4; left -= 4; } } else { RD_MULTI_4(slot, SDHCI_BUFFER, (uint32_t *)buffer, left >> 2); left &= 3; } /* Handle uneven size case. */ if (left > 0) { data = RD4(slot, SDHCI_BUFFER); while (left > 0) { *(buffer++) = data; data >>= 8; left--; } } } static void sdhci_write_block_pio(struct sdhci_slot *slot) { uint32_t data = 0; char *buffer; size_t left; buffer = slot->curcmd->data->data; buffer += slot->offset; /* Transfer one block at a time. 
*/ left = min(512, slot->curcmd->data->len - slot->offset); slot->offset += left; /* Handle unaligned and aligned buffer cases. */ if ((intptr_t)buffer & 3) { while (left > 3) { data = buffer[0] + (buffer[1] << 8) + (buffer[2] << 16) + (buffer[3] << 24); left -= 4; buffer += 4; WR4(slot, SDHCI_BUFFER, data); } } else { WR_MULTI_4(slot, SDHCI_BUFFER, (uint32_t *)buffer, left >> 2); left &= 3; } /* Handle uneven size case. */ if (left > 0) { while (left > 0) { data <<= 8; data += *(buffer++); left--; } WR4(slot, SDHCI_BUFFER, data); } } static void sdhci_transfer_pio(struct sdhci_slot *slot) { /* Read as many blocks as possible. */ if (slot->curcmd->data->flags & MMC_DATA_READ) { while (RD4(slot, SDHCI_PRESENT_STATE) & SDHCI_DATA_AVAILABLE) { sdhci_read_block_pio(slot); if (slot->offset >= slot->curcmd->data->len) break; } } else { while (RD4(slot, SDHCI_PRESENT_STATE) & SDHCI_SPACE_AVAILABLE) { sdhci_write_block_pio(slot); if (slot->offset >= slot->curcmd->data->len) break; } } } static void sdhci_card_task(void *arg, int pending __unused) { struct sdhci_slot *slot = arg; device_t d; SDHCI_LOCK(slot); if (SDHCI_GET_CARD_PRESENT(slot->bus, slot)) { #ifdef MMCCAM if (slot->card_present == 0) { #else if (slot->dev == NULL) { #endif /* If card is present - attach mmc bus. */ if (bootverbose || sdhci_debug) slot_printf(slot, "Card inserted\n"); #ifdef MMCCAM slot->card_present = 1; union ccb *ccb; uint32_t pathid; pathid = cam_sim_path(slot->sim); ccb = xpt_alloc_ccb_nowait(); if (ccb == NULL) { slot_printf(slot, "Unable to alloc CCB for rescan\n"); SDHCI_UNLOCK(slot); return; } /* * We create a rescan request for BUS:0:0, since the card * will be at lun 0. */ if (xpt_create_path(&ccb->ccb_h.path, NULL, pathid, /* target */ 0, /* lun */ 0) != CAM_REQ_CMP) { slot_printf(slot, "Unable to create path for rescan\n"); SDHCI_UNLOCK(slot); xpt_free_ccb(ccb); return; } SDHCI_UNLOCK(slot); xpt_rescan(ccb); #else d = slot->dev = device_add_child(slot->bus, "mmc", -1); SDHCI_UNLOCK(slot); if (d) { device_set_ivars(d, slot); (void)device_probe_and_attach(d); } #endif } else SDHCI_UNLOCK(slot); } else { #ifdef MMCCAM if (slot->card_present == 1) { #else if (slot->dev != NULL) { #endif /* If no card present - detach mmc bus. */ if (bootverbose || sdhci_debug) slot_printf(slot, "Card removed\n"); d = slot->dev; slot->dev = NULL; #ifdef MMCCAM slot->card_present = 0; union ccb *ccb; uint32_t pathid; pathid = cam_sim_path(slot->sim); ccb = xpt_alloc_ccb_nowait(); if (ccb == NULL) { slot_printf(slot, "Unable to alloc CCB for rescan\n"); SDHCI_UNLOCK(slot); return; } /* * We create a rescan request for BUS:0:0, since the card * will be at lun 0. */ if (xpt_create_path(&ccb->ccb_h.path, NULL, pathid, /* target */ 0, /* lun */ 0) != CAM_REQ_CMP) { slot_printf(slot, "Unable to create path for rescan\n"); SDHCI_UNLOCK(slot); xpt_free_ccb(ccb); return; } SDHCI_UNLOCK(slot); xpt_rescan(ccb); #else slot->intmask &= ~sdhci_tuning_intmask(slot); WR4(slot, SDHCI_INT_ENABLE, slot->intmask); WR4(slot, SDHCI_SIGNAL_ENABLE, slot->intmask); slot->opt &= ~SDHCI_TUNING_ENABLED; SDHCI_UNLOCK(slot); callout_drain(&slot->retune_callout); device_delete_child(slot->bus, d); #endif } else SDHCI_UNLOCK(slot); } } static void sdhci_handle_card_present_locked(struct sdhci_slot *slot, bool is_present) { bool was_present; /* * If there was no card and now there is one, schedule the task to * create the child device after a short delay. 
The delay is to * debounce the card insert (sometimes the card detect pin stabilizes * before the other pins have made good contact). * * If there was a card present and now it's gone, immediately schedule * the task to delete the child device. No debouncing -- gone is gone, * because once power is removed, a full card re-init is needed, and * that happens by deleting and recreating the child device. */ #ifdef MMCCAM was_present = slot->card_present; #else was_present = slot->dev != NULL; #endif if (!was_present && is_present) { taskqueue_enqueue_timeout(taskqueue_swi_giant, &slot->card_delayed_task, -SDHCI_INSERT_DELAY_TICKS); } else if (was_present && !is_present) { taskqueue_enqueue(taskqueue_swi_giant, &slot->card_task); } } void sdhci_handle_card_present(struct sdhci_slot *slot, bool is_present) { SDHCI_LOCK(slot); sdhci_handle_card_present_locked(slot, is_present); SDHCI_UNLOCK(slot); } static void sdhci_card_poll(void *arg) { struct sdhci_slot *slot = arg; sdhci_handle_card_present(slot, SDHCI_GET_CARD_PRESENT(slot->bus, slot)); callout_reset(&slot->card_poll_callout, SDHCI_CARD_PRESENT_TICKS, sdhci_card_poll, slot); } static int sdhci_dma_alloc(struct sdhci_slot *slot) { int err; if (!(slot->quirks & SDHCI_QUIRK_BROKEN_SDMA_BOUNDARY)) { if (MAXPHYS <= 1024 * 4) slot->sdma_boundary = SDHCI_BLKSZ_SDMA_BNDRY_4K; else if (MAXPHYS <= 1024 * 8) slot->sdma_boundary = SDHCI_BLKSZ_SDMA_BNDRY_8K; else if (MAXPHYS <= 1024 * 16) slot->sdma_boundary = SDHCI_BLKSZ_SDMA_BNDRY_16K; else if (MAXPHYS <= 1024 * 32) slot->sdma_boundary = SDHCI_BLKSZ_SDMA_BNDRY_32K; else if (MAXPHYS <= 1024 * 64) slot->sdma_boundary = SDHCI_BLKSZ_SDMA_BNDRY_64K; else if (MAXPHYS <= 1024 * 128) slot->sdma_boundary = SDHCI_BLKSZ_SDMA_BNDRY_128K; else if (MAXPHYS <= 1024 * 256) slot->sdma_boundary = SDHCI_BLKSZ_SDMA_BNDRY_256K; else slot->sdma_boundary = SDHCI_BLKSZ_SDMA_BNDRY_512K; } slot->sdma_bbufsz = SDHCI_SDMA_BNDRY_TO_BBUFSZ(slot->sdma_boundary); /* * Allocate the DMA tag for an SDMA bounce buffer. * Note that the SDHCI specification doesn't state any alignment * constraint for the SDMA system address. However, controllers * typically ignore the SDMA boundary bits in SDHCI_DMA_ADDRESS when * forming the actual address of data, requiring the SDMA buffer to * be aligned to the SDMA boundary. */ err = bus_dma_tag_create(bus_get_dma_tag(slot->bus), slot->sdma_bbufsz, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, slot->sdma_bbufsz, 1, slot->sdma_bbufsz, BUS_DMA_ALLOCNOW, NULL, NULL, &slot->dmatag); if (err != 0) { slot_printf(slot, "Can't create DMA tag for SDMA\n"); return (err); } /* Allocate DMA memory for the SDMA bounce buffer. */ err = bus_dmamem_alloc(slot->dmatag, (void **)&slot->dmamem, BUS_DMA_NOWAIT, &slot->dmamap); if (err != 0) { slot_printf(slot, "Can't alloc DMA memory for SDMA\n"); bus_dma_tag_destroy(slot->dmatag); return (err); } /* Map the memory of the SDMA bounce buffer. 
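 * sdhci_getaddr() stores the single segment's bus address in slot->paddr;
 * a zero address is treated as a load failure below.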
*/ err = bus_dmamap_load(slot->dmatag, slot->dmamap, (void *)slot->dmamem, slot->sdma_bbufsz, sdhci_getaddr, &slot->paddr, 0); if (err != 0 || slot->paddr == 0) { slot_printf(slot, "Can't load DMA memory for SDMA\n"); bus_dmamem_free(slot->dmatag, slot->dmamem, slot->dmamap); bus_dma_tag_destroy(slot->dmatag); if (err) return (err); else return (EFAULT); } return (0); } static void sdhci_dma_free(struct sdhci_slot *slot) { bus_dmamap_unload(slot->dmatag, slot->dmamap); bus_dmamem_free(slot->dmatag, slot->dmamem, slot->dmamap); bus_dma_tag_destroy(slot->dmatag); } int sdhci_init_slot(device_t dev, struct sdhci_slot *slot, int num) { kobjop_desc_t kobj_desc; kobj_method_t *kobj_method; uint32_t caps, caps2, freq, host_caps; int err; SDHCI_LOCK_INIT(slot); slot->num = num; slot->bus = dev; slot->version = (RD2(slot, SDHCI_HOST_VERSION) >> SDHCI_SPEC_VER_SHIFT) & SDHCI_SPEC_VER_MASK; if (slot->quirks & SDHCI_QUIRK_MISSING_CAPS) { caps = slot->caps; caps2 = slot->caps2; } else { caps = RD4(slot, SDHCI_CAPABILITIES); if (slot->version >= SDHCI_SPEC_300) caps2 = RD4(slot, SDHCI_CAPABILITIES2); else caps2 = 0; } if (slot->version >= SDHCI_SPEC_300) { if ((caps & SDHCI_SLOTTYPE_MASK) != SDHCI_SLOTTYPE_REMOVABLE && (caps & SDHCI_SLOTTYPE_MASK) != SDHCI_SLOTTYPE_EMBEDDED) { slot_printf(slot, "Driver doesn't support shared bus slots\n"); SDHCI_LOCK_DESTROY(slot); return (ENXIO); } else if ((caps & SDHCI_SLOTTYPE_MASK) == SDHCI_SLOTTYPE_EMBEDDED) { slot->opt |= SDHCI_SLOT_EMBEDDED | SDHCI_NON_REMOVABLE; } } /* Calculate base clock frequency. */ if (slot->version >= SDHCI_SPEC_300) freq = (caps & SDHCI_CLOCK_V3_BASE_MASK) >> SDHCI_CLOCK_BASE_SHIFT; else freq = (caps & SDHCI_CLOCK_BASE_MASK) >> SDHCI_CLOCK_BASE_SHIFT; if (freq != 0) slot->max_clk = freq * 1000000; /* * If the frequency wasn't in the capabilities and the hardware driver * hasn't already set max_clk we're probably not going to work right * with an assumption, so complain about it. */ if (slot->max_clk == 0) { slot->max_clk = SDHCI_DEFAULT_MAX_FREQ * 1000000; slot_printf(slot, "Hardware doesn't specify base clock " "frequency, using %dMHz as default.\n", SDHCI_DEFAULT_MAX_FREQ); } /* Calculate/set timeout clock frequency. */ if (slot->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK) { slot->timeout_clk = slot->max_clk / 1000; } else if (slot->quirks & SDHCI_QUIRK_DATA_TIMEOUT_1MHZ) { slot->timeout_clk = 1000; } else { slot->timeout_clk = (caps & SDHCI_TIMEOUT_CLK_MASK) >> SDHCI_TIMEOUT_CLK_SHIFT; if (caps & SDHCI_TIMEOUT_CLK_UNIT) slot->timeout_clk *= 1000; } /* * If the frequency wasn't in the capabilities and the hardware driver * hasn't already set timeout_clk we'll probably work okay using the * max timeout, but still mention it. */ if (slot->timeout_clk == 0) { slot_printf(slot, "Hardware doesn't specify timeout clock " "frequency, setting BROKEN_TIMEOUT quirk.\n"); slot->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL; } slot->host.f_min = SDHCI_MIN_FREQ(slot->bus, slot); slot->host.f_max = slot->max_clk; slot->host.host_ocr = 0; if (caps & SDHCI_CAN_VDD_330) slot->host.host_ocr |= MMC_OCR_320_330 | MMC_OCR_330_340; if (caps & SDHCI_CAN_VDD_300) slot->host.host_ocr |= MMC_OCR_290_300 | MMC_OCR_300_310; /* 1.8V VDD is not supposed to be used for removable cards. 
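 * Hence the low voltage OCR bit is only advertised for embedded slots.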
*/ if ((caps & SDHCI_CAN_VDD_180) && (slot->opt & SDHCI_SLOT_EMBEDDED)) slot->host.host_ocr |= MMC_OCR_LOW_VOLTAGE; if (slot->host.host_ocr == 0) { slot_printf(slot, "Hardware doesn't report any " "support voltages.\n"); } host_caps = MMC_CAP_4_BIT_DATA; if (caps & SDHCI_CAN_DO_8BITBUS) host_caps |= MMC_CAP_8_BIT_DATA; if (caps & SDHCI_CAN_DO_HISPD) host_caps |= MMC_CAP_HSPEED; if (slot->quirks & SDHCI_QUIRK_BOOT_NOACC) host_caps |= MMC_CAP_BOOT_NOACC; if (slot->quirks & SDHCI_QUIRK_WAIT_WHILE_BUSY) host_caps |= MMC_CAP_WAIT_WHILE_BUSY; /* Determine supported UHS-I and eMMC modes. */ if (caps2 & (SDHCI_CAN_SDR50 | SDHCI_CAN_SDR104 | SDHCI_CAN_DDR50)) host_caps |= MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25; if (caps2 & SDHCI_CAN_SDR104) { host_caps |= MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_SDR50; if (!(slot->quirks & SDHCI_QUIRK_BROKEN_MMC_HS200)) host_caps |= MMC_CAP_MMC_HS200; } else if (caps2 & SDHCI_CAN_SDR50) host_caps |= MMC_CAP_UHS_SDR50; if (caps2 & SDHCI_CAN_DDR50 && !(slot->quirks & SDHCI_QUIRK_BROKEN_UHS_DDR50)) host_caps |= MMC_CAP_UHS_DDR50; if (slot->quirks & SDHCI_QUIRK_MMC_DDR52) host_caps |= MMC_CAP_MMC_DDR52; if (slot->quirks & SDHCI_QUIRK_CAPS_BIT63_FOR_MMC_HS400 && caps2 & SDHCI_CAN_MMC_HS400) host_caps |= MMC_CAP_MMC_HS400; if (slot->quirks & SDHCI_QUIRK_MMC_HS400_IF_CAN_SDR104 && caps2 & SDHCI_CAN_SDR104) host_caps |= MMC_CAP_MMC_HS400; /* * Disable UHS-I and eMMC modes if the set_uhs_timing method is the * default NULL implementation. */ kobj_desc = &sdhci_set_uhs_timing_desc; kobj_method = kobj_lookup_method(((kobj_t)dev)->ops->cls, NULL, kobj_desc); if (kobj_method == &kobj_desc->deflt) host_caps &= ~(MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 | MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_DDR50 | MMC_CAP_UHS_SDR104 | MMC_CAP_MMC_DDR52 | MMC_CAP_MMC_HS200 | MMC_CAP_MMC_HS400); #define SDHCI_CAP_MODES_TUNING(caps2) \ (((caps2) & SDHCI_TUNE_SDR50 ? MMC_CAP_UHS_SDR50 : 0) | \ MMC_CAP_UHS_DDR50 | MMC_CAP_UHS_SDR104 | MMC_CAP_MMC_HS200 | \ MMC_CAP_MMC_HS400) /* * Disable UHS-I and eMMC modes that require (re-)tuning if either * the tune or re-tune method is the default NULL implementation. */ kobj_desc = &mmcbr_tune_desc; kobj_method = kobj_lookup_method(((kobj_t)dev)->ops->cls, NULL, kobj_desc); if (kobj_method == &kobj_desc->deflt) goto no_tuning; kobj_desc = &mmcbr_retune_desc; kobj_method = kobj_lookup_method(((kobj_t)dev)->ops->cls, NULL, kobj_desc); if (kobj_method == &kobj_desc->deflt) { no_tuning: host_caps &= ~(SDHCI_CAP_MODES_TUNING(caps2)); } /* Allocate tuning structures and determine tuning parameters. */ if (host_caps & SDHCI_CAP_MODES_TUNING(caps2)) { slot->opt |= SDHCI_TUNING_SUPPORTED; slot->tune_req = malloc(sizeof(*slot->tune_req), M_DEVBUF, M_WAITOK); slot->tune_cmd = malloc(sizeof(*slot->tune_cmd), M_DEVBUF, M_WAITOK); slot->tune_data = malloc(sizeof(*slot->tune_data), M_DEVBUF, M_WAITOK); if (caps2 & SDHCI_TUNE_SDR50) slot->opt |= SDHCI_SDR50_NEEDS_TUNING; slot->retune_mode = (caps2 & SDHCI_RETUNE_MODES_MASK) >> SDHCI_RETUNE_MODES_SHIFT; if (slot->retune_mode == SDHCI_RETUNE_MODE_1) { slot->retune_count = (caps2 & SDHCI_RETUNE_CNT_MASK) >> SDHCI_RETUNE_CNT_SHIFT; if (slot->retune_count > 0xb) { slot_printf(slot, "Unknown re-tuning count " "%x, using 1 sec\n", slot->retune_count); slot->retune_count = 1; } else if (slot->retune_count != 0) slot->retune_count = 1 << (slot->retune_count - 1); } } #undef SDHCI_CAP_MODES_TUNING /* Determine supported VCCQ signaling levels. 
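 * 3.3 V is always assumed to be available; 1.8 V and 1.2 V are only added when
 * a mode detected above requires them, and are removed again below if the
 * bridge does not implement switch_vccq (the generic implementation
 * additionally drops 1.2 V).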
*/ host_caps |= MMC_CAP_SIGNALING_330; if (host_caps & (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 | MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_DDR50 | MMC_CAP_UHS_SDR104 | MMC_CAP_MMC_DDR52_180 | MMC_CAP_MMC_HS200_180 | MMC_CAP_MMC_HS400_180)) host_caps |= MMC_CAP_SIGNALING_120 | MMC_CAP_SIGNALING_180; /* * Disable 1.2 V and 1.8 V signaling if the switch_vccq method is the * default NULL implementation. Disable 1.2 V support if it's the * generic SDHCI implementation. */ kobj_desc = &mmcbr_switch_vccq_desc; kobj_method = kobj_lookup_method(((kobj_t)dev)->ops->cls, NULL, kobj_desc); if (kobj_method == &kobj_desc->deflt) host_caps &= ~(MMC_CAP_SIGNALING_120 | MMC_CAP_SIGNALING_180); else if (kobj_method->func == (kobjop_t)sdhci_generic_switch_vccq) host_caps &= ~MMC_CAP_SIGNALING_120; /* Determine supported driver types (type B is always mandatory). */ if (caps2 & SDHCI_CAN_DRIVE_TYPE_A) host_caps |= MMC_CAP_DRIVER_TYPE_A; if (caps2 & SDHCI_CAN_DRIVE_TYPE_C) host_caps |= MMC_CAP_DRIVER_TYPE_C; if (caps2 & SDHCI_CAN_DRIVE_TYPE_D) host_caps |= MMC_CAP_DRIVER_TYPE_D; slot->host.caps = host_caps; /* Decide if we have usable DMA. */ if (caps & SDHCI_CAN_DO_DMA) slot->opt |= SDHCI_HAVE_DMA; if (slot->quirks & SDHCI_QUIRK_BROKEN_DMA) slot->opt &= ~SDHCI_HAVE_DMA; if (slot->quirks & SDHCI_QUIRK_FORCE_DMA) slot->opt |= SDHCI_HAVE_DMA; if (slot->quirks & SDHCI_QUIRK_ALL_SLOTS_NON_REMOVABLE) slot->opt |= SDHCI_NON_REMOVABLE; /* * Use platform-provided transfer backend * with PIO as a fallback mechanism */ if (slot->opt & SDHCI_PLATFORM_TRANSFER) slot->opt &= ~SDHCI_HAVE_DMA; if (slot->opt & SDHCI_HAVE_DMA) { err = sdhci_dma_alloc(slot); if (err != 0) { if (slot->opt & SDHCI_TUNING_SUPPORTED) { free(slot->tune_req, M_DEVBUF); free(slot->tune_cmd, M_DEVBUF); free(slot->tune_data, M_DEVBUF); } SDHCI_LOCK_DESTROY(slot); return (err); } } if (bootverbose || sdhci_debug) { slot_printf(slot, "%uMHz%s %s VDD:%s%s%s VCCQ: 3.3V%s%s DRV: B%s%s%s %s %s\n", slot->max_clk / 1000000, (caps & SDHCI_CAN_DO_HISPD) ? " HS" : "", (host_caps & MMC_CAP_8_BIT_DATA) ? "8bits" : ((host_caps & MMC_CAP_4_BIT_DATA) ? "4bits" : "1bit"), (caps & SDHCI_CAN_VDD_330) ? " 3.3V" : "", (caps & SDHCI_CAN_VDD_300) ? " 3.0V" : "", ((caps & SDHCI_CAN_VDD_180) && (slot->opt & SDHCI_SLOT_EMBEDDED)) ? " 1.8V" : "", (host_caps & MMC_CAP_SIGNALING_180) ? " 1.8V" : "", (host_caps & MMC_CAP_SIGNALING_120) ? " 1.2V" : "", (host_caps & MMC_CAP_DRIVER_TYPE_A) ? "A" : "", (host_caps & MMC_CAP_DRIVER_TYPE_C) ? "C" : "", (host_caps & MMC_CAP_DRIVER_TYPE_D) ? "D" : "", (slot->opt & SDHCI_HAVE_DMA) ? "DMA" : "PIO", (slot->opt & SDHCI_SLOT_EMBEDDED) ? "embedded" : (slot->opt & SDHCI_NON_REMOVABLE) ? "non-removable" : "removable"); if (host_caps & (MMC_CAP_MMC_DDR52 | MMC_CAP_MMC_HS200 | MMC_CAP_MMC_HS400 | MMC_CAP_MMC_ENH_STROBE)) slot_printf(slot, "eMMC:%s%s%s%s\n", (host_caps & MMC_CAP_MMC_DDR52) ? " DDR52" : "", (host_caps & MMC_CAP_MMC_HS200) ? " HS200" : "", (host_caps & MMC_CAP_MMC_HS400) ? " HS400" : "", ((host_caps & (MMC_CAP_MMC_HS400 | MMC_CAP_MMC_ENH_STROBE)) == (MMC_CAP_MMC_HS400 | MMC_CAP_MMC_ENH_STROBE)) ? " HS400ES" : ""); if (host_caps & (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 | MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104)) slot_printf(slot, "UHS-I:%s%s%s%s%s\n", (host_caps & MMC_CAP_UHS_SDR12) ? " SDR12" : "", (host_caps & MMC_CAP_UHS_SDR25) ? " SDR25" : "", (host_caps & MMC_CAP_UHS_SDR50) ? " SDR50" : "", (host_caps & MMC_CAP_UHS_SDR104) ? " SDR104" : "", (host_caps & MMC_CAP_UHS_DDR50) ? 
" DDR50" : ""); if (slot->opt & SDHCI_TUNING_SUPPORTED) slot_printf(slot, "Re-tuning count %d secs, mode %d\n", slot->retune_count, slot->retune_mode + 1); sdhci_dumpregs(slot); } slot->timeout = 10; SYSCTL_ADD_INT(device_get_sysctl_ctx(slot->bus), SYSCTL_CHILDREN(device_get_sysctl_tree(slot->bus)), OID_AUTO, "timeout", CTLFLAG_RW, &slot->timeout, 0, "Maximum timeout for SDHCI transfers (in secs)"); TASK_INIT(&slot->card_task, 0, sdhci_card_task, slot); TIMEOUT_TASK_INIT(taskqueue_swi_giant, &slot->card_delayed_task, 0, sdhci_card_task, slot); callout_init(&slot->card_poll_callout, 1); callout_init_mtx(&slot->timeout_callout, &slot->mtx, 0); callout_init_mtx(&slot->retune_callout, &slot->mtx, 0); if ((slot->quirks & SDHCI_QUIRK_POLL_CARD_PRESENT) && !(slot->opt & SDHCI_NON_REMOVABLE)) { callout_reset(&slot->card_poll_callout, SDHCI_CARD_PRESENT_TICKS, sdhci_card_poll, slot); } sdhci_init(slot); return (0); } #ifndef MMCCAM void sdhci_start_slot(struct sdhci_slot *slot) { sdhci_card_task(slot, 0); } #endif int sdhci_cleanup_slot(struct sdhci_slot *slot) { device_t d; callout_drain(&slot->timeout_callout); callout_drain(&slot->card_poll_callout); callout_drain(&slot->retune_callout); taskqueue_drain(taskqueue_swi_giant, &slot->card_task); taskqueue_drain_timeout(taskqueue_swi_giant, &slot->card_delayed_task); SDHCI_LOCK(slot); d = slot->dev; slot->dev = NULL; SDHCI_UNLOCK(slot); if (d != NULL) device_delete_child(slot->bus, d); SDHCI_LOCK(slot); sdhci_reset(slot, SDHCI_RESET_ALL); SDHCI_UNLOCK(slot); if (slot->opt & SDHCI_HAVE_DMA) sdhci_dma_free(slot); if (slot->opt & SDHCI_TUNING_SUPPORTED) { free(slot->tune_req, M_DEVBUF); free(slot->tune_cmd, M_DEVBUF); free(slot->tune_data, M_DEVBUF); } SDHCI_LOCK_DESTROY(slot); return (0); } int sdhci_generic_suspend(struct sdhci_slot *slot) { /* * We expect the MMC layer to issue initial tuning after resume. * Otherwise, we'd need to indicate re-tuning including circuit reset * being required at least for re-tuning modes 1 and 2 ourselves. 
*/ callout_drain(&slot->retune_callout); SDHCI_LOCK(slot); slot->opt &= ~SDHCI_TUNING_ENABLED; sdhci_reset(slot, SDHCI_RESET_ALL); SDHCI_UNLOCK(slot); return (0); } int sdhci_generic_resume(struct sdhci_slot *slot) { SDHCI_LOCK(slot); sdhci_init(slot); SDHCI_UNLOCK(slot); return (0); } uint32_t sdhci_generic_min_freq(device_t brdev __unused, struct sdhci_slot *slot) { if (slot->version >= SDHCI_SPEC_300) return (slot->max_clk / SDHCI_300_MAX_DIVIDER); else return (slot->max_clk / SDHCI_200_MAX_DIVIDER); } bool sdhci_generic_get_card_present(device_t brdev __unused, struct sdhci_slot *slot) { if (slot->opt & SDHCI_NON_REMOVABLE) return true; return (RD4(slot, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT); } void sdhci_generic_set_uhs_timing(device_t brdev __unused, struct sdhci_slot *slot) { const struct mmc_ios *ios; uint16_t hostctrl2; if (slot->version < SDHCI_SPEC_300) return; SDHCI_ASSERT_LOCKED(slot); ios = &slot->host.ios; sdhci_set_clock(slot, 0); hostctrl2 = RD2(slot, SDHCI_HOST_CONTROL2); hostctrl2 &= ~SDHCI_CTRL2_UHS_MASK; if (ios->clock > SD_SDR50_MAX) { if (ios->timing == bus_timing_mmc_hs400 || ios->timing == bus_timing_mmc_hs400es) hostctrl2 |= SDHCI_CTRL2_MMC_HS400; else hostctrl2 |= SDHCI_CTRL2_UHS_SDR104; } else if (ios->clock > SD_SDR25_MAX) hostctrl2 |= SDHCI_CTRL2_UHS_SDR50; else if (ios->clock > SD_SDR12_MAX) { if (ios->timing == bus_timing_uhs_ddr50 || ios->timing == bus_timing_mmc_ddr52) hostctrl2 |= SDHCI_CTRL2_UHS_DDR50; else hostctrl2 |= SDHCI_CTRL2_UHS_SDR25; } else if (ios->clock > SD_MMC_CARD_ID_FREQUENCY) hostctrl2 |= SDHCI_CTRL2_UHS_SDR12; WR2(slot, SDHCI_HOST_CONTROL2, hostctrl2); sdhci_set_clock(slot, ios->clock); } int sdhci_generic_update_ios(device_t brdev, device_t reqdev) { struct sdhci_slot *slot = device_get_ivars(reqdev); struct mmc_ios *ios = &slot->host.ios; SDHCI_LOCK(slot); /* Do full reset on bus power down to clear from any state. */ if (ios->power_mode == power_off) { WR4(slot, SDHCI_SIGNAL_ENABLE, 0); sdhci_init(slot); } /* Configure the bus. */ sdhci_set_clock(slot, ios->clock); sdhci_set_power(slot, (ios->power_mode == power_off) ? 0 : ios->vdd); if (ios->bus_width == bus_width_8) { slot->hostctrl |= SDHCI_CTRL_8BITBUS; slot->hostctrl &= ~SDHCI_CTRL_4BITBUS; } else if (ios->bus_width == bus_width_4) { slot->hostctrl &= ~SDHCI_CTRL_8BITBUS; slot->hostctrl |= SDHCI_CTRL_4BITBUS; } else if (ios->bus_width == bus_width_1) { slot->hostctrl &= ~SDHCI_CTRL_8BITBUS; slot->hostctrl &= ~SDHCI_CTRL_4BITBUS; } else { panic("Invalid bus width: %d", ios->bus_width); } if (ios->clock > SD_SDR12_MAX && !(slot->quirks & SDHCI_QUIRK_DONT_SET_HISPD_BIT)) slot->hostctrl |= SDHCI_CTRL_HISPD; else slot->hostctrl &= ~SDHCI_CTRL_HISPD; WR1(slot, SDHCI_HOST_CONTROL, slot->hostctrl); SDHCI_SET_UHS_TIMING(brdev, slot); /* Some controllers like reset after bus changes. 
*/ if (slot->quirks & SDHCI_QUIRK_RESET_ON_IOS) sdhci_reset(slot, SDHCI_RESET_CMD | SDHCI_RESET_DATA); SDHCI_UNLOCK(slot); return (0); } int sdhci_generic_switch_vccq(device_t brdev __unused, device_t reqdev) { struct sdhci_slot *slot = device_get_ivars(reqdev); enum mmc_vccq vccq; int err; uint16_t hostctrl2; if (slot->version < SDHCI_SPEC_300) return (0); err = 0; vccq = slot->host.ios.vccq; SDHCI_LOCK(slot); sdhci_set_clock(slot, 0); hostctrl2 = RD2(slot, SDHCI_HOST_CONTROL2); switch (vccq) { case vccq_330: if (!(hostctrl2 & SDHCI_CTRL2_S18_ENABLE)) goto done; hostctrl2 &= ~SDHCI_CTRL2_S18_ENABLE; WR2(slot, SDHCI_HOST_CONTROL2, hostctrl2); DELAY(5000); hostctrl2 = RD2(slot, SDHCI_HOST_CONTROL2); if (!(hostctrl2 & SDHCI_CTRL2_S18_ENABLE)) goto done; err = EAGAIN; break; case vccq_180: if (!(slot->host.caps & MMC_CAP_SIGNALING_180)) { err = EINVAL; goto done; } if (hostctrl2 & SDHCI_CTRL2_S18_ENABLE) goto done; hostctrl2 |= SDHCI_CTRL2_S18_ENABLE; WR2(slot, SDHCI_HOST_CONTROL2, hostctrl2); DELAY(5000); hostctrl2 = RD2(slot, SDHCI_HOST_CONTROL2); if (hostctrl2 & SDHCI_CTRL2_S18_ENABLE) goto done; err = EAGAIN; break; default: slot_printf(slot, "Attempt to set unsupported signaling voltage\n"); err = EINVAL; break; } done: sdhci_set_clock(slot, slot->host.ios.clock); SDHCI_UNLOCK(slot); return (err); } int sdhci_generic_tune(device_t brdev __unused, device_t reqdev, bool hs400) { struct sdhci_slot *slot = device_get_ivars(reqdev); const struct mmc_ios *ios = &slot->host.ios; struct mmc_command *tune_cmd; struct mmc_data *tune_data; uint32_t opcode; int err; if (!(slot->opt & SDHCI_TUNING_SUPPORTED)) return (0); slot->retune_ticks = slot->retune_count * hz; opcode = MMC_SEND_TUNING_BLOCK; SDHCI_LOCK(slot); switch (ios->timing) { case bus_timing_mmc_hs400: slot_printf(slot, "HS400 must be tuned in HS200 mode\n"); SDHCI_UNLOCK(slot); return (EINVAL); case bus_timing_mmc_hs200: /* * In HS400 mode, controllers use the data strobe line to * latch data from the devices so periodic re-tuning isn't * expected to be required. */ if (hs400) slot->retune_ticks = 0; opcode = MMC_SEND_TUNING_BLOCK_HS200; break; case bus_timing_uhs_ddr50: case bus_timing_uhs_sdr104: break; case bus_timing_uhs_sdr50: if (slot->opt & SDHCI_SDR50_NEEDS_TUNING) break; /* FALLTHROUGH */ default: SDHCI_UNLOCK(slot); return (0); } tune_cmd = slot->tune_cmd; memset(tune_cmd, 0, sizeof(*tune_cmd)); tune_cmd->opcode = opcode; tune_cmd->flags = MMC_RSP_R1 | MMC_CMD_ADTC; tune_data = tune_cmd->data = slot->tune_data; memset(tune_data, 0, sizeof(*tune_data)); tune_data->len = (opcode == MMC_SEND_TUNING_BLOCK_HS200 && ios->bus_width == bus_width_8) ? MMC_TUNING_LEN_HS200 : MMC_TUNING_LEN; tune_data->flags = MMC_DATA_READ; tune_data->mrq = tune_cmd->mrq = slot->tune_req; slot->opt &= ~SDHCI_TUNING_ENABLED; err = sdhci_exec_tuning(slot, true); if (err == 0) { slot->opt |= SDHCI_TUNING_ENABLED; slot->intmask |= sdhci_tuning_intmask(slot); WR4(slot, SDHCI_INT_ENABLE, slot->intmask); WR4(slot, SDHCI_SIGNAL_ENABLE, slot->intmask); if (slot->retune_ticks) { callout_reset(&slot->retune_callout, slot->retune_ticks, sdhci_retune, slot); } } SDHCI_UNLOCK(slot); return (err); } int sdhci_generic_retune(device_t brdev __unused, device_t reqdev, bool reset) { struct sdhci_slot *slot = device_get_ivars(reqdev); int err; if (!(slot->opt & SDHCI_TUNING_ENABLED)) return (0); /* HS400 must be tuned in HS200 mode. 
*/ if (slot->host.ios.timing == bus_timing_mmc_hs400) return (EINVAL); SDHCI_LOCK(slot); err = sdhci_exec_tuning(slot, reset); /* * There are two ways sdhci_exec_tuning() can fail: * EBUSY should not actually happen when requests are only issued * with the host properly acquired, and * EIO re-tuning failed (but it did work initially). * * In both cases, we should retry at later point if periodic re-tuning * is enabled. Note that due to slot->retune_req not being cleared in * these failure cases, the MMC layer should trigger another attempt at * re-tuning with the next request anyway, though. */ if (slot->retune_ticks) { callout_reset(&slot->retune_callout, slot->retune_ticks, sdhci_retune, slot); } SDHCI_UNLOCK(slot); return (err); } static int sdhci_exec_tuning(struct sdhci_slot *slot, bool reset) { struct mmc_request *tune_req; struct mmc_command *tune_cmd; int i; uint32_t intmask; uint16_t hostctrl2; u_char opt; SDHCI_ASSERT_LOCKED(slot); if (slot->req != NULL) return (EBUSY); /* Tuning doesn't work with DMA enabled. */ opt = slot->opt; slot->opt = opt & ~SDHCI_HAVE_DMA; /* * Ensure that as documented, SDHCI_INT_DATA_AVAIL is the only * kind of interrupt we receive in response to a tuning request. */ intmask = slot->intmask; slot->intmask = SDHCI_INT_DATA_AVAIL; WR4(slot, SDHCI_INT_ENABLE, SDHCI_INT_DATA_AVAIL); WR4(slot, SDHCI_SIGNAL_ENABLE, SDHCI_INT_DATA_AVAIL); hostctrl2 = RD2(slot, SDHCI_HOST_CONTROL2); if (reset) hostctrl2 &= ~SDHCI_CTRL2_SAMPLING_CLOCK; else hostctrl2 |= SDHCI_CTRL2_SAMPLING_CLOCK; WR2(slot, SDHCI_HOST_CONTROL2, hostctrl2 | SDHCI_CTRL2_EXEC_TUNING); tune_req = slot->tune_req; tune_cmd = slot->tune_cmd; for (i = 0; i < MMC_TUNING_MAX; i++) { memset(tune_req, 0, sizeof(*tune_req)); tune_req->cmd = tune_cmd; tune_req->done = sdhci_req_wakeup; tune_req->done_data = slot; slot->req = tune_req; slot->flags = 0; sdhci_start(slot); while (!(tune_req->flags & MMC_REQ_DONE)) msleep(tune_req, &slot->mtx, 0, "sdhciet", 0); if (!(tune_req->flags & MMC_TUNE_DONE)) break; hostctrl2 = RD2(slot, SDHCI_HOST_CONTROL2); if (!(hostctrl2 & SDHCI_CTRL2_EXEC_TUNING)) break; if (tune_cmd->opcode == MMC_SEND_TUNING_BLOCK) DELAY(1000); } /* * Restore DMA usage and interrupts. * Note that the interrupt aggregation code might have cleared * SDHCI_INT_DMA_END and/or SDHCI_INT_RESPONSE in slot->intmask * and SDHCI_SIGNAL_ENABLE respectively so ensure SDHCI_INT_ENABLE * doesn't lose these. */ slot->opt = opt; slot->intmask = intmask; WR4(slot, SDHCI_INT_ENABLE, intmask | SDHCI_INT_DMA_END | SDHCI_INT_RESPONSE); WR4(slot, SDHCI_SIGNAL_ENABLE, intmask); if ((hostctrl2 & (SDHCI_CTRL2_EXEC_TUNING | SDHCI_CTRL2_SAMPLING_CLOCK)) == SDHCI_CTRL2_SAMPLING_CLOCK) { slot->retune_req = 0; return (0); } slot_printf(slot, "Tuning failed, using fixed sampling clock\n"); WR2(slot, SDHCI_HOST_CONTROL2, hostctrl2 & ~(SDHCI_CTRL2_EXEC_TUNING | SDHCI_CTRL2_SAMPLING_CLOCK)); sdhci_reset(slot, SDHCI_RESET_CMD | SDHCI_RESET_DATA); return (EIO); } static void sdhci_retune(void *arg) { struct sdhci_slot *slot = arg; slot->retune_req |= SDHCI_RETUNE_REQ_NEEDED; } #ifdef MMCCAM static void sdhci_req_done(struct sdhci_slot *slot) { union ccb *ccb; if (__predict_false(sdhci_debug > 1)) slot_printf(slot, "%s\n", __func__); if (slot->ccb != NULL && slot->curcmd != NULL) { callout_stop(&slot->timeout_callout); ccb = slot->ccb; slot->ccb = NULL; slot->curcmd = NULL; /* Tell CAM the request is finished */ struct ccb_mmcio *mmcio; mmcio = &ccb->mmcio; ccb->ccb_h.status = (mmcio->cmd.error == 0 ? 
CAM_REQ_CMP : CAM_REQ_CMP_ERR); xpt_done(ccb); } } #else static void sdhci_req_done(struct sdhci_slot *slot) { struct mmc_request *req; if (slot->req != NULL && slot->curcmd != NULL) { callout_stop(&slot->timeout_callout); req = slot->req; slot->req = NULL; slot->curcmd = NULL; req->done(req); } } #endif static void sdhci_req_wakeup(struct mmc_request *req) { struct sdhci_slot *slot; slot = req->done_data; req->flags |= MMC_REQ_DONE; wakeup(req); } static void sdhci_timeout(void *arg) { struct sdhci_slot *slot = arg; if (slot->curcmd != NULL) { slot_printf(slot, "Controller timeout\n"); sdhci_dumpregs(slot); sdhci_reset(slot, SDHCI_RESET_CMD | SDHCI_RESET_DATA); slot->curcmd->error = MMC_ERR_TIMEOUT; sdhci_req_done(slot); } else { slot_printf(slot, "Spurious timeout - no active command\n"); } } static void sdhci_set_transfer_mode(struct sdhci_slot *slot, const struct mmc_data *data) { uint16_t mode; if (data == NULL) return; mode = SDHCI_TRNS_BLK_CNT_EN; if (data->len > 512) { mode |= SDHCI_TRNS_MULTI; if (__predict_true( #ifdef MMCCAM slot->ccb->mmcio.stop.opcode == MMC_STOP_TRANSMISSION && #else slot->req->stop != NULL && #endif !(slot->quirks & SDHCI_QUIRK_BROKEN_AUTO_STOP))) mode |= SDHCI_TRNS_ACMD12; } if (data->flags & MMC_DATA_READ) mode |= SDHCI_TRNS_READ; if (slot->flags & SDHCI_USE_DMA) mode |= SDHCI_TRNS_DMA; WR2(slot, SDHCI_TRANSFER_MODE, mode); } static void sdhci_start_command(struct sdhci_slot *slot, struct mmc_command *cmd) { int flags, timeout; uint32_t mask; slot->curcmd = cmd; slot->cmd_done = 0; cmd->error = MMC_ERR_NONE; /* This flag combination is not supported by the controller. */ if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY)) { slot_printf(slot, "Unsupported response type!\n"); cmd->error = MMC_ERR_FAILED; sdhci_req_done(slot); return; } /* * Do not issue command if there is no card, clock or power. * Controller will not detect timeout without clock active. */ if (!SDHCI_GET_CARD_PRESENT(slot->bus, slot) || slot->power == 0 || slot->clock == 0) { slot_printf(slot, "Cannot issue a command (power=%d clock=%d)", slot->power, slot->clock); cmd->error = MMC_ERR_FAILED; sdhci_req_done(slot); return; } /* Always wait for free CMD bus. */ mask = SDHCI_CMD_INHIBIT; /* Wait for free DAT if we have data or busy signal. */ if (cmd->data != NULL || (cmd->flags & MMC_RSP_BUSY)) mask |= SDHCI_DAT_INHIBIT; /* * We shouldn't wait for DAT for stop commands or CMD19/CMD21. Note * that these latter are also special in that SDHCI_CMD_DATA should * be set below but no actual data is ever read from the controller. */ #ifdef MMCCAM if (cmd == &slot->ccb->mmcio.stop || #else if (cmd == slot->req->stop || #endif __predict_false(cmd->opcode == MMC_SEND_TUNING_BLOCK || cmd->opcode == MMC_SEND_TUNING_BLOCK_HS200)) mask &= ~SDHCI_DAT_INHIBIT; /* * Wait for bus no more than 250 ms. Typically there will be no wait * here at all, but when writing a crash dump we may be bypassing the * host platform's interrupt handler, and in some cases that handler * may be working around hardware quirks such as not respecting r1b * busy indications. In those cases, this wait-loop serves the purpose * of waiting for the prior command and data transfers to be done, and * SD cards are allowed to take up to 250ms for write and erase ops. * (It's usually more like 20-30ms in the real world.)
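 *
 * In other words, the code below is a bounded 1 ms poll of the inhibit
 * bits in SDHCI_PRESENT_STATE.  Condensed, the same pattern reads
 * (a sketch, not a drop-in replacement; the real loop fails the command
 * rather than returning an errno):
 *
 *	for (timeout = 250; mask & RD4(slot, SDHCI_PRESENT_STATE); timeout--) {
 *		if (timeout == 0)
 *			return (ETIMEDOUT);
 *		DELAY(1000);
 *	}
 *
 * which gives the 250 x 1 ms = 250 ms worst case described above.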
*/ timeout = 250; while (mask & RD4(slot, SDHCI_PRESENT_STATE)) { if (timeout == 0) { slot_printf(slot, "Controller never released " "inhibit bit(s).\n"); sdhci_dumpregs(slot); cmd->error = MMC_ERR_FAILED; sdhci_req_done(slot); return; } timeout--; DELAY(1000); } /* Prepare command flags. */ if (!(cmd->flags & MMC_RSP_PRESENT)) flags = SDHCI_CMD_RESP_NONE; else if (cmd->flags & MMC_RSP_136) flags = SDHCI_CMD_RESP_LONG; else if (cmd->flags & MMC_RSP_BUSY) flags = SDHCI_CMD_RESP_SHORT_BUSY; else flags = SDHCI_CMD_RESP_SHORT; if (cmd->flags & MMC_RSP_CRC) flags |= SDHCI_CMD_CRC; if (cmd->flags & MMC_RSP_OPCODE) flags |= SDHCI_CMD_INDEX; if (cmd->data != NULL) flags |= SDHCI_CMD_DATA; if (cmd->opcode == MMC_STOP_TRANSMISSION) flags |= SDHCI_CMD_TYPE_ABORT; /* Prepare data. */ sdhci_start_data(slot, cmd->data); /* * Interrupt aggregation: To reduce the total number of interrupts, * group the response interrupt with the data interrupt when possible. * If there is going to be a data interrupt, mask the response one. */ if (slot->data_done == 0) { WR4(slot, SDHCI_SIGNAL_ENABLE, slot->intmask &= ~SDHCI_INT_RESPONSE); } /* Set command argument. */ WR4(slot, SDHCI_ARGUMENT, cmd->arg); /* Set data transfer mode. */ sdhci_set_transfer_mode(slot, cmd->data); if (__predict_false(sdhci_debug > 1)) slot_printf(slot, "Starting command!\n"); /* Start command. */ WR2(slot, SDHCI_COMMAND_FLAGS, (cmd->opcode << 8) | (flags & 0xff)); /* Start timeout callout. */ callout_reset(&slot->timeout_callout, slot->timeout * hz, sdhci_timeout, slot); } static void sdhci_finish_command(struct sdhci_slot *slot) { int i; uint32_t val; uint8_t extra; if (__predict_false(sdhci_debug > 1)) slot_printf(slot, "%s: called, err %d flags %d\n", __func__, slot->curcmd->error, slot->curcmd->flags); slot->cmd_done = 1; /* * Interrupt aggregation: Restore command interrupt. * Main restore point for the case when command interrupt * happened first. */ if (__predict_true(slot->curcmd->opcode != MMC_SEND_TUNING_BLOCK && slot->curcmd->opcode != MMC_SEND_TUNING_BLOCK_HS200)) WR4(slot, SDHCI_SIGNAL_ENABLE, slot->intmask |= SDHCI_INT_RESPONSE); /* In case of error - reset host and return. */ if (slot->curcmd->error) { if (slot->curcmd->error == MMC_ERR_BADCRC) slot->retune_req |= SDHCI_RETUNE_REQ_RESET; sdhci_reset(slot, SDHCI_RESET_CMD); sdhci_reset(slot, SDHCI_RESET_DATA); sdhci_start(slot); return; } /* If command has response - fetch it. */ if (slot->curcmd->flags & MMC_RSP_PRESENT) { if (slot->curcmd->flags & MMC_RSP_136) { /* CRC is stripped so we need one byte shift. */ extra = 0; for (i = 0; i < 4; i++) { val = RD4(slot, SDHCI_RESPONSE + i * 4); if (slot->quirks & SDHCI_QUIRK_DONT_SHIFT_RESPONSE) slot->curcmd->resp[3 - i] = val; else { slot->curcmd->resp[3 - i] = (val << 8) | extra; extra = val >> 24; } } } else slot->curcmd->resp[0] = RD4(slot, SDHCI_RESPONSE); } if (__predict_false(sdhci_debug > 1)) printf("Resp: %02x %02x %02x %02x\n", slot->curcmd->resp[0], slot->curcmd->resp[1], slot->curcmd->resp[2], slot->curcmd->resp[3]); /* If data ready - finish. */ if (slot->data_done) sdhci_start(slot); } static void sdhci_start_data(struct sdhci_slot *slot, const struct mmc_data *data) { uint32_t blkcnt, blksz, current_timeout, sdma_bbufsz, target_timeout; uint8_t div; if (data == NULL && (slot->curcmd->flags & MMC_RSP_BUSY) == 0) { slot->data_done = 1; return; } slot->data_done = 0; /* Calculate and set data timeout.*/ /* XXX: We should have this from mmc layer, now assume 1 sec.
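 *
 * Worked example, assuming a hypothetical 48 MHz timeout clock, i.e.
 * slot->timeout_clk == 48000 in the kHz units the arithmetic below implies:
 *
 *	current_timeout = (1 << 13) * 1000 / 48000;	// ~170 us per 2^13 clocks
 *	// 170 << 12 = 696320 us < 1000000, but 170 << 13 = 1392640 us >= 1000000,
 *	// so the loop settles on div = 13 (0xD) for the 1 s target.
 *
 * The resulting divisor is then written to SDHCI_TIMEOUT_CONTROL.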
*/ if (slot->quirks & SDHCI_QUIRK_BROKEN_TIMEOUT_VAL) { div = 0xE; } else { target_timeout = 1000000; div = 0; current_timeout = (1 << 13) * 1000 / slot->timeout_clk; while (current_timeout < target_timeout && div < 0xE) { ++div; current_timeout <<= 1; } /* Compensate for an off-by-one error in the CaFe chip.*/ if (div < 0xE && (slot->quirks & SDHCI_QUIRK_INCR_TIMEOUT_CONTROL)) { ++div; } } WR1(slot, SDHCI_TIMEOUT_CONTROL, div); if (data == NULL) return; /* Use DMA if possible. */ if ((slot->opt & SDHCI_HAVE_DMA)) slot->flags |= SDHCI_USE_DMA; /* If data is small, broken DMA may return zeroes instead of data. */ if ((slot->quirks & SDHCI_QUIRK_BROKEN_TIMINGS) && (data->len <= 512)) slot->flags &= ~SDHCI_USE_DMA; /* Some controllers require even block sizes. */ if ((slot->quirks & SDHCI_QUIRK_32BIT_DMA_SIZE) && ((data->len) & 0x3)) slot->flags &= ~SDHCI_USE_DMA; /* Load DMA buffer. */ if (slot->flags & SDHCI_USE_DMA) { sdma_bbufsz = slot->sdma_bbufsz; if (data->flags & MMC_DATA_READ) bus_dmamap_sync(slot->dmatag, slot->dmamap, BUS_DMASYNC_PREREAD); else { memcpy(slot->dmamem, data->data, ulmin(data->len, sdma_bbufsz)); bus_dmamap_sync(slot->dmatag, slot->dmamap, BUS_DMASYNC_PREWRITE); } WR4(slot, SDHCI_DMA_ADDRESS, slot->paddr); /* * Interrupt aggregation: Mask border interrupt for the last * bounce buffer and unmask otherwise. */ if (data->len == sdma_bbufsz) slot->intmask &= ~SDHCI_INT_DMA_END; else slot->intmask |= SDHCI_INT_DMA_END; WR4(slot, SDHCI_SIGNAL_ENABLE, slot->intmask); } /* Current data offset for both PIO and DMA. */ slot->offset = 0; /* Set block size and request border interrupts on the SDMA boundary. */ blksz = SDHCI_MAKE_BLKSZ(slot->sdma_boundary, ulmin(data->len, 512)); WR2(slot, SDHCI_BLOCK_SIZE, blksz); /* Set block count. */ blkcnt = howmany(data->len, 512); WR2(slot, SDHCI_BLOCK_COUNT, blkcnt); if (__predict_false(sdhci_debug > 1)) slot_printf(slot, "Blk size: 0x%08x | Blk cnt: 0x%08x\n", blksz, blkcnt); } void sdhci_finish_data(struct sdhci_slot *slot) { struct mmc_data *data = slot->curcmd->data; size_t left; /* Interrupt aggregation: Restore command interrupt. * Auxiliary restore point for the case when data interrupt * happened first. */ if (!slot->cmd_done) { WR4(slot, SDHCI_SIGNAL_ENABLE, slot->intmask |= SDHCI_INT_RESPONSE); } /* Unload rest of data from DMA buffer. */ if (!slot->data_done && (slot->flags & SDHCI_USE_DMA) && slot->curcmd->data != NULL) { if (data->flags & MMC_DATA_READ) { left = data->len - slot->offset; bus_dmamap_sync(slot->dmatag, slot->dmamap, BUS_DMASYNC_POSTREAD); memcpy((u_char*)data->data + slot->offset, slot->dmamem, ulmin(left, slot->sdma_bbufsz)); } else bus_dmamap_sync(slot->dmatag, slot->dmamap, BUS_DMASYNC_POSTWRITE); } slot->data_done = 1; /* If there was error - reset the host. */ if (slot->curcmd->error) { if (slot->curcmd->error == MMC_ERR_BADCRC) slot->retune_req |= SDHCI_RETUNE_REQ_RESET; sdhci_reset(slot, SDHCI_RESET_CMD); sdhci_reset(slot, SDHCI_RESET_DATA); sdhci_start(slot); return; } /* If we already have command response - finish. */ if (slot->cmd_done) sdhci_start(slot); } #ifdef MMCCAM static void sdhci_start(struct sdhci_slot *slot) { union ccb *ccb; struct ccb_mmcio *mmcio; ccb = slot->ccb; if (ccb == NULL) return; mmcio = &ccb->mmcio; if (!(slot->flags & CMD_STARTED)) { slot->flags |= CMD_STARTED; sdhci_start_command(slot, &mmcio->cmd); return; } /* * Old stack doesn't use this! * Enabling this code causes significant performance degradation * and IRQ storms on BBB, Wandboard behaves fine. 
* Not using this code does no harm... if (!(slot->flags & STOP_STARTED) && mmcio->stop.opcode != 0) { slot->flags |= STOP_STARTED; sdhci_start_command(slot, &mmcio->stop); return; } */ if (__predict_false(sdhci_debug > 1)) slot_printf(slot, "result: %d\n", mmcio->cmd.error); if (mmcio->cmd.error == 0 && (slot->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST)) { sdhci_reset(slot, SDHCI_RESET_CMD); sdhci_reset(slot, SDHCI_RESET_DATA); } sdhci_req_done(slot); } #else static void sdhci_start(struct sdhci_slot *slot) { const struct mmc_request *req; req = slot->req; if (req == NULL) return; if (!(slot->flags & CMD_STARTED)) { slot->flags |= CMD_STARTED; sdhci_start_command(slot, req->cmd); return; } if ((slot->quirks & SDHCI_QUIRK_BROKEN_AUTO_STOP) && !(slot->flags & STOP_STARTED) && req->stop) { slot->flags |= STOP_STARTED; sdhci_start_command(slot, req->stop); return; } if (__predict_false(sdhci_debug > 1)) slot_printf(slot, "result: %d\n", req->cmd->error); if (!req->cmd->error && ((slot->curcmd == req->stop && (slot->quirks & SDHCI_QUIRK_BROKEN_AUTO_STOP)) || (slot->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST))) { sdhci_reset(slot, SDHCI_RESET_CMD); sdhci_reset(slot, SDHCI_RESET_DATA); } sdhci_req_done(slot); } #endif int sdhci_generic_request(device_t brdev __unused, device_t reqdev, struct mmc_request *req) { struct sdhci_slot *slot = device_get_ivars(reqdev); SDHCI_LOCK(slot); if (slot->req != NULL) { SDHCI_UNLOCK(slot); return (EBUSY); } if (__predict_false(sdhci_debug > 1)) { slot_printf(slot, "CMD%u arg %#x flags %#x dlen %u dflags %#x\n", req->cmd->opcode, req->cmd->arg, req->cmd->flags, (req->cmd->data)?(u_int)req->cmd->data->len:0, (req->cmd->data)?req->cmd->data->flags:0); } slot->req = req; slot->flags = 0; sdhci_start(slot); SDHCI_UNLOCK(slot); if (dumping) { while (slot->req != NULL) { sdhci_generic_intr(slot); DELAY(10); } } return (0); } int sdhci_generic_get_ro(device_t brdev __unused, device_t reqdev) { struct sdhci_slot *slot = device_get_ivars(reqdev); uint32_t val; SDHCI_LOCK(slot); val = RD4(slot, SDHCI_PRESENT_STATE); SDHCI_UNLOCK(slot); return (!(val & SDHCI_WRITE_PROTECT)); } int sdhci_generic_acquire_host(device_t brdev __unused, device_t reqdev) { struct sdhci_slot *slot = device_get_ivars(reqdev); int err = 0; SDHCI_LOCK(slot); while (slot->bus_busy) msleep(slot, &slot->mtx, 0, "sdhciah", 0); slot->bus_busy++; /* Activate led. */ WR1(slot, SDHCI_HOST_CONTROL, slot->hostctrl |= SDHCI_CTRL_LED); SDHCI_UNLOCK(slot); return (err); } int sdhci_generic_release_host(device_t brdev __unused, device_t reqdev) { struct sdhci_slot *slot = device_get_ivars(reqdev); SDHCI_LOCK(slot); /* Deactivate led. 
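 *
 * The LED mirrors the acquire/release pair: sdhci_generic_acquire_host()
 * above sleeps until bus_busy drops, bumps it and lights the LED, while this
 * function clears the LED, drops bus_busy and wakes the next waiter.  A
 * caller on the mmc side is expected to bracket its work roughly as follows
 * (a sketch using the mmcbr interface methods, not code from this file):
 *
 *	MMCBR_ACQUIRE_HOST(busdev, dev);
 *	MMCBR_REQUEST(busdev, dev, req);	// ends up in sdhci_generic_request()
 *	// ... sleep until req->done() fires ...
 *	MMCBR_RELEASE_HOST(busdev, dev);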
*/ WR1(slot, SDHCI_HOST_CONTROL, slot->hostctrl &= ~SDHCI_CTRL_LED); slot->bus_busy--; SDHCI_UNLOCK(slot); wakeup(slot); return (0); } static void sdhci_cmd_irq(struct sdhci_slot *slot, uint32_t intmask) { if (!slot->curcmd) { slot_printf(slot, "Got command interrupt 0x%08x, but " "there is no active command.\n", intmask); sdhci_dumpregs(slot); return; } if (intmask & SDHCI_INT_TIMEOUT) slot->curcmd->error = MMC_ERR_TIMEOUT; else if (intmask & SDHCI_INT_CRC) slot->curcmd->error = MMC_ERR_BADCRC; else if (intmask & (SDHCI_INT_END_BIT | SDHCI_INT_INDEX)) slot->curcmd->error = MMC_ERR_FIFO; sdhci_finish_command(slot); } static void sdhci_data_irq(struct sdhci_slot *slot, uint32_t intmask) { struct mmc_data *data; size_t left; uint32_t sdma_bbufsz; if (!slot->curcmd) { slot_printf(slot, "Got data interrupt 0x%08x, but " "there is no active command.\n", intmask); sdhci_dumpregs(slot); return; } if (slot->curcmd->data == NULL && (slot->curcmd->flags & MMC_RSP_BUSY) == 0) { slot_printf(slot, "Got data interrupt 0x%08x, but " "there is no active data operation.\n", intmask); sdhci_dumpregs(slot); return; } if (intmask & SDHCI_INT_DATA_TIMEOUT) slot->curcmd->error = MMC_ERR_TIMEOUT; else if (intmask & (SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_END_BIT)) slot->curcmd->error = MMC_ERR_BADCRC; if (slot->curcmd->data == NULL && (intmask & (SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL | SDHCI_INT_DMA_END))) { slot_printf(slot, "Got data interrupt 0x%08x, but " "there is busy-only command.\n", intmask); sdhci_dumpregs(slot); slot->curcmd->error = MMC_ERR_INVALID; } if (slot->curcmd->error) { /* No need to continue after any error. */ goto done; } /* Handle tuning completion interrupt. */ if (__predict_false((intmask & SDHCI_INT_DATA_AVAIL) && (slot->curcmd->opcode == MMC_SEND_TUNING_BLOCK || slot->curcmd->opcode == MMC_SEND_TUNING_BLOCK_HS200))) { slot->req->flags |= MMC_TUNE_DONE; sdhci_finish_command(slot); sdhci_finish_data(slot); return; } /* Handle PIO interrupt. */ if (intmask & (SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL)) { if ((slot->opt & SDHCI_PLATFORM_TRANSFER) && SDHCI_PLATFORM_WILL_HANDLE(slot->bus, slot)) { SDHCI_PLATFORM_START_TRANSFER(slot->bus, slot, &intmask); slot->flags |= PLATFORM_DATA_STARTED; } else sdhci_transfer_pio(slot); } /* Handle DMA border. */ if (intmask & SDHCI_INT_DMA_END) { data = slot->curcmd->data; sdma_bbufsz = slot->sdma_bbufsz; /* Unload DMA buffer ... */ left = data->len - slot->offset; if (data->flags & MMC_DATA_READ) { bus_dmamap_sync(slot->dmatag, slot->dmamap, BUS_DMASYNC_POSTREAD); memcpy((u_char*)data->data + slot->offset, slot->dmamem, ulmin(left, sdma_bbufsz)); } else { bus_dmamap_sync(slot->dmatag, slot->dmamap, BUS_DMASYNC_POSTWRITE); } /* ... and reload it again. */ slot->offset += sdma_bbufsz; left = data->len - slot->offset; if (data->flags & MMC_DATA_READ) { bus_dmamap_sync(slot->dmatag, slot->dmamap, BUS_DMASYNC_PREREAD); } else { memcpy(slot->dmamem, (u_char*)data->data + slot->offset, ulmin(left, sdma_bbufsz)); bus_dmamap_sync(slot->dmatag, slot->dmamap, BUS_DMASYNC_PREWRITE); } /* * Interrupt aggregation: Mask border interrupt for the last * bounce buffer. */ if (left == sdma_bbufsz) { slot->intmask &= ~SDHCI_INT_DMA_END; WR4(slot, SDHCI_SIGNAL_ENABLE, slot->intmask); } /* Restart DMA. */ WR4(slot, SDHCI_DMA_ADDRESS, slot->paddr); } /* We have got all data. 
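 *
 * For example, with a hypothetical 4 KiB bounce buffer and a 16 KiB read,
 * the SDHCI_INT_DMA_END border above fires three times: slot->offset
 * advances 0 -> 4096 -> 8192 -> 12288, each time copying out the previous
 * 4 KiB and re-arming SDHCI_DMA_ADDRESS.  On the last reload left equals
 * sdma_bbufsz, so the border interrupt is masked and the final chunk is
 * copied out by sdhci_finish_data() once SDHCI_INT_DATA_END arrives below.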
*/ if (intmask & SDHCI_INT_DATA_END) { if (slot->flags & PLATFORM_DATA_STARTED) { slot->flags &= ~PLATFORM_DATA_STARTED; SDHCI_PLATFORM_FINISH_TRANSFER(slot->bus, slot); } else sdhci_finish_data(slot); } done: if (slot->curcmd != NULL && slot->curcmd->error != 0) { if (slot->flags & PLATFORM_DATA_STARTED) { slot->flags &= ~PLATFORM_DATA_STARTED; SDHCI_PLATFORM_FINISH_TRANSFER(slot->bus, slot); } else sdhci_finish_data(slot); } } static void sdhci_acmd_irq(struct sdhci_slot *slot, uint16_t acmd_err) { if (!slot->curcmd) { slot_printf(slot, "Got AutoCMD12 error 0x%04x, but " "there is no active command.\n", acmd_err); sdhci_dumpregs(slot); return; } slot_printf(slot, "Got AutoCMD12 error 0x%04x\n", acmd_err); sdhci_reset(slot, SDHCI_RESET_CMD); } void sdhci_generic_intr(struct sdhci_slot *slot) { uint32_t intmask, present; uint16_t val16; SDHCI_LOCK(slot); /* Read slot interrupt status. */ intmask = RD4(slot, SDHCI_INT_STATUS); if (intmask == 0 || intmask == 0xffffffff) { SDHCI_UNLOCK(slot); return; } if (__predict_false(sdhci_debug > 2)) slot_printf(slot, "Interrupt %#x\n", intmask); /* Handle tuning error interrupt. */ if (__predict_false(intmask & SDHCI_INT_TUNEERR)) { WR4(slot, SDHCI_INT_STATUS, SDHCI_INT_TUNEERR); slot_printf(slot, "Tuning error indicated\n"); slot->retune_req |= SDHCI_RETUNE_REQ_RESET; if (slot->curcmd) { slot->curcmd->error = MMC_ERR_BADCRC; sdhci_finish_command(slot); } } /* Handle re-tuning interrupt. */ if (__predict_false(intmask & SDHCI_INT_RETUNE)) slot->retune_req |= SDHCI_RETUNE_REQ_NEEDED; /* Handle card presence interrupts. */ if (intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) { present = (intmask & SDHCI_INT_CARD_INSERT) != 0; slot->intmask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE); slot->intmask |= present ? SDHCI_INT_CARD_REMOVE : SDHCI_INT_CARD_INSERT; WR4(slot, SDHCI_INT_ENABLE, slot->intmask); WR4(slot, SDHCI_SIGNAL_ENABLE, slot->intmask); WR4(slot, SDHCI_INT_STATUS, intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)); sdhci_handle_card_present_locked(slot, present); } /* Handle command interrupts. */ if (intmask & SDHCI_INT_CMD_MASK) { WR4(slot, SDHCI_INT_STATUS, intmask & SDHCI_INT_CMD_MASK); sdhci_cmd_irq(slot, intmask & SDHCI_INT_CMD_MASK); } /* Handle data interrupts. */ if (intmask & SDHCI_INT_DATA_MASK) { WR4(slot, SDHCI_INT_STATUS, intmask & SDHCI_INT_DATA_MASK); /* Don't call data_irq in case of errored command. */ if ((intmask & SDHCI_INT_CMD_ERROR_MASK) == 0) sdhci_data_irq(slot, intmask & SDHCI_INT_DATA_MASK); } /* Handle AutoCMD12 error interrupt. */ if (intmask & SDHCI_INT_ACMD12ERR) { /* Clearing SDHCI_INT_ACMD12ERR may clear SDHCI_ACMD12_ERR. */ val16 = RD2(slot, SDHCI_ACMD12_ERR); WR4(slot, SDHCI_INT_STATUS, SDHCI_INT_ACMD12ERR); sdhci_acmd_irq(slot, val16); } /* Handle bus power interrupt. */ if (intmask & SDHCI_INT_BUS_POWER) { WR4(slot, SDHCI_INT_STATUS, SDHCI_INT_BUS_POWER); slot_printf(slot, "Card is consuming too much power!\n"); } intmask &= ~(SDHCI_INT_ERROR | SDHCI_INT_TUNEERR | SDHCI_INT_RETUNE | SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE | SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK | SDHCI_INT_ACMD12ERR | SDHCI_INT_BUS_POWER); /* The rest is unknown. 
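 *
 * Interrupt status bits are write-one-to-clear, so acking the leftovers
 * below keeps an unknown source from re-asserting forever.  The
 * card-presence path above applies the same idea preventively by re-arming
 * only the opposite event: after an insertion only SDHCI_INT_CARD_REMOVE
 * stays enabled (and vice versa), so a bouncing card-detect line cannot
 * storm the handler with the event it has already reported.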
*/ if (intmask) { WR4(slot, SDHCI_INT_STATUS, intmask); slot_printf(slot, "Unexpected interrupt 0x%08x.\n", intmask); sdhci_dumpregs(slot); } SDHCI_UNLOCK(slot); } int sdhci_generic_read_ivar(device_t bus, device_t child, int which, uintptr_t *result) { const struct sdhci_slot *slot = device_get_ivars(child); switch (which) { default: return (EINVAL); case MMCBR_IVAR_BUS_MODE: *result = slot->host.ios.bus_mode; break; case MMCBR_IVAR_BUS_WIDTH: *result = slot->host.ios.bus_width; break; case MMCBR_IVAR_CHIP_SELECT: *result = slot->host.ios.chip_select; break; case MMCBR_IVAR_CLOCK: *result = slot->host.ios.clock; break; case MMCBR_IVAR_F_MIN: *result = slot->host.f_min; break; case MMCBR_IVAR_F_MAX: *result = slot->host.f_max; break; case MMCBR_IVAR_HOST_OCR: *result = slot->host.host_ocr; break; case MMCBR_IVAR_MODE: *result = slot->host.mode; break; case MMCBR_IVAR_OCR: *result = slot->host.ocr; break; case MMCBR_IVAR_POWER_MODE: *result = slot->host.ios.power_mode; break; case MMCBR_IVAR_VDD: *result = slot->host.ios.vdd; break; case MMCBR_IVAR_RETUNE_REQ: if (slot->opt & SDHCI_TUNING_ENABLED) { if (slot->retune_req & SDHCI_RETUNE_REQ_RESET) { *result = retune_req_reset; break; } if (slot->retune_req & SDHCI_RETUNE_REQ_NEEDED) { *result = retune_req_normal; break; } } *result = retune_req_none; break; case MMCBR_IVAR_VCCQ: *result = slot->host.ios.vccq; break; case MMCBR_IVAR_CAPS: *result = slot->host.caps; break; case MMCBR_IVAR_TIMING: *result = slot->host.ios.timing; break; case MMCBR_IVAR_MAX_DATA: /* * Re-tuning modes 1 and 2 restrict the maximum data length * per read/write command to 4 MiB. */ if (slot->opt & SDHCI_TUNING_ENABLED && (slot->retune_mode == SDHCI_RETUNE_MODE_1 || slot->retune_mode == SDHCI_RETUNE_MODE_2)) { *result = 4 * 1024 * 1024 / MMC_SECTOR_SIZE; break; } *result = 65535; break; case MMCBR_IVAR_MAX_BUSY_TIMEOUT: /* * Currently, sdhci_start_data() hardcodes 1 s for all CMDs. 
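 *
 * That is, the value returned below is 1 s expressed in microseconds
 * (1000000).  For comparison, the 4 MiB re-tuning cap in the MAX_DATA case
 * above works out to 4 * 1024 * 1024 / 512 = 8192 sectors with the usual
 * 512-byte MMC_SECTOR_SIZE.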
*/ *result = 1000000; break; } return (0); } int sdhci_generic_write_ivar(device_t bus, device_t child, int which, uintptr_t value) { struct sdhci_slot *slot = device_get_ivars(child); uint32_t clock, max_clock; int i; if (sdhci_debug > 1) slot_printf(slot, "%s: var=%d\n", __func__, which); switch (which) { default: return (EINVAL); case MMCBR_IVAR_BUS_MODE: slot->host.ios.bus_mode = value; break; case MMCBR_IVAR_BUS_WIDTH: slot->host.ios.bus_width = value; break; case MMCBR_IVAR_CHIP_SELECT: slot->host.ios.chip_select = value; break; case MMCBR_IVAR_CLOCK: if (value > 0) { max_clock = slot->max_clk; clock = max_clock; if (slot->version < SDHCI_SPEC_300) { for (i = 0; i < SDHCI_200_MAX_DIVIDER; i <<= 1) { if (clock <= value) break; clock >>= 1; } } else { for (i = 0; i < SDHCI_300_MAX_DIVIDER; i += 2) { if (clock <= value) break; clock = max_clock / (i + 2); } } slot->host.ios.clock = clock; } else slot->host.ios.clock = 0; break; case MMCBR_IVAR_MODE: slot->host.mode = value; break; case MMCBR_IVAR_OCR: slot->host.ocr = value; break; case MMCBR_IVAR_POWER_MODE: slot->host.ios.power_mode = value; break; case MMCBR_IVAR_VDD: slot->host.ios.vdd = value; break; case MMCBR_IVAR_VCCQ: slot->host.ios.vccq = value; break; case MMCBR_IVAR_TIMING: slot->host.ios.timing = value; break; case MMCBR_IVAR_CAPS: case MMCBR_IVAR_HOST_OCR: case MMCBR_IVAR_F_MIN: case MMCBR_IVAR_F_MAX: case MMCBR_IVAR_MAX_DATA: case MMCBR_IVAR_RETUNE_REQ: return (EINVAL); } return (0); } #ifdef MMCCAM void sdhci_start_slot(struct sdhci_slot *slot) { if ((slot->devq = cam_simq_alloc(1)) == NULL) goto fail; mtx_init(&slot->sim_mtx, "sdhcisim", NULL, MTX_DEF); slot->sim = cam_sim_alloc(sdhci_cam_action, sdhci_cam_poll, "sdhci_slot", slot, device_get_unit(slot->bus), &slot->sim_mtx, 1, 1, slot->devq); if (slot->sim == NULL) { cam_simq_free(slot->devq); slot_printf(slot, "cannot allocate CAM SIM\n"); goto fail; } mtx_lock(&slot->sim_mtx); if (xpt_bus_register(slot->sim, slot->bus, 0) != 0) { slot_printf(slot, "cannot register SCSI pass-through bus\n"); cam_sim_free(slot->sim, FALSE); cam_simq_free(slot->devq); mtx_unlock(&slot->sim_mtx); goto fail; } mtx_unlock(&slot->sim_mtx); /* End CAM-specific init */ slot->card_present = 0; sdhci_card_task(slot, 0); return; fail: if (slot->sim != NULL) { mtx_lock(&slot->sim_mtx); xpt_bus_deregister(cam_sim_path(slot->sim)); cam_sim_free(slot->sim, FALSE); mtx_unlock(&slot->sim_mtx); } if (slot->devq != NULL) cam_simq_free(slot->devq); } static void sdhci_cam_handle_mmcio(struct cam_sim *sim, union ccb *ccb) { struct sdhci_slot *slot; slot = cam_sim_softc(sim); sdhci_cam_request(slot, ccb); } void sdhci_cam_action(struct cam_sim *sim, union ccb *ccb) { struct sdhci_slot *slot; slot = cam_sim_softc(sim); if (slot == NULL) { ccb->ccb_h.status = CAM_SEL_TIMEOUT; xpt_done(ccb); return; } mtx_assert(&slot->sim_mtx, MA_OWNED); switch (ccb->ccb_h.func_code) { case XPT_PATH_INQ: { struct ccb_pathinq *cpi; cpi = &ccb->cpi; cpi->version_num = 1; cpi->hba_inquiry = 0; cpi->target_sprt = 0; cpi->hba_misc = PIM_NOBUSRESET | PIM_SEQSCAN; cpi->hba_eng_cnt = 0; cpi->max_target = 0; cpi->max_lun = 0; cpi->initiator_id = 1; cpi->maxio = MAXPHYS; strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); strncpy(cpi->hba_vid, "Deglitch Networks", HBA_IDLEN); strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); cpi->unit_number = cam_sim_unit(sim); cpi->bus_id = cam_sim_bus(sim); cpi->base_transfer_speed = 100; /* XXX WTF? 
*/ cpi->protocol = PROTO_MMCSD; cpi->protocol_version = SCSI_REV_0; cpi->transport = XPORT_MMCSD; cpi->transport_version = 0; cpi->ccb_h.status = CAM_REQ_CMP; break; } case XPT_GET_TRAN_SETTINGS: { struct ccb_trans_settings *cts = &ccb->cts; + uint32_t max_data; if (sdhci_debug > 1) slot_printf(slot, "Got XPT_GET_TRAN_SETTINGS\n"); cts->protocol = PROTO_MMCSD; cts->protocol_version = 1; cts->transport = XPORT_MMCSD; cts->transport_version = 1; cts->xport_specific.valid = 0; cts->proto_specific.mmc.host_ocr = slot->host.host_ocr; cts->proto_specific.mmc.host_f_min = slot->host.f_min; cts->proto_specific.mmc.host_f_max = slot->host.f_max; cts->proto_specific.mmc.host_caps = slot->host.caps; + /* + * Re-tuning modes 1 and 2 restrict the maximum data length + * per read/write command to 4 MiB. + */ + if (slot->opt & SDHCI_TUNING_ENABLED && + (slot->retune_mode == SDHCI_RETUNE_MODE_1 || + slot->retune_mode == SDHCI_RETUNE_MODE_2)) { + max_data = 4 * 1024 * 1024 / MMC_SECTOR_SIZE; + } else { + max_data = 65535; + } + cts->proto_specific.mmc.host_max_data = max_data; + memcpy(&cts->proto_specific.mmc.ios, &slot->host.ios, sizeof(struct mmc_ios)); ccb->ccb_h.status = CAM_REQ_CMP; break; } case XPT_SET_TRAN_SETTINGS: { if (sdhci_debug > 1) slot_printf(slot, "Got XPT_SET_TRAN_SETTINGS\n"); sdhci_cam_settran_settings(slot, ccb); ccb->ccb_h.status = CAM_REQ_CMP; break; } case XPT_RESET_BUS: if (sdhci_debug > 1) slot_printf(slot, "Got XPT_RESET_BUS, ACK it...\n"); ccb->ccb_h.status = CAM_REQ_CMP; break; case XPT_MMC_IO: /* * Here is the HW-dependent part of * sending the command to the underlying h/w * At some point in the future an interrupt comes. * Then the request will be marked as completed. */ if (__predict_false(sdhci_debug > 1)) slot_printf(slot, "Got XPT_MMC_IO\n"); ccb->ccb_h.status = CAM_REQ_INPROG; sdhci_cam_handle_mmcio(sim, ccb); return; /* NOTREACHED */ break; default: ccb->ccb_h.status = CAM_REQ_INVALID; break; } xpt_done(ccb); return; } void sdhci_cam_poll(struct cam_sim *sim) { return; } static int sdhci_cam_get_possible_host_clock(const struct sdhci_slot *slot, int proposed_clock) { int max_clock, clock, i; if (proposed_clock == 0) return 0; max_clock = slot->max_clk; clock = max_clock; if (slot->version < SDHCI_SPEC_300) { for (i = 0; i < SDHCI_200_MAX_DIVIDER; i <<= 1) { if (clock <= proposed_clock) break; clock >>= 1; } } else { for (i = 0; i < SDHCI_300_MAX_DIVIDER; i += 2) { if (clock <= proposed_clock) break; clock = max_clock / (i + 2); } } return clock; } static int sdhci_cam_settran_settings(struct sdhci_slot *slot, union ccb *ccb) { struct mmc_ios *ios; const struct mmc_ios *new_ios; const struct ccb_trans_settings_mmc *cts; ios = &slot->host.ios; cts = &ccb->cts.proto_specific.mmc; new_ios = &cts->ios; /* Update only requested fields */ if (cts->ios_valid & MMC_CLK) { ios->clock = sdhci_cam_get_possible_host_clock(slot, new_ios->clock); slot_printf(slot, "Clock => %d\n", ios->clock); } if (cts->ios_valid & MMC_VDD) { ios->vdd = new_ios->vdd; slot_printf(slot, "VDD => %d\n", ios->vdd); } if (cts->ios_valid & MMC_CS) { ios->chip_select = new_ios->chip_select; slot_printf(slot, "CS => %d\n", ios->chip_select); } if (cts->ios_valid & MMC_BW) { ios->bus_width = new_ios->bus_width; slot_printf(slot, "Bus width => %d\n", ios->bus_width); } if (cts->ios_valid & MMC_PM) { ios->power_mode = new_ios->power_mode; slot_printf(slot, "Power mode => %d\n", ios->power_mode); } if (cts->ios_valid & MMC_BT) { ios->timing = new_ios->timing; slot_printf(slot, "Timing => %d\n", ios->timing); } 
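	/*
	 * Worked example for sdhci_cam_get_possible_host_clock() above,
	 * assuming a hypothetical 200 MHz base clock: a 3.0-spec host divides
	 * by even numbers, so a 52 MHz request settles on 200 MHz / 4 = 50 MHz
	 * and a 400 kHz identification request on 200 MHz / 500 = 400 kHz; a
	 * pre-3.0 host only halves the base clock, so the same requests map to
	 * 50 MHz and roughly 391 kHz (200 MHz / 512).
	 */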
if (cts->ios_valid & MMC_BM) { ios->bus_mode = new_ios->bus_mode; slot_printf(slot, "Bus mode => %d\n", ios->bus_mode); } /* XXX Provide a way to call a chip-specific IOS update, required for TI */ return (sdhci_cam_update_ios(slot)); } static int sdhci_cam_update_ios(struct sdhci_slot *slot) { struct mmc_ios *ios = &slot->host.ios; slot_printf(slot, "%s: power_mode=%d, clk=%d, bus_width=%d, timing=%d\n", __func__, ios->power_mode, ios->clock, ios->bus_width, ios->timing); SDHCI_LOCK(slot); /* Do full reset on bus power down to clear from any state. */ if (ios->power_mode == power_off) { WR4(slot, SDHCI_SIGNAL_ENABLE, 0); sdhci_init(slot); } /* Configure the bus. */ sdhci_set_clock(slot, ios->clock); sdhci_set_power(slot, (ios->power_mode == power_off) ? 0 : ios->vdd); if (ios->bus_width == bus_width_8) { slot->hostctrl |= SDHCI_CTRL_8BITBUS; slot->hostctrl &= ~SDHCI_CTRL_4BITBUS; } else if (ios->bus_width == bus_width_4) { slot->hostctrl &= ~SDHCI_CTRL_8BITBUS; slot->hostctrl |= SDHCI_CTRL_4BITBUS; } else if (ios->bus_width == bus_width_1) { slot->hostctrl &= ~SDHCI_CTRL_8BITBUS; slot->hostctrl &= ~SDHCI_CTRL_4BITBUS; } else { panic("Invalid bus width: %d", ios->bus_width); } if (ios->timing == bus_timing_hs && !(slot->quirks & SDHCI_QUIRK_DONT_SET_HISPD_BIT)) slot->hostctrl |= SDHCI_CTRL_HISPD; else slot->hostctrl &= ~SDHCI_CTRL_HISPD; WR1(slot, SDHCI_HOST_CONTROL, slot->hostctrl); /* Some controllers like reset after bus changes. */ if(slot->quirks & SDHCI_QUIRK_RESET_ON_IOS) sdhci_reset(slot, SDHCI_RESET_CMD | SDHCI_RESET_DATA); SDHCI_UNLOCK(slot); return (0); } static int sdhci_cam_request(struct sdhci_slot *slot, union ccb *ccb) { const struct ccb_mmcio *mmcio; mmcio = &ccb->mmcio; SDHCI_LOCK(slot); /* if (slot->req != NULL) { SDHCI_UNLOCK(slot); return (EBUSY); } */ if (__predict_false(sdhci_debug > 1)) { slot_printf(slot, "CMD%u arg %#x flags %#x dlen %u dflags %#x\n", mmcio->cmd.opcode, mmcio->cmd.arg, mmcio->cmd.flags, mmcio->cmd.data != NULL ? (unsigned int) mmcio->cmd.data->len : 0, mmcio->cmd.data != NULL ? mmcio->cmd.data->flags: 0); } if (mmcio->cmd.data != NULL) { if (mmcio->cmd.data->len == 0 || mmcio->cmd.data->flags == 0) panic("data->len = %d, data->flags = %d -- something is b0rked", (int)mmcio->cmd.data->len, mmcio->cmd.data->flags); } slot->ccb = ccb; slot->flags = 0; sdhci_start(slot); SDHCI_UNLOCK(slot); if (dumping) { while (slot->ccb != NULL) { sdhci_generic_intr(slot); DELAY(10); } } return (0); } #endif /* MMCCAM */ MODULE_VERSION(sdhci, SDHCI_VERSION);