diff --git a/sys/cam/mmc/mmc_da.c b/sys/cam/mmc/mmc_da.c index 0dfa43f4679b..8740421e416b 100644 --- a/sys/cam/mmc/mmc_da.c +++ b/sys/cam/mmc/mmc_da.c @@ -1,2008 +1,2081 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2006 Bernd Walter All rights reserved. * Copyright (c) 2009 Alexander Motin All rights reserved. * Copyright (c) 2015-2017 Ilya Bakulin All rights reserved. * Copyright (c) 2006 M. Warner Losh * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * Some code derived from the sys/dev/mmc and sys/cam/ata * Thanks to Warner Losh , Alexander Motin * Bernd Walter , and other authors. */ #include __FBSDID("$FreeBSD$"); //#include "opt_sdda.h" #include #ifdef _KERNEL #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* for PRIu64 */ #endif /* _KERNEL */ #ifndef _KERNEL #include #include #endif /* _KERNEL */ #include #include #include #include #include #include #include #include #include #include #include #ifdef _KERNEL typedef enum { SDDA_FLAG_OPEN = 0x0002, SDDA_FLAG_DIRTY = 0x0004 } sdda_flags; typedef enum { SDDA_STATE_INIT, SDDA_STATE_INVALID, SDDA_STATE_NORMAL, SDDA_STATE_PART_SWITCH, } sdda_state; #define SDDA_FMT_BOOT "sdda%dboot" #define SDDA_FMT_GP "sdda%dgp" #define SDDA_FMT_RPMB "sdda%drpmb" #define SDDA_LABEL_ENH "enh" #define SDDA_PART_NAMELEN (16 + 1) struct sdda_softc; struct sdda_part { struct disk *disk; struct bio_queue_head bio_queue; sdda_flags flags; struct sdda_softc *sc; u_int cnt; u_int type; bool ro; char name[SDDA_PART_NAMELEN]; }; struct sdda_softc { int outstanding_cmds; /* Number of active commands */ int refcount; /* Active xpt_action() calls */ sdda_state state; struct mmc_data *mmcdata; struct cam_periph *periph; // sdda_quirks quirks; struct task start_init_task; uint32_t raw_csd[4]; uint8_t raw_ext_csd[512]; /* MMC only? 
*/ struct mmc_csd csd; struct mmc_cid cid; struct mmc_scr scr; /* Calculated from CSD */ uint64_t sector_count; uint64_t mediasize; /* Calculated from CID */ char card_id_string[64];/* Formatted CID info (serial, MFG, etc) */ char card_sn_string[16];/* Formatted serial # for disk->d_ident */ /* Determined from CSD + is highspeed card*/ uint32_t card_f_max; /* Generic switch timeout */ uint32_t cmd6_time; uint32_t timings; /* Mask of bus timings supported */ uint32_t vccq_120; /* Mask of bus timings at VCCQ of 1.2 V */ uint32_t vccq_180; /* Mask of bus timings at VCCQ of 1.8 V */ /* MMC partitions support */ struct sdda_part *part[MMC_PART_MAX]; uint8_t part_curr; /* Partition currently switched to */ uint8_t part_requested; /* What partition we're currently switching to */ uint32_t part_time; /* Partition switch timeout [us] */ off_t enh_base; /* Enhanced user data area slice base ... */ off_t enh_size; /* ... and size [bytes] */ int log_count; struct timeval log_time; }; static const char *mmc_errmsg[] = { "None", "Timeout", "Bad CRC", "Fifo", "Failed", "Invalid", "NO MEMORY" }; #define ccb_bp ppriv_ptr1 static disk_strategy_t sddastrategy; +static dumper_t sddadump; static periph_init_t sddainit; static void sddaasync(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg); static periph_ctor_t sddaregister; static periph_dtor_t sddacleanup; static periph_start_t sddastart; static periph_oninv_t sddaoninvalidate; static void sddadone(struct cam_periph *periph, union ccb *done_ccb); static int sddaerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags); static int mmc_handle_reply(union ccb *ccb); static uint16_t get_rca(struct cam_periph *periph); static void sdda_start_init(void *context, union ccb *start_ccb); static void sdda_start_init_task(void *context, int pending); static void sdda_process_mmc_partitions(struct cam_periph *periph, union ccb *start_ccb); static uint32_t sdda_get_host_caps(struct cam_periph *periph, union ccb *ccb); static int mmc_select_card(struct cam_periph *periph, union ccb *ccb, uint32_t rca); static inline uint32_t mmc_get_sector_size(struct cam_periph *periph) {return MMC_SECTOR_SIZE;} static SYSCTL_NODE(_kern_cam, OID_AUTO, sdda, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "CAM Direct Access Disk driver"); static int sdda_mmcsd_compat = 1; SYSCTL_INT(_kern_cam_sdda, OID_AUTO, mmcsd_compat, CTLFLAG_RDTUN, &sdda_mmcsd_compat, 1, "Enable creation of mmcsd aliases."); /* TODO: actually issue GET_TRAN_SETTINGS to get R/O status */ static inline bool sdda_get_read_only(struct cam_periph *periph, union ccb *start_ccb) { return (false); } static uint32_t mmc_get_spec_vers(struct cam_periph *periph); static uint64_t mmc_get_media_size(struct cam_periph *periph); static uint32_t mmc_get_cmd6_timeout(struct cam_periph *periph); static bool sdda_add_part(struct cam_periph *periph, u_int type, const char *name, u_int cnt, off_t media_size, bool ro); static struct periph_driver sddadriver = { sddainit, "sdda", TAILQ_HEAD_INITIALIZER(sddadriver.units), /* generation */ 0 }; PERIPHDRIVER_DECLARE(sdda, sddadriver); static MALLOC_DEFINE(M_SDDA, "sd_da", "sd_da buffers"); static const int exp[8] = { 1, 10, 100, 1000, 10000, 100000, 1000000, 10000000 }; static const int mant[16] = { 0, 10, 12, 13, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60, 70, 80 }; static const int cur_min[8] = { 500, 1000, 5000, 10000, 25000, 35000, 60000, 100000 }; static const int cur_max[8] = { 1000, 5000, 10000, 25000, 35000, 45000, 800000, 200000 }; static uint16_t get_rca(struct cam_periph 
*periph) { return periph->path->device->mmc_ident_data.card_rca; } /* * Figure out if CCB execution resulted in error. * Look at both CAM-level errors and on MMC protocol errors. * * Return value is always MMC error. */ static int mmc_handle_reply(union ccb *ccb) { KASSERT(ccb->ccb_h.func_code == XPT_MMC_IO, ("ccb %p: cannot handle non-XPT_MMC_IO errors, got func_code=%d", ccb, ccb->ccb_h.func_code)); /* CAM-level error should always correspond to MMC-level error */ if (((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) && (ccb->mmcio.cmd.error != MMC_ERR_NONE)) panic("CCB status is OK but MMC error != MMC_ERR_NONE"); if (ccb->mmcio.cmd.error != MMC_ERR_NONE) { xpt_print_path(ccb->ccb_h.path); printf("CMD%d failed, err %d (%s)\n", ccb->mmcio.cmd.opcode, ccb->mmcio.cmd.error, mmc_errmsg[ccb->mmcio.cmd.error]); } return (ccb->mmcio.cmd.error); } static uint32_t mmc_get_bits(uint32_t *bits, int bit_len, int start, int size) { const int i = (bit_len / 32) - (start / 32) - 1; const int shift = start & 31; uint32_t retval = bits[i] >> shift; if (size + shift > 32) retval |= bits[i - 1] << (32 - shift); return (retval & ((1llu << size) - 1)); } static void mmc_decode_csd_sd(uint32_t *raw_csd, struct mmc_csd *csd) { int v; int m; int e; memset(csd, 0, sizeof(*csd)); csd->csd_structure = v = mmc_get_bits(raw_csd, 128, 126, 2); /* Common members between 1.0 and 2.0 */ m = mmc_get_bits(raw_csd, 128, 115, 4); e = mmc_get_bits(raw_csd, 128, 112, 3); csd->tacc = (exp[e] * mant[m] + 9) / 10; csd->nsac = mmc_get_bits(raw_csd, 128, 104, 8) * 100; m = mmc_get_bits(raw_csd, 128, 99, 4); e = mmc_get_bits(raw_csd, 128, 96, 3); csd->tran_speed = exp[e] * 10000 * mant[m]; csd->ccc = mmc_get_bits(raw_csd, 128, 84, 12); csd->read_bl_len = 1 << mmc_get_bits(raw_csd, 128, 80, 4); csd->read_bl_partial = mmc_get_bits(raw_csd, 128, 79, 1); csd->write_blk_misalign = mmc_get_bits(raw_csd, 128, 78, 1); csd->read_blk_misalign = mmc_get_bits(raw_csd, 128, 77, 1); csd->dsr_imp = mmc_get_bits(raw_csd, 128, 76, 1); csd->erase_blk_en = mmc_get_bits(raw_csd, 128, 46, 1); csd->erase_sector = mmc_get_bits(raw_csd, 128, 39, 7) + 1; csd->wp_grp_size = mmc_get_bits(raw_csd, 128, 32, 7); csd->wp_grp_enable = mmc_get_bits(raw_csd, 128, 31, 1); csd->r2w_factor = 1 << mmc_get_bits(raw_csd, 128, 26, 3); csd->write_bl_len = 1 << mmc_get_bits(raw_csd, 128, 22, 4); csd->write_bl_partial = mmc_get_bits(raw_csd, 128, 21, 1); if (v == 0) { csd->vdd_r_curr_min = cur_min[mmc_get_bits(raw_csd, 128, 59, 3)]; csd->vdd_r_curr_max = cur_max[mmc_get_bits(raw_csd, 128, 56, 3)]; csd->vdd_w_curr_min = cur_min[mmc_get_bits(raw_csd, 128, 53, 3)]; csd->vdd_w_curr_max = cur_max[mmc_get_bits(raw_csd, 128, 50, 3)]; m = mmc_get_bits(raw_csd, 128, 62, 12); e = mmc_get_bits(raw_csd, 128, 47, 3); csd->capacity = ((1 + m) << (e + 2)) * csd->read_bl_len; } else if (v == 1) { csd->capacity = ((uint64_t)mmc_get_bits(raw_csd, 128, 48, 22) + 1) * 512 * 1024; } else panic("unknown SD CSD version"); } static void mmc_decode_csd_mmc(uint32_t *raw_csd, struct mmc_csd *csd) { int m; int e; memset(csd, 0, sizeof(*csd)); csd->csd_structure = mmc_get_bits(raw_csd, 128, 126, 2); csd->spec_vers = mmc_get_bits(raw_csd, 128, 122, 4); m = mmc_get_bits(raw_csd, 128, 115, 4); e = mmc_get_bits(raw_csd, 128, 112, 3); csd->tacc = exp[e] * mant[m] + 9 / 10; csd->nsac = mmc_get_bits(raw_csd, 128, 104, 8) * 100; m = mmc_get_bits(raw_csd, 128, 99, 4); e = mmc_get_bits(raw_csd, 128, 96, 3); csd->tran_speed = exp[e] * 10000 * mant[m]; csd->ccc = mmc_get_bits(raw_csd, 128, 84, 12); 
csd->read_bl_len = 1 << mmc_get_bits(raw_csd, 128, 80, 4); csd->read_bl_partial = mmc_get_bits(raw_csd, 128, 79, 1); csd->write_blk_misalign = mmc_get_bits(raw_csd, 128, 78, 1); csd->read_blk_misalign = mmc_get_bits(raw_csd, 128, 77, 1); csd->dsr_imp = mmc_get_bits(raw_csd, 128, 76, 1); csd->vdd_r_curr_min = cur_min[mmc_get_bits(raw_csd, 128, 59, 3)]; csd->vdd_r_curr_max = cur_max[mmc_get_bits(raw_csd, 128, 56, 3)]; csd->vdd_w_curr_min = cur_min[mmc_get_bits(raw_csd, 128, 53, 3)]; csd->vdd_w_curr_max = cur_max[mmc_get_bits(raw_csd, 128, 50, 3)]; m = mmc_get_bits(raw_csd, 128, 62, 12); e = mmc_get_bits(raw_csd, 128, 47, 3); csd->capacity = ((1 + m) << (e + 2)) * csd->read_bl_len; csd->erase_blk_en = 0; csd->erase_sector = (mmc_get_bits(raw_csd, 128, 42, 5) + 1) * (mmc_get_bits(raw_csd, 128, 37, 5) + 1); csd->wp_grp_size = mmc_get_bits(raw_csd, 128, 32, 5); csd->wp_grp_enable = mmc_get_bits(raw_csd, 128, 31, 1); csd->r2w_factor = 1 << mmc_get_bits(raw_csd, 128, 26, 3); csd->write_bl_len = 1 << mmc_get_bits(raw_csd, 128, 22, 4); csd->write_bl_partial = mmc_get_bits(raw_csd, 128, 21, 1); } static void mmc_decode_cid_sd(uint32_t *raw_cid, struct mmc_cid *cid) { int i; /* There's no version info, so we take it on faith */ memset(cid, 0, sizeof(*cid)); cid->mid = mmc_get_bits(raw_cid, 128, 120, 8); cid->oid = mmc_get_bits(raw_cid, 128, 104, 16); for (i = 0; i < 5; i++) cid->pnm[i] = mmc_get_bits(raw_cid, 128, 96 - i * 8, 8); cid->pnm[5] = 0; cid->prv = mmc_get_bits(raw_cid, 128, 56, 8); cid->psn = mmc_get_bits(raw_cid, 128, 24, 32); cid->mdt_year = mmc_get_bits(raw_cid, 128, 12, 8) + 2000; cid->mdt_month = mmc_get_bits(raw_cid, 128, 8, 4); } static void mmc_decode_cid_mmc(uint32_t *raw_cid, struct mmc_cid *cid) { int i; /* There's no version info, so we take it on faith */ memset(cid, 0, sizeof(*cid)); cid->mid = mmc_get_bits(raw_cid, 128, 120, 8); cid->oid = mmc_get_bits(raw_cid, 128, 104, 8); for (i = 0; i < 6; i++) cid->pnm[i] = mmc_get_bits(raw_cid, 128, 96 - i * 8, 8); cid->pnm[6] = 0; cid->prv = mmc_get_bits(raw_cid, 128, 48, 8); cid->psn = mmc_get_bits(raw_cid, 128, 16, 32); cid->mdt_month = mmc_get_bits(raw_cid, 128, 12, 4); cid->mdt_year = mmc_get_bits(raw_cid, 128, 8, 4) + 1997; } static void mmc_format_card_id_string(struct sdda_softc *sc, struct mmc_params *mmcp) { char oidstr[8]; uint8_t c1; uint8_t c2; /* * Format a card ID string for use by the mmcsd driver, it's what * appears between the <> in the following: * mmcsd0: 968MB at mmc0 * 22.5MHz/4bit/128-block * * Also format just the card serial number, which the mmcsd driver will * use as the disk->d_ident string. * * The card_id_string in mmc_ivars is currently allocated as 64 bytes, * and our max formatted length is currently 55 bytes if every field * contains the largest value. * * Sometimes the oid is two printable ascii chars; when it's not, * format it as 0xnnnn instead. */ c1 = (sc->cid.oid >> 8) & 0x0ff; c2 = sc->cid.oid & 0x0ff; if (c1 > 0x1f && c1 < 0x7f && c2 > 0x1f && c2 < 0x7f) snprintf(oidstr, sizeof(oidstr), "%c%c", c1, c2); else snprintf(oidstr, sizeof(oidstr), "0x%04x", sc->cid.oid); snprintf(sc->card_sn_string, sizeof(sc->card_sn_string), "%08X", sc->cid.psn); snprintf(sc->card_id_string, sizeof(sc->card_id_string), "%s%s %s %d.%d SN %08X MFG %02d/%04d by %d %s", mmcp->card_features & CARD_FEATURE_MMC ? "MMC" : "SD", mmcp->card_features & CARD_FEATURE_SDHC ? 
"HC" : "", sc->cid.pnm, sc->cid.prv >> 4, sc->cid.prv & 0x0f, sc->cid.psn, sc->cid.mdt_month, sc->cid.mdt_year, sc->cid.mid, oidstr); } static int sddaopen(struct disk *dp) { struct sdda_part *part; struct cam_periph *periph; struct sdda_softc *softc; int error; part = (struct sdda_part *)dp->d_drv1; softc = part->sc; periph = softc->periph; if (cam_periph_acquire(periph) != 0) { return(ENXIO); } cam_periph_lock(periph); if ((error = cam_periph_hold(periph, PRIBIO|PCATCH)) != 0) { cam_periph_unlock(periph); cam_periph_release(periph); return (error); } CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("sddaopen\n")); part->flags |= SDDA_FLAG_OPEN; cam_periph_unhold(periph); cam_periph_unlock(periph); return (0); } static int sddaclose(struct disk *dp) { struct sdda_part *part; struct cam_periph *periph; struct sdda_softc *softc; part = (struct sdda_part *)dp->d_drv1; softc = part->sc; periph = softc->periph; part->flags &= ~SDDA_FLAG_OPEN; cam_periph_lock(periph); CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("sddaclose\n")); while (softc->refcount != 0) cam_periph_sleep(periph, &softc->refcount, PRIBIO, "sddaclose", 1); cam_periph_unlock(periph); cam_periph_release(periph); return (0); } static void sddaschedule(struct cam_periph *periph) { struct sdda_softc *softc = (struct sdda_softc *)periph->softc; struct sdda_part *part; struct bio *bp; int i; /* Check if we have more work to do. */ /* Find partition that has outstanding commands. Prefer current partition. */ bp = bioq_first(&softc->part[softc->part_curr]->bio_queue); if (bp == NULL) { for (i = 0; i < MMC_PART_MAX; i++) { if ((part = softc->part[i]) != NULL && (bp = bioq_first(&softc->part[i]->bio_queue)) != NULL) break; } } if (bp != NULL) { xpt_schedule(periph, CAM_PRIORITY_NORMAL); } } /* * Actually translate the requested transfer into one the physical driver * can understand. The transfer is described by a buf and will include * only one physical transfer. */ static void sddastrategy(struct bio *bp) { struct cam_periph *periph; struct sdda_part *part; struct sdda_softc *softc; part = (struct sdda_part *)bp->bio_disk->d_drv1; softc = part->sc; periph = softc->periph; cam_periph_lock(periph); CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("sddastrategy(%p)\n", bp)); /* * If the device has been made invalid, error out */ if ((periph->flags & CAM_PERIPH_INVALID) != 0) { cam_periph_unlock(periph); biofinish(bp, NULL, ENXIO); return; } /* * Place it in the queue of disk activities for this disk */ bioq_disksort(&part->bio_queue, bp); /* * Schedule ourselves for performing the work. */ sddaschedule(periph); cam_periph_unlock(periph); return; } static void sddainit(void) { cam_status status; /* * Install a global async callback. This callback will * receive async callbacks like "new device found". */ status = xpt_register_async(AC_FOUND_DEVICE, sddaasync, NULL, NULL); if (status != CAM_REQ_CMP) { printf("sdda: Failed to attach master async callback " "due to status 0x%x!\n", status); } } /* * Callback from GEOM, called when it has finished cleaning up its * resources. 
*/ static void sddadiskgonecb(struct disk *dp) { struct cam_periph *periph; struct sdda_part *part; part = (struct sdda_part *)dp->d_drv1; periph = part->sc->periph; CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("sddadiskgonecb\n")); cam_periph_release(periph); } static void sddaoninvalidate(struct cam_periph *periph) { struct sdda_softc *softc; struct sdda_part *part; softc = (struct sdda_softc *)periph->softc; CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("sddaoninvalidate\n")); /* * De-register any async callbacks. */ xpt_register_async(0, sddaasync, periph, periph->path); /* * Return all queued I/O with ENXIO. * XXX Handle any transactions queued to the card * with XPT_ABORT_CCB. */ CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("bioq_flush start\n")); for (int i = 0; i < MMC_PART_MAX; i++) { if ((part = softc->part[i]) != NULL) { bioq_flush(&part->bio_queue, NULL, ENXIO); disk_gone(part->disk); } } CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("bioq_flush end\n")); } static void sddacleanup(struct cam_periph *periph) { struct sdda_softc *softc; struct sdda_part *part; int i; CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("sddacleanup\n")); softc = (struct sdda_softc *)periph->softc; cam_periph_unlock(periph); for (i = 0; i < MMC_PART_MAX; i++) { if ((part = softc->part[i]) != NULL) { disk_destroy(part->disk); free(part, M_DEVBUF); softc->part[i] = NULL; } } free(softc, M_DEVBUF); cam_periph_lock(periph); } static void sddaasync(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg) { struct ccb_getdev cgd; struct cam_periph *periph; struct sdda_softc *softc; periph = (struct cam_periph *)callback_arg; CAM_DEBUG(path, CAM_DEBUG_TRACE, ("sddaasync(code=%d)\n", code)); switch (code) { case AC_FOUND_DEVICE: { CAM_DEBUG(path, CAM_DEBUG_TRACE, ("=> AC_FOUND_DEVICE\n")); struct ccb_getdev *cgd; cam_status status; cgd = (struct ccb_getdev *)arg; if (cgd == NULL) break; if (cgd->protocol != PROTO_MMCSD) break; if (!(path->device->mmc_ident_data.card_features & CARD_FEATURE_MEMORY)) { CAM_DEBUG(path, CAM_DEBUG_TRACE, ("No memory on the card!\n")); break; } /* * Allocate a peripheral instance for * this device and start the probe * process. 
*/ status = cam_periph_alloc(sddaregister, sddaoninvalidate, sddacleanup, sddastart, "sdda", CAM_PERIPH_BIO, path, sddaasync, AC_FOUND_DEVICE, cgd); if (status != CAM_REQ_CMP && status != CAM_REQ_INPROG) printf("sddaasync: Unable to attach to new device " "due to status 0x%x\n", status); break; } case AC_GETDEV_CHANGED: { CAM_DEBUG(path, CAM_DEBUG_TRACE, ("=> AC_GETDEV_CHANGED\n")); softc = (struct sdda_softc *)periph->softc; memset(&cgd, 0, sizeof(cgd)); xpt_setup_ccb(&cgd.ccb_h, periph->path, CAM_PRIORITY_NORMAL); cgd.ccb_h.func_code = XPT_GDEV_TYPE; xpt_action((union ccb *)&cgd); cam_periph_async(periph, code, path, arg); break; } case AC_ADVINFO_CHANGED: { uintptr_t buftype; int i; CAM_DEBUG(path, CAM_DEBUG_TRACE, ("=> AC_ADVINFO_CHANGED\n")); buftype = (uintptr_t)arg; if (buftype == CDAI_TYPE_PHYS_PATH) { struct sdda_softc *softc; struct sdda_part *part; softc = periph->softc; for (i = 0; i < MMC_PART_MAX; i++) { if ((part = softc->part[i]) != NULL) { disk_attr_changed(part->disk, "GEOM::physpath", M_NOWAIT); } } } break; } default: CAM_DEBUG(path, CAM_DEBUG_TRACE, ("=> default?!\n")); cam_periph_async(periph, code, path, arg); break; } } static int sddagetattr(struct bio *bp) { struct cam_periph *periph; struct sdda_softc *softc; struct sdda_part *part; int ret; part = (struct sdda_part *)bp->bio_disk->d_drv1; softc = part->sc; periph = softc->periph; cam_periph_lock(periph); ret = xpt_getattr(bp->bio_data, bp->bio_length, bp->bio_attribute, periph->path); cam_periph_unlock(periph); if (ret == 0) bp->bio_completed = bp->bio_length; return (ret); } static cam_status sddaregister(struct cam_periph *periph, void *arg) { struct sdda_softc *softc; struct ccb_getdev *cgd; union ccb *request_ccb; /* CCB representing the probe request */ CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("sddaregister\n")); cgd = (struct ccb_getdev *)arg; if (cgd == NULL) { printf("sddaregister: no getdev CCB, can't register device\n"); return (CAM_REQ_CMP_ERR); } softc = (struct sdda_softc *)malloc(sizeof(*softc), M_DEVBUF, M_NOWAIT|M_ZERO); if (softc == NULL) { printf("sddaregister: Unable to probe new device. " "Unable to allocate softc\n"); return (CAM_REQ_CMP_ERR); } softc->state = SDDA_STATE_INIT; softc->mmcdata = (struct mmc_data *)malloc(sizeof(struct mmc_data), M_DEVBUF, M_NOWAIT|M_ZERO); if (softc->mmcdata == NULL) { printf("sddaregister: Unable to probe new device. 
" "Unable to allocate mmcdata\n"); free(softc, M_DEVBUF); return (CAM_REQ_CMP_ERR); } periph->softc = softc; softc->periph = periph; request_ccb = (union ccb*) arg; xpt_schedule(periph, CAM_PRIORITY_XPT); TASK_INIT(&softc->start_init_task, 0, sdda_start_init_task, periph); taskqueue_enqueue(taskqueue_thread, &softc->start_init_task); return (CAM_REQ_CMP); } static int mmc_exec_app_cmd(struct cam_periph *periph, union ccb *ccb, struct mmc_command *cmd) { int err; /* Send APP_CMD first */ memset(&ccb->mmcio.cmd, 0, sizeof(struct mmc_command)); memset(&ccb->mmcio.stop, 0, sizeof(struct mmc_command)); cam_fill_mmcio(&ccb->mmcio, /*retries*/ 0, /*cbfcnp*/ NULL, /*flags*/ CAM_DIR_NONE, /*mmc_opcode*/ MMC_APP_CMD, /*mmc_arg*/ get_rca(periph) << 16, /*mmc_flags*/ MMC_RSP_R1 | MMC_CMD_AC, /*mmc_data*/ NULL, /*timeout*/ 0); cam_periph_runccb(ccb, sddaerror, CAM_FLAG_NONE, /*sense_flags*/0, NULL); err = mmc_handle_reply(ccb); if (err != 0) return (err); if (!(ccb->mmcio.cmd.resp[0] & R1_APP_CMD)) return (EIO); /* Now exec actual command */ int flags = 0; if (cmd->data != NULL) { ccb->mmcio.cmd.data = cmd->data; if (cmd->data->flags & MMC_DATA_READ) flags |= CAM_DIR_IN; if (cmd->data->flags & MMC_DATA_WRITE) flags |= CAM_DIR_OUT; } else flags = CAM_DIR_NONE; cam_fill_mmcio(&ccb->mmcio, /*retries*/ 0, /*cbfcnp*/ NULL, /*flags*/ flags, /*mmc_opcode*/ cmd->opcode, /*mmc_arg*/ cmd->arg, /*mmc_flags*/ cmd->flags, /*mmc_data*/ cmd->data, /*timeout*/ 0); cam_periph_runccb(ccb, sddaerror, CAM_FLAG_NONE, /*sense_flags*/0, NULL); err = mmc_handle_reply(ccb); if (err != 0) return (err); memcpy(cmd->resp, ccb->mmcio.cmd.resp, sizeof(cmd->resp)); cmd->error = ccb->mmcio.cmd.error; return (0); } static int mmc_app_get_scr(struct cam_periph *periph, union ccb *ccb, uint32_t *rawscr) { int err; struct mmc_command cmd; struct mmc_data d; memset(&cmd, 0, sizeof(cmd)); memset(&d, 0, sizeof(d)); memset(rawscr, 0, 8); cmd.opcode = ACMD_SEND_SCR; cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC; cmd.arg = 0; d.data = rawscr; d.len = 8; d.flags = MMC_DATA_READ; cmd.data = &d; err = mmc_exec_app_cmd(periph, ccb, &cmd); rawscr[0] = be32toh(rawscr[0]); rawscr[1] = be32toh(rawscr[1]); return (err); } static int mmc_send_ext_csd(struct cam_periph *periph, union ccb *ccb, uint8_t *rawextcsd, size_t buf_len) { int err; struct mmc_data d; KASSERT(buf_len == 512, ("Buffer for ext csd must be 512 bytes")); memset(&d, 0, sizeof(d)); d.data = rawextcsd; d.len = buf_len; d.flags = MMC_DATA_READ; memset(d.data, 0, d.len); cam_fill_mmcio(&ccb->mmcio, /*retries*/ 0, /*cbfcnp*/ NULL, /*flags*/ CAM_DIR_IN, /*mmc_opcode*/ MMC_SEND_EXT_CSD, /*mmc_arg*/ 0, /*mmc_flags*/ MMC_RSP_R1 | MMC_CMD_ADTC, /*mmc_data*/ &d, /*timeout*/ 0); cam_periph_runccb(ccb, sddaerror, CAM_FLAG_NONE, /*sense_flags*/0, NULL); err = mmc_handle_reply(ccb); return (err); } static void mmc_app_decode_scr(uint32_t *raw_scr, struct mmc_scr *scr) { unsigned int scr_struct; memset(scr, 0, sizeof(*scr)); scr_struct = mmc_get_bits(raw_scr, 64, 60, 4); if (scr_struct != 0) { printf("Unrecognised SCR structure version %d\n", scr_struct); return; } scr->sda_vsn = mmc_get_bits(raw_scr, 64, 56, 4); scr->bus_widths = mmc_get_bits(raw_scr, 64, 48, 4); } static inline void mmc_switch_fill_mmcio(union ccb *ccb, uint8_t set, uint8_t index, uint8_t value, u_int timeout) { int arg = (MMC_SWITCH_FUNC_WR << 24) | (index << 16) | (value << 8) | set; cam_fill_mmcio(&ccb->mmcio, /*retries*/ 0, /*cbfcnp*/ NULL, /*flags*/ CAM_DIR_NONE, /*mmc_opcode*/ MMC_SWITCH_FUNC, /*mmc_arg*/ arg, /*mmc_flags*/ 
MMC_RSP_R1B | MMC_CMD_AC, /*mmc_data*/ NULL, /*timeout*/ timeout); } static int mmc_select_card(struct cam_periph *periph, union ccb *ccb, uint32_t rca) { int flags, err; flags = (rca ? MMC_RSP_R1B : MMC_RSP_NONE) | MMC_CMD_AC; cam_fill_mmcio(&ccb->mmcio, /*retries*/ 0, /*cbfcnp*/ NULL, /*flags*/ CAM_DIR_IN, /*mmc_opcode*/ MMC_SELECT_CARD, /*mmc_arg*/ rca << 16, /*mmc_flags*/ flags, /*mmc_data*/ NULL, /*timeout*/ 0); cam_periph_runccb(ccb, sddaerror, CAM_FLAG_NONE, /*sense_flags*/0, NULL); err = mmc_handle_reply(ccb); return (err); } static int mmc_switch(struct cam_periph *periph, union ccb *ccb, uint8_t set, uint8_t index, uint8_t value, u_int timeout) { int err; mmc_switch_fill_mmcio(ccb, set, index, value, timeout); cam_periph_runccb(ccb, sddaerror, CAM_FLAG_NONE, /*sense_flags*/0, NULL); err = mmc_handle_reply(ccb); return (err); } static uint32_t mmc_get_spec_vers(struct cam_periph *periph) { struct sdda_softc *softc = (struct sdda_softc *)periph->softc; return (softc->csd.spec_vers); } static uint64_t mmc_get_media_size(struct cam_periph *periph) { struct sdda_softc *softc = (struct sdda_softc *)periph->softc; return (softc->mediasize); } static uint32_t mmc_get_cmd6_timeout(struct cam_periph *periph) { struct sdda_softc *softc = (struct sdda_softc *)periph->softc; if (mmc_get_spec_vers(periph) >= 6) return (softc->raw_ext_csd[EXT_CSD_GEN_CMD6_TIME] * 10); return (500 * 1000); } static int mmc_sd_switch(struct cam_periph *periph, union ccb *ccb, uint8_t mode, uint8_t grp, uint8_t value, uint8_t *res) { struct mmc_data mmc_d; uint32_t arg; int err; memset(res, 0, 64); memset(&mmc_d, 0, sizeof(mmc_d)); mmc_d.len = 64; mmc_d.data = res; mmc_d.flags = MMC_DATA_READ; arg = mode << 31; /* 0 - check, 1 - set */ arg |= 0x00FFFFFF; arg &= ~(0xF << (grp * 4)); arg |= value << (grp * 4); cam_fill_mmcio(&ccb->mmcio, /*retries*/ 0, /*cbfcnp*/ NULL, /*flags*/ CAM_DIR_IN, /*mmc_opcode*/ SD_SWITCH_FUNC, /*mmc_arg*/ arg, /*mmc_flags*/ MMC_RSP_R1 | MMC_CMD_ADTC, /*mmc_data*/ &mmc_d, /*timeout*/ 0); cam_periph_runccb(ccb, sddaerror, CAM_FLAG_NONE, /*sense_flags*/0, NULL); err = mmc_handle_reply(ccb); return (err); } static int mmc_set_timing(struct cam_periph *periph, union ccb *ccb, enum mmc_bus_timing timing) { u_char switch_res[64]; int err; uint8_t value; struct sdda_softc *softc = (struct sdda_softc *)periph->softc; struct mmc_params *mmcp = &periph->path->device->mmc_ident_data; CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("mmc_set_timing(timing=%d)", timing)); switch (timing) { case bus_timing_normal: value = 0; break; case bus_timing_hs: value = 1; break; default: return (MMC_ERR_INVALID); } if (mmcp->card_features & CARD_FEATURE_MMC) { err = mmc_switch(periph, ccb, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING, value, softc->cmd6_time); } else { err = mmc_sd_switch(periph, ccb, SD_SWITCH_MODE_SET, SD_SWITCH_GROUP1, value, switch_res); } /* Set high-speed timing on the host */ struct ccb_trans_settings_mmc *cts; cts = &ccb->cts.proto_specific.mmc; ccb->ccb_h.func_code = XPT_SET_TRAN_SETTINGS; ccb->ccb_h.flags = CAM_DIR_NONE; ccb->ccb_h.retry_count = 0; ccb->ccb_h.timeout = 100; ccb->ccb_h.cbfcnp = NULL; cts->ios.timing = timing; cts->ios_valid = MMC_BT; xpt_action(ccb); return (err); } static void sdda_start_init_task(void *context, int pending) { union ccb *new_ccb; struct cam_periph *periph; periph = (struct cam_periph *)context; CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("sdda_start_init_task\n")); new_ccb = xpt_alloc_ccb(); xpt_setup_ccb(&new_ccb->ccb_h, periph->path, CAM_PRIORITY_NONE); 
cam_periph_lock(periph); cam_periph_hold(periph, PRIBIO|PCATCH); sdda_start_init(context, new_ccb); cam_periph_unhold(periph); cam_periph_unlock(periph); xpt_free_ccb(new_ccb); } static void sdda_set_bus_width(struct cam_periph *periph, union ccb *ccb, int width) { struct sdda_softc *softc = (struct sdda_softc *)periph->softc; struct mmc_params *mmcp = &periph->path->device->mmc_ident_data; int err; CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("sdda_set_bus_width\n")); /* First set for the card, then for the host */ if (mmcp->card_features & CARD_FEATURE_MMC) { uint8_t value; switch (width) { case bus_width_1: value = EXT_CSD_BUS_WIDTH_1; break; case bus_width_4: value = EXT_CSD_BUS_WIDTH_4; break; case bus_width_8: value = EXT_CSD_BUS_WIDTH_8; break; default: panic("Invalid bus width %d", width); } err = mmc_switch(periph, ccb, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BUS_WIDTH, value, softc->cmd6_time); } else { /* For SD cards we send ACMD6 with the required bus width in arg */ struct mmc_command cmd; memset(&cmd, 0, sizeof(struct mmc_command)); cmd.opcode = ACMD_SET_BUS_WIDTH; cmd.arg = width; cmd.flags = MMC_RSP_R1 | MMC_CMD_AC; err = mmc_exec_app_cmd(periph, ccb, &cmd); } if (err != MMC_ERR_NONE) { CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH, ("Error %d when setting bus width on the card\n", err)); return; } /* Now card is done, set the host to the same width */ struct ccb_trans_settings_mmc *cts; cts = &ccb->cts.proto_specific.mmc; ccb->ccb_h.func_code = XPT_SET_TRAN_SETTINGS; ccb->ccb_h.flags = CAM_DIR_NONE; ccb->ccb_h.retry_count = 0; ccb->ccb_h.timeout = 100; ccb->ccb_h.cbfcnp = NULL; cts->ios.bus_width = width; cts->ios_valid = MMC_BW; xpt_action(ccb); } static inline const char *part_type(u_int type) { switch (type) { case EXT_CSD_PART_CONFIG_ACC_RPMB: return ("RPMB"); case EXT_CSD_PART_CONFIG_ACC_DEFAULT: return ("default"); case EXT_CSD_PART_CONFIG_ACC_BOOT0: return ("boot0"); case EXT_CSD_PART_CONFIG_ACC_BOOT1: return ("boot1"); case EXT_CSD_PART_CONFIG_ACC_GP0: case EXT_CSD_PART_CONFIG_ACC_GP1: case EXT_CSD_PART_CONFIG_ACC_GP2: case EXT_CSD_PART_CONFIG_ACC_GP3: return ("general purpose"); default: return ("(unknown type)"); } } static inline const char *bus_width_str(enum mmc_bus_width w) { switch (w) { case bus_width_1: return ("1-bit"); case bus_width_4: return ("4-bit"); case bus_width_8: return ("8-bit"); default: __assert_unreachable(); } } static uint32_t sdda_get_host_caps(struct cam_periph *periph, union ccb *ccb) { struct ccb_trans_settings_mmc *cts; cts = &ccb->cts.proto_specific.mmc; ccb->ccb_h.func_code = XPT_GET_TRAN_SETTINGS; ccb->ccb_h.flags = CAM_DIR_NONE; ccb->ccb_h.retry_count = 0; ccb->ccb_h.timeout = 100; ccb->ccb_h.cbfcnp = NULL; xpt_action(ccb); if (ccb->ccb_h.status != CAM_REQ_CMP) panic("Cannot get host caps"); return (cts->host_caps); } static uint32_t sdda_get_max_data(struct cam_periph *periph, union ccb *ccb) { struct ccb_trans_settings_mmc *cts; cts = &ccb->cts.proto_specific.mmc; memset(cts, 0, sizeof(struct ccb_trans_settings_mmc)); ccb->ccb_h.func_code = XPT_GET_TRAN_SETTINGS; ccb->ccb_h.flags = CAM_DIR_NONE; ccb->ccb_h.retry_count = 0; ccb->ccb_h.timeout = 100; ccb->ccb_h.cbfcnp = NULL; xpt_action(ccb); if (ccb->ccb_h.status != CAM_REQ_CMP) panic("Cannot get host max data"); KASSERT(cts->host_max_data != 0, ("host_max_data == 0?!")); return (cts->host_max_data); } static void sdda_start_init(void *context, union ccb *start_ccb) { struct cam_periph *periph = (struct cam_periph *)context; struct ccb_trans_settings_mmc *cts; uint32_t host_caps; uint32_t 
sec_count; int err; int host_f_max; uint8_t card_type; CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("sdda_start_init\n")); /* periph was held for us when this task was enqueued */ if ((periph->flags & CAM_PERIPH_INVALID) != 0) { cam_periph_release(periph); return; } struct sdda_softc *softc = (struct sdda_softc *)periph->softc; struct mmc_params *mmcp = &periph->path->device->mmc_ident_data; struct cam_ed *device = periph->path->device; if (mmcp->card_features & CARD_FEATURE_MMC) { mmc_decode_csd_mmc(mmcp->card_csd, &softc->csd); mmc_decode_cid_mmc(mmcp->card_cid, &softc->cid); if (mmc_get_spec_vers(periph) >= 4) { err = mmc_send_ext_csd(periph, start_ccb, (uint8_t *)&softc->raw_ext_csd, sizeof(softc->raw_ext_csd)); if (err != 0) { CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH, ("Cannot read EXT_CSD, err %d", err)); return; } } } else { mmc_decode_csd_sd(mmcp->card_csd, &softc->csd); mmc_decode_cid_sd(mmcp->card_cid, &softc->cid); } softc->sector_count = softc->csd.capacity / 512; softc->mediasize = softc->csd.capacity; softc->cmd6_time = mmc_get_cmd6_timeout(periph); /* MMC >= 4.x have EXT_CSD that has its own opinion about capacity */ if (mmc_get_spec_vers(periph) >= 4) { sec_count = softc->raw_ext_csd[EXT_CSD_SEC_CNT] + (softc->raw_ext_csd[EXT_CSD_SEC_CNT + 1] << 8) + (softc->raw_ext_csd[EXT_CSD_SEC_CNT + 2] << 16) + (softc->raw_ext_csd[EXT_CSD_SEC_CNT + 3] << 24); if (sec_count != 0) { softc->sector_count = sec_count; softc->mediasize = softc->sector_count * 512; /* FIXME: there should be a better name for this option...*/ mmcp->card_features |= CARD_FEATURE_SDHC; } } CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH, ("Capacity: %"PRIu64", sectors: %"PRIu64"\n", softc->mediasize, softc->sector_count)); mmc_format_card_id_string(softc, mmcp); /* Update info for CAM */ device->serial_num_len = strlen(softc->card_sn_string); device->serial_num = (u_int8_t *)malloc((device->serial_num_len + 1), M_CAMXPT, M_NOWAIT); strlcpy(device->serial_num, softc->card_sn_string, device->serial_num_len + 1); device->device_id_len = strlen(softc->card_id_string); device->device_id = (u_int8_t *)malloc((device->device_id_len + 1), M_CAMXPT, M_NOWAIT); strlcpy(device->device_id, softc->card_id_string, device->device_id_len + 1); strlcpy(mmcp->model, softc->card_id_string, sizeof(mmcp->model)); /* Set the clock frequency that the card can handle */ cts = &start_ccb->cts.proto_specific.mmc; /* First, get the host's max freq */ start_ccb->ccb_h.func_code = XPT_GET_TRAN_SETTINGS; start_ccb->ccb_h.flags = CAM_DIR_NONE; start_ccb->ccb_h.retry_count = 0; start_ccb->ccb_h.timeout = 100; start_ccb->ccb_h.cbfcnp = NULL; xpt_action(start_ccb); if (start_ccb->ccb_h.status != CAM_REQ_CMP) panic("Cannot get max host freq"); host_f_max = cts->host_f_max; host_caps = cts->host_caps; if (cts->ios.bus_width != bus_width_1) panic("Bus width in ios is not 1-bit"); /* Now check if the card supports High-speed */ softc->card_f_max = softc->csd.tran_speed; if (host_caps & MMC_CAP_HSPEED) { /* Find out if the card supports High speed timing */ if (mmcp->card_features & CARD_FEATURE_SD20) { /* Get and decode SCR */ uint32_t rawscr[2]; uint8_t res[64]; if (mmc_app_get_scr(periph, start_ccb, rawscr)) { CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH, ("Cannot get SCR\n")); goto finish_hs_tests; } mmc_app_decode_scr(rawscr, &softc->scr); if ((softc->scr.sda_vsn >= 1) && (softc->csd.ccc & (1<<10))) { mmc_sd_switch(periph, start_ccb, SD_SWITCH_MODE_CHECK, SD_SWITCH_GROUP1, SD_SWITCH_NOCHANGE, res); if (res[13] & 2) { CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH, 
("Card supports HS\n")); softc->card_f_max = SD_HS_MAX; } /* * We deselect then reselect the card here. Some cards * become unselected and timeout with the above two * commands, although the state tables / diagrams in the * standard suggest they go back to the transfer state. * Other cards don't become deselected, and if we * attempt to blindly re-select them, we get timeout * errors from some controllers. So we deselect then * reselect to handle all situations. */ mmc_select_card(periph, start_ccb, 0); mmc_select_card(periph, start_ccb, get_rca(periph)); } else { CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH, ("Not trying the switch\n")); goto finish_hs_tests; } } if (mmcp->card_features & CARD_FEATURE_MMC && mmc_get_spec_vers(periph) >= 4) { card_type = softc->raw_ext_csd[EXT_CSD_CARD_TYPE]; if (card_type & EXT_CSD_CARD_TYPE_HS_52) softc->card_f_max = MMC_TYPE_HS_52_MAX; else if (card_type & EXT_CSD_CARD_TYPE_HS_26) softc->card_f_max = MMC_TYPE_HS_26_MAX; if ((card_type & EXT_CSD_CARD_TYPE_DDR_52_1_2V) != 0 && (host_caps & MMC_CAP_SIGNALING_120) != 0) { setbit(&softc->timings, bus_timing_mmc_ddr52); setbit(&softc->vccq_120, bus_timing_mmc_ddr52); CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH, ("Card supports DDR52 at 1.2V\n")); } if ((card_type & EXT_CSD_CARD_TYPE_DDR_52_1_8V) != 0 && (host_caps & MMC_CAP_SIGNALING_180) != 0) { setbit(&softc->timings, bus_timing_mmc_ddr52); setbit(&softc->vccq_180, bus_timing_mmc_ddr52); CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH, ("Card supports DDR52 at 1.8V\n")); } if ((card_type & EXT_CSD_CARD_TYPE_HS200_1_2V) != 0 && (host_caps & MMC_CAP_SIGNALING_120) != 0) { setbit(&softc->timings, bus_timing_mmc_hs200); setbit(&softc->vccq_120, bus_timing_mmc_hs200); CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH, ("Card supports HS200 at 1.2V\n")); } if ((card_type & EXT_CSD_CARD_TYPE_HS200_1_8V) != 0 && (host_caps & MMC_CAP_SIGNALING_180) != 0) { setbit(&softc->timings, bus_timing_mmc_hs200); setbit(&softc->vccq_180, bus_timing_mmc_hs200); CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH, ("Card supports HS200 at 1.8V\n")); } } } int f_max; finish_hs_tests: f_max = min(host_f_max, softc->card_f_max); CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH, ("Set SD freq to %d MHz (min out of host f=%d MHz and card f=%d MHz)\n", f_max / 1000000, host_f_max / 1000000, softc->card_f_max / 1000000)); /* Enable high-speed timing on the card */ if (f_max > 25000000) { err = mmc_set_timing(periph, start_ccb, bus_timing_hs); if (err != MMC_ERR_NONE) { CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("Cannot switch card to high-speed mode")); f_max = 25000000; } } /* If possible, set lower-level signaling */ enum mmc_bus_timing timing; /* FIXME: MMCCAM supports max. bus_timing_mmc_ddr52 at the moment. 
*/ for (timing = bus_timing_mmc_ddr52; timing > bus_timing_normal; timing--) { if (isset(&softc->vccq_120, timing)) { /* Set VCCQ = 1.2V */ start_ccb->ccb_h.func_code = XPT_SET_TRAN_SETTINGS; start_ccb->ccb_h.flags = CAM_DIR_NONE; start_ccb->ccb_h.retry_count = 0; start_ccb->ccb_h.timeout = 100; start_ccb->ccb_h.cbfcnp = NULL; cts->ios.vccq = vccq_120; cts->ios_valid = MMC_VCCQ; xpt_action(start_ccb); break; } else if (isset(&softc->vccq_180, timing)) { /* Set VCCQ = 1.8V */ start_ccb->ccb_h.func_code = XPT_SET_TRAN_SETTINGS; start_ccb->ccb_h.flags = CAM_DIR_NONE; start_ccb->ccb_h.retry_count = 0; start_ccb->ccb_h.timeout = 100; start_ccb->ccb_h.cbfcnp = NULL; cts->ios.vccq = vccq_180; cts->ios_valid = MMC_VCCQ; xpt_action(start_ccb); break; } else { /* Set VCCQ = 3.3V */ start_ccb->ccb_h.func_code = XPT_SET_TRAN_SETTINGS; start_ccb->ccb_h.flags = CAM_DIR_NONE; start_ccb->ccb_h.retry_count = 0; start_ccb->ccb_h.timeout = 100; start_ccb->ccb_h.cbfcnp = NULL; cts->ios.vccq = vccq_330; cts->ios_valid = MMC_VCCQ; xpt_action(start_ccb); break; } } /* Set frequency on the controller */ start_ccb->ccb_h.func_code = XPT_SET_TRAN_SETTINGS; start_ccb->ccb_h.flags = CAM_DIR_NONE; start_ccb->ccb_h.retry_count = 0; start_ccb->ccb_h.timeout = 100; start_ccb->ccb_h.cbfcnp = NULL; cts->ios.clock = f_max; cts->ios_valid = MMC_CLK; xpt_action(start_ccb); /* Set bus width */ enum mmc_bus_width desired_bus_width = bus_width_1; enum mmc_bus_width max_host_bus_width = (host_caps & MMC_CAP_8_BIT_DATA ? bus_width_8 : host_caps & MMC_CAP_4_BIT_DATA ? bus_width_4 : bus_width_1); enum mmc_bus_width max_card_bus_width = bus_width_1; if (mmcp->card_features & CARD_FEATURE_SD20 && softc->scr.bus_widths & SD_SCR_BUS_WIDTH_4) max_card_bus_width = bus_width_4; /* * Unlike SD, MMC cards don't have any information about supported bus width... * So we need to perform read/write test to find out the width. */ /* TODO: figure out bus width for MMC; use 8-bit for now (to test on BBB) */ if (mmcp->card_features & CARD_FEATURE_MMC) max_card_bus_width = bus_width_8; desired_bus_width = min(max_host_bus_width, max_card_bus_width); CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH, ("Set bus width to %s (min of host %s and card %s)\n", bus_width_str(desired_bus_width), bus_width_str(max_host_bus_width), bus_width_str(max_card_bus_width))); sdda_set_bus_width(periph, start_ccb, desired_bus_width); softc->state = SDDA_STATE_NORMAL; cam_periph_unhold(periph); /* MMC partitions support */ if (mmcp->card_features & CARD_FEATURE_MMC && mmc_get_spec_vers(periph) >= 4) { sdda_process_mmc_partitions(periph, start_ccb); } else if (mmcp->card_features & CARD_FEATURE_SD20) { /* For SD[HC] cards, just add one partition that is the whole card */ if (sdda_add_part(periph, 0, "sdda", periph->unit_number, mmc_get_media_size(periph), sdda_get_read_only(periph, start_ccb)) == false) return; softc->part_curr = 0; } cam_periph_hold(periph, PRIBIO|PCATCH); xpt_announce_periph(periph, softc->card_id_string); /* * Add async callbacks for bus reset and bus device reset calls. * I don't bother checking if this fails as, in most cases, * the system will function just fine without them and the only * alternative would be to not attach the device on failure. 
*/ xpt_register_async(AC_LOST_DEVICE | AC_GETDEV_CHANGED | AC_ADVINFO_CHANGED, sddaasync, periph, periph->path); } static bool sdda_add_part(struct cam_periph *periph, u_int type, const char *name, u_int cnt, off_t media_size, bool ro) { struct sdda_softc *sc = (struct sdda_softc *)periph->softc; struct sdda_part *part; struct ccb_pathinq cpi; CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH, ("Partition type '%s', size %ju %s\n", part_type(type), media_size, ro ? "(read-only)" : "")); part = sc->part[type] = malloc(sizeof(*part), M_DEVBUF, M_NOWAIT | M_ZERO); if (part == NULL) { printf("Cannot add partition for sdda\n"); return (false); } part->cnt = cnt; part->type = type; part->ro = ro; part->sc = sc; snprintf(part->name, sizeof(part->name), name, periph->unit_number); /* * Due to the nature of RPMB partition it doesn't make much sense * to add it as a disk. It would be more appropriate to create a * userland tool to operate on the partition or leverage the existing * tools from sysutils/mmc-utils. */ if (type == EXT_CSD_PART_CONFIG_ACC_RPMB) { /* TODO: Create device, assign IOCTL handler */ CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH, ("Don't know what to do with RPMB partitions yet\n")); return (false); } bioq_init(&part->bio_queue); bzero(&cpi, sizeof(cpi)); xpt_setup_ccb(&cpi.ccb_h, periph->path, CAM_PRIORITY_NONE); cpi.ccb_h.func_code = XPT_PATH_INQ; xpt_action((union ccb *)&cpi); /* * Register this media as a disk */ (void)cam_periph_hold(periph, PRIBIO); cam_periph_unlock(periph); part->disk = disk_alloc(); part->disk->d_rotation_rate = DISK_RR_NON_ROTATING; part->disk->d_devstat = devstat_new_entry(part->name, cnt, 512, DEVSTAT_ALL_SUPPORTED, DEVSTAT_TYPE_DIRECT | XPORT_DEVSTAT_TYPE(cpi.transport), DEVSTAT_PRIORITY_DISK); part->disk->d_open = sddaopen; part->disk->d_close = sddaclose; part->disk->d_strategy = sddastrategy; + if (cam_sim_pollable(periph->sim)) + part->disk->d_dump = sddadump; part->disk->d_getattr = sddagetattr; part->disk->d_gone = sddadiskgonecb; part->disk->d_name = part->name; part->disk->d_drv1 = part; part->disk->d_maxsize = MIN(maxphys, sdda_get_max_data(periph, (union ccb *)&cpi) * mmc_get_sector_size(periph)); part->disk->d_unit = cnt; part->disk->d_flags = 0; strlcpy(part->disk->d_descr, sc->card_id_string, MIN(sizeof(part->disk->d_descr), sizeof(sc->card_id_string))); strlcpy(part->disk->d_ident, sc->card_sn_string, MIN(sizeof(part->disk->d_ident), sizeof(sc->card_sn_string))); part->disk->d_hba_vendor = cpi.hba_vendor; part->disk->d_hba_device = cpi.hba_device; part->disk->d_hba_subvendor = cpi.hba_subvendor; part->disk->d_hba_subdevice = cpi.hba_subdevice; snprintf(part->disk->d_attachment, sizeof(part->disk->d_attachment), "%s%d", cpi.dev_name, cpi.unit_number); part->disk->d_sectorsize = mmc_get_sector_size(periph); part->disk->d_mediasize = media_size; part->disk->d_stripesize = 0; part->disk->d_fwsectors = 0; part->disk->d_fwheads = 0; if (sdda_mmcsd_compat) disk_add_alias(part->disk, "mmcsd"); /* * Acquire a reference to the periph before we register with GEOM. * We'll release this reference once GEOM calls us back (via * sddadiskgonecb()) telling us that our provider has been freed. */ if (cam_periph_acquire(periph) != 0) { xpt_print(periph->path, "%s: lost periph during " "registration!\n", __func__); cam_periph_lock(periph); return (false); } disk_create(part->disk, DISK_VERSION); cam_periph_lock(periph); cam_periph_unhold(periph); return (true); } /* * For MMC cards, process EXT_CSD and add partitions that are supported by * this device. 
*/ static void sdda_process_mmc_partitions(struct cam_periph *periph, union ccb *ccb) { struct sdda_softc *sc = (struct sdda_softc *)periph->softc; struct mmc_params *mmcp = &periph->path->device->mmc_ident_data; off_t erase_size, sector_size, size, wp_size; int i; const uint8_t *ext_csd; uint8_t rev; bool comp, ro; ext_csd = sc->raw_ext_csd; /* * Enhanced user data area and general purpose partitions are only * supported in revision 1.4 (EXT_CSD_REV == 4) and later, the RPMB * partition in revision 1.5 (MMC v4.41, EXT_CSD_REV == 5) and later. */ rev = ext_csd[EXT_CSD_REV]; /* * Ignore user-creatable enhanced user data area and general purpose * partitions partitions as long as partitioning hasn't been finished. */ comp = (ext_csd[EXT_CSD_PART_SET] & EXT_CSD_PART_SET_COMPLETED) != 0; /* * Add enhanced user data area slice, unless it spans the entirety of * the user data area. The enhanced area is of a multiple of high * capacity write protect groups ((ERASE_GRP_SIZE + HC_WP_GRP_SIZE) * * 512 KB) and its offset given in either sectors or bytes, depending * on whether it's a high capacity device or not. * NB: The slicer and its slices need to be registered before adding * the disk for the corresponding user data area as re-tasting is * racy. */ sector_size = mmc_get_sector_size(periph); size = ext_csd[EXT_CSD_ENH_SIZE_MULT] + (ext_csd[EXT_CSD_ENH_SIZE_MULT + 1] << 8) + (ext_csd[EXT_CSD_ENH_SIZE_MULT + 2] << 16); if (rev >= 4 && comp == TRUE && size > 0 && (ext_csd[EXT_CSD_PART_SUPPORT] & EXT_CSD_PART_SUPPORT_ENH_ATTR_EN) != 0 && (ext_csd[EXT_CSD_PART_ATTR] & (EXT_CSD_PART_ATTR_ENH_USR)) != 0) { erase_size = ext_csd[EXT_CSD_ERASE_GRP_SIZE] * 1024 * MMC_SECTOR_SIZE; wp_size = ext_csd[EXT_CSD_HC_WP_GRP_SIZE]; size *= erase_size * wp_size; if (size != mmc_get_media_size(periph) * sector_size) { sc->enh_size = size; sc->enh_base = (ext_csd[EXT_CSD_ENH_START_ADDR] + (ext_csd[EXT_CSD_ENH_START_ADDR + 1] << 8) + (ext_csd[EXT_CSD_ENH_START_ADDR + 2] << 16) + (ext_csd[EXT_CSD_ENH_START_ADDR + 3] << 24)) * ((mmcp->card_features & CARD_FEATURE_SDHC) ? 1: MMC_SECTOR_SIZE); } else CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH, ("enhanced user data area spans entire device")); } /* * Add default partition. This may be the only one or the user * data area in case partitions are supported. */ ro = sdda_get_read_only(periph, ccb); sdda_add_part(periph, EXT_CSD_PART_CONFIG_ACC_DEFAULT, "sdda", periph->unit_number, mmc_get_media_size(periph), ro); sc->part_curr = EXT_CSD_PART_CONFIG_ACC_DEFAULT; if (mmc_get_spec_vers(periph) < 3) return; /* Belatedly announce enhanced user data slice. */ if (sc->enh_size != 0) { CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH, ("enhanced user data area off 0x%jx size %ju bytes\n", sc->enh_base, sc->enh_size)); } /* * Determine partition switch timeout (provided in units of 10 ms) * and ensure it's at least 300 ms as some eMMC chips lie. */ sc->part_time = max(ext_csd[EXT_CSD_PART_SWITCH_TO] * 10 * 1000, 300 * 1000); /* Add boot partitions, which are of a fixed multiple of 128 KB. 
*/ size = ext_csd[EXT_CSD_BOOT_SIZE_MULT] * MMC_BOOT_RPMB_BLOCK_SIZE; if (size > 0 && (sdda_get_host_caps(periph, ccb) & MMC_CAP_BOOT_NOACC) == 0) { sdda_add_part(periph, EXT_CSD_PART_CONFIG_ACC_BOOT0, SDDA_FMT_BOOT, 0, size, ro | ((ext_csd[EXT_CSD_BOOT_WP_STATUS] & EXT_CSD_BOOT_WP_STATUS_BOOT0_MASK) != 0)); sdda_add_part(periph, EXT_CSD_PART_CONFIG_ACC_BOOT1, SDDA_FMT_BOOT, 1, size, ro | ((ext_csd[EXT_CSD_BOOT_WP_STATUS] & EXT_CSD_BOOT_WP_STATUS_BOOT1_MASK) != 0)); } /* Add RPMB partition, which also is of a fixed multiple of 128 KB. */ size = ext_csd[EXT_CSD_RPMB_MULT] * MMC_BOOT_RPMB_BLOCK_SIZE; if (rev >= 5 && size > 0) sdda_add_part(periph, EXT_CSD_PART_CONFIG_ACC_RPMB, SDDA_FMT_RPMB, 0, size, ro); if (rev <= 3 || comp == FALSE) return; /* * Add general purpose partitions, which are of a multiple of high * capacity write protect groups, too. */ if ((ext_csd[EXT_CSD_PART_SUPPORT] & EXT_CSD_PART_SUPPORT_EN) != 0) { erase_size = ext_csd[EXT_CSD_ERASE_GRP_SIZE] * 1024 * MMC_SECTOR_SIZE; wp_size = ext_csd[EXT_CSD_HC_WP_GRP_SIZE]; for (i = 0; i < MMC_PART_GP_MAX; i++) { size = ext_csd[EXT_CSD_GP_SIZE_MULT + i * 3] + (ext_csd[EXT_CSD_GP_SIZE_MULT + i * 3 + 1] << 8) + (ext_csd[EXT_CSD_GP_SIZE_MULT + i * 3 + 2] << 16); if (size == 0) continue; sdda_add_part(periph, EXT_CSD_PART_CONFIG_ACC_GP0 + i, SDDA_FMT_GP, i, size * erase_size * wp_size, ro); } } } /* * We cannot just call mmc_switch() since it will sleep, and we are in * GEOM context and cannot sleep. Instead, create an MMCIO request to switch * partitions and send it to h/w, and upon completion resume processing * the I/O queue. * This function cannot fail, instead check switch errors in sddadone(). */ static void sdda_init_switch_part(struct cam_periph *periph, union ccb *start_ccb, uint8_t part) { struct sdda_softc *sc = (struct sdda_softc *)periph->softc; uint8_t value; KASSERT(part < MMC_PART_MAX, ("%s: invalid partition index", __func__)); sc->part_requested = part; value = (sc->raw_ext_csd[EXT_CSD_PART_CONFIG] & ~EXT_CSD_PART_CONFIG_ACC_MASK) | part; mmc_switch_fill_mmcio(start_ccb, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_PART_CONFIG, value, sc->part_time); start_ccb->ccb_h.cbfcnp = sddadone; sc->outstanding_cmds++; cam_periph_unlock(periph); xpt_action(start_ccb); cam_periph_lock(periph); } /* Called with periph lock held! */ static void sddastart(struct cam_periph *periph, union ccb *start_ccb) { struct bio *bp; struct sdda_softc *softc = (struct sdda_softc *)periph->softc; struct sdda_part *part; struct mmc_params *mmcp = &periph->path->device->mmc_ident_data; uint8_t part_index; CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("sddastart\n")); if (softc->state != SDDA_STATE_NORMAL) { CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("device is not in SDDA_STATE_NORMAL yet\n")); xpt_release_ccb(start_ccb); return; } /* Find partition that has outstanding commands. Prefer current partition. */ part_index = softc->part_curr; part = softc->part[softc->part_curr]; bp = bioq_first(&part->bio_queue); if (bp == NULL) { for (part_index = 0; part_index < MMC_PART_MAX; part_index++) { if ((part = softc->part[part_index]) != NULL && (bp = bioq_first(&softc->part[part_index]->bio_queue)) != NULL) break; } } if (bp == NULL) { xpt_release_ccb(start_ccb); return; } if (part_index != softc->part_curr) { CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH, ("Partition %d -> %d\n", softc->part_curr, part_index)); /* * According to section "6.2.2 Command restrictions" of the eMMC * specification v5.1, CMD19/CMD21 aren't allowed to be used with * RPMB partitions. 
So we pause re-tuning along with triggering * it up-front to decrease the likelihood of re-tuning becoming * necessary while accessing an RPMB partition. Consequently, an * RPMB partition should immediately be switched away from again * after an access in order to allow for re-tuning to take place * anew. */ /* TODO: pause retune if switching to RPMB partition */ softc->state = SDDA_STATE_PART_SWITCH; sdda_init_switch_part(periph, start_ccb, part_index); return; } bioq_remove(&part->bio_queue, bp); switch (bp->bio_cmd) { case BIO_WRITE: CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("BIO_WRITE\n")); part->flags |= SDDA_FLAG_DIRTY; /* FALLTHROUGH */ case BIO_READ: { struct ccb_mmcio *mmcio; uint64_t blockno = bp->bio_pblkno; uint16_t count = bp->bio_bcount / 512; uint16_t opcode; if (bp->bio_cmd == BIO_READ) CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("BIO_READ\n")); CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("Block %"PRIu64" cnt %u\n", blockno, count)); /* Construct new MMC command */ if (bp->bio_cmd == BIO_READ) { if (count > 1) opcode = MMC_READ_MULTIPLE_BLOCK; else opcode = MMC_READ_SINGLE_BLOCK; } else { if (count > 1) opcode = MMC_WRITE_MULTIPLE_BLOCK; else opcode = MMC_WRITE_BLOCK; } start_ccb->ccb_h.func_code = XPT_MMC_IO; start_ccb->ccb_h.flags = (bp->bio_cmd == BIO_READ ? CAM_DIR_IN : CAM_DIR_OUT); start_ccb->ccb_h.retry_count = 0; start_ccb->ccb_h.timeout = 15 * 1000; start_ccb->ccb_h.cbfcnp = sddadone; mmcio = &start_ccb->mmcio; mmcio->cmd.opcode = opcode; mmcio->cmd.arg = blockno; if (!(mmcp->card_features & CARD_FEATURE_SDHC)) mmcio->cmd.arg <<= 9; mmcio->cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC; mmcio->cmd.data = softc->mmcdata; memset(mmcio->cmd.data, 0, sizeof(struct mmc_data)); mmcio->cmd.data->data = bp->bio_data; mmcio->cmd.data->len = 512 * count; mmcio->cmd.data->flags = (bp->bio_cmd == BIO_READ ? 
MMC_DATA_READ : MMC_DATA_WRITE); /* Direct h/w to issue CMD12 upon completion */ if (count > 1) { mmcio->cmd.data->flags |= MMC_DATA_MULTI; mmcio->stop.opcode = MMC_STOP_TRANSMISSION; mmcio->stop.flags = MMC_RSP_R1B | MMC_CMD_AC; mmcio->stop.arg = 0; } break; } case BIO_FLUSH: CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("BIO_FLUSH\n")); sddaschedule(periph); break; case BIO_DELETE: CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("BIO_DELETE\n")); sddaschedule(periph); break; default: biofinish(bp, NULL, EOPNOTSUPP); xpt_release_ccb(start_ccb); return; } start_ccb->ccb_h.ccb_bp = bp; softc->outstanding_cmds++; softc->refcount++; cam_periph_unlock(periph); xpt_action(start_ccb); cam_periph_lock(periph); /* May have more work to do, so ensure we stay scheduled */ sddaschedule(periph); } static void sddadone(struct cam_periph *periph, union ccb *done_ccb) { struct bio *bp; struct sdda_softc *softc; struct ccb_mmcio *mmcio; struct cam_path *path; uint32_t card_status; int error = 0; softc = (struct sdda_softc *)periph->softc; mmcio = &done_ccb->mmcio; path = done_ccb->ccb_h.path; CAM_DEBUG(path, CAM_DEBUG_TRACE, ("sddadone\n")); if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { CAM_DEBUG(path, CAM_DEBUG_TRACE, ("Error!!!\n")); if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) cam_release_devq(path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); error = EIO; } else { if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) panic("REQ_CMP with QFRZN"); error = 0; } card_status = mmcio->cmd.resp[0]; CAM_DEBUG(path, CAM_DEBUG_TRACE, ("Card status: %08x\n", R1_STATUS(card_status))); CAM_DEBUG(path, CAM_DEBUG_TRACE, ("Current state: %d\n", R1_CURRENT_STATE(card_status))); /* Process result of switching MMC partitions */ if (softc->state == SDDA_STATE_PART_SWITCH) { CAM_DEBUG(path, CAM_DEBUG_TRACE, ("Completing partition switch to %d\n", softc->part_requested)); softc->outstanding_cmds--; /* Complete partition switch */ softc->state = SDDA_STATE_NORMAL; if (error != 0) { /* TODO: Unpause retune if accessing RPMB */ xpt_release_ccb(done_ccb); xpt_schedule(periph, CAM_PRIORITY_NORMAL); return; } softc->raw_ext_csd[EXT_CSD_PART_CONFIG] = (softc->raw_ext_csd[EXT_CSD_PART_CONFIG] & ~EXT_CSD_PART_CONFIG_ACC_MASK) | softc->part_requested; /* TODO: Unpause retune if accessing RPMB */ softc->part_curr = softc->part_requested; xpt_release_ccb(done_ccb); /* Return to processing BIO requests */ xpt_schedule(periph, CAM_PRIORITY_NORMAL); return; } bp = (struct bio *)done_ccb->ccb_h.ccb_bp; bp->bio_error = error; if (error != 0) { bp->bio_resid = bp->bio_bcount; bp->bio_flags |= BIO_ERROR; } else { /* XXX: How many bytes remaining? */ bp->bio_resid = 0; if (bp->bio_resid > 0) bp->bio_flags |= BIO_ERROR; } softc->outstanding_cmds--; xpt_release_ccb(done_ccb); /* * Release the periph refcount taken in sddastart() for each CCB. 
 */
	KASSERT(softc->refcount >= 1,
	    ("sddadone softc %p refcount %d", softc, softc->refcount));
	softc->refcount--;
	biodone(bp);
}

static int
sddaerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags)
{
	return(cam_periph_error(ccb, cam_flags, sense_flags));
}
+
+static int
+sddadump(void *arg, void *virtual, vm_offset_t physical, off_t offset,
+    size_t length)
+{
+	struct ccb_mmcio mmcio;
+	struct disk *dp;
+	struct sdda_part *part;
+	struct sdda_softc *softc;
+	struct cam_periph *periph;
+	struct mmc_params *mmcp;
+	uint16_t count;
+	uint16_t opcode;
+	int error;
+
+	dp = arg;
+	part = dp->d_drv1;
+	softc = part->sc;
+	periph = softc->periph;
+	mmcp = &periph->path->device->mmc_ident_data;
+
+	if (softc->state != SDDA_STATE_NORMAL)
+		return (ENXIO);
+
+	count = length / 512;
+	if (count == 0)
+		return (0);
+
+	if (softc->part[softc->part_curr] != part)
+		return (EIO);	/* TODO implement polled partition switch */
+
+	memset(&mmcio, 0, sizeof(mmcio));
+	xpt_setup_ccb(&mmcio.ccb_h, periph->path, CAM_PRIORITY_NORMAL); /* XXX needed? */
+
+	mmcio.ccb_h.func_code = XPT_MMC_IO;
+	mmcio.ccb_h.flags = CAM_DIR_OUT;
+	mmcio.ccb_h.retry_count = 0;
+	mmcio.ccb_h.timeout = 15 * 1000;
+
+	if (count > 1)
+		opcode = MMC_WRITE_MULTIPLE_BLOCK;
+	else
+		opcode = MMC_WRITE_BLOCK;
+	mmcio.cmd.opcode = opcode;
+	mmcio.cmd.arg = offset / 512;
+	if (!(mmcp->card_features & CARD_FEATURE_SDHC))
+		mmcio.cmd.arg <<= 9;
+
+	mmcio.cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
+	mmcio.cmd.data = softc->mmcdata;
+	memset(mmcio.cmd.data, 0, sizeof(struct mmc_data));
+	mmcio.cmd.data->data = virtual;
+	mmcio.cmd.data->len = 512 * count;
+	mmcio.cmd.data->flags = MMC_DATA_WRITE;
+
+	/* Direct h/w to issue CMD12 upon completion */
+	if (count > 1) {
+		mmcio.cmd.data->flags |= MMC_DATA_MULTI;
+		mmcio.stop.opcode = MMC_STOP_TRANSMISSION;
+		mmcio.stop.flags = MMC_RSP_R1B | MMC_CMD_AC;
+		mmcio.stop.arg = 0;
+	}
+
+	error = cam_periph_runccb((union ccb *)&mmcio, cam_periph_error,
+	    0, SF_NO_RECOVERY | SF_NO_RETRY, NULL);
+	if (error != 0)
+		printf("Aborting dump due to I/O error.\n");
+	return (error);
+}
+
#endif /* _KERNEL */
diff --git a/sys/cam/mmc/mmc_sim.c b/sys/cam/mmc/mmc_sim.c
index 792551a93511..40330958574b 100644
--- a/sys/cam/mmc/mmc_sim.c
+++ b/sys/cam/mmc/mmc_sim.c
@@ -1,253 +1,255 @@
/*-
 * Copyright (c) 2020-2021 Emmanuel Vadot
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include "mmc_sim_if.h" static void mmc_cam_default_poll(struct cam_sim *sim) { + struct mmc_sim *mmc_sim; - return; + mmc_sim = cam_sim_softc(sim); + MMC_SIM_CAM_POLL(mmc_sim->dev); } static void mmc_sim_task(void *arg, int pending) { struct mmc_sim *mmc_sim; struct ccb_trans_settings *cts; int rv; mmc_sim = arg; if (mmc_sim->ccb == NULL) return; cts = &mmc_sim->ccb->cts; switch (mmc_sim->ccb->ccb_h.func_code) { case XPT_MMC_GET_TRAN_SETTINGS: rv = MMC_SIM_GET_TRAN_SETTINGS(mmc_sim->dev, &cts->proto_specific.mmc); if (rv != 0) mmc_sim->ccb->ccb_h.status = CAM_REQ_INVALID; else mmc_sim->ccb->ccb_h.status = CAM_REQ_CMP; break; case XPT_MMC_SET_TRAN_SETTINGS: rv = MMC_SIM_SET_TRAN_SETTINGS(mmc_sim->dev, &cts->proto_specific.mmc); if (rv != 0) mmc_sim->ccb->ccb_h.status = CAM_REQ_INVALID; else mmc_sim->ccb->ccb_h.status = CAM_REQ_CMP; break; default: panic("Unsupported ccb func %x\n", mmc_sim->ccb->ccb_h.func_code); break; } xpt_done(mmc_sim->ccb); mmc_sim->ccb = NULL; } static void mmc_cam_sim_default_action(struct cam_sim *sim, union ccb *ccb) { struct mmc_sim *mmc_sim; struct ccb_trans_settings_mmc mmc; int rv; mmc_sim = cam_sim_softc(sim); - if (mmc_sim == NULL) { - ccb->ccb_h.status = CAM_SEL_TIMEOUT; - xpt_done(ccb); - return; - } - mtx_assert(&mmc_sim->mtx, MA_OWNED); if (mmc_sim->ccb != NULL) { ccb->ccb_h.status = CAM_BUSY; xpt_done(ccb); return; } switch (ccb->ccb_h.func_code) { case XPT_PATH_INQ: rv = MMC_SIM_GET_TRAN_SETTINGS(mmc_sim->dev, &mmc); if (rv != 0) { ccb->ccb_h.status = CAM_REQ_INVALID; } else { mmc_path_inq(&ccb->cpi, "Deglitch Networks", sim, mmc.host_max_data); } break; case XPT_GET_TRAN_SETTINGS: { struct ccb_trans_settings *cts = &ccb->cts; rv = MMC_SIM_GET_TRAN_SETTINGS(mmc_sim->dev, &cts->proto_specific.mmc); if (rv != 0) ccb->ccb_h.status = CAM_REQ_INVALID; else { cts->protocol = PROTO_MMCSD; cts->protocol_version = 1; cts->transport = XPORT_MMCSD; cts->transport_version = 1; cts->xport_specific.valid = 0; ccb->ccb_h.status = CAM_REQ_CMP; } break; } case XPT_MMC_GET_TRAN_SETTINGS: { ccb->ccb_h.status = CAM_SIM_QUEUED; mmc_sim->ccb = ccb; taskqueue_enqueue(taskqueue_thread, &mmc_sim->sim_task); return; /* NOTREACHED */ break; } case XPT_SET_TRAN_SETTINGS: { struct ccb_trans_settings *cts = &ccb->cts; rv = MMC_SIM_SET_TRAN_SETTINGS(mmc_sim->dev, &cts->proto_specific.mmc); if (rv != 0) ccb->ccb_h.status = CAM_REQ_INVALID; else ccb->ccb_h.status = CAM_REQ_CMP; break; } case XPT_MMC_SET_TRAN_SETTINGS: { ccb->ccb_h.status = CAM_SIM_QUEUED; mmc_sim->ccb = ccb; taskqueue_enqueue(taskqueue_thread, &mmc_sim->sim_task); return; /* NOTREACHED */ break; } case XPT_RESET_BUS: ccb->ccb_h.status = CAM_REQ_CMP; break; case XPT_MMC_IO: { - ccb->ccb_h.status = CAM_REQ_INVALID; rv = MMC_SIM_CAM_REQUEST(mmc_sim->dev, ccb); if (rv != 0) ccb->ccb_h.status = CAM_SIM_QUEUED; return; /* NOTREACHED */ break; } default: ccb->ccb_h.status = 
CAM_REQ_INVALID; break; } xpt_done(ccb); return; } int mmc_cam_sim_alloc(device_t dev, const char *name, struct mmc_sim *mmc_sim) { + kobjop_desc_t kobj_desc; + kobj_method_t *kobj_method; mmc_sim->dev = dev; if ((mmc_sim->devq = cam_simq_alloc(1)) == NULL) { goto fail; } snprintf(mmc_sim->name, sizeof(mmc_sim->name), "%s_sim", name); mtx_init(&mmc_sim->mtx, mmc_sim->name, NULL, MTX_DEF); + + /* Provide sim_poll hook only if the device has the poll method. */ + kobj_desc = &mmc_sim_cam_poll_desc; + kobj_method = kobj_lookup_method(((kobj_t)dev)->ops->cls, NULL, + kobj_desc); mmc_sim->sim = cam_sim_alloc(mmc_cam_sim_default_action, - mmc_cam_default_poll, + kobj_method == &kobj_desc->deflt ? NULL : mmc_cam_default_poll, mmc_sim->name, mmc_sim, device_get_unit(dev), &mmc_sim->mtx, 1, 1, mmc_sim->devq); if (mmc_sim->sim == NULL) { cam_simq_free(mmc_sim->devq); device_printf(dev, "cannot allocate CAM SIM\n"); goto fail; } mtx_lock(&mmc_sim->mtx); if (xpt_bus_register(mmc_sim->sim, dev, 0) != 0) { device_printf(dev, "cannot register SCSI pass-through bus\n"); cam_sim_free(mmc_sim->sim, FALSE); cam_simq_free(mmc_sim->devq); mtx_unlock(&mmc_sim->mtx); goto fail; } mtx_unlock(&mmc_sim->mtx); TASK_INIT(&mmc_sim->sim_task, 0, mmc_sim_task, mmc_sim); return (0); fail: mmc_cam_sim_free(mmc_sim); return (1); } void mmc_cam_sim_free(struct mmc_sim *mmc_sim) { if (mmc_sim->sim != NULL) { mtx_lock(&mmc_sim->mtx); xpt_bus_deregister(cam_sim_path(mmc_sim->sim)); cam_sim_free(mmc_sim->sim, FALSE); mtx_unlock(&mmc_sim->mtx); } if (mmc_sim->devq != NULL) cam_simq_free(mmc_sim->devq); } void mmc_cam_sim_discover(struct mmc_sim *mmc_sim) { mmccam_start_discovery(mmc_sim->sim); } diff --git a/sys/cam/mmc/mmc_sim_if.m b/sys/cam/mmc/mmc_sim_if.m index f1b88fc05ef5..f7d3f4df5ebb 100644 --- a/sys/cam/mmc/mmc_sim_if.m +++ b/sys/cam/mmc/mmc_sim_if.m @@ -1,54 +1,58 @@ #- # SPDX-License-Identifier: BSD-2-Clause-FreeBSD # # Copyright (c) 2020 Emmanuel Vadot # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS # OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) # HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY # OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF # SUCH DAMAGE. 
# # $FreeBSD$ #include #include #include #include #include #include #include #include INTERFACE mmc_sim; METHOD int get_tran_settings { device_t dev; struct ccb_trans_settings_mmc *cts; }; METHOD int set_tran_settings { device_t dev; struct ccb_trans_settings_mmc *cts; }; METHOD int cam_request { device_t dev; union ccb *ccb; }; + +METHOD void cam_poll { + device_t dev; +}; diff --git a/sys/dev/mmc/host/dwmmc.c b/sys/dev/mmc/host/dwmmc.c index 2080a973564f..70bd204069bd 100644 --- a/sys/dev/mmc/host/dwmmc.c +++ b/sys/dev/mmc/host/dwmmc.c @@ -1,1575 +1,1590 @@ /*- * Copyright (c) 2014-2019 Ruslan Bukin * All rights reserved. * * This software was developed by SRI International and the University of * Cambridge Computer Laboratory under DARPA/AFRL contract (FA8750-10-C-0237) * ("CTSRD"), as part of the DARPA CRASH research programme. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * Synopsys DesignWare Mobile Storage Host Controller * Chapter 14, Altera Cyclone V Device Handbook (CV-5V2 2014.07.22) */ #include __FBSDID("$FreeBSD$"); #include #include +#include #include #include #include #include #include #include +#include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef EXT_RESOURCES #include #endif #include #include #include "opt_mmccam.h" #ifdef MMCCAM #include #include #include #include #include #include "mmc_sim_if.h" #endif #include "mmcbr_if.h" #ifdef DEBUG #define dprintf(fmt, args...) printf(fmt, ##args) #else #define dprintf(x, arg...) 
#endif #define READ4(_sc, _reg) \ bus_read_4((_sc)->res[0], _reg) #define WRITE4(_sc, _reg, _val) \ bus_write_4((_sc)->res[0], _reg, _val) #define DIV_ROUND_UP(n, d) howmany(n, d) #define DWMMC_LOCK(_sc) mtx_lock(&(_sc)->sc_mtx) #define DWMMC_UNLOCK(_sc) mtx_unlock(&(_sc)->sc_mtx) #define DWMMC_LOCK_INIT(_sc) \ mtx_init(&_sc->sc_mtx, device_get_nameunit(_sc->dev), \ "dwmmc", MTX_DEF) #define DWMMC_LOCK_DESTROY(_sc) mtx_destroy(&_sc->sc_mtx); #define DWMMC_ASSERT_LOCKED(_sc) mtx_assert(&_sc->sc_mtx, MA_OWNED); #define DWMMC_ASSERT_UNLOCKED(_sc) mtx_assert(&_sc->sc_mtx, MA_NOTOWNED); #define PENDING_CMD 0x01 #define PENDING_STOP 0x02 #define CARD_INIT_DONE 0x04 #define DWMMC_DATA_ERR_FLAGS (SDMMC_INTMASK_DRT | SDMMC_INTMASK_DCRC \ |SDMMC_INTMASK_SBE | SDMMC_INTMASK_EBE) #define DWMMC_CMD_ERR_FLAGS (SDMMC_INTMASK_RTO | SDMMC_INTMASK_RCRC \ |SDMMC_INTMASK_RE) #define DWMMC_ERR_FLAGS (DWMMC_DATA_ERR_FLAGS | DWMMC_CMD_ERR_FLAGS \ |SDMMC_INTMASK_HLE) #define DES0_DIC (1 << 1) /* Disable Interrupt on Completion */ #define DES0_LD (1 << 2) /* Last Descriptor */ #define DES0_FS (1 << 3) /* First Descriptor */ #define DES0_CH (1 << 4) /* second address CHained */ #define DES0_ER (1 << 5) /* End of Ring */ #define DES0_CES (1 << 30) /* Card Error Summary */ #define DES0_OWN (1 << 31) /* OWN */ #define DES1_BS1_MASK 0x1fff struct idmac_desc { uint32_t des0; /* control */ uint32_t des1; /* bufsize */ uint32_t des2; /* buf1 phys addr */ uint32_t des3; /* buf2 phys addr or next descr */ }; #define IDMAC_DESC_SEGS (PAGE_SIZE / (sizeof(struct idmac_desc))) #define IDMAC_DESC_SIZE (sizeof(struct idmac_desc) * IDMAC_DESC_SEGS) #define DEF_MSIZE 0x2 /* Burst size of multiple transaction */ /* * Size field in DMA descriptor is 13 bits long (up to 4095 bytes), * but must be a multiple of the data bus size.Additionally, we must ensure * that bus_dmamap_load() doesn't additionally fragments buffer (because it * is processed with page size granularity). Thus limit fragment size to half * of page. * XXX switch descriptor format to array and use second buffer pointer for * second half of page */ #define IDMAC_MAX_SIZE 2048 /* * Busdma may bounce buffers, so we must reserve 2 descriptors * (on start and on end) for bounced fragments. 
*/ #define DWMMC_MAX_DATA (IDMAC_MAX_SIZE * (IDMAC_DESC_SEGS - 2)) / MMC_SECTOR_SIZE static void dwmmc_next_operation(struct dwmmc_softc *); static int dwmmc_setup_bus(struct dwmmc_softc *, int); static int dma_done(struct dwmmc_softc *, struct mmc_command *); static int dma_stop(struct dwmmc_softc *); static void pio_read(struct dwmmc_softc *, struct mmc_command *); static void pio_write(struct dwmmc_softc *, struct mmc_command *); static void dwmmc_handle_card_present(struct dwmmc_softc *sc, bool is_present); static struct resource_spec dwmmc_spec[] = { { SYS_RES_MEMORY, 0, RF_ACTIVE }, { SYS_RES_IRQ, 0, RF_ACTIVE }, { -1, 0 } }; #define HWTYPE_MASK (0x0000ffff) #define HWFLAG_MASK (0xffff << 16) static void dwmmc_get1paddr(void *arg, bus_dma_segment_t *segs, int nsegs, int error) { if (nsegs != 1) panic("%s: nsegs != 1 (%d)\n", __func__, nsegs); if (error != 0) panic("%s: error != 0 (%d)\n", __func__, error); *(bus_addr_t *)arg = segs[0].ds_addr; } static void dwmmc_ring_setup(void *arg, bus_dma_segment_t *segs, int nsegs, int error) { struct dwmmc_softc *sc; int idx; sc = arg; dprintf("nsegs %d seg0len %lu\n", nsegs, segs[0].ds_len); if (error != 0) panic("%s: error != 0 (%d)\n", __func__, error); for (idx = 0; idx < nsegs; idx++) { sc->desc_ring[idx].des0 = DES0_DIC | DES0_CH; sc->desc_ring[idx].des1 = segs[idx].ds_len & DES1_BS1_MASK; sc->desc_ring[idx].des2 = segs[idx].ds_addr; if (idx == 0) sc->desc_ring[idx].des0 |= DES0_FS; if (idx == (nsegs - 1)) { sc->desc_ring[idx].des0 &= ~(DES0_DIC | DES0_CH); sc->desc_ring[idx].des0 |= DES0_LD; } wmb(); sc->desc_ring[idx].des0 |= DES0_OWN; } } static int dwmmc_ctrl_reset(struct dwmmc_softc *sc, int reset_bits) { int reg; int i; reg = READ4(sc, SDMMC_CTRL); reg |= (reset_bits); WRITE4(sc, SDMMC_CTRL, reg); /* Wait reset done */ for (i = 0; i < 100; i++) { if (!(READ4(sc, SDMMC_CTRL) & reset_bits)) return (0); DELAY(10); } device_printf(sc->dev, "Reset failed\n"); return (1); } static int dma_setup(struct dwmmc_softc *sc) { int error; int nidx; int idx; /* * Set up TX descriptor ring, descriptors, and dma maps. */ error = bus_dma_tag_create( bus_get_dma_tag(sc->dev), /* Parent tag. */ 4096, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ IDMAC_DESC_SIZE, 1, /* maxsize, nsegments */ IDMAC_DESC_SIZE, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->desc_tag); if (error != 0) { device_printf(sc->dev, "could not create ring DMA tag.\n"); return (1); } error = bus_dmamem_alloc(sc->desc_tag, (void**)&sc->desc_ring, BUS_DMA_COHERENT | BUS_DMA_WAITOK | BUS_DMA_ZERO, &sc->desc_map); if (error != 0) { device_printf(sc->dev, "could not allocate descriptor ring.\n"); return (1); } error = bus_dmamap_load(sc->desc_tag, sc->desc_map, sc->desc_ring, IDMAC_DESC_SIZE, dwmmc_get1paddr, &sc->desc_ring_paddr, 0); if (error != 0) { device_printf(sc->dev, "could not load descriptor ring map.\n"); return (1); } for (idx = 0; idx < IDMAC_DESC_SEGS; idx++) { sc->desc_ring[idx].des0 = DES0_CH; sc->desc_ring[idx].des1 = 0; nidx = (idx + 1) % IDMAC_DESC_SEGS; sc->desc_ring[idx].des3 = sc->desc_ring_paddr + \ (nidx * sizeof(struct idmac_desc)); } sc->desc_ring[idx - 1].des3 = sc->desc_ring_paddr; sc->desc_ring[idx - 1].des0 |= DES0_ER; error = bus_dma_tag_create( bus_get_dma_tag(sc->dev), /* Parent tag. 
*/ 8, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ IDMAC_MAX_SIZE * IDMAC_DESC_SEGS, /* maxsize */ IDMAC_DESC_SEGS, /* nsegments */ IDMAC_MAX_SIZE, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->buf_tag); if (error != 0) { device_printf(sc->dev, "could not create ring DMA tag.\n"); return (1); } error = bus_dmamap_create(sc->buf_tag, 0, &sc->buf_map); if (error != 0) { device_printf(sc->dev, "could not create TX buffer DMA map.\n"); return (1); } return (0); } static void dwmmc_cmd_done(struct dwmmc_softc *sc) { struct mmc_command *cmd; #ifdef MMCCAM union ccb *ccb; #endif #ifdef MMCCAM ccb = sc->ccb; if (ccb == NULL) return; cmd = &ccb->mmcio.cmd; #else cmd = sc->curcmd; #endif if (cmd == NULL) return; if (cmd->flags & MMC_RSP_PRESENT) { if (cmd->flags & MMC_RSP_136) { cmd->resp[3] = READ4(sc, SDMMC_RESP0); cmd->resp[2] = READ4(sc, SDMMC_RESP1); cmd->resp[1] = READ4(sc, SDMMC_RESP2); cmd->resp[0] = READ4(sc, SDMMC_RESP3); } else { cmd->resp[3] = 0; cmd->resp[2] = 0; cmd->resp[1] = 0; cmd->resp[0] = READ4(sc, SDMMC_RESP0); } } } static void dwmmc_tasklet(struct dwmmc_softc *sc) { struct mmc_command *cmd; cmd = sc->curcmd; if (cmd == NULL) return; if (!sc->cmd_done) return; if (cmd->error != MMC_ERR_NONE || !cmd->data) { dwmmc_next_operation(sc); } else if (cmd->data && sc->dto_rcvd) { if ((cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK || cmd->opcode == MMC_READ_MULTIPLE_BLOCK) && sc->use_auto_stop) { if (sc->acd_rcvd) dwmmc_next_operation(sc); } else { dwmmc_next_operation(sc); } } } static void dwmmc_intr(void *arg) { struct mmc_command *cmd; struct dwmmc_softc *sc; uint32_t reg; sc = arg; DWMMC_LOCK(sc); cmd = sc->curcmd; /* First handle SDMMC controller interrupts */ reg = READ4(sc, SDMMC_MINTSTS); if (reg) { dprintf("%s 0x%08x\n", __func__, reg); if (reg & DWMMC_CMD_ERR_FLAGS) { dprintf("cmd err 0x%08x cmd 0x%08x\n", reg, cmd->opcode); cmd->error = MMC_ERR_TIMEOUT; } if (reg & DWMMC_DATA_ERR_FLAGS) { dprintf("data err 0x%08x cmd 0x%08x\n", reg, cmd->opcode); cmd->error = MMC_ERR_FAILED; if (!sc->use_pio) { dma_done(sc, cmd); dma_stop(sc); } } if (reg & SDMMC_INTMASK_CMD_DONE) { dwmmc_cmd_done(sc); sc->cmd_done = 1; } if (reg & SDMMC_INTMASK_ACD) sc->acd_rcvd = 1; if (reg & SDMMC_INTMASK_DTO) sc->dto_rcvd = 1; if (reg & SDMMC_INTMASK_CD) { dwmmc_handle_card_present(sc, READ4(sc, SDMMC_CDETECT) == 0 ? 
true : false); } } /* Ack interrupts */ WRITE4(sc, SDMMC_RINTSTS, reg); if (sc->use_pio) { if (reg & (SDMMC_INTMASK_RXDR|SDMMC_INTMASK_DTO)) { pio_read(sc, cmd); } if (reg & (SDMMC_INTMASK_TXDR|SDMMC_INTMASK_DTO)) { pio_write(sc, cmd); } } else { /* Now handle DMA interrupts */ reg = READ4(sc, SDMMC_IDSTS); if (reg) { dprintf("dma intr 0x%08x\n", reg); if (reg & (SDMMC_IDINTEN_TI | SDMMC_IDINTEN_RI)) { WRITE4(sc, SDMMC_IDSTS, (SDMMC_IDINTEN_TI | SDMMC_IDINTEN_RI)); WRITE4(sc, SDMMC_IDSTS, SDMMC_IDINTEN_NI); dma_done(sc, cmd); } } } dwmmc_tasklet(sc); DWMMC_UNLOCK(sc); } static void dwmmc_handle_card_present(struct dwmmc_softc *sc, bool is_present) { bool was_present; + if (dumping || SCHEDULER_STOPPED()) + return; + was_present = sc->child != NULL; if (!was_present && is_present) { taskqueue_enqueue_timeout(taskqueue_swi_giant, &sc->card_delayed_task, -(hz / 2)); } else if (was_present && !is_present) { taskqueue_enqueue(taskqueue_swi_giant, &sc->card_task); } } static void dwmmc_card_task(void *arg, int pending __unused) { struct dwmmc_softc *sc = arg; #ifdef MMCCAM mmc_cam_sim_discover(&sc->mmc_sim); #else DWMMC_LOCK(sc); if (READ4(sc, SDMMC_CDETECT) == 0 || (sc->mmc_helper.props & MMC_PROP_BROKEN_CD)) { if (sc->child == NULL) { if (bootverbose) device_printf(sc->dev, "Card inserted\n"); sc->child = device_add_child(sc->dev, "mmc", -1); DWMMC_UNLOCK(sc); if (sc->child) { device_set_ivars(sc->child, sc); (void)device_probe_and_attach(sc->child); } } else DWMMC_UNLOCK(sc); } else { /* Card isn't present, detach if necessary */ if (sc->child != NULL) { if (bootverbose) device_printf(sc->dev, "Card removed\n"); DWMMC_UNLOCK(sc); device_delete_child(sc->dev, sc->child); sc->child = NULL; } else DWMMC_UNLOCK(sc); } #endif /* MMCCAM */ } static int parse_fdt(struct dwmmc_softc *sc) { pcell_t dts_value[3]; phandle_t node; uint32_t bus_hz = 0; int len; #ifdef EXT_RESOURCES int error; #endif if ((node = ofw_bus_get_node(sc->dev)) == -1) return (ENXIO); /* Set some defaults for freq and supported mode */ sc->host.f_min = 400000; sc->host.f_max = 200000000; sc->host.host_ocr = MMC_OCR_320_330 | MMC_OCR_330_340; sc->host.caps = MMC_CAP_HSPEED | MMC_CAP_SIGNALING_330; mmc_fdt_parse(sc->dev, node, &sc->mmc_helper, &sc->host); /* fifo-depth */ if ((len = OF_getproplen(node, "fifo-depth")) > 0) { OF_getencprop(node, "fifo-depth", dts_value, len); sc->fifo_depth = dts_value[0]; } /* num-slots (Deprecated) */ sc->num_slots = 1; if ((len = OF_getproplen(node, "num-slots")) > 0) { device_printf(sc->dev, "num-slots property is deprecated\n"); OF_getencprop(node, "num-slots", dts_value, len); sc->num_slots = dts_value[0]; } /* clock-frequency */ if ((len = OF_getproplen(node, "clock-frequency")) > 0) { OF_getencprop(node, "clock-frequency", dts_value, len); bus_hz = dts_value[0]; } #ifdef EXT_RESOURCES /* IP block reset is optional */ error = hwreset_get_by_ofw_name(sc->dev, 0, "reset", &sc->hwreset); if (error != 0 && error != ENOENT && error != ENODEV) { device_printf(sc->dev, "Cannot get reset\n"); goto fail; } /* vmmc regulator is optional */ error = regulator_get_by_ofw_property(sc->dev, 0, "vmmc-supply", &sc->vmmc); if (error != 0 && error != ENOENT && error != ENODEV) { device_printf(sc->dev, "Cannot get regulator 'vmmc-supply'\n"); goto fail; } /* vqmmc regulator is optional */ error = regulator_get_by_ofw_property(sc->dev, 0, "vqmmc-supply", &sc->vqmmc); if (error != 0 && error != ENOENT && error != ENODEV) { device_printf(sc->dev, "Cannot get regulator 'vqmmc-supply'\n"); goto fail; } /* Assert reset 
first */ if (sc->hwreset != NULL) { error = hwreset_assert(sc->hwreset); if (error != 0) { device_printf(sc->dev, "Cannot assert reset\n"); goto fail; } } /* BIU (Bus Interface Unit clock) is optional */ error = clk_get_by_ofw_name(sc->dev, 0, "biu", &sc->biu); if (error != 0 && error != ENOENT && error != ENODEV) { device_printf(sc->dev, "Cannot get 'biu' clock\n"); goto fail; } if (sc->biu) { error = clk_enable(sc->biu); if (error != 0) { device_printf(sc->dev, "cannot enable biu clock\n"); goto fail; } } /* * CIU (Controller Interface Unit clock) is mandatory * if no clock-frequency property is given */ error = clk_get_by_ofw_name(sc->dev, 0, "ciu", &sc->ciu); if (error != 0 && error != ENOENT && error != ENODEV) { device_printf(sc->dev, "Cannot get 'ciu' clock\n"); goto fail; } if (sc->ciu) { if (bus_hz != 0) { error = clk_set_freq(sc->ciu, bus_hz, 0); if (error != 0) device_printf(sc->dev, "cannot set ciu clock to %u\n", bus_hz); } error = clk_enable(sc->ciu); if (error != 0) { device_printf(sc->dev, "cannot enable ciu clock\n"); goto fail; } clk_get_freq(sc->ciu, &sc->bus_hz); } /* Enable regulators */ if (sc->vmmc != NULL) { error = regulator_enable(sc->vmmc); if (error != 0) { device_printf(sc->dev, "Cannot enable vmmc regulator\n"); goto fail; } } if (sc->vqmmc != NULL) { error = regulator_enable(sc->vqmmc); if (error != 0) { device_printf(sc->dev, "Cannot enable vqmmc regulator\n"); goto fail; } } /* Take dwmmc out of reset */ if (sc->hwreset != NULL) { error = hwreset_deassert(sc->hwreset); if (error != 0) { device_printf(sc->dev, "Cannot deassert reset\n"); goto fail; } } #endif /* EXT_RESOURCES */ if (sc->bus_hz == 0) { device_printf(sc->dev, "No bus speed provided\n"); goto fail; } return (0); fail: return (ENXIO); } int dwmmc_attach(device_t dev) { struct dwmmc_softc *sc; int error; sc = device_get_softc(dev); sc->dev = dev; /* Why not to use Auto Stop? It save a hundred of irq per second */ sc->use_auto_stop = 1; error = parse_fdt(sc); if (error != 0) { device_printf(dev, "Can't get FDT property.\n"); return (ENXIO); } DWMMC_LOCK_INIT(sc); if (bus_alloc_resources(dev, dwmmc_spec, sc->res)) { device_printf(dev, "could not allocate resources\n"); return (ENXIO); } /* Setup interrupt handler. 
*/ error = bus_setup_intr(dev, sc->res[1], INTR_TYPE_NET | INTR_MPSAFE, NULL, dwmmc_intr, sc, &sc->intr_cookie); if (error != 0) { device_printf(dev, "could not setup interrupt handler.\n"); return (ENXIO); } device_printf(dev, "Hardware version ID is %04x\n", READ4(sc, SDMMC_VERID) & 0xffff); /* Reset all */ if (dwmmc_ctrl_reset(sc, (SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET | SDMMC_CTRL_DMA_RESET))) return (ENXIO); dwmmc_setup_bus(sc, sc->host.f_min); if (sc->fifo_depth == 0) { sc->fifo_depth = 1 + ((READ4(sc, SDMMC_FIFOTH) >> SDMMC_FIFOTH_RXWMARK_S) & 0xfff); device_printf(dev, "No fifo-depth, using FIFOTH %x\n", sc->fifo_depth); } if (!sc->use_pio) { dma_stop(sc); if (dma_setup(sc)) return (ENXIO); /* Install desc base */ WRITE4(sc, SDMMC_DBADDR, sc->desc_ring_paddr); /* Enable DMA interrupts */ WRITE4(sc, SDMMC_IDSTS, SDMMC_IDINTEN_MASK); WRITE4(sc, SDMMC_IDINTEN, (SDMMC_IDINTEN_NI | SDMMC_IDINTEN_RI | SDMMC_IDINTEN_TI)); } /* Clear and disable interrups for a while */ WRITE4(sc, SDMMC_RINTSTS, 0xffffffff); WRITE4(sc, SDMMC_INTMASK, 0); /* Maximum timeout */ WRITE4(sc, SDMMC_TMOUT, 0xffffffff); /* Enable interrupts */ WRITE4(sc, SDMMC_RINTSTS, 0xffffffff); WRITE4(sc, SDMMC_INTMASK, (SDMMC_INTMASK_CMD_DONE | SDMMC_INTMASK_DTO | SDMMC_INTMASK_ACD | SDMMC_INTMASK_TXDR | SDMMC_INTMASK_RXDR | DWMMC_ERR_FLAGS | SDMMC_INTMASK_CD)); WRITE4(sc, SDMMC_CTRL, SDMMC_CTRL_INT_ENABLE); TASK_INIT(&sc->card_task, 0, dwmmc_card_task, sc); TIMEOUT_TASK_INIT(taskqueue_swi_giant, &sc->card_delayed_task, 0, dwmmc_card_task, sc); #ifdef MMCCAM sc->ccb = NULL; if (mmc_cam_sim_alloc(dev, "dw_mmc", &sc->mmc_sim) != 0) { device_printf(dev, "cannot alloc cam sim\n"); dwmmc_detach(dev); return (ENXIO); } #endif /* * Schedule a card detection as we won't get an interrupt * if the card is inserted when we attach */ dwmmc_card_task(sc, 0); return (0); } int dwmmc_detach(device_t dev) { struct dwmmc_softc *sc; int ret; sc = device_get_softc(dev); ret = device_delete_children(dev); if (ret != 0) return (ret); taskqueue_drain(taskqueue_swi_giant, &sc->card_task); taskqueue_drain_timeout(taskqueue_swi_giant, &sc->card_delayed_task); if (sc->intr_cookie != NULL) { ret = bus_teardown_intr(dev, sc->res[1], sc->intr_cookie); if (ret != 0) return (ret); } bus_release_resources(dev, dwmmc_spec, sc->res); DWMMC_LOCK_DESTROY(sc); #ifdef EXT_RESOURCES if (sc->hwreset != NULL && hwreset_deassert(sc->hwreset) != 0) device_printf(sc->dev, "cannot deassert reset\n"); if (sc->biu != NULL && clk_disable(sc->biu) != 0) device_printf(sc->dev, "cannot disable biu clock\n"); if (sc->ciu != NULL && clk_disable(sc->ciu) != 0) device_printf(sc->dev, "cannot disable ciu clock\n"); if (sc->vmmc && regulator_disable(sc->vmmc) != 0) device_printf(sc->dev, "Cannot disable vmmc regulator\n"); if (sc->vqmmc && regulator_disable(sc->vqmmc) != 0) device_printf(sc->dev, "Cannot disable vqmmc regulator\n"); #endif #ifdef MMCCAM mmc_cam_sim_free(&sc->mmc_sim); #endif return (0); } static int dwmmc_setup_bus(struct dwmmc_softc *sc, int freq) { int tout; int div; if (freq == 0) { WRITE4(sc, SDMMC_CLKENA, 0); WRITE4(sc, SDMMC_CMD, (SDMMC_CMD_WAIT_PRVDATA | SDMMC_CMD_UPD_CLK_ONLY | SDMMC_CMD_START)); tout = 1000; do { if (tout-- < 0) { device_printf(sc->dev, "Failed update clk\n"); return (1); } } while (READ4(sc, SDMMC_CMD) & SDMMC_CMD_START); return (0); } WRITE4(sc, SDMMC_CLKENA, 0); WRITE4(sc, SDMMC_CLKSRC, 0); div = (sc->bus_hz != freq) ? 
DIV_ROUND_UP(sc->bus_hz, 2 * freq) : 0; WRITE4(sc, SDMMC_CLKDIV, div); WRITE4(sc, SDMMC_CMD, (SDMMC_CMD_WAIT_PRVDATA | SDMMC_CMD_UPD_CLK_ONLY | SDMMC_CMD_START)); tout = 1000; do { if (tout-- < 0) { device_printf(sc->dev, "Failed to update clk\n"); return (1); } } while (READ4(sc, SDMMC_CMD) & SDMMC_CMD_START); WRITE4(sc, SDMMC_CLKENA, (SDMMC_CLKENA_CCLK_EN | SDMMC_CLKENA_LP)); WRITE4(sc, SDMMC_CMD, SDMMC_CMD_WAIT_PRVDATA | SDMMC_CMD_UPD_CLK_ONLY | SDMMC_CMD_START); tout = 1000; do { if (tout-- < 0) { device_printf(sc->dev, "Failed to enable clk\n"); return (1); } } while (READ4(sc, SDMMC_CMD) & SDMMC_CMD_START); return (0); } static int dwmmc_update_ios(device_t brdev, device_t reqdev) { struct dwmmc_softc *sc; struct mmc_ios *ios; uint32_t reg; int ret = 0; sc = device_get_softc(brdev); ios = &sc->host.ios; dprintf("Setting up clk %u bus_width %d, timming: %d\n", ios->clock, ios->bus_width, ios->timing); switch (ios->power_mode) { case power_on: break; case power_off: WRITE4(sc, SDMMC_PWREN, 0); break; case power_up: WRITE4(sc, SDMMC_PWREN, 1); break; } mmc_fdt_set_power(&sc->mmc_helper, ios->power_mode); if (ios->bus_width == bus_width_8) WRITE4(sc, SDMMC_CTYPE, SDMMC_CTYPE_8BIT); else if (ios->bus_width == bus_width_4) WRITE4(sc, SDMMC_CTYPE, SDMMC_CTYPE_4BIT); else WRITE4(sc, SDMMC_CTYPE, 0); if ((sc->hwtype & HWTYPE_MASK) == HWTYPE_EXYNOS) { /* XXX: take care about DDR or SDR use here */ WRITE4(sc, SDMMC_CLKSEL, sc->sdr_timing); } /* Set DDR mode */ reg = READ4(sc, SDMMC_UHS_REG); if (ios->timing == bus_timing_uhs_ddr50 || ios->timing == bus_timing_mmc_ddr52 || ios->timing == bus_timing_mmc_hs400) reg |= (SDMMC_UHS_REG_DDR); else reg &= ~(SDMMC_UHS_REG_DDR); WRITE4(sc, SDMMC_UHS_REG, reg); if (sc->update_ios) ret = sc->update_ios(sc, ios); dwmmc_setup_bus(sc, ios->clock); return (ret); } static int dma_done(struct dwmmc_softc *sc, struct mmc_command *cmd) { struct mmc_data *data; data = cmd->data; if (data->flags & MMC_DATA_WRITE) bus_dmamap_sync(sc->buf_tag, sc->buf_map, BUS_DMASYNC_POSTWRITE); else bus_dmamap_sync(sc->buf_tag, sc->buf_map, BUS_DMASYNC_POSTREAD); bus_dmamap_sync(sc->desc_tag, sc->desc_map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->buf_tag, sc->buf_map); return (0); } static int dma_stop(struct dwmmc_softc *sc) { int reg; reg = READ4(sc, SDMMC_CTRL); reg &= ~(SDMMC_CTRL_USE_IDMAC); reg |= (SDMMC_CTRL_DMA_RESET); WRITE4(sc, SDMMC_CTRL, reg); reg = READ4(sc, SDMMC_BMOD); reg &= ~(SDMMC_BMOD_DE | SDMMC_BMOD_FB); reg |= (SDMMC_BMOD_SWR); WRITE4(sc, SDMMC_BMOD, reg); return (0); } static int dma_prepare(struct dwmmc_softc *sc, struct mmc_command *cmd) { struct mmc_data *data; int err; int reg; data = cmd->data; reg = READ4(sc, SDMMC_INTMASK); reg &= ~(SDMMC_INTMASK_TXDR | SDMMC_INTMASK_RXDR); WRITE4(sc, SDMMC_INTMASK, reg); dprintf("%s: bus_dmamap_load size: %zu\n", __func__, data->len); err = bus_dmamap_load(sc->buf_tag, sc->buf_map, data->data, data->len, dwmmc_ring_setup, sc, BUS_DMA_NOWAIT); if (err != 0) panic("dmamap_load failed\n"); /* Ensure the device can see the desc */ bus_dmamap_sync(sc->desc_tag, sc->desc_map, BUS_DMASYNC_PREWRITE); if (data->flags & MMC_DATA_WRITE) bus_dmamap_sync(sc->buf_tag, sc->buf_map, BUS_DMASYNC_PREWRITE); else bus_dmamap_sync(sc->buf_tag, sc->buf_map, BUS_DMASYNC_PREREAD); reg = (DEF_MSIZE << SDMMC_FIFOTH_MSIZE_S); reg |= ((sc->fifo_depth / 2) - 1) << SDMMC_FIFOTH_RXWMARK_S; reg |= (sc->fifo_depth / 2) << SDMMC_FIFOTH_TXWMARK_S; WRITE4(sc, SDMMC_FIFOTH, reg); wmb(); reg = READ4(sc, SDMMC_CTRL); reg |= (SDMMC_CTRL_USE_IDMAC | 
SDMMC_CTRL_DMA_ENABLE); WRITE4(sc, SDMMC_CTRL, reg); wmb(); reg = READ4(sc, SDMMC_BMOD); reg |= (SDMMC_BMOD_DE | SDMMC_BMOD_FB); WRITE4(sc, SDMMC_BMOD, reg); /* Start */ WRITE4(sc, SDMMC_PLDMND, 1); return (0); } static int pio_prepare(struct dwmmc_softc *sc, struct mmc_command *cmd) { struct mmc_data *data; int reg; data = cmd->data; data->xfer_len = 0; reg = (DEF_MSIZE << SDMMC_FIFOTH_MSIZE_S); reg |= ((sc->fifo_depth / 2) - 1) << SDMMC_FIFOTH_RXWMARK_S; reg |= (sc->fifo_depth / 2) << SDMMC_FIFOTH_TXWMARK_S; WRITE4(sc, SDMMC_FIFOTH, reg); wmb(); return (0); } static void pio_read(struct dwmmc_softc *sc, struct mmc_command *cmd) { struct mmc_data *data; uint32_t *p, status; if (cmd == NULL || cmd->data == NULL) return; data = cmd->data; if ((data->flags & MMC_DATA_READ) == 0) return; KASSERT((data->xfer_len & 3) == 0, ("xfer_len not aligned")); p = (uint32_t *)data->data + (data->xfer_len >> 2); while (data->xfer_len < data->len) { status = READ4(sc, SDMMC_STATUS); if (status & SDMMC_STATUS_FIFO_EMPTY) break; *p++ = READ4(sc, SDMMC_DATA); data->xfer_len += 4; } WRITE4(sc, SDMMC_RINTSTS, SDMMC_INTMASK_RXDR); } static void pio_write(struct dwmmc_softc *sc, struct mmc_command *cmd) { struct mmc_data *data; uint32_t *p, status; if (cmd == NULL || cmd->data == NULL) return; data = cmd->data; if ((data->flags & MMC_DATA_WRITE) == 0) return; KASSERT((data->xfer_len & 3) == 0, ("xfer_len not aligned")); p = (uint32_t *)data->data + (data->xfer_len >> 2); while (data->xfer_len < data->len) { status = READ4(sc, SDMMC_STATUS); if (status & SDMMC_STATUS_FIFO_FULL) break; WRITE4(sc, SDMMC_DATA, *p++); data->xfer_len += 4; } WRITE4(sc, SDMMC_RINTSTS, SDMMC_INTMASK_TXDR); } static void dwmmc_start_cmd(struct dwmmc_softc *sc, struct mmc_command *cmd) { struct mmc_data *data; uint32_t blksz; uint32_t cmdr; dprintf("%s\n", __func__); sc->curcmd = cmd; data = cmd->data; #ifndef MMCCAM /* XXX Upper layers don't always set this */ cmd->mrq = sc->req; #endif /* Begin setting up command register. */ cmdr = cmd->opcode; dprintf("cmd->opcode 0x%08x\n", cmd->opcode); if (cmd->opcode == MMC_STOP_TRANSMISSION || cmd->opcode == MMC_GO_IDLE_STATE || cmd->opcode == MMC_GO_INACTIVE_STATE) cmdr |= SDMMC_CMD_STOP_ABORT; else if (cmd->opcode != MMC_SEND_STATUS && data) cmdr |= SDMMC_CMD_WAIT_PRVDATA; /* Set up response handling. */ if (MMC_RSP(cmd->flags) != MMC_RSP_NONE) { cmdr |= SDMMC_CMD_RESP_EXP; if (cmd->flags & MMC_RSP_136) cmdr |= SDMMC_CMD_RESP_LONG; } if (cmd->flags & MMC_RSP_CRC) cmdr |= SDMMC_CMD_RESP_CRC; /* * XXX: Not all platforms want this. */ cmdr |= SDMMC_CMD_USE_HOLD_REG; if ((sc->flags & CARD_INIT_DONE) == 0) { sc->flags |= (CARD_INIT_DONE); cmdr |= SDMMC_CMD_SEND_INIT; } if (data) { if ((cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK || cmd->opcode == MMC_READ_MULTIPLE_BLOCK) && sc->use_auto_stop) cmdr |= SDMMC_CMD_SEND_ASTOP; cmdr |= SDMMC_CMD_DATA_EXP; if (data->flags & MMC_DATA_STREAM) cmdr |= SDMMC_CMD_MODE_STREAM; if (data->flags & MMC_DATA_WRITE) cmdr |= SDMMC_CMD_DATA_WRITE; WRITE4(sc, SDMMC_TMOUT, 0xffffffff); #ifdef MMCCAM if (cmd->data->flags & MMC_DATA_BLOCK_SIZE) { WRITE4(sc, SDMMC_BLKSIZ, cmd->data->block_size); WRITE4(sc, SDMMC_BYTCNT, cmd->data->len); } else #endif { WRITE4(sc, SDMMC_BYTCNT, data->len); blksz = (data->len < MMC_SECTOR_SIZE) ? 
\ data->len : MMC_SECTOR_SIZE; WRITE4(sc, SDMMC_BLKSIZ, blksz); } if (sc->use_pio) { pio_prepare(sc, cmd); } else { dma_prepare(sc, cmd); } wmb(); } dprintf("cmdr 0x%08x\n", cmdr); WRITE4(sc, SDMMC_CMDARG, cmd->arg); wmb(); WRITE4(sc, SDMMC_CMD, cmdr | SDMMC_CMD_START); }; static void dwmmc_next_operation(struct dwmmc_softc *sc) { struct mmc_command *cmd; dprintf("%s\n", __func__); #ifdef MMCCAM union ccb *ccb; ccb = sc->ccb; if (ccb == NULL) return; cmd = &ccb->mmcio.cmd; #else struct mmc_request *req; req = sc->req; if (req == NULL) return; cmd = req->cmd; #endif sc->acd_rcvd = 0; sc->dto_rcvd = 0; sc->cmd_done = 0; /* * XXX: Wait until card is still busy. * We do need this to prevent data timeouts, * mostly caused by multi-block write command * followed by single-read. */ while(READ4(sc, SDMMC_STATUS) & (SDMMC_STATUS_DATA_BUSY)) continue; if (sc->flags & PENDING_CMD) { sc->flags &= ~PENDING_CMD; dwmmc_start_cmd(sc, cmd); return; } else if (sc->flags & PENDING_STOP && !sc->use_auto_stop) { sc->flags &= ~PENDING_STOP; /// XXX: What to do with this? //dwmmc_start_cmd(sc, req->stop); return; } #ifdef MMCCAM sc->ccb = NULL; sc->curcmd = NULL; ccb->ccb_h.status = (ccb->mmcio.cmd.error == 0 ? CAM_REQ_CMP : CAM_REQ_CMP_ERR); xpt_done(ccb); #else sc->req = NULL; sc->curcmd = NULL; req->done(req); #endif } static int dwmmc_request(device_t brdev, device_t reqdev, struct mmc_request *req) { struct dwmmc_softc *sc; sc = device_get_softc(brdev); dprintf("%s\n", __func__); DWMMC_LOCK(sc); #ifdef MMCCAM sc->flags |= PENDING_CMD; #else if (sc->req != NULL) { DWMMC_UNLOCK(sc); return (EBUSY); } sc->req = req; sc->flags |= PENDING_CMD; if (sc->req->stop) sc->flags |= PENDING_STOP; #endif dwmmc_next_operation(sc); DWMMC_UNLOCK(sc); return (0); } #ifndef MMCCAM static int dwmmc_get_ro(device_t brdev, device_t reqdev) { dprintf("%s\n", __func__); return (0); } static int dwmmc_acquire_host(device_t brdev, device_t reqdev) { struct dwmmc_softc *sc; sc = device_get_softc(brdev); DWMMC_LOCK(sc); while (sc->bus_busy) msleep(sc, &sc->sc_mtx, PZERO, "dwmmcah", hz / 5); sc->bus_busy++; DWMMC_UNLOCK(sc); return (0); } static int dwmmc_release_host(device_t brdev, device_t reqdev) { struct dwmmc_softc *sc; sc = device_get_softc(brdev); DWMMC_LOCK(sc); sc->bus_busy--; wakeup(sc); DWMMC_UNLOCK(sc); return (0); } #endif /* !MMCCAM */ static int dwmmc_read_ivar(device_t bus, device_t child, int which, uintptr_t *result) { struct dwmmc_softc *sc; sc = device_get_softc(bus); switch (which) { default: return (EINVAL); case MMCBR_IVAR_BUS_MODE: *(int *)result = sc->host.ios.bus_mode; break; case MMCBR_IVAR_BUS_WIDTH: *(int *)result = sc->host.ios.bus_width; break; case MMCBR_IVAR_CHIP_SELECT: *(int *)result = sc->host.ios.chip_select; break; case MMCBR_IVAR_CLOCK: *(int *)result = sc->host.ios.clock; break; case MMCBR_IVAR_F_MIN: *(int *)result = sc->host.f_min; break; case MMCBR_IVAR_F_MAX: *(int *)result = sc->host.f_max; break; case MMCBR_IVAR_HOST_OCR: *(int *)result = sc->host.host_ocr; break; case MMCBR_IVAR_MODE: *(int *)result = sc->host.mode; break; case MMCBR_IVAR_OCR: *(int *)result = sc->host.ocr; break; case MMCBR_IVAR_POWER_MODE: *(int *)result = sc->host.ios.power_mode; break; case MMCBR_IVAR_VDD: *(int *)result = sc->host.ios.vdd; break; case MMCBR_IVAR_VCCQ: *(int *)result = sc->host.ios.vccq; break; case MMCBR_IVAR_CAPS: *(int *)result = sc->host.caps; break; case MMCBR_IVAR_MAX_DATA: *(int *)result = DWMMC_MAX_DATA; break; case MMCBR_IVAR_TIMING: *(int *)result = sc->host.ios.timing; break; } return (0); 
} static int dwmmc_write_ivar(device_t bus, device_t child, int which, uintptr_t value) { struct dwmmc_softc *sc; sc = device_get_softc(bus); switch (which) { default: return (EINVAL); case MMCBR_IVAR_BUS_MODE: sc->host.ios.bus_mode = value; break; case MMCBR_IVAR_BUS_WIDTH: sc->host.ios.bus_width = value; break; case MMCBR_IVAR_CHIP_SELECT: sc->host.ios.chip_select = value; break; case MMCBR_IVAR_CLOCK: sc->host.ios.clock = value; break; case MMCBR_IVAR_MODE: sc->host.mode = value; break; case MMCBR_IVAR_OCR: sc->host.ocr = value; break; case MMCBR_IVAR_POWER_MODE: sc->host.ios.power_mode = value; break; case MMCBR_IVAR_VDD: sc->host.ios.vdd = value; break; case MMCBR_IVAR_TIMING: sc->host.ios.timing = value; break; case MMCBR_IVAR_VCCQ: sc->host.ios.vccq = value; break; /* These are read-only */ case MMCBR_IVAR_CAPS: case MMCBR_IVAR_HOST_OCR: case MMCBR_IVAR_F_MIN: case MMCBR_IVAR_F_MAX: case MMCBR_IVAR_MAX_DATA: return (EINVAL); } return (0); } #ifdef MMCCAM /* Note: this function likely belongs to the specific driver impl */ static int dwmmc_switch_vccq(device_t dev, device_t child) { device_printf(dev, "This is a default impl of switch_vccq() that always fails\n"); return EINVAL; } static int dwmmc_get_tran_settings(device_t dev, struct ccb_trans_settings_mmc *cts) { struct dwmmc_softc *sc; sc = device_get_softc(dev); cts->host_ocr = sc->host.host_ocr; cts->host_f_min = sc->host.f_min; cts->host_f_max = sc->host.f_max; cts->host_caps = sc->host.caps; cts->host_max_data = DWMMC_MAX_DATA; memcpy(&cts->ios, &sc->host.ios, sizeof(struct mmc_ios)); return (0); } static int dwmmc_set_tran_settings(device_t dev, struct ccb_trans_settings_mmc *cts) { struct dwmmc_softc *sc; struct mmc_ios *ios; struct mmc_ios *new_ios; int res; sc = device_get_softc(dev); ios = &sc->host.ios; new_ios = &cts->ios; /* Update only requested fields */ if (cts->ios_valid & MMC_CLK) { ios->clock = new_ios->clock; if (bootverbose) device_printf(sc->dev, "Clock => %d\n", ios->clock); } if (cts->ios_valid & MMC_VDD) { ios->vdd = new_ios->vdd; if (bootverbose) device_printf(sc->dev, "VDD => %d\n", ios->vdd); } if (cts->ios_valid & MMC_CS) { ios->chip_select = new_ios->chip_select; if (bootverbose) device_printf(sc->dev, "CS => %d\n", ios->chip_select); } if (cts->ios_valid & MMC_BW) { ios->bus_width = new_ios->bus_width; if (bootverbose) device_printf(sc->dev, "Bus width => %d\n", ios->bus_width); } if (cts->ios_valid & MMC_PM) { ios->power_mode = new_ios->power_mode; if (bootverbose) device_printf(sc->dev, "Power mode => %d\n", ios->power_mode); } if (cts->ios_valid & MMC_BT) { ios->timing = new_ios->timing; if (bootverbose) device_printf(sc->dev, "Timing => %d\n", ios->timing); } if (cts->ios_valid & MMC_BM) { ios->bus_mode = new_ios->bus_mode; if (bootverbose) device_printf(sc->dev, "Bus mode => %d\n", ios->bus_mode); } if (cts->ios_valid & MMC_VCCQ) { ios->vccq = new_ios->vccq; if (bootverbose) device_printf(sc->dev, "VCCQ => %d\n", ios->vccq); res = dwmmc_switch_vccq(sc->dev, NULL); device_printf(sc->dev, "VCCQ switch result: %d\n", res); } return (dwmmc_update_ios(sc->dev, NULL)); } static int dwmmc_cam_request(device_t dev, union ccb *ccb) { struct dwmmc_softc *sc; struct ccb_mmcio *mmcio; sc = device_get_softc(dev); mmcio = &ccb->mmcio; DWMMC_LOCK(sc); #ifdef DEBUG if (__predict_false(bootverbose)) { device_printf(sc->dev, "CMD%u arg %#x flags %#x dlen %u dflags %#x\n", mmcio->cmd.opcode, mmcio->cmd.arg, mmcio->cmd.flags, mmcio->cmd.data != NULL ? 
(unsigned int) mmcio->cmd.data->len : 0, mmcio->cmd.data != NULL ? mmcio->cmd.data->flags: 0); } #endif if (mmcio->cmd.data != NULL) { if (mmcio->cmd.data->len == 0 || mmcio->cmd.data->flags == 0) panic("data->len = %d, data->flags = %d -- something is b0rked", (int)mmcio->cmd.data->len, mmcio->cmd.data->flags); } if (sc->ccb != NULL) { device_printf(sc->dev, "Controller still has an active command\n"); return (EBUSY); } sc->ccb = ccb; DWMMC_UNLOCK(sc); dwmmc_request(sc->dev, NULL, NULL); return (0); } + +static void +dwmmc_cam_poll(device_t dev) +{ + struct dwmmc_softc *sc; + + sc = device_get_softc(dev); + dwmmc_intr(sc); +} #endif /* MMCCAM */ static device_method_t dwmmc_methods[] = { /* Bus interface */ DEVMETHOD(bus_read_ivar, dwmmc_read_ivar), DEVMETHOD(bus_write_ivar, dwmmc_write_ivar), #ifndef MMCCAM /* mmcbr_if */ DEVMETHOD(mmcbr_update_ios, dwmmc_update_ios), DEVMETHOD(mmcbr_request, dwmmc_request), DEVMETHOD(mmcbr_get_ro, dwmmc_get_ro), DEVMETHOD(mmcbr_acquire_host, dwmmc_acquire_host), DEVMETHOD(mmcbr_release_host, dwmmc_release_host), #endif #ifdef MMCCAM /* MMCCAM interface */ DEVMETHOD(mmc_sim_get_tran_settings, dwmmc_get_tran_settings), DEVMETHOD(mmc_sim_set_tran_settings, dwmmc_set_tran_settings), DEVMETHOD(mmc_sim_cam_request, dwmmc_cam_request), + DEVMETHOD(mmc_sim_cam_poll, dwmmc_cam_poll), DEVMETHOD(bus_add_child, bus_generic_add_child), #endif DEVMETHOD_END }; DEFINE_CLASS_0(dwmmc, dwmmc_driver, dwmmc_methods, sizeof(struct dwmmc_softc));
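---
A minimal sketch (not part of this change) of how another MMC SIM driver could opt into the new, optional mmc_sim cam_poll method so that sddadump() can make progress in polled mode during a kernel dump. The "foo_mmc" names below are hypothetical and only illustrate the same pattern dwmmc uses above; if the DEVMETHOD entry is omitted, mmc_cam_sim_alloc() (see the kobj_lookup_method() check in mmc_sim.c) passes a NULL poll vector to cam_sim_alloc().

/*
 * Hypothetical driver "foo_mmc": re-use the interrupt handler to advance
 * the in-flight request when CAM polls the SIM (interrupts are off while
 * dumping).
 */
static void
foo_mmc_cam_poll(device_t dev)
{
	struct foo_mmc_softc *sc;

	sc = device_get_softc(dev);
	foo_mmc_intr(sc);
}

static device_method_t foo_mmc_methods[] = {
	/* mmc_sim interface */
	DEVMETHOD(mmc_sim_get_tran_settings,	foo_mmc_get_tran_settings),
	DEVMETHOD(mmc_sim_set_tran_settings,	foo_mmc_set_tran_settings),
	DEVMETHOD(mmc_sim_cam_request,		foo_mmc_cam_request),
	/* Optional: without this entry the SIM gets no poll callback. */
	DEVMETHOD(mmc_sim_cam_poll,		foo_mmc_cam_poll),

	DEVMETHOD_END
};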