diff --git a/sys/cam/ctl/scsi_ctl.c b/sys/cam/ctl/scsi_ctl.c index d3023f9a6c8c..8c141a88e155 100644 --- a/sys/cam/ctl/scsi_ctl.c +++ b/sys/cam/ctl/scsi_ctl.c @@ -1,1994 +1,1997 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2008, 2009 Silicon Graphics International Corp. * Copyright (c) 2014-2015 Alexander Motin * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. * * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/scsi_ctl.c#4 $ */ /* * Peripheral driver interface between CAM and CTL (CAM Target Layer). 
* * Author: Ken Merry */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include struct ctlfe_softc { struct ctl_port port; path_id_t path_id; target_id_t target_id; uint32_t hba_misc; u_int maxio; struct cam_sim *sim; char port_name[DEV_IDLEN]; struct mtx lun_softc_mtx; STAILQ_HEAD(, ctlfe_lun_softc) lun_softc_list; STAILQ_ENTRY(ctlfe_softc) links; }; STAILQ_HEAD(, ctlfe_softc) ctlfe_softc_list; struct mtx ctlfe_list_mtx; static char ctlfe_mtx_desc[] = "ctlfelist"; typedef enum { CTLFE_LUN_NONE = 0x00, CTLFE_LUN_WILDCARD = 0x01 } ctlfe_lun_flags; struct ctlfe_lun_softc { struct ctlfe_softc *parent_softc; struct cam_periph *periph; ctlfe_lun_flags flags; int ctios_sent; /* Number of active CTIOs */ int refcount; /* Number of active xpt_action() */ int atios_alloced; /* Number of ATIOs not freed */ int inots_alloced; /* Number of INOTs not freed */ struct task refdrain_task; STAILQ_HEAD(, ccb_hdr) work_queue; LIST_HEAD(, ccb_hdr) atio_list; /* List of ATIOs queued to SIM. */ LIST_HEAD(, ccb_hdr) inot_list; /* List of INOTs queued to SIM. */ STAILQ_ENTRY(ctlfe_lun_softc) links; }; typedef enum { CTLFE_CMD_NONE = 0x00, CTLFE_CMD_PIECEWISE = 0x01 } ctlfe_cmd_flags; struct ctlfe_cmd_info { int cur_transfer_index; size_t cur_transfer_off; ctlfe_cmd_flags flags; /* * XXX KDM struct bus_dma_segment is 8 bytes on i386, and 16 * bytes on amd64. So with 32 elements, this is 256 bytes on * i386 and 512 bytes on amd64. */ #define CTLFE_MAX_SEGS 32 bus_dma_segment_t cam_sglist[CTLFE_MAX_SEGS]; }; /* * When we register the adapter/bus, request that this many ctl_ios be * allocated. This should be the maximum supported by the adapter, but we * currently don't have a way to get that back from the path inquiry. 
* XXX KDM add that to the path inquiry. */ #define CTLFE_REQ_CTL_IO 4096 /* * Number of Accept Target I/O CCBs to allocate and queue down to the * adapter per LUN. * XXX KDM should this be controlled by CTL? */ #define CTLFE_ATIO_PER_LUN 1024 /* * Number of Immediate Notify CCBs (used for aborts, resets, etc.) to * allocate and queue down to the adapter per LUN. * XXX KDM should this be controlled by CTL? */ #define CTLFE_IN_PER_LUN 1024 /* * Timeout (in seconds) on CTIO CCB doing DMA or sending status */ #define CTLFE_TIMEOUT 5 /* * Turn this on to enable extra debugging prints. */ #if 0 #define CTLFE_DEBUG #endif MALLOC_DEFINE(M_CTLFE, "CAM CTL FE", "CAM CTL FE interface"); #define io_ptr ppriv_ptr0 /* This is only used in the CTIO */ #define ccb_atio ppriv_ptr1 #define PRIV_CCB(io) ((io)->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptrs[0]) #define PRIV_INFO(io) ((io)->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptrs[1]) static int ctlfeinitialize(void); static int ctlfeshutdown(void); static periph_init_t ctlfeperiphinit; static periph_deinit_t ctlfeperiphdeinit; static void ctlfeasync(void *callback_arg, uint32_t code, struct cam_path *path, void *arg); static periph_ctor_t ctlferegister; static periph_oninv_t ctlfeoninvalidate; static periph_dtor_t ctlfecleanup; static periph_start_t ctlfestart; static void ctlfedone(struct cam_periph *periph, union ccb *done_ccb); static void ctlfe_onoffline(void *arg, int online); static void ctlfe_online(void *arg); static void ctlfe_offline(void *arg); static int ctlfe_lun_enable(void *arg, int lun_id); static int ctlfe_lun_disable(void *arg, int lun_id); static void ctlfe_dump_sim(struct cam_sim *sim); static void ctlfe_dump_queue(struct ctlfe_lun_softc *softc); static void ctlfe_datamove(union ctl_io *io); static void ctlfe_done(union ctl_io *io); static void ctlfe_dump(void); static void ctlfe_free_ccb(struct cam_periph *periph, union ccb *ccb); static void ctlfe_requeue_ccb(struct cam_periph *periph, union ccb *ccb, int unlock); 
/*
 * CAM peripheral driver glue: one "ctl" periph instance is created per
 * target-mode LUN.  CAM_PERIPH_DRV_EARLY registers the driver early so
 * target ports exist before CTL starts driving them.
 */
static struct periph_driver ctlfe_driver =
{
	ctlfeperiphinit, "ctl",
	TAILQ_HEAD_INITIALIZER(ctlfe_driver.units), /*generation*/ 0,
	CAM_PERIPH_DRV_EARLY,
	ctlfeperiphdeinit
};

/* CTL front-end descriptor for this CAM target-mode ("camtgt") port type. */
static struct ctl_frontend ctlfe_frontend =
{
	.name = "camtgt",
	.init = ctlfeinitialize,
	.fe_dump = ctlfe_dump,
	.shutdown = ctlfeshutdown,
};
CTL_FRONTEND_DECLARE(ctlfe, ctlfe_frontend);

/*
 * Front-end init callback: set up the global port softc list and its
 * mutex, then hook our peripheral driver into CAM.  Always succeeds.
 */
static int
ctlfeinitialize(void)
{

	STAILQ_INIT(&ctlfe_softc_list);
	mtx_init(&ctlfe_list_mtx, ctlfe_mtx_desc, NULL, MTX_DEF);
	periphdriver_register(&ctlfe_driver);
	return (0);
}

/*
 * Front-end shutdown callback: undo ctlfeinitialize().  If
 * periphdriver_unregister() fails (e.g. periph units still exist) the
 * error is returned and the list mutex is deliberately left intact so a
 * later retry is possible.
 */
static int
ctlfeshutdown(void)
{
	int error;

	error = periphdriver_unregister(&ctlfe_driver);
	if (error != 0)
		return (error);
	mtx_destroy(&ctlfe_list_mtx);
	return (0);
}

/*
 * Peripheral driver init: ask CAM to deliver path (de)registration and
 * contract async events to ctlfeasync().  A failure is only logged --
 * this callback has no way to report an error to its caller.
 */
static void
ctlfeperiphinit(void)
{
	cam_status status;

	status = xpt_register_async(AC_PATH_REGISTERED | AC_PATH_DEREGISTERED |
	    AC_CONTRACT, ctlfeasync, NULL, NULL);
	if (status != CAM_REQ_CMP) {
		printf("ctl: Failed to attach async callback due to CAM "
		    "status 0x%x!\n", status);
	}
}

/*
 * Peripheral driver deinit: refuse to unload while any periph unit still
 * exists, otherwise drop the async event registration made above.
 */
static int
ctlfeperiphdeinit(void)
{

	/* XXX: It would be good to tear down active ports here. */
	if (!TAILQ_EMPTY(&ctlfe_driver.units))
		return (EBUSY);
	xpt_register_async(0, ctlfeasync, NULL, NULL);
	return (0);
}

/*
 * Async event handler: reacts to CAM path registration/deregistration
 * and contract (initiator arrived/left) events.  May run in interrupt
 * context (see the M_NOWAIT allocation notes further down).
 */
static void
ctlfeasync(void *callback_arg, uint32_t code, struct cam_path *path, void *arg)
{
	struct ctlfe_softc *softc;

#ifdef CTLFEDEBUG
	printf("%s: entered\n", __func__);
#endif

	/* Look up an existing port softc for this path, if any. */
	mtx_lock(&ctlfe_list_mtx);
	STAILQ_FOREACH(softc, &ctlfe_softc_list, links) {
		if (softc->path_id == xpt_path_path_id(path))
			break;
	}
	mtx_unlock(&ctlfe_list_mtx);

	/*
	 * When a new path gets registered, and it is capable of target
	 * mode, go ahead and attach.  Later on, we may need to be more
	 * selective, but for now this will be sufficient.
*/ switch (code) { case AC_PATH_REGISTERED: { struct ctl_port *port; struct ccb_pathinq *cpi; int retval; cpi = (struct ccb_pathinq *)arg; /* Don't attach if it doesn't support target mode */ if ((cpi->target_sprt & PIT_PROCESSOR) == 0) { #ifdef CTLFEDEBUG printf("%s: SIM %s%d doesn't support target mode\n", __func__, cpi->dev_name, cpi->unit_number); #endif break; } if (softc != NULL) { #ifdef CTLFEDEBUG printf("%s: CTL port for CAM path %u already exists\n", __func__, xpt_path_path_id(path)); #endif break; } /* * We're in an interrupt context here, so we have to * use M_NOWAIT. Of course this means trouble if we * can't allocate memory. */ softc = malloc(sizeof(*softc), M_CTLFE, M_NOWAIT | M_ZERO); if (softc == NULL) { printf("%s: unable to malloc %zd bytes for softc\n", __func__, sizeof(*softc)); return; } softc->path_id = cpi->ccb_h.path_id; softc->target_id = cpi->initiator_id; softc->sim = xpt_path_sim(path); softc->hba_misc = cpi->hba_misc; if (cpi->maxio != 0) softc->maxio = cpi->maxio; else softc->maxio = DFLTPHYS; mtx_init(&softc->lun_softc_mtx, "LUN softc mtx", NULL, MTX_DEF); STAILQ_INIT(&softc->lun_softc_list); port = &softc->port; port->frontend = &ctlfe_frontend; /* * XXX KDM should we be more accurate here ? */ if (cpi->transport == XPORT_FC) port->port_type = CTL_PORT_FC; else if (cpi->transport == XPORT_SAS) port->port_type = CTL_PORT_SAS; else port->port_type = CTL_PORT_SCSI; /* XXX KDM what should the real number be here? */ port->num_requested_ctl_io = CTLFE_REQ_CTL_IO; snprintf(softc->port_name, sizeof(softc->port_name), "%s%d", cpi->dev_name, cpi->unit_number); /* * XXX KDM it would be nice to allocate storage in the * frontend structure itself. 
*/ port->port_name = softc->port_name; port->physical_port = cpi->bus_id; port->virtual_port = 0; port->port_online = ctlfe_online; port->port_offline = ctlfe_offline; port->onoff_arg = softc; port->lun_enable = ctlfe_lun_enable; port->lun_disable = ctlfe_lun_disable; port->targ_lun_arg = softc; port->fe_datamove = ctlfe_datamove; port->fe_done = ctlfe_done; port->targ_port = -1; retval = ctl_port_register(port); if (retval != 0) { printf("%s: ctl_port_register() failed with " "error %d!\n", __func__, retval); mtx_destroy(&softc->lun_softc_mtx); free(softc, M_CTLFE); break; } else { mtx_lock(&ctlfe_list_mtx); STAILQ_INSERT_TAIL(&ctlfe_softc_list, softc, links); mtx_unlock(&ctlfe_list_mtx); } break; } case AC_PATH_DEREGISTERED: { if (softc != NULL) { /* * XXX KDM are we certain at this point that there * are no outstanding commands for this frontend? */ mtx_lock(&ctlfe_list_mtx); STAILQ_REMOVE(&ctlfe_softc_list, softc, ctlfe_softc, links); mtx_unlock(&ctlfe_list_mtx); ctl_port_deregister(&softc->port); mtx_destroy(&softc->lun_softc_mtx); free(softc, M_CTLFE); } break; } case AC_CONTRACT: { struct ac_contract *ac; ac = (struct ac_contract *)arg; switch (ac->contract_number) { case AC_CONTRACT_DEV_CHG: { struct ac_device_changed *dev_chg; int retval; dev_chg = (struct ac_device_changed *)ac->contract_data; printf("%s: WWPN %#jx port 0x%06x path %u target %u %s\n", __func__, dev_chg->wwpn, dev_chg->port, xpt_path_path_id(path), dev_chg->target, (dev_chg->arrived == 0) ? "left" : "arrived"); if (softc == NULL) { printf("%s: CTL port for CAM path %u not " "found!\n", __func__, xpt_path_path_id(path)); break; } if (dev_chg->arrived != 0) { retval = ctl_add_initiator(&softc->port, dev_chg->target, dev_chg->wwpn, NULL); } else { retval = ctl_remove_initiator(&softc->port, dev_chg->target); } if (retval < 0) { printf("%s: could not %s port %d iid %u " "WWPN %#jx!\n", __func__, (dev_chg->arrived != 0) ? 
"add" : "remove", softc->port.targ_port, dev_chg->target, (uintmax_t)dev_chg->wwpn); } break; } default: printf("%s: unsupported contract number %ju\n", __func__, (uintmax_t)ac->contract_number); break; } break; } default: break; } } static cam_status ctlferegister(struct cam_periph *periph, void *arg) { struct ctlfe_softc *bus_softc; struct ctlfe_lun_softc *softc; union ccb ccb; cam_status status; int i, acstatus; softc = (struct ctlfe_lun_softc *)arg; bus_softc = softc->parent_softc; STAILQ_INIT(&softc->work_queue); LIST_INIT(&softc->atio_list); LIST_INIT(&softc->inot_list); softc->periph = periph; periph->softc = softc; /* Increase device openings to maximum for the SIM. */ if (bus_softc->sim->max_tagged_dev_openings > bus_softc->sim->max_dev_openings) { cam_release_devq(periph->path, /*relsim_flags*/RELSIM_ADJUST_OPENINGS, /*openings*/bus_softc->sim->max_tagged_dev_openings, /*timeout*/0, /*getcount_only*/1); } + memset(&ccb, 0, sizeof(ccb)); xpt_setup_ccb(&ccb.ccb_h, periph->path, CAM_PRIORITY_NONE); ccb.ccb_h.func_code = XPT_EN_LUN; ccb.cel.grp6_len = 0; ccb.cel.grp7_len = 0; ccb.cel.enable = 1; xpt_action(&ccb); status = (ccb.ccb_h.status & CAM_STATUS_MASK); if (status != CAM_REQ_CMP) { xpt_print(periph->path, "%s: Enable LUN failed, status 0x%x\n", __func__, ccb.ccb_h.status); return (status); } status = CAM_REQ_CMP; for (i = 0; i < CTLFE_ATIO_PER_LUN; i++) { union ccb *new_ccb; union ctl_io *new_io; struct ctlfe_cmd_info *cmd_info; new_ccb = (union ccb *)malloc(sizeof(*new_ccb), M_CTLFE, M_ZERO|M_NOWAIT); if (new_ccb == NULL) { status = CAM_RESRC_UNAVAIL; break; } new_io = ctl_alloc_io_nowait(bus_softc->port.ctl_pool_ref); if (new_io == NULL) { free(new_ccb, M_CTLFE); status = CAM_RESRC_UNAVAIL; break; } cmd_info = malloc(sizeof(*cmd_info), M_CTLFE, M_ZERO | M_NOWAIT); if (cmd_info == NULL) { ctl_free_io(new_io); free(new_ccb, M_CTLFE); status = CAM_RESRC_UNAVAIL; break; } PRIV_INFO(new_io) = cmd_info; softc->atios_alloced++; new_ccb->ccb_h.io_ptr = 
new_io; LIST_INSERT_HEAD(&softc->atio_list, &new_ccb->ccb_h, periph_links.le); xpt_setup_ccb(&new_ccb->ccb_h, periph->path, CAM_PRIORITY_NONE); new_ccb->ccb_h.func_code = XPT_ACCEPT_TARGET_IO; new_ccb->ccb_h.cbfcnp = ctlfedone; new_ccb->ccb_h.flags |= CAM_UNLOCKED; xpt_action(new_ccb); status = new_ccb->ccb_h.status; if ((status & CAM_STATUS_MASK) != CAM_REQ_INPROG) { free(cmd_info, M_CTLFE); ctl_free_io(new_io); free(new_ccb, M_CTLFE); break; } } acstatus = cam_periph_acquire(periph); if (acstatus != 0) { xpt_print(periph->path, "%s: could not acquire reference " "count, status = %#x\n", __func__, acstatus); return (CAM_REQ_CMP_ERR); } if (i == 0) { xpt_print(periph->path, "%s: could not allocate ATIO CCBs, " "status 0x%x\n", __func__, status); return (CAM_REQ_CMP_ERR); } for (i = 0; i < CTLFE_IN_PER_LUN; i++) { union ccb *new_ccb; union ctl_io *new_io; new_ccb = (union ccb *)malloc(sizeof(*new_ccb), M_CTLFE, M_ZERO|M_NOWAIT); if (new_ccb == NULL) { status = CAM_RESRC_UNAVAIL; break; } new_io = ctl_alloc_io_nowait(bus_softc->port.ctl_pool_ref); if (new_io == NULL) { free(new_ccb, M_CTLFE); status = CAM_RESRC_UNAVAIL; break; } softc->inots_alloced++; new_ccb->ccb_h.io_ptr = new_io; LIST_INSERT_HEAD(&softc->inot_list, &new_ccb->ccb_h, periph_links.le); xpt_setup_ccb(&new_ccb->ccb_h, periph->path, CAM_PRIORITY_NONE); new_ccb->ccb_h.func_code = XPT_IMMEDIATE_NOTIFY; new_ccb->ccb_h.cbfcnp = ctlfedone; new_ccb->ccb_h.flags |= CAM_UNLOCKED; xpt_action(new_ccb); status = new_ccb->ccb_h.status; if ((status & CAM_STATUS_MASK) != CAM_REQ_INPROG) { /* * Note that we don't free the CCB here. If the * status is not CAM_REQ_INPROG, then we're * probably talking to a SIM that says it is * target-capable but doesn't support the * XPT_IMMEDIATE_NOTIFY CCB. i.e. it supports the * older API. In that case, it'll call xpt_done() * on the CCB, and we need to free it in our done * routine as a result. 
*/ break; } } if ((i == 0) || (status != CAM_REQ_INPROG)) { xpt_print(periph->path, "%s: could not allocate immediate " "notify CCBs, status 0x%x\n", __func__, status); return (CAM_REQ_CMP_ERR); } mtx_lock(&bus_softc->lun_softc_mtx); STAILQ_INSERT_TAIL(&bus_softc->lun_softc_list, softc, links); mtx_unlock(&bus_softc->lun_softc_mtx); return (CAM_REQ_CMP); } static void ctlfeoninvalidate(struct cam_periph *periph) { struct ctlfe_lun_softc *softc = (struct ctlfe_lun_softc *)periph->softc; struct ctlfe_softc *bus_softc; union ccb ccb; struct ccb_hdr *hdr; cam_status status; /* Abort all ATIOs and INOTs queued to SIM. */ + memset(&ccb, 0, sizeof(ccb)); xpt_setup_ccb(&ccb.ccb_h, periph->path, CAM_PRIORITY_NONE); ccb.ccb_h.func_code = XPT_ABORT; LIST_FOREACH(hdr, &softc->atio_list, periph_links.le) { ccb.cab.abort_ccb = (union ccb *)hdr; xpt_action(&ccb); } LIST_FOREACH(hdr, &softc->inot_list, periph_links.le) { ccb.cab.abort_ccb = (union ccb *)hdr; xpt_action(&ccb); } /* Disable the LUN in SIM. */ ccb.ccb_h.func_code = XPT_EN_LUN; ccb.cel.grp6_len = 0; ccb.cel.grp7_len = 0; ccb.cel.enable = 0; xpt_action(&ccb); status = (ccb.ccb_h.status & CAM_STATUS_MASK); if (status != CAM_REQ_CMP) { xpt_print(periph->path, "%s: Disable LUN failed, status 0x%x\n", __func__, ccb.ccb_h.status); /* * XXX KDM what do we do now? 
*/
	}

	/* Unlink this LUN softc from its parent port's list. */
	bus_softc = softc->parent_softc;
	mtx_lock(&bus_softc->lun_softc_mtx);
	STAILQ_REMOVE(&bus_softc->lun_softc_list, softc, ctlfe_lun_softc, links);
	mtx_unlock(&bus_softc->lun_softc_mtx);
}

/*
 * Final periph destructor.  By the time we get here every CTIO must have
 * completed and all ATIO/INOT resources must already be freed; the
 * KASSERTs enforce that.  All that remains is to free the LUN softc.
 */
static void
ctlfecleanup(struct cam_periph *periph)
{
	struct ctlfe_lun_softc *softc;

	softc = (struct ctlfe_lun_softc *)periph->softc;

	KASSERT(softc->ctios_sent == 0, ("%s: ctios_sent %d != 0",
	    __func__, softc->ctios_sent));
	KASSERT(softc->refcount == 0, ("%s: refcount %d != 0",
	    __func__, softc->refcount));
	KASSERT(softc->atios_alloced == 0, ("%s: atios_alloced %d != 0",
	    __func__, softc->atios_alloced));
	KASSERT(softc->inots_alloced == 0, ("%s: inots_alloced %d != 0",
	    __func__, softc->inots_alloced));

	free(softc, M_CTLFE);
}

/*
 * Compute the data-transfer parameters (direction, data pointer or CAM
 * S/G list, length, S/G count) for the next CTIO built from this ctl_io.
 * Transfers larger than the SIM's maxio are broken into pieces;
 * CTLFE_CMD_PIECEWISE in cmd_info->flags records that more data remains
 * after this chunk, with cur_transfer_index/cur_transfer_off marking the
 * resume point.  A non-zero ext_data_filled (left by an SRR) shifts the
 * start of the transfer forward and is consumed here.
 */
static void
ctlfedata(struct ctlfe_lun_softc *softc, union ctl_io *io,
    ccb_flags *flags, uint8_t **data_ptr, uint32_t *dxfer_len,
    u_int16_t *sglist_cnt)
{
	struct ctlfe_softc *bus_softc;
	struct ctlfe_cmd_info *cmd_info;
	struct ctl_sg_entry *ctl_sglist;
	bus_dma_segment_t *cam_sglist;
	size_t off;
	int i, idx;

	cmd_info = PRIV_INFO(io);
	bus_softc = softc->parent_softc;

	/*
	 * Set the direction, relative to the initiator.
	 */
	*flags &= ~CAM_DIR_MASK;
	if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN)
		*flags |= CAM_DIR_IN;
	else
		*flags |= CAM_DIR_OUT;

	*flags &= ~CAM_DATA_MASK;
	/* Resume point left by a previous piecewise chunk (0 if fresh). */
	idx = cmd_info->cur_transfer_index;
	off = cmd_info->cur_transfer_off;
	cmd_info->flags &= ~CTLFE_CMD_PIECEWISE;
	if (io->scsiio.kern_sg_entries == 0) {	/* No S/G list. */

		/* One time shift for SRR offset.
		 */
		off += io->scsiio.ext_data_filled;
		io->scsiio.ext_data_filled = 0;

		*data_ptr = io->scsiio.kern_data_ptr + off;
		if (io->scsiio.kern_data_len - off <= bus_softc->maxio) {
			/* Remainder fits in a single CTIO. */
			*dxfer_len = io->scsiio.kern_data_len - off;
		} else {
			/* Clamp to maxio and remember where to resume. */
			*dxfer_len = bus_softc->maxio;
			cmd_info->cur_transfer_off += bus_softc->maxio;
			cmd_info->flags |= CTLFE_CMD_PIECEWISE;
		}
		*sglist_cnt = 0;

		if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR)
			*flags |= CAM_DATA_PADDR;
		else
			*flags |= CAM_DATA_VADDR;
	} else {
		/* S/G list with physical or virtual pointers. */
		ctl_sglist = (struct ctl_sg_entry *)io->scsiio.kern_data_ptr;

		/* One time shift for SRR offset. */
		while (io->scsiio.ext_data_filled >=
		    ctl_sglist[idx].len - off) {
			io->scsiio.ext_data_filled -=
			    ctl_sglist[idx].len - off;
			idx++;
			off = 0;
		}
		off += io->scsiio.ext_data_filled;
		io->scsiio.ext_data_filled = 0;

		/*
		 * Translate CTL S/G entries into CAM S/G entries, stopping
		 * at maxio bytes or CTLFE_MAX_SEGS entries, whichever
		 * comes first.
		 */
		cam_sglist = cmd_info->cam_sglist;
		*dxfer_len = 0;
		for (i = 0; i < io->scsiio.kern_sg_entries - idx; i++) {
			cam_sglist[i].ds_addr =
			    (bus_addr_t)(uintptr_t)ctl_sglist[i + idx].addr + off;
			if (ctl_sglist[i + idx].len - off <=
			    bus_softc->maxio - *dxfer_len) {
				cam_sglist[i].ds_len =
				    ctl_sglist[idx + i].len - off;
				*dxfer_len += cam_sglist[i].ds_len;
			} else {
				/* maxio reached mid-entry: split it here. */
				cam_sglist[i].ds_len =
				    bus_softc->maxio - *dxfer_len;
				cmd_info->cur_transfer_index = idx + i;
				cmd_info->cur_transfer_off =
				    cam_sglist[i].ds_len + off;
				cmd_info->flags |= CTLFE_CMD_PIECEWISE;
				*dxfer_len += cam_sglist[i].ds_len;
				if (ctl_sglist[i].len != 0)
					i++;
				break;
			}
			/* Out of CAM S/G slots but entries remain: resume later. */
			if (i == (CTLFE_MAX_SEGS - 1) &&
			    idx + i < (io->scsiio.kern_sg_entries - 1)) {
				cmd_info->cur_transfer_index = idx + i + 1;
				cmd_info->cur_transfer_off = 0;
				cmd_info->flags |= CTLFE_CMD_PIECEWISE;
				i++;
				break;
			}
			off = 0;
		}
		*sglist_cnt = i;
		if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR)
			*flags |= CAM_DATA_SG_PADDR;
		else
			*flags |= CAM_DATA_SG;
		*data_ptr = (uint8_t *)cam_sglist;
	}
}

/*
 * Periph start routine: pull queued ATIOs off the work queue and turn
 * each into a CTIO carrying either a data phase or final status.
 */
static void
ctlfestart(struct cam_periph *periph, union ccb *start_ccb)
{
	struct ctlfe_lun_softc *softc;
	struct ctlfe_cmd_info *cmd_info;
	struct ccb_hdr *ccb_h;
	struct
ccb_accept_tio *atio; struct ccb_scsiio *csio; uint8_t *data_ptr; uint32_t dxfer_len; ccb_flags flags; union ctl_io *io; uint8_t scsi_status; softc = (struct ctlfe_lun_softc *)periph->softc; next: /* Take the ATIO off the work queue */ ccb_h = STAILQ_FIRST(&softc->work_queue); if (ccb_h == NULL) { xpt_release_ccb(start_ccb); return; } STAILQ_REMOVE_HEAD(&softc->work_queue, periph_links.stqe); atio = (struct ccb_accept_tio *)ccb_h; io = (union ctl_io *)ccb_h->io_ptr; csio = &start_ccb->csio; flags = atio->ccb_h.flags & (CAM_DIS_DISCONNECT|CAM_TAG_ACTION_VALID|CAM_DIR_MASK); cmd_info = PRIV_INFO(io); cmd_info->cur_transfer_index = 0; cmd_info->cur_transfer_off = 0; cmd_info->flags = 0; if (io->io_hdr.flags & CTL_FLAG_DMA_QUEUED) { /* * Datamove call, we need to setup the S/G list. */ ctlfedata(softc, io, &flags, &data_ptr, &dxfer_len, &csio->sglist_cnt); } else { /* * We're done, send status back. */ if ((io->io_hdr.flags & CTL_FLAG_ABORT) && (io->io_hdr.flags & CTL_FLAG_ABORT_STATUS) == 0) { io->io_hdr.flags &= ~CTL_FLAG_STATUS_QUEUED; /* Tell the SIM that we've aborted this ATIO */ #ifdef CTLFEDEBUG printf("%s: tag %04x abort\n", __func__, atio->tag_id); #endif KASSERT(atio->ccb_h.func_code == XPT_ACCEPT_TARGET_IO, ("func_code %#x is not ATIO", atio->ccb_h.func_code)); start_ccb->ccb_h.func_code = XPT_ABORT; start_ccb->cab.abort_ccb = (union ccb *)atio; xpt_action(start_ccb); ctlfe_requeue_ccb(periph, (union ccb *)atio, /* unlock */0); /* XPT_ABORT is not queued, so we can take next I/O. 
*/ goto next; } data_ptr = NULL; dxfer_len = 0; csio->sglist_cnt = 0; } scsi_status = 0; if ((io->io_hdr.flags & CTL_FLAG_STATUS_QUEUED) && (cmd_info->flags & CTLFE_CMD_PIECEWISE) == 0 && ((io->io_hdr.flags & CTL_FLAG_DMA_QUEUED) == 0 || io->io_hdr.status == CTL_SUCCESS)) { flags |= CAM_SEND_STATUS; scsi_status = io->scsiio.scsi_status; csio->sense_len = io->scsiio.sense_len; #ifdef CTLFEDEBUG printf("%s: tag %04x status %x\n", __func__, atio->tag_id, io->io_hdr.status); #endif if (csio->sense_len != 0) { csio->sense_data = io->scsiio.sense_data; flags |= CAM_SEND_SENSE; } } #ifdef CTLFEDEBUG printf("%s: %s: tag %04x flags %x ptr %p len %u\n", __func__, (flags & CAM_SEND_STATUS) ? "done" : "datamove", atio->tag_id, flags, data_ptr, dxfer_len); #endif /* * Valid combinations: * - CAM_SEND_STATUS, CAM_DATA_SG = 0, dxfer_len = 0, * sglist_cnt = 0 * - CAM_SEND_STATUS = 0, CAM_DATA_SG = 0, dxfer_len != 0, * sglist_cnt = 0 * - CAM_SEND_STATUS = 0, CAM_DATA_SG, dxfer_len != 0, * sglist_cnt != 0 */ #ifdef CTLFEDEBUG if (((flags & CAM_SEND_STATUS) && (((flags & CAM_DATA_SG) != 0) || (dxfer_len != 0) || (csio->sglist_cnt != 0))) || (((flags & CAM_SEND_STATUS) == 0) && (dxfer_len == 0)) || ((flags & CAM_DATA_SG) && (csio->sglist_cnt == 0)) || (((flags & CAM_DATA_SG) == 0) && (csio->sglist_cnt != 0))) { printf("%s: tag %04x cdb %02x flags %#x dxfer_len " "%d sg %u\n", __func__, atio->tag_id, atio_cdb_ptr(atio)[0], flags, dxfer_len, csio->sglist_cnt); printf("%s: tag %04x io status %#x\n", __func__, atio->tag_id, io->io_hdr.status); } #endif cam_fill_ctio(csio, /*retries*/ 2, ctlfedone, flags, (flags & CAM_TAG_ACTION_VALID) ? 
MSG_SIMPLE_Q_TAG : 0,
		      atio->tag_id,
		      atio->init_id,
		      scsi_status,
		      /*data_ptr*/ data_ptr,
		      /*dxfer_len*/ dxfer_len,
		      /*timeout*/ CTLFE_TIMEOUT * 1000);
	start_ccb->ccb_h.flags |= CAM_UNLOCKED;
	start_ccb->ccb_h.ccb_atio = atio;
	if (io->io_hdr.flags & CTL_FLAG_DMA_QUEUED)
		io->io_hdr.flags |= CTL_FLAG_DMA_INPROG;
	io->io_hdr.flags &= ~(CTL_FLAG_DMA_QUEUED | CTL_FLAG_STATUS_QUEUED);

	/*
	 * Drop the periph lock around xpt_action(); refcount keeps the
	 * softc pinned while we are inside the SIM (see ctlfe_drain()).
	 */
	softc->ctios_sent++;
	softc->refcount++;
	cam_periph_unlock(periph);
	xpt_action(start_ccb);
	cam_periph_lock(periph);
	softc->refcount--;

	/*
	 * If we still have work to do, ask for another CCB.
	 */
	if (!STAILQ_EMPTY(&softc->work_queue))
		xpt_schedule(periph, CAM_PRIORITY_NORMAL);
}

/*
 * Taskqueue handler: sleep until no thread is inside an unlocked
 * xpt_action() for this periph (refcount counts active callers), then
 * drop the periph reference that ctlfe_free_ccb() deferred to us.
 */
static void
ctlfe_drain(void *context, int pending)
{
	struct cam_periph *periph = context;
	struct ctlfe_lun_softc *softc = periph->softc;

	cam_periph_lock(periph);
	while (softc->refcount != 0) {
		cam_periph_sleep(periph, &softc->refcount, PRIBIO,
		    "ctlfe_drain", 1);
	}
	cam_periph_unlock(periph);
	cam_periph_release(periph);
}

/*
 * Release one ATIO or INOT CCB together with its ctl_io (and, for ATIOs,
 * the per-command info).  Used when the periph is going away or a CCB can
 * no longer be requeued to the SIM.
 */
static void
ctlfe_free_ccb(struct cam_periph *periph, union ccb *ccb)
{
	struct ctlfe_lun_softc *softc;
	union ctl_io *io;
	struct ctlfe_cmd_info *cmd_info;

	softc = (struct ctlfe_lun_softc *)periph->softc;
	io = ccb->ccb_h.io_ptr;

	switch (ccb->ccb_h.func_code) {
	case XPT_ACCEPT_TARGET_IO:
		softc->atios_alloced--;
		cmd_info = PRIV_INFO(io);
		free(cmd_info, M_CTLFE);
		break;
	case XPT_IMMEDIATE_NOTIFY:
	case XPT_NOTIFY_ACKNOWLEDGE:
		softc->inots_alloced--;
		break;
	default:
		break;
	}

	ctl_free_io(io);
	free(ccb, M_CTLFE);
	KASSERT(softc->atios_alloced >= 0, ("%s: atios_alloced %d < 0",
	    __func__, softc->atios_alloced));
	KASSERT(softc->inots_alloced >= 0, ("%s: inots_alloced %d < 0",
	    __func__, softc->inots_alloced));

	/*
	 * If we have received all of our CCBs, we can release our
	 * reference on the peripheral driver.  It will probably go away
	 * now.
*/ if (softc->atios_alloced == 0 && softc->inots_alloced == 0) { if (softc->refcount == 0) { cam_periph_release_locked(periph); } else { TASK_INIT(&softc->refdrain_task, 0, ctlfe_drain, periph); taskqueue_enqueue(taskqueue_thread, &softc->refdrain_task); } } } /* * Send the ATIO/INOT back to the SIM, or free it if periph was invalidated. */ static void ctlfe_requeue_ccb(struct cam_periph *periph, union ccb *ccb, int unlock) { struct ctlfe_lun_softc *softc; struct mtx *mtx; if (periph->flags & CAM_PERIPH_INVALID) { mtx = cam_periph_mtx(periph); ctlfe_free_ccb(periph, ccb); if (unlock) mtx_unlock(mtx); return; } softc = (struct ctlfe_lun_softc *)periph->softc; if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) LIST_INSERT_HEAD(&softc->atio_list, &ccb->ccb_h, periph_links.le); else LIST_INSERT_HEAD(&softc->inot_list, &ccb->ccb_h, periph_links.le); if (unlock) cam_periph_unlock(periph); /* * For a wildcard attachment, commands can come in with a specific * target/lun. Reset the target and LUN fields back to the wildcard * values before we send them back down to the SIM. 
*/ xpt_setup_ccb_flags(&ccb->ccb_h, periph->path, CAM_PRIORITY_NONE, ccb->ccb_h.flags); xpt_action(ccb); } static int ctlfe_adjust_cdb(struct ccb_accept_tio *atio, uint32_t offset) { uint64_t lba; uint32_t num_blocks, nbc; uint8_t *cmdbyt = atio_cdb_ptr(atio); nbc = offset >> 9; /* ASSUMING 512 BYTE BLOCKS */ switch (cmdbyt[0]) { case READ_6: case WRITE_6: { struct scsi_rw_6 *cdb = (struct scsi_rw_6 *)cmdbyt; lba = scsi_3btoul(cdb->addr); lba &= 0x1fffff; num_blocks = cdb->length; if (num_blocks == 0) num_blocks = 256; lba += nbc; num_blocks -= nbc; scsi_ulto3b(lba, cdb->addr); cdb->length = num_blocks; break; } case READ_10: case WRITE_10: { struct scsi_rw_10 *cdb = (struct scsi_rw_10 *)cmdbyt; lba = scsi_4btoul(cdb->addr); num_blocks = scsi_2btoul(cdb->length); lba += nbc; num_blocks -= nbc; scsi_ulto4b(lba, cdb->addr); scsi_ulto2b(num_blocks, cdb->length); break; } case READ_12: case WRITE_12: { struct scsi_rw_12 *cdb = (struct scsi_rw_12 *)cmdbyt; lba = scsi_4btoul(cdb->addr); num_blocks = scsi_4btoul(cdb->length); lba += nbc; num_blocks -= nbc; scsi_ulto4b(lba, cdb->addr); scsi_ulto4b(num_blocks, cdb->length); break; } case READ_16: case WRITE_16: { struct scsi_rw_16 *cdb = (struct scsi_rw_16 *)cmdbyt; lba = scsi_8btou64(cdb->addr); num_blocks = scsi_4btoul(cdb->length); lba += nbc; num_blocks -= nbc; scsi_u64to8b(lba, cdb->addr); scsi_ulto4b(num_blocks, cdb->length); break; } default: return -1; } return (0); } static void ctlfedone(struct cam_periph *periph, union ccb *done_ccb) { struct ctlfe_lun_softc *softc; struct ctlfe_softc *bus_softc; struct ctlfe_cmd_info *cmd_info; struct ccb_accept_tio *atio = NULL; union ctl_io *io = NULL; struct mtx *mtx; cam_status status; KASSERT((done_ccb->ccb_h.flags & CAM_UNLOCKED) != 0, ("CCB in ctlfedone() without CAM_UNLOCKED flag")); #ifdef CTLFE_DEBUG printf("%s: entered, func_code = %#x\n", __func__, done_ccb->ccb_h.func_code); #endif /* * At this point CTL has no known use case for device queue freezes. 
* In case some SIM think different -- drop its freeze right here. */ if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { cam_release_devq(periph->path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN; } softc = (struct ctlfe_lun_softc *)periph->softc; bus_softc = softc->parent_softc; mtx = cam_periph_mtx(periph); mtx_lock(mtx); switch (done_ccb->ccb_h.func_code) { case XPT_ACCEPT_TARGET_IO: { LIST_REMOVE(&done_ccb->ccb_h, periph_links.le); atio = &done_ccb->atio; status = atio->ccb_h.status & CAM_STATUS_MASK; if (status != CAM_CDB_RECVD) { ctlfe_free_ccb(periph, done_ccb); goto out; } resubmit: /* * Allocate a ctl_io, pass it to CTL, and wait for the * datamove or done. */ mtx_unlock(mtx); io = done_ccb->ccb_h.io_ptr; cmd_info = PRIV_INFO(io); ctl_zero_io(io); /* Save pointers on both sides */ PRIV_CCB(io) = done_ccb; PRIV_INFO(io) = cmd_info; done_ccb->ccb_h.io_ptr = io; /* * Only SCSI I/O comes down this path, resets, etc. come * down the immediate notify path below. 
*/ io->io_hdr.io_type = CTL_IO_SCSI; io->io_hdr.nexus.initid = atio->init_id; io->io_hdr.nexus.targ_port = bus_softc->port.targ_port; if (bus_softc->hba_misc & PIM_EXTLUNS) { io->io_hdr.nexus.targ_lun = ctl_decode_lun( CAM_EXTLUN_BYTE_SWIZZLE(atio->ccb_h.target_lun)); } else { io->io_hdr.nexus.targ_lun = atio->ccb_h.target_lun; } io->scsiio.priority = atio->priority; io->scsiio.tag_num = atio->tag_id; switch (atio->tag_action) { case CAM_TAG_ACTION_NONE: io->scsiio.tag_type = CTL_TAG_UNTAGGED; break; case MSG_SIMPLE_TASK: io->scsiio.tag_type = CTL_TAG_SIMPLE; break; case MSG_HEAD_OF_QUEUE_TASK: io->scsiio.tag_type = CTL_TAG_HEAD_OF_QUEUE; break; case MSG_ORDERED_TASK: io->scsiio.tag_type = CTL_TAG_ORDERED; break; case MSG_ACA_TASK: io->scsiio.tag_type = CTL_TAG_ACA; break; default: io->scsiio.tag_type = CTL_TAG_UNTAGGED; printf("%s: unhandled tag type %#x!!\n", __func__, atio->tag_action); break; } if (atio->cdb_len > sizeof(io->scsiio.cdb)) { printf("%s: WARNING: CDB len %d > ctl_io space %zd\n", __func__, atio->cdb_len, sizeof(io->scsiio.cdb)); } io->scsiio.cdb_len = min(atio->cdb_len, sizeof(io->scsiio.cdb)); bcopy(atio_cdb_ptr(atio), io->scsiio.cdb, io->scsiio.cdb_len); #ifdef CTLFEDEBUG printf("%s: %u:%u:%u: tag %04x CDB %02x\n", __func__, io->io_hdr.nexus.initid, io->io_hdr.nexus.targ_port, io->io_hdr.nexus.targ_lun, io->scsiio.tag_num, io->scsiio.cdb[0]); #endif ctl_queue(io); return; } case XPT_CONT_TARGET_IO: { int srr = 0; uint32_t srr_off = 0; atio = (struct ccb_accept_tio *)done_ccb->ccb_h.ccb_atio; io = (union ctl_io *)atio->ccb_h.io_ptr; softc->ctios_sent--; #ifdef CTLFEDEBUG printf("%s: got XPT_CONT_TARGET_IO tag %#x flags %#x\n", __func__, atio->tag_id, done_ccb->ccb_h.flags); #endif /* * Handle SRR case were the data pointer is pushed back hack */ if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_MESSAGE_RECV && done_ccb->csio.msg_ptr != NULL && done_ccb->csio.msg_ptr[0] == MSG_EXTENDED && done_ccb->csio.msg_ptr[1] == 5 && 
done_ccb->csio.msg_ptr[2] == 0) { srr = 1; srr_off = (done_ccb->csio.msg_ptr[3] << 24) | (done_ccb->csio.msg_ptr[4] << 16) | (done_ccb->csio.msg_ptr[5] << 8) | (done_ccb->csio.msg_ptr[6]); } /* * If we have an SRR and we're still sending data, we * should be able to adjust offsets and cycle again. * It is possible only if offset is from this datamove. */ if (srr && (io->io_hdr.flags & CTL_FLAG_DMA_INPROG) && srr_off >= io->scsiio.kern_rel_offset && srr_off < io->scsiio.kern_rel_offset + io->scsiio.kern_data_len) { io->scsiio.kern_data_resid = io->scsiio.kern_rel_offset + io->scsiio.kern_data_len - srr_off; io->scsiio.ext_data_filled = srr_off; io->scsiio.io_hdr.status = CTL_STATUS_NONE; io->io_hdr.flags |= CTL_FLAG_DMA_QUEUED; xpt_release_ccb(done_ccb); STAILQ_INSERT_HEAD(&softc->work_queue, &atio->ccb_h, periph_links.stqe); xpt_schedule(periph, CAM_PRIORITY_NORMAL); break; } /* * If status was being sent, the back end data is now history. * Hack it up and resubmit a new command with the CDB adjusted. * If the SIM does the right thing, all of the resid math * should work. */ if (srr && (io->io_hdr.flags & CTL_FLAG_DMA_INPROG) == 0) { xpt_release_ccb(done_ccb); if (ctlfe_adjust_cdb(atio, srr_off) == 0) { done_ccb = (union ccb *)atio; goto resubmit; } /* * Fall through to doom.... */ } if ((done_ccb->ccb_h.flags & CAM_SEND_STATUS) && (done_ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) io->io_hdr.flags |= CTL_FLAG_STATUS_SENT; /* * If we were sending status back to the initiator, free up * resources. If we were doing a datamove, call the * datamove done routine. */ if ((io->io_hdr.flags & CTL_FLAG_DMA_INPROG) == 0) { /* * If we asked to send sense data but it wasn't sent, * queue the I/O back to CTL for later REQUEST SENSE. 
*/ if ((done_ccb->ccb_h.flags & CAM_SEND_SENSE) != 0 && (done_ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP && (done_ccb->ccb_h.status & CAM_SENT_SENSE) == 0 && (io = ctl_alloc_io_nowait(bus_softc->port.ctl_pool_ref)) != NULL) { PRIV_INFO(io) = PRIV_INFO( (union ctl_io *)atio->ccb_h.io_ptr); ctl_queue_sense(atio->ccb_h.io_ptr); atio->ccb_h.io_ptr = io; } /* Abort ATIO if CTIO sending status has failed. */ if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { done_ccb->ccb_h.func_code = XPT_ABORT; done_ccb->cab.abort_ccb = (union ccb *)atio; xpt_action(done_ccb); } xpt_release_ccb(done_ccb); ctlfe_requeue_ccb(periph, (union ccb *)atio, /* unlock */1); return; } else { struct ctlfe_cmd_info *cmd_info; struct ccb_scsiio *csio; csio = &done_ccb->csio; cmd_info = PRIV_INFO(io); io->io_hdr.flags &= ~CTL_FLAG_DMA_INPROG; /* * Translate CAM status to CTL status. Success * does not change the overall, ctl_io status. In * that case we just set port_status to 0. If we * have a failure, though, set a data phase error * for the overall ctl_io. */ switch (done_ccb->ccb_h.status & CAM_STATUS_MASK) { case CAM_REQ_CMP: io->scsiio.kern_data_resid -= csio->dxfer_len - csio->resid; io->io_hdr.port_status = 0; break; default: /* * XXX KDM we probably need to figure out a * standard set of errors that the SIM * drivers should return in the event of a * data transfer failure. A data phase * error will at least point the user to a * data transfer error of some sort. * Hopefully the SIM printed out some * additional information to give the user * a clue what happened. */ io->io_hdr.port_status = 0xbad1; ctl_set_data_phase_error(&io->scsiio); /* * XXX KDM figure out residual. */ break; } /* * If we had to break this S/G list into multiple * pieces, figure out where we are in the list, and * continue sending pieces if necessary. 
*/ if ((cmd_info->flags & CTLFE_CMD_PIECEWISE) && io->io_hdr.port_status == 0 && csio->resid == 0) { ccb_flags flags; uint8_t *data_ptr; uint32_t dxfer_len; flags = atio->ccb_h.flags & (CAM_DIS_DISCONNECT| CAM_TAG_ACTION_VALID); ctlfedata(softc, io, &flags, &data_ptr, &dxfer_len, &csio->sglist_cnt); if (((flags & CAM_SEND_STATUS) == 0) && (dxfer_len == 0)) { printf("%s: tag %04x no status or " "len cdb = %02x\n", __func__, atio->tag_id, atio_cdb_ptr(atio)[0]); printf("%s: tag %04x io status %#x\n", __func__, atio->tag_id, io->io_hdr.status); } cam_fill_ctio(csio, /*retries*/ 2, ctlfedone, flags, (flags & CAM_TAG_ACTION_VALID) ? MSG_SIMPLE_Q_TAG : 0, atio->tag_id, atio->init_id, 0, /*data_ptr*/ data_ptr, /*dxfer_len*/ dxfer_len, CTLFE_TIMEOUT * 1000); csio->ccb_h.flags |= CAM_UNLOCKED; csio->resid = 0; csio->ccb_h.ccb_atio = atio; io->io_hdr.flags |= CTL_FLAG_DMA_INPROG; softc->ctios_sent++; mtx_unlock(mtx); xpt_action((union ccb *)csio); } else { /* * Release the CTIO. The ATIO will be sent back * down to the SIM once we send status. */ xpt_release_ccb(done_ccb); mtx_unlock(mtx); ctl_datamove_done(io, false); } return; } break; } case XPT_IMMEDIATE_NOTIFY: { union ctl_io *io; struct ccb_immediate_notify *inot; int send_ctl_io; LIST_REMOVE(&done_ccb->ccb_h, periph_links.le); inot = &done_ccb->cin1; io = done_ccb->ccb_h.io_ptr; ctl_zero_io(io); send_ctl_io = 1; io->io_hdr.io_type = CTL_IO_TASK; PRIV_CCB(io) = done_ccb; inot->ccb_h.io_ptr = io; io->io_hdr.nexus.initid = inot->initiator_id; io->io_hdr.nexus.targ_port = bus_softc->port.targ_port; if (bus_softc->hba_misc & PIM_EXTLUNS) { io->io_hdr.nexus.targ_lun = ctl_decode_lun( CAM_EXTLUN_BYTE_SWIZZLE(inot->ccb_h.target_lun)); } else { io->io_hdr.nexus.targ_lun = inot->ccb_h.target_lun; } /* XXX KDM should this be the tag_id? 
*/ io->taskio.tag_num = inot->seq_id; status = inot->ccb_h.status & CAM_STATUS_MASK; switch (status) { case CAM_SCSI_BUS_RESET: io->taskio.task_action = CTL_TASK_BUS_RESET; break; case CAM_BDR_SENT: io->taskio.task_action = CTL_TASK_TARGET_RESET; break; case CAM_MESSAGE_RECV: switch (inot->arg) { case MSG_ABORT_TASK_SET: io->taskio.task_action = CTL_TASK_ABORT_TASK_SET; break; case MSG_TARGET_RESET: io->taskio.task_action = CTL_TASK_TARGET_RESET; break; case MSG_ABORT_TASK: io->taskio.task_action = CTL_TASK_ABORT_TASK; break; case MSG_LOGICAL_UNIT_RESET: io->taskio.task_action = CTL_TASK_LUN_RESET; break; case MSG_CLEAR_TASK_SET: io->taskio.task_action = CTL_TASK_CLEAR_TASK_SET; break; case MSG_CLEAR_ACA: io->taskio.task_action = CTL_TASK_CLEAR_ACA; break; case MSG_QUERY_TASK: io->taskio.task_action = CTL_TASK_QUERY_TASK; break; case MSG_QUERY_TASK_SET: io->taskio.task_action = CTL_TASK_QUERY_TASK_SET; break; case MSG_QUERY_ASYNC_EVENT: io->taskio.task_action = CTL_TASK_QUERY_ASYNC_EVENT; break; case MSG_NOOP: send_ctl_io = 0; break; default: xpt_print(periph->path, "%s: unsupported INOT message 0x%x\n", __func__, inot->arg); send_ctl_io = 0; break; } break; default: xpt_print(periph->path, "%s: unsupported INOT status 0x%x\n", __func__, status); /* FALLTHROUGH */ case CAM_REQ_ABORTED: case CAM_REQ_INVALID: case CAM_DEV_NOT_THERE: case CAM_PROVIDE_FAIL: ctlfe_free_ccb(periph, done_ccb); goto out; } mtx_unlock(mtx); if (send_ctl_io != 0) { ctl_queue(io); } else { done_ccb->ccb_h.status = CAM_REQ_INPROG; done_ccb->ccb_h.func_code = XPT_NOTIFY_ACKNOWLEDGE; xpt_action(done_ccb); } return; } case XPT_NOTIFY_ACKNOWLEDGE: /* Queue this back down to the SIM as an immediate notify. 
*/ done_ccb->ccb_h.status = CAM_REQ_INPROG; done_ccb->ccb_h.func_code = XPT_IMMEDIATE_NOTIFY; ctlfe_requeue_ccb(periph, done_ccb, /* unlock */1); return; case XPT_SET_SIM_KNOB: case XPT_GET_SIM_KNOB: case XPT_GET_SIM_KNOB_OLD: break; default: panic("%s: unexpected CCB type %#x", __func__, done_ccb->ccb_h.func_code); break; } out: mtx_unlock(mtx); } static void ctlfe_onoffline(void *arg, int online) { struct ctlfe_softc *bus_softc = arg; union ccb *ccb; cam_status status; struct cam_path *path; int set_wwnn = 0; status = xpt_create_path(&path, /*periph*/ NULL, bus_softc->path_id, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD); if (status != CAM_REQ_CMP) { printf("%s: unable to create path!\n", __func__); return; } ccb = xpt_alloc_ccb(); xpt_setup_ccb(&ccb->ccb_h, path, CAM_PRIORITY_NONE); ccb->ccb_h.func_code = XPT_GET_SIM_KNOB; xpt_action(ccb); /* Check whether we should change WWNs. */ if (online != 0) { if ((ccb->knob.xport_specific.valid & KNOB_VALID_ADDRESS) != 0){ printf("%s: %s current WWNN %#jx\n", __func__, bus_softc->port_name, ccb->knob.xport_specific.fc.wwnn); printf("%s: %s current WWPN %#jx\n", __func__, bus_softc->port_name, ccb->knob.xport_specific.fc.wwpn); /* * If the user has specified a WWNN/WWPN, send them * down to the SIM. Otherwise, record what the SIM * has reported. 
*/ if (bus_softc->port.wwnn != 0 && bus_softc->port.wwnn != ccb->knob.xport_specific.fc.wwnn) { ccb->knob.xport_specific.fc.wwnn = bus_softc->port.wwnn; set_wwnn = 1; } else { ctl_port_set_wwns(&bus_softc->port, true, ccb->knob.xport_specific.fc.wwnn, false, 0); } if (bus_softc->port.wwpn != 0 && bus_softc->port.wwpn != ccb->knob.xport_specific.fc.wwpn) { ccb->knob.xport_specific.fc.wwpn = bus_softc->port.wwpn; set_wwnn = 1; } else { ctl_port_set_wwns(&bus_softc->port, false, 0, true, ccb->knob.xport_specific.fc.wwpn); } } else { printf("%s: %s has no valid WWNN/WWPN\n", __func__, bus_softc->port_name); if (bus_softc->port.wwnn != 0) { ccb->knob.xport_specific.fc.wwnn = bus_softc->port.wwnn; set_wwnn = 1; } if (bus_softc->port.wwpn != 0) { ccb->knob.xport_specific.fc.wwpn = bus_softc->port.wwpn; set_wwnn = 1; } } } if (set_wwnn) { ccb->ccb_h.func_code = XPT_SET_SIM_KNOB; ccb->knob.xport_specific.valid = KNOB_VALID_ADDRESS; xpt_action(ccb); if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { printf("%s: %s (path id %d) failed set WWNs: %#x\n", __func__, bus_softc->port_name, bus_softc->path_id, ccb->ccb_h.status); } else { printf("%s: %s new WWNN %#jx\n", __func__, bus_softc->port_name, ccb->knob.xport_specific.fc.wwnn); printf("%s: %s new WWPN %#jx\n", __func__, bus_softc->port_name, ccb->knob.xport_specific.fc.wwpn); } } /* Check whether we should change role. */ if ((ccb->knob.xport_specific.valid & KNOB_VALID_ROLE) == 0 || ((online != 0) ^ ((ccb->knob.xport_specific.fc.role & KNOB_ROLE_TARGET) != 0)) != 0) { ccb->ccb_h.func_code = XPT_SET_SIM_KNOB; ccb->knob.xport_specific.valid = KNOB_VALID_ROLE; if (online) ccb->knob.xport_specific.fc.role |= KNOB_ROLE_TARGET; else ccb->knob.xport_specific.fc.role &= ~KNOB_ROLE_TARGET; xpt_action(ccb); if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { printf("%s: %s (path id %d) failed %s target role: %#x\n", __func__, bus_softc->port_name, bus_softc->path_id, online ? 
"enable" : "disable", ccb->ccb_h.status); } else { printf("%s: %s (path id %d) target role %s succeeded\n", __func__, bus_softc->port_name, bus_softc->path_id, online ? "enable" : "disable"); } } xpt_free_path(path); xpt_free_ccb(ccb); } static void ctlfe_online(void *arg) { struct ctlfe_softc *bus_softc; struct cam_path *path; cam_status status; struct ctlfe_lun_softc *lun_softc; struct cam_periph *periph; bus_softc = (struct ctlfe_softc *)arg; /* * Create the wildcard LUN before bringing the port online. */ status = xpt_create_path(&path, /*periph*/ NULL, bus_softc->path_id, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD); if (status != CAM_REQ_CMP) { printf("%s: unable to create path for wildcard periph\n", __func__); return; } lun_softc = malloc(sizeof(*lun_softc), M_CTLFE, M_WAITOK | M_ZERO); xpt_path_lock(path); periph = cam_periph_find(path, "ctl"); if (periph != NULL) { /* We've already got a periph, no need to alloc a new one. */ xpt_path_unlock(path); xpt_free_path(path); free(lun_softc, M_CTLFE); return; } lun_softc->parent_softc = bus_softc; lun_softc->flags |= CTLFE_LUN_WILDCARD; status = cam_periph_alloc(ctlferegister, ctlfeoninvalidate, ctlfecleanup, ctlfestart, "ctl", CAM_PERIPH_BIO, path, ctlfeasync, 0, lun_softc); if ((status & CAM_STATUS_MASK) != CAM_REQ_CMP) { const struct cam_status_entry *entry; entry = cam_fetch_status_entry(status); printf("%s: CAM error %s (%#x) returned from " "cam_periph_alloc()\n", __func__, (entry != NULL) ? entry->status_text : "Unknown", status); free(lun_softc, M_CTLFE); } xpt_path_unlock(path); ctlfe_onoffline(arg, /*online*/ 1); xpt_free_path(path); } static void ctlfe_offline(void *arg) { struct ctlfe_softc *bus_softc; struct cam_path *path; cam_status status; struct cam_periph *periph; bus_softc = (struct ctlfe_softc *)arg; ctlfe_onoffline(arg, /*online*/ 0); /* * Disable the wildcard LUN for this port now that we have taken * the port offline. 
*/ status = xpt_create_path(&path, /*periph*/ NULL, bus_softc->path_id, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD); if (status != CAM_REQ_CMP) { printf("%s: unable to create path for wildcard periph\n", __func__); return; } xpt_path_lock(path); if ((periph = cam_periph_find(path, "ctl")) != NULL) cam_periph_invalidate(periph); xpt_path_unlock(path); xpt_free_path(path); } /* * This will get called to enable a LUN on every bus that is attached to * CTL. So we only need to create a path/periph for this particular bus. */ static int ctlfe_lun_enable(void *arg, int lun_id) { struct ctlfe_softc *bus_softc; struct ctlfe_lun_softc *softc; struct cam_path *path; struct cam_periph *periph; cam_status status; bus_softc = (struct ctlfe_softc *)arg; if (bus_softc->hba_misc & PIM_EXTLUNS) lun_id = CAM_EXTLUN_BYTE_SWIZZLE(ctl_encode_lun(lun_id)); status = xpt_create_path(&path, /*periph*/ NULL, bus_softc->path_id, bus_softc->target_id, lun_id); /* XXX KDM need some way to return status to CTL here? */ if (status != CAM_REQ_CMP) { printf("%s: could not create path, status %#x\n", __func__, status); return (1); } softc = malloc(sizeof(*softc), M_CTLFE, M_WAITOK | M_ZERO); xpt_path_lock(path); periph = cam_periph_find(path, "ctl"); if (periph != NULL) { /* We've already got a periph, no need to alloc a new one. */ xpt_path_unlock(path); xpt_free_path(path); free(softc, M_CTLFE); return (0); } softc->parent_softc = bus_softc; status = cam_periph_alloc(ctlferegister, ctlfeoninvalidate, ctlfecleanup, ctlfestart, "ctl", CAM_PERIPH_BIO, path, ctlfeasync, 0, softc); if ((status & CAM_STATUS_MASK) != CAM_REQ_CMP) { const struct cam_status_entry *entry; entry = cam_fetch_status_entry(status); printf("%s: CAM error %s (%#x) returned from " "cam_periph_alloc()\n", __func__, (entry != NULL) ? 
entry->status_text : "Unknown", status); free(softc, M_CTLFE); } xpt_path_unlock(path); xpt_free_path(path); return (0); } /* * This will get called when the user removes a LUN to disable that LUN * on every bus that is attached to CTL. */ static int ctlfe_lun_disable(void *arg, int lun_id) { struct ctlfe_softc *softc; struct ctlfe_lun_softc *lun_softc; softc = (struct ctlfe_softc *)arg; if (softc->hba_misc & PIM_EXTLUNS) lun_id = CAM_EXTLUN_BYTE_SWIZZLE(ctl_encode_lun(lun_id)); mtx_lock(&softc->lun_softc_mtx); STAILQ_FOREACH(lun_softc, &softc->lun_softc_list, links) { struct cam_path *path; path = lun_softc->periph->path; if ((xpt_path_target_id(path) == softc->target_id) && (xpt_path_lun_id(path) == lun_id)) { break; } } if (lun_softc == NULL) { mtx_unlock(&softc->lun_softc_mtx); printf("%s: can't find lun %d\n", __func__, lun_id); return (1); } cam_periph_acquire(lun_softc->periph); mtx_unlock(&softc->lun_softc_mtx); cam_periph_lock(lun_softc->periph); cam_periph_invalidate(lun_softc->periph); cam_periph_unlock(lun_softc->periph); cam_periph_release(lun_softc->periph); return (0); } static void ctlfe_dump_sim(struct cam_sim *sim) { printf("%s%d: max dev openings: %d, max tagged dev openings: %d\n", sim->sim_name, sim->unit_number, sim->max_dev_openings, sim->max_tagged_dev_openings); } /* * Assumes that the SIM lock is held. 
*/ static void ctlfe_dump_queue(struct ctlfe_lun_softc *softc) { struct cam_periph *periph = softc->periph; struct ccb_hdr *hdr; struct ccb_getdevstats cgds; int num_items; + memset(&cgds, 0, sizeof(cgds)); xpt_setup_ccb(&cgds.ccb_h, periph->path, CAM_PRIORITY_NORMAL); cgds.ccb_h.func_code = XPT_GDEV_STATS; xpt_action((union ccb *)&cgds); if ((cgds.ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { xpt_print(periph->path, "devq: openings %d, active %d, " "allocated %d, queued %d, held %d\n", cgds.dev_openings, cgds.dev_active, cgds.allocated, cgds.queued, cgds.held); } num_items = 0; STAILQ_FOREACH(hdr, &softc->work_queue, periph_links.stqe) { union ctl_io *io = hdr->io_ptr; num_items++; /* * Only regular SCSI I/O is put on the work * queue, so we can print sense here. There may be no * sense if it's no the queue for a DMA, but this serves to * print out the CCB as well. * * XXX KDM switch this over to scsi_sense_print() when * CTL is merged in with CAM. */ ctl_io_error_print(io, NULL); /* * Print DMA status if we are DMA_QUEUED. */ if (io->io_hdr.flags & CTL_FLAG_DMA_QUEUED) { xpt_print(periph->path, "Total %u, Current %u, Resid %u\n", io->scsiio.kern_total_len, io->scsiio.kern_data_len, io->scsiio.kern_data_resid); } } xpt_print(periph->path, "%d requests waiting for CCBs\n", num_items); xpt_print(periph->path, "%d CTIOs outstanding\n", softc->ctios_sent); } /* * Datamove/done routine called by CTL. Put ourselves on the queue to * receive a CCB from CAM so we can queue the continue I/O request down * to the adapter. 
*/ static void ctlfe_datamove(union ctl_io *io) { union ccb *ccb; struct cam_periph *periph; struct ctlfe_lun_softc *softc; KASSERT(io->io_hdr.io_type == CTL_IO_SCSI, ("%s: unexpected I/O type %x", __func__, io->io_hdr.io_type)); io->scsiio.ext_data_filled = 0; ccb = PRIV_CCB(io); periph = xpt_path_periph(ccb->ccb_h.path); cam_periph_lock(periph); softc = (struct ctlfe_lun_softc *)periph->softc; io->io_hdr.flags |= CTL_FLAG_DMA_QUEUED; if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE) io->io_hdr.flags |= CTL_FLAG_STATUS_QUEUED; STAILQ_INSERT_TAIL(&softc->work_queue, &ccb->ccb_h, periph_links.stqe); xpt_schedule(periph, CAM_PRIORITY_NORMAL); cam_periph_unlock(periph); } static void ctlfe_done(union ctl_io *io) { union ccb *ccb; struct cam_periph *periph; struct ctlfe_lun_softc *softc; ccb = PRIV_CCB(io); periph = xpt_path_periph(ccb->ccb_h.path); cam_periph_lock(periph); softc = (struct ctlfe_lun_softc *)periph->softc; if (io->io_hdr.io_type == CTL_IO_TASK) { /* * Send the notify acknowledge down to the SIM, to let it * know we processed the task management command. 
*/ ccb->ccb_h.status = CAM_REQ_INPROG; ccb->ccb_h.func_code = XPT_NOTIFY_ACKNOWLEDGE; switch (io->taskio.task_status) { case CTL_TASK_FUNCTION_COMPLETE: ccb->cna2.arg = CAM_RSP_TMF_COMPLETE; break; case CTL_TASK_FUNCTION_SUCCEEDED: ccb->cna2.arg = CAM_RSP_TMF_SUCCEEDED; ccb->ccb_h.flags |= CAM_SEND_STATUS; break; case CTL_TASK_FUNCTION_REJECTED: ccb->cna2.arg = CAM_RSP_TMF_REJECTED; ccb->ccb_h.flags |= CAM_SEND_STATUS; break; case CTL_TASK_LUN_DOES_NOT_EXIST: ccb->cna2.arg = CAM_RSP_TMF_INCORRECT_LUN; ccb->ccb_h.flags |= CAM_SEND_STATUS; break; case CTL_TASK_FUNCTION_NOT_SUPPORTED: ccb->cna2.arg = CAM_RSP_TMF_FAILED; ccb->ccb_h.flags |= CAM_SEND_STATUS; break; } ccb->cna2.arg |= scsi_3btoul(io->taskio.task_resp) << 8; xpt_action(ccb); } else if (io->io_hdr.flags & CTL_FLAG_STATUS_SENT) { ctlfe_requeue_ccb(periph, ccb, /* unlock */1); return; } else { io->io_hdr.flags |= CTL_FLAG_STATUS_QUEUED; STAILQ_INSERT_TAIL(&softc->work_queue, &ccb->ccb_h, periph_links.stqe); xpt_schedule(periph, CAM_PRIORITY_NORMAL); } cam_periph_unlock(periph); } static void ctlfe_dump(void) { struct ctlfe_softc *bus_softc; struct ctlfe_lun_softc *lun_softc; STAILQ_FOREACH(bus_softc, &ctlfe_softc_list, links) { ctlfe_dump_sim(bus_softc->sim); STAILQ_FOREACH(lun_softc, &bus_softc->lun_softc_list, links) ctlfe_dump_queue(lun_softc); } } diff --git a/sys/cam/scsi/scsi_targ_bh.c b/sys/cam/scsi/scsi_targ_bh.c index 84aa16aa05f4..14edb254a0e5 100644 --- a/sys/cam/scsi/scsi_targ_bh.c +++ b/sys/cam/scsi/scsi_targ_bh.c @@ -1,765 +1,768 @@ /*- * Implementation of the Target Mode 'Black Hole device' for CAM. * * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 1999 Justin T. Gibbs. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include static MALLOC_DEFINE(M_SCSIBH, "SCSI bh", "SCSI blackhole buffers"); typedef enum { TARGBH_STATE_NORMAL, TARGBH_STATE_EXCEPTION, TARGBH_STATE_TEARDOWN } targbh_state; typedef enum { TARGBH_FLAG_NONE = 0x00, TARGBH_FLAG_LUN_ENABLED = 0x01 } targbh_flags; typedef enum { TARGBH_CCB_WORKQ } targbh_ccb_types; #define MAX_ACCEPT 8 #define MAX_IMMEDIATE 16 #define MAX_BUF_SIZE 256 /* Max inquiry/sense/mode page transfer */ /* Offsets into our private CCB area for storing accept information */ #define ccb_type ppriv_field0 #define ccb_descr ppriv_ptr1 /* We stick a pointer to the originating accept TIO in each continue I/O CCB */ #define ccb_atio ppriv_ptr1 TAILQ_HEAD(ccb_queue, ccb_hdr); struct targbh_softc { struct ccb_queue pending_queue; struct ccb_queue work_queue; struct ccb_queue unknown_atio_queue; struct devstat device_stats; targbh_state state; targbh_flags flags; u_int init_level; u_int inq_data_len; struct ccb_accept_tio *accept_tio_list; struct ccb_hdr_slist immed_notify_slist; }; struct targbh_cmd_desc { struct ccb_accept_tio* atio_link; u_int data_resid; /* How much left to transfer */ u_int data_increment;/* Amount to send before next disconnect */ void* data; /* The data. 
Can be from backing_store or not */ void* backing_store;/* Backing store allocated for this descriptor*/ u_int max_size; /* Size of backing_store */ u_int32_t timeout; u_int8_t status; /* Status to return to initiator */ }; static struct scsi_inquiry_data no_lun_inq_data = { T_NODEVICE | (SID_QUAL_BAD_LU << 5), 0, /* version */2, /* format version */2 }; static struct scsi_sense_data_fixed no_lun_sense_data = { SSD_CURRENT_ERROR|SSD_ERRCODE_VALID, 0, SSD_KEY_NOT_READY, { 0, 0, 0, 0 }, /*extra_len*/offsetof(struct scsi_sense_data_fixed, fru) - offsetof(struct scsi_sense_data_fixed, extra_len), { 0, 0, 0, 0 }, /* Logical Unit Not Supported */ /*ASC*/0x25, /*ASCQ*/0 }; static const int request_sense_size = offsetof(struct scsi_sense_data_fixed, fru); static periph_init_t targbhinit; static void targbhasync(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg); static cam_status targbhenlun(struct cam_periph *periph); static cam_status targbhdislun(struct cam_periph *periph); static periph_ctor_t targbhctor; static periph_dtor_t targbhdtor; static periph_start_t targbhstart; static void targbhdone(struct cam_periph *periph, union ccb *done_ccb); #ifdef NOTYET static int targbherror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags); #endif static struct targbh_cmd_desc* targbhallocdescr(void); static void targbhfreedescr(struct targbh_cmd_desc *buf); static struct periph_driver targbhdriver = { targbhinit, "targbh", TAILQ_HEAD_INITIALIZER(targbhdriver.units), /* generation */ 0 }; PERIPHDRIVER_DECLARE(targbh, targbhdriver); static void targbhinit(void) { cam_status status; /* * Install a global async callback. This callback will * receive async callbacks like "new path registered". 
*/ status = xpt_register_async(AC_PATH_REGISTERED | AC_PATH_DEREGISTERED, targbhasync, NULL, NULL); if (status != CAM_REQ_CMP) { printf("targbh: Failed to attach master async callback " "due to status 0x%x!\n", status); } } static void targbhasync(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg) { struct cam_path *new_path; struct ccb_pathinq *cpi; path_id_t bus_path_id; cam_status status; cpi = (struct ccb_pathinq *)arg; if (code == AC_PATH_REGISTERED) bus_path_id = cpi->ccb_h.path_id; else bus_path_id = xpt_path_path_id(path); /* * Allocate a peripheral instance for * this target instance. */ status = xpt_create_path(&new_path, NULL, bus_path_id, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD); if (status != CAM_REQ_CMP) { printf("targbhasync: Unable to create path " "due to status 0x%x\n", status); return; } switch (code) { case AC_PATH_REGISTERED: { /* Only attach to controllers that support target mode */ if ((cpi->target_sprt & PIT_PROCESSOR) == 0) break; status = cam_periph_alloc(targbhctor, NULL, targbhdtor, targbhstart, "targbh", CAM_PERIPH_BIO, new_path, targbhasync, AC_PATH_REGISTERED, cpi); break; } case AC_PATH_DEREGISTERED: { struct cam_periph *periph; if ((periph = cam_periph_find(new_path, "targbh")) != NULL) cam_periph_invalidate(periph); break; } default: break; } xpt_free_path(new_path); } /* Attempt to enable our lun */ static cam_status targbhenlun(struct cam_periph *periph) { union ccb immed_ccb; struct targbh_softc *softc; cam_status status; int i; softc = (struct targbh_softc *)periph->softc; if ((softc->flags & TARGBH_FLAG_LUN_ENABLED) != 0) return (CAM_REQ_CMP); + memset(&immed_ccb, 0, sizeof(immed_ccb)); xpt_setup_ccb(&immed_ccb.ccb_h, periph->path, CAM_PRIORITY_NORMAL); immed_ccb.ccb_h.func_code = XPT_EN_LUN; /* Don't need support for any vendor specific commands */ immed_ccb.cel.grp6_len = 0; immed_ccb.cel.grp7_len = 0; immed_ccb.cel.enable = 1; xpt_action(&immed_ccb); status = immed_ccb.ccb_h.status; if (status != 
CAM_REQ_CMP) { xpt_print(periph->path, "targbhenlun - Enable Lun Rejected with status 0x%x\n", status); return (status); } softc->flags |= TARGBH_FLAG_LUN_ENABLED; /* * Build up a buffer of accept target I/O * operations for incoming selections. */ for (i = 0; i < MAX_ACCEPT; i++) { struct ccb_accept_tio *atio; atio = (struct ccb_accept_tio*)malloc(sizeof(*atio), M_SCSIBH, - M_NOWAIT); + M_ZERO | M_NOWAIT); if (atio == NULL) { status = CAM_RESRC_UNAVAIL; break; } atio->ccb_h.ccb_descr = targbhallocdescr(); if (atio->ccb_h.ccb_descr == NULL) { free(atio, M_SCSIBH); status = CAM_RESRC_UNAVAIL; break; } xpt_setup_ccb(&atio->ccb_h, periph->path, CAM_PRIORITY_NORMAL); atio->ccb_h.func_code = XPT_ACCEPT_TARGET_IO; atio->ccb_h.cbfcnp = targbhdone; ((struct targbh_cmd_desc*)atio->ccb_h.ccb_descr)->atio_link = softc->accept_tio_list; softc->accept_tio_list = atio; xpt_action((union ccb *)atio); status = atio->ccb_h.status; if (status != CAM_REQ_INPROG) break; } if (i == 0) { xpt_print(periph->path, "targbhenlun - Could not allocate accept tio CCBs: status " "= 0x%x\n", status); targbhdislun(periph); return (CAM_REQ_CMP_ERR); } /* * Build up a buffer of immediate notify CCBs * so the SIM can tell us of asynchronous target mode events. 
*/ for (i = 0; i < MAX_ACCEPT; i++) { struct ccb_immediate_notify *inot; inot = (struct ccb_immediate_notify*)malloc(sizeof(*inot), - M_SCSIBH, M_NOWAIT); + M_SCSIBH, M_ZERO | M_NOWAIT); if (inot == NULL) { status = CAM_RESRC_UNAVAIL; break; } xpt_setup_ccb(&inot->ccb_h, periph->path, CAM_PRIORITY_NORMAL); inot->ccb_h.func_code = XPT_IMMEDIATE_NOTIFY; inot->ccb_h.cbfcnp = targbhdone; SLIST_INSERT_HEAD(&softc->immed_notify_slist, &inot->ccb_h, periph_links.sle); xpt_action((union ccb *)inot); status = inot->ccb_h.status; if (status != CAM_REQ_INPROG) break; } if (i == 0) { xpt_print(periph->path, "targbhenlun - Could not allocate immediate notify " "CCBs: status = 0x%x\n", status); targbhdislun(periph); return (CAM_REQ_CMP_ERR); } return (CAM_REQ_CMP); } static cam_status targbhdislun(struct cam_periph *periph) { union ccb ccb; struct targbh_softc *softc; struct ccb_accept_tio* atio; struct ccb_hdr *ccb_h; softc = (struct targbh_softc *)periph->softc; if ((softc->flags & TARGBH_FLAG_LUN_ENABLED) == 0) return CAM_REQ_CMP; + memset(&ccb, 0, sizeof(ccb)); + /* XXX Block for Continue I/O completion */ /* Kill off all ACCECPT and IMMEDIATE CCBs */ while ((atio = softc->accept_tio_list) != NULL) { softc->accept_tio_list = ((struct targbh_cmd_desc*)atio->ccb_h.ccb_descr)->atio_link; xpt_setup_ccb(&ccb.cab.ccb_h, periph->path, CAM_PRIORITY_NORMAL); ccb.cab.ccb_h.func_code = XPT_ABORT; ccb.cab.abort_ccb = (union ccb *)atio; xpt_action(&ccb); } while ((ccb_h = SLIST_FIRST(&softc->immed_notify_slist)) != NULL) { SLIST_REMOVE_HEAD(&softc->immed_notify_slist, periph_links.sle); xpt_setup_ccb(&ccb.cab.ccb_h, periph->path, CAM_PRIORITY_NORMAL); ccb.cab.ccb_h.func_code = XPT_ABORT; ccb.cab.abort_ccb = (union ccb *)ccb_h; xpt_action(&ccb); } /* * Dissable this lun. 
*/ xpt_setup_ccb(&ccb.cel.ccb_h, periph->path, CAM_PRIORITY_NORMAL); ccb.cel.ccb_h.func_code = XPT_EN_LUN; ccb.cel.enable = 0; xpt_action(&ccb); if (ccb.cel.ccb_h.status != CAM_REQ_CMP) printf("targbhdislun - Disabling lun on controller failed " "with status 0x%x\n", ccb.cel.ccb_h.status); else softc->flags &= ~TARGBH_FLAG_LUN_ENABLED; return (ccb.cel.ccb_h.status); } static cam_status targbhctor(struct cam_periph *periph, void *arg) { struct targbh_softc *softc; /* Allocate our per-instance private storage */ softc = (struct targbh_softc *)malloc(sizeof(*softc), M_SCSIBH, M_NOWAIT); if (softc == NULL) { printf("targctor: unable to malloc softc\n"); return (CAM_REQ_CMP_ERR); } bzero(softc, sizeof(*softc)); TAILQ_INIT(&softc->pending_queue); TAILQ_INIT(&softc->work_queue); softc->accept_tio_list = NULL; SLIST_INIT(&softc->immed_notify_slist); softc->state = TARGBH_STATE_NORMAL; periph->softc = softc; softc->init_level++; if (targbhenlun(periph) != CAM_REQ_CMP) cam_periph_invalidate(periph); return (CAM_REQ_CMP); } static void targbhdtor(struct cam_periph *periph) { struct targbh_softc *softc; softc = (struct targbh_softc *)periph->softc; softc->state = TARGBH_STATE_TEARDOWN; targbhdislun(periph); switch (softc->init_level) { case 0: panic("targdtor - impossible init level"); case 1: /* FALLTHROUGH */ default: /* XXX Wait for callback of targbhdislun() */ cam_periph_sleep(periph, softc, PRIBIO, "targbh", hz/2); free(softc, M_SCSIBH); break; } } static void targbhstart(struct cam_periph *periph, union ccb *start_ccb) { struct targbh_softc *softc; struct ccb_hdr *ccbh; struct ccb_accept_tio *atio; struct targbh_cmd_desc *desc; struct ccb_scsiio *csio; ccb_flags flags; softc = (struct targbh_softc *)periph->softc; ccbh = TAILQ_FIRST(&softc->work_queue); if (ccbh == NULL) { xpt_release_ccb(start_ccb); } else { TAILQ_REMOVE(&softc->work_queue, ccbh, periph_links.tqe); TAILQ_INSERT_HEAD(&softc->pending_queue, ccbh, periph_links.tqe); atio = (struct ccb_accept_tio*)ccbh; 
desc = (struct targbh_cmd_desc *)atio->ccb_h.ccb_descr; /* Is this a tagged request? */ flags = atio->ccb_h.flags & (CAM_DIS_DISCONNECT|CAM_TAG_ACTION_VALID|CAM_DIR_MASK); csio = &start_ccb->csio; /* * If we are done with the transaction, tell the * controller to send status and perform a CMD_CMPLT. * If we have associated sense data, see if we can * send that too. */ if (desc->data_resid == desc->data_increment) { flags |= CAM_SEND_STATUS; if (atio->sense_len) { csio->sense_len = atio->sense_len; csio->sense_data = atio->sense_data; flags |= CAM_SEND_SENSE; } } cam_fill_ctio(csio, /*retries*/2, targbhdone, flags, (flags & CAM_TAG_ACTION_VALID)? MSG_SIMPLE_Q_TAG : 0, atio->tag_id, atio->init_id, desc->status, /*data_ptr*/desc->data_increment == 0 ? NULL : desc->data, /*dxfer_len*/desc->data_increment, /*timeout*/desc->timeout); /* Override our wildcard attachment */ start_ccb->ccb_h.target_id = atio->ccb_h.target_id; start_ccb->ccb_h.target_lun = atio->ccb_h.target_lun; start_ccb->ccb_h.ccb_type = TARGBH_CCB_WORKQ; start_ccb->ccb_h.ccb_atio = atio; CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE, ("Sending a CTIO\n")); xpt_action(start_ccb); /* * If the queue was frozen waiting for the response * to this ATIO (for instance disconnection was disallowed), * then release it now that our response has been queued. 
*/ if ((atio->ccb_h.status & CAM_DEV_QFRZN) != 0) { cam_release_devq(periph->path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); atio->ccb_h.status &= ~CAM_DEV_QFRZN; } ccbh = TAILQ_FIRST(&softc->work_queue); } if (ccbh != NULL) xpt_schedule(periph, CAM_PRIORITY_NORMAL); } static void targbhdone(struct cam_periph *periph, union ccb *done_ccb) { struct targbh_softc *softc; softc = (struct targbh_softc *)periph->softc; switch (done_ccb->ccb_h.func_code) { case XPT_ACCEPT_TARGET_IO: { struct ccb_accept_tio *atio; struct targbh_cmd_desc *descr; u_int8_t *cdb; int priority; atio = &done_ccb->atio; descr = (struct targbh_cmd_desc*)atio->ccb_h.ccb_descr; cdb = atio->cdb_io.cdb_bytes; if (softc->state == TARGBH_STATE_TEARDOWN || atio->ccb_h.status == CAM_REQ_ABORTED) { targbhfreedescr(descr); xpt_free_ccb(done_ccb); return; } /* * Determine the type of incoming command and * setup our buffer for a response. */ switch (cdb[0]) { case INQUIRY: { struct scsi_inquiry *inq; inq = (struct scsi_inquiry *)cdb; CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE, ("Saw an inquiry!\n")); /* * Validate the command. We don't * support any VPD pages, so complain * if EVPD is set. */ if ((inq->byte2 & SI_EVPD) != 0 || inq->page_code != 0) { atio->ccb_h.flags &= ~CAM_DIR_MASK; atio->ccb_h.flags |= CAM_DIR_NONE; /* * This needs to have other than a * no_lun_sense_data response. */ bcopy(&no_lun_sense_data, &atio->sense_data, min(sizeof(no_lun_sense_data), sizeof(atio->sense_data))); atio->sense_len = sizeof(no_lun_sense_data); descr->data_resid = 0; descr->data_increment = 0; descr->status = SCSI_STATUS_CHECK_COND; break; } /* * Direction is always relative * to the initator. 
*/ atio->ccb_h.flags &= ~CAM_DIR_MASK; atio->ccb_h.flags |= CAM_DIR_IN; descr->data = &no_lun_inq_data; descr->data_resid = MIN(sizeof(no_lun_inq_data), scsi_2btoul(inq->length)); descr->data_increment = descr->data_resid; descr->timeout = 5 * 1000; descr->status = SCSI_STATUS_OK; break; } case REQUEST_SENSE: { struct scsi_request_sense *rsense; rsense = (struct scsi_request_sense *)cdb; /* Refer to static sense data */ atio->ccb_h.flags &= ~CAM_DIR_MASK; atio->ccb_h.flags |= CAM_DIR_IN; descr->data = &no_lun_sense_data; descr->data_resid = request_sense_size; descr->data_resid = MIN(descr->data_resid, SCSI_CDB6_LEN(rsense->length)); descr->data_increment = descr->data_resid; descr->timeout = 5 * 1000; descr->status = SCSI_STATUS_OK; break; } default: /* Constant CA, tell initiator */ /* Direction is always relative to the initator */ atio->ccb_h.flags &= ~CAM_DIR_MASK; atio->ccb_h.flags |= CAM_DIR_NONE; bcopy(&no_lun_sense_data, &atio->sense_data, min(sizeof(no_lun_sense_data), sizeof(atio->sense_data))); atio->sense_len = sizeof (no_lun_sense_data); descr->data_resid = 0; descr->data_increment = 0; descr->timeout = 5 * 1000; descr->status = SCSI_STATUS_CHECK_COND; break; } /* Queue us up to receive a Continue Target I/O ccb. 
*/ if ((atio->ccb_h.flags & CAM_DIS_DISCONNECT) != 0) { TAILQ_INSERT_HEAD(&softc->work_queue, &atio->ccb_h, periph_links.tqe); priority = 0; } else { TAILQ_INSERT_TAIL(&softc->work_queue, &atio->ccb_h, periph_links.tqe); priority = CAM_PRIORITY_NORMAL; } xpt_schedule(periph, priority); break; } case XPT_CONT_TARGET_IO: { struct ccb_accept_tio *atio; struct targbh_cmd_desc *desc; CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE, ("Received completed CTIO\n")); atio = (struct ccb_accept_tio*)done_ccb->ccb_h.ccb_atio; desc = (struct targbh_cmd_desc *)atio->ccb_h.ccb_descr; TAILQ_REMOVE(&softc->pending_queue, &atio->ccb_h, periph_links.tqe); /* * We could check for CAM_SENT_SENSE bein set here, * but since we're not maintaining any CA/UA state, * there's no point. */ atio->sense_len = 0; done_ccb->ccb_h.flags &= ~CAM_SEND_SENSE; done_ccb->ccb_h.status &= ~CAM_SENT_SENSE; /* * Any errors will not change the data we return, * so make sure the queue is not left frozen. * XXX - At some point there may be errors that * leave us in a connected state with the * initiator... */ if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { printf("Releasing Queue\n"); cam_release_devq(done_ccb->ccb_h.path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN; } desc->data_resid -= desc->data_increment; xpt_release_ccb(done_ccb); if (softc->state != TARGBH_STATE_TEARDOWN) { /* * Send the original accept TIO back to the * controller to handle more work. 
*/ CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE, ("Returning ATIO to target\n")); /* Restore wildcards */ atio->ccb_h.target_id = CAM_TARGET_WILDCARD; atio->ccb_h.target_lun = CAM_LUN_WILDCARD; xpt_action((union ccb *)atio); break; } else { targbhfreedescr(desc); free(atio, M_SCSIBH); } break; } case XPT_IMMEDIATE_NOTIFY: { int frozen; frozen = (done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0; if (softc->state == TARGBH_STATE_TEARDOWN || done_ccb->ccb_h.status == CAM_REQ_ABORTED) { printf("Freed an immediate notify\n"); xpt_free_ccb(done_ccb); } else { /* Requeue for another immediate event */ xpt_action(done_ccb); } if (frozen != 0) cam_release_devq(periph->path, /*relsim_flags*/0, /*opening reduction*/0, /*timeout*/0, /*getcount_only*/0); break; } default: panic("targbhdone: Unexpected ccb opcode"); break; } } #ifdef NOTYET static int targbherror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags) { return 0; } #endif static struct targbh_cmd_desc* targbhallocdescr(void) { struct targbh_cmd_desc* descr; /* Allocate the targbh_descr structure */ descr = (struct targbh_cmd_desc *)malloc(sizeof(*descr), M_SCSIBH, M_NOWAIT); if (descr == NULL) return (NULL); bzero(descr, sizeof(*descr)); /* Allocate buffer backing store */ descr->backing_store = malloc(MAX_BUF_SIZE, M_SCSIBH, M_NOWAIT); if (descr->backing_store == NULL) { free(descr, M_SCSIBH); return (NULL); } descr->max_size = MAX_BUF_SIZE; return (descr); } static void targbhfreedescr(struct targbh_cmd_desc *descr) { free(descr->backing_store, M_SCSIBH); free(descr, M_SCSIBH); } diff --git a/sys/cam/scsi/scsi_target.c b/sys/cam/scsi/scsi_target.c index b2874f49f13f..b3d0461c1d24 100644 --- a/sys/cam/scsi/scsi_target.c +++ b/sys/cam/scsi/scsi_target.c @@ -1,1158 +1,1160 @@ /*- * Generic SCSI Target Kernel Mode Driver * * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2002 Nate Lawson. * Copyright (c) 1998, 1999, 2001, 2002 Justin T. Gibbs. * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include /* Includes to support callout */ #include #include #include #include #include #include #include #include /* Transaction information attached to each CCB sent by the user */ struct targ_cmd_descr { struct cam_periph_map_info mapinfo; TAILQ_ENTRY(targ_cmd_descr) tqe; union ccb *user_ccb; int priority; int func_code; }; /* Offset into the private CCB area for storing our descriptor */ #define targ_descr periph_priv.entries[1].ptr TAILQ_HEAD(descr_queue, targ_cmd_descr); typedef enum { TARG_STATE_RESV = 0x00, /* Invalid state */ TARG_STATE_OPENED = 0x01, /* Device opened, softc initialized */ TARG_STATE_LUN_ENABLED = 0x02 /* Device enabled for a path */ } targ_state; /* Per-instance device software context */ struct targ_softc { /* CCBs (CTIOs, ATIOs, INOTs) pending on the controller */ struct ccb_queue pending_ccb_queue; /* Command descriptors awaiting CTIO resources from the XPT */ struct descr_queue work_queue; /* Command descriptors that have been aborted back to the user. */ struct descr_queue abort_queue; /* * Queue of CCBs that have been copied out to userland, but our * userland daemon has not yet seen. 
*/ struct ccb_queue user_ccb_queue; struct cam_periph *periph; struct cam_path *path; targ_state state; u_int maxio; struct selinfo read_select; struct devstat device_stats; }; static d_open_t targopen; static d_read_t targread; static d_write_t targwrite; static d_ioctl_t targioctl; static d_poll_t targpoll; static d_kqfilter_t targkqfilter; static void targreadfiltdetach(struct knote *kn); static int targreadfilt(struct knote *kn, long hint); static struct filterops targread_filtops = { .f_isfd = 1, .f_detach = targreadfiltdetach, .f_event = targreadfilt, }; static struct cdevsw targ_cdevsw = { .d_version = D_VERSION, .d_flags = D_NEEDGIANT, .d_open = targopen, .d_read = targread, .d_write = targwrite, .d_ioctl = targioctl, .d_poll = targpoll, .d_name = "targ", .d_kqfilter = targkqfilter }; static cam_status targendislun(struct cam_path *path, int enable, int grp6_len, int grp7_len); static cam_status targenable(struct targ_softc *softc, struct cam_path *path, int grp6_len, int grp7_len); static cam_status targdisable(struct targ_softc *softc); static periph_ctor_t targctor; static periph_dtor_t targdtor; static periph_start_t targstart; static int targusermerge(struct targ_softc *softc, struct targ_cmd_descr *descr, union ccb *ccb); static int targsendccb(struct targ_softc *softc, union ccb *ccb, struct targ_cmd_descr *descr); static void targdone(struct cam_periph *periph, union ccb *done_ccb); static int targreturnccb(struct targ_softc *softc, union ccb *ccb); static union ccb * targgetccb(struct targ_softc *softc, xpt_opcode type, int priority); static void targfreeccb(struct targ_softc *softc, union ccb *ccb); static struct targ_cmd_descr * targgetdescr(struct targ_softc *softc); static periph_init_t targinit; static void targasync(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg); static void abort_all_pending(struct targ_softc *softc); static void notify_user(struct targ_softc *softc); static int targcamstatus(cam_status status); static 
size_t targccblen(xpt_opcode func_code);	/* continued from "static" on prev line */

/*
 * CAM periph driver registration table: ties the "targ" driver name to
 * its init routine and per-driver unit list.
 */
static struct periph_driver targdriver =
{
	targinit, "targ",
	TAILQ_HEAD_INITIALIZER(targdriver.units), /* generation */ 0
};

PERIPHDRIVER_DECLARE(targ, targdriver);

static MALLOC_DEFINE(M_TARG, "TARG", "TARG data");

/* Disable LUN if enabled and teardown softc */
static void
targcdevdtor(void *data)
{
	struct targ_softc *softc;
	struct cam_periph *periph;

	softc = data;
	/* Never enabled: there is no periph to tear down, just free the softc. */
	if (softc->periph == NULL) {
		printf("%s: destroying non-enabled target\n", __func__);
		free(softc, M_TARG);
		return;
	}
	/*
	 * Acquire a hold on the periph so that it doesn't go away before
	 * we are ready at the end of the function.
	 */
	periph = softc->periph;
	cam_periph_acquire(periph);
	cam_periph_lock(periph);
	(void)targdisable(softc);
	/* targdisable()/teardown may already have cleared softc->periph. */
	if (softc->periph != NULL) {
		cam_periph_invalidate(softc->periph);
		softc->periph = NULL;
	}
	cam_periph_unlock(periph);
	cam_periph_release(periph);
	free(softc, M_TARG);
}

/*
 * Create softc and initialize it. There is no locking here because a
 * periph doesn't get created until an ioctl is issued to do so, and
 * that can't happen until this method returns.
*/ static int targopen(struct cdev *dev, int flags, int fmt, struct thread *td) { struct targ_softc *softc; /* Allocate its softc, initialize it */ softc = malloc(sizeof(*softc), M_TARG, M_WAITOK | M_ZERO); softc->state = TARG_STATE_OPENED; softc->periph = NULL; softc->path = NULL; TAILQ_INIT(&softc->pending_ccb_queue); TAILQ_INIT(&softc->work_queue); TAILQ_INIT(&softc->abort_queue); TAILQ_INIT(&softc->user_ccb_queue); knlist_init_mtx(&softc->read_select.si_note, NULL); devfs_set_cdevpriv(softc, targcdevdtor); return (0); } /* Enable/disable LUNs, set debugging level */ static int targioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td) { struct targ_softc *softc; cam_status status; devfs_get_cdevpriv((void **)&softc); switch (cmd) { case TARGIOCENABLE: { struct ioc_enable_lun *new_lun; struct cam_path *path; new_lun = (struct ioc_enable_lun *)addr; status = xpt_create_path(&path, /*periph*/NULL, new_lun->path_id, new_lun->target_id, new_lun->lun_id); if (status != CAM_REQ_CMP) { printf("Couldn't create path, status %#x\n", status); break; } xpt_path_lock(path); status = targenable(softc, path, new_lun->grp6_len, new_lun->grp7_len); xpt_path_unlock(path); xpt_free_path(path); break; } case TARGIOCDISABLE: if (softc->periph == NULL) { status = CAM_DEV_NOT_THERE; break; } cam_periph_lock(softc->periph); status = targdisable(softc); cam_periph_unlock(softc->periph); break; case TARGIOCDEBUG: { struct ccb_debug cdbg; /* If no periph available, disallow debugging changes */ if ((softc->state & TARG_STATE_LUN_ENABLED) == 0) { status = CAM_DEV_NOT_THERE; break; } bzero(&cdbg, sizeof cdbg); if (*((int *)addr) != 0) cdbg.flags = CAM_DEBUG_PERIPH; else cdbg.flags = CAM_DEBUG_NONE; xpt_setup_ccb(&cdbg.ccb_h, softc->path, CAM_PRIORITY_NORMAL); cdbg.ccb_h.func_code = XPT_DEBUG; cdbg.ccb_h.cbfcnp = targdone; xpt_action((union ccb *)&cdbg); status = cdbg.ccb_h.status & CAM_STATUS_MASK; break; } default: status = CAM_PROVIDE_FAIL; break; } return 
(targcamstatus(status)); } /* Writes are always ready, reads wait for user_ccb_queue or abort_queue */ static int targpoll(struct cdev *dev, int poll_events, struct thread *td) { struct targ_softc *softc; int revents; devfs_get_cdevpriv((void **)&softc); /* Poll for write() is always ok. */ revents = poll_events & (POLLOUT | POLLWRNORM); if ((poll_events & (POLLIN | POLLRDNORM)) != 0) { /* Poll for read() depends on user and abort queues. */ cam_periph_lock(softc->periph); if (!TAILQ_EMPTY(&softc->user_ccb_queue) || !TAILQ_EMPTY(&softc->abort_queue)) { revents |= poll_events & (POLLIN | POLLRDNORM); } cam_periph_unlock(softc->periph); /* Only sleep if the user didn't poll for write. */ if (revents == 0) selrecord(td, &softc->read_select); } return (revents); } static int targkqfilter(struct cdev *dev, struct knote *kn) { struct targ_softc *softc; devfs_get_cdevpriv((void **)&softc); kn->kn_hook = (caddr_t)softc; kn->kn_fop = &targread_filtops; knlist_add(&softc->read_select.si_note, kn, 0); return (0); } static void targreadfiltdetach(struct knote *kn) { struct targ_softc *softc; softc = (struct targ_softc *)kn->kn_hook; knlist_remove(&softc->read_select.si_note, kn, 0); } /* Notify the user's kqueue when the user queue or abort queue gets a CCB */ static int targreadfilt(struct knote *kn, long hint) { struct targ_softc *softc; int retval; softc = (struct targ_softc *)kn->kn_hook; cam_periph_lock(softc->periph); retval = !TAILQ_EMPTY(&softc->user_ccb_queue) || !TAILQ_EMPTY(&softc->abort_queue); cam_periph_unlock(softc->periph); return (retval); } /* Send the HBA the enable/disable message */ static cam_status targendislun(struct cam_path *path, int enable, int grp6_len, int grp7_len) { struct ccb_en_lun en_ccb; cam_status status; /* Tell the lun to begin answering selects */ + memset(&en_ccb, 0, sizeof(en_ccb)); xpt_setup_ccb(&en_ccb.ccb_h, path, CAM_PRIORITY_NORMAL); en_ccb.ccb_h.func_code = XPT_EN_LUN; /* Don't need support for any vendor specific commands */ 
en_ccb.grp6_len = grp6_len; en_ccb.grp7_len = grp7_len; en_ccb.enable = enable ? 1 : 0; xpt_action((union ccb *)&en_ccb); status = en_ccb.ccb_h.status & CAM_STATUS_MASK; if (status != CAM_REQ_CMP) { xpt_print(path, "%sable lun CCB rejected, status %#x\n", enable ? "en" : "dis", status); } return (status); } /* Enable target mode on a LUN, given its path */ static cam_status targenable(struct targ_softc *softc, struct cam_path *path, int grp6_len, int grp7_len) { struct cam_periph *periph; struct ccb_pathinq cpi; cam_status status; if ((softc->state & TARG_STATE_LUN_ENABLED) != 0) return (CAM_LUN_ALRDY_ENA); /* Make sure SIM supports target mode */ xpt_path_inq(&cpi, path); status = cpi.ccb_h.status & CAM_STATUS_MASK; if (status != CAM_REQ_CMP) { printf("pathinq failed, status %#x\n", status); goto enable_fail; } if ((cpi.target_sprt & PIT_PROCESSOR) == 0) { printf("controller does not support target mode\n"); status = CAM_FUNC_NOTAVAIL; goto enable_fail; } if (cpi.maxio == 0) softc->maxio = DFLTPHYS; /* traditional default */ else if (cpi.maxio > maxphys) softc->maxio = maxphys; /* for safety */ else softc->maxio = cpi.maxio; /* real value */ /* Destroy any periph on our path if it is disabled */ periph = cam_periph_find(path, "targ"); if (periph != NULL) { struct targ_softc *del_softc; del_softc = (struct targ_softc *)periph->softc; if ((del_softc->state & TARG_STATE_LUN_ENABLED) == 0) { cam_periph_invalidate(del_softc->periph); del_softc->periph = NULL; } else { printf("Requested path still in use by targ%d\n", periph->unit_number); status = CAM_LUN_ALRDY_ENA; goto enable_fail; } } /* Create a periph instance attached to this path */ status = cam_periph_alloc(targctor, NULL, targdtor, targstart, "targ", CAM_PERIPH_BIO, path, targasync, 0, softc); if (status != CAM_REQ_CMP) { printf("cam_periph_alloc failed, status %#x\n", status); goto enable_fail; } /* Ensure that the periph now exists. 
*/
	if (cam_periph_find(path, "targ") == NULL) {
		panic("targenable: succeeded but no periph?");
		/* NOTREACHED */
	}

	/* Send the enable lun message */
	status = targendislun(path, /*enable*/1, grp6_len, grp7_len);
	if (status != CAM_REQ_CMP) {
		printf("enable lun failed, status %#x\n", status);
		goto enable_fail;
	}
	softc->state |= TARG_STATE_LUN_ENABLED;

enable_fail:
	return (status);
}

/* Disable this softc's target instance if enabled */
static cam_status
targdisable(struct targ_softc *softc)
{
	cam_status status;

	/* Nothing to do if the LUN was never enabled. */
	if ((softc->state & TARG_STATE_LUN_ENABLED) == 0)
		return (CAM_REQ_CMP);

	CAM_DEBUG(softc->path, CAM_DEBUG_PERIPH, ("targdisable\n"));

	/* Abort any ccbs pending on the controller */
	abort_all_pending(softc);

	/* Disable this lun */
	status = targendislun(softc->path, /*enable*/0,
			      /*grp6_len*/0, /*grp7_len*/0);
	if (status == CAM_REQ_CMP)
		softc->state &= ~TARG_STATE_LUN_ENABLED;
	else
		printf("Disable lun failed, status %#x\n", status);

	return (status);
}

/* Initialize a periph (called from cam_periph_alloc) */
static cam_status
targctor(struct cam_periph *periph, void *arg)
{
	struct targ_softc *softc;

	/* Store pointer to softc for periph-driven routines */
	softc = (struct targ_softc *)arg;
	periph->softc = softc;
	softc->periph = periph;
	softc->path = periph->path;
	return (CAM_REQ_CMP);
}

/* Periph destructor: release anything still queued to the user. */
static void
targdtor(struct cam_periph *periph)
{
	struct targ_softc     *softc;
	struct ccb_hdr	      *ccb_h;
	struct targ_cmd_descr *descr;

	softc = (struct targ_softc *)periph->softc;

	/*
	 * targdisable() aborts CCBs back to the user and leaves them
	 * on user_ccb_queue and abort_queue in case the user is still
	 * interested in them. We free them now.
*/ while ((ccb_h = TAILQ_FIRST(&softc->user_ccb_queue)) != NULL) { TAILQ_REMOVE(&softc->user_ccb_queue, ccb_h, periph_links.tqe); targfreeccb(softc, (union ccb *)ccb_h); } while ((descr = TAILQ_FIRST(&softc->abort_queue)) != NULL) { TAILQ_REMOVE(&softc->abort_queue, descr, tqe); free(descr, M_TARG); } softc->periph = NULL; softc->path = NULL; periph->softc = NULL; } /* Receive CCBs from user mode proc and send them to the HBA */ static int targwrite(struct cdev *dev, struct uio *uio, int ioflag) { union ccb *user_ccb; struct targ_softc *softc; struct targ_cmd_descr *descr; int write_len, error; int func_code, priority; devfs_get_cdevpriv((void **)&softc); write_len = error = 0; CAM_DEBUG(softc->path, CAM_DEBUG_PERIPH, ("write - uio_resid %zd\n", uio->uio_resid)); while (uio->uio_resid >= sizeof(user_ccb) && error == 0) { union ccb *ccb; error = uiomove((caddr_t)&user_ccb, sizeof(user_ccb), uio); if (error != 0) { CAM_DEBUG(softc->path, CAM_DEBUG_PERIPH, ("write - uiomove failed (%d)\n", error)); break; } priority = fuword32(&user_ccb->ccb_h.pinfo.priority); if (priority == CAM_PRIORITY_NONE) { error = EINVAL; break; } func_code = fuword32(&user_ccb->ccb_h.func_code); switch (func_code) { case XPT_ACCEPT_TARGET_IO: case XPT_IMMED_NOTIFY: case XPT_IMMEDIATE_NOTIFY: cam_periph_lock(softc->periph); ccb = targgetccb(softc, func_code, priority); descr = (struct targ_cmd_descr *)ccb->ccb_h.targ_descr; descr->user_ccb = user_ccb; descr->func_code = func_code; CAM_DEBUG(softc->path, CAM_DEBUG_PERIPH, ("Sent ATIO/INOT (%p)\n", user_ccb)); xpt_action(ccb); TAILQ_INSERT_TAIL(&softc->pending_ccb_queue, &ccb->ccb_h, periph_links.tqe); cam_periph_unlock(softc->periph); break; default: cam_periph_lock(softc->periph); if ((func_code & XPT_FC_QUEUED) != 0) { CAM_DEBUG(softc->path, CAM_DEBUG_PERIPH, ("Sending queued ccb %#x (%p)\n", func_code, user_ccb)); descr = targgetdescr(softc); descr->user_ccb = user_ccb; descr->priority = priority; descr->func_code = func_code; 
TAILQ_INSERT_TAIL(&softc->work_queue, descr, tqe); xpt_schedule(softc->periph, priority); } else { CAM_DEBUG(softc->path, CAM_DEBUG_PERIPH, ("Sending inline ccb %#x (%p)\n", func_code, user_ccb)); ccb = targgetccb(softc, func_code, priority); descr = (struct targ_cmd_descr *) ccb->ccb_h.targ_descr; descr->user_ccb = user_ccb; descr->priority = priority; descr->func_code = func_code; if (targusermerge(softc, descr, ccb) != EFAULT) targsendccb(softc, ccb, descr); targreturnccb(softc, ccb); } cam_periph_unlock(softc->periph); break; } write_len += sizeof(user_ccb); } /* * If we've successfully taken in some amount of * data, return success for that data first. If * an error is persistent, it will be reported * on the next write. */ if (error != 0 && write_len == 0) return (error); if (write_len == 0 && uio->uio_resid != 0) return (ENOSPC); return (0); } /* Process requests (descrs) via the periph-supplied CCBs */ static void targstart(struct cam_periph *periph, union ccb *start_ccb) { struct targ_softc *softc; struct targ_cmd_descr *descr, *next_descr; int error; softc = (struct targ_softc *)periph->softc; CAM_DEBUG(softc->path, CAM_DEBUG_PERIPH, ("targstart %p\n", start_ccb)); descr = TAILQ_FIRST(&softc->work_queue); if (descr == NULL) { xpt_release_ccb(start_ccb); } else { TAILQ_REMOVE(&softc->work_queue, descr, tqe); next_descr = TAILQ_FIRST(&softc->work_queue); /* Initiate a transaction using the descr and supplied CCB */ error = targusermerge(softc, descr, start_ccb); if (error == 0) error = targsendccb(softc, start_ccb, descr); if (error != 0) { xpt_print(periph->path, "targsendccb failed, err %d\n", error); xpt_release_ccb(start_ccb); suword(&descr->user_ccb->ccb_h.status, CAM_REQ_CMP_ERR); TAILQ_INSERT_TAIL(&softc->abort_queue, descr, tqe); notify_user(softc); } /* If we have more work to do, stay scheduled */ if (next_descr != NULL) xpt_schedule(periph, next_descr->priority); } } static int targusermerge(struct targ_softc *softc, struct targ_cmd_descr *descr, 
union ccb *ccb) { struct ccb_hdr *u_ccbh, *k_ccbh; size_t ccb_len; int error; u_ccbh = &descr->user_ccb->ccb_h; k_ccbh = &ccb->ccb_h; /* * There are some fields in the CCB header that need to be * preserved, the rest we get from the user ccb. (See xpt_merge_ccb) */ xpt_setup_ccb(k_ccbh, softc->path, descr->priority); k_ccbh->retry_count = fuword32(&u_ccbh->retry_count); k_ccbh->func_code = descr->func_code; k_ccbh->flags = fuword32(&u_ccbh->flags); k_ccbh->timeout = fuword32(&u_ccbh->timeout); ccb_len = targccblen(k_ccbh->func_code) - sizeof(struct ccb_hdr); error = copyin(u_ccbh + 1, k_ccbh + 1, ccb_len); if (error != 0) { k_ccbh->status = CAM_REQ_CMP_ERR; return (error); } /* Translate usermode abort_ccb pointer to its kernel counterpart */ if (k_ccbh->func_code == XPT_ABORT) { struct ccb_abort *cab; struct ccb_hdr *ccb_h; cab = (struct ccb_abort *)ccb; TAILQ_FOREACH(ccb_h, &softc->pending_ccb_queue, periph_links.tqe) { struct targ_cmd_descr *ab_descr; ab_descr = (struct targ_cmd_descr *)ccb_h->targ_descr; if (ab_descr->user_ccb == cab->abort_ccb) { CAM_DEBUG(softc->path, CAM_DEBUG_PERIPH, ("Changing abort for %p to %p\n", cab->abort_ccb, ccb_h)); cab->abort_ccb = (union ccb *)ccb_h; break; } } /* CCB not found, set appropriate status */ if (ccb_h == NULL) { k_ccbh->status = CAM_PATH_INVALID; error = ESRCH; } } return (error); } /* Build and send a kernel CCB formed from descr->user_ccb */ static int targsendccb(struct targ_softc *softc, union ccb *ccb, struct targ_cmd_descr *descr) { struct cam_periph_map_info *mapinfo; struct ccb_hdr *ccb_h; int error; ccb_h = &ccb->ccb_h; mapinfo = &descr->mapinfo; mapinfo->num_bufs_used = 0; /* * There's no way for the user to have a completion * function, so we put our own completion function in here. * We also stash in a reference to our descriptor so targreturnccb() * can find our mapping info. 
*/ ccb_h->cbfcnp = targdone; ccb_h->targ_descr = descr; if ((ccb_h->func_code == XPT_CONT_TARGET_IO) || (ccb_h->func_code == XPT_DEV_MATCH)) { error = cam_periph_mapmem(ccb, mapinfo, softc->maxio); /* * cam_periph_mapmem returned an error, we can't continue. * Return the error to the user. */ if (error) { ccb_h->status = CAM_REQ_CMP_ERR; mapinfo->num_bufs_used = 0; return (error); } } /* * Once queued on the pending CCB list, this CCB will be protected * by our error recovery handler. */ CAM_DEBUG(softc->path, CAM_DEBUG_PERIPH, ("sendccb %p\n", ccb)); if (XPT_FC_IS_QUEUED(ccb)) { TAILQ_INSERT_TAIL(&softc->pending_ccb_queue, ccb_h, periph_links.tqe); } xpt_action(ccb); return (0); } /* Completion routine for CCBs (called at splsoftcam) */ static void targdone(struct cam_periph *periph, union ccb *done_ccb) { struct targ_softc *softc; cam_status status; CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH, ("targdone %p\n", done_ccb)); softc = (struct targ_softc *)periph->softc; TAILQ_REMOVE(&softc->pending_ccb_queue, &done_ccb->ccb_h, periph_links.tqe); status = done_ccb->ccb_h.status & CAM_STATUS_MASK; /* If we're no longer enabled, throw away CCB */ if ((softc->state & TARG_STATE_LUN_ENABLED) == 0) { targfreeccb(softc, done_ccb); return; } /* abort_all_pending() waits for pending queue to be empty */ if (TAILQ_EMPTY(&softc->pending_ccb_queue)) wakeup(&softc->pending_ccb_queue); switch (done_ccb->ccb_h.func_code) { /* All FC_*_QUEUED CCBs go back to userland */ case XPT_IMMED_NOTIFY: case XPT_IMMEDIATE_NOTIFY: case XPT_ACCEPT_TARGET_IO: case XPT_CONT_TARGET_IO: TAILQ_INSERT_TAIL(&softc->user_ccb_queue, &done_ccb->ccb_h, periph_links.tqe); cam_periph_unlock(softc->periph); notify_user(softc); cam_periph_lock(softc->periph); break; default: panic("targdone: impossible xpt opcode %#x", done_ccb->ccb_h.func_code); /* NOTREACHED */ } } /* Return CCBs to the user from the user queue and abort queue */ static int targread(struct cdev *dev, struct uio *uio, int ioflag) { struct 
descr_queue *abort_queue; struct targ_cmd_descr *user_descr; struct targ_softc *softc; struct ccb_queue *user_queue; struct ccb_hdr *ccb_h; union ccb *user_ccb; int read_len, error; error = 0; read_len = 0; devfs_get_cdevpriv((void **)&softc); user_queue = &softc->user_ccb_queue; abort_queue = &softc->abort_queue; CAM_DEBUG(softc->path, CAM_DEBUG_PERIPH, ("targread\n")); /* If no data is available, wait or return immediately */ cam_periph_lock(softc->periph); ccb_h = TAILQ_FIRST(user_queue); user_descr = TAILQ_FIRST(abort_queue); while (ccb_h == NULL && user_descr == NULL) { if ((ioflag & IO_NDELAY) == 0) { error = cam_periph_sleep(softc->periph, user_queue, PRIBIO | PCATCH, "targrd", 0); ccb_h = TAILQ_FIRST(user_queue); user_descr = TAILQ_FIRST(abort_queue); if (error != 0) { if (error == ERESTART) { continue; } else { goto read_fail; } } } else { cam_periph_unlock(softc->periph); return (EAGAIN); } } /* Data is available so fill the user's buffer */ while (ccb_h != NULL) { struct targ_cmd_descr *descr; if (uio->uio_resid < sizeof(user_ccb)) break; TAILQ_REMOVE(user_queue, ccb_h, periph_links.tqe); descr = (struct targ_cmd_descr *)ccb_h->targ_descr; user_ccb = descr->user_ccb; CAM_DEBUG(softc->path, CAM_DEBUG_PERIPH, ("targread ccb %p (%p)\n", ccb_h, user_ccb)); error = targreturnccb(softc, (union ccb *)ccb_h); if (error != 0) goto read_fail; cam_periph_unlock(softc->periph); error = uiomove((caddr_t)&user_ccb, sizeof(user_ccb), uio); cam_periph_lock(softc->periph); if (error != 0) goto read_fail; read_len += sizeof(user_ccb); ccb_h = TAILQ_FIRST(user_queue); } /* Flush out any aborted descriptors */ while (user_descr != NULL) { if (uio->uio_resid < sizeof(user_ccb)) break; TAILQ_REMOVE(abort_queue, user_descr, tqe); user_ccb = user_descr->user_ccb; CAM_DEBUG(softc->path, CAM_DEBUG_PERIPH, ("targread aborted descr %p (%p)\n", user_descr, user_ccb)); suword(&user_ccb->ccb_h.status, CAM_REQ_ABORTED); cam_periph_unlock(softc->periph); error = 
uiomove((caddr_t)&user_ccb, sizeof(user_ccb), uio); cam_periph_lock(softc->periph); if (error != 0) goto read_fail; read_len += sizeof(user_ccb); user_descr = TAILQ_FIRST(abort_queue); } /* * If we've successfully read some amount of data, don't report an * error. If the error is persistent, it will be reported on the * next read(). */ if (read_len == 0 && uio->uio_resid != 0) error = ENOSPC; read_fail: cam_periph_unlock(softc->periph); return (error); } /* Copy completed ccb back to the user */ static int targreturnccb(struct targ_softc *softc, union ccb *ccb) { struct targ_cmd_descr *descr; struct ccb_hdr *u_ccbh; size_t ccb_len; int error; CAM_DEBUG(softc->path, CAM_DEBUG_PERIPH, ("targreturnccb %p\n", ccb)); descr = (struct targ_cmd_descr *)ccb->ccb_h.targ_descr; u_ccbh = &descr->user_ccb->ccb_h; /* Copy out the central portion of the ccb_hdr */ copyout(&ccb->ccb_h.retry_count, &u_ccbh->retry_count, offsetof(struct ccb_hdr, periph_priv) - offsetof(struct ccb_hdr, retry_count)); /* Copy out the rest of the ccb (after the ccb_hdr) */ ccb_len = targccblen(ccb->ccb_h.func_code) - sizeof(struct ccb_hdr); if (descr->mapinfo.num_bufs_used != 0) cam_periph_unmapmem(ccb, &descr->mapinfo); error = copyout(&ccb->ccb_h + 1, u_ccbh + 1, ccb_len); if (error != 0) { xpt_print(softc->path, "targreturnccb - CCB copyout failed (%d)\n", error); } /* Free CCB or send back to devq. 
*/ targfreeccb(softc, ccb); return (error); } static union ccb * targgetccb(struct targ_softc *softc, xpt_opcode type, int priority) { union ccb *ccb; int ccb_len; ccb_len = targccblen(type); - ccb = malloc(ccb_len, M_TARG, M_NOWAIT); + ccb = malloc(ccb_len, M_TARG, M_NOWAIT | M_ZERO); CAM_DEBUG(softc->path, CAM_DEBUG_PERIPH, ("getccb %p\n", ccb)); if (ccb == NULL) { return (ccb); } xpt_setup_ccb(&ccb->ccb_h, softc->path, priority); ccb->ccb_h.func_code = type; ccb->ccb_h.cbfcnp = targdone; ccb->ccb_h.targ_descr = targgetdescr(softc); if (ccb->ccb_h.targ_descr == NULL) { free (ccb, M_TARG); ccb = NULL; } return (ccb); } static void targfreeccb(struct targ_softc *softc, union ccb *ccb) { CAM_DEBUG_PRINT(CAM_DEBUG_PERIPH, ("targfreeccb descr %p and\n", ccb->ccb_h.targ_descr)); free(ccb->ccb_h.targ_descr, M_TARG); switch (ccb->ccb_h.func_code) { case XPT_ACCEPT_TARGET_IO: case XPT_IMMED_NOTIFY: case XPT_IMMEDIATE_NOTIFY: CAM_DEBUG_PRINT(CAM_DEBUG_PERIPH, ("freeing ccb %p\n", ccb)); free(ccb, M_TARG); break; default: /* Send back CCB if we got it from the periph */ if (XPT_FC_IS_QUEUED(ccb)) { CAM_DEBUG_PRINT(CAM_DEBUG_PERIPH, ("returning queued ccb %p\n", ccb)); xpt_release_ccb(ccb); } else { CAM_DEBUG_PRINT(CAM_DEBUG_PERIPH, ("freeing ccb %p\n", ccb)); free(ccb, M_TARG); } break; } } static struct targ_cmd_descr * targgetdescr(struct targ_softc *softc) { struct targ_cmd_descr *descr; descr = malloc(sizeof(*descr), M_TARG, M_NOWAIT); if (descr) { descr->mapinfo.num_bufs_used = 0; } return (descr); } static void targinit(void) { struct cdev *dev; /* Add symbolic link to targ0 for compatibility. */ dev = make_dev(&targ_cdevsw, 0, UID_ROOT, GID_WHEEL, 0600, "targ"); make_dev_alias(dev, "targ0"); } static void targasync(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg) { /* All events are handled in usermode by INOTs */ panic("targasync() called, should be an INOT instead"); } /* Cancel all pending requests and CCBs awaiting work. 
*/
static void
abort_all_pending(struct targ_softc *softc)
{
	struct targ_cmd_descr *descr;
	struct ccb_abort       cab;
	struct ccb_hdr	      *ccb_h;

	CAM_DEBUG(softc->path, CAM_DEBUG_PERIPH, ("abort_all_pending\n"));

	/* First abort the descriptors awaiting resources */
	while ((descr = TAILQ_FIRST(&softc->work_queue)) != NULL) {
		CAM_DEBUG(softc->path, CAM_DEBUG_PERIPH,
			  ("Aborting descr from workq %p\n", descr));
		TAILQ_REMOVE(&softc->work_queue, descr, tqe);
		/* Parked on abort_queue so read() can report them aborted. */
		TAILQ_INSERT_TAIL(&softc->abort_queue, descr, tqe);
	}

	/*
	 * Then abort all pending CCBs.
	 * targdone() will return the aborted CCB via user_ccb_queue
	 */
+	memset(&cab, 0, sizeof(cab));
	xpt_setup_ccb(&cab.ccb_h, softc->path, CAM_PRIORITY_NORMAL);
	cab.ccb_h.func_code = XPT_ABORT;
	/* Preseed status so the "aborted at least one" check below works. */
	cab.ccb_h.status = CAM_REQ_CMP_ERR;
	TAILQ_FOREACH(ccb_h, &softc->pending_ccb_queue, periph_links.tqe) {
		CAM_DEBUG(softc->path, CAM_DEBUG_PERIPH,
			  ("Aborting pending CCB %p\n", ccb_h));
		cab.abort_ccb = (union ccb *)ccb_h;
		xpt_action((union ccb *)&cab);
		if (cab.ccb_h.status != CAM_REQ_CMP) {
			xpt_print(cab.ccb_h.path,
			    "Unable to abort CCB, status %#x\n",
			    cab.ccb_h.status);
		}
	}

	/* If we aborted at least one pending CCB ok, wait for it. */
	if (cab.ccb_h.status == CAM_REQ_CMP) {
		cam_periph_sleep(softc->periph, &softc->pending_ccb_queue,
		       PRIBIO | PCATCH, "tgabrt", 0);
	}

	/* If we aborted anything from the work queue, wakeup user. */
	if (!TAILQ_EMPTY(&softc->user_ccb_queue) ||
	    !TAILQ_EMPTY(&softc->abort_queue)) {
		/* notify_user() must be called without the periph lock held. */
		cam_periph_unlock(softc->periph);
		notify_user(softc);
		cam_periph_lock(softc->periph);
	}
}

/* Notify the user that data is ready */
static void
notify_user(struct targ_softc *softc)
{
	/*
	 * Notify users sleeping via poll(), kqueue(), and
	 * blocking read().
*/ selwakeuppri(&softc->read_select, PRIBIO); KNOTE_UNLOCKED(&softc->read_select.si_note, 0); wakeup(&softc->user_ccb_queue); } /* Convert CAM status to errno values */ static int targcamstatus(cam_status status) { switch (status & CAM_STATUS_MASK) { case CAM_REQ_CMP: /* CCB request completed without error */ return (0); case CAM_REQ_INPROG: /* CCB request is in progress */ return (EINPROGRESS); case CAM_REQ_CMP_ERR: /* CCB request completed with an error */ return (EIO); case CAM_PROVIDE_FAIL: /* Unable to provide requested capability */ return (ENOTTY); case CAM_FUNC_NOTAVAIL: /* The requested function is not available */ return (ENOTSUP); case CAM_LUN_ALRDY_ENA: /* LUN is already enabled for target mode */ return (EADDRINUSE); case CAM_PATH_INVALID: /* Supplied Path ID is invalid */ case CAM_DEV_NOT_THERE: /* SCSI Device Not Installed/there */ return (ENOENT); case CAM_REQ_ABORTED: /* CCB request aborted by the host */ return (ECANCELED); case CAM_CMD_TIMEOUT: /* Command timeout */ return (ETIMEDOUT); case CAM_REQUEUE_REQ: /* Requeue to preserve transaction ordering */ return (EAGAIN); case CAM_REQ_INVALID: /* CCB request was invalid */ return (EINVAL); case CAM_RESRC_UNAVAIL: /* Resource Unavailable */ return (ENOMEM); case CAM_BUSY: /* CAM subsystem is busy */ case CAM_UA_ABORT: /* Unable to abort CCB request */ return (EBUSY); default: return (ENXIO); } } static size_t targccblen(xpt_opcode func_code) { int len; /* Codes we expect to see as a target */ switch (func_code) { case XPT_CONT_TARGET_IO: case XPT_SCSI_IO: len = sizeof(struct ccb_scsiio); break; case XPT_ACCEPT_TARGET_IO: len = sizeof(struct ccb_accept_tio); break; case XPT_IMMED_NOTIFY: len = sizeof(struct ccb_immed_notify); break; case XPT_IMMEDIATE_NOTIFY: len = sizeof(struct ccb_immediate_notify); break; case XPT_REL_SIMQ: len = sizeof(struct ccb_relsim); break; case XPT_PATH_INQ: len = sizeof(struct ccb_pathinq); break; case XPT_DEBUG: len = sizeof(struct ccb_debug); break; case XPT_ABORT: len = 
sizeof(struct ccb_abort); break; case XPT_EN_LUN: len = sizeof(struct ccb_en_lun); break; default: len = sizeof(union ccb); break; } return (len); }