Index: projects/physbio/sys/dev/mfi/mfi_cam.c =================================================================== --- projects/physbio/sys/dev/mfi/mfi_cam.c (revision 244035) +++ projects/physbio/sys/dev/mfi/mfi_cam.c (revision 244036) @@ -1,477 +1,473 @@ /*- * Copyright 2007 Scott Long * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include "opt_mfi.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include enum mfip_state { MFIP_STATE_NONE, MFIP_STATE_DETACH, MFIP_STATE_RESCAN }; struct mfip_softc { device_t dev; struct mfi_softc *mfi_sc; struct cam_devq *devq; struct cam_sim *sim; struct cam_path *path; enum mfip_state state; }; static int mfip_probe(device_t); static int mfip_attach(device_t); static int mfip_detach(device_t); static void mfip_cam_action(struct cam_sim *, union ccb *); static void mfip_cam_poll(struct cam_sim *); static void mfip_cam_rescan(struct mfi_softc *, uint32_t tid); static struct mfi_command * mfip_start(void *); static void mfip_done(struct mfi_command *cm); static int mfi_allow_disks = 0; TUNABLE_INT("hw.mfi.allow_cam_disk_passthrough", &mfi_allow_disks); SYSCTL_INT(_hw_mfi, OID_AUTO, allow_cam_disk_passthrough, CTLFLAG_RD, &mfi_allow_disks, 0, "event message locale"); static devclass_t mfip_devclass; static device_method_t mfip_methods[] = { DEVMETHOD(device_probe, mfip_probe), DEVMETHOD(device_attach, mfip_attach), DEVMETHOD(device_detach, mfip_detach), {0, 0} }; static driver_t mfip_driver = { "mfip", mfip_methods, sizeof(struct mfip_softc) }; DRIVER_MODULE(mfip, mfi, mfip_driver, mfip_devclass, 0, 0); MODULE_DEPEND(mfip, cam, 1, 1, 1); MODULE_DEPEND(mfip, mfi, 1, 1, 1); #define ccb_mfip_ptr sim_priv.entries[0].ptr static int mfip_probe(device_t dev) { device_set_desc(dev, "SCSI Passthrough Bus"); return (0); } static int mfip_attach(device_t dev) { struct mfip_softc *sc; struct mfi_softc *mfisc; sc = device_get_softc(dev); if (sc == NULL) return (EINVAL); mfisc = device_get_softc(device_get_parent(dev)); sc->dev = dev; sc->state = MFIP_STATE_NONE; sc->mfi_sc = mfisc; mfisc->mfi_cam_start = 
mfip_start; if ((sc->devq = cam_simq_alloc(MFI_SCSI_MAX_CMDS)) == NULL) return (ENOMEM); sc->sim = cam_sim_alloc(mfip_cam_action, mfip_cam_poll, "mfi", sc, device_get_unit(dev), &mfisc->mfi_io_lock, 1, MFI_SCSI_MAX_CMDS, sc->devq); if (sc->sim == NULL) { cam_simq_free(sc->devq); device_printf(dev, "CAM SIM attach failed\n"); return (EINVAL); } mfisc->mfi_cam_rescan_cb = mfip_cam_rescan; mtx_lock(&mfisc->mfi_io_lock); if (xpt_bus_register(sc->sim, dev, 0) != 0) { device_printf(dev, "XPT bus registration failed\n"); cam_sim_free(sc->sim, FALSE); cam_simq_free(sc->devq); mtx_unlock(&mfisc->mfi_io_lock); return (EINVAL); } mtx_unlock(&mfisc->mfi_io_lock); return (0); } static int mfip_detach(device_t dev) { struct mfip_softc *sc; sc = device_get_softc(dev); if (sc == NULL) return (EINVAL); mtx_lock(&sc->mfi_sc->mfi_io_lock); if (sc->state == MFIP_STATE_RESCAN) { mtx_unlock(&sc->mfi_sc->mfi_io_lock); return (EBUSY); } sc->state = MFIP_STATE_DETACH; mtx_unlock(&sc->mfi_sc->mfi_io_lock); sc->mfi_sc->mfi_cam_rescan_cb = NULL; if (sc->sim != NULL) { mtx_lock(&sc->mfi_sc->mfi_io_lock); xpt_bus_deregister(cam_sim_path(sc->sim)); cam_sim_free(sc->sim, FALSE); mtx_unlock(&sc->mfi_sc->mfi_io_lock); } if (sc->devq != NULL) cam_simq_free(sc->devq); return (0); } static void mfip_cam_action(struct cam_sim *sim, union ccb *ccb) { struct mfip_softc *sc = cam_sim_softc(sim); struct mfi_softc *mfisc = sc->mfi_sc; mtx_assert(&mfisc->mfi_io_lock, MA_OWNED); switch (ccb->ccb_h.func_code) { case XPT_PATH_INQ: { struct ccb_pathinq *cpi = &ccb->cpi; cpi->version_num = 1; cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16; cpi->target_sprt = 0; cpi->hba_misc = PIM_NOBUSRESET|PIM_SEQSCAN; cpi->hba_eng_cnt = 0; cpi->max_target = MFI_SCSI_MAX_TARGETS; cpi->max_lun = MFI_SCSI_MAX_LUNS; cpi->initiator_id = MFI_SCSI_INITIATOR_ID; strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); strncpy(cpi->hba_vid, "LSI", HBA_IDLEN); strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); cpi->unit_number = 
cam_sim_unit(sim); cpi->bus_id = cam_sim_bus(sim); cpi->base_transfer_speed = 150000; cpi->transport = XPORT_SAS; cpi->transport_version = 0; cpi->protocol = PROTO_SCSI; cpi->protocol_version = SCSI_REV_2; cpi->ccb_h.status = CAM_REQ_CMP; break; } case XPT_RESET_BUS: ccb->ccb_h.status = CAM_REQ_CMP; break; case XPT_RESET_DEV: ccb->ccb_h.status = CAM_REQ_CMP; break; case XPT_GET_TRAN_SETTINGS: { struct ccb_trans_settings_sas *sas = &ccb->cts.xport_specific.sas; ccb->cts.protocol = PROTO_SCSI; ccb->cts.protocol_version = SCSI_REV_2; ccb->cts.transport = XPORT_SAS; ccb->cts.transport_version = 0; sas->valid &= ~CTS_SAS_VALID_SPEED; sas->bitrate = 150000; ccb->ccb_h.status = CAM_REQ_CMP; break; } case XPT_SET_TRAN_SETTINGS: ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; break; case XPT_SCSI_IO: { struct ccb_hdr *ccbh = &ccb->ccb_h; struct ccb_scsiio *csio = &ccb->csio; ccbh->status = CAM_REQ_INPROG; if (csio->cdb_len > MFI_SCSI_MAX_CDB_LEN) { ccbh->status = CAM_REQ_INVALID; break; } if ((ccbh->flags & CAM_DIR_MASK) != CAM_DIR_NONE) { - if (ccbh->flags & CAM_DATA_PHYS) { - ccbh->status = CAM_REQ_INVALID; - break; - } - if (ccbh->flags & CAM_SCATTER_VALID) { + if ((ccbh->flags & CAM_DATA_MASK) != CAM_DATA_VADDR) { ccbh->status = CAM_REQ_INVALID; break; } } ccbh->ccb_mfip_ptr = sc; TAILQ_INSERT_TAIL(&mfisc->mfi_cam_ccbq, ccbh, sim_links.tqe); mfi_startio(mfisc); return; } default: ccb->ccb_h.status = CAM_REQ_INVALID; break; } xpt_done(ccb); return; } static void mfip_cam_rescan(struct mfi_softc *sc, uint32_t tid) { union ccb *ccb; struct mfip_softc *camsc; struct cam_sim *sim; device_t mfip_dev; mtx_lock(&Giant); mfip_dev = device_find_child(sc->mfi_dev, "mfip", -1); mtx_unlock(&Giant); if (mfip_dev == NULL) { device_printf(sc->mfi_dev, "Couldn't find mfip child device!\n"); return; } mtx_lock(&sc->mfi_io_lock); camsc = device_get_softc(mfip_dev); if (camsc->state == MFIP_STATE_DETACH) { mtx_unlock(&sc->mfi_io_lock); return; } camsc->state = MFIP_STATE_RESCAN; 
mtx_unlock(&sc->mfi_io_lock); ccb = xpt_alloc_ccb_nowait(); if (ccb == NULL) { device_printf(sc->mfi_dev, "Cannot allocate ccb for bus rescan.\n"); return; } sim = camsc->sim; if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, cam_sim_path(sim), tid, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { xpt_free_ccb(ccb); device_printf(sc->mfi_dev, "Cannot create path for bus rescan.\n"); return; } xpt_rescan(ccb); mtx_lock(&sc->mfi_io_lock); camsc->state = MFIP_STATE_NONE; mtx_unlock(&sc->mfi_io_lock); } static struct mfi_command * mfip_start(void *data) { union ccb *ccb = data; struct ccb_hdr *ccbh = &ccb->ccb_h; struct ccb_scsiio *csio = &ccb->csio; struct mfip_softc *sc; struct mfi_pass_frame *pt; struct mfi_command *cm; uint32_t context = 0; sc = ccbh->ccb_mfip_ptr; if ((cm = mfi_dequeue_free(sc->mfi_sc)) == NULL) return (NULL); /* Zero out the MFI frame */ context = cm->cm_frame->header.context; bzero(cm->cm_frame, sizeof(union mfi_frame)); cm->cm_frame->header.context = context; pt = &cm->cm_frame->pass; pt->header.cmd = MFI_CMD_PD_SCSI_IO; pt->header.cmd_status = 0; pt->header.scsi_status = 0; pt->header.target_id = ccbh->target_id; pt->header.lun_id = ccbh->target_lun; pt->header.flags = 0; pt->header.timeout = 0; pt->header.data_len = csio->dxfer_len; pt->header.sense_len = MFI_SENSE_LEN; pt->header.cdb_len = csio->cdb_len; pt->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr; pt->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32); if (ccbh->flags & CAM_CDB_POINTER) bcopy(csio->cdb_io.cdb_ptr, &pt->cdb[0], csio->cdb_len); else bcopy(csio->cdb_io.cdb_bytes, &pt->cdb[0], csio->cdb_len); cm->cm_complete = mfip_done; cm->cm_private = ccb; cm->cm_sg = &pt->sgl; cm->cm_total_frame_size = MFI_PASS_FRAME_SIZE; cm->cm_data = ccb; cm->cm_len = csio->dxfer_len; switch (ccbh->flags & CAM_DIR_MASK) { case CAM_DIR_IN: cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_CCB; break; case CAM_DIR_OUT: cm->cm_flags = MFI_CMD_DATAOUT | MFI_CMD_CCB; break; case CAM_DIR_NONE: default: 
cm->cm_data = NULL; cm->cm_len = 0; cm->cm_flags = 0; break; } TAILQ_REMOVE(&sc->mfi_sc->mfi_cam_ccbq, ccbh, sim_links.tqe); return (cm); } static void mfip_done(struct mfi_command *cm) { union ccb *ccb = cm->cm_private; struct ccb_hdr *ccbh = &ccb->ccb_h; struct ccb_scsiio *csio = &ccb->csio; struct mfip_softc *sc; struct mfi_pass_frame *pt; sc = ccbh->ccb_mfip_ptr; pt = &cm->cm_frame->pass; switch (pt->header.cmd_status) { case MFI_STAT_OK: { uint8_t command, device; ccbh->status = CAM_REQ_CMP; csio->scsi_status = pt->header.scsi_status; if (ccbh->flags & CAM_CDB_POINTER) command = csio->cdb_io.cdb_ptr[0]; else command = csio->cdb_io.cdb_bytes[0]; if (command == INQUIRY) { device = csio->data_ptr[0] & 0x1f; if ((!mfi_allow_disks && device == T_DIRECT) || (device == T_PROCESSOR)) csio->data_ptr[0] = (csio->data_ptr[0] & 0xe0) | T_NODEVICE; } break; } case MFI_STAT_SCSI_DONE_WITH_ERROR: { int sense_len; ccbh->status = CAM_SCSI_STATUS_ERROR | CAM_AUTOSNS_VALID; csio->scsi_status = pt->header.scsi_status; if (pt->header.sense_len < csio->sense_len) csio->sense_resid = csio->sense_len - pt->header.sense_len; else csio->sense_resid = 0; sense_len = min(pt->header.sense_len, sizeof(struct scsi_sense_data)); bzero(&csio->sense_data, sizeof(struct scsi_sense_data)); bcopy(&cm->cm_sense->data[0], &csio->sense_data, sense_len); break; } case MFI_STAT_DEVICE_NOT_FOUND: ccbh->status = CAM_SEL_TIMEOUT; break; case MFI_STAT_SCSI_IO_FAILED: ccbh->status = CAM_REQ_CMP_ERR; csio->scsi_status = pt->header.scsi_status; break; default: ccbh->status = CAM_REQ_CMP_ERR; csio->scsi_status = pt->header.scsi_status; break; } mfi_release_command(cm); xpt_done(ccb); } static void mfip_cam_poll(struct cam_sim *sim) { struct mfip_softc *sc = cam_sim_softc(sim); struct mfi_softc *mfisc = sc->mfi_sc; mfisc->mfi_intr_ptr(mfisc); } Index: projects/physbio/sys/dev/virtio/scsi/virtio_scsi.c =================================================================== --- 
projects/physbio/sys/dev/virtio/scsi/virtio_scsi.c (revision 244035) +++ projects/physbio/sys/dev/virtio/scsi/virtio_scsi.c (revision 244036) @@ -1,2369 +1,2372 @@ /*- * Copyright (c) 2012, Bryan Venteicher * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* Driver for VirtIO SCSI devices. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "virtio_if.h" static int vtscsi_modevent(module_t, int, void *); static int vtscsi_probe(device_t); static int vtscsi_attach(device_t); static int vtscsi_detach(device_t); static int vtscsi_suspend(device_t); static int vtscsi_resume(device_t); static void vtscsi_negotiate_features(struct vtscsi_softc *); static int vtscsi_maximum_segments(struct vtscsi_softc *, int); static int vtscsi_alloc_virtqueues(struct vtscsi_softc *); static void vtscsi_write_device_config(struct vtscsi_softc *); static int vtscsi_reinit(struct vtscsi_softc *); static int vtscsi_alloc_cam(struct vtscsi_softc *); static int vtscsi_register_cam(struct vtscsi_softc *); static void vtscsi_free_cam(struct vtscsi_softc *); static void vtscsi_cam_async(void *, uint32_t, struct cam_path *, void *); static int vtscsi_register_async(struct vtscsi_softc *); static void vtscsi_deregister_async(struct vtscsi_softc *); static void vtscsi_cam_action(struct cam_sim *, union ccb *); static void vtscsi_cam_poll(struct cam_sim *); static void vtscsi_cam_scsi_io(struct vtscsi_softc *, struct cam_sim *, union ccb *); static void vtscsi_cam_get_tran_settings(struct vtscsi_softc *, union ccb *); static void vtscsi_cam_reset_bus(struct vtscsi_softc *, union ccb *); static void vtscsi_cam_reset_dev(struct vtscsi_softc *, union ccb *); static void vtscsi_cam_abort(struct vtscsi_softc *, union ccb *); static void vtscsi_cam_path_inquiry(struct vtscsi_softc *, struct cam_sim *, union ccb *); static int vtscsi_sg_append_scsi_buf(struct vtscsi_softc *, struct sglist *, struct ccb_scsiio *); static int vtscsi_fill_scsi_cmd_sglist(struct vtscsi_softc *, struct vtscsi_request *, int *, int *); static int 
vtscsi_execute_scsi_cmd(struct vtscsi_softc *, struct vtscsi_request *); static int vtscsi_start_scsi_cmd(struct vtscsi_softc *, union ccb *); static void vtscsi_complete_abort_timedout_scsi_cmd(struct vtscsi_softc *, struct vtscsi_request *); static int vtscsi_abort_timedout_scsi_cmd(struct vtscsi_softc *, struct vtscsi_request *); static void vtscsi_timedout_scsi_cmd(void *); static cam_status vtscsi_scsi_cmd_cam_status(struct virtio_scsi_cmd_resp *); static cam_status vtscsi_complete_scsi_cmd_response(struct vtscsi_softc *, struct ccb_scsiio *, struct virtio_scsi_cmd_resp *); static void vtscsi_complete_scsi_cmd(struct vtscsi_softc *, struct vtscsi_request *); static void vtscsi_poll_ctrl_req(struct vtscsi_softc *, struct vtscsi_request *); static int vtscsi_execute_ctrl_req(struct vtscsi_softc *, struct vtscsi_request *, struct sglist *, int, int, int); static void vtscsi_complete_abort_task_cmd(struct vtscsi_softc *c, struct vtscsi_request *); static int vtscsi_execute_abort_task_cmd(struct vtscsi_softc *, struct vtscsi_request *); static int vtscsi_execute_reset_dev_cmd(struct vtscsi_softc *, struct vtscsi_request *); static void vtscsi_get_request_lun(uint8_t lun[], target_id_t *, lun_id_t *); static void vtscsi_set_request_lun(struct ccb_hdr *, uint8_t []); static void vtscsi_init_scsi_cmd_req(struct ccb_scsiio *, struct virtio_scsi_cmd_req *); static void vtscsi_init_ctrl_tmf_req(struct ccb_hdr *, uint32_t, uintptr_t, struct virtio_scsi_ctrl_tmf_req *); static void vtscsi_freeze_simq(struct vtscsi_softc *, int); static int vtscsi_thaw_simq(struct vtscsi_softc *, int); static void vtscsi_announce(struct vtscsi_softc *, uint32_t, target_id_t, lun_id_t); static void vtscsi_execute_rescan(struct vtscsi_softc *, target_id_t, lun_id_t); static void vtscsi_execute_rescan_bus(struct vtscsi_softc *); static void vtscsi_handle_event(struct vtscsi_softc *, struct virtio_scsi_event *); static int vtscsi_enqueue_event_buf(struct vtscsi_softc *, struct virtio_scsi_event 
*); static int vtscsi_init_event_vq(struct vtscsi_softc *); static void vtscsi_reinit_event_vq(struct vtscsi_softc *); static void vtscsi_drain_event_vq(struct vtscsi_softc *); static void vtscsi_complete_vqs_locked(struct vtscsi_softc *); static void vtscsi_complete_vqs(struct vtscsi_softc *); static void vtscsi_drain_vqs(struct vtscsi_softc *); static void vtscsi_cancel_request(struct vtscsi_softc *, struct vtscsi_request *); static void vtscsi_drain_vq(struct vtscsi_softc *, struct virtqueue *); static void vtscsi_stop(struct vtscsi_softc *); static int vtscsi_reset_bus(struct vtscsi_softc *); static void vtscsi_init_request(struct vtscsi_softc *, struct vtscsi_request *); static int vtscsi_alloc_requests(struct vtscsi_softc *); static void vtscsi_free_requests(struct vtscsi_softc *); static void vtscsi_enqueue_request(struct vtscsi_softc *, struct vtscsi_request *); static struct vtscsi_request * vtscsi_dequeue_request(struct vtscsi_softc *); static void vtscsi_complete_request(struct vtscsi_request *); static void vtscsi_complete_vq(struct vtscsi_softc *, struct virtqueue *); static void vtscsi_control_vq_task(void *, int); static void vtscsi_event_vq_task(void *, int); static void vtscsi_request_vq_task(void *, int); static int vtscsi_control_vq_intr(void *); static int vtscsi_event_vq_intr(void *); static int vtscsi_request_vq_intr(void *); static void vtscsi_disable_vqs_intr(struct vtscsi_softc *); static void vtscsi_enable_vqs_intr(struct vtscsi_softc *); static void vtscsi_get_tunables(struct vtscsi_softc *); static void vtscsi_add_sysctl(struct vtscsi_softc *); static void vtscsi_printf_req(struct vtscsi_request *, const char *, const char *, ...); /* Global tunables. */ /* * The current QEMU VirtIO SCSI implementation does not cancel in-flight * IO during virtio_stop(). So in-flight requests still complete after the * device reset. We would have to wait for all the in-flight IO to complete, * which defeats the typical purpose of a bus reset. 
We could simulate the * bus reset with either I_T_NEXUS_RESET of all the targets, or with * LOGICAL_UNIT_RESET of all the LUNs (assuming there is space in the * control virtqueue). But this isn't very useful if things really go off * the rails, so default to disabled for now. */ static int vtscsi_bus_reset_disable = 1; TUNABLE_INT("hw.vtscsi.bus_reset_disable", &vtscsi_bus_reset_disable); static struct virtio_feature_desc vtscsi_feature_desc[] = { { VIRTIO_SCSI_F_INOUT, "InOut" }, { VIRTIO_SCSI_F_HOTPLUG, "Hotplug" }, { 0, NULL } }; static device_method_t vtscsi_methods[] = { /* Device methods. */ DEVMETHOD(device_probe, vtscsi_probe), DEVMETHOD(device_attach, vtscsi_attach), DEVMETHOD(device_detach, vtscsi_detach), DEVMETHOD(device_suspend, vtscsi_suspend), DEVMETHOD(device_resume, vtscsi_resume), DEVMETHOD_END }; static driver_t vtscsi_driver = { "vtscsi", vtscsi_methods, sizeof(struct vtscsi_softc) }; static devclass_t vtscsi_devclass; DRIVER_MODULE(virtio_scsi, virtio_pci, vtscsi_driver, vtscsi_devclass, vtscsi_modevent, 0); MODULE_VERSION(virtio_scsi, 1); MODULE_DEPEND(virtio_scsi, virtio, 1, 1, 1); MODULE_DEPEND(virtio_scsi, cam, 1, 1, 1); static int vtscsi_modevent(module_t mod, int type, void *unused) { int error; switch (type) { case MOD_LOAD: case MOD_QUIESCE: case MOD_UNLOAD: case MOD_SHUTDOWN: error = 0; break; default: error = EOPNOTSUPP; break; } return (error); } static int vtscsi_probe(device_t dev) { if (virtio_get_device_type(dev) != VIRTIO_ID_SCSI) return (ENXIO); device_set_desc(dev, "VirtIO SCSI Adapter"); return (BUS_PROBE_DEFAULT); } static int vtscsi_attach(device_t dev) { struct vtscsi_softc *sc; struct virtio_scsi_config scsicfg; int error; sc = device_get_softc(dev); sc->vtscsi_dev = dev; VTSCSI_LOCK_INIT(sc, device_get_nameunit(dev)); TAILQ_INIT(&sc->vtscsi_req_free); vtscsi_get_tunables(sc); vtscsi_add_sysctl(sc); virtio_set_feature_desc(dev, vtscsi_feature_desc); vtscsi_negotiate_features(sc); if (virtio_with_feature(dev, 
VIRTIO_RING_F_INDIRECT_DESC)) sc->vtscsi_flags |= VTSCSI_FLAG_INDIRECT; if (virtio_with_feature(dev, VIRTIO_SCSI_F_INOUT)) sc->vtscsi_flags |= VTSCSI_FLAG_BIDIRECTIONAL; if (virtio_with_feature(dev, VIRTIO_SCSI_F_HOTPLUG)) sc->vtscsi_flags |= VTSCSI_FLAG_HOTPLUG; virtio_read_device_config(dev, 0, &scsicfg, sizeof(struct virtio_scsi_config)); sc->vtscsi_max_channel = scsicfg.max_channel; sc->vtscsi_max_target = scsicfg.max_target; sc->vtscsi_max_lun = scsicfg.max_lun; sc->vtscsi_event_buf_size = scsicfg.event_info_size; vtscsi_write_device_config(sc); sc->vtscsi_max_nsegs = vtscsi_maximum_segments(sc, scsicfg.seg_max); sc->vtscsi_sglist = sglist_alloc(sc->vtscsi_max_nsegs, M_NOWAIT); if (sc->vtscsi_sglist == NULL) { error = ENOMEM; device_printf(dev, "cannot allocate sglist\n"); goto fail; } error = vtscsi_alloc_virtqueues(sc); if (error) { device_printf(dev, "cannot allocate virtqueues\n"); goto fail; } error = vtscsi_init_event_vq(sc); if (error) { device_printf(dev, "cannot populate the eventvq\n"); goto fail; } error = vtscsi_alloc_requests(sc); if (error) { device_printf(dev, "cannot allocate requests\n"); goto fail; } error = vtscsi_alloc_cam(sc); if (error) { device_printf(dev, "cannot allocate CAM structures\n"); goto fail; } TASK_INIT(&sc->vtscsi_control_intr_task, 0, vtscsi_control_vq_task, sc); TASK_INIT(&sc->vtscsi_event_intr_task, 0, vtscsi_event_vq_task, sc); TASK_INIT(&sc->vtscsi_request_intr_task, 0, vtscsi_request_vq_task, sc); sc->vtscsi_tq = taskqueue_create_fast("vtscsi_taskq", M_NOWAIT, taskqueue_thread_enqueue, &sc->vtscsi_tq); if (sc->vtscsi_tq == NULL) { error = ENOMEM; device_printf(dev, "cannot allocate taskqueue\n"); goto fail; } error = taskqueue_start_threads(&sc->vtscsi_tq, 1, PI_DISK, "%s taskq", device_get_nameunit(dev)); if (error) { device_printf(dev, "cannot start taskqueue threads\n"); goto fail; } error = virtio_setup_intr(dev, INTR_TYPE_CAM); if (error) { device_printf(dev, "cannot setup virtqueue interrupts\n"); goto fail; } 
vtscsi_enable_vqs_intr(sc); /* * Register with CAM after interrupts are enabled so we will get * notified of the probe responses. */ error = vtscsi_register_cam(sc); if (error) { device_printf(dev, "cannot register with CAM\n"); goto fail; } fail: if (error) vtscsi_detach(dev); return (error); } static int vtscsi_detach(device_t dev) { struct vtscsi_softc *sc; sc = device_get_softc(dev); VTSCSI_LOCK(sc); sc->vtscsi_flags |= VTSCSI_FLAG_DETACH; if (device_is_attached(dev)) vtscsi_stop(sc); VTSCSI_UNLOCK(sc); if (sc->vtscsi_tq != NULL) { taskqueue_drain(sc->vtscsi_tq, &sc->vtscsi_control_intr_task); taskqueue_drain(sc->vtscsi_tq, &sc->vtscsi_event_intr_task); taskqueue_drain(sc->vtscsi_tq, &sc->vtscsi_request_intr_task); taskqueue_free(sc->vtscsi_tq); sc->vtscsi_tq = NULL; } vtscsi_complete_vqs(sc); vtscsi_drain_vqs(sc); vtscsi_free_cam(sc); vtscsi_free_requests(sc); if (sc->vtscsi_sglist != NULL) { sglist_free(sc->vtscsi_sglist); sc->vtscsi_sglist = NULL; } VTSCSI_LOCK_DESTROY(sc); return (0); } static int vtscsi_suspend(device_t dev) { return (0); } static int vtscsi_resume(device_t dev) { return (0); } static void vtscsi_negotiate_features(struct vtscsi_softc *sc) { device_t dev; uint64_t features; dev = sc->vtscsi_dev; features = virtio_negotiate_features(dev, VTSCSI_FEATURES); sc->vtscsi_features = features; } static int vtscsi_maximum_segments(struct vtscsi_softc *sc, int seg_max) { int nsegs; nsegs = VTSCSI_MIN_SEGMENTS; if (seg_max > 0) { nsegs += MIN(seg_max, MAXPHYS / PAGE_SIZE + 1); if (sc->vtscsi_flags & VTSCSI_FLAG_INDIRECT) nsegs = MIN(nsegs, VIRTIO_MAX_INDIRECT); } else nsegs += 1; return (nsegs); } static int vtscsi_alloc_virtqueues(struct vtscsi_softc *sc) { device_t dev; struct vq_alloc_info vq_info[3]; int nvqs; dev = sc->vtscsi_dev; nvqs = 3; VQ_ALLOC_INFO_INIT(&vq_info[0], 0, vtscsi_control_vq_intr, sc, &sc->vtscsi_control_vq, "%s control", device_get_nameunit(dev)); VQ_ALLOC_INFO_INIT(&vq_info[1], 0, vtscsi_event_vq_intr, sc, 
&sc->vtscsi_event_vq, "%s event", device_get_nameunit(dev)); VQ_ALLOC_INFO_INIT(&vq_info[2], sc->vtscsi_max_nsegs, vtscsi_request_vq_intr, sc, &sc->vtscsi_request_vq, "%s request", device_get_nameunit(dev)); return (virtio_alloc_virtqueues(dev, 0, nvqs, vq_info)); } static void vtscsi_write_device_config(struct vtscsi_softc *sc) { virtio_write_dev_config_4(sc->vtscsi_dev, offsetof(struct virtio_scsi_config, sense_size), VIRTIO_SCSI_SENSE_SIZE); /* * This is the size in the virtio_scsi_cmd_req structure. Note * this value (32) is larger than the maximum CAM CDB size (16). */ virtio_write_dev_config_4(sc->vtscsi_dev, offsetof(struct virtio_scsi_config, cdb_size), VIRTIO_SCSI_CDB_SIZE); } static int vtscsi_reinit(struct vtscsi_softc *sc) { device_t dev; int error; dev = sc->vtscsi_dev; error = virtio_reinit(dev, sc->vtscsi_features); if (error == 0) { vtscsi_write_device_config(sc); vtscsi_reinit_event_vq(sc); virtio_reinit_complete(dev); vtscsi_enable_vqs_intr(sc); } vtscsi_dprintf(sc, VTSCSI_TRACE, "error=%d\n", error); return (error); } static int vtscsi_alloc_cam(struct vtscsi_softc *sc) { device_t dev; struct cam_devq *devq; int openings; dev = sc->vtscsi_dev; openings = sc->vtscsi_nrequests - VTSCSI_RESERVED_REQUESTS; devq = cam_simq_alloc(openings); if (devq == NULL) { device_printf(dev, "cannot allocate SIM queue\n"); return (ENOMEM); } sc->vtscsi_sim = cam_sim_alloc(vtscsi_cam_action, vtscsi_cam_poll, "vtscsi", sc, device_get_unit(dev), VTSCSI_MTX(sc), 1, openings, devq); if (sc->vtscsi_sim == NULL) { cam_simq_free(devq); device_printf(dev, "cannot allocate SIM\n"); return (ENOMEM); } return (0); } static int vtscsi_register_cam(struct vtscsi_softc *sc) { device_t dev; int registered, error; dev = sc->vtscsi_dev; registered = 0; VTSCSI_LOCK(sc); if (xpt_bus_register(sc->vtscsi_sim, dev, 0) != CAM_SUCCESS) { error = ENOMEM; device_printf(dev, "cannot register XPT bus\n"); goto fail; } registered = 1; if (xpt_create_path(&sc->vtscsi_path, NULL, 
cam_sim_path(sc->vtscsi_sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { error = ENOMEM; device_printf(dev, "cannot create bus path\n"); goto fail; } VTSCSI_UNLOCK(sc); /* * The async register apparently needs to be done without * the lock held, otherwise it can recurse on the lock. */ if (vtscsi_register_async(sc) != CAM_REQ_CMP) { error = EIO; device_printf(dev, "cannot register async callback\n"); VTSCSI_LOCK(sc); goto fail; } return (0); fail: if (sc->vtscsi_path != NULL) { xpt_free_path(sc->vtscsi_path); sc->vtscsi_path = NULL; } if (registered != 0) xpt_bus_deregister(cam_sim_path(sc->vtscsi_sim)); VTSCSI_UNLOCK(sc); return (error); } static void vtscsi_free_cam(struct vtscsi_softc *sc) { VTSCSI_LOCK(sc); if (sc->vtscsi_path != NULL) { vtscsi_deregister_async(sc); xpt_free_path(sc->vtscsi_path); sc->vtscsi_path = NULL; xpt_bus_deregister(cam_sim_path(sc->vtscsi_sim)); } if (sc->vtscsi_sim != NULL) { cam_sim_free(sc->vtscsi_sim, 1); sc->vtscsi_sim = NULL; } VTSCSI_UNLOCK(sc); } static void vtscsi_cam_async(void *cb_arg, uint32_t code, struct cam_path *path, void *arg) { struct cam_sim *sim; struct vtscsi_softc *sc; sim = cb_arg; sc = cam_sim_softc(sim); vtscsi_dprintf(sc, VTSCSI_TRACE, "code=%u\n", code); /* * TODO Once QEMU supports event reporting, we should * (un)subscribe to events here. 
*/ switch (code) { case AC_FOUND_DEVICE: break; case AC_LOST_DEVICE: break; } } static int vtscsi_register_async(struct vtscsi_softc *sc) { struct ccb_setasync csa; VTSCSI_LOCK_NOTOWNED(sc); xpt_setup_ccb(&csa.ccb_h, sc->vtscsi_path, 5); csa.ccb_h.func_code = XPT_SASYNC_CB; csa.event_enable = AC_LOST_DEVICE | AC_FOUND_DEVICE; csa.callback = vtscsi_cam_async; csa.callback_arg = sc->vtscsi_sim; xpt_action((union ccb *) &csa); return (csa.ccb_h.status); } static void vtscsi_deregister_async(struct vtscsi_softc *sc) { struct ccb_setasync csa; xpt_setup_ccb(&csa.ccb_h, sc->vtscsi_path, 5); csa.ccb_h.func_code = XPT_SASYNC_CB; csa.event_enable = 0; csa.callback = vtscsi_cam_async; csa.callback_arg = sc->vtscsi_sim; xpt_action((union ccb *) &csa); } static void vtscsi_cam_action(struct cam_sim *sim, union ccb *ccb) { struct vtscsi_softc *sc; struct ccb_hdr *ccbh; sc = cam_sim_softc(sim); ccbh = &ccb->ccb_h; VTSCSI_LOCK_OWNED(sc); if (sc->vtscsi_flags & VTSCSI_FLAG_DETACH) { /* * The VTSCSI_MTX is briefly dropped between setting * VTSCSI_FLAG_DETACH and deregistering with CAM, so * drop any CCBs that come in during that window. 
*/ ccbh->status = CAM_NO_HBA; xpt_done(ccb); return; } switch (ccbh->func_code) { case XPT_SCSI_IO: vtscsi_cam_scsi_io(sc, sim, ccb); break; case XPT_SET_TRAN_SETTINGS: ccbh->status = CAM_FUNC_NOTAVAIL; xpt_done(ccb); break; case XPT_GET_TRAN_SETTINGS: vtscsi_cam_get_tran_settings(sc, ccb); break; case XPT_RESET_BUS: vtscsi_cam_reset_bus(sc, ccb); break; case XPT_RESET_DEV: vtscsi_cam_reset_dev(sc, ccb); break; case XPT_ABORT: vtscsi_cam_abort(sc, ccb); break; case XPT_CALC_GEOMETRY: cam_calc_geometry(&ccb->ccg, 1); xpt_done(ccb); break; case XPT_PATH_INQ: vtscsi_cam_path_inquiry(sc, sim, ccb); break; default: vtscsi_dprintf(sc, VTSCSI_ERROR, "invalid ccb=%p func=%#x\n", ccb, ccbh->func_code); ccbh->status = CAM_REQ_INVALID; xpt_done(ccb); break; } } static void vtscsi_cam_poll(struct cam_sim *sim) { struct vtscsi_softc *sc; sc = cam_sim_softc(sim); vtscsi_complete_vqs_locked(sc); } static void vtscsi_cam_scsi_io(struct vtscsi_softc *sc, struct cam_sim *sim, union ccb *ccb) { struct ccb_hdr *ccbh; struct ccb_scsiio *csio; int error; ccbh = &ccb->ccb_h; csio = &ccb->csio; if (csio->cdb_len > VIRTIO_SCSI_CDB_SIZE) { error = EINVAL; ccbh->status = CAM_REQ_INVALID; goto done; } if ((ccbh->flags & CAM_DIR_MASK) == CAM_DIR_BOTH && (sc->vtscsi_flags & VTSCSI_FLAG_BIDIRECTIONAL) == 0) { error = EINVAL; ccbh->status = CAM_REQ_INVALID; goto done; } error = vtscsi_start_scsi_cmd(sc, ccb); done: if (error) { vtscsi_dprintf(sc, VTSCSI_ERROR, "error=%d ccb=%p status=%#x\n", error, ccb, ccbh->status); xpt_done(ccb); } } static void vtscsi_cam_get_tran_settings(struct vtscsi_softc *sc, union ccb *ccb) { struct ccb_trans_settings *cts; struct ccb_trans_settings_scsi *scsi; cts = &ccb->cts; scsi = &cts->proto_specific.scsi; cts->protocol = PROTO_SCSI; cts->protocol_version = SCSI_REV_SPC3; cts->transport = XPORT_SAS; cts->transport_version = 0; scsi->valid = CTS_SCSI_VALID_TQ; scsi->flags = CTS_SCSI_FLAGS_TAG_ENB; ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); } static void 
vtscsi_cam_reset_bus(struct vtscsi_softc *sc, union ccb *ccb) { int error; error = vtscsi_reset_bus(sc); if (error == 0) ccb->ccb_h.status = CAM_REQ_CMP; else ccb->ccb_h.status = CAM_REQ_CMP_ERR; vtscsi_dprintf(sc, VTSCSI_TRACE, "error=%d ccb=%p status=%#x\n", error, ccb, ccb->ccb_h.status); xpt_done(ccb); } static void vtscsi_cam_reset_dev(struct vtscsi_softc *sc, union ccb *ccb) { struct ccb_hdr *ccbh; struct vtscsi_request *req; int error; ccbh = &ccb->ccb_h; req = vtscsi_dequeue_request(sc); if (req == NULL) { error = EAGAIN; vtscsi_freeze_simq(sc, VTSCSI_REQUEST); goto fail; } req->vsr_ccb = ccb; error = vtscsi_execute_reset_dev_cmd(sc, req); if (error == 0) return; vtscsi_enqueue_request(sc, req); fail: vtscsi_dprintf(sc, VTSCSI_ERROR, "error=%d req=%p ccb=%p\n", error, req, ccb); if (error == EAGAIN) ccbh->status = CAM_RESRC_UNAVAIL; else ccbh->status = CAM_REQ_CMP_ERR; xpt_done(ccb); } static void vtscsi_cam_abort(struct vtscsi_softc *sc, union ccb *ccb) { struct vtscsi_request *req; struct ccb_hdr *ccbh; int error; ccbh = &ccb->ccb_h; req = vtscsi_dequeue_request(sc); if (req == NULL) { error = EAGAIN; vtscsi_freeze_simq(sc, VTSCSI_REQUEST); goto fail; } req->vsr_ccb = ccb; error = vtscsi_execute_abort_task_cmd(sc, req); if (error == 0) return; vtscsi_enqueue_request(sc, req); fail: vtscsi_dprintf(sc, VTSCSI_ERROR, "error=%d req=%p ccb=%p\n", error, req, ccb); if (error == EAGAIN) ccbh->status = CAM_RESRC_UNAVAIL; else ccbh->status = CAM_REQ_CMP_ERR; xpt_done(ccb); } static void vtscsi_cam_path_inquiry(struct vtscsi_softc *sc, struct cam_sim *sim, union ccb *ccb) { device_t dev; struct ccb_pathinq *cpi; dev = sc->vtscsi_dev; cpi = &ccb->cpi; vtscsi_dprintf(sc, VTSCSI_TRACE, "sim=%p ccb=%p\n", sim, ccb); cpi->version_num = 1; cpi->hba_inquiry = PI_TAG_ABLE; cpi->target_sprt = 0; cpi->hba_misc = PIM_SEQSCAN; if (vtscsi_bus_reset_disable != 0) cpi->hba_misc |= PIM_NOBUSRESET; cpi->hba_eng_cnt = 0; cpi->max_target = sc->vtscsi_max_target; cpi->max_lun = 
sc->vtscsi_max_lun; cpi->initiator_id = VTSCSI_INITIATOR_ID; strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); strncpy(cpi->hba_vid, "VirtIO", HBA_IDLEN); strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); cpi->unit_number = cam_sim_unit(sim); cpi->bus_id = cam_sim_bus(sim); cpi->base_transfer_speed = 300000; cpi->protocol = PROTO_SCSI; cpi->protocol_version = SCSI_REV_SPC3; cpi->transport = XPORT_SAS; cpi->transport_version = 0; cpi->maxio = (sc->vtscsi_max_nsegs - VTSCSI_MIN_SEGMENTS - 1) * PAGE_SIZE; cpi->hba_vendor = virtio_get_vendor(dev); cpi->hba_device = virtio_get_device(dev); cpi->hba_subvendor = virtio_get_subvendor(dev); cpi->hba_subdevice = virtio_get_subdevice(dev); ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); } static int vtscsi_sg_append_scsi_buf(struct vtscsi_softc *sc, struct sglist *sg, struct ccb_scsiio *csio) { struct ccb_hdr *ccbh; struct bus_dma_segment *dseg; int i, error; ccbh = &csio->ccb_h; error = 0; - if ((ccbh->flags & CAM_SCATTER_VALID) == 0) { - - if ((ccbh->flags & CAM_DATA_PHYS) == 0) + switch ((ccbh->flags & CAM_DATA_MASK)) { + case CAM_DATA_VADDR: + error = sglist_append(sg, csio->data_ptr, csio->dxfer_len); + break; + case CAM_DATA_PADDR: + error = sglist_append_phys(sg, + (vm_paddr_t)(vm_offset_t) csio->data_ptr, csio->dxfer_len); + break; + case CAM_DATA_SG: + for (i = 0; i < csio->sglist_cnt && error == 0; i++) { + dseg = &((struct bus_dma_segment *)csio->data_ptr)[i]; error = sglist_append(sg, - csio->data_ptr, csio->dxfer_len); - else - error = sglist_append_phys(sg, - (vm_paddr_t)(vm_offset_t) csio->data_ptr, - csio->dxfer_len); - } else { - + (void *)(vm_offset_t) dseg->ds_addr, dseg->ds_len); + } + break; + case CAM_DATA_SG_PADDR: for (i = 0; i < csio->sglist_cnt && error == 0; i++) { dseg = &((struct bus_dma_segment *)csio->data_ptr)[i]; - - if ((ccbh->flags & CAM_SG_LIST_PHYS) == 0) - error = sglist_append(sg, - (void *)(vm_offset_t) dseg->ds_addr, - dseg->ds_len); - else - error = sglist_append_phys(sg, - 
(vm_paddr_t) dseg->ds_addr, dseg->ds_len); + error = sglist_append_phys(sg, + (vm_paddr_t) dseg->ds_addr, dseg->ds_len); } + break; + default: + error = EINVAL; + break; } return (error); } static int vtscsi_fill_scsi_cmd_sglist(struct vtscsi_softc *sc, struct vtscsi_request *req, int *readable, int *writable) { struct sglist *sg; struct ccb_hdr *ccbh; struct ccb_scsiio *csio; struct virtio_scsi_cmd_req *cmd_req; struct virtio_scsi_cmd_resp *cmd_resp; int error; sg = sc->vtscsi_sglist; csio = &req->vsr_ccb->csio; ccbh = &csio->ccb_h; cmd_req = &req->vsr_cmd_req; cmd_resp = &req->vsr_cmd_resp; sglist_reset(sg); sglist_append(sg, cmd_req, sizeof(struct virtio_scsi_cmd_req)); if ((ccbh->flags & CAM_DIR_MASK) == CAM_DIR_OUT) { error = vtscsi_sg_append_scsi_buf(sc, sg, csio); /* At least one segment must be left for the response. */ if (error || sg->sg_nseg == sg->sg_maxseg) goto fail; } *readable = sg->sg_nseg; sglist_append(sg, cmd_resp, sizeof(struct virtio_scsi_cmd_resp)); if ((ccbh->flags & CAM_DIR_MASK) == CAM_DIR_IN) { error = vtscsi_sg_append_scsi_buf(sc, sg, csio); if (error) goto fail; } *writable = sg->sg_nseg - *readable; vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p ccb=%p readable=%d " "writable=%d\n", req, ccbh, *readable, *writable); return (0); fail: /* * This should never happen unless maxio was incorrectly set. 
*/ vtscsi_set_ccb_status(ccbh, CAM_REQ_TOO_BIG, 0); vtscsi_dprintf(sc, VTSCSI_ERROR, "error=%d req=%p ccb=%p " "nseg=%d maxseg=%d\n", error, req, ccbh, sg->sg_nseg, sg->sg_maxseg); return (EFBIG); } static int vtscsi_execute_scsi_cmd(struct vtscsi_softc *sc, struct vtscsi_request *req) { struct sglist *sg; struct virtqueue *vq; struct ccb_scsiio *csio; struct ccb_hdr *ccbh; struct virtio_scsi_cmd_req *cmd_req; struct virtio_scsi_cmd_resp *cmd_resp; int readable, writable, error; sg = sc->vtscsi_sglist; vq = sc->vtscsi_request_vq; csio = &req->vsr_ccb->csio; ccbh = &csio->ccb_h; cmd_req = &req->vsr_cmd_req; cmd_resp = &req->vsr_cmd_resp; vtscsi_init_scsi_cmd_req(csio, cmd_req); error = vtscsi_fill_scsi_cmd_sglist(sc, req, &readable, &writable); if (error) return (error); req->vsr_complete = vtscsi_complete_scsi_cmd; cmd_resp->response = -1; error = virtqueue_enqueue(vq, req, sg, readable, writable); if (error) { vtscsi_dprintf(sc, VTSCSI_ERROR, "enqueue error=%d req=%p ccb=%p\n", error, req, ccbh); ccbh->status = CAM_REQUEUE_REQ; vtscsi_freeze_simq(sc, VTSCSI_REQUEST_VQ); return (error); } ccbh->status |= CAM_SIM_QUEUED; ccbh->ccbh_vtscsi_req = req; virtqueue_notify(vq); if (ccbh->timeout != CAM_TIME_INFINITY) { req->vsr_flags |= VTSCSI_REQ_FLAG_TIMEOUT_SET; callout_reset(&req->vsr_callout, ccbh->timeout * hz / 1000, vtscsi_timedout_scsi_cmd, req); } vtscsi_dprintf_req(req, VTSCSI_TRACE, "enqueued req=%p ccb=%p\n", req, ccbh); return (0); } static int vtscsi_start_scsi_cmd(struct vtscsi_softc *sc, union ccb *ccb) { struct vtscsi_request *req; int error; req = vtscsi_dequeue_request(sc); if (req == NULL) { ccb->ccb_h.status = CAM_REQUEUE_REQ; vtscsi_freeze_simq(sc, VTSCSI_REQUEST); return (ENOBUFS); } req->vsr_ccb = ccb; error = vtscsi_execute_scsi_cmd(sc, req); if (error) vtscsi_enqueue_request(sc, req); return (error); } static void vtscsi_complete_abort_timedout_scsi_cmd(struct vtscsi_softc *sc, struct vtscsi_request *req) { struct virtio_scsi_ctrl_tmf_resp 
*tmf_resp; struct vtscsi_request *to_req; uint8_t response; tmf_resp = &req->vsr_tmf_resp; response = tmf_resp->response; to_req = req->vsr_timedout_req; vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p to_req=%p response=%d\n", req, to_req, response); vtscsi_enqueue_request(sc, req); /* * The timedout request could have completed between when the * abort task was sent and when the host processed it. */ if (to_req->vsr_state != VTSCSI_REQ_STATE_TIMEDOUT) return; /* The timedout request was successfully aborted. */ if (response == VIRTIO_SCSI_S_FUNCTION_COMPLETE) return; /* Don't bother if the device is going away. */ if (sc->vtscsi_flags & VTSCSI_FLAG_DETACH) return; /* The timedout request will be aborted by the reset. */ if (sc->vtscsi_flags & VTSCSI_FLAG_RESET) return; vtscsi_reset_bus(sc); } static int vtscsi_abort_timedout_scsi_cmd(struct vtscsi_softc *sc, struct vtscsi_request *to_req) { struct sglist *sg; struct ccb_hdr *to_ccbh; struct vtscsi_request *req; struct virtio_scsi_ctrl_tmf_req *tmf_req; struct virtio_scsi_ctrl_tmf_resp *tmf_resp; int error; sg = sc->vtscsi_sglist; to_ccbh = &to_req->vsr_ccb->ccb_h; req = vtscsi_dequeue_request(sc); if (req == NULL) { error = ENOBUFS; goto fail; } tmf_req = &req->vsr_tmf_req; tmf_resp = &req->vsr_tmf_resp; vtscsi_init_ctrl_tmf_req(to_ccbh, VIRTIO_SCSI_T_TMF_ABORT_TASK, (uintptr_t) to_ccbh, tmf_req); sglist_reset(sg); sglist_append(sg, tmf_req, sizeof(struct virtio_scsi_ctrl_tmf_req)); sglist_append(sg, tmf_resp, sizeof(struct virtio_scsi_ctrl_tmf_resp)); req->vsr_timedout_req = to_req; req->vsr_complete = vtscsi_complete_abort_timedout_scsi_cmd; tmf_resp->response = -1; error = vtscsi_execute_ctrl_req(sc, req, sg, 1, 1, VTSCSI_EXECUTE_ASYNC); if (error == 0) return (0); vtscsi_enqueue_request(sc, req); fail: vtscsi_dprintf(sc, VTSCSI_ERROR, "error=%d req=%p " "timedout req=%p ccb=%p\n", error, req, to_req, to_ccbh); return (error); } static void vtscsi_timedout_scsi_cmd(void *xreq) { struct vtscsi_softc *sc; struct 
vtscsi_request *to_req; to_req = xreq; sc = to_req->vsr_softc; vtscsi_dprintf(sc, VTSCSI_INFO, "timedout req=%p ccb=%p state=%#x\n", to_req, to_req->vsr_ccb, to_req->vsr_state); /* Don't bother if the device is going away. */ if (sc->vtscsi_flags & VTSCSI_FLAG_DETACH) return; /* * Bail if the request is not in use. We likely raced when * stopping the callout handler or it has already been aborted. */ if (to_req->vsr_state != VTSCSI_REQ_STATE_INUSE || (to_req->vsr_flags & VTSCSI_REQ_FLAG_TIMEOUT_SET) == 0) return; /* * Complete the request queue in case the timedout request is * actually just pending. */ vtscsi_complete_vq(sc, sc->vtscsi_request_vq); if (to_req->vsr_state == VTSCSI_REQ_STATE_FREE) return; sc->vtscsi_stats.scsi_cmd_timeouts++; to_req->vsr_state = VTSCSI_REQ_STATE_TIMEDOUT; if (vtscsi_abort_timedout_scsi_cmd(sc, to_req) == 0) return; vtscsi_dprintf(sc, VTSCSI_ERROR, "resetting bus\n"); vtscsi_reset_bus(sc); } static cam_status vtscsi_scsi_cmd_cam_status(struct virtio_scsi_cmd_resp *cmd_resp) { cam_status status; switch (cmd_resp->response) { case VIRTIO_SCSI_S_OK: status = CAM_REQ_CMP; break; case VIRTIO_SCSI_S_OVERRUN: status = CAM_DATA_RUN_ERR; break; case VIRTIO_SCSI_S_ABORTED: status = CAM_REQ_ABORTED; break; case VIRTIO_SCSI_S_BAD_TARGET: status = CAM_TID_INVALID; break; case VIRTIO_SCSI_S_RESET: status = CAM_SCSI_BUS_RESET; break; case VIRTIO_SCSI_S_BUSY: status = CAM_SCSI_BUSY; break; case VIRTIO_SCSI_S_TRANSPORT_FAILURE: case VIRTIO_SCSI_S_TARGET_FAILURE: case VIRTIO_SCSI_S_NEXUS_FAILURE: status = CAM_SCSI_IT_NEXUS_LOST; break; default: /* VIRTIO_SCSI_S_FAILURE */ status = CAM_REQ_CMP_ERR; break; } return (status); } static cam_status vtscsi_complete_scsi_cmd_response(struct vtscsi_softc *sc, struct ccb_scsiio *csio, struct virtio_scsi_cmd_resp *cmd_resp) { cam_status status; csio->scsi_status = cmd_resp->status; csio->resid = cmd_resp->resid; if (csio->scsi_status == SCSI_STATUS_OK) status = CAM_REQ_CMP; else status = CAM_SCSI_STATUS_ERROR; 
if (cmd_resp->sense_len > 0) { status |= CAM_AUTOSNS_VALID; if (cmd_resp->sense_len < csio->sense_len) csio->sense_resid = csio->sense_len - cmd_resp->sense_len; else csio->sense_resid = 0; bzero(&csio->sense_data, sizeof(csio->sense_data)); memcpy(cmd_resp->sense, &csio->sense_data, csio->sense_len - csio->sense_resid); } vtscsi_dprintf(sc, status == CAM_REQ_CMP ? VTSCSI_TRACE : VTSCSI_ERROR, "ccb=%p scsi_status=%#x resid=%u sense_resid=%u\n", csio, csio->scsi_status, csio->resid, csio->sense_resid); return (status); } static void vtscsi_complete_scsi_cmd(struct vtscsi_softc *sc, struct vtscsi_request *req) { struct ccb_hdr *ccbh; struct ccb_scsiio *csio; struct virtio_scsi_cmd_resp *cmd_resp; cam_status status; csio = &req->vsr_ccb->csio; ccbh = &csio->ccb_h; cmd_resp = &req->vsr_cmd_resp; KASSERT(ccbh->ccbh_vtscsi_req == req, ("ccb %p req mismatch %p/%p", ccbh, ccbh->ccbh_vtscsi_req, req)); if (req->vsr_flags & VTSCSI_REQ_FLAG_TIMEOUT_SET) callout_stop(&req->vsr_callout); status = vtscsi_scsi_cmd_cam_status(cmd_resp); if (status == CAM_REQ_ABORTED) { if (req->vsr_state == VTSCSI_REQ_STATE_TIMEDOUT) status = CAM_CMD_TIMEOUT; } else if (status == CAM_REQ_CMP) status = vtscsi_complete_scsi_cmd_response(sc, csio, cmd_resp); if ((status & CAM_STATUS_MASK) != CAM_REQ_CMP) { status |= CAM_DEV_QFRZN; xpt_freeze_devq(ccbh->path, 1); } if (vtscsi_thaw_simq(sc, VTSCSI_REQUEST | VTSCSI_REQUEST_VQ) != 0) status |= CAM_RELEASE_SIMQ; vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p ccb=%p status=%#x\n", req, ccbh, status); ccbh->status = status; xpt_done(req->vsr_ccb); vtscsi_enqueue_request(sc, req); } static void vtscsi_poll_ctrl_req(struct vtscsi_softc *sc, struct vtscsi_request *req) { /* XXX We probably shouldn't poll forever. 
*/ req->vsr_flags |= VTSCSI_REQ_FLAG_POLLED; do vtscsi_complete_vq(sc, sc->vtscsi_control_vq); while ((req->vsr_flags & VTSCSI_REQ_FLAG_COMPLETE) == 0); req->vsr_flags &= ~VTSCSI_REQ_FLAG_POLLED; } static int vtscsi_execute_ctrl_req(struct vtscsi_softc *sc, struct vtscsi_request *req, struct sglist *sg, int readable, int writable, int flag) { struct virtqueue *vq; int error; vq = sc->vtscsi_control_vq; MPASS(flag == VTSCSI_EXECUTE_POLL || req->vsr_complete != NULL); error = virtqueue_enqueue(vq, req, sg, readable, writable); if (error) { /* * Return EAGAIN when the virtqueue does not have enough * descriptors available. */ if (error == ENOSPC || error == EMSGSIZE) error = EAGAIN; return (error); } virtqueue_notify(vq); if (flag == VTSCSI_EXECUTE_POLL) vtscsi_poll_ctrl_req(sc, req); return (0); } static void vtscsi_complete_abort_task_cmd(struct vtscsi_softc *sc, struct vtscsi_request *req) { union ccb *ccb; struct ccb_hdr *ccbh; struct virtio_scsi_ctrl_tmf_resp *tmf_resp; ccb = req->vsr_ccb; ccbh = &ccb->ccb_h; tmf_resp = &req->vsr_tmf_resp; switch (tmf_resp->response) { case VIRTIO_SCSI_S_FUNCTION_COMPLETE: ccbh->status = CAM_REQ_CMP; break; case VIRTIO_SCSI_S_FUNCTION_REJECTED: ccbh->status = CAM_UA_ABORT; break; default: ccbh->status = CAM_REQ_CMP_ERR; break; } xpt_done(ccb); vtscsi_enqueue_request(sc, req); } static int vtscsi_execute_abort_task_cmd(struct vtscsi_softc *sc, struct vtscsi_request *req) { struct sglist *sg; struct ccb_abort *cab; struct ccb_hdr *ccbh; struct ccb_hdr *abort_ccbh; struct vtscsi_request *abort_req; struct virtio_scsi_ctrl_tmf_req *tmf_req; struct virtio_scsi_ctrl_tmf_resp *tmf_resp; int error; sg = sc->vtscsi_sglist; cab = &req->vsr_ccb->cab; ccbh = &cab->ccb_h; tmf_req = &req->vsr_tmf_req; tmf_resp = &req->vsr_tmf_resp; /* CCB header and request that's to be aborted. 
*/ abort_ccbh = &cab->abort_ccb->ccb_h; abort_req = abort_ccbh->ccbh_vtscsi_req; if (abort_ccbh->func_code != XPT_SCSI_IO || abort_req == NULL) { error = EINVAL; goto fail; } /* Only attempt to abort requests that could be in-flight. */ if (abort_req->vsr_state != VTSCSI_REQ_STATE_INUSE) { error = EALREADY; goto fail; } abort_req->vsr_state = VTSCSI_REQ_STATE_ABORTED; if (abort_req->vsr_flags & VTSCSI_REQ_FLAG_TIMEOUT_SET) callout_stop(&abort_req->vsr_callout); vtscsi_init_ctrl_tmf_req(ccbh, VIRTIO_SCSI_T_TMF_ABORT_TASK, (uintptr_t) abort_ccbh, tmf_req); sglist_reset(sg); sglist_append(sg, tmf_req, sizeof(struct virtio_scsi_ctrl_tmf_req)); sglist_append(sg, tmf_resp, sizeof(struct virtio_scsi_ctrl_tmf_resp)); req->vsr_complete = vtscsi_complete_abort_task_cmd; tmf_resp->response = -1; error = vtscsi_execute_ctrl_req(sc, req, sg, 1, 1, VTSCSI_EXECUTE_ASYNC); fail: vtscsi_dprintf(sc, VTSCSI_TRACE, "error=%d req=%p abort_ccb=%p " "abort_req=%p\n", error, req, abort_ccbh, abort_req); return (error); } static void vtscsi_complete_reset_dev_cmd(struct vtscsi_softc *sc, struct vtscsi_request *req) { union ccb *ccb; struct ccb_hdr *ccbh; struct virtio_scsi_ctrl_tmf_resp *tmf_resp; ccb = req->vsr_ccb; ccbh = &ccb->ccb_h; tmf_resp = &req->vsr_tmf_resp; vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p ccb=%p response=%d\n", req, ccb, tmf_resp->response); if (tmf_resp->response == VIRTIO_SCSI_S_FUNCTION_COMPLETE) { ccbh->status = CAM_REQ_CMP; vtscsi_announce(sc, AC_SENT_BDR, ccbh->target_id, ccbh->target_lun); } else ccbh->status = CAM_REQ_CMP_ERR; xpt_done(ccb); vtscsi_enqueue_request(sc, req); } static int vtscsi_execute_reset_dev_cmd(struct vtscsi_softc *sc, struct vtscsi_request *req) { struct sglist *sg; struct ccb_resetdev *crd; struct ccb_hdr *ccbh; struct virtio_scsi_ctrl_tmf_req *tmf_req; struct virtio_scsi_ctrl_tmf_resp *tmf_resp; uint32_t subtype; int error; sg = sc->vtscsi_sglist; crd = &req->vsr_ccb->crd; ccbh = &crd->ccb_h; tmf_req = &req->vsr_tmf_req; tmf_resp = 
&req->vsr_tmf_resp; if (ccbh->target_lun == CAM_LUN_WILDCARD) subtype = VIRTIO_SCSI_T_TMF_I_T_NEXUS_RESET; else subtype = VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET; vtscsi_init_ctrl_tmf_req(ccbh, subtype, 0, tmf_req); sglist_reset(sg); sglist_append(sg, tmf_req, sizeof(struct virtio_scsi_ctrl_tmf_req)); sglist_append(sg, tmf_resp, sizeof(struct virtio_scsi_ctrl_tmf_resp)); req->vsr_complete = vtscsi_complete_reset_dev_cmd; tmf_resp->response = -1; error = vtscsi_execute_ctrl_req(sc, req, sg, 1, 1, VTSCSI_EXECUTE_ASYNC); vtscsi_dprintf(sc, VTSCSI_TRACE, "error=%d req=%p ccb=%p\n", error, req, ccbh); return (error); } static void vtscsi_get_request_lun(uint8_t lun[], target_id_t *target_id, lun_id_t *lun_id) { *target_id = lun[1]; *lun_id = (lun[2] << 8) | lun[3]; } static void vtscsi_set_request_lun(struct ccb_hdr *ccbh, uint8_t lun[]) { lun[0] = 1; lun[1] = ccbh->target_id; lun[2] = 0x40 | ((ccbh->target_lun >> 8) & 0x3F); lun[3] = (ccbh->target_lun >> 8) & 0xFF; } static void vtscsi_init_scsi_cmd_req(struct ccb_scsiio *csio, struct virtio_scsi_cmd_req *cmd_req) { uint8_t attr; switch (csio->tag_action) { case MSG_HEAD_OF_Q_TAG: attr = VIRTIO_SCSI_S_HEAD; break; case MSG_ORDERED_Q_TAG: attr = VIRTIO_SCSI_S_ORDERED; break; case MSG_ACA_TASK: attr = VIRTIO_SCSI_S_ACA; break; default: /* MSG_SIMPLE_Q_TAG */ attr = VIRTIO_SCSI_S_SIMPLE; break; } vtscsi_set_request_lun(&csio->ccb_h, cmd_req->lun); cmd_req->tag = (uintptr_t) csio; cmd_req->task_attr = attr; memcpy(cmd_req->cdb, csio->ccb_h.flags & CAM_CDB_POINTER ? 
csio->cdb_io.cdb_ptr : csio->cdb_io.cdb_bytes, csio->cdb_len); } static void vtscsi_init_ctrl_tmf_req(struct ccb_hdr *ccbh, uint32_t subtype, uintptr_t tag, struct virtio_scsi_ctrl_tmf_req *tmf_req) { vtscsi_set_request_lun(ccbh, tmf_req->lun); tmf_req->type = VIRTIO_SCSI_T_TMF; tmf_req->subtype = subtype; tmf_req->tag = tag; } static void vtscsi_freeze_simq(struct vtscsi_softc *sc, int reason) { int frozen; frozen = sc->vtscsi_frozen; if (reason & VTSCSI_REQUEST && (sc->vtscsi_frozen & VTSCSI_FROZEN_NO_REQUESTS) == 0) sc->vtscsi_frozen |= VTSCSI_FROZEN_NO_REQUESTS; if (reason & VTSCSI_REQUEST_VQ && (sc->vtscsi_frozen & VTSCSI_FROZEN_REQUEST_VQ_FULL) == 0) sc->vtscsi_frozen |= VTSCSI_FROZEN_REQUEST_VQ_FULL; /* Freeze the SIMQ if transitioned to frozen. */ if (frozen == 0 && sc->vtscsi_frozen != 0) { vtscsi_dprintf(sc, VTSCSI_INFO, "SIMQ frozen\n"); xpt_freeze_simq(sc->vtscsi_sim, 1); } } static int vtscsi_thaw_simq(struct vtscsi_softc *sc, int reason) { int thawed; if (sc->vtscsi_frozen == 0 || reason == 0) return (0); if (reason & VTSCSI_REQUEST && sc->vtscsi_frozen & VTSCSI_FROZEN_NO_REQUESTS) sc->vtscsi_frozen &= ~VTSCSI_FROZEN_NO_REQUESTS; if (reason & VTSCSI_REQUEST_VQ && sc->vtscsi_frozen & VTSCSI_FROZEN_REQUEST_VQ_FULL) sc->vtscsi_frozen &= ~VTSCSI_FROZEN_REQUEST_VQ_FULL; thawed = sc->vtscsi_frozen == 0; if (thawed != 0) vtscsi_dprintf(sc, VTSCSI_INFO, "SIMQ thawed\n"); return (thawed); } static void vtscsi_announce(struct vtscsi_softc *sc, uint32_t ac_code, target_id_t target_id, lun_id_t lun_id) { struct cam_path *path; xpt_async(ac_code, sc->vtscsi_path, NULL); return; /* Use the wildcard path from our softc for bus announcements. 
*/ if (target_id == CAM_TARGET_WILDCARD && lun_id == CAM_LUN_WILDCARD) { xpt_async(ac_code, sc->vtscsi_path, NULL); return; } if (xpt_create_path(&path, NULL, cam_sim_path(sc->vtscsi_sim), target_id, lun_id) != CAM_REQ_CMP) { vtscsi_dprintf(sc, VTSCSI_ERROR, "cannot create path\n"); return; } xpt_async(ac_code, path, NULL); xpt_free_path(path); } static void vtscsi_execute_rescan(struct vtscsi_softc *sc, target_id_t target_id, lun_id_t lun_id) { union ccb *ccb; cam_status status; ccb = xpt_alloc_ccb_nowait(); if (ccb == NULL) { vtscsi_dprintf(sc, VTSCSI_ERROR, "cannot allocate CCB\n"); return; } status = xpt_create_path(&ccb->ccb_h.path, xpt_periph, cam_sim_path(sc->vtscsi_sim), target_id, lun_id); if (status != CAM_REQ_CMP) { xpt_free_ccb(ccb); return; } xpt_rescan(ccb); } static void vtscsi_execute_rescan_bus(struct vtscsi_softc *sc) { vtscsi_execute_rescan(sc, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD); } static void vtscsi_transport_reset_event(struct vtscsi_softc *sc, struct virtio_scsi_event *event) { target_id_t target_id; lun_id_t lun_id; vtscsi_get_request_lun(event->lun, &target_id, &lun_id); switch (event->reason) { case VIRTIO_SCSI_EVT_RESET_RESCAN: case VIRTIO_SCSI_EVT_RESET_REMOVED: vtscsi_execute_rescan(sc, target_id, lun_id); break; default: device_printf(sc->vtscsi_dev, "unhandled transport event reason: %d\n", event->reason); break; } } static void vtscsi_handle_event(struct vtscsi_softc *sc, struct virtio_scsi_event *event) { int error; if ((event->event & VIRTIO_SCSI_T_EVENTS_MISSED) == 0) { switch (event->event) { case VIRTIO_SCSI_T_TRANSPORT_RESET: vtscsi_transport_reset_event(sc, event); break; default: device_printf(sc->vtscsi_dev, "unhandled event: %d\n", event->event); break; } } else vtscsi_execute_rescan_bus(sc); /* * This should always be successful since the buffer * was just dequeued. 
*/ error = vtscsi_enqueue_event_buf(sc, event); KASSERT(error == 0, ("cannot requeue event buffer: %d", error)); } static int vtscsi_enqueue_event_buf(struct vtscsi_softc *sc, struct virtio_scsi_event *event) { struct sglist *sg; struct virtqueue *vq; int size, error; sg = sc->vtscsi_sglist; vq = sc->vtscsi_event_vq; size = sc->vtscsi_event_buf_size; bzero(event, size); sglist_reset(sg); error = sglist_append(sg, event, size); if (error) return (error); error = virtqueue_enqueue(vq, event, sg, 0, sg->sg_nseg); if (error) return (error); virtqueue_notify(vq); return (0); } static int vtscsi_init_event_vq(struct vtscsi_softc *sc) { struct virtio_scsi_event *event; int i, size, error; /* * The first release of QEMU with VirtIO SCSI support would crash * when attempting to notify the event virtqueue. This was fixed * when hotplug support was added. */ if (sc->vtscsi_flags & VTSCSI_FLAG_HOTPLUG) size = sc->vtscsi_event_buf_size; else size = 0; if (size < sizeof(struct virtio_scsi_event)) return (0); for (i = 0; i < VTSCSI_NUM_EVENT_BUFS; i++) { event = &sc->vtscsi_event_bufs[i]; error = vtscsi_enqueue_event_buf(sc, event); if (error) break; } /* * Even just one buffer is enough. Missed events are * denoted with the VIRTIO_SCSI_T_EVENTS_MISSED flag. 
*/ if (i > 0) error = 0; return (error); } static void vtscsi_reinit_event_vq(struct vtscsi_softc *sc) { struct virtio_scsi_event *event; int i, error; if ((sc->vtscsi_flags & VTSCSI_FLAG_HOTPLUG) == 0 || sc->vtscsi_event_buf_size < sizeof(struct virtio_scsi_event)) return; for (i = 0; i < VTSCSI_NUM_EVENT_BUFS; i++) { event = &sc->vtscsi_event_bufs[i]; error = vtscsi_enqueue_event_buf(sc, event); if (error) break; } KASSERT(i > 0, ("cannot reinit event vq: %d", error)); } static void vtscsi_drain_event_vq(struct vtscsi_softc *sc) { struct virtqueue *vq; int last; vq = sc->vtscsi_event_vq; last = 0; while (virtqueue_drain(vq, &last) != NULL) ; KASSERT(virtqueue_empty(vq), ("eventvq not empty")); } static void vtscsi_complete_vqs_locked(struct vtscsi_softc *sc) { VTSCSI_LOCK_OWNED(sc); if (sc->vtscsi_request_vq != NULL) vtscsi_complete_vq(sc, sc->vtscsi_request_vq); if (sc->vtscsi_control_vq != NULL) vtscsi_complete_vq(sc, sc->vtscsi_control_vq); } static void vtscsi_complete_vqs(struct vtscsi_softc *sc) { VTSCSI_LOCK(sc); vtscsi_complete_vqs_locked(sc); VTSCSI_UNLOCK(sc); } static void vtscsi_cancel_request(struct vtscsi_softc *sc, struct vtscsi_request *req) { union ccb *ccb; int detach; ccb = req->vsr_ccb; vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p ccb=%p\n", req, ccb); /* * The callout must be drained when detaching since the request is * about to be freed. The VTSCSI_MTX must not be held for this in * case the callout is pending because there is a deadlock potential. * Otherwise, the virtqueue is being drained because of a bus reset * so we only need to attempt to stop the callouts. 
*/ detach = (sc->vtscsi_flags & VTSCSI_FLAG_DETACH) != 0; if (detach != 0) VTSCSI_LOCK_NOTOWNED(sc); else VTSCSI_LOCK_OWNED(sc); if (req->vsr_flags & VTSCSI_REQ_FLAG_TIMEOUT_SET) { if (detach != 0) callout_drain(&req->vsr_callout); else callout_stop(&req->vsr_callout); } if (ccb != NULL) { if (detach != 0) { VTSCSI_LOCK(sc); ccb->ccb_h.status = CAM_NO_HBA; } else ccb->ccb_h.status = CAM_REQUEUE_REQ; xpt_done(ccb); if (detach != 0) VTSCSI_UNLOCK(sc); } vtscsi_enqueue_request(sc, req); } static void vtscsi_drain_vq(struct vtscsi_softc *sc, struct virtqueue *vq) { struct vtscsi_request *req; int last; last = 0; vtscsi_dprintf(sc, VTSCSI_TRACE, "vq=%p\n", vq); while ((req = virtqueue_drain(vq, &last)) != NULL) vtscsi_cancel_request(sc, req); KASSERT(virtqueue_empty(vq), ("virtqueue not empty")); } static void vtscsi_drain_vqs(struct vtscsi_softc *sc) { if (sc->vtscsi_control_vq != NULL) vtscsi_drain_vq(sc, sc->vtscsi_control_vq); if (sc->vtscsi_request_vq != NULL) vtscsi_drain_vq(sc, sc->vtscsi_request_vq); if (sc->vtscsi_event_vq != NULL) vtscsi_drain_event_vq(sc); } static void vtscsi_stop(struct vtscsi_softc *sc) { vtscsi_disable_vqs_intr(sc); virtio_stop(sc->vtscsi_dev); } static int vtscsi_reset_bus(struct vtscsi_softc *sc) { int error; VTSCSI_LOCK_OWNED(sc); if (vtscsi_bus_reset_disable != 0) { device_printf(sc->vtscsi_dev, "bus reset disabled\n"); return (0); } sc->vtscsi_flags |= VTSCSI_FLAG_RESET; /* * vtscsi_stop() will cause the in-flight requests to be canceled. * Those requests are then completed here so CAM will retry them * after the reset is complete. */ vtscsi_stop(sc); vtscsi_complete_vqs_locked(sc); /* Rid the virtqueues of any remaining requests. */ vtscsi_drain_vqs(sc); /* * Any resource shortage that froze the SIMQ cannot persist across * a bus reset so ensure it gets thawed here. 
*/ if (vtscsi_thaw_simq(sc, VTSCSI_REQUEST | VTSCSI_REQUEST_VQ) != 0) xpt_release_simq(sc->vtscsi_sim, 0); error = vtscsi_reinit(sc); if (error) { device_printf(sc->vtscsi_dev, "reinitialization failed, stopping device...\n"); vtscsi_stop(sc); } else vtscsi_announce(sc, AC_BUS_RESET, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD); sc->vtscsi_flags &= ~VTSCSI_FLAG_RESET; return (error); } static void vtscsi_init_request(struct vtscsi_softc *sc, struct vtscsi_request *req) { #ifdef INVARIANTS int req_nsegs, resp_nsegs; req_nsegs = sglist_count(&req->vsr_ureq, sizeof(req->vsr_ureq)); resp_nsegs = sglist_count(&req->vsr_uresp, sizeof(req->vsr_uresp)); KASSERT(req_nsegs == 1, ("request crossed page boundary")); KASSERT(resp_nsegs == 1, ("response crossed page boundary")); #endif req->vsr_softc = sc; callout_init_mtx(&req->vsr_callout, VTSCSI_MTX(sc), 0); } static int vtscsi_alloc_requests(struct vtscsi_softc *sc) { struct vtscsi_request *req; int i, nreqs; /* * Commands destined for either the request or control queues come * from the same SIM queue. Use the size of the request virtqueue * as it (should) be much more frequently used. Some additional * requests are allocated for internal (TMF) use. 
*/ nreqs = virtqueue_size(sc->vtscsi_request_vq); if ((sc->vtscsi_flags & VTSCSI_FLAG_INDIRECT) == 0) nreqs /= VTSCSI_MIN_SEGMENTS; nreqs += VTSCSI_RESERVED_REQUESTS; for (i = 0; i < nreqs; i++) { req = malloc(sizeof(struct vtscsi_request), M_DEVBUF, M_NOWAIT); if (req == NULL) return (ENOMEM); vtscsi_init_request(sc, req); sc->vtscsi_nrequests++; vtscsi_enqueue_request(sc, req); } return (0); } static void vtscsi_free_requests(struct vtscsi_softc *sc) { struct vtscsi_request *req; while ((req = vtscsi_dequeue_request(sc)) != NULL) { KASSERT(callout_active(&req->vsr_callout) == 0, ("request callout still active")); sc->vtscsi_nrequests--; free(req, M_DEVBUF); } KASSERT(sc->vtscsi_nrequests == 0, ("leaked requests: %d", sc->vtscsi_nrequests)); } static void vtscsi_enqueue_request(struct vtscsi_softc *sc, struct vtscsi_request *req) { KASSERT(req->vsr_softc == sc, ("non-matching request vsr_softc %p/%p", req->vsr_softc, sc)); vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p\n", req); /* A request is available so the SIMQ could be released. */ if (vtscsi_thaw_simq(sc, VTSCSI_REQUEST) != 0) xpt_release_simq(sc->vtscsi_sim, 1); req->vsr_ccb = NULL; req->vsr_complete = NULL; req->vsr_ptr0 = NULL; req->vsr_state = VTSCSI_REQ_STATE_FREE; req->vsr_flags = 0; bzero(&req->vsr_ureq, sizeof(req->vsr_ureq)); bzero(&req->vsr_uresp, sizeof(req->vsr_uresp)); /* * We insert at the tail of the queue in order to make it * very unlikely a request will be reused if we race with * stopping its callout handler. 
*/ TAILQ_INSERT_TAIL(&sc->vtscsi_req_free, req, vsr_link); } static struct vtscsi_request * vtscsi_dequeue_request(struct vtscsi_softc *sc) { struct vtscsi_request *req; req = TAILQ_FIRST(&sc->vtscsi_req_free); if (req != NULL) { req->vsr_state = VTSCSI_REQ_STATE_INUSE; TAILQ_REMOVE(&sc->vtscsi_req_free, req, vsr_link); } else sc->vtscsi_stats.dequeue_no_requests++; vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p\n", req); return (req); } static void vtscsi_complete_request(struct vtscsi_request *req) { if (req->vsr_flags & VTSCSI_REQ_FLAG_POLLED) req->vsr_flags |= VTSCSI_REQ_FLAG_COMPLETE; if (req->vsr_complete != NULL) req->vsr_complete(req->vsr_softc, req); } static void vtscsi_complete_vq(struct vtscsi_softc *sc, struct virtqueue *vq) { struct vtscsi_request *req; VTSCSI_LOCK_OWNED(sc); while ((req = virtqueue_dequeue(vq, NULL)) != NULL) vtscsi_complete_request(req); } static void vtscsi_control_vq_task(void *arg, int pending) { struct vtscsi_softc *sc; struct virtqueue *vq; sc = arg; vq = sc->vtscsi_control_vq; VTSCSI_LOCK(sc); vtscsi_complete_vq(sc, sc->vtscsi_control_vq); if (virtqueue_enable_intr(vq) != 0) { virtqueue_disable_intr(vq); VTSCSI_UNLOCK(sc); taskqueue_enqueue_fast(sc->vtscsi_tq, &sc->vtscsi_control_intr_task); return; } VTSCSI_UNLOCK(sc); } static void vtscsi_event_vq_task(void *arg, int pending) { struct vtscsi_softc *sc; struct virtqueue *vq; struct virtio_scsi_event *event; sc = arg; vq = sc->vtscsi_event_vq; VTSCSI_LOCK(sc); while ((event = virtqueue_dequeue(vq, NULL)) != NULL) vtscsi_handle_event(sc, event); if (virtqueue_enable_intr(vq) != 0) { virtqueue_disable_intr(vq); VTSCSI_UNLOCK(sc); taskqueue_enqueue_fast(sc->vtscsi_tq, &sc->vtscsi_control_intr_task); return; } VTSCSI_UNLOCK(sc); } static void vtscsi_request_vq_task(void *arg, int pending) { struct vtscsi_softc *sc; struct virtqueue *vq; sc = arg; vq = sc->vtscsi_request_vq; VTSCSI_LOCK(sc); vtscsi_complete_vq(sc, sc->vtscsi_request_vq); if (virtqueue_enable_intr(vq) != 0) { 
virtqueue_disable_intr(vq); VTSCSI_UNLOCK(sc); taskqueue_enqueue_fast(sc->vtscsi_tq, &sc->vtscsi_request_intr_task); return; } VTSCSI_UNLOCK(sc); } static int vtscsi_control_vq_intr(void *xsc) { struct vtscsi_softc *sc; sc = xsc; virtqueue_disable_intr(sc->vtscsi_control_vq); taskqueue_enqueue_fast(sc->vtscsi_tq, &sc->vtscsi_control_intr_task); return (1); } static int vtscsi_event_vq_intr(void *xsc) { struct vtscsi_softc *sc; sc = xsc; virtqueue_disable_intr(sc->vtscsi_event_vq); taskqueue_enqueue_fast(sc->vtscsi_tq, &sc->vtscsi_event_intr_task); return (1); } static int vtscsi_request_vq_intr(void *xsc) { struct vtscsi_softc *sc; sc = xsc; virtqueue_disable_intr(sc->vtscsi_request_vq); taskqueue_enqueue_fast(sc->vtscsi_tq, &sc->vtscsi_request_intr_task); return (1); } static void vtscsi_disable_vqs_intr(struct vtscsi_softc *sc) { virtqueue_disable_intr(sc->vtscsi_control_vq); virtqueue_disable_intr(sc->vtscsi_event_vq); virtqueue_disable_intr(sc->vtscsi_request_vq); } static void vtscsi_enable_vqs_intr(struct vtscsi_softc *sc) { virtqueue_enable_intr(sc->vtscsi_control_vq); virtqueue_enable_intr(sc->vtscsi_event_vq); virtqueue_enable_intr(sc->vtscsi_request_vq); } static void vtscsi_get_tunables(struct vtscsi_softc *sc) { char tmpstr[64]; TUNABLE_INT_FETCH("hw.vtscsi.debug_level", &sc->vtscsi_debug); snprintf(tmpstr, sizeof(tmpstr), "dev.vtscsi.%d.debug_level", device_get_unit(sc->vtscsi_dev)); TUNABLE_INT_FETCH(tmpstr, &sc->vtscsi_debug); } static void vtscsi_add_sysctl(struct vtscsi_softc *sc) { device_t dev; struct vtscsi_statistics *stats; struct sysctl_ctx_list *ctx; struct sysctl_oid *tree; struct sysctl_oid_list *child; dev = sc->vtscsi_dev; stats = &sc->vtscsi_stats; ctx = device_get_sysctl_ctx(dev); tree = device_get_sysctl_tree(dev); child = SYSCTL_CHILDREN(tree); SYSCTL_ADD_INT(ctx, child, OID_AUTO, "debug_level", CTLFLAG_RW, &sc->vtscsi_debug, 0, "Debug level"); SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "scsi_cmd_timeouts", CTLFLAG_RD, 
&stats->scsi_cmd_timeouts, "SCSI command timeouts"); SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dequeue_no_requests", CTLFLAG_RD, &stats->dequeue_no_requests, "No available requests to dequeue"); } static void vtscsi_printf_req(struct vtscsi_request *req, const char *func, const char *fmt, ...) { struct vtscsi_softc *sc; union ccb *ccb; struct sbuf sb; va_list ap; char str[192]; char path_str[64]; if (req == NULL) return; sc = req->vsr_softc; ccb = req->vsr_ccb; va_start(ap, fmt); sbuf_new(&sb, str, sizeof(str), 0); if (ccb == NULL) { sbuf_printf(&sb, "(noperiph:%s%d:%u): ", cam_sim_name(sc->vtscsi_sim), cam_sim_unit(sc->vtscsi_sim), cam_sim_bus(sc->vtscsi_sim)); } else { xpt_path_string(ccb->ccb_h.path, path_str, sizeof(path_str)); sbuf_cat(&sb, path_str); if (ccb->ccb_h.func_code == XPT_SCSI_IO) { scsi_command_string(&ccb->csio, &sb); sbuf_printf(&sb, "length %d ", ccb->csio.dxfer_len); } } sbuf_vprintf(&sb, fmt, ap); va_end(ap); sbuf_finish(&sb); printf("%s: %s: %s", device_get_nameunit(sc->vtscsi_dev), func, sbuf_data(&sb)); } Index: projects/physbio/sys/dev/wds/wd7000.c =================================================================== --- projects/physbio/sys/dev/wds/wd7000.c (revision 244035) +++ projects/physbio/sys/dev/wds/wd7000.c (revision 244036) @@ -1,1443 +1,1443 @@ /*- * Copyright (c) 1994 Ludd, University of Lule}, Sweden. * Copyright (c) 2000 Sergey A. Babkin * All rights reserved. * * Written by Olof Johansson (offe@ludd.luth.se) 1995. * Based on code written by Theo de Raadt (deraadt@fsa.ca). * Resurrected, ported to CAM and generally cleaned up by Sergey Babkin * or . * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed at Ludd, University of Lule} * and by the FreeBSD project. * 4. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * */ #include __FBSDID("$FreeBSD$"); /* All bugs are subject to removal without further notice */ /* * offe 01/07/95 * * This version of the driver _still_ doesn't implement scatter/gather for the * WD7000-FASST2. This is due to the fact that my controller doesn't seem to * support it. That, and the lack of documentation makes it impossible for me * to implement it. What I've done instead is allocated a local buffer, * contiguous buffer big enough to handle the requests. I haven't seen any * read/write bigger than 64k, so I allocate a buffer of 64+16k. The data * that needs to be DMA'd to/from the controller is copied to/from that * buffer before/after the command is sent to the card. 
* * SB 03/30/00 * * An intermediate buffer is needed anyway to make sure that the buffer is * located under 16MB, otherwise it's out of reach of ISA cards. I've added * optimizations to allocate space in buffer in fragments. */ /* * Jumpers: (see The Ref(TM) for more info) * W1/W2 - interrupt selection: * W1 (1-2) IRQ3, (3-4) IRQ4, (5-6) IRQ5, (7-8) IRQ7, (9-10) IRQ9 * W2 (21-22) IRQ10, (19-20) IRQ11, (17-18) IRQ12, (15-16) IRQ14, (13-14) IRQ15 * * W2 - DRQ/DACK selection, DRQ and DACK must be the same: * (5-6) DRQ5 (11-12) DACK5 * (3-4) DRQ6 (9-10) DACK6 * (1-2) DRQ7 (7-8) DACK7 * * W3 - I/O address selection: open pair of pins (OFF) means 1, jumpered (ON) means 0 * pair (1-2) is bit 3, ..., pair (9-10) is bit 7. All the other bits are equal * to the value 0x300. In bitwise representation that would be: * 0 0 1 1 (9-10) (7-8) (5-6) (3-4) (1-2) 0 0 0 * For example, address 0x3C0, bitwise 1111000000 will be represented as: * (9-10) OFF, (7-8) OFF, (5-6) ON, (3-4) ON, (1-2) ON * * W4 - BIOS address: open pair of pins (OFF) means 1, jumpered (ON) means 0 * pair (1-2) is bit 13, ..., pair (7-8) is bit 16. All the other bits are * equal to the value 0xC0000. In bitwise representation that would be: * 1 1 0 (7-8) (5-6) (3-4) (1-2) 0 0000 0000 0000 * For example, address 0xD8000 will be represented as: * (7-8) OFF, (5-6) OFF, (3-4) ON, (1-2) ON * * W98 (on newer cards) - BIOS enabled; on older cards just remove the BIOS * chip to disable it * W99 (on newer cards) - ROM size (1-2) OFF, (3-4) ON * * W5 - terminator power * ON - host supplies term. power * OFF - target supplies term. 
power * * W6, W9 - floppy support (a bit cryptic): * W6 ON, W9 ON - disabled * W6 OFF, W9 ON - enabled with HardCard only * W6 OFF, W9 OFF - enabled with no hardCard or Combo * * Default: I/O 0x350, IRQ15, DMA6 */ /* * debugging levels: * 0 - disabled * 1 - print debugging messages * 2 - collect debugging messages in an internal log buffer which can be * printed later by calling wds_printlog from DDB * * Both kind of logs are heavy and interact significantly with the timing * of commands, so the observed problems may become invisible if debug * logging is enabled. * * The light-weight logging facility may be enabled by defining * WDS_ENABLE_SMALLOG as 1. It has very little overhead and allows observing * the traces of various race conditions without affectiong them but the log is * quite terse. The small log can be printer from DDB by calling * wds_printsmallog. */ #ifndef WDS_DEBUG #define WDS_DEBUG 0 #endif #ifndef WDS_ENABLE_SMALLOG #define WDS_ENABLE_SMALLOG 0 #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define WDSTOPHYS(wp, a) ( ((u_long)a) - ((u_long)wp->dx) + ((u_long)wp->dx_p) ) #define WDSTOVIRT(wp, a) ( ((char *)a) - ((char*)wp->dx_p) + ((char *)wp->dx) ) /* 0x10000 (64k) should be enough. But just to be sure... 
*/ #define BUFSIZ 0x12000 /* buffer fragment size, no more than 32 frags per buffer */ #define FRAGSIZ 0x1000 /* WD7000 registers */ #define WDS_STAT 0 /* read */ #define WDS_IRQSTAT 1 /* read */ #define WDS_CMD 0 /* write */ #define WDS_IRQACK 1 /* write */ #define WDS_HCR 2 /* write */ #define WDS_NPORTS 4 /* number of ports used */ /* WDS_STAT (read) defs */ #define WDS_IRQ 0x80 #define WDS_RDY 0x40 #define WDS_REJ 0x20 #define WDS_INIT 0x10 /* WDS_IRQSTAT (read) defs */ #define WDSI_MASK 0xc0 #define WDSI_ERR 0x00 #define WDSI_MFREE 0x80 #define WDSI_MSVC 0xc0 /* WDS_CMD (write) defs */ #define WDSC_NOOP 0x00 #define WDSC_INIT 0x01 #define WDSC_DISUNSOL 0x02 /* disable unsolicited ints */ #define WDSC_ENAUNSOL 0x03 /* enable unsolicited ints */ #define WDSC_IRQMFREE 0x04 /* interrupt on free RQM */ #define WDSC_SCSIRESETSOFT 0x05 /* soft reset */ #define WDSC_SCSIRESETHARD 0x06 /* hard reset ack */ #define WDSC_MSTART(m) (0x80 + (m)) /* start mailbox */ #define WDSC_MMSTART(m) (0xc0 + (m)) /* start all mailboxes */ /* WDS_HCR (write) defs */ #define WDSH_IRQEN 0x08 #define WDSH_DRQEN 0x04 #define WDSH_SCSIRESET 0x02 #define WDSH_ASCRESET 0x01 struct wds_cmd { u_int8_t cmd; u_int8_t targ; u_int8_t scb[12]; u_int8_t stat; u_int8_t venderr; u_int8_t len[3]; u_int8_t data[3]; u_int8_t next[3]; u_int8_t write; u_int8_t xx[6]; }; struct wds_req { struct wds_cmd cmd; union ccb *ccb; enum { WR_DONE = 0x01, WR_SENSE = 0x02 } flags; u_int8_t *buf; /* address of linear data buffer */ u_int32_t mask; /* mask of allocated fragments */ u_int8_t ombn; u_int8_t id; /* number of request */ }; #define WDSX_SCSICMD 0x00 #define WDSX_OPEN_RCVBUF 0x80 #define WDSX_RCV_CMD 0x81 #define WDSX_RCV_DATA 0x82 #define WDSX_RCV_DATASTAT 0x83 #define WDSX_SND_DATA 0x84 #define WDSX_SND_DATASTAT 0x85 #define WDSX_SND_CMDSTAT 0x86 #define WDSX_READINIT 0x88 #define WDSX_READSCSIID 0x89 #define WDSX_SETUNSOLIRQMASK 0x8a #define WDSX_GETUNSOLIRQMASK 0x8b #define WDSX_GETFIRMREV 0x8c #define 
WDSX_EXECDIAG 0x8d #define WDSX_SETEXECPARM 0x8e #define WDSX_GETEXECPARM 0x8f struct wds_mb { u_int8_t stat; u_int8_t addr[3]; }; /* ICMB status value */ #define ICMB_OK 0x01 #define ICMB_OKERR 0x02 #define ICMB_ETIME 0x04 #define ICMB_ERESET 0x05 #define ICMB_ETARCMD 0x06 #define ICMB_ERESEL 0x80 #define ICMB_ESEL 0x81 #define ICMB_EABORT 0x82 #define ICMB_ESRESET 0x83 #define ICMB_EHRESET 0x84 struct wds_setup { u_int8_t cmd; u_int8_t scsi_id; u_int8_t buson_t; u_int8_t busoff_t; u_int8_t xx; u_int8_t mbaddr[3]; u_int8_t nomb; u_int8_t nimb; }; /* the code depends on equality of these parameters */ #define MAXSIMUL 8 #define WDS_NOMB MAXSIMUL #define WDS_NIMB MAXSIMUL static int fragsiz; static int nfrags; /* structure for data exchange with controller */ struct wdsdx { struct wds_req req[MAXSIMUL]; struct wds_mb ombs[MAXSIMUL]; struct wds_mb imbs[MAXSIMUL]; u_int8_t data[BUFSIZ]; }; /* structure softc */ struct wds { device_t dev; int unit; int addr; int drq; struct cam_sim *sim; /* SIM descriptor for this card */ struct cam_path *path; /* wildcard path for this card */ char want_wdsr; /* resource shortage flag */ u_int32_t data_free; u_int32_t wdsr_free; struct wdsdx *dx; struct wdsdx *dx_p; /* physical address */ struct resource *port_r; int port_rid; struct resource *drq_r; int drq_rid; struct resource *intr_r; int intr_rid; void *intr_cookie; bus_dma_tag_t bustag; bus_dmamap_t busmap; }; #define ccb_wdsr spriv_ptr1 /* for wds request */ static int wds_probe(device_t dev); static int wds_attach(device_t dev); static void wds_intr(struct wds *wp); static void wds_action(struct cam_sim * sim, union ccb * ccb); static void wds_poll(struct cam_sim * sim); static int wds_preinit(struct wds *wp); static int wds_init(struct wds *wp); static void wds_alloc_callback(void *arg, bus_dma_segment_t *seg, int nseg, int error); static void wds_free_resources(struct wds *wp); static struct wds_req *wdsr_alloc(struct wds *wp); static void wds_scsi_io(struct cam_sim * sim, 
struct ccb_scsiio * csio); static void wdsr_ccb_done(struct wds *wp, struct wds_req *r, union ccb *ccb, u_int32_t status); static void wds_done(struct wds *wp, struct wds_req *r, u_int8_t stat); static int wds_runsense(struct wds *wp, struct wds_req *r); static int wds_getvers(struct wds *wp); static int wds_cmd(int base, u_int8_t * p, int l); static void wds_wait(int reg, int mask, int val); static struct wds_req *cmdtovirt(struct wds *wp, u_int32_t phys); static u_int32_t frag_alloc(struct wds *wp, int size, u_int8_t **res, u_int32_t *maskp); static void frag_free(struct wds *wp, u_int32_t mask); void wds_print(void); #if WDS_ENABLE_SMALLOG==1 static __inline void smallog(char c); void wds_printsmallog(void); #endif /* SMALLOG */ /* SCSI ID of the adapter itself */ #ifndef WDS_HBA_ID #define WDS_HBA_ID 7 #endif #if WDS_DEBUG == 2 #define LOGLINESIZ 81 #define NLOGLINES 300 #define DBX wds_nextlog(), LOGLINESIZ, #define DBG snprintf static char wds_log[NLOGLINES][LOGLINESIZ]; static int logwrite = 0, logread = 0; static char *wds_nextlog(void); void wds_printlog(void); #elif WDS_DEBUG != 0 #define DBX #define DBG printf #else #define DBX #define DBG if(0) printf #endif /* the table of supported bus methods */ static device_method_t wds_isa_methods[] = { DEVMETHOD(device_probe, wds_probe), DEVMETHOD(device_attach, wds_attach), { 0, 0 } }; static driver_t wds_isa_driver = { "wds", wds_isa_methods, sizeof(struct wds), }; static devclass_t wds_devclass; DRIVER_MODULE(wds, isa, wds_isa_driver, wds_devclass, 0, 0); MODULE_DEPEND(wds, isa, 1, 1, 1); MODULE_DEPEND(wds, cam, 1, 1, 1); #if WDS_ENABLE_SMALLOG==1 #define SMALLOGSIZ 512 static char wds_smallog[SMALLOGSIZ]; static char *wds_smallogp = wds_smallog; static char wds_smallogover = 0; static __inline void smallog(char c) { *wds_smallogp = c; if (++wds_smallogp == &wds_smallog[SMALLOGSIZ]) { wds_smallogp = wds_smallog; wds_smallogover = 1; } } #define smallog2(a, b) (smallog(a), smallog(b)) #define smallog3(a, b, c) 
(smallog(a), smallog(b), smallog(c))
#define smallog4(a, b, c, d) (smallog(a),smallog(b),smallog(c),smallog(d))

/*
 * Dump the small in-memory trace ring to the console, 70 characters
 * per output line.  Intended to be called from DDB.
 */
void
wds_printsmallog(void)
{
	int i;
	char *p;

	printf("wds: ");
	/* if the ring has wrapped, the oldest byte is at the write pointer */
	p = wds_smallogover ? wds_smallogp : wds_smallog;
	i = 0;
	do {
		printf("%c", *p);
		if (++p == &wds_smallog[SMALLOGSIZ])
			p = wds_smallog;	/* wrap around the ring */
		if (++i == 70) {
			i = 0;
			printf("\nwds: ");
		}
	} while (p != wds_smallogp);
	printf("\n");
}
#else
/* tracing disabled: all smallog macros expand to nothing */
#define smallog(a)
#define smallog2(a, b)
#define smallog3(a, b, c)
#define smallog4(a, b, c, d)
#endif /* SMALLOG */

/*
 * Probe for the adapter: validate the jumpered I/O port, DRQ and IRQ
 * resource hints, allocate the port range and run the controller
 * pre-initialization handshake (wds_preinit).  All resources are
 * released again before returning, successful or not.
 * Returns 0 on success, ENXIO if the hints are out of range or the
 * hardware does not respond.
 */
static int
wds_probe(device_t dev)
{
	struct wds *wp;
	int error = 0;
	int irq;

	/* No pnp support */
	if (isa_get_vendorid(dev))
		return (ENXIO);

	wp = (struct wds *) device_get_softc(dev);
	wp->unit = device_get_unit(dev);
	wp->dev = dev;

	/* valid jumpered addresses are 0x300-0x3f8, 8-byte aligned (see W3) */
	wp->addr = bus_get_resource_start(dev, SYS_RES_IOPORT, 0 /*rid*/);
	if (wp->addr == 0 || wp->addr <0x300
	 || wp->addr > 0x3f8 || wp->addr & 0x7) {
		device_printf(dev, "invalid port address 0x%x\n", wp->addr);
		return (ENXIO);
	}

	if (bus_set_resource(dev, SYS_RES_IOPORT, 0, wp->addr, WDS_NPORTS) < 0)
		return (ENXIO);

	/* get the DRQ; the card only supports DRQ/DACK 5, 6 or 7 (see W2) */
	wp->drq = bus_get_resource_start(dev, SYS_RES_DRQ, 0 /*rid*/);
	if (wp->drq < 5 || wp->drq > 7) {
		device_printf(dev, "invalid DRQ %d\n", wp->drq);
		return (ENXIO);
	}

	/* get the IRQ */
	irq = bus_get_resource_start(dev, SYS_RES_IRQ, 0 /*rid*/);
	if (irq < 3) {
		device_printf(dev, "invalid IRQ %d\n", irq);
		return (ENXIO);
	}

	wp->port_rid = 0;
	wp->port_r = bus_alloc_resource(dev, SYS_RES_IOPORT, &wp->port_rid,
					/*start*/ 0, /*end*/ ~0,
					/*count*/ 0, RF_ACTIVE);
	if (wp->port_r == NULL)
		return (ENXIO);

	error = wds_preinit(wp);

	/*
	 * We cannot hold resources between probe and
	 * attach as we may never be attached.
*/ wds_free_resources(wp); return (error); } static int wds_attach(device_t dev) { struct wds *wp; struct cam_devq *devq; struct cam_sim *sim; struct cam_path *pathp; int i; int error = 0; wp = (struct wds *)device_get_softc(dev); wp->port_rid = 0; wp->port_r = bus_alloc_resource(dev, SYS_RES_IOPORT, &wp->port_rid, /*start*/ 0, /*end*/ ~0, /*count*/ 0, RF_ACTIVE); if (wp->port_r == NULL) return (ENXIO); /* We must now release resources on error. */ wp->drq_rid = 0; wp->drq_r = bus_alloc_resource(dev, SYS_RES_DRQ, &wp->drq_rid, /*start*/ 0, /*end*/ ~0, /*count*/ 0, RF_ACTIVE); if (wp->drq_r == NULL) goto bad; wp->intr_rid = 0; wp->intr_r = bus_alloc_resource(dev, SYS_RES_IRQ, &wp->intr_rid, /*start*/ 0, /*end*/ ~0, /*count*/ 0, RF_ACTIVE); if (wp->intr_r == NULL) goto bad; error = bus_setup_intr(dev, wp->intr_r, INTR_TYPE_CAM | INTR_ENTROPY, NULL, (driver_intr_t *)wds_intr, (void *)wp, &wp->intr_cookie); if (error) goto bad; /* now create the memory buffer */ error = bus_dma_tag_create(bus_get_dma_tag(dev), /*alignment*/4, /*boundary*/0, /*lowaddr*/BUS_SPACE_MAXADDR_24BIT, /*highaddr*/ BUS_SPACE_MAXADDR, /*filter*/ NULL, /*filterarg*/ NULL, /*maxsize*/ sizeof(* wp->dx), /*nsegments*/ 1, /*maxsegsz*/ sizeof(* wp->dx), /*flags*/ 0, /*lockfunc*/busdma_lock_mutex, /*lockarg*/&Giant, &wp->bustag); if (error) goto bad; error = bus_dmamem_alloc(wp->bustag, (void **)&wp->dx, /*flags*/ 0, &wp->busmap); if (error) goto bad; bus_dmamap_load(wp->bustag, wp->busmap, (void *)wp->dx, sizeof(* wp->dx), wds_alloc_callback, (void *)&wp->dx_p, /*flags*/0); /* initialize the wds_req structures on this unit */ for(i=0; idx->req[i].id = i; wp->wdsr_free |= 1< 32) { fragsiz = (BUFSIZ / 32) & ~0x01; /* keep it word-aligned */ device_printf(dev, "data buffer fragment size too small. 
" "BUFSIZE / FRAGSIZE must be <= 32\n"); } else fragsiz = FRAGSIZ & ~0x01; /* keep it word-aligned */ wp->data_free = 0; nfrags = 0; for (i = fragsiz; i <= BUFSIZ; i += fragsiz) { nfrags++; wp->data_free = (wp->data_free << 1) | 1; } /* complete the hardware initialization */ if (wds_init(wp) != 0) goto bad; if (wds_getvers(wp) == -1) device_printf(dev, "getvers failed\n"); device_printf(dev, "using %d bytes / %d frags for dma buffer\n", BUFSIZ, nfrags); devq = cam_simq_alloc(MAXSIMUL); if (devq == NULL) goto bad; sim = cam_sim_alloc(wds_action, wds_poll, "wds", (void *) wp, wp->unit, &Giant, 1, 1, devq); if (sim == NULL) { cam_simq_free(devq); goto bad; } wp->sim = sim; if (xpt_bus_register(sim, dev, 0) != CAM_SUCCESS) { cam_sim_free(sim, /* free_devq */ TRUE); goto bad; } if (xpt_create_path(&pathp, /* periph */ NULL, cam_sim_path(sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { xpt_bus_deregister(cam_sim_path(sim)); cam_sim_free(sim, /* free_devq */ TRUE); goto bad; } wp->path = pathp; return (0); bad: wds_free_resources(wp); if (error) return (error); else /* exact error is unknown */ return (ENXIO); } /* callback to save the physical address */ static void wds_alloc_callback(void *arg, bus_dma_segment_t *seg, int nseg, int error) { *(bus_addr_t *)arg = seg[0].ds_addr; } static void wds_free_resources(struct wds *wp) { /* check every resource and free if not zero */ /* interrupt handler */ if (wp->intr_r) { bus_teardown_intr(wp->dev, wp->intr_r, wp->intr_cookie); bus_release_resource(wp->dev, SYS_RES_IRQ, wp->intr_rid, wp->intr_r); wp->intr_r = 0; } /* all kinds of memory maps we could have allocated */ if (wp->dx_p) { bus_dmamap_unload(wp->bustag, wp->busmap); wp->dx_p = 0; } if (wp->dx) { /* wp->busmap may be legitimately equal to 0 */ /* the map will also be freed */ bus_dmamem_free(wp->bustag, wp->dx, wp->busmap); wp->dx = 0; } if (wp->bustag) { bus_dma_tag_destroy(wp->bustag); wp->bustag = 0; } /* release all the bus resources */ if 
(wp->drq_r) { bus_release_resource(wp->dev, SYS_RES_DRQ, wp->drq_rid, wp->drq_r); wp->drq_r = 0; } if (wp->port_r) { bus_release_resource(wp->dev, SYS_RES_IOPORT, wp->port_rid, wp->port_r); wp->port_r = 0; } } /* allocate contiguous fragments from the buffer */ static u_int32_t frag_alloc(struct wds *wp, int size, u_int8_t **res, u_int32_t *maskp) { int i; u_int32_t mask; u_int32_t free; if (size > fragsiz * nfrags) return (CAM_REQ_TOO_BIG); mask = 1; /* always allocate at least 1 fragment */ for (i = fragsiz; i < size; i += fragsiz) mask = (mask << 1) | 1; free = wp->data_free; if(free != 0) { i = ffs(free)-1; /* ffs counts bits from 1 */ for (mask <<= i; i < nfrags; i++) { if ((free & mask) == mask) { wp->data_free &= ~mask; /* mark frags as busy */ *maskp = mask; *res = &wp->dx->data[fragsiz * i]; DBG(DBX "wds%d: allocated buffer mask=0x%x\n", wp->unit, mask); return (CAM_REQ_CMP); } if (mask & 0x80000000) break; mask <<= 1; } } return (CAM_REQUEUE_REQ); /* no free memory now, try later */ } static void frag_free(struct wds *wp, u_int32_t mask) { wp->data_free |= mask; /* mark frags as free */ DBG(DBX "wds%d: freed buffer mask=0x%x\n", wp->unit, mask); } static struct wds_req * wdsr_alloc(struct wds *wp) { struct wds_req *r; int x; int i; r = NULL; x = splcam(); /* anyway most of the time only 1 or 2 commands will * be active because SCSI disconnect is not supported * by hardware, so the search should be fast enough */ i = ffs(wp->wdsr_free) - 1; if(i < 0) { splx(x); return (NULL); } wp->wdsr_free &= ~ (1<dx->req[i]; r->flags = 0; /* reset all flags */ r->ombn = i; /* luckily we have one omb per wdsr */ wp->dx->ombs[i].stat = 1; r->mask = 0; splx(x); smallog3('r', i + '0', r->ombn + '0'); return (r); } static void wds_intr(struct wds *wp) { struct wds_req *rp; struct wds_mb *in; u_int8_t stat; u_int8_t c; int addr = wp->addr; DBG(DBX "wds%d: interrupt [\n", wp->unit); smallog('['); if (inb(addr + WDS_STAT) & WDS_IRQ) { c = inb(addr + WDS_IRQSTAT); if ((c & 
WDSI_MASK) == WDSI_MSVC) { c = c & ~WDSI_MASK; in = &wp->dx->imbs[c]; rp = cmdtovirt(wp, scsi_3btoul(in->addr)); stat = in->stat; if (rp != NULL) wds_done(wp, rp, stat); else device_printf(wp->dev, "got weird command address %p" "from controller\n", rp); in->stat = 0; } else device_printf(wp->dev, "weird interrupt, irqstat=0x%x\n", c); outb(addr + WDS_IRQACK, 0); } else { smallog('?'); } smallog(']'); DBG(DBX "wds%d: ]\n", wp->unit); } static void wds_done(struct wds *wp, struct wds_req *r, u_int8_t stat) { struct ccb_hdr *ccb_h; struct ccb_scsiio *csio; int status; smallog('d'); if (r->flags & WR_DONE) { device_printf(wp->dev, "request %d reported done twice\n", r->id); smallog2('x', r->id + '0'); return; } smallog(r->id + '0'); ccb_h = &r->ccb->ccb_h; csio = &r->ccb->csio; status = CAM_REQ_CMP_ERR; DBG(DBX "wds%d: %s stat=0x%x c->stat=0x%x c->venderr=0x%x\n", wp->unit, r->flags & WR_SENSE ? "(sense)" : "", stat, r->cmd.stat, r->cmd.venderr); if (r->flags & WR_SENSE) { if (stat == ICMB_OK || (stat == ICMB_OKERR && r->cmd.stat == 0)) { DBG(DBX "wds%d: sense 0x%x\n", wp->unit, r->buf[0]); /* it has the same size now but for future */ bcopy(r->buf, &csio->sense_data, sizeof(struct scsi_sense_data) > csio->sense_len ? 
csio->sense_len : sizeof(struct scsi_sense_data)); if (sizeof(struct scsi_sense_data) >= csio->sense_len) csio->sense_resid = 0; else csio->sense_resid = csio->sense_len - sizeof(struct scsi_sense_data); status = CAM_AUTOSNS_VALID | CAM_SCSI_STATUS_ERROR; } else { status = CAM_AUTOSENSE_FAIL; } } else { switch (stat) { case ICMB_OK: if (ccb_h) { csio->resid = 0; csio->scsi_status = r->cmd.stat; status = CAM_REQ_CMP; } break; case ICMB_OKERR: if (ccb_h) { csio->scsi_status = r->cmd.stat; if (r->cmd.stat) { if (ccb_h->flags & CAM_DIS_AUTOSENSE) status = CAM_SCSI_STATUS_ERROR; else { if ( wds_runsense(wp, r) == CAM_REQ_CMP ) return; /* in case of error continue with freeing of CCB */ } } else { csio->resid = 0; status = CAM_REQ_CMP; } } break; case ICMB_ETIME: if (ccb_h) status = CAM_SEL_TIMEOUT; break; case ICMB_ERESET: case ICMB_ETARCMD: case ICMB_ERESEL: case ICMB_ESEL: case ICMB_EABORT: case ICMB_ESRESET: case ICMB_EHRESET: if (ccb_h) status = CAM_REQ_CMP_ERR; break; } if (ccb_h && (ccb_h->flags & CAM_DIR_MASK) == CAM_DIR_IN) { /* we accept only virtual addresses in wds_action() */ bcopy(r->buf, csio->data_ptr, csio->dxfer_len); } } r->flags |= WR_DONE; wp->dx->ombs[r->ombn].stat = 0; if (ccb_h) { wdsr_ccb_done(wp, r, r->ccb, status); smallog3('-', ccb_h->target_id + '0', ccb_h->target_lun + '0'); } else { frag_free(wp, r->mask); if (wp->want_wdsr) { wp->want_wdsr = 0; xpt_release_simq(wp->sim, /* run queue */ 1); } wp->wdsr_free |= (1 << r->id); } DBG(DBX "wds%d: request %p done\n", wp->unit, r); } /* command returned bad status, request sense */ static int wds_runsense(struct wds *wp, struct wds_req *r) { u_int8_t c; struct ccb_hdr *ccb_h; ccb_h = &r->ccb->ccb_h; r->flags |= WR_SENSE; scsi_ulto3b(WDSTOPHYS(wp, &r->cmd), wp->dx->ombs[r->ombn].addr); bzero(&r->cmd, sizeof r->cmd); r->cmd.cmd = WDSX_SCSICMD; r->cmd.targ = (ccb_h->target_id << 5) | ccb_h->target_lun; scsi_ulto3b(0, r->cmd.next); r->cmd.scb[0] = REQUEST_SENSE; r->cmd.scb[1] = ccb_h->target_lun << 5; 
r->cmd.scb[4] = sizeof(struct scsi_sense_data); r->cmd.scb[5] = 0; scsi_ulto3b(WDSTOPHYS(wp, r->buf), r->cmd.data); scsi_ulto3b(sizeof(struct scsi_sense_data), r->cmd.len); r->cmd.write = 0x80; outb(wp->addr + WDS_HCR, WDSH_IRQEN | WDSH_DRQEN); wp->dx->ombs[r->ombn].stat = 1; c = WDSC_MSTART(r->ombn); if (wds_cmd(wp->addr, &c, sizeof c) != 0) { device_printf(wp->dev, "unable to start outgoing sense mbox\n"); wp->dx->ombs[r->ombn].stat = 0; wdsr_ccb_done(wp, r, r->ccb, CAM_AUTOSENSE_FAIL); return CAM_AUTOSENSE_FAIL; } else { DBG(DBX "wds%d: enqueued status cmd 0x%x, r=%p\n", wp->unit, r->cmd.scb[0] & 0xFF, r); /* don't free CCB yet */ smallog3('*', ccb_h->target_id + '0', ccb_h->target_lun + '0'); return CAM_REQ_CMP; } } static int wds_getvers(struct wds *wp) { struct wds_req *r; int base; u_int8_t c; int i; base = wp->addr; r = wdsr_alloc(wp); if (!r) { device_printf(wp->dev, "no request slot available!\n"); return (-1); } r->flags &= ~WR_DONE; r->ccb = NULL; scsi_ulto3b(WDSTOPHYS(wp, &r->cmd), wp->dx->ombs[r->ombn].addr); bzero(&r->cmd, sizeof r->cmd); r->cmd.cmd = WDSX_GETFIRMREV; outb(base + WDS_HCR, WDSH_DRQEN); c = WDSC_MSTART(r->ombn); if (wds_cmd(base, (u_int8_t *) & c, sizeof c)) { device_printf(wp->dev, "version request failed\n"); wp->wdsr_free |= (1 << r->id); wp->dx->ombs[r->ombn].stat = 0; return (-1); } while (1) { i = 0; while ((inb(base + WDS_STAT) & WDS_IRQ) == 0) { DELAY(9000); if (++i == 100) { device_printf(wp->dev, "getvers timeout\n"); return (-1); } } wds_intr(wp); if (r->flags & WR_DONE) { device_printf(wp->dev, "firmware version %d.%02d\n", r->cmd.targ, r->cmd.scb[0]); wp->wdsr_free |= (1 << r->id); return (0); } } } static void wdsr_ccb_done(struct wds *wp, struct wds_req *r, union ccb *ccb, u_int32_t status) { ccb->ccb_h.ccb_wdsr = 0; if (r != NULL) { /* To implement timeouts we would need to know how to abort the * command on controller, and this is a great mystery. 
* So for now we just pass the responsibility for timeouts * to the controlles itself, it does that reasonably good. */ /* untimeout(_timeout, (caddr_t) hcb, ccb->ccb_h.timeout_ch); */ /* we're about to free a hcb, so the shortage has ended */ frag_free(wp, r->mask); if (wp->want_wdsr && status != CAM_REQUEUE_REQ) { wp->want_wdsr = 0; status |= CAM_RELEASE_SIMQ; smallog('R'); } wp->wdsr_free |= (1 << r->id); } ccb->ccb_h.status = status | (ccb->ccb_h.status & ~(CAM_STATUS_MASK | CAM_SIM_QUEUED)); xpt_done(ccb); } static void wds_scsi_io(struct cam_sim * sim, struct ccb_scsiio * csio) { int unit = cam_sim_unit(sim); struct wds *wp; struct ccb_hdr *ccb_h; struct wds_req *r; int base; u_int8_t c; int error; int n; wp = (struct wds *)cam_sim_softc(sim); ccb_h = &csio->ccb_h; DBG(DBX "wds%d: cmd TARG=%d LUN=%d\n", unit, ccb_h->target_id, ccb_h->target_lun); if (ccb_h->target_id > 7 || ccb_h->target_id == WDS_HBA_ID) { ccb_h->status = CAM_TID_INVALID; xpt_done((union ccb *) csio); return; } if (ccb_h->target_lun > 7) { ccb_h->status = CAM_LUN_INVALID; xpt_done((union ccb *) csio); return; } if (csio->dxfer_len > BUFSIZ) { ccb_h->status = CAM_REQ_TOO_BIG; xpt_done((union ccb *) csio); return; } - if (ccb_h->flags & (CAM_CDB_PHYS | CAM_SCATTER_VALID | CAM_DATA_PHYS)) { + if ((ccb_h->flags & CAM_DATA_MASK) != CAM_DATA_VADDR) { /* don't support these */ ccb_h->status = CAM_REQ_INVALID; xpt_done((union ccb *) csio); return; } base = wp->addr; /* * this check is mostly for debugging purposes, * "can't happen" normally. 
*/ if(wp->want_wdsr) { DBG(DBX "wds%d: someone already waits for buffer\n", unit); smallog('b'); n = xpt_freeze_simq(sim, /* count */ 1); smallog('0'+n); ccb_h->status = CAM_REQUEUE_REQ; xpt_done((union ccb *) csio); return; } r = wdsr_alloc(wp); if (r == NULL) { device_printf(wp->dev, "no request slot available!\n"); wp->want_wdsr = 1; n = xpt_freeze_simq(sim, /* count */ 1); smallog2('f', '0'+n); ccb_h->status = CAM_REQUEUE_REQ; xpt_done((union ccb *) csio); return; } ccb_h->ccb_wdsr = (void *) r; r->ccb = (union ccb *) csio; switch (error = frag_alloc(wp, csio->dxfer_len, &r->buf, &r->mask)) { case CAM_REQ_CMP: break; case CAM_REQUEUE_REQ: DBG(DBX "wds%d: no data buffer available\n", unit); wp->want_wdsr = 1; n = xpt_freeze_simq(sim, /* count */ 1); smallog2('f', '0'+n); wdsr_ccb_done(wp, r, r->ccb, CAM_REQUEUE_REQ); return; default: DBG(DBX "wds%d: request is too big\n", unit); wdsr_ccb_done(wp, r, r->ccb, error); break; } ccb_h->status |= CAM_SIM_QUEUED; r->flags &= ~WR_DONE; scsi_ulto3b(WDSTOPHYS(wp, &r->cmd), wp->dx->ombs[r->ombn].addr); bzero(&r->cmd, sizeof r->cmd); r->cmd.cmd = WDSX_SCSICMD; r->cmd.targ = (ccb_h->target_id << 5) | ccb_h->target_lun; if (ccb_h->flags & CAM_CDB_POINTER) bcopy(csio->cdb_io.cdb_ptr, &r->cmd.scb, csio->cdb_len < 12 ? csio->cdb_len : 12); else bcopy(csio->cdb_io.cdb_bytes, &r->cmd.scb, csio->cdb_len < 12 ? csio->cdb_len : 12); scsi_ulto3b(csio->dxfer_len, r->cmd.len); if (csio->dxfer_len > 0 && (ccb_h->flags & CAM_DIR_MASK) == CAM_DIR_OUT) { /* we already rejected physical or scattered addresses */ bcopy(csio->data_ptr, r->buf, csio->dxfer_len); } scsi_ulto3b(csio->dxfer_len ? 
WDSTOPHYS(wp, r->buf) : 0, r->cmd.data); if ((ccb_h->flags & CAM_DIR_MASK) == CAM_DIR_IN) r->cmd.write = 0x80; else r->cmd.write = 0x00; scsi_ulto3b(0, r->cmd.next); outb(base + WDS_HCR, WDSH_IRQEN | WDSH_DRQEN); c = WDSC_MSTART(r->ombn); if (wds_cmd(base, &c, sizeof c) != 0) { device_printf(wp->dev, "unable to start outgoing mbox\n"); wp->dx->ombs[r->ombn].stat = 0; wdsr_ccb_done(wp, r, r->ccb, CAM_RESRC_UNAVAIL); return; } DBG(DBX "wds%d: enqueued cmd 0x%x, r=%p\n", unit, r->cmd.scb[0] & 0xFF, r); smallog3('+', ccb_h->target_id + '0', ccb_h->target_lun + '0'); } static void wds_action(struct cam_sim * sim, union ccb * ccb) { int unit = cam_sim_unit(sim); int s; DBG(DBX "wds%d: action 0x%x\n", unit, ccb->ccb_h.func_code); switch (ccb->ccb_h.func_code) { case XPT_SCSI_IO: s = splcam(); DBG(DBX "wds%d: SCSI IO entered\n", unit); wds_scsi_io(sim, &ccb->csio); DBG(DBX "wds%d: SCSI IO returned\n", unit); splx(s); break; case XPT_RESET_BUS: /* how to do it right ? */ printf("wds%d: reset\n", unit); ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); break; case XPT_ABORT: ccb->ccb_h.status = CAM_UA_ABORT; xpt_done(ccb); break; case XPT_CALC_GEOMETRY: { struct ccb_calc_geometry *ccg; u_int32_t size_mb; u_int32_t secs_per_cylinder; ccg = &ccb->ccg; size_mb = ccg->volume_size / ((1024L * 1024L) / ccg->block_size); ccg->heads = 64; ccg->secs_per_track = 16; secs_per_cylinder = ccg->heads * ccg->secs_per_track; ccg->cylinders = ccg->volume_size / secs_per_cylinder; ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); break; } case XPT_PATH_INQ: /* Path routing inquiry */ { struct ccb_pathinq *cpi = &ccb->cpi; cpi->version_num = 1; /* XXX??? 
*/ cpi->hba_inquiry = 0; /* nothing fancy */ cpi->target_sprt = 0; cpi->hba_misc = 0; cpi->hba_eng_cnt = 0; cpi->max_target = 7; cpi->max_lun = 7; cpi->initiator_id = WDS_HBA_ID; cpi->hba_misc = 0; cpi->bus_id = cam_sim_bus(sim); cpi->base_transfer_speed = 3300; strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); strncpy(cpi->hba_vid, "WD/FDC", HBA_IDLEN); strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); cpi->unit_number = cam_sim_unit(sim); cpi->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); break; } default: ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); break; } } static void wds_poll(struct cam_sim * sim) { wds_intr((struct wds *)cam_sim_softc(sim)); } /* part of initialization done in probe() */ /* returns 0 if OK, ENXIO if bad */ static int wds_preinit(struct wds *wp) { int base; int i; base = wp->addr; /* * Sending a command causes the CMDRDY bit to clear. */ outb(base + WDS_CMD, WDSC_NOOP); if (inb(base + WDS_STAT) & WDS_RDY) return (ENXIO); /* * the controller exists. reset and init. 
*/ outb(base + WDS_HCR, WDSH_ASCRESET | WDSH_SCSIRESET); DELAY(30); outb(base + WDS_HCR, 0); if ((inb(base + WDS_STAT) & (WDS_RDY)) != WDS_RDY) { for (i = 0; i < 10; i++) { if ((inb(base + WDS_STAT) & (WDS_RDY)) == WDS_RDY) break; DELAY(40000); } if ((inb(base + WDS_STAT) & (WDS_RDY)) != WDS_RDY) /* probe timeout */ return (ENXIO); } return (0); } /* part of initialization done in attach() */ /* returns 0 if OK, 1 if bad */ static int wds_init(struct wds *wp) { struct wds_setup init; int base; int i; struct wds_cmd wc; base = wp->addr; outb(base + WDS_HCR, WDSH_DRQEN); isa_dmacascade(wp->drq); if ((inb(base + WDS_STAT) & (WDS_RDY)) != WDS_RDY) { for (i = 0; i < 10; i++) { if ((inb(base + WDS_STAT) & (WDS_RDY)) == WDS_RDY) break; DELAY(40000); } if ((inb(base + WDS_STAT) & (WDS_RDY)) != WDS_RDY) /* probe timeout */ return (1); } bzero(&init, sizeof init); init.cmd = WDSC_INIT; init.scsi_id = WDS_HBA_ID; init.buson_t = 24; init.busoff_t = 48; scsi_ulto3b(WDSTOPHYS(wp, &wp->dx->ombs), init.mbaddr); init.xx = 0; init.nomb = WDS_NOMB; init.nimb = WDS_NIMB; wds_wait(base + WDS_STAT, WDS_RDY, WDS_RDY); if (wds_cmd(base, (u_int8_t *) & init, sizeof init) != 0) { device_printf(wp->dev, "wds_cmd init failed\n"); return (1); } wds_wait(base + WDS_STAT, WDS_INIT, WDS_INIT); wds_wait(base + WDS_STAT, WDS_RDY, WDS_RDY); bzero(&wc, sizeof wc); wc.cmd = WDSC_DISUNSOL; if (wds_cmd(base, (char *) &wc, sizeof wc) != 0) { device_printf(wp->dev, "wds_cmd init2 failed\n"); return (1); } return (0); } static int wds_cmd(int base, u_int8_t * p, int l) { int s = splcam(); while (l--) { do { outb(base + WDS_CMD, *p); wds_wait(base + WDS_STAT, WDS_RDY, WDS_RDY); } while (inb(base + WDS_STAT) & WDS_REJ); p++; } wds_wait(base + WDS_STAT, WDS_RDY, WDS_RDY); splx(s); return (0); } static void wds_wait(int reg, int mask, int val) { while ((inb(reg) & mask) != val) ; } static struct wds_req * cmdtovirt(struct wds *wp, u_int32_t phys) { char *a; a = WDSTOVIRT(wp, (uintptr_t)phys); if( a < (char 
*)&wp->dx->req[0] || a>= (char *)&wp->dx->req[MAXSIMUL]) { device_printf(wp->dev, "weird phys address 0x%x\n", phys); return (NULL); } a -= (int)offsetof(struct wds_req, cmd); /* convert cmd to request */ return ((struct wds_req *)a); } /* for debugging, print out all the data about the status of devices */ void wds_print(void) { int unit; int i; struct wds_req *r; struct wds *wp; for (unit = 0; unit < devclass_get_maxunit(wds_devclass); unit++) { wp = (struct wds *) devclass_get_device(wds_devclass, unit); if (wp == NULL) continue; printf("wds%d: want_wdsr=0x%x stat=0x%x irq=%s irqstat=0x%x\n", unit, wp->want_wdsr, inb(wp->addr + WDS_STAT) & 0xff, (inb(wp->addr + WDS_STAT) & WDS_IRQ) ? "ready" : "no", inb(wp->addr + WDS_IRQSTAT) & 0xff); for (i = 0; i < MAXSIMUL; i++) { r = &wp->dx->req[i]; if( wp->wdsr_free & (1 << r->id) ) { printf("req=%d flg=0x%x ombn=%d ombstat=%d " "mask=0x%x targ=%d lun=%d cmd=0x%x\n", i, r->flags, r->ombn, wp->dx->ombs[r->ombn].stat, r->mask, r->cmd.targ >> 5, r->cmd.targ & 7, r->cmd.scb[0]); } } } } #if WDS_DEBUG == 2 /* create circular log buffer */ static char * wds_nextlog(void) { int n = logwrite; if (++logwrite >= NLOGLINES) logwrite = 0; if (logread == logwrite) if (++logread >= NLOGLINES) logread = 0; return (wds_log[n]); } void wds_printlog(void) { /* print the circular buffer */ int i; for (i = logread; i != logwrite;) { printf("%s", wds_log[i]); if (i == NLOGLINES) i = 0; else i++; } } #endif /* WDS_DEBUG */