Index: stable/11/sys/dev/aha/aha.c =================================================================== --- stable/11/sys/dev/aha/aha.c (revision 335137) +++ stable/11/sys/dev/aha/aha.c (revision 335138) @@ -1,1820 +1,1820 @@ /* * Generic register and struct definitions for the Adaptech 154x/164x * SCSI host adapters. Product specific probe and attach routines can * be found in: * aha 1542A/1542B/1542C/1542CF/1542CP aha_isa.c * aha 1640 aha_mca.c */ /*- * Copyright (c) 1998 M. Warner Losh. * All Rights Reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * Derived from bt.c written by: * * Copyright (c) 1998 Justin T. Gibbs. * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define PRVERB(x) do { if (bootverbose) device_printf x; } while (0) /* Macro to determine that a rev is potentially a new valid one * so that the driver doesn't keep breaking on new revs as it * did for the CF and CP. 
*/ #define PROBABLY_NEW_BOARD(REV) (REV > 0x43 && REV < 0x56) /* MailBox Management functions */ static __inline void ahanextinbox(struct aha_softc *aha); static __inline void ahanextoutbox(struct aha_softc *aha); #define aha_name(aha) device_get_nameunit(aha->dev) static __inline void ahanextinbox(struct aha_softc *aha) { if (aha->cur_inbox == aha->last_inbox) aha->cur_inbox = aha->in_boxes; else aha->cur_inbox++; } static __inline void ahanextoutbox(struct aha_softc *aha) { if (aha->cur_outbox == aha->last_outbox) aha->cur_outbox = aha->out_boxes; else aha->cur_outbox++; } #define ahautoa24(u,s3) \ (s3)[0] = ((u) >> 16) & 0xff; \ (s3)[1] = ((u) >> 8) & 0xff; \ (s3)[2] = (u) & 0xff; #define aha_a24tou(s3) \ (((s3)[0] << 16) | ((s3)[1] << 8) | (s3)[2]) /* CCB Management functions */ static __inline uint32_t ahaccbvtop(struct aha_softc *aha, struct aha_ccb *accb); static __inline struct aha_ccb* ahaccbptov(struct aha_softc *aha, uint32_t ccb_addr); static __inline uint32_t ahaccbvtop(struct aha_softc *aha, struct aha_ccb *accb) { return (aha->aha_ccb_physbase + (uint32_t)((caddr_t)accb - (caddr_t)aha->aha_ccb_array)); } static __inline struct aha_ccb * ahaccbptov(struct aha_softc *aha, uint32_t ccb_addr) { return (aha->aha_ccb_array + + ((struct aha_ccb*)(uintptr_t)ccb_addr - (struct aha_ccb*)(uintptr_t)aha->aha_ccb_physbase)); } static struct aha_ccb* ahagetccb(struct aha_softc *aha); static __inline void ahafreeccb(struct aha_softc *aha, struct aha_ccb *accb); static void ahaallocccbs(struct aha_softc *aha); static bus_dmamap_callback_t ahaexecuteccb; static void ahadone(struct aha_softc *aha, struct aha_ccb *accb, aha_mbi_comp_code_t comp_code); static void aha_intr_locked(struct aha_softc *aha); /* Host adapter command functions */ static int ahareset(struct aha_softc* aha, int hard_reset); /* Initialization functions */ static int ahainitmboxes(struct aha_softc *aha); static bus_dmamap_callback_t ahamapmboxes; static bus_dmamap_callback_t ahamapccbs; static 
bus_dmamap_callback_t ahamapsgs; /* Transfer Negotiation Functions */ static void ahafetchtransinfo(struct aha_softc *aha, struct ccb_trans_settings *cts); /* CAM SIM entry points */ #define ccb_accb_ptr spriv_ptr0 #define ccb_aha_ptr spriv_ptr1 static void ahaaction(struct cam_sim *sim, union ccb *ccb); static void ahapoll(struct cam_sim *sim); /* Our timeout handler */ static void ahatimeout(void *arg); /* Exported functions */ void aha_alloc(struct aha_softc *aha) { SLIST_INIT(&aha->free_aha_ccbs); LIST_INIT(&aha->pending_ccbs); SLIST_INIT(&aha->sg_maps); aha->ccb_sg_opcode = INITIATOR_SG_CCB_WRESID; aha->ccb_ccb_opcode = INITIATOR_CCB_WRESID; mtx_init(&aha->lock, "aha", NULL, MTX_DEF); } void aha_free(struct aha_softc *aha) { switch (aha->init_level) { default: case 8: { struct sg_map_node *sg_map; while ((sg_map = SLIST_FIRST(&aha->sg_maps))!= NULL) { SLIST_REMOVE_HEAD(&aha->sg_maps, links); bus_dmamap_unload(aha->sg_dmat, sg_map->sg_dmamap); bus_dmamem_free(aha->sg_dmat, sg_map->sg_vaddr, sg_map->sg_dmamap); free(sg_map, M_DEVBUF); } bus_dma_tag_destroy(aha->sg_dmat); } case 7: bus_dmamap_unload(aha->ccb_dmat, aha->ccb_dmamap); case 6: bus_dmamem_free(aha->ccb_dmat, aha->aha_ccb_array, aha->ccb_dmamap); case 5: bus_dma_tag_destroy(aha->ccb_dmat); case 4: bus_dmamap_unload(aha->mailbox_dmat, aha->mailbox_dmamap); case 3: bus_dmamem_free(aha->mailbox_dmat, aha->in_boxes, aha->mailbox_dmamap); case 2: bus_dma_tag_destroy(aha->buffer_dmat); case 1: bus_dma_tag_destroy(aha->mailbox_dmat); case 0: break; } mtx_destroy(&aha->lock); } /* * Probe the adapter and verify that the card is an Adaptec. */ int aha_probe(struct aha_softc* aha) { u_int status; u_int intstat; int error; board_id_data_t board_id; /* * See if the three I/O ports look reasonable. * Touch the minimal number of registers in the * failure case. 
*/ status = aha_inb(aha, STATUS_REG); if ((status == 0) || (status & (DIAG_ACTIVE|CMD_REG_BUSY | STATUS_REG_RSVD)) != 0) { PRVERB((aha->dev, "status reg test failed %x\n", status)); return (ENXIO); } intstat = aha_inb(aha, INTSTAT_REG); if ((intstat & INTSTAT_REG_RSVD) != 0) { PRVERB((aha->dev, "Failed Intstat Reg Test\n")); return (ENXIO); } /* * Looking good so far. Final test is to reset the * adapter and fetch the board ID and ensure we aren't * looking at a BusLogic. */ if ((error = ahareset(aha, /*hard_reset*/TRUE)) != 0) { PRVERB((aha->dev, "Failed Reset\n")); return (ENXIO); } /* * Get the board ID. We use this to see if we're dealing with * a buslogic card or an aha card (or clone). */ error = aha_cmd(aha, AOP_INQUIRE_BOARD_ID, NULL, /*parmlen*/0, (uint8_t*)&board_id, sizeof(board_id), DEFAULT_CMD_TIMEOUT); if (error != 0) { PRVERB((aha->dev, "INQUIRE failed %x\n", error)); return (ENXIO); } aha->fw_major = board_id.firmware_rev_major; aha->fw_minor = board_id.firmware_rev_minor; aha->boardid = board_id.board_type; /* * The Buslogic cards have an id of either 0x41 or 0x42. So * if those come up in the probe, we test the geometry register * of the board. Adaptec boards that are this old will not have * this register, and return 0xff, while buslogic cards will return * something different. * * It appears that for reasons unknow, for the for the * aha-1542B cards, we need to wait a little bit before trying * to read the geometry register. I picked 10ms since we have * reports that a for loop to 1000 did the trick, and this * errs on the side of conservatism. Besides, no one will * notice a 10mS delay here, even the 1542B card users :-) * * Some compatible cards return 0 here. Some cards also * seem to return 0x7f. * * XXX I'm not sure how this will impact other cloned cards * * This really should be replaced with the esetup command, since * that appears to be more reliable. 
This becomes more and more * true over time as we discover more cards that don't read the * geometry register consistently. */ if (aha->boardid <= 0x42) { /* Wait 10ms before reading */ DELAY(10000); status = aha_inb(aha, GEOMETRY_REG); if (status != 0xff && status != 0x00 && status != 0x7f) { PRVERB((aha->dev, "Geometry Register test failed %#x\n", status)); return (ENXIO); } } return (0); } /* * Pull the boards setup information and record it in our softc. */ int aha_fetch_adapter_info(struct aha_softc *aha) { setup_data_t setup_info; config_data_t config_data; uint8_t length_param; int error; struct aha_extbios extbios; switch (aha->boardid) { case BOARD_1540_16HEAD_BIOS: snprintf(aha->model, sizeof(aha->model), "1540 16 head BIOS"); break; case BOARD_1540_64HEAD_BIOS: snprintf(aha->model, sizeof(aha->model), "1540 64 head BIOS"); break; case BOARD_1542: snprintf(aha->model, sizeof(aha->model), "1540/1542 64 head BIOS"); break; case BOARD_1640: snprintf(aha->model, sizeof(aha->model), "1640"); break; case BOARD_1740: snprintf(aha->model, sizeof(aha->model), "1740A/1742A/1744"); break; case BOARD_1542C: snprintf(aha->model, sizeof(aha->model), "1542C"); break; case BOARD_1542CF: snprintf(aha->model, sizeof(aha->model), "1542CF"); break; case BOARD_1542CP: snprintf(aha->model, sizeof(aha->model), "1542CP"); break; default: snprintf(aha->model, sizeof(aha->model), "Unknown"); break; } /* * If we are a new type of 1542 board (anything newer than a 1542C) * then disable the extended bios so that the * mailbox interface is unlocked. * This is also true for the 1542B Version 3.20. First Adaptec * board that supports >1Gb drives. * No need to check the extended bios flags as some of the * extensions that cause us problems are not flagged in that byte. 
*/ if (PROBABLY_NEW_BOARD(aha->boardid) || (aha->boardid == 0x41 && aha->fw_major == 0x31 && aha->fw_minor >= 0x34)) { error = aha_cmd(aha, AOP_RETURN_EXT_BIOS_INFO, NULL, /*paramlen*/0, (u_char *)&extbios, sizeof(extbios), DEFAULT_CMD_TIMEOUT); if (error != 0) { device_printf(aha->dev, "AOP_RETURN_EXT_BIOS_INFO - Failed."); return (error); } error = aha_cmd(aha, AOP_MBOX_IF_ENABLE, (uint8_t *)&extbios, /*paramlen*/2, NULL, 0, DEFAULT_CMD_TIMEOUT); if (error != 0) { device_printf(aha->dev, "AOP_MBOX_IF_ENABLE - Failed."); return (error); } } if (aha->boardid < 0x41) device_printf(aha->dev, "Warning: aha-1542A won't work.\n"); aha->max_sg = 17; /* Need >= 17 to do 64k I/O */ aha->diff_bus = 0; aha->extended_lun = 0; aha->extended_trans = 0; aha->max_ccbs = 16; /* Determine Sync/Wide/Disc settings */ length_param = sizeof(setup_info); error = aha_cmd(aha, AOP_INQUIRE_SETUP_INFO, &length_param, /*paramlen*/1, (uint8_t*)&setup_info, sizeof(setup_info), DEFAULT_CMD_TIMEOUT); if (error != 0) { device_printf(aha->dev, "aha_fetch_adapter_info - Failed " "Get Setup Info\n"); return (error); } if (setup_info.initiate_sync != 0) { aha->sync_permitted = ALL_TARGETS; } aha->disc_permitted = ALL_TARGETS; /* We need as many mailboxes as we can have ccbs */ aha->num_boxes = aha->max_ccbs; /* Determine our SCSI ID */ error = aha_cmd(aha, AOP_INQUIRE_CONFIG, NULL, /*parmlen*/0, (uint8_t*)&config_data, sizeof(config_data), DEFAULT_CMD_TIMEOUT); if (error != 0) { device_printf(aha->dev, "aha_fetch_adapter_info - Failed Get Config\n"); return (error); } aha->scsi_id = config_data.scsi_id; return (0); } /* * Start the board, ready for normal operation */ int aha_init(struct aha_softc* aha) { /* Announce the Adapter */ device_printf(aha->dev, "AHA-%s FW Rev. %c.%c (ID=%x) ", aha->model, aha->fw_major, aha->fw_minor, aha->boardid); if (aha->diff_bus != 0) printf("Diff "); printf("SCSI Host Adapter, SCSI ID %d, %d CCBs\n", aha->scsi_id, aha->max_ccbs); /* * Create our DMA tags. 
These tags define the kinds of device * accessible memory allocations and memory mappings we will * need to perform during normal operation. * * Unless we need to further restrict the allocation, we rely * on the restrictions of the parent dmat, hence the common * use of MAXADDR and MAXSIZE. */ /* DMA tag for mapping buffers into device visible space. */ if (bus_dma_tag_create( /* parent */ aha->parent_dmat, /* alignment */ 1, /* boundary */ 0, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ BUS_SPACE_MAXADDR, /* filter */ NULL, /* filterarg */ NULL, /* maxsize */ DFLTPHYS, /* nsegments */ AHA_NSEG, /* maxsegsz */ BUS_SPACE_MAXSIZE_24BIT, /* flags */ BUS_DMA_ALLOCNOW, /* lockfunc */ busdma_lock_mutex, /* lockarg */ &aha->lock, &aha->buffer_dmat) != 0) { goto error_exit; } aha->init_level++; /* DMA tag for our mailboxes */ if (bus_dma_tag_create( /* parent */ aha->parent_dmat, /* alignment */ 1, /* boundary */ 0, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ BUS_SPACE_MAXADDR, /* filter */ NULL, /* filterarg */ NULL, /* maxsize */ aha->num_boxes * (sizeof(aha_mbox_in_t) + sizeof(aha_mbox_out_t)), /* nsegments */ 1, /* maxsegsz */ BUS_SPACE_MAXSIZE_24BIT, /* flags */ 0, /* lockfunc */ NULL, /* lockarg */ NULL, &aha->mailbox_dmat) != 0) { goto error_exit; } aha->init_level++; /* Allocation for our mailboxes */ if (bus_dmamem_alloc(aha->mailbox_dmat, (void **)&aha->out_boxes, BUS_DMA_NOWAIT, &aha->mailbox_dmamap) != 0) goto error_exit; aha->init_level++; /* And permanently map them */ bus_dmamap_load(aha->mailbox_dmat, aha->mailbox_dmamap, aha->out_boxes, aha->num_boxes * (sizeof(aha_mbox_in_t) + sizeof(aha_mbox_out_t)), ahamapmboxes, aha, /*flags*/0); aha->init_level++; aha->in_boxes = (aha_mbox_in_t *)&aha->out_boxes[aha->num_boxes]; ahainitmboxes(aha); /* DMA tag for our ccb structures */ if (bus_dma_tag_create( /* parent */ aha->parent_dmat, /* alignment */ 1, /* boundary */ 0, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ BUS_SPACE_MAXADDR, /* filter */ NULL, 
/* filterarg */ NULL, /* maxsize */ aha->max_ccbs * sizeof(struct aha_ccb), /* nsegments */ 1, /* maxsegsz */ BUS_SPACE_MAXSIZE_24BIT, /* flags */ 0, /* lockfunc */ NULL, /* lockarg */ NULL, &aha->ccb_dmat) != 0) { goto error_exit; } aha->init_level++; /* Allocation for our ccbs */ if (bus_dmamem_alloc(aha->ccb_dmat, (void **)&aha->aha_ccb_array, BUS_DMA_NOWAIT, &aha->ccb_dmamap) != 0) goto error_exit; aha->init_level++; /* And permanently map them */ bus_dmamap_load(aha->ccb_dmat, aha->ccb_dmamap, aha->aha_ccb_array, aha->max_ccbs * sizeof(struct aha_ccb), ahamapccbs, aha, /*flags*/0); aha->init_level++; /* DMA tag for our S/G structures. We allocate in page sized chunks */ if (bus_dma_tag_create( /* parent */ aha->parent_dmat, /* alignment */ 1, /* boundary */ 0, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ BUS_SPACE_MAXADDR, /* filter */ NULL, /* filterarg */ NULL, /* maxsize */ PAGE_SIZE, /* nsegments */ 1, /* maxsegsz */ BUS_SPACE_MAXSIZE_24BIT, /* flags */ 0, /* lockfunc */ NULL, /* lockarg */ NULL, &aha->sg_dmat) != 0) goto error_exit; aha->init_level++; /* Perform initial CCB allocation */ bzero(aha->aha_ccb_array, aha->max_ccbs * sizeof(struct aha_ccb)); ahaallocccbs(aha); if (aha->num_ccbs == 0) { device_printf(aha->dev, "aha_init - Unable to allocate initial ccbs\n"); goto error_exit; } /* * Note that we are going and return (to probe) */ return (0); error_exit: return (ENXIO); } int aha_attach(struct aha_softc *aha) { int tagged_dev_openings; struct cam_devq *devq; /* * We don't do tagged queueing, since the aha cards don't * support it. */ tagged_dev_openings = 0; /* * Create the device queue for our SIM. 
*/ devq = cam_simq_alloc(aha->max_ccbs - 1); if (devq == NULL) return (ENOMEM); /* * Construct our SIM entry */ aha->sim = cam_sim_alloc(ahaaction, ahapoll, "aha", aha, device_get_unit(aha->dev), &aha->lock, 2, tagged_dev_openings, devq); if (aha->sim == NULL) { cam_simq_free(devq); return (ENOMEM); } mtx_lock(&aha->lock); if (xpt_bus_register(aha->sim, aha->dev, 0) != CAM_SUCCESS) { cam_sim_free(aha->sim, /*free_devq*/TRUE); mtx_unlock(&aha->lock); return (ENXIO); } if (xpt_create_path(&aha->path, /*periph*/NULL, cam_sim_path(aha->sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { xpt_bus_deregister(cam_sim_path(aha->sim)); cam_sim_free(aha->sim, /*free_devq*/TRUE); mtx_unlock(&aha->lock); return (ENXIO); } mtx_unlock(&aha->lock); return (0); } static void ahaallocccbs(struct aha_softc *aha) { struct aha_ccb *next_ccb; struct sg_map_node *sg_map; bus_addr_t physaddr; aha_sg_t *segs; int newcount; int i; next_ccb = &aha->aha_ccb_array[aha->num_ccbs]; sg_map = malloc(sizeof(*sg_map), M_DEVBUF, M_NOWAIT); if (sg_map == NULL) return; /* Allocate S/G space for the next batch of CCBS */ if (bus_dmamem_alloc(aha->sg_dmat, (void **)&sg_map->sg_vaddr, BUS_DMA_NOWAIT, &sg_map->sg_dmamap) != 0) { free(sg_map, M_DEVBUF); return; } SLIST_INSERT_HEAD(&aha->sg_maps, sg_map, links); bus_dmamap_load(aha->sg_dmat, sg_map->sg_dmamap, sg_map->sg_vaddr, PAGE_SIZE, ahamapsgs, aha, /*flags*/0); segs = sg_map->sg_vaddr; physaddr = sg_map->sg_physaddr; newcount = (PAGE_SIZE / (AHA_NSEG * sizeof(aha_sg_t))); for (i = 0; aha->num_ccbs < aha->max_ccbs && i < newcount; i++) { int error; next_ccb->sg_list = segs; next_ccb->sg_list_phys = physaddr; next_ccb->flags = ACCB_FREE; callout_init_mtx(&next_ccb->timer, &aha->lock, 0); error = bus_dmamap_create(aha->buffer_dmat, /*flags*/0, &next_ccb->dmamap); if (error != 0) break; SLIST_INSERT_HEAD(&aha->free_aha_ccbs, next_ccb, links); segs += AHA_NSEG; physaddr += (AHA_NSEG * sizeof(aha_sg_t)); next_ccb++; aha->num_ccbs++; } /* Reserve a 
CCB for error recovery */ if (aha->recovery_accb == NULL) { aha->recovery_accb = SLIST_FIRST(&aha->free_aha_ccbs); SLIST_REMOVE_HEAD(&aha->free_aha_ccbs, links); } } static __inline void ahafreeccb(struct aha_softc *aha, struct aha_ccb *accb) { if (!dumping) mtx_assert(&aha->lock, MA_OWNED); if ((accb->flags & ACCB_ACTIVE) != 0) LIST_REMOVE(&accb->ccb->ccb_h, sim_links.le); if (aha->resource_shortage != 0 && (accb->ccb->ccb_h.status & CAM_RELEASE_SIMQ) == 0) { accb->ccb->ccb_h.status |= CAM_RELEASE_SIMQ; aha->resource_shortage = FALSE; } accb->flags = ACCB_FREE; SLIST_INSERT_HEAD(&aha->free_aha_ccbs, accb, links); aha->active_ccbs--; } static struct aha_ccb* ahagetccb(struct aha_softc *aha) { struct aha_ccb* accb; if (!dumping) mtx_assert(&aha->lock, MA_OWNED); if ((accb = SLIST_FIRST(&aha->free_aha_ccbs)) != NULL) { SLIST_REMOVE_HEAD(&aha->free_aha_ccbs, links); aha->active_ccbs++; } else if (aha->num_ccbs < aha->max_ccbs) { ahaallocccbs(aha); accb = SLIST_FIRST(&aha->free_aha_ccbs); if (accb == NULL) device_printf(aha->dev, "Can't malloc ACCB\n"); else { SLIST_REMOVE_HEAD(&aha->free_aha_ccbs, links); aha->active_ccbs++; } } return (accb); } static void ahaaction(struct cam_sim *sim, union ccb *ccb) { struct aha_softc *aha; CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("ahaaction\n")); aha = (struct aha_softc *)cam_sim_softc(sim); mtx_assert(&aha->lock, MA_OWNED); switch (ccb->ccb_h.func_code) { /* Common cases first */ case XPT_SCSI_IO: /* Execute the requested I/O operation */ case XPT_RESET_DEV: /* Bus Device Reset the specified SCSI device */ { struct aha_ccb *accb; struct aha_hccb *hccb; /* * Get an accb to use. 
*/ if ((accb = ahagetccb(aha)) == NULL) { aha->resource_shortage = TRUE; xpt_freeze_simq(aha->sim, /*count*/1); ccb->ccb_h.status = CAM_REQUEUE_REQ; xpt_done(ccb); return; } hccb = &accb->hccb; /* * So we can find the ACCB when an abort is requested */ accb->ccb = ccb; ccb->ccb_h.ccb_accb_ptr = accb; ccb->ccb_h.ccb_aha_ptr = aha; /* * Put all the arguments for the xfer in the accb */ hccb->target = ccb->ccb_h.target_id; hccb->lun = ccb->ccb_h.target_lun; hccb->ahastat = 0; hccb->sdstat = 0; if (ccb->ccb_h.func_code == XPT_SCSI_IO) { struct ccb_scsiio *csio; struct ccb_hdr *ccbh; int error; csio = &ccb->csio; ccbh = &csio->ccb_h; hccb->opcode = aha->ccb_ccb_opcode; hccb->datain = (ccb->ccb_h.flags & CAM_DIR_IN) != 0; hccb->dataout = (ccb->ccb_h.flags & CAM_DIR_OUT) != 0; hccb->cmd_len = csio->cdb_len; if (hccb->cmd_len > sizeof(hccb->scsi_cdb)) { ccb->ccb_h.status = CAM_REQ_INVALID; ahafreeccb(aha, accb); xpt_done(ccb); return; } hccb->sense_len = csio->sense_len; if ((ccbh->flags & CAM_CDB_POINTER) != 0) { if ((ccbh->flags & CAM_CDB_PHYS) == 0) { bcopy(csio->cdb_io.cdb_ptr, hccb->scsi_cdb, hccb->cmd_len); } else { /* I guess I could map it in... */ ccbh->status = CAM_REQ_INVALID; ahafreeccb(aha, accb); xpt_done(ccb); return; } } else { bcopy(csio->cdb_io.cdb_bytes, hccb->scsi_cdb, hccb->cmd_len); } /* * If we have any data to send with this command, * map it into bus space. */ error = bus_dmamap_load_ccb( aha->buffer_dmat, accb->dmamap, ccb, ahaexecuteccb, accb, /*flags*/0); if (error == EINPROGRESS) { /* * So as to maintain ordering, freeze the * controller queue until our mapping is * returned. 
*/ xpt_freeze_simq(aha->sim, 1); csio->ccb_h.status |= CAM_RELEASE_SIMQ; } } else { hccb->opcode = INITIATOR_BUS_DEV_RESET; /* No data transfer */ hccb->datain = TRUE; hccb->dataout = TRUE; hccb->cmd_len = 0; hccb->sense_len = 0; ahaexecuteccb(accb, NULL, 0, 0); } break; } case XPT_ABORT: /* Abort the specified CCB */ /* XXX Implement */ ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); break; case XPT_SET_TRAN_SETTINGS: /* XXX Implement */ ccb->ccb_h.status = CAM_PROVIDE_FAIL; xpt_done(ccb); break; case XPT_GET_TRAN_SETTINGS: /* Get default/user set transfer settings for the target */ { struct ccb_trans_settings *cts = &ccb->cts; u_int target_mask = 0x01 << ccb->ccb_h.target_id; struct ccb_trans_settings_scsi *scsi = &cts->proto_specific.scsi; struct ccb_trans_settings_spi *spi = &cts->xport_specific.spi; cts->protocol = PROTO_SCSI; cts->protocol_version = SCSI_REV_2; cts->transport = XPORT_SPI; cts->transport_version = 2; if (cts->type == CTS_TYPE_USER_SETTINGS) { spi->flags = 0; if ((aha->disc_permitted & target_mask) != 0) spi->flags |= CTS_SPI_FLAGS_DISC_ENB; spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT; if ((aha->sync_permitted & target_mask) != 0) { if (aha->boardid >= BOARD_1542CF) spi->sync_period = 25; else spi->sync_period = 50; } else { spi->sync_period = 0; } if (spi->sync_period != 0) spi->sync_offset = 15; spi->valid = CTS_SPI_VALID_SYNC_RATE | CTS_SPI_VALID_SYNC_OFFSET | CTS_SPI_VALID_BUS_WIDTH | CTS_SPI_VALID_DISC; scsi->valid = CTS_SCSI_VALID_TQ; } else { ahafetchtransinfo(aha, cts); } ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); break; } case XPT_CALC_GEOMETRY: { struct ccb_calc_geometry *ccg; uint32_t size_mb; uint32_t secs_per_cylinder; ccg = &ccb->ccg; size_mb = ccg->volume_size / ((1024L * 1024L) / ccg->block_size); if (size_mb >= 1024 && (aha->extended_trans != 0)) { if (size_mb >= 2048) { ccg->heads = 255; ccg->secs_per_track = 63; } else { ccg->heads = 128; ccg->secs_per_track = 32; } } else { ccg->heads = 64; ccg->secs_per_track = 32; } 
secs_per_cylinder = ccg->heads * ccg->secs_per_track; ccg->cylinders = ccg->volume_size / secs_per_cylinder; ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); break; } case XPT_RESET_BUS: /* Reset the specified SCSI bus */ ahareset(aha, /*hardreset*/TRUE); ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); break; case XPT_TERM_IO: /* Terminate the I/O process */ /* XXX Implement */ ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); break; case XPT_PATH_INQ: /* Path routing inquiry */ { struct ccb_pathinq *cpi = &ccb->cpi; cpi->version_num = 1; /* XXX??? */ cpi->hba_inquiry = PI_SDTR_ABLE; cpi->target_sprt = 0; cpi->hba_misc = 0; cpi->hba_eng_cnt = 0; cpi->max_target = 7; cpi->max_lun = 7; cpi->initiator_id = aha->scsi_id; cpi->bus_id = cam_sim_bus(sim); cpi->base_transfer_speed = 3300; strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); strlcpy(cpi->hba_vid, "Adaptec", HBA_IDLEN); strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); cpi->unit_number = cam_sim_unit(sim); - cpi->transport = XPORT_SPI; - cpi->transport_version = 2; - cpi->protocol = PROTO_SCSI; - cpi->protocol_version = SCSI_REV_2; + cpi->transport = XPORT_SPI; + cpi->transport_version = 2; + cpi->protocol = PROTO_SCSI; + cpi->protocol_version = SCSI_REV_2; cpi->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); break; } default: ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); break; } } static void ahaexecuteccb(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) { struct aha_ccb *accb; union ccb *ccb; struct aha_softc *aha; uint32_t paddr; accb = (struct aha_ccb *)arg; ccb = accb->ccb; aha = (struct aha_softc *)ccb->ccb_h.ccb_aha_ptr; if (error != 0) { if (error != EFBIG) device_printf(aha->dev, "Unexepected error 0x%x returned from " "bus_dmamap_load\n", error); if (ccb->ccb_h.status == CAM_REQ_INPROG) { xpt_freeze_devq(ccb->ccb_h.path, /*count*/1); ccb->ccb_h.status = CAM_REQ_TOO_BIG|CAM_DEV_QFRZN; } ahafreeccb(aha, accb); xpt_done(ccb); return; } if (nseg != 0) { aha_sg_t *sg; bus_dma_segment_t 
*end_seg; bus_dmasync_op_t op; end_seg = dm_segs + nseg; /* Copy the segments into our SG list */ sg = accb->sg_list; while (dm_segs < end_seg) { ahautoa24(dm_segs->ds_len, sg->len); ahautoa24(dm_segs->ds_addr, sg->addr); sg++; dm_segs++; } if (nseg > 1) { accb->hccb.opcode = aha->ccb_sg_opcode; ahautoa24((sizeof(aha_sg_t) * nseg), accb->hccb.data_len); ahautoa24(accb->sg_list_phys, accb->hccb.data_addr); } else { bcopy(accb->sg_list->len, accb->hccb.data_len, 3); bcopy(accb->sg_list->addr, accb->hccb.data_addr, 3); } if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) op = BUS_DMASYNC_PREREAD; else op = BUS_DMASYNC_PREWRITE; bus_dmamap_sync(aha->buffer_dmat, accb->dmamap, op); } else { accb->hccb.opcode = INITIATOR_CCB; ahautoa24(0, accb->hccb.data_len); ahautoa24(0, accb->hccb.data_addr); } /* * Last time we need to check if this CCB needs to * be aborted. */ if (ccb->ccb_h.status != CAM_REQ_INPROG) { if (nseg != 0) bus_dmamap_unload(aha->buffer_dmat, accb->dmamap); ahafreeccb(aha, accb); xpt_done(ccb); return; } accb->flags = ACCB_ACTIVE; ccb->ccb_h.status |= CAM_SIM_QUEUED; LIST_INSERT_HEAD(&aha->pending_ccbs, &ccb->ccb_h, sim_links.le); callout_reset_sbt(&accb->timer, SBT_1MS * ccb->ccb_h.timeout, 0, ahatimeout, accb, 0); /* Tell the adapter about this command */ if (aha->cur_outbox->action_code != AMBO_FREE) { /* * We should never encounter a busy mailbox. * If we do, warn the user, and treat it as * a resource shortage. If the controller is * hung, one of the pending transactions will * timeout causing us to start recovery operations. 
*/ device_printf(aha->dev, "Encountered busy mailbox with %d out of %d " "commands active!!!", aha->active_ccbs, aha->max_ccbs); callout_stop(&accb->timer); if (nseg != 0) bus_dmamap_unload(aha->buffer_dmat, accb->dmamap); ahafreeccb(aha, accb); aha->resource_shortage = TRUE; xpt_freeze_simq(aha->sim, /*count*/1); ccb->ccb_h.status = CAM_REQUEUE_REQ; xpt_done(ccb); return; } paddr = ahaccbvtop(aha, accb); ahautoa24(paddr, aha->cur_outbox->ccb_addr); aha->cur_outbox->action_code = AMBO_START; aha_outb(aha, COMMAND_REG, AOP_START_MBOX); ahanextoutbox(aha); } void aha_intr(void *arg) { struct aha_softc *aha; aha = arg; mtx_lock(&aha->lock); aha_intr_locked(aha); mtx_unlock(&aha->lock); } void aha_intr_locked(struct aha_softc *aha) { u_int intstat; uint32_t paddr; while (((intstat = aha_inb(aha, INTSTAT_REG)) & INTR_PENDING) != 0) { if ((intstat & CMD_COMPLETE) != 0) { aha->latched_status = aha_inb(aha, STATUS_REG); aha->command_cmp = TRUE; } aha_outb(aha, CONTROL_REG, RESET_INTR); if ((intstat & IMB_LOADED) != 0) { while (aha->cur_inbox->comp_code != AMBI_FREE) { paddr = aha_a24tou(aha->cur_inbox->ccb_addr); ahadone(aha, ahaccbptov(aha, paddr), aha->cur_inbox->comp_code); aha->cur_inbox->comp_code = AMBI_FREE; ahanextinbox(aha); } } if ((intstat & SCSI_BUS_RESET) != 0) { ahareset(aha, /*hardreset*/FALSE); } } } static void ahadone(struct aha_softc *aha, struct aha_ccb *accb, aha_mbi_comp_code_t comp_code) { union ccb *ccb; struct ccb_scsiio *csio; ccb = accb->ccb; csio = &accb->ccb->csio; if ((accb->flags & ACCB_ACTIVE) == 0) { device_printf(aha->dev, "ahadone - Attempt to free non-active ACCB %p\n", (void *)accb); return; } if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) { bus_dmasync_op_t op; if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) op = BUS_DMASYNC_POSTREAD; else op = BUS_DMASYNC_POSTWRITE; bus_dmamap_sync(aha->buffer_dmat, accb->dmamap, op); bus_dmamap_unload(aha->buffer_dmat, accb->dmamap); } if (accb == aha->recovery_accb) { /* * The recovery 
ACCB does not have a CCB associated * with it, so short circuit the normal error handling. * We now traverse our list of pending CCBs and process * any that were terminated by the recovery CCBs action. * We also reinstate timeouts for all remaining, pending, * CCBs. */ struct cam_path *path; struct ccb_hdr *ccb_h; cam_status error; /* Notify all clients that a BDR occurred */ error = xpt_create_path(&path, /*periph*/NULL, cam_sim_path(aha->sim), accb->hccb.target, CAM_LUN_WILDCARD); if (error == CAM_REQ_CMP) { xpt_async(AC_SENT_BDR, path, NULL); xpt_free_path(path); } ccb_h = LIST_FIRST(&aha->pending_ccbs); while (ccb_h != NULL) { struct aha_ccb *pending_accb; pending_accb = (struct aha_ccb *)ccb_h->ccb_accb_ptr; if (pending_accb->hccb.target == accb->hccb.target) { pending_accb->hccb.ahastat = AHASTAT_HA_BDR; ccb_h = LIST_NEXT(ccb_h, sim_links.le); ahadone(aha, pending_accb, AMBI_ERROR); } else { callout_reset_sbt(&pending_accb->timer, SBT_1MS * ccb_h->timeout, 0, ahatimeout, pending_accb, 0); ccb_h = LIST_NEXT(ccb_h, sim_links.le); } } device_printf(aha->dev, "No longer in timeout\n"); return; } callout_stop(&accb->timer); switch (comp_code) { case AMBI_FREE: device_printf(aha->dev, "ahadone - CCB completed with free status!\n"); break; case AMBI_NOT_FOUND: device_printf(aha->dev, "ahadone - CCB Abort failed to find CCB\n"); break; case AMBI_ABORT: case AMBI_ERROR: /* An error occurred */ if (accb->hccb.opcode < INITIATOR_CCB_WRESID) csio->resid = 0; else csio->resid = aha_a24tou(accb->hccb.data_len); switch(accb->hccb.ahastat) { case AHASTAT_DATARUN_ERROR: { if (csio->resid <= 0) { csio->ccb_h.status = CAM_DATA_RUN_ERR; break; } /* FALLTHROUGH */ } case AHASTAT_NOERROR: csio->scsi_status = accb->hccb.sdstat; csio->ccb_h.status |= CAM_SCSI_STATUS_ERROR; switch(csio->scsi_status) { case SCSI_STATUS_CHECK_COND: case SCSI_STATUS_CMD_TERMINATED: csio->ccb_h.status |= CAM_AUTOSNS_VALID; /* * The aha writes the sense data at different * offsets based on the scsi cmd 
len */ bcopy((caddr_t) &accb->hccb.scsi_cdb + accb->hccb.cmd_len, (caddr_t) &csio->sense_data, accb->hccb.sense_len); break; default: break; case SCSI_STATUS_OK: csio->ccb_h.status = CAM_REQ_CMP; break; } break; case AHASTAT_SELTIMEOUT: csio->ccb_h.status = CAM_SEL_TIMEOUT; break; case AHASTAT_UNEXPECTED_BUSFREE: csio->ccb_h.status = CAM_UNEXP_BUSFREE; break; case AHASTAT_INVALID_PHASE: csio->ccb_h.status = CAM_SEQUENCE_FAIL; break; case AHASTAT_INVALID_ACTION_CODE: panic("%s: Inavlid Action code", aha_name(aha)); break; case AHASTAT_INVALID_OPCODE: if (accb->hccb.opcode < INITIATOR_CCB_WRESID) panic("%s: Invalid CCB Opcode %x hccb = %p", aha_name(aha), accb->hccb.opcode, &accb->hccb); device_printf(aha->dev, "AHA-1540A compensation failed\n"); xpt_freeze_devq(ccb->ccb_h.path, /*count*/1); csio->ccb_h.status = CAM_REQUEUE_REQ; break; case AHASTAT_LINKED_CCB_LUN_MISMATCH: /* We don't even support linked commands... */ panic("%s: Linked CCB Lun Mismatch", aha_name(aha)); break; case AHASTAT_INVALID_CCB_OR_SG_PARAM: panic("%s: Invalid CCB or SG list", aha_name(aha)); break; case AHASTAT_HA_SCSI_BUS_RESET: if ((csio->ccb_h.status & CAM_STATUS_MASK) != CAM_CMD_TIMEOUT) csio->ccb_h.status = CAM_SCSI_BUS_RESET; break; case AHASTAT_HA_BDR: if ((accb->flags & ACCB_DEVICE_RESET) == 0) csio->ccb_h.status = CAM_BDR_SENT; else csio->ccb_h.status = CAM_CMD_TIMEOUT; break; } if (csio->ccb_h.status != CAM_REQ_CMP) { xpt_freeze_devq(csio->ccb_h.path, /*count*/1); csio->ccb_h.status |= CAM_DEV_QFRZN; } if ((accb->flags & ACCB_RELEASE_SIMQ) != 0) ccb->ccb_h.status |= CAM_RELEASE_SIMQ; ahafreeccb(aha, accb); xpt_done(ccb); break; case AMBI_OK: /* All completed without incident */ /* XXX DO WE NEED TO COPY SENSE BYTES HERE???? XXX */ /* I don't think so since it works???? 
/*
 * Reset the adapter (hard or soft per hard_reset), wait for its power-on
 * diagnostics to start and complete, then fail over any transactions that
 * were outstanding and re-initialize the mailbox rings.
 *
 * Returns 0 on success, ETIMEDOUT if the adapter never responds, or
 * ENXIO if the adapter reports a diagnostic failure.
 */
static int
ahareset(struct aha_softc* aha, int hard_reset)
{
	struct ccb_hdr *ccb_h;
	u_int status;
	u_int timeout;
	uint8_t reset_type;

	if (hard_reset != 0)
		reset_type = HARD_RESET;
	else
		reset_type = SOFT_RESET;
	aha_outb(aha, CONTROL_REG, reset_type);

	/* Wait 5sec. for Diagnostic start */
	timeout = 5 * 10000;
	while (--timeout) {
		status = aha_inb(aha, STATUS_REG);
		if ((status & DIAG_ACTIVE) != 0)
			break;
		DELAY(100);
	}
	if (timeout == 0) {
		PRVERB((aha->dev, "ahareset - Diagnostic Active failed to "
		    "assert. status = %#x\n", status));
		return (ETIMEDOUT);
	}

	/* Wait 10sec. for Diagnostic end */
	timeout = 10 * 10000;
	while (--timeout) {
		status = aha_inb(aha, STATUS_REG);
		if ((status & DIAG_ACTIVE) == 0)
			break;
		DELAY(100);
	}
	if (timeout == 0) {
		panic("%s: ahareset - Diagnostic Active failed to drop. "
		    "status = 0x%x\n", aha_name(aha), status);
		/* NOTREACHED - panic() does not return */
		return (ETIMEDOUT);
	}

	/* Wait for the host adapter to become ready or report a failure */
	timeout = 10000;
	while (--timeout) {
		status = aha_inb(aha, STATUS_REG);
		if ((status & (DIAG_FAIL|HA_READY|DATAIN_REG_READY)) != 0)
			break;
		DELAY(100);
	}
	if (timeout == 0) {
		device_printf(aha->dev, "ahareset - Host adapter failed to "
		    "come ready. status = 0x%x\n", status);
		return (ETIMEDOUT);
	}

	/* If the diagnostics failed, tell the user */
	if ((status & DIAG_FAIL) != 0 || (status & HA_READY) == 0) {
		device_printf(aha->dev, "ahareset - Adapter failed diag\n");
		if ((status & DATAIN_REG_READY) != 0)
			device_printf(aha->dev, "ahareset - Host Adapter "
			    "Error code = 0x%x\n", aha_inb(aha, DATAIN_REG));
		return (ENXIO);
	}

	/* If we've attached to the XPT, tell it about the event */
	if (aha->path != NULL)
		xpt_async(AC_BUS_RESET, aha->path, NULL);

	/*
	 * Perform completion processing for all outstanding CCBs.
	 * ahadone() removes each CCB from the pending list, so taking
	 * LIST_FIRST() each iteration drains the whole queue.
	 */
	while ((ccb_h = LIST_FIRST(&aha->pending_ccbs)) != NULL) {
		struct aha_ccb *pending_accb;

		pending_accb = (struct aha_ccb *)ccb_h->ccb_accb_ptr;
		pending_accb->hccb.ahastat = AHASTAT_HA_SCSI_BUS_RESET;
		ahadone(aha, pending_accb, AMBI_ERROR);
	}

	/* If we've allocated mailboxes, initialize them */
	/* Must be done after we've aborted our queue, or aha_cmd fails */
	if (aha->init_level > 4)
		ahainitmboxes(aha);

	return (0);
}
/*
 * Send a command to the adapter.
 *
 * The command opcode is written to the command register, followed by any
 * parameter bytes; reply bytes are then collected from the data-in register
 * until the adapter signals command completion (via our interrupt handler
 * or by polling the interrupt-status register directly, in case interrupts
 * are blocked).
 *
 * aha         - softc of the controller to talk to
 * opcode      - adapter operation code
 * params      - parameter bytes to follow the opcode (may be NULL)
 * param_len   - number of parameter bytes
 * reply_data  - buffer for reply bytes (may be NULL)
 * reply_len   - expected number of reply bytes
 * cmd_timeout - completion timeout in 100us ticks
 *
 * Returns 0 on success, ETIMEDOUT on any handshake timeout, EINVAL if the
 * adapter rejected the command, E2BIG if it did not consume all parameters,
 * or EMSGSIZE on a reply-length mismatch.
 */
int
aha_cmd(struct aha_softc *aha, aha_op_t opcode, uint8_t *params,
	u_int param_len, uint8_t *reply_data, u_int reply_len,
	u_int cmd_timeout)
{
	u_int timeout;
	u_int status;
	u_int saved_status;
	u_int intstat;
	u_int reply_buf_size;
	int cmd_complete;
	int error;

	/* No data returned to start */
	reply_buf_size = reply_len;
	reply_len = 0;
	intstat = 0;
	cmd_complete = 0;
	saved_status = 0;
	error = 0;

	/*
	 * All commands except for the "start mailbox" and the "enable
	 * outgoing mailbox read interrupt" commands cannot be issued
	 * while there are pending transactions.  Freeze our SIMQ
	 * and wait for all completions to occur if necessary.
	 */
	timeout = 10000;
	while (LIST_FIRST(&aha->pending_ccbs) != NULL && --timeout) {
		/* Fire the interrupt handler in case interrupts are blocked */
		aha_intr(aha);
		DELAY(10);
	}
	if (timeout == 0) {
		device_printf(aha->dev,
		    "aha_cmd: Timeout waiting for adapter idle\n");
		return (ETIMEDOUT);
	}
	aha->command_cmp = 0;
	/*
	 * Wait up to 10 sec. for the adapter to become
	 * ready to accept commands.
	 */
	timeout = 100000;
	while (--timeout) {
		status = aha_inb(aha, STATUS_REG);
		if ((status & HA_READY) != 0
		 && (status & CMD_REG_BUSY) == 0)
			break;
		/*
		 * Throw away any pending data which may be
		 * left over from earlier commands that we
		 * timedout on.
		 */
		if ((status & DATAIN_REG_READY) != 0)
			(void)aha_inb(aha, DATAIN_REG);
		DELAY(100);
	}
	if (timeout == 0) {
		device_printf(aha->dev, "aha_cmd: Timeout waiting for adapter"
		    " ready, status = 0x%x\n", status);
		return (ETIMEDOUT);
	}

	/*
	 * Send the opcode followed by any necessary parameter bytes.
	 */
	aha_outb(aha, COMMAND_REG, opcode);

	/*
	 * Wait for up to 1sec to get the parameter list sent
	 */
	timeout = 10000;
	while (param_len && --timeout) {
		DELAY(100);
		status = aha_inb(aha, STATUS_REG);
		intstat = aha_inb(aha, INTSTAT_REG);

		if ((intstat & (INTR_PENDING|CMD_COMPLETE))
		 == (INTR_PENDING|CMD_COMPLETE)) {
			saved_status = status;
			cmd_complete = 1;
			break;
		}

		if (aha->command_cmp != 0) {
			/* Our interrupt handler saw the completion first */
			saved_status = aha->latched_status;
			cmd_complete = 1;
			break;
		}
		if ((status & DATAIN_REG_READY) != 0)
			break;
		if ((status & CMD_REG_BUSY) == 0) {
			aha_outb(aha, COMMAND_REG, *params++);
			param_len--;
			/* Restart the timeout after each accepted byte */
			timeout = 10000;
		}
	}
	if (timeout == 0) {
		device_printf(aha->dev, "aha_cmd: Timeout sending parameters, "
		    "status = 0x%x\n", status);
		error = ETIMEDOUT;
	}

	/*
	 * For all other commands, we wait for any output data
	 * and the final command completion interrupt.
	 */
	while (cmd_complete == 0 && --cmd_timeout) {
		status = aha_inb(aha, STATUS_REG);
		intstat = aha_inb(aha, INTSTAT_REG);

		if (aha->command_cmp != 0) {
			cmd_complete = 1;
			saved_status = aha->latched_status;
		} else if ((intstat & (INTR_PENDING|CMD_COMPLETE))
			== (INTR_PENDING|CMD_COMPLETE)) {
			/*
			 * Our poll (in case interrupts are blocked)
			 * saw the CMD_COMPLETE interrupt.
			 */
			cmd_complete = 1;
			saved_status = status;
		}
		if ((status & DATAIN_REG_READY) != 0) {
			uint8_t data;

			data = aha_inb(aha, DATAIN_REG);
			if (reply_len < reply_buf_size) {
				*reply_data++ = data;
			} else {
				device_printf(aha->dev,
				    "aha_cmd - Discarded reply data "
				    "byte for opcode 0x%x\n", opcode);
			}
			/*
			 * Reset timeout to ensure at least a second
			 * between response bytes.
			 */
			cmd_timeout = MAX(cmd_timeout, 10000);
			reply_len++;
		}
		DELAY(100);
	}
	if (cmd_timeout == 0) {
		device_printf(aha->dev, "aha_cmd: Timeout: status = 0x%x, "
		    "intstat = 0x%x, reply_len = %d\n", status, intstat,
		    reply_len);
		return (ETIMEDOUT);
	}

	/*
	 * Clear any pending interrupts.  Block interrupts so our
	 * interrupt handler is not re-entered.
	 */
	aha_intr(aha);

	if (error != 0)
		return (error);

	/*
	 * If the command was rejected by the controller, tell the caller.
	 */
	if ((saved_status & CMD_INVALID) != 0) {
		PRVERB((aha->dev, "Invalid Command 0x%x\n", opcode));
		/*
		 * Some early adapters may not recover properly from
		 * an invalid command.  If it appears that the controller
		 * has wedged (i.e. status was not cleared by our interrupt
		 * reset above), perform a soft reset.
		 */
		DELAY(1000);
		status = aha_inb(aha, STATUS_REG);
		if ((status & (CMD_INVALID|STATUS_REG_RSVD|DATAIN_REG_READY|
		    CMD_REG_BUSY|DIAG_FAIL|DIAG_ACTIVE)) != 0
		 || (status & (HA_READY|INIT_REQUIRED))
		 != (HA_READY|INIT_REQUIRED))
			ahareset(aha, /*hard_reset*/FALSE);
		return (EINVAL);
	}

	if (param_len > 0) {
		/* The controller did not accept the full argument list */
		PRVERB((aha->dev, "Controller did not accept full argument "
		    "list (%d > 0)\n", param_len));
		return (E2BIG);
	}

	if (reply_len != reply_buf_size) {
		/* Too much or too little data received */
		PRVERB((aha->dev, "data received mismatch (%d != %d)\n",
		    reply_len, reply_buf_size));
		return (EMSGSIZE);
	}

	/* We were successful */
	return (0);
}
*/ cmd_timeout = MAX(cmd_timeout, 10000); reply_len++; } DELAY(100); } if (cmd_timeout == 0) { device_printf(aha->dev, "aha_cmd: Timeout: status = 0x%x, " "intstat = 0x%x, reply_len = %d\n", status, intstat, reply_len); return (ETIMEDOUT); } /* * Clear any pending interrupts. Block interrupts so our * interrupt handler is not re-entered. */ aha_intr(aha); if (error != 0) return (error); /* * If the command was rejected by the controller, tell the caller. */ if ((saved_status & CMD_INVALID) != 0) { PRVERB((aha->dev, "Invalid Command 0x%x\n", opcode)); /* * Some early adapters may not recover properly from * an invalid command. If it appears that the controller * has wedged (i.e. status was not cleared by our interrupt * reset above), perform a soft reset. */ DELAY(1000); status = aha_inb(aha, STATUS_REG); if ((status & (CMD_INVALID|STATUS_REG_RSVD|DATAIN_REG_READY| CMD_REG_BUSY|DIAG_FAIL|DIAG_ACTIVE)) != 0 || (status & (HA_READY|INIT_REQUIRED)) != (HA_READY|INIT_REQUIRED)) ahareset(aha, /*hard_reset*/FALSE); return (EINVAL); } if (param_len > 0) { /* The controller did not accept the full argument list */ PRVERB((aha->dev, "Controller did not accept full argument " "list (%d > 0)\n", param_len)); return (E2BIG); } if (reply_len != reply_buf_size) { /* Too much or too little data received */ PRVERB((aha->dev, "data received mismatch (%d != %d)\n", reply_len, reply_buf_size)); return (EMSGSIZE); } /* We were successful */ return (0); } static int ahainitmboxes(struct aha_softc *aha) { int error; init_24b_mbox_params_t init_mbox; bzero(aha->in_boxes, sizeof(aha_mbox_in_t) * aha->num_boxes); bzero(aha->out_boxes, sizeof(aha_mbox_out_t) * aha->num_boxes); aha->cur_inbox = aha->in_boxes; aha->last_inbox = aha->in_boxes + aha->num_boxes - 1; aha->cur_outbox = aha->out_boxes; aha->last_outbox = aha->out_boxes + aha->num_boxes - 1; /* Tell the adapter about them */ init_mbox.num_mboxes = aha->num_boxes; ahautoa24(aha->mailbox_physbase, init_mbox.base_addr); error = 
/*
 * Update the XPT's idea of the negotiated transfer
 * parameters for a particular target.
 */
static void
ahafetchtransinfo(struct aha_softc *aha, struct ccb_trans_settings* cts)
{
	setup_data_t	setup_info;
	u_int		target;
	u_int		targ_offset;
	u_int		sync_period;
	int		error;
	uint8_t		param;
	targ_syncinfo_t	sync_info;
	struct ccb_trans_settings_spi *spi = &cts->xport_specific.spi;

	target = cts->ccb_h.target_id;
	/* Only 8 targets on these adapters; index the per-target table */
	targ_offset = (target & 0x7);

	/*
	 * Inquire Setup Information.  This command retrieves
	 * the sync info for older models.
	 */
	param = sizeof(setup_info);
	error = aha_cmd(aha, AOP_INQUIRE_SETUP_INFO, &param, /*paramlen*/1,
	    (uint8_t*)&setup_info, sizeof(setup_info), DEFAULT_CMD_TIMEOUT);

	if (error != 0) {
		device_printf(aha->dev,
		    "ahafetchtransinfo - Inquire Setup Info Failed %d\n",
		    error);
		return;
	}

	sync_info = setup_info.syncinfo[targ_offset];

	if (sync_info.sync == 0)
		spi->sync_offset = 0;
	else
		spi->sync_offset = sync_info.offset;

	spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;

	/*
	 * Base transfer period depends on board generation; the per-target
	 * period field adds 500ns increments on top of the base.
	 */
	if (aha->boardid >= BOARD_1542CF)
		sync_period = 1000;
	else
		sync_period = 2000;
	sync_period += 500 * sync_info.period;

	/* Convert ns value to standard SCSI sync rate */
	if (spi->sync_offset != 0)
		spi->sync_period = scsi_calc_syncparam(sync_period);
	else
		spi->sync_period = 0;

	spi->valid = CTS_SPI_VALID_SYNC_RATE
		   | CTS_SPI_VALID_SYNC_OFFSET
		   | CTS_SPI_VALID_BUS_WIDTH;
	xpt_async(AC_TRANSFER_NEG, cts->ccb_h.path, cts);
}

/* bus_dmamap_load callback: record the mailbox array's physical address */
static void
ahamapmboxes(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct aha_softc* aha;

	aha = (struct aha_softc*)arg;
	aha->mailbox_physbase = segs->ds_addr;
}

/* bus_dmamap_load callback: record the CCB array's physical address */
static void
ahamapccbs(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct aha_softc* aha;

	aha = (struct aha_softc*)arg;
	aha->aha_ccb_physbase = segs->ds_addr;
}
/* bus_dmamap_load callback: record the newest S/G map's physical address */
static void
ahamapsgs(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct aha_softc* aha;

	aha = (struct aha_softc*)arg;
	SLIST_FIRST(&aha->sg_maps)->sg_physaddr = segs->ds_addr;
}

/* CAM poll entry point: service the controller without interrupts */
static void
ahapoll(struct cam_sim *sim)
{
	aha_intr_locked(cam_sim_softc(sim));
}

/*
 * Per-CCB timeout handler (runs from callout with the softc lock held).
 * First attempts recovery with a Bus Device Reset to the timed-out
 * target; if that has already been tried (or no mailbox is free),
 * escalates to a full host adapter/SCSI bus reset.
 */
static void
ahatimeout(void *arg)
{
	struct aha_ccb	*accb;
	union  ccb	*ccb;
	struct aha_softc *aha;
	uint32_t	paddr;
	struct ccb_hdr *ccb_h;

	accb = (struct aha_ccb *)arg;
	ccb = accb->ccb;
	aha = (struct aha_softc *)ccb->ccb_h.ccb_aha_ptr;
	mtx_assert(&aha->lock, MA_OWNED);
	xpt_print_path(ccb->ccb_h.path);
	printf("CCB %p - timed out\n", (void *)accb);

	if ((accb->flags & ACCB_ACTIVE) == 0) {
		/* Completion raced with the timeout; nothing to recover */
		xpt_print_path(ccb->ccb_h.path);
		printf("CCB %p - timed out CCB already completed\n",
		    (void *)accb);
		return;
	}

	/*
	 * In order to simplify the recovery process, we ask the XPT
	 * layer to halt the queue of new transactions and we traverse
	 * the list of pending CCBs and remove their timeouts. This
	 * means that the driver attempts to clear only one error
	 * condition at a time.  In general, timeouts that occur
	 * close together are related anyway, so there is no benefit
	 * in attempting to handle errors in parallel.  Timeouts will
	 * be reinstated when the recovery process ends.
	 */
	if ((accb->flags & ACCB_DEVICE_RESET) == 0) {
		if ((accb->flags & ACCB_RELEASE_SIMQ) == 0) {
			xpt_freeze_simq(aha->sim, /*count*/1);
			accb->flags |= ACCB_RELEASE_SIMQ;
		}

		ccb_h = LIST_FIRST(&aha->pending_ccbs);
		while (ccb_h != NULL) {
			struct aha_ccb *pending_accb;

			pending_accb = (struct aha_ccb *)ccb_h->ccb_accb_ptr;
			callout_stop(&pending_accb->timer);
			ccb_h = LIST_NEXT(ccb_h, sim_links.le);
		}
	}

	if ((accb->flags & ACCB_DEVICE_RESET) != 0
	 || aha->cur_outbox->action_code != AMBO_FREE) {
		/*
		 * Try a full host adapter/SCSI bus reset.
		 * We do this only if we have already attempted
		 * to clear the condition with a BDR, or we cannot
		 * attempt a BDR for lack of mailbox resources.
		 */
		ccb->ccb_h.status = CAM_CMD_TIMEOUT;
		ahareset(aha, /*hardreset*/TRUE);
		device_printf(aha->dev, "No longer in timeout\n");
	} else {
		/*
		 * Send a Bus Device Reset message:
		 * The target that is holding up the bus may not
		 * be the same as the one that triggered this timeout
		 * (different commands have different timeout lengths),
		 * but we have no way of determining this from our
		 * timeout handler.  Our strategy here is to queue a
		 * BDR message to the target of the timed out command.
		 * If this fails, we'll get another timeout 2 seconds
		 * later which will attempt a bus reset.
		 */
		accb->flags |= ACCB_DEVICE_RESET;
		callout_reset(&accb->timer, 2 * hz, ahatimeout, accb);
		aha->recovery_accb->hccb.opcode = INITIATOR_BUS_DEV_RESET;

		/* No Data Transfer */
		aha->recovery_accb->hccb.datain = TRUE;
		aha->recovery_accb->hccb.dataout = TRUE;
		aha->recovery_accb->hccb.ahastat = 0;
		aha->recovery_accb->hccb.sdstat = 0;
		aha->recovery_accb->hccb.target = ccb->ccb_h.target_id;

		/* Tell the adapter about this command */
		paddr = ahaccbvtop(aha, aha->recovery_accb);
		ahautoa24(paddr, aha->cur_outbox->ccb_addr);
		aha->cur_outbox->action_code = AMBO_START;
		aha_outb(aha, COMMAND_REG, AOP_START_MBOX);
		ahanextoutbox(aha);
	}
}
/*
 * Detach the controller from CAM: announce the device loss, release our
 * wildcard path, and deregister/free the SIM.  The teardown order matters
 * and is performed under the softc lock.  Always returns 0.
 */
int
aha_detach(struct aha_softc *aha)
{
	mtx_lock(&aha->lock);
	xpt_async(AC_LOST_DEVICE, aha->path, NULL);
	xpt_free_path(aha->path);
	xpt_bus_deregister(cam_sim_path(aha->sim));
	cam_sim_free(aha->sim, /*free_devq*/TRUE);
	mtx_unlock(&aha->lock);
	/* XXX: Drain all timers? */
	return (0);
}
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice immediately at the beginning of the file, without modification, * this list of conditions, and the following disclaimer. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * $FreeBSD$ */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define ccb_ecb_ptr spriv_ptr0 #define ccb_ahb_ptr spriv_ptr1 #define ahb_inb(ahb, port) \ bus_read_1((ahb)->res, port) #define ahb_inl(ahb, port) \ bus_read_4((ahb)->res, port) #define ahb_outb(ahb, port, value) \ bus_write_1((ahb)->res, port, value) #define ahb_outl(ahb, port, value) \ bus_write_4((ahb)->res, port, value) static const char *ahbmatch(eisa_id_t type); static struct ahb_softc *ahballoc(device_t dev, struct resource *res); static void ahbfree(struct ahb_softc *ahb); static int ahbreset(struct ahb_softc *ahb); static void ahbmapecbs(void *arg, bus_dma_segment_t *segs, int nseg, int error); static int ahbxptattach(struct ahb_softc *ahb); static void ahbhandleimmed(struct ahb_softc *ahb, u_int32_t mbox, u_int intstat); static void ahbcalcresid(struct ahb_softc *ahb, struct ecb *ecb, union ccb *ccb); static __inline void ahbdone(struct ahb_softc *ahb, u_int32_t mbox, u_int intstat); static void ahbintr(void *arg); static void ahbintr_locked(struct ahb_softc *ahb); static bus_dmamap_callback_t ahbexecuteecb; static void ahbaction(struct cam_sim *sim, union ccb *ccb); static void ahbpoll(struct cam_sim *sim); /* Our timeout handler */ static void ahbtimeout(void *arg); static __inline struct ecb* ahbecbget(struct ahb_softc *ahb); static __inline void ahbecbfree(struct ahb_softc* ahb, struct ecb* ecb); static __inline u_int32_t ahbecbvtop(struct ahb_softc *ahb, struct ecb *ecb); static __inline struct ecb* ahbecbptov(struct ahb_softc *ahb, u_int32_t ecb_addr); static __inline u_int32_t ahbstatuspaddr(u_int32_t ecb_paddr); static __inline u_int32_t ahbsensepaddr(u_int32_t ecb_paddr); static __inline u_int32_t ahbsgpaddr(u_int32_t ecb_paddr); static __inline void ahbqueuembox(struct ahb_softc *ahb, u_int32_t mboxval, u_int attn_code); static __inline struct ecb* 
/*
 * Pop a free ECB off the free list, or NULL if none are available.
 * Callers must hold the softc lock (except while dumping).
 */
static __inline struct ecb*
ahbecbget(struct ahb_softc *ahb)
{
	struct	ecb* ecb;

	if (!dumping)
		mtx_assert(&ahb->lock, MA_OWNED);
	if ((ecb = SLIST_FIRST(&ahb->free_ecbs)) != NULL)
		SLIST_REMOVE_HEAD(&ahb->free_ecbs, links);

	return (ecb);
}

/* Return an ECB to the free list.  Callers must hold the softc lock. */
static __inline void
ahbecbfree(struct ahb_softc* ahb, struct ecb* ecb)
{
	if (!dumping)
		mtx_assert(&ahb->lock, MA_OWNED);
	ecb->state = ECB_FREE;
	SLIST_INSERT_HEAD(&ahb->free_ecbs, ecb, links);
}

/* Translate an ECB virtual address to its bus/physical address */
static __inline u_int32_t
ahbecbvtop(struct ahb_softc *ahb, struct ecb *ecb)
{
	return (ahb->ecb_physbase
	      + (u_int32_t)((caddr_t)ecb - (caddr_t)ahb->ecb_array));
}

/* Translate an ECB bus/physical address back to its virtual address */
static __inline struct ecb*
ahbecbptov(struct ahb_softc *ahb, u_int32_t ecb_addr)
{
	return (ahb->ecb_array
	      + ((struct ecb*)(uintptr_t)ecb_addr
	       - (struct ecb*)(uintptr_t)ahb->ecb_physbase));
}

/* Physical address of the status block embedded in an ECB */
static __inline u_int32_t
ahbstatuspaddr(u_int32_t ecb_paddr)
{
	return (ecb_paddr + offsetof(struct ecb, status));
}

/* Physical address of the sense buffer embedded in an ECB */
static __inline u_int32_t
ahbsensepaddr(u_int32_t ecb_paddr)
{
	return (ecb_paddr + offsetof(struct ecb, sense));
}

/* Physical address of the S/G list embedded in an ECB */
static __inline u_int32_t
ahbsgpaddr(u_int32_t ecb_paddr)
{
	return (ecb_paddr + offsetof(struct ecb, sg_list));
}

/*
 * Post a mailbox value to the adapter with the given attention code,
 * first waiting (up to ~6ms) for the outgoing mailbox to drain.
 * Panics if the adapter never accepts the command.
 */
static __inline void
ahbqueuembox(struct ahb_softc *ahb, u_int32_t mboxval, u_int attn_code)
{
	u_int loopmax = 300;
	while (--loopmax) {
		u_int status;

		status = ahb_inb(ahb, HOSTSTAT);
		if ((status & (HOSTSTAT_MBOX_EMPTY|HOSTSTAT_BUSY))
		   == HOSTSTAT_MBOX_EMPTY)
			break;
		DELAY(20);
	}
	if (loopmax == 0)
		panic("%s: adapter not taking commands\n",
		    device_get_nameunit(ahb->dev));

	ahb_outl(ahb, MBOXOUT0, mboxval);
	ahb_outb(ahb, ATTN, attn_code);
}

/*
 * Match an EISA product ID (revision bits masked off) against the
 * adapters we support; returns a description string or NULL.
 */
static const char *
ahbmatch(eisa_id_t type)
{
	switch(type & 0xfffffe00) {
	case EISA_DEVICE_ID_ADAPTEC_1740:
		return ("Adaptec 174x SCSI host adapter");
		break;
	default:
		break;
	}
	return (NULL);
}
AHB_EISA_SLOT_OFFSET; eisa_add_iospace(dev, iobase, AHB_EISA_IOSIZE, RESVADDR_NONE); intdef = inb(INTDEF + iobase); switch (intdef & 0x7) { case INT9: irq = 9; break; case INT10: irq = 10; break; case INT11: irq = 11; break; case INT12: irq = 12; break; case INT14: irq = 14; break; case INT15: irq = 15; break; default: printf("Adaptec 174X at slot %d: illegal " "irq setting %d\n", eisa_get_slot(dev), (intdef & 0x7)); irq = 0; break; } if (irq == 0) return ENXIO; shared = (inb(INTDEF + iobase) & INTLEVEL) ? EISA_TRIGGER_LEVEL : EISA_TRIGGER_EDGE; eisa_add_intr(dev, irq, shared); return 0; } static int ahbattach(device_t dev) { /* * find unit and check we have that many defined */ struct ahb_softc *ahb; struct ecb* next_ecb; struct resource *io; struct resource *irq; int rid; void *ih; irq = NULL; rid = 0; io = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid, RF_ACTIVE); if (io == NULL) { device_printf(dev, "No I/O space?!\n"); return ENOMEM; } ahb = ahballoc(dev, io); if (ahbreset(ahb) != 0) goto error_exit; rid = 0; irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE); if (irq == NULL) { device_printf(dev, "Can't allocate interrupt\n"); goto error_exit; } /* * Create our DMA tags. These tags define the kinds of device * accessible memory allocations and memory mappings we will * need to perform during normal operation. */ /* DMA tag for mapping buffers into device visible space. 
*/ if (bus_dma_tag_create( /* parent */ bus_get_dma_tag(dev), /* alignment */ 1, /* boundary */ 0, /* lowaddr */ BUS_SPACE_MAXADDR_32BIT, /* highaddr */ BUS_SPACE_MAXADDR, /* filter */ NULL, /* filterarg */ NULL, /* maxsize */ DFLTPHYS, /* nsegments */ AHB_NSEG, /* maxsegsz */ BUS_SPACE_MAXSIZE_32BIT, /* flags */ BUS_DMA_ALLOCNOW, /* lockfunc */ busdma_lock_mutex, /* lockarg */ &ahb->lock, &ahb->buffer_dmat) != 0) goto error_exit; ahb->init_level++; /* DMA tag for our ccb structures and ha inquiry data */ if (bus_dma_tag_create( /* parent */ bus_get_dma_tag(dev), /* alignment */ 1, /* boundary */ 0, /* lowaddr */ BUS_SPACE_MAXADDR_32BIT, /* highaddr */ BUS_SPACE_MAXADDR, /* filter */ NULL, /* filterarg */ NULL, /* maxsize */ (AHB_NECB * sizeof(struct ecb)) + sizeof(*ahb->ha_inq_data), /* nsegments */ 1, /* maxsegsz */ BUS_SPACE_MAXSIZE_32BIT, /* flags */ 0, /* lockfunc */ NULL, /* lockarg */ NULL, &ahb->ecb_dmat) != 0) goto error_exit; ahb->init_level++; /* Allocation for our ccbs */ if (bus_dmamem_alloc(ahb->ecb_dmat, (void **)&ahb->ecb_array, BUS_DMA_NOWAIT, &ahb->ecb_dmamap) != 0) goto error_exit; ahb->ha_inq_data = (struct ha_inquiry_data *)&ahb->ecb_array[AHB_NECB]; ahb->init_level++; /* And permanently map them */ bus_dmamap_load(ahb->ecb_dmat, ahb->ecb_dmamap, ahb->ecb_array, AHB_NSEG * sizeof(struct ecb), ahbmapecbs, ahb, /*flags*/0); ahb->init_level++; /* Allocate the buffer dmamaps for each of our ECBs */ bzero(ahb->ecb_array, (AHB_NECB * sizeof(struct ecb)) + sizeof(*ahb->ha_inq_data)); next_ecb = ahb->ecb_array; while (ahb->num_ecbs < AHB_NECB) { u_int32_t ecb_paddr; if (bus_dmamap_create(ahb->buffer_dmat, /*flags*/0, &next_ecb->dmamap)) break; callout_init_mtx(&next_ecb->timer, &ahb->lock, 0); ecb_paddr = ahbecbvtop(ahb, next_ecb); next_ecb->hecb.status_ptr = ahbstatuspaddr(ecb_paddr); next_ecb->hecb.sense_ptr = ahbsensepaddr(ecb_paddr); ahb->num_ecbs++; ahbecbfree(ahb, next_ecb); next_ecb++; } ahb->init_level++; /* * Now that we know we own the 
resources we need, register * our bus with the XPT. */ if (ahbxptattach(ahb)) goto error_exit; /* Enable our interrupt */ if (bus_setup_intr(dev, irq, INTR_TYPE_CAM|INTR_ENTROPY|INTR_MPSAFE, NULL, ahbintr, ahb, &ih) != 0) goto error_exit; return (0); error_exit: /* * The board's IRQ line will not be left enabled * if we can't initialize correctly, so its safe * to release the irq. */ ahbfree(ahb); if (irq != NULL) bus_release_resource(dev, SYS_RES_IRQ, 0, irq); bus_release_resource(dev, SYS_RES_IOPORT, 0, io); return (-1); } static struct ahb_softc * ahballoc(device_t dev, struct resource *res) { struct ahb_softc *ahb; ahb = device_get_softc(dev); SLIST_INIT(&ahb->free_ecbs); LIST_INIT(&ahb->pending_ccbs); ahb->res = res; ahb->disc_permitted = ~0; ahb->tags_permitted = ~0; ahb->dev = dev; mtx_init(&ahb->lock, "ahb", NULL, MTX_DEF); return (ahb); } static void ahbfree(struct ahb_softc *ahb) { switch (ahb->init_level) { default: case 4: bus_dmamap_unload(ahb->ecb_dmat, ahb->ecb_dmamap); case 3: bus_dmamem_free(ahb->ecb_dmat, ahb->ecb_array, ahb->ecb_dmamap); case 2: bus_dma_tag_destroy(ahb->ecb_dmat); case 1: bus_dma_tag_destroy(ahb->buffer_dmat); case 0: break; } mtx_destroy(&ahb->lock); } /* * reset board, If it doesn't respond, return failure */ static int ahbreset(struct ahb_softc *ahb) { int wait = 1000; /* 1 sec enough? 
/*
 * reset board, If it doesn't respond, return failure
 *
 * Issues a hard reset, waits up to ~1s for the busy bit to drop, checks
 * the self-test result in MBOXIN0, then clears any latched interrupts.
 * Returns 0 on success, -1 on any failure.
 */
static int
ahbreset(struct ahb_softc *ahb)
{
	int	wait = 1000;	/* 1 sec enough? */
	int	test;

	if ((ahb_inb(ahb, PORTADDR) & PORTADDR_ENHANCED) == 0) {
		printf("ahb_reset: Controller not in enhanced mode\n");
		return (-1);
	}

	ahb_outb(ahb, CONTROL, CNTRL_HARD_RST);
	DELAY(1000);
	ahb_outb(ahb, CONTROL, 0);
	while (--wait) {
		DELAY(1000);
		if ((ahb_inb(ahb, HOSTSTAT) & HOSTSTAT_BUSY) == 0)
			break;
	}

	if (wait == 0) {
		printf("ahbreset: No answer from aha1742 board\n");
		return (-1);
	}
	/* A non-zero mailbox value here indicates a failed self test */
	if ((test = ahb_inb(ahb, MBOXIN0)) != 0) {
		printf("ahb_reset: self test failed, val = 0x%x\n", test);
		return (-1);
	}
	/* Drain any interrupts latched during the reset */
	while (ahb_inb(ahb, HOSTSTAT) & HOSTSTAT_INTPEND) {
		ahb_outb(ahb, CONTROL, CNTRL_CLRINT);
		DELAY(10000);
	}
	return (0);
}

/*
 * bus_dmamap_load callback: record the ECB array's physical base.
 * Space for adapter inquiry information is on the tail of the ecb array,
 * so its physical address can be derived from the array base as well.
 */
static void
ahbmapecbs(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct ahb_softc* ahb;

	ahb = (struct ahb_softc*)arg;
	ahb->ecb_physbase = segs->ds_addr;
	/*
	 * Space for adapter inquiry information is on the
	 * tail of the ecb array.
	 */
	ahb->ha_inq_physbase = ahbecbvtop(ahb, &ahb->ecb_array[AHB_NECB]);
}
/*
 * Second-stage attach: read the adapter's SCSI ID and settings, fetch the
 * host-adapter inquiry data with a polled ECB, then create the CAM devq,
 * SIM and wildcard path, and finally enable board interrupts.
 *
 * Returns 0 on success, ENOMEM/ENXIO on failure (with partial CAM state
 * rolled back).
 */
static int
ahbxptattach(struct ahb_softc *ahb)
{
	struct cam_devq *devq;
	struct ecb *ecb;
	u_int  i;

	mtx_lock(&ahb->lock);

	/* Remember who are we on the scsi bus */
	ahb->scsi_id = ahb_inb(ahb, SCSIDEF) & HSCSIID;

	/* Use extended translation?? */
	ahb->extended_trans = ahb_inb(ahb, RESV1) & EXTENDED_TRANS;

	/* Fetch adapter inquiry data */
	ecb = ahbecbget(ahb);	/* Always succeeds - no outstanding commands */
	ecb->hecb.opcode = ECBOP_READ_HA_INQDATA;
	ecb->hecb.flag_word1 = FW1_SUPPRESS_URUN_ERR|FW1_ERR_STATUS_BLK_ONLY;
	ecb->hecb.data_ptr = ahb->ha_inq_physbase;
	ecb->hecb.data_len = sizeof(struct ha_inquiry_data);
	/* Sense pointer cleared for this internal command; restored below */
	ecb->hecb.sense_ptr = 0;
	ecb->state = ECB_ACTIVE;

	/* Tell the adapter about this command */
	ahbqueuembox(ahb, ahbecbvtop(ahb, ecb), ATTN_STARTECB|ahb->scsi_id);

	/* Poll for interrupt completion */
	for (i = 1000; ecb->state != ECB_FREE && i != 0; i--) {
		ahbintr_locked(ahb);
		DELAY(1000);
	}

	/* Adapter may support fewer concurrent ECBs than we allocated */
	ahb->num_ecbs = MIN(ahb->num_ecbs,
	    ahb->ha_inq_data->scsi_data.spc2_flags);
	device_printf(ahb->dev,
	    "%.8s %s SCSI Adapter, FW Rev. %.4s, ID=%d, %d ECBs\n",
	    ahb->ha_inq_data->scsi_data.product,
	    (ahb->ha_inq_data->scsi_data.flags & 0x4) ? "Differential"
						      : "Single Ended",
	    ahb->ha_inq_data->scsi_data.revision,
	    ahb->scsi_id, ahb->num_ecbs);

	/* Restore sense paddr for future CCB clients */
	ecb->hecb.sense_ptr = ahbsensepaddr(ahbecbvtop(ahb, ecb));

	ahbecbfree(ahb, ecb);

	/*
	 * Create the device queue for our SIM.
	 */
	devq = cam_simq_alloc(ahb->num_ecbs);
	if (devq == NULL) {
		mtx_unlock(&ahb->lock);
		return (ENOMEM);
	}

	/*
	 * Construct our SIM entry
	 */
	ahb->sim = cam_sim_alloc(ahbaction, ahbpoll, "ahb", ahb,
	    device_get_unit(ahb->dev), &ahb->lock, 2, ahb->num_ecbs, devq);
	if (ahb->sim == NULL) {
		cam_simq_free(devq);
		mtx_unlock(&ahb->lock);
		return (ENOMEM);
	}

	if (xpt_bus_register(ahb->sim, ahb->dev, 0) != CAM_SUCCESS) {
		cam_sim_free(ahb->sim, /*free_devq*/TRUE);
		mtx_unlock(&ahb->lock);
		return (ENXIO);
	}

	if (xpt_create_path(&ahb->path, /*periph*/NULL,
	    cam_sim_path(ahb->sim), CAM_TARGET_WILDCARD,
	    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		xpt_bus_deregister(cam_sim_path(ahb->sim));
		cam_sim_free(ahb->sim, /*free_devq*/TRUE);
		mtx_unlock(&ahb->lock);
		return (ENXIO);
	}

	/*
	 * Allow the board to generate interrupts.
	 */
	ahb_outb(ahb, INTDEF, ahb_inb(ahb, INTDEF) | INTEN);
	mtx_unlock(&ahb->lock);

	return (0);
}
/*
 * Process the completion of an immediate command (SCSI bus reset or Bus
 * Device Reset).  All pending CCBs addressed to the reset target (or every
 * CCB, for a full bus reset where target_id == our own ID) are completed
 * with an appropriate error status; timeouts on the survivors are
 * reinstated if we were in timeout recovery.
 */
static void
ahbhandleimmed(struct ahb_softc *ahb, u_int32_t mbox, u_int intstat)
{
	struct ccb_hdr *ccb_h;
	u_int target_id;

	if (ahb->immed_cmd == 0) {
		device_printf(ahb->dev, "Immediate Command complete with no "
		    " pending command\n");
		return;
	}

	target_id = intstat & INTSTAT_TARGET_MASK;

	ccb_h = LIST_FIRST(&ahb->pending_ccbs);
	while (ccb_h != NULL) {
		struct ecb *pending_ecb;
		union ccb *ccb;

		pending_ecb = (struct ecb *)ccb_h->ccb_ecb_ptr;
		ccb = pending_ecb->ccb;
		/* Advance before we potentially unlink the current entry */
		ccb_h = LIST_NEXT(ccb_h, sim_links.le);
		if (ccb->ccb_h.target_id == target_id
		 || target_id == ahb->scsi_id) {
			callout_stop(&pending_ecb->timer);
			LIST_REMOVE(&ccb->ccb_h, sim_links.le);
			if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE)
				bus_dmamap_unload(ahb->buffer_dmat,
				    pending_ecb->dmamap);
			if (pending_ecb == ahb->immed_ecb)
				ccb->ccb_h.status =
				    CAM_CMD_TIMEOUT|CAM_RELEASE_SIMQ;
			else if (target_id == ahb->scsi_id)
				ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
			else
				ccb->ccb_h.status = CAM_BDR_SENT;
			ahbecbfree(ahb, pending_ecb);
			xpt_done(ccb);
		} else if (ahb->immed_ecb != NULL) {
			/* Re-instate timeout */
			callout_reset_sbt(&pending_ecb->timer,
			    SBT_1MS * ccb->ccb_h.timeout, 0, ahbtimeout,
			    pending_ecb, 0);
		}
	}

	if (ahb->immed_ecb != NULL) {
		ahb->immed_ecb = NULL;
		device_printf(ahb->dev, "No longer in timeout\n");
	} else if (target_id == ahb->scsi_id)
		device_printf(ahb->dev, "SCSI Bus Reset Delivered\n");
	else
		device_printf(ahb->dev,
		    "Bus Device Reset Delivered to target %d\n", target_id);

	ahb->immed_cmd = 0;
}
*/ ccb->ccb_h.status = CAM_DATA_RUN_ERR; } else { ccb->csio.resid = ecb->status.resid_count; if ((ecb->hecb.flag_word1 & FW1_SG_ECB) != 0) { /* * For S/G transfers, the adapter provides a pointer * to the address in the last S/G element used and a * residual for that element. So, we need to sum up * the elements that follow it in order to get a real * residual number. If we have an overrun, the residual * reported will be 0 and we already know that all S/G * segments have been exhausted, so we can skip this * step. */ ahb_sg_t *sg; int num_sg; num_sg = ecb->hecb.data_len / sizeof(ahb_sg_t); /* Find the S/G the adapter was working on */ for (sg = ecb->sg_list; num_sg != 0 && sg->addr != ecb->status.resid_addr; num_sg--, sg++) ; /* Skip it */ num_sg--; sg++; /* Sum the rest */ for (; num_sg != 0; num_sg--, sg++) ccb->csio.resid += sg->len; } /* Underruns are not errors */ ccb->ccb_h.status = CAM_REQ_CMP; } } static void ahbprocesserror(struct ahb_softc *ahb, struct ecb *ecb, union ccb *ccb) { struct hardware_ecb *hecb; struct ecb_status *status; hecb = &ecb->hecb; status = &ecb->status; switch (status->ha_status) { case HS_OK: ccb->csio.scsi_status = status->scsi_status; if (status->scsi_status != 0) { ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR; if (status->sense_stored) { ccb->ccb_h.status |= CAM_AUTOSNS_VALID; ccb->csio.sense_resid = ccb->csio.sense_len - status->sense_len; bcopy(&ecb->sense, &ccb->csio.sense_data, status->sense_len); } } break; case HS_TARGET_NOT_ASSIGNED: ccb->ccb_h.status = CAM_PATH_INVALID; break; case HS_SEL_TIMEOUT: ccb->ccb_h.status = CAM_SEL_TIMEOUT; break; case HS_DATA_RUN_ERR: ahbcalcresid(ahb, ecb, ccb); break; case HS_UNEXPECTED_BUSFREE: ccb->ccb_h.status = CAM_UNEXP_BUSFREE; break; case HS_INVALID_PHASE: ccb->ccb_h.status = CAM_SEQUENCE_FAIL; break; case HS_REQUEST_SENSE_FAILED: ccb->ccb_h.status = CAM_AUTOSENSE_FAIL; break; case HS_TAG_MSG_REJECTED: { struct ccb_trans_settings neg; struct ccb_trans_settings_scsi *scsi = 
/*
 * Translate the host-adapter status of a failed ECB into a CAM status for
 * the owning CCB, copying auto-sense data when present.  Freezes the
 * device queue for any non-CAM_REQ_CMP result so error recovery can run.
 */
static void
ahbprocesserror(struct ahb_softc *ahb, struct ecb *ecb, union ccb *ccb)
{
	struct hardware_ecb *hecb;
	struct ecb_status *status;

	hecb = &ecb->hecb;
	status = &ecb->status;
	switch (status->ha_status) {
	case HS_OK:
		/* Adapter was fine; report the target's SCSI status */
		ccb->csio.scsi_status = status->scsi_status;
		if (status->scsi_status != 0) {
			ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
			if (status->sense_stored) {
				ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
				ccb->csio.sense_resid =
				    ccb->csio.sense_len - status->sense_len;
				bcopy(&ecb->sense, &ccb->csio.sense_data,
				    status->sense_len);
			}
		}
		break;
	case HS_TARGET_NOT_ASSIGNED:
		ccb->ccb_h.status = CAM_PATH_INVALID;
		break;
	case HS_SEL_TIMEOUT:
		ccb->ccb_h.status = CAM_SEL_TIMEOUT;
		break;
	case HS_DATA_RUN_ERR:
		ahbcalcresid(ahb, ecb, ccb);
		break;
	case HS_UNEXPECTED_BUSFREE:
		ccb->ccb_h.status = CAM_UNEXP_BUSFREE;
		break;
	case HS_INVALID_PHASE:
		ccb->ccb_h.status = CAM_SEQUENCE_FAIL;
		break;
	case HS_REQUEST_SENSE_FAILED:
		ccb->ccb_h.status = CAM_AUTOSENSE_FAIL;
		break;
	case HS_TAG_MSG_REJECTED:
	{
		struct ccb_trans_settings neg;
		struct ccb_trans_settings_scsi *scsi =
		    &neg.proto_specific.scsi;

		/*
		 * Target cannot handle tagged queuing; renegotiate to
		 * untagged I/O and remember not to use tags again.
		 */
		xpt_print_path(ccb->ccb_h.path);
		printf("refuses tagged commands.  Performing "
		       "non-tagged I/O\n");
		memset(&neg, 0, sizeof (neg));
		neg.protocol = PROTO_SCSI;
		neg.protocol_version = SCSI_REV_2;
		neg.transport = XPORT_SPI;
		neg.transport_version = 2;
		scsi->flags = CTS_SCSI_VALID_TQ;
		xpt_setup_ccb(&neg.ccb_h, ccb->ccb_h.path, /*priority*/1);
		xpt_async(AC_TRANSFER_NEG, ccb->ccb_h.path, &neg);
		ahb->tags_permitted &= ~(0x01 << ccb->ccb_h.target_id);
		ccb->ccb_h.status = CAM_MSG_REJECT_REC;
		break;
	}
	case HS_FIRMWARE_LOAD_REQ:
	case HS_HARDWARE_ERR:
		/*
		 * Tell the system that the Adapter
		 * is no longer functional.
		 */
		ccb->ccb_h.status = CAM_NO_HBA;
		break;
	case HS_CMD_ABORTED_HOST:
	case HS_CMD_ABORTED_ADAPTER:
	case HS_ATN_TARGET_FAILED:
	case HS_SCSI_RESET_ADAPTER:
	case HS_SCSI_RESET_INCOMING:
		ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
		break;
	case HS_INVALID_ECB_PARAM:
		device_printf(ahb->dev,
		    "opcode 0x%02x, flag_word1 0x%02x, flag_word2 0x%02x\n",
		    hecb->opcode, hecb->flag_word1, hecb->flag_word2);
		ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
		break;
	case HS_DUP_TCB_RECEIVED:
	case HS_INVALID_OPCODE:
	case HS_INVALID_CMD_LINK:
	case HS_PROGRAM_CKSUM_ERROR:
		/* These indicate a driver bug, not a runtime condition */
		panic("%s: Can't happen host status %x occurred",
		    device_get_nameunit(ahb->dev), status->ha_status);
		break;
	}
	if (ccb->ccb_h.status != CAM_REQ_CMP) {
		xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
		ccb->ccb_h.status |= CAM_DEV_QFRZN;
	}
}
ecb->dmamap); } if ((intstat & INTSTAT_MASK) == INTSTAT_ECB_OK) { ccb->ccb_h.status = CAM_REQ_CMP; ccb->csio.resid = 0; } else { ahbprocesserror(ahb, ecb, ccb); } ahbecbfree(ahb, ecb); xpt_done(ccb); } else { /* Non CCB Command */ if ((intstat & INTSTAT_MASK) != INTSTAT_ECB_OK) { device_printf(ahb->dev, "Command 0%x Failed %x:%x:%x\n", ecb->hecb.opcode, *((u_int16_t*)&ecb->status), ecb->status.ha_status, ecb->status.resid_count); } /* Client owns this ECB and will release it. */ } } /* * Catch an interrupt from the adaptor */ static void ahbintr(void *arg) { struct ahb_softc *ahb; ahb = arg; mtx_lock(&ahb->lock); ahbintr_locked(ahb); mtx_unlock(&ahb->lock); } static void ahbintr_locked(struct ahb_softc *ahb) { u_int intstat; u_int32_t mbox; while (ahb_inb(ahb, HOSTSTAT) & HOSTSTAT_INTPEND) { /* * Fetch information about this interrupt. */ intstat = ahb_inb(ahb, INTSTAT); mbox = ahb_inl(ahb, MBOXIN0); /* * Reset interrupt latch. */ ahb_outb(ahb, CONTROL, CNTRL_CLRINT); /* * Process the completed operation */ switch (intstat & INTSTAT_MASK) { case INTSTAT_ECB_OK: case INTSTAT_ECB_CMPWRETRY: case INTSTAT_ECB_CMPWERR: ahbdone(ahb, mbox, intstat); break; case INTSTAT_AEN_OCCURED: if ((intstat & INTSTAT_TARGET_MASK) == ahb->scsi_id) { /* Bus Reset */ xpt_print_path(ahb->path); switch (mbox) { case HS_SCSI_RESET_ADAPTER: printf("Host Adapter Initiated " "Bus Reset occurred\n"); break; case HS_SCSI_RESET_INCOMING: printf("Bus Reset Initiated " "by another device occurred\n"); break; } /* Notify the XPT */ xpt_async(AC_BUS_RESET, ahb->path, NULL); break; } printf("Unsupported initiator selection AEN occurred\n"); break; case INTSTAT_IMMED_OK: case INTSTAT_IMMED_ERR: ahbhandleimmed(ahb, mbox, intstat); break; case INTSTAT_HW_ERR: panic("Unrecoverable hardware Error Occurred\n"); } } } static void ahbexecuteecb(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) { struct ecb *ecb; union ccb *ccb; struct ahb_softc *ahb; u_int32_t ecb_paddr; ecb = (struct ecb *)arg; ccb 
= ecb->ccb; ahb = (struct ahb_softc *)ccb->ccb_h.ccb_ahb_ptr; mtx_assert(&ahb->lock, MA_OWNED); if (error != 0) { if (error != EFBIG) device_printf(ahb->dev, "Unexepected error 0x%x returned from " "bus_dmamap_load\n", error); if (ccb->ccb_h.status == CAM_REQ_INPROG) { xpt_freeze_devq(ccb->ccb_h.path, /*count*/1); ccb->ccb_h.status = CAM_REQ_TOO_BIG|CAM_DEV_QFRZN; } ahbecbfree(ahb, ecb); xpt_done(ccb); return; } ecb_paddr = ahbecbvtop(ahb, ecb); if (nseg != 0) { ahb_sg_t *sg; bus_dma_segment_t *end_seg; bus_dmasync_op_t op; end_seg = dm_segs + nseg; /* Copy the segments into our SG list */ sg = ecb->sg_list; while (dm_segs < end_seg) { sg->addr = dm_segs->ds_addr; sg->len = dm_segs->ds_len; sg++; dm_segs++; } if (nseg > 1) { ecb->hecb.flag_word1 |= FW1_SG_ECB; ecb->hecb.data_ptr = ahbsgpaddr(ecb_paddr); ecb->hecb.data_len = sizeof(ahb_sg_t) * nseg; } else { ecb->hecb.data_ptr = ecb->sg_list->addr; ecb->hecb.data_len = ecb->sg_list->len; } if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { /* ecb->hecb.flag_word2 |= FW2_DATA_DIR_IN; */ op = BUS_DMASYNC_PREREAD; } else { op = BUS_DMASYNC_PREWRITE; } /* ecb->hecb.flag_word2 |= FW2_CHECK_DATA_DIR; */ bus_dmamap_sync(ahb->buffer_dmat, ecb->dmamap, op); } else { ecb->hecb.data_ptr = 0; ecb->hecb.data_len = 0; } /* * Last time we need to check if this CCB needs to * be aborted. 
*/ if (ccb->ccb_h.status != CAM_REQ_INPROG) { if (nseg != 0) bus_dmamap_unload(ahb->buffer_dmat, ecb->dmamap); ahbecbfree(ahb, ecb); xpt_done(ccb); return; } ecb->state = ECB_ACTIVE; ccb->ccb_h.status |= CAM_SIM_QUEUED; LIST_INSERT_HEAD(&ahb->pending_ccbs, &ccb->ccb_h, sim_links.le); /* Tell the adapter about this command */ ahbqueuembox(ahb, ecb_paddr, ATTN_STARTECB|ccb->ccb_h.target_id); callout_reset_sbt(&ecb->timer, SBT_1MS * ccb->ccb_h.timeout, 0, ahbtimeout, ecb, 0); } static void ahbaction(struct cam_sim *sim, union ccb *ccb) { struct ahb_softc *ahb; CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("ahbaction\n")); ahb = (struct ahb_softc *)cam_sim_softc(sim); mtx_assert(&ahb->lock, MA_OWNED); switch (ccb->ccb_h.func_code) { /* Common cases first */ case XPT_SCSI_IO: /* Execute the requested I/O operation */ { struct ecb *ecb; struct hardware_ecb *hecb; int error; /* * get an ecb to use. */ if ((ecb = ahbecbget(ahb)) == NULL) { /* Should never occur */ panic("Failed to get an ecb"); } /* * So we can find the ECB when an abort is requested */ ecb->ccb = ccb; ccb->ccb_h.ccb_ecb_ptr = ecb; ccb->ccb_h.ccb_ahb_ptr = ahb; /* * Put all the arguments for the xfer in the ecb */ hecb = &ecb->hecb; hecb->opcode = ECBOP_INITIATOR_SCSI_CMD; hecb->flag_word1 = FW1_AUTO_REQUEST_SENSE | FW1_ERR_STATUS_BLK_ONLY; hecb->flag_word2 = ccb->ccb_h.target_lun | FW2_NO_RETRY_ON_BUSY; if ((ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0) { hecb->flag_word2 |= FW2_TAG_ENB | ((ccb->csio.tag_action & 0x3) << FW2_TAG_TYPE_SHIFT); } if ((ccb->ccb_h.flags & CAM_DIS_DISCONNECT) != 0) hecb->flag_word2 |= FW2_DISABLE_DISC; hecb->sense_len = ccb->csio.sense_len; hecb->cdb_len = ccb->csio.cdb_len; if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) { if ((ccb->ccb_h.flags & CAM_CDB_PHYS) == 0) { bcopy(ccb->csio.cdb_io.cdb_ptr, hecb->cdb, hecb->cdb_len); } else { /* I guess I could map it in... 
*/ ccb->ccb_h.status = CAM_REQ_INVALID; ahbecbfree(ahb, ecb); xpt_done(ccb); return; } } else { bcopy(ccb->csio.cdb_io.cdb_bytes, hecb->cdb, hecb->cdb_len); } error = bus_dmamap_load_ccb( ahb->buffer_dmat, ecb->dmamap, ccb, ahbexecuteecb, ecb, /*flags*/0); if (error == EINPROGRESS) { /* * So as to maintain ordering, freeze the controller * queue until our mapping is returned. */ xpt_freeze_simq(ahb->sim, 1); ccb->ccb_h.status |= CAM_RELEASE_SIMQ; } break; } case XPT_EN_LUN: /* Enable LUN as a target */ case XPT_TARGET_IO: /* Execute target I/O request */ case XPT_ACCEPT_TARGET_IO: /* Accept Host Target Mode CDB */ case XPT_CONT_TARGET_IO: /* Continue Host Target I/O Connection*/ case XPT_ABORT: /* Abort the specified CCB */ /* XXX Implement */ ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); break; case XPT_SET_TRAN_SETTINGS: { ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; xpt_done(ccb); break; } case XPT_GET_TRAN_SETTINGS: /* Get default/user set transfer settings for the target */ { struct ccb_trans_settings *cts = &ccb->cts; u_int target_mask = 0x01 << ccb->ccb_h.target_id; struct ccb_trans_settings_scsi *scsi = &cts->proto_specific.scsi; struct ccb_trans_settings_spi *spi = &cts->xport_specific.spi; if (cts->type == CTS_TYPE_USER_SETTINGS) { cts->protocol = PROTO_SCSI; cts->protocol_version = SCSI_REV_2; cts->transport = XPORT_SPI; cts->transport_version = 2; scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB; spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB; if ((ahb->disc_permitted & target_mask) != 0) spi->flags |= CTS_SPI_FLAGS_DISC_ENB; if ((ahb->tags_permitted & target_mask) != 0) scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB; spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT; spi->sync_period = 25; /* 10MHz */ if (spi->sync_period != 0) spi->sync_offset = 15; spi->valid = CTS_SPI_VALID_SYNC_RATE | CTS_SPI_VALID_SYNC_OFFSET | CTS_SPI_VALID_BUS_WIDTH | CTS_SPI_VALID_DISC; scsi->valid = CTS_SCSI_VALID_TQ; ccb->ccb_h.status = CAM_REQ_CMP; } else { ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; } xpt_done(ccb); 
break; } case XPT_RESET_DEV: /* Bus Device Reset the specified SCSI device */ { int i; ahb->immed_cmd = IMMED_RESET; ahbqueuembox(ahb, IMMED_RESET, ATTN_IMMED|ccb->ccb_h.target_id); /* Poll for interrupt completion */ for (i = 1000; ahb->immed_cmd != 0 && i != 0; i--) { DELAY(1000); ahbintr_locked(cam_sim_softc(sim)); } break; } case XPT_CALC_GEOMETRY: { cam_calc_geometry(&ccb->ccg, ahb->extended_trans); xpt_done(ccb); break; } case XPT_RESET_BUS: /* Reset the specified SCSI bus */ { int i; ahb->immed_cmd = IMMED_RESET; ahbqueuembox(ahb, IMMED_RESET, ATTN_IMMED|ahb->scsi_id); /* Poll for interrupt completion */ for (i = 1000; ahb->immed_cmd != 0 && i != 0; i--) DELAY(1000); ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); break; } case XPT_TERM_IO: /* Terminate the I/O process */ /* XXX Implement */ ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); break; case XPT_PATH_INQ: /* Path routing inquiry */ { struct ccb_pathinq *cpi = &ccb->cpi; cpi->version_num = 1; /* XXX??? */ cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE; cpi->target_sprt = 0; cpi->hba_misc = 0; cpi->hba_eng_cnt = 0; cpi->max_target = 7; cpi->max_lun = 7; cpi->initiator_id = ahb->scsi_id; cpi->bus_id = cam_sim_bus(sim); cpi->base_transfer_speed = 3300; strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); strlcpy(cpi->hba_vid, "Adaptec", HBA_IDLEN); strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); cpi->unit_number = cam_sim_unit(sim); - cpi->transport = XPORT_SPI; - cpi->transport_version = 2; - cpi->protocol = PROTO_SCSI; - cpi->protocol_version = SCSI_REV_2; + cpi->transport = XPORT_SPI; + cpi->transport_version = 2; + cpi->protocol = PROTO_SCSI; + cpi->protocol_version = SCSI_REV_2; cpi->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); break; } #if 0 /* Need these??? 
*/ case XPT_IMMED_NOTIFY: /* Notify Host Target driver of event */ case XPT_NOTIFY_ACK: /* Acknowledgement of event */ #endif default: ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); break; } } static void ahbpoll(struct cam_sim *sim) { ahbintr(cam_sim_softc(sim)); } static void ahbtimeout(void *arg) { struct ecb *ecb; union ccb *ccb; struct ahb_softc *ahb; ecb = (struct ecb *)arg; ccb = ecb->ccb; ahb = (struct ahb_softc *)ccb->ccb_h.ccb_ahb_ptr; mtx_assert(&ahb->lock, MA_OWNED); xpt_print_path(ccb->ccb_h.path); printf("ECB %p - timed out\n", (void *)ecb); if ((ecb->state & ECB_ACTIVE) == 0) { xpt_print_path(ccb->ccb_h.path); printf("ECB %p - timed out ECB already completed\n", (void *)ecb); return; } /* * In order to simplify the recovery process, we ask the XPT * layer to halt the queue of new transactions and we traverse * the list of pending CCBs and remove their timeouts. This * means that the driver attempts to clear only one error * condition at a time. In general, timeouts that occur * close together are related anyway, so there is no benefit * in attempting to handle errors in parallel. Timeouts will * be reinstated when the recovery process ends. */ if ((ecb->state & ECB_DEVICE_RESET) == 0) { struct ccb_hdr *ccb_h; if ((ecb->state & ECB_RELEASE_SIMQ) == 0) { xpt_freeze_simq(ahb->sim, /*count*/1); ecb->state |= ECB_RELEASE_SIMQ; } LIST_FOREACH(ccb_h, &ahb->pending_ccbs, sim_links.le) { struct ecb *pending_ecb; pending_ecb = (struct ecb *)ccb_h->ccb_ecb_ptr; callout_stop(&pending_ecb->timer); } /* Store for our interrupt handler */ ahb->immed_ecb = ecb; /* * Send a Bus Device Reset message: * The target that is holding up the bus may not * be the same as the one that triggered this timeout * (different commands have different timeout lengths), * but we have no way of determining this from our * timeout handler. Our strategy here is to queue a * BDR message to the target of the timed out command. 
* If this fails, we'll get another timeout 2 seconds * later which will attempt a bus reset. */ xpt_print_path(ccb->ccb_h.path); printf("Queuing BDR\n"); ecb->state |= ECB_DEVICE_RESET; callout_reset(&ecb->timer, 2 * hz, ahbtimeout, ecb); ahb->immed_cmd = IMMED_RESET; ahbqueuembox(ahb, IMMED_RESET, ATTN_IMMED|ccb->ccb_h.target_id); } else if ((ecb->state & ECB_SCSIBUS_RESET) != 0) { /* * Try a SCSI bus reset. We do this only if we * have already attempted to clear the condition with a BDR. */ xpt_print_path(ccb->ccb_h.path); printf("Attempting SCSI Bus reset\n"); ecb->state |= ECB_SCSIBUS_RESET; callout_reset(&ecb->timer, 2 * hz, ahbtimeout, ecb); ahb->immed_cmd = IMMED_RESET; ahbqueuembox(ahb, IMMED_RESET, ATTN_IMMED|ahb->scsi_id); } else { /* Bring out the hammer... */ ahbreset(ahb); /* Simulate the reset complete interrupt */ ahbhandleimmed(ahb, 0, ahb->scsi_id|INTSTAT_IMMED_OK); } } static device_method_t ahb_eisa_methods[] = { /* Device interface */ DEVMETHOD(device_probe, ahbprobe), DEVMETHOD(device_attach, ahbattach), { 0, 0 } }; static driver_t ahb_eisa_driver = { "ahb", ahb_eisa_methods, sizeof(struct ahb_softc), }; static devclass_t ahb_devclass; DRIVER_MODULE(ahb, eisa, ahb_eisa_driver, ahb_devclass, 0, 0); MODULE_DEPEND(ahb, eisa, 1, 1, 1); MODULE_DEPEND(ahb, cam, 1, 1, 1); Index: stable/11/sys/dev/aic/aic.c =================================================================== --- stable/11/sys/dev/aic/aic.c (revision 335137) +++ stable/11/sys/dev/aic/aic.c (revision 335138) @@ -1,1598 +1,1598 @@ /*- * Copyright (c) 1999 Luoqi Chen. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include static void aic_action(struct cam_sim *sim, union ccb *ccb); static void aic_execute_scb(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error); static void aic_intr_locked(struct aic_softc *aic); static void aic_start(struct aic_softc *aic); static void aic_select(struct aic_softc *aic); static void aic_selected(struct aic_softc *aic); static void aic_reselected(struct aic_softc *aic); static void aic_reconnect(struct aic_softc *aic, int tag); static void aic_cmd(struct aic_softc *aic); static void aic_msgin(struct aic_softc *aic); static void aic_handle_msgin(struct aic_softc *aic); static void aic_msgout(struct aic_softc *aic); static void aic_datain(struct aic_softc *aic); static void aic_dataout(struct aic_softc *aic); static void aic_done(struct aic_softc *aic, struct aic_scb *scb); static void aic_poll(struct 
cam_sim *sim); static void aic_timeout(void *arg); static void aic_scsi_reset(struct aic_softc *aic); static void aic_chip_reset(struct aic_softc *aic); static void aic_reset(struct aic_softc *aic, int initiate_reset); devclass_t aic_devclass; static struct aic_scb * aic_get_scb(struct aic_softc *aic) { struct aic_scb *scb; if (!dumping) mtx_assert(&aic->lock, MA_OWNED); if ((scb = SLIST_FIRST(&aic->free_scbs)) != NULL) SLIST_REMOVE_HEAD(&aic->free_scbs, link); return (scb); } static void aic_free_scb(struct aic_softc *aic, struct aic_scb *scb) { if (!dumping) mtx_assert(&aic->lock, MA_OWNED); if ((aic->flags & AIC_RESOURCE_SHORTAGE) != 0 && (scb->ccb->ccb_h.status & CAM_RELEASE_SIMQ) == 0) { scb->ccb->ccb_h.status |= CAM_RELEASE_SIMQ; aic->flags &= ~AIC_RESOURCE_SHORTAGE; } scb->flags = 0; SLIST_INSERT_HEAD(&aic->free_scbs, scb, link); } static void aic_action(struct cam_sim *sim, union ccb *ccb) { struct aic_softc *aic; CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("aic_action\n")); aic = (struct aic_softc *)cam_sim_softc(sim); mtx_assert(&aic->lock, MA_OWNED); switch (ccb->ccb_h.func_code) { case XPT_SCSI_IO: /* Execute the requested I/O operation */ case XPT_RESET_DEV: /* Bus Device Reset the specified SCSI device */ { struct aic_scb *scb; if ((scb = aic_get_scb(aic)) == NULL) { aic->flags |= AIC_RESOURCE_SHORTAGE; xpt_freeze_simq(aic->sim, /*count*/1); ccb->ccb_h.status = CAM_REQUEUE_REQ; xpt_done(ccb); return; } scb->ccb = ccb; ccb->ccb_h.ccb_scb_ptr = scb; ccb->ccb_h.ccb_aic_ptr = aic; scb->target = ccb->ccb_h.target_id; scb->lun = ccb->ccb_h.target_lun; if (ccb->ccb_h.func_code == XPT_SCSI_IO) { scb->cmd_len = ccb->csio.cdb_len; if (ccb->ccb_h.flags & CAM_CDB_POINTER) { if (ccb->ccb_h.flags & CAM_CDB_PHYS) { ccb->ccb_h.status = CAM_REQ_INVALID; aic_free_scb(aic, scb); xpt_done(ccb); return; } scb->cmd_ptr = ccb->csio.cdb_io.cdb_ptr; } else { scb->cmd_ptr = ccb->csio.cdb_io.cdb_bytes; } if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) { if 
((ccb->ccb_h.flags & CAM_DATA_MASK) != CAM_DATA_VADDR) { ccb->ccb_h.status = CAM_REQ_INVALID; aic_free_scb(aic, scb); xpt_done(ccb); return; } scb->data_ptr = ccb->csio.data_ptr; scb->data_len = ccb->csio.dxfer_len; } else { scb->data_ptr = NULL; scb->data_len = 0; } aic_execute_scb(scb, NULL, 0, 0); } else { scb->flags |= SCB_DEVICE_RESET; aic_execute_scb(scb, NULL, 0, 0); } break; } case XPT_SET_TRAN_SETTINGS: { struct ccb_trans_settings *cts = &ccb->cts; struct aic_tinfo *ti = &aic->tinfo[ccb->ccb_h.target_id]; struct ccb_trans_settings_scsi *scsi = &cts->proto_specific.scsi; struct ccb_trans_settings_spi *spi = &cts->xport_specific.spi; if ((spi->valid & CTS_SPI_VALID_DISC) != 0 && (aic->flags & AIC_DISC_ENABLE) != 0) { if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0) ti->flags |= TINFO_DISC_ENB; else ti->flags &= ~TINFO_DISC_ENB; } if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) { if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) ti->flags |= TINFO_TAG_ENB; else ti->flags &= ~TINFO_TAG_ENB; } if ((spi->valid & CTS_SPI_VALID_SYNC_RATE) != 0) { ti->goal.period = spi->sync_period; if (ti->goal.period > aic->min_period) { ti->goal.period = 0; ti->goal.offset = 0; } else if (ti->goal.period < aic->max_period) ti->goal.period = aic->max_period; } if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) != 0) { ti->goal.offset = spi->sync_offset; if (ti->goal.offset == 0) ti->goal.period = 0; else if (ti->goal.offset > AIC_SYNC_OFFSET) ti->goal.offset = AIC_SYNC_OFFSET; } if ((ti->goal.period != ti->current.period) || (ti->goal.offset != ti->current.offset)) ti->flags |= TINFO_SDTR_NEGO; ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); break; } case XPT_GET_TRAN_SETTINGS: { struct ccb_trans_settings *cts = &ccb->cts; struct aic_tinfo *ti = &aic->tinfo[ccb->ccb_h.target_id]; struct ccb_trans_settings_scsi *scsi = &cts->proto_specific.scsi; struct ccb_trans_settings_spi *spi = &cts->xport_specific.spi; cts->protocol = PROTO_SCSI; cts->protocol_version = SCSI_REV_2; cts->transport = 
XPORT_SPI; cts->transport_version = 2; scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB; spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB; if ((ti->flags & TINFO_DISC_ENB) != 0) spi->flags |= CTS_SPI_FLAGS_DISC_ENB; if ((ti->flags & TINFO_TAG_ENB) != 0) scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB; if (cts->type == CTS_TYPE_CURRENT_SETTINGS) { spi->sync_period = ti->current.period; spi->sync_offset = ti->current.offset; } else { spi->sync_period = ti->user.period; spi->sync_offset = ti->user.offset; } spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT; spi->valid = CTS_SPI_VALID_SYNC_RATE | CTS_SPI_VALID_SYNC_OFFSET | CTS_SPI_VALID_BUS_WIDTH | CTS_SPI_VALID_DISC; scsi->valid = CTS_SCSI_VALID_TQ; ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); break; } case XPT_CALC_GEOMETRY: { cam_calc_geometry(&ccb->ccg, /*extended*/1); xpt_done(ccb); break; } case XPT_RESET_BUS: /* Reset the specified SCSI bus */ aic_reset(aic, /*initiate_reset*/TRUE); ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); break; case XPT_PATH_INQ: /* Path routing inquiry */ { struct ccb_pathinq *cpi = &ccb->cpi; cpi->version_num = 1; /* XXX??? 
*/ cpi->hba_inquiry = PI_SDTR_ABLE | PI_TAG_ABLE; cpi->target_sprt = 0; cpi->hba_misc = 0; cpi->hba_eng_cnt = 0; cpi->max_target = 7; cpi->max_lun = 7; cpi->initiator_id = aic->initiator; cpi->bus_id = cam_sim_bus(sim); - cpi->base_transfer_speed = 3300; + cpi->base_transfer_speed = 3300; strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); strlcpy(cpi->hba_vid, "Adaptec", HBA_IDLEN); strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); cpi->unit_number = cam_sim_unit(sim); cpi->transport = XPORT_SPI; cpi->transport_version = 2; cpi->protocol = PROTO_SCSI; cpi->protocol_version = SCSI_REV_2; cpi->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); break; } default: ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); break; } } static void aic_execute_scb(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) { struct aic_scb *scb = (struct aic_scb *)arg; union ccb *ccb = scb->ccb; struct aic_softc *aic = (struct aic_softc *)ccb->ccb_h.ccb_aic_ptr; if (!dumping) mtx_assert(&aic->lock, MA_OWNED); if (ccb->ccb_h.status != CAM_REQ_INPROG) { aic_free_scb(aic, scb); xpt_done(ccb); return; } scb->flags |= SCB_ACTIVE; ccb->ccb_h.status |= CAM_SIM_QUEUED; TAILQ_INSERT_TAIL(&aic->pending_ccbs, &ccb->ccb_h, sim_links.tqe); callout_reset_sbt(&scb->timer, SBT_1MS * ccb->ccb_h.timeout, 0, aic_timeout, scb, 0); aic_start(aic); } /* * Start another command if the controller is not busy. */ static void aic_start(struct aic_softc *aic) { struct ccb_hdr *ccb_h; struct aic_tinfo *ti; if (aic->state != AIC_IDLE) return; TAILQ_FOREACH(ccb_h, &aic->pending_ccbs, sim_links.tqe) { ti = &aic->tinfo[ccb_h->target_id]; if ((ti->lubusy & (1 << ccb_h->target_lun)) == 0) { TAILQ_REMOVE(&aic->pending_ccbs, ccb_h, sim_links.tqe); aic->nexus = (struct aic_scb *)ccb_h->ccb_scb_ptr; aic_select(aic); return; } } CAM_DEBUG_PRINT(CAM_DEBUG_TRACE, ("aic_start: idle\n")); aic_outb(aic, SIMODE0, ENSELDI); aic_outb(aic, SIMODE1, ENSCSIRST); aic_outb(aic, SCSISEQ, ENRESELI); } /* * Start a selection. 
*/ static void aic_select(struct aic_softc *aic) { struct aic_scb *scb = aic->nexus; CAM_DEBUG(scb->ccb->ccb_h.path, CAM_DEBUG_TRACE, ("aic_select - ccb %p\n", scb->ccb)); aic->state = AIC_SELECTING; aic_outb(aic, DMACNTRL1, 0); aic_outb(aic, SCSIID, aic->initiator << OID_S | scb->target); aic_outb(aic, SXFRCTL1, STIMO_256ms | ENSTIMER | (aic->flags & AIC_PARITY_ENABLE ? ENSPCHK : 0)); aic_outb(aic, SIMODE0, ENSELDI|ENSELDO); aic_outb(aic, SIMODE1, ENSCSIRST|ENSELTIMO); aic_outb(aic, SCSISEQ, ENRESELI|ENSELO|ENAUTOATNO); } /* * We have successfully selected a target, prepare for the information * transfer phases. */ static void aic_selected(struct aic_softc *aic) { struct aic_scb *scb = aic->nexus; union ccb *ccb = scb->ccb; struct aic_tinfo *ti = &aic->tinfo[scb->target]; CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("aic_selected - ccb %p\n", ccb)); aic->state = AIC_HASNEXUS; if (scb->flags & SCB_DEVICE_RESET) { aic->msg_buf[0] = MSG_BUS_DEV_RESET; aic->msg_len = 1; aic->msg_outq = AIC_MSG_MSGBUF; } else { aic->msg_outq = AIC_MSG_IDENTIFY; if ((ti->flags & TINFO_TAG_ENB) != 0 && (ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0) aic->msg_outq |= AIC_MSG_TAG_Q; else ti->lubusy |= 1 << scb->lun; if ((ti->flags & TINFO_SDTR_NEGO) != 0) aic->msg_outq |= AIC_MSG_SDTR; } aic_outb(aic, CLRSINT0, CLRSELDO); aic_outb(aic, CLRSINT1, CLRBUSFREE); aic_outb(aic, SCSISEQ, ENAUTOATNP); aic_outb(aic, SIMODE0, 0); aic_outb(aic, SIMODE1, ENSCSIRST|ENBUSFREE|ENREQINIT); aic_outb(aic, SCSIRATE, ti->scsirate); } /* * We are re-selected by a target, save the target id and wait for the * target to further identify itself. */ static void aic_reselected(struct aic_softc *aic) { u_int8_t selid; CAM_DEBUG_PRINT(CAM_DEBUG_TRACE, ("aic_reselected\n")); /* * If we have started a selection, it must have lost out in * the arbitration, put the command back to the pending queue. 
*/ if (aic->nexus) { TAILQ_INSERT_HEAD(&aic->pending_ccbs, &aic->nexus->ccb->ccb_h, sim_links.tqe); aic->nexus = NULL; } selid = aic_inb(aic, SELID) & ~(1 << aic->initiator); if (selid & (selid - 1)) { /* this should never have happened */ printf("aic_reselected: invalid selid %x\n", selid); aic_reset(aic, /*initiate_reset*/TRUE); return; } aic->state = AIC_RESELECTED; aic->target = ffs(selid) - 1; aic->lun = -1; aic_outb(aic, CLRSINT0, CLRSELDI); aic_outb(aic, CLRSINT1, CLRBUSFREE); aic_outb(aic, SIMODE0, 0); aic_outb(aic, SIMODE1, ENSCSIRST|ENBUSFREE|ENREQINIT); aic_outb(aic, SCSISEQ, ENAUTOATNP); aic_outb(aic, SCSIRATE, aic->tinfo[aic->target].scsirate); } /* * Raise ATNO to signal the target that we have a message for it. */ static __inline void aic_sched_msgout(struct aic_softc *aic, u_int8_t msg) { if (msg) { aic->msg_buf[0] = msg; aic->msg_len = 1; } aic->msg_outq |= AIC_MSG_MSGBUF; aic_outb(aic, SCSISIGO, aic_inb(aic, SCSISIGI) | ATNO); } /* * Wait for SPIORDY (SCSI PIO ready) flag, or a phase change. */ static __inline int aic_spiordy(struct aic_softc *aic) { while (!(aic_inb(aic, DMASTAT) & INTSTAT) && !(aic_inb(aic, SSTAT0) & SPIORDY)) ; return !(aic_inb(aic, DMASTAT) & INTSTAT); } /* * Reestablish a disconnected nexus. 
*/ static void aic_reconnect(struct aic_softc *aic, int tag) { struct aic_scb *scb; struct ccb_hdr *ccb_h; CAM_DEBUG_PRINT(CAM_DEBUG_TRACE, ("aic_reconnect\n")); /* Find the nexus */ scb = NULL; TAILQ_FOREACH(ccb_h, &aic->nexus_ccbs, sim_links.tqe) { scb = (struct aic_scb *)ccb_h->ccb_scb_ptr; if (scb->target == aic->target && scb->lun == aic->lun && (tag == -1 || scb->tag == tag)) break; } /* ABORT if nothing is found */ if (!ccb_h) { if (tag == -1) aic_sched_msgout(aic, MSG_ABORT); else aic_sched_msgout(aic, MSG_ABORT_TAG); xpt_async(AC_UNSOL_RESEL, aic->path, NULL); return; } /* Reestablish the nexus */ TAILQ_REMOVE(&aic->nexus_ccbs, ccb_h, sim_links.tqe); aic->nexus = scb; scb->flags &= ~SCB_DISCONNECTED; aic->state = AIC_HASNEXUS; } /* * Read messages. */ static void aic_msgin(struct aic_softc *aic) { int msglen; CAM_DEBUG_PRINT(CAM_DEBUG_TRACE, ("aic_msgin\n")); aic_outb(aic, SIMODE1, ENSCSIRST|ENPHASEMIS|ENBUSFREE); aic_outb(aic, SXFRCTL0, CHEN|SPIOEN); aic->flags &= ~AIC_DROP_MSGIN; aic->msg_len = 0; do { /* * If a parity error is detected, drop the remaining * bytes and inform the target so it could resend * the messages. 
*/ if (aic_inb(aic, SSTAT1) & SCSIPERR) { aic_outb(aic, CLRSINT1, CLRSCSIPERR); aic->flags |= AIC_DROP_MSGIN; aic_sched_msgout(aic, MSG_PARITY_ERROR); } if ((aic->flags & AIC_DROP_MSGIN)) { aic_inb(aic, SCSIDAT); continue; } /* read the message byte without ACKing on it */ aic->msg_buf[aic->msg_len++] = aic_inb(aic, SCSIBUS); if (aic->msg_buf[0] == MSG_EXTENDED) { if (aic->msg_len < 2) { (void) aic_inb(aic, SCSIDAT); continue; } switch (aic->msg_buf[2]) { case MSG_EXT_SDTR: msglen = MSG_EXT_SDTR_LEN; break; case MSG_EXT_WDTR: msglen = MSG_EXT_WDTR_LEN; break; default: msglen = 0; break; } if (aic->msg_buf[1] != msglen) { aic->flags |= AIC_DROP_MSGIN; aic_sched_msgout(aic, MSG_MESSAGE_REJECT); } msglen += 2; } else if (aic->msg_buf[0] >= 0x20 && aic->msg_buf[0] <= 0x2f) msglen = 2; else msglen = 1; /* * If we have a complete message, handle it before the final * ACK (in case we decide to reject the message). */ if (aic->msg_len == msglen) { aic_handle_msgin(aic); aic->msg_len = 0; } /* ACK on the message byte */ (void) aic_inb(aic, SCSIDAT); } while (aic_spiordy(aic)); aic_outb(aic, SXFRCTL0, CHEN); aic_outb(aic, SIMODE1, ENSCSIRST|ENBUSFREE|ENREQINIT); } /* * Handle a message. 
*/ static void aic_handle_msgin(struct aic_softc *aic) { struct aic_scb *scb; struct ccb_hdr *ccb_h; struct aic_tinfo *ti; struct ccb_trans_settings neg; struct ccb_trans_settings_spi *spi = &neg.xport_specific.spi; if (aic->state == AIC_RESELECTED) { if (!MSG_ISIDENTIFY(aic->msg_buf[0])) { aic_sched_msgout(aic, MSG_MESSAGE_REJECT); return; } aic->lun = aic->msg_buf[0] & MSG_IDENTIFY_LUNMASK; if (aic->tinfo[aic->target].lubusy & (1 << aic->lun)) aic_reconnect(aic, -1); else aic->state = AIC_RECONNECTING; return; } if (aic->state == AIC_RECONNECTING) { if (aic->msg_buf[0] != MSG_SIMPLE_Q_TAG) { aic_sched_msgout(aic, MSG_MESSAGE_REJECT); return; } aic_reconnect(aic, aic->msg_buf[1]); return; } switch (aic->msg_buf[0]) { case MSG_CMDCOMPLETE: { struct ccb_scsiio *csio; scb = aic->nexus; ccb_h = &scb->ccb->ccb_h; csio = &scb->ccb->csio; if ((scb->flags & SCB_SENSE) != 0) { /* auto REQUEST SENSE command */ scb->flags &= ~SCB_SENSE; csio->sense_resid = scb->data_len; if (scb->status == SCSI_STATUS_OK) { ccb_h->status |= CAM_SCSI_STATUS_ERROR|CAM_AUTOSNS_VALID; /*scsi_sense_print(csio);*/ } else { ccb_h->status |= CAM_AUTOSENSE_FAIL; printf("ccb %p sense failed %x\n", ccb_h, scb->status); } } else { csio->scsi_status = scb->status; csio->resid = scb->data_len; if (scb->status == SCSI_STATUS_OK) { /* everything goes well */ ccb_h->status |= CAM_REQ_CMP; } else if ((ccb_h->flags & CAM_DIS_AUTOSENSE) == 0 && (csio->scsi_status == SCSI_STATUS_CHECK_COND || csio->scsi_status == SCSI_STATUS_CMD_TERMINATED)) { /* try to retrieve sense information */ scb->flags |= SCB_SENSE; aic->flags |= AIC_BUSFREE_OK; return; } else ccb_h->status |= CAM_SCSI_STATUS_ERROR; } aic_done(aic, scb); aic->flags |= AIC_BUSFREE_OK; break; } case MSG_EXTENDED: switch (aic->msg_buf[2]) { case MSG_EXT_SDTR: scb = aic->nexus; ti = &aic->tinfo[scb->target]; if (ti->flags & TINFO_SDTR_SENT) { ti->current.period = aic->msg_buf[3]; ti->current.offset = aic->msg_buf[4]; } else { ti->current.period = 
aic->msg_buf[3] = max(ti->goal.period, aic->msg_buf[3]); ti->current.offset = aic->msg_buf[4] = min(ti->goal.offset, aic->msg_buf[4]); /* * The target initiated the negotiation, * send back a response. */ aic_sched_msgout(aic, 0); } ti->flags &= ~(TINFO_SDTR_SENT|TINFO_SDTR_NEGO); ti->scsirate = ti->current.offset ? ti->current.offset | ((ti->current.period * 4 + 49) / 50 - 2) << 4 : 0; aic_outb(aic, SCSIRATE, ti->scsirate); memset(&neg, 0, sizeof (neg)); neg.protocol = PROTO_SCSI; neg.protocol_version = SCSI_REV_2; neg.transport = XPORT_SPI; neg.transport_version = 2; spi->sync_period = ti->goal.period = ti->current.period; spi->sync_offset = ti->goal.offset = ti->current.offset; spi->valid = CTS_SPI_VALID_SYNC_RATE | CTS_SPI_VALID_SYNC_OFFSET; ccb_h = &scb->ccb->ccb_h; xpt_setup_ccb(&neg.ccb_h, ccb_h->path, 1); xpt_async(AC_TRANSFER_NEG, ccb_h->path, &neg); break; case MSG_EXT_WDTR: default: aic_sched_msgout(aic, MSG_MESSAGE_REJECT); break; } break; case MSG_DISCONNECT: scb = aic->nexus; ccb_h = &scb->ccb->ccb_h; TAILQ_INSERT_TAIL(&aic->nexus_ccbs, ccb_h, sim_links.tqe); scb->flags |= SCB_DISCONNECTED; aic->flags |= AIC_BUSFREE_OK; aic->nexus = NULL; CAM_DEBUG(ccb_h->path, CAM_DEBUG_TRACE, ("disconnected\n")); break; case MSG_MESSAGE_REJECT: switch (aic->msg_outq & -aic->msg_outq) { case AIC_MSG_TAG_Q: scb = aic->nexus; ti = &aic->tinfo[scb->target]; ti->flags &= ~TINFO_TAG_ENB; ti->lubusy |= 1 << scb->lun; break; case AIC_MSG_SDTR: scb = aic->nexus; ti = &aic->tinfo[scb->target]; ti->current.period = ti->goal.period = 0; ti->current.offset = ti->goal.offset = 0; ti->flags &= ~(TINFO_SDTR_SENT|TINFO_SDTR_NEGO); ti->scsirate = 0; aic_outb(aic, SCSIRATE, ti->scsirate); memset(&neg, 0, sizeof (neg)); neg.protocol = PROTO_SCSI; neg.protocol_version = SCSI_REV_2; neg.transport = XPORT_SPI; neg.transport_version = 2; spi->sync_period = ti->current.period; spi->sync_offset = ti->current.offset; spi->valid = CTS_SPI_VALID_SYNC_RATE | CTS_SPI_VALID_SYNC_OFFSET; ccb_h = 
&scb->ccb->ccb_h; xpt_setup_ccb(&neg.ccb_h, ccb_h->path, 1); xpt_async(AC_TRANSFER_NEG, ccb_h->path, &neg); break; default: break; } break; case MSG_SAVEDATAPOINTER: break; case MSG_RESTOREPOINTERS: break; case MSG_NOOP: break; default: aic_sched_msgout(aic, MSG_MESSAGE_REJECT); break; } } /* * Send messages. */ static void aic_msgout(struct aic_softc *aic) { struct aic_scb *scb; union ccb *ccb; struct aic_tinfo *ti; int msgidx = 0; CAM_DEBUG_PRINT(CAM_DEBUG_TRACE, ("aic_msgout\n")); aic_outb(aic, SIMODE1, ENSCSIRST|ENPHASEMIS|ENBUSFREE); aic_outb(aic, SXFRCTL0, CHEN|SPIOEN); /* * If the previous phase is also the message out phase, * we need to retransmit all the messages, probably * because the target has detected a parity error during * the past transmission. */ if (aic->prev_phase == PH_MSGOUT) aic->msg_outq = aic->msg_sent; do { int q = aic->msg_outq; if (msgidx > 0 && msgidx == aic->msg_len) { /* complete message sent, start the next one */ q &= -q; aic->msg_sent |= q; aic->msg_outq ^= q; q = aic->msg_outq; msgidx = 0; } if (msgidx == 0) { /* setup the message */ switch (q & -q) { case AIC_MSG_IDENTIFY: scb = aic->nexus; ccb = scb->ccb; ti = &aic->tinfo[scb->target]; aic->msg_buf[0] = MSG_IDENTIFY(scb->lun, (ti->flags & TINFO_DISC_ENB) && !(ccb->ccb_h.flags & CAM_DIS_DISCONNECT)); aic->msg_len = 1; break; case AIC_MSG_TAG_Q: scb = aic->nexus; ccb = scb->ccb; aic->msg_buf[0] = ccb->csio.tag_action; aic->msg_buf[1] = scb->tag; aic->msg_len = 2; break; case AIC_MSG_SDTR: scb = aic->nexus; ti = &aic->tinfo[scb->target]; aic->msg_buf[0] = MSG_EXTENDED; aic->msg_buf[1] = MSG_EXT_SDTR_LEN; aic->msg_buf[2] = MSG_EXT_SDTR; aic->msg_buf[3] = ti->goal.period; aic->msg_buf[4] = ti->goal.offset; aic->msg_len = MSG_EXT_SDTR_LEN + 2; ti->flags |= TINFO_SDTR_SENT; break; case AIC_MSG_MSGBUF: /* a single message already in the buffer */ if (aic->msg_buf[0] == MSG_BUS_DEV_RESET || aic->msg_buf[0] == MSG_ABORT || aic->msg_buf[0] == MSG_ABORT_TAG) aic->flags |= AIC_BUSFREE_OK; 
break; } } /* * If this is the last message byte of all messages, * clear ATNO to signal transmission complete. */ if ((q & (q - 1)) == 0 && msgidx == aic->msg_len - 1) aic_outb(aic, CLRSINT1, CLRATNO); /* transmit the message byte */ aic_outb(aic, SCSIDAT, aic->msg_buf[msgidx++]); } while (aic_spiordy(aic)); aic_outb(aic, SXFRCTL0, CHEN); aic_outb(aic, SIMODE1, ENSCSIRST|ENBUSFREE|ENREQINIT); } /* * Read data bytes. */ static void aic_datain(struct aic_softc *aic) { struct aic_scb *scb = aic->nexus; u_int8_t dmastat, dmacntrl0; int n; CAM_DEBUG_PRINT(CAM_DEBUG_TRACE, ("aic_datain\n")); aic_outb(aic, SIMODE1, ENSCSIRST|ENPHASEMIS|ENBUSFREE); aic_outb(aic, SXFRCTL0, SCSIEN|DMAEN|CHEN); dmacntrl0 = ENDMA; if (aic->flags & AIC_DWIO_ENABLE) dmacntrl0 |= DWORDPIO; aic_outb(aic, DMACNTRL0, dmacntrl0); while (scb->data_len > 0) { for (;;) { /* wait for the fifo to fill up or a phase change */ dmastat = aic_inb(aic, DMASTAT); if (dmastat & (INTSTAT|DFIFOFULL)) break; } if (dmastat & DFIFOFULL) { n = FIFOSIZE; } else { /* * No more data, wait for the remaining bytes in * the scsi fifo to be transfer to the host fifo. */ while (!(aic_inb(aic, SSTAT2) & SEMPTY)) ; n = aic_inb(aic, FIFOSTAT); } n = imin(scb->data_len, n); if (aic->flags & AIC_DWIO_ENABLE) { if (n >= 12) { aic_insl(aic, DMADATALONG, scb->data_ptr, n>>2); scb->data_ptr += n & ~3; scb->data_len -= n & ~3; n &= 3; } } else { if (n >= 8) { aic_insw(aic, DMADATA, scb->data_ptr, n >> 1); scb->data_ptr += n & ~1; scb->data_len -= n & ~1; n &= 1; } } if (n) { aic_outb(aic, DMACNTRL0, ENDMA|B8MODE); aic_insb(aic, DMADATA, scb->data_ptr, n); scb->data_ptr += n; scb->data_len -= n; aic_outb(aic, DMACNTRL0, dmacntrl0); } if (dmastat & INTSTAT) break; } aic_outb(aic, SXFRCTL0, CHEN); aic_outb(aic, SIMODE1, ENSCSIRST|ENBUSFREE|ENREQINIT); } /* * Send data bytes. 
*/
/*
 * PIO data-out: feed bytes from the current SCB's data buffer to the chip's
 * SCSI FIFO until the transfer count is exhausted or the target changes
 * phase (signalled via INTSTAT).  Uses 32-bit or 16-bit burst PIO where the
 * chip supports it, falling back to byte PIO for the residue.
 */
static void
aic_dataout(struct aic_softc *aic)
{
	struct aic_scb *scb = aic->nexus;
	u_int8_t dmastat, dmacntrl0, sstat2;
	int n;

	CAM_DEBUG_PRINT(CAM_DEBUG_TRACE, ("aic_dataout\n"));

	/* Interrupt on bus reset, phase mismatch or bus free while we poll. */
	aic_outb(aic, SIMODE1, ENSCSIRST|ENPHASEMIS|ENBUSFREE);
	aic_outb(aic, SXFRCTL0, SCSIEN|DMAEN|CHEN);
	dmacntrl0 = ENDMA|WRITE;
	if (aic->flags & AIC_DWIO_ENABLE)
		dmacntrl0 |= DWORDPIO;
	aic_outb(aic, DMACNTRL0, dmacntrl0);

	while (scb->data_len > 0) {
		for (;;) {
			/* wait for the fifo to clear up or a phase change */
			dmastat = aic_inb(aic, DMASTAT);
			if (dmastat & (INTSTAT|DFIFOEMP))
				break;
		}

		/* Phase change (or other interrupt): stop filling the fifo. */
		if (dmastat & INTSTAT)
			break;

		n = imin(scb->data_len, FIFOSIZE);
		if (aic->flags & AIC_DWIO_ENABLE) {
			/* 32-bit PIO for the dword-aligned bulk of the chunk. */
			if (n >= 12) {
				aic_outsl(aic, DMADATALONG, scb->data_ptr,n>>2);
				scb->data_ptr += n & ~3;
				scb->data_len -= n & ~3;
				n &= 3;
			}
		} else {
			/* 16-bit PIO for the word-aligned bulk of the chunk. */
			if (n >= 8) {
				aic_outsw(aic, DMADATA, scb->data_ptr, n >> 1);
				scb->data_ptr += n & ~1;
				scb->data_len -= n & ~1;
				n &= 1;
			}
		}

		if (n) {
			/* Residual odd bytes go out in 8-bit mode. */
			aic_outb(aic, DMACNTRL0, ENDMA|WRITE|B8MODE);
			aic_outsb(aic, DMADATA, scb->data_ptr, n);
			scb->data_ptr += n;
			scb->data_len -= n;
			aic_outb(aic, DMACNTRL0, dmacntrl0);
		}
	}

	for (;;) {
		/* wait until all bytes in the fifos are transmitted */
		dmastat = aic_inb(aic, DMASTAT);
		sstat2 = aic_inb(aic, SSTAT2);
		if ((dmastat & DFIFOEMP) && (sstat2 & SEMPTY))
			break;
		if (dmastat & INTSTAT) {
			/*
			 * adjust for untransmitted bytes: host fifo count
			 * plus the SCSI fifo count in SSTAT2's low nibble
			 * (NOTE(review): low-nibble meaning presumed from
			 * usage here — confirm against the AIC-6360 data
			 * sheet).
			 */
			n = aic_inb(aic, FIFOSTAT) + (sstat2 & 0xf);
			scb->data_ptr -= n;
			scb->data_len += n;
			/* clear the fifo */
			aic_outb(aic, SXFRCTL0, CHEN|CLRCH);
			aic_outb(aic, DMACNTRL0, RSTFIFO);
			break;
		}
	}

	/* Back to quiescent interrupt mask: wait for the next REQ. */
	aic_outb(aic, SXFRCTL0, CHEN);
	aic_outb(aic, SIMODE1, ENSCSIRST|ENBUSFREE|ENREQINIT);
}

/*
 * Send the scsi command.
*/
/*
 * Transmit the CDB for the current nexus in COMMAND phase via word PIO.
 * For an autosense request the original CDB is replaced in-place with a
 * REQUEST SENSE command aimed at the SCB's sense buffer.
 */
static void
aic_cmd(struct aic_softc *aic)
{
	struct aic_scb *scb = aic->nexus;
	struct scsi_request_sense sense_cmd;

	CAM_DEBUG_PRINT(CAM_DEBUG_TRACE, ("aic_cmd\n"));

	if (scb->flags & SCB_SENSE) {
		/* autosense request */
		sense_cmd.opcode = REQUEST_SENSE;
		sense_cmd.byte2 = scb->lun << 5;
		sense_cmd.length = scb->ccb->csio.sense_len;
		sense_cmd.control = 0;
		sense_cmd.unused[0] = 0;
		sense_cmd.unused[1] = 0;
		/*
		 * sense_cmd is a stack local; cmd_ptr is consumed by the
		 * aic_outsw() below before this function returns, so the
		 * pointer never escapes the frame.
		 */
		scb->cmd_ptr = (u_int8_t *)&sense_cmd;
		scb->cmd_len = sizeof(sense_cmd);
		scb->data_ptr = (u_int8_t *)&scb->ccb->csio.sense_data;
		scb->data_len = scb->ccb->csio.sense_len;
	}

	aic_outb(aic, SIMODE1, ENSCSIRST|ENPHASEMIS|ENBUSFREE);
	aic_outb(aic, DMACNTRL0, ENDMA|WRITE);
	aic_outb(aic, SXFRCTL0, SCSIEN|DMAEN|CHEN);
	/*
	 * Push the whole CDB as 16-bit words.  NOTE(review): cmd_len >> 1
	 * assumes an even CDB length — SCSI CDBs are 6/10/12 bytes, which
	 * satisfies this, but confirm no odd-length path exists.
	 */
	aic_outsw(aic, DMADATA, (u_int16_t *)scb->cmd_ptr, scb->cmd_len >> 1);
	/* Spin until the SCSI fifo drains or the target changes phase. */
	while ((aic_inb(aic, SSTAT2) & SEMPTY) == 0 &&
	    (aic_inb(aic, DMASTAT) & INTSTAT) == 0)
		;
	aic_outb(aic, SXFRCTL0, CHEN);
	aic_outb(aic, SIMODE1, ENSCSIRST|ENBUSFREE|ENREQINIT);
}

/*
 * Finish off a command. The caller is responsible to remove the ccb
 * from any queue.
*/ static void aic_done(struct aic_softc *aic, struct aic_scb *scb) { union ccb *ccb = scb->ccb; CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("aic_done - ccb %p status %x resid %d\n", ccb, ccb->ccb_h.status, ccb->csio.resid)); callout_stop(&scb->timer); if ((scb->flags & SCB_DEVICE_RESET) != 0 && ccb->ccb_h.func_code != XPT_RESET_DEV) { struct cam_path *path; struct ccb_hdr *ccb_h; cam_status error; error = xpt_create_path(&path, /*periph*/NULL, cam_sim_path(aic->sim), scb->target, CAM_LUN_WILDCARD); if (error == CAM_REQ_CMP) { xpt_async(AC_SENT_BDR, path, NULL); xpt_free_path(path); } ccb_h = TAILQ_FIRST(&aic->pending_ccbs); while (ccb_h != NULL) { struct aic_scb *pending_scb; pending_scb = (struct aic_scb *)ccb_h->ccb_scb_ptr; if (ccb_h->target_id == scb->target) { ccb_h->status |= CAM_BDR_SENT; ccb_h = TAILQ_NEXT(ccb_h, sim_links.tqe); TAILQ_REMOVE(&aic->pending_ccbs, &pending_scb->ccb->ccb_h, sim_links.tqe); aic_done(aic, pending_scb); } else { callout_reset_sbt(&pending_scb->timer, SBT_1MS * ccb_h->timeout, 0, aic_timeout, pending_scb, 0); ccb_h = TAILQ_NEXT(ccb_h, sim_links.tqe); } } ccb_h = TAILQ_FIRST(&aic->nexus_ccbs); while (ccb_h != NULL) { struct aic_scb *nexus_scb; nexus_scb = (struct aic_scb *)ccb_h->ccb_scb_ptr; if (ccb_h->target_id == scb->target) { ccb_h->status |= CAM_BDR_SENT; ccb_h = TAILQ_NEXT(ccb_h, sim_links.tqe); TAILQ_REMOVE(&aic->nexus_ccbs, &nexus_scb->ccb->ccb_h, sim_links.tqe); aic_done(aic, nexus_scb); } else { callout_reset_sbt(&nexus_scb->timer, SBT_1MS * ccb_h->timeout, 0, aic_timeout, nexus_scb, 0); ccb_h = TAILQ_NEXT(ccb_h, sim_links.tqe); } } } if (aic->nexus == scb || scb->flags & SCB_DISCONNECTED) aic->tinfo[scb->target].lubusy &= ~(1 << scb->lun); if (aic->nexus == scb) { aic->nexus = NULL; } aic_free_scb(aic, scb); xpt_done(ccb); } static void aic_poll(struct cam_sim *sim) { aic_intr_locked(cam_sim_softc(sim)); } static void aic_timeout(void *arg) { struct aic_scb *scb = (struct aic_scb *)arg; union ccb *ccb = scb->ccb; 
struct aic_softc *aic = (struct aic_softc *)ccb->ccb_h.ccb_aic_ptr; mtx_assert(&aic->lock, MA_OWNED); xpt_print_path(ccb->ccb_h.path); printf("ccb %p - timed out", ccb); if (aic->nexus && aic->nexus != scb) printf(", nexus %p", aic->nexus->ccb); printf(", phase 0x%x, state %d\n", aic_inb(aic, SCSISIGI), aic->state); if ((scb->flags & SCB_ACTIVE) == 0) { xpt_print_path(ccb->ccb_h.path); printf("ccb %p - timed out already completed\n", ccb); return; } if ((scb->flags & SCB_DEVICE_RESET) == 0 && aic->nexus == scb) { struct ccb_hdr *ccb_h = &scb->ccb->ccb_h; struct aic_scb *pending_scb; if ((ccb_h->status & CAM_RELEASE_SIMQ) == 0) { xpt_freeze_simq(aic->sim, /*count*/1); ccb_h->status |= CAM_RELEASE_SIMQ; } TAILQ_FOREACH(ccb_h, &aic->pending_ccbs, sim_links.tqe) { pending_scb = ccb_h->ccb_scb_ptr; callout_stop(&pending_scb->timer); } TAILQ_FOREACH(ccb_h, &aic->nexus_ccbs, sim_links.tqe) { pending_scb = ccb_h->ccb_scb_ptr; callout_stop(&pending_scb->timer); } scb->flags |= SCB_DEVICE_RESET; callout_reset(&scb->timer, 5 * hz, aic_timeout, scb); aic_sched_msgout(aic, MSG_BUS_DEV_RESET); } else { if (aic->nexus == scb) { ccb->ccb_h.status |= CAM_CMD_TIMEOUT; aic_done(aic, scb); } aic_reset(aic, /*initiate_reset*/TRUE); } } void aic_intr(void *arg) { struct aic_softc *aic = (struct aic_softc *)arg; mtx_lock(&aic->lock); aic_intr_locked(aic); mtx_unlock(&aic->lock); } void aic_intr_locked(struct aic_softc *aic) { u_int8_t sstat0, sstat1; union ccb *ccb; struct aic_scb *scb; if (!(aic_inb(aic, DMASTAT) & INTSTAT)) return; aic_outb(aic, DMACNTRL0, 0); sstat0 = aic_inb(aic, SSTAT0); sstat1 = aic_inb(aic, SSTAT1); if ((sstat1 & SCSIRSTI) != 0) { /* a device-initiated bus reset */ aic_outb(aic, CLRSINT1, CLRSCSIRSTI); aic_reset(aic, /*initiate_reset*/FALSE); return; } if ((sstat1 & SCSIPERR) != 0) { aic_outb(aic, CLRSINT1, CLRSCSIPERR); aic_sched_msgout(aic, MSG_PARITY_ERROR); aic_outb(aic, DMACNTRL0, INTEN); return; } if (aic_inb(aic, SSTAT4)) { aic_outb(aic, CLRSERR, 
CLRSYNCERR|CLRFWERR|CLRFRERR); aic_reset(aic, /*initiate_reset*/TRUE); return; } if (aic->state <= AIC_SELECTING) { if ((sstat0 & SELDI) != 0) { aic_reselected(aic); aic_outb(aic, DMACNTRL0, INTEN); return; } if ((sstat0 & SELDO) != 0) { aic_selected(aic); aic_outb(aic, DMACNTRL0, INTEN); return; } if ((sstat1 & SELTO) != 0) { scb = aic->nexus; ccb = scb->ccb; ccb->ccb_h.status = CAM_SEL_TIMEOUT; aic_done(aic, scb); while ((sstat1 & BUSFREE) == 0) sstat1 = aic_inb(aic, SSTAT1); aic->flags |= AIC_BUSFREE_OK; } } if ((sstat1 & BUSFREE) != 0) { aic_outb(aic, SCSISEQ, 0); aic_outb(aic, CLRSINT0, sstat0); aic_outb(aic, CLRSINT1, sstat1); if ((scb = aic->nexus)) { if ((aic->flags & AIC_BUSFREE_OK) == 0) { ccb = scb->ccb; ccb->ccb_h.status = CAM_UNEXP_BUSFREE; aic_done(aic, scb); } else if (scb->flags & SCB_DEVICE_RESET) { ccb = scb->ccb; if (ccb->ccb_h.func_code == XPT_RESET_DEV) { xpt_async(AC_SENT_BDR, ccb->ccb_h.path, NULL); ccb->ccb_h.status |= CAM_REQ_CMP; } else ccb->ccb_h.status |= CAM_CMD_TIMEOUT; aic_done(aic, scb); } else if (scb->flags & SCB_SENSE) { /* autosense request */ aic->flags &= ~AIC_BUSFREE_OK; aic->tinfo[scb->target].lubusy &= ~(1 << scb->lun); aic_select(aic); aic_outb(aic, DMACNTRL0, INTEN); return; } } aic->flags &= ~AIC_BUSFREE_OK; aic->state = AIC_IDLE; aic_start(aic); aic_outb(aic, DMACNTRL0, INTEN); return; } if ((sstat1 & REQINIT) != 0) { u_int8_t phase = aic_inb(aic, SCSISIGI) & PH_MASK; aic_outb(aic, SCSISIGO, phase); aic_outb(aic, CLRSINT1, CLRPHASECHG); switch (phase) { case PH_MSGOUT: aic_msgout(aic); break; case PH_MSGIN: aic_msgin(aic); break; case PH_STAT: scb = aic->nexus; ccb = scb->ccb; aic_outb(aic, DMACNTRL0, 0); aic_outb(aic, SXFRCTL0, CHEN|SPIOEN); scb->status = aic_inb(aic, SCSIDAT); aic_outb(aic, SXFRCTL0, CHEN); break; case PH_CMD: aic_cmd(aic); break; case PH_DATAIN: aic_datain(aic); break; case PH_DATAOUT: aic_dataout(aic); break; } aic->prev_phase = phase; aic_outb(aic, DMACNTRL0, INTEN); return; } printf("aic_intr: 
unexpected intr sstat0 %x sstat1 %x\n", sstat0, sstat1);
	aic_outb(aic, DMACNTRL0, INTEN);
}

/*
 * Reset ourselves.
 *
 * Bring the chip back to a known quiescent state: clear fifos, disable
 * selection and interrupts, drop any synchronous agreement, and restore
 * our own SCSI ID.  Does not touch the SCSI bus itself (see
 * aic_scsi_reset for that).
 */
static void
aic_chip_reset(struct aic_softc *aic)
{
	/*
	 * Doc. recommends to clear these two registers before
	 * operations commence
	 */
	aic_outb(aic, SCSITEST, 0);
	aic_outb(aic, TEST, 0);

	/* Reset SCSI-FIFO and abort any transfers */
	aic_outb(aic, SXFRCTL0, CHEN|CLRCH|CLRSTCNT);

	/* Reset HOST-FIFO */
	aic_outb(aic, DMACNTRL0, RSTFIFO);
	aic_outb(aic, DMACNTRL1, 0);

	/* Disable all selection features */
	aic_outb(aic, SCSISEQ, 0);
	aic_outb(aic, SXFRCTL1, 0);

	/* Disable interrupts */
	aic_outb(aic, SIMODE0, 0);
	aic_outb(aic, SIMODE1, 0);

	/* Clear interrupts */
	aic_outb(aic, CLRSINT0, 0x7f);
	aic_outb(aic, CLRSINT1, 0xef);

	/* Disable synchronous transfers */
	aic_outb(aic, SCSIRATE, 0);

	/* Haven't seen any errors (yet) */
	aic_outb(aic, CLRSERR, 0x07);

	/* Set our SCSI-ID */
	aic_outb(aic, SCSIID, aic->initiator << OID_S);
	aic_outb(aic, BRSTCNTRL, EISA_BRST_TIM);
}

/*
 * Reset the SCSI bus
 *
 * Assert RST, hold it briefly, then release and let the bus settle.
 */
static void
aic_scsi_reset(struct aic_softc *aic)
{
	aic_outb(aic, SCSISEQ, SCSIRSTO);
	DELAY(500);
	aic_outb(aic, SCSISEQ, 0);
	DELAY(50);
}

/*
 * Reset. Abort all pending commands.
*/ static void aic_reset(struct aic_softc *aic, int initiate_reset) { struct ccb_hdr *ccb_h; CAM_DEBUG_PRINT(CAM_DEBUG_TRACE, ("aic_reset\n")); if (initiate_reset) aic_scsi_reset(aic); aic_chip_reset(aic); xpt_async(AC_BUS_RESET, aic->path, NULL); while ((ccb_h = TAILQ_FIRST(&aic->pending_ccbs)) != NULL) { TAILQ_REMOVE(&aic->pending_ccbs, ccb_h, sim_links.tqe); ccb_h->status |= CAM_SCSI_BUS_RESET; aic_done(aic, (struct aic_scb *)ccb_h->ccb_scb_ptr); } while ((ccb_h = TAILQ_FIRST(&aic->nexus_ccbs)) != NULL) { TAILQ_REMOVE(&aic->nexus_ccbs, ccb_h, sim_links.tqe); ccb_h->status |= CAM_SCSI_BUS_RESET; aic_done(aic, (struct aic_scb *)ccb_h->ccb_scb_ptr); } if (aic->nexus) { ccb_h = &aic->nexus->ccb->ccb_h; ccb_h->status |= CAM_SCSI_BUS_RESET; aic_done(aic, aic->nexus); } aic->state = AIC_IDLE; aic_outb(aic, DMACNTRL0, INTEN); } static char *aic_chip_names[] = { "AIC6260", "AIC6360", "AIC6370", "GM82C700", }; static struct { int type; char *idstring; } aic_chip_ids[] = { { AIC6360, IDSTRING_AIC6360 }, { AIC6370, IDSTRING_AIC6370 }, { GM82C700, IDSTRING_GM82C700 }, }; static void aic_init(struct aic_softc *aic) { struct aic_scb *scb; struct aic_tinfo *ti; u_int8_t porta, portb; char chip_id[33]; int i; TAILQ_INIT(&aic->pending_ccbs); TAILQ_INIT(&aic->nexus_ccbs); SLIST_INIT(&aic->free_scbs); aic->nexus = NULL; aic->state = AIC_IDLE; aic->prev_phase = -1; aic->flags = 0; aic_chip_reset(aic); aic_scsi_reset(aic); /* determine the chip type from its ID string */ aic->chip_type = AIC6260; aic_insb(aic, ID, chip_id, sizeof(chip_id) - 1); chip_id[sizeof(chip_id) - 1] = '\0'; for (i = 0; i < nitems(aic_chip_ids); i++) { if (!strcmp(chip_id, aic_chip_ids[i].idstring)) { aic->chip_type = aic_chip_ids[i].type; break; } } porta = aic_inb(aic, PORTA); portb = aic_inb(aic, PORTB); aic->initiator = PORTA_ID(porta); if (PORTA_PARITY(porta)) aic->flags |= AIC_PARITY_ENABLE; if (PORTB_DISC(portb)) aic->flags |= AIC_DISC_ENABLE; if (PORTB_DMA(portb)) aic->flags |= AIC_DMA_ENABLE; /* * We 
can do fast SCSI (10MHz clock rate) if bit 4 of portb * is set and we've got a 6360. The 6260 can only do standard * 5MHz SCSI. */ if (aic->chip_type > AIC6260 || aic_inb(aic, REV)) { if (PORTB_FSYNC(portb)) aic->flags |= AIC_FAST_ENABLE; aic->flags |= AIC_DWIO_ENABLE; } if (aic->flags & AIC_FAST_ENABLE) aic->max_period = AIC_FAST_SYNC_PERIOD; else aic->max_period = AIC_SYNC_PERIOD; aic->min_period = AIC_MIN_SYNC_PERIOD; for (i = 255; i >= 0; i--) { scb = &aic->scbs[i]; scb->tag = i; callout_init_mtx(&scb->timer, &aic->lock, 0); aic_free_scb(aic, scb); } for (i = 0; i < 8; i++) { if (i == aic->initiator) continue; ti = &aic->tinfo[i]; bzero(ti, sizeof(*ti)); ti->flags = TINFO_TAG_ENB; if (aic->flags & AIC_DISC_ENABLE) ti->flags |= TINFO_DISC_ENB; ti->user.period = aic->max_period; ti->user.offset = AIC_SYNC_OFFSET; ti->scsirate = 0; } aic_outb(aic, DMACNTRL0, INTEN); } int aic_probe(struct aic_softc *aic) { int i; /* Remove aic6360 from possible powerdown mode */ aic_outb(aic, DMACNTRL0, 0); #define STSIZE 16 aic_outb(aic, DMACNTRL1, 0); /* Reset stack pointer */ for (i = 0; i < STSIZE; i++) aic_outb(aic, STACK, i); /* See if we can pull out the same sequence */ aic_outb(aic, DMACNTRL1, 0); for (i = 0; i < STSIZE && aic_inb(aic, STACK) == i; i++) ; if (i != STSIZE) return (ENXIO); #undef STSIZE return (0); } int aic_attach(struct aic_softc *aic) { struct cam_devq *devq; /* * Create the device queue for our SIM. 
*/ devq = cam_simq_alloc(256); if (devq == NULL) return (ENOMEM); /* * Construct our SIM entry */ aic->sim = cam_sim_alloc(aic_action, aic_poll, "aic", aic, device_get_unit(aic->dev), &aic->lock, 2, 256, devq); if (aic->sim == NULL) { cam_simq_free(devq); return (ENOMEM); } mtx_lock(&aic->lock); if (xpt_bus_register(aic->sim, aic->dev, 0) != CAM_SUCCESS) { cam_sim_free(aic->sim, /*free_devq*/TRUE); mtx_unlock(&aic->lock); return (ENXIO); } if (xpt_create_path(&aic->path, /*periph*/NULL, cam_sim_path(aic->sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { xpt_bus_deregister(cam_sim_path(aic->sim)); cam_sim_free(aic->sim, /*free_devq*/TRUE); mtx_unlock(&aic->lock); return (ENXIO); } aic_init(aic); device_printf(aic->dev, "%s", aic_chip_names[aic->chip_type]); if (aic->flags & AIC_DMA_ENABLE) printf(", dma"); if (aic->flags & AIC_DISC_ENABLE) printf(", disconnection"); if (aic->flags & AIC_PARITY_ENABLE) printf(", parity check"); if (aic->flags & AIC_FAST_ENABLE) printf(", fast SCSI"); printf("\n"); mtx_unlock(&aic->lock); return (0); } int aic_detach(struct aic_softc *aic) { struct aic_scb *scb; int i; mtx_lock(&aic->lock); xpt_async(AC_LOST_DEVICE, aic->path, NULL); xpt_free_path(aic->path); xpt_bus_deregister(cam_sim_path(aic->sim)); cam_sim_free(aic->sim, /*free_devq*/TRUE); mtx_unlock(&aic->lock); for (i = 255; i >= 0; i--) { scb = &aic->scbs[i]; callout_drain(&scb->timer); } return (0); } Index: stable/11/sys/dev/ciss/ciss.c =================================================================== --- stable/11/sys/dev/ciss/ciss.c (revision 335137) +++ stable/11/sys/dev/ciss/ciss.c (revision 335138) @@ -1,4731 +1,4731 @@ /*- * Copyright (c) 2001 Michael Smith * Copyright (c) 2004 Paul Saab * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ /* * Common Interface for SCSI-3 Support driver. * * CISS claims to provide a common interface between a generic SCSI * transport and an intelligent host adapter. * * This driver supports CISS as defined in the document "CISS Command * Interface for SCSI-3 Support Open Specification", Version 1.04, * Valence Number 1, dated 20001127, produced by Compaq Computer * Corporation. This document appears to be a hastily and somewhat * arbitrarlily cut-down version of a larger (and probably even more * chaotic and inconsistent) Compaq internal document. Various * details were also gleaned from Compaq's "cciss" driver for Linux. * * We provide a shim layer between the CISS interface and CAM, * offloading most of the queueing and being-a-disk chores onto CAM. 
* Entry to the driver is via the PCI bus attachment (ciss_probe, * ciss_attach, etc) and via the CAM interface (ciss_cam_action, * ciss_cam_poll). The Compaq CISS adapters are, however, poor SCSI * citizens and we have to fake up some responses to get reasonable * behaviour out of them. In addition, the CISS command set is by no * means adequate to support the functionality of a RAID controller, * and thus the supported Compaq adapters utilise portions of the * control protocol from earlier Compaq adapter families. * * Note that we only support the "simple" transport layer over PCI. * This interface (ab)uses the I2O register set (specifically the post * queues) to exchange commands with the adapter. Other interfaces * are available, but we aren't supposed to know about them, and it is * dubious whether they would provide major performance improvements * except under extreme load. * * Currently the only supported CISS adapters are the Compaq Smart * Array 5* series (5300, 5i, 532). Even with only three adapters, * Compaq still manage to have interface variations. * * * Thanks must go to Fred Harris and Darryl DeVinney at Compaq, as * well as Paul Saab at Yahoo! for their assistance in making this * driver happen. * * More thanks must go to John Cagle at HP for the countless hours * spent making this driver "work" with the MSA* series storage * enclosures. Without his help (and nagging), this driver could not * be used with these enclosures. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include static MALLOC_DEFINE(CISS_MALLOC_CLASS, "ciss_data", "ciss internal data buffers"); /* pci interface */ static int ciss_lookup(device_t dev); static int ciss_probe(device_t dev); static int ciss_attach(device_t dev); static int ciss_detach(device_t dev); static int ciss_shutdown(device_t dev); /* (de)initialisation functions, control wrappers */ static int ciss_init_pci(struct ciss_softc *sc); static int ciss_setup_msix(struct ciss_softc *sc); static int ciss_init_perf(struct ciss_softc *sc); static int ciss_wait_adapter(struct ciss_softc *sc); static int ciss_flush_adapter(struct ciss_softc *sc); static int ciss_init_requests(struct ciss_softc *sc); static void ciss_command_map_helper(void *arg, bus_dma_segment_t *segs, int nseg, int error); static int ciss_identify_adapter(struct ciss_softc *sc); static int ciss_init_logical(struct ciss_softc *sc); static int ciss_init_physical(struct ciss_softc *sc); static int ciss_filter_physical(struct ciss_softc *sc, struct ciss_lun_report *cll); static int ciss_identify_logical(struct ciss_softc *sc, struct ciss_ldrive *ld); static int ciss_get_ldrive_status(struct ciss_softc *sc, struct ciss_ldrive *ld); static int ciss_update_config(struct ciss_softc *sc); static int ciss_accept_media(struct ciss_softc *sc, struct ciss_ldrive *ld); static void ciss_init_sysctl(struct ciss_softc *sc); static void ciss_soft_reset(struct ciss_softc *sc); static void ciss_free(struct ciss_softc *sc); static void ciss_spawn_notify_thread(struct ciss_softc *sc); static void ciss_kill_notify_thread(struct ciss_softc *sc); /* request submission/completion */ static int ciss_start(struct ciss_request *cr); static void ciss_done(struct ciss_softc *sc, cr_qhead_t *qh); static void ciss_perf_done(struct ciss_softc 
*sc, cr_qhead_t *qh); static void ciss_intr(void *arg); static void ciss_perf_intr(void *arg); static void ciss_perf_msi_intr(void *arg); static void ciss_complete(struct ciss_softc *sc, cr_qhead_t *qh); static int _ciss_report_request(struct ciss_request *cr, int *command_status, int *scsi_status, const char *func); static int ciss_synch_request(struct ciss_request *cr, int timeout); static int ciss_poll_request(struct ciss_request *cr, int timeout); static int ciss_wait_request(struct ciss_request *cr, int timeout); #if 0 static int ciss_abort_request(struct ciss_request *cr); #endif /* request queueing */ static int ciss_get_request(struct ciss_softc *sc, struct ciss_request **crp); static void ciss_preen_command(struct ciss_request *cr); static void ciss_release_request(struct ciss_request *cr); /* request helpers */ static int ciss_get_bmic_request(struct ciss_softc *sc, struct ciss_request **crp, int opcode, void **bufp, size_t bufsize); static int ciss_user_command(struct ciss_softc *sc, IOCTL_Command_struct *ioc); /* DMA map/unmap */ static int ciss_map_request(struct ciss_request *cr); static void ciss_request_map_helper(void *arg, bus_dma_segment_t *segs, int nseg, int error); static void ciss_unmap_request(struct ciss_request *cr); /* CAM interface */ static int ciss_cam_init(struct ciss_softc *sc); static void ciss_cam_rescan_target(struct ciss_softc *sc, int bus, int target); static void ciss_cam_action(struct cam_sim *sim, union ccb *ccb); static int ciss_cam_action_io(struct cam_sim *sim, struct ccb_scsiio *csio); static int ciss_cam_emulate(struct ciss_softc *sc, struct ccb_scsiio *csio); static void ciss_cam_poll(struct cam_sim *sim); static void ciss_cam_complete(struct ciss_request *cr); static void ciss_cam_complete_fixup(struct ciss_softc *sc, struct ccb_scsiio *csio); static int ciss_name_device(struct ciss_softc *sc, int bus, int target); /* periodic status monitoring */ static void ciss_periodic(void *arg); static void 
ciss_nop_complete(struct ciss_request *cr); static void ciss_disable_adapter(struct ciss_softc *sc); static void ciss_notify_event(struct ciss_softc *sc); static void ciss_notify_complete(struct ciss_request *cr); static int ciss_notify_abort(struct ciss_softc *sc); static int ciss_notify_abort_bmic(struct ciss_softc *sc); static void ciss_notify_hotplug(struct ciss_softc *sc, struct ciss_notify *cn); static void ciss_notify_logical(struct ciss_softc *sc, struct ciss_notify *cn); static void ciss_notify_physical(struct ciss_softc *sc, struct ciss_notify *cn); /* debugging output */ static void ciss_print_request(struct ciss_request *cr); static void ciss_print_ldrive(struct ciss_softc *sc, struct ciss_ldrive *ld); static const char *ciss_name_ldrive_status(int status); static int ciss_decode_ldrive_status(int status); static const char *ciss_name_ldrive_org(int org); static const char *ciss_name_command_status(int status); /* * PCI bus interface. */ static device_method_t ciss_methods[] = { /* Device interface */ DEVMETHOD(device_probe, ciss_probe), DEVMETHOD(device_attach, ciss_attach), DEVMETHOD(device_detach, ciss_detach), DEVMETHOD(device_shutdown, ciss_shutdown), { 0, 0 } }; static driver_t ciss_pci_driver = { "ciss", ciss_methods, sizeof(struct ciss_softc) }; static devclass_t ciss_devclass; DRIVER_MODULE(ciss, pci, ciss_pci_driver, ciss_devclass, 0, 0); MODULE_DEPEND(ciss, cam, 1, 1, 1); MODULE_DEPEND(ciss, pci, 1, 1, 1); /* * Control device interface. */ static d_open_t ciss_open; static d_close_t ciss_close; static d_ioctl_t ciss_ioctl; static struct cdevsw ciss_cdevsw = { .d_version = D_VERSION, .d_flags = 0, .d_open = ciss_open, .d_close = ciss_close, .d_ioctl = ciss_ioctl, .d_name = "ciss", }; /* * This tunable can be set at boot time and controls whether physical devices * that are marked hidden by the firmware should be exposed anyways. 
*/ static unsigned int ciss_expose_hidden_physical = 0; TUNABLE_INT("hw.ciss.expose_hidden_physical", &ciss_expose_hidden_physical); static unsigned int ciss_nop_message_heartbeat = 0; TUNABLE_INT("hw.ciss.nop_message_heartbeat", &ciss_nop_message_heartbeat); /* * This tunable can force a particular transport to be used: * <= 0 : use default * 1 : force simple * 2 : force performant */ static int ciss_force_transport = 0; TUNABLE_INT("hw.ciss.force_transport", &ciss_force_transport); /* * This tunable can force a particular interrupt delivery method to be used: * <= 0 : use default * 1 : force INTx * 2 : force MSIX */ static int ciss_force_interrupt = 0; TUNABLE_INT("hw.ciss.force_interrupt", &ciss_force_interrupt); /************************************************************************ * CISS adapters amazingly don't have a defined programming interface * value. (One could say some very despairing things about PCI and * people just not getting the general idea.) So we are forced to * stick with matching against subvendor/subdevice, and thus have to * be updated for every new CISS adapter that appears. 
*/ #define CISS_BOARD_UNKNWON 0 #define CISS_BOARD_SA5 1 #define CISS_BOARD_SA5B 2 #define CISS_BOARD_NOMSI (1<<4) #define CISS_BOARD_SIMPLE (1<<5) static struct { u_int16_t subvendor; u_int16_t subdevice; int flags; char *desc; } ciss_vendor_data[] = { { 0x0e11, 0x4070, CISS_BOARD_SA5|CISS_BOARD_NOMSI|CISS_BOARD_SIMPLE, "Compaq Smart Array 5300" }, { 0x0e11, 0x4080, CISS_BOARD_SA5B|CISS_BOARD_NOMSI, "Compaq Smart Array 5i" }, { 0x0e11, 0x4082, CISS_BOARD_SA5B|CISS_BOARD_NOMSI, "Compaq Smart Array 532" }, { 0x0e11, 0x4083, CISS_BOARD_SA5B|CISS_BOARD_NOMSI, "HP Smart Array 5312" }, { 0x0e11, 0x4091, CISS_BOARD_SA5, "HP Smart Array 6i" }, { 0x0e11, 0x409A, CISS_BOARD_SA5, "HP Smart Array 641" }, { 0x0e11, 0x409B, CISS_BOARD_SA5, "HP Smart Array 642" }, { 0x0e11, 0x409C, CISS_BOARD_SA5, "HP Smart Array 6400" }, { 0x0e11, 0x409D, CISS_BOARD_SA5, "HP Smart Array 6400 EM" }, { 0x103C, 0x3211, CISS_BOARD_SA5, "HP Smart Array E200i" }, { 0x103C, 0x3212, CISS_BOARD_SA5, "HP Smart Array E200" }, { 0x103C, 0x3213, CISS_BOARD_SA5, "HP Smart Array E200i" }, { 0x103C, 0x3214, CISS_BOARD_SA5, "HP Smart Array E200i" }, { 0x103C, 0x3215, CISS_BOARD_SA5, "HP Smart Array E200i" }, { 0x103C, 0x3220, CISS_BOARD_SA5, "HP Smart Array" }, { 0x103C, 0x3222, CISS_BOARD_SA5, "HP Smart Array" }, { 0x103C, 0x3223, CISS_BOARD_SA5, "HP Smart Array P800" }, { 0x103C, 0x3225, CISS_BOARD_SA5, "HP Smart Array P600" }, { 0x103C, 0x3230, CISS_BOARD_SA5, "HP Smart Array" }, { 0x103C, 0x3231, CISS_BOARD_SA5, "HP Smart Array" }, { 0x103C, 0x3232, CISS_BOARD_SA5, "HP Smart Array" }, { 0x103C, 0x3233, CISS_BOARD_SA5, "HP Smart Array" }, { 0x103C, 0x3234, CISS_BOARD_SA5, "HP Smart Array P400" }, { 0x103C, 0x3235, CISS_BOARD_SA5, "HP Smart Array P400i" }, { 0x103C, 0x3236, CISS_BOARD_SA5, "HP Smart Array" }, { 0x103C, 0x3237, CISS_BOARD_SA5, "HP Smart Array E500" }, { 0x103C, 0x3238, CISS_BOARD_SA5, "HP Smart Array" }, { 0x103C, 0x3239, CISS_BOARD_SA5, "HP Smart Array" }, { 0x103C, 0x323A, CISS_BOARD_SA5, 
"HP Smart Array" }, { 0x103C, 0x323B, CISS_BOARD_SA5, "HP Smart Array" }, { 0x103C, 0x323C, CISS_BOARD_SA5, "HP Smart Array" }, { 0x103C, 0x323D, CISS_BOARD_SA5, "HP Smart Array P700m" }, { 0x103C, 0x3241, CISS_BOARD_SA5, "HP Smart Array P212" }, { 0x103C, 0x3243, CISS_BOARD_SA5, "HP Smart Array P410" }, { 0x103C, 0x3245, CISS_BOARD_SA5, "HP Smart Array P410i" }, { 0x103C, 0x3247, CISS_BOARD_SA5, "HP Smart Array P411" }, { 0x103C, 0x3249, CISS_BOARD_SA5, "HP Smart Array P812" }, { 0x103C, 0x324A, CISS_BOARD_SA5, "HP Smart Array P712m" }, { 0x103C, 0x324B, CISS_BOARD_SA5, "HP Smart Array" }, { 0x103C, 0x3350, CISS_BOARD_SA5, "HP Smart Array P222" }, { 0x103C, 0x3351, CISS_BOARD_SA5, "HP Smart Array P420" }, { 0x103C, 0x3352, CISS_BOARD_SA5, "HP Smart Array P421" }, { 0x103C, 0x3353, CISS_BOARD_SA5, "HP Smart Array P822" }, { 0x103C, 0x3354, CISS_BOARD_SA5, "HP Smart Array P420i" }, { 0x103C, 0x3355, CISS_BOARD_SA5, "HP Smart Array P220i" }, { 0x103C, 0x3356, CISS_BOARD_SA5, "HP Smart Array P721m" }, { 0x103C, 0x1920, CISS_BOARD_SA5, "HP Smart Array P430i" }, { 0x103C, 0x1921, CISS_BOARD_SA5, "HP Smart Array P830i" }, { 0x103C, 0x1922, CISS_BOARD_SA5, "HP Smart Array P430" }, { 0x103C, 0x1923, CISS_BOARD_SA5, "HP Smart Array P431" }, { 0x103C, 0x1924, CISS_BOARD_SA5, "HP Smart Array P830" }, { 0x103C, 0x1926, CISS_BOARD_SA5, "HP Smart Array P731m" }, { 0x103C, 0x1928, CISS_BOARD_SA5, "HP Smart Array P230i" }, { 0x103C, 0x1929, CISS_BOARD_SA5, "HP Smart Array P530" }, { 0x103C, 0x192A, CISS_BOARD_SA5, "HP Smart Array P531" }, { 0x103C, 0x21BD, CISS_BOARD_SA5, "HP Smart Array P244br" }, { 0x103C, 0x21BE, CISS_BOARD_SA5, "HP Smart Array P741m" }, { 0x103C, 0x21BF, CISS_BOARD_SA5, "HP Smart Array H240ar" }, { 0x103C, 0x21C0, CISS_BOARD_SA5, "HP Smart Array P440ar" }, { 0x103C, 0x21C1, CISS_BOARD_SA5, "HP Smart Array P840ar" }, { 0x103C, 0x21C2, CISS_BOARD_SA5, "HP Smart Array P440" }, { 0x103C, 0x21C3, CISS_BOARD_SA5, "HP Smart Array P441" }, { 0x103C, 0x21C5, 
CISS_BOARD_SA5, "HP Smart Array P841" },
    { 0x103C, 0x21C6, CISS_BOARD_SA5, "HP Smart Array H244br" },
    { 0x103C, 0x21C7, CISS_BOARD_SA5, "HP Smart Array H240" },
    { 0x103C, 0x21C8, CISS_BOARD_SA5, "HP Smart Array H241" },
    { 0x103C, 0x21CA, CISS_BOARD_SA5, "HP Smart Array P246br" },
    { 0x103C, 0x21CB, CISS_BOARD_SA5, "HP Smart Array P840" },
    { 0x103C, 0x21CC, CISS_BOARD_SA5, "HP Smart Array TBD" },
    { 0x103C, 0x21CD, CISS_BOARD_SA5, "HP Smart Array P240nr" },
    { 0x103C, 0x21CE, CISS_BOARD_SA5, "HP Smart Array H240nr" },
    { 0, 0, 0, NULL }	/* sentinel: desc == NULL terminates the scan */
};

/************************************************************************
 * Find a match for the device in our list of known adapters.
 *
 * Returns the index into ciss_vendor_data[] for the adapter whose PCI
 * subvendor/subdevice pair matches 'dev', or -1 if it is unknown.
 */
static int
ciss_lookup(device_t dev)
{
    int 	i;

    for (i = 0; ciss_vendor_data[i].desc != NULL; i++)
	if ((pci_get_subvendor(dev) == ciss_vendor_data[i].subvendor) &&
	    (pci_get_subdevice(dev) == ciss_vendor_data[i].subdevice)) {
	    return(i);
	}
    return(-1);
}

/************************************************************************
 * Match a known CISS adapter.
 *
 * Standard newbus probe: claim the device (BUS_PROBE_DEFAULT) if it is
 * in our table, ENOENT otherwise.
 */
static int
ciss_probe(device_t dev)
{
    int		i;

    i = ciss_lookup(dev);
    if (i != -1) {
	device_set_desc(dev, ciss_vendor_data[i].desc);
	return(BUS_PROBE_DEFAULT);
    }
    return(ENOENT);
}

/************************************************************************
 * Attach the driver to this adapter.
 */
static int
ciss_attach(device_t dev)
{
    struct ciss_softc	*sc;
    int			error;

    debug_called(1);

#ifdef CISS_DEBUG
    /* print structure/union sizes */
    /* NOTE(review): cciss_coalint_struct and NodeName_type are each
     * printed twice below — looks like a copy/paste duplicate; harmless. */
    debug_struct(ciss_command);
    debug_struct(ciss_header);
    debug_union(ciss_device_address);
    debug_struct(ciss_cdb);
    debug_struct(ciss_report_cdb);
    debug_struct(ciss_notify_cdb);
    debug_struct(ciss_notify);
    debug_struct(ciss_message_cdb);
    debug_struct(ciss_error_info_pointer);
    debug_struct(ciss_error_info);
    debug_struct(ciss_sg_entry);
    debug_struct(ciss_config_table);
    debug_struct(ciss_bmic_cdb);
    debug_struct(ciss_bmic_id_ldrive);
    debug_struct(ciss_bmic_id_lstatus);
    debug_struct(ciss_bmic_id_table);
    debug_struct(ciss_bmic_id_pdrive);
    debug_struct(ciss_bmic_blink_pdrive);
    debug_struct(ciss_bmic_flush_cache);
    debug_const(CISS_MAX_REQUESTS);
    debug_const(CISS_MAX_LOGICAL);
    debug_const(CISS_INTERRUPT_COALESCE_DELAY);
    debug_const(CISS_INTERRUPT_COALESCE_COUNT);
    debug_const(CISS_COMMAND_ALLOC_SIZE);
    debug_const(CISS_COMMAND_SG_LENGTH);
    debug_type(cciss_pci_info_struct);
    debug_type(cciss_coalint_struct);
    debug_type(cciss_coalint_struct);
    debug_type(NodeName_type);
    debug_type(NodeName_type);
    debug_type(Heartbeat_type);
    debug_type(BusTypes_type);
    debug_type(FirmwareVer_type);
    debug_type(DriverVer_type);
    debug_type(IOCTL_Command_struct);
#endif

    sc = device_get_softc(dev);
    sc->ciss_dev = dev;

    /* Lock and periodic callout are created first: every later init step
     * and the error path (ciss_free) depend on them existing. */
    mtx_init(&sc->ciss_mtx, "cissmtx", NULL, MTX_DEF);
    callout_init_mtx(&sc->ciss_periodic, &sc->ciss_mtx, 0);

    /*
     * Do PCI-specific init.
     */
    if ((error = ciss_init_pci(sc)) != 0)
	goto out;

    /*
     * Initialise driver queues.
     */
    ciss_initq_free(sc);
    ciss_initq_notify(sc);

    /*
     * Initialize device sysctls.
     */
    ciss_init_sysctl(sc);

    /*
     * Initialise command/request pool.
     */
    if ((error = ciss_init_requests(sc)) != 0)
	goto out;

    /*
     * Get adapter information.
     */
    if ((error = ciss_identify_adapter(sc)) != 0)
	goto out;

    /*
     * Find all the physical devices.
     */
    if ((error = ciss_init_physical(sc)) != 0)
	goto out;

    /*
     * Build our private table of logical devices.
     */
    if ((error = ciss_init_logical(sc)) != 0)
	goto out;

    /*
     * Enable interrupts so that the CAM scan can complete.
     */
    CISS_TL_SIMPLE_ENABLE_INTERRUPTS(sc);

    /*
     * Initialise the CAM interface.
     */
    if ((error = ciss_cam_init(sc)) != 0)
	goto out;

    /*
     * Start the heartbeat routine and event chain.
     */
    ciss_periodic(sc);

    /*
     * Create the control device.
     */
    sc->ciss_dev_t = make_dev(&ciss_cdevsw, device_get_unit(sc->ciss_dev),
			      UID_ROOT, GID_OPERATOR, S_IRUSR | S_IWUSR,
			      "ciss%d", device_get_unit(sc->ciss_dev));
    sc->ciss_dev_t->si_drv1 = sc;

    /*
     * The adapter is running; synchronous commands can now sleep
     * waiting for an interrupt to signal completion.
     */
    sc->ciss_flags |= CISS_FLAG_RUNNING;

    ciss_spawn_notify_thread(sc);

    error = 0;
 out:
    if (error != 0) {
	/* ciss_free() expects the mutex to be held */
	mtx_lock(&sc->ciss_mtx);
	ciss_free(sc);
    }
    return(error);
}

/************************************************************************
 * Detach the driver from this adapter.
 *
 * Refuses to detach while the control device is open; otherwise flushes
 * the adapter write cache and releases everything via ciss_free().
 */
static int
ciss_detach(device_t dev)
{
    struct ciss_softc *sc = device_get_softc(dev);

    debug_called(1);

    mtx_lock(&sc->ciss_mtx);
    if (sc->ciss_flags & CISS_FLAG_CONTROL_OPEN) {
	mtx_unlock(&sc->ciss_mtx);
	return (EBUSY);
    }

    /* flush adapter cache */
    ciss_flush_adapter(sc);

    /* release all resources.  The mutex is released and freed here too. */
    ciss_free(sc);

    return(0);
}

/************************************************************************
 * Prepare adapter for system shutdown.
 */
static int
ciss_shutdown(device_t dev)
{
    struct ciss_softc *sc = device_get_softc(dev);

    debug_called(1);

    mtx_lock(&sc->ciss_mtx);
    /* flush adapter cache */
    ciss_flush_adapter(sc);

    /* Optionally reset proxy controllers; opt-in via the
     * dev.ciss.N.soft_reset sysctl created below. */
    if (sc->ciss_soft_reset)
	ciss_soft_reset(sc);
    mtx_unlock(&sc->ciss_mtx);

    return(0);
}

/*
 * Register per-device sysctls.  Currently just the writable
 * "soft_reset" knob consulted by ciss_shutdown().
 */
static void
ciss_init_sysctl(struct ciss_softc *sc)
{

    SYSCTL_ADD_INT(device_get_sysctl_ctx(sc->ciss_dev),
	SYSCTL_CHILDREN(device_get_sysctl_tree(sc->ciss_dev)),
	OID_AUTO, "soft_reset", CTLFLAG_RW, &sc->ciss_soft_reset, 0, "");
}

/************************************************************************
 * Perform PCI-specific attachment actions.
 */
static int
ciss_init_pci(struct ciss_softc *sc)
{
    uintptr_t		cbase, csize, cofs;
    uint32_t		method, supported_methods;
    int			error, sqmask, i;
    void		*intr;

    debug_called(1);

    /*
     * Work out adapter type.
     */
    i = ciss_lookup(sc->ciss_dev);
    if (i < 0) {
	ciss_printf(sc, "unknown adapter type\n");
	return (ENXIO);
    }

    /* Pick the outbound-post-queue interrupt mask for the board family. */
    if (ciss_vendor_data[i].flags & CISS_BOARD_SA5) {
	sqmask = CISS_TL_SIMPLE_INTR_OPQ_SA5;
    } else if (ciss_vendor_data[i].flags & CISS_BOARD_SA5B) {
	sqmask = CISS_TL_SIMPLE_INTR_OPQ_SA5B;
    } else {
	/*
	 * XXX Big hammer, masks/unmasks all possible interrupts.  This should
	 * work on all hardware variants.  Need to add code to handle the
	 * "controller crashed" interrupt bit that this unmasks.
	 */
	sqmask = ~0;
    }

    /*
     * Allocate register window first (we need this to find the config
     * struct).
     */
    error = ENXIO;
    sc->ciss_regs_rid = CISS_TL_SIMPLE_BAR_REGS;
    if ((sc->ciss_regs_resource =
	 bus_alloc_resource_any(sc->ciss_dev, SYS_RES_MEMORY,
				&sc->ciss_regs_rid, RF_ACTIVE)) == NULL) {
	ciss_printf(sc, "can't allocate register window\n");
	return(ENXIO);
    }
    sc->ciss_regs_bhandle = rman_get_bushandle(sc->ciss_regs_resource);
    sc->ciss_regs_btag = rman_get_bustag(sc->ciss_regs_resource);

    /*
     * Find the BAR holding the config structure.  If it's not the one
     * we already mapped for registers, map it too.
*/ sc->ciss_cfg_rid = CISS_TL_SIMPLE_READ(sc, CISS_TL_SIMPLE_CFG_BAR) & 0xffff; if (sc->ciss_cfg_rid != sc->ciss_regs_rid) { if ((sc->ciss_cfg_resource = bus_alloc_resource_any(sc->ciss_dev, SYS_RES_MEMORY, &sc->ciss_cfg_rid, RF_ACTIVE)) == NULL) { ciss_printf(sc, "can't allocate config window\n"); return(ENXIO); } cbase = (uintptr_t)rman_get_virtual(sc->ciss_cfg_resource); csize = rman_get_end(sc->ciss_cfg_resource) - rman_get_start(sc->ciss_cfg_resource) + 1; } else { cbase = (uintptr_t)rman_get_virtual(sc->ciss_regs_resource); csize = rman_get_end(sc->ciss_regs_resource) - rman_get_start(sc->ciss_regs_resource) + 1; } cofs = CISS_TL_SIMPLE_READ(sc, CISS_TL_SIMPLE_CFG_OFF); /* * Use the base/size/offset values we just calculated to * sanity-check the config structure. If it's OK, point to it. */ if ((cofs + sizeof(struct ciss_config_table)) > csize) { ciss_printf(sc, "config table outside window\n"); return(ENXIO); } sc->ciss_cfg = (struct ciss_config_table *)(cbase + cofs); debug(1, "config struct at %p", sc->ciss_cfg); /* * Calculate the number of request structures/commands we are * going to provide for this adapter. */ sc->ciss_max_requests = min(CISS_MAX_REQUESTS, sc->ciss_cfg->max_outstanding_commands); /* * Validate the config structure. If we supported other transport * methods, we could select amongst them at this point in time. */ if (strncmp(sc->ciss_cfg->signature, "CISS", 4)) { ciss_printf(sc, "config signature mismatch (got '%c%c%c%c')\n", sc->ciss_cfg->signature[0], sc->ciss_cfg->signature[1], sc->ciss_cfg->signature[2], sc->ciss_cfg->signature[3]); return(ENXIO); } /* * Select the mode of operation, prefer Performant. 
*/ if (!(sc->ciss_cfg->supported_methods & (CISS_TRANSPORT_METHOD_SIMPLE | CISS_TRANSPORT_METHOD_PERF))) { ciss_printf(sc, "No supported transport layers: 0x%x\n", sc->ciss_cfg->supported_methods); } switch (ciss_force_transport) { case 1: supported_methods = CISS_TRANSPORT_METHOD_SIMPLE; break; case 2: supported_methods = CISS_TRANSPORT_METHOD_PERF; break; default: /* * Override the capabilities of the BOARD and specify SIMPLE * MODE */ if (ciss_vendor_data[i].flags & CISS_BOARD_SIMPLE) supported_methods = CISS_TRANSPORT_METHOD_SIMPLE; else supported_methods = sc->ciss_cfg->supported_methods; break; } setup: if ((supported_methods & CISS_TRANSPORT_METHOD_PERF) != 0) { method = CISS_TRANSPORT_METHOD_PERF; sc->ciss_perf = (struct ciss_perf_config *)(cbase + cofs + sc->ciss_cfg->transport_offset); if (ciss_init_perf(sc)) { supported_methods &= ~method; goto setup; } } else if (supported_methods & CISS_TRANSPORT_METHOD_SIMPLE) { method = CISS_TRANSPORT_METHOD_SIMPLE; } else { ciss_printf(sc, "No supported transport methods: 0x%x\n", sc->ciss_cfg->supported_methods); return(ENXIO); } /* * Tell it we're using the low 4GB of RAM. Set the default interrupt * coalescing options. */ sc->ciss_cfg->requested_method = method; sc->ciss_cfg->command_physlimit = 0; sc->ciss_cfg->interrupt_coalesce_delay = CISS_INTERRUPT_COALESCE_DELAY; sc->ciss_cfg->interrupt_coalesce_count = CISS_INTERRUPT_COALESCE_COUNT; #ifdef __i386__ sc->ciss_cfg->host_driver |= CISS_DRIVER_SCSI_PREFETCH; #endif if (ciss_update_config(sc)) { ciss_printf(sc, "adapter refuses to accept config update (IDBR 0x%x)\n", CISS_TL_SIMPLE_READ(sc, CISS_TL_SIMPLE_IDBR)); return(ENXIO); } if ((sc->ciss_cfg->active_method & method) == 0) { supported_methods &= ~method; if (supported_methods == 0) { ciss_printf(sc, "adapter refuses to go into available transports " "mode (0x%x, 0x%x)\n", supported_methods, sc->ciss_cfg->active_method); return(ENXIO); } else goto setup; } /* * Wait for the adapter to come ready. 
*/ if ((error = ciss_wait_adapter(sc)) != 0) return(error); /* Prepare to possibly use MSIX and/or PERFORMANT interrupts. Normal * interrupts have a rid of 0, this will be overridden if MSIX is used. */ sc->ciss_irq_rid[0] = 0; if (method == CISS_TRANSPORT_METHOD_PERF) { ciss_printf(sc, "PERFORMANT Transport\n"); if ((ciss_force_interrupt != 1) && (ciss_setup_msix(sc) == 0)) { intr = ciss_perf_msi_intr; } else { intr = ciss_perf_intr; } /* XXX The docs say that the 0x01 bit is only for SAS controllers. * Unfortunately, there is no good way to know if this is a SAS * controller. Hopefully enabling this bit universally will work OK. * It seems to work fine for SA6i controllers. */ sc->ciss_interrupt_mask = CISS_TL_PERF_INTR_OPQ | CISS_TL_PERF_INTR_MSI; } else { ciss_printf(sc, "SIMPLE Transport\n"); /* MSIX doesn't seem to work in SIMPLE mode, only enable if it forced */ if (ciss_force_interrupt == 2) /* If this fails, we automatically revert to INTx */ ciss_setup_msix(sc); sc->ciss_perf = NULL; intr = ciss_intr; sc->ciss_interrupt_mask = sqmask; } /* * Turn off interrupts before we go routing anything. */ CISS_TL_SIMPLE_DISABLE_INTERRUPTS(sc); /* * Allocate and set up our interrupt. */ if ((sc->ciss_irq_resource = bus_alloc_resource_any(sc->ciss_dev, SYS_RES_IRQ, &sc->ciss_irq_rid[0], RF_ACTIVE | RF_SHAREABLE)) == NULL) { ciss_printf(sc, "can't allocate interrupt\n"); return(ENXIO); } if (bus_setup_intr(sc->ciss_dev, sc->ciss_irq_resource, INTR_TYPE_CAM|INTR_MPSAFE, NULL, intr, sc, &sc->ciss_intr)) { ciss_printf(sc, "can't set up interrupt\n"); return(ENXIO); } /* * Allocate the parent bus DMA tag appropriate for our PCI * interface. * * Note that "simple" adapters can only address within a 32-bit * span. 
*/ if (bus_dma_tag_create(bus_get_dma_tag(sc->ciss_dev),/* PCI parent */ 1, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ BUS_SPACE_MAXSIZE_32BIT, /* maxsize */ BUS_SPACE_UNRESTRICTED, /* nsegments */ BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->ciss_parent_dmat)) { ciss_printf(sc, "can't allocate parent DMA tag\n"); return(ENOMEM); } /* * Create DMA tag for mapping buffers into adapter-addressable * space. */ if (bus_dma_tag_create(sc->ciss_parent_dmat, /* parent */ 1, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ (CISS_MAX_SG_ELEMENTS - 1) * PAGE_SIZE, /* maxsize */ CISS_MAX_SG_ELEMENTS, /* nsegments */ BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ BUS_DMA_ALLOCNOW, /* flags */ busdma_lock_mutex, &sc->ciss_mtx, /* lockfunc, lockarg */ &sc->ciss_buffer_dmat)) { ciss_printf(sc, "can't allocate buffer DMA tag\n"); return(ENOMEM); } return(0); } /************************************************************************ * Setup MSI/MSIX operation (Performant only) * Four interrupts are available, but we only use 1 right now. If MSI-X * isn't avaialble, try using MSI instead. */ static int ciss_setup_msix(struct ciss_softc *sc) { int val, i; /* Weed out devices that don't actually support MSI */ i = ciss_lookup(sc->ciss_dev); if (ciss_vendor_data[i].flags & CISS_BOARD_NOMSI) return (EINVAL); /* * Only need to use the minimum number of MSI vectors, as the driver * doesn't support directed MSIX interrupts. 
*/ val = pci_msix_count(sc->ciss_dev); if (val < CISS_MSI_COUNT) { val = pci_msi_count(sc->ciss_dev); device_printf(sc->ciss_dev, "got %d MSI messages]\n", val); if (val < CISS_MSI_COUNT) return (EINVAL); } val = MIN(val, CISS_MSI_COUNT); if (pci_alloc_msix(sc->ciss_dev, &val) != 0) { if (pci_alloc_msi(sc->ciss_dev, &val) != 0) return (EINVAL); } sc->ciss_msi = val; if (bootverbose) ciss_printf(sc, "Using %d MSIX interrupt%s\n", val, (val != 1) ? "s" : ""); for (i = 0; i < val; i++) sc->ciss_irq_rid[i] = i + 1; return (0); } /************************************************************************ * Setup the Performant structures. */ static int ciss_init_perf(struct ciss_softc *sc) { struct ciss_perf_config *pc = sc->ciss_perf; int reply_size; /* * Create the DMA tag for the reply queue. */ reply_size = sizeof(uint64_t) * sc->ciss_max_requests; if (bus_dma_tag_create(sc->ciss_parent_dmat, /* parent */ 1, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ reply_size, 1, /* maxsize, nsegments */ BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->ciss_reply_dmat)) { ciss_printf(sc, "can't allocate reply DMA tag\n"); return(ENOMEM); } /* * Allocate memory and make it available for DMA. */ if (bus_dmamem_alloc(sc->ciss_reply_dmat, (void **)&sc->ciss_reply, BUS_DMA_NOWAIT, &sc->ciss_reply_map)) { ciss_printf(sc, "can't allocate reply memory\n"); return(ENOMEM); } bus_dmamap_load(sc->ciss_reply_dmat, sc->ciss_reply_map, sc->ciss_reply, reply_size, ciss_command_map_helper, &sc->ciss_reply_phys, 0); bzero(sc->ciss_reply, reply_size); sc->ciss_cycle = 0x1; sc->ciss_rqidx = 0; /* * Preload the fetch table with common command sizes. This allows the * hardware to not waste bus cycles for typical i/o commands, but also not * tax the driver to be too exact in choosing sizes. 
The table is optimized * for page-aligned i/o's, but since most i/o comes from the various pagers, * it's a reasonable assumption to make. */ pc->fetch_count[CISS_SG_FETCH_NONE] = (sizeof(struct ciss_command) + 15) / 16; pc->fetch_count[CISS_SG_FETCH_1] = (sizeof(struct ciss_command) + sizeof(struct ciss_sg_entry) * 1 + 15) / 16; pc->fetch_count[CISS_SG_FETCH_2] = (sizeof(struct ciss_command) + sizeof(struct ciss_sg_entry) * 2 + 15) / 16; pc->fetch_count[CISS_SG_FETCH_4] = (sizeof(struct ciss_command) + sizeof(struct ciss_sg_entry) * 4 + 15) / 16; pc->fetch_count[CISS_SG_FETCH_8] = (sizeof(struct ciss_command) + sizeof(struct ciss_sg_entry) * 8 + 15) / 16; pc->fetch_count[CISS_SG_FETCH_16] = (sizeof(struct ciss_command) + sizeof(struct ciss_sg_entry) * 16 + 15) / 16; pc->fetch_count[CISS_SG_FETCH_32] = (sizeof(struct ciss_command) + sizeof(struct ciss_sg_entry) * 32 + 15) / 16; pc->fetch_count[CISS_SG_FETCH_MAX] = (CISS_COMMAND_ALLOC_SIZE + 15) / 16; pc->rq_size = sc->ciss_max_requests; /* XXX less than the card supports? */ pc->rq_count = 1; /* XXX Hardcode for a single queue */ pc->rq_bank_hi = 0; pc->rq_bank_lo = 0; pc->rq[0].rq_addr_hi = 0x0; pc->rq[0].rq_addr_lo = sc->ciss_reply_phys; return(0); } /************************************************************************ * Wait for the adapter to come ready. */ static int ciss_wait_adapter(struct ciss_softc *sc) { int i; debug_called(1); /* * Wait for the adapter to come ready. */ if (!(sc->ciss_cfg->active_method & CISS_TRANSPORT_METHOD_READY)) { ciss_printf(sc, "waiting for adapter to come ready...\n"); for (i = 0; !(sc->ciss_cfg->active_method & CISS_TRANSPORT_METHOD_READY); i++) { DELAY(1000000); /* one second */ if (i > 30) { ciss_printf(sc, "timed out waiting for adapter to come ready\n"); return(EIO); } } } return(0); } /************************************************************************ * Flush the adapter cache. 
 */
static int
ciss_flush_adapter(struct ciss_softc *sc)
{
    struct ciss_request			*cr;
    struct ciss_bmic_flush_cache	*cbfc;
    int					error, command_status;

    debug_called(1);

    cr = NULL;
    cbfc = NULL;

    /*
     * Build a BMIC request to flush the cache.  We don't disable
     * it, as we may be going to do more I/O (eg. we are emulating
     * the Synchronise Cache command).
     */
    if ((cbfc = malloc(sizeof(*cbfc), CISS_MALLOC_CLASS, M_NOWAIT | M_ZERO)) == NULL) {
	error = ENOMEM;
	goto out;
    }
    if ((error = ciss_get_bmic_request(sc, &cr, CISS_BMIC_FLUSH_CACHE,
				       (void **)&cbfc, sizeof(*cbfc))) != 0)
	goto out;

    /*
     * Submit the request and wait for it to complete.
     */
    if ((error = ciss_synch_request(cr, 60 * 1000)) != 0) {
	ciss_printf(sc, "error sending BMIC FLUSH_CACHE command (%d)\n", error);
	goto out;
    }

    /*
     * Check response.
     */
    ciss_report_request(cr, &command_status, NULL);
    switch(command_status) {
    case CISS_CMD_STATUS_SUCCESS:
	break;
    default:
	ciss_printf(sc, "error flushing cache (%s)\n",
		    ciss_name_command_status(command_status));
	error = EIO;
	goto out;
    }

out:
    if (cbfc != NULL)
	free(cbfc, CISS_MALLOC_CLASS);
    if (cr != NULL)
	ciss_release_request(cr);
    return(error);
}

/*
 * Issue a BMIC soft reset to every proxy controller (logical buses whose
 * physical bus number is non-zero); the primary PCI controller itself is
 * skipped.  Called from ciss_shutdown() with the softc mutex held.
 */
static void
ciss_soft_reset(struct ciss_softc *sc)
{
    struct ciss_request	*cr = NULL;
    struct ciss_command	*cc;
    int			i, error = 0;

    for (i = 0; i < sc->ciss_max_logical_bus; i++) {
	/* only reset proxy controllers */
	if (sc->ciss_controllers[i].physical.bus == 0)
	    continue;

	if ((error = ciss_get_request(sc, &cr)) != 0)
	    break;

	/* NOTE(review): ciss_get_bmic_request() overwrites cr here; if it
	 * allocates its own request rather than reusing the one obtained
	 * just above, the first request leaks each iteration — verify
	 * against ciss_get_bmic_request()'s definition. */
	if ((error = ciss_get_bmic_request(sc, &cr, CISS_BMIC_SOFT_RESET,
					   NULL, 0)) != 0)
	    break;

	/* Target the BMIC command at this proxy controller's address. */
	cc = cr->cr_cc;
	cc->header.address = sc->ciss_controllers[i];

	if ((error = ciss_synch_request(cr, 60 * 1000)) != 0)
	    break;

	ciss_release_request(cr);
    }

    if (error)
	ciss_printf(sc, "error resetting controller (%d)\n", error);

    if (cr != NULL)
	ciss_release_request(cr);
}

/************************************************************************
 * Allocate memory for the adapter command structures, initialise
 * the request structures.
* * Note that the entire set of commands are allocated in a single * contiguous slab. */ static int ciss_init_requests(struct ciss_softc *sc) { struct ciss_request *cr; int i; debug_called(1); if (bootverbose) ciss_printf(sc, "using %d of %d available commands\n", sc->ciss_max_requests, sc->ciss_cfg->max_outstanding_commands); /* * Create the DMA tag for commands. */ if (bus_dma_tag_create(sc->ciss_parent_dmat, /* parent */ 32, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ CISS_COMMAND_ALLOC_SIZE * sc->ciss_max_requests, 1, /* maxsize, nsegments */ BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->ciss_command_dmat)) { ciss_printf(sc, "can't allocate command DMA tag\n"); return(ENOMEM); } /* * Allocate memory and make it available for DMA. */ if (bus_dmamem_alloc(sc->ciss_command_dmat, (void **)&sc->ciss_command, BUS_DMA_NOWAIT, &sc->ciss_command_map)) { ciss_printf(sc, "can't allocate command memory\n"); return(ENOMEM); } bus_dmamap_load(sc->ciss_command_dmat, sc->ciss_command_map,sc->ciss_command, CISS_COMMAND_ALLOC_SIZE * sc->ciss_max_requests, ciss_command_map_helper, &sc->ciss_command_phys, 0); bzero(sc->ciss_command, CISS_COMMAND_ALLOC_SIZE * sc->ciss_max_requests); /* * Set up the request and command structures, push requests onto * the free queue. 
 */
    /* NOTE(review): loop starts at 1 — request/tag 0 is deliberately never
     * placed on the free queue; confirm the tag-0 reservation against the
     * transport code before "fixing" this. */
    for (i = 1; i < sc->ciss_max_requests; i++) {
	cr = &sc->ciss_request[i];
	cr->cr_sc = sc;
	cr->cr_tag = i;
	/* Each request owns one CISS_COMMAND_ALLOC_SIZE slice of the slab. */
	cr->cr_cc = (struct ciss_command *)((uintptr_t)sc->ciss_command +
					    CISS_COMMAND_ALLOC_SIZE * i);
	cr->cr_ccphys = sc->ciss_command_phys + CISS_COMMAND_ALLOC_SIZE * i;
	bus_dmamap_create(sc->ciss_buffer_dmat, 0, &cr->cr_datamap);
	ciss_enqueue_free(cr);
    }
    return(0);
}

/*
 * busdma load callback: stash the single segment's bus address into the
 * uint32_t pointed to by arg.
 * NOTE(review): ds_addr is narrowed to 32 bits here; the tags used with
 * this helper restrict lowaddr to BUS_SPACE_MAXADDR_32BIT, which should
 * make that safe — verify for every tag that uses this callback.
 */
static void
ciss_command_map_helper(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
    uint32_t *addr;

    addr = arg;
    *addr = segs[0].ds_addr;
}

/************************************************************************
 * Identify the adapter, print some information about it.
 */
static int
ciss_identify_adapter(struct ciss_softc *sc)
{
    struct ciss_request	*cr;
    int			error, command_status;

    debug_called(1);

    cr = NULL;

    /*
     * Get a request, allocate storage for the adapter data.
     */
    if ((error = ciss_get_bmic_request(sc, &cr, CISS_BMIC_ID_CTLR,
				       (void **)&sc->ciss_id,
				       sizeof(*sc->ciss_id))) != 0)
	goto out;

    /*
     * Submit the request and wait for it to complete.
     */
    if ((error = ciss_synch_request(cr, 60 * 1000)) != 0) {
	ciss_printf(sc, "error sending BMIC ID_CTLR command (%d)\n", error);
	goto out;
    }

    /*
     * Check response.
     */
    ciss_report_request(cr, &command_status, NULL);
    switch(command_status) {
    case CISS_CMD_STATUS_SUCCESS:		/* buffer right size */
	break;
    case CISS_CMD_STATUS_DATA_UNDERRUN:
    case CISS_CMD_STATUS_DATA_OVERRUN:
	ciss_printf(sc, "data over/underrun reading adapter information\n");
	/* FALLTHROUGH - over/underrun is also treated as a hard failure */
    default:
	ciss_printf(sc, "error reading adapter information (%s)\n",
		    ciss_name_command_status(command_status));
	error = EIO;
	goto out;
    }

    /* sanity-check reply */
    if (!(sc->ciss_id->controller_flags & CONTROLLER_FLAGS_BIG_MAP_SUPPORT)) {
	ciss_printf(sc, "adapter does not support BIG_MAP\n");
	error = ENXIO;
	goto out;
    }

#if 0
    /* XXX later revisions may not need this */
    sc->ciss_flags |= CISS_FLAG_FAKE_SYNCH;
#endif

    /* XXX only really required for old 5300 adapters?
*/ sc->ciss_flags |= CISS_FLAG_BMIC_ABORT; /* * Earlier controller specs do not contain these config * entries, so assume that a 0 means its old and assign * these values to the defaults that were established * when this driver was developed for them */ if (sc->ciss_cfg->max_logical_supported == 0) sc->ciss_cfg->max_logical_supported = CISS_MAX_LOGICAL; if (sc->ciss_cfg->max_physical_supported == 0) sc->ciss_cfg->max_physical_supported = CISS_MAX_PHYSICAL; /* print information */ if (bootverbose) { ciss_printf(sc, " %d logical drive%s configured\n", sc->ciss_id->configured_logical_drives, (sc->ciss_id->configured_logical_drives == 1) ? "" : "s"); ciss_printf(sc, " firmware %4.4s\n", sc->ciss_id->running_firmware_revision); ciss_printf(sc, " %d SCSI channels\n", sc->ciss_id->scsi_chip_count); ciss_printf(sc, " signature '%.4s'\n", sc->ciss_cfg->signature); ciss_printf(sc, " valence %d\n", sc->ciss_cfg->valence); ciss_printf(sc, " supported I/O methods 0x%b\n", sc->ciss_cfg->supported_methods, "\20\1READY\2simple\3performant\4MEMQ\n"); ciss_printf(sc, " active I/O method 0x%b\n", sc->ciss_cfg->active_method, "\20\2simple\3performant\4MEMQ\n"); ciss_printf(sc, " 4G page base 0x%08x\n", sc->ciss_cfg->command_physlimit); ciss_printf(sc, " interrupt coalesce delay %dus\n", sc->ciss_cfg->interrupt_coalesce_delay); ciss_printf(sc, " interrupt coalesce count %d\n", sc->ciss_cfg->interrupt_coalesce_count); ciss_printf(sc, " max outstanding commands %d\n", sc->ciss_cfg->max_outstanding_commands); ciss_printf(sc, " bus types 0x%b\n", sc->ciss_cfg->bus_types, "\20\1ultra2\2ultra3\10fibre1\11fibre2\n"); ciss_printf(sc, " server name '%.16s'\n", sc->ciss_cfg->server_name); ciss_printf(sc, " heartbeat 0x%x\n", sc->ciss_cfg->heartbeat); ciss_printf(sc, " max logical logical volumes: %d\n", sc->ciss_cfg->max_logical_supported); ciss_printf(sc, " max physical disks supported: %d\n", sc->ciss_cfg->max_physical_supported); ciss_printf(sc, " max physical disks per logical volume: %d\n", 
sc->ciss_cfg->max_physical_per_logical); ciss_printf(sc, " JBOD Support is %s\n", (sc->ciss_id->uiYetMoreControllerFlags & YMORE_CONTROLLER_FLAGS_JBOD_SUPPORTED) ? "Available" : "Unavailable"); ciss_printf(sc, " JBOD Mode is %s\n", (sc->ciss_id->PowerUPNvramFlags & PWR_UP_FLAG_JBOD_ENABLED) ? "Enabled" : "Disabled"); } out: if (error) { if (sc->ciss_id != NULL) { free(sc->ciss_id, CISS_MALLOC_CLASS); sc->ciss_id = NULL; } } if (cr != NULL) ciss_release_request(cr); return(error); } /************************************************************************ * Helper routine for generating a list of logical and physical luns. */ static struct ciss_lun_report * ciss_report_luns(struct ciss_softc *sc, int opcode, int nunits) { struct ciss_request *cr; struct ciss_command *cc; struct ciss_report_cdb *crc; struct ciss_lun_report *cll; int command_status; int report_size; int error = 0; debug_called(1); cr = NULL; cll = NULL; /* * Get a request, allocate storage for the address list. */ if ((error = ciss_get_request(sc, &cr)) != 0) goto out; report_size = sizeof(*cll) + nunits * sizeof(union ciss_device_address); if ((cll = malloc(report_size, CISS_MALLOC_CLASS, M_NOWAIT | M_ZERO)) == NULL) { ciss_printf(sc, "can't allocate memory for lun report\n"); error = ENOMEM; goto out; } /* * Build the Report Logical/Physical LUNs command. */ cc = cr->cr_cc; cr->cr_data = cll; cr->cr_length = report_size; cr->cr_flags = CISS_REQ_DATAIN; cc->header.address.physical.mode = CISS_HDR_ADDRESS_MODE_PERIPHERAL; cc->header.address.physical.bus = 0; cc->header.address.physical.target = 0; cc->cdb.cdb_length = sizeof(*crc); cc->cdb.type = CISS_CDB_TYPE_COMMAND; cc->cdb.attribute = CISS_CDB_ATTRIBUTE_SIMPLE; cc->cdb.direction = CISS_CDB_DIRECTION_READ; cc->cdb.timeout = 30; /* XXX better suggestions? 
*/ crc = (struct ciss_report_cdb *)&(cc->cdb.cdb[0]); bzero(crc, sizeof(*crc)); crc->opcode = opcode; crc->length = htonl(report_size); /* big-endian field */ cll->list_size = htonl(report_size - sizeof(*cll)); /* big-endian field */ /* * Submit the request and wait for it to complete. (timeout * here should be much greater than above) */ if ((error = ciss_synch_request(cr, 60 * 1000)) != 0) { ciss_printf(sc, "error sending %d LUN command (%d)\n", opcode, error); goto out; } /* * Check response. Note that data over/underrun is OK. */ ciss_report_request(cr, &command_status, NULL); switch(command_status) { case CISS_CMD_STATUS_SUCCESS: /* buffer right size */ case CISS_CMD_STATUS_DATA_UNDERRUN: /* buffer too large, not bad */ break; case CISS_CMD_STATUS_DATA_OVERRUN: ciss_printf(sc, "WARNING: more units than driver limit (%d)\n", sc->ciss_cfg->max_logical_supported); break; default: ciss_printf(sc, "error detecting logical drive configuration (%s)\n", ciss_name_command_status(command_status)); error = EIO; goto out; } ciss_release_request(cr); cr = NULL; out: if (cr != NULL) ciss_release_request(cr); if (error && cll != NULL) { free(cll, CISS_MALLOC_CLASS); cll = NULL; } return(cll); } /************************************************************************ * Find logical drives on the adapter. */ static int ciss_init_logical(struct ciss_softc *sc) { struct ciss_lun_report *cll; int error = 0, i, j; int ndrives; debug_called(1); cll = ciss_report_luns(sc, CISS_OPCODE_REPORT_LOGICAL_LUNS, sc->ciss_cfg->max_logical_supported); if (cll == NULL) { error = ENXIO; goto out; } /* sanity-check reply */ ndrives = (ntohl(cll->list_size) / sizeof(union ciss_device_address)); if ((ndrives < 0) || (ndrives > sc->ciss_cfg->max_logical_supported)) { ciss_printf(sc, "adapter claims to report absurd number of logical drives (%d > %d)\n", ndrives, sc->ciss_cfg->max_logical_supported); error = ENXIO; goto out; } /* * Save logical drive information. 
*/ if (bootverbose) { ciss_printf(sc, "%d logical drive%s\n", ndrives, (ndrives > 1 || ndrives == 0) ? "s" : ""); } sc->ciss_logical = malloc(sc->ciss_max_logical_bus * sizeof(struct ciss_ldrive *), CISS_MALLOC_CLASS, M_NOWAIT | M_ZERO); if (sc->ciss_logical == NULL) { error = ENXIO; goto out; } for (i = 0; i < sc->ciss_max_logical_bus; i++) { sc->ciss_logical[i] = malloc(sc->ciss_cfg->max_logical_supported * sizeof(struct ciss_ldrive), CISS_MALLOC_CLASS, M_NOWAIT | M_ZERO); if (sc->ciss_logical[i] == NULL) { error = ENXIO; goto out; } for (j = 0; j < sc->ciss_cfg->max_logical_supported; j++) sc->ciss_logical[i][j].cl_status = CISS_LD_NONEXISTENT; } for (i = 0; i < sc->ciss_cfg->max_logical_supported; i++) { if (i < ndrives) { struct ciss_ldrive *ld; int bus, target; bus = CISS_LUN_TO_BUS(cll->lun[i].logical.lun); target = CISS_LUN_TO_TARGET(cll->lun[i].logical.lun); ld = &sc->ciss_logical[bus][target]; ld->cl_address = cll->lun[i]; ld->cl_controller = &sc->ciss_controllers[bus]; if (ciss_identify_logical(sc, ld) != 0) continue; /* * If the drive has had media exchanged, we should bring it online. */ if (ld->cl_lstatus->media_exchanged) ciss_accept_media(sc, ld); } } out: if (cll != NULL) free(cll, CISS_MALLOC_CLASS); return(error); } static int ciss_init_physical(struct ciss_softc *sc) { struct ciss_lun_report *cll; int error = 0, i; int nphys; int bus, target; debug_called(1); bus = 0; target = 0; cll = ciss_report_luns(sc, CISS_OPCODE_REPORT_PHYSICAL_LUNS, sc->ciss_cfg->max_physical_supported); if (cll == NULL) { error = ENXIO; goto out; } nphys = (ntohl(cll->list_size) / sizeof(union ciss_device_address)); if (bootverbose) { ciss_printf(sc, "%d physical device%s\n", nphys, (nphys > 1 || nphys == 0) ? "s" : ""); } /* * Figure out the bus mapping. * Logical buses include both the local logical bus for local arrays and * proxy buses for remote arrays. Physical buses are numbered by the * controller and represent physical buses that hold physical devices. 
* We shift these bus numbers so that everything fits into a single flat * numbering space for CAM. Logical buses occupy the first 32 CAM bus * numbers, and the physical bus numbers are shifted to be above that. * This results in the various driver arrays being indexed as follows: * * ciss_controllers[] - indexed by logical bus * ciss_cam_sim[] - indexed by both logical and physical, with physical * being shifted by 32. * ciss_logical[][] - indexed by logical bus * ciss_physical[][] - indexed by physical bus * * XXX This is getting more and more hackish. CISS really doesn't play * well with a standard SCSI model; devices are addressed via magic * cookies, not via b/t/l addresses. Since there is no way to store * the cookie in the CAM device object, we have to keep these lookup * tables handy so that the devices can be found quickly at the cost * of wasting memory and having a convoluted lookup scheme. This * driver should probably be converted to block interface. */ /* * If the L2 and L3 SCSI addresses are 0, this signifies a proxy * controller. A proxy controller is another physical controller * behind the primary PCI controller. We need to know about this * so that BMIC commands can be properly targeted. There can be * proxy controllers attached to a single PCI controller, so * find the highest numbered one so the array can be properly * sized. 
*/ sc->ciss_max_logical_bus = 1; for (i = 0; i < nphys; i++) { if (cll->lun[i].physical.extra_address == 0) { bus = cll->lun[i].physical.bus; sc->ciss_max_logical_bus = max(sc->ciss_max_logical_bus, bus) + 1; } else { bus = CISS_EXTRA_BUS2(cll->lun[i].physical.extra_address); sc->ciss_max_physical_bus = max(sc->ciss_max_physical_bus, bus); } } sc->ciss_controllers = malloc(sc->ciss_max_logical_bus * sizeof (union ciss_device_address), CISS_MALLOC_CLASS, M_NOWAIT | M_ZERO); if (sc->ciss_controllers == NULL) { ciss_printf(sc, "Could not allocate memory for controller map\n"); error = ENOMEM; goto out; } /* setup a map of controller addresses */ for (i = 0; i < nphys; i++) { if (cll->lun[i].physical.extra_address == 0) { sc->ciss_controllers[cll->lun[i].physical.bus] = cll->lun[i]; } } sc->ciss_physical = malloc(sc->ciss_max_physical_bus * sizeof(struct ciss_pdrive *), CISS_MALLOC_CLASS, M_NOWAIT | M_ZERO); if (sc->ciss_physical == NULL) { ciss_printf(sc, "Could not allocate memory for physical device map\n"); error = ENOMEM; goto out; } for (i = 0; i < sc->ciss_max_physical_bus; i++) { sc->ciss_physical[i] = malloc(sizeof(struct ciss_pdrive) * CISS_MAX_PHYSTGT, CISS_MALLOC_CLASS, M_NOWAIT | M_ZERO); if (sc->ciss_physical[i] == NULL) { ciss_printf(sc, "Could not allocate memory for target map\n"); error = ENOMEM; goto out; } } ciss_filter_physical(sc, cll); out: if (cll != NULL) free(cll, CISS_MALLOC_CLASS); return(error); } static int ciss_filter_physical(struct ciss_softc *sc, struct ciss_lun_report *cll) { u_int32_t ea; int i, nphys; int bus, target; nphys = (ntohl(cll->list_size) / sizeof(union ciss_device_address)); for (i = 0; i < nphys; i++) { if (cll->lun[i].physical.extra_address == 0) continue; /* * Filter out devices that we don't want. Level 3 LUNs could * probably be supported, but the docs don't give enough of a * hint to know how. * * The mode field of the physical address is likely set to have * hard disks masked out. 
Honor it unless the user has overridden * us with the tunable. We also munge the inquiry data for these * disks so that they only show up as passthrough devices. Keeping * them visible in this fashion is useful for doing things like * flashing firmware. */ ea = cll->lun[i].physical.extra_address; if ((CISS_EXTRA_BUS3(ea) != 0) || (CISS_EXTRA_TARGET3(ea) != 0) || (CISS_EXTRA_MODE2(ea) == 0x3)) continue; if ((ciss_expose_hidden_physical == 0) && (cll->lun[i].physical.mode == CISS_HDR_ADDRESS_MODE_MASK_PERIPHERAL)) continue; /* * Note: CISS firmware numbers physical busses starting at '1', not * '0'. This numbering is internal to the firmware and is only * used as a hint here. */ bus = CISS_EXTRA_BUS2(ea) - 1; target = CISS_EXTRA_TARGET2(ea); sc->ciss_physical[bus][target].cp_address = cll->lun[i]; sc->ciss_physical[bus][target].cp_online = 1; } return (0); } static int ciss_inquiry_logical(struct ciss_softc *sc, struct ciss_ldrive *ld) { struct ciss_request *cr; struct ciss_command *cc; struct scsi_inquiry *inq; int error; int command_status; cr = NULL; bzero(&ld->cl_geometry, sizeof(ld->cl_geometry)); if ((error = ciss_get_request(sc, &cr)) != 0) goto out; cc = cr->cr_cc; cr->cr_data = &ld->cl_geometry; cr->cr_length = sizeof(ld->cl_geometry); cr->cr_flags = CISS_REQ_DATAIN; cc->header.address = ld->cl_address; cc->cdb.cdb_length = 6; cc->cdb.type = CISS_CDB_TYPE_COMMAND; cc->cdb.attribute = CISS_CDB_ATTRIBUTE_SIMPLE; cc->cdb.direction = CISS_CDB_DIRECTION_READ; cc->cdb.timeout = 30; inq = (struct scsi_inquiry *)&(cc->cdb.cdb[0]); inq->opcode = INQUIRY; inq->byte2 = SI_EVPD; inq->page_code = CISS_VPD_LOGICAL_DRIVE_GEOMETRY; scsi_ulto2b(sizeof(ld->cl_geometry), inq->length); if ((error = ciss_synch_request(cr, 60 * 1000)) != 0) { ciss_printf(sc, "error getting geometry (%d)\n", error); goto out; } ciss_report_request(cr, &command_status, NULL); switch(command_status) { case CISS_CMD_STATUS_SUCCESS: case CISS_CMD_STATUS_DATA_UNDERRUN: break; case 
CISS_CMD_STATUS_DATA_OVERRUN: ciss_printf(sc, "WARNING: Data overrun\n"); break; default: ciss_printf(sc, "Error detecting logical drive geometry (%s)\n", ciss_name_command_status(command_status)); break; } out: if (cr != NULL) ciss_release_request(cr); return(error); } /************************************************************************ * Identify a logical drive, initialise state related to it. */ static int ciss_identify_logical(struct ciss_softc *sc, struct ciss_ldrive *ld) { struct ciss_request *cr; struct ciss_command *cc; struct ciss_bmic_cdb *cbc; int error, command_status; debug_called(1); cr = NULL; /* * Build a BMIC request to fetch the drive ID. */ if ((error = ciss_get_bmic_request(sc, &cr, CISS_BMIC_ID_LDRIVE, (void **)&ld->cl_ldrive, sizeof(*ld->cl_ldrive))) != 0) goto out; cc = cr->cr_cc; cc->header.address = *ld->cl_controller; /* target controller */ cbc = (struct ciss_bmic_cdb *)&(cc->cdb.cdb[0]); cbc->log_drive = CISS_LUN_TO_TARGET(ld->cl_address.logical.lun); /* * Submit the request and wait for it to complete. */ if ((error = ciss_synch_request(cr, 60 * 1000)) != 0) { ciss_printf(sc, "error sending BMIC LDRIVE command (%d)\n", error); goto out; } /* * Check response. */ ciss_report_request(cr, &command_status, NULL); switch(command_status) { case CISS_CMD_STATUS_SUCCESS: /* buffer right size */ break; case CISS_CMD_STATUS_DATA_UNDERRUN: case CISS_CMD_STATUS_DATA_OVERRUN: ciss_printf(sc, "data over/underrun reading logical drive ID\n"); default: ciss_printf(sc, "error reading logical drive ID (%s)\n", ciss_name_command_status(command_status)); error = EIO; goto out; } ciss_release_request(cr); cr = NULL; /* * Build a CISS BMIC command to get the logical drive status. */ if ((error = ciss_get_ldrive_status(sc, ld)) != 0) goto out; /* * Get the logical drive geometry. */ if ((error = ciss_inquiry_logical(sc, ld)) != 0) goto out; /* * Print the drive's basic characteristics. 
*/ if (bootverbose) { ciss_printf(sc, "logical drive (b%dt%d): %s, %dMB ", CISS_LUN_TO_BUS(ld->cl_address.logical.lun), CISS_LUN_TO_TARGET(ld->cl_address.logical.lun), ciss_name_ldrive_org(ld->cl_ldrive->fault_tolerance), ((ld->cl_ldrive->blocks_available / (1024 * 1024)) * ld->cl_ldrive->block_size)); ciss_print_ldrive(sc, ld); } out: if (error != 0) { /* make the drive not-exist */ ld->cl_status = CISS_LD_NONEXISTENT; if (ld->cl_ldrive != NULL) { free(ld->cl_ldrive, CISS_MALLOC_CLASS); ld->cl_ldrive = NULL; } if (ld->cl_lstatus != NULL) { free(ld->cl_lstatus, CISS_MALLOC_CLASS); ld->cl_lstatus = NULL; } } if (cr != NULL) ciss_release_request(cr); return(error); } /************************************************************************ * Get status for a logical drive. * * XXX should we also do this in response to Test Unit Ready? */ static int ciss_get_ldrive_status(struct ciss_softc *sc, struct ciss_ldrive *ld) { struct ciss_request *cr; struct ciss_command *cc; struct ciss_bmic_cdb *cbc; int error, command_status; /* * Build a CISS BMIC command to get the logical drive status. */ if ((error = ciss_get_bmic_request(sc, &cr, CISS_BMIC_ID_LSTATUS, (void **)&ld->cl_lstatus, sizeof(*ld->cl_lstatus))) != 0) goto out; cc = cr->cr_cc; cc->header.address = *ld->cl_controller; /* target controller */ cbc = (struct ciss_bmic_cdb *)&(cc->cdb.cdb[0]); cbc->log_drive = CISS_LUN_TO_TARGET(ld->cl_address.logical.lun); /* * Submit the request and wait for it to complete. */ if ((error = ciss_synch_request(cr, 60 * 1000)) != 0) { ciss_printf(sc, "error sending BMIC LSTATUS command (%d)\n", error); goto out; } /* * Check response. 
 */
    ciss_report_request(cr, &command_status, NULL);
    switch(command_status) {
    case CISS_CMD_STATUS_SUCCESS:		/* buffer right size */
	break;
    case CISS_CMD_STATUS_DATA_UNDERRUN:
    case CISS_CMD_STATUS_DATA_OVERRUN:
	ciss_printf(sc, "data over/underrun reading logical drive status\n");
	/* FALLTHROUGH: over/underrun is still treated as a fatal EIO below */
    default:
	ciss_printf(sc, "error reading logical drive status (%s)\n",
		    ciss_name_command_status(command_status));
	error = EIO;
	goto out;
    }

    /*
     * Set the drive's summary status based on the returned status.
     *
     * XXX testing shows that a failed JBOD drive comes back at next
     * boot in "queued for expansion" mode.  WTF?
     */
    ld->cl_status = ciss_decode_ldrive_status(ld->cl_lstatus->status);

out:
    if (cr != NULL)
	ciss_release_request(cr);
    return(error);
}

/************************************************************************
 * Notify the adapter of a config update.
 *
 * Rings the inbound doorbell with the "config table changed" bit and
 * polls up to ~1 second for the adapter to acknowledge by clearing it.
 * Returns 0 on acknowledgement, 1 if the adapter never responded.
 */
static int
ciss_update_config(struct ciss_softc *sc)
{
    int		i;

    debug_called(1);

    CISS_TL_SIMPLE_WRITE(sc, CISS_TL_SIMPLE_IDBR,
			 CISS_TL_SIMPLE_IDBR_CFG_TABLE);
    /* poll in 1ms steps for the doorbell bit to clear */
    for (i = 0; i < 1000; i++) {
	if (!(CISS_TL_SIMPLE_READ(sc, CISS_TL_SIMPLE_IDBR) &
	      CISS_TL_SIMPLE_IDBR_CFG_TABLE)) {
	    return(0);
	}
	DELAY(1000);
    }
    return(1);
}

/************************************************************************
 * Accept new media into a logical drive.
 *
 * XXX The drive has previously been offline; it would be good if we
 *     could make sure it's not open right now.
 */
static int
ciss_accept_media(struct ciss_softc *sc, struct ciss_ldrive *ld)
{
    struct ciss_request		*cr;
    struct ciss_command		*cc;
    struct ciss_bmic_cdb	*cbc;
    int				command_status;
    int				error = 0, ldrive;

    ldrive = CISS_LUN_TO_TARGET(ld->cl_address.logical.lun);

    debug(0, "bringing logical drive %d back online", ldrive);

    /*
     * Build a CISS BMIC command to bring the drive back online.
*/ if ((error = ciss_get_bmic_request(sc, &cr, CISS_BMIC_ACCEPT_MEDIA, NULL, 0)) != 0) goto out; cc = cr->cr_cc; cc->header.address = *ld->cl_controller; /* target controller */ cbc = (struct ciss_bmic_cdb *)&(cc->cdb.cdb[0]); cbc->log_drive = ldrive; /* * Submit the request and wait for it to complete. */ if ((error = ciss_synch_request(cr, 60 * 1000)) != 0) { ciss_printf(sc, "error sending BMIC ACCEPT MEDIA command (%d)\n", error); goto out; } /* * Check response. */ ciss_report_request(cr, &command_status, NULL); switch(command_status) { case CISS_CMD_STATUS_SUCCESS: /* all OK */ /* we should get a logical drive status changed event here */ break; default: ciss_printf(cr->cr_sc, "error accepting media into failed logical drive (%s)\n", ciss_name_command_status(command_status)); break; } out: if (cr != NULL) ciss_release_request(cr); return(error); } /************************************************************************ * Release adapter resources. */ static void ciss_free(struct ciss_softc *sc) { struct ciss_request *cr; int i, j; debug_called(1); /* we're going away */ sc->ciss_flags |= CISS_FLAG_ABORTING; /* terminate the periodic heartbeat routine */ callout_stop(&sc->ciss_periodic); /* cancel the Event Notify chain */ ciss_notify_abort(sc); ciss_kill_notify_thread(sc); /* disconnect from CAM */ if (sc->ciss_cam_sim) { for (i = 0; i < sc->ciss_max_logical_bus; i++) { if (sc->ciss_cam_sim[i]) { xpt_bus_deregister(cam_sim_path(sc->ciss_cam_sim[i])); cam_sim_free(sc->ciss_cam_sim[i], 0); } } for (i = CISS_PHYSICAL_BASE; i < sc->ciss_max_physical_bus + CISS_PHYSICAL_BASE; i++) { if (sc->ciss_cam_sim[i]) { xpt_bus_deregister(cam_sim_path(sc->ciss_cam_sim[i])); cam_sim_free(sc->ciss_cam_sim[i], 0); } } free(sc->ciss_cam_sim, CISS_MALLOC_CLASS); } if (sc->ciss_cam_devq) cam_simq_free(sc->ciss_cam_devq); /* remove the control device */ mtx_unlock(&sc->ciss_mtx); if (sc->ciss_dev_t != NULL) destroy_dev(sc->ciss_dev_t); /* Final cleanup of the callout. 
*/ callout_drain(&sc->ciss_periodic); mtx_destroy(&sc->ciss_mtx); /* free the controller data */ if (sc->ciss_id != NULL) free(sc->ciss_id, CISS_MALLOC_CLASS); /* release I/O resources */ if (sc->ciss_regs_resource != NULL) bus_release_resource(sc->ciss_dev, SYS_RES_MEMORY, sc->ciss_regs_rid, sc->ciss_regs_resource); if (sc->ciss_cfg_resource != NULL) bus_release_resource(sc->ciss_dev, SYS_RES_MEMORY, sc->ciss_cfg_rid, sc->ciss_cfg_resource); if (sc->ciss_intr != NULL) bus_teardown_intr(sc->ciss_dev, sc->ciss_irq_resource, sc->ciss_intr); if (sc->ciss_irq_resource != NULL) bus_release_resource(sc->ciss_dev, SYS_RES_IRQ, sc->ciss_irq_rid[0], sc->ciss_irq_resource); if (sc->ciss_msi) pci_release_msi(sc->ciss_dev); while ((cr = ciss_dequeue_free(sc)) != NULL) bus_dmamap_destroy(sc->ciss_buffer_dmat, cr->cr_datamap); if (sc->ciss_buffer_dmat) bus_dma_tag_destroy(sc->ciss_buffer_dmat); /* destroy command memory and DMA tag */ if (sc->ciss_command != NULL) { bus_dmamap_unload(sc->ciss_command_dmat, sc->ciss_command_map); bus_dmamem_free(sc->ciss_command_dmat, sc->ciss_command, sc->ciss_command_map); } if (sc->ciss_command_dmat) bus_dma_tag_destroy(sc->ciss_command_dmat); if (sc->ciss_reply) { bus_dmamap_unload(sc->ciss_reply_dmat, sc->ciss_reply_map); bus_dmamem_free(sc->ciss_reply_dmat, sc->ciss_reply, sc->ciss_reply_map); } if (sc->ciss_reply_dmat) bus_dma_tag_destroy(sc->ciss_reply_dmat); /* destroy DMA tags */ if (sc->ciss_parent_dmat) bus_dma_tag_destroy(sc->ciss_parent_dmat); if (sc->ciss_logical) { for (i = 0; i < sc->ciss_max_logical_bus; i++) { for (j = 0; j < sc->ciss_cfg->max_logical_supported; j++) { if (sc->ciss_logical[i][j].cl_ldrive) free(sc->ciss_logical[i][j].cl_ldrive, CISS_MALLOC_CLASS); if (sc->ciss_logical[i][j].cl_lstatus) free(sc->ciss_logical[i][j].cl_lstatus, CISS_MALLOC_CLASS); } free(sc->ciss_logical[i], CISS_MALLOC_CLASS); } free(sc->ciss_logical, CISS_MALLOC_CLASS); } if (sc->ciss_physical) { for (i = 0; i < sc->ciss_max_physical_bus; i++) 
free(sc->ciss_physical[i], CISS_MALLOC_CLASS); free(sc->ciss_physical, CISS_MALLOC_CLASS); } if (sc->ciss_controllers) free(sc->ciss_controllers, CISS_MALLOC_CLASS); } /************************************************************************ * Give a command to the adapter. * * Note that this uses the simple transport layer directly. If we * want to add support for other layers, we'll need a switch of some * sort. * * Note that the simple transport layer has no way of refusing a * command; we only have as many request structures as the adapter * supports commands, so we don't have to check (this presumes that * the adapter can handle commands as fast as we throw them at it). */ static int ciss_start(struct ciss_request *cr) { struct ciss_command *cc; /* XXX debugging only */ int error; cc = cr->cr_cc; debug(2, "post command %d tag %d ", cr->cr_tag, cc->header.host_tag); /* * Map the request's data. */ if ((error = ciss_map_request(cr))) return(error); #if 0 ciss_print_request(cr); #endif return(0); } /************************************************************************ * Fetch completed request(s) from the adapter, queue them for * completion handling. * * Note that this uses the simple transport layer directly. If we * want to add support for other layers, we'll need a switch of some * sort. * * Note that the simple transport mechanism does not require any * reentrancy protection; the OPQ read is atomic. If there is a * chance of a race with something else that might move the request * off the busy list, then we will have to lock against that * (eg. timeouts, etc.) */ static void ciss_done(struct ciss_softc *sc, cr_qhead_t *qh) { struct ciss_request *cr; struct ciss_command *cc; u_int32_t tag, index; debug_called(3); /* * Loop quickly taking requests from the adapter and moving them * to the completed queue. 
 */
    for (;;) {
	tag = CISS_TL_SIMPLE_FETCH_CMD(sc);
	if (tag == CISS_TL_SIMPLE_OPQ_EMPTY)
	    break;
	/* low two bits of the tag carry status; the rest is the request index */
	index = tag >> 2;
	debug(2, "completed command %d%s", index,
	      (tag & CISS_HDR_HOST_TAG_ERROR) ? " with error" : "");
	if (index >= sc->ciss_max_requests) {
	    ciss_printf(sc, "completed invalid request %d (0x%x)\n", index, tag);
	    continue;
	}
	cr = &(sc->ciss_request[index]);
	cc = cr->cr_cc;
	cc->header.host_tag = tag;	/* not updated by adapter */
	ciss_enqueue_complete(cr, qh);
    }

}

/*
 * Performant-mode analogue of ciss_done(): drain completions from the
 * host-memory reply ring, using the cycle bit to detect valid entries.
 */
static void
ciss_perf_done(struct ciss_softc *sc, cr_qhead_t *qh)
{
    struct ciss_request	*cr;
    struct ciss_command	*cc;
    u_int32_t		tag, index;

    debug_called(3);

    /*
     * Loop quickly taking requests from the adapter and moving them
     * to the completed queue.
     */
    for (;;) {
	tag = sc->ciss_reply[sc->ciss_rqidx];
	/* entry is only valid when its cycle bit matches the current cycle */
	if ((tag & CISS_CYCLE_MASK) != sc->ciss_cycle)
	    break;
	index = tag >> 2;
	debug(2, "completed command %d%s\n", index,
	      (tag & CISS_HDR_HOST_TAG_ERROR) ? " with error" : "");
	if (index < sc->ciss_max_requests) {
	    cr = &(sc->ciss_request[index]);
	    cc = cr->cr_cc;
	    cc->header.host_tag = tag;	/* not updated by adapter */
	    ciss_enqueue_complete(cr, qh);
	} else {
	    ciss_printf(sc, "completed invalid request %d (0x%x)\n", index, tag);
	}

	/* advance ring index, flipping the cycle bit on wrap */
	if (++sc->ciss_rqidx == sc->ciss_max_requests) {
	    sc->ciss_rqidx = 0;
	    sc->ciss_cycle ^= 1;
	}
    }

}

/************************************************************************
 * Take an interrupt from the adapter.
 */
static void
ciss_intr(void *arg)
{
    cr_qhead_t qh;
    struct ciss_softc	*sc = (struct ciss_softc *)arg;

    /*
     * The only interrupt we recognise indicates that there are
     * entries in the outbound post queue.
     */
    STAILQ_INIT(&qh);
    ciss_done(sc, &qh);
    mtx_lock(&sc->ciss_mtx);
    ciss_complete(sc, &qh);
    mtx_unlock(&sc->ciss_mtx);
}

static void
ciss_perf_intr(void *arg)
{
    struct ciss_softc	*sc = (struct ciss_softc *)arg;

    /* Clear the interrupt and flush the bridges.  Docs say that the flush
     * needs to be done twice, which doesn't seem right.
*/ CISS_TL_PERF_CLEAR_INT(sc); CISS_TL_PERF_FLUSH_INT(sc); ciss_perf_msi_intr(sc); } static void ciss_perf_msi_intr(void *arg) { cr_qhead_t qh; struct ciss_softc *sc = (struct ciss_softc *)arg; STAILQ_INIT(&qh); ciss_perf_done(sc, &qh); mtx_lock(&sc->ciss_mtx); ciss_complete(sc, &qh); mtx_unlock(&sc->ciss_mtx); } /************************************************************************ * Process completed requests. * * Requests can be completed in three fashions: * * - by invoking a callback function (cr_complete is non-null) * - by waking up a sleeper (cr_flags has CISS_REQ_SLEEP set) * - by clearing the CISS_REQ_POLL flag in interrupt/timeout context */ static void ciss_complete(struct ciss_softc *sc, cr_qhead_t *qh) { struct ciss_request *cr; debug_called(2); /* * Loop taking requests off the completed queue and performing * completion processing on them. */ for (;;) { if ((cr = ciss_dequeue_complete(sc, qh)) == NULL) break; ciss_unmap_request(cr); if ((cr->cr_flags & CISS_REQ_BUSY) == 0) ciss_printf(sc, "WARNING: completing non-busy request\n"); cr->cr_flags &= ~CISS_REQ_BUSY; /* * If the request has a callback, invoke it. */ if (cr->cr_complete != NULL) { cr->cr_complete(cr); continue; } /* * If someone is sleeping on this request, wake them up. */ if (cr->cr_flags & CISS_REQ_SLEEP) { cr->cr_flags &= ~CISS_REQ_SLEEP; wakeup(cr); continue; } /* * If someone is polling this request for completion, signal. */ if (cr->cr_flags & CISS_REQ_POLL) { cr->cr_flags &= ~CISS_REQ_POLL; continue; } /* * Give up and throw the request back on the free queue. This * should never happen; resources will probably be lost. */ ciss_printf(sc, "WARNING: completed command with no submitter\n"); ciss_enqueue_free(cr); } } /************************************************************************ * Report on the completion status of a request, and pass back SCSI * and command status values. 
*/ static int _ciss_report_request(struct ciss_request *cr, int *command_status, int *scsi_status, const char *func) { struct ciss_command *cc; struct ciss_error_info *ce; debug_called(2); cc = cr->cr_cc; ce = (struct ciss_error_info *)&(cc->sg[0]); /* * We don't consider data under/overrun an error for the Report * Logical/Physical LUNs commands. */ if ((cc->header.host_tag & CISS_HDR_HOST_TAG_ERROR) && ((ce->command_status == CISS_CMD_STATUS_DATA_OVERRUN) || (ce->command_status == CISS_CMD_STATUS_DATA_UNDERRUN)) && ((cc->cdb.cdb[0] == CISS_OPCODE_REPORT_LOGICAL_LUNS) || (cc->cdb.cdb[0] == CISS_OPCODE_REPORT_PHYSICAL_LUNS) || (cc->cdb.cdb[0] == INQUIRY))) { cc->header.host_tag &= ~CISS_HDR_HOST_TAG_ERROR; debug(2, "ignoring irrelevant under/overrun error"); } /* * Check the command's error bit, if clear, there's no status and * everything is OK. */ if (!(cc->header.host_tag & CISS_HDR_HOST_TAG_ERROR)) { if (scsi_status != NULL) *scsi_status = SCSI_STATUS_OK; if (command_status != NULL) *command_status = CISS_CMD_STATUS_SUCCESS; return(0); } else { if (command_status != NULL) *command_status = ce->command_status; if (scsi_status != NULL) { if (ce->command_status == CISS_CMD_STATUS_TARGET_STATUS) { *scsi_status = ce->scsi_status; } else { *scsi_status = -1; } } if (bootverbose) ciss_printf(cr->cr_sc, "command status 0x%x (%s) scsi status 0x%x\n", ce->command_status, ciss_name_command_status(ce->command_status), ce->scsi_status); if (ce->command_status == CISS_CMD_STATUS_INVALID_COMMAND) { ciss_printf(cr->cr_sc, "invalid command, offense size %d at %d, value 0x%x, function %s\n", ce->additional_error_info.invalid_command.offense_size, ce->additional_error_info.invalid_command.offense_offset, ce->additional_error_info.invalid_command.offense_value, func); } } #if 0 ciss_print_request(cr); #endif return(1); } /************************************************************************ * Issue a request and don't return until it's completed. 
 *
 * Depending on adapter status, we may poll or sleep waiting for
 * completion.
 */
static int
ciss_synch_request(struct ciss_request *cr, int timeout)
{
    /* once interrupts are running we can sleep; before that, poll */
    if (cr->cr_sc->ciss_flags & CISS_FLAG_RUNNING) {
	return(ciss_wait_request(cr, timeout));
    } else {
	return(ciss_poll_request(cr, timeout));
    }
}

/************************************************************************
 * Issue a request and poll for completion.
 *
 * Timeout in milliseconds.
 */
static int
ciss_poll_request(struct ciss_request *cr, int timeout)
{
    cr_qhead_t qh;
    struct ciss_softc *sc;
    int		error;

    debug_called(2);

    STAILQ_INIT(&qh);
    sc = cr->cr_sc;
    cr->cr_flags |= CISS_REQ_POLL;
    if ((error = ciss_start(cr)) != 0)
	return(error);

    do {
	/* drain completions by hand, using whichever transport is active */
	if (sc->ciss_perf)
	    ciss_perf_done(sc, &qh);
	else
	    ciss_done(sc, &qh);
	ciss_complete(sc, &qh);
	/* ciss_complete() clears CISS_REQ_POLL when our request finishes */
	if (!(cr->cr_flags & CISS_REQ_POLL))
	    return(0);
	DELAY(1000);
    } while (timeout-- >= 0);
    return(EWOULDBLOCK);
}

/************************************************************************
 * Issue a request and sleep waiting for completion.
 *
 * Timeout in milliseconds.  Note that a spurious wakeup will reset
 * the timeout.
 */
static int
ciss_wait_request(struct ciss_request *cr, int timeout)
{
    int		error;

    debug_called(2);

    cr->cr_flags |= CISS_REQ_SLEEP;
    if ((error = ciss_start(cr)) != 0)
	return(error);

    /* interrupt path clears CISS_REQ_SLEEP and wakes us */
    while ((cr->cr_flags & CISS_REQ_SLEEP) && (error != EWOULDBLOCK)) {
	error = msleep_sbt(cr, &cr->cr_sc->ciss_mtx, PRIBIO, "cissREQ",
	    SBT_1MS * timeout, 0, 0);
    }
    return(error);
}

#if 0
/************************************************************************
 * Abort a request.  Note that a potential exists here to race the
 * request being completed; the caller must deal with this.
 */
static int
ciss_abort_request(struct ciss_request *ar)
{
    struct ciss_request		*cr;
    struct ciss_command		*cc;
    struct ciss_message_cdb	*cmc;
    int				error;

    debug_called(1);

    /* get a request */
    if ((error = ciss_get_request(ar->cr_sc, &cr)) != 0)
	return(error);

    /* build the abort command */
    cc = cr->cr_cc;
    cc->header.address.mode.mode = CISS_HDR_ADDRESS_MODE_PERIPHERAL;	/* addressing? */
    cc->header.address.physical.target = 0;
    cc->header.address.physical.bus = 0;

    cc->cdb.cdb_length = sizeof(*cmc);
    cc->cdb.type = CISS_CDB_TYPE_MESSAGE;
    cc->cdb.attribute = CISS_CDB_ATTRIBUTE_SIMPLE;
    cc->cdb.direction = CISS_CDB_DIRECTION_NONE;
    cc->cdb.timeout = 30;

    cmc = (struct ciss_message_cdb *)&(cc->cdb.cdb[0]);
    cmc->opcode = CISS_OPCODE_MESSAGE_ABORT;
    cmc->type = CISS_MESSAGE_ABORT_TASK;
    cmc->abort_tag = ar->cr_tag;	/* endianness?? */

    /*
     * Send the request and wait for a response.  If we believe we
     * aborted the request OK, clear the flag that indicates it's
     * running.
     */
    error = ciss_synch_request(cr, 35 * 1000);
    if (!error)
	error = ciss_report_request(cr, NULL, NULL);
    ciss_release_request(cr);

    return(error);
}
#endif

/************************************************************************
 * Fetch and initialise a request
 *
 * Pulls a request off the free queue, resets its per-use state and
 * pre-cleans the associated command structure.  Returns ENOMEM when
 * no free requests remain (the pool is sized to the adapter's
 * command limit, so this means the adapter is saturated).
 */
static int
ciss_get_request(struct ciss_softc *sc, struct ciss_request **crp)
{
    struct ciss_request *cr;

    debug_called(2);

    /*
     * Get a request and clean it up.
     */
    if ((cr = ciss_dequeue_free(sc)) == NULL)
	return(ENOMEM);

    cr->cr_data = NULL;
    cr->cr_flags = 0;
    cr->cr_complete = NULL;
    cr->cr_private = NULL;
    cr->cr_sg_tag = CISS_SG_MAX;	/* Backstop to prevent accidents */

    ciss_preen_command(cr);
    *crp = cr;
    return(0);
}

static void
ciss_preen_command(struct ciss_request *cr)
{
    struct ciss_command	*cc;
    u_int32_t		cmdphys;

    /*
     * Clean up the command structure.
     *
     * Note that we set up the error_info structure here, since the
     * length can be overwritten by any command.
*/ cc = cr->cr_cc; cc->header.sg_in_list = 0; /* kinda inefficient this way */ cc->header.sg_total = 0; cc->header.host_tag = cr->cr_tag << 2; cc->header.host_tag_zeroes = 0; bzero(&(cc->sg[0]), CISS_COMMAND_ALLOC_SIZE - sizeof(struct ciss_command)); cmdphys = cr->cr_ccphys; cc->error_info.error_info_address = cmdphys + sizeof(struct ciss_command); cc->error_info.error_info_length = CISS_COMMAND_ALLOC_SIZE - sizeof(struct ciss_command); } /************************************************************************ * Release a request to the free list. */ static void ciss_release_request(struct ciss_request *cr) { struct ciss_softc *sc; debug_called(2); sc = cr->cr_sc; /* release the request to the free queue */ ciss_requeue_free(cr); } /************************************************************************ * Allocate a request that will be used to send a BMIC command. Do some * of the common setup here to avoid duplicating it everywhere else. */ static int ciss_get_bmic_request(struct ciss_softc *sc, struct ciss_request **crp, int opcode, void **bufp, size_t bufsize) { struct ciss_request *cr; struct ciss_command *cc; struct ciss_bmic_cdb *cbc; void *buf; int error; int dataout; debug_called(2); cr = NULL; buf = NULL; /* * Get a request. */ if ((error = ciss_get_request(sc, &cr)) != 0) goto out; /* * Allocate data storage if requested, determine the data direction. */ dataout = 0; if ((bufsize > 0) && (bufp != NULL)) { if (*bufp == NULL) { if ((buf = malloc(bufsize, CISS_MALLOC_CLASS, M_NOWAIT | M_ZERO)) == NULL) { error = ENOMEM; goto out; } } else { buf = *bufp; dataout = 1; /* we are given a buffer, so we are writing */ } } /* * Build a CISS BMIC command to get the logical drive ID. 
*/ cr->cr_data = buf; cr->cr_length = bufsize; if (!dataout) cr->cr_flags = CISS_REQ_DATAIN; cc = cr->cr_cc; cc->header.address.physical.mode = CISS_HDR_ADDRESS_MODE_PERIPHERAL; cc->header.address.physical.bus = 0; cc->header.address.physical.target = 0; cc->cdb.cdb_length = sizeof(*cbc); cc->cdb.type = CISS_CDB_TYPE_COMMAND; cc->cdb.attribute = CISS_CDB_ATTRIBUTE_SIMPLE; cc->cdb.direction = dataout ? CISS_CDB_DIRECTION_WRITE : CISS_CDB_DIRECTION_READ; cc->cdb.timeout = 0; cbc = (struct ciss_bmic_cdb *)&(cc->cdb.cdb[0]); bzero(cbc, sizeof(*cbc)); cbc->opcode = dataout ? CISS_ARRAY_CONTROLLER_WRITE : CISS_ARRAY_CONTROLLER_READ; cbc->bmic_opcode = opcode; cbc->size = htons((u_int16_t)bufsize); out: if (error) { if (cr != NULL) ciss_release_request(cr); } else { *crp = cr; if ((bufp != NULL) && (*bufp == NULL) && (buf != NULL)) *bufp = buf; } return(error); } /************************************************************************ * Handle a command passed in from userspace. */ static int ciss_user_command(struct ciss_softc *sc, IOCTL_Command_struct *ioc) { struct ciss_request *cr; struct ciss_command *cc; struct ciss_error_info *ce; int error = 0; debug_called(1); cr = NULL; /* * Get a request. */ while (ciss_get_request(sc, &cr) != 0) msleep(sc, &sc->ciss_mtx, PPAUSE, "cissREQ", hz); cc = cr->cr_cc; /* * Allocate an in-kernel databuffer if required, copy in user data. */ mtx_unlock(&sc->ciss_mtx); cr->cr_length = ioc->buf_size; if (ioc->buf_size > 0) { if ((cr->cr_data = malloc(ioc->buf_size, CISS_MALLOC_CLASS, M_NOWAIT)) == NULL) { error = ENOMEM; goto out_unlocked; } if ((error = copyin(ioc->buf, cr->cr_data, ioc->buf_size))) { debug(0, "copyin: bad data buffer %p/%d", ioc->buf, ioc->buf_size); goto out_unlocked; } } /* * Build the request based on the user command. */ bcopy(&ioc->LUN_info, &cc->header.address, sizeof(cc->header.address)); bcopy(&ioc->Request, &cc->cdb, sizeof(cc->cdb)); /* XXX anything else to populate here? 
*/ mtx_lock(&sc->ciss_mtx); /* * Run the command. */ if ((error = ciss_synch_request(cr, 60 * 1000))) { debug(0, "request failed - %d", error); goto out; } /* * Check to see if the command succeeded. */ ce = (struct ciss_error_info *)&(cc->sg[0]); if ((cc->header.host_tag & CISS_HDR_HOST_TAG_ERROR) == 0) bzero(ce, sizeof(*ce)); /* * Copy the results back to the user. */ bcopy(ce, &ioc->error_info, sizeof(*ce)); mtx_unlock(&sc->ciss_mtx); if ((ioc->buf_size > 0) && (error = copyout(cr->cr_data, ioc->buf, ioc->buf_size))) { debug(0, "copyout: bad data buffer %p/%d", ioc->buf, ioc->buf_size); goto out_unlocked; } /* done OK */ error = 0; out_unlocked: mtx_lock(&sc->ciss_mtx); out: if ((cr != NULL) && (cr->cr_data != NULL)) free(cr->cr_data, CISS_MALLOC_CLASS); if (cr != NULL) ciss_release_request(cr); return(error); } /************************************************************************ * Map a request into bus-visible space, initialise the scatter/gather * list. */ static int ciss_map_request(struct ciss_request *cr) { struct ciss_softc *sc; int error = 0; debug_called(2); sc = cr->cr_sc; /* check that mapping is necessary */ if (cr->cr_flags & CISS_REQ_MAPPED) return(0); cr->cr_flags |= CISS_REQ_MAPPED; bus_dmamap_sync(sc->ciss_command_dmat, sc->ciss_command_map, BUS_DMASYNC_PREWRITE); if (cr->cr_data != NULL) { if (cr->cr_flags & CISS_REQ_CCB) error = bus_dmamap_load_ccb(sc->ciss_buffer_dmat, cr->cr_datamap, cr->cr_data, ciss_request_map_helper, cr, 0); else error = bus_dmamap_load(sc->ciss_buffer_dmat, cr->cr_datamap, cr->cr_data, cr->cr_length, ciss_request_map_helper, cr, 0); if (error != 0) return (error); } else { /* * Post the command to the adapter. 
*/ cr->cr_sg_tag = CISS_SG_NONE; cr->cr_flags |= CISS_REQ_BUSY; if (sc->ciss_perf) CISS_TL_PERF_POST_CMD(sc, cr); else CISS_TL_SIMPLE_POST_CMD(sc, cr->cr_ccphys); } return(0); } static void ciss_request_map_helper(void *arg, bus_dma_segment_t *segs, int nseg, int error) { struct ciss_command *cc; struct ciss_request *cr; struct ciss_softc *sc; int i; debug_called(2); cr = (struct ciss_request *)arg; sc = cr->cr_sc; cc = cr->cr_cc; for (i = 0; i < nseg; i++) { cc->sg[i].address = segs[i].ds_addr; cc->sg[i].length = segs[i].ds_len; cc->sg[i].extension = 0; } /* we leave the s/g table entirely within the command */ cc->header.sg_in_list = nseg; cc->header.sg_total = nseg; if (cr->cr_flags & CISS_REQ_DATAIN) bus_dmamap_sync(sc->ciss_buffer_dmat, cr->cr_datamap, BUS_DMASYNC_PREREAD); if (cr->cr_flags & CISS_REQ_DATAOUT) bus_dmamap_sync(sc->ciss_buffer_dmat, cr->cr_datamap, BUS_DMASYNC_PREWRITE); if (nseg == 0) cr->cr_sg_tag = CISS_SG_NONE; else if (nseg == 1) cr->cr_sg_tag = CISS_SG_1; else if (nseg == 2) cr->cr_sg_tag = CISS_SG_2; else if (nseg <= 4) cr->cr_sg_tag = CISS_SG_4; else if (nseg <= 8) cr->cr_sg_tag = CISS_SG_8; else if (nseg <= 16) cr->cr_sg_tag = CISS_SG_16; else if (nseg <= 32) cr->cr_sg_tag = CISS_SG_32; else cr->cr_sg_tag = CISS_SG_MAX; /* * Post the command to the adapter. */ cr->cr_flags |= CISS_REQ_BUSY; if (sc->ciss_perf) CISS_TL_PERF_POST_CMD(sc, cr); else CISS_TL_SIMPLE_POST_CMD(sc, cr->cr_ccphys); } /************************************************************************ * Unmap a request from bus-visible space. 
*/ static void ciss_unmap_request(struct ciss_request *cr) { struct ciss_softc *sc; debug_called(2); sc = cr->cr_sc; /* check that unmapping is necessary */ if ((cr->cr_flags & CISS_REQ_MAPPED) == 0) return; bus_dmamap_sync(sc->ciss_command_dmat, sc->ciss_command_map, BUS_DMASYNC_POSTWRITE); if (cr->cr_data == NULL) goto out; if (cr->cr_flags & CISS_REQ_DATAIN) bus_dmamap_sync(sc->ciss_buffer_dmat, cr->cr_datamap, BUS_DMASYNC_POSTREAD); if (cr->cr_flags & CISS_REQ_DATAOUT) bus_dmamap_sync(sc->ciss_buffer_dmat, cr->cr_datamap, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->ciss_buffer_dmat, cr->cr_datamap); out: cr->cr_flags &= ~CISS_REQ_MAPPED; } /************************************************************************ * Attach the driver to CAM. * * We put all the logical drives on a single SCSI bus. */ static int ciss_cam_init(struct ciss_softc *sc) { int i, maxbus; debug_called(1); /* * Allocate a devq. We can reuse this for the masked physical * devices if we decide to export these as well. */ if ((sc->ciss_cam_devq = cam_simq_alloc(sc->ciss_max_requests - 2)) == NULL) { ciss_printf(sc, "can't allocate CAM SIM queue\n"); return(ENOMEM); } /* * Create a SIM. * * This naturally wastes a bit of memory. The alternative is to allocate * and register each bus as it is found, and then track them on a linked * list. Unfortunately, the driver has a few places where it needs to * look up the SIM based solely on bus number, and it's unclear whether * a list traversal would work for these situations. 
*/ maxbus = max(sc->ciss_max_logical_bus, sc->ciss_max_physical_bus + CISS_PHYSICAL_BASE); sc->ciss_cam_sim = malloc(maxbus * sizeof(struct cam_sim*), CISS_MALLOC_CLASS, M_NOWAIT | M_ZERO); if (sc->ciss_cam_sim == NULL) { ciss_printf(sc, "can't allocate memory for controller SIM\n"); return(ENOMEM); } for (i = 0; i < sc->ciss_max_logical_bus; i++) { if ((sc->ciss_cam_sim[i] = cam_sim_alloc(ciss_cam_action, ciss_cam_poll, "ciss", sc, device_get_unit(sc->ciss_dev), &sc->ciss_mtx, 2, sc->ciss_max_requests - 2, sc->ciss_cam_devq)) == NULL) { ciss_printf(sc, "can't allocate CAM SIM for controller %d\n", i); return(ENOMEM); } /* * Register bus with this SIM. */ mtx_lock(&sc->ciss_mtx); if (i == 0 || sc->ciss_controllers[i].physical.bus != 0) { if (xpt_bus_register(sc->ciss_cam_sim[i], sc->ciss_dev, i) != 0) { ciss_printf(sc, "can't register SCSI bus %d\n", i); mtx_unlock(&sc->ciss_mtx); return (ENXIO); } } mtx_unlock(&sc->ciss_mtx); } for (i = CISS_PHYSICAL_BASE; i < sc->ciss_max_physical_bus + CISS_PHYSICAL_BASE; i++) { if ((sc->ciss_cam_sim[i] = cam_sim_alloc(ciss_cam_action, ciss_cam_poll, "ciss", sc, device_get_unit(sc->ciss_dev), &sc->ciss_mtx, 1, sc->ciss_max_requests - 2, sc->ciss_cam_devq)) == NULL) { ciss_printf(sc, "can't allocate CAM SIM for controller %d\n", i); return (ENOMEM); } mtx_lock(&sc->ciss_mtx); if (xpt_bus_register(sc->ciss_cam_sim[i], sc->ciss_dev, i) != 0) { ciss_printf(sc, "can't register SCSI bus %d\n", i); mtx_unlock(&sc->ciss_mtx); return (ENXIO); } mtx_unlock(&sc->ciss_mtx); } return(0); } /************************************************************************ * Initiate a rescan of the 'logical devices' SIM */ static void ciss_cam_rescan_target(struct ciss_softc *sc, int bus, int target) { union ccb *ccb; debug_called(1); if ((ccb = xpt_alloc_ccb_nowait()) == NULL) { ciss_printf(sc, "rescan failed (can't allocate CCB)\n"); return; } if (xpt_create_path(&ccb->ccb_h.path, NULL, cam_sim_path(sc->ciss_cam_sim[bus]), target, CAM_LUN_WILDCARD) 
!= CAM_REQ_CMP) { ciss_printf(sc, "rescan failed (can't create path)\n"); xpt_free_ccb(ccb); return; } xpt_rescan(ccb); /* scan is now in progress */ } /************************************************************************ * Handle requests coming from CAM */ static void ciss_cam_action(struct cam_sim *sim, union ccb *ccb) { struct ciss_softc *sc; struct ccb_scsiio *csio; int bus, target; int physical; sc = cam_sim_softc(sim); bus = cam_sim_bus(sim); csio = (struct ccb_scsiio *)&ccb->csio; target = csio->ccb_h.target_id; physical = CISS_IS_PHYSICAL(bus); switch (ccb->ccb_h.func_code) { /* perform SCSI I/O */ case XPT_SCSI_IO: if (!ciss_cam_action_io(sim, csio)) return; break; /* perform geometry calculations */ case XPT_CALC_GEOMETRY: { struct ccb_calc_geometry *ccg = &ccb->ccg; struct ciss_ldrive *ld; debug(1, "XPT_CALC_GEOMETRY %d:%d:%d", cam_sim_bus(sim), ccb->ccb_h.target_id, ccb->ccb_h.target_lun); ld = NULL; if (!physical) ld = &sc->ciss_logical[bus][target]; /* * Use the cached geometry settings unless the fault tolerance * is invalid. */ if (physical || ld->cl_geometry.fault_tolerance == 0xFF) { u_int32_t secs_per_cylinder; ccg->heads = 255; ccg->secs_per_track = 32; secs_per_cylinder = ccg->heads * ccg->secs_per_track; ccg->cylinders = ccg->volume_size / secs_per_cylinder; } else { ccg->heads = ld->cl_geometry.heads; ccg->secs_per_track = ld->cl_geometry.sectors; ccg->cylinders = ntohs(ld->cl_geometry.cylinders); } ccb->ccb_h.status = CAM_REQ_CMP; break; } /* handle path attribute inquiry */ case XPT_PATH_INQ: { struct ccb_pathinq *cpi = &ccb->cpi; int sg_length; debug(1, "XPT_PATH_INQ %d:%d:%d", cam_sim_bus(sim), ccb->ccb_h.target_id, ccb->ccb_h.target_lun); cpi->version_num = 1; cpi->hba_inquiry = PI_TAG_ABLE; /* XXX is this correct? 
*/ cpi->target_sprt = 0; cpi->hba_misc = 0; cpi->max_target = sc->ciss_cfg->max_logical_supported; cpi->max_lun = 0; /* 'logical drive' channel only */ cpi->initiator_id = sc->ciss_cfg->max_logical_supported; strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); - strlcpy(cpi->hba_vid, "CISS", HBA_IDLEN); - strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); - cpi->unit_number = cam_sim_unit(sim); - cpi->bus_id = cam_sim_bus(sim); + strlcpy(cpi->hba_vid, "CISS", HBA_IDLEN); + strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); + cpi->unit_number = cam_sim_unit(sim); + cpi->bus_id = cam_sim_bus(sim); cpi->base_transfer_speed = 132 * 1024; /* XXX what to set this to? */ cpi->transport = XPORT_SPI; cpi->transport_version = 2; cpi->protocol = PROTO_SCSI; cpi->protocol_version = SCSI_REV_2; if (sc->ciss_cfg->max_sg_length == 0) { sg_length = 17; } else { /* XXX Fix for ZMR cards that advertise max_sg_length == 32 * Confusing bit here. max_sg_length is usually a power of 2. We always * need to subtract 1 to account for partial pages. Then we need to * align on a valid PAGE_SIZE so we round down to the nearest power of 2. * Add 1 so we can then subtract it out in the assignment to maxio. * The reason for all these shenanigans is to create a maxio value that * creates IO operations to volumes that yield consistent operations * with good performance. 
*/ sg_length = sc->ciss_cfg->max_sg_length - 1; sg_length = (1 << (fls(sg_length) - 1)) + 1; } cpi->maxio = (min(CISS_MAX_SG_ELEMENTS, sg_length) - 1) * PAGE_SIZE; ccb->ccb_h.status = CAM_REQ_CMP; break; } case XPT_GET_TRAN_SETTINGS: { struct ccb_trans_settings *cts = &ccb->cts; int bus, target; struct ccb_trans_settings_spi *spi = &cts->xport_specific.spi; struct ccb_trans_settings_scsi *scsi = &cts->proto_specific.scsi; bus = cam_sim_bus(sim); target = cts->ccb_h.target_id; debug(1, "XPT_GET_TRAN_SETTINGS %d:%d", bus, target); /* disconnect always OK */ cts->protocol = PROTO_SCSI; cts->protocol_version = SCSI_REV_2; cts->transport = XPORT_SPI; cts->transport_version = 2; spi->valid = CTS_SPI_VALID_DISC; spi->flags = CTS_SPI_FLAGS_DISC_ENB; scsi->valid = CTS_SCSI_VALID_TQ; scsi->flags = CTS_SCSI_FLAGS_TAG_ENB; cts->ccb_h.status = CAM_REQ_CMP; break; } default: /* we can't do this */ debug(1, "unspported func_code = 0x%x", ccb->ccb_h.func_code); ccb->ccb_h.status = CAM_REQ_INVALID; break; } xpt_done(ccb); } /************************************************************************ * Handle a CAM SCSI I/O request. 
 */
/*
 * Build and submit a CISS command for an XPT_SCSI_IO CCB.
 *
 * Returns 0 when the CCB has been accepted (or already completed via
 * emulation); non-zero when the caller (ciss_cam_action) must complete
 * the CCB itself with xpt_done().
 */
static int
ciss_cam_action_io(struct cam_sim *sim, struct ccb_scsiio *csio)
{
    struct ciss_softc	*sc;
    int			bus, target;
    struct ciss_request	*cr;
    struct ciss_command	*cc;
    int			error;

    sc = cam_sim_softc(sim);
    bus = cam_sim_bus(sim);
    target = csio->ccb_h.target_id;

    debug(2, "XPT_SCSI_IO %d:%d:%d", bus, target, csio->ccb_h.target_lun);

    /* check that the CDB pointer is not to a physical address */
    /* (we can't reach a physically-addressed CDB; flag the CCB as failed,
     * the validation check below then rejects it) */
    if ((csio->ccb_h.flags & CAM_CDB_POINTER) && (csio->ccb_h.flags & CAM_CDB_PHYS)) {
	debug(3, " CDB pointer is to physical address");
	csio->ccb_h.status = CAM_REQ_CMP_ERR;
    }

    /* abandon aborted ccbs or those that have failed validation */
    if ((csio->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
	debug(3, "abandoning CCB due to abort/validation failure");
	return(EINVAL);
    }

    /* handle emulation of some SCSI commands ourself */
    if (ciss_cam_emulate(sc, csio))
	return(0);

    /*
     * Get a request to manage this command.  If we can't, return the
     * ccb, freeze the queue and flag so that we unfreeze it when a
     * request completes.
     */
    if ((error = ciss_get_request(sc, &cr)) != 0) {
	xpt_freeze_simq(sim, 1);
	sc->ciss_flags |= CISS_FLAG_BUSY;
	csio->ccb_h.status |= CAM_REQUEUE_REQ;
	return(error);
    }

    /*
     * Build the command.
     */
    cc = cr->cr_cc;
    cr->cr_data = csio;
    cr->cr_length = csio->dxfer_len;
    cr->cr_complete = ciss_cam_complete;
    cr->cr_private = csio;

    /*
     * Target the right logical volume.
     */
    if (CISS_IS_PHYSICAL(bus))
	cc->header.address =
	    sc->ciss_physical[CISS_CAM_TO_PBUS(bus)][target].cp_address;
    else
	cc->header.address =
	    sc->ciss_logical[bus][target].cl_address;
    cc->cdb.cdb_length = csio->cdb_len;
    cc->cdb.type = CISS_CDB_TYPE_COMMAND;
    cc->cdb.attribute = CISS_CDB_ATTRIBUTE_SIMPLE;	/* XXX ordered tags? */
    if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
	cr->cr_flags = CISS_REQ_DATAOUT | CISS_REQ_CCB;
	cc->cdb.direction = CISS_CDB_DIRECTION_WRITE;
    } else if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
	cr->cr_flags = CISS_REQ_DATAIN | CISS_REQ_CCB;
	cc->cdb.direction = CISS_CDB_DIRECTION_READ;
    } else {
	/* no data phase */
	cr->cr_data = NULL;
	cr->cr_flags = 0;
	cc->cdb.direction = CISS_CDB_DIRECTION_NONE;
    }
    /* CAM timeout is in milliseconds; the adapter wants seconds (round up) */
    cc->cdb.timeout = (csio->ccb_h.timeout / 1000) + 1;
    if (csio->ccb_h.flags & CAM_CDB_POINTER) {
	bcopy(csio->cdb_io.cdb_ptr, &cc->cdb.cdb[0], csio->cdb_len);
    } else {
	bcopy(csio->cdb_io.cdb_bytes, &cc->cdb.cdb[0], csio->cdb_len);
    }

    /*
     * Submit the request to the adapter.
     *
     * Note that this may fail if we're unable to map the request (and
     * if we ever learn a transport layer other than simple, may fail
     * if the adapter rejects the command).
     */
    if ((error = ciss_start(cr)) != 0) {
	xpt_freeze_simq(sim, 1);
	csio->ccb_h.status |= CAM_RELEASE_SIMQ;
	if (error == EINPROGRESS) {
	    /* request is mapped asynchronously; not an error */
	    error = 0;
	} else {
	    csio->ccb_h.status |= CAM_REQUEUE_REQ;
	    ciss_release_request(cr);
	}
	return(error);
    }

    return(0);
}

/************************************************************************
 * Emulate SCSI commands the adapter doesn't handle as we might like.
 *
 * Returns non-zero when the CCB has been completed here (xpt_done already
 * called); zero when the command should be sent to the adapter.
 */
static int
ciss_cam_emulate(struct ciss_softc *sc, struct ccb_scsiio *csio)
{
    int		bus, target;
    u_int8_t	opcode;

    target = csio->ccb_h.target_id;
    bus = cam_sim_bus(xpt_path_sim(csio->ccb_h.path));
    opcode = (csio->ccb_h.flags & CAM_CDB_POINTER) ?
	*(u_int8_t *)csio->cdb_io.cdb_ptr : csio->cdb_io.cdb_bytes[0];

    /* physical devices: fail fast when the disk is known to be offline */
    if (CISS_IS_PHYSICAL(bus)) {
	if (sc->ciss_physical[CISS_CAM_TO_PBUS(bus)][target].cp_online != 1) {
	    csio->ccb_h.status |= CAM_SEL_TIMEOUT;
	    xpt_done((union ccb *)csio);
	    return(1);
	} else
	    return(0);
    }

    /*
     * Handle requests for volumes that don't exist or are not online.
     * A selection timeout is slightly better than an illegal request.
     * Other errors might be better.
     */
    if (sc->ciss_logical[bus][target].cl_status != CISS_LD_ONLINE) {
	csio->ccb_h.status |= CAM_SEL_TIMEOUT;
	xpt_done((union ccb *)csio);
	return(1);
    }

    /* if we have to fake Synchronise Cache */
    if (sc->ciss_flags & CISS_FLAG_FAKE_SYNCH) {
	/*
	 * If this is a Synchronise Cache command, typically issued when
	 * a device is closed, flush the adapter and complete now.
	 */
	if (((csio->ccb_h.flags & CAM_CDB_POINTER) ?
	     *(u_int8_t *)csio->cdb_io.cdb_ptr : csio->cdb_io.cdb_bytes[0]) == SYNCHRONIZE_CACHE) {
	    ciss_flush_adapter(sc);
	    csio->ccb_h.status |= CAM_REQ_CMP;
	    xpt_done((union ccb *)csio);
	    return(1);
	}
    }

    /*
     * A CISS target can only ever have one lun per target. REPORT_LUNS requires
     * at least one LUN field to be pre created for us, so snag it and fill in
     * the least significant byte indicating 1 LUN here. Emulate the command
     * return to shut up warning on console of a CDB error. swb
     */
    if (opcode == REPORT_LUNS && csio->dxfer_len > 0) {
	csio->data_ptr[3] = 8;
	csio->ccb_h.status |= CAM_REQ_CMP;
	xpt_done((union ccb *)csio);
	return(1);
    }

    return(0);
}

/************************************************************************
 * Check for possibly-completed commands.
 *
 * Polled-mode completion path: drain the adapter's done queue and run
 * completions without relying on interrupts.
 */
static void
ciss_cam_poll(struct cam_sim *sim)
{
    cr_qhead_t	qh;
    struct ciss_softc	*sc = cam_sim_softc(sim);

    debug_called(2);

    STAILQ_INIT(&qh);
    if (sc->ciss_perf)
	ciss_perf_done(sc, &qh);
    else
	ciss_done(sc, &qh);
    ciss_complete(sc, &qh);
}

/************************************************************************
 * Handle completion of a command - pass results back through the CCB
 *
 * Request-completion callback installed by ciss_cam_action_io(); maps the
 * adapter's command/SCSI status onto CAM status bits, copies sense data,
 * releases the request and finishes the CCB.
 */
static void
ciss_cam_complete(struct ciss_request *cr)
{
    struct ciss_softc		*sc;
    struct ciss_command		*cc;
    struct ciss_error_info	*ce;
    struct ccb_scsiio		*csio;
    int				scsi_status;
    int				command_status;

    debug_called(2);

    sc = cr->cr_sc;
    cc = cr->cr_cc;
    ce = (struct ciss_error_info *)&(cc->sg[0]);
    csio = (struct ccb_scsiio *)cr->cr_private;

    /*
     * Extract status values from request.
     */
    ciss_report_request(cr, &command_status, &scsi_status);
    csio->scsi_status = scsi_status;

    /*
     * Handle specific SCSI status values.
     */
    switch(scsi_status) {
	/* no status due to adapter error */
    case -1:
	debug(0, "adapter error");
	csio->ccb_h.status |= CAM_REQ_CMP_ERR;
	break;

	/* no status due to command completed OK */
    case SCSI_STATUS_OK:		/* CISS_SCSI_STATUS_GOOD */
	debug(2, "SCSI_STATUS_OK");
	csio->ccb_h.status |= CAM_REQ_CMP;
	break;

	/* check condition, sense data included */
    case SCSI_STATUS_CHECK_COND:	/* CISS_SCSI_STATUS_CHECK_CONDITION */
	debug(0, "SCSI_STATUS_CHECK_COND sense size %d resid %d\n",
	      ce->sense_length, ce->residual_count);
	bzero(&csio->sense_data, SSD_FULL_SIZE);
	bcopy(&ce->sense_info[0], &csio->sense_data, ce->sense_length);
	if (csio->sense_len > ce->sense_length)
	    csio->sense_resid = csio->sense_len - ce->sense_length;
	else
	    csio->sense_resid = 0;
	csio->resid = ce->residual_count;
	csio->ccb_h.status |= CAM_SCSI_STATUS_ERROR | CAM_AUTOSNS_VALID;
#ifdef CISS_DEBUG
	{
	    struct scsi_sense_data	*sns = (struct scsi_sense_data *)&ce->sense_info[0];

	    debug(0, "sense key %x", scsi_get_sense_key(sns, csio->sense_len -
		  csio->sense_resid, /*show_errors*/ 1));
	}
#endif
	break;

    case SCSI_STATUS_BUSY:		/* CISS_SCSI_STATUS_BUSY */
	debug(0, "SCSI_STATUS_BUSY");
	csio->ccb_h.status |= CAM_SCSI_BUSY;
	break;

    default:
	debug(0, "unknown status 0x%x", csio->scsi_status);
	csio->ccb_h.status |= CAM_REQ_CMP_ERR;
	break;
    }

    /* handle post-command fixup */
    ciss_cam_complete_fixup(sc, csio);

    ciss_release_request(cr);
    /*
     * If we froze the SIM queue because no requests were available
     * (CISS_FLAG_BUSY set in ciss_cam_action_io), release it now that
     * one has completed.
     */
    if (sc->ciss_flags & CISS_FLAG_BUSY) {
	sc->ciss_flags &= ~CISS_FLAG_BUSY;
	if (csio->ccb_h.status & CAM_RELEASE_SIMQ)
	    xpt_release_simq(xpt_path_sim(csio->ccb_h.path), 0);
	else
	    csio->ccb_h.status |= CAM_RELEASE_SIMQ;
    }
    xpt_done((union ccb *)csio);
}

/********************************************************************************
 * Fix up the result of some commands here.
*/ static void ciss_cam_complete_fixup(struct ciss_softc *sc, struct ccb_scsiio *csio) { struct scsi_inquiry_data *inq; struct ciss_ldrive *cl; uint8_t *cdb; int bus, target; cdb = (csio->ccb_h.flags & CAM_CDB_POINTER) ? (uint8_t *)csio->cdb_io.cdb_ptr : csio->cdb_io.cdb_bytes; if (cdb[0] == INQUIRY && (cdb[1] & SI_EVPD) == 0 && (csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN && csio->dxfer_len >= SHORT_INQUIRY_LENGTH) { inq = (struct scsi_inquiry_data *)csio->data_ptr; target = csio->ccb_h.target_id; bus = cam_sim_bus(xpt_path_sim(csio->ccb_h.path)); /* * If the controller is in JBOD mode, there are no logical volumes. * Let the disks be probed and dealt with via CAM. Else, mask off * the physical disks and setup the parts of the inq structure for * the logical volume. swb */ if( !(sc->ciss_id->PowerUPNvramFlags & PWR_UP_FLAG_JBOD_ENABLED)){ if (CISS_IS_PHYSICAL(bus)) { if (SID_TYPE(inq) == T_DIRECT) inq->device = (inq->device & 0xe0) | T_NODEVICE; return; } cl = &sc->ciss_logical[bus][target]; padstr(inq->vendor, "HP", SID_VENDOR_SIZE); padstr(inq->product, ciss_name_ldrive_org(cl->cl_ldrive->fault_tolerance), SID_PRODUCT_SIZE); padstr(inq->revision, ciss_name_ldrive_status(cl->cl_lstatus->status), SID_REVISION_SIZE); } } } /******************************************************************************** * Name the device at (target) * * XXX is this strictly correct? 
*/ static int ciss_name_device(struct ciss_softc *sc, int bus, int target) { struct cam_periph *periph; struct cam_path *path; int status; if (CISS_IS_PHYSICAL(bus)) return (0); status = xpt_create_path(&path, NULL, cam_sim_path(sc->ciss_cam_sim[bus]), target, 0); if (status == CAM_REQ_CMP) { xpt_path_lock(path); periph = cam_periph_find(path, NULL); xpt_path_unlock(path); xpt_free_path(path); if (periph != NULL) { sprintf(sc->ciss_logical[bus][target].cl_name, "%s%d", periph->periph_name, periph->unit_number); return(0); } } sc->ciss_logical[bus][target].cl_name[0] = 0; return(ENOENT); } /************************************************************************ * Periodic status monitoring. */ static void ciss_periodic(void *arg) { struct ciss_softc *sc; struct ciss_request *cr = NULL; struct ciss_command *cc = NULL; int error = 0; debug_called(1); sc = (struct ciss_softc *)arg; /* * Check the adapter heartbeat. */ if (sc->ciss_cfg->heartbeat == sc->ciss_heartbeat) { sc->ciss_heart_attack++; debug(0, "adapter heart attack in progress 0x%x/%d", sc->ciss_heartbeat, sc->ciss_heart_attack); if (sc->ciss_heart_attack == 3) { ciss_printf(sc, "ADAPTER HEARTBEAT FAILED\n"); ciss_disable_adapter(sc); return; } } else { sc->ciss_heartbeat = sc->ciss_cfg->heartbeat; sc->ciss_heart_attack = 0; debug(3, "new heartbeat 0x%x", sc->ciss_heartbeat); } /* * Send the NOP message and wait for a response. */ if (ciss_nop_message_heartbeat != 0 && (error = ciss_get_request(sc, &cr)) == 0) { cc = cr->cr_cc; cr->cr_complete = ciss_nop_complete; cc->cdb.cdb_length = 1; cc->cdb.type = CISS_CDB_TYPE_MESSAGE; cc->cdb.attribute = CISS_CDB_ATTRIBUTE_SIMPLE; cc->cdb.direction = CISS_CDB_DIRECTION_WRITE; cc->cdb.timeout = 0; cc->cdb.cdb[0] = CISS_OPCODE_MESSAGE_NOP; if ((error = ciss_start(cr)) != 0) { ciss_printf(sc, "SENDING NOP MESSAGE FAILED\n"); } } /* * If the notify event request has died for some reason, or has * not started yet, restart it. 
*/ if (!(sc->ciss_flags & CISS_FLAG_NOTIFY_OK)) { debug(0, "(re)starting Event Notify chain"); ciss_notify_event(sc); } /* * Reschedule. */ callout_reset(&sc->ciss_periodic, CISS_HEARTBEAT_RATE * hz, ciss_periodic, sc); } static void ciss_nop_complete(struct ciss_request *cr) { struct ciss_softc *sc; static int first_time = 1; sc = cr->cr_sc; if (ciss_report_request(cr, NULL, NULL) != 0) { if (first_time == 1) { first_time = 0; ciss_printf(sc, "SENDING NOP MESSAGE FAILED (not logging anymore)\n"); } } ciss_release_request(cr); } /************************************************************************ * Disable the adapter. * * The all requests in completed queue is failed with hardware error. * This will cause failover in a multipath configuration. */ static void ciss_disable_adapter(struct ciss_softc *sc) { cr_qhead_t qh; struct ciss_request *cr; struct ciss_command *cc; struct ciss_error_info *ce; int i; CISS_TL_SIMPLE_DISABLE_INTERRUPTS(sc); pci_disable_busmaster(sc->ciss_dev); sc->ciss_flags &= ~CISS_FLAG_RUNNING; for (i = 1; i < sc->ciss_max_requests; i++) { cr = &sc->ciss_request[i]; if ((cr->cr_flags & CISS_REQ_BUSY) == 0) continue; cc = cr->cr_cc; ce = (struct ciss_error_info *)&(cc->sg[0]); ce->command_status = CISS_CMD_STATUS_HARDWARE_ERROR; ciss_enqueue_complete(cr, &qh); } for (;;) { if ((cr = ciss_dequeue_complete(sc, &qh)) == NULL) break; /* * If the request has a callback, invoke it. */ if (cr->cr_complete != NULL) { cr->cr_complete(cr); continue; } /* * If someone is sleeping on this request, wake them up. */ if (cr->cr_flags & CISS_REQ_SLEEP) { cr->cr_flags &= ~CISS_REQ_SLEEP; wakeup(cr); continue; } } } /************************************************************************ * Request a notification response from the adapter. * * If (cr) is NULL, this is the first request of the adapter, so * reset the adapter's message pointer and start with the oldest * message available. 
 */
static void
ciss_notify_event(struct ciss_softc *sc)
{
    struct ciss_request		*cr;
    struct ciss_command		*cc;
    struct ciss_notify_cdb	*cnc;
    int				error;

    debug_called(1);

    cr = sc->ciss_periodic_notify;

    /* get a request if we don't already have one */
    if (cr == NULL) {
	if ((error = ciss_get_request(sc, &cr)) != 0) {
	    debug(0, "can't get notify event request");
	    goto out;
	}
	sc->ciss_periodic_notify = cr;
	cr->cr_complete = ciss_notify_complete;
	debug(1, "acquired request %d", cr->cr_tag);
    }

    /*
     * Get a databuffer if we don't already have one, note that the
     * adapter command wants a larger buffer than the actual
     * structure.
     */
    if (cr->cr_data == NULL) {
	if ((cr->cr_data = malloc(CISS_NOTIFY_DATA_SIZE, CISS_MALLOC_CLASS,
				  M_NOWAIT)) == NULL) {
	    debug(0, "can't get notify event request buffer");
	    error = ENOMEM;
	    goto out;
	}
	cr->cr_length = CISS_NOTIFY_DATA_SIZE;
    }

    /* re-setup the request's command (since we never release it) XXX overkill*/
    ciss_preen_command(cr);

    /* (re)build the notify event command */
    cc = cr->cr_cc;
    cc->header.address.physical.mode = CISS_HDR_ADDRESS_MODE_PERIPHERAL;
    cc->header.address.physical.bus = 0;
    cc->header.address.physical.target = 0;

    cc->cdb.cdb_length = sizeof(*cnc);
    cc->cdb.type = CISS_CDB_TYPE_COMMAND;
    cc->cdb.attribute = CISS_CDB_ATTRIBUTE_SIMPLE;
    cc->cdb.direction = CISS_CDB_DIRECTION_READ;
    cc->cdb.timeout = 0;	/* no timeout, we hope */

    cnc = (struct ciss_notify_cdb *)&(cc->cdb.cdb[0]);
    bzero(cr->cr_data, CISS_NOTIFY_DATA_SIZE);
    cnc->opcode = CISS_OPCODE_READ;
    cnc->command = CISS_COMMAND_NOTIFY_ON_EVENT;
    cnc->timeout = 0;		/* no timeout, we hope */
    cnc->synchronous = 0;
    cnc->ordered = 0;
    cnc->seek_to_oldest = 0;
    /* before the adapter is marked running, only ask for new events */
    if ((sc->ciss_flags & CISS_FLAG_RUNNING) == 0)
	cnc->new_only = 1;
    else
	cnc->new_only = 0;
    cnc->length = htonl(CISS_NOTIFY_DATA_SIZE);

    /* submit the request */
    error = ciss_start(cr);

 out:
    if (error) {
	/*
	 * On any failure, tear the request down and clear NOTIFY_OK so
	 * ciss_periodic() will retry the chain later.
	 */
	if (cr != NULL) {
	    if (cr->cr_data != NULL)
		free(cr->cr_data, CISS_MALLOC_CLASS);
	    ciss_release_request(cr);
	}
	sc->ciss_periodic_notify = NULL;
	debug(0, "can't submit notify event request");
	sc->ciss_flags &= ~CISS_FLAG_NOTIFY_OK;
    } else {
	debug(1, "notify event submitted");
	sc->ciss_flags |= CISS_FLAG_NOTIFY_OK;
    }
}

/*
 * Completion handler for the Notify Event request: print any adapter
 * message, detect notifier shutdown, otherwise hand the event to the
 * notify kernel thread and re-arm the chain.
 */
static void
ciss_notify_complete(struct ciss_request *cr)
{
    struct ciss_command	*cc;
    struct ciss_notify	*cn;
    struct ciss_softc	*sc;
    int			scsi_status;
    int			command_status;

    debug_called(1);

    cc = cr->cr_cc;
    cn = (struct ciss_notify *)cr->cr_data;
    sc = cr->cr_sc;

    /*
     * Report request results, decode status.
     */
    ciss_report_request(cr, &command_status, &scsi_status);

    /*
     * Abort the chain on a fatal error.
     *
     * XXX which of these are actually errors?
     */
    if ((command_status != CISS_CMD_STATUS_SUCCESS) &&
	(command_status != CISS_CMD_STATUS_TARGET_STATUS) &&
	(command_status != CISS_CMD_STATUS_TIMEOUT)) {	/* XXX timeout? */
	ciss_printf(sc, "fatal error in Notify Event request (%s)\n",
		    ciss_name_command_status(command_status));
	ciss_release_request(cr);
	sc->ciss_flags &= ~CISS_FLAG_NOTIFY_OK;
	return;
    }

    /*
     * If the adapter gave us a text message, print it.
     */
    if (cn->message[0] != 0)
	ciss_printf(sc, "*** %.80s\n", cn->message);

    debug(0, "notify event class %d subclass %d detail %d",
		cn->class, cn->subclass, cn->detail);

    /*
     * If the response indicates that the notifier has been aborted,
     * release the notifier command.
     */
    if ((cn->class == CISS_NOTIFY_NOTIFIER) &&
	(cn->subclass == CISS_NOTIFY_NOTIFIER_STATUS) &&
	(cn->detail == 1)) {
	debug(0, "notifier exiting");
	sc->ciss_flags &= ~CISS_FLAG_NOTIFY_OK;
	ciss_release_request(cr);
	sc->ciss_periodic_notify = NULL;
	wakeup(&sc->ciss_periodic_notify);
    } else {
	/* Handle notify events in a kernel thread */
	ciss_enqueue_notify(cr);
	sc->ciss_periodic_notify = NULL;
	wakeup(&sc->ciss_periodic_notify);
	wakeup(&sc->ciss_notify);
    }
    /*
     * Send a new notify event command, if we're not aborting.
     */
    if (!(sc->ciss_flags & CISS_FLAG_ABORTING)) {
	ciss_notify_event(sc);
    }
}

/************************************************************************
 * Abort the Notify Event chain.
 *
 * Note that we can't just abort the command in progress; we have to
 * explicitly issue an Abort Notify Event command in order for the
 * adapter to clean up correctly.
 *
 * If we are called with CISS_FLAG_ABORTING set in the adapter softc,
 * the chain will not restart itself.
 */
static int
ciss_notify_abort(struct ciss_softc *sc)
{
    struct ciss_request		*cr;
    struct ciss_command		*cc;
    struct ciss_notify_cdb	*cnc;
    int				error, command_status, scsi_status;

    debug_called(1);

    cr = NULL;
    error = 0;

    /* verify that there's an outstanding command */
    if (!(sc->ciss_flags & CISS_FLAG_NOTIFY_OK))
	goto out;

    /* get a command to issue the abort with */
    if ((error = ciss_get_request(sc, &cr)))
	goto out;

    /* get a buffer for the result */
    if ((cr->cr_data = malloc(CISS_NOTIFY_DATA_SIZE, CISS_MALLOC_CLASS,
			      M_NOWAIT)) == NULL) {
	debug(0, "can't get notify event request buffer");
	error = ENOMEM;
	goto out;
    }
    cr->cr_length = CISS_NOTIFY_DATA_SIZE;

    /* build the CDB */
    cc = cr->cr_cc;
    cc->header.address.physical.mode = CISS_HDR_ADDRESS_MODE_PERIPHERAL;
    cc->header.address.physical.bus = 0;
    cc->header.address.physical.target = 0;
    cc->cdb.cdb_length = sizeof(*cnc);
    cc->cdb.type = CISS_CDB_TYPE_COMMAND;
    cc->cdb.attribute = CISS_CDB_ATTRIBUTE_SIMPLE;
    cc->cdb.direction = CISS_CDB_DIRECTION_READ;
    cc->cdb.timeout = 0;	/* no timeout, we hope */

    cnc = (struct ciss_notify_cdb *)&(cc->cdb.cdb[0]);
    bzero(cnc, sizeof(*cnc));
    cnc->opcode = CISS_OPCODE_WRITE;
    cnc->command = CISS_COMMAND_ABORT_NOTIFY;
    cnc->length = htonl(CISS_NOTIFY_DATA_SIZE);

    ciss_print_request(cr);

    /*
     * Submit the request and wait for it to complete.
     */
    if ((error = ciss_synch_request(cr, 60 * 1000)) != 0) {
	ciss_printf(sc, "Abort Notify Event command failed (%d)\n", error);
	goto out;
    }

    /*
     * Check response.
     */
    ciss_report_request(cr, &command_status, &scsi_status);
    switch(command_status) {
    case CISS_CMD_STATUS_SUCCESS:
	break;
    case CISS_CMD_STATUS_INVALID_COMMAND:
	/*
	 * Some older adapters don't support the CISS version of this
	 * command.  Fall back to using the BMIC version.
	 */
	error = ciss_notify_abort_bmic(sc);
	if (error != 0)
	    goto out;
	break;

    case CISS_CMD_STATUS_TARGET_STATUS:
	/*
	 * This can happen if the adapter thinks there wasn't an outstanding
	 * Notify Event command but we did.  We clean up here.
	 */
	if (scsi_status == CISS_SCSI_STATUS_CHECK_CONDITION) {
	    if (sc->ciss_periodic_notify != NULL)
		ciss_release_request(sc->ciss_periodic_notify);
	    error = 0;
	    goto out;
	}
	/* FALLTHROUGH */

    default:
	ciss_printf(sc, "Abort Notify Event command failed (%s)\n",
		    ciss_name_command_status(command_status));
	error = EIO;
	goto out;
    }

    /*
     * Sleep waiting for the notifier command to complete.  Note
     * that if it doesn't, we may end up in a bad situation, since
     * the adapter may deliver it later.  Also note that the adapter
     * requires the Notify Event command to be cancelled in order to
     * maintain internal bookkeeping.
     */
    while (sc->ciss_periodic_notify != NULL) {
	error = msleep(&sc->ciss_periodic_notify, &sc->ciss_mtx, PRIBIO, "cissNEA", hz * 5);
	if (error == EWOULDBLOCK) {
	    ciss_printf(sc, "Notify Event command failed to abort, adapter may wedge.\n");
	    break;
	}
    }

 out:
    /* release the cancel request */
    if (cr != NULL) {
	if (cr->cr_data != NULL)
	    free(cr->cr_data, CISS_MALLOC_CLASS);
	ciss_release_request(cr);
    }
    if (error == 0)
	sc->ciss_flags &= ~CISS_FLAG_NOTIFY_OK;
    return(error);
}

/************************************************************************
 * Abort the Notify Event chain using a BMIC command.
 */
static int
ciss_notify_abort_bmic(struct ciss_softc *sc)
{
    struct ciss_request		*cr;
    int				error, command_status;

    debug_called(1);

    cr = NULL;
    error = 0;

    /* verify that there's an outstanding command */
    if (!(sc->ciss_flags & CISS_FLAG_NOTIFY_OK))
	goto out;

    /*
     * Build a BMIC command to cancel the Notify on Event command.
     *
     * Note that we are sending a CISS opcode here.  Odd.
     */
    if ((error = ciss_get_bmic_request(sc, &cr, CISS_COMMAND_ABORT_NOTIFY,
				       NULL, 0)) != 0)
	goto out;

    /*
     * Submit the request and wait for it to complete.
     */
    if ((error = ciss_synch_request(cr, 60 * 1000)) != 0) {
	ciss_printf(sc, "error sending BMIC Cancel Notify on Event command (%d)\n", error);
	goto out;
    }

    /*
     * Check response.
     */
    ciss_report_request(cr, &command_status, NULL);
    switch(command_status) {
    case CISS_CMD_STATUS_SUCCESS:
	break;
    default:
	ciss_printf(sc, "error cancelling Notify on Event (%s)\n",
		    ciss_name_command_status(command_status));
	error = EIO;
	goto out;
    }

 out:
    if (cr != NULL)
	ciss_release_request(cr);
    return(error);
}

/************************************************************************
 * Handle rescanning all the logical volumes when a notify event
 * causes the drives to come online or offline.
 */
static void
ciss_notify_rescan_logical(struct ciss_softc *sc)
{
    struct ciss_lun_report	*cll;
    struct ciss_ldrive		*ld;
    int				i, j, ndrives;

    /*
     * We must rescan all logical volumes to get the right logical
     * drive address.
     */
    cll = ciss_report_luns(sc, CISS_OPCODE_REPORT_LOGICAL_LUNS,
			   sc->ciss_cfg->max_logical_supported);
    if (cll == NULL)
	return;

    ndrives = (ntohl(cll->list_size) / sizeof(union ciss_device_address));

    /*
     * Delete any of the drives which were destroyed by the
     * firmware.
     */
    for (i = 0; i < sc->ciss_max_logical_bus; i++) {
	for (j = 0; j < sc->ciss_cfg->max_logical_supported; j++) {
	    ld = &sc->ciss_logical[i][j];

	    /* only touch drives flagged for update by the notify handler */
	    if (ld->cl_update == 0)
		continue;

	    if (ld->cl_status != CISS_LD_ONLINE) {
		ciss_cam_rescan_target(sc, i, j);
		ld->cl_update = 0;
		if (ld->cl_ldrive)
		    free(ld->cl_ldrive, CISS_MALLOC_CLASS);
		if (ld->cl_lstatus)
		    free(ld->cl_lstatus, CISS_MALLOC_CLASS);

		ld->cl_ldrive = NULL;
		ld->cl_lstatus = NULL;
	    }
	}
    }

    /*
     * Scan for new drives.
     */
    for (i = 0; i < ndrives; i++) {
	int	bus, target;

	bus = CISS_LUN_TO_BUS(cll->lun[i].logical.lun);
	target = CISS_LUN_TO_TARGET(cll->lun[i].logical.lun);
	ld = &sc->ciss_logical[bus][target];

	if (ld->cl_update == 0)
	    continue;

	ld->cl_update = 0;
	ld->cl_address = cll->lun[i];
	ld->cl_controller = &sc->ciss_controllers[bus];
	if (ciss_identify_logical(sc, ld) == 0) {
	    ciss_cam_rescan_target(sc, bus, target);
	}
    }
    free(cll, CISS_MALLOC_CLASS);
}

/************************************************************************
 * Handle a notify event relating to the status of a logical drive.
 *
 * XXX need to be able to defer some of these to properly handle
 * calling the "ID Physical drive" command, unless the 'extended'
 * drive IDs are always in BIG_MAP format.
 */
static void
ciss_notify_logical(struct ciss_softc *sc, struct ciss_notify *cn)
{
    struct ciss_ldrive	*ld;
    int			ostatus, bus, target;

    debug_called(2);

    bus = cn->device.physical.bus;
    target = cn->data.logical_status.logical_drive;
    ld = &sc->ciss_logical[bus][target];

    switch (cn->subclass) {
    case CISS_NOTIFY_LOGICAL_STATUS:
	switch (cn->detail) {
	case 0:
	    /* drive state change */
	    ciss_name_device(sc, bus, target);
	    ciss_printf(sc, "logical drive %d (%s) changed status %s->%s, spare status 0x%b\n",
			cn->data.logical_status.logical_drive, ld->cl_name,
			ciss_name_ldrive_status(cn->data.logical_status.previous_state),
			ciss_name_ldrive_status(cn->data.logical_status.new_state),
			cn->data.logical_status.spare_state,
			"\20\1configured\2rebuilding\3failed\4in use\5available\n");

	    /*
	     * Update our idea of the drive's status.
	     */
	    ostatus = ciss_decode_ldrive_status(cn->data.logical_status.previous_state);
	    ld->cl_status = ciss_decode_ldrive_status(cn->data.logical_status.new_state);
	    if (ld->cl_lstatus != NULL)
		ld->cl_lstatus->status = cn->data.logical_status.new_state;

	    /*
	     * Have CAM rescan the drive if its status has changed.
	     */
	    if (ostatus != ld->cl_status) {
		ld->cl_update = 1;
		ciss_notify_rescan_logical(sc);
	    }

	    break;

	case 1:	/* logical drive has recognised new media, needs Accept Media Exchange */
	    ciss_name_device(sc, bus, target);
	    ciss_printf(sc, "logical drive %d (%s) media exchanged, ready to go online\n",
			cn->data.logical_status.logical_drive, ld->cl_name);
	    ciss_accept_media(sc, ld);

	    ld->cl_update = 1;
	    ld->cl_status = ciss_decode_ldrive_status(cn->data.logical_status.new_state);
	    ciss_notify_rescan_logical(sc);
	    break;

	case 2:
	case 3:
	    /* rebuild aborted due to a read (2) or write (3) error */
	    ciss_printf(sc, "rebuild of logical drive %d (%s) failed due to %s error\n",
			cn->data.rebuild_aborted.logical_drive,
			ld->cl_name,
			(cn->detail == 2) ? "read" : "write");
	    break;
	}
	break;

    case CISS_NOTIFY_LOGICAL_ERROR:
	if (cn->detail == 0) {
	    ciss_printf(sc, "FATAL I/O ERROR on logical drive %d (%s), SCSI port %d ID %d\n",
			cn->data.io_error.logical_drive,
			ld->cl_name,
			cn->data.io_error.failure_bus,
			cn->data.io_error.failure_drive);
	    /* XXX should we take the drive down at this point, or will we be told? */
	}
	break;

    case CISS_NOTIFY_LOGICAL_SURFACE:
	if (cn->detail == 0)
	    ciss_printf(sc, "logical drive %d (%s) completed consistency initialisation\n",
			cn->data.consistency_completed.logical_drive,
			ld->cl_name);
	break;
    }
}

/************************************************************************
 * Handle a notify event relating to the status of a physical drive.
 */
/* intentionally empty: physical-drive status events are ignored */
static void
ciss_notify_physical(struct ciss_softc *sc, struct ciss_notify *cn)
{
}

/************************************************************************
 * Handle a notify event relating to the status of a physical drive.
*/ static void ciss_notify_hotplug(struct ciss_softc *sc, struct ciss_notify *cn) { struct ciss_lun_report *cll = NULL; int bus, target; switch (cn->subclass) { case CISS_NOTIFY_HOTPLUG_PHYSICAL: case CISS_NOTIFY_HOTPLUG_NONDISK: bus = CISS_BIG_MAP_BUS(sc, cn->data.drive.big_physical_drive_number); target = CISS_BIG_MAP_TARGET(sc, cn->data.drive.big_physical_drive_number); if (cn->detail == 0) { /* * Mark the device offline so that it'll start producing selection * timeouts to the upper layer. */ if ((bus >= 0) && (target >= 0)) sc->ciss_physical[bus][target].cp_online = 0; } else { /* * Rescan the physical lun list for new items */ cll = ciss_report_luns(sc, CISS_OPCODE_REPORT_PHYSICAL_LUNS, sc->ciss_cfg->max_physical_supported); if (cll == NULL) { ciss_printf(sc, "Warning, cannot get physical lun list\n"); break; } ciss_filter_physical(sc, cll); } break; default: ciss_printf(sc, "Unknown hotplug event %d\n", cn->subclass); return; } if (cll != NULL) free(cll, CISS_MALLOC_CLASS); } /************************************************************************ * Handle deferred processing of notify events. Notify events may need * sleep which is unsafe during an interrupt. 
*/ static void ciss_notify_thread(void *arg) { struct ciss_softc *sc; struct ciss_request *cr; struct ciss_notify *cn; sc = (struct ciss_softc *)arg; mtx_lock(&sc->ciss_mtx); for (;;) { if (STAILQ_EMPTY(&sc->ciss_notify) != 0 && (sc->ciss_flags & CISS_FLAG_THREAD_SHUT) == 0) { msleep(&sc->ciss_notify, &sc->ciss_mtx, PUSER, "idle", 0); } if (sc->ciss_flags & CISS_FLAG_THREAD_SHUT) break; cr = ciss_dequeue_notify(sc); if (cr == NULL) panic("cr null"); cn = (struct ciss_notify *)cr->cr_data; switch (cn->class) { case CISS_NOTIFY_HOTPLUG: ciss_notify_hotplug(sc, cn); break; case CISS_NOTIFY_LOGICAL: ciss_notify_logical(sc, cn); break; case CISS_NOTIFY_PHYSICAL: ciss_notify_physical(sc, cn); break; } ciss_release_request(cr); } sc->ciss_notify_thread = NULL; wakeup(&sc->ciss_notify_thread); mtx_unlock(&sc->ciss_mtx); kproc_exit(0); } /************************************************************************ * Start the notification kernel thread. */ static void ciss_spawn_notify_thread(struct ciss_softc *sc) { if (kproc_create((void(*)(void *))ciss_notify_thread, sc, &sc->ciss_notify_thread, 0, 0, "ciss_notify%d", device_get_unit(sc->ciss_dev))) panic("Could not create notify thread\n"); } /************************************************************************ * Kill the notification kernel thread. */ static void ciss_kill_notify_thread(struct ciss_softc *sc) { if (sc->ciss_notify_thread == NULL) return; sc->ciss_flags |= CISS_FLAG_THREAD_SHUT; wakeup(&sc->ciss_notify); msleep(&sc->ciss_notify_thread, &sc->ciss_mtx, PUSER, "thtrm", 0); } /************************************************************************ * Print a request. 
*/ static void ciss_print_request(struct ciss_request *cr) { struct ciss_softc *sc; struct ciss_command *cc; int i; sc = cr->cr_sc; cc = cr->cr_cc; ciss_printf(sc, "REQUEST @ %p\n", cr); ciss_printf(sc, " data %p/%d tag %d flags %b\n", cr->cr_data, cr->cr_length, cr->cr_tag, cr->cr_flags, "\20\1mapped\2sleep\3poll\4dataout\5datain\n"); ciss_printf(sc, " sg list/total %d/%d host tag 0x%x\n", cc->header.sg_in_list, cc->header.sg_total, cc->header.host_tag); switch(cc->header.address.mode.mode) { case CISS_HDR_ADDRESS_MODE_PERIPHERAL: case CISS_HDR_ADDRESS_MODE_MASK_PERIPHERAL: ciss_printf(sc, " physical bus %d target %d\n", cc->header.address.physical.bus, cc->header.address.physical.target); break; case CISS_HDR_ADDRESS_MODE_LOGICAL: ciss_printf(sc, " logical unit %d\n", cc->header.address.logical.lun); break; } ciss_printf(sc, " %s cdb length %d type %s attribute %s\n", (cc->cdb.direction == CISS_CDB_DIRECTION_NONE) ? "no-I/O" : (cc->cdb.direction == CISS_CDB_DIRECTION_READ) ? "READ" : (cc->cdb.direction == CISS_CDB_DIRECTION_WRITE) ? "WRITE" : "??", cc->cdb.cdb_length, (cc->cdb.type == CISS_CDB_TYPE_COMMAND) ? "command" : (cc->cdb.type == CISS_CDB_TYPE_MESSAGE) ? "message" : "??", (cc->cdb.attribute == CISS_CDB_ATTRIBUTE_UNTAGGED) ? "untagged" : (cc->cdb.attribute == CISS_CDB_ATTRIBUTE_SIMPLE) ? "simple" : (cc->cdb.attribute == CISS_CDB_ATTRIBUTE_HEAD_OF_QUEUE) ? "head-of-queue" : (cc->cdb.attribute == CISS_CDB_ATTRIBUTE_ORDERED) ? "ordered" : (cc->cdb.attribute == CISS_CDB_ATTRIBUTE_AUTO_CONTINGENT) ? 
"auto-contingent" : "??"); ciss_printf(sc, " %*D\n", cc->cdb.cdb_length, &cc->cdb.cdb[0], " "); if (cc->header.host_tag & CISS_HDR_HOST_TAG_ERROR) { /* XXX print error info */ } else { /* since we don't use chained s/g, don't support it here */ for (i = 0; i < cc->header.sg_in_list; i++) { if ((i % 4) == 0) ciss_printf(sc, " "); printf("0x%08x/%d ", (u_int32_t)cc->sg[i].address, cc->sg[i].length); if ((((i + 1) % 4) == 0) || (i == (cc->header.sg_in_list - 1))) printf("\n"); } } } /************************************************************************ * Print information about the status of a logical drive. */ static void ciss_print_ldrive(struct ciss_softc *sc, struct ciss_ldrive *ld) { int bus, target, i; if (ld->cl_lstatus == NULL) { printf("does not exist\n"); return; } /* print drive status */ switch(ld->cl_lstatus->status) { case CISS_LSTATUS_OK: printf("online\n"); break; case CISS_LSTATUS_INTERIM_RECOVERY: printf("in interim recovery mode\n"); break; case CISS_LSTATUS_READY_RECOVERY: printf("ready to begin recovery\n"); break; case CISS_LSTATUS_RECOVERING: bus = CISS_BIG_MAP_BUS(sc, ld->cl_lstatus->drive_rebuilding); target = CISS_BIG_MAP_BUS(sc, ld->cl_lstatus->drive_rebuilding); printf("being recovered, working on physical drive %d.%d, %u blocks remaining\n", bus, target, ld->cl_lstatus->blocks_to_recover); break; case CISS_LSTATUS_EXPANDING: printf("being expanded, %u blocks remaining\n", ld->cl_lstatus->blocks_to_recover); break; case CISS_LSTATUS_QUEUED_FOR_EXPANSION: printf("queued for expansion\n"); break; case CISS_LSTATUS_FAILED: printf("queued for expansion\n"); break; case CISS_LSTATUS_WRONG_PDRIVE: printf("wrong physical drive inserted\n"); break; case CISS_LSTATUS_MISSING_PDRIVE: printf("missing a needed physical drive\n"); break; case CISS_LSTATUS_BECOMING_READY: printf("becoming ready\n"); break; } /* print failed physical drives */ for (i = 0; i < CISS_BIG_MAP_ENTRIES / 8; i++) { bus = CISS_BIG_MAP_BUS(sc, 
ld->cl_lstatus->drive_failure_map[i]); target = CISS_BIG_MAP_TARGET(sc, ld->cl_lstatus->drive_failure_map[i]); if (bus == -1) continue; ciss_printf(sc, "physical drive %d:%d (%x) failed\n", bus, target, ld->cl_lstatus->drive_failure_map[i]); } } #ifdef CISS_DEBUG #include "opt_ddb.h" #ifdef DDB #include /************************************************************************ * Print information about the controller/driver. */ static void ciss_print_adapter(struct ciss_softc *sc) { int i, j; ciss_printf(sc, "ADAPTER:\n"); for (i = 0; i < CISSQ_COUNT; i++) { ciss_printf(sc, "%s %d/%d\n", i == 0 ? "free" : i == 1 ? "busy" : "complete", sc->ciss_qstat[i].q_length, sc->ciss_qstat[i].q_max); } ciss_printf(sc, "max_requests %d\n", sc->ciss_max_requests); ciss_printf(sc, "flags %b\n", sc->ciss_flags, "\20\1notify_ok\2control_open\3aborting\4running\21fake_synch\22bmic_abort\n"); for (i = 0; i < sc->ciss_max_logical_bus; i++) { for (j = 0; j < sc->ciss_cfg->max_logical_supported; j++) { ciss_printf(sc, "LOGICAL DRIVE %d: ", i); ciss_print_ldrive(sc, &sc->ciss_logical[i][j]); } } /* XXX Should physical drives be printed out here? */ for (i = 1; i < sc->ciss_max_requests; i++) ciss_print_request(sc->ciss_request + i); } /* DDB hook */ DB_COMMAND(ciss_prt, db_ciss_prt) { struct ciss_softc *sc; devclass_t dc; int maxciss, i; dc = devclass_find("ciss"); if ( dc == NULL ) { printf("%s: can't find devclass!\n", __func__); return; } maxciss = devclass_get_maxunit(dc); for (i = 0; i < maxciss; i++) { sc = devclass_get_softc(dc, i); ciss_print_adapter(sc); } } #endif #endif /************************************************************************ * Return a name for a logical drive status value. 
*/ static const char * ciss_name_ldrive_status(int status) { switch (status) { case CISS_LSTATUS_OK: return("OK"); case CISS_LSTATUS_FAILED: return("failed"); case CISS_LSTATUS_NOT_CONFIGURED: return("not configured"); case CISS_LSTATUS_INTERIM_RECOVERY: return("interim recovery"); case CISS_LSTATUS_READY_RECOVERY: return("ready for recovery"); case CISS_LSTATUS_RECOVERING: return("recovering"); case CISS_LSTATUS_WRONG_PDRIVE: return("wrong physical drive inserted"); case CISS_LSTATUS_MISSING_PDRIVE: return("missing physical drive"); case CISS_LSTATUS_EXPANDING: return("expanding"); case CISS_LSTATUS_BECOMING_READY: return("becoming ready"); case CISS_LSTATUS_QUEUED_FOR_EXPANSION: return("queued for expansion"); } return("unknown status"); } /************************************************************************ * Return an online/offline/nonexistent value for a logical drive * status value. */ static int ciss_decode_ldrive_status(int status) { switch(status) { case CISS_LSTATUS_NOT_CONFIGURED: return(CISS_LD_NONEXISTENT); case CISS_LSTATUS_OK: case CISS_LSTATUS_INTERIM_RECOVERY: case CISS_LSTATUS_READY_RECOVERY: case CISS_LSTATUS_RECOVERING: case CISS_LSTATUS_EXPANDING: case CISS_LSTATUS_QUEUED_FOR_EXPANSION: return(CISS_LD_ONLINE); case CISS_LSTATUS_FAILED: case CISS_LSTATUS_WRONG_PDRIVE: case CISS_LSTATUS_MISSING_PDRIVE: case CISS_LSTATUS_BECOMING_READY: default: return(CISS_LD_OFFLINE); } } /************************************************************************ * Return a name for a logical drive's organisation. 
*/ static const char * ciss_name_ldrive_org(int org) { switch(org) { case CISS_LDRIVE_RAID0: return("RAID 0"); case CISS_LDRIVE_RAID1: return("RAID 1(1+0)"); case CISS_LDRIVE_RAID4: return("RAID 4"); case CISS_LDRIVE_RAID5: return("RAID 5"); case CISS_LDRIVE_RAID51: return("RAID 5+1"); case CISS_LDRIVE_RAIDADG: return("RAID ADG"); } return("unknown"); } /************************************************************************ * Return a name for a command status value. */ static const char * ciss_name_command_status(int status) { switch(status) { case CISS_CMD_STATUS_SUCCESS: return("success"); case CISS_CMD_STATUS_TARGET_STATUS: return("target status"); case CISS_CMD_STATUS_DATA_UNDERRUN: return("data underrun"); case CISS_CMD_STATUS_DATA_OVERRUN: return("data overrun"); case CISS_CMD_STATUS_INVALID_COMMAND: return("invalid command"); case CISS_CMD_STATUS_PROTOCOL_ERROR: return("protocol error"); case CISS_CMD_STATUS_HARDWARE_ERROR: return("hardware error"); case CISS_CMD_STATUS_CONNECTION_LOST: return("connection lost"); case CISS_CMD_STATUS_ABORTED: return("aborted"); case CISS_CMD_STATUS_ABORT_FAILED: return("abort failed"); case CISS_CMD_STATUS_UNSOLICITED_ABORT: return("unsolicited abort"); case CISS_CMD_STATUS_TIMEOUT: return("timeout"); case CISS_CMD_STATUS_UNABORTABLE: return("unabortable"); } return("unknown status"); } /************************************************************************ * Handle an open on the control device. */ static int ciss_open(struct cdev *dev, int flags, int fmt, struct thread *p) { struct ciss_softc *sc; debug_called(1); sc = (struct ciss_softc *)dev->si_drv1; /* we might want to veto if someone already has us open */ mtx_lock(&sc->ciss_mtx); sc->ciss_flags |= CISS_FLAG_CONTROL_OPEN; mtx_unlock(&sc->ciss_mtx); return(0); } /************************************************************************ * Handle the last close on the control device. 
 */
static int
ciss_close(struct cdev *dev, int flags, int fmt, struct thread *p)
{
    struct ciss_softc	*sc;

    debug_called(1);

    sc = (struct ciss_softc *)dev->si_drv1;

    /* Drop the "control device open" flag under the softc lock. */
    mtx_lock(&sc->ciss_mtx);
    sc->ciss_flags &= ~CISS_FLAG_CONTROL_OPEN;
    mtx_unlock(&sc->ciss_mtx);
    return (0);
}

/********************************************************************************
 * Handle adapter-specific control operations.
 *
 * Note that the API here is compatible with the Linux driver, in order to
 * simplify the porting of Compaq's userland tools.
 */
static int
ciss_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int32_t flag, struct thread *p)
{
    struct ciss_softc		*sc;
    IOCTL_Command_struct	*ioc	= (IOCTL_Command_struct *)addr;
#ifdef __amd64__
    /* 32-bit compat shadow copy used by CCISS_PASSTHRU32 below. */
    IOCTL_Command_struct32	*ioc32	= (IOCTL_Command_struct32 *)addr;
    IOCTL_Command_struct	ioc_swab;
#endif
    int				error;

    debug_called(1);

    sc = (struct ciss_softc *)dev->si_drv1;
    error = 0;
    mtx_lock(&sc->ciss_mtx);
    switch(cmd) {
    case CCISS_GETQSTATS:
    {
	/* Copy out driver queue statistics for the requested queue. */
	union ciss_statrequest *cr = (union ciss_statrequest *)addr;

	switch (cr->cs_item) {
	case CISSQ_FREE:
	case CISSQ_NOTIFY:
	    bcopy(&sc->ciss_qstat[cr->cs_item], &cr->cs_qstat,
		sizeof(struct ciss_qstat));
	    break;
	default:
	    error = ENOIOCTL;
	    break;
	}
	break;
    }

    case CCISS_GETPCIINFO:
    {
	/* Report the adapter's PCI location and subsystem ID. */
	cciss_pci_info_struct	*pis = (cciss_pci_info_struct *)addr;

	pis->bus = pci_get_bus(sc->ciss_dev);
	pis->dev_fn = pci_get_slot(sc->ciss_dev);
	pis->board_id = (pci_get_subvendor(sc->ciss_dev) << 16) |
		pci_get_subdevice(sc->ciss_dev);
	break;
    }

    case CCISS_GETINTINFO:
    {
	/* Read the current interrupt-coalescing settings. */
	cciss_coalint_struct	*cis = (cciss_coalint_struct *)addr;

	cis->delay = sc->ciss_cfg->interrupt_coalesce_delay;
	cis->count = sc->ciss_cfg->interrupt_coalesce_count;
	break;
    }

    case CCISS_SETINTINFO:
    {
	cciss_coalint_struct	*cis = (cciss_coalint_struct *)addr;

	if ((cis->delay == 0) && (cis->count == 0)) {
	    error = EINVAL;
	    break;
	}

	/*
	 * XXX apparently this is only safe if the controller is idle,
	 *     we should suspend it before doing this.
	 */
	sc->ciss_cfg->interrupt_coalesce_delay = cis->delay;
	sc->ciss_cfg->interrupt_coalesce_count = cis->count;
	if (ciss_update_config(sc))
	    error = EIO;

	/* XXX resume the controller here */
	break;
    }

    case CCISS_GETNODENAME:
	bcopy(sc->ciss_cfg->server_name, (NodeName_type *)addr,
	      sizeof(NodeName_type));
	break;

    case CCISS_SETNODENAME:
	bcopy((NodeName_type *)addr, sc->ciss_cfg->server_name,
	      sizeof(NodeName_type));
	if (ciss_update_config(sc))
	    error = EIO;
	break;

    case CCISS_GETHEARTBEAT:
	*(Heartbeat_type *)addr = sc->ciss_cfg->heartbeat;
	break;

    case CCISS_GETBUSTYPES:
	*(BusTypes_type *)addr = sc->ciss_cfg->bus_types;
	break;

    case CCISS_GETFIRMVER:
	bcopy(sc->ciss_id->running_firmware_revision, (FirmwareVer_type *)addr,
	      sizeof(FirmwareVer_type));
	break;

    case CCISS_GETDRIVERVER:
	*(DriverVer_type *)addr = CISS_DRIVER_VERSION;
	break;

    case CCISS_REVALIDVOLS:
	/*
	 * This is a bit ugly; to do it "right" we really need
	 * to find any disks that have changed, kick CAM off them,
	 * then rescan only these disks.  It'd be nice if they
	 * a) told us which disk(s) they were going to play with,
	 * and b) which ones had arrived. 8(
	 */
	break;

#ifdef __amd64__
    case CCISS_PASSTHRU32:
	/* Widen the 32-bit ioctl layout, then fall into CCISS_PASSTHRU. */
	ioc_swab.LUN_info	= ioc32->LUN_info;
	ioc_swab.Request	= ioc32->Request;
	ioc_swab.error_info	= ioc32->error_info;
	ioc_swab.buf_size	= ioc32->buf_size;
	ioc_swab.buf		= (u_int8_t *)(uintptr_t)ioc32->buf;
	ioc			= &ioc_swab;
	/* FALLTHROUGH */
#endif

    case CCISS_PASSTHRU:
	error = ciss_user_command(sc, ioc);
	break;

    default:
	debug(0, "unknown ioctl 0x%lx", cmd);

	debug(1, "CCISS_GETPCIINFO: 0x%lx", CCISS_GETPCIINFO);
	debug(1, "CCISS_GETINTINFO: 0x%lx", CCISS_GETINTINFO);
	debug(1, "CCISS_SETINTINFO: 0x%lx", CCISS_SETINTINFO);
	debug(1, "CCISS_GETNODENAME: 0x%lx", CCISS_GETNODENAME);
	debug(1, "CCISS_SETNODENAME: 0x%lx", CCISS_SETNODENAME);
	debug(1, "CCISS_GETHEARTBEAT: 0x%lx", CCISS_GETHEARTBEAT);
	debug(1, "CCISS_GETBUSTYPES: 0x%lx", CCISS_GETBUSTYPES);
	debug(1, "CCISS_GETFIRMVER: 0x%lx", CCISS_GETFIRMVER);
	debug(1, "CCISS_GETDRIVERVER: 0x%lx", CCISS_GETDRIVERVER);
	debug(1, "CCISS_REVALIDVOLS: 0x%lx", CCISS_REVALIDVOLS);
	debug(1, "CCISS_PASSTHRU: 0x%lx", CCISS_PASSTHRU);
	error = ENOIOCTL;
	break;
    }

    mtx_unlock(&sc->ciss_mtx);
    return(error);
}
Index: stable/11/sys/dev/dpt/dpt_scsi.c
===================================================================
--- stable/11/sys/dev/dpt/dpt_scsi.c	(revision 335137)
+++ stable/11/sys/dev/dpt/dpt_scsi.c	(revision 335138)
@@ -1,2523 +1,2523 @@
/*-
 * Copyright (c) 1997 by Simon Shapiro
 * All Rights Reserved
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3.
The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * dpt_scsi.c: SCSI dependent code for the DPT driver * * credits: Assisted by Mike Neuffer in the early low level DPT code * Thanx to Mark Salyzyn of DPT for his assistance. * Special thanx to Justin Gibbs for invaluable help in * making this driver look and work like a FreeBSD component. * Last but not least, many thanx to UCB and the FreeBSD * team for creating and maintaining such a wonderful O/S. * * TODO: * Add ISA probe code. * * Add driver-level RAID-0. This will allow interoperability with * NiceTry, M$-Doze, Win-Dog, Slowlaris, etc., in recognizing RAID * arrays that span controllers (Wow!). 
*/ #define _DPT_C_ #include "opt_dpt.h" #include "opt_eisa.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* dpt_isa.c, dpt_eisa.c, and dpt_pci.c need this in a central place */ devclass_t dpt_devclass; #define microtime_now dpt_time_now() #define dpt_inl(dpt, port) \ bus_read_4((dpt)->io_res, (dpt)->io_offset + port) #define dpt_inb(dpt, port) \ bus_read_1((dpt)->io_res, (dpt)->io_offset + port) #define dpt_outl(dpt, port, value) \ bus_write_4((dpt)->io_res, (dpt)->io_offset + port, value) #define dpt_outb(dpt, port, value) \ bus_write_1((dpt)->io_res, (dpt)->io_offset + port, value) /* * These will have to be setup by parameters passed at boot/load time. For * performance reasons, we make them constants for the time being. */ #define dpt_min_segs DPT_MAX_SEGS #define dpt_max_segs DPT_MAX_SEGS /* Definitions for our use of the SIM private CCB area */ #define ccb_dccb_ptr spriv_ptr0 #define ccb_dpt_ptr spriv_ptr1 /* ================= Private Inline Function declarations ===================*/ static __inline int dpt_just_reset(dpt_softc_t * dpt); static __inline int dpt_raid_busy(dpt_softc_t * dpt); #ifdef DEV_EISA static __inline int dpt_pio_wait (u_int32_t, u_int, u_int, u_int); #endif static __inline int dpt_wait(dpt_softc_t *dpt, u_int bits, u_int state); static __inline struct dpt_ccb* dptgetccb(struct dpt_softc *dpt); static __inline void dptfreeccb(struct dpt_softc *dpt, struct dpt_ccb *dccb); static __inline bus_addr_t dptccbvtop(struct dpt_softc *dpt, struct dpt_ccb *dccb); static __inline int dpt_send_immediate(dpt_softc_t *dpt, eata_ccb_t *cmd_block, u_int32_t cmd_busaddr, u_int retries, u_int ifc, u_int code, u_int code2); /* ==================== Private Function declarations =======================*/ static void dptmapmem(void *arg, bus_dma_segment_t *segs, int nseg, int error); static struct sg_map_node* 
			dptallocsgmap(struct dpt_softc *dpt);
static int		dptallocccbs(dpt_softc_t *dpt);

static int		dpt_get_conf(dpt_softc_t *dpt, dpt_ccb_t *dccb,
				     u_int32_t dccb_busaddr, u_int size,
				     u_int page, u_int target, int extent);
static void		dpt_detect_cache(dpt_softc_t *dpt, dpt_ccb_t *dccb,
					 u_int32_t dccb_busaddr,
					 u_int8_t *buff);
static void		dpt_poll(struct cam_sim *sim);
static void		dpt_intr_locked(dpt_softc_t *dpt);
static void		dptexecuteccb(void *arg, bus_dma_segment_t *dm_segs,
				      int nseg, int error);
static void		dpt_action(struct cam_sim *sim, union ccb *ccb);
static int		dpt_send_eata_command(dpt_softc_t *dpt, eata_ccb_t *cmd,
					      u_int32_t cmd_busaddr,
					      u_int command, u_int retries,
					      u_int ifc, u_int code,
					      u_int code2);
static void		dptprocesserror(dpt_softc_t *dpt, dpt_ccb_t *dccb,
					union ccb *ccb, u_int hba_stat,
					u_int scsi_stat, u_int32_t resid);
static void		dpttimeout(void *arg);
static void		dptshutdown(void *arg, int howto);

/* ================= Private Inline Function definitions ====================*/
/*
 * Return 1 if the controller's registers 2..5 read 'D','P','T','H',
 * the signature the firmware leaves immediately after a reset.
 */
static __inline int
dpt_just_reset(dpt_softc_t * dpt)
{
	if ((dpt_inb(dpt, 2) == 'D')
	 && (dpt_inb(dpt, 3) == 'P')
	 && (dpt_inb(dpt, 4) == 'T')
	 && (dpt_inb(dpt, 5) == 'H'))
		return (1);
	else
		return (0);
}

/*
 * Return 1 if registers 0..2 read 'D','P','T' — the firmware's
 * "RAID drives still spinning up" indication.
 */
static __inline int
dpt_raid_busy(dpt_softc_t * dpt)
{
	if ((dpt_inb(dpt, 0) == 'D')
	 && (dpt_inb(dpt, 1) == 'P')
	 && (dpt_inb(dpt, 2) == 'T'))
		return (1);
	else
		return (0);
}

#ifdef DEV_EISA
/*
 * Programmed-I/O poll (inb on a raw port) of (reg & bits) against state,
 * up to 20000 * 50us.  Returns 0 on the expected condition, -1 on timeout.
 */
static __inline int
dpt_pio_wait (u_int32_t base, u_int reg, u_int bits, u_int state)
{
	int   i;
	u_int c;

	for (i = 0; i < 20000; i++) {	/* wait 20ms for not busy */
		c = inb(base + reg) & bits;
		if (!(c == state))
			return (0);
		else
			DELAY(50);
	}
	return (-1);
}
#endif

/*
 * Poll the HA_RSTATUS register (via bus-space) until (status & bits)
 * equals state; 0 on success, -1 after ~20000 * 50us.
 */
static __inline int
dpt_wait(dpt_softc_t *dpt, u_int bits, u_int state)
{
	int   i;
	u_int c;

	for (i = 0; i < 20000; i++) {	/* wait 20ms for not busy */
		c = dpt_inb(dpt, HA_RSTATUS) & bits;
		if (c == state)
			return (0);
		else
			DELAY(50);
	}
	return (-1);
}

/*
 * Pop a free DCCB, growing the pool via dptallocccbs() if the pool is
 * not yet at its maximum.  May return NULL when exhausted.
 */
static __inline struct dpt_ccb*
dptgetccb(struct dpt_softc *dpt)
{
	struct	dpt_ccb* dccb;

	if (!dumping)
		mtx_assert(&dpt->lock, MA_OWNED);
	if ((dccb = SLIST_FIRST(&dpt->free_dccb_list)) != NULL) {
		SLIST_REMOVE_HEAD(&dpt->free_dccb_list, links);
		dpt->free_dccbs--;
	} else if (dpt->total_dccbs < dpt->max_dccbs) {
		dptallocccbs(dpt);
		dccb = SLIST_FIRST(&dpt->free_dccb_list);
		if (dccb == NULL)
			device_printf(dpt->dev, "Can't malloc DCCB\n");
		else {
			SLIST_REMOVE_HEAD(&dpt->free_dccb_list, links);
			dpt->free_dccbs--;
		}
	}

	return (dccb);
}

/*
 * Return a DCCB to the free list, unhooking it from the pending list if
 * it was active and releasing a frozen SIMQ if a resource shortage or an
 * explicit release request is outstanding.
 */
static __inline void
dptfreeccb(struct dpt_softc *dpt, struct dpt_ccb *dccb)
{
	if (!dumping)
		mtx_assert(&dpt->lock, MA_OWNED);
	if ((dccb->state & DCCB_ACTIVE) != 0)
		LIST_REMOVE(&dccb->ccb->ccb_h, sim_links.le);
	if ((dccb->state & DCCB_RELEASE_SIMQ) != 0)
		dccb->ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
	else if (dpt->resource_shortage != 0
	 && (dccb->ccb->ccb_h.status & CAM_RELEASE_SIMQ) == 0) {
		dccb->ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
		dpt->resource_shortage = FALSE;
	}
	dccb->state = DCCB_FREE;
	SLIST_INSERT_HEAD(&dpt->free_dccb_list, dccb, links);
	++dpt->free_dccbs;
}

/* Translate a DCCB's kernel virtual address to its bus address. */
static __inline bus_addr_t
dptccbvtop(struct dpt_softc *dpt, struct dpt_ccb *dccb)
{
	return (dpt->dpt_ccb_busbase
	      + (u_int32_t)((caddr_t)dccb - (caddr_t)dpt->dpt_dccbs));
}

/* Translate a DCCB bus address back to its kernel virtual address. */
static __inline struct dpt_ccb *
dptccbptov(struct dpt_softc *dpt, bus_addr_t busaddr)
{
	return (dpt->dpt_dccbs
	     +  ((struct dpt_ccb *)busaddr
	      - (struct dpt_ccb *)dpt->dpt_ccb_busbase));
}

/*
 * Send a command for immediate execution by the DPT
 * See above function for IMPORTANT notes.
*/ static __inline int dpt_send_immediate(dpt_softc_t *dpt, eata_ccb_t *cmd_block, u_int32_t cmd_busaddr, u_int retries, u_int ifc, u_int code, u_int code2) { return (dpt_send_eata_command(dpt, cmd_block, cmd_busaddr, EATA_CMD_IMMEDIATE, retries, ifc, code, code2)); } /* ===================== Private Function definitions =======================*/ static void dptmapmem(void *arg, bus_dma_segment_t *segs, int nseg, int error) { bus_addr_t *busaddrp; busaddrp = (bus_addr_t *)arg; *busaddrp = segs->ds_addr; } static struct sg_map_node * dptallocsgmap(struct dpt_softc *dpt) { struct sg_map_node *sg_map; sg_map = malloc(sizeof(*sg_map), M_DEVBUF, M_NOWAIT); if (sg_map == NULL) return (NULL); /* Allocate S/G space for the next batch of CCBS */ if (bus_dmamem_alloc(dpt->sg_dmat, (void **)&sg_map->sg_vaddr, BUS_DMA_NOWAIT, &sg_map->sg_dmamap) != 0) { free(sg_map, M_DEVBUF); return (NULL); } (void)bus_dmamap_load(dpt->sg_dmat, sg_map->sg_dmamap, sg_map->sg_vaddr, PAGE_SIZE, dptmapmem, &sg_map->sg_physaddr, /*flags*/0); SLIST_INSERT_HEAD(&dpt->sg_maps, sg_map, links); return (sg_map); } /* * Allocate another chunk of CCB's. Return count of entries added. */ static int dptallocccbs(dpt_softc_t *dpt) { struct dpt_ccb *next_ccb; struct sg_map_node *sg_map; bus_addr_t physaddr; dpt_sg_t *segs; int newcount; int i; if (!dumping) mtx_assert(&dpt->lock, MA_OWNED); next_ccb = &dpt->dpt_dccbs[dpt->total_dccbs]; if (next_ccb == dpt->dpt_dccbs) { /* * First time through. Re-use the S/G * space we allocated for initialization * CCBS. 
*/ sg_map = SLIST_FIRST(&dpt->sg_maps); } else { sg_map = dptallocsgmap(dpt); } if (sg_map == NULL) return (0); segs = sg_map->sg_vaddr; physaddr = sg_map->sg_physaddr; newcount = (PAGE_SIZE / (dpt->sgsize * sizeof(dpt_sg_t))); for (i = 0; dpt->total_dccbs < dpt->max_dccbs && i < newcount; i++) { int error; error = bus_dmamap_create(dpt->buffer_dmat, /*flags*/0, &next_ccb->dmamap); if (error != 0) break; callout_init_mtx(&next_ccb->timer, &dpt->lock, 0); next_ccb->sg_list = segs; next_ccb->sg_busaddr = htonl(physaddr); next_ccb->eata_ccb.cp_dataDMA = htonl(physaddr); next_ccb->eata_ccb.cp_statDMA = htonl(dpt->sp_physaddr); next_ccb->eata_ccb.cp_reqDMA = htonl(dptccbvtop(dpt, next_ccb) + offsetof(struct dpt_ccb, sense_data)); next_ccb->eata_ccb.cp_busaddr = dpt->dpt_ccb_busend; next_ccb->state = DCCB_FREE; next_ccb->tag = dpt->total_dccbs; SLIST_INSERT_HEAD(&dpt->free_dccb_list, next_ccb, links); segs += dpt->sgsize; physaddr += (dpt->sgsize * sizeof(dpt_sg_t)); dpt->dpt_ccb_busend += sizeof(*next_ccb); next_ccb++; dpt->total_dccbs++; } return (i); } #ifdef DEV_EISA dpt_conf_t * dpt_pio_get_conf (u_int32_t base) { static dpt_conf_t * conf; u_int16_t * p; int i; /* * Allocate a dpt_conf_t */ if (!conf) { conf = (dpt_conf_t *)malloc(sizeof(dpt_conf_t), M_DEVBUF, M_NOWAIT | M_ZERO); } /* * If we didn't get one then we probably won't ever get one. */ if (!conf) { printf("dpt: unable to allocate dpt_conf_t\n"); return (NULL); } /* * Reset the controller. */ outb((base + HA_WCOMMAND), EATA_CMD_RESET); /* * Wait for the controller to become ready. * For some reason there can be -no- delays after calling reset * before we wait on ready status. */ if (dpt_pio_wait(base, HA_RSTATUS, HA_SBUSY, 0)) { printf("dpt: timeout waiting for controller to become ready\n"); return (NULL); } if (dpt_pio_wait(base, HA_RAUXSTAT, HA_ABUSY, 0)) { printf("dpt: timetout waiting for adapter ready.\n"); return (NULL); } /* * Send the PIO_READ_CONFIG command. 
*/ outb((base + HA_WCOMMAND), EATA_CMD_PIO_READ_CONFIG); /* * Read the data into the struct. */ p = (u_int16_t *)conf; for (i = 0; i < (sizeof(dpt_conf_t) / 2); i++) { if (dpt_pio_wait(base, HA_RSTATUS, HA_SDRQ, 0)) { if (bootverbose) printf("dpt: timeout in data read.\n"); return (NULL); } (*p) = inw(base + HA_RDATA); p++; } if (inb(base + HA_RSTATUS) & HA_SERROR) { if (bootverbose) printf("dpt: error reading configuration data.\n"); return (NULL); } #define BE_EATA_SIGNATURE 0x45415441 #define LE_EATA_SIGNATURE 0x41544145 /* * Test to see if we have a valid card. */ if ((conf->signature == BE_EATA_SIGNATURE) || (conf->signature == LE_EATA_SIGNATURE)) { while (inb(base + HA_RSTATUS) & HA_SDRQ) { inw(base + HA_RDATA); } return (conf); } return (NULL); } #endif /* * Read a configuration page into the supplied dpt_cont_t buffer. */ static int dpt_get_conf(dpt_softc_t *dpt, dpt_ccb_t *dccb, u_int32_t dccb_busaddr, u_int size, u_int page, u_int target, int extent) { eata_ccb_t *cp; u_int8_t status; int ndx; int result; mtx_assert(&dpt->lock, MA_OWNED); cp = &dccb->eata_ccb; bzero((void *)(uintptr_t)(volatile void *)dpt->sp, sizeof(*dpt->sp)); cp->Interpret = 1; cp->DataIn = 1; cp->Auto_Req_Sen = 1; cp->reqlen = sizeof(struct scsi_sense_data); cp->cp_id = target; cp->cp_LUN = 0; /* In the EATA packet */ cp->cp_lun = 0; /* In the SCSI command */ cp->cp_scsi_cmd = INQUIRY; cp->cp_len = size; cp->cp_extent = extent; cp->cp_page = page; cp->cp_channel = 0; /* DNC, Interpret mode is set */ cp->cp_identify = 1; cp->cp_datalen = htonl(size); /* * This could be a simple for loop, but we suspected the compiler To * have optimized it a bit too much. Wait for the controller to * become ready */ while (((status = dpt_inb(dpt, HA_RSTATUS)) != (HA_SREADY | HA_SSC) && (status != (HA_SREADY | HA_SSC | HA_SERROR)) && (status != (HA_SDRDY | HA_SERROR | HA_SDRQ))) || (dpt_wait(dpt, HA_SBUSY, 0))) { /* * RAID Drives still Spinning up? 
(This should only occur if * the DPT controller is in a NON PC (PCI?) platform). */ if (dpt_raid_busy(dpt)) { device_printf(dpt->dev, "WARNING: Get_conf() RSUS failed.\n"); return (0); } } DptStat_Reset_BUSY(dpt->sp); /* * XXXX We might want to do something more clever than aborting at * this point, like resetting (rebooting) the controller and trying * again. */ if ((result = dpt_send_eata_command(dpt, cp, dccb_busaddr, EATA_CMD_DMA_SEND_CP, 10000, 0, 0, 0)) != 0) { device_printf(dpt->dev, "WARNING: Get_conf() failed (%d) to send " "EATA_CMD_DMA_READ_CONFIG\n", result); return (0); } /* Wait for two seconds for a response. This can be slow */ for (ndx = 0; (ndx < 20000) && !((status = dpt_inb(dpt, HA_RAUXSTAT)) & HA_AIRQ); ndx++) { DELAY(50); } /* Grab the status and clear interrupts */ status = dpt_inb(dpt, HA_RSTATUS); /* * Check the status carefully. Return only if the * command was successful. */ if (((status & HA_SERROR) == 0) && (dpt->sp->hba_stat == 0) && (dpt->sp->scsi_stat == 0) && (dpt->sp->residue_len == 0)) return (0); if (dpt->sp->scsi_stat == SCSI_STATUS_CHECK_COND) return (0); return (1); } /* Detect Cache parameters and size */ static void dpt_detect_cache(dpt_softc_t *dpt, dpt_ccb_t *dccb, u_int32_t dccb_busaddr, u_int8_t *buff) { eata_ccb_t *cp; u_int8_t *param; int bytes; int result; int ndx; u_int8_t status; mtx_assert(&dpt->lock, MA_OWNED); /* * Default setting, for best performance.. * This is what virtually all cards default to.. */ dpt->cache_type = DPT_CACHE_WRITEBACK; dpt->cache_size = 0; cp = &dccb->eata_ccb; bzero((void *)(uintptr_t)(volatile void *)dpt->sp, sizeof(dpt->sp)); bzero(buff, 512); /* Setup the command structure */ cp->Interpret = 1; cp->DataIn = 1; cp->Auto_Req_Sen = 1; cp->reqlen = sizeof(struct scsi_sense_data); cp->cp_id = 0; /* who cares? The HBA will interpret.. 
*/ cp->cp_LUN = 0; /* In the EATA packet */ cp->cp_lun = 0; /* In the SCSI command */ cp->cp_channel = 0; cp->cp_scsi_cmd = EATA_CMD_DMA_SEND_CP; cp->cp_len = 56; cp->cp_extent = 0; cp->cp_page = 0; cp->cp_identify = 1; cp->cp_dispri = 1; /* * Build the EATA Command Packet structure * for a Log Sense Command. */ cp->cp_cdb[0] = 0x4d; cp->cp_cdb[1] = 0x0; cp->cp_cdb[2] = 0x40 | 0x33; cp->cp_cdb[7] = 1; cp->cp_datalen = htonl(512); result = dpt_send_eata_command(dpt, cp, dccb_busaddr, EATA_CMD_DMA_SEND_CP, 10000, 0, 0, 0); if (result != 0) { device_printf(dpt->dev, "WARNING: detect_cache() failed (%d) to send " "EATA_CMD_DMA_SEND_CP\n", result); return; } /* Wait for two seconds for a response. This can be slow... */ for (ndx = 0; (ndx < 20000) && !((status = dpt_inb(dpt, HA_RAUXSTAT)) & HA_AIRQ); ndx++) { DELAY(50); } /* Grab the status and clear interrupts */ status = dpt_inb(dpt, HA_RSTATUS); /* * Sanity check */ if (buff[0] != 0x33) { return; } bytes = DPT_HCP_LENGTH(buff); param = DPT_HCP_FIRST(buff); if (DPT_HCP_CODE(param) != 1) { /* * DPT Log Page layout error */ device_printf(dpt->dev, "NOTICE: Log Page (1) layout error\n"); return; } if (!(param[4] & 0x4)) { dpt->cache_type = DPT_NO_CACHE; return; } while (DPT_HCP_CODE(param) != 6) { param = DPT_HCP_NEXT(param); if ((param < buff) || (param >= &buff[bytes])) { return; } } if (param[4] & 0x2) { /* * Cache disabled */ dpt->cache_type = DPT_NO_CACHE; return; } if (param[4] & 0x4) { dpt->cache_type = DPT_CACHE_WRITETHROUGH; } /* XXX This isn't correct. This log parameter only has two bytes.... 
*/ #if 0 dpt->cache_size = param[5] | (param[6] << 8) | (param[7] << 16) | (param[8] << 24); #endif } static void dpt_poll(struct cam_sim *sim) { dpt_intr_locked(cam_sim_softc(sim)); } static void dptexecuteccb(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) { struct dpt_ccb *dccb; union ccb *ccb; struct dpt_softc *dpt; dccb = (struct dpt_ccb *)arg; ccb = dccb->ccb; dpt = (struct dpt_softc *)ccb->ccb_h.ccb_dpt_ptr; if (!dumping) mtx_assert(&dpt->lock, MA_OWNED); if (error != 0) { if (error != EFBIG) device_printf(dpt->dev, "Unexepected error 0x%x returned from " "bus_dmamap_load\n", error); if (ccb->ccb_h.status == CAM_REQ_INPROG) { xpt_freeze_devq(ccb->ccb_h.path, /*count*/1); ccb->ccb_h.status = CAM_REQ_TOO_BIG|CAM_DEV_QFRZN; } dptfreeccb(dpt, dccb); xpt_done(ccb); return; } if (nseg != 0) { dpt_sg_t *sg; bus_dma_segment_t *end_seg; bus_dmasync_op_t op; end_seg = dm_segs + nseg; /* Copy the segments into our SG list */ sg = dccb->sg_list; while (dm_segs < end_seg) { sg->seg_len = htonl(dm_segs->ds_len); sg->seg_addr = htonl(dm_segs->ds_addr); sg++; dm_segs++; } if (nseg > 1) { dccb->eata_ccb.scatter = 1; dccb->eata_ccb.cp_dataDMA = dccb->sg_busaddr; dccb->eata_ccb.cp_datalen = htonl(nseg * sizeof(dpt_sg_t)); } else { dccb->eata_ccb.cp_dataDMA = dccb->sg_list[0].seg_addr; dccb->eata_ccb.cp_datalen = dccb->sg_list[0].seg_len; } if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) op = BUS_DMASYNC_PREREAD; else op = BUS_DMASYNC_PREWRITE; bus_dmamap_sync(dpt->buffer_dmat, dccb->dmamap, op); } else { dccb->eata_ccb.cp_dataDMA = 0; dccb->eata_ccb.cp_datalen = 0; } /* * Last time we need to check if this CCB needs to * be aborted. 
*/ if (ccb->ccb_h.status != CAM_REQ_INPROG) { if (nseg != 0) bus_dmamap_unload(dpt->buffer_dmat, dccb->dmamap); dptfreeccb(dpt, dccb); xpt_done(ccb); return; } dccb->state |= DCCB_ACTIVE; ccb->ccb_h.status |= CAM_SIM_QUEUED; LIST_INSERT_HEAD(&dpt->pending_ccb_list, &ccb->ccb_h, sim_links.le); callout_reset_sbt(&dccb->timer, SBT_1MS * ccb->ccb_h.timeout, 0, dpttimeout, dccb, 0); if (dpt_send_eata_command(dpt, &dccb->eata_ccb, dccb->eata_ccb.cp_busaddr, EATA_CMD_DMA_SEND_CP, 0, 0, 0, 0) != 0) { ccb->ccb_h.status = CAM_NO_HBA; /* HBA dead or just busy?? */ if (nseg != 0) bus_dmamap_unload(dpt->buffer_dmat, dccb->dmamap); dptfreeccb(dpt, dccb); xpt_done(ccb); } } static void dpt_action(struct cam_sim *sim, union ccb *ccb) { struct dpt_softc *dpt; CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("dpt_action\n")); dpt = (struct dpt_softc *)cam_sim_softc(sim); mtx_assert(&dpt->lock, MA_OWNED); if ((dpt->state & DPT_HA_SHUTDOWN_ACTIVE) != 0) { xpt_print_path(ccb->ccb_h.path); printf("controller is shutdown. Aborting CCB.\n"); ccb->ccb_h.status = CAM_NO_HBA; xpt_done(ccb); return; } switch (ccb->ccb_h.func_code) { /* Common cases first */ case XPT_SCSI_IO: /* Execute the requested I/O operation */ { struct ccb_scsiio *csio; struct ccb_hdr *ccbh; struct dpt_ccb *dccb; struct eata_ccb *eccb; csio = &ccb->csio; ccbh = &ccb->ccb_h; /* Max CDB length is 12 bytes */ if (csio->cdb_len > 12) { ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); return; } if ((dccb = dptgetccb(dpt)) == NULL) { dpt->resource_shortage = 1; xpt_freeze_simq(sim, /*count*/1); ccb->ccb_h.status = CAM_REQUEUE_REQ; xpt_done(ccb); return; } eccb = &dccb->eata_ccb; /* Link dccb and ccb so we can find one from the other */ dccb->ccb = ccb; ccb->ccb_h.ccb_dccb_ptr = dccb; ccb->ccb_h.ccb_dpt_ptr = dpt; /* * Explicitly set all flags so that the compiler can * be smart about setting them. */ eccb->SCSI_Reset = 0; eccb->HBA_Init = 0; eccb->Auto_Req_Sen = (ccb->ccb_h.flags & CAM_DIS_AUTOSENSE) ? 
0 : 1; eccb->scatter = 0; eccb->Quick = 0; eccb->Interpret = ccb->ccb_h.target_id == dpt->hostid[cam_sim_bus(sim)] ? 1 : 0; eccb->DataOut = (ccb->ccb_h.flags & CAM_DIR_OUT) ? 1 : 0; eccb->DataIn = (ccb->ccb_h.flags & CAM_DIR_IN) ? 1 : 0; eccb->reqlen = csio->sense_len; eccb->cp_id = ccb->ccb_h.target_id; eccb->cp_channel = cam_sim_bus(sim); eccb->cp_LUN = ccb->ccb_h.target_lun; eccb->cp_luntar = 0; eccb->cp_dispri = (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) ? 0 : 1; eccb->cp_identify = 1; if ((ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0 && csio->tag_action != CAM_TAG_ACTION_NONE) { eccb->cp_msg[0] = csio->tag_action; eccb->cp_msg[1] = dccb->tag; } else { eccb->cp_msg[0] = 0; eccb->cp_msg[1] = 0; } eccb->cp_msg[2] = 0; if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) { if ((ccb->ccb_h.flags & CAM_CDB_PHYS) == 0) { bcopy(csio->cdb_io.cdb_ptr, eccb->cp_cdb, csio->cdb_len); } else { /* I guess I could map it in... */ ccb->ccb_h.status = CAM_REQ_INVALID; dptfreeccb(dpt, dccb); xpt_done(ccb); return; } } else { bcopy(csio->cdb_io.cdb_bytes, eccb->cp_cdb, csio->cdb_len); } /* * If we have any data to send with this command, * map it into bus space. */ /* Only use S/G if there is a transfer */ if ((ccbh->flags & CAM_DIR_MASK) != CAM_DIR_NONE) { int error; error = bus_dmamap_load_ccb(dpt->buffer_dmat, dccb->dmamap, ccb, dptexecuteccb, dccb, /*flags*/0); if (error == EINPROGRESS) { /* * So as to maintain ordering, * freeze the controller queue * until our mapping is * returned. */ xpt_freeze_simq(sim, 1); dccb->state |= CAM_RELEASE_SIMQ; } } else { /* * XXX JGibbs. * Does it want them both on or both off? * CAM_DIR_NONE is both on, so this code can * be removed if this is also what the DPT * exptects. 
*/ eccb->DataOut = 0; eccb->DataIn = 0; dptexecuteccb(dccb, NULL, 0, 0); } break; } case XPT_RESET_DEV: /* Bus Device Reset the specified SCSI device */ case XPT_ABORT: /* Abort the specified CCB */ /* XXX Implement */ ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); break; case XPT_SET_TRAN_SETTINGS: { ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; xpt_done(ccb); break; } case XPT_GET_TRAN_SETTINGS: /* Get default/user set transfer settings for the target */ { struct ccb_trans_settings *cts = &ccb->cts; struct ccb_trans_settings_scsi *scsi = &cts->proto_specific.scsi; struct ccb_trans_settings_spi *spi = &cts->xport_specific.spi; cts->protocol = PROTO_SCSI; cts->protocol_version = SCSI_REV_2; cts->transport = XPORT_SPI; cts->transport_version = 2; if (cts->type == CTS_TYPE_USER_SETTINGS) { spi->flags = CTS_SPI_FLAGS_DISC_ENB; spi->bus_width = (dpt->max_id > 7) ? MSG_EXT_WDTR_BUS_8_BIT : MSG_EXT_WDTR_BUS_16_BIT; spi->sync_period = 25; /* 10MHz */ if (spi->sync_period != 0) spi->sync_offset = 15; scsi->flags = CTS_SCSI_FLAGS_TAG_ENB; spi->valid = CTS_SPI_VALID_SYNC_RATE | CTS_SPI_VALID_SYNC_OFFSET | CTS_SPI_VALID_BUS_WIDTH | CTS_SPI_VALID_DISC; scsi->valid = CTS_SCSI_VALID_TQ; ccb->ccb_h.status = CAM_REQ_CMP; } else { ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; } xpt_done(ccb); break; } case XPT_CALC_GEOMETRY: { /* * XXX Use Adaptec translation until I find out how to * get this information from the card. 
*/ cam_calc_geometry(&ccb->ccg, /*extended*/1); xpt_done(ccb); break; } case XPT_RESET_BUS: /* Reset the specified SCSI bus */ { /* XXX Implement */ ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); break; } case XPT_TERM_IO: /* Terminate the I/O process */ /* XXX Implement */ ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); break; case XPT_PATH_INQ: /* Path routing inquiry */ { struct ccb_pathinq *cpi = &ccb->cpi; cpi->version_num = 1; cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE; if (dpt->max_id > 7) cpi->hba_inquiry |= PI_WIDE_16; cpi->target_sprt = 0; cpi->hba_misc = 0; cpi->hba_eng_cnt = 0; cpi->max_target = dpt->max_id; cpi->max_lun = dpt->max_lun; cpi->initiator_id = dpt->hostid[cam_sim_bus(sim)]; cpi->bus_id = cam_sim_bus(sim); cpi->base_transfer_speed = 3300; strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); strlcpy(cpi->hba_vid, "DPT", HBA_IDLEN); strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); cpi->unit_number = cam_sim_unit(sim); - cpi->transport = XPORT_SPI; - cpi->transport_version = 2; - cpi->protocol = PROTO_SCSI; - cpi->protocol_version = SCSI_REV_2; + cpi->transport = XPORT_SPI; + cpi->transport_version = 2; + cpi->protocol = PROTO_SCSI; + cpi->protocol_version = SCSI_REV_2; cpi->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); break; } default: ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); break; } } /* * This routine will try to send an EATA command to the DPT HBA. * It will, by default, try 20,000 times, waiting 50us between tries. * It returns 0 on success and 1 on failure. */ static int dpt_send_eata_command(dpt_softc_t *dpt, eata_ccb_t *cmd_block, u_int32_t cmd_busaddr, u_int command, u_int retries, u_int ifc, u_int code, u_int code2) { u_int loop; if (!retries) retries = 20000; /* * I hate this polling nonsense. Wish there was a way to tell the DPT * to go get commands at its own pace, or to interrupt when ready. * In the mean time we will measure how many itterations it really * takes. 
*/ for (loop = 0; loop < retries; loop++) { if ((dpt_inb(dpt, HA_RAUXSTAT) & HA_ABUSY) == 0) break; else DELAY(50); } if (loop < retries) { #ifdef DPT_MEASURE_PERFORMANCE if (loop > dpt->performance.max_eata_tries) dpt->performance.max_eata_tries = loop; if (loop < dpt->performance.min_eata_tries) dpt->performance.min_eata_tries = loop; #endif } else { #ifdef DPT_MEASURE_PERFORMANCE ++dpt->performance.command_too_busy; #endif return (1); } /* The controller is alive, advance the wedge timer */ #ifdef DPT_RESET_HBA dpt->last_contact = microtime_now; #endif if (cmd_block == NULL) cmd_busaddr = 0; #if (BYTE_ORDER == BIG_ENDIAN) else { cmd_busaddr = ((cmd_busaddr >> 24) & 0xFF) | ((cmd_busaddr >> 16) & 0xFF) | ((cmd_busaddr >> 8) & 0xFF) | (cmd_busaddr & 0xFF); } #endif /* And now the address */ dpt_outl(dpt, HA_WDMAADDR, cmd_busaddr); if (command == EATA_CMD_IMMEDIATE) { if (cmd_block == NULL) { dpt_outb(dpt, HA_WCODE2, code2); dpt_outb(dpt, HA_WCODE, code); } dpt_outb(dpt, HA_WIFC, ifc); } dpt_outb(dpt, HA_WCOMMAND, command); return (0); } /* ==================== Exported Function definitions =======================*/ void dpt_alloc(device_t dev) { dpt_softc_t *dpt = device_get_softc(dev); int i; mtx_init(&dpt->lock, "dpt", NULL, MTX_DEF); SLIST_INIT(&dpt->free_dccb_list); LIST_INIT(&dpt->pending_ccb_list); for (i = 0; i < MAX_CHANNELS; i++) dpt->resetlevel[i] = DPT_HA_OK; #ifdef DPT_MEASURE_PERFORMANCE dpt_reset_performance(dpt); #endif /* DPT_MEASURE_PERFORMANCE */ return; } void dpt_free(struct dpt_softc *dpt) { switch (dpt->init_level) { default: case 5: bus_dmamap_unload(dpt->dccb_dmat, dpt->dccb_dmamap); case 4: bus_dmamem_free(dpt->dccb_dmat, dpt->dpt_dccbs, dpt->dccb_dmamap); case 3: bus_dma_tag_destroy(dpt->dccb_dmat); case 2: bus_dma_tag_destroy(dpt->buffer_dmat); case 1: { struct sg_map_node *sg_map; while ((sg_map = SLIST_FIRST(&dpt->sg_maps)) != NULL) { SLIST_REMOVE_HEAD(&dpt->sg_maps, links); bus_dmamap_unload(dpt->sg_dmat, sg_map->sg_dmamap); 
bus_dmamem_free(dpt->sg_dmat, sg_map->sg_vaddr, sg_map->sg_dmamap); free(sg_map, M_DEVBUF); } bus_dma_tag_destroy(dpt->sg_dmat); } case 0: break; } mtx_destroy(&dpt->lock); } int dpt_alloc_resources (device_t dev) { dpt_softc_t * dpt; int error; dpt = device_get_softc(dev); dpt->io_res = bus_alloc_resource_any(dev, dpt->io_type, &dpt->io_rid, RF_ACTIVE); if (dpt->io_res == NULL) { device_printf(dev, "No I/O space?!\n"); error = ENOMEM; goto bad; } dpt->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &dpt->irq_rid, RF_ACTIVE); if (dpt->irq_res == NULL) { device_printf(dev, "No IRQ!\n"); error = ENOMEM; goto bad; } return (0); bad: return(error); } void dpt_release_resources (device_t dev) { struct dpt_softc * dpt; dpt = device_get_softc(dev); if (dpt->ih) bus_teardown_intr(dev, dpt->irq_res, dpt->ih); if (dpt->io_res) bus_release_resource(dev, dpt->io_type, dpt->io_rid, dpt->io_res); if (dpt->irq_res) bus_release_resource(dev, SYS_RES_IRQ, dpt->irq_rid, dpt->irq_res); if (dpt->drq_res) bus_release_resource(dev, SYS_RES_DRQ, dpt->drq_rid, dpt->drq_res); return; } static u_int8_t string_sizes[] = { sizeof(((dpt_inq_t*)NULL)->vendor), sizeof(((dpt_inq_t*)NULL)->modelNum), sizeof(((dpt_inq_t*)NULL)->firmware), sizeof(((dpt_inq_t*)NULL)->protocol), }; int dpt_init(struct dpt_softc *dpt) { dpt_conf_t conf; struct sg_map_node *sg_map; dpt_ccb_t *dccb; u_int8_t *strp; int index; int i; int retval; dpt->init_level = 0; SLIST_INIT(&dpt->sg_maps); mtx_lock(&dpt->lock); #ifdef DPT_RESET_BOARD device_printf(dpt->dev, "resetting HBA\n"); dpt_outb(dpt, HA_WCOMMAND, EATA_CMD_RESET); DELAY(750000); /* XXX Shouldn't we poll a status register or something??? */ #endif /* DMA tag for our S/G structures. 
	   We allocate in page sized chunks */
	if (bus_dma_tag_create(	/* parent	*/ dpt->parent_dmat,
				/* alignment	*/ 1,
				/* boundary	*/ 0,
				/* lowaddr	*/ BUS_SPACE_MAXADDR,
				/* highaddr	*/ BUS_SPACE_MAXADDR,
				/* filter	*/ NULL,
				/* filterarg	*/ NULL,
				/* maxsize	*/ PAGE_SIZE,
				/* nsegments	*/ 1,
				/* maxsegsz	*/ BUS_SPACE_MAXSIZE_32BIT,
				/* flags	*/ 0,
				/* lockfunc	*/ NULL,
				/* lockarg	*/ NULL,
				&dpt->sg_dmat) != 0) {
		goto error_exit;
	}

	dpt->init_level++;

	/*
	 * We allocate our DPT ccbs as a contiguous array of bus dma'able
	 * memory.  To get the allocation size, we need to know how many
	 * ccbs the card supports.  This requires a ccb.  We solve this
	 * chicken and egg problem by allocating some re-usable S/G space
	 * up front, and treating it as our status packet, CCB, and target
	 * memory space for these commands.
	 */
	sg_map = dptallocsgmap(dpt);
	if (sg_map == NULL)
		goto error_exit;

	/* Carve the bootstrap page: status packet, then one CCB, then data. */
	dpt->sp = (volatile dpt_sp_t *)sg_map->sg_vaddr;
	dccb = (struct dpt_ccb *)(uintptr_t)(volatile void *)&dpt->sp[1];
	bzero(dccb, sizeof(*dccb));
	dpt->sp_physaddr = sg_map->sg_physaddr;
	dccb->eata_ccb.cp_dataDMA =
	    htonl(sg_map->sg_physaddr + sizeof(dpt_sp_t) + sizeof(*dccb));
	dccb->eata_ccb.cp_busaddr = ~0;
	dccb->eata_ccb.cp_statDMA = htonl(dpt->sp_physaddr);
	dccb->eata_ccb.cp_reqDMA = htonl(dpt->sp_physaddr + sizeof(*dccb)
				       + offsetof(struct dpt_ccb, sense_data));

	/* Okay.  Fetch our config */
	bzero(&dccb[1], sizeof(conf)); /* data area */
	retval = dpt_get_conf(dpt, dccb, sg_map->sg_physaddr + sizeof(dpt_sp_t),
			      sizeof(conf), 0xc1, 7, 1);
	if (retval != 0) {
		device_printf(dpt->dev, "Failed to get board configuration\n");
		goto error_exit;
	}
	bcopy(&dccb[1], &conf, sizeof(conf));

	bzero(&dccb[1], sizeof(dpt->board_data));
	retval = dpt_get_conf(dpt, dccb, sg_map->sg_physaddr + sizeof(dpt_sp_t),
			      sizeof(dpt->board_data), 0, conf.scsi_id0, 0);
	if (retval != 0) {
		device_printf(dpt->dev, "Failed to get inquiry information\n");
		goto error_exit;
	}
	bcopy(&dccb[1], &dpt->board_data, sizeof(dpt->board_data));

	dpt_detect_cache(dpt, dccb, sg_map->sg_physaddr + sizeof(dpt_sp_t),
			 (u_int8_t *)&dccb[1]);

	/* The status-packet length field doubles as the EATA revision tag. */
	switch (ntohl(conf.splen)) {
	case DPT_EATA_REVA:
		dpt->EATA_revision = 'a';
		break;
	case DPT_EATA_REVB:
		dpt->EATA_revision = 'b';
		break;
	case DPT_EATA_REVC:
		dpt->EATA_revision = 'c';
		break;
	case DPT_EATA_REVZ:
		dpt->EATA_revision = 'z';
		break;
	default:
		dpt->EATA_revision = '?';
	}

	dpt->max_id	 = conf.MAX_ID;
	dpt->max_lun	 = conf.MAX_LUN;
	dpt->irq	 = conf.IRQ;
	dpt->dma_channel = (8 - conf.DMA_channel) & 7;
	dpt->channels	 = conf.MAX_CHAN + 1;
	dpt->state	|= DPT_HA_OK;
	if (conf.SECOND)
		dpt->primary = FALSE;
	else
		dpt->primary = TRUE;

	dpt->more_support = conf.MORE_support;

	/* Firmware 07G0 and later support immediate commands. */
	if (strncmp(dpt->board_data.firmware, "07G0", 4) >= 0)
		dpt->immediate_support = 1;
	else
		dpt->immediate_support = 0;

	dpt->broken_INQUIRY = FALSE;

	dpt->cplen = ntohl(conf.cplen);
	dpt->cppadlen = ntohs(conf.cppadlen);
	dpt->max_dccbs = ntohs(conf.queuesiz);

	if (dpt->max_dccbs > 256) {
		device_printf(dpt->dev, "Max CCBs reduced from %d to "
			      "256 due to tag algorithm\n", dpt->max_dccbs);
		dpt->max_dccbs = 256;
	}

	dpt->hostid[0] = conf.scsi_id0;
	dpt->hostid[1] = conf.scsi_id1;
	dpt->hostid[2] = conf.scsi_id2;

	if (conf.SG_64K)
		dpt->sgsize = 8192;
	else
		dpt->sgsize = ntohs(conf.SGsiz);

	/* We can only get 64k buffers, so don't bother to waste space.
	 */
	if (dpt->sgsize < 17 || dpt->sgsize > 32)
		dpt->sgsize = 32;

	if (dpt->sgsize > dpt_max_segs)
		dpt->sgsize = dpt_max_segs;

	/* DMA tag for mapping buffers into device visible space. */
	if (bus_dma_tag_create(	/* parent	*/ dpt->parent_dmat,
				/* alignment	*/ 1,
				/* boundary	*/ 0,
				/* lowaddr	*/ BUS_SPACE_MAXADDR,
				/* highaddr	*/ BUS_SPACE_MAXADDR,
				/* filter	*/ NULL,
				/* filterarg	*/ NULL,
				/* maxsize	*/ DFLTPHYS,
				/* nsegments	*/ dpt->sgsize,
				/* maxsegsz	*/ BUS_SPACE_MAXSIZE_32BIT,
				/* flags	*/ BUS_DMA_ALLOCNOW,
				/* lockfunc	*/ busdma_lock_mutex,
				/* lockarg	*/ &dpt->lock,
				&dpt->buffer_dmat) != 0) {
		device_printf(dpt->dev,
		    "bus_dma_tag_create(...,dpt->buffer_dmat) failed\n");
		goto error_exit;
	}

	dpt->init_level++;

	/* DMA tag for our ccb structures and interrupt status packet */
	if (bus_dma_tag_create(	/* parent	*/ dpt->parent_dmat,
				/* alignment	*/ 1,
				/* boundary	*/ 0,
				/* lowaddr	*/ BUS_SPACE_MAXADDR,
				/* highaddr	*/ BUS_SPACE_MAXADDR,
				/* filter	*/ NULL,
				/* filterarg	*/ NULL,
				/* maxsize	*/ (dpt->max_dccbs *
						    sizeof(struct dpt_ccb)) +
						    sizeof(dpt_sp_t),
				/* nsegments	*/ 1,
				/* maxsegsz	*/ BUS_SPACE_MAXSIZE_32BIT,
				/* flags	*/ 0,
				/* lockfunc	*/ NULL,
				/* lockarg	*/ NULL,
				&dpt->dccb_dmat) != 0) {
		device_printf(dpt->dev,
		    "bus_dma_tag_create(...,dpt->dccb_dmat) failed\n");
		goto error_exit;
	}

	dpt->init_level++;

	/* Allocation for our ccbs and interrupt status packet */
	if (bus_dmamem_alloc(dpt->dccb_dmat, (void **)&dpt->dpt_dccbs,
			     BUS_DMA_NOWAIT, &dpt->dccb_dmamap) != 0) {
		device_printf(dpt->dev,
		    "bus_dmamem_alloc(dpt->dccb_dmat,...) failed\n");
		goto error_exit;
	}

	dpt->init_level++;

	/* And permanently map them */
	bus_dmamap_load(dpt->dccb_dmat, dpt->dccb_dmamap,
			dpt->dpt_dccbs,
			(dpt->max_dccbs * sizeof(struct dpt_ccb))
			+ sizeof(dpt_sp_t),
			dptmapmem, &dpt->dpt_ccb_busbase,
			/*flags*/0);

	/* Clear them out.
	 */
	bzero(dpt->dpt_dccbs,
	      (dpt->max_dccbs * sizeof(struct dpt_ccb)) + sizeof(dpt_sp_t));

	dpt->dpt_ccb_busend = dpt->dpt_ccb_busbase;

	/* The real status packet lives just past the CCB array. */
	dpt->sp = (dpt_sp_t*)&dpt->dpt_dccbs[dpt->max_dccbs];
	dpt->sp_physaddr = dpt->dpt_ccb_busbase
			 + (dpt->max_dccbs * sizeof(dpt_ccb_t));
	dpt->init_level++;

	/* Allocate our first batch of ccbs */
	if (dptallocccbs(dpt) == 0) {
		device_printf(dpt->dev, "dptallocccbs(dpt) == 0\n");
		mtx_unlock(&dpt->lock);
		return (2);
	}

	/* Prepare for Target Mode */
	dpt->target_mode_enabled = 1;

	/* Nuke excess spaces from inquiry information */
	strp = dpt->board_data.vendor;
	for (i = 0; i < sizeof(string_sizes); i++) {
		index = string_sizes[i] - 1;
		while (index && (strp[index] == ' '))
			strp[index--] = '\0';
		strp += string_sizes[i];
	}

	device_printf(dpt->dev, "%.8s %.16s FW Rev. %.4s, ",
	       dpt->board_data.vendor,
	       dpt->board_data.modelNum, dpt->board_data.firmware);

	printf("%d channel%s, ", dpt->channels, dpt->channels > 1 ? "s" : "");

	if (dpt->cache_type != DPT_NO_CACHE
	 && dpt->cache_size != 0) {
		printf("%s Cache, ",
		       dpt->cache_type == DPT_CACHE_WRITETHROUGH
		     ? "Write-Through" : "Write-Back");
	}

	printf("%d CCBs\n", dpt->max_dccbs);
	mtx_unlock(&dpt->lock);
	return (0);

error_exit:
	mtx_unlock(&dpt->lock);
	return (1);
}

/*
 * Register one SIM per channel with CAM.  Returns the number of buses
 * successfully attached (0 on total failure).
 */
int
dpt_attach(dpt_softc_t *dpt)
{
	struct cam_devq *devq;
	int i;

	/*
	 * Create the device queue for our SIM.
*/ devq = cam_simq_alloc(dpt->max_dccbs); if (devq == NULL) return (0); mtx_lock(&dpt->lock); for (i = 0; i < dpt->channels; i++) { /* * Construct our SIM entry */ dpt->sims[i] = cam_sim_alloc(dpt_action, dpt_poll, "dpt", dpt, device_get_unit(dpt->dev), &dpt->lock, /*untagged*/2, /*tagged*/dpt->max_dccbs, devq); if (dpt->sims[i] == NULL) { if (i == 0) cam_simq_free(devq); else printf( "%s(): Unable to attach bus %d " "due to resource shortage\n", __func__, i); break; } if (xpt_bus_register(dpt->sims[i], dpt->dev, i) != CAM_SUCCESS){ cam_sim_free(dpt->sims[i], /*free_devq*/i == 0); dpt->sims[i] = NULL; break; } if (xpt_create_path(&dpt->paths[i], /*periph*/NULL, cam_sim_path(dpt->sims[i]), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { xpt_bus_deregister(cam_sim_path(dpt->sims[i])); cam_sim_free(dpt->sims[i], /*free_devq*/i == 0); dpt->sims[i] = NULL; break; } } mtx_unlock(&dpt->lock); if (i > 0) EVENTHANDLER_REGISTER(shutdown_final, dptshutdown, dpt, SHUTDOWN_PRI_DEFAULT); return (i); } int dpt_detach (device_t dev) { struct dpt_softc * dpt; int i; dpt = device_get_softc(dev); mtx_lock(&dpt->lock); for (i = 0; i < dpt->channels; i++) { #if 0 xpt_async(AC_LOST_DEVICE, dpt->paths[i], NULL); #endif xpt_free_path(dpt->paths[i]); xpt_bus_deregister(cam_sim_path(dpt->sims[i])); cam_sim_free(dpt->sims[i], /*free_devq*/TRUE); } mtx_unlock(&dpt->lock); dptshutdown((void *)dpt, SHUTDOWN_PRI_DEFAULT); dpt_release_resources(dev); dpt_free(dpt); return (0); } /* * This is the interrupt handler for the DPT driver. 
 */
void
dpt_intr(void *arg)
{
	dpt_softc_t *dpt;

	dpt = arg;
	mtx_lock(&dpt->lock);
	dpt_intr_locked(dpt);
	mtx_unlock(&dpt->lock);
}

/*
 * Service completions while the auxiliary status register reports a
 * pending interrupt.  Reading HA_RSTATUS acknowledges/clears each
 * interrupt.  Must be called with the softc lock held.
 */
void
dpt_intr_locked(dpt_softc_t *dpt)
{
	dpt_ccb_t      *dccb;
	union ccb      *ccb;
	u_int		status;
	u_int		aux_status;
	u_int		hba_stat;
	u_int		scsi_stat;
	u_int32_t	residue_len;	/* Number of bytes not transferred */

	/* First order of business is to check if this interrupt is for us */
	while (((aux_status = dpt_inb(dpt, HA_RAUXSTAT)) & HA_AIRQ) != 0) {
		/*
		 * What we want to do now, is to capture the status, all of it,
		 * move it where it belongs, wake up whoever sleeps waiting to
		 * process this result, and get out of here.
		 */
		if (dpt->sp->ccb_busaddr < dpt->dpt_ccb_busbase
		 || dpt->sp->ccb_busaddr >= dpt->dpt_ccb_busend) {
			device_printf(dpt->dev,
			    "Encountered bogus status packet\n");
			/* Clear the interrupt and give up on this pass. */
			status = dpt_inb(dpt, HA_RSTATUS);
			return;
		}

		dccb = dptccbptov(dpt, dpt->sp->ccb_busaddr);

		/* Poison the address so a stale packet is caught above. */
		dpt->sp->ccb_busaddr = ~0;

		/* Ignore status packets with EOC not set */
		if (dpt->sp->EOC == 0) {
			device_printf(dpt->dev,
			       "ERROR: Request %d received with "
			       "clear EOC.\n Marking as LOST.\n",
			       dccb->transaction_id);

			/* This CLEARS the interrupt! */
			status = dpt_inb(dpt, HA_RSTATUS);
			continue;
		}
		dpt->sp->EOC = 0;

		/*
		 * Double buffer the status information so the hardware can
		 * work on updating the status packet while we decipher the
		 * one we were just interrupted for.
		 * According to Mark Salyzyn, we only need few pieces of it.
		 */
		hba_stat = dpt->sp->hba_stat;
		scsi_stat = dpt->sp->scsi_stat;
		residue_len = dpt->sp->residue_len;

		/* Clear interrupts, check for error */
		if ((status = dpt_inb(dpt, HA_RSTATUS)) & HA_SERROR) {
			/*
			 * Error Condition. Check for magic cookie. Exit
			 * this test on earliest sign of non-reset condition
			 */

			/* Check that this is not a board reset interrupt */
			if (dpt_just_reset(dpt)) {
				device_printf(dpt->dev, "HBA rebooted.\n"
					" All transactions should be "
					"resubmitted\n");

				device_printf(dpt->dev,
				    ">>---->> This is incomplete, "
				    "fix me.... <<----<<");
				panic("DPT Rebooted");

			}
		}
		/* Process CCB */
		ccb = dccb->ccb;
		callout_stop(&dccb->timer);
		if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
			bus_dmasync_op_t op;

			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
				op = BUS_DMASYNC_POSTREAD;
			else
				op = BUS_DMASYNC_POSTWRITE;
			bus_dmamap_sync(dpt->buffer_dmat, dccb->dmamap, op);
			bus_dmamap_unload(dpt->buffer_dmat, dccb->dmamap);
		}

		/* Common Case inline... */
		if (hba_stat == HA_NO_ERROR) {
			ccb->csio.scsi_status = scsi_stat;
			ccb->ccb_h.status = 0;
			switch (scsi_stat) {
			case SCSI_STATUS_OK:
				ccb->ccb_h.status |= CAM_REQ_CMP;
				break;
			case SCSI_STATUS_CHECK_COND:
			case SCSI_STATUS_CMD_TERMINATED:
				/* Autosense data was collected by the HBA. */
				bcopy(&dccb->sense_data, &ccb->csio.sense_data,
				      ccb->csio.sense_len);
				ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
				/* FALLTHROUGH */
			default:
				ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
				/* XXX Freeze DevQ */
				break;
			}
			ccb->csio.resid = residue_len;
			dptfreeccb(dpt, dccb);
			xpt_done(ccb);
		} else {
			dptprocesserror(dpt, dccb, ccb, hba_stat, scsi_stat,
					residue_len);
		}
	}
}

/*
 * Translate an EATA HBA error status into the corresponding CAM status
 * and complete the CCB.
 */
static void
dptprocesserror(dpt_softc_t *dpt, dpt_ccb_t *dccb, union ccb *ccb,
		u_int hba_stat, u_int scsi_stat, u_int32_t resid)
{
	ccb->csio.resid = resid;
	switch (hba_stat) {
	case HA_ERR_SEL_TO:
		ccb->ccb_h.status = CAM_SEL_TIMEOUT;
		break;
	case HA_ERR_CMD_TO:
		ccb->ccb_h.status = CAM_CMD_TIMEOUT;
		break;
	case HA_SCSIBUS_RESET:
	case HA_HBA_POWER_UP:	/* Similar effect to a bus reset??? */
		ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
		break;
	case HA_CP_ABORTED:
	case HA_CP_RESET:	/* XXX ??? */
	case HA_CP_ABORT_NA:	/* XXX ??? */
	case HA_CP_RESET_NA:	/* XXX ???
				 */
		if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG)
			ccb->ccb_h.status = CAM_REQ_ABORTED;
		break;
	case HA_PCI_PARITY:
	case HA_PCI_MABORT:
	case HA_PCI_TABORT:
	case HA_PCI_STABORT:
	case HA_BUS_PARITY:
	case HA_PARITY_ERR:
	case HA_ECC_ERR:
		ccb->ccb_h.status = CAM_UNCOR_PARITY;
		break;
	case HA_UNX_MSGRJCT:
		ccb->ccb_h.status = CAM_MSG_REJECT_REC;
		break;
	case HA_UNX_BUSPHASE:
		ccb->ccb_h.status = CAM_SEQUENCE_FAIL;
		break;
	case HA_UNX_BUS_FREE:
		ccb->ccb_h.status = CAM_UNEXP_BUSFREE;
		break;
	case HA_SCSI_HUNG:
	case HA_RESET_STUCK:
		/*
		 * Dead???  Can the controller get unstuck
		 * from these conditions
		 */
		ccb->ccb_h.status = CAM_NO_HBA;
		break;
	case HA_RSENSE_FAIL:
		ccb->ccb_h.status = CAM_AUTOSENSE_FAIL;
		break;
	default:
		device_printf(dpt->dev, "Undocumented Error %x\n", hba_stat);
		printf("Please mail this message to shimon@simon-shapiro.org\n");
		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
		break;
	}
	dptfreeccb(dpt, dccb);
	xpt_done(ccb);
}

/*
 * Per-command timeout handler (callout).  Runs the ISR first in case the
 * completion interrupt was merely lost; otherwise aborts the single
 * offending command on the HBA.
 */
static void
dpttimeout(void *arg)
{
	struct dpt_ccb	 *dccb;
	union  ccb	 *ccb;
	struct dpt_softc *dpt;

	dccb = (struct dpt_ccb *)arg;
	ccb = dccb->ccb;
	dpt = (struct dpt_softc *)ccb->ccb_h.ccb_dpt_ptr;
	mtx_assert(&dpt->lock, MA_OWNED);
	xpt_print_path(ccb->ccb_h.path);
	printf("CCB %p - timed out\n", (void *)dccb);

	/*
	 * Try to clear any pending jobs.  FreeBSD will lose interrupts,
	 * leaving the controller suspended, and commands timed-out.
	 * By calling the interrupt handler, any command thus stuck will be
	 * completed.
	 */
	dpt_intr_locked(dpt);

	if ((dccb->state & DCCB_ACTIVE) == 0) {
		xpt_print_path(ccb->ccb_h.path);
		printf("CCB %p - timed out CCB already completed\n",
		       (void *)dccb);
		return;
	}

	/* Abort this particular command.  Leave all others running */
	dpt_send_immediate(dpt, &dccb->eata_ccb, dccb->eata_ccb.cp_busaddr,
			   /*retries*/20000, EATA_SPECIFIC_ABORT, 0, 0);
	ccb->ccb_h.status = CAM_CMD_TIMEOUT;
}

/*
 * Shutdown the controller and ensure that the cache is completely flushed.
 * Called from the shutdown_final event after all disk access has completed.
 */
static void
dptshutdown(void *arg, int howto)
{
	dpt_softc_t *dpt;

	dpt = (dpt_softc_t *)arg;

	device_printf(dpt->dev,
	    "Shutting down (mode %x) HBA. Please wait...\n", howto);

	/*
	 * What we do for a shutdown, is give the DPT early power loss warning
	 */
	mtx_lock(&dpt->lock);
	dpt_send_immediate(dpt, NULL, 0, EATA_POWER_OFF_WARN, 0, 0, 0);
	mtx_unlock(&dpt->lock);
	/* Give the HBA five seconds to finish flushing its cache. */
	DELAY(1000 * 1000 * 5);

	device_printf(dpt->dev, "Controller was warned of shutdown and is now "
			        "disabled\n");
}

/*============================================================================*/

/*
 * NOTE(review): everything from here to the end of the file is compiled
 * out (#if 0) legacy/target-mode code retained for reference; it has
 * bit-rotted (e.g. the dpt_send_eata_command() calls below pass 7
 * arguments against an 8-argument prototype) and would need work before
 * being re-enabled.
 */
#if 0
#ifdef DPT_RESET_HBA

/*
**	Function name : dpt_reset_hba
**
**	Description : Reset the HBA and properly discard all pending work
**	Input : Softc
**	Output : Nothing
*/
static void
dpt_reset_hba(dpt_softc_t *dpt)
{
	eata_ccb_t       *ccb;
	dpt_ccb_t         dccb, *dccbp;
	int               result;
	struct scsi_xfer *xs;

	mtx_assert(&dpt->lock, MA_OWNED);

	/* Prepare a control block.  The SCSI command part is immaterial */
	dccb.xs = NULL;
	dccb.flags = 0;
	dccb.state = DPT_CCB_STATE_NEW;
	dccb.std_callback = NULL;
	dccb.wrbuff_callback = NULL;

	ccb = &dccb.eata_ccb;
	ccb->CP_OpCode = EATA_CMD_RESET;
	ccb->SCSI_Reset = 0;
	ccb->HBA_Init = 1;
	ccb->Auto_Req_Sen = 1;
	ccb->cp_id = 0; /* Should be ignored */
	ccb->DataIn = 1;
	ccb->DataOut = 0;
	ccb->Interpret = 1;
	ccb->reqlen = htonl(sizeof(struct scsi_sense_data));
	ccb->cp_statDMA = htonl(vtophys(&ccb->cp_statDMA));
	ccb->cp_reqDMA = htonl(vtophys(&ccb->cp_reqDMA));
	ccb->cp_viraddr = (u_int32_t) & ccb;

	ccb->cp_msg[0] = HA_IDENTIFY_MSG | HA_DISCO_RECO;
	ccb->cp_scsi_cmd = 0;	/* Should be ignored */

	/* Lock up the submitted queue.  We are very persistent here */
	while (dpt->queue_status & DPT_SUBMITTED_QUEUE_ACTIVE) {
		DELAY(100);
	}

	dpt->queue_status |= DPT_SUBMITTED_QUEUE_ACTIVE;

	/* Send the RESET message */
	if ((result = dpt_send_eata_command(dpt, &dccb.eata_ccb,
					    EATA_CMD_RESET, 0, 0,
					    0, 0)) != 0) {
		device_printf(dpt->dev, "Failed to send the RESET message.\n"
		       " Trying cold boot (ouch!)\n");

		if ((result = dpt_send_eata_command(dpt, &dccb.eata_ccb,
						    EATA_COLD_BOOT, 0, 0,
						    0, 0)) != 0) {
			panic("%s: Faild to cold boot the HBA\n",
			    device_get_nameunit(dpt->dev));
		}
#ifdef DPT_MEASURE_PERFORMANCE
		dpt->performance.cold_boots++;
#endif /* DPT_MEASURE_PERFORMANCE */
	}

#ifdef DPT_MEASURE_PERFORMANCE
	dpt->performance.warm_starts++;
#endif /* DPT_MEASURE_PERFORMANCE */

	device_printf(dpt->dev,
	    "Aborting pending requests. O/S should re-submit\n");

	while ((dccbp = TAILQ_FIRST(&dpt->completed_ccbs)) != NULL) {
		struct scsi_xfer *xs = dccbp->xs;

		/* Not all transactions have xs structs */
		if (xs != NULL) {
			/* Tell the kernel proper this did not complete well */
			xs->error |= XS_SELTIMEOUT;
			xs->flags |= SCSI_ITSDONE;
			scsi_done(xs);
		}

		dpt_Qremove_submitted(dpt, dccbp);

		/* Remember, Callbacks are NOT in the standard queue */
		if (dccbp->std_callback != NULL) {
			(dccbp->std_callback) (dpt, dccbp->eata_ccb.cp_channel,
					       dccbp);
		} else {
			dpt_Qpush_free(dpt, dccbp);
		}
	}

	device_printf(dpt->dev, "reset done aborting all pending commands\n");
	dpt->queue_status &= ~DPT_SUBMITTED_QUEUE_ACTIVE;
}

#endif /* DPT_RESET_HBA */

/*
 * Build a Command Block for target mode READ/WRITE BUFFER,
 * with the ``sync'' bit ON.
 *
 * Although the length and offset are 24 bit fields in the command, they cannot
 * exceed 8192 bytes, so we take them as short integers andcheck their range.
 * If they are sensless, we round them to zero offset, maximum length and
 * complain.
 */
static void
dpt_target_ccb(dpt_softc_t * dpt, int bus, u_int8_t target, u_int8_t lun,
	       dpt_ccb_t * ccb, int mode, u_int8_t command,
	       u_int16_t length, u_int16_t offset)
{
	eata_ccb_t *cp;

	mtx_assert(&dpt->lock, MA_OWNED);
	if ((length + offset) > DPT_MAX_TARGET_MODE_BUFFER_SIZE) {
		device_printf(dpt->dev,
		    "Length of %d, and offset of %d are wrong\n",
		    length, offset);
		length = DPT_MAX_TARGET_MODE_BUFFER_SIZE;
		offset = 0;
	}
	ccb->xs = NULL;
	ccb->flags = 0;
	ccb->state = DPT_CCB_STATE_NEW;
	ccb->std_callback = (ccb_callback) dpt_target_done;
	ccb->wrbuff_callback = NULL;

	cp = &ccb->eata_ccb;
	cp->CP_OpCode = EATA_CMD_DMA_SEND_CP;
	cp->SCSI_Reset = 0;
	cp->HBA_Init = 0;
	cp->Auto_Req_Sen = 1;
	cp->cp_id = target;
	cp->DataIn = 1;
	cp->DataOut = 0;
	cp->Interpret = 0;
	cp->reqlen = htonl(sizeof(struct scsi_sense_data));
	cp->cp_statDMA = htonl(vtophys(&cp->cp_statDMA));
	cp->cp_reqDMA = htonl(vtophys(&cp->cp_reqDMA));
	cp->cp_viraddr = (u_int32_t) & ccb;

	cp->cp_msg[0] = HA_IDENTIFY_MSG | HA_DISCO_RECO;

	cp->cp_scsi_cmd = command;
	cp->cp_cdb[1] = (u_int8_t) (mode & SCSI_TM_MODE_MASK);
	cp->cp_lun = lun;	/* Order is important here! */
	cp->cp_cdb[2] = 0x00;	/* Buffer Id, only 1 :-( */
	/*
	 * NOTE(review): CDB bytes 3-5 are labeled "Buffer offset" but are
	 * filled from `length`, not `offset`; looks like a copy/paste slip
	 * in this dead (#if 0) code - confirm against the READ/WRITE
	 * BUFFER CDB layout before re-enabling.
	 */
	cp->cp_cdb[3] = (length >> 16) & 0xFF;	/* Buffer offset MSB */
	cp->cp_cdb[4] = (length >> 8) & 0xFF;
	cp->cp_cdb[5] = length & 0xFF;
	cp->cp_cdb[6] = (length >> 16) & 0xFF;	/* Length MSB */
	cp->cp_cdb[7] = (length >> 8) & 0xFF;
	cp->cp_cdb[8] = length & 0xFF;	/* Length LSB */
	cp->cp_cdb[9] = 0;	/* No sync, no match bits */

	/*
	 * This could be optimized to live in dpt_register_buffer.
	 * We keep it here, just in case the kernel decides to reallocate pages
	 */
	if (dpt_scatter_gather(dpt, ccb, DPT_RW_BUFFER_SIZE,
			       dpt->rw_buffer[bus][target][lun])) {
		device_printf(dpt->dev, "Failed to setup Scatter/Gather for "
			      "Target-Mode buffer\n");
	}
}

/* Setup a target mode READ command */
static void
dpt_set_target(int redo, dpt_softc_t * dpt,
	       u_int8_t bus, u_int8_t target, u_int8_t lun, int mode,
	       u_int16_t length, u_int16_t offset, dpt_ccb_t * ccb)
{
	mtx_assert(&dpt->lock, MA_OWNED);
	if (dpt->target_mode_enabled) {
		/* On a re-issue the CCB is already fully built. */
		if (!redo)
			dpt_target_ccb(dpt, bus, target, lun, ccb, mode,
				       SCSI_TM_READ_BUFFER, length, offset);

		ccb->transaction_id = ++dpt->commands_processed;

#ifdef DPT_MEASURE_PERFORMANCE
		dpt->performance.command_count[ccb->eata_ccb.cp_scsi_cmd]++;
		ccb->command_started = microtime_now;
#endif
		dpt_Qadd_waiting(dpt, ccb);
		dpt_sched_queue(dpt);
	} else {
		device_printf(dpt->dev,
		    "Target Mode Request, but Target Mode is OFF\n");
	}
}

/*
 * Schedule a buffer to be sent to another target.
 * The work will be scheduled and the callback provided will be called when
 * the work is actually done.
 *
 * Please NOTE: ``Anyone'' can send a buffer, but only registered clients
 * get notified of receipt of buffers.
 */
int
dpt_send_buffer(int unit,
		u_int8_t channel,
		u_int8_t target,
		u_int8_t lun,
		u_int8_t mode,
		u_int16_t length,
		u_int16_t offset,
		void *data,
		buff_wr_done callback)
{
	dpt_softc_t    *dpt;
	dpt_ccb_t      *ccb = NULL;

	/* This is an external call.
	   Be a bit paranoid */
	dpt = devclass_get_device(dpt_devclass, unit);
	if (dpt == NULL)
		return (INVALID_UNIT);

	mtx_lock(&dpt->lock);
	if (dpt->target_mode_enabled) {
		if ((channel >= dpt->channels) || (target > dpt->max_id) ||
		    (lun > dpt->max_lun)) {
			mtx_unlock(&dpt->lock);
			return (INVALID_SENDER);
		}

		/* Only a registered, buffered target may be sent to. */
		if ((dpt->rw_buffer[channel][target][lun] == NULL) ||
		    (dpt->buffer_receiver[channel][target][lun] == NULL)) {
			mtx_unlock(&dpt->lock);
			return (NOT_REGISTERED);
		}

		/* Process the free list */
		if ((TAILQ_EMPTY(&dpt->free_ccbs)) && dpt_alloc_freelist(dpt)) {
			device_printf(dpt->dev,
			    "ERROR: Cannot allocate any more free CCB's.\n"
			    " Please try later\n");
			mtx_unlock(&dpt->lock);
			return (NO_RESOURCES);
		}
		/* Now grab the newest CCB */
		if ((ccb = dpt_Qpop_free(dpt)) == NULL) {
			mtx_unlock(&dpt->lock);
			panic("%s: Got a NULL CCB from pop_free()\n",
			    device_get_nameunit(dpt->dev));
		}

		bcopy(dpt->rw_buffer[channel][target][lun] + offset, data, length);
		dpt_target_ccb(dpt, channel, target, lun, ccb, mode,
					   SCSI_TM_WRITE_BUFFER,
					   length, offset);
		ccb->std_callback = (ccb_callback) callback; /* Potential trouble */

		ccb->transaction_id = ++dpt->commands_processed;

#ifdef DPT_MEASURE_PERFORMANCE
		dpt->performance.command_count[ccb->eata_ccb.cp_scsi_cmd]++;
		ccb->command_started = microtime_now;
#endif
		dpt_Qadd_waiting(dpt, ccb);
		dpt_sched_queue(dpt);

		mtx_unlock(&dpt->lock);
		return (0);
	}
	mtx_unlock(&dpt->lock);
	return (DRIVER_DOWN);
}

/*
 * Completion handler for target-mode READ/WRITE BUFFER CCBs: deliver the
 * received buffer to the registered client (or wake a sleeping user
 * command) and, for reads, re-arm the same command.
 */
static void
dpt_target_done(dpt_softc_t * dpt, int bus, dpt_ccb_t * ccb)
{
	eata_ccb_t *cp;

	cp = &ccb->eata_ccb;

	/*
	 * Remove the CCB from the waiting queue.
	 *  We do NOT put it back on the free, etc., queues as it is a special
	 * ccb, owned by the dpt_softc of this unit.
	 */
	dpt_Qremove_completed(dpt, ccb);

/* Shorthand accessors into the completed CCB's CDB and softc tables. */
#define br_channel           (ccb->eata_ccb.cp_channel)
#define br_target            (ccb->eata_ccb.cp_id)
#define br_lun               (ccb->eata_ccb.cp_LUN)
#define br_index	     [br_channel][br_target][br_lun]
#define read_buffer_callback (dpt->buffer_receiver br_index )
#define read_buffer	     (dpt->rw_buffer[br_channel][br_target][br_lun])
#define cb(offset)           (ccb->eata_ccb.cp_cdb[offset])
#define br_offset            ((cb(3) << 16) | (cb(4) << 8) | cb(5))
#define br_length            ((cb(6) << 16) | (cb(7) << 8) | cb(8))

	/* Different reasons for being here, you know... */
	switch (ccb->eata_ccb.cp_scsi_cmd) {
	case SCSI_TM_READ_BUFFER:
		if (read_buffer_callback != NULL) {
			/* This is a buffer generated by a kernel process */
			read_buffer_callback(device_get_unit(dpt->dev),
					     br_channel, br_target, br_lun,
					     read_buffer,
					     br_offset, br_length);
		} else {
			/*
			 * This is a buffer waited for by a user (sleeping)
			 * command
			 */
			wakeup(ccb);
		}

		/* We ALWAYS re-issue the same command; args are don't-care */
		dpt_set_target(1, 0, 0, 0, 0, 0, 0, 0, 0);
		break;

	case SCSI_TM_WRITE_BUFFER:
		(ccb->wrbuff_callback) (device_get_unit(dpt->dev),
					br_channel, br_target,
					br_offset, br_length,
					br_lun, ccb->status_packet.hba_stat);
		break;
	default:
		device_printf(dpt->dev,
		    "%s is an unsupported command for target mode\n",
		    scsi_cmd_name(ccb->eata_ccb.cp_scsi_cmd));
	}
	dpt->target_ccb[br_channel][br_target][br_lun] = NULL;
	dpt_Qpush_free(dpt, ccb);
}

/*
 * Use this function to register a client for a buffer read target operation.
 * The function you register will be called every time a buffer is received
 * by the target mode code.
*/ dpt_rb_t dpt_register_buffer(int unit, u_int8_t channel, u_int8_t target, u_int8_t lun, u_int8_t mode, u_int16_t length, u_int16_t offset, dpt_rec_buff callback, dpt_rb_op_t op) { dpt_softc_t *dpt; dpt_ccb_t *ccb = NULL; int ospl; dpt = devclass_get_device(dpt_devclass, unit); if (dpt == NULL) return (INVALID_UNIT); mtx_lock(&dpt->lock); if (dpt->state & DPT_HA_SHUTDOWN_ACTIVE) { mtx_unlock(&dpt->lock); return (DRIVER_DOWN); } if ((channel > (dpt->channels - 1)) || (target > (dpt->max_id - 1)) || (lun > (dpt->max_lun - 1))) { mtx_unlock(&dpt->lock); return (INVALID_SENDER); } if (dpt->buffer_receiver[channel][target][lun] == NULL) { if (op == REGISTER_BUFFER) { /* Assign the requested callback */ dpt->buffer_receiver[channel][target][lun] = callback; /* Get a CCB */ /* Process the free list */ if ((TAILQ_EMPTY(&dpt->free_ccbs)) && dpt_alloc_freelist(dpt)) { device_printf(dpt->dev, "ERROR: Cannot allocate any more free CCB's.\n" " Please try later\n"); mtx_unlock(&dpt->lock); return (NO_RESOURCES); } /* Now grab the newest CCB */ if ((ccb = dpt_Qpop_free(dpt)) == NULL) { mtx_unlock(&dpt->lock); panic("%s: Got a NULL CCB from pop_free()\n", device_get_nameunit(dpt->dev)); } /* Clean up the leftover of the previous tenant */ ccb->status = DPT_CCB_STATE_NEW; dpt->target_ccb[channel][target][lun] = ccb; dpt->rw_buffer[channel][target][lun] = malloc(DPT_RW_BUFFER_SIZE, M_DEVBUF, M_NOWAIT); if (dpt->rw_buffer[channel][target][lun] == NULL) { device_printf(dpt->dev, "Failed to allocate " "Target-Mode buffer\n"); dpt_Qpush_free(dpt, ccb); mtx_unlock(&dpt->lock); return (NO_RESOURCES); } dpt_set_target(0, dpt, channel, target, lun, mode, length, offset, ccb); mtx_unlock(&dpt->lock); return (SUCCESSFULLY_REGISTERED); } else { mtx_unlock(&dpt->lock); return (NOT_REGISTERED); } } else { if (op == REGISTER_BUFFER) { if (dpt->buffer_receiver[channel][target][lun] == callback) { mtx_unlock(&dpt->lock); return (ALREADY_REGISTERED); } else { mtx_unlock(&dpt->lock); return 
(REGISTERED_TO_ANOTHER); } } else { if (dpt->buffer_receiver[channel][target][lun] == callback) { dpt->buffer_receiver[channel][target][lun] = NULL; dpt_Qpush_free(dpt, ccb); free(dpt->rw_buffer[channel][target][lun], M_DEVBUF); mtx_unlock(&dpt->lock); return (SUCCESSFULLY_REGISTERED); } else { mtx_unlock(&dpt->lock); return (INVALID_CALLBACK); } } } mtx_unlock(&dpt->lock); } /* Return the state of the blinking DPT LED's */ u_int8_t dpt_blinking_led(dpt_softc_t * dpt) { int ndx; u_int32_t state; u_int32_t previous; u_int8_t result; mtx_assert(&dpt->lock, MA_OWNED); result = 0; for (ndx = 0, state = 0, previous = 0; (ndx < 10) && (state != previous); ndx++) { previous = state; state = dpt_inl(dpt, 1); } if ((state == previous) && (state == DPT_BLINK_INDICATOR)) result = dpt_inb(dpt, 5); return (result); } /* * Execute a command which did not come from the kernel's SCSI layer. * The only way to map user commands to bus and target is to comply with the * standard DPT wire-down scheme: */ int dpt_user_cmd(dpt_softc_t * dpt, eata_pt_t * user_cmd, caddr_t cmdarg, int minor_no) { dpt_ccb_t *ccb; void *data; int channel, target, lun; int huh; int result; int submitted; mtx_assert(&dpt->lock, MA_OWNED); data = NULL; channel = minor2hba(minor_no); target = minor2target(minor_no); lun = minor2lun(minor_no); if ((channel > (dpt->channels - 1)) || (target > dpt->max_id) || (lun > dpt->max_lun)) return (ENXIO); if (target == dpt->sc_scsi_link[channel].adapter_targ) { /* This one is for the controller itself */ if ((user_cmd->eataID[0] != 'E') || (user_cmd->eataID[1] != 'A') || (user_cmd->eataID[2] != 'T') || (user_cmd->eataID[3] != 'A')) { return (ENXIO); } } /* Get a DPT CCB, so we can prepare a command */ /* Process the free list */ if ((TAILQ_EMPTY(&dpt->free_ccbs)) && dpt_alloc_freelist(dpt)) { device_printf(dpt->dev, "ERROR: Cannot allocate any more free CCB's.\n" " Please try later\n"); return (EFAULT); } /* Now grab the newest CCB */ if ((ccb = dpt_Qpop_free(dpt)) == 
NULL) {
		panic("%s: Got a NULL CCB from pop_free()\n",
		    device_get_nameunit(dpt->dev));
	} else {
		/* Clean up the leftover of the previous tenant */
		ccb->status = DPT_CCB_STATE_NEW;
	}

	/* Import the caller-supplied EATA packet into the CCB. */
	bcopy((caddr_t) & user_cmd->command_packet, (caddr_t) & ccb->eata_ccb,
	    sizeof(eata_ccb_t));

	/* We do not want to do user specified scatter/gather. Why?? */
	if (ccb->eata_ccb.scatter == 1)
		return (EINVAL);
	/*
	 * NOTE(review): the EINVAL/EFAULT early returns below never push the
	 * popped CCB back on the free list -- looks like a CCB leak; confirm
	 * against the rest of the driver before changing.
	 */

	ccb->eata_ccb.Auto_Req_Sen = 1;
	ccb->eata_ccb.reqlen = htonl(sizeof(struct scsi_sense_data));
	/*
	 * NOTE(review): cp_datalen is set here to the size of the *field*
	 * itself (then possibly zeroed below); presumably a placeholder --
	 * verify intent.
	 */
	ccb->eata_ccb.cp_datalen = htonl(sizeof(ccb->eata_ccb.cp_datalen));
	/* Convert the embedded DMA addresses to physical, big-endian form. */
	ccb->eata_ccb.cp_dataDMA = htonl(vtophys(ccb->eata_ccb.cp_dataDMA));
	ccb->eata_ccb.cp_statDMA = htonl(vtophys(&ccb->eata_ccb.cp_statDMA));
	ccb->eata_ccb.cp_reqDMA = htonl(vtophys(&ccb->eata_ccb.cp_reqDMA));
	ccb->eata_ccb.cp_viraddr = (u_int32_t) & ccb;

	if (ccb->eata_ccb.DataIn || ccb->eata_ccb.DataOut) {
		/* Data I/O is involved in this command. Alocate buffer */
		if (ccb->eata_ccb.cp_datalen > PAGE_SIZE) {
			/* Large transfer: physically contiguous, 64KB aligned. */
			data = contigmalloc(ccb->eata_ccb.cp_datalen,
			    M_TEMP, M_WAITOK, 0, ~0,
			    ccb->eata_ccb.cp_datalen, 0x10000);
		} else {
			data = malloc(ccb->eata_ccb.cp_datalen, M_TEMP,
			    M_WAITOK);
		}

		if (data == NULL) {
			device_printf(dpt->dev, "Cannot allocate %d bytes "
			    "for EATA command\n", ccb->eata_ccb.cp_datalen);
			return (EFAULT);
		}
#define usr_cmd_DMA (caddr_t)user_cmd->command_packet.cp_dataDMA
		if (ccb->eata_ccb.DataIn == 1) {
			/* Pull the user's outbound data into the kernel buffer. */
			if (copyin(usr_cmd_DMA, data,
			    ccb->eata_ccb.cp_datalen) == -1)
				return (EFAULT);
		}
	} else {
		/* No data I/O involved here. Make sure the DPT knows that */
		ccb->eata_ccb.cp_datalen = 0;
		data = NULL;
	}

	/* Firmware nesting is not honored for user commands. */
	if (ccb->eata_ccb.FWNEST == 1)
		ccb->eata_ccb.FWNEST = 0;

	if (ccb->eata_ccb.cp_datalen != 0) {
		if (dpt_scatter_gather(dpt, ccb, ccb->eata_ccb.cp_datalen,
		    data) != 0) {
			if (data != NULL)
				free(data, M_TEMP);
			return (EFAULT);
		}
	}

	/**
	 * We are required to quiet a SCSI bus.
	 * Since we do not queue commands on a bus basis,
	 * we wait for ALL commands on a controller to complete.
	 * In the mean time, sched_queue() will not schedule new commands.
	 */
	if ((ccb->eata_ccb.cp_cdb[0] == MULTIFUNCTION_CMD)
	    && (ccb->eata_ccb.cp_cdb[2] == BUS_QUIET)) {
		/* We wait for ALL traffic for this HBa to subside */
		dpt->state |= DPT_HA_QUIET;

		while ((submitted = dpt->submitted_ccbs_count) != 0) {
			/* Sleep up to 100s per iteration; interruptible. */
			huh = mtx_sleep((void *) dpt, &dpt->lock,
			    PCATCH | PRIBIO, "dptqt", 100 * hz);
			switch (huh) {
			case 0: /* Wakeup call received */
				break;
			case EWOULDBLOCK: /* Timer Expired */
				break;
			default: /* anything else */
				break;
			}
		}
	}

	/* Resume normal operation */
	if ((ccb->eata_ccb.cp_cdb[0] == MULTIFUNCTION_CMD)
	    && (ccb->eata_ccb.cp_cdb[2] == BUS_UNQUIET)) {
		dpt->state &= ~DPT_HA_QUIET;
	}

	/**
	 * Schedule the command and submit it.
	 * We bypass dpt_sched_queue, as it will block on DPT_HA_QUIET
	 */
	ccb->xs = NULL;
	ccb->flags = 0;
	ccb->eata_ccb.Auto_Req_Sen = 1; /* We always want this feature */

	ccb->transaction_id = ++dpt->commands_processed;
	ccb->std_callback = (ccb_callback) dpt_user_cmd_done;
	/* Stash the user result pointer; read back in dpt_user_cmd_done. */
	ccb->result = (u_int32_t) & cmdarg;
	ccb->data = data;

#ifdef DPT_MEASURE_PERFORMANCE
	++dpt->performance.command_count[ccb->eata_ccb.cp_scsi_cmd];
	ccb->command_started = microtime_now;
#endif
	dpt_Qadd_waiting(dpt, ccb);
	dpt_sched_queue(dpt);

	/* Wait for the command to complete */
	(void) mtx_sleep((void *) ccb, &dpt->lock, PCATCH | PRIBIO, "dptucw",
	    100 * hz);

	/* Free allocated memory */
	if (data != NULL)
		free(data, M_TEMP);

	return (0);
}

/*
 * Completion callback for dpt_user_cmd().  Copies sense data, inbound data
 * and the HBA status back to user space, then recycles the CCB and wakes
 * the sleeping submitter.  NOTE(review): drops dpt->lock on entry and
 * re-acquires it before returning -- presumably called with the lock held;
 * confirm against the dispatch path.
 */
static void
dpt_user_cmd_done(dpt_softc_t * dpt, int bus, dpt_ccb_t * ccb)
{
	u_int32_t result;
	caddr_t cmd_arg;

	mtx_unlock(&dpt->lock);

	/**
	 * If Auto Request Sense is on, copyout the sense struct
	 */
#define usr_pckt_DMA (caddr_t)(intptr_t)ntohl(ccb->eata_ccb.cp_reqDMA)
#define usr_pckt_len ntohl(ccb->eata_ccb.cp_datalen)
	if (ccb->eata_ccb.Auto_Req_Sen == 1) {
		if (copyout((caddr_t) & ccb->sense_data, usr_pckt_DMA,
		    sizeof(struct scsi_sense_data))) {
			mtx_lock(&dpt->lock);
			ccb->result = EFAULT;
			dpt_Qpush_free(dpt, ccb);
			wakeup(ccb);
			return;
		}
	}
	/* If DataIn is on,
copyout the data */ if ((ccb->eata_ccb.DataIn == 1) && (ccb->status_packet.hba_stat == HA_NO_ERROR)) { if (copyout(ccb->data, usr_pckt_DMA, usr_pckt_len)) { mtx_lock(&dpt->lock); dpt_Qpush_free(dpt, ccb); ccb->result = EFAULT; wakeup(ccb); return; } } /* Copyout the status */ result = ccb->status_packet.hba_stat; cmd_arg = (caddr_t) ccb->result; if (copyout((caddr_t) & result, cmd_arg, sizeof(result))) { mtx_lock(&dpt->lock); dpt_Qpush_free(dpt, ccb); ccb->result = EFAULT; wakeup(ccb); return; } mtx_lock(&dpt->lock); /* Put the CCB back in the freelist */ ccb->state |= DPT_CCB_STATE_COMPLETED; dpt_Qpush_free(dpt, ccb); /* Free allocated memory */ return; } #endif Index: stable/11/sys/dev/firewire/sbp.c =================================================================== --- stable/11/sys/dev/firewire/sbp.c (revision 335137) +++ stable/11/sys/dev/firewire/sbp.c (revision 335138) @@ -1,2857 +1,2857 @@ /*- * Copyright (c) 2003 Hidetoshi Shimokawa * Copyright (c) 1998-2002 Katsushi Kobayashi and Hidetoshi Shimokawa * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the acknowledgement as bellow: * * This product includes software developed by K. Kobayashi and H. Shimokawa * * 4. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ * */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define ccb_sdev_ptr spriv_ptr0 #define ccb_sbp_ptr spriv_ptr1 #define SBP_NUM_TARGETS 8 /* MAX 64 */ /* * Scan_bus doesn't work for more than 8 LUNs * because of CAM_SCSI2_MAXLUN in cam_xpt.c */ #define SBP_NUM_LUNS 64 #define SBP_MAXPHYS MIN(MAXPHYS, (512*1024) /* 512KB */) #define SBP_DMA_SIZE PAGE_SIZE #define SBP_LOGIN_SIZE sizeof(struct sbp_login_res) #define SBP_QUEUE_LEN ((SBP_DMA_SIZE - SBP_LOGIN_SIZE) / sizeof(struct sbp_ocb)) #define SBP_NUM_OCB (SBP_QUEUE_LEN * SBP_NUM_TARGETS) /* * STATUS FIFO addressing * bit *----------------------- * 0- 1( 2): 0 (alignment) * 2- 7( 6): target * 8-15( 8): lun * 16-31( 8): reserved * 32-47(16): SBP_BIND_HI * 48-64(16): bus_id, node_id */ #define SBP_BIND_HI 0x1 #define SBP_DEV2ADDR(t, l) \ (((u_int64_t)SBP_BIND_HI << 32) \ | (((l) & 0xff) << 8) \ | (((t) & 0x3f) << 2)) #define SBP_ADDR2TRG(a) (((a) >> 2) & 0x3f) #define SBP_ADDR2LUN(a) (((a) >> 8) & 0xff) #define SBP_INITIATOR 7 static char *orb_fun_name[] = { ORB_FUN_NAMES }; static int debug = 0; static int auto_login = 1; static int 
max_speed = -1; static int sbp_cold = 1; static int ex_login = 1; static int login_delay = 1000; /* msec */ static int scan_delay = 500; /* msec */ static int use_doorbell = 0; static int sbp_tags = 0; SYSCTL_DECL(_hw_firewire); static SYSCTL_NODE(_hw_firewire, OID_AUTO, sbp, CTLFLAG_RD, 0, "SBP-II Subsystem"); SYSCTL_INT(_debug, OID_AUTO, sbp_debug, CTLFLAG_RWTUN, &debug, 0, "SBP debug flag"); SYSCTL_INT(_hw_firewire_sbp, OID_AUTO, auto_login, CTLFLAG_RWTUN, &auto_login, 0, "SBP perform login automatically"); SYSCTL_INT(_hw_firewire_sbp, OID_AUTO, max_speed, CTLFLAG_RWTUN, &max_speed, 0, "SBP transfer max speed"); SYSCTL_INT(_hw_firewire_sbp, OID_AUTO, exclusive_login, CTLFLAG_RWTUN, &ex_login, 0, "SBP enable exclusive login"); SYSCTL_INT(_hw_firewire_sbp, OID_AUTO, login_delay, CTLFLAG_RWTUN, &login_delay, 0, "SBP login delay in msec"); SYSCTL_INT(_hw_firewire_sbp, OID_AUTO, scan_delay, CTLFLAG_RWTUN, &scan_delay, 0, "SBP scan delay in msec"); SYSCTL_INT(_hw_firewire_sbp, OID_AUTO, use_doorbell, CTLFLAG_RWTUN, &use_doorbell, 0, "SBP use doorbell request"); SYSCTL_INT(_hw_firewire_sbp, OID_AUTO, tags, CTLFLAG_RWTUN, &sbp_tags, 0, "SBP tagged queuing support"); #define NEED_RESPONSE 0 #define SBP_SEG_MAX rounddown(0xffff, PAGE_SIZE) #ifdef __sparc64__ /* iommu */ #define SBP_IND_MAX howmany(SBP_MAXPHYS, SBP_SEG_MAX) #else #define SBP_IND_MAX howmany(SBP_MAXPHYS, PAGE_SIZE) #endif struct sbp_ocb { STAILQ_ENTRY(sbp_ocb) ocb; union ccb *ccb; bus_addr_t bus_addr; uint32_t orb[8]; #define IND_PTR_OFFSET (8*sizeof(uint32_t)) struct ind_ptr ind_ptr[SBP_IND_MAX]; struct sbp_dev *sdev; int flags; /* XXX should be removed */ bus_dmamap_t dmamap; struct callout timer; }; #define OCB_ACT_MGM 0 #define OCB_ACT_CMD 1 #define OCB_MATCH(o,s) ((o)->bus_addr == ntohl((s)->orb_lo)) struct sbp_dev { #define SBP_DEV_RESET 0 /* accept login */ #define SBP_DEV_LOGIN 1 /* to login */ #if 0 #define SBP_DEV_RECONN 2 /* to reconnect */ #endif #define SBP_DEV_TOATTACH 3 /* to attach */ 
#define SBP_DEV_PROBE 4 /* scan lun */ #define SBP_DEV_ATTACHED 5 /* in operation */ #define SBP_DEV_DEAD 6 /* unavailable unit */ #define SBP_DEV_RETRY 7 /* unavailable unit */ uint8_t status:4, timeout:4; uint8_t type; uint16_t lun_id; uint16_t freeze; #define ORB_LINK_DEAD (1 << 0) #define VALID_LUN (1 << 1) #define ORB_POINTER_ACTIVE (1 << 2) #define ORB_POINTER_NEED (1 << 3) #define ORB_DOORBELL_ACTIVE (1 << 4) #define ORB_DOORBELL_NEED (1 << 5) #define ORB_SHORTAGE (1 << 6) uint16_t flags; struct cam_path *path; struct sbp_target *target; struct fwdma_alloc dma; struct sbp_login_res *login; struct callout login_callout; struct sbp_ocb *ocb; STAILQ_HEAD(, sbp_ocb) ocbs; STAILQ_HEAD(, sbp_ocb) free_ocbs; struct sbp_ocb *last_ocb; char vendor[32]; char product[32]; char revision[10]; char bustgtlun[32]; }; struct sbp_target { int target_id; int num_lun; struct sbp_dev **luns; struct sbp_softc *sbp; struct fw_device *fwdev; uint32_t mgm_hi, mgm_lo; struct sbp_ocb *mgm_ocb_cur; STAILQ_HEAD(, sbp_ocb) mgm_ocb_queue; struct callout mgm_ocb_timeout; struct callout scan_callout; STAILQ_HEAD(, fw_xfer) xferlist; int n_xfer; }; struct sbp_softc { struct firewire_dev_comm fd; struct cam_sim *sim; struct cam_path *path; struct sbp_target targets[SBP_NUM_TARGETS]; struct fw_bind fwb; bus_dma_tag_t dmat; struct timeval last_busreset; #define SIMQ_FREEZED 1 int flags; struct mtx mtx; }; #define SBP_LOCK(sbp) mtx_lock(&(sbp)->mtx) #define SBP_UNLOCK(sbp) mtx_unlock(&(sbp)->mtx) #define SBP_LOCK_ASSERT(sbp) mtx_assert(&(sbp)->mtx, MA_OWNED) static void sbp_post_explore (void *); static void sbp_recv (struct fw_xfer *); static void sbp_mgm_callback (struct fw_xfer *); #if 0 static void sbp_cmd_callback (struct fw_xfer *); #endif static void sbp_orb_pointer (struct sbp_dev *, struct sbp_ocb *); static void sbp_doorbell(struct sbp_dev *); static void sbp_execute_ocb (void *, bus_dma_segment_t *, int, int); static void sbp_free_ocb (struct sbp_dev *, struct sbp_ocb *); static void 
sbp_abort_ocb (struct sbp_ocb *, int); static void sbp_abort_all_ocbs (struct sbp_dev *, int); static struct fw_xfer * sbp_write_cmd (struct sbp_dev *, int, int); static struct sbp_ocb * sbp_get_ocb (struct sbp_dev *); static struct sbp_ocb * sbp_enqueue_ocb (struct sbp_dev *, struct sbp_ocb *); static struct sbp_ocb * sbp_dequeue_ocb (struct sbp_dev *, struct sbp_status *); static void sbp_cam_detach_sdev(struct sbp_dev *); static void sbp_free_sdev(struct sbp_dev *); static void sbp_cam_detach_target (struct sbp_target *); static void sbp_free_target (struct sbp_target *); static void sbp_mgm_timeout (void *arg); static void sbp_timeout (void *arg); static void sbp_mgm_orb (struct sbp_dev *, int, struct sbp_ocb *); static MALLOC_DEFINE(M_SBP, "sbp", "SBP-II/FireWire"); /* cam related functions */ static void sbp_action(struct cam_sim *sim, union ccb *ccb); static void sbp_poll(struct cam_sim *sim); static void sbp_cam_scan_lun(struct cam_periph *, union ccb *); static void sbp_cam_scan_target(void *arg); static char *orb_status0[] = { /* 0 */ "No additional information to report", /* 1 */ "Request type not supported", /* 2 */ "Speed not supported", /* 3 */ "Page size not supported", /* 4 */ "Access denied", /* 5 */ "Logical unit not supported", /* 6 */ "Maximum payload too small", /* 7 */ "Reserved for future standardization", /* 8 */ "Resources unavailable", /* 9 */ "Function rejected", /* A */ "Login ID not recognized", /* B */ "Dummy ORB completed", /* C */ "Request aborted", /* FF */ "Unspecified error" #define MAX_ORB_STATUS0 0xd }; static char *orb_status1_object[] = { /* 0 */ "Operation request block (ORB)", /* 1 */ "Data buffer", /* 2 */ "Page table", /* 3 */ "Unable to specify" }; static char *orb_status1_serial_bus_error[] = { /* 0 */ "Missing acknowledge", /* 1 */ "Reserved; not to be used", /* 2 */ "Time-out error", /* 3 */ "Reserved; not to be used", /* 4 */ "Busy retry limit exceeded(X)", /* 5 */ "Busy retry limit exceeded(A)", /* 6 */ "Busy retry 
limit exceeded(B)", /* 7 */ "Reserved for future standardization", /* 8 */ "Reserved for future standardization", /* 9 */ "Reserved for future standardization", /* A */ "Reserved for future standardization", /* B */ "Tardy retry limit exceeded", /* C */ "Conflict error", /* D */ "Data error", /* E */ "Type error", /* F */ "Address error" }; static void sbp_identify(driver_t *driver, device_t parent) { SBP_DEBUG(0) printf("sbp_identify\n"); END_DEBUG if (device_find_child(parent, "sbp", -1) == NULL) BUS_ADD_CHILD(parent, 0, "sbp", -1); } /* * sbp_probe() */ static int sbp_probe(device_t dev) { SBP_DEBUG(0) printf("sbp_probe\n"); END_DEBUG device_set_desc(dev, "SBP-2/SCSI over FireWire"); #if 0 if (bootverbose) debug = bootverbose; #endif return (0); } /* * Display device characteristics on the console */ static void sbp_show_sdev_info(struct sbp_dev *sdev) { struct fw_device *fwdev; fwdev = sdev->target->fwdev; device_printf(sdev->target->sbp->fd.dev, "%s: %s: ordered:%d type:%d EUI:%08x%08x node:%d " "speed:%d maxrec:%d\n", __func__, sdev->bustgtlun, (sdev->type & 0x40) >> 6, (sdev->type & 0x1f), fwdev->eui.hi, fwdev->eui.lo, fwdev->dst, fwdev->speed, fwdev->maxrec); device_printf(sdev->target->sbp->fd.dev, "%s: %s '%s' '%s' '%s'\n", __func__, sdev->bustgtlun, sdev->vendor, sdev->product, sdev->revision); } static struct { int bus; int target; struct fw_eui64 eui; } wired[] = { /* Bus Target EUI64 */ #if 0 {0, 2, {0x00018ea0, 0x01fd0154}}, /* Logitec HDD */ {0, 0, {0x00018ea6, 0x00100682}}, /* Logitec DVD */ {0, 1, {0x00d03200, 0xa412006a}}, /* Yano HDD */ #endif {-1, -1, {0,0}} }; static int sbp_new_target(struct sbp_softc *sbp, struct fw_device *fwdev) { int bus, i, target=-1; char w[SBP_NUM_TARGETS]; bzero(w, sizeof(w)); bus = device_get_unit(sbp->fd.dev); /* XXX wired-down configuration should be gotten from tunable or device hint */ for (i = 0; wired[i].bus >= 0; i++) { if (wired[i].bus == bus) { w[wired[i].target] = 1; if (wired[i].eui.hi == fwdev->eui.hi && 
wired[i].eui.lo == fwdev->eui.lo) target = wired[i].target; } } if (target >= 0) { if (target < SBP_NUM_TARGETS && sbp->targets[target].fwdev == NULL) return (target); device_printf(sbp->fd.dev, "target %d is not free for %08x:%08x\n", target, fwdev->eui.hi, fwdev->eui.lo); target = -1; } /* non-wired target */ for (i = 0; i < SBP_NUM_TARGETS; i++) if (sbp->targets[i].fwdev == NULL && w[i] == 0) { target = i; break; } return target; } static void sbp_alloc_lun(struct sbp_target *target) { struct crom_context cc; struct csrreg *reg; struct sbp_dev *sdev, **newluns; struct sbp_softc *sbp; int maxlun, lun, i; sbp = target->sbp; crom_init_context(&cc, target->fwdev->csrrom); /* XXX shoud parse appropriate unit directories only */ maxlun = -1; while (cc.depth >= 0) { reg = crom_search_key(&cc, CROM_LUN); if (reg == NULL) break; lun = reg->val & 0xffff; SBP_DEBUG(0) printf("target %d lun %d found\n", target->target_id, lun); END_DEBUG if (maxlun < lun) maxlun = lun; crom_next(&cc); } if (maxlun < 0) device_printf(target->sbp->fd.dev, "%d no LUN found\n", target->target_id); maxlun++; if (maxlun >= SBP_NUM_LUNS) maxlun = SBP_NUM_LUNS; /* Invalidiate stale devices */ for (lun = 0; lun < target->num_lun; lun++) { sdev = target->luns[lun]; if (sdev == NULL) continue; sdev->flags &= ~VALID_LUN; if (lun >= maxlun) { /* lost device */ sbp_cam_detach_sdev(sdev); sbp_free_sdev(sdev); target->luns[lun] = NULL; } } /* Reallocate */ if (maxlun != target->num_lun) { newluns = (struct sbp_dev **) realloc(target->luns, sizeof(struct sbp_dev *) * maxlun, M_SBP, M_NOWAIT | M_ZERO); if (newluns == NULL) { printf("%s: realloc failed\n", __func__); newluns = target->luns; maxlun = target->num_lun; } /* * We must zero the extended region for the case * realloc() doesn't allocate new buffer. 
*/ if (maxlun > target->num_lun) bzero(&newluns[target->num_lun], sizeof(struct sbp_dev *) * (maxlun - target->num_lun)); target->luns = newluns; target->num_lun = maxlun; } crom_init_context(&cc, target->fwdev->csrrom); while (cc.depth >= 0) { int new = 0; reg = crom_search_key(&cc, CROM_LUN); if (reg == NULL) break; lun = reg->val & 0xffff; if (lun >= SBP_NUM_LUNS) { printf("too large lun %d\n", lun); goto next; } sdev = target->luns[lun]; if (sdev == NULL) { sdev = malloc(sizeof(struct sbp_dev), M_SBP, M_NOWAIT | M_ZERO); if (sdev == NULL) { printf("%s: malloc failed\n", __func__); goto next; } target->luns[lun] = sdev; sdev->lun_id = lun; sdev->target = target; STAILQ_INIT(&sdev->ocbs); callout_init_mtx(&sdev->login_callout, &sbp->mtx, 0); sdev->status = SBP_DEV_RESET; new = 1; snprintf(sdev->bustgtlun, 32, "%s:%d:%d", device_get_nameunit(sdev->target->sbp->fd.dev), sdev->target->target_id, sdev->lun_id); } sdev->flags |= VALID_LUN; sdev->type = (reg->val & 0xff0000) >> 16; if (new == 0) goto next; fwdma_malloc(sbp->fd.fc, /* alignment */ sizeof(uint32_t), SBP_DMA_SIZE, &sdev->dma, BUS_DMA_NOWAIT | BUS_DMA_COHERENT); if (sdev->dma.v_addr == NULL) { printf("%s: dma space allocation failed\n", __func__); free(sdev, M_SBP); target->luns[lun] = NULL; goto next; } sdev->login = (struct sbp_login_res *) sdev->dma.v_addr; sdev->ocb = (struct sbp_ocb *) ((char *)sdev->dma.v_addr + SBP_LOGIN_SIZE); bzero((char *)sdev->ocb, sizeof(struct sbp_ocb) * SBP_QUEUE_LEN); STAILQ_INIT(&sdev->free_ocbs); for (i = 0; i < SBP_QUEUE_LEN; i++) { struct sbp_ocb *ocb; ocb = &sdev->ocb[i]; ocb->bus_addr = sdev->dma.bus_addr + SBP_LOGIN_SIZE + sizeof(struct sbp_ocb) * i + offsetof(struct sbp_ocb, orb[0]); if (bus_dmamap_create(sbp->dmat, 0, &ocb->dmamap)) { printf("sbp_attach: cannot create dmamap\n"); /* XXX */ goto next; } callout_init_mtx(&ocb->timer, &sbp->mtx, 0); SBP_LOCK(sbp); sbp_free_ocb(sdev, ocb); SBP_UNLOCK(sbp); } next: crom_next(&cc); } for (lun = 0; lun < target->num_lun; 
lun++) { sdev = target->luns[lun]; if (sdev != NULL && (sdev->flags & VALID_LUN) == 0) { sbp_cam_detach_sdev(sdev); sbp_free_sdev(sdev); target->luns[lun] = NULL; } } } static struct sbp_target * sbp_alloc_target(struct sbp_softc *sbp, struct fw_device *fwdev) { int i; struct sbp_target *target; struct crom_context cc; struct csrreg *reg; SBP_DEBUG(1) printf("sbp_alloc_target\n"); END_DEBUG i = sbp_new_target(sbp, fwdev); if (i < 0) { device_printf(sbp->fd.dev, "increase SBP_NUM_TARGETS!\n"); return NULL; } /* new target */ target = &sbp->targets[i]; target->fwdev = fwdev; target->target_id = i; /* XXX we may want to reload mgm port after each bus reset */ /* XXX there might be multiple management agents */ crom_init_context(&cc, target->fwdev->csrrom); reg = crom_search_key(&cc, CROM_MGM); if (reg == NULL || reg->val == 0) { printf("NULL management address\n"); target->fwdev = NULL; return NULL; } target->mgm_hi = 0xffff; target->mgm_lo = 0xf0000000 | (reg->val << 2); target->mgm_ocb_cur = NULL; SBP_DEBUG(1) printf("target:%d mgm_port: %x\n", i, target->mgm_lo); END_DEBUG STAILQ_INIT(&target->xferlist); target->n_xfer = 0; STAILQ_INIT(&target->mgm_ocb_queue); callout_init_mtx(&target->mgm_ocb_timeout, &sbp->mtx, 0); callout_init_mtx(&target->scan_callout, &sbp->mtx, 0); target->luns = NULL; target->num_lun = 0; return target; } static void sbp_probe_lun(struct sbp_dev *sdev) { struct fw_device *fwdev; struct crom_context c, *cc = &c; struct csrreg *reg; bzero(sdev->vendor, sizeof(sdev->vendor)); bzero(sdev->product, sizeof(sdev->product)); fwdev = sdev->target->fwdev; crom_init_context(cc, fwdev->csrrom); /* get vendor string */ crom_search_key(cc, CSRKEY_VENDOR); crom_next(cc); crom_parse_text(cc, sdev->vendor, sizeof(sdev->vendor)); /* skip to the unit directory for SBP-2 */ while ((reg = crom_search_key(cc, CSRKEY_VER)) != NULL) { if (reg->val == CSRVAL_T10SBP2) break; crom_next(cc); } /* get firmware revision */ reg = crom_search_key(cc, CSRKEY_FIRM_VER); if 
(reg != NULL) snprintf(sdev->revision, sizeof(sdev->revision), "%06x", reg->val); /* get product string */ crom_search_key(cc, CSRKEY_MODEL); crom_next(cc); crom_parse_text(cc, sdev->product, sizeof(sdev->product)); } static void sbp_login_callout(void *arg) { struct sbp_dev *sdev = (struct sbp_dev *)arg; SBP_LOCK_ASSERT(sdev->target->sbp); sbp_mgm_orb(sdev, ORB_FUN_LGI, NULL); } static void sbp_login(struct sbp_dev *sdev) { struct timeval delta; struct timeval t; int ticks = 0; microtime(&delta); timevalsub(&delta, &sdev->target->sbp->last_busreset); t.tv_sec = login_delay / 1000; t.tv_usec = (login_delay % 1000) * 1000; timevalsub(&t, &delta); if (t.tv_sec >= 0 && t.tv_usec > 0) ticks = (t.tv_sec * 1000 + t.tv_usec / 1000) * hz / 1000; SBP_DEBUG(0) printf("%s: sec = %jd usec = %ld ticks = %d\n", __func__, (intmax_t)t.tv_sec, t.tv_usec, ticks); END_DEBUG callout_reset(&sdev->login_callout, ticks, sbp_login_callout, (void *)(sdev)); } #define SBP_FWDEV_ALIVE(fwdev) (((fwdev)->status == FWDEVATTACHED) \ && crom_has_specver((fwdev)->csrrom, CSRVAL_ANSIT10, CSRVAL_T10SBP2)) static void sbp_probe_target(struct sbp_target *target) { struct sbp_softc *sbp = target->sbp; struct sbp_dev *sdev; int i, alive; alive = SBP_FWDEV_ALIVE(target->fwdev); SBP_DEBUG(1) device_printf(sbp->fd.dev, "%s %d%salive\n", __func__, target->target_id, (!alive) ? 
" not " : ""); END_DEBUG sbp_alloc_lun(target); /* XXX untimeout mgm_ocb and dequeue */ for (i=0; i < target->num_lun; i++) { sdev = target->luns[i]; if (sdev == NULL) continue; if (alive && (sdev->status != SBP_DEV_DEAD)) { if (sdev->path != NULL) { xpt_freeze_devq(sdev->path, 1); sdev->freeze++; } sbp_probe_lun(sdev); sbp_show_sdev_info(sdev); SBP_LOCK(sbp); sbp_abort_all_ocbs(sdev, CAM_SCSI_BUS_RESET); SBP_UNLOCK(sbp); switch (sdev->status) { case SBP_DEV_RESET: /* new or revived target */ if (auto_login) sbp_login(sdev); break; case SBP_DEV_TOATTACH: case SBP_DEV_PROBE: case SBP_DEV_ATTACHED: case SBP_DEV_RETRY: default: sbp_mgm_orb(sdev, ORB_FUN_RCN, NULL); break; } } else { switch (sdev->status) { case SBP_DEV_ATTACHED: SBP_DEBUG(0) /* the device has gone */ device_printf(sbp->fd.dev, "%s: lost target\n", __func__); END_DEBUG if (sdev->path) { xpt_freeze_devq(sdev->path, 1); sdev->freeze++; } sdev->status = SBP_DEV_RETRY; sbp_cam_detach_sdev(sdev); sbp_free_sdev(sdev); target->luns[i] = NULL; break; case SBP_DEV_PROBE: case SBP_DEV_TOATTACH: sdev->status = SBP_DEV_RESET; break; case SBP_DEV_RETRY: case SBP_DEV_RESET: case SBP_DEV_DEAD: break; } } } } static void sbp_post_busreset(void *arg) { struct sbp_softc *sbp; sbp = (struct sbp_softc *)arg; SBP_DEBUG(0) printf("sbp_post_busreset\n"); END_DEBUG SBP_LOCK(sbp); if ((sbp->flags & SIMQ_FREEZED) == 0) { xpt_freeze_simq(sbp->sim, /*count*/1); sbp->flags |= SIMQ_FREEZED; } microtime(&sbp->last_busreset); SBP_UNLOCK(sbp); } static void sbp_post_explore(void *arg) { struct sbp_softc *sbp = (struct sbp_softc *)arg; struct sbp_target *target; struct fw_device *fwdev; int i, alive; SBP_DEBUG(0) printf("sbp_post_explore (sbp_cold=%d)\n", sbp_cold); END_DEBUG /* We need physical access */ if (!firewire_phydma_enable) return; if (sbp_cold > 0) sbp_cold--; SBP_LOCK(sbp); /* Garbage Collection */ for (i = 0; i < SBP_NUM_TARGETS; i++) { target = &sbp->targets[i]; if (target->fwdev == NULL) continue; STAILQ_FOREACH(fwdev, 
&sbp->fd.fc->devices, link) if (target->fwdev == fwdev) break; if (fwdev == NULL) { /* device has removed in lower driver */ sbp_cam_detach_target(target); sbp_free_target(target); } } /* traverse device list */ STAILQ_FOREACH(fwdev, &sbp->fd.fc->devices, link) { SBP_DEBUG(0) device_printf(sbp->fd.dev,"%s:: EUI:%08x%08x %s attached, state=%d\n", __func__, fwdev->eui.hi, fwdev->eui.lo, (fwdev->status != FWDEVATTACHED) ? "not" : "", fwdev->status); END_DEBUG alive = SBP_FWDEV_ALIVE(fwdev); for (i = 0; i < SBP_NUM_TARGETS; i++) { target = &sbp->targets[i]; if (target->fwdev == fwdev) { /* known target */ break; } } if (i == SBP_NUM_TARGETS) { if (alive) { /* new target */ target = sbp_alloc_target(sbp, fwdev); if (target == NULL) continue; } else { continue; } } /* * It is safe to drop the lock here as the target is already * reserved, so there should be no contenders for it. * And the target is not yet exposed, so there should not be * any other accesses to it. * Finally, the list being iterated is protected somewhere else. 
*/ SBP_UNLOCK(sbp); sbp_probe_target(target); SBP_LOCK(sbp); if (target->num_lun == 0) sbp_free_target(target); } if ((sbp->flags & SIMQ_FREEZED) != 0) { xpt_release_simq(sbp->sim, /*run queue*/TRUE); sbp->flags &= ~SIMQ_FREEZED; } SBP_UNLOCK(sbp); } #if NEED_RESPONSE static void sbp_loginres_callback(struct fw_xfer *xfer) { struct sbp_dev *sdev; sdev = (struct sbp_dev *)xfer->sc; SBP_DEBUG(1) device_printf(sdev->target->sbp->fd.dev,"%s\n", __func__); END_DEBUG /* recycle */ SBP_LOCK(sdev->target->sbp); STAILQ_INSERT_TAIL(&sdev->target->sbp->fwb.xferlist, xfer, link); SBP_UNLOCK(sdev->target->sbp); return; } #endif static __inline void sbp_xfer_free(struct fw_xfer *xfer) { struct sbp_dev *sdev; sdev = (struct sbp_dev *)xfer->sc; fw_xfer_unload(xfer); SBP_LOCK_ASSERT(sdev->target->sbp); STAILQ_INSERT_TAIL(&sdev->target->xferlist, xfer, link); } static void sbp_reset_start_callback(struct fw_xfer *xfer) { struct sbp_dev *tsdev, *sdev = (struct sbp_dev *)xfer->sc; struct sbp_target *target = sdev->target; int i; if (xfer->resp != 0) { device_printf(sdev->target->sbp->fd.dev, "%s: %s failed: resp=%d\n", __func__, sdev->bustgtlun, xfer->resp); } SBP_LOCK(target->sbp); for (i = 0; i < target->num_lun; i++) { tsdev = target->luns[i]; if (tsdev != NULL && tsdev->status == SBP_DEV_LOGIN) sbp_login(tsdev); } SBP_UNLOCK(target->sbp); } static void sbp_reset_start(struct sbp_dev *sdev) { struct fw_xfer *xfer; struct fw_pkt *fp; SBP_DEBUG(0) device_printf(sdev->target->sbp->fd.dev, "%s:%s\n", __func__,sdev->bustgtlun); END_DEBUG xfer = sbp_write_cmd(sdev, FWTCODE_WREQQ, 0); xfer->hand = sbp_reset_start_callback; fp = &xfer->send.hdr; fp->mode.wreqq.dest_hi = 0xffff; fp->mode.wreqq.dest_lo = 0xf0000000 | RESET_START; fp->mode.wreqq.data = htonl(0xf); fw_asyreq(xfer->fc, -1, xfer); } static void sbp_mgm_callback(struct fw_xfer *xfer) { struct sbp_dev *sdev; int resp; sdev = (struct sbp_dev *)xfer->sc; SBP_DEBUG(1) device_printf(sdev->target->sbp->fd.dev, "%s:%s\n", __func__, 
sdev->bustgtlun); END_DEBUG resp = xfer->resp; SBP_LOCK(sdev->target->sbp); sbp_xfer_free(xfer); SBP_UNLOCK(sdev->target->sbp); } static struct sbp_dev * sbp_next_dev(struct sbp_target *target, int lun) { struct sbp_dev **sdevp; int i; for (i = lun, sdevp = &target->luns[lun]; i < target->num_lun; i++, sdevp++) if (*sdevp != NULL && (*sdevp)->status == SBP_DEV_PROBE) return (*sdevp); return (NULL); } #define SCAN_PRI 1 static void sbp_cam_scan_lun(struct cam_periph *periph, union ccb *ccb) { struct sbp_softc *sbp; struct sbp_target *target; struct sbp_dev *sdev; sdev = (struct sbp_dev *) ccb->ccb_h.ccb_sdev_ptr; target = sdev->target; sbp = target->sbp; SBP_LOCK(sbp); SBP_DEBUG(0) device_printf(sbp->fd.dev, "%s:%s\n", __func__, sdev->bustgtlun); END_DEBUG if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { sdev->status = SBP_DEV_ATTACHED; } else { device_printf(sbp->fd.dev, "%s:%s failed\n", __func__, sdev->bustgtlun); } sdev = sbp_next_dev(target, sdev->lun_id + 1); if (sdev == NULL) { SBP_UNLOCK(sbp); free(ccb, M_SBP); return; } /* reuse ccb */ xpt_setup_ccb(&ccb->ccb_h, sdev->path, SCAN_PRI); ccb->ccb_h.ccb_sdev_ptr = sdev; ccb->ccb_h.flags |= CAM_DEV_QFREEZE; SBP_UNLOCK(sbp); xpt_action(ccb); xpt_release_devq(sdev->path, sdev->freeze, TRUE); sdev->freeze = 1; } static void sbp_cam_scan_target(void *arg) { struct sbp_target *target = (struct sbp_target *)arg; struct sbp_dev *sdev; union ccb *ccb; SBP_LOCK_ASSERT(target->sbp); sdev = sbp_next_dev(target, 0); if (sdev == NULL) { printf("sbp_cam_scan_target: nothing to do for target%d\n", target->target_id); return; } SBP_DEBUG(0) device_printf(sdev->target->sbp->fd.dev, "%s:%s\n", __func__, sdev->bustgtlun); END_DEBUG ccb = malloc(sizeof(union ccb), M_SBP, M_NOWAIT | M_ZERO); if (ccb == NULL) { printf("sbp_cam_scan_target: malloc failed\n"); return; } SBP_UNLOCK(target->sbp); xpt_setup_ccb(&ccb->ccb_h, sdev->path, SCAN_PRI); ccb->ccb_h.func_code = XPT_SCAN_LUN; ccb->ccb_h.cbfcnp = sbp_cam_scan_lun; 
ccb->ccb_h.flags |= CAM_DEV_QFREEZE; ccb->crcn.flags = CAM_FLAG_NONE; ccb->ccb_h.ccb_sdev_ptr = sdev; /* The scan is in progress now. */ xpt_action(ccb); SBP_LOCK(target->sbp); xpt_release_devq(sdev->path, sdev->freeze, TRUE); sdev->freeze = 1; } static __inline void sbp_scan_dev(struct sbp_dev *sdev) { sdev->status = SBP_DEV_PROBE; callout_reset_sbt(&sdev->target->scan_callout, SBT_1MS * scan_delay, 0, sbp_cam_scan_target, (void *)sdev->target, 0); } static void sbp_do_attach(struct fw_xfer *xfer) { struct sbp_dev *sdev; struct sbp_target *target; struct sbp_softc *sbp; sdev = (struct sbp_dev *)xfer->sc; target = sdev->target; sbp = target->sbp; SBP_LOCK(sbp); SBP_DEBUG(0) device_printf(sdev->target->sbp->fd.dev, "%s:%s\n", __func__, sdev->bustgtlun); END_DEBUG sbp_xfer_free(xfer); if (sdev->path == NULL) xpt_create_path(&sdev->path, NULL, cam_sim_path(target->sbp->sim), target->target_id, sdev->lun_id); /* * Let CAM scan the bus if we are in the boot process. * XXX xpt_scan_bus cannot detect LUN larger than 0 * if LUN 0 doesn't exist. 
*/ if (sbp_cold > 0) { sdev->status = SBP_DEV_ATTACHED; SBP_UNLOCK(sbp); return; } sbp_scan_dev(sdev); SBP_UNLOCK(sbp); } static void sbp_agent_reset_callback(struct fw_xfer *xfer) { struct sbp_dev *sdev; sdev = (struct sbp_dev *)xfer->sc; SBP_DEBUG(1) device_printf(sdev->target->sbp->fd.dev, "%s:%s\n", __func__, sdev->bustgtlun); END_DEBUG if (xfer->resp != 0) { device_printf(sdev->target->sbp->fd.dev, "%s:%s resp=%d\n", __func__, sdev->bustgtlun, xfer->resp); } SBP_LOCK(sdev->target->sbp); sbp_xfer_free(xfer); if (sdev->path) { xpt_release_devq(sdev->path, sdev->freeze, TRUE); sdev->freeze = 0; } SBP_UNLOCK(sdev->target->sbp); } static void sbp_agent_reset(struct sbp_dev *sdev) { struct fw_xfer *xfer; struct fw_pkt *fp; SBP_LOCK_ASSERT(sdev->target->sbp); SBP_DEBUG(0) device_printf(sdev->target->sbp->fd.dev, "%s:%s\n", __func__, sdev->bustgtlun); END_DEBUG xfer = sbp_write_cmd(sdev, FWTCODE_WREQQ, 0x04); if (xfer == NULL) return; if (sdev->status == SBP_DEV_ATTACHED || sdev->status == SBP_DEV_PROBE) xfer->hand = sbp_agent_reset_callback; else xfer->hand = sbp_do_attach; fp = &xfer->send.hdr; fp->mode.wreqq.data = htonl(0xf); fw_asyreq(xfer->fc, -1, xfer); sbp_abort_all_ocbs(sdev, CAM_BDR_SENT); } static void sbp_busy_timeout_callback(struct fw_xfer *xfer) { struct sbp_dev *sdev; sdev = (struct sbp_dev *)xfer->sc; SBP_DEBUG(1) device_printf(sdev->target->sbp->fd.dev, "%s:%s\n", __func__, sdev->bustgtlun); END_DEBUG SBP_LOCK(sdev->target->sbp); sbp_xfer_free(xfer); sbp_agent_reset(sdev); SBP_UNLOCK(sdev->target->sbp); } static void sbp_busy_timeout(struct sbp_dev *sdev) { struct fw_pkt *fp; struct fw_xfer *xfer; SBP_DEBUG(0) device_printf(sdev->target->sbp->fd.dev, "%s:%s\n", __func__, sdev->bustgtlun); END_DEBUG xfer = sbp_write_cmd(sdev, FWTCODE_WREQQ, 0); xfer->hand = sbp_busy_timeout_callback; fp = &xfer->send.hdr; fp->mode.wreqq.dest_hi = 0xffff; fp->mode.wreqq.dest_lo = 0xf0000000 | BUSY_TIMEOUT; fp->mode.wreqq.data = htonl((1 << (13 + 12)) | 0xf); 
fw_asyreq(xfer->fc, -1, xfer); } static void sbp_orb_pointer_callback(struct fw_xfer *xfer) { struct sbp_dev *sdev; sdev = (struct sbp_dev *)xfer->sc; SBP_DEBUG(2) device_printf(sdev->target->sbp->fd.dev, "%s:%s\n", __func__, sdev->bustgtlun); END_DEBUG if (xfer->resp != 0) { /* XXX */ printf("%s: xfer->resp = %d\n", __func__, xfer->resp); } SBP_LOCK(sdev->target->sbp); sbp_xfer_free(xfer); sdev->flags &= ~ORB_POINTER_ACTIVE; if ((sdev->flags & ORB_POINTER_NEED) != 0) { struct sbp_ocb *ocb; sdev->flags &= ~ORB_POINTER_NEED; ocb = STAILQ_FIRST(&sdev->ocbs); if (ocb != NULL) sbp_orb_pointer(sdev, ocb); } SBP_UNLOCK(sdev->target->sbp); return; } static void sbp_orb_pointer(struct sbp_dev *sdev, struct sbp_ocb *ocb) { struct fw_xfer *xfer; struct fw_pkt *fp; SBP_DEBUG(1) device_printf(sdev->target->sbp->fd.dev, "%s:%s 0x%08x\n", __func__, sdev->bustgtlun, (uint32_t)ocb->bus_addr); END_DEBUG SBP_LOCK_ASSERT(sdev->target->sbp); if ((sdev->flags & ORB_POINTER_ACTIVE) != 0) { SBP_DEBUG(0) printf("%s: orb pointer active\n", __func__); END_DEBUG sdev->flags |= ORB_POINTER_NEED; return; } sdev->flags |= ORB_POINTER_ACTIVE; xfer = sbp_write_cmd(sdev, FWTCODE_WREQB, 0x08); if (xfer == NULL) return; xfer->hand = sbp_orb_pointer_callback; fp = &xfer->send.hdr; fp->mode.wreqb.len = 8; fp->mode.wreqb.extcode = 0; xfer->send.payload[0] = htonl(((sdev->target->sbp->fd.fc->nodeid | FWLOCALBUS) << 16)); xfer->send.payload[1] = htonl((uint32_t)ocb->bus_addr); if (fw_asyreq(xfer->fc, -1, xfer) != 0) { sbp_xfer_free(xfer); ocb->ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ocb->ccb); } } static void sbp_doorbell_callback(struct fw_xfer *xfer) { struct sbp_dev *sdev; sdev = (struct sbp_dev *)xfer->sc; SBP_DEBUG(1) device_printf(sdev->target->sbp->fd.dev, "%s:%s\n", __func__, sdev->bustgtlun); END_DEBUG if (xfer->resp != 0) { /* XXX */ device_printf(sdev->target->sbp->fd.dev, "%s: xfer->resp = %d\n", __func__, xfer->resp); } SBP_LOCK(sdev->target->sbp); sbp_xfer_free(xfer); sdev->flags &= 
~ORB_DOORBELL_ACTIVE; if ((sdev->flags & ORB_DOORBELL_NEED) != 0) { sdev->flags &= ~ORB_DOORBELL_NEED; sbp_doorbell(sdev); } SBP_UNLOCK(sdev->target->sbp); } static void sbp_doorbell(struct sbp_dev *sdev) { struct fw_xfer *xfer; struct fw_pkt *fp; SBP_DEBUG(1) device_printf(sdev->target->sbp->fd.dev, "%s:%s\n", __func__, sdev->bustgtlun); END_DEBUG if ((sdev->flags & ORB_DOORBELL_ACTIVE) != 0) { sdev->flags |= ORB_DOORBELL_NEED; return; } sdev->flags |= ORB_DOORBELL_ACTIVE; xfer = sbp_write_cmd(sdev, FWTCODE_WREQQ, 0x10); if (xfer == NULL) return; xfer->hand = sbp_doorbell_callback; fp = &xfer->send.hdr; fp->mode.wreqq.data = htonl(0xf); fw_asyreq(xfer->fc, -1, xfer); } static struct fw_xfer * sbp_write_cmd(struct sbp_dev *sdev, int tcode, int offset) { struct fw_xfer *xfer; struct fw_pkt *fp; struct sbp_target *target; int new = 0; SBP_LOCK_ASSERT(sdev->target->sbp); target = sdev->target; xfer = STAILQ_FIRST(&target->xferlist); if (xfer == NULL) { if (target->n_xfer > 5 /* XXX */) { printf("sbp: no more xfer for this target\n"); return (NULL); } xfer = fw_xfer_alloc_buf(M_SBP, 8, 0); if (xfer == NULL) { printf("sbp: fw_xfer_alloc_buf failed\n"); return NULL; } target->n_xfer++; if (debug) printf("sbp: alloc %d xfer\n", target->n_xfer); new = 1; } else { STAILQ_REMOVE_HEAD(&target->xferlist, link); } if (new) { xfer->recv.pay_len = 0; xfer->send.spd = min(sdev->target->fwdev->speed, max_speed); xfer->fc = sdev->target->sbp->fd.fc; } if (tcode == FWTCODE_WREQB) xfer->send.pay_len = 8; else xfer->send.pay_len = 0; xfer->sc = (caddr_t)sdev; fp = &xfer->send.hdr; fp->mode.wreqq.dest_hi = sdev->login->cmd_hi; fp->mode.wreqq.dest_lo = sdev->login->cmd_lo + offset; fp->mode.wreqq.tlrt = 0; fp->mode.wreqq.tcode = tcode; fp->mode.wreqq.pri = 0; fp->mode.wreqq.dst = FWLOCALBUS | sdev->target->fwdev->dst; return xfer; } static void sbp_mgm_orb(struct sbp_dev *sdev, int func, struct sbp_ocb *aocb) { struct fw_xfer *xfer; struct fw_pkt *fp; struct sbp_ocb *ocb; struct 
sbp_target *target; int nid; target = sdev->target; nid = target->sbp->fd.fc->nodeid | FWLOCALBUS; SBP_LOCK_ASSERT(target->sbp); if (func == ORB_FUN_RUNQUEUE) { ocb = STAILQ_FIRST(&target->mgm_ocb_queue); if (target->mgm_ocb_cur != NULL || ocb == NULL) { return; } STAILQ_REMOVE_HEAD(&target->mgm_ocb_queue, ocb); goto start; } if ((ocb = sbp_get_ocb(sdev)) == NULL) { /* XXX */ return; } ocb->flags = OCB_ACT_MGM; ocb->sdev = sdev; bzero((void *)ocb->orb, sizeof(ocb->orb)); ocb->orb[6] = htonl((nid << 16) | SBP_BIND_HI); ocb->orb[7] = htonl(SBP_DEV2ADDR(target->target_id, sdev->lun_id)); SBP_DEBUG(0) device_printf(sdev->target->sbp->fd.dev, "%s:%s %s\n", __func__,sdev->bustgtlun, orb_fun_name[(func >> 16) & 0xf]); END_DEBUG switch (func) { case ORB_FUN_LGI: ocb->orb[0] = ocb->orb[1] = 0; /* password */ ocb->orb[2] = htonl(nid << 16); ocb->orb[3] = htonl(sdev->dma.bus_addr); ocb->orb[4] = htonl(ORB_NOTIFY | sdev->lun_id); if (ex_login) ocb->orb[4] |= htonl(ORB_EXV); ocb->orb[5] = htonl(SBP_LOGIN_SIZE); fwdma_sync(&sdev->dma, BUS_DMASYNC_PREREAD); break; case ORB_FUN_ATA: ocb->orb[0] = htonl((0 << 16) | 0); ocb->orb[1] = htonl(aocb->bus_addr & 0xffffffff); /* fall through */ case ORB_FUN_RCN: case ORB_FUN_LGO: case ORB_FUN_LUR: case ORB_FUN_RST: case ORB_FUN_ATS: ocb->orb[4] = htonl(ORB_NOTIFY | func | sdev->login->id); break; } if (target->mgm_ocb_cur != NULL) { /* there is a standing ORB */ STAILQ_INSERT_TAIL(&sdev->target->mgm_ocb_queue, ocb, ocb); return; } start: target->mgm_ocb_cur = ocb; callout_reset(&target->mgm_ocb_timeout, 5 * hz, sbp_mgm_timeout, (caddr_t)ocb); xfer = sbp_write_cmd(sdev, FWTCODE_WREQB, 0); if (xfer == NULL) { return; } xfer->hand = sbp_mgm_callback; fp = &xfer->send.hdr; fp->mode.wreqb.dest_hi = sdev->target->mgm_hi; fp->mode.wreqb.dest_lo = sdev->target->mgm_lo; fp->mode.wreqb.len = 8; fp->mode.wreqb.extcode = 0; xfer->send.payload[0] = htonl(nid << 16); xfer->send.payload[1] = htonl(ocb->bus_addr & 0xffffffff); fw_asyreq(xfer->fc, -1, 
xfer); } static void sbp_print_scsi_cmd(struct sbp_ocb *ocb) { struct ccb_scsiio *csio; csio = &ocb->ccb->csio; printf("%s:%d:%jx XPT_SCSI_IO: " "cmd: %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x" ", flags: 0x%02x, " "%db cmd/%db data/%db sense\n", device_get_nameunit(ocb->sdev->target->sbp->fd.dev), ocb->ccb->ccb_h.target_id, (uintmax_t)ocb->ccb->ccb_h.target_lun, csio->cdb_io.cdb_bytes[0], csio->cdb_io.cdb_bytes[1], csio->cdb_io.cdb_bytes[2], csio->cdb_io.cdb_bytes[3], csio->cdb_io.cdb_bytes[4], csio->cdb_io.cdb_bytes[5], csio->cdb_io.cdb_bytes[6], csio->cdb_io.cdb_bytes[7], csio->cdb_io.cdb_bytes[8], csio->cdb_io.cdb_bytes[9], ocb->ccb->ccb_h.flags & CAM_DIR_MASK, csio->cdb_len, csio->dxfer_len, csio->sense_len); } static void sbp_scsi_status(struct sbp_status *sbp_status, struct sbp_ocb *ocb) { struct sbp_cmd_status *sbp_cmd_status; struct scsi_sense_data_fixed *sense; sbp_cmd_status = (struct sbp_cmd_status *)sbp_status->data; sense = (struct scsi_sense_data_fixed *)&ocb->ccb->csio.sense_data; SBP_DEBUG(0) sbp_print_scsi_cmd(ocb); /* XXX need decode status */ printf("%s: SCSI status %x sfmt %x valid %x key %x code %x qlfr %x len %d\n", ocb->sdev->bustgtlun, sbp_cmd_status->status, sbp_cmd_status->sfmt, sbp_cmd_status->valid, sbp_cmd_status->s_key, sbp_cmd_status->s_code, sbp_cmd_status->s_qlfr, sbp_status->len); END_DEBUG switch (sbp_cmd_status->status) { case SCSI_STATUS_CHECK_COND: case SCSI_STATUS_BUSY: case SCSI_STATUS_CMD_TERMINATED: if (sbp_cmd_status->sfmt == SBP_SFMT_CURR) { sense->error_code = SSD_CURRENT_ERROR; } else { sense->error_code = SSD_DEFERRED_ERROR; } if (sbp_cmd_status->valid) sense->error_code |= SSD_ERRCODE_VALID; sense->flags = sbp_cmd_status->s_key; if (sbp_cmd_status->mark) sense->flags |= SSD_FILEMARK; if (sbp_cmd_status->eom) sense->flags |= SSD_EOM; if (sbp_cmd_status->ill_len) sense->flags |= SSD_ILI; bcopy(&sbp_cmd_status->info, &sense->info[0], 4); if (sbp_status->len <= 1) /* XXX not scsi status. 
shouldn't be happened */ sense->extra_len = 0; else if (sbp_status->len <= 4) /* add_sense_code(_qual), info, cmd_spec_info */ sense->extra_len = 6; else /* fru, sense_key_spec */ sense->extra_len = 10; bcopy(&sbp_cmd_status->cdb, &sense->cmd_spec_info[0], 4); sense->add_sense_code = sbp_cmd_status->s_code; sense->add_sense_code_qual = sbp_cmd_status->s_qlfr; sense->fru = sbp_cmd_status->fru; bcopy(&sbp_cmd_status->s_keydep[0], &sense->sense_key_spec[0], 3); ocb->ccb->csio.scsi_status = sbp_cmd_status->status; ocb->ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR | CAM_AUTOSNS_VALID; /* { uint8_t j, *tmp; tmp = sense; for (j = 0; j < 32; j += 8) { printf("sense %02x%02x %02x%02x %02x%02x %02x%02x\n", tmp[j], tmp[j + 1], tmp[j + 2], tmp[j + 3], tmp[j + 4], tmp[j + 5], tmp[j + 6], tmp[j + 7]); } } */ break; default: device_printf(ocb->sdev->target->sbp->fd.dev, "%s:%s unknown scsi status 0x%x\n", __func__, ocb->sdev->bustgtlun, sbp_cmd_status->status); } } static void sbp_fix_inq_data(struct sbp_ocb *ocb) { union ccb *ccb; struct sbp_dev *sdev; struct scsi_inquiry_data *inq; ccb = ocb->ccb; sdev = ocb->sdev; if (ccb->csio.cdb_io.cdb_bytes[1] & SI_EVPD) return; SBP_DEBUG(1) device_printf(sdev->target->sbp->fd.dev, "%s:%s\n", __func__, sdev->bustgtlun); END_DEBUG inq = (struct scsi_inquiry_data *) ccb->csio.data_ptr; switch (SID_TYPE(inq)) { case T_DIRECT: #if 0 /* * XXX Convert Direct Access device to RBC. * I've never seen FireWire DA devices which support READ_6. */ if (SID_TYPE(inq) == T_DIRECT) inq->device |= T_RBC; /* T_DIRECT == 0 */ #endif /* fall through */ case T_RBC: /* * Override vendor/product/revision information. * Some devices sometimes return strange strings. */ #if 1 bcopy(sdev->vendor, inq->vendor, sizeof(inq->vendor)); bcopy(sdev->product, inq->product, sizeof(inq->product)); bcopy(sdev->revision + 2, inq->revision, sizeof(inq->revision)); #endif break; } /* * Force to enable/disable tagged queuing. 
* XXX CAM also checks SCP_QUEUE_DQUE flag in the control mode page. */ if (sbp_tags > 0) inq->flags |= SID_CmdQue; else if (sbp_tags < 0) inq->flags &= ~SID_CmdQue; } static void sbp_recv1(struct fw_xfer *xfer) { struct fw_pkt *rfp; #if NEED_RESPONSE struct fw_pkt *sfp; #endif struct sbp_softc *sbp; struct sbp_dev *sdev; struct sbp_ocb *ocb; struct sbp_login_res *login_res = NULL; struct sbp_status *sbp_status; struct sbp_target *target; int orb_fun, status_valid0, status_valid, t, l, reset_agent = 0; uint32_t addr; /* uint32_t *ld; ld = xfer->recv.buf; printf("sbp %x %d %d %08x %08x %08x %08x\n", xfer->resp, xfer->recv.len, xfer->recv.off, ntohl(ld[0]), ntohl(ld[1]), ntohl(ld[2]), ntohl(ld[3])); printf("sbp %08x %08x %08x %08x\n", ntohl(ld[4]), ntohl(ld[5]), ntohl(ld[6]), ntohl(ld[7])); printf("sbp %08x %08x %08x %08x\n", ntohl(ld[8]), ntohl(ld[9]), ntohl(ld[10]), ntohl(ld[11])); */ sbp = (struct sbp_softc *)xfer->sc; SBP_LOCK_ASSERT(sbp); if (xfer->resp != 0) { printf("sbp_recv: xfer->resp = %d\n", xfer->resp); goto done0; } if (xfer->recv.payload == NULL) { printf("sbp_recv: xfer->recv.payload == NULL\n"); goto done0; } rfp = &xfer->recv.hdr; if (rfp->mode.wreqb.tcode != FWTCODE_WREQB) { printf("sbp_recv: tcode = %d\n", rfp->mode.wreqb.tcode); goto done0; } sbp_status = (struct sbp_status *)xfer->recv.payload; addr = rfp->mode.wreqb.dest_lo; SBP_DEBUG(2) printf("received address 0x%x\n", addr); END_DEBUG t = SBP_ADDR2TRG(addr); if (t >= SBP_NUM_TARGETS) { device_printf(sbp->fd.dev, "sbp_recv1: invalid target %d\n", t); goto done0; } target = &sbp->targets[t]; l = SBP_ADDR2LUN(addr); if (l >= target->num_lun || target->luns[l] == NULL) { device_printf(sbp->fd.dev, "sbp_recv1: invalid lun %d (target=%d)\n", l, t); goto done0; } sdev = target->luns[l]; ocb = NULL; switch (sbp_status->src) { case 0: case 1: /* check mgm_ocb_cur first */ ocb = target->mgm_ocb_cur; if (ocb != NULL) { if (OCB_MATCH(ocb, sbp_status)) { callout_stop(&target->mgm_ocb_timeout); 
target->mgm_ocb_cur = NULL; break; } } ocb = sbp_dequeue_ocb(sdev, sbp_status); if (ocb == NULL) { device_printf(sdev->target->sbp->fd.dev, "%s:%s No ocb(%x) on the queue\n", __func__,sdev->bustgtlun, ntohl(sbp_status->orb_lo)); } break; case 2: /* unsolicit */ device_printf(sdev->target->sbp->fd.dev, "%s:%s unsolicit status received\n", __func__, sdev->bustgtlun); break; default: device_printf(sdev->target->sbp->fd.dev, "%s:%s unknown sbp_status->src\n", __func__, sdev->bustgtlun); } status_valid0 = (sbp_status->src < 2 && sbp_status->resp == ORB_RES_CMPL && sbp_status->dead == 0); status_valid = (status_valid0 && sbp_status->status == 0); if (!status_valid0 || debug > 2) { int status; SBP_DEBUG(0) device_printf(sdev->target->sbp->fd.dev, "%s:%s ORB status src:%x resp:%x dead:%x" " len:%x stat:%x orb:%x%08x\n", __func__, sdev->bustgtlun, sbp_status->src, sbp_status->resp, sbp_status->dead, sbp_status->len, sbp_status->status, ntohs(sbp_status->orb_hi), ntohl(sbp_status->orb_lo)); END_DEBUG device_printf(sdev->target->sbp->fd.dev, "%s\n", sdev->bustgtlun); status = sbp_status->status; switch (sbp_status->resp) { case 0: if (status > MAX_ORB_STATUS0) printf("%s\n", orb_status0[MAX_ORB_STATUS0]); else printf("%s\n", orb_status0[status]); break; case 1: printf("Obj: %s, Error: %s\n", orb_status1_object[(status >> 6) & 3], orb_status1_serial_bus_error[status & 0xf]); break; case 2: printf("Illegal request\n"); break; case 3: printf("Vendor dependent\n"); break; default: printf("unknown respose code %d\n", sbp_status->resp); } } /* we have to reset the fetch agent if it's dead */ if (sbp_status->dead) { if (sdev->path) { xpt_freeze_devq(sdev->path, 1); sdev->freeze++; } reset_agent = 1; } if (ocb == NULL) goto done; switch (ntohl(ocb->orb[4]) & ORB_FMT_MSK) { case ORB_FMT_NOP: break; case ORB_FMT_VED: break; case ORB_FMT_STD: switch (ocb->flags) { case OCB_ACT_MGM: orb_fun = ntohl(ocb->orb[4]) & ORB_FUN_MSK; reset_agent = 0; switch (orb_fun) { case ORB_FUN_LGI: 
fwdma_sync(&sdev->dma, BUS_DMASYNC_POSTREAD); login_res = sdev->login; login_res->len = ntohs(login_res->len); login_res->id = ntohs(login_res->id); login_res->cmd_hi = ntohs(login_res->cmd_hi); login_res->cmd_lo = ntohl(login_res->cmd_lo); if (status_valid) { SBP_DEBUG(0) device_printf(sdev->target->sbp->fd.dev, "%s:%s login: len %d, ID %d, cmd %08x%08x, recon_hold %d\n", __func__, sdev->bustgtlun, login_res->len, login_res->id, login_res->cmd_hi, login_res->cmd_lo, ntohs(login_res->recon_hold)); END_DEBUG sbp_busy_timeout(sdev); } else { /* forgot logout? */ device_printf(sdev->target->sbp->fd.dev, "%s:%s login failed\n", __func__, sdev->bustgtlun); sdev->status = SBP_DEV_RESET; } break; case ORB_FUN_RCN: login_res = sdev->login; if (status_valid) { SBP_DEBUG(0) device_printf(sdev->target->sbp->fd.dev, "%s:%s reconnect: len %d, ID %d, cmd %08x%08x\n", __func__, sdev->bustgtlun, login_res->len, login_res->id, login_res->cmd_hi, login_res->cmd_lo); END_DEBUG if (sdev->status == SBP_DEV_ATTACHED) sbp_scan_dev(sdev); else sbp_agent_reset(sdev); } else { /* reconnection hold time exceed? 
*/ SBP_DEBUG(0) device_printf(sdev->target->sbp->fd.dev, "%s:%s reconnect failed\n", __func__, sdev->bustgtlun); END_DEBUG sbp_login(sdev); } break; case ORB_FUN_LGO: sdev->status = SBP_DEV_RESET; break; case ORB_FUN_RST: sbp_busy_timeout(sdev); break; case ORB_FUN_LUR: case ORB_FUN_ATA: case ORB_FUN_ATS: sbp_agent_reset(sdev); break; default: device_printf(sdev->target->sbp->fd.dev, "%s:%s unknown function %d\n", __func__, sdev->bustgtlun, orb_fun); break; } sbp_mgm_orb(sdev, ORB_FUN_RUNQUEUE, NULL); break; case OCB_ACT_CMD: sdev->timeout = 0; if (ocb->ccb != NULL) { union ccb *ccb; ccb = ocb->ccb; if (sbp_status->len > 1) { sbp_scsi_status(sbp_status, ocb); } else { if (sbp_status->resp != ORB_RES_CMPL) { ccb->ccb_h.status = CAM_REQ_CMP_ERR; } else { ccb->ccb_h.status = CAM_REQ_CMP; } } /* fix up inq data */ if (ccb->csio.cdb_io.cdb_bytes[0] == INQUIRY) sbp_fix_inq_data(ocb); xpt_done(ccb); } break; default: break; } } if (!use_doorbell) sbp_free_ocb(sdev, ocb); done: if (reset_agent) sbp_agent_reset(sdev); done0: xfer->recv.pay_len = SBP_RECV_LEN; /* The received packet is usually small enough to be stored within * the buffer. In that case, the controller return ack_complete and * no respose is necessary. * * XXX fwohci.c and firewire.c should inform event_code such as * ack_complete or ack_pending to upper driver. 
*/ #if NEED_RESPONSE xfer->send.off = 0; sfp = (struct fw_pkt *)xfer->send.buf; sfp->mode.wres.dst = rfp->mode.wreqb.src; xfer->dst = sfp->mode.wres.dst; xfer->spd = min(sdev->target->fwdev->speed, max_speed); xfer->hand = sbp_loginres_callback; sfp->mode.wres.tlrt = rfp->mode.wreqb.tlrt; sfp->mode.wres.tcode = FWTCODE_WRES; sfp->mode.wres.rtcode = 0; sfp->mode.wres.pri = 0; fw_asyreq(xfer->fc, -1, xfer); #else /* recycle */ STAILQ_INSERT_TAIL(&sbp->fwb.xferlist, xfer, link); #endif } static void sbp_recv(struct fw_xfer *xfer) { struct sbp_softc *sbp; sbp = (struct sbp_softc *)xfer->sc; SBP_LOCK(sbp); sbp_recv1(xfer); SBP_UNLOCK(sbp); } /* * sbp_attach() */ static int sbp_attach(device_t dev) { struct sbp_softc *sbp; struct cam_devq *devq; struct firewire_comm *fc; int i, error; if (DFLTPHYS > SBP_MAXPHYS) device_printf(dev, "Warning, DFLTPHYS(%dKB) is larger than " "SBP_MAXPHYS(%dKB).\n", DFLTPHYS / 1024, SBP_MAXPHYS / 1024); if (!firewire_phydma_enable) device_printf(dev, "Warning, hw.firewire.phydma_enable must be 1 " "for SBP over FireWire.\n"); SBP_DEBUG(0) printf("sbp_attach (cold=%d)\n", cold); END_DEBUG if (cold) sbp_cold++; sbp = device_get_softc(dev); sbp->fd.dev = dev; sbp->fd.fc = fc = device_get_ivars(dev); mtx_init(&sbp->mtx, "sbp", NULL, MTX_DEF); if (max_speed < 0) max_speed = fc->speed; error = bus_dma_tag_create(/*parent*/fc->dmat, /* XXX shoud be 4 for sane backend? 
*/ /*alignment*/1, /*boundary*/0, /*lowaddr*/BUS_SPACE_MAXADDR_32BIT, /*highaddr*/BUS_SPACE_MAXADDR, /*filter*/NULL, /*filterarg*/NULL, /*maxsize*/0x100000, /*nsegments*/SBP_IND_MAX, /*maxsegsz*/SBP_SEG_MAX, /*flags*/BUS_DMA_ALLOCNOW, /*lockfunc*/busdma_lock_mutex, /*lockarg*/&sbp->mtx, &sbp->dmat); if (error != 0) { printf("sbp_attach: Could not allocate DMA tag " "- error %d\n", error); return (ENOMEM); } devq = cam_simq_alloc(/*maxopenings*/SBP_NUM_OCB); if (devq == NULL) return (ENXIO); for (i = 0; i < SBP_NUM_TARGETS; i++) { sbp->targets[i].fwdev = NULL; sbp->targets[i].luns = NULL; sbp->targets[i].sbp = sbp; } sbp->sim = cam_sim_alloc(sbp_action, sbp_poll, "sbp", sbp, device_get_unit(dev), &sbp->mtx, /*untagged*/ 1, /*tagged*/ SBP_QUEUE_LEN - 1, devq); if (sbp->sim == NULL) { cam_simq_free(devq); return (ENXIO); } SBP_LOCK(sbp); if (xpt_bus_register(sbp->sim, dev, /*bus*/0) != CAM_SUCCESS) goto fail; if (xpt_create_path(&sbp->path, NULL, cam_sim_path(sbp->sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { xpt_bus_deregister(cam_sim_path(sbp->sim)); goto fail; } SBP_UNLOCK(sbp); /* We reserve 16 bit space (4 bytes X 64 targets X 256 luns) */ sbp->fwb.start = ((u_int64_t)SBP_BIND_HI << 32) | SBP_DEV2ADDR(0, 0); sbp->fwb.end = sbp->fwb.start + 0xffff; /* pre-allocate xfer */ STAILQ_INIT(&sbp->fwb.xferlist); fw_xferlist_add(&sbp->fwb.xferlist, M_SBP, /*send*/ 0, /*recv*/ SBP_RECV_LEN, SBP_NUM_OCB/2, fc, (void *)sbp, sbp_recv); fw_bindadd(fc, &sbp->fwb); sbp->fd.post_busreset = sbp_post_busreset; sbp->fd.post_explore = sbp_post_explore; if (fc->status != -1) { sbp_post_busreset(sbp); sbp_post_explore(sbp); } SBP_LOCK(sbp); xpt_async(AC_BUS_RESET, sbp->path, /*arg*/ NULL); SBP_UNLOCK(sbp); return (0); fail: SBP_UNLOCK(sbp); cam_sim_free(sbp->sim, /*free_devq*/TRUE); return (ENXIO); } static int sbp_logout_all(struct sbp_softc *sbp) { struct sbp_target *target; struct sbp_dev *sdev; int i, j; SBP_DEBUG(0) printf("sbp_logout_all\n"); END_DEBUG 
SBP_LOCK_ASSERT(sbp); for (i = 0; i < SBP_NUM_TARGETS; i++) { target = &sbp->targets[i]; if (target->luns == NULL) continue; for (j = 0; j < target->num_lun; j++) { sdev = target->luns[j]; if (sdev == NULL) continue; callout_stop(&sdev->login_callout); if (sdev->status >= SBP_DEV_TOATTACH && sdev->status <= SBP_DEV_ATTACHED) sbp_mgm_orb(sdev, ORB_FUN_LGO, NULL); } } return 0; } static int sbp_shutdown(device_t dev) { struct sbp_softc *sbp = ((struct sbp_softc *)device_get_softc(dev)); SBP_LOCK(sbp); sbp_logout_all(sbp); SBP_UNLOCK(sbp); return (0); } static void sbp_free_sdev(struct sbp_dev *sdev) { struct sbp_softc *sbp; int i; if (sdev == NULL) return; sbp = sdev->target->sbp; SBP_UNLOCK(sbp); callout_drain(&sdev->login_callout); for (i = 0; i < SBP_QUEUE_LEN; i++) { callout_drain(&sdev->ocb[i].timer); bus_dmamap_destroy(sbp->dmat, sdev->ocb[i].dmamap); } fwdma_free(sbp->fd.fc, &sdev->dma); free(sdev, M_SBP); SBP_LOCK(sbp); } static void sbp_free_target(struct sbp_target *target) { struct sbp_softc *sbp; struct fw_xfer *xfer, *next; int i; if (target->luns == NULL) return; sbp = target->sbp; SBP_LOCK_ASSERT(sbp); SBP_UNLOCK(sbp); callout_drain(&target->mgm_ocb_timeout); callout_drain(&target->scan_callout); SBP_LOCK(sbp); for (i = 0; i < target->num_lun; i++) sbp_free_sdev(target->luns[i]); STAILQ_FOREACH_SAFE(xfer, &target->xferlist, link, next) { fw_xfer_free_buf(xfer); } STAILQ_INIT(&target->xferlist); free(target->luns, M_SBP); target->num_lun = 0; target->luns = NULL; target->fwdev = NULL; } static int sbp_detach(device_t dev) { struct sbp_softc *sbp = ((struct sbp_softc *)device_get_softc(dev)); struct firewire_comm *fc = sbp->fd.fc; int i; SBP_DEBUG(0) printf("sbp_detach\n"); END_DEBUG SBP_LOCK(sbp); for (i = 0; i < SBP_NUM_TARGETS; i++) sbp_cam_detach_target(&sbp->targets[i]); xpt_async(AC_LOST_DEVICE, sbp->path, NULL); xpt_free_path(sbp->path); xpt_bus_deregister(cam_sim_path(sbp->sim)); cam_sim_free(sbp->sim, /*free_devq*/ TRUE); sbp_logout_all(sbp); 
SBP_UNLOCK(sbp); /* XXX wait for logout completion */ pause("sbpdtc", hz/2); SBP_LOCK(sbp); for (i = 0; i < SBP_NUM_TARGETS; i++) sbp_free_target(&sbp->targets[i]); SBP_UNLOCK(sbp); fw_bindremove(fc, &sbp->fwb); fw_xferlist_remove(&sbp->fwb.xferlist); bus_dma_tag_destroy(sbp->dmat); mtx_destroy(&sbp->mtx); return (0); } static void sbp_cam_detach_sdev(struct sbp_dev *sdev) { if (sdev == NULL) return; if (sdev->status == SBP_DEV_DEAD) return; if (sdev->status == SBP_DEV_RESET) return; SBP_LOCK_ASSERT(sdev->target->sbp); sbp_abort_all_ocbs(sdev, CAM_DEV_NOT_THERE); if (sdev->path) { xpt_release_devq(sdev->path, sdev->freeze, TRUE); sdev->freeze = 0; xpt_async(AC_LOST_DEVICE, sdev->path, NULL); xpt_free_path(sdev->path); sdev->path = NULL; } } static void sbp_cam_detach_target(struct sbp_target *target) { int i; SBP_LOCK_ASSERT(target->sbp); if (target->luns != NULL) { SBP_DEBUG(0) printf("sbp_detach_target %d\n", target->target_id); END_DEBUG callout_stop(&target->scan_callout); for (i = 0; i < target->num_lun; i++) sbp_cam_detach_sdev(target->luns[i]); } } static void sbp_target_reset(struct sbp_dev *sdev, int method) { int i; struct sbp_target *target = sdev->target; struct sbp_dev *tsdev; SBP_LOCK_ASSERT(target->sbp); for (i = 0; i < target->num_lun; i++) { tsdev = target->luns[i]; if (tsdev == NULL) continue; if (tsdev->status == SBP_DEV_DEAD) continue; if (tsdev->status == SBP_DEV_RESET) continue; xpt_freeze_devq(tsdev->path, 1); tsdev->freeze++; sbp_abort_all_ocbs(tsdev, CAM_CMD_TIMEOUT); if (method == 2) tsdev->status = SBP_DEV_LOGIN; } switch (method) { case 1: printf("target reset\n"); sbp_mgm_orb(sdev, ORB_FUN_RST, NULL); break; case 2: printf("reset start\n"); sbp_reset_start(sdev); break; } } static void sbp_mgm_timeout(void *arg) { struct sbp_ocb *ocb = (struct sbp_ocb *)arg; struct sbp_dev *sdev = ocb->sdev; struct sbp_target *target = sdev->target; SBP_LOCK_ASSERT(target->sbp); device_printf(sdev->target->sbp->fd.dev, "%s:%s request timeout(mgm 
orb:0x%08x)\n", __func__, sdev->bustgtlun, (uint32_t)ocb->bus_addr); target->mgm_ocb_cur = NULL; sbp_free_ocb(sdev, ocb); #if 0 /* XXX */ printf("run next request\n"); sbp_mgm_orb(sdev, ORB_FUN_RUNQUEUE, NULL); #endif device_printf(sdev->target->sbp->fd.dev, "%s:%s reset start\n", __func__, sdev->bustgtlun); sbp_reset_start(sdev); } static void sbp_timeout(void *arg) { struct sbp_ocb *ocb = (struct sbp_ocb *)arg; struct sbp_dev *sdev = ocb->sdev; device_printf(sdev->target->sbp->fd.dev, "%s:%s request timeout(cmd orb:0x%08x) ... ", __func__, sdev->bustgtlun, (uint32_t)ocb->bus_addr); SBP_LOCK_ASSERT(sdev->target->sbp); sdev->timeout++; switch (sdev->timeout) { case 1: printf("agent reset\n"); xpt_freeze_devq(sdev->path, 1); sdev->freeze++; sbp_abort_all_ocbs(sdev, CAM_CMD_TIMEOUT); sbp_agent_reset(sdev); break; case 2: case 3: sbp_target_reset(sdev, sdev->timeout - 1); break; #if 0 default: /* XXX give up */ sbp_cam_detach_target(target); if (target->luns != NULL) free(target->luns, M_SBP); target->num_lun = 0; target->luns = NULL; target->fwdev = NULL; #endif } } static void sbp_action(struct cam_sim *sim, union ccb *ccb) { struct sbp_softc *sbp = (struct sbp_softc *)sim->softc; struct sbp_target *target = NULL; struct sbp_dev *sdev = NULL; if (sbp != NULL) SBP_LOCK_ASSERT(sbp); /* target:lun -> sdev mapping */ if (sbp != NULL && ccb->ccb_h.target_id != CAM_TARGET_WILDCARD && ccb->ccb_h.target_id < SBP_NUM_TARGETS) { target = &sbp->targets[ccb->ccb_h.target_id]; if (target->fwdev != NULL && ccb->ccb_h.target_lun != CAM_LUN_WILDCARD && ccb->ccb_h.target_lun < target->num_lun) { sdev = target->luns[ccb->ccb_h.target_lun]; if (sdev != NULL && sdev->status != SBP_DEV_ATTACHED && sdev->status != SBP_DEV_PROBE) sdev = NULL; } } SBP_DEBUG(1) if (sdev == NULL) printf("invalid target %d lun %jx\n", ccb->ccb_h.target_id, (uintmax_t)ccb->ccb_h.target_lun); END_DEBUG switch (ccb->ccb_h.func_code) { case XPT_SCSI_IO: case XPT_RESET_DEV: case XPT_GET_TRAN_SETTINGS: case 
XPT_SET_TRAN_SETTINGS: case XPT_CALC_GEOMETRY: if (sdev == NULL) { SBP_DEBUG(1) printf("%s:%d:%jx:func_code 0x%04x: " "Invalid target (target needed)\n", device_get_nameunit(sbp->fd.dev), ccb->ccb_h.target_id, (uintmax_t)ccb->ccb_h.target_lun, ccb->ccb_h.func_code); END_DEBUG ccb->ccb_h.status = CAM_DEV_NOT_THERE; xpt_done(ccb); return; } break; case XPT_PATH_INQ: case XPT_NOOP: /* The opcodes sometimes aimed at a target (sc is valid), * sometimes aimed at the SIM (sc is invalid and target is * CAM_TARGET_WILDCARD) */ if (sbp == NULL && ccb->ccb_h.target_id != CAM_TARGET_WILDCARD) { SBP_DEBUG(0) printf("%s:%d:%jx func_code 0x%04x: " "Invalid target (no wildcard)\n", device_get_nameunit(sbp->fd.dev), ccb->ccb_h.target_id, (uintmax_t)ccb->ccb_h.target_lun, ccb->ccb_h.func_code); END_DEBUG ccb->ccb_h.status = CAM_DEV_NOT_THERE; xpt_done(ccb); return; } break; default: /* XXX Hm, we should check the input parameters */ break; } switch (ccb->ccb_h.func_code) { case XPT_SCSI_IO: { struct ccb_scsiio *csio; struct sbp_ocb *ocb; int speed; void *cdb; csio = &ccb->csio; mtx_assert(sim->mtx, MA_OWNED); SBP_DEBUG(2) printf("%s:%d:%jx XPT_SCSI_IO: " "cmd: %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x" ", flags: 0x%02x, " "%db cmd/%db data/%db sense\n", device_get_nameunit(sbp->fd.dev), ccb->ccb_h.target_id, (uintmax_t)ccb->ccb_h.target_lun, csio->cdb_io.cdb_bytes[0], csio->cdb_io.cdb_bytes[1], csio->cdb_io.cdb_bytes[2], csio->cdb_io.cdb_bytes[3], csio->cdb_io.cdb_bytes[4], csio->cdb_io.cdb_bytes[5], csio->cdb_io.cdb_bytes[6], csio->cdb_io.cdb_bytes[7], csio->cdb_io.cdb_bytes[8], csio->cdb_io.cdb_bytes[9], ccb->ccb_h.flags & CAM_DIR_MASK, csio->cdb_len, csio->dxfer_len, csio->sense_len); END_DEBUG if (sdev == NULL) { ccb->ccb_h.status = CAM_DEV_NOT_THERE; xpt_done(ccb); return; } if (csio->cdb_len > sizeof(ocb->orb) - 5 * sizeof(uint32_t)) { ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); return; } #if 0 /* if we are in probe stage, pass only probe commands */ if 
(sdev->status == SBP_DEV_PROBE) { char *name; name = xpt_path_periph(ccb->ccb_h.path)->periph_name; printf("probe stage, periph name: %s\n", name); if (strcmp(name, "probe") != 0) { ccb->ccb_h.status = CAM_REQUEUE_REQ; xpt_done(ccb); return; } } #endif if ((ocb = sbp_get_ocb(sdev)) == NULL) { ccb->ccb_h.status = CAM_RESRC_UNAVAIL; if (sdev->freeze == 0) { xpt_freeze_devq(sdev->path, 1); sdev->freeze++; } xpt_done(ccb); return; } ocb->flags = OCB_ACT_CMD; ocb->sdev = sdev; ocb->ccb = ccb; ccb->ccb_h.ccb_sdev_ptr = sdev; ocb->orb[0] = htonl(1U << 31); ocb->orb[1] = 0; ocb->orb[2] = htonl(((sbp->fd.fc->nodeid | FWLOCALBUS) << 16)); ocb->orb[3] = htonl(ocb->bus_addr + IND_PTR_OFFSET); speed = min(target->fwdev->speed, max_speed); ocb->orb[4] = htonl(ORB_NOTIFY | ORB_CMD_SPD(speed) | ORB_CMD_MAXP(speed + 7)); if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { ocb->orb[4] |= htonl(ORB_CMD_IN); } if (csio->ccb_h.flags & CAM_CDB_POINTER) cdb = (void *)csio->cdb_io.cdb_ptr; else cdb = (void *)&csio->cdb_io.cdb_bytes; bcopy(cdb, (void *)&ocb->orb[5], csio->cdb_len); /* printf("ORB %08x %08x %08x %08x\n", ntohl(ocb->orb[0]), ntohl(ocb->orb[1]), ntohl(ocb->orb[2]), ntohl(ocb->orb[3])); printf("ORB %08x %08x %08x %08x\n", ntohl(ocb->orb[4]), ntohl(ocb->orb[5]), ntohl(ocb->orb[6]), ntohl(ocb->orb[7])); */ if (ccb->csio.dxfer_len > 0) { int error; error = bus_dmamap_load_ccb(/*dma tag*/sbp->dmat, /*dma map*/ocb->dmamap, ccb, sbp_execute_ocb, ocb, /*flags*/0); if (error) printf("sbp: bus_dmamap_load error %d\n", error); } else sbp_execute_ocb(ocb, NULL, 0, 0); break; } case XPT_CALC_GEOMETRY: { struct ccb_calc_geometry *ccg; ccg = &ccb->ccg; if (ccg->block_size == 0) { printf("sbp_action: block_size is 0.\n"); ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); break; } SBP_DEBUG(1) printf("%s:%d:%d:%jx:XPT_CALC_GEOMETRY: " "Volume size = %jd\n", device_get_nameunit(sbp->fd.dev), cam_sim_path(sbp->sim), ccb->ccb_h.target_id, (uintmax_t)ccb->ccb_h.target_lun, 
(uintmax_t)ccg->volume_size); END_DEBUG cam_calc_geometry(ccg, /*extended*/1); xpt_done(ccb); break; } case XPT_RESET_BUS: /* Reset the specified SCSI bus */ { SBP_DEBUG(1) printf("%s:%d:XPT_RESET_BUS: \n", device_get_nameunit(sbp->fd.dev), cam_sim_path(sbp->sim)); END_DEBUG ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); break; } case XPT_PATH_INQ: /* Path routing inquiry */ { struct ccb_pathinq *cpi = &ccb->cpi; SBP_DEBUG(1) printf("%s:%d:%jx XPT_PATH_INQ:.\n", device_get_nameunit(sbp->fd.dev), ccb->ccb_h.target_id, (uintmax_t)ccb->ccb_h.target_lun); END_DEBUG cpi->version_num = 1; /* XXX??? */ cpi->hba_inquiry = PI_TAG_ABLE; cpi->target_sprt = 0; cpi->hba_misc = PIM_NOBUSRESET | PIM_NO_6_BYTE; cpi->hba_eng_cnt = 0; cpi->max_target = SBP_NUM_TARGETS - 1; cpi->max_lun = SBP_NUM_LUNS - 1; cpi->initiator_id = SBP_INITIATOR; cpi->bus_id = sim->bus_id; cpi->base_transfer_speed = 400 * 1000 / 8; strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); strlcpy(cpi->hba_vid, "SBP", HBA_IDLEN); strlcpy(cpi->dev_name, sim->sim_name, DEV_IDLEN); cpi->unit_number = sim->unit_number; - cpi->transport = XPORT_SPI; /* XX should have a FireWire */ - cpi->transport_version = 2; - cpi->protocol = PROTO_SCSI; - cpi->protocol_version = SCSI_REV_2; + cpi->transport = XPORT_SPI; /* XX should have a FireWire */ + cpi->transport_version = 2; + cpi->protocol = PROTO_SCSI; + cpi->protocol_version = SCSI_REV_2; cpi->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); break; } case XPT_GET_TRAN_SETTINGS: { struct ccb_trans_settings *cts = &ccb->cts; struct ccb_trans_settings_scsi *scsi = &cts->proto_specific.scsi; struct ccb_trans_settings_spi *spi = &cts->xport_specific.spi; cts->protocol = PROTO_SCSI; cts->protocol_version = SCSI_REV_2; cts->transport = XPORT_SPI; /* should have a FireWire */ cts->transport_version = 2; spi->valid = CTS_SPI_VALID_DISC; spi->flags = CTS_SPI_FLAGS_DISC_ENB; scsi->valid = CTS_SCSI_VALID_TQ; scsi->flags = CTS_SCSI_FLAGS_TAG_ENB; SBP_DEBUG(1) printf("%s:%d:%jx 
XPT_GET_TRAN_SETTINGS:.\n", device_get_nameunit(sbp->fd.dev), ccb->ccb_h.target_id, (uintmax_t)ccb->ccb_h.target_lun); END_DEBUG cts->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); break; } case XPT_ABORT: ccb->ccb_h.status = CAM_UA_ABORT; xpt_done(ccb); break; case XPT_SET_TRAN_SETTINGS: /* XXX */ default: ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); break; } return; } static void sbp_execute_ocb(void *arg, bus_dma_segment_t *segments, int seg, int error) { int i; struct sbp_ocb *ocb; struct sbp_ocb *prev; bus_dma_segment_t *s; if (error) printf("sbp_execute_ocb: error=%d\n", error); ocb = (struct sbp_ocb *)arg; SBP_DEBUG(2) printf("sbp_execute_ocb: seg %d", seg); for (i = 0; i < seg; i++) printf(", %jx:%jd", (uintmax_t)segments[i].ds_addr, (uintmax_t)segments[i].ds_len); printf("\n"); END_DEBUG if (seg == 1) { /* direct pointer */ s = &segments[0]; if (s->ds_len > SBP_SEG_MAX) panic("ds_len > SBP_SEG_MAX, fix busdma code"); ocb->orb[3] = htonl(s->ds_addr); ocb->orb[4] |= htonl(s->ds_len); } else if (seg > 1) { /* page table */ for (i = 0; i < seg; i++) { s = &segments[i]; SBP_DEBUG(0) /* XXX LSI Logic "< 16 byte" bug might be hit */ if (s->ds_len < 16) printf("sbp_execute_ocb: warning, " "segment length(%zd) is less than 16." "(seg=%d/%d)\n", (size_t)s->ds_len, i + 1, seg); END_DEBUG if (s->ds_len > SBP_SEG_MAX) panic("ds_len > SBP_SEG_MAX, fix busdma code"); ocb->ind_ptr[i].hi = htonl(s->ds_len << 16); ocb->ind_ptr[i].lo = htonl(s->ds_addr); } ocb->orb[4] |= htonl(ORB_CMD_PTBL | seg); } if (seg > 0) bus_dmamap_sync(ocb->sdev->target->sbp->dmat, ocb->dmamap, (ntohl(ocb->orb[4]) & ORB_CMD_IN) ? 
BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
	prev = sbp_enqueue_ocb(ocb->sdev, ocb);
	fwdma_sync(&ocb->sdev->dma, BUS_DMASYNC_PREWRITE);
	if (use_doorbell) {
		if (prev == NULL) {
			if (ocb->sdev->last_ocb != NULL)
				sbp_doorbell(ocb->sdev);
			else
				sbp_orb_pointer(ocb->sdev, ocb);
		}
	} else {
		if (prev == NULL || (ocb->sdev->flags & ORB_LINK_DEAD) != 0) {
			ocb->sdev->flags &= ~ORB_LINK_DEAD;
			sbp_orb_pointer(ocb->sdev, ocb);
		}
	}
}

/*
 * CAM poll entry point: drive the firewire interrupt handler by hand so
 * completions can be reaped with interrupts disabled (e.g. during dumps).
 */
static void
sbp_poll(struct cam_sim *sim)
{
	struct sbp_softc *sbp;
	struct firewire_comm *fc;

	sbp = (struct sbp_softc *)sim->softc;
	fc = sbp->fd.fc;

	fc->poll(fc, 0, -1);
	return;
}

/*
 * Find and remove the OCB matching the given status block from the device's
 * pending queue.  Returns the OCB (caller completes it) or NULL if no match.
 * Also unloads the data DMA map and keeps the ORB chain moving (doorbell or
 * next-ORB pointer, depending on mode).  Caller must hold the sbp lock.
 */
static struct sbp_ocb *
sbp_dequeue_ocb(struct sbp_dev *sdev, struct sbp_status *sbp_status)
{
	struct sbp_ocb *ocb;
	struct sbp_ocb *next;
	int order = 0;

SBP_DEBUG(1)
	device_printf(sdev->target->sbp->fd.dev,
	    "%s:%s 0x%08x src %d\n", __func__, sdev->bustgtlun,
	    ntohl(sbp_status->orb_lo), sbp_status->src);
END_DEBUG
	SBP_LOCK_ASSERT(sdev->target->sbp);
	STAILQ_FOREACH_SAFE(ocb, &sdev->ocbs, ocb, next) {
		if (OCB_MATCH(ocb, sbp_status)) {
			/* found */
			STAILQ_REMOVE(&sdev->ocbs, ocb, sbp_ocb, ocb);
			if (ocb->ccb != NULL)
				callout_stop(&ocb->timer);
			/* low 16 bits of orb[4] hold the data length; nonzero
			 * means a data map was loaded and must be torn down */
			if (ntohl(ocb->orb[4]) & 0xffff) {
				bus_dmamap_sync(sdev->target->sbp->dmat,
				    ocb->dmamap,
				    (ntohl(ocb->orb[4]) & ORB_CMD_IN) ?
				    BUS_DMASYNC_POSTREAD :
				    BUS_DMASYNC_POSTWRITE);
				bus_dmamap_unload(sdev->target->sbp->dmat,
				    ocb->dmamap);
			}
			if (!use_doorbell) {
				if (sbp_status->src == SRC_NO_NEXT) {
					if (next != NULL)
						sbp_orb_pointer(sdev, next);
					else if (order > 0) {
						/*
						 * Unordered execution
						 * We need to send pointer for
						 * next ORB
						 */
						sdev->flags |= ORB_LINK_DEAD;
					}
				}
			} else {
				/*
				 * XXX this is not correct for unordered
				 * execution.
				 */
				if (sdev->last_ocb != NULL) {
					sbp_free_ocb(sdev, sdev->last_ocb);
				}
				sdev->last_ocb = ocb;
				if (next != NULL &&
				    sbp_status->src == SRC_NO_NEXT)
					sbp_doorbell(sdev);
			}
			break;
		} else
			order++;
	}
SBP_DEBUG(0)
	if (ocb && order > 0) {
		device_printf(sdev->target->sbp->fd.dev,
		    "%s:%s unordered execution order:%d\n",
		    __func__, sdev->bustgtlun, order);
	}
END_DEBUG
	return (ocb);
}

/*
 * Append an OCB to the device's pending queue, arm its timeout, and link it
 * into the previous ORB's next-ORB pointer so the target can chain to it.
 * Returns the previously-last OCB (NULL if the queue was empty), which the
 * caller uses to decide whether to ring the doorbell / write the ORB pointer.
 * Caller must hold the sbp lock.
 */
static struct sbp_ocb *
sbp_enqueue_ocb(struct sbp_dev *sdev, struct sbp_ocb *ocb)
{
	struct sbp_ocb *prev, *prev2;

	SBP_LOCK_ASSERT(sdev->target->sbp);
SBP_DEBUG(1)
	device_printf(sdev->target->sbp->fd.dev,
	    "%s:%s 0x%08jx\n", __func__, sdev->bustgtlun, (uintmax_t)ocb->bus_addr);
END_DEBUG
	prev2 = prev = STAILQ_LAST(&sdev->ocbs, sbp_ocb, ocb);
	STAILQ_INSERT_TAIL(&sdev->ocbs, ocb, ocb);
	if (ocb->ccb != NULL) {
		callout_reset_sbt(&ocb->timer,
		    SBT_1MS * ocb->ccb->ccb_h.timeout, 0, sbp_timeout,
		    ocb, 0);
	}
	/* in doorbell mode an empty queue still chains off the last
	 * completed (retired) ORB */
	if (use_doorbell && prev == NULL)
		prev2 = sdev->last_ocb;
	if (prev2 != NULL && (ocb->sdev->flags & ORB_LINK_DEAD) == 0) {
SBP_DEBUG(1)
		printf("linking chain 0x%jx -> 0x%jx\n",
		    (uintmax_t)prev2->bus_addr, (uintmax_t)ocb->bus_addr);
END_DEBUG
		/*
		 * Suppress compiler optimization so that orb[1] must be written first.
		 * XXX We may need an explicit memory barrier for other architectures
		 * other than i386/amd64.
		 */
		*(volatile uint32_t *)&prev2->orb[1] = htonl(ocb->bus_addr);
		*(volatile uint32_t *)&prev2->orb[0] = 0;
	}

	return prev;
}

/*
 * Pop a free OCB for the device, or NULL (setting ORB_SHORTAGE so the queue
 * is thawed when one is freed).  Caller must hold the sbp lock.
 */
static struct sbp_ocb *
sbp_get_ocb(struct sbp_dev *sdev)
{
	struct sbp_ocb *ocb;

	SBP_LOCK_ASSERT(sdev->target->sbp);
	ocb = STAILQ_FIRST(&sdev->free_ocbs);
	if (ocb == NULL) {
		sdev->flags |= ORB_SHORTAGE;
		printf("ocb shortage!!!\n");
		return NULL;
	}
	STAILQ_REMOVE_HEAD(&sdev->free_ocbs, ocb);
	ocb->ccb = NULL;
	return (ocb);
}

/*
 * Return an OCB to the free list; if we previously hit an OCB shortage,
 * release the frozen CAM device queue now that one is available again.
 */
static void
sbp_free_ocb(struct sbp_dev *sdev, struct sbp_ocb *ocb)
{

	ocb->flags = 0;
	ocb->ccb = NULL;

	SBP_LOCK_ASSERT(sdev->target->sbp);
	STAILQ_INSERT_TAIL(&sdev->free_ocbs, ocb, ocb);
	if ((sdev->flags & ORB_SHORTAGE) != 0) {
		int count;

		sdev->flags &= ~ORB_SHORTAGE;
		count = sdev->freeze;
		sdev->freeze = 0;
		xpt_release_devq(sdev->path, count, TRUE);
	}
}

/*
 * Abort a single outstanding OCB: stop its timeout, unload any data DMA map,
 * complete the attached CCB with the given CAM status, and free the OCB.
 * Caller must hold the sbp lock.
 */
static void
sbp_abort_ocb(struct sbp_ocb *ocb, int status)
{
	struct sbp_dev *sdev;

	sdev = ocb->sdev;
	SBP_LOCK_ASSERT(sdev->target->sbp);
SBP_DEBUG(0)
	device_printf(sdev->target->sbp->fd.dev,
	    "%s:%s 0x%jx\n", __func__, sdev->bustgtlun, (uintmax_t)ocb->bus_addr);
END_DEBUG
SBP_DEBUG(1)
	if (ocb->ccb != NULL)
		sbp_print_scsi_cmd(ocb);
END_DEBUG
	/* nonzero data length in orb[4] means a data map is loaded */
	if (ntohl(ocb->orb[4]) & 0xffff) {
		bus_dmamap_sync(sdev->target->sbp->dmat, ocb->dmamap,
		    (ntohl(ocb->orb[4]) & ORB_CMD_IN) ?
BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sdev->target->sbp->dmat, ocb->dmamap); } if (ocb->ccb != NULL) { callout_stop(&ocb->timer); ocb->ccb->ccb_h.status = status; xpt_done(ocb->ccb); } sbp_free_ocb(sdev, ocb); } static void sbp_abort_all_ocbs(struct sbp_dev *sdev, int status) { struct sbp_ocb *ocb, *next; STAILQ_HEAD(, sbp_ocb) temp; STAILQ_INIT(&temp); SBP_LOCK_ASSERT(sdev->target->sbp); STAILQ_CONCAT(&temp, &sdev->ocbs); STAILQ_INIT(&sdev->ocbs); STAILQ_FOREACH_SAFE(ocb, &temp, ocb, next) { sbp_abort_ocb(ocb, status); } if (sdev->last_ocb != NULL) { sbp_free_ocb(sdev, sdev->last_ocb); sdev->last_ocb = NULL; } } static devclass_t sbp_devclass; static device_method_t sbp_methods[] = { /* device interface */ DEVMETHOD(device_identify, sbp_identify), DEVMETHOD(device_probe, sbp_probe), DEVMETHOD(device_attach, sbp_attach), DEVMETHOD(device_detach, sbp_detach), DEVMETHOD(device_shutdown, sbp_shutdown), { 0, 0 } }; static driver_t sbp_driver = { "sbp", sbp_methods, sizeof(struct sbp_softc), }; DRIVER_MODULE(sbp, firewire, sbp_driver, sbp_devclass, 0, 0); MODULE_VERSION(sbp, 1); MODULE_DEPEND(sbp, firewire, 1, 1, 1); MODULE_DEPEND(sbp, cam, 1, 1, 1); Index: stable/11/sys/dev/mly/mly.c =================================================================== --- stable/11/sys/dev/mly/mly.c (revision 335137) +++ stable/11/sys/dev/mly/mly.c (revision 335138) @@ -1,3011 +1,3011 @@ /*- * Copyright (c) 2000, 2001 Michael Smith * Copyright (c) 2000 BSDi * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include static int mly_probe(device_t dev); static int mly_attach(device_t dev); static int mly_pci_attach(struct mly_softc *sc); static int mly_detach(device_t dev); static int mly_shutdown(device_t dev); static void mly_intr(void *arg); static int mly_sg_map(struct mly_softc *sc); static void mly_sg_map_helper(void *arg, bus_dma_segment_t *segs, int nseg, int error); static int mly_mmbox_map(struct mly_softc *sc); static void mly_mmbox_map_helper(void *arg, bus_dma_segment_t *segs, int nseg, int error); static void mly_free(struct mly_softc *sc); static int mly_get_controllerinfo(struct mly_softc *sc); static void mly_scan_devices(struct mly_softc *sc); static void mly_rescan_btl(struct mly_softc *sc, int bus, int target); static void mly_complete_rescan(struct mly_command *mc); 
static int mly_get_eventstatus(struct mly_softc *sc); static int mly_enable_mmbox(struct mly_softc *sc); static int mly_flush(struct mly_softc *sc); static int mly_ioctl(struct mly_softc *sc, struct mly_command_ioctl *ioctl, void **data, size_t datasize, u_int8_t *status, void *sense_buffer, size_t *sense_length); static void mly_check_event(struct mly_softc *sc); static void mly_fetch_event(struct mly_softc *sc); static void mly_complete_event(struct mly_command *mc); static void mly_process_event(struct mly_softc *sc, struct mly_event *me); static void mly_periodic(void *data); static int mly_immediate_command(struct mly_command *mc); static int mly_start(struct mly_command *mc); static void mly_done(struct mly_softc *sc); static void mly_complete(struct mly_softc *sc); static void mly_complete_handler(void *context, int pending); static int mly_alloc_command(struct mly_softc *sc, struct mly_command **mcp); static void mly_release_command(struct mly_command *mc); static void mly_alloc_commands_map(void *arg, bus_dma_segment_t *segs, int nseg, int error); static int mly_alloc_commands(struct mly_softc *sc); static void mly_release_commands(struct mly_softc *sc); static void mly_map_command(struct mly_command *mc); static void mly_unmap_command(struct mly_command *mc); static int mly_cam_attach(struct mly_softc *sc); static void mly_cam_detach(struct mly_softc *sc); static void mly_cam_rescan_btl(struct mly_softc *sc, int bus, int target); static void mly_cam_action(struct cam_sim *sim, union ccb *ccb); static int mly_cam_action_io(struct cam_sim *sim, struct ccb_scsiio *csio); static void mly_cam_poll(struct cam_sim *sim); static void mly_cam_complete(struct mly_command *mc); static struct cam_periph *mly_find_periph(struct mly_softc *sc, int bus, int target); static int mly_name_device(struct mly_softc *sc, int bus, int target); static int mly_fwhandshake(struct mly_softc *sc); static void mly_describe_controller(struct mly_softc *sc); #ifdef MLY_DEBUG static 
void mly_printstate(struct mly_softc *sc); static void mly_print_command(struct mly_command *mc); static void mly_print_packet(struct mly_command *mc); static void mly_panic(struct mly_softc *sc, char *reason); static void mly_timeout(void *arg); #endif void mly_print_controller(int controller); static d_open_t mly_user_open; static d_close_t mly_user_close; static d_ioctl_t mly_user_ioctl; static int mly_user_command(struct mly_softc *sc, struct mly_user_command *uc); static int mly_user_health(struct mly_softc *sc, struct mly_user_health *uh); #define MLY_CMD_TIMEOUT 20 static device_method_t mly_methods[] = { /* Device interface */ DEVMETHOD(device_probe, mly_probe), DEVMETHOD(device_attach, mly_attach), DEVMETHOD(device_detach, mly_detach), DEVMETHOD(device_shutdown, mly_shutdown), { 0, 0 } }; static driver_t mly_pci_driver = { "mly", mly_methods, sizeof(struct mly_softc) }; static devclass_t mly_devclass; DRIVER_MODULE(mly, pci, mly_pci_driver, mly_devclass, 0, 0); MODULE_DEPEND(mly, pci, 1, 1, 1); MODULE_DEPEND(mly, cam, 1, 1, 1); static struct cdevsw mly_cdevsw = { .d_version = D_VERSION, .d_open = mly_user_open, .d_close = mly_user_close, .d_ioctl = mly_user_ioctl, .d_name = "mly", }; /******************************************************************************** ******************************************************************************** Device Interface ******************************************************************************** ********************************************************************************/ static struct mly_ident { u_int16_t vendor; u_int16_t device; u_int16_t subvendor; u_int16_t subdevice; int hwif; char *desc; } mly_identifiers[] = { {0x1069, 0xba56, 0x1069, 0x0040, MLY_HWIF_STRONGARM, "Mylex eXtremeRAID 2000"}, {0x1069, 0xba56, 0x1069, 0x0030, MLY_HWIF_STRONGARM, "Mylex eXtremeRAID 3000"}, {0x1069, 0x0050, 0x1069, 0x0050, MLY_HWIF_I960RX, "Mylex AcceleRAID 352"}, {0x1069, 0x0050, 0x1069, 0x0052, MLY_HWIF_I960RX, "Mylex 
AcceleRAID 170"},
    {0x1069, 0x0050, 0x1069, 0x0054, MLY_HWIF_I960RX,   "Mylex AcceleRAID 160"},
    {0, 0, 0, 0, 0, 0}
};

/********************************************************************************
 * Compare the provided PCI device with the list we support.
 */
static int
mly_probe(device_t dev)
{
    struct mly_ident	*m;

    debug_called(1);

    /*
     * Match on vendor/device; subvendor/subdevice are only checked when the
     * table entry specifies a subvendor.
     */
    for (m = mly_identifiers; m->vendor != 0; m++) {
	if ((m->vendor == pci_get_vendor(dev)) &&
	    (m->device == pci_get_device(dev)) &&
	    ((m->subvendor == 0) || ((m->subvendor == pci_get_subvendor(dev)) &&
				     (m->subdevice == pci_get_subdevice(dev))))) {
	    device_set_desc(dev, m->desc);
	    return(BUS_PROBE_DEFAULT);	/* allow room to be overridden */
	}
    }
    return(ENXIO);
}

/********************************************************************************
 * Initialise the controller and softc
 */
static int
mly_attach(device_t dev)
{
    struct mly_softc	*sc = device_get_softc(dev);
    int			error;

    debug_called(1);

    sc->mly_dev = dev;
    mtx_init(&sc->mly_lock, "mly", NULL, MTX_DEF);
    callout_init_mtx(&sc->mly_periodic, &sc->mly_lock, 0);

#ifdef MLY_DEBUG
    callout_init_mtx(&sc->mly_timeout, &sc->mly_lock, 0);
    if (device_get_unit(sc->mly_dev) == 0)
	mly_softc0 = sc;
#endif

    /*
     * Do PCI-specific initialisation.
     */
    if ((error = mly_pci_attach(sc)) != 0)
	goto out;

    /*
     * Initialise per-controller queues.
     */
    mly_initq_free(sc);
    mly_initq_busy(sc);
    mly_initq_complete(sc);

    /*
     * Initialise command-completion task.
     */
    TASK_INIT(&sc->mly_task_complete, 0, mly_complete_handler, sc);

    /* disable interrupts before we start talking to the controller */
    MLY_MASK_INTERRUPTS(sc);

    /*
     * Wait for the controller to come ready, handshake with the firmware if required.
     * This is typically only necessary on platforms where the controller BIOS does not
     * run.
     */
    if ((error = mly_fwhandshake(sc)))
	goto out;

    /*
     * Allocate initial command buffers.
     */
    if ((error = mly_alloc_commands(sc)))
	goto out;

    /*
     * Obtain controller feature information
     */
    MLY_LOCK(sc);
    error = mly_get_controllerinfo(sc);
    MLY_UNLOCK(sc);
    if (error)
	goto out;

    /*
     * Reallocate command buffers now we know how many we want.
     */
    mly_release_commands(sc);
    if ((error = mly_alloc_commands(sc)))
	goto out;

    /*
     * Get the current event counter for health purposes, populate the initial
     * health status buffer.
     */
    MLY_LOCK(sc);
    error = mly_get_eventstatus(sc);

    /*
     * Enable memory-mailbox mode.
     */
    if (error == 0)
	error = mly_enable_mmbox(sc);
    MLY_UNLOCK(sc);
    if (error)
	goto out;

    /*
     * Attach to CAM.
     */
    if ((error = mly_cam_attach(sc)))
	goto out;

    /*
     * Print a little information about the controller
     */
    mly_describe_controller(sc);

    /*
     * Mark all attached devices for rescan.
     */
    MLY_LOCK(sc);
    mly_scan_devices(sc);

    /*
     * Instigate the first status poll immediately.  Rescan completions won't
     * happen until interrupts are enabled, which should still be before
     * the SCSI subsystem gets to us, courtesy of the "SCSI settling delay".
     */
    mly_periodic((void *)sc);
    MLY_UNLOCK(sc);

    /*
     * Create the control device.
     */
    sc->mly_dev_t = make_dev(&mly_cdevsw, 0, UID_ROOT, GID_OPERATOR, S_IRUSR | S_IWUSR,
			     "mly%d", device_get_unit(sc->mly_dev));
    sc->mly_dev_t->si_drv1 = sc;

    /* enable interrupts now */
    MLY_UNMASK_INTERRUPTS(sc);

#ifdef MLY_DEBUG
    callout_reset(&sc->mly_timeout, MLY_CMD_TIMEOUT * hz, mly_timeout, sc);
#endif

 out:
    /* on any failure, tear down whatever partial state we built */
    if (error != 0)
	mly_free(sc);
    return(error);
}

/********************************************************************************
 * Perform PCI-specific initialisation.
 */
static int
mly_pci_attach(struct mly_softc *sc)
{
    int			i, error;

    debug_called(1);

    /* assume failure is 'not configured' */
    error = ENXIO;

    /*
     * Verify that the adapter is correctly set up in PCI space.
     */
    pci_enable_busmaster(sc->mly_dev);

    /*
     * Allocate the PCI register window.
     */
    sc->mly_regs_rid = PCIR_BAR(0);	/* first base address register */
    if ((sc->mly_regs_resource = bus_alloc_resource_any(sc->mly_dev, SYS_RES_MEMORY,
		&sc->mly_regs_rid, RF_ACTIVE)) == NULL) {
	mly_printf(sc, "can't allocate register window\n");
	goto fail;
    }

    /*
     * Allocate and connect our interrupt.
     */
    sc->mly_irq_rid = 0;
    if ((sc->mly_irq = bus_alloc_resource_any(sc->mly_dev, SYS_RES_IRQ, &sc->mly_irq_rid,
					      RF_SHAREABLE | RF_ACTIVE)) == NULL) {
	mly_printf(sc, "can't allocate interrupt\n");
	goto fail;
    }
    if (bus_setup_intr(sc->mly_dev, sc->mly_irq, INTR_TYPE_CAM | INTR_ENTROPY | INTR_MPSAFE, NULL, mly_intr, sc, &sc->mly_intr)) {
	mly_printf(sc, "can't set up interrupt\n");
	goto fail;
    }

    /* assume failure is 'out of memory' */
    error = ENOMEM;

    /*
     * Allocate the parent bus DMA tag appropriate for our PCI interface.
     *
     * Note that all of these controllers are 64-bit capable.
     */
    if (bus_dma_tag_create(bus_get_dma_tag(sc->mly_dev),/* PCI parent */
			   1, 0, 			/* alignment, boundary */
			   BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
			   BUS_SPACE_MAXADDR, 		/* highaddr */
			   NULL, NULL, 			/* filter, filterarg */
			   BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
			   BUS_SPACE_UNRESTRICTED,	/* nsegments */
			   BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
			   BUS_DMA_ALLOCNOW,		/* flags */
			   NULL,			/* lockfunc */
			   NULL,			/* lockarg */
			   &sc->mly_parent_dmat)) {
	mly_printf(sc, "can't allocate parent DMA tag\n");
	goto fail;
    }

    /*
     * Create DMA tag for mapping buffers into controller-addressable space.
     */
    if (bus_dma_tag_create(sc->mly_parent_dmat, 	/* parent */
			   1, 0, 			/* alignment, boundary */
			   BUS_SPACE_MAXADDR,		/* lowaddr */
			   BUS_SPACE_MAXADDR, 		/* highaddr */
			   NULL, NULL, 			/* filter, filterarg */
			   DFLTPHYS,			/* maxsize */
			   MLY_MAX_SGENTRIES,		/* nsegments */
			   BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
			   0,				/* flags */
			   busdma_lock_mutex,		/* lockfunc */
			   &sc->mly_lock,		/* lockarg */
			   &sc->mly_buffer_dmat)) {
	mly_printf(sc, "can't allocate buffer DMA tag\n");
	goto fail;
    }

    /*
     * Initialise the DMA tag for command packets.
     */
    if (bus_dma_tag_create(sc->mly_parent_dmat,		/* parent */
			   1, 0, 			/* alignment, boundary */
			   BUS_SPACE_MAXADDR,		/* lowaddr */
			   BUS_SPACE_MAXADDR, 		/* highaddr */
			   NULL, NULL, 			/* filter, filterarg */
			   sizeof(union mly_command_packet) * MLY_MAX_COMMANDS, 1,	/* maxsize, nsegments */
			   BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
			   BUS_DMA_ALLOCNOW,		/* flags */
			   NULL, NULL,			/* lockfunc, lockarg */
			   &sc->mly_packet_dmat)) {
	mly_printf(sc, "can't allocate command packet DMA tag\n");
	goto fail;
    }

    /*
     * Detect the hardware interface version
     */
    for (i = 0; mly_identifiers[i].vendor != 0; i++) {
	if ((mly_identifiers[i].vendor == pci_get_vendor(sc->mly_dev)) &&
	    (mly_identifiers[i].device == pci_get_device(sc->mly_dev))) {
	    sc->mly_hwif = mly_identifiers[i].hwif;
	    switch(sc->mly_hwif) {
	    case MLY_HWIF_I960RX:
		debug(1, "set hardware up for i960RX");
		sc->mly_doorbell_true = 0x00;
		sc->mly_command_mailbox =  MLY_I960RX_COMMAND_MAILBOX;
		sc->mly_status_mailbox =   MLY_I960RX_STATUS_MAILBOX;
		sc->mly_idbr =             MLY_I960RX_IDBR;
		sc->mly_odbr =             MLY_I960RX_ODBR;
		sc->mly_error_status =     MLY_I960RX_ERROR_STATUS;
		sc->mly_interrupt_status = MLY_I960RX_INTERRUPT_STATUS;
		sc->mly_interrupt_mask =   MLY_I960RX_INTERRUPT_MASK;
		break;
	    case MLY_HWIF_STRONGARM:
		debug(1, "set hardware up for StrongARM");
		sc->mly_doorbell_true = 0xff;		/* doorbell 'true' is 0 */
		sc->mly_command_mailbox =  MLY_STRONGARM_COMMAND_MAILBOX;
		sc->mly_status_mailbox =   MLY_STRONGARM_STATUS_MAILBOX;
		sc->mly_idbr =             MLY_STRONGARM_IDBR;
		sc->mly_odbr =             MLY_STRONGARM_ODBR;
		sc->mly_error_status =     MLY_STRONGARM_ERROR_STATUS;
		sc->mly_interrupt_status = MLY_STRONGARM_INTERRUPT_STATUS;
		sc->mly_interrupt_mask =   MLY_STRONGARM_INTERRUPT_MASK;
		break;
	    }
	    break;
	}
    }

    /*
     * Create the scatter/gather mappings.
     */
    if ((error = mly_sg_map(sc)))
	goto fail;

    /*
     * Allocate and map the memory mailbox
     */
    if ((error = mly_mmbox_map(sc)))
	goto fail;

    error = 0;

fail:
    /* partial allocations are released by mly_free() via the attach path */
    return(error);
}

/********************************************************************************
 * Shut the controller down and detach all our resources.
 */
static int
mly_detach(device_t dev)
{
    int			error;

    if ((error = mly_shutdown(dev)) != 0)
	return(error);

    mly_free(device_get_softc(dev));
    return(0);
}

/********************************************************************************
 * Bring the controller to a state where it can be safely left alone.
 *
 * Note that it should not be necessary to wait for any outstanding commands,
 * as they should be completed prior to calling here.
 *
 * XXX this applies for I/O, but not status polls; we should beware of
 * the case where a status command is running while we detach.
 */
static int
mly_shutdown(device_t dev)
{
    struct mly_softc	*sc = device_get_softc(dev);

    debug_called(1);

    MLY_LOCK(sc);
    if (sc->mly_state & MLY_STATE_OPEN) {
	/* the control device is open; refuse to shut down under it */
	MLY_UNLOCK(sc);
	return(EBUSY);
    }

    /* kill the periodic event */
    callout_stop(&sc->mly_periodic);
#ifdef MLY_DEBUG
    callout_stop(&sc->mly_timeout);
#endif

    /* flush controller */
    mly_printf(sc, "flushing cache...");
    printf("%s\n", mly_flush(sc) ? "failed" : "done");

    MLY_MASK_INTERRUPTS(sc);
    MLY_UNLOCK(sc);
    return(0);
}

/*******************************************************************************
 * Take an interrupt, or be poked by other code to look for interrupt-worthy
 * status.
*/ static void mly_intr(void *arg) { struct mly_softc *sc = (struct mly_softc *)arg; debug_called(2); MLY_LOCK(sc); mly_done(sc); MLY_UNLOCK(sc); }; /******************************************************************************** ******************************************************************************** Bus-dependant Resource Management ******************************************************************************** ********************************************************************************/ /******************************************************************************** * Allocate memory for the scatter/gather tables */ static int mly_sg_map(struct mly_softc *sc) { size_t segsize; debug_called(1); /* * Create a single tag describing a region large enough to hold all of * the s/g lists we will need. */ segsize = sizeof(struct mly_sg_entry) * MLY_MAX_COMMANDS *MLY_MAX_SGENTRIES; if (bus_dma_tag_create(sc->mly_parent_dmat, /* parent */ 1, 0, /* alignment,boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ segsize, 1, /* maxsize, nsegments */ BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ BUS_DMA_ALLOCNOW, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->mly_sg_dmat)) { mly_printf(sc, "can't allocate scatter/gather DMA tag\n"); return(ENOMEM); } /* * Allocate enough s/g maps for all commands and permanently map them into * controller-visible space. * * XXX this assumes we can get enough space for all the s/g maps in one * contiguous slab. 
     */
    if (bus_dmamem_alloc(sc->mly_sg_dmat, (void **)&sc->mly_sg_table, BUS_DMA_NOWAIT, &sc->mly_sg_dmamap)) {
	mly_printf(sc, "can't allocate s/g table\n");
	return(ENOMEM);
    }
    /* load the whole slab; mly_sg_map_helper records the bus address */
    if (bus_dmamap_load(sc->mly_sg_dmat, sc->mly_sg_dmamap, sc->mly_sg_table,
			segsize, mly_sg_map_helper, sc, BUS_DMA_NOWAIT) != 0)
	return (ENOMEM);
    return(0);
}

/********************************************************************************
 * Save the physical address of the base of the s/g table.
 */
static void
mly_sg_map_helper(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
    struct mly_softc	*sc = (struct mly_softc *)arg;

    debug_called(1);

    /* save base of s/g table's address in bus space */
    sc->mly_sg_busaddr = segs->ds_addr;
}

/********************************************************************************
 * Allocate memory for the memory-mailbox interface
 */
static int
mly_mmbox_map(struct mly_softc *sc)
{

    /*
     * Create a DMA tag for a single contiguous region large enough for the
     * memory mailbox structure.
     */
    if (bus_dma_tag_create(sc->mly_parent_dmat,		/* parent */
			   1, 0, 			/* alignment,boundary */
			   BUS_SPACE_MAXADDR,		/* lowaddr */
			   BUS_SPACE_MAXADDR, 		/* highaddr */
			   NULL, NULL, 			/* filter, filterarg */
			   sizeof(struct mly_mmbox), 1,	/* maxsize, nsegments */
			   BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
			   BUS_DMA_ALLOCNOW,		/* flags */
			   NULL, NULL,			/* lockfunc, lockarg */
			   &sc->mly_mmbox_dmat)) {
	mly_printf(sc, "can't allocate memory mailbox DMA tag\n");
	return(ENOMEM);
    }

    /*
     * Allocate the buffer
     */
    if (bus_dmamem_alloc(sc->mly_mmbox_dmat, (void **)&sc->mly_mmbox, BUS_DMA_NOWAIT, &sc->mly_mmbox_dmamap)) {
	mly_printf(sc, "can't allocate memory mailbox\n");
	return(ENOMEM);
    }
    /* load it; mly_mmbox_map_helper records the bus address */
    if (bus_dmamap_load(sc->mly_mmbox_dmat, sc->mly_mmbox_dmamap, sc->mly_mmbox,
			sizeof(struct mly_mmbox), mly_mmbox_map_helper, sc,
			BUS_DMA_NOWAIT) != 0)
	return (ENOMEM);

    bzero(sc->mly_mmbox, sizeof(*sc->mly_mmbox));
    return(0);
}

/********************************************************************************
 * Save the physical address of the memory mailbox
 */
static void
mly_mmbox_map_helper(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
    struct mly_softc	*sc = (struct mly_softc *)arg;

    debug_called(1);

    sc->mly_mmbox_busaddr = segs->ds_addr;
}

/********************************************************************************
 * Free all of the resources associated with (sc)
 *
 * Should not be called if the controller is active.
 */
static void
mly_free(struct mly_softc *sc)
{

    debug_called(1);

    /*
     * Teardown is ordered as the inverse of attach; every release below is
     * guarded so this is safe to call with a partially-attached softc.
     */

    /* Remove the management device */
    destroy_dev(sc->mly_dev_t);

    if (sc->mly_intr)
	bus_teardown_intr(sc->mly_dev, sc->mly_irq, sc->mly_intr);
    callout_drain(&sc->mly_periodic);
#ifdef MLY_DEBUG
    callout_drain(&sc->mly_timeout);
#endif

    /* detach from CAM */
    mly_cam_detach(sc);

    /* release command memory */
    mly_release_commands(sc);

    /* throw away the controllerinfo structure */
    if (sc->mly_controllerinfo != NULL)
	free(sc->mly_controllerinfo, M_DEVBUF);

    /* throw away the controllerparam structure */
    if (sc->mly_controllerparam != NULL)
	free(sc->mly_controllerparam, M_DEVBUF);

    /* destroy data-transfer DMA tag */
    if (sc->mly_buffer_dmat)
	bus_dma_tag_destroy(sc->mly_buffer_dmat);

    /* free and destroy DMA memory and tag for s/g lists */
    if (sc->mly_sg_table) {
	bus_dmamap_unload(sc->mly_sg_dmat, sc->mly_sg_dmamap);
	bus_dmamem_free(sc->mly_sg_dmat, sc->mly_sg_table, sc->mly_sg_dmamap);
    }
    if (sc->mly_sg_dmat)
	bus_dma_tag_destroy(sc->mly_sg_dmat);

    /* free and destroy DMA memory and tag for memory mailbox */
    if (sc->mly_mmbox) {
	bus_dmamap_unload(sc->mly_mmbox_dmat, sc->mly_mmbox_dmamap);
	bus_dmamem_free(sc->mly_mmbox_dmat, sc->mly_mmbox, sc->mly_mmbox_dmamap);
    }
    if (sc->mly_mmbox_dmat)
	bus_dma_tag_destroy(sc->mly_mmbox_dmat);

    /* disconnect the interrupt handler */
    if (sc->mly_irq != NULL)
	bus_release_resource(sc->mly_dev, SYS_RES_IRQ, sc->mly_irq_rid, sc->mly_irq);

    /* destroy the parent DMA tag */
    if (sc->mly_parent_dmat)
	bus_dma_tag_destroy(sc->mly_parent_dmat);

    /* release the register window mapping */
    if (sc->mly_regs_resource != NULL)
	bus_release_resource(sc->mly_dev, SYS_RES_MEMORY, sc->mly_regs_rid, sc->mly_regs_resource);

    mtx_destroy(&sc->mly_lock);
}

/********************************************************************************
 ********************************************************************************
                                Command Wrappers
 ********************************************************************************
 ************************************************************************
 ********************************************************************************/

/********************************************************************************
 * Fill in the mly_controllerinfo and mly_controllerparam fields in the softc.
 *
 * Both buffers are allocated by mly_ioctl() (DATAIN with *data == NULL) and
 * owned by the softc afterwards; any previous copies are freed first.
 * Returns 0 on success, an errno on transport failure, or EIO if the
 * controller reported bad status.
 */
static int
mly_get_controllerinfo(struct mly_softc *sc)
{
    struct mly_command_ioctl	mci;
    u_int8_t			status;
    int				error;

    debug_called(1);

    if (sc->mly_controllerinfo != NULL)
	free(sc->mly_controllerinfo, M_DEVBUF);

    /* build the getcontrollerinfo ioctl and send it */
    bzero(&mci, sizeof(mci));
    sc->mly_controllerinfo = NULL;
    mci.sub_ioctl = MDACIOCTL_GETCONTROLLERINFO;
    if ((error = mly_ioctl(sc, &mci, (void **)&sc->mly_controllerinfo, sizeof(*sc->mly_controllerinfo),
			   &status, NULL, NULL)))
	return(error);
    if (status != 0)
	return(EIO);

    if (sc->mly_controllerparam != NULL)
	free(sc->mly_controllerparam, M_DEVBUF);

    /* build the getcontrollerparameter ioctl and send it */
    bzero(&mci, sizeof(mci));
    sc->mly_controllerparam = NULL;
    mci.sub_ioctl = MDACIOCTL_GETCONTROLLERPARAMETER;
    if ((error = mly_ioctl(sc, &mci, (void **)&sc->mly_controllerparam, sizeof(*sc->mly_controllerparam),
			   &status, NULL, NULL)))
	return(error);
    if (status != 0)
	return(EIO);

    return(0);
}

/********************************************************************************
 * Schedule all possible devices for a rescan.
 */
static void
mly_scan_devices(struct mly_softc *sc)
{
    int		bus, target;

    debug_called(1);

    /*
     * Clear any previous BTL information.
     */
    bzero(&sc->mly_btl, sizeof(sc->mly_btl));

    /*
     * Mark all devices as requiring a rescan, and let the next
     * periodic scan collect them.
     */
    for (bus = 0; bus < sc->mly_cam_channels; bus++)
	if (MLY_BUS_IS_VALID(sc, bus))
	    for (target = 0; target < MLY_MAX_TARGETS; target++)
		sc->mly_btl[bus][target].mb_flags = MLY_BTL_RESCAN;

}

/********************************************************************************
 * Rescan a device, possibly as a consequence of getting an event which suggests
 * that it may have changed.
 *
 * If we suffer resource starvation, we can abandon the rescan as we'll be
 * retried.
 */
static void
mly_rescan_btl(struct mly_softc *sc, int bus, int target)
{
    struct mly_command		*mc;
    struct mly_command_ioctl	*mci;

    debug_called(1);

    /* check that this bus is valid */
    if (!MLY_BUS_IS_VALID(sc, bus))
	return;

    /* get a command */
    if (mly_alloc_command(sc, &mc))
	return;

    /* set up the data buffer; freed by mly_complete_rescan() */
    if ((mc->mc_data = malloc(sizeof(union mly_devinfo), M_DEVBUF, M_NOWAIT | M_ZERO)) == NULL) {
	mly_release_command(mc);
	return;
    }
    mc->mc_flags |= MLY_CMD_DATAIN;
    mc->mc_complete = mly_complete_rescan;

    /*
     * Build the ioctl.  Virtual (logical-device) busses use the GETLOGDEVINFOVALID
     * sub-ioctl addressed by logical device number; physical busses use
     * GETPHYSDEVINFOVALID addressed by channel/target.
     */
    mci = (struct mly_command_ioctl *)&mc->mc_packet->ioctl;
    mci->opcode = MDACMD_IOCTL;
    mci->addr.phys.controller = 0;
    mci->timeout.value = 30;
    mci->timeout.scale = MLY_TIMEOUT_SECONDS;
    if (MLY_BUS_IS_VIRTUAL(sc, bus)) {
	mc->mc_length = mci->data_size = sizeof(struct mly_ioctl_getlogdevinfovalid);
	mci->sub_ioctl = MDACIOCTL_GETLOGDEVINFOVALID;
	mci->addr.log.logdev = MLY_LOGDEV_ID(sc, bus, target);
	debug(1, "logical device %d", mci->addr.log.logdev);
    } else {
	mc->mc_length = mci->data_size = sizeof(struct mly_ioctl_getphysdevinfovalid);
	mci->sub_ioctl = MDACIOCTL_GETPHYSDEVINFOVALID;
	mci->addr.phys.lun = 0;
	mci->addr.phys.target = target;
	mci->addr.phys.channel = bus;
	debug(1, "physical device %d:%d", mci->addr.phys.channel, mci->addr.phys.target);
    }

    /*
     * Dispatch the command. If we successfully send the command, clear the rescan
     * bit.
 */
    if (mly_start(mc) != 0) {
	mly_release_command(mc);
    } else {
	sc->mly_btl[bus][target].mb_flags &= ~MLY_BTL_RESCAN;	/* success */
    }
}

/********************************************************************************
 * Handle the completion of a rescan operation
 */
static void
mly_complete_rescan(struct mly_command *mc)
{
    struct mly_softc				*sc = mc->mc_sc;
    struct mly_ioctl_getlogdevinfovalid		*ldi;
    struct mly_ioctl_getphysdevinfovalid	*pdi;
    struct mly_command_ioctl			*mci;
    struct mly_btl				btl, *btlp;
    int						bus, target, rescan;

    debug_called(1);

    /*
     * Recover the bus and target from the command.  We need these even in
     * the case where we don't have a useful response.
     */
    mci = (struct mly_command_ioctl *)&mc->mc_packet->ioctl;
    if (mci->sub_ioctl == MDACIOCTL_GETLOGDEVINFOVALID) {
	bus = MLY_LOGDEV_BUS(sc, mci->addr.log.logdev);
	target = MLY_LOGDEV_TARGET(sc, mci->addr.log.logdev);
    } else {
	bus = mci->addr.phys.channel;
	target = mci->addr.phys.target;
    }
    /* XXX validate bus/target? */

    /* the default result is 'no device' */
    bzero(&btl, sizeof(btl));

    /* if the rescan completed OK, we have possibly-new BTL data */
    if (mc->mc_status == 0) {
	/* the transfer length tells us which of the two reply formats we got */
	if (mc->mc_length == sizeof(*ldi)) {
	    ldi = (struct mly_ioctl_getlogdevinfovalid *)mc->mc_data;
	    if ((MLY_LOGDEV_BUS(sc, ldi->logical_device_number) != bus) ||
		(MLY_LOGDEV_TARGET(sc, ldi->logical_device_number) != target)) {
		mly_printf(sc, "WARNING: BTL rescan for %d:%d returned data for %d:%d instead\n",
			   bus, target, MLY_LOGDEV_BUS(sc, ldi->logical_device_number),
			   MLY_LOGDEV_TARGET(sc, ldi->logical_device_number));
		/* XXX what can we do about this?
 */
	    }
	    btl.mb_flags = MLY_BTL_LOGICAL;
	    btl.mb_type = ldi->raid_level;
	    btl.mb_state = ldi->state;
	    debug(1, "BTL rescan for %d returns %s, %s", ldi->logical_device_number,
		  mly_describe_code(mly_table_device_type, ldi->raid_level),
		  mly_describe_code(mly_table_device_state, ldi->state));
	} else if (mc->mc_length == sizeof(*pdi)) {
	    pdi = (struct mly_ioctl_getphysdevinfovalid *)mc->mc_data;
	    if ((pdi->channel != bus) || (pdi->target != target)) {
		mly_printf(sc, "WARNING: BTL rescan for %d:%d returned data for %d:%d instead\n",
			   bus, target, pdi->channel, pdi->target);
		/* XXX what can we do about this? */
	    }
	    btl.mb_flags = MLY_BTL_PHYSICAL;
	    btl.mb_type = MLY_DEVICE_TYPE_PHYSICAL;
	    btl.mb_state = pdi->state;
	    btl.mb_speed = pdi->speed;
	    btl.mb_width = pdi->width;
	    if (pdi->state != MLY_DEVICE_STATE_UNCONFIGURED)
		sc->mly_btl[bus][target].mb_flags |= MLY_BTL_PROTECTED;
	    debug(1, "BTL rescan for %d:%d returns %s", bus, target,
		  mly_describe_code(mly_table_device_state, pdi->state));
	} else {
	    mly_printf(sc, "BTL rescan result invalid\n");
	}
    }

    /* buffer was allocated by mly_rescan_btl(); we own it here */
    free(mc->mc_data, M_DEVBUF);
    mly_release_command(mc);

    /*
     * Decide whether we need to rescan the device.
     */
    rescan = 0;

    /* device type changes (usually between 'nothing' and 'something') */
    btlp = &sc->mly_btl[bus][target];
    if (btl.mb_flags != btlp->mb_flags) {
	debug(1, "flags changed, rescanning");
	rescan = 1;
    }

    /* XXX other reasons? */

    /*
     * Update BTL information.
     */
    *btlp = btl;

    /*
     * Perform CAM rescan if required.
     */
    if (rescan)
	mly_cam_rescan_btl(sc, bus, target);
}

/********************************************************************************
 * Get the current health status and set the 'next event' counter to suit.
 */
static int
mly_get_eventstatus(struct mly_softc *sc)
{
    struct mly_command_ioctl	mci;
    struct mly_health_status	*mh;
    u_int8_t			status;
    int				error;

    /* build the gethealthstatus ioctl and send it; mly_ioctl allocates mh */
    bzero(&mci, sizeof(mci));
    mh = NULL;
    mci.sub_ioctl = MDACIOCTL_GETHEALTHSTATUS;

    if ((error = mly_ioctl(sc, &mci, (void **)&mh, sizeof(*mh), &status, NULL, NULL)))
	return(error);
    if (status != 0)
	return(EIO);

    /* get the event counter */
    sc->mly_event_change = mh->change_counter;
    sc->mly_event_waiting = mh->next_event;
    sc->mly_event_counter = mh->next_event;

    /* save the health status into the memory mailbox */
    bcopy(mh, &sc->mly_mmbox->mmm_health.status, sizeof(*mh));

    debug(1, "initial change counter %d, event counter %d", mh->change_counter, mh->next_event);

    free(mh, M_DEVBUF);
    return(0);
}

/********************************************************************************
 * Enable the memory mailbox mode.
 */
static int
mly_enable_mmbox(struct mly_softc *sc)
{
    struct mly_command_ioctl	mci;
    u_int8_t			*sp, status;
    int				error;

    debug_called(1);

    /* build the ioctl and send it */
    bzero(&mci, sizeof(mci));
    mci.sub_ioctl = MDACIOCTL_SETMEMORYMAILBOX;
    /* set buffer addresses */
    mci.param.setmemorymailbox.command_mailbox_physaddr =
	sc->mly_mmbox_busaddr + offsetof(struct mly_mmbox, mmm_command);
    mci.param.setmemorymailbox.status_mailbox_physaddr =
	sc->mly_mmbox_busaddr + offsetof(struct mly_mmbox, mmm_status);
    mci.param.setmemorymailbox.health_buffer_physaddr =
	sc->mly_mmbox_busaddr + offsetof(struct mly_mmbox, mmm_health);

    /* set buffer sizes (in KB) - abuse of data_size field is revolting */
    sp = (u_int8_t *)&mci.data_size;
    sp[0] = ((sizeof(union mly_command_packet) * MLY_MMBOX_COMMANDS) / 1024);
    sp[1] = (sizeof(union mly_status_packet) * MLY_MMBOX_STATUS) / 1024;
    mci.param.setmemorymailbox.health_buffer_size = sizeof(union mly_health_region) / 1024;

    debug(1, "memory mailbox at %p (0x%llx/%d 0x%llx/%d 0x%llx/%d", sc->mly_mmbox,
	  mci.param.setmemorymailbox.command_mailbox_physaddr, sp[0],
	  mci.param.setmemorymailbox.status_mailbox_physaddr, sp[1],
	  mci.param.setmemorymailbox.health_buffer_physaddr,
	  mci.param.setmemorymailbox.health_buffer_size);

    if ((error = mly_ioctl(sc, &mci, NULL, 0, &status, NULL, NULL)))
	return(error);
    if (status != 0)
	return(EIO);
    /* from here on, command submission/completion uses the memory mailbox */
    sc->mly_state |= MLY_STATE_MMBOX_ACTIVE;
    debug(1, "memory mailbox active");
    return(0);
}

/********************************************************************************
 * Flush all pending I/O from the controller.
 */
static int
mly_flush(struct mly_softc *sc)
{
    struct mly_command_ioctl	mci;
    u_int8_t			status;
    int				error;

    debug_called(1);

    /* build the ioctl */
    bzero(&mci, sizeof(mci));
    mci.sub_ioctl = MDACIOCTL_FLUSHDEVICEDATA;
    mci.param.deviceoperation.operation_device = MLY_OPDEVICE_PHYSICAL_CONTROLLER;

    /* pass it off to the controller */
    if ((error = mly_ioctl(sc, &mci, NULL, 0, &status, NULL, NULL)))
	return(error);

    return((status == 0) ? 0 : EIO);
}

/********************************************************************************
 * Perform an ioctl command.
 *
 * If (data) is not NULL, the command requires data transfer. If (*data) is NULL
 * the command requires data transfer from the controller, and we will allocate
 * a buffer for it.  If (*data) is not NULL, the command requires data transfer
 * to the controller.
 *
 * XXX passing in the whole ioctl structure is ugly.  Better ideas?
 *
 * XXX we don't even try to handle the case where datasize > 4k.  We should.
 */
static int
mly_ioctl(struct mly_softc *sc, struct mly_command_ioctl *ioctl, void **data, size_t datasize,
	  u_int8_t *status, void *sense_buffer, size_t *sense_length)
{
    struct mly_command		*mc;
    struct mly_command_ioctl	*mci;
    int				error;

    debug_called(1);
    MLY_ASSERT_LOCKED(sc);

    mc = NULL;
    if (mly_alloc_command(sc, &mc)) {
	error = ENOMEM;
	goto out;
    }

    /* copy the ioctl structure, but save some important fields and then fixup */
    mci = &mc->mc_packet->ioctl;
    ioctl->sense_buffer_address = mci->sense_buffer_address;
    ioctl->maximum_sense_size = mci->maximum_sense_size;
    *mci = *ioctl;
    mci->opcode = MDACMD_IOCTL;
    mci->timeout.value = 30;
    mci->timeout.scale = MLY_TIMEOUT_SECONDS;

    /* handle the data buffer */
    if (data != NULL) {
	if (*data == NULL) {
	    /* allocate data buffer; ownership passes to caller via *data on success */
	    if ((mc->mc_data = malloc(datasize, M_DEVBUF, M_NOWAIT)) == NULL) {
		error = ENOMEM;
		goto out;
	    }
	    mc->mc_flags |= MLY_CMD_DATAIN;
	} else {
	    mc->mc_data = *data;
	    mc->mc_flags |= MLY_CMD_DATAOUT;
	}
	mc->mc_length = datasize;
	mc->mc_packet->generic.data_size = datasize;
    }

    /* run the command */
    if ((error = mly_immediate_command(mc)))
	goto out;

    /* clean up and return any data */
    *status = mc->mc_status;
    /*
     * NOTE(review): on this sense path we return before publishing *data;
     * an allocated DATAIN buffer is not freed here (error == 0) — looks
     * like a leak for callers that pass a sense buffer; verify.
     */
    if ((mc->mc_sense > 0) && (sense_buffer != NULL)) {
	bcopy(mc->mc_packet, sense_buffer, mc->mc_sense);
	*sense_length = mc->mc_sense;
	goto out;
    }

    /* should we return a data pointer? */
    if ((data != NULL) && (*data == NULL))
	*data = mc->mc_data;

    /* command completed OK */
    error = 0;

out:
    if (mc != NULL) {
	/* do we need to free a data buffer we allocated? */
	if (error && (mc->mc_data != NULL) && (*data == NULL))
	    free(mc->mc_data, M_DEVBUF);
	mly_release_command(mc);
    }

    return(error);
}

/********************************************************************************
 * Check for event(s) outstanding in the controller.
 */
static void
mly_check_event(struct mly_softc *sc)
{

    /*
     * The controller may have updated the health status information,
     * so check for it here.
     * Note that the counters are all in host memory,
     * so this check is very cheap.  Also note that we depend on checking on
     * completion
     */
    if (sc->mly_mmbox->mmm_health.status.change_counter != sc->mly_event_change) {
	sc->mly_event_change = sc->mly_mmbox->mmm_health.status.change_counter;
	debug(1, "event change %d, event status update, %d -> %d", sc->mly_event_change,
	      sc->mly_event_waiting, sc->mly_mmbox->mmm_health.status.next_event);
	sc->mly_event_waiting = sc->mly_mmbox->mmm_health.status.next_event;

	/* wake up anyone that might be interested in this */
	wakeup(&sc->mly_event_change);
    }
    if (sc->mly_event_counter != sc->mly_event_waiting)
	mly_fetch_event(sc);
}

/********************************************************************************
 * Fetch one event from the controller.
 *
 * If we fail due to resource starvation, we'll be retried the next time a
 * command completes.
 */
static void
mly_fetch_event(struct mly_softc *sc)
{
    struct mly_command		*mc;
    struct mly_command_ioctl	*mci;
    int				s;
    u_int32_t			event;

    debug_called(1);

    /* get a command */
    if (mly_alloc_command(sc, &mc))
	return;

    /* set up the data buffer */
    if ((mc->mc_data = malloc(sizeof(struct mly_event), M_DEVBUF, M_NOWAIT | M_ZERO)) == NULL) {
	mly_release_command(mc);
	return;
    }
    mc->mc_length = sizeof(struct mly_event);
    mc->mc_flags |= MLY_CMD_DATAIN;
    mc->mc_complete = mly_complete_event;

    /*
     * Get an event number to fetch.  It's possible that we've raced with another
     * context for the last event, in which case there will be no more events.
     */
    s = splcam();
    if (sc->mly_event_counter == sc->mly_event_waiting) {
	mly_release_command(mc);
	splx(s);
	return;
    }
    event = sc->mly_event_counter++;
    splx(s);

    /*
     * Build the ioctl.
     *
     * At this point we are committed to sending this request, as it
     * will be the only one constructed for this particular event number.
 */
    mci = (struct mly_command_ioctl *)&mc->mc_packet->ioctl;
    mci->opcode = MDACMD_IOCTL;
    mci->data_size = sizeof(struct mly_event);
    /* the 32-bit event number is scattered across the address/param fields */
    mci->addr.phys.lun = (event >> 16) & 0xff;
    mci->addr.phys.target = (event >> 24) & 0xff;
    mci->addr.phys.channel = 0;
    mci->addr.phys.controller = 0;
    mci->timeout.value = 30;
    mci->timeout.scale = MLY_TIMEOUT_SECONDS;
    mci->sub_ioctl = MDACIOCTL_GETEVENT;
    mci->param.getevent.sequence_number_low = event & 0xffff;

    debug(1, "fetch event %u", event);

    /*
     * Submit the command.
     *
     * Note that failure of mly_start() will result in this event never being
     * fetched.
     */
    if (mly_start(mc) != 0) {
	mly_printf(sc, "couldn't fetch event %u\n", event);
	mly_release_command(mc);
    }
}

/********************************************************************************
 * Handle the completion of an event poll.
 */
static void
mly_complete_event(struct mly_command *mc)
{
    struct mly_softc	*sc = mc->mc_sc;
    struct mly_event	*me = (struct mly_event *)mc->mc_data;

    debug_called(1);

    /*
     * If the event was successfully fetched, process it.
     */
    if (mc->mc_status == SCSI_STATUS_OK) {
	mly_process_event(sc, me);
	free(me, M_DEVBUF);
    }
    mly_release_command(mc);

    /*
     * Check for another event.
     */
    mly_check_event(sc);
}

/********************************************************************************
 * Process a controller event.
 */
static void
mly_process_event(struct mly_softc *sc, struct mly_event *me)
{
    struct scsi_sense_data_fixed *ssd;
    char			 *fp, *tp;
    int				 bus, target, event, class, action;

    ssd = (struct scsi_sense_data_fixed *)&me->sense[0];

    /*
     * Errors can be reported using vendor-unique sense data.  In this case, the
     * event code will be 0x1c (Request sense data present), the sense key will
     * be 0x09 (vendor specific), the MSB of the ASC will be set, and the
     * actual event code will be a 16-bit value comprised of the ASCQ (low byte)
     * and low seven bits of the ASC (low seven bits of the high byte).
 */
    if ((me->code == 0x1c) &&
	((ssd->flags & SSD_KEY) == SSD_KEY_Vendor_Specific) &&
	(ssd->add_sense_code & 0x80)) {
	event = ((int)(ssd->add_sense_code & ~0x80) << 8) + ssd->add_sense_code_qual;
    } else {
	event = me->code;
    }

    /* look up event, get codes */
    fp = mly_describe_code(mly_table_event, event);

    debug(1, "Event %d code 0x%x", me->sequence_number, me->code);

    /* quiet event? (upper-case class is only reported when bootverbose) */
    class = fp[0];
    if (isupper(class) && bootverbose)
	class = tolower(class);

    /* get action code, text string */
    action = fp[1];
    tp = &fp[2];

    /*
     * Print some information about the event.
     *
     * This code uses a table derived from the corresponding portion of the Linux
     * driver, and thus the parser is very similar.
     */
    switch(class) {
    case 'p':		/* error on physical device */
	mly_printf(sc, "physical device %d:%d %s\n", me->channel, me->target, tp);
	if (action == 'r')
	    sc->mly_btl[me->channel][me->target].mb_flags |= MLY_BTL_RESCAN;
	break;
    case 'l':		/* error on logical unit */
    case 'm':		/* message about logical unit */
	bus = MLY_LOGDEV_BUS(sc, me->lun);
	target = MLY_LOGDEV_TARGET(sc, me->lun);
	mly_name_device(sc, bus, target);
	mly_printf(sc, "logical device %d (%s) %s\n", me->lun, sc->mly_btl[bus][target].mb_name, tp);
	if (action == 'r')
	    sc->mly_btl[bus][target].mb_flags |= MLY_BTL_RESCAN;
	break;
    case 's':		/* report of sense data */
	if (((ssd->flags & SSD_KEY) == SSD_KEY_NO_SENSE) ||
	    (((ssd->flags & SSD_KEY) == SSD_KEY_NOT_READY) &&
	     (ssd->add_sense_code == 0x04) &&
	     ((ssd->add_sense_code_qual == 0x01) || (ssd->add_sense_code_qual == 0x02))))
	    break;	/* ignore NO_SENSE or NOT_READY in one case */

	mly_printf(sc, "physical device %d:%d %s\n", me->channel, me->target, tp);
	mly_printf(sc, " sense key %d asc %02x ascq %02x\n",
		   ssd->flags & SSD_KEY, ssd->add_sense_code, ssd->add_sense_code_qual);
	mly_printf(sc, " info %4D csi %4D\n", ssd->info, "", ssd->cmd_spec_info, "");
	if (action == 'r')
	    sc->mly_btl[me->channel][me->target].mb_flags |= MLY_BTL_RESCAN;
	break;
    case 'e':
	mly_printf(sc, tp, me->target,
		   me->lun);
	printf("\n");
	break;
    case 'c':
	mly_printf(sc, "controller %s\n", tp);
	break;
    case '?':
	mly_printf(sc, "%s - %d\n", tp, me->code);
	break;
    default:	/* probably a 'noisy' event being ignored */
	break;
    }
}

/********************************************************************************
 * Perform periodic activities.
 */
static void
mly_periodic(void *data)
{
    struct mly_softc	*sc = (struct mly_softc *)data;
    int			bus, target;

    debug_called(2);
    MLY_ASSERT_LOCKED(sc);

    /*
     * Scan devices.
     */
    for (bus = 0; bus < sc->mly_cam_channels; bus++) {
	if (MLY_BUS_IS_VALID(sc, bus)) {
	    for (target = 0; target < MLY_MAX_TARGETS; target++) {

		/* ignore the controller in this scan */
		if (target == sc->mly_controllerparam->initiator_id)
		    continue;

		/* perform device rescan? */
		if (sc->mly_btl[bus][target].mb_flags & MLY_BTL_RESCAN)
		    mly_rescan_btl(sc, bus, target);
	    }
	}
    }

    /* check for controller events */
    mly_check_event(sc);

    /* reschedule ourselves */
    callout_schedule(&sc->mly_periodic, MLY_PERIODIC_INTERVAL * hz);
}

/********************************************************************************
 ********************************************************************************
                                                               Command Processing
 ********************************************************************************
 ********************************************************************************/

/********************************************************************************
 * Run a command and wait for it to complete.
 *
 */
static int
mly_immediate_command(struct mly_command *mc)
{
    struct mly_softc	*sc = mc->mc_sc;
    int			error;

    debug_called(1);

    MLY_ASSERT_LOCKED(sc);
    if ((error = mly_start(mc))) {
	return(error);
    }

    if (sc->mly_state & MLY_STATE_INTERRUPTS_ON) {
	/* sleep on the command; woken by mly_complete() */
	while(!(mc->mc_flags & MLY_CMD_COMPLETE)) {
	    mtx_sleep(mc, &sc->mly_lock, PRIBIO, "mlywait", 0);
	}
    } else {
	/* spin and collect status while we do */
	while(!(mc->mc_flags & MLY_CMD_COMPLETE)) {
	    mly_done(mc->mc_sc);
	}
    }
    return(0);
}

/********************************************************************************
 * Deliver a command to the controller.
 *
 * XXX it would be good to just queue commands that we can't submit immediately
 *     and send them later, but we probably want a wrapper for that so that
 *     we don't hang on a failed submission for an immediate command.
 */
static int
mly_start(struct mly_command *mc)
{
    struct mly_softc		*sc = mc->mc_sc;
    union mly_command_packet	*pkt;

    debug_called(2);
    MLY_ASSERT_LOCKED(sc);

    /*
     * Set the command up for delivery to the controller.
     */
    mly_map_command(mc);
    mc->mc_packet->generic.command_id = mc->mc_slot;

#ifdef MLY_DEBUG
    mc->mc_timestamp = time_second;
#endif

    /*
     * Do we have to use the hardware mailbox?
     */
    if (!(sc->mly_state & MLY_STATE_MMBOX_ACTIVE)) {
	/*
	 * Check to see if the controller is ready for us.
	 */
	if (MLY_IDBR_TRUE(sc, MLY_HM_CMDSENT)) {
	    return(EBUSY);
	}
	mc->mc_flags |= MLY_CMD_BUSY;

	/*
	 * It's ready, send the command.
 */
	MLY_SET_MBOX(sc, sc->mly_command_mailbox, &mc->mc_packetphys);
	MLY_SET_REG(sc, sc->mly_idbr, MLY_HM_CMDSENT);

    } else {	/* use memory-mailbox mode */

	pkt = &sc->mly_mmbox->mmm_command[sc->mly_mmbox_command_index];

	/* check to see if the next index is free yet */
	if (pkt->mmbox.flag != 0) {
	    return(EBUSY);
	}
	mc->mc_flags |= MLY_CMD_BUSY;

	/* copy in new command */
	bcopy(mc->mc_packet->mmbox.data, pkt->mmbox.data, sizeof(pkt->mmbox.data));
	/* barrier to ensure completion of previous write before we write the flag */
	bus_barrier(sc->mly_regs_resource, 0, 0, BUS_SPACE_BARRIER_WRITE);
	/* copy flag last */
	pkt->mmbox.flag = mc->mc_packet->mmbox.flag;
	/* barrier to ensure completion of previous write before we notify the controller */
	bus_barrier(sc->mly_regs_resource, 0, 0, BUS_SPACE_BARRIER_WRITE);

	/* signal controller, update index */
	MLY_SET_REG(sc, sc->mly_idbr, MLY_AM_CMDSENT);
	sc->mly_mmbox_command_index = (sc->mly_mmbox_command_index + 1) % MLY_MMBOX_COMMANDS;
    }

    mly_enqueue_busy(mc);
    return(0);
}

/********************************************************************************
 * Pick up command status from the controller, schedule a completion event
 */
static void
mly_done(struct mly_softc *sc)
{
    struct mly_command		*mc;
    union mly_status_packet	*sp;
    u_int16_t			slot;
    int				worked;

    MLY_ASSERT_LOCKED(sc);
    worked = 0;

    /* pick up hardware-mailbox commands */
    if (MLY_ODBR_TRUE(sc, MLY_HM_STSREADY)) {
	slot = MLY_GET_REG2(sc, sc->mly_status_mailbox);
	if (slot < MLY_SLOT_MAX) {
	    mc = &sc->mly_command[slot - MLY_SLOT_START];
	    mc->mc_status = MLY_GET_REG(sc, sc->mly_status_mailbox + 2);
	    mc->mc_sense = MLY_GET_REG(sc, sc->mly_status_mailbox + 3);
	    mc->mc_resid = MLY_GET_REG4(sc, sc->mly_status_mailbox + 4);
	    mly_remove_busy(mc);
	    mc->mc_flags &= ~MLY_CMD_BUSY;
	    mly_enqueue_complete(mc);
	    worked = 1;
	} else {
	    /* slot 0xffff may mean "extremely bogus command" */
	    mly_printf(sc, "got HM completion for illegal slot %u\n", slot);
	}

	/* unconditionally acknowledge status */
	MLY_SET_REG(sc,
		    sc->mly_odbr, MLY_HM_STSREADY);
	MLY_SET_REG(sc, sc->mly_idbr, MLY_HM_STSACK);
    }

    /* pick up memory-mailbox commands */
    if (MLY_ODBR_TRUE(sc, MLY_AM_STSREADY)) {
	for (;;) {
	    sp = &sc->mly_mmbox->mmm_status[sc->mly_mmbox_status_index];

	    /* check for more status */
	    if (sp->mmbox.flag == 0)
		break;

	    /* get slot number */
	    slot = sp->status.command_id;
	    if (slot < MLY_SLOT_MAX) {
		mc = &sc->mly_command[slot - MLY_SLOT_START];
		mc->mc_status = sp->status.status;
		mc->mc_sense = sp->status.sense_length;
		mc->mc_resid = sp->status.residue;
		mly_remove_busy(mc);
		mc->mc_flags &= ~MLY_CMD_BUSY;
		mly_enqueue_complete(mc);
		worked = 1;
	    } else {
		/* slot 0xffff may mean "extremely bogus command" */
		mly_printf(sc, "got AM completion for illegal slot %u at %d\n",
			   slot, sc->mly_mmbox_status_index);
	    }

	    /* clear and move to next index */
	    sp->mmbox.flag = 0;
	    sc->mly_mmbox_status_index = (sc->mly_mmbox_status_index + 1) % MLY_MMBOX_STATUS;
	}

	/* acknowledge that we have collected status value(s) */
	MLY_SET_REG(sc, sc->mly_odbr, MLY_AM_STSREADY);
    }

    /* defer completion processing to the taskqueue when running interrupt-driven */
    if (worked) {
	if (sc->mly_state & MLY_STATE_INTERRUPTS_ON)
	    taskqueue_enqueue(taskqueue_thread, &sc->mly_task_complete);
	else
	    mly_complete(sc);
    }
}

/********************************************************************************
 * Process completed commands
 */
static void
mly_complete_handler(void *context, int pending)
{
    struct mly_softc	*sc = (struct mly_softc *)context;

    MLY_LOCK(sc);
    mly_complete(sc);
    MLY_UNLOCK(sc);
}

static void
mly_complete(struct mly_softc *sc)
{
    struct mly_command	*mc;
    void	        (* mc_complete)(struct mly_command *mc);

    debug_called(2);

    /*
     * Spin pulling commands off the completed queue and processing them.
     */
    while ((mc = mly_dequeue_complete(sc)) != NULL) {

	/*
	 * Free controller resources, mark command complete.
	 *
	 * Note that as soon as we mark the command complete, it may be freed
	 * out from under us, so we need to save the mc_complete field in
	 * order to later avoid dereferencing mc.
	 * (We would not expect to
	 * have a polling/sleeping consumer with mc_complete != NULL).
	 */
	mly_unmap_command(mc);
	mc_complete = mc->mc_complete;
	mc->mc_flags |= MLY_CMD_COMPLETE;

	/*
	 * Call completion handler or wake up sleeping consumer.
	 */
	if (mc_complete != NULL) {
	    mc_complete(mc);
	} else {
	    wakeup(mc);
	}
    }

    /*
     * XXX if we are deferring commands due to controller-busy status, we should
     *     retry submitting them here.
     */
}

/********************************************************************************
 ********************************************************************************
                                                        Command Buffer Management
 ********************************************************************************
 ********************************************************************************/

/********************************************************************************
 * Allocate a command.
 */
static int
mly_alloc_command(struct mly_softc *sc, struct mly_command **mcp)
{
    struct mly_command	*mc;

    debug_called(3);

    if ((mc = mly_dequeue_free(sc)) == NULL)
	return(ENOMEM);

    *mcp = mc;
    return(0);
}

/********************************************************************************
 * Release a command back to the freelist.
 */
static void
mly_release_command(struct mly_command *mc)
{
    debug_called(3);

    /*
     * Fill in parts of the command that may cause confusion if
     * a consumer doesn't when we are later allocated.
     */
    mc->mc_data = NULL;
    mc->mc_flags = 0;
    mc->mc_complete = NULL;
    mc->mc_private = NULL;

    /*
     * By default, we set up to overwrite the command packet with
     * sense information.
     */
    mc->mc_packet->generic.sense_buffer_address = mc->mc_packetphys;
    mc->mc_packet->generic.maximum_sense_size = sizeof(union mly_command_packet);

    mly_enqueue_free(mc);
}

/********************************************************************************
 * Map helper for command allocation.
 */
static void
mly_alloc_commands_map(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
    struct mly_softc	*sc = (struct mly_softc *)arg;

    debug_called(1);

    /* packet array is one contiguous segment; record its bus address */
    sc->mly_packetphys = segs[0].ds_addr;
}

/********************************************************************************
 * Allocate and initialise command and packet structures.
 *
 * If the controller supports fewer than MLY_MAX_COMMANDS commands, limit our
 * allocation to that number.  If we don't yet know how many commands the
 * controller supports, allocate a very small set (suitable for initialisation
 * purposes only).
 */
static int
mly_alloc_commands(struct mly_softc *sc)
{
    struct mly_command		*mc;
    int				i, ncmd;

    if (sc->mly_controllerinfo == NULL) {
	ncmd = 4;
    } else {
	ncmd = min(MLY_MAX_COMMANDS, sc->mly_controllerinfo->maximum_parallel_commands);
    }

    /*
     * Allocate enough space for all the command packets in one chunk and
     * map them permanently into controller-visible space.
     */
    if (bus_dmamem_alloc(sc->mly_packet_dmat, (void **)&sc->mly_packet,
			 BUS_DMA_NOWAIT, &sc->mly_packetmap)) {
	return(ENOMEM);
    }
    if (bus_dmamap_load(sc->mly_packet_dmat, sc->mly_packetmap, sc->mly_packet,
			ncmd * sizeof(union mly_command_packet), mly_alloc_commands_map, sc, BUS_DMA_NOWAIT) != 0)
	return (ENOMEM);

    for (i = 0; i < ncmd; i++) {
	mc = &sc->mly_command[i];
	bzero(mc, sizeof(*mc));
	mc->mc_sc = sc;
	mc->mc_slot = MLY_SLOT_START + i;
	mc->mc_packet = sc->mly_packet + i;
	mc->mc_packetphys = sc->mly_packetphys + (i * sizeof(union mly_command_packet));
	/* only commands whose data map was created get put on the freelist */
	if (!bus_dmamap_create(sc->mly_buffer_dmat, 0, &mc->mc_datamap))
	    mly_release_command(mc);
    }
    return(0);
}

/********************************************************************************
 * Free all the storage held by commands.
 *
 * Must be called with all commands on the free list.
 */
static void
mly_release_commands(struct mly_softc *sc)
{
    struct mly_command	*mc;

    /* throw away command buffer DMA maps */
    while (mly_alloc_command(sc, &mc) == 0)
	bus_dmamap_destroy(sc->mly_buffer_dmat, mc->mc_datamap);

    /* release the packet storage */
    if (sc->mly_packet != NULL) {
	bus_dmamap_unload(sc->mly_packet_dmat, sc->mly_packetmap);
	bus_dmamem_free(sc->mly_packet_dmat, sc->mly_packet, sc->mly_packetmap);
	sc->mly_packet = NULL;
    }
}

/********************************************************************************
 * Command-mapping helper function - populate this command's s/g table
 * with the s/g entries for its data.
 */
static void
mly_map_command_sg(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
    struct mly_command		*mc = (struct mly_command *)arg;
    struct mly_softc		*sc = mc->mc_sc;
    struct mly_command_generic	*gen = &(mc->mc_packet->generic);
    struct mly_sg_entry		*sg;
    int				i, tabofs;

    debug_called(2);

    /* can we use the transfer structure directly? (up to 2 segments inline) */
    if (nseg <= 2) {
	sg = &gen->transfer.direct.sg[0];
	gen->command_control.extended_sg_table = 0;
    } else {
	/* otherwise use this command's slice of the shared s/g table */
	tabofs = ((mc->mc_slot - MLY_SLOT_START) * MLY_MAX_SGENTRIES);
	sg = sc->mly_sg_table + tabofs;
	gen->transfer.indirect.entries[0] = nseg;
	gen->transfer.indirect.table_physaddr[0] = sc->mly_sg_busaddr + (tabofs * sizeof(struct mly_sg_entry));
	gen->command_control.extended_sg_table = 1;
    }

    /* copy the s/g table */
    for (i = 0; i < nseg; i++) {
	sg[i].physaddr = segs[i].ds_addr;
	sg[i].length = segs[i].ds_len;
    }

}

#if 0
/********************************************************************************
 * Command-mapping helper function - save the cdb's physical address.
 *
 * We don't support 'large' SCSI commands at this time, so this is unused.
 */
static void
mly_map_command_cdb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
    struct mly_command			*mc = (struct mly_command *)arg;

    debug_called(2);

    /* XXX can we safely assume that a CDB will never cross a page boundary?
 */
    if ((segs[0].ds_addr % PAGE_SIZE) >
	((segs[0].ds_addr + mc->mc_packet->scsi_large.cdb_length) % PAGE_SIZE))
	panic("cdb crosses page boundary");

    /* fix up fields in the command packet */
    mc->mc_packet->scsi_large.cdb_physaddr = segs[0].ds_addr;
}
#endif

/********************************************************************************
 * Map a command into controller-visible space
 */
static void
mly_map_command(struct mly_command *mc)
{
    struct mly_softc	*sc = mc->mc_sc;

    debug_called(2);

    /* don't map more than once */
    if (mc->mc_flags & MLY_CMD_MAPPED)
	return;

    /* does the command have a data buffer? */
    if (mc->mc_data != NULL) {
	/* CCB-originated data is loaded via the CCB; driver buffers directly */
	if (mc->mc_flags & MLY_CMD_CCB)
	    bus_dmamap_load_ccb(sc->mly_buffer_dmat, mc->mc_datamap,
				mc->mc_data, mly_map_command_sg, mc, 0);
	else
	    bus_dmamap_load(sc->mly_buffer_dmat, mc->mc_datamap,
			    mc->mc_data, mc->mc_length,
			    mly_map_command_sg, mc, 0);

	if (mc->mc_flags & MLY_CMD_DATAIN)
	    bus_dmamap_sync(sc->mly_buffer_dmat, mc->mc_datamap, BUS_DMASYNC_PREREAD);
	if (mc->mc_flags & MLY_CMD_DATAOUT)
	    bus_dmamap_sync(sc->mly_buffer_dmat, mc->mc_datamap, BUS_DMASYNC_PREWRITE);
    }
    mc->mc_flags |= MLY_CMD_MAPPED;
}

/********************************************************************************
 * Unmap a command from controller-visible space
 */
static void
mly_unmap_command(struct mly_command *mc)
{
    struct mly_softc	*sc = mc->mc_sc;

    debug_called(2);

    if (!(mc->mc_flags & MLY_CMD_MAPPED))
	return;

    /* does the command have a data buffer?
 */
    if (mc->mc_data != NULL) {
	if (mc->mc_flags & MLY_CMD_DATAIN)
	    bus_dmamap_sync(sc->mly_buffer_dmat, mc->mc_datamap, BUS_DMASYNC_POSTREAD);
	if (mc->mc_flags & MLY_CMD_DATAOUT)
	    bus_dmamap_sync(sc->mly_buffer_dmat, mc->mc_datamap, BUS_DMASYNC_POSTWRITE);

	bus_dmamap_unload(sc->mly_buffer_dmat, mc->mc_datamap);
    }
    mc->mc_flags &= ~MLY_CMD_MAPPED;
}

/********************************************************************************
 ********************************************************************************
                                                                    CAM interface
 ********************************************************************************
 ********************************************************************************/

/********************************************************************************
 * Attach the physical and virtual SCSI busses to CAM.
 *
 * Physical bus numbering starts from 0, virtual bus numbering from one greater
 * than the highest physical bus.  Physical busses are only registered if
 * the kernel environment variable "hw.mly.register_physical_channels" is set.
 *
 * When we refer to a "bus", we are referring to the bus number registered with
 * the SIM, whereas a "channel" is a channel number given to the adapter.  In order
 * to keep things simple, we map these 1:1, so "bus" and "channel" may be used
 * interchangeably.
 */
static int
mly_cam_attach(struct mly_softc *sc)
{
    struct cam_devq	*devq;
    int			chn, i;

    debug_called(1);

    /*
     * Allocate a devq for all our channels combined.
     */
    if ((devq = cam_simq_alloc(sc->mly_controllerinfo->maximum_parallel_commands)) == NULL) {
	mly_printf(sc, "can't allocate CAM SIM queue\n");
	return(ENOMEM);
    }

    /*
     * If physical channel registration has been requested, register these first.
     * Note that we enable tagged command queueing for physical channels.
*/ if (testenv("hw.mly.register_physical_channels")) { chn = 0; for (i = 0; i < sc->mly_controllerinfo->physical_channels_present; i++, chn++) { if ((sc->mly_cam_sim[chn] = cam_sim_alloc(mly_cam_action, mly_cam_poll, "mly", sc, device_get_unit(sc->mly_dev), &sc->mly_lock, sc->mly_controllerinfo->maximum_parallel_commands, 1, devq)) == NULL) { return(ENOMEM); } MLY_LOCK(sc); if (xpt_bus_register(sc->mly_cam_sim[chn], sc->mly_dev, chn)) { MLY_UNLOCK(sc); mly_printf(sc, "CAM XPT phsyical channel registration failed\n"); return(ENXIO); } MLY_UNLOCK(sc); debug(1, "registered physical channel %d", chn); } } /* * Register our virtual channels, with bus numbers matching channel numbers. */ chn = sc->mly_controllerinfo->physical_channels_present; for (i = 0; i < sc->mly_controllerinfo->virtual_channels_present; i++, chn++) { if ((sc->mly_cam_sim[chn] = cam_sim_alloc(mly_cam_action, mly_cam_poll, "mly", sc, device_get_unit(sc->mly_dev), &sc->mly_lock, sc->mly_controllerinfo->maximum_parallel_commands, 0, devq)) == NULL) { return(ENOMEM); } MLY_LOCK(sc); if (xpt_bus_register(sc->mly_cam_sim[chn], sc->mly_dev, chn)) { MLY_UNLOCK(sc); mly_printf(sc, "CAM XPT virtual channel registration failed\n"); return(ENXIO); } MLY_UNLOCK(sc); debug(1, "registered virtual channel %d", chn); } /* * This is the total number of channels that (might have been) registered with * CAM. Some may not have been; check the mly_cam_sim array to be certain. 
*/ sc->mly_cam_channels = sc->mly_controllerinfo->physical_channels_present + sc->mly_controllerinfo->virtual_channels_present; return(0); } /******************************************************************************** * Detach from CAM */ static void mly_cam_detach(struct mly_softc *sc) { int i; debug_called(1); MLY_LOCK(sc); for (i = 0; i < sc->mly_cam_channels; i++) { if (sc->mly_cam_sim[i] != NULL) { xpt_bus_deregister(cam_sim_path(sc->mly_cam_sim[i])); cam_sim_free(sc->mly_cam_sim[i], 0); } } MLY_UNLOCK(sc); if (sc->mly_cam_devq != NULL) cam_simq_free(sc->mly_cam_devq); } /************************************************************************ * Rescan a device. */ static void mly_cam_rescan_btl(struct mly_softc *sc, int bus, int target) { union ccb *ccb; debug_called(1); if ((ccb = xpt_alloc_ccb()) == NULL) { mly_printf(sc, "rescan failed (can't allocate CCB)\n"); return; } if (xpt_create_path(&ccb->ccb_h.path, NULL, cam_sim_path(sc->mly_cam_sim[bus]), target, 0) != CAM_REQ_CMP) { mly_printf(sc, "rescan failed (can't create path)\n"); xpt_free_ccb(ccb); return; } debug(1, "rescan target %d:%d", bus, target); xpt_rescan(ccb); } /******************************************************************************** * Handle an action requested by CAM */ static void mly_cam_action(struct cam_sim *sim, union ccb *ccb) { struct mly_softc *sc = cam_sim_softc(sim); debug_called(2); MLY_ASSERT_LOCKED(sc); switch (ccb->ccb_h.func_code) { /* perform SCSI I/O */ case XPT_SCSI_IO: if (!mly_cam_action_io(sim, (struct ccb_scsiio *)&ccb->csio)) return; break; /* perform geometry calculations */ case XPT_CALC_GEOMETRY: { struct ccb_calc_geometry *ccg = &ccb->ccg; u_int32_t secs_per_cylinder; debug(2, "XPT_CALC_GEOMETRY %d:%d:%d", cam_sim_bus(sim), ccb->ccb_h.target_id, ccb->ccb_h.target_lun); if (sc->mly_controllerparam->bios_geometry == MLY_BIOSGEOM_8G) { ccg->heads = 255; ccg->secs_per_track = 63; } else { /* MLY_BIOSGEOM_2G */ ccg->heads = 128; ccg->secs_per_track = 32; } 
secs_per_cylinder = ccg->heads * ccg->secs_per_track; ccg->cylinders = ccg->volume_size / secs_per_cylinder; ccb->ccb_h.status = CAM_REQ_CMP; break; } /* handle path attribute inquiry */ case XPT_PATH_INQ: { struct ccb_pathinq *cpi = &ccb->cpi; debug(2, "XPT_PATH_INQ %d:%d:%d", cam_sim_bus(sim), ccb->ccb_h.target_id, ccb->ccb_h.target_lun); cpi->version_num = 1; cpi->hba_inquiry = PI_TAG_ABLE; /* XXX extra flags for physical channels? */ cpi->target_sprt = 0; cpi->hba_misc = 0; cpi->max_target = MLY_MAX_TARGETS - 1; cpi->max_lun = MLY_MAX_LUNS - 1; cpi->initiator_id = sc->mly_controllerparam->initiator_id; strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); - strlcpy(cpi->hba_vid, "Mylex", HBA_IDLEN); - strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); - cpi->unit_number = cam_sim_unit(sim); - cpi->bus_id = cam_sim_bus(sim); + strlcpy(cpi->hba_vid, "Mylex", HBA_IDLEN); + strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); + cpi->unit_number = cam_sim_unit(sim); + cpi->bus_id = cam_sim_bus(sim); cpi->base_transfer_speed = 132 * 1024; /* XXX what to set this to? */ cpi->transport = XPORT_SPI; cpi->transport_version = 2; cpi->protocol = PROTO_SCSI; cpi->protocol_version = SCSI_REV_2; ccb->ccb_h.status = CAM_REQ_CMP; break; } case XPT_GET_TRAN_SETTINGS: { struct ccb_trans_settings *cts = &ccb->cts; int bus, target; struct ccb_trans_settings_scsi *scsi = &cts->proto_specific.scsi; struct ccb_trans_settings_spi *spi = &cts->xport_specific.spi; cts->protocol = PROTO_SCSI; cts->protocol_version = SCSI_REV_2; cts->transport = XPORT_SPI; cts->transport_version = 2; scsi->flags = 0; scsi->valid = 0; spi->flags = 0; spi->valid = 0; bus = cam_sim_bus(sim); target = cts->ccb_h.target_id; debug(2, "XPT_GET_TRAN_SETTINGS %d:%d", bus, target); /* logical device? */ if (sc->mly_btl[bus][target].mb_flags & MLY_BTL_LOGICAL) { /* nothing special for these */ /* physical device? 
*/ } else if (sc->mly_btl[bus][target].mb_flags & MLY_BTL_PHYSICAL) { /* allow CAM to try tagged transactions */ scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB; scsi->valid |= CTS_SCSI_VALID_TQ; /* convert speed (MHz) to usec */ if (sc->mly_btl[bus][target].mb_speed == 0) { spi->sync_period = 1000000 / 5; } else { spi->sync_period = 1000000 / sc->mly_btl[bus][target].mb_speed; } /* convert bus width to CAM internal encoding */ switch (sc->mly_btl[bus][target].mb_width) { case 32: spi->bus_width = MSG_EXT_WDTR_BUS_32_BIT; break; case 16: spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT; break; case 8: default: spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT; break; } spi->valid |= CTS_SPI_VALID_SYNC_RATE | CTS_SPI_VALID_BUS_WIDTH; /* not a device, bail out */ } else { cts->ccb_h.status = CAM_REQ_CMP_ERR; break; } /* disconnect always OK */ spi->flags |= CTS_SPI_FLAGS_DISC_ENB; spi->valid |= CTS_SPI_VALID_DISC; cts->ccb_h.status = CAM_REQ_CMP; break; } default: /* we can't do this */ debug(2, "unspported func_code = 0x%x", ccb->ccb_h.func_code); ccb->ccb_h.status = CAM_REQ_INVALID; break; } xpt_done(ccb); } /******************************************************************************** * Handle an I/O operation requested by CAM */ static int mly_cam_action_io(struct cam_sim *sim, struct ccb_scsiio *csio) { struct mly_softc *sc = cam_sim_softc(sim); struct mly_command *mc; struct mly_command_scsi_small *ss; int bus, target; int error; bus = cam_sim_bus(sim); target = csio->ccb_h.target_id; debug(2, "XPT_SCSI_IO %d:%d:%d", bus, target, csio->ccb_h.target_lun); /* validate bus number */ if (!MLY_BUS_IS_VALID(sc, bus)) { debug(0, " invalid bus %d", bus); csio->ccb_h.status = CAM_REQ_CMP_ERR; } /* check for I/O attempt to a protected device */ if (sc->mly_btl[bus][target].mb_flags & MLY_BTL_PROTECTED) { debug(2, " device protected"); csio->ccb_h.status = CAM_REQ_CMP_ERR; } /* check for I/O attempt to nonexistent device */ if (!(sc->mly_btl[bus][target].mb_flags & (MLY_BTL_LOGICAL | 
MLY_BTL_PHYSICAL))) { debug(2, " device %d:%d does not exist", bus, target); csio->ccb_h.status = CAM_REQ_CMP_ERR; } /* XXX increase if/when we support large SCSI commands */ if (csio->cdb_len > MLY_CMD_SCSI_SMALL_CDB) { debug(0, " command too large (%d > %d)", csio->cdb_len, MLY_CMD_SCSI_SMALL_CDB); csio->ccb_h.status = CAM_REQ_CMP_ERR; } /* check that the CDB pointer is not to a physical address */ if ((csio->ccb_h.flags & CAM_CDB_POINTER) && (csio->ccb_h.flags & CAM_CDB_PHYS)) { debug(0, " CDB pointer is to physical address"); csio->ccb_h.status = CAM_REQ_CMP_ERR; } /* abandon aborted ccbs or those that have failed validation */ if ((csio->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) { debug(2, "abandoning CCB due to abort/validation failure"); return(EINVAL); } /* * Get a command, or push the ccb back to CAM and freeze the queue. */ if ((error = mly_alloc_command(sc, &mc))) { xpt_freeze_simq(sim, 1); csio->ccb_h.status |= CAM_REQUEUE_REQ; sc->mly_qfrzn_cnt++; return(error); } /* build the command */ mc->mc_data = csio; mc->mc_length = csio->dxfer_len; mc->mc_complete = mly_cam_complete; mc->mc_private = csio; mc->mc_flags |= MLY_CMD_CCB; /* XXX This code doesn't set the data direction in mc_flags. 
*/ /* save the bus number in the ccb for later recovery XXX should be a better way */ csio->ccb_h.sim_priv.entries[0].field = bus; /* build the packet for the controller */ ss = &mc->mc_packet->scsi_small; ss->opcode = MDACMD_SCSI; if (csio->ccb_h.flags & CAM_DIS_DISCONNECT) ss->command_control.disable_disconnect = 1; if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) ss->command_control.data_direction = MLY_CCB_WRITE; ss->data_size = csio->dxfer_len; ss->addr.phys.lun = csio->ccb_h.target_lun; ss->addr.phys.target = csio->ccb_h.target_id; ss->addr.phys.channel = bus; if (csio->ccb_h.timeout < (60 * 1000)) { ss->timeout.value = csio->ccb_h.timeout / 1000; ss->timeout.scale = MLY_TIMEOUT_SECONDS; } else if (csio->ccb_h.timeout < (60 * 60 * 1000)) { ss->timeout.value = csio->ccb_h.timeout / (60 * 1000); ss->timeout.scale = MLY_TIMEOUT_MINUTES; } else { ss->timeout.value = csio->ccb_h.timeout / (60 * 60 * 1000); /* overflow? */ ss->timeout.scale = MLY_TIMEOUT_HOURS; } ss->maximum_sense_size = csio->sense_len; ss->cdb_length = csio->cdb_len; if (csio->ccb_h.flags & CAM_CDB_POINTER) { bcopy(csio->cdb_io.cdb_ptr, ss->cdb, csio->cdb_len); } else { bcopy(csio->cdb_io.cdb_bytes, ss->cdb, csio->cdb_len); } /* give the command to the controller */ if ((error = mly_start(mc))) { xpt_freeze_simq(sim, 1); csio->ccb_h.status |= CAM_REQUEUE_REQ; sc->mly_qfrzn_cnt++; return(error); } return(0); } /******************************************************************************** * Check for possibly-completed commands. 
*/ static void mly_cam_poll(struct cam_sim *sim) { struct mly_softc *sc = cam_sim_softc(sim); debug_called(2); mly_done(sc); } /******************************************************************************** * Handle completion of a command - pass results back through the CCB */ static void mly_cam_complete(struct mly_command *mc) { struct mly_softc *sc = mc->mc_sc; struct ccb_scsiio *csio = (struct ccb_scsiio *)mc->mc_private; struct scsi_inquiry_data *inq = (struct scsi_inquiry_data *)csio->data_ptr; struct mly_btl *btl; u_int8_t cmd; int bus, target; debug_called(2); csio->scsi_status = mc->mc_status; switch(mc->mc_status) { case SCSI_STATUS_OK: /* * In order to report logical device type and status, we overwrite * the result of the INQUIRY command to logical devices. */ bus = csio->ccb_h.sim_priv.entries[0].field; target = csio->ccb_h.target_id; /* XXX validate bus/target? */ if (sc->mly_btl[bus][target].mb_flags & MLY_BTL_LOGICAL) { if (csio->ccb_h.flags & CAM_CDB_POINTER) { cmd = *csio->cdb_io.cdb_ptr; } else { cmd = csio->cdb_io.cdb_bytes[0]; } if (cmd == INQUIRY) { btl = &sc->mly_btl[bus][target]; padstr(inq->vendor, mly_describe_code(mly_table_device_type, btl->mb_type), 8); padstr(inq->product, mly_describe_code(mly_table_device_state, btl->mb_state), 16); padstr(inq->revision, "", 4); } } debug(2, "SCSI_STATUS_OK"); csio->ccb_h.status = CAM_REQ_CMP; break; case SCSI_STATUS_CHECK_COND: debug(1, "SCSI_STATUS_CHECK_COND sense %d resid %d", mc->mc_sense, mc->mc_resid); csio->ccb_h.status = CAM_SCSI_STATUS_ERROR; bzero(&csio->sense_data, SSD_FULL_SIZE); bcopy(mc->mc_packet, &csio->sense_data, mc->mc_sense); csio->sense_len = mc->mc_sense; csio->ccb_h.status |= CAM_AUTOSNS_VALID; csio->resid = mc->mc_resid; /* XXX this is a signed value... 
*/ break; case SCSI_STATUS_BUSY: debug(1, "SCSI_STATUS_BUSY"); csio->ccb_h.status = CAM_SCSI_BUSY; break; default: debug(1, "unknown status 0x%x", csio->scsi_status); csio->ccb_h.status = CAM_REQ_CMP_ERR; break; } if (sc->mly_qfrzn_cnt) { csio->ccb_h.status |= CAM_RELEASE_SIMQ; sc->mly_qfrzn_cnt--; } xpt_done((union ccb *)csio); mly_release_command(mc); } /******************************************************************************** * Find a peripheral attahed at (bus),(target) */ static struct cam_periph * mly_find_periph(struct mly_softc *sc, int bus, int target) { struct cam_periph *periph; struct cam_path *path; int status; status = xpt_create_path(&path, NULL, cam_sim_path(sc->mly_cam_sim[bus]), target, 0); if (status == CAM_REQ_CMP) { periph = cam_periph_find(path, NULL); xpt_free_path(path); } else { periph = NULL; } return(periph); } /******************************************************************************** * Name the device at (bus)(target) */ static int mly_name_device(struct mly_softc *sc, int bus, int target) { struct cam_periph *periph; if ((periph = mly_find_periph(sc, bus, target)) != NULL) { sprintf(sc->mly_btl[bus][target].mb_name, "%s%d", periph->periph_name, periph->unit_number); return(0); } sc->mly_btl[bus][target].mb_name[0] = 0; return(ENOENT); } /******************************************************************************** ******************************************************************************** Hardware Control ******************************************************************************** ********************************************************************************/ /******************************************************************************** * Handshake with the firmware while the card is being initialised. 
*/ static int mly_fwhandshake(struct mly_softc *sc) { u_int8_t error, param0, param1; int spinup = 0; debug_called(1); /* set HM_STSACK and let the firmware initialise */ MLY_SET_REG(sc, sc->mly_idbr, MLY_HM_STSACK); DELAY(1000); /* too short? */ /* if HM_STSACK is still true, the controller is initialising */ if (!MLY_IDBR_TRUE(sc, MLY_HM_STSACK)) return(0); mly_printf(sc, "controller initialisation started\n"); /* spin waiting for initialisation to finish, or for a message to be delivered */ while (MLY_IDBR_TRUE(sc, MLY_HM_STSACK)) { /* check for a message */ if (MLY_ERROR_VALID(sc)) { error = MLY_GET_REG(sc, sc->mly_error_status) & ~MLY_MSG_EMPTY; param0 = MLY_GET_REG(sc, sc->mly_command_mailbox); param1 = MLY_GET_REG(sc, sc->mly_command_mailbox + 1); switch(error) { case MLY_MSG_SPINUP: if (!spinup) { mly_printf(sc, "drive spinup in progress\n"); spinup = 1; /* only print this once (should print drive being spun?) */ } break; case MLY_MSG_RACE_RECOVERY_FAIL: mly_printf(sc, "mirror race recovery failed, one or more drives offline\n"); break; case MLY_MSG_RACE_IN_PROGRESS: mly_printf(sc, "mirror race recovery in progress\n"); break; case MLY_MSG_RACE_ON_CRITICAL: mly_printf(sc, "mirror race recovery on a critical drive\n"); break; case MLY_MSG_PARITY_ERROR: mly_printf(sc, "FATAL MEMORY PARITY ERROR\n"); return(ENXIO); default: mly_printf(sc, "unknown initialisation code 0x%x\n", error); } } } return(0); } /******************************************************************************** ******************************************************************************** Debugging and Diagnostics ******************************************************************************** ********************************************************************************/ /******************************************************************************** * Print some information about the controller. 
*/ static void mly_describe_controller(struct mly_softc *sc) { struct mly_ioctl_getcontrollerinfo *mi = sc->mly_controllerinfo; mly_printf(sc, "%16s, %d channel%s, firmware %d.%02d-%d-%02d (%02d%02d%02d%02d), %dMB RAM\n", mi->controller_name, mi->physical_channels_present, (mi->physical_channels_present) > 1 ? "s" : "", mi->fw_major, mi->fw_minor, mi->fw_turn, mi->fw_build, /* XXX turn encoding? */ mi->fw_century, mi->fw_year, mi->fw_month, mi->fw_day, mi->memory_size); if (bootverbose) { mly_printf(sc, "%s %s (%x), %dMHz %d-bit %.16s\n", mly_describe_code(mly_table_oemname, mi->oem_information), mly_describe_code(mly_table_controllertype, mi->controller_type), mi->controller_type, mi->interface_speed, mi->interface_width, mi->interface_name); mly_printf(sc, "%dMB %dMHz %d-bit %s%s%s, cache %dMB\n", mi->memory_size, mi->memory_speed, mi->memory_width, mly_describe_code(mly_table_memorytype, mi->memory_type), mi->memory_parity ? "+parity": "",mi->memory_ecc ? "+ECC": "", mi->cache_size); mly_printf(sc, "CPU: %s @ %dMHz\n", mly_describe_code(mly_table_cputype, mi->cpu[0].type), mi->cpu[0].speed); if (mi->l2cache_size != 0) mly_printf(sc, "%dKB L2 cache\n", mi->l2cache_size); if (mi->exmemory_size != 0) mly_printf(sc, "%dMB %dMHz %d-bit private %s%s%s\n", mi->exmemory_size, mi->exmemory_speed, mi->exmemory_width, mly_describe_code(mly_table_memorytype, mi->exmemory_type), mi->exmemory_parity ? "+parity": "",mi->exmemory_ecc ? "+ECC": ""); mly_printf(sc, "battery backup %s\n", mi->bbu_present ? 
"present" : "not installed"); mly_printf(sc, "maximum data transfer %d blocks, maximum sg entries/command %d\n", mi->maximum_block_count, mi->maximum_sg_entries); mly_printf(sc, "logical devices present/critical/offline %d/%d/%d\n", mi->logical_devices_present, mi->logical_devices_critical, mi->logical_devices_offline); mly_printf(sc, "physical devices present %d\n", mi->physical_devices_present); mly_printf(sc, "physical disks present/offline %d/%d\n", mi->physical_disks_present, mi->physical_disks_offline); mly_printf(sc, "%d physical channel%s, %d virtual channel%s of %d possible\n", mi->physical_channels_present, mi->physical_channels_present == 1 ? "" : "s", mi->virtual_channels_present, mi->virtual_channels_present == 1 ? "" : "s", mi->virtual_channels_possible); mly_printf(sc, "%d parallel commands supported\n", mi->maximum_parallel_commands); mly_printf(sc, "%dMB flash ROM, %d of %d maximum cycles\n", mi->flash_size, mi->flash_age, mi->flash_maximum_age); } } #ifdef MLY_DEBUG /******************************************************************************** * Print some controller state */ static void mly_printstate(struct mly_softc *sc) { mly_printf(sc, "IDBR %02x ODBR %02x ERROR %02x (%x %x %x)\n", MLY_GET_REG(sc, sc->mly_idbr), MLY_GET_REG(sc, sc->mly_odbr), MLY_GET_REG(sc, sc->mly_error_status), sc->mly_idbr, sc->mly_odbr, sc->mly_error_status); mly_printf(sc, "IMASK %02x ISTATUS %02x\n", MLY_GET_REG(sc, sc->mly_interrupt_mask), MLY_GET_REG(sc, sc->mly_interrupt_status)); mly_printf(sc, "COMMAND %02x %02x %02x %02x %02x %02x %02x %02x\n", MLY_GET_REG(sc, sc->mly_command_mailbox), MLY_GET_REG(sc, sc->mly_command_mailbox + 1), MLY_GET_REG(sc, sc->mly_command_mailbox + 2), MLY_GET_REG(sc, sc->mly_command_mailbox + 3), MLY_GET_REG(sc, sc->mly_command_mailbox + 4), MLY_GET_REG(sc, sc->mly_command_mailbox + 5), MLY_GET_REG(sc, sc->mly_command_mailbox + 6), MLY_GET_REG(sc, sc->mly_command_mailbox + 7)); mly_printf(sc, "STATUS %02x %02x %02x %02x %02x %02x %02x 
%02x\n", MLY_GET_REG(sc, sc->mly_status_mailbox), MLY_GET_REG(sc, sc->mly_status_mailbox + 1), MLY_GET_REG(sc, sc->mly_status_mailbox + 2), MLY_GET_REG(sc, sc->mly_status_mailbox + 3), MLY_GET_REG(sc, sc->mly_status_mailbox + 4), MLY_GET_REG(sc, sc->mly_status_mailbox + 5), MLY_GET_REG(sc, sc->mly_status_mailbox + 6), MLY_GET_REG(sc, sc->mly_status_mailbox + 7)); mly_printf(sc, " %04x %08x\n", MLY_GET_REG2(sc, sc->mly_status_mailbox), MLY_GET_REG4(sc, sc->mly_status_mailbox + 4)); } struct mly_softc *mly_softc0 = NULL; void mly_printstate0(void) { if (mly_softc0 != NULL) mly_printstate(mly_softc0); } /******************************************************************************** * Print a command */ static void mly_print_command(struct mly_command *mc) { struct mly_softc *sc = mc->mc_sc; mly_printf(sc, "COMMAND @ %p\n", mc); mly_printf(sc, " slot %d\n", mc->mc_slot); mly_printf(sc, " status 0x%x\n", mc->mc_status); mly_printf(sc, " sense len %d\n", mc->mc_sense); mly_printf(sc, " resid %d\n", mc->mc_resid); mly_printf(sc, " packet %p/0x%llx\n", mc->mc_packet, mc->mc_packetphys); if (mc->mc_packet != NULL) mly_print_packet(mc); mly_printf(sc, " data %p/%d\n", mc->mc_data, mc->mc_length); mly_printf(sc, " flags %b\n", mc->mc_flags, "\20\1busy\2complete\3slotted\4mapped\5datain\6dataout\n"); mly_printf(sc, " complete %p\n", mc->mc_complete); mly_printf(sc, " private %p\n", mc->mc_private); } /******************************************************************************** * Print a command packet */ static void mly_print_packet(struct mly_command *mc) { struct mly_softc *sc = mc->mc_sc; struct mly_command_generic *ge = (struct mly_command_generic *)mc->mc_packet; struct mly_command_scsi_small *ss = (struct mly_command_scsi_small *)mc->mc_packet; struct mly_command_scsi_large *sl = (struct mly_command_scsi_large *)mc->mc_packet; struct mly_command_ioctl *io = (struct mly_command_ioctl *)mc->mc_packet; int transfer; mly_printf(sc, " command_id %d\n", ge->command_id); 
mly_printf(sc, " opcode %d\n", ge->opcode); mly_printf(sc, " command_control fua %d dpo %d est %d dd %s nas %d ddis %d\n", ge->command_control.force_unit_access, ge->command_control.disable_page_out, ge->command_control.extended_sg_table, (ge->command_control.data_direction == MLY_CCB_WRITE) ? "WRITE" : "READ", ge->command_control.no_auto_sense, ge->command_control.disable_disconnect); mly_printf(sc, " data_size %d\n", ge->data_size); mly_printf(sc, " sense_buffer_address 0x%llx\n", ge->sense_buffer_address); mly_printf(sc, " lun %d\n", ge->addr.phys.lun); mly_printf(sc, " target %d\n", ge->addr.phys.target); mly_printf(sc, " channel %d\n", ge->addr.phys.channel); mly_printf(sc, " logical device %d\n", ge->addr.log.logdev); mly_printf(sc, " controller %d\n", ge->addr.phys.controller); mly_printf(sc, " timeout %d %s\n", ge->timeout.value, (ge->timeout.scale == MLY_TIMEOUT_SECONDS) ? "seconds" : ((ge->timeout.scale == MLY_TIMEOUT_MINUTES) ? "minutes" : "hours")); mly_printf(sc, " maximum_sense_size %d\n", ge->maximum_sense_size); switch(ge->opcode) { case MDACMD_SCSIPT: case MDACMD_SCSI: mly_printf(sc, " cdb length %d\n", ss->cdb_length); mly_printf(sc, " cdb %*D\n", ss->cdb_length, ss->cdb, " "); transfer = 1; break; case MDACMD_SCSILC: case MDACMD_SCSILCPT: mly_printf(sc, " cdb length %d\n", sl->cdb_length); mly_printf(sc, " cdb 0x%llx\n", sl->cdb_physaddr); transfer = 1; break; case MDACMD_IOCTL: mly_printf(sc, " sub_ioctl 0x%x\n", io->sub_ioctl); switch(io->sub_ioctl) { case MDACIOCTL_SETMEMORYMAILBOX: mly_printf(sc, " health_buffer_size %d\n", io->param.setmemorymailbox.health_buffer_size); mly_printf(sc, " health_buffer_phys 0x%llx\n", io->param.setmemorymailbox.health_buffer_physaddr); mly_printf(sc, " command_mailbox 0x%llx\n", io->param.setmemorymailbox.command_mailbox_physaddr); mly_printf(sc, " status_mailbox 0x%llx\n", io->param.setmemorymailbox.status_mailbox_physaddr); transfer = 0; break; case MDACIOCTL_SETREALTIMECLOCK: case MDACIOCTL_GETHEALTHSTATUS: 
case MDACIOCTL_GETCONTROLLERINFO: case MDACIOCTL_GETLOGDEVINFOVALID: case MDACIOCTL_GETPHYSDEVINFOVALID: case MDACIOCTL_GETPHYSDEVSTATISTICS: case MDACIOCTL_GETLOGDEVSTATISTICS: case MDACIOCTL_GETCONTROLLERSTATISTICS: case MDACIOCTL_GETBDT_FOR_SYSDRIVE: case MDACIOCTL_CREATENEWCONF: case MDACIOCTL_ADDNEWCONF: case MDACIOCTL_GETDEVCONFINFO: case MDACIOCTL_GETFREESPACELIST: case MDACIOCTL_MORE: case MDACIOCTL_SETPHYSDEVPARAMETER: case MDACIOCTL_GETPHYSDEVPARAMETER: case MDACIOCTL_GETLOGDEVPARAMETER: case MDACIOCTL_SETLOGDEVPARAMETER: mly_printf(sc, " param %10D\n", io->param.data.param, " "); transfer = 1; break; case MDACIOCTL_GETEVENT: mly_printf(sc, " event %d\n", io->param.getevent.sequence_number_low + ((u_int32_t)io->addr.log.logdev << 16)); transfer = 1; break; case MDACIOCTL_SETRAIDDEVSTATE: mly_printf(sc, " state %d\n", io->param.setraiddevstate.state); transfer = 0; break; case MDACIOCTL_XLATEPHYSDEVTORAIDDEV: mly_printf(sc, " raid_device %d\n", io->param.xlatephysdevtoraiddev.raid_device); mly_printf(sc, " controller %d\n", io->param.xlatephysdevtoraiddev.controller); mly_printf(sc, " channel %d\n", io->param.xlatephysdevtoraiddev.channel); mly_printf(sc, " target %d\n", io->param.xlatephysdevtoraiddev.target); mly_printf(sc, " lun %d\n", io->param.xlatephysdevtoraiddev.lun); transfer = 0; break; case MDACIOCTL_GETGROUPCONFINFO: mly_printf(sc, " group %d\n", io->param.getgroupconfinfo.group); transfer = 1; break; case MDACIOCTL_GET_SUBSYSTEM_DATA: case MDACIOCTL_SET_SUBSYSTEM_DATA: case MDACIOCTL_STARTDISOCVERY: case MDACIOCTL_INITPHYSDEVSTART: case MDACIOCTL_INITPHYSDEVSTOP: case MDACIOCTL_INITRAIDDEVSTART: case MDACIOCTL_INITRAIDDEVSTOP: case MDACIOCTL_REBUILDRAIDDEVSTART: case MDACIOCTL_REBUILDRAIDDEVSTOP: case MDACIOCTL_MAKECONSISTENTDATASTART: case MDACIOCTL_MAKECONSISTENTDATASTOP: case MDACIOCTL_CONSISTENCYCHECKSTART: case MDACIOCTL_CONSISTENCYCHECKSTOP: case MDACIOCTL_RESETDEVICE: case MDACIOCTL_FLUSHDEVICEDATA: case MDACIOCTL_PAUSEDEVICE: case 
MDACIOCTL_UNPAUSEDEVICE: case MDACIOCTL_LOCATEDEVICE: case MDACIOCTL_SETMASTERSLAVEMODE: case MDACIOCTL_DELETERAIDDEV: case MDACIOCTL_REPLACEINTERNALDEV: case MDACIOCTL_CLEARCONF: case MDACIOCTL_GETCONTROLLERPARAMETER: case MDACIOCTL_SETCONTRLLERPARAMETER: case MDACIOCTL_CLEARCONFSUSPMODE: case MDACIOCTL_STOREIMAGE: case MDACIOCTL_READIMAGE: case MDACIOCTL_FLASHIMAGES: case MDACIOCTL_RENAMERAIDDEV: default: /* no idea what to print */ transfer = 0; break; } break; case MDACMD_IOCTLCHECK: case MDACMD_MEMCOPY: default: transfer = 0; break; /* print nothing */ } if (transfer) { if (ge->command_control.extended_sg_table) { mly_printf(sc, " sg table 0x%llx/%d\n", ge->transfer.indirect.table_physaddr[0], ge->transfer.indirect.entries[0]); } else { mly_printf(sc, " 0000 0x%llx/%lld\n", ge->transfer.direct.sg[0].physaddr, ge->transfer.direct.sg[0].length); mly_printf(sc, " 0001 0x%llx/%lld\n", ge->transfer.direct.sg[1].physaddr, ge->transfer.direct.sg[1].length); } } } /******************************************************************************** * Panic in a slightly informative fashion */ static void mly_panic(struct mly_softc *sc, char *reason) { mly_printstate(sc); panic(reason); } /******************************************************************************** * Print queue statistics, callable from DDB. 
*/ void mly_print_controller(int controller) { struct mly_softc *sc; if ((sc = devclass_get_softc(devclass_find("mly"), controller)) == NULL) { printf("mly: controller %d invalid\n", controller); } else { device_printf(sc->mly_dev, "queue curr max\n"); device_printf(sc->mly_dev, "free %04d/%04d\n", sc->mly_qstat[MLYQ_FREE].q_length, sc->mly_qstat[MLYQ_FREE].q_max); device_printf(sc->mly_dev, "busy %04d/%04d\n", sc->mly_qstat[MLYQ_BUSY].q_length, sc->mly_qstat[MLYQ_BUSY].q_max); device_printf(sc->mly_dev, "complete %04d/%04d\n", sc->mly_qstat[MLYQ_COMPLETE].q_length, sc->mly_qstat[MLYQ_COMPLETE].q_max); } } #endif /******************************************************************************** ******************************************************************************** Control device interface ******************************************************************************** ********************************************************************************/ /******************************************************************************** * Accept an open operation on the control device. */ static int mly_user_open(struct cdev *dev, int flags, int fmt, struct thread *td) { struct mly_softc *sc = dev->si_drv1; MLY_LOCK(sc); sc->mly_state |= MLY_STATE_OPEN; MLY_UNLOCK(sc); return(0); } /******************************************************************************** * Accept the last close on the control device. */ static int mly_user_close(struct cdev *dev, int flags, int fmt, struct thread *td) { struct mly_softc *sc = dev->si_drv1; MLY_LOCK(sc); sc->mly_state &= ~MLY_STATE_OPEN; MLY_UNLOCK(sc); return (0); } /******************************************************************************** * Handle controller-specific control operations. 
*/ static int mly_user_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int32_t flag, struct thread *td) { struct mly_softc *sc = (struct mly_softc *)dev->si_drv1; struct mly_user_command *uc = (struct mly_user_command *)addr; struct mly_user_health *uh = (struct mly_user_health *)addr; switch(cmd) { case MLYIO_COMMAND: return(mly_user_command(sc, uc)); case MLYIO_HEALTH: return(mly_user_health(sc, uh)); default: return(ENOIOCTL); } } /******************************************************************************** * Execute a command passed in from userspace. * * The control structure contains the actual command for the controller, as well * as the user-space data pointer and data size, and an optional sense buffer * size/pointer. On completion, the data size is adjusted to the command * residual, and the sense buffer size to the size of the returned sense data. * */ static int mly_user_command(struct mly_softc *sc, struct mly_user_command *uc) { struct mly_command *mc; int error; /* allocate a command */ MLY_LOCK(sc); if (mly_alloc_command(sc, &mc)) { MLY_UNLOCK(sc); return (ENOMEM); /* XXX Linux version will wait for a command */ } MLY_UNLOCK(sc); /* handle data size/direction */ mc->mc_length = (uc->DataTransferLength >= 0) ? 
uc->DataTransferLength : -uc->DataTransferLength; if (mc->mc_length > 0) { if ((mc->mc_data = malloc(mc->mc_length, M_DEVBUF, M_NOWAIT)) == NULL) { error = ENOMEM; goto out; } } if (uc->DataTransferLength > 0) { mc->mc_flags |= MLY_CMD_DATAIN; bzero(mc->mc_data, mc->mc_length); } if (uc->DataTransferLength < 0) { mc->mc_flags |= MLY_CMD_DATAOUT; if ((error = copyin(uc->DataTransferBuffer, mc->mc_data, mc->mc_length)) != 0) goto out; } /* copy the controller command */ bcopy(&uc->CommandMailbox, mc->mc_packet, sizeof(uc->CommandMailbox)); /* clear command completion handler so that we get woken up */ mc->mc_complete = NULL; /* execute the command */ MLY_LOCK(sc); if ((error = mly_start(mc)) != 0) { MLY_UNLOCK(sc); goto out; } while (!(mc->mc_flags & MLY_CMD_COMPLETE)) mtx_sleep(mc, &sc->mly_lock, PRIBIO, "mlyioctl", 0); MLY_UNLOCK(sc); /* return the data to userspace */ if (uc->DataTransferLength > 0) if ((error = copyout(mc->mc_data, uc->DataTransferBuffer, mc->mc_length)) != 0) goto out; /* return the sense buffer to userspace */ if ((uc->RequestSenseLength > 0) && (mc->mc_sense > 0)) { if ((error = copyout(mc->mc_packet, uc->RequestSenseBuffer, min(uc->RequestSenseLength, mc->mc_sense))) != 0) goto out; } /* return command results to userspace (caller will copy out) */ uc->DataTransferLength = mc->mc_resid; uc->RequestSenseLength = min(uc->RequestSenseLength, mc->mc_sense); uc->CommandStatus = mc->mc_status; error = 0; out: if (mc->mc_data != NULL) free(mc->mc_data, M_DEVBUF); MLY_LOCK(sc); mly_release_command(mc); MLY_UNLOCK(sc); return(error); } /******************************************************************************** * Return health status to userspace. If the health change index in the user * structure does not match that currently exported by the controller, we * return the current status immediately. Otherwise, we block until either * interrupted or new status is delivered. 
*/ static int mly_user_health(struct mly_softc *sc, struct mly_user_health *uh) { struct mly_health_status mh; int error; /* fetch the current health status from userspace */ if ((error = copyin(uh->HealthStatusBuffer, &mh, sizeof(mh))) != 0) return(error); /* spin waiting for a status update */ MLY_LOCK(sc); error = EWOULDBLOCK; while ((error != 0) && (sc->mly_event_change == mh.change_counter)) error = mtx_sleep(&sc->mly_event_change, &sc->mly_lock, PRIBIO | PCATCH, "mlyhealth", 0); mh = sc->mly_mmbox->mmm_health.status; MLY_UNLOCK(sc); /* copy the controller's health status buffer out */ error = copyout(&mh, uh->HealthStatusBuffer, sizeof(mh)); return(error); } #ifdef MLY_DEBUG static void mly_timeout(void *arg) { struct mly_softc *sc; struct mly_command *mc; int deadline; sc = arg; MLY_ASSERT_LOCKED(sc); deadline = time_second - MLY_CMD_TIMEOUT; TAILQ_FOREACH(mc, &sc->mly_busy, mc_link) { if ((mc->mc_timestamp < deadline)) { device_printf(sc->mly_dev, "COMMAND %p TIMEOUT AFTER %d SECONDS\n", mc, (int)(time_second - mc->mc_timestamp)); } } callout_reset(&sc->mly_timeout, MLY_CMD_TIMEOUT * hz, mly_timeout, sc); } #endif Index: stable/11/sys/dev/ncr/ncr.c =================================================================== --- stable/11/sys/dev/ncr/ncr.c (revision 335137) +++ stable/11/sys/dev/ncr/ncr.c (revision 335138) @@ -1,7115 +1,7115 @@ /************************************************************************** ** ** ** Device driver for the NCR 53C8XX PCI-SCSI-Controller Family. ** **------------------------------------------------------------------------- ** ** Written for 386bsd and FreeBSD by ** Wolfgang Stanglmeier ** Stefan Esser ** **------------------------------------------------------------------------- */ /*- ** Copyright (c) 1994 Wolfgang Stanglmeier. All rights reserved. ** ** Redistribution and use in source and binary forms, with or without ** modification, are permitted provided that the following conditions ** are met: ** 1. 
Redistributions of source code must retain the above copyright ** notice, this list of conditions and the following disclaimer. ** 2. Redistributions in binary form must reproduce the above copyright ** notice, this list of conditions and the following disclaimer in the ** documentation and/or other materials provided with the distribution. ** 3. The name of the author may not be used to endorse or promote products ** derived from this software without specific prior written permission. ** ** THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR ** IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES ** OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ** IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, ** INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT ** NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ** DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ** THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ** (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF ** THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ** *************************************************************************** */ #include __FBSDID("$FreeBSD$"); #define NCR_GETCC_WITHMSG #if defined (__FreeBSD__) && defined(_KERNEL) #include "opt_ncr.h" #endif /*========================================================== ** ** Configuration and Debugging ** ** May be overwritten in ** **========================================================== */ /* ** SCSI address of this device. ** The boot routines should have set it. ** If not, use this. */ #ifndef SCSI_NCR_MYADDR #define SCSI_NCR_MYADDR (7) #endif /* SCSI_NCR_MYADDR */ /* ** The default synchronous period factor ** (0=asynchronous) ** If maximum synchronous frequency is defined, use it instead. 
*/ #ifndef SCSI_NCR_MAX_SYNC #ifndef SCSI_NCR_DFLT_SYNC #define SCSI_NCR_DFLT_SYNC (12) #endif /* SCSI_NCR_DFLT_SYNC */ #else #if SCSI_NCR_MAX_SYNC == 0 #define SCSI_NCR_DFLT_SYNC 0 #else #define SCSI_NCR_DFLT_SYNC (250000 / SCSI_NCR_MAX_SYNC) #endif #endif /* ** The minimal asynchronous pre-scaler period (ns) ** Shall be 40. */ #ifndef SCSI_NCR_MIN_ASYNC #define SCSI_NCR_MIN_ASYNC (40) #endif /* SCSI_NCR_MIN_ASYNC */ /* ** The maximal bus with (in log2 byte) ** (0=8 bit, 1=16 bit) */ #ifndef SCSI_NCR_MAX_WIDE #define SCSI_NCR_MAX_WIDE (1) #endif /* SCSI_NCR_MAX_WIDE */ /*========================================================== ** ** Configuration and Debugging ** **========================================================== */ /* ** Number of targets supported by the driver. ** n permits target numbers 0..n-1. ** Default is 7, meaning targets #0..#6. ** #7 .. is myself. */ #define MAX_TARGET (16) /* ** Number of logic units supported by the driver. ** n enables logic unit numbers 0..n-1. ** The common SCSI devices require only ** one lun, so take 1 as the default. */ #ifndef MAX_LUN #define MAX_LUN (8) #endif /* MAX_LUN */ /* ** The maximum number of jobs scheduled for starting. ** There should be one slot per target, and one slot ** for each tag of each target in use. */ #define MAX_START (256) /* ** The maximum number of segments a transfer is split into. */ #define MAX_SCATTER (33) /* ** The maximum transfer length (should be >= 64k). ** MUST NOT be greater than (MAX_SCATTER-1) * PAGE_SIZE. 
*/ #define MAX_SIZE ((MAX_SCATTER-1) * (long) PAGE_SIZE) /* ** other */ #define NCR_SNOOP_TIMEOUT (1000000) /*========================================================== ** ** Include files ** **========================================================== */ #include #include #ifdef _KERNEL #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #endif #include #include #include #include #include #include #include #include #include #include /*========================================================== ** ** Debugging tags ** **========================================================== */ #define DEBUG_ALLOC (0x0001) #define DEBUG_PHASE (0x0002) #define DEBUG_POLL (0x0004) #define DEBUG_QUEUE (0x0008) #define DEBUG_RESULT (0x0010) #define DEBUG_SCATTER (0x0020) #define DEBUG_SCRIPT (0x0040) #define DEBUG_TINY (0x0080) #define DEBUG_TIMING (0x0100) #define DEBUG_NEGO (0x0200) #define DEBUG_TAGS (0x0400) #define DEBUG_FREEZE (0x0800) #define DEBUG_RESTART (0x1000) /* ** Enable/Disable debug messages. ** Can be changed at runtime too. */ #ifdef SCSI_NCR_DEBUG #define DEBUG_FLAGS ncr_debug #else /* SCSI_NCR_DEBUG */ #define SCSI_NCR_DEBUG 0 #define DEBUG_FLAGS 0 #endif /* SCSI_NCR_DEBUG */ /*========================================================== ** ** assert () ** **========================================================== ** ** modified copy from 386bsd:/usr/include/sys/assert.h ** **---------------------------------------------------------- */ #ifdef DIAGNOSTIC #define assert(expression) { \ KASSERT(expression, ("%s", #expression)); \ } #else #define assert(expression) { \ if (!(expression)) { \ (void)printf("assertion \"%s\" failed: " \ "file \"%s\", line %d\n", \ #expression, __FILE__, __LINE__); \ } \ } #endif /*========================================================== ** ** Access to the controller chip. 
** **========================================================== */ #define INB(r) bus_read_1(np->reg_res, offsetof(struct ncr_reg, r)) #define INW(r) bus_read_2(np->reg_res, offsetof(struct ncr_reg, r)) #define INL(r) bus_read_4(np->reg_res, offsetof(struct ncr_reg, r)) #define OUTB(r, val) bus_write_1(np->reg_res, offsetof(struct ncr_reg, r), val) #define OUTW(r, val) bus_write_2(np->reg_res, offsetof(struct ncr_reg, r), val) #define OUTL(r, val) bus_write_4(np->reg_res, offsetof(struct ncr_reg, r), val) #define OUTL_OFF(o, val) bus_write_4(np->reg_res, o, val) #define INB_OFF(o) bus_read_1(np->reg_res, o) #define INW_OFF(o) bus_read_2(np->reg_res, o) #define INL_OFF(o) bus_read_4(np->reg_res, o) #define READSCRIPT_OFF(base, off) \ (base ? *((volatile u_int32_t *)((volatile char *)base + (off))) : \ bus_read_4(np->sram_res, off)) #define WRITESCRIPT_OFF(base, off, val) \ do { \ if (base) \ *((volatile u_int32_t *) \ ((volatile char *)base + (off))) = (val); \ else \ bus_write_4(np->sram_res, off, val); \ } while (0) #define READSCRIPT(r) \ READSCRIPT_OFF(np->script, offsetof(struct script, r)) #define WRITESCRIPT(r, val) \ WRITESCRIPT_OFF(np->script, offsetof(struct script, r), val) /* ** Set bit field ON, OFF */ #define OUTONB(r, m) OUTB(r, INB(r) | (m)) #define OUTOFFB(r, m) OUTB(r, INB(r) & ~(m)) #define OUTONW(r, m) OUTW(r, INW(r) | (m)) #define OUTOFFW(r, m) OUTW(r, INW(r) & ~(m)) #define OUTONL(r, m) OUTL(r, INL(r) | (m)) #define OUTOFFL(r, m) OUTL(r, INL(r) & ~(m)) /*========================================================== ** ** Command control block states. 
** **========================================================== */ #define HS_IDLE (0) #define HS_BUSY (1) #define HS_NEGOTIATE (2) /* sync/wide data transfer*/ #define HS_DISCONNECT (3) /* Disconnected by target */ #define HS_COMPLETE (4) #define HS_SEL_TIMEOUT (5) /* Selection timeout */ #define HS_RESET (6) /* SCSI reset */ #define HS_ABORTED (7) /* Transfer aborted */ #define HS_TIMEOUT (8) /* Software timeout */ #define HS_FAIL (9) /* SCSI or PCI bus errors */ #define HS_UNEXPECTED (10) /* Unexpected disconnect */ #define HS_STALL (11) /* QUEUE FULL or BUSY */ #define HS_DONEMASK (0xfc) /*========================================================== ** ** Software Interrupt Codes ** **========================================================== */ #define SIR_SENSE_RESTART (1) #define SIR_SENSE_FAILED (2) #define SIR_STALL_RESTART (3) #define SIR_STALL_QUEUE (4) #define SIR_NEGO_SYNC (5) #define SIR_NEGO_WIDE (6) #define SIR_NEGO_FAILED (7) #define SIR_NEGO_PROTO (8) #define SIR_REJECT_RECEIVED (9) #define SIR_REJECT_SENT (10) #define SIR_IGN_RESIDUE (11) #define SIR_MISSING_SAVE (12) #define SIR_MAX (12) /*========================================================== ** ** Extended error codes. ** xerr_status field of struct nccb. ** **========================================================== */ #define XE_OK (0) #define XE_EXTRA_DATA (1) /* unexpected data phase */ #define XE_BAD_PHASE (2) /* illegal phase (4/5) */ /*========================================================== ** ** Negotiation status. ** nego_status field of struct nccb. ** **========================================================== */ #define NS_SYNC (1) #define NS_WIDE (2) /*========================================================== ** ** XXX These are no longer used. Remove once the ** script is updated. ** "Special features" of targets. ** quirks field of struct tcb. ** actualquirks field of struct nccb. 
** **========================================================== */ #define QUIRK_AUTOSAVE (0x01) #define QUIRK_NOMSG (0x02) #define QUIRK_NOSYNC (0x10) #define QUIRK_NOWIDE16 (0x20) #define QUIRK_NOTAGS (0x40) #define QUIRK_UPDATE (0x80) /*========================================================== ** ** Misc. ** **========================================================== */ #define CCB_MAGIC (0xf2691ad2) #define MAX_TAGS (32) /* hard limit */ /*========================================================== ** ** OS dependencies. ** **========================================================== */ #define PRINT_ADDR(ccb) xpt_print_path((ccb)->ccb_h.path) /*========================================================== ** ** Declaration of structs. ** **========================================================== */ struct tcb; struct lcb; struct nccb; struct ncb; struct script; typedef struct ncb * ncb_p; typedef struct tcb * tcb_p; typedef struct lcb * lcb_p; typedef struct nccb * nccb_p; struct link { ncrcmd l_cmd; ncrcmd l_paddr; }; struct usrcmd { u_long target; u_long lun; u_long data; u_long cmd; }; #define UC_SETSYNC 10 #define UC_SETTAGS 11 #define UC_SETDEBUG 12 #define UC_SETORDER 13 #define UC_SETWIDE 14 #define UC_SETFLAG 15 #define UF_TRACE (0x01) /*--------------------------------------- ** ** Timestamps for profiling ** **--------------------------------------- */ /* Type of the kernel variable `ticks'. XXX should be declared with the var. 
*/ typedef int ticks_t; struct tstamp { ticks_t start; ticks_t end; ticks_t select; ticks_t command; ticks_t data; ticks_t status; ticks_t disconnect; }; /* ** profiling data (per device) */ struct profile { u_long num_trans; u_long num_bytes; u_long num_disc; u_long num_break; u_long num_int; u_long num_fly; u_long ms_setup; u_long ms_data; u_long ms_disc; u_long ms_post; }; /*========================================================== ** ** Declaration of structs: target control block ** **========================================================== */ #define NCR_TRANS_CUR 0x01 /* Modify current neogtiation status */ #define NCR_TRANS_ACTIVE 0x03 /* Assume this is the active target */ #define NCR_TRANS_GOAL 0x04 /* Modify negotiation goal */ #define NCR_TRANS_USER 0x08 /* Modify user negotiation settings */ struct ncr_transinfo { u_int8_t width; u_int8_t period; u_int8_t offset; }; struct ncr_target_tinfo { /* Hardware version of our sync settings */ u_int8_t disc_tag; #define NCR_CUR_DISCENB 0x01 #define NCR_CUR_TAGENB 0x02 #define NCR_USR_DISCENB 0x04 #define NCR_USR_TAGENB 0x08 u_int8_t sval; struct ncr_transinfo current; struct ncr_transinfo goal; struct ncr_transinfo user; /* Hardware version of our wide settings */ u_int8_t wval; }; struct tcb { /* ** during reselection the ncr jumps to this point ** with SFBR set to the encoded target number ** with bit 7 set. ** if it's not this target, jump to the next. ** ** JUMP IF (SFBR != #target#) ** @(next tcb) */ struct link jump_tcb; /* ** load the actual values for the sxfer and the scntl3 ** register (sync/wide mode). ** ** SCR_COPY (1); ** @(sval field of this tcb) ** @(sxfer register) ** SCR_COPY (1); ** @(wval field of this tcb) ** @(scntl3 register) */ ncrcmd getscr[6]; /* ** if next message is "identify" ** then load the message to SFBR, ** else load 0 to SFBR. ** ** CALL ** */ struct link call_lun; /* ** now look for the right lun. 
** ** JUMP ** @(first nccb of this lun) */ struct link jump_lcb; /* ** pointer to interrupted getcc nccb */ nccb_p hold_cp; /* ** pointer to nccb used for negotiating. ** Avoid to start a nego for all queued commands ** when tagged command queuing is enabled. */ nccb_p nego_cp; /* ** statistical data */ u_long transfers; u_long bytes; /* ** user settable limits for sync transfer ** and tagged commands. */ struct ncr_target_tinfo tinfo; /* ** the lcb's of this tcb */ lcb_p lp[MAX_LUN]; }; /*========================================================== ** ** Declaration of structs: lun control block ** **========================================================== */ struct lcb { /* ** during reselection the ncr jumps to this point ** with SFBR set to the "Identify" message. ** if it's not this lun, jump to the next. ** ** JUMP IF (SFBR != #lun#) ** @(next lcb of this target) */ struct link jump_lcb; /* ** if next message is "simple tag", ** then load the tag to SFBR, ** else load 0 to SFBR. ** ** CALL ** */ struct link call_tag; /* ** now look for the right nccb. ** ** JUMP ** @(first nccb of this lun) */ struct link jump_nccb; /* ** start of the nccb chain */ nccb_p next_nccb; /* ** Control of tagged queueing */ u_char reqnccbs; u_char reqlink; u_char actlink; u_char usetags; u_char lasttag; }; /*========================================================== ** ** Declaration of structs: COMMAND control block ** **========================================================== ** ** This substructure is copied from the nccb to a ** global address after selection (or reselection) ** and copied back before disconnect. ** ** These fields are accessible to the script processor. ** **---------------------------------------------------------- */ struct head { /* ** Execution of a nccb starts at this point. ** It's a jump to the "SELECT" label ** of the script. ** ** After successful selection the script ** processor overwrites it with a jump to ** the IDLE label of the script. 
*/ struct link launch; /* ** Saved data pointer. ** Points to the position in the script ** responsible for the actual transfer ** of data. ** It's written after reception of a ** "SAVE_DATA_POINTER" message. ** The goalpointer points after ** the last transfer command. */ u_int32_t savep; u_int32_t lastp; u_int32_t goalp; /* ** The virtual address of the nccb ** containing this header. */ nccb_p cp; /* ** space for some timestamps to gather ** profiling data about devices and this driver. */ struct tstamp stamp; /* ** status fields. */ u_char status[8]; }; /* ** The status bytes are used by the host and the script processor. ** ** The first four byte are copied to the scratchb register ** (declared as scr0..scr3 in ncr_reg.h) just after the select/reselect, ** and copied back just after disconnecting. ** Inside the script the XX_REG are used. ** ** The last four bytes are used inside the script by "COPY" commands. ** Because source and destination must have the same alignment ** in a longword, the fields HAVE to be at the chosen offsets. 
** xerr_st (4) 0 (0x34) scratcha ** sync_st (5) 1 (0x05) sxfer ** wide_st (7) 3 (0x03) scntl3 */ /* ** First four bytes (script) */ #define QU_REG scr0 #define HS_REG scr1 #define HS_PRT nc_scr1 #define SS_REG scr2 #define PS_REG scr3 /* ** First four bytes (host) */ #define actualquirks phys.header.status[0] #define host_status phys.header.status[1] #define s_status phys.header.status[2] #define parity_status phys.header.status[3] /* ** Last four bytes (script) */ #define xerr_st header.status[4] /* MUST be ==0 mod 4 */ #define sync_st header.status[5] /* MUST be ==1 mod 4 */ #define nego_st header.status[6] #define wide_st header.status[7] /* MUST be ==3 mod 4 */ /* ** Last four bytes (host) */ #define xerr_status phys.xerr_st #define sync_status phys.sync_st #define nego_status phys.nego_st #define wide_status phys.wide_st /*========================================================== ** ** Declaration of structs: Data structure block ** **========================================================== ** ** During execution of a nccb by the script processor, ** the DSA (data structure address) register points ** to this substructure of the nccb. ** This substructure contains the header with ** the script-processor-changable data and ** data blocks for the indirect move commands. ** **---------------------------------------------------------- */ struct dsb { /* ** Header. ** Has to be the first entry, ** because it's jumped to by the ** script processor */ struct head header; /* ** Table data for Script */ struct scr_tblsel select; struct scr_tblmove smsg ; struct scr_tblmove smsg2 ; struct scr_tblmove cmd ; struct scr_tblmove scmd ; struct scr_tblmove sense ; struct scr_tblmove data [MAX_SCATTER]; }; /*========================================================== ** ** Declaration of structs: Command control block. 
** **========================================================== ** ** During execution of a nccb by the script processor, ** the DSA (data structure address) register points ** to this substructure of the nccb. ** This substructure contains the header with ** the script-processor-changable data and then ** data blocks for the indirect move commands. ** **---------------------------------------------------------- */ struct nccb { /* ** This filler ensures that the global header is ** cache line size aligned. */ ncrcmd filler[4]; /* ** during reselection the ncr jumps to this point. ** If a "SIMPLE_TAG" message was received, ** then SFBR is set to the tag. ** else SFBR is set to 0 ** If looking for another tag, jump to the next nccb. ** ** JUMP IF (SFBR != #TAG#) ** @(next nccb of this lun) */ struct link jump_nccb; /* ** After execution of this call, the return address ** (in the TEMP register) points to the following ** data structure block. ** So copy it to the DSA register, and start ** processing of this data structure. ** ** CALL ** */ struct link call_tmp; /* ** This is the data structure which is ** to be executed by the script processor. */ struct dsb phys; /* ** If a data transfer phase is terminated too early ** (after reception of a message (i.e. DISCONNECT)), ** we have to prepare a mini script to transfer ** the rest of the data. */ ncrcmd patch[8]; /* ** The general SCSI driver provides a ** pointer to a control block. */ union ccb *ccb; /* ** We prepare a message to be sent after selection, ** and a second one to be sent after getcc selection. ** Contents are IDENTIFY and SIMPLE_TAG. ** While negotiating sync or wide transfer, ** a SDTM or WDTM message is appended. */ u_char scsi_smsg [8]; u_char scsi_smsg2[8]; /* ** Lock this nccb. ** Flag is used while looking for a free nccb. */ u_long magic; /* ** Physical address of this instance of nccb */ u_long p_nccb; /* ** Completion time out for this job. 
** It's set to time of start + allowed number of seconds. */ time_t tlimit; /* ** All nccbs of one hostadapter are chained. */ nccb_p link_nccb; /* ** All nccbs of one target/lun are chained. */ nccb_p next_nccb; /* ** Sense command */ u_char sensecmd[6]; /* ** Tag for this transfer. ** It's patched into jump_nccb. ** If it's not zero, a SIMPLE_TAG ** message is included in smsg. */ u_char tag; }; #define CCB_PHYS(cp,lbl) (cp->p_nccb + offsetof(struct nccb, lbl)) /*========================================================== ** ** Declaration of structs: NCR device descriptor ** **========================================================== */ struct ncb { /* ** The global header. ** Accessible to both the host and the ** script-processor. ** We assume it is cache line size aligned. */ struct head header; device_t dev; /*----------------------------------------------- ** Scripts .. **----------------------------------------------- ** ** During reselection the ncr jumps to this point. ** The SFBR register is loaded with the encoded target id. ** ** Jump to the first target. ** ** JUMP ** @(next tcb) */ struct link jump_tcb; /*----------------------------------------------- ** Configuration .. **----------------------------------------------- ** ** virtual and physical addresses ** of the 53c810 chip. */ int reg_rid; struct resource *reg_res; int sram_rid; struct resource *sram_res; struct resource *irq_res; void *irq_handle; /* ** Scripts instance virtual address. */ struct script *script; struct scripth *scripth; /* ** Scripts instance physical address. */ u_long p_script; u_long p_scripth; /* ** The SCSI address of the host adapter. 
*/ u_char myaddr; /* ** timing parameters */ u_char minsync; /* Minimum sync period factor */ u_char maxsync; /* Maximum sync period factor */ u_char maxoffs; /* Max scsi offset */ u_char clock_divn; /* Number of clock divisors */ u_long clock_khz; /* SCSI clock frequency in KHz */ u_long features; /* Chip features map */ u_char multiplier; /* Clock multiplier (1,2,4) */ u_char maxburst; /* log base 2 of dwords burst */ /* ** BIOS supplied PCI bus options */ u_char rv_scntl3; u_char rv_dcntl; u_char rv_dmode; u_char rv_ctest3; u_char rv_ctest4; u_char rv_ctest5; u_char rv_gpcntl; u_char rv_stest2; /*----------------------------------------------- ** CAM SIM information for this instance **----------------------------------------------- */ struct cam_sim *sim; struct cam_path *path; /*----------------------------------------------- ** Job control **----------------------------------------------- ** ** Commands from user */ struct usrcmd user; /* ** Target data */ struct tcb target[MAX_TARGET]; /* ** Start queue. */ u_int32_t squeue [MAX_START]; u_short squeueput; /* ** Timeout handler */ time_t heartbeat; u_short ticks; u_short latetime; time_t lasttime; struct callout timer; /*----------------------------------------------- ** Debug and profiling **----------------------------------------------- ** ** register dump */ struct ncr_reg regdump; time_t regtime; /* ** Profiling data */ struct profile profile; u_long disc_phys; u_long disc_ref; /* ** Head of list of all nccbs for this controller. */ nccb_p link_nccb; /* ** message buffers. ** Should be longword aligned, ** because they're written with a ** COPY script command. */ u_char msgout[8]; u_char msgin [8]; u_int32_t lastmsg; /* ** Buffer for STATUS_IN phase. */ u_char scratch; /* ** controller chip dependent maximal transfer width. 
*/ u_char maxwide; struct mtx lock; #ifdef NCR_IOMAPPED /* ** address of the ncr control registers in io space */ pci_port_t port; #endif }; #define NCB_SCRIPT_PHYS(np,lbl) (np->p_script + offsetof (struct script, lbl)) #define NCB_SCRIPTH_PHYS(np,lbl) (np->p_scripth + offsetof (struct scripth,lbl)) /*========================================================== ** ** ** Script for NCR-Processor. ** ** Use ncr_script_fill() to create the variable parts. ** Use ncr_script_copy_and_bind() to make a copy and ** bind to physical addresses. ** ** **========================================================== ** ** We have to know the offsets of all labels before ** we reach them (for forward jumps). ** Therefore we declare a struct here. ** If you make changes inside the script, ** DONT FORGET TO CHANGE THE LENGTHS HERE! ** **---------------------------------------------------------- */ /* ** Script fragments which are loaded into the on-board RAM ** of 825A, 875 and 895 chips. */ struct script { ncrcmd start [ 7]; ncrcmd start0 [ 2]; ncrcmd start1 [ 3]; ncrcmd startpos [ 1]; ncrcmd trysel [ 8]; ncrcmd skip [ 8]; ncrcmd skip2 [ 3]; ncrcmd idle [ 2]; ncrcmd select [ 18]; ncrcmd prepare [ 4]; ncrcmd loadpos [ 14]; ncrcmd prepare2 [ 24]; ncrcmd setmsg [ 5]; ncrcmd clrack [ 2]; ncrcmd dispatch [ 33]; ncrcmd no_data [ 17]; ncrcmd checkatn [ 10]; ncrcmd command [ 15]; ncrcmd status [ 27]; ncrcmd msg_in [ 26]; ncrcmd msg_bad [ 6]; ncrcmd complete [ 13]; ncrcmd cleanup [ 12]; ncrcmd cleanup0 [ 9]; ncrcmd signal [ 12]; ncrcmd save_dp [ 5]; ncrcmd restore_dp [ 5]; ncrcmd disconnect [ 12]; ncrcmd disconnect0 [ 5]; ncrcmd disconnect1 [ 23]; ncrcmd msg_out [ 9]; ncrcmd msg_out_done [ 7]; ncrcmd badgetcc [ 6]; ncrcmd reselect [ 8]; ncrcmd reselect1 [ 8]; ncrcmd reselect2 [ 8]; ncrcmd resel_tmp [ 5]; ncrcmd resel_lun [ 18]; ncrcmd resel_tag [ 24]; ncrcmd data_in [MAX_SCATTER * 4 + 7]; ncrcmd data_out [MAX_SCATTER * 4 + 7]; }; /* ** Script fragments which stay in main memory for all chips. 
*/ struct scripth { ncrcmd tryloop [MAX_START*5+2]; ncrcmd msg_parity [ 6]; ncrcmd msg_reject [ 8]; ncrcmd msg_ign_residue [ 32]; ncrcmd msg_extended [ 18]; ncrcmd msg_ext_2 [ 18]; ncrcmd msg_wdtr [ 27]; ncrcmd msg_ext_3 [ 18]; ncrcmd msg_sdtr [ 27]; ncrcmd msg_out_abort [ 10]; ncrcmd getcc [ 4]; ncrcmd getcc1 [ 5]; #ifdef NCR_GETCC_WITHMSG ncrcmd getcc2 [ 29]; #else ncrcmd getcc2 [ 14]; #endif ncrcmd getcc3 [ 6]; ncrcmd aborttag [ 4]; ncrcmd abort [ 22]; ncrcmd snooptest [ 9]; ncrcmd snoopend [ 2]; }; /*========================================================== ** ** ** Function headers. ** ** **========================================================== */ #ifdef _KERNEL static nccb_p ncr_alloc_nccb(ncb_p np, u_long target, u_long lun); static void ncr_complete(ncb_p np, nccb_p cp); static int ncr_delta(int * from, int * to); static void ncr_exception(ncb_p np); static void ncr_free_nccb(ncb_p np, nccb_p cp); static void ncr_freeze_devq(ncb_p np, struct cam_path *path); static void ncr_selectclock(ncb_p np, u_char scntl3); static void ncr_getclock(ncb_p np, u_char multiplier); static nccb_p ncr_get_nccb(ncb_p np, u_long t,u_long l); #if 0 static u_int32_t ncr_info(int unit); #endif static void ncr_init(ncb_p np, char * msg, u_long code); static void ncr_intr(void *vnp); static void ncr_intr_locked(ncb_p np); static void ncr_int_ma(ncb_p np, u_char dstat); static void ncr_int_sir(ncb_p np); static void ncr_int_sto(ncb_p np); #if 0 static void ncr_min_phys(struct buf *bp); #endif static void ncr_poll(struct cam_sim *sim); static void ncb_profile(ncb_p np, nccb_p cp); static void ncr_script_copy_and_bind(ncb_p np, ncrcmd *src, ncrcmd *dst, int len); static void ncr_script_fill(struct script * scr, struct scripth *scrh); static int ncr_scatter(struct dsb* phys, vm_offset_t vaddr, vm_size_t datalen); static void ncr_getsync(ncb_p np, u_char sfac, u_char *fakp, u_char *scntl3p); static void ncr_setsync(ncb_p np, nccb_p cp,u_char scntl3,u_char sxfer, u_char period); 
static void ncr_setwide(ncb_p np, nccb_p cp, u_char wide, u_char ack); static int ncr_show_msg(u_char * msg); static int ncr_snooptest(ncb_p np); static void ncr_action(struct cam_sim *sim, union ccb *ccb); static void ncr_timeout(void *arg); static void ncr_wakeup(ncb_p np, u_long code); static int ncr_probe(device_t dev); static int ncr_attach(device_t dev); #endif /* _KERNEL */ /*========================================================== ** ** ** Global static data. ** ** **========================================================== */ #ifdef _KERNEL static int ncr_debug = SCSI_NCR_DEBUG; SYSCTL_INT(_debug, OID_AUTO, ncr_debug, CTLFLAG_RW, &ncr_debug, 0, ""); static int ncr_cache; /* to be aligned _NOT_ static */ /*========================================================== ** ** ** Global static data: auto configure ** ** **========================================================== */ #define NCR_810_ID (0x00011000ul) #define NCR_815_ID (0x00041000ul) #define NCR_820_ID (0x00021000ul) #define NCR_825_ID (0x00031000ul) #define NCR_860_ID (0x00061000ul) #define NCR_875_ID (0x000f1000ul) #define NCR_875_ID2 (0x008f1000ul) #define NCR_885_ID (0x000d1000ul) #define NCR_895_ID (0x000c1000ul) #define NCR_896_ID (0x000b1000ul) #define NCR_895A_ID (0x00121000ul) #define NCR_1510D_ID (0x000a1000ul) /*========================================================== ** ** ** Scripts for NCR-Processor. ** ** Use ncr_script_bind for binding to physical addresses. ** ** **========================================================== ** ** NADDR generates a reference to a field of the controller data. ** PADDR generates a reference to another part of the script. ** RADDR generates a reference to a script processor register. ** FADDR generates a reference to a script processor register ** with offset. 
** **---------------------------------------------------------- */ #define RELOC_SOFTC 0x40000000 #define RELOC_LABEL 0x50000000 #define RELOC_REGISTER 0x60000000 #define RELOC_KVAR 0x70000000 #define RELOC_LABELH 0x80000000 #define RELOC_MASK 0xf0000000 #define NADDR(label) (RELOC_SOFTC | offsetof(struct ncb, label)) #define PADDR(label) (RELOC_LABEL | offsetof(struct script, label)) #define PADDRH(label) (RELOC_LABELH | offsetof(struct scripth, label)) #define RADDR(label) (RELOC_REGISTER | REG(label)) #define FADDR(label,ofs)(RELOC_REGISTER | ((REG(label))+(ofs))) #define KVAR(which) (RELOC_KVAR | (which)) #define KVAR_SECOND (0) #define KVAR_TICKS (1) #define KVAR_NCR_CACHE (2) #define SCRIPT_KVAR_FIRST (0) #define SCRIPT_KVAR_LAST (3) /* * Kernel variables referenced in the scripts. * THESE MUST ALL BE ALIGNED TO A 4-BYTE BOUNDARY. */ static volatile void *script_kvars[] = { &time_second, &ticks, &ncr_cache }; static struct script script0 = { /*--------------------------< START >-----------------------*/ { /* ** Claim to be still alive ... */ SCR_COPY (sizeof (((struct ncb *)0)->heartbeat)), KVAR (KVAR_SECOND), NADDR (heartbeat), /* ** Make data structure address invalid. ** clear SIGP. */ SCR_LOAD_REG (dsa, 0xff), 0, SCR_FROM_REG (ctest2), 0, }/*-------------------------< START0 >----------------------*/,{ /* ** Hook for interrupted GetConditionCode. ** Will be patched to ... IFTRUE by ** the interrupt handler. */ SCR_INT ^ IFFALSE (0), SIR_SENSE_RESTART, }/*-------------------------< START1 >----------------------*/,{ /* ** Hook for stalled start queue. ** Will be patched to IFTRUE by the interrupt handler. */ SCR_INT ^ IFFALSE (0), SIR_STALL_RESTART, /* ** Then jump to a certain point in tryloop. ** Due to the lack of indirect addressing the code ** is self modifying here. 
*/ SCR_JUMP, }/*-------------------------< STARTPOS >--------------------*/,{ PADDRH(tryloop), }/*-------------------------< TRYSEL >----------------------*/,{ /* ** Now: ** DSA: Address of a Data Structure ** or Address of the IDLE-Label. ** ** TEMP: Address of a script, which tries to ** start the NEXT entry. ** ** Save the TEMP register into the SCRATCHA register. ** Then copy the DSA to TEMP and RETURN. ** This is kind of an indirect jump. ** (The script processor has NO stack, so the ** CALL is actually a jump and link, and the ** RETURN is an indirect jump.) ** ** If the slot was empty, DSA contains the address ** of the IDLE part of this script. The processor ** jumps to IDLE and waits for a reselect. ** It will wake up and try the same slot again ** after the SIGP bit becomes set by the host. ** ** If the slot was not empty, DSA contains ** the address of the phys-part of a nccb. ** The processor jumps to this address. ** phys starts with head, ** head starts with launch, ** so actually the processor jumps to ** the lauch part. ** If the entry is scheduled for execution, ** then launch contains a jump to SELECT. ** If it's not scheduled, it contains a jump to IDLE. */ SCR_COPY (4), RADDR (temp), RADDR (scratcha), SCR_COPY (4), RADDR (dsa), RADDR (temp), SCR_RETURN, 0 }/*-------------------------< SKIP >------------------------*/,{ /* ** This entry has been canceled. ** Next time use the next slot. */ SCR_COPY (4), RADDR (scratcha), PADDR (startpos), /* ** patch the launch field. ** should look like an idle process. */ SCR_COPY_F (4), RADDR (dsa), PADDR (skip2), SCR_COPY (8), PADDR (idle), }/*-------------------------< SKIP2 >-----------------------*/,{ 0, SCR_JUMP, PADDR(start), }/*-------------------------< IDLE >------------------------*/,{ /* ** Nothing to do? ** Wait for reselect. */ SCR_JUMP, PADDR(reselect), }/*-------------------------< SELECT >----------------------*/,{ /* ** DSA contains the address of a scheduled ** data structure. 
** ** SCRATCHA contains the address of the script, ** which starts the next entry. ** ** Set Initiator mode. ** ** (Target mode is left as an exercise for the reader) */ SCR_CLR (SCR_TRG), 0, SCR_LOAD_REG (HS_REG, 0xff), 0, /* ** And try to select this target. */ SCR_SEL_TBL_ATN ^ offsetof (struct dsb, select), PADDR (reselect), /* ** Now there are 4 possibilities: ** ** (1) The ncr loses arbitration. ** This is ok, because it will try again, ** when the bus becomes idle. ** (But beware of the timeout function!) ** ** (2) The ncr is reselected. ** Then the script processor takes the jump ** to the RESELECT label. ** ** (3) The ncr completes the selection. ** Then it will execute the next statement. ** ** (4) There is a selection timeout. ** Then the ncr should interrupt the host and stop. ** Unfortunately, it seems to continue execution ** of the script. But it will fail with an ** IID-interrupt on the next WHEN. */ SCR_JUMPR ^ IFTRUE (WHEN (SCR_MSG_IN)), 0, /* ** Send the IDENTIFY and SIMPLE_TAG messages ** (and the MSG_EXT_SDTR message) */ SCR_MOVE_TBL ^ SCR_MSG_OUT, offsetof (struct dsb, smsg), #ifdef undef /* XXX better fail than try to deal with this ... */ SCR_JUMPR ^ IFTRUE (WHEN (SCR_MSG_OUT)), -16, #endif SCR_CLR (SCR_ATN), 0, SCR_COPY (1), RADDR (sfbr), NADDR (lastmsg), /* ** Selection complete. ** Next time use the next slot. */ SCR_COPY (4), RADDR (scratcha), PADDR (startpos), }/*-------------------------< PREPARE >----------------------*/,{ /* ** The ncr doesn't have an indirect load ** or store command. So we have to ** copy part of the control block to a ** fixed place, where we can access it. ** ** We patch the address part of a ** COPY command with the DSA-register. */ SCR_COPY_F (4), RADDR (dsa), PADDR (loadpos), /* ** then we do the actual copy. */ SCR_COPY (sizeof (struct head)), /* ** continued after the next label ... */ }/*-------------------------< LOADPOS >---------------------*/,{ 0, NADDR (header), /* ** Mark this nccb as not scheduled. 
*/ SCR_COPY (8), PADDR (idle), NADDR (header.launch), /* ** Set a time stamp for this selection */ SCR_COPY (sizeof (ticks)), KVAR (KVAR_TICKS), NADDR (header.stamp.select), /* ** load the savep (saved pointer) into ** the TEMP register (actual pointer) */ SCR_COPY (4), NADDR (header.savep), RADDR (temp), /* ** Initialize the status registers */ SCR_COPY (4), NADDR (header.status), RADDR (scr0), }/*-------------------------< PREPARE2 >---------------------*/,{ /* ** Load the synchronous mode register */ SCR_COPY (1), NADDR (sync_st), RADDR (sxfer), /* ** Load the wide mode and timing register */ SCR_COPY (1), NADDR (wide_st), RADDR (scntl3), /* ** Initialize the msgout buffer with a NOOP message. */ SCR_LOAD_REG (scratcha, MSG_NOOP), 0, SCR_COPY (1), RADDR (scratcha), NADDR (msgout), SCR_COPY (1), RADDR (scratcha), NADDR (msgin), /* ** Message in phase ? */ SCR_JUMP ^ IFFALSE (WHEN (SCR_MSG_IN)), PADDR (dispatch), /* ** Extended or reject message ? */ SCR_FROM_REG (sbdl), 0, SCR_JUMP ^ IFTRUE (DATA (MSG_EXTENDED)), PADDR (msg_in), SCR_JUMP ^ IFTRUE (DATA (MSG_MESSAGE_REJECT)), PADDRH (msg_reject), /* ** normal processing */ SCR_JUMP, PADDR (dispatch), }/*-------------------------< SETMSG >----------------------*/,{ SCR_COPY (1), RADDR (scratcha), NADDR (msgout), SCR_SET (SCR_ATN), 0, }/*-------------------------< CLRACK >----------------------*/,{ /* ** Terminate possible pending message phase. 
*/ SCR_CLR (SCR_ACK), 0, }/*-----------------------< DISPATCH >----------------------*/,{ SCR_FROM_REG (HS_REG), 0, SCR_INT ^ IFTRUE (DATA (HS_NEGOTIATE)), SIR_NEGO_FAILED, /* ** remove bogus output signals */ SCR_REG_REG (socl, SCR_AND, CACK|CATN), 0, SCR_RETURN ^ IFTRUE (WHEN (SCR_DATA_OUT)), 0, SCR_RETURN ^ IFTRUE (IF (SCR_DATA_IN)), 0, SCR_JUMP ^ IFTRUE (IF (SCR_MSG_OUT)), PADDR (msg_out), SCR_JUMP ^ IFTRUE (IF (SCR_MSG_IN)), PADDR (msg_in), SCR_JUMP ^ IFTRUE (IF (SCR_COMMAND)), PADDR (command), SCR_JUMP ^ IFTRUE (IF (SCR_STATUS)), PADDR (status), /* ** Discard one illegal phase byte, if required. */ SCR_LOAD_REG (scratcha, XE_BAD_PHASE), 0, SCR_COPY (1), RADDR (scratcha), NADDR (xerr_st), SCR_JUMPR ^ IFFALSE (IF (SCR_ILG_OUT)), 8, SCR_MOVE_ABS (1) ^ SCR_ILG_OUT, NADDR (scratch), SCR_JUMPR ^ IFFALSE (IF (SCR_ILG_IN)), 8, SCR_MOVE_ABS (1) ^ SCR_ILG_IN, NADDR (scratch), SCR_JUMP, PADDR (dispatch), }/*-------------------------< NO_DATA >--------------------*/,{ /* ** The target wants to transfer too much data ** or in the wrong direction. ** Remember that in extended error. */ SCR_LOAD_REG (scratcha, XE_EXTRA_DATA), 0, SCR_COPY (1), RADDR (scratcha), NADDR (xerr_st), /* ** Discard one data byte, if required. */ SCR_JUMPR ^ IFFALSE (WHEN (SCR_DATA_OUT)), 8, SCR_MOVE_ABS (1) ^ SCR_DATA_OUT, NADDR (scratch), SCR_JUMPR ^ IFFALSE (IF (SCR_DATA_IN)), 8, SCR_MOVE_ABS (1) ^ SCR_DATA_IN, NADDR (scratch), /* ** .. and repeat as required. */ SCR_CALL, PADDR (dispatch), SCR_JUMP, PADDR (no_data), }/*-------------------------< CHECKATN >--------------------*/,{ /* ** If AAP (bit 1 of scntl0 register) is set ** and a parity error is detected, ** the script processor asserts ATN. ** ** The target should switch to a MSG_OUT phase ** to get the message. */ SCR_FROM_REG (socl), 0, SCR_JUMP ^ IFFALSE (MASK (CATN, CATN)), PADDR (dispatch), /* ** count it */ SCR_REG_REG (PS_REG, SCR_ADD, 1), 0, /* ** Prepare a MSG_INITIATOR_DET_ERR message ** (initiator detected error). 
** The target should retry the transfer. */ SCR_LOAD_REG (scratcha, MSG_INITIATOR_DET_ERR), 0, SCR_JUMP, PADDR (setmsg), }/*-------------------------< COMMAND >--------------------*/,{ /* ** If this is not a GETCC transfer ... */ SCR_FROM_REG (SS_REG), 0, /*<<<*/ SCR_JUMPR ^ IFTRUE (DATA (SCSI_STATUS_CHECK_COND)), 28, /* ** ... set a timestamp ... */ SCR_COPY (sizeof (ticks)), KVAR (KVAR_TICKS), NADDR (header.stamp.command), /* ** ... and send the command */ SCR_MOVE_TBL ^ SCR_COMMAND, offsetof (struct dsb, cmd), SCR_JUMP, PADDR (dispatch), /* ** Send the GETCC command */ /*>>>*/ SCR_MOVE_TBL ^ SCR_COMMAND, offsetof (struct dsb, scmd), SCR_JUMP, PADDR (dispatch), }/*-------------------------< STATUS >--------------------*/,{ /* ** set the timestamp. */ SCR_COPY (sizeof (ticks)), KVAR (KVAR_TICKS), NADDR (header.stamp.status), /* ** If this is a GETCC transfer, */ SCR_FROM_REG (SS_REG), 0, /*<<<*/ SCR_JUMPR ^ IFFALSE (DATA (SCSI_STATUS_CHECK_COND)), 40, /* ** get the status */ SCR_MOVE_ABS (1) ^ SCR_STATUS, NADDR (scratch), /* ** Save status to scsi_status. ** Mark as complete. ** And wait for disconnect. */ SCR_TO_REG (SS_REG), 0, SCR_REG_REG (SS_REG, SCR_OR, SCSI_STATUS_SENSE), 0, SCR_LOAD_REG (HS_REG, HS_COMPLETE), 0, SCR_JUMP, PADDR (checkatn), /* ** If it was no GETCC transfer, ** save the status to scsi_status. */ /*>>>*/ SCR_MOVE_ABS (1) ^ SCR_STATUS, NADDR (scratch), SCR_TO_REG (SS_REG), 0, /* ** if it was no check condition ... */ SCR_JUMP ^ IFTRUE (DATA (SCSI_STATUS_CHECK_COND)), PADDR (checkatn), /* ** ... mark as complete. */ SCR_LOAD_REG (HS_REG, HS_COMPLETE), 0, SCR_JUMP, PADDR (checkatn), }/*-------------------------< MSG_IN >--------------------*/,{ /* ** Get the first byte of the message ** and save it to SCRATCHA. ** ** The script processor doesn't negate the ** ACK signal after this transfer. */ SCR_MOVE_ABS (1) ^ SCR_MSG_IN, NADDR (msgin[0]), /* ** Check for message parity error. 
*/ SCR_TO_REG (scratcha), 0, SCR_FROM_REG (socl), 0, SCR_JUMP ^ IFTRUE (MASK (CATN, CATN)), PADDRH (msg_parity), SCR_FROM_REG (scratcha), 0, /* ** Parity was ok, handle this message. */ SCR_JUMP ^ IFTRUE (DATA (MSG_CMDCOMPLETE)), PADDR (complete), SCR_JUMP ^ IFTRUE (DATA (MSG_SAVEDATAPOINTER)), PADDR (save_dp), SCR_JUMP ^ IFTRUE (DATA (MSG_RESTOREPOINTERS)), PADDR (restore_dp), SCR_JUMP ^ IFTRUE (DATA (MSG_DISCONNECT)), PADDR (disconnect), SCR_JUMP ^ IFTRUE (DATA (MSG_EXTENDED)), PADDRH (msg_extended), SCR_JUMP ^ IFTRUE (DATA (MSG_NOOP)), PADDR (clrack), SCR_JUMP ^ IFTRUE (DATA (MSG_MESSAGE_REJECT)), PADDRH (msg_reject), SCR_JUMP ^ IFTRUE (DATA (MSG_IGN_WIDE_RESIDUE)), PADDRH (msg_ign_residue), /* ** Rest of the messages left as ** an exercise ... ** ** Unimplemented messages: ** fall through to MSG_BAD. */ }/*-------------------------< MSG_BAD >------------------*/,{ /* ** unimplemented message - reject it. */ SCR_INT, SIR_REJECT_SENT, SCR_LOAD_REG (scratcha, MSG_MESSAGE_REJECT), 0, SCR_JUMP, PADDR (setmsg), }/*-------------------------< COMPLETE >-----------------*/,{ /* ** Complete message. ** ** If it's not the get condition code, ** copy TEMP register to LASTP in header. */ SCR_FROM_REG (SS_REG), 0, /*<<<*/ SCR_JUMPR ^ IFTRUE (MASK (SCSI_STATUS_SENSE, SCSI_STATUS_SENSE)), 12, SCR_COPY (4), RADDR (temp), NADDR (header.lastp), /*>>>*/ /* ** When we terminate the cycle by clearing ACK, ** the target may disconnect immediately. ** ** We don't want to be told of an ** "unexpected disconnect", ** so we disable this feature. */ SCR_REG_REG (scntl2, SCR_AND, 0x7f), 0, /* ** Terminate cycle ... */ SCR_CLR (SCR_ACK|SCR_ATN), 0, /* ** ... and wait for the disconnect. */ SCR_WAIT_DISC, 0, }/*-------------------------< CLEANUP >-------------------*/,{ /* ** dsa: Pointer to nccb ** or xxxxxxFF (no nccb) ** ** HS_REG: Host-Status (<>0!) */ SCR_FROM_REG (dsa), 0, SCR_JUMP ^ IFTRUE (DATA (0xff)), PADDR (signal), /* ** dsa is valid. 
** save the status registers */ SCR_COPY (4), RADDR (scr0), NADDR (header.status), /* ** and copy back the header to the nccb. */ SCR_COPY_F (4), RADDR (dsa), PADDR (cleanup0), SCR_COPY (sizeof (struct head)), NADDR (header), }/*-------------------------< CLEANUP0 >--------------------*/,{ 0, /* ** If command resulted in "check condition" ** status and is not yet completed, ** try to get the condition code. */ SCR_FROM_REG (HS_REG), 0, /*<<<*/ SCR_JUMPR ^ IFFALSE (MASK (0, HS_DONEMASK)), 16, SCR_FROM_REG (SS_REG), 0, SCR_JUMP ^ IFTRUE (DATA (SCSI_STATUS_CHECK_COND)), PADDRH(getcc2), }/*-------------------------< SIGNAL >----------------------*/,{ /* ** if status = queue full, ** reinsert in startqueue and stall queue. */ /*>>>*/ SCR_FROM_REG (SS_REG), 0, SCR_INT ^ IFTRUE (DATA (SCSI_STATUS_QUEUE_FULL)), SIR_STALL_QUEUE, /* ** And make the DSA register invalid. */ SCR_LOAD_REG (dsa, 0xff), /* invalid */ 0, /* ** if job completed ... */ SCR_FROM_REG (HS_REG), 0, /* ** ... signal completion to the host */ SCR_INT_FLY ^ IFFALSE (MASK (0, HS_DONEMASK)), 0, /* ** Auf zu neuen Schandtaten! */ SCR_JUMP, PADDR(start), }/*-------------------------< SAVE_DP >------------------*/,{ /* ** SAVE_DP message: ** Copy TEMP register to SAVEP in header. */ SCR_COPY (4), RADDR (temp), NADDR (header.savep), SCR_JUMP, PADDR (clrack), }/*-------------------------< RESTORE_DP >---------------*/,{ /* ** RESTORE_DP message: ** Copy SAVEP in header to TEMP register. */ SCR_COPY (4), NADDR (header.savep), RADDR (temp), SCR_JUMP, PADDR (clrack), }/*-------------------------< DISCONNECT >---------------*/,{ /* ** If QUIRK_AUTOSAVE is set, ** do a "save pointer" operation. */ SCR_FROM_REG (QU_REG), 0, /*<<<*/ SCR_JUMPR ^ IFFALSE (MASK (QUIRK_AUTOSAVE, QUIRK_AUTOSAVE)), 12, /* ** like SAVE_DP message: ** Copy TEMP register to SAVEP in header. */ SCR_COPY (4), RADDR (temp), NADDR (header.savep), /*>>>*/ /* ** Check if temp==savep or temp==goalp: ** if not, log a missing save pointer message. 
** In fact, it's a comparison mod 256. ** ** Hmmm, I hadn't thought that I would be urged to ** write this kind of ugly self modifying code. ** ** It's unbelievable, but the ncr53c8xx isn't able ** to subtract one register from another. */ SCR_FROM_REG (temp), 0, /* ** You are not expected to understand this .. ** ** CAUTION: only little endian architectures supported! XXX */ SCR_COPY_F (1), NADDR (header.savep), PADDR (disconnect0), }/*-------------------------< DISCONNECT0 >--------------*/,{ /*<<<*/ SCR_JUMPR ^ IFTRUE (DATA (1)), 20, /* ** neither this */ SCR_COPY_F (1), NADDR (header.goalp), PADDR (disconnect1), }/*-------------------------< DISCONNECT1 >--------------*/,{ SCR_INT ^ IFFALSE (DATA (1)), SIR_MISSING_SAVE, /*>>>*/ /* ** DISCONNECTing ... ** ** disable the "unexpected disconnect" feature, ** and remove the ACK signal. */ SCR_REG_REG (scntl2, SCR_AND, 0x7f), 0, SCR_CLR (SCR_ACK|SCR_ATN), 0, /* ** Wait for the disconnect. */ SCR_WAIT_DISC, 0, /* ** Profiling: ** Set a time stamp, ** and count the disconnects. */ SCR_COPY (sizeof (ticks)), KVAR (KVAR_TICKS), NADDR (header.stamp.disconnect), SCR_COPY (4), NADDR (disc_phys), RADDR (temp), SCR_REG_REG (temp, SCR_ADD, 0x01), 0, SCR_COPY (4), RADDR (temp), NADDR (disc_phys), /* ** Status is: DISCONNECTED. */ SCR_LOAD_REG (HS_REG, HS_DISCONNECT), 0, SCR_JUMP, PADDR (cleanup), }/*-------------------------< MSG_OUT >-------------------*/,{ /* ** The target requests a message. */ SCR_MOVE_ABS (1) ^ SCR_MSG_OUT, NADDR (msgout), SCR_COPY (1), RADDR (sfbr), NADDR (lastmsg), /* ** If it was no ABORT message ... */ SCR_JUMP ^ IFTRUE (DATA (MSG_ABORT)), PADDRH (msg_out_abort), /* ** ... wait for the next phase ** if it's a message out, send it again, ... */ SCR_JUMP ^ IFTRUE (WHEN (SCR_MSG_OUT)), PADDR (msg_out), }/*-------------------------< MSG_OUT_DONE >--------------*/,{ /* ** ... else clear the message ... */ SCR_LOAD_REG (scratcha, MSG_NOOP), 0, SCR_COPY (4), RADDR (scratcha), NADDR (msgout), /* ** ... 
and process the next phase */ SCR_JUMP, PADDR (dispatch), }/*------------------------< BADGETCC >---------------------*/,{ /* ** If SIGP was set, clear it and try again. */ SCR_FROM_REG (ctest2), 0, SCR_JUMP ^ IFTRUE (MASK (CSIGP,CSIGP)), PADDRH (getcc2), SCR_INT, SIR_SENSE_FAILED, }/*-------------------------< RESELECT >--------------------*/,{ /* ** This NOP will be patched with LED OFF ** SCR_REG_REG (gpreg, SCR_OR, 0x01) */ SCR_NO_OP, 0, /* ** make the DSA invalid. */ SCR_LOAD_REG (dsa, 0xff), 0, SCR_CLR (SCR_TRG), 0, /* ** Sleep waiting for a reselection. ** If SIGP is set, special treatment. ** ** Zu allem bereit .. */ SCR_WAIT_RESEL, PADDR(reselect2), }/*-------------------------< RESELECT1 >--------------------*/,{ /* ** This NOP will be patched with LED ON ** SCR_REG_REG (gpreg, SCR_AND, 0xfe) */ SCR_NO_OP, 0, /* ** ... zu nichts zu gebrauchen ? ** ** load the target id into the SFBR ** and jump to the control block. ** ** Look at the declarations of ** - struct ncb ** - struct tcb ** - struct lcb ** - struct nccb ** to understand what's going on. */ SCR_REG_SFBR (ssid, SCR_AND, 0x8F), 0, SCR_TO_REG (sdid), 0, SCR_JUMP, NADDR (jump_tcb), }/*-------------------------< RESELECT2 >-------------------*/,{ /* ** This NOP will be patched with LED ON ** SCR_REG_REG (gpreg, SCR_AND, 0xfe) */ SCR_NO_OP, 0, /* ** If it's not connected :( ** -> interrupted by SIGP bit. ** Jump to start. */ SCR_FROM_REG (ctest2), 0, SCR_JUMP ^ IFTRUE (MASK (CSIGP,CSIGP)), PADDR (start), SCR_JUMP, PADDR (reselect), }/*-------------------------< RESEL_TMP >-------------------*/,{ /* ** The return address in TEMP ** is in fact the data structure address, ** so copy it to the DSA register. */ SCR_COPY (4), RADDR (temp), RADDR (dsa), SCR_JUMP, PADDR (prepare), }/*-------------------------< RESEL_LUN >-------------------*/,{ /* ** come back to this point ** to get an IDENTIFY message ** Wait for a msg_in phase. 
*/ /*<<<*/ SCR_JUMPR ^ IFFALSE (WHEN (SCR_MSG_IN)), 48, /* ** message phase ** It's not a sony, it's a trick: ** read the data without acknowledging it. */ SCR_FROM_REG (sbdl), 0, /*<<<*/ SCR_JUMPR ^ IFFALSE (MASK (MSG_IDENTIFYFLAG, 0x98)), 32, /* ** It WAS an Identify message. ** get it and ack it! */ SCR_MOVE_ABS (1) ^ SCR_MSG_IN, NADDR (msgin), SCR_CLR (SCR_ACK), 0, /* ** Mask out the lun. */ SCR_REG_REG (sfbr, SCR_AND, 0x07), 0, SCR_RETURN, 0, /* ** No message phase or no IDENTIFY message: ** return 0. */ /*>>>*/ SCR_LOAD_SFBR (0), 0, SCR_RETURN, 0, }/*-------------------------< RESEL_TAG >-------------------*/,{ /* ** come back to this point ** to get a SIMPLE_TAG message ** Wait for a MSG_IN phase. */ /*<<<*/ SCR_JUMPR ^ IFFALSE (WHEN (SCR_MSG_IN)), 64, /* ** message phase ** It's a trick - read the data ** without acknowledging it. */ SCR_FROM_REG (sbdl), 0, /*<<<*/ SCR_JUMPR ^ IFFALSE (DATA (MSG_SIMPLE_Q_TAG)), 48, /* ** It WAS a SIMPLE_TAG message. ** get it and ack it! */ SCR_MOVE_ABS (1) ^ SCR_MSG_IN, NADDR (msgin), SCR_CLR (SCR_ACK), 0, /* ** Wait for the second byte (the tag) */ /*<<<*/ SCR_JUMPR ^ IFFALSE (WHEN (SCR_MSG_IN)), 24, /* ** Get it and ack it! */ SCR_MOVE_ABS (1) ^ SCR_MSG_IN, NADDR (msgin), SCR_CLR (SCR_ACK|SCR_CARRY), 0, SCR_RETURN, 0, /* ** No message phase or no SIMPLE_TAG message ** or no second byte: return 0. */ /*>>>*/ SCR_LOAD_SFBR (0), 0, SCR_SET (SCR_CARRY), 0, SCR_RETURN, 0, }/*-------------------------< DATA_IN >--------------------*/,{ /* ** Because the size depends on the ** #define MAX_SCATTER parameter, ** it is filled in at runtime. 
** ** SCR_JUMP ^ IFFALSE (WHEN (SCR_DATA_IN)), ** PADDR (no_data), ** SCR_COPY (sizeof (ticks)), ** KVAR (KVAR_TICKS), ** NADDR (header.stamp.data), ** SCR_MOVE_TBL ^ SCR_DATA_IN, ** offsetof (struct dsb, data[ 0]), ** ** ##===========< i=1; i========= ** || SCR_CALL ^ IFFALSE (WHEN (SCR_DATA_IN)), ** || PADDR (checkatn), ** || SCR_MOVE_TBL ^ SCR_DATA_IN, ** || offsetof (struct dsb, data[ i]), ** ##========================================== ** ** SCR_CALL, ** PADDR (checkatn), ** SCR_JUMP, ** PADDR (no_data), */ 0 }/*-------------------------< DATA_OUT >-------------------*/,{ /* ** Because the size depends on the ** #define MAX_SCATTER parameter, ** it is filled in at runtime. ** ** SCR_JUMP ^ IFFALSE (WHEN (SCR_DATA_OUT)), ** PADDR (no_data), ** SCR_COPY (sizeof (ticks)), ** KVAR (KVAR_TICKS), ** NADDR (header.stamp.data), ** SCR_MOVE_TBL ^ SCR_DATA_OUT, ** offsetof (struct dsb, data[ 0]), ** ** ##===========< i=1; i========= ** || SCR_CALL ^ IFFALSE (WHEN (SCR_DATA_OUT)), ** || PADDR (dispatch), ** || SCR_MOVE_TBL ^ SCR_DATA_OUT, ** || offsetof (struct dsb, data[ i]), ** ##========================================== ** ** SCR_CALL, ** PADDR (dispatch), ** SCR_JUMP, ** PADDR (no_data), ** **--------------------------------------------------------- */ (u_long)0 }/*--------------------------------------------------------*/ }; static struct scripth scripth0 = { /*-------------------------< TRYLOOP >---------------------*/{ /* ** Load an entry of the start queue into dsa ** and try to start it by jumping to TRYSEL. ** ** Because the size depends on the ** #define MAX_START parameter, it is filled ** in at runtime. 
** **----------------------------------------------------------- ** ** ##===========< I=0; i=========== ** || SCR_COPY (4), ** || NADDR (squeue[i]), ** || RADDR (dsa), ** || SCR_CALL, ** || PADDR (trysel), ** ##========================================== ** ** SCR_JUMP, ** PADDRH(tryloop), ** **----------------------------------------------------------- */ 0 }/*-------------------------< MSG_PARITY >---------------*/,{ /* ** count it */ SCR_REG_REG (PS_REG, SCR_ADD, 0x01), 0, /* ** send a "message parity error" message. */ SCR_LOAD_REG (scratcha, MSG_PARITY_ERROR), 0, SCR_JUMP, PADDR (setmsg), }/*-------------------------< MSG_MESSAGE_REJECT >---------------*/,{ /* ** If a negotiation was in progress, ** negotiation failed. */ SCR_FROM_REG (HS_REG), 0, SCR_INT ^ IFTRUE (DATA (HS_NEGOTIATE)), SIR_NEGO_FAILED, /* ** else make host log this message */ SCR_INT ^ IFFALSE (DATA (HS_NEGOTIATE)), SIR_REJECT_RECEIVED, SCR_JUMP, PADDR (clrack), }/*-------------------------< MSG_IGN_RESIDUE >----------*/,{ /* ** Terminate cycle */ SCR_CLR (SCR_ACK), 0, SCR_JUMP ^ IFFALSE (WHEN (SCR_MSG_IN)), PADDR (dispatch), /* ** get residue size. */ SCR_MOVE_ABS (1) ^ SCR_MSG_IN, NADDR (msgin[1]), /* ** Check for message parity error. */ SCR_TO_REG (scratcha), 0, SCR_FROM_REG (socl), 0, SCR_JUMP ^ IFTRUE (MASK (CATN, CATN)), PADDRH (msg_parity), SCR_FROM_REG (scratcha), 0, /* ** Size is 0 .. ignore message. */ SCR_JUMP ^ IFTRUE (DATA (0)), PADDR (clrack), /* ** Size is not 1 .. have to interrupt. */ /*<<<*/ SCR_JUMPR ^ IFFALSE (DATA (1)), 40, /* ** Check for residue byte in swide register */ SCR_FROM_REG (scntl2), 0, /*<<<*/ SCR_JUMPR ^ IFFALSE (MASK (WSR, WSR)), 16, /* ** There IS data in the swide register. ** Discard it. */ SCR_REG_REG (scntl2, SCR_OR, WSR), 0, SCR_JUMP, PADDR (clrack), /* ** Load again the size to the sfbr register. 
*/ /*>>>*/ SCR_FROM_REG (scratcha), 0, /*>>>*/ SCR_INT, SIR_IGN_RESIDUE, SCR_JUMP, PADDR (clrack), }/*-------------------------< MSG_EXTENDED >-------------*/,{ /* ** Terminate cycle */ SCR_CLR (SCR_ACK), 0, SCR_JUMP ^ IFFALSE (WHEN (SCR_MSG_IN)), PADDR (dispatch), /* ** get length. */ SCR_MOVE_ABS (1) ^ SCR_MSG_IN, NADDR (msgin[1]), /* ** Check for message parity error. */ SCR_TO_REG (scratcha), 0, SCR_FROM_REG (socl), 0, SCR_JUMP ^ IFTRUE (MASK (CATN, CATN)), PADDRH (msg_parity), SCR_FROM_REG (scratcha), 0, /* */ SCR_JUMP ^ IFTRUE (DATA (3)), PADDRH (msg_ext_3), SCR_JUMP ^ IFFALSE (DATA (2)), PADDR (msg_bad), }/*-------------------------< MSG_EXT_2 >----------------*/,{ SCR_CLR (SCR_ACK), 0, SCR_JUMP ^ IFFALSE (WHEN (SCR_MSG_IN)), PADDR (dispatch), /* ** get extended message code. */ SCR_MOVE_ABS (1) ^ SCR_MSG_IN, NADDR (msgin[2]), /* ** Check for message parity error. */ SCR_TO_REG (scratcha), 0, SCR_FROM_REG (socl), 0, SCR_JUMP ^ IFTRUE (MASK (CATN, CATN)), PADDRH (msg_parity), SCR_FROM_REG (scratcha), 0, SCR_JUMP ^ IFTRUE (DATA (MSG_EXT_WDTR)), PADDRH (msg_wdtr), /* ** unknown extended message */ SCR_JUMP, PADDR (msg_bad) }/*-------------------------< MSG_WDTR >-----------------*/,{ SCR_CLR (SCR_ACK), 0, SCR_JUMP ^ IFFALSE (WHEN (SCR_MSG_IN)), PADDR (dispatch), /* ** get data bus width */ SCR_MOVE_ABS (1) ^ SCR_MSG_IN, NADDR (msgin[3]), SCR_FROM_REG (socl), 0, SCR_JUMP ^ IFTRUE (MASK (CATN, CATN)), PADDRH (msg_parity), /* ** let the host do the real work. */ SCR_INT, SIR_NEGO_WIDE, /* ** let the target fetch our answer. 
*/ SCR_SET (SCR_ATN), 0, SCR_CLR (SCR_ACK), 0, SCR_INT ^ IFFALSE (WHEN (SCR_MSG_OUT)), SIR_NEGO_PROTO, /* ** Send the MSG_EXT_WDTR */ SCR_MOVE_ABS (4) ^ SCR_MSG_OUT, NADDR (msgout), SCR_CLR (SCR_ATN), 0, SCR_COPY (1), RADDR (sfbr), NADDR (lastmsg), SCR_JUMP, PADDR (msg_out_done), }/*-------------------------< MSG_EXT_3 >----------------*/,{ SCR_CLR (SCR_ACK), 0, SCR_JUMP ^ IFFALSE (WHEN (SCR_MSG_IN)), PADDR (dispatch), /* ** get extended message code. */ SCR_MOVE_ABS (1) ^ SCR_MSG_IN, NADDR (msgin[2]), /* ** Check for message parity error. */ SCR_TO_REG (scratcha), 0, SCR_FROM_REG (socl), 0, SCR_JUMP ^ IFTRUE (MASK (CATN, CATN)), PADDRH (msg_parity), SCR_FROM_REG (scratcha), 0, SCR_JUMP ^ IFTRUE (DATA (MSG_EXT_SDTR)), PADDRH (msg_sdtr), /* ** unknown extended message */ SCR_JUMP, PADDR (msg_bad) }/*-------------------------< MSG_SDTR >-----------------*/,{ SCR_CLR (SCR_ACK), 0, SCR_JUMP ^ IFFALSE (WHEN (SCR_MSG_IN)), PADDR (dispatch), /* ** get period and offset */ SCR_MOVE_ABS (2) ^ SCR_MSG_IN, NADDR (msgin[3]), SCR_FROM_REG (socl), 0, SCR_JUMP ^ IFTRUE (MASK (CATN, CATN)), PADDRH (msg_parity), /* ** let the host do the real work. */ SCR_INT, SIR_NEGO_SYNC, /* ** let the target fetch our answer. */ SCR_SET (SCR_ATN), 0, SCR_CLR (SCR_ACK), 0, SCR_INT ^ IFFALSE (WHEN (SCR_MSG_OUT)), SIR_NEGO_PROTO, /* ** Send the MSG_EXT_SDTR */ SCR_MOVE_ABS (5) ^ SCR_MSG_OUT, NADDR (msgout), SCR_CLR (SCR_ATN), 0, SCR_COPY (1), RADDR (sfbr), NADDR (lastmsg), SCR_JUMP, PADDR (msg_out_done), }/*-------------------------< MSG_OUT_ABORT >-------------*/,{ /* ** After ABORT message, ** ** expect an immediate disconnect, ... */ SCR_REG_REG (scntl2, SCR_AND, 0x7f), 0, SCR_CLR (SCR_ACK|SCR_ATN), 0, SCR_WAIT_DISC, 0, /* ** ... and set the status to "ABORTED" */ SCR_LOAD_REG (HS_REG, HS_ABORTED), 0, SCR_JUMP, PADDR (cleanup), }/*-------------------------< GETCC >-----------------------*/,{ /* ** The ncr doesn't have an indirect load ** or store command. 
So we have to ** copy part of the control block to a ** fixed place, where we can modify it. ** ** We patch the address part of a COPY command ** with the address of the dsa register ... */ SCR_COPY_F (4), RADDR (dsa), PADDRH (getcc1), /* ** ... then we do the actual copy. */ SCR_COPY (sizeof (struct head)), }/*-------------------------< GETCC1 >----------------------*/,{ 0, NADDR (header), /* ** Initialize the status registers */ SCR_COPY (4), NADDR (header.status), RADDR (scr0), }/*-------------------------< GETCC2 >----------------------*/,{ /* ** Get the condition code from a target. ** ** DSA points to a data structure. ** Set TEMP to the script location ** that receives the condition code. ** ** Because there is no script command ** to load a longword into a register, ** we use a CALL command. */ /*<<<*/ SCR_CALLR, 24, /* ** Get the condition code. */ SCR_MOVE_TBL ^ SCR_DATA_IN, offsetof (struct dsb, sense), /* ** No data phase may follow! */ SCR_CALL, PADDR (checkatn), SCR_JUMP, PADDR (no_data), /*>>>*/ /* ** The CALL jumps to this point. ** Prepare for a RESTORE_POINTER message. ** Save the TEMP register into the saved pointer. */ SCR_COPY (4), RADDR (temp), NADDR (header.savep), /* ** Load scratcha, because in case of a selection timeout, ** the host will expect a new value for startpos in ** the scratcha register. */ SCR_COPY (4), PADDR (startpos), RADDR (scratcha), #ifdef NCR_GETCC_WITHMSG /* ** If QUIRK_NOMSG is set, select without ATN. ** and don't send a message. */ SCR_FROM_REG (QU_REG), 0, SCR_JUMP ^ IFTRUE (MASK (QUIRK_NOMSG, QUIRK_NOMSG)), PADDRH(getcc3), /* ** Then try to connect to the target. ** If we are reselected, special treatment ** of the current job is required before ** accepting the reselection. */ SCR_SEL_TBL_ATN ^ offsetof (struct dsb, select), PADDR(badgetcc), /* ** Send the IDENTIFY message. ** In case of short transfer, remove ATN. 
*/ SCR_MOVE_TBL ^ SCR_MSG_OUT, offsetof (struct dsb, smsg2), SCR_CLR (SCR_ATN), 0, /* ** save the first byte of the message. */ SCR_COPY (1), RADDR (sfbr), NADDR (lastmsg), SCR_JUMP, PADDR (prepare2), #endif }/*-------------------------< GETCC3 >----------------------*/,{ /* ** Try to connect to the target. ** If we are reselected, special treatment ** of the current job is required before ** accepting the reselection. ** ** Silly target won't accept a message. ** Select without ATN. */ SCR_SEL_TBL ^ offsetof (struct dsb, select), PADDR(badgetcc), /* ** Force error if selection timeout */ SCR_JUMPR ^ IFTRUE (WHEN (SCR_MSG_IN)), 0, /* ** don't negotiate. */ SCR_JUMP, PADDR (prepare2), }/*-------------------------< ABORTTAG >-------------------*/,{ /* ** Abort a bad reselection. ** Set the message to ABORT vs. ABORT_TAG */ SCR_LOAD_REG (scratcha, MSG_ABORT_TAG), 0, SCR_JUMPR ^ IFFALSE (CARRYSET), 8, }/*-------------------------< ABORT >----------------------*/,{ SCR_LOAD_REG (scratcha, MSG_ABORT), 0, SCR_COPY (1), RADDR (scratcha), NADDR (msgout), SCR_SET (SCR_ATN), 0, SCR_CLR (SCR_ACK), 0, /* ** and send it. ** we expect an immediate disconnect */ SCR_REG_REG (scntl2, SCR_AND, 0x7f), 0, SCR_MOVE_ABS (1) ^ SCR_MSG_OUT, NADDR (msgout), SCR_COPY (1), RADDR (sfbr), NADDR (lastmsg), SCR_CLR (SCR_ACK|SCR_ATN), 0, SCR_WAIT_DISC, 0, SCR_JUMP, PADDR (start), }/*-------------------------< SNOOPTEST >-------------------*/,{ /* ** Read the variable. */ SCR_COPY (4), KVAR (KVAR_NCR_CACHE), RADDR (scratcha), /* ** Write the variable. */ SCR_COPY (4), RADDR (temp), KVAR (KVAR_NCR_CACHE), /* ** Read back the variable. */ SCR_COPY (4), KVAR (KVAR_NCR_CACHE), RADDR (temp), }/*-------------------------< SNOOPEND >-------------------*/,{ /* ** And stop. 
*/ SCR_INT, 99, }/*--------------------------------------------------------*/ }; /*========================================================== ** ** ** Fill in #define dependent parts of the script ** ** **========================================================== */ static void ncr_script_fill (struct script * scr, struct scripth * scrh) { int i; ncrcmd *p; p = scrh->tryloop; for (i=0; itryloop + sizeof (scrh->tryloop)); p = scr->data_in; *p++ =SCR_JUMP ^ IFFALSE (WHEN (SCR_DATA_IN)); *p++ =PADDR (no_data); *p++ =SCR_COPY (sizeof (ticks)); *p++ =(ncrcmd) KVAR (KVAR_TICKS); *p++ =NADDR (header.stamp.data); *p++ =SCR_MOVE_TBL ^ SCR_DATA_IN; *p++ =offsetof (struct dsb, data[ 0]); for (i=1; idata_in + sizeof (scr->data_in)); p = scr->data_out; *p++ =SCR_JUMP ^ IFFALSE (WHEN (SCR_DATA_OUT)); *p++ =PADDR (no_data); *p++ =SCR_COPY (sizeof (ticks)); *p++ =(ncrcmd) KVAR (KVAR_TICKS); *p++ =NADDR (header.stamp.data); *p++ =SCR_MOVE_TBL ^ SCR_DATA_OUT; *p++ =offsetof (struct dsb, data[ 0]); for (i=1; idata_out + sizeof (scr->data_out)); } /*========================================================== ** ** ** Copy and rebind a script. ** ** **========================================================== */ static void ncr_script_copy_and_bind (ncb_p np, ncrcmd *src, ncrcmd *dst, int len) { ncrcmd opcode, new, old, tmp1, tmp2; ncrcmd *start, *end; int relocs, offset; start = src; end = src + len/4; offset = 0; while (src < end) { opcode = *src++; WRITESCRIPT_OFF(dst, offset, opcode); offset += 4; /* ** If we forget to change the length ** in struct script, a field will be ** padded with 0. This is an illegal ** command. */ if (opcode == 0) { device_printf(np->dev, "ERROR0 IN SCRIPT at %d.\n", (int)(src - start - 1)); DELAY (1000000); } if (DEBUG_FLAGS & DEBUG_SCRIPT) printf ("%p: <%x>\n", (src-1), (unsigned)opcode); /* ** We don't have to decode ALL commands */ switch (opcode >> 28) { case 0xc: /* ** COPY has TWO arguments. 
*/ relocs = 2; tmp1 = src[0]; if ((tmp1 & RELOC_MASK) == RELOC_KVAR) tmp1 = 0; tmp2 = src[1]; if ((tmp2 & RELOC_MASK) == RELOC_KVAR) tmp2 = 0; if ((tmp1 ^ tmp2) & 3) { device_printf(np->dev, "ERROR1 IN SCRIPT at %d.\n", (int)(src - start - 1)); DELAY (1000000); } /* ** If PREFETCH feature not enabled, remove ** the NO FLUSH bit if present. */ if ((opcode & SCR_NO_FLUSH) && !(np->features&FE_PFEN)) WRITESCRIPT_OFF(dst, offset - 4, (opcode & ~SCR_NO_FLUSH)); break; case 0x0: /* ** MOVE (absolute address) */ relocs = 1; break; case 0x8: /* ** JUMP / CALL ** dont't relocate if relative :-) */ if (opcode & 0x00800000) relocs = 0; else relocs = 1; break; case 0x4: case 0x5: case 0x6: case 0x7: relocs = 1; break; default: relocs = 0; break; } if (relocs) { while (relocs--) { old = *src++; switch (old & RELOC_MASK) { case RELOC_REGISTER: new = (old & ~RELOC_MASK) + rman_get_start(np->reg_res); break; case RELOC_LABEL: new = (old & ~RELOC_MASK) + np->p_script; break; case RELOC_LABELH: new = (old & ~RELOC_MASK) + np->p_scripth; break; case RELOC_SOFTC: new = (old & ~RELOC_MASK) + vtophys(np); break; case RELOC_KVAR: if (((old & ~RELOC_MASK) < SCRIPT_KVAR_FIRST) || ((old & ~RELOC_MASK) > SCRIPT_KVAR_LAST)) panic("ncr KVAR out of range"); new = vtophys(script_kvars[old & ~RELOC_MASK]); break; case 0: /* Don't relocate a 0 address. */ if (old == 0) { new = old; break; } /* FALLTHROUGH */ default: panic("ncr_script_copy_and_bind: weird relocation %x @ %d\n", old, (int)(src - start)); break; } WRITESCRIPT_OFF(dst, offset, new); offset += 4; } } else { WRITESCRIPT_OFF(dst, offset, *src++); offset += 4; } } } /*========================================================== ** ** ** Auto configuration. ** ** **========================================================== */ #if 0 /*---------------------------------------------------------- ** ** Reduce the transfer length to the max value ** we can transfer safely. 
** ** Reading a block greater then MAX_SIZE from the ** raw (character) device exercises a memory leak ** in the vm subsystem. This is common to ALL devices. ** We have submitted a description of this bug to ** . ** It should be fixed in the current release. ** **---------------------------------------------------------- */ void ncr_min_phys (struct buf *bp) { if ((unsigned long)bp->b_bcount > MAX_SIZE) bp->b_bcount = MAX_SIZE; } #endif #if 0 /*---------------------------------------------------------- ** ** Maximal number of outstanding requests per target. ** **---------------------------------------------------------- */ u_int32_t ncr_info (int unit) { return (1); /* may be changed later */ } #endif /*---------------------------------------------------------- ** ** NCR chip devices table and chip look up function. ** Features bit are defined in ncrreg.h. Is it the ** right place? ** **---------------------------------------------------------- */ typedef struct { unsigned long device_id; unsigned short minrevid; char *name; unsigned char maxburst; unsigned char maxoffs; unsigned char clock_divn; unsigned int features; } ncr_chip; static ncr_chip ncr_chip_table[] = { {NCR_810_ID, 0x00, "ncr 53c810 fast10 scsi", 4, 8, 4, FE_ERL} , {NCR_810_ID, 0x10, "ncr 53c810a fast10 scsi", 4, 8, 4, FE_ERL|FE_LDSTR|FE_PFEN|FE_BOF} , {NCR_815_ID, 0x00, "ncr 53c815 fast10 scsi", 4, 8, 4, FE_ERL|FE_BOF} , {NCR_820_ID, 0x00, "ncr 53c820 fast10 wide scsi", 4, 8, 4, FE_WIDE|FE_ERL} , {NCR_825_ID, 0x00, "ncr 53c825 fast10 wide scsi", 4, 8, 4, FE_WIDE|FE_ERL|FE_BOF} , {NCR_825_ID, 0x10, "ncr 53c825a fast10 wide scsi", 7, 8, 4, FE_WIDE|FE_CACHE_SET|FE_DFS|FE_LDSTR|FE_PFEN|FE_RAM} , {NCR_860_ID, 0x00, "ncr 53c860 fast20 scsi", 4, 8, 5, FE_ULTRA|FE_CLK80|FE_CACHE_SET|FE_LDSTR|FE_PFEN} , {NCR_875_ID, 0x00, "ncr 53c875 fast20 wide scsi", 7, 16, 5, FE_WIDE|FE_ULTRA|FE_CLK80|FE_CACHE_SET|FE_DFS|FE_LDSTR|FE_PFEN|FE_RAM} , {NCR_875_ID, 0x02, "ncr 53c875 fast20 wide scsi", 7, 16, 5, 
FE_WIDE|FE_ULTRA|FE_DBLR|FE_CACHE_SET|FE_DFS|FE_LDSTR|FE_PFEN|FE_RAM} , {NCR_875_ID2, 0x00, "ncr 53c875j fast20 wide scsi", 7, 16, 5, FE_WIDE|FE_ULTRA|FE_DBLR|FE_CACHE_SET|FE_DFS|FE_LDSTR|FE_PFEN|FE_RAM} , {NCR_885_ID, 0x00, "ncr 53c885 fast20 wide scsi", 7, 16, 5, FE_WIDE|FE_ULTRA|FE_DBLR|FE_CACHE_SET|FE_DFS|FE_LDSTR|FE_PFEN|FE_RAM} , {NCR_895_ID, 0x00, "ncr 53c895 fast40 wide scsi", 7, 31, 7, FE_WIDE|FE_ULTRA2|FE_QUAD|FE_CACHE_SET|FE_DFS|FE_LDSTR|FE_PFEN|FE_RAM} , {NCR_896_ID, 0x00, "ncr 53c896 fast40 wide scsi", 7, 31, 7, FE_WIDE|FE_ULTRA2|FE_QUAD|FE_CACHE_SET|FE_DFS|FE_LDSTR|FE_PFEN|FE_RAM} , {NCR_895A_ID, 0x00, "ncr 53c895a fast40 wide scsi", 7, 31, 7, FE_WIDE|FE_ULTRA2|FE_QUAD|FE_CACHE_SET|FE_DFS|FE_LDSTR|FE_PFEN|FE_RAM} , {NCR_1510D_ID, 0x00, "ncr 53c1510d fast40 wide scsi", 7, 31, 7, FE_WIDE|FE_ULTRA2|FE_QUAD|FE_CACHE_SET|FE_DFS|FE_LDSTR|FE_PFEN|FE_RAM} }; static int ncr_chip_lookup(u_long device_id, u_char revision_id) { int i, found; found = -1; for (i = 0; i < nitems(ncr_chip_table); i++) { if (device_id == ncr_chip_table[i].device_id && ncr_chip_table[i].minrevid <= revision_id) { if (found < 0 || ncr_chip_table[found].minrevid < ncr_chip_table[i].minrevid) { found = i; } } } return found; } /*---------------------------------------------------------- ** ** Probe the hostadapter. ** **---------------------------------------------------------- */ static int ncr_probe (device_t dev) { int i; i = ncr_chip_lookup(pci_get_devid(dev), pci_get_revid(dev)); if (i >= 0) { device_set_desc(dev, ncr_chip_table[i].name); return (BUS_PROBE_DEFAULT); } return (ENXIO); } /*========================================================== ** ** NCR chip clock divisor table. ** Divisors are multiplied by 10,000,000 in order to make ** calculations more simple. 
** **========================================================== */ #define _5M 5000000 static u_long div_10M[] = {2*_5M, 3*_5M, 4*_5M, 6*_5M, 8*_5M, 12*_5M, 16*_5M}; /*=============================================================== ** ** NCR chips allow burst lengths of 2, 4, 8, 16, 32, 64, 128 ** transfers. 32,64,128 are only supported by 875 and 895 chips. ** We use log base 2 (burst length) as internal code, with ** value 0 meaning "burst disabled". ** **=============================================================== */ /* * Burst length from burst code. */ #define burst_length(bc) (!(bc))? 0 : 1 << (bc) /* * Burst code from io register bits. */ #define burst_code(dmode, ctest4, ctest5) \ (ctest4) & 0x80? 0 : (((dmode) & 0xc0) >> 6) + ((ctest5) & 0x04) + 1 /* * Set initial io register bits from burst code. */ static void ncr_init_burst(ncb_p np, u_char bc) { np->rv_ctest4 &= ~0x80; np->rv_dmode &= ~(0x3 << 6); np->rv_ctest5 &= ~0x4; if (!bc) { np->rv_ctest4 |= 0x80; } else { --bc; np->rv_dmode |= ((bc & 0x3) << 6); np->rv_ctest5 |= (bc & 0x4); } } /*========================================================== ** ** ** Auto configuration: attach and init a host adapter. ** ** **========================================================== */ static int ncr_attach (device_t dev) { ncb_p np = (struct ncb*) device_get_softc(dev); u_char rev = 0; u_long period; int i, rid; u_int8_t usrsync; u_int8_t usrwide; struct cam_devq *devq; /* ** allocate and initialize structures. */ np->dev = dev; mtx_init(&np->lock, "ncr", NULL, MTX_DEF); callout_init_mtx(&np->timer, &np->lock, 0); /* ** Try to map the controller chip to ** virtual and physical memory. */ np->reg_rid = PCIR_BAR(1); np->reg_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &np->reg_rid, RF_ACTIVE); if (!np->reg_res) { device_printf(dev, "could not map memory\n"); return ENXIO; } /* ** Now the INB INW INL OUTB OUTW OUTL macros ** can be used safely. 
*/ #ifdef NCR_IOMAPPED /* ** Try to map the controller chip into iospace. */ if (!pci_map_port (config_id, 0x10, &np->port)) return; #endif /* ** Save some controller register default values */ np->rv_scntl3 = INB(nc_scntl3) & 0x77; np->rv_dmode = INB(nc_dmode) & 0xce; np->rv_dcntl = INB(nc_dcntl) & 0xa9; np->rv_ctest3 = INB(nc_ctest3) & 0x01; np->rv_ctest4 = INB(nc_ctest4) & 0x88; np->rv_ctest5 = INB(nc_ctest5) & 0x24; np->rv_gpcntl = INB(nc_gpcntl); np->rv_stest2 = INB(nc_stest2) & 0x20; if (bootverbose >= 2) { printf ("\tBIOS values: SCNTL3:%02x DMODE:%02x DCNTL:%02x\n", np->rv_scntl3, np->rv_dmode, np->rv_dcntl); printf ("\t CTEST3:%02x CTEST4:%02x CTEST5:%02x\n", np->rv_ctest3, np->rv_ctest4, np->rv_ctest5); } np->rv_dcntl |= NOCOM; /* ** Do chip dependent initialization. */ rev = pci_get_revid(dev); /* ** Get chip features from chips table. */ i = ncr_chip_lookup(pci_get_devid(dev), rev); if (i >= 0) { np->maxburst = ncr_chip_table[i].maxburst; np->maxoffs = ncr_chip_table[i].maxoffs; np->clock_divn = ncr_chip_table[i].clock_divn; np->features = ncr_chip_table[i].features; } else { /* Should'nt happen if probe() is ok */ np->maxburst = 4; np->maxoffs = 8; np->clock_divn = 4; np->features = FE_ERL; } np->maxwide = np->features & FE_WIDE ? 1 : 0; np->clock_khz = np->features & FE_CLK80 ? 80000 : 40000; if (np->features & FE_QUAD) np->multiplier = 4; else if (np->features & FE_DBLR) np->multiplier = 2; else np->multiplier = 1; /* ** Get the frequency of the chip's clock. ** Find the right value for scntl3. */ if (np->features & (FE_ULTRA|FE_ULTRA2)) ncr_getclock(np, np->multiplier); #ifdef NCR_TEKRAM_EEPROM if (bootverbose) { device_printf(dev, "Tekram EEPROM read %s\n", read_tekram_eeprom (np, NULL) ? "succeeded" : "failed"); } #endif /* NCR_TEKRAM_EEPROM */ /* * If scntl3 != 0, we assume BIOS is present. */ if (np->rv_scntl3) np->features |= FE_BIOS; /* * Divisor to be used for async (timer pre-scaler). 
*/ i = np->clock_divn - 1; while (i >= 0) { --i; if (10ul * SCSI_NCR_MIN_ASYNC * np->clock_khz > div_10M[i]) { ++i; break; } } np->rv_scntl3 = i+1; /* * Minimum synchronous period factor supported by the chip. * Btw, 'period' is in tenths of nanoseconds. */ period = howmany(4 * div_10M[0], np->clock_khz); if (period <= 250) np->minsync = 10; else if (period <= 303) np->minsync = 11; else if (period <= 500) np->minsync = 12; else np->minsync = howmany(period, 40); /* * Check against chip SCSI standard support (SCSI-2,ULTRA,ULTRA2). */ if (np->minsync < 25 && !(np->features & (FE_ULTRA|FE_ULTRA2))) np->minsync = 25; else if (np->minsync < 12 && !(np->features & FE_ULTRA2)) np->minsync = 12; /* * Maximum synchronous period factor supported by the chip. */ period = (11 * div_10M[np->clock_divn - 1]) / (4 * np->clock_khz); np->maxsync = period > 2540 ? 254 : period / 10; /* * Now, some features available with Symbios compatible boards. * LED support through GPIO0 and DIFF support. */ #ifdef SCSI_NCR_SYMBIOS_COMPAT if (!(np->rv_gpcntl & 0x01)) np->features |= FE_LED0; #if 0 /* Not safe enough without NVRAM support or user settable option */ if (!(INB(nc_gpreg) & 0x08)) np->features |= FE_DIFF; #endif #endif /* SCSI_NCR_SYMBIOS_COMPAT */ /* * Prepare initial IO registers settings. * Trust BIOS only if we believe we have one and if we want to. 
*/ #ifdef SCSI_NCR_TRUST_BIOS if (!(np->features & FE_BIOS)) { #else if (1) { #endif np->rv_dmode = 0; np->rv_dcntl = NOCOM; np->rv_ctest3 = 0; np->rv_ctest4 = MPEE; np->rv_ctest5 = 0; np->rv_stest2 = 0; if (np->features & FE_ERL) np->rv_dmode |= ERL; /* Enable Read Line */ if (np->features & FE_BOF) np->rv_dmode |= BOF; /* Burst Opcode Fetch */ if (np->features & FE_ERMP) np->rv_dmode |= ERMP; /* Enable Read Multiple */ if (np->features & FE_CLSE) np->rv_dcntl |= CLSE; /* Cache Line Size Enable */ if (np->features & FE_WRIE) np->rv_ctest3 |= WRIE; /* Write and Invalidate */ if (np->features & FE_PFEN) np->rv_dcntl |= PFEN; /* Prefetch Enable */ if (np->features & FE_DFS) np->rv_ctest5 |= DFS; /* Dma Fifo Size */ if (np->features & FE_DIFF) np->rv_stest2 |= 0x20; /* Differential mode */ ncr_init_burst(np, np->maxburst); /* Max dwords burst length */ } else { np->maxburst = burst_code(np->rv_dmode, np->rv_ctest4, np->rv_ctest5); } /* ** Get on-chip SRAM address, if supported */ if ((np->features & FE_RAM) && sizeof(struct script) <= 4096) { np->sram_rid = PCIR_BAR(2); np->sram_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &np->sram_rid, RF_ACTIVE); } /* ** Allocate structure for script relocation. */ if (np->sram_res != NULL) { np->script = NULL; np->p_script = rman_get_start(np->sram_res); } else if (sizeof (struct script) > PAGE_SIZE) { np->script = (struct script*) contigmalloc (round_page(sizeof (struct script)), M_DEVBUF, M_WAITOK, 0, 0xffffffff, PAGE_SIZE, 0); } else { np->script = (struct script *) malloc (sizeof (struct script), M_DEVBUF, M_WAITOK); } if (sizeof (struct scripth) > PAGE_SIZE) { np->scripth = (struct scripth*) contigmalloc (round_page(sizeof (struct scripth)), M_DEVBUF, M_WAITOK, 0, 0xffffffff, PAGE_SIZE, 0); } else { np->scripth = (struct scripth *) malloc (sizeof (struct scripth), M_DEVBUF, M_WAITOK); } #ifdef SCSI_NCR_PCI_CONFIG_FIXUP /* ** If cache line size is enabled, check PCI config space and ** try to fix it up if necessary. 
*/ #ifdef PCIR_CACHELNSZ /* To be sure that new PCI stuff is present */ { u_char cachelnsz = pci_read_config(dev, PCIR_CACHELNSZ, 1); u_short command = pci_read_config(dev, PCIR_COMMAND, 2); if (!cachelnsz) { cachelnsz = 8; device_printf(dev, "setting PCI cache line size register to %d.\n", (int)cachelnsz); pci_write_config(dev, PCIR_CACHELNSZ, cachelnsz, 1); } if (!(command & PCIM_CMD_MWRICEN)) { command |= PCIM_CMD_MWRICEN; device_printf(dev, "setting PCI command write and invalidate.\n"); pci_write_config(dev, PCIR_COMMAND, command, 2); } } #endif /* PCIR_CACHELNSZ */ #endif /* SCSI_NCR_PCI_CONFIG_FIXUP */ /* Initialize per-target user settings */ usrsync = 0; if (SCSI_NCR_DFLT_SYNC) { usrsync = SCSI_NCR_DFLT_SYNC; if (usrsync > np->maxsync) usrsync = np->maxsync; if (usrsync < np->minsync) usrsync = np->minsync; } usrwide = (SCSI_NCR_MAX_WIDE); if (usrwide > np->maxwide) usrwide=np->maxwide; for (i=0;itarget[i]; tp->tinfo.user.period = usrsync; tp->tinfo.user.offset = usrsync != 0 ? np->maxoffs : 0; tp->tinfo.user.width = usrwide; tp->tinfo.disc_tag = NCR_CUR_DISCENB | NCR_CUR_TAGENB | NCR_USR_DISCENB | NCR_USR_TAGENB; } /* ** Bells and whistles ;-) */ if (bootverbose) device_printf(dev, "minsync=%d, maxsync=%d, maxoffs=%d, %d dwords burst, %s dma fifo\n", np->minsync, np->maxsync, np->maxoffs, burst_length(np->maxburst), (np->rv_ctest5 & DFS) ? "large" : "normal"); /* ** Print some complementary information that can be helpful. */ if (bootverbose) device_printf(dev, "%s, %s IRQ driver%s\n", np->rv_stest2 & 0x20 ? "differential" : "single-ended", np->rv_dcntl & IRQM ? "totem pole" : "open drain", np->sram_res ? 
", using on-chip SRAM" : ""); /* ** Patch scripts to physical addresses */ ncr_script_fill (&script0, &scripth0); if (np->script) np->p_script = vtophys(np->script); np->p_scripth = vtophys(np->scripth); ncr_script_copy_and_bind (np, (ncrcmd *) &script0, (ncrcmd *) np->script, sizeof(struct script)); ncr_script_copy_and_bind (np, (ncrcmd *) &scripth0, (ncrcmd *) np->scripth, sizeof(struct scripth)); /* ** Patch the script for LED support. */ if (np->features & FE_LED0) { WRITESCRIPT(reselect[0], SCR_REG_REG(gpreg, SCR_OR, 0x01)); WRITESCRIPT(reselect1[0], SCR_REG_REG(gpreg, SCR_AND, 0xfe)); WRITESCRIPT(reselect2[0], SCR_REG_REG(gpreg, SCR_AND, 0xfe)); } /* ** init data structure */ np->jump_tcb.l_cmd = SCR_JUMP; np->jump_tcb.l_paddr = NCB_SCRIPTH_PHYS (np, abort); /* ** Get SCSI addr of host adapter (set by bios?). */ np->myaddr = INB(nc_scid) & 0x07; if (!np->myaddr) np->myaddr = SCSI_NCR_MYADDR; #ifdef NCR_DUMP_REG /* ** Log the initial register contents */ { int reg; for (reg=0; reg<256; reg+=4) { if (reg%16==0) printf ("reg[%2x]", reg); printf (" %08x", (int)pci_conf_read (config_id, reg)); if (reg%16==12) printf ("\n"); } } #endif /* NCR_DUMP_REG */ /* ** Reset chip. */ OUTB (nc_istat, SRST); DELAY (1000); OUTB (nc_istat, 0 ); /* ** Now check the cache handling of the pci chipset. */ if (ncr_snooptest (np)) { printf ("CACHE INCORRECTLY CONFIGURED.\n"); return EINVAL; } /* ** Install the interrupt handler. */ rid = 0; np->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); if (np->irq_res == NULL) { device_printf(dev, "interruptless mode: reduced performance.\n"); } else { bus_setup_intr(dev, np->irq_res, INTR_TYPE_CAM | INTR_ENTROPY | INTR_MPSAFE, NULL, ncr_intr, np, &np->irq_handle); } /* ** Create the device queue. We only allow MAX_START-1 concurrent ** transactions so we can be sure to have one element free in our ** start queue to reset to the idle loop. 
*/ devq = cam_simq_alloc(MAX_START - 1); if (devq == NULL) return ENOMEM; /* ** Now tell the generic SCSI layer ** about our bus. */ np->sim = cam_sim_alloc(ncr_action, ncr_poll, "ncr", np, device_get_unit(dev), &np->lock, 1, MAX_TAGS, devq); if (np->sim == NULL) { cam_simq_free(devq); return ENOMEM; } mtx_lock(&np->lock); if (xpt_bus_register(np->sim, dev, 0) != CAM_SUCCESS) { cam_sim_free(np->sim, /*free_devq*/ TRUE); mtx_unlock(&np->lock); return ENOMEM; } if (xpt_create_path(&np->path, /*periph*/NULL, cam_sim_path(np->sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { xpt_bus_deregister(cam_sim_path(np->sim)); cam_sim_free(np->sim, /*free_devq*/TRUE); mtx_unlock(&np->lock); return ENOMEM; } /* ** start the timeout daemon */ ncr_timeout (np); np->lasttime=0; mtx_unlock(&np->lock); return 0; } /*========================================================== ** ** ** Process pending device interrupts. ** ** **========================================================== */ static void ncr_intr(vnp) void *vnp; { ncb_p np = vnp; mtx_lock(&np->lock); ncr_intr_locked(np); mtx_unlock(&np->lock); } static void ncr_intr_locked(ncb_p np) { if (DEBUG_FLAGS & DEBUG_TINY) printf ("["); if (INB(nc_istat) & (INTF|SIP|DIP)) { /* ** Repeat until no outstanding ints */ do { ncr_exception (np); } while (INB(nc_istat) & (INTF|SIP|DIP)); np->ticks = 100; } if (DEBUG_FLAGS & DEBUG_TINY) printf ("]\n"); } /*========================================================== ** ** ** Start execution of a SCSI command. ** This is called from the generic SCSI driver. 
** ** **========================================================== */ static void ncr_action (struct cam_sim *sim, union ccb *ccb) { ncb_p np; np = (ncb_p) cam_sim_softc(sim); mtx_assert(&np->lock, MA_OWNED); switch (ccb->ccb_h.func_code) { /* Common cases first */ case XPT_SCSI_IO: /* Execute the requested I/O operation */ { nccb_p cp; lcb_p lp; tcb_p tp; struct ccb_scsiio *csio; u_int8_t *msgptr; u_int msglen; u_int msglen2; int segments; u_int8_t nego; u_int8_t idmsg; int qidx; tp = &np->target[ccb->ccb_h.target_id]; csio = &ccb->csio; /* * Make sure we support this request. We can't do * PHYS pointers. */ if (ccb->ccb_h.flags & CAM_CDB_PHYS) { ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); return; } /* * Last time we need to check if this CCB needs to * be aborted. */ if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) { xpt_done(ccb); return; } ccb->ccb_h.status |= CAM_SIM_QUEUED; /*--------------------------------------------------- ** ** Assign an nccb / bind ccb ** **---------------------------------------------------- */ cp = ncr_get_nccb (np, ccb->ccb_h.target_id, ccb->ccb_h.target_lun); if (cp == NULL) { /* XXX JGibbs - Freeze SIMQ */ ccb->ccb_h.status = CAM_RESRC_UNAVAIL; xpt_done(ccb); return; } cp->ccb = ccb; /*--------------------------------------------------- ** ** timestamp ** **---------------------------------------------------- */ /* ** XXX JGibbs - Isn't this expensive ** enough to be conditionalized?? */ bzero (&cp->phys.header.stamp, sizeof (struct tstamp)); cp->phys.header.stamp.start = ticks; nego = 0; if (tp->nego_cp == NULL) { if (tp->tinfo.current.width != tp->tinfo.goal.width) { tp->nego_cp = cp; nego = NS_WIDE; } else if ((tp->tinfo.current.period != tp->tinfo.goal.period) || (tp->tinfo.current.offset != tp->tinfo.goal.offset)) { tp->nego_cp = cp; nego = NS_SYNC; } } /*--------------------------------------------------- ** ** choose a new tag ... 
** **---------------------------------------------------- */ lp = tp->lp[ccb->ccb_h.target_lun]; if ((ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0 && (ccb->csio.tag_action != CAM_TAG_ACTION_NONE) && (nego == 0)) { /* ** assign a tag to this nccb */ while (!cp->tag) { nccb_p cp2 = lp->next_nccb; lp->lasttag = lp->lasttag % 255 + 1; while (cp2 && cp2->tag != lp->lasttag) cp2 = cp2->next_nccb; if (cp2) continue; cp->tag=lp->lasttag; if (DEBUG_FLAGS & DEBUG_TAGS) { PRINT_ADDR(ccb); printf ("using tag #%d.\n", cp->tag); } } } else { cp->tag=0; } /*---------------------------------------------------- ** ** Build the identify / tag / sdtr message ** **---------------------------------------------------- */ idmsg = MSG_IDENTIFYFLAG | ccb->ccb_h.target_lun; if (tp->tinfo.disc_tag & NCR_CUR_DISCENB) idmsg |= MSG_IDENTIFY_DISCFLAG; msgptr = cp->scsi_smsg; msglen = 0; msgptr[msglen++] = idmsg; if (cp->tag) { msgptr[msglen++] = ccb->csio.tag_action; msgptr[msglen++] = cp->tag; } switch (nego) { case NS_SYNC: msgptr[msglen++] = MSG_EXTENDED; msgptr[msglen++] = MSG_EXT_SDTR_LEN; msgptr[msglen++] = MSG_EXT_SDTR; msgptr[msglen++] = tp->tinfo.goal.period; msgptr[msglen++] = tp->tinfo.goal.offset; if (DEBUG_FLAGS & DEBUG_NEGO) { PRINT_ADDR(ccb); printf ("sync msgout: "); ncr_show_msg (&cp->scsi_smsg [msglen-5]); printf (".\n"); }; break; case NS_WIDE: msgptr[msglen++] = MSG_EXTENDED; msgptr[msglen++] = MSG_EXT_WDTR_LEN; msgptr[msglen++] = MSG_EXT_WDTR; msgptr[msglen++] = tp->tinfo.goal.width; if (DEBUG_FLAGS & DEBUG_NEGO) { PRINT_ADDR(ccb); printf ("wide msgout: "); ncr_show_msg (&cp->scsi_smsg [msglen-4]); printf (".\n"); }; break; } /*---------------------------------------------------- ** ** Build the identify message for getcc. 
** **---------------------------------------------------- */ cp->scsi_smsg2 [0] = idmsg; msglen2 = 1; /*---------------------------------------------------- ** ** Build the data descriptors ** **---------------------------------------------------- */ /* XXX JGibbs - Handle other types of I/O */ if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) { segments = ncr_scatter(&cp->phys, (vm_offset_t)csio->data_ptr, (vm_size_t)csio->dxfer_len); if (segments < 0) { ccb->ccb_h.status = CAM_REQ_TOO_BIG; ncr_free_nccb(np, cp); xpt_done(ccb); return; } if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { cp->phys.header.savep = NCB_SCRIPT_PHYS (np, data_in); cp->phys.header.goalp = cp->phys.header.savep +20 +segments*16; } else { /* CAM_DIR_OUT */ cp->phys.header.savep = NCB_SCRIPT_PHYS (np, data_out); cp->phys.header.goalp = cp->phys.header.savep +20 +segments*16; } } else { cp->phys.header.savep = NCB_SCRIPT_PHYS (np, no_data); cp->phys.header.goalp = cp->phys.header.savep; } cp->phys.header.lastp = cp->phys.header.savep; /*---------------------------------------------------- ** ** fill in nccb ** **---------------------------------------------------- ** ** ** physical -> virtual backlink ** Generic SCSI command */ cp->phys.header.cp = cp; /* ** Startqueue */ cp->phys.header.launch.l_paddr = NCB_SCRIPT_PHYS (np, select); cp->phys.header.launch.l_cmd = SCR_JUMP; /* ** select */ cp->phys.select.sel_id = ccb->ccb_h.target_id; cp->phys.select.sel_scntl3 = tp->tinfo.wval; cp->phys.select.sel_sxfer = tp->tinfo.sval; /* ** message */ cp->phys.smsg.addr = CCB_PHYS (cp, scsi_smsg); cp->phys.smsg.size = msglen; cp->phys.smsg2.addr = CCB_PHYS (cp, scsi_smsg2); cp->phys.smsg2.size = msglen2; /* ** command */ cp->phys.cmd.addr = vtophys (scsiio_cdb_ptr(csio)); cp->phys.cmd.size = csio->cdb_len; /* ** sense command */ cp->phys.scmd.addr = CCB_PHYS (cp, sensecmd); cp->phys.scmd.size = 6; /* ** patch requested size into sense command */ cp->sensecmd[0] = 0x03; cp->sensecmd[1] = 
ccb->ccb_h.target_lun << 5; cp->sensecmd[4] = csio->sense_len; /* ** sense data */ cp->phys.sense.addr = vtophys (&csio->sense_data); cp->phys.sense.size = csio->sense_len; /* ** status */ cp->actualquirks = QUIRK_NOMSG; cp->host_status = nego ? HS_NEGOTIATE : HS_BUSY; cp->s_status = SCSI_STATUS_ILLEGAL; cp->parity_status = 0; cp->xerr_status = XE_OK; cp->sync_status = tp->tinfo.sval; cp->nego_status = nego; cp->wide_status = tp->tinfo.wval; /*---------------------------------------------------- ** ** Critical region: start this job. ** **---------------------------------------------------- */ /* ** reselect pattern and activate this job. */ cp->jump_nccb.l_cmd = (SCR_JUMP ^ IFFALSE (DATA (cp->tag))); cp->tlimit = time_second + ccb->ccb_h.timeout / 1000 + 2; cp->magic = CCB_MAGIC; /* ** insert into start queue. */ qidx = np->squeueput + 1; if (qidx >= MAX_START) qidx = 0; np->squeue [qidx ] = NCB_SCRIPT_PHYS (np, idle); np->squeue [np->squeueput] = CCB_PHYS (cp, phys); np->squeueput = qidx; if(DEBUG_FLAGS & DEBUG_QUEUE) device_printf(np->dev, "queuepos=%d tryoffset=%d.\n", np->squeueput, (unsigned)(READSCRIPT(startpos[0]) - (NCB_SCRIPTH_PHYS (np, tryloop)))); /* ** Script processor may be waiting for reselect. ** Wake it up. 
*/ OUTB (nc_istat, SIGP); break; } case XPT_RESET_DEV: /* Bus Device Reset the specified SCSI device */ case XPT_ABORT: /* Abort the specified CCB */ /* XXX Implement */ ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); break; case XPT_SET_TRAN_SETTINGS: { struct ccb_trans_settings *cts = &ccb->cts; tcb_p tp; u_int update_type; struct ccb_trans_settings_scsi *scsi = &cts->proto_specific.scsi; struct ccb_trans_settings_spi *spi = &cts->xport_specific.spi; update_type = 0; if (cts->type == CTS_TYPE_CURRENT_SETTINGS) update_type |= NCR_TRANS_GOAL; if (cts->type == CTS_TYPE_USER_SETTINGS) update_type |= NCR_TRANS_USER; tp = &np->target[ccb->ccb_h.target_id]; /* Tag and disc enables */ if ((spi->valid & CTS_SPI_VALID_DISC) != 0) { if (update_type & NCR_TRANS_GOAL) { if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0) tp->tinfo.disc_tag |= NCR_CUR_DISCENB; else tp->tinfo.disc_tag &= ~NCR_CUR_DISCENB; } if (update_type & NCR_TRANS_USER) { if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0) tp->tinfo.disc_tag |= NCR_USR_DISCENB; else tp->tinfo.disc_tag &= ~NCR_USR_DISCENB; } } if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) { if (update_type & NCR_TRANS_GOAL) { if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) tp->tinfo.disc_tag |= NCR_CUR_TAGENB; else tp->tinfo.disc_tag &= ~NCR_CUR_TAGENB; } if (update_type & NCR_TRANS_USER) { if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) tp->tinfo.disc_tag |= NCR_USR_TAGENB; else tp->tinfo.disc_tag &= ~NCR_USR_TAGENB; } } /* Filter bus width and sync negotiation settings */ if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) { if (spi->bus_width > np->maxwide) spi->bus_width = np->maxwide; } if (((spi->valid & CTS_SPI_VALID_SYNC_RATE) != 0) || ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) != 0)) { if ((spi->valid & CTS_SPI_VALID_SYNC_RATE) != 0) { if (spi->sync_period != 0 && (spi->sync_period < np->minsync)) spi->sync_period = np->minsync; } if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) != 0) { if (spi->sync_offset == 0) spi->sync_period = 0; if 
(spi->sync_offset > np->maxoffs) spi->sync_offset = np->maxoffs; } } if ((update_type & NCR_TRANS_USER) != 0) { if ((spi->valid & CTS_SPI_VALID_SYNC_RATE) != 0) tp->tinfo.user.period = spi->sync_period; if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) != 0) tp->tinfo.user.offset = spi->sync_offset; if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) tp->tinfo.user.width = spi->bus_width; } if ((update_type & NCR_TRANS_GOAL) != 0) { if ((spi->valid & CTS_SPI_VALID_SYNC_RATE) != 0) tp->tinfo.goal.period = spi->sync_period; if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) != 0) tp->tinfo.goal.offset = spi->sync_offset; if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) tp->tinfo.goal.width = spi->bus_width; } ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); break; } case XPT_GET_TRAN_SETTINGS: /* Get default/user set transfer settings for the target */ { struct ccb_trans_settings *cts = &ccb->cts; struct ncr_transinfo *tinfo; tcb_p tp = &np->target[ccb->ccb_h.target_id]; struct ccb_trans_settings_scsi *scsi = &cts->proto_specific.scsi; struct ccb_trans_settings_spi *spi = &cts->xport_specific.spi; cts->protocol = PROTO_SCSI; cts->protocol_version = SCSI_REV_2; cts->transport = XPORT_SPI; cts->transport_version = 2; if (cts->type == CTS_TYPE_CURRENT_SETTINGS) { tinfo = &tp->tinfo.current; if (tp->tinfo.disc_tag & NCR_CUR_DISCENB) spi->flags |= CTS_SPI_FLAGS_DISC_ENB; else spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB; if (tp->tinfo.disc_tag & NCR_CUR_TAGENB) scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB; else scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB; } else { tinfo = &tp->tinfo.user; if (tp->tinfo.disc_tag & NCR_USR_DISCENB) spi->flags |= CTS_SPI_FLAGS_DISC_ENB; else spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB; if (tp->tinfo.disc_tag & NCR_USR_TAGENB) scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB; else scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB; } spi->sync_period = tinfo->period; spi->sync_offset = tinfo->offset; spi->bus_width = tinfo->width; spi->valid = CTS_SPI_VALID_SYNC_RATE | CTS_SPI_VALID_SYNC_OFFSET | 
CTS_SPI_VALID_BUS_WIDTH | CTS_SPI_VALID_DISC; scsi->valid = CTS_SCSI_VALID_TQ; ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); break; } case XPT_CALC_GEOMETRY: { /* XXX JGibbs - I'm sure the NCR uses a different strategy, * but it should be able to deal with Adaptec * geometry too. */ cam_calc_geometry(&ccb->ccg, /*extended*/1); xpt_done(ccb); break; } case XPT_RESET_BUS: /* Reset the specified SCSI bus */ { OUTB (nc_scntl1, CRST); ccb->ccb_h.status = CAM_REQ_CMP; DELAY(10000); /* Wait until our interrupt handler sees it */ xpt_done(ccb); break; } case XPT_TERM_IO: /* Terminate the I/O process */ /* XXX Implement */ ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); break; case XPT_PATH_INQ: /* Path routing inquiry */ { struct ccb_pathinq *cpi = &ccb->cpi; cpi->version_num = 1; /* XXX??? */ cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE; if ((np->features & FE_WIDE) != 0) cpi->hba_inquiry |= PI_WIDE_16; cpi->target_sprt = 0; cpi->hba_misc = 0; cpi->hba_eng_cnt = 0; cpi->max_target = (np->features & FE_WIDE) ? 15 : 7; cpi->max_lun = MAX_LUN - 1; cpi->initiator_id = np->myaddr; cpi->bus_id = cam_sim_bus(sim); cpi->base_transfer_speed = 3300; strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); strlcpy(cpi->hba_vid, "Symbios", HBA_IDLEN); strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); cpi->unit_number = cam_sim_unit(sim); - cpi->transport = XPORT_SPI; - cpi->transport_version = 2; - cpi->protocol = PROTO_SCSI; - cpi->protocol_version = SCSI_REV_2; + cpi->transport = XPORT_SPI; + cpi->transport_version = 2; + cpi->protocol = PROTO_SCSI; + cpi->protocol_version = SCSI_REV_2; cpi->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); break; } default: ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); break; } } /*========================================================== ** ** ** Complete execution of a SCSI command. ** Signal completion to the generic SCSI driver. 
** ** **========================================================== */ static void ncr_complete (ncb_p np, nccb_p cp) { union ccb *ccb; tcb_p tp; /* ** Sanity check */ if (!cp || (cp->magic!=CCB_MAGIC) || !cp->ccb) return; cp->magic = 1; cp->tlimit= 0; /* ** No Reselect anymore. */ cp->jump_nccb.l_cmd = (SCR_JUMP); /* ** No starting. */ cp->phys.header.launch.l_paddr= NCB_SCRIPT_PHYS (np, idle); /* ** timestamp */ ncb_profile (np, cp); if (DEBUG_FLAGS & DEBUG_TINY) printf ("CCB=%x STAT=%x/%x\n", (int)(intptr_t)cp & 0xfff, cp->host_status,cp->s_status); ccb = cp->ccb; cp->ccb = NULL; tp = &np->target[ccb->ccb_h.target_id]; /* ** We do not queue more than 1 nccb per target ** with negotiation at any time. If this nccb was ** used for negotiation, clear this info in the tcb. */ if (cp == tp->nego_cp) tp->nego_cp = NULL; /* ** Check for parity errors. */ /* XXX JGibbs - What about reporting them??? */ if (cp->parity_status) { PRINT_ADDR(ccb); printf ("%d parity error(s), fallback.\n", cp->parity_status); /* ** fallback to asynch transfer. */ tp->tinfo.goal.period = 0; tp->tinfo.goal.offset = 0; } /* ** Check for extended errors. */ if (cp->xerr_status != XE_OK) { PRINT_ADDR(ccb); switch (cp->xerr_status) { case XE_EXTRA_DATA: printf ("extraneous data discarded.\n"); break; case XE_BAD_PHASE: printf ("illegal scsi phase (4/5).\n"); break; default: printf ("extended error %d.\n", cp->xerr_status); break; } if (cp->host_status==HS_COMPLETE) cp->host_status = HS_FAIL; } /* ** Check the status. */ if (cp->host_status == HS_COMPLETE) { if (cp->s_status == SCSI_STATUS_OK) { /* ** All went well. */ /* XXX JGibbs - Properly calculate residual */ tp->bytes += ccb->csio.dxfer_len; tp->transfers ++; ccb->ccb_h.status = CAM_REQ_CMP; } else if ((cp->s_status & SCSI_STATUS_SENSE) != 0) { /* * XXX Could be TERMIO too. Should record * original status. 
*/ ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND; cp->s_status &= ~SCSI_STATUS_SENSE; if (cp->s_status == SCSI_STATUS_OK) { ccb->ccb_h.status = CAM_AUTOSNS_VALID|CAM_SCSI_STATUS_ERROR; } else { ccb->ccb_h.status = CAM_AUTOSENSE_FAIL; } } else { ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR; ccb->csio.scsi_status = cp->s_status; } } else if (cp->host_status == HS_SEL_TIMEOUT) { /* ** Device failed selection */ ccb->ccb_h.status = CAM_SEL_TIMEOUT; } else if (cp->host_status == HS_TIMEOUT) { /* ** No response */ ccb->ccb_h.status = CAM_CMD_TIMEOUT; } else if (cp->host_status == HS_STALL) { ccb->ccb_h.status = CAM_REQUEUE_REQ; } else { /* ** Other protocol messes */ PRINT_ADDR(ccb); printf ("COMMAND FAILED (%x %x) @%p.\n", cp->host_status, cp->s_status, cp); ccb->ccb_h.status = CAM_CMD_TIMEOUT; } if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { xpt_freeze_devq(ccb->ccb_h.path, /*count*/1); ccb->ccb_h.status |= CAM_DEV_QFRZN; } /* ** Free this nccb */ ncr_free_nccb (np, cp); /* ** signal completion to generic driver. */ xpt_done (ccb); } /*========================================================== ** ** ** Signal all (or one) control block done. ** ** **========================================================== */ static void ncr_wakeup (ncb_p np, u_long code) { /* ** Starting at the default nccb and following ** the links, complete all jobs with a ** host_status greater than "disconnect". ** ** If the "code" parameter is not zero, ** complete all jobs that are not IDLE. 
*/ nccb_p cp = np->link_nccb; while (cp) { switch (cp->host_status) { case HS_IDLE: break; case HS_DISCONNECT: if(DEBUG_FLAGS & DEBUG_TINY) printf ("D"); /* FALLTHROUGH */ case HS_BUSY: case HS_NEGOTIATE: if (!code) break; cp->host_status = code; /* FALLTHROUGH */ default: ncr_complete (np, cp); break; } cp = cp -> link_nccb; } } static void ncr_freeze_devq (ncb_p np, struct cam_path *path) { nccb_p cp; int i; int count; int firstskip; /* ** Starting at the first nccb and following ** the links, complete all jobs that match ** the passed in path and are in the start queue. */ cp = np->link_nccb; count = 0; firstskip = 0; while (cp) { switch (cp->host_status) { case HS_BUSY: case HS_NEGOTIATE: if ((cp->phys.header.launch.l_paddr == NCB_SCRIPT_PHYS (np, select)) && (xpt_path_comp(path, cp->ccb->ccb_h.path) >= 0)) { /* Mark for removal from the start queue */ for (i = 1; i < MAX_START; i++) { int idx; idx = np->squeueput - i; if (idx < 0) idx = MAX_START + idx; if (np->squeue[idx] == CCB_PHYS(cp, phys)) { np->squeue[idx] = NCB_SCRIPT_PHYS (np, skip); if (i > firstskip) firstskip = i; break; } } cp->host_status=HS_STALL; ncr_complete (np, cp); count++; } break; default: break; } cp = cp->link_nccb; } if (count > 0) { int j; int bidx; /* Compress the start queue */ j = 0; bidx = np->squeueput; i = np->squeueput - firstskip; if (i < 0) i = MAX_START + i; for (;;) { bidx = i - j; if (bidx < 0) bidx = MAX_START + bidx; if (np->squeue[i] == NCB_SCRIPT_PHYS (np, skip)) { j++; } else if (j != 0) { np->squeue[bidx] = np->squeue[i]; if (np->squeue[bidx] == NCB_SCRIPT_PHYS(np, idle)) break; } i = (i + 1) % MAX_START; } np->squeueput = bidx; } } /*========================================================== ** ** ** Start NCR chip. ** ** **========================================================== */ static void ncr_init(ncb_p np, char * msg, u_long code) { int i; /* ** Reset chip. */ OUTB (nc_istat, SRST); DELAY (1000); OUTB (nc_istat, 0); /* ** Message. 
*/ if (msg) device_printf(np->dev, "restart (%s).\n", msg); /* ** Clear Start Queue */ for (i=0;i squeue [i] = NCB_SCRIPT_PHYS (np, idle); /* ** Start at first entry. */ np->squeueput = 0; WRITESCRIPT(startpos[0], NCB_SCRIPTH_PHYS (np, tryloop)); WRITESCRIPT(start0 [0], SCR_INT ^ IFFALSE (0)); /* ** Wakeup all pending jobs. */ ncr_wakeup (np, code); /* ** Init chip. */ OUTB (nc_istat, 0x00 ); /* Remove Reset, abort ... */ OUTB (nc_scntl0, 0xca ); /* full arb., ena parity, par->ATN */ OUTB (nc_scntl1, 0x00 ); /* odd parity, and remove CRST!! */ ncr_selectclock(np, np->rv_scntl3); /* Select SCSI clock */ OUTB (nc_scid , RRE|np->myaddr);/* host adapter SCSI address */ OUTW (nc_respid, 1ul<myaddr);/* id to respond to */ OUTB (nc_istat , SIGP ); /* Signal Process */ OUTB (nc_dmode , np->rv_dmode); /* XXX modify burstlen ??? */ OUTB (nc_dcntl , np->rv_dcntl); OUTB (nc_ctest3, np->rv_ctest3); OUTB (nc_ctest5, np->rv_ctest5); OUTB (nc_ctest4, np->rv_ctest4);/* enable master parity checking */ OUTB (nc_stest2, np->rv_stest2|EXT); /* Extended Sreq/Sack filtering */ OUTB (nc_stest3, TE ); /* TolerANT enable */ OUTB (nc_stime0, 0x0b ); /* HTH = disabled, STO = 0.1 sec. */ if (bootverbose >= 2) { printf ("\tACTUAL values:SCNTL3:%02x DMODE:%02x DCNTL:%02x\n", np->rv_scntl3, np->rv_dmode, np->rv_dcntl); printf ("\t CTEST3:%02x CTEST4:%02x CTEST5:%02x\n", np->rv_ctest3, np->rv_ctest4, np->rv_ctest5); } /* ** Enable GPIO0 pin for writing if LED support. */ if (np->features & FE_LED0) { OUTOFFB (nc_gpcntl, 0x01); } /* ** Fill in target structure. */ for (i=0;itarget[i]; tp->tinfo.sval = 0; tp->tinfo.wval = np->rv_scntl3; tp->tinfo.current.period = 0; tp->tinfo.current.offset = 0; tp->tinfo.current.width = MSG_EXT_WDTR_BUS_8_BIT; } /* ** enable ints */ OUTW (nc_sien , STO|HTH|MA|SGE|UDC|RST); OUTB (nc_dien , MDPE|BF|ABRT|SSI|SIR|IID); /* ** Start script processor. 
*/ OUTL (nc_dsp, NCB_SCRIPT_PHYS (np, start)); /* * Notify the XPT of the event */ if (code == HS_RESET) xpt_async(AC_BUS_RESET, np->path, NULL); } static void ncr_poll(struct cam_sim *sim) { ncr_intr_locked(cam_sim_softc(sim)); } /*========================================================== ** ** Get clock factor and sync divisor for a given ** synchronous factor period. ** Returns the clock factor (in sxfer) and scntl3 ** synchronous divisor field. ** **========================================================== */ static void ncr_getsync(ncb_p np, u_char sfac, u_char *fakp, u_char *scntl3p) { u_long clk = np->clock_khz; /* SCSI clock frequency in kHz */ int div = np->clock_divn; /* Number of divisors supported */ u_long fak; /* Sync factor in sxfer */ u_long per; /* Period in tenths of ns */ u_long kpc; /* (per * clk) */ /* ** Compute the synchronous period in tenths of nano-seconds */ if (sfac <= 10) per = 250; else if (sfac == 11) per = 303; else if (sfac == 12) per = 500; else per = 40 * sfac; /* ** Look for the greatest clock divisor that allows an ** input speed faster than the period. */ kpc = per * clk; while (--div >= 0) if (kpc >= (div_10M[div] * 4)) break; /* ** Calculate the lowest clock factor that allows an output ** speed not faster than the period. */ fak = (kpc - 1) / div_10M[div] + 1; #if 0 /* You can #if 1 if you think this optimization is useful */ per = (fak * div_10M[div]) / clk; /* ** Why not to try the immediate lower divisor and to choose ** the one that allows the fastest output speed ? ** We dont want input speed too much greater than output speed. */ if (div >= 1 && fak < 6) { u_long fak2, per2; fak2 = (kpc - 1) / div_10M[div-1] + 1; per2 = (fak2 * div_10M[div-1]) / clk; if (per2 < per && fak2 <= 6) { fak = fak2; per = per2; --div; } } #endif if (fak < 4) fak = 4; /* Should never happen, too bad ... */ /* ** Compute and return sync parameters for the ncr */ *fakp = fak - 4; *scntl3p = ((div+1) << 4) + (sfac < 25 ? 
0x80 : 0); } /*========================================================== ** ** Switch sync mode for current job and its target ** **========================================================== */ static void ncr_setsync(ncb_p np, nccb_p cp, u_char scntl3, u_char sxfer, u_char period) { union ccb *ccb; struct ccb_trans_settings neg; tcb_p tp; int div; u_int target = INB (nc_sdid) & 0x0f; u_int period_10ns; assert (cp); if (!cp) return; ccb = cp->ccb; assert (ccb); if (!ccb) return; assert (target == ccb->ccb_h.target_id); tp = &np->target[target]; if (!scntl3 || !(sxfer & 0x1f)) scntl3 = np->rv_scntl3; scntl3 = (scntl3 & 0xf0) | (tp->tinfo.wval & EWS) | (np->rv_scntl3 & 0x07); /* ** Deduce the value of controller sync period from scntl3. ** period is in tenths of nano-seconds. */ div = ((scntl3 >> 4) & 0x7); if ((sxfer & 0x1f) && div) period_10ns = (((sxfer>>5)+4)*div_10M[div-1])/np->clock_khz; else period_10ns = 0; tp->tinfo.goal.period = period; tp->tinfo.goal.offset = sxfer & 0x1f; tp->tinfo.current.period = period; tp->tinfo.current.offset = sxfer & 0x1f; /* ** Stop there if sync parameters are unchanged */ if (tp->tinfo.sval == sxfer && tp->tinfo.wval == scntl3) return; tp->tinfo.sval = sxfer; tp->tinfo.wval = scntl3; if (sxfer & 0x1f) { /* ** Disable extended Sreq/Sack filtering */ if (period_10ns <= 2000) OUTOFFB (nc_stest2, EXT); } /* ** Tell the SCSI layer about the ** new transfer parameters. 
*/ memset(&neg, 0, sizeof (neg)); neg.protocol = PROTO_SCSI; neg.protocol_version = SCSI_REV_2; neg.transport = XPORT_SPI; neg.transport_version = 2; neg.xport_specific.spi.sync_period = period; neg.xport_specific.spi.sync_offset = sxfer & 0x1f; neg.xport_specific.spi.valid = CTS_SPI_VALID_SYNC_RATE | CTS_SPI_VALID_SYNC_OFFSET; xpt_setup_ccb(&neg.ccb_h, ccb->ccb_h.path, /*priority*/1); xpt_async(AC_TRANSFER_NEG, ccb->ccb_h.path, &neg); /* ** set actual value and sync_status */ OUTB (nc_sxfer, sxfer); np->sync_st = sxfer; OUTB (nc_scntl3, scntl3); np->wide_st = scntl3; /* ** patch ALL nccbs of this target. */ for (cp = np->link_nccb; cp; cp = cp->link_nccb) { if (!cp->ccb) continue; if (cp->ccb->ccb_h.target_id != target) continue; cp->sync_status = sxfer; cp->wide_status = scntl3; } } /*========================================================== ** ** Switch wide mode for current job and its target ** SCSI specs say: a SCSI device that accepts a WDTR ** message shall reset the synchronous agreement to ** asynchronous mode. ** **========================================================== */ static void ncr_setwide (ncb_p np, nccb_p cp, u_char wide, u_char ack) { union ccb *ccb; struct ccb_trans_settings neg; u_int target = INB (nc_sdid) & 0x0f; tcb_p tp; u_char scntl3; u_char sxfer; assert (cp); if (!cp) return; ccb = cp->ccb; assert (ccb); if (!ccb) return; assert (target == ccb->ccb_h.target_id); tp = &np->target[target]; tp->tinfo.current.width = wide; tp->tinfo.goal.width = wide; tp->tinfo.current.period = 0; tp->tinfo.current.offset = 0; scntl3 = (tp->tinfo.wval & (~EWS)) | (wide ? EWS : 0); sxfer = ack ? 
0 : tp->tinfo.sval; /* ** Stop there if sync/wide parameters are unchanged */ if (tp->tinfo.sval == sxfer && tp->tinfo.wval == scntl3) return; tp->tinfo.sval = sxfer; tp->tinfo.wval = scntl3; /* Tell the SCSI layer about the new transfer params */ memset(&neg, 0, sizeof (neg)); neg.protocol = PROTO_SCSI; neg.protocol_version = SCSI_REV_2; neg.transport = XPORT_SPI; neg.transport_version = 2; neg.xport_specific.spi.bus_width = (scntl3 & EWS) ? MSG_EXT_WDTR_BUS_16_BIT : MSG_EXT_WDTR_BUS_8_BIT; neg.xport_specific.spi.sync_period = 0; neg.xport_specific.spi.sync_offset = 0; neg.xport_specific.spi.valid = CTS_SPI_VALID_SYNC_RATE | CTS_SPI_VALID_SYNC_OFFSET | CTS_SPI_VALID_BUS_WIDTH; xpt_setup_ccb(&neg.ccb_h, ccb->ccb_h.path, /*priority*/1); xpt_async(AC_TRANSFER_NEG, ccb->ccb_h.path, &neg); /* ** set actual value and sync_status */ OUTB (nc_sxfer, sxfer); np->sync_st = sxfer; OUTB (nc_scntl3, scntl3); np->wide_st = scntl3; /* ** patch ALL nccbs of this target. */ for (cp = np->link_nccb; cp; cp = cp->link_nccb) { if (!cp->ccb) continue; if (cp->ccb->ccb_h.target_id != target) continue; cp->sync_status = sxfer; cp->wide_status = scntl3; } } /*========================================================== ** ** ** ncr timeout handler. ** ** **========================================================== ** ** Misused to keep the driver running when ** interrupts are not configured correctly. ** **---------------------------------------------------------- */ static void ncr_timeout (void *arg) { ncb_p np = arg; time_t thistime = time_second; ticks_t step = np->ticks; u_long count = 0; long signed t; nccb_p cp; mtx_assert(&np->lock, MA_OWNED); if (np->lasttime != thistime) { np->lasttime = thistime; /*---------------------------------------------------- ** ** handle ncr chip timeouts ** ** Assumption: ** We have a chance to arbitrate for the ** SCSI bus at least every 10 seconds. 
** **---------------------------------------------------- */ t = thistime - np->heartbeat; if (t<2) np->latetime=0; else np->latetime++; if (np->latetime>2) { /* ** If there are no requests, the script ** processor will sleep on SEL_WAIT_RESEL. ** But we have to check whether it died. ** Let's try to wake it up. */ OUTB (nc_istat, SIGP); } /*---------------------------------------------------- ** ** handle nccb timeouts ** **---------------------------------------------------- */ for (cp=np->link_nccb; cp; cp=cp->link_nccb) { /* ** look for timed out nccbs. */ if (!cp->host_status) continue; count++; if (cp->tlimit > thistime) continue; /* ** Disable reselect. ** Remove it from startqueue. */ cp->jump_nccb.l_cmd = (SCR_JUMP); if (cp->phys.header.launch.l_paddr == NCB_SCRIPT_PHYS (np, select)) { device_printf(np->dev, "timeout nccb=%p (skip)\n", cp); cp->phys.header.launch.l_paddr = NCB_SCRIPT_PHYS (np, skip); } switch (cp->host_status) { case HS_BUSY: case HS_NEGOTIATE: /* FALLTHROUGH */ case HS_DISCONNECT: cp->host_status=HS_TIMEOUT; } cp->tag = 0; /* ** wakeup this nccb. */ ncr_complete (np, cp); } } callout_reset(&np->timer, step ? step : 1, ncr_timeout, np); if (INB(nc_istat) & (INTF|SIP|DIP)) { /* ** Process pending interrupts. */ if (DEBUG_FLAGS & DEBUG_TINY) printf ("{"); ncr_exception (np); if (DEBUG_FLAGS & DEBUG_TINY) printf ("}"); } } /*========================================================== ** ** log message for real hard errors ** ** "ncr0 targ 0?: ERROR (ds:si) (so-si-sd) (sxfer/scntl3) @ name (dsp:dbc)." ** " reg: r0 r1 r2 r3 r4 r5 r6 ..... rf." ** ** exception register: ** ds: dstat ** si: sist ** ** SCSI bus lines: ** so: control lines as driver by NCR. ** si: control lines as seen by NCR. ** sd: scsi data lines as seen by NCR. ** ** wide/fastmode: ** sxfer: (see the manual) ** scntl3: (see the manual) ** ** current script command: ** dsp: script address (relative to start of script). ** dbc: first word of script command. 
** ** First 16 register of the chip: ** r0..rf ** **========================================================== */ static void ncr_log_hard_error(ncb_p np, u_short sist, u_char dstat) { u_int32_t dsp; int script_ofs; int script_size; char *script_name; u_char *script_base; int i; dsp = INL (nc_dsp); if (np->p_script < dsp && dsp <= np->p_script + sizeof(struct script)) { script_ofs = dsp - np->p_script; script_size = sizeof(struct script); script_base = (u_char *) np->script; script_name = "script"; } else if (np->p_scripth < dsp && dsp <= np->p_scripth + sizeof(struct scripth)) { script_ofs = dsp - np->p_scripth; script_size = sizeof(struct scripth); script_base = (u_char *) np->scripth; script_name = "scripth"; } else { script_ofs = dsp; script_size = 0; script_base = NULL; script_name = "mem"; } device_printf(np->dev, "%d: ERROR (%x:%x) (%x-%x-%x) (%x/%x) @ (%s %x:%08x).\n", (unsigned)INB (nc_sdid)&0x0f, dstat, sist, (unsigned)INB (nc_socl), (unsigned)INB (nc_sbcl), (unsigned)INB (nc_sbdl), (unsigned)INB (nc_sxfer),(unsigned)INB (nc_scntl3), script_name, script_ofs, (unsigned)INL (nc_dbc)); if (((script_ofs & 3) == 0) && (unsigned)script_ofs < script_size) { device_printf(np->dev, "script cmd = %08x\n", (int)READSCRIPT_OFF(script_base, script_ofs)); } device_printf(np->dev, "regdump:"); for (i=0; i<16;i++) printf (" %02x", (unsigned)INB_OFF(i)); printf (".\n"); } /*========================================================== ** ** ** ncr chip exception handler. ** ** **========================================================== */ static void ncr_exception (ncb_p np) { u_char istat, dstat; u_short sist; /* ** interrupt on the fly ? */ while ((istat = INB (nc_istat)) & INTF) { if (DEBUG_FLAGS & DEBUG_TINY) printf ("F "); OUTB (nc_istat, INTF); np->profile.num_fly++; ncr_wakeup (np, 0); } if (!(istat & (SIP|DIP))) { return; } /* ** Steinbach's Guideline for Systems Programming: ** Never test for an error condition you don't know how to handle. 
*/ sist = (istat & SIP) ? INW (nc_sist) : 0; dstat = (istat & DIP) ? INB (nc_dstat) : 0; np->profile.num_int++; if (DEBUG_FLAGS & DEBUG_TINY) printf ("<%d|%x:%x|%x:%x>", INB(nc_scr0), dstat,sist, (unsigned)INL(nc_dsp), (unsigned)INL(nc_dbc)); if ((dstat==DFE) && (sist==PAR)) return; /*========================================================== ** ** First the normal cases. ** **========================================================== */ /*------------------------------------------- ** SCSI reset **------------------------------------------- */ if (sist & RST) { ncr_init (np, bootverbose ? "scsi reset" : NULL, HS_RESET); return; } /*------------------------------------------- ** selection timeout ** ** IID excluded from dstat mask! ** (chip bug) **------------------------------------------- */ if ((sist & STO) && !(sist & (GEN|HTH|MA|SGE|UDC|RST|PAR)) && !(dstat & (MDPE|BF|ABRT|SIR))) { ncr_int_sto (np); return; } /*------------------------------------------- ** Phase mismatch. **------------------------------------------- */ if ((sist & MA) && !(sist & (STO|GEN|HTH|SGE|UDC|RST|PAR)) && !(dstat & (MDPE|BF|ABRT|SIR|IID))) { ncr_int_ma (np, dstat); return; } /*---------------------------------------- ** move command with length 0 **---------------------------------------- */ if ((dstat & IID) && !(sist & (STO|GEN|HTH|MA|SGE|UDC|RST|PAR)) && !(dstat & (MDPE|BF|ABRT|SIR)) && ((INL(nc_dbc) & 0xf8000000) == SCR_MOVE_TBL)) { /* ** Target wants more data than available. ** The "no_data" script will do it. 
*/ OUTL (nc_dsp, NCB_SCRIPT_PHYS (np, no_data)); return; } /*------------------------------------------- ** Programmed interrupt **------------------------------------------- */ if ((dstat & SIR) && !(sist & (STO|GEN|HTH|MA|SGE|UDC|RST|PAR)) && !(dstat & (MDPE|BF|ABRT|IID)) && (INB(nc_dsps) <= SIR_MAX)) { ncr_int_sir (np); return; } /*======================================== ** log message for real hard errors **======================================== */ ncr_log_hard_error(np, sist, dstat); /*======================================== ** do the register dump **======================================== */ if (time_second - np->regtime > 10) { int i; np->regtime = time_second; for (i=0; iregdump); i++) ((volatile char*)&np->regdump)[i] = INB_OFF(i); np->regdump.nc_dstat = dstat; np->regdump.nc_sist = sist; } /*---------------------------------------- ** clean up the dma fifo **---------------------------------------- */ if ( (INB(nc_sstat0) & (ILF|ORF|OLF) ) || (INB(nc_sstat1) & (FF3210) ) || (INB(nc_sstat2) & (ILF1|ORF1|OLF1)) || /* wide .. 
*/ !(dstat & DFE)) { device_printf(np->dev, "have to clear fifos.\n"); OUTB (nc_stest3, TE|CSF); /* clear scsi fifo */ OUTB (nc_ctest3, np->rv_ctest3 | CLF); /* clear dma fifo */ } /*---------------------------------------- ** handshake timeout **---------------------------------------- */ if (sist & HTH) { device_printf(np->dev, "handshake timeout\n"); OUTB (nc_scntl1, CRST); DELAY (1000); OUTB (nc_scntl1, 0x00); OUTB (nc_scr0, HS_FAIL); OUTL (nc_dsp, NCB_SCRIPT_PHYS (np, cleanup)); return; } /*---------------------------------------- ** unexpected disconnect **---------------------------------------- */ if ((sist & UDC) && !(sist & (STO|GEN|HTH|MA|SGE|RST|PAR)) && !(dstat & (MDPE|BF|ABRT|SIR|IID))) { OUTB (nc_scr0, HS_UNEXPECTED); OUTL (nc_dsp, NCB_SCRIPT_PHYS (np, cleanup)); return; } /*---------------------------------------- ** cannot disconnect **---------------------------------------- */ if ((dstat & IID) && !(sist & (STO|GEN|HTH|MA|SGE|UDC|RST|PAR)) && !(dstat & (MDPE|BF|ABRT|SIR)) && ((INL(nc_dbc) & 0xf8000000) == SCR_WAIT_DISC)) { /* ** Unexpected data cycle while waiting for disconnect. */ if (INB(nc_sstat2) & LDSC) { /* ** It's an early reconnect. ** Let's continue ... */ OUTB (nc_dcntl, np->rv_dcntl | STD); /* ** info message */ device_printf(np->dev, "INFO: LDSC while IID.\n"); return; } device_printf(np->dev, "target %d doesn't release the bus.\n", INB (nc_sdid)&0x0f); /* ** return without restarting the NCR. ** timeout will do the real work. */ return; } /*---------------------------------------- ** single step **---------------------------------------- */ if ((dstat & SSI) && !(sist & (STO|GEN|HTH|MA|SGE|UDC|RST|PAR)) && !(dstat & (MDPE|BF|ABRT|SIR|IID))) { OUTB (nc_dcntl, np->rv_dcntl | STD); return; } /* ** @RECOVER@ HTH, SGE, ABRT. ** ** We should try to recover from these interrupts. ** They may occur if there are problems with synch transfers, or ** if targets are switched on or off while the driver is running. 
*/ if (sist & SGE) { /* clear scsi offsets */ OUTB (nc_ctest3, np->rv_ctest3 | CLF); } /* ** Freeze controller to be able to read the messages. */ if (DEBUG_FLAGS & DEBUG_FREEZE) { int i; unsigned char val; for (i=0; i<0x60; i++) { switch (i%16) { case 0: device_printf(np->dev, "reg[%d0]: ", i / 16); break; case 4: case 8: case 12: printf (" "); break; } val = bus_read_1(np->reg_res, i); printf (" %x%x", val/16, val%16); if (i%16==15) printf (".\n"); } callout_stop(&np->timer); device_printf(np->dev, "halted!\n"); /* ** don't restart controller ... */ OUTB (nc_istat, SRST); return; } #ifdef NCR_FREEZE /* ** Freeze system to be able to read the messages. */ printf ("ncr: fatal error: system halted - press reset to reboot ..."); for (;;); #endif /* ** sorry, have to kill ALL jobs ... */ ncr_init (np, "fatal error", HS_FAIL); } /*========================================================== ** ** ncr chip exception handler for selection timeout ** **========================================================== ** ** There seems to be a bug in the 53c810. ** Although a STO-Interrupt is pending, ** it continues executing script commands. ** But it will fail and interrupt (IID) on ** the next instruction where it's looking ** for a valid phase. ** **---------------------------------------------------------- */ static void ncr_int_sto (ncb_p np) { u_long dsa, scratcha, diff; nccb_p cp; if (DEBUG_FLAGS & DEBUG_TINY) printf ("T"); /* ** look for nccb and set the status. 
*/ dsa = INL (nc_dsa); cp = np->link_nccb; while (cp && (CCB_PHYS (cp, phys) != dsa)) cp = cp->link_nccb; if (cp) { cp-> host_status = HS_SEL_TIMEOUT; ncr_complete (np, cp); } /* ** repair start queue */ scratcha = INL (nc_scratcha); diff = scratcha - NCB_SCRIPTH_PHYS (np, tryloop); /* assert ((diff <= MAX_START * 20) && !(diff % 20));*/ if ((diff <= MAX_START * 20) && !(diff % 20)) { WRITESCRIPT(startpos[0], scratcha); OUTL (nc_dsp, NCB_SCRIPT_PHYS (np, start)); return; } ncr_init (np, "selection timeout", HS_FAIL); } /*========================================================== ** ** ** ncr chip exception handler for phase errors. ** ** **========================================================== ** ** We have to construct a new transfer descriptor, ** to transfer the rest of the current block. ** **---------------------------------------------------------- */ static void ncr_int_ma (ncb_p np, u_char dstat) { u_int32_t dbc; u_int32_t rest; u_int32_t dsa; u_int32_t dsp; u_int32_t nxtdsp; volatile void *vdsp_base; size_t vdsp_off; u_int32_t oadr, olen; u_int32_t *tblp, *newcmd; u_char cmd, sbcl, ss0, ss2, ctest5; u_short delta; nccb_p cp; dsp = INL (nc_dsp); dsa = INL (nc_dsa); dbc = INL (nc_dbc); ss0 = INB (nc_sstat0); ss2 = INB (nc_sstat2); sbcl= INB (nc_sbcl); cmd = dbc >> 24; rest= dbc & 0xffffff; ctest5 = (np->rv_ctest5 & DFS) ? INB (nc_ctest5) : 0; if (ctest5 & DFS) delta=(((ctest5<<8) | (INB (nc_dfifo) & 0xff)) - rest) & 0x3ff; else delta=(INB (nc_dfifo) - rest) & 0x7f; /* ** The data in the dma fifo has not been transferred to ** the target -> add the amount to the rest ** and clear the data. ** Check the sstat2 register in case of wide transfer. 
*/ if (!(dstat & DFE)) rest += delta; if (ss0 & OLF) rest++; if (ss0 & ORF) rest++; if (INB(nc_scntl3) & EWS) { if (ss2 & OLF1) rest++; if (ss2 & ORF1) rest++; } OUTB (nc_ctest3, np->rv_ctest3 | CLF); /* clear dma fifo */ OUTB (nc_stest3, TE|CSF); /* clear scsi fifo */ /* ** locate matching cp */ cp = np->link_nccb; while (cp && (CCB_PHYS (cp, phys) != dsa)) cp = cp->link_nccb; if (!cp) { device_printf(np->dev, "SCSI phase error fixup: CCB already dequeued (%p)\n", (void *)np->header.cp); return; } if (cp != np->header.cp) { device_printf(np->dev, "SCSI phase error fixup: CCB address mismatch " "(%p != %p) np->nccb = %p\n", (void *)cp, (void *)np->header.cp, (void *)np->link_nccb); /* return;*/ } /* ** find the interrupted script command, ** and the address at which to continue. */ if (dsp == vtophys (&cp->patch[2])) { vdsp_base = cp; vdsp_off = offsetof(struct nccb, patch[0]); nxtdsp = READSCRIPT_OFF(vdsp_base, vdsp_off + 3*4); } else if (dsp == vtophys (&cp->patch[6])) { vdsp_base = cp; vdsp_off = offsetof(struct nccb, patch[4]); nxtdsp = READSCRIPT_OFF(vdsp_base, vdsp_off + 3*4); } else if (dsp > np->p_script && dsp <= np->p_script + sizeof(struct script)) { vdsp_base = np->script; vdsp_off = dsp - np->p_script - 8; nxtdsp = dsp; } else { vdsp_base = np->scripth; vdsp_off = dsp - np->p_scripth - 8; nxtdsp = dsp; } /* ** log the information */ if (DEBUG_FLAGS & (DEBUG_TINY|DEBUG_PHASE)) { printf ("P%x%x ",cmd&7, sbcl&7); printf ("RL=%d D=%d SS0=%x ", (unsigned) rest, (unsigned) delta, ss0); } if (DEBUG_FLAGS & DEBUG_PHASE) { printf ("\nCP=%p CP2=%p DSP=%x NXT=%x VDSP=%p CMD=%x ", cp, np->header.cp, dsp, nxtdsp, (volatile char*)vdsp_base+vdsp_off, cmd); } /* ** get old startaddress and old length. 
*/ oadr = READSCRIPT_OFF(vdsp_base, vdsp_off + 1*4); if (cmd & 0x10) { /* Table indirect */ tblp = (u_int32_t *) ((char*) &cp->phys + oadr); olen = tblp[0]; oadr = tblp[1]; } else { tblp = (u_int32_t *) 0; olen = READSCRIPT_OFF(vdsp_base, vdsp_off) & 0xffffff; } if (DEBUG_FLAGS & DEBUG_PHASE) { printf ("OCMD=%x\nTBLP=%p OLEN=%lx OADR=%lx\n", (unsigned) (READSCRIPT_OFF(vdsp_base, vdsp_off) >> 24), (void *) tblp, (u_long) olen, (u_long) oadr); } /* ** if old phase not dataphase, leave here. */ if (cmd != (READSCRIPT_OFF(vdsp_base, vdsp_off) >> 24)) { PRINT_ADDR(cp->ccb); printf ("internal error: cmd=%02x != %02x=(vdsp[0] >> 24)\n", (unsigned)cmd, (unsigned)READSCRIPT_OFF(vdsp_base, vdsp_off) >> 24); return; } if (cmd & 0x06) { PRINT_ADDR(cp->ccb); printf ("phase change %x-%x %d@%08x resid=%d.\n", cmd&7, sbcl&7, (unsigned)olen, (unsigned)oadr, (unsigned)rest); OUTB (nc_dcntl, np->rv_dcntl | STD); return; } /* ** choose the correct patch area. ** if savep points to one, choose the other. */ newcmd = cp->patch; if (cp->phys.header.savep == vtophys (newcmd)) newcmd+=4; /* ** fillin the commands */ newcmd[0] = ((cmd & 0x0f) << 24) | rest; newcmd[1] = oadr + olen - rest; newcmd[2] = SCR_JUMP; newcmd[3] = nxtdsp; if (DEBUG_FLAGS & DEBUG_PHASE) { PRINT_ADDR(cp->ccb); printf ("newcmd[%d] %x %x %x %x.\n", (int)(newcmd - cp->patch), (unsigned)newcmd[0], (unsigned)newcmd[1], (unsigned)newcmd[2], (unsigned)newcmd[3]); } /* ** fake the return address (to the patch). ** and restart script processor at dispatcher. */ np->profile.num_break++; OUTL (nc_temp, vtophys (newcmd)); if ((cmd & 7) == 0) OUTL (nc_dsp, NCB_SCRIPT_PHYS (np, dispatch)); else OUTL (nc_dsp, NCB_SCRIPT_PHYS (np, checkatn)); } /*========================================================== ** ** ** ncr chip exception handler for programmed interrupts. 
** ** **========================================================== */ static int ncr_show_msg (u_char * msg) { u_char i; printf ("%x",*msg); if (*msg==MSG_EXTENDED) { for (i=1;i<8;i++) { if (i-1>msg[1]) break; printf ("-%x",msg[i]); } return (i+1); } else if ((*msg & 0xf0) == 0x20) { printf ("-%x",msg[1]); return (2); } return (1); } static void ncr_int_sir (ncb_p np) { u_char scntl3; u_char chg, ofs, per, fak, wide; u_char num = INB (nc_dsps); nccb_p cp = NULL; u_long dsa; u_int target = INB (nc_sdid) & 0x0f; tcb_p tp = &np->target[target]; int i; if (DEBUG_FLAGS & DEBUG_TINY) printf ("I#%d", num); switch (num) { case SIR_SENSE_RESTART: case SIR_STALL_RESTART: break; default: /* ** lookup the nccb */ dsa = INL (nc_dsa); cp = np->link_nccb; while (cp && (CCB_PHYS (cp, phys) != dsa)) cp = cp->link_nccb; assert (cp); if (!cp) goto out; assert (cp == np->header.cp); if (cp != np->header.cp) goto out; } switch (num) { /*-------------------------------------------------------------------- ** ** Processing of interrupted getcc selects ** **-------------------------------------------------------------------- */ case SIR_SENSE_RESTART: /*------------------------------------------ ** Script processor is idle. 
** Look for interrupted "check cond" **------------------------------------------ */ if (DEBUG_FLAGS & DEBUG_RESTART) device_printf(np->dev, "int#%d", num); cp = (nccb_p) 0; for (i=0; itarget[i]; if (DEBUG_FLAGS & DEBUG_RESTART) printf ("+"); cp = tp->hold_cp; if (!cp) continue; if (DEBUG_FLAGS & DEBUG_RESTART) printf ("+"); if ((cp->host_status==HS_BUSY) && (cp->s_status==SCSI_STATUS_CHECK_COND)) break; if (DEBUG_FLAGS & DEBUG_RESTART) printf ("- (remove)"); tp->hold_cp = cp = (nccb_p) 0; } if (cp) { if (DEBUG_FLAGS & DEBUG_RESTART) printf ("+ restart job ..\n"); OUTL (nc_dsa, CCB_PHYS (cp, phys)); OUTL (nc_dsp, NCB_SCRIPTH_PHYS (np, getcc)); return; } /* ** no job, resume normal processing */ if (DEBUG_FLAGS & DEBUG_RESTART) printf (" -- remove trap\n"); WRITESCRIPT(start0[0], SCR_INT ^ IFFALSE (0)); break; case SIR_SENSE_FAILED: /*------------------------------------------- ** While trying to select for ** getting the condition code, ** a target reselected us. **------------------------------------------- */ if (DEBUG_FLAGS & DEBUG_RESTART) { PRINT_ADDR(cp->ccb); printf ("in getcc reselect by t%d.\n", INB(nc_ssid) & 0x0f); } /* ** Mark this job */ cp->host_status = HS_BUSY; cp->s_status = SCSI_STATUS_CHECK_COND; np->target[cp->ccb->ccb_h.target_id].hold_cp = cp; /* ** And patch code to restart it. */ WRITESCRIPT(start0[0], SCR_INT); break; /*----------------------------------------------------------------------------- ** ** Was Sie schon immer ueber transfermode negotiation wissen wollten ... ** ** We try to negotiate sync and wide transfer only after ** a successful inquire command. We look at byte 7 of the ** inquire data to determine the capabilities if the target. ** ** When we try to negotiate, we append the negotiation message ** to the identify and (maybe) simple tag message. ** The host status field is set to HS_NEGOTIATE to mark this ** situation. 
** ** If the target doesn't answer this message immediately ** (as required by the standard), the SIR_NEGO_FAIL interrupt ** will be raised eventually. ** The handler removes the HS_NEGOTIATE status, and sets the ** negotiated value to the default (async / nowide). ** ** If we receive a matching answer immediately, we check it ** for validity, and set the values. ** ** If we receive a Reject message immediately, we assume the ** negotiation has failed, and fall back to standard values. ** ** If we receive a negotiation message while not in HS_NEGOTIATE ** state, it's a target initiated negotiation. We prepare a ** (hopefully) valid answer, set our parameters, and send back ** this answer to the target. ** ** If the target doesn't fetch the answer (no message out phase), ** we assume the negotiation has failed, and fall back to default ** settings. ** ** When we set the values, we adjust them in all nccbs belonging ** to this target, in the controller's register, and in the "phys" ** field of the controller's struct ncb. ** ** Possible cases: hs sir msg_in value send goto ** We try try to negotiate: ** -> target doesnt't msgin NEG FAIL noop defa. - dispatch ** -> target rejected our msg NEG FAIL reject defa. - dispatch ** -> target answered (ok) NEG SYNC sdtr set - clrack ** -> target answered (!ok) NEG SYNC sdtr defa. REJ--->msg_bad ** -> target answered (ok) NEG WIDE wdtr set - clrack ** -> target answered (!ok) NEG WIDE wdtr defa. REJ--->msg_bad ** -> any other msgin NEG FAIL noop defa. - dispatch ** ** Target tries to negotiate: ** -> incoming message --- SYNC sdtr set SDTR - ** -> incoming message --- WIDE wdtr set WDTR - ** We sent our answer: ** -> target doesn't msgout --- PROTO ? defa. - dispatch ** **----------------------------------------------------------------------------- */ case SIR_NEGO_FAILED: /*------------------------------------------------------- ** ** Negotiation failed. 
** Target doesn't send an answer message, ** or target rejected our message. ** ** Remove negotiation request. ** **------------------------------------------------------- */ OUTB (HS_PRT, HS_BUSY); /* FALLTHROUGH */ case SIR_NEGO_PROTO: /*------------------------------------------------------- ** ** Negotiation failed. ** Target doesn't fetch the answer message. ** **------------------------------------------------------- */ if (DEBUG_FLAGS & DEBUG_NEGO) { PRINT_ADDR(cp->ccb); printf ("negotiation failed sir=%x status=%x.\n", num, cp->nego_status); } /* ** any error in negotiation: ** fall back to default mode. */ switch (cp->nego_status) { case NS_SYNC: ncr_setsync (np, cp, 0, 0xe0, 0); break; case NS_WIDE: ncr_setwide (np, cp, 0, 0); break; } np->msgin [0] = MSG_NOOP; np->msgout[0] = MSG_NOOP; cp->nego_status = 0; OUTL (nc_dsp, NCB_SCRIPT_PHYS (np, dispatch)); break; case SIR_NEGO_SYNC: /* ** Synchronous request message received. */ if (DEBUG_FLAGS & DEBUG_NEGO) { PRINT_ADDR(cp->ccb); printf ("sync msgin: "); (void) ncr_show_msg (np->msgin); printf (".\n"); } /* ** get requested values. */ chg = 0; per = np->msgin[3]; ofs = np->msgin[4]; if (ofs==0) per=255; /* ** check values against driver limits. */ if (per < np->minsync) {chg = 1; per = np->minsync;} if (per < tp->tinfo.user.period) {chg = 1; per = tp->tinfo.user.period;} if (ofs > tp->tinfo.user.offset) {chg = 1; ofs = tp->tinfo.user.offset;} /* ** Check against controller limits. */ fak = 7; scntl3 = 0; if (ofs != 0) { ncr_getsync(np, per, &fak, &scntl3); if (fak > 7) { chg = 1; ofs = 0; } } if (ofs == 0) { fak = 7; per = 0; scntl3 = 0; } if (DEBUG_FLAGS & DEBUG_NEGO) { PRINT_ADDR(cp->ccb); printf ("sync: per=%d scntl3=0x%x ofs=%d fak=%d chg=%d.\n", per, scntl3, ofs, fak, chg); } if (INB (HS_PRT) == HS_NEGOTIATE) { OUTB (HS_PRT, HS_BUSY); switch (cp->nego_status) { case NS_SYNC: /* ** This was an answer message */ if (chg) { /* ** Answer wasn't acceptable. 
*/ ncr_setsync (np, cp, 0, 0xe0, 0); OUTL (nc_dsp, NCB_SCRIPT_PHYS (np, msg_bad)); } else { /* ** Answer is ok. */ ncr_setsync (np,cp,scntl3,(fak<<5)|ofs, per); OUTL (nc_dsp, NCB_SCRIPT_PHYS (np, clrack)); } return; case NS_WIDE: ncr_setwide (np, cp, 0, 0); break; } } /* ** It was a request. Set value and ** prepare an answer message */ ncr_setsync (np, cp, scntl3, (fak<<5)|ofs, per); np->msgout[0] = MSG_EXTENDED; np->msgout[1] = 3; np->msgout[2] = MSG_EXT_SDTR; np->msgout[3] = per; np->msgout[4] = ofs; cp->nego_status = NS_SYNC; if (DEBUG_FLAGS & DEBUG_NEGO) { PRINT_ADDR(cp->ccb); printf ("sync msgout: "); (void) ncr_show_msg (np->msgout); printf (".\n"); } if (!ofs) { OUTL (nc_dsp, NCB_SCRIPT_PHYS (np, msg_bad)); return; } np->msgin [0] = MSG_NOOP; break; case SIR_NEGO_WIDE: /* ** Wide request message received. */ if (DEBUG_FLAGS & DEBUG_NEGO) { PRINT_ADDR(cp->ccb); printf ("wide msgin: "); (void) ncr_show_msg (np->msgin); printf (".\n"); } /* ** get requested values. */ chg = 0; wide = np->msgin[3]; /* ** check values against driver limits. */ if (wide > tp->tinfo.user.width) {chg = 1; wide = tp->tinfo.user.width;} if (DEBUG_FLAGS & DEBUG_NEGO) { PRINT_ADDR(cp->ccb); printf ("wide: wide=%d chg=%d.\n", wide, chg); } if (INB (HS_PRT) == HS_NEGOTIATE) { OUTB (HS_PRT, HS_BUSY); switch (cp->nego_status) { case NS_WIDE: /* ** This was an answer message */ if (chg) { /* ** Answer wasn't acceptable. */ ncr_setwide (np, cp, 0, 1); OUTL (nc_dsp, NCB_SCRIPT_PHYS (np, msg_bad)); } else { /* ** Answer is ok. 
*/ ncr_setwide (np, cp, wide, 1); OUTL (nc_dsp, NCB_SCRIPT_PHYS (np, clrack)); } return; case NS_SYNC: ncr_setsync (np, cp, 0, 0xe0, 0); break; } } /* ** It was a request, set value and ** prepare an answer message */ ncr_setwide (np, cp, wide, 1); np->msgout[0] = MSG_EXTENDED; np->msgout[1] = 2; np->msgout[2] = MSG_EXT_WDTR; np->msgout[3] = wide; np->msgin [0] = MSG_NOOP; cp->nego_status = NS_WIDE; if (DEBUG_FLAGS & DEBUG_NEGO) { PRINT_ADDR(cp->ccb); printf ("wide msgout: "); (void) ncr_show_msg (np->msgout); printf (".\n"); } break; /*-------------------------------------------------------------------- ** ** Processing of special messages ** **-------------------------------------------------------------------- */ case SIR_REJECT_RECEIVED: /*----------------------------------------------- ** ** We received a MSG_MESSAGE_REJECT message. ** **----------------------------------------------- */ PRINT_ADDR(cp->ccb); printf ("MSG_MESSAGE_REJECT received (%x:%x).\n", (unsigned)np->lastmsg, np->msgout[0]); break; case SIR_REJECT_SENT: /*----------------------------------------------- ** ** We received an unknown message ** **----------------------------------------------- */ PRINT_ADDR(cp->ccb); printf ("MSG_MESSAGE_REJECT sent for "); (void) ncr_show_msg (np->msgin); printf (".\n"); break; /*-------------------------------------------------------------------- ** ** Processing of special messages ** **-------------------------------------------------------------------- */ case SIR_IGN_RESIDUE: /*----------------------------------------------- ** ** We received an IGNORE RESIDUE message, ** which couldn't be handled by the script. ** **----------------------------------------------- */ PRINT_ADDR(cp->ccb); printf ("MSG_IGN_WIDE_RESIDUE received, but not yet implemented.\n"); break; case SIR_MISSING_SAVE: /*----------------------------------------------- ** ** We received an DISCONNECT message, ** but the datapointer wasn't saved before. 
** **----------------------------------------------- */ PRINT_ADDR(cp->ccb); printf ("MSG_DISCONNECT received, but datapointer not saved:\n" "\tdata=%x save=%x goal=%x.\n", (unsigned) INL (nc_temp), (unsigned) np->header.savep, (unsigned) np->header.goalp); break; /*-------------------------------------------------------------------- ** ** Processing of a "SCSI_STATUS_QUEUE_FULL" status. ** ** XXX JGibbs - We should do the same thing for BUSY status. ** ** The current command has been rejected, ** because there are too many in the command queue. ** We have started too many commands for that target. ** **-------------------------------------------------------------------- */ case SIR_STALL_QUEUE: cp->xerr_status = XE_OK; cp->host_status = HS_COMPLETE; cp->s_status = SCSI_STATUS_QUEUE_FULL; ncr_freeze_devq(np, cp->ccb->ccb_h.path); ncr_complete(np, cp); /* FALLTHROUGH */ case SIR_STALL_RESTART: /*----------------------------------------------- ** ** Enable selecting again, ** if NO disconnected jobs. ** **----------------------------------------------- */ /* ** Look for a disconnected job. */ cp = np->link_nccb; while (cp && cp->host_status != HS_DISCONNECT) cp = cp->link_nccb; /* ** if there is one, ... */ if (cp) { /* ** wait for reselection */ OUTL (nc_dsp, NCB_SCRIPT_PHYS (np, reselect)); return; } /* ** else remove the interrupt. */ device_printf(np->dev, "queue empty.\n"); WRITESCRIPT(start1[0], SCR_INT ^ IFFALSE (0)); break; } out: OUTB (nc_dcntl, np->rv_dcntl | STD); } /*========================================================== ** ** ** Acquire a control block ** ** **========================================================== */ static nccb_p ncr_get_nccb (ncb_p np, u_long target, u_long lun) { lcb_p lp; nccb_p cp = NULL; /* ** Lun structure available ? */ lp = np->target[target].lp[lun]; if (lp) { cp = lp->next_nccb; /* ** Look for free CCB */ while (cp && cp->magic) { cp = cp->next_nccb; } } /* ** if nothing available, create one. 
*/ if (cp == NULL) cp = ncr_alloc_nccb(np, target, lun); if (cp != NULL) { if (cp->magic) { device_printf(np->dev, "Bogus free cp found\n"); return (NULL); } cp->magic = 1; } return (cp); } /*========================================================== ** ** ** Release one control block ** ** **========================================================== */ static void ncr_free_nccb (ncb_p np, nccb_p cp) { /* ** sanity */ assert (cp != NULL); cp -> host_status = HS_IDLE; cp -> magic = 0; } /*========================================================== ** ** ** Allocation of resources for Targets/Luns/Tags. ** ** **========================================================== */ static nccb_p ncr_alloc_nccb (ncb_p np, u_long target, u_long lun) { tcb_p tp; lcb_p lp; nccb_p cp; assert (np != NULL); if (target>=MAX_TARGET) return(NULL); if (lun >=MAX_LUN ) return(NULL); tp=&np->target[target]; if (!tp->jump_tcb.l_cmd) { /* ** initialize it. */ tp->jump_tcb.l_cmd = (SCR_JUMP^IFFALSE (DATA (0x80 + target))); tp->jump_tcb.l_paddr = np->jump_tcb.l_paddr; tp->getscr[0] = (np->features & FE_PFEN)? SCR_COPY(1) : SCR_COPY_F(1); tp->getscr[1] = vtophys (&tp->tinfo.sval); tp->getscr[2] = rman_get_start(np->reg_res) + offsetof (struct ncr_reg, nc_sxfer); tp->getscr[3] = (np->features & FE_PFEN)? 
SCR_COPY(1) : SCR_COPY_F(1); tp->getscr[4] = vtophys (&tp->tinfo.wval); tp->getscr[5] = rman_get_start(np->reg_res) + offsetof (struct ncr_reg, nc_scntl3); assert (((offsetof(struct ncr_reg, nc_sxfer) ^ (offsetof(struct tcb ,tinfo) + offsetof(struct ncr_target_tinfo, sval))) & 3) == 0); assert (((offsetof(struct ncr_reg, nc_scntl3) ^ (offsetof(struct tcb, tinfo) + offsetof(struct ncr_target_tinfo, wval))) &3) == 0); tp->call_lun.l_cmd = (SCR_CALL); tp->call_lun.l_paddr = NCB_SCRIPT_PHYS (np, resel_lun); tp->jump_lcb.l_cmd = (SCR_JUMP); tp->jump_lcb.l_paddr = NCB_SCRIPTH_PHYS (np, abort); np->jump_tcb.l_paddr = vtophys (&tp->jump_tcb); } /* ** Logic unit control block */ lp = tp->lp[lun]; if (!lp) { /* ** Allocate a lcb */ lp = (lcb_p) malloc (sizeof (struct lcb), M_DEVBUF, M_NOWAIT | M_ZERO); if (!lp) return(NULL); /* ** Initialize it */ lp->jump_lcb.l_cmd = (SCR_JUMP ^ IFFALSE (DATA (lun))); lp->jump_lcb.l_paddr = tp->jump_lcb.l_paddr; lp->call_tag.l_cmd = (SCR_CALL); lp->call_tag.l_paddr = NCB_SCRIPT_PHYS (np, resel_tag); lp->jump_nccb.l_cmd = (SCR_JUMP); lp->jump_nccb.l_paddr = NCB_SCRIPTH_PHYS (np, aborttag); lp->actlink = 1; /* ** Chain into LUN list */ tp->jump_lcb.l_paddr = vtophys (&lp->jump_lcb); tp->lp[lun] = lp; } /* ** Allocate a nccb */ cp = (nccb_p) malloc (sizeof (struct nccb), M_DEVBUF, M_NOWAIT|M_ZERO); if (!cp) return (NULL); if (DEBUG_FLAGS & DEBUG_ALLOC) { printf ("new nccb @%p.\n", cp); } /* ** Fill in physical addresses */ cp->p_nccb = vtophys (cp); /* ** Chain into reselect list */ cp->jump_nccb.l_cmd = SCR_JUMP; cp->jump_nccb.l_paddr = lp->jump_nccb.l_paddr; lp->jump_nccb.l_paddr = CCB_PHYS (cp, jump_nccb); cp->call_tmp.l_cmd = SCR_CALL; cp->call_tmp.l_paddr = NCB_SCRIPT_PHYS (np, resel_tmp); /* ** Chain into wakeup list */ cp->link_nccb = np->link_nccb; np->link_nccb = cp; /* ** Chain into CCB list */ cp->next_nccb = lp->next_nccb; lp->next_nccb = cp; return (cp); } /*========================================================== ** ** ** Build 
Scatter Gather Block ** ** **========================================================== ** ** The transfer area may be scattered among ** several non adjacent physical pages. ** ** We may use MAX_SCATTER blocks. ** **---------------------------------------------------------- */ static int ncr_scatter (struct dsb* phys, vm_offset_t vaddr, vm_size_t datalen) { u_long paddr, pnext; u_short segment = 0; u_long segsize, segaddr; u_long size, csize = 0; u_long chunk = MAX_SIZE; int free; bzero (&phys->data, sizeof (phys->data)); if (!datalen) return (0); paddr = vtophys (vaddr); /* ** insert extra break points at a distance of chunk. ** We try to reduce the number of interrupts caused ** by unexpected phase changes due to disconnects. ** A typical harddisk may disconnect before ANY block. ** If we wanted to avoid unexpected phase changes at all ** we had to use a break point every 512 bytes. ** Of course the number of scatter/gather blocks is ** limited. */ free = MAX_SCATTER - 1; if (vaddr & PAGE_MASK) free -= datalen / PAGE_SIZE; if (free>1) while ((chunk * free >= 2 * datalen) && (chunk>=1024)) chunk /= 2; if(DEBUG_FLAGS & DEBUG_SCATTER) printf("ncr?:\tscattering virtual=%p size=%d chunk=%d.\n", (void *) vaddr, (unsigned) datalen, (unsigned) chunk); /* ** Build data descriptors. 
*/ while (datalen && (segment < MAX_SCATTER)) { /* ** this segment is empty */ segsize = 0; segaddr = paddr; pnext = paddr; if (!csize) csize = chunk; while ((datalen) && (paddr == pnext) && (csize)) { /* ** continue this segment */ pnext = (paddr & (~PAGE_MASK)) + PAGE_SIZE; /* ** Compute max size */ size = pnext - paddr; /* page size */ if (size > datalen) size = datalen; /* data size */ if (size > csize ) size = csize ; /* chunksize */ segsize += size; vaddr += size; csize -= size; datalen -= size; paddr = vtophys (vaddr); } if(DEBUG_FLAGS & DEBUG_SCATTER) printf ("\tseg #%d addr=%x size=%d (rest=%d).\n", segment, (unsigned) segaddr, (unsigned) segsize, (unsigned) datalen); phys->data[segment].addr = segaddr; phys->data[segment].size = segsize; segment++; } if (datalen) { printf("ncr?: scatter/gather failed (residue=%d).\n", (unsigned) datalen); return (-1); } return (segment); } /*========================================================== ** ** ** Test the pci bus snoop logic :-( ** ** Has to be called with interrupts disabled. ** ** **========================================================== */ #ifndef NCR_IOMAPPED static int ncr_regtest (struct ncb* np) { register volatile u_int32_t data; /* ** ncr registers may NOT be cached. ** write 0xffffffff to a read only register area, ** and try to read it back. */ data = 0xffffffff; OUTL_OFF(offsetof(struct ncr_reg, nc_dstat), data); data = INL_OFF(offsetof(struct ncr_reg, nc_dstat)); #if 1 if (data == 0xffffffff) { #else if ((data & 0xe2f0fffd) != 0x02000080) { #endif printf ("CACHE TEST FAILED: reg dstat-sstat2 readback %x.\n", (unsigned) data); return (0x10); } return (0); } #endif static int ncr_snooptest (struct ncb* np) { u_int32_t ncr_rd, ncr_wr, ncr_bk, host_rd, host_wr, pc; int i, err=0; #ifndef NCR_IOMAPPED err |= ncr_regtest (np); if (err) return (err); #endif /* ** init */ pc = NCB_SCRIPTH_PHYS (np, snooptest); host_wr = 1; ncr_wr = 2; /* ** Set memory and register. 
*/ ncr_cache = host_wr; OUTL (nc_temp, ncr_wr); /* ** Start script (exchange values) */ OUTL (nc_dsp, pc); /* ** Wait 'til done (with timeout) */ for (i=0; i=NCR_SNOOP_TIMEOUT) { printf ("CACHE TEST FAILED: timeout.\n"); return (0x20); } /* ** Check termination position. */ if (pc != NCB_SCRIPTH_PHYS (np, snoopend)+8) { printf ("CACHE TEST FAILED: script execution failed.\n"); printf ("start=%08lx, pc=%08lx, end=%08lx\n", (u_long) NCB_SCRIPTH_PHYS (np, snooptest), (u_long) pc, (u_long) NCB_SCRIPTH_PHYS (np, snoopend) +8); return (0x40); } /* ** Show results. */ if (host_wr != ncr_rd) { printf ("CACHE TEST FAILED: host wrote %d, ncr read %d.\n", (int) host_wr, (int) ncr_rd); err |= 1; } if (host_rd != ncr_wr) { printf ("CACHE TEST FAILED: ncr wrote %d, host read %d.\n", (int) ncr_wr, (int) host_rd); err |= 2; } if (ncr_bk != ncr_wr) { printf ("CACHE TEST FAILED: ncr wrote %d, read back %d.\n", (int) ncr_wr, (int) ncr_bk); err |= 4; } return (err); } /*========================================================== ** ** ** Profiling the drivers and targets performance. ** ** **========================================================== */ /* ** Compute the difference in milliseconds. 
**/ static int ncr_delta (int *from, int *to) { if (!from) return (-1); if (!to) return (-2); return ((to - from) * 1000 / hz); } #define PROFILE cp->phys.header.stamp static void ncb_profile (ncb_p np, nccb_p cp) { int co, da, st, en, di, se, post,work,disc; u_long diff; PROFILE.end = ticks; st = ncr_delta (&PROFILE.start,&PROFILE.status); if (st<0) return; /* status not reached */ da = ncr_delta (&PROFILE.start,&PROFILE.data); if (da<0) return; /* No data transfer phase */ co = ncr_delta (&PROFILE.start,&PROFILE.command); if (co<0) return; /* command not executed */ en = ncr_delta (&PROFILE.start,&PROFILE.end), di = ncr_delta (&PROFILE.start,&PROFILE.disconnect), se = ncr_delta (&PROFILE.start,&PROFILE.select); post = en - st; /* ** @PROFILE@ Disconnect time invalid if multiple disconnects */ if (di>=0) disc = se-di; else disc = 0; work = (st - co) - disc; diff = (np->disc_phys - np->disc_ref) & 0xff; np->disc_ref += diff; np->profile.num_trans += 1; if (cp->ccb) np->profile.num_bytes += cp->ccb->csio.dxfer_len; np->profile.num_disc += diff; np->profile.ms_setup += co; np->profile.ms_data += work; np->profile.ms_disc += disc; np->profile.ms_post += post; } #undef PROFILE /*========================================================== ** ** Determine the ncr's clock frequency. ** This is essential for the negotiation ** of the synchronous transfer rate. ** **========================================================== ** ** Note: we have to return the correct value. ** THERE IS NO SAVE DEFAULT VALUE. ** ** Most NCR/SYMBIOS boards are delivered with a 40 Mhz clock. ** 53C860 and 53C875 rev. 1 support fast20 transfers but ** do not have a clock doubler and so are provided with a ** 80 MHz clock. All other fast20 boards incorporate a doubler ** and so should be delivered with a 40 MHz clock. ** The future fast40 chips (895/895) use a 40 Mhz base clock ** and provide a clock quadrupler (160 Mhz). The code below ** tries to deal as cleverly as possible with all this stuff. 
** **---------------------------------------------------------- */ /* * Select NCR SCSI clock frequency */ static void ncr_selectclock(ncb_p np, u_char scntl3) { if (np->multiplier < 2) { OUTB(nc_scntl3, scntl3); return; } if (bootverbose >= 2) device_printf(np->dev, "enabling clock multiplier\n"); OUTB(nc_stest1, DBLEN); /* Enable clock multiplier */ if (np->multiplier > 2) { /* Poll bit 5 of stest4 for quadrupler */ int i = 20; while (!(INB(nc_stest4) & LCKFRQ) && --i > 0) DELAY(20); if (!i) device_printf(np->dev, "the chip cannot lock the frequency\n"); } else /* Wait 20 micro-seconds for doubler */ DELAY(20); OUTB(nc_stest3, HSC); /* Halt the scsi clock */ OUTB(nc_scntl3, scntl3); OUTB(nc_stest1, (DBLEN|DBLSEL));/* Select clock multiplier */ OUTB(nc_stest3, 0x00); /* Restart scsi clock */ } /* * calculate NCR SCSI clock frequency (in KHz) */ static unsigned ncrgetfreq (ncb_p np, int gen) { int ms = 0; /* * Measure GEN timer delay in order * to calculate SCSI clock frequency * * This code will never execute too * many loop iterations (if DELAY is * reasonably correct). It could get * too low a delay (too high a freq.) * if the CPU is slow executing the * loop for some reason (an NMI, for * example). For this reason we will * if multiple measurements are to be * performed trust the higher delay * (lower frequency returned). */ OUTB (nc_stest1, 0); /* make sure clock doubler is OFF */ OUTW (nc_sien , 0); /* mask all scsi interrupts */ (void) INW (nc_sist); /* clear pending scsi interrupt */ OUTB (nc_dien , 0); /* mask all dma interrupts */ (void) INW (nc_sist); /* another one, just to be sure :) */ OUTB (nc_scntl3, 4); /* set pre-scaler to divide by 3 */ OUTB (nc_stime1, 0); /* disable general purpose timer */ OUTB (nc_stime1, gen); /* set to nominal delay of (1<= 2) printf ("\tDelay (GEN=%d): %u msec\n", gen, ms); /* * adjust for prescaler, and convert into KHz */ return ms ? 
((1 << gen) * 4440) / ms : 0; } static void ncr_getclock (ncb_p np, u_char multiplier) { unsigned char scntl3; unsigned char stest1; scntl3 = INB(nc_scntl3); stest1 = INB(nc_stest1); np->multiplier = 1; if (multiplier > 1) { np->multiplier = multiplier; np->clock_khz = 40000 * multiplier; } else { if ((scntl3 & 7) == 0) { unsigned f1, f2; /* throw away first result */ (void) ncrgetfreq (np, 11); f1 = ncrgetfreq (np, 11); f2 = ncrgetfreq (np, 11); if (bootverbose >= 2) printf ("\tNCR clock is %uKHz, %uKHz\n", f1, f2); if (f1 > f2) f1 = f2; /* trust lower result */ if (f1 > 45000) { scntl3 = 5; /* >45Mhz: assume 80MHz */ } else { scntl3 = 3; /* <45Mhz: assume 40MHz */ } } else if ((scntl3 & 7) == 5) np->clock_khz = 80000; /* Probably a 875 rev. 1 ? */ } } /*=========================================================================*/ #ifdef NCR_TEKRAM_EEPROM struct tekram_eeprom_dev { u_char devmode; #define TKR_PARCHK 0x01 #define TKR_TRYSYNC 0x02 #define TKR_ENDISC 0x04 #define TKR_STARTUNIT 0x08 #define TKR_USETAGS 0x10 #define TKR_TRYWIDE 0x20 u_char syncparam; /* max. sync transfer rate (table ?) */ u_char filler1; u_char filler2; }; struct tekram_eeprom { struct tekram_eeprom_dev dev[16]; u_char adaptid; u_char adaptmode; #define TKR_ADPT_GT2DRV 0x01 #define TKR_ADPT_GT1GB 0x02 #define TKR_ADPT_RSTBUS 0x04 #define TKR_ADPT_ACTNEG 0x08 #define TKR_ADPT_NOSEEK 0x10 #define TKR_ADPT_MORLUN 0x20 u_char delay; /* unit ? ( table ??? ) */ u_char tags; /* use 4 times as many ... 
*/ u_char filler[60]; }; static void tekram_write_bit (ncb_p np, int bit) { u_char val = 0x10 + ((bit & 1) << 1); DELAY(10); OUTB (nc_gpreg, val); DELAY(10); OUTB (nc_gpreg, val | 0x04); DELAY(10); OUTB (nc_gpreg, val); DELAY(10); } static int tekram_read_bit (ncb_p np) { OUTB (nc_gpreg, 0x10); DELAY(10); OUTB (nc_gpreg, 0x14); DELAY(10); return INB (nc_gpreg) & 1; } static u_short read_tekram_eeprom_reg (ncb_p np, int reg) { int bit; u_short result = 0; int cmd = 0x80 | reg; OUTB (nc_gpreg, 0x10); tekram_write_bit (np, 1); for (bit = 7; bit >= 0; bit--) { tekram_write_bit (np, cmd >> bit); } for (bit = 0; bit < 16; bit++) { result <<= 1; result |= tekram_read_bit (np); } OUTB (nc_gpreg, 0x00); return result; } static int read_tekram_eeprom(ncb_p np, struct tekram_eeprom *buffer) { u_short *p = (u_short *) buffer; u_short sum = 0; int i; if (INB (nc_gpcntl) != 0x09) { return 0; } for (i = 0; i < 64; i++) { u_short val; if((i&0x0f) == 0) printf ("%02x:", i*2); val = read_tekram_eeprom_reg (np, i); if (p) *p++ = val; sum += val; if((i&0x01) == 0x00) printf (" "); printf ("%02x%02x", val & 0xff, (val >> 8) & 0xff); if((i&0x0f) == 0x0f) printf ("\n"); } printf ("Sum = %04x\n", sum); return sum == 0x1234; } #endif /* NCR_TEKRAM_EEPROM */ static device_method_t ncr_methods[] = { /* Device interface */ DEVMETHOD(device_probe, ncr_probe), DEVMETHOD(device_attach, ncr_attach), { 0, 0 } }; static driver_t ncr_driver = { "ncr", ncr_methods, sizeof(struct ncb), }; static devclass_t ncr_devclass; DRIVER_MODULE(ncr, pci, ncr_driver, ncr_devclass, 0, 0); MODULE_DEPEND(ncr, cam, 1, 1, 1); MODULE_DEPEND(ncr, pci, 1, 1, 1); /*=========================================================================*/ #endif /* _KERNEL */ Index: stable/11/sys/dev/nvme/nvme_sim.c =================================================================== --- stable/11/sys/dev/nvme/nvme_sim.c (revision 335137) +++ stable/11/sys/dev/nvme/nvme_sim.c (revision 335138) @@ -1,416 +1,416 @@ /*- * Copyright (c) 2016 
Netflix, Inc * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include // Yes, this is wrong. 
#include #include #include #include "nvme_private.h" #define ccb_accb_ptr spriv_ptr0 #define ccb_ctrlr_ptr spriv_ptr1 static void nvme_sim_action(struct cam_sim *sim, union ccb *ccb); static void nvme_sim_poll(struct cam_sim *sim); #define sim2softc(sim) ((struct nvme_sim_softc *)cam_sim_softc(sim)) #define sim2ns(sim) (sim2softc(sim)->s_ns) #define sim2ctrlr(sim) (sim2softc(sim)->s_ctrlr) struct nvme_sim_softc { struct nvme_controller *s_ctrlr; struct nvme_namespace *s_ns; struct cam_sim *s_sim; struct cam_path *s_path; }; static void nvme_sim_nvmeio_done(void *ccb_arg, const struct nvme_completion *cpl) { union ccb *ccb = (union ccb *)ccb_arg; /* * Let the periph know the completion, and let it sort out what * it means. Make our best guess, though for the status code. */ memcpy(&ccb->nvmeio.cpl, cpl, sizeof(*cpl)); if (nvme_completion_is_error(cpl)) { ccb->ccb_h.status = CAM_REQ_CMP_ERR; xpt_done(ccb); } else { ccb->ccb_h.status = CAM_REQ_CMP; xpt_done_direct(ccb); } } static void nvme_sim_nvmeio(struct cam_sim *sim, union ccb *ccb) { struct ccb_nvmeio *nvmeio = &ccb->nvmeio; struct nvme_request *req; void *payload; uint32_t size; struct nvme_controller *ctrlr; ctrlr = sim2ctrlr(sim); payload = nvmeio->data_ptr; size = nvmeio->dxfer_len; /* SG LIST ??? 
*/ if ((nvmeio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_BIO) req = nvme_allocate_request_bio((struct bio *)payload, nvme_sim_nvmeio_done, ccb); else if ((nvmeio->ccb_h.flags & CAM_DATA_SG) == CAM_DATA_SG) req = nvme_allocate_request_ccb(ccb, nvme_sim_nvmeio_done, ccb); else if (payload == NULL) req = nvme_allocate_request_null(nvme_sim_nvmeio_done, ccb); else req = nvme_allocate_request_vaddr(payload, size, nvme_sim_nvmeio_done, ccb); if (req == NULL) { nvmeio->ccb_h.status = CAM_RESRC_UNAVAIL; xpt_done(ccb); return; } memcpy(&req->cmd, &ccb->nvmeio.cmd, sizeof(ccb->nvmeio.cmd)); if (ccb->ccb_h.func_code == XPT_NVME_IO) nvme_ctrlr_submit_io_request(ctrlr, req); else nvme_ctrlr_submit_admin_request(ctrlr, req); ccb->ccb_h.status |= CAM_SIM_QUEUED; } static void nvme_sim_action(struct cam_sim *sim, union ccb *ccb) { struct nvme_controller *ctrlr; struct nvme_namespace *ns; CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("nvme_sim_action: func= %#x\n", ccb->ccb_h.func_code)); /* * XXX when we support multiple namespaces in the base driver we'll need * to revisit how all this gets stored and saved in the periph driver's * reserved areas. Right now we store all three in the softc of the sim. */ ns = sim2ns(sim); ctrlr = sim2ctrlr(sim); mtx_assert(&ctrlr->lock, MA_OWNED); switch (ccb->ccb_h.func_code) { case XPT_CALC_GEOMETRY: /* Calculate Geometry Totally nuts ? XXX */ /* * Only meaningful for old-school SCSI disks since only the SCSI * da driver generates them. Reject all these that slip through. */ /*FALLTHROUGH*/ case XPT_ABORT: /* Abort the specified CCB */ case XPT_EN_LUN: /* Enable LUN as a target */ case XPT_TARGET_IO: /* Execute target I/O request */ case XPT_ACCEPT_TARGET_IO: /* Accept Host Target Mode CDB */ case XPT_CONT_TARGET_IO: /* Continue Host Target I/O Connection*/ /* * Only target mode generates these, and only for SCSI. They are * all invalid/unsupported for NVMe. 
*/ ccb->ccb_h.status = CAM_REQ_INVALID; break; case XPT_SET_TRAN_SETTINGS: /* * NVMe doesn't really have different transfer settings, but * other parts of CAM think failure here is a big deal. */ ccb->ccb_h.status = CAM_REQ_CMP; break; case XPT_PATH_INQ: /* Path routing inquiry */ { struct ccb_pathinq *cpi = &ccb->cpi; device_t dev = ctrlr->dev; /* * NVMe may have multiple LUNs on the same path. Current generation * of NVMe devives support only a single name space. Multiple name * space drives are coming, but it's unclear how we should report * them up the stack. */ cpi->version_num = 1; cpi->hba_inquiry = 0; cpi->target_sprt = 0; cpi->hba_misc = PIM_UNMAPPED /* | PIM_NOSCAN */; cpi->hba_eng_cnt = 0; cpi->max_target = 0; cpi->max_lun = ctrlr->cdata.nn; cpi->maxio = nvme_ns_get_max_io_xfer_size(ns); cpi->initiator_id = 0; cpi->bus_id = cam_sim_bus(sim); cpi->base_transfer_speed = 4000000; /* 4 GB/s 4 lanes pcie 3 */ strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); strncpy(cpi->hba_vid, "NVMe", HBA_IDLEN); strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); cpi->unit_number = cam_sim_unit(sim); - cpi->transport = XPORT_NVME; /* XXX XPORT_PCIE ? */ - cpi->transport_version = 1; /* XXX Get PCIe spec ? */ - cpi->protocol = PROTO_NVME; - cpi->protocol_version = NVME_REV_1; /* Groks all 1.x NVMe cards */ + cpi->transport = XPORT_NVME; /* XXX XPORT_PCIE ? */ + cpi->transport_version = 1; /* XXX Get PCIe spec ? 
*/ + cpi->protocol = PROTO_NVME; + cpi->protocol_version = NVME_REV_1; /* Groks all 1.x NVMe cards */ cpi->xport_specific.nvme.nsid = ns->id; cpi->xport_specific.nvme.domain = pci_get_domain(dev); cpi->xport_specific.nvme.bus = pci_get_bus(dev); cpi->xport_specific.nvme.slot = pci_get_slot(dev); cpi->xport_specific.nvme.function = pci_get_function(dev); cpi->xport_specific.nvme.extra = 0; cpi->ccb_h.status = CAM_REQ_CMP; break; } case XPT_GET_TRAN_SETTINGS: /* Get transport settings */ { struct ccb_trans_settings *cts; struct ccb_trans_settings_nvme *nvmep; struct ccb_trans_settings_nvme *nvmex; cts = &ccb->cts; nvmex = &cts->xport_specific.nvme; nvmep = &cts->proto_specific.nvme; nvmex->valid = CTS_NVME_VALID_SPEC; nvmex->spec_major = 1; /* XXX read from card */ nvmex->spec_minor = 2; nvmex->spec_tiny = 0; nvmep->valid = CTS_NVME_VALID_SPEC; nvmep->spec_major = 1; /* XXX read from card */ nvmep->spec_minor = 2; nvmep->spec_tiny = 0; cts->transport = XPORT_NVME; cts->protocol = PROTO_NVME; cts->ccb_h.status = CAM_REQ_CMP; break; } case XPT_TERM_IO: /* Terminate the I/O process */ /* * every driver handles this, but nothing generates it. Assume * it's OK to just say 'that worked'. */ /*FALLTHROUGH*/ case XPT_RESET_DEV: /* Bus Device Reset the specified device */ case XPT_RESET_BUS: /* Reset the specified bus */ /* * NVMe doesn't really support physically resetting the bus. It's part * of the bus scanning dance, so return sucess to tell the process to * proceed. 
*/ ccb->ccb_h.status = CAM_REQ_CMP; break; case XPT_NVME_IO: /* Execute the requested I/O operation */ case XPT_NVME_ADMIN: /* or Admin operation */ nvme_sim_nvmeio(sim, ccb); return; /* no done */ default: ccb->ccb_h.status = CAM_REQ_INVALID; break; } xpt_done(ccb); } static void nvme_sim_poll(struct cam_sim *sim) { nvme_ctrlr_poll(sim2ctrlr(sim)); } static void * nvme_sim_new_controller(struct nvme_controller *ctrlr) { struct cam_devq *devq; int max_trans; int unit; struct nvme_sim_softc *sc = NULL; max_trans = ctrlr->max_hw_pend_io; unit = device_get_unit(ctrlr->dev); devq = cam_simq_alloc(max_trans); if (devq == NULL) return NULL; sc = malloc(sizeof(*sc), M_NVME, M_ZERO | M_WAITOK); sc->s_ctrlr = ctrlr; sc->s_sim = cam_sim_alloc(nvme_sim_action, nvme_sim_poll, "nvme", sc, unit, &ctrlr->lock, max_trans, max_trans, devq); if (sc->s_sim == NULL) { printf("Failed to allocate a sim\n"); cam_simq_free(devq); free(sc, M_NVME); return NULL; } return sc; } static void nvme_sim_rescan_target(struct nvme_controller *ctrlr, struct cam_path *path) { union ccb *ccb; ccb = xpt_alloc_ccb_nowait(); if (ccb == NULL) { printf("unable to alloc CCB for rescan\n"); return; } if (xpt_clone_path(&ccb->ccb_h.path, path) != CAM_REQ_CMP) { printf("unable to copy path for rescan\n"); xpt_free_ccb(ccb); return; } xpt_rescan(ccb); } static void * nvme_sim_new_ns(struct nvme_namespace *ns, void *sc_arg) { struct nvme_sim_softc *sc = sc_arg; struct nvme_controller *ctrlr = sc->s_ctrlr; int i; sc->s_ns = ns; /* * XXX this is creating one bus per ns, but it should be one * XXX target per controller, and one LUN per namespace. * XXX Current drives only support one NS, so there's time * XXX to fix it later when new drives arrive. * * XXX I'm pretty sure the xpt_bus_register() call below is * XXX like super lame and it really belongs in the sim_new_ctrlr * XXX callback. Then the create_path below would be pretty close * XXX to being right. Except we should be per-ns not per-ctrlr * XXX data. 
*/ mtx_lock(&ctrlr->lock); /* Create bus */ /* * XXX do I need to lock ctrlr->lock ? * XXX do I need to lock the path? * ata and scsi seem to in their code, but their discovery is * somewhat more asynchronous. We're only every called one at a * time, and nothing is in parallel. */ i = 0; if (xpt_bus_register(sc->s_sim, ctrlr->dev, 0) != CAM_SUCCESS) goto error; i++; if (xpt_create_path(&sc->s_path, /*periph*/NULL, cam_sim_path(sc->s_sim), 1, ns->id) != CAM_REQ_CMP) goto error; i++; sc->s_path->device->nvme_data = nvme_ns_get_data(ns); sc->s_path->device->nvme_cdata = nvme_ctrlr_get_data(ns->ctrlr); /* Scan bus */ nvme_sim_rescan_target(ctrlr, sc->s_path); mtx_unlock(&ctrlr->lock); return ns; error: switch (i) { case 2: xpt_free_path(sc->s_path); case 1: xpt_bus_deregister(cam_sim_path(sc->s_sim)); case 0: cam_sim_free(sc->s_sim, /*free_devq*/TRUE); } mtx_unlock(&ctrlr->lock); return NULL; } static void nvme_sim_controller_fail(void *ctrlr_arg) { /* XXX cleanup XXX */ } struct nvme_consumer *consumer_cookie; static void nvme_sim_init(void) { if (nvme_use_nvd) return; consumer_cookie = nvme_register_consumer(nvme_sim_new_ns, nvme_sim_new_controller, NULL, nvme_sim_controller_fail); } SYSINIT(nvme_sim_register, SI_SUB_DRIVERS, SI_ORDER_ANY, nvme_sim_init, NULL); static void nvme_sim_uninit(void) { if (nvme_use_nvd) return; /* XXX Cleanup */ nvme_unregister_consumer(consumer_cookie); } SYSUNINIT(nvme_sim_unregister, SI_SUB_DRIVERS, SI_ORDER_ANY, nvme_sim_uninit, NULL); Index: stable/11/sys/dev/twa/tw_osl_cam.c =================================================================== --- stable/11/sys/dev/twa/tw_osl_cam.c (revision 335137) +++ stable/11/sys/dev/twa/tw_osl_cam.c (revision 335138) @@ -1,686 +1,686 @@ /* * Copyright (c) 2004-07 Applied Micro Circuits Corporation. * Copyright (c) 2004-05 Vinod Kashyap. * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ /* * AMCC'S 3ware driver for 9000 series storage controllers. * * Author: Vinod Kashyap * Modifications by: Adam Radford * Modifications by: Manjunath Ranganathaiah */ /* * FreeBSD CAM related functions. */ #include #include #include #include #include #include #include #include #include static TW_VOID twa_action(struct cam_sim *sim, union ccb *ccb); static TW_VOID twa_poll(struct cam_sim *sim); static TW_INT32 tw_osli_execute_scsi(struct tw_osli_req_context *req, union ccb *ccb); /* * Function name: tw_osli_cam_attach * Description: Attaches the driver to CAM. 
* * Input: sc -- ptr to OSL internal ctlr context * Output: None * Return value: 0 -- success * non-zero-- failure */ TW_INT32 tw_osli_cam_attach(struct twa_softc *sc) { struct cam_devq *devq; tw_osli_dbg_dprintf(3, sc, "entered"); /* * Create the device queue for our SIM. */ if ((devq = cam_simq_alloc(TW_OSLI_MAX_NUM_IOS)) == NULL) { tw_osli_printf(sc, "error = %d", TW_CL_SEVERITY_ERROR_STRING, TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER, 0x2100, "Failed to create SIM device queue", ENOMEM); return(ENOMEM); } /* * Create a SIM entry. Though we can support TW_OSLI_MAX_NUM_REQUESTS * simultaneous requests, we claim to be able to handle only * TW_OSLI_MAX_NUM_IOS (two less), so that we always have a request * packet available to service ioctls and AENs. */ tw_osli_dbg_dprintf(3, sc, "Calling cam_sim_alloc"); sc->sim = cam_sim_alloc(twa_action, twa_poll, "twa", sc, device_get_unit(sc->bus_dev), sc->sim_lock, TW_OSLI_MAX_NUM_IOS, 1, devq); if (sc->sim == NULL) { cam_simq_free(devq); tw_osli_printf(sc, "error = %d", TW_CL_SEVERITY_ERROR_STRING, TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER, 0x2101, "Failed to create a SIM entry", ENOMEM); return(ENOMEM); } /* * Register the bus. */ tw_osli_dbg_dprintf(3, sc, "Calling xpt_bus_register"); mtx_lock(sc->sim_lock); if (xpt_bus_register(sc->sim, sc->bus_dev, 0) != CAM_SUCCESS) { cam_sim_free(sc->sim, TRUE); sc->sim = NULL; /* so cam_detach will not try to free it */ tw_osli_printf(sc, "error = %d", TW_CL_SEVERITY_ERROR_STRING, TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER, 0x2102, "Failed to register the bus", ENXIO); mtx_unlock(sc->sim_lock); return(ENXIO); } tw_osli_dbg_dprintf(3, sc, "Calling xpt_create_path"); if (xpt_create_path(&sc->path, NULL, cam_sim_path(sc->sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { xpt_bus_deregister(cam_sim_path (sc->sim)); /* Passing TRUE to cam_sim_free will free the devq as well. 
*/ cam_sim_free(sc->sim, TRUE); tw_osli_printf(sc, "error = %d", TW_CL_SEVERITY_ERROR_STRING, TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER, 0x2103, "Failed to create path", ENXIO); mtx_unlock(sc->sim_lock); return(ENXIO); } mtx_unlock(sc->sim_lock); tw_osli_dbg_dprintf(3, sc, "exiting"); return(0); } /* * Function name: tw_osli_cam_detach * Description: Detaches the driver from CAM. * * Input: sc -- ptr to OSL internal ctlr context * Output: None * Return value: None */ TW_VOID tw_osli_cam_detach(struct twa_softc *sc) { tw_osli_dbg_dprintf(3, sc, "entered"); mtx_lock(sc->sim_lock); if (sc->path) xpt_free_path(sc->path); if (sc->sim) { xpt_bus_deregister(cam_sim_path(sc->sim)); /* Passing TRUE to cam_sim_free will free the devq as well. */ cam_sim_free(sc->sim, TRUE); } /* It's ok have 1 hold count while destroying the mutex */ mtx_destroy(sc->sim_lock); } /* * Function name: tw_osli_execute_scsi * Description: Build a fw cmd, based on a CAM style ccb, and * send it down. * * Input: req -- ptr to OSL internal request context * ccb -- ptr to CAM style ccb * Output: None * Return value: 0 -- success * non-zero-- failure */ TW_INT32 tw_osli_execute_scsi(struct tw_osli_req_context *req, union ccb *ccb) { struct twa_softc *sc = req->ctlr; struct tw_cl_req_packet *req_pkt; struct tw_cl_scsi_req_packet *scsi_req; struct ccb_hdr *ccb_h = &(ccb->ccb_h); struct ccb_scsiio *csio = &(ccb->csio); TW_INT32 error; tw_osli_dbg_dprintf(10, sc, "SCSI I/O request 0x%x", csio->cdb_io.cdb_bytes[0]); if (ccb_h->target_id >= TW_CL_MAX_NUM_UNITS) { tw_osli_dbg_dprintf(3, sc, "Invalid target. PTL = %x %x %jx", ccb_h->path_id, ccb_h->target_id, (uintmax_t)ccb_h->target_lun); ccb_h->status |= CAM_TID_INVALID; xpt_done(ccb); return(1); } if (ccb_h->target_lun >= TW_CL_MAX_NUM_LUNS) { tw_osli_dbg_dprintf(3, sc, "Invalid lun. 
PTL = %x %x %jx", ccb_h->path_id, ccb_h->target_id, (uintmax_t)ccb_h->target_lun); ccb_h->status |= CAM_LUN_INVALID; xpt_done(ccb); return(1); } if(ccb_h->flags & CAM_CDB_PHYS) { tw_osli_printf(sc, "", TW_CL_SEVERITY_ERROR_STRING, TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER, 0x2105, "Physical CDB address!"); ccb_h->status = CAM_REQ_INVALID; xpt_done(ccb); return(1); } /* * We are going to work on this request. Mark it as enqueued (though * we don't actually queue it...) */ ccb_h->status |= CAM_SIM_QUEUED; if((ccb_h->flags & CAM_DIR_MASK) != CAM_DIR_NONE) { if(ccb_h->flags & CAM_DIR_IN) req->flags |= TW_OSLI_REQ_FLAGS_DATA_IN; else req->flags |= TW_OSLI_REQ_FLAGS_DATA_OUT; } /* Build the CL understood request packet for SCSI cmds. */ req_pkt = &req->req_pkt; req_pkt->status = 0; req_pkt->tw_osl_callback = tw_osl_complete_io; scsi_req = &(req_pkt->gen_req_pkt.scsi_req); scsi_req->unit = ccb_h->target_id; scsi_req->lun = ccb_h->target_lun; scsi_req->sense_len = 0; scsi_req->sense_data = (TW_UINT8 *)(&csio->sense_data); scsi_req->scsi_status = 0; if(ccb_h->flags & CAM_CDB_POINTER) scsi_req->cdb = csio->cdb_io.cdb_ptr; else scsi_req->cdb = csio->cdb_io.cdb_bytes; scsi_req->cdb_len = csio->cdb_len; if (csio->dxfer_len > TW_CL_MAX_IO_SIZE) { tw_osli_printf(sc, "size = %d", TW_CL_SEVERITY_ERROR_STRING, TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER, 0x2106, "I/O size too big", csio->dxfer_len); ccb_h->status = CAM_REQ_TOO_BIG; ccb_h->status &= ~CAM_SIM_QUEUED; xpt_done(ccb); return(1); } if ((ccb_h->flags & CAM_DATA_MASK) == CAM_DATA_VADDR) { if ((req->length = csio->dxfer_len) != 0) { req->data = csio->data_ptr; scsi_req->sgl_entries = 1; } } else req->flags |= TW_OSLI_REQ_FLAGS_CCB; req->deadline = tw_osl_get_local_time() + (ccb_h->timeout / 1000); /* * twa_map_load_data_callback will fill in the SGL, * and submit the I/O. 
*/ error = tw_osli_map_request(req); if ((error) && (req->flags & TW_OSLI_REQ_FLAGS_FAILED)) { req->deadline = 0; ccb_h->status = CAM_REQ_CMP_ERR; ccb_h->status &= ~CAM_SIM_QUEUED; xpt_done(ccb); } return(error); } /* * Function name: twa_action * Description: Driver entry point for CAM's use. * * Input: sim -- sim corresponding to the ctlr * ccb -- ptr to CAM request * Output: None * Return value: None */ TW_VOID twa_action(struct cam_sim *sim, union ccb *ccb) { struct twa_softc *sc = (struct twa_softc *)cam_sim_softc(sim); struct ccb_hdr *ccb_h = &(ccb->ccb_h); switch (ccb_h->func_code) { case XPT_SCSI_IO: /* SCSI I/O */ { struct tw_osli_req_context *req; req = tw_osli_get_request(sc); if (req == NULL) { tw_osli_dbg_dprintf(2, sc, "Cannot get request pkt."); /* * Freeze the simq to maintain ccb ordering. The next * ccb that gets completed will unfreeze the simq. */ ccb_h->status &= ~CAM_SIM_QUEUED; ccb_h->status |= CAM_REQUEUE_REQ; xpt_done(ccb); break; } if ((tw_cl_is_reset_needed(&(req->ctlr->ctlr_handle)))) { ccb_h->status &= ~CAM_SIM_QUEUED; ccb_h->status |= CAM_REQUEUE_REQ; xpt_done(ccb); tw_osli_req_q_insert_tail(req, TW_OSLI_FREE_Q); break; } req->req_handle.osl_req_ctxt = req; req->req_handle.is_io = TW_CL_TRUE; req->orig_req = ccb; if (tw_osli_execute_scsi(req, ccb)) tw_osli_req_q_insert_tail(req, TW_OSLI_FREE_Q); break; } case XPT_ABORT: tw_osli_dbg_dprintf(2, sc, "Abort request."); ccb_h->status = CAM_UA_ABORT; xpt_done(ccb); break; case XPT_RESET_BUS: tw_cl_create_event(&(sc->ctlr_handle), TW_CL_FALSE, TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER, 0x2108, 0x3, TW_CL_SEVERITY_INFO_STRING, "Received Reset Bus request from CAM", " "); tw_cl_set_reset_needed(&(sc->ctlr_handle)); ccb_h->status = CAM_REQ_CMP; xpt_done(ccb); break; case XPT_SET_TRAN_SETTINGS: tw_osli_dbg_dprintf(3, sc, "XPT_SET_TRAN_SETTINGS"); /* * This command is not supported, since it's very specific * to SCSI, and we are doing ATA. 
*/ ccb_h->status = CAM_FUNC_NOTAVAIL; xpt_done(ccb); break; case XPT_GET_TRAN_SETTINGS: { struct ccb_trans_settings *cts = &ccb->cts; struct ccb_trans_settings_scsi *scsi = &cts->proto_specific.scsi; struct ccb_trans_settings_spi *spi = &cts->xport_specific.spi; cts->protocol = PROTO_SCSI; cts->protocol_version = SCSI_REV_2; cts->transport = XPORT_SPI; cts->transport_version = 2; spi->valid = CTS_SPI_VALID_DISC; spi->flags = CTS_SPI_FLAGS_DISC_ENB; scsi->valid = CTS_SCSI_VALID_TQ; scsi->flags = CTS_SCSI_FLAGS_TAG_ENB; tw_osli_dbg_dprintf(3, sc, "XPT_GET_TRAN_SETTINGS"); ccb_h->status = CAM_REQ_CMP; xpt_done(ccb); break; } case XPT_CALC_GEOMETRY: tw_osli_dbg_dprintf(3, sc, "XPT_CALC_GEOMETRY"); cam_calc_geometry(&ccb->ccg, 1/* extended */); xpt_done(ccb); break; case XPT_PATH_INQ: /* Path inquiry -- get twa properties */ { struct ccb_pathinq *path_inq = &ccb->cpi; tw_osli_dbg_dprintf(3, sc, "XPT_PATH_INQ request"); path_inq->version_num = 1; path_inq->hba_inquiry = 0; path_inq->target_sprt = 0; path_inq->hba_misc = 0; path_inq->hba_eng_cnt = 0; path_inq->max_target = TW_CL_MAX_NUM_UNITS; path_inq->max_lun = TW_CL_MAX_NUM_LUNS - 1; path_inq->unit_number = cam_sim_unit(sim); path_inq->bus_id = cam_sim_bus(sim); path_inq->initiator_id = TW_CL_MAX_NUM_UNITS; path_inq->base_transfer_speed = 100000; strlcpy(path_inq->sim_vid, "FreeBSD", SIM_IDLEN); strlcpy(path_inq->hba_vid, "3ware", HBA_IDLEN); strlcpy(path_inq->dev_name, cam_sim_name(sim), DEV_IDLEN); - path_inq->transport = XPORT_SPI; - path_inq->transport_version = 2; - path_inq->protocol = PROTO_SCSI; - path_inq->protocol_version = SCSI_REV_2; - path_inq->maxio = TW_CL_MAX_IO_SIZE; + path_inq->transport = XPORT_SPI; + path_inq->transport_version = 2; + path_inq->protocol = PROTO_SCSI; + path_inq->protocol_version = SCSI_REV_2; + path_inq->maxio = TW_CL_MAX_IO_SIZE; ccb_h->status = CAM_REQ_CMP; xpt_done(ccb); break; } default: tw_osli_dbg_dprintf(3, sc, "func_code = %x", ccb_h->func_code); ccb_h->status = 
CAM_REQ_INVALID; xpt_done(ccb); break; } } /* * Function name: twa_poll * Description: Driver entry point called when interrupts are not * available. * * Input: sim -- sim corresponding to the controller * Output: None * Return value: None */ TW_VOID twa_poll(struct cam_sim *sim) { struct twa_softc *sc = (struct twa_softc *)(cam_sim_softc(sim)); tw_osli_dbg_dprintf(3, sc, "entering; sc = %p", sc); tw_cl_interrupt(&(sc->ctlr_handle)); tw_osli_dbg_dprintf(3, sc, "exiting; sc = %p", sc); } /* * Function name: tw_osli_request_bus_scan * Description: Requests CAM for a scan of the bus. * * Input: sc -- ptr to per ctlr structure * Output: None * Return value: 0 -- success * non-zero-- failure */ TW_INT32 tw_osli_request_bus_scan(struct twa_softc *sc) { union ccb *ccb; tw_osli_dbg_dprintf(3, sc, "entering"); /* If we get here before sc->sim is initialized, return an error. */ if (!(sc->sim)) return(ENXIO); if ((ccb = xpt_alloc_ccb()) == NULL) return(ENOMEM); mtx_lock(sc->sim_lock); if (xpt_create_path(&ccb->ccb_h.path, NULL, cam_sim_path(sc->sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { xpt_free_ccb(ccb); mtx_unlock(sc->sim_lock); return(EIO); } xpt_rescan(ccb); mtx_unlock(sc->sim_lock); return(0); } /* * Function name: tw_osli_disallow_new_requests * Description: Calls the appropriate CAM function, so as to freeze * the flow of new requests from CAM to this controller. * * Input: sc -- ptr to OSL internal ctlr context * req_handle -- ptr to request handle sent by OSL. * Output: None * Return value: None */ TW_VOID tw_osli_disallow_new_requests(struct twa_softc *sc, struct tw_cl_req_handle *req_handle) { /* Only freeze/release the simq for IOs */ if (req_handle->is_io) { struct tw_osli_req_context *req = req_handle->osl_req_ctxt; union ccb *ccb = (union ccb *)(req->orig_req); xpt_freeze_simq(sc->sim, 1); ccb->ccb_h.status |= CAM_RELEASE_SIMQ; } } /* * Function name: tw_osl_timeout * Description: Call to timeout(). 
* * Input: req_handle -- ptr to request handle sent by OSL. * Output: None * Return value: None */ TW_VOID tw_osl_timeout(struct tw_cl_req_handle *req_handle) { struct tw_osli_req_context *req = req_handle->osl_req_ctxt; union ccb *ccb = (union ccb *)(req->orig_req); struct ccb_hdr *ccb_h = &(ccb->ccb_h); req->deadline = tw_osl_get_local_time() + (ccb_h->timeout / 1000); } /* * Function name: tw_osl_untimeout * Description: Inverse of call to timeout(). * * Input: req_handle -- ptr to request handle sent by OSL. * Output: None * Return value: None */ TW_VOID tw_osl_untimeout(struct tw_cl_req_handle *req_handle) { struct tw_osli_req_context *req = req_handle->osl_req_ctxt; req->deadline = 0; } /* * Function name: tw_osl_scan_bus * Description: CL calls this function to request for a bus scan. * * Input: ctlr_handle -- ptr to controller handle * Output: None * Return value: None */ TW_VOID tw_osl_scan_bus(struct tw_cl_ctlr_handle *ctlr_handle) { struct twa_softc *sc = ctlr_handle->osl_ctlr_ctxt; TW_INT32 error; if ((error = tw_osli_request_bus_scan(sc))) tw_osli_printf(sc, "error = %d", TW_CL_SEVERITY_ERROR_STRING, TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER, 0x2109, "Bus scan request to CAM failed", error); } /* * Function name: tw_osl_complete_io * Description: Called to complete CAM scsi requests. 
* * Input: req_handle -- ptr to request handle * Output: None * Return value: None */ TW_VOID tw_osl_complete_io(struct tw_cl_req_handle *req_handle) { struct tw_osli_req_context *req = req_handle->osl_req_ctxt; struct tw_cl_req_packet *req_pkt = (struct tw_cl_req_packet *)(&req->req_pkt); struct tw_cl_scsi_req_packet *scsi_req; struct twa_softc *sc = req->ctlr; union ccb *ccb = (union ccb *)(req->orig_req); tw_osli_dbg_dprintf(10, sc, "entering"); if (req->state != TW_OSLI_REQ_STATE_BUSY) tw_osli_printf(sc, "request = %p, status = %d", TW_CL_SEVERITY_ERROR_STRING, TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER, 0x210A, "Unposted command completed!!", req, req->state); /* * Remove request from the busy queue. Just mark it complete. * There's no need to move it into the complete queue as we are * going to be done with it right now. */ req->state = TW_OSLI_REQ_STATE_COMPLETE; tw_osli_req_q_remove_item(req, TW_OSLI_BUSY_Q); tw_osli_unmap_request(req); req->deadline = 0; if (req->error_code) { /* This request never got submitted to the firmware. */ if (req->error_code == EBUSY) { /* * Cmd queue is full, or the Common Layer is out of * resources. The simq will already have been frozen. * When this ccb gets completed will unfreeze the simq. 
*/ ccb->ccb_h.status |= CAM_REQUEUE_REQ; } else if (req->error_code == EFBIG) ccb->ccb_h.status = CAM_REQ_TOO_BIG; else ccb->ccb_h.status = CAM_REQ_CMP_ERR; } else { scsi_req = &(req_pkt->gen_req_pkt.scsi_req); if (req_pkt->status == TW_CL_ERR_REQ_SUCCESS) ccb->ccb_h.status = CAM_REQ_CMP; else { if (req_pkt->status & TW_CL_ERR_REQ_INVALID_TARGET) ccb->ccb_h.status |= CAM_SEL_TIMEOUT; else if (req_pkt->status & TW_CL_ERR_REQ_INVALID_LUN) ccb->ccb_h.status |= CAM_DEV_NOT_THERE; else if (req_pkt->status & TW_CL_ERR_REQ_SCSI_ERROR) ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR; else if (req_pkt->status & TW_CL_ERR_REQ_BUS_RESET) ccb->ccb_h.status |= (CAM_REQUEUE_REQ | CAM_SCSI_BUS_RESET); /* * If none of the above errors occurred, simply * mark completion error. */ if (ccb->ccb_h.status == 0) ccb->ccb_h.status = CAM_REQ_CMP_ERR; if (req_pkt->status & TW_CL_ERR_REQ_AUTO_SENSE_VALID) { ccb->csio.sense_len = scsi_req->sense_len; ccb->ccb_h.status |= CAM_AUTOSNS_VALID; } } ccb->csio.scsi_status = scsi_req->scsi_status; } ccb->ccb_h.status &= ~CAM_SIM_QUEUED; mtx_lock(sc->sim_lock); xpt_done(ccb); mtx_unlock(sc->sim_lock); if (! req->error_code) /* twa_action will free the request otherwise */ tw_osli_req_q_insert_tail(req, TW_OSLI_FREE_Q); } Index: stable/11 =================================================================== --- stable/11 (revision 335137) +++ stable/11 (revision 335138) Property changes on: stable/11 ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head:r311351