Index: head/sys/dev/advansys/advansys.c =================================================================== --- head/sys/dev/advansys/advansys.c (revision 328522) +++ head/sys/dev/advansys/advansys.c (revision 328523) @@ -1,1404 +1,1406 @@ /*- * Generic driver for the Advanced Systems Inc. SCSI controllers * Product specific probe and attach routines can be found in: * * i386/isa/adv_isa.c ABP5140, ABP542, ABP5150, ABP842, ABP852 * pci/adv_pci.c ABP920, ABP930, ABP930U, ABP930UA, ABP940, ABP940U, * ABP940UA, ABP950, ABP960, ABP960U, ABP960UA, * ABP970, ABP970U * * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 1996-2000 Justin Gibbs. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /*- * Ported from: * advansys.c - Linux Host Driver for AdvanSys SCSI Adapters * * Copyright (c) 1995-1997 Advanced System Products, Inc. * All Rights Reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that redistributions of source * code retain the above copyright notice and this comment without * modification. 
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/conf.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/module.h>
#include <sys/mutex.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_sim.h>
#include <cam/cam_xpt_sim.h>
#include <cam/cam_debug.h>

#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>

#include <dev/advansys/advansys.h>

static void	adv_action(struct cam_sim *sim, union ccb *ccb);
static void	adv_execute_ccb(void *arg, bus_dma_segment_t *dm_segs,
				int nsegments, int error);
static void	adv_intr_locked(struct adv_softc *adv);
static void	adv_poll(struct cam_sim *sim);
static void	adv_run_doneq(struct adv_softc *adv);
static struct adv_ccb_info *
		adv_alloc_ccb_info(struct adv_softc *adv);
static void	adv_destroy_ccb_info(struct adv_softc *adv,
				     struct adv_ccb_info *cinfo);
static __inline struct adv_ccb_info *
		adv_get_ccb_info(struct adv_softc *adv);
static __inline void adv_free_ccb_info(struct adv_softc *adv,
				       struct adv_ccb_info *cinfo);
static __inline void adv_set_state(struct adv_softc *adv, adv_state state);
static __inline void adv_clear_state(struct adv_softc *adv, union ccb* ccb);
static void adv_clear_state_really(struct adv_softc *adv, union ccb* ccb);

static __inline struct adv_ccb_info *
adv_get_ccb_info(struct adv_softc *adv)
{
	struct adv_ccb_info *cinfo;

	if (!dumping)
		mtx_assert(&adv->lock, MA_OWNED);
	if ((cinfo = SLIST_FIRST(&adv->free_ccb_infos)) != NULL) {
		SLIST_REMOVE_HEAD(&adv->free_ccb_infos, links);
	} else {
		cinfo = adv_alloc_ccb_info(adv);
	}

	return (cinfo);
}

static __inline void
adv_free_ccb_info(struct adv_softc *adv, struct adv_ccb_info *cinfo)
{
	if (!dumping)
		mtx_assert(&adv->lock, MA_OWNED);
	cinfo->state = ACCB_FREE;
	SLIST_INSERT_HEAD(&adv->free_ccb_infos, cinfo, links);
}

static __inline void
adv_set_state(struct adv_softc *adv, adv_state state)
{
	if (adv->state == 0)
		xpt_freeze_simq(adv->sim, /*count*/1);
	adv->state |= state;
}

static __inline void
adv_clear_state(struct adv_softc *adv, union ccb* ccb)
{
	if (adv->state != 0)
		adv_clear_state_really(adv, ccb);
}

static void
adv_clear_state_really(struct adv_softc *adv, union ccb* ccb)
{
	if (!dumping)
		mtx_assert(&adv->lock, MA_OWNED);
	if ((adv->state & ADV_BUSDMA_BLOCK_CLEARED) != 0)
		adv->state &= ~(ADV_BUSDMA_BLOCK_CLEARED|ADV_BUSDMA_BLOCK);
	if ((adv->state & ADV_RESOURCE_SHORTAGE) != 0) {
		int openings;

		openings = adv->max_openings - adv->cur_active - ADV_MIN_FREE_Q;
		if (openings >= adv->openings_needed) {
			adv->state &= ~ADV_RESOURCE_SHORTAGE;
			adv->openings_needed = 0;
		}
	}
	if ((adv->state & ADV_IN_TIMEOUT) != 0) {
		struct adv_ccb_info *cinfo;

		cinfo = (struct adv_ccb_info *)ccb->ccb_h.ccb_cinfo_ptr;
		if ((cinfo->state & ACCB_RECOVERY_CCB) != 0) {
			struct ccb_hdr *ccb_h;

			/*
			 * We now traverse our list of pending CCBs
			 * and reinstate their timeouts.
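			 *
			 * Only the recovery CCB itself clears
			 * ADV_IN_TIMEOUT here; every other pending CCB
			 * simply has its callout re-armed with the
			 * timeout value it was originally queued with.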
*/ ccb_h = LIST_FIRST(&adv->pending_ccbs); while (ccb_h != NULL) { cinfo = ccb_h->ccb_cinfo_ptr; callout_reset_sbt(&cinfo->timer, SBT_1MS * ccb_h->timeout, 0, adv_timeout, ccb_h, 0); ccb_h = LIST_NEXT(ccb_h, sim_links.le); } adv->state &= ~ADV_IN_TIMEOUT; device_printf(adv->dev, "No longer in timeout\n"); } } if (adv->state == 0) ccb->ccb_h.status |= CAM_RELEASE_SIMQ; } void adv_map(void *arg, bus_dma_segment_t *segs, int nseg, int error) { bus_addr_t* physaddr; physaddr = (bus_addr_t*)arg; *physaddr = segs->ds_addr; } static void adv_action(struct cam_sim *sim, union ccb *ccb) { struct adv_softc *adv; CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("adv_action\n")); adv = (struct adv_softc *)cam_sim_softc(sim); mtx_assert(&adv->lock, MA_OWNED); switch (ccb->ccb_h.func_code) { /* Common cases first */ case XPT_SCSI_IO: /* Execute the requested I/O operation */ { struct ccb_hdr *ccb_h; struct ccb_scsiio *csio; struct adv_ccb_info *cinfo; int error; ccb_h = &ccb->ccb_h; csio = &ccb->csio; cinfo = adv_get_ccb_info(adv); if (cinfo == NULL) panic("XXX Handle CCB info error!!!"); ccb_h->ccb_cinfo_ptr = cinfo; cinfo->ccb = ccb; error = bus_dmamap_load_ccb(adv->buffer_dmat, cinfo->dmamap, ccb, adv_execute_ccb, csio, /*flags*/0); if (error == EINPROGRESS) { /* * So as to maintain ordering, freeze the controller * queue until our mapping is returned. */ adv_set_state(adv, ADV_BUSDMA_BLOCK); } break; } case XPT_RESET_DEV: /* Bus Device Reset the specified SCSI device */ case XPT_ABORT: /* Abort the specified CCB */ /* XXX Implement */ ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); break; #define IS_CURRENT_SETTINGS(c) (c->type == CTS_TYPE_CURRENT_SETTINGS) #define IS_USER_SETTINGS(c) (c->type == CTS_TYPE_USER_SETTINGS) case XPT_SET_TRAN_SETTINGS: { struct ccb_trans_settings_scsi *scsi; struct ccb_trans_settings_spi *spi; struct ccb_trans_settings *cts; target_bit_vector targ_mask; struct adv_transinfo *tconf; u_int update_type; cts = &ccb->cts; targ_mask = ADV_TID_TO_TARGET_MASK(cts->ccb_h.target_id); update_type = 0; /* * The user must specify which type of settings he wishes * to change. */ if (IS_CURRENT_SETTINGS(cts) && !IS_USER_SETTINGS(cts)) { tconf = &adv->tinfo[cts->ccb_h.target_id].current; update_type |= ADV_TRANS_GOAL; } else if (IS_USER_SETTINGS(cts) && !IS_CURRENT_SETTINGS(cts)) { tconf = &adv->tinfo[cts->ccb_h.target_id].user; update_type |= ADV_TRANS_USER; } else { ccb->ccb_h.status = CAM_REQ_INVALID; break; } scsi = &cts->proto_specific.scsi; spi = &cts->xport_specific.spi; if ((update_type & ADV_TRANS_GOAL) != 0) { if ((spi->valid & CTS_SPI_VALID_DISC) != 0) { if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0) adv->disc_enable |= targ_mask; else adv->disc_enable &= ~targ_mask; adv_write_lram_8(adv, ADVV_DISC_ENABLE_B, adv->disc_enable); } if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) { if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) adv->cmd_qng_enabled |= targ_mask; else adv->cmd_qng_enabled &= ~targ_mask; } } if ((update_type & ADV_TRANS_USER) != 0) { if ((spi->valid & CTS_SPI_VALID_DISC) != 0) { if ((spi->flags & CTS_SPI_VALID_DISC) != 0) adv->user_disc_enable |= targ_mask; else adv->user_disc_enable &= ~targ_mask; } if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) { if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) adv->user_cmd_qng_enabled |= targ_mask; else adv->user_cmd_qng_enabled &= ~targ_mask; } } /* * If the user specifies either the sync rate, or offset, * but not both, the unspecified parameter defaults to its * current value in transfer negotiations. 
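		 *
		 * A hypothetical example of the defaulting rule: a
		 * caller that sets CTS_SPI_VALID_SYNC_RATE with
		 * sync_period = 25 (a 10MHz request) but leaves
		 * CTS_SPI_VALID_SYNC_OFFSET clear gets the offset
		 * currently in tconf->offset, and the combined pair is
		 * then validated by adv_period_offset_to_sdtr().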
*/ if (((spi->valid & CTS_SPI_VALID_SYNC_RATE) != 0) || ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) != 0)) { /* * If the user provided a sync rate but no offset, * use the current offset. */ if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) == 0) spi->sync_offset = tconf->offset; /* * If the user provided an offset but no sync rate, * use the current sync rate. */ if ((spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0) spi->sync_period = tconf->period; adv_period_offset_to_sdtr(adv, &spi->sync_period, &spi->sync_offset, cts->ccb_h.target_id); adv_set_syncrate(adv, /*struct cam_path */NULL, cts->ccb_h.target_id, spi->sync_period, spi->sync_offset, update_type); } ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); break; } case XPT_GET_TRAN_SETTINGS: /* Get default/user set transfer settings for the target */ { struct ccb_trans_settings_scsi *scsi; struct ccb_trans_settings_spi *spi; struct ccb_trans_settings *cts; struct adv_transinfo *tconf; target_bit_vector target_mask; cts = &ccb->cts; target_mask = ADV_TID_TO_TARGET_MASK(cts->ccb_h.target_id); scsi = &cts->proto_specific.scsi; spi = &cts->xport_specific.spi; cts->protocol = PROTO_SCSI; cts->protocol_version = SCSI_REV_2; cts->transport = XPORT_SPI; cts->transport_version = 2; scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB; spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB; if (cts->type == CTS_TYPE_CURRENT_SETTINGS) { tconf = &adv->tinfo[cts->ccb_h.target_id].current; if ((adv->disc_enable & target_mask) != 0) spi->flags |= CTS_SPI_FLAGS_DISC_ENB; if ((adv->cmd_qng_enabled & target_mask) != 0) scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB; } else { tconf = &adv->tinfo[cts->ccb_h.target_id].user; if ((adv->user_disc_enable & target_mask) != 0) spi->flags |= CTS_SPI_FLAGS_DISC_ENB; if ((adv->user_cmd_qng_enabled & target_mask) != 0) scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB; } spi->sync_period = tconf->period; spi->sync_offset = tconf->offset; spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT; spi->valid = CTS_SPI_VALID_SYNC_RATE | CTS_SPI_VALID_SYNC_OFFSET | CTS_SPI_VALID_BUS_WIDTH | CTS_SPI_VALID_DISC; scsi->valid = CTS_SCSI_VALID_TQ; ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); break; } case XPT_CALC_GEOMETRY: { int extended; extended = (adv->control & ADV_CNTL_BIOS_GT_1GB) != 0; cam_calc_geometry(&ccb->ccg, extended); xpt_done(ccb); break; } case XPT_RESET_BUS: /* Reset the specified SCSI bus */ { adv_stop_execution(adv); adv_reset_bus(adv, /*initiate_reset*/TRUE); adv_start_execution(adv); ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); break; } case XPT_TERM_IO: /* Terminate the I/O process */ /* XXX Implement */ ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); break; case XPT_PATH_INQ: /* Path routing inquiry */ { struct ccb_pathinq *cpi = &ccb->cpi; cpi->version_num = 1; /* XXX??? */ cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE; cpi->target_sprt = 0; cpi->hba_misc = 0; cpi->hba_eng_cnt = 0; cpi->max_target = 7; cpi->max_lun = 7; cpi->initiator_id = adv->scsi_id; cpi->bus_id = cam_sim_bus(sim); cpi->base_transfer_speed = 3300; strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); strlcpy(cpi->hba_vid, "Advansys", HBA_IDLEN); strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); cpi->unit_number = cam_sim_unit(sim); cpi->ccb_h.status = CAM_REQ_CMP; cpi->transport = XPORT_SPI; cpi->transport_version = 2; cpi->protocol = PROTO_SCSI; cpi->protocol_version = SCSI_REV_2; xpt_done(ccb); break; } default: ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); break; } } /* * Currently, the output of bus_dmammap_load suits our needs just * fine, but should it change, we'd need to do something here. 
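 *
 * The macro below is just a cast; it assumes that bus_dma_segment_t
 * and struct adv_sg_entry share the same (address, length) pair
 * layout, which lets the segment array be handed to the S/G setup
 * code in place, without a copy.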
*/ #define adv_fixup_dmasegs(adv, dm_segs) (struct adv_sg_entry *)(dm_segs) static void adv_execute_ccb(void *arg, bus_dma_segment_t *dm_segs, int nsegments, int error) { struct ccb_scsiio *csio; struct ccb_hdr *ccb_h; struct cam_sim *sim; struct adv_softc *adv; struct adv_ccb_info *cinfo; struct adv_scsi_q scsiq; struct adv_sg_head sghead; csio = (struct ccb_scsiio *)arg; ccb_h = &csio->ccb_h; sim = xpt_path_sim(ccb_h->path); adv = (struct adv_softc *)cam_sim_softc(sim); cinfo = (struct adv_ccb_info *)csio->ccb_h.ccb_cinfo_ptr; if (!dumping) mtx_assert(&adv->lock, MA_OWNED); /* * Setup our done routine to release the simq on * the next ccb that completes. */ if ((adv->state & ADV_BUSDMA_BLOCK) != 0) adv->state |= ADV_BUSDMA_BLOCK_CLEARED; if ((ccb_h->flags & CAM_CDB_POINTER) != 0) { if ((ccb_h->flags & CAM_CDB_PHYS) == 0) { /* XXX Need phystovirt!!!! */ /* How about pmap_kenter??? */ scsiq.cdbptr = csio->cdb_io.cdb_ptr; } else { scsiq.cdbptr = csio->cdb_io.cdb_ptr; } } else { scsiq.cdbptr = csio->cdb_io.cdb_bytes; } /* * Build up the request */ scsiq.q1.status = 0; scsiq.q1.q_no = 0; scsiq.q1.cntl = 0; scsiq.q1.sg_queue_cnt = 0; scsiq.q1.target_id = ADV_TID_TO_TARGET_MASK(ccb_h->target_id); scsiq.q1.target_lun = ccb_h->target_lun; scsiq.q1.sense_len = csio->sense_len; scsiq.q1.extra_bytes = 0; scsiq.q2.ccb_index = cinfo - adv->ccb_infos; scsiq.q2.target_ix = ADV_TIDLUN_TO_IX(ccb_h->target_id, ccb_h->target_lun); scsiq.q2.flag = 0; scsiq.q2.cdb_len = csio->cdb_len; if ((ccb_h->flags & CAM_TAG_ACTION_VALID) != 0) scsiq.q2.tag_code = csio->tag_action; else scsiq.q2.tag_code = 0; scsiq.q2.vm_id = 0; if (nsegments != 0) { bus_dmasync_op_t op; scsiq.q1.data_addr = dm_segs->ds_addr; scsiq.q1.data_cnt = dm_segs->ds_len; if (nsegments > 1) { scsiq.q1.cntl |= QC_SG_HEAD; sghead.entry_cnt = sghead.entry_to_copy = nsegments; sghead.res = 0; sghead.sg_list = adv_fixup_dmasegs(adv, dm_segs); scsiq.sg_head = &sghead; } else { scsiq.sg_head = NULL; } if ((ccb_h->flags & CAM_DIR_MASK) == CAM_DIR_IN) op = BUS_DMASYNC_PREREAD; else op = BUS_DMASYNC_PREWRITE; bus_dmamap_sync(adv->buffer_dmat, cinfo->dmamap, op); } else { scsiq.q1.data_addr = 0; scsiq.q1.data_cnt = 0; scsiq.sg_head = NULL; } /* * Last time we need to check if this SCB needs to * be aborted. 
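	 *
	 * The dmamap load may have been deferred, so a timeout or
	 * abort can already have given this CCB a final status.
	 * Anything other than CAM_REQ_INPROG means we unload the
	 * mapping and complete the CCB without starting the command.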
*/ if (ccb_h->status != CAM_REQ_INPROG) { if (nsegments != 0) bus_dmamap_unload(adv->buffer_dmat, cinfo->dmamap); adv_clear_state(adv, (union ccb *)csio); adv_free_ccb_info(adv, cinfo); xpt_done((union ccb *)csio); return; } if (adv_execute_scsi_queue(adv, &scsiq, csio->dxfer_len) != 0) { /* Temporary resource shortage */ adv_set_state(adv, ADV_RESOURCE_SHORTAGE); if (nsegments != 0) bus_dmamap_unload(adv->buffer_dmat, cinfo->dmamap); csio->ccb_h.status = CAM_REQUEUE_REQ; adv_clear_state(adv, (union ccb *)csio); adv_free_ccb_info(adv, cinfo); xpt_done((union ccb *)csio); return; } cinfo->state |= ACCB_ACTIVE; ccb_h->status |= CAM_SIM_QUEUED; LIST_INSERT_HEAD(&adv->pending_ccbs, ccb_h, sim_links.le); /* Schedule our timeout */ callout_reset_sbt(&cinfo->timer, SBT_1MS * ccb_h->timeout, 0, adv_timeout, csio, 0); } static struct adv_ccb_info * adv_alloc_ccb_info(struct adv_softc *adv) { int error; struct adv_ccb_info *cinfo; cinfo = &adv->ccb_infos[adv->ccb_infos_allocated]; cinfo->state = ACCB_FREE; callout_init_mtx(&cinfo->timer, &adv->lock, 0); error = bus_dmamap_create(adv->buffer_dmat, /*flags*/0, &cinfo->dmamap); if (error != 0) { device_printf(adv->dev, "Unable to allocate CCB info " "dmamap - error %d\n", error); return (NULL); } adv->ccb_infos_allocated++; return (cinfo); } static void adv_destroy_ccb_info(struct adv_softc *adv, struct adv_ccb_info *cinfo) { callout_drain(&cinfo->timer); bus_dmamap_destroy(adv->buffer_dmat, cinfo->dmamap); } void adv_timeout(void *arg) { union ccb *ccb; struct adv_softc *adv; struct adv_ccb_info *cinfo, *cinfo2; ccb = (union ccb *)arg; adv = (struct adv_softc *)xpt_path_sim(ccb->ccb_h.path)->softc; cinfo = (struct adv_ccb_info *)ccb->ccb_h.ccb_cinfo_ptr; mtx_assert(&adv->lock, MA_OWNED); xpt_print_path(ccb->ccb_h.path); printf("Timed out\n"); /* Have we been taken care of already?? */ if (cinfo == NULL || cinfo->state == ACCB_FREE) { return; } adv_stop_execution(adv); if ((cinfo->state & ACCB_ABORT_QUEUED) == 0) { struct ccb_hdr *ccb_h; /* * In order to simplify the recovery process, we ask the XPT * layer to halt the queue of new transactions and we traverse * the list of pending CCBs and remove their timeouts. This * means that the driver attempts to clear only one error * condition at a time. In general, timeouts that occur * close together are related anyway, so there is no benefit * in attempting to handle errors in parallel. Timeouts will * be reinstated when the recovery process ends. 
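		 *
		 * The recovery sequence from here is: freeze the SIM
		 * queue via ADV_IN_TIMEOUT, stop the callout on every
		 * pending CCB, attempt to abort the victim, and re-arm
		 * this CCB's timer for two seconds; a second expiry
		 * escalates to a full bus reset.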
*/ adv_set_state(adv, ADV_IN_TIMEOUT); /* This CCB is the CCB representing our recovery actions */ cinfo->state |= ACCB_RECOVERY_CCB|ACCB_ABORT_QUEUED; ccb_h = LIST_FIRST(&adv->pending_ccbs); while (ccb_h != NULL) { cinfo2 = ccb_h->ccb_cinfo_ptr; callout_stop(&cinfo2->timer); ccb_h = LIST_NEXT(ccb_h, sim_links.le); } /* XXX Should send a BDR */ /* Attempt an abort as our first tact */ xpt_print_path(ccb->ccb_h.path); printf("Attempting abort\n"); adv_abort_ccb(adv, ccb->ccb_h.target_id, ccb->ccb_h.target_lun, ccb, CAM_CMD_TIMEOUT, /*queued_only*/FALSE); callout_reset(&cinfo->timer, 2 * hz, adv_timeout, ccb); } else { /* Our attempt to perform an abort failed, go for a reset */ xpt_print_path(ccb->ccb_h.path); printf("Resetting bus\n"); ccb->ccb_h.status &= ~CAM_STATUS_MASK; ccb->ccb_h.status |= CAM_CMD_TIMEOUT; adv_reset_bus(adv, /*initiate_reset*/TRUE); } adv_start_execution(adv); } struct adv_softc * adv_alloc(device_t dev, struct resource *res, long offset) { struct adv_softc *adv = device_get_softc(dev); /* * Allocate a storage area for us */ LIST_INIT(&adv->pending_ccbs); SLIST_INIT(&adv->free_ccb_infos); adv->dev = dev; adv->res = res; adv->reg_off = offset; mtx_init(&adv->lock, "adv", NULL, MTX_DEF); return(adv); } void adv_free(struct adv_softc *adv) { switch (adv->init_level) { case 6: { struct adv_ccb_info *cinfo; while ((cinfo = SLIST_FIRST(&adv->free_ccb_infos)) != NULL) { SLIST_REMOVE_HEAD(&adv->free_ccb_infos, links); adv_destroy_ccb_info(adv, cinfo); } bus_dmamap_unload(adv->sense_dmat, adv->sense_dmamap); } case 5: bus_dmamem_free(adv->sense_dmat, adv->sense_buffers, adv->sense_dmamap); case 4: bus_dma_tag_destroy(adv->sense_dmat); case 3: bus_dma_tag_destroy(adv->buffer_dmat); case 2: bus_dma_tag_destroy(adv->parent_dmat); case 1: if (adv->ccb_infos != NULL) free(adv->ccb_infos, M_DEVBUF); case 0: mtx_destroy(&adv->lock); break; } } int adv_init(struct adv_softc *adv) { struct adv_eeprom_config eeprom_config; int checksum, i; int max_sync; u_int16_t config_lsw; u_int16_t config_msw; mtx_lock(&adv->lock); adv_lib_init(adv); /* * Stop script execution. */ adv_write_lram_16(adv, ADV_HALTCODE_W, 0x00FE); adv_stop_execution(adv); if (adv_stop_chip(adv) == 0 || adv_is_chip_halted(adv) == 0) { mtx_unlock(&adv->lock); device_printf(adv->dev, "Unable to halt adapter. Initialization failed\n"); return (1); } ADV_OUTW(adv, ADV_REG_PROG_COUNTER, ADV_MCODE_START_ADDR); if (ADV_INW(adv, ADV_REG_PROG_COUNTER) != ADV_MCODE_START_ADDR) { mtx_unlock(&adv->lock); device_printf(adv->dev, "Unable to set program counter. Initialization failed\n"); return (1); } config_msw = ADV_INW(adv, ADV_CONFIG_MSW); config_lsw = ADV_INW(adv, ADV_CONFIG_LSW); if ((config_msw & ADV_CFG_MSW_CLR_MASK) != 0) { config_msw &= ~ADV_CFG_MSW_CLR_MASK; /* * XXX The Linux code flags this as an error, * but what should we report to the user??? * It seems that clearing the config register * makes this error recoverable. */ ADV_OUTW(adv, ADV_CONFIG_MSW, config_msw); } /* Suck in the configuration from the EEProm */ checksum = adv_get_eeprom_config(adv, &eeprom_config); if (ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_AUTO_CONFIG) { /* * XXX The Linux code sets a warning level for this * condition, yet nothing of meaning is printed to * the user. What does this mean??? 
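		 *
		 * Whatever the warning means, the only effect of the
		 * code below is to make the EEPROM image agree with the
		 * live config registers before the checksum and range
		 * checks are applied.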
*/ if (adv->chip_version == 3) { if (eeprom_config.cfg_lsw != config_lsw) eeprom_config.cfg_lsw = config_lsw; if (eeprom_config.cfg_msw != config_msw) { eeprom_config.cfg_msw = config_msw; } } } if (checksum == eeprom_config.chksum) { /* Range/Sanity checking */ if (eeprom_config.max_total_qng < ADV_MIN_TOTAL_QNG) { eeprom_config.max_total_qng = ADV_MIN_TOTAL_QNG; } if (eeprom_config.max_total_qng > ADV_MAX_TOTAL_QNG) { eeprom_config.max_total_qng = ADV_MAX_TOTAL_QNG; } if (eeprom_config.max_tag_qng > eeprom_config.max_total_qng) { eeprom_config.max_tag_qng = eeprom_config.max_total_qng; } if (eeprom_config.max_tag_qng < ADV_MIN_TAG_Q_PER_DVC) { eeprom_config.max_tag_qng = ADV_MIN_TAG_Q_PER_DVC; } adv->max_openings = eeprom_config.max_total_qng; adv->user_disc_enable = eeprom_config.disc_enable; adv->user_cmd_qng_enabled = eeprom_config.use_cmd_qng; adv->isa_dma_speed = EEPROM_DMA_SPEED(eeprom_config); adv->scsi_id = EEPROM_SCSIID(eeprom_config) & ADV_MAX_TID; EEPROM_SET_SCSIID(eeprom_config, adv->scsi_id); adv->control = eeprom_config.cntl; for (i = 0; i <= ADV_MAX_TID; i++) { u_int8_t sync_data; if ((eeprom_config.init_sdtr & (0x1 << i)) == 0) sync_data = 0; else sync_data = eeprom_config.sdtr_data[i]; adv_sdtr_to_period_offset(adv, sync_data, &adv->tinfo[i].user.period, &adv->tinfo[i].user.offset, i); } config_lsw = eeprom_config.cfg_lsw; eeprom_config.cfg_msw = config_msw; } else { u_int8_t sync_data; device_printf(adv->dev, "Warning EEPROM Checksum mismatch. " "Using default device parameters\n"); /* Set reasonable defaults since we can't read the EEPROM */ adv->isa_dma_speed = /*ADV_DEF_ISA_DMA_SPEED*/1; adv->max_openings = ADV_DEF_MAX_TOTAL_QNG; adv->disc_enable = TARGET_BIT_VECTOR_SET; adv->user_disc_enable = TARGET_BIT_VECTOR_SET; adv->cmd_qng_enabled = TARGET_BIT_VECTOR_SET; adv->user_cmd_qng_enabled = TARGET_BIT_VECTOR_SET; adv->scsi_id = 7; adv->control = 0xFFFF; if (adv->chip_version == ADV_CHIP_VER_PCI_ULTRA_3050) /* Default to no Ultra to support the 3030 */ adv->control &= ~ADV_CNTL_SDTR_ENABLE_ULTRA; sync_data = ADV_DEF_SDTR_OFFSET | (ADV_DEF_SDTR_INDEX << 4); for (i = 0; i <= ADV_MAX_TID; i++) { adv_sdtr_to_period_offset(adv, sync_data, &adv->tinfo[i].user.period, &adv->tinfo[i].user.offset, i); } config_lsw |= ADV_CFG_LSW_SCSI_PARITY_ON; } config_msw &= ~ADV_CFG_MSW_CLR_MASK; config_lsw |= ADV_CFG_LSW_HOST_INT_ON; if ((adv->type & (ADV_PCI|ADV_ULTRA)) == (ADV_PCI|ADV_ULTRA) && (adv->control & ADV_CNTL_SDTR_ENABLE_ULTRA) == 0) /* 25ns or 10MHz */ max_sync = 25; else /* Unlimited */ max_sync = 0; for (i = 0; i <= ADV_MAX_TID; i++) { if (adv->tinfo[i].user.period < max_sync) adv->tinfo[i].user.period = max_sync; } if (adv_test_external_lram(adv) == 0) { if ((adv->type & (ADV_PCI|ADV_ULTRA)) == (ADV_PCI|ADV_ULTRA)) { eeprom_config.max_total_qng = ADV_MAX_PCI_ULTRA_INRAM_TOTAL_QNG; eeprom_config.max_tag_qng = ADV_MAX_PCI_ULTRA_INRAM_TAG_QNG; } else { eeprom_config.cfg_msw |= 0x0800; config_msw |= 0x0800; eeprom_config.max_total_qng = ADV_MAX_PCI_INRAM_TOTAL_QNG; eeprom_config.max_tag_qng = ADV_MAX_INRAM_TAG_QNG; } adv->max_openings = eeprom_config.max_total_qng; } ADV_OUTW(adv, ADV_CONFIG_MSW, config_msw); ADV_OUTW(adv, ADV_CONFIG_LSW, config_lsw); #if 0 /* * Don't write the eeprom data back for now. * I'd rather not mess up the user's card. We also don't * fully sanitize the eeprom settings above for the write-back * to be 100% correct. */ if (adv_set_eeprom_config(adv, &eeprom_config) != 0) device_printf(adv->dev, "WARNING! 
Failure writing to EEPROM.\n"); #endif adv_set_chip_scsiid(adv, adv->scsi_id); if (adv_init_lram_and_mcode(adv)) { mtx_unlock(&adv->lock); return (1); } adv->disc_enable = adv->user_disc_enable; adv_write_lram_8(adv, ADVV_DISC_ENABLE_B, adv->disc_enable); for (i = 0; i <= ADV_MAX_TID; i++) { /* * Start off in async mode. */ adv_set_syncrate(adv, /*struct cam_path */NULL, i, /*period*/0, /*offset*/0, ADV_TRANS_CUR); /* * Enable the use of tagged commands on all targets. * This allows the kernel driver to make up it's own mind * as it sees fit to tag queue instead of having the * firmware try and second guess the tag_code settins. */ adv_write_lram_8(adv, ADVV_MAX_DVC_QNG_BEG + i, adv->max_openings); } adv_write_lram_8(adv, ADVV_USE_TAGGED_QNG_B, TARGET_BIT_VECTOR_SET); adv_write_lram_8(adv, ADVV_CAN_TAGGED_QNG_B, TARGET_BIT_VECTOR_SET); device_printf(adv->dev, "AdvanSys %s Host Adapter, SCSI ID %d, queue depth %d\n", (adv->type & ADV_ULTRA) && (max_sync == 0) ? "Ultra SCSI" : "SCSI", adv->scsi_id, adv->max_openings); mtx_unlock(&adv->lock); return (0); } void adv_intr(void *arg) { struct adv_softc *adv; adv = arg; mtx_lock(&adv->lock); adv_intr_locked(adv); mtx_unlock(&adv->lock); } void adv_intr_locked(struct adv_softc *adv) { u_int16_t chipstat; u_int16_t saved_ram_addr; u_int8_t ctrl_reg; u_int8_t saved_ctrl_reg; u_int8_t host_flag; if (!dumping) mtx_assert(&adv->lock, MA_OWNED); chipstat = ADV_INW(adv, ADV_CHIP_STATUS); /* Is it for us? */ if ((chipstat & (ADV_CSW_INT_PENDING|ADV_CSW_SCSI_RESET_LATCH)) == 0) return; ctrl_reg = ADV_INB(adv, ADV_CHIP_CTRL); saved_ctrl_reg = ctrl_reg & (~(ADV_CC_SCSI_RESET | ADV_CC_CHIP_RESET | ADV_CC_SINGLE_STEP | ADV_CC_DIAG | ADV_CC_TEST)); if ((chipstat & (ADV_CSW_SCSI_RESET_LATCH|ADV_CSW_SCSI_RESET_ACTIVE))) { device_printf(adv->dev, "Detected Bus Reset\n"); adv_reset_bus(adv, /*initiate_reset*/FALSE); return; } if ((chipstat & ADV_CSW_INT_PENDING) != 0) { saved_ram_addr = ADV_INW(adv, ADV_LRAM_ADDR); host_flag = adv_read_lram_8(adv, ADVV_HOST_FLAG_B); adv_write_lram_8(adv, ADVV_HOST_FLAG_B, host_flag | ADV_HOST_FLAG_IN_ISR); adv_ack_interrupt(adv); if ((chipstat & ADV_CSW_HALTED) != 0 && (ctrl_reg & ADV_CC_SINGLE_STEP) != 0) { adv_isr_chip_halted(adv); saved_ctrl_reg &= ~ADV_CC_HALT; } else { adv_run_doneq(adv); } ADV_OUTW(adv, ADV_LRAM_ADDR, saved_ram_addr); #ifdef DIAGNOSTIC if (ADV_INW(adv, ADV_LRAM_ADDR) != saved_ram_addr) panic("adv_intr: Unable to set LRAM addr"); #endif adv_write_lram_8(adv, ADVV_HOST_FLAG_B, host_flag); } ADV_OUTB(adv, ADV_CHIP_CTRL, saved_ctrl_reg); } static void adv_run_doneq(struct adv_softc *adv) { struct adv_q_done_info scsiq; u_int doneq_head; u_int done_qno; doneq_head = adv_read_lram_16(adv, ADVV_DONE_Q_TAIL_W) & 0xFF; done_qno = adv_read_lram_8(adv, ADV_QNO_TO_QADDR(doneq_head) + ADV_SCSIQ_B_FWD); while (done_qno != ADV_QLINK_END) { union ccb* ccb; struct adv_ccb_info *cinfo; u_int done_qaddr; u_int sg_queue_cnt; done_qaddr = ADV_QNO_TO_QADDR(done_qno); /* Pull status from this request */ sg_queue_cnt = adv_copy_lram_doneq(adv, done_qaddr, &scsiq, adv->max_dma_count); /* Mark it as free */ adv_write_lram_8(adv, done_qaddr + ADV_SCSIQ_B_STATUS, scsiq.q_status & ~(QS_READY|QS_ABORTED)); /* Process request based on retrieved info */ if ((scsiq.cntl & QC_SG_HEAD) != 0) { u_int i; /* * S/G based request. Free all of the queue * structures that contained S/G information. 
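			 *
			 * A request with an S/G chain consumed
			 * sg_queue_cnt queues beyond the command queue
			 * itself, linked through their ADV_SCSIQ_B_FWD
			 * bytes; each link is walked and marked QS_FREE
			 * before the command queue is retired.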
*/ for (i = 0; i < sg_queue_cnt; i++) { done_qno = adv_read_lram_8(adv, done_qaddr + ADV_SCSIQ_B_FWD); #ifdef DIAGNOSTIC if (done_qno == ADV_QLINK_END) { panic("adv_qdone: Corrupted SG " "list encountered"); } #endif done_qaddr = ADV_QNO_TO_QADDR(done_qno); /* Mark SG queue as free */ adv_write_lram_8(adv, done_qaddr + ADV_SCSIQ_B_STATUS, QS_FREE); } } else sg_queue_cnt = 0; #ifdef DIAGNOSTIC if (adv->cur_active < (sg_queue_cnt + 1)) panic("adv_qdone: Attempting to free more " "queues than are active"); #endif adv->cur_active -= sg_queue_cnt + 1; if ((scsiq.q_status != QS_DONE) && (scsiq.q_status & QS_ABORTED) == 0) panic("adv_qdone: completed scsiq with unknown status"); scsiq.remain_bytes += scsiq.extra_bytes; if ((scsiq.d3.done_stat == QD_WITH_ERROR) && (scsiq.d3.host_stat == QHSTA_M_DATA_OVER_RUN)) { if ((scsiq.cntl & (QC_DATA_IN|QC_DATA_OUT)) == 0) { scsiq.d3.done_stat = QD_NO_ERROR; scsiq.d3.host_stat = QHSTA_NO_ERROR; } } cinfo = &adv->ccb_infos[scsiq.d2.ccb_index]; ccb = cinfo->ccb; ccb->csio.resid = scsiq.remain_bytes; adv_done(adv, ccb, scsiq.d3.done_stat, scsiq.d3.host_stat, scsiq.d3.scsi_stat, scsiq.q_no); doneq_head = done_qno; done_qno = adv_read_lram_8(adv, done_qaddr + ADV_SCSIQ_B_FWD); } adv_write_lram_16(adv, ADVV_DONE_Q_TAIL_W, doneq_head); } void adv_done(struct adv_softc *adv, union ccb *ccb, u_int done_stat, u_int host_stat, u_int scsi_status, u_int q_no) { struct adv_ccb_info *cinfo; if (!dumping) mtx_assert(&adv->lock, MA_OWNED); cinfo = (struct adv_ccb_info *)ccb->ccb_h.ccb_cinfo_ptr; LIST_REMOVE(&ccb->ccb_h, sim_links.le); callout_stop(&cinfo->timer); if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) { bus_dmasync_op_t op; if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) op = BUS_DMASYNC_POSTREAD; else op = BUS_DMASYNC_POSTWRITE; bus_dmamap_sync(adv->buffer_dmat, cinfo->dmamap, op); bus_dmamap_unload(adv->buffer_dmat, cinfo->dmamap); } switch (done_stat) { case QD_NO_ERROR: if (host_stat == QHSTA_NO_ERROR) { ccb->ccb_h.status = CAM_REQ_CMP; break; } xpt_print_path(ccb->ccb_h.path); printf("adv_done - queue done without error, " "but host status non-zero(%x)\n", host_stat); /*FALLTHROUGH*/ case QD_WITH_ERROR: switch (host_stat) { case QHSTA_M_TARGET_STATUS_BUSY: case QHSTA_M_BAD_QUEUE_FULL_OR_BUSY: /* * Assume that if we were a tagged transaction * the target reported queue full. Otherwise, * report busy. The firmware really should just * pass the original status back up to us even * if it thinks the target was in error for * returning this status as no other transactions * from this initiator are in effect, but this * ignores multi-initiator setups and there is * evidence that the firmware gets its per-device * transaction counts screwed up occasionally. 
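			 *
			 * Concretely: a tagged CCB is completed as if
			 * the target returned QUEUE FULL, an untagged
			 * one (or an explicit target-status-busy report)
			 * as if it returned BUSY, and the device's other
			 * queued transactions are requeued via
			 * adv_abort_ccb() below.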
*/ ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR; if ((ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0 && host_stat != QHSTA_M_TARGET_STATUS_BUSY) scsi_status = SCSI_STATUS_QUEUE_FULL; else scsi_status = SCSI_STATUS_BUSY; adv_abort_ccb(adv, ccb->ccb_h.target_id, ccb->ccb_h.target_lun, /*ccb*/NULL, CAM_REQUEUE_REQ, /*queued_only*/TRUE); /*FALLTHROUGH*/ case QHSTA_M_NO_AUTO_REQ_SENSE: case QHSTA_NO_ERROR: ccb->csio.scsi_status = scsi_status; switch (scsi_status) { case SCSI_STATUS_CHECK_COND: case SCSI_STATUS_CMD_TERMINATED: ccb->ccb_h.status |= CAM_AUTOSNS_VALID; /* Structure copy */ ccb->csio.sense_data = adv->sense_buffers[q_no - 1]; /* FALLTHROUGH */ case SCSI_STATUS_BUSY: case SCSI_STATUS_RESERV_CONFLICT: case SCSI_STATUS_QUEUE_FULL: case SCSI_STATUS_COND_MET: case SCSI_STATUS_INTERMED: case SCSI_STATUS_INTERMED_COND_MET: ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR; break; case SCSI_STATUS_OK: ccb->ccb_h.status |= CAM_REQ_CMP; break; } break; case QHSTA_M_SEL_TIMEOUT: ccb->ccb_h.status = CAM_SEL_TIMEOUT; break; case QHSTA_M_DATA_OVER_RUN: ccb->ccb_h.status = CAM_DATA_RUN_ERR; break; case QHSTA_M_UNEXPECTED_BUS_FREE: ccb->ccb_h.status = CAM_UNEXP_BUSFREE; break; case QHSTA_M_BAD_BUS_PHASE_SEQ: ccb->ccb_h.status = CAM_SEQUENCE_FAIL; break; case QHSTA_M_BAD_CMPL_STATUS_IN: /* No command complete after a status message */ ccb->ccb_h.status = CAM_SEQUENCE_FAIL; break; case QHSTA_D_EXE_SCSI_Q_BUSY_TIMEOUT: case QHSTA_M_WTM_TIMEOUT: case QHSTA_M_HUNG_REQ_SCSI_BUS_RESET: /* The SCSI bus hung in a phase */ ccb->ccb_h.status = CAM_SEQUENCE_FAIL; adv_reset_bus(adv, /*initiate_reset*/TRUE); break; case QHSTA_M_AUTO_REQ_SENSE_FAIL: ccb->ccb_h.status = CAM_AUTOSENSE_FAIL; break; case QHSTA_D_QDONE_SG_LIST_CORRUPTED: case QHSTA_D_ASC_DVC_ERROR_CODE_SET: case QHSTA_D_HOST_ABORT_FAILED: case QHSTA_D_EXE_SCSI_Q_FAILED: case QHSTA_D_ASPI_NO_BUF_POOL: case QHSTA_M_BAD_TAG_CODE: case QHSTA_D_LRAM_CMP_ERROR: case QHSTA_M_MICRO_CODE_ERROR_HALT: default: panic("%s: Unhandled Host status error %x", device_get_nameunit(adv->dev), host_stat); /* NOTREACHED */ } break; case QD_ABORTED_BY_HOST: /* Don't clobber any, more explicit, error codes we've set */ if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) ccb->ccb_h.status = CAM_REQ_ABORTED; break; default: xpt_print_path(ccb->ccb_h.path); printf("adv_done - queue done with unknown status %x:%x\n", done_stat, host_stat); ccb->ccb_h.status = CAM_REQ_CMP_ERR; break; } adv_clear_state(adv, ccb); if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP && (ccb->ccb_h.status & CAM_DEV_QFRZN) == 0) { xpt_freeze_devq(ccb->ccb_h.path, /*count*/1); ccb->ccb_h.status |= CAM_DEV_QFRZN; } adv_free_ccb_info(adv, cinfo); /* * Null this out so that we catch driver bugs that cause a * ccb to be completed twice. */ ccb->ccb_h.ccb_cinfo_ptr = NULL; ccb->ccb_h.status &= ~CAM_SIM_QUEUED; xpt_done(ccb); } /* * Function to poll for command completion when * interrupts are disabled (crash dumps) */ static void adv_poll(struct cam_sim *sim) { adv_intr_locked(cam_sim_softc(sim)); } /* * Attach all the sub-devices we can find */ int adv_attach(adv) struct adv_softc *adv; { struct ccb_setasync csa; struct cam_devq *devq; int max_sg; /* * Allocate an array of ccb mapping structures. We put the * index of the ccb_info structure into the queue representing * a transaction and use it for mapping the queue to the * upper level SCSI transaction it represents. 
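	 *
	 * The index travels to the firmware in scsiq.q2.ccb_index when
	 * a command is queued and comes back in scsiq.d2.ccb_index when
	 * adv_run_doneq() reaps it, giving an O(1) queue-to-CCB lookup.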
*/ adv->ccb_infos = malloc(sizeof(*adv->ccb_infos) * adv->max_openings, M_DEVBUF, M_NOWAIT); if (adv->ccb_infos == NULL) return (ENOMEM); adv->init_level++; /* * Create our DMA tags. These tags define the kinds of device * accessible memory allocations and memory mappings we will * need to perform during normal operation. * * Unless we need to further restrict the allocation, we rely * on the restrictions of the parent dmat, hence the common * use of MAXADDR and MAXSIZE. * * The ASC boards use chains of "queues" (the transactional * resources on the board) to represent long S/G lists. * The first queue represents the command and holds a * single address and data pair. The queues that follow * can each hold ADV_SG_LIST_PER_Q entries. Given the * total number of queues, we can express the largest * transaction we can map. We reserve a few queues for * error recovery. Take those into account as well. * * There is a way to take an interrupt to download the * next batch of S/G entries if there are more than 255 * of them (the counter in the queue structure is a u_int8_t). * We don't use this feature, so limit the S/G list size * accordingly. */ max_sg = (adv->max_openings - ADV_MIN_FREE_Q - 1) * ADV_SG_LIST_PER_Q; if (max_sg > 255) max_sg = 255; /* DMA tag for mapping buffers into device visible space. */ if (bus_dma_tag_create( /* parent */ adv->parent_dmat, /* alignment */ 1, /* boundary */ 0, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ BUS_SPACE_MAXADDR, /* filter */ NULL, /* filterarg */ NULL, /* maxsize */ ADV_MAXPHYS, /* nsegments */ max_sg, /* maxsegsz */ BUS_SPACE_MAXSIZE_32BIT, /* flags */ BUS_DMA_ALLOCNOW, /* lockfunc */ busdma_lock_mutex, /* lockarg */ &adv->lock, &adv->buffer_dmat) != 0) { return (ENXIO); } adv->init_level++; /* DMA tag for our sense buffers */ if (bus_dma_tag_create( /* parent */ adv->parent_dmat, /* alignment */ 1, /* boundary */ 0, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ BUS_SPACE_MAXADDR, /* filter */ NULL, /* filterarg */ NULL, /* maxsize */ sizeof(struct scsi_sense_data) * adv->max_openings, /* nsegments */ 1, /* maxsegsz */ BUS_SPACE_MAXSIZE_32BIT, /* flags */ 0, /* lockfunc */ busdma_lock_mutex, /* lockarg */ &adv->lock, &adv->sense_dmat) != 0) { return (ENXIO); } adv->init_level++; /* Allocation for our sense buffers */ if (bus_dmamem_alloc(adv->sense_dmat, (void **)&adv->sense_buffers, BUS_DMA_NOWAIT, &adv->sense_dmamap) != 0) { return (ENOMEM); } adv->init_level++; /* And permanently map them */ bus_dmamap_load(adv->sense_dmat, adv->sense_dmamap, adv->sense_buffers, sizeof(struct scsi_sense_data)*adv->max_openings, adv_map, &adv->sense_physbase, /*flags*/0); adv->init_level++; /* * Fire up the chip */ if (adv_start_chip(adv) != 1) { device_printf(adv->dev, "Unable to start on board processor. Aborting.\n"); return (ENXIO); } /* * Create the device queue for our SIM. */ devq = cam_simq_alloc(adv->max_openings); if (devq == NULL) return (ENOMEM); /* * Construct our SIM entry. */ adv->sim = cam_sim_alloc(adv_action, adv_poll, "adv", adv, device_get_unit(adv->dev), &adv->lock, 1, adv->max_openings, devq); if (adv->sim == NULL) return (ENOMEM); /* * Register the bus. 
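	 *
	 * This hands our SIM to the XPT, builds a wildcard path for
	 * async callbacks, and signs advasync() up for device arrival
	 * and departure events.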
 */
	mtx_lock(&adv->lock);
	if (xpt_bus_register(adv->sim, adv->dev, 0) != CAM_SUCCESS) {
		cam_sim_free(adv->sim, /*free devq*/TRUE);
		mtx_unlock(&adv->lock);
		return (ENXIO);
	}

	if (xpt_create_path(&adv->path, /*periph*/NULL, cam_sim_path(adv->sim),
			    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD)
	    != CAM_REQ_CMP) {
		xpt_bus_deregister(cam_sim_path(adv->sim));
		cam_sim_free(adv->sim, /*free devq*/TRUE);
		mtx_unlock(&adv->lock);
		return (ENXIO);
	}

	xpt_setup_ccb(&csa.ccb_h, adv->path, /*priority*/5);
	csa.ccb_h.func_code = XPT_SASYNC_CB;
	csa.event_enable = AC_FOUND_DEVICE|AC_LOST_DEVICE;
	csa.callback = advasync;
	csa.callback_arg = adv;
	xpt_action((union ccb *)&csa);
	mtx_unlock(&adv->lock);
+	gone_in_dev(adv->dev, 12, "adv(4) driver");
+
	return (0);
}

MODULE_DEPEND(adv, cam, 1, 1, 1);

Index: head/sys/dev/advansys/adwcam.c
===================================================================
--- head/sys/dev/advansys/adwcam.c	(revision 328522)
+++ head/sys/dev/advansys/adwcam.c	(revision 328523)
@@ -1,1506 +1,1507 @@
/*-
 * CAM SCSI interface for the Advanced Systems Inc.
 * Second Generation SCSI controllers.
 *
 * Product specific probe and attach routines can be found in:
 *
 * adw_pci.c	ABP[3]940UW, ABP950UW, ABP3940U2W
 *
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 1998, 1999, 2000 Justin Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Ported from:
 * advansys.c - Linux Host Driver for AdvanSys SCSI Adapters
 *
 * Copyright (c) 1995-1998 Advanced System Products, Inc.
 * All Rights Reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that redistributions of source
 * code retain the above copyright notice and this comment without
 * modification.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/conf.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/bus.h>

#include <machine/bus.h>
#include <machine/resource.h>

#include <sys/rman.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_sim.h>
#include <cam/cam_xpt_sim.h>
#include <cam/cam_debug.h>

#include <cam/scsi/scsi_message.h>

#include <dev/advansys/adwvar.h>

/* Definitions for our use of the SIM private CCB area */
#define ccb_acb_ptr spriv_ptr0
#define ccb_adw_ptr spriv_ptr1

static __inline struct acb*	adwgetacb(struct adw_softc *adw);
static __inline void		adwfreeacb(struct adw_softc *adw,
					   struct acb *acb);

static void		adwmapmem(void *arg, bus_dma_segment_t *segs,
				  int nseg, int error);
static struct sg_map_node*
			adwallocsgmap(struct adw_softc *adw);
static int		adwallocacbs(struct adw_softc *adw);

static void		adwexecuteacb(void *arg, bus_dma_segment_t *dm_segs,
				      int nseg, int error);
static void		adw_action(struct cam_sim *sim, union ccb *ccb);
static void		adw_intr_locked(struct adw_softc *adw);
static void		adw_poll(struct cam_sim *sim);
static void		adw_async(void *callback_arg, u_int32_t code,
				  struct cam_path *path, void *arg);
static void		adwprocesserror(struct adw_softc *adw, struct acb *acb);
static void		adwtimeout(void *arg);
static void		adw_handle_device_reset(struct adw_softc *adw,
						u_int target);
static void		adw_handle_bus_reset(struct adw_softc *adw,
					     int initiated);

static __inline struct acb*
adwgetacb(struct adw_softc *adw)
{
	struct acb* acb;

	if (!dumping)
		mtx_assert(&adw->lock, MA_OWNED);
	if ((acb = SLIST_FIRST(&adw->free_acb_list)) != NULL) {
		SLIST_REMOVE_HEAD(&adw->free_acb_list, links);
	} else if (adw->num_acbs < adw->max_acbs) {
		adwallocacbs(adw);
		acb = SLIST_FIRST(&adw->free_acb_list);
		if (acb == NULL)
			device_printf(adw->device, "Can't malloc ACB\n");
		else {
			SLIST_REMOVE_HEAD(&adw->free_acb_list, links);
		}
	}

	return (acb);
}

static __inline void
adwfreeacb(struct adw_softc *adw, struct acb *acb)
{
	if (!dumping)
		mtx_assert(&adw->lock, MA_OWNED);
	if ((acb->state & ACB_ACTIVE) != 0)
		LIST_REMOVE(&acb->ccb->ccb_h, sim_links.le);
	if ((acb->state & ACB_RELEASE_SIMQ) != 0)
		acb->ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
	else if ((adw->state & ADW_RESOURCE_SHORTAGE) != 0
	      && (acb->ccb->ccb_h.status & CAM_RELEASE_SIMQ) == 0) {
		acb->ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
		adw->state &= ~ADW_RESOURCE_SHORTAGE;
	}
	acb->state = ACB_FREE;
	SLIST_INSERT_HEAD(&adw->free_acb_list, acb, links);
}

static void
adwmapmem(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	bus_addr_t *busaddrp;

	busaddrp = (bus_addr_t *)arg;
	*busaddrp = segs->ds_addr;
}

static struct sg_map_node *
adwallocsgmap(struct adw_softc *adw)
{
	struct sg_map_node *sg_map;

	sg_map = malloc(sizeof(*sg_map), M_DEVBUF, M_NOWAIT);

	if (sg_map == NULL)
		return (NULL);

	/* Allocate S/G space for the next batch of ACBS */
	if (bus_dmamem_alloc(adw->sg_dmat, (void **)&sg_map->sg_vaddr,
			     BUS_DMA_NOWAIT, &sg_map->sg_dmamap) != 0) {
		free(sg_map, M_DEVBUF);
		return (NULL);
	}

	SLIST_INSERT_HEAD(&adw->sg_maps, sg_map, links);

	bus_dmamap_load(adw->sg_dmat, sg_map->sg_dmamap, sg_map->sg_vaddr,
			PAGE_SIZE, adwmapmem, &sg_map->sg_physaddr,
			/*flags*/0);

	bzero(sg_map->sg_vaddr, PAGE_SIZE);

	return (sg_map);
}

/*
 * Allocate another chunk of CCB's. Return count of entries added.
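 *
 * Each call draws one page of S/G blocks from adwallocsgmap() and
 * carves it up, so at most PAGE_SIZE / (ADW_SG_BLOCKCNT *
 * sizeof(struct adw_sg_block)) ACBs can be initialized per call.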
*/ static int adwallocacbs(struct adw_softc *adw) { struct acb *next_acb; struct sg_map_node *sg_map; bus_addr_t busaddr; struct adw_sg_block *blocks; int newcount; int i; next_acb = &adw->acbs[adw->num_acbs]; sg_map = adwallocsgmap(adw); if (sg_map == NULL) return (0); blocks = sg_map->sg_vaddr; busaddr = sg_map->sg_physaddr; newcount = (PAGE_SIZE / (ADW_SG_BLOCKCNT * sizeof(*blocks))); for (i = 0; adw->num_acbs < adw->max_acbs && i < newcount; i++) { int error; error = bus_dmamap_create(adw->buffer_dmat, /*flags*/0, &next_acb->dmamap); if (error != 0) break; next_acb->queue.scsi_req_baddr = acbvtob(adw, next_acb); next_acb->queue.scsi_req_bo = acbvtobo(adw, next_acb); next_acb->queue.sense_baddr = acbvtob(adw, next_acb) + offsetof(struct acb, sense_data); next_acb->sg_blocks = blocks; next_acb->sg_busaddr = busaddr; next_acb->state = ACB_FREE; callout_init_mtx(&next_acb->timer, &adw->lock, 0); SLIST_INSERT_HEAD(&adw->free_acb_list, next_acb, links); blocks += ADW_SG_BLOCKCNT; busaddr += ADW_SG_BLOCKCNT * sizeof(*blocks); next_acb++; adw->num_acbs++; } return (i); } static void adwexecuteacb(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) { struct acb *acb; union ccb *ccb; struct adw_softc *adw; acb = (struct acb *)arg; ccb = acb->ccb; adw = (struct adw_softc *)ccb->ccb_h.ccb_adw_ptr; if (!dumping) mtx_assert(&adw->lock, MA_OWNED); if (error != 0) { if (error != EFBIG) device_printf(adw->device, "Unexepected error 0x%x " "returned from bus_dmamap_load\n", error); if (ccb->ccb_h.status == CAM_REQ_INPROG) { xpt_freeze_devq(ccb->ccb_h.path, /*count*/1); ccb->ccb_h.status = CAM_REQ_TOO_BIG|CAM_DEV_QFRZN; } adwfreeacb(adw, acb); xpt_done(ccb); return; } if (nseg != 0) { bus_dmasync_op_t op; acb->queue.data_addr = dm_segs[0].ds_addr; acb->queue.data_cnt = ccb->csio.dxfer_len; if (nseg > 1) { struct adw_sg_block *sg_block; struct adw_sg_elm *sg; bus_addr_t sg_busaddr; u_int sg_index; bus_dma_segment_t *end_seg; end_seg = dm_segs + nseg; sg_busaddr = acb->sg_busaddr; sg_index = 0; /* Copy the segments into our SG list */ for (sg_block = acb->sg_blocks;; sg_block++) { u_int i; sg = sg_block->sg_list; for (i = 0; i < ADW_NO_OF_SG_PER_BLOCK; i++) { if (dm_segs >= end_seg) break; sg->sg_addr = dm_segs->ds_addr; sg->sg_count = dm_segs->ds_len; sg++; dm_segs++; } sg_block->sg_cnt = i; sg_index += i; if (dm_segs == end_seg) { sg_block->sg_busaddr_next = 0; break; } else { sg_busaddr += sizeof(struct adw_sg_block); sg_block->sg_busaddr_next = sg_busaddr; } } acb->queue.sg_real_addr = acb->sg_busaddr; } else { acb->queue.sg_real_addr = 0; } if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) op = BUS_DMASYNC_PREREAD; else op = BUS_DMASYNC_PREWRITE; bus_dmamap_sync(adw->buffer_dmat, acb->dmamap, op); } else { acb->queue.data_addr = 0; acb->queue.data_cnt = 0; acb->queue.sg_real_addr = 0; } /* * Last time we need to check if this CCB needs to * be aborted. 
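	 *
	 * As in the first generation driver, a deferred dmamap load
	 * can finish after adwtimeout() or an abort has already given
	 * the CCB a final status; in that case the CCB is completed
	 * here instead of being sent to the RISC engine.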
*/ if (ccb->ccb_h.status != CAM_REQ_INPROG) { if (nseg != 0) bus_dmamap_unload(adw->buffer_dmat, acb->dmamap); adwfreeacb(adw, acb); xpt_done(ccb); return; } acb->state |= ACB_ACTIVE; ccb->ccb_h.status |= CAM_SIM_QUEUED; LIST_INSERT_HEAD(&adw->pending_ccbs, &ccb->ccb_h, sim_links.le); callout_reset_sbt(&acb->timer, SBT_1MS * ccb->ccb_h.timeout, 0, adwtimeout, acb, 0); adw_send_acb(adw, acb, acbvtob(adw, acb)); } static void adw_action(struct cam_sim *sim, union ccb *ccb) { struct adw_softc *adw; CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("adw_action\n")); adw = (struct adw_softc *)cam_sim_softc(sim); if (!dumping) mtx_assert(&adw->lock, MA_OWNED); switch (ccb->ccb_h.func_code) { /* Common cases first */ case XPT_SCSI_IO: /* Execute the requested I/O operation */ { struct ccb_scsiio *csio; struct acb *acb; int error; csio = &ccb->csio; /* Max supported CDB length is 12 bytes */ if (csio->cdb_len > 12) { ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); return; } if ((acb = adwgetacb(adw)) == NULL) { adw->state |= ADW_RESOURCE_SHORTAGE; xpt_freeze_simq(sim, /*count*/1); ccb->ccb_h.status = CAM_REQUEUE_REQ; xpt_done(ccb); return; } /* Link acb and ccb so we can find one from the other */ acb->ccb = ccb; ccb->ccb_h.ccb_acb_ptr = acb; ccb->ccb_h.ccb_adw_ptr = adw; acb->queue.cntl = 0; acb->queue.target_cmd = 0; acb->queue.target_id = ccb->ccb_h.target_id; acb->queue.target_lun = ccb->ccb_h.target_lun; acb->queue.mflag = 0; acb->queue.sense_len = MIN(csio->sense_len, sizeof(acb->sense_data)); acb->queue.cdb_len = csio->cdb_len; if ((ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0) { switch (csio->tag_action) { case MSG_SIMPLE_Q_TAG: acb->queue.scsi_cntl = ADW_QSC_SIMPLE_Q_TAG; break; case MSG_HEAD_OF_Q_TAG: acb->queue.scsi_cntl = ADW_QSC_HEAD_OF_Q_TAG; break; case MSG_ORDERED_Q_TAG: acb->queue.scsi_cntl = ADW_QSC_ORDERED_Q_TAG; break; default: acb->queue.scsi_cntl = ADW_QSC_NO_TAGMSG; break; } } else acb->queue.scsi_cntl = ADW_QSC_NO_TAGMSG; if ((ccb->ccb_h.flags & CAM_DIS_DISCONNECT) != 0) acb->queue.scsi_cntl |= ADW_QSC_NO_DISC; acb->queue.done_status = 0; acb->queue.scsi_status = 0; acb->queue.host_status = 0; acb->queue.sg_wk_ix = 0; if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) { if ((ccb->ccb_h.flags & CAM_CDB_PHYS) == 0) { bcopy(csio->cdb_io.cdb_ptr, acb->queue.cdb, csio->cdb_len); } else { /* I guess I could map it in... */ ccb->ccb_h.status = CAM_REQ_INVALID; adwfreeacb(adw, acb); xpt_done(ccb); return; } } else { bcopy(csio->cdb_io.cdb_bytes, acb->queue.cdb, csio->cdb_len); } error = bus_dmamap_load_ccb(adw->buffer_dmat, acb->dmamap, ccb, adwexecuteacb, acb, /*flags*/0); if (error == EINPROGRESS) { /* * So as to maintain ordering, freeze the controller * queue until our mapping is returned. 
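			 *
			 * bus_dmamap_load_ccb() only returns EINPROGRESS
			 * when it has had to defer the callback, so the
			 * freeze keeps later commands from starting
			 * ahead of this one; the queue is released again
			 * when the request finally completes.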
*/ xpt_freeze_simq(sim, 1); acb->state |= CAM_RELEASE_SIMQ; } break; } case XPT_RESET_DEV: /* Bus Device Reset the specified SCSI device */ { adw_idle_cmd_status_t status; status = adw_idle_cmd_send(adw, ADW_IDLE_CMD_DEVICE_RESET, ccb->ccb_h.target_id); if (status == ADW_IDLE_CMD_SUCCESS) { ccb->ccb_h.status = CAM_REQ_CMP; if (bootverbose) { xpt_print_path(ccb->ccb_h.path); printf("BDR Delivered\n"); } } else ccb->ccb_h.status = CAM_REQ_CMP_ERR; xpt_done(ccb); break; } case XPT_ABORT: /* Abort the specified CCB */ /* XXX Implement */ ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); break; case XPT_SET_TRAN_SETTINGS: { struct ccb_trans_settings_scsi *scsi; struct ccb_trans_settings_spi *spi; struct ccb_trans_settings *cts; u_int target_mask; cts = &ccb->cts; target_mask = 0x01 << ccb->ccb_h.target_id; scsi = &cts->proto_specific.scsi; spi = &cts->xport_specific.spi; if (cts->type == CTS_TYPE_CURRENT_SETTINGS) { u_int sdtrdone; sdtrdone = adw_lram_read_16(adw, ADW_MC_SDTR_DONE); if ((spi->valid & CTS_SPI_VALID_DISC) != 0) { u_int discenb; discenb = adw_lram_read_16(adw, ADW_MC_DISC_ENABLE); if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0) discenb |= target_mask; else discenb &= ~target_mask; adw_lram_write_16(adw, ADW_MC_DISC_ENABLE, discenb); } if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) { if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) adw->tagenb |= target_mask; else adw->tagenb &= ~target_mask; } if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) { u_int wdtrenb_orig; u_int wdtrenb; u_int wdtrdone; wdtrenb_orig = adw_lram_read_16(adw, ADW_MC_WDTR_ABLE); wdtrenb = wdtrenb_orig; wdtrdone = adw_lram_read_16(adw, ADW_MC_WDTR_DONE); switch (spi->bus_width) { case MSG_EXT_WDTR_BUS_32_BIT: case MSG_EXT_WDTR_BUS_16_BIT: wdtrenb |= target_mask; break; case MSG_EXT_WDTR_BUS_8_BIT: default: wdtrenb &= ~target_mask; break; } if (wdtrenb != wdtrenb_orig) { adw_lram_write_16(adw, ADW_MC_WDTR_ABLE, wdtrenb); wdtrdone &= ~target_mask; adw_lram_write_16(adw, ADW_MC_WDTR_DONE, wdtrdone); /* Wide negotiation forces async */ sdtrdone &= ~target_mask; adw_lram_write_16(adw, ADW_MC_SDTR_DONE, sdtrdone); } } if (((spi->valid & CTS_SPI_VALID_SYNC_RATE) != 0) || ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) != 0)) { u_int sdtr_orig; u_int sdtr; u_int sdtrable_orig; u_int sdtrable; sdtr = adw_get_chip_sdtr(adw, ccb->ccb_h.target_id); sdtr_orig = sdtr; sdtrable = adw_lram_read_16(adw, ADW_MC_SDTR_ABLE); sdtrable_orig = sdtrable; if ((spi->valid & CTS_SPI_VALID_SYNC_RATE) != 0) { sdtr = adw_find_sdtr(adw, spi->sync_period); } if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) != 0) { if (spi->sync_offset == 0) sdtr = ADW_MC_SDTR_ASYNC; } if (sdtr == ADW_MC_SDTR_ASYNC) sdtrable &= ~target_mask; else sdtrable |= target_mask; if (sdtr != sdtr_orig || sdtrable != sdtrable_orig) { adw_set_chip_sdtr(adw, ccb->ccb_h.target_id, sdtr); sdtrdone &= ~target_mask; adw_lram_write_16(adw, ADW_MC_SDTR_ABLE, sdtrable); adw_lram_write_16(adw, ADW_MC_SDTR_DONE, sdtrdone); } } } ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); break; } case XPT_GET_TRAN_SETTINGS: /* Get default/user set transfer settings for the target */ { struct ccb_trans_settings_scsi *scsi; struct ccb_trans_settings_spi *spi; struct ccb_trans_settings *cts; u_int target_mask; cts = &ccb->cts; target_mask = 0x01 << ccb->ccb_h.target_id; cts->protocol = PROTO_SCSI; cts->protocol_version = SCSI_REV_2; cts->transport = XPORT_SPI; cts->transport_version = 2; scsi = &cts->proto_specific.scsi; spi = &cts->xport_specific.spi; if (cts->type == CTS_TYPE_CURRENT_SETTINGS) { u_int 
mc_sdtr; spi->flags = 0; if ((adw->user_discenb & target_mask) != 0) spi->flags |= CTS_SPI_FLAGS_DISC_ENB; if ((adw->user_tagenb & target_mask) != 0) scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB; if ((adw->user_wdtr & target_mask) != 0) spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT; else spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT; mc_sdtr = adw_get_user_sdtr(adw, ccb->ccb_h.target_id); spi->sync_period = adw_find_period(adw, mc_sdtr); if (spi->sync_period != 0) spi->sync_offset = 15; /* XXX ??? */ else spi->sync_offset = 0; } else { u_int targ_tinfo; spi->flags = 0; if ((adw_lram_read_16(adw, ADW_MC_DISC_ENABLE) & target_mask) != 0) spi->flags |= CTS_SPI_FLAGS_DISC_ENB; if ((adw->tagenb & target_mask) != 0) scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB; targ_tinfo = adw_lram_read_16(adw, ADW_MC_DEVICE_HSHK_CFG_TABLE + (2 * ccb->ccb_h.target_id)); if ((targ_tinfo & ADW_HSHK_CFG_WIDE_XFR) != 0) spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT; else spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT; spi->sync_period = adw_hshk_cfg_period_factor(targ_tinfo); spi->sync_offset = targ_tinfo & ADW_HSHK_CFG_OFFSET; if (spi->sync_period == 0) spi->sync_offset = 0; if (spi->sync_offset == 0) spi->sync_period = 0; } spi->valid = CTS_SPI_VALID_SYNC_RATE | CTS_SPI_VALID_SYNC_OFFSET | CTS_SPI_VALID_BUS_WIDTH | CTS_SPI_VALID_DISC; scsi->valid = CTS_SCSI_VALID_TQ; ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); break; } case XPT_CALC_GEOMETRY: { /* * XXX Use Adaptec translation until I find out how to * get this information from the card. */ cam_calc_geometry(&ccb->ccg, /*extended*/1); xpt_done(ccb); break; } case XPT_RESET_BUS: /* Reset the specified SCSI bus */ { int failure; failure = adw_reset_bus(adw); if (failure != 0) { ccb->ccb_h.status = CAM_REQ_CMP_ERR; } else { if (bootverbose) { xpt_print_path(adw->path); printf("Bus Reset Delivered\n"); } ccb->ccb_h.status = CAM_REQ_CMP; } xpt_done(ccb); break; } case XPT_TERM_IO: /* Terminate the I/O process */ /* XXX Implement */ ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); break; case XPT_PATH_INQ: /* Path routing inquiry */ { struct ccb_pathinq *cpi = &ccb->cpi; cpi->version_num = 1; cpi->hba_inquiry = PI_WIDE_16|PI_SDTR_ABLE|PI_TAG_ABLE; cpi->target_sprt = 0; cpi->hba_misc = 0; cpi->hba_eng_cnt = 0; cpi->max_target = ADW_MAX_TID; cpi->max_lun = ADW_MAX_LUN; cpi->initiator_id = adw->initiator_id; cpi->bus_id = cam_sim_bus(sim); cpi->base_transfer_speed = 3300; strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); strlcpy(cpi->hba_vid, "AdvanSys", HBA_IDLEN); strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); cpi->unit_number = cam_sim_unit(sim); cpi->transport = XPORT_SPI; cpi->transport_version = 2; cpi->protocol = PROTO_SCSI; cpi->protocol_version = SCSI_REV_2; cpi->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); break; } default: ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); break; } } static void adw_poll(struct cam_sim *sim) { adw_intr_locked(cam_sim_softc(sim)); } static void adw_async(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg) { } struct adw_softc * adw_alloc(device_t dev, struct resource *regs, int regs_type, int regs_id) { struct adw_softc *adw; adw = device_get_softc(dev); LIST_INIT(&adw->pending_ccbs); SLIST_INIT(&adw->sg_maps); mtx_init(&adw->lock, "adw", NULL, MTX_DEF); adw->device = dev; adw->regs_res_type = regs_type; adw->regs_res_id = regs_id; adw->regs = regs; return(adw); } void adw_free(struct adw_softc *adw) { switch (adw->init_level) { case 9: { struct sg_map_node *sg_map; while ((sg_map = SLIST_FIRST(&adw->sg_maps)) != NULL) { 
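			/*
			 * Tear down each S/G map in reverse order of its
			 * setup: unlink the node, unload the dmamap,
			 * free the page, then free the tracking node
			 * itself.
			 */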
SLIST_REMOVE_HEAD(&adw->sg_maps, links); bus_dmamap_unload(adw->sg_dmat, sg_map->sg_dmamap); bus_dmamem_free(adw->sg_dmat, sg_map->sg_vaddr, sg_map->sg_dmamap); free(sg_map, M_DEVBUF); } bus_dma_tag_destroy(adw->sg_dmat); } case 8: bus_dmamap_unload(adw->acb_dmat, adw->acb_dmamap); case 7: bus_dmamem_free(adw->acb_dmat, adw->acbs, adw->acb_dmamap); case 6: bus_dma_tag_destroy(adw->acb_dmat); case 5: bus_dmamap_unload(adw->carrier_dmat, adw->carrier_dmamap); case 4: bus_dmamem_free(adw->carrier_dmat, adw->carriers, adw->carrier_dmamap); case 3: bus_dma_tag_destroy(adw->carrier_dmat); case 2: bus_dma_tag_destroy(adw->buffer_dmat); case 1: bus_dma_tag_destroy(adw->parent_dmat); case 0: break; } if (adw->regs != NULL) bus_release_resource(adw->device, adw->regs_res_type, adw->regs_res_id, adw->regs); if (adw->irq != NULL) bus_release_resource(adw->device, adw->irq_res_type, 0, adw->irq); if (adw->sim != NULL) { if (adw->path != NULL) { xpt_async(AC_LOST_DEVICE, adw->path, NULL); xpt_free_path(adw->path); } xpt_bus_deregister(cam_sim_path(adw->sim)); cam_sim_free(adw->sim, /*free_devq*/TRUE); } mtx_destroy(&adw->lock); } int adw_init(struct adw_softc *adw) { struct adw_eeprom eep_config; u_int tid; u_int i; u_int16_t checksum; u_int16_t scsicfg1; checksum = adw_eeprom_read(adw, &eep_config); bcopy(eep_config.serial_number, adw->serial_number, sizeof(adw->serial_number)); if (checksum != eep_config.checksum) { u_int16_t serial_number[3]; adw->flags |= ADW_EEPROM_FAILED; device_printf(adw->device, "EEPROM checksum failed. Restoring Defaults\n"); /* * Restore the default EEPROM settings. * Assume the 6 byte board serial number that was read * from EEPROM is correct even if the EEPROM checksum * failed. */ bcopy(adw->default_eeprom, &eep_config, sizeof(eep_config)); bcopy(adw->serial_number, eep_config.serial_number, sizeof(serial_number)); adw_eeprom_write(adw, &eep_config); } /* Pull eeprom information into our softc. */ adw->bios_ctrl = eep_config.bios_ctrl; adw->user_wdtr = eep_config.wdtr_able; for (tid = 0; tid < ADW_MAX_TID; tid++) { u_int mc_sdtr; u_int16_t tid_mask; tid_mask = 0x1 << tid; if ((adw->features & ADW_ULTRA) != 0) { /* * Ultra chips store sdtr and ultraenb * bits in their seeprom, so we must * construct valid mc_sdtr entries for * indirectly. */ if (eep_config.sync1.sync_enable & tid_mask) { if (eep_config.sync2.ultra_enable & tid_mask) mc_sdtr = ADW_MC_SDTR_20; else mc_sdtr = ADW_MC_SDTR_10; } else mc_sdtr = ADW_MC_SDTR_ASYNC; } else { switch (ADW_TARGET_GROUP(tid)) { case 3: mc_sdtr = eep_config.sync4.sdtr4; break; case 2: mc_sdtr = eep_config.sync3.sdtr3; break; case 1: mc_sdtr = eep_config.sync2.sdtr2; break; default: /* Shut up compiler */ case 0: mc_sdtr = eep_config.sync1.sdtr1; break; } mc_sdtr >>= ADW_TARGET_GROUP_SHIFT(tid); mc_sdtr &= 0xFF; } adw_set_user_sdtr(adw, tid, mc_sdtr); } adw->user_tagenb = eep_config.tagqng_able; adw->user_discenb = eep_config.disc_enable; adw->max_acbs = eep_config.max_host_qng; adw->initiator_id = (eep_config.adapter_scsi_id & ADW_MAX_TID); /* * Sanity check the number of host openings. */ if (adw->max_acbs > ADW_DEF_MAX_HOST_QNG) adw->max_acbs = ADW_DEF_MAX_HOST_QNG; else if (adw->max_acbs < ADW_DEF_MIN_HOST_QNG) { /* If the value is zero, assume it is uninitialized. 
*/ if (adw->max_acbs == 0) adw->max_acbs = ADW_DEF_MAX_HOST_QNG; else adw->max_acbs = ADW_DEF_MIN_HOST_QNG; } scsicfg1 = 0; if ((adw->features & ADW_ULTRA2) != 0) { switch (eep_config.termination_lvd) { default: device_printf(adw->device, "Invalid EEPROM LVD Termination Settings.\n"); device_printf(adw->device, "Reverting to Automatic LVD Termination\n"); /* FALLTHROUGH */ case ADW_EEPROM_TERM_AUTO: break; case ADW_EEPROM_TERM_BOTH_ON: scsicfg1 |= ADW2_SCSI_CFG1_TERM_LVD_LO; /* FALLTHROUGH */ case ADW_EEPROM_TERM_HIGH_ON: scsicfg1 |= ADW2_SCSI_CFG1_TERM_LVD_HI; /* FALLTHROUGH */ case ADW_EEPROM_TERM_OFF: scsicfg1 |= ADW2_SCSI_CFG1_DIS_TERM_DRV; break; } } switch (eep_config.termination_se) { default: device_printf(adw->device, "Invalid SE EEPROM Termination Settings.\n"); device_printf(adw->device, "Reverting to Automatic SE Termination\n"); /* FALLTHROUGH */ case ADW_EEPROM_TERM_AUTO: break; case ADW_EEPROM_TERM_BOTH_ON: scsicfg1 |= ADW_SCSI_CFG1_TERM_CTL_L; /* FALLTHROUGH */ case ADW_EEPROM_TERM_HIGH_ON: scsicfg1 |= ADW_SCSI_CFG1_TERM_CTL_H; /* FALLTHROUGH */ case ADW_EEPROM_TERM_OFF: scsicfg1 |= ADW_SCSI_CFG1_TERM_CTL_MANUAL; break; } device_printf(adw->device, "SCSI ID %d, ", adw->initiator_id); /* DMA tag for mapping buffers into device visible space. */ if (bus_dma_tag_create( /* parent */ adw->parent_dmat, /* alignment */ 1, /* boundary */ 0, /* lowaddr */ BUS_SPACE_MAXADDR_32BIT, /* highaddr */ BUS_SPACE_MAXADDR, /* filter */ NULL, /* filterarg */ NULL, /* maxsize */ DFLTPHYS, /* nsegments */ ADW_SGSIZE, /* maxsegsz */ BUS_SPACE_MAXSIZE_32BIT, /* flags */ BUS_DMA_ALLOCNOW, /* lockfunc */ busdma_lock_mutex, /* lockarg */ &adw->lock, &adw->buffer_dmat) != 0) { return (ENOMEM); } adw->init_level++; /* DMA tag for our ccb carrier structures */ if (bus_dma_tag_create( /* parent */ adw->parent_dmat, /* alignment */ 0x10, /* boundary */ 0, /* lowaddr */ BUS_SPACE_MAXADDR_32BIT, /* highaddr */ BUS_SPACE_MAXADDR, /* filter */ NULL, /* filterarg */ NULL, /* maxsize */ (adw->max_acbs + ADW_NUM_CARRIER_QUEUES + 1) * sizeof(struct adw_carrier), /* nsegments */ 1, /* maxsegsz */ BUS_SPACE_MAXSIZE_32BIT, /* flags */ 0, /* lockfunc */ NULL, /* lockarg */ NULL, &adw->carrier_dmat) != 0) { return (ENOMEM); } adw->init_level++; /* Allocation for our ccb carrier structures */ if (bus_dmamem_alloc(adw->carrier_dmat, (void **)&adw->carriers, BUS_DMA_NOWAIT, &adw->carrier_dmamap) != 0) { return (ENOMEM); } adw->init_level++; /* And permanently map them */ bus_dmamap_load(adw->carrier_dmat, adw->carrier_dmamap, adw->carriers, (adw->max_acbs + ADW_NUM_CARRIER_QUEUES + 1) * sizeof(struct adw_carrier), adwmapmem, &adw->carrier_busbase, /*flags*/0); /* Clear them out. */ bzero(adw->carriers, (adw->max_acbs + ADW_NUM_CARRIER_QUEUES + 1) * sizeof(struct adw_carrier)); /* Setup our free carrier list */ adw->free_carriers = adw->carriers; for (i = 0; i < adw->max_acbs + ADW_NUM_CARRIER_QUEUES; i++) { adw->carriers[i].carr_offset = carriervtobo(adw, &adw->carriers[i]); adw->carriers[i].carr_ba = carriervtob(adw, &adw->carriers[i]); adw->carriers[i].areq_ba = 0; adw->carriers[i].next_ba = carriervtobo(adw, &adw->carriers[i+1]); } /* Terminal carrier. 
Never leaves the freelist */ adw->carriers[i].carr_offset = carriervtobo(adw, &adw->carriers[i]); adw->carriers[i].carr_ba = carriervtob(adw, &adw->carriers[i]); adw->carriers[i].areq_ba = 0; adw->carriers[i].next_ba = ~0; adw->init_level++; /* DMA tag for our acb structures */ if (bus_dma_tag_create( /* parent */ adw->parent_dmat, /* alignment */ 1, /* boundary */ 0, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ BUS_SPACE_MAXADDR, /* filter */ NULL, /* filterarg */ NULL, /* maxsize */ adw->max_acbs * sizeof(struct acb), /* nsegments */ 1, /* maxsegsz */ BUS_SPACE_MAXSIZE_32BIT, /* flags */ 0, /* lockfunc */ NULL, /* lockarg */ NULL, &adw->acb_dmat) != 0) { return (ENOMEM); } adw->init_level++; /* Allocation for our ccbs */ if (bus_dmamem_alloc(adw->acb_dmat, (void **)&adw->acbs, BUS_DMA_NOWAIT, &adw->acb_dmamap) != 0) return (ENOMEM); adw->init_level++; /* And permanently map them */ bus_dmamap_load(adw->acb_dmat, adw->acb_dmamap, adw->acbs, adw->max_acbs * sizeof(struct acb), adwmapmem, &adw->acb_busbase, /*flags*/0); /* Clear them out. */ bzero(adw->acbs, adw->max_acbs * sizeof(struct acb)); /* DMA tag for our S/G structures. We allocate in page sized chunks */ if (bus_dma_tag_create( /* parent */ adw->parent_dmat, /* alignment */ 1, /* boundary */ 0, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ BUS_SPACE_MAXADDR, /* filter */ NULL, /* filterarg */ NULL, /* maxsize */ PAGE_SIZE, /* nsegments */ 1, /* maxsegsz */ BUS_SPACE_MAXSIZE_32BIT, /* flags */ 0, /* lockfunc */ NULL, /* lockarg */ NULL, &adw->sg_dmat) != 0) { return (ENOMEM); } adw->init_level++; /* Allocate our first batch of ccbs */ mtx_lock(&adw->lock); if (adwallocacbs(adw) == 0) { mtx_unlock(&adw->lock); return (ENOMEM); } if (adw_init_chip(adw, scsicfg1) != 0) { mtx_unlock(&adw->lock); return (ENXIO); } printf("Queue Depth %d\n", adw->max_acbs); mtx_unlock(&adw->lock); return (0); } /* * Attach all the sub-devices we can find */ int adw_attach(struct adw_softc *adw) { struct ccb_setasync csa; struct cam_devq *devq; int error; /* Hook up our interrupt handler */ error = bus_setup_intr(adw->device, adw->irq, INTR_TYPE_CAM | INTR_ENTROPY | INTR_MPSAFE, NULL, adw_intr, adw, &adw->ih); if (error != 0) { device_printf(adw->device, "bus_setup_intr() failed: %d\n", error); return (error); } /* Start the Risc processor now that we are fully configured. */ adw_outw(adw, ADW_RISC_CSR, ADW_RISC_CSR_RUN); /* * Create the device queue for our SIM. */ devq = cam_simq_alloc(adw->max_acbs); if (devq == NULL) return (ENOMEM); /* * Construct our SIM entry. */ adw->sim = cam_sim_alloc(adw_action, adw_poll, "adw", adw, device_get_unit(adw->device), &adw->lock, 1, adw->max_acbs, devq); if (adw->sim == NULL) return (ENOMEM); /* * Register the bus. 
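*/
/*
 * Illustrative sketch (not driver code; names invented): the carrier free
 * list built above chains nodes by bus-relative offset rather than by host
 * pointer, and parks a terminal carrier whose link is ~0 so the list
 * always ends in a node that never leaves the free list.
 */
#if 0	/* example only */
#include <stddef.h>
#include <stdint.h>

struct ex_carrier {
	uint32_t next_off;	/* offset of next free carrier, ~0u at end */
};

static void
ex_init_freelist(struct ex_carrier *c, size_t n)
{
	size_t i;

	/* Link c[0] .. c[n-2] forward by offset; c[n-1] is the sentinel. */
	for (i = 0; i < n - 1; i++)
		c[i].next_off = (uint32_t)((i + 1) * sizeof(*c));
	c[n - 1].next_off = ~0u;
}
#endif
/*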
*/ mtx_lock(&adw->lock); if (xpt_bus_register(adw->sim, adw->device, 0) != CAM_SUCCESS) { cam_sim_free(adw->sim, /*free devq*/TRUE); error = ENOMEM; goto fail; } if (xpt_create_path(&adw->path, /*periph*/NULL, cam_sim_path(adw->sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) == CAM_REQ_CMP) { xpt_setup_ccb(&csa.ccb_h, adw->path, /*priority*/5); csa.ccb_h.func_code = XPT_SASYNC_CB; csa.event_enable = AC_LOST_DEVICE; csa.callback = adw_async; csa.callback_arg = adw; xpt_action((union ccb *)&csa); } + gone_in_dev(adw->device, 12, "adw(4) driver"); fail: mtx_unlock(&adw->lock); return (error); } void adw_intr(void *arg) { struct adw_softc *adw; adw = arg; mtx_lock(&adw->lock); adw_intr_locked(adw); mtx_unlock(&adw->lock); } void adw_intr_locked(struct adw_softc *adw) { u_int int_stat; if ((adw_inw(adw, ADW_CTRL_REG) & ADW_CTRL_REG_HOST_INTR) == 0) return; /* Reading the register clears the interrupt. */ int_stat = adw_inb(adw, ADW_INTR_STATUS_REG); if ((int_stat & ADW_INTR_STATUS_INTRB) != 0) { u_int intrb_code; /* Async Microcode Event */ intrb_code = adw_lram_read_8(adw, ADW_MC_INTRB_CODE); switch (intrb_code) { case ADW_ASYNC_CARRIER_READY_FAILURE: /* * The RISC missed our update of * the commandq. */ if (LIST_FIRST(&adw->pending_ccbs) != NULL) adw_tickle_risc(adw, ADW_TICKLE_A); break; case ADW_ASYNC_SCSI_BUS_RESET_DET: /* * The firmware detected a SCSI Bus reset. */ device_printf(adw->device, "Someone Reset the Bus\n"); adw_handle_bus_reset(adw, /*initiated*/FALSE); break; case ADW_ASYNC_RDMA_FAILURE: /* * Handle RDMA failure by resetting the * SCSI Bus and chip. */ #if 0 /* XXX */ AdvResetChipAndSB(adv_dvc_varp); #endif break; case ADW_ASYNC_HOST_SCSI_BUS_RESET: /* * Host generated SCSI bus reset occurred. */ adw_handle_bus_reset(adw, /*initiated*/TRUE); break; default: printf("adw_intr: unknown async code 0x%x\n", intrb_code); break; } } /* * Run down the response queue. */ while ((adw->responseq->next_ba & ADW_RQ_DONE) != 0) { struct adw_carrier *free_carrier; struct acb *acb; union ccb *ccb; #if 0 printf("0x%x, 0x%x, 0x%x, 0x%x\n", adw->responseq->carr_offset, adw->responseq->carr_ba, adw->responseq->areq_ba, adw->responseq->next_ba); #endif /* * The firmware copies the adw_scsi_req_q.acb_baddr * field into the areq_ba field of the carrier. */ acb = acbbotov(adw, adw->responseq->areq_ba); /* * The least significant four bits of the next_ba * field are used as flags. Mask them out and then * advance through the list. */ free_carrier = adw->responseq; adw->responseq = carrierbotov(adw, free_carrier->next_ba & ADW_NEXT_BA_MASK); free_carrier->next_ba = adw->free_carriers->carr_offset; adw->free_carriers = free_carrier; /* Process CCB */ ccb = acb->ccb; callout_stop(&acb->timer); if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) { bus_dmasync_op_t op; if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) op = BUS_DMASYNC_POSTREAD; else op = BUS_DMASYNC_POSTWRITE; bus_dmamap_sync(adw->buffer_dmat, acb->dmamap, op); bus_dmamap_unload(adw->buffer_dmat, acb->dmamap); ccb->csio.resid = acb->queue.data_cnt; } else ccb->csio.resid = 0; /* Common Cases inline... 
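*/
/*
 * Illustrative model (not driver code; types invented) of the completion
 * walk above: the firmware marks a carrier done by setting a flag in the
 * low bits of its next_ba link, so the host consumes entries until it
 * reaches one whose flag is clear, masking the flag bits off before
 * following the link (modelled here with a host-side shadow pointer).
 */
#if 0	/* example only */
#include <stdint.h>

#define EX_DONE		0x1
#define EX_LINK_MASK	(~(uint32_t)0xf)	/* low four bits are flags */

struct ex_carrier {
	uint32_t next_ba;		/* bus link plus flag bits */
	struct ex_carrier *next_va;	/* host-side shadow of the link */
};

static int
ex_drain(struct ex_carrier **headp)
{
	struct ex_carrier *c;
	int n = 0;

	for (c = *headp; (c->next_ba & EX_DONE) != 0; c = c->next_va) {
		/* complete the request attached to c here */
		n++;
	}
	*headp = c;		/* resume here on the next interrupt */
	return (n);
}
#endif
/*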
*/ if (acb->queue.host_status == QHSTA_NO_ERROR && (acb->queue.done_status == QD_NO_ERROR || acb->queue.done_status == QD_WITH_ERROR)) { ccb->csio.scsi_status = acb->queue.scsi_status; ccb->ccb_h.status = 0; switch (ccb->csio.scsi_status) { case SCSI_STATUS_OK: ccb->ccb_h.status |= CAM_REQ_CMP; break; case SCSI_STATUS_CHECK_COND: case SCSI_STATUS_CMD_TERMINATED: bcopy(&acb->sense_data, &ccb->csio.sense_data, ccb->csio.sense_len); ccb->ccb_h.status |= CAM_AUTOSNS_VALID; ccb->csio.sense_resid = acb->queue.sense_len; /* FALLTHROUGH */ default: ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR | CAM_DEV_QFRZN; xpt_freeze_devq(ccb->ccb_h.path, /*count*/1); break; } adwfreeacb(adw, acb); xpt_done(ccb); } else { adwprocesserror(adw, acb); } } } static void adwprocesserror(struct adw_softc *adw, struct acb *acb) { union ccb *ccb; ccb = acb->ccb; if (acb->queue.done_status == QD_ABORTED_BY_HOST) { ccb->ccb_h.status = CAM_REQ_ABORTED; } else { switch (acb->queue.host_status) { case QHSTA_M_SEL_TIMEOUT: ccb->ccb_h.status = CAM_SEL_TIMEOUT; break; case QHSTA_M_SXFR_OFF_UFLW: case QHSTA_M_SXFR_OFF_OFLW: case QHSTA_M_DATA_OVER_RUN: ccb->ccb_h.status = CAM_DATA_RUN_ERR; break; case QHSTA_M_SXFR_DESELECTED: case QHSTA_M_UNEXPECTED_BUS_FREE: ccb->ccb_h.status = CAM_UNEXP_BUSFREE; break; case QHSTA_M_SCSI_BUS_RESET: case QHSTA_M_SCSI_BUS_RESET_UNSOL: ccb->ccb_h.status = CAM_SCSI_BUS_RESET; break; case QHSTA_M_BUS_DEVICE_RESET: ccb->ccb_h.status = CAM_BDR_SENT; break; case QHSTA_M_QUEUE_ABORTED: /* BDR or Bus Reset */ xpt_print_path(adw->path); printf("Saw Queue Aborted\n"); ccb->ccb_h.status = adw->last_reset; break; case QHSTA_M_SXFR_SDMA_ERR: case QHSTA_M_SXFR_SXFR_PERR: case QHSTA_M_RDMA_PERR: ccb->ccb_h.status = CAM_UNCOR_PARITY; break; case QHSTA_M_WTM_TIMEOUT: case QHSTA_M_SXFR_WD_TMO: { /* The SCSI bus hung in a phase */ xpt_print_path(adw->path); printf("Watch Dog timer expired. Resetting bus\n"); adw_reset_bus(adw); break; } case QHSTA_M_SXFR_XFR_PH_ERR: ccb->ccb_h.status = CAM_SEQUENCE_FAIL; break; case QHSTA_M_SXFR_UNKNOWN_ERROR: break; case QHSTA_M_BAD_CMPL_STATUS_IN: /* No command complete after a status message */ ccb->ccb_h.status = CAM_SEQUENCE_FAIL; break; case QHSTA_M_AUTO_REQ_SENSE_FAIL: ccb->ccb_h.status = CAM_AUTOSENSE_FAIL; break; case QHSTA_M_INVALID_DEVICE: ccb->ccb_h.status = CAM_PATH_INVALID; break; case QHSTA_M_NO_AUTO_REQ_SENSE: /* * User didn't request sense, but we got a * check condition. 
*/ ccb->csio.scsi_status = acb->queue.scsi_status; ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR; break; default: panic("%s: Unhandled Host status error %x", device_get_nameunit(adw->device), acb->queue.host_status); /* NOTREACHED */ } } if ((acb->state & ACB_RECOVERY_ACB) != 0) { if (ccb->ccb_h.status == CAM_SCSI_BUS_RESET || ccb->ccb_h.status == CAM_BDR_SENT) ccb->ccb_h.status = CAM_CMD_TIMEOUT; } if (ccb->ccb_h.status != CAM_REQ_CMP) { xpt_freeze_devq(ccb->ccb_h.path, /*count*/1); ccb->ccb_h.status |= CAM_DEV_QFRZN; } adwfreeacb(adw, acb); xpt_done(ccb); } static void adwtimeout(void *arg) { struct acb *acb; union ccb *ccb; struct adw_softc *adw; adw_idle_cmd_status_t status; int target_id; acb = (struct acb *)arg; ccb = acb->ccb; adw = (struct adw_softc *)ccb->ccb_h.ccb_adw_ptr; xpt_print_path(ccb->ccb_h.path); printf("ACB %p - timed out\n", (void *)acb); mtx_assert(&adw->lock, MA_OWNED); if ((acb->state & ACB_ACTIVE) == 0) { xpt_print_path(ccb->ccb_h.path); printf("ACB %p - timed out CCB already completed\n", (void *)acb); return; } acb->state |= ACB_RECOVERY_ACB; target_id = ccb->ccb_h.target_id; /* Attempt a BDR first */ status = adw_idle_cmd_send(adw, ADW_IDLE_CMD_DEVICE_RESET, ccb->ccb_h.target_id); if (status == ADW_IDLE_CMD_SUCCESS) { device_printf(adw->device, "BDR Delivered. No longer in timeout\n"); adw_handle_device_reset(adw, target_id); } else { adw_reset_bus(adw); xpt_print_path(adw->path); printf("Bus Reset Delivered. No longer in timeout\n"); } } static void adw_handle_device_reset(struct adw_softc *adw, u_int target) { struct cam_path *path; cam_status error; error = xpt_create_path(&path, /*periph*/NULL, cam_sim_path(adw->sim), target, CAM_LUN_WILDCARD); if (error == CAM_REQ_CMP) { xpt_async(AC_SENT_BDR, path, NULL); xpt_free_path(path); } adw->last_reset = CAM_BDR_SENT; } static void adw_handle_bus_reset(struct adw_softc *adw, int initiated) { if (initiated) { /* * The microcode currently sets the SCSI Bus Reset signal * while handling the AscSendIdleCmd() IDLE_CMD_SCSI_RESET * command above. But the SCSI Bus Reset Hold Time in the * microcode is not deterministic (it may in fact be for less * than the SCSI Spec. minimum of 25 us). Therefore on return * the Adv Library sets the SCSI Bus Reset signal for * ADW_SCSI_RESET_HOLD_TIME_US, which is defined to be greater * than 25 us. */ u_int scsi_ctrl; scsi_ctrl = adw_inw(adw, ADW_SCSI_CTRL) & ~ADW_SCSI_CTRL_RSTOUT; adw_outw(adw, ADW_SCSI_CTRL, scsi_ctrl | ADW_SCSI_CTRL_RSTOUT); DELAY(ADW_SCSI_RESET_HOLD_TIME_US); adw_outw(adw, ADW_SCSI_CTRL, scsi_ctrl); /* * We will perform the async notification when the * SCSI Reset interrupt occurs. */ } else xpt_async(AC_BUS_RESET, adw->path, NULL); adw->last_reset = CAM_SCSI_BUS_RESET; } MODULE_DEPEND(adw, cam, 1, 1, 1); Index: head/sys/dev/aha/aha.c =================================================================== --- head/sys/dev/aha/aha.c (revision 328522) +++ head/sys/dev/aha/aha.c (revision 328523) @@ -1,1815 +1,1816 @@ /*- * Generic register and struct definitions for the Adaptech 154x * SCSI host adapters. Product specific probe and attach routines can * be found in: * aha 1542A/1542B/1542C/1542CF/1542CP aha_isa.c */ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 1998 M. Warner Losh. * All Rights Reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * Derived from bt.c written by: * * Copyright (c) 1998 Justin T. Gibbs. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define PRVERB(x) do { if (bootverbose) device_printf x; } while (0) /* Macro to determine that a rev is potentially a new valid one * so that the driver doesn't keep breaking on new revs as it * did for the CF and CP. 
*/ #define PROBABLY_NEW_BOARD(REV) (REV > 0x43 && REV < 0x56) /* MailBox Management functions */ static __inline void ahanextinbox(struct aha_softc *aha); static __inline void ahanextoutbox(struct aha_softc *aha); #define aha_name(aha) device_get_nameunit(aha->dev) static __inline void ahanextinbox(struct aha_softc *aha) { if (aha->cur_inbox == aha->last_inbox) aha->cur_inbox = aha->in_boxes; else aha->cur_inbox++; } static __inline void ahanextoutbox(struct aha_softc *aha) { if (aha->cur_outbox == aha->last_outbox) aha->cur_outbox = aha->out_boxes; else aha->cur_outbox++; } #define ahautoa24(u,s3) \ (s3)[0] = ((u) >> 16) & 0xff; \ (s3)[1] = ((u) >> 8) & 0xff; \ (s3)[2] = (u) & 0xff; #define aha_a24tou(s3) \ (((s3)[0] << 16) | ((s3)[1] << 8) | (s3)[2]) /* CCB Management functions */ static __inline uint32_t ahaccbvtop(struct aha_softc *aha, struct aha_ccb *accb); static __inline struct aha_ccb* ahaccbptov(struct aha_softc *aha, uint32_t ccb_addr); static __inline uint32_t ahaccbvtop(struct aha_softc *aha, struct aha_ccb *accb) { return (aha->aha_ccb_physbase + (uint32_t)((caddr_t)accb - (caddr_t)aha->aha_ccb_array)); } static __inline struct aha_ccb * ahaccbptov(struct aha_softc *aha, uint32_t ccb_addr) { return (aha->aha_ccb_array + + ((struct aha_ccb*)(uintptr_t)ccb_addr - (struct aha_ccb*)(uintptr_t)aha->aha_ccb_physbase)); } static struct aha_ccb* ahagetccb(struct aha_softc *aha); static __inline void ahafreeccb(struct aha_softc *aha, struct aha_ccb *accb); static void ahaallocccbs(struct aha_softc *aha); static bus_dmamap_callback_t ahaexecuteccb; static void ahadone(struct aha_softc *aha, struct aha_ccb *accb, aha_mbi_comp_code_t comp_code); static void aha_intr_locked(struct aha_softc *aha); /* Host adapter command functions */ static int ahareset(struct aha_softc* aha, int hard_reset); /* Initialization functions */ static int ahainitmboxes(struct aha_softc *aha); static bus_dmamap_callback_t ahamapmboxes; static bus_dmamap_callback_t ahamapccbs; static bus_dmamap_callback_t ahamapsgs; /* Transfer Negotiation Functions */ static void ahafetchtransinfo(struct aha_softc *aha, struct ccb_trans_settings *cts); /* CAM SIM entry points */ #define ccb_accb_ptr spriv_ptr0 #define ccb_aha_ptr spriv_ptr1 static void ahaaction(struct cam_sim *sim, union ccb *ccb); static void ahapoll(struct cam_sim *sim); /* Our timeout handler */ static void ahatimeout(void *arg); /* Exported functions */ void aha_alloc(struct aha_softc *aha) { SLIST_INIT(&aha->free_aha_ccbs); LIST_INIT(&aha->pending_ccbs); SLIST_INIT(&aha->sg_maps); aha->ccb_sg_opcode = INITIATOR_SG_CCB_WRESID; aha->ccb_ccb_opcode = INITIATOR_CCB_WRESID; mtx_init(&aha->lock, "aha", NULL, MTX_DEF); } void aha_free(struct aha_softc *aha) { switch (aha->init_level) { default: case 8: { struct sg_map_node *sg_map; while ((sg_map = SLIST_FIRST(&aha->sg_maps))!= NULL) { SLIST_REMOVE_HEAD(&aha->sg_maps, links); bus_dmamap_unload(aha->sg_dmat, sg_map->sg_dmamap); bus_dmamem_free(aha->sg_dmat, sg_map->sg_vaddr, sg_map->sg_dmamap); free(sg_map, M_DEVBUF); } bus_dma_tag_destroy(aha->sg_dmat); } case 7: bus_dmamap_unload(aha->ccb_dmat, aha->ccb_dmamap); case 6: bus_dmamem_free(aha->ccb_dmat, aha->aha_ccb_array, aha->ccb_dmamap); case 5: bus_dma_tag_destroy(aha->ccb_dmat); case 4: bus_dmamap_unload(aha->mailbox_dmat, aha->mailbox_dmamap); case 3: bus_dmamem_free(aha->mailbox_dmat, aha->in_boxes, aha->mailbox_dmamap); case 2: bus_dma_tag_destroy(aha->buffer_dmat); case 1: bus_dma_tag_destroy(aha->mailbox_dmat); case 0: break; } mtx_destroy(&aha->lock); } 
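/*
 * A standalone demonstration (example only, not driver code) of the 24-bit
 * address helpers above.  The 154x is an ISA board, so mailbox and CCB
 * addresses cross the host/adapter boundary as three big-endian bytes
 * covering the 16MB ISA address space; ahautoa24() packs a host integer
 * into that wire format and aha_a24tou() unpacks it.
 */
#if 0	/* example only */
#include <assert.h>
#include <stdint.h>

int
main(void)
{
	uint8_t wire[3];
	uint32_t addr = 0x00ABCDEF;	/* any address below 16MB */

	ahautoa24(addr, wire);		/* wire[] = { 0xAB, 0xCD, 0xEF } */
	assert(wire[0] == 0xAB && wire[1] == 0xCD && wire[2] == 0xEF);
	assert(aha_a24tou(wire) == addr);
	return (0);
}
#endif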
/* * Probe the adapter and verify that the card is an Adaptec. */ int aha_probe(struct aha_softc* aha) { u_int status; u_int intstat; int error; board_id_data_t board_id; /* * See if the three I/O ports look reasonable. * Touch the minimal number of registers in the * failure case. */ status = aha_inb(aha, STATUS_REG); if ((status == 0) || (status & (DIAG_ACTIVE|CMD_REG_BUSY | STATUS_REG_RSVD)) != 0) { PRVERB((aha->dev, "status reg test failed %x\n", status)); return (ENXIO); } intstat = aha_inb(aha, INTSTAT_REG); if ((intstat & INTSTAT_REG_RSVD) != 0) { PRVERB((aha->dev, "Failed Intstat Reg Test\n")); return (ENXIO); } /* * Looking good so far. Final test is to reset the * adapter and fetch the board ID and ensure we aren't * looking at a BusLogic. */ if ((error = ahareset(aha, /*hard_reset*/TRUE)) != 0) { PRVERB((aha->dev, "Failed Reset\n")); return (ENXIO); } /* * Get the board ID. We use this to see if we're dealing with * a buslogic card or an aha card (or clone). */ error = aha_cmd(aha, AOP_INQUIRE_BOARD_ID, NULL, /*parmlen*/0, (uint8_t*)&board_id, sizeof(board_id), DEFAULT_CMD_TIMEOUT); if (error != 0) { PRVERB((aha->dev, "INQUIRE failed %x\n", error)); return (ENXIO); } aha->fw_major = board_id.firmware_rev_major; aha->fw_minor = board_id.firmware_rev_minor; aha->boardid = board_id.board_type; /* * The Buslogic cards have an id of either 0x41 or 0x42. So * if those come up in the probe, we test the geometry register * of the board. Adaptec boards that are this old will not have * this register, and return 0xff, while buslogic cards will return * something different. * * It appears that for reasons unknown, for the * aha-1542B cards, we need to wait a little bit before trying * to read the geometry register. I picked 10ms since we have * reports that a for loop to 1000 did the trick, and this * errs on the side of conservatism. Besides, no one will * notice a 10ms delay here, even the 1542B card users :-) * * Some compatible cards return 0 here. Some cards also * seem to return 0x7f. * * XXX I'm not sure how this will impact other cloned cards * * This really should be replaced with the esetup command, since * that appears to be more reliable. This becomes more and more * true over time as we discover more cards that don't read the * geometry register consistently. */ if (aha->boardid <= 0x42) { /* Wait 10ms before reading */ DELAY(10000); status = aha_inb(aha, GEOMETRY_REG); if (status != 0xff && status != 0x00 && status != 0x7f) { PRVERB((aha->dev, "Geometry Register test failed %#x\n", status)); return (ENXIO); } } return (0); } /* * Pull the board's setup information and record it in our softc. 
*/ int aha_fetch_adapter_info(struct aha_softc *aha) { setup_data_t setup_info; config_data_t config_data; uint8_t length_param; int error; struct aha_extbios extbios; switch (aha->boardid) { case BOARD_1540_16HEAD_BIOS: snprintf(aha->model, sizeof(aha->model), "1540 16 head BIOS"); break; case BOARD_1540_64HEAD_BIOS: snprintf(aha->model, sizeof(aha->model), "1540 64 head BIOS"); break; case BOARD_1542: snprintf(aha->model, sizeof(aha->model), "1540/1542 64 head BIOS"); break; case BOARD_1542C: snprintf(aha->model, sizeof(aha->model), "1542C"); break; case BOARD_1542CF: snprintf(aha->model, sizeof(aha->model), "1542CF"); break; case BOARD_1542CP: snprintf(aha->model, sizeof(aha->model), "1542CP"); break; default: snprintf(aha->model, sizeof(aha->model), "Unknown"); break; } /* * If we are a new type of 1542 board (anything newer than a 1542C) * then disable the extended bios so that the * mailbox interface is unlocked. * This is also true for the 1542B Version 3.20. First Adaptec * board that supports >1Gb drives. * No need to check the extended bios flags as some of the * extensions that cause us problems are not flagged in that byte. */ if (PROBABLY_NEW_BOARD(aha->boardid) || (aha->boardid == 0x41 && aha->fw_major == 0x31 && aha->fw_minor >= 0x34)) { error = aha_cmd(aha, AOP_RETURN_EXT_BIOS_INFO, NULL, /*paramlen*/0, (u_char *)&extbios, sizeof(extbios), DEFAULT_CMD_TIMEOUT); if (error != 0) { device_printf(aha->dev, "AOP_RETURN_EXT_BIOS_INFO - Failed."); return (error); } error = aha_cmd(aha, AOP_MBOX_IF_ENABLE, (uint8_t *)&extbios, /*paramlen*/2, NULL, 0, DEFAULT_CMD_TIMEOUT); if (error != 0) { device_printf(aha->dev, "AOP_MBOX_IF_ENABLE - Failed."); return (error); } } if (aha->boardid < 0x41) device_printf(aha->dev, "Warning: aha-1542A won't work.\n"); aha->max_sg = 17; /* Need >= 17 to do 64k I/O */ aha->diff_bus = 0; aha->extended_lun = 0; aha->extended_trans = 0; aha->max_ccbs = 16; /* Determine Sync/Wide/Disc settings */ length_param = sizeof(setup_info); error = aha_cmd(aha, AOP_INQUIRE_SETUP_INFO, &length_param, /*paramlen*/1, (uint8_t*)&setup_info, sizeof(setup_info), DEFAULT_CMD_TIMEOUT); if (error != 0) { device_printf(aha->dev, "aha_fetch_adapter_info - Failed " "Get Setup Info\n"); return (error); } if (setup_info.initiate_sync != 0) { aha->sync_permitted = ALL_TARGETS; } aha->disc_permitted = ALL_TARGETS; /* We need as many mailboxes as we can have ccbs */ aha->num_boxes = aha->max_ccbs; /* Determine our SCSI ID */ error = aha_cmd(aha, AOP_INQUIRE_CONFIG, NULL, /*parmlen*/0, (uint8_t*)&config_data, sizeof(config_data), DEFAULT_CMD_TIMEOUT); if (error != 0) { device_printf(aha->dev, "aha_fetch_adapter_info - Failed Get Config\n"); return (error); } aha->scsi_id = config_data.scsi_id; return (0); } /* * Start the board, ready for normal operation */ int aha_init(struct aha_softc* aha) { /* Announce the Adapter */ device_printf(aha->dev, "AHA-%s FW Rev. %c.%c (ID=%x) ", aha->model, aha->fw_major, aha->fw_minor, aha->boardid); if (aha->diff_bus != 0) printf("Diff "); printf("SCSI Host Adapter, SCSI ID %d, %d CCBs\n", aha->scsi_id, aha->max_ccbs); /* * Create our DMA tags. These tags define the kinds of device * accessible memory allocations and memory mappings we will * need to perform during normal operation. * * Unless we need to further restrict the allocation, we rely * on the restrictions of the parent dmat, hence the common * use of MAXADDR and MAXSIZE. */ /* DMA tag for mapping buffers into device visible space. 
*/ if (bus_dma_tag_create( /* parent */ aha->parent_dmat, /* alignment */ 1, /* boundary */ 0, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ BUS_SPACE_MAXADDR, /* filter */ NULL, /* filterarg */ NULL, /* maxsize */ DFLTPHYS, /* nsegments */ AHA_NSEG, /* maxsegsz */ BUS_SPACE_MAXSIZE_24BIT, /* flags */ BUS_DMA_ALLOCNOW, /* lockfunc */ busdma_lock_mutex, /* lockarg */ &aha->lock, &aha->buffer_dmat) != 0) { goto error_exit; } aha->init_level++; /* DMA tag for our mailboxes */ if (bus_dma_tag_create( /* parent */ aha->parent_dmat, /* alignment */ 1, /* boundary */ 0, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ BUS_SPACE_MAXADDR, /* filter */ NULL, /* filterarg */ NULL, /* maxsize */ aha->num_boxes * (sizeof(aha_mbox_in_t) + sizeof(aha_mbox_out_t)), /* nsegments */ 1, /* maxsegsz */ BUS_SPACE_MAXSIZE_24BIT, /* flags */ 0, /* lockfunc */ NULL, /* lockarg */ NULL, &aha->mailbox_dmat) != 0) { goto error_exit; } aha->init_level++; /* Allocation for our mailboxes */ if (bus_dmamem_alloc(aha->mailbox_dmat, (void **)&aha->out_boxes, BUS_DMA_NOWAIT, &aha->mailbox_dmamap) != 0) goto error_exit; aha->init_level++; /* And permanently map them */ bus_dmamap_load(aha->mailbox_dmat, aha->mailbox_dmamap, aha->out_boxes, aha->num_boxes * (sizeof(aha_mbox_in_t) + sizeof(aha_mbox_out_t)), ahamapmboxes, aha, /*flags*/0); aha->init_level++; aha->in_boxes = (aha_mbox_in_t *)&aha->out_boxes[aha->num_boxes]; ahainitmboxes(aha); /* DMA tag for our ccb structures */ if (bus_dma_tag_create( /* parent */ aha->parent_dmat, /* alignment */ 1, /* boundary */ 0, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ BUS_SPACE_MAXADDR, /* filter */ NULL, /* filterarg */ NULL, /* maxsize */ aha->max_ccbs * sizeof(struct aha_ccb), /* nsegments */ 1, /* maxsegsz */ BUS_SPACE_MAXSIZE_24BIT, /* flags */ 0, /* lockfunc */ NULL, /* lockarg */ NULL, &aha->ccb_dmat) != 0) { goto error_exit; } aha->init_level++; /* Allocation for our ccbs */ if (bus_dmamem_alloc(aha->ccb_dmat, (void **)&aha->aha_ccb_array, BUS_DMA_NOWAIT, &aha->ccb_dmamap) != 0) goto error_exit; aha->init_level++; /* And permanently map them */ bus_dmamap_load(aha->ccb_dmat, aha->ccb_dmamap, aha->aha_ccb_array, aha->max_ccbs * sizeof(struct aha_ccb), ahamapccbs, aha, /*flags*/0); aha->init_level++; /* DMA tag for our S/G structures. We allocate in page sized chunks */ if (bus_dma_tag_create( /* parent */ aha->parent_dmat, /* alignment */ 1, /* boundary */ 0, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ BUS_SPACE_MAXADDR, /* filter */ NULL, /* filterarg */ NULL, /* maxsize */ PAGE_SIZE, /* nsegments */ 1, /* maxsegsz */ BUS_SPACE_MAXSIZE_24BIT, /* flags */ 0, /* lockfunc */ NULL, /* lockarg */ NULL, &aha->sg_dmat) != 0) goto error_exit; aha->init_level++; /* Perform initial CCB allocation */ bzero(aha->aha_ccb_array, aha->max_ccbs * sizeof(struct aha_ccb)); ahaallocccbs(aha); if (aha->num_ccbs == 0) { device_printf(aha->dev, "aha_init - Unable to allocate initial ccbs\n"); goto error_exit; } /* * Note that we are going and return (to probe) */ return (0); error_exit: return (ENXIO); } int aha_attach(struct aha_softc *aha) { int tagged_dev_openings; struct cam_devq *devq; /* * We don't do tagged queueing, since the aha cards don't * support it. */ tagged_dev_openings = 0; /* * Create the device queue for our SIM. 
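*/
/*
 * A distilled, hypothetical illustration (all names invented) of the
 * init_level idiom that aha_init() and aha_free() above share: setup bumps
 * a counter after each resource is acquired, and teardown switches on that
 * counter with fall-through cases so exactly the resources that were
 * acquired are released, in reverse order.
 */
#if 0	/* example only */
void	*ex_acquire_a(void), *ex_acquire_b(void);
void	 ex_release_a(void *), ex_release_b(void *);

struct ex_softc {
	int	 init_level;
	void	*res_a;
	void	*res_b;
};

static void
ex_teardown(struct ex_softc *sc)
{
	switch (sc->init_level) {
	case 2:
		ex_release_b(sc->res_b);
		/* FALLTHROUGH */
	case 1:
		ex_release_a(sc->res_a);
		/* FALLTHROUGH */
	case 0:
		break;
	}
}

static int
ex_setup(struct ex_softc *sc)
{
	if ((sc->res_a = ex_acquire_a()) == NULL)
		goto fail;
	sc->init_level++;		/* level 1: A held */
	if ((sc->res_b = ex_acquire_b()) == NULL)
		goto fail;
	sc->init_level++;		/* level 2: A and B held */
	return (0);
fail:
	ex_teardown(sc);
	return (ENOMEM);
}
#endif
/*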
*/ devq = cam_simq_alloc(aha->max_ccbs - 1); if (devq == NULL) return (ENOMEM); /* * Construct our SIM entry */ aha->sim = cam_sim_alloc(ahaaction, ahapoll, "aha", aha, device_get_unit(aha->dev), &aha->lock, 2, tagged_dev_openings, devq); if (aha->sim == NULL) { cam_simq_free(devq); return (ENOMEM); } mtx_lock(&aha->lock); if (xpt_bus_register(aha->sim, aha->dev, 0) != CAM_SUCCESS) { cam_sim_free(aha->sim, /*free_devq*/TRUE); mtx_unlock(&aha->lock); return (ENXIO); } if (xpt_create_path(&aha->path, /*periph*/NULL, cam_sim_path(aha->sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { xpt_bus_deregister(cam_sim_path(aha->sim)); cam_sim_free(aha->sim, /*free_devq*/TRUE); mtx_unlock(&aha->lock); return (ENXIO); } mtx_unlock(&aha->lock); + gone_in_dev(aha->dev, 12, "aha(4) driver"); return (0); } static void ahaallocccbs(struct aha_softc *aha) { struct aha_ccb *next_ccb; struct sg_map_node *sg_map; bus_addr_t physaddr; aha_sg_t *segs; int newcount; int i; next_ccb = &aha->aha_ccb_array[aha->num_ccbs]; sg_map = malloc(sizeof(*sg_map), M_DEVBUF, M_NOWAIT); if (sg_map == NULL) return; /* Allocate S/G space for the next batch of CCBS */ if (bus_dmamem_alloc(aha->sg_dmat, (void **)&sg_map->sg_vaddr, BUS_DMA_NOWAIT, &sg_map->sg_dmamap) != 0) { free(sg_map, M_DEVBUF); return; } SLIST_INSERT_HEAD(&aha->sg_maps, sg_map, links); bus_dmamap_load(aha->sg_dmat, sg_map->sg_dmamap, sg_map->sg_vaddr, PAGE_SIZE, ahamapsgs, aha, /*flags*/0); segs = sg_map->sg_vaddr; physaddr = sg_map->sg_physaddr; newcount = (PAGE_SIZE / (AHA_NSEG * sizeof(aha_sg_t))); for (i = 0; aha->num_ccbs < aha->max_ccbs && i < newcount; i++) { int error; next_ccb->sg_list = segs; next_ccb->sg_list_phys = physaddr; next_ccb->flags = ACCB_FREE; callout_init_mtx(&next_ccb->timer, &aha->lock, 0); error = bus_dmamap_create(aha->buffer_dmat, /*flags*/0, &next_ccb->dmamap); if (error != 0) break; SLIST_INSERT_HEAD(&aha->free_aha_ccbs, next_ccb, links); segs += AHA_NSEG; physaddr += (AHA_NSEG * sizeof(aha_sg_t)); next_ccb++; aha->num_ccbs++; } /* Reserve a CCB for error recovery */ if (aha->recovery_accb == NULL) { aha->recovery_accb = SLIST_FIRST(&aha->free_aha_ccbs); SLIST_REMOVE_HEAD(&aha->free_aha_ccbs, links); } } static __inline void ahafreeccb(struct aha_softc *aha, struct aha_ccb *accb) { if (!dumping) mtx_assert(&aha->lock, MA_OWNED); if ((accb->flags & ACCB_ACTIVE) != 0) LIST_REMOVE(&accb->ccb->ccb_h, sim_links.le); if (aha->resource_shortage != 0 && (accb->ccb->ccb_h.status & CAM_RELEASE_SIMQ) == 0) { accb->ccb->ccb_h.status |= CAM_RELEASE_SIMQ; aha->resource_shortage = FALSE; } accb->flags = ACCB_FREE; SLIST_INSERT_HEAD(&aha->free_aha_ccbs, accb, links); aha->active_ccbs--; } static struct aha_ccb* ahagetccb(struct aha_softc *aha) { struct aha_ccb* accb; if (!dumping) mtx_assert(&aha->lock, MA_OWNED); if ((accb = SLIST_FIRST(&aha->free_aha_ccbs)) != NULL) { SLIST_REMOVE_HEAD(&aha->free_aha_ccbs, links); aha->active_ccbs++; } else if (aha->num_ccbs < aha->max_ccbs) { ahaallocccbs(aha); accb = SLIST_FIRST(&aha->free_aha_ccbs); if (accb == NULL) device_printf(aha->dev, "Can't malloc ACCB\n"); else { SLIST_REMOVE_HEAD(&aha->free_aha_ccbs, links); aha->active_ccbs++; } } return (accb); } static void ahaaction(struct cam_sim *sim, union ccb *ccb) { struct aha_softc *aha; CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("ahaaction\n")); aha = (struct aha_softc *)cam_sim_softc(sim); mtx_assert(&aha->lock, MA_OWNED); switch (ccb->ccb_h.func_code) { /* Common cases first */ case XPT_SCSI_IO: /* Execute the requested I/O operation */ case 
XPT_RESET_DEV: /* Bus Device Reset the specified SCSI device */ { struct aha_ccb *accb; struct aha_hccb *hccb; /* * Get an accb to use. */ if ((accb = ahagetccb(aha)) == NULL) { aha->resource_shortage = TRUE; xpt_freeze_simq(aha->sim, /*count*/1); ccb->ccb_h.status = CAM_REQUEUE_REQ; xpt_done(ccb); return; } hccb = &accb->hccb; /* * So we can find the ACCB when an abort is requested */ accb->ccb = ccb; ccb->ccb_h.ccb_accb_ptr = accb; ccb->ccb_h.ccb_aha_ptr = aha; /* * Put all the arguments for the xfer in the accb */ hccb->target = ccb->ccb_h.target_id; hccb->lun = ccb->ccb_h.target_lun; hccb->ahastat = 0; hccb->sdstat = 0; if (ccb->ccb_h.func_code == XPT_SCSI_IO) { struct ccb_scsiio *csio; struct ccb_hdr *ccbh; int error; csio = &ccb->csio; ccbh = &csio->ccb_h; hccb->opcode = aha->ccb_ccb_opcode; hccb->datain = (ccb->ccb_h.flags & CAM_DIR_IN) != 0; hccb->dataout = (ccb->ccb_h.flags & CAM_DIR_OUT) != 0; hccb->cmd_len = csio->cdb_len; if (hccb->cmd_len > sizeof(hccb->scsi_cdb)) { ccb->ccb_h.status = CAM_REQ_INVALID; ahafreeccb(aha, accb); xpt_done(ccb); return; } hccb->sense_len = csio->sense_len; if ((ccbh->flags & CAM_CDB_POINTER) != 0) { if ((ccbh->flags & CAM_CDB_PHYS) == 0) { bcopy(csio->cdb_io.cdb_ptr, hccb->scsi_cdb, hccb->cmd_len); } else { /* I guess I could map it in... */ ccbh->status = CAM_REQ_INVALID; ahafreeccb(aha, accb); xpt_done(ccb); return; } } else { bcopy(csio->cdb_io.cdb_bytes, hccb->scsi_cdb, hccb->cmd_len); } /* * If we have any data to send with this command, * map it into bus space. */ error = bus_dmamap_load_ccb( aha->buffer_dmat, accb->dmamap, ccb, ahaexecuteccb, accb, /*flags*/0); if (error == EINPROGRESS) { /* * So as to maintain ordering, freeze the * controller queue until our mapping is * returned. */ xpt_freeze_simq(aha->sim, 1); csio->ccb_h.status |= CAM_RELEASE_SIMQ; } } else { hccb->opcode = INITIATOR_BUS_DEV_RESET; /* No data transfer */ hccb->datain = TRUE; hccb->dataout = TRUE; hccb->cmd_len = 0; hccb->sense_len = 0; ahaexecuteccb(accb, NULL, 0, 0); } break; } case XPT_ABORT: /* Abort the specified CCB */ /* XXX Implement */ ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); break; case XPT_SET_TRAN_SETTINGS: /* XXX Implement */ ccb->ccb_h.status = CAM_PROVIDE_FAIL; xpt_done(ccb); break; case XPT_GET_TRAN_SETTINGS: /* Get default/user set transfer settings for the target */ { struct ccb_trans_settings *cts = &ccb->cts; u_int target_mask = 0x01 << ccb->ccb_h.target_id; struct ccb_trans_settings_scsi *scsi = &cts->proto_specific.scsi; struct ccb_trans_settings_spi *spi = &cts->xport_specific.spi; cts->protocol = PROTO_SCSI; cts->protocol_version = SCSI_REV_2; cts->transport = XPORT_SPI; cts->transport_version = 2; if (cts->type == CTS_TYPE_USER_SETTINGS) { spi->flags = 0; if ((aha->disc_permitted & target_mask) != 0) spi->flags |= CTS_SPI_FLAGS_DISC_ENB; spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT; if ((aha->sync_permitted & target_mask) != 0) { if (aha->boardid >= BOARD_1542CF) spi->sync_period = 25; else spi->sync_period = 50; } else { spi->sync_period = 0; } if (spi->sync_period != 0) spi->sync_offset = 15; spi->valid = CTS_SPI_VALID_SYNC_RATE | CTS_SPI_VALID_SYNC_OFFSET | CTS_SPI_VALID_BUS_WIDTH | CTS_SPI_VALID_DISC; scsi->valid = CTS_SCSI_VALID_TQ; } else { ahafetchtransinfo(aha, cts); } ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); break; } case XPT_CALC_GEOMETRY: { struct ccb_calc_geometry *ccg; uint32_t size_mb; uint32_t secs_per_cylinder; ccg = &ccb->ccg; size_mb = ccg->volume_size / ((1024L * 1024L) / ccg->block_size); if (size_mb >= 1024 && 
(aha->extended_trans != 0)) { if (size_mb >= 2048) { ccg->heads = 255; ccg->secs_per_track = 63; } else { ccg->heads = 128; ccg->secs_per_track = 32; } } else { ccg->heads = 64; ccg->secs_per_track = 32; } secs_per_cylinder = ccg->heads * ccg->secs_per_track; ccg->cylinders = ccg->volume_size / secs_per_cylinder; ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); break; } case XPT_RESET_BUS: /* Reset the specified SCSI bus */ ahareset(aha, /*hardreset*/TRUE); ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); break; case XPT_TERM_IO: /* Terminate the I/O process */ /* XXX Implement */ ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); break; case XPT_PATH_INQ: /* Path routing inquiry */ { struct ccb_pathinq *cpi = &ccb->cpi; cpi->version_num = 1; /* XXX??? */ cpi->hba_inquiry = PI_SDTR_ABLE; cpi->target_sprt = 0; cpi->hba_misc = 0; cpi->hba_eng_cnt = 0; cpi->max_target = 7; cpi->max_lun = 7; cpi->initiator_id = aha->scsi_id; cpi->bus_id = cam_sim_bus(sim); cpi->base_transfer_speed = 3300; strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); strlcpy(cpi->hba_vid, "Adaptec", HBA_IDLEN); strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); cpi->unit_number = cam_sim_unit(sim); cpi->transport = XPORT_SPI; cpi->transport_version = 2; cpi->protocol = PROTO_SCSI; cpi->protocol_version = SCSI_REV_2; cpi->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); break; } default: ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); break; } } static void ahaexecuteccb(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) { struct aha_ccb *accb; union ccb *ccb; struct aha_softc *aha; uint32_t paddr; accb = (struct aha_ccb *)arg; ccb = accb->ccb; aha = (struct aha_softc *)ccb->ccb_h.ccb_aha_ptr; if (error != 0) { if (error != EFBIG) device_printf(aha->dev, "Unexpected error 0x%x returned from " "bus_dmamap_load\n", error); if (ccb->ccb_h.status == CAM_REQ_INPROG) { xpt_freeze_devq(ccb->ccb_h.path, /*count*/1); ccb->ccb_h.status = CAM_REQ_TOO_BIG|CAM_DEV_QFRZN; } ahafreeccb(aha, accb); xpt_done(ccb); return; } if (nseg != 0) { aha_sg_t *sg; bus_dma_segment_t *end_seg; bus_dmasync_op_t op; end_seg = dm_segs + nseg; /* Copy the segments into our SG list */ sg = accb->sg_list; while (dm_segs < end_seg) { ahautoa24(dm_segs->ds_len, sg->len); ahautoa24(dm_segs->ds_addr, sg->addr); sg++; dm_segs++; } if (nseg > 1) { accb->hccb.opcode = aha->ccb_sg_opcode; ahautoa24((sizeof(aha_sg_t) * nseg), accb->hccb.data_len); ahautoa24(accb->sg_list_phys, accb->hccb.data_addr); } else { bcopy(accb->sg_list->len, accb->hccb.data_len, 3); bcopy(accb->sg_list->addr, accb->hccb.data_addr, 3); } if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) op = BUS_DMASYNC_PREREAD; else op = BUS_DMASYNC_PREWRITE; bus_dmamap_sync(aha->buffer_dmat, accb->dmamap, op); } else { accb->hccb.opcode = INITIATOR_CCB; ahautoa24(0, accb->hccb.data_len); ahautoa24(0, accb->hccb.data_addr); } /* * Last chance to check whether this CCB needs to * be aborted. */ if (ccb->ccb_h.status != CAM_REQ_INPROG) { if (nseg != 0) bus_dmamap_unload(aha->buffer_dmat, accb->dmamap); ahafreeccb(aha, accb); xpt_done(ccb); return; } accb->flags = ACCB_ACTIVE; ccb->ccb_h.status |= CAM_SIM_QUEUED; LIST_INSERT_HEAD(&aha->pending_ccbs, &ccb->ccb_h, sim_links.le); callout_reset_sbt(&accb->timer, SBT_1MS * ccb->ccb_h.timeout, 0, ahatimeout, accb, 0); /* Tell the adapter about this command */ if (aha->cur_outbox->action_code != AMBO_FREE) { /* * We should never encounter a busy mailbox. * If we do, warn the user, and treat it as * a resource shortage. 
If the controller is * hung, one of the pending transactions will * timeout causing us to start recovery operations. */ device_printf(aha->dev, "Encountered busy mailbox with %d out of %d " "commands active!!!", aha->active_ccbs, aha->max_ccbs); callout_stop(&accb->timer); if (nseg != 0) bus_dmamap_unload(aha->buffer_dmat, accb->dmamap); ahafreeccb(aha, accb); aha->resource_shortage = TRUE; xpt_freeze_simq(aha->sim, /*count*/1); ccb->ccb_h.status = CAM_REQUEUE_REQ; xpt_done(ccb); return; } paddr = ahaccbvtop(aha, accb); ahautoa24(paddr, aha->cur_outbox->ccb_addr); aha->cur_outbox->action_code = AMBO_START; aha_outb(aha, COMMAND_REG, AOP_START_MBOX); ahanextoutbox(aha); } void aha_intr(void *arg) { struct aha_softc *aha; aha = arg; mtx_lock(&aha->lock); aha_intr_locked(aha); mtx_unlock(&aha->lock); } void aha_intr_locked(struct aha_softc *aha) { u_int intstat; uint32_t paddr; while (((intstat = aha_inb(aha, INTSTAT_REG)) & INTR_PENDING) != 0) { if ((intstat & CMD_COMPLETE) != 0) { aha->latched_status = aha_inb(aha, STATUS_REG); aha->command_cmp = TRUE; } aha_outb(aha, CONTROL_REG, RESET_INTR); if ((intstat & IMB_LOADED) != 0) { while (aha->cur_inbox->comp_code != AMBI_FREE) { paddr = aha_a24tou(aha->cur_inbox->ccb_addr); ahadone(aha, ahaccbptov(aha, paddr), aha->cur_inbox->comp_code); aha->cur_inbox->comp_code = AMBI_FREE; ahanextinbox(aha); } } if ((intstat & SCSI_BUS_RESET) != 0) { ahareset(aha, /*hardreset*/FALSE); } } } static void ahadone(struct aha_softc *aha, struct aha_ccb *accb, aha_mbi_comp_code_t comp_code) { union ccb *ccb; struct ccb_scsiio *csio; ccb = accb->ccb; csio = &accb->ccb->csio; if ((accb->flags & ACCB_ACTIVE) == 0) { device_printf(aha->dev, "ahadone - Attempt to free non-active ACCB %p\n", (void *)accb); return; } if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) { bus_dmasync_op_t op; if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) op = BUS_DMASYNC_POSTREAD; else op = BUS_DMASYNC_POSTWRITE; bus_dmamap_sync(aha->buffer_dmat, accb->dmamap, op); bus_dmamap_unload(aha->buffer_dmat, accb->dmamap); } if (accb == aha->recovery_accb) { /* * The recovery ACCB does not have a CCB associated * with it, so short circuit the normal error handling. * We now traverse our list of pending CCBs and process * any that were terminated by the recovery CCBs action. * We also reinstate timeouts for all remaining, pending, * CCBs. 
*/ struct cam_path *path; struct ccb_hdr *ccb_h; cam_status error; /* Notify all clients that a BDR occurred */ error = xpt_create_path(&path, /*periph*/NULL, cam_sim_path(aha->sim), accb->hccb.target, CAM_LUN_WILDCARD); if (error == CAM_REQ_CMP) { xpt_async(AC_SENT_BDR, path, NULL); xpt_free_path(path); } ccb_h = LIST_FIRST(&aha->pending_ccbs); while (ccb_h != NULL) { struct aha_ccb *pending_accb; pending_accb = (struct aha_ccb *)ccb_h->ccb_accb_ptr; if (pending_accb->hccb.target == accb->hccb.target) { pending_accb->hccb.ahastat = AHASTAT_HA_BDR; ccb_h = LIST_NEXT(ccb_h, sim_links.le); ahadone(aha, pending_accb, AMBI_ERROR); } else { callout_reset_sbt(&pending_accb->timer, SBT_1MS * ccb_h->timeout, 0, ahatimeout, pending_accb, 0); ccb_h = LIST_NEXT(ccb_h, sim_links.le); } } device_printf(aha->dev, "No longer in timeout\n"); return; } callout_stop(&accb->timer); switch (comp_code) { case AMBI_FREE: device_printf(aha->dev, "ahadone - CCB completed with free status!\n"); break; case AMBI_NOT_FOUND: device_printf(aha->dev, "ahadone - CCB Abort failed to find CCB\n"); break; case AMBI_ABORT: case AMBI_ERROR: /* An error occurred */ if (accb->hccb.opcode < INITIATOR_CCB_WRESID) csio->resid = 0; else csio->resid = aha_a24tou(accb->hccb.data_len); switch(accb->hccb.ahastat) { case AHASTAT_DATARUN_ERROR: { if (csio->resid <= 0) { csio->ccb_h.status = CAM_DATA_RUN_ERR; break; } /* FALLTHROUGH */ } case AHASTAT_NOERROR: csio->scsi_status = accb->hccb.sdstat; csio->ccb_h.status |= CAM_SCSI_STATUS_ERROR; switch(csio->scsi_status) { case SCSI_STATUS_CHECK_COND: case SCSI_STATUS_CMD_TERMINATED: csio->ccb_h.status |= CAM_AUTOSNS_VALID; /* * The aha writes the sense data at different * offsets based on the scsi cmd len */ bcopy((caddr_t) &accb->hccb.scsi_cdb + accb->hccb.cmd_len, (caddr_t) &csio->sense_data, accb->hccb.sense_len); break; default: break; case SCSI_STATUS_OK: csio->ccb_h.status = CAM_REQ_CMP; break; } break; case AHASTAT_SELTIMEOUT: csio->ccb_h.status = CAM_SEL_TIMEOUT; break; case AHASTAT_UNEXPECTED_BUSFREE: csio->ccb_h.status = CAM_UNEXP_BUSFREE; break; case AHASTAT_INVALID_PHASE: csio->ccb_h.status = CAM_SEQUENCE_FAIL; break; case AHASTAT_INVALID_ACTION_CODE: panic("%s: Invalid Action code", aha_name(aha)); break; case AHASTAT_INVALID_OPCODE: if (accb->hccb.opcode < INITIATOR_CCB_WRESID) panic("%s: Invalid CCB Opcode %x hccb = %p", aha_name(aha), accb->hccb.opcode, &accb->hccb); device_printf(aha->dev, "AHA-1540A compensation failed\n"); xpt_freeze_devq(ccb->ccb_h.path, /*count*/1); csio->ccb_h.status = CAM_REQUEUE_REQ; break; case AHASTAT_LINKED_CCB_LUN_MISMATCH: /* We don't even support linked commands... */ panic("%s: Linked CCB Lun Mismatch", aha_name(aha)); break; case AHASTAT_INVALID_CCB_OR_SG_PARAM: panic("%s: Invalid CCB or SG list", aha_name(aha)); break; case AHASTAT_HA_SCSI_BUS_RESET: if ((csio->ccb_h.status & CAM_STATUS_MASK) != CAM_CMD_TIMEOUT) csio->ccb_h.status = CAM_SCSI_BUS_RESET; break; case AHASTAT_HA_BDR: if ((accb->flags & ACCB_DEVICE_RESET) == 0) csio->ccb_h.status = CAM_BDR_SENT; else csio->ccb_h.status = CAM_CMD_TIMEOUT; break; } if (csio->ccb_h.status != CAM_REQ_CMP) { xpt_freeze_devq(csio->ccb_h.path, /*count*/1); csio->ccb_h.status |= CAM_DEV_QFRZN; } if ((accb->flags & ACCB_RELEASE_SIMQ) != 0) ccb->ccb_h.status |= CAM_RELEASE_SIMQ; ahafreeccb(aha, accb); xpt_done(ccb); break; case AMBI_OK: /* All completed without incident */ /* XXX DO WE NEED TO COPY SENSE BYTES HERE???? XXX */ /* I don't think so since it works???? 
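*/
/*
 * Illustrative sketch (invented structure, not the real hccb layout) of
 * the sense-copy quirk above: the board deposits the sense bytes
 * immediately after the CDB inside the hardware CCB, so the source offset
 * of the copy moves with the command length instead of being fixed.
 */
#if 0	/* example only */
#include <stdint.h>
#include <string.h>

struct ex_hccb {
	uint8_t	cmd_len;
	uint8_t	sense_len;
	uint8_t	scsi_cdb[16 + 255];	/* CDB, sense data right behind it */
};

static void
ex_copy_sense(const struct ex_hccb *h, uint8_t *dst)
{
	/* Sense begins cmd_len bytes into the CDB area, not at a fixed spot. */
	memcpy(dst, h->scsi_cdb + h->cmd_len, h->sense_len);
}
#endif
/*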
*/ ccb->ccb_h.status |= CAM_REQ_CMP; if ((accb->flags & ACCB_RELEASE_SIMQ) != 0) ccb->ccb_h.status |= CAM_RELEASE_SIMQ; ahafreeccb(aha, accb); xpt_done(ccb); break; } } static int ahareset(struct aha_softc* aha, int hard_reset) { struct ccb_hdr *ccb_h; u_int status; u_int timeout; uint8_t reset_type; if (hard_reset != 0) reset_type = HARD_RESET; else reset_type = SOFT_RESET; aha_outb(aha, CONTROL_REG, reset_type); /* Wait 5sec. for Diagnostic start */ timeout = 5 * 10000; while (--timeout) { status = aha_inb(aha, STATUS_REG); if ((status & DIAG_ACTIVE) != 0) break; DELAY(100); } if (timeout == 0) { PRVERB((aha->dev, "ahareset - Diagnostic Active failed to " "assert. status = %#x\n", status)); return (ETIMEDOUT); } /* Wait 10sec. for Diagnostic end */ timeout = 10 * 10000; while (--timeout) { status = aha_inb(aha, STATUS_REG); if ((status & DIAG_ACTIVE) == 0) break; DELAY(100); } if (timeout == 0) { panic("%s: ahareset - Diagnostic Active failed to drop. " "status = 0x%x\n", aha_name(aha), status); return (ETIMEDOUT); } /* Wait for the host adapter to become ready or report a failure */ timeout = 10000; while (--timeout) { status = aha_inb(aha, STATUS_REG); if ((status & (DIAG_FAIL|HA_READY|DATAIN_REG_READY)) != 0) break; DELAY(100); } if (timeout == 0) { device_printf(aha->dev, "ahareset - Host adapter failed to " "come ready. status = 0x%x\n", status); return (ETIMEDOUT); } /* If the diagnostics failed, tell the user */ if ((status & DIAG_FAIL) != 0 || (status & HA_READY) == 0) { device_printf(aha->dev, "ahareset - Adapter failed diag\n"); if ((status & DATAIN_REG_READY) != 0) device_printf(aha->dev, "ahareset - Host Adapter " "Error code = 0x%x\n", aha_inb(aha, DATAIN_REG)); return (ENXIO); } /* If we've attached to the XPT, tell it about the event */ if (aha->path != NULL) xpt_async(AC_BUS_RESET, aha->path, NULL); /* * Perform completion processing for all outstanding CCBs. */ while ((ccb_h = LIST_FIRST(&aha->pending_ccbs)) != NULL) { struct aha_ccb *pending_accb; pending_accb = (struct aha_ccb *)ccb_h->ccb_accb_ptr; pending_accb->hccb.ahastat = AHASTAT_HA_SCSI_BUS_RESET; ahadone(aha, pending_accb, AMBI_ERROR); } /* If we've allocated mailboxes, initialize them */ /* Must be done after we've aborted our queue, or aha_cmd fails */ if (aha->init_level > 4) ahainitmboxes(aha); return (0); } /* * Send a command to the adapter. */ int aha_cmd(struct aha_softc *aha, aha_op_t opcode, uint8_t *params, u_int param_len, uint8_t *reply_data, u_int reply_len, u_int cmd_timeout) { u_int timeout; u_int status; u_int saved_status; u_int intstat; u_int reply_buf_size; int cmd_complete; int error; /* No data returned to start */ reply_buf_size = reply_len; reply_len = 0; intstat = 0; cmd_complete = 0; saved_status = 0; error = 0; /* * All commands except for the "start mailbox" and the "enable * outgoing mailbox read interrupt" commands cannot be issued * while there are pending transactions. Freeze our SIMQ * and wait for all completions to occur if necessary. */ timeout = 10000; while (LIST_FIRST(&aha->pending_ccbs) != NULL && --timeout) { /* Fire the interrupt handler in case interrupts are blocked */ aha_intr(aha); DELAY(10); } if (timeout == 0) { device_printf(aha->dev, "aha_cmd: Timeout waiting for adapter idle\n"); return (ETIMEDOUT); } aha->command_cmp = 0; /* * Wait up to 10 sec. for the adapter to become * ready to accept commands. 
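*/
/*
 * The reset and command paths here share one polling idiom: a countdown
 * paired with a fixed DELAY() so the wait is bounded at roughly
 * count * delay_us; e.g. the loop just below uses 100000 * 100us, about
 * 10 seconds.  A hypothetical distillation (not driver code):
 */
#if 0	/* example only */
/* Poll cond(arg) every 100us, up to 'count' times; 0 on success. */
static int
ex_poll_until(int (*cond)(void *), void *arg, u_int count)
{
	while (--count) {
		if (cond(arg))
			return (0);
		DELAY(100);
	}
	return (ETIMEDOUT);
}
#endif
/*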
*/ timeout = 100000; while (--timeout) { status = aha_inb(aha, STATUS_REG); if ((status & HA_READY) != 0 && (status & CMD_REG_BUSY) == 0) break; /* * Throw away any pending data which may be * left over from earlier commands that we * timed out on. */ if ((status & DATAIN_REG_READY) != 0) (void)aha_inb(aha, DATAIN_REG); DELAY(100); } if (timeout == 0) { device_printf(aha->dev, "aha_cmd: Timeout waiting for adapter" " ready, status = 0x%x\n", status); return (ETIMEDOUT); } /* * Send the opcode followed by any necessary parameter bytes. */ aha_outb(aha, COMMAND_REG, opcode); /* * Wait for up to 1sec to get the parameter list sent */ timeout = 10000; while (param_len && --timeout) { DELAY(100); status = aha_inb(aha, STATUS_REG); intstat = aha_inb(aha, INTSTAT_REG); if ((intstat & (INTR_PENDING|CMD_COMPLETE)) == (INTR_PENDING|CMD_COMPLETE)) { saved_status = status; cmd_complete = 1; break; } if (aha->command_cmp != 0) { saved_status = aha->latched_status; cmd_complete = 1; break; } if ((status & DATAIN_REG_READY) != 0) break; if ((status & CMD_REG_BUSY) == 0) { aha_outb(aha, COMMAND_REG, *params++); param_len--; timeout = 10000; } } if (timeout == 0) { device_printf(aha->dev, "aha_cmd: Timeout sending parameters, " "status = 0x%x\n", status); error = ETIMEDOUT; } /* * For all other commands, we wait for any output data * and the final command completion interrupt. */ while (cmd_complete == 0 && --cmd_timeout) { status = aha_inb(aha, STATUS_REG); intstat = aha_inb(aha, INTSTAT_REG); if (aha->command_cmp != 0) { cmd_complete = 1; saved_status = aha->latched_status; } else if ((intstat & (INTR_PENDING|CMD_COMPLETE)) == (INTR_PENDING|CMD_COMPLETE)) { /* * Our poll (in case interrupts are blocked) * saw the CMD_COMPLETE interrupt. */ cmd_complete = 1; saved_status = status; } if ((status & DATAIN_REG_READY) != 0) { uint8_t data; data = aha_inb(aha, DATAIN_REG); if (reply_len < reply_buf_size) { *reply_data++ = data; } else { device_printf(aha->dev, "aha_cmd - Discarded reply data " "byte for opcode 0x%x\n", opcode); } /* * Reset timeout to ensure at least a second * between response bytes. */ cmd_timeout = MAX(cmd_timeout, 10000); reply_len++; } DELAY(100); } if (cmd_timeout == 0) { device_printf(aha->dev, "aha_cmd: Timeout: status = 0x%x, " "intstat = 0x%x, reply_len = %d\n", status, intstat, reply_len); return (ETIMEDOUT); } /* * Clear any pending interrupts. Block interrupts so our * interrupt handler is not re-entered. */ aha_intr(aha); if (error != 0) return (error); /* * If the command was rejected by the controller, tell the caller. */ if ((saved_status & CMD_INVALID) != 0) { PRVERB((aha->dev, "Invalid Command 0x%x\n", opcode)); /* * Some early adapters may not recover properly from * an invalid command. If it appears that the controller * has wedged (i.e. status was not cleared by our interrupt * reset above), perform a soft reset. 
*/ DELAY(1000); status = aha_inb(aha, STATUS_REG); if ((status & (CMD_INVALID|STATUS_REG_RSVD|DATAIN_REG_READY| CMD_REG_BUSY|DIAG_FAIL|DIAG_ACTIVE)) != 0 || (status & (HA_READY|INIT_REQUIRED)) != (HA_READY|INIT_REQUIRED)) ahareset(aha, /*hard_reset*/FALSE); return (EINVAL); } if (param_len > 0) { /* The controller did not accept the full argument list */ PRVERB((aha->dev, "Controller did not accept full argument " "list (%d > 0)\n", param_len)); return (E2BIG); } if (reply_len != reply_buf_size) { /* Too much or too little data received */ PRVERB((aha->dev, "data received mismatch (%d != %d)\n", reply_len, reply_buf_size)); return (EMSGSIZE); } /* We were successful */ return (0); } static int ahainitmboxes(struct aha_softc *aha) { int error; init_24b_mbox_params_t init_mbox; bzero(aha->in_boxes, sizeof(aha_mbox_in_t) * aha->num_boxes); bzero(aha->out_boxes, sizeof(aha_mbox_out_t) * aha->num_boxes); aha->cur_inbox = aha->in_boxes; aha->last_inbox = aha->in_boxes + aha->num_boxes - 1; aha->cur_outbox = aha->out_boxes; aha->last_outbox = aha->out_boxes + aha->num_boxes - 1; /* Tell the adapter about them */ init_mbox.num_mboxes = aha->num_boxes; ahautoa24(aha->mailbox_physbase, init_mbox.base_addr); error = aha_cmd(aha, AOP_INITIALIZE_MBOX, (uint8_t *)&init_mbox, /*parmlen*/sizeof(init_mbox), /*reply_buf*/NULL, /*reply_len*/0, DEFAULT_CMD_TIMEOUT); if (error != 0) printf("ahainitmboxes: Initialization command failed\n"); return (error); } /* * Update the XPT's idea of the negotiated transfer * parameters for a particular target. */ static void ahafetchtransinfo(struct aha_softc *aha, struct ccb_trans_settings* cts) { setup_data_t setup_info; u_int target; u_int targ_offset; u_int sync_period; int error; uint8_t param; targ_syncinfo_t sync_info; struct ccb_trans_settings_spi *spi = &cts->xport_specific.spi; target = cts->ccb_h.target_id; targ_offset = (target & 0x7); /* * Inquire Setup Information. This command retrieves * the sync info for older models. 
*/ param = sizeof(setup_info); error = aha_cmd(aha, AOP_INQUIRE_SETUP_INFO, &param, /*paramlen*/1, (uint8_t*)&setup_info, sizeof(setup_info), DEFAULT_CMD_TIMEOUT); if (error != 0) { device_printf(aha->dev, "ahafetchtransinfo - Inquire Setup Info Failed %d\n", error); return; } sync_info = setup_info.syncinfo[targ_offset]; if (sync_info.sync == 0) spi->sync_offset = 0; else spi->sync_offset = sync_info.offset; spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT; if (aha->boardid >= BOARD_1542CF) sync_period = 1000; else sync_period = 2000; sync_period += 500 * sync_info.period; /* Convert ns value to standard SCSI sync rate */ if (spi->sync_offset != 0) spi->sync_period = scsi_calc_syncparam(sync_period); else spi->sync_period = 0; spi->valid = CTS_SPI_VALID_SYNC_RATE | CTS_SPI_VALID_SYNC_OFFSET | CTS_SPI_VALID_BUS_WIDTH; xpt_async(AC_TRANSFER_NEG, cts->ccb_h.path, cts); } static void ahamapmboxes(void *arg, bus_dma_segment_t *segs, int nseg, int error) { struct aha_softc* aha; aha = (struct aha_softc*)arg; aha->mailbox_physbase = segs->ds_addr; } static void ahamapccbs(void *arg, bus_dma_segment_t *segs, int nseg, int error) { struct aha_softc* aha; aha = (struct aha_softc*)arg; aha->aha_ccb_physbase = segs->ds_addr; } static void ahamapsgs(void *arg, bus_dma_segment_t *segs, int nseg, int error) { struct aha_softc* aha; aha = (struct aha_softc*)arg; SLIST_FIRST(&aha->sg_maps)->sg_physaddr = segs->ds_addr; } static void ahapoll(struct cam_sim *sim) { aha_intr_locked(cam_sim_softc(sim)); } static void ahatimeout(void *arg) { struct aha_ccb *accb; union ccb *ccb; struct aha_softc *aha; uint32_t paddr; struct ccb_hdr *ccb_h; accb = (struct aha_ccb *)arg; ccb = accb->ccb; aha = (struct aha_softc *)ccb->ccb_h.ccb_aha_ptr; mtx_assert(&aha->lock, MA_OWNED); xpt_print_path(ccb->ccb_h.path); printf("CCB %p - timed out\n", (void *)accb); if ((accb->flags & ACCB_ACTIVE) == 0) { xpt_print_path(ccb->ccb_h.path); printf("CCB %p - timed out CCB already completed\n", (void *)accb); return; } /* * In order to simplify the recovery process, we ask the XPT * layer to halt the queue of new transactions and we traverse * the list of pending CCBs and remove their timeouts. This * means that the driver attempts to clear only one error * condition at a time. In general, timeouts that occur * close together are related anyway, so there is no benefit * in attempting to handle errors in parallel. Timeouts will * be reinstated when the recovery process ends. */ if ((accb->flags & ACCB_DEVICE_RESET) == 0) { if ((accb->flags & ACCB_RELEASE_SIMQ) == 0) { xpt_freeze_simq(aha->sim, /*count*/1); accb->flags |= ACCB_RELEASE_SIMQ; } ccb_h = LIST_FIRST(&aha->pending_ccbs); while (ccb_h != NULL) { struct aha_ccb *pending_accb; pending_accb = (struct aha_ccb *)ccb_h->ccb_accb_ptr; callout_stop(&pending_accb->timer); ccb_h = LIST_NEXT(ccb_h, sim_links.le); } } if ((accb->flags & ACCB_DEVICE_RESET) != 0 || aha->cur_outbox->action_code != AMBO_FREE) { /* * Try a full host adapter/SCSI bus reset. * We do this only if we have already attempted * to clear the condition with a BDR, or we cannot * attempt a BDR for lack of mailbox resources. */ ccb->ccb_h.status = CAM_CMD_TIMEOUT; ahareset(aha, /*hardreset*/TRUE); device_printf(aha->dev, "No longer in timeout\n"); } else { /* * Send a Bus Device Reset message: * The target that is holding up the bus may not * be the same as the one that triggered this timeout * (different commands have different timeout lengths), * but we have no way of determining this from our * timeout handler. Our strategy here is to queue a 
Our strategy here is to queue a * BDR message to the target of the timed out command. * If this fails, we'll get another timeout 2 seconds * later which will attempt a bus reset. */ accb->flags |= ACCB_DEVICE_RESET; callout_reset(&accb->timer, 2 * hz, ahatimeout, accb); aha->recovery_accb->hccb.opcode = INITIATOR_BUS_DEV_RESET; /* No Data Transfer */ aha->recovery_accb->hccb.datain = TRUE; aha->recovery_accb->hccb.dataout = TRUE; aha->recovery_accb->hccb.ahastat = 0; aha->recovery_accb->hccb.sdstat = 0; aha->recovery_accb->hccb.target = ccb->ccb_h.target_id; /* Tell the adapter about this command */ paddr = ahaccbvtop(aha, aha->recovery_accb); ahautoa24(paddr, aha->cur_outbox->ccb_addr); aha->cur_outbox->action_code = AMBO_START; aha_outb(aha, COMMAND_REG, AOP_START_MBOX); ahanextoutbox(aha); } } int aha_detach(struct aha_softc *aha) { mtx_lock(&aha->lock); xpt_async(AC_LOST_DEVICE, aha->path, NULL); xpt_free_path(aha->path); xpt_bus_deregister(cam_sim_path(aha->sim)); cam_sim_free(aha->sim, /*free_devq*/TRUE); mtx_unlock(&aha->lock); /* XXX: Drain all timers? */ return (0); } MODULE_DEPEND(aha, cam, 1, 1, 1); Index: head/sys/dev/aic/aic.c =================================================================== --- head/sys/dev/aic/aic.c (revision 328522) +++ head/sys/dev/aic/aic.c (revision 328523) @@ -1,1600 +1,1601 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 1999 Luoqi Chen. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include static void aic_action(struct cam_sim *sim, union ccb *ccb); static void aic_execute_scb(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error); static void aic_intr_locked(struct aic_softc *aic); static void aic_start(struct aic_softc *aic); static void aic_select(struct aic_softc *aic); static void aic_selected(struct aic_softc *aic); static void aic_reselected(struct aic_softc *aic); static void aic_reconnect(struct aic_softc *aic, int tag); static void aic_cmd(struct aic_softc *aic); static void aic_msgin(struct aic_softc *aic); static void aic_handle_msgin(struct aic_softc *aic); static void aic_msgout(struct aic_softc *aic); static void aic_datain(struct aic_softc *aic); static void aic_dataout(struct aic_softc *aic); static void aic_done(struct aic_softc *aic, struct aic_scb *scb); static void aic_poll(struct cam_sim *sim); static void aic_timeout(void *arg); static void aic_scsi_reset(struct aic_softc *aic); static void aic_chip_reset(struct aic_softc *aic); static void aic_reset(struct aic_softc *aic, int initiate_reset); devclass_t aic_devclass; static struct aic_scb * aic_get_scb(struct aic_softc *aic) { struct aic_scb *scb; if (!dumping) mtx_assert(&aic->lock, MA_OWNED); if ((scb = SLIST_FIRST(&aic->free_scbs)) != NULL) SLIST_REMOVE_HEAD(&aic->free_scbs, link); return (scb); } static void aic_free_scb(struct aic_softc *aic, struct aic_scb *scb) { if (!dumping) mtx_assert(&aic->lock, MA_OWNED); if ((aic->flags & AIC_RESOURCE_SHORTAGE) != 0 && (scb->ccb->ccb_h.status & CAM_RELEASE_SIMQ) == 0) { scb->ccb->ccb_h.status |= CAM_RELEASE_SIMQ; aic->flags &= ~AIC_RESOURCE_SHORTAGE; } scb->flags = 0; SLIST_INSERT_HEAD(&aic->free_scbs, scb, link); } static void aic_action(struct cam_sim *sim, union ccb *ccb) { struct aic_softc *aic; CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("aic_action\n")); aic = (struct aic_softc *)cam_sim_softc(sim); mtx_assert(&aic->lock, MA_OWNED); switch (ccb->ccb_h.func_code) { case XPT_SCSI_IO: /* Execute the requested I/O operation */ case XPT_RESET_DEV: /* Bus Device Reset the specified SCSI device */ { struct aic_scb *scb; if ((scb = aic_get_scb(aic)) == NULL) { aic->flags |= AIC_RESOURCE_SHORTAGE; xpt_freeze_simq(aic->sim, /*count*/1); ccb->ccb_h.status = CAM_REQUEUE_REQ; xpt_done(ccb); return; } scb->ccb = ccb; ccb->ccb_h.ccb_scb_ptr = scb; ccb->ccb_h.ccb_aic_ptr = aic; scb->target = ccb->ccb_h.target_id; scb->lun = ccb->ccb_h.target_lun; if (ccb->ccb_h.func_code == XPT_SCSI_IO) { scb->cmd_len = ccb->csio.cdb_len; if (ccb->ccb_h.flags & CAM_CDB_POINTER) { if (ccb->ccb_h.flags & CAM_CDB_PHYS) { ccb->ccb_h.status = CAM_REQ_INVALID; aic_free_scb(aic, scb); xpt_done(ccb); return; } scb->cmd_ptr = ccb->csio.cdb_io.cdb_ptr; } else { scb->cmd_ptr = ccb->csio.cdb_io.cdb_bytes; } if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) { if ((ccb->ccb_h.flags & CAM_DATA_MASK) != CAM_DATA_VADDR) { ccb->ccb_h.status = CAM_REQ_INVALID; aic_free_scb(aic, scb); xpt_done(ccb); return; } scb->data_ptr = ccb->csio.data_ptr; scb->data_len = ccb->csio.dxfer_len; } else { scb->data_ptr = NULL; scb->data_len = 0; } aic_execute_scb(scb, NULL, 0, 0); } else { scb->flags |= SCB_DEVICE_RESET; aic_execute_scb(scb, NULL, 0, 0); } break; } case XPT_SET_TRAN_SETTINGS: { struct ccb_trans_settings *cts = &ccb->cts; struct aic_tinfo *ti = &aic->tinfo[ccb->ccb_h.target_id]; struct 
ccb_trans_settings_scsi *scsi = &cts->proto_specific.scsi; struct ccb_trans_settings_spi *spi = &cts->xport_specific.spi; if ((spi->valid & CTS_SPI_VALID_DISC) != 0 && (aic->flags & AIC_DISC_ENABLE) != 0) { if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0) ti->flags |= TINFO_DISC_ENB; else ti->flags &= ~TINFO_DISC_ENB; } if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) { if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) ti->flags |= TINFO_TAG_ENB; else ti->flags &= ~TINFO_TAG_ENB; } if ((spi->valid & CTS_SPI_VALID_SYNC_RATE) != 0) { ti->goal.period = spi->sync_period; if (ti->goal.period > aic->min_period) { ti->goal.period = 0; ti->goal.offset = 0; } else if (ti->goal.period < aic->max_period) ti->goal.period = aic->max_period; } if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) != 0) { ti->goal.offset = spi->sync_offset; if (ti->goal.offset == 0) ti->goal.period = 0; else if (ti->goal.offset > AIC_SYNC_OFFSET) ti->goal.offset = AIC_SYNC_OFFSET; } if ((ti->goal.period != ti->current.period) || (ti->goal.offset != ti->current.offset)) ti->flags |= TINFO_SDTR_NEGO; ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); break; } case XPT_GET_TRAN_SETTINGS: { struct ccb_trans_settings *cts = &ccb->cts; struct aic_tinfo *ti = &aic->tinfo[ccb->ccb_h.target_id]; struct ccb_trans_settings_scsi *scsi = &cts->proto_specific.scsi; struct ccb_trans_settings_spi *spi = &cts->xport_specific.spi; cts->protocol = PROTO_SCSI; cts->protocol_version = SCSI_REV_2; cts->transport = XPORT_SPI; cts->transport_version = 2; scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB; spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB; if ((ti->flags & TINFO_DISC_ENB) != 0) spi->flags |= CTS_SPI_FLAGS_DISC_ENB; if ((ti->flags & TINFO_TAG_ENB) != 0) scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB; if (cts->type == CTS_TYPE_CURRENT_SETTINGS) { spi->sync_period = ti->current.period; spi->sync_offset = ti->current.offset; } else { spi->sync_period = ti->user.period; spi->sync_offset = ti->user.offset; } spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT; spi->valid = CTS_SPI_VALID_SYNC_RATE | CTS_SPI_VALID_SYNC_OFFSET | CTS_SPI_VALID_BUS_WIDTH | CTS_SPI_VALID_DISC; scsi->valid = CTS_SCSI_VALID_TQ; ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); break; } case XPT_CALC_GEOMETRY: { cam_calc_geometry(&ccb->ccg, /*extended*/1); xpt_done(ccb); break; } case XPT_RESET_BUS: /* Reset the specified SCSI bus */ aic_reset(aic, /*initiate_reset*/TRUE); ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); break; case XPT_PATH_INQ: /* Path routing inquiry */ { struct ccb_pathinq *cpi = &ccb->cpi; cpi->version_num = 1; /* XXX??? 
*/ cpi->hba_inquiry = PI_SDTR_ABLE | PI_TAG_ABLE; cpi->target_sprt = 0; cpi->hba_misc = 0; cpi->hba_eng_cnt = 0; cpi->max_target = 7; cpi->max_lun = 7; cpi->initiator_id = aic->initiator; cpi->bus_id = cam_sim_bus(sim); cpi->base_transfer_speed = 3300; strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); strlcpy(cpi->hba_vid, "Adaptec", HBA_IDLEN); strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); cpi->unit_number = cam_sim_unit(sim); cpi->transport = XPORT_SPI; cpi->transport_version = 2; cpi->protocol = PROTO_SCSI; cpi->protocol_version = SCSI_REV_2; cpi->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); break; } default: ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); break; } } static void aic_execute_scb(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) { struct aic_scb *scb = (struct aic_scb *)arg; union ccb *ccb = scb->ccb; struct aic_softc *aic = (struct aic_softc *)ccb->ccb_h.ccb_aic_ptr; if (!dumping) mtx_assert(&aic->lock, MA_OWNED); if (ccb->ccb_h.status != CAM_REQ_INPROG) { aic_free_scb(aic, scb); xpt_done(ccb); return; } scb->flags |= SCB_ACTIVE; ccb->ccb_h.status |= CAM_SIM_QUEUED; TAILQ_INSERT_TAIL(&aic->pending_ccbs, &ccb->ccb_h, sim_links.tqe); callout_reset_sbt(&scb->timer, SBT_1MS * ccb->ccb_h.timeout, 0, aic_timeout, scb, 0); aic_start(aic); } /* * Start another command if the controller is not busy. */ static void aic_start(struct aic_softc *aic) { struct ccb_hdr *ccb_h; struct aic_tinfo *ti; if (aic->state != AIC_IDLE) return; TAILQ_FOREACH(ccb_h, &aic->pending_ccbs, sim_links.tqe) { ti = &aic->tinfo[ccb_h->target_id]; if ((ti->lubusy & (1 << ccb_h->target_lun)) == 0) { TAILQ_REMOVE(&aic->pending_ccbs, ccb_h, sim_links.tqe); aic->nexus = (struct aic_scb *)ccb_h->ccb_scb_ptr; aic_select(aic); return; } } CAM_DEBUG_PRINT(CAM_DEBUG_TRACE, ("aic_start: idle\n")); aic_outb(aic, SIMODE0, ENSELDI); aic_outb(aic, SIMODE1, ENSCSIRST); aic_outb(aic, SCSISEQ, ENRESELI); } /* * Start a selection. */ static void aic_select(struct aic_softc *aic) { struct aic_scb *scb = aic->nexus; CAM_DEBUG(scb->ccb->ccb_h.path, CAM_DEBUG_TRACE, ("aic_select - ccb %p\n", scb->ccb)); aic->state = AIC_SELECTING; aic_outb(aic, DMACNTRL1, 0); aic_outb(aic, SCSIID, aic->initiator << OID_S | scb->target); aic_outb(aic, SXFRCTL1, STIMO_256ms | ENSTIMER | (aic->flags & AIC_PARITY_ENABLE ? ENSPCHK : 0)); aic_outb(aic, SIMODE0, ENSELDI|ENSELDO); aic_outb(aic, SIMODE1, ENSCSIRST|ENSELTIMO); aic_outb(aic, SCSISEQ, ENRESELI|ENSELO|ENAUTOATNO); } /* * We have successfully selected a target, prepare for the information * transfer phases. 
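* aic_selected() below seeds aic->msg_outq as a bitmask of the * messages still owed to the target (identify, plus optionally a tag * message and an SDTR request), which aic_msgout() later drains; for * a bus device reset, MSG_BUS_DEV_RESET is loaded straight into the * message buffer instead.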
*/ static void aic_selected(struct aic_softc *aic) { struct aic_scb *scb = aic->nexus; union ccb *ccb = scb->ccb; struct aic_tinfo *ti = &aic->tinfo[scb->target]; CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("aic_selected - ccb %p\n", ccb)); aic->state = AIC_HASNEXUS; if (scb->flags & SCB_DEVICE_RESET) { aic->msg_buf[0] = MSG_BUS_DEV_RESET; aic->msg_len = 1; aic->msg_outq = AIC_MSG_MSGBUF; } else { aic->msg_outq = AIC_MSG_IDENTIFY; if ((ti->flags & TINFO_TAG_ENB) != 0 && (ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0) aic->msg_outq |= AIC_MSG_TAG_Q; else ti->lubusy |= 1 << scb->lun; if ((ti->flags & TINFO_SDTR_NEGO) != 0) aic->msg_outq |= AIC_MSG_SDTR; } aic_outb(aic, CLRSINT0, CLRSELDO); aic_outb(aic, CLRSINT1, CLRBUSFREE); aic_outb(aic, SCSISEQ, ENAUTOATNP); aic_outb(aic, SIMODE0, 0); aic_outb(aic, SIMODE1, ENSCSIRST|ENBUSFREE|ENREQINIT); aic_outb(aic, SCSIRATE, ti->scsirate); } /* * We are re-selected by a target, save the target id and wait for the * target to further identify itself. */ static void aic_reselected(struct aic_softc *aic) { u_int8_t selid; CAM_DEBUG_PRINT(CAM_DEBUG_TRACE, ("aic_reselected\n")); /* * If we have started a selection, it must have lost out in * the arbitration, put the command back to the pending queue. */ if (aic->nexus) { TAILQ_INSERT_HEAD(&aic->pending_ccbs, &aic->nexus->ccb->ccb_h, sim_links.tqe); aic->nexus = NULL; } selid = aic_inb(aic, SELID) & ~(1 << aic->initiator); if (selid & (selid - 1)) { /* this should never have happened */ printf("aic_reselected: invalid selid %x\n", selid); aic_reset(aic, /*initiate_reset*/TRUE); return; } aic->state = AIC_RESELECTED; aic->target = ffs(selid) - 1; aic->lun = -1; aic_outb(aic, CLRSINT0, CLRSELDI); aic_outb(aic, CLRSINT1, CLRBUSFREE); aic_outb(aic, SIMODE0, 0); aic_outb(aic, SIMODE1, ENSCSIRST|ENBUSFREE|ENREQINIT); aic_outb(aic, SCSISEQ, ENAUTOATNP); aic_outb(aic, SCSIRATE, aic->tinfo[aic->target].scsirate); } /* * Raise ATNO to signal the target that we have a message for it. */ static __inline void aic_sched_msgout(struct aic_softc *aic, u_int8_t msg) { if (msg) { aic->msg_buf[0] = msg; aic->msg_len = 1; } aic->msg_outq |= AIC_MSG_MSGBUF; aic_outb(aic, SCSISIGO, aic_inb(aic, SCSISIGI) | ATNO); } /* * Wait for SPIORDY (SCSI PIO ready) flag, or a phase change. */ static __inline int aic_spiordy(struct aic_softc *aic) { while (!(aic_inb(aic, DMASTAT) & INTSTAT) && !(aic_inb(aic, SSTAT0) & SPIORDY)) ; return !(aic_inb(aic, DMASTAT) & INTSTAT); } /* * Reestablish a disconnected nexus. */ static void aic_reconnect(struct aic_softc *aic, int tag) { struct aic_scb *scb; struct ccb_hdr *ccb_h; CAM_DEBUG_PRINT(CAM_DEBUG_TRACE, ("aic_reconnect\n")); /* Find the nexus */ scb = NULL; TAILQ_FOREACH(ccb_h, &aic->nexus_ccbs, sim_links.tqe) { scb = (struct aic_scb *)ccb_h->ccb_scb_ptr; if (scb->target == aic->target && scb->lun == aic->lun && (tag == -1 || scb->tag == tag)) break; } /* ABORT if nothing is found */ if (!ccb_h) { if (tag == -1) aic_sched_msgout(aic, MSG_ABORT); else aic_sched_msgout(aic, MSG_ABORT_TAG); xpt_async(AC_UNSOL_RESEL, aic->path, NULL); return; } /* Reestablish the nexus */ TAILQ_REMOVE(&aic->nexus_ccbs, ccb_h, sim_links.tqe); aic->nexus = scb; scb->flags &= ~SCB_DISCONNECTED; aic->state = AIC_HASNEXUS; } /* * Read messages. 
*/ static void aic_msgin(struct aic_softc *aic) { int msglen; CAM_DEBUG_PRINT(CAM_DEBUG_TRACE, ("aic_msgin\n")); aic_outb(aic, SIMODE1, ENSCSIRST|ENPHASEMIS|ENBUSFREE); aic_outb(aic, SXFRCTL0, CHEN|SPIOEN); aic->flags &= ~AIC_DROP_MSGIN; aic->msg_len = 0; do { /* * If a parity error is detected, drop the remaining * bytes and inform the target so it could resend * the messages. */ if (aic_inb(aic, SSTAT1) & SCSIPERR) { aic_outb(aic, CLRSINT1, CLRSCSIPERR); aic->flags |= AIC_DROP_MSGIN; aic_sched_msgout(aic, MSG_PARITY_ERROR); } if ((aic->flags & AIC_DROP_MSGIN)) { aic_inb(aic, SCSIDAT); continue; } /* read the message byte without ACKing on it */ aic->msg_buf[aic->msg_len++] = aic_inb(aic, SCSIBUS); if (aic->msg_buf[0] == MSG_EXTENDED) { if (aic->msg_len < 2) { (void) aic_inb(aic, SCSIDAT); continue; } switch (aic->msg_buf[2]) { case MSG_EXT_SDTR: msglen = MSG_EXT_SDTR_LEN; break; case MSG_EXT_WDTR: msglen = MSG_EXT_WDTR_LEN; break; default: msglen = 0; break; } if (aic->msg_buf[1] != msglen) { aic->flags |= AIC_DROP_MSGIN; aic_sched_msgout(aic, MSG_MESSAGE_REJECT); } msglen += 2; } else if (aic->msg_buf[0] >= 0x20 && aic->msg_buf[0] <= 0x2f) msglen = 2; else msglen = 1; /* * If we have a complete message, handle it before the final * ACK (in case we decide to reject the message). */ if (aic->msg_len == msglen) { aic_handle_msgin(aic); aic->msg_len = 0; } /* ACK on the message byte */ (void) aic_inb(aic, SCSIDAT); } while (aic_spiordy(aic)); aic_outb(aic, SXFRCTL0, CHEN); aic_outb(aic, SIMODE1, ENSCSIRST|ENBUSFREE|ENREQINIT); } /* * Handle a message. */ static void aic_handle_msgin(struct aic_softc *aic) { struct aic_scb *scb; struct ccb_hdr *ccb_h; struct aic_tinfo *ti; struct ccb_trans_settings neg; struct ccb_trans_settings_spi *spi = &neg.xport_specific.spi; if (aic->state == AIC_RESELECTED) { if (!MSG_ISIDENTIFY(aic->msg_buf[0])) { aic_sched_msgout(aic, MSG_MESSAGE_REJECT); return; } aic->lun = aic->msg_buf[0] & MSG_IDENTIFY_LUNMASK; if (aic->tinfo[aic->target].lubusy & (1 << aic->lun)) aic_reconnect(aic, -1); else aic->state = AIC_RECONNECTING; return; } if (aic->state == AIC_RECONNECTING) { if (aic->msg_buf[0] != MSG_SIMPLE_Q_TAG) { aic_sched_msgout(aic, MSG_MESSAGE_REJECT); return; } aic_reconnect(aic, aic->msg_buf[1]); return; } switch (aic->msg_buf[0]) { case MSG_CMDCOMPLETE: { struct ccb_scsiio *csio; scb = aic->nexus; ccb_h = &scb->ccb->ccb_h; csio = &scb->ccb->csio; if ((scb->flags & SCB_SENSE) != 0) { /* auto REQUEST SENSE command */ scb->flags &= ~SCB_SENSE; csio->sense_resid = scb->data_len; if (scb->status == SCSI_STATUS_OK) { ccb_h->status |= CAM_SCSI_STATUS_ERROR|CAM_AUTOSNS_VALID; /*scsi_sense_print(csio);*/ } else { ccb_h->status |= CAM_AUTOSENSE_FAIL; printf("ccb %p sense failed %x\n", ccb_h, scb->status); } } else { csio->scsi_status = scb->status; csio->resid = scb->data_len; if (scb->status == SCSI_STATUS_OK) { /* everything goes well */ ccb_h->status |= CAM_REQ_CMP; } else if ((ccb_h->flags & CAM_DIS_AUTOSENSE) == 0 && (csio->scsi_status == SCSI_STATUS_CHECK_COND || csio->scsi_status == SCSI_STATUS_CMD_TERMINATED)) { /* try to retrieve sense information */ scb->flags |= SCB_SENSE; aic->flags |= AIC_BUSFREE_OK; return; } else ccb_h->status |= CAM_SCSI_STATUS_ERROR; } aic_done(aic, scb); aic->flags |= AIC_BUSFREE_OK; break; } case MSG_EXTENDED: switch (aic->msg_buf[2]) { case MSG_EXT_SDTR: scb = aic->nexus; ti = &aic->tinfo[scb->target]; if (ti->flags & TINFO_SDTR_SENT) { ti->current.period = aic->msg_buf[3]; ti->current.offset = aic->msg_buf[4]; } else { 
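/* * Clamp the target's proposal to our own goal: max() keeps the * period no faster than we asked for, min() keeps the offset no * deeper than we support. */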
ti->current.period = aic->msg_buf[3] = max(ti->goal.period, aic->msg_buf[3]); ti->current.offset = aic->msg_buf[4] = min(ti->goal.offset, aic->msg_buf[4]); /* * The target initiated the negotiation, * send back a response. */ aic_sched_msgout(aic, 0); } ti->flags &= ~(TINFO_SDTR_SENT|TINFO_SDTR_NEGO); ti->scsirate = ti->current.offset ? ti->current.offset | ((ti->current.period * 4 + 49) / 50 - 2) << 4 : 0; aic_outb(aic, SCSIRATE, ti->scsirate); memset(&neg, 0, sizeof (neg)); neg.protocol = PROTO_SCSI; neg.protocol_version = SCSI_REV_2; neg.transport = XPORT_SPI; neg.transport_version = 2; spi->sync_period = ti->goal.period = ti->current.period; spi->sync_offset = ti->goal.offset = ti->current.offset; spi->valid = CTS_SPI_VALID_SYNC_RATE | CTS_SPI_VALID_SYNC_OFFSET; ccb_h = &scb->ccb->ccb_h; xpt_setup_ccb(&neg.ccb_h, ccb_h->path, 1); xpt_async(AC_TRANSFER_NEG, ccb_h->path, &neg); break; case MSG_EXT_WDTR: default: aic_sched_msgout(aic, MSG_MESSAGE_REJECT); break; } break; case MSG_DISCONNECT: scb = aic->nexus; ccb_h = &scb->ccb->ccb_h; TAILQ_INSERT_TAIL(&aic->nexus_ccbs, ccb_h, sim_links.tqe); scb->flags |= SCB_DISCONNECTED; aic->flags |= AIC_BUSFREE_OK; aic->nexus = NULL; CAM_DEBUG(ccb_h->path, CAM_DEBUG_TRACE, ("disconnected\n")); break; case MSG_MESSAGE_REJECT: switch (aic->msg_outq & -aic->msg_outq) { case AIC_MSG_TAG_Q: scb = aic->nexus; ti = &aic->tinfo[scb->target]; ti->flags &= ~TINFO_TAG_ENB; ti->lubusy |= 1 << scb->lun; break; case AIC_MSG_SDTR: scb = aic->nexus; ti = &aic->tinfo[scb->target]; ti->current.period = ti->goal.period = 0; ti->current.offset = ti->goal.offset = 0; ti->flags &= ~(TINFO_SDTR_SENT|TINFO_SDTR_NEGO); ti->scsirate = 0; aic_outb(aic, SCSIRATE, ti->scsirate); memset(&neg, 0, sizeof (neg)); neg.protocol = PROTO_SCSI; neg.protocol_version = SCSI_REV_2; neg.transport = XPORT_SPI; neg.transport_version = 2; spi->sync_period = ti->current.period; spi->sync_offset = ti->current.offset; spi->valid = CTS_SPI_VALID_SYNC_RATE | CTS_SPI_VALID_SYNC_OFFSET; ccb_h = &scb->ccb->ccb_h; xpt_setup_ccb(&neg.ccb_h, ccb_h->path, 1); xpt_async(AC_TRANSFER_NEG, ccb_h->path, &neg); break; default: break; } break; case MSG_SAVEDATAPOINTER: break; case MSG_RESTOREPOINTERS: break; case MSG_NOOP: break; default: aic_sched_msgout(aic, MSG_MESSAGE_REJECT); break; } } /* * Send messages. */ static void aic_msgout(struct aic_softc *aic) { struct aic_scb *scb; union ccb *ccb; struct aic_tinfo *ti; int msgidx = 0; CAM_DEBUG_PRINT(CAM_DEBUG_TRACE, ("aic_msgout\n")); aic_outb(aic, SIMODE1, ENSCSIRST|ENPHASEMIS|ENBUSFREE); aic_outb(aic, SXFRCTL0, CHEN|SPIOEN); /* * If the previous phase is also the message out phase, * we need to retransmit all the messages, probably * because the target has detected a parity error during * the past transmission. 
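* The queue is drained lowest-set-bit-first: q & -q isolates the * least significant set bit of the pending mask (e.g. q = 0x6 gives * q & -q = 0x2), so messages go out in the fixed priority order of * the AIC_MSG_* bit values.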
*/ if (aic->prev_phase == PH_MSGOUT) aic->msg_outq = aic->msg_sent; do { int q = aic->msg_outq; if (msgidx > 0 && msgidx == aic->msg_len) { /* complete message sent, start the next one */ q &= -q; aic->msg_sent |= q; aic->msg_outq ^= q; q = aic->msg_outq; msgidx = 0; } if (msgidx == 0) { /* setup the message */ switch (q & -q) { case AIC_MSG_IDENTIFY: scb = aic->nexus; ccb = scb->ccb; ti = &aic->tinfo[scb->target]; aic->msg_buf[0] = MSG_IDENTIFY(scb->lun, (ti->flags & TINFO_DISC_ENB) && !(ccb->ccb_h.flags & CAM_DIS_DISCONNECT)); aic->msg_len = 1; break; case AIC_MSG_TAG_Q: scb = aic->nexus; ccb = scb->ccb; aic->msg_buf[0] = ccb->csio.tag_action; aic->msg_buf[1] = scb->tag; aic->msg_len = 2; break; case AIC_MSG_SDTR: scb = aic->nexus; ti = &aic->tinfo[scb->target]; aic->msg_buf[0] = MSG_EXTENDED; aic->msg_buf[1] = MSG_EXT_SDTR_LEN; aic->msg_buf[2] = MSG_EXT_SDTR; aic->msg_buf[3] = ti->goal.period; aic->msg_buf[4] = ti->goal.offset; aic->msg_len = MSG_EXT_SDTR_LEN + 2; ti->flags |= TINFO_SDTR_SENT; break; case AIC_MSG_MSGBUF: /* a single message already in the buffer */ if (aic->msg_buf[0] == MSG_BUS_DEV_RESET || aic->msg_buf[0] == MSG_ABORT || aic->msg_buf[0] == MSG_ABORT_TAG) aic->flags |= AIC_BUSFREE_OK; break; } } /* * If this is the last message byte of all messages, * clear ATNO to signal transmission complete. */ if ((q & (q - 1)) == 0 && msgidx == aic->msg_len - 1) aic_outb(aic, CLRSINT1, CLRATNO); /* transmit the message byte */ aic_outb(aic, SCSIDAT, aic->msg_buf[msgidx++]); } while (aic_spiordy(aic)); aic_outb(aic, SXFRCTL0, CHEN); aic_outb(aic, SIMODE1, ENSCSIRST|ENBUSFREE|ENREQINIT); } /* * Read data bytes. */ static void aic_datain(struct aic_softc *aic) { struct aic_scb *scb = aic->nexus; u_int8_t dmastat, dmacntrl0; int n; CAM_DEBUG_PRINT(CAM_DEBUG_TRACE, ("aic_datain\n")); aic_outb(aic, SIMODE1, ENSCSIRST|ENPHASEMIS|ENBUSFREE); aic_outb(aic, SXFRCTL0, SCSIEN|DMAEN|CHEN); dmacntrl0 = ENDMA; if (aic->flags & AIC_DWIO_ENABLE) dmacntrl0 |= DWORDPIO; aic_outb(aic, DMACNTRL0, dmacntrl0); while (scb->data_len > 0) { for (;;) { /* wait for the fifo to fill up or a phase change */ dmastat = aic_inb(aic, DMASTAT); if (dmastat & (INTSTAT|DFIFOFULL)) break; } if (dmastat & DFIFOFULL) { n = FIFOSIZE; } else { /* * No more data, wait for the remaining bytes in * the scsi fifo to be transfer to the host fifo. */ while (!(aic_inb(aic, SSTAT2) & SEMPTY)) ; n = aic_inb(aic, FIFOSTAT); } n = imin(scb->data_len, n); if (aic->flags & AIC_DWIO_ENABLE) { if (n >= 12) { aic_insl(aic, DMADATALONG, scb->data_ptr, n>>2); scb->data_ptr += n & ~3; scb->data_len -= n & ~3; n &= 3; } } else { if (n >= 8) { aic_insw(aic, DMADATA, scb->data_ptr, n >> 1); scb->data_ptr += n & ~1; scb->data_len -= n & ~1; n &= 1; } } if (n) { aic_outb(aic, DMACNTRL0, ENDMA|B8MODE); aic_insb(aic, DMADATA, scb->data_ptr, n); scb->data_ptr += n; scb->data_len -= n; aic_outb(aic, DMACNTRL0, dmacntrl0); } if (dmastat & INTSTAT) break; } aic_outb(aic, SXFRCTL0, CHEN); aic_outb(aic, SIMODE1, ENSCSIRST|ENBUSFREE|ENREQINIT); } /* * Send data bytes. 
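* This mirrors aic_datain() above: 32-bit bursts through DMADATALONG * when DWIO is enabled, 16-bit bursts through DMADATA otherwise, and * any sub-word residue pushed a byte at a time in 8-bit mode.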
*/ static void aic_dataout(struct aic_softc *aic) { struct aic_scb *scb = aic->nexus; u_int8_t dmastat, dmacntrl0, sstat2; int n; CAM_DEBUG_PRINT(CAM_DEBUG_TRACE, ("aic_dataout\n")); aic_outb(aic, SIMODE1, ENSCSIRST|ENPHASEMIS|ENBUSFREE); aic_outb(aic, SXFRCTL0, SCSIEN|DMAEN|CHEN); dmacntrl0 = ENDMA|WRITE; if (aic->flags & AIC_DWIO_ENABLE) dmacntrl0 |= DWORDPIO; aic_outb(aic, DMACNTRL0, dmacntrl0); while (scb->data_len > 0) { for (;;) { /* wait for the fifo to clear up or a phase change */ dmastat = aic_inb(aic, DMASTAT); if (dmastat & (INTSTAT|DFIFOEMP)) break; } if (dmastat & INTSTAT) break; n = imin(scb->data_len, FIFOSIZE); if (aic->flags & AIC_DWIO_ENABLE) { if (n >= 12) { aic_outsl(aic, DMADATALONG, scb->data_ptr,n>>2); scb->data_ptr += n & ~3; scb->data_len -= n & ~3; n &= 3; } } else { if (n >= 8) { aic_outsw(aic, DMADATA, scb->data_ptr, n >> 1); scb->data_ptr += n & ~1; scb->data_len -= n & ~1; n &= 1; } } if (n) { aic_outb(aic, DMACNTRL0, ENDMA|WRITE|B8MODE); aic_outsb(aic, DMADATA, scb->data_ptr, n); scb->data_ptr += n; scb->data_len -= n; aic_outb(aic, DMACNTRL0, dmacntrl0); } } for (;;) { /* wait until all bytes in the fifos are transmitted */ dmastat = aic_inb(aic, DMASTAT); sstat2 = aic_inb(aic, SSTAT2); if ((dmastat & DFIFOEMP) && (sstat2 & SEMPTY)) break; if (dmastat & INTSTAT) { /* adjust for untransmitted bytes */ n = aic_inb(aic, FIFOSTAT) + (sstat2 & 0xf); scb->data_ptr -= n; scb->data_len += n; /* clear the fifo */ aic_outb(aic, SXFRCTL0, CHEN|CLRCH); aic_outb(aic, DMACNTRL0, RSTFIFO); break; } } aic_outb(aic, SXFRCTL0, CHEN); aic_outb(aic, SIMODE1, ENSCSIRST|ENBUSFREE|ENREQINIT); } /* * Send the scsi command. */ static void aic_cmd(struct aic_softc *aic) { struct aic_scb *scb = aic->nexus; struct scsi_request_sense sense_cmd; CAM_DEBUG_PRINT(CAM_DEBUG_TRACE, ("aic_cmd\n")); if (scb->flags & SCB_SENSE) { /* autosense request */ sense_cmd.opcode = REQUEST_SENSE; sense_cmd.byte2 = scb->lun << 5; sense_cmd.length = scb->ccb->csio.sense_len; sense_cmd.control = 0; sense_cmd.unused[0] = 0; sense_cmd.unused[1] = 0; scb->cmd_ptr = (u_int8_t *)&sense_cmd; scb->cmd_len = sizeof(sense_cmd); scb->data_ptr = (u_int8_t *)&scb->ccb->csio.sense_data; scb->data_len = scb->ccb->csio.sense_len; } aic_outb(aic, SIMODE1, ENSCSIRST|ENPHASEMIS|ENBUSFREE); aic_outb(aic, DMACNTRL0, ENDMA|WRITE); aic_outb(aic, SXFRCTL0, SCSIEN|DMAEN|CHEN); aic_outsw(aic, DMADATA, (u_int16_t *)scb->cmd_ptr, scb->cmd_len >> 1); while ((aic_inb(aic, SSTAT2) & SEMPTY) == 0 && (aic_inb(aic, DMASTAT) & INTSTAT) == 0) ; aic_outb(aic, SXFRCTL0, CHEN); aic_outb(aic, SIMODE1, ENSCSIRST|ENBUSFREE|ENREQINIT); } /* * Finish off a command. The caller is responsible for removing the ccb * from any queue.
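* When a bus device reset completes, aic_done() also sweeps the * pending and disconnected-nexus queues, failing every CCB aimed at * the reset target with CAM_BDR_SENT and restarting the timeout on * the others.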
*/ static void aic_done(struct aic_softc *aic, struct aic_scb *scb) { union ccb *ccb = scb->ccb; CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("aic_done - ccb %p status %x resid %d\n", ccb, ccb->ccb_h.status, ccb->csio.resid)); callout_stop(&scb->timer); if ((scb->flags & SCB_DEVICE_RESET) != 0 && ccb->ccb_h.func_code != XPT_RESET_DEV) { struct cam_path *path; struct ccb_hdr *ccb_h; cam_status error; error = xpt_create_path(&path, /*periph*/NULL, cam_sim_path(aic->sim), scb->target, CAM_LUN_WILDCARD); if (error == CAM_REQ_CMP) { xpt_async(AC_SENT_BDR, path, NULL); xpt_free_path(path); } ccb_h = TAILQ_FIRST(&aic->pending_ccbs); while (ccb_h != NULL) { struct aic_scb *pending_scb; pending_scb = (struct aic_scb *)ccb_h->ccb_scb_ptr; if (ccb_h->target_id == scb->target) { ccb_h->status |= CAM_BDR_SENT; ccb_h = TAILQ_NEXT(ccb_h, sim_links.tqe); TAILQ_REMOVE(&aic->pending_ccbs, &pending_scb->ccb->ccb_h, sim_links.tqe); aic_done(aic, pending_scb); } else { callout_reset_sbt(&pending_scb->timer, SBT_1MS * ccb_h->timeout, 0, aic_timeout, pending_scb, 0); ccb_h = TAILQ_NEXT(ccb_h, sim_links.tqe); } } ccb_h = TAILQ_FIRST(&aic->nexus_ccbs); while (ccb_h != NULL) { struct aic_scb *nexus_scb; nexus_scb = (struct aic_scb *)ccb_h->ccb_scb_ptr; if (ccb_h->target_id == scb->target) { ccb_h->status |= CAM_BDR_SENT; ccb_h = TAILQ_NEXT(ccb_h, sim_links.tqe); TAILQ_REMOVE(&aic->nexus_ccbs, &nexus_scb->ccb->ccb_h, sim_links.tqe); aic_done(aic, nexus_scb); } else { callout_reset_sbt(&nexus_scb->timer, SBT_1MS * ccb_h->timeout, 0, aic_timeout, nexus_scb, 0); ccb_h = TAILQ_NEXT(ccb_h, sim_links.tqe); } } } if (aic->nexus == scb || scb->flags & SCB_DISCONNECTED) aic->tinfo[scb->target].lubusy &= ~(1 << scb->lun); if (aic->nexus == scb) { aic->nexus = NULL; } aic_free_scb(aic, scb); xpt_done(ccb); } static void aic_poll(struct cam_sim *sim) { aic_intr_locked(cam_sim_softc(sim)); } static void aic_timeout(void *arg) { struct aic_scb *scb = (struct aic_scb *)arg; union ccb *ccb = scb->ccb; struct aic_softc *aic = (struct aic_softc *)ccb->ccb_h.ccb_aic_ptr; mtx_assert(&aic->lock, MA_OWNED); xpt_print_path(ccb->ccb_h.path); printf("ccb %p - timed out", ccb); if (aic->nexus && aic->nexus != scb) printf(", nexus %p", aic->nexus->ccb); printf(", phase 0x%x, state %d\n", aic_inb(aic, SCSISIGI), aic->state); if ((scb->flags & SCB_ACTIVE) == 0) { xpt_print_path(ccb->ccb_h.path); printf("ccb %p - timed out already completed\n", ccb); return; } if ((scb->flags & SCB_DEVICE_RESET) == 0 && aic->nexus == scb) { struct ccb_hdr *ccb_h = &scb->ccb->ccb_h; struct aic_scb *pending_scb; if ((ccb_h->status & CAM_RELEASE_SIMQ) == 0) { xpt_freeze_simq(aic->sim, /*count*/1); ccb_h->status |= CAM_RELEASE_SIMQ; } TAILQ_FOREACH(ccb_h, &aic->pending_ccbs, sim_links.tqe) { pending_scb = ccb_h->ccb_scb_ptr; callout_stop(&pending_scb->timer); } TAILQ_FOREACH(ccb_h, &aic->nexus_ccbs, sim_links.tqe) { pending_scb = ccb_h->ccb_scb_ptr; callout_stop(&pending_scb->timer); } scb->flags |= SCB_DEVICE_RESET; callout_reset(&scb->timer, 5 * hz, aic_timeout, scb); aic_sched_msgout(aic, MSG_BUS_DEV_RESET); } else { if (aic->nexus == scb) { ccb->ccb_h.status |= CAM_CMD_TIMEOUT; aic_done(aic, scb); } aic_reset(aic, /*initiate_reset*/TRUE); } } void aic_intr(void *arg) { struct aic_softc *aic = (struct aic_softc *)arg; mtx_lock(&aic->lock); aic_intr_locked(aic); mtx_unlock(&aic->lock); } void aic_intr_locked(struct aic_softc *aic) { u_int8_t sstat0, sstat1; union ccb *ccb; struct aic_scb *scb; if (!(aic_inb(aic, DMASTAT) & INTSTAT)) return; aic_outb(aic, 
DMACNTRL0, 0); sstat0 = aic_inb(aic, SSTAT0); sstat1 = aic_inb(aic, SSTAT1); if ((sstat1 & SCSIRSTI) != 0) { /* a device-initiated bus reset */ aic_outb(aic, CLRSINT1, CLRSCSIRSTI); aic_reset(aic, /*initiate_reset*/FALSE); return; } if ((sstat1 & SCSIPERR) != 0) { aic_outb(aic, CLRSINT1, CLRSCSIPERR); aic_sched_msgout(aic, MSG_PARITY_ERROR); aic_outb(aic, DMACNTRL0, INTEN); return; } if (aic_inb(aic, SSTAT4)) { aic_outb(aic, CLRSERR, CLRSYNCERR|CLRFWERR|CLRFRERR); aic_reset(aic, /*initiate_reset*/TRUE); return; } if (aic->state <= AIC_SELECTING) { if ((sstat0 & SELDI) != 0) { aic_reselected(aic); aic_outb(aic, DMACNTRL0, INTEN); return; } if ((sstat0 & SELDO) != 0) { aic_selected(aic); aic_outb(aic, DMACNTRL0, INTEN); return; } if ((sstat1 & SELTO) != 0) { scb = aic->nexus; ccb = scb->ccb; ccb->ccb_h.status = CAM_SEL_TIMEOUT; aic_done(aic, scb); while ((sstat1 & BUSFREE) == 0) sstat1 = aic_inb(aic, SSTAT1); aic->flags |= AIC_BUSFREE_OK; } } if ((sstat1 & BUSFREE) != 0) { aic_outb(aic, SCSISEQ, 0); aic_outb(aic, CLRSINT0, sstat0); aic_outb(aic, CLRSINT1, sstat1); if ((scb = aic->nexus)) { if ((aic->flags & AIC_BUSFREE_OK) == 0) { ccb = scb->ccb; ccb->ccb_h.status = CAM_UNEXP_BUSFREE; aic_done(aic, scb); } else if (scb->flags & SCB_DEVICE_RESET) { ccb = scb->ccb; if (ccb->ccb_h.func_code == XPT_RESET_DEV) { xpt_async(AC_SENT_BDR, ccb->ccb_h.path, NULL); ccb->ccb_h.status |= CAM_REQ_CMP; } else ccb->ccb_h.status |= CAM_CMD_TIMEOUT; aic_done(aic, scb); } else if (scb->flags & SCB_SENSE) { /* autosense request */ aic->flags &= ~AIC_BUSFREE_OK; aic->tinfo[scb->target].lubusy &= ~(1 << scb->lun); aic_select(aic); aic_outb(aic, DMACNTRL0, INTEN); return; } } aic->flags &= ~AIC_BUSFREE_OK; aic->state = AIC_IDLE; aic_start(aic); aic_outb(aic, DMACNTRL0, INTEN); return; } if ((sstat1 & REQINIT) != 0) { u_int8_t phase = aic_inb(aic, SCSISIGI) & PH_MASK; aic_outb(aic, SCSISIGO, phase); aic_outb(aic, CLRSINT1, CLRPHASECHG); switch (phase) { case PH_MSGOUT: aic_msgout(aic); break; case PH_MSGIN: aic_msgin(aic); break; case PH_STAT: scb = aic->nexus; ccb = scb->ccb; aic_outb(aic, DMACNTRL0, 0); aic_outb(aic, SXFRCTL0, CHEN|SPIOEN); scb->status = aic_inb(aic, SCSIDAT); aic_outb(aic, SXFRCTL0, CHEN); break; case PH_CMD: aic_cmd(aic); break; case PH_DATAIN: aic_datain(aic); break; case PH_DATAOUT: aic_dataout(aic); break; } aic->prev_phase = phase; aic_outb(aic, DMACNTRL0, INTEN); return; } printf("aic_intr: unexpected intr sstat0 %x sstat1 %x\n", sstat0, sstat1); aic_outb(aic, DMACNTRL0, INTEN); } /* * Reset ourselves. */ static void aic_chip_reset(struct aic_softc *aic) { /* * Doc. 
recommends clearing these two registers before * operations commence */ aic_outb(aic, SCSITEST, 0); aic_outb(aic, TEST, 0); /* Reset SCSI-FIFO and abort any transfers */ aic_outb(aic, SXFRCTL0, CHEN|CLRCH|CLRSTCNT); /* Reset HOST-FIFO */ aic_outb(aic, DMACNTRL0, RSTFIFO); aic_outb(aic, DMACNTRL1, 0); /* Disable all selection features */ aic_outb(aic, SCSISEQ, 0); aic_outb(aic, SXFRCTL1, 0); /* Disable interrupts */ aic_outb(aic, SIMODE0, 0); aic_outb(aic, SIMODE1, 0); /* Clear interrupts */ aic_outb(aic, CLRSINT0, 0x7f); aic_outb(aic, CLRSINT1, 0xef); /* Disable synchronous transfers */ aic_outb(aic, SCSIRATE, 0); /* Haven't seen any errors (yet) */ aic_outb(aic, CLRSERR, 0x07); /* Set our SCSI-ID */ aic_outb(aic, SCSIID, aic->initiator << OID_S); aic_outb(aic, BRSTCNTRL, EISA_BRST_TIM); } /* * Reset the SCSI bus */ static void aic_scsi_reset(struct aic_softc *aic) { aic_outb(aic, SCSISEQ, SCSIRSTO); DELAY(500); aic_outb(aic, SCSISEQ, 0); DELAY(50); } /* * Reset. Abort all pending commands. */ static void aic_reset(struct aic_softc *aic, int initiate_reset) { struct ccb_hdr *ccb_h; CAM_DEBUG_PRINT(CAM_DEBUG_TRACE, ("aic_reset\n")); if (initiate_reset) aic_scsi_reset(aic); aic_chip_reset(aic); xpt_async(AC_BUS_RESET, aic->path, NULL); while ((ccb_h = TAILQ_FIRST(&aic->pending_ccbs)) != NULL) { TAILQ_REMOVE(&aic->pending_ccbs, ccb_h, sim_links.tqe); ccb_h->status |= CAM_SCSI_BUS_RESET; aic_done(aic, (struct aic_scb *)ccb_h->ccb_scb_ptr); } while ((ccb_h = TAILQ_FIRST(&aic->nexus_ccbs)) != NULL) { TAILQ_REMOVE(&aic->nexus_ccbs, ccb_h, sim_links.tqe); ccb_h->status |= CAM_SCSI_BUS_RESET; aic_done(aic, (struct aic_scb *)ccb_h->ccb_scb_ptr); } if (aic->nexus) { ccb_h = &aic->nexus->ccb->ccb_h; ccb_h->status |= CAM_SCSI_BUS_RESET; aic_done(aic, aic->nexus); } aic->state = AIC_IDLE; aic_outb(aic, DMACNTRL0, INTEN); } static char *aic_chip_names[] = { "AIC6260", "AIC6360", "AIC6370", "GM82C700", }; static struct { int type; char *idstring; } aic_chip_ids[] = { { AIC6360, IDSTRING_AIC6360 }, { AIC6370, IDSTRING_AIC6370 }, { GM82C700, IDSTRING_GM82C700 }, }; static void aic_init(struct aic_softc *aic) { struct aic_scb *scb; struct aic_tinfo *ti; u_int8_t porta, portb; char chip_id[33]; int i; TAILQ_INIT(&aic->pending_ccbs); TAILQ_INIT(&aic->nexus_ccbs); SLIST_INIT(&aic->free_scbs); aic->nexus = NULL; aic->state = AIC_IDLE; aic->prev_phase = -1; aic->flags = 0; aic_chip_reset(aic); aic_scsi_reset(aic); /* determine the chip type from its ID string */ aic->chip_type = AIC6260; aic_insb(aic, ID, chip_id, sizeof(chip_id) - 1); chip_id[sizeof(chip_id) - 1] = '\0'; for (i = 0; i < nitems(aic_chip_ids); i++) { if (!strcmp(chip_id, aic_chip_ids[i].idstring)) { aic->chip_type = aic_chip_ids[i].type; break; } } porta = aic_inb(aic, PORTA); portb = aic_inb(aic, PORTB); aic->initiator = PORTA_ID(porta); if (PORTA_PARITY(porta)) aic->flags |= AIC_PARITY_ENABLE; if (PORTB_DISC(portb)) aic->flags |= AIC_DISC_ENABLE; if (PORTB_DMA(portb)) aic->flags |= AIC_DMA_ENABLE; /* * We can do fast SCSI (10MHz clock rate) if bit 4 of portb * is set and we've got a 6360. The 6260 can only do standard * 5MHz SCSI.
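* That capability is recorded below as max_period: * AIC_FAST_SYNC_PERIOD when fast SCSI is usable and AIC_SYNC_PERIOD * otherwise, with 32-bit PIO (DWIO) also enabled on the newer parts.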
*/ if (aic->chip_type > AIC6260 || aic_inb(aic, REV)) { if (PORTB_FSYNC(portb)) aic->flags |= AIC_FAST_ENABLE; aic->flags |= AIC_DWIO_ENABLE; } if (aic->flags & AIC_FAST_ENABLE) aic->max_period = AIC_FAST_SYNC_PERIOD; else aic->max_period = AIC_SYNC_PERIOD; aic->min_period = AIC_MIN_SYNC_PERIOD; for (i = 255; i >= 0; i--) { scb = &aic->scbs[i]; scb->tag = i; callout_init_mtx(&scb->timer, &aic->lock, 0); aic_free_scb(aic, scb); } for (i = 0; i < 8; i++) { if (i == aic->initiator) continue; ti = &aic->tinfo[i]; bzero(ti, sizeof(*ti)); ti->flags = TINFO_TAG_ENB; if (aic->flags & AIC_DISC_ENABLE) ti->flags |= TINFO_DISC_ENB; ti->user.period = aic->max_period; ti->user.offset = AIC_SYNC_OFFSET; ti->scsirate = 0; } aic_outb(aic, DMACNTRL0, INTEN); } int aic_probe(struct aic_softc *aic) { int i; /* Remove aic6360 from possible powerdown mode */ aic_outb(aic, DMACNTRL0, 0); #define STSIZE 16 aic_outb(aic, DMACNTRL1, 0); /* Reset stack pointer */ for (i = 0; i < STSIZE; i++) aic_outb(aic, STACK, i); /* See if we can pull out the same sequence */ aic_outb(aic, DMACNTRL1, 0); for (i = 0; i < STSIZE && aic_inb(aic, STACK) == i; i++) ; if (i != STSIZE) return (ENXIO); #undef STSIZE return (0); } int aic_attach(struct aic_softc *aic) { struct cam_devq *devq; /* * Create the device queue for our SIM. */ devq = cam_simq_alloc(256); if (devq == NULL) return (ENOMEM); /* * Construct our SIM entry */ aic->sim = cam_sim_alloc(aic_action, aic_poll, "aic", aic, device_get_unit(aic->dev), &aic->lock, 2, 256, devq); if (aic->sim == NULL) { cam_simq_free(devq); return (ENOMEM); } mtx_lock(&aic->lock); if (xpt_bus_register(aic->sim, aic->dev, 0) != CAM_SUCCESS) { cam_sim_free(aic->sim, /*free_devq*/TRUE); mtx_unlock(&aic->lock); return (ENXIO); } if (xpt_create_path(&aic->path, /*periph*/NULL, cam_sim_path(aic->sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { xpt_bus_deregister(cam_sim_path(aic->sim)); cam_sim_free(aic->sim, /*free_devq*/TRUE); mtx_unlock(&aic->lock); return (ENXIO); } aic_init(aic); device_printf(aic->dev, "%s", aic_chip_names[aic->chip_type]); if (aic->flags & AIC_DMA_ENABLE) printf(", dma"); if (aic->flags & AIC_DISC_ENABLE) printf(", disconnection"); if (aic->flags & AIC_PARITY_ENABLE) printf(", parity check"); if (aic->flags & AIC_FAST_ENABLE) printf(", fast SCSI"); printf("\n"); mtx_unlock(&aic->lock); + gone_in_dev(aic->dev, 12, "aic(4) driver"); return (0); } int aic_detach(struct aic_softc *aic) { struct aic_scb *scb; int i; mtx_lock(&aic->lock); xpt_async(AC_LOST_DEVICE, aic->path, NULL); xpt_free_path(aic->path); xpt_bus_deregister(cam_sim_path(aic->sim)); cam_sim_free(aic->sim, /*free_devq*/TRUE); mtx_unlock(&aic->lock); for (i = 255; i >= 0; i--) { scb = &aic->scbs[i]; callout_drain(&scb->timer); } return (0); } Index: head/sys/dev/buslogic/bt.c =================================================================== --- head/sys/dev/buslogic/bt.c (revision 328522) +++ head/sys/dev/buslogic/bt.c (revision 328523) @@ -1,2375 +1,2376 @@ /*- * Generic driver for the BusLogic MultiMaster SCSI host adapters * Product specific probe and attach routines can be found in: * sys/dev/buslogic/bt_isa.c BT-54X, BT-445 cards * sys/dev/buslogic/bt_mca.c BT-64X, SDC3211B, SDC3211F * sys/dev/buslogic/bt_pci.c BT-946, BT-948, BT-956, BT-958 cards * * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 1998, 1999 Justin T. Gibbs. * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * Special thanks to Leonard N. Zubkoff for writing such a complete and * well documented Mylex/BusLogic MultiMaster driver for Linux. Support * in this driver for the wide range of MultiMaster controllers and * firmware revisions, with their otherwise undocumented quirks, would not * have been possible without his efforts. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* MailBox Management functions */ static __inline void btnextinbox(struct bt_softc *bt); static __inline void btnextoutbox(struct bt_softc *bt); static __inline void btnextinbox(struct bt_softc *bt) { if (bt->cur_inbox == bt->last_inbox) bt->cur_inbox = bt->in_boxes; else bt->cur_inbox++; } static __inline void btnextoutbox(struct bt_softc *bt) { if (bt->cur_outbox == bt->last_outbox) bt->cur_outbox = bt->out_boxes; else bt->cur_outbox++; } /* CCB Management functions */ static __inline u_int32_t btccbvtop(struct bt_softc *bt, struct bt_ccb *bccb); static __inline struct bt_ccb* btccbptov(struct bt_softc *bt, u_int32_t ccb_addr); static __inline u_int32_t btsensepaddr(struct bt_softc *bt, struct bt_ccb *bccb); static __inline struct scsi_sense_data* btsensevaddr(struct bt_softc *bt, struct bt_ccb *bccb); static __inline u_int32_t btccbvtop(struct bt_softc *bt, struct bt_ccb *bccb) { return (bt->bt_ccb_physbase + (u_int32_t)((caddr_t)bccb - (caddr_t)bt->bt_ccb_array)); } static __inline struct bt_ccb * btccbptov(struct bt_softc *bt, u_int32_t ccb_addr) { return (bt->bt_ccb_array + ((struct bt_ccb*)(uintptr_t)ccb_addr - (struct bt_ccb*)(uintptr_t)bt->bt_ccb_physbase)); } static __inline u_int32_t btsensepaddr(struct bt_softc *bt, struct bt_ccb *bccb) { u_int index; index = (u_int)(bccb - bt->bt_ccb_array); return (bt->sense_buffers_physbase + (index * sizeof(struct scsi_sense_data))); } static __inline struct scsi_sense_data * btsensevaddr(struct bt_softc *bt, struct bt_ccb *bccb) { u_int index; index = (u_int)(bccb - bt->bt_ccb_array); return (bt->sense_buffers + index); } static __inline struct bt_ccb* btgetccb(struct bt_softc *bt); static __inline void btfreeccb(struct bt_softc *bt, struct bt_ccb *bccb); static
void btallocccbs(struct bt_softc *bt); static bus_dmamap_callback_t btexecuteccb; static void btdone(struct bt_softc *bt, struct bt_ccb *bccb, bt_mbi_comp_code_t comp_code); static void bt_intr_locked(struct bt_softc *bt); /* Host adapter command functions */ static int btreset(struct bt_softc* bt, int hard_reset); /* Initialization functions */ static int btinitmboxes(struct bt_softc *bt); static bus_dmamap_callback_t btmapmboxes; static bus_dmamap_callback_t btmapccbs; static bus_dmamap_callback_t btmapsgs; /* Transfer Negotiation Functions */ static void btfetchtransinfo(struct bt_softc *bt, struct ccb_trans_settings *cts); /* CAM SIM entry points */ #define ccb_bccb_ptr spriv_ptr0 #define ccb_bt_ptr spriv_ptr1 static void btaction(struct cam_sim *sim, union ccb *ccb); static void btpoll(struct cam_sim *sim); /* Our timeout handler */ static void bttimeout(void *arg); /* * XXX * Do our own re-probe protection until a configuration * manager can do it for us. This ensures that we don't * reprobe a card already found by the PCI probes. */ struct bt_isa_port bt_isa_ports[] = { { 0x130, 0, 4 }, { 0x134, 0, 5 }, { 0x230, 0, 2 }, { 0x234, 0, 3 }, { 0x330, 0, 0 }, { 0x334, 0, 1 } }; /* * I/O ports listed in the order enumerated by the * card for certain op codes. */ u_int16_t bt_board_ports[] = { 0x330, 0x334, 0x230, 0x234, 0x130, 0x134 }; /* Exported functions */ void bt_init_softc(device_t dev, struct resource *port, struct resource *irq, struct resource *drq) { struct bt_softc *bt = device_get_softc(dev); SLIST_INIT(&bt->free_bt_ccbs); LIST_INIT(&bt->pending_ccbs); SLIST_INIT(&bt->sg_maps); bt->dev = dev; bt->port = port; bt->irq = irq; bt->drq = drq; mtx_init(&bt->lock, "bt", NULL, MTX_DEF); } void bt_free_softc(device_t dev) { struct bt_softc *bt = device_get_softc(dev); switch (bt->init_level) { default: case 11: bus_dmamap_unload(bt->sense_dmat, bt->sense_dmamap); case 10: bus_dmamem_free(bt->sense_dmat, bt->sense_buffers, bt->sense_dmamap); case 9: bus_dma_tag_destroy(bt->sense_dmat); case 8: { struct sg_map_node *sg_map; while ((sg_map = SLIST_FIRST(&bt->sg_maps))!= NULL) { SLIST_REMOVE_HEAD(&bt->sg_maps, links); bus_dmamap_unload(bt->sg_dmat, sg_map->sg_dmamap); bus_dmamem_free(bt->sg_dmat, sg_map->sg_vaddr, sg_map->sg_dmamap); free(sg_map, M_DEVBUF); } bus_dma_tag_destroy(bt->sg_dmat); } case 7: bus_dmamap_unload(bt->ccb_dmat, bt->ccb_dmamap); /* FALLTHROUGH */ case 6: bus_dmamem_free(bt->ccb_dmat, bt->bt_ccb_array, bt->ccb_dmamap); /* FALLTHROUGH */ case 5: bus_dma_tag_destroy(bt->ccb_dmat); /* FALLTHROUGH */ case 4: bus_dmamap_unload(bt->mailbox_dmat, bt->mailbox_dmamap); /* FALLTHROUGH */ case 3: bus_dmamem_free(bt->mailbox_dmat, bt->in_boxes, bt->mailbox_dmamap); /* FALLTHROUGH */ case 2: bus_dma_tag_destroy(bt->buffer_dmat); /* FALLTHROUGH */ case 1: bus_dma_tag_destroy(bt->mailbox_dmat); /* FALLTHROUGH */ case 0: break; } mtx_destroy(&bt->lock); } int bt_port_probe(device_t dev, struct bt_probe_info *info) { struct bt_softc *bt = device_get_softc(dev); config_data_t config_data; int error; /* See if there is really a card present */ if (bt_probe(dev) || bt_fetch_adapter_info(dev)) return(1); /* * Determine our IRQ, and DMA settings and * export them to the configuration system. 
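* The adapter reports its IRQ as what is presumably a one-hot bit * (IRQ_9 through IRQ_15), since ffs(config_data.irq) + 8 below maps * the bit position back to the IRQ number, e.g. ffs(0x01) + 8 = 9 * for IRQ 9.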
*/ mtx_lock(&bt->lock); error = bt_cmd(bt, BOP_INQUIRE_CONFIG, NULL, /*parmlen*/0, (u_int8_t*)&config_data, sizeof(config_data), DEFAULT_CMD_TIMEOUT); mtx_unlock(&bt->lock); if (error != 0) { printf("bt_port_probe: Could not determine IRQ or DMA " "settings for adapter.\n"); return (1); } if (bt->model[0] == '5') { /* DMA settings only make sense for ISA cards */ switch (config_data.dma_chan) { case DMA_CHAN_5: info->drq = 5; break; case DMA_CHAN_6: info->drq = 6; break; case DMA_CHAN_7: info->drq = 7; break; default: printf("bt_port_probe: Invalid DMA setting " "detected for adapter.\n"); return (1); } } else { info->drq = -1; } switch (config_data.irq) { case IRQ_9: case IRQ_10: case IRQ_11: case IRQ_12: case IRQ_14: case IRQ_15: info->irq = ffs(config_data.irq) + 8; break; default: printf("bt_port_probe: Invalid IRQ setting %x " "detected for adapter.\n", config_data.irq); return (1); } return (0); } /* * Probe the adapter and verify that the card is a BusLogic. */ int bt_probe(device_t dev) { struct bt_softc *bt = device_get_softc(dev); esetup_info_data_t esetup_info; u_int status; u_int intstat; u_int geometry; int error; u_int8_t param; /* * See if the three I/O ports look reasonable. * Touch the minimal number of registers in the * failure case. */ status = bt_inb(bt, STATUS_REG); if ((status == 0) || (status & (DIAG_ACTIVE|CMD_REG_BUSY| STATUS_REG_RSVD|CMD_INVALID)) != 0) { if (bootverbose) device_printf(dev, "Failed Status Reg Test - %x\n", status); return (ENXIO); } intstat = bt_inb(bt, INTSTAT_REG); if ((intstat & INTSTAT_REG_RSVD) != 0) { device_printf(dev, "Failed Intstat Reg Test\n"); return (ENXIO); } geometry = bt_inb(bt, GEOMETRY_REG); if (geometry == 0xFF) { if (bootverbose) device_printf(dev, "Failed Geometry Reg Test\n"); return (ENXIO); } /* * Looking good so far. Final test is to reset the * adapter and attempt to fetch the extended setup * information. This should filter out all 1542 cards. */ mtx_lock(&bt->lock); if ((error = btreset(bt, /*hard_reset*/TRUE)) != 0) { mtx_unlock(&bt->lock); if (bootverbose) device_printf(dev, "Failed Reset\n"); return (ENXIO); } param = sizeof(esetup_info); error = bt_cmd(bt, BOP_INQUIRE_ESETUP_INFO, &param, /*parmlen*/1, (u_int8_t*)&esetup_info, sizeof(esetup_info), DEFAULT_CMD_TIMEOUT); mtx_unlock(&bt->lock); if (error != 0) { return (ENXIO); } return (0); } /* * Pull the board's setup information and record it in our softc. */ int bt_fetch_adapter_info(device_t dev) { struct bt_softc *bt = device_get_softc(dev); board_id_data_t board_id; esetup_info_data_t esetup_info; config_data_t config_data; int error; u_int8_t length_param; /* First record the firmware version */ mtx_lock(&bt->lock); error = bt_cmd(bt, BOP_INQUIRE_BOARD_ID, NULL, /*parmlen*/0, (u_int8_t*)&board_id, sizeof(board_id), DEFAULT_CMD_TIMEOUT); if (error != 0) { mtx_unlock(&bt->lock); device_printf(dev, "bt_fetch_adapter_info - Failed Get Board Info\n"); return (error); } bt->firmware_ver[0] = board_id.firmware_rev_major; bt->firmware_ver[1] = '.'; bt->firmware_ver[2] = board_id.firmware_rev_minor; bt->firmware_ver[3] = '\0'; /* * Depending on the firmware major and minor version, * we may be able to fetch additional minor version info.
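* The revision is kept as a plain ASCII string ("major.minor..."), so * the strcmp() tests against literals such as "3.3" below act as * numeric comparisons for these fixed-width version strings.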
*/ if (bt->firmware_ver[0] > '0') { error = bt_cmd(bt, BOP_INQUIRE_FW_VER_3DIG, NULL, /*parmlen*/0, (u_int8_t*)&bt->firmware_ver[3], 1, DEFAULT_CMD_TIMEOUT); if (error != 0) { mtx_unlock(&bt->lock); device_printf(dev, "bt_fetch_adapter_info - Failed Get " "Firmware 3rd Digit\n"); return (error); } if (bt->firmware_ver[3] == ' ') bt->firmware_ver[3] = '\0'; bt->firmware_ver[4] = '\0'; } if (strcmp(bt->firmware_ver, "3.3") >= 0) { error = bt_cmd(bt, BOP_INQUIRE_FW_VER_4DIG, NULL, /*parmlen*/0, (u_int8_t*)&bt->firmware_ver[4], 1, DEFAULT_CMD_TIMEOUT); if (error != 0) { mtx_unlock(&bt->lock); device_printf(dev, "bt_fetch_adapter_info - Failed Get " "Firmware 4th Digit\n"); return (error); } if (bt->firmware_ver[4] == ' ') bt->firmware_ver[4] = '\0'; bt->firmware_ver[5] = '\0'; } /* * Some boards do not handle the "recently documented" * Inquire Board Model Number command correctly or do not give * exact information. Use the Firmware and Extended Setup * information in these cases to come up with the right answer. * The major firmware revision number indicates: * * 5.xx BusLogic "W" Series Host Adapters: * BT-948/958/958D * 4.xx BusLogic "C" Series Host Adapters: * BT-946C/956C/956CD/747C/757C/757CD/445C/545C/540CF * 3.xx BusLogic "S" Series Host Adapters: * BT-747S/747D/757S/757D/445S/545S/542D * BT-542B/742A (revision H) * 2.xx BusLogic "A" Series Host Adapters: * BT-542B/742A (revision G and below) */ length_param = sizeof(esetup_info); error = bt_cmd(bt, BOP_INQUIRE_ESETUP_INFO, &length_param, /*parmlen*/1, (u_int8_t*)&esetup_info, sizeof(esetup_info), DEFAULT_CMD_TIMEOUT); if (error != 0) { mtx_unlock(&bt->lock); return (error); } bt->bios_addr = esetup_info.bios_addr << 12; bt->mailbox_addrlimit = BUS_SPACE_MAXADDR; if (esetup_info.bus_type == 'A' && bt->firmware_ver[0] == '2') { snprintf(bt->model, sizeof(bt->model), "542B"); } else { ha_model_data_t model_data; int i; length_param = sizeof(model_data); error = bt_cmd(bt, BOP_INQUIRE_MODEL, &length_param, 1, (u_int8_t*)&model_data, sizeof(model_data), DEFAULT_CMD_TIMEOUT); if (error != 0) { mtx_unlock(&bt->lock); device_printf(dev, "bt_fetch_adapter_info - Failed Inquire " "Model Number\n"); return (error); } for (i = 0; i < sizeof(model_data.ascii_model); i++) { bt->model[i] = model_data.ascii_model[i]; if (bt->model[i] == ' ') break; } bt->model[i] = '\0'; } bt->level_trigger_ints = esetup_info.level_trigger_ints ? 1 : 0; /* SG element limits */ bt->max_sg = esetup_info.max_sg; /* Set feature flags */ bt->wide_bus = esetup_info.wide_bus; bt->diff_bus = esetup_info.diff_bus; bt->ultra_scsi = esetup_info.ultra_scsi; if ((bt->firmware_ver[0] == '5') || (bt->firmware_ver[0] == '4' && bt->wide_bus)) bt->extended_lun = TRUE; bt->strict_rr = (strcmp(bt->firmware_ver, "3.31") >= 0); bt->extended_trans = ((bt_inb(bt, GEOMETRY_REG) & EXTENDED_TRANSLATION) != 0); /* * Determine max CCB count and whether tagged queuing is * available based on controller type. Tagged queuing * only works on 'W' series adapters, 'C' series adapters * with firmware of rev 4.42 and higher, and 'S' series * adapters with firmware of rev 3.35 and higher. 
The * maximum CCB counts are as follows: * * 192 BT-948/958/958D * 100 BT-946C/956C/956CD/747C/757C/757CD/445C * 50 BT-545C/540CF * 30 BT-747S/747D/757S/757D/445S/545S/542D/542B/742A */ if (bt->firmware_ver[0] == '5') { bt->max_ccbs = 192; bt->tag_capable = TRUE; } else if (bt->firmware_ver[0] == '4') { if (bt->model[0] == '5') bt->max_ccbs = 50; else bt->max_ccbs = 100; bt->tag_capable = (strcmp(bt->firmware_ver, "4.22") >= 0); } else { bt->max_ccbs = 30; if (bt->firmware_ver[0] == '3' && (strcmp(bt->firmware_ver, "3.35") >= 0)) bt->tag_capable = TRUE; else bt->tag_capable = FALSE; } if (bt->tag_capable != FALSE) bt->tags_permitted = ALL_TARGETS; /* Determine Sync/Wide/Disc settings */ if (bt->firmware_ver[0] >= '4') { auto_scsi_data_t auto_scsi_data; fetch_lram_params_t fetch_lram_params; int error; /* * These settings are stored in the * AutoSCSI data in LRAM of 'W' and 'C' * adapters. */ fetch_lram_params.offset = AUTO_SCSI_BYTE_OFFSET; fetch_lram_params.response_len = sizeof(auto_scsi_data); error = bt_cmd(bt, BOP_FETCH_LRAM, (u_int8_t*)&fetch_lram_params, sizeof(fetch_lram_params), (u_int8_t*)&auto_scsi_data, sizeof(auto_scsi_data), DEFAULT_CMD_TIMEOUT); if (error != 0) { mtx_unlock(&bt->lock); device_printf(dev, "bt_fetch_adapter_info - Failed " "Get Auto SCSI Info\n"); return (error); } bt->disc_permitted = auto_scsi_data.low_disc_permitted | (auto_scsi_data.high_disc_permitted << 8); bt->sync_permitted = auto_scsi_data.low_sync_permitted | (auto_scsi_data.high_sync_permitted << 8); bt->fast_permitted = auto_scsi_data.low_fast_permitted | (auto_scsi_data.high_fast_permitted << 8); bt->ultra_permitted = auto_scsi_data.low_ultra_permitted | (auto_scsi_data.high_ultra_permitted << 8); bt->wide_permitted = auto_scsi_data.low_wide_permitted | (auto_scsi_data.high_wide_permitted << 8); if (bt->ultra_scsi == FALSE) bt->ultra_permitted = 0; if (bt->wide_bus == FALSE) bt->wide_permitted = 0; } else { /* * 'S' and 'A' series have this information in the setup * information structure. */ setup_data_t setup_info; length_param = sizeof(setup_info); error = bt_cmd(bt, BOP_INQUIRE_SETUP_INFO, &length_param, /*paramlen*/1, (u_int8_t*)&setup_info, sizeof(setup_info), DEFAULT_CMD_TIMEOUT); if (error != 0) { mtx_unlock(&bt->lock); device_printf(dev, "bt_fetch_adapter_info - Failed " "Get Setup Info\n"); return (error); } if (setup_info.initiate_sync != 0) { bt->sync_permitted = ALL_TARGETS; if (bt->model[0] == '7') { if (esetup_info.sync_neg10MB != 0) bt->fast_permitted = ALL_TARGETS; if (strcmp(bt->model, "757") == 0) bt->wide_permitted = ALL_TARGETS; } } bt->disc_permitted = ALL_TARGETS; } /* We need as many mailboxes as we can have ccbs */ bt->num_boxes = bt->max_ccbs; /* Determine our SCSI ID */ error = bt_cmd(bt, BOP_INQUIRE_CONFIG, NULL, /*parmlen*/0, (u_int8_t*)&config_data, sizeof(config_data), DEFAULT_CMD_TIMEOUT); mtx_unlock(&bt->lock); if (error != 0) { device_printf(dev, "bt_fetch_adapter_info - Failed Get Config\n"); return (error); } bt->scsi_id = config_data.scsi_id; return (0); } /* * Start the board, ready for normal operation */ int bt_init(device_t dev) { struct bt_softc *bt = device_get_softc(dev); /* Announce the Adapter */ device_printf(dev, "BT-%s FW Rev. %s ", bt->model, bt->firmware_ver); if (bt->ultra_scsi != 0) printf("Ultra "); if (bt->wide_bus != 0) printf("Wide "); else printf("Narrow "); if (bt->diff_bus != 0) printf("Diff "); printf("SCSI Host Adapter, SCSI ID %d, %d CCBs\n", bt->scsi_id, bt->max_ccbs); /* * Create our DMA tags. 
These tags define the kinds of device * accessible memory allocations and memory mappings we will * need to perform during normal operation. * * Unless we need to further restrict the allocation, we rely * on the restrictions of the parent dmat, hence the common * use of MAXADDR and MAXSIZE. */ /* DMA tag for mapping buffers into device visible space. */ if (bus_dma_tag_create( /* parent */ bt->parent_dmat, /* alignment */ 1, /* boundary */ 0, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ BUS_SPACE_MAXADDR, /* filter */ NULL, /* filterarg */ NULL, /* maxsize */ DFLTPHYS, /* nsegments */ BT_NSEG, /* maxsegsz */ BUS_SPACE_MAXSIZE_32BIT, /* flags */ BUS_DMA_ALLOCNOW, /* lockfunc */ busdma_lock_mutex, /* lockarg */ &bt->lock, &bt->buffer_dmat) != 0) { goto error_exit; } bt->init_level++; /* DMA tag for our mailboxes */ if (bus_dma_tag_create( /* parent */ bt->parent_dmat, /* alignment */ 1, /* boundary */ 0, /* lowaddr */ bt->mailbox_addrlimit, /* highaddr */ BUS_SPACE_MAXADDR, /* filter */ NULL, /* filterarg */ NULL, /* maxsize */ bt->num_boxes * (sizeof(bt_mbox_in_t) + sizeof(bt_mbox_out_t)), /* nsegments */ 1, /* maxsegsz */ BUS_SPACE_MAXSIZE_32BIT, /* flags */ 0, /* lockfunc */ NULL, /* lockarg */ NULL, &bt->mailbox_dmat) != 0) { goto error_exit; } bt->init_level++; /* Allocation for our mailboxes */ if (bus_dmamem_alloc(bt->mailbox_dmat, (void **)&bt->out_boxes, BUS_DMA_NOWAIT, &bt->mailbox_dmamap) != 0) { goto error_exit; } bt->init_level++; /* And permanently map them */ bus_dmamap_load(bt->mailbox_dmat, bt->mailbox_dmamap, bt->out_boxes, bt->num_boxes * (sizeof(bt_mbox_in_t) + sizeof(bt_mbox_out_t)), btmapmboxes, bt, /*flags*/0); bt->init_level++; bt->in_boxes = (bt_mbox_in_t *)&bt->out_boxes[bt->num_boxes]; mtx_lock(&bt->lock); btinitmboxes(bt); mtx_unlock(&bt->lock); /* DMA tag for our ccb structures */ if (bus_dma_tag_create( /* parent */ bt->parent_dmat, /* alignment */ 1, /* boundary */ 0, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ BUS_SPACE_MAXADDR, /* filter */ NULL, /* filterarg */ NULL, /* maxsize */ bt->max_ccbs * sizeof(struct bt_ccb), /* nsegments */ 1, /* maxsegsz */ BUS_SPACE_MAXSIZE_32BIT, /* flags */ 0, /* lockfunc */ NULL, /* lockarg */ NULL, &bt->ccb_dmat) != 0) { goto error_exit; } bt->init_level++; /* Allocation for our ccbs */ if (bus_dmamem_alloc(bt->ccb_dmat, (void **)&bt->bt_ccb_array, BUS_DMA_NOWAIT, &bt->ccb_dmamap) != 0) { goto error_exit; } bt->init_level++; /* And permanently map them */ bus_dmamap_load(bt->ccb_dmat, bt->ccb_dmamap, bt->bt_ccb_array, bt->max_ccbs * sizeof(struct bt_ccb), btmapccbs, bt, /*flags*/0); bt->init_level++; /* DMA tag for our S/G structures. 
We allocate in page sized chunks */ if (bus_dma_tag_create( /* parent */ bt->parent_dmat, /* alignment */ 1, /* boundary */ 0, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ BUS_SPACE_MAXADDR, /* filter */ NULL, /* filterarg */ NULL, /* maxsize */ PAGE_SIZE, /* nsegments */ 1, /* maxsegsz */ BUS_SPACE_MAXSIZE_32BIT, /* flags */ 0, /* lockfunc */ NULL, /* lockarg */ NULL, &bt->sg_dmat) != 0) { goto error_exit; } bt->init_level++; /* Perform initial CCB allocation */ bzero(bt->bt_ccb_array, bt->max_ccbs * sizeof(struct bt_ccb)); btallocccbs(bt); if (bt->num_ccbs == 0) { device_printf(dev, "bt_init - Unable to allocate initial ccbs\n"); goto error_exit; } /* * Note that we are done and return (to attach). */ return 0; error_exit: return (ENXIO); } int bt_attach(device_t dev) { struct bt_softc *bt = device_get_softc(dev); int tagged_dev_openings; struct cam_devq *devq; int error; /* * We reserve 1 ccb for error recovery, so don't * tell the XPT about it. */ if (bt->tag_capable != 0) tagged_dev_openings = bt->max_ccbs - 1; else tagged_dev_openings = 0; /* * Create the device queue for our SIM. */ devq = cam_simq_alloc(bt->max_ccbs - 1); if (devq == NULL) return (ENOMEM); /* * Construct our SIM entry */ bt->sim = cam_sim_alloc(btaction, btpoll, "bt", bt, device_get_unit(bt->dev), &bt->lock, 2, tagged_dev_openings, devq); if (bt->sim == NULL) { cam_simq_free(devq); return (ENOMEM); } mtx_lock(&bt->lock); if (xpt_bus_register(bt->sim, dev, 0) != CAM_SUCCESS) { cam_sim_free(bt->sim, /*free_devq*/TRUE); mtx_unlock(&bt->lock); return (ENXIO); } if (xpt_create_path(&bt->path, /*periph*/NULL, cam_sim_path(bt->sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { xpt_bus_deregister(cam_sim_path(bt->sim)); cam_sim_free(bt->sim, /*free_devq*/TRUE); mtx_unlock(&bt->lock); return (ENXIO); } mtx_unlock(&bt->lock); /* * Setup interrupt. */ error = bus_setup_intr(dev, bt->irq, INTR_TYPE_CAM | INTR_ENTROPY | INTR_MPSAFE, NULL, bt_intr, bt, &bt->ih); if (error) { device_printf(dev, "bus_setup_intr() failed: %d\n", error); return (error); } + gone_in_dev(dev, 12, "bt(4) driver"); return (0); } int bt_check_probed_iop(u_int ioport) { u_int i; for (i = 0; i < BT_NUM_ISAPORTS; i++) { if (bt_isa_ports[i].addr == ioport) { if (bt_isa_ports[i].probed != 0) return (1); else { return (0); } } } return (1); } void bt_mark_probed_bio(isa_compat_io_t port) { if (port < BIO_DISABLED) bt_mark_probed_iop(bt_board_ports[port]); } void bt_mark_probed_iop(u_int ioport) { u_int i; for (i = 0; i < BT_NUM_ISAPORTS; i++) { if (ioport == bt_isa_ports[i].addr) { bt_isa_ports[i].probed = 1; break; } } } void bt_find_probe_range(int ioport, int *port_index, int *max_port_index) { if (ioport > 0) { int i; for (i = 0; i < BT_NUM_ISAPORTS; i++) if (ioport <= bt_isa_ports[i].addr) break; if ((i >= BT_NUM_ISAPORTS) || (ioport != bt_isa_ports[i].addr)) { printf( "bt_find_probe_range: Invalid baseport of 0x%x specified.\n" "bt_find_probe_range: Nearest valid baseport is 0x%x.\n" "bt_find_probe_range: Failing probe.\n", ioport, (i < BT_NUM_ISAPORTS) ?
bt_isa_ports[i].addr : bt_isa_ports[BT_NUM_ISAPORTS - 1].addr); *port_index = *max_port_index = -1; return; } *port_index = *max_port_index = bt_isa_ports[i].bio; } else { *port_index = 0; *max_port_index = BT_NUM_ISAPORTS - 1; } } int bt_iop_from_bio(isa_compat_io_t bio_index) { if (bio_index < BT_NUM_ISAPORTS) return (bt_board_ports[bio_index]); return (-1); } static void btallocccbs(struct bt_softc *bt) { struct bt_ccb *next_ccb; struct sg_map_node *sg_map; bus_addr_t physaddr; bt_sg_t *segs; int newcount; int i; if (bt->num_ccbs >= bt->max_ccbs) /* Can't allocate any more */ return; next_ccb = &bt->bt_ccb_array[bt->num_ccbs]; sg_map = malloc(sizeof(*sg_map), M_DEVBUF, M_NOWAIT); if (sg_map == NULL) goto error_exit; /* Allocate S/G space for the next batch of CCBS */ if (bus_dmamem_alloc(bt->sg_dmat, (void **)&sg_map->sg_vaddr, BUS_DMA_NOWAIT, &sg_map->sg_dmamap) != 0) { free(sg_map, M_DEVBUF); goto error_exit; } SLIST_INSERT_HEAD(&bt->sg_maps, sg_map, links); bus_dmamap_load(bt->sg_dmat, sg_map->sg_dmamap, sg_map->sg_vaddr, PAGE_SIZE, btmapsgs, bt, /*flags*/0); segs = sg_map->sg_vaddr; physaddr = sg_map->sg_physaddr; newcount = (PAGE_SIZE / (BT_NSEG * sizeof(bt_sg_t))); for (i = 0; bt->num_ccbs < bt->max_ccbs && i < newcount; i++) { int error; next_ccb->sg_list = segs; next_ccb->sg_list_phys = physaddr; next_ccb->flags = BCCB_FREE; callout_init_mtx(&next_ccb->timer, &bt->lock, 0); error = bus_dmamap_create(bt->buffer_dmat, /*flags*/0, &next_ccb->dmamap); if (error != 0) break; SLIST_INSERT_HEAD(&bt->free_bt_ccbs, next_ccb, links); segs += BT_NSEG; physaddr += (BT_NSEG * sizeof(bt_sg_t)); next_ccb++; bt->num_ccbs++; } /* Reserve a CCB for error recovery */ if (bt->recovery_bccb == NULL) { bt->recovery_bccb = SLIST_FIRST(&bt->free_bt_ccbs); SLIST_REMOVE_HEAD(&bt->free_bt_ccbs, links); } if (SLIST_FIRST(&bt->free_bt_ccbs) != NULL) return; error_exit: device_printf(bt->dev, "Can't malloc BCCBs\n"); } static __inline void btfreeccb(struct bt_softc *bt, struct bt_ccb *bccb) { if (!dumping) mtx_assert(&bt->lock, MA_OWNED); if ((bccb->flags & BCCB_ACTIVE) != 0) LIST_REMOVE(&bccb->ccb->ccb_h, sim_links.le); if (bt->resource_shortage != 0 && (bccb->ccb->ccb_h.status & CAM_RELEASE_SIMQ) == 0) { bccb->ccb->ccb_h.status |= CAM_RELEASE_SIMQ; bt->resource_shortage = FALSE; } bccb->flags = BCCB_FREE; SLIST_INSERT_HEAD(&bt->free_bt_ccbs, bccb, links); bt->active_ccbs--; } static __inline struct bt_ccb* btgetccb(struct bt_softc *bt) { struct bt_ccb* bccb; if (!dumping) mtx_assert(&bt->lock, MA_OWNED); if ((bccb = SLIST_FIRST(&bt->free_bt_ccbs)) != NULL) { SLIST_REMOVE_HEAD(&bt->free_bt_ccbs, links); bt->active_ccbs++; } else { btallocccbs(bt); bccb = SLIST_FIRST(&bt->free_bt_ccbs); if (bccb != NULL) { SLIST_REMOVE_HEAD(&bt->free_bt_ccbs, links); bt->active_ccbs++; } } return (bccb); } static void btaction(struct cam_sim *sim, union ccb *ccb) { struct bt_softc *bt; CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("btaction\n")); bt = (struct bt_softc *)cam_sim_softc(sim); mtx_assert(&bt->lock, MA_OWNED); switch (ccb->ccb_h.func_code) { /* Common cases first */ case XPT_SCSI_IO: /* Execute the requested I/O operation */ case XPT_RESET_DEV: /* Bus Device Reset the specified SCSI device */ { struct bt_ccb *bccb; struct bt_hccb *hccb; /* * get a bccb to use. 
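* (Editor's note: btgetccb() pops the free list and, when the list is empty, grows the pool via btallocccbs(), which carves one page of S/G space into BT_NSEG-entry chunks; a NULL return here therefore means we are already at max_ccbs or out of memory.)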
*/ if ((bccb = btgetccb(bt)) == NULL) { bt->resource_shortage = TRUE; xpt_freeze_simq(bt->sim, /*count*/1); ccb->ccb_h.status = CAM_REQUEUE_REQ; xpt_done(ccb); return; } hccb = &bccb->hccb; /* * So we can find the BCCB when an abort is requested */ bccb->ccb = ccb; ccb->ccb_h.ccb_bccb_ptr = bccb; ccb->ccb_h.ccb_bt_ptr = bt; /* * Put all the arguments for the xfer in the bccb */ hccb->target_id = ccb->ccb_h.target_id; hccb->target_lun = ccb->ccb_h.target_lun; hccb->btstat = 0; hccb->sdstat = 0; if (ccb->ccb_h.func_code == XPT_SCSI_IO) { struct ccb_scsiio *csio; struct ccb_hdr *ccbh; int error; csio = &ccb->csio; ccbh = &csio->ccb_h; hccb->opcode = INITIATOR_CCB_WRESID; hccb->datain = (ccb->ccb_h.flags & CAM_DIR_IN) ? 1 : 0; hccb->dataout =(ccb->ccb_h.flags & CAM_DIR_OUT) ? 1 : 0; hccb->cmd_len = csio->cdb_len; if (hccb->cmd_len > sizeof(hccb->scsi_cdb)) { ccb->ccb_h.status = CAM_REQ_INVALID; btfreeccb(bt, bccb); xpt_done(ccb); return; } hccb->sense_len = csio->sense_len; if ((ccbh->flags & CAM_TAG_ACTION_VALID) != 0 && ccb->csio.tag_action != CAM_TAG_ACTION_NONE) { hccb->tag_enable = TRUE; hccb->tag_type = (ccb->csio.tag_action & 0x3); } else { hccb->tag_enable = FALSE; hccb->tag_type = 0; } if ((ccbh->flags & CAM_CDB_POINTER) != 0) { if ((ccbh->flags & CAM_CDB_PHYS) == 0) { bcopy(csio->cdb_io.cdb_ptr, hccb->scsi_cdb, hccb->cmd_len); } else { /* I guess I could map it in... */ ccbh->status = CAM_REQ_INVALID; btfreeccb(bt, bccb); xpt_done(ccb); return; } } else { bcopy(csio->cdb_io.cdb_bytes, hccb->scsi_cdb, hccb->cmd_len); } /* If need be, bounce our sense buffer */ if (bt->sense_buffers != NULL) { hccb->sense_addr = btsensepaddr(bt, bccb); } else { hccb->sense_addr = vtophys(&csio->sense_data); } /* * If we have any data to send with this command, * map it into bus space. */ error = bus_dmamap_load_ccb( bt->buffer_dmat, bccb->dmamap, ccb, btexecuteccb, bccb, /*flags*/0); if (error == EINPROGRESS) { /* * So as to maintain ordering, freeze the * controller queue until our mapping is * returned. 
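* (Editor's note: when bus_dmamap_load_ccb() defers the mapping and returns EINPROGRESS, the busdma callback fires later, so a CCB queued after this one could otherwise reach the adapter first; freezing the SIMQ and marking this CCB CAM_RELEASE_SIMQ preserves submission order.)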
*/ xpt_freeze_simq(bt->sim, 1); csio->ccb_h.status |= CAM_RELEASE_SIMQ; } } else { hccb->opcode = INITIATOR_BUS_DEV_RESET; /* No data transfer */ hccb->datain = TRUE; hccb->dataout = TRUE; hccb->cmd_len = 0; hccb->sense_len = 0; hccb->tag_enable = FALSE; hccb->tag_type = 0; btexecuteccb(bccb, NULL, 0, 0); } break; } case XPT_ABORT: /* Abort the specified CCB */ /* XXX Implement */ ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); break; case XPT_SET_TRAN_SETTINGS: { /* XXX Implement */ ccb->ccb_h.status = CAM_PROVIDE_FAIL; xpt_done(ccb); break; } case XPT_GET_TRAN_SETTINGS: /* Get default/user set transfer settings for the target */ { struct ccb_trans_settings *cts; u_int target_mask; cts = &ccb->cts; target_mask = 0x01 << ccb->ccb_h.target_id; if (cts->type == CTS_TYPE_CURRENT_SETTINGS) { struct ccb_trans_settings_scsi *scsi = &cts->proto_specific.scsi; struct ccb_trans_settings_spi *spi = &cts->xport_specific.spi; cts->protocol = PROTO_SCSI; cts->protocol_version = SCSI_REV_2; cts->transport = XPORT_SPI; cts->transport_version = 2; scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB; spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB; if ((bt->disc_permitted & target_mask) != 0) spi->flags |= CTS_SPI_FLAGS_DISC_ENB; if ((bt->tags_permitted & target_mask) != 0) scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB; if ((bt->ultra_permitted & target_mask) != 0) spi->sync_period = 12; else if ((bt->fast_permitted & target_mask) != 0) spi->sync_period = 25; else if ((bt->sync_permitted & target_mask) != 0) spi->sync_period = 50; else spi->sync_period = 0; if (spi->sync_period != 0) spi->sync_offset = 15; spi->valid |= CTS_SPI_VALID_SYNC_RATE; spi->valid |= CTS_SPI_VALID_SYNC_OFFSET; spi->valid |= CTS_SPI_VALID_BUS_WIDTH; if ((bt->wide_permitted & target_mask) != 0) spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT; else spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT; if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) { scsi->valid = CTS_SCSI_VALID_TQ; spi->valid |= CTS_SPI_VALID_DISC; } else scsi->valid = 0; } else { btfetchtransinfo(bt, cts); } ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); break; } case XPT_CALC_GEOMETRY: { struct ccb_calc_geometry *ccg; u_int32_t size_mb; u_int32_t secs_per_cylinder; ccg = &ccb->ccg; size_mb = ccg->volume_size / ((1024L * 1024L) / ccg->block_size); if (size_mb >= 1024 && (bt->extended_trans != 0)) { if (size_mb >= 2048) { ccg->heads = 255; ccg->secs_per_track = 63; } else { ccg->heads = 128; ccg->secs_per_track = 32; } } else { ccg->heads = 64; ccg->secs_per_track = 32; } secs_per_cylinder = ccg->heads * ccg->secs_per_track; ccg->cylinders = ccg->volume_size / secs_per_cylinder; ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); break; } case XPT_RESET_BUS: /* Reset the specified SCSI bus */ { btreset(bt, /*hardreset*/TRUE); ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); break; } case XPT_TERM_IO: /* Terminate the I/O process */ /* XXX Implement */ ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); break; case XPT_PATH_INQ: /* Path routing inquiry */ { struct ccb_pathinq *cpi = &ccb->cpi; cpi->version_num = 1; /* XXX??? */ cpi->hba_inquiry = PI_SDTR_ABLE; if (bt->tag_capable != 0) cpi->hba_inquiry |= PI_TAG_ABLE; if (bt->wide_bus != 0) cpi->hba_inquiry |= PI_WIDE_16; cpi->target_sprt = 0; cpi->hba_misc = 0; cpi->hba_eng_cnt = 0; cpi->max_target = bt->wide_bus ? 
15 : 7; cpi->max_lun = 7; cpi->initiator_id = bt->scsi_id; cpi->bus_id = cam_sim_bus(sim); cpi->base_transfer_speed = 3300; strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); strlcpy(cpi->hba_vid, "BusLogic", HBA_IDLEN); strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); cpi->unit_number = cam_sim_unit(sim); cpi->ccb_h.status = CAM_REQ_CMP; cpi->transport = XPORT_SPI; cpi->transport_version = 2; cpi->protocol = PROTO_SCSI; cpi->protocol_version = SCSI_REV_2; xpt_done(ccb); break; } default: ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); break; } } static void btexecuteccb(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) { struct bt_ccb *bccb; union ccb *ccb; struct bt_softc *bt; bccb = (struct bt_ccb *)arg; ccb = bccb->ccb; bt = (struct bt_softc *)ccb->ccb_h.ccb_bt_ptr; if (error != 0) { if (error != EFBIG) device_printf(bt->dev, "Unexpected error 0x%x returned from " "bus_dmamap_load\n", error); if (ccb->ccb_h.status == CAM_REQ_INPROG) { xpt_freeze_devq(ccb->ccb_h.path, /*count*/1); ccb->ccb_h.status = CAM_REQ_TOO_BIG|CAM_DEV_QFRZN; } btfreeccb(bt, bccb); xpt_done(ccb); return; } if (nseg != 0) { bt_sg_t *sg; bus_dma_segment_t *end_seg; bus_dmasync_op_t op; end_seg = dm_segs + nseg; /* Copy the segments into our SG list */ sg = bccb->sg_list; while (dm_segs < end_seg) { sg->len = dm_segs->ds_len; sg->addr = dm_segs->ds_addr; sg++; dm_segs++; } if (nseg > 1) { bccb->hccb.opcode = INITIATOR_SG_CCB_WRESID; bccb->hccb.data_len = sizeof(bt_sg_t) * nseg; bccb->hccb.data_addr = bccb->sg_list_phys; } else { bccb->hccb.data_len = bccb->sg_list->len; bccb->hccb.data_addr = bccb->sg_list->addr; } if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) op = BUS_DMASYNC_PREREAD; else op = BUS_DMASYNC_PREWRITE; bus_dmamap_sync(bt->buffer_dmat, bccb->dmamap, op); } else { bccb->hccb.opcode = INITIATOR_CCB; bccb->hccb.data_len = 0; bccb->hccb.data_addr = 0; } /* * Last chance to check whether this CCB needs to * be aborted. */ if (ccb->ccb_h.status != CAM_REQ_INPROG) { if (nseg != 0) bus_dmamap_unload(bt->buffer_dmat, bccb->dmamap); btfreeccb(bt, bccb); xpt_done(ccb); return; } bccb->flags = BCCB_ACTIVE; ccb->ccb_h.status |= CAM_SIM_QUEUED; LIST_INSERT_HEAD(&bt->pending_ccbs, &ccb->ccb_h, sim_links.le); callout_reset_sbt(&bccb->timer, SBT_1MS * ccb->ccb_h.timeout, 0, bttimeout, bccb, 0); /* Tell the adapter about this command */ bt->cur_outbox->ccb_addr = btccbvtop(bt, bccb); if (bt->cur_outbox->action_code != BMBO_FREE) { /* * We should never encounter a busy mailbox. * If we do, warn the user, and treat it as * a resource shortage. If the controller is * hung, one of the pending transactions will * time out, causing us to start recovery operations.
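* (Editor's note: returning the CCB with CAM_REQUEUE_REQ while the SIMQ is frozen, as done below, hands it back to the XPT for a retry; once resource_shortage is set, btfreeccb() releases the queue by tagging the next completed CCB with CAM_RELEASE_SIMQ.)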
*/ device_printf(bt->dev, "Encountered busy mailbox with %d out of %d " "commands active!!!\n", bt->active_ccbs, bt->max_ccbs); callout_stop(&bccb->timer); if (nseg != 0) bus_dmamap_unload(bt->buffer_dmat, bccb->dmamap); btfreeccb(bt, bccb); bt->resource_shortage = TRUE; xpt_freeze_simq(bt->sim, /*count*/1); ccb->ccb_h.status = CAM_REQUEUE_REQ; xpt_done(ccb); return; } bt->cur_outbox->action_code = BMBO_START; bt_outb(bt, COMMAND_REG, BOP_START_MBOX); btnextoutbox(bt); } void bt_intr(void *arg) { struct bt_softc *bt; bt = arg; mtx_lock(&bt->lock); bt_intr_locked(bt); mtx_unlock(&bt->lock); } void bt_intr_locked(struct bt_softc *bt) { u_int intstat; while (((intstat = bt_inb(bt, INTSTAT_REG)) & INTR_PENDING) != 0) { if ((intstat & CMD_COMPLETE) != 0) { bt->latched_status = bt_inb(bt, STATUS_REG); bt->command_cmp = TRUE; } bt_outb(bt, CONTROL_REG, RESET_INTR); if ((intstat & IMB_LOADED) != 0) { while (bt->cur_inbox->comp_code != BMBI_FREE) { btdone(bt, btccbptov(bt, bt->cur_inbox->ccb_addr), bt->cur_inbox->comp_code); bt->cur_inbox->comp_code = BMBI_FREE; btnextinbox(bt); } } if ((intstat & SCSI_BUS_RESET) != 0) { btreset(bt, /*hardreset*/FALSE); } } } static void btdone(struct bt_softc *bt, struct bt_ccb *bccb, bt_mbi_comp_code_t comp_code) { union ccb *ccb; struct ccb_scsiio *csio; ccb = bccb->ccb; csio = &bccb->ccb->csio; if ((bccb->flags & BCCB_ACTIVE) == 0) { device_printf(bt->dev, "btdone - Attempt to free non-active BCCB %p\n", (void *)bccb); return; } if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) { bus_dmasync_op_t op; if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) op = BUS_DMASYNC_POSTREAD; else op = BUS_DMASYNC_POSTWRITE; bus_dmamap_sync(bt->buffer_dmat, bccb->dmamap, op); bus_dmamap_unload(bt->buffer_dmat, bccb->dmamap); } if (bccb == bt->recovery_bccb) { /* * The recovery BCCB does not have a CCB associated * with it, so short circuit the normal error handling. * We now traverse our list of pending CCBs and process * any that were terminated by the recovery CCB's action. * We also reinstate timeouts for all remaining pending * CCBs. */ struct cam_path *path; struct ccb_hdr *ccb_h; cam_status error; /* Notify all clients that a BDR occurred */ error = xpt_create_path(&path, /*periph*/NULL, cam_sim_path(bt->sim), bccb->hccb.target_id, CAM_LUN_WILDCARD); if (error == CAM_REQ_CMP) { xpt_async(AC_SENT_BDR, path, NULL); xpt_free_path(path); } ccb_h = LIST_FIRST(&bt->pending_ccbs); while (ccb_h != NULL) { struct bt_ccb *pending_bccb; pending_bccb = (struct bt_ccb *)ccb_h->ccb_bccb_ptr; if (pending_bccb->hccb.target_id == bccb->hccb.target_id) { pending_bccb->hccb.btstat = BTSTAT_HA_BDR; ccb_h = LIST_NEXT(ccb_h, sim_links.le); btdone(bt, pending_bccb, BMBI_ERROR); } else { callout_reset_sbt(&pending_bccb->timer, SBT_1MS * ccb_h->timeout, 0, bttimeout, pending_bccb, 0); ccb_h = LIST_NEXT(ccb_h, sim_links.le); } } device_printf(bt->dev, "No longer in timeout\n"); return; } callout_stop(&bccb->timer); switch (comp_code) { case BMBI_FREE: device_printf(bt->dev, "btdone - CCB completed with free status!\n"); break; case BMBI_NOT_FOUND: device_printf(bt->dev, "btdone - CCB Abort failed to find CCB\n"); break; case BMBI_ABORT: case BMBI_ERROR: if (bootverbose) { printf("bt: ccb %p - error %x occurred. " "btstat = %x, sdstat = %x\n", (void *)bccb, comp_code, bccb->hccb.btstat, bccb->hccb.sdstat); } /* An error occurred */ switch(bccb->hccb.btstat) { case BTSTAT_DATARUN_ERROR: if (bccb->hccb.data_len == 0) { /* * At least firmware 4.22 does this * for a QUEUE FULL condition.
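* Faking an sdstat of SCSI_STATUS_QUEUE_FULL here lets the SCSI-status handling below pass the condition on to CAM, which can then throttle the device's tag depth (editor's note).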
*/ bccb->hccb.sdstat = SCSI_STATUS_QUEUE_FULL; } else if (bccb->hccb.data_len < 0) { csio->ccb_h.status = CAM_DATA_RUN_ERR; break; } /* FALLTHROUGH */ case BTSTAT_NOERROR: case BTSTAT_LINKED_CMD_COMPLETE: case BTSTAT_LINKED_CMD_FLAG_COMPLETE: case BTSTAT_DATAUNDERUN_ERROR: csio->scsi_status = bccb->hccb.sdstat; csio->ccb_h.status |= CAM_SCSI_STATUS_ERROR; switch(csio->scsi_status) { case SCSI_STATUS_CHECK_COND: case SCSI_STATUS_CMD_TERMINATED: csio->ccb_h.status |= CAM_AUTOSNS_VALID; /* Bounce sense back if necessary */ if (bt->sense_buffers != NULL) { csio->sense_data = *btsensevaddr(bt, bccb); } break; default: break; case SCSI_STATUS_OK: csio->ccb_h.status = CAM_REQ_CMP; break; } csio->resid = bccb->hccb.data_len; break; case BTSTAT_SELTIMEOUT: csio->ccb_h.status = CAM_SEL_TIMEOUT; break; case BTSTAT_UNEXPECTED_BUSFREE: csio->ccb_h.status = CAM_UNEXP_BUSFREE; break; case BTSTAT_INVALID_PHASE: csio->ccb_h.status = CAM_SEQUENCE_FAIL; break; case BTSTAT_INVALID_ACTION_CODE: panic("%s: Invalid Action code", bt_name(bt)); break; case BTSTAT_INVALID_OPCODE: panic("%s: Invalid CCB Opcode", bt_name(bt)); break; case BTSTAT_LINKED_CCB_LUN_MISMATCH: /* We don't even support linked commands... */ panic("%s: Linked CCB Lun Mismatch", bt_name(bt)); break; case BTSTAT_INVALID_CCB_OR_SG_PARAM: panic("%s: Invalid CCB or SG list", bt_name(bt)); break; case BTSTAT_AUTOSENSE_FAILED: csio->ccb_h.status = CAM_AUTOSENSE_FAIL; break; case BTSTAT_TAGGED_MSG_REJECTED: { struct ccb_trans_settings neg; struct ccb_trans_settings_scsi *scsi = &neg.proto_specific.scsi; neg.protocol = PROTO_SCSI; neg.protocol_version = SCSI_REV_2; neg.transport = XPORT_SPI; neg.transport_version = 2; scsi->valid = CTS_SCSI_VALID_TQ; scsi->flags = 0; xpt_print_path(csio->ccb_h.path); printf("refuses tagged commands. Performing " "non-tagged I/O\n"); xpt_setup_ccb(&neg.ccb_h, csio->ccb_h.path, /*priority*/1); xpt_async(AC_TRANSFER_NEG, csio->ccb_h.path, &neg); bt->tags_permitted &= ~(0x01 << csio->ccb_h.target_id); csio->ccb_h.status = CAM_MSG_REJECT_REC; break; } case BTSTAT_UNSUPPORTED_MSG_RECEIVED: /* * XXX You would think that this is * a recoverable error... Hmmm. */ csio->ccb_h.status = CAM_REQ_CMP_ERR; break; case BTSTAT_HA_SOFTWARE_ERROR: case BTSTAT_HA_WATCHDOG_ERROR: case BTSTAT_HARDWARE_FAILURE: /* Hardware reset ??? Can we recover ???
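* (Editor's note: CAM_NO_HBA presumably reports the adapter itself as failed, so the XPT gives up on the request rather than retrying it on a dead controller.)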
*/ csio->ccb_h.status = CAM_NO_HBA; break; case BTSTAT_TARGET_IGNORED_ATN: case BTSTAT_OTHER_SCSI_BUS_RESET: case BTSTAT_HA_SCSI_BUS_RESET: if ((csio->ccb_h.status & CAM_STATUS_MASK) != CAM_CMD_TIMEOUT) csio->ccb_h.status = CAM_SCSI_BUS_RESET; break; case BTSTAT_HA_BDR: if ((bccb->flags & BCCB_DEVICE_RESET) == 0) csio->ccb_h.status = CAM_BDR_SENT; else csio->ccb_h.status = CAM_CMD_TIMEOUT; break; case BTSTAT_INVALID_RECONNECT: case BTSTAT_ABORT_QUEUE_GENERATED: csio->ccb_h.status = CAM_REQ_TERMIO; break; case BTSTAT_SCSI_PERROR_DETECTED: csio->ccb_h.status = CAM_UNCOR_PARITY; break; } if (csio->ccb_h.status != CAM_REQ_CMP) { xpt_freeze_devq(csio->ccb_h.path, /*count*/1); csio->ccb_h.status |= CAM_DEV_QFRZN; } if ((bccb->flags & BCCB_RELEASE_SIMQ) != 0) ccb->ccb_h.status |= CAM_RELEASE_SIMQ; btfreeccb(bt, bccb); xpt_done(ccb); break; case BMBI_OK: /* All completed without incident */ ccb->ccb_h.status |= CAM_REQ_CMP; if ((bccb->flags & BCCB_RELEASE_SIMQ) != 0) ccb->ccb_h.status |= CAM_RELEASE_SIMQ; btfreeccb(bt, bccb); xpt_done(ccb); break; } } static int btreset(struct bt_softc* bt, int hard_reset) { struct ccb_hdr *ccb_h; u_int status; u_int timeout; u_int8_t reset_type; if (hard_reset != 0) reset_type = HARD_RESET; else reset_type = SOFT_RESET; bt_outb(bt, CONTROL_REG, reset_type); /* Wait 5sec. for Diagnostic start */ timeout = 5 * 10000; while (--timeout) { status = bt_inb(bt, STATUS_REG); if ((status & DIAG_ACTIVE) != 0) break; DELAY(100); } if (timeout == 0) { if (bootverbose) device_printf(bt->dev, "btreset - Diagnostic Active failed to " "assert. status = 0x%x\n", status); return (ETIMEDOUT); } /* Wait 10sec. for Diagnostic end */ timeout = 10 * 10000; while (--timeout) { status = bt_inb(bt, STATUS_REG); if ((status & DIAG_ACTIVE) == 0) break; DELAY(100); } if (timeout == 0) { panic("%s: btreset - Diagnostic Active failed to drop. " "status = 0x%x\n", bt_name(bt), status); return (ETIMEDOUT); } /* Wait for the host adapter to become ready or report a failure */ timeout = 10000; while (--timeout) { status = bt_inb(bt, STATUS_REG); if ((status & (DIAG_FAIL|HA_READY|DATAIN_REG_READY)) != 0) break; DELAY(100); } if (timeout == 0) { device_printf(bt->dev, "btreset - Host adapter failed to come ready. " "status = 0x%x\n", status); return (ETIMEDOUT); } /* If the diagnostics failed, tell the user */ if ((status & DIAG_FAIL) != 0 || (status & HA_READY) == 0) { device_printf(bt->dev, "btreset - Adapter failed diagnostics\n"); if ((status & DATAIN_REG_READY) != 0) device_printf(bt->dev, "btreset - Host Adapter Error code = 0x%x\n", bt_inb(bt, DATAIN_REG)); return (ENXIO); } /* If we've allocated mailboxes, initialize them */ if (bt->init_level > 4) btinitmboxes(bt); /* If we've attached to the XPT, tell it about the event */ if (bt->path != NULL) xpt_async(AC_BUS_RESET, bt->path, NULL); /* * Perform completion processing for all outstanding CCBs. */ while ((ccb_h = LIST_FIRST(&bt->pending_ccbs)) != NULL) { struct bt_ccb *pending_bccb; pending_bccb = (struct bt_ccb *)ccb_h->ccb_bccb_ptr; pending_bccb->hccb.btstat = BTSTAT_HA_SCSI_BUS_RESET; btdone(bt, pending_bccb, BMBI_ERROR); } return (0); } /* * Send a command to the adapter. 
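* A typical call (editor's hedged sketch, mirroring the uses later in this file) supplies an optional parameter buffer and a reply buffer and checks the errno-style return: u_int8_t param = sizeof(setup_info); error = bt_cmd(bt, BOP_INQUIRE_SETUP_INFO, &param, 1, (u_int8_t *)&setup_info, sizeof(setup_info), DEFAULT_CMD_TIMEOUT); a non-zero return (ETIMEDOUT, EINVAL, E2BIG or EMSGSIZE) means the command did not complete cleanly.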
*/ int bt_cmd(struct bt_softc *bt, bt_op_t opcode, u_int8_t *params, u_int param_len, u_int8_t *reply_data, u_int reply_len, u_int cmd_timeout) { u_int timeout; u_int status; u_int saved_status; u_int intstat; u_int reply_buf_size; int cmd_complete; int error; /* No data returned to start */ reply_buf_size = reply_len; reply_len = 0; intstat = 0; cmd_complete = 0; saved_status = 0; error = 0; bt->command_cmp = 0; /* * Wait up to 10 sec. for the adapter to become * ready to accept commands. */ timeout = 100000; while (--timeout) { status = bt_inb(bt, STATUS_REG); if ((status & HA_READY) != 0 && (status & CMD_REG_BUSY) == 0) break; /* * Throw away any pending data which may be * left over from earlier commands that we * timed out on. */ if ((status & DATAIN_REG_READY) != 0) (void)bt_inb(bt, DATAIN_REG); DELAY(100); } if (timeout == 0) { device_printf(bt->dev, "bt_cmd: Timeout waiting for adapter ready, " "status = 0x%x\n", status); return (ETIMEDOUT); } /* * Send the opcode followed by any necessary parameter bytes. */ bt_outb(bt, COMMAND_REG, opcode); /* * Wait up to 1 sec. for each byte of the * parameter list to be sent. */ timeout = 10000; while (param_len && --timeout) { DELAY(100); status = bt_inb(bt, STATUS_REG); intstat = bt_inb(bt, INTSTAT_REG); if ((intstat & (INTR_PENDING|CMD_COMPLETE)) == (INTR_PENDING|CMD_COMPLETE)) { saved_status = status; cmd_complete = 1; break; } if (bt->command_cmp != 0) { saved_status = bt->latched_status; cmd_complete = 1; break; } if ((status & DATAIN_REG_READY) != 0) break; if ((status & CMD_REG_BUSY) == 0) { bt_outb(bt, COMMAND_REG, *params++); param_len--; timeout = 10000; } } if (timeout == 0) { device_printf(bt->dev, "bt_cmd: Timeout sending parameters, " "status = 0x%x\n", status); cmd_complete = 1; saved_status = status; error = ETIMEDOUT; } /* * Wait for the command to complete. */ while (cmd_complete == 0 && --cmd_timeout) { status = bt_inb(bt, STATUS_REG); intstat = bt_inb(bt, INTSTAT_REG); /* * It may be that this command was issued with * controller interrupts disabled. We'll never * get to our command if an incoming mailbox * interrupt is pending, so take care of completed * mailbox commands by calling our interrupt handler. */ if ((intstat & (INTR_PENDING|IMB_LOADED)) == (INTR_PENDING|IMB_LOADED)) bt_intr_locked(bt); if (bt->command_cmp != 0) { /* * Our interrupt handler saw CMD_COMPLETE * status before we did. */ cmd_complete = 1; saved_status = bt->latched_status; } else if ((intstat & (INTR_PENDING|CMD_COMPLETE)) == (INTR_PENDING|CMD_COMPLETE)) { /* * Our poll (in case interrupts are blocked) * saw the CMD_COMPLETE interrupt. */ cmd_complete = 1; saved_status = status; } else if (opcode == BOP_MODIFY_IO_ADDR && (status & CMD_REG_BUSY) == 0) { /* * The BOP_MODIFY_IO_ADDR does not issue a CMD_COMPLETE, * but it should update the status register. So, we * consider this command complete when the CMD_REG_BUSY * status clears. */ saved_status = status; cmd_complete = 1; } else if ((status & DATAIN_REG_READY) != 0) { u_int8_t data; data = bt_inb(bt, DATAIN_REG); if (reply_len < reply_buf_size) { *reply_data++ = data; } else { device_printf(bt->dev, "bt_cmd - Discarded reply data byte " "for opcode 0x%x\n", opcode); } /* * Reset timeout to ensure at least a second * between response bytes.
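* (Editor's note: each pass of this loop executes DELAY(100), i.e. 100us, so the MAX() floor of 10000 iterations below works out to 10000 * 100us = 1 second.)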
cmd_timeout = MAX(cmd_timeout, 10000); reply_len++; } else if ((opcode == BOP_FETCH_LRAM) && (status & HA_READY) != 0) { saved_status = status; cmd_complete = 1; } DELAY(100); } if (cmd_timeout == 0) { device_printf(bt->dev, "bt_cmd: Timeout waiting for command (%x) " "to complete.\n", opcode); device_printf(bt->dev, "status = 0x%x, intstat = 0x%x, " "rlen %d\n", status, intstat, reply_len); error = (ETIMEDOUT); } /* * Clear any pending interrupts. */ bt_intr_locked(bt); if (error != 0) return (error); /* * If the command was rejected by the controller, tell the caller. */ if ((saved_status & CMD_INVALID) != 0) { /* * Some early adapters may not recover properly from * an invalid command. If it appears that the controller * has wedged (i.e. status was not cleared by our interrupt * reset above), perform a soft reset. */ if (bootverbose) device_printf(bt->dev, "Invalid Command 0x%x\n", opcode); DELAY(1000); status = bt_inb(bt, STATUS_REG); if ((status & (CMD_INVALID|STATUS_REG_RSVD|DATAIN_REG_READY| CMD_REG_BUSY|DIAG_FAIL|DIAG_ACTIVE)) != 0 || (status & (HA_READY|INIT_REQUIRED)) != (HA_READY|INIT_REQUIRED)) { btreset(bt, /*hard_reset*/FALSE); } return (EINVAL); } if (param_len > 0) { /* The controller did not accept the full argument list */ return (E2BIG); } if (reply_len != reply_buf_size) { /* Too much or too little data received */ return (EMSGSIZE); } /* We were successful */ return (0); } static int btinitmboxes(struct bt_softc *bt) { init_32b_mbox_params_t init_mbox; int error; bzero(bt->in_boxes, sizeof(bt_mbox_in_t) * bt->num_boxes); bzero(bt->out_boxes, sizeof(bt_mbox_out_t) * bt->num_boxes); bt->cur_inbox = bt->in_boxes; bt->last_inbox = bt->in_boxes + bt->num_boxes - 1; bt->cur_outbox = bt->out_boxes; bt->last_outbox = bt->out_boxes + bt->num_boxes - 1; /* Tell the adapter about them */ init_mbox.num_boxes = bt->num_boxes; init_mbox.base_addr[0] = bt->mailbox_physbase & 0xFF; init_mbox.base_addr[1] = (bt->mailbox_physbase >> 8) & 0xFF; init_mbox.base_addr[2] = (bt->mailbox_physbase >> 16) & 0xFF; init_mbox.base_addr[3] = (bt->mailbox_physbase >> 24) & 0xFF; error = bt_cmd(bt, BOP_INITIALIZE_32BMBOX, (u_int8_t *)&init_mbox, /*parmlen*/sizeof(init_mbox), /*reply_buf*/NULL, /*reply_len*/0, DEFAULT_CMD_TIMEOUT); if (error != 0) printf("btinitmboxes: Initialization command failed\n"); else if (bt->strict_rr != 0) { /* * If the controller supports * strict round robin mode, * enable it */ u_int8_t param; param = 0; error = bt_cmd(bt, BOP_ENABLE_STRICT_RR, &param, 1, /*reply_buf*/NULL, /*reply_len*/0, DEFAULT_CMD_TIMEOUT); if (error != 0) { printf("btinitmboxes: Unable to enable strict RR\n"); error = 0; } else if (bootverbose) { device_printf(bt->dev, "Using Strict Round Robin Mailbox Mode\n"); } } return (error); } /* * Update the XPT's idea of the negotiated transfer * parameters for a particular target. */ static void btfetchtransinfo(struct bt_softc *bt, struct ccb_trans_settings *cts) { setup_data_t setup_info; u_int target; u_int targ_offset; u_int targ_mask; u_int sync_period; u_int sync_offset; u_int bus_width; int error; u_int8_t param; targ_syncinfo_t sync_info; struct ccb_trans_settings_scsi *scsi = &cts->proto_specific.scsi; struct ccb_trans_settings_spi *spi = &cts->xport_specific.spi; spi->valid = 0; scsi->valid = 0; target = cts->ccb_h.target_id; targ_offset = (target & 0x7); targ_mask = (0x01 << targ_offset); /* * Inquire Setup Information. This command retrieves the * Wide negotiation status for recent adapters as well as * the sync info for older models.
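* (Editor's note: the single parameter byte passed below is, presumably, the reply allocation length, so the controller returns at most sizeof(setup_info) bytes.)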
*/ param = sizeof(setup_info); error = bt_cmd(bt, BOP_INQUIRE_SETUP_INFO, &param, /*paramlen*/1, (u_int8_t*)&setup_info, sizeof(setup_info), DEFAULT_CMD_TIMEOUT); if (error != 0) { device_printf(bt->dev, "btfetchtransinfo - Inquire Setup Info Failed %x\n", error); return; } sync_info = (target < 8) ? setup_info.low_syncinfo[targ_offset] : setup_info.high_syncinfo[targ_offset]; if (sync_info.sync == 0) sync_offset = 0; else sync_offset = sync_info.offset; bus_width = MSG_EXT_WDTR_BUS_8_BIT; if (strcmp(bt->firmware_ver, "5.06L") >= 0) { u_int wide_active; wide_active = (target < 8) ? (setup_info.low_wide_active & targ_mask) : (setup_info.high_wide_active & targ_mask); if (wide_active) bus_width = MSG_EXT_WDTR_BUS_16_BIT; } else if ((bt->wide_permitted & targ_mask) != 0) { struct ccb_getdev cgd; /* * Prior to rev 5.06L, wide status isn't provided, * so we "guess" that wide transfers are in effect * if the user settings allow for wide and the inquiry * data for the device indicates that it can handle * wide transfers. */ xpt_setup_ccb(&cgd.ccb_h, cts->ccb_h.path, /*priority*/1); cgd.ccb_h.func_code = XPT_GDEV_TYPE; xpt_action((union ccb *)&cgd); if ((cgd.ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP && (cgd.inq_data.flags & SID_WBus16) != 0) bus_width = MSG_EXT_WDTR_BUS_16_BIT; } if (bt->firmware_ver[0] >= '3') { /* * For adapters that can do fast or ultra speeds, * use the more exact Target Sync Information command. */ target_sync_info_data_t sync_info; param = sizeof(sync_info); error = bt_cmd(bt, BOP_TARG_SYNC_INFO, &param, /*paramlen*/1, (u_int8_t*)&sync_info, sizeof(sync_info), DEFAULT_CMD_TIMEOUT); if (error != 0) { device_printf(bt->dev, "btfetchtransinfo - Inquire Sync " "Info Failed 0x%x\n", error); return; } sync_period = sync_info.sync_rate[target] * 100; } else { sync_period = 2000 + (500 * sync_info.period); } cts->protocol = PROTO_SCSI; cts->protocol_version = SCSI_REV_2; cts->transport = XPORT_SPI; cts->transport_version = 2; spi->sync_period = sync_period; spi->valid |= CTS_SPI_VALID_SYNC_RATE; spi->sync_offset = sync_offset; spi->valid |= CTS_SPI_VALID_SYNC_OFFSET; spi->valid |= CTS_SPI_VALID_BUS_WIDTH; spi->bus_width = bus_width; if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) { scsi->valid = CTS_SCSI_VALID_TQ; spi->valid |= CTS_SPI_VALID_DISC; } else scsi->valid = 0; xpt_async(AC_TRANSFER_NEG, cts->ccb_h.path, cts); } static void btmapmboxes(void *arg, bus_dma_segment_t *segs, int nseg, int error) { struct bt_softc* bt; bt = (struct bt_softc*)arg; bt->mailbox_physbase = segs->ds_addr; } static void btmapccbs(void *arg, bus_dma_segment_t *segs, int nseg, int error) { struct bt_softc* bt; bt = (struct bt_softc*)arg; bt->bt_ccb_physbase = segs->ds_addr; } static void btmapsgs(void *arg, bus_dma_segment_t *segs, int nseg, int error) { struct bt_softc* bt; bt = (struct bt_softc*)arg; SLIST_FIRST(&bt->sg_maps)->sg_physaddr = segs->ds_addr; } static void btpoll(struct cam_sim *sim) { bt_intr_locked(cam_sim_softc(sim)); } void bttimeout(void *arg) { struct bt_ccb *bccb; union ccb *ccb; struct bt_softc *bt; bccb = (struct bt_ccb *)arg; ccb = bccb->ccb; bt = (struct bt_softc *)ccb->ccb_h.ccb_bt_ptr; mtx_assert(&bt->lock, MA_OWNED); xpt_print_path(ccb->ccb_h.path); printf("CCB %p - timed out\n", (void *)bccb); if ((bccb->flags & BCCB_ACTIVE) == 0) { xpt_print_path(ccb->ccb_h.path); printf("CCB %p - timed out CCB already completed\n", (void *)bccb); return; } /* * In order to simplify the recovery process, we ask the XPT * layer to halt the queue of new transactions and we traverse * the list
of pending CCBs and remove their timeouts. This * means that the driver attempts to clear only one error * condition at a time. In general, timeouts that occur * close together are related anyway, so there is no benefit * in attempting to handle errors in parallel. Timeouts will * be reinstated when the recovery process ends. */ if ((bccb->flags & BCCB_DEVICE_RESET) == 0) { struct ccb_hdr *ccb_h; if ((bccb->flags & BCCB_RELEASE_SIMQ) == 0) { xpt_freeze_simq(bt->sim, /*count*/1); bccb->flags |= BCCB_RELEASE_SIMQ; } ccb_h = LIST_FIRST(&bt->pending_ccbs); while (ccb_h != NULL) { struct bt_ccb *pending_bccb; pending_bccb = (struct bt_ccb *)ccb_h->ccb_bccb_ptr; callout_stop(&pending_bccb->timer); ccb_h = LIST_NEXT(ccb_h, sim_links.le); } } if ((bccb->flags & BCCB_DEVICE_RESET) != 0 || bt->cur_outbox->action_code != BMBO_FREE || ((bccb->hccb.tag_enable == TRUE) && (bt->firmware_ver[0] < '5'))) { /* * Try a full host adapter/SCSI bus reset. * We do this only if we have already attempted * to clear the condition with a BDR, or we cannot * attempt a BDR for lack of mailbox resources * or because of faulty firmware. It turns out * that firmware versions prior to 5.xx treat BDRs * as untagged commands that cannot be sent until * all outstanding tagged commands have been processed. * This makes it somewhat difficult to use a BDR to * clear up a problem with an uncompleted tagged command. */ ccb->ccb_h.status = CAM_CMD_TIMEOUT; btreset(bt, /*hardreset*/TRUE); device_printf(bt->dev, "No longer in timeout\n"); } else { /* * Send a Bus Device Reset message: * The target that is holding up the bus may not * be the same as the one that triggered this timeout * (different commands have different timeout lengths), * but we have no way of determining this from our * timeout handler. Our strategy here is to queue a * BDR message to the target of the timed out command. * If this fails, we'll get another timeout 2 seconds * later which will attempt a bus reset. */ bccb->flags |= BCCB_DEVICE_RESET; callout_reset(&bccb->timer, 2 * hz, bttimeout, bccb); bt->recovery_bccb->hccb.opcode = INITIATOR_BUS_DEV_RESET; /* No Data Transfer */ bt->recovery_bccb->hccb.datain = TRUE; bt->recovery_bccb->hccb.dataout = TRUE; bt->recovery_bccb->hccb.btstat = 0; bt->recovery_bccb->hccb.sdstat = 0; bt->recovery_bccb->hccb.target_id = ccb->ccb_h.target_id; /* Tell the adapter about this command */ bt->cur_outbox->ccb_addr = btccbvtop(bt, bt->recovery_bccb); bt->cur_outbox->action_code = BMBO_START; bt_outb(bt, COMMAND_REG, BOP_START_MBOX); btnextoutbox(bt); } } MODULE_VERSION(bt, 1); MODULE_DEPEND(bt, cam, 1, 1, 1); Index: head/sys/dev/dpt/dpt_pci.c =================================================================== --- head/sys/dev/dpt/dpt_pci.c (revision 328522) +++ head/sys/dev/dpt/dpt_pci.c (revision 328523) @@ -1,188 +1,189 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2000 Matthew N. Dodd * All rights reserved. * * Copyright (c) 1997 Simon Shapiro * All Rights Reserved * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define DPT_VENDOR_ID 0x1044 #define DPT_DEVICE_ID 0xa400 #define DPT_PCI_IOADDR PCIR_BAR(0) /* I/O Address */ #define DPT_PCI_MEMADDR PCIR_BAR(1) /* Mem I/O Address */ #define ISA_PRIMARY_WD_ADDRESS 0x1f8 static int dpt_pci_probe (device_t); static int dpt_pci_attach (device_t); static int dpt_pci_probe (device_t dev) { if ((pci_get_vendor(dev) == DPT_VENDOR_ID) && (pci_get_device(dev) == DPT_DEVICE_ID)) { device_set_desc(dev, "DPT Caching SCSI RAID Controller"); return (BUS_PROBE_DEFAULT); } return (ENXIO); } static int dpt_pci_attach (device_t dev) { dpt_softc_t * dpt; int error = 0; dpt = device_get_softc(dev); dpt->dev = dev; dpt_alloc(dev); #ifdef DPT_ALLOW_MMIO dpt->io_rid = DPT_PCI_MEMADDR; dpt->io_type = SYS_RES_MEMORY; dpt->io_res = bus_alloc_resource_any(dev, dpt->io_type, &dpt->io_rid, RF_ACTIVE); #endif if (dpt->io_res == NULL) { dpt->io_rid = DPT_PCI_IOADDR; dpt->io_type = SYS_RES_IOPORT; dpt->io_res = bus_alloc_resource_any(dev, dpt->io_type, &dpt->io_rid, RF_ACTIVE); } if (dpt->io_res == NULL) { device_printf(dev, "can't allocate register resources\n"); error = ENOMEM; goto bad; } dpt->io_offset = 0x10; dpt->irq_rid = 0; dpt->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &dpt->irq_rid, RF_ACTIVE | RF_SHAREABLE); if (dpt->irq_res == NULL) { device_printf(dev, "No irq?!\n"); error = ENOMEM; goto bad; } /* Ensure busmastering is enabled */ pci_enable_busmaster(dev); if (rman_get_start(dpt->io_res) == (ISA_PRIMARY_WD_ADDRESS - 0x10)) { #ifdef DPT_DEBUG_WARN device_printf(dev, "Mapped as an IDE controller. 
" "Disabling SCSI setup\n"); #endif error = ENXIO; goto bad; } /* Allocate a dmatag representing the capabilities of this attachment */ if (bus_dma_tag_create( /* PCI parent */ bus_get_dma_tag(dev), /* alignemnt */ 1, /* boundary */ 0, /* lowaddr */ BUS_SPACE_MAXADDR_32BIT, /* highaddr */ BUS_SPACE_MAXADDR, /* filter */ NULL, /* filterarg */ NULL, /* maxsize */ BUS_SPACE_MAXSIZE_32BIT, /* nsegments */ ~0, /* maxsegsz */ BUS_SPACE_MAXSIZE_32BIT, /* flags */ 0, /* lockfunc */ NULL, /* lockarg */ NULL, &dpt->parent_dmat) != 0) { error = ENXIO; goto bad; } if (dpt_init(dpt) != 0) { error = ENXIO; goto bad; } /* Register with the XPT */ dpt_attach(dpt); if (bus_setup_intr(dev, dpt->irq_res, INTR_TYPE_CAM | INTR_ENTROPY | INTR_MPSAFE, NULL, dpt_intr, dpt, &dpt->ih)) { device_printf(dev, "Unable to register interrupt handler\n"); error = ENXIO; goto bad; } + gone_in_dev(dev, 12, "dpt(4) driver"); return (error); bad: dpt_release_resources(dev); dpt_free(dpt); return (error); } static device_method_t dpt_pci_methods[] = { /* Device interface */ DEVMETHOD(device_probe, dpt_pci_probe), DEVMETHOD(device_attach, dpt_pci_attach), DEVMETHOD(device_detach, dpt_detach), { 0, 0 } }; static driver_t dpt_pci_driver = { "dpt", dpt_pci_methods, sizeof(dpt_softc_t), }; DRIVER_MODULE(dpt, pci, dpt_pci_driver, dpt_devclass, 0, 0); MODULE_DEPEND(dpt, pci, 1, 1, 1); MODULE_DEPEND(dpt, cam, 1, 1, 1); Index: head/sys/dev/joy/joy.c =================================================================== --- head/sys/dev/joy/joy.c (revision 328522) +++ head/sys/dev/joy/joy.c (revision 328523) @@ -1,249 +1,251 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 1995 Jean-Marc Zucconi * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer * in this position and unchanged. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include /* The game port can manage 4 buttons and 4 variable resistors (usually 2 * joysticks, each with 2 buttons and 2 pots.) via the port at address 0x201. 
* Getting the state of the buttons is done by reading the game port: * buttons 1-4 correspond to bits 4-7 and resistors 1-4 (X1, Y1, X2, Y2) * to bits 0-3. * If button 1 (resp. 2, 3, 4) is pressed, bit 4 (resp. 5, 6, 7) is set to 0. * To get the value of a resistor, write the value 0xff to the port and * wait until the corresponding bit returns to 0. */ #define joypart(d) (dev2unit(d)&1) #ifndef JOY_TIMEOUT #define JOY_TIMEOUT 2000 /* 2 milliseconds */ #endif static d_open_t joyopen; static d_close_t joyclose; static d_read_t joyread; static d_ioctl_t joyioctl; static struct cdevsw joy_cdevsw = { .d_version = D_VERSION, .d_flags = D_NEEDGIANT, .d_open = joyopen, .d_close = joyclose, .d_read = joyread, .d_ioctl = joyioctl, .d_name = "joy", }; devclass_t joy_devclass; int joy_probe(device_t dev) { #ifdef WANT_JOYSTICK_CONNECTED #ifdef notyet outb(dev->id_iobase, 0xff); DELAY(10000); /* 10 ms delay */ return (inb(dev->id_iobase) & 0x0f) != 0x0f; #else return (0); #endif #else return (0); #endif } int joy_attach(device_t dev) { int unit = device_get_unit(dev); struct joy_softc *joy = device_get_softc(dev); joy->rid = 0; joy->res = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &joy->rid, RF_ACTIVE|RF_SHAREABLE); if (joy->res == NULL) return ENXIO; joy->bt = rman_get_bustag(joy->res); joy->port = rman_get_bushandle(joy->res); joy->timeout[0] = joy->timeout[1] = 0; joy->d = make_dev(&joy_cdevsw, unit, 0, 0, 0600, "joy%d", unit); joy->d->si_drv1 = joy; + gone_in_dev(dev, 12, "joy(4) driver"); + return (0); } int joy_detach(device_t dev) { struct joy_softc *joy = device_get_softc(dev); if (joy->res != NULL) bus_release_resource(dev, SYS_RES_IOPORT, joy->rid, joy->res); if (joy->d) destroy_dev(joy->d); return (0); } static int joyopen(struct cdev *dev, int flags, int fmt, struct thread *td) { int i = joypart (dev); struct joy_softc *joy = dev->si_drv1; if (joy->timeout[i]) return (EBUSY); joy->x_off[i] = joy->y_off[i] = 0; joy->timeout[i] = JOY_TIMEOUT; return (0); } static int joyclose(struct cdev *dev, int flags, int fmt, struct thread *td) { int i = joypart (dev); struct joy_softc *joy = dev->si_drv1; joy->timeout[i] = 0; return (0); } static int joyread(struct cdev *dev, struct uio *uio, int flag) { struct joy_softc *joy = dev->si_drv1; bus_space_handle_t port = joy->port; bus_space_tag_t bt = joy->bt; struct timespec t, start, end; int state = 0; struct timespec x, y; struct joystick c; #ifndef __i386__ int s; s = splhigh(); #else disable_intr (); #endif nanotime(&t); end.tv_sec = 0; end.tv_nsec = joy->timeout[joypart(dev)] * 1000; timespecadd(&end, &t); for (; timespeccmp(&t, &end, <) && (bus_space_read_1(bt, port, 0) & 0x0f); nanotime(&t)) ; /* nothing */ bus_space_write_1 (bt, port, 0, 0xff); nanotime(&start); end.tv_sec = 0; end.tv_nsec = joy->timeout[joypart(dev)] * 1000; timespecadd(&end, &start); t = start; timespecclear(&x); timespecclear(&y); while (timespeccmp(&t, &end, <)) { state = bus_space_read_1 (bt, port, 0); if (joypart(dev) == 1) state >>= 2; nanotime(&t); if (!timespecisset(&x) && !(state & 0x01)) x = t; if (!timespecisset(&y) && !(state & 0x02)) y = t; if (timespecisset(&x) && timespecisset(&y)) break; } #ifndef __i386__ splx(s); #else enable_intr (); #endif if (timespecisset(&x)) { timespecsub(&x, &start); c.x = joy->x_off[joypart(dev)] + x.tv_nsec / 1000; } else c.x = 0x80000000; if (timespecisset(&y)) { timespecsub(&y, &start); c.y = joy->y_off[joypart(dev)] + y.tv_nsec / 1000; } else c.y = 0x80000000; state >>= 4; c.b1 = ~state & 1; c.b2 = ~(state >> 1) & 1; return
(uiomove((caddr_t)&c, sizeof(struct joystick), uio)); } static int joyioctl(struct cdev *dev, u_long cmd, caddr_t data, int flag, struct thread *td) { struct joy_softc *joy = dev->si_drv1; int i = joypart (dev); int x; switch (cmd) { case JOY_SETTIMEOUT: x = *(int *) data; if (x < 1 || x > 10000) /* 10ms maximum! */ return EINVAL; joy->timeout[i] = x; break; case JOY_GETTIMEOUT: *(int *) data = joy->timeout[i]; break; case JOY_SET_X_OFFSET: joy->x_off[i] = *(int *) data; break; case JOY_SET_Y_OFFSET: joy->y_off[i] = *(int *) data; break; case JOY_GET_X_OFFSET: *(int *) data = joy->x_off[i]; break; case JOY_GET_Y_OFFSET: *(int *) data = joy->y_off[i]; break; default: return (ENOTTY); } return (0); } Index: head/sys/dev/mse/mse.c =================================================================== --- head/sys/dev/mse/mse.c (revision 328522) +++ head/sys/dev/mse/mse.c (revision 328523) @@ -1,570 +1,572 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2004 M. Warner Losh * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ /* * Copyright 1992 by the University of Guelph * * Permission to use, copy and modify this * software and its documentation for any purpose and without * fee is hereby granted, provided that the above copyright * notice appear in all copies and that both that copyright * notice and this permission notice appear in supporting * documentation. * University of Guelph makes no representations about the suitability of * this software for any purpose. It is provided "as is" * without express or implied warranty. */ /* * Driver for the Logitech and ATI Inport Bus mice for use with 386bsd and * the X386 port, courtesy of * Rick Macklem, rick@snowhite.cis.uoguelph.ca * Caveats: The driver currently uses spltty(), but doesn't use any * generic tty code. It could use splmse() (that only masks off the * bus mouse interrupt, but that would require hacking in i386/isa/icu.s. * (This may be worth the effort, since the Logitech generates 30/60 * interrupts/sec continuously while it is open.) * NB: The ATI has NOT been tested yet! */ /* * Modification history: * Sep 6, 1994 -- Lars Fredriksen(fredriks@mcs.com) * improved probe based on input from Logitech. * * Oct 19, 1992 -- E. 
Stark (stark@cs.sunysb.edu) * fixes to make it work with Microsoft InPort busmouse * * Jan, 1993 -- E. Stark (stark@cs.sunysb.edu) * added patches for new "select" interface * * May 4, 1993 -- E. Stark (stark@cs.sunysb.edu) * changed position of some spl()'s in mseread * * October 8, 1993 -- E. Stark (stark@cs.sunysb.edu) * limit maximum negative x/y value to -127 to work around XFree problem * that causes spurious button pushes. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include devclass_t mse_devclass; static d_open_t mseopen; static d_close_t mseclose; static d_read_t mseread; static d_ioctl_t mseioctl; static d_poll_t msepoll; static struct cdevsw mse_cdevsw = { .d_version = D_VERSION, .d_open = mseopen, .d_close = mseclose, .d_read = mseread, .d_ioctl = mseioctl, .d_poll = msepoll, .d_name = "mse", }; static void mseintr(void *); static void mseintr_locked(mse_softc_t *sc); static void msetimeout(void *); #define MSE_NBLOCKIO(dev) (dev2unit(dev) != 0) #define MSEPRI (PZERO + 3) int mse_common_attach(device_t dev) { mse_softc_t *sc; int unit, flags, rid; sc = device_get_softc(dev); unit = device_get_unit(dev); mtx_init(&sc->sc_lock, "mse", NULL, MTX_DEF); callout_init_mtx(&sc->sc_callout, &sc->sc_lock, 0); rid = 0; sc->sc_intr = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE); if (sc->sc_intr == NULL) { bus_release_resource(dev, SYS_RES_IOPORT, rid, sc->sc_port); mtx_destroy(&sc->sc_lock); return ENXIO; } if (bus_setup_intr(dev, sc->sc_intr, INTR_TYPE_TTY | INTR_MPSAFE, NULL, mseintr, sc, &sc->sc_ih)) { bus_release_resource(dev, SYS_RES_IOPORT, rid, sc->sc_port); bus_release_resource(dev, SYS_RES_IRQ, rid, sc->sc_intr); mtx_destroy(&sc->sc_lock); return ENXIO; } flags = device_get_flags(dev); sc->mode.accelfactor = (flags & MSE_CONFIG_ACCEL) >> 4; sc->sc_dev = make_dev(&mse_cdevsw, 0, UID_ROOT, GID_WHEEL, 0600, "mse%d", unit); sc->sc_dev->si_drv1 = sc; sc->sc_ndev = make_dev(&mse_cdevsw, 1, UID_ROOT, GID_WHEEL, 0600, "nmse%d", unit); sc->sc_ndev->si_drv1 = sc; + gone_in_dev(dev, 12, "mse(4) driver"); + return 0; } int mse_detach(device_t dev) { mse_softc_t *sc; int rid; sc = device_get_softc(dev); MSE_LOCK(sc); if (sc->sc_flags & MSESC_OPEN) { MSE_UNLOCK(sc); return EBUSY; } /* Sabotage subsequent opens. */ sc->sc_mousetype = MSE_NONE; MSE_UNLOCK(sc); destroy_dev(sc->sc_dev); destroy_dev(sc->sc_ndev); rid = 0; bus_teardown_intr(dev, sc->sc_intr, sc->sc_ih); bus_release_resource(dev, SYS_RES_IRQ, rid, sc->sc_intr); bus_release_resource(dev, SYS_RES_IOPORT, rid, sc->sc_port); callout_drain(&sc->sc_callout); mtx_destroy(&sc->sc_lock); return 0; } /* * Exclusive open the mouse, initialize it and enable interrupts. */ static int mseopen(struct cdev *dev, int flags, int fmt, struct thread *td) { mse_softc_t *sc = dev->si_drv1; MSE_LOCK(sc); if (sc->sc_mousetype == MSE_NONE) { MSE_UNLOCK(sc); return (ENXIO); } if (sc->sc_flags & MSESC_OPEN) { MSE_UNLOCK(sc); return (EBUSY); } sc->sc_flags |= MSESC_OPEN; sc->sc_obuttons = sc->sc_buttons = MOUSE_MSC_BUTTONS; sc->sc_deltax = sc->sc_deltay = 0; sc->sc_bytesread = sc->mode.packetsize = MOUSE_MSC_PACKETSIZE; sc->sc_watchdog = FALSE; callout_reset(&sc->sc_callout, hz * 2, msetimeout, dev); sc->mode.level = 0; sc->status.flags = 0; sc->status.button = sc->status.obutton = 0; sc->status.dx = sc->status.dy = sc->status.dz = 0; /* * Initialize mouse interface and enable interrupts. 
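* (Editor's note: sc_enablemouse, sc_getmouse and sc_disablemouse are function pointers installed by the bus front end for the probed hardware, so this open path is shared by the Logitech and InPort variants.)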
*/ (*sc->sc_enablemouse)(sc->sc_port); MSE_UNLOCK(sc); return (0); } /* * mseclose: just turn off mouse interrupts. */ static int mseclose(struct cdev *dev, int flags, int fmt, struct thread *td) { mse_softc_t *sc = dev->si_drv1; MSE_LOCK(sc); callout_stop(&sc->sc_callout); (*sc->sc_disablemouse)(sc->sc_port); sc->sc_flags &= ~MSESC_OPEN; MSE_UNLOCK(sc); return(0); } /* * mseread: return mouse info using the MSC serial protocol, but without * using bytes 4 and 5. * (Yes this is cheesy, but it makes the X386 server happy, so...) */ static int mseread(struct cdev *dev, struct uio *uio, int ioflag) { mse_softc_t *sc = dev->si_drv1; int xfer, error; /* * If there are no protocol bytes to be read, set up a new protocol * packet. */ MSE_LOCK(sc); while (sc->sc_flags & MSESC_READING) { if (MSE_NBLOCKIO(dev)) { MSE_UNLOCK(sc); return (0); } sc->sc_flags |= MSESC_WANT; error = mtx_sleep(sc, &sc->sc_lock, MSEPRI | PCATCH, "mseread", 0); if (error) { MSE_UNLOCK(sc); return (error); } } sc->sc_flags |= MSESC_READING; xfer = 0; if (sc->sc_bytesread >= sc->mode.packetsize) { while (sc->sc_deltax == 0 && sc->sc_deltay == 0 && (sc->sc_obuttons ^ sc->sc_buttons) == 0) { if (MSE_NBLOCKIO(dev)) goto out; sc->sc_flags |= MSESC_WANT; error = mtx_sleep(sc, &sc->sc_lock, MSEPRI | PCATCH, "mseread", 0); if (error) goto out; } /* * Generate protocol bytes. * For some reason X386 expects 5 bytes but never uses * the fourth or fifth? */ sc->sc_bytes[0] = sc->mode.syncmask[1] | (sc->sc_buttons & ~sc->mode.syncmask[0]); if (sc->sc_deltax > 127) sc->sc_deltax = 127; if (sc->sc_deltax < -127) sc->sc_deltax = -127; sc->sc_deltay = -sc->sc_deltay; /* Otherwise mousey goes wrong way */ if (sc->sc_deltay > 127) sc->sc_deltay = 127; if (sc->sc_deltay < -127) sc->sc_deltay = -127; sc->sc_bytes[1] = sc->sc_deltax; sc->sc_bytes[2] = sc->sc_deltay; sc->sc_bytes[3] = sc->sc_bytes[4] = 0; sc->sc_bytes[5] = sc->sc_bytes[6] = 0; sc->sc_bytes[7] = MOUSE_SYS_EXTBUTTONS; sc->sc_obuttons = sc->sc_buttons; sc->sc_deltax = sc->sc_deltay = 0; sc->sc_bytesread = 0; } xfer = min(uio->uio_resid, sc->mode.packetsize - sc->sc_bytesread); MSE_UNLOCK(sc); error = uiomove(&sc->sc_bytes[sc->sc_bytesread], xfer, uio); MSE_LOCK(sc); out: sc->sc_flags &= ~MSESC_READING; if (error == 0) sc->sc_bytesread += xfer; if (sc->sc_flags & MSESC_WANT) { sc->sc_flags &= ~MSESC_WANT; MSE_UNLOCK(sc); wakeup(sc); } else MSE_UNLOCK(sc); return (error); } /* * mseioctl: process ioctl commands.
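* Typical userland use (editor's hedged sketch): int level = 1; ioctl(fd, MOUSE_SETLEVEL, &level); switches the device to the sysmouse (level 1) protocol handled below; MOUSE_GETLEVEL reads the current level back into an int.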
*/ static int mseioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td) { mse_softc_t *sc = dev->si_drv1; mousestatus_t status; int err = 0; switch (cmd) { case MOUSE_GETHWINFO: MSE_LOCK(sc); *(mousehw_t *)addr = sc->hw; if (sc->mode.level == 0) ((mousehw_t *)addr)->model = MOUSE_MODEL_GENERIC; MSE_UNLOCK(sc); break; case MOUSE_GETMODE: MSE_LOCK(sc); *(mousemode_t *)addr = sc->mode; switch (sc->mode.level) { case 0: break; case 1: ((mousemode_t *)addr)->protocol = MOUSE_PROTO_SYSMOUSE; ((mousemode_t *)addr)->syncmask[0] = MOUSE_SYS_SYNCMASK; ((mousemode_t *)addr)->syncmask[1] = MOUSE_SYS_SYNC; break; } MSE_UNLOCK(sc); break; case MOUSE_SETMODE: switch (((mousemode_t *)addr)->level) { case 0: case 1: break; default: return (EINVAL); } MSE_LOCK(sc); if (((mousemode_t *)addr)->accelfactor < -1) { MSE_UNLOCK(sc); return (EINVAL); } else if (((mousemode_t *)addr)->accelfactor >= 0) sc->mode.accelfactor = ((mousemode_t *)addr)->accelfactor; sc->mode.level = ((mousemode_t *)addr)->level; switch (sc->mode.level) { case 0: sc->sc_bytesread = sc->mode.packetsize = MOUSE_MSC_PACKETSIZE; break; case 1: sc->sc_bytesread = sc->mode.packetsize = MOUSE_SYS_PACKETSIZE; break; } MSE_UNLOCK(sc); break; case MOUSE_GETLEVEL: MSE_LOCK(sc); *(int *)addr = sc->mode.level; MSE_UNLOCK(sc); break; case MOUSE_SETLEVEL: switch (*(int *)addr) { case 0: MSE_LOCK(sc); sc->mode.level = *(int *)addr; sc->sc_bytesread = sc->mode.packetsize = MOUSE_MSC_PACKETSIZE; MSE_UNLOCK(sc); break; case 1: MSE_LOCK(sc); sc->mode.level = *(int *)addr; sc->sc_bytesread = sc->mode.packetsize = MOUSE_SYS_PACKETSIZE; MSE_UNLOCK(sc); break; default: return (EINVAL); } break; case MOUSE_GETSTATUS: MSE_LOCK(sc); status = sc->status; sc->status.flags = 0; sc->status.obutton = sc->status.button; sc->status.button = 0; sc->status.dx = 0; sc->status.dy = 0; sc->status.dz = 0; MSE_UNLOCK(sc); *(mousestatus_t *)addr = status; break; case MOUSE_READSTATE: case MOUSE_READDATA: return (ENODEV); #if (defined(MOUSE_GETVARS)) case MOUSE_GETVARS: case MOUSE_SETVARS: return (ENODEV); #endif default: return (ENOTTY); } return (err); } /* * msepoll: check for mouse input to be processed. */ static int msepoll(struct cdev *dev, int events, struct thread *td) { mse_softc_t *sc = dev->si_drv1; int revents = 0; MSE_LOCK(sc); if (events & (POLLIN | POLLRDNORM)) { if (sc->sc_bytesread != sc->mode.packetsize || sc->sc_deltax != 0 || sc->sc_deltay != 0 || (sc->sc_obuttons ^ sc->sc_buttons) != 0) revents |= events & (POLLIN | POLLRDNORM); else selrecord(td, &sc->sc_selp); } MSE_UNLOCK(sc); return (revents); } /* * msetimeout: watchdog timer routine. */ static void msetimeout(void *arg) { struct cdev *dev; mse_softc_t *sc; dev = (struct cdev *)arg; sc = dev->si_drv1; MSE_ASSERT_LOCKED(sc); if (sc->sc_watchdog) { if (bootverbose) printf("%s: lost interrupt?\n", devtoname(dev)); mseintr_locked(sc); } sc->sc_watchdog = TRUE; callout_schedule(&sc->sc_callout, hz); } /* * mseintr: update mouse status. sc_deltax and sc_deltay are accumulative. */ static void mseintr(void *arg) { mse_softc_t *sc = arg; MSE_LOCK(sc); mseintr_locked(sc); MSE_UNLOCK(sc); } static void mseintr_locked(mse_softc_t *sc) { /* * the table to turn MouseSystem button bits (MOUSE_MSC_BUTTON?UP) * into `mousestatus' button bits (MOUSE_BUTTON?DOWN). 
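* Worked example (editor's note): the MSC bits are active-low, so with only the left button down the raw bits are 0b011; the index (~but & MOUSE_MSC_BUTTONS) is 0b100 = 4, and butmap[4] == MOUSE_BUTTON1DOWN.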
*/ static int butmap[8] = { 0, MOUSE_BUTTON3DOWN, MOUSE_BUTTON2DOWN, MOUSE_BUTTON2DOWN | MOUSE_BUTTON3DOWN, MOUSE_BUTTON1DOWN, MOUSE_BUTTON1DOWN | MOUSE_BUTTON3DOWN, MOUSE_BUTTON1DOWN | MOUSE_BUTTON2DOWN, MOUSE_BUTTON1DOWN | MOUSE_BUTTON2DOWN | MOUSE_BUTTON3DOWN }; int dx, dy, but; int sign; #ifdef DEBUG static int mse_intrcnt = 0; if((mse_intrcnt++ % 10000) == 0) printf("mseintr\n"); #endif /* DEBUG */ if ((sc->sc_flags & MSESC_OPEN) == 0) return; (*sc->sc_getmouse)(sc->sc_port, &dx, &dy, &but); if (sc->mode.accelfactor > 0) { sign = (dx < 0); dx = dx * dx / sc->mode.accelfactor; if (dx == 0) dx = 1; if (sign) dx = -dx; sign = (dy < 0); dy = dy * dy / sc->mode.accelfactor; if (dy == 0) dy = 1; if (sign) dy = -dy; } sc->sc_deltax += dx; sc->sc_deltay += dy; sc->sc_buttons = but; but = butmap[~but & MOUSE_MSC_BUTTONS]; sc->status.dx += dx; sc->status.dy += dy; sc->status.flags |= ((dx || dy) ? MOUSE_POSCHANGED : 0) | (sc->status.button ^ but); sc->status.button = but; sc->sc_watchdog = FALSE; /* * If mouse state has changed, wake up anyone wanting to know. */ if (sc->sc_deltax != 0 || sc->sc_deltay != 0 || (sc->sc_obuttons ^ sc->sc_buttons) != 0) { if (sc->sc_flags & MSESC_WANT) { sc->sc_flags &= ~MSESC_WANT; wakeup(sc); } selwakeuppri(&sc->sc_selp, MSEPRI); } } Index: head/sys/dev/ncv/ncr53c500_pccard.c =================================================================== --- head/sys/dev/ncv/ncr53c500_pccard.c (revision 328522) +++ head/sys/dev/ncv/ncr53c500_pccard.c (revision 328523) @@ -1,339 +1,340 @@ /* $NecBSD: ncr53c500_pisa.c,v 1.28 1998/11/26 01:59:11 honda Exp $ */ /* $NetBSD$ */ /*- * SPDX-License-Identifier: BSD-3-Clause * * [Ported for FreeBSD] * Copyright (c) 2000 * Noriaki Mitsunaga, Mitsuru Iwasaki and Takanori Watanabe. * All rights reserved. * [NetBSD for NEC PC-98 series] * Copyright (c) 1995, 1996, 1997, 1998 * NetBSD/pc98 porting staff. All rights reserved. * Copyright (c) 1995, 1996, 1997, 1998 * Naofumi HONDA. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define KME_KXLC004_01 0x100 #define OFFSET_KME_KXLC004_01 0x10 #include "pccarddevs.h" static int ncvprobe(device_t devi); static int ncvattach(device_t devi); static void ncv_card_unload(device_t); static const struct ncv_product { struct pccard_product prod; int flags; } ncv_products[] = { { PCMCIA_CARD(EPSON, SC200), 0}, { PCMCIA_CARD(PANASONIC, KXLC002), 0xb4d00000 }, { PCMCIA_CARD(PANASONIC, KXLC003), 0xb4d00000 }, /* untested */ { PCMCIA_CARD(PANASONIC, KXLC004), 0xb4d00100 }, { PCMCIA_CARD(MACNICA, MPS100), 0xb6250000 }, { PCMCIA_CARD(MACNICA, MPS110), 0 }, { PCMCIA_CARD(NEC, PC9801N_J03R), 0 }, { PCMCIA_CARD(NEWMEDIA, BASICS_SCSI), 0 }, { PCMCIA_CARD(QLOGIC, PC05), 0x84d00000 }, #define FLAGS_REX5572 0x84d00000 { PCMCIA_CARD(RATOC, REX5572), FLAGS_REX5572 }, { PCMCIA_CARD(RATOC, REX9530), 0x84d00000 }, { { NULL }, 0 } }; /* * Additional code for FreeBSD new-bus PCCard frontend */ static void ncv_pccard_intr(void * arg) { struct ncv_softc *sc; sc = arg; SCSI_LOW_LOCK(&sc->sc_sclow); ncvintr(arg); SCSI_LOW_UNLOCK(&sc->sc_sclow); } static void ncv_release_resource(device_t dev) { struct ncv_softc *sc = device_get_softc(dev); if (sc->ncv_intrhand) { bus_teardown_intr(dev, sc->irq_res, sc->ncv_intrhand); } if (sc->port_res) { bus_release_resource(dev, SYS_RES_IOPORT, sc->port_rid, sc->port_res); } if (sc->port_res_dmy) { bus_release_resource(dev, SYS_RES_IOPORT, sc->port_rid_dmy, sc->port_res_dmy); } if (sc->irq_res) { bus_release_resource(dev, SYS_RES_IRQ, sc->irq_rid, sc->irq_res); } if (sc->mem_res) { bus_release_resource(dev, SYS_RES_MEMORY, sc->mem_rid, sc->mem_res); } mtx_destroy(&sc->sc_sclow.sl_lock); } static int ncv_alloc_resource(device_t dev) { struct ncv_softc *sc = device_get_softc(dev); u_int32_t flags = device_get_flags(dev); rman_res_t ioaddr, iosize, maddr, msize; int error; bus_addr_t offset = 0; if(flags & KME_KXLC004_01) offset = OFFSET_KME_KXLC004_01; error = bus_get_resource(dev, SYS_RES_IOPORT, 0, &ioaddr, &iosize); if (error || (iosize < (offset + NCVIOSZ))) { return(ENOMEM); } mtx_init(&sc->sc_sclow.sl_lock, "ncv", NULL, MTX_DEF); sc->port_rid = 0; sc->port_res = bus_alloc_resource(dev, SYS_RES_IOPORT, &sc->port_rid, ioaddr+offset, ioaddr+iosize-offset, iosize-offset, RF_ACTIVE); if (sc->port_res == NULL) { ncv_release_resource(dev); return(ENOMEM); } if (offset != 0) { sc->port_rid_dmy = 0; sc->port_res_dmy = bus_alloc_resource(dev, SYS_RES_IOPORT, &sc->port_rid_dmy, ioaddr, ioaddr+offset, offset, RF_ACTIVE); if (sc->port_res_dmy == NULL) { printf("Warning: cannot allocate IOPORT partially.\n"); } } else { sc->port_rid_dmy = 0; sc->port_res_dmy = NULL; } sc->irq_rid = 0; sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->irq_rid, RF_ACTIVE); if (sc->irq_res == NULL) { ncv_release_resource(dev); return(ENOMEM); } error = bus_get_resource(dev, SYS_RES_MEMORY, 0, &maddr, &msize); if (error) { return(0); /* XXX */ } /* no need to allocate memory if not configured */ if (maddr == 0 || msize == 0) { return(0); } sc->mem_rid = 0; sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->mem_rid, RF_ACTIVE); if (sc->mem_res == NULL) { ncv_release_resource(dev); return(ENOMEM); } return(0); } static int ncv_pccard_probe(device_t dev) { const struct ncv_product *pp; const char *vendorstr; const char *prodstr; if ((pp = (const struct ncv_product *) pccard_product_lookup(dev, 
(const struct pccard_product *) ncv_products, sizeof(ncv_products[0]), NULL)) != NULL) { if (pp->prod.pp_name != NULL) device_set_desc(dev, pp->prod.pp_name); device_set_flags(dev, pp->flags); return(0); } if (pccard_get_vendor_str(dev, &vendorstr)) return(EIO); if (pccard_get_product_str(dev, &prodstr)) return(EIO); if (strcmp(vendorstr, "RATOC System Inc.") == 0 && strncmp(prodstr, "SOUND/SCSI2 CARD", 16) == 0) { device_set_desc(dev, "RATOC REX-5572"); device_set_flags(dev, FLAGS_REX5572); return (BUS_PROBE_DEFAULT); } return(EIO); } static int ncv_pccard_attach(device_t dev) { struct ncv_softc *sc = device_get_softc(dev); int error; error = ncv_alloc_resource(dev); if (error) { return(error); } if (ncvprobe(dev) == 0) { ncv_release_resource(dev); return(ENXIO); } error = bus_setup_intr(dev, sc->irq_res, INTR_TYPE_CAM | INTR_ENTROPY | INTR_MPSAFE, NULL, ncv_pccard_intr, sc, &sc->ncv_intrhand); if (error) { ncv_release_resource(dev); return(error); } if (ncvattach(dev) == 0) { ncv_release_resource(dev); return(ENXIO); } + gone_in_dev(dev, 12, "ncv(4) driver"); return(0); } static int ncv_pccard_detach(device_t dev) { ncv_card_unload(dev); ncv_release_resource(dev); return (0); } static device_method_t ncv_pccard_methods[] = { /* Device interface */ DEVMETHOD(device_probe, ncv_pccard_probe), DEVMETHOD(device_attach, ncv_pccard_attach), DEVMETHOD(device_detach, ncv_pccard_detach), { 0, 0 } }; static driver_t ncv_pccard_driver = { "ncv", ncv_pccard_methods, sizeof(struct ncv_softc), }; static devclass_t ncv_devclass; MODULE_DEPEND(ncv, scsi_low, 1, 1, 1); DRIVER_MODULE(ncv, pccard, ncv_pccard_driver, ncv_devclass, 0, 0); PCCARD_PNP_INFO(ncv_products); static void ncv_card_unload(device_t devi) { struct ncv_softc *sc = device_get_softc(devi); scsi_low_deactivate(&sc->sc_sclow); scsi_low_detach(&sc->sc_sclow); } static int ncvprobe(device_t devi) { int rv; struct ncv_softc *sc = device_get_softc(devi); u_int32_t flags = device_get_flags(devi); rv = ncvprobesubr(sc->port_res, flags, NCV_HOSTID); return rv; } static int ncvattach(device_t devi) { struct ncv_softc *sc; struct scsi_low_softc *slp; u_int32_t flags = device_get_flags(devi); sc = device_get_softc(devi); slp = &sc->sc_sclow; slp->sl_dev = devi; slp->sl_hostid = NCV_HOSTID; slp->sl_cfgflags = flags; ncvattachsubr(sc); return(NCVIOSZ); } Index: head/sys/dev/nsp/nsp_pccard.c =================================================================== --- head/sys/dev/nsp/nsp_pccard.c (revision 328522) +++ head/sys/dev/nsp/nsp_pccard.c (revision 328523) @@ -1,295 +1,296 @@ /* $NecBSD: nsp_pisa.c,v 1.4 1999/04/15 01:35:54 kmatsuda Exp $ */ /* $NetBSD$ */ /*- * SPDX-License-Identifier: BSD-3-Clause * * [Ported for FreeBSD] * Copyright (c) 2000 * Noriaki Mitsunaga, Mitsuru Iwasaki and Takanori Watanabe. * All rights reserved. * [NetBSD for NEC PC-98 series] * Copyright (c) 1998 * NetBSD/pc98 porting staff. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. 
The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define NSP_HOSTID 7 #include "pccarddevs.h" #define PIO_MODE 0x100 /* pd_flags */ static int nspprobe(device_t devi); static int nspattach(device_t devi); static void nsp_card_unload (device_t); const struct pccard_product nsp_products[] = { PCMCIA_CARD(IODATA3, CBSC16), PCMCIA_CARD(PANASONIC, KME), PCMCIA_CARD(WORKBIT2, NINJA_SCSI3), PCMCIA_CARD(WORKBIT, ULTRA_NINJA_16), { NULL } }; /* * Additional code for FreeBSD new-bus PC Card frontend */ static void nsp_pccard_intr(void * arg) { struct nsp_softc *sc; sc = arg; SCSI_LOW_LOCK(&sc->sc_sclow); nspintr(sc); SCSI_LOW_UNLOCK(&sc->sc_sclow); } static void nsp_release_resource(device_t dev) { struct nsp_softc *sc = device_get_softc(dev); if (sc->nsp_intrhand) bus_teardown_intr(dev, sc->irq_res, sc->nsp_intrhand); if (sc->port_res) bus_release_resource(dev, SYS_RES_IOPORT, sc->port_rid, sc->port_res); if (sc->irq_res) bus_release_resource(dev, SYS_RES_IRQ, sc->irq_rid, sc->irq_res); if (sc->mem_res) bus_release_resource(dev, SYS_RES_MEMORY, sc->mem_rid, sc->mem_res); mtx_destroy(&sc->sc_sclow.sl_lock); } static int nsp_alloc_resource(device_t dev) { struct nsp_softc *sc = device_get_softc(dev); rman_res_t ioaddr, iosize, maddr, msize; int error; error = bus_get_resource(dev, SYS_RES_IOPORT, 0, &ioaddr, &iosize); if (error || iosize < NSP_IOSIZE) return(ENOMEM); mtx_init(&sc->sc_sclow.sl_lock, "nsp", NULL, MTX_DEF); sc->port_rid = 0; sc->port_res = bus_alloc_resource_anywhere(dev, SYS_RES_IOPORT, &sc->port_rid, NSP_IOSIZE, RF_ACTIVE); if (sc->port_res == NULL) { nsp_release_resource(dev); return(ENOMEM); } sc->irq_rid = 0; sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->irq_rid, RF_ACTIVE); if (sc->irq_res == NULL) { nsp_release_resource(dev); return(ENOMEM); } error = bus_get_resource(dev, SYS_RES_MEMORY, 0, &maddr, &msize); if (error) return(0); /* XXX */ /* No need to allocate memory if not configured and it's in PIO mode */ if (maddr == 0 || msize == 0) { if ((device_get_flags(dev) & PIO_MODE) == 0) { printf("Memory window was not configured. 
Configure or use in PIO mode.\n"); nsp_release_resource(dev); return(ENOMEM); } /* no need to allocate memory if PIO mode */ return(0); } sc->mem_rid = 0; sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->mem_rid, RF_ACTIVE); if (sc->mem_res == NULL) { nsp_release_resource(dev); return(ENOMEM); } return(0); } static int nsp_pccard_probe(device_t dev) { const struct pccard_product *pp; if ((pp = pccard_product_lookup(dev, nsp_products, sizeof(nsp_products[0]), NULL)) != NULL) { if (pp->pp_name) device_set_desc(dev, pp->pp_name); return (BUS_PROBE_DEFAULT); } return(EIO); } static int nsp_pccard_attach(device_t dev) { struct nsp_softc *sc = device_get_softc(dev); int error; error = nsp_alloc_resource(dev); if (error) return(error); if (nspprobe(dev) == 0) { nsp_release_resource(dev); return(ENXIO); } error = bus_setup_intr(dev, sc->irq_res, INTR_TYPE_CAM | INTR_ENTROPY | INTR_MPSAFE, NULL, nsp_pccard_intr, sc, &sc->nsp_intrhand); if (error) { nsp_release_resource(dev); return(error); } if (nspattach(dev) == 0) { nsp_release_resource(dev); return(ENXIO); } + gone_in_dev(dev, 12, "nsp(4) driver"); return(0); } static int nsp_pccard_detach(device_t dev) { nsp_card_unload(dev); nsp_release_resource(dev); return (0); } static device_method_t nsp_pccard_methods[] = { /* Device interface */ DEVMETHOD(device_probe, nsp_pccard_probe), DEVMETHOD(device_attach, nsp_pccard_attach), DEVMETHOD(device_detach, nsp_pccard_detach), { 0, 0 } }; static driver_t nsp_pccard_driver = { "nsp", nsp_pccard_methods, sizeof(struct nsp_softc), }; static devclass_t nsp_devclass; MODULE_DEPEND(nsp, scsi_low, 1, 1, 1); DRIVER_MODULE(nsp, pccard, nsp_pccard_driver, nsp_devclass, 0, 0); PCCARD_PNP_INFO(nsp_products); static void nsp_card_unload(device_t devi) { struct nsp_softc *sc = device_get_softc(devi); scsi_low_deactivate(&sc->sc_sclow); scsi_low_detach(&sc->sc_sclow); } static int nspprobe(device_t devi) { int rv; struct nsp_softc *sc = device_get_softc(devi); rv = nspprobesubr(sc->port_res, device_get_flags(devi)); return rv; } static int nspattach(device_t devi) { struct nsp_softc *sc; struct scsi_low_softc *slp; u_int32_t flags = device_get_flags(devi); u_int iobase = bus_get_resource_start(devi, SYS_RES_IOPORT, 0); if (iobase == 0) { device_printf(devi, "no ioaddr is given\n"); return (ENXIO); } sc = device_get_softc(devi); slp = &sc->sc_sclow; slp->sl_dev = devi; if (sc->mem_res == NULL) { device_printf(devi, "WARNING: cannot get memory resource, going PIO mode\n"); flags |= PIO_MODE; } /* slp->sl_irq = devi->pd_irq; */ sc->sc_iclkdiv = CLKDIVR_20M; sc->sc_clkdiv = CLKDIVR_40M; slp->sl_hostid = NSP_HOSTID; slp->sl_cfgflags = flags; nspattachsubr(sc); return(NSP_IOSIZE); } Index: head/sys/dev/stg/tmc18c30.c =================================================================== --- head/sys/dev/stg/tmc18c30.c (revision 328522) +++ head/sys/dev/stg/tmc18c30.c (revision 328523) @@ -1,1329 +1,1330 @@ /* $NecBSD: tmc18c30.c,v 1.28.12.3 2001/06/19 04:35:48 honda Exp $ */ /* $NetBSD$ */ #define STG_DEBUG #define STG_STATICS #define STG_IO_CONTROL_FLAGS (STG_FIFO_INTERRUPTS | STG_WAIT_FOR_SELECT) /*- * SPDX-License-Identifier: BSD-3-Clause * * [NetBSD for NEC PC-98 series] * Copyright (c) 1996, 1997, 1998, 1999, 2000, 2001 * NetBSD/pc98 porting staff. All rights reserved. * Copyright (c) 1996, 1997, 1998, 1999, 2000, 2001 * Naofumi HONDA. All rights reserved. * Copyright (c) 1996, 1997, 1998, 1999 * Kouichi Matsuda. All rights reserved.
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include /*************************************************** * USER SETTINGS ***************************************************/ /* DEVICE CONFIGURATION FLAGS (MINOR) * * 0x01 DISCONNECT OFF * 0x02 PARITY LINE OFF * 0x04 IDENTIFY MSG OFF ( = single lun) * 0x08 SYNC TRANSFER OFF */ /* #define STG_SYNC_SUPPORT */ /* NOT YET but easy */ /* For the 512 fifo type: change below */ #define TMC18C30_FIFOSZ 0x800 #define TMC18C30_FCBSZ 0x200 #define TMC18C50_FIFOSZ 0x2000 #define TMC18C50_FCBSZ 0x400 #define STG_MAX_DATA_SIZE (64 * 1024) #define STG_DELAY_MAX (2 * 1000 * 1000) #define STG_DELAY_INTERVAL (1) #define STG_DELAY_SELECT_POLLING_MAX (5 * 1000 * 1000) /*************************************************** * PARAMS ***************************************************/ #define STG_NTARGETS 8 #define STG_NLUNS 8 /*************************************************** * DEBUG ***************************************************/ #ifdef STG_DEBUG static int stg_debug; #endif /* STG_DEBUG */ #ifdef STG_STATICS static struct stg_statics { int arbit_fail_0; int arbit_fail_1; int disconnect; int reselect; } stg_statics; #endif /* STG_STATICS */ /*************************************************** * IO control flags ***************************************************/ #define STG_FIFO_INTERRUPTS 0x0001 #define STG_WAIT_FOR_SELECT 0x0100 int stg_io_control = STG_IO_CONTROL_FLAGS; /*************************************************** * DEVICE STRUCTURE ***************************************************/ extern struct cfdriver stg_cd; /************************************************************** * DECLARE **************************************************************/ /* static */ static void stg_pio_read(struct stg_softc *, struct targ_info *, u_int); static void stg_pio_write(struct stg_softc *, struct targ_info *, u_int); static int stg_xfer(struct stg_softc *, u_int8_t *, int, int, int); static int stg_msg(struct stg_softc *, struct targ_info *, u_int); static int stg_reselected(struct stg_softc *); static 
int stg_disconnected(struct stg_softc *, struct targ_info *); static __inline void stg_pdma_end(struct stg_softc *, struct targ_info *); static int stghw_select_targ_wait(struct stg_softc *, int); static int stghw_check(struct stg_softc *); static void stghw_init(struct stg_softc *); static int stg_negate_signal(struct stg_softc *, u_int8_t, u_char *); static int stg_expect_signal(struct stg_softc *, u_int8_t, u_int8_t); static int stg_world_start(struct stg_softc *, int); static int stghw_start_selection(struct stg_softc *sc, struct slccb *); static void stghw_bus_reset(struct stg_softc *); static void stghw_attention(struct stg_softc *); static int stg_target_nexus_establish(struct stg_softc *); static int stg_lun_nexus_establish(struct stg_softc *); static int stg_ccb_nexus_establish(struct stg_softc *); static int stg_targ_init(struct stg_softc *, struct targ_info *, int); static __inline void stghw_bcr_write_1(struct stg_softc *, u_int8_t); static int stg_timeout(struct stg_softc *); static void stg_selection_done_and_expect_msgout(struct stg_softc *); struct scsi_low_funcs stgfuncs = { SC_LOW_INIT_T stg_world_start, SC_LOW_BUSRST_T stghw_bus_reset, SC_LOW_TARG_INIT_T stg_targ_init, SC_LOW_LUN_INIT_T NULL, SC_LOW_SELECT_T stghw_start_selection, SC_LOW_NEXUS_T stg_lun_nexus_establish, SC_LOW_NEXUS_T stg_ccb_nexus_establish, SC_LOW_ATTEN_T stghw_attention, SC_LOW_MSG_T stg_msg, SC_LOW_TIMEOUT_T stg_timeout, SC_LOW_POLL_T stgintr, NULL, }; /**************************************************** * hwfuncs ****************************************************/ static __inline void stghw_bcr_write_1(struct stg_softc *sc, u_int8_t bcv) { bus_write_1(sc->port_res, tmc_bctl, bcv); sc->sc_busimg = bcv; } static int stghw_check(sc) struct stg_softc *sc; { struct scsi_low_softc *slp = &sc->sc_sclow; u_int fcbsize, fcb; u_int16_t lsb, msb; lsb = bus_read_1(sc->port_res, tmc_idlsb); msb = bus_read_1(sc->port_res, tmc_idmsb); switch (msb << 8 | lsb) { case 0x6127: /* TMCCHIP_1800 not supported. 
(it's my policy) */ sc->sc_chip = TMCCHIP_1800; return EINVAL; case 0x60e9: if (bus_read_1(sc->port_res, tmc_cfg2) & 0x02) { sc->sc_chip = TMCCHIP_18C30; sc->sc_fsz = TMC18C30_FIFOSZ; fcbsize = TMC18C30_FCBSZ; } else { sc->sc_chip = TMCCHIP_18C50; sc->sc_fsz = TMC18C50_FIFOSZ; fcbsize = TMC18C50_FCBSZ; } break; default: sc->sc_chip = TMCCHIP_UNK; return ENODEV; } sc->sc_fcRinit = FCTL_INTEN; sc->sc_fcWinit = FCTL_PARENB | FCTL_INTEN; if (slp->sl_cfgflags & CFG_NOATTEN) sc->sc_imsg = 0; else sc->sc_imsg = BCTL_ATN; sc->sc_busc = BCTL_BUSEN; sc->sc_wthold = fcbsize + 256; sc->sc_rthold = fcbsize - 256; sc->sc_maxwsize = sc->sc_fsz; fcb = fcbsize / (sc->sc_fsz / 16); sc->sc_icinit = ICTL_CD | ICTL_SEL | ICTL_ARBIT | fcb; return 0; } static void stghw_init(sc) struct stg_softc *sc; { bus_write_1(sc->port_res, tmc_ictl, 0); stghw_bcr_write_1(sc, BCTL_BUSFREE); bus_write_1(sc->port_res, tmc_fctl, sc->sc_fcRinit | FCTL_CLRFIFO | FCTL_CLRINT); bus_write_1(sc->port_res, tmc_fctl, sc->sc_fcRinit); bus_write_1(sc->port_res, tmc_ictl, sc->sc_icinit); bus_write_1(sc->port_res, tmc_ssctl, 0); } static int stg_targ_init(sc, ti, action) struct stg_softc *sc; struct targ_info *ti; int action; { struct stg_targ_info *sti = (void *) ti; if (action == SCSI_LOW_INFO_ALLOC || action == SCSI_LOW_INFO_REVOKE) { ti->ti_width = SCSI_LOW_BUS_WIDTH_8; ti->ti_maxsynch.period = 0; ti->ti_maxsynch.offset = 0; sti->sti_reg_synch = 0; } return 0; } /**************************************************** * scsi low interface ****************************************************/ static void stghw_attention(sc) struct stg_softc *sc; { sc->sc_busc |= BCTL_ATN; sc->sc_busimg |= BCTL_ATN; bus_write_1(sc->port_res, tmc_bctl, sc->sc_busimg); DELAY(10); } static void stghw_bus_reset(sc) struct stg_softc *sc; { bus_write_1(sc->port_res, tmc_ictl, 0); bus_write_1(sc->port_res, tmc_fctl, 0); stghw_bcr_write_1(sc, BCTL_RST); DELAY(100000); stghw_bcr_write_1(sc, BCTL_BUSFREE); } static int stghw_start_selection(sc, cb) struct stg_softc *sc; struct slccb *cb; { struct targ_info *ti = cb->ti; register u_int8_t stat; sc->sc_tmaxcnt = cb->ccb_tcmax * 1000 * 1000; sc->sc_dataout_timeout = 0; sc->sc_ubf_timeout = 0; stghw_bcr_write_1(sc, BCTL_BUSFREE); bus_write_1(sc->port_res, tmc_ictl, sc->sc_icinit); stat = bus_read_1(sc->port_res, tmc_astat); if ((stat & ASTAT_INT) != 0) { return SCSI_LOW_START_FAIL; } bus_write_1(sc->port_res, tmc_scsiid, sc->sc_idbit); bus_write_1(sc->port_res, tmc_fctl, sc->sc_fcRinit | FCTL_ARBIT); SCSI_LOW_SETUP_PHASE(ti, PH_ARBSTART); return SCSI_LOW_START_OK; } static int stg_world_start(sc, fdone) struct stg_softc *sc; int fdone; { struct scsi_low_softc *slp = &sc->sc_sclow; int error; if ((slp->sl_cfgflags & CFG_NOPARITY) == 0) sc->sc_fcRinit |= FCTL_PARENB; else sc->sc_fcRinit &= ~FCTL_PARENB; if ((error = stghw_check(sc)) != 0) return error; stghw_init(sc); scsi_low_bus_reset(slp); stghw_init(sc); return 0; } static int stg_msg(sc, ti, msg) struct stg_softc *sc; struct targ_info *ti; u_int msg; { struct stg_targ_info *sti = (void *) ti; u_int period, offset; if ((msg & SCSI_LOW_MSG_WIDE) != 0) { if (ti->ti_width != SCSI_LOW_BUS_WIDTH_8) { ti->ti_width = SCSI_LOW_BUS_WIDTH_8; return EINVAL; } return 0; } if ((msg & SCSI_LOW_MSG_SYNCH) == 0) return 0; period = ti->ti_maxsynch.period; offset = ti->ti_maxsynch.offset; period = period << 2; if (period >= 200) { sti->sti_reg_synch = (period - 200) / 50; if (period % 50) sti->sti_reg_synch ++; sti->sti_reg_synch |= SSCTL_SYNCHEN; } else if (period >= 100) { 
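/* 100-199ns period: round the divisor up and enable fast synchronous transfer (SSCTL_FSYNCHEN) alongside SSCTL_SYNCHEN */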
sti->sti_reg_synch = (period - 100) / 50; if (period % 50) sti->sti_reg_synch ++; sti->sti_reg_synch |= SSCTL_SYNCHEN | SSCTL_FSYNCHEN; } bus_write_1(sc->port_res, tmc_ssctl, sti->sti_reg_synch); return 0; } /************************************************************** * General probe attach **************************************************************/ int stgprobesubr(struct resource *res, u_int dvcfg) { u_int16_t lsb, msb; lsb = bus_read_1(res, tmc_idlsb); msb = bus_read_1(res, tmc_idmsb); switch (msb << 8 | lsb) { default: return 0; case 0x6127: /* not supported */ return 0; case 0x60e9: return 1; } return 0; } void stgattachsubr(sc) struct stg_softc *sc; { struct scsi_low_softc *slp = &sc->sc_sclow; printf("\n"); sc->sc_idbit = (1 << slp->sl_hostid); slp->sl_funcs = &stgfuncs; sc->sc_tmaxcnt = SCSI_LOW_MIN_TOUT * 1000 * 1000; /* default */ slp->sl_flags |= HW_READ_PADDING; slp->sl_cfgflags |= CFG_ASYNC; /* XXX */ (void) scsi_low_attach(slp, 0, STG_NTARGETS, STG_NLUNS, sizeof(struct stg_targ_info), 0); + gone_in(12, "stg(4) driver"); } /************************************************************** * PDMA functions **************************************************************/ static __inline void stg_pdma_end(sc, ti) struct stg_softc *sc; struct targ_info *ti; { struct scsi_low_softc *slp = &sc->sc_sclow; struct slccb *cb = slp->sl_Qnexus; u_int len, tres; slp->sl_flags &= ~HW_PDMASTART; sc->sc_icinit &= ~ICTL_FIFO; sc->sc_dataout_timeout = 0; if (cb == NULL) { slp->sl_error |= PDMAERR; goto out; } if (ti->ti_phase == PH_DATA) { len = bus_read_2(sc->port_res, tmc_fdcnt); if (slp->sl_scp.scp_direction == SCSI_LOW_WRITE) { if (len != 0) { tres = len + slp->sl_scp.scp_datalen; if (tres <= (u_int) cb->ccb_scp.scp_datalen) { slp->sl_scp.scp_data -= len; slp->sl_scp.scp_datalen = tres; } else { slp->sl_error |= PDMAERR; device_printf(slp->sl_dev, "len %x >= datalen %x\n", len, slp->sl_scp.scp_datalen); } } } else if (slp->sl_scp.scp_direction == SCSI_LOW_READ) { if (len != 0) { slp->sl_error |= PDMAERR; device_printf(slp->sl_dev, "len %x left in fifo\n", len); } } scsi_low_data_finish(slp); } else { device_printf(slp->sl_dev, "data phase miss\n"); slp->sl_error |= PDMAERR; } out: bus_write_1(sc->port_res, tmc_fctl, sc->sc_fcRinit); } static void stg_pio_read(sc, ti, thold) struct stg_softc *sc; struct targ_info *ti; u_int thold; { struct scsi_low_softc *slp = &sc->sc_sclow; struct sc_p *sp = &slp->sl_scp; int tout; u_int res; u_int8_t stat; if ((slp->sl_flags & HW_PDMASTART) == 0) { bus_write_1(sc->port_res, tmc_fctl, sc->sc_fcRinit | FCTL_FIFOEN); slp->sl_flags |= HW_PDMASTART; } tout = sc->sc_tmaxcnt; while (tout -- > 0) { if (thold > 0) { res = bus_read_2(sc->port_res, tmc_fdcnt); if (res < thold) { bus_write_1(sc->port_res, tmc_ictl, sc->sc_icinit); break; } } else { stat = bus_read_1(sc->port_res, tmc_bstat); res = bus_read_2(sc->port_res, tmc_fdcnt); if (res == 0) { if ((stat & PHASE_MASK) != DATA_IN_PHASE) break; if (sp->scp_datalen <= 0) break; DELAY(1); continue; } } /* The assumption res != 0 is valid here */ if (res > sp->scp_datalen) { if (res == (u_int) -1) break; slp->sl_error |= PDMAERR; if ((slp->sl_flags & HW_READ_PADDING) == 0) { device_printf(slp->sl_dev, "read padding required\n"); break; } sp->scp_datalen = 0; if (res > STG_MAX_DATA_SIZE) res = STG_MAX_DATA_SIZE; while (res -- > 0) { (void) bus_read_1(sc->port_res, tmc_rfifo); } continue; } sp->scp_datalen -= res; if (res & 1) { *sp->scp_data = bus_read_1(sc->port_res, tmc_rfifo); sp->scp_data ++; res --; }
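/* the remaining even-sized chunk drains from the FIFO as 16-bit reads */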
bus_read_multi_2(sc->port_res, tmc_rfifo, (u_int16_t *) sp->scp_data, res >> 1); sp->scp_data += res; } if (tout <= 0) device_printf(slp->sl_dev, "pio read timeout\n"); } static void stg_pio_write(sc, ti, thold) struct stg_softc *sc; struct targ_info *ti; u_int thold; { struct scsi_low_softc *slp = &sc->sc_sclow; struct sc_p *sp = &slp->sl_scp; u_int res; int tout; register u_int8_t stat; if ((slp->sl_flags & HW_PDMASTART) == 0) { stat = sc->sc_fcWinit | FCTL_FIFOEN | FCTL_FIFOW; bus_write_1(sc->port_res, tmc_fctl, stat | FCTL_CLRFIFO); bus_write_1(sc->port_res, tmc_fctl, stat); slp->sl_flags |= HW_PDMASTART; } tout = sc->sc_tmaxcnt; while (tout -- > 0) { stat = bus_read_1(sc->port_res, tmc_bstat); if ((stat & PHASE_MASK) != DATA_OUT_PHASE) break; if (sp->scp_datalen <= 0) { if (sc->sc_dataout_timeout == 0) sc->sc_dataout_timeout = SCSI_LOW_TIMEOUT_HZ; break; } if (thold > 0) { res = bus_read_2(sc->port_res, tmc_fdcnt); if (res > thold) { bus_write_1(sc->port_res, tmc_ictl, sc->sc_icinit); break; } } else { res = bus_read_2(sc->port_res, tmc_fdcnt); if (res > sc->sc_maxwsize / 2) { DELAY(1); continue; } } if (res == (u_int) -1) break; res = sc->sc_maxwsize - res; if (res > sp->scp_datalen) res = sp->scp_datalen; sp->scp_datalen -= res; if ((res & 0x1) != 0) { bus_write_1(sc->port_res, tmc_wfifo, *sp->scp_data); sp->scp_data ++; res --; } bus_write_multi_2(sc->port_res, tmc_wfifo, (u_int16_t *) sp->scp_data, res >> 1); sp->scp_data += res; } if (tout <= 0) device_printf(slp->sl_dev, "pio write timeout\n"); } static int stg_negate_signal(struct stg_softc *sc, u_int8_t mask, u_char *s) { struct scsi_low_softc *slp = &sc->sc_sclow; int wc; u_int8_t regv; for (wc = 0; wc < STG_DELAY_MAX / STG_DELAY_INTERVAL; wc ++) { regv = bus_read_1(sc->port_res, tmc_bstat); if (regv == (u_int8_t) -1) return -1; if ((regv & mask) == 0) return 1; DELAY(STG_DELAY_INTERVAL); } device_printf(slp->sl_dev, "%s stg_negate_signal timeout\n", s); return -1; } static int stg_expect_signal(struct stg_softc *sc, u_int8_t phase, u_int8_t mask) { struct scsi_low_softc *slp = &sc->sc_sclow; int wc; u_int8_t ph; phase &= PHASE_MASK; for (wc = 0; wc < STG_DELAY_MAX / STG_DELAY_INTERVAL; wc ++) { ph = bus_read_1(sc->port_res, tmc_bstat); if (ph == (u_int8_t) -1) return -1; if ((ph & PHASE_MASK) != phase) return 0; if ((ph & mask) != 0) return 1; DELAY(STG_DELAY_INTERVAL); } device_printf(slp->sl_dev, "stg_expect_signal timeout\n"); return -1; } static int stg_xfer(sc, buf, len, phase, clear_atn) struct stg_softc *sc; u_int8_t *buf; int len; int phase; int clear_atn; { int rv, ptr; if (phase & BSTAT_IO) bus_write_1(sc->port_res, tmc_fctl, sc->sc_fcRinit); else bus_write_1(sc->port_res, tmc_fctl, sc->sc_fcWinit); for (ptr = 0; len > 0; len --) { rv = stg_expect_signal(sc, phase, BSTAT_REQ); if (rv <= 0) goto bad; if (len == 1 && clear_atn != 0) { sc->sc_busc &= ~BCTL_ATN; stghw_bcr_write_1(sc, sc->sc_busc); SCSI_LOW_DEASSERT_ATN(&sc->sc_sclow); } if (phase & BSTAT_IO) { buf[ptr ++] = bus_read_1(sc->port_res, tmc_rdata); } else { bus_write_1(sc->port_res, tmc_wdata, buf[ptr ++]); } stg_negate_signal(sc, BSTAT_ACK, "xfer"); } bad: bus_write_1(sc->port_res, tmc_fctl, sc->sc_fcRinit); return len; } /************************************************************** * disconnect & reselect (HW low) **************************************************************/ static int stg_reselected(sc) struct stg_softc *sc; { struct scsi_low_softc *slp = &sc->sc_sclow; int tout; u_int sid; u_int8_t regv; if (slp->sl_selid != NULL) { /* XXX: * 
Selection vs Reselection conflicts. */ bus_write_1(sc->port_res, tmc_fctl, sc->sc_fcRinit); stghw_bcr_write_1(sc, BCTL_BUSFREE); } else if (slp->sl_Tnexus != NULL) { device_printf(slp->sl_dev, "unexpected termination\n"); stg_disconnected(sc, slp->sl_Tnexus); } /* XXX: * We should ack the reselection as soon as possible, * because the target would abort the current reselection seq * due to reselection timeout. */ tout = STG_DELAY_SELECT_POLLING_MAX; while (tout -- > 0) { regv = bus_read_1(sc->port_res, tmc_bstat); if ((regv & (BSTAT_IO | BSTAT_SEL | BSTAT_BSY)) == (BSTAT_IO | BSTAT_SEL)) { DELAY(1); regv = bus_read_1(sc->port_res, tmc_bstat); if ((regv & (BSTAT_IO | BSTAT_SEL | BSTAT_BSY)) == (BSTAT_IO | BSTAT_SEL)) goto reselect_start; } DELAY(1); } device_printf(slp->sl_dev, "reselection timeout I\n"); return EJUSTRETURN; reselect_start: sid = (u_int) bus_read_1(sc->port_res, tmc_scsiid); if ((sid & sc->sc_idbit) == 0) { /* not us */ return EJUSTRETURN; } bus_write_1(sc->port_res, tmc_fctl, sc->sc_fcRinit | FCTL_CLRFIFO | FCTL_CLRINT); bus_write_1(sc->port_res, tmc_fctl, sc->sc_fcRinit); stghw_bcr_write_1(sc, sc->sc_busc | BCTL_BSY); while (tout -- > 0) { regv = bus_read_1(sc->port_res, tmc_bstat); if ((regv & (BSTAT_SEL | BSTAT_BSY)) == BSTAT_BSY) goto reselected; DELAY(1); } device_printf(slp->sl_dev, "reselection timeout II\n"); return EJUSTRETURN; reselected: sid &= ~sc->sc_idbit; sid = ffs(sid) - 1; if (scsi_low_reselected(slp, sid) == NULL) return EJUSTRETURN; #ifdef STG_STATICS stg_statics.reselect ++; #endif /* STG_STATICS */ return EJUSTRETURN; } static int stg_disconnected(sc, ti) struct stg_softc *sc; struct targ_info *ti; { struct scsi_low_softc *slp = &sc->sc_sclow; /* clear bus status & fifo */ bus_write_1(sc->port_res, tmc_fctl, sc->sc_fcRinit | FCTL_CLRFIFO); bus_write_1(sc->port_res, tmc_fctl, sc->sc_fcRinit); stghw_bcr_write_1(sc, BCTL_BUSFREE); sc->sc_icinit &= ~ICTL_FIFO; sc->sc_busc &= ~BCTL_ATN; sc->sc_dataout_timeout = 0; sc->sc_ubf_timeout = 0; #ifdef STG_STATICS stg_statics.disconnect ++; #endif /* STG_STATICS */ scsi_low_disconnected(slp, ti); return 1; } /************************************************************** * SEQUENCER **************************************************************/ static int stg_target_nexus_establish(sc) struct stg_softc *sc; { struct scsi_low_softc *slp = &sc->sc_sclow; struct targ_info *ti = slp->sl_Tnexus; struct stg_targ_info *sti = (void *) ti; bus_write_1(sc->port_res, tmc_ssctl, sti->sti_reg_synch); if ((stg_io_control & STG_FIFO_INTERRUPTS) != 0) { sc->sc_icinit |= ICTL_FIFO; } return 0; } static int stg_lun_nexus_establish(sc) struct stg_softc *sc; { return 0; } static int stg_ccb_nexus_establish(sc) struct stg_softc *sc; { struct scsi_low_softc *slp = &sc->sc_sclow; struct slccb *cb = slp->sl_Qnexus; sc->sc_tmaxcnt = cb->ccb_tcmax * 1000 * 1000; return 0; } #define STGHW_SELECT_INTERVAL 10 static int stghw_select_targ_wait(sc, mu) struct stg_softc *sc; int mu; { mu = mu / STGHW_SELECT_INTERVAL; while (mu -- > 0) { if ((bus_read_1(sc->port_res, tmc_bstat) & BSTAT_BSY) == 0) { DELAY(STGHW_SELECT_INTERVAL); continue; } DELAY(1); if ((bus_read_1(sc->port_res, tmc_bstat) & BSTAT_BSY) != 0) { return 0; } } return ENXIO; } static void stg_selection_done_and_expect_msgout(sc) struct stg_softc *sc; { struct scsi_low_softc *slp = &sc->sc_sclow; bus_write_1(sc->port_res, tmc_fctl, sc->sc_fcRinit | FCTL_CLRFIFO); bus_write_1(sc->port_res, tmc_fctl, sc->sc_fcRinit); stghw_bcr_write_1(sc, sc->sc_imsg | sc->sc_busc);
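/* sc_imsg carries BCTL_ATN (unless CFG_NOATTEN), so ATN is already driven above; record the assertion so the scsi_low layer expects a MESSAGE OUT phase next */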
SCSI_LOW_ASSERT_ATN(slp); } int stgintr(arg) void *arg; { struct stg_softc *sc = arg; struct scsi_low_softc *slp = &sc->sc_sclow; struct targ_info *ti; struct buf *bp; u_int derror, flags; int len; u_int8_t status, astatus, regv; /******************************************* * interrupt check *******************************************/ if (slp->sl_flags & HW_INACTIVE) return 0; astatus = bus_read_1(sc->port_res, tmc_astat); status = bus_read_1(sc->port_res, tmc_bstat); if ((astatus & ASTAT_STATMASK) == 0 || astatus == (u_int8_t) -1) return 0; bus_write_1(sc->port_res, tmc_ictl, 0); if (astatus & ASTAT_SCSIRST) { bus_write_1(sc->port_res, tmc_fctl, sc->sc_fcRinit | FCTL_CLRFIFO); bus_write_1(sc->port_res, tmc_fctl, sc->sc_fcRinit); bus_write_1(sc->port_res, tmc_ictl, 0); scsi_low_restart(slp, SCSI_LOW_RESTART_SOFT, "bus reset (power off?)"); return 1; } /******************************************* * debug section *******************************************/ #ifdef STG_DEBUG if (stg_debug) { scsi_low_print(slp, NULL); device_printf(slp->sl_dev, "st %x ist %x\n\n", status, astatus); #ifdef KDB if (stg_debug > 1) kdb_enter(KDB_WHY_CAM, "stg"); #endif /* KDB */ } #endif /* STG_DEBUG */ /******************************************* * reselection & nexus *******************************************/ if ((status & RESEL_PHASE_MASK) == PHASE_RESELECTED) { if (stg_reselected(sc) == EJUSTRETURN) goto out; } if ((ti = slp->sl_Tnexus) == NULL) return 0; derror = 0; if ((astatus & ASTAT_PARERR) != 0 && ti->ti_phase != PH_ARBSTART && (sc->sc_fcRinit & FCTL_PARENB) != 0) { slp->sl_error |= PARITYERR; derror = SCSI_LOW_DATA_PE; if ((status & PHASE_MASK) == MESSAGE_IN_PHASE) scsi_low_assert_msg(slp, ti, SCSI_LOW_MSG_PARITY, 0); else scsi_low_assert_msg(slp, ti, SCSI_LOW_MSG_ERROR, 1); } /******************************************* * arbitration & selection *******************************************/ switch (ti->ti_phase) { case PH_ARBSTART: if ((astatus & ASTAT_ARBIT) == 0) { #ifdef STG_STATICS stg_statics.arbit_fail_0 ++; #endif /* STG_STATICS */ goto arb_fail; } status = bus_read_1(sc->port_res, tmc_bstat); if ((status & BSTAT_IO) != 0) { /* XXX: * Selection vs Reselection conflicts. */ #ifdef STG_STATICS stg_statics.arbit_fail_1 ++; #endif /* STG_STATICS */ arb_fail: bus_write_1(sc->port_res, tmc_fctl, sc->sc_fcRinit); stghw_bcr_write_1(sc, BCTL_BUSFREE); scsi_low_arbit_fail(slp, slp->sl_Qnexus); goto out; } /* * selection assert start.
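	 * Arbitration has been won: put our ID and the target's on the
	 * bus, assert SEL via the bus control register, and optionally
	 * busy-wait for the target (STG_WAIT_FOR_SELECT) before
	 * expecting the MESSAGE OUT phase.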
*/ SCSI_LOW_SETUP_PHASE(ti, PH_SELSTART); scsi_low_arbit_win(slp); bus_write_1(sc->port_res, tmc_scsiid, sc->sc_idbit | (1 << ti->ti_id)); stghw_bcr_write_1(sc, sc->sc_imsg | sc->sc_busc | BCTL_SEL); bus_write_1(sc->port_res, tmc_fctl, sc->sc_fcWinit); if ((stg_io_control & STG_WAIT_FOR_SELECT) != 0) { /* selection abort delay 200 + 100 micro sec */ if (stghw_select_targ_wait(sc, 300) == 0) { SCSI_LOW_SETUP_PHASE(ti, PH_SELECTED); stg_selection_done_and_expect_msgout(sc); } } goto out; case PH_SELSTART: if ((status & BSTAT_BSY) == 0) { /* selection timeout delay 250 ms */ if (stghw_select_targ_wait(sc, 250 * 1000) != 0) { stg_disconnected(sc, ti); goto out; } } SCSI_LOW_SETUP_PHASE(ti, PH_SELECTED); stg_selection_done_and_expect_msgout(sc); goto out; case PH_SELECTED: if ((status & BSTAT_REQ) == 0) goto out; stg_target_nexus_establish(sc); break; case PH_RESEL: if ((status & BSTAT_REQ) == 0) goto out; /* clear a busy line */ bus_write_1(sc->port_res, tmc_fctl, sc->sc_fcRinit); stghw_bcr_write_1(sc, sc->sc_busc); stg_target_nexus_establish(sc); if ((status & PHASE_MASK) != MESSAGE_IN_PHASE) { device_printf(slp->sl_dev, "unexpected phase after reselect\n"); slp->sl_error |= FATALIO; scsi_low_assert_msg(slp, ti, SCSI_LOW_MSG_ABORT, 1); goto out; } break; } /******************************************* * data phase *******************************************/ if ((slp->sl_flags & HW_PDMASTART) && STG_IS_PHASE_DATA(status) == 0) { if (slp->sl_scp.scp_direction == SCSI_LOW_READ) stg_pio_read(sc, ti, 0); stg_pdma_end(sc, ti); } /******************************************* * scsi seq *******************************************/ switch (status & PHASE_MASK) { case COMMAND_PHASE: if (stg_expect_signal(sc, COMMAND_PHASE, BSTAT_REQ) <= 0) break; SCSI_LOW_SETUP_PHASE(ti, PH_CMD); if (scsi_low_cmd(slp, ti) != 0) { scsi_low_attention(slp); } if (stg_xfer(sc, slp->sl_scp.scp_cmd, slp->sl_scp.scp_cmdlen, COMMAND_PHASE, 0) != 0) { device_printf(slp->sl_dev, "CMDOUT short\n"); } break; case DATA_OUT_PHASE: SCSI_LOW_SETUP_PHASE(ti, PH_DATA); if (scsi_low_data(slp, ti, &bp, SCSI_LOW_WRITE) != 0) { scsi_low_attention(slp); } if ((sc->sc_icinit & ICTL_FIFO) != 0) stg_pio_write(sc, ti, sc->sc_wthold); else stg_pio_write(sc, ti, 0); break; case DATA_IN_PHASE: SCSI_LOW_SETUP_PHASE(ti, PH_DATA); if (scsi_low_data(slp, ti, &bp, SCSI_LOW_READ) != 0) { scsi_low_attention(slp); } if ((sc->sc_icinit & ICTL_FIFO) != 0) stg_pio_read(sc, ti, sc->sc_rthold); else stg_pio_read(sc, ti, 0); break; case STATUS_PHASE: regv = stg_expect_signal(sc, STATUS_PHASE, BSTAT_REQ); if (regv <= 0) break; SCSI_LOW_SETUP_PHASE(ti, PH_STAT); regv = bus_read_1(sc->port_res, tmc_sdna); if (scsi_low_statusin(slp, ti, regv | derror) != 0) { scsi_low_attention(slp); } if (regv != bus_read_1(sc->port_res, tmc_rdata)) { device_printf(slp->sl_dev, "STATIN: data mismatch\n"); } stg_negate_signal(sc, BSTAT_ACK, "statin"); break; case MESSAGE_OUT_PHASE: if (stg_expect_signal(sc, MESSAGE_OUT_PHASE, BSTAT_REQ) <= 0) break; SCSI_LOW_SETUP_PHASE(ti, PH_MSGOUT); flags = (ti->ti_ophase != ti->ti_phase) ? 
SCSI_LOW_MSGOUT_INIT : 0; len = scsi_low_msgout(slp, ti, flags); if (len > 1 && slp->sl_atten == 0) { scsi_low_attention(slp); } if (stg_xfer(sc, ti->ti_msgoutstr, len, MESSAGE_OUT_PHASE, slp->sl_clear_atten) != 0) { device_printf(slp->sl_dev, "MSGOUT short\n"); } else { if (slp->sl_msgphase >= MSGPH_ABORT) { stg_disconnected(sc, ti); } } break; case MESSAGE_IN_PHASE: /* confirm phase and req signal */ if (stg_expect_signal(sc, MESSAGE_IN_PHASE, BSTAT_REQ) <= 0) break; SCSI_LOW_SETUP_PHASE(ti, PH_MSGIN); /* read data with NOACK */ regv = bus_read_1(sc->port_res, tmc_sdna); if (scsi_low_msgin(slp, ti, derror | regv) == 0) { if (scsi_low_is_msgout_continue(ti, 0) != 0) { scsi_low_attention(slp); } } /* read data with ACK */ if (regv != bus_read_1(sc->port_res, tmc_rdata)) { device_printf(slp->sl_dev, "MSGIN: data mismatch\n"); } /* wait for the ack negated */ stg_negate_signal(sc, BSTAT_ACK, "msgin"); if (slp->sl_msgphase != 0 && slp->sl_msgphase < MSGPH_ABORT) { stg_disconnected(sc, ti); } break; case BUSFREE_PHASE: device_printf(slp->sl_dev, "unexpected disconnect\n"); stg_disconnected(sc, ti); break; default: slp->sl_error |= FATALIO; device_printf(slp->sl_dev, "unknown phase bus %x intr %x\n", status, astatus); break; } out: bus_write_1(sc->port_res, tmc_ictl, sc->sc_icinit); return 1; } static int stg_timeout(sc) struct stg_softc *sc; { struct scsi_low_softc *slp = &sc->sc_sclow; int tout, count; u_int8_t status; if (slp->sl_Tnexus == NULL) return 0; status = bus_read_1(sc->port_res, tmc_bstat); if ((status & PHASE_MASK) == 0) { if (sc->sc_ubf_timeout ++ == 0) return 0; device_printf(slp->sl_dev, "unexpected bus free detected\n"); slp->sl_error |= FATALIO; scsi_low_print(slp, slp->sl_Tnexus); stg_disconnected(sc, slp->sl_Tnexus); return 0; } switch (status & PHASE_MASK) { case DATA_OUT_PHASE: if (sc->sc_dataout_timeout == 0) break; if ((status & BSTAT_REQ) == 0) break; if (bus_read_2(sc->port_res, tmc_fdcnt) != 0) break; if ((-- sc->sc_dataout_timeout) > 0) break; slp->sl_error |= PDMAERR; if ((slp->sl_flags & HW_WRITE_PADDING) == 0) { device_printf(slp->sl_dev, "write padding required\n"); break; } bus_write_1(sc->port_res, tmc_ictl, 0); tout = STG_DELAY_MAX; while (tout --) { status = bus_read_1(sc->port_res, tmc_bstat); if ((status & PHASE_MASK) != DATA_OUT_PHASE) break; if (bus_read_2(sc->port_res, tmc_fdcnt) != 0) { DELAY(1); continue; } for (count = sc->sc_maxwsize; count > 0; count --) bus_write_1(sc->port_res, tmc_wfifo, 0); } status = bus_read_1(sc->port_res, tmc_bstat); if ((status & PHASE_MASK) == DATA_OUT_PHASE) sc->sc_dataout_timeout = SCSI_LOW_TIMEOUT_HZ; bus_write_1(sc->port_res, tmc_ictl, sc->sc_icinit); break; default: break; } return 0; }