diff --git a/sys/dev/smartpqi/smartpqi_cam.c b/sys/dev/smartpqi/smartpqi_cam.c index 1278434131d9..1ba83f4918bb 100644 --- a/sys/dev/smartpqi/smartpqi_cam.c +++ b/sys/dev/smartpqi/smartpqi_cam.c @@ -1,1354 +1,1354 @@ /*- * Copyright 2016-2021 Microchip Technology, Inc. and/or its subsidiaries. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* $FreeBSD$ */ /* * CAM interface for smartpqi driver */ #include "smartpqi_includes.h" /* * Set cam sim properties of the smartpqi adapter. */ static void update_sim_properties(struct cam_sim *sim, struct ccb_pathinq *cpi) { pqisrc_softstate_t *softs = (struct pqisrc_softstate *) cam_sim_softc(sim); device_t dev = softs->os_specific.pqi_dev; DBG_FUNC("IN\n"); cpi->version_num = 1; cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16; cpi->target_sprt = 0; cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED; cpi->hba_eng_cnt = 0; cpi->max_lun = PQI_MAX_MULTILUN; cpi->max_target = 1088; cpi->maxio = (softs->pqi_cap.max_sg_elem - 1) * PAGE_SIZE; cpi->initiator_id = 255; strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); strlcpy(cpi->hba_vid, "Microsemi", HBA_IDLEN); strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); cpi->unit_number = cam_sim_unit(sim); cpi->bus_id = cam_sim_bus(sim); cpi->base_transfer_speed = 1200000; /* Base bus speed in KB/sec */ cpi->protocol = PROTO_SCSI; cpi->protocol_version = SCSI_REV_SPC4; cpi->transport = XPORT_SPI; cpi->transport_version = 2; cpi->ccb_h.status = CAM_REQ_CMP; cpi->hba_vendor = pci_get_vendor(dev); cpi->hba_device = pci_get_device(dev); cpi->hba_subvendor = pci_get_subvendor(dev); cpi->hba_subdevice = pci_get_subdevice(dev); DBG_FUNC("OUT\n"); } /* * Get transport settings of the smartpqi adapter */ static void get_transport_settings(struct pqisrc_softstate *softs, struct ccb_trans_settings *cts) { struct ccb_trans_settings_scsi *scsi = &cts->proto_specific.scsi; struct ccb_trans_settings_sas *sas = &cts->xport_specific.sas; struct ccb_trans_settings_spi *spi = &cts->xport_specific.spi; DBG_FUNC("IN\n"); cts->protocol = PROTO_SCSI; cts->protocol_version = SCSI_REV_SPC4; cts->transport = XPORT_SPI; cts->transport_version = 2; spi->valid = CTS_SPI_VALID_DISC; spi->flags = CTS_SPI_FLAGS_DISC_ENB; scsi->valid = CTS_SCSI_VALID_TQ; scsi->flags = CTS_SCSI_FLAGS_TAG_ENB; sas->valid = CTS_SAS_VALID_SPEED; cts->ccb_h.status = CAM_REQ_CMP; 
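	/*
	 * Illustrative note (example values are an assumption, not from this
	 * file): the maxio advertised in update_sim_properties() above is
	 * bounded by the adapter's scatter/gather limit; e.g. with
	 * max_sg_elem = 64 and a 4 KiB PAGE_SIZE, maxio = (64 - 1) * 4096 =
	 * 258048 bytes.
	 */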
DBG_FUNC("OUT\n"); } /* * Add the target to CAM layer and rescan, when a new device is found */ void os_add_device(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device) { union ccb *ccb; DBG_FUNC("IN\n"); if(softs->os_specific.sim_registered) { if ((ccb = xpt_alloc_ccb_nowait()) == NULL) { DBG_ERR("rescan failed (can't allocate CCB)\n"); return; } if (xpt_create_path(&ccb->ccb_h.path, NULL, cam_sim_path(softs->os_specific.sim), device->target, device->lun) != CAM_REQ_CMP) { DBG_ERR("rescan failed (can't create path)\n"); xpt_free_ccb(ccb); return; } xpt_rescan(ccb); } DBG_FUNC("OUT\n"); } /* * Remove the device from CAM layer when deleted or hot removed */ void os_remove_device(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device) { struct cam_path *tmppath; DBG_FUNC("IN\n"); if(softs->os_specific.sim_registered) { if (xpt_create_path(&tmppath, NULL, cam_sim_path(softs->os_specific.sim), device->target, device->lun) != CAM_REQ_CMP) { DBG_ERR("unable to create path for async event"); return; } xpt_async(AC_LOST_DEVICE, tmppath, NULL); xpt_free_path(tmppath); softs->device_list[device->target][device->lun] = NULL; pqisrc_free_device(softs, device); } DBG_FUNC("OUT\n"); } /* * Function to release the frozen simq */ static void pqi_release_camq(rcb_t *rcb) { pqisrc_softstate_t *softs; struct ccb_scsiio *csio; csio = (struct ccb_scsiio *)&rcb->cm_ccb->csio; softs = rcb->softs; DBG_FUNC("IN\n"); if (softs->os_specific.pqi_flags & PQI_FLAG_BUSY) { softs->os_specific.pqi_flags &= ~PQI_FLAG_BUSY; if (csio->ccb_h.status & CAM_RELEASE_SIMQ) xpt_release_simq(xpt_path_sim(csio->ccb_h.path), 0); else csio->ccb_h.status |= CAM_RELEASE_SIMQ; } DBG_FUNC("OUT\n"); } static void pqi_synch_request(rcb_t *rcb) { pqisrc_softstate_t *softs = rcb->softs; DBG_IO("IN rcb = %p\n", rcb); if (!(rcb->cm_flags & PQI_CMD_MAPPED)) return; if (rcb->bcount != 0 ) { if (rcb->data_dir == SOP_DATA_DIR_FROM_DEVICE) bus_dmamap_sync(softs->os_specific.pqi_buffer_dmat, rcb->cm_datamap, BUS_DMASYNC_POSTREAD); if (rcb->data_dir == SOP_DATA_DIR_TO_DEVICE) bus_dmamap_sync(softs->os_specific.pqi_buffer_dmat, rcb->cm_datamap, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(softs->os_specific.pqi_buffer_dmat, rcb->cm_datamap); } rcb->cm_flags &= ~PQI_CMD_MAPPED; if(rcb->sgt && rcb->nseg) os_mem_free(rcb->softs, (void*)rcb->sgt, rcb->nseg*sizeof(sgt_t)); DBG_IO("OUT\n"); } /* * Function to dma-unmap the completed request */ static inline void pqi_unmap_request(rcb_t *rcb) { DBG_IO("IN rcb = %p\n", rcb); pqi_synch_request(rcb); pqisrc_put_tag(&rcb->softs->taglist, rcb->tag); DBG_IO("OUT\n"); } /* * Construct meaningful LD name for volume here. */ static void smartpqi_fix_ld_inquiry(pqisrc_softstate_t *softs, struct ccb_scsiio *csio) { struct scsi_inquiry_data *inq = NULL; uint8_t *cdb = NULL; pqi_scsi_dev_t *device = NULL; DBG_FUNC("IN\n"); if (pqisrc_ctrl_offline(softs)) return; cdb = (csio->ccb_h.flags & CAM_CDB_POINTER) ? (uint8_t *)csio->cdb_io.cdb_ptr : csio->cdb_io.cdb_bytes; if(cdb[0] == INQUIRY && (cdb[1] & SI_EVPD) == 0 && (csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN && csio->dxfer_len >= SHORT_INQUIRY_LENGTH) { inq = (struct scsi_inquiry_data *)csio->data_ptr; device = softs->device_list[csio->ccb_h.target_id][csio->ccb_h.target_lun]; /* Let the disks be probed and dealt with via CAM. 
Only for LD let it fall through and inquiry be tweaked */ if (!device || !pqisrc_is_logical_device(device) || (device->devtype != DISK_DEVICE) || pqisrc_is_external_raid_device(device)) { return; } strncpy(inq->vendor, device->vendor, SID_VENDOR_SIZE); strncpy(inq->product, pqisrc_raidlevel_to_string(device->raid_level), SID_PRODUCT_SIZE); strncpy(inq->revision, device->volume_offline?"OFF":"OK", SID_REVISION_SIZE); } DBG_FUNC("OUT\n"); } static void pqi_complete_scsi_io(struct ccb_scsiio *csio, rcb_t *rcb) { uint32_t release_tag; pqisrc_softstate_t *softs = rcb->softs; DBG_IO("IN scsi io = %p\n", csio); pqi_synch_request(rcb); smartpqi_fix_ld_inquiry(rcb->softs, csio); pqi_release_camq(rcb); release_tag = rcb->tag; os_reset_rcb(rcb); pqisrc_put_tag(&softs->taglist, release_tag); xpt_done((union ccb *)csio); DBG_FUNC("OUT\n"); } /* * Handle completion of a command - pass results back through the CCB */ void os_io_response_success(rcb_t *rcb) { struct ccb_scsiio *csio; DBG_IO("IN rcb = %p\n", rcb); if (rcb == NULL) panic("rcb is null"); csio = (struct ccb_scsiio *)&rcb->cm_ccb->csio; if (csio == NULL) panic("csio is null"); rcb->status = REQUEST_SUCCESS; csio->ccb_h.status = CAM_REQ_CMP; pqi_complete_scsi_io(csio, rcb); DBG_IO("OUT\n"); } static void copy_sense_data_to_csio(struct ccb_scsiio *csio, uint8_t *sense_data, uint16_t sense_data_len) { DBG_IO("IN csio = %p\n", csio); memset(&csio->sense_data, 0, csio->sense_len); sense_data_len = (sense_data_len > csio->sense_len) ? csio->sense_len : sense_data_len; if (sense_data) memcpy(&csio->sense_data, sense_data, sense_data_len); if (csio->sense_len > sense_data_len) csio->sense_resid = csio->sense_len - sense_data_len; else csio->sense_resid = 0; DBG_IO("OUT\n"); } /* * Error response handling for raid IO */ void os_raid_response_error(rcb_t *rcb, raid_path_error_info_elem_t *err_info) { struct ccb_scsiio *csio; pqisrc_softstate_t *softs; DBG_IO("IN\n"); csio = (struct ccb_scsiio *)&rcb->cm_ccb->csio; if (csio == NULL) panic("csio is null"); softs = rcb->softs; csio->ccb_h.status = CAM_REQ_CMP_ERR; if (!err_info || !rcb->dvp) { DBG_ERR("couldn't be accessed! error info = %p, rcb->dvp = %p\n", err_info, rcb->dvp); goto error_out; } csio->scsi_status = err_info->status; if (csio->ccb_h.func_code == XPT_SCSI_IO) { /* * Handle specific SCSI status values. */ switch(csio->scsi_status) { case PQI_RAID_STATUS_QUEUE_FULL: csio->ccb_h.status = CAM_REQ_CMP; DBG_ERR("Queue Full error\n"); break; /* check condition, sense data included */ case PQI_RAID_STATUS_CHECK_CONDITION: { uint16_t sense_data_len = LE_16(err_info->sense_data_len); uint8_t *sense_data = NULL; if (sense_data_len) sense_data = err_info->data; copy_sense_data_to_csio(csio, sense_data, sense_data_len); csio->ccb_h.status = CAM_SCSI_STATUS_ERROR | CAM_AUTOSNS_VALID | CAM_REQ_CMP_ERR; } break; case PQI_RAID_DATA_IN_OUT_UNDERFLOW: { uint32_t resid = 0; resid = rcb->bcount-err_info->data_out_transferred; csio->resid = resid; csio->ccb_h.status = CAM_REQ_CMP; } break; default: csio->ccb_h.status = CAM_REQ_CMP; break; } } error_out: pqi_complete_scsi_io(csio, rcb); DBG_IO("OUT\n"); } /* * Error response handling for aio. 
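 * err_info->service_resp distinguishes normal completions, failures carrying
 * a per-command AIO status, and task-management (TMF) responses; the TMF
 * responses below only update rcb->status/rcb->req_pending and return
 * without completing a CCB.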
 */
void
os_aio_response_error(rcb_t *rcb, aio_path_error_info_elem_t *err_info)
{
	struct ccb_scsiio *csio;
	pqisrc_softstate_t *softs;

	DBG_IO("IN\n");

	if (rcb == NULL)
		panic("rcb is null");

	rcb->status = REQUEST_SUCCESS;
	csio = (struct ccb_scsiio *)&rcb->cm_ccb->csio;
	if (csio == NULL)
		panic("csio is null");

	softs = rcb->softs;

	if (!err_info || !rcb->dvp) {
		csio->ccb_h.status = CAM_REQ_CMP_ERR;
		DBG_ERR("couldn't be accessed! error info = %p, rcb->dvp = %p\n",
			err_info, rcb->dvp);
		goto error_out;
	}

	switch (err_info->service_resp) {
	case PQI_AIO_SERV_RESPONSE_COMPLETE:
		csio->ccb_h.status = err_info->status;
		break;
	case PQI_AIO_SERV_RESPONSE_FAILURE:
		switch(err_info->status) {
		case PQI_AIO_STATUS_IO_ABORTED:
			csio->ccb_h.status = CAM_REQ_ABORTED;
			DBG_WARN_BTL(rcb->dvp, "IO aborted\n");
			break;
		case PQI_AIO_STATUS_UNDERRUN:
			csio->ccb_h.status = CAM_REQ_CMP;
			csio->resid = LE_32(err_info->resd_count);
			break;
		case PQI_AIO_STATUS_OVERRUN:
			csio->ccb_h.status = CAM_REQ_CMP;
			break;
		case PQI_AIO_STATUS_AIO_PATH_DISABLED:
			DBG_WARN_BTL(rcb->dvp, "AIO Path Disabled\n");
			/* Timed out TMF response comes here */
			if (rcb->tm_req) {
				rcb->req_pending = false;
				rcb->status = REQUEST_SUCCESS;
				DBG_ERR("AIO Disabled for TMF\n");
				return;
			}
			rcb->dvp->aio_enabled = false;
			rcb->dvp->offload_enabled = false;
			csio->ccb_h.status |= CAM_REQUEUE_REQ;
			break;
		case PQI_AIO_STATUS_IO_ERROR:
		case PQI_AIO_STATUS_IO_NO_DEVICE:
		case PQI_AIO_STATUS_INVALID_DEVICE:
		default:
			DBG_WARN_BTL(rcb->dvp, "IO Error/Invalid/No device\n");
			csio->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
			break;
		}
		break;
	case PQI_AIO_SERV_RESPONSE_TMF_COMPLETE:
	case PQI_AIO_SERV_RESPONSE_TMF_SUCCEEDED:
		DBG_ERR("PQI_AIO_SERV_RESPONSE_TMF %s\n",
			(err_info->service_resp == PQI_AIO_SERV_RESPONSE_TMF_COMPLETE) ?
			"COMPLETE" : "SUCCEEDED");
		rcb->status = REQUEST_SUCCESS;
		rcb->req_pending = false;
		return;
	case PQI_AIO_SERV_RESPONSE_TMF_REJECTED:
	case PQI_AIO_SERV_RESPONSE_TMF_INCORRECT_LUN:
		DBG_ERR("PQI_AIO_SERV_RESPONSE_TMF %s\n",
			(err_info->service_resp == PQI_AIO_SERV_RESPONSE_TMF_REJECTED) ?
			"REJECTED" : "INCORRECT LUN");
		rcb->status = REQUEST_FAILED;
		rcb->req_pending = false;
		return;
	default:
		DBG_WARN_BTL(rcb->dvp, "Scsi Status Error\n");
		csio->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
		break;
	}

	if (err_info->data_pres == DATA_PRESENT_SENSE_DATA) {
		csio->scsi_status = PQI_AIO_STATUS_CHECK_CONDITION;
		uint8_t *sense_data = NULL;
		unsigned sense_data_len = LE_16(err_info->data_len);
		if (sense_data_len)
			sense_data = err_info->data;
-		DBG_ERR_BTL(rcb->dvp, "SCSI_STATUS_CHECK_COND sense size %u\n",
+		DBG_INFO("SCSI_STATUS_CHECK_COND sense size %u\n",
			sense_data_len);
		copy_sense_data_to_csio(csio, sense_data, sense_data_len);
		csio->ccb_h.status = CAM_SCSI_STATUS_ERROR | CAM_AUTOSNS_VALID;
	}

error_out:
	pqi_complete_scsi_io(csio, rcb);

	DBG_IO("OUT\n");
}

static void
pqi_freeze_ccb(union ccb *ccb)
{
	if ((ccb->ccb_h.status & CAM_DEV_QFRZN) == 0) {
		ccb->ccb_h.status |= CAM_DEV_QFRZN;
		xpt_freeze_devq(ccb->ccb_h.path, 1);
	}
}

/*
 * Command-mapping helper function - populate this command's s/g table.
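 * The helper copies each bus_dma segment into the driver's sgt_t array
 * (sgt[i].addr = segs[i].ds_addr, sgt[i].len = segs[i].ds_len), pre-syncs
 * the DMA map for the transfer direction and then hands the request to
 * pqisrc_build_send_io().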
*/ static void pqi_request_map_helper(void *arg, bus_dma_segment_t *segs, int nseg, int error) { rcb_t *rcb = (rcb_t *)arg; pqisrc_softstate_t *softs = rcb->softs; union ccb *ccb; if (error || nseg > softs->pqi_cap.max_sg_elem) { DBG_ERR_BTL(rcb->dvp, "map failed err = %d or nseg(%d) > sgelem(%d)\n", error, nseg, softs->pqi_cap.max_sg_elem); goto error_io; } rcb->sgt = os_mem_alloc(softs, nseg * sizeof(sgt_t)); if (!rcb->sgt) { DBG_ERR_BTL(rcb->dvp, "os_mem_alloc() failed; nseg = %d\n", nseg); goto error_io; } rcb->nseg = nseg; for (int i = 0; i < nseg; i++) { rcb->sgt[i].addr = segs[i].ds_addr; rcb->sgt[i].len = segs[i].ds_len; rcb->sgt[i].flags = 0; } if (rcb->data_dir == SOP_DATA_DIR_FROM_DEVICE) bus_dmamap_sync(softs->os_specific.pqi_buffer_dmat, rcb->cm_datamap, BUS_DMASYNC_PREREAD); if (rcb->data_dir == SOP_DATA_DIR_TO_DEVICE) bus_dmamap_sync(softs->os_specific.pqi_buffer_dmat, rcb->cm_datamap, BUS_DMASYNC_PREWRITE); /* Call IO functions depending on pd or ld */ rcb->status = REQUEST_PENDING; error = pqisrc_build_send_io(softs, rcb); if (error) { rcb->req_pending = false; DBG_ERR_BTL(rcb->dvp, "Build IO failed, error = %d\n", error); } else { /* Successfully IO was submitted to the device. */ return; } error_io: ccb = rcb->cm_ccb; ccb->ccb_h.status = CAM_RESRC_UNAVAIL; pqi_freeze_ccb(ccb); pqi_unmap_request(rcb); xpt_done(ccb); return; } /* * Function to dma-map the request buffer */ static int pqi_map_request(rcb_t *rcb) { pqisrc_softstate_t *softs = rcb->softs; int bsd_status = BSD_SUCCESS; union ccb *ccb = rcb->cm_ccb; DBG_FUNC("IN\n"); /* check that mapping is necessary */ if (rcb->cm_flags & PQI_CMD_MAPPED) return BSD_SUCCESS; rcb->cm_flags |= PQI_CMD_MAPPED; if (rcb->bcount) { bsd_status = bus_dmamap_load_ccb(softs->os_specific.pqi_buffer_dmat, rcb->cm_datamap, ccb, pqi_request_map_helper, rcb, 0); if (bsd_status != BSD_SUCCESS && bsd_status != EINPROGRESS) { DBG_ERR_BTL(rcb->dvp, "bus_dmamap_load_ccb failed, return status = %d transfer length = %d\n", bsd_status, rcb->bcount); return bsd_status; } } else { /* * Set up the command to go to the controller. If there are no * data buffers associated with the command then it can bypass * busdma. 
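	 * (Illustrative example: a TEST UNIT READY carries no data buffer, so
	 * rcb->bcount is 0 and the request is handed straight to
	 * pqisrc_build_send_io() without calling bus_dmamap_load_ccb().)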
*/ /* Call IO functions depending on pd or ld */ rcb->status = REQUEST_PENDING; if (pqisrc_build_send_io(softs, rcb) != PQI_STATUS_SUCCESS) { bsd_status = EIO; } } DBG_FUNC("OUT error = %d\n", bsd_status); return bsd_status; } /* * Function to clear the request control block */ void os_reset_rcb(rcb_t *rcb) { rcb->error_info = NULL; rcb->req = NULL; rcb->status = -1; rcb->tag = INVALID_ELEM; rcb->dvp = NULL; rcb->cdbp = NULL; rcb->softs = NULL; rcb->cm_flags = 0; rcb->cm_data = NULL; rcb->bcount = 0; rcb->nseg = 0; rcb->sgt = NULL; rcb->cm_ccb = NULL; rcb->encrypt_enable = false; rcb->ioaccel_handle = 0; rcb->resp_qid = 0; rcb->req_pending = false; rcb->tm_req = false; } /* * Callback function for the lun rescan */ static void smartpqi_lunrescan_cb(struct cam_periph *periph, union ccb *ccb) { xpt_free_path(ccb->ccb_h.path); xpt_free_ccb(ccb); } /* * Function to rescan the lun */ static void smartpqi_lun_rescan(struct pqisrc_softstate *softs, int target, int lun) { union ccb *ccb = NULL; cam_status status = 0; struct cam_path *path = NULL; DBG_FUNC("IN\n"); ccb = xpt_alloc_ccb_nowait(); if (ccb == NULL) { DBG_ERR("Unable to alloc ccb for lun rescan\n"); return; } status = xpt_create_path(&path, NULL, cam_sim_path(softs->os_specific.sim), target, lun); if (status != CAM_REQ_CMP) { DBG_ERR("xpt_create_path status(%d) != CAM_REQ_CMP \n", status); xpt_free_ccb(ccb); return; } bzero(ccb, sizeof(union ccb)); xpt_setup_ccb(&ccb->ccb_h, path, 5); ccb->ccb_h.func_code = XPT_SCAN_LUN; ccb->ccb_h.cbfcnp = smartpqi_lunrescan_cb; ccb->crcn.flags = CAM_FLAG_NONE; xpt_action(ccb); DBG_FUNC("OUT\n"); } /* * Function to rescan the lun under each target */ void smartpqi_target_rescan(struct pqisrc_softstate *softs) { int target = 0, lun = 0; DBG_FUNC("IN\n"); for(target = 0; target < PQI_MAX_DEVICES; target++){ for(lun = 0; lun < PQI_MAX_MULTILUN; lun++){ if(softs->device_list[target][lun]){ smartpqi_lun_rescan(softs, target, lun); } } } DBG_FUNC("OUT\n"); } /* * Set the mode of tagged command queueing for the current task. 
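 * The switch below maps CAM tag actions to SOP task attributes:
 * MSG_HEAD_OF_Q_TAG -> SOP_TASK_ATTRIBUTE_HEAD_OF_QUEUE,
 * MSG_ORDERED_Q_TAG -> SOP_TASK_ATTRIBUTE_ORDERED, and MSG_SIMPLE_Q_TAG
 * (or anything else) -> SOP_TASK_ATTRIBUTE_SIMPLE.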
*/ uint8_t os_get_task_attr(rcb_t *rcb) { union ccb *ccb = rcb->cm_ccb; uint8_t tag_action = SOP_TASK_ATTRIBUTE_SIMPLE; switch(ccb->csio.tag_action) { case MSG_HEAD_OF_Q_TAG: tag_action = SOP_TASK_ATTRIBUTE_HEAD_OF_QUEUE; break; case MSG_ORDERED_Q_TAG: tag_action = SOP_TASK_ATTRIBUTE_ORDERED; break; case MSG_SIMPLE_Q_TAG: default: tag_action = SOP_TASK_ATTRIBUTE_SIMPLE; break; } return tag_action; } /* * Complete all outstanding commands */ void os_complete_outstanding_cmds_nodevice(pqisrc_softstate_t *softs) { int tag = 0; pqi_scsi_dev_t *dvp = NULL; DBG_FUNC("IN\n"); for (tag = 1; tag <= softs->max_outstanding_io; tag++) { rcb_t *prcb = &softs->rcb[tag]; dvp = prcb->dvp; if(prcb->req_pending && prcb->cm_ccb ) { prcb->req_pending = false; prcb->cm_ccb->ccb_h.status = CAM_REQ_ABORTED | CAM_REQ_CMP; pqi_complete_scsi_io(&prcb->cm_ccb->csio, prcb); if (dvp) pqisrc_decrement_device_active_io(softs, dvp); } } DBG_FUNC("OUT\n"); } /* * IO handling functionality entry point */ static int pqisrc_io_start(struct cam_sim *sim, union ccb *ccb) { rcb_t *rcb; uint32_t tag, no_transfer = 0; pqisrc_softstate_t *softs = (struct pqisrc_softstate *) cam_sim_softc(sim); int32_t error; pqi_scsi_dev_t *dvp; DBG_FUNC("IN\n"); if (softs->device_list[ccb->ccb_h.target_id][ccb->ccb_h.target_lun] == NULL) { ccb->ccb_h.status = CAM_DEV_NOT_THERE; DBG_INFO("Device = %d not there\n", ccb->ccb_h.target_id); return ENXIO; } dvp = softs->device_list[ccb->ccb_h.target_id][ccb->ccb_h.target_lun]; /* Check controller state */ if (IN_PQI_RESET(softs)) { ccb->ccb_h.status = CAM_SCSI_BUS_RESET | CAM_BUSY | CAM_REQ_INPROG; DBG_WARN("Device = %d BUSY/IN_RESET\n", ccb->ccb_h.target_id); return ENXIO; } /* Check device state */ if (pqisrc_ctrl_offline(softs) || DEV_GONE(dvp)) { ccb->ccb_h.status = CAM_DEV_NOT_THERE | CAM_REQ_CMP; DBG_WARN("Device = %d GONE/OFFLINE\n", ccb->ccb_h.target_id); return ENXIO; } /* Check device reset */ if (DEVICE_RESET(dvp)) { ccb->ccb_h.status = CAM_SCSI_BUSY | CAM_REQ_INPROG | CAM_BUSY; DBG_WARN("Device %d reset returned busy\n", ccb->ccb_h.target_id); return EBUSY; } if (dvp->expose_device == false) { ccb->ccb_h.status = CAM_DEV_NOT_THERE; DBG_INFO("Device = %d not exposed\n", ccb->ccb_h.target_id); return ENXIO; } tag = pqisrc_get_tag(&softs->taglist); if (tag == INVALID_ELEM) { DBG_ERR("Get Tag failed\n"); xpt_freeze_simq(softs->os_specific.sim, 1); softs->os_specific.pqi_flags |= PQI_FLAG_BUSY; ccb->ccb_h.status |= (CAM_REQUEUE_REQ | CAM_RELEASE_SIMQ); return EIO; } DBG_IO("tag = %d &softs->taglist : %p\n", tag, &softs->taglist); rcb = &softs->rcb[tag]; os_reset_rcb(rcb); rcb->tag = tag; rcb->softs = softs; rcb->cmdlen = ccb->csio.cdb_len; ccb->ccb_h.sim_priv.entries[0].ptr = rcb; switch (ccb->ccb_h.flags & CAM_DIR_MASK) { case CAM_DIR_IN: rcb->data_dir = SOP_DATA_DIR_FROM_DEVICE; break; case CAM_DIR_OUT: rcb->data_dir = SOP_DATA_DIR_TO_DEVICE; break; case CAM_DIR_NONE: no_transfer = 1; break; default: DBG_ERR("Unknown Dir\n"); break; } rcb->cm_ccb = ccb; rcb->dvp = softs->device_list[ccb->ccb_h.target_id][ccb->ccb_h.target_lun]; if (!no_transfer) { rcb->cm_data = (void *)ccb->csio.data_ptr; rcb->bcount = ccb->csio.dxfer_len; } else { rcb->cm_data = NULL; rcb->bcount = 0; } /* * Submit the request to the adapter. * * Note that this may fail if we're unable to map the request (and * if we ever learn a transport layer other than simple, may fail * if the adapter rejects the command). 
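	 * On EINPROGRESS the simq is left frozen (PQI_FLAG_BUSY) and is
	 * released when the deferred mapping completes; any other mapping
	 * failure requeues the CCB with CAM_REQUEUE_REQ | CAM_RELEASE_SIMQ
	 * and unmaps the request.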
*/ if ((error = pqi_map_request(rcb)) != BSD_SUCCESS) { xpt_freeze_simq(softs->os_specific.sim, 1); if (error == EINPROGRESS) { /* Release simq in the completion */ softs->os_specific.pqi_flags |= PQI_FLAG_BUSY; error = BSD_SUCCESS; } else { rcb->req_pending = false; ccb->ccb_h.status |= CAM_REQUEUE_REQ | CAM_RELEASE_SIMQ; DBG_WARN("Requeue req error = %d target = %d\n", error, ccb->ccb_h.target_id); pqi_unmap_request(rcb); error = EIO; } } DBG_FUNC("OUT error = %d\n", error); return error; } static inline int pqi_tmf_status_to_bsd_tmf_status(int pqi_status, rcb_t *rcb) { if (PQI_STATUS_SUCCESS == pqi_status && REQUEST_SUCCESS == rcb->status) return BSD_SUCCESS; else return EIO; } /* * Abort a task, task management functionality */ static int pqisrc_scsi_abort_task(pqisrc_softstate_t *softs, union ccb *ccb) { struct ccb_hdr *ccb_h = &ccb->ccb_h; rcb_t *rcb = NULL; rcb_t *prcb = ccb->ccb_h.sim_priv.entries[0].ptr; uint32_t tag; int rval; DBG_FUNC("IN\n"); tag = pqisrc_get_tag(&softs->taglist); rcb = &softs->rcb[tag]; rcb->tag = tag; if (!rcb->dvp) { DBG_ERR("dvp is null, tmf type : 0x%x\n", ccb_h->func_code); rval = ENXIO; goto error_tmf; } rcb->tm_req = true; rval = pqisrc_send_tmf(softs, rcb->dvp, rcb, prcb, SOP_TASK_MANAGEMENT_FUNCTION_ABORT_TASK); if ((rval = pqi_tmf_status_to_bsd_tmf_status(rval, rcb)) == BSD_SUCCESS) ccb->ccb_h.status = CAM_REQ_ABORTED; error_tmf: os_reset_rcb(rcb); pqisrc_put_tag(&softs->taglist, tag); DBG_FUNC("OUT rval = %d\n", rval); return rval; } /* * Abort a taskset, task management functionality */ static int pqisrc_scsi_abort_task_set(pqisrc_softstate_t *softs, union ccb *ccb) { struct ccb_hdr *ccb_h = &ccb->ccb_h; rcb_t *rcb = NULL; uint32_t tag; int rval; DBG_FUNC("IN\n"); tag = pqisrc_get_tag(&softs->taglist); rcb = &softs->rcb[tag]; rcb->tag = tag; if (!rcb->dvp) { DBG_ERR("dvp is null, tmf type : 0x%x\n", ccb_h->func_code); rval = ENXIO; goto error_tmf; } rcb->tm_req = true; rval = pqisrc_send_tmf(softs, rcb->dvp, rcb, NULL, SOP_TASK_MANAGEMENT_FUNCTION_ABORT_TASK_SET); rval = pqi_tmf_status_to_bsd_tmf_status(rval, rcb); error_tmf: os_reset_rcb(rcb); pqisrc_put_tag(&softs->taglist, tag); DBG_FUNC("OUT rval = %d\n", rval); return rval; } /* * Target reset task management functionality */ static int pqisrc_target_reset( pqisrc_softstate_t *softs, union ccb *ccb) { struct ccb_hdr *ccb_h = &ccb->ccb_h; pqi_scsi_dev_t *devp = softs->device_list[ccb->ccb_h.target_id][ccb->ccb_h.target_lun]; rcb_t *rcb = NULL; uint32_t tag; int rval; DBG_FUNC("IN\n"); if (devp == NULL) { DBG_ERR("bad target %d, tmf type : 0x%x\n", ccb_h->target_id, ccb_h->func_code); return ENXIO; } tag = pqisrc_get_tag(&softs->taglist); rcb = &softs->rcb[tag]; rcb->tag = tag; devp->reset_in_progress = true; rcb->tm_req = true; rval = pqisrc_send_tmf(softs, devp, rcb, NULL, SOP_TASK_MANAGEMENT_LUN_RESET); rval = pqi_tmf_status_to_bsd_tmf_status(rval, rcb); devp->reset_in_progress = false; os_reset_rcb(rcb); pqisrc_put_tag(&softs->taglist, tag); DBG_FUNC("OUT rval = %d\n", rval); return rval; } /* * cam entry point of the smartpqi module. 
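 * Dispatches on ccb_h->func_code: XPT_SCSI_IO goes to pqisrc_io_start(),
 * XPT_PATH_INQ to update_sim_properties(), XPT_GET_TRAN_SETTINGS to
 * get_transport_settings(), and XPT_ABORT/XPT_TERM_IO/XPT_RESET_DEV to the
 * task-management helpers; the CCB is completed with xpt_done() unless the
 * I/O was queued successfully.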
*/ static void smartpqi_cam_action(struct cam_sim *sim, union ccb *ccb) { struct pqisrc_softstate *softs = cam_sim_softc(sim); struct ccb_hdr *ccb_h = &ccb->ccb_h; DBG_FUNC("IN\n"); switch (ccb_h->func_code) { case XPT_SCSI_IO: { if(!pqisrc_io_start(sim, ccb)) { return; } break; } case XPT_CALC_GEOMETRY: { struct ccb_calc_geometry *ccg; ccg = &ccb->ccg; if (ccg->block_size == 0) { ccb->ccb_h.status &= ~CAM_SIM_QUEUED; ccb->ccb_h.status |= CAM_REQ_INVALID; break; } cam_calc_geometry(ccg, /* extended */ 1); ccb->ccb_h.status = CAM_REQ_CMP; break; } case XPT_PATH_INQ: { update_sim_properties(sim, &ccb->cpi); ccb->ccb_h.status = CAM_REQ_CMP; break; } case XPT_GET_TRAN_SETTINGS: get_transport_settings(softs, &ccb->cts); ccb->ccb_h.status = CAM_REQ_CMP; break; case XPT_ABORT: if(pqisrc_scsi_abort_task(softs, ccb)) { ccb->ccb_h.status = CAM_REQ_CMP_ERR; xpt_done(ccb); DBG_ERR("Abort task failed on %d\n", ccb->ccb_h.target_id); return; } break; case XPT_TERM_IO: if (pqisrc_scsi_abort_task_set(softs, ccb)) { ccb->ccb_h.status = CAM_REQ_CMP_ERR; DBG_ERR("Abort task set failed on %d\n", ccb->ccb_h.target_id); xpt_done(ccb); return; } break; case XPT_RESET_DEV: if(pqisrc_target_reset(softs, ccb)) { ccb->ccb_h.status = CAM_REQ_CMP_ERR; DBG_ERR("Target reset failed on %d\n", ccb->ccb_h.target_id); xpt_done(ccb); return; } else { ccb->ccb_h.status = CAM_REQ_CMP; } break; case XPT_RESET_BUS: ccb->ccb_h.status = CAM_REQ_CMP; break; case XPT_SET_TRAN_SETTINGS: ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; return; default: DBG_WARN("UNSUPPORTED FUNC CODE\n"); ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; break; } xpt_done(ccb); DBG_FUNC("OUT\n"); } /* * Function to poll the response, when interrupts are unavailable * This also serves supporting crash dump. */ static void smartpqi_poll(struct cam_sim *sim) { struct pqisrc_softstate *softs = cam_sim_softc(sim); int i; for (i = 1; i < softs->intr_count; i++ ) pqisrc_process_response_queue(softs, i); } /* * Function to adjust the queue depth of a device */ void smartpqi_adjust_queue_depth(struct cam_path *path, uint32_t queue_depth) { struct ccb_relsim crs; DBG_INFO("IN\n"); memset(&crs, 0, sizeof(crs)); xpt_setup_ccb(&crs.ccb_h, path, 5); crs.ccb_h.func_code = XPT_REL_SIMQ; crs.ccb_h.flags = CAM_DEV_QFREEZE; crs.release_flags = RELSIM_ADJUST_OPENINGS; crs.openings = queue_depth; xpt_action((union ccb *)&crs); if(crs.ccb_h.status != CAM_REQ_CMP) { printf("XPT_REL_SIMQ failed stat=%d\n", crs.ccb_h.status); } DBG_INFO("OUT\n"); } /* * Function to register async callback for setting queue depth */ static void smartpqi_async(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg) { struct pqisrc_softstate *softs; softs = (struct pqisrc_softstate*)callback_arg; DBG_FUNC("IN\n"); switch (code) { case AC_FOUND_DEVICE: { struct ccb_getdev *cgd; cgd = (struct ccb_getdev *)arg; if (cgd == NULL) { break; } uint32_t t_id = cgd->ccb_h.target_id; if (t_id <= (PQI_CTLR_INDEX - 1)) { if (softs != NULL) { pqi_scsi_dev_t *dvp = softs->device_list[t_id][cgd->ccb_h.target_lun]; if (dvp == NULL) { DBG_ERR("Target is null, target id=%d\n", t_id); break; } smartpqi_adjust_queue_depth(path, dvp->queue_depth); } } break; } default: break; } DBG_FUNC("OUT\n"); } /* * Function to register sim with CAM layer for smartpqi driver */ int register_sim(struct pqisrc_softstate *softs, int card_index) { int max_transactions; union ccb *ccb = NULL; int error; struct ccb_setasync csa; struct cam_sim *sim; DBG_FUNC("IN\n"); max_transactions = softs->max_io_for_scsi_ml; softs->os_specific.devq = 
cam_simq_alloc(max_transactions); if (softs->os_specific.devq == NULL) { DBG_ERR("cam_simq_alloc failed txns = %d\n", max_transactions); return ENOMEM; } sim = cam_sim_alloc(smartpqi_cam_action, \ smartpqi_poll, "smartpqi", softs, \ card_index, &softs->os_specific.cam_lock, \ 1, max_transactions, softs->os_specific.devq); if (sim == NULL) { DBG_ERR("cam_sim_alloc failed txns = %d\n", max_transactions); cam_simq_free(softs->os_specific.devq); return ENOMEM; } softs->os_specific.sim = sim; mtx_lock(&softs->os_specific.cam_lock); error = xpt_bus_register(sim, softs->os_specific.pqi_dev, 0); if (error != CAM_SUCCESS) { DBG_ERR("xpt_bus_register failed errno %d\n", error); cam_sim_free(softs->os_specific.sim, FALSE); cam_simq_free(softs->os_specific.devq); mtx_unlock(&softs->os_specific.cam_lock); return ENXIO; } softs->os_specific.sim_registered = TRUE; ccb = xpt_alloc_ccb_nowait(); if (ccb == NULL) { DBG_ERR("xpt_create_path failed\n"); return ENXIO; } if (xpt_create_path(&ccb->ccb_h.path, NULL, cam_sim_path(softs->os_specific.sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { DBG_ERR("xpt_create_path failed\n"); xpt_free_ccb(ccb); xpt_bus_deregister(cam_sim_path(softs->os_specific.sim)); cam_sim_free(softs->os_specific.sim, TRUE); mtx_unlock(&softs->os_specific.cam_lock); return ENXIO; } /* * Callback to set the queue depth per target which is * derived from the FW. */ softs->os_specific.path = ccb->ccb_h.path; memset(&csa, 0, sizeof(csa)); xpt_setup_ccb(&csa.ccb_h, softs->os_specific.path, 5); csa.ccb_h.func_code = XPT_SASYNC_CB; csa.event_enable = AC_FOUND_DEVICE; csa.callback = smartpqi_async; csa.callback_arg = softs; xpt_action((union ccb *)&csa); if (csa.ccb_h.status != CAM_REQ_CMP) { DBG_ERR("Unable to register smartpqi_aysnc handler: %d!\n", csa.ccb_h.status); } mtx_unlock(&softs->os_specific.cam_lock); DBG_INFO("OUT\n"); return BSD_SUCCESS; } /* * Function to deregister smartpqi sim from cam layer */ void deregister_sim(struct pqisrc_softstate *softs) { struct ccb_setasync csa; DBG_FUNC("IN\n"); if (softs->os_specific.mtx_init) { mtx_lock(&softs->os_specific.cam_lock); } memset(&csa, 0, sizeof(csa)); xpt_setup_ccb(&csa.ccb_h, softs->os_specific.path, 5); csa.ccb_h.func_code = XPT_SASYNC_CB; csa.event_enable = 0; csa.callback = smartpqi_async; csa.callback_arg = softs; xpt_action((union ccb *)&csa); xpt_free_path(softs->os_specific.path); if (softs->os_specific.sim) { xpt_release_simq(softs->os_specific.sim, 0); xpt_bus_deregister(cam_sim_path(softs->os_specific.sim)); softs->os_specific.sim_registered = FALSE; cam_sim_free(softs->os_specific.sim, FALSE); softs->os_specific.sim = NULL; } if (softs->os_specific.mtx_init) { mtx_unlock(&softs->os_specific.cam_lock); } if (softs->os_specific.devq != NULL) { cam_simq_free(softs->os_specific.devq); } if (softs->os_specific.mtx_init) { mtx_destroy(&softs->os_specific.cam_lock); softs->os_specific.mtx_init = FALSE; } mtx_destroy(&softs->os_specific.map_lock); DBG_FUNC("OUT\n"); } void os_rescan_target(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device) { struct cam_path *tmppath; DBG_FUNC("IN\n"); if(softs->os_specific.sim_registered) { if (xpt_create_path(&tmppath, NULL, cam_sim_path(softs->os_specific.sim), device->target, device->lun) != CAM_REQ_CMP) { DBG_ERR("unable to create path for async event!!! 
Bus: %d Target: %d Lun: %d\n", device->bus, device->target, device->lun); return; } xpt_async(AC_INQ_CHANGED, tmppath, NULL); xpt_free_path(tmppath); } device->scsi_rescan = false; DBG_FUNC("OUT\n"); } diff --git a/sys/dev/smartpqi/smartpqi_defines.h b/sys/dev/smartpqi/smartpqi_defines.h index 94000a06a4c5..10a1a78d4599 100644 --- a/sys/dev/smartpqi/smartpqi_defines.h +++ b/sys/dev/smartpqi/smartpqi_defines.h @@ -1,1170 +1,1169 @@ /*- * Copyright 2016-2021 Microchip Technology, Inc. and/or its subsidiaries. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* $FreeBSD$ */ #ifndef _PQI_DEFINES_H #define _PQI_DEFINES_H #define PQI_STATUS_FAILURE -1 #define PQI_STATUS_TIMEOUT -2 #define PQI_STATUS_QFULL -3 #define PQI_STATUS_SUCCESS 0 /* Maximum timeout for internal command completion */ #define TIMEOUT_INFINITE ((uint32_t) (-1)) #define PQISRC_CMD_TIMEOUT TIMEOUT_INFINITE #define PQISRC_PASSTHROUGH_CMD_TIMEOUT PQISRC_CMD_TIMEOUT /* Delay in milli seconds */ #define PQISRC_TMF_TIMEOUT (OS_TMF_TIMEOUT_SEC * 1000) /* Delay in micro seconds */ #define PQISRC_PENDING_IO_TIMEOUT_USEC 30000000 /* 30 seconds */ /* If want to disable atomic operations on device active io, then set to zero */ #define PQISRC_DEVICE_IO_COUNTER 1 #define INVALID_ELEM 0xffff #ifndef MIN #define MIN(a,b) ((a) < (b) ? (a) : (b)) #endif #ifndef MAX #define MAX(a,b) ((a) > (b) ? 
(a) : (b)) #endif #define PQISRC_ROUNDUP(x, y) (((x) + (y) - 1) / (y) * (y)) #define PQISRC_DIV_ROUND_UP(x, y) (((x) + (y) - 1) / (y)) #define ALIGN_BOUNDARY(a, n) { \ if (a % n) \ a = a + (n - a % n); \ } /* Busy wait timeout on a condition */ #define COND_BUSYWAIT(cond, timeout /* in millisecond */) { \ if (!(cond)) { \ while (timeout) { \ OS_BUSYWAIT(1000); \ if (cond) \ break; \ timeout--; \ } \ } \ } /* Wait timeout on a condition*/ #define COND_WAIT(cond, timeout /* in millisecond */) { \ if (!(cond)) { \ while (timeout) { \ OS_SLEEP(1000); \ if (cond) \ break; \ timeout--; \ } \ } \ } #define FILL_QUEUE_ARRAY_ADDR(q,virt,dma) { \ q->array_virt_addr = virt; \ q->array_dma_addr = dma; \ } #define true 1 #define false 0 enum INTR_TYPE { LOCK_INTR, LOCK_SLEEP }; #define LOCKNAME_SIZE 32 #define INTR_TYPE_NONE 0x0 #define INTR_TYPE_FIXED 0x1 #define INTR_TYPE_MSI 0x2 #define INTR_TYPE_MSIX 0x4 #define SIS_ENABLE_MSIX 0x40 #define SIS_ENABLE_INTX 0x80 #define PQISRC_LEGACY_INTX_MASK 0x1 #define DMA_TO_VIRT(mem) ((mem)->virt_addr) #define DMA_PHYS_LOW(mem) (((mem)->dma_addr) & 0x00000000ffffffff) #define DMA_PHYS_HIGH(mem) ((((mem)->dma_addr) & 0xffffffff00000000) >> 32) typedef enum REQUEST_STATUS { REQUEST_SUCCESS = 0, REQUEST_PENDING = -1, REQUEST_FAILED = -2, }REQUEST_STATUS_T; typedef enum IO_PATH { AIO_PATH, RAID_PATH }IO_PATH_T; typedef enum device_type { DISK_DEVICE, TAPE_DEVICE, ROM_DEVICE = 5, SES_DEVICE, CONTROLLER_DEVICE, MEDIUM_CHANGER_DEVICE, RAID_DEVICE = 0x0c, ENCLOSURE_DEVICE, ZBC_DEVICE = 0x14 } device_type_t; typedef enum controller_state { PQI_UP_RUNNING, PQI_BUS_RESET, }controller_state_t; #define PQISRC_MAX_MSIX_SUPPORTED 64 /* SIS Specific */ #define PQISRC_INIT_STRUCT_REVISION 9 #define PQISRC_SECTOR_SIZE 512 #define PQISRC_BLK_SIZE PQISRC_SECTOR_SIZE #define PQISRC_DEFAULT_DMA_ALIGN 4 #define PQISRC_DMA_ALIGN_MASK (PQISRC_DEFAULT_DMA_ALIGN - 1) #define PQISRC_ERR_BUF_DMA_ALIGN 32 #define PQISRC_ERR_BUF_ELEM_SIZE MAX(sizeof(raid_path_error_info_elem_t),sizeof(aio_path_error_info_elem_t)) #define PQISRC_INIT_STRUCT_DMA_ALIGN 16 #define SIS_CMD_GET_ADAPTER_PROPERTIES 0x19 #define SIS_CMD_GET_COMM_PREFERRED_SETTINGS 0x26 #define SIS_CMD_GET_PQI_CAPABILITIES 0x3000 #define SIS_CMD_INIT_BASE_STRUCT_ADDRESS 0x1b #define SIS_SUPPORT_EXT_OPT 0x00800000 #define SIS_SUPPORT_PQI 0x00000004 #define SIS_SUPPORT_PQI_RESET_QUIESCE 0x00000008 #define SIS_PQI_RESET_QUIESCE 0x1000000 #define SIS_STATUS_OK_TIMEOUT 120000 /* in milli sec, 5 sec */ #define SIS_CMD_COMPLETE_TIMEOUT 30000 /* in milli sec, 30 secs */ #define SIS_POLL_START_WAIT_TIME 20000 /* in micro sec, 20 milli sec */ #define SIS_DB_BIT_CLEAR_TIMEOUT_CNT 120000 /* 500usec * 120000 = 60 sec */ #define SIS_ENABLE_TIMEOUT 3000 #define REENABLE_SIS 0x1 #define TRIGGER_NMI_SIS 0x800000 /*SIS Register status defines */ #define PQI_CTRL_KERNEL_UP_AND_RUNNING 0x80 #define PQI_CTRL_KERNEL_PANIC 0x100 #define SIS_CTL_TO_HOST_DB_DISABLE_ALL 0xFFFFFFFF #define SIS_CTL_TO_HOST_DB_CLEAR 0x00001000 #define SIS_CMD_SUBMIT 0x00000200 /* Bit 9 */ #define SIS_CMD_COMPLETE 0x00001000 /* Bit 12 */ #define SIS_CMD_STATUS_SUCCESS 0x1 /* PQI specific */ /* defines */ #define PQISRC_PQI_REG_OFFSET 0x4000 #define PQISRC_MAX_OUTSTANDING_REQ 4096 #define PQISRC_MAX_ADMIN_IB_QUEUE_ELEM_NUM 16 #define PQISRC_MAX_ADMIN_OB_QUEUE_ELEM_NUM 16 #define PQI_MIN_OP_IB_QUEUE_ID 1 #define PQI_OP_EVENT_QUEUE_ID 1 #define PQI_MIN_OP_OB_QUEUE_ID 2 #define PQISRC_MAX_SUPPORTED_OP_IB_Q 128 #define PQISRC_MAX_SUPPORTED_OP_RAID_IB_Q (PQISRC_MAX_SUPPORTED_OP_IB_Q / 
2) #define PQISRC_MAX_SUPPORTED_OP_AIO_IB_Q (PQISRC_MAX_SUPPORTED_OP_RAID_IB_Q) #define PQISRC_MAX_OP_IB_QUEUE_ELEM_NUM (PQISRC_MAX_OUTSTANDING_REQ / PQISRC_MAX_SUPPORTED_OP_IB_Q) #define PQISRC_MAX_OP_OB_QUEUE_ELEM_NUM PQISRC_MAX_OUTSTANDING_REQ #define PQISRC_MIN_OP_OB_QUEUE_ELEM_NUM 2 #define PQISRC_MAX_SUPPORTED_OP_OB_Q 64 #define PQISRC_OP_MAX_IBQ_ELEM_SIZE 8 /* 8 * 16 = 128 bytes */ #define PQISRC_OP_MIN_IBQ_ELEM_SIZE 2 /* 2 * 16 = 32 bytes */ #define PQISRC_OP_OBQ_ELEM_SIZE 1 /* 16 bytes */ #define PQISRC_ADMIN_IBQ_ELEM_SIZE 2 /* 2 * 16 = 32 bytes */ #define PQISRC_INTR_COALSC_GRAN 0 #define PQISRC_PROTO_BIT_MASK 0 #define PQISRC_SGL_SUPPORTED_BIT_MASK 0 #define PQISRC_NUM_EVENT_Q_ELEM 32 #define PQISRC_EVENT_Q_ELEM_SIZE 32 /* PQI Registers state status */ #define PQI_RESET_ACTION_RESET 0x1 #define PQI_RESET_ACTION_COMPLETED 0x2 #define PQI_RESET_TYPE_NO_RESET 0x0 #define PQI_RESET_TYPE_SOFT_RESET 0x1 #define PQI_RESET_TYPE_FIRM_RESET 0x2 #define PQI_RESET_TYPE_HARD_RESET 0x3 #define PQI_RESET_POLL_INTERVAL 100000 /*100 msec*/ enum pqisrc_ctrl_mode{ CTRL_SIS_MODE = 0, CTRL_PQI_MODE }; /* PQI device performing internal initialization (e.g., POST). */ #define PQI_DEV_STATE_POWER_ON_AND_RESET 0x0 /* Upon entry to this state PQI device initialization begins. */ #define PQI_DEV_STATE_PQI_STATUS_AVAILABLE 0x1 /* PQI device Standard registers are available to the driver. */ #define PQI_DEV_STATE_ALL_REGISTERS_READY 0x2 /* PQI device is initialized and ready to process any PCI transactions. */ #define PQI_DEV_STATE_ADMIN_QUEUE_PAIR_READY 0x3 /* The PQI Device Error register indicates the error. */ #define PQI_DEV_STATE_ERROR 0x4 #define PQI_DEV_STATE_AT_INIT ( PQI_DEV_STATE_PQI_STATUS_AVAILABLE | \ PQI_DEV_STATE_ALL_REGISTERS_READY | \ PQI_DEV_STATE_ADMIN_QUEUE_PAIR_READY ) #define PQISRC_PQI_DEVICE_SIGNATURE "PQI DREG" #define PQI_ADMINQ_ELEM_ARRAY_ALIGN 64 #define PQI_ADMINQ_CI_PI_ALIGN 64 #define PQI_OPQ_ELEM_ARRAY_ALIGN 64 #define PQI_OPQ_CI_PI_ALIGN 4 #define PQI_ADDR_ALIGN_MASK_64 0x3F /* lsb 6 bits */ #define PQI_ADDR_ALIGN_MASK_4 0x3 /* lsb 2 bits */ #define PQISRC_PQIMODE_READY_TIMEOUT (30 * 1000 ) /* 30 secs */ #define PQISRC_MODE_READY_POLL_INTERVAL 1000 /* 1 msec */ #define PRINT_PQI_SIGNATURE(sign) { int i = 0; \ char si[9]; \ for(i=0;i<8;i++) \ si[i] = *((char *)&(sign)+i); \ si[i] = '\0'; \ DBG_INFO("Signature is %s",si); \ } #define PQI_CONF_TABLE_MAX_LEN ((uint16_t)~0) #define PQI_CONF_TABLE_SIGNATURE "CFGTABLE" /* PQI configuration table section IDs */ #define PQI_CONF_TABLE_ALL_SECTIONS (-1) #define PQI_CONF_TABLE_SECTION_GENERAL_INFO 0 #define PQI_CONF_TABLE_SECTION_FIRMWARE_FEATURES 1 #define PQI_CONF_TABLE_SECTION_FIRMWARE_ERRATA 2 #define PQI_CONF_TABLE_SECTION_DEBUG 3 #define PQI_CONF_TABLE_SECTION_HEARTBEAT 4 #define PQI_FIRMWARE_FEATURE_OFA 0 #define PQI_FIRMWARE_FEATURE_SMP 1 #define PQI_FIRMWARE_FEATURE_MAX_KNOWN 2 #define PQI_FIRMWARE_FEATURE_AIO_READ_RAID_0 3 #define PQI_FIRMWARE_FEATURE_AIO_READ_RAID_1_10 4 #define PQI_FIRMWARE_FEATURE_AIO_READ_RAID_5_50 5 #define PQI_FIRMWARE_FEATURE_AIO_READ_RAID_6_60 6 #define PQI_FIRMWARE_FEATURE_AIO_WRITE_RAID_0 7 #define PQI_FIRMWARE_FEATURE_AIO_WRITE_RAID_1_10 8 #define PQI_FIRMWARE_FEATURE_AIO_WRITE_RAID_5_50 9 #define PQI_FIRMWARE_FEATURE_AIO_WRITE_RAID_6_60 10 #define PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE 11 #define PQI_FIRMWARE_FEATURE_SATA_WWN_FOR_DEV_UNIQUE_ID 12 #define PQI_FIRMWARE_FEATURE_TIMEOUT_IN_RAID_IU_SUPPORT 13 #define PQI_FIRMWARE_FEATURE_TIMEOUT_IN_TMF_IU_SUPPORT 14 #define 
PQI_FIRMWARE_FEATURE_MAXIMUM 14 #define CTRLR_HEARTBEAT_CNT(softs) \ LE_64(PCI_MEM_GET64(softs, softs->heartbeat_counter_abs_addr, softs->heartbeat_counter_off)) #define PQI_HEARTBEAT_TIMEOUT_SEC (10) /* 10 sec interval */ #define PQI_HOST_WELLNESS_TIMEOUT_SEC (24*3600) /* pqi-2r00a table 36 */ #define PQI_ADMIN_QUEUE_MSIX_DISABLE (0x80000000) #define PQI_ADMIN_QUEUE_MSIX_ENABLE (0 << 31) #define PQI_ADMIN_QUEUE_CONF_FUNC_CREATE_Q_PAIR 0x01 #define PQI_ADMIN_QUEUE_CONF_FUNC_DEL_Q_PAIR 0x02 #define PQI_ADMIN_QUEUE_CONF_FUNC_STATUS_IDLE 0x00 #define PQISRC_ADMIN_QUEUE_CREATE_TIMEOUT 1000 /* in miLLI sec, 1 sec, 100 ms is standard */ #define PQISRC_ADMIN_QUEUE_DELETE_TIMEOUT 100 /* 100 ms is standard */ #define PQISRC_ADMIN_CMD_RESP_TIMEOUT 3000 /* 3 sec */ #define PQISRC_RAIDPATH_CMD_TIMEOUT 30000 /* 30 sec */ #define REPORT_PQI_DEV_CAP_DATA_BUF_SIZE sizeof(pqi_dev_cap_t) #define REPORT_MANUFACTURER_INFO_DATA_BUF_SIZE 0x80 /* Data buffer size specified in bytes 0-1 of data buffer. 128 bytes. */ /* PQI IUs */ /* Admin IU request length not including header. */ #define PQI_STANDARD_IU_LENGTH 0x003C /* 60 bytes. */ #define PQI_IU_TYPE_GENERAL_ADMIN_REQUEST 0x60 #define PQI_IU_TYPE_GENERAL_ADMIN_RESPONSE 0xe0 /* PQI / Vendor specific IU */ #define PQI_FUNCTION_REPORT_DEV_CAP 0x00 #define PQI_REQUEST_IU_RAID_TASK_MANAGEMENT 0x13 #define PQI_IU_TYPE_RAID_PATH_IO_REQUEST 0x14 #define PQI_IU_TYPE_AIO_PATH_IO_REQUEST 0x15 #define PQI_REQUEST_IU_AIO_TASK_MANAGEMENT 0x16 #define PQI_REQUEST_IU_GENERAL_ADMIN 0x60 #define PQI_REQUEST_IU_REPORT_VENDOR_EVENT_CONFIG 0x72 #define PQI_REQUEST_IU_SET_VENDOR_EVENT_CONFIG 0x73 #define PQI_REQUEST_IU_VENDOR_GENERAL 0x75 #define PQI_RESPONSE_IU_GENERAL_MANAGEMENT 0x81 #define PQI_RESPONSE_IU_TASK_MANAGEMENT 0x93 #define PQI_RESPONSE_IU_GENERAL_ADMIN 0xe0 #define PQI_RESPONSE_IU_RAID_PATH_IO_SUCCESS 0xf0 #define PQI_RESPONSE_IU_AIO_PATH_IO_SUCCESS 0xf1 #define PQI_RESPONSE_IU_RAID_PATH_IO_ERROR 0xf2 #define PQI_RESPONSE_IU_AIO_PATH_IO_ERROR 0xf3 #define PQI_RESPONSE_IU_AIO_PATH_IS_OFF 0xf4 #define PQI_REQUEST_IU_ACKNOWLEDGE_VENDOR_EVENT 0xf6 #define PQI_RESPONSE_IU_VENDOR_GENERAL 0xf7 #define PQI_REQUEST_HEADER_LENGTH 4 #define PQI_FUNCTION_CREATE_OPERATIONAL_IQ 0x10 #define PQI_FUNCTION_CREATE_OPERATIONAL_OQ 0x11 #define PQI_FUNCTION_DELETE_OPERATIONAL_IQ 0x12 #define PQI_FUNCTION_DELETE_OPERATIONAL_OQ 0x13 #define PQI_FUNCTION_CHANGE_OPERATIONAL_IQ_PROP 0x14 #define PQI_CHANGE_OP_IQ_PROP_ASSIGN_AIO 1 #define PQI_DEFAULT_IB_QUEUE 0 #define PQI_VENDOR_GENERAL_CONFIG_TABLE_UPDATE 0 #define PQI_VENDOR_RESPONSE_IU_SUCCESS 0 #define PQI_VENDOR_RESPONSE_IU_UNSUCCESS 1 #define PQI_VENDOR_RESPONSE_IU_INVALID_PARAM 2 #define PQI_VENDOR_RESPONSE_IU_INSUFF_RESRC 3 /* Interface macros */ #define GET_FW_STATUS(softs) \ (PCI_MEM_GET32(softs, &softs->ioa_reg->scratchpad3_fw_status, LEGACY_SIS_OMR)) #define SIS_IS_KERNEL_PANIC(softs) \ (GET_FW_STATUS(softs) & PQI_CTRL_KERNEL_PANIC) #define SIS_IS_KERNEL_UP(softs) \ (GET_FW_STATUS(softs) & PQI_CTRL_KERNEL_UP_AND_RUNNING) #define PQI_GET_CTRL_MODE(softs) \ (PCI_MEM_GET32(softs, &softs->ioa_reg->scratchpad0, LEGACY_SIS_SCR0)) #define PQI_SAVE_CTRL_MODE(softs, mode) \ PCI_MEM_PUT32(softs, &softs->ioa_reg->scratchpad0, LEGACY_SIS_SCR0, mode) #define PQISRC_MAX_TARGETID 1024 #define PQISRC_MAX_TARGETLUN 64 /* Vendor specific IU Type for Event config Cmds */ #define PQI_REQUEST_IU_REPORT_EVENT_CONFIG 0x72 #define PQI_REQUEST_IU_SET_EVENT_CONFIG 0x73 #define PQI_REQUEST_IU_ACKNOWLEDGE_VENDOR_EVENT 0xf6 #define 
PQI_RESPONSE_IU_GENERAL_MANAGEMENT 0x81 #define PQI_MANAGEMENT_CMD_RESP_TIMEOUT 3000 #define PQISRC_EVENT_ACK_RESP_TIMEOUT 1000 /* Supported Event types by controller */ #define PQI_NUM_SUPPORTED_EVENTS 6 #define PQI_EVENT_TYPE_HOTPLUG 0x1 #define PQI_EVENT_TYPE_HARDWARE 0x2 #define PQI_EVENT_TYPE_PHYSICAL_DEVICE 0x4 #define PQI_EVENT_TYPE_LOGICAL_DEVICE 0x5 #define PQI_EVENT_TYPE_AIO_STATE_CHANGE 0xfd #define PQI_EVENT_TYPE_AIO_CONFIG_CHANGE 0xfe /* for indexing into the pending_events[] field of struct pqisrc_softstate */ #define PQI_EVENT_HOTPLUG 0 #define PQI_EVENT_HARDWARE 1 #define PQI_EVENT_PHYSICAL_DEVICE 2 #define PQI_EVENT_LOGICAL_DEVICE 3 #define PQI_EVENT_AIO_STATE_CHANGE 4 #define PQI_EVENT_AIO_CONFIG_CHANGE 5 /* Device flags */ #define PQISRC_DFLAG_VALID (1 << 0) #define PQISRC_DFLAG_CONFIGURING (1 << 1) #define MAX_EMBEDDED_SG_IN_FIRST_IU 4 #define MAX_EMBEDDED_SG_IN_IU 8 #define SG_FLAG_LAST 0x40000000 #define SG_FLAG_CHAIN 0x80000000 #define IN_PQI_RESET(softs) (softs->ctlr_state & PQI_BUS_RESET) #define DEV_GONE(dev) (!dev || (dev->invalid == true)) #define IS_AIO_PATH(dev) (dev->aio_enabled) #define IS_RAID_PATH(dev) (!dev->aio_enabled) #define DEVICE_RESET(dvp) (dvp->reset_in_progress) /* SOP data direction flags */ #define SOP_DATA_DIR_NONE 0x00 #define SOP_DATA_DIR_FROM_DEVICE 0x01 #define SOP_DATA_DIR_TO_DEVICE 0x02 #define SOP_DATA_DIR_BIDIRECTIONAL 0x03 #define SOP_PARTIAL_DATA_BUFFER 0x04 #define PQISRC_DMA_VALID (1 << 0) #define PQISRC_CMD_NO_INTR (1 << 1) #define SOP_TASK_ATTRIBUTE_SIMPLE 0 #define SOP_TASK_ATTRIBUTE_HEAD_OF_QUEUE 1 #define SOP_TASK_ATTRIBUTE_ORDERED 2 #define SOP_TASK_ATTRIBUTE_ACA 4 #define SOP_TASK_MANAGEMENT_FUNCTION_COMPLETE 0x0 #define SOP_TASK_MANAGEMENT_FUNCTION_REJECTED 0x4 #define SOP_TASK_MANAGEMENT_FUNCTION_FAILED 0x5 #define SOP_TASK_MANAGEMENT_FUNCTION_SUCCEEDED 0x8 #define SOP_TASK_MANAGEMENT_FUNCTION_ABORT_TASK 0x01 #define SOP_TASK_MANAGEMENT_FUNCTION_ABORT_TASK_SET 0x02 #define SOP_TASK_MANAGEMENT_LUN_RESET 0x8 /* Additional CDB bytes */ #define PQI_ADDITIONAL_CDB_BYTES_0 0 /* 16 byte CDB */ #define PQI_ADDITIONAL_CDB_BYTES_4 1 /* 20 byte CDB */ #define PQI_ADDITIONAL_CDB_BYTES_8 2 /* 24 byte CDB */ #define PQI_ADDITIONAL_CDB_BYTES_12 3 /* 28 byte CDB */ #define PQI_ADDITIONAL_CDB_BYTES_16 4 /* 32 byte CDB */ #define PQI_PROTOCOL_SOP 0x0 #define PQI_AIO_STATUS_GOOD 0x0 #define PQI_AIO_STATUS_CHECK_CONDITION 0x2 #define PQI_AIO_STATUS_CONDITION_MET 0x4 #define PQI_AIO_STATUS_DEVICE_BUSY 0x8 #define PQI_AIO_STATUS_INT_GOOD 0x10 #define PQI_AIO_STATUS_INT_COND_MET 0x14 #define PQI_AIO_STATUS_RESERV_CONFLICT 0x18 #define PQI_AIO_STATUS_CMD_TERMINATED 0x22 #define PQI_AIO_STATUS_QUEUE_FULL 0x28 #define PQI_AIO_STATUS_TASK_ABORTED 0x40 #define PQI_AIO_STATUS_UNDERRUN 0x51 #define PQI_AIO_STATUS_OVERRUN 0x75 /* Status when Target Failure */ #define PQI_AIO_STATUS_IO_ERROR 0x1 #define PQI_AIO_STATUS_IO_ABORTED 0x2 #define PQI_AIO_STATUS_IO_NO_DEVICE 0x3 #define PQI_AIO_STATUS_INVALID_DEVICE 0x4 #define PQI_AIO_STATUS_AIO_PATH_DISABLED 0xe /* Service Response */ #define PQI_AIO_SERV_RESPONSE_COMPLETE 0 #define PQI_AIO_SERV_RESPONSE_FAILURE 1 #define PQI_AIO_SERV_RESPONSE_TMF_COMPLETE 2 #define PQI_AIO_SERV_RESPONSE_TMF_SUCCEEDED 3 #define PQI_AIO_SERV_RESPONSE_TMF_REJECTED 4 #define PQI_AIO_SERV_RESPONSE_TMF_INCORRECT_LUN 5 #define PQI_TMF_WAIT_DELAY 10000000 /* 10 seconds */ #define PQI_RAID_STATUS_GOOD PQI_AIO_STATUS_GOOD #define PQI_RAID_STATUS_CHECK_CONDITION PQI_AIO_STATUS_CHECK_CONDITION #define PQI_RAID_STATUS_CONDITION_MET 
PQI_AIO_STATUS_CONDITION_MET #define PQI_RAID_STATUS_DEVICE_BUSY PQI_AIO_STATUS_DEVICE_BUSY #define PQI_RAID_STATUS_INT_GOOD PQI_AIO_STATUS_INT_GOOD #define PQI_RAID_STATUS_INT_COND_MET PQI_AIO_STATUS_INT_COND_MET #define PQI_RAID_STATUS_RESERV_CONFLICT PQI_AIO_STATUS_RESERV_CONFLICT #define PQI_RAID_STATUS_CMD_TERMINATED PQI_AIO_STATUS_CMD_TERMINATED #define PQI_RAID_STATUS_QUEUE_FULL PQI_AIO_STATUS_QUEUE_FULL #define PQI_RAID_STATUS_TASK_ABORTED PQI_AIO_STATUS_TASK_ABORTED #define PQI_RAID_STATUS_UNDERRUN PQI_AIO_STATUS_UNDERRUN #define PQI_RAID_STATUS_OVERRUN PQI_AIO_STATUS_OVERRUN /* VPD inquiry pages */ #define SCSI_VPD_SUPPORTED_PAGES 0x0 /* standard page */ #define SCSI_VPD_DEVICE_ID 0x83 /* standard page */ #define SA_VPD_PHYS_DEVICE_ID 0xc0 /* vendor-specific page */ #define SA_VPD_LV_DEVICE_GEOMETRY 0xc1 /* vendor-specific page */ #define SA_VPD_LV_IOACCEL_STATUS 0xc2 /* vendor-specific page */ #define SA_VPD_LV_STATUS 0xc3 /* vendor-specific page */ #define VPD_PAGE (1 << 8) /* logical volume states */ #define SA_LV_OK 0x0 #define SA_LV_FAILED 0x1 #define SA_LV_NOT_CONFIGURED 0x2 #define SA_LV_DEGRADED 0x3 #define SA_LV_READY_FOR_RECOVERY 0x4 #define SA_LV_UNDERGOING_RECOVERY 0x5 #define SA_LV_WRONG_PHYSICAL_DRIVE_REPLACED 0x6 #define SA_LV_PHYSICAL_DRIVE_CONNECTION_PROBLEM 0x7 #define SA_LV_HARDWARE_OVERHEATING 0x8 #define SA_LV_HARDWARE_HAS_OVERHEATED 0x9 #define SA_LV_UNDERGOING_EXPANSION 0xA #define SA_LV_NOT_AVAILABLE 0xb #define SA_LV_QUEUED_FOR_EXPANSION 0xc #define SA_LV_DISABLED_SCSI_ID_CONFLICT 0xd #define SA_LV_EJECTED 0xe #define SA_LV_UNDERGOING_ERASE 0xf #define SA_LV_UNDERGOING_RPI 0x12 #define SA_LV_PENDING_RPI 0x13 #define SA_LV_ENCRYPTED_NO_KEY 0x14 #define SA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER 0x15 #define SA_LV_UNDERGOING_ENCRYPTION 0x16 #define SA_LV_UNDERGOING_ENCRYPTION_REKEYING 0x17 #define SA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER 0x18 #define SA_LV_PENDING_ENCRYPTION 0x19 #define SA_LV_PENDING_ENCRYPTION_REKEYING 0x1a #define SA_LV_STATUS_VPD_UNSUPPORTED 0xff /* constants for flags field of ciss_vpd_logical_volume_status */ #define SA_LV_FLAGS_NO_HOST_IO 0x1 /* volume not available for */ /* * assume worst case: SATA queue depth of 31 minus 4 internal firmware commands */ #define PQI_PHYSICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH 27 /* 0 = no limit */ #define PQI_LOGICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH 0 #define PQI_LOG_EXT_QUEUE_DEPTH_ENABLED 0x20 #define PQI_LOG_EXT_QUEUE_ENABLE 0x56 #define MAX_RAW_M256_QDEPTH 32512 #define MAX_RAW_M16_QDEPTH 2032 #define PQI_PTRAID_UPDATE_ON_RESCAN_LUNS 0x80000000 #define RAID_CTLR_LUNID "\0\0\0\0\0\0\0\0" #define SA_CACHE_FLUSH 0x1 #define PQISRC_INQUIRY_TIMEOUT 30 #define SA_INQUIRY 0x12 #define SA_REPORT_LOG 0xc2 /* Report Logical LUNs */ #define SA_REPORT_PHYS 0xc3 /* Report Physical LUNs */ #define SA_CISS_READ 0xc0 #define SA_GET_RAID_MAP 0xc8 #define SCSI_SENSE_RESPONSE_70 0x70 #define SCSI_SENSE_RESPONSE_71 0x71 #define SCSI_SENSE_RESPONSE_72 0x72 #define SCSI_SENSE_RESPONSE_73 0x73 #define SA_REPORT_LOG_EXTENDED 0x1 #define SA_REPORT_PHYS_EXTENDED 0x2 #define SA_CACHE_FLUSH_BUF_LEN 4 #define GET_SCSI_SNO(cmd) (cmd->cmdId.serialNumber) #define REPORT_LUN_DEV_FLAG_AIO_ENABLED 0x8 #define PQI_MAX_TRANSFER_SIZE (4 * 1024U * 1024U) #define RAID_MAP_MAX_ENTRIES 1024 #define RAID_MAP_ENCRYPTION_ENABLED 0x1 #define PQI_PHYSICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH 27 #define ASC_LUN_NOT_READY 0x4 #define ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS 0x4 #define ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ 0x2 #define OBDR_SIG_OFFSET 43 
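/*
 * The OBDR_* defines locate the "$DR-10" signature at byte offset 43 of the
 * INQUIRY response; OBDR_SIG_LEN is therefore 6 and OBDR_TAPE_INQ_SIZE works
 * out to 43 + 6 = 49 bytes.
 */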
#define OBDR_TAPE_SIG "$DR-10" #define OBDR_SIG_LEN (sizeof(OBDR_TAPE_SIG) - 1) #define OBDR_TAPE_INQ_SIZE (OBDR_SIG_OFFSET + OBDR_SIG_LEN) #define IOACCEL_STATUS_BYTE 4 #define OFFLOAD_CONFIGURED_BIT 0x1 #define OFFLOAD_ENABLED_BIT 0x2 #define PQI_RAID_DATA_IN_OUT_GOOD 0x0 #define PQI_RAID_DATA_IN_OUT_UNDERFLOW 0x1 #define PQI_RAID_DATA_IN_OUT_BUFFER_ERROR 0x40 #define PQI_RAID_DATA_IN_OUT_BUFFER_OVERFLOW 0x41 #define PQI_RAID_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA 0x42 #define PQI_RAID_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE 0x43 #define PQI_RAID_DATA_IN_OUT_PCIE_FABRIC_ERROR 0x60 #define PQI_RAID_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT 0x61 #define PQI_RAID_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED 0x62 #define PQI_RAID_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ 0x63 #define PQI_RAID_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED 0x64 #define PQI_RAID_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST 0x65 #define PQI_RAID_DATA_IN_OUT_PCIE_ACS_VIOLATION 0x66 #define PQI_RAID_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED 0x67 #define PQI_RAID_DATA_IN_OUT_ERROR 0xf0 #define PQI_RAID_DATA_IN_OUT_PROTOCOL_ERROR 0xf1 #define PQI_RAID_DATA_IN_OUT_HARDWARE_ERROR 0xf2 #define PQI_RAID_DATA_IN_OUT_UNSOLICITED_ABORT 0xf3 #define PQI_RAID_DATA_IN_OUT_ABORTED 0xf4 #define PQI_RAID_DATA_IN_OUT_TIMEOUT 0xf5 #define PQI_PHYSICAL_DEVICE_BUS 0 #define PQI_RAID_VOLUME_BUS 1 #define PQI_HBA_BUS 2 #define PQI_EXTERNAL_RAID_VOLUME_BUS 3 #define PQI_MAX_BUS PQI_EXTERNAL_RAID_VOLUME_BUS #define TEST_UNIT_READY 0x00 #define SCSI_VPD_HEADER_LENGTH 64 #define PQI_MAX_MULTILUN 256 #define PQI_MAX_LOGICALS 64 #define PQI_MAX_PHYSICALS 1024 #define PQI_MAX_DEVICES (PQI_MAX_LOGICALS + PQI_MAX_PHYSICALS + 1) /* 1 for controller device entry */ #define PQI_MAX_EXT_TARGETS 32 #define PQI_CTLR_INDEX (PQI_MAX_DEVICES - 1) #define PQI_PD_INDEX(t) (t + PQI_MAX_LOGICALS) #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) #define MAX_TARGET_DEVICES 1024 #define PQI_NO_MEM 2 typedef enum pqisrc_device_status { DEVICE_NOT_FOUND, DEVICE_CHANGED, DEVICE_UNCHANGED, } device_status_t; #define SA_RAID_0 0 #define SA_RAID_4 1 #define SA_RAID_1 2 /* also used for RAID 10 */ #define SA_RAID_5 3 /* also used for RAID 50 */ #define SA_RAID_51 4 #define SA_RAID_6 5 /* also used for RAID 60 */ #define SA_RAID_ADM 6 /* also used for RAID 1+0 ADM */ #define SA_RAID_MAX SA_RAID_ADM #define SA_RAID_UNKNOWN 0xff #define BIT0 (1 << 0) #define BIT1 (1 << 1) #define BIT2 (1 << 2) #define BIT3 (1 << 3) #define BITS_PER_BYTE 8 /* BMIC commands */ #define BMIC_IDENTIFY_CONTROLLER 0x11 #define BMIC_IDENTIFY_PHYSICAL_DEVICE 0x15 #define BMIC_READ 0x26 #define BMIC_WRITE 0x27 #define BMIC_SENSE_CONTROLLER_PARAMETERS 0x64 #define BMIC_SENSE_SUBSYSTEM_INFORMATION 0x66 #define BMIC_CACHE_FLUSH 0xc2 #define BMIC_FLASH_FIRMWARE 0xf7 #define BMIC_WRITE_HOST_WELLNESS 0xa5 #define BMIC_SET_DIAGS_OPTIONS 0xf4 #define BMIC_SENSE_DIAGS_OPTIONS 0xf5 #define MASKED_DEVICE(lunid) ((lunid)[3] & 0xC0) #define BMIC_GET_LEVEL_2_BUS(lunid) ((lunid)[7] & 0x3F) #define BMIC_GET_LEVEL_TWO_TARGET(lunid) ((lunid)[6]) #define BMIC_GET_DRIVE_NUMBER(lunid) \ (((BMIC_GET_LEVEL_2_BUS((lunid)) - 1) << 8) + \ BMIC_GET_LEVEL_TWO_TARGET((lunid))) #define NON_DISK_PHYS_DEV(rle) \ (((reportlun_ext_entry_t *)(rle))->device_flags & 0x1) #define NO_TIMEOUT ((unsigned long) -1) #define BMIC_DEVICE_TYPE_SATA 0x1 /* No of IO slots required for internal requests */ #define PQI_RESERVED_IO_SLOTS_SYNC_REQUESTS 3 #define PQI_RESERVED_IO_SLOTS_TMF 1 #define PQI_RESERVED_IO_SLOTS_CNT (PQI_NUM_SUPPORTED_EVENTS + \ PQI_RESERVED_IO_SLOTS_TMF + \ 
PQI_RESERVED_IO_SLOTS_SYNC_REQUESTS) /* Defines for print flags */ #define PRINT_FLAG_HDR_COLUMN 0x0001 static inline uint16_t GET_LE16(const uint8_t *p) { return p[0] | p[1] << 8; } static inline uint32_t GET_LE32(const uint8_t *p) { return p[0] | p[1] << 8 | p[2] << 16 | p[3] << 24; } static inline uint64_t GET_LE64(const uint8_t *p) { return (((uint64_t)GET_LE32(p + 4) << 32) | GET_LE32(p)); } static inline uint16_t GET_BE16(const uint8_t *p) { return p[0] << 8 | p[1]; } static inline uint32_t GET_BE32(const uint8_t *p) { return p[0] << 24 | p[1] << 16 | p[2] << 8 | p[3]; } static inline uint64_t GET_BE64(const uint8_t *p) { return (((uint64_t)GET_BE32(p) << 32) | GET_BE32(p + 4)); } static inline void PUT_BE16(uint16_t val, uint8_t *p) { *p++ = val >> 8; *p++ = val; } static inline void PUT_BE32(uint32_t val, uint8_t *p) { PUT_BE16(val >> 16, p); PUT_BE16(val, p + 2); } static inline void PUT_BE64(uint64_t val, uint8_t *p) { PUT_BE32(val >> 32, p); PUT_BE32(val, p + 4); } #define OS_FREEBSD #define SIS_POLL_WAIT #define OS_ATTRIBUTE_PACKED __attribute__((__packed__)) #define OS_ATTRIBUTE_ALIGNED(n) __attribute__((aligned(n))) /* Management Interface */ #define CCISS_IOC_MAGIC 'C' #define SMARTPQI_IOCTL_BASE 'M' #define CCISS_GETDRIVVER _IOWR(SMARTPQI_IOCTL_BASE, 0, driver_info) #define CCISS_GETPCIINFO _IOWR(SMARTPQI_IOCTL_BASE, 1, pqi_pci_info_t) #define SMARTPQI_PASS_THRU _IOWR(SMARTPQI_IOCTL_BASE, 2, IOCTL_Command_struct) #define CCISS_PASSTHRU _IOWR('C', 210, IOCTL_Command_struct) #define CCISS_REGNEWD _IO(CCISS_IOC_MAGIC, 14) /*IOCTL pci_info structure */ typedef struct pqi_pci_info { unsigned char bus; unsigned char dev_fn; unsigned short domain; uint32_t board_id; uint32_t chip_id; }pqi_pci_info_t; typedef struct _driver_info { unsigned char major_version; unsigned long minor_version; unsigned char release_version; unsigned long build_revision; unsigned long max_targets; unsigned long max_io; unsigned long max_transfer_length; }driver_info, *pdriver_info; typedef uint8_t *passthru_buf_type_t; #define PQISRC_OS_VERSION 1 #define PQISRC_FEATURE_VERSION 4014 #define PQISRC_PATCH_VERSION 0 #define PQISRC_BUILD_VERSION 105 #define STR(s) # s #define PQISRC_VERSION(a, b, c, d) STR(a.b.c.d) #define PQISRC_DRIVER_VERSION PQISRC_VERSION(PQISRC_OS_VERSION, \ PQISRC_FEATURE_VERSION, \ PQISRC_PATCH_VERSION, \ PQISRC_BUILD_VERSION) /* End Management interface */ #ifdef ASSERT #undef ASSERT #endif /* *os_atomic64_cas-- * *Atomically read, compare, and conditionally write. *i.e. compare and swap. 
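 * (Illustrative usage, not part of the original comment: a lock-free
 *  increment retries the CAS until it succeeds, e.g.
 *      do {
 *          old = OS_ATOMIC64_READ(&counter);
 *      } while (!os_atomic64_cas(&counter, old, old + 1));
 *  where "counter" is a hypothetical OS_ATOMIC64_T variable.)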
*retval True On Success *retval False On Failure * */ static inline boolean_t os_atomic64_cas(volatile uint64_t* var, uint64_t old_val, uint64_t new_val) { return (atomic_cmpset_64(var, old_val, new_val)); } #define ASSERT(cond) {\ if (!(cond)) { \ printf("Assertion failed at file %s line %d\n",__FILE__,__LINE__); \ } \ } /* Atomic */ typedef volatile uint64_t OS_ATOMIC64_T; #define OS_ATOMIC64_READ(p) atomic_load_acq_64(p) #define OS_ATOMIC64_INIT(p,val) atomic_store_rel_64(p, val) /* 64-bit post atomic increment and decrement operations on value in pointer.*/ #define OS_ATOMIC64_DEC(p) (atomic_fetchadd_64(p, -1) - 1) #define OS_ATOMIC64_INC(p) (atomic_fetchadd_64(p, 1) + 1) #define PQI_MAX_MSIX 64 /* vectors */ #define PQI_MSI_CTX_SIZE sizeof(pqi_intr_ctx)+1 #define IS_POLLING_REQUIRED(softs) if (cold) {\ pqisrc_process_event_intr_src(softs, 0);\ pqisrc_process_response_queue(softs, 1);\ } #define OS_GET_TASK_ATTR(rcb) os_get_task_attr(rcb) #define OS_FW_HEARTBEAT_TIMER_INTERVAL (5) typedef struct PCI_ACC_HANDLE { bus_space_tag_t pqi_btag; bus_space_handle_t pqi_bhandle; } PCI_ACC_HANDLE_T; /* * Legacy SIS Register definitions for the Adaptec PMC SRC/SRCv/smartraid adapters. */ /* accessible via BAR0 */ #define LEGACY_SIS_IOAR 0x18 /* IOA->host interrupt register */ #define LEGACY_SIS_IDBR 0x20 /* inbound doorbell register */ #define LEGACY_SIS_IISR 0x24 /* inbound interrupt status register */ #define LEGACY_SIS_OIMR 0x34 /* outbound interrupt mask register */ #define LEGACY_SIS_ODBR_R 0x9c /* outbound doorbell register read */ #define LEGACY_SIS_ODBR_C 0xa0 /* outbound doorbell register clear */ #define LEGACY_SIS_SCR0 0xb0 /* scratchpad 0 */ #define LEGACY_SIS_OMR 0xbc /* outbound message register */ #define LEGACY_SIS_IQUE64_L 0xc0 /* inbound queue address 64-bit (low) */ #define LEGACY_SIS_IQUE64_H 0xc4 /* inbound queue address 64-bit (high)*/ #define LEGACY_SIS_ODBR_MSI 0xc8 /* MSI register for sync./AIF */ #define LEGACY_SIS_IQN_L 0xd0 /* inbound queue native mode (low) */ #define LEGACY_SIS_IQN_H 0xd4 /* inbound queue native mode (high)*/ #define LEGACY_SIS_MAILBOX 0x7fc60 /* mailbox (20 bytes) */ #define LEGACY_SIS_SRCV_MAILBOX 0x1000 /* mailbox (20 bytes) */ #define LEGACY_SIS_SRCV_OFFSET_MAILBOX_7 0x101C /* mailbox 7 register offset */ #define LEGACY_SIS_ODR_SHIFT 12 /* outbound doorbell shift */ #define LEGACY_SIS_IDR_SHIFT 9 /* inbound doorbell shift */ /* * PQI Register definitions for the smartraid adapters */ /* accessible via BAR0 */ #define PQI_SIGNATURE 0x4000 #define PQI_ADMINQ_CONFIG 0x4008 #define PQI_ADMINQ_CAP 0x4010 #define PQI_LEGACY_INTR_STATUS 0x4018 #define PQI_LEGACY_INTR_MASK_SET 0x401C #define PQI_LEGACY_INTR_MASK_CLR 0x4020 #define PQI_DEV_STATUS 0x4040 #define PQI_ADMIN_IBQ_PI_OFFSET 0x4048 #define PQI_ADMIN_OBQ_CI_OFFSET 0x4050 #define PQI_ADMIN_IBQ_ELEM_ARRAY_ADDR 0x4058 #define PQI_ADMIN_OBQ_ELEM_ARRAY_ADDR 0x4060 #define PQI_ADMIN_IBQ_CI_ADDR 0x4068 #define PQI_ADMIN_OBQ_PI_ADDR 0x4070 #define PQI_ADMINQ_PARAM 0x4078 #define PQI_DEV_ERR 0x4080 #define PQI_DEV_ERR_DETAILS 0x4088 #define PQI_DEV_RESET 0x4090 #define PQI_POWER_ACTION 0x4094 /* Busy wait micro seconds */ #define OS_BUSYWAIT(x) DELAY(x) #define OS_SLEEP(timeout) \ DELAY(timeout); /* TMF request timeout is 600 Sec */ #define OS_TMF_TIMEOUT_SEC (10 * 60) #define LE_16(x) htole16(x) #define LE_32(x) htole32(x) #define LE_64(x) htole64(x) #define BE_16(x) htobe16(x) #define BE_32(x) htobe32(x) #define BE_64(x) htobe64(x) #define PQI_HWIF_SRCV 0 #define PQI_HWIF_UNKNOWN -1 #define 
SMART_STATE_SUSPEND (1<<0) #define SMART_STATE_UNUSED0 (1<<1) #define SMART_STATE_INTERRUPTS_ON (1<<2) #define SMART_STATE_AIF_SLEEPER (1<<3) #define SMART_STATE_RESET (1<<4) #define PQI_FLAG_BUSY (1<<0) #define PQI_MSI_ENABLED (1<<1) #define PQI_SIM_REGISTERED (1<<2) #define PQI_MTX_INIT (1<<3) #define PQI_CMD_MAPPED (1<<2) /* Interrupt context to get oq_id */ typedef struct pqi_intr_ctx { int oq_id; device_t pqi_dev; }pqi_intr_ctx_t; typedef uint8_t os_dev_info_t; typedef struct OS_SPECIFIC { device_t pqi_dev; struct resource *pqi_regs_res0; /* reg. if. window */ int pqi_regs_rid0; /* resource ID */ bus_dma_tag_t pqi_parent_dmat; /* parent DMA tag */ bus_dma_tag_t pqi_buffer_dmat; /* controller hardware interface */ int pqi_hwif; struct resource *pqi_irq[PQI_MAX_MSIX]; /* interrupt */ int pqi_irq_rid[PQI_MAX_MSIX]; void *intrcookie[PQI_MAX_MSIX]; bool intr_registered[PQI_MAX_MSIX]; bool msi_enabled; /* MSI/MSI-X enabled */ pqi_intr_ctx_t *msi_ctx; int oq_id; int pqi_state; uint32_t pqi_flags; struct mtx cam_lock; struct mtx map_lock; int mtx_init; int sim_registered; struct cam_devq *devq; struct cam_sim *sim; struct cam_path *path; struct task event_task; struct cdev *cdev; struct callout wellness_periodic; /* periodic event handling */ struct callout heartbeat_timeout_id; /* heart beat event handling */ } OS_SPECIFIC_T; typedef bus_addr_t dma_addr_t; /* Register access macros */ #define PCI_MEM_GET32( _softs, _absaddr, _offset ) \ bus_space_read_4(_softs->pci_mem_handle.pqi_btag, \ _softs->pci_mem_handle.pqi_bhandle, _offset) #if defined(__i386__) #define PCI_MEM_GET64( _softs, _absaddr, _offset ) ({ \ (uint64_t)bus_space_read_4(_softs->pci_mem_handle.pqi_btag, \ _softs->pci_mem_handle.pqi_bhandle, _offset) + \ ((uint64_t)bus_space_read_4(_softs->pci_mem_handle.pqi_btag, \ _softs->pci_mem_handle.pqi_bhandle, _offset + 4) << 32); \ }) #else #define PCI_MEM_GET64(_softs, _absaddr, _offset ) \ bus_space_read_8(_softs->pci_mem_handle.pqi_btag, \ _softs->pci_mem_handle.pqi_bhandle, _offset) #endif #define PCI_MEM_PUT32( _softs, _absaddr, _offset, _val ) \ bus_space_write_4(_softs->pci_mem_handle.pqi_btag, \ _softs->pci_mem_handle.pqi_bhandle, _offset, _val) #if defined(__i386__) #define PCI_MEM_PUT64( _softs, _absaddr, _offset, _val ) \ bus_space_write_4(_softs->pci_mem_handle.pqi_btag, \ _softs->pci_mem_handle.pqi_bhandle, _offset, _val); \ bus_space_write_4(_softs->pci_mem_handle.pqi_btag, \ _softs->pci_mem_handle.pqi_bhandle, _offset + 4, _val >> 32); #else #define PCI_MEM_PUT64( _softs, _absaddr, _offset, _val ) \ bus_space_write_8(_softs->pci_mem_handle.pqi_btag, \ _softs->pci_mem_handle.pqi_bhandle, _offset, _val) #endif #define PCI_MEM_GET_BUF(_softs, _absaddr, _offset, buf, size) \ bus_space_read_region_1(_softs->pci_mem_handle.pqi_btag,\ _softs->pci_mem_handle.pqi_bhandle, _offset, buf, size) /* Lock */ typedef struct mtx OS_LOCK_T; typedef struct sema OS_SEMA_LOCK_T; #define OS_PQILOCK_T OS_LOCK_T #define OS_ACQUIRE_SPINLOCK(_lock) mtx_lock_spin(_lock) #define OS_RELEASE_SPINLOCK(_lock) mtx_unlock_spin(_lock) #define OS_INIT_PQILOCK(_softs,_lock,_lockname) os_init_spinlock(_softs,_lock,_lockname) #define OS_UNINIT_PQILOCK(_lock) os_uninit_spinlock(_lock) #define PQI_LOCK(_lock) OS_ACQUIRE_SPINLOCK(_lock) #define PQI_UNLOCK(_lock) OS_RELEASE_SPINLOCK(_lock) #define OS_GET_CDBP(rcb) \ ((rcb->cm_ccb->ccb_h.flags & CAM_CDB_POINTER) ? 
rcb->cm_ccb->csio.cdb_io.cdb_ptr : rcb->cm_ccb->csio.cdb_io.cdb_bytes) #define GET_SCSI_BUFFLEN(rcb) (rcb->cm_ccb->csio.dxfer_len) #define IS_OS_SCSICMD(rcb) (rcb && !rcb->tm_req && rcb->cm_ccb) #define OS_GET_IO_QINDEX(softs,rcb) curcpu % softs->num_op_obq #define OS_GET_IO_RESP_QID(softs,rcb) (softs->op_ob_q[(OS_GET_IO_QINDEX(softs,rcb))].q_id) #define OS_GET_IO_REQ_QINDEX(softs,rcb) OS_GET_IO_QINDEX(softs,rcb) #define OS_GET_TMF_RESP_QID OS_GET_IO_RESP_QID #define OS_GET_TMF_REQ_QINDEX OS_GET_IO_REQ_QINDEX /* check request type */ #define is_internal_req(rcb) (!(rcb->cm_ccb)) #define os_io_memcpy(dest, src, len) memcpy(dest, src, len) /* sg elements addr, len, flags */ #define OS_GET_IO_SG_COUNT(rcb) rcb->nseg #define OS_GET_IO_SG_ADDR(rcb,i) rcb->sgt[i].addr #define OS_GET_IO_SG_LEN(rcb,i) rcb->sgt[i].len /* scsi commands used in pqilib for RAID bypass*/ #define SCMD_READ_6 READ_6 #define SCMD_WRITE_6 WRITE_6 #define SCMD_READ_10 READ_10 #define SCMD_WRITE_10 WRITE_10 #define SCMD_READ_12 READ_12 #define SCMD_WRITE_12 WRITE_12 #define SCMD_READ_16 READ_16 #define SCMD_WRITE_16 WRITE_16 /* FreeBSD status macros */ #define BSD_SUCCESS 0 /* Debug facility */ -#define PQISRC_LOG_LEVEL 0x60 - -static int logging_level = PQISRC_LOG_LEVEL; - #define PQISRC_FLAGS_MASK 0x0000ffff #define PQISRC_FLAGS_INIT 0x00000001 #define PQISRC_FLAGS_INFO 0x00000002 #define PQISRC_FLAGS_FUNC 0x00000004 #define PQISRC_FLAGS_TRACEIO 0x00000008 #define PQISRC_FLAGS_DISC 0x00000010 #define PQISRC_FLAGS_WARN 0x00000020 #define PQISRC_FLAGS_ERROR 0x00000040 #define PQISRC_FLAGS_NOTE 0x00000080 +#define PQISRC_LOG_LEVEL (PQISRC_FLAGS_WARN | PQISRC_FLAGS_ERROR | PQISRC_FLAGS_NOTE) + +static int logging_level = PQISRC_LOG_LEVEL; #define DBG_INIT(fmt,args...) \ do { \ if (logging_level & PQISRC_FLAGS_INIT) { \ printf("[INIT]:[ %s ] [ %d ]"fmt,__func__,__LINE__,##args); \ } \ }while(0); #define DBG_INFO(fmt,args...) \ do { \ if (logging_level & PQISRC_FLAGS_INFO) { \ printf("[INFO]:[ %s ] [ %d ]"fmt,__func__,__LINE__,##args); \ } \ }while(0); #define DBG_FUNC(fmt,args...) \ do { \ if (logging_level & PQISRC_FLAGS_FUNC) { \ printf("[FUNC]:[ %s ] [ %d ]"fmt,__func__,__LINE__,##args); \ } \ }while(0); #define DBG_TRACEIO(fmt,args...) \ do { \ if (logging_level & PQISRC_FLAGS_TRACEIO) { \ printf("[TRACEIO]:[ %s ] [ %d ]"fmt,__func__,__LINE__,##args); \ } \ }while(0); #define DBG_DISC(fmt,args...) \ do { \ if (logging_level & PQISRC_FLAGS_DISC) { \ printf("[DISC]:[ %s ] [ %d ]"fmt,__func__,__LINE__,##args); \ } \ }while(0); #define DBG_WARN(fmt,args...) \ do { \ if (logging_level & PQISRC_FLAGS_WARN) { \ printf("[WARN]:[%u:%u.%u][CPU %d][%s][%d]:"fmt,softs->bus_id,softs->device_id,softs->func_id,curcpu,__func__,__LINE__,##args);\ } \ }while(0); #define DBG_ERR(fmt,args...) \ do { \ if (logging_level & PQISRC_FLAGS_ERROR) { \ printf("[ERROR]::[%u:%u.%u][CPU %d][%s][%d]:"fmt,softs->bus_id,softs->device_id,softs->func_id,curcpu,__func__,__LINE__,##args); \ } \ }while(0); #define DBG_IO(fmt,args...) \ do { \ if (logging_level & PQISRC_FLAGS_TRACEIO) { \ printf("[IO]:[ %s ] [ %d ]"fmt,__func__,__LINE__,##args); \ } \ }while(0); #define DBG_ERR_BTL(device,fmt,args...) \ do { \ if (logging_level & PQISRC_FLAGS_ERROR) { \ printf("[ERROR]::[%u:%u.%u][%u,%u,%u][CPU %d][%s][%d]:"fmt, softs->bus_id, softs->device_id, softs->func_id, device->bus, device->target, device->lun,curcpu,__func__,__LINE__,##args); \ } \ }while(0); #define DBG_WARN_BTL(device,fmt,args...) 
\ do { \ if (logging_level & PQISRC_FLAGS_WARN) { \ printf("[WARN]:[%u:%u.%u][%u,%u,%u][CPU %d][%s][%d]:"fmt, softs->bus_id, softs->device_id, softs->func_id, device->bus, device->target, device->lun,curcpu,__func__,__LINE__,##args);\ } \ }while(0); #define DBG_NOTE(fmt,args...) \ do { \ if (logging_level & PQISRC_FLAGS_NOTE) { \ printf("[INFO]:[ %s ] [ %d ]"fmt,__func__,__LINE__,##args); \ } \ }while(0); #endif /* _PQI_DEFINES_H */ diff --git a/sys/dev/smartpqi/smartpqi_helper.c b/sys/dev/smartpqi/smartpqi_helper.c index b49fb2d5a718..45d9fca071d3 100644 --- a/sys/dev/smartpqi/smartpqi_helper.c +++ b/sys/dev/smartpqi/smartpqi_helper.c @@ -1,487 +1,532 @@ /*- * Copyright 2016-2021 Microchip Technology, Inc. and/or its subsidiaries. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* $FreeBSD$ */ #include "smartpqi_includes.h" /* read and modify controller diagnostic option - PQI_PTRAID_UPDATE_ON_RESCAN_LUNS */ void pqisrc_ctrl_diagnostic_options(pqisrc_softstate_t *softs) { int ret = PQI_STATUS_SUCCESS; uint32_t diags_options = 0; pqisrc_raid_req_t request; DBG_NOTE("IN\n"); memset(&request, 0, sizeof(request)); /* read diags options of controller */ ret = pqisrc_build_send_raid_request(softs, &request, (void*)&diags_options, sizeof(diags_options), BMIC_SENSE_DIAGS_OPTIONS, 0, (uint8_t *)RAID_CTLR_LUNID, NULL); if (ret != PQI_STATUS_SUCCESS) { DBG_WARN("Request failed for BMIC Sense Diags Option command." "ret:%d\n",ret); return; } DBG_NOTE("diags options data after read: %#x\n",diags_options); diags_options |= PQI_PTRAID_UPDATE_ON_RESCAN_LUNS; DBG_NOTE("diags options data to write: %#x\n",diags_options); memset(&request, 0, sizeof(request)); /* write specified diags options to controller */ ret = pqisrc_build_send_raid_request(softs, &request, (void*)&diags_options, sizeof(diags_options), BMIC_SET_DIAGS_OPTIONS, 0, (uint8_t *)RAID_CTLR_LUNID, NULL); if (ret != PQI_STATUS_SUCCESS) DBG_WARN("Request failed for BMIC Set Diags Option command." "ret:%d\n",ret); #if 0 diags_options = 0; memset(&request, 0, sizeof(request)); ret = pqisrc_build_send_raid_request(softs, &request, (void*)&diags_options, sizeof(diags_options), BMIC_SENSE_DIAGS_OPTIONS, 0, (uint8_t *)RAID_CTLR_LUNID, NULL); if (ret != PQI_STATUS_SUCCESS) DBG_WARN("Request failed for BMIC Sense Diags Option command." 
"ret:%d\n",ret); DBG_NOTE("diags options after re-read: %#x\n",diags_options); #endif DBG_NOTE("OUT\n"); } /* * Function used to validate the adapter health. */ boolean_t pqisrc_ctrl_offline(pqisrc_softstate_t *softs) { DBG_FUNC("IN\n"); DBG_FUNC("OUT\n"); return !softs->ctrl_online; } /* Function used set/clear legacy INTx bit in Legacy Interrupt INTx * mask clear pqi register */ void pqisrc_configure_legacy_intx(pqisrc_softstate_t *softs, boolean_t enable_intx) { uint32_t intx_mask; uint32_t *reg_addr __unused; DBG_FUNC("IN\n"); if (enable_intx) reg_addr = &softs->pqi_reg->legacy_intr_mask_clr; else reg_addr = &softs->pqi_reg->legacy_intr_mask_set; intx_mask = PCI_MEM_GET32(softs, reg_addr, PQI_LEGACY_INTR_MASK_CLR); intx_mask |= PQISRC_LEGACY_INTX_MASK; PCI_MEM_PUT32(softs, reg_addr, PQI_LEGACY_INTR_MASK_CLR ,intx_mask); DBG_FUNC("OUT\n"); } /* * Function used to take exposed devices to OS as offline. */ void pqisrc_take_devices_offline(pqisrc_softstate_t *softs) { pqi_scsi_dev_t *device = NULL; int i,j; DBG_FUNC("IN\n"); for(i = 0; i < PQI_MAX_DEVICES; i++) { for(j = 0; j < PQI_MAX_MULTILUN; j++) { if(softs->device_list[i][j] == NULL) continue; device = softs->device_list[i][j]; pqisrc_remove_device(softs, device); } } DBG_FUNC("OUT\n"); } /* * Function used to take adapter offline. */ void pqisrc_take_ctrl_offline(pqisrc_softstate_t *softs) { DBG_FUNC("IN\n"); softs->ctrl_online = false; int lockupcode = 0; if (SIS_IS_KERNEL_PANIC(softs)) { lockupcode = PCI_MEM_GET32(softs, &softs->ioa_reg->mb[7], LEGACY_SIS_SRCV_OFFSET_MAILBOX_7); DBG_ERR("Controller FW is not runnning, Lockup code = %x\n", lockupcode); } else { pqisrc_trigger_nmi_sis(softs); } os_complete_outstanding_cmds_nodevice(softs); pqisrc_wait_for_rescan_complete(softs); pqisrc_take_devices_offline(softs); DBG_FUNC("OUT\n"); } /* * Timer handler for the adapter heart-beat. */ void pqisrc_heartbeat_timer_handler(pqisrc_softstate_t *softs) { uint8_t take_offline = false; DBG_FUNC("IN\n"); if (CTRLR_HEARTBEAT_CNT(softs) == softs->prev_heartbeat_count) { take_offline = true; goto take_ctrl_offline; } softs->prev_heartbeat_count = CTRLR_HEARTBEAT_CNT(softs); DBG_INFO("CTRLR_HEARTBEAT_CNT(softs) = %lx \ softs->prev_heartbeat_count = %lx\n", CTRLR_HEARTBEAT_CNT(softs), softs->prev_heartbeat_count); take_ctrl_offline: if (take_offline){ DBG_ERR("controller is offline\n"); pqisrc_take_ctrl_offline(softs); os_stop_heartbeat_timer(softs); } DBG_FUNC("OUT\n"); } /* * Conditional variable management routine for internal commands. */ int pqisrc_wait_on_condition(pqisrc_softstate_t *softs, rcb_t *rcb, uint32_t timeout_in_msec) { DBG_FUNC("IN\n"); int ret = PQI_STATUS_SUCCESS; /* 1 msec = 500 usec * 2 */ uint32_t loop_cnt = timeout_in_msec * 2; uint32_t i = 0; while (rcb->req_pending == true) { OS_SLEEP(500); /* Micro sec */ /* Polling needed for FreeBSD : since ithread routine is not scheduled * during bootup, we could use polling until interrupts are * enabled (using 'if (cold)'to check for the boot time before * interrupts are enabled). */ IS_POLLING_REQUIRED(softs); if ((timeout_in_msec != TIMEOUT_INFINITE) && (i++ == loop_cnt)) { DBG_ERR("ERR: Requested cmd timed out !!!\n"); ret = PQI_STATUS_TIMEOUT; rcb->timedout = true; break; } if (pqisrc_ctrl_offline(softs)) { DBG_ERR("Controller is Offline"); ret = PQI_STATUS_FAILURE; break; } } rcb->req_pending = true; DBG_FUNC("OUT\n"); return ret; } /* Function used to validate the device wwid. 
*/ boolean_t pqisrc_device_equal(pqi_scsi_dev_t *dev1, pqi_scsi_dev_t *dev2) { return dev1->wwid == dev2->wwid; } /* Function used to validate the device scsi3addr. */ boolean_t pqisrc_scsi3addr_equal(uint8_t *scsi3addr1, uint8_t *scsi3addr2) { return memcmp(scsi3addr1, scsi3addr2, 8) == 0; } /* Function used to validate hba_lunid */ boolean_t pqisrc_is_hba_lunid(uint8_t *scsi3addr) { return pqisrc_scsi3addr_equal(scsi3addr, (uint8_t*)RAID_CTLR_LUNID); } /* Function used to validate type of device */ boolean_t pqisrc_is_logical_device(pqi_scsi_dev_t *device) { return !device->is_physical_device; } /* Function used to sanitize inquiry string */ void pqisrc_sanitize_inquiry_string(unsigned char *s, int len) { boolean_t terminated = false; DBG_FUNC("IN\n"); for (; len > 0; (--len, ++s)) { if (*s == 0) terminated = true; if (terminated || *s < 0x20 || *s > 0x7e) *s = ' '; } DBG_FUNC("OUT\n"); } static char *raid_levels[] = { "RAID 0", "RAID 4", "RAID 1(1+0)", "RAID 5", "RAID 5+1", "RAID ADG", "RAID 1(ADM)", }; /* Get the RAID level from the index */ char * pqisrc_raidlevel_to_string(uint8_t raid_level) { DBG_FUNC("IN\n"); if (raid_level < ARRAY_SIZE(raid_levels)) return raid_levels[raid_level]; DBG_FUNC("OUT\n"); return " "; } /* Debug routine for displaying device info */ -void -pqisrc_display_device_info(pqisrc_softstate_t *softs, +void pqisrc_display_device_info(pqisrc_softstate_t *softs, char *action, pqi_scsi_dev_t *device) { - DBG_INFO( "%s scsi BTL %d:%d:%d: %.8s %.16s %-12s SSDSmartPathCap%c En%c Exp%c qd=%d\n", + if (device->is_physical_device) { + DBG_NOTE("%s scsi BTL %d:%d:%d: %.8s %.16s %-12s " + "SSDSmartPathCap%c En%c Exp%c qd=%d\n", action, device->bus, device->target, device->lun, device->vendor, device->model, - pqisrc_raidlevel_to_string(device->raid_level), + "Physical", device->offload_config ? '+' : '-', device->offload_enabled_pending ? '+' : '-', device->expose_device ? '+' : '-', device->queue_depth); + } else if (device->devtype == RAID_DEVICE) { + DBG_NOTE("%s scsi BTL %d:%d:%d: %.8s %.16s %-12s " + "SSDSmartPathCap%c En%c Exp%c qd=%d\n", + action, + device->bus, + device->target, + device->lun, + device->vendor, + device->model, + "Controller", + device->offload_config ? '+' : '-', + device->offload_enabled_pending ? '+' : '-', + device->expose_device ? '+' : '-', + device->queue_depth); + } else if (device->devtype == CONTROLLER_DEVICE) { + DBG_NOTE("%s scsi BTL %d:%d:%d: %.8s %.16s %-12s " + "SSDSmartPathCap%c En%c Exp%c qd=%d\n", + action, + device->bus, + device->target, + device->lun, + device->vendor, + device->model, + "External", + device->offload_config ? '+' : '-', + device->offload_enabled_pending ? '+' : '-', + device->expose_device ? '+' : '-', + device->queue_depth); + } else { + DBG_NOTE("%s scsi BTL %d:%d:%d: %.8s %.16s %-12s " + "SSDSmartPathCap%c En%c Exp%c qd=%d devtype=%d\n", + action, + device->bus, + device->target, + device->lun, + device->vendor, + device->model, + pqisrc_raidlevel_to_string(device->raid_level), + device->offload_config ? '+' : '-', + device->offload_enabled_pending ? '+' : '-', + device->expose_device ? 
'+' : '-', + device->queue_depth, + device->devtype); pqisrc_raidlevel_to_string(device->raid_level); /* To use this function */ + } } /* validate the structure sizes */ void check_struct_sizes() { ASSERT(sizeof(SCSI3Addr_struct)== 2); ASSERT(sizeof(PhysDevAddr_struct) == 8); ASSERT(sizeof(LogDevAddr_struct)== 8); ASSERT(sizeof(LUNAddr_struct)==8); ASSERT(sizeof(RequestBlock_struct) == 20); ASSERT(sizeof(MoreErrInfo_struct)== 8); ASSERT(sizeof(ErrorInfo_struct)== 48); /* Checking the size of IOCTL_Command_struct for both 64 bit and 32 bit system*/ ASSERT(sizeof(IOCTL_Command_struct)== 86 || sizeof(IOCTL_Command_struct)== 82); ASSERT(sizeof(struct bmic_host_wellness_driver_version)== 42); ASSERT(sizeof(struct bmic_host_wellness_time)== 20); ASSERT(sizeof(struct pqi_dev_adminq_cap)== 8); ASSERT(sizeof(struct admin_q_param)== 4); ASSERT(sizeof(struct pqi_registers)== 256); ASSERT(sizeof(struct ioa_registers)== 4128); ASSERT(sizeof(struct pqi_pref_settings)==4); ASSERT(sizeof(struct pqi_cap)== 20); ASSERT(sizeof(iu_header_t)== 4); ASSERT(sizeof(gen_adm_req_iu_t)== 64); ASSERT(sizeof(gen_adm_resp_iu_t)== 64); ASSERT(sizeof(op_q_params) == 9); ASSERT(sizeof(raid_path_error_info_elem_t)== 276); ASSERT(sizeof(aio_path_error_info_elem_t)== 276); ASSERT(sizeof(struct init_base_struct)== 24); ASSERT(sizeof(pqi_iu_layer_desc_t)== 16); ASSERT(sizeof(pqi_dev_cap_t)== 576); ASSERT(sizeof(pqi_aio_req_t)== 128); ASSERT(sizeof(pqisrc_raid_req_t)== 128); ASSERT(sizeof(pqi_raid_tmf_req_t)== 32); ASSERT(sizeof(pqi_aio_tmf_req_t)== 32); ASSERT(sizeof(struct pqi_io_response)== 16); ASSERT(sizeof(struct sense_header_scsi)== 8); ASSERT(sizeof(reportlun_header_t)==8); ASSERT(sizeof(reportlun_ext_entry_t)== 24); ASSERT(sizeof(reportlun_data_ext_t)== 32); ASSERT(sizeof(raidmap_data_t)==8); ASSERT(sizeof(pqisrc_raid_map_t)== 8256); ASSERT(sizeof(bmic_ident_ctrl_t)== 325); ASSERT(sizeof(bmic_ident_physdev_t)==2048); } uint32_t pqisrc_count_num_scsi_active_requests_on_dev(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device) { uint32_t i, active_io = 0; rcb_t* rcb; for(i = 1; i <= softs->max_outstanding_io; i++) { rcb = &softs->rcb[i]; if(rcb && IS_OS_SCSICMD(rcb) && (rcb->dvp == device) && rcb->req_pending) { active_io++; } } return active_io; } void check_device_pending_commands_to_complete(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device) { uint32_t tag = softs->max_outstanding_io, active_requests; uint64_t timeout = 0, delay_in_usec = 1000; //In micro Seconds rcb_t* rcb; DBG_FUNC("IN\n"); active_requests = pqisrc_count_num_scsi_active_requests_on_dev(softs, device); DBG_WARN("Device Outstanding IO count = %u\n", active_requests); if(!active_requests) return; do { rcb = &softs->rcb[tag]; if(rcb && IS_OS_SCSICMD(rcb) && (rcb->dvp == device) && rcb->req_pending) { OS_BUSYWAIT(delay_in_usec); timeout += delay_in_usec; } else tag--; if(timeout >= PQISRC_PENDING_IO_TIMEOUT_USEC) { DBG_WARN("timed out waiting for pending IO\n"); return; } } while(tag); } inline uint64_t pqisrc_increment_device_active_io(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device) { #if PQISRC_DEVICE_IO_COUNTER /*Increment device active io count by one*/ return OS_ATOMIC64_INC(&device->active_requests); #endif } inline uint64_t pqisrc_decrement_device_active_io(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device) { #if PQISRC_DEVICE_IO_COUNTER /*Decrement device active io count by one*/ return OS_ATOMIC64_DEC(&device->active_requests); #endif } inline void pqisrc_init_device_active_io(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device) { #if 
PQISRC_DEVICE_IO_COUNTER /* Reset device count to Zero */ OS_ATOMIC64_INIT(&device->active_requests, 0); #endif } inline uint64_t pqisrc_read_device_active_io(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device) { #if PQISRC_DEVICE_IO_COUNTER /* read device active count*/ return OS_ATOMIC64_READ(&device->active_requests); #endif } void pqisrc_wait_for_device_commands_to_complete(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device) { uint64_t timeout_in_usec = 0, delay_in_usec = 1000; //In microseconds DBG_FUNC("IN\n"); if(!softs->ctrl_online) return; #if PQISRC_DEVICE_IO_COUNTER DBG_NOTE("Device Outstanding IO count = %ld\n", pqisrc_read_device_active_io(softs, device)); while(pqisrc_read_device_active_io(softs, device)) { OS_BUSYWAIT(delay_in_usec); // In microseconds if(!softs->ctrl_online) { DBG_WARN("Controller Offline was detected.\n"); } timeout_in_usec += delay_in_usec; if(timeout_in_usec >= PQISRC_PENDING_IO_TIMEOUT_USEC) { DBG_WARN("timed out waiting for pending IO. DeviceOutStandingIo's=%ld\n", pqisrc_read_device_active_io(softs, device)); return; } } #else check_device_pending_commands_to_complete(softs, device); #endif } diff --git a/sys/dev/smartpqi/smartpqi_main.c b/sys/dev/smartpqi/smartpqi_main.c index b4efa26b9784..97747be50d24 100644 --- a/sys/dev/smartpqi/smartpqi_main.c +++ b/sys/dev/smartpqi/smartpqi_main.c @@ -1,565 +1,589 @@ /*- * Copyright 2016-2021 Microchip Technology, Inc. and/or its subsidiaries. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ /* $FreeBSD$ */ /* * Driver for the Microsemi Smart storage controllers */ #include "smartpqi_includes.h" #include "smartpqi_prototypes.h" CTASSERT(BSD_SUCCESS == PQI_STATUS_SUCCESS); /* * Supported devices */ struct pqi_ident { u_int16_t vendor; u_int16_t device; u_int16_t subvendor; u_int16_t subdevice; int hwif; char *desc; } pqi_identifiers[] = { /* (MSCC PM8205 8x12G based) */ {0x9005, 0x028f, 0x103c, 0x600, PQI_HWIF_SRCV, "P408i-p SR Gen10"}, {0x9005, 0x028f, 0x103c, 0x601, PQI_HWIF_SRCV, "P408e-p SR Gen10"}, {0x9005, 0x028f, 0x103c, 0x602, PQI_HWIF_SRCV, "P408i-a SR Gen10"}, {0x9005, 0x028f, 0x103c, 0x603, PQI_HWIF_SRCV, "P408i-c SR Gen10"}, {0x9005, 0x028f, 0x1028, 0x1FE0, PQI_HWIF_SRCV, "SmartRAID 3162-8i/eDell"}, {0x9005, 0x028f, 0x9005, 0x608, PQI_HWIF_SRCV, "SmartRAID 3162-8i/e"}, {0x9005, 0x028f, 0x103c, 0x609, PQI_HWIF_SRCV, "P408i-sb SR G10"}, /* (MSCC PM8225 8x12G based) */ {0x9005, 0x028f, 0x103c, 0x650, PQI_HWIF_SRCV, "E208i-p SR Gen10"}, {0x9005, 0x028f, 0x103c, 0x651, PQI_HWIF_SRCV, "E208e-p SR Gen10"}, {0x9005, 0x028f, 0x103c, 0x652, PQI_HWIF_SRCV, "E208i-c SR Gen10"}, {0x9005, 0x028f, 0x103c, 0x654, PQI_HWIF_SRCV, "E208i-a SR Gen10"}, {0x9005, 0x028f, 0x103c, 0x655, PQI_HWIF_SRCV, "P408e-m SR Gen10"}, /* (MSCC PM8221 8x12G based) */ {0x9005, 0x028f, 0x103c, 0x700, PQI_HWIF_SRCV, "P204i-c SR Gen10"}, {0x9005, 0x028f, 0x103c, 0x701, PQI_HWIF_SRCV, "P204i-b SR Gen10"}, {0x9005, 0x028f, 0x193d, 0x1104, PQI_HWIF_SRCV, "UN RAID P2404-Mf-4i-2GB"}, {0x9005, 0x028f, 0x193d, 0x1106, PQI_HWIF_SRCV, "UN RAID P2404-Mf-4i-1GB"}, {0x9005, 0x028f, 0x193d, 0x1108, PQI_HWIF_SRCV, "UN RAID P4408-Ma-8i-2GB"}, /* (MSCC PM8204 8x12G based) */ {0x9005, 0x028f, 0x9005, 0x800, PQI_HWIF_SRCV, "SmartRAID 3154-8i"}, {0x9005, 0x028f, 0x9005, 0x801, PQI_HWIF_SRCV, "SmartRAID 3152-8i"}, {0x9005, 0x028f, 0x9005, 0x802, PQI_HWIF_SRCV, "SmartRAID 3151-4i"}, {0x9005, 0x028f, 0x9005, 0x803, PQI_HWIF_SRCV, "SmartRAID 3101-4i"}, {0x9005, 0x028f, 0x9005, 0x804, PQI_HWIF_SRCV, "SmartRAID 3154-8e"}, {0x9005, 0x028f, 0x9005, 0x805, PQI_HWIF_SRCV, "SmartRAID 3102-8i"}, {0x9005, 0x028f, 0x9005, 0x806, PQI_HWIF_SRCV, "SmartRAID 3100"}, {0x9005, 0x028f, 0x9005, 0x807, PQI_HWIF_SRCV, "SmartRAID 3162-8i"}, {0x9005, 0x028f, 0x152d, 0x8a22, PQI_HWIF_SRCV, "QS-8204-8i"}, {0x9005, 0x028f, 0x193d, 0xf460, PQI_HWIF_SRCV, "UN RAID P460-M4"}, {0x9005, 0x028f, 0x193d, 0xf461, PQI_HWIF_SRCV, "UN RAID P460-B4"}, {0x9005, 0x028f, 0x1bd4, 0x004b, PQI_HWIF_SRCV, "INSPUR PM8204-2GB"}, {0x9005, 0x028f, 0x1bd4, 0x004c, PQI_HWIF_SRCV, "INSPUR PM8204-4GB"}, {0x9005, 0x028f, 0x193d, 0x1105, PQI_HWIF_SRCV, "UN RAID P4408-Mf-8i-2GB"}, {0x9005, 0x028f, 0x193d, 0x1107, PQI_HWIF_SRCV, "UN RAID P4408-Mf-8i-4GB"}, {0x9005, 0x028f, 0x1d8d, 0x800, PQI_HWIF_SRCV, "Fiberhome SmartRAID AIS-8204-8i"}, {0x9005, 0x028f, 0x9005, 0x0808, PQI_HWIF_SRCV, "SmartRAID 3101E-4i"}, {0x9005, 0x028f, 0x9005, 0x0809, PQI_HWIF_SRCV, "SmartRAID 3102E-8i"}, {0x9005, 0x028f, 0x9005, 0x080a, PQI_HWIF_SRCV, "SmartRAID 3152-8i/N"}, /* (MSCC PM8222 8x12G based) */ {0x9005, 0x028f, 0x9005, 0x900, PQI_HWIF_SRCV, "SmartHBA 2100-8i"}, {0x9005, 0x028f, 0x9005, 0x901, PQI_HWIF_SRCV, "SmartHBA 2100-4i"}, {0x9005, 0x028f, 0x9005, 0x902, PQI_HWIF_SRCV, "HBA 1100-8i"}, {0x9005, 0x028f, 0x9005, 0x903, PQI_HWIF_SRCV, "HBA 1100-4i"}, {0x9005, 0x028f, 0x9005, 0x904, PQI_HWIF_SRCV, "SmartHBA 2100-8e"}, {0x9005, 0x028f, 0x9005, 0x905, PQI_HWIF_SRCV, "HBA 1100-8e"}, {0x9005, 0x028f, 0x9005, 0x906, PQI_HWIF_SRCV, "SmartHBA 2100-4i4e"}, {0x9005, 0x028f, 0x9005, 0x907, PQI_HWIF_SRCV, 
"HBA 1100"}, {0x9005, 0x028f, 0x9005, 0x908, PQI_HWIF_SRCV, "SmartHBA 2100"}, {0x9005, 0x028f, 0x9005, 0x90a, PQI_HWIF_SRCV, "SmartHBA 2100A-8i"}, {0x9005, 0x028f, 0x193d, 0x8460, PQI_HWIF_SRCV, "UN HBA H460-M1"}, {0x9005, 0x028f, 0x193d, 0x8461, PQI_HWIF_SRCV, "UN HBA H460-B1"}, {0x9005, 0x028f, 0x193d, 0xc460, PQI_HWIF_SRCV, "UN RAID P460-M2"}, {0x9005, 0x028f, 0x193d, 0xc461, PQI_HWIF_SRCV, "UN RAID P460-B2"}, {0x9005, 0x028f, 0x1bd4, 0x004a, PQI_HWIF_SRCV, "INSPUR PM8222-SHBA"}, {0x9005, 0x028f, 0x13fe, 0x8312, PQI_HWIF_SRCV, "MIC-8312BridgeB"}, {0x9005, 0x028f, 0x1bd4, 0x004f, PQI_HWIF_SRCV, "INSPUR PM8222-HBA"}, {0x9005, 0x028f, 0x1d8d, 0x908, PQI_HWIF_SRCV, "Fiberhome SmartHBA AIS-8222-8i"}, + {0x9005, 0x028f, 0x1bd4, 0x006C, PQI_HWIF_SRCV, "INSPUR RS0800M5E8i"}, + {0x9005, 0x028f, 0x1bd4, 0x006D, PQI_HWIF_SRCV, "INSPUR RS0800M5H8i"}, /* (SRCx MSCC FVB 24x12G based) */ {0x9005, 0x028f, 0x103c, 0x1001, PQI_HWIF_SRCV, "MSCC FVB"}, /* (MSCC PM8241 24x12G based) */ /* (MSCC PM8242 24x12G based) */ {0x9005, 0x028f, 0x152d, 0x8a37, PQI_HWIF_SRCV, "QS-8242-24i"}, {0x9005, 0x028f, 0x9005, 0x1300, PQI_HWIF_SRCV, "HBA 1100-8i8e"}, {0x9005, 0x028f, 0x9005, 0x1301, PQI_HWIF_SRCV, "HBA 1100-24i"}, {0x9005, 0x028f, 0x9005, 0x1302, PQI_HWIF_SRCV, "SmartHBA 2100-8i8e"}, {0x9005, 0x028f, 0x9005, 0x1303, PQI_HWIF_SRCV, "SmartHBA 2100-24i"}, {0x9005, 0x028f, 0x105b, 0x1321, PQI_HWIF_SRCV, "8242-24i"}, {0x9005, 0x028f, 0x1bd4, 0x0045, PQI_HWIF_SRCV, "INSPUR SMART-HBA 8242-24i"}, /* (MSCC PM8236 16x12G based) */ {0x9005, 0x028f, 0x152d, 0x8a24, PQI_HWIF_SRCV, "QS-8236-16i"}, {0x9005, 0x028f, 0x9005, 0x1380, PQI_HWIF_SRCV, "SmartRAID 3154-16i"}, {0x9005, 0x028f, 0x1bd4, 0x0046, PQI_HWIF_SRCV, "INSPUR RAID 8236-16i"}, {0x9005, 0x028f, 0x1d8d, 0x806, PQI_HWIF_SRCV, "Fiberhome SmartRAID AIS-8236-16i"}, + {0x9005, 0x028f, 0x1cf2, 0x5449, PQI_HWIF_SRCV, "ZTE SmartROC3100 RS241-18i 2G"}, + {0x9005, 0x028f, 0x1cf2, 0x544A, PQI_HWIF_SRCV, "ZTE SmartROC3100 RS242-18i 4G"}, + {0x9005, 0x028f, 0x1cf2, 0x544D, PQI_HWIF_SRCV, "ZTE SmartROC3100 RM241B-18i 2G"}, + {0x9005, 0x028f, 0x1cf2, 0x544E, PQI_HWIF_SRCV, "ZTE SmartROC3100 RM242B-18i 4G"}, /* (MSCC PM8237 24x12G based) */ {0x9005, 0x028f, 0x103c, 0x1100, PQI_HWIF_SRCV, "P816i-a SR Gen10"}, {0x9005, 0x028f, 0x103c, 0x1101, PQI_HWIF_SRCV, "P416ie-m SR G10"}, /* (MSCC PM8238 16x12G based) */ {0x9005, 0x028f, 0x152d, 0x8a23, PQI_HWIF_SRCV, "QS-8238-16i"}, {0x9005, 0x028f, 0x9005, 0x1280, PQI_HWIF_SRCV, "HBA 1100-16i"}, {0x9005, 0x028f, 0x9005, 0x1281, PQI_HWIF_SRCV, "HBA 1100-16e"}, {0x9005, 0x028f, 0x105b, 0x1211, PQI_HWIF_SRCV, "8238-16i"}, {0x9005, 0x028f, 0x1bd4, 0x0048, PQI_HWIF_SRCV, "INSPUR SMART-HBA 8238-16i"}, {0x9005, 0x028f, 0x9005, 0x1282, PQI_HWIF_SRCV, "SmartHBA 2100-16i"}, {0x9005, 0x028f, 0x1d8d, 0x916, PQI_HWIF_SRCV, "Fiberhome SmartHBA AIS-8238-16i"}, {0x9005, 0x028f, 0x1458, 0x1000, PQI_HWIF_SRCV, "GIGABYTE SmartHBA CLN1832"}, + {0x9005, 0x028f, 0x1cf2, 0x544F, PQI_HWIF_SRCV, "ZTE SmartIOC2100 RM243B-18i"}, /* (MSCC PM8240 24x12G based) */ {0x9005, 0x028f, 0x152d, 0x8a36, PQI_HWIF_SRCV, "QS-8240-24i"}, {0x9005, 0x028f, 0x9005, 0x1200, PQI_HWIF_SRCV, "SmartRAID 3154-24i"}, {0x9005, 0x028f, 0x9005, 0x1201, PQI_HWIF_SRCV, "SmartRAID 3154-8i16e"}, {0x9005, 0x028f, 0x9005, 0x1202, PQI_HWIF_SRCV, "SmartRAID 3154-8i8e"}, {0x9005, 0x028f, 0x1bd4, 0x0047, PQI_HWIF_SRCV, "INSPUR RAID 8240-24i"}, + {0x9005, 0x028f, 0x1F0C, 0x3161, PQI_HWIF_SRCV, "NT RAID 3100-24i"}, /* Huawei ID's */ {0x9005, 0x028f, 0x19e5, 0xd227, PQI_HWIF_SRCV, "SR465C-M 4G"}, {0x9005, 
0x028f, 0x19e5, 0xd22a, PQI_HWIF_SRCV, "SR765-M"}, {0x9005, 0x028f, 0x19e5, 0xd228, PQI_HWIF_SRCV, "SR455C-M 2G"}, {0x9005, 0x028f, 0x19e5, 0xd22c, PQI_HWIF_SRCV, "SR455C-M 4G"}, {0x9005, 0x028f, 0x19e5, 0xd229, PQI_HWIF_SRCV, "SR155-M"}, {0x9005, 0x028f, 0x19e5, 0xd22b, PQI_HWIF_SRCV, "SR455C-ME 4G"}, + /* (MSCC PM8254 32x12G based) */ + {0x9005, 0x028f, 0x9005, 0x14a2, PQI_HWIF_SRCV, "SmartRAID 3252-8i"}, + {0x9005, 0x028f, 0x9005, 0x14a4, PQI_HWIF_SRCV, "SmartRAID 3254-8i /e"}, + {0x9005, 0x028f, 0x9005, 0x14a5, PQI_HWIF_SRCV, "SmartRAID 3252-8i /e"}, + {0x9005, 0x028f, 0x9005, 0x14a6, PQI_HWIF_SRCV, "SmartRAID 3204-8i /e"}, +/* (MSCC PM8265 16x12G based) */ + {0x9005, 0x028f, 0x9005, 0x1474, PQI_HWIF_SRCV, "SmartRAID 3254-16io /e"}, +/* (MSCC PM8270 16x12G based) */ + {0x9005, 0x028f, 0x9005, 0x1463, PQI_HWIF_SRCV, "SmartHBA 2200-8io /e"}, + {0x9005, 0x028f, 0x9005, 0x14c2, PQI_HWIF_SRCV, "SmartHBA 2200-16io /e"}, + /* (MSCC PM8279 32x12G based) */ + {0x9005, 0x028f, 0x1590, 0x0381, PQI_HWIF_SRCV, "SR932i-p Gen11"}, + {0x9005, 0x028f, 0x1590, 0x0382, PQI_HWIF_SRCV, "SR308i-p Gen11"}, + {0x9005, 0x028f, 0x1590, 0x0383, PQI_HWIF_SRCV, "SR308i-o Gen11"}, + {0x9005, 0x028f, 0x1590, 0x02db, PQI_HWIF_SRCV, "SR416ie-m Gen11"}, + {0x9005, 0x028f, 0x1590, 0x032e, PQI_HWIF_SRCV, "SR416i-o Gen11"}, {0, 0, 0, 0, 0, 0} }; struct pqi_ident pqi_family_identifiers[] = { {0x9005, 0x028f, 0, 0, PQI_HWIF_SRCV, "Smart Array Storage Controller"}, {0, 0, 0, 0, 0, 0} }; /* * Function to identify the installed adapter. */ static struct pqi_ident *pqi_find_ident(device_t dev) { struct pqi_ident *m; u_int16_t vendid, devid, sub_vendid, sub_devid; vendid = pci_get_vendor(dev); devid = pci_get_device(dev); sub_vendid = pci_get_subvendor(dev); sub_devid = pci_get_subdevice(dev); for (m = pqi_identifiers; m->vendor != 0; m++) { if ((m->vendor == vendid) && (m->device == devid) && (m->subvendor == sub_vendid) && (m->subdevice == sub_devid)) { return (m); } } for (m = pqi_family_identifiers; m->vendor != 0; m++) { if ((m->vendor == vendid) && (m->device == devid)) { return (m); } } return (NULL); } /* * Determine whether this is one of our supported adapters. */ static int smartpqi_probe(device_t dev) { struct pqi_ident *id; if ((id = pqi_find_ident(dev)) != NULL) { device_set_desc(dev, id->desc); return(BUS_PROBE_VENDOR); } return(ENXIO); } /* * Store Bus/Device/Function in softs */ void pqisrc_save_controller_info(struct pqisrc_softstate *softs) { device_t dev = softs->os_specific.pqi_dev; softs->bus_id = (uint32_t)pci_get_bus(dev); softs->device_id = (uint32_t)pci_get_device(dev); softs->func_id = (uint32_t)pci_get_function(dev); } /* * Allocate resources for our device, set up the bus interface. * Initialize the PQI related functionality, scan devices, register sim to * upper layer, create management interface device node etc. */ static int smartpqi_attach(device_t dev) { struct pqisrc_softstate *softs = NULL; struct pqi_ident *id = NULL; int error = BSD_SUCCESS; u_int32_t command = 0, i = 0; int card_index = device_get_unit(dev); rcb_t *rcbp = NULL; /* * Initialise softc. */ softs = device_get_softc(dev); if (!softs) { printf("Could not get softc\n"); error = EINVAL; goto out; } memset(softs, 0, sizeof(*softs)); softs->os_specific.pqi_dev = dev; DBG_FUNC("IN\n"); /* assume failure is 'not configured' */ error = ENXIO; /* * Verify that the adapter is correctly set up in PCI space. 
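	 * Bus mastering is enabled first, and the PCIM_CMD_MEMEN check below
	 * bails out if the memory BAR was left undecoded by firmware.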
*/ pci_enable_busmaster(softs->os_specific.pqi_dev); command = pci_read_config(softs->os_specific.pqi_dev, PCIR_COMMAND, 2); if ((command & PCIM_CMD_MEMEN) == 0) { DBG_ERR("memory window not available command = %d\n", command); error = ENXIO; goto out; } /* * Detect the hardware interface version, set up the bus interface * indirection. */ id = pqi_find_ident(dev); if (!id) { DBG_ERR("NULL return value from pqi_find_ident\n"); goto out; } softs->os_specific.pqi_hwif = id->hwif; switch(softs->os_specific.pqi_hwif) { case PQI_HWIF_SRCV: DBG_INFO("set hardware up for PMC SRCv for %p\n", softs); break; default: softs->os_specific.pqi_hwif = PQI_HWIF_UNKNOWN; DBG_ERR("unknown hardware type\n"); error = ENXIO; goto out; } pqisrc_save_controller_info(softs); /* * Allocate the PCI register window. */ softs->os_specific.pqi_regs_rid0 = PCIR_BAR(0); if ((softs->os_specific.pqi_regs_res0 = bus_alloc_resource_any(softs->os_specific.pqi_dev, SYS_RES_MEMORY, &softs->os_specific.pqi_regs_rid0, RF_ACTIVE)) == NULL) { DBG_ERR("couldn't allocate register window 0\n"); /* assume failure is 'out of memory' */ error = ENOMEM; goto out; } bus_get_resource_start(softs->os_specific.pqi_dev, SYS_RES_MEMORY, softs->os_specific.pqi_regs_rid0); softs->pci_mem_handle.pqi_btag = rman_get_bustag(softs->os_specific.pqi_regs_res0); softs->pci_mem_handle.pqi_bhandle = rman_get_bushandle(softs->os_specific.pqi_regs_res0); /* softs->pci_mem_base_vaddr = (uintptr_t)rman_get_virtual(softs->os_specific.pqi_regs_res0); */ softs->pci_mem_base_vaddr = (char *)rman_get_virtual(softs->os_specific.pqi_regs_res0); /* * Allocate the parent bus DMA tag appropriate for our PCI interface. * * Note that some of these controllers are 64-bit capable. */ if (bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */ PAGE_SIZE, 0, /* algnmnt, boundary */ - BUS_SPACE_MAXADDR_32BIT,/* lowaddr */ + BUS_SPACE_MAXADDR,/* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ - BUS_SPACE_MAXSIZE_32BIT, /* maxsize */ + BUS_SPACE_MAXSIZE, /* maxsize */ BUS_SPACE_UNRESTRICTED, /* nsegments */ - BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ + BUS_SPACE_MAXSIZE, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* No locking needed */ &softs->os_specific.pqi_parent_dmat)) { DBG_ERR("can't allocate parent DMA tag\n"); /* assume failure is 'out of memory' */ error = ENOMEM; goto dma_out; } softs->os_specific.sim_registered = FALSE; softs->os_name = "FreeBSD "; /* Initialize the PQI library */ error = pqisrc_init(softs); if (error != PQI_STATUS_SUCCESS) { DBG_ERR("Failed to initialize pqi lib error = %d\n", error); error = ENXIO; goto out; } else { error = BSD_SUCCESS; } mtx_init(&softs->os_specific.cam_lock, "cam_lock", NULL, MTX_DEF); softs->os_specific.mtx_init = TRUE; mtx_init(&softs->os_specific.map_lock, "map_lock", NULL, MTX_DEF); callout_init(&softs->os_specific.wellness_periodic, 1); callout_init(&softs->os_specific.heartbeat_timeout_id, 1); /* * Create DMA tag for mapping buffers into controller-addressable space. 
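	 * A single transfer is capped at max_sg_elem pages (one S/G element
	 * per page), and deferred mappings are serialized through map_lock
	 * via busdma_lock_mutex.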
*/ if (bus_dma_tag_create(softs->os_specific.pqi_parent_dmat,/* parent */ PAGE_SIZE, 0, /* algnmnt, boundary */ - BUS_SPACE_MAXADDR_32BIT,/* lowaddr */ + BUS_SPACE_MAXADDR,/* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ (bus_size_t)softs->pqi_cap.max_sg_elem*PAGE_SIZE,/* maxsize */ softs->pqi_cap.max_sg_elem, /* nsegments */ - BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ + BUS_SPACE_MAXSIZE, /* maxsegsize */ BUS_DMA_ALLOCNOW, /* flags */ busdma_lock_mutex, /* lockfunc */ &softs->os_specific.map_lock, /* lockfuncarg*/ &softs->os_specific.pqi_buffer_dmat)) { DBG_ERR("can't allocate buffer DMA tag for pqi_buffer_dmat\n"); return (ENOMEM); } rcbp = &softs->rcb[1]; for( i = 1; i <= softs->pqi_cap.max_outstanding_io; i++, rcbp++ ) { if ((error = bus_dmamap_create(softs->os_specific.pqi_buffer_dmat, 0, &rcbp->cm_datamap)) != 0) { DBG_ERR("Cant create datamap for buf @" "rcbp = %p maxio = %d error = %d\n", rcbp, softs->pqi_cap.max_outstanding_io, error); goto dma_out; } } os_start_heartbeat_timer((void *)softs); /* Start the heart-beat timer */ callout_reset(&softs->os_specific.wellness_periodic, 120 * hz, os_wellness_periodic, softs); error = pqisrc_scan_devices(softs); if (error != PQI_STATUS_SUCCESS) { DBG_ERR("Failed to scan lib error = %d\n", error); error = ENXIO; goto out; } error = register_sim(softs, card_index); if (error) { DBG_ERR("Failed to register sim index = %d error = %d\n", card_index, error); goto out; } smartpqi_target_rescan(softs); TASK_INIT(&softs->os_specific.event_task, 0, pqisrc_event_worker,softs); error = create_char_dev(softs, card_index); if (error) { DBG_ERR("Failed to register character device index=%d r=%d\n", card_index, error); goto out; } goto out; dma_out: if (softs->os_specific.pqi_regs_res0 != NULL) bus_release_resource(softs->os_specific.pqi_dev, SYS_RES_MEMORY, softs->os_specific.pqi_regs_rid0, softs->os_specific.pqi_regs_res0); out: DBG_FUNC("OUT error = %d\n", error); return(error); } /* * Deallocate resources for our device. */ static int smartpqi_detach(device_t dev) { struct pqisrc_softstate *softs = device_get_softc(dev); int rval = BSD_SUCCESS; DBG_FUNC("IN\n"); if (softs == NULL) return ENXIO; /* kill the periodic event */ callout_drain(&softs->os_specific.wellness_periodic); /* Kill the heart beat event */ callout_drain(&softs->os_specific.heartbeat_timeout_id); if (!pqisrc_ctrl_offline(softs)) { rval = pqisrc_flush_cache(softs, PQISRC_NONE_CACHE_FLUSH_ONLY); if (rval != PQI_STATUS_SUCCESS) { DBG_ERR("Unable to flush adapter cache! rval = %d\n", rval); rval = EIO; } } destroy_char_dev(softs); pqisrc_uninit(softs); deregister_sim(softs); pci_release_msi(dev); DBG_FUNC("OUT\n"); return rval; } /* * Bring the controller to a quiescent state, ready for system suspend. */ static int smartpqi_suspend(device_t dev) { struct pqisrc_softstate *softs = device_get_softc(dev); DBG_FUNC("IN\n"); if (softs == NULL) return ENXIO; DBG_INFO("Suspending the device %p\n", softs); softs->os_specific.pqi_state |= SMART_STATE_SUSPEND; DBG_FUNC("OUT\n"); return BSD_SUCCESS; } /* * Bring the controller back to a state ready for operation. */ static int smartpqi_resume(device_t dev) { struct pqisrc_softstate *softs = device_get_softc(dev); DBG_FUNC("IN\n"); if (softs == NULL) return ENXIO; softs->os_specific.pqi_state &= ~SMART_STATE_SUSPEND; DBG_FUNC("OUT\n"); return BSD_SUCCESS; } /* * Do whatever is needed during a system shutdown. 
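 * At present this amounts to flushing the controller write cache (skipped
 * when the controller is already offline); a flush failure is logged and
 * reported as EIO.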
*/ static int smartpqi_shutdown(device_t dev) { struct pqisrc_softstate *softs = device_get_softc(dev); int bsd_status = BSD_SUCCESS; int pqi_status; DBG_FUNC("IN\n"); if (softs == NULL) return ENXIO; if (pqisrc_ctrl_offline(softs)) return BSD_SUCCESS; pqi_status = pqisrc_flush_cache(softs, PQISRC_SHUTDOWN); if (pqi_status != PQI_STATUS_SUCCESS) { DBG_ERR("Unable to flush adapter cache! rval = %d\n", pqi_status); bsd_status = EIO; } DBG_FUNC("OUT\n"); return bsd_status; } /* * PCI bus interface. */ static device_method_t pqi_methods[] = { /* Device interface */ DEVMETHOD(device_probe, smartpqi_probe), DEVMETHOD(device_attach, smartpqi_attach), DEVMETHOD(device_detach, smartpqi_detach), DEVMETHOD(device_suspend, smartpqi_suspend), DEVMETHOD(device_resume, smartpqi_resume), DEVMETHOD(device_shutdown, smartpqi_shutdown), { 0, 0 } }; static devclass_t pqi_devclass; static driver_t smartpqi_pci_driver = { "smartpqi", pqi_methods, sizeof(struct pqisrc_softstate) }; DRIVER_MODULE(smartpqi, pci, smartpqi_pci_driver, pqi_devclass, 0, 0); MODULE_DEPEND(smartpqi, pci, 1, 1, 1); diff --git a/sys/dev/smartpqi/smartpqi_request.c b/sys/dev/smartpqi/smartpqi_request.c index 222852fbe088..81f9e106bd8a 100644 --- a/sys/dev/smartpqi/smartpqi_request.c +++ b/sys/dev/smartpqi/smartpqi_request.c @@ -1,1047 +1,1055 @@ /*- * Copyright 2016-2021 Microchip Technology, Inc. and/or its subsidiaries. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* $FreeBSD$ */ #include "smartpqi_includes.h" /* * Attempt to perform offload RAID mapping for a logical volume I/O. 
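 * Eligible READ/WRITE requests are translated from a volume-relative LBA to
 * a physical drive and block using the volume's RAID map so they can be sent
 * down the AIO path; requests that cannot be mapped (spanning rows/columns,
 * unsupported opcodes, writes to non-RAID-0 volumes, and so on) fall back to
 * the normal RAID path.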
*/ #define HPSA_RAID_0 0 #define HPSA_RAID_4 1 #define HPSA_RAID_1 2 /* also used for RAID 10 */ #define HPSA_RAID_5 3 /* also used for RAID 50 */ #define HPSA_RAID_51 4 #define HPSA_RAID_6 5 /* also used for RAID 60 */ #define HPSA_RAID_ADM 6 /* also used for RAID 1+0 ADM */ #define HPSA_RAID_MAX HPSA_RAID_ADM #define HPSA_RAID_UNKNOWN 0xff #define SG_FLAG_LAST 0x40000000 #define SG_FLAG_CHAIN 0x80000000 /* Subroutine to find out embedded sgl count in IU */ static inline uint32_t pqisrc_embedded_sgl_count(uint32_t elem_alloted) { uint32_t embedded_sgl_count = MAX_EMBEDDED_SG_IN_FIRST_IU; DBG_FUNC(" IN "); /** calculate embedded sgl count using num_elem_alloted for IO **/ if(elem_alloted - 1) embedded_sgl_count += ((elem_alloted - 1) * MAX_EMBEDDED_SG_IN_IU); DBG_IO("embedded_sgl_count :%d\n",embedded_sgl_count); DBG_FUNC(" OUT "); return embedded_sgl_count; } /* Subroutine to find out contiguous free elem in IU */ static inline uint32_t pqisrc_contiguous_free_elem(uint32_t pi, uint32_t ci, uint32_t elem_in_q) { uint32_t contiguous_free_elem = 0; DBG_FUNC(" IN "); if(pi >= ci) { contiguous_free_elem = (elem_in_q - pi); if(ci == 0) contiguous_free_elem -= 1; } else { contiguous_free_elem = (ci - pi - 1); } DBG_FUNC(" OUT "); return contiguous_free_elem; } /* Subroutine to find out num of elements need for the request */ static uint32_t pqisrc_num_elem_needed(pqisrc_softstate_t *softs, uint32_t SG_Count) { uint32_t num_sg; uint32_t num_elem_required = 1; DBG_FUNC(" IN "); DBG_IO("SGL_Count :%d",SG_Count); /******** If SG_Count greater than max sg per IU i.e 4 or 68 (4 is with out spanning or 68 is with spanning) chaining is required. OR, If SG_Count <= MAX_EMBEDDED_SG_IN_FIRST_IU then, on these two cases one element is enough. ********/ if(SG_Count > softs->max_sg_per_iu || SG_Count <= MAX_EMBEDDED_SG_IN_FIRST_IU) return num_elem_required; /* SGL Count Other Than First IU */ num_sg = SG_Count - MAX_EMBEDDED_SG_IN_FIRST_IU; num_elem_required += PQISRC_DIV_ROUND_UP(num_sg, MAX_EMBEDDED_SG_IN_IU); DBG_FUNC(" OUT "); return num_elem_required; } /* Subroutine to build SG list for the IU submission*/ static boolean_t pqisrc_build_sgl(sgt_t *sg_array, rcb_t *rcb, iu_header_t *iu_hdr, uint32_t num_elem_alloted) { uint32_t i; uint32_t num_sg = OS_GET_IO_SG_COUNT(rcb); sgt_t *sgt = sg_array; sgt_t *sg_chain = NULL; boolean_t partial = false; DBG_FUNC(" IN "); DBG_IO("SGL_Count :%d",num_sg); if (0 == num_sg) { goto out; } if (num_sg <= pqisrc_embedded_sgl_count(num_elem_alloted)) { for (i = 0; i < num_sg; i++, sgt++) { sgt->addr= OS_GET_IO_SG_ADDR(rcb,i); sgt->len= OS_GET_IO_SG_LEN(rcb,i); sgt->flags= 0; } sg_array[num_sg - 1].flags = SG_FLAG_LAST; } else { /** SGL Chaining **/ sg_chain = rcb->sg_chain_virt; sgt->addr = rcb->sg_chain_dma; sgt->len = num_sg * sizeof(sgt_t); sgt->flags = SG_FLAG_CHAIN; sgt = sg_chain; for (i = 0; i < num_sg; i++, sgt++) { sgt->addr = OS_GET_IO_SG_ADDR(rcb,i); sgt->len = OS_GET_IO_SG_LEN(rcb,i); sgt->flags = 0; } sg_chain[num_sg - 1].flags = SG_FLAG_LAST; num_sg = 1; partial = true; } out: iu_hdr->iu_length = num_sg * sizeof(sgt_t); DBG_FUNC(" OUT "); return partial; } /*Subroutine used to Build the RAID request */ static void pqisrc_build_raid_io(pqisrc_softstate_t *softs, rcb_t *rcb, pqisrc_raid_req_t *raid_req, uint32_t num_elem_alloted) { DBG_FUNC(" IN "); raid_req->header.iu_type = PQI_IU_TYPE_RAID_PATH_IO_REQUEST; raid_req->header.comp_feature = 0; raid_req->response_queue_id = OS_GET_IO_RESP_QID(softs, rcb); raid_req->work_area[0] = 0; raid_req->work_area[1] = 0; 
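	/*
	 * The request id ties this IU back to its rcb; error_index (set below)
	 * mirrors it so the completion path can find the matching error buffer.
	 */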
raid_req->request_id = rcb->tag; raid_req->nexus_id = 0; raid_req->buffer_length = GET_SCSI_BUFFLEN(rcb); memcpy(raid_req->lun_number, rcb->dvp->scsi3addr, sizeof(raid_req->lun_number)); raid_req->protocol_spec = 0; raid_req->data_direction = rcb->data_dir; raid_req->reserved1 = 0; raid_req->fence = 0; raid_req->error_index = raid_req->request_id; raid_req->reserved2 = 0; raid_req->task_attribute = OS_GET_TASK_ATTR(rcb); raid_req->command_priority = 0; raid_req->reserved3 = 0; raid_req->reserved4 = 0; raid_req->reserved5 = 0; /* As cdb and additional_cdb_bytes are contiguous, update them in a single statement */ memcpy(raid_req->cdb, rcb->cdbp, rcb->cmdlen); #if 0 DBG_IO("CDB :"); for(i = 0; i < rcb->cmdlen ; i++) DBG_IO(" 0x%x \n ",raid_req->cdb[i]); #endif switch (rcb->cmdlen) { case 6: case 10: case 12: case 16: raid_req->additional_cdb_bytes_usage = PQI_ADDITIONAL_CDB_BYTES_0; break; case 20: raid_req->additional_cdb_bytes_usage = PQI_ADDITIONAL_CDB_BYTES_4; break; case 24: raid_req->additional_cdb_bytes_usage = PQI_ADDITIONAL_CDB_BYTES_8; break; case 28: raid_req->additional_cdb_bytes_usage = PQI_ADDITIONAL_CDB_BYTES_12; break; case 32: default: /* todo:review again */ raid_req->additional_cdb_bytes_usage = PQI_ADDITIONAL_CDB_BYTES_16; break; } /* Frame SGL Descriptor */ raid_req->partial = pqisrc_build_sgl(&raid_req->sg_descriptors[0], rcb, &raid_req->header, num_elem_alloted); raid_req->header.iu_length += offsetof(pqisrc_raid_req_t, sg_descriptors) - sizeof(iu_header_t); #if 0 DBG_IO("raid_req->header.iu_type : 0x%x", raid_req->header.iu_type); DBG_IO("raid_req->response_queue_id :%d\n"raid_req->response_queue_id); DBG_IO("raid_req->request_id : 0x%x", raid_req->request_id); DBG_IO("raid_req->buffer_length : 0x%x", raid_req->buffer_length); DBG_IO("raid_req->task_attribute : 0x%x", raid_req->task_attribute); DBG_IO("raid_req->lun_number : 0x%x", raid_req->lun_number); DBG_IO("raid_req->error_index : 0x%x", raid_req->error_index); DBG_IO("raid_req->sg_descriptors[0].addr : %p", (void*)raid_req->sg_descriptors[0].addr); DBG_IO("raid_req->sg_descriptors[0].len : 0x%x", raid_req->sg_descriptors[0].len); DBG_IO("raid_req->sg_descriptors[0].flags : 0%x", raid_req->sg_descriptors[0].flags); #endif rcb->success_cmp_callback = pqisrc_process_io_response_success; rcb->error_cmp_callback = pqisrc_process_raid_response_error; rcb->resp_qid = raid_req->response_queue_id; DBG_FUNC(" OUT "); } /*Subroutine used to Build the AIO request */ static void pqisrc_build_aio_io(pqisrc_softstate_t *softs, rcb_t *rcb, pqi_aio_req_t *aio_req, uint32_t num_elem_alloted) { DBG_FUNC(" IN "); aio_req->header.iu_type = PQI_IU_TYPE_AIO_PATH_IO_REQUEST; aio_req->header.comp_feature = 0; aio_req->response_queue_id = OS_GET_IO_RESP_QID(softs, rcb); aio_req->work_area[0] = 0; aio_req->work_area[1] = 0; aio_req->req_id = rcb->tag; aio_req->res1[0] = 0; aio_req->res1[1] = 0; aio_req->nexus = rcb->ioaccel_handle; aio_req->buf_len = GET_SCSI_BUFFLEN(rcb); aio_req->data_dir = rcb->data_dir; aio_req->mem_type = 0; aio_req->fence = 0; aio_req->res2 = 0; aio_req->task_attr = OS_GET_TASK_ATTR(rcb); aio_req->cmd_prio = 0; aio_req->res3 = 0; aio_req->err_idx = aio_req->req_id; aio_req->cdb_len = rcb->cmdlen; if(rcb->cmdlen > sizeof(aio_req->cdb)) rcb->cmdlen = sizeof(aio_req->cdb); memcpy(aio_req->cdb, rcb->cdbp, rcb->cmdlen); #if 0 DBG_IO("CDB : \n"); for(int i = 0; i < rcb->cmdlen ; i++) DBG_IO(" 0x%x \n",aio_req->cdb[i]); #endif memset(aio_req->lun,0,sizeof(aio_req->lun)); memset(aio_req->res4,0,sizeof(aio_req->res4)); 
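	/*
	 * Copy the volume's encryption key index and tweak (filled in by
	 * pqisrc_set_enc_info() on the RAID-bypass path) into the IU, or zero
	 * the fields when encryption is not enabled for this request.
	 */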
if(rcb->encrypt_enable == true) { aio_req->encrypt_enable = true; aio_req->encrypt_key_index = LE_16(rcb->enc_info.data_enc_key_index); aio_req->encrypt_twk_low = LE_32(rcb->enc_info.encrypt_tweak_lower); aio_req->encrypt_twk_high = LE_32(rcb->enc_info.encrypt_tweak_upper); } else { aio_req->encrypt_enable = 0; aio_req->encrypt_key_index = 0; aio_req->encrypt_twk_high = 0; aio_req->encrypt_twk_low = 0; } /* Frame SGL Descriptor */ aio_req->partial = pqisrc_build_sgl(&aio_req->sg_desc[0], rcb, &aio_req->header, num_elem_alloted); aio_req->num_sg = aio_req->header.iu_length / sizeof(sgt_t); DBG_INFO("aio_req->num_sg :%d",aio_req->num_sg); aio_req->header.iu_length += offsetof(pqi_aio_req_t, sg_desc) - sizeof(iu_header_t); #if 0 DBG_IO("aio_req->header.iu_type : 0x%x \n",aio_req->header.iu_type); DBG_IO("aio_req->resp_qid :0x%x",aio_req->resp_qid); DBG_IO("aio_req->req_id : 0x%x \n",aio_req->req_id); DBG_IO("aio_req->nexus : 0x%x \n",aio_req->nexus); DBG_IO("aio_req->buf_len : 0x%x \n",aio_req->buf_len); DBG_IO("aio_req->data_dir : 0x%x \n",aio_req->data_dir); DBG_IO("aio_req->task_attr : 0x%x \n",aio_req->task_attr); DBG_IO("aio_req->err_idx : 0x%x \n",aio_req->err_idx); DBG_IO("aio_req->num_sg :%d",aio_req->num_sg); DBG_IO("aio_req->sg_desc[0].addr : %p \n", (void*)aio_req->sg_desc[0].addr); DBG_IO("aio_req->sg_desc[0].len : 0%x \n", aio_req->sg_desc[0].len); DBG_IO("aio_req->sg_desc[0].flags : 0%x \n", aio_req->sg_desc[0].flags); #endif rcb->success_cmp_callback = pqisrc_process_io_response_success; rcb->error_cmp_callback = pqisrc_process_aio_response_error; rcb->resp_qid = aio_req->response_queue_id; DBG_FUNC(" OUT "); } /*Function used to build and send RAID/AIO */ int pqisrc_build_send_io(pqisrc_softstate_t *softs,rcb_t *rcb) { ib_queue_t *ib_q_array = softs->op_aio_ib_q; ib_queue_t *ib_q = NULL; char *ib_iu = NULL; IO_PATH_T io_path = AIO_PATH; uint32_t TraverseCount = 0; int first_qindex = OS_GET_IO_REQ_QINDEX(softs, rcb); int qindex = first_qindex; uint32_t num_op_ib_q = softs->num_op_aio_ibq; uint32_t num_elem_needed; uint32_t num_elem_alloted = 0; pqi_scsi_dev_t *devp = rcb->dvp; uint8_t raidbypass_cdb[16]; DBG_FUNC(" IN "); if(!rcb->aio_retry) { rcb->cdbp = OS_GET_CDBP(rcb); if(IS_AIO_PATH(devp)) { /** IO for Physical Drive **/ /** Send in AIO PATH**/ rcb->ioaccel_handle = devp->ioaccel_handle; } else { int ret = PQI_STATUS_FAILURE; /** IO for RAID Volume **/ if (devp->offload_enabled) { /** ByPass IO ,Send in AIO PATH **/ ret = pqisrc_send_scsi_cmd_raidbypass(softs, devp, rcb, raidbypass_cdb); } if (PQI_STATUS_FAILURE == ret) { /** Send in RAID PATH **/ io_path = RAID_PATH; num_op_ib_q = softs->num_op_raid_ibq; ib_q_array = softs->op_raid_ib_q; } else { rcb->cdbp = raidbypass_cdb; } } } else { /* Retrying failed AIO IO */ io_path = RAID_PATH; rcb->cdbp = OS_GET_CDBP(rcb); num_op_ib_q = softs->num_op_raid_ibq; ib_q_array = softs->op_raid_ib_q; } num_elem_needed = pqisrc_num_elem_needed(softs, OS_GET_IO_SG_COUNT(rcb)); DBG_IO("num_elem_needed :%d",num_elem_needed); do { uint32_t num_elem_available; ib_q = (ib_q_array + qindex); PQI_LOCK(&ib_q->lock); num_elem_available = pqisrc_contiguous_free_elem(ib_q->pi_local, *(ib_q->ci_virt_addr), ib_q->num_elem); DBG_IO("num_elem_avialable :%d\n",num_elem_available); if(num_elem_available >= num_elem_needed) { num_elem_alloted = num_elem_needed; break; } DBG_IO("Current queue is busy! 
Hop to next queue\n"); PQI_UNLOCK(&ib_q->lock); qindex = (qindex + 1) % num_op_ib_q; if(qindex == first_qindex) { if (num_elem_needed == 1) break; TraverseCount += 1; num_elem_needed = 1; } }while(TraverseCount < 2); DBG_IO("num_elem_alloted :%d",num_elem_alloted); if (num_elem_alloted == 0) { DBG_WARN("OUT: IB Queues were full\n"); return PQI_STATUS_QFULL; } pqisrc_increment_device_active_io(softs,devp); /* Get IB Queue Slot address to build IU */ ib_iu = ib_q->array_virt_addr + (ib_q->pi_local * ib_q->elem_size); if(io_path == AIO_PATH) { /** Build AIO structure **/ pqisrc_build_aio_io(softs, rcb, (pqi_aio_req_t*)ib_iu, num_elem_alloted); } else { /** Build RAID structure **/ pqisrc_build_raid_io(softs, rcb, (pqisrc_raid_req_t*)ib_iu, num_elem_alloted); } rcb->req_pending = true; rcb->req_q = ib_q; rcb->path = io_path; /* Update the local PI */ ib_q->pi_local = (ib_q->pi_local + num_elem_alloted) % ib_q->num_elem; DBG_INFO("ib_q->pi_local : %x\n", ib_q->pi_local); DBG_INFO("*ib_q->ci_virt_addr: %x\n",*(ib_q->ci_virt_addr)); /* Inform the fw about the new IU */ PCI_MEM_PUT32(softs, ib_q->pi_register_abs, ib_q->pi_register_offset, ib_q->pi_local); PQI_UNLOCK(&ib_q->lock); DBG_FUNC(" OUT "); return PQI_STATUS_SUCCESS; } /* Subroutine used to set encryption info as part of RAID bypass IO*/ static inline void pqisrc_set_enc_info(struct pqi_enc_info *enc_info, struct raid_map *raid_map, uint64_t first_block) { uint32_t volume_blk_size; /* * Set the encryption tweak values based on logical block address. * If the block size is 512, the tweak value is equal to the LBA. * For other block sizes, tweak value is (LBA * block size) / 512. */ volume_blk_size = GET_LE32((uint8_t *)&raid_map->volume_blk_size); if (volume_blk_size != 512) first_block = (first_block * volume_blk_size) / 512; enc_info->data_enc_key_index = GET_LE16((uint8_t *)&raid_map->data_encryption_key_index); enc_info->encrypt_tweak_upper = ((uint32_t)(((first_block) >> 16) >> 16)); enc_info->encrypt_tweak_lower = ((uint32_t)(first_block)); } /* Subroutine used to parse the scsi opcode and build the CDB for RAID bypass*/ int check_for_scsi_opcode(uint8_t *cdb, boolean_t *is_write, uint64_t *fst_blk, uint32_t *blk_cnt) { switch (cdb[0]) { case SCMD_WRITE_6: *is_write = true; case SCMD_READ_6: *fst_blk = (uint64_t)(((cdb[1] & 0x1F) << 16) | (cdb[2] << 8) | cdb[3]); *blk_cnt = (uint32_t)cdb[4]; if (*blk_cnt == 0) *blk_cnt = 256; break; case SCMD_WRITE_10: *is_write = true; case SCMD_READ_10: *fst_blk = (uint64_t)GET_BE32(&cdb[2]); *blk_cnt = (uint32_t)GET_BE16(&cdb[7]); break; case SCMD_WRITE_12: *is_write = true; case SCMD_READ_12: *fst_blk = (uint64_t)GET_BE32(&cdb[2]); *blk_cnt = GET_BE32(&cdb[6]); break; case SCMD_WRITE_16: *is_write = true; case SCMD_READ_16: *fst_blk = GET_BE64(&cdb[2]); *blk_cnt = GET_BE32(&cdb[10]); break; default: /* Process via normal I/O path. 
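   As a concrete example of the decoding above: a READ(10) CDB of
   28 00 00 00 03 E8 00 00 08 00 carries the LBA as a big-endian 32-bit
   value in bytes 2-5 (GET_BE32(&cdb[2]) == 1000) and the transfer length
   as a big-endian 16-bit value in bytes 7-8 (GET_BE16(&cdb[7]) == 8).
   Only opcodes not matched above fall through to this default case and
   stay on the normal path.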
*/ return PQI_STATUS_FAILURE; } return PQI_STATUS_SUCCESS; } /* print any arbitrary buffer of length total_len */ void pqisrc_print_buffer(pqisrc_softstate_t *softs, char *msg, void *user_buf, uint32_t total_len, uint32_t flags) { #define LINE_BUF_LEN 60 #define INDEX_PER_LINE 16 uint32_t buf_consumed = 0; int ii; char line_buf[LINE_BUF_LEN]; int line_len; /* written length per line */ uint8_t this_char; if (user_buf == NULL) return; /* Print index columns */ if (flags & PRINT_FLAG_HDR_COLUMN) { for (ii = 0, line_len = 0; ii < MIN(total_len, 16); ii++) { line_len += snprintf(line_buf + line_len, (LINE_BUF_LEN - line_len), "%02d ", ii); if ((line_len + 4) >= LINE_BUF_LEN) break; } DBG_NOTE("%15.15s:[ %s ]\n", "header", line_buf); } /* Print index columns */ while(buf_consumed < total_len) { memset(line_buf, 0, LINE_BUF_LEN); for (ii = 0, line_len = 0; ii < INDEX_PER_LINE; ii++) { this_char = *((char*)(user_buf) + buf_consumed); line_len += snprintf(line_buf + line_len, (LINE_BUF_LEN - line_len), "%02x ", this_char); buf_consumed++; if (buf_consumed >= total_len || (line_len + 4) >= LINE_BUF_LEN) break; } DBG_NOTE("%15.15s:[ %s ]\n", msg, line_buf); } } /* * Function used to build and send RAID bypass request to the adapter */ int pqisrc_send_scsi_cmd_raidbypass(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device, rcb_t *rcb, uint8_t *cdb) { struct raid_map *raid_map; boolean_t is_write = false; uint32_t map_idx; uint64_t fst_blk, lst_blk; uint32_t blk_cnt, blks_per_row; uint64_t fst_row, lst_row; uint32_t fst_row_offset, lst_row_offset; uint32_t fst_col, lst_col; uint32_t r5or6_blks_per_row; uint64_t r5or6_fst_row, r5or6_lst_row; uint32_t r5or6_fst_row_offset, r5or6_lst_row_offset; uint32_t r5or6_fst_col, r5or6_lst_col; uint16_t data_disks_per_row, total_disks_per_row; uint16_t layout_map_count; uint32_t stripesz; uint16_t strip_sz; uint32_t fst_grp, lst_grp, cur_grp; uint32_t map_row; uint64_t disk_block; uint32_t disk_blk_cnt; uint8_t cdb_length; int offload_to_mirror; int i; DBG_FUNC(" IN \n"); DBG_IO("!!!!!\n"); /* Check for eligible opcode, get LBA and block count. */ memcpy(cdb, OS_GET_CDBP(rcb), rcb->cmdlen); for(i = 0; i < rcb->cmdlen ; i++) DBG_IO(" CDB [ %d ] : %x\n",i,cdb[i]); if(check_for_scsi_opcode(cdb, &is_write, &fst_blk, &blk_cnt) == PQI_STATUS_FAILURE) return PQI_STATUS_FAILURE; /* Check for write to non-RAID-0. */ if (is_write && device->raid_level != SA_RAID_0) return PQI_STATUS_FAILURE; if(blk_cnt == 0) return PQI_STATUS_FAILURE; lst_blk = fst_blk + blk_cnt - 1; raid_map = device->raid_map; /* Check for invalid block or wraparound. */ if (lst_blk >= GET_LE64((uint8_t *)&raid_map->volume_blk_cnt) || lst_blk < fst_blk) return PQI_STATUS_FAILURE; data_disks_per_row = GET_LE16((uint8_t *)&raid_map->data_disks_per_row); strip_sz = GET_LE16((uint8_t *)(&raid_map->strip_size)); layout_map_count = GET_LE16((uint8_t *)(&raid_map->layout_map_count)); /* Calculate stripe information for the request. */ blks_per_row = data_disks_per_row * strip_sz; if (!blks_per_row) return PQI_STATUS_FAILURE; /*Send the IO in raid path itself, not AIO or raidbypass*/ /* use __udivdi3 ? */ fst_row = fst_blk / blks_per_row; lst_row = lst_blk / blks_per_row; fst_row_offset = (uint32_t)(fst_blk - (fst_row * blks_per_row)); lst_row_offset = (uint32_t)(lst_blk - (lst_row * blks_per_row)); fst_col = fst_row_offset / strip_sz; lst_col = lst_row_offset / strip_sz; /* If this isn't a single row/column then give to the controller. 
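   For example, with strip_sz = 128 and data_disks_per_row = 4,
   blks_per_row = 512; an 8-block request at LBA 1000 gives
   fst_row = lst_row = 1, fst_row_offset = 488, lst_row_offset = 495 and
   fst_col = lst_col = 3, so it lands on a single drive and remains
   eligible for bypass.  A 40-block request at the same LBA would end at
   LBA 1039 in row 2 and be sent down the normal RAID path instead.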
*/ if (fst_row != lst_row || fst_col != lst_col) return PQI_STATUS_FAILURE; /* Proceeding with driver mapping. */ total_disks_per_row = data_disks_per_row + GET_LE16((uint8_t *)(&raid_map->metadata_disks_per_row)); map_row = ((uint32_t)(fst_row >> raid_map->parity_rotation_shift)) % GET_LE16((uint8_t *)(&raid_map->row_cnt)); map_idx = (map_row * total_disks_per_row) + fst_col; /* RAID 1 */ if (device->raid_level == SA_RAID_1) { if (device->offload_to_mirror) map_idx += data_disks_per_row; device->offload_to_mirror = !device->offload_to_mirror; } else if (device->raid_level == SA_RAID_ADM) { /* RAID ADM */ /* * Handles N-way mirrors (R1-ADM) and R10 with # of drives * divisible by 3. */ offload_to_mirror = device->offload_to_mirror; if (offload_to_mirror == 0) { /* use physical disk in the first mirrored group. */ map_idx %= data_disks_per_row; } else { do { /* * Determine mirror group that map_idx * indicates. */ cur_grp = map_idx / data_disks_per_row; if (offload_to_mirror != cur_grp) { if (cur_grp < layout_map_count - 1) { /* * Select raid index from * next group. */ map_idx += data_disks_per_row; cur_grp++; } else { /* * Select raid index from first * group. */ map_idx %= data_disks_per_row; cur_grp = 0; } } } while (offload_to_mirror != cur_grp); } /* Set mirror group to use next time. */ offload_to_mirror = (offload_to_mirror >= layout_map_count - 1) ? 0 : offload_to_mirror + 1; if(offload_to_mirror >= layout_map_count) return PQI_STATUS_FAILURE; device->offload_to_mirror = offload_to_mirror; /* * Avoid direct use of device->offload_to_mirror within this * function since multiple threads might simultaneously * increment it beyond the range of device->layout_map_count -1. */ } else if ((device->raid_level == SA_RAID_5 || device->raid_level == SA_RAID_6) && layout_map_count > 1) { /* RAID 50/60 */ /* Verify first and last block are in same RAID group */ r5or6_blks_per_row = strip_sz * data_disks_per_row; stripesz = r5or6_blks_per_row * layout_map_count; fst_grp = (fst_blk % stripesz) / r5or6_blks_per_row; lst_grp = (lst_blk % stripesz) / r5or6_blks_per_row; if (fst_grp != lst_grp) return PQI_STATUS_FAILURE; /* Verify request is in a single row of RAID 5/6 */ fst_row = r5or6_fst_row = fst_blk / stripesz; r5or6_lst_row = lst_blk / stripesz; if (r5or6_fst_row != r5or6_lst_row) return PQI_STATUS_FAILURE; /* Verify request is in a single column */ fst_row_offset = r5or6_fst_row_offset = (uint32_t)((fst_blk % stripesz) % r5or6_blks_per_row); r5or6_lst_row_offset = (uint32_t)((lst_blk % stripesz) % r5or6_blks_per_row); fst_col = r5or6_fst_row_offset / strip_sz; r5or6_fst_col = fst_col; r5or6_lst_col = r5or6_lst_row_offset / strip_sz; if (r5or6_fst_col != r5or6_lst_col) return PQI_STATUS_FAILURE; /* Request is eligible */ map_row = ((uint32_t)(fst_row >> raid_map->parity_rotation_shift)) % GET_LE16((uint8_t *)(&raid_map->row_cnt)); map_idx = (fst_grp * (GET_LE16((uint8_t *)(&raid_map->row_cnt)) * total_disks_per_row)) + (map_row * total_disks_per_row) + fst_col; } rcb->ioaccel_handle = raid_map->dev_data[map_idx].ioaccel_handle; disk_block = GET_LE64((uint8_t *)(&raid_map->disk_starting_blk)) + fst_row * strip_sz + (fst_row_offset - fst_col * strip_sz); disk_blk_cnt = blk_cnt; /* Handle differing logical/physical block sizes. */ if (raid_map->phys_blk_shift) { disk_block <<= raid_map->phys_blk_shift; disk_blk_cnt <<= raid_map->phys_blk_shift; } if (disk_blk_cnt > 0xffff) return PQI_STATUS_FAILURE; /* Build the new CDB for the physical disk I/O. 
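   Continuing the example above (strip_sz = 128, fst_row = 1,
   fst_row_offset = 488, fst_col = 3) with disk_starting_blk = 0 and no
   phys_blk_shift: disk_block = 128 + (488 - 384) = 232 and
   disk_blk_cnt = 8, well under the 0xffff cap checked above.  disk_block
   fits in 32 bits, so the 10-byte CDB form is built below; only a
   disk_block above 0xffffffff forces the 16-byte form.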
*/ if (disk_block > 0xffffffff) { cdb[0] = is_write ? SCMD_WRITE_16 : SCMD_READ_16; cdb[1] = 0; PUT_BE64(disk_block, &cdb[2]); PUT_BE32(disk_blk_cnt, &cdb[10]); cdb[14] = 0; cdb[15] = 0; cdb_length = 16; } else { cdb[0] = is_write ? SCMD_WRITE_10 : SCMD_READ_10; cdb[1] = 0; PUT_BE32(disk_block, &cdb[2]); cdb[6] = 0; PUT_BE16(disk_blk_cnt, &cdb[7]); cdb[9] = 0; cdb_length = 10; } if (GET_LE16((uint8_t *)(&raid_map->flags)) & RAID_MAP_ENCRYPTION_ENABLED) { pqisrc_set_enc_info(&rcb->enc_info, raid_map, fst_blk); rcb->encrypt_enable = true; } else { rcb->encrypt_enable = false; } rcb->cmdlen = cdb_length; DBG_FUNC("OUT"); return PQI_STATUS_SUCCESS; } /* Function used to submit an AIO TMF to the adapter * DEVICE_RESET is not supported. */ static int pqisrc_send_aio_tmf(pqisrc_softstate_t *softs, pqi_scsi_dev_t *devp, rcb_t *rcb, rcb_t *rcb_to_manage, int tmf_type) { int rval = PQI_STATUS_SUCCESS; pqi_aio_tmf_req_t tmf_req; ib_queue_t *op_ib_q = NULL; memset(&tmf_req, 0, sizeof(pqi_aio_tmf_req_t)); DBG_FUNC("IN"); tmf_req.header.iu_type = PQI_REQUEST_IU_AIO_TASK_MANAGEMENT; tmf_req.header.iu_length = sizeof(tmf_req) - sizeof(iu_header_t); tmf_req.req_id = rcb->tag; tmf_req.error_idx = rcb->tag; tmf_req.nexus = devp->ioaccel_handle; //memcpy(tmf_req.lun, devp->scsi3addr, sizeof(tmf_req.lun)); tmf_req.tmf = tmf_type; tmf_req.resp_qid = OS_GET_TMF_RESP_QID(softs, rcb); op_ib_q = &softs->op_aio_ib_q[0]; if (tmf_type == SOP_TASK_MANAGEMENT_FUNCTION_ABORT_TASK) { tmf_req.req_id_to_manage = rcb_to_manage->tag; tmf_req.nexus = rcb_to_manage->ioaccel_handle; } DBG_INFO("tmf_req.header.iu_type : %x tmf_req.req_id_to_manage :%d \n",tmf_req.header.iu_type,tmf_req.req_id_to_manage); DBG_INFO("tmf_req.req_id : %d tmf_req.nexus : %x tmf_req.tmf %x QID : %d\n",tmf_req.req_id,tmf_req.nexus,tmf_req.tmf,op_ib_q->q_id); + DBG_WARN("aio tmf: iu_type=0x%x req_id_to_manage=0x%x\n", + tmf_req.header.iu_type, tmf_req.req_id_to_manage); + DBG_WARN("aio tmf: req_id=0x%x nexus=0x%x tmf=0x%x QID=%d\n", + tmf_req.req_id, tmf_req.nexus, tmf_req.tmf, op_ib_q->q_id); + + rcb->path = AIO_PATH; rcb->req_pending = true; /* Timedout tmf response goes here */ rcb->error_cmp_callback = pqisrc_process_aio_response_error; rval = pqisrc_submit_cmnd(softs, op_ib_q, &tmf_req); if (rval != PQI_STATUS_SUCCESS) { DBG_ERR("Unable to submit command rval=%d\n", rval); return rval; } rval = pqisrc_wait_on_condition(softs, rcb, PQISRC_TMF_TIMEOUT); if (rval != PQI_STATUS_SUCCESS){ DBG_ERR("Task Management tmf_type : %d timeout\n", tmf_type); rcb->status = rval; } if (rcb->status != REQUEST_SUCCESS) { DBG_ERR_BTL(devp, "Task Management failed tmf_type:%d " "stat:0x%x\n", tmf_type, rcb->status); rval = PQI_STATUS_FAILURE; } DBG_FUNC("OUT"); return rval; } /* Function used to submit a Raid TMF to the adapter */ static int pqisrc_send_raid_tmf(pqisrc_softstate_t *softs, pqi_scsi_dev_t *devp, rcb_t *rcb, rcb_t *rcb_to_manage, int tmf_type) { int rval = PQI_STATUS_SUCCESS; pqi_raid_tmf_req_t tmf_req; ib_queue_t *op_ib_q = NULL; memset(&tmf_req, 0, sizeof(pqi_raid_tmf_req_t)); DBG_FUNC("IN"); tmf_req.header.iu_type = PQI_REQUEST_IU_RAID_TASK_MANAGEMENT; tmf_req.header.iu_length = sizeof(tmf_req) - sizeof(iu_header_t); tmf_req.req_id = rcb->tag; memcpy(tmf_req.lun, devp->scsi3addr, sizeof(tmf_req.lun)); tmf_req.tmf = tmf_type; tmf_req.resp_qid = OS_GET_TMF_RESP_QID(softs, rcb); /* Decide the queue where the tmf request should be submitted */ if (tmf_type == SOP_TASK_MANAGEMENT_FUNCTION_ABORT_TASK) { tmf_req.obq_id_to_manage = rcb_to_manage->resp_qid; 
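		/*
		 * For an ABORT_TASK TMF the firmware identifies the command
		 * to abort by the response queue it was issued against
		 * (obq_id_to_manage, set above) and by its request tag
		 * (req_id_to_manage, set below).
		 */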
	tmf_req.req_id_to_manage = rcb_to_manage->tag;
	}

	if (softs->timeout_in_tmf &&
			tmf_type == SOP_TASK_MANAGEMENT_LUN_RESET) {
		/* OS_TMF_TIMEOUT_SEC - 1 to accommodate driver processing */
		tmf_req.timeout_in_sec = OS_TMF_TIMEOUT_SEC - 1;
		/* if OS tmf timeout is 0, set minimum value for timeout */
		if (!tmf_req.timeout_in_sec)
			tmf_req.timeout_in_sec = 1;
	}

	op_ib_q = &softs->op_raid_ib_q[0];
+	rcb->path = RAID_PATH;
	rcb->req_pending = true;
	/* Timedout tmf response goes here */
	rcb->error_cmp_callback = pqisrc_process_raid_response_error;

	rval = pqisrc_submit_cmnd(softs, op_ib_q, &tmf_req);
	if (rval != PQI_STATUS_SUCCESS) {
		DBG_ERR("Unable to submit command rval=%d\n", rval);
		return rval;
	}

	rval = pqisrc_wait_on_condition(softs, rcb, PQISRC_TMF_TIMEOUT);
	if (rval != PQI_STATUS_SUCCESS) {
		DBG_ERR("Task Management tmf_type : %d timeout\n", tmf_type);
		rcb->status = rval;
	}

	if (rcb->status != REQUEST_SUCCESS) {
		DBG_NOTE("Task Management failed tmf_type:%d "
				"stat:0x%x\n", tmf_type, rcb->status);
		rval = PQI_STATUS_FAILURE;
	}

	DBG_FUNC("OUT");
	return rval;
}

int
pqisrc_send_tmf(pqisrc_softstate_t *softs, pqi_scsi_dev_t *devp,
		rcb_t *rcb, rcb_t *rcb_to_manage, int tmf_type)
{
	int ret = PQI_STATUS_SUCCESS;

	DBG_FUNC("IN");

+	rcb->softs = softs;

	if(!devp->is_physical_device) {
		if (tmf_type == SOP_TASK_MANAGEMENT_FUNCTION_ABORT_TASK) {
			if(rcb_to_manage->path == AIO_PATH) {
				if(devp->offload_enabled)
					ret = pqisrc_send_aio_tmf(softs, devp, rcb, rcb_to_manage, tmf_type);
			}
			else {
				DBG_INFO("TASK ABORT not supported in raid\n");
				ret = PQI_STATUS_FAILURE;
			}
		}
		else {
			ret = pqisrc_send_raid_tmf(softs, devp, rcb, rcb_to_manage, tmf_type);
		}
	} else {
		if (tmf_type == SOP_TASK_MANAGEMENT_FUNCTION_ABORT_TASK)
			ret = pqisrc_send_aio_tmf(softs, devp, rcb, rcb_to_manage, tmf_type);
		else
			ret = pqisrc_send_raid_tmf(softs, devp, rcb, rcb_to_manage, tmf_type);
	}

	DBG_FUNC("OUT");

	return ret;
}

/*
 * Function used to build and send the vendor general request
 * Used for configuring PQI feature bits between firmware and driver
 */
int
pqisrc_build_send_vendor_request(
	pqisrc_softstate_t *softs,
	pqi_vendor_general_request_t *request,
	raid_path_error_info_elem_t *error_info)
{
	int ret = PQI_STATUS_SUCCESS;
	ib_queue_t *op_ib_q = &softs->op_raid_ib_q[PQI_DEFAULT_IB_QUEUE];
	ob_queue_t *ob_q = &softs->op_ob_q[PQI_DEFAULT_IB_QUEUE];

	rcb_t *rcb = NULL;

	uint16_t request_id = 0;

	/* Get the tag */
	request_id = pqisrc_get_tag(&softs->taglist);
	if (INVALID_ELEM == request_id) {
		DBG_ERR("Tag not available\n");
		ret = PQI_STATUS_FAILURE;
		goto err_notag;
	}

	((pqi_vendor_general_request_t *)request)->request_id = request_id;
	((pqi_vendor_general_request_t *)request)->response_queue_id = ob_q->q_id;

	rcb = &softs->rcb[request_id];

	rcb->req_pending = true;
	rcb->tag = request_id;

	ret = pqisrc_submit_cmnd(softs, op_ib_q, request);

	if (ret != PQI_STATUS_SUCCESS) {
		DBG_ERR("Unable to submit command\n");
		goto err_out;
	}

	ret = pqisrc_wait_on_condition(softs, rcb, PQISRC_CMD_TIMEOUT);
	if (ret != PQI_STATUS_SUCCESS) {
		DBG_ERR("Management request timed out!\n");
		goto err_out;
	}

	ret = rcb->status;
	if (ret) {
		ret = PQI_STATUS_FAILURE;
		if(error_info) {
			// TODO: config table err handling.
		}
	} else {
		if(error_info) {
			ret = PQI_STATUS_SUCCESS;
			memset(error_info, 0, sizeof(*error_info));
		}
	}

	os_reset_rcb(rcb);
	pqisrc_put_tag(&softs->taglist,
		((pqi_vendor_general_request_t *)request)->request_id);

	DBG_FUNC("OUT\n");

	return ret;

err_out:
	DBG_ERR("Vendor general request submission failed.\n");
	os_reset_rcb(rcb);
	pqisrc_put_tag(&softs->taglist,
		((pqi_vendor_general_request_t *)request)->request_id);
err_notag:
	DBG_FUNC("FAILED \n");
	return ret;
}

/* return the path as ASCII-string */
char *
io_path_to_ascii(IO_PATH_T path)
{
	switch (path) {
	case AIO_PATH:
		return "Aio";
	case RAID_PATH:
		return "Raid";
	default:
		return "Unknown";
	}
}
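/*
 * Illustrative sketch only (hypothetical helper, not part of the driver):
 * every synchronous request in this file follows the same tag lifecycle
 * seen in pqisrc_build_send_vendor_request() above -- take a tag, attach
 * the rcb, submit, wait, then reset the rcb and return the tag.
 */
#if 0
static int
example_sync_submit(pqisrc_softstate_t *softs, ib_queue_t *ib_q,
	pqi_vendor_general_request_t *request)
{
	uint16_t tag = pqisrc_get_tag(&softs->taglist);
	rcb_t *rcb;
	int ret;

	if (tag == INVALID_ELEM)
		return PQI_STATUS_FAILURE;

	/* Stamp the tag into the IU and point the matching rcb at it */
	request->request_id = tag;
	rcb = &softs->rcb[tag];
	rcb->req_pending = true;
	rcb->tag = tag;

	ret = pqisrc_submit_cmnd(softs, ib_q, request);
	if (ret == PQI_STATUS_SUCCESS)
		ret = pqisrc_wait_on_condition(softs, rcb, PQISRC_CMD_TIMEOUT);
	if (ret == PQI_STATUS_SUCCESS && rcb->status != REQUEST_SUCCESS)
		ret = PQI_STATUS_FAILURE;

	/* Always recycle the rcb and the tag, success or failure */
	os_reset_rcb(rcb);
	pqisrc_put_tag(&softs->taglist, tag);

	return ret;
}
#endif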