Index: head/sys/dev/smartpqi/smartpqi_cam.c =================================================================== --- head/sys/dev/smartpqi/smartpqi_cam.c (revision 336267) +++ head/sys/dev/smartpqi/smartpqi_cam.c (revision 336268) @@ -1,1204 +1,1200 @@ /*- * Copyright (c) 2018 Microsemi Corporation. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* $FreeBSD$ */ /* * CAM interface for smartpqi driver */ #include "smartpqi_includes.h" /* * Set cam sim properties of the smartpqi adapter. */ static void update_sim_properties(struct cam_sim *sim, struct ccb_pathinq *cpi) { pqisrc_softstate_t *softs = (struct pqisrc_softstate *) cam_sim_softc(sim); DBG_FUNC("IN\n"); cpi->version_num = 1; cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16; cpi->target_sprt = 0; cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED; cpi->hba_eng_cnt = 0; cpi->max_lun = PQI_MAX_MULTILUN; cpi->max_target = 1088; cpi->maxio = (softs->pqi_cap.max_sg_elem - 1) * PAGE_SIZE; cpi->initiator_id = 255; strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); strncpy(cpi->hba_vid, "Microsemi", HBA_IDLEN); strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); cpi->unit_number = cam_sim_unit(sim); cpi->bus_id = cam_sim_bus(sim); cpi->base_transfer_speed = 1200000; /* Base bus speed in KB/sec */ cpi->protocol = PROTO_SCSI; cpi->protocol_version = SCSI_REV_SPC4; cpi->transport = XPORT_SPI; cpi->transport_version = 2; cpi->ccb_h.status = CAM_REQ_CMP; DBG_FUNC("OUT\n"); } /* * Get transport settings of the smartpqi adapter */ static void get_transport_settings(struct pqisrc_softstate *softs, struct ccb_trans_settings *cts) { struct ccb_trans_settings_scsi *scsi = &cts->proto_specific.scsi; struct ccb_trans_settings_sas *sas = &cts->xport_specific.sas; struct ccb_trans_settings_spi *spi = &cts->xport_specific.spi; DBG_FUNC("IN\n"); cts->protocol = PROTO_SCSI; cts->protocol_version = SCSI_REV_SPC4; cts->transport = XPORT_SPI; cts->transport_version = 2; spi->valid = CTS_SPI_VALID_DISC; spi->flags = CTS_SPI_FLAGS_DISC_ENB; scsi->valid = CTS_SCSI_VALID_TQ; scsi->flags = CTS_SCSI_FLAGS_TAG_ENB; sas->valid = CTS_SAS_VALID_SPEED; cts->ccb_h.status = CAM_REQ_CMP; DBG_FUNC("OUT\n"); } /* * Add the target to CAM layer and rescan, when a new device is found */ void os_add_device(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device) { union ccb *ccb; 
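	/*
	 * On success xpt_rescan() takes ownership of the CCB and frees it
	 * from the rescan completion path, so the CCB is freed here only
	 * on the error paths.
	 */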
DBG_FUNC("IN\n"); if(softs->os_specific.sim_registered) { if ((ccb = xpt_alloc_ccb_nowait()) == NULL) { DBG_ERR("rescan failed (can't allocate CCB)\n"); return; } if (xpt_create_path(&ccb->ccb_h.path, NULL, cam_sim_path(softs->os_specific.sim), device->target, device->lun) != CAM_REQ_CMP) { DBG_ERR("rescan failed (can't create path)\n"); xpt_free_ccb(ccb); return; } xpt_rescan(ccb); } DBG_FUNC("OUT\n"); } /* * Remove the device from CAM layer when deleted or hot removed */ void os_remove_device(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device) { struct cam_path *tmppath; DBG_FUNC("IN\n"); if(softs->os_specific.sim_registered) { if (xpt_create_path(&tmppath, NULL, cam_sim_path(softs->os_specific.sim), device->target, device->lun) != CAM_REQ_CMP) { DBG_ERR("unable to create path for async event"); return; } xpt_async(AC_LOST_DEVICE, tmppath, NULL); xpt_free_path(tmppath); pqisrc_free_device(softs, device); } DBG_FUNC("OUT\n"); } /* * Function to release the frozen simq */ static void pqi_release_camq( rcb_t *rcb ) { pqisrc_softstate_t *softs; struct ccb_scsiio *csio; csio = (struct ccb_scsiio *)&rcb->cm_ccb->csio; softs = rcb->softs; DBG_FUNC("IN\n"); if (softs->os_specific.pqi_flags & PQI_FLAG_BUSY) { softs->os_specific.pqi_flags &= ~PQI_FLAG_BUSY; if (csio->ccb_h.status & CAM_RELEASE_SIMQ) xpt_release_simq(xpt_path_sim(csio->ccb_h.path), 0); else csio->ccb_h.status |= CAM_RELEASE_SIMQ; } DBG_FUNC("OUT\n"); } /* * Function to dma-unmap the completed request */ static void pqi_unmap_request(void *arg) { pqisrc_softstate_t *softs; rcb_t *rcb; DBG_IO("IN rcb = %p\n", arg); rcb = (rcb_t *)arg; softs = rcb->softs; if (!(rcb->cm_flags & PQI_CMD_MAPPED)) return; if (rcb->bcount != 0 ) { if (rcb->data_dir == SOP_DATA_DIR_FROM_DEVICE) bus_dmamap_sync(softs->os_specific.pqi_buffer_dmat, rcb->cm_datamap, BUS_DMASYNC_POSTREAD); if (rcb->data_dir == SOP_DATA_DIR_TO_DEVICE) bus_dmamap_sync(softs->os_specific.pqi_buffer_dmat, rcb->cm_datamap, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(softs->os_specific.pqi_buffer_dmat, rcb->cm_datamap); } rcb->cm_flags &= ~PQI_CMD_MAPPED; if(rcb->sgt && rcb->nseg) os_mem_free(rcb->softs, (void*)rcb->sgt, rcb->nseg*sizeof(sgt_t)); pqisrc_put_tag(&softs->taglist, rcb->tag); DBG_IO("OUT\n"); } /* * Construct meaningful LD name for volume here. */ static void smartpqi_fix_ld_inquiry(pqisrc_softstate_t *softs, struct ccb_scsiio *csio) { struct scsi_inquiry_data *inq = NULL; uint8_t *cdb = NULL; pqi_scsi_dev_t *device = NULL; DBG_FUNC("IN\n"); cdb = (csio->ccb_h.flags & CAM_CDB_POINTER) ? (uint8_t *)csio->cdb_io.cdb_ptr : csio->cdb_io.cdb_bytes; if(cdb[0] == INQUIRY && (cdb[1] & SI_EVPD) == 0 && (csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN && csio->dxfer_len >= SHORT_INQUIRY_LENGTH) { inq = (struct scsi_inquiry_data *)csio->data_ptr; device = softs->device_list[csio->ccb_h.target_id][csio->ccb_h.target_lun]; /* Let the disks be probed and dealt with via CAM. 
Only for LD let it fall through and inquiry be tweaked */ if( !device || !pqisrc_is_logical_device(device) || (device->devtype != DISK_DEVICE) || pqisrc_is_external_raid_device(device)) { return; } strncpy(inq->vendor, "MSCC", SID_VENDOR_SIZE); strncpy(inq->product, pqisrc_raidlevel_to_string(device->raid_level), SID_PRODUCT_SIZE); strncpy(inq->revision, device->volume_offline?"OFF":"OK", SID_REVISION_SIZE); } DBG_FUNC("OUT\n"); } /* * Handle completion of a command - pass results back through the CCB */ void os_io_response_success(rcb_t *rcb) { struct ccb_scsiio *csio; DBG_IO("IN rcb = %p\n", rcb); if (rcb == NULL) panic("rcb is null"); csio = (struct ccb_scsiio *)&rcb->cm_ccb->csio; if (csio == NULL) panic("csio is null"); rcb->status = REQUEST_SUCCESS; csio->ccb_h.status = CAM_REQ_CMP; smartpqi_fix_ld_inquiry(rcb->softs, csio); pqi_release_camq(rcb); pqi_unmap_request(rcb); xpt_done((union ccb *)csio); DBG_IO("OUT\n"); } /* * Error response handling for raid IO */ void os_raid_response_error(rcb_t *rcb, raid_path_error_info_elem_t *err_info) { struct ccb_scsiio *csio; pqisrc_softstate_t *softs; DBG_IO("IN\n"); csio = (struct ccb_scsiio *)&rcb->cm_ccb->csio; if (csio == NULL) panic("csio is null"); softs = rcb->softs; ASSERT(err_info != NULL); csio->scsi_status = err_info->status; csio->ccb_h.status = CAM_REQ_CMP_ERR; if (csio->ccb_h.func_code == XPT_SCSI_IO) { /* * Handle specific SCSI status values. */ switch(csio->scsi_status) { case PQI_RAID_STATUS_QUEUE_FULL: csio->ccb_h.status = CAM_REQ_CMP; DBG_ERR("Queue Full error"); break; /* check condition, sense data included */ case PQI_RAID_STATUS_CHECK_CONDITION: { uint16_t sense_data_len = LE_16(err_info->sense_data_len); uint8_t *sense_data = NULL; if (sense_data_len) sense_data = err_info->data; memset(&csio->sense_data, 0, csio->sense_len); sense_data_len = (sense_data_len > csio->sense_len) ? csio->sense_len : sense_data_len; if (sense_data) memcpy(&csio->sense_data, sense_data, sense_data_len); if (csio->sense_len > sense_data_len) csio->sense_resid = csio->sense_len - sense_data_len; else csio->sense_resid = 0; csio->ccb_h.status = CAM_SCSI_STATUS_ERROR | CAM_AUTOSNS_VALID | CAM_REQ_CMP_ERR; } break; case PQI_RAID_DATA_IN_OUT_UNDERFLOW: { uint32_t resid = 0; resid = rcb->bcount-err_info->data_out_transferred; csio->resid = resid; csio->ccb_h.status = CAM_REQ_CMP; break; } default: csio->ccb_h.status = CAM_REQ_CMP; break; } } if (softs->os_specific.pqi_flags & PQI_FLAG_BUSY) { softs->os_specific.pqi_flags &= ~PQI_FLAG_BUSY; if (csio->ccb_h.status & CAM_RELEASE_SIMQ) xpt_release_simq(xpt_path_sim(csio->ccb_h.path), 0); else csio->ccb_h.status |= CAM_RELEASE_SIMQ; } pqi_unmap_request(rcb); xpt_done((union ccb *)csio); DBG_IO("OUT\n"); } /* * Error response handling for aio. 
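 *
 * Both this path and the RAID error path above clamp the firmware's
 * sense length to the CCB's sense buffer before copying. A minimal
 * sketch of that clamp-and-copy step (hypothetical helper, shown for
 * illustration only; assumes the types pulled in by
 * smartpqi_includes.h):
 *
 *	static inline void
 *	pqi_copy_sense_sketch(struct ccb_scsiio *csio,
 *		const uint8_t *sense, uint16_t slen)
 *	{
 *		memset(&csio->sense_data, 0, csio->sense_len);
 *		if (slen > csio->sense_len)
 *			slen = csio->sense_len;
 *		if (sense != NULL)
 *			memcpy(&csio->sense_data, sense, slen);
 *		csio->sense_resid = csio->sense_len - slen;
 *	}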
*/ void os_aio_response_error(rcb_t *rcb, aio_path_error_info_elem_t *err_info) { struct ccb_scsiio *csio; pqisrc_softstate_t *softs; DBG_IO("IN\n"); if (rcb == NULL) panic("rcb is null"); rcb->status = REQUEST_SUCCESS; csio = (struct ccb_scsiio *)&rcb->cm_ccb->csio; if (csio == NULL) panic("csio is null"); softs = rcb->softs; switch (err_info->service_resp) { case PQI_AIO_SERV_RESPONSE_COMPLETE: csio->ccb_h.status = err_info->status; break; case PQI_AIO_SERV_RESPONSE_FAILURE: switch(err_info->status) { case PQI_AIO_STATUS_IO_ABORTED: csio->ccb_h.status = CAM_REQ_ABORTED; DBG_WARN_BTL(rcb->dvp, "IO aborted\n"); break; case PQI_AIO_STATUS_UNDERRUN: csio->ccb_h.status = CAM_REQ_CMP; csio->resid = LE_32(err_info->resd_count); break; case PQI_AIO_STATUS_OVERRUN: csio->ccb_h.status = CAM_REQ_CMP; break; case PQI_AIO_STATUS_AIO_PATH_DISABLED: DBG_WARN_BTL(rcb->dvp,"AIO Path Disabled\n"); rcb->dvp->offload_enabled = false; csio->ccb_h.status |= CAM_REQUEUE_REQ; break; case PQI_AIO_STATUS_IO_ERROR: case PQI_AIO_STATUS_IO_NO_DEVICE: case PQI_AIO_STATUS_INVALID_DEVICE: default: DBG_WARN_BTL(rcb->dvp,"IO Error/Invalid/No device\n"); csio->ccb_h.status |= CAM_SCSI_STATUS_ERROR; break; } break; case PQI_AIO_SERV_RESPONSE_TMF_COMPLETE: case PQI_AIO_SERV_RESPONSE_TMF_SUCCEEDED: csio->ccb_h.status = CAM_REQ_CMP; break; case PQI_AIO_SERV_RESPONSE_TMF_REJECTED: case PQI_AIO_SERV_RESPONSE_TMF_INCORRECT_LUN: DBG_WARN_BTL(rcb->dvp,"TMF rejected/Incorrect Lun\n"); csio->ccb_h.status |= CAM_SCSI_STATUS_ERROR; break; default: DBG_WARN_BTL(rcb->dvp,"Scsi Status Error\n"); csio->ccb_h.status |= CAM_SCSI_STATUS_ERROR; break; } if(err_info->data_pres == DATA_PRESENT_SENSE_DATA ) { csio->scsi_status = PQI_AIO_STATUS_CHECK_CONDITION; uint8_t *sense_data = NULL; unsigned sense_data_len = LE_16(err_info->data_len); if (sense_data_len) sense_data = err_info->data; DBG_ERR_BTL(rcb->dvp, "SCSI_STATUS_CHECK_COND sense size %u\n", sense_data_len); memset(&csio->sense_data, 0, csio->sense_len); if (sense_data) memcpy(&csio->sense_data, sense_data, ((sense_data_len > csio->sense_len) ? csio->sense_len : sense_data_len)); if (csio->sense_len > sense_data_len) csio->sense_resid = csio->sense_len - sense_data_len; else csio->sense_resid = 0; csio->ccb_h.status = CAM_SCSI_STATUS_ERROR | CAM_AUTOSNS_VALID; } smartpqi_fix_ld_inquiry(softs, csio); pqi_release_camq(rcb); pqi_unmap_request(rcb); xpt_done((union ccb *)csio); DBG_IO("OUT\n"); } /* * Command-mapping helper function - populate this command's s/g table. 
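 *
 * This is the bus_dmamap_load_ccb(9) callback: busdma hands back either
 * an error or an array of nseg physical segments for the CCB's data
 * buffer. The load may complete synchronously from pqi_map_request()
 * or later from busdma's deferral path, which is why the I/O start
 * path treats an EINPROGRESS return as a pending submission rather
 * than a failure.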
 */
static void
pqi_request_map_helper(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	pqisrc_softstate_t *softs;
	rcb_t *rcb;

	rcb = (rcb_t *)arg;
	softs = rcb->softs;

	if (error || nseg > softs->pqi_cap.max_sg_elem) {
		xpt_freeze_simq(softs->os_specific.sim, 1);
		rcb->cm_ccb->ccb_h.status |= (CAM_REQUEUE_REQ|
						CAM_RELEASE_SIMQ);
		DBG_ERR_BTL(rcb->dvp, "map failed err = %d or nseg(%d) > sgelem(%d)\n",
			error, nseg, softs->pqi_cap.max_sg_elem);
		pqi_unmap_request(rcb);
		xpt_done((union ccb *)rcb->cm_ccb);
		return;
	}

	/* Allocate the per-segment table with the element size that
	 * pqi_unmap_request() frees (sgt_t, not rcb_t). */
	rcb->sgt = os_mem_alloc(softs, nseg * sizeof(sgt_t));
	rcb->nseg = nseg;
	if (rcb->sgt != NULL) {
		for (int i = 0; i < nseg; i++) {
			rcb->sgt[i].addr = segs[i].ds_addr;
			rcb->sgt[i].len = segs[i].ds_len;
			rcb->sgt[i].flags = 0;
		}
	}

	if (rcb->data_dir == SOP_DATA_DIR_FROM_DEVICE)
		bus_dmamap_sync(softs->os_specific.pqi_buffer_dmat,
			rcb->cm_datamap, BUS_DMASYNC_PREREAD);
	if (rcb->data_dir == SOP_DATA_DIR_TO_DEVICE)
		bus_dmamap_sync(softs->os_specific.pqi_buffer_dmat,
			rcb->cm_datamap, BUS_DMASYNC_PREWRITE);

	/* Call IO functions depending on pd or ld */
	rcb->status = REQUEST_PENDING;

	error = pqisrc_build_send_io(softs, rcb);

	if (error) {
		rcb->req_pending = false;
		xpt_freeze_simq(softs->os_specific.sim, 1);
		rcb->cm_ccb->ccb_h.status |= (CAM_REQUEUE_REQ
						|CAM_RELEASE_SIMQ);
		DBG_ERR_BTL(rcb->dvp, "Build IO failed, error = %d\n", error);
		pqi_unmap_request(rcb);
		xpt_done((union ccb *)rcb->cm_ccb);
		return;
	}
}

/*
 * Function to dma-map the request buffer
 */
static int
pqi_map_request(rcb_t *rcb)
{
	pqisrc_softstate_t *softs = rcb->softs;
	int error = PQI_STATUS_SUCCESS;
	union ccb *ccb = rcb->cm_ccb;

	DBG_FUNC("IN\n");

	/* check that mapping is necessary */
	if (rcb->cm_flags & PQI_CMD_MAPPED)
		return(0);
	rcb->cm_flags |= PQI_CMD_MAPPED;

	if (rcb->bcount) {
		error = bus_dmamap_load_ccb(softs->os_specific.pqi_buffer_dmat,
					rcb->cm_datamap, ccb,
					pqi_request_map_helper, rcb, 0);
		if (error != 0){
			DBG_ERR_BTL(rcb->dvp, "bus_dmamap_load_ccb failed = %d count = %d\n",
				error, rcb->bcount);
			return error;
		}
	} else {
		/*
		 * Set up the command to go to the controller. If there are no
		 * data buffers associated with the command then it can bypass
		 * busdma.
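		 * Zero-payload commands (a TEST UNIT READY, for example)
		 * take this path and go straight to pqisrc_build_send_io().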
		 */
		/* Call IO functions depending on pd or ld */
		rcb->status = REQUEST_PENDING;
		error = pqisrc_build_send_io(softs, rcb);
	}

	DBG_FUNC("OUT error = %d\n", error);

	return error;
}

/*
 * Function to clear the request control block
 */
void
os_reset_rcb(rcb_t *rcb)
{
	rcb->error_info = NULL;
	rcb->req = NULL;
	rcb->status = -1;
	rcb->tag = INVALID_ELEM;
	rcb->dvp = NULL;
	rcb->cdbp = NULL;
	rcb->softs = NULL;
	rcb->cm_flags = 0;
	rcb->cm_data = NULL;
	rcb->bcount = 0;
	rcb->nseg = 0;
	rcb->sgt = NULL;
	rcb->cm_ccb = NULL;
	rcb->encrypt_enable = false;
	rcb->ioaccel_handle = 0;
	rcb->resp_qid = 0;
	rcb->req_pending = false;
}

/*
 * Callback function for the lun rescan
 */
static void
smartpqi_lunrescan_cb(struct cam_periph *periph, union ccb *ccb)
{
	xpt_free_path(ccb->ccb_h.path);
	xpt_free_ccb(ccb);
}

/*
 * Function to rescan the lun
 */
static void
smartpqi_lun_rescan(struct pqisrc_softstate *softs, int target, int lun)
{
	union ccb *ccb = NULL;
	cam_status status = 0;
	struct cam_path *path = NULL;

	DBG_FUNC("IN\n");

	ccb = xpt_alloc_ccb_nowait();
	if (ccb == NULL) {
		/* Guard against a failed nowait allocation before bzero(). */
		DBG_ERR("Unable to alloc ccb for lun rescan\n");
		return;
	}
	status = xpt_create_path(&path, NULL,
			cam_sim_path(softs->os_specific.sim), target, lun);
	if (status != CAM_REQ_CMP) {
		DBG_ERR("xpt_create_path status(%d) != CAM_REQ_CMP \n",
			status);
		xpt_free_ccb(ccb);
		return;
	}

	bzero(ccb, sizeof(union ccb));
	xpt_setup_ccb(&ccb->ccb_h, path, 5);
	ccb->ccb_h.func_code = XPT_SCAN_LUN;
	ccb->ccb_h.cbfcnp = smartpqi_lunrescan_cb;
	ccb->crcn.flags = CAM_FLAG_NONE;

	xpt_action(ccb);

	DBG_FUNC("OUT\n");
}

/*
 * Function to rescan the lun under each target
 */
void
smartpqi_target_rescan(struct pqisrc_softstate *softs)
{
	int target = 0, lun = 0;

	DBG_FUNC("IN\n");

	for(target = 0; target < PQI_MAX_DEVICES; target++){
		for(lun = 0; lun < PQI_MAX_MULTILUN; lun++){
			if(softs->device_list[target][lun]){
				smartpqi_lun_rescan(softs, target, lun);
			}
		}
	}

	DBG_FUNC("OUT\n");
}

/*
 * Set the mode of tagged command queueing for the current task.
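 *
 * The translation from CAM tag messages to SOP task attributes is
 * one-to-one:
 *
 *	MSG_HEAD_OF_Q_TAG	-> SOP_TASK_ATTRIBUTE_HEAD_OF_QUEUE
 *	MSG_ORDERED_Q_TAG	-> SOP_TASK_ATTRIBUTE_ORDERED
 *	MSG_SIMPLE_Q_TAG	-> SOP_TASK_ATTRIBUTE_SIMPLE (also the default)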
*/ uint8_t os_get_task_attr(rcb_t *rcb) { union ccb *ccb = rcb->cm_ccb; uint8_t tag_action = SOP_TASK_ATTRIBUTE_SIMPLE; switch(ccb->csio.tag_action) { case MSG_HEAD_OF_Q_TAG: tag_action = SOP_TASK_ATTRIBUTE_HEAD_OF_QUEUE; break; case MSG_ORDERED_Q_TAG: tag_action = SOP_TASK_ATTRIBUTE_ORDERED; break; case MSG_SIMPLE_Q_TAG: default: tag_action = SOP_TASK_ATTRIBUTE_SIMPLE; break; } return tag_action; } /* * Complete all outstanding commands */ void os_complete_outstanding_cmds_nodevice(pqisrc_softstate_t *softs) { int tag = 0; DBG_FUNC("IN\n"); for (tag = 1; tag < softs->max_outstanding_io; tag++) { rcb_t *prcb = &softs->rcb[tag]; if(prcb->req_pending && prcb->cm_ccb ) { prcb->req_pending = false; prcb->cm_ccb->ccb_h.status = CAM_REQ_ABORTED | CAM_REQ_CMP; xpt_done((union ccb *)prcb->cm_ccb); prcb->cm_ccb = NULL; } } DBG_FUNC("OUT\n"); } /* * IO handling functionality entry point */ static int pqisrc_io_start(struct cam_sim *sim, union ccb *ccb) { rcb_t *rcb; uint32_t tag, no_transfer = 0; pqisrc_softstate_t *softs = (struct pqisrc_softstate *) cam_sim_softc(sim); int32_t error = PQI_STATUS_FAILURE; pqi_scsi_dev_t *dvp; DBG_FUNC("IN\n"); if( softs->device_list[ccb->ccb_h.target_id][ccb->ccb_h.target_lun] == NULL ) { ccb->ccb_h.status = CAM_DEV_NOT_THERE; DBG_INFO("Device = %d not there\n", ccb->ccb_h.target_id); return PQI_STATUS_FAILURE; } dvp = softs->device_list[ccb->ccb_h.target_id][ccb->ccb_h.target_lun]; /* Check controller state */ if (IN_PQI_RESET(softs)) { ccb->ccb_h.status = CAM_SCSI_BUS_RESET | CAM_BUSY | CAM_REQ_INPROG; DBG_WARN("Device = %d BUSY/IN_RESET\n", ccb->ccb_h.target_id); return error; } /* Check device state */ if (pqisrc_ctrl_offline(softs) || DEV_GONE(dvp)) { ccb->ccb_h.status = CAM_DEV_NOT_THERE | CAM_REQ_CMP; DBG_WARN("Device = %d GONE/OFFLINE\n", ccb->ccb_h.target_id); return error; } /* Check device reset */ if (DEV_RESET(dvp)) { ccb->ccb_h.status = CAM_SCSI_BUSY | CAM_REQ_INPROG | CAM_BUSY; DBG_WARN("Device %d reset returned busy\n", ccb->ccb_h.target_id); return error; } if (dvp->expose_device == false) { ccb->ccb_h.status = CAM_DEV_NOT_THERE; DBG_INFO("Device = %d not exposed\n", ccb->ccb_h.target_id); return error; } tag = pqisrc_get_tag(&softs->taglist); if( tag == INVALID_ELEM ) { DBG_ERR("Get Tag failed\n"); xpt_freeze_simq(softs->os_specific.sim, 1); softs->os_specific.pqi_flags |= PQI_FLAG_BUSY; ccb->ccb_h.status |= (CAM_REQUEUE_REQ | CAM_RELEASE_SIMQ); return PQI_STATUS_FAILURE; } DBG_IO("tag = %d &softs->taglist : %p\n", tag, &softs->taglist); rcb = &softs->rcb[tag]; os_reset_rcb( rcb ); rcb->tag = tag; rcb->softs = softs; rcb->cmdlen = ccb->csio.cdb_len; ccb->ccb_h.sim_priv.entries[0].ptr = rcb; switch (ccb->ccb_h.flags & CAM_DIR_MASK) { case CAM_DIR_IN: rcb->data_dir = SOP_DATA_DIR_FROM_DEVICE; break; case CAM_DIR_OUT: rcb->data_dir = SOP_DATA_DIR_TO_DEVICE; break; case CAM_DIR_NONE: no_transfer = 1; break; default: DBG_ERR("Unknown Dir\n"); break; } rcb->cm_ccb = ccb; rcb->dvp = softs->device_list[ccb->ccb_h.target_id][ccb->ccb_h.target_lun]; if (!no_transfer) { rcb->cm_data = (void *)ccb->csio.data_ptr; rcb->bcount = ccb->csio.dxfer_len; } else { rcb->cm_data = NULL; rcb->bcount = 0; } /* * Submit the request to the adapter. * * Note that this may fail if we're unable to map the request (and * if we ever learn a transport layer other than simple, may fail * if the adapter rejects the command). 
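 *
 * On any such failure the simq is frozen and CAM_RELEASE_SIMQ is set in
 * the CCB so the queue thaws when the CCB completes. An EINPROGRESS
 * return from pqi_map_request() is not a failure: busdma has deferred
 * the mapping and pqi_request_map_helper() will finish the submission.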
*/ if ((error = pqi_map_request(rcb)) != 0) { rcb->req_pending = false; xpt_freeze_simq(softs->os_specific.sim, 1); ccb->ccb_h.status |= CAM_RELEASE_SIMQ; if (error == EINPROGRESS) { DBG_WARN("In Progress on %d\n", ccb->ccb_h.target_id); error = 0; } else { ccb->ccb_h.status |= CAM_REQUEUE_REQ; DBG_WARN("Requeue req error = %d target = %d\n", error, ccb->ccb_h.target_id); pqi_unmap_request(rcb); } } DBG_FUNC("OUT error = %d\n", error); return error; } /* * Abort a task, task management functionality */ static int pqisrc_scsi_abort_task(pqisrc_softstate_t *softs, union ccb *ccb) { rcb_t *rcb = ccb->ccb_h.sim_priv.entries[0].ptr; uint32_t abort_tag = rcb->tag; uint32_t tag = 0; int rval = PQI_STATUS_SUCCESS; uint16_t qid; DBG_FUNC("IN\n"); qid = (uint16_t)rcb->resp_qid; tag = pqisrc_get_tag(&softs->taglist); rcb = &softs->rcb[tag]; rcb->tag = tag; rcb->resp_qid = qid; rval = pqisrc_send_tmf(softs, rcb->dvp, rcb, abort_tag, SOP_TASK_MANAGEMENT_FUNCTION_ABORT_TASK); if (PQI_STATUS_SUCCESS == rval) { rval = rcb->status; if (REQUEST_SUCCESS == rval) { ccb->ccb_h.status = CAM_REQ_ABORTED; } } pqisrc_put_tag(&softs->taglist, abort_tag); pqisrc_put_tag(&softs->taglist,rcb->tag); DBG_FUNC("OUT rval = %d\n", rval); return rval; } /* * Abort a taskset, task management functionality */ static int pqisrc_scsi_abort_task_set(pqisrc_softstate_t *softs, union ccb *ccb) { rcb_t *rcb = NULL; uint32_t tag = 0; int rval = PQI_STATUS_SUCCESS; DBG_FUNC("IN\n"); tag = pqisrc_get_tag(&softs->taglist); rcb = &softs->rcb[tag]; rcb->tag = tag; rval = pqisrc_send_tmf(softs, rcb->dvp, rcb, 0, SOP_TASK_MANAGEMENT_FUNCTION_ABORT_TASK_SET); if (rval == PQI_STATUS_SUCCESS) { rval = rcb->status; } pqisrc_put_tag(&softs->taglist,rcb->tag); DBG_FUNC("OUT rval = %d\n", rval); return rval; } /* * Target reset task management functionality */ static int pqisrc_target_reset( pqisrc_softstate_t *softs, union ccb *ccb) { pqi_scsi_dev_t *devp = softs->device_list[ccb->ccb_h.target_id][ccb->ccb_h.target_lun]; rcb_t *rcb = NULL; uint32_t tag = 0; int rval = PQI_STATUS_SUCCESS; DBG_FUNC("IN\n"); if (devp == NULL) { DBG_ERR("bad target t%d\n", ccb->ccb_h.target_id); return (-1); } tag = pqisrc_get_tag(&softs->taglist); rcb = &softs->rcb[tag]; rcb->tag = tag; devp->reset_in_progress = true; rval = pqisrc_send_tmf(softs, devp, rcb, 0, SOP_TASK_MANAGEMENT_LUN_RESET); if (PQI_STATUS_SUCCESS == rval) { rval = rcb->status; } devp->reset_in_progress = false; pqisrc_put_tag(&softs->taglist,rcb->tag); DBG_FUNC("OUT rval = %d\n", rval); return ((rval == REQUEST_SUCCESS) ? PQI_STATUS_SUCCESS : PQI_STATUS_FAILURE); } /* * cam entry point of the smartpqi module. 
*/ static void smartpqi_cam_action(struct cam_sim *sim, union ccb *ccb) { struct pqisrc_softstate *softs = cam_sim_softc(sim); struct ccb_hdr *ccb_h = &ccb->ccb_h; DBG_FUNC("IN\n"); switch (ccb_h->func_code) { case XPT_SCSI_IO: { if(!pqisrc_io_start(sim, ccb)) { return; } break; } case XPT_CALC_GEOMETRY: { struct ccb_calc_geometry *ccg; ccg = &ccb->ccg; if (ccg->block_size == 0) { ccb->ccb_h.status &= ~CAM_SIM_QUEUED; ccb->ccb_h.status = CAM_REQ_INVALID; break; } cam_calc_geometry(ccg, /* extended */ 1); ccb->ccb_h.status = CAM_REQ_CMP; break; } case XPT_PATH_INQ: { update_sim_properties(sim, &ccb->cpi); ccb->ccb_h.status = CAM_REQ_CMP; break; } case XPT_GET_TRAN_SETTINGS: get_transport_settings(softs, &ccb->cts); ccb->ccb_h.status = CAM_REQ_CMP; break; case XPT_ABORT: if(pqisrc_scsi_abort_task(softs, ccb)) { ccb->ccb_h.status = CAM_REQ_CMP_ERR; xpt_done(ccb); DBG_ERR("Abort task failed on %d\n", ccb->ccb_h.target_id); return; } break; case XPT_TERM_IO: if (pqisrc_scsi_abort_task_set(softs, ccb)) { ccb->ccb_h.status = CAM_REQ_CMP_ERR; DBG_ERR("Abort task set failed on %d\n", ccb->ccb_h.target_id); xpt_done(ccb); return; } break; case XPT_RESET_DEV: if(pqisrc_target_reset(softs, ccb)) { ccb->ccb_h.status = CAM_REQ_CMP_ERR; DBG_ERR("Target reset failed on %d\n", ccb->ccb_h.target_id); xpt_done(ccb); return; } else { ccb->ccb_h.status = CAM_REQ_CMP; } break; case XPT_RESET_BUS: ccb->ccb_h.status = CAM_REQ_CMP; break; case XPT_SET_TRAN_SETTINGS: ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; return; default: DBG_WARN("UNSUPPORTED FUNC CODE\n"); ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; break; } xpt_done(ccb); DBG_FUNC("OUT\n"); } /* * Function to poll the response, when interrupts are unavailable * This also serves supporting crash dump. */ static void smartpqi_poll(struct cam_sim *sim) { struct pqisrc_softstate *softs = cam_sim_softc(sim); int i; for (i = 1; i < softs->intr_count; i++ ) pqisrc_process_response_queue(softs, i); } /* * Function to adjust the queue depth of a device */ void smartpqi_adjust_queue_depth(struct cam_path *path, uint32_t queue_depth) { struct ccb_relsim crs; DBG_INFO("IN\n"); xpt_setup_ccb(&crs.ccb_h, path, 5); crs.ccb_h.func_code = XPT_REL_SIMQ; crs.ccb_h.flags = CAM_DEV_QFREEZE; crs.release_flags = RELSIM_ADJUST_OPENINGS; crs.openings = queue_depth; xpt_action((union ccb *)&crs); if(crs.ccb_h.status != CAM_REQ_CMP) { printf("XPT_REL_SIMQ failed stat=%d\n", crs.ccb_h.status); } DBG_INFO("OUT\n"); } /* * Function to register async callback for setting queue depth */ static void smartpqi_async(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg) { struct pqisrc_softstate *softs; softs = (struct pqisrc_softstate*)callback_arg; DBG_FUNC("IN\n"); switch (code) { case AC_FOUND_DEVICE: { struct ccb_getdev *cgd; cgd = (struct ccb_getdev *)arg; if (cgd == NULL) { break; } uint32_t t_id = cgd->ccb_h.target_id; if (t_id <= (PQI_CTLR_INDEX - 1)) { if (softs != NULL) { pqi_scsi_dev_t *dvp = softs->device_list[t_id][cgd->ccb_h.target_lun]; smartpqi_adjust_queue_depth(path, dvp->queue_depth); } } break; } default: break; } DBG_FUNC("OUT\n"); } /* * Function to register sim with CAM layer for smartpqi driver */ int register_sim(struct pqisrc_softstate *softs, int card_index) { int error = 0; int max_transactions; union ccb *ccb = NULL; cam_status status = 0; struct ccb_setasync csa; struct cam_sim *sim; DBG_FUNC("IN\n"); max_transactions = softs->max_io_for_scsi_ml; softs->os_specific.devq = cam_simq_alloc(max_transactions); if (softs->os_specific.devq == NULL) { 
DBG_ERR("cam_simq_alloc failed txns = %d\n", max_transactions); return PQI_STATUS_FAILURE; } sim = cam_sim_alloc(smartpqi_cam_action, \ smartpqi_poll, "smartpqi", softs, \ card_index, &softs->os_specific.cam_lock, \ 1, max_transactions, softs->os_specific.devq); if (sim == NULL) { DBG_ERR("cam_sim_alloc failed txns = %d\n", max_transactions); cam_simq_free(softs->os_specific.devq); return PQI_STATUS_FAILURE; } softs->os_specific.sim = sim; mtx_lock(&softs->os_specific.cam_lock); status = xpt_bus_register(sim, softs->os_specific.pqi_dev, 0); if (status != CAM_SUCCESS) { DBG_ERR("xpt_bus_register failed status=%d\n", status); cam_sim_free(softs->os_specific.sim, FALSE); cam_simq_free(softs->os_specific.devq); mtx_unlock(&softs->os_specific.cam_lock); return PQI_STATUS_FAILURE; } softs->os_specific.sim_registered = TRUE; ccb = xpt_alloc_ccb_nowait(); if (ccb == NULL) { DBG_ERR("xpt_create_path failed\n"); return PQI_STATUS_FAILURE; } if (xpt_create_path(&ccb->ccb_h.path, NULL, cam_sim_path(softs->os_specific.sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { DBG_ERR("xpt_create_path failed\n"); xpt_free_ccb(ccb); xpt_bus_deregister(cam_sim_path(softs->os_specific.sim)); cam_sim_free(softs->os_specific.sim, TRUE); mtx_unlock(&softs->os_specific.cam_lock); return PQI_STATUS_FAILURE; } /* * Callback to set the queue depth per target which is * derived from the FW. */ softs->os_specific.path = ccb->ccb_h.path; xpt_setup_ccb(&csa.ccb_h, softs->os_specific.path, 5); csa.ccb_h.func_code = XPT_SASYNC_CB; csa.event_enable = AC_FOUND_DEVICE; csa.callback = smartpqi_async; csa.callback_arg = softs; xpt_action((union ccb *)&csa); if (csa.ccb_h.status != CAM_REQ_CMP) { DBG_ERR("Unable to register smartpqi_aysnc handler: %d!\n", csa.ccb_h.status); } mtx_unlock(&softs->os_specific.cam_lock); DBG_INFO("OUT\n"); return error; } /* * Function to deregister smartpqi sim from cam layer */ void deregister_sim(struct pqisrc_softstate *softs) { struct ccb_setasync csa; DBG_FUNC("IN\n"); if (softs->os_specific.mtx_init) { mtx_lock(&softs->os_specific.cam_lock); } xpt_setup_ccb(&csa.ccb_h, softs->os_specific.path, 5); csa.ccb_h.func_code = XPT_SASYNC_CB; csa.event_enable = 0; csa.callback = smartpqi_async; csa.callback_arg = softs; xpt_action((union ccb *)&csa); xpt_free_path(softs->os_specific.path); xpt_release_simq(softs->os_specific.sim, 0); xpt_bus_deregister(cam_sim_path(softs->os_specific.sim)); softs->os_specific.sim_registered = FALSE; if (softs->os_specific.sim) { cam_sim_free(softs->os_specific.sim, FALSE); softs->os_specific.sim = NULL; } if (softs->os_specific.mtx_init) { mtx_unlock(&softs->os_specific.cam_lock); } if (softs->os_specific.devq != NULL) { cam_simq_free(softs->os_specific.devq); } if (softs->os_specific.mtx_init) { mtx_destroy(&softs->os_specific.cam_lock); softs->os_specific.mtx_init = FALSE; } mtx_destroy(&softs->os_specific.map_lock); DBG_FUNC("OUT\n"); } - -static void smartpqi_cam_action(struct cam_sim *, union ccb *); -static void smartpqi_poll(struct cam_sim *); - Index: head/sys/dev/smartpqi/smartpqi_ioctl.c =================================================================== --- head/sys/dev/smartpqi/smartpqi_ioctl.c (revision 336267) +++ head/sys/dev/smartpqi/smartpqi_ioctl.c (revision 336268) @@ -1,407 +1,403 @@ /*- * Copyright (c) 2018 Microsemi Corporation. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* $FreeBSD$ */ /* * Management interface for smartpqi driver */ #include "smartpqi_includes.h" /* * Wrapper function to copy to user from kernel */ int os_copy_to_user(struct pqisrc_softstate *softs, void *dest_buf, void *src_buf, int size, int mode) { return(copyout(src_buf, dest_buf, size)); } /* * Wrapper function to copy from user to kernel */ int os_copy_from_user(struct pqisrc_softstate *softs, void *dest_buf, void *src_buf, int size, int mode) { return(copyin(src_buf, dest_buf, size)); } /* * Device open function for ioctl entry */ static int smartpqi_open(struct cdev *cdev, int flags, int devtype, struct thread *td) { int error = PQI_STATUS_SUCCESS; return error; } /* * Device close function for ioctl entry */ static int smartpqi_close(struct cdev *cdev, int flags, int devtype, struct thread *td) { int error = PQI_STATUS_SUCCESS; return error; } /* * ioctl for getting driver info */ static void smartpqi_get_driver_info_ioctl(caddr_t udata, struct cdev *cdev) { struct pqisrc_softstate *softs = cdev->si_drv1; pdriver_info driver_info = (pdriver_info)udata; DBG_FUNC("IN udata = %p cdev = %p\n", udata, cdev); driver_info->major_version = PQISRC_DRIVER_MAJOR; driver_info->minor_version = PQISRC_DRIVER_MINOR; driver_info->release_version = PQISRC_DRIVER_RELEASE; driver_info->build_revision = PQISRC_DRIVER_REVISION; driver_info->max_targets = PQI_MAX_DEVICES - 1; driver_info->max_io = softs->max_io_for_scsi_ml; driver_info->max_transfer_length = softs->pqi_cap.max_transfer_size; DBG_FUNC("OUT\n"); } /* * ioctl for getting controller info */ static void smartpqi_get_pci_info_ioctl(caddr_t udata, struct cdev *cdev) { struct pqisrc_softstate *softs = cdev->si_drv1; device_t dev = softs->os_specific.pqi_dev; pqi_pci_info_t *pci_info = (pqi_pci_info_t *)udata; uint32_t sub_vendor = 0; uint32_t sub_device = 0; uint32_t vendor = 0; uint32_t device = 0; DBG_FUNC("IN udata = %p cdev = %p\n", udata, cdev); pci_info->bus = pci_get_bus(dev); pci_info->dev_fn = pci_get_function(dev); pci_info->domain = pci_get_domain(dev); sub_vendor = pci_read_config(dev, PCIR_SUBVEND_0, 2); sub_device = pci_read_config(dev, PCIR_SUBDEV_0, 2); pci_info->board_id = ((sub_device << 16) & 0xffff0000) | sub_vendor; vendor = pci_get_vendor(dev); device = pci_get_device(dev); pci_info->chip_id = ((device << 16) & 0xffff0000) | vendor; DBG_FUNC("OUT\n"); } /* * ioctl entry point for user */ static int smartpqi_ioctl(struct cdev *cdev, u_long 
	cmd, caddr_t udata, int flags, struct thread *td)
{
	int error = PQI_STATUS_SUCCESS;
	struct pqisrc_softstate *softs = cdev->si_drv1;

	DBG_FUNC("IN cmd = 0x%lx udata = %p cdev = %p\n", cmd, udata, cdev);

	if (!udata) {
		DBG_ERR("udata is null !!\n");
	}

	if (pqisrc_ctrl_offline(softs)){
		DBG_ERR("Controller is offline !!\n");
		return ENOTTY;
	}

	switch (cmd) {
	case CCISS_GETDRIVVER:
		smartpqi_get_driver_info_ioctl(udata, cdev);
		break;
	case CCISS_GETPCIINFO:
		smartpqi_get_pci_info_ioctl(udata, cdev);
		break;
	case SMARTPQI_PASS_THRU:
	case CCISS_PASSTHRU:
		error = pqisrc_passthru_ioctl(softs, udata, 0);
		error = PQI_STATUS_SUCCESS;
		break;
	case CCISS_REGNEWD:
		error = pqisrc_scan_devices(softs);
		break;
	default:
		DBG_WARN( "!IOCTL cmd 0x%lx not supported", cmd);
		error = ENOTTY;
		break;
	}

	DBG_FUNC("OUT error = %d\n", error);
	return error;
}

-static d_open_t smartpqi_open;
-static d_ioctl_t smartpqi_ioctl;
-static d_close_t smartpqi_close;
-
static struct cdevsw smartpqi_cdevsw =
{
	.d_version = D_VERSION,
	.d_open = smartpqi_open,
	.d_close = smartpqi_close,
	.d_ioctl = smartpqi_ioctl,
	.d_name = "smartpqi",
};

/*
 * Function to create device node for ioctl
 */
int
create_char_dev(struct pqisrc_softstate *softs, int card_index)
{
	int error = PQI_STATUS_SUCCESS;

	DBG_FUNC("IN idx = %d\n", card_index);

	softs->os_specific.cdev = make_dev(&smartpqi_cdevsw, card_index,
				UID_ROOT, GID_OPERATOR, 0640,
				"smartpqi%u", card_index);
	if(softs->os_specific.cdev) {
		softs->os_specific.cdev->si_drv1 = softs;
	} else {
		error = PQI_STATUS_FAILURE;
	}

	DBG_FUNC("OUT error = %d\n", error);

	return error;
}

/*
 * Function to destroy device node for ioctl
 */
void
destroy_char_dev(struct pqisrc_softstate *softs)
{
	DBG_FUNC("IN\n");

	if (softs->os_specific.cdev) {
		destroy_dev(softs->os_specific.cdev);
		softs->os_specific.cdev = NULL;
	}

	DBG_FUNC("OUT\n");
}

/*
 * Function used to send passthru commands to adapter
 * to support management tools, e.g. ssacli, sscon.
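 *
 * A hypothetical user-space caller might look roughly like this (device
 * path and fill-in values are illustrative only, not a documented API):
 *
 *	IOCTL_Command_struct cmd;
 *	int fd = open("/dev/smartpqi0", O_RDWR);
 *
 *	memset(&cmd, 0, sizeof(cmd));
 *	... fill cmd.LUN_info, cmd.Request and cmd.buf/buf_size ...
 *	if (ioctl(fd, CCISS_PASSTHRU, &cmd) == 0)
 *		... inspect cmd.error_info ...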
*/ int pqisrc_passthru_ioctl(struct pqisrc_softstate *softs, void *arg, int mode) { int ret = PQI_STATUS_SUCCESS; char *drv_buf = NULL; uint32_t tag = 0; IOCTL_Command_struct *iocommand = (IOCTL_Command_struct *)arg; dma_mem_t ioctl_dma_buf; pqisrc_raid_req_t request; raid_path_error_info_elem_t error_info; ib_queue_t *ib_q = &softs->op_raid_ib_q[PQI_DEFAULT_IB_QUEUE]; ob_queue_t *ob_q = &softs->op_ob_q[PQI_DEFAULT_IB_QUEUE]; rcb_t *rcb = NULL; memset(&request, 0, sizeof(request)); memset(&error_info, 0, sizeof(error_info)); DBG_FUNC("IN"); if (pqisrc_ctrl_offline(softs)) return PQI_STATUS_FAILURE; if (!arg) return (PQI_STATUS_FAILURE); if (iocommand->buf_size < 1 && iocommand->Request.Type.Direction != PQIIOCTL_NONE) return PQI_STATUS_FAILURE; if (iocommand->Request.CDBLen > sizeof(request.cdb)) return PQI_STATUS_FAILURE; switch (iocommand->Request.Type.Direction) { case PQIIOCTL_NONE: case PQIIOCTL_WRITE: case PQIIOCTL_READ: case PQIIOCTL_BIDIRECTIONAL: break; default: return PQI_STATUS_FAILURE; } if (iocommand->buf_size > 0) { memset(&ioctl_dma_buf, 0, sizeof(struct dma_mem)); ioctl_dma_buf.tag = "Ioctl_PassthruCmd_Buffer"; ioctl_dma_buf.size = iocommand->buf_size; ioctl_dma_buf.align = PQISRC_DEFAULT_DMA_ALIGN; /* allocate memory */ ret = os_dma_mem_alloc(softs, &ioctl_dma_buf); if (ret) { DBG_ERR("Failed to Allocate dma mem for Ioctl PassthruCmd Buffer : %d\n", ret); ret = PQI_STATUS_FAILURE; goto out; } DBG_INFO("ioctl_dma_buf.dma_addr = %p\n",(void*)ioctl_dma_buf.dma_addr); DBG_INFO("ioctl_dma_buf.virt_addr = %p\n",(void*)ioctl_dma_buf.virt_addr); drv_buf = (char *)ioctl_dma_buf.virt_addr; if (iocommand->Request.Type.Direction & PQIIOCTL_WRITE) { if ((ret = os_copy_from_user(softs, (void *)drv_buf, (void *)iocommand->buf, iocommand->buf_size, mode)) != 0) { ret = PQI_STATUS_FAILURE; goto free_mem; } } } request.header.iu_type = PQI_IU_TYPE_RAID_PATH_IO_REQUEST; request.header.iu_length = offsetof(pqisrc_raid_req_t, sg_descriptors[1]) - PQI_REQUEST_HEADER_LENGTH; memcpy(request.lun_number, iocommand->LUN_info.LunAddrBytes, sizeof(request.lun_number)); memcpy(request.cdb, iocommand->Request.CDB, iocommand->Request.CDBLen); request.additional_cdb_bytes_usage = PQI_ADDITIONAL_CDB_BYTES_0; switch (iocommand->Request.Type.Direction) { case PQIIOCTL_NONE: request.data_direction = SOP_DATA_DIR_NONE; break; case PQIIOCTL_WRITE: request.data_direction = SOP_DATA_DIR_FROM_DEVICE; break; case PQIIOCTL_READ: request.data_direction = SOP_DATA_DIR_TO_DEVICE; break; case PQIIOCTL_BIDIRECTIONAL: request.data_direction = SOP_DATA_DIR_BIDIRECTIONAL; break; } request.task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE; if (iocommand->buf_size > 0) { request.buffer_length = iocommand->buf_size; request.sg_descriptors[0].addr = ioctl_dma_buf.dma_addr; request.sg_descriptors[0].len = iocommand->buf_size; request.sg_descriptors[0].flags = SG_FLAG_LAST; } tag = pqisrc_get_tag(&softs->taglist); if (INVALID_ELEM == tag) { DBG_ERR("Tag not available\n"); ret = PQI_STATUS_FAILURE; goto free_mem; } request.request_id = tag; request.response_queue_id = ob_q->q_id; request.error_index = request.request_id; rcb = &softs->rcb[tag]; rcb->success_cmp_callback = pqisrc_process_internal_raid_response_success; rcb->error_cmp_callback = pqisrc_process_internal_raid_response_error; rcb->tag = tag; rcb->req_pending = true; /* Submit Command */ ret = pqisrc_submit_cmnd(softs, ib_q, &request); if (ret != PQI_STATUS_SUCCESS) { DBG_ERR("Unable to submit command\n"); goto err_out; } ret = pqisrc_wait_on_condition(softs, rcb); if (ret != 
PQI_STATUS_SUCCESS) { DBG_ERR("Passthru IOCTL cmd timed out !!\n"); goto err_out; } memset(&iocommand->error_info, 0, sizeof(iocommand->error_info)); if (rcb->status) { size_t sense_data_length; memcpy(&error_info, rcb->error_info, sizeof(error_info)); iocommand->error_info.ScsiStatus = error_info.status; sense_data_length = error_info.sense_data_len; if (!sense_data_length) sense_data_length = error_info.resp_data_len; if (sense_data_length && (sense_data_length > sizeof(error_info.data))) sense_data_length = sizeof(error_info.data); if (sense_data_length) { if (sense_data_length > sizeof(iocommand->error_info.SenseInfo)) sense_data_length = sizeof(iocommand->error_info.SenseInfo); memcpy (iocommand->error_info.SenseInfo, error_info.data, sense_data_length); iocommand->error_info.SenseLen = sense_data_length; } if (error_info.data_out_result == PQI_RAID_DATA_IN_OUT_UNDERFLOW){ rcb->status = REQUEST_SUCCESS; } } if (rcb->status == REQUEST_SUCCESS && iocommand->buf_size > 0 && (iocommand->Request.Type.Direction & PQIIOCTL_READ)) { if ((ret = os_copy_to_user(softs, (void*)iocommand->buf, (void*)drv_buf, iocommand->buf_size, mode)) != 0) { DBG_ERR("Failed to copy the response\n"); goto err_out; } } os_reset_rcb(rcb); pqisrc_put_tag(&softs->taglist, request.request_id); if (iocommand->buf_size > 0) os_dma_mem_free(softs,&ioctl_dma_buf); DBG_FUNC("OUT\n"); return ret; err_out: os_reset_rcb(rcb); pqisrc_put_tag(&softs->taglist, request.request_id); free_mem: if (iocommand->buf_size > 0) os_dma_mem_free(softs, &ioctl_dma_buf); out: DBG_FUNC("Failed OUT\n"); return PQI_STATUS_FAILURE; } Index: head/sys/dev/smartpqi/smartpqi_main.c =================================================================== --- head/sys/dev/smartpqi/smartpqi_main.c (revision 336267) +++ head/sys/dev/smartpqi/smartpqi_main.c (revision 336268) @@ -1,512 +1,506 @@ /*- * Copyright (c) 2018 Microsemi Corporation. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ /* $FreeBSD$ */ /* * Driver for the Microsemi Smart storage controllers */ #include "smartpqi_includes.h" #include "smartpqi_prototypes.h" /* * Supported devices */ struct pqi_ident { u_int16_t vendor; u_int16_t device; u_int16_t subvendor; u_int16_t subdevice; int hwif; char *desc; } pqi_identifiers[] = { /* (MSCC PM8205 8x12G based) */ {0x9005, 0x028f, 0x103c, 0x600, PQI_HWIF_SRCV, "P408i-p SR Gen10"}, {0x9005, 0x028f, 0x103c, 0x601, PQI_HWIF_SRCV, "P408e-p SR Gen10"}, {0x9005, 0x028f, 0x103c, 0x602, PQI_HWIF_SRCV, "P408i-a SR Gen10"}, {0x9005, 0x028f, 0x103c, 0x603, PQI_HWIF_SRCV, "P408i-c SR Gen10"}, {0x9005, 0x028f, 0x1028, 0x1FE0, PQI_HWIF_SRCV, "SmartRAID 3162-8i/eDell"}, {0x9005, 0x028f, 0x9005, 0x608, PQI_HWIF_SRCV, "SmartRAID 3162-8i/e"}, {0x9005, 0x028f, 0x103c, 0x609, PQI_HWIF_SRCV, "P408i-sb SR G10"}, /* (MSCC PM8225 8x12G based) */ {0x9005, 0x028f, 0x103c, 0x650, PQI_HWIF_SRCV, "E208i-p SR Gen10"}, {0x9005, 0x028f, 0x103c, 0x651, PQI_HWIF_SRCV, "E208e-p SR Gen10"}, {0x9005, 0x028f, 0x103c, 0x652, PQI_HWIF_SRCV, "E208i-c SR Gen10"}, {0x9005, 0x028f, 0x103c, 0x654, PQI_HWIF_SRCV, "E208i-a SR Gen10"}, {0x9005, 0x028f, 0x103c, 0x655, PQI_HWIF_SRCV, "P408e-m SR Gen10"}, /* (MSCC PM8221 8x12G based) */ {0x9005, 0x028f, 0x103c, 0x700, PQI_HWIF_SRCV, "P204i-c SR Gen10"}, {0x9005, 0x028f, 0x103c, 0x701, PQI_HWIF_SRCV, "P204i-b SR Gen10"}, /* (MSCC PM8204 8x12G based) */ {0x9005, 0x028f, 0x9005, 0x800, PQI_HWIF_SRCV, "SmartRAID 3154-8i"}, {0x9005, 0x028f, 0x9005, 0x801, PQI_HWIF_SRCV, "SmartRAID 3152-8i"}, {0x9005, 0x028f, 0x9005, 0x802, PQI_HWIF_SRCV, "SmartRAID 3151-4i"}, {0x9005, 0x028f, 0x9005, 0x803, PQI_HWIF_SRCV, "SmartRAID 3101-4i"}, {0x9005, 0x028f, 0x9005, 0x804, PQI_HWIF_SRCV, "SmartRAID 3154-8e"}, {0x9005, 0x028f, 0x9005, 0x805, PQI_HWIF_SRCV, "SmartRAID 3102-8i"}, {0x9005, 0x028f, 0x9005, 0x806, PQI_HWIF_SRCV, "SmartRAID 3100"}, {0x9005, 0x028f, 0x9005, 0x807, PQI_HWIF_SRCV, "SmartRAID 3162-8i"}, {0x9005, 0x028f, 0x152d, 0x8a22, PQI_HWIF_SRCV, "QS-8204-8i"}, {0x9005, 0x028f, 0x193d, 0xf460, PQI_HWIF_SRCV, "UN RAID P460-M4"}, {0x9005, 0x028f, 0x193d, 0xf461, PQI_HWIF_SRCV, "UN RAID P460-B4"}, {0x9005, 0x028f, 0x1bd4, 0x004b, PQI_HWIF_SRCV, "INSPUR RAID PM8204-2GB"}, {0x9005, 0x028f, 0x1bd4, 0x004c, PQI_HWIF_SRCV, "INSPUR RAID PM8204-4GB"}, /* (MSCC PM8222 8x12G based) */ {0x9005, 0x028f, 0x9005, 0x900, PQI_HWIF_SRCV, "SmartHBA 2100-8i"}, {0x9005, 0x028f, 0x9005, 0x901, PQI_HWIF_SRCV, "SmartHBA 2100-4i"}, {0x9005, 0x028f, 0x9005, 0x902, PQI_HWIF_SRCV, "HBA 1100-8i"}, {0x9005, 0x028f, 0x9005, 0x903, PQI_HWIF_SRCV, "HBA 1100-4i"}, {0x9005, 0x028f, 0x9005, 0x904, PQI_HWIF_SRCV, "SmartHBA 2100-8e"}, {0x9005, 0x028f, 0x9005, 0x905, PQI_HWIF_SRCV, "HBA 1100-8e"}, {0x9005, 0x028f, 0x9005, 0x906, PQI_HWIF_SRCV, "SmartHBA 2100-4i4e"}, {0x9005, 0x028f, 0x9005, 0x907, PQI_HWIF_SRCV, "HBA 1100"}, {0x9005, 0x028f, 0x9005, 0x908, PQI_HWIF_SRCV, "SmartHBA 2100"}, {0x9005, 0x028f, 0x9005, 0x90a, PQI_HWIF_SRCV, "SmartHBA 2100A-8i"}, {0x9005, 0x028f, 0x193d, 0x8460, PQI_HWIF_SRCV, "UN HBA H460-M1"}, {0x9005, 0x028f, 0x193d, 0x8461, PQI_HWIF_SRCV, "UN HBA H460-B1"}, {0x9005, 0x028f, 0x1bd4, 0x004a, PQI_HWIF_SRCV, "INSPUR SMART-HBA PM8222-SHBA"}, {0x9005, 0x028f, 0x13fe, 0x8312, PQI_HWIF_SRCV, "MIC-8312BridgeB"}, /* (SRCx MSCC FVB 24x12G based) */ {0x9005, 0x028f, 0x103c, 0x1001, PQI_HWIF_SRCV, "MSCC FVB"}, /* (MSCC PM8241 24x12G based) */ /* (MSCC PM8242 24x12G based) */ {0x9005, 0x028f, 0x152d, 0x8a37, PQI_HWIF_SRCV, "QS-8242-24i"}, {0x9005, 0x028f, 0x9005, 0x1300, PQI_HWIF_SRCV, "HBA 
1100-8i8e"}, {0x9005, 0x028f, 0x9005, 0x1301, PQI_HWIF_SRCV, "HBA 1100-24i"}, {0x9005, 0x028f, 0x9005, 0x1302, PQI_HWIF_SRCV, "SmartHBA 2100-8i8e"}, {0x9005, 0x028f, 0x9005, 0x1303, PQI_HWIF_SRCV, "SmartHBA 2100-24i"}, {0x9005, 0x028f, 0x105b, 0x1321, PQI_HWIF_SRCV, "8242-24i"}, {0x9005, 0x028f, 0x1bd4, 0x0045, PQI_HWIF_SRCV, "INSPUR SMART-HBA 8242-24i"}, /* (MSCC PM8236 16x12G based) */ {0x9005, 0x028f, 0x152d, 0x8a24, PQI_HWIF_SRCV, "QS-8236-16i"}, {0x9005, 0x028f, 0x9005, 0x1380, PQI_HWIF_SRCV, "SmartRAID 3154-16i"}, {0x9005, 0x028f, 0x1bd4, 0x0046, PQI_HWIF_SRCV, "INSPUR RAID 8236-16i"}, /* (MSCC PM8237 24x12G based) */ {0x9005, 0x028f, 0x103c, 0x1100, PQI_HWIF_SRCV, "P816i-a SR Gen10"}, {0x9005, 0x028f, 0x103c, 0x1101, PQI_HWIF_SRCV, "P416ie-m SR G10"}, /* (MSCC PM8238 16x12G based) */ {0x9005, 0x028f, 0x152d, 0x8a23, PQI_HWIF_SRCV, "QS-8238-16i"}, {0x9005, 0x028f, 0x9005, 0x1280, PQI_HWIF_SRCV, "HBA 1100-16i"}, {0x9005, 0x028f, 0x9005, 0x1281, PQI_HWIF_SRCV, "HBA 1100-16e"}, {0x9005, 0x028f, 0x105b, 0x1211, PQI_HWIF_SRCV, "8238-16i"}, {0x9005, 0x028f, 0x1bd4, 0x0048, PQI_HWIF_SRCV, "INSPUR SMART-HBA 8238-16i"}, {0x9005, 0x028f, 0x9005, 0x1282, PQI_HWIF_SRCV, "SmartHBA 2100-16i"}, /* (MSCC PM8240 24x12G based) */ {0x9005, 0x028f, 0x152d, 0x8a36, PQI_HWIF_SRCV, "QS-8240-24i"}, {0x9005, 0x028f, 0x9005, 0x1200, PQI_HWIF_SRCV, "SmartRAID 3154-24i"}, {0x9005, 0x028f, 0x9005, 0x1201, PQI_HWIF_SRCV, "SmartRAID 3154-8i16e"}, {0x9005, 0x028f, 0x9005, 0x1202, PQI_HWIF_SRCV, "SmartRAID 3154-8i8e"}, {0x9005, 0x028f, 0x1bd4, 0x0047, PQI_HWIF_SRCV, "INSPUR RAID 8240-24i"}, {0, 0, 0, 0, 0, 0} }; struct pqi_ident pqi_family_identifiers[] = { {0x9005, 0x028f, 0, 0, PQI_HWIF_SRCV, "Smart Array Storage Controller"}, {0, 0, 0, 0, 0, 0} }; /* * Function to identify the installed adapter. */ static struct pqi_ident * pqi_find_ident(device_t dev) { struct pqi_ident *m; u_int16_t vendid, devid, sub_vendid, sub_devid; vendid = pci_get_vendor(dev); devid = pci_get_device(dev); sub_vendid = pci_get_subvendor(dev); sub_devid = pci_get_subdevice(dev); for (m = pqi_identifiers; m->vendor != 0; m++) { if ((m->vendor == vendid) && (m->device == devid) && (m->subvendor == sub_vendid) && (m->subdevice == sub_devid)) { return (m); } } for (m = pqi_family_identifiers; m->vendor != 0; m++) { if ((m->vendor == vendid) && (m->device == devid)) { return (m); } } return (NULL); } /* * Determine whether this is one of our supported adapters. */ static int smartpqi_probe(device_t dev) { struct pqi_ident *id; if ((id = pqi_find_ident(dev)) != NULL) { device_set_desc(dev, id->desc); return(BUS_PROBE_VENDOR); } return(ENXIO); } /* * Store Bus/Device/Function in softs */ void pqisrc_save_controller_info(struct pqisrc_softstate *softs) { device_t dev = softs->os_specific.pqi_dev; softs->bus_id = (uint32_t)pci_get_bus(dev); softs->device_id = (uint32_t)pci_get_device(dev); softs->func_id = (uint32_t)pci_get_function(dev); } /* * Allocate resources for our device, set up the bus interface. * Initialize the PQI related functionality, scan devices, register sim to * upper layer, create management interface device node etc. */ static int smartpqi_attach(device_t dev) { struct pqisrc_softstate *softs = NULL; struct pqi_ident *id = NULL; int error = 0; u_int32_t command = 0, i = 0; int card_index = device_get_unit(dev); rcb_t *rcbp = NULL; /* * Initialise softc. 
*/ softs = device_get_softc(dev); if (!softs) { printf("Could not get softc\n"); error = EINVAL; goto out; } memset(softs, 0, sizeof(*softs)); softs->os_specific.pqi_dev = dev; DBG_FUNC("IN\n"); /* assume failure is 'not configured' */ error = ENXIO; /* * Verify that the adapter is correctly set up in PCI space. */ pci_enable_busmaster(softs->os_specific.pqi_dev); command = pci_read_config(softs->os_specific.pqi_dev, PCIR_COMMAND, 2); if ((command & PCIM_CMD_MEMEN) == 0) { DBG_ERR("memory window not available command = %d\n", command); error = ENXIO; goto out; } /* * Detect the hardware interface version, set up the bus interface * indirection. */ id = pqi_find_ident(dev); softs->os_specific.pqi_hwif = id->hwif; switch(softs->os_specific.pqi_hwif) { case PQI_HWIF_SRCV: DBG_INFO("set hardware up for PMC SRCv for %p", softs); break; default: softs->os_specific.pqi_hwif = PQI_HWIF_UNKNOWN; DBG_ERR("unknown hardware type\n"); error = ENXIO; goto out; } pqisrc_save_controller_info(softs); /* * Allocate the PCI register window. */ softs->os_specific.pqi_regs_rid0 = PCIR_BAR(0); if ((softs->os_specific.pqi_regs_res0 = bus_alloc_resource_any(softs->os_specific.pqi_dev, SYS_RES_MEMORY, &softs->os_specific.pqi_regs_rid0, RF_ACTIVE)) == NULL) { DBG_ERR("couldn't allocate register window 0\n"); /* assume failure is 'out of memory' */ error = ENOMEM; goto out; } bus_get_resource_start(softs->os_specific.pqi_dev, SYS_RES_MEMORY, softs->os_specific.pqi_regs_rid0); softs->pci_mem_handle.pqi_btag = rman_get_bustag(softs->os_specific.pqi_regs_res0); softs->pci_mem_handle.pqi_bhandle = rman_get_bushandle(softs->os_specific.pqi_regs_res0); /* softs->pci_mem_base_vaddr = (uintptr_t)rman_get_virtual(softs->os_specific.pqi_regs_res0); */ softs->pci_mem_base_vaddr = (char *)rman_get_virtual(softs->os_specific.pqi_regs_res0); /* * Allocate the parent bus DMA tag appropriate for our PCI interface. * * Note that some of these controllers are 64-bit capable. */ if (bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */ PAGE_SIZE, 0, /* algnmnt, boundary */ BUS_SPACE_MAXADDR_32BIT,/* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ BUS_SPACE_MAXSIZE_32BIT, /* maxsize */ BUS_SPACE_UNRESTRICTED, /* nsegments */ BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* No locking needed */ &softs->os_specific.pqi_parent_dmat)) { DBG_ERR("can't allocate parent DMA tag\n"); /* assume failure is 'out of memory' */ error = ENOMEM; goto dma_out; } softs->os_specific.sim_registered = FALSE; softs->os_name = "FreeBSD "; /* Initialize the PQI library */ error = pqisrc_init(softs); if (error) { DBG_ERR("Failed to initialize pqi lib error = %d\n", error); error = PQI_STATUS_FAILURE; goto out; } mtx_init(&softs->os_specific.cam_lock, "cam_lock", NULL, MTX_DEF); softs->os_specific.mtx_init = TRUE; mtx_init(&softs->os_specific.map_lock, "map_lock", NULL, MTX_DEF); /* * Create DMA tag for mapping buffers into controller-addressable space. 
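 *
 * The tag allows up to max_sg_elem segments and max_sg_elem * PAGE_SIZE
 * bytes per transfer. update_sim_properties() advertises cpi->maxio as
 * (max_sg_elem - 1) * PAGE_SIZE, one page less, so that a transfer whose
 * buffer is not page-aligned and therefore spans one extra page still
 * fits in max_sg_elem segments.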
*/ if (bus_dma_tag_create(softs->os_specific.pqi_parent_dmat,/* parent */ 1, 0, /* algnmnt, boundary */ BUS_SPACE_MAXADDR_32BIT,/* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ softs->pqi_cap.max_sg_elem*PAGE_SIZE,/*maxsize*/ softs->pqi_cap.max_sg_elem, /* nsegments */ BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ BUS_DMA_ALLOCNOW, /* flags */ busdma_lock_mutex, /* lockfunc */ &softs->os_specific.map_lock, /* lockfuncarg*/ &softs->os_specific.pqi_buffer_dmat)) { DBG_ERR("can't allocate buffer DMA tag for pqi_buffer_dmat\n"); return (ENOMEM); } rcbp = &softs->rcb[1]; for( i = 1; i <= softs->pqi_cap.max_outstanding_io; i++, rcbp++ ) { if ((error = bus_dmamap_create(softs->os_specific.pqi_buffer_dmat, 0, &rcbp->cm_datamap)) != 0) { DBG_ERR("Cant create datamap for buf @" "rcbp = %p maxio = %d error = %d\n", rcbp, softs->pqi_cap.max_outstanding_io, error); goto dma_out; } } os_start_heartbeat_timer((void *)softs); /* Start the heart-beat timer */ softs->os_specific.wellness_periodic = timeout( os_wellness_periodic, softs, 120*hz); /* Register our shutdown handler. */ softs->os_specific.eh = EVENTHANDLER_REGISTER(shutdown_final, smartpqi_shutdown, softs, SHUTDOWN_PRI_DEFAULT); error = pqisrc_scan_devices(softs); if (error) { DBG_ERR("Failed to scan lib error = %d\n", error); error = PQI_STATUS_FAILURE; goto out; } error = register_sim(softs, card_index); if (error) { DBG_ERR("Failed to register sim index = %d error = %d\n", card_index, error); goto out; } smartpqi_target_rescan(softs); TASK_INIT(&softs->os_specific.event_task, 0, pqisrc_event_worker,softs); error = create_char_dev(softs, card_index); if (error) { DBG_ERR("Failed to register character device index=%d r=%d\n", card_index, error); goto out; } goto out; dma_out: if (softs->os_specific.pqi_regs_res0 != NULL) bus_release_resource(softs->os_specific.pqi_dev, SYS_RES_MEMORY, softs->os_specific.pqi_regs_rid0, softs->os_specific.pqi_regs_res0); out: DBG_FUNC("OUT error = %d\n", error); return(error); } /* * Deallocate resources for our device. */ static int smartpqi_detach(device_t dev) { struct pqisrc_softstate *softs = NULL; softs = device_get_softc(dev); DBG_FUNC("IN\n"); EVENTHANDLER_DEREGISTER(shutdown_final, softs->os_specific.eh); /* kill the periodic event */ untimeout(os_wellness_periodic, softs, softs->os_specific.wellness_periodic); /* Kill the heart beat event */ untimeout(os_start_heartbeat_timer, softs, softs->os_specific.heartbeat_timeout_id); smartpqi_shutdown(softs); destroy_char_dev(softs); pqisrc_uninit(softs); deregister_sim(softs); pci_release_msi(dev); DBG_FUNC("OUT\n"); return 0; } /* * Bring the controller to a quiescent state, ready for system suspend. */ static int smartpqi_suspend(device_t dev) { struct pqisrc_softstate *softs; softs = device_get_softc(dev); DBG_FUNC("IN\n"); DBG_INFO("Suspending the device %p\n", softs); softs->os_specific.pqi_state |= SMART_STATE_SUSPEND; DBG_FUNC("OUT\n"); return(0); } /* * Bring the controller back to a state ready for operation. */ static int smartpqi_resume(device_t dev) { struct pqisrc_softstate *softs; softs = device_get_softc(dev); DBG_FUNC("IN\n"); softs->os_specific.pqi_state &= ~SMART_STATE_SUSPEND; DBG_FUNC("OUT\n"); return(0); } /* * Do whatever is needed during a system shutdown. 
*/ int smartpqi_shutdown(void *arg) { struct pqisrc_softstate *softs = NULL; int rval = 0; DBG_FUNC("IN\n"); softs = (struct pqisrc_softstate *)arg; rval = pqisrc_flush_cache(softs, PQISRC_SHUTDOWN); if (rval != PQI_STATUS_SUCCESS) { DBG_ERR("Unable to flush adapter cache! rval = %d", rval); } DBG_FUNC("OUT\n"); return rval; } -static int smartpqi_probe(device_t dev); -static int smartpqi_attach(device_t dev); -static int smartpqi_detach(device_t dev); -static int smartpqi_suspend(device_t dev); -static int smartpqi_resume(device_t dev); - /* * PCI bus interface. */ static device_method_t pqi_methods[] = { /* Device interface */ DEVMETHOD(device_probe, smartpqi_probe), DEVMETHOD(device_attach, smartpqi_attach), DEVMETHOD(device_detach, smartpqi_detach), DEVMETHOD(device_suspend, smartpqi_suspend), DEVMETHOD(device_resume, smartpqi_resume), { 0, 0 } }; static devclass_t pqi_devclass; static driver_t smartpqi_pci_driver = { "smartpqi", pqi_methods, sizeof(struct pqisrc_softstate) }; DRIVER_MODULE(smartpqi, pci, smartpqi_pci_driver, pqi_devclass, 0, 0); MODULE_DEPEND(smartpqi, pci, 1, 1, 1); Index: head/sys/dev/smartpqi/smartpqi_prototypes.h =================================================================== --- head/sys/dev/smartpqi/smartpqi_prototypes.h (revision 336267) +++ head/sys/dev/smartpqi/smartpqi_prototypes.h (revision 336268) @@ -1,270 +1,266 @@ /*- * Copyright (c) 2018 Microsemi Corporation. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
Index: head/sys/dev/smartpqi/smartpqi_prototypes.h
===================================================================
--- head/sys/dev/smartpqi/smartpqi_prototypes.h	(revision 336267)
+++ head/sys/dev/smartpqi/smartpqi_prototypes.h	(revision 336268)
@@ -1,270 +1,266 @@
/*-
 * Copyright (c) 2018 Microsemi Corporation.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/* $FreeBSD$ */

#ifndef _PQI_PROTOTYPES_H
#define _PQI_PROTOTYPES_H

/* Function prototypes */

/* pqi_init.c */
int pqisrc_init(pqisrc_softstate_t *);
void pqisrc_uninit(pqisrc_softstate_t *);
void pqisrc_pqi_uninit(pqisrc_softstate_t *);
int pqisrc_process_config_table(pqisrc_softstate_t *);
int pqisrc_flush_cache(pqisrc_softstate_t *,
    enum pqisrc_flush_cache_event_type);
int pqisrc_wait_for_pqi_reset_completion(pqisrc_softstate_t *);
int pqisrc_wait_for_cmnd_complete(pqisrc_softstate_t *);
void pqisrc_complete_internal_cmds(pqisrc_softstate_t *);

/* pqi_sis.c */
int pqisrc_sis_init(pqisrc_softstate_t *);
void pqisrc_sis_uninit(pqisrc_softstate_t *);
int pqisrc_reenable_sis(pqisrc_softstate_t *);
void pqisrc_trigger_nmi_sis(pqisrc_softstate_t *);
void sis_disable_msix(pqisrc_softstate_t *);
void sis_enable_intx(pqisrc_softstate_t *);
void sis_disable_intx(pqisrc_softstate_t *);
int pqisrc_force_sis(pqisrc_softstate_t *);
int pqisrc_sis_wait_for_db_bit_to_clear(pqisrc_softstate_t *, uint32_t);
void sis_disable_interrupt(pqisrc_softstate_t *);

/* pqi_queue.c */
int pqisrc_submit_admin_req(pqisrc_softstate_t *, gen_adm_req_iu_t *,
    gen_adm_resp_iu_t *);
int pqisrc_create_admin_queue(pqisrc_softstate_t *);
int pqisrc_destroy_admin_queue(pqisrc_softstate_t *);
int pqisrc_create_op_queues(pqisrc_softstate_t *);

/* pqi_cmd.c */
int pqisrc_submit_cmnd(pqisrc_softstate_t *, ib_queue_t *, void *);

/* pqi_tag.c */
#ifndef LOCKFREE_STACK
int pqisrc_init_taglist(pqisrc_softstate_t *, pqi_taglist_t *, uint32_t);
void pqisrc_destroy_taglist(pqisrc_softstate_t *, pqi_taglist_t *);
void pqisrc_put_tag(pqi_taglist_t *, uint32_t);
uint32_t pqisrc_get_tag(pqi_taglist_t *);
#else
int pqisrc_init_taglist(pqisrc_softstate_t *, lockless_stack_t *, uint32_t);
void pqisrc_destroy_taglist(pqisrc_softstate_t *, lockless_stack_t *);
void pqisrc_put_tag(lockless_stack_t *, uint32_t);
uint32_t pqisrc_get_tag(lockless_stack_t *);
#endif /* LOCKFREE_STACK */
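The tag-list interface above hands out free command tags and takes them back
on completion; whether it is backed by a mutex-protected list or a lock-free
stack is hidden behind the same four calls. A hedged usage sketch follows:
the caller functions are hypothetical, and since this header does not show
what pqisrc_get_tag() returns when the list is empty, INVALID_TAG below is an
assumed sentinel (the rcb array above is indexed from 1, which is consistent
with tag 0 never being handed out, but that is an inference, not a fact from
this diff).

/* Illustrative caller of the tag-list API declared above. */
#define INVALID_TAG	0	/* assumption: tag 0 is never handed out */

static int
start_io(pqisrc_softstate_t *softs, pqi_taglist_t *taglist)
{
	uint32_t tag;

	/* Each in-flight command borrows a unique request tag. */
	tag = pqisrc_get_tag(taglist);
	if (tag == INVALID_TAG)
		return (PQI_STATUS_FAILURE);

	/* ... stamp the request IU with 'tag' and submit it ... */
	return (PQI_STATUS_SUCCESS);
}

static void
complete_io(pqi_taglist_t *taglist, uint32_t tag)
{
	/* On completion the tag returns to the pool for reuse. */
	pqisrc_put_tag(taglist, tag);
}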
/* pqi_discovery.c */
void pqisrc_remove_device(pqisrc_softstate_t *, pqi_scsi_dev_t *);
int pqisrc_get_ctrl_fw_version(pqisrc_softstate_t *);
int pqisrc_rescan_devices(pqisrc_softstate_t *);
int pqisrc_scan_devices(pqisrc_softstate_t *);
void pqisrc_process_raid_path_io_response(pqisrc_softstate_t *, uint16_t,
    struct pqi_io_response *);
void pqisrc_process_io_error_response(pqisrc_softstate_t *, int, uint16_t,
    struct pqi_io_response *);
void pqisrc_cleanup_devices(pqisrc_softstate_t *);
void pqisrc_device_mem_free(pqisrc_softstate_t *, pqi_scsi_dev_t *);
boolean_t pqisrc_is_external_raid_device(pqi_scsi_dev_t *);
void pqisrc_free_device(pqisrc_softstate_t *, pqi_scsi_dev_t *);
void pqisrc_init_targetid_pool(pqisrc_softstate_t *);
int pqisrc_alloc_tid(pqisrc_softstate_t *);
void pqisrc_free_tid(pqisrc_softstate_t *, int);

/* pqi_helper.c */
boolean_t pqisrc_ctrl_offline(pqisrc_softstate_t *);
void pqisrc_heartbeat_timer_handler(pqisrc_softstate_t *);
int pqisrc_wait_on_condition(pqisrc_softstate_t *, rcb_t *);
boolean_t pqisrc_device_equal(pqi_scsi_dev_t *, pqi_scsi_dev_t *);
boolean_t pqisrc_is_hba_lunid(uint8_t *);
boolean_t pqisrc_is_logical_device(pqi_scsi_dev_t *);
void pqisrc_sanitize_inquiry_string(unsigned char *, int);
void pqisrc_display_device_info(pqisrc_softstate_t *, char *, pqi_scsi_dev_t *);
boolean_t pqisrc_scsi3addr_equal(uint8_t *, uint8_t *);
void check_struct_sizes(void);
char *pqisrc_raidlevel_to_string(uint8_t);
void pqisrc_configure_legacy_intx(pqisrc_softstate_t *, boolean_t);

/* pqi_response.c */
void pqisrc_signal_event(pqisrc_softstate_t *, rcb_t *);
void pqisrc_process_internal_raid_response_success(pqisrc_softstate_t *,
    rcb_t *);
void pqisrc_process_internal_raid_response_error(pqisrc_softstate_t *,
    rcb_t *, uint16_t);
void pqisrc_process_io_response_success(pqisrc_softstate_t *, rcb_t *);
void pqisrc_process_aio_response_error(pqisrc_softstate_t *, rcb_t *,
    uint16_t);
void pqisrc_process_raid_response_error(pqisrc_softstate_t *, rcb_t *,
    uint16_t);
void pqisrc_process_response_queue(pqisrc_softstate_t *, int);

/* pqi_request.c */
int pqisrc_build_send_io(pqisrc_softstate_t *, rcb_t *);
int pqisrc_send_scsi_cmd_raidbypass(pqisrc_softstate_t *, pqi_scsi_dev_t *,
    rcb_t *, uint8_t *);
int pqisrc_send_tmf(pqisrc_softstate_t *, pqi_scsi_dev_t *, rcb_t *, int, int);
int pqisrc_write_current_time_to_host_wellness(pqisrc_softstate_t *);
int pqisrc_write_driver_version_to_host_wellness(pqisrc_softstate_t *);

/* pqi_event.c */
int pqisrc_report_event_config(pqisrc_softstate_t *);
int pqisrc_set_event_config(pqisrc_softstate_t *);
int pqisrc_process_event_intr_src(pqisrc_softstate_t *, int);
void pqisrc_ack_all_events(void *);
void pqisrc_event_worker(void *, int);

int pqisrc_scsi_setup(struct pqisrc_softstate *);
void pqisrc_scsi_cleanup(struct pqisrc_softstate *);
boolean_t pqisrc_update_scsi_sense(const uint8_t *, int,
    struct sense_header_scsi *);
int pqisrc_build_send_raid_request(pqisrc_softstate_t *, pqisrc_raid_req_t *,
    void *, size_t, uint8_t, uint16_t, uint8_t *,
    raid_path_error_info_elem_t *);
int pqisrc_submit_management_req(pqisrc_softstate_t *,
    pqi_event_config_request_t *);
void pqisrc_take_devices_offline(pqisrc_softstate_t *);
void pqisrc_take_ctrl_offline(pqisrc_softstate_t *);
void pqisrc_free_rcb(pqisrc_softstate_t *, int);
void pqisrc_decide_opq_config(pqisrc_softstate_t *);
int pqisrc_configure_op_queues(pqisrc_softstate_t *);
int pqisrc_pqi_init(pqisrc_softstate_t *);
int pqi_reset(pqisrc_softstate_t *);
int pqisrc_check_pqimode(pqisrc_softstate_t *);
int pqisrc_check_fw_status(pqisrc_softstate_t *);
int pqisrc_init_struct_base(pqisrc_softstate_t *);
int pqisrc_get_sis_pqi_cap(pqisrc_softstate_t *);
int pqisrc_get_preferred_settings(pqisrc_softstate_t *);
int pqisrc_get_adapter_properties(pqisrc_softstate_t *, uint32_t *,
    uint32_t *);
void pqisrc_get_admin_queue_config(pqisrc_softstate_t *);
void pqisrc_decide_admin_queue_config(pqisrc_softstate_t *);
int pqisrc_allocate_and_init_adminq(pqisrc_softstate_t *);
int pqisrc_create_delete_adminq(pqisrc_softstate_t *, uint32_t);
void pqisrc_print_adminq_config(pqisrc_softstate_t *);
int pqisrc_delete_op_queue(pqisrc_softstate_t *, uint32_t, boolean_t);
void pqisrc_destroy_event_queue(pqisrc_softstate_t *);
void pqisrc_destroy_op_ib_queues(pqisrc_softstate_t *);
void pqisrc_destroy_op_ob_queues(pqisrc_softstate_t *);
int pqisrc_change_op_ibq_queue_prop(pqisrc_softstate_t *, ib_queue_t *,
    uint32_t);
int pqisrc_create_op_obq(pqisrc_softstate_t *, ob_queue_t *);
int pqisrc_create_op_ibq(pqisrc_softstate_t *, ib_queue_t *);
int pqisrc_create_op_aio_ibq(pqisrc_softstate_t *, ib_queue_t *);
int pqisrc_create_op_raid_ibq(pqisrc_softstate_t *, ib_queue_t *);
int pqisrc_alloc_and_create_event_queue(pqisrc_softstate_t *);
int pqisrc_alloc_and_create_ib_queues(pqisrc_softstate_t *);
int pqisrc_alloc_and_create_ob_queues(pqisrc_softstate_t *);
int pqisrc_process_task_management_response(pqisrc_softstate_t *,
    pqi_tmf_resp_t *);
void pqisrc_wait_for_rescan_complete(pqisrc_softstate_t *);
/* pqi_ioctl.c */
int pqisrc_passthru_ioctl(struct pqisrc_softstate *, void *, int);

/* Function prototypes */

/* FreeBSD_mem.c */
int os_dma_mem_alloc(pqisrc_softstate_t *, struct dma_mem *);
void os_dma_mem_free(pqisrc_softstate_t *, struct dma_mem *);
void *os_mem_alloc(pqisrc_softstate_t *, size_t);
void os_mem_free(pqisrc_softstate_t *, char *, size_t);
void os_resource_free(pqisrc_softstate_t *);

/* FreeBSD_intr.c */
int os_get_intr_config(pqisrc_softstate_t *);
int os_setup_intr(pqisrc_softstate_t *);
int os_destroy_intr(pqisrc_softstate_t *);
int os_get_processor_config(pqisrc_softstate_t *);
void os_free_intr_config(pqisrc_softstate_t *);

/* FreeBSD_ioctl.c */
int os_copy_to_user(struct pqisrc_softstate *, void *, void *, int, int);
int os_copy_from_user(struct pqisrc_softstate *, void *, void *, int, int);
int create_char_dev(struct pqisrc_softstate *, int);
void destroy_char_dev(struct pqisrc_softstate *);

/* FreeBSD_misc.c */
int os_init_spinlock(struct pqisrc_softstate *, struct mtx *, char *);
void os_uninit_spinlock(struct mtx *);
int os_create_semaphore(const char *, int, struct sema *);
int os_destroy_semaphore(struct sema *);
void os_sema_lock(struct sema *);
void os_sema_unlock(struct sema *);
int os_strlcpy(char *, char *, int);
void os_complete_outstanding_cmds_nodevice(pqisrc_softstate_t *);
void os_stop_heartbeat_timer(pqisrc_softstate_t *);
void os_start_heartbeat_timer(void *);

/* FreeBSD_cam.c */
-int pqisrc_scsi_setup(struct pqisrc_softstate *);
-void pqisrc_scsi_cleanup(struct pqisrc_softstate *);
uint8_t os_get_task_attr(rcb_t *);
void os_wellness_periodic(void *);
void smartpqi_target_rescan(struct pqisrc_softstate *);

/* FreeBSD_intr.c, FreeBSD_main.c */
-void pqisrc_event_worker(void *, int);
void os_add_device(pqisrc_softstate_t *, pqi_scsi_dev_t *);
void os_remove_device(pqisrc_softstate_t *, pqi_scsi_dev_t *);
void os_io_response_success(rcb_t *);
void os_aio_response_error(rcb_t *, aio_path_error_info_elem_t *);
void smartpqi_adjust_queue_depth(struct cam_path *, uint32_t);
void os_raid_response_error(rcb_t *, raid_path_error_info_elem_t *);
-void os_wellness_periodic(void *);
void os_reset_rcb(rcb_t *);
int register_sim(struct pqisrc_softstate *, int);
void deregister_sim(struct pqisrc_softstate *);
int check_for_scsi_opcode(uint8_t *, boolean_t *, uint64_t *, uint32_t *);
int register_legacy_intr(pqisrc_softstate_t *);
int register_msix_intr(pqisrc_softstate_t *);
void deregister_pqi_intx(pqisrc_softstate_t *);
void deregister_pqi_msix(pqisrc_softstate_t *);
void os_get_time(struct bmic_host_wellness_time *);
void os_eventtaskqueue_enqueue(pqisrc_softstate_t *);
void pqisrc_save_controller_info(struct pqisrc_softstate *);
int smartpqi_shutdown(void *);

#endif // _PQI_PROTOTYPES_H
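The create_char_dev()/destroy_char_dev() and pqisrc_passthru_ioctl() entries
above imply the standard FreeBSD character-device plumbing: a cdevsw with an
ioctl handler, created per controller with make_dev(9) and torn down with
destroy_dev(9). The sketch below shows only that generic shape, not the
driver's actual implementation; the mydev_* names, MYDEV_PASSTHRU command,
and struct my_passthru_req are all hypothetical.

/* Hypothetical cdev with an ioctl entry, in the spirit of the above. */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/conf.h>
#include <sys/ioccom.h>

struct my_passthru_req {
	int	len;
	/* ... request/response payload description ... */
};

#define MYDEV_PASSTHRU	_IOWR('M', 1, struct my_passthru_req)

static d_ioctl_t mydev_ioctl;

static struct cdevsw mydev_cdevsw = {
	.d_version =	D_VERSION,
	.d_ioctl =	mydev_ioctl,
	.d_name =	"mydev",
};

static struct cdev *mydev_dev;

static int
mydev_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag,
    struct thread *td)
{
	switch (cmd) {
	case MYDEV_PASSTHRU:
		/* 'arg' already points at a kernel copy of the user struct. */
		return (0);	/* hand off to the passthrough handler here */
	default:
		return (ENOTTY);
	}
}

static int
mydev_create(int unit)
{
	/* Analogous in spirit to create_char_dev(softs, card_index). */
	mydev_dev = make_dev(&mydev_cdevsw, unit, UID_ROOT, GID_OPERATOR,
	    0640, "mydev%d", unit);
	return (mydev_dev == NULL ? ENXIO : 0);
}

static void
mydev_destroy(void)
{
	/* Analogous in spirit to destroy_char_dev(softs). */
	if (mydev_dev != NULL)
		destroy_dev(mydev_dev);
}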