Changeset View
Changeset View
Standalone View
Standalone View
sys/dev/smartpqi/smartpqi_cam.c
/*- | /*- | ||||
* Copyright (c) 2018 Microsemi Corporation. | * Copyright (c) 2016-2019 Microsemi Corporation. | ||||
* Copyright (c) 2020 Microchip Technology Inc. and its subsidiaries.
imp: This could likely be just
```
Copyright 2016-2021 Microchip Technology Inc. and it's… | |||||
* | |||||
* All rights reserved. | * All rights reserved. | ||||
Not Done Inline ActionsThese two lines likely can just be removed, unless your legal department insists on the language. In which case it should be put on the same line as each of the prior copyright line(s). imp: These two lines likely can just be removed, unless your legal department insists on the… | |||||
Done Inline Actionsalso, s/it's/its/ ? yuripv: also, s/it's/its/ ? | |||||
* | * | ||||
* Redistribution and use in source and binary forms, with or without | * Redistribution and use in source and binary forms, with or without | ||||
* modification, are permitted provided that the following conditions | * modification, are permitted provided that the following conditions | ||||
* are met: | * are met: | ||||
* 1. Redistributions of source code must retain the above copyright | * 1. Redistributions of source code must retain the above copyright | ||||
* notice, this list of conditions and the following disclaimer. | * notice, this list of conditions and the following disclaimer. | ||||
* 2. Redistributions in binary form must reproduce the above copyright | * 2. Redistributions in binary form must reproduce the above copyright | ||||
* notice, this list of conditions and the following disclaimer in the | * notice, this list of conditions and the following disclaimer in the | ||||
Show All 17 Lines | |||||
* CAM interface for smartpqi driver | * CAM interface for smartpqi driver | ||||
*/ | */ | ||||
#include "smartpqi_includes.h" | #include "smartpqi_includes.h" | ||||
/* | /* | ||||
* Set cam sim properties of the smartpqi adapter. | * Set cam sim properties of the smartpqi adapter. | ||||
*/ | */ | ||||
static void update_sim_properties(struct cam_sim *sim, struct ccb_pathinq *cpi) | static void | ||||
update_sim_properties(struct cam_sim *sim, struct ccb_pathinq *cpi) | |||||
{ | { | ||||
pqisrc_softstate_t *softs = (struct pqisrc_softstate *) | pqisrc_softstate_t *softs = (struct pqisrc_softstate *) | ||||
cam_sim_softc(sim); | cam_sim_softc(sim); | ||||
device_t dev = softs->os_specific.pqi_dev; | |||||
DBG_FUNC("IN\n"); | DBG_FUNC("IN\n"); | ||||
cpi->version_num = 1; | cpi->version_num = 1; | ||||
cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16; | cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16; | ||||
cpi->target_sprt = 0; | cpi->target_sprt = 0; | ||||
cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED; | cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED; | ||||
cpi->hba_eng_cnt = 0; | cpi->hba_eng_cnt = 0; | ||||
cpi->max_lun = PQI_MAX_MULTILUN; | cpi->max_lun = PQI_MAX_MULTILUN; | ||||
cpi->max_target = 1088; | cpi->max_target = 1088; | ||||
Done Inline ActionsThere is strlcpy() to be safe without this. mav: There is strlcpy() to be safe without this. | |||||
cpi->maxio = (softs->pqi_cap.max_sg_elem - 1) * PAGE_SIZE; | cpi->maxio = (softs->pqi_cap.max_sg_elem - 1) * PAGE_SIZE; | ||||
cpi->initiator_id = 255; | cpi->initiator_id = 255; | ||||
strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); | strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); | ||||
strncpy(cpi->hba_vid, "Microsemi", HBA_IDLEN); | strlcpy(cpi->hba_vid, "Microsemi", HBA_IDLEN); | ||||
strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); | strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); | ||||
cpi->unit_number = cam_sim_unit(sim); | cpi->unit_number = cam_sim_unit(sim); | ||||
cpi->bus_id = cam_sim_bus(sim); | cpi->bus_id = cam_sim_bus(sim); | ||||
cpi->base_transfer_speed = 1200000; /* Base bus speed in KB/sec */ | cpi->base_transfer_speed = 1200000; /* Base bus speed in KB/sec */ | ||||
cpi->protocol = PROTO_SCSI; | cpi->protocol = PROTO_SCSI; | ||||
cpi->protocol_version = SCSI_REV_SPC4; | cpi->protocol_version = SCSI_REV_SPC4; | ||||
cpi->transport = XPORT_SPI; | cpi->transport = XPORT_SPI; | ||||
cpi->transport_version = 2; | cpi->transport_version = 2; | ||||
cpi->ccb_h.status = CAM_REQ_CMP; | cpi->ccb_h.status = CAM_REQ_CMP; | ||||
cpi->hba_vendor = pci_get_vendor(dev); | |||||
cpi->hba_device = pci_get_device(dev); | |||||
cpi->hba_subvendor = pci_get_subvendor(dev); | |||||
cpi->hba_subdevice = pci_get_subdevice(dev); | |||||
DBG_FUNC("OUT\n"); | DBG_FUNC("OUT\n"); | ||||
} | } | ||||
/* | /* | ||||
* Get transport settings of the smartpqi adapter | * Get transport settings of the smartpqi adapter | ||||
*/ | */ | ||||
static void get_transport_settings(struct pqisrc_softstate *softs, | static void | ||||
get_transport_settings(struct pqisrc_softstate *softs, | |||||
struct ccb_trans_settings *cts) | struct ccb_trans_settings *cts) | ||||
{ | { | ||||
struct ccb_trans_settings_scsi *scsi = &cts->proto_specific.scsi; | struct ccb_trans_settings_scsi *scsi = &cts->proto_specific.scsi; | ||||
struct ccb_trans_settings_sas *sas = &cts->xport_specific.sas; | struct ccb_trans_settings_sas *sas = &cts->xport_specific.sas; | ||||
struct ccb_trans_settings_spi *spi = &cts->xport_specific.spi; | struct ccb_trans_settings_spi *spi = &cts->xport_specific.spi; | ||||
DBG_FUNC("IN\n"); | DBG_FUNC("IN\n"); | ||||
Show All 9 Lines | get_transport_settings(struct pqisrc_softstate *softs, | ||||
cts->ccb_h.status = CAM_REQ_CMP; | cts->ccb_h.status = CAM_REQ_CMP; | ||||
DBG_FUNC("OUT\n"); | DBG_FUNC("OUT\n"); | ||||
} | } | ||||
/* | /* | ||||
* Add the target to CAM layer and rescan, when a new device is found | * Add the target to CAM layer and rescan, when a new device is found | ||||
*/ | */ | ||||
void os_add_device(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device) { | void | ||||
os_add_device(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device) | |||||
{ | |||||
union ccb *ccb; | union ccb *ccb; | ||||
DBG_FUNC("IN\n"); | DBG_FUNC("IN\n"); | ||||
if(softs->os_specific.sim_registered) { | if(softs->os_specific.sim_registered) { | ||||
if ((ccb = xpt_alloc_ccb_nowait()) == NULL) { | if ((ccb = xpt_alloc_ccb_nowait()) == NULL) { | ||||
DBG_ERR("rescan failed (can't allocate CCB)\n"); | DBG_ERR("rescan failed (can't allocate CCB)\n"); | ||||
return; | return; | ||||
} | } | ||||
Show All 9 Lines | os_add_device(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device) | ||||
} | } | ||||
DBG_FUNC("OUT\n"); | DBG_FUNC("OUT\n"); | ||||
} | } | ||||
/* | /* | ||||
* Remove the device from CAM layer when deleted or hot removed | * Remove the device from CAM layer when deleted or hot removed | ||||
*/ | */ | ||||
void os_remove_device(pqisrc_softstate_t *softs, | void | ||||
pqi_scsi_dev_t *device) { | os_remove_device(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device) | ||||
{ | |||||
struct cam_path *tmppath; | struct cam_path *tmppath; | ||||
DBG_FUNC("IN\n"); | DBG_FUNC("IN\n"); | ||||
if(softs->os_specific.sim_registered) { | if(softs->os_specific.sim_registered) { | ||||
if (xpt_create_path(&tmppath, NULL, | if (xpt_create_path(&tmppath, NULL, | ||||
cam_sim_path(softs->os_specific.sim), | cam_sim_path(softs->os_specific.sim), | ||||
device->target, device->lun) != CAM_REQ_CMP) { | device->target, device->lun) != CAM_REQ_CMP) { | ||||
DBG_ERR("unable to create path for async event"); | DBG_ERR("unable to create path for async event"); | ||||
return; | return; | ||||
} | } | ||||
xpt_async(AC_LOST_DEVICE, tmppath, NULL); | xpt_async(AC_LOST_DEVICE, tmppath, NULL); | ||||
xpt_free_path(tmppath); | xpt_free_path(tmppath); | ||||
softs->device_list[device->target][device->lun] = NULL; | |||||
pqisrc_free_device(softs, device); | pqisrc_free_device(softs, device); | ||||
} | } | ||||
Done Inline ActionsIf you are fixing style, then according to style(9) function name should start on a new line. mav: If you are fixing style, then according to style(9) function name should start on a new line. | |||||
DBG_FUNC("OUT\n"); | DBG_FUNC("OUT\n"); | ||||
} | } | ||||
/* | /* | ||||
* Function to release the frozen simq | * Function to release the frozen simq | ||||
*/ | */ | ||||
/*
 * Function to release the frozen simq.  If the driver marked itself
 * busy (PQI_FLAG_BUSY), either release the simq immediately or flag
 * the CCB so CAM releases it when the CCB completes.
 */
static void
pqi_release_camq(rcb_t *rcb)
{
	pqisrc_softstate_t *softs;
	struct ccb_scsiio *csio;

	csio = (struct ccb_scsiio *)&rcb->cm_ccb->csio;
	softs = rcb->softs;

	DBG_FUNC("IN\n");

	if (softs->os_specific.pqi_flags & PQI_FLAG_BUSY) {
		softs->os_specific.pqi_flags &= ~PQI_FLAG_BUSY;
		if (csio->ccb_h.status & CAM_RELEASE_SIMQ)
			xpt_release_simq(xpt_path_sim(csio->ccb_h.path), 0);
		else
			csio->ccb_h.status |= CAM_RELEASE_SIMQ;
	}

	DBG_FUNC("OUT\n");
}
/* | static void | ||||
* Function to dma-unmap the completed request | pqi_synch_request(rcb_t *rcb) | ||||
*/ | |||||
static void pqi_unmap_request(void *arg) | |||||
{ | { | ||||
pqisrc_softstate_t *softs; | pqisrc_softstate_t *softs = rcb->softs; | ||||
rcb_t *rcb; | |||||
DBG_IO("IN rcb = %p\n", arg); | DBG_IO("IN rcb = %p\n", rcb); | ||||
rcb = (rcb_t *)arg; | |||||
softs = rcb->softs; | |||||
if (!(rcb->cm_flags & PQI_CMD_MAPPED)) | if (!(rcb->cm_flags & PQI_CMD_MAPPED)) | ||||
return; | return; | ||||
if (rcb->bcount != 0 ) { | if (rcb->bcount != 0 ) { | ||||
if (rcb->data_dir == SOP_DATA_DIR_FROM_DEVICE) | if (rcb->data_dir == SOP_DATA_DIR_FROM_DEVICE) | ||||
bus_dmamap_sync(softs->os_specific.pqi_buffer_dmat, | bus_dmamap_sync(softs->os_specific.pqi_buffer_dmat, | ||||
rcb->cm_datamap, | rcb->cm_datamap, | ||||
BUS_DMASYNC_POSTREAD); | BUS_DMASYNC_POSTREAD); | ||||
if (rcb->data_dir == SOP_DATA_DIR_TO_DEVICE) | if (rcb->data_dir == SOP_DATA_DIR_TO_DEVICE) | ||||
bus_dmamap_sync(softs->os_specific.pqi_buffer_dmat, | bus_dmamap_sync(softs->os_specific.pqi_buffer_dmat, | ||||
rcb->cm_datamap, | rcb->cm_datamap, | ||||
BUS_DMASYNC_POSTWRITE); | BUS_DMASYNC_POSTWRITE); | ||||
bus_dmamap_unload(softs->os_specific.pqi_buffer_dmat, | bus_dmamap_unload(softs->os_specific.pqi_buffer_dmat, | ||||
rcb->cm_datamap); | rcb->cm_datamap); | ||||
} | } | ||||
rcb->cm_flags &= ~PQI_CMD_MAPPED; | rcb->cm_flags &= ~PQI_CMD_MAPPED; | ||||
if(rcb->sgt && rcb->nseg) | if(rcb->sgt && rcb->nseg) | ||||
os_mem_free(rcb->softs, (void*)rcb->sgt, | os_mem_free(rcb->softs, (void*)rcb->sgt, | ||||
rcb->nseg*sizeof(sgt_t)); | rcb->nseg*sizeof(sgt_t)); | ||||
pqisrc_put_tag(&softs->taglist, rcb->tag); | DBG_IO("OUT\n"); | ||||
} | |||||
/* | |||||
* Function to dma-unmap the completed request | |||||
*/ | |||||
static inline void | |||||
pqi_unmap_request(rcb_t *rcb) | |||||
{ | |||||
DBG_IO("IN rcb = %p\n", rcb); | |||||
pqi_synch_request(rcb); | |||||
pqisrc_put_tag(&rcb->softs->taglist, rcb->tag); | |||||
DBG_IO("OUT\n"); | DBG_IO("OUT\n"); | ||||
} | } | ||||
/* | /* | ||||
* Construct meaningful LD name for volume here. | * Construct meaningful LD name for volume here. | ||||
*/ | */ | ||||
static void | static void | ||||
smartpqi_fix_ld_inquiry(pqisrc_softstate_t *softs, struct ccb_scsiio *csio) | smartpqi_fix_ld_inquiry(pqisrc_softstate_t *softs, struct ccb_scsiio *csio) | ||||
{ | { | ||||
struct scsi_inquiry_data *inq = NULL; | struct scsi_inquiry_data *inq = NULL; | ||||
uint8_t *cdb = NULL; | uint8_t *cdb = NULL; | ||||
pqi_scsi_dev_t *device = NULL; | pqi_scsi_dev_t *device = NULL; | ||||
DBG_FUNC("IN\n"); | DBG_FUNC("IN\n"); | ||||
if (pqisrc_ctrl_offline(softs)) | |||||
return; | |||||
cdb = (csio->ccb_h.flags & CAM_CDB_POINTER) ? | cdb = (csio->ccb_h.flags & CAM_CDB_POINTER) ? | ||||
(uint8_t *)csio->cdb_io.cdb_ptr : csio->cdb_io.cdb_bytes; | (uint8_t *)csio->cdb_io.cdb_ptr : csio->cdb_io.cdb_bytes; | ||||
if(cdb[0] == INQUIRY && | if(cdb[0] == INQUIRY && | ||||
(cdb[1] & SI_EVPD) == 0 && | (cdb[1] & SI_EVPD) == 0 && | ||||
(csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN && | (csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN && | ||||
csio->dxfer_len >= SHORT_INQUIRY_LENGTH) { | csio->dxfer_len >= SHORT_INQUIRY_LENGTH) { | ||||
inq = (struct scsi_inquiry_data *)csio->data_ptr; | inq = (struct scsi_inquiry_data *)csio->data_ptr; | ||||
device = softs->device_list[csio->ccb_h.target_id][csio->ccb_h.target_lun]; | device = softs->device_list[csio->ccb_h.target_id][csio->ccb_h.target_lun]; | ||||
Done Inline ActionsI was going to mention strlcpy() again, but IIRC strings in SCSI INQUIRY are not required to be NULL-terminated, so strncpy() should be just fine there. mav: I was going to mention strlcpy() again, but IIRC strings in SCSI INQUIRY are not required to be… | |||||
/* Let the disks be probed and dealt with via CAM. Only for LD | /* Let the disks be probed and dealt with via CAM. Only for LD | ||||
let it fall through and inquiry be tweaked */ | let it fall through and inquiry be tweaked */ | ||||
if( !device || !pqisrc_is_logical_device(device) || | if (!device || !pqisrc_is_logical_device(device) || | ||||
(device->devtype != DISK_DEVICE) || | (device->devtype != DISK_DEVICE) || | ||||
pqisrc_is_external_raid_device(device)) { | pqisrc_is_external_raid_device(device)) { | ||||
return; | return; | ||||
} | } | ||||
strncpy(inq->vendor, "MSCC", | strncpy(inq->vendor, device->vendor, | ||||
SID_VENDOR_SIZE); | SID_VENDOR_SIZE-1); | ||||
inq->vendor[sizeof(inq->vendor)-1] = '\0'; | |||||
strncpy(inq->product, | strncpy(inq->product, | ||||
pqisrc_raidlevel_to_string(device->raid_level), | pqisrc_raidlevel_to_string(device->raid_level), | ||||
SID_PRODUCT_SIZE); | SID_PRODUCT_SIZE-1); | ||||
inq->product[sizeof(inq->product)-1] = '\0'; | |||||
strncpy(inq->revision, device->volume_offline?"OFF":"OK", | strncpy(inq->revision, device->volume_offline?"OFF":"OK", | ||||
SID_REVISION_SIZE); | SID_REVISION_SIZE-1); | ||||
inq->revision[sizeof(inq->revision)-1] = '\0'; | |||||
} | } | ||||
DBG_FUNC("OUT\n"); | DBG_FUNC("OUT\n"); | ||||
} | } | ||||
static void | |||||
pqi_complete_scsi_io(struct ccb_scsiio *csio, rcb_t *rcb) | |||||
{ | |||||
uint32_t release_tag; | |||||
pqisrc_softstate_t *softs = rcb->softs; | |||||
DBG_IO("IN scsi io = %p\n", csio); | |||||
pqi_synch_request(rcb); | |||||
smartpqi_fix_ld_inquiry(rcb->softs, csio); | |||||
pqi_release_camq(rcb); | |||||
release_tag = rcb->tag; | |||||
os_reset_rcb(rcb); | |||||
pqisrc_put_tag(&softs->taglist, release_tag); | |||||
xpt_done((union ccb *)csio); | |||||
DBG_FUNC("OUT\n"); | |||||
} | |||||
/* | /* | ||||
* Handle completion of a command - pass results back through the CCB | * Handle completion of a command - pass results back through the CCB | ||||
*/ | */ | ||||
void | void | ||||
os_io_response_success(rcb_t *rcb) | os_io_response_success(rcb_t *rcb) | ||||
{ | { | ||||
struct ccb_scsiio *csio; | struct ccb_scsiio *csio; | ||||
DBG_IO("IN rcb = %p\n", rcb); | DBG_IO("IN rcb = %p\n", rcb); | ||||
if (rcb == NULL) | if (rcb == NULL) | ||||
panic("rcb is null"); | panic("rcb is null"); | ||||
csio = (struct ccb_scsiio *)&rcb->cm_ccb->csio; | csio = (struct ccb_scsiio *)&rcb->cm_ccb->csio; | ||||
if (csio == NULL) | if (csio == NULL) | ||||
panic("csio is null"); | panic("csio is null"); | ||||
rcb->status = REQUEST_SUCCESS; | rcb->status = REQUEST_SUCCESS; | ||||
csio->ccb_h.status = CAM_REQ_CMP; | csio->ccb_h.status = CAM_REQ_CMP; | ||||
smartpqi_fix_ld_inquiry(rcb->softs, csio); | pqi_complete_scsi_io(csio, rcb); | ||||
pqi_release_camq(rcb); | |||||
pqi_unmap_request(rcb); | |||||
xpt_done((union ccb *)csio); | |||||
DBG_IO("OUT\n"); | DBG_IO("OUT\n"); | ||||
} | } | ||||
static void | |||||
copy_sense_data_to_csio(struct ccb_scsiio *csio, | |||||
uint8_t *sense_data, uint16_t sense_data_len) | |||||
{ | |||||
DBG_IO("IN csio = %p\n", csio); | |||||
memset(&csio->sense_data, 0, csio->sense_len); | |||||
sense_data_len = (sense_data_len > csio->sense_len) ? | |||||
csio->sense_len : sense_data_len; | |||||
if (sense_data) | |||||
memcpy(&csio->sense_data, sense_data, sense_data_len); | |||||
if (csio->sense_len > sense_data_len) | |||||
csio->sense_resid = csio->sense_len - sense_data_len; | |||||
else | |||||
csio->sense_resid = 0; | |||||
DBG_IO("OUT\n"); | |||||
} | |||||
/* | /* | ||||
* Error response handling for raid IO | * Error response handling for raid IO | ||||
*/ | */ | ||||
/*
 * Error response handling for raid IO.  Translates the controller's
 * RAID-path error information into CAM status, copying sense data for
 * check conditions, then completes the CCB through the common path.
 */
void
os_raid_response_error(rcb_t *rcb, raid_path_error_info_elem_t *err_info)
{
	struct ccb_scsiio *csio;
	pqisrc_softstate_t *softs;

	DBG_IO("IN\n");

	csio = (struct ccb_scsiio *)&rcb->cm_ccb->csio;

	if (csio == NULL)
		panic("csio is null");

	softs = rcb->softs;

	csio->ccb_h.status = CAM_REQ_CMP_ERR;

	/* Guard before dereferencing err_info below; complete with
	 * CAM_REQ_CMP_ERR if the error info or device is missing. */
	if (!err_info || !rcb->dvp) {
		DBG_ERR("couldn't be accessed! error info = %p, rcb->dvp = %p\n",
				err_info, rcb->dvp);
		goto error_out;
	}

	csio->scsi_status = err_info->status;

	if (csio->ccb_h.func_code == XPT_SCSI_IO) {
		/*
		 * Handle specific SCSI status values.
		 */
		switch(csio->scsi_status) {
		case PQI_RAID_STATUS_QUEUE_FULL:
			csio->ccb_h.status = CAM_REQ_CMP;
			DBG_ERR("Queue Full error\n");
			break;
		/* check condition, sense data included */
		case PQI_RAID_STATUS_CHECK_CONDITION:
			{
				uint16_t sense_data_len =
					LE_16(err_info->sense_data_len);
				uint8_t *sense_data = NULL;
				if (sense_data_len)
					sense_data = err_info->data;
				copy_sense_data_to_csio(csio, sense_data, sense_data_len);
				csio->ccb_h.status = CAM_SCSI_STATUS_ERROR
							| CAM_AUTOSNS_VALID
							| CAM_REQ_CMP_ERR;
			}
			break;
		case PQI_RAID_DATA_IN_OUT_UNDERFLOW:
			{
				uint32_t resid = 0;
				resid = rcb->bcount-err_info->data_out_transferred;
				csio->resid  = resid;
				csio->ccb_h.status = CAM_REQ_CMP;
			}
			break;
		default:
			csio->ccb_h.status = CAM_REQ_CMP;
			break;
		}
	}

error_out:
	pqi_complete_scsi_io(csio, rcb);

	DBG_IO("OUT\n");
}
/* | /* | ||||
* Error response handling for aio. | * Error response handling for aio. | ||||
*/ | */ | ||||
/*
 * Error response handling for aio.  Translates AIO-path service
 * responses into CAM status.  Task-management (TMF) responses only
 * update rcb state and wake the waiter -- they must NOT complete the
 * CCB, so those cases return early.
 */
void
os_aio_response_error(rcb_t *rcb, aio_path_error_info_elem_t *err_info)
{
	struct ccb_scsiio *csio;
	pqisrc_softstate_t *softs;

	DBG_IO("IN\n");

	if (rcb == NULL)
		panic("rcb is null");

	rcb->status = REQUEST_SUCCESS;
	csio = (struct ccb_scsiio *)&rcb->cm_ccb->csio;
	if (csio == NULL)
		panic("csio is null");

	softs = rcb->softs;

	/* Guard before dereferencing err_info / rcb->dvp below. */
	if (!err_info || !rcb->dvp) {
		csio->ccb_h.status = CAM_REQ_CMP_ERR;
		DBG_ERR("couldn't be accessed! error info = %p, rcb->dvp = %p\n",
				err_info, rcb->dvp);
		goto error_out;
	}

	switch (err_info->service_resp) {
	case PQI_AIO_SERV_RESPONSE_COMPLETE:
		csio->ccb_h.status = err_info->status;
		break;
	case PQI_AIO_SERV_RESPONSE_FAILURE:
		switch(err_info->status) {
		case PQI_AIO_STATUS_IO_ABORTED:
			csio->ccb_h.status = CAM_REQ_ABORTED;
			DBG_WARN_BTL(rcb->dvp, "IO aborted\n");
			break;
		case PQI_AIO_STATUS_UNDERRUN:
			csio->ccb_h.status = CAM_REQ_CMP;
			csio->resid =
				LE_32(err_info->resd_count);
			break;
		case PQI_AIO_STATUS_OVERRUN:
			csio->ccb_h.status = CAM_REQ_CMP;
			break;
		case PQI_AIO_STATUS_AIO_PATH_DISABLED:
			DBG_WARN_BTL(rcb->dvp,"AIO Path Disabled\n");
			/* Timed out TMF response comes here */
			if (rcb->tm_req) {
				rcb->req_pending = false;
				rcb->status = REQUEST_SUCCESS;
				DBG_ERR("AIO Disabled for TMF\n");
				return;
			}
			/* Fall back to the RAID path for this device. */
			rcb->dvp->aio_enabled = false;
			rcb->dvp->offload_enabled = false;
			csio->ccb_h.status |= CAM_REQUEUE_REQ;
			break;
		case PQI_AIO_STATUS_IO_ERROR:
		case PQI_AIO_STATUS_IO_NO_DEVICE:
		case PQI_AIO_STATUS_INVALID_DEVICE:
		default:
			DBG_WARN_BTL(rcb->dvp,"IO Error/Invalid/No device\n");
			csio->ccb_h.status |=
				CAM_SCSI_STATUS_ERROR;
			break;
		}
		break;
	case PQI_AIO_SERV_RESPONSE_TMF_COMPLETE:
	case PQI_AIO_SERV_RESPONSE_TMF_SUCCEEDED:
		/* TMF done: wake the waiter; do not complete the CCB. */
		DBG_ERR("PQI_AIO_SERV_RESPONSE_TMF %s\n",
			(err_info->service_resp == PQI_AIO_SERV_RESPONSE_TMF_COMPLETE) ? "COMPLETE" : "SUCCEEDED");
		rcb->status = REQUEST_SUCCESS;
		rcb->req_pending = false;
		return;
	case PQI_AIO_SERV_RESPONSE_TMF_REJECTED:
	case PQI_AIO_SERV_RESPONSE_TMF_INCORRECT_LUN:
		/* TMF failed: wake the waiter; do not complete the CCB. */
		DBG_ERR("PQI_AIO_SERV_RESPONSE_TMF %s\n",
			(err_info->service_resp == PQI_AIO_SERV_RESPONSE_TMF_REJECTED) ? "REJECTED" : "INCORRECT LUN");
		rcb->status = REQUEST_FAILED;
		rcb->req_pending = false;
		return;
	default:
		DBG_WARN_BTL(rcb->dvp,"Scsi Status Error\n");
		csio->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
		break;
	}

	if(err_info->data_pres == DATA_PRESENT_SENSE_DATA ) {
		csio->scsi_status = PQI_AIO_STATUS_CHECK_CONDITION;
		uint8_t *sense_data = NULL;
		unsigned sense_data_len = LE_16(err_info->data_len);
		if (sense_data_len)
			sense_data = err_info->data;
		DBG_ERR_BTL(rcb->dvp, "SCSI_STATUS_CHECK_COND  sense size %u\n",
			sense_data_len);
		copy_sense_data_to_csio(csio, sense_data, sense_data_len);
		csio->ccb_h.status = CAM_SCSI_STATUS_ERROR | CAM_AUTOSNS_VALID;
	}

error_out:
	pqi_complete_scsi_io(csio, rcb);

	DBG_IO("OUT\n");
}
static void | static void | ||||
pqi_freeze_ccb(union ccb *ccb) | pqi_freeze_ccb(union ccb *ccb) | ||||
Done Inline ActionsIf you are fixing style, there should be a space after "if". mav: If you are fixing style, there should be a space after "if". | |||||
{ | { | ||||
if ((ccb->ccb_h.status & CAM_DEV_QFRZN) == 0) { | if ((ccb->ccb_h.status & CAM_DEV_QFRZN) == 0) { | ||||
ccb->ccb_h.status |= CAM_DEV_QFRZN; | ccb->ccb_h.status |= CAM_DEV_QFRZN; | ||||
xpt_freeze_devq(ccb->ccb_h.path, 1); | xpt_freeze_devq(ccb->ccb_h.path, 1); | ||||
} | } | ||||
} | } | ||||
/* | /* | ||||
* Command-mapping helper function - populate this command's s/g table. | * Command-mapping helper function - populate this command's s/g table. | ||||
*/ | */ | ||||
static void | static void | ||||
pqi_request_map_helper(void *arg, bus_dma_segment_t *segs, int nseg, int error) | pqi_request_map_helper(void *arg, bus_dma_segment_t *segs, int nseg, int error) | ||||
{ | { | ||||
pqisrc_softstate_t *softs; | rcb_t *rcb = (rcb_t *)arg; | ||||
rcb_t *rcb; | pqisrc_softstate_t *softs = rcb->softs; | ||||
union ccb *ccb; | |||||
rcb = (rcb_t *)arg; | if (error || nseg > softs->pqi_cap.max_sg_elem) { | ||||
softs = rcb->softs; | |||||
if( error || nseg > softs->pqi_cap.max_sg_elem ) | |||||
{ | |||||
rcb->cm_ccb->ccb_h.status = CAM_RESRC_UNAVAIL; | |||||
pqi_freeze_ccb(rcb->cm_ccb); | |||||
DBG_ERR_BTL(rcb->dvp, "map failed err = %d or nseg(%d) > sgelem(%d)\n", | DBG_ERR_BTL(rcb->dvp, "map failed err = %d or nseg(%d) > sgelem(%d)\n", | ||||
error, nseg, softs->pqi_cap.max_sg_elem); | error, nseg, softs->pqi_cap.max_sg_elem); | ||||
pqi_unmap_request(rcb); | goto error_io; | ||||
xpt_done((union ccb *)rcb->cm_ccb); | |||||
return; | |||||
} | } | ||||
rcb->sgt = os_mem_alloc(softs, nseg * sizeof(rcb_t)); | rcb->sgt = os_mem_alloc(softs, nseg * sizeof(sgt_t)); | ||||
if (rcb->sgt == NULL) { | |||||
rcb->cm_ccb->ccb_h.status = CAM_RESRC_UNAVAIL; | if (!rcb->sgt) { | ||||
pqi_freeze_ccb(rcb->cm_ccb); | |||||
DBG_ERR_BTL(rcb->dvp, "os_mem_alloc() failed; nseg = %d\n", nseg); | DBG_ERR_BTL(rcb->dvp, "os_mem_alloc() failed; nseg = %d\n", nseg); | ||||
pqi_unmap_request(rcb); | goto error_io; | ||||
xpt_done((union ccb *)rcb->cm_ccb); | |||||
return; | |||||
} | } | ||||
rcb->nseg = nseg; | rcb->nseg = nseg; | ||||
for (int i = 0; i < nseg; i++) { | for (int i = 0; i < nseg; i++) { | ||||
rcb->sgt[i].addr = segs[i].ds_addr; | rcb->sgt[i].addr = segs[i].ds_addr; | ||||
rcb->sgt[i].len = segs[i].ds_len; | rcb->sgt[i].len = segs[i].ds_len; | ||||
rcb->sgt[i].flags = 0; | rcb->sgt[i].flags = 0; | ||||
} | } | ||||
if (rcb->data_dir == SOP_DATA_DIR_FROM_DEVICE) | if (rcb->data_dir == SOP_DATA_DIR_FROM_DEVICE) | ||||
bus_dmamap_sync(softs->os_specific.pqi_buffer_dmat, | bus_dmamap_sync(softs->os_specific.pqi_buffer_dmat, | ||||
rcb->cm_datamap, BUS_DMASYNC_PREREAD); | rcb->cm_datamap, BUS_DMASYNC_PREREAD); | ||||
if (rcb->data_dir == SOP_DATA_DIR_TO_DEVICE) | if (rcb->data_dir == SOP_DATA_DIR_TO_DEVICE) | ||||
bus_dmamap_sync(softs->os_specific.pqi_buffer_dmat, | bus_dmamap_sync(softs->os_specific.pqi_buffer_dmat, | ||||
rcb->cm_datamap, BUS_DMASYNC_PREWRITE); | rcb->cm_datamap, BUS_DMASYNC_PREWRITE); | ||||
/* Call IO functions depending on pd or ld */ | /* Call IO functions depending on pd or ld */ | ||||
rcb->status = REQUEST_PENDING; | rcb->status = REQUEST_PENDING; | ||||
error = pqisrc_build_send_io(softs, rcb); | error = pqisrc_build_send_io(softs, rcb); | ||||
if (error) { | if (error) { | ||||
rcb->req_pending = false; | rcb->req_pending = false; | ||||
rcb->cm_ccb->ccb_h.status = CAM_RESRC_UNAVAIL; | |||||
pqi_freeze_ccb(rcb->cm_ccb); | |||||
DBG_ERR_BTL(rcb->dvp, "Build IO failed, error = %d\n", error); | DBG_ERR_BTL(rcb->dvp, "Build IO failed, error = %d\n", error); | ||||
} else { | |||||
/* Successfully IO was submitted to the device. */ | |||||
return; | |||||
} | |||||
error_io: | |||||
ccb = rcb->cm_ccb; | |||||
ccb->ccb_h.status = CAM_RESRC_UNAVAIL; | |||||
pqi_freeze_ccb(ccb); | |||||
pqi_unmap_request(rcb); | pqi_unmap_request(rcb); | ||||
xpt_done((union ccb *)rcb->cm_ccb); | xpt_done(ccb); | ||||
return; | return; | ||||
} | } | ||||
} | |||||
/* | /* | ||||
* Function to dma-map the request buffer | * Function to dma-map the request buffer | ||||
*/ | */ | ||||
/*
 * Function to dma-map the request buffer.  Returns a BSD errno-style
 * status (BSD_SUCCESS on success or deferred completion via the
 * busdma callback).  EINPROGRESS from bus_dmamap_load_ccb() is not a
 * failure -- the callback will run when the mapping is available.
 */
static int
pqi_map_request(rcb_t *rcb)
{
	pqisrc_softstate_t *softs = rcb->softs;
	int bsd_status = BSD_SUCCESS;
	union ccb *ccb = rcb->cm_ccb;

	DBG_FUNC("IN\n");

	/* check that mapping is necessary */
	if (rcb->cm_flags & PQI_CMD_MAPPED)
		return BSD_SUCCESS;

	rcb->cm_flags |= PQI_CMD_MAPPED;

	if (rcb->bcount) {
		bsd_status = bus_dmamap_load_ccb(softs->os_specific.pqi_buffer_dmat,
			rcb->cm_datamap, ccb, pqi_request_map_helper, rcb, 0);
		if (bsd_status != BSD_SUCCESS && bsd_status != EINPROGRESS) {
			DBG_ERR_BTL(rcb->dvp, "bus_dmamap_load_ccb failed, return status = %d transfer length = %d\n",
				bsd_status, rcb->bcount);
			return bsd_status;
		}
	} else {
		/*
		 * Set up the command to go to the controller.  If there are no
		 * data buffers associated with the command then it can bypass
		 * busdma.
		 */
		/* Call IO functions depending on pd or ld */
		rcb->status = REQUEST_PENDING;
		/* Translate the PQI-internal status to a BSD errno. */
		if (pqisrc_build_send_io(softs, rcb) != PQI_STATUS_SUCCESS) {
			bsd_status = EIO;
		}
	}

	DBG_FUNC("OUT error = %d\n", bsd_status);

	return bsd_status;
}
/* | /* | ||||
* Function to clear the request control block | * Function to clear the request control block | ||||
*/ | */ | ||||
void os_reset_rcb( rcb_t *rcb ) | void | ||||
os_reset_rcb(rcb_t *rcb) | |||||
{ | { | ||||
rcb->error_info = NULL; | rcb->error_info = NULL; | ||||
rcb->req = NULL; | rcb->req = NULL; | ||||
rcb->status = -1; | rcb->status = -1; | ||||
rcb->tag = INVALID_ELEM; | rcb->tag = INVALID_ELEM; | ||||
rcb->dvp = NULL; | rcb->dvp = NULL; | ||||
rcb->cdbp = NULL; | rcb->cdbp = NULL; | ||||
rcb->softs = NULL; | rcb->softs = NULL; | ||||
rcb->cm_flags = 0; | rcb->cm_flags = 0; | ||||
rcb->cm_data = NULL; | rcb->cm_data = NULL; | ||||
rcb->bcount = 0; | rcb->bcount = 0; | ||||
rcb->nseg = 0; | rcb->nseg = 0; | ||||
rcb->sgt = NULL; | rcb->sgt = NULL; | ||||
rcb->cm_ccb = NULL; | rcb->cm_ccb = NULL; | ||||
rcb->encrypt_enable = false; | rcb->encrypt_enable = false; | ||||
rcb->ioaccel_handle = 0; | rcb->ioaccel_handle = 0; | ||||
rcb->resp_qid = 0; | rcb->resp_qid = 0; | ||||
rcb->req_pending = false; | rcb->req_pending = false; | ||||
rcb->tm_req = false; | |||||
} | } | ||||
/* | /* | ||||
* Callback function for the lun rescan | * Callback function for the lun rescan | ||||
*/ | */ | ||||
static void smartpqi_lunrescan_cb(struct cam_periph *periph, union ccb *ccb) | static void | ||||
smartpqi_lunrescan_cb(struct cam_periph *periph, union ccb *ccb) | |||||
{ | { | ||||
xpt_free_path(ccb->ccb_h.path); | xpt_free_path(ccb->ccb_h.path); | ||||
xpt_free_ccb(ccb); | xpt_free_ccb(ccb); | ||||
} | } | ||||
/* | /* | ||||
* Function to rescan the lun | * Function to rescan the lun | ||||
*/ | */ | ||||
static void smartpqi_lun_rescan(struct pqisrc_softstate *softs, int target, | static void | ||||
smartpqi_lun_rescan(struct pqisrc_softstate *softs, int target, | |||||
int lun) | int lun) | ||||
{ | { | ||||
union ccb *ccb = NULL; | union ccb *ccb = NULL; | ||||
cam_status status = 0; | cam_status status = 0; | ||||
struct cam_path *path = NULL; | struct cam_path *path = NULL; | ||||
DBG_FUNC("IN\n"); | DBG_FUNC("IN\n"); | ||||
ccb = xpt_alloc_ccb_nowait(); | ccb = xpt_alloc_ccb_nowait(); | ||||
if (ccb == NULL) { | |||||
DBG_ERR("Unable to alloc ccb for lun rescan\n"); | |||||
return; | |||||
} | |||||
status = xpt_create_path(&path, NULL, | status = xpt_create_path(&path, NULL, | ||||
cam_sim_path(softs->os_specific.sim), target, lun); | cam_sim_path(softs->os_specific.sim), target, lun); | ||||
if (status != CAM_REQ_CMP) { | if (status != CAM_REQ_CMP) { | ||||
DBG_ERR("xpt_create_path status(%d) != CAM_REQ_CMP \n", | DBG_ERR("xpt_create_path status(%d) != CAM_REQ_CMP \n", | ||||
status); | status); | ||||
xpt_free_ccb(ccb); | xpt_free_ccb(ccb); | ||||
return; | return; | ||||
} | } | ||||
bzero(ccb, sizeof(union ccb)); | bzero(ccb, sizeof(union ccb)); | ||||
xpt_setup_ccb(&ccb->ccb_h, path, 5); | xpt_setup_ccb(&ccb->ccb_h, path, 5); | ||||
ccb->ccb_h.func_code = XPT_SCAN_LUN; | ccb->ccb_h.func_code = XPT_SCAN_LUN; | ||||
ccb->ccb_h.cbfcnp = smartpqi_lunrescan_cb; | ccb->ccb_h.cbfcnp = smartpqi_lunrescan_cb; | ||||
ccb->crcn.flags = CAM_FLAG_NONE; | ccb->crcn.flags = CAM_FLAG_NONE; | ||||
xpt_action(ccb); | xpt_action(ccb); | ||||
DBG_FUNC("OUT\n"); | DBG_FUNC("OUT\n"); | ||||
} | } | ||||
/* | /* | ||||
* Function to rescan the lun under each target | * Function to rescan the lun under each target | ||||
*/ | */ | ||||
void smartpqi_target_rescan(struct pqisrc_softstate *softs) | void | ||||
smartpqi_target_rescan(struct pqisrc_softstate *softs) | |||||
{ | { | ||||
int target = 0, lun = 0; | int target = 0, lun = 0; | ||||
DBG_FUNC("IN\n"); | DBG_FUNC("IN\n"); | ||||
for(target = 0; target < PQI_MAX_DEVICES; target++){ | for(target = 0; target < PQI_MAX_DEVICES; target++){ | ||||
for(lun = 0; lun < PQI_MAX_MULTILUN; lun++){ | for(lun = 0; lun < PQI_MAX_MULTILUN; lun++){ | ||||
if(softs->device_list[target][lun]){ | if(softs->device_list[target][lun]){ | ||||
smartpqi_lun_rescan(softs, target, lun); | smartpqi_lun_rescan(softs, target, lun); | ||||
} | } | ||||
} | } | ||||
} | } | ||||
DBG_FUNC("OUT\n"); | DBG_FUNC("OUT\n"); | ||||
} | } | ||||
/* | /* | ||||
* Set the mode of tagged command queueing for the current task. | * Set the mode of tagged command queueing for the current task. | ||||
*/ | */ | ||||
uint8_t os_get_task_attr(rcb_t *rcb) | uint8_t | ||||
os_get_task_attr(rcb_t *rcb) | |||||
{ | { | ||||
union ccb *ccb = rcb->cm_ccb; | union ccb *ccb = rcb->cm_ccb; | ||||
uint8_t tag_action = SOP_TASK_ATTRIBUTE_SIMPLE; | uint8_t tag_action = SOP_TASK_ATTRIBUTE_SIMPLE; | ||||
switch(ccb->csio.tag_action) { | switch(ccb->csio.tag_action) { | ||||
case MSG_HEAD_OF_Q_TAG: | case MSG_HEAD_OF_Q_TAG: | ||||
tag_action = SOP_TASK_ATTRIBUTE_HEAD_OF_QUEUE; | tag_action = SOP_TASK_ATTRIBUTE_HEAD_OF_QUEUE; | ||||
break; | break; | ||||
case MSG_ORDERED_Q_TAG: | case MSG_ORDERED_Q_TAG: | ||||
tag_action = SOP_TASK_ATTRIBUTE_ORDERED; | tag_action = SOP_TASK_ATTRIBUTE_ORDERED; | ||||
break; | break; | ||||
case MSG_SIMPLE_Q_TAG: | case MSG_SIMPLE_Q_TAG: | ||||
default: | default: | ||||
tag_action = SOP_TASK_ATTRIBUTE_SIMPLE; | tag_action = SOP_TASK_ATTRIBUTE_SIMPLE; | ||||
break; | break; | ||||
} | } | ||||
return tag_action; | return tag_action; | ||||
} | } | ||||
/* | /* | ||||
* Complete all outstanding commands | * Complete all outstanding commands | ||||
*/ | */ | ||||
void os_complete_outstanding_cmds_nodevice(pqisrc_softstate_t *softs) | void | ||||
os_complete_outstanding_cmds_nodevice(pqisrc_softstate_t *softs) | |||||
{ | { | ||||
int tag = 0; | int tag = 0; | ||||
DBG_FUNC("IN\n"); | DBG_FUNC("IN\n"); | ||||
for (tag = 1; tag < softs->max_outstanding_io; tag++) { | for (tag = 1; tag <= softs->max_outstanding_io; tag++) { | ||||
rcb_t *prcb = &softs->rcb[tag]; | rcb_t *prcb = &softs->rcb[tag]; | ||||
if(prcb->req_pending && prcb->cm_ccb ) { | if(prcb->req_pending && prcb->cm_ccb ) { | ||||
prcb->req_pending = false; | prcb->req_pending = false; | ||||
prcb->cm_ccb->ccb_h.status = CAM_REQ_ABORTED | CAM_REQ_CMP; | prcb->cm_ccb->ccb_h.status = CAM_REQ_ABORTED | CAM_REQ_CMP; | ||||
xpt_done((union ccb *)prcb->cm_ccb); | pqisrc_decrement_device_active_io(softs, prcb); | ||||
prcb->cm_ccb = NULL; | pqi_complete_scsi_io(&prcb->cm_ccb->csio, prcb); | ||||
} | } | ||||
} | } | ||||
DBG_FUNC("OUT\n"); | DBG_FUNC("OUT\n"); | ||||
} | } | ||||
/* | /* | ||||
* IO handling functionality entry point | * IO handling functionality entry point | ||||
*/ | */ | ||||
static int pqisrc_io_start(struct cam_sim *sim, union ccb *ccb) | static int | ||||
pqisrc_io_start(struct cam_sim *sim, union ccb *ccb) | |||||
{ | { | ||||
rcb_t *rcb; | rcb_t *rcb; | ||||
uint32_t tag, no_transfer = 0; | uint32_t tag, no_transfer = 0; | ||||
pqisrc_softstate_t *softs = (struct pqisrc_softstate *) | pqisrc_softstate_t *softs = (struct pqisrc_softstate *) | ||||
cam_sim_softc(sim); | cam_sim_softc(sim); | ||||
int32_t error = PQI_STATUS_FAILURE; | int32_t error; | ||||
pqi_scsi_dev_t *dvp; | pqi_scsi_dev_t *dvp; | ||||
DBG_FUNC("IN\n"); | DBG_FUNC("IN\n"); | ||||
if( softs->device_list[ccb->ccb_h.target_id][ccb->ccb_h.target_lun] == NULL ) { | if (softs->device_list[ccb->ccb_h.target_id][ccb->ccb_h.target_lun] == NULL) { | ||||
ccb->ccb_h.status = CAM_DEV_NOT_THERE; | ccb->ccb_h.status = CAM_DEV_NOT_THERE; | ||||
DBG_INFO("Device = %d not there\n", ccb->ccb_h.target_id); | DBG_INFO("Device = %d not there\n", ccb->ccb_h.target_id); | ||||
return PQI_STATUS_FAILURE; | return ENXIO; | ||||
} | } | ||||
dvp = softs->device_list[ccb->ccb_h.target_id][ccb->ccb_h.target_lun]; | dvp = softs->device_list[ccb->ccb_h.target_id][ccb->ccb_h.target_lun]; | ||||
/* Check controller state */ | /* Check controller state */ | ||||
if (IN_PQI_RESET(softs)) { | if (IN_PQI_RESET(softs)) { | ||||
ccb->ccb_h.status = CAM_SCSI_BUS_RESET | ccb->ccb_h.status = CAM_SCSI_BUS_RESET | ||||
| CAM_BUSY | CAM_REQ_INPROG; | | CAM_BUSY | CAM_REQ_INPROG; | ||||
DBG_WARN("Device = %d BUSY/IN_RESET\n", ccb->ccb_h.target_id); | DBG_WARN("Device = %d BUSY/IN_RESET\n", ccb->ccb_h.target_id); | ||||
return error; | return ENXIO; | ||||
} | } | ||||
/* Check device state */ | /* Check device state */ | ||||
if (pqisrc_ctrl_offline(softs) || DEV_GONE(dvp)) { | if (pqisrc_ctrl_offline(softs) || DEV_GONE(dvp)) { | ||||
ccb->ccb_h.status = CAM_DEV_NOT_THERE | CAM_REQ_CMP; | ccb->ccb_h.status = CAM_DEV_NOT_THERE | CAM_REQ_CMP; | ||||
DBG_WARN("Device = %d GONE/OFFLINE\n", ccb->ccb_h.target_id); | DBG_WARN("Device = %d GONE/OFFLINE\n", ccb->ccb_h.target_id); | ||||
return error; | return ENXIO; | ||||
} | } | ||||
/* Check device reset */ | /* Check device reset */ | ||||
if (dvp->reset_in_progress) { | if (DEVICE_RESET(dvp)) { | ||||
ccb->ccb_h.status = CAM_SCSI_BUSY | CAM_REQ_INPROG | CAM_BUSY; | ccb->ccb_h.status = CAM_SCSI_BUSY | CAM_REQ_INPROG | CAM_BUSY; | ||||
DBG_WARN("Device %d reset returned busy\n", ccb->ccb_h.target_id); | DBG_WARN("Device %d reset returned busy\n", ccb->ccb_h.target_id); | ||||
return error; | return EBUSY; | ||||
} | } | ||||
if (dvp->expose_device == false) { | if (dvp->expose_device == false) { | ||||
ccb->ccb_h.status = CAM_DEV_NOT_THERE; | ccb->ccb_h.status = CAM_DEV_NOT_THERE; | ||||
DBG_INFO("Device = %d not exposed\n", ccb->ccb_h.target_id); | DBG_INFO("Device = %d not exposed\n", ccb->ccb_h.target_id); | ||||
return error; | return ENXIO; | ||||
} | } | ||||
tag = pqisrc_get_tag(&softs->taglist); | tag = pqisrc_get_tag(&softs->taglist); | ||||
if( tag == INVALID_ELEM ) { | if (tag == INVALID_ELEM) { | ||||
DBG_ERR("Get Tag failed\n"); | DBG_ERR("Get Tag failed\n"); | ||||
xpt_freeze_simq(softs->os_specific.sim, 1); | xpt_freeze_simq(softs->os_specific.sim, 1); | ||||
softs->os_specific.pqi_flags |= PQI_FLAG_BUSY; | softs->os_specific.pqi_flags |= PQI_FLAG_BUSY; | ||||
ccb->ccb_h.status |= (CAM_REQUEUE_REQ | CAM_RELEASE_SIMQ); | ccb->ccb_h.status |= (CAM_REQUEUE_REQ | CAM_RELEASE_SIMQ); | ||||
return PQI_STATUS_FAILURE; | return EIO; | ||||
} | } | ||||
DBG_IO("tag = %d &softs->taglist : %p\n", tag, &softs->taglist); | DBG_IO("tag = %d &softs->taglist : %p\n", tag, &softs->taglist); | ||||
rcb = &softs->rcb[tag]; | rcb = &softs->rcb[tag]; | ||||
os_reset_rcb( rcb ); | os_reset_rcb(rcb); | ||||
rcb->tag = tag; | rcb->tag = tag; | ||||
rcb->softs = softs; | rcb->softs = softs; | ||||
rcb->cmdlen = ccb->csio.cdb_len; | rcb->cmdlen = ccb->csio.cdb_len; | ||||
ccb->ccb_h.sim_priv.entries[0].ptr = rcb; | ccb->ccb_h.sim_priv.entries[0].ptr = rcb; | ||||
switch (ccb->ccb_h.flags & CAM_DIR_MASK) { | switch (ccb->ccb_h.flags & CAM_DIR_MASK) { | ||||
case CAM_DIR_IN: | case CAM_DIR_IN: | ||||
rcb->data_dir = SOP_DATA_DIR_FROM_DEVICE; | rcb->data_dir = SOP_DATA_DIR_FROM_DEVICE; | ||||
Show All 20 Lines | pqisrc_io_start(struct cam_sim *sim, union ccb *ccb) | ||||
} | } | ||||
/* | /* | ||||
* Submit the request to the adapter. | * Submit the request to the adapter. | ||||
* | * | ||||
* Note that this may fail if we're unable to map the request (and | * Note that this may fail if we're unable to map the request (and | ||||
* if we ever learn a transport layer other than simple, may fail | * if we ever learn a transport layer other than simple, may fail | ||||
* if the adapter rejects the command). | * if the adapter rejects the command). | ||||
*/ | */ | ||||
if ((error = pqi_map_request(rcb)) != 0) { | if ((error = pqi_map_request(rcb)) != BSD_SUCCESS) { | ||||
rcb->req_pending = false; | |||||
xpt_freeze_simq(softs->os_specific.sim, 1); | xpt_freeze_simq(softs->os_specific.sim, 1); | ||||
ccb->ccb_h.status |= CAM_RELEASE_SIMQ; | |||||
if (error == EINPROGRESS) { | if (error == EINPROGRESS) { | ||||
DBG_WARN("In Progress on %d\n", ccb->ccb_h.target_id); | /* Release simq in the completion */ | ||||
error = 0; | softs->os_specific.pqi_flags |= PQI_FLAG_BUSY; | ||||
error = BSD_SUCCESS; | |||||
} else { | } else { | ||||
ccb->ccb_h.status |= CAM_REQUEUE_REQ; | rcb->req_pending = false; | ||||
ccb->ccb_h.status |= CAM_REQUEUE_REQ | CAM_RELEASE_SIMQ; | |||||
DBG_WARN("Requeue req error = %d target = %d\n", error, | DBG_WARN("Requeue req error = %d target = %d\n", error, | ||||
ccb->ccb_h.target_id); | ccb->ccb_h.target_id); | ||||
pqi_unmap_request(rcb); | pqi_unmap_request(rcb); | ||||
error = EIO; | |||||
} | } | ||||
} | } | ||||
DBG_FUNC("OUT error = %d\n", error); | DBG_FUNC("OUT error = %d\n", error); | ||||
return error; | return error; | ||||
} | } | ||||
static inline int | |||||
pqi_tmf_status_to_bsd_tmf_status(int pqi_status, rcb_t *rcb) | |||||
{ | |||||
if (PQI_STATUS_SUCCESS == pqi_status && | |||||
REQUEST_SUCCESS == rcb->status) | |||||
return BSD_SUCCESS; | |||||
else | |||||
return EIO; | |||||
} | |||||
/* | /* | ||||
* Abort a task, task management functionality | * Abort a task, task management functionality | ||||
*/ | */ | ||||
static int | static int | ||||
pqisrc_scsi_abort_task(pqisrc_softstate_t *softs, union ccb *ccb) | pqisrc_scsi_abort_task(pqisrc_softstate_t *softs, union ccb *ccb) | ||||
{ | { | ||||
rcb_t *rcb = ccb->ccb_h.sim_priv.entries[0].ptr; | struct ccb_hdr *ccb_h = &ccb->ccb_h; | ||||
uint32_t abort_tag = rcb->tag; | rcb_t *rcb = NULL; | ||||
uint32_t tag = 0; | rcb_t *prcb = ccb->ccb_h.sim_priv.entries[0].ptr; | ||||
int rval = PQI_STATUS_SUCCESS; | uint32_t tag; | ||||
uint16_t qid; | int rval; | ||||
DBG_FUNC("IN\n"); | DBG_FUNC("IN\n"); | ||||
qid = (uint16_t)rcb->resp_qid; | |||||
tag = pqisrc_get_tag(&softs->taglist); | tag = pqisrc_get_tag(&softs->taglist); | ||||
rcb = &softs->rcb[tag]; | rcb = &softs->rcb[tag]; | ||||
rcb->tag = tag; | rcb->tag = tag; | ||||
rcb->resp_qid = qid; | |||||
rval = pqisrc_send_tmf(softs, rcb->dvp, rcb, abort_tag, | if (!rcb->dvp) { | ||||
DBG_ERR("dvp is null, tmf type : 0x%x\n", ccb_h->func_code); | |||||
rval = ENXIO; | |||||
goto error_tmf; | |||||
} | |||||
rcb->tm_req = true; | |||||
rval = pqisrc_send_tmf(softs, rcb->dvp, rcb, prcb, | |||||
SOP_TASK_MANAGEMENT_FUNCTION_ABORT_TASK); | SOP_TASK_MANAGEMENT_FUNCTION_ABORT_TASK); | ||||
if (PQI_STATUS_SUCCESS == rval) { | if ((rval = pqi_tmf_status_to_bsd_tmf_status(rval, rcb)) == BSD_SUCCESS) | ||||
rval = rcb->status; | |||||
if (REQUEST_SUCCESS == rval) { | |||||
ccb->ccb_h.status = CAM_REQ_ABORTED; | ccb->ccb_h.status = CAM_REQ_ABORTED; | ||||
} | |||||
} | |||||
pqisrc_put_tag(&softs->taglist, abort_tag); | |||||
pqisrc_put_tag(&softs->taglist,rcb->tag); | |||||
error_tmf: | |||||
os_reset_rcb(rcb); | |||||
pqisrc_put_tag(&softs->taglist, tag); | |||||
DBG_FUNC("OUT rval = %d\n", rval); | DBG_FUNC("OUT rval = %d\n", rval); | ||||
return rval; | return rval; | ||||
} | } | ||||
/* | /* | ||||
* Abort a taskset, task management functionality | * Abort a taskset, task management functionality | ||||
*/ | */ | ||||
static int | static int | ||||
pqisrc_scsi_abort_task_set(pqisrc_softstate_t *softs, union ccb *ccb) | pqisrc_scsi_abort_task_set(pqisrc_softstate_t *softs, union ccb *ccb) | ||||
{ | { | ||||
struct ccb_hdr *ccb_h = &ccb->ccb_h; | |||||
rcb_t *rcb = NULL; | rcb_t *rcb = NULL; | ||||
uint32_t tag = 0; | uint32_t tag; | ||||
int rval = PQI_STATUS_SUCCESS; | int rval; | ||||
DBG_FUNC("IN\n"); | DBG_FUNC("IN\n"); | ||||
tag = pqisrc_get_tag(&softs->taglist); | tag = pqisrc_get_tag(&softs->taglist); | ||||
rcb = &softs->rcb[tag]; | rcb = &softs->rcb[tag]; | ||||
rcb->tag = tag; | rcb->tag = tag; | ||||
rval = pqisrc_send_tmf(softs, rcb->dvp, rcb, 0, | if (!rcb->dvp) { | ||||
DBG_ERR("dvp is null, tmf type : 0x%x\n", ccb_h->func_code); | |||||
rval = ENXIO; | |||||
goto error_tmf; | |||||
} | |||||
rcb->tm_req = true; | |||||
rval = pqisrc_send_tmf(softs, rcb->dvp, rcb, NULL, | |||||
SOP_TASK_MANAGEMENT_FUNCTION_ABORT_TASK_SET); | SOP_TASK_MANAGEMENT_FUNCTION_ABORT_TASK_SET); | ||||
if (rval == PQI_STATUS_SUCCESS) { | rval = pqi_tmf_status_to_bsd_tmf_status(rval, rcb); | ||||
rval = rcb->status; | |||||
} | |||||
pqisrc_put_tag(&softs->taglist,rcb->tag); | error_tmf: | ||||
os_reset_rcb(rcb); | |||||
pqisrc_put_tag(&softs->taglist, tag); | |||||
DBG_FUNC("OUT rval = %d\n", rval); | DBG_FUNC("OUT rval = %d\n", rval); | ||||
return rval; | return rval; | ||||
} | } | ||||
/* | /* | ||||
* Target reset task management functionality | * Target reset task management functionality | ||||
*/ | */ | ||||
static int | static int | ||||
pqisrc_target_reset( pqisrc_softstate_t *softs, union ccb *ccb) | pqisrc_target_reset( pqisrc_softstate_t *softs, union ccb *ccb) | ||||
{ | { | ||||
struct ccb_hdr *ccb_h = &ccb->ccb_h; | |||||
pqi_scsi_dev_t *devp = softs->device_list[ccb->ccb_h.target_id][ccb->ccb_h.target_lun]; | pqi_scsi_dev_t *devp = softs->device_list[ccb->ccb_h.target_id][ccb->ccb_h.target_lun]; | ||||
rcb_t *rcb = NULL; | rcb_t *rcb = NULL; | ||||
uint32_t tag = 0; | uint32_t tag; | ||||
int rval = PQI_STATUS_SUCCESS; | int rval; | ||||
DBG_FUNC("IN\n"); | DBG_FUNC("IN\n"); | ||||
if (devp == NULL) { | if (devp == NULL) { | ||||
DBG_ERR("bad target t%d\n", ccb->ccb_h.target_id); | DBG_ERR("bad target %d, tmf type : 0x%x\n", ccb_h->target_id, ccb_h->func_code); | ||||
return (-1); | return ENXIO; | ||||
} | } | ||||
tag = pqisrc_get_tag(&softs->taglist); | tag = pqisrc_get_tag(&softs->taglist); | ||||
rcb = &softs->rcb[tag]; | rcb = &softs->rcb[tag]; | ||||
rcb->tag = tag; | rcb->tag = tag; | ||||
devp->reset_in_progress = true; | devp->reset_in_progress = true; | ||||
rval = pqisrc_send_tmf(softs, devp, rcb, 0, | |||||
rcb->tm_req = true; | |||||
rval = pqisrc_send_tmf(softs, devp, rcb, NULL, | |||||
SOP_TASK_MANAGEMENT_LUN_RESET); | SOP_TASK_MANAGEMENT_LUN_RESET); | ||||
if (PQI_STATUS_SUCCESS == rval) { | |||||
rval = rcb->status; | rval = pqi_tmf_status_to_bsd_tmf_status(rval, rcb); | ||||
} | |||||
devp->reset_in_progress = false; | devp->reset_in_progress = false; | ||||
pqisrc_put_tag(&softs->taglist,rcb->tag); | |||||
os_reset_rcb(rcb); | |||||
pqisrc_put_tag(&softs->taglist, tag); | |||||
DBG_FUNC("OUT rval = %d\n", rval); | DBG_FUNC("OUT rval = %d\n", rval); | ||||
return ((rval == REQUEST_SUCCESS) ? | return rval; | ||||
PQI_STATUS_SUCCESS : PQI_STATUS_FAILURE); | |||||
} | } | ||||
/* | /* | ||||
* cam entry point of the smartpqi module. | * cam entry point of the smartpqi module. | ||||
*/ | */ | ||||
static void smartpqi_cam_action(struct cam_sim *sim, union ccb *ccb) | static void | ||||
smartpqi_cam_action(struct cam_sim *sim, union ccb *ccb) | |||||
{ | { | ||||
struct pqisrc_softstate *softs = cam_sim_softc(sim); | struct pqisrc_softstate *softs = cam_sim_softc(sim); | ||||
struct ccb_hdr *ccb_h = &ccb->ccb_h; | struct ccb_hdr *ccb_h = &ccb->ccb_h; | ||||
DBG_FUNC("IN\n"); | DBG_FUNC("IN\n"); | ||||
switch (ccb_h->func_code) { | switch (ccb_h->func_code) { | ||||
case XPT_SCSI_IO: | case XPT_SCSI_IO: | ||||
{ | { | ||||
if(!pqisrc_io_start(sim, ccb)) { | if(!pqisrc_io_start(sim, ccb)) { | ||||
return; | return; | ||||
} | } | ||||
break; | break; | ||||
} | } | ||||
case XPT_CALC_GEOMETRY: | case XPT_CALC_GEOMETRY: | ||||
{ | { | ||||
struct ccb_calc_geometry *ccg; | struct ccb_calc_geometry *ccg; | ||||
ccg = &ccb->ccg; | ccg = &ccb->ccg; | ||||
if (ccg->block_size == 0) { | if (ccg->block_size == 0) { | ||||
ccb->ccb_h.status &= ~CAM_SIM_QUEUED; | ccb->ccb_h.status &= ~CAM_SIM_QUEUED; | ||||
ccb->ccb_h.status = CAM_REQ_INVALID; | ccb->ccb_h.status |= CAM_REQ_INVALID; | ||||
break; | break; | ||||
} | } | ||||
cam_calc_geometry(ccg, /* extended */ 1); | cam_calc_geometry(ccg, /* extended */ 1); | ||||
ccb->ccb_h.status = CAM_REQ_CMP; | ccb->ccb_h.status = CAM_REQ_CMP; | ||||
break; | break; | ||||
} | } | ||||
case XPT_PATH_INQ: | case XPT_PATH_INQ: | ||||
{ | { | ||||
▲ Show 20 Lines • Show All 49 Lines • ▼ Show 20 Lines | smartpqi_cam_action(struct cam_sim *sim, union ccb *ccb) | ||||
DBG_FUNC("OUT\n"); | DBG_FUNC("OUT\n"); | ||||
} | } | ||||
/* | /* | ||||
* Function to poll the response, when interrupts are unavailable | * Function to poll the response, when interrupts are unavailable | ||||
* This also serves supporting crash dump. | * This also serves supporting crash dump. | ||||
*/ | */ | ||||
static void smartpqi_poll(struct cam_sim *sim) | static void | ||||
smartpqi_poll(struct cam_sim *sim) | |||||
{ | { | ||||
struct pqisrc_softstate *softs = cam_sim_softc(sim); | struct pqisrc_softstate *softs = cam_sim_softc(sim); | ||||
int i; | int i; | ||||
for (i = 1; i < softs->intr_count; i++ ) | for (i = 1; i < softs->intr_count; i++ ) | ||||
pqisrc_process_response_queue(softs, i); | pqisrc_process_response_queue(softs, i); | ||||
} | } | ||||
/* | /* | ||||
* Function to adjust the queue depth of a device | * Function to adjust the queue depth of a device | ||||
*/ | */ | ||||
void smartpqi_adjust_queue_depth(struct cam_path *path, uint32_t queue_depth) | void | ||||
smartpqi_adjust_queue_depth(struct cam_path *path, uint32_t queue_depth) | |||||
{ | { | ||||
struct ccb_relsim crs; | struct ccb_relsim crs; | ||||
DBG_INFO("IN\n"); | DBG_INFO("IN\n"); | ||||
xpt_setup_ccb(&crs.ccb_h, path, 5); | xpt_setup_ccb(&crs.ccb_h, path, 5); | ||||
crs.ccb_h.func_code = XPT_REL_SIMQ; | crs.ccb_h.func_code = XPT_REL_SIMQ; | ||||
crs.ccb_h.flags = CAM_DEV_QFREEZE; | crs.ccb_h.flags = CAM_DEV_QFREEZE; | ||||
Show All 27 Lines | case AC_FOUND_DEVICE: | ||||
if (cgd == NULL) { | if (cgd == NULL) { | ||||
break; | break; | ||||
} | } | ||||
uint32_t t_id = cgd->ccb_h.target_id; | uint32_t t_id = cgd->ccb_h.target_id; | ||||
if (t_id <= (PQI_CTLR_INDEX - 1)) { | if (t_id <= (PQI_CTLR_INDEX - 1)) { | ||||
if (softs != NULL) { | if (softs != NULL) { | ||||
pqi_scsi_dev_t *dvp = softs->device_list[t_id][cgd->ccb_h.target_lun]; | pqi_scsi_dev_t *dvp = softs->device_list[t_id][cgd->ccb_h.target_lun]; | ||||
if (dvp == NULL) { | |||||
DBG_ERR("Target is null, target id=%d\n", t_id); | |||||
break; | |||||
} | |||||
smartpqi_adjust_queue_depth(path, | smartpqi_adjust_queue_depth(path, | ||||
dvp->queue_depth); | dvp->queue_depth); | ||||
} | } | ||||
} | } | ||||
break; | break; | ||||
} | } | ||||
default: | default: | ||||
break; | break; | ||||
} | } | ||||
DBG_FUNC("OUT\n"); | DBG_FUNC("OUT\n"); | ||||
} | } | ||||
/* | /* | ||||
* Function to register sim with CAM layer for smartpqi driver | * Function to register sim with CAM layer for smartpqi driver | ||||
*/ | */ | ||||
int register_sim(struct pqisrc_softstate *softs, int card_index) | int | ||||
register_sim(struct pqisrc_softstate *softs, int card_index) | |||||
{ | { | ||||
int error = 0; | |||||
int max_transactions; | int max_transactions; | ||||
union ccb *ccb = NULL; | union ccb *ccb = NULL; | ||||
cam_status status = 0; | cam_status status = 0; | ||||
struct ccb_setasync csa; | struct ccb_setasync csa; | ||||
struct cam_sim *sim; | struct cam_sim *sim; | ||||
DBG_FUNC("IN\n"); | DBG_FUNC("IN\n"); | ||||
max_transactions = softs->max_io_for_scsi_ml; | max_transactions = softs->max_io_for_scsi_ml; | ||||
softs->os_specific.devq = cam_simq_alloc(max_transactions); | softs->os_specific.devq = cam_simq_alloc(max_transactions); | ||||
if (softs->os_specific.devq == NULL) { | if (softs->os_specific.devq == NULL) { | ||||
DBG_ERR("cam_simq_alloc failed txns = %d\n", | DBG_ERR("cam_simq_alloc failed txns = %d\n", | ||||
max_transactions); | max_transactions); | ||||
return PQI_STATUS_FAILURE; | return ENOMEM; | ||||
} | } | ||||
sim = cam_sim_alloc(smartpqi_cam_action, \ | sim = cam_sim_alloc(smartpqi_cam_action, \ | ||||
smartpqi_poll, "smartpqi", softs, \ | smartpqi_poll, "smartpqi", softs, \ | ||||
card_index, &softs->os_specific.cam_lock, \ | card_index, &softs->os_specific.cam_lock, \ | ||||
1, max_transactions, softs->os_specific.devq); | 1, max_transactions, softs->os_specific.devq); | ||||
if (sim == NULL) { | if (sim == NULL) { | ||||
DBG_ERR("cam_sim_alloc failed txns = %d\n", | DBG_ERR("cam_sim_alloc failed txns = %d\n", | ||||
max_transactions); | max_transactions); | ||||
cam_simq_free(softs->os_specific.devq); | cam_simq_free(softs->os_specific.devq); | ||||
return PQI_STATUS_FAILURE; | return ENOMEM; | ||||
} | } | ||||
softs->os_specific.sim = sim; | softs->os_specific.sim = sim; | ||||
mtx_lock(&softs->os_specific.cam_lock); | mtx_lock(&softs->os_specific.cam_lock); | ||||
status = xpt_bus_register(sim, softs->os_specific.pqi_dev, 0); | status = xpt_bus_register(sim, softs->os_specific.pqi_dev, 0); | ||||
if (status != CAM_SUCCESS) { | if (status != CAM_SUCCESS) { | ||||
DBG_ERR("xpt_bus_register failed status=%d\n", status); | DBG_ERR("xpt_bus_register failed status=%d\n", status); | ||||
cam_sim_free(softs->os_specific.sim, FALSE); | cam_sim_free(softs->os_specific.sim, FALSE); | ||||
cam_simq_free(softs->os_specific.devq); | cam_simq_free(softs->os_specific.devq); | ||||
mtx_unlock(&softs->os_specific.cam_lock); | mtx_unlock(&softs->os_specific.cam_lock); | ||||
return PQI_STATUS_FAILURE; | return ENXIO; | ||||
} | } | ||||
softs->os_specific.sim_registered = TRUE; | softs->os_specific.sim_registered = TRUE; | ||||
ccb = xpt_alloc_ccb_nowait(); | ccb = xpt_alloc_ccb_nowait(); | ||||
if (ccb == NULL) { | if (ccb == NULL) { | ||||
DBG_ERR("xpt_create_path failed\n"); | DBG_ERR("xpt_create_path failed\n"); | ||||
return PQI_STATUS_FAILURE; | return ENXIO; | ||||
} | } | ||||
if (xpt_create_path(&ccb->ccb_h.path, NULL, | if (xpt_create_path(&ccb->ccb_h.path, NULL, | ||||
cam_sim_path(softs->os_specific.sim), | cam_sim_path(softs->os_specific.sim), | ||||
CAM_TARGET_WILDCARD, | CAM_TARGET_WILDCARD, | ||||
CAM_LUN_WILDCARD) != CAM_REQ_CMP) { | CAM_LUN_WILDCARD) != CAM_REQ_CMP) { | ||||
DBG_ERR("xpt_create_path failed\n"); | DBG_ERR("xpt_create_path failed\n"); | ||||
xpt_free_ccb(ccb); | xpt_free_ccb(ccb); | ||||
xpt_bus_deregister(cam_sim_path(softs->os_specific.sim)); | xpt_bus_deregister(cam_sim_path(softs->os_specific.sim)); | ||||
cam_sim_free(softs->os_specific.sim, TRUE); | cam_sim_free(softs->os_specific.sim, TRUE); | ||||
mtx_unlock(&softs->os_specific.cam_lock); | mtx_unlock(&softs->os_specific.cam_lock); | ||||
return PQI_STATUS_FAILURE; | return ENXIO; | ||||
} | } | ||||
/* | /* | ||||
* Callback to set the queue depth per target which is | * Callback to set the queue depth per target which is | ||||
* derived from the FW. | * derived from the FW. | ||||
*/ | */ | ||||
softs->os_specific.path = ccb->ccb_h.path; | softs->os_specific.path = ccb->ccb_h.path; | ||||
xpt_setup_ccb(&csa.ccb_h, softs->os_specific.path, 5); | xpt_setup_ccb(&csa.ccb_h, softs->os_specific.path, 5); | ||||
csa.ccb_h.func_code = XPT_SASYNC_CB; | csa.ccb_h.func_code = XPT_SASYNC_CB; | ||||
csa.event_enable = AC_FOUND_DEVICE; | csa.event_enable = AC_FOUND_DEVICE; | ||||
csa.callback = smartpqi_async; | csa.callback = smartpqi_async; | ||||
csa.callback_arg = softs; | csa.callback_arg = softs; | ||||
xpt_action((union ccb *)&csa); | xpt_action((union ccb *)&csa); | ||||
if (csa.ccb_h.status != CAM_REQ_CMP) { | if (csa.ccb_h.status != CAM_REQ_CMP) { | ||||
DBG_ERR("Unable to register smartpqi_aysnc handler: %d!\n", | DBG_ERR("Unable to register smartpqi_aysnc handler: %d!\n", | ||||
csa.ccb_h.status); | csa.ccb_h.status); | ||||
} | } | ||||
mtx_unlock(&softs->os_specific.cam_lock); | mtx_unlock(&softs->os_specific.cam_lock); | ||||
DBG_INFO("OUT\n"); | DBG_INFO("OUT\n"); | ||||
return error; | |||||
return BSD_SUCCESS; | |||||
} | } | ||||
/* | /* | ||||
* Function to deregister smartpqi sim from cam layer | * Function to deregister smartpqi sim from cam layer | ||||
*/ | */ | ||||
void deregister_sim(struct pqisrc_softstate *softs) | void | ||||
deregister_sim(struct pqisrc_softstate *softs) | |||||
{ | { | ||||
struct ccb_setasync csa; | struct ccb_setasync csa; | ||||
DBG_FUNC("IN\n"); | DBG_FUNC("IN\n"); | ||||
if (softs->os_specific.mtx_init) { | if (softs->os_specific.mtx_init) { | ||||
mtx_lock(&softs->os_specific.cam_lock); | mtx_lock(&softs->os_specific.cam_lock); | ||||
} | } | ||||
xpt_setup_ccb(&csa.ccb_h, softs->os_specific.path, 5); | xpt_setup_ccb(&csa.ccb_h, softs->os_specific.path, 5); | ||||
csa.ccb_h.func_code = XPT_SASYNC_CB; | csa.ccb_h.func_code = XPT_SASYNC_CB; | ||||
csa.event_enable = 0; | csa.event_enable = 0; | ||||
csa.callback = smartpqi_async; | csa.callback = smartpqi_async; | ||||
csa.callback_arg = softs; | csa.callback_arg = softs; | ||||
xpt_action((union ccb *)&csa); | xpt_action((union ccb *)&csa); | ||||
xpt_free_path(softs->os_specific.path); | xpt_free_path(softs->os_specific.path); | ||||
if (softs->os_specific.sim) { | |||||
xpt_release_simq(softs->os_specific.sim, 0); | xpt_release_simq(softs->os_specific.sim, 0); | ||||
xpt_bus_deregister(cam_sim_path(softs->os_specific.sim)); | xpt_bus_deregister(cam_sim_path(softs->os_specific.sim)); | ||||
softs->os_specific.sim_registered = FALSE; | softs->os_specific.sim_registered = FALSE; | ||||
if (softs->os_specific.sim) { | |||||
cam_sim_free(softs->os_specific.sim, FALSE); | cam_sim_free(softs->os_specific.sim, FALSE); | ||||
softs->os_specific.sim = NULL; | softs->os_specific.sim = NULL; | ||||
} | } | ||||
if (softs->os_specific.mtx_init) { | if (softs->os_specific.mtx_init) { | ||||
mtx_unlock(&softs->os_specific.cam_lock); | mtx_unlock(&softs->os_specific.cam_lock); | ||||
} | } | ||||
if (softs->os_specific.devq != NULL) { | if (softs->os_specific.devq != NULL) { | ||||
cam_simq_free(softs->os_specific.devq); | cam_simq_free(softs->os_specific.devq); | ||||
} | } | ||||
if (softs->os_specific.mtx_init) { | if (softs->os_specific.mtx_init) { | ||||
mtx_destroy(&softs->os_specific.cam_lock); | mtx_destroy(&softs->os_specific.cam_lock); | ||||
softs->os_specific.mtx_init = FALSE; | softs->os_specific.mtx_init = FALSE; | ||||
} | } | ||||
mtx_destroy(&softs->os_specific.map_lock); | mtx_destroy(&softs->os_specific.map_lock); | ||||
DBG_FUNC("OUT\n"); | DBG_FUNC("OUT\n"); | ||||
} | } |
This could likely be just a single Microchip copyright line,
since Microsemi is now owned by Microchip Technology, if I recall correctly.
But that's a question for your legal department.