Changeset View
Changeset View
Standalone View
Standalone View
sys/dev/smartpqi/smartpqi_request.c
/*- | /*- | ||||
* Copyright (c) 2018 Microsemi Corporation. | * Copyright 2016-2021 Microchip Technology, Inc. and/or its subsidiaries. | ||||
* All rights reserved. | |||||
* | * | ||||
* Redistribution and use in source and binary forms, with or without | * Redistribution and use in source and binary forms, with or without | ||||
* modification, are permitted provided that the following conditions | * modification, are permitted provided that the following conditions | ||||
* are met: | * are met: | ||||
* 1. Redistributions of source code must retain the above copyright | * 1. Redistributions of source code must retain the above copyright | ||||
* notice, this list of conditions and the following disclaimer. | * notice, this list of conditions and the following disclaimer. | ||||
* 2. Redistributions in binary form must reproduce the above copyright | * 2. Redistributions in binary form must reproduce the above copyright | ||||
* notice, this list of conditions and the following disclaimer in the | * notice, this list of conditions and the following disclaimer in the | ||||
Show All 11 Lines | |||||
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF | ||||
* SUCH DAMAGE. | * SUCH DAMAGE. | ||||
*/ | */ | ||||
/* $FreeBSD$ */ | /* $FreeBSD$ */ | ||||
#include "smartpqi_includes.h" | #include "smartpqi_includes.h" | ||||
/* | |||||
* Attempt to perform offload RAID mapping for a logical volume I/O. | |||||
*/ | |||||
#define HPSA_RAID_0 0 | |||||
#define HPSA_RAID_4 1 | |||||
#define HPSA_RAID_1 2 /* also used for RAID 10 */ | |||||
#define HPSA_RAID_5 3 /* also used for RAID 50 */ | |||||
#define HPSA_RAID_51 4 | |||||
#define HPSA_RAID_6 5 /* also used for RAID 60 */ | |||||
#define HPSA_RAID_ADM 6 /* also used for RAID 1+0 ADM */ | |||||
#define HPSA_RAID_MAX HPSA_RAID_ADM | |||||
#define HPSA_RAID_UNKNOWN 0xff | |||||
#define SG_FLAG_LAST 0x40000000 | #define SG_FLAG_LAST 0x40000000 | ||||
#define SG_FLAG_CHAIN 0x80000000 | #define SG_FLAG_CHAIN 0x80000000 | ||||
/* Subroutine to find out embedded sgl count in IU */ | /* Subroutine to find out embedded sgl count in IU */ | ||||
static inline | static inline uint32_t | ||||
uint32_t pqisrc_embedded_sgl_count(uint32_t elem_alloted) | pqisrc_embedded_sgl_count(uint32_t elem_alloted) | ||||
{ | { | ||||
uint32_t embedded_sgl_count = MAX_EMBEDDED_SG_IN_FIRST_IU; | uint32_t embedded_sgl_count = MAX_EMBEDDED_SG_IN_FIRST_IU; | ||||
DBG_FUNC(" IN "); | DBG_FUNC(" IN "); | ||||
/** | /** | ||||
calculate embedded sgl count using num_elem_alloted for IO | calculate embedded sgl count using num_elem_alloted for IO | ||||
**/ | **/ | ||||
if(elem_alloted - 1) | if(elem_alloted - 1) | ||||
embedded_sgl_count += ((elem_alloted - 1) * MAX_EMBEDDED_SG_IN_IU); | embedded_sgl_count += ((elem_alloted - 1) * MAX_EMBEDDED_SG_IN_IU); | ||||
DBG_IO("embedded_sgl_count :%d\n",embedded_sgl_count); | DBG_IO("embedded_sgl_count :%d\n",embedded_sgl_count); | ||||
DBG_FUNC(" OUT "); | DBG_FUNC(" OUT "); | ||||
return embedded_sgl_count; | return embedded_sgl_count; | ||||
} | } | ||||
/* Subroutine to find out contiguous free elem in IU */
static inline uint32_t
pqisrc_contiguous_free_elem(uint32_t pi, uint32_t ci, uint32_t elem_in_q)
{
	uint32_t free_elem;

	DBG_FUNC(" IN ");

	if (pi < ci) {
		/* Consumer is ahead: free run ends one slot before CI. */
		free_elem = ci - pi - 1;
	} else {
		/*
		 * Free run extends to the end of the ring; when CI sits at
		 * slot 0 the last slot must stay empty so PI != CI on a
		 * full queue.
		 */
		free_elem = elem_in_q - pi;
		if (ci == 0)
			free_elem -= 1;
	}

	DBG_FUNC(" OUT ");
	return free_elem;
}
/* Subroutine to find out num of elements need for the request */ | /* Subroutine to find out num of elements need for the request */ | ||||
static uint32_t | static uint32_t | ||||
pqisrc_num_elem_needed(pqisrc_softstate_t *softs, uint32_t SG_Count) | pqisrc_num_elem_needed(pqisrc_softstate_t *softs, uint32_t SG_Count) | ||||
{ | { | ||||
uint32_t num_sg; | uint32_t num_sg; | ||||
uint32_t num_elem_required = 1; | uint32_t num_elem_required = 1; | ||||
DBG_FUNC(" IN "); | DBG_FUNC(" IN "); | ||||
DBG_IO("SGL_Count :%d",SG_Count); | DBG_IO("SGL_Count :%d",SG_Count); | ||||
/******** | /******** | ||||
If SG_Count greater than max sg per IU i.e 4 or 68 | If SG_Count greater than max sg per IU i.e 4 or 68 | ||||
(4 is with out spanning or 68 is with spanning) chaining is required. | (4 is with out spanning or 68 is with spanning) chaining is required. | ||||
OR, If SG_Count <= MAX_EMBEDDED_SG_IN_FIRST_IU then, | OR, If SG_Count <= MAX_EMBEDDED_SG_IN_FIRST_IU then, | ||||
on these two cases one element is enough. | on these two cases one element is enough. | ||||
********/ | ********/ | ||||
if(SG_Count > softs->max_sg_per_iu || SG_Count <= MAX_EMBEDDED_SG_IN_FIRST_IU) | if(SG_Count > softs->max_sg_per_iu || SG_Count <= MAX_EMBEDDED_SG_IN_FIRST_IU) | ||||
return num_elem_required; | return num_elem_required; | ||||
/* | /* | ||||
SGL Count Other Than First IU | SGL Count Other Than First IU | ||||
*/ | */ | ||||
num_sg = SG_Count - MAX_EMBEDDED_SG_IN_FIRST_IU; | num_sg = SG_Count - MAX_EMBEDDED_SG_IN_FIRST_IU; | ||||
num_elem_required += PQISRC_DIV_ROUND_UP(num_sg, MAX_EMBEDDED_SG_IN_IU); | num_elem_required += PQISRC_DIV_ROUND_UP(num_sg, MAX_EMBEDDED_SG_IN_IU); | ||||
DBG_FUNC(" OUT "); | DBG_FUNC(" OUT "); | ||||
return num_elem_required; | return num_elem_required; | ||||
} | } | ||||
/* Subroutine to build SG list for the IU submission*/ | /* Subroutine to build SG list for the IU submission*/ | ||||
static | static boolean_t | ||||
boolean_t pqisrc_build_sgl(sgt_t *sg_array, rcb_t *rcb, iu_header_t *iu_hdr, | pqisrc_build_sgl(sgt_t *sg_array, rcb_t *rcb, iu_header_t *iu_hdr, | ||||
uint32_t num_elem_alloted) | uint32_t num_elem_alloted) | ||||
{ | { | ||||
uint32_t i; | uint32_t i; | ||||
uint32_t num_sg = OS_GET_IO_SG_COUNT(rcb); | uint32_t num_sg = OS_GET_IO_SG_COUNT(rcb); | ||||
sgt_t *sgt = sg_array; | sgt_t *sgt = sg_array; | ||||
sgt_t *sg_chain = NULL; | sgt_t *sg_chain = NULL; | ||||
boolean_t partial = false; | boolean_t partial = false; | ||||
DBG_FUNC(" IN "); | DBG_FUNC(" IN "); | ||||
DBG_IO("SGL_Count :%d",num_sg); | DBG_IO("SGL_Count :%d",num_sg); | ||||
if (0 == num_sg) { | if (0 == num_sg) { | ||||
goto out; | goto out; | ||||
} | } | ||||
if (num_sg <= pqisrc_embedded_sgl_count(num_elem_alloted)) { | if (num_sg <= pqisrc_embedded_sgl_count(num_elem_alloted)) { | ||||
for (i = 0; i < num_sg; i++, sgt++) { | for (i = 0; i < num_sg; i++, sgt++) { | ||||
sgt->addr= OS_GET_IO_SG_ADDR(rcb,i); | sgt->addr= OS_GET_IO_SG_ADDR(rcb,i); | ||||
sgt->len= OS_GET_IO_SG_LEN(rcb,i); | sgt->len= OS_GET_IO_SG_LEN(rcb,i); | ||||
sgt->flags= 0; | sgt->flags= 0; | ||||
} | } | ||||
sg_array[num_sg - 1].flags = SG_FLAG_LAST; | sg_array[num_sg - 1].flags = SG_FLAG_LAST; | ||||
} else { | } else { | ||||
/** | /** | ||||
SGL Chaining | SGL Chaining | ||||
**/ | **/ | ||||
sg_chain = rcb->sg_chain_virt; | sg_chain = rcb->sg_chain_virt; | ||||
sgt->addr = rcb->sg_chain_dma; | sgt->addr = rcb->sg_chain_dma; | ||||
sgt->len = num_sg * sizeof(sgt_t); | sgt->len = num_sg * sizeof(sgt_t); | ||||
sgt->flags = SG_FLAG_CHAIN; | sgt->flags = SG_FLAG_CHAIN; | ||||
sgt = sg_chain; | sgt = sg_chain; | ||||
for (i = 0; i < num_sg; i++, sgt++) { | for (i = 0; i < num_sg; i++, sgt++) { | ||||
sgt->addr = OS_GET_IO_SG_ADDR(rcb,i); | sgt->addr = OS_GET_IO_SG_ADDR(rcb,i); | ||||
sgt->len = OS_GET_IO_SG_LEN(rcb,i); | sgt->len = OS_GET_IO_SG_LEN(rcb,i); | ||||
sgt->flags = 0; | sgt->flags = 0; | ||||
} | } | ||||
sg_chain[num_sg - 1].flags = SG_FLAG_LAST; | sg_chain[num_sg - 1].flags = SG_FLAG_LAST; | ||||
num_sg = 1; | num_sg = 1; | ||||
partial = true; | partial = true; | ||||
} | } | ||||
out: | out: | ||||
iu_hdr->iu_length = num_sg * sizeof(sgt_t); | iu_hdr->iu_length = num_sg * sizeof(sgt_t); | ||||
DBG_FUNC(" OUT "); | DBG_FUNC(" OUT "); | ||||
return partial; | return partial; | ||||
} | } | ||||
/*Subroutine used to Build the RAID request */ | /*Subroutine used to Build the RAID request */ | ||||
static void | static void | ||||
pqisrc_build_raid_io(pqisrc_softstate_t *softs, rcb_t *rcb, | pqisrc_build_raid_io(pqisrc_softstate_t *softs, rcb_t *rcb, | ||||
pqisrc_raid_req_t *raid_req, uint32_t num_elem_alloted) | pqisrc_raid_req_t *raid_req, uint32_t num_elem_alloted) | ||||
{ | { | ||||
DBG_FUNC(" IN "); | DBG_FUNC(" IN "); | ||||
raid_req->header.iu_type = PQI_IU_TYPE_RAID_PATH_IO_REQUEST; | raid_req->header.iu_type = PQI_IU_TYPE_RAID_PATH_IO_REQUEST; | ||||
raid_req->header.comp_feature = 0; | raid_req->header.comp_feature = 0; | ||||
raid_req->response_queue_id = OS_GET_IO_RESP_QID(softs, rcb); | raid_req->response_queue_id = OS_GET_IO_RESP_QID(softs, rcb); | ||||
raid_req->work_area[0] = 0; | raid_req->work_area[0] = 0; | ||||
raid_req->work_area[1] = 0; | raid_req->work_area[1] = 0; | ||||
raid_req->request_id = rcb->tag; | raid_req->request_id = rcb->tag; | ||||
raid_req->nexus_id = 0; | raid_req->nexus_id = 0; | ||||
raid_req->buffer_length = GET_SCSI_BUFFLEN(rcb); | raid_req->buffer_length = GET_SCSI_BUFFLEN(rcb); | ||||
memcpy(raid_req->lun_number, rcb->dvp->scsi3addr, | memcpy(raid_req->lun_number, rcb->dvp->scsi3addr, | ||||
sizeof(raid_req->lun_number)); | sizeof(raid_req->lun_number)); | ||||
raid_req->protocol_spec = 0; | raid_req->protocol_spec = 0; | ||||
raid_req->data_direction = rcb->data_dir; | raid_req->data_direction = rcb->data_dir; | ||||
raid_req->reserved1 = 0; | raid_req->reserved1 = 0; | ||||
raid_req->fence = 0; | raid_req->fence = 0; | ||||
raid_req->error_index = raid_req->request_id; | raid_req->error_index = raid_req->request_id; | ||||
raid_req->reserved2 = 0; | raid_req->reserved2 = 0; | ||||
raid_req->task_attribute = OS_GET_TASK_ATTR(rcb); | raid_req->task_attribute = OS_GET_TASK_ATTR(rcb); | ||||
raid_req->command_priority = 0; | raid_req->command_priority = 0; | ||||
raid_req->reserved3 = 0; | raid_req->reserved3 = 0; | ||||
raid_req->reserved4 = 0; | raid_req->reserved4 = 0; | ||||
raid_req->reserved5 = 0; | raid_req->reserved5 = 0; | ||||
/* As cdb and additional_cdb_bytes are contiguous, | /* As cdb and additional_cdb_bytes are contiguous, | ||||
update them in a single statement */ | update them in a single statement */ | ||||
memcpy(raid_req->cdb, rcb->cdbp, rcb->cmdlen); | memcpy(raid_req->cdb, rcb->cdbp, rcb->cmdlen); | ||||
#if 0 | #if 0 | ||||
DBG_IO("CDB :"); | DBG_IO("CDB :"); | ||||
for(i = 0; i < rcb->cmdlen ; i++) | for(i = 0; i < rcb->cmdlen ; i++) | ||||
DBG_IO(" 0x%x \n ",raid_req->cdb[i]); | DBG_IO(" 0x%x \n ",raid_req->cdb[i]); | ||||
#endif | #endif | ||||
Show All 21 Lines | switch (rcb->cmdlen) { | ||||
default: /* todo:review again */ | default: /* todo:review again */ | ||||
raid_req->additional_cdb_bytes_usage = | raid_req->additional_cdb_bytes_usage = | ||||
PQI_ADDITIONAL_CDB_BYTES_16; | PQI_ADDITIONAL_CDB_BYTES_16; | ||||
break; | break; | ||||
} | } | ||||
/* Frame SGL Descriptor */ | /* Frame SGL Descriptor */ | ||||
raid_req->partial = pqisrc_build_sgl(&raid_req->sg_descriptors[0], rcb, | raid_req->partial = pqisrc_build_sgl(&raid_req->sg_descriptors[0], rcb, | ||||
&raid_req->header, num_elem_alloted); | &raid_req->header, num_elem_alloted); | ||||
raid_req->header.iu_length += | raid_req->header.iu_length += | ||||
offsetof(pqisrc_raid_req_t, sg_descriptors) - sizeof(iu_header_t); | offsetof(pqisrc_raid_req_t, sg_descriptors) - sizeof(iu_header_t); | ||||
#if 0 | #if 0 | ||||
DBG_IO("raid_req->header.iu_type : 0x%x", raid_req->header.iu_type); | DBG_IO("raid_req->header.iu_type : 0x%x", raid_req->header.iu_type); | ||||
DBG_IO("raid_req->response_queue_id :%d\n"raid_req->response_queue_id); | DBG_IO("raid_req->response_queue_id :%d\n"raid_req->response_queue_id); | ||||
DBG_IO("raid_req->request_id : 0x%x", raid_req->request_id); | DBG_IO("raid_req->request_id : 0x%x", raid_req->request_id); | ||||
DBG_IO("raid_req->buffer_length : 0x%x", raid_req->buffer_length); | DBG_IO("raid_req->buffer_length : 0x%x", raid_req->buffer_length); | ||||
DBG_IO("raid_req->task_attribute : 0x%x", raid_req->task_attribute); | DBG_IO("raid_req->task_attribute : 0x%x", raid_req->task_attribute); | ||||
DBG_IO("raid_req->lun_number : 0x%x", raid_req->lun_number); | DBG_IO("raid_req->lun_number : 0x%x", raid_req->lun_number); | ||||
DBG_IO("raid_req->error_index : 0x%x", raid_req->error_index); | DBG_IO("raid_req->error_index : 0x%x", raid_req->error_index); | ||||
DBG_IO("raid_req->sg_descriptors[0].addr : %p", (void*)raid_req->sg_descriptors[0].addr); | DBG_IO("raid_req->sg_descriptors[0].addr : %p", (void*)raid_req->sg_descriptors[0].addr); | ||||
DBG_IO("raid_req->sg_descriptors[0].len : 0x%x", raid_req->sg_descriptors[0].len); | DBG_IO("raid_req->sg_descriptors[0].len : 0x%x", raid_req->sg_descriptors[0].len); | ||||
DBG_IO("raid_req->sg_descriptors[0].flags : 0%x", raid_req->sg_descriptors[0].flags); | DBG_IO("raid_req->sg_descriptors[0].flags : 0%x", raid_req->sg_descriptors[0].flags); | ||||
#endif | #endif | ||||
rcb->success_cmp_callback = pqisrc_process_io_response_success; | rcb->success_cmp_callback = pqisrc_process_io_response_success; | ||||
rcb->error_cmp_callback = pqisrc_process_raid_response_error; | rcb->error_cmp_callback = pqisrc_process_raid_response_error; | ||||
rcb->resp_qid = raid_req->response_queue_id; | rcb->resp_qid = raid_req->response_queue_id; | ||||
DBG_FUNC(" OUT "); | DBG_FUNC(" OUT "); | ||||
} | } | ||||
/*Subroutine used to Build the AIO request */ | /*Subroutine used to Build the AIO request */ | ||||
static void | static void | ||||
pqisrc_build_aio_io(pqisrc_softstate_t *softs, rcb_t *rcb, | pqisrc_build_aio_io(pqisrc_softstate_t *softs, rcb_t *rcb, | ||||
pqi_aio_req_t *aio_req, uint32_t num_elem_alloted) | pqi_aio_req_t *aio_req, uint32_t num_elem_alloted) | ||||
{ | { | ||||
DBG_FUNC(" IN "); | DBG_FUNC(" IN "); | ||||
aio_req->header.iu_type = PQI_IU_TYPE_AIO_PATH_IO_REQUEST; | aio_req->header.iu_type = PQI_IU_TYPE_AIO_PATH_IO_REQUEST; | ||||
aio_req->header.comp_feature = 0; | aio_req->header.comp_feature = 0; | ||||
aio_req->response_queue_id = OS_GET_IO_RESP_QID(softs, rcb); | aio_req->response_queue_id = OS_GET_IO_RESP_QID(softs, rcb); | ||||
aio_req->work_area[0] = 0; | aio_req->work_area[0] = 0; | ||||
aio_req->work_area[1] = 0; | aio_req->work_area[1] = 0; | ||||
aio_req->req_id = rcb->tag; | aio_req->req_id = rcb->tag; | ||||
aio_req->res1[0] = 0; | aio_req->res1[0] = 0; | ||||
aio_req->res1[1] = 0; | aio_req->res1[1] = 0; | ||||
aio_req->nexus = rcb->ioaccel_handle; | aio_req->nexus = rcb->ioaccel_handle; | ||||
aio_req->buf_len = GET_SCSI_BUFFLEN(rcb); | aio_req->buf_len = GET_SCSI_BUFFLEN(rcb); | ||||
aio_req->data_dir = rcb->data_dir; | aio_req->data_dir = rcb->data_dir; | ||||
aio_req->mem_type = 0; | aio_req->mem_type = 0; | ||||
aio_req->fence = 0; | aio_req->fence = 0; | ||||
aio_req->res2 = 0; | aio_req->res2 = 0; | ||||
aio_req->task_attr = OS_GET_TASK_ATTR(rcb); | aio_req->task_attr = OS_GET_TASK_ATTR(rcb); | ||||
aio_req->cmd_prio = 0; | aio_req->cmd_prio = 0; | ||||
aio_req->res3 = 0; | aio_req->res3 = 0; | ||||
aio_req->err_idx = aio_req->req_id; | aio_req->err_idx = aio_req->req_id; | ||||
aio_req->cdb_len = rcb->cmdlen; | aio_req->cdb_len = rcb->cmdlen; | ||||
if(rcb->cmdlen > sizeof(aio_req->cdb)) | if(rcb->cmdlen > sizeof(aio_req->cdb)) | ||||
rcb->cmdlen = sizeof(aio_req->cdb); | rcb->cmdlen = sizeof(aio_req->cdb); | ||||
memcpy(aio_req->cdb, rcb->cdbp, rcb->cmdlen); | memcpy(aio_req->cdb, rcb->cdbp, rcb->cmdlen); | ||||
#if 0 | #if 0 | ||||
DBG_IO("CDB : \n"); | DBG_IO("CDB : \n"); | ||||
for(int i = 0; i < rcb->cmdlen ; i++) | for(int i = 0; i < rcb->cmdlen ; i++) | ||||
DBG_IO(" 0x%x \n",aio_req->cdb[i]); | DBG_IO(" 0x%x \n",aio_req->cdb[i]); | ||||
#endif | #endif | ||||
memset(aio_req->lun,0,sizeof(aio_req->lun)); | memset(aio_req->lun,0,sizeof(aio_req->lun)); | ||||
memset(aio_req->res4,0,sizeof(aio_req->res4)); | memset(aio_req->res4,0,sizeof(aio_req->res4)); | ||||
if(rcb->encrypt_enable == true) { | if(rcb->encrypt_enable == true) { | ||||
aio_req->encrypt_enable = true; | aio_req->encrypt_enable = true; | ||||
aio_req->encrypt_key_index = LE_16(rcb->enc_info.data_enc_key_index); | aio_req->encrypt_key_index = LE_16(rcb->enc_info.data_enc_key_index); | ||||
aio_req->encrypt_twk_low = LE_32(rcb->enc_info.encrypt_tweak_lower); | aio_req->encrypt_twk_low = LE_32(rcb->enc_info.encrypt_tweak_lower); | ||||
aio_req->encrypt_twk_high = LE_32(rcb->enc_info.encrypt_tweak_upper); | aio_req->encrypt_twk_high = LE_32(rcb->enc_info.encrypt_tweak_upper); | ||||
} else { | } else { | ||||
aio_req->encrypt_enable = 0; | aio_req->encrypt_enable = 0; | ||||
aio_req->encrypt_key_index = 0; | aio_req->encrypt_key_index = 0; | ||||
aio_req->encrypt_twk_high = 0; | aio_req->encrypt_twk_high = 0; | ||||
aio_req->encrypt_twk_low = 0; | aio_req->encrypt_twk_low = 0; | ||||
} | } | ||||
/* Frame SGL Descriptor */ | /* Frame SGL Descriptor */ | ||||
aio_req->partial = pqisrc_build_sgl(&aio_req->sg_desc[0], rcb, | aio_req->partial = pqisrc_build_sgl(&aio_req->sg_desc[0], rcb, | ||||
&aio_req->header, num_elem_alloted); | &aio_req->header, num_elem_alloted); | ||||
aio_req->num_sg = aio_req->header.iu_length / sizeof(sgt_t); | aio_req->num_sg = aio_req->header.iu_length / sizeof(sgt_t); | ||||
DBG_INFO("aio_req->num_sg :%d",aio_req->num_sg); | DBG_INFO("aio_req->num_sg :%d",aio_req->num_sg); | ||||
aio_req->header.iu_length += offsetof(pqi_aio_req_t, sg_desc) - | aio_req->header.iu_length += offsetof(pqi_aio_req_t, sg_desc) - | ||||
sizeof(iu_header_t); | sizeof(iu_header_t); | ||||
#if 0 | #if 0 | ||||
DBG_IO("aio_req->header.iu_type : 0x%x \n",aio_req->header.iu_type); | DBG_IO("aio_req->header.iu_type : 0x%x \n",aio_req->header.iu_type); | ||||
DBG_IO("aio_req->resp_qid :0x%x",aio_req->resp_qid); | DBG_IO("aio_req->resp_qid :0x%x",aio_req->resp_qid); | ||||
DBG_IO("aio_req->req_id : 0x%x \n",aio_req->req_id); | DBG_IO("aio_req->req_id : 0x%x \n",aio_req->req_id); | ||||
DBG_IO("aio_req->nexus : 0x%x \n",aio_req->nexus); | DBG_IO("aio_req->nexus : 0x%x \n",aio_req->nexus); | ||||
DBG_IO("aio_req->buf_len : 0x%x \n",aio_req->buf_len); | DBG_IO("aio_req->buf_len : 0x%x \n",aio_req->buf_len); | ||||
DBG_IO("aio_req->data_dir : 0x%x \n",aio_req->data_dir); | DBG_IO("aio_req->data_dir : 0x%x \n",aio_req->data_dir); | ||||
DBG_IO("aio_req->task_attr : 0x%x \n",aio_req->task_attr); | DBG_IO("aio_req->task_attr : 0x%x \n",aio_req->task_attr); | ||||
DBG_IO("aio_req->err_idx : 0x%x \n",aio_req->err_idx); | DBG_IO("aio_req->err_idx : 0x%x \n",aio_req->err_idx); | ||||
DBG_IO("aio_req->num_sg :%d",aio_req->num_sg); | DBG_IO("aio_req->num_sg :%d",aio_req->num_sg); | ||||
DBG_IO("aio_req->sg_desc[0].addr : %p \n", (void*)aio_req->sg_desc[0].addr); | DBG_IO("aio_req->sg_desc[0].addr : %p \n", (void*)aio_req->sg_desc[0].addr); | ||||
DBG_IO("aio_req->sg_desc[0].len : 0%x \n", aio_req->sg_desc[0].len); | DBG_IO("aio_req->sg_desc[0].len : 0%x \n", aio_req->sg_desc[0].len); | ||||
DBG_IO("aio_req->sg_desc[0].flags : 0%x \n", aio_req->sg_desc[0].flags); | DBG_IO("aio_req->sg_desc[0].flags : 0%x \n", aio_req->sg_desc[0].flags); | ||||
#endif | #endif | ||||
rcb->success_cmp_callback = pqisrc_process_io_response_success; | rcb->success_cmp_callback = pqisrc_process_io_response_success; | ||||
rcb->error_cmp_callback = pqisrc_process_aio_response_error; | rcb->error_cmp_callback = pqisrc_process_aio_response_error; | ||||
rcb->resp_qid = aio_req->response_queue_id; | rcb->resp_qid = aio_req->response_queue_id; | ||||
DBG_FUNC(" OUT "); | DBG_FUNC(" OUT "); | ||||
} | } | ||||
/*Function used to build and send RAID/AIO */ | /*Function used to build and send RAID/AIO */ | ||||
int pqisrc_build_send_io(pqisrc_softstate_t *softs,rcb_t *rcb) | int | ||||
pqisrc_build_send_io(pqisrc_softstate_t *softs,rcb_t *rcb) | |||||
{ | { | ||||
ib_queue_t *ib_q_array = softs->op_aio_ib_q; | ib_queue_t *ib_q_array = softs->op_aio_ib_q; | ||||
ib_queue_t *ib_q = NULL; | ib_queue_t *ib_q = NULL; | ||||
char *ib_iu = NULL; | char *ib_iu = NULL; | ||||
IO_PATH_T io_path = AIO_PATH; | IO_PATH_T io_path = AIO_PATH; | ||||
uint32_t TraverseCount = 0; | uint32_t TraverseCount = 0; | ||||
int first_qindex = OS_GET_IO_REQ_QINDEX(softs, rcb); | int first_qindex = OS_GET_IO_REQ_QINDEX(softs, rcb); | ||||
int qindex = first_qindex; | int qindex = first_qindex; | ||||
uint32_t num_op_ib_q = softs->num_op_aio_ibq; | uint32_t num_op_ib_q = softs->num_op_aio_ibq; | ||||
uint32_t num_elem_needed; | uint32_t num_elem_needed; | ||||
uint32_t num_elem_alloted = 0; | uint32_t num_elem_alloted = 0; | ||||
pqi_scsi_dev_t *devp = rcb->dvp; | pqi_scsi_dev_t *devp = rcb->dvp; | ||||
uint8_t raidbypass_cdb[16]; | uint8_t raidbypass_cdb[16]; | ||||
DBG_FUNC(" IN "); | DBG_FUNC(" IN "); | ||||
if(!rcb->aio_retry) { | |||||
rcb->cdbp = OS_GET_CDBP(rcb); | rcb->cdbp = OS_GET_CDBP(rcb); | ||||
if(IS_AIO_PATH(devp)) { | if(IS_AIO_PATH(devp)) { | ||||
/** IO for Physical Drive **/ | /** IO for Physical Drive **/ | ||||
/** Send in AIO PATH**/ | /** Send in AIO PATH**/ | ||||
rcb->ioaccel_handle = devp->ioaccel_handle; | rcb->ioaccel_handle = devp->ioaccel_handle; | ||||
} else { | } else { | ||||
int ret = PQI_STATUS_FAILURE; | int ret = PQI_STATUS_FAILURE; | ||||
/** IO for RAID Volume **/ | /** IO for RAID Volume **/ | ||||
if (devp->offload_enabled) { | if (devp->offload_enabled) { | ||||
/** ByPass IO ,Send in AIO PATH **/ | /** ByPass IO ,Send in AIO PATH **/ | ||||
ret = pqisrc_send_scsi_cmd_raidbypass(softs, | ret = pqisrc_send_scsi_cmd_raidbypass(softs, | ||||
devp, rcb, raidbypass_cdb); | devp, rcb, raidbypass_cdb); | ||||
} | } | ||||
if (PQI_STATUS_FAILURE == ret) { | if (PQI_STATUS_FAILURE == ret) { | ||||
/** Send in RAID PATH **/ | /** Send in RAID PATH **/ | ||||
io_path = RAID_PATH; | io_path = RAID_PATH; | ||||
num_op_ib_q = softs->num_op_raid_ibq; | num_op_ib_q = softs->num_op_raid_ibq; | ||||
ib_q_array = softs->op_raid_ib_q; | ib_q_array = softs->op_raid_ib_q; | ||||
} else { | } else { | ||||
rcb->cdbp = raidbypass_cdb; | rcb->cdbp = raidbypass_cdb; | ||||
} | } | ||||
} | } | ||||
} else { | |||||
/* Retrying failed AIO IO */ | |||||
io_path = RAID_PATH; | |||||
rcb->cdbp = OS_GET_CDBP(rcb); | |||||
num_op_ib_q = softs->num_op_raid_ibq; | |||||
ib_q_array = softs->op_raid_ib_q; | |||||
} | |||||
num_elem_needed = pqisrc_num_elem_needed(softs, OS_GET_IO_SG_COUNT(rcb)); | num_elem_needed = pqisrc_num_elem_needed(softs, OS_GET_IO_SG_COUNT(rcb)); | ||||
DBG_IO("num_elem_needed :%d",num_elem_needed); | DBG_IO("num_elem_needed :%d",num_elem_needed); | ||||
do { | do { | ||||
uint32_t num_elem_available; | uint32_t num_elem_available; | ||||
ib_q = (ib_q_array + qindex); | ib_q = (ib_q_array + qindex); | ||||
PQI_LOCK(&ib_q->lock); | PQI_LOCK(&ib_q->lock); | ||||
num_elem_available = pqisrc_contiguous_free_elem(ib_q->pi_local, | num_elem_available = pqisrc_contiguous_free_elem(ib_q->pi_local, | ||||
*(ib_q->ci_virt_addr), ib_q->num_elem); | *(ib_q->ci_virt_addr), ib_q->num_elem); | ||||
DBG_IO("num_elem_avialable :%d\n",num_elem_available); | DBG_IO("num_elem_avialable :%d\n",num_elem_available); | ||||
if(num_elem_available >= num_elem_needed) { | if(num_elem_available >= num_elem_needed) { | ||||
num_elem_alloted = num_elem_needed; | num_elem_alloted = num_elem_needed; | ||||
break; | break; | ||||
} | } | ||||
DBG_IO("Current queue is busy! Hop to next queue\n"); | DBG_IO("Current queue is busy! Hop to next queue\n"); | ||||
PQI_UNLOCK(&ib_q->lock); | PQI_UNLOCK(&ib_q->lock); | ||||
qindex = (qindex + 1) % num_op_ib_q; | qindex = (qindex + 1) % num_op_ib_q; | ||||
if(qindex == first_qindex) { | if(qindex == first_qindex) { | ||||
if (num_elem_needed == 1) | if (num_elem_needed == 1) | ||||
break; | break; | ||||
TraverseCount += 1; | TraverseCount += 1; | ||||
num_elem_needed = 1; | num_elem_needed = 1; | ||||
} | } | ||||
}while(TraverseCount < 2); | }while(TraverseCount < 2); | ||||
DBG_IO("num_elem_alloted :%d",num_elem_alloted); | DBG_IO("num_elem_alloted :%d",num_elem_alloted); | ||||
if (num_elem_alloted == 0) { | if (num_elem_alloted == 0) { | ||||
DBG_WARN("OUT: IB Queues were full\n"); | DBG_WARN("OUT: IB Queues were full\n"); | ||||
return PQI_STATUS_QFULL; | return PQI_STATUS_QFULL; | ||||
} | } | ||||
pqisrc_increment_device_active_io(softs,devp); | |||||
/* Get IB Queue Slot address to build IU */ | /* Get IB Queue Slot address to build IU */ | ||||
ib_iu = ib_q->array_virt_addr + (ib_q->pi_local * ib_q->elem_size); | ib_iu = ib_q->array_virt_addr + (ib_q->pi_local * ib_q->elem_size); | ||||
if(io_path == AIO_PATH) { | if(io_path == AIO_PATH) { | ||||
/** Build AIO structure **/ | /** Build AIO structure **/ | ||||
pqisrc_build_aio_io(softs, rcb, (pqi_aio_req_t*)ib_iu, | pqisrc_build_aio_io(softs, rcb, (pqi_aio_req_t*)ib_iu, | ||||
num_elem_alloted); | num_elem_alloted); | ||||
} else { | } else { | ||||
/** Build RAID structure **/ | /** Build RAID structure **/ | ||||
pqisrc_build_raid_io(softs, rcb, (pqisrc_raid_req_t*)ib_iu, | pqisrc_build_raid_io(softs, rcb, (pqisrc_raid_req_t*)ib_iu, | ||||
num_elem_alloted); | num_elem_alloted); | ||||
} | } | ||||
rcb->req_pending = true; | rcb->req_pending = true; | ||||
rcb->req_q = ib_q; | |||||
rcb->path = io_path; | |||||
/* Update the local PI */ | /* Update the local PI */ | ||||
ib_q->pi_local = (ib_q->pi_local + num_elem_alloted) % ib_q->num_elem; | ib_q->pi_local = (ib_q->pi_local + num_elem_alloted) % ib_q->num_elem; | ||||
DBG_INFO("ib_q->pi_local : %x\n", ib_q->pi_local); | DBG_INFO("ib_q->pi_local : %x\n", ib_q->pi_local); | ||||
DBG_INFO("*ib_q->ci_virt_addr: %x\n",*(ib_q->ci_virt_addr)); | DBG_INFO("*ib_q->ci_virt_addr: %x\n",*(ib_q->ci_virt_addr)); | ||||
/* Inform the fw about the new IU */ | /* Inform the fw about the new IU */ | ||||
PCI_MEM_PUT32(softs, ib_q->pi_register_abs, ib_q->pi_register_offset, ib_q->pi_local); | PCI_MEM_PUT32(softs, ib_q->pi_register_abs, ib_q->pi_register_offset, ib_q->pi_local); | ||||
PQI_UNLOCK(&ib_q->lock); | PQI_UNLOCK(&ib_q->lock); | ||||
DBG_FUNC(" OUT "); | DBG_FUNC(" OUT "); | ||||
return PQI_STATUS_SUCCESS; | return PQI_STATUS_SUCCESS; | ||||
} | } | ||||
/* Subroutine used to set encryption info as part of RAID bypass IO*/ | /* Subroutine used to set encryption info as part of RAID bypass IO*/ | ||||
static inline void pqisrc_set_enc_info( | static inline void | ||||
struct pqi_enc_info *enc_info, struct raid_map *raid_map, | pqisrc_set_enc_info(struct pqi_enc_info *enc_info, | ||||
uint64_t first_block) | struct raid_map *raid_map, uint64_t first_block) | ||||
{ | { | ||||
uint32_t volume_blk_size; | uint32_t volume_blk_size; | ||||
/* | /* | ||||
* Set the encryption tweak values based on logical block address. | * Set the encryption tweak values based on logical block address. | ||||
* If the block size is 512, the tweak value is equal to the LBA. | * If the block size is 512, the tweak value is equal to the LBA. | ||||
* For other block sizes, tweak value is (LBA * block size) / 512. | * For other block sizes, tweak value is (LBA * block size) / 512. | ||||
*/ | */ | ||||
volume_blk_size = GET_LE32((uint8_t *)&raid_map->volume_blk_size); | volume_blk_size = GET_LE32((uint8_t *)&raid_map->volume_blk_size); | ||||
if (volume_blk_size != 512) | if (volume_blk_size != 512) | ||||
first_block = (first_block * volume_blk_size) / 512; | first_block = (first_block * volume_blk_size) / 512; | ||||
enc_info->data_enc_key_index = | enc_info->data_enc_key_index = | ||||
GET_LE16((uint8_t *)&raid_map->data_encryption_key_index); | GET_LE16((uint8_t *)&raid_map->data_encryption_key_index); | ||||
enc_info->encrypt_tweak_upper = ((uint32_t)(((first_block) >> 16) >> 16)); | enc_info->encrypt_tweak_upper = ((uint32_t)(((first_block) >> 16) >> 16)); | ||||
enc_info->encrypt_tweak_lower = ((uint32_t)(first_block)); | enc_info->encrypt_tweak_lower = ((uint32_t)(first_block)); | ||||
} | } | ||||
/* | |||||
* Attempt to perform offload RAID mapping for a logical volume I/O. | |||||
*/ | |||||
#define HPSA_RAID_0 0 | |||||
#define HPSA_RAID_4 1 | |||||
#define HPSA_RAID_1 2 /* also used for RAID 10 */ | |||||
#define HPSA_RAID_5 3 /* also used for RAID 50 */ | |||||
#define HPSA_RAID_51 4 | |||||
#define HPSA_RAID_6 5 /* also used for RAID 60 */ | |||||
#define HPSA_RAID_ADM 6 /* also used for RAID 1+0 ADM */ | |||||
#define HPSA_RAID_MAX HPSA_RAID_ADM | |||||
#define HPSA_RAID_UNKNOWN 0xff | |||||
/* Subroutine used to parse the scsi opcode and build the CDB for RAID bypass*/ | /* Subroutine used to parse the scsi opcode and build the CDB for RAID bypass*/ | ||||
int check_for_scsi_opcode(uint8_t *cdb, boolean_t *is_write, uint64_t *fst_blk, | int | ||||
uint32_t *blk_cnt) { | check_for_scsi_opcode(uint8_t *cdb, boolean_t *is_write, uint64_t *fst_blk, | ||||
uint32_t *blk_cnt) | |||||
{ | |||||
switch (cdb[0]) { | switch (cdb[0]) { | ||||
case SCMD_WRITE_6: | case SCMD_WRITE_6: | ||||
*is_write = true; | *is_write = true; | ||||
case SCMD_READ_6: | case SCMD_READ_6: | ||||
*fst_blk = (uint64_t)(((cdb[1] & 0x1F) << 16) | | *fst_blk = (uint64_t)(((cdb[1] & 0x1F) << 16) | | ||||
(cdb[2] << 8) | cdb[3]); | (cdb[2] << 8) | cdb[3]); | ||||
*blk_cnt = (uint32_t)cdb[4]; | *blk_cnt = (uint32_t)cdb[4]; | ||||
if (*blk_cnt == 0) | if (*blk_cnt == 0) | ||||
Show All 19 Lines | case SCMD_READ_16: | ||||
break; | break; | ||||
default: | default: | ||||
/* Process via normal I/O path. */ | /* Process via normal I/O path. */ | ||||
return PQI_STATUS_FAILURE; | return PQI_STATUS_FAILURE; | ||||
} | } | ||||
return PQI_STATUS_SUCCESS; | return PQI_STATUS_SUCCESS; | ||||
} | } | ||||
/* print any arbitrary buffer of length total_len */ | |||||
void | |||||
pqisrc_print_buffer(pqisrc_softstate_t *softs, char *msg, void *user_buf, | |||||
uint32_t total_len, uint32_t flags) | |||||
{ | |||||
#define LINE_BUF_LEN 60 | |||||
#define INDEX_PER_LINE 16 | |||||
uint32_t buf_consumed = 0; | |||||
int ii; | |||||
char line_buf[LINE_BUF_LEN]; | |||||
int line_len; /* written length per line */ | |||||
uint8_t this_char; | |||||
if (user_buf == NULL) | |||||
return; | |||||
/* Print index columns */ | |||||
if (flags & PRINT_FLAG_HDR_COLUMN) | |||||
{ | |||||
for (ii = 0, line_len = 0; ii < MIN(total_len, 16); ii++) | |||||
{ | |||||
line_len += snprintf(line_buf + line_len, (LINE_BUF_LEN - line_len), "%02d ", ii); | |||||
if ((line_len + 4) >= LINE_BUF_LEN) | |||||
break; | |||||
} | |||||
DBG_NOTE("%15.15s:[ %s ]\n", "header", line_buf); | |||||
} | |||||
/* Print index columns */ | |||||
while(buf_consumed < total_len) | |||||
{ | |||||
memset(line_buf, 0, LINE_BUF_LEN); | |||||
for (ii = 0, line_len = 0; ii < INDEX_PER_LINE; ii++) | |||||
{ | |||||
this_char = *((char*)(user_buf) + buf_consumed); | |||||
line_len += snprintf(line_buf + line_len, (LINE_BUF_LEN - line_len), "%02x ", this_char); | |||||
buf_consumed++; | |||||
if (buf_consumed >= total_len || (line_len + 4) >= LINE_BUF_LEN) | |||||
break; | |||||
} | |||||
DBG_NOTE("%15.15s:[ %s ]\n", msg, line_buf); | |||||
} | |||||
} | |||||
/* | /* | ||||
* Function used to build and send RAID bypass request to the adapter | * Function used to build and send RAID bypass request to the adapter | ||||
*/ | */ | ||||
int pqisrc_send_scsi_cmd_raidbypass(pqisrc_softstate_t *softs, | int | ||||
pqisrc_send_scsi_cmd_raidbypass(pqisrc_softstate_t *softs, | |||||
pqi_scsi_dev_t *device, rcb_t *rcb, uint8_t *cdb) | pqi_scsi_dev_t *device, rcb_t *rcb, uint8_t *cdb) | ||||
{ | { | ||||
struct raid_map *raid_map; | struct raid_map *raid_map; | ||||
boolean_t is_write = false; | boolean_t is_write = false; | ||||
uint32_t map_idx; | uint32_t map_idx; | ||||
uint64_t fst_blk, lst_blk; | uint64_t fst_blk, lst_blk; | ||||
uint32_t blk_cnt, blks_per_row; | uint32_t blk_cnt, blks_per_row; | ||||
uint64_t fst_row, lst_row; | uint64_t fst_row, lst_row; | ||||
Show All 17 Lines | pqisrc_send_scsi_cmd_raidbypass(pqisrc_softstate_t *softs, | ||||
DBG_FUNC(" IN \n"); | DBG_FUNC(" IN \n"); | ||||
DBG_IO("!!!!!\n"); | DBG_IO("!!!!!\n"); | ||||
/* Check for eligible opcode, get LBA and block count. */ | /* Check for eligible opcode, get LBA and block count. */ | ||||
memcpy(cdb, OS_GET_CDBP(rcb), rcb->cmdlen); | memcpy(cdb, OS_GET_CDBP(rcb), rcb->cmdlen); | ||||
for(i = 0; i < rcb->cmdlen ; i++) | for(i = 0; i < rcb->cmdlen ; i++) | ||||
DBG_IO(" CDB [ %d ] : %x\n",i,cdb[i]); | DBG_IO(" CDB [ %d ] : %x\n",i,cdb[i]); | ||||
if(check_for_scsi_opcode(cdb, &is_write, | if(check_for_scsi_opcode(cdb, &is_write, | ||||
&fst_blk, &blk_cnt) == PQI_STATUS_FAILURE) | &fst_blk, &blk_cnt) == PQI_STATUS_FAILURE) | ||||
return PQI_STATUS_FAILURE; | return PQI_STATUS_FAILURE; | ||||
/* Check for write to non-RAID-0. */ | /* Check for write to non-RAID-0. */ | ||||
if (is_write && device->raid_level != SA_RAID_0) | if (is_write && device->raid_level != SA_RAID_0) | ||||
return PQI_STATUS_FAILURE; | return PQI_STATUS_FAILURE; | ||||
if(blk_cnt == 0) | if(blk_cnt == 0) | ||||
return PQI_STATUS_FAILURE; | return PQI_STATUS_FAILURE; | ||||
lst_blk = fst_blk + blk_cnt - 1; | lst_blk = fst_blk + blk_cnt - 1; | ||||
raid_map = device->raid_map; | raid_map = device->raid_map; | ||||
/* Check for invalid block or wraparound. */ | /* Check for invalid block or wraparound. */ | ||||
if (lst_blk >= GET_LE64((uint8_t *)&raid_map->volume_blk_cnt) || | if (lst_blk >= GET_LE64((uint8_t *)&raid_map->volume_blk_cnt) || | ||||
lst_blk < fst_blk) | lst_blk < fst_blk) | ||||
return PQI_STATUS_FAILURE; | return PQI_STATUS_FAILURE; | ||||
data_disks_per_row = GET_LE16((uint8_t *)&raid_map->data_disks_per_row); | data_disks_per_row = GET_LE16((uint8_t *)&raid_map->data_disks_per_row); | ||||
strip_sz = GET_LE16((uint8_t *)(&raid_map->strip_size)); | strip_sz = GET_LE16((uint8_t *)(&raid_map->strip_size)); | ||||
layout_map_count = GET_LE16((uint8_t *)(&raid_map->layout_map_count)); | layout_map_count = GET_LE16((uint8_t *)(&raid_map->layout_map_count)); | ||||
/* Calculate stripe information for the request. */ | /* Calculate stripe information for the request. */ | ||||
blks_per_row = data_disks_per_row * strip_sz; | blks_per_row = data_disks_per_row * strip_sz; | ||||
if (!blks_per_row) | if (!blks_per_row) | ||||
return PQI_STATUS_FAILURE; | return PQI_STATUS_FAILURE; /*Send the IO in raid path itself, not AIO or raidbypass*/ | ||||
/* use __udivdi3 ? */ | /* use __udivdi3 ? */ | ||||
fst_row = fst_blk / blks_per_row; | fst_row = fst_blk / blks_per_row; | ||||
lst_row = lst_blk / blks_per_row; | lst_row = lst_blk / blks_per_row; | ||||
fst_row_offset = (uint32_t)(fst_blk - (fst_row * blks_per_row)); | fst_row_offset = (uint32_t)(fst_blk - (fst_row * blks_per_row)); | ||||
lst_row_offset = (uint32_t)(lst_blk - (lst_row * blks_per_row)); | lst_row_offset = (uint32_t)(lst_blk - (lst_row * blks_per_row)); | ||||
fst_col = fst_row_offset / strip_sz; | fst_col = fst_row_offset / strip_sz; | ||||
lst_col = lst_row_offset / strip_sz; | lst_col = lst_row_offset / strip_sz; | ||||
▲ Show 20 Lines • Show All 108 Lines • ▼ Show 20 Lines | map_row = | ||||
GET_LE16((uint8_t *)(&raid_map->row_cnt)); | GET_LE16((uint8_t *)(&raid_map->row_cnt)); | ||||
map_idx = (fst_grp * | map_idx = (fst_grp * | ||||
(GET_LE16((uint8_t *)(&raid_map->row_cnt)) * | (GET_LE16((uint8_t *)(&raid_map->row_cnt)) * | ||||
total_disks_per_row)) + | total_disks_per_row)) + | ||||
(map_row * total_disks_per_row) + fst_col; | (map_row * total_disks_per_row) + fst_col; | ||||
} | } | ||||
if (map_idx >= RAID_MAP_MAX_ENTRIES) | |||||
return PQI_STATUS_FAILURE; | |||||
rcb->ioaccel_handle = raid_map->dev_data[map_idx].ioaccel_handle; | rcb->ioaccel_handle = raid_map->dev_data[map_idx].ioaccel_handle; | ||||
disk_block = GET_LE64((uint8_t *)(&raid_map->disk_starting_blk)) + | disk_block = GET_LE64((uint8_t *)(&raid_map->disk_starting_blk)) + | ||||
fst_row * strip_sz + | fst_row * strip_sz + | ||||
(fst_row_offset - fst_col * strip_sz); | (fst_row_offset - fst_col * strip_sz); | ||||
disk_blk_cnt = blk_cnt; | disk_blk_cnt = blk_cnt; | ||||
/* Handle differing logical/physical block sizes. */ | /* Handle differing logical/physical block sizes. */ | ||||
if (raid_map->phys_blk_shift) { | if (raid_map->phys_blk_shift) { | ||||
Show All 29 Lines | pqisrc_set_enc_info(&rcb->enc_info, raid_map, | ||||
fst_blk); | fst_blk); | ||||
rcb->encrypt_enable = true; | rcb->encrypt_enable = true; | ||||
} else { | } else { | ||||
rcb->encrypt_enable = false; | rcb->encrypt_enable = false; | ||||
} | } | ||||
rcb->cmdlen = cdb_length; | rcb->cmdlen = cdb_length; | ||||
DBG_FUNC("OUT"); | DBG_FUNC("OUT"); | ||||
return PQI_STATUS_SUCCESS; | return PQI_STATUS_SUCCESS; | ||||
} | } | ||||
/* Function used to submit an AIO TMF to the adapter
 * DEVICE_RESET is not supported.
 */
static int
pqisrc_send_aio_tmf(pqisrc_softstate_t *softs, pqi_scsi_dev_t *devp,
	rcb_t *rcb, rcb_t *rcb_to_manage, int tmf_type)
{
	int rval = PQI_STATUS_SUCCESS;
	pqi_aio_tmf_req_t tmf_req;
	ib_queue_t *op_ib_q = NULL;

	/* Zero the request so every field not set below goes out as 0. */
	memset(&tmf_req, 0, sizeof(pqi_aio_tmf_req_t));

	DBG_FUNC("IN");

	tmf_req.header.iu_type = PQI_REQUEST_IU_AIO_TASK_MANAGEMENT;
	tmf_req.header.iu_length = sizeof(tmf_req) - sizeof(iu_header_t);
	/* rcb->tag doubles as request id and error-table index */
	tmf_req.req_id = rcb->tag;
	tmf_req.error_idx = rcb->tag;
	tmf_req.nexus = devp->ioaccel_handle;
	//memcpy(tmf_req.lun, devp->scsi3addr, sizeof(tmf_req.lun));
	tmf_req.tmf = tmf_type;
	tmf_req.resp_qid = OS_GET_TMF_RESP_QID(softs, rcb);
	op_ib_q = &softs->op_aio_ib_q[0];

	if (tmf_type == SOP_TASK_MANAGEMENT_FUNCTION_ABORT_TASK) {
		/* For an abort, target the victim command's tag and nexus
		 * instead of the device-level handle set above. */
		tmf_req.req_id_to_manage = rcb_to_manage->tag;
		tmf_req.nexus = rcb_to_manage->ioaccel_handle;
	}

	DBG_INFO("tmf_req.header.iu_type : %x tmf_req.req_id_to_manage :%d \n",tmf_req.header.iu_type,tmf_req.req_id_to_manage);
	DBG_INFO("tmf_req.req_id : %d tmf_req.nexus : %x tmf_req.tmf %x QID : %d\n",tmf_req.req_id,tmf_req.nexus,tmf_req.tmf,op_ib_q->q_id);

	rcb->req_pending = true;
	/* Timedout tmf response goes here */
	rcb->error_cmp_callback = pqisrc_process_aio_response_error;

	rval = pqisrc_submit_cmnd(softs, op_ib_q, &tmf_req);
	if (rval != PQI_STATUS_SUCCESS) {
		DBG_ERR("Unable to submit command rval=%d\n", rval);
		return rval;
	}

	/* Block until the TMF completes or PQISRC_TMF_TIMEOUT expires. */
	rval = pqisrc_wait_on_condition(softs, rcb, PQISRC_TMF_TIMEOUT);
	if (rval != PQI_STATUS_SUCCESS){
		DBG_ERR("Task Management tmf_type : %d timeout\n", tmf_type);
		rcb->status = rval;
	}

	/* Even on a normal completion the firmware status may report failure. */
	if (rcb->status != REQUEST_SUCCESS) {
		DBG_ERR_BTL(devp, "Task Management failed tmf_type:%d "
			"stat:0x%x\n", tmf_type, rcb->status);
		rval = PQI_STATUS_FAILURE;
	}

	DBG_FUNC("OUT");
	return rval;
}
/* Function used to submit a Raid TMF to the adapter */
static int
pqisrc_send_raid_tmf(pqisrc_softstate_t *softs, pqi_scsi_dev_t *devp,
	rcb_t *rcb, rcb_t *rcb_to_manage, int tmf_type)
{
	int rval = PQI_STATUS_SUCCESS;
	pqi_raid_tmf_req_t tmf_req;
	ib_queue_t *op_ib_q = NULL;

	/* Zero the request so every field not set below goes out as 0. */
	memset(&tmf_req, 0, sizeof(pqi_raid_tmf_req_t));

	DBG_FUNC("IN");

	tmf_req.header.iu_type = PQI_REQUEST_IU_RAID_TASK_MANAGEMENT;
	tmf_req.header.iu_length = sizeof(tmf_req) - sizeof(iu_header_t);
	tmf_req.req_id = rcb->tag;
	memcpy(tmf_req.lun, devp->scsi3addr, sizeof(tmf_req.lun));
	tmf_req.tmf = tmf_type;
	tmf_req.resp_qid = OS_GET_TMF_RESP_QID(softs, rcb);

	/* Decide the queue where the tmf request should be submitted */
	if (tmf_type == SOP_TASK_MANAGEMENT_FUNCTION_ABORT_TASK) {
		/* Abort targets the victim command's response queue and tag. */
		tmf_req.obq_id_to_manage = rcb_to_manage->resp_qid;
		tmf_req.req_id_to_manage = rcb_to_manage->tag;
	}

	if (softs->timeout_in_tmf &&
			tmf_type == SOP_TASK_MANAGEMENT_LUN_RESET) {
		/* OS_TMF_TIMEOUT_SEC - 1 to accommodate driver processing */
		tmf_req.timeout_in_sec = OS_TMF_TIMEOUT_SEC - 1;
		/* if OS tmf timeout is 0, set minimum value for timeout */
		if (!tmf_req.timeout_in_sec)
			tmf_req.timeout_in_sec = 1;
	}

	op_ib_q = &softs->op_raid_ib_q[0];
	rcb->req_pending = true;
	/* Timedout tmf response goes here */
	rcb->error_cmp_callback = pqisrc_process_raid_response_error;

	rval = pqisrc_submit_cmnd(softs, op_ib_q, &tmf_req);
	if (rval != PQI_STATUS_SUCCESS) {
		DBG_ERR("Unable to submit command rval=%d\n", rval);
		return rval;
	}

	/* Block until the TMF completes or PQISRC_TMF_TIMEOUT expires. */
	rval = pqisrc_wait_on_condition(softs, rcb, PQISRC_TMF_TIMEOUT);
	if (rval != PQI_STATUS_SUCCESS) {
		DBG_ERR("Task Management tmf_type : %d timeout\n", tmf_type);
		rcb->status = rval;
	}

	/* Even on a normal completion the firmware status may report failure. */
	if (rcb->status != REQUEST_SUCCESS) {
		DBG_NOTE("Task Management failed tmf_type:%d "
			"stat:0x%x\n", tmf_type, rcb->status);
		rval = PQI_STATUS_FAILURE;
	}

	DBG_FUNC("OUT");
	return rval;
}
int | |||||
pqisrc_send_tmf(pqisrc_softstate_t *softs, pqi_scsi_dev_t *devp, | |||||
rcb_t *rcb, rcb_t *rcb_to_manage, int tmf_type) | |||||
{ | |||||
int ret = PQI_STATUS_SUCCESS; | |||||
DBG_FUNC("IN"); | |||||
if(!devp->is_physical_device) { | |||||
if (tmf_type == SOP_TASK_MANAGEMENT_FUNCTION_ABORT_TASK) { | |||||
if(rcb_to_manage->path == AIO_PATH) { | |||||
if(devp->offload_enabled) | |||||
ret = pqisrc_send_aio_tmf(softs, devp, rcb, rcb_to_manage, tmf_type); | |||||
} | |||||
else { | |||||
DBG_INFO("TASK ABORT not supported in raid\n"); | |||||
ret = PQI_STATUS_FAILURE; | |||||
} | |||||
} | |||||
else { | |||||
ret = pqisrc_send_raid_tmf(softs, devp, rcb, rcb_to_manage, tmf_type); | |||||
} | |||||
} else { | |||||
if (tmf_type == SOP_TASK_MANAGEMENT_FUNCTION_ABORT_TASK) | |||||
ret = pqisrc_send_aio_tmf(softs, devp, rcb, rcb_to_manage, tmf_type); | |||||
else | |||||
ret = pqisrc_send_raid_tmf(softs, devp, rcb, rcb_to_manage, tmf_type); | |||||
} | |||||
DBG_FUNC("IN"); | |||||
return ret; | |||||
} | |||||
/* | |||||
* Function used to build and send the vendor general request | |||||
* Used for configuring PQI feature bits between firmware and driver | |||||
*/ | |||||
int | |||||
pqisrc_build_send_vendor_request( | |||||
pqisrc_softstate_t *softs, | |||||
pqi_vendor_general_request_t *request, | |||||
raid_path_error_info_elem_t *error_info) | |||||
{ | |||||
int ret = PQI_STATUS_SUCCESS; | |||||
ib_queue_t *op_ib_q = &softs->op_raid_ib_q[PQI_DEFAULT_IB_QUEUE]; | |||||
ob_queue_t *ob_q = &softs->op_ob_q[PQI_DEFAULT_IB_QUEUE]; | |||||
rcb_t *rcb = NULL; | |||||
uint16_t request_id = 0; | |||||
/* Get the tag */ | |||||
request_id = pqisrc_get_tag(&softs->taglist); | |||||
if (INVALID_ELEM == request_id) { | |||||
DBG_ERR("Tag not available\n"); | |||||
ret = PQI_STATUS_FAILURE; | |||||
goto err_notag; | |||||
} | |||||
((pqi_vendor_general_request_t *)request)->request_id = request_id; | |||||
((pqi_vendor_general_request_t *)request)->response_queue_id = ob_q->q_id; | |||||
rcb = &softs->rcb[request_id]; | |||||
rcb->req_pending = true; | |||||
rcb->tag = request_id; | |||||
ret = pqisrc_submit_cmnd(softs, op_ib_q, request); | |||||
if (ret != PQI_STATUS_SUCCESS) { | |||||
DBG_ERR("Unable to submit command\n"); | |||||
goto err_out; | |||||
} | |||||
ret = pqisrc_wait_on_condition(softs, rcb, PQISRC_CMD_TIMEOUT); | |||||
if (ret != PQI_STATUS_SUCCESS) { | |||||
DBG_ERR("Management request timed out!\n"); | |||||
goto err_out; | |||||
} | |||||
ret = rcb->status; | |||||
if (ret) { | |||||
ret = PQI_STATUS_FAILURE; | |||||
if(error_info) { | |||||
// TODO: config table err handling. | |||||
} | |||||
} else { | |||||
if(error_info) { | |||||
ret = PQI_STATUS_SUCCESS; | |||||
memset(error_info, 0, sizeof(*error_info)); | |||||
} | |||||
} | |||||
os_reset_rcb(rcb); | |||||
pqisrc_put_tag(&softs->taglist, ((pqi_vendor_general_request_t *)request)->request_id); | |||||
DBG_FUNC("OUT\n"); | |||||
return ret; | |||||
err_out: | |||||
DBG_ERR("Vender general request submission failed.\n"); | |||||
os_reset_rcb(rcb); | |||||
pqisrc_put_tag(&softs->taglist, ((pqi_vendor_general_request_t *)request)->request_id); | |||||
err_notag: | |||||
DBG_FUNC("FAILED \n"); | |||||
return ret; | |||||
} | |||||
/* return the path as ASCII-string */ | |||||
char * | |||||
io_path_to_ascii(IO_PATH_T path) | |||||
{ | |||||
switch (path) | |||||
{ | |||||
case AIO_PATH: return "Aio"; | |||||
case RAID_PATH: return "Raid"; | |||||
default: return "Unknown"; | |||||
} | |||||
} | } |