Changeset View
Changeset View
Standalone View
Standalone View
sys/dev/smartpqi/smartpqi_init.c
/*- | /*- | ||||
* Copyright (c) 2018 Microsemi Corporation. | * Copyright 2016-2021 Microchip Technology, Inc. and/or its subsidiaries. | ||||
* All rights reserved. | |||||
* | * | ||||
* Redistribution and use in source and binary forms, with or without | * Redistribution and use in source and binary forms, with or without | ||||
* modification, are permitted provided that the following conditions | * modification, are permitted provided that the following conditions | ||||
* are met: | * are met: | ||||
* 1. Redistributions of source code must retain the above copyright | * 1. Redistributions of source code must retain the above copyright | ||||
* notice, this list of conditions and the following disclaimer. | * notice, this list of conditions and the following disclaimer. | ||||
* 2. Redistributions in binary form must reproduce the above copyright | * 2. Redistributions in binary form must reproduce the above copyright | ||||
* notice, this list of conditions and the following disclaimer in the | * notice, this list of conditions and the following disclaimer in the | ||||
Show All 11 Lines | |||||
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF | ||||
* SUCH DAMAGE. | * SUCH DAMAGE. | ||||
*/ | */ | ||||
/* $FreeBSD$ */ | /* $FreeBSD$ */ | ||||
#include "smartpqi_includes.h" | #include "smartpqi_includes.h" | ||||
/* 5 mins timeout for quiesce */ | |||||
#define PQI_QUIESCE_TIMEOUT 300000 | |||||
/* | /* | ||||
* Request the adapter to get PQI capabilities supported. | * Request the adapter to get PQI capabilities supported. | ||||
*/ | */ | ||||
static int pqisrc_report_pqi_capability(pqisrc_softstate_t *softs) | static int | ||||
pqisrc_report_pqi_capability(pqisrc_softstate_t *softs) | |||||
{ | { | ||||
int ret = PQI_STATUS_SUCCESS; | int ret = PQI_STATUS_SUCCESS; | ||||
DBG_FUNC("IN\n"); | DBG_FUNC("IN\n"); | ||||
gen_adm_req_iu_t admin_req; | gen_adm_req_iu_t admin_req; | ||||
gen_adm_resp_iu_t admin_resp; | gen_adm_resp_iu_t admin_resp; | ||||
dma_mem_t pqi_cap_dma_buf; | dma_mem_t pqi_cap_dma_buf; | ||||
pqi_dev_cap_t *capability = NULL; | pqi_dev_cap_t *capability = NULL; | ||||
pqi_iu_layer_desc_t *iu_layer_desc = NULL; | pqi_iu_layer_desc_t *iu_layer_desc = NULL; | ||||
Show All 28 Lines | pqisrc_report_pqi_capability(pqisrc_softstate_t *softs) | ||||
ret = pqisrc_submit_admin_req(softs, &admin_req, &admin_resp); | ret = pqisrc_submit_admin_req(softs, &admin_req, &admin_resp); | ||||
if( PQI_STATUS_SUCCESS == ret) { | if( PQI_STATUS_SUCCESS == ret) { | ||||
memcpy(capability, | memcpy(capability, | ||||
pqi_cap_dma_buf.virt_addr, | pqi_cap_dma_buf.virt_addr, | ||||
pqi_cap_dma_buf.size); | pqi_cap_dma_buf.size); | ||||
} else { | } else { | ||||
DBG_ERR("Failed to send admin req report pqi device capability\n"); | DBG_ERR("Failed to send admin req report pqi device capability\n"); | ||||
goto err_admin_req; | goto err_admin_req; | ||||
} | } | ||||
softs->pqi_dev_cap.max_iqs = capability->max_iqs; | softs->pqi_dev_cap.max_iqs = capability->max_iqs; | ||||
softs->pqi_dev_cap.max_iq_elements = capability->max_iq_elements; | softs->pqi_dev_cap.max_iq_elements = capability->max_iq_elements; | ||||
softs->pqi_dev_cap.max_iq_elem_len = capability->max_iq_elem_len; | softs->pqi_dev_cap.max_iq_elem_len = capability->max_iq_elem_len; | ||||
softs->pqi_dev_cap.min_iq_elem_len = capability->min_iq_elem_len; | softs->pqi_dev_cap.min_iq_elem_len = capability->min_iq_elem_len; | ||||
softs->pqi_dev_cap.max_oqs = capability->max_oqs; | softs->pqi_dev_cap.max_oqs = capability->max_oqs; | ||||
softs->pqi_dev_cap.max_oq_elements = capability->max_oq_elements; | softs->pqi_dev_cap.max_oq_elements = capability->max_oq_elements; | ||||
Show All 12 Lines | pqisrc_report_pqi_capability(pqisrc_softstate_t *softs) | ||||
DBG_INIT("softs->pqi_dev_cap.max_oqs: %d\n", softs->pqi_dev_cap.max_oqs); | DBG_INIT("softs->pqi_dev_cap.max_oqs: %d\n", softs->pqi_dev_cap.max_oqs); | ||||
DBG_INIT("softs->pqi_dev_cap.max_oq_elements: %d\n", softs->pqi_dev_cap.max_oq_elements); | DBG_INIT("softs->pqi_dev_cap.max_oq_elements: %d\n", softs->pqi_dev_cap.max_oq_elements); | ||||
DBG_INIT("softs->pqi_dev_cap.max_oq_elem_len: %d\n", softs->pqi_dev_cap.max_oq_elem_len); | DBG_INIT("softs->pqi_dev_cap.max_oq_elem_len: %d\n", softs->pqi_dev_cap.max_oq_elem_len); | ||||
DBG_INIT("softs->pqi_dev_cap.intr_coales_time_granularity: %d\n", softs->pqi_dev_cap.intr_coales_time_granularity); | DBG_INIT("softs->pqi_dev_cap.intr_coales_time_granularity: %d\n", softs->pqi_dev_cap.intr_coales_time_granularity); | ||||
DBG_INIT("softs->max_ib_iu_length_per_fw: %d\n", softs->max_ib_iu_length_per_fw); | DBG_INIT("softs->max_ib_iu_length_per_fw: %d\n", softs->max_ib_iu_length_per_fw); | ||||
DBG_INIT("softs->ib_spanning_supported: %d\n", softs->ib_spanning_supported); | DBG_INIT("softs->ib_spanning_supported: %d\n", softs->ib_spanning_supported); | ||||
DBG_INIT("softs->ob_spanning_supported: %d\n", softs->ob_spanning_supported); | DBG_INIT("softs->ob_spanning_supported: %d\n", softs->ob_spanning_supported); | ||||
os_mem_free(softs, (void *)capability, | os_mem_free(softs, (void *)capability, | ||||
REPORT_PQI_DEV_CAP_DATA_BUF_SIZE); | REPORT_PQI_DEV_CAP_DATA_BUF_SIZE); | ||||
os_dma_mem_free(softs, &pqi_cap_dma_buf); | os_dma_mem_free(softs, &pqi_cap_dma_buf); | ||||
DBG_FUNC("OUT\n"); | DBG_FUNC("OUT\n"); | ||||
return ret; | return ret; | ||||
err_admin_req: | err_admin_req: | ||||
os_dma_mem_free(softs, &pqi_cap_dma_buf); | os_dma_mem_free(softs, &pqi_cap_dma_buf); | ||||
err_dma_alloc: | err_dma_alloc: | ||||
if (capability) | if (capability) | ||||
os_mem_free(softs, (void *)capability, | os_mem_free(softs, (void *)capability, | ||||
REPORT_PQI_DEV_CAP_DATA_BUF_SIZE); | REPORT_PQI_DEV_CAP_DATA_BUF_SIZE); | ||||
err_out: | err_out: | ||||
DBG_FUNC("failed OUT\n"); | DBG_FUNC("failed OUT\n"); | ||||
return PQI_STATUS_FAILURE; | return PQI_STATUS_FAILURE; | ||||
} | } | ||||
/* | /* | ||||
* Function used to deallocate the used rcb. | * Function used to deallocate the used rcb. | ||||
*/ | */ | ||||
/*
 * Function used to deallocate the used rcb.
 */
void
pqisrc_free_rcb(pqisrc_softstate_t *softs, int req_count)
{
	size_t rcb_bytes;
	int idx;

	DBG_FUNC("IN\n");

	/* Tag 0 is never used, so one extra rcb slot was allocated. */
	rcb_bytes = (softs->max_outstanding_io + 1) * sizeof(rcb_t);

	/* Release the per-request SG descriptor DMA buffers (tags start at 1). */
	for (idx = 1; idx < req_count; idx++)
		os_dma_mem_free(softs, &softs->sg_dma_desc[idx]);

	os_mem_free(softs, (void *)softs->rcb, rcb_bytes);
	softs->rcb = NULL;

	DBG_FUNC("OUT\n");
}
/* | /* | ||||
* Allocate memory for rcb and SG descriptors. | * Allocate memory for rcb and SG descriptors. | ||||
*/ | */ | ||||
static int pqisrc_allocate_rcb(pqisrc_softstate_t *softs) | static int | ||||
pqisrc_allocate_rcb(pqisrc_softstate_t *softs) | |||||
{ | { | ||||
int ret = PQI_STATUS_SUCCESS; | int ret = PQI_STATUS_SUCCESS; | ||||
int i = 0; | int i = 0; | ||||
uint32_t num_req = 0; | uint32_t num_req = 0; | ||||
uint32_t sg_buf_size = 0; | uint32_t sg_buf_size = 0; | ||||
uint64_t alloc_size = 0; | uint64_t alloc_size = 0; | ||||
rcb_t *rcb = NULL; | rcb_t *rcb = NULL; | ||||
rcb_t *prcb = NULL; | rcb_t *prcb = NULL; | ||||
DBG_FUNC("IN\n"); | DBG_FUNC("IN\n"); | ||||
/* Set maximum outstanding requests */ | /* Set maximum outstanding requests */ | ||||
/* The valid tag values are from 1, 2, ..., softs->max_outstanding_io | /* The valid tag values are from 1, 2, ..., softs->max_outstanding_io | ||||
* The rcb will be accessed by using the tag as index | * The rcb will be accessed by using the tag as index | ||||
* * As 0 tag index is not used, we need to allocate one extra. | * As 0 tag index is not used, we need to allocate one extra. | ||||
*/ | */ | ||||
softs->max_outstanding_io = softs->pqi_cap.max_outstanding_io; | softs->max_outstanding_io = softs->pqi_cap.max_outstanding_io; | ||||
num_req = softs->max_outstanding_io + 1; | num_req = softs->max_outstanding_io + 1; | ||||
DBG_INIT("Max Outstanding IO reset to %d\n", num_req); | DBG_INIT("Max Outstanding IO reset to %d\n", num_req); | ||||
alloc_size = num_req * sizeof(rcb_t); | alloc_size = num_req * sizeof(rcb_t); | ||||
/* Allocate Non DMA memory */ | /* Allocate Non DMA memory */ | ||||
Show All 37 Lines | err_out: | ||||
DBG_FUNC("failed OUT\n"); | DBG_FUNC("failed OUT\n"); | ||||
return ret; | return ret; | ||||
} | } | ||||
/* | /* | ||||
* Function used to decide the operational queue configuration params | * Function used to decide the operational queue configuration params | ||||
* - no of ibq/obq, shared/non-shared interrupt resource, IU spanning support | * - no of ibq/obq, shared/non-shared interrupt resource, IU spanning support | ||||
*/ | */ | ||||
/*
 * Function used to decide the operational queue configuration params
 * - no of ibq/obq, shared/non-shared interrupt resource, IU spanning support
 */
void
pqisrc_decide_opq_config(pqisrc_softstate_t *softs)
{
	/*
	 * 32-bit on purpose: the product max_outstanding_io * span factor
	 * can exceed 65535, which a uint16_t local would silently truncate.
	 */
	uint32_t total_iq_elements;

	DBG_FUNC("IN\n");

	DBG_INIT("softs->intr_count : %d softs->num_cpus_online : %d",
		softs->intr_count, softs->num_cpus_online);

	if (softs->intr_count == 1 || softs->num_cpus_online == 1) {
		/* Share the event and Operational queue. */
		softs->num_op_obq = 1;
		softs->share_opq_and_eventq = true;
	} else {
		/* Note : One OBQ (OBQ0) reserved for event queue */
		softs->num_op_obq = MIN(softs->num_cpus_online,
					softs->intr_count) - 1;
		softs->share_opq_and_eventq = false;
	}

	/*
	 * If the available interrupt count is more than one,
	 * we don't need to share the interrupt for IO and event queue.
	 */
	if (softs->intr_count > 1)
		softs->share_opq_and_eventq = false;

	DBG_INIT("softs->num_op_obq : %d\n",softs->num_op_obq);

	/* One RAID and one AIO inbound queue per outbound queue. */
	softs->num_op_raid_ibq = softs->num_op_obq;
	softs->num_op_aio_ibq = softs->num_op_raid_ibq;

	/* Device capability reports element lengths in 16-byte units. */
	softs->ibq_elem_size = softs->pqi_dev_cap.max_iq_elem_len * 16;
	softs->obq_elem_size = softs->pqi_dev_cap.max_oq_elem_len * 16;

	if (softs->max_ib_iu_length_per_fw == 256 &&
	    softs->ob_spanning_supported) {
		/* older f/w that doesn't actually support spanning. */
		softs->max_ib_iu_length = softs->ibq_elem_size;
	} else {
		/* max. inbound IU length is a multiple of our inbound element size. */
		softs->max_ib_iu_length =
			(softs->max_ib_iu_length_per_fw / softs->ibq_elem_size) *
			softs->ibq_elem_size;
	}

	/* If Max. Outstanding IO came with Max. Spanning element count then,
	   needed elements per IO are multiplication of
	   Max.Outstanding IO and Max.Spanning element */
	total_iq_elements = (softs->max_outstanding_io *
		(softs->max_ib_iu_length / softs->ibq_elem_size));

	softs->num_elem_per_op_ibq = total_iq_elements / softs->num_op_raid_ibq;
	softs->num_elem_per_op_ibq = MIN(softs->num_elem_per_op_ibq,
		softs->pqi_dev_cap.max_iq_elements);

	softs->num_elem_per_op_obq = softs->max_outstanding_io / softs->num_op_obq;
	softs->num_elem_per_op_obq = MIN(softs->num_elem_per_op_obq,
		softs->pqi_dev_cap.max_oq_elements);

	/* IUs after the first in a span carry only SG elements. */
	softs->max_sg_per_iu = ((softs->max_ib_iu_length -
				softs->ibq_elem_size) /
				sizeof(sgt_t)) +
				MAX_EMBEDDED_SG_IN_FIRST_IU;

	DBG_INIT("softs->max_ib_iu_length: %d\n", softs->max_ib_iu_length);
	DBG_INIT("softs->num_elem_per_op_ibq: %d\n", softs->num_elem_per_op_ibq);
	DBG_INIT("softs->num_elem_per_op_obq: %d\n", softs->num_elem_per_op_obq);
	DBG_INIT("softs->max_sg_per_iu: %d\n", softs->max_sg_per_iu);

	DBG_FUNC("OUT\n");
}
/* | /* | ||||
* Configure the operational queue parameters. | * Configure the operational queue parameters. | ||||
*/ | */ | ||||
/*
 * Configure the operational queue parameters.
 */
int
pqisrc_configure_op_queues(pqisrc_softstate_t *softs)
{
	int ret;

	/* Get the PQI capability,
	REPORT PQI DEVICE CAPABILITY request */
	ret = pqisrc_report_pqi_capability(softs);
	if (ret) {
		DBG_ERR("Failed to send report pqi dev capability request : %d\n",
			ret);
		DBG_FUNC("OUT failed\n");
		return ret;
	}

	/* Reserve required no of slots for internal requests */
	softs->max_io_for_scsi_ml = softs->max_outstanding_io - PQI_RESERVED_IO_SLOTS_CNT;

	/* Decide the Op queue configuration */
	pqisrc_decide_opq_config(softs);

	DBG_FUNC("OUT\n");
	return ret;
}
/* | /* | ||||
* Validate the PQI mode of adapter. | * Validate the PQI mode of adapter. | ||||
*/ | */ | ||||
int pqisrc_check_pqimode(pqisrc_softstate_t *softs) | int | ||||
pqisrc_check_pqimode(pqisrc_softstate_t *softs) | |||||
{ | { | ||||
int ret = PQI_STATUS_FAILURE; | int ret = PQI_STATUS_FAILURE; | ||||
int tmo = 0; | int tmo = 0; | ||||
uint64_t signature = 0; | uint64_t signature = 0; | ||||
DBG_FUNC("IN\n"); | DBG_FUNC("IN\n"); | ||||
/* Check the PQI device signature */ | /* Check the PQI device signature */ | ||||
tmo = PQISRC_PQIMODE_READY_TIMEOUT; | tmo = PQISRC_PQIMODE_READY_TIMEOUT; | ||||
do { | do { | ||||
signature = LE_64(PCI_MEM_GET64(softs, &softs->pqi_reg->signature, PQI_SIGNATURE)); | signature = LE_64(PCI_MEM_GET64(softs, &softs->pqi_reg->signature, PQI_SIGNATURE)); | ||||
if (memcmp(&signature, PQISRC_PQI_DEVICE_SIGNATURE, | if (memcmp(&signature, PQISRC_PQI_DEVICE_SIGNATURE, | ||||
sizeof(uint64_t)) == 0) { | sizeof(uint64_t)) == 0) { | ||||
ret = PQI_STATUS_SUCCESS; | ret = PQI_STATUS_SUCCESS; | ||||
break; | break; | ||||
} | } | ||||
OS_SLEEP(PQISRC_MODE_READY_POLL_INTERVAL); | OS_SLEEP(PQISRC_MODE_READY_POLL_INTERVAL); | ||||
} while (tmo--); | } while (tmo--); | ||||
Show All 10 Lines | pqisrc_check_pqimode(pqisrc_softstate_t *softs) | ||||
COND_WAIT((PCI_MEM_GET64(softs, &softs->pqi_reg->admin_q_config, | COND_WAIT((PCI_MEM_GET64(softs, &softs->pqi_reg->admin_q_config, | ||||
PQI_ADMINQ_CONFIG) == PQI_ADMIN_QUEUE_CONF_FUNC_STATUS_IDLE), tmo); | PQI_ADMINQ_CONFIG) == PQI_ADMIN_QUEUE_CONF_FUNC_STATUS_IDLE), tmo); | ||||
if (!tmo) { | if (!tmo) { | ||||
DBG_ERR("PQI device is not in IDLE state\n"); | DBG_ERR("PQI device is not in IDLE state\n"); | ||||
ret = PQI_STATUS_TIMEOUT; | ret = PQI_STATUS_TIMEOUT; | ||||
goto err_out; | goto err_out; | ||||
} | } | ||||
tmo = PQISRC_PQIMODE_READY_TIMEOUT; | tmo = PQISRC_PQIMODE_READY_TIMEOUT; | ||||
/* Check the PQI device status register */ | /* Check the PQI device status register */ | ||||
COND_WAIT(LE_32(PCI_MEM_GET32(softs, &softs->pqi_reg->pqi_dev_status, PQI_DEV_STATUS)) & | COND_WAIT(LE_32(PCI_MEM_GET32(softs, &softs->pqi_reg->pqi_dev_status, PQI_DEV_STATUS)) & | ||||
PQI_DEV_STATE_AT_INIT, tmo); | PQI_DEV_STATE_AT_INIT, tmo); | ||||
if (!tmo) { | if (!tmo) { | ||||
DBG_ERR("PQI Registers are not ready\n"); | DBG_ERR("PQI Registers are not ready\n"); | ||||
ret = PQI_STATUS_TIMEOUT; | ret = PQI_STATUS_TIMEOUT; | ||||
goto err_out; | goto err_out; | ||||
} | } | ||||
DBG_FUNC("OUT\n"); | DBG_FUNC("OUT\n"); | ||||
return ret; | return ret; | ||||
err_out: | err_out: | ||||
DBG_FUNC("OUT failed\n"); | DBG_FUNC("OUT failed\n"); | ||||
return ret; | return ret; | ||||
} | } | ||||
/* PQI Feature processing */ | |||||
static int | |||||
pqisrc_config_table_update(struct pqisrc_softstate *softs, | |||||
uint16_t first_section, uint16_t last_section) | |||||
{ | |||||
pqi_vendor_general_request_t request; | |||||
int ret = PQI_STATUS_FAILURE; | |||||
memset(&request, 0, sizeof(request)); | |||||
request.header.iu_type = PQI_REQUEST_IU_VENDOR_GENERAL; | |||||
request.header.iu_length = sizeof(request) - PQI_REQUEST_HEADER_LENGTH; | |||||
request.function_code = PQI_VENDOR_GENERAL_CONFIG_TABLE_UPDATE; | |||||
request.data.config_table_update.first_section = first_section; | |||||
request.data.config_table_update.last_section = last_section; | |||||
ret = pqisrc_build_send_vendor_request(softs, &request, NULL); | |||||
if (ret != PQI_STATUS_SUCCESS) { | |||||
DBG_ERR("Failed to submit vendor general request IU, Ret status: %d\n", ret); | |||||
return PQI_STATUS_FAILURE; | |||||
} | |||||
return PQI_STATUS_SUCCESS; | |||||
} | |||||
static inline | |||||
boolean_t pqi_is_firmware_feature_supported( | |||||
struct pqi_conf_table_firmware_features *firmware_feature_list, | |||||
unsigned int bit_position) | |||||
{ | |||||
unsigned int byte_index; | |||||
byte_index = bit_position / BITS_PER_BYTE; | |||||
if (byte_index >= firmware_feature_list->num_elements) | |||||
return false; | |||||
return firmware_feature_list->features_supported[byte_index] & | |||||
(1 << (bit_position % BITS_PER_BYTE)) ? true : false; | |||||
} | |||||
static inline | |||||
boolean_t pqi_is_firmware_feature_enabled( | |||||
struct pqi_conf_table_firmware_features *firmware_feature_list, | |||||
uint8_t *firmware_features_addr, unsigned int bit_position) | |||||
{ | |||||
unsigned int byte_index; | |||||
uint8_t *feature_enabled_addr; | |||||
byte_index = (bit_position / BITS_PER_BYTE) + | |||||
(firmware_feature_list->num_elements * 2); | |||||
feature_enabled_addr = firmware_features_addr + | |||||
offsetof(struct pqi_conf_table_firmware_features, | |||||
features_supported) + byte_index; | |||||
return *feature_enabled_addr & | |||||
(1 << (bit_position % BITS_PER_BYTE)) ? true : false; | |||||
} | |||||
static inline void | |||||
pqi_request_firmware_feature( | |||||
struct pqi_conf_table_firmware_features *firmware_feature_list, | |||||
unsigned int bit_position) | |||||
{ | |||||
unsigned int byte_index; | |||||
byte_index = (bit_position / BITS_PER_BYTE) + | |||||
firmware_feature_list->num_elements; | |||||
firmware_feature_list->features_supported[byte_index] |= | |||||
(1 << (bit_position % BITS_PER_BYTE)); | |||||
} | |||||
/* Update PQI config table firmware features section and inform the firmware */ | |||||
static int | |||||
pqisrc_set_host_requested_firmware_feature(pqisrc_softstate_t *softs, | |||||
struct pqi_conf_table_firmware_features *firmware_feature_list) | |||||
{ | |||||
uint8_t *request_feature_addr; | |||||
void *request_feature_abs_addr; | |||||
request_feature_addr = firmware_feature_list->features_supported + | |||||
firmware_feature_list->num_elements; | |||||
request_feature_abs_addr = softs->fw_features_section_abs_addr + | |||||
(request_feature_addr - (uint8_t*)firmware_feature_list); | |||||
os_io_memcpy(request_feature_abs_addr, request_feature_addr, | |||||
firmware_feature_list->num_elements); | |||||
return pqisrc_config_table_update(softs, | |||||
PQI_CONF_TABLE_SECTION_FIRMWARE_FEATURES, | |||||
PQI_CONF_TABLE_SECTION_FIRMWARE_FEATURES); | |||||
} | |||||
/* Check firmware has enabled the feature specified in the respective bit position. */ | |||||
inline boolean_t | |||||
pqisrc_is_firmware_feature_enabled(pqisrc_softstate_t *softs, | |||||
struct pqi_conf_table_firmware_features *firmware_feature_list, uint16_t bit_position) | |||||
{ | |||||
uint16_t byte_index; | |||||
uint8_t *features_enabled_abs_addr; | |||||
byte_index = (bit_position / BITS_PER_BYTE) + | |||||
(firmware_feature_list->num_elements * 2); | |||||
features_enabled_abs_addr = softs->fw_features_section_abs_addr + | |||||
offsetof(struct pqi_conf_table_firmware_features,features_supported) + byte_index; | |||||
return *features_enabled_abs_addr & | |||||
(1 << (bit_position % BITS_PER_BYTE)) ? true : false; | |||||
} | |||||
static void | |||||
pqi_firmware_feature_status(struct pqisrc_softstate *softs, | |||||
struct pqi_firmware_feature *firmware_feature) | |||||
{ | |||||
switch(firmware_feature->feature_bit) { | |||||
case PQI_FIRMWARE_FEATURE_OFA: | |||||
break; | |||||
case PQI_FIRMWARE_FEATURE_TIMEOUT_IN_RAID_IU_SUPPORT: | |||||
softs->timeout_in_passthrough = true; | |||||
break; | |||||
case PQI_FIRMWARE_FEATURE_TIMEOUT_IN_TMF_IU_SUPPORT: | |||||
softs->timeout_in_tmf = true; | |||||
break; | |||||
default: | |||||
DBG_NOTE("Nothing to do \n"); | |||||
} | |||||
} | |||||
/* Firmware features supported by the driver */ | |||||
static struct | |||||
pqi_firmware_feature pqi_firmware_features[] = { | |||||
{ | |||||
.feature_name = "Support timeout for pass-through commands", | |||||
.feature_bit = PQI_FIRMWARE_FEATURE_TIMEOUT_IN_RAID_IU_SUPPORT, | |||||
.feature_status = pqi_firmware_feature_status, | |||||
}, | |||||
{ | |||||
.feature_name = "Support timeout for LUN Reset TMF", | |||||
.feature_bit = PQI_FIRMWARE_FEATURE_TIMEOUT_IN_TMF_IU_SUPPORT, | |||||
.feature_status = pqi_firmware_feature_status, | |||||
} | |||||
}; | |||||
static void | |||||
pqisrc_process_firmware_features(pqisrc_softstate_t *softs) | |||||
{ | |||||
int rc; | |||||
struct pqi_conf_table_firmware_features *firmware_feature_list; | |||||
unsigned int i; | |||||
unsigned int num_features_requested; | |||||
firmware_feature_list = (struct pqi_conf_table_firmware_features*) | |||||
softs->fw_features_section_abs_addr; | |||||
/* Check features and request those supported by firmware and driver.*/ | |||||
for (i = 0, num_features_requested = 0; | |||||
i < ARRAY_SIZE(pqi_firmware_features); i++) { | |||||
/* Firmware support it ? */ | |||||
if (pqi_is_firmware_feature_supported(firmware_feature_list, | |||||
pqi_firmware_features[i].feature_bit)) { | |||||
pqi_request_firmware_feature(firmware_feature_list, | |||||
pqi_firmware_features[i].feature_bit); | |||||
pqi_firmware_features[i].supported = true; | |||||
num_features_requested++; | |||||
DBG_NOTE("%s supported by driver, requesting firmware to enable it\n", | |||||
pqi_firmware_features[i].feature_name); | |||||
} else { | |||||
DBG_NOTE("%s supported by driver, but not by current firmware\n", | |||||
pqi_firmware_features[i].feature_name); | |||||
} | |||||
} | |||||
if (num_features_requested == 0) | |||||
return; | |||||
rc = pqisrc_set_host_requested_firmware_feature(softs, firmware_feature_list); | |||||
if (rc) { | |||||
DBG_ERR("Failed to update pqi config table\n"); | |||||
return; | |||||
} | |||||
for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) { | |||||
if (pqi_is_firmware_feature_enabled(firmware_feature_list, | |||||
softs->fw_features_section_abs_addr, pqi_firmware_features[i].feature_bit)) { | |||||
pqi_firmware_features[i].enabled = true; | |||||
DBG_NOTE("Firmware feature %s enabled \n",pqi_firmware_features[i].feature_name); | |||||
if(pqi_firmware_features[i].feature_status) | |||||
pqi_firmware_features[i].feature_status(softs, &(pqi_firmware_features[i])); | |||||
} | |||||
} | |||||
} | |||||
/* | /* | ||||
* Get the PQI configuration table parameters. | * Get the PQI configuration table parameters. | ||||
* Currently using for heart-beat counter scratch-pad register. | * Currently using for heart-beat counter scratch-pad register. | ||||
*/ | */ | ||||
/*
 * Get the PQI configuration table parameters.
 * Currently using for heart-beat counter scratch-pad register.
 */
int
pqisrc_process_config_table(pqisrc_softstate_t *softs)
{
	int ret = PQI_STATUS_FAILURE;
	uint32_t tab_size;
	uint32_t off;
	uint8_t *tab_abs_addr;
	struct pqi_conf_table *table;
	struct pqi_conf_table_section_header *hdr;

	tab_size = softs->pqi_cap.conf_tab_sz;

	/* Sanity-check the controller-reported table length. */
	if (tab_size < sizeof(*table) || tab_size > PQI_CONF_TABLE_MAX_LEN) {
		DBG_ERR("Invalid PQI conf table length of %u\n", tab_size);
		return ret;
	}

	table = os_mem_alloc(softs, tab_size);
	if (!table) {
		DBG_ERR("Failed to allocate memory for PQI conf table\n");
		return ret;
	}

	/* Copy the whole table out of BAR space into the local buffer. */
	tab_abs_addr = (uint8_t *)(softs->pci_mem_base_vaddr +
		softs->pqi_cap.conf_tab_off);
	PCI_MEM_GET_BUF(softs, tab_abs_addr, softs->pqi_cap.conf_tab_off,
		(uint8_t *)table, tab_size);

	if (memcmp(table->sign, PQI_CONF_TABLE_SIGNATURE,
			sizeof(table->sign)) != 0) {
		DBG_ERR("Invalid PQI config signature\n");
		goto out;
	}

	/* Walk the linked list of sections; offset 0 terminates it. */
	off = LE_32(table->first_section_off);
	while (off) {
		if (off + sizeof(*hdr) >= tab_size) {
			DBG_INFO("Reached end of PQI config table. Breaking off.\n");
			break;
		}

		hdr = (struct pqi_conf_table_section_header *)((uint8_t *)table + off);

		switch (LE_16(hdr->section_id)) {
		case PQI_CONF_TABLE_SECTION_GENERAL_INFO:
		case PQI_CONF_TABLE_SECTION_FIRMWARE_ERRATA:
		case PQI_CONF_TABLE_SECTION_DEBUG:
			/* Known sections with nothing to latch. */
			break;
		case PQI_CONF_TABLE_SECTION_FIRMWARE_FEATURES:
			/* Remember where the features section lives in BAR space
			 * and negotiate optional features with the firmware. */
			softs->fw_features_section_off = softs->pqi_cap.conf_tab_off + off;
			softs->fw_features_section_abs_addr =
				softs->pci_mem_base_vaddr + softs->fw_features_section_off;
			pqisrc_process_firmware_features(softs);
			break;
		case PQI_CONF_TABLE_SECTION_HEARTBEAT:
			/* Record the BAR offset/address of the heartbeat counter. */
			softs->heartbeat_counter_off = softs->pqi_cap.conf_tab_off +
				off +
				offsetof(struct pqi_conf_table_heartbeat,
					heartbeat_counter);
			softs->heartbeat_counter_abs_addr =
				(uint64_t *)(softs->pci_mem_base_vaddr +
				softs->heartbeat_counter_off);
			/* Success requires at least the heartbeat section. */
			ret = PQI_STATUS_SUCCESS;
			break;
		default:
			DBG_INFO("unrecognized PQI config table section ID: 0x%x\n",
				LE_16(hdr->section_id));
			break;
		}

		off = LE_16(hdr->next_section_off);
	}
out:
	os_mem_free(softs, (void *)table, tab_size);
	return ret;
}
/* Wait for PQI reset completion for the adapter*/ | /* Wait for PQI reset completion for the adapter*/ | ||||
/* Wait for PQI reset completion for the adapter*/
int
pqisrc_wait_for_pqi_reset_completion(pqisrc_softstate_t *softs)
{
	pqi_reset_reg_t reset_reg;
	uint64_t adminq_cap;
	uint32_t max_timeout;
	uint32_t attempts;

	/* Bits 47:32 of the admin queue capability register hold the
	 * reset timeout in 100 ms units. */
	adminq_cap = PCI_MEM_GET64(softs, &softs->pqi_reg->pqi_dev_adminq_cap,
		PQI_ADMINQ_CAP);
	max_timeout = (adminq_cap & 0xFFFF00000000) >> 32;

	DBG_INIT("max_timeout for PQI reset completion in 100 msec units = %u\n", max_timeout);

	/* Poll the reset register until the action completes or we time out. */
	for (attempts = 0; ; attempts++) {
		if (attempts == max_timeout)
			return PQI_STATUS_TIMEOUT;
		OS_SLEEP(PQI_RESET_POLL_INTERVAL);/* 100 msec */
		reset_reg.all_bits = PCI_MEM_GET32(softs,
			&softs->pqi_reg->dev_reset, PQI_DEV_RESET);
		if (reset_reg.bits.reset_action == PQI_RESET_ACTION_COMPLETED)
			break;
	}

	return PQI_STATUS_SUCCESS;
}
/* | /* | ||||
* Function used to perform PQI hard reset. | * Function used to perform PQI hard reset. | ||||
*/ | */ | ||||
int pqi_reset(pqisrc_softstate_t *softs) | int | ||||
pqi_reset(pqisrc_softstate_t *softs) | |||||
{ | { | ||||
int ret = PQI_STATUS_SUCCESS; | int ret = PQI_STATUS_SUCCESS; | ||||
uint32_t val = 0; | uint32_t val = 0; | ||||
pqi_reset_reg_t pqi_reset_reg; | pqi_reset_reg_t pqi_reset_reg; | ||||
DBG_FUNC("IN\n"); | DBG_FUNC("IN\n"); | ||||
if (true == softs->ctrl_in_pqi_mode) { | if (true == softs->ctrl_in_pqi_mode) { | ||||
if (softs->pqi_reset_quiesce_allowed) { | if (softs->pqi_reset_quiesce_allowed) { | ||||
val = PCI_MEM_GET32(softs, &softs->ioa_reg->host_to_ioa_db, | val = PCI_MEM_GET32(softs, &softs->ioa_reg->host_to_ioa_db, | ||||
LEGACY_SIS_IDBR); | LEGACY_SIS_IDBR); | ||||
val |= SIS_PQI_RESET_QUIESCE; | val |= SIS_PQI_RESET_QUIESCE; | ||||
PCI_MEM_PUT32(softs, &softs->ioa_reg->host_to_ioa_db, | PCI_MEM_PUT32(softs, &softs->ioa_reg->host_to_ioa_db, | ||||
LEGACY_SIS_IDBR, LE_32(val)); | LEGACY_SIS_IDBR, LE_32(val)); | ||||
ret = pqisrc_sis_wait_for_db_bit_to_clear(softs, SIS_PQI_RESET_QUIESCE); | ret = pqisrc_sis_wait_for_db_bit_to_clear(softs, SIS_PQI_RESET_QUIESCE); | ||||
Show All 19 Lines | pqi_reset(pqisrc_softstate_t *softs) | ||||
softs->ctrl_in_pqi_mode = false; | softs->ctrl_in_pqi_mode = false; | ||||
DBG_FUNC("OUT\n"); | DBG_FUNC("OUT\n"); | ||||
return ret; | return ret; | ||||
} | } | ||||
/* | /* | ||||
* Initialize the adapter with supported PQI configuration. | * Initialize the adapter with supported PQI configuration. | ||||
*/ | */ | ||||
int pqisrc_pqi_init(pqisrc_softstate_t *softs) | int | ||||
pqisrc_pqi_init(pqisrc_softstate_t *softs) | |||||
{ | { | ||||
int ret = PQI_STATUS_SUCCESS; | int ret = PQI_STATUS_SUCCESS; | ||||
DBG_FUNC("IN\n"); | DBG_FUNC("IN\n"); | ||||
/* Check the PQI signature */ | /* Check the PQI signature */ | ||||
ret = pqisrc_check_pqimode(softs); | ret = pqisrc_check_pqimode(softs); | ||||
if(ret) { | if(ret) { | ||||
DBG_ERR("failed to switch to pqi\n"); | DBG_ERR("failed to switch to pqi\n"); | ||||
goto err_out; | goto err_out; | ||||
} | } | ||||
PQI_SAVE_CTRL_MODE(softs, CTRL_PQI_MODE); | PQI_SAVE_CTRL_MODE(softs, CTRL_PQI_MODE); | ||||
softs->ctrl_in_pqi_mode = true; | softs->ctrl_in_pqi_mode = true; | ||||
/* Get the No. of Online CPUs,NUMA/Processor config from OS */ | /* Get the No. of Online CPUs,NUMA/Processor config from OS */ | ||||
ret = os_get_processor_config(softs); | ret = os_get_processor_config(softs); | ||||
if (ret) { | if (ret) { | ||||
DBG_ERR("Failed to get processor config from OS %d\n", | DBG_ERR("Failed to get processor config from OS %d\n", | ||||
ret); | ret); | ||||
goto err_out; | goto err_out; | ||||
} | } | ||||
softs->intr_type = INTR_TYPE_NONE; | softs->intr_type = INTR_TYPE_NONE; | ||||
/* Get the interrupt count, type, priority available from OS */ | /* Get the interrupt count, type, priority available from OS */ | ||||
ret = os_get_intr_config(softs); | ret = os_get_intr_config(softs); | ||||
if (ret) { | if (ret) { | ||||
DBG_ERR("Failed to get interrupt config from OS %d\n", | DBG_ERR("Failed to get interrupt config from OS %d\n", | ||||
ret); | ret); | ||||
goto err_out; | goto err_out; | ||||
} | } | ||||
/*Enable/Set Legacy INTx Interrupt mask clear pqi register, | /*Enable/Set Legacy INTx Interrupt mask clear pqi register, | ||||
*if allocated interrupt is legacy type. | *if allocated interrupt is legacy type. | ||||
*/ | */ | ||||
if (INTR_TYPE_FIXED == softs->intr_type) { | if (INTR_TYPE_FIXED == softs->intr_type) { | ||||
pqisrc_configure_legacy_intx(softs, true); | pqisrc_configure_legacy_intx(softs, true); | ||||
sis_enable_intx(softs); | sis_enable_intx(softs); | ||||
} | } | ||||
/* Create Admin Queue pair*/ | /* Create Admin Queue pair*/ | ||||
ret = pqisrc_create_admin_queue(softs); | ret = pqisrc_create_admin_queue(softs); | ||||
if(ret) { | if(ret) { | ||||
DBG_ERR("Failed to configure admin queue\n"); | DBG_ERR("Failed to configure admin queue\n"); | ||||
goto err_admin_queue; | goto err_admin_queue; | ||||
} | } | ||||
/* For creating event and IO operational queues we have to submit | /* For creating event and IO operational queues we have to submit | ||||
admin IU requests.So Allocate resources for submitting IUs */ | admin IU requests.So Allocate resources for submitting IUs */ | ||||
/* Allocate the request container block (rcb) */ | /* Allocate the request container block (rcb) */ | ||||
ret = pqisrc_allocate_rcb(softs); | ret = pqisrc_allocate_rcb(softs); | ||||
if (ret == PQI_STATUS_FAILURE) { | if (ret == PQI_STATUS_FAILURE) { | ||||
DBG_ERR("Failed to allocate rcb \n"); | DBG_ERR("Failed to allocate rcb \n"); | ||||
goto err_rcb; | goto err_rcb; | ||||
} | } | ||||
/* Allocate & initialize request id queue */ | /* Allocate & initialize request id queue */ | ||||
Show All 23 Lines | pqisrc_pqi_init(pqisrc_softstate_t *softs) | ||||
DBG_FUNC("OUT\n"); | DBG_FUNC("OUT\n"); | ||||
return ret; | return ret; | ||||
err_create_opq: | err_create_opq: | ||||
err_config_opq: | err_config_opq: | ||||
pqisrc_destroy_taglist(softs,&softs->taglist); | pqisrc_destroy_taglist(softs,&softs->taglist); | ||||
err_taglist: | err_taglist: | ||||
pqisrc_free_rcb(softs, softs->max_outstanding_io + 1); | pqisrc_free_rcb(softs, softs->max_outstanding_io + 1); | ||||
err_rcb: | err_rcb: | ||||
pqisrc_destroy_admin_queue(softs); | pqisrc_destroy_admin_queue(softs); | ||||
err_admin_queue: | err_admin_queue: | ||||
os_free_intr_config(softs); | os_free_intr_config(softs); | ||||
err_out: | err_out: | ||||
DBG_FUNC("OUT failed\n"); | DBG_FUNC("OUT failed\n"); | ||||
return PQI_STATUS_FAILURE; | return PQI_STATUS_FAILURE; | ||||
} | } | ||||
/* */ | int | ||||
int pqisrc_force_sis(pqisrc_softstate_t *softs) | pqisrc_force_sis(pqisrc_softstate_t *softs) | ||||
{ | { | ||||
int ret = PQI_STATUS_SUCCESS; | int ret = PQI_STATUS_SUCCESS; | ||||
if (SIS_IS_KERNEL_PANIC(softs)) { | if (SIS_IS_KERNEL_PANIC(softs)) { | ||||
DBG_INIT("Controller FW is not runnning"); | DBG_INIT("Controller FW is not runnning"); | ||||
return PQI_STATUS_FAILURE; | return PQI_STATUS_FAILURE; | ||||
} | } | ||||
if (PQI_GET_CTRL_MODE(softs) == CTRL_SIS_MODE) { | if (PQI_GET_CTRL_MODE(softs) == CTRL_SIS_MODE) { | ||||
return ret; | return ret; | ||||
} | } | ||||
if (SIS_IS_KERNEL_UP(softs)) { | if (SIS_IS_KERNEL_UP(softs)) { | ||||
PQI_SAVE_CTRL_MODE(softs, CTRL_SIS_MODE); | PQI_SAVE_CTRL_MODE(softs, CTRL_SIS_MODE); | ||||
return ret; | return ret; | ||||
} | } | ||||
/* Disable interrupts ? */ | /* Disable interrupts ? */ | ||||
sis_disable_interrupt(softs); | sis_disable_interrupt(softs); | ||||
/* reset pqi, this will delete queues */ | /* reset pqi, this will delete queues */ | ||||
ret = pqi_reset(softs); | ret = pqi_reset(softs); | ||||
if (ret) { | if (ret) { | ||||
return ret; | return ret; | ||||
} | } | ||||
/* Re enable SIS */ | /* Re enable SIS */ | ||||
ret = pqisrc_reenable_sis(softs); | ret = pqisrc_reenable_sis(softs); | ||||
if (ret) { | if (ret) { | ||||
return ret; | return ret; | ||||
} | } | ||||
PQI_SAVE_CTRL_MODE(softs, CTRL_SIS_MODE); | PQI_SAVE_CTRL_MODE(softs, CTRL_SIS_MODE); | ||||
return ret; | return ret; | ||||
} | } | ||||
/*
 * Wait (up to PQI_QUIESCE_TIMEOUT msec) for all outstanding commands to
 * complete, i.e. for the tag list to refill to max_outstanding_io.
 *
 * Returns PQI_STATUS_SUCCESS once the controller is quiesced, or
 * PQI_STATUS_FAILURE on firmware panic or timeout.
 */
static int
pqisrc_wait_for_cmnd_complete(pqisrc_softstate_t *softs)
{
	int msec_waited = 0;
	int ret = PQI_STATUS_SUCCESS;

	DBG_NOTE("softs->taglist.num_elem : %d",softs->taglist.num_elem);

	/* All tags back on the free list means nothing is outstanding. */
	if (softs->taglist.num_elem == softs->max_outstanding_io)
		return ret;

	DBG_WARN("%d commands pending\n",
	    softs->max_outstanding_io - softs->taglist.num_elem);

	for (;;) {
		/*
		 * The heartbeat timer is stopped at this point, so check
		 * the firmware state directly on every iteration.
		 */
		if (SIS_IS_KERNEL_PANIC(softs)) {
			DBG_ERR("Controller FW is not running\n");
			return PQI_STATUS_FAILURE;
		}

		if (softs->taglist.num_elem == softs->max_outstanding_io)
			break;

		/* Sleep for 1 msec */
		OS_SLEEP(1000);
		msec_waited++;
		if (msec_waited % 1000 == 0) {
			DBG_WARN("Waited for %d seconds", msec_waited / 1000);
		}
		if (msec_waited >= PQI_QUIESCE_TIMEOUT) {
			return PQI_STATUS_FAILURE;
		}
	}

	return ret;
}
/*
 * Fail out every still-pending internal request.
 *
 * Walks the rcb table (tags 1..max_outstanding_io) and, for each rcb
 * that is both pending and internally generated, marks it
 * REQUEST_FAILED and clears its pending flag so waiters are released.
 */
static void
pqisrc_complete_internal_cmds(pqisrc_softstate_t *softs)
{
	rcb_t *rcb;
	int tag;

	for (tag = 1; tag <= softs->max_outstanding_io; tag++) {
		rcb = &softs->rcb[tag];
		if (!rcb->req_pending || !is_internal_req(rcb))
			continue;
		rcb->status = REQUEST_FAILED;
		rcb->req_pending = false;
	}
}
/* | /* | ||||
* Uninitialize the resources used during PQI initialization. | * Uninitialize the resources used during PQI initialization. | ||||
*/ | */ | ||||
void pqisrc_pqi_uninit(pqisrc_softstate_t *softs) | void | ||||
pqisrc_pqi_uninit(pqisrc_softstate_t *softs) | |||||
{ | { | ||||
int i, ret; | int i, ret; | ||||
DBG_FUNC("IN\n"); | DBG_FUNC("IN\n"); | ||||
/* Wait for any rescan to finish */ | /* Wait for any rescan to finish */ | ||||
pqisrc_wait_for_rescan_complete(softs); | pqisrc_wait_for_rescan_complete(softs); | ||||
/* Wait for commands to complete */ | /* Wait for commands to complete */ | ||||
ret = pqisrc_wait_for_cmnd_complete(softs); | ret = pqisrc_wait_for_cmnd_complete(softs); | ||||
/* disable and free the interrupt resources */ | |||||
os_destroy_intr(softs); | |||||
/* Complete all pending commands. */ | /* Complete all pending commands. */ | ||||
if(ret != PQI_STATUS_SUCCESS) { | if(ret != PQI_STATUS_SUCCESS) { | ||||
pqisrc_complete_internal_cmds(softs); | pqisrc_complete_internal_cmds(softs); | ||||
os_complete_outstanding_cmds_nodevice(softs); | os_complete_outstanding_cmds_nodevice(softs); | ||||
} | } | ||||
if(softs->devlist_lockcreated==true){ | if(softs->devlist_lockcreated==true){ | ||||
os_uninit_spinlock(&softs->devlist_lock); | os_uninit_spinlock(&softs->devlist_lock); | ||||
softs->devlist_lockcreated = false; | softs->devlist_lockcreated = false; | ||||
} | } | ||||
for (i = 0; i < softs->num_op_raid_ibq; i++) { | for (i = 0; i < softs->num_op_raid_ibq; i++) { | ||||
/* OP RAID IB Q */ | /* OP RAID IB Q */ | ||||
if(softs->op_raid_ib_q[i].lockcreated==true){ | if(softs->op_raid_ib_q[i].lockcreated==true){ | ||||
OS_UNINIT_PQILOCK(&softs->op_raid_ib_q[i].lock); | OS_UNINIT_PQILOCK(&softs->op_raid_ib_q[i].lock); | ||||
softs->op_raid_ib_q[i].lockcreated = false; | softs->op_raid_ib_q[i].lockcreated = false; | ||||
} | } | ||||
/* OP AIO IB Q */ | /* OP AIO IB Q */ | ||||
if(softs->op_aio_ib_q[i].lockcreated==true){ | if(softs->op_aio_ib_q[i].lockcreated==true){ | ||||
OS_UNINIT_PQILOCK(&softs->op_aio_ib_q[i].lock); | OS_UNINIT_PQILOCK(&softs->op_aio_ib_q[i].lock); | ||||
softs->op_aio_ib_q[i].lockcreated = false; | softs->op_aio_ib_q[i].lockcreated = false; | ||||
} | } | ||||
} | } | ||||
/* Free Op queues */ | /* Free Op queues */ | ||||
os_dma_mem_free(softs, &softs->op_ibq_dma_mem); | os_dma_mem_free(softs, &softs->op_ibq_dma_mem); | ||||
os_dma_mem_free(softs, &softs->op_obq_dma_mem); | os_dma_mem_free(softs, &softs->op_obq_dma_mem); | ||||
os_dma_mem_free(softs, &softs->event_q_dma_mem); | os_dma_mem_free(softs, &softs->event_q_dma_mem); | ||||
/* Free rcb */ | /* Free rcb */ | ||||
pqisrc_free_rcb(softs, softs->max_outstanding_io + 1); | pqisrc_free_rcb(softs, softs->max_outstanding_io + 1); | ||||
/* Free request id lists */ | /* Free request id lists */ | ||||
pqisrc_destroy_taglist(softs,&softs->taglist); | pqisrc_destroy_taglist(softs,&softs->taglist); | ||||
if(softs->admin_ib_queue.lockcreated==true){ | if(softs->admin_ib_queue.lockcreated==true) { | ||||
OS_UNINIT_PQILOCK(&softs->admin_ib_queue.lock); | OS_UNINIT_PQILOCK(&softs->admin_ib_queue.lock); | ||||
softs->admin_ib_queue.lockcreated = false; | softs->admin_ib_queue.lockcreated = false; | ||||
} | } | ||||
/* Free Admin Queue */ | /* Free Admin Queue */ | ||||
os_dma_mem_free(softs, &softs->admin_queue_dma_mem); | os_dma_mem_free(softs, &softs->admin_queue_dma_mem); | ||||
/* Switch back to SIS mode */ | /* Switch back to SIS mode */ | ||||
if (pqisrc_force_sis(softs)) { | if (pqisrc_force_sis(softs)) { | ||||
DBG_ERR("Failed to switch back the adapter to SIS mode!\n"); | DBG_ERR("Failed to switch back the adapter to SIS mode!\n"); | ||||
} | } | ||||
DBG_FUNC("OUT\n"); | DBG_FUNC("OUT\n"); | ||||
} | } | ||||
/* | /* | ||||
* Function to initialize the adapter settings. | * Function to initialize the adapter settings. | ||||
*/ | */ | ||||
int pqisrc_init(pqisrc_softstate_t *softs) | int | ||||
pqisrc_init(pqisrc_softstate_t *softs) | |||||
{ | { | ||||
int ret = 0; | int ret = 0; | ||||
int i = 0, j = 0; | int i = 0, j = 0; | ||||
DBG_FUNC("IN\n"); | DBG_FUNC("IN\n"); | ||||
check_struct_sizes(); | check_struct_sizes(); | ||||
/* Init the Sync interface */ | /* Init the Sync interface */ | ||||
ret = pqisrc_sis_init(softs); | ret = pqisrc_sis_init(softs); | ||||
if (ret) { | if (ret) { | ||||
DBG_ERR("SIS Init failed with error %d\n", ret); | DBG_ERR("SIS Init failed with error %d\n", ret); | ||||
goto err_out; | goto err_out; | ||||
} | } | ||||
ret = os_create_semaphore("scan_lock", 1, &softs->scan_lock); | ret = os_create_semaphore("scan_lock", 1, &softs->scan_lock); | ||||
Show All 17 Lines | pqisrc_init(pqisrc_softstate_t *softs) | ||||
} | } | ||||
/* Report event configuration */ | /* Report event configuration */ | ||||
ret = pqisrc_report_event_config(softs); | ret = pqisrc_report_event_config(softs); | ||||
if(ret){ | if(ret){ | ||||
DBG_ERR(" Failed to configure Report events\n"); | DBG_ERR(" Failed to configure Report events\n"); | ||||
goto err_event; | goto err_event; | ||||
} | } | ||||
/* Set event configuration*/ | /* Set event configuration*/ | ||||
ret = pqisrc_set_event_config(softs); | ret = pqisrc_set_event_config(softs); | ||||
if(ret){ | if(ret){ | ||||
DBG_ERR(" Failed to configure Set events\n"); | DBG_ERR(" Failed to configure Set events\n"); | ||||
goto err_event; | goto err_event; | ||||
} | } | ||||
/* Check for For PQI spanning */ | /* Check for For PQI spanning */ | ||||
ret = pqisrc_get_ctrl_fw_version(softs); | ret = pqisrc_get_ctrl_fw_version(softs); | ||||
if(ret){ | if(ret){ | ||||
DBG_ERR(" Failed to get ctrl fw version\n"); | DBG_ERR(" Failed to get ctrl fw version\n"); | ||||
goto err_fw_version; | goto err_fw_version; | ||||
} | } | ||||
/* update driver version in to FW */ | /* update driver version in to FW */ | ||||
ret = pqisrc_write_driver_version_to_host_wellness(softs); | ret = pqisrc_write_driver_version_to_host_wellness(softs); | ||||
if (ret) { | if (ret) { | ||||
DBG_ERR(" Failed to update driver version in to FW"); | DBG_ERR(" Failed to update driver version in to FW"); | ||||
goto err_host_wellness; | goto err_host_wellness; | ||||
} | } | ||||
os_strlcpy(softs->devlist_lock_name, "devlist_lock", LOCKNAME_SIZE); | os_strlcpy(softs->devlist_lock_name, "devlist_lock", LOCKNAME_SIZE); | ||||
ret = os_init_spinlock(softs, &softs->devlist_lock, softs->devlist_lock_name); | ret = os_init_spinlock(softs, &softs->devlist_lock, softs->devlist_lock_name); | ||||
if(ret){ | if(ret){ | ||||
DBG_ERR(" Failed to initialize devlist_lock\n"); | DBG_ERR(" Failed to initialize devlist_lock\n"); | ||||
softs->devlist_lockcreated=false; | softs->devlist_lockcreated=false; | ||||
goto err_lock; | goto err_lock; | ||||
} | } | ||||
softs->devlist_lockcreated = true; | softs->devlist_lockcreated = true; | ||||
OS_ATOMIC64_SET(softs, num_intrs, 0); | |||||
softs->prev_num_intrs = softs->num_intrs; | |||||
/* Get the PQI configuration table to read heart-beat counter*/ | /* Get the PQI configuration table to read heart-beat counter*/ | ||||
if (PQI_NEW_HEARTBEAT_MECHANISM(softs)) { | |||||
ret = pqisrc_process_config_table(softs); | ret = pqisrc_process_config_table(softs); | ||||
if (ret) { | if (ret) { | ||||
DBG_ERR("Failed to process PQI configuration table %d\n", ret); | DBG_ERR("Failed to process PQI configuration table %d\n", ret); | ||||
goto err_config_tab; | goto err_config_tab; | ||||
} | } | ||||
} | |||||
if (PQI_NEW_HEARTBEAT_MECHANISM(softs)) | |||||
softs->prev_heartbeat_count = CTRLR_HEARTBEAT_CNT(softs) - OS_FW_HEARTBEAT_TIMER_INTERVAL; | softs->prev_heartbeat_count = CTRLR_HEARTBEAT_CNT(softs) - OS_FW_HEARTBEAT_TIMER_INTERVAL; | ||||
/* Init device list */ | /* Init device list */ | ||||
for(i = 0; i < PQI_MAX_DEVICES; i++) | for(i = 0; i < PQI_MAX_DEVICES; i++) | ||||
for(j = 0; j < PQI_MAX_MULTILUN; j++) | for(j = 0; j < PQI_MAX_MULTILUN; j++) | ||||
softs->device_list[i][j] = NULL; | softs->device_list[i][j] = NULL; | ||||
pqisrc_init_targetid_pool(softs); | pqisrc_init_targetid_pool(softs); | ||||
DBG_FUNC("OUT\n"); | DBG_FUNC("OUT\n"); | ||||
return ret; | return ret; | ||||
err_config_tab: | err_config_tab: | ||||
if(softs->devlist_lockcreated==true){ | if(softs->devlist_lockcreated==true){ | ||||
os_uninit_spinlock(&softs->devlist_lock); | os_uninit_spinlock(&softs->devlist_lock); | ||||
softs->devlist_lockcreated = false; | softs->devlist_lockcreated = false; | ||||
} | } | ||||
err_lock: | err_lock: | ||||
err_fw_version: | err_fw_version: | ||||
err_event: | err_event: | ||||
err_host_wellness: | err_host_wellness: | ||||
os_destroy_intr(softs); | |||||
err_intr: | err_intr: | ||||
pqisrc_pqi_uninit(softs); | pqisrc_pqi_uninit(softs); | ||||
err_pqi: | err_pqi: | ||||
os_destroy_semaphore(&softs->scan_lock); | os_destroy_semaphore(&softs->scan_lock); | ||||
err_scan_lock: | err_scan_lock: | ||||
pqisrc_sis_uninit(softs); | pqisrc_sis_uninit(softs); | ||||
err_out: | err_out: | ||||
DBG_FUNC("OUT failed\n"); | DBG_FUNC("OUT failed\n"); | ||||
return ret; | return ret; | ||||
} | } | ||||
/* | /* | ||||
* Write all data in the adapter's battery-backed cache to | * Write all data in the adapter's battery-backed cache to | ||||
* storage. | * storage. | ||||
*/ | */ | ||||
int pqisrc_flush_cache( pqisrc_softstate_t *softs, | int | ||||
pqisrc_flush_cache( pqisrc_softstate_t *softs, | |||||
enum pqisrc_flush_cache_event_type event_type) | enum pqisrc_flush_cache_event_type event_type) | ||||
{ | { | ||||
int rval = PQI_STATUS_SUCCESS; | int rval = PQI_STATUS_SUCCESS; | ||||
pqisrc_raid_req_t request; | pqisrc_raid_req_t request; | ||||
pqisrc_bmic_flush_cache_t *flush_buff = NULL; | pqisrc_bmic_flush_cache_t *flush_buff = NULL; | ||||
DBG_FUNC("IN\n"); | DBG_FUNC("IN\n"); | ||||
if (pqisrc_ctrl_offline(softs)) | if (pqisrc_ctrl_offline(softs)) | ||||
return PQI_STATUS_FAILURE; | return PQI_STATUS_FAILURE; | ||||
flush_buff = os_mem_alloc(softs, sizeof(pqisrc_bmic_flush_cache_t)); | flush_buff = os_mem_alloc(softs, sizeof(pqisrc_bmic_flush_cache_t)); | ||||
if (!flush_buff) { | if (!flush_buff) { | ||||
DBG_ERR("Failed to allocate memory for flush cache params\n"); | DBG_ERR("Failed to allocate memory for flush cache params\n"); | ||||
rval = PQI_STATUS_FAILURE; | rval = PQI_STATUS_FAILURE; | ||||
return rval; | return rval; | ||||
} | } | ||||
flush_buff->halt_event = event_type; | flush_buff->halt_event = event_type; | ||||
Show All 13 Lines | pqisrc_flush_cache( pqisrc_softstate_t *softs, | ||||
DBG_FUNC("OUT\n"); | DBG_FUNC("OUT\n"); | ||||
return rval; | return rval; | ||||
} | } | ||||
/* | /* | ||||
* Uninitialize the adapter. | * Uninitialize the adapter. | ||||
*/ | */ | ||||
void pqisrc_uninit(pqisrc_softstate_t *softs) | void | ||||
pqisrc_uninit(pqisrc_softstate_t *softs) | |||||
{ | { | ||||
DBG_FUNC("IN\n"); | DBG_FUNC("IN\n"); | ||||
pqisrc_pqi_uninit(softs); | pqisrc_pqi_uninit(softs); | ||||
pqisrc_sis_uninit(softs); | pqisrc_sis_uninit(softs); | ||||
os_destroy_semaphore(&softs->scan_lock); | os_destroy_semaphore(&softs->scan_lock); | ||||
os_destroy_intr(softs); | |||||
pqisrc_cleanup_devices(softs); | pqisrc_cleanup_devices(softs); | ||||
DBG_FUNC("OUT\n"); | DBG_FUNC("OUT\n"); | ||||
} | } |