Changeset View
Changeset View
Standalone View
Standalone View
sys/dev/smartpqi/smartpqi_helper.c
/*- | /*- | ||||
* Copyright (c) 2018 Microsemi Corporation. | * Copyright (c) 2016-2019 Microsemi Corporation. | ||||
* Copyright (c) 2020 Microchip Technology Inc. and it's subsidiaries. | |||||
* | |||||
* All rights reserved. | * All rights reserved. | ||||
* | * | ||||
* Redistribution and use in source and binary forms, with or without | * Redistribution and use in source and binary forms, with or without | ||||
Context not available. | |||||
/* $FreeBSD$ */ | /* $FreeBSD$ */ | ||||
#include "smartpqi_includes.h" | #include "smartpqi_includes.h" | ||||
/*
 * Read and modify the controller diagnostic options: turn on the
 * PQI_PTRAID_UPDATE_ON_RESCAN_LUNS bit via a BMIC read-modify-write.
 * Failures are logged and otherwise ignored (best-effort tuning).
 */
void pqisrc_ctrl_diagnostic_options(pqisrc_softstate_t *softs)
{
	int ret = PQI_STATUS_SUCCESS;
	uint32_t diags_options = 0;
	pqisrc_raid_req_t request;

	DBG_NOTE("IN\n");

	memset(&request, 0, sizeof(request));
	/* read current diags options from the controller */
	ret = pqisrc_build_send_raid_request(softs, &request,
			(void*)&diags_options,
			sizeof(diags_options),
			BMIC_SENSE_DIAGS_OPTIONS,
			0, (uint8_t *)RAID_CTLR_LUNID, NULL);
	if (ret != PQI_STATUS_SUCCESS) {
		/* Without the current value we cannot safely modify; bail out. */
		DBG_WARN("Request failed for BMIC Sense Diags Option command."
			"ret:%d\n",ret);
		return;
	}
	DBG_NOTE("diags options data after read: %#x\n",diags_options);
	diags_options |= PQI_PTRAID_UPDATE_ON_RESCAN_LUNS;
	DBG_NOTE("diags options data to write: %#x\n",diags_options);
	memset(&request, 0, sizeof(request));
	/* write specified diags options back to the controller */
	ret = pqisrc_build_send_raid_request(softs, &request,
			(void*)&diags_options,
			sizeof(diags_options),
			BMIC_SET_DIAGS_OPTIONS,
			0, (uint8_t *)RAID_CTLR_LUNID, NULL);
	if (ret != PQI_STATUS_SUCCESS)
		DBG_WARN("Request failed for BMIC Set Diags Option command."
			"ret:%d\n",ret);
#if 0
	/* Debug-only read-back to verify the write took effect; disabled. */
	diags_options = 0;
	memset(&request, 0, sizeof(request));
	ret = pqisrc_build_send_raid_request(softs, &request,
			(void*)&diags_options,
			sizeof(diags_options),
			BMIC_SENSE_DIAGS_OPTIONS,
			0, (uint8_t *)RAID_CTLR_LUNID, NULL);
	if (ret != PQI_STATUS_SUCCESS)
		DBG_WARN("Request failed for BMIC Sense Diags Option command."
			"ret:%d\n",ret);
	DBG_NOTE("diags options after re-read: %#x\n",diags_options);
#endif
	DBG_NOTE("OUT\n");
}
/* | /* | ||||
* Function used to validate the adapter health. | * Function used to validate the adapter health. | ||||
*/ | */ | ||||
Context not available. | |||||
return !softs->ctrl_online; | return !softs->ctrl_online; | ||||
} | } | ||||
/* Function used set/clear legacy INTx bit in Legacy Interrupt INTx | /* Function used set/clear legacy INTx bit in Legacy Interrupt INTx | ||||
* mask clear pqi register | * mask clear pqi register | ||||
*/ | */ | ||||
Context not available. | |||||
{ | { | ||||
uint32_t intx_mask; | uint32_t intx_mask; | ||||
uint32_t *reg_addr = NULL; | uint32_t *reg_addr = NULL; | ||||
DBG_FUNC("IN\n"); | DBG_FUNC("IN\n"); | ||||
if (enable_intx) | if (enable_intx) | ||||
reg_addr = &softs->pqi_reg->legacy_intr_mask_clr; | reg_addr = &softs->pqi_reg->legacy_intr_mask_clr; | ||||
else | else | ||||
reg_addr = &softs->pqi_reg->legacy_intr_mask_set; | reg_addr = &softs->pqi_reg->legacy_intr_mask_set; | ||||
intx_mask = PCI_MEM_GET32(softs, reg_addr, PQI_LEGACY_INTR_MASK_CLR); | intx_mask = PCI_MEM_GET32(softs, reg_addr, PQI_LEGACY_INTR_MASK_CLR); | ||||
intx_mask |= PQISRC_LEGACY_INTX_MASK; | intx_mask |= PQISRC_LEGACY_INTX_MASK; | ||||
PCI_MEM_PUT32(softs, reg_addr, PQI_LEGACY_INTR_MASK_CLR ,intx_mask); | PCI_MEM_PUT32(softs, reg_addr, PQI_LEGACY_INTR_MASK_CLR ,intx_mask); | ||||
DBG_FUNC("OUT\n"); | DBG_FUNC("OUT\n"); | ||||
} | } | ||||
/* | /* | ||||
Context not available. | |||||
softs->ctrl_online = false; | softs->ctrl_online = false; | ||||
pqisrc_trigger_nmi_sis(softs); | pqisrc_trigger_nmi_sis(softs); | ||||
os_complete_outstanding_cmds_nodevice(softs); | os_complete_outstanding_cmds_nodevice(softs); | ||||
pqisrc_wait_for_rescan_complete(softs); | |||||
pqisrc_take_devices_offline(softs); | pqisrc_take_devices_offline(softs); | ||||
DBG_FUNC("OUT\n"); | DBG_FUNC("OUT\n"); | ||||
Context not available. | |||||
*/ | */ | ||||
void pqisrc_heartbeat_timer_handler(pqisrc_softstate_t *softs) | void pqisrc_heartbeat_timer_handler(pqisrc_softstate_t *softs) | ||||
{ | { | ||||
uint64_t num_intrs; | |||||
uint8_t take_offline = false; | uint8_t take_offline = false; | ||||
DBG_FUNC("IN\n"); | DBG_FUNC("IN\n"); | ||||
num_intrs = OS_ATOMIC64_READ(softs, num_intrs); | if (CTRLR_HEARTBEAT_CNT(softs) == softs->prev_heartbeat_count) { | ||||
take_offline = true; | |||||
if (PQI_NEW_HEARTBEAT_MECHANISM(softs)) { | goto take_ctrl_offline; | ||||
if (CTRLR_HEARTBEAT_CNT(softs) == softs->prev_heartbeat_count) { | } | ||||
take_offline = true; | softs->prev_heartbeat_count = CTRLR_HEARTBEAT_CNT(softs); | ||||
goto take_ctrl_offline; | DBG_INFO("CTRLR_HEARTBEAT_CNT(softs) = %lx \ | ||||
} | |||||
softs->prev_heartbeat_count = CTRLR_HEARTBEAT_CNT(softs); | |||||
DBG_INFO("CTRLR_HEARTBEAT_CNT(softs) = %lx \ | |||||
softs->prev_heartbeat_count = %lx\n", | softs->prev_heartbeat_count = %lx\n", | ||||
CTRLR_HEARTBEAT_CNT(softs), softs->prev_heartbeat_count); | CTRLR_HEARTBEAT_CNT(softs), softs->prev_heartbeat_count); | ||||
} else { | |||||
if (num_intrs == softs->prev_num_intrs) { | |||||
softs->num_heartbeats_requested++; | |||||
if (softs->num_heartbeats_requested > PQI_MAX_HEARTBEAT_REQUESTS) { | |||||
take_offline = true; | |||||
goto take_ctrl_offline; | |||||
} | |||||
softs->pending_events[PQI_EVENT_HEARTBEAT].pending = true; | |||||
pqisrc_ack_all_events((void*)softs); | |||||
} else { | |||||
softs->num_heartbeats_requested = 0; | |||||
} | |||||
softs->prev_num_intrs = num_intrs; | |||||
} | |||||
take_ctrl_offline: | take_ctrl_offline: | ||||
if (take_offline){ | if (take_offline){ | ||||
Context not available. | |||||
/* | /* | ||||
* Conditional variable management routine for internal commands. | * Conditional variable management routine for internal commands. | ||||
*/ | */ | ||||
int pqisrc_wait_on_condition(pqisrc_softstate_t *softs, rcb_t *rcb){ | int pqisrc_wait_on_condition(pqisrc_softstate_t *softs, rcb_t *rcb, | ||||
uint32_t timeout_in_msec) | |||||
{ | |||||
DBG_FUNC("IN\n"); | DBG_FUNC("IN\n"); | ||||
int ret = PQI_STATUS_SUCCESS; | int ret = PQI_STATUS_SUCCESS; | ||||
uint32_t loop_cnt = 0; | |||||
/* 1 msec = 500 usec * 2 */ | |||||
uint32_t loop_cnt = timeout_in_msec * 2; | |||||
uint32_t i = 0; | |||||
while (rcb->req_pending == true) { | while (rcb->req_pending == true) { | ||||
OS_SLEEP(500); /* Micro sec */ | OS_SLEEP(500); /* Micro sec */ | ||||
/* Polling needed for FreeBSD : since ithread routine is not scheduled | |||||
/*Polling needed for FreeBSD : since ithread routine is not scheduled | * during bootup, we could use polling until interrupts are | ||||
during bootup, we could use polling until interrupts are | * enabled (using 'if (cold)'to check for the boot time before | ||||
enabled (using 'if (cold)'to check for the boot time before | * interrupts are enabled). */ | ||||
interrupts are enabled). */ | |||||
IS_POLLING_REQUIRED(softs); | IS_POLLING_REQUIRED(softs); | ||||
if (loop_cnt++ == PQISRC_CMD_TIMEOUT_CNT) { | if ((timeout_in_msec != TIMEOUT_INFINITE) && (i++ == loop_cnt)) { | ||||
DBG_ERR("ERR: Requested cmd timed out !!!\n"); | DBG_ERR("ERR: Requested cmd timed out !!!\n"); | ||||
ret = PQI_STATUS_TIMEOUT; | ret = PQI_STATUS_TIMEOUT; | ||||
rcb->timedout = true; | |||||
break; | break; | ||||
} | } | ||||
if (pqisrc_ctrl_offline(softs)) { | if (pqisrc_ctrl_offline(softs)) { | ||||
DBG_ERR("Controller is Offline"); | DBG_ERR("Controller is Offline"); | ||||
ret = PQI_STATUS_FAILURE; | ret = PQI_STATUS_FAILURE; | ||||
Context not available. | |||||
/* validate the structure sizes */ | /* validate the structure sizes */ | ||||
void check_struct_sizes() | void check_struct_sizes() | ||||
{ | { | ||||
ASSERT(sizeof(SCSI3Addr_struct)== 2); | ASSERT(sizeof(SCSI3Addr_struct)== 2); | ||||
ASSERT(sizeof(PhysDevAddr_struct) == 8); | ASSERT(sizeof(PhysDevAddr_struct) == 8); | ||||
ASSERT(sizeof(LogDevAddr_struct)== 8); | ASSERT(sizeof(LogDevAddr_struct)== 8); | ||||
Context not available. | |||||
ASSERT(sizeof(RequestBlock_struct) == 20); | ASSERT(sizeof(RequestBlock_struct) == 20); | ||||
ASSERT(sizeof(MoreErrInfo_struct)== 8); | ASSERT(sizeof(MoreErrInfo_struct)== 8); | ||||
ASSERT(sizeof(ErrorInfo_struct)== 48); | ASSERT(sizeof(ErrorInfo_struct)== 48); | ||||
ASSERT(sizeof(IOCTL_Command_struct)== 86); | /* Checking the size of IOCTL_Command_struct for both | ||||
64 bit and 32 bit system*/ | |||||
ASSERT(sizeof(IOCTL_Command_struct)== 86 || | |||||
sizeof(IOCTL_Command_struct)== 82); | |||||
ASSERT(sizeof(struct bmic_host_wellness_driver_version)== 42); | ASSERT(sizeof(struct bmic_host_wellness_driver_version)== 42); | ||||
ASSERT(sizeof(struct bmic_host_wellness_time)== 20); | ASSERT(sizeof(struct bmic_host_wellness_time)== 20); | ||||
ASSERT(sizeof(struct pqi_dev_adminq_cap)== 8); | ASSERT(sizeof(struct pqi_dev_adminq_cap)== 8); | ||||
Context not available. | |||||
ASSERT(sizeof(pqi_dev_cap_t)== 576); | ASSERT(sizeof(pqi_dev_cap_t)== 576); | ||||
ASSERT(sizeof(pqi_aio_req_t)== 128); | ASSERT(sizeof(pqi_aio_req_t)== 128); | ||||
ASSERT(sizeof(pqisrc_raid_req_t)== 128); | ASSERT(sizeof(pqisrc_raid_req_t)== 128); | ||||
ASSERT(sizeof(pqi_tmf_req_t)== 32); | ASSERT(sizeof(pqi_raid_tmf_req_t)== 32); | ||||
ASSERT(sizeof(pqi_aio_tmf_req_t)== 32); | |||||
ASSERT(sizeof(struct pqi_io_response)== 16); | ASSERT(sizeof(struct pqi_io_response)== 16); | ||||
ASSERT(sizeof(struct sense_header_scsi)== 8); | ASSERT(sizeof(struct sense_header_scsi)== 8); | ||||
ASSERT(sizeof(reportlun_header_t)==8); | ASSERT(sizeof(reportlun_header_t)==8); | ||||
Context not available. | |||||
ASSERT(sizeof(pqisrc_raid_map_t)== 8256); | ASSERT(sizeof(pqisrc_raid_map_t)== 8256); | ||||
ASSERT(sizeof(bmic_ident_ctrl_t)== 325); | ASSERT(sizeof(bmic_ident_ctrl_t)== 325); | ||||
ASSERT(sizeof(bmic_ident_physdev_t)==2048); | ASSERT(sizeof(bmic_ident_physdev_t)==2048); | ||||
} | |||||
/*
 * Count the OS-issued SCSI commands still pending on 'device' by
 * scanning the request control block (rcb) table.
 */
uint32_t pqisrc_count_num_scsi_active_requests_on_dev(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
{
	uint32_t tag;
	uint32_t count = 0;

	/* Tags 1..max_outstanding_io index the rcb table. */
	for (tag = 1; tag <= softs->max_outstanding_io; tag++) {
		rcb_t *rcb = &softs->rcb[tag];

		if (rcb && IS_OS_SCSICMD(rcb) && (rcb->dvp == device) &&
		    rcb->req_pending)
			count++;
	}

	return count;
}
/*
 * Busy-wait until every pending OS SCSI command addressed to 'device'
 * has completed, or until PQISRC_PENDING_IO_TIMEOUT_USEC of total wait
 * time has accumulated.  Walks the rcb table from the highest tag down;
 * a tag that is still pending is re-checked after a 1 ms busy-wait
 * (it is NOT skipped), so the scan only advances past completed tags.
 */
void check_device_pending_commands_to_complete(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
{
	uint32_t tag = softs->max_outstanding_io, active_requests;
	uint64_t timeout = 0, delay_in_usec = 1000; /* in microseconds */
	rcb_t* rcb;

	DBG_FUNC("IN\n");

	active_requests = pqisrc_count_num_scsi_active_requests_on_dev(softs, device);

	DBG_WARN("Device Outstanding IO count = %u\n", active_requests);

	/* Nothing pending for this device: no need to scan. */
	if(!active_requests)
		return;

	do {
		rcb = &softs->rcb[tag];
		if(rcb && IS_OS_SCSICMD(rcb) && (rcb->dvp == device) && rcb->req_pending) {
			/* Still pending: wait 1 ms and re-check this same tag. */
			OS_BUSYWAIT(delay_in_usec);
			timeout += delay_in_usec;
		}
		else
			tag--; /* tag free or not ours: move to the next one */
		if(timeout >= PQISRC_PENDING_IO_TIMEOUT_USEC) {
			DBG_WARN("timed out waiting for pending IO\n");
			return;
		}
	} while(tag);
}
/*
 * Atomically increment the per-device outstanding-I/O counter.
 * Returns the updated count, or 0 when the counter feature is
 * compiled out (PQISRC_DEVICE_IO_COUNTER == 0).
 */
inline uint64_t pqisrc_increment_device_active_io(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
{
#if PQISRC_DEVICE_IO_COUNTER
	/* Increment device active io count by one */
	return OS_ATOMIC64_INC(&device->active_requests);
#else
	/* Counter disabled: return 0 rather than falling off the end of a
	 * non-void function, which is undefined behavior when the caller
	 * uses the result (CERT MSC37-C). */
	return 0;
#endif
}
/*
 * Atomically decrement the outstanding-I/O counter of the device the
 * rcb targets, but only for OS-issued SCSI commands.  Returns the
 * updated count, PQI_STATUS_FAILURE for non-SCSI rcbs, or 0 when the
 * counter feature is compiled out.
 */
inline uint64_t pqisrc_decrement_device_active_io(pqisrc_softstate_t *softs, rcb_t *rcb)
{
#if PQISRC_DEVICE_IO_COUNTER
	if(IS_OS_SCSICMD(rcb)) {
		/* Decrement device active io count by one */
		return OS_ATOMIC64_DEC(&rcb->dvp->active_requests);
	}
	return PQI_STATUS_FAILURE;
#else
	/* Counter disabled: return 0 rather than falling off the end of a
	 * non-void function (undefined behavior — CERT MSC37-C). */
	return 0;
#endif
}
/*
 * Reset the per-device outstanding-I/O counter to zero.  No-op when
 * the counter feature is compiled out (PQISRC_DEVICE_IO_COUNTER == 0).
 */
inline void pqisrc_init_device_active_io(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
{
#if PQISRC_DEVICE_IO_COUNTER
	/* Reset device count to Zero */
	OS_ATOMIC64_INIT(&device->active_requests, 0);
#endif
}
/*
 * Read the per-device outstanding-I/O counter.  Returns the current
 * count, or 0 when the counter feature is compiled out.
 */
inline uint64_t pqisrc_read_device_active_io(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
{
#if PQISRC_DEVICE_IO_COUNTER
	/* read device active count */
	return OS_ATOMIC64_READ(&device->active_requests);
#else
	/* Counter disabled: return 0 rather than falling off the end of a
	 * non-void function (undefined behavior — CERT MSC37-C). */
	return 0;
#endif
}
/*
 * Busy-wait until 'device' has no outstanding I/O, or until
 * PQISRC_PENDING_IO_TIMEOUT_USEC elapses.  Returns immediately if the
 * controller is already offline.  When the per-device counter feature
 * is compiled out, falls back to scanning the rcb table.
 *
 * Fix: the DBG_WARN format strings used %ld for uint64_t values
 * (pqisrc_read_device_active_io() returns uint64_t); mismatched printf
 * conversion specifiers are undefined behavior, so the values are now
 * printed with %llu and an explicit cast.
 */
void pqisrc_wait_for_device_commands_to_complete(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
{
	uint64_t timeout_in_usec = 0, delay_in_usec = 1000; /* in microseconds */

	DBG_FUNC("IN\n");

	if(!softs->ctrl_online)
		return;

#if PQISRC_DEVICE_IO_COUNTER
	DBG_WARN("Device Outstanding IO count = %llu\n",
		(unsigned long long)pqisrc_read_device_active_io(softs, device));

	while(pqisrc_read_device_active_io(softs, device)) {
		OS_BUSYWAIT(delay_in_usec); /* in microseconds */
		if(!softs->ctrl_online) {
			/* NOTE(review): only logs; the loop still spins until
			 * the timeout below — confirm this is intended. */
			DBG_WARN("Controller Offline was detected.\n");
		}
		timeout_in_usec += delay_in_usec;
		if(timeout_in_usec >= PQISRC_PENDING_IO_TIMEOUT_USEC) {
			DBG_WARN("timed out waiting for pending IO. DeviceOutStandingIo's=%llu\n",
				(unsigned long long)pqisrc_read_device_active_io(softs, device));
			return;
		}
	}
#else
	check_device_pending_commands_to_complete(softs, device);
#endif
}
Context not available. |