Changeset View
Changeset View
Standalone View
Standalone View
sys/dev/ice/ice_controlq.c
Context not available. | |||||
(qinfo)->sq.bal = prefix##_ATQBAL; \ | (qinfo)->sq.bal = prefix##_ATQBAL; \ | ||||
(qinfo)->sq.len_mask = prefix##_ATQLEN_ATQLEN_M; \ | (qinfo)->sq.len_mask = prefix##_ATQLEN_ATQLEN_M; \ | ||||
(qinfo)->sq.len_ena_mask = prefix##_ATQLEN_ATQENABLE_M; \ | (qinfo)->sq.len_ena_mask = prefix##_ATQLEN_ATQENABLE_M; \ | ||||
(qinfo)->sq.len_crit_mask = prefix##_ATQLEN_ATQCRIT_M; \ | |||||
(qinfo)->sq.head_mask = prefix##_ATQH_ATQH_M; \ | (qinfo)->sq.head_mask = prefix##_ATQH_ATQH_M; \ | ||||
(qinfo)->rq.head = prefix##_ARQH; \ | (qinfo)->rq.head = prefix##_ARQH; \ | ||||
(qinfo)->rq.tail = prefix##_ARQT; \ | (qinfo)->rq.tail = prefix##_ARQT; \ | ||||
Context not available. | |||||
(qinfo)->rq.bal = prefix##_ARQBAL; \ | (qinfo)->rq.bal = prefix##_ARQBAL; \ | ||||
(qinfo)->rq.len_mask = prefix##_ARQLEN_ARQLEN_M; \ | (qinfo)->rq.len_mask = prefix##_ARQLEN_ARQLEN_M; \ | ||||
(qinfo)->rq.len_ena_mask = prefix##_ARQLEN_ARQENABLE_M; \ | (qinfo)->rq.len_ena_mask = prefix##_ARQLEN_ARQENABLE_M; \ | ||||
(qinfo)->rq.len_crit_mask = prefix##_ARQLEN_ARQCRIT_M; \ | |||||
(qinfo)->rq.head_mask = prefix##_ARQH_ARQH_M; \ | (qinfo)->rq.head_mask = prefix##_ARQH_ARQH_M; \ | ||||
} while (0) | } while (0) | ||||
Context not available. | |||||
i--; | i--; | ||||
for (; i >= 0; i--) | for (; i >= 0; i--) | ||||
ice_free_dma_mem(hw, &cq->rq.r.rq_bi[i]); | ice_free_dma_mem(hw, &cq->rq.r.rq_bi[i]); | ||||
cq->rq.r.rq_bi = NULL; | |||||
ice_free(hw, cq->rq.dma_head); | ice_free(hw, cq->rq.dma_head); | ||||
cq->rq.dma_head = NULL; | |||||
return ICE_ERR_NO_MEMORY; | return ICE_ERR_NO_MEMORY; | ||||
} | } | ||||
Context not available. | |||||
i--; | i--; | ||||
for (; i >= 0; i--) | for (; i >= 0; i--) | ||||
ice_free_dma_mem(hw, &cq->sq.r.sq_bi[i]); | ice_free_dma_mem(hw, &cq->sq.r.sq_bi[i]); | ||||
cq->sq.r.sq_bi = NULL; | |||||
ice_free(hw, cq->sq.dma_head); | ice_free(hw, cq->sq.dma_head); | ||||
cq->sq.dma_head = NULL; | |||||
return ICE_ERR_NO_MEMORY; | return ICE_ERR_NO_MEMORY; | ||||
} | } | ||||
Context not available. | |||||
return ICE_SUCCESS; | return ICE_SUCCESS; | ||||
} | } | ||||
/**
 * ICE_FREE_CQ_BUFS - release all buffer memory owned by a control queue ring
 * @hw: pointer to the hardware structure
 * @qi: pointer to the control queue info structure
 * @ring: ring name token (sq or rq); pasted into the member names
 *
 * Frees every per-entry DMA buffer that was actually allocated (non-zero
 * physical address), then the command buffer list and the DMA head array.
 * The NULL checks make the macro safe to invoke on a partially initialized
 * queue, e.g. from an allocation-failure cleanup path where the buffer
 * array pointer has already been freed and cleared.
 */
#define ICE_FREE_CQ_BUFS(hw, qi, ring) \
do { \
	/* free descriptors */ \
	if ((qi)->ring.r.ring##_bi) { \
		int i; \
 \
		for (i = 0; i < (qi)->num_##ring##_entries; i++) \
			if ((qi)->ring.r.ring##_bi[i].pa) \
				ice_free_dma_mem((hw), \
					&(qi)->ring.r.ring##_bi[i]); \
	} \
	/* free the buffer info list */ \
	if ((qi)->ring.cmd_buf) \
		ice_free(hw, (qi)->ring.cmd_buf); \
	/* free DMA head */ \
	ice_free(hw, (qi)->ring.dma_head); \
} while (0)
/** | /** | ||||
* ice_init_sq - main initialization routine for Control ATQ | * ice_init_sq - main initialization routine for Control ATQ | ||||
* @hw: pointer to the hardware structure | * @hw: pointer to the hardware structure | ||||
Context not available. | |||||
goto init_ctrlq_exit; | goto init_ctrlq_exit; | ||||
init_ctrlq_free_rings: | init_ctrlq_free_rings: | ||||
ICE_FREE_CQ_BUFS(hw, cq, sq); | |||||
ice_free_cq_ring(hw, &cq->sq); | ice_free_cq_ring(hw, &cq->sq); | ||||
init_ctrlq_exit: | init_ctrlq_exit: | ||||
Context not available. | |||||
goto init_ctrlq_exit; | goto init_ctrlq_exit; | ||||
init_ctrlq_free_rings: | init_ctrlq_free_rings: | ||||
ICE_FREE_CQ_BUFS(hw, cq, rq); | |||||
ice_free_cq_ring(hw, &cq->rq); | ice_free_cq_ring(hw, &cq->rq); | ||||
init_ctrlq_exit: | init_ctrlq_exit: | ||||
return ret_code; | return ret_code; | ||||
} | } | ||||
/**
 * ICE_FREE_CQ_BUFS - release all buffer memory owned by a control queue ring
 * @hw: pointer to the hardware structure
 * @qi: pointer to the control queue info structure
 * @ring: ring name token (sq or rq); pasted into the member names
 *
 * Frees every per-entry DMA buffer that was actually allocated (non-zero
 * physical address), then the command buffer list and the DMA head array.
 *
 * Fix: guard the descriptor loop with a NULL check on the buffer array.
 * The allocation-failure cleanup paths in this file free the array and set
 * (qi)->ring.r.ring##_bi to NULL, so the unguarded loop could dereference
 * a NULL pointer when this macro runs on a partially initialized queue.
 */
#define ICE_FREE_CQ_BUFS(hw, qi, ring) \
do { \
	/* free descriptors */ \
	if ((qi)->ring.r.ring##_bi) { \
		int i; \
 \
		for (i = 0; i < (qi)->num_##ring##_entries; i++) \
			if ((qi)->ring.r.ring##_bi[i].pa) \
				ice_free_dma_mem((hw), \
					&(qi)->ring.r.ring##_bi[i]); \
	} \
	/* free the buffer info list */ \
	if ((qi)->ring.cmd_buf) \
		ice_free(hw, (qi)->ring.cmd_buf); \
	/* free DMA head */ \
	ice_free(hw, (qi)->ring.dma_head); \
} while (0)
/** | /** | ||||
* ice_shutdown_sq - shutdown the Control ATQ | * ice_shutdown_sq - shutdown the Control ATQ | ||||
* @hw: pointer to the hardware structure | * @hw: pointer to the hardware structure | ||||
Context not available. | |||||
return ret_code; | return ret_code; | ||||
} | } | ||||
/**
 * ice_init_all_ctrlq - main initialization routine for all control queues
 * @hw: pointer to the hardware structure
 *
 * Prior to calling this function, the driver *MUST* set the following fields
 * in the cq->structure for all control queues:
 * - cq->num_sq_entries
 * - cq->num_rq_entries
 * - cq->rq_buf_size
 * - cq->sq_buf_size
 *
 * NOTE: this function does not initialize the controlq locks.
 */
enum ice_status ice_init_all_ctrlq(struct ice_hw *hw)
{
	enum ice_status status;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	/* Init FW admin queue */
	status = ice_init_ctrlq(hw, ICE_CTL_Q_ADMIN);
	if (status)
		return status;

	/* Verify the admin queue is operational before bringing up the
	 * mailbox queue; bail out on any failure.
	 */
	status = ice_init_check_adminq(hw);
	if (status)
		return status;

	/* Init Mailbox queue */
	return ice_init_ctrlq(hw, ICE_CTL_Q_MAILBOX);
}
/**
 * ice_init_ctrlq_locks - Initialize locks for a control queue
 * @cq: pointer to the control queue
 *
 * Initializes the send and receive queue locks for a given control queue.
 * Called once per queue at driver load (see ice_create_all_ctrlq); the
 * locks are not re-initialized on a runtime controlq re-init.
 */
static void ice_init_ctrlq_locks(struct ice_ctl_q_info *cq)
{
	ice_init_lock(&cq->sq_lock);
	ice_init_lock(&cq->rq_lock);
}
/**
 * ice_create_all_ctrlq - main initialization routine for all control queues
 * @hw: pointer to the hardware structure
 *
 * Prior to calling this function, the driver *MUST* set the following fields
 * in the cq->structure for all control queues:
 * - cq->num_sq_entries
 * - cq->num_rq_entries
 * - cq->rq_buf_size
 * - cq->sq_buf_size
 *
 * This function creates all the control queue locks and then calls
 * ice_init_all_ctrlq. It should be called once during driver load. If the
 * driver needs to re-initialize control queues at run time it should call
 * ice_init_all_ctrlq instead.
 */
enum ice_status ice_create_all_ctrlq(struct ice_hw *hw)
{
	/* Locks are created exactly once here and survive controlq
	 * re-initialization.
	 */
	ice_init_ctrlq_locks(&hw->adminq);
	ice_init_ctrlq_locks(&hw->mailboxq);

	return ice_init_all_ctrlq(hw);
}
/** | /** | ||||
* ice_shutdown_ctrlq - shutdown routine for any control queue | * ice_shutdown_ctrlq - shutdown routine for any control queue | ||||
* @hw: pointer to the hardware structure | * @hw: pointer to the hardware structure | ||||
Context not available. | |||||
ice_shutdown_ctrlq(hw, ICE_CTL_Q_MAILBOX); | ice_shutdown_ctrlq(hw, ICE_CTL_Q_MAILBOX); | ||||
} | } | ||||
/**
 * ice_init_all_ctrlq - main initialization routine for all control queues
 * @hw: pointer to the hardware structure
 *
 * Prior to calling this function, the driver *MUST* set the following fields
 * in the cq->structure for all control queues:
 * - cq->num_sq_entries
 * - cq->num_rq_entries
 * - cq->rq_buf_size
 * - cq->sq_buf_size
 *
 * NOTE: this function does not initialize the controlq locks.
 */
enum ice_status ice_init_all_ctrlq(struct ice_hw *hw)
{
	enum ice_status status;
	u32 retry = 0;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	/* Init FW admin queue. If the FW reports a critical error during the
	 * adminq sanity check, tear the queue down and retry the whole
	 * bring-up, up to ICE_CTL_Q_ADMIN_INIT_TIMEOUT attempts with an
	 * ICE_CTL_Q_ADMIN_INIT_MSEC delay between attempts.
	 */
	do {
		status = ice_init_ctrlq(hw, ICE_CTL_Q_ADMIN);
		if (status)
			return status;

		status = ice_init_check_adminq(hw);
		if (status != ICE_ERR_AQ_FW_CRITICAL)
			break;

		ice_debug(hw, ICE_DBG_AQ_MSG, "Retry Admin Queue init due to FW critical error\n");
		ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN);
		ice_msec_delay(ICE_CTL_Q_ADMIN_INIT_MSEC, true);
	} while (retry++ < ICE_CTL_Q_ADMIN_INIT_TIMEOUT);

	/* still failing after all retries (or a non-critical error) */
	if (status)
		return status;

	/* Init Mailbox queue */
	return ice_init_ctrlq(hw, ICE_CTL_Q_MAILBOX);
}
/**
 * ice_init_ctrlq_locks - Initialize locks for a control queue
 * @cq: pointer to the control queue
 *
 * Initializes the send and receive queue locks for a given control queue.
 * Called once per queue at driver load (see ice_create_all_ctrlq); the
 * locks are not re-initialized on a runtime controlq re-init.
 */
static void ice_init_ctrlq_locks(struct ice_ctl_q_info *cq)
{
	ice_init_lock(&cq->sq_lock);
	ice_init_lock(&cq->rq_lock);
}
/**
 * ice_create_all_ctrlq - main initialization routine for all control queues
 * @hw: pointer to the hardware structure
 *
 * Prior to calling this function, the driver *MUST* set the following fields
 * in the cq->structure for all control queues:
 * - cq->num_sq_entries
 * - cq->num_rq_entries
 * - cq->rq_buf_size
 * - cq->sq_buf_size
 *
 * This function creates all the control queue locks and then calls
 * ice_init_all_ctrlq. It should be called once during driver load. If the
 * driver needs to re-initialize control queues at run time it should call
 * ice_init_all_ctrlq instead.
 */
enum ice_status ice_create_all_ctrlq(struct ice_hw *hw)
{
	/* Locks are created exactly once here and survive controlq
	 * re-initialization.
	 */
	ice_init_ctrlq_locks(&hw->adminq);
	ice_init_ctrlq_locks(&hw->mailboxq);

	return ice_init_all_ctrlq(hw);
}
/** | /** | ||||
* ice_destroy_ctrlq_locks - Destroy locks for a control queue | * ice_destroy_ctrlq_locks - Destroy locks for a control queue | ||||
* @cq: pointer to the control queue | * @cq: pointer to the control queue | ||||
* | * | ||||
* Destroys the send and receive queue locks for a given control queue. | * Destroys the send and receive queue locks for a given control queue. | ||||
*/ | */ | ||||
static void | static void ice_destroy_ctrlq_locks(struct ice_ctl_q_info *cq) | ||||
ice_destroy_ctrlq_locks(struct ice_ctl_q_info *cq) | |||||
{ | { | ||||
ice_destroy_lock(&cq->sq_lock); | ice_destroy_lock(&cq->sq_lock); | ||||
ice_destroy_lock(&cq->rq_lock); | ice_destroy_lock(&cq->rq_lock); | ||||
Context not available. | |||||
details = ICE_CTL_Q_DETAILS(*sq, ntc); | details = ICE_CTL_Q_DETAILS(*sq, ntc); | ||||
while (rd32(hw, cq->sq.head) != ntc) { | while (rd32(hw, cq->sq.head) != ntc) { | ||||
ice_debug(hw, ICE_DBG_AQ_MSG, | ice_debug(hw, ICE_DBG_AQ_MSG, "ntc %d head %d.\n", ntc, rd32(hw, cq->sq.head)); | ||||
"ntc %d head %d.\n", ntc, rd32(hw, cq->sq.head)); | |||||
ice_memset(desc, 0, sizeof(*desc), ICE_DMA_MEM); | ice_memset(desc, 0, sizeof(*desc), ICE_DMA_MEM); | ||||
ice_memset(details, 0, sizeof(*details), ICE_NONDMA_MEM); | ice_memset(details, 0, sizeof(*details), ICE_NONDMA_MEM); | ||||
ntc++; | ntc++; | ||||
Context not available. | |||||
datalen = LE16_TO_CPU(cq_desc->datalen); | datalen = LE16_TO_CPU(cq_desc->datalen); | ||||
flags = LE16_TO_CPU(cq_desc->flags); | flags = LE16_TO_CPU(cq_desc->flags); | ||||
ice_debug(hw, ICE_DBG_AQ_DESC, | ice_debug(hw, ICE_DBG_AQ_DESC, "CQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n", | ||||
"CQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n", | |||||
LE16_TO_CPU(cq_desc->opcode), flags, datalen, | LE16_TO_CPU(cq_desc->opcode), flags, datalen, | ||||
LE16_TO_CPU(cq_desc->retval)); | LE16_TO_CPU(cq_desc->retval)); | ||||
ice_debug(hw, ICE_DBG_AQ_DESC, "\tcookie (h,l) 0x%08X 0x%08X\n", | ice_debug(hw, ICE_DBG_AQ_DESC, "\tcookie (h,l) 0x%08X 0x%08X\n", | ||||
Context not available. | |||||
cq->sq_last_status = ICE_AQ_RC_OK; | cq->sq_last_status = ICE_AQ_RC_OK; | ||||
if (!cq->sq.count) { | if (!cq->sq.count) { | ||||
ice_debug(hw, ICE_DBG_AQ_MSG, | ice_debug(hw, ICE_DBG_AQ_MSG, "Control Send queue not initialized.\n"); | ||||
"Control Send queue not initialized.\n"); | |||||
status = ICE_ERR_AQ_EMPTY; | status = ICE_ERR_AQ_EMPTY; | ||||
goto sq_send_command_error; | goto sq_send_command_error; | ||||
} | } | ||||
Context not available. | |||||
if (buf) { | if (buf) { | ||||
if (buf_size > cq->sq_buf_size) { | if (buf_size > cq->sq_buf_size) { | ||||
ice_debug(hw, ICE_DBG_AQ_MSG, | ice_debug(hw, ICE_DBG_AQ_MSG, "Invalid buffer size for Control Send queue: %d.\n", | ||||
"Invalid buffer size for Control Send queue: %d.\n", | |||||
buf_size); | buf_size); | ||||
status = ICE_ERR_INVAL_SIZE; | status = ICE_ERR_INVAL_SIZE; | ||||
goto sq_send_command_error; | goto sq_send_command_error; | ||||
Context not available. | |||||
val = rd32(hw, cq->sq.head); | val = rd32(hw, cq->sq.head); | ||||
if (val >= cq->num_sq_entries) { | if (val >= cq->num_sq_entries) { | ||||
ice_debug(hw, ICE_DBG_AQ_MSG, | ice_debug(hw, ICE_DBG_AQ_MSG, "head overrun at %d in the Control Send Queue ring\n", | ||||
"head overrun at %d in the Control Send Queue ring\n", | |||||
val); | val); | ||||
status = ICE_ERR_AQ_EMPTY; | status = ICE_ERR_AQ_EMPTY; | ||||
goto sq_send_command_error; | goto sq_send_command_error; | ||||
Context not available. | |||||
* called in a separate thread in case of asynchronous completions. | * called in a separate thread in case of asynchronous completions. | ||||
*/ | */ | ||||
if (ice_clean_sq(hw, cq) == 0) { | if (ice_clean_sq(hw, cq) == 0) { | ||||
ice_debug(hw, ICE_DBG_AQ_MSG, | ice_debug(hw, ICE_DBG_AQ_MSG, "Error: Control Send Queue is full.\n"); | ||||
"Error: Control Send Queue is full.\n"); | |||||
status = ICE_ERR_AQ_FULL; | status = ICE_ERR_AQ_FULL; | ||||
goto sq_send_command_error; | goto sq_send_command_error; | ||||
} | } | ||||
Context not available. | |||||
} | } | ||||
/* Debug desc and buffer */ | /* Debug desc and buffer */ | ||||
ice_debug(hw, ICE_DBG_AQ_DESC, | ice_debug(hw, ICE_DBG_AQ_DESC, "ATQ: Control Send queue desc and buffer:\n"); | ||||
"ATQ: Control Send queue desc and buffer:\n"); | |||||
ice_debug_cq(hw, (void *)desc_on_ring, buf, buf_size); | ice_debug_cq(hw, (void *)desc_on_ring, buf, buf_size); | ||||
Context not available. | |||||
u16 copy_size = LE16_TO_CPU(desc->datalen); | u16 copy_size = LE16_TO_CPU(desc->datalen); | ||||
if (copy_size > buf_size) { | if (copy_size > buf_size) { | ||||
ice_debug(hw, ICE_DBG_AQ_MSG, | ice_debug(hw, ICE_DBG_AQ_MSG, "Return len %d > than buf len %d\n", | ||||
"Return len %d > than buf len %d\n", | |||||
copy_size, buf_size); | copy_size, buf_size); | ||||
status = ICE_ERR_AQ_ERROR; | status = ICE_ERR_AQ_ERROR; | ||||
} else { | } else { | ||||
Context not available. | |||||
} | } | ||||
retval = LE16_TO_CPU(desc->retval); | retval = LE16_TO_CPU(desc->retval); | ||||
if (retval) { | if (retval) { | ||||
ice_debug(hw, ICE_DBG_AQ_MSG, | ice_debug(hw, ICE_DBG_AQ_MSG, "Control Send Queue command 0x%04X completed with error 0x%X\n", | ||||
"Control Send Queue command 0x%04X completed with error 0x%X\n", | |||||
LE16_TO_CPU(desc->opcode), | LE16_TO_CPU(desc->opcode), | ||||
retval); | retval); | ||||
Context not available. | |||||
cq->sq_last_status = (enum ice_aq_err)retval; | cq->sq_last_status = (enum ice_aq_err)retval; | ||||
} | } | ||||
ice_debug(hw, ICE_DBG_AQ_MSG, | ice_debug(hw, ICE_DBG_AQ_MSG, "ATQ: desc and buffer writeback:\n"); | ||||
"ATQ: desc and buffer writeback:\n"); | |||||
ice_debug_cq(hw, (void *)desc, buf, buf_size); | ice_debug_cq(hw, (void *)desc, buf, buf_size); | ||||
Context not available. | |||||
/* update the error if time out occurred */ | /* update the error if time out occurred */ | ||||
if (!cmd_completed) { | if (!cmd_completed) { | ||||
ice_debug(hw, ICE_DBG_AQ_MSG, | if (rd32(hw, cq->rq.len) & cq->rq.len_crit_mask || | ||||
"Control Send Queue Writeback timeout.\n"); | rd32(hw, cq->sq.len) & cq->sq.len_crit_mask) { | ||||
status = ICE_ERR_AQ_TIMEOUT; | ice_debug(hw, ICE_DBG_AQ_MSG, "Critical FW error.\n"); | ||||
status = ICE_ERR_AQ_FW_CRITICAL; | |||||
} else { | |||||
ice_debug(hw, ICE_DBG_AQ_MSG, "Control Send Queue Writeback timeout.\n"); | |||||
status = ICE_ERR_AQ_TIMEOUT; | |||||
} | |||||
} | } | ||||
sq_send_command_error: | sq_send_command_error: | ||||
Context not available. | |||||
ice_acquire_lock(&cq->rq_lock); | ice_acquire_lock(&cq->rq_lock); | ||||
if (!cq->rq.count) { | if (!cq->rq.count) { | ||||
ice_debug(hw, ICE_DBG_AQ_MSG, | ice_debug(hw, ICE_DBG_AQ_MSG, "Control Receive queue not initialized.\n"); | ||||
"Control Receive queue not initialized.\n"); | |||||
ret_code = ICE_ERR_AQ_EMPTY; | ret_code = ICE_ERR_AQ_EMPTY; | ||||
goto clean_rq_elem_err; | goto clean_rq_elem_err; | ||||
} | } | ||||
Context not available. | |||||
flags = LE16_TO_CPU(desc->flags); | flags = LE16_TO_CPU(desc->flags); | ||||
if (flags & ICE_AQ_FLAG_ERR) { | if (flags & ICE_AQ_FLAG_ERR) { | ||||
ret_code = ICE_ERR_AQ_ERROR; | ret_code = ICE_ERR_AQ_ERROR; | ||||
ice_debug(hw, ICE_DBG_AQ_MSG, | ice_debug(hw, ICE_DBG_AQ_MSG, "Control Receive Queue Event 0x%04X received with error 0x%X\n", | ||||
"Control Receive Queue Event 0x%04X received with error 0x%X\n", | |||||
LE16_TO_CPU(desc->opcode), | LE16_TO_CPU(desc->opcode), | ||||
cq->rq_last_status); | cq->rq_last_status); | ||||
} | } | ||||
Context not available. | |||||
ice_debug(hw, ICE_DBG_AQ_DESC, "ARQ: desc and buffer:\n"); | ice_debug(hw, ICE_DBG_AQ_DESC, "ARQ: desc and buffer:\n"); | ||||
ice_debug_cq(hw, (void *)desc, e->msg_buf, | ice_debug_cq(hw, (void *)desc, e->msg_buf, cq->rq_buf_size); | ||||
cq->rq_buf_size); | |||||
/* Restore the original datalen and buffer address in the desc, | /* Restore the original datalen and buffer address in the desc, | ||||
* FW updates datalen to indicate the event message size | * FW updates datalen to indicate the event message size | ||||
Context not available. |