diff --git a/sys/dev/qat/qat_api/common/compression/dc_datapath.c b/sys/dev/qat/qat_api/common/compression/dc_datapath.c index 2e1f9ff96bd8..526954e15227 100644 --- a/sys/dev/qat/qat_api/common/compression/dc_datapath.c +++ b/sys/dev/qat/qat_api/common/compression/dc_datapath.c @@ -1,1982 +1,1980 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright(c) 2007-2022 Intel Corporation */ /* $FreeBSD$ */ /** ***************************************************************************** * @file dc_datapath.c * * @defgroup Dc_DataCompression DC Data Compression * * @ingroup Dc_DataCompression * * @description * Implementation of the Data Compression datapath operations. * *****************************************************************************/ /* ******************************************************************************* * Include public/global header files ******************************************************************************* */ #include "cpa.h" #include "cpa_dc.h" #include "cpa_dc_dp.h" /* ******************************************************************************* * Include private header files ******************************************************************************* */ #include "dc_session.h" #include "dc_datapath.h" #include "sal_statistics.h" #include "lac_common.h" #include "lac_mem.h" #include "lac_mem_pools.h" #include "sal_types_compression.h" #include "dc_stats.h" #include "lac_buffer_desc.h" #include "lac_sal.h" #include "lac_log.h" #include "lac_sync.h" #include "sal_service_state.h" #include "sal_qat_cmn_msg.h" #include "sal_hw_gen.h" #include "dc_error_counter.h" #define DC_COMP_MAX_BUFF_SIZE (1024 * 64) static QatUtilsAtomic dcErrorCount[MAX_DC_ERROR_TYPE]; void dcErrorLog(CpaDcReqStatus dcError) { Cpa32U absError = 0; absError = abs(dcError); if ((dcError < CPA_DC_OK) && (absError < MAX_DC_ERROR_TYPE)) { qatUtilsAtomicInc(&(dcErrorCount[absError])); } } Cpa64U getDcErrorCounter(CpaDcReqStatus dcError) { Cpa32U absError = 0; absError = abs(dcError); if (!(dcError >= CPA_DC_OK || dcError < CPA_DC_EMPTY_DYM_BLK)) { return (Cpa64U)qatUtilsAtomicGet(&dcErrorCount[absError]); } return 0; } static inline void dcUpdateXltOverflowChecksumsGen4(const dc_compression_cookie_t *pCookie, const icp_qat_fw_resp_comp_pars_t *pRespPars, CpaDcRqResults *pDcResults) { dc_session_desc_t *pSessionDesc = DC_SESSION_DESC_FROM_CTX_GET(pCookie->pSessionHandle); /* Recompute CRC checksum when either the checksum type * is CPA_DC_CRC32 or when the integrity CRCs are enabled. */ if (CPA_DC_CRC32 == pSessionDesc->checksumType) { pDcResults->checksum = pRespPars->crc.legacy.curr_crc32; /* No need to recalculate the swCrc64I here as this will get * handled later in dcHandleIntegrityChecksumsGen4. 
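 * On a translator (dynamic Huffman) overflow the firmware has still
 * consumed input, so the running legacy CRC32/Adler-32 value is captured
 * here to keep pDcResults->checksum consistent with the
 * pDcResults->consumed count reported for the partial request.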
*/ } else if (CPA_DC_ADLER32 == pSessionDesc->checksumType) { pDcResults->checksum = pRespPars->crc.legacy.curr_adler_32; } } void dcCompression_ProcessCallback(void *pRespMsg) { CpaStatus status = CPA_STATUS_SUCCESS; icp_qat_fw_comp_resp_t *pCompRespMsg = NULL; void *callbackTag = NULL; Cpa64U *pReqData = NULL; CpaDcDpOpData *pResponse = NULL; CpaDcRqResults *pResults = NULL; CpaDcCallbackFn pCbFunc = NULL; dc_session_desc_t *pSessionDesc = NULL; sal_compression_service_t *pService = NULL; dc_compression_cookie_t *pCookie = NULL; CpaDcOpData *pOpData = NULL; CpaBoolean cmpPass = CPA_TRUE, xlatPass = CPA_TRUE; CpaBoolean isDcDp = CPA_FALSE; CpaBoolean integrityCrcCheck = CPA_FALSE; CpaBoolean verifyHwIntegrityCrcs = CPA_FALSE; Cpa8U cmpErr = ERR_CODE_NO_ERROR, xlatErr = ERR_CODE_NO_ERROR; dc_request_dir_t compDecomp = DC_COMPRESSION_REQUEST; Cpa8U opStatus = ICP_QAT_FW_COMN_STATUS_FLAG_OK; Cpa8U hdrFlags = 0; /* Cast response message to compression response message type */ pCompRespMsg = (icp_qat_fw_comp_resp_t *)pRespMsg; /* Extract request data pointer from the opaque data */ LAC_MEM_SHARED_READ_TO_PTR(pCompRespMsg->opaque_data, pReqData); /* Extract fields from the request data structure */ pCookie = (dc_compression_cookie_t *)pReqData; if (!pCookie) return; pSessionDesc = DC_SESSION_DESC_FROM_CTX_GET(pCookie->pSessionHandle); pService = (sal_compression_service_t *)(pCookie->dcInstance); isDcDp = pSessionDesc->isDcDp; if (CPA_TRUE == isDcDp) { pResponse = (CpaDcDpOpData *)pReqData; pResults = &(pResponse->results); if (CPA_DC_DIR_DECOMPRESS == pSessionDesc->sessDirection) { compDecomp = DC_DECOMPRESSION_REQUEST; } pCookie = NULL; } else { pResults = pCookie->pResults; callbackTag = pCookie->callbackTag; pCbFunc = pCookie->pSessionDesc->pCompressionCb; compDecomp = pCookie->compDecomp; pOpData = pCookie->pDcOpData; } opStatus = pCompRespMsg->comn_resp.comn_status; if (NULL != pOpData) { verifyHwIntegrityCrcs = pOpData->verifyHwIntegrityCrcs; + integrityCrcCheck = pOpData->integrityCrcCheck; } hdrFlags = pCompRespMsg->comn_resp.hdr_flags; /* Get the cmp error code */ cmpErr = pCompRespMsg->comn_resp.comn_error.s1.cmp_err_code; if (ICP_QAT_FW_COMN_RESP_UNSUPPORTED_REQUEST_STAT_GET(opStatus)) { /* Compression not supported by firmware, set produced/consumed to zero and call the cb function with status CPA_STATUS_UNSUPPORTED */ QAT_UTILS_LOG("Compression feature not supported\n"); status = CPA_STATUS_UNSUPPORTED; pResults->status = (Cpa8S)cmpErr; pResults->consumed = 0; pResults->produced = 0; if (CPA_TRUE == isDcDp) { if (pResponse) pResponse->responseStatus = CPA_STATUS_UNSUPPORTED; (pService->pDcDpCb)(pResponse); } else { /* Free the memory pool */ - if (NULL != pCookie) { - Lac_MemPoolEntryFree(pCookie); - pCookie = NULL; - } + Lac_MemPoolEntryFree(pCookie); + pCookie = NULL; + if (NULL != pCbFunc) { pCbFunc(callbackTag, status); } } if (DC_COMPRESSION_REQUEST == compDecomp) { COMPRESSION_STAT_INC(numCompCompletedErrors, pService); } else { COMPRESSION_STAT_INC(numDecompCompletedErrors, pService); } return; } else { /* Check compression response status */ cmpPass = (CpaBoolean)(ICP_QAT_FW_COMN_STATUS_FLAG_OK == ICP_QAT_FW_COMN_RESP_CMP_STAT_GET(opStatus)); } if (isDcGen2x(pService)) { /* QAT1.7 and QAT 1.8 hardware */ if (CPA_DC_INCOMPLETE_FILE_ERR == (Cpa8S)cmpErr) { cmpPass = CPA_TRUE; cmpErr = ERR_CODE_NO_ERROR; } } else { /* QAT2.0 hardware cancels the incomplete file errors * only for DEFLATE algorithm. 
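	 * (On the Gen2 path above the error is cleared regardless of the
	 * algorithm; here the session must also be using CPA_DC_DEFLATE.)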
* Decompression direction is not tested in the callback as * the request does not allow it. */ if ((pSessionDesc->compType == CPA_DC_DEFLATE) && (CPA_DC_INCOMPLETE_FILE_ERR == (Cpa8S)cmpErr)) { cmpPass = CPA_TRUE; cmpErr = ERR_CODE_NO_ERROR; } } /* log the slice hang and endpoint push/pull error inside the response */ if (ERR_CODE_SSM_ERROR == (Cpa8S)cmpErr) { QAT_UTILS_LOG( "Slice hang detected on the compression slice.\n"); } else if (ERR_CODE_ENDPOINT_ERROR == (Cpa8S)cmpErr) { QAT_UTILS_LOG( "PCIe End Point Push/Pull or TI/RI Parity error detected.\n"); } /* We return the compression error code for now. We would need to update * the API if we decide to return both error codes */ pResults->status = (Cpa8S)cmpErr; /* Check the translator status */ if ((DC_COMPRESSION_REQUEST == compDecomp) && (CPA_DC_HT_FULL_DYNAMIC == pSessionDesc->huffType)) { /* Check translator response status */ xlatPass = (CpaBoolean)(ICP_QAT_FW_COMN_STATUS_FLAG_OK == ICP_QAT_FW_COMN_RESP_XLAT_STAT_GET(opStatus)); /* Get the translator error code */ xlatErr = pCompRespMsg->comn_resp.comn_error.s1.xlat_err_code; /* Return a fatal error or a potential error in the translator * slice if the compression slice did not return any error */ if ((CPA_DC_OK == pResults->status) || (CPA_DC_FATALERR == (Cpa8S)xlatErr)) { pResults->status = (Cpa8S)xlatErr; } } /* Update dc error counter */ dcErrorLog(pResults->status); if (CPA_FALSE == isDcDp) { /* In case of any error for an end of packet request, we need to * update * the request type for the following request */ if (CPA_DC_FLUSH_FINAL == pCookie->flushFlag && cmpPass && xlatPass) { pSessionDesc->requestType = DC_REQUEST_FIRST; } else { pSessionDesc->requestType = DC_REQUEST_SUBSEQUENT; } if ((CPA_DC_STATEFUL == pSessionDesc->sessState) || ((CPA_DC_STATELESS == pSessionDesc->sessState) && (DC_COMPRESSION_REQUEST == compDecomp))) { /* Overflow is a valid use case for Traditional API * only. Stateful Overflow is supported in both * compression and decompression direction. Stateless * Overflow is supported only in compression direction. */ if (CPA_DC_OVERFLOW == (Cpa8S)cmpErr) cmpPass = CPA_TRUE; if (CPA_DC_OVERFLOW == (Cpa8S)xlatErr) { if (isDcGen4x(pService) && (CPA_TRUE == pService->comp_device_data .translatorOverflow)) { pResults->consumed = pCompRespMsg->comp_resp_pars .input_byte_counter; dcUpdateXltOverflowChecksumsGen4( pCookie, &pCompRespMsg->comp_resp_pars, pResults); } xlatPass = CPA_TRUE; } } } else { if (CPA_DC_OVERFLOW == (Cpa8S)cmpErr) { cmpPass = CPA_FALSE; } if (CPA_DC_OVERFLOW == (Cpa8S)xlatErr) { /* XLT overflow is not valid for Data Plane requests */ xlatPass = CPA_FALSE; } } if ((CPA_TRUE == cmpPass) && (CPA_TRUE == xlatPass)) { /* Extract the response from the firmware */ pResults->consumed = pCompRespMsg->comp_resp_pars.input_byte_counter; pResults->produced = pCompRespMsg->comp_resp_pars.output_byte_counter; pSessionDesc->cumulativeConsumedBytes += pResults->consumed; /* Handle Checksum for end to end data integrity. 
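		 * When end-to-end integrity CRCs are enabled, the legacy
		 * checksum field in the response is not used; the software
		 * CRC seed kept in the session descriptor is carried forward
		 * instead. Otherwise the CRC32/Adler-32 reported by the
		 * firmware becomes both the result checksum and the seed for
		 * the next request.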
*/ if (CPA_TRUE == pService->generic_service_info.integrityCrcCheck && CPA_TRUE == integrityCrcCheck) { pSessionDesc->previousChecksum = pSessionDesc->seedSwCrc.swCrc32I; } else if (CPA_DC_OVERFLOW != (Cpa8S)xlatErr) { if (CPA_DC_CRC32 == pSessionDesc->checksumType) { pResults->checksum = pCompRespMsg->comp_resp_pars.crc.legacy .curr_crc32; } else if (CPA_DC_ADLER32 == pSessionDesc->checksumType) { pResults->checksum = pCompRespMsg->comp_resp_pars.crc.legacy .curr_adler_32; } pSessionDesc->previousChecksum = pResults->checksum; } if (DC_DECOMPRESSION_REQUEST == compDecomp) { pResults->endOfLastBlock = (ICP_QAT_FW_COMN_STATUS_CMP_END_OF_LAST_BLK_FLAG_SET == ICP_QAT_FW_COMN_RESP_CMP_END_OF_LAST_BLK_FLAG_GET( opStatus)); } /* Save the checksum for the next request */ if ((CPA_DC_OVERFLOW != (Cpa8S)xlatErr) && (CPA_TRUE == verifyHwIntegrityCrcs)) { pSessionDesc->previousChecksum = pSessionDesc->seedSwCrc.swCrc32I; } /* Check if a CNV recovery happened and * increase stats counter */ if ((DC_COMPRESSION_REQUEST == compDecomp) && ICP_QAT_FW_COMN_HDR_CNV_FLAG_GET(hdrFlags) && ICP_QAT_FW_COMN_HDR_CNVNR_FLAG_GET(hdrFlags)) { COMPRESSION_STAT_INC(numCompCnvErrorsRecovered, pService); } if (CPA_TRUE == isDcDp) { if (pResponse) pResponse->responseStatus = CPA_STATUS_SUCCESS; } else { if (DC_COMPRESSION_REQUEST == compDecomp) { COMPRESSION_STAT_INC(numCompCompleted, pService); } else { COMPRESSION_STAT_INC(numDecompCompleted, pService); } } } else { #ifdef ICP_DC_RETURN_COUNTERS_ON_ERROR /* Extract the response from the firmware */ pResults->consumed = pCompRespMsg->comp_resp_pars.input_byte_counter; pResults->produced = pCompRespMsg->comp_resp_pars.output_byte_counter; if (CPA_DC_STATEFUL == pSessionDesc->sessState) { pSessionDesc->cumulativeConsumedBytes += pResults->consumed; } else { /* In the stateless case all requests have both SOP and * EOP set */ pSessionDesc->cumulativeConsumedBytes = pResults->consumed; } #else pResults->consumed = 0; pResults->produced = 0; #endif if (CPA_DC_OVERFLOW == pResults->status && CPA_DC_STATELESS == pSessionDesc->sessState) { /* This error message will be returned by Data Plane API * in both * compression and decompression direction. With * Traditional API * this error message will be returned only in stateless * decompression direction */ QAT_UTILS_LOG( "Unrecoverable error: stateless overflow. 
You may need to increase the size of your destination buffer.\n"); } if (CPA_TRUE == isDcDp) { if (pResponse) pResponse->responseStatus = CPA_STATUS_FAIL; } else { if (CPA_DC_OK != pResults->status && CPA_DC_INCOMPLETE_FILE_ERR != pResults->status) { status = CPA_STATUS_FAIL; } if (DC_COMPRESSION_REQUEST == compDecomp) { COMPRESSION_STAT_INC(numCompCompletedErrors, pService); } else { COMPRESSION_STAT_INC(numDecompCompletedErrors, pService); } } } if (CPA_TRUE == isDcDp) { /* Decrement number of stateless pending callbacks for session */ pSessionDesc->pendingDpStatelessCbCount--; (pService->pDcDpCb)(pResponse); } else { /* Decrement number of pending callbacks for session */ if (CPA_DC_STATELESS == pSessionDesc->sessState) { qatUtilsAtomicDec( &(pCookie->pSessionDesc->pendingStatelessCbCount)); } else if (0 != qatUtilsAtomicGet(&pCookie->pSessionDesc ->pendingStatefulCbCount)) { qatUtilsAtomicDec( &(pCookie->pSessionDesc->pendingStatefulCbCount)); } /* Free the memory pool */ - if (NULL != pCookie) { - Lac_MemPoolEntryFree(pCookie); - pCookie = NULL; - } + Lac_MemPoolEntryFree(pCookie); + pCookie = NULL; if (NULL != pCbFunc) { pCbFunc(callbackTag, status); } } } /** ***************************************************************************** * @ingroup Dc_DataCompression * Check that all the parameters in the pOpData structure are valid * * @description * Check that all the parameters in the pOpData structure are valid * * @param[in] pService Pointer to the compression service * @param[in] pOpData Pointer to request information structure * holding parameters for cpaDcCompress2 and * CpaDcDecompressData2 * @retval CPA_STATUS_SUCCESS Function executed successfully * @retval CPA_STATUS_INVALID_PARAM Invalid parameter passed in * *****************************************************************************/ CpaStatus dcCheckOpData(sal_compression_service_t *pService, CpaDcOpData *pOpData) { CpaDcSkipMode skipMode = 0; if ((pOpData->flushFlag < CPA_DC_FLUSH_NONE) || (pOpData->flushFlag > CPA_DC_FLUSH_FULL)) { LAC_INVALID_PARAM_LOG("Invalid flushFlag value"); return CPA_STATUS_INVALID_PARAM; } skipMode = pOpData->inputSkipData.skipMode; if ((skipMode < CPA_DC_SKIP_DISABLED) || (skipMode > CPA_DC_SKIP_STRIDE)) { LAC_INVALID_PARAM_LOG("Invalid input skip mode value"); return CPA_STATUS_INVALID_PARAM; } skipMode = pOpData->outputSkipData.skipMode; if ((skipMode < CPA_DC_SKIP_DISABLED) || (skipMode > CPA_DC_SKIP_STRIDE)) { LAC_INVALID_PARAM_LOG("Invalid output skip mode value"); return CPA_STATUS_INVALID_PARAM; } if (pOpData->integrityCrcCheck == CPA_FALSE && pOpData->verifyHwIntegrityCrcs == CPA_TRUE) { LAC_INVALID_PARAM_LOG( "integrityCrcCheck must be set to true" "in order to enable verifyHwIntegrityCrcs"); return CPA_STATUS_INVALID_PARAM; } if (pOpData->integrityCrcCheck != CPA_TRUE && pOpData->integrityCrcCheck != CPA_FALSE) { LAC_INVALID_PARAM_LOG("Invalid integrityCrcCheck value"); return CPA_STATUS_INVALID_PARAM; } if (pOpData->verifyHwIntegrityCrcs != CPA_TRUE && pOpData->verifyHwIntegrityCrcs != CPA_FALSE) { LAC_INVALID_PARAM_LOG("Invalid verifyHwIntegrityCrcs value"); return CPA_STATUS_INVALID_PARAM; } if (pOpData->compressAndVerify != CPA_TRUE && pOpData->compressAndVerify != CPA_FALSE) { LAC_INVALID_PARAM_LOG("Invalid cnv decompress check value"); return CPA_STATUS_INVALID_PARAM; } if (CPA_TRUE == pOpData->integrityCrcCheck && CPA_FALSE == pService->generic_service_info.integrityCrcCheck) { LAC_INVALID_PARAM_LOG("Integrity CRC check is not " "supported on this device"); return 
CPA_STATUS_INVALID_PARAM; } if (CPA_TRUE == pOpData->integrityCrcCheck && NULL == pOpData->pCrcData) { LAC_INVALID_PARAM_LOG("Integrity CRC data structure " "not initialized in CpaDcOpData"); return CPA_STATUS_INVALID_PARAM; } return CPA_STATUS_SUCCESS; } /** ***************************************************************************** * @ingroup Dc_DataCompression * Check the compression source buffer for Batch and Pack API. * * @description * Check that all the parameters used for a Pack compression * request are valid. This function essentially checks the source buffer * parameters and results structure parameters. * * @param[in] pSessionHandle Session handle * @param[in] pSrcBuff Pointer to data buffer for compression * @param[in] pDestBuff Pointer to buffer space allocated for * output data * @param[in] pResults Pointer to results structure * @param[in] flushFlag Indicates the type of flush to be * performed * @param[in] srcBuffSize Size of the source buffer * @param[in] skipData Pointer to the skip data structure, * or NULL when unused * * @retval CPA_STATUS_SUCCESS Function executed successfully * @retval CPA_STATUS_INVALID_PARAM Invalid parameter passed in * *****************************************************************************/ static CpaStatus dcCheckSourceData(CpaDcSessionHandle pSessionHandle, CpaBufferList *pSrcBuff, CpaBufferList *pDestBuff, CpaDcRqResults *pResults, CpaDcFlush flushFlag, Cpa64U srcBuffSize, CpaDcSkipData *skipData) { dc_session_desc_t *pSessionDesc = NULL; LAC_CHECK_NULL_PARAM(pSessionHandle); LAC_CHECK_NULL_PARAM(pSrcBuff); LAC_CHECK_NULL_PARAM(pDestBuff); LAC_CHECK_NULL_PARAM(pResults); pSessionDesc = DC_SESSION_DESC_FROM_CTX_GET(pSessionHandle); if (NULL == pSessionDesc) { LAC_INVALID_PARAM_LOG("Session handle not as expected"); return CPA_STATUS_INVALID_PARAM; } if ((flushFlag < CPA_DC_FLUSH_NONE) || (flushFlag > CPA_DC_FLUSH_FULL)) { LAC_INVALID_PARAM_LOG("Invalid flushFlag value"); return CPA_STATUS_INVALID_PARAM; } if (pSrcBuff == pDestBuff) { LAC_INVALID_PARAM_LOG("In place operation not supported"); return CPA_STATUS_INVALID_PARAM; } /* Compressing zero bytes is not supported for stateless sessions * for non-Batch and Pack requests */ if ((CPA_DC_STATELESS == pSessionDesc->sessState) && (0 == srcBuffSize) && (NULL == skipData)) { LAC_INVALID_PARAM_LOG( "The source buffer size needs to be greater than " "zero bytes for stateless sessions"); return CPA_STATUS_INVALID_PARAM; } if (srcBuffSize > DC_BUFFER_MAX_SIZE) { LAC_INVALID_PARAM_LOG( "The source buffer size needs to be less than or " "equal to 2^32-1 bytes"); return CPA_STATUS_INVALID_PARAM; } return CPA_STATUS_SUCCESS; } /** ***************************************************************************** * @ingroup Dc_DataCompression * Check the compression or decompression function parameters. * * @description * Check that all the parameters used for a Batch and Pack compression * request are valid. This function essentially checks the destination * buffer parameters and intermediate buffer parameters.
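 * For example: a compression request on a session using
 * CPA_DC_HT_FULL_DYNAMIC is rejected when the destination buffer is
 * smaller than DC_DEST_BUFFER_DYN_MIN_SIZE (128 bytes), a static
 * Huffman request when it is smaller than the device's
 * minOutputBuffSize, and a decompression request when it is zero bytes.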
* * @param[in] pService Pointer to the compression service * @param[in] pSessionHandle Session handle * @param[in] pDestBuff Pointer to buffer space allocated for * output data * @param[in] compDecomp Direction of the operation * * @retval CPA_STATUS_SUCCESS Function executed successfully * @retval CPA_STATUS_INVALID_PARAM Invalid parameter passed in * *****************************************************************************/ static CpaStatus dcCheckDestinationData(sal_compression_service_t *pService, CpaDcSessionHandle pSessionHandle, CpaBufferList *pDestBuff, dc_request_dir_t compDecomp) { dc_session_desc_t *pSessionDesc = NULL; Cpa64U destBuffSize = 0; LAC_CHECK_NULL_PARAM(pSessionHandle); LAC_CHECK_NULL_PARAM(pDestBuff); pSessionDesc = DC_SESSION_DESC_FROM_CTX_GET(pSessionHandle); if (NULL == pSessionDesc) { LAC_INVALID_PARAM_LOG("Session handle not as expected"); return CPA_STATUS_INVALID_PARAM; } if (LacBuffDesc_BufferListVerify(pDestBuff, &destBuffSize, LAC_NO_ALIGNMENT_SHIFT) != CPA_STATUS_SUCCESS) { LAC_INVALID_PARAM_LOG( "Invalid destination buffer list parameter"); return CPA_STATUS_INVALID_PARAM; } if (destBuffSize > DC_BUFFER_MAX_SIZE) { LAC_INVALID_PARAM_LOG( "The destination buffer size needs to be less " "than or equal to 2^32-1 bytes"); return CPA_STATUS_INVALID_PARAM; } if (CPA_TRUE == pSessionDesc->isDcDp) { LAC_INVALID_PARAM_LOG( "The session type should not be data plane"); return CPA_STATUS_INVALID_PARAM; } if (DC_COMPRESSION_REQUEST == compDecomp) { if (CPA_DC_HT_FULL_DYNAMIC == pSessionDesc->huffType) { /* Check if intermediate buffers are supported */ if ((isDcGen2x(pService)) && ((0 == pService->pInterBuffPtrsArrayPhyAddr) || (NULL == pService->pInterBuffPtrsArray))) { LAC_LOG_ERROR( "No intermediate buffer defined for this instance " "- see cpaDcStartInstance"); return CPA_STATUS_INVALID_PARAM; } /* Ensure that the destination buffer size is greater or * equal to 128B */ if (destBuffSize < DC_DEST_BUFFER_DYN_MIN_SIZE) { LAC_INVALID_PARAM_LOG( "Destination buffer size should be " "greater or equal to 128B"); return CPA_STATUS_INVALID_PARAM; } } else { /* Ensure that the destination buffer size is greater or * equal to devices min output buff size */ if (destBuffSize < pService->comp_device_data.minOutputBuffSize) { LAC_INVALID_PARAM_LOG1( "Destination buffer size should be " "greater or equal to %d bytes", pService->comp_device_data .minOutputBuffSize); return CPA_STATUS_INVALID_PARAM; } } } else { /* Ensure that the destination buffer size is greater than * 0 bytes */ if (destBuffSize < DC_DEST_BUFFER_DEC_MIN_SIZE) { LAC_INVALID_PARAM_LOG( "Destination buffer size should be " "greater than 0 bytes"); return CPA_STATUS_INVALID_PARAM; } } return CPA_STATUS_SUCCESS; } /** ***************************************************************************** * @ingroup Dc_DataCompression * Populate the compression request parameters * * @description * This function will populate the compression request parameters * * @param[out] pCompReqParams Pointer to the compression request parameters * @param[in] pCookie Pointer to the compression cookie * *****************************************************************************/ static void dcCompRequestParamsPopulate(icp_qat_fw_comp_req_params_t *pCompReqParams, dc_compression_cookie_t *pCookie) { pCompReqParams->comp_len = pCookie->srcTotalDataLenInBytes; pCompReqParams->out_buffer_sz = pCookie->dstTotalDataLenInBytes; } /** ***************************************************************************** * @ingroup 
Dc_DataCompression * Create the requests for compression or decompression * * @description * Create the requests for compression or decompression. This function * will update the cookie with all required information. * * @param[out] pCookie Pointer to the compression cookie * @param[in] pService Pointer to the compression service * @param[in] pSessionDesc Pointer to the session descriptor * @param[in] pSessionHandle Session handle * @param[in] pSrcBuff Pointer to data buffer for compression * @param[in] pDestBuff Pointer to buffer space for data after * compression * @param[in] pResults Pointer to results structure * @param[in] flushFlag Indicates the type of flush to be * performed * @param[in] pOpData Pointer to request information structure * holding parameters for cpaDcCompressData2 * and cpaDcDecompressData2 * @param[in] callbackTag Pointer to the callback tag * @param[in] compDecomp Direction of the operation * @param[in] cnvMode CNV mode * * @retval CPA_STATUS_SUCCESS Function executed successfully * @retval CPA_STATUS_INVALID_PARAM Invalid parameter passed in * *****************************************************************************/ static CpaStatus dcCreateRequest(dc_compression_cookie_t *pCookie, sal_compression_service_t *pService, dc_session_desc_t *pSessionDesc, CpaDcSessionHandle pSessionHandle, CpaBufferList *pSrcBuff, CpaBufferList *pDestBuff, CpaDcRqResults *pResults, CpaDcFlush flushFlag, CpaDcOpData *pOpData, void *callbackTag, dc_request_dir_t compDecomp, dc_cnv_mode_t cnvMode) { icp_qat_fw_comp_req_t *pMsg = NULL; icp_qat_fw_comp_req_params_t *pCompReqParams = NULL; Cpa64U srcAddrPhys = 0, dstAddrPhys = 0; Cpa64U srcTotalDataLenInBytes = 0, dstTotalDataLenInBytes = 0; Cpa32U rpCmdFlags = 0; Cpa8U sop = ICP_QAT_FW_COMP_SOP; Cpa8U eop = ICP_QAT_FW_COMP_EOP; Cpa8U bFinal = ICP_QAT_FW_COMP_NOT_BFINAL; Cpa8U crcMode = ICP_QAT_FW_COMP_CRC_MODE_LEGACY; Cpa8U cnvDecompReq = ICP_QAT_FW_COMP_NO_CNV; Cpa8U cnvRecovery = ICP_QAT_FW_COMP_NO_CNV_RECOVERY; CpaBoolean cnvErrorInjection = ICP_QAT_FW_COMP_NO_CNV_DFX; CpaBoolean integrityCrcCheck = CPA_FALSE; CpaStatus status = CPA_STATUS_SUCCESS; CpaDcFlush flush = CPA_DC_FLUSH_NONE; Cpa32U initial_adler = 1; Cpa32U initial_crc32 = 0; icp_qat_fw_comp_req_t *pReqCache = NULL; /* Write the buffer descriptors */ status = LacBuffDesc_BufferListDescWriteAndGetSize( pSrcBuff, &srcAddrPhys, CPA_FALSE, &srcTotalDataLenInBytes, &(pService->generic_service_info)); if (status != CPA_STATUS_SUCCESS) { return status; } status = LacBuffDesc_BufferListDescWriteAndGetSize( pDestBuff, &dstAddrPhys, CPA_FALSE, &dstTotalDataLenInBytes, &(pService->generic_service_info)); if (status != CPA_STATUS_SUCCESS) { return status; } /* Populate the compression cookie */ pCookie->dcInstance = pService; pCookie->pSessionHandle = pSessionHandle; pCookie->callbackTag = callbackTag; pCookie->pSessionDesc = pSessionDesc; pCookie->pDcOpData = pOpData; pCookie->pResults = pResults; pCookie->compDecomp = compDecomp; pCookie->pUserSrcBuff = NULL; pCookie->pUserDestBuff = NULL; /* Extract the flush flag from either the OpData or from the * parameter. OpData was introduced with the APIs * cpaDcCompressData2 and cpaDcDecompressData2 */ if (NULL != pOpData) { flush = pOpData->flushFlag; integrityCrcCheck = pOpData->integrityCrcCheck; } else { flush = flushFlag; } pCookie->flushFlag = flush; /* The firmware expects the length in bytes for source and destination * to be Cpa32U parameters. However the total data length could be * bigger as allocated by the user.
We ensure that this is not the case * in dcCheckSourceData and cast the values to Cpa32U here */ pCookie->srcTotalDataLenInBytes = (Cpa32U)srcTotalDataLenInBytes; if ((isDcGen2x(pService)) && (DC_COMPRESSION_REQUEST == compDecomp) && (CPA_DC_HT_FULL_DYNAMIC == pSessionDesc->huffType)) { if (pService->minInterBuffSizeInBytes < (Cpa32U)dstTotalDataLenInBytes) { pCookie->dstTotalDataLenInBytes = (Cpa32U)(pService->minInterBuffSizeInBytes); } else { pCookie->dstTotalDataLenInBytes = (Cpa32U)dstTotalDataLenInBytes; } } else { pCookie->dstTotalDataLenInBytes = (Cpa32U)dstTotalDataLenInBytes; } /* Device can not decompress an odd byte decompression request * if bFinal is not set */ if (CPA_TRUE != pService->comp_device_data.oddByteDecompNobFinal) { if ((CPA_DC_STATEFUL == pSessionDesc->sessState) && (CPA_DC_FLUSH_FINAL != flushFlag) && (DC_DECOMPRESSION_REQUEST == compDecomp) && (pCookie->srcTotalDataLenInBytes & 0x1)) { pCookie->srcTotalDataLenInBytes--; } } /* Device can not decompress odd byte interim requests */ if (CPA_TRUE != pService->comp_device_data.oddByteDecompInterim) { if ((CPA_DC_STATEFUL == pSessionDesc->sessState) && (CPA_DC_FLUSH_FINAL != flushFlag) && (CPA_DC_FLUSH_FULL != flushFlag) && (DC_DECOMPRESSION_REQUEST == compDecomp) && (pCookie->srcTotalDataLenInBytes & 0x1)) { pCookie->srcTotalDataLenInBytes--; } } pMsg = (icp_qat_fw_comp_req_t *)&pCookie->request; if (DC_COMPRESSION_REQUEST == compDecomp) { pReqCache = &(pSessionDesc->reqCacheComp); } else { pReqCache = &(pSessionDesc->reqCacheDecomp); } /* Fills the msg from the template cached in the session descriptor */ memcpy((void *)pMsg, (void *)(pReqCache), LAC_QAT_DC_REQ_SZ_LW * LAC_LONG_WORD_IN_BYTES); if (DC_REQUEST_FIRST == pSessionDesc->requestType) { initial_adler = 1; initial_crc32 = 0; if (CPA_DC_ADLER32 == pSessionDesc->checksumType) { pSessionDesc->previousChecksum = initial_adler; } else { pSessionDesc->previousChecksum = initial_crc32; } } else if (CPA_DC_STATELESS == pSessionDesc->sessState) { pSessionDesc->previousChecksum = pResults->checksum; if (CPA_DC_ADLER32 == pSessionDesc->checksumType) { initial_adler = pSessionDesc->previousChecksum; } else { initial_crc32 = pSessionDesc->previousChecksum; } } /* Backup source and destination buffer addresses, * CRC calculations both for CNV and translator overflow * will be performed on them in the callback function. 
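	 *
	 * Checksum chaining, caller's view (a minimal illustrative sketch,
	 * not driver code; the instance/session/buffer variables are
	 * hypothetical and error handling is omitted): for a stateless
	 * session split across two submissions, the checksum written to
	 * CpaDcRqResults by request N seeds request N+1 through
	 * pSessionDesc->previousChecksum above, so the caller just reuses
	 * the same results structure:
	 *
	 *   CpaDcRqResults res = { 0 };
	 *   cpaDcCompressData(inst, sess, &srcList1, &dstList1, &res,
	 *                     CPA_DC_FLUSH_FULL, tag);
	 *   // res.checksum is picked up as previousChecksum
	 *   cpaDcCompressData(inst, sess, &srcList2, &dstList2, &res,
	 *                     CPA_DC_FLUSH_FINAL, tag);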
*/ pCookie->pUserSrcBuff = pSrcBuff; pCookie->pUserDestBuff = pDestBuff; /* * Due to the implementation of CNV support and the need for backwards * compatibility, certain fields in the request and response structs have * been changed, moved or placed in unions; the cnvMode flag signifies the * fields to be selected from the request/response. * * Doing extended CRC checks makes sense only when we want to do the * actual CNV */ if (CPA_TRUE == pService->generic_service_info.integrityCrcCheck && CPA_TRUE == integrityCrcCheck) { pMsg->comp_pars.crc.crc_data_addr = pSessionDesc->physDataIntegrityCrcs; crcMode = ICP_QAT_FW_COMP_CRC_MODE_E2E; } else { /* Legacy request structure */ pMsg->comp_pars.crc.legacy.initial_adler = initial_adler; pMsg->comp_pars.crc.legacy.initial_crc32 = initial_crc32; crcMode = ICP_QAT_FW_COMP_CRC_MODE_LEGACY; } /* Populate the cmdFlags */ if (CPA_DC_STATEFUL == pSessionDesc->sessState) { pSessionDesc->previousRequestType = pSessionDesc->requestType; if (DC_REQUEST_FIRST == pSessionDesc->requestType) { /* Update the request type for following requests */ pSessionDesc->requestType = DC_REQUEST_SUBSEQUENT; /* Reinitialise the cumulative amount of consumed bytes */ pSessionDesc->cumulativeConsumedBytes = 0; if (DC_COMPRESSION_REQUEST == compDecomp) { pSessionDesc->isSopForCompressionProcessed = CPA_TRUE; } else if (DC_DECOMPRESSION_REQUEST == compDecomp) { pSessionDesc->isSopForDecompressionProcessed = CPA_TRUE; } } else { if (DC_COMPRESSION_REQUEST == compDecomp) { if (CPA_TRUE == pSessionDesc ->isSopForCompressionProcessed) { sop = ICP_QAT_FW_COMP_NOT_SOP; } else { pSessionDesc ->isSopForCompressionProcessed = CPA_TRUE; } } else if (DC_DECOMPRESSION_REQUEST == compDecomp) { if (CPA_TRUE == pSessionDesc ->isSopForDecompressionProcessed) { sop = ICP_QAT_FW_COMP_NOT_SOP; } else { pSessionDesc ->isSopForDecompressionProcessed = CPA_TRUE; } } } if ((CPA_DC_FLUSH_FINAL == flush) || (CPA_DC_FLUSH_FULL == flush)) { /* Update the request type for following requests */ pSessionDesc->requestType = DC_REQUEST_FIRST; } else { eop = ICP_QAT_FW_COMP_NOT_EOP; } } else { if (DC_REQUEST_FIRST == pSessionDesc->requestType) { /* Reinitialise the cumulative amount of consumed bytes */ pSessionDesc->cumulativeConsumedBytes = 0; } } /* (LW 14 - 15) */ pCompReqParams = &(pMsg->comp_pars); dcCompRequestParamsPopulate(pCompReqParams, pCookie); if (CPA_DC_FLUSH_FINAL == flush) { bFinal = ICP_QAT_FW_COMP_BFINAL; } switch (cnvMode) { case DC_CNVNR: cnvRecovery = ICP_QAT_FW_COMP_CNV_RECOVERY; /* Fall through is intended here, because for CNVNR * cnvDecompReq also needs to be set */ case DC_CNV: cnvDecompReq = ICP_QAT_FW_COMP_CNV; if (isDcGen4x(pService)) { cnvErrorInjection = pSessionDesc->cnvErrorInjection; } break; case DC_NO_CNV: cnvDecompReq = ICP_QAT_FW_COMP_NO_CNV; cnvRecovery = ICP_QAT_FW_COMP_NO_CNV_RECOVERY; break; } /* LW 18 */ rpCmdFlags = ICP_QAT_FW_COMP_REQ_PARAM_FLAGS_BUILD(sop, eop, bFinal, cnvDecompReq, cnvRecovery, cnvErrorInjection, crcMode); pMsg->comp_pars.req_par_flags = rpCmdFlags; /* Populates the QAT common request middle part of the message * (LW 6 to 11) */ SalQatMsg_CmnMidWrite((icp_qat_fw_la_bulk_req_t *)pMsg, pCookie, DC_DEFAULT_QAT_PTR_TYPE, srcAddrPhys, dstAddrPhys, 0, 0); return CPA_STATUS_SUCCESS; } /** ***************************************************************************** * @ingroup Dc_DataCompression * Send a compression request to QAT * * @description * Send the requests for compression or decompression to QAT * * @param[in] pCookie Pointer to the compression cookie * @param[in] pService
Pointer to the compression service * @param[in] pSessionDesc Pointer to the session descriptor * @param[in] compDecomp Direction of the operation * * @retval CPA_STATUS_SUCCESS Function executed successfully * @retval CPA_STATUS_INVALID_PARAM Invalid parameter passed in * *****************************************************************************/ static CpaStatus dcSendRequest(dc_compression_cookie_t *pCookie, sal_compression_service_t *pService, dc_session_desc_t *pSessionDesc, dc_request_dir_t compDecomp) { CpaStatus status = CPA_STATUS_SUCCESS; /* Send to QAT */ status = icp_adf_transPutMsg(pService->trans_handle_compression_tx, (void *)&(pCookie->request), LAC_QAT_DC_REQ_SZ_LW); if ((CPA_DC_STATEFUL == pSessionDesc->sessState) && (CPA_STATUS_RETRY == status)) { /* Reset requestType after receiving a retry on * the stateful request */ pSessionDesc->requestType = pSessionDesc->previousRequestType; } return status; } /** ***************************************************************************** * @ingroup Dc_DataCompression * Process the synchronous and asynchronous case for compression or * decompression * * @description * Process the synchronous and asynchronous case for compression or * decompression. This function will then create and send the request to * the firmware. * * @param[in] pService Pointer to the compression service * @param[in] pSessionDesc Pointer to the session descriptor * @param[in] dcInstance Instance handle derived from discovery * functions * @param[in] pSessionHandle Session handle * @param[in] pSrcBuff Pointer to data buffer for compression * @param[in] pDestBuff Pointer to buffer space for data after * compression * @param[in] pResults Pointer to results structure * @param[in] flushFlag Indicates the type of flush to be * performed * @param[in] pOpData Pointer to request information structure * holding parameters for cpaDcCompressData2 and * cpaDcDecompressData2 * @param[in] callbackTag Pointer to the callback tag * @param[in] compDecomp Direction of the operation * @param[in] isAsyncMode Indicates whether the operation is * synchronous or asynchronous * @param[in] cnvMode CNV Mode * * @retval CPA_STATUS_SUCCESS Function executed successfully * @retval CPA_STATUS_RETRY Retry operation * @retval CPA_STATUS_FAIL Function failed * @retval CPA_STATUS_RESOURCE Resource error * *****************************************************************************/ static CpaStatus dcCompDecompData(sal_compression_service_t *pService, dc_session_desc_t *pSessionDesc, CpaInstanceHandle dcInstance, CpaDcSessionHandle pSessionHandle, CpaBufferList *pSrcBuff, CpaBufferList *pDestBuff, CpaDcRqResults *pResults, CpaDcFlush flushFlag, CpaDcOpData *pOpData, void *callbackTag, dc_request_dir_t compDecomp, CpaBoolean isAsyncMode, dc_cnv_mode_t cnvMode) { CpaStatus status = CPA_STATUS_SUCCESS; dc_compression_cookie_t *pCookie = NULL; if ((LacSync_GenWakeupSyncCaller == pSessionDesc->pCompressionCb) && isAsyncMode == CPA_TRUE) { lac_sync_op_data_t *pSyncCallbackData = NULL; status = LacSync_CreateSyncCookie(&pSyncCallbackData); if (CPA_STATUS_SUCCESS == status) { status = dcCompDecompData(pService, pSessionDesc, dcInstance, pSessionHandle, pSrcBuff, pDestBuff, pResults, flushFlag, pOpData, pSyncCallbackData, compDecomp, CPA_FALSE, cnvMode); } else { return status; } if (CPA_STATUS_SUCCESS == status) { CpaStatus syncStatus = CPA_STATUS_SUCCESS; syncStatus =
LacSync_WaitForCallback(pSyncCallbackData, DC_SYNC_CALLBACK_TIMEOUT, &status, NULL); /* If callback doesn't come back */ if (CPA_STATUS_SUCCESS != syncStatus) { if (DC_COMPRESSION_REQUEST == compDecomp) { COMPRESSION_STAT_INC( numCompCompletedErrors, pService); } else { COMPRESSION_STAT_INC( numDecompCompletedErrors, pService); } LAC_LOG_ERROR("Callback timed out"); status = syncStatus; } } else { /* As the Request was not sent the Callback will never * be called, so need to indicate that we're finished * with cookie so it can be destroyed. */ LacSync_SetSyncCookieComplete(pSyncCallbackData); } LacSync_DestroySyncCookie(&pSyncCallbackData); return status; } /* Allocate the compression cookie * The memory is freed in callback or in sendRequest if an error occurs */ pCookie = (dc_compression_cookie_t *)Lac_MemPoolEntryAlloc( pService->compression_mem_pool); if (NULL == pCookie) { LAC_LOG_ERROR("Cannot get mem pool entry for compression"); status = CPA_STATUS_RESOURCE; } else if ((void *)CPA_STATUS_RETRY == pCookie) { pCookie = NULL; status = CPA_STATUS_RETRY; } if (CPA_STATUS_SUCCESS == status) { status = dcCreateRequest(pCookie, pService, pSessionDesc, pSessionHandle, pSrcBuff, pDestBuff, pResults, flushFlag, pOpData, callbackTag, compDecomp, cnvMode); } if (CPA_STATUS_SUCCESS == status) { /* Increment number of pending callbacks for session */ if (CPA_DC_STATELESS == pSessionDesc->sessState) { qatUtilsAtomicInc( &(pSessionDesc->pendingStatelessCbCount)); } status = dcSendRequest(pCookie, pService, pSessionDesc, compDecomp); } if (CPA_STATUS_SUCCESS == status) { if (DC_COMPRESSION_REQUEST == compDecomp) { COMPRESSION_STAT_INC(numCompRequests, pService); } else { COMPRESSION_STAT_INC(numDecompRequests, pService); } } else { if (DC_COMPRESSION_REQUEST == compDecomp) { COMPRESSION_STAT_INC(numCompRequestsErrors, pService); } else { COMPRESSION_STAT_INC(numDecompRequestsErrors, pService); } /* Decrement number of pending callbacks for session */ if (CPA_DC_STATELESS == pSessionDesc->sessState) { qatUtilsAtomicDec( &(pSessionDesc->pendingStatelessCbCount)); } else { qatUtilsAtomicDec( &(pSessionDesc->pendingStatefulCbCount)); } /* Free the memory pool */ if (NULL != pCookie) { if (status != CPA_STATUS_UNSUPPORTED) { /* Free the memory pool */ Lac_MemPoolEntryFree(pCookie); pCookie = NULL; } } } return status; } /** ***************************************************************************** * @ingroup Dc_DataCompression * Handle zero length compression or decompression requests * * @description * Handle zero length compression or decompression requests * * @param[in] pService Pointer to the compression service * @param[in] pSessionDesc Pointer to the session descriptor * @param[in] pResults Pointer to results structure * @param[in] flushFlag Indicates the type of flush to be * performed * @param[in] callbackTag User supplied value to help correlate * the callback with its associated request * @param[in] compDecomp Direction of the operation * * @retval CPA_TRUE Zero length SOP or MOP processed * @retval CPA_FALSE Zero length EOP * *****************************************************************************/ static CpaStatus dcZeroLengthRequests(sal_compression_service_t *pService, dc_session_desc_t *pSessionDesc, CpaDcRqResults *pResults, CpaDcFlush flushFlag, void *callbackTag, dc_request_dir_t compDecomp) { CpaBoolean status = CPA_FALSE; CpaDcCallbackFn pCbFunc = pSessionDesc->pCompressionCb; if (DC_REQUEST_FIRST == pSessionDesc->requestType) { /* Reinitialise the cumulative amount of 
consumed bytes */ pSessionDesc->cumulativeConsumedBytes = 0; /* Zero length SOP */ if (CPA_DC_ADLER32 == pSessionDesc->checksumType) { pResults->checksum = 1; } else { pResults->checksum = 0; } status = CPA_TRUE; } else if ((CPA_DC_FLUSH_NONE == flushFlag) || (CPA_DC_FLUSH_SYNC == flushFlag)) { /* Zero length MOP */ pResults->checksum = pSessionDesc->previousChecksum; status = CPA_TRUE; } if (CPA_TRUE == status) { pResults->status = CPA_DC_OK; pResults->produced = 0; pResults->consumed = 0; /* Increment statistics */ if (DC_COMPRESSION_REQUEST == compDecomp) { COMPRESSION_STAT_INC(numCompRequests, pService); COMPRESSION_STAT_INC(numCompCompleted, pService); } else { COMPRESSION_STAT_INC(numDecompRequests, pService); COMPRESSION_STAT_INC(numDecompCompleted, pService); } LAC_SPINUNLOCK(&(pSessionDesc->sessionLock)); if ((NULL != pCbFunc) && (LacSync_GenWakeupSyncCaller != pCbFunc)) { pCbFunc(callbackTag, CPA_STATUS_SUCCESS); } return CPA_TRUE; } return CPA_FALSE; } static CpaStatus dcParamCheck(CpaInstanceHandle dcInstance, CpaDcSessionHandle pSessionHandle, sal_compression_service_t *pService, CpaBufferList *pSrcBuff, CpaBufferList *pDestBuff, CpaDcRqResults *pResults, dc_session_desc_t *pSessionDesc, CpaDcFlush flushFlag, Cpa64U srcBuffSize) { if (dcCheckSourceData(pSessionHandle, pSrcBuff, pDestBuff, pResults, flushFlag, srcBuffSize, NULL) != CPA_STATUS_SUCCESS) { return CPA_STATUS_INVALID_PARAM; } if (dcCheckDestinationData( pService, pSessionHandle, pDestBuff, DC_COMPRESSION_REQUEST) != CPA_STATUS_SUCCESS) { return CPA_STATUS_INVALID_PARAM; } if (CPA_DC_DIR_DECOMPRESS == pSessionDesc->sessDirection) { LAC_INVALID_PARAM_LOG("Invalid sessDirection value"); return CPA_STATUS_INVALID_PARAM; } return CPA_STATUS_SUCCESS; } CpaStatus cpaDcCompressData(CpaInstanceHandle dcInstance, CpaDcSessionHandle pSessionHandle, CpaBufferList *pSrcBuff, CpaBufferList *pDestBuff, CpaDcRqResults *pResults, CpaDcFlush flushFlag, void *callbackTag) { sal_compression_service_t *pService = NULL; dc_session_desc_t *pSessionDesc = NULL; CpaInstanceHandle insHandle = NULL; Cpa64U srcBuffSize = 0; if (CPA_INSTANCE_HANDLE_SINGLE == dcInstance) { insHandle = dcGetFirstHandle(); } else { insHandle = dcInstance; } pService = (sal_compression_service_t *)insHandle; LAC_CHECK_NULL_PARAM(insHandle); LAC_CHECK_NULL_PARAM(pSessionHandle); /* Check if SAL is initialised otherwise return an error */ SAL_RUNNING_CHECK(insHandle); /* This check is outside the parameter checking as it is needed to * manage zero length requests */ if (LacBuffDesc_BufferListVerifyNull(pSrcBuff, &srcBuffSize, LAC_NO_ALIGNMENT_SHIFT) != CPA_STATUS_SUCCESS) { LAC_INVALID_PARAM_LOG("Invalid source buffer list parameter"); return CPA_STATUS_INVALID_PARAM; } /* Ensure this is a compression instance */ SAL_CHECK_INSTANCE_TYPE(insHandle, SAL_SERVICE_TYPE_COMPRESSION); pSessionDesc = DC_SESSION_DESC_FROM_CTX_GET(pSessionHandle); if (CPA_STATUS_SUCCESS != dcParamCheck(insHandle, pSessionHandle, pService, pSrcBuff, pDestBuff, pResults, pSessionDesc, flushFlag, srcBuffSize)) { return CPA_STATUS_INVALID_PARAM; } if (CPA_DC_STATEFUL == pSessionDesc->sessState) { LAC_INVALID_PARAM_LOG( "Invalid session state, stateful sessions " "are not supported"); return CPA_STATUS_UNSUPPORTED; } if (!(pService->generic_service_info.dcExtendedFeatures & DC_CNV_EXTENDED_CAPABILITY)) { LAC_INVALID_PARAM_LOG( "CompressAndVerify feature not supported"); return CPA_STATUS_UNSUPPORTED; } if (!(pService->generic_service_info.dcExtendedFeatures & DC_CNVNR_EXTENDED_CAPABILITY)) { 
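		/* cpaDcCompressData always submits with CNV plus recovery
		 * (DC_CNVNR below), so both the CNV and CNVNR extended
		 * capability bits must be present for this entry point. */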
LAC_INVALID_PARAM_LOG( "CompressAndVerifyAndRecovery feature not supported"); return CPA_STATUS_UNSUPPORTED; } return dcCompDecompData(pService, pSessionDesc, insHandle, pSessionHandle, pSrcBuff, pDestBuff, pResults, flushFlag, NULL, callbackTag, DC_COMPRESSION_REQUEST, CPA_TRUE, DC_CNVNR); } CpaStatus cpaDcCompressData2(CpaInstanceHandle dcInstance, CpaDcSessionHandle pSessionHandle, CpaBufferList *pSrcBuff, CpaBufferList *pDestBuff, CpaDcOpData *pOpData, CpaDcRqResults *pResults, void *callbackTag) { sal_compression_service_t *pService = NULL; dc_session_desc_t *pSessionDesc = NULL; CpaInstanceHandle insHandle = NULL; Cpa64U srcBuffSize = 0; dc_cnv_mode_t cnvMode = DC_NO_CNV; LAC_CHECK_NULL_PARAM(pOpData); if (((CPA_TRUE != pOpData->compressAndVerify) && (CPA_FALSE != pOpData->compressAndVerify)) || ((CPA_FALSE != pOpData->compressAndVerifyAndRecover) && (CPA_TRUE != pOpData->compressAndVerifyAndRecover))) { return CPA_STATUS_INVALID_PARAM; } if ((CPA_FALSE == pOpData->compressAndVerify) && (CPA_TRUE == pOpData->compressAndVerifyAndRecover)) { return CPA_STATUS_INVALID_PARAM; } if ((CPA_TRUE == pOpData->compressAndVerify) && (CPA_TRUE == pOpData->compressAndVerifyAndRecover) && (CPA_FALSE == pOpData->integrityCrcCheck)) { return cpaDcCompressData(dcInstance, pSessionHandle, pSrcBuff, pDestBuff, pResults, pOpData->flushFlag, callbackTag); } if (CPA_FALSE == pOpData->compressAndVerify) { LAC_INVALID_PARAM_LOG( "Data compression without verification not allowed"); return CPA_STATUS_UNSUPPORTED; } if (CPA_INSTANCE_HANDLE_SINGLE == dcInstance) { insHandle = dcGetFirstHandle(); } else { insHandle = dcInstance; } pService = (sal_compression_service_t *)insHandle; LAC_CHECK_NULL_PARAM(insHandle); LAC_CHECK_NULL_PARAM(pSessionHandle); LAC_CHECK_NULL_PARAM(pOpData); /* Check if SAL is initialised otherwise return an error */ SAL_RUNNING_CHECK(insHandle); /* This check is outside the parameter checking as it is needed to * manage zero length requests */ if (LacBuffDesc_BufferListVerifyNull(pSrcBuff, &srcBuffSize, LAC_NO_ALIGNMENT_SHIFT) != CPA_STATUS_SUCCESS) { LAC_INVALID_PARAM_LOG("Invalid source buffer list parameter"); return CPA_STATUS_INVALID_PARAM; } /* Ensure this is a compression instance */ SAL_CHECK_INSTANCE_TYPE(insHandle, SAL_SERVICE_TYPE_COMPRESSION); pSessionDesc = DC_SESSION_DESC_FROM_CTX_GET(pSessionHandle); if (CPA_TRUE == pOpData->compressAndVerify && CPA_DC_STATEFUL == pSessionDesc->sessState) { LAC_INVALID_PARAM_LOG( "Invalid session state, stateful sessions " "not supported with CNV"); return CPA_STATUS_UNSUPPORTED; } if (!(pService->generic_service_info.dcExtendedFeatures & DC_CNV_EXTENDED_CAPABILITY) && (CPA_TRUE == pOpData->compressAndVerify)) { LAC_INVALID_PARAM_LOG( "CompressAndVerify feature not supported"); return CPA_STATUS_UNSUPPORTED; } if (CPA_STATUS_SUCCESS != dcParamCheck(insHandle, pSessionHandle, pService, pSrcBuff, pDestBuff, pResults, pSessionDesc, pOpData->flushFlag, srcBuffSize)) { return CPA_STATUS_INVALID_PARAM; } if (CPA_STATUS_SUCCESS != dcCheckOpData(pService, pOpData)) { return CPA_STATUS_INVALID_PARAM; } if (CPA_TRUE != pOpData->compressAndVerify) { if (srcBuffSize > DC_COMP_MAX_BUFF_SIZE) { LAC_LOG_ERROR( "Compression payload greater than 64KB is " "unsupported, when CnV is disabled\n"); return CPA_STATUS_UNSUPPORTED; } } if (CPA_DC_STATEFUL == pSessionDesc->sessState) { /* Lock the session to check if there are in-flight stateful * requests */ LAC_SPINLOCK(&(pSessionDesc->sessionLock)); /* Check if there is already one in-flight stateful request */ 
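		/* Stateful state (history window, saved checksum) lives in
		 * the session descriptor, so concurrent stateful submissions
		 * would corrupt it; a second in-flight request is rejected
		 * with CPA_STATUS_RETRY while the session lock is held. */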
if (0 != qatUtilsAtomicGet( &(pSessionDesc->pendingStatefulCbCount))) { LAC_LOG_ERROR( "Only one in-flight stateful request supported"); LAC_SPINUNLOCK(&(pSessionDesc->sessionLock)); return CPA_STATUS_RETRY; } if (0 == srcBuffSize) { if (CPA_TRUE == dcZeroLengthRequests(pService, pSessionDesc, pResults, pOpData->flushFlag, callbackTag, DC_COMPRESSION_REQUEST)) { return CPA_STATUS_SUCCESS; } } qatUtilsAtomicInc(&(pSessionDesc->pendingStatefulCbCount)); LAC_SPINUNLOCK(&(pSessionDesc->sessionLock)); } if (CPA_TRUE == pOpData->compressAndVerify) { cnvMode = DC_CNV; } return dcCompDecompData(pService, pSessionDesc, insHandle, pSessionHandle, pSrcBuff, pDestBuff, pResults, pOpData->flushFlag, pOpData, callbackTag, DC_COMPRESSION_REQUEST, CPA_TRUE, cnvMode); } static CpaStatus dcDecompressDataCheck(CpaInstanceHandle insHandle, CpaDcSessionHandle pSessionHandle, CpaBufferList *pSrcBuff, CpaBufferList *pDestBuff, CpaDcRqResults *pResults, CpaDcFlush flushFlag, Cpa64U *srcBufferSize) { sal_compression_service_t *pService = NULL; dc_session_desc_t *pSessionDesc = NULL; Cpa64U srcBuffSize = 0; pService = (sal_compression_service_t *)insHandle; LAC_CHECK_NULL_PARAM(insHandle); /* Check if SAL is initialised otherwise return an error */ SAL_RUNNING_CHECK(insHandle); /* This check is outside the parameter checking as it is needed to * manage zero length requests */ if (LacBuffDesc_BufferListVerifyNull(pSrcBuff, &srcBuffSize, LAC_NO_ALIGNMENT_SHIFT) != CPA_STATUS_SUCCESS) { LAC_INVALID_PARAM_LOG("Invalid source buffer list parameter"); return CPA_STATUS_INVALID_PARAM; } /* Ensure this is a compression instance */ SAL_CHECK_INSTANCE_TYPE(insHandle, SAL_SERVICE_TYPE_COMPRESSION); if (dcCheckSourceData(pSessionHandle, pSrcBuff, pDestBuff, pResults, flushFlag, srcBuffSize, NULL) != CPA_STATUS_SUCCESS) { return CPA_STATUS_INVALID_PARAM; } if (dcCheckDestinationData(pService, pSessionHandle, pDestBuff, DC_DECOMPRESSION_REQUEST) != CPA_STATUS_SUCCESS) { return CPA_STATUS_INVALID_PARAM; } pSessionDesc = DC_SESSION_DESC_FROM_CTX_GET(pSessionHandle); if (CPA_DC_DIR_COMPRESS == pSessionDesc->sessDirection) { LAC_INVALID_PARAM_LOG("Invalid sessDirection value"); return CPA_STATUS_INVALID_PARAM; } *srcBufferSize = srcBuffSize; return CPA_STATUS_SUCCESS; } CpaStatus cpaDcDecompressData(CpaInstanceHandle dcInstance, CpaDcSessionHandle pSessionHandle, CpaBufferList *pSrcBuff, CpaBufferList *pDestBuff, CpaDcRqResults *pResults, CpaDcFlush flushFlag, void *callbackTag) { sal_compression_service_t *pService = NULL; dc_session_desc_t *pSessionDesc = NULL; CpaInstanceHandle insHandle = NULL; Cpa64U srcBuffSize = 0; CpaStatus status = CPA_STATUS_SUCCESS; if (CPA_INSTANCE_HANDLE_SINGLE == dcInstance) { insHandle = dcGetFirstHandle(); } else { insHandle = dcInstance; } status = dcDecompressDataCheck(insHandle, pSessionHandle, pSrcBuff, pDestBuff, pResults, flushFlag, &srcBuffSize); if (CPA_STATUS_SUCCESS != status) { return status; } pService = (sal_compression_service_t *)insHandle; /* Check if SAL is initialised otherwise return an error */ SAL_RUNNING_CHECK(insHandle); /* This check is outside the parameter checking as it is needed to * manage zero length requests */ if (CPA_STATUS_SUCCESS != LacBuffDesc_BufferListVerifyNull(pSrcBuff, &srcBuffSize, LAC_NO_ALIGNMENT_SHIFT)) { QAT_UTILS_LOG("Invalid source buffer list parameter"); return CPA_STATUS_INVALID_PARAM; } /* Ensure this is a compression instance */ SAL_CHECK_INSTANCE_TYPE(insHandle, SAL_SERVICE_TYPE_COMPRESSION); if (dcCheckSourceData(pSessionHandle, pSrcBuff, 
pDestBuff, pResults, flushFlag, srcBuffSize, NULL) != CPA_STATUS_SUCCESS) { return CPA_STATUS_INVALID_PARAM; } if (dcCheckDestinationData(pService, pSessionHandle, pDestBuff, DC_DECOMPRESSION_REQUEST) != CPA_STATUS_SUCCESS) { return CPA_STATUS_INVALID_PARAM; } pSessionDesc = DC_SESSION_DESC_FROM_CTX_GET(pSessionHandle); if (CPA_DC_DIR_COMPRESS == pSessionDesc->sessDirection) { QAT_UTILS_LOG("Invalid sessDirection value"); return CPA_STATUS_INVALID_PARAM; } if (CPA_DC_STATEFUL == pSessionDesc->sessState) { /* Lock the session to check if there are in-flight stateful * requests */ LAC_SPINLOCK(&(pSessionDesc->sessionLock)); /* Check if there is already one in-flight stateful request */ if (0 != qatUtilsAtomicGet( &(pSessionDesc->pendingStatefulCbCount))) { LAC_LOG_ERROR( "Only one in-flight stateful request supported"); LAC_SPINUNLOCK(&(pSessionDesc->sessionLock)); return CPA_STATUS_RETRY; } /* Gen 4 handle 0 len requests in FW */ if (isDcGen2x(pService)) { if ((0 == srcBuffSize) || ((1 == srcBuffSize) && (CPA_DC_FLUSH_FINAL != flushFlag) && (CPA_DC_FLUSH_FULL != flushFlag))) { if (CPA_TRUE == dcZeroLengthRequests( pService, pSessionDesc, pResults, flushFlag, callbackTag, DC_DECOMPRESSION_REQUEST)) { return CPA_STATUS_SUCCESS; } } } qatUtilsAtomicInc(&(pSessionDesc->pendingStatefulCbCount)); LAC_SPINUNLOCK(&(pSessionDesc->sessionLock)); } return dcCompDecompData(pService, pSessionDesc, insHandle, pSessionHandle, pSrcBuff, pDestBuff, pResults, flushFlag, NULL, callbackTag, DC_DECOMPRESSION_REQUEST, CPA_TRUE, DC_NO_CNV); } CpaStatus cpaDcDecompressData2(CpaInstanceHandle dcInstance, CpaDcSessionHandle pSessionHandle, CpaBufferList *pSrcBuff, CpaBufferList *pDestBuff, CpaDcOpData *pOpData, CpaDcRqResults *pResults, void *callbackTag) { sal_compression_service_t *pService = NULL; dc_session_desc_t *pSessionDesc = NULL; CpaInstanceHandle insHandle = NULL; CpaStatus status = CPA_STATUS_SUCCESS; Cpa64U srcBuffSize = 0; LAC_CHECK_NULL_PARAM(pOpData); if (CPA_FALSE == pOpData->integrityCrcCheck) { return cpaDcDecompressData(dcInstance, pSessionHandle, pSrcBuff, pDestBuff, pResults, pOpData->flushFlag, callbackTag); } if (CPA_INSTANCE_HANDLE_SINGLE == dcInstance) { insHandle = dcGetFirstHandle(); } else { insHandle = dcInstance; } status = dcDecompressDataCheck(insHandle, pSessionHandle, pSrcBuff, pDestBuff, pResults, pOpData->flushFlag, &srcBuffSize); if (CPA_STATUS_SUCCESS != status) { return status; } pService = (sal_compression_service_t *)insHandle; pSessionDesc = DC_SESSION_DESC_FROM_CTX_GET(pSessionHandle); LAC_CHECK_NULL_PARAM(insHandle); /* Check if SAL is initialised otherwise return an error */ SAL_RUNNING_CHECK(insHandle); /* This check is outside the parameter checking as it is needed to * manage zero length requests */ if (CPA_STATUS_SUCCESS != LacBuffDesc_BufferListVerifyNull(pSrcBuff, &srcBuffSize, LAC_NO_ALIGNMENT_SHIFT)) { QAT_UTILS_LOG("Invalid source buffer list parameter"); return CPA_STATUS_INVALID_PARAM; } /* Ensure this is a compression instance */ SAL_CHECK_INSTANCE_TYPE(insHandle, SAL_SERVICE_TYPE_COMPRESSION); if (CPA_STATUS_SUCCESS != dcCheckSourceData(pSessionHandle, pSrcBuff, pDestBuff, pResults, CPA_DC_FLUSH_NONE, srcBuffSize, NULL)) { return CPA_STATUS_INVALID_PARAM; } if (CPA_STATUS_SUCCESS != dcCheckDestinationData(pService, pSessionHandle, pDestBuff, DC_DECOMPRESSION_REQUEST)) { return CPA_STATUS_INVALID_PARAM; } if (CPA_STATUS_SUCCESS != dcCheckOpData(pService, pOpData)) { return CPA_STATUS_INVALID_PARAM; } if (CPA_DC_DIR_COMPRESS == pSessionDesc->sessDirection) { 
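		/* A session created for the compress direction cannot
		 * service decompression requests. */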
QAT_UTILS_LOG("Invalid sessDirection value"); return CPA_STATUS_INVALID_PARAM; } if (CPA_DC_STATEFUL == pSessionDesc->sessState) { /* Lock the session to check if there are in-flight stateful * requests */ LAC_SPINLOCK(&(pSessionDesc->sessionLock)); /* Check if there is already one in-flight stateful request */ if (0 != qatUtilsAtomicGet( &(pSessionDesc->pendingStatefulCbCount))) { LAC_LOG_ERROR( "Only one in-flight stateful request supported"); LAC_SPINUNLOCK(&(pSessionDesc->sessionLock)); return CPA_STATUS_RETRY; } /* Gen 4 handle 0 len requests in FW */ if (isDcGen2x(pService)) { if ((0 == srcBuffSize) || ((1 == srcBuffSize) && (CPA_DC_FLUSH_FINAL != pOpData->flushFlag) && (CPA_DC_FLUSH_FULL != pOpData->flushFlag))) { if (CPA_TRUE == dcZeroLengthRequests( pService, pSessionDesc, pResults, pOpData->flushFlag, callbackTag, DC_DECOMPRESSION_REQUEST)) { return CPA_STATUS_SUCCESS; } } } qatUtilsAtomicInc(&(pSessionDesc->pendingStatefulCbCount)); LAC_SPINUNLOCK(&(pSessionDesc->sessionLock)); } return dcCompDecompData(pService, pSessionDesc, insHandle, pSessionHandle, pSrcBuff, pDestBuff, pResults, pOpData->flushFlag, pOpData, callbackTag, DC_DECOMPRESSION_REQUEST, CPA_TRUE, DC_NO_CNV); } diff --git a/sys/dev/qat/qat_common/adf_hw_arbiter.c b/sys/dev/qat/qat_common/adf_hw_arbiter.c index e89a5f6ce68c..827d612e2c22 100644 --- a/sys/dev/qat/qat_common/adf_hw_arbiter.c +++ b/sys/dev/qat/qat_common/adf_hw_arbiter.c @@ -1,208 +1,209 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright(c) 2007-2022 Intel Corporation */ /* $FreeBSD$ */ #include "qat_freebsd.h" #include "adf_cfg.h" #include "adf_common_drv.h" #include "adf_accel_devices.h" #include "icp_qat_uclo.h" #include "icp_qat_fw.h" #include "icp_qat_fw_init_admin.h" #include "adf_cfg_strings.h" #include "adf_transport_access_macros.h" #include "adf_transport_internal.h" #include "adf_accel_devices.h" #include "adf_common_drv.h" #include "adf_transport_internal.h" #define ADF_ARB_NUM 4 #define ADF_ARB_REG_SIZE 0x4 #define ADF_ARB_WTR_SIZE 0x20 #define ADF_ARB_OFFSET 0x30000 #define ADF_ARB_REG_SLOT 0x1000 #define ADF_ARB_WTR_OFFSET 0x010 #define ADF_ARB_RO_EN_OFFSET 0x090 #define ADF_ARB_WQCFG_OFFSET 0x100 #define ADF_ARB_WRK_2_SER_MAP_OFFSET 0x180 #define ADF_ARB_RINGSRVARBEN_OFFSET 0x19C #define WRITE_CSR_ARB_RINGSRVARBEN(csr_addr, index, value) \ ADF_CSR_WR(csr_addr, \ ADF_ARB_RINGSRVARBEN_OFFSET + (ADF_ARB_REG_SLOT * (index)), \ value) #define WRITE_CSR_ARB_SARCONFIG(csr_addr, csr_offset, index, value) \ ADF_CSR_WR(csr_addr, (csr_offset) + (ADF_ARB_REG_SIZE * (index)), value) #define READ_CSR_ARB_RINGSRVARBEN(csr_addr, index) \ ADF_CSR_RD(csr_addr, \ ADF_ARB_RINGSRVARBEN_OFFSET + (ADF_ARB_REG_SLOT * (index))) static DEFINE_MUTEX(csr_arb_lock); #define WRITE_CSR_ARB_WRK_2_SER_MAP( \ csr_addr, csr_offset, wrk_to_ser_map_offset, index, value) \ ADF_CSR_WR(csr_addr, \ ((csr_offset) + (wrk_to_ser_map_offset)) + \ (ADF_ARB_REG_SIZE * (index)), \ value) int adf_init_arb(struct adf_accel_dev *accel_dev) { struct adf_hw_device_data *hw_data = accel_dev->hw_device; struct arb_info info; struct resource *csr = accel_dev->transport->banks[0].csr_addr; u32 arb_cfg = 0x1 << 31 | 0x4 << 4 | 0x1; u32 arb; hw_data->get_arb_info(&info); /* Service arb configured for 32 bytes responses and * ring flow control check enabled. 
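	 * A rough decode of arb_cfg, assumed from the comment above rather
	 * than a datasheet: bit 31 enables the ring flow control check,
	 * the 0x4 field at bits [7:4] selects the 32 byte response size,
	 * and bit 0 enables the service arbiter itself.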
*/ for (arb = 0; arb < ADF_ARB_NUM; arb++) WRITE_CSR_ARB_SARCONFIG(csr, info.arbiter_offset, arb, arb_cfg); return 0; } int adf_init_gen2_arb(struct adf_accel_dev *accel_dev) { struct adf_hw_device_data *hw_data = accel_dev->hw_device; struct arb_info info; struct resource *csr = accel_dev->transport->banks[0].csr_addr; u32 i; const u32 *thd_2_arb_cfg; /* invoke common adf_init_arb */ adf_init_arb(accel_dev); hw_data->get_arb_info(&info); /* Map worker threads to service arbiters */ hw_data->get_arb_mapping(accel_dev, &thd_2_arb_cfg); if (!thd_2_arb_cfg) return EFAULT; for (i = 0; i < hw_data->num_engines; i++) WRITE_CSR_ARB_WRK_2_SER_MAP(csr, info.arbiter_offset, info.wrk_thd_2_srv_arb_map, i, *(thd_2_arb_cfg + i)); return 0; } void adf_update_ring_arb(struct adf_etr_ring_data *ring) { int shift; u32 arben, arben_tx, arben_rx, arb_mask; struct adf_accel_dev *accel_dev = ring->bank->accel_dev; struct adf_hw_csr_info *csr_info = &accel_dev->hw_device->csr_info; struct adf_hw_csr_ops *csr_ops = &csr_info->csr_ops; arb_mask = csr_info->arb_enable_mask; shift = hweight32(arb_mask); arben_tx = ring->bank->ring_mask & arb_mask; arben_rx = (ring->bank->ring_mask >> shift) & arb_mask; arben = arben_tx & arben_rx; csr_ops->write_csr_ring_srv_arb_en(ring->bank->csr_addr, ring->bank->bank_number, arben); } void adf_enable_ring_arb(struct adf_accel_dev *accel_dev, void *csr_addr, unsigned int bank_nr, unsigned int mask) { struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev); struct resource *csr = csr_addr; u32 arbenable; if (!csr) return; mutex_lock(&csr_arb_lock); arbenable = csr_ops->read_csr_ring_srv_arb_en(csr, bank_nr); arbenable |= mask & 0xFF; csr_ops->write_csr_ring_srv_arb_en(csr, bank_nr, arbenable); mutex_unlock(&csr_arb_lock); } void adf_disable_ring_arb(struct adf_accel_dev *accel_dev, void *csr_addr, unsigned int bank_nr, unsigned int mask) { struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev); struct resource *csr = csr_addr; u32 arbenable; if (!csr_addr) return; mutex_lock(&csr_arb_lock); arbenable = csr_ops->read_csr_ring_srv_arb_en(csr, bank_nr); arbenable &= ~mask & 0xFF; csr_ops->write_csr_ring_srv_arb_en(csr, bank_nr, arbenable); mutex_unlock(&csr_arb_lock); } void adf_exit_arb(struct adf_accel_dev *accel_dev) { struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev); struct adf_hw_device_data *hw_data = accel_dev->hw_device; struct arb_info info; struct resource *csr; unsigned int i; if (!accel_dev->transport) return; csr = accel_dev->transport->banks[0].csr_addr; hw_data->get_arb_info(&info); /* Reset arbiter configuration */ for (i = 0; i < ADF_ARB_NUM; i++) WRITE_CSR_ARB_SARCONFIG(csr, info.arbiter_offset, i, 0); /* Unmap worker threads to service arbiters */ if (hw_data->get_arb_mapping) { for (i = 0; i < hw_data->num_engines; i++) WRITE_CSR_ARB_WRK_2_SER_MAP(csr, info.arbiter_offset, info.wrk_thd_2_srv_arb_map, i, 0); } /* Disable arbitration on all rings */ for (i = 0; i < GET_MAX_BANKS(accel_dev); i++) csr_ops->write_csr_ring_srv_arb_en(csr, i, 0); } void adf_disable_arb(struct adf_accel_dev *accel_dev) { - struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev); + struct adf_hw_csr_ops *csr_ops; struct resource *csr; unsigned int i; if (!accel_dev || !accel_dev->transport) return; csr = accel_dev->transport->banks[0].csr_addr; + csr_ops = GET_CSR_OPS(accel_dev); /* Disable arbitration on all rings */ for (i = 0; i < GET_MAX_BANKS(accel_dev); i++) csr_ops->write_csr_ring_srv_arb_en(csr, i, 0); } diff --git a/sys/dev/qat/qat_hw/qat_4xxx/adf_4xxx_hw_data.c 
b/sys/dev/qat/qat_hw/qat_4xxx/adf_4xxx_hw_data.c index 1c8c9a2fda4c..002ea6d5d876 100644 --- a/sys/dev/qat/qat_hw/qat_4xxx/adf_4xxx_hw_data.c +++ b/sys/dev/qat/qat_hw/qat_4xxx/adf_4xxx_hw_data.c @@ -1,973 +1,973 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright(c) 2007-2022 Intel Corporation */ /* $FreeBSD$ */ #include #include #include #include #include #include #include #include "adf_4xxx_hw_data.h" #include "adf_heartbeat.h" #include "icp_qat_fw_init_admin.h" #include "icp_qat_hw.h" #define ADF_CONST_TABLE_SIZE 1024 struct adf_fw_config { u32 ae_mask; char *obj_name; }; /* Accel unit information */ static const struct adf_accel_unit adf_4xxx_au_a_ae[] = { { 0x1, 0x1, 0xF, 0x1B, 4, ADF_ACCEL_SERVICE_NULL }, { 0x2, 0x1, 0xF0, 0x6C0, 4, ADF_ACCEL_SERVICE_NULL }, { 0x4, 0x1, 0x100, 0xF000, 1, ADF_ACCEL_ADMIN }, }; /* Worker thread to service arbiter mappings */ static u32 thrd_to_arb_map[ADF_4XXX_MAX_ACCELENGINES] = { 0x5555555, 0x5555555, 0x5555555, 0x5555555, 0xAAAAAAA, 0xAAAAAAA, 0xAAAAAAA, 0xAAAAAAA, 0x0 }; /* Masks representing ME thread-service mappings. * Thread 7 carries out Admin work and is thus * left out. */ static u8 default_active_thd_mask = 0x7F; static u8 dc_me_active_thd_mask = 0x03; static u32 thrd_to_arb_map_gen[ADF_4XXX_MAX_ACCELENGINES] = { 0 }; #define ADF_4XXX_ASYM_SYM \ (ASYM | SYM << ADF_CFG_SERV_RING_PAIR_1_SHIFT | \ ASYM << ADF_CFG_SERV_RING_PAIR_2_SHIFT | \ SYM << ADF_CFG_SERV_RING_PAIR_3_SHIFT) #define ADF_4XXX_DC \ (COMP | COMP << ADF_CFG_SERV_RING_PAIR_1_SHIFT | \ COMP << ADF_CFG_SERV_RING_PAIR_2_SHIFT | \ COMP << ADF_CFG_SERV_RING_PAIR_3_SHIFT) #define ADF_4XXX_SYM \ (SYM | SYM << ADF_CFG_SERV_RING_PAIR_1_SHIFT | \ SYM << ADF_CFG_SERV_RING_PAIR_2_SHIFT | \ SYM << ADF_CFG_SERV_RING_PAIR_3_SHIFT) #define ADF_4XXX_ASYM \ (ASYM | ASYM << ADF_CFG_SERV_RING_PAIR_1_SHIFT | \ ASYM << ADF_CFG_SERV_RING_PAIR_2_SHIFT | \ ASYM << ADF_CFG_SERV_RING_PAIR_3_SHIFT) #define ADF_4XXX_ASYM_DC \ (ASYM | ASYM << ADF_CFG_SERV_RING_PAIR_1_SHIFT | \ COMP << ADF_CFG_SERV_RING_PAIR_2_SHIFT | \ COMP << ADF_CFG_SERV_RING_PAIR_3_SHIFT) #define ADF_4XXX_SYM_DC \ (SYM | SYM << ADF_CFG_SERV_RING_PAIR_1_SHIFT | \ COMP << ADF_CFG_SERV_RING_PAIR_2_SHIFT | \ COMP << ADF_CFG_SERV_RING_PAIR_3_SHIFT) #define ADF_4XXX_NA \ (NA | NA << ADF_CFG_SERV_RING_PAIR_1_SHIFT | \ NA << ADF_CFG_SERV_RING_PAIR_2_SHIFT | \ NA << ADF_CFG_SERV_RING_PAIR_3_SHIFT) #define ADF_4XXX_DEFAULT_RING_TO_SRV_MAP ADF_4XXX_ASYM_SYM struct adf_enabled_services { const char svcs_enabled[ADF_CFG_MAX_VAL_LEN_IN_BYTES]; u16 rng_to_svc_msk; }; static struct adf_enabled_services adf_4xxx_svcs[] = { { "dc", ADF_4XXX_DC }, { "sym", ADF_4XXX_SYM }, { "asym", ADF_4XXX_ASYM }, { "dc;asym", ADF_4XXX_ASYM_DC }, { "asym;dc", ADF_4XXX_ASYM_DC }, { "sym;dc", ADF_4XXX_SYM_DC }, { "dc;sym", ADF_4XXX_SYM_DC }, { "asym;sym", ADF_4XXX_ASYM_SYM }, { "sym;asym", ADF_4XXX_ASYM_SYM }, }; static struct adf_hw_device_class adf_4xxx_class = { .name = ADF_4XXX_DEVICE_NAME, .type = DEV_4XXX, .instances = 0, }; static u32 get_accel_mask(struct adf_accel_dev *accel_dev) { return ADF_4XXX_ACCELERATORS_MASK; } static u32 get_ae_mask(struct adf_accel_dev *accel_dev) { u32 fusectl4 = accel_dev->hw_device->fuses; return ~fusectl4 & ADF_4XXX_ACCELENGINES_MASK; } static int get_ring_to_svc_map(struct adf_accel_dev *accel_dev, u16 *ring_to_svc_map) { char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES]; char val[ADF_CFG_MAX_KEY_LEN_IN_BYTES]; u32 i = 0; *ring_to_svc_map = 0; /* Get the services enabled by user */ snprintf(key, sizeof(key), ADF_SERVICES_ENABLED); if 
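The ADF_4XXX_ASYM_SYM, ADF_4XXX_DC and related macros above build the ring-to-service map by packing one service code per ring pair, each shifted by its ADF_CFG_SERV_RING_PAIR_x_SHIFT. The sketch below shows the packing idea; the 4-bit field width and the service codes are assumptions for illustration only, not the driver's actual definitions.

#include <stdint.h>
#include <stdio.h>

enum svc { SVC_NA = 0, SVC_ASYM = 1, SVC_SYM = 2, SVC_COMP = 3 };

#define PAIR_SHIFT(n)	((n) * 4)	/* assumed field width */
#define PACK(s, n)	((uint16_t)((s) << PAIR_SHIFT(n)))
#define UNPACK(map, n)	(((map) >> PAIR_SHIFT(n)) & 0xF)

int
main(void)
{
	/* Equivalent of "asym;sym": alternate ASYM/SYM across 4 ring pairs. */
	uint16_t map = PACK(SVC_ASYM, 0) | PACK(SVC_SYM, 1) |
	    PACK(SVC_ASYM, 2) | PACK(SVC_SYM, 3);
	int i;

	for (i = 0; i < 4; i++)
		printf("ring pair %d -> service %u\n", i, UNPACK(map, i));
	return 0;
}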
(adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC, key, val)) return EFAULT; for (i = 0; i < ARRAY_SIZE(adf_4xxx_svcs); i++) { if (!strncmp(val, adf_4xxx_svcs[i].svcs_enabled, ADF_CFG_MAX_KEY_LEN_IN_BYTES)) { *ring_to_svc_map = adf_4xxx_svcs[i].rng_to_svc_msk; return 0; } } device_printf(GET_DEV(accel_dev), "Invalid services enabled: %s\n", val); return EFAULT; } static u32 get_num_accels(struct adf_hw_device_data *self) { return ADF_4XXX_MAX_ACCELERATORS; } static u32 get_num_aes(struct adf_hw_device_data *self) { if (!self || !self->ae_mask) return 0; return hweight32(self->ae_mask); } static u32 get_misc_bar_id(struct adf_hw_device_data *self) { return ADF_4XXX_PMISC_BAR; } static u32 get_etr_bar_id(struct adf_hw_device_data *self) { return ADF_4XXX_ETR_BAR; } static u32 get_sram_bar_id(struct adf_hw_device_data *self) { return ADF_4XXX_SRAM_BAR; } /* * The vector routing table is used to select the MSI-X entry to use for each * interrupt source. * The first ADF_4XXX_ETR_MAX_BANKS entries correspond to ring interrupts. * The final entry corresponds to VF2PF or error interrupts. * This vector table could be used to configure one MSI-X entry to be shared * between multiple interrupt sources. * * The default routing is set to have a one to one correspondence between the * interrupt source and the MSI-X entry used. */ static void set_msix_default_rttable(struct adf_accel_dev *accel_dev) { struct resource *csr; int i; csr = (&GET_BARS(accel_dev)[ADF_4XXX_PMISC_BAR])->virt_addr; for (i = 0; i <= ADF_4XXX_ETR_MAX_BANKS; i++) ADF_CSR_WR(csr, ADF_4XXX_MSIX_RTTABLE_OFFSET(i), i); } static u32 adf_4xxx_get_hw_cap(struct adf_accel_dev *accel_dev) { device_t pdev = accel_dev->accel_pci_dev.pci_dev; u32 fusectl1; u32 capabilities; /* Read accelerator capabilities mask */ fusectl1 = pci_read_config(pdev, ADF_4XXX_FUSECTL1_OFFSET, 4); capabilities = ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC | ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC | ICP_ACCEL_CAPABILITIES_CIPHER | ICP_ACCEL_CAPABILITIES_AUTHENTICATION | ICP_ACCEL_CAPABILITIES_COMPRESSION | ICP_ACCEL_CAPABILITIES_LZ4_COMPRESSION | ICP_ACCEL_CAPABILITIES_LZ4S_COMPRESSION | ICP_ACCEL_CAPABILITIES_HKDF | ICP_ACCEL_CAPABILITIES_SHA3_EXT | ICP_ACCEL_CAPABILITIES_SM3 | ICP_ACCEL_CAPABILITIES_SM4 | ICP_ACCEL_CAPABILITIES_CHACHA_POLY | ICP_ACCEL_CAPABILITIES_AESGCM_SPC | ICP_ACCEL_CAPABILITIES_AES_V2 | ICP_ACCEL_CAPABILITIES_RL; if (fusectl1 & ICP_ACCEL_4XXX_MASK_CIPHER_SLICE) { capabilities &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC; capabilities &= ~ICP_ACCEL_CAPABILITIES_CIPHER; } if (fusectl1 & ICP_ACCEL_4XXX_MASK_AUTH_SLICE) capabilities &= ~ICP_ACCEL_CAPABILITIES_AUTHENTICATION; if (fusectl1 & ICP_ACCEL_4XXX_MASK_PKE_SLICE) capabilities &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC; if (fusectl1 & ICP_ACCEL_4XXX_MASK_COMPRESS_SLICE) { capabilities &= ~ICP_ACCEL_CAPABILITIES_COMPRESSION; capabilities &= ~ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY64; } if (fusectl1 & ICP_ACCEL_4XXX_MASK_SMX_SLICE) { capabilities &= ~ICP_ACCEL_CAPABILITIES_SM3; capabilities &= ~ICP_ACCEL_CAPABILITIES_SM4; } return capabilities; } static u32 get_hb_clock(struct adf_hw_device_data *self) { /* * 4XXX uses KPT counter for HB */ return ADF_4XXX_KPT_COUNTER_FREQ; } static u32 get_ae_clock(struct adf_hw_device_data *self) { /* * Clock update interval is <16> ticks for qat_4xxx. 
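adf_4xxx_get_hw_cap() above follows a subtractive scheme: start from the full capability set for the generation, then clear whatever the FUSECTL1 register reports as fused off. A small sketch of that derivation, with made-up bit values (the real masks are the ICP_ACCEL_* constants):

#include <stdint.h>
#include <stdio.h>

#define CAP_SYM			(1u << 0)
#define CAP_AUTH		(1u << 1)
#define CAP_COMP		(1u << 2)

#define FUSE_CIPHER_SLICE_OFF	(1u << 0)
#define FUSE_COMPRESS_SLICE_OFF	(1u << 1)

static uint32_t
derive_caps(uint32_t fuses)
{
	uint32_t caps = CAP_SYM | CAP_AUTH | CAP_COMP; /* full set */

	if (fuses & FUSE_CIPHER_SLICE_OFF)
		caps &= ~CAP_SYM;
	if (fuses & FUSE_COMPRESS_SLICE_OFF)
		caps &= ~CAP_COMP;
	return caps;
}

int
main(void)
{
	/* Compression slice fused off: SYM and AUTH survive. */
	printf("caps = 0x%x\n", derive_caps(FUSE_COMPRESS_SLICE_OFF));
	return 0;
}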
*/ return self->clock_frequency / 16; } static int measure_clock(struct adf_accel_dev *accel_dev) { u32 frequency; int ret = 0; ret = adf_dev_measure_clock(accel_dev, &frequency, ADF_4XXX_MIN_AE_FREQ, ADF_4XXX_MAX_AE_FREQ); if (ret) return ret; accel_dev->hw_device->clock_frequency = frequency; return 0; } static int adf_4xxx_configure_accel_units(struct adf_accel_dev *accel_dev) { char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES] = { 0 }; char val_str[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = { 0 }; if (adf_cfg_section_add(accel_dev, ADF_GENERAL_SEC)) goto err; snprintf(key, sizeof(key), ADF_SERVICES_ENABLED); snprintf(val_str, sizeof(val_str), ADF_CFG_ASYM ADF_SERVICES_SEPARATOR ADF_CFG_SYM); if (adf_cfg_add_key_value_param( accel_dev, ADF_GENERAL_SEC, key, (void *)val_str, ADF_STR)) goto err; return 0; err: device_printf(GET_DEV(accel_dev), "Failed to configure accel units\n"); return EINVAL; } static u32 get_num_accel_units(struct adf_hw_device_data *self) { return ADF_4XXX_MAX_ACCELUNITS; } static void get_accel_unit(struct adf_hw_device_data *self, struct adf_accel_unit **accel_unit) { memcpy(*accel_unit, adf_4xxx_au_a_ae, sizeof(adf_4xxx_au_a_ae)); } static void adf_exit_accel_unit_services(struct adf_accel_dev *accel_dev) { if (accel_dev->au_info) { kfree(accel_dev->au_info->au); accel_dev->au_info->au = NULL; kfree(accel_dev->au_info); accel_dev->au_info = NULL; } } static int get_accel_unit_config(struct adf_accel_dev *accel_dev, u8 *num_sym_au, u8 *num_dc_au, u8 *num_asym_au) { struct adf_hw_device_data *hw_data = accel_dev->hw_device; char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES]; char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES]; u32 num_au = hw_data->get_num_accel_units(hw_data); /* One AU will be allocated by default if a service enabled */ u32 alloc_au = 1; /* There's always one AU that is used for Admin AE */ u32 service_mask = ADF_ACCEL_ADMIN; char *token, *cur_str; u32 disabled_caps = 0; /* Get the services enabled by user */ snprintf(key, sizeof(key), ADF_SERVICES_ENABLED); if (adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC, key, val)) return EFAULT; cur_str = val; token = strsep(&cur_str, ADF_SERVICES_SEPARATOR); while (token) { if (!strncmp(token, ADF_CFG_SYM, strlen(ADF_CFG_SYM))) service_mask |= ADF_ACCEL_CRYPTO; if (!strncmp(token, ADF_CFG_ASYM, strlen(ADF_CFG_ASYM))) service_mask |= ADF_ACCEL_ASYM; /* cy means both asym & crypto should be enabled * Hardware resources allocation check will be done later */ if (!strncmp(token, ADF_CFG_CY, strlen(ADF_CFG_CY))) service_mask |= ADF_ACCEL_ASYM | ADF_ACCEL_CRYPTO; if (!strncmp(token, ADF_SERVICE_DC, strlen(ADF_SERVICE_DC))) service_mask |= ADF_ACCEL_COMPRESSION; token = strsep(&cur_str, ADF_SERVICES_SEPARATOR); } /* Ensure the user won't enable more services than it can support */ if (hweight32(service_mask) > num_au) { device_printf(GET_DEV(accel_dev), "Can't enable more services than "); device_printf(GET_DEV(accel_dev), "%d!\n", num_au); return EFAULT; } else if (hweight32(service_mask) == 2) { /* Due to limitation, besides AU for Admin AE * only 2 more AUs can be allocated */ alloc_au = 2; } if (service_mask & ADF_ACCEL_CRYPTO) *num_sym_au = alloc_au; if (service_mask & ADF_ACCEL_ASYM) *num_asym_au = alloc_au; if (service_mask & ADF_ACCEL_COMPRESSION) *num_dc_au = alloc_au; /*update capability*/ if (!*num_sym_au || !(service_mask & ADF_ACCEL_CRYPTO)) { disabled_caps = ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC | ICP_ACCEL_CAPABILITIES_CIPHER | ICP_ACCEL_CAPABILITIES_SHA3_EXT | ICP_ACCEL_CAPABILITIES_SM3 | ICP_ACCEL_CAPABILITIES_SM4 | 
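get_accel_unit_config() above walks the user's "ServicesEnabled" string with strsep(), OR-ing a service bit per recognized token ("cy" expands to both sym and asym). A self-contained sketch of that token walk, with stand-in constants:

#include <stdio.h>
#include <string.h>

#define SVC_SYM		(1u << 0)
#define SVC_ASYM	(1u << 1)
#define SVC_DC		(1u << 2)

static unsigned
parse_services(char *val)
{
	unsigned mask = 0;
	char *token;

	/* strsep() consumes the string in place, splitting on ';'. */
	while ((token = strsep(&val, ";")) != NULL) {
		if (strcmp(token, "sym") == 0)
			mask |= SVC_SYM;
		else if (strcmp(token, "asym") == 0)
			mask |= SVC_ASYM;
		else if (strcmp(token, "cy") == 0) /* cy = sym + asym */
			mask |= SVC_SYM | SVC_ASYM;
		else if (strcmp(token, "dc") == 0)
			mask |= SVC_DC;
	}
	return mask;
}

int
main(void)
{
	char cfg[] = "asym;dc"; /* mutable copy, as strsep() requires */

	printf("service mask = 0x%x\n", parse_services(cfg)); /* 0x6 */
	return 0;
}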
ICP_ACCEL_CAPABILITIES_CHACHA_POLY | ICP_ACCEL_CAPABILITIES_AESGCM_SPC | ICP_ACCEL_CAPABILITIES_AES_V2; } if (!*num_asym_au || !(service_mask & ADF_ACCEL_ASYM)) { disabled_caps |= ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC | ICP_ACCEL_CAPABILITIES_AUTHENTICATION; } if (!*num_dc_au || !(service_mask & ADF_ACCEL_COMPRESSION)) { disabled_caps |= ICP_ACCEL_CAPABILITIES_COMPRESSION | ICP_ACCEL_CAPABILITIES_LZ4_COMPRESSION | ICP_ACCEL_CAPABILITIES_LZ4S_COMPRESSION | ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY64; accel_dev->hw_device->extended_dc_capabilities = 0; } accel_dev->hw_device->accel_capabilities_mask = adf_4xxx_get_hw_cap(accel_dev) & ~disabled_caps; hw_data->service_mask = service_mask; hw_data->service_to_load_mask = service_mask; return 0; } static int adf_init_accel_unit_services(struct adf_accel_dev *accel_dev) { u8 num_sym_au = 0, num_dc_au = 0, num_asym_au = 0; struct adf_hw_device_data *hw_data = accel_dev->hw_device; u32 num_au = hw_data->get_num_accel_units(hw_data); u32 au_size = num_au * sizeof(struct adf_accel_unit); u8 i; if (get_accel_unit_config( accel_dev, &num_sym_au, &num_dc_au, &num_asym_au)) return EFAULT; accel_dev->au_info = kzalloc(sizeof(*accel_dev->au_info), GFP_KERNEL); if (!accel_dev->au_info) return ENOMEM; accel_dev->au_info->au = kzalloc(au_size, GFP_KERNEL); if (!accel_dev->au_info->au) { kfree(accel_dev->au_info); accel_dev->au_info = NULL; return ENOMEM; } accel_dev->au_info->num_cy_au = num_sym_au; accel_dev->au_info->num_dc_au = num_dc_au; accel_dev->au_info->num_asym_au = num_asym_au; get_accel_unit(hw_data, &accel_dev->au_info->au); /* Enable ASYM accel units */ for (i = 0; i < num_au && num_asym_au > 0; i++) { if (accel_dev->au_info->au[i].services == ADF_ACCEL_SERVICE_NULL) { accel_dev->au_info->au[i].services = ADF_ACCEL_ASYM; num_asym_au--; } } /* Enable SYM accel units */ for (i = 0; i < num_au && num_sym_au > 0; i++) { if (accel_dev->au_info->au[i].services == ADF_ACCEL_SERVICE_NULL) { accel_dev->au_info->au[i].services = ADF_ACCEL_CRYPTO; num_sym_au--; } } /* Enable compression accel units */ for (i = 0; i < num_au && num_dc_au > 0; i++) { if (accel_dev->au_info->au[i].services == ADF_ACCEL_SERVICE_NULL) { accel_dev->au_info->au[i].services = ADF_ACCEL_COMPRESSION; num_dc_au--; } } accel_dev->au_info->dc_ae_msk |= hw_data->get_obj_cfg_ae_mask(accel_dev, ADF_ACCEL_COMPRESSION); return 0; } static int adf_init_accel_units(struct adf_accel_dev *accel_dev) { return adf_init_accel_unit_services(accel_dev); } static void adf_exit_accel_units(struct adf_accel_dev *accel_dev) { /* reset the AU service */ adf_exit_accel_unit_services(accel_dev); } static const char * get_obj_name(struct adf_accel_dev *accel_dev, enum adf_accel_unit_services service) { switch (service) { case ADF_ACCEL_ASYM: return ADF_4XXX_ASYM_OBJ; case ADF_ACCEL_CRYPTO: return ADF_4XXX_SYM_OBJ; case ADF_ACCEL_COMPRESSION: return ADF_4XXX_DC_OBJ; case ADF_ACCEL_ADMIN: return ADF_4XXX_ADMIN_OBJ; default: return NULL; } } static uint32_t get_objs_num(struct adf_accel_dev *accel_dev) { return ADF_4XXX_MAX_OBJ; } static uint32_t get_obj_cfg_ae_mask(struct adf_accel_dev *accel_dev, enum adf_accel_unit_services service) { u32 ae_mask = 0; struct adf_hw_device_data *hw_data = accel_dev->hw_device; u32 num_au = hw_data->get_num_accel_units(hw_data); struct adf_accel_unit *accel_unit = accel_dev->au_info->au; u32 i = 0; if (service == ADF_ACCEL_SERVICE_NULL) return 0; for (i = 0; i < num_au; i++) { if (accel_unit[i].services == service) ae_mask |= accel_unit[i].ae_mask; } return ae_mask; } static 
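adf_init_accel_unit_services() above assigns services to accel units first-fit: each requested service claims the next units still marked ADF_ACCEL_SERVICE_NULL, in a fixed priority order (asym, then sym, then compression). A stripped-down model of those three loops:

#include <stdio.h>

enum au_svc { AU_FREE = 0, AU_ASYM, AU_SYM, AU_COMP };

static void
claim(enum au_svc *au, int num_au, enum au_svc svc, int want)
{
	int i;

	/* Claim the next 'want' free units for this service. */
	for (i = 0; i < num_au && want > 0; i++) {
		if (au[i] == AU_FREE) {
			au[i] = svc;
			want--;
		}
	}
}

int
main(void)
{
	enum au_svc au[4] = { AU_FREE, AU_FREE, AU_FREE, AU_FREE };
	int i;

	claim(au, 4, AU_ASYM, 1);
	claim(au, 4, AU_SYM, 1);
	claim(au, 4, AU_COMP, 2);
	for (i = 0; i < 4; i++)
		printf("au[%d] = %d\n", i, au[i]);
	return 0;
}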
enum adf_accel_unit_services adf_4xxx_get_service_type(struct adf_accel_dev *accel_dev, s32 obj_num) { struct adf_accel_unit *accel_unit; struct adf_hw_device_data *hw_data = accel_dev->hw_device; u8 num_au = hw_data->get_num_accel_units(hw_data); int i; if (!hw_data->service_to_load_mask) return ADF_ACCEL_SERVICE_NULL; if (accel_dev->au_info && accel_dev->au_info->au) accel_unit = accel_dev->au_info->au; else return ADF_ACCEL_SERVICE_NULL; for (i = num_au - 2; i >= 0; i--) { if (hw_data->service_to_load_mask & accel_unit[i].services) { hw_data->service_to_load_mask &= ~accel_unit[i].services; return accel_unit[i].services; } } /* admin AE should be loaded last */ if (hw_data->service_to_load_mask & accel_unit[num_au - 1].services) { hw_data->service_to_load_mask &= ~accel_unit[num_au - 1].services; return accel_unit[num_au - 1].services; } return ADF_ACCEL_SERVICE_NULL; } static void get_ring_svc_map_data(int ring_pair_index, u16 ring_to_svc_map, u8 *serv_type, int *ring_index, int *num_rings_per_srv, int bundle_num) { *serv_type = GET_SRV_TYPE(ring_to_svc_map, bundle_num % ADF_CFG_NUM_SERVICES); *ring_index = 0; *num_rings_per_srv = ADF_4XXX_NUM_RINGS_PER_BANK / 2; } static int adf_get_dc_extcapabilities(struct adf_accel_dev *accel_dev, u32 *capabilities) { struct icp_qat_fw_init_admin_req req; struct icp_qat_fw_init_admin_resp resp; u8 i; struct adf_hw_device_data *hw_data = accel_dev->hw_device; u8 num_au = hw_data->get_num_accel_units(hw_data); u32 first_dc_ae = 0; for (i = 0; i < num_au; i++) { if (accel_dev->au_info->au[i].services & ADF_ACCEL_COMPRESSION) { first_dc_ae = accel_dev->au_info->au[i].ae_mask; first_dc_ae &= ~(first_dc_ae - 1); } } memset(&req, 0, sizeof(req)); memset(&resp, 0, sizeof(resp)); req.cmd_id = ICP_QAT_FW_COMP_CAPABILITY_GET; if (likely(first_dc_ae)) { if (adf_send_admin(accel_dev, &req, &resp, first_dc_ae) || resp.status) { *capabilities = 0; return EFAULT; } *capabilities = resp.extended_features; } return 0; } static int adf_get_fw_status(struct adf_accel_dev *accel_dev, u8 *major, u8 *minor, u8 *patch) { struct icp_qat_fw_init_admin_req req; struct icp_qat_fw_init_admin_resp resp; u32 ae_mask = 1; memset(&req, 0, sizeof(req)); memset(&resp, 0, sizeof(resp)); req.cmd_id = ICP_QAT_FW_STATUS_GET; if (adf_send_admin(accel_dev, &req, &resp, ae_mask)) return EFAULT; *major = resp.version_major_num; *minor = resp.version_minor_num; *patch = resp.version_patch_num; return 0; } static int adf_4xxx_send_admin_init(struct adf_accel_dev *accel_dev) { int ret = 0; struct icp_qat_fw_init_admin_req req; struct icp_qat_fw_init_admin_resp resp; struct adf_hw_device_data *hw_data = accel_dev->hw_device; u32 ae_mask = hw_data->ae_mask; u32 admin_ae_mask = hw_data->admin_ae_mask; u8 num_au = hw_data->get_num_accel_units(hw_data); u8 i; u32 dc_capabilities = 0; for (i = 0; i < num_au; i++) { if (accel_dev->au_info->au[i].services == ADF_ACCEL_SERVICE_NULL) ae_mask &= ~accel_dev->au_info->au[i].ae_mask; if (accel_dev->au_info->au[i].services != ADF_ACCEL_ADMIN) admin_ae_mask &= ~accel_dev->au_info->au[i].ae_mask; } if (!accel_dev->admin) { device_printf(GET_DEV(accel_dev), "adf_admin not available\n"); return EFAULT; } memset(&req, 0, sizeof(req)); memset(&resp, 0, sizeof(resp)); req.cmd_id = ICP_QAT_FW_CONSTANTS_CFG; req.init_cfg_sz = ADF_CONST_TABLE_SIZE; req.init_cfg_ptr = accel_dev->admin->const_tbl_addr; if (adf_send_admin(accel_dev, &req, &resp, admin_ae_mask)) { device_printf(GET_DEV(accel_dev), "Error sending constants config message\n"); return EFAULT; } memset(&req, 
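The expression first_dc_ae &= ~(first_dc_ae - 1) in adf_get_dc_extcapabilities() above isolates the lowest set bit of the compression AE mask, so the capability-query admin message is sent to exactly one compression AE. A standalone demonstration of the bit trick:

#include <stdint.h>
#include <stdio.h>

static uint32_t
lowest_set_bit(uint32_t mask)
{
	/* Subtracting 1 flips the lowest set bit and everything below it;
	 * ANDing with the complement keeps only that lowest bit.
	 * (Returns 0 for an empty mask, which the driver guards against.) */
	return mask & ~(mask - 1);
}

int
main(void)
{
	uint32_t ae_mask = 0xF0; /* say AEs 4..7 run compression */

	printf("query AE mask = 0x%x\n", lowest_set_bit(ae_mask)); /* 0x10 */
	return 0;
}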
0, sizeof(req)); memset(&resp, 0, sizeof(resp)); req.cmd_id = ICP_QAT_FW_INIT_ME; if (adf_send_admin(accel_dev, &req, &resp, ae_mask)) { device_printf(GET_DEV(accel_dev), "Error sending init message\n"); return EFAULT; } memset(&req, 0, sizeof(req)); memset(&resp, 0, sizeof(resp)); req.cmd_id = ICP_QAT_FW_HEARTBEAT_TIMER_SET; req.init_cfg_ptr = accel_dev->admin->phy_hb_addr; if (adf_get_hb_timer(accel_dev, &req.heartbeat_ticks)) return EINVAL; if (adf_send_admin(accel_dev, &req, &resp, ae_mask)) device_printf(GET_DEV(accel_dev), "Heartbeat is not supported\n"); ret = adf_get_dc_extcapabilities(accel_dev, &dc_capabilities); if (unlikely(ret)) { device_printf(GET_DEV(accel_dev), "Could not get FW ext. capabilities\n"); } accel_dev->hw_device->extended_dc_capabilities = dc_capabilities; adf_get_fw_status(accel_dev, &accel_dev->fw_versions.fw_version_major, &accel_dev->fw_versions.fw_version_minor, &accel_dev->fw_versions.fw_version_patch); device_printf(GET_DEV(accel_dev), "FW version: %d.%d.%d\n", accel_dev->fw_versions.fw_version_major, accel_dev->fw_versions.fw_version_minor, accel_dev->fw_versions.fw_version_patch); return ret; } static enum dev_sku_info get_sku(struct adf_hw_device_data *self) { return DEV_SKU_1; } static struct adf_accel_unit * get_au_by_ae(struct adf_accel_dev *accel_dev, int ae_num) { int i = 0; struct adf_accel_unit *accel_unit = accel_dev->au_info->au; if (!accel_unit) return NULL; for (i = 0; i < ADF_4XXX_MAX_ACCELUNITS; i++) if (accel_unit[i].ae_mask & BIT(ae_num)) return &accel_unit[i]; return NULL; } static bool check_accel_unit_service(enum adf_accel_unit_services au_srv, enum adf_cfg_service_type ring_srv) { - if ((au_srv & ADF_ACCEL_SERVICE_NULL) && ring_srv == NA) + if ((ADF_ACCEL_SERVICE_NULL == au_srv) && ring_srv == NA) return true; if ((au_srv & ADF_ACCEL_COMPRESSION) && ring_srv == COMP) return true; if ((au_srv & ADF_ACCEL_ASYM) && ring_srv == ASYM) return true; if ((au_srv & ADF_ACCEL_CRYPTO) && ring_srv == SYM) return true; return false; } static void adf_4xxx_cfg_gen_dispatch_arbiter(struct adf_accel_dev *accel_dev, u32 *thrd_to_arb_map_gen) { struct adf_accel_unit *au = NULL; int engine = 0; int thread = 0; int service; u16 ena_srv_mask; u16 service_type; u32 service_mask; unsigned long thd_srv_mask = default_active_thd_mask; ena_srv_mask = accel_dev->hw_device->ring_to_svc_map; /* If ring_to_svc_map is not changed, return default arbiter value */ if (ena_srv_mask == ADF_4XXX_DEFAULT_RING_TO_SRV_MAP) { memcpy(thrd_to_arb_map_gen, thrd_to_arb_map, sizeof(thrd_to_arb_map_gen[0]) * ADF_4XXX_MAX_ACCELENGINES); return; } for (engine = 0; engine < ADF_4XXX_MAX_ACCELENGINES - 1; engine++) { thrd_to_arb_map_gen[engine] = 0; service_mask = 0; au = get_au_by_ae(accel_dev, engine); if (!au) continue; for (service = 0; service < ADF_CFG_MAX_SERVICES; service++) { service_type = GET_SRV_TYPE(ena_srv_mask, service); if (check_accel_unit_service(au->services, service_type)) service_mask |= BIT(service); } if (au->services == ADF_ACCEL_COMPRESSION) thd_srv_mask = dc_me_active_thd_mask; else thd_srv_mask = default_active_thd_mask; for_each_set_bit(thread, &thd_srv_mask, 8) { thrd_to_arb_map_gen[engine] |= (service_mask << (ADF_CFG_MAX_SERVICES * thread)); } } } static void adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev, u32 const **arb_map_config) { int i; struct adf_hw_device_data *hw_device = accel_dev->hw_device; for (i = 1; i < ADF_4XXX_MAX_ACCELENGINES; i++) { if (~hw_device->ae_mask & (1 << i)) thrd_to_arb_map[i] = 0; } 
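The check_accel_unit_service() hunk above is the substantive fix in this file: assuming ADF_ACCEL_SERVICE_NULL is the enum's zero value (consistent with its use as the "unassigned" state), the old test (au_srv & ADF_ACCEL_SERVICE_NULL) was x & 0 and therefore always false, making the NA branch unreachable; the patch tests for equality instead. A compilable illustration of why:

#include <stdbool.h>
#include <stdio.h>

enum svc { SERVICE_NULL = 0, SERVICE_COMP = 1 << 0, SERVICE_ASYM = 1 << 1 };

static bool
matches_null_buggy(enum svc s)
{
	return (s & SERVICE_NULL) != 0; /* never true: anything & 0 == 0 */
}

static bool
matches_null_fixed(enum svc s)
{
	return s == SERVICE_NULL; /* true exactly for an unassigned unit */
}

int
main(void)
{
	printf("buggy: %d\n", matches_null_buggy(SERVICE_NULL)); /* 0 */
	printf("fixed: %d\n", matches_null_fixed(SERVICE_NULL)); /* 1 */
	return 0;
}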
adf_4xxx_cfg_gen_dispatch_arbiter(accel_dev, thrd_to_arb_map_gen); *arb_map_config = thrd_to_arb_map_gen; } static void get_arb_info(struct arb_info *arb_info) { arb_info->wrk_cfg_offset = ADF_4XXX_ARB_CONFIG; arb_info->arbiter_offset = ADF_4XXX_ARB_OFFSET; arb_info->wrk_thd_2_srv_arb_map = ADF_4XXX_ARB_WRK_2_SER_MAP_OFFSET; } static void get_admin_info(struct admin_info *admin_csrs_info) { admin_csrs_info->mailbox_offset = ADF_4XXX_MAILBOX_BASE_OFFSET; admin_csrs_info->admin_msg_ur = ADF_4XXX_ADMINMSGUR_OFFSET; admin_csrs_info->admin_msg_lr = ADF_4XXX_ADMINMSGLR_OFFSET; } static void adf_enable_error_correction(struct adf_accel_dev *accel_dev) { struct adf_bar *misc_bar = &GET_BARS(accel_dev)[ADF_4XXX_PMISC_BAR]; struct resource *csr = misc_bar->virt_addr; /* Enable all in errsou3 except VFLR notification on host */ ADF_CSR_WR(csr, ADF_4XXX_ERRMSK3, ADF_4XXX_VFLNOTIFY); } static void adf_enable_ints(struct adf_accel_dev *accel_dev) { struct resource *addr; addr = (&GET_BARS(accel_dev)[ADF_4XXX_PMISC_BAR])->virt_addr; /* Enable bundle interrupts */ ADF_CSR_WR(addr, ADF_4XXX_SMIAPF_RP_X0_MASK_OFFSET, 0); ADF_CSR_WR(addr, ADF_4XXX_SMIAPF_RP_X1_MASK_OFFSET, 0); /* Enable misc interrupts */ ADF_CSR_WR(addr, ADF_4XXX_SMIAPF_MASK_OFFSET, 0); } static int adf_init_device(struct adf_accel_dev *accel_dev) { struct resource *addr; u32 status; u32 csr; int ret; addr = (&GET_BARS(accel_dev)[ADF_4XXX_PMISC_BAR])->virt_addr; /* Temporarily mask PM interrupt */ csr = ADF_CSR_RD(addr, ADF_4XXX_ERRMSK2); csr |= ADF_4XXX_PM_SOU; ADF_CSR_WR(addr, ADF_4XXX_ERRMSK2, csr); /* Set DRV_ACTIVE bit to power up the device */ ADF_CSR_WR(addr, ADF_4XXX_PM_INTERRUPT, ADF_4XXX_PM_DRV_ACTIVE); /* Poll status register to make sure the device is powered up */ status = 0; ret = read_poll_timeout(ADF_CSR_RD, status, status & ADF_4XXX_PM_INIT_STATE, ADF_4XXX_PM_POLL_DELAY_US, ADF_4XXX_PM_POLL_TIMEOUT_US, true, addr, ADF_4XXX_PM_STATUS); if (ret) device_printf(GET_DEV(accel_dev), "Failed to power up the device\n"); return ret; } void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data) { hw_data->dev_class = &adf_4xxx_class; hw_data->instance_id = adf_4xxx_class.instances++; hw_data->num_banks = ADF_4XXX_ETR_MAX_BANKS; hw_data->num_rings_per_bank = ADF_4XXX_NUM_RINGS_PER_BANK; hw_data->num_accel = ADF_4XXX_MAX_ACCELERATORS; hw_data->num_engines = ADF_4XXX_MAX_ACCELENGINES; hw_data->num_logical_accel = 1; hw_data->tx_rx_gap = ADF_4XXX_RX_RINGS_OFFSET; hw_data->tx_rings_mask = ADF_4XXX_TX_RINGS_MASK; hw_data->alloc_irq = adf_isr_resource_alloc; hw_data->free_irq = adf_isr_resource_free; hw_data->enable_error_correction = adf_enable_error_correction; hw_data->get_accel_mask = get_accel_mask; hw_data->get_ae_mask = get_ae_mask; hw_data->get_num_accels = get_num_accels; hw_data->get_num_aes = get_num_aes; hw_data->get_sram_bar_id = get_sram_bar_id; hw_data->get_etr_bar_id = get_etr_bar_id; hw_data->get_misc_bar_id = get_misc_bar_id; hw_data->get_arb_info = get_arb_info; hw_data->get_admin_info = get_admin_info; hw_data->get_accel_cap = adf_4xxx_get_hw_cap; hw_data->clock_frequency = ADF_4XXX_AE_FREQ; hw_data->get_sku = get_sku; hw_data->heartbeat_ctr_num = ADF_NUM_HB_CNT_PER_AE; hw_data->fw_name = ADF_4XXX_FW; hw_data->fw_mmp_name = ADF_4XXX_MMP; hw_data->init_admin_comms = adf_init_admin_comms; hw_data->exit_admin_comms = adf_exit_admin_comms; hw_data->send_admin_init = adf_4xxx_send_admin_init; hw_data->init_arb = adf_init_gen2_arb; hw_data->exit_arb = adf_exit_arb; hw_data->get_arb_mapping = adf_get_arbiter_mapping; 
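adf_init_device() above performs a power-up handshake: it writes the DRV_ACTIVE bit, then polls the PM status register with a fixed delay until the init bit appears or a timeout expires (the driver uses read_poll_timeout() for this). Below is a hand-rolled user-space equivalent of that poll loop, with invented timing constants and a plain word in place of the CSR:

#include <stdio.h>
#include <unistd.h>

#define PM_INIT_STATE	(1u << 0)
#define POLL_DELAY_US	20
#define POLL_TIMEOUT_US	1000

static volatile unsigned pm_status; /* stands in for the PM status CSR */

static int
poll_powered_up(void)
{
	unsigned waited;

	for (waited = 0; waited < POLL_TIMEOUT_US; waited += POLL_DELAY_US) {
		if (pm_status & PM_INIT_STATE)
			return 0;	/* device reported powered up */
		usleep(POLL_DELAY_US);
	}
	return -1;	/* timed out: device failed to power up */
}

int
main(void)
{
	pm_status = PM_INIT_STATE; /* pretend the hardware came up */
	printf("poll result: %d\n", poll_powered_up());
	return 0;
}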
hw_data->enable_ints = adf_enable_ints; hw_data->init_device = adf_init_device; hw_data->reset_device = adf_reset_flr; hw_data->restore_device = adf_dev_restore; hw_data->init_accel_units = adf_init_accel_units; hw_data->exit_accel_units = adf_exit_accel_units; hw_data->get_num_accel_units = get_num_accel_units; hw_data->configure_accel_units = adf_4xxx_configure_accel_units; hw_data->get_ring_to_svc_map = get_ring_to_svc_map; hw_data->get_ring_svc_map_data = get_ring_svc_map_data; hw_data->admin_ae_mask = ADF_4XXX_ADMIN_AE_MASK; hw_data->get_objs_num = get_objs_num; hw_data->get_obj_name = get_obj_name; hw_data->get_obj_cfg_ae_mask = get_obj_cfg_ae_mask; hw_data->get_service_type = adf_4xxx_get_service_type; hw_data->set_msix_rttable = set_msix_default_rttable; hw_data->set_ssm_wdtimer = adf_gen4_set_ssm_wdtimer; hw_data->disable_iov = adf_disable_sriov; hw_data->min_iov_compat_ver = ADF_PFVF_COMPATIBILITY_VERSION; hw_data->config_device = adf_config_device; hw_data->set_asym_rings_mask = adf_cfg_set_asym_rings_mask; hw_data->get_hb_clock = get_hb_clock; hw_data->get_heartbeat_status = adf_get_heartbeat_status; hw_data->get_ae_clock = get_ae_clock; hw_data->measure_clock = measure_clock; hw_data->query_storage_cap = 1; adf_gen4_init_hw_csr_info(&hw_data->csr_info); } void adf_clean_hw_data_4xxx(struct adf_hw_device_data *hw_data) { hw_data->dev_class->instances--; }
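adf_init_hw_data_4xxx() above is one instance of the driver's generation dispatch: common code only ever calls through the adf_hw_device_data function pointers, and each hardware generation fills them in once at probe time. A stripped-down sketch of that shape, with invented names and values:

#include <stdio.h>

struct hw_ops {
	unsigned (*get_num_banks)(void);
	const char *fw_name;
};

static unsigned
gen4_num_banks(void)
{
	return 64;
}

static void
init_hw_data_gen4(struct hw_ops *ops)
{
	/* Each generation populates the ops table once at probe time. */
	ops->get_num_banks = gen4_num_banks;
	ops->fw_name = "qat_4xxx_fw"; /* illustrative name */
}

/* Generation-agnostic common code: only sees the ops table. */
static void
common_probe(const struct hw_ops *ops)
{
	printf("%s: %u banks\n", ops->fw_name, ops->get_num_banks());
}

int
main(void)
{
	struct hw_ops ops;

	init_hw_data_gen4(&ops);
	common_probe(&ops);
	return 0;
}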