diff --git a/share/man/man4/qat.4 b/share/man/man4/qat.4 index 347f257c7aec..b0fa0b0cbc92 100644 --- a/share/man/man4/qat.4 +++ b/share/man/man4/qat.4 @@ -1,185 +1,185 @@ .\" SPDX-License-Identifier: BSD-3-Clause .\" Copyright(c) 2007-2022 Intel Corporation .Dd May 4, 2023 .Dt QAT 4 .Os .Sh NAME .Nm qat .Nd Intel (R) QuickAssist Technology (QAT) driver .Sh SYNOPSIS To load the driver call: .Pp .Bl -item -compact .It kldload qat .El .Pp In order to load the driver on boot add these lines to .Xr loader.conf 5 selecting firmware(s) suitable for installed device(s) .Pp .Bl -item -compact .It qat_200xx_fw_load="YES" .It qat_c3xxx_fw_load="YES" .It qat_c4xxx_fw_load="YES" .It qat_c62x_fw_load="YES" .It qat_dh895xcc_fw_load="YES" .It qat_4xxx_fw_load="YES" .It qat_load="YES" .El .Sh DESCRIPTION The .Nm driver supports cryptography and compression acceleration of the Intel (R) QuickAssist Technology (QAT) devices. .Pp The .Nm driver is intended for platforms that contain: .Bl -bullet -compact .It Intel (R) C62x Chipset .It Intel (R) Atom C3000 processor product family .It Intel (R) QuickAssist Adapter 8960/Intel (R) QuickAssist Adapter 8970 (formerly known as "Lewis Hill") .It Intel (R) Communications Chipset 8925 to 8955 Series .It Intel (R) Atom P5300 processor product family .It Intel (R) QAT 4xxx Series .El .Pp The .Nm driver supports cryptography and compression acceleration. A complete API for offloading these operations is exposed in the kernel and may be used by any other entity directly. 
For details of usage and supported operations and algorithms refer to the -following documentation available from -.Lk 01.org : +following documentation available from Intel Download Center +.Lk https://downloadcenter.intel.com : .Bl -bullet -compact .It .Rs .%A Intel (R) .%T QuickAssist Technology API Programmer's Guide .Re .It .Rs .%A Intel (R) .%T QuickAssist Technology Cryptographic API Reference Manual .Re .It .Rs .%A Intel (R) .%T QuickAssist Technology Data Compression API Reference Manual .Re .It .Rs .%A Intel (R) .%T QuickAssist Technology Performance Optimization Guide .Re .El .Pp In addition to exposing complete kernel API for offloading cryptography and compression operations, the .Nm driver also integrates with .Xr crypto 4 , allowing offloading supported cryptography operations to Intel (R) QuickAssist Technology (QAT) devices. For details of usage and supported operations and algorithms refer to the documentation mentioned above and .Sx SEE ALSO section. .Sh SYSCTL_VARIABLES Following variables may be used to reconfigure the QAT device.\& For configuration persistence those variables may be set before loading the driver, either via .Xr kenv 1 or .Xr loader.conf(5). The device specific configuration options are prefixed with .Va dev.qat.X\&. where X is the device number. The specific device needs to be in "down" state before changing the configuration. .Bl -tag -width indent .It Va state Show current state of the device. Override the device state. Possible values: "down", "up". NOTE: If the symmetric services are used for device the qat_ocf driver needs to be disabled prior the device reconfiguration. Following variable may be used to enable/disable the QAT cryptographic framework connectivity .Va dev.qat_ocf.0.enable\&. Enabled by default. .It Va cfg_services Override the device services enabled: symmetric, asymmetric, data compression. Possible values: "sym", "asym", "dc", "sym;dc", "asym;dc", "sym;asym". 
Default services configured are "sym;asym" for even and "dc" for odd devices. .It Va cfg_mode Override the device mode configuration for kernel space and user space instances. Possible values: "ks", "us", "ks;us". Default value "ks;us". .It Va num_user_processes Override the number of uio user space processes that can connect to the QAT device. Default: 2 .El .Pp The following .Xr sysctl 8 variables are read-only: .Bl -tag -width indent .It Va frequency QAT device frequency value. .It Va mmp_version QAT MMP Library revision number. .It Va hw_version QAT hardware revision number. .It Va fw_version QAT firmware revision number. .It Va dev_cfg Summary of device specific configuration. .It Va heartbeat QAT device heartbeat status. Value '1' indicates that the device is operational. '0' value means that the device is not responsive. Device requires restart. .It Va heartbeat_failed Number of QAT heartbeat failures received. .It Va heartbeat_sent Number of QAT heartbeat requests sent. .El .Sh COMPATIBILITY The .Nm driver replaced previous implementation introduced in .Fx 13.0 . Current version, in addition to .Xr crypto 4 integration, supports also data compression and exposes a complete API for offloading data compression and cryptography operations. .Sh SEE ALSO .Xr crypto 4 , .Xr ipsec 4 , .Xr pci 4 , .Xr crypto 7 , .Xr crypto 9 .Sh HISTORY This .Nm driver was introduced in .Fx 14.0 . .Fx 13.0 included a different version of .Nm driver. .Sh AUTHORS The .Nm driver was written by .An Intel (R) Corporation . 
diff --git a/sys/dev/qat/qat_api/common/compression/dc_dp.c b/sys/dev/qat/qat_api/common/compression/dc_dp.c index 1bc50d89365d..8b409d9ad7ca 100644 --- a/sys/dev/qat/qat_api/common/compression/dc_dp.c +++ b/sys/dev/qat/qat_api/common/compression/dc_dp.c @@ -1,567 +1,853 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright(c) 2007-2022 Intel Corporation */ /** ***************************************************************************** * @file dc_dp.c * * @defgroup cpaDcDp Data Compression Data Plane API * * @ingroup cpaDcDp * * @description * Implementation of the Data Compression DP operations. * *****************************************************************************/ /* ******************************************************************************* * Include public/global header files ******************************************************************************* */ #include "cpa.h" #include "cpa_dc.h" #include "cpa_dc_dp.h" #include "icp_qat_fw_comp.h" /* ******************************************************************************* * Include private header files ******************************************************************************* */ #include "dc_session.h" #include "dc_datapath.h" #include "lac_common.h" #include "lac_mem.h" #include "lac_mem_pools.h" #include "sal_types_compression.h" #include "lac_sal.h" #include "lac_sync.h" #include "sal_service_state.h" #include "sal_qat_cmn_msg.h" #include "icp_sal_poll.h" #include "sal_hw_gen.h" /** ***************************************************************************** * @ingroup cpaDcDp * Check that pOpData is valid * * @description * Check that all the parameters defined in the pOpData are valid * * @param[in] pOpData Pointer to a structure containing the * request parameters * * @retval CPA_STATUS_SUCCESS Function executed successfully * @retval CPA_STATUS_INVALID_PARAM Invalid parameter passed in * *****************************************************************************/ static 
CpaStatus dcDataPlaneParamCheck(const CpaDcDpOpData *pOpData) { sal_compression_service_t *pService = NULL; dc_session_desc_t *pSessionDesc = NULL; LAC_CHECK_NULL_PARAM(pOpData); LAC_CHECK_NULL_PARAM(pOpData->dcInstance); LAC_CHECK_NULL_PARAM(pOpData->pSessionHandle); /* Ensure this is a compression instance */ SAL_CHECK_INSTANCE_TYPE(pOpData->dcInstance, SAL_SERVICE_TYPE_COMPRESSION); pService = (sal_compression_service_t *)(pOpData->dcInstance); pSessionDesc = DC_SESSION_DESC_FROM_CTX_GET(pOpData->pSessionHandle); if (NULL == pSessionDesc) { QAT_UTILS_LOG("Session handle not as expected.\n"); return CPA_STATUS_INVALID_PARAM; } if (CPA_FALSE == pSessionDesc->isDcDp) { QAT_UTILS_LOG("The session type should be data plane.\n"); return CPA_STATUS_INVALID_PARAM; } /* Compressing zero byte is not supported */ if ((CPA_DC_DIR_COMPRESS == pSessionDesc->sessDirection) && (0 == pOpData->bufferLenToCompress)) { QAT_UTILS_LOG("The source buffer length to compress needs to " "be greater than zero byte.\n"); return CPA_STATUS_INVALID_PARAM; } if (pOpData->sessDirection > CPA_DC_DIR_DECOMPRESS) { QAT_UTILS_LOG("Invalid direction of operation.\n"); return CPA_STATUS_INVALID_PARAM; } if (0 == pOpData->srcBuffer) { QAT_UTILS_LOG("Invalid srcBuffer\n"); return CPA_STATUS_INVALID_PARAM; } if (0 == pOpData->destBuffer) { QAT_UTILS_LOG("Invalid destBuffer\n"); return CPA_STATUS_INVALID_PARAM; } if (pOpData->srcBuffer == pOpData->destBuffer) { QAT_UTILS_LOG("In place operation is not supported.\n"); return CPA_STATUS_INVALID_PARAM; } if (0 == pOpData->thisPhys) { QAT_UTILS_LOG("Invalid thisPhys\n"); return CPA_STATUS_INVALID_PARAM; } if ((CPA_TRUE != pOpData->compressAndVerify) && (CPA_FALSE != pOpData->compressAndVerify)) { QAT_UTILS_LOG("Invalid compressAndVerify\n"); return CPA_STATUS_INVALID_PARAM; } if ((CPA_TRUE == pOpData->compressAndVerify) && !(pService->generic_service_info.dcExtendedFeatures & DC_CNV_EXTENDED_CAPABILITY)) { QAT_UTILS_LOG("Invalid compressAndVerify, no CNV 
capability\n"); return CPA_STATUS_UNSUPPORTED; } if ((CPA_TRUE != pOpData->compressAndVerifyAndRecover) && (CPA_FALSE != pOpData->compressAndVerifyAndRecover)) { QAT_UTILS_LOG("Invalid compressAndVerifyAndRecover\n"); return CPA_STATUS_INVALID_PARAM; } if ((CPA_TRUE == pOpData->compressAndVerifyAndRecover) && (CPA_FALSE == pOpData->compressAndVerify)) { QAT_UTILS_LOG("CnVnR option set without setting CnV\n"); return CPA_STATUS_INVALID_PARAM; } if ((CPA_TRUE == pOpData->compressAndVerifyAndRecover) && !(pService->generic_service_info.dcExtendedFeatures & DC_CNVNR_EXTENDED_CAPABILITY)) { QAT_UTILS_LOG( "Invalid CnVnR option set and no CnVnR capability.\n"); return CPA_STATUS_UNSUPPORTED; } if ((CPA_DP_BUFLIST == pOpData->srcBufferLen) && (CPA_DP_BUFLIST != pOpData->destBufferLen)) { QAT_UTILS_LOG( "The source and destination buffers need to be of the same type (both flat buffers or buffer lists).\n"); return CPA_STATUS_INVALID_PARAM; } if ((CPA_DP_BUFLIST != pOpData->srcBufferLen) && (CPA_DP_BUFLIST == pOpData->destBufferLen)) { QAT_UTILS_LOG( "The source and destination buffers need to be of the same type (both flat buffers or buffer lists).\n"); return CPA_STATUS_INVALID_PARAM; } if (CPA_DP_BUFLIST != pOpData->srcBufferLen) { if (pOpData->srcBufferLen < pOpData->bufferLenToCompress) { QAT_UTILS_LOG( "srcBufferLen is smaller than bufferLenToCompress.\n"); return CPA_STATUS_INVALID_PARAM; } if (pOpData->destBufferLen < pOpData->bufferLenForData) { QAT_UTILS_LOG( "destBufferLen is smaller than bufferLenForData.\n"); return CPA_STATUS_INVALID_PARAM; } } else { /* We are assuming that there is enough memory in the source and * destination buffer lists. 
We only receive physical addresses * of the buffers so we are unable to test it here */ LAC_CHECK_8_BYTE_ALIGNMENT(pOpData->srcBuffer); LAC_CHECK_8_BYTE_ALIGNMENT(pOpData->destBuffer); } LAC_CHECK_8_BYTE_ALIGNMENT(pOpData->thisPhys); if ((CPA_DC_DIR_COMPRESS == pSessionDesc->sessDirection) || (CPA_DC_DIR_COMBINED == pSessionDesc->sessDirection)) { if (CPA_DC_HT_FULL_DYNAMIC == pSessionDesc->huffType) { /* Check if Intermediate Buffer Array pointer is NULL */ if (isDcGen2x(pService) && ((0 == pService->pInterBuffPtrsArrayPhyAddr) || (NULL == pService->pInterBuffPtrsArray))) { QAT_UTILS_LOG( "No intermediate buffer defined for this instance - see cpaDcStartInstance.\n"); return CPA_STATUS_INVALID_PARAM; } /* Ensure that the destination buffer length for data is * greater * or equal to 128B */ if (pOpData->bufferLenForData < DC_DEST_BUFFER_DYN_MIN_SIZE) { QAT_UTILS_LOG( "Destination buffer length for data should be greater or equal to 128B.\n"); return CPA_STATUS_INVALID_PARAM; } } else { /* Ensure that the destination buffer length for data is * greater * or equal to min output buffsize */ if (pOpData->bufferLenForData < pService->comp_device_data.minOutputBuffSize) { QAT_UTILS_LOG( "Destination buffer size should be greater or equal to %d bytes.\n", pService->comp_device_data .minOutputBuffSize); return CPA_STATUS_INVALID_PARAM; } } } return CPA_STATUS_SUCCESS; } +/** + ***************************************************************************** + * @ingroup cpaDcDp + * Partial-read parameters validation utility. + * + * @description + * Basic check that all partial-read related parameters provided by + * caller are valid. + * + * @param[in] pOpData Pointer to a structure containing the + * request parameters + * @param[in] pPartReadData Pointer to a structure containing the + * partial-read request parameters. 
+ * + * @retval CPA_STATUS_SUCCESS Function executed successfully + * @retval CPA_STATUS_INVALID_PARAM Invalid parameter passed in + * + *****************************************************************************/ +static CpaStatus +dcDataPlanePartReadCheck(CpaDcDpOpData *pOpData, + CpaDcDpPartialReadData *pPartReadData) +{ + sal_compression_service_t *pService = NULL; + + LAC_CHECK_NULL_PARAM(pPartReadData); + + pService = (sal_compression_service_t *)(pOpData->dcInstance); + + if (!isDcGen4x(pService)) { + /* Extended features are not supported prior Gen4 */ + return CPA_STATUS_UNSUPPORTED; + } + + if (pOpData->sessDirection == CPA_DC_DIR_COMPRESS) { + /* Decompression specific feature */ + return CPA_STATUS_INVALID_PARAM; + } + + if (pPartReadData->length > pOpData->bufferLenForData) { + QAT_UTILS_LOG( + "Partial read data length can not be greater than the destination buffer size\n"); + return CPA_STATUS_INVALID_PARAM; + } + + return CPA_STATUS_SUCCESS; +} + +/** + ***************************************************************************** + * @ingroup cpaDcDp + * Zero-padding parameters validation utility. + * + * @description + * Basic check that all zero-padding related parameters provided by + * caller are valid. + * + * @param[in] pOpData Pointer to a structure containing the + * request parameters. 
+ * + * @retval CPA_STATUS_SUCCESS Function executed successfully + * @retval CPA_STATUS_INVALID_PARAM Invalid parameter passed in + * @retval CPA_STATUS_NOT_SUPPORTED Feature not supported + * + *****************************************************************************/ +static CpaStatus +dcDataPlaneZeroPadCheck(CpaDcDpOpData *pOpData) +{ + sal_compression_service_t *pService = NULL; + + pService = (sal_compression_service_t *)(pOpData->dcInstance); + + if (!isDcGen4x(pService)) { + /* Extended features are not supported prior Gen4 */ + return CPA_STATUS_UNSUPPORTED; + } + + if (pOpData->sessDirection == CPA_DC_DIR_DECOMPRESS) { + /* Compression specific feature */ + return CPA_STATUS_INVALID_PARAM; + } + + return CPA_STATUS_SUCCESS; +} CpaStatus cpaDcDpGetSessionSize(CpaInstanceHandle dcInstance, CpaDcSessionSetupData *pSessionData, Cpa32U *pSessionSize) { return dcGetSessionSize(dcInstance, pSessionData, pSessionSize, NULL); } CpaStatus cpaDcDpInitSession(CpaInstanceHandle dcInstance, CpaDcSessionHandle pSessionHandle, CpaDcSessionSetupData *pSessionData) { CpaStatus status = CPA_STATUS_SUCCESS; dc_session_desc_t *pSessionDesc = NULL; sal_compression_service_t *pService = NULL; LAC_CHECK_INSTANCE_HANDLE(dcInstance); SAL_CHECK_INSTANCE_TYPE(dcInstance, SAL_SERVICE_TYPE_COMPRESSION); pService = (sal_compression_service_t *)dcInstance; /* Check if SAL is initialised otherwise return an error */ SAL_RUNNING_CHECK(pService); /* Stateful is not supported */ if (CPA_DC_STATELESS != pSessionData->sessState) { QAT_UTILS_LOG("Invalid sessState value\n"); return CPA_STATUS_INVALID_PARAM; } status = dcInitSession(dcInstance, pSessionHandle, pSessionData, NULL, NULL); if (CPA_STATUS_SUCCESS == status) { pSessionDesc = DC_SESSION_DESC_FROM_CTX_GET(pSessionHandle); pSessionDesc->isDcDp = CPA_TRUE; ICP_QAT_FW_COMN_PTR_TYPE_SET( pSessionDesc->reqCacheDecomp.comn_hdr.comn_req_flags, DC_DP_QAT_PTR_TYPE); ICP_QAT_FW_COMN_PTR_TYPE_SET( 
pSessionDesc->reqCacheComp.comn_hdr.comn_req_flags, DC_DP_QAT_PTR_TYPE); } return status; } CpaStatus cpaDcDpRemoveSession(const CpaInstanceHandle dcInstance, CpaDcSessionHandle pSessionHandle) { return cpaDcRemoveSession(dcInstance, pSessionHandle); } CpaStatus cpaDcDpUpdateSession(const CpaInstanceHandle dcInstance, CpaDcSessionHandle pSessionHandle, CpaDcSessionUpdateData *pUpdateSessionData) { return CPA_STATUS_UNSUPPORTED; } CpaStatus cpaDcDpRegCbFunc(const CpaInstanceHandle dcInstance, const CpaDcDpCallbackFn pNewCb) { sal_compression_service_t *pService = NULL; LAC_CHECK_NULL_PARAM(dcInstance); SAL_CHECK_INSTANCE_TYPE(dcInstance, SAL_SERVICE_TYPE_COMPRESSION); LAC_CHECK_NULL_PARAM(pNewCb); /* Check if SAL is initialised otherwise return an error */ SAL_RUNNING_CHECK(dcInstance); pService = (sal_compression_service_t *)dcInstance; pService->pDcDpCb = pNewCb; return CPA_STATUS_SUCCESS; } /** ***************************************************************************** * @ingroup cpaDcDp * * @description * Writes the message to the ring * * @param[in] pOpData Pointer to a structure containing the * request parameters * @param[in] pCurrentQatMsg Pointer to current QAT message on the ring * *****************************************************************************/ static void dcDpWriteRingMsg(CpaDcDpOpData *pOpData, icp_qat_fw_comp_req_t *pCurrentQatMsg) { icp_qat_fw_comp_req_t *pReqCache = NULL; dc_session_desc_t *pSessionDesc = NULL; Cpa8U bufferFormat; Cpa8U cnvDecompReq = ICP_QAT_FW_COMP_NO_CNV; Cpa8U cnvnrCompReq = ICP_QAT_FW_COMP_NO_CNV_RECOVERY; CpaBoolean cnvErrorInjection = ICP_QAT_FW_COMP_NO_CNV_DFX; sal_compression_service_t *pService = NULL; pService = (sal_compression_service_t *)(pOpData->dcInstance); pSessionDesc = DC_SESSION_DESC_FROM_CTX_GET(pOpData->pSessionHandle); if (CPA_DC_DIR_COMPRESS == pOpData->sessDirection) { pReqCache = &(pSessionDesc->reqCacheComp); /* CNV check */ if (CPA_TRUE == pOpData->compressAndVerify) { cnvDecompReq = 
ICP_QAT_FW_COMP_CNV; if (isDcGen4x(pService)) { cnvErrorInjection = pSessionDesc->cnvErrorInjection; } /* CNVNR check */ if (CPA_TRUE == pOpData->compressAndVerifyAndRecover) { cnvnrCompReq = ICP_QAT_FW_COMP_CNV_RECOVERY; } } } else { pReqCache = &(pSessionDesc->reqCacheDecomp); } /* Fills in the template DC ET ring message - cached from the * session descriptor */ memcpy((void *)pCurrentQatMsg, (void *)(pReqCache), (LAC_QAT_DC_REQ_SZ_LW * LAC_LONG_WORD_IN_BYTES)); if (CPA_DP_BUFLIST == pOpData->srcBufferLen) { bufferFormat = QAT_COMN_PTR_TYPE_SGL; } else { bufferFormat = QAT_COMN_PTR_TYPE_FLAT; } pCurrentQatMsg->comp_pars.req_par_flags |= ICP_QAT_FW_COMP_REQ_PARAM_FLAGS_BUILD( ICP_QAT_FW_COMP_NOT_SOP, ICP_QAT_FW_COMP_NOT_EOP, ICP_QAT_FW_COMP_NOT_BFINAL, cnvDecompReq, cnvnrCompReq, cnvErrorInjection, ICP_QAT_FW_COMP_CRC_MODE_LEGACY); SalQatMsg_CmnMidWrite((icp_qat_fw_la_bulk_req_t *)pCurrentQatMsg, pOpData, bufferFormat, pOpData->srcBuffer, pOpData->destBuffer, pOpData->srcBufferLen, pOpData->destBufferLen); pCurrentQatMsg->comp_pars.comp_len = pOpData->bufferLenToCompress; pCurrentQatMsg->comp_pars.out_buffer_sz = pOpData->bufferLenForData; } -CpaStatus -cpaDcDpEnqueueOp(CpaDcDpOpData *pOpData, const CpaBoolean performOpNow) +/** + ***************************************************************************** + * @ingroup cpaDcDp + * + * @description + * Updates the request decryptor with optional parameters: + * - partial read specific fields + * - zero-padding specific field + * + * @param[in] pOpData Pointer to a structure containing the + * request parameters. + * @param[in] pPartReadData Pointer to a structure containing the + * partial-read request parameters. + * @param[in] zeroPadFlag Boolean indicator containing the + * zero-padding enablement flag. + * @param[in] pCurrentQatMsg Pointer to current QAT message on the ring. 
+ * + *****************************************************************************/ +static void +dcDpUpdateRingMsg(CpaDcDpOpData *pOpData, + CpaDcDpPartialReadData *pPartReadData, + CpaBoolean zeroPadFlag, + icp_qat_fw_comp_req_t *pCurrentQatMsg) +{ + sal_compression_service_t *pService = NULL; + + pService = (sal_compression_service_t *)(pOpData->dcInstance); + if (!isDcGen4x(pService)) { + return; + } + + /* Partial read settings */ + if (NULL != pPartReadData) { + pCurrentQatMsg->u1.partial_decompress + .partial_decompress_offset = pPartReadData->dataOffset; + pCurrentQatMsg->u1.partial_decompress + .partial_decompress_length = pPartReadData->length; + ICP_QAT_FW_COMP_PART_DECOMP_SET( + pCurrentQatMsg->comp_pars.req_par_flags, + ICP_QAT_FW_COMP_PART_DECOMP); + } + /* Zero padding settings */ + if (CPA_TRUE == zeroPadFlag) { + ICP_QAT_FW_COMP_ZEROPAD_SET( + pCurrentQatMsg->comp_pars.req_par_flags, + ICP_QAT_FW_COMP_ZEROPAD); + } +} + +static CpaStatus +dcDpEnqueueOpBase(CpaDcDpOpData *pOpData, + CpaDcDpPartialReadData *pPartReadData, + CpaBoolean zeroPadFlag, + const CpaBoolean performOpNow) { icp_qat_fw_comp_req_t *pCurrentQatMsg = NULL; icp_comms_trans_handle trans_handle = NULL; dc_session_desc_t *pSessionDesc = NULL; CpaStatus status = CPA_STATUS_SUCCESS; status = dcDataPlaneParamCheck(pOpData); if (CPA_STATUS_SUCCESS != status) { return status; } + if (NULL != pPartReadData) { + status = dcDataPlanePartReadCheck(pOpData, pPartReadData); + if (CPA_STATUS_SUCCESS != status) { + return status; + } + } + + if (CPA_TRUE == zeroPadFlag) { + status = dcDataPlaneZeroPadCheck(pOpData); + if (CPA_STATUS_SUCCESS != status) { + return status; + } + } + if ((CPA_FALSE == pOpData->compressAndVerify) && (CPA_DC_DIR_COMPRESS == pOpData->sessDirection)) { return CPA_STATUS_UNSUPPORTED; } /* Check if SAL is initialised otherwise return an error */ SAL_RUNNING_CHECK(pOpData->dcInstance); trans_handle = ((sal_compression_service_t *)pOpData->dcInstance) 
->trans_handle_compression_tx; pSessionDesc = DC_SESSION_DESC_FROM_CTX_GET(pOpData->pSessionHandle); if ((CPA_DC_DIR_COMPRESS == pOpData->sessDirection) && (CPA_DC_DIR_DECOMPRESS == pSessionDesc->sessDirection)) { QAT_UTILS_LOG( "The session does not support this direction of operation.\n"); return CPA_STATUS_INVALID_PARAM; } else if ((CPA_DC_DIR_DECOMPRESS == pOpData->sessDirection) && (CPA_DC_DIR_COMPRESS == pSessionDesc->sessDirection)) { QAT_UTILS_LOG( "The session does not support this direction of operation.\n"); return CPA_STATUS_INVALID_PARAM; } icp_adf_getSingleQueueAddr(trans_handle, (void **)&pCurrentQatMsg); if (NULL == pCurrentQatMsg) { return CPA_STATUS_RETRY; } dcDpWriteRingMsg(pOpData, pCurrentQatMsg); + if (NULL != pPartReadData || CPA_TRUE == zeroPadFlag) { + dcDpUpdateRingMsg(pOpData, + pPartReadData, + zeroPadFlag, + pCurrentQatMsg); + } + pSessionDesc->pendingDpStatelessCbCount++; if (CPA_TRUE == performOpNow) { SalQatMsg_updateQueueTail(trans_handle); } return CPA_STATUS_SUCCESS; } CpaStatus -cpaDcDpEnqueueOpBatch(const Cpa32U numberRequests, - CpaDcDpOpData *pOpData[], - const CpaBoolean performOpNow) +cpaDcDpEnqueueOp(CpaDcDpOpData *pOpData, const CpaBoolean performOpNow) +{ + + return dcDpEnqueueOpBase(pOpData, NULL, CPA_FALSE, performOpNow); +} + +CpaStatus +cpaDcDpEnqueueOpWithPartRead(CpaDcDpOpData *pOpData, + CpaDcDpPartialReadData *pPartReadData, + const CpaBoolean performOpNow) +{ + return dcDpEnqueueOpBase(pOpData, + pPartReadData, + CPA_FALSE, + performOpNow); +} + +CpaStatus +cpaDcDpEnqueueOpWithZeroPad(CpaDcDpOpData *pOpData, + const CpaBoolean performOpNow) +{ + return dcDpEnqueueOpBase(pOpData, NULL, CPA_TRUE, performOpNow); +} + +static CpaStatus +dcDpEnqueueOpBatchBase(const Cpa32U numberRequests, + CpaDcDpOpData *pOpData[], + CpaDcDpPartialReadData *pPartData[], + CpaBoolean zeroPadFlag, + const CpaBoolean performOpNow) { icp_qat_fw_comp_req_t *pCurrentQatMsg = NULL; icp_comms_trans_handle trans_handle = NULL; 
dc_session_desc_t *pSessionDesc = NULL; Cpa32U i = 0; CpaStatus status = CPA_STATUS_SUCCESS; sal_compression_service_t *pService = NULL; LAC_CHECK_NULL_PARAM(pOpData); LAC_CHECK_NULL_PARAM(pOpData[0]); LAC_CHECK_NULL_PARAM(pOpData[0]->dcInstance); pService = (sal_compression_service_t *)(pOpData[0]->dcInstance); if ((numberRequests == 0) || (numberRequests > pService->maxNumCompConcurrentReq)) { QAT_UTILS_LOG( "The number of requests needs to be between 1 and %d.\n", pService->maxNumCompConcurrentReq); return CPA_STATUS_INVALID_PARAM; } for (i = 0; i < numberRequests; i++) { status = dcDataPlaneParamCheck(pOpData[i]); if (CPA_STATUS_SUCCESS != status) { return status; } + if (NULL != pPartData) { + status = + dcDataPlanePartReadCheck(pOpData[i], pPartData[i]); + if (CPA_STATUS_SUCCESS != status) { + return status; + } + } + + if (CPA_TRUE == zeroPadFlag) { + status = dcDataPlaneZeroPadCheck(pOpData[i]); + if (CPA_STATUS_SUCCESS != status) { + return status; + } + } + /* Check that all instance handles and session handles are the * same */ if (pOpData[i]->dcInstance != pOpData[0]->dcInstance) { QAT_UTILS_LOG( "All instance handles should be the same in the pOpData.\n"); return CPA_STATUS_INVALID_PARAM; } if (pOpData[i]->pSessionHandle != pOpData[0]->pSessionHandle) { QAT_UTILS_LOG( "All session handles should be the same in the pOpData.\n"); return CPA_STATUS_INVALID_PARAM; } } for (i = 0; i < numberRequests; i++) { if ((CPA_FALSE == pOpData[i]->compressAndVerify) && (CPA_DC_DIR_COMPRESS == pOpData[i]->sessDirection)) { return CPA_STATUS_UNSUPPORTED; } } /* Check if SAL is initialised otherwise return an error */ SAL_RUNNING_CHECK(pOpData[0]->dcInstance); trans_handle = ((sal_compression_service_t *)pOpData[0]->dcInstance) ->trans_handle_compression_tx; pSessionDesc = DC_SESSION_DESC_FROM_CTX_GET(pOpData[0]->pSessionHandle); for (i = 0; i < numberRequests; i++) { if ((CPA_DC_DIR_COMPRESS == pOpData[i]->sessDirection) && (CPA_DC_DIR_DECOMPRESS == 
pSessionDesc->sessDirection)) { QAT_UTILS_LOG( "The session does not support this direction of operation.\n"); return CPA_STATUS_INVALID_PARAM; } else if ((CPA_DC_DIR_DECOMPRESS == pOpData[i]->sessDirection) && (CPA_DC_DIR_COMPRESS == pSessionDesc->sessDirection)) { QAT_UTILS_LOG( "The session does not support this direction of operation.\n"); return CPA_STATUS_INVALID_PARAM; } } icp_adf_getQueueMemory(trans_handle, numberRequests, (void **)&pCurrentQatMsg); if (NULL == pCurrentQatMsg) { return CPA_STATUS_RETRY; } for (i = 0; i < numberRequests; i++) { dcDpWriteRingMsg(pOpData[i], pCurrentQatMsg); + if (pPartData) { + dcDpUpdateRingMsg(pOpData[i], + pPartData[i], + CPA_FALSE, + pCurrentQatMsg); + } + if (CPA_TRUE == zeroPadFlag) { + dcDpUpdateRingMsg(pOpData[i], + NULL, + CPA_TRUE, + pCurrentQatMsg); + } icp_adf_getQueueNext(trans_handle, (void **)&pCurrentQatMsg); } pSessionDesc->pendingDpStatelessCbCount += numberRequests; if (CPA_TRUE == performOpNow) { SalQatMsg_updateQueueTail(trans_handle); } return CPA_STATUS_SUCCESS; } +CpaStatus +cpaDcDpEnqueueOpBatch(const Cpa32U numberRequests, + CpaDcDpOpData *pOpData[], + const CpaBoolean performOpNow) +{ + return dcDpEnqueueOpBatchBase( + numberRequests, pOpData, NULL, CPA_FALSE, performOpNow); +} + +CpaStatus +cpaDcDpEnqueueOpWithPartReadBatch(const Cpa32U numberRequests, + CpaDcDpOpData *pOpData[], + CpaDcDpPartialReadData *pPartReadData[], + const CpaBoolean performOpNow) +{ + return dcDpEnqueueOpBatchBase( + numberRequests, pOpData, pPartReadData, CPA_FALSE, performOpNow); +} + +CpaStatus +cpaDcDpEnqueueOpWithZeroPadBatch(const Cpa32U numberRequests, + CpaDcDpOpData *pOpData[], + const CpaBoolean performOpNow) +{ + return dcDpEnqueueOpBatchBase( + numberRequests, pOpData, NULL, CPA_TRUE, performOpNow); +} + CpaStatus icp_sal_DcPollDpInstance(CpaInstanceHandle dcInstance, Cpa32U responseQuota) { icp_comms_trans_handle trans_handle = NULL; LAC_CHECK_INSTANCE_HANDLE(dcInstance); SAL_CHECK_INSTANCE_TYPE(dcInstance, 
SAL_SERVICE_TYPE_COMPRESSION); /* Check if SAL is initialised otherwise return an error */ SAL_RUNNING_CHECK(dcInstance); trans_handle = ((sal_compression_service_t *)dcInstance) ->trans_handle_compression_rx; return icp_adf_pollQueue(trans_handle, responseQuota); } CpaStatus cpaDcDpPerformOpNow(CpaInstanceHandle dcInstance) { icp_comms_trans_handle trans_handle = NULL; LAC_CHECK_NULL_PARAM(dcInstance); SAL_CHECK_INSTANCE_TYPE(dcInstance, SAL_SERVICE_TYPE_COMPRESSION); /* Check if SAL is initialised otherwise return an error */ SAL_RUNNING_CHECK(dcInstance); trans_handle = ((sal_compression_service_t *)dcInstance) ->trans_handle_compression_tx; if (CPA_TRUE == icp_adf_queueDataToSend(trans_handle)) { SalQatMsg_updateQueueTail(trans_handle); } return CPA_STATUS_SUCCESS; } + +CpaStatus +cpaDcDpIsPartReadSupported(const CpaInstanceHandle instanceHandle, + CpaBoolean *flag) +{ + sal_compression_service_t *pService = NULL; + dc_extd_ftrs_t *pExtendedFtrs = NULL; + + LAC_CHECK_NULL_PARAM(instanceHandle); + SAL_CHECK_INSTANCE_TYPE(instanceHandle, SAL_SERVICE_TYPE_COMPRESSION); + + pService = (sal_compression_service_t *)instanceHandle; + if (!isDcGen4x(pService)) { + *flag = CPA_FALSE; + return CPA_STATUS_SUCCESS; + } + + pExtendedFtrs = (dc_extd_ftrs_t *)&( + ((sal_service_t *)instanceHandle)->dcExtendedFeatures); + + *flag = (CpaBoolean)pExtendedFtrs->is_part_read; + + return CPA_STATUS_SUCCESS; +} + +CpaStatus +cpaDcDpIsZeroPadSupported(const CpaInstanceHandle instanceHandle, + CpaBoolean *flag) +{ + sal_compression_service_t *pService = NULL; + dc_extd_ftrs_t *pExtendedFtrs = NULL; + + LAC_CHECK_NULL_PARAM(instanceHandle); + SAL_CHECK_INSTANCE_TYPE(instanceHandle, SAL_SERVICE_TYPE_COMPRESSION); + + pService = (sal_compression_service_t *)instanceHandle; + if (!isDcGen4x(pService)) { + *flag = CPA_FALSE; + return CPA_STATUS_SUCCESS; + } + + pExtendedFtrs = (dc_extd_ftrs_t *)&( + ((sal_service_t *)instanceHandle)->dcExtendedFeatures); + + *flag = 
(CpaBoolean)pExtendedFtrs->is_zero_pad; + + return CPA_STATUS_SUCCESS; +} diff --git a/sys/dev/qat/qat_api/common/compression/include/dc_datapath.h b/sys/dev/qat/qat_api/common/compression/include/dc_datapath.h index 5bcff65c4fb3..58fb56f3c8ae 100644 --- a/sys/dev/qat/qat_api/common/compression/include/dc_datapath.h +++ b/sys/dev/qat/qat_api/common/compression/include/dc_datapath.h @@ -1,199 +1,210 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright(c) 2007-2022 Intel Corporation */ /** ***************************************************************************** * @file dc_datapath.h * * @ingroup Dc_DataCompression * * @description * Definition of the Data Compression datapath parameters. * ******************* * **********************************************************/ #ifndef DC_DATAPATH_H_ #define DC_DATAPATH_H_ #define LAC_QAT_DC_REQ_SZ_LW 32 #define LAC_QAT_DC_RESP_SZ_LW 8 /* Restriction on the source buffer size for compression due to the firmware * processing */ #define DC_SRC_BUFFER_MIN_SIZE (15) /* Restriction on the destination buffer size for compression due to * the management of skid buffers in the firmware */ #define DC_DEST_BUFFER_DYN_MIN_SIZE (128) #define DC_DEST_BUFFER_STA_MIN_SIZE (64) #define DC_DEST_BUFFER_DYN_MIN_SIZE_GEN4 (512) #define DC_DEST_BUFFER_STA_MIN_SIZE_GEN4 (1024) /* C62x and C3xxx pcie rev0 devices require an additional 32bytes */ #define DC_DEST_BUFFER_STA_ADDITIONAL_SIZE (32) /* C4xxx device only requires 47 bytes */ #define DC_DEST_BUFFER_MIN_SIZE (47) /* Minimum destination buffer size for decompression */ #define DC_DEST_BUFFER_DEC_MIN_SIZE (1) /* Restriction on the source and destination buffer sizes for compression due * to the firmware taking 32 bits parameters. 
The max size is 2^32-1 */ #define DC_BUFFER_MAX_SIZE (0xFFFFFFFF) /* DC Source & Destination buffer type (FLAT/SGL) */ #define DC_DEFAULT_QAT_PTR_TYPE QAT_COMN_PTR_TYPE_SGL #define DC_DP_QAT_PTR_TYPE QAT_COMN_PTR_TYPE_FLAT /* Offset to first byte of Input Byte Counter (IBC) in state register */ #define DC_STATE_IBC_OFFSET (8) /* Size in bytes of input byte counter (IBC) in state register */ #define DC_IBC_SIZE_IN_BYTES (4) /* Offset to first byte to CRC32 in state register */ #define DC_STATE_CRC32_OFFSET (40) /* Offset to first byte to output CRC32 in state register */ #define DC_STATE_OUTPUT_CRC32_OFFSET (48) /* Offset to first byte to input CRC32 in state register */ #define DC_STATE_INPUT_CRC32_OFFSET (52) /* Offset to first byte of ADLER32 in state register */ #define DC_STATE_ADLER32_OFFSET (48) /* 8 bit mask value */ #define DC_8_BIT_MASK (0xff) /* 8 bit shift position */ #define DC_8_BIT_SHIFT_POS (8) /* Size in bytes of checksum */ #define DC_CHECKSUM_SIZE_IN_BYTES (4) /* Mask used to set the most significant bit to zero */ #define DC_STATE_REGISTER_ZERO_MSB_MASK (0x7F) /* Mask used to keep only the most significant bit and set the others to zero */ #define DC_STATE_REGISTER_KEEP_MSB_MASK (0x80) /* Compression state register word containing the parity bit */ #define DC_STATE_REGISTER_PARITY_BIT_WORD (5) /* Location of the parity bit within the compression state register word */ #define DC_STATE_REGISTER_PARITY_BIT (7) /* size which needs to be reserved before the results field to * align the results field with the API struct */ #define DC_API_ALIGNMENT_OFFSET (offsetof(CpaDcDpOpData, results)) /* Mask used to check the CompressAndVerify capability bit */ #define DC_CNV_EXTENDED_CAPABILITY (0x01) /* Mask used to check the CompressAndVerifyAndRecover capability bit */ #define DC_CNVNR_EXTENDED_CAPABILITY (0x100) /* Default values for CNV integrity checks, * those are used to inform hardware of specifying CRC parameters to be used * when calculating CRCs */ 
#define DC_CRC_POLY_DEFAULT 0x04c11db7
#define DC_CRC64_POLY_DEFAULT 0x42f0e1eba9ea3693ULL
#define DC_XOR_FLAGS_DEFAULT 0xe0000
#define DC_XOR_OUT_DEFAULT 0xffffffff
#define DC_XOR64_OUT_DEFAULT 0x0ULL
#define DC_INVALID_CRC 0x0

/**
 *******************************************************************************
 * @ingroup cpaDc Data Compression
 *      Compression cookie
 * @description
 *      This cookie stores information for a particular compression perform op.
 *      This includes various user-supplied parameters for the operation which
 *      will be needed in our callback function.
 *      A pointer to this cookie is stored in the opaque data field of the QAT
 *      message so that it can be accessed in the asynchronous callback.
 * @note
 *      The order of the parameters within this structure is important. It needs
 *      to match the order of the parameters in CpaDcDpOpData up to the
 *      pSessionHandle. This allows the correct processing of the callback.
 *****************************************************************************/
typedef struct dc_compression_cookie_s {
	Cpa8U dcReqParamsBuffer[DC_API_ALIGNMENT_OFFSET];
	/**< Memory block - was previously reserved for request parameters.
	 * Now size maintained so following members align with API struct,
	 * but no longer used for request parameters */
	CpaDcRqResults reserved;
	/**< This is reserved for results to correctly align the structure
	 * to match the one from the data plane API */
	CpaInstanceHandle dcInstance;
	/**< Compression instance handle */
	CpaDcSessionHandle pSessionHandle;
	/**< Pointer to the session handle */
	icp_qat_fw_comp_req_t request;
	/**< Compression request */
	void *callbackTag;
	/**< Opaque data supplied by the client */
	dc_session_desc_t *pSessionDesc;
	/**< Pointer to the session descriptor */
	CpaDcFlush flushFlag;
	/**< Flush flag */
	CpaDcOpData *pDcOpData;
	/**< struct containing flags and CRC related data for this session */
	CpaDcRqResults *pResults;
	/**< Pointer to result buffer holding consumed and produced data */
	Cpa32U srcTotalDataLenInBytes;
	/**< Total length of the source data */
	Cpa32U dstTotalDataLenInBytes;
	/**< Total length of the destination data */
	dc_request_dir_t compDecomp;
	/**< Used to know whether the request is compression or decompression.
	 * Useful when defining the session as combined */
	CpaBufferList *pUserSrcBuff;
	/**< virtual userspace ptr to source SGL */
	CpaBufferList *pUserDestBuff;
	/**< virtual userspace ptr to destination SGL */
	CpaDcCallbackFn pCbFunc;
	/**< Callback function defined for the traditional sessionless API */
	CpaDcChecksum checksumType;
	/**< Type of checksum */
	dc_integrity_crc_fw_t dataIntegrityCrcs;
	/**< Data integrity table */
} dc_compression_cookie_t;

/**
 *****************************************************************************
 * @ingroup Dc_DataCompression
 *      Callback function called for compression and decompression requests in
 *      asynchronous mode
 *
 * @description
 *      Called to process compression and decompression response messages. This
 *      callback will check for errors, update the statistics and will call the
 *      user callback
 *
 * @param[in] pRespMsg          Response message
 *
 *****************************************************************************/
void dcCompression_ProcessCallback(void *pRespMsg);

/* Validates the per-request CpaDcOpData against the service's capabilities
 * (defined elsewhere; see dc_datapath implementation). */
CpaStatus dcCheckOpData(sal_compression_service_t *pService,
			CpaDcOpData *pOpData);

/**
 *****************************************************************************
 * @ingroup Dc_DataCompression
 *      Describes CNV and CNVNR modes
 *
 * @description
 *      This enum is used to indicate the CNV modes.
 *
 *****************************************************************************/
typedef enum dc_cnv_mode_s {
	DC_NO_CNV = 0, /* CNV = FALSE, CNVNR = FALSE */
	DC_CNV,	       /* CNV = TRUE, CNVNR = FALSE */
	DC_CNVNR,      /* CNV = TRUE, CNVNR = TRUE */
} dc_cnv_mode_t;

+/* Type to access extended features bit fields */
+typedef struct dc_extended_features_s {
+	unsigned is_cnv : 1; /* Bit<0> */
+	unsigned padding : 7;
+	unsigned is_cnvnr : 1; /* Bit<8> */
+	unsigned reserved : 2;
+	unsigned is_part_read : 1; /* Bit<11> */
+	unsigned is_zero_pad : 1; /* Bit<12> */
+	unsigned not_used : 19;
+} dc_extd_ftrs_t;
+
 #endif /* DC_DATAPATH_H_ */
diff --git a/sys/dev/qat/qat_api/common/ctrl/sal_compression.c b/sys/dev/qat/qat_api/common/ctrl/sal_compression.c
index f0e8d28949ff..c0f5a411d87e 100644
--- a/sys/dev/qat/qat_api/common/ctrl/sal_compression.c
+++ b/sys/dev/qat/qat_api/common/ctrl/sal_compression.c
@@ -1,1675 +1,1667 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright(c) 2007-2022 Intel Corporation */
/**
 *****************************************************************************
 * @file sal_compression.c
 *
 * @ingroup SalCtrl
 *
 * @description
 *      This file contains the sal implementation for compression.
* *****************************************************************************/ /* QAT-API includes */ #include "cpa.h" #include "cpa_dc.h" /* QAT utils includes */ #include "qat_utils.h" /* ADF includes */ #include "icp_adf_init.h" #include "icp_adf_transport.h" #include "icp_accel_devices.h" #include "icp_adf_cfg.h" #include "icp_adf_accel_mgr.h" #include "icp_adf_poll.h" #include "icp_adf_debug.h" #include "icp_adf_esram.h" #include "icp_qat_hw.h" /* SAL includes */ #include "lac_mem.h" #include "lac_common.h" #include "lac_mem_pools.h" #include "sal_statistics.h" #include "lac_list.h" #include "icp_sal_poll.h" #include "sal_types_compression.h" #include "dc_session.h" #include "dc_datapath.h" #include "dc_stats.h" #include "lac_sal.h" #include "lac_sal_ctrl.h" #include "sal_string_parse.h" #include "sal_service_state.h" #include "lac_buffer_desc.h" #include "icp_qat_fw_comp.h" #include "icp_qat_hw_20_comp_defs.h" #include "icp_sal_versions.h" /* C string null terminator size */ #define SAL_NULL_TERM_SIZE 1 -/* Type to access extended features bit fields */ -typedef struct dc_extended_features_s { - unsigned is_cnv : 1; /* Bit<0> */ - unsigned padding : 7; - unsigned is_cnvnr : 1; /* Bit<8> */ - unsigned not_used : 23; -} dc_extd_ftrs_t; - /* * Prints statistics for a compression instance */ static int SalCtrl_CompresionDebug(void *private_data, char *data, int size, int offset) { sal_compression_service_t *pCompressionService = (sal_compression_service_t *)private_data; CpaStatus status = CPA_STATUS_SUCCESS; CpaDcStats dcStats = { 0 }; Cpa32S len = 0; status = cpaDcGetStats(pCompressionService, &dcStats); if (status != CPA_STATUS_SUCCESS) { QAT_UTILS_LOG("cpaDcGetStats returned error.\n"); return (-1); } /* Engine Info */ if (NULL != pCompressionService->debug_file) { len += snprintf(data + len, size - len, SEPARATOR BORDER " Statistics for Instance %24s | \n" SEPARATOR, pCompressionService->debug_file->name); } /* Perform Info */ len += snprintf(data + len, 
size - len, BORDER " DC comp Requests: %16llu " BORDER "\n" BORDER " DC comp Request Errors: %16llu " BORDER "\n" BORDER " DC comp Completed: %16llu " BORDER "\n" BORDER " DC comp Completed Errors: %16llu " BORDER "\n" SEPARATOR, (long long unsigned int)dcStats.numCompRequests, (long long unsigned int)dcStats.numCompRequestsErrors, (long long unsigned int)dcStats.numCompCompleted, (long long unsigned int)dcStats.numCompCompletedErrors); /* Perform Info */ len += snprintf( data + len, size - len, BORDER " DC decomp Requests: %16llu " BORDER "\n" BORDER " DC decomp Request Errors: %16llu " BORDER "\n" BORDER " DC decomp Completed: %16llu " BORDER "\n" BORDER " DC decomp Completed Errors: %16llu " BORDER "\n" SEPARATOR, (long long unsigned int)dcStats.numDecompRequests, (long long unsigned int)dcStats.numDecompRequestsErrors, (long long unsigned int)dcStats.numDecompCompleted, (long long unsigned int)dcStats.numDecompCompletedErrors); return 0; } /* Initialise device specific information needed by compression service */ static CpaStatus SalCtrl_CompressionInit_CompData(icp_accel_dev_t *device, sal_compression_service_t *pCompService) { int level = 0; pCompService->comp_device_data.asbEnableSupport = CPA_FALSE; pCompService->comp_device_data.uniqueCompressionLevels[0] = CPA_FALSE; switch (device->deviceType) { case DEVICE_DH895XCC: case DEVICE_DH895XCCVF: pCompService->generic_service_info.integrityCrcCheck = CPA_FALSE; pCompService->numInterBuffs = DC_QAT_MAX_NUM_INTER_BUFFERS_6COMP_SLICES; pCompService->comp_device_data.minOutputBuffSize = DC_DEST_BUFFER_STA_MIN_SIZE; pCompService->comp_device_data.oddByteDecompNobFinal = CPA_TRUE; pCompService->comp_device_data.oddByteDecompInterim = CPA_FALSE; pCompService->comp_device_data.translatorOverflow = CPA_FALSE; pCompService->comp_device_data.useDevRam = ICP_QAT_FW_COMP_ENABLE_SECURE_RAM_USED_AS_INTMD_BUF; pCompService->comp_device_data.enableDmm = ICP_QAT_HW_COMPRESSION_DELAYED_MATCH_DISABLED; 
pCompService->comp_device_data.inflateContextSize = DC_INFLATE_CONTEXT_SIZE; pCompService->comp_device_data.highestHwCompressionDepth = ICP_QAT_HW_COMPRESSION_DEPTH_16; pCompService->comp_device_data.windowSizeMask = (1 << DC_8K_WINDOW_SIZE | 1 << DC_32K_WINDOW_SIZE); pCompService->comp_device_data.cnvnrSupported = CPA_FALSE; for (level = CPA_DC_L1; level <= CPA_DC_L12; level++) { switch (level) { case CPA_DC_L1: case CPA_DC_L2: case CPA_DC_L3: case CPA_DC_L4: pCompService->comp_device_data .uniqueCompressionLevels[level] = CPA_TRUE; break; default: pCompService->comp_device_data .uniqueCompressionLevels[level] = CPA_FALSE; break; } } pCompService->comp_device_data.numCompressionLevels = DC_NUM_COMPRESSION_LEVELS; break; case DEVICE_C3XXX: case DEVICE_C3XXXVF: case DEVICE_200XX: case DEVICE_200XXVF: pCompService->generic_service_info.integrityCrcCheck = CPA_FALSE; pCompService->numInterBuffs = DC_QAT_MAX_NUM_INTER_BUFFERS_6COMP_SLICES; pCompService->comp_device_data.oddByteDecompNobFinal = CPA_FALSE; pCompService->comp_device_data.oddByteDecompInterim = CPA_TRUE; pCompService->comp_device_data.translatorOverflow = CPA_FALSE; pCompService->comp_device_data.useDevRam = ICP_QAT_FW_COMP_DISABLE_SECURE_RAM_USED_AS_INTMD_BUF; pCompService->comp_device_data.inflateContextSize = DC_INFLATE_EH_CONTEXT_SIZE; pCompService->comp_device_data.highestHwCompressionDepth = ICP_QAT_HW_COMPRESSION_DEPTH_16; pCompService->comp_device_data.windowSizeMask = (1 << DC_16K_WINDOW_SIZE | 1 << DC_32K_WINDOW_SIZE); pCompService->comp_device_data.minOutputBuffSize = DC_DEST_BUFFER_STA_MIN_SIZE; pCompService->comp_device_data.enableDmm = ICP_QAT_HW_COMPRESSION_DELAYED_MATCH_ENABLED; pCompService->comp_device_data.cnvnrSupported = CPA_TRUE; for (level = CPA_DC_L1; level <= CPA_DC_L12; level++) { switch (level) { case CPA_DC_L1: case CPA_DC_L2: case CPA_DC_L3: case CPA_DC_L4: pCompService->comp_device_data .uniqueCompressionLevels[level] = CPA_TRUE; break; default: pCompService->comp_device_data 
.uniqueCompressionLevels[level] = CPA_FALSE; break; } } pCompService->comp_device_data.numCompressionLevels = DC_NUM_COMPRESSION_LEVELS; break; case DEVICE_C62X: case DEVICE_C62XVF: pCompService->generic_service_info.integrityCrcCheck = CPA_FALSE; pCompService->numInterBuffs = DC_QAT_MAX_NUM_INTER_BUFFERS_10COMP_SLICES; pCompService->comp_device_data.oddByteDecompNobFinal = CPA_FALSE; pCompService->comp_device_data.oddByteDecompInterim = CPA_TRUE; pCompService->comp_device_data.translatorOverflow = CPA_FALSE; pCompService->comp_device_data.useDevRam = ICP_QAT_FW_COMP_ENABLE_SECURE_RAM_USED_AS_INTMD_BUF; pCompService->comp_device_data.inflateContextSize = DC_INFLATE_EH_CONTEXT_SIZE; pCompService->comp_device_data.highestHwCompressionDepth = ICP_QAT_HW_COMPRESSION_DEPTH_16; pCompService->comp_device_data.windowSizeMask = (1 << DC_4K_WINDOW_SIZE | 1 << DC_8K_WINDOW_SIZE | 1 << DC_16K_WINDOW_SIZE | 1 << DC_32K_WINDOW_SIZE); pCompService->comp_device_data.minOutputBuffSize = DC_DEST_BUFFER_STA_MIN_SIZE; pCompService->comp_device_data.minOutputBuffSizeDynamic = pCompService->comp_device_data.minOutputBuffSize; pCompService->comp_device_data.enableDmm = ICP_QAT_HW_COMPRESSION_DELAYED_MATCH_ENABLED; pCompService->comp_device_data.cnvnrSupported = CPA_TRUE; for (level = CPA_DC_L1; level <= CPA_DC_L12; level++) { switch (level) { case CPA_DC_L1: case CPA_DC_L2: case CPA_DC_L3: case CPA_DC_L4: pCompService->comp_device_data .uniqueCompressionLevels[level] = CPA_TRUE; break; default: pCompService->comp_device_data .uniqueCompressionLevels[level] = CPA_FALSE; break; } } pCompService->comp_device_data.numCompressionLevels = DC_NUM_COMPRESSION_LEVELS; break; case DEVICE_C4XXX: case DEVICE_C4XXXVF: pCompService->generic_service_info.integrityCrcCheck = CPA_TRUE; pCompService->numInterBuffs = DC_QAT_MAX_NUM_INTER_BUFFERS_24COMP_SLICES; pCompService->comp_device_data.minOutputBuffSize = DC_DEST_BUFFER_MIN_SIZE; pCompService->comp_device_data.oddByteDecompNobFinal = CPA_TRUE; 
pCompService->comp_device_data.oddByteDecompInterim = CPA_TRUE; pCompService->comp_device_data.translatorOverflow = CPA_TRUE; if (pCompService->generic_service_info.capabilitiesMask & ICP_ACCEL_CAPABILITIES_INLINE) { pCompService->comp_device_data.useDevRam = ICP_QAT_FW_COMP_DISABLE_SECURE_RAM_USED_AS_INTMD_BUF; } else { pCompService->comp_device_data.useDevRam = ICP_QAT_FW_COMP_ENABLE_SECURE_RAM_USED_AS_INTMD_BUF; } pCompService->comp_device_data.enableDmm = ICP_QAT_HW_COMPRESSION_DELAYED_MATCH_ENABLED; pCompService->comp_device_data.inflateContextSize = DC_INFLATE_EH_CONTEXT_SIZE; pCompService->comp_device_data.highestHwCompressionDepth = ICP_QAT_HW_COMPRESSION_DEPTH_128; pCompService->comp_device_data.windowSizeMask = (1 << DC_16K_WINDOW_SIZE | 1 << DC_32K_WINDOW_SIZE); pCompService->comp_device_data.cnvnrSupported = CPA_TRUE; for (level = CPA_DC_L1; level <= CPA_DC_L12; level++) { switch (level) { case CPA_DC_L1: case CPA_DC_L2: case CPA_DC_L3: case CPA_DC_L4: case CPA_DC_L5: pCompService->comp_device_data .uniqueCompressionLevels[level] = CPA_TRUE; break; default: pCompService->comp_device_data .uniqueCompressionLevels[level] = CPA_FALSE; break; } } pCompService->comp_device_data.numCompressionLevels = DC_NUM_COMPRESSION_LEVELS; break; case DEVICE_4XXX: case DEVICE_4XXXVF: pCompService->generic_service_info.integrityCrcCheck = CPA_TRUE; pCompService->numInterBuffs = 0; pCompService->comp_device_data.minOutputBuffSize = DC_DEST_BUFFER_STA_MIN_SIZE_GEN4; pCompService->comp_device_data.minOutputBuffSizeDynamic = DC_DEST_BUFFER_DYN_MIN_SIZE_GEN4; pCompService->comp_device_data.oddByteDecompNobFinal = CPA_TRUE; pCompService->comp_device_data.oddByteDecompInterim = CPA_FALSE; pCompService->comp_device_data.translatorOverflow = CPA_TRUE; pCompService->comp_device_data.useDevRam = ICP_QAT_FW_COMP_ENABLE_SECURE_RAM_USED_AS_INTMD_BUF; pCompService->comp_device_data.enableDmm = ICP_QAT_HW_COMPRESSION_DELAYED_MATCH_ENABLED; 
pCompService->comp_device_data.inflateContextSize = DC_INFLATE_CONTEXT_SIZE; pCompService->comp_device_data.highestHwCompressionDepth = ICP_QAT_HW_COMP_20_SEARCH_DEPTH_LEVEL_9; pCompService->comp_device_data.windowSizeMask = (1 << DC_4K_WINDOW_SIZE | 1 << DC_8K_WINDOW_SIZE | 1 << DC_16K_WINDOW_SIZE | 1 << DC_32K_WINDOW_SIZE); for (level = CPA_DC_L1; level <= CPA_DC_L12; level++) { switch (level) { case CPA_DC_L1: case CPA_DC_L6: case CPA_DC_L9: pCompService->comp_device_data .uniqueCompressionLevels[level] = CPA_TRUE; break; default: pCompService->comp_device_data .uniqueCompressionLevels[level] = CPA_FALSE; break; } } pCompService->comp_device_data.numCompressionLevels = DC_NUM_COMPRESSION_LEVELS; break; default: QAT_UTILS_LOG("Unknown device type! - %d.\n", device->deviceType); return CPA_STATUS_FAIL; } return CPA_STATUS_SUCCESS; } CpaStatus SalCtrl_CompressionInit(icp_accel_dev_t *device, sal_service_t *service) { CpaStatus status = CPA_STATUS_SUCCESS; Cpa32U numCompConcurrentReq = 0; Cpa32U request_ring_id = 0; Cpa32U response_ring_id = 0; char adfGetParam[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = { 0 }; char compMemPool[SAL_CFG_MAX_VAL_LEN_IN_BYTES] = { 0 }; char temp_string[SAL_CFG_MAX_VAL_LEN_IN_BYTES] = { 0 }; char temp_string2[SAL_CFG_MAX_VAL_LEN_IN_BYTES] = { 0 }; char *instance_name = NULL; sal_statistics_collection_t *pStatsCollection = (sal_statistics_collection_t *)device->pQatStats; icp_resp_deliv_method rx_resp_type = ICP_RESP_TYPE_IRQ; sal_compression_service_t *pCompressionService = (sal_compression_service_t *)service; Cpa32U msgSize = 0; char *section = DYN_SEC; SAL_SERVICE_GOOD_FOR_INIT(pCompressionService); pCompressionService->generic_service_info.state = SAL_SERVICE_STATE_INITIALIZING; if (CPA_FALSE == pCompressionService->generic_service_info.is_dyn) { section = icpGetProcessName(); } if (pStatsCollection == NULL) { return CPA_STATUS_FAIL; } /* Get Config Info: Accel Num, bank Num, packageID, coreAffinity, nodeAffinity and response mode */ 
pCompressionService->acceleratorNum = 0; /* Initialise device specific compression data */ SalCtrl_CompressionInit_CompData(device, pCompressionService); status = Sal_StringParsing( "Dc", pCompressionService->generic_service_info.instance, "BankNumber", temp_string); LAC_CHECK_STATUS(status); status = icp_adf_cfgGetParamValue(device, section, temp_string, adfGetParam); if (CPA_STATUS_SUCCESS != status) { QAT_UTILS_LOG("Failed to get %s from configuration.\n", temp_string); return status; } pCompressionService->bankNum = Sal_Strtoul(adfGetParam, NULL, SAL_CFG_BASE_DEC); status = Sal_StringParsing( "Dc", pCompressionService->generic_service_info.instance, "IsPolled", temp_string); LAC_CHECK_STATUS(status); status = icp_adf_cfgGetParamValue(device, section, temp_string, adfGetParam); if (CPA_STATUS_SUCCESS != status) { QAT_UTILS_LOG("Failed to get %s from configuration.\n", temp_string); return status; } pCompressionService->isPolled = (Cpa8U)Sal_Strtoul(adfGetParam, NULL, SAL_CFG_BASE_DEC); /* User instances only support poll and epoll mode */ if (SAL_RESP_POLL_CFG_FILE != pCompressionService->isPolled) { QAT_UTILS_LOG( "IsPolled %u is not supported for user instance %s.\n", pCompressionService->isPolled, temp_string); return CPA_STATUS_FAIL; } if (SAL_RESP_POLL_CFG_FILE == pCompressionService->isPolled) { rx_resp_type = ICP_RESP_TYPE_POLL; } status = icp_adf_cfgGetParamValue(device, LAC_CFG_SECTION_GENERAL, ADF_DEV_PKG_ID, adfGetParam); if (CPA_STATUS_SUCCESS != status) { QAT_UTILS_LOG("Failed to get %s from configuration.\n", ADF_DEV_PKG_ID); return status; } pCompressionService->pkgID = (Cpa16U)Sal_Strtoul(adfGetParam, NULL, SAL_CFG_BASE_DEC); status = icp_adf_cfgGetParamValue(device, LAC_CFG_SECTION_GENERAL, ADF_DEV_NODE_ID, adfGetParam); if (CPA_STATUS_SUCCESS != status) { QAT_UTILS_LOG("Failed to get %s from configuration.\n", ADF_DEV_NODE_ID); return status; } pCompressionService->nodeAffinity = (Cpa32U)Sal_Strtoul(adfGetParam, NULL, SAL_CFG_BASE_DEC); /* In 
case of interrupt instance, use the bank affinity set by adf_ctl * Otherwise, use the instance affinity for backwards compatibility */ if (SAL_RESP_POLL_CFG_FILE != pCompressionService->isPolled) { /* Next need to read the [AcceleratorX] section of the config * file */ status = Sal_StringParsing("Accelerator", pCompressionService->acceleratorNum, "", temp_string2); LAC_CHECK_STATUS(status); status = Sal_StringParsing("Bank", pCompressionService->bankNum, "CoreAffinity", temp_string); LAC_CHECK_STATUS(status); } else { strncpy(temp_string2, section, sizeof(temp_string2) - SAL_NULL_TERM_SIZE); temp_string2[SAL_CFG_MAX_VAL_LEN_IN_BYTES - SAL_NULL_TERM_SIZE] = '\0'; status = Sal_StringParsing( "Dc", pCompressionService->generic_service_info.instance, "CoreAffinity", temp_string); LAC_CHECK_STATUS(status); } status = icp_adf_cfgGetParamValue(device, temp_string2, temp_string, adfGetParam); if (CPA_STATUS_SUCCESS != status) { QAT_UTILS_LOG("Failed to get %s from configuration.\n", temp_string); return status; } pCompressionService->coreAffinity = (Cpa32U)Sal_Strtoul(adfGetParam, NULL, SAL_CFG_BASE_DEC); status = Sal_StringParsing( "Dc", pCompressionService->generic_service_info.instance, "NumConcurrentRequests", temp_string); LAC_CHECK_STATUS(status); status = icp_adf_cfgGetParamValue(device, section, temp_string, adfGetParam); if (CPA_STATUS_SUCCESS != status) { QAT_UTILS_LOG("Failed to get %s from configuration.\n", temp_string); return status; } numCompConcurrentReq = (Cpa32U)Sal_Strtoul(adfGetParam, NULL, SAL_CFG_BASE_DEC); if (validateConcurrRequest(numCompConcurrentReq)) { QAT_UTILS_LOG( "Invalid NumConcurrentRequests, valid values are: {64, 128, 256, ... 32768, 65536}.\n"); return CPA_STATUS_FAIL; } /* ADF does not allow us to completely fill the ring for batch requests */ pCompressionService->maxNumCompConcurrentReq = (numCompConcurrentReq - SAL_BATCH_SUBMIT_FREE_SPACE); /* 1. 
Create transport handles */ status = Sal_StringParsing( "Dc", pCompressionService->generic_service_info.instance, "RingTx", temp_string); LAC_CHECK_STATUS(status); msgSize = LAC_QAT_DC_REQ_SZ_LW * LAC_LONG_WORD_IN_BYTES; status = icp_adf_transCreateHandle( device, ICP_TRANS_TYPE_ETR, section, pCompressionService->acceleratorNum, pCompressionService->bankNum, temp_string, lac_getRingType(SAL_RING_TYPE_DC), NULL, ICP_RESP_TYPE_NONE, numCompConcurrentReq, msgSize, (icp_comms_trans_handle *)&( pCompressionService->trans_handle_compression_tx)); LAC_CHECK_STATUS(status); if (icp_adf_transGetRingNum( pCompressionService->trans_handle_compression_tx, &request_ring_id) != CPA_STATUS_SUCCESS) { icp_adf_transReleaseHandle( pCompressionService->trans_handle_compression_tx); QAT_UTILS_LOG("Failed to get DC TX ring number.\n"); return CPA_STATUS_FAIL; } status = Sal_StringParsing( "Dc", pCompressionService->generic_service_info.instance, "RingRx", temp_string); if (CPA_STATUS_SUCCESS != status) { icp_adf_transReleaseHandle( pCompressionService->trans_handle_compression_tx); return status; } msgSize = LAC_QAT_DC_RESP_SZ_LW * LAC_LONG_WORD_IN_BYTES; status = icp_adf_transCreateHandle( device, ICP_TRANS_TYPE_ETR, section, pCompressionService->acceleratorNum, pCompressionService->bankNum, temp_string, lac_getRingType(SAL_RING_TYPE_NONE), (icp_trans_callback)dcCompression_ProcessCallback, rx_resp_type, numCompConcurrentReq, msgSize, (icp_comms_trans_handle *)&( pCompressionService->trans_handle_compression_rx)); if (CPA_STATUS_SUCCESS != status) { icp_adf_transReleaseHandle( pCompressionService->trans_handle_compression_tx); return status; } if (icp_adf_transGetRingNum( pCompressionService->trans_handle_compression_rx, &response_ring_id) != CPA_STATUS_SUCCESS) { icp_adf_transReleaseHandle( pCompressionService->trans_handle_compression_tx); icp_adf_transReleaseHandle( pCompressionService->trans_handle_compression_rx); QAT_UTILS_LOG("Failed to get DC RX ring number.\n"); return 
CPA_STATUS_FAIL; } /* 2. Allocates memory pools */ /* Valid initialisation value for a pool ID */ pCompressionService->compression_mem_pool = LAC_MEM_POOL_INIT_POOL_ID; status = Sal_StringParsing( "Comp", pCompressionService->generic_service_info.instance, "_MemPool", compMemPool); if (CPA_STATUS_SUCCESS != status) { icp_adf_transReleaseHandle( pCompressionService->trans_handle_compression_tx); icp_adf_transReleaseHandle( pCompressionService->trans_handle_compression_rx); return status; } status = Lac_MemPoolCreate(&pCompressionService->compression_mem_pool, compMemPool, (numCompConcurrentReq + 1), sizeof(dc_compression_cookie_t), LAC_64BYTE_ALIGNMENT, CPA_FALSE, pCompressionService->nodeAffinity); if (CPA_STATUS_SUCCESS != status) { icp_adf_transReleaseHandle( pCompressionService->trans_handle_compression_tx); icp_adf_transReleaseHandle( pCompressionService->trans_handle_compression_rx); return status; } /* Init compression statistics */ status = dcStatsInit(pCompressionService); if (CPA_STATUS_SUCCESS != status) { Lac_MemPoolDestroy(pCompressionService->compression_mem_pool); icp_adf_transReleaseHandle( pCompressionService->trans_handle_compression_tx); icp_adf_transReleaseHandle( pCompressionService->trans_handle_compression_rx); return status; } if (CPA_TRUE == pStatsCollection->bDcStatsEnabled) { /* Get instance name for stats */ instance_name = LAC_OS_MALLOC(ADF_CFG_MAX_VAL_LEN_IN_BYTES); if (NULL == instance_name) { Lac_MemPoolDestroy( pCompressionService->compression_mem_pool); icp_adf_transReleaseHandle( pCompressionService->trans_handle_compression_tx); icp_adf_transReleaseHandle( pCompressionService->trans_handle_compression_rx); return CPA_STATUS_RESOURCE; } status = Sal_StringParsing( "Dc", pCompressionService->generic_service_info.instance, "Name", temp_string); if (CPA_STATUS_SUCCESS != status) { Lac_MemPoolDestroy( pCompressionService->compression_mem_pool); icp_adf_transReleaseHandle( pCompressionService->trans_handle_compression_tx); 
icp_adf_transReleaseHandle( pCompressionService->trans_handle_compression_rx); LAC_OS_FREE(instance_name); return status; } status = icp_adf_cfgGetParamValue(device, section, temp_string, adfGetParam); if (CPA_STATUS_SUCCESS != status) { QAT_UTILS_LOG("Failed to get %s from configuration.\n", temp_string); Lac_MemPoolDestroy( pCompressionService->compression_mem_pool); icp_adf_transReleaseHandle( pCompressionService->trans_handle_compression_tx); icp_adf_transReleaseHandle( pCompressionService->trans_handle_compression_rx); LAC_OS_FREE(instance_name); return status; } snprintf(instance_name, ADF_CFG_MAX_VAL_LEN_IN_BYTES, "%s", adfGetParam); pCompressionService->debug_file = LAC_OS_MALLOC(sizeof(debug_file_info_t)); if (NULL == pCompressionService->debug_file) { Lac_MemPoolDestroy( pCompressionService->compression_mem_pool); icp_adf_transReleaseHandle( pCompressionService->trans_handle_compression_tx); icp_adf_transReleaseHandle( pCompressionService->trans_handle_compression_rx); LAC_OS_FREE(instance_name); return CPA_STATUS_RESOURCE; } memset(pCompressionService->debug_file, 0, sizeof(debug_file_info_t)); pCompressionService->debug_file->name = instance_name; pCompressionService->debug_file->seq_read = SalCtrl_CompresionDebug; pCompressionService->debug_file->private_data = pCompressionService; pCompressionService->debug_file->parent = pCompressionService->generic_service_info.debug_parent_dir; status = icp_adf_debugAddFile(device, pCompressionService->debug_file); if (CPA_STATUS_SUCCESS != status) { Lac_MemPoolDestroy( pCompressionService->compression_mem_pool); icp_adf_transReleaseHandle( pCompressionService->trans_handle_compression_tx); icp_adf_transReleaseHandle( pCompressionService->trans_handle_compression_rx); LAC_OS_FREE(instance_name); LAC_OS_FREE(pCompressionService->debug_file); return status; } } pCompressionService->generic_service_info.stats = pStatsCollection; pCompressionService->generic_service_info.state = SAL_SERVICE_STATE_INITIALIZED; return 
	    status;
}

/**
 * Move an INITIALIZED compression service instance to the RUNNING state.
 * Copies the device's extended-feature flags (e.g. Compress-And-Verify
 * capability bits) into the service's generic info so they can be queried
 * after start. Fails if the instance is not in the INITIALIZED state.
 */
CpaStatus
SalCtrl_CompressionStart(icp_accel_dev_t *device, sal_service_t *service)
{
	CpaStatus status = CPA_STATUS_SUCCESS;
	sal_compression_service_t *pCompressionService =
	    (sal_compression_service_t *)service;

	if (SAL_SERVICE_STATE_INITIALIZED !=
	    pCompressionService->generic_service_info.state) {
		QAT_UTILS_LOG("Not in the correct state to call start.\n");
		return CPA_STATUS_FAIL;
	}
	/**************************************************************/
	/* Obtain Extended Features. I.e. Compress And Verify         */
	/**************************************************************/
	pCompressionService->generic_service_info.dcExtendedFeatures =
	    device->dcExtendedFeatures;
	pCompressionService->generic_service_info.state =
	    SAL_SERVICE_STATE_RUNNING;
	return status;
}

/**
 * Begin stopping a RUNNING compression service instance.
 * If the device is in reset, the state moves straight to RESTARTING and
 * CPA_STATUS_SUCCESS is returned; otherwise the state becomes
 * SHUTTING_DOWN and CPA_STATUS_RETRY is returned, presumably so the
 * caller follows up with SalCtrl_CompressionShutdown -- verify against
 * the SAL service state machine.
 */
CpaStatus
SalCtrl_CompressionStop(icp_accel_dev_t *device, sal_service_t *service)
{
	sal_compression_service_t *pCompressionService =
	    (sal_compression_service_t *)service;

	if (SAL_SERVICE_STATE_RUNNING !=
	    pCompressionService->generic_service_info.state) {
		QAT_UTILS_LOG("Not in the correct state to call stop.\n");
		return CPA_STATUS_FAIL;
	}

	if (icp_adf_is_dev_in_reset(device)) {
		pCompressionService->generic_service_info.state =
		    SAL_SERVICE_STATE_RESTARTING;
		return CPA_STATUS_SUCCESS;
	}

	pCompressionService->generic_service_info.state =
	    SAL_SERVICE_STATE_SHUTTING_DOWN;
	return CPA_STATUS_RETRY;
}

/**
 * Release all resources owned by the compression service instance: the
 * request-cookie memory pool, the TX/RX transport handles, the optional
 * debug/stats file and the statistics block. Valid from the INITIALIZED,
 * SHUTTING_DOWN or RESTARTING states. Ends in RESTARTING when the device
 * is in reset, SHUTDOWN otherwise.
 * NOTE(review): LAC_CHECK_STATUS appears to return early if releasing the
 * TX handle fails, which would leave the RX handle unreleased -- confirm
 * this is the intended error path.
 */
CpaStatus
SalCtrl_CompressionShutdown(icp_accel_dev_t *device, sal_service_t *service)
{
	CpaStatus status = CPA_STATUS_SUCCESS;
	sal_compression_service_t *pCompressionService =
	    (sal_compression_service_t *)service;
	sal_statistics_collection_t *pStatsCollection =
	    (sal_statistics_collection_t *)device->pQatStats;

	if ((SAL_SERVICE_STATE_INITIALIZED !=
	     pCompressionService->generic_service_info.state) &&
	    (SAL_SERVICE_STATE_SHUTTING_DOWN !=
	     pCompressionService->generic_service_info.state) &&
	    (SAL_SERVICE_STATE_RESTARTING !=
	     pCompressionService->generic_service_info.state)) {
		QAT_UTILS_LOG("Not in the correct state to call shutdown.\n");
		return CPA_STATUS_FAIL;
	}

	Lac_MemPoolDestroy(pCompressionService->compression_mem_pool);

	status = icp_adf_transReleaseHandle(
	    pCompressionService->trans_handle_compression_tx);
	LAC_CHECK_STATUS(status);
	status = icp_adf_transReleaseHandle(
	    pCompressionService->trans_handle_compression_rx);
	LAC_CHECK_STATUS(status);

	if (CPA_TRUE == pStatsCollection->bDcStatsEnabled) {
		/* Clean stats */
		if (NULL != pCompressionService->debug_file) {
			icp_adf_debugRemoveFile(
			    pCompressionService->debug_file);
			LAC_OS_FREE(pCompressionService->debug_file->name);
			LAC_OS_FREE(pCompressionService->debug_file);
			pCompressionService->debug_file = NULL;
		}
	}
	pCompressionService->generic_service_info.stats = NULL;
	dcStatsFree(pCompressionService);

	if (icp_adf_is_dev_in_reset(device)) {
		pCompressionService->generic_service_info.state =
		    SAL_SERVICE_STATE_RESTARTING;
		return CPA_STATUS_SUCCESS;
	}
	pCompressionService->generic_service_info.state =
	    SAL_SERVICE_STATE_SHUTDOWN;
	return status;
}

/**
 * Translate a CpaStatus error code into its human-readable string,
 * copied into the caller-supplied pStatusText buffer. Returns
 * CPA_STATUS_INVALID_PARAM for an unrecognized code or a NULL buffer.
 * The dcInstance argument is unused here; the signature matches the
 * public cpaDc API.
 */
CpaStatus
cpaDcGetStatusText(const CpaInstanceHandle dcInstance,
		   const CpaStatus errStatus,
		   Cpa8S *pStatusText)
{
	CpaStatus status = CPA_STATUS_SUCCESS;

	LAC_CHECK_NULL_PARAM(pStatusText);

	switch (errStatus) {
	case CPA_STATUS_SUCCESS:
		LAC_COPY_STRING(pStatusText, CPA_STATUS_STR_SUCCESS);
		break;
	case CPA_STATUS_FAIL:
		LAC_COPY_STRING(pStatusText, CPA_STATUS_STR_FAIL);
		break;
	case CPA_STATUS_RETRY:
		LAC_COPY_STRING(pStatusText, CPA_STATUS_STR_RETRY);
		break;
	case CPA_STATUS_RESOURCE:
		LAC_COPY_STRING(pStatusText, CPA_STATUS_STR_RESOURCE);
		break;
	case CPA_STATUS_INVALID_PARAM:
		LAC_COPY_STRING(pStatusText, CPA_STATUS_STR_INVALID_PARAM);
		break;
	case CPA_STATUS_FATAL:
		LAC_COPY_STRING(pStatusText, CPA_STATUS_STR_FATAL);
		break;
	case CPA_STATUS_UNSUPPORTED:
		LAC_COPY_STRING(pStatusText, CPA_STATUS_STR_UNSUPPORTED);
		break;
	default:
		status = CPA_STATUS_INVALID_PARAM;
		break;
	}
	return status;
}

/**
 * Report the number of intermediate buffers required by this instance
 * (as configured per device type in SalCtrl_CompressionInit_CompData).
 * CPA_INSTANCE_HANDLE_SINGLE selects the first available DC instance.
 */
CpaStatus
cpaDcGetNumIntermediateBuffers(CpaInstanceHandle dcInstance,
			       Cpa16U *pNumBuffers)
{
	CpaInstanceHandle insHandle =
NULL; sal_compression_service_t *pService = NULL; if (CPA_INSTANCE_HANDLE_SINGLE == dcInstance) { insHandle = dcGetFirstHandle(); } else { insHandle = dcInstance; } LAC_CHECK_NULL_PARAM(insHandle); LAC_CHECK_NULL_PARAM(pNumBuffers); pService = (sal_compression_service_t *)insHandle; *pNumBuffers = pService->numInterBuffs; return CPA_STATUS_SUCCESS; } CpaStatus cpaDcStartInstance(CpaInstanceHandle instanceHandle, Cpa16U numBuffers, CpaBufferList **pIntermediateBufferPtrsArray) { icp_qat_addr_width_t *pInterBuffPtrsArray = NULL; icp_qat_addr_width_t pArrayBufferListDescPhyAddr = 0; icp_qat_addr_width_t bufListDescPhyAddr; icp_qat_addr_width_t bufListAlignedPhyAddr; CpaFlatBuffer *pClientCurrFlatBuffer = NULL; icp_buffer_list_desc_t *pBufferListDesc = NULL; icp_flat_buffer_desc_t *pCurrFlatBufDesc = NULL; CpaInstanceInfo2 info = { 0 }; icp_accel_dev_t *dev = NULL; CpaStatus status = CPA_STATUS_SUCCESS; sal_compression_service_t *pService = NULL; CpaInstanceHandle insHandle = NULL; Cpa16U bufferIndex = 0; Cpa32U numFlatBuffers = 0; Cpa64U clientListSize = 0; CpaBufferList *pClientCurrentIntermediateBuffer = NULL; Cpa32U bufferIndex2 = 0; CpaBufferList **pTempIntermediateBufferPtrsArray; Cpa64U lastClientListSize = 0; if (CPA_INSTANCE_HANDLE_SINGLE == instanceHandle) { insHandle = dcGetFirstHandle(); } else { insHandle = instanceHandle; } LAC_CHECK_NULL_PARAM(insHandle); status = cpaDcInstanceGetInfo2(insHandle, &info); if (CPA_STATUS_SUCCESS != status) { QAT_UTILS_LOG("Can not get instance info.\n"); return status; } dev = icp_adf_getAccelDevByAccelId(info.physInstId.packageId); if (NULL == dev) { QAT_UTILS_LOG("Can not find device for the instance\n"); return CPA_STATUS_FAIL; } if (NULL == pIntermediateBufferPtrsArray) { /* Increment dev ref counter and return - DRAM is not used */ icp_qa_dev_get(dev); return CPA_STATUS_SUCCESS; } if (0 == numBuffers) { /* Increment dev ref counter and return - DRAM is not used */ icp_qa_dev_get(dev); return CPA_STATUS_SUCCESS; } 
pService = (sal_compression_service_t *)insHandle; LAC_CHECK_NULL_PARAM(insHandle); if ((numBuffers > 0) && (NULL == pIntermediateBufferPtrsArray)) { QAT_UTILS_LOG("Invalid Intermediate Buffers Array pointer\n"); return CPA_STATUS_INVALID_PARAM; } /* Check number of intermediate buffers allocated by user */ if ((pService->numInterBuffs != numBuffers)) { QAT_UTILS_LOG("Invalid number of buffers\n"); return CPA_STATUS_INVALID_PARAM; } pTempIntermediateBufferPtrsArray = pIntermediateBufferPtrsArray; for (bufferIndex = 0; bufferIndex < numBuffers; bufferIndex++) { if (NULL == *pTempIntermediateBufferPtrsArray) { QAT_UTILS_LOG( "Intermediate Buffer - Invalid Buffer List pointer\n"); return CPA_STATUS_INVALID_PARAM; } if (NULL == (*pTempIntermediateBufferPtrsArray)->pBuffers) { QAT_UTILS_LOG( "Intermediate Buffer - Invalid Flat Buffer descriptor pointer\n"); return CPA_STATUS_INVALID_PARAM; } if (NULL == (*pTempIntermediateBufferPtrsArray)->pPrivateMetaData) { QAT_UTILS_LOG( "Intermediate Buffer - Invalid Private MetaData descriptor pointer\n"); return CPA_STATUS_INVALID_PARAM; } clientListSize = 0; for (bufferIndex2 = 0; bufferIndex2 < (*pTempIntermediateBufferPtrsArray)->numBuffers; bufferIndex2++) { if ((0 != (*pTempIntermediateBufferPtrsArray) ->pBuffers[bufferIndex2] .dataLenInBytes) && NULL == (*pTempIntermediateBufferPtrsArray) ->pBuffers[bufferIndex2] .pData) { QAT_UTILS_LOG( "Intermediate Buffer - Invalid Flat Buffer pointer\n"); return CPA_STATUS_INVALID_PARAM; } clientListSize += (*pTempIntermediateBufferPtrsArray) ->pBuffers[bufferIndex2] .dataLenInBytes; } if (bufferIndex != 0) { if (lastClientListSize != clientListSize) { QAT_UTILS_LOG( "SGLs have to be of the same size.\n"); return CPA_STATUS_INVALID_PARAM; } } else { lastClientListSize = clientListSize; } pTempIntermediateBufferPtrsArray++; } /* Allocate array of physical pointers to icp_buffer_list_desc_t */ status = LAC_OS_CAMALLOC(&pInterBuffPtrsArray, (numBuffers * sizeof(icp_qat_addr_width_t)), 
LAC_64BYTE_ALIGNMENT, pService->nodeAffinity); if (CPA_STATUS_SUCCESS != status) { QAT_UTILS_LOG("Can not allocate Intermediate Buffers array.\n"); return status; } /* Get physical address of the intermediate buffer pointers array */ pArrayBufferListDescPhyAddr = LAC_MEM_CAST_PTR_TO_UINT64( LAC_OS_VIRT_TO_PHYS_INTERNAL(pInterBuffPtrsArray)); pService->pInterBuffPtrsArray = pInterBuffPtrsArray; pService->pInterBuffPtrsArrayPhyAddr = pArrayBufferListDescPhyAddr; /* Get the full size of the buffer list */ /* Assumption: all the SGLs allocated by the user have the same size */ clientListSize = 0; for (bufferIndex = 0; bufferIndex < (*pIntermediateBufferPtrsArray)->numBuffers; bufferIndex++) { clientListSize += ((*pIntermediateBufferPtrsArray) ->pBuffers[bufferIndex] .dataLenInBytes); } pService->minInterBuffSizeInBytes = clientListSize; for (bufferIndex = 0; bufferIndex < numBuffers; bufferIndex++) { /* Get pointer to the client Intermediate Buffer List * (CpaBufferList) */ pClientCurrentIntermediateBuffer = *pIntermediateBufferPtrsArray; /* Get number of flat buffers in the buffer list */ numFlatBuffers = pClientCurrentIntermediateBuffer->numBuffers; /* Get pointer to the client array of CpaFlatBuffers */ pClientCurrFlatBuffer = pClientCurrentIntermediateBuffer->pBuffers; /* Calculate Physical address of current private SGL */ bufListDescPhyAddr = LAC_OS_VIRT_TO_PHYS_EXTERNAL( (*pService), pClientCurrentIntermediateBuffer->pPrivateMetaData); if (bufListDescPhyAddr == 0) { QAT_UTILS_LOG( "Unable to get the physical address of the metadata.\n"); return CPA_STATUS_FAIL; } /* Align SGL physical address */ bufListAlignedPhyAddr = LAC_ALIGN_POW2_ROUNDUP(bufListDescPhyAddr, ICP_DESCRIPTOR_ALIGNMENT_BYTES); /* Set physical address of the Intermediate Buffer SGL in the * SGLs array */ *pInterBuffPtrsArray = LAC_MEM_CAST_PTR_TO_UINT64(bufListAlignedPhyAddr); /* Calculate (virtual) offset to the buffer list descriptor */ pBufferListDesc = (icp_buffer_list_desc_t 
*)((LAC_ARCH_UINT)pClientCurrentIntermediateBuffer ->pPrivateMetaData + (LAC_ARCH_UINT)(bufListAlignedPhyAddr - bufListDescPhyAddr)); /* Set number of flat buffers in the physical Buffer List * descriptor */ pBufferListDesc->numBuffers = numFlatBuffers; /* Go past the Buffer List descriptor to the list of buffer * descriptors */ pCurrFlatBufDesc = (icp_flat_buffer_desc_t *)((pBufferListDesc->phyBuffers)); /* Loop for each flat buffer in the SGL */ while (0 != numFlatBuffers) { /* Set length of the current flat buffer */ pCurrFlatBufDesc->dataLenInBytes = pClientCurrFlatBuffer->dataLenInBytes; /* Set physical address of the flat buffer */ pCurrFlatBufDesc->phyBuffer = LAC_MEM_CAST_PTR_TO_UINT64( LAC_OS_VIRT_TO_PHYS_EXTERNAL( (*pService), pClientCurrFlatBuffer->pData)); if (pCurrFlatBufDesc->phyBuffer == 0) { QAT_UTILS_LOG( "Unable to get the physical address of the flat buffer.\n"); return CPA_STATUS_FAIL; } pCurrFlatBufDesc++; pClientCurrFlatBuffer++; numFlatBuffers--; } pIntermediateBufferPtrsArray++; pInterBuffPtrsArray++; } pService->generic_service_info.isInstanceStarted = CPA_TRUE; /* Increment dev ref counter */ icp_qa_dev_get(dev); return CPA_STATUS_SUCCESS; } CpaStatus cpaDcStopInstance(CpaInstanceHandle instanceHandle) { CpaInstanceHandle insHandle = NULL; CpaInstanceInfo2 info = { 0 }; icp_accel_dev_t *dev = NULL; CpaStatus status = CPA_STATUS_SUCCESS; sal_compression_service_t *pService = NULL; if (CPA_INSTANCE_HANDLE_SINGLE == instanceHandle) { insHandle = dcGetFirstHandle(); } else { insHandle = instanceHandle; } LAC_CHECK_NULL_PARAM(insHandle); pService = (sal_compression_service_t *)insHandle; /* Free Intermediate Buffer Pointers Array */ if (pService->pInterBuffPtrsArray != NULL) { LAC_OS_CAFREE(pService->pInterBuffPtrsArray); pService->pInterBuffPtrsArray = 0; } pService->pInterBuffPtrsArrayPhyAddr = 0; status = cpaDcInstanceGetInfo2(insHandle, &info); if (CPA_STATUS_SUCCESS != status) { QAT_UTILS_LOG("Can not get instance info.\n"); return status; 
}
	dev = icp_adf_getAccelDevByAccelId(info.physInstId.packageId);
	if (NULL == dev) {
		QAT_UTILS_LOG("Can not find device for the instance.\n");
		return CPA_STATUS_FAIL;
	}

	pService->generic_service_info.isInstanceStarted = CPA_FALSE;

	/* Decrement dev ref counter */
	icp_qa_dev_put(dev);
	return CPA_STATUS_SUCCESS;
}

/*
 * Count the data-compression instances available across all accel devices
 * that advertise the COMPRESSION capability.  *pNumInstances is only
 * written when the device query succeeds.
 */
CpaStatus
cpaDcGetNumInstances(Cpa16U *pNumInstances)
{
	CpaStatus status = CPA_STATUS_SUCCESS;
	icp_accel_dev_t **pAdfInsts = NULL;
	icp_accel_dev_t *dev_addr = NULL;
	sal_t *base_addr = NULL;
	sal_list_t *list_temp = NULL;
	Cpa16U num_accel_dev = 0;
	Cpa16U num = 0;
	Cpa16U i = 0;

	LAC_CHECK_NULL_PARAM(pNumInstances);

	/* Get the number of accel_dev in the system */
	status = icp_amgr_getNumInstances(&num_accel_dev);
	LAC_CHECK_STATUS(status);

	/* Allocate memory to store addr of accel_devs */
	pAdfInsts = malloc(num_accel_dev * sizeof(icp_accel_dev_t *),
			   M_QAT,
			   M_WAITOK);
	num_accel_dev = 0;
	/* Get ADF to return accel_devs with dc enabled */
	status = icp_amgr_getAllAccelDevByCapabilities(
	    ICP_ACCEL_CAPABILITIES_COMPRESSION, pAdfInsts, &num_accel_dev);
	if (CPA_STATUS_SUCCESS == status) {
		for (i = 0; i < num_accel_dev; i++) {
			dev_addr = (icp_accel_dev_t *)pAdfInsts[i];
			if (NULL != dev_addr) {
				base_addr = dev_addr->pSalHandle;
				if (NULL != base_addr) {
					list_temp =
					    base_addr->compression_services;
					while (NULL != list_temp) {
						num++;
						list_temp =
						    SalList_next(list_temp);
					}
				}
			}
		}

		*pNumInstances = num;
	}
	free(pAdfInsts, M_QAT);

	return status;
}

/*
 * Fill dcInstances[] with up to numInstances compression instance handles.
 * Returns CPA_STATUS_RESOURCE if fewer than numInstances instances exist;
 * handles are only copied out on full success (a second pass repopulates
 * the array once the count has been verified).
 */
CpaStatus
cpaDcGetInstances(Cpa16U numInstances, CpaInstanceHandle *dcInstances)
{
	CpaStatus status = CPA_STATUS_SUCCESS;
	icp_accel_dev_t **pAdfInsts = NULL;
	icp_accel_dev_t *dev_addr = NULL;
	sal_t *base_addr = NULL;
	sal_list_t *list_temp = NULL;
	Cpa16U num_accel_dev = 0;
	Cpa16U index = 0;
	Cpa16U i = 0;

	LAC_CHECK_NULL_PARAM(dcInstances);
	if (0 == numInstances) {
		QAT_UTILS_LOG("numInstances is 0.\n");
		return CPA_STATUS_INVALID_PARAM;
	}

	/* Get the number of accel_dev in the system */
	status = icp_amgr_getNumInstances(&num_accel_dev);
	LAC_CHECK_STATUS(status);

	/* Allocate memory to store addr of accel_devs */
	pAdfInsts = malloc(num_accel_dev * sizeof(icp_accel_dev_t *),
			   M_QAT,
			   M_WAITOK);
	num_accel_dev = 0;
	/* Get ADF to return accel_devs with dc enabled */
	status = icp_amgr_getAllAccelDevByCapabilities(
	    ICP_ACCEL_CAPABILITIES_COMPRESSION, pAdfInsts, &num_accel_dev);

	if (CPA_STATUS_SUCCESS == status) {
		/* First check the number of instances in the system */
		for (i = 0; i < num_accel_dev; i++) {
			dev_addr = (icp_accel_dev_t *)pAdfInsts[i];
			if (NULL != dev_addr) {
				base_addr = dev_addr->pSalHandle;
				if (NULL != base_addr) {
					list_temp =
					    base_addr->compression_services;
					while (NULL != list_temp) {
						if (index >
						    (numInstances - 1)) {
							break;
						}

						dcInstances[index] =
						    SalList_getObject(
							list_temp);
						list_temp =
						    SalList_next(list_temp);
						index++;
					}
				}
			}
		}

		if (numInstances > index) {
			QAT_UTILS_LOG("Only %d dc instances available.\n",
				      index);
			status = CPA_STATUS_RESOURCE;
		}
	}

	if (CPA_STATUS_SUCCESS == status) {
		index = 0;
		for (i = 0; i < num_accel_dev; i++) {
			dev_addr = (icp_accel_dev_t *)pAdfInsts[i];
			/* Note dev_addr cannot be NULL here as numInstances=0
			 * is not valid and if dev_addr=NULL then index=0
			 * (which is less than numInstances and status is set
			 * to _RESOURCE above). */
			base_addr = dev_addr->pSalHandle;
			if (NULL != base_addr) {
				list_temp = base_addr->compression_services;
				while (NULL != list_temp) {
					if (index > (numInstances - 1)) {
						break;
					}

					dcInstances[index] =
					    SalList_getObject(list_temp);
					list_temp = SalList_next(list_temp);
					index++;
				}
			}
		}
	}

	free(pAdfInsts, M_QAT);

	return status;
}

/*
 * Populate pInstanceInfo2 with identification, affinity, operational state
 * and naming information for a compression instance.  Name and part name
 * are read from the device configuration file.
 */
CpaStatus
cpaDcInstanceGetInfo2(const CpaInstanceHandle instanceHandle,
		      CpaInstanceInfo2 *pInstanceInfo2)
{
	sal_compression_service_t *pCompressionService = NULL;
	CpaInstanceHandle insHandle = NULL;
	icp_accel_dev_t *dev = NULL;
	CpaStatus status = CPA_STATUS_SUCCESS;
	char keyStr[ADF_CFG_MAX_KEY_LEN_IN_BYTES] = { 0 };
	char valStr[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = { 0 };
	char *section = DYN_SEC;

	if (CPA_INSTANCE_HANDLE_SINGLE ==
instanceHandle) {
		insHandle = dcGetFirstHandle();
	} else {
		insHandle = instanceHandle;
	}

	LAC_CHECK_NULL_PARAM(insHandle);
	SAL_CHECK_INSTANCE_TYPE(insHandle, SAL_SERVICE_TYPE_COMPRESSION);
	LAC_CHECK_NULL_PARAM(pInstanceInfo2);

	LAC_OS_BZERO(pInstanceInfo2, sizeof(CpaInstanceInfo2));
	pInstanceInfo2->accelerationServiceType =
	    CPA_ACC_SVC_TYPE_DATA_COMPRESSION;

	snprintf((char *)pInstanceInfo2->vendorName,
		 CPA_INST_VENDOR_NAME_SIZE,
		 "%s",
		 SAL_INFO2_VENDOR_NAME);
	pInstanceInfo2->vendorName[CPA_INST_VENDOR_NAME_SIZE - 1] = '\0';

	snprintf((char *)pInstanceInfo2->swVersion,
		 CPA_INST_SW_VERSION_SIZE,
		 "Version %d.%d",
		 SAL_INFO2_DRIVER_SW_VERSION_MAJ_NUMBER,
		 SAL_INFO2_DRIVER_SW_VERSION_MIN_NUMBER);
	pInstanceInfo2->swVersion[CPA_INST_SW_VERSION_SIZE - 1] = '\0';

	/* Note we can safely read the contents of the compression service
	 * instance here because icp_amgr_getAccelDevByCapabilities() only
	 * returns devs that have started. */
	pCompressionService = (sal_compression_service_t *)insHandle;
	pInstanceInfo2->physInstId.packageId = pCompressionService->pkgID;
	pInstanceInfo2->physInstId.acceleratorId =
	    pCompressionService->acceleratorNum;
	pInstanceInfo2->physInstId.executionEngineId = 0;
	pInstanceInfo2->physInstId.busAddress =
	    icp_adf_get_busAddress(pInstanceInfo2->physInstId.packageId);

	/* set coreAffinity to zero before use */
	LAC_OS_BZERO(pInstanceInfo2->coreAffinity,
		     sizeof(pInstanceInfo2->coreAffinity));
	CPA_BITMAP_BIT_SET(pInstanceInfo2->coreAffinity,
			   pCompressionService->coreAffinity);

	pInstanceInfo2->nodeAffinity = pCompressionService->nodeAffinity;

	if (CPA_TRUE ==
	    pCompressionService->generic_service_info.isInstanceStarted) {
		pInstanceInfo2->operState = CPA_OPER_STATE_UP;
	} else {
		pInstanceInfo2->operState = CPA_OPER_STATE_DOWN;
	}

	pInstanceInfo2->requiresPhysicallyContiguousMemory = CPA_TRUE;

	if (SAL_RESP_POLL_CFG_FILE == pCompressionService->isPolled) {
		pInstanceInfo2->isPolled = CPA_TRUE;
	} else {
		pInstanceInfo2->isPolled = CPA_FALSE;
	}

	pInstanceInfo2->isOffloaded = CPA_TRUE;

	/* Get the instance name and part name from the config file */
	dev = icp_adf_getAccelDevByAccelId(pCompressionService->pkgID);
	if (NULL == dev) {
		QAT_UTILS_LOG("Can not find device for the instance.\n");
		/* Wipe partially-filled output on failure. */
		LAC_OS_BZERO(pInstanceInfo2, sizeof(CpaInstanceInfo2));
		return CPA_STATUS_FAIL;
	}

	snprintf((char *)pInstanceInfo2->partName,
		 CPA_INST_PART_NAME_SIZE,
		 SAL_INFO2_PART_NAME,
		 dev->deviceName);
	pInstanceInfo2->partName[CPA_INST_PART_NAME_SIZE - 1] = '\0';

	/* Static instances live in the per-process config section. */
	if (CPA_FALSE == pCompressionService->generic_service_info.is_dyn) {
		section = icpGetProcessName();
	}

	status = Sal_StringParsing(
	    "Dc",
	    pCompressionService->generic_service_info.instance,
	    "Name",
	    keyStr);
	LAC_CHECK_STATUS(status);
	status = icp_adf_cfgGetParamValue(dev, section, keyStr, valStr);
	LAC_CHECK_STATUS(status);

	strncpy((char *)pInstanceInfo2->instName,
		valStr,
		sizeof(pInstanceInfo2->instName) - 1);
	pInstanceInfo2->instName[CPA_INST_NAME_SIZE - 1] = '\0';

#if __GNUC__ >= 7
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wformat-truncation"
#endif
	snprintf((char *)pInstanceInfo2->instID,
		 CPA_INST_ID_SIZE,
		 "%s_%s",
		 section,
		 valStr);
#if __GNUC__ >= 7
#pragma GCC diagnostic pop
#endif

	return CPA_STATUS_SUCCESS;
}

/*
 * Report the static capability matrix of a compression instance.  Most
 * fields are fixed for this driver; CNV-related fields are derived from
 * the device's extended feature bits and capability mask.
 */
CpaStatus
cpaDcQueryCapabilities(CpaInstanceHandle dcInstance,
		       CpaDcInstanceCapabilities *pInstanceCapabilities)
{
	CpaInstanceHandle insHandle = NULL;
	sal_compression_service_t *pService = NULL;
	Cpa32U capabilitiesMask = 0;
	dc_extd_ftrs_t *pExtendedFtrs = NULL;

	if (CPA_INSTANCE_HANDLE_SINGLE == dcInstance) {
		insHandle = dcGetFirstHandle();
		if (NULL == insHandle) {
			QAT_UTILS_LOG("Can not get the instance.\n");
			return CPA_STATUS_FAIL;
		}
	} else {
		insHandle = dcInstance;
	}

	pService = (sal_compression_service_t *)insHandle;

	LAC_CHECK_NULL_PARAM(insHandle);
	SAL_CHECK_INSTANCE_TYPE(insHandle, SAL_SERVICE_TYPE_COMPRESSION);
	LAC_CHECK_NULL_PARAM(pInstanceCapabilities);

	memset(pInstanceCapabilities, 0, sizeof(CpaDcInstanceCapabilities));

	capabilitiesMask = pService->generic_service_info.capabilitiesMask;

	/* Set compression capabilities */
	if (capabilitiesMask & ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY) {
		pInstanceCapabilities->integrityCrcs = CPA_TRUE;
	}

	pInstanceCapabilities->endOfLastBlock = CPA_TRUE;
	pInstanceCapabilities->statefulDeflateCompression = CPA_FALSE;
	pInstanceCapabilities->statefulDeflateDecompression = CPA_TRUE;
	pInstanceCapabilities->statelessDeflateCompression = CPA_TRUE;
	pInstanceCapabilities->statelessDeflateDecompression = CPA_TRUE;
	pInstanceCapabilities->checksumCRC32 = CPA_TRUE;
	pInstanceCapabilities->checksumAdler32 = CPA_TRUE;
	pInstanceCapabilities->dynamicHuffman = CPA_TRUE;
	pInstanceCapabilities->precompiledHuffman = CPA_FALSE;
	pInstanceCapabilities->dynamicHuffmanBufferReq = CPA_TRUE;
	pInstanceCapabilities->autoSelectBestHuffmanTree = CPA_TRUE;

	pInstanceCapabilities->validWindowSizeMaskCompression =
	    pService->comp_device_data.windowSizeMask;
	pInstanceCapabilities->validWindowSizeMaskDecompression =
	    pService->comp_device_data.windowSizeMask;
	pExtendedFtrs = (dc_extd_ftrs_t *)&(
	    ((sal_service_t *)insHandle)->dcExtendedFeatures);
	pInstanceCapabilities->batchAndPack = CPA_FALSE;
	pInstanceCapabilities->compressAndVerify =
	    (CpaBoolean)pExtendedFtrs->is_cnv;
	pInstanceCapabilities->compressAndVerifyStrict = CPA_TRUE;
	pInstanceCapabilities->compressAndVerifyAndRecover =
	    (CpaBoolean)pExtendedFtrs->is_cnvnr;
	return CPA_STATUS_SUCCESS;
}

/*
 * Register the client's virtual-to-physical address translation callback
 * on the instance's generic service handle.
 */
CpaStatus
cpaDcSetAddressTranslation(const CpaInstanceHandle instanceHandle,
			   CpaVirtualToPhysical virtual2Physical)
{
	sal_service_t *pService = NULL;
	CpaInstanceHandle insHandle = NULL;

	if (CPA_INSTANCE_HANDLE_SINGLE == instanceHandle) {
		insHandle = dcGetFirstHandle();
	} else {
		insHandle = instanceHandle;
	}

	LAC_CHECK_NULL_PARAM(insHandle);
	SAL_CHECK_INSTANCE_TYPE(insHandle, SAL_SERVICE_TYPE_COMPRESSION);
	LAC_CHECK_NULL_PARAM(virtual2Physical);

	pService = (sal_service_t *)insHandle;
	pService->virt2PhysClient = virtual2Physical;

	return CPA_STATUS_SUCCESS;
}

/**
******************************************************************************
 * @ingroup cpaDcCommon
 * Data compression specific polling function which polls a DC instance.
 *****************************************************************************/
CpaStatus
icp_sal_DcPollInstance(CpaInstanceHandle instanceHandle_in,
		       Cpa32U response_quota)
{
	CpaStatus status = CPA_STATUS_SUCCESS;
	sal_compression_service_t *dc_handle = NULL;
	sal_service_t *gen_handle = NULL;
	icp_comms_trans_handle trans_hndTable[DC_NUM_RX_RINGS];

	if (CPA_INSTANCE_HANDLE_SINGLE == instanceHandle_in) {
		dc_handle = (sal_compression_service_t *)dcGetFirstHandle();
	} else {
		dc_handle = (sal_compression_service_t *)instanceHandle_in;
	}

	LAC_CHECK_NULL_PARAM(dc_handle);
	SAL_RUNNING_CHECK(dc_handle);

	gen_handle = &(dc_handle->generic_service_info);
	if (SAL_SERVICE_TYPE_COMPRESSION != gen_handle->type) {
		QAT_UTILS_LOG("Instance handle type is incorrect.\n");
		return CPA_STATUS_FAIL;
	}

	/*
	 * From the instanceHandle we must get the trans_handle and send
	 * down to adf for polling.
	 * Populate our trans handle table with the appropriate handles.
	 */
	trans_hndTable[0] = dc_handle->trans_handle_compression_rx;

	/* Call adf to do the polling. */
	status = icp_adf_pollInstance(trans_hndTable,
				      DC_NUM_RX_RINGS,
				      response_quota);
	return status;
}

/**
 ******************************************************************************
 * @ingroup cpaDcCommon
 * Register an instance restart/stop notification callback and its opaque
 * tag on the instance's generic service handle.
 *****************************************************************************/
CpaStatus
cpaDcInstanceSetNotificationCb(
    const CpaInstanceHandle instanceHandle,
    const CpaDcInstanceNotificationCbFunc pInstanceNotificationCb,
    void *pCallbackTag)
{
	CpaStatus status = CPA_STATUS_SUCCESS;
	sal_service_t *gen_handle = instanceHandle;

	LAC_CHECK_NULL_PARAM(gen_handle);
	gen_handle->notification_cb = pInstanceNotificationCb;
	gen_handle->cb_tag = pCallbackTag;
	return status;
}

/*
 * Return the first compression instance found on any started accel device
 * with the COMPRESSION capability, or NULL if none is available.
 */
CpaInstanceHandle
dcGetFirstHandle(void)
{
	CpaStatus status = CPA_STATUS_SUCCESS;
	static icp_accel_dev_t *adfInsts[ADF_MAX_DEVICES] = { 0 };
	CpaInstanceHandle dcInst = NULL;
	icp_accel_dev_t *dev_addr = NULL;
	sal_t *base_addr = NULL;
	sal_list_t *list_temp = NULL;
	Cpa16U i, num_dc = 0;

	/* Only need 1 dev with compression enabled - so check all devices */
	status = icp_amgr_getAllAccelDevByCapabilities(
	    ICP_ACCEL_CAPABILITIES_COMPRESSION, adfInsts, &num_dc);
	if ((0 == num_dc) || (CPA_STATUS_SUCCESS != status)) {
		QAT_UTILS_LOG(
		    "No compression devices enabled in the system.\n");
		return dcInst;
	}

	for (i = 0; i < num_dc; i++) {
		dev_addr = (icp_accel_dev_t *)adfInsts[i];
		if (NULL != dev_addr) {
			base_addr = dev_addr->pSalHandle;
			if (NULL != base_addr) {
				list_temp = base_addr->compression_services;
				if (NULL != list_temp) {
					dcInst = SalList_getObject(list_temp);
					break;
				}
			}
		}
	}
	return dcInst;
}
diff --git a/sys/dev/qat/qat_api/firmware/include/icp_qat_fw.h b/sys/dev/qat/qat_api/firmware/include/icp_qat_fw.h
index 28dfeff6579e..b4d1f5829ba2 100644
--- a/sys/dev/qat/qat_api/firmware/include/icp_qat_fw.h
+++ b/sys/dev/qat/qat_api/firmware/include/icp_qat_fw.h
@@ -1,1436 +1,1424 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright(c) 2007-2022 Intel Corporation */
/**
*****************************************************************************
 * @file icp_qat_fw.h
 * @defgroup icp_qat_fw_comn ICP QAT FW Common Processing Definitions
 * @ingroup icp_qat_fw
 *
 * @description
 *      This file documents the common interfaces that the QAT FW running on
 *      the QAT AE exports. This common layer is used by a number of services
 *      to export content processing services.
 *
 *****************************************************************************/

#ifndef _ICP_QAT_FW_H_
#define _ICP_QAT_FW_H_

/*
 * ==============================
 * General Notes on the Interface
 */

/*
 * ==============================
 * Introduction
 * Data movement and slice chaining
 * Endianness
 *      - Unless otherwise stated, all structures are defined in LITTLE ENDIAN
 *        MODE
 * Alignment
 *      - In general all data structures provided to a request should be
 *        aligned on the 64 byte boundary so as to allow optimal memory
 *        transfers. At the minimum they must be aligned to the 8 byte boundary
 * Sizes
 *      Quad words = 8 bytes
 * Terminology
 * ==============================
 */

/*
 ******************************************************************************
 * Include public/global header files
 ******************************************************************************
 */

#include "icp_qat_hw.h"

/* Big assumptions that both bitpos and mask are constants.
 * NOTE: these are function-like macros; arguments are expanded more than
 * once, so do not pass expressions with side effects. */
#define QAT_FIELD_SET(flags, val, bitpos, mask)                                \
	(flags) = (((flags) & (~((mask) << (bitpos)))) |                       \
		   (((val) & (mask)) << (bitpos)))

#define QAT_FIELD_GET(flags, bitpos, mask) (((flags) >> (bitpos)) & (mask))

#define QAT_FLAG_SET(flags, val, bitpos)                                       \
	((flags) = (((flags) & (~(1 << (bitpos)))) | (((val)&1) << (bitpos))))

#define QAT_FLAG_CLEAR(flags, bitpos) (flags) = ((flags) & (~(1 << (bitpos))))

#define QAT_FLAG_GET(flags, bitpos) (((flags) >> (bitpos)) & 1)

/**< @ingroup icp_qat_fw_comn
 * Default request and response ring size in bytes */
#define ICP_QAT_FW_REQ_DEFAULT_SZ 128
#define ICP_QAT_FW_RESP_DEFAULT_SZ 32

#define ICP_QAT_FW_COMN_ONE_BYTE_SHIFT 8
#define ICP_QAT_FW_COMN_SINGLE_BYTE_MASK 0xFF

/**< @ingroup icp_qat_fw_comn
 * Common Request - Block sizes definitions in multiples of individual long
 * words */
#define ICP_QAT_FW_NUM_LONGWORDS_1 1
#define ICP_QAT_FW_NUM_LONGWORDS_2 2
#define ICP_QAT_FW_NUM_LONGWORDS_3 3
#define ICP_QAT_FW_NUM_LONGWORDS_4 4
#define ICP_QAT_FW_NUM_LONGWORDS_5 5
#define ICP_QAT_FW_NUM_LONGWORDS_6 6
#define ICP_QAT_FW_NUM_LONGWORDS_7 7
#define ICP_QAT_FW_NUM_LONGWORDS_10 10
#define ICP_QAT_FW_NUM_LONGWORDS_13 13

/**< @ingroup icp_qat_fw_comn
 * Definition of the associated service Id for NULL service type.
 * Note: the response is expected to use ICP_QAT_FW_COMN_RESP_SERV_CPM_FW */
#define ICP_QAT_FW_NULL_REQ_SERV_ID 1

/**
 *****************************************************************************
 * @ingroup icp_qat_fw_comn
 *      Definition of the firmware interface service users, for
 *      responses.
 * @description
 *      Enumeration which is used to indicate the ids of the services
 *      for responses using the external firmware interfaces.
*
 *****************************************************************************/
typedef enum {
	ICP_QAT_FW_COMN_RESP_SERV_NULL,	    /**< NULL service id type */
	ICP_QAT_FW_COMN_RESP_SERV_CPM_FW,   /**< CPM FW Service ID */
	ICP_QAT_FW_COMN_RESP_SERV_DELIMITER /**< Delimiter service id type */
} icp_qat_fw_comn_resp_serv_id_t;

/**
 *****************************************************************************
 * @ingroup icp_qat_fw_comn
 *      Definition of the request types
 * @description
 *      Enumeration which is used to indicate the ids of the request
 *      types used in each of the external firmware interfaces
 *
 *****************************************************************************/
typedef enum {
	ICP_QAT_FW_COMN_REQ_NULL = 0,	     /**< NULL request type */
	ICP_QAT_FW_COMN_REQ_CPM_FW_PKE = 3,  /**< CPM FW PKE Request */
	ICP_QAT_FW_COMN_REQ_CPM_FW_LA = 4,   /**< CPM FW Lookaside Request */
	ICP_QAT_FW_COMN_REQ_CPM_FW_DMA = 7,  /**< CPM FW DMA Request */
	ICP_QAT_FW_COMN_REQ_CPM_FW_COMP = 9, /**< CPM FW Compression Request */
	ICP_QAT_FW_COMN_REQ_DELIMITER	     /**< End delimiter */
} icp_qat_fw_comn_request_id_t;

/* ========================================================================= */
/*                          QAT FW REQUEST STRUCTURES                        */
/* ========================================================================= */

/**
 *****************************************************************************
 * @ingroup icp_qat_fw_comn
 *      Common request flags type
 *
 * @description
 *      Definition of the common request flags.
 *
 *****************************************************************************/
typedef uint8_t icp_qat_fw_comn_flags;

/**
 *****************************************************************************
 * @ingroup icp_qat_fw_comn
 *      Common request - Service specific flags type
 *
 * @description
 *      Definition of the common request service specific flags.
 *
 *****************************************************************************/
typedef uint16_t icp_qat_fw_serv_specif_flags;

/**
 *****************************************************************************
 * @ingroup icp_qat_fw_comn
 *      Common request - Extended service specific flags type
 *
 * @description
 *      Definition of the common request extended service specific flags.
 *
 *****************************************************************************/
typedef uint8_t icp_qat_fw_ext_serv_specif_flags;

/**
 *****************************************************************************
 * @ingroup icp_qat_fw_comn
 *      Definition of the common QAT FW request content descriptor field -
 *      points to the content descriptor parameters or itself contains service-
 *      specific data. Also specifies content descriptor parameter size.
 *      Contains reserved fields.
 * @description
 *      Common section of the request used across all of the services exposed
 *      by the QAT FW. Each of the services inherit these common fields
 *
 *****************************************************************************/
typedef union icp_qat_fw_comn_req_hdr_cd_pars_s {
	/**< LWs 2-5 */
	struct {
		uint64_t content_desc_addr;
		/**< Address of the content descriptor */

		uint16_t content_desc_resrvd1;
		/**< Content descriptor reserved field */

		uint8_t content_desc_params_sz;
		/**< Size of the content descriptor parameters in quad words.
		 * These parameters describe the session setup configuration
		 * info for the slices that this request relies upon i.e. the
		 * configuration word and cipher key needed by the cipher slice
		 * if there is a request for cipher processing. */

		uint8_t content_desc_hdr_resrvd2;
		/**< Content descriptor reserved field */

		uint32_t content_desc_resrvd3;
		/**< Content descriptor reserved field */
	} s;

	struct {
		uint32_t serv_specif_fields[ICP_QAT_FW_NUM_LONGWORDS_4];
	} s1;

} icp_qat_fw_comn_req_hdr_cd_pars_t;

/**
 *****************************************************************************
 * @ingroup icp_qat_fw_comn
 *      Definition of the common QAT FW request middle block.
 * @description
 *      Common section of the request used across all of the services exposed
 *      by the QAT FW. Each of the services inherit these common fields
 *
 *****************************************************************************/
typedef struct icp_qat_fw_comn_req_mid_s {
	/**< LWs 6-13 */
	uint64_t opaque_data;
	/**< Opaque data passed unmodified from the request to response
	 * messages by firmware (fw) */

	uint64_t src_data_addr;
	/**< Generic definition of the source data supplied to the QAT AE. The
	 * common flags are used to further describe the attributes of this
	 * field */

	uint64_t dest_data_addr;
	/**< Generic definition of the destination data supplied to the QAT AE.
	 * The common flags are used to further describe the attributes of this
	 * field */

	uint32_t src_length;
	/** < Length of source flat buffer in case src buffer
	 * type is flat */

	uint32_t dst_length;
	/** < Length of destination flat buffer in case dst buffer
	 * type is flat */

} icp_qat_fw_comn_req_mid_t;

/**
 *****************************************************************************
 * @ingroup icp_qat_fw_comn
 *      Definition of the common QAT FW request content descriptor control
 *      block.
 *
 * @description
 *      Service specific section of the request used across all of the services
 *      exposed by the QAT FW. Each of the services populates this block
 *      uniquely. Refer to the service-specific header structures e.g.
 *      'icp_qat_fw_cipher_hdr_s' (for Cipher) etc.
 *
 *****************************************************************************/
typedef struct icp_qat_fw_comn_req_cd_ctrl_s {
	/**< LWs 27-31 */
	uint32_t content_desc_ctrl_lw[ICP_QAT_FW_NUM_LONGWORDS_5];

} icp_qat_fw_comn_req_cd_ctrl_t;

/**
 *****************************************************************************
 * @ingroup icp_qat_fw_comn
 *      Definition of the common QAT FW request header.
 * @description
 *      Common section of the request used across all of the services exposed
 *      by the QAT FW. Each of the services inherit these common fields. The
 *      reserved field of 7 bits and the service command Id field are all
 *      service-specific fields, along with the service specific flags.
 *
 *****************************************************************************/
typedef struct icp_qat_fw_comn_req_hdr_s {
	/**< LW0 */
	uint8_t resrvd1;
	/**< reserved field */

	uint8_t service_cmd_id;
	/**< Service Command Id - this field is service-specific
	 * Please use service-specific command Id here e.g.Crypto Command Id
	 * or Compression Command Id etc. */

	uint8_t service_type;
	/**< Service type */

	uint8_t hdr_flags;
	/**< This represents a flags field for the Service Request.
	 * The most significant bit is the 'valid' flag and the only
	 * one used. All remaining bit positions are unused and
	 * are therefore reserved and need to be set to 0. */

	/**< LW1 */
	icp_qat_fw_serv_specif_flags serv_specif_flags;
	/**< Common Request service-specific flags
	 * e.g. Symmetric Crypto Command Flags */

	icp_qat_fw_comn_flags comn_req_flags;
	/**< Common Request Flags consisting of
	 * - 6 reserved bits,
	 * - 1 Content Descriptor field type bit and
	 * - 1 Source/destination pointer type bit */

	icp_qat_fw_ext_serv_specif_flags extended_serv_specif_flags;
	/**< An extension of serv_specif_flags */

} icp_qat_fw_comn_req_hdr_t;

/**
 *****************************************************************************
 * @ingroup icp_qat_fw_comn
 *      Definition of the common QAT FW request parameter field.
* * @description * Service specific section of the request used across all of the services * exposed by the QAT FW. Each of the services populates this block * uniquely. Refer to service-specific header structures e.g. * 'icp_qat_fw_comn_req_cipher_rqpars_s' (for Cipher) etc. * *****************************************************************************/ typedef struct icp_qat_fw_comn_req_rqpars_s { /**< LWs 14-26 */ uint32_t serv_specif_rqpars_lw[ICP_QAT_FW_NUM_LONGWORDS_13]; } icp_qat_fw_comn_req_rqpars_t; /** ***************************************************************************** * @ingroup icp_qat_fw_comn * Definition of the common request structure with service specific * fields * @description * This is a definition of the full qat request structure used by all * services. Each service is free to use the service fields in its own * way. This struct is useful as a message passing argument before the * service contained within the request is determined. * *****************************************************************************/ typedef struct icp_qat_fw_comn_req_s { /**< LWs 0-1 */ icp_qat_fw_comn_req_hdr_t comn_hdr; /**< Common request header */ /**< LWs 2-5 */ icp_qat_fw_comn_req_hdr_cd_pars_t cd_pars; /**< Common Request content descriptor field which points either to a * content descriptor * parameter block or contains the service-specific data itself. 
*/ /**< LWs 6-13 */ icp_qat_fw_comn_req_mid_t comn_mid; /**< Common request middle section */ /**< LWs 14-26 */ icp_qat_fw_comn_req_rqpars_t serv_specif_rqpars; /**< Common request service-specific parameter field */ /**< LWs 27-31 */ icp_qat_fw_comn_req_cd_ctrl_t cd_ctrl; /**< Common request content descriptor control block - * this field is service-specific */ } icp_qat_fw_comn_req_t; /* ========================================================================= */ /* QAT FW RESPONSE STRUCTURES */ /* ========================================================================= */ /** ***************************************************************************** * @ingroup icp_qat_fw_comn * Error code field * * @description * Overloaded field with 8 bit common error field or two * 8 bit compression error fields for compression and translator slices * *****************************************************************************/ typedef union icp_qat_fw_comn_error_s { struct { uint8_t resrvd; /**< 8 bit reserved field */ uint8_t comn_err_code; /**< 8 bit common error code */ } s; /**< Structure which is used for non-compression responses */ struct { uint8_t xlat_err_code; /**< 8 bit translator error field */ uint8_t cmp_err_code; /**< 8 bit compression error field */ } s1; /** Structure which is used for compression responses */ } icp_qat_fw_comn_error_t; /** ***************************************************************************** * @ingroup icp_qat_fw_comn * Definition of the common QAT FW response header. 
 * @description
 *      This section of the response is common across all of the services
 *      that generate a firmware interface response
 *
 *****************************************************************************/
typedef struct icp_qat_fw_comn_resp_hdr_s {
	/**< LW0 */
	uint8_t resrvd1;
	/**< Reserved field - this field is service-specific -
	 * Note: The Response Destination Id has been removed
	 * from first QWord */

	uint8_t service_id;
	/**< Service Id returned by service block */

	uint8_t response_type;
	/**< Response type - copied from the request to
	 * the response message */

	uint8_t hdr_flags;
	/**< This represents a flags field for the Response.
	 * Bit<7> = 'valid' flag
	 * Bit<6> = 'CNV' flag indicating that CNV was executed
	 *          on the current request
	 * Bit<5> = 'CNVNR' flag indicating that a recovery happened
	 *          on the current request following a CNV error
	 * All remaining bits are unused and are therefore reserved.
	 * They must be set to 0. */

	/**< LW 1 */
	icp_qat_fw_comn_error_t comn_error;
	/**< This field is overloaded to allow for one 8 bit common error field
	 * or two 8 bit error fields from compression and translator */

	uint8_t comn_status;
	/**< Status field which specifies which slice(s) report an error */

	uint8_t cmd_id;
	/**< Command Id - passed from the request to the response message */

} icp_qat_fw_comn_resp_hdr_t;

/**
 *****************************************************************************
 * @ingroup icp_qat_fw_comn
 *      Definition of the common response structure with service specific
 *      fields
 * @description
 *      This is a definition of the full qat response structure used by all
 *      services.
* *****************************************************************************/ typedef struct icp_qat_fw_comn_resp_s { /**< LWs 0-1 */ icp_qat_fw_comn_resp_hdr_t comn_hdr; /**< Common header fields */ /**< LWs 2-3 */ uint64_t opaque_data; /**< Opaque data passed from the request to the response message */ /**< LWs 4-7 */ uint32_t resrvd[ICP_QAT_FW_NUM_LONGWORDS_4]; /**< Reserved */ } icp_qat_fw_comn_resp_t; /* ========================================================================= */ /* MACRO DEFINITIONS */ /* ========================================================================= */ /* Common QAT FW request header - structure of LW0 * + ===== + ------- + ----------- + ----------- + ----------- + -------- + * | Bit | 31/30 | 29 - 24 | 21 - 16 | 15 - 8 | 7 - 0 | * + ===== + ------- + ----------- + ----------- + ----------- + -------- + * | Flags | V/Gen | Reserved | Serv Type | Serv Cmd Id | Rsv | * + ===== + ------- + ----------- + ----------- + ----------- + -------- + */ /**< @ingroup icp_qat_fw_comn * Definition of the setting of the header's valid flag */ #define ICP_QAT_FW_COMN_REQ_FLAG_SET 1 /**< @ingroup icp_qat_fw_comn * Definition of the setting of the header's valid flag */ #define ICP_QAT_FW_COMN_REQ_FLAG_CLR 0 /**< @ingroup icp_qat_fw_comn * Macros defining the bit position and mask of the 'valid' flag, within the * hdr_flags field of LW0 (service request and response) */ #define ICP_QAT_FW_COMN_VALID_FLAG_BITPOS 7 #define ICP_QAT_FW_COMN_VALID_FLAG_MASK 0x1 /**< @ingroup icp_qat_fw_comn * Macros defining the bit position and mask of the 'generation' flag, within * the hdr_flags field of LW0 (service request and response) */ #define ICP_QAT_FW_COMN_GEN_FLAG_BITPOS 6 #define ICP_QAT_FW_COMN_GEN_FLAG_MASK 0x1 /**< @ingroup icp_qat_fw_comn * The request is targeted for QAT2.0 */ #define ICP_QAT_FW_COMN_GEN_2 1 /**< @ingroup icp_qat_fw_comn * The request is targeted for QAT1.x. 
QAT2.0 FW will return 'unsupported request' if GEN1 request type is sent to QAT2.0 FW */ #define ICP_QAT_FW_COMN_GEN_1 0 #define ICP_QAT_FW_COMN_HDR_RESRVD_FLD_MASK 0x7F /* Common QAT FW response header - structure of LW0 * + ===== + --- + --- + ----- + ----- + --------- + ----------- + ----- + * | Bit | 31 | 30 | 29 | 28-24 | 21 - 16 | 15 - 8 | 7-0 | * + ===== + --- + ----+ ----- + ----- + --------- + ----------- + ----- + * | Flags | V | CNV | CNVNR | Rsvd | Serv Type | Serv Cmd Id | Rsvd | * + ===== + --- + --- + ----- + ----- + --------- + ----------- + ----- + */ /**< @ingroup icp_qat_fw_comn * Macros defining the bit position and mask of 'CNV' flag * within the hdr_flags field of LW0 (service response only) */ #define ICP_QAT_FW_COMN_CNV_FLAG_BITPOS 6 #define ICP_QAT_FW_COMN_CNV_FLAG_MASK 0x1 /**< @ingroup icp_qat_fw_comn * Macros defining the bit position and mask of CNVNR flag * within the hdr_flags field of LW0 (service response only) */ #define ICP_QAT_FW_COMN_CNVNR_FLAG_BITPOS 5 #define ICP_QAT_FW_COMN_CNVNR_FLAG_MASK 0x1 /**< @ingroup icp_qat_fw_comn * Macros defining the bit position and mask of Stored Blocks flag * within the hdr_flags field of LW0 (service response only) */ #define ICP_QAT_FW_COMN_ST_BLK_FLAG_BITPOS 4 #define ICP_QAT_FW_COMN_ST_BLK_FLAG_MASK 0x1 /** ****************************************************************************** * @ingroup icp_qat_fw_comn * * @description * Macro for extraction of Service Type Field * * @param icp_qat_fw_comn_req_hdr_t Structure 'icp_qat_fw_comn_req_hdr_t' * to extract the Service Type Field * *****************************************************************************/ #define ICP_QAT_FW_COMN_OV_SRV_TYPE_GET(icp_qat_fw_comn_req_hdr_t) \ icp_qat_fw_comn_req_hdr_t.service_type /** ****************************************************************************** * @ingroup icp_qat_fw_comn * * @description * Macro for setting of Service Type Field * * @param 'icp_qat_fw_comn_req_hdr_t' structure to set the 
Service * Type Field * @param val Value of the Service Type Field * *****************************************************************************/ #define ICP_QAT_FW_COMN_OV_SRV_TYPE_SET(icp_qat_fw_comn_req_hdr_t, val) \ icp_qat_fw_comn_req_hdr_t.service_type = val /** ****************************************************************************** * @ingroup icp_qat_fw_comn * * @description * Macro for extraction of Service Command Id Field * * @param icp_qat_fw_comn_req_hdr_t Structure 'icp_qat_fw_comn_req_hdr_t' * to extract the Service Command Id Field * *****************************************************************************/ #define ICP_QAT_FW_COMN_OV_SRV_CMD_ID_GET(icp_qat_fw_comn_req_hdr_t) \ icp_qat_fw_comn_req_hdr_t.service_cmd_id /** ****************************************************************************** * @ingroup icp_qat_fw_comn * * @description * Macro for setting of Service Command Id Field * * @param 'icp_qat_fw_comn_req_hdr_t' structure to set the * Service Command Id Field * @param val Value of the Service Command Id Field * *****************************************************************************/ #define ICP_QAT_FW_COMN_OV_SRV_CMD_ID_SET(icp_qat_fw_comn_req_hdr_t, val) \ icp_qat_fw_comn_req_hdr_t.service_cmd_id = val /** ****************************************************************************** * @ingroup icp_qat_fw_comn * * @description * Extract the valid flag from the request or response's header flags. * * @param hdr_t Request or Response 'hdr_t' structure to extract the valid bit * from the 'hdr_flags' field. * *****************************************************************************/ #define ICP_QAT_FW_COMN_HDR_VALID_FLAG_GET(hdr_t) \ ICP_QAT_FW_COMN_VALID_FLAG_GET(hdr_t.hdr_flags) /** ****************************************************************************** * @ingroup icp_qat_fw_comn * * @description * Extract the CNVNR flag from the header flags in the response only. 
* * @param hdr_t Response 'hdr_t' structure to extract the CNVNR bit * from the 'hdr_flags' field. * *****************************************************************************/ #define ICP_QAT_FW_COMN_HDR_CNVNR_FLAG_GET(hdr_flags) \ QAT_FIELD_GET(hdr_flags, \ ICP_QAT_FW_COMN_CNVNR_FLAG_BITPOS, \ ICP_QAT_FW_COMN_CNVNR_FLAG_MASK) /** ****************************************************************************** * @ingroup icp_qat_fw_comn * * @description * Extract the CNV flag from the header flags in the response only. * * @param hdr_t Response 'hdr_t' structure to extract the CNV bit * from the 'hdr_flags' field. * *****************************************************************************/ #define ICP_QAT_FW_COMN_HDR_CNV_FLAG_GET(hdr_flags) \ QAT_FIELD_GET(hdr_flags, \ ICP_QAT_FW_COMN_CNV_FLAG_BITPOS, \ ICP_QAT_FW_COMN_CNV_FLAG_MASK) /** ****************************************************************************** * @ingroup icp_qat_fw_comn * * @description * Set the valid bit in the request's header flags. * * @param hdr_t Request or Response 'hdr_t' structure to set the valid bit * @param val Value of the valid bit flag. * *****************************************************************************/ #define ICP_QAT_FW_COMN_HDR_VALID_FLAG_SET(hdr_t, val) \ ICP_QAT_FW_COMN_VALID_FLAG_SET(hdr_t, val) /** ****************************************************************************** * @ingroup icp_qat_fw_comn * * @description * Common macro to extract the valid flag from the header flags field * within the header structure (request or response). * * @param hdr_t Structure (request or response) to extract the * valid bit from the 'hdr_flags' field. 
* *****************************************************************************/ #define ICP_QAT_FW_COMN_VALID_FLAG_GET(hdr_flags) \ QAT_FIELD_GET(hdr_flags, \ ICP_QAT_FW_COMN_VALID_FLAG_BITPOS, \ ICP_QAT_FW_COMN_VALID_FLAG_MASK) /** ****************************************************************************** * @ingroup icp_qat_fw_comn * * @description * Extract the Stored Block flag from the header flags in the * response only. * * @param hdr_flags Response 'hdr' structure to extract the * Stored Block bit from the 'hdr_flags' field. * *****************************************************************************/ #define ICP_QAT_FW_COMN_HDR_ST_BLK_FLAG_GET(hdr_flags) \ QAT_FIELD_GET(hdr_flags, \ ICP_QAT_FW_COMN_ST_BLK_FLAG_BITPOS, \ ICP_QAT_FW_COMN_ST_BLK_FLAG_MASK) /** ****************************************************************************** * @ingroup icp_qat_fw_comn * * @description * Set the Stored Block bit in the response's header flags. * * @param hdr_t Response 'hdr_t' structure to set the ST_BLK bit * @param val Value of the ST_BLK bit flag. * *****************************************************************************/ #define ICP_QAT_FW_COMN_HDR_ST_BLK_FLAG_SET(hdr_t, val) \ QAT_FIELD_SET((hdr_t.hdr_flags), \ (val), \ ICP_QAT_FW_COMN_ST_BLK_FLAG_BITPOS, \ ICP_QAT_FW_COMN_ST_BLK_FLAG_MASK) /** ****************************************************************************** * @ingroup icp_qat_fw_comn * * @description * Set the generation bit in the request's header flags. * * @param hdr_t Request or Response 'hdr_t' structure to set the gen bit * @param val Value of the generation bit flag. 
* *****************************************************************************/ #define ICP_QAT_FW_COMN_HDR_GENERATION_FLAG_SET(hdr_t, val) \ ICP_QAT_FW_COMN_GENERATION_FLAG_SET(hdr_t, val) /** ****************************************************************************** * @ingroup icp_qat_fw_comn * * @description * Common macro to set the generation bit in the common header * * @param hdr_t Structure (request or response) containing the header * flags field, to allow the generation bit to be set. * @param val Value of the generation bit flag. * *****************************************************************************/ #define ICP_QAT_FW_COMN_GENERATION_FLAG_SET(hdr_t, val) \ QAT_FIELD_SET((hdr_t.hdr_flags), \ (val), \ ICP_QAT_FW_COMN_GEN_FLAG_BITPOS, \ ICP_QAT_FW_COMN_GEN_FLAG_MASK) /** ****************************************************************************** * @ingroup icp_qat_fw_comn * * @description * Common macro to extract the generation flag from the header flags field * within the header structure (request or response). * * @param hdr_t Structure (request or response) to extract the * generation bit from the 'hdr_flags' field. * *****************************************************************************/ #define ICP_QAT_FW_COMN_HDR_GENERATION_FLAG_GET(hdr_flags) \ QAT_FIELD_GET(hdr_flags, \ ICP_QAT_FW_COMN_GEN_FLAG_BITPOS, \ ICP_QAT_FW_COMN_GEN_FLAG_MASK) /** ****************************************************************************** * @ingroup icp_qat_fw_comn * * @description * Common macro to extract the remaining reserved flags from the header flags field within the header structure (request or response). * * @param hdr_t Structure (request or response) to extract the * remaining bits from the 'hdr_flags' field (excluding the * valid flag). 
* *****************************************************************************/ #define ICP_QAT_FW_COMN_HDR_RESRVD_FLD_GET(hdr_flags) \ (hdr_flags & ICP_QAT_FW_COMN_HDR_RESRVD_FLD_MASK) /** ****************************************************************************** * @ingroup icp_qat_fw_comn * * @description * Common macro to set the valid bit in the header flags field within * the header structure (request or response). * * @param hdr_t Structure (request or response) containing the header * flags field, to allow the valid bit to be set. * @param val Value of the valid bit flag. * *****************************************************************************/ #define ICP_QAT_FW_COMN_VALID_FLAG_SET(hdr_t, val) \ QAT_FIELD_SET((hdr_t.hdr_flags), \ (val), \ ICP_QAT_FW_COMN_VALID_FLAG_BITPOS, \ ICP_QAT_FW_COMN_VALID_FLAG_MASK) /** ****************************************************************************** * @ingroup icp_qat_fw_comn * * @description * Macro that must be used when building the common header flags. * Note that all bits reserved field bits 0-6 (LW0) need to be forced to 0. * * @param ptr Value of the valid flag *****************************************************************************/ #define ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(valid) \ (((valid)&ICP_QAT_FW_COMN_VALID_FLAG_MASK) \ << ICP_QAT_FW_COMN_VALID_FLAG_BITPOS) /* * < @ingroup icp_qat_fw_comn * Common Request Flags Definition * The bit offsets below are within the flags field. These are NOT relative to * the memory word. Unused fields e.g. reserved bits, must be zeroed. 
* * + ===== + ------ + --- + --- + --- + --- + --- + --- + --- + --- + * | Bits [15:8] | 15 | 14 | 13 | 12 | 11 | 10 | 9 | 8 | * + ===== + ------ + --- + --- + --- + --- + --- + --- + --- + --- + * | Flags[15:8] | Rsv | Rsv | Rsv | Rsv | Rsv | Rsv | Rsv | Rsv | * + ===== + ------ + --- + --- + --- + --- + --- + --- + --- + --- + * | Bits [7:0] | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0 | * + ===== + ------ + --- + --- + --- + --- + --- + --- + --- + --- + * | Flags [7:0] | Rsv | Rsv | Rsv | Rsv | Rsv | BnP | Cdt | Ptr | * + ===== + ------ + --- + --- + --- + --- + --- + --- + --- + --- + */ #define QAT_COMN_PTR_TYPE_BITPOS 0 /**< @ingroup icp_qat_fw_comn * Common Request Flags - Starting bit position indicating * Src&Dst Buffer Pointer type */ #define QAT_COMN_PTR_TYPE_MASK 0x1 /**< @ingroup icp_qat_fw_comn * Common Request Flags - One bit mask used to determine * Src&Dst Buffer Pointer type */ #define QAT_COMN_CD_FLD_TYPE_BITPOS 1 /**< @ingroup icp_qat_fw_comn * Common Request Flags - Starting bit position indicating * CD Field type */ #define QAT_COMN_CD_FLD_TYPE_MASK 0x1 /**< @ingroup icp_qat_fw_comn * Common Request Flags - One bit mask used to determine * CD Field type */ #define QAT_COMN_BNP_ENABLED_BITPOS 2 /**< @ingroup icp_qat_fw_comn * Common Request Flags - Starting bit position indicating * the source buffer contains batch of requests. if this * bit is set, source buffer is type of Batch And Pack OpData List * and the Ptr Type Bit only applies to Destination buffer. */ #define QAT_COMN_BNP_ENABLED_MASK 0x1 /**< @ingroup icp_qat_fw_comn * Batch And Pack Enabled Flag Mask - One bit mask used to determine * the source buffer is in Batch and Pack OpData Link List Mode. 
*/ /* ========================================================================= */ /* Pointer Type Flag definitions */ /* ========================================================================= */ #define QAT_COMN_PTR_TYPE_FLAT 0x0 /**< @ingroup icp_qat_fw_comn * Constant value indicating Src&Dst Buffer Pointer type is flat * If Batch and Pack mode is enabled, only applies to Destination buffer.*/ #define QAT_COMN_PTR_TYPE_SGL 0x1 /**< @ingroup icp_qat_fw_comn * Constant value indicating Src&Dst Buffer Pointer type is SGL type * If Batch and Pack mode is enabled, only applies to Destination buffer.*/ #define QAT_COMN_PTR_TYPE_BATCH 0x2 /**< @ingroup icp_qat_fw_comn * Constant value indicating Src is a batch request * and Dst Buffer Pointer type is SGL type */ /* ========================================================================= */ /* CD Field Flag definitions */ /* ========================================================================= */ #define QAT_COMN_CD_FLD_TYPE_64BIT_ADR 0x0 /**< @ingroup icp_qat_fw_comn * Constant value indicating CD Field contains 64-bit address */ #define QAT_COMN_CD_FLD_TYPE_16BYTE_DATA 0x1 /**< @ingroup icp_qat_fw_comn * Constant value indicating CD Field contains 16 bytes of setup data */ /* ========================================================================= */ /* Batch And Pack Enable/Disable Definitions */ /* ========================================================================= */ #define QAT_COMN_BNP_ENABLED 0x1 /**< @ingroup icp_qat_fw_comn * Constant value indicating Source buffer will point to Batch And Pack OpData * List */ #define QAT_COMN_BNP_DISABLED 0x0 /**< @ingroup icp_qat_fw_comn * Constant value indicating Source buffer will point to Batch And Pack OpData * List */ /** ****************************************************************************** * @ingroup icp_qat_fw_comn * * @description * Macro that must be used when building the common request flags (for all * requests but comp BnP). 
* Note that all bits reserved field bits 2-15 (LW1) need to be forced to 0. * * @param ptr Value of the pointer type flag * @param cdt Value of the cd field type flag *****************************************************************************/ #define ICP_QAT_FW_COMN_FLAGS_BUILD(cdt, ptr) \ ((((cdt)&QAT_COMN_CD_FLD_TYPE_MASK) << QAT_COMN_CD_FLD_TYPE_BITPOS) | \ (((ptr)&QAT_COMN_PTR_TYPE_MASK) << QAT_COMN_PTR_TYPE_BITPOS)) /** ****************************************************************************** * @ingroup icp_qat_fw_comn * * @description * Macro that must be used when building the common request flags for comp * BnP service. * Note that all bits reserved field bits 3-15 (LW1) need to be forced to 0. * * @param ptr Value of the pointer type flag * @param cdt Value of the cd field type flag * @param bnp Value of the bnp enabled flag *****************************************************************************/ #define ICP_QAT_FW_COMN_FLAGS_BUILD_BNP(cdt, ptr, bnp) \ ((((cdt)&QAT_COMN_CD_FLD_TYPE_MASK) << QAT_COMN_CD_FLD_TYPE_BITPOS) | \ (((ptr)&QAT_COMN_PTR_TYPE_MASK) << QAT_COMN_PTR_TYPE_BITPOS) | \ (((bnp)&QAT_COMN_BNP_ENABLED_MASK) << QAT_COMN_BNP_ENABLED_BITPOS)) /** ****************************************************************************** * @ingroup icp_qat_fw_comn * * @description * Macro for extraction of the pointer type bit from the common flags * * @param flags Flags to extract the pointer type bit from * *****************************************************************************/ #define ICP_QAT_FW_COMN_PTR_TYPE_GET(flags) \ QAT_FIELD_GET(flags, QAT_COMN_PTR_TYPE_BITPOS, QAT_COMN_PTR_TYPE_MASK) /** ****************************************************************************** * @ingroup icp_qat_fw_comn * * @description * Macro for extraction of the cd field type bit from the common flags * * @param flags Flags to extract the cd field type type bit from * *****************************************************************************/ 
#define ICP_QAT_FW_COMN_CD_FLD_TYPE_GET(flags) \ QAT_FIELD_GET(flags, \ QAT_COMN_CD_FLD_TYPE_BITPOS, \ QAT_COMN_CD_FLD_TYPE_MASK) /** ****************************************************************************** * @ingroup icp_qat_fw_comn * * @description * Macro for extraction of the bnp field type bit from the common flags * * @param flags Flags to extract the bnp field type type bit from * *****************************************************************************/ #define ICP_QAT_FW_COMN_BNP_ENABLED_GET(flags) \ QAT_FIELD_GET(flags, \ QAT_COMN_BNP_ENABLED_BITPOS, \ QAT_COMN_BNP_ENABLED_MASK) /** ****************************************************************************** * @ingroup icp_qat_fw_comn * * @description * Macro for setting the pointer type bit in the common flags * * @param flags Flags in which Pointer Type bit will be set * @param val Value of the bit to be set in flags * *****************************************************************************/ #define ICP_QAT_FW_COMN_PTR_TYPE_SET(flags, val) \ QAT_FIELD_SET(flags, \ val, \ QAT_COMN_PTR_TYPE_BITPOS, \ QAT_COMN_PTR_TYPE_MASK) /** ****************************************************************************** * @ingroup icp_qat_fw_comn * * @description * Macro for setting the cd field type bit in the common flags * * @param flags Flags in which Cd Field Type bit will be set * @param val Value of the bit to be set in flags * *****************************************************************************/ #define ICP_QAT_FW_COMN_CD_FLD_TYPE_SET(flags, val) \ QAT_FIELD_SET(flags, \ val, \ QAT_COMN_CD_FLD_TYPE_BITPOS, \ QAT_COMN_CD_FLD_TYPE_MASK) /** ****************************************************************************** * @ingroup icp_qat_fw_comn * * @description * Macro for setting the bnp field type bit in the common flags * * @param flags Flags in which Bnp Field Type bit will be set * @param val Value of the bit to be set in flags * 
*****************************************************************************/ #define ICP_QAT_FW_COMN_BNP_ENABLE_SET(flags, val) \ QAT_FIELD_SET(flags, \ val, \ QAT_COMN_BNP_ENABLED_BITPOS, \ QAT_COMN_BNP_ENABLED_MASK) /** ****************************************************************************** * @ingroup icp_qat_fw_comn * * @description * Macros using the bit position and mask to set/extract the next * and current id nibbles within the next_curr_id field of the * content descriptor header block. Note that these are defined * in the common header file, as they are used by compression, cipher * and authentication. * * @param cd_ctrl_hdr_t Content descriptor control block header pointer. * @param val Value of the field being set. * *****************************************************************************/ #define ICP_QAT_FW_COMN_NEXT_ID_BITPOS 4 #define ICP_QAT_FW_COMN_NEXT_ID_MASK 0xF0 #define ICP_QAT_FW_COMN_CURR_ID_BITPOS 0 #define ICP_QAT_FW_COMN_CURR_ID_MASK 0x0F #define ICP_QAT_FW_COMN_NEXT_ID_GET(cd_ctrl_hdr_t) \ ((((cd_ctrl_hdr_t)->next_curr_id) & ICP_QAT_FW_COMN_NEXT_ID_MASK) >> \ (ICP_QAT_FW_COMN_NEXT_ID_BITPOS)) #define ICP_QAT_FW_COMN_NEXT_ID_SET(cd_ctrl_hdr_t, val) \ ((cd_ctrl_hdr_t)->next_curr_id) = \ ((((cd_ctrl_hdr_t)->next_curr_id) & \ ICP_QAT_FW_COMN_CURR_ID_MASK) | \ ((val << ICP_QAT_FW_COMN_NEXT_ID_BITPOS) & \ ICP_QAT_FW_COMN_NEXT_ID_MASK)) #define ICP_QAT_FW_COMN_CURR_ID_GET(cd_ctrl_hdr_t) \ (((cd_ctrl_hdr_t)->next_curr_id) & ICP_QAT_FW_COMN_CURR_ID_MASK) #define ICP_QAT_FW_COMN_CURR_ID_SET(cd_ctrl_hdr_t, val) \ ((cd_ctrl_hdr_t)->next_curr_id) = \ ((((cd_ctrl_hdr_t)->next_curr_id) & \ ICP_QAT_FW_COMN_NEXT_ID_MASK) | \ ((val)&ICP_QAT_FW_COMN_CURR_ID_MASK)) /* * < @ingroup icp_qat_fw_comn * Common Status Field Definition The bit offsets below are within the COMMON * RESPONSE status field, assumed to be 8 bits wide. In the case of the PKE * response (which follows the CPM 1.5 message format), the status field is 16 * bits wide. 
* The status flags are contained within the most significant byte and align * with the diagram below. Please therefore refer to the service-specific PKE * header file for the appropriate macro definition to extract the PKE status * flag from the PKE response, which assumes that a word is passed to the * macro. * + ===== + ------ + --- + --- + ---- + ---- + -------- + ---- + ---------- + * | Bit | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0 | * + ===== + ------ + --- + --- + ---- + ---- + -------- + ---- + ---------- + * | Flags | Crypto | Pke | Cmp | Xlat | EOLB | UnSupReq | Rsvd | XltWaApply | * + ===== + ------ + --- + --- + ---- + ---- + -------- + ---- + ---------- + * Note: * For the service specific status bit definitions refer to service header files * Eg. Crypto Status bit refers to Symmetric Crypto, Key Generation, and NRBG * Requests' Status. Unused bits e.g. reserved bits need to have been forced to * 0. */ #define QAT_COMN_RESP_CRYPTO_STATUS_BITPOS 7 /**< @ingroup icp_qat_fw_comn * Starting bit position indicating Response for Crypto service Flag */ #define QAT_COMN_RESP_CRYPTO_STATUS_MASK 0x1 /**< @ingroup icp_qat_fw_comn * One bit mask used to determine Crypto status mask */ #define QAT_COMN_RESP_PKE_STATUS_BITPOS 6 /**< @ingroup icp_qat_fw_comn * Starting bit position indicating Response for PKE service Flag */ #define QAT_COMN_RESP_PKE_STATUS_MASK 0x1 /**< @ingroup icp_qat_fw_comn * One bit mask used to determine PKE status mask */ #define QAT_COMN_RESP_CMP_STATUS_BITPOS 5 /**< @ingroup icp_qat_fw_comn * Starting bit position indicating Response for Compression service Flag */ #define QAT_COMN_RESP_CMP_STATUS_MASK 0x1 /**< @ingroup icp_qat_fw_comn * One bit mask used to determine Compression status mask */ #define QAT_COMN_RESP_XLAT_STATUS_BITPOS 4 /**< @ingroup icp_qat_fw_comn * Starting bit position indicating Response for Xlat service Flag */ #define QAT_COMN_RESP_XLAT_STATUS_MASK 0x1 /**< @ingroup icp_qat_fw_comn * One bit mask used to determine Translator 
status mask */ #define QAT_COMN_RESP_CMP_END_OF_LAST_BLK_BITPOS 3 /**< @ingroup icp_qat_fw_comn * Starting bit position indicating the last block in a deflate stream for the compression service Flag */ #define QAT_COMN_RESP_CMP_END_OF_LAST_BLK_MASK 0x1 /**< @ingroup icp_qat_fw_comn * One bit mask used to determine the last block in a deflate stream status mask */ #define QAT_COMN_RESP_UNSUPPORTED_REQUEST_BITPOS 2 /**< @ingroup icp_qat_fw_comn * Starting bit position indicating when an unsupported service request Flag */ #define QAT_COMN_RESP_UNSUPPORTED_REQUEST_MASK 0x1 /**< @ingroup icp_qat_fw_comn * One bit mask used to determine the unsupported service request status mask */ #define QAT_COMN_RESP_XLT_INV_APPLIED_BITPOS 0 /**< @ingroup icp_qat_fw_comn * Bit position indicating that firmware detected an invalid translation during * dynamic compression and took measures to overcome this * */ #define QAT_COMN_RESP_XLT_INV_APPLIED_MASK 0x1 /**< @ingroup icp_qat_fw_comn * One bit mask */ /** ****************************************************************************** * @description * Macro that must be used when building the status * for the common response * * @param crypto Value of the Crypto Service status flag * @param comp Value of the Compression Service Status flag * @param xlat Value of the Xlator Status flag * @param eolb Value of the Compression End of Last Block Status flag * @param unsupp Value of the Unsupported Request flag * @param xlt_inv Value of the Invalid Translation flag *****************************************************************************/ #define ICP_QAT_FW_COMN_RESP_STATUS_BUILD( \ crypto, pke, comp, xlat, eolb, unsupp, xlt_inv) \ ((((crypto)&QAT_COMN_RESP_CRYPTO_STATUS_MASK) \ << QAT_COMN_RESP_CRYPTO_STATUS_BITPOS) | \ (((pke)&QAT_COMN_RESP_PKE_STATUS_MASK) \ << QAT_COMN_RESP_PKE_STATUS_BITPOS) | \ (((xlt_inv)&QAT_COMN_RESP_XLT_INV_APPLIED_MASK) \ << QAT_COMN_RESP_XLT_INV_APPLIED_BITPOS) | \ (((comp)&QAT_COMN_RESP_CMP_STATUS_MASK) \ 
<< QAT_COMN_RESP_CMP_STATUS_BITPOS) | \ (((xlat)&QAT_COMN_RESP_XLAT_STATUS_MASK) \ << QAT_COMN_RESP_XLAT_STATUS_BITPOS) | \ (((eolb)&QAT_COMN_RESP_CMP_END_OF_LAST_BLK_MASK) \ << QAT_COMN_RESP_CMP_END_OF_LAST_BLK_BITPOS) | \ (((unsupp)&QAT_COMN_RESP_UNSUPPORTED_REQUEST_BITPOS) \ << QAT_COMN_RESP_UNSUPPORTED_REQUEST_MASK)) /* ========================================================================= */ /* GETTERS */ /* ========================================================================= */ /** ****************************************************************************** * @ingroup icp_qat_fw_comn * * @description * Macro for extraction of the Crypto bit from the status * * @param status * Status to extract the status bit from * *****************************************************************************/ #define ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(status) \ QAT_FIELD_GET(status, \ QAT_COMN_RESP_CRYPTO_STATUS_BITPOS, \ QAT_COMN_RESP_CRYPTO_STATUS_MASK) /** ****************************************************************************** * @ingroup icp_qat_fw_comn * * @description * Macro for extraction of the PKE bit from the status * * @param status * Status to extract the status bit from * *****************************************************************************/ #define ICP_QAT_FW_COMN_RESP_PKE_STAT_GET(status) \ QAT_FIELD_GET(status, \ QAT_COMN_RESP_PKE_STATUS_BITPOS, \ QAT_COMN_RESP_PKE_STATUS_MASK) /** ****************************************************************************** * @ingroup icp_qat_fw_comn * * @description * Macro for extraction of the Compression bit from the status * * @param status * Status to extract the status bit from * *****************************************************************************/ #define ICP_QAT_FW_COMN_RESP_CMP_STAT_GET(status) \ QAT_FIELD_GET(status, \ QAT_COMN_RESP_CMP_STATUS_BITPOS, \ QAT_COMN_RESP_CMP_STATUS_MASK) /** ****************************************************************************** * @ingroup 
icp_qat_fw_comn * * @description * Macro for extraction of the Translator bit from the status * * @param status * Status to extract the status bit from * *****************************************************************************/ #define ICP_QAT_FW_COMN_RESP_XLAT_STAT_GET(status) \ QAT_FIELD_GET(status, \ QAT_COMN_RESP_XLAT_STATUS_BITPOS, \ QAT_COMN_RESP_XLAT_STATUS_MASK) /** ****************************************************************************** * @ingroup icp_qat_fw_comn * * @description * Macro for extraction of the Translation Invalid bit * from the status * * @param status * Status to extract the status bit from * *****************************************************************************/ #define ICP_QAT_FW_COMN_RESP_XLT_INV_APPLIED_GET(status) \ QAT_FIELD_GET(status, \ QAT_COMN_RESP_XLT_INV_APPLIED_BITPOS, \ QAT_COMN_RESP_XLT_INV_APPLIED_MASK) /** ****************************************************************************** * @ingroup icp_qat_fw_comn * * @description * Macro for extraction of the end of compression block bit from the * status * * @param status * Status to extract the status bit from * *****************************************************************************/ #define ICP_QAT_FW_COMN_RESP_CMP_END_OF_LAST_BLK_FLAG_GET(status) \ QAT_FIELD_GET(status, \ QAT_COMN_RESP_CMP_END_OF_LAST_BLK_BITPOS, \ QAT_COMN_RESP_CMP_END_OF_LAST_BLK_MASK) /** ****************************************************************************** * @ingroup icp_qat_fw_comn * * @description * Macro for extraction of the Unsupported request from the status * * @param status * Status to extract the status bit from * *****************************************************************************/ #define ICP_QAT_FW_COMN_RESP_UNSUPPORTED_REQUEST_STAT_GET(status) \ QAT_FIELD_GET(status, \ QAT_COMN_RESP_UNSUPPORTED_REQUEST_BITPOS, \ QAT_COMN_RESP_UNSUPPORTED_REQUEST_MASK) /* ========================================================================= */ /* Status Flag 
definitions */ /* ========================================================================= */ #define ICP_QAT_FW_COMN_STATUS_FLAG_OK 0 /**< @ingroup icp_qat_fw_comn * Definition of successful processing of a request */ #define ICP_QAT_FW_COMN_STATUS_FLAG_ERROR 1 /**< @ingroup icp_qat_fw_comn * Definition of erroneous processing of a request */ #define ICP_QAT_FW_COMN_STATUS_CMP_END_OF_LAST_BLK_FLAG_CLR 0 /**< @ingroup icp_qat_fw_comn * Final Deflate block of a compression request not completed */ #define ICP_QAT_FW_COMN_STATUS_CMP_END_OF_LAST_BLK_FLAG_SET 1 /**< @ingroup icp_qat_fw_comn * Final Deflate block of a compression request completed */ #define ERR_CODE_NO_ERROR 0 /**< Error Code constant value for no error */ #define ERR_CODE_INVALID_BLOCK_TYPE -1 /* Invalid block type (type == 3)*/ #define ERR_CODE_NO_MATCH_ONES_COMP -2 /* Stored block length does not match one's complement */ #define ERR_CODE_TOO_MANY_LEN_OR_DIS -3 /* Too many length or distance codes */ #define ERR_CODE_INCOMPLETE_LEN -4 /* Code lengths codes incomplete */ #define ERR_CODE_RPT_LEN_NO_FIRST_LEN -5 /* Repeat lengths with no first length */ #define ERR_CODE_RPT_GT_SPEC_LEN -6 /* Repeat more than specified lengths */ #define ERR_CODE_INV_LIT_LEN_CODE_LEN -7 /* Invalid lit/len code lengths */ #define ERR_CODE_INV_DIS_CODE_LEN -8 /* Invalid distance code lengths */ #define ERR_CODE_INV_LIT_LEN_DIS_IN_BLK -9 /* Invalid lit/len or distance code in fixed/dynamic block */ #define ERR_CODE_DIS_TOO_FAR_BACK -10 /* Distance too far back in fixed or dynamic block */ /* Common Error code definitions */ #define ERR_CODE_OVERFLOW_ERROR -11 /**< Error Code constant value for overflow error */ #define ERR_CODE_SOFT_ERROR -12 /**< Error Code constant value for soft error */ #define ERR_CODE_FATAL_ERROR -13 /**< Error Code constant value for hard/fatal error */ #define ERR_CODE_COMP_OUTPUT_CORRUPTION -14 /**< Error Code constant for compression output corruption */ #define ERR_CODE_HW_INCOMPLETE_FILE -15 
/**< Error Code constant value for incomplete file hardware error */ #define ERR_CODE_SSM_ERROR -16 /**< Error Code constant value for error detected by SSM e.g. slice hang */ #define ERR_CODE_ENDPOINT_ERROR -17 /**< Error Code constant value for error detected by PCIe Endpoint, e.g. push * data error */ #define ERR_CODE_CNV_ERROR -18 /**< Error Code constant value for cnv failure */ #define ERR_CODE_EMPTY_DYM_BLOCK -19 /**< Error Code constant value for submission of empty dynamic stored block to * slice */ -#define ERR_CODE_EXCEED_MAX_REQ_TIME -24 -/**< Error Code constant for exceeding max request time */ - -#define ERR_CODE_KPT_CRYPTO_SERVICE_FAIL_INVALID_HANDLE -20 -/**< Error Code constant for invalid handle in kpt crypto service */ - -#define ERR_CODE_KPT_CRYPTO_SERVICE_FAIL_HMAC_FAILED -21 -/**< Error Code constant for failed hmac in kpt crypto service */ - -#define ERR_CODE_KPT_CRYPTO_SERVICE_FAIL_INVALID_WRAPPING_ALGO -22 -/**< Error Code constant for invalid wrapping algo in kpt crypto service */ - -#define ERR_CODE_KPT_DRNG_SEED_NOT_LOAD -23 -/**< Error Code constant for no drng seed is not loaded in kpt ecdsa signrs -/service */ +#define ERR_CODE_REGION_OUT_OF_BOUNDS -21 +/**< Error returned when decompression ends before the specified partial + * decompression region was produced */ #define ERR_CODE_MISC_ERROR -50 /**< Error Code constant for error detected but the source * of error is not recognized */ /** ***************************************************************************** * @ingroup icp_qat_fw_comn * Slice types for building of the processing chain within the content * descriptor * * @description * Enumeration used to indicate the ids of the slice types through which * data will pass. 
* * A logical slice is not a hardware slice but is a software FSM * performing the actions of a slice * *****************************************************************************/ typedef enum { ICP_QAT_FW_SLICE_NULL = 0, /**< NULL slice type */ ICP_QAT_FW_SLICE_CIPHER = 1, /**< CIPHER slice type */ ICP_QAT_FW_SLICE_AUTH = 2, /**< AUTH slice type */ ICP_QAT_FW_SLICE_DRAM_RD = 3, /**< DRAM_RD Logical slice type */ ICP_QAT_FW_SLICE_DRAM_WR = 4, /**< DRAM_WR Logical slice type */ ICP_QAT_FW_SLICE_COMP = 5, /**< Compression slice type */ ICP_QAT_FW_SLICE_XLAT = 6, /**< Translator slice type */ ICP_QAT_FW_SLICE_DELIMITER /**< End delimiter */ } icp_qat_fw_slice_t; #endif /* _ICP_QAT_FW_H_ */ diff --git a/sys/dev/qat/qat_api/firmware/include/icp_qat_fw_comp.h b/sys/dev/qat/qat_api/firmware/include/icp_qat_fw_comp.h index 5edf7022ee1d..fe1b7ad55de8 100644 --- a/sys/dev/qat/qat_api/firmware/include/icp_qat_fw_comp.h +++ b/sys/dev/qat/qat_api/firmware/include/icp_qat_fw_comp.h @@ -1,1146 +1,1146 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright(c) 2007-2022 Intel Corporation */ /** ***************************************************************************** * @file icp_qat_fw_comp.h * @defgroup icp_qat_fw_comp ICP QAT FW Compression Service * Interface Definitions * @ingroup icp_qat_fw * @description * This file documents structs used to provide the interface to the * Compression QAT FW service * *****************************************************************************/ #ifndef _ICP_QAT_FW_COMP_H_ #define _ICP_QAT_FW_COMP_H_ /* ****************************************************************************** * Include local header files ****************************************************************************** */ #include "icp_qat_fw.h" /** ***************************************************************************** * @ingroup icp_qat_fw_comp * Definition of the Compression command types * @description * Enumeration which is used to indicate the ids of 
functions * that are exposed by the Compression QAT FW service * *****************************************************************************/ typedef enum { ICP_QAT_FW_COMP_CMD_STATIC = 0, /*!< Static Compress Request */ ICP_QAT_FW_COMP_CMD_DYNAMIC = 1, /*!< Dynamic Compress Request */ ICP_QAT_FW_COMP_CMD_DECOMPRESS = 2, /*!< Decompress Request */ ICP_QAT_FW_COMP_CMD_DELIMITER /**< Delimiter type */ } icp_qat_fw_comp_cmd_id_t; + /* * REQUEST FLAGS IN COMMON COMPRESSION * In common message it is named as SERVICE SPECIFIC FLAGS. * * + ===== + ------ + ------ + --- + ----- + ----- + ----- + -- + ---- + --- + * | Bit | 15 - 8 | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0 | * + ===== + ------ + ----- + --- + ----- + ----- + ----- + -- + ---- + --- + * | Flags | Rsvd | Dis. |Resvd| Dis. | Enh. |Auto |Sess| Rsvd | Rsvd| * | | Bits | secure | =0 | Type0 | ASB |Select |Type| = 0 | = 0 | * | | = 0 |RAM use | | Header | |Best | | | | * | | |as intmd| | | | | | | | * | | | buf | | | | | | | | * + ===== + ------ + ----- + --- + ------ + ----- + ----- + -- + ---- + --- + * Note: For QAT 2.0 Disable Secure Ram, DisType0 Header and Enhanced ASB bits * are don't care. i.e., these features are removed from QAT 2.0. 
*/ -/** Flag usage */ + +/**< Flag usage */ #define ICP_QAT_FW_COMP_STATELESS_SESSION 0 /**< @ingroup icp_qat_fw_comp * Flag representing that session is stateless */ #define ICP_QAT_FW_COMP_STATEFUL_SESSION 1 /**< @ingroup icp_qat_fw_comp * Flag representing that session is stateful */ #define ICP_QAT_FW_COMP_NOT_AUTO_SELECT_BEST 0 /**< @ingroup icp_qat_fw_comp * Flag representing that autoselectbest is NOT used */ #define ICP_QAT_FW_COMP_AUTO_SELECT_BEST 1 /**< @ingroup icp_qat_fw_comp * Flag representing that autoselectbest is used */ #define ICP_QAT_FW_COMP_NOT_ENH_AUTO_SELECT_BEST 0 /**< @ingroup icp_qat_fw_comp * Flag representing that enhanced autoselectbest is NOT used */ #define ICP_QAT_FW_COMP_ENH_AUTO_SELECT_BEST 1 /**< @ingroup icp_qat_fw_comp * Flag representing that enhanced autoselectbest is used */ #define ICP_QAT_FW_COMP_NOT_DISABLE_TYPE0_ENH_AUTO_SELECT_BEST 0 /**< @ingroup icp_qat_fw_comp * Flag representing that enhanced autoselectbest is NOT used */ #define ICP_QAT_FW_COMP_DISABLE_TYPE0_ENH_AUTO_SELECT_BEST 1 /**< @ingroup icp_qat_fw_comp * Flag representing that enhanced autoselectbest is used */ #define ICP_QAT_FW_COMP_DISABLE_SECURE_RAM_USED_AS_INTMD_BUF 1 /**< @ingroup icp_qat_fw_comp * Flag representing secure RAM from being used as * an intermediate buffer is DISABLED. */ #define ICP_QAT_FW_COMP_ENABLE_SECURE_RAM_USED_AS_INTMD_BUF 0 /**< @ingroup icp_qat_fw_comp * Flag representing secure RAM from being used as * an intermediate buffer is ENABLED. 
*/ -/** Flag mask & bit position */ +/**< Flag mask & bit position */ #define ICP_QAT_FW_COMP_SESSION_TYPE_BITPOS 2 /**< @ingroup icp_qat_fw_comp * Starting bit position for the session type */ #define ICP_QAT_FW_COMP_SESSION_TYPE_MASK 0x1 /**< @ingroup icp_qat_fw_comp * One bit mask used to determine the session type */ #define ICP_QAT_FW_COMP_AUTO_SELECT_BEST_BITPOS 3 /**< @ingroup icp_qat_fw_comp * Starting bit position for auto select best */ #define ICP_QAT_FW_COMP_AUTO_SELECT_BEST_MASK 0x1 /**< @ingroup icp_qat_fw_comp * One bit mask for auto select best */ #define ICP_QAT_FW_COMP_ENHANCED_AUTO_SELECT_BEST_BITPOS 4 /**< @ingroup icp_qat_fw_comp * Starting bit position for enhanced auto select best */ #define ICP_QAT_FW_COMP_ENHANCED_AUTO_SELECT_BEST_MASK 0x1 /**< @ingroup icp_qat_fw_comp * One bit mask for enhanced auto select best */ #define ICP_QAT_FW_COMP_RET_DISABLE_TYPE0_HEADER_DATA_BITPOS 5 /**< @ingroup icp_qat_fw_comp * Starting bit position for disabling type zero header write back when Enhanced autoselect best is enabled. If set firmware does not return type0 store block header, only copies src to dest. (if best output is Type0) */ #define ICP_QAT_FW_COMP_RET_DISABLE_TYPE0_HEADER_DATA_MASK 0x1 /**< @ingroup icp_qat_fw_comp * One bit mask for auto select best */ #define ICP_QAT_FW_COMP_DISABLE_SECURE_RAM_AS_INTMD_BUF_BITPOS 7 /**< @ingroup icp_qat_fw_comp * Starting bit position for flag used to disable secure ram from - * being used as an intermediate buffer. */ + being used as an intermediate buffer. */ #define ICP_QAT_FW_COMP_DISABLE_SECURE_RAM_AS_INTMD_BUF_MASK 0x1 /**< @ingroup icp_qat_fw_comp * One bit mask for disable secure ram for use as an intermediate buffer. */ /** ****************************************************************************** * @ingroup icp_qat_fw_comp * * @description * Macro used for the generation of the command flags for Compression Request. * This should always be used for the generation of the flags. 
No direct sets or * masks should be performed on the flags data * * @param sesstype Session Type * @param autoselect AutoSelectBest * @enhanced_asb Enhanced AutoSelectBest * @ret_uncomp RetUnCompressed * @secure_ram Secure Ram usage * - *********************************************************************************/ + ******************************************************************************/ #define ICP_QAT_FW_COMP_FLAGS_BUILD( \ sesstype, autoselect, enhanced_asb, ret_uncomp, secure_ram) \ (((sesstype & ICP_QAT_FW_COMP_SESSION_TYPE_MASK) \ << ICP_QAT_FW_COMP_SESSION_TYPE_BITPOS) | \ ((autoselect & ICP_QAT_FW_COMP_AUTO_SELECT_BEST_MASK) \ << ICP_QAT_FW_COMP_AUTO_SELECT_BEST_BITPOS) | \ ((enhanced_asb & ICP_QAT_FW_COMP_ENHANCED_AUTO_SELECT_BEST_MASK) \ << ICP_QAT_FW_COMP_ENHANCED_AUTO_SELECT_BEST_BITPOS) | \ ((ret_uncomp & ICP_QAT_FW_COMP_RET_DISABLE_TYPE0_HEADER_DATA_MASK) \ << ICP_QAT_FW_COMP_RET_DISABLE_TYPE0_HEADER_DATA_BITPOS) | \ ((secure_ram & ICP_QAT_FW_COMP_DISABLE_SECURE_RAM_AS_INTMD_BUF_MASK) \ << ICP_QAT_FW_COMP_DISABLE_SECURE_RAM_AS_INTMD_BUF_BITPOS)) /** ****************************************************************************** * @ingroup icp_qat_fw_comp * * @description * Macro used for the generation of the command flags for Compression Request. * This should always be used for the generation of the flags. No direct sets or * masks should be performed on the flags data * * @param sesstype Session Type * @param autoselect AutoSelectBest * Selects between compressed and uncompressed output. * No distinction made between static and dynamic * compressed data. 
* *********************************************************************************/ #define ICP_QAT_FW_COMP_20_FLAGS_BUILD(sesstype, autoselect) \ (((sesstype & ICP_QAT_FW_COMP_SESSION_TYPE_MASK) \ << ICP_QAT_FW_COMP_SESSION_TYPE_BITPOS) | \ ((autoselect & ICP_QAT_FW_COMP_AUTO_SELECT_BEST_MASK) \ << ICP_QAT_FW_COMP_AUTO_SELECT_BEST_BITPOS)) /** ****************************************************************************** * @ingroup icp_qat_fw_comp * * @description * Macro for extraction of the session type bit * * @param flags Flags to extract the session type bit from * - ******************************************************************************/ + *****************************************************************************/ #define ICP_QAT_FW_COMP_SESSION_TYPE_GET(flags) \ QAT_FIELD_GET(flags, \ ICP_QAT_FW_COMP_SESSION_TYPE_BITPOS, \ ICP_QAT_FW_COMP_SESSION_TYPE_MASK) /** ****************************************************************************** * @ingroup icp_qat_fw_comp * * @description * Macro for extraction of the autoSelectBest bit * * @param flags Flags to extract the autoSelectBest bit from * - ******************************************************************************/ + *****************************************************************************/ #define ICP_QAT_FW_COMP_AUTO_SELECT_BEST_GET(flags) \ QAT_FIELD_GET(flags, \ ICP_QAT_FW_COMP_AUTO_SELECT_BEST_BITPOS, \ ICP_QAT_FW_COMP_AUTO_SELECT_BEST_MASK) /** ****************************************************************************** * @ingroup icp_qat_fw_comp * * @description * Macro for extraction of the enhanced asb bit * * @param flags Flags to extract the enhanced asb bit from * - ******************************************************************************/ + *****************************************************************************/ #define ICP_QAT_FW_COMP_EN_ASB_GET(flags) \ QAT_FIELD_GET(flags, \ ICP_QAT_FW_COMP_ENHANCED_AUTO_SELECT_BEST_BITPOS, \ 
ICP_QAT_FW_COMP_ENHANCED_AUTO_SELECT_BEST_MASK) /** ****************************************************************************** * @ingroup icp_qat_fw_comp * * @description * Macro for extraction of the RetUncomp bit * * @param flags Flags to extract the Ret Uncomp bit from * - ******************************************************************************/ + *****************************************************************************/ #define ICP_QAT_FW_COMP_RET_UNCOMP_GET(flags) \ QAT_FIELD_GET(flags, \ ICP_QAT_FW_COMP_RET_DISABLE_TYPE0_HEADER_DATA_BITPOS, \ ICP_QAT_FW_COMP_RET_DISABLE_TYPE0_HEADER_DATA_MASK) /** ****************************************************************************** * @ingroup icp_qat_fw_comp * * @description * Macro for extraction of the Secure Ram usage bit * * @param flags Flags to extract the Secure Ram usage from * - ******************************************************************************/ + *****************************************************************************/ #define ICP_QAT_FW_COMP_SECURE_RAM_USE_GET(flags) \ QAT_FIELD_GET(flags, \ ICP_QAT_FW_COMP_DISABLE_SECURE_RAM_AS_INTMD_BUF_BITPOS, \ ICP_QAT_FW_COMP_DISABLE_SECURE_RAM_AS_INTMD_BUF_MASK) /** - ****************************************************************************** + ***************************************************************************** * @ingroup icp_qat_fw_comp * Definition of the compression header cd pars block * @description * Definition of the compression processing cd pars block. * The structure is a service-specific implementation of the common * structure. 
- ******************************************************************************/ + *****************************************************************************/ typedef union icp_qat_fw_comp_req_hdr_cd_pars_s { /**< LWs 2-5 */ struct { uint64_t content_desc_addr; /**< Address of the content descriptor */ uint16_t content_desc_resrvd1; /**< Content descriptor reserved field */ uint8_t content_desc_params_sz; /**< Size of the content descriptor parameters in quad words. - * These - * parameters describe the session setup configuration info for - * the - * slices that this request relies upon i.e. the configuration - * word and - * cipher key needed by the cipher slice if there is a request - * for - * cipher - * processing. */ + * These parameters describe the session setup configuration + * info for the slices that this request relies upon i.e. the + * configuration word and cipher key needed by the cipher slice + * if there is a request for cipher processing. */ uint8_t content_desc_hdr_resrvd2; /**< Content descriptor reserved field */ uint32_t content_desc_resrvd3; /**< Content descriptor reserved field */ } s; struct { uint32_t comp_slice_cfg_word[ICP_QAT_FW_NUM_LONGWORDS_2]; /* Compression Slice Config Word */ uint32_t content_desc_resrvd4; /**< Content descriptor reserved field */ + } sl; } icp_qat_fw_comp_req_hdr_cd_pars_t; /** - ****************************************************************************** + ***************************************************************************** * @ingroup icp_qat_fw_comp * Definition of the compression request parameters block * @description * Definition of the compression processing request parameters block. * The structure below forms part of the Compression + Translation - * Parameters block spanning LWs 14-21, thus differing from the common + * Parameters block spanning LWs 14-23, thus differing from the common * base Parameters block structure. Unused fields must be set to 0. 
* - ******************************************************************************/ + *****************************************************************************/ typedef struct icp_qat_fw_comp_req_params_s { /**< LW 14 */ uint32_t comp_len; /**< Size of input to process in bytes Note: Only EOP requests can be - * odd - * for decompression. IA must set LSB to zero for odd sized intermediate - * inputs */ + * odd for decompression. IA must set LSB to zero for odd sized + * intermediate inputs */ /**< LW 15 */ uint32_t out_buffer_sz; /**< Size of output buffer in bytes */ /**< LW 16 */ union { struct { /** LW 16 */ uint32_t initial_crc32; /**< CRC for processed bytes (input byte count) */ /** LW 17 */ uint32_t initial_adler; /**< Adler for processed bytes (input byte count) */ } legacy; /** LW 16-17 */ uint64_t crc_data_addr; /**< CRC data structure pointer */ } crc; - /** LW 18 */ + /**< LW 18 */ uint32_t req_par_flags; - /** LW 19 */ + /**< LW 19 */ uint32_t rsrvd; } icp_qat_fw_comp_req_params_t; /** ****************************************************************************** * @ingroup icp_qat_fw_comp * * @description * Macro used for the generation of the request parameter flags. * This should always be used for the generation of the flags. 
No direct sets or * masks should be performed on the flags data * * @param sop SOP Flag, 0 restore, 1 don't restore * @param eop EOP Flag, 0 restore, 1 don't restore * @param bfinal Set bfinal in this block or not * @param cnv Whether internal CNV check is to be performed * * ICP_QAT_FW_COMP_NO_CNV * * ICP_QAT_FW_COMP_CNV * @param cnvnr Whether internal CNV recovery is to be performed * * ICP_QAT_FW_COMP_NO_CNV_RECOVERY * * ICP_QAT_FW_COMP_CNV_RECOVERY + * @param cnvdfx Whether CNV error injection is to be performed + * * ICP_QAT_FW_COMP_NO_CNV_DFX + * * ICP_QAT_FW_COMP_CNV_DFX * @param crc CRC Mode Flag - 0 legacy, 1 crc data struct - * *****************************************************************************/ #define ICP_QAT_FW_COMP_REQ_PARAM_FLAGS_BUILD( \ sop, eop, bfinal, cnv, cnvnr, cnvdfx, crc) \ (((sop & ICP_QAT_FW_COMP_SOP_MASK) << ICP_QAT_FW_COMP_SOP_BITPOS) | \ ((eop & ICP_QAT_FW_COMP_EOP_MASK) << ICP_QAT_FW_COMP_EOP_BITPOS) | \ ((bfinal & ICP_QAT_FW_COMP_BFINAL_MASK) \ << ICP_QAT_FW_COMP_BFINAL_BITPOS) | \ ((cnv & ICP_QAT_FW_COMP_CNV_MASK) << ICP_QAT_FW_COMP_CNV_BITPOS) | \ ((cnvnr & ICP_QAT_FW_COMP_CNVNR_MASK) \ << ICP_QAT_FW_COMP_CNVNR_BITPOS) | \ ((cnvdfx & ICP_QAT_FW_COMP_CNV_DFX_MASK) \ << ICP_QAT_FW_COMP_CNV_DFX_BITPOS) | \ ((crc & ICP_QAT_FW_COMP_CRC_MODE_MASK) \ << ICP_QAT_FW_COMP_CRC_MODE_BITPOS)) + +/* + * REQUEST FLAGS IN REQUEST PARAMETERS COMPRESSION + * + * +=====+-----+----- + --- + --- +-----+ --- + ----- + --- + ---- + -- + -- + + * | Bit |31-24| 20 | 19 | 18 | 17 | 16 | 15-7 | 6 | 5-2 | 1 | 0 | + * +=====+-----+----- + --- + ----+-----+ --- + ----- + --- + ---- + -- + -- + + * |Flags|Resvd|xxHash| CRC | CNV |CNVNR| CNV | Resvd |BFin | Resvd|EOP |SOP | + * | |=0 |acc | MODE| DFX | | | =0 | | =0 | | | + * | | | | | | | | | | | | | + * +=====+-----+----- + --- + ----+-----+ --- + ----- + --- + ---- + -- + -- + + */ + + +/** +***************************************************************************** +* @ingroup icp_qat_fw_comp +* 
Definition of the additional QAT2.0 Compression command types +* @description +* Enumeration which is used to indicate the ids of functions +* that are exposed by the Compression QAT FW service +* +*****************************************************************************/ +typedef enum { + ICP_QAT_FW_COMP_20_CMD_LZ4_COMPRESS = 3, + /*!< LZ4 Compress Request */ + + ICP_QAT_FW_COMP_20_CMD_LZ4_DECOMPRESS = 4, + /*!< LZ4 Decompress Request */ + + ICP_QAT_FW_COMP_20_CMD_LZ4S_COMPRESS = 5, + /*!< LZ4S Compress Request */ + + ICP_QAT_FW_COMP_20_CMD_LZ4S_DECOMPRESS = 6, + /*!< LZ4S Decompress Request */ + + ICP_QAT_FW_COMP_20_CMD_XP10_COMPRESS = 7, + /*!< XP10 Compress Request -- Placeholder */ + + ICP_QAT_FW_COMP_20_CMD_XP10_DECOMPRESS = 8, + /*!< XP10 Decompress Request -- Placeholder */ + + ICP_QAT_FW_COMP_20_CMD_DELIMITER + /**< Delimiter type */ + +} icp_qat_fw_comp_20_cmd_id_t; + + /* * REQUEST FLAGS IN REQUEST PARAMETERS COMPRESSION * - * + ===== + ----- + --- +-----+-------+ --- + ---------+ --- + ---- + --- + - * --- + - * | Bit | 31-20 | 19 | 18 | 17 | 16 | 15 - 7 | 6 | 5-2 | 1 | 0 - * | - * + ===== + ----- + --- +-----+-------+ --- + ---------+ --- | ---- + --- + - * --- + - * | Flags | Resvd | CRC |Resvd| CNVNR | CNV |Resvd Bits|BFin |Resvd | EOP | - * SOP | - * | | =0 | Mode| =0 | | | =0 | | =0 | | | - * | | | | | | | | | | | | - * + ===== + ----- + --- +-----+-------+ --- + ---------+ --- | ---- + --- + - * --- + + * + ===== + ----- + --- +-----+-------+ --- + ---------+ --- + ---- + --- + --- + + * | Bit | 31-20 | 19 | 18 | 17 | 16 | 15 - 7 | 6 | 5-2 | 1 | 0 | + * + ===== + ----- + --- +-----+-------+ --- + ---------+ --- | ---- + --- + --- + + * | Flags | Resvd | CRC | CNV | CNVNR | CNV |Resvd Bits|BFin |Resvd | EOP | SOP | + * | | =0 | Mode| DFX | | | =0 | | =0 | | | + * | | | | | | | | | | | | + * + ===== + ----- + --- +-----+-------+ --- + ---------+ --- | ---- + --- + --- + */ #define ICP_QAT_FW_COMP_NOT_SOP 0 /**< @ingroup icp_qat_fw_comp * Flag 
representing that a request is NOT Start of Packet */ #define ICP_QAT_FW_COMP_SOP 1 /**< @ingroup icp_qat_fw_comp - * Flag representing that a request IS Start of Packet */ + * * Flag representing that a request IS Start of Packet */ #define ICP_QAT_FW_COMP_NOT_EOP 0 /**< @ingroup icp_qat_fw_comp - * Flag representing that a request is NOT Start of Packet */ + * Flag representing that a request is NOT Start of Packet */ #define ICP_QAT_FW_COMP_EOP 1 /**< @ingroup icp_qat_fw_comp - * Flag representing that a request IS End of Packet */ + * Flag representing that a request IS End of Packet */ #define ICP_QAT_FW_COMP_NOT_BFINAL 0 /**< @ingroup icp_qat_fw_comp * Flag representing to indicate firmware this is not the last block */ #define ICP_QAT_FW_COMP_BFINAL 1 /**< @ingroup icp_qat_fw_comp * Flag representing to indicate firmware this is the last block */ #define ICP_QAT_FW_COMP_NO_CNV 0 /**< @ingroup icp_qat_fw_comp * Flag indicating that NO cnv check is to be performed on the request */ #define ICP_QAT_FW_COMP_CNV 1 /**< @ingroup icp_qat_fw_comp * Flag indicating that a cnv check IS to be performed on the request */ #define ICP_QAT_FW_COMP_NO_CNV_RECOVERY 0 /**< @ingroup icp_qat_fw_comp * Flag indicating that NO cnv recovery is to be performed on the request */ #define ICP_QAT_FW_COMP_CNV_RECOVERY 1 /**< @ingroup icp_qat_fw_comp * Flag indicating that a cnv recovery is to be performed on the request */ #define ICP_QAT_FW_COMP_NO_CNV_DFX 0 /**< @ingroup icp_qat_fw_comp * Flag indicating that NO CNV inject error is to be performed on the request */ #define ICP_QAT_FW_COMP_CNV_DFX 1 /**< @ingroup icp_qat_fw_comp * Flag indicating that CNV inject error is to be performed on the request */ #define ICP_QAT_FW_COMP_CRC_MODE_LEGACY 0 /**< @ingroup icp_qat_fw_comp * Flag representing to use the legacy CRC mode */ #define ICP_QAT_FW_COMP_CRC_MODE_E2E 1 /**< @ingroup icp_qat_fw_comp * Flag representing to use the external CRC data struct */ +#define 
ICP_QAT_FW_COMP_NO_XXHASH_ACC 0 +/**< @ingroup icp_qat_fw_comp + * * Flag indicating that xxHash will NOT be accumulated across requests */ + +#define ICP_QAT_FW_COMP_XXHASH_ACC 1 +/**< @ingroup icp_qat_fw_comp + * * Flag indicating that xxHash WILL be accumulated across requests */ + +#define ICP_QAT_FW_COMP_PART_DECOMP 1 +/**< @ingroup icp_qat_fw_comp + * * Flag indicating to perform partial de-compressing */ + +#define ICP_QAT_FW_COMP_NO_PART_DECOMP 0 +/**< @ingroup icp_qat_fw_comp + * * Flag indicating to not perform partial de-compressing */ + +#define ICP_QAT_FW_COMP_ZEROPAD 1 +/**< @ingroup icp_qat_fw_comp + * * Flag indicating to perform zero-padding in compression request */ + +#define ICP_QAT_FW_COMP_NO_ZEROPAD 0 +/**< @ingroup icp_qat_fw_comp + * * Flag indicating to not perform zero-padding in compression request */ + #define ICP_QAT_FW_COMP_SOP_BITPOS 0 /**< @ingroup icp_qat_fw_comp * Starting bit position for SOP */ #define ICP_QAT_FW_COMP_SOP_MASK 0x1 /**< @ingroup icp_qat_fw_comp * One bit mask used to determine SOP */ #define ICP_QAT_FW_COMP_EOP_BITPOS 1 /**< @ingroup icp_qat_fw_comp * Starting bit position for EOP */ #define ICP_QAT_FW_COMP_EOP_MASK 0x1 /**< @ingroup icp_qat_fw_comp * One bit mask used to determine EOP */ #define ICP_QAT_FW_COMP_BFINAL_MASK 0x1 /**< @ingroup icp_qat_fw_comp * One bit mask for the bfinal bit */ #define ICP_QAT_FW_COMP_BFINAL_BITPOS 6 /**< @ingroup icp_qat_fw_comp * Starting bit position for the bfinal bit */ #define ICP_QAT_FW_COMP_CNV_MASK 0x1 /**< @ingroup icp_qat_fw_comp * One bit mask for the CNV bit */ #define ICP_QAT_FW_COMP_CNV_BITPOS 16 /**< @ingroup icp_qat_fw_comp * Starting bit position for the CNV bit */ -#define ICP_QAT_FW_COMP_CNV_RECOVERY_MASK 0x1 -/**< @ingroup icp_qat_fw_comp - * One bit mask for the CNV Recovery bit */ - -#define ICP_QAT_FW_COMP_CNV_RECOVERY_BITPOS 17 -/**< @ingroup icp_qat_fw_comp - * Starting bit position for the CNV Recovery bit */ - #define ICP_QAT_FW_COMP_CNVNR_MASK 0x1 /**< 
@ingroup icp_qat_fw_comp * One bit mask for the CNV Recovery bit */ #define ICP_QAT_FW_COMP_CNVNR_BITPOS 17 /**< @ingroup icp_qat_fw_comp * Starting bit position for the CNV Recovery bit */ #define ICP_QAT_FW_COMP_CNV_DFX_BITPOS 18 /**< @ingroup icp_qat_fw_comp * Starting bit position for the CNV DFX bit */ #define ICP_QAT_FW_COMP_CNV_DFX_MASK 0x1 /**< @ingroup icp_qat_fw_comp * One bit mask for the CNV DFX bit */ #define ICP_QAT_FW_COMP_CRC_MODE_BITPOS 19 /**< @ingroup icp_qat_fw_comp * Starting bit position for CRC mode */ #define ICP_QAT_FW_COMP_CRC_MODE_MASK 0x1 /**< @ingroup icp_qat_fw_comp * One bit mask used to determine CRC mode */ #define ICP_QAT_FW_COMP_XXHASH_ACC_MODE_BITPOS 20 /**< @ingroup icp_qat_fw_comp * Starting bit position for xxHash accumulate mode */ #define ICP_QAT_FW_COMP_XXHASH_ACC_MODE_MASK 0x1 /**< @ingroup icp_qat_fw_comp * One bit mask used to determine xxHash accumulate mode */ +#define ICP_QAT_FW_COMP_PART_DECOMP_BITPOS 27 +/**< @ingroup icp_qat_fw_comp + * Starting bit position for the partial de-compress bit */ + +#define ICP_QAT_FW_COMP_PART_DECOMP_MASK 0x1 +/**< @ingroup icp_qat_fw_comp + * One bit mask for the partial de-compress bit */ + +#define ICP_QAT_FW_COMP_ZEROPAD_BITPOS 26 +/**< @ingroup icp_qat_fw_comp + * Starting bit position for the partial zero-pad bit */ + +#define ICP_QAT_FW_COMP_ZEROPAD_MASK 0x1 +/**< @ingroup icp_qat_fw_comp + * One bit mask for the zero-pad bit */ + /** ****************************************************************************** * @ingroup icp_qat_fw_comp * * @description * Macro for extraction of the SOP bit * * @param flags Flags to extract the SOP bit from * - ******************************************************************************/ + *****************************************************************************/ #define ICP_QAT_FW_COMP_SOP_GET(flags) \ QAT_FIELD_GET(flags, \ ICP_QAT_FW_COMP_SOP_BITPOS, \ ICP_QAT_FW_COMP_SOP_MASK) /** 
****************************************************************************** * @ingroup icp_qat_fw_comp * * @description * Macro for extraction of the EOP bit * * @param flags Flags to extract the EOP bit from * *****************************************************************************/ #define ICP_QAT_FW_COMP_EOP_GET(flags) \ QAT_FIELD_GET(flags, \ ICP_QAT_FW_COMP_EOP_BITPOS, \ ICP_QAT_FW_COMP_EOP_MASK) - /** + + /** + ****************************************************************************** * @ingroup icp_qat_fw_comp * * @description * Macro for extraction of the bfinal bit * * @param flags Flags to extract the bfinal bit from * - ******************************************************************************/ + *****************************************************************************/ #define ICP_QAT_FW_COMP_BFINAL_GET(flags) \ QAT_FIELD_GET(flags, \ ICP_QAT_FW_COMP_BFINAL_BITPOS, \ ICP_QAT_FW_COMP_BFINAL_MASK) /** ****************************************************************************** * @ingroup icp_qat_fw_comp * * @description * Macro for extraction of the CNV bit * * @param flags Flag set containing the CNV flag * *****************************************************************************/ #define ICP_QAT_FW_COMP_CNV_GET(flags) \ QAT_FIELD_GET(flags, \ ICP_QAT_FW_COMP_CNV_BITPOS, \ ICP_QAT_FW_COMP_CNV_MASK) /** ****************************************************************************** * @ingroup icp_qat_fw_comp * * @description * Macro for extraction of the crc mode bit * * @param flags Flags to extract the crc mode bit from * ******************************************************************************/ #define ICP_QAT_FW_COMP_CRC_MODE_GET(flags) \ QAT_FIELD_GET(flags, \ ICP_QAT_FW_COMP_CRC_MODE_BITPOS, \ ICP_QAT_FW_COMP_CRC_MODE_MASK) /** ****************************************************************************** * @ingroup icp_qat_fw_comp * * @description * Macro for extraction of the xxHash accumulate mode bit * * @param flags 
Flags to extract the xxHash accumulate mode bit from * *****************************************************************************/ #define ICP_QAT_FW_COMP_XXHASH_ACC_MODE_GET(flags) \ QAT_FIELD_GET(flags, \ ICP_QAT_FW_COMP_XXHASH_ACC_MODE_BITPOS, \ ICP_QAT_FW_COMP_XXHASH_ACC_MODE_MASK) /** ****************************************************************************** * @ingroup icp_qat_fw_comp * * @description * Macro for setting of the xxHash accumulate mode bit * * @param flags Flags to set the xxHash accumulate mode bit to * @param val xxHash accumulate mode to set * *****************************************************************************/ #define ICP_QAT_FW_COMP_XXHASH_ACC_MODE_SET(flags, val) \ QAT_FIELD_SET(flags, \ val, \ ICP_QAT_FW_COMP_XXHASH_ACC_MODE_BITPOS, \ ICP_QAT_FW_COMP_XXHASH_ACC_MODE_MASK) +/** + ****************************************************************************** + * @ingroup icp_qat_fw_comp + * + * @description + * Macro for extraction of the partial de-compress on/off bit + * + * @param flags Flags to extract the partial de-compress on/off bit from + * + ******************************************************************************/ +#define ICP_QAT_FW_COMP_PART_DECOMP_GET(flags) \ + QAT_FIELD_GET(flags, \ + ICP_QAT_FW_COMP_PART_DECOMP_BITPOS, \ + ICP_QAT_FW_COMP_PART_DECOMP_MASK) + +/** + ****************************************************************************** + * @ingroup icp_qat_fw_comp + * + * @description + * Macro for setting of the partial de-compress on/off bit + * + * @param flags Flags to set the partial de-compress on/off bit to + * @param val partial de-compress on/off bit + * + *****************************************************************************/ +#define ICP_QAT_FW_COMP_PART_DECOMP_SET(flags, val) \ + QAT_FIELD_SET(flags, \ + val, \ + ICP_QAT_FW_COMP_PART_DECOMP_BITPOS, \ + ICP_QAT_FW_COMP_PART_DECOMP_MASK) + +/** + ****************************************************************************** + * 
@ingroup icp_qat_fw_comp + * + * @description + * Macro for extraction of the zero padding on/off bit + * + * @param flags Flags to extract the zero padding on/off bit from + * + ******************************************************************************/ +#define ICP_QAT_FW_COMP_ZEROPAD_GET(flags) \ + QAT_FIELD_GET(flags, \ + ICP_QAT_FW_COMP_ZEROPAD_BITPOS, \ + ICP_QAT_FW_COMP_ZEROPAD_MASK) + +/** + ****************************************************************************** + * @ingroup icp_qat_fw_comp + * + * @description + * Macro for setting of the zero-padding on/off bit + * + * @param flags Flags to set the zero-padding on/off bit to + * @param val zero-padding on/off bit + * + *****************************************************************************/ +#define ICP_QAT_FW_COMP_ZEROPAD_SET(flags, val) \ + QAT_FIELD_SET(flags, \ + val, \ + ICP_QAT_FW_COMP_ZEROPAD_BITPOS, \ + ICP_QAT_FW_COMP_ZEROPAD_MASK) + /** ****************************************************************************** * @ingroup icp_qat_fw_comp * Definition of the translator request parameters block * @description * Definition of the translator processing request parameters block * The structure below forms part of the Compression + Translation - * Parameters block spanning LWs 20-21, thus differing from the common + * Parameters block spanning LWs 14-23, thus differing from the common * base Parameters block structure. Unused fields must be set to 0. * - ******************************************************************************/ + *****************************************************************************/ typedef struct icp_qat_fw_xlt_req_params_s { /**< LWs 20-21 */ uint64_t inter_buff_ptr; /**< This field specifies the physical address of an intermediate - * buffer SGL array. The array contains a pair of 64-bit - * intermediate buffer pointers to SGL buffer descriptors, one pair - * per CPM. 
Please refer to the CPM1.6 Firmware Interface HLD - * specification for more details. - * Placeholder for QAT2.0. */ + * buffer SGL array. The array contains a pair of 64-bit + * intermediate buffer pointers to SGL buffer descriptors, one pair + * per CPM. Please refer to the CPM1.6 Firmware Interface HLD + * specification for more details. */ + } icp_qat_fw_xlt_req_params_t; /** - ****************************************************************************** + ***************************************************************************** * @ingroup icp_qat_fw_comp * Compression header of the content descriptor block * @description * Definition of the service-specific compression control block header * structure. The compression parameters are defined per algorithm * and are located in the icp_qat_hw.h file. This compression * cd block spans LWs 24-29, forming part of the compression + translation * cd block, thus differing from the common base content descriptor * structure. * - ******************************************************************************/ + *****************************************************************************/ typedef struct icp_qat_fw_comp_cd_hdr_s { /**< LW 24 */ uint16_t ram_bank_flags; /**< Flags to show which ram banks to access */ uint8_t comp_cfg_offset; /**< Quad word offset from the content descriptor parameters address to - * the - * parameters for the compression processing */ + * the parameters for the compression processing */ uint8_t next_curr_id; /**< This field combines the next and current id (each four bits) - - * the next id is the most significant nibble. - * Next Id: Set to the next slice to pass the compressed data through. - * Set to ICP_QAT_FW_SLICE_DRAM_WR if the data is not to go through - * anymore slices after compression - * Current Id: Initialised with the compression slice type */ + * the next id is the most significant nibble. + * Next Id: Set to the next slice to pass the compressed data through. 
+ * Set to ICP_QAT_FW_SLICE_DRAM_WR if the data is not to go through + * anymore slices after compression + * Current Id: Initialised with the compression slice type */ /**< LW 25 */ uint32_t resrvd; /**< LWs 26-27 */ uint64_t comp_state_addr; /**< Pointer to compression state */ /**< LWs 28-29 */ uint64_t ram_banks_addr; /**< Pointer to banks */ } icp_qat_fw_comp_cd_hdr_t; #define COMP_CPR_INITIAL_CRC 0 #define COMP_CPR_INITIAL_ADLER 1 /** - ****************************************************************************** + ***************************************************************************** * @ingroup icp_qat_fw_comp * Translator content descriptor header block * @description * Definition of the structure used to describe the translation processing * to perform on data. The translator parameters are defined per algorithm * and are located in the icp_qat_hw.h file. This translation cd block * spans LWs 30-31, forming part of the compression + translation cd block, * thus differing from the common base content descriptor structure. * - ******************************************************************************/ + *****************************************************************************/ typedef struct icp_qat_fw_xlt_cd_hdr_s { /**< LW 30 */ uint16_t resrvd1; /**< Reserved field and assumed set to 0 */ uint8_t resrvd2; /**< Reserved field and assumed set to 0 */ uint8_t next_curr_id; /**< This field combines the next and current id (each four bits) - - * the next id is the most significant nibble. - * Next Id: Set to the next slice to pass the translated data through. - * Set to ICP_QAT_FW_SLICE_DRAM_WR if the data is not to go through - * any more slices after compression - * Current Id: Initialised with the translation slice type */ + * the next id is the most significant nibble. + * Next Id: Set to the next slice to pass the translated data through. 
+ * Set to ICP_QAT_FW_SLICE_DRAM_WR if the data is not to go through + * any more slices after compression + * Current Id: Initialised with the translation slice type */ /**< LW 31 */ uint32_t resrvd3; /**< Reserved and should be set to zero, needed for quadword alignment */ + } icp_qat_fw_xlt_cd_hdr_t; /** - ****************************************************************************** + ***************************************************************************** * @ingroup icp_qat_fw_comp * Definition of the common Compression QAT FW request * @description * This is a definition of the full request structure for * compression and translation. * - ******************************************************************************/ + *****************************************************************************/ typedef struct icp_qat_fw_comp_req_s { /**< LWs 0-1 */ icp_qat_fw_comn_req_hdr_t comn_hdr; /**< Common request header - for Service Command Id, * use service-specific Compression Command Id. * Service Specific Flags - use Compression Command Flags */ /**< LWs 2-5 */ icp_qat_fw_comp_req_hdr_cd_pars_t cd_pars; /**< Compression service-specific content descriptor field which points * either to a content descriptor parameter block or contains the * compression slice config word. 
*/ /**< LWs 6-13 */ icp_qat_fw_comn_req_mid_t comn_mid; /**< Common request middle section */ /**< LWs 14-19 */ icp_qat_fw_comp_req_params_t comp_pars; /**< Compression request Parameters block */ /**< LWs 20-21 */ union { icp_qat_fw_xlt_req_params_t xlt_pars; /**< Translation request Parameters block */ uint32_t resrvd1[ICP_QAT_FW_NUM_LONGWORDS_2]; /**< Reserved if not used for translation */ + + struct { + uint32_t partial_decompress_length; + /**< LW 20 \n Length of the decompressed data to return + */ + + uint32_t partial_decompress_offset; + /**< LW 21 \n Offset of the decompressed data at which + * to return */ + + } partial_decompress; + } u1; /**< LWs 22-23 */ union { uint32_t resrvd2[ICP_QAT_FW_NUM_LONGWORDS_2]; /**< Reserved - not used if Batch and Pack is disabled.*/ - uint64_t bnp_res_table_addr; - /**< A generic pointer to the unbounded list of - * icp_qat_fw_resp_comp_pars_t members. This pointer is only - * used when the Batch and Pack is enabled. */ + uint64_t resrvd3; + /**< Reserved - not used if Batch and Pack is disabled.*/ } u3; /**< LWs 24-29 */ icp_qat_fw_comp_cd_hdr_t comp_cd_ctrl; /**< Compression request content descriptor control * block header */ /**< LWs 30-31 */ union { icp_qat_fw_xlt_cd_hdr_t xlt_cd_ctrl; /**< Translation request content descriptor * control block header */ uint32_t resrvd3[ICP_QAT_FW_NUM_LONGWORDS_2]; /**< Reserved if not used for translation */ + } u2; } icp_qat_fw_comp_req_t; /** - ****************************************************************************** + ***************************************************************************** * @ingroup icp_qat_fw_comp - * Definition of the compression QAT FW response descriptor - * parameters + * Definition of the compression QAT FW response descriptor parameters * @description * This part of the response is specific to the compression response. 
* - ******************************************************************************/ + *****************************************************************************/ typedef struct icp_qat_fw_resp_comp_pars_s { /**< LW 4 */ uint32_t input_byte_counter; /**< Input byte counter */ /**< LW 5 */ uint32_t output_byte_counter; /**< Output byte counter */ /** LW 6-7 */ union { struct { /** LW 6 */ uint32_t curr_crc32; /**< Current CRC32 */ /** LW 7 */ uint32_t curr_adler_32; /**< Current Adler32 */ } legacy; uint32_t resrvd[ICP_QAT_FW_NUM_LONGWORDS_2]; /**< Reserved if not in legacy mode */ } crc; } icp_qat_fw_resp_comp_pars_t; /** ***************************************************************************** * @ingroup icp_qat_fw_comp - * Definition of a single result metadata structure inside Batch and Pack - * results table array. It describes the output if single job in the - * batch and pack jobs. - * Total number of entries in BNP Out table shall be equal to total - * number of requests in the 'batch'. - * @description - * This structure is specific to the compression output. 
- * - *****************************************************************************/ -typedef struct icp_qat_fw_comp_bnp_out_tbl_entry_s { - /**< LWs 0-3 */ - icp_qat_fw_resp_comp_pars_t comp_out_pars; - /**< Common output params (checksums and byte counts) */ - - /**< LW 4 */ - icp_qat_fw_comn_error_t comn_error; - /**< This field is overloaded to allow for one 8 bit common error field - * or two 8 bit error fields from compression and translator */ - - uint8_t comn_status; - /**< Status field which specifies which slice(s) report an error */ - - uint8_t reserved0; - /**< Reserved, shall be set to zero */ - - uint32_t reserved1; - /**< Reserved, shall be set to zero, - added for aligning entries to quadword boundary */ -} icp_qat_fw_comp_bnp_out_tbl_entry_t; - -/** -***************************************************************************** -* @ingroup icp_qat_fw_comp -* Supported modes for skipping regions of input or output buffers. -* -* @description -* This enumeration lists the supported modes for skipping regions of -* input or output buffers. -* -*****************************************************************************/ -typedef enum icp_qat_fw_comp_bnp_skip_mode_s { - ICP_QAT_FW_SKIP_DISABLED = 0, - /**< Skip mode is disabled */ - ICP_QAT_FW_SKIP_AT_START = 1, - /**< Skip region is at the start of the buffer. */ - ICP_QAT_FW_SKIP_AT_END = 2, - /**< Skip region is at the end of the buffer. */ - ICP_QAT_FW_SKIP_STRIDE = 3 - /**< Skip region occurs at regular intervals within the buffer. - specifies the number of bytes between each - skip region. */ -} icp_qat_fw_comp_bnp_skip_mode_t; - -/** - ***************************************************************************** - * @ingroup icp_qat_fw_comn - * Flags describing the skip and compression job bahaviour. refer to flag - * definitions on skip mode and reset/flush types. - * Note: compression behaviour flags are ignored for destination skip info. 
- * @description - * Definition of the common request flags. - * - *****************************************************************************/ -typedef uint8_t icp_qat_fw_comp_bnp_flags_t; - -/** - ***************************************************************************** - * @ingroup icp_qat_fw_comn - * Skip Region Data. - * @description - * This structure contains data relating to configuring skip region - * behaviour. A skip region is a region of an input buffer that - * should be omitted from processing or a region that should be inserted - * into the output buffer. - * - *****************************************************************************/ -typedef struct icp_qat_fw_comp_bnp_skip_info_s { - /**< LW 0 */ - uint16_t skip_length; - /** // built in support for _byteswap_ulong -#define BYTE_SWAP_32 _byteswap_ulong -#else #define BYTE_SWAP_32 __builtin_bswap32 -#endif /** ***************************************************************************** * @ingroup icp_qat_fw_comn * * @description * Definition of the hw config csr. This representation has to be further * processed by the corresponding config build function. * *****************************************************************************/ typedef struct icp_qat_hw_comp_20_config_csr_lower_s { - // Fields programmable directly by the SW. + /* Fields programmable directly by the SW. */ icp_qat_hw_comp_20_extended_delay_match_mode_t edmm; icp_qat_hw_comp_20_hw_comp_format_t algo; icp_qat_hw_comp_20_search_depth_t sd; icp_qat_hw_comp_20_hbs_control_t hbs; - // Fields programmable directly by the FW. - // Block Drop enable. (Set by FW) + /* Fields programmable directly by the FW. */ + /* Block Drop enable. 
(Set by FW) */ icp_qat_hw_comp_20_abd_t abd; icp_qat_hw_comp_20_lllbd_ctrl_t lllbd; - // Advanced HW control (Set to default vals) + /* Advanced HW control (Set to default vals) */ + icp_qat_hw_comp_20_min_match_control_t mmctrl; icp_qat_hw_comp_20_skip_hash_collision_t hash_col; icp_qat_hw_comp_20_skip_hash_update_t hash_update; icp_qat_hw_comp_20_byte_skip_t skip_ctrl; } icp_qat_hw_comp_20_config_csr_lower_t; /** ***************************************************************************** * @ingroup icp_qat_fw_comn * * @description * Build the longword as expected by the HW * *****************************************************************************/ static inline uint32_t ICP_QAT_FW_COMP_20_BUILD_CONFIG_LOWER(icp_qat_hw_comp_20_config_csr_lower_t csr) { uint32_t val32 = 0; - // Programmable values + /* Programmable values */ QAT_FIELD_SET(val32, csr.algo, ICP_QAT_HW_COMP_20_CONFIG_CSR_HW_COMP_FORMAT_BITPOS, ICP_QAT_HW_COMP_20_CONFIG_CSR_HW_COMP_FORMAT_MASK); QAT_FIELD_SET(val32, csr.sd, ICP_QAT_HW_COMP_20_CONFIG_CSR_SEARCH_DEPTH_BITPOS, ICP_QAT_HW_COMP_20_CONFIG_CSR_SEARCH_DEPTH_MASK); QAT_FIELD_SET( val32, csr.edmm, ICP_QAT_HW_COMP_20_CONFIG_CSR_EXTENDED_DELAY_MATCH_MODE_BITPOS, ICP_QAT_HW_COMP_20_CONFIG_CSR_EXTENDED_DELAY_MATCH_MODE_MASK); QAT_FIELD_SET(val32, csr.hbs, ICP_QAT_HW_COMP_20_CONFIG_CSR_HBS_CONTROL_BITPOS, ICP_QAT_HW_COMP_20_CONFIG_CSR_HBS_CONTROL_MASK); QAT_FIELD_SET(val32, - csr.lllbd, - ICP_QAT_HW_COMP_20_CONFIG_CSR_LLLBD_CTRL_BITPOS, - ICP_QAT_HW_COMP_20_CONFIG_CSR_LLLBD_CTRL_MASK); + csr.mmctrl, + ICP_QAT_HW_COMP_20_CONFIG_CSR_MIN_MATCH_CONTROL_BITPOS, + ICP_QAT_HW_COMP_20_CONFIG_CSR_MIN_MATCH_CONTROL_MASK); QAT_FIELD_SET(val32, csr.hash_col, ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_COLLISION_BITPOS, ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_COLLISION_MASK); QAT_FIELD_SET(val32, csr.hash_update, ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_UPDATE_BITPOS, ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_UPDATE_MASK); QAT_FIELD_SET(val32, csr.skip_ctrl, 
ICP_QAT_HW_COMP_20_CONFIG_CSR_BYTE_SKIP_BITPOS, ICP_QAT_HW_COMP_20_CONFIG_CSR_BYTE_SKIP_MASK); - // Default values. + /* Default values. */ QAT_FIELD_SET(val32, csr.abd, ICP_QAT_HW_COMP_20_CONFIG_CSR_ABD_BITPOS, ICP_QAT_HW_COMP_20_CONFIG_CSR_ABD_MASK); QAT_FIELD_SET(val32, csr.lllbd, ICP_QAT_HW_COMP_20_CONFIG_CSR_LLLBD_CTRL_BITPOS, ICP_QAT_HW_COMP_20_CONFIG_CSR_LLLBD_CTRL_MASK); return BYTE_SWAP_32(val32); } /** ***************************************************************************** * @ingroup icp_qat_fw_comn * * @description * Definition of the hw config csr. This representation has to be further * processed by the corresponding config build function. * *****************************************************************************/ typedef struct icp_qat_hw_comp_20_config_csr_upper_s { icp_qat_hw_comp_20_scb_control_t scb_ctrl; icp_qat_hw_comp_20_rmb_control_t rmb_ctrl; icp_qat_hw_comp_20_som_control_t som_ctrl; icp_qat_hw_comp_20_skip_hash_rd_control_t skip_hash_ctrl; icp_qat_hw_comp_20_scb_unload_control_t scb_unload_ctrl; icp_qat_hw_comp_20_disable_token_fusion_control_t disable_token_fusion_ctrl; + icp_qat_hw_comp_20_lbms_t lbms; icp_qat_hw_comp_20_scb_mode_reset_mask_t scb_mode_reset; uint16_t lazy; uint16_t nice; } icp_qat_hw_comp_20_config_csr_upper_t; /** ***************************************************************************** * @ingroup icp_qat_fw_comn * * @description * Build the longword as expected by the HW * *****************************************************************************/ static inline uint32_t ICP_QAT_FW_COMP_20_BUILD_CONFIG_UPPER(icp_qat_hw_comp_20_config_csr_upper_t csr) { uint32_t val32 = 0; QAT_FIELD_SET(val32, csr.scb_ctrl, ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_CONTROL_BITPOS, ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_CONTROL_MASK); QAT_FIELD_SET(val32, csr.rmb_ctrl, ICP_QAT_HW_COMP_20_CONFIG_CSR_RMB_CONTROL_BITPOS, ICP_QAT_HW_COMP_20_CONFIG_CSR_RMB_CONTROL_MASK); QAT_FIELD_SET(val32, csr.som_ctrl, 
ICP_QAT_HW_COMP_20_CONFIG_CSR_SOM_CONTROL_BITPOS, ICP_QAT_HW_COMP_20_CONFIG_CSR_SOM_CONTROL_MASK); QAT_FIELD_SET(val32, csr.skip_hash_ctrl, ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_RD_CONTROL_BITPOS, ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_RD_CONTROL_MASK); QAT_FIELD_SET(val32, csr.scb_unload_ctrl, ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_UNLOAD_CONTROL_BITPOS, ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_UNLOAD_CONTROL_MASK); QAT_FIELD_SET( val32, csr.disable_token_fusion_ctrl, ICP_QAT_HW_COMP_20_CONFIG_CSR_DISABLE_TOKEN_FUSION_CONTROL_BITPOS, ICP_QAT_HW_COMP_20_CONFIG_CSR_DISABLE_TOKEN_FUSION_CONTROL_MASK); + QAT_FIELD_SET(val32, + csr.lbms, + ICP_QAT_HW_COMP_20_CONFIG_CSR_LBMS_BITPOS, + ICP_QAT_HW_COMP_20_CONFIG_CSR_LBMS_MASK); + QAT_FIELD_SET(val32, csr.scb_mode_reset, ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_MODE_RESET_MASK_BITPOS, ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_MODE_RESET_MASK_MASK); QAT_FIELD_SET(val32, csr.lazy, ICP_QAT_HW_COMP_20_CONFIG_CSR_LAZY_PARAM_BITPOS, ICP_QAT_HW_COMP_20_CONFIG_CSR_LAZY_PARAM_MASK); QAT_FIELD_SET(val32, csr.nice, ICP_QAT_HW_COMP_20_CONFIG_CSR_NICE_PARAM_BITPOS, ICP_QAT_HW_COMP_20_CONFIG_CSR_NICE_PARAM_MASK); return BYTE_SWAP_32(val32); } /** ***************************************************************************** * @ingroup icp_qat_fw_comn * * @description * Definition of the hw config csr. This representation has to be further * processed by the corresponding config build function. * *****************************************************************************/ typedef struct icp_qat_hw_decomp_20_config_csr_lower_s { /* Fields programmable directly by the SW. 
*/ icp_qat_hw_decomp_20_hbs_control_t hbs; + icp_qat_hw_decomp_20_lbms_t lbms; /* Advanced HW control (Set to default vals) */ icp_qat_hw_decomp_20_hw_comp_format_t algo; + icp_qat_hw_decomp_20_min_match_control_t mmctrl; + icp_qat_hw_decomp_20_lz4_block_checksum_present_t lbc; } icp_qat_hw_decomp_20_config_csr_lower_t; /** ***************************************************************************** * @ingroup icp_qat_fw_comn * * @description * Build the longword as expected by the HW * *****************************************************************************/ static inline uint32_t ICP_QAT_FW_DECOMP_20_BUILD_CONFIG_LOWER( icp_qat_hw_decomp_20_config_csr_lower_t csr) { uint32_t val32 = 0; QAT_FIELD_SET(val32, csr.hbs, ICP_QAT_HW_DECOMP_20_CONFIG_CSR_HBS_CONTROL_BITPOS, ICP_QAT_HW_DECOMP_20_CONFIG_CSR_HBS_CONTROL_MASK); + QAT_FIELD_SET(val32, + csr.lbms, + ICP_QAT_HW_DECOMP_20_CONFIG_CSR_LBMS_BITPOS, + ICP_QAT_HW_DECOMP_20_CONFIG_CSR_LBMS_MASK); + QAT_FIELD_SET(val32, csr.algo, ICP_QAT_HW_DECOMP_20_CONFIG_CSR_HW_DECOMP_FORMAT_BITPOS, ICP_QAT_HW_DECOMP_20_CONFIG_CSR_HW_DECOMP_FORMAT_MASK); + QAT_FIELD_SET(val32, + csr.mmctrl, + ICP_QAT_HW_DECOMP_20_CONFIG_CSR_MIN_MATCH_CONTROL_BITPOS, + ICP_QAT_HW_DECOMP_20_CONFIG_CSR_MIN_MATCH_CONTROL_MASK); + + QAT_FIELD_SET( + val32, + csr.lbc, + ICP_QAT_HW_DECOMP_20_CONFIG_CSR_LZ4_BLOCK_CHECKSUM_PRESENT_BITPOS, + ICP_QAT_HW_DECOMP_20_CONFIG_CSR_LZ4_BLOCK_CHECKSUM_PRESENT_MASK); + return BYTE_SWAP_32(val32); } /** ***************************************************************************** * @ingroup icp_qat_fw_comn * * @description * Definition of the hw config csr. This representation has to be further * processed by the corresponding config build function. 
* *****************************************************************************/ typedef struct icp_qat_hw_decomp_20_config_csr_upper_s { /* Advanced HW control (Set to default vals) */ icp_qat_hw_decomp_20_speculative_decoder_control_t sdc; - icp_qat_hw_decomp_20_mini_cam_control_t mcc; + icp_qat_hw_decomp_20_reserved4_control_t res4; } icp_qat_hw_decomp_20_config_csr_upper_t; /** ***************************************************************************** * @ingroup icp_qat_fw_comn * * @description * Build the longword as expected by the HW * *****************************************************************************/ static inline uint32_t ICP_QAT_FW_DECOMP_20_BUILD_CONFIG_UPPER( icp_qat_hw_decomp_20_config_csr_upper_t csr) { uint32_t val32 = 0; QAT_FIELD_SET( val32, csr.sdc, ICP_QAT_HW_DECOMP_20_CONFIG_CSR_SPECULATIVE_DECODER_CONTROL_BITPOS, ICP_QAT_HW_DECOMP_20_CONFIG_CSR_SPECULATIVE_DECODER_CONTROL_MASK); QAT_FIELD_SET(val32, - csr.mcc, - ICP_QAT_HW_DECOMP_20_CONFIG_CSR_MINI_CAM_CONTROL_BITPOS, - ICP_QAT_HW_DECOMP_20_CONFIG_CSR_MINI_CAM_CONTROL_MASK); + csr.res4, + ICP_QAT_HW_DECOMP_20_CONFIG_CSR_RESERVED4_CONTROL_BITPOS, + ICP_QAT_HW_DECOMP_20_CONFIG_CSR_RESERVED4_CONTROL_MASK); return BYTE_SWAP_32(val32); } - #endif /* ICP_QAT_HW__2X_COMP_H_ */ diff --git a/sys/dev/qat/qat_api/firmware/include/icp_qat_hw_20_comp_defs.h b/sys/dev/qat/qat_api/firmware/include/icp_qat_hw_20_comp_defs.h index ccdeb471f88b..f5c7c4f958b9 100644 --- a/sys/dev/qat/qat_api/firmware/include/icp_qat_hw_20_comp_defs.h +++ b/sys/dev/qat/qat_api/firmware/include/icp_qat_hw_20_comp_defs.h @@ -1,442 +1,563 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright(c) 2007-2022 Intel Corporation */ /* **************************************************************************** * @file icp_qat_hw_20_comp_defs.h, (autogenerated at 04-19-18 16:06) * @defgroup icp_qat_hw_comp_20 * @ingroup icp_qat_hw_comp_20 * @description * This file represents the HW configuration CSR definitions 
**************************************************************************** */ #ifndef _ICP_QAT_HW_20_COMP_DEFS_H #define _ICP_QAT_HW_20_COMP_DEFS_H /*****************************************************************************/ /* SCB Disabled - Set by FW, located in upper 32bit */ #define ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_CONTROL_BITPOS 31 #define ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_CONTROL_MASK 0x1 /* **************************************************************************** * @ingroup icp_qat_hw_defs * @description * Enumeration of possible SCB_CONTROL field values *****************************************************************************/ typedef enum { ICP_QAT_HW_COMP_20_SCB_CONTROL_ENABLE = 0x0, /* Normal Mode using SCB (Default) */ ICP_QAT_HW_COMP_20_SCB_CONTROL_DISABLE = 0x1, /* Legacy CPM1.x Mode with SCB disabled. */ } icp_qat_hw_comp_20_scb_control_t; #define ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_CONTROL_DEFAULT_VAL \ ICP_QAT_HW_COMP_20_SCB_CONTROL_DISABLE /*****************************************************************************/ /* Reset Bit Mask Disabled - Set by FW , located in upper 32bit */ #define ICP_QAT_HW_COMP_20_CONFIG_CSR_RMB_CONTROL_BITPOS 30 #define ICP_QAT_HW_COMP_20_CONFIG_CSR_RMB_CONTROL_MASK 0x1 /* **************************************************************************** * @ingroup icp_qat_hw_defs * @description * Enumeration of possible RMB_CONTROL field values *****************************************************************************/ typedef enum { ICP_QAT_HW_COMP_20_RMB_CONTROL_RESET_ALL = 0x0, /* Reset all data structures with a set_config command. (Set by FW) */ ICP_QAT_HW_COMP_20_RMB_CONTROL_RESET_FC_ONLY = 0x1, /* Reset only the Frequency Counters (LFCT) with a set_config command. 
*/ } icp_qat_hw_comp_20_rmb_control_t; #define ICP_QAT_HW_COMP_20_CONFIG_CSR_RMB_CONTROL_DEFAULT_VAL \ ICP_QAT_HW_COMP_20_RMB_CONTROL_RESET_ALL /*****************************************************************************/ /* Slice Operation Mode (SOM) - Set By FW, located in upper 32bit */ #define ICP_QAT_HW_COMP_20_CONFIG_CSR_SOM_CONTROL_BITPOS 28 #define ICP_QAT_HW_COMP_20_CONFIG_CSR_SOM_CONTROL_MASK 0x3 /* **************************************************************************** * @ingroup icp_qat_hw_defs * @description * Enumeration of possible SOM_CONTROL field values *****************************************************************************/ typedef enum { ICP_QAT_HW_COMP_20_SOM_CONTROL_NORMAL_MODE = 0x0, /* Normal mode. */ ICP_QAT_HW_COMP_20_SOM_CONTROL_REPLAY_MODE = 0x1, /* Replay mode */ ICP_QAT_HW_COMP_20_SOM_CONTROL_INPUT_CRC = 0x2, /* Input CRC Mode */ ICP_QAT_HW_COMP_20_SOM_CONTROL_RESERVED_MODE = 0x3, /* Reserved. */ } icp_qat_hw_comp_20_som_control_t; #define ICP_QAT_HW_COMP_20_CONFIG_CSR_SOM_CONTROL_DEFAULT_VAL \ ICP_QAT_HW_COMP_20_SOM_CONTROL_NORMAL_MODE /*****************************************************************************/ /* Skip Hash Read (Set By FW) , located in upper 32bit */ #define ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_RD_CONTROL_BITPOS 27 #define ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_RD_CONTROL_MASK 0x1 /* **************************************************************************** * @ingroup icp_qat_hw_defs * @description * Enumeration of possible SKIP_HASH_RD_CONTROL field values *****************************************************************************/ typedef enum { ICP_QAT_HW_COMP_20_SKIP_HASH_RD_CONTROL_NO_SKIP = 0x0, /* When set to 0, hash reads are not skipped. */ ICP_QAT_HW_COMP_20_SKIP_HASH_RD_CONTROL_SKIP_HASH_READS = 0x1, /* Hash reads are skipped. 
*/ } icp_qat_hw_comp_20_skip_hash_rd_control_t; #define ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_RD_CONTROL_DEFAULT_VAL \ ICP_QAT_HW_COMP_20_SKIP_HASH_RD_CONTROL_NO_SKIP /*****************************************************************************/ /* SCB Unload Disable, located in upper 32bit */ #define ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_UNLOAD_CONTROL_BITPOS 26 #define ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_UNLOAD_CONTROL_MASK 0x1 /* **************************************************************************** * @ingroup icp_qat_hw_defs * @description * Enumeration of possible SCB_UNLOAD_CONTROL field values *****************************************************************************/ typedef enum { ICP_QAT_HW_COMP_20_SCB_UNLOAD_CONTROL_UNLOAD = 0x0, /* Unloads the LFCT and flushes the State Registers. */ ICP_QAT_HW_COMP_20_SCB_UNLOAD_CONTROL_NO_UNLOAD = 0x1, /* Does not unload the LFCT, but flushes the State Registers. */ } icp_qat_hw_comp_20_scb_unload_control_t; #define ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_UNLOAD_CONTROL_DEFAULT_VAL \ ICP_QAT_HW_COMP_20_SCB_UNLOAD_CONTROL_UNLOAD /*****************************************************************************/ /* Disable token fusion, located in upper 32bit */ #define ICP_QAT_HW_COMP_20_CONFIG_CSR_DISABLE_TOKEN_FUSION_CONTROL_BITPOS 21 #define ICP_QAT_HW_COMP_20_CONFIG_CSR_DISABLE_TOKEN_FUSION_CONTROL_MASK 0x1 /* **************************************************************************** * @ingroup icp_qat_hw_defs * @description * Enumeration of possible DISABLE_TOKEN_FUSION_CONTROL field values *****************************************************************************/ typedef enum { ICP_QAT_HW_COMP_20_DISABLE_TOKEN_FUSION_CONTROL_ENABLE = 0x0, /* Enables token fusion. */ ICP_QAT_HW_COMP_20_DISABLE_TOKEN_FUSION_CONTROL_DISABLE = 0x1, /* Disables token fusion. 
*/ } icp_qat_hw_comp_20_disable_token_fusion_control_t; #define ICP_QAT_HW_COMP_20_CONFIG_CSR_DISABLE_TOKEN_FUSION_CONTROL_DEFAULT_VAL \ ICP_QAT_HW_COMP_20_DISABLE_TOKEN_FUSION_CONTROL_ENABLE +/*****************************************************************************/ +/* LZ4 Block Maximum Size (LBMS). Set by FW , located in upper 32bit */ +#define ICP_QAT_HW_COMP_20_CONFIG_CSR_LBMS_BITPOS 19 +#define ICP_QAT_HW_COMP_20_CONFIG_CSR_LBMS_MASK 0x3 +/* + **************************************************************************** + * @ingroup icp_qat_hw_defs + * @description + * Enumeration of possible LBMS field values + *****************************************************************************/ +typedef enum { + ICP_QAT_HW_COMP_20_LBMS_LBMS_64KB = 0x0, + /* LZ4 Block Maximum Size (LBMS) == 64 KB */ + ICP_QAT_HW_COMP_20_LBMS_LBMS_256KB = 0x1, + /* LZ4 Block Maximum Size (LBMS) == 256 KB */ + ICP_QAT_HW_COMP_20_LBMS_LBMS_1MB = 0x2, + /* LZ4 Block Maximum Size (LBMS) == 1 MB */ + ICP_QAT_HW_COMP_20_LBMS_LBMS_4MB = 0x3, + /* LZ4 Block Maximum Size (LBMS) == 4 MB */ +} icp_qat_hw_comp_20_lbms_t; + +#define ICP_QAT_HW_COMP_20_CONFIG_CSR_LBMS_DEFAULT_VAL \ + ICP_QAT_HW_COMP_20_LBMS_LBMS_64KB + /*****************************************************************************/ /* SCB Mode Reset Mask (Set By FW) , located in upper 32bit */ #define ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_MODE_RESET_MASK_BITPOS 18 #define ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_MODE_RESET_MASK_MASK 0x1 /* **************************************************************************** * @ingroup icp_qat_hw_defs * @description * Enumeration of possible SCB_MODE_RESET_MASK field values *****************************************************************************/ typedef enum { ICP_QAT_HW_COMP_20_SCB_MODE_RESET_MASK_RESET_COUNTERS = 0x0, - /* iLZ77 mode: Reset LFCT, OBC */ + /* LZ4 mode: Reset LIBC, LOBC, In iLZ77 mode: Reset LFCT, OBC */ ICP_QAT_HW_COMP_20_SCB_MODE_RESET_MASK_RESET_COUNTERS_AND_HISTORY = 
0x1, - /* iLZ77 mode: Reset LFCT, OBC, HB, HT */ + /* LZ4 mode: Reset LIBC, LOBC, HB, HT, In iLZ77 mode: Reset LFCT, OBC, + HB, HT */ } icp_qat_hw_comp_20_scb_mode_reset_mask_t; #define ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_MODE_RESET_MASK_DEFAULT_VAL \ ICP_QAT_HW_COMP_20_SCB_MODE_RESET_MASK_RESET_COUNTERS /*****************************************************************************/ -/* Lazy - For iLZ77 and Static DEFLATE, Lazy = 102h , located in upper +/* Lazy - For iLZ77, LZ4, and Static DEFLATE, Lazy = 102h , located in upper * 32bit */ #define ICP_QAT_HW_COMP_20_CONFIG_CSR_LAZY_PARAM_BITPOS 9 #define ICP_QAT_HW_COMP_20_CONFIG_CSR_LAZY_PARAM_MASK 0x1ff #define ICP_QAT_HW_COMP_20_CONFIG_CSR_LAZY_PARAM_DEFAULT_VAL 258 /*****************************************************************************/ -/* Nice - For iLZ77 and Static DEFLATE, Nice = 103h , located in upper +/* Nice - For iLZ77, LZ4, and Static DEFLATE, Nice = 103h , located in upper * 32bit */ #define ICP_QAT_HW_COMP_20_CONFIG_CSR_NICE_PARAM_BITPOS 0 #define ICP_QAT_HW_COMP_20_CONFIG_CSR_NICE_PARAM_MASK 0x1ff #define ICP_QAT_HW_COMP_20_CONFIG_CSR_NICE_PARAM_DEFAULT_VAL 259 /*****************************************************************************/ /* History Buffer Size (Set By the Driver/ Application), located in lower 32bit */ #define ICP_QAT_HW_COMP_20_CONFIG_CSR_HBS_CONTROL_BITPOS 14 #define ICP_QAT_HW_COMP_20_CONFIG_CSR_HBS_CONTROL_MASK 0x7 /* **************************************************************************** * @ingroup icp_qat_hw_defs * @description * Enumeration of possible HBS_CONTROL field values *****************************************************************************/ typedef enum { ICP_QAT_HW_COMP_20_HBS_CONTROL_HBS_IS_32KB = 0x0, /* 000b - 32KB */ - ICP_QAT_HW_COMP_20_HBS_CONTROL_HBS_IS_64KB = 0x1, - /* 001b - 64KB */ } icp_qat_hw_comp_20_hbs_control_t; #define ICP_QAT_HW_COMP_20_CONFIG_CSR_HBS_CONTROL_DEFAULT_VAL \ ICP_QAT_HW_COMP_20_HBS_CONTROL_HBS_IS_32KB 
/*****************************************************************************/ /* Adaptive Block Drop (Set By FW if Dynamic), located in lower 32bit */ #define ICP_QAT_HW_COMP_20_CONFIG_CSR_ABD_BITPOS 13 #define ICP_QAT_HW_COMP_20_CONFIG_CSR_ABD_MASK 0x1 /* **************************************************************************** * @ingroup icp_qat_hw_defs * @description * Enumeration of possible ABD field values *****************************************************************************/ typedef enum { ICP_QAT_HW_COMP_20_ABD_ABD_ENABLED = 0x0, /* 0b - Feature enabled. */ ICP_QAT_HW_COMP_20_ABD_ABD_DISABLED = 0x1, /* 1b - Feature disabled. */ } icp_qat_hw_comp_20_abd_t; #define ICP_QAT_HW_COMP_20_CONFIG_CSR_ABD_DEFAULT_VAL \ ICP_QAT_HW_COMP_20_ABD_ABD_ENABLED /*****************************************************************************/ /* Literal+Length Limit Block Drop Block Drop, (Set By FW if Dynamic) , located * in lower 32bit */ #define ICP_QAT_HW_COMP_20_CONFIG_CSR_LLLBD_CTRL_BITPOS 12 #define ICP_QAT_HW_COMP_20_CONFIG_CSR_LLLBD_CTRL_MASK 0x1 /* **************************************************************************** * @ingroup icp_qat_hw_defs * @description * Enumeration of possible LLLBD_CTRL field values *****************************************************************************/ typedef enum { ICP_QAT_HW_COMP_20_LLLBD_CTRL_LLLBD_ENABLED = 0x0, /* 0b - Feature enabled. */ ICP_QAT_HW_COMP_20_LLLBD_CTRL_LLLBD_DISABLED = 0x1, /* 1b - Feature disabled. 
*/ } icp_qat_hw_comp_20_lllbd_ctrl_t; #define ICP_QAT_HW_COMP_20_CONFIG_CSR_LLLBD_CTRL_DEFAULT_VAL \ ICP_QAT_HW_COMP_20_LLLBD_CTRL_LLLBD_ENABLED /*****************************************************************************/ /* Search Depth (SD) (Set By Driver/Application), located in lower 32bit */ #define ICP_QAT_HW_COMP_20_CONFIG_CSR_SEARCH_DEPTH_BITPOS 8 #define ICP_QAT_HW_COMP_20_CONFIG_CSR_SEARCH_DEPTH_MASK 0xf /* **************************************************************************** * @ingroup icp_qat_hw_defs * @description * Enumeration of possible SEARCH_DEPTH field values *****************************************************************************/ typedef enum { ICP_QAT_HW_COMP_20_SEARCH_DEPTH_LEVEL_1 = 0x1, /* 0001b - Level 1 (search depth = 2^1 = 2) */ ICP_QAT_HW_COMP_20_SEARCH_DEPTH_LEVEL_6 = 0x3, - /* 0011b - Level 6 (search depth = 2^3 = 8) */ + /* 0011b - Level 6 (search depth = 2^3 = 8) */ ICP_QAT_HW_COMP_20_SEARCH_DEPTH_LEVEL_9 = 0x4, - /* 0100b - Level 9 (search depth = 2^4 = 16) */ + /* 0100b - Level 9 (search depth = 2^4 = 16) */ + ICP_QAT_HW_COMP_20_SEARCH_DEPTH_LEVEL_9P = 0x12, + /* 10010b - Level 9P (search depth = 2^12 = 4096) */ } icp_qat_hw_comp_20_search_depth_t; #define ICP_QAT_HW_COMP_20_CONFIG_CSR_SEARCH_DEPTH_DEFAULT_VAL \ ICP_QAT_HW_COMP_20_SEARCH_DEPTH_LEVEL_1 /*****************************************************************************/ /* Compression Format (Set By Driver/Application. Also See CMD ID), located in * lower 32bit */ #define ICP_QAT_HW_COMP_20_CONFIG_CSR_HW_COMP_FORMAT_BITPOS 5 #define ICP_QAT_HW_COMP_20_CONFIG_CSR_HW_COMP_FORMAT_MASK 0x7 /* **************************************************************************** * @ingroup icp_qat_hw_defs * @description * Enumeration of possible HW_COMP_FORMAT field values *****************************************************************************/ typedef enum { ICP_QAT_HW_COMP_20_HW_COMP_FORMAT_ILZ77 = 0x0, /* 000 - iLZ77. 
(Must set Min_Match = 3 bytes and HB size = 32KB.) */ ICP_QAT_HW_COMP_20_HW_COMP_FORMAT_DEFLATE = 0x1, /* 001 - Static DEFLATE. (Must set Min_Match = 3 bytes and HB size = - 32KB.) */ + * 32KB.) + */ + ICP_QAT_HW_COMP_20_HW_COMP_FORMAT_LZ4 = 0x2, + /* 010 - LZ4. (Must set Min Match = 4 bytes and HB size = 64KB.) */ + ICP_QAT_HW_COMP_20_HW_COMP_FORMAT_LZ4S = 0x3, + /* 011 - LZ4s. (Min_Match and HBSize must be set accordingly.) */ } icp_qat_hw_comp_20_hw_comp_format_t; #define ICP_QAT_HW_COMP_20_CONFIG_CSR_HW_COMP_FORMAT_DEFAULT_VAL \ ICP_QAT_HW_COMP_20_HW_COMP_FORMAT_DEFLATE +/*****************************************************************************/ +/* Min Match (Set By FW to default value), located in lower 32bit */ +#define ICP_QAT_HW_COMP_20_CONFIG_CSR_MIN_MATCH_CONTROL_BITPOS 4 +#define ICP_QAT_HW_COMP_20_CONFIG_CSR_MIN_MATCH_CONTROL_MASK 0x1 +/* + **************************************************************************** + * @ingroup icp_qat_hw_defs + * @description + * Enumeration of possible MIN_MATCH_CONTROL field values + *****************************************************************************/ +typedef enum { + ICP_QAT_HW_COMP_20_MIN_MATCH_CONTROL_MATCH_3B = 0x0, + /* 0 - Match 3 B */ + ICP_QAT_HW_COMP_20_MIN_MATCH_CONTROL_MATCH_4B = 0x1, + /* 1 - Match 4 B */ +} icp_qat_hw_comp_20_min_match_control_t; + +#define ICP_QAT_HW_COMP_20_CONFIG_CSR_MIN_MATCH_CONTROL_DEFAULT_VAL \ + ICP_QAT_HW_COMP_20_MIN_MATCH_CONTROL_MATCH_3B + /*****************************************************************************/ /* Skip Hash Collision (Set By FW to default value), located in lower 32bit */ #define ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_COLLISION_BITPOS 3 #define ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_COLLISION_MASK 0x1 /* **************************************************************************** * @ingroup icp_qat_hw_defs * @description * Enumeration of possible SKIP_HASH_COLLISION field values 
*****************************************************************************/ typedef enum { ICP_QAT_HW_COMP_20_SKIP_HASH_COLLISION_ALLOW = 0x0, /* When set to 0, hash collisions are allowed. */ ICP_QAT_HW_COMP_20_SKIP_HASH_COLLISION_DONT_ALLOW = 0x1, /* When set to 0, hash collisions are allowed. */ } icp_qat_hw_comp_20_skip_hash_collision_t; #define ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_COLLISION_DEFAULT_VAL \ ICP_QAT_HW_COMP_20_SKIP_HASH_COLLISION_ALLOW /*****************************************************************************/ /* Skip Hash Update (Set By FW to default value) , located in lower 32bit */ #define ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_UPDATE_BITPOS 2 #define ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_UPDATE_MASK 0x1 /* **************************************************************************** * @ingroup icp_qat_hw_defs * @description * Enumeration of possible SKIP_HASH_UPDATE field values *****************************************************************************/ typedef enum { ICP_QAT_HW_COMP_20_SKIP_HASH_UPDATE_ALLOW = 0x0, /* 0 - hash updates are not skipped. */ ICP_QAT_HW_COMP_20_SKIP_HASH_UPDATE_DONT_ALLOW = 0x1, /* 1 - hash updates are skipped. 
*/ } icp_qat_hw_comp_20_skip_hash_update_t; #define ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_UPDATE_DEFAULT_VAL \ ICP_QAT_HW_COMP_20_SKIP_HASH_UPDATE_ALLOW /*****************************************************************************/ /* 3-Byte Match Skip (Set By FW to default value), located in lower 32bit */ #define ICP_QAT_HW_COMP_20_CONFIG_CSR_BYTE_SKIP_BITPOS 1 #define ICP_QAT_HW_COMP_20_CONFIG_CSR_BYTE_SKIP_MASK 0x1 /* **************************************************************************** * @ingroup icp_qat_hw_defs * @description * Enumeration of possible BYTE_SKIP field values *****************************************************************************/ typedef enum { ICP_QAT_HW_COMP_20_BYTE_SKIP_3BYTE_TOKEN = 0x0, /* 0 - Use 3-byte token */ ICP_QAT_HW_COMP_20_BYTE_SKIP_3BYTE_LITERAL = 0x1, /* 0 - Use 3-byte literal */ } icp_qat_hw_comp_20_byte_skip_t; #define ICP_QAT_HW_COMP_20_CONFIG_CSR_BYTE_SKIP_DEFAULT_VAL \ ICP_QAT_HW_COMP_20_BYTE_SKIP_3BYTE_TOKEN /*****************************************************************************/ /* Extended Delayed Match Mode enabled (Set By the Driver), located in lower * 32bit */ #define ICP_QAT_HW_COMP_20_CONFIG_CSR_EXTENDED_DELAY_MATCH_MODE_BITPOS 0 #define ICP_QAT_HW_COMP_20_CONFIG_CSR_EXTENDED_DELAY_MATCH_MODE_MASK 0x1 /* **************************************************************************** * @ingroup icp_qat_hw_defs * @description * Enumeration of possible EXTENDED_DELAY_MATCH_MODE field values *****************************************************************************/ typedef enum { ICP_QAT_HW_COMP_20_EXTENDED_DELAY_MATCH_MODE_EDMM_DISABLED = 0x0, /* 0 - EXTENDED_DELAY_MATCH_MODE disabled */ ICP_QAT_HW_COMP_20_EXTENDED_DELAY_MATCH_MODE_EDMM_ENABLED = 0x1, /* 1 - EXTENDED_DELAY_MATCH_MODE enabled */ } icp_qat_hw_comp_20_extended_delay_match_mode_t; #define ICP_QAT_HW_COMP_20_CONFIG_CSR_EXTENDED_DELAY_MATCH_MODE_DEFAULT_VAL \ ICP_QAT_HW_COMP_20_EXTENDED_DELAY_MATCH_MODE_EDMM_DISABLED 
/*****************************************************************************/ /* Speculative Decoder Disable (Set By the Driver/ Application), located in * upper 32bit */ #define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_SPECULATIVE_DECODER_CONTROL_BITPOS 31 #define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_SPECULATIVE_DECODER_CONTROL_MASK 0x1 /* **************************************************************************** * @ingroup icp_qat_hw_defs * @description * Enumeration of possible SPECULATIVE_DECODER_CONTROL field values *****************************************************************************/ typedef enum { ICP_QAT_HW_DECOMP_20_SPECULATIVE_DECODER_CONTROL_ENABLE = 0x0, /* 0b - Enabled */ ICP_QAT_HW_DECOMP_20_SPECULATIVE_DECODER_CONTROL_DISABLE = 0x1, /* 1b - Disabled */ } icp_qat_hw_decomp_20_speculative_decoder_control_t; #define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_SPECULATIVE_DECODER_CONTROL_DEFAULT_VAL \ ICP_QAT_HW_DECOMP_20_SPECULATIVE_DECODER_CONTROL_ENABLE /*****************************************************************************/ /* Mini CAM Disable (Set By the Driver/ Application), located in upper 32bit */ -#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_MINI_CAM_CONTROL_BITPOS 30 -#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_MINI_CAM_CONTROL_MASK 0x1 +#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_RESERVED4_CONTROL_BITPOS 30 +#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_RESERVED4_CONTROL_MASK 0x1 /* **************************************************************************** * @ingroup icp_qat_hw_defs * @description - * Enumeration of possible MINI_CAM_CONTROL field values + * Enumeration of possible RESERVED4 field values *****************************************************************************/ typedef enum { - ICP_QAT_HW_DECOMP_20_MINI_CAM_CONTROL_ENABLE = 0x0, + ICP_QAT_HW_DECOMP_20_RESERVED4_CONTROL_ENABLE = 0x0, /* 0b - Enabled */ - ICP_QAT_HW_DECOMP_20_MINI_CAM_CONTROL_DISABLE = 0x1, + ICP_QAT_HW_DECOMP_20_RESERVED4_CONTROL_DISABLE = 0x1, /* 1b - Disabled */ -} 
icp_qat_hw_decomp_20_mini_cam_control_t; +} icp_qat_hw_decomp_20_reserved4_control_t; -#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_MINI_CAM_CONTROL_DEFAULT_VAL \ - ICP_QAT_HW_DECOMP_20_MINI_CAM_CONTROL_ENABLE +#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_RESERVED4_CONTROL_DEFAULT_VAL \ + ICP_QAT_HW_DECOMP_20_RESERVED4_CONTROL_ENABLE /*****************************************************************************/ /* History Buffer Size (Set By the Driver/ Application), located in lower 32bit */ #define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_HBS_CONTROL_BITPOS 14 #define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_HBS_CONTROL_MASK 0x7 /* **************************************************************************** * @ingroup icp_qat_hw_defs * @description * Enumeration of possible HBS_CONTROL field values *****************************************************************************/ typedef enum { ICP_QAT_HW_DECOMP_20_HBS_CONTROL_HBS_IS_32KB = 0x0, /* 000b - 32KB */ } icp_qat_hw_decomp_20_hbs_control_t; #define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_HBS_CONTROL_DEFAULT_VAL \ ICP_QAT_HW_DECOMP_20_HBS_CONTROL_HBS_IS_32KB +/*****************************************************************************/ +/* LZ4 Block Maximum Size (LBMS). 
Set by FW , located in lower 32bit */ +#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_LBMS_BITPOS 8 +#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_LBMS_MASK 0x3 +/* + **************************************************************************** + * @ingroup icp_qat_hw_defs + * @description + * Enumeration of possible LBMS field values + *****************************************************************************/ +typedef enum { + ICP_QAT_HW_DECOMP_20_LBMS_LBMS_64KB = 0x0, + /* LZ4 Block Maximum Size (LBMS) == 64 KB */ + ICP_QAT_HW_DECOMP_20_LBMS_LBMS_256KB = 0x1, + /* LZ4 Block Maximum Size (LBMS) == 256 KB */ + ICP_QAT_HW_DECOMP_20_LBMS_LBMS_1MB = 0x2, + /* LZ4 Block Maximum Size (LBMS) == 1 MB */ + ICP_QAT_HW_DECOMP_20_LBMS_LBMS_4MB = 0x3, + /* LZ4 Block Maximum Size (LBMS) == 4 MB */ +} icp_qat_hw_decomp_20_lbms_t; + +#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_LBMS_DEFAULT_VAL \ + ICP_QAT_HW_DECOMP_20_LBMS_LBMS_64KB + /*****************************************************************************/ /* Decompression Format (Set By Driver/Application. Also See CMD ID), located in * lower 32bit */ #define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_HW_DECOMP_FORMAT_BITPOS 5 #define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_HW_DECOMP_FORMAT_MASK 0x7 /* **************************************************************************** * @ingroup icp_qat_hw_defs * @description * Enumeration of possible HW_DECOMP_FORMAT field values *****************************************************************************/ typedef enum { ICP_QAT_HW_DECOMP_20_HW_DECOMP_FORMAT_DEFLATE = 0x1, /* 001 - Static DEFLATE. (Must set Min_Match = 3 bytes and HB size = - 32KB.) */ + * 32KB.) + */ + ICP_QAT_HW_DECOMP_20_HW_DECOMP_FORMAT_LZ4 = 0x2, + /* 010 - LZ4. (Must set Min Match = 4 bytes and HB size = 32KB.) */ + ICP_QAT_HW_DECOMP_20_HW_DECOMP_FORMAT_LZ4S = 0x3, + /* 011 - LZ4s. (Min_Match and HBSize must be set accordingly.) 
*/ } icp_qat_hw_decomp_20_hw_comp_format_t; #define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_HW_DECOMP_FORMAT_DEFAULT_VAL \ ICP_QAT_HW_DECOMP_20_HW_DECOMP_FORMAT_DEFLATE -#endif //_ICP_QAT_HW_20_COMP_DEFS_H +/*****************************************************************************/ +/* Min Match (Set By FW to default value), located in + * lower 32bit */ +#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_MIN_MATCH_CONTROL_BITPOS 4 +#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_MIN_MATCH_CONTROL_MASK 0x1 + +/* + **************************************************************************** + * @ingroup icp_qat_hw_defs + * @description + * Enumeration of possible MIN_MATCH_CONTROL field values + *****************************************************************************/ +typedef enum { + ICP_QAT_HW_DECOMP_20_MIN_MATCH_CONTROL_MATCH_3B = 0x0, + /* 0 - Match 3 B */ + ICP_QAT_HW_DECOMP_20_MIN_MATCH_CONTROL_MATCH_4B = 0x1, + /* 1 - Match 4 B */ +} icp_qat_hw_decomp_20_min_match_control_t; + +#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_MIN_MATCH_CONTROL_DEFAULT_VAL \ + ICP_QAT_HW_DECOMP_20_MIN_MATCH_CONTROL_MATCH_3B + +/*****************************************************************************/ +/* LZ4 Block Checksum Present, located in lower 32bit */ +#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_LZ4_BLOCK_CHECKSUM_PRESENT_BITPOS 3 +#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_LZ4_BLOCK_CHECKSUM_PRESENT_MASK 0x1 +/* + **************************************************************************** + * @ingroup icp_qat_hw_defs + * @description + * Enumeration of possible LZ4_CHECKSUM_PRESENT field values + *****************************************************************************/ +typedef enum { + ICP_QAT_HW_DECOMP_20_LZ4_BLOCK_CHKSUM_ABSENT = 0x0, + /* the LZ4 Block does not contain the 4-byte checksum */ + ICP_QAT_HW_DECOMP_20_LZ4_BLOCK_CHKSUM_PRESENT = 0x1, + /* LZ4 Block contains a 4-byte checksum. 
*/ +} icp_qat_hw_decomp_20_lz4_block_checksum_present_t; + +#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_LZ4_BLOCK_CHECKSUM_PRESENT_DEFAULT_VAL \ + ICP_QAT_HW_DECOMP_20_LZ4_BLOCK_CHKSUM_ABSENT + +#endif /* _ICP_QAT_HW_20_COMP_DEFS_H */ diff --git a/sys/dev/qat/qat_api/include/dc/cpa_dc.h b/sys/dev/qat/qat_api/include/dc/cpa_dc.h index f0ed869d1020..7094747bc83e 100644 --- a/sys/dev/qat/qat_api/include/dc/cpa_dc.h +++ b/sys/dev/qat/qat_api/include/dc/cpa_dc.h @@ -1,3243 +1,3254 @@ /**************************************************************************** * * BSD LICENSE * * Copyright(c) 2007-2023 Intel Corporation. All rights reserved. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name of Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * ***************************************************************************/ /* ***************************************************************************** * Doxygen group definitions ****************************************************************************/ /** ***************************************************************************** * @file cpa_dc.h * * @defgroup cpaDc Data Compression API * * @ingroup cpa * * @description * These functions specify the API for Data Compression operations. * * The Data Compression API has the following: * 1) Session based API functions * These functions require a session to be created before performing any * DC operations. Subsequent DC API functions make use of the returned * Session Handle within their structures or function prototypes. * 2) Session-less or No-Session (Ns) based API functions. * These functions do not require a session to be initialized before * performing DC operations. They are "one-shot" API function calls * that submit DC requests directly using the supplied parameters. * * @remarks * * *****************************************************************************/ #ifndef CPA_DC_H #define CPA_DC_H #ifdef __cplusplus extern"C" { #endif #ifndef CPA_H #include "cpa.h" #endif /** ***************************************************************************** * @ingroup cpaDc * CPA Dc Major Version Number * @description * The CPA_DC API major version number. 
This number will be incremented * when significant churn to the API has occurred. The combination of the * major and minor number definitions represent the complete version number * for this interface. * *****************************************************************************/ #define CPA_DC_API_VERSION_NUM_MAJOR (3) /** ***************************************************************************** * @ingroup cpaDc * CPA DC Minor Version Number * @description * The CPA_DC API minor version number. This number will be incremented * when minor changes to the API has occurred. The combination of the major * and minor number definitions represent the complete version number for * this interface. * *****************************************************************************/ #define CPA_DC_API_VERSION_NUM_MINOR (2) /** ***************************************************************************** * @file cpa_dc.h * @ingroup cpaDc * CPA DC API version at least * @description * The minimal supported CPA_DC API version. Allow to check if the API * version is equal or above some version to avoid compilation issues * with an older API version. * *****************************************************************************/ #define CPA_DC_API_VERSION_AT_LEAST(major, minor) \ (CPA_DC_API_VERSION_NUM_MAJOR > major || \ (CPA_DC_API_VERSION_NUM_MAJOR == major && \ CPA_DC_API_VERSION_NUM_MINOR >= minor)) /** ***************************************************************************** * @file cpa_dc.h * @ingroup cpaDc * CPA DC API version less than * @description * The maximum supported CPA_DC API version. Allow to check if the API * version is below some version to avoid compilation issues with a newer * API version. 
* *****************************************************************************/ #define CPA_DC_API_VERSION_LESS_THAN(major, minor) \ (CPA_DC_API_VERSION_NUM_MAJOR < major || \ (CPA_DC_API_VERSION_NUM_MAJOR == major && \ CPA_DC_API_VERSION_NUM_MINOR < minor)) /** ***************************************************************************** * @ingroup cpaDc * Size of bitmap needed for compression chaining capabilities. * * @description * Defines the number of bits in the bitmap to represent supported * chaining capabilities @ref dcChainCapInfo. Should be set to * at least one greater than the largest value in the enumerated type * @ref CpaDcChainOperations, so that the value of the enum constant * can also be used as the bit position in the bitmap. * * A larger value was chosen to allow for extensibility without the need * to change the size of the bitmap (to ease backwards compatibility in * future versions of the API). * *****************************************************************************/ #define CPA_DC_CHAIN_CAP_BITMAP_SIZE (32) /** ***************************************************************************** * @ingroup cpaDc * Compression API session handle type * * @description * Handle used to uniquely identify a Compression API session handle. This * handle is established upon registration with the API using * cpaDcInitSession(). * * * *****************************************************************************/ typedef void * CpaDcSessionHandle; /** ***************************************************************************** * @ingroup cpaDc * Supported flush flags * * @description * This enumerated list identifies the types of flush that can be * specified for stateful and stateless cpaDcCompressData and * cpaDcDecompressData functions. * *****************************************************************************/ typedef enum _CpaDcFlush { CPA_DC_FLUSH_NONE = 0, /**< No flush request. 
*/ CPA_DC_FLUSH_FINAL, /**< Indicates that the input buffer contains all of the data for the compression session allowing any buffered data to be released. For Deflate, BFINAL is set in the compression header.*/ CPA_DC_FLUSH_SYNC, /**< Used for stateful deflate compression to indicate that all pending output is flushed, byte aligned, to the output buffer. The session state is not reset.*/ CPA_DC_FLUSH_FULL /**< Used for deflate compression to indicate that all pending output is flushed to the output buffer and the session state is reset.*/ } CpaDcFlush; /** ***************************************************************************** * @ingroup cpaDc * Supported Huffman Tree types * * @description * This enumeration lists support for Huffman Tree types. * Selecting Static Huffman trees generates compressed blocks with an RFC * 1951 header specifying "compressed with fixed Huffman trees". * * Selecting Full Dynamic Huffman trees generates compressed blocks with * an RFC 1951 header specifying "compressed with dynamic Huffman codes". * The headers are calculated on the data being compressed, requiring two * passes. * * Selecting Precompiled Huffman Trees generates blocks with RFC 1951 * dynamic headers. The headers are pre-calculated and are specified by * the file type. * *****************************************************************************/ typedef enum _CpaDcHuffType { CPA_DC_HT_STATIC = 0, /**< Static Huffman Trees */ CPA_DC_HT_PRECOMP, /**< Precompiled Huffman Trees */ CPA_DC_HT_FULL_DYNAMIC /**< Full Dynamic Huffman Trees */ } CpaDcHuffType; /** ***************************************************************************** * @ingroup cpaDc * Supported compression types * * @description * This enumeration lists the supported data compression algorithms. * In combination with CpaDcChecksum it is used to decide on the file * header and footer format. 
* *****************************************************************************/ typedef enum _CpaDcCompType { CPA_DC_DEFLATE = 3, /**< Deflate Compression */ CPA_DC_LZ4, /**< LZ4 Compression */ CPA_DC_LZ4S /**< LZ4S Compression */ } CpaDcCompType; /** ***************************************************************************** * @ingroup cpaDc * Support for defined algorithm window sizes * * @description * This enumerated list defines the valid window sizes that can be * used with the supported algorithms *****************************************************************************/ typedef enum _CpaDcCompWindowSize { CPA_DC_WINSIZE_4K = 0, /**< Window size of 4KB */ CPA_DC_WINSIZE_8K, /**< Window size of 8KB */ CPA_DC_WINSIZE_16K, /**< Window size of 16KB */ CPA_DC_WINSIZE_32K /**< Window size of 32KB */ } CpaDcCompWindowSize; /** ***************************************************************************** * @ingroup cpaDc * Min match size in bytes * @description * This is the min match size that will be used for the search algorithm. * It is only configurable for LZ4S. 
*****************************************************************************/ typedef enum _CpaDcCompMinMatch { CPA_DC_MIN_3_BYTE_MATCH = 0, /**< Min Match of 3 bytes */ CPA_DC_MIN_4_BYTE_MATCH /**< Min Match of 4 bytes */ } CpaDcCompMinMatch; /** ***************************************************************************** * @ingroup cpaDc * Maximum LZ4 output block size * @description * Maximum LZ4 output block size *****************************************************************************/ typedef enum _CpaDcCompLZ4BlockMaxSize { CPA_DC_LZ4_MAX_BLOCK_SIZE_64K = 0, /**< Maximum block size 64K */ CPA_DC_LZ4_MAX_BLOCK_SIZE_256K, /**< Maximum block size 256K */ CPA_DC_LZ4_MAX_BLOCK_SIZE_1M, /**< Maximum block size 1M */ CPA_DC_LZ4_MAX_BLOCK_SIZE_4M, /**< Maximum block size 4M */ } CpaDcCompLZ4BlockMaxSize; /** ***************************************************************************** * @ingroup cpaDc * Supported checksum algorithms * * @description * This enumeration lists the supported checksum algorithms * Used to decide on file header and footer specifics. * *****************************************************************************/ typedef enum _CpaDcChecksum { CPA_DC_NONE = 0, /**< No checksum required */ CPA_DC_CRC32, /**< Application requires a CRC32 checksum */ CPA_DC_ADLER32, /**< Application requires Adler-32 checksum */ CPA_DC_CRC32_ADLER32, /**< Application requires both CRC32 and Adler-32 checksums */ CPA_DC_XXHASH32, /**< Application requires xxHash-32 checksum */ } CpaDcChecksum; /** ***************************************************************************** * @ingroup cpaDc * Supported session directions * * @description * This enumerated list identifies the direction of a session. * A session can be compress, decompress or both. 
* *****************************************************************************/ typedef enum _CpaDcSessionDir { CPA_DC_DIR_COMPRESS = 0, /**< Session will be used for compression */ CPA_DC_DIR_DECOMPRESS, /**< Session will be used for decompression */ CPA_DC_DIR_COMBINED /**< Session will be used for both compression and decompression */ } CpaDcSessionDir; typedef CpaDcSessionDir CpaDcDir; /** ***************************************************************************** * @ingroup cpaDc * Supported session state settings * * @description * This enumerated list identifies the stateful setting of a session. * A session can be either stateful or stateless. * * Stateful sessions are limited to have only one in-flight message per * session. This means a compress or decompress request must be complete * before a new request can be started. This applies equally to sessions * that are uni-directional in nature and sessions that are combined * compress and decompress. Completion occurs when the synchronous function * returns, or when the asynchronous callback function has completed. * *****************************************************************************/ typedef enum _CpaDcSessionState { CPA_DC_STATEFUL = 0, /**< Session will be stateful, implying that state may need to be saved in some situations */ CPA_DC_STATELESS /**< Session will be stateless, implying no state will be stored*/ } CpaDcSessionState; typedef CpaDcSessionState CpaDcState; /** ***************************************************************************** * @ingroup cpaDc * Supported compression levels * * @description * This enumerated lists the supported compressed levels. * Lower values will result in less compressibility in less time. 
* * *****************************************************************************/ typedef enum _CpaDcCompLvl { CPA_DC_L1 = 1, /**< Compression level 1 */ CPA_DC_L2, /**< Compression level 2 */ CPA_DC_L3, /**< Compression level 3 */ CPA_DC_L4, /**< Compression level 4 */ CPA_DC_L5, /**< Compression level 5 */ CPA_DC_L6, /**< Compression level 6 */ CPA_DC_L7, /**< Compression level 7 */ CPA_DC_L8, /**< Compression level 8 */ CPA_DC_L9, /**< Compression level 9 */ CPA_DC_L10, /**< Compression level 10 */ CPA_DC_L11, /**< Compression level 11 */ CPA_DC_L12 /**< Compression level 12 */ } CpaDcCompLvl; /** ***************************************************************************** * @ingroup cpaDc * Supported additional details from accelerator * * @description * This enumeration lists the supported additional details from the * accelerator. These may be useful in determining the best way to * recover from a failure. * * *****************************************************************************/ typedef enum _CpaDcReqStatus { CPA_DC_OK = 0, /**< No error detected by compression slice */ CPA_DC_INVALID_BLOCK_TYPE = -1, /**< Invalid block type (type == 3) */ CPA_DC_BAD_STORED_BLOCK_LEN = -2, /**< Stored block length did not match one's complement */ CPA_DC_TOO_MANY_CODES = -3, /**< Too many length or distance codes */ CPA_DC_INCOMPLETE_CODE_LENS = -4, /**< Code length codes incomplete */ CPA_DC_REPEATED_LENS = -5, /**< Repeated lengths with no first length */ CPA_DC_MORE_REPEAT = -6, /**< Repeat more than specified lengths */ CPA_DC_BAD_LITLEN_CODES = -7, /**< Invalid literal/length code lengths */ CPA_DC_BAD_DIST_CODES = -8, /**< Invalid distance code lengths */ CPA_DC_INVALID_CODE = -9, /**< Invalid literal/length or distance code in fixed or dynamic block */ CPA_DC_INVALID_DIST = -10, /**< Distance is too far back in fixed or dynamic block */ CPA_DC_OVERFLOW = -11, /**< Overflow detected. This is an indication that output buffer has overflowed. 
* For stateful sessions, this is a warning (the input can be adjusted and * resubmitted). * For stateless sessions this is an error condition */ CPA_DC_SOFTERR = -12, /**< Other non-fatal detected */ CPA_DC_FATALERR = -13, /**< Fatal error detected */ CPA_DC_MAX_RESUBITERR = -14, /**< On an error being detected, the firmware attempted to correct and resubmitted the * request, however, the maximum resubmit value was exceeded */ CPA_DC_INCOMPLETE_FILE_ERR = -15, /**< The input file is incomplete. Note this is an indication that the request was * submitted with a CPA_DC_FLUSH_FINAL, however, a BFINAL bit was not found in the * request */ CPA_DC_WDOG_TIMER_ERR = -16, /**< The request was not completed as a watchdog timer hardware event occurred */ CPA_DC_EP_HARDWARE_ERR = -17, /**< Request was not completed as an end point hardware error occurred (for * example, a parity error) */ CPA_DC_VERIFY_ERROR = -18, /**< Error detected during "compress and verify" operation */ CPA_DC_EMPTY_DYM_BLK = -19, /**< Decompression request contained an empty dynamic stored block * (not supported) */ CPA_DC_CRC_INTEG_ERR = -20, /**< A data integrity CRC error was detected */ + CPA_DC_REGION_OUT_OF_BOUNDS = -21, + /**< Error returned when decompression ends before the specified partial + * decompression region was produced */ CPA_DC_LZ4_MAX_BLOCK_SIZE_EXCEEDED = -93, /**< LZ4 max block size exceeded */ CPA_DC_LZ4_BLOCK_OVERFLOW_ERR = -95, /**< LZ4 Block Overflow Error */ CPA_DC_LZ4_TOKEN_IS_ZERO_ERR = -98, /**< LZ4 Decoded token offset or token length is zero */ CPA_DC_LZ4_DISTANCE_OUT_OF_RANGE_ERR = -100, /**< LZ4 Distance out of range for len/distance pair */ } CpaDcReqStatus; /** ***************************************************************************** * @ingroup cpaDc * Supported modes for automatically selecting the best compression type. 
 *
 * @description
 *      This enumeration lists the supported modes for automatically selecting
 *      the best encoding which would lead to the best compression results.
 *
 *      When CPA_DC_ASB_ENABLED is used the output will be a format compliant
 *      block, whether the data is compressed or not.
 *
 *      The following values are deprecated and should not be used. They
 *      will be removed in a future version of this file.
 *      - CPA_DC_ASB_STATIC_DYNAMIC
 *      - CPA_DC_ASB_UNCOMP_STATIC_DYNAMIC_WITH_STORED_HDRS
 *      - CPA_DC_ASB_UNCOMP_STATIC_DYNAMIC_WITH_NO_HDRS
 *
 *****************************************************************************/
typedef enum _CpaDcAutoSelectBest
{
    CPA_DC_ASB_DISABLED = 0,
    /**< Auto select best mode is disabled */
    CPA_DC_ASB_STATIC_DYNAMIC = 1,
    /**< Auto select between static and dynamic compression */
    CPA_DC_ASB_UNCOMP_STATIC_DYNAMIC_WITH_STORED_HDRS = 2,
    /**< Auto select between uncompressed, static and dynamic compression,
     * using stored block deflate headers if uncompressed is selected */
    CPA_DC_ASB_UNCOMP_STATIC_DYNAMIC_WITH_NO_HDRS = 3,
    /**< Auto select between uncompressed, static and dynamic compression,
     * using no deflate headers if uncompressed is selected */
    CPA_DC_ASB_ENABLED = 4,
    /**< Auto select best mode is enabled */
} CpaDcAutoSelectBest;

/**
 *****************************************************************************
 * @ingroup cpaDc
 *      Supported modes for skipping regions of input or output buffers.
 *
 * @description
 *      This enumeration lists the supported modes for skipping regions of
 *      input or output buffers.
 *
 *****************************************************************************/
typedef enum _CpaDcSkipMode
{
    CPA_DC_SKIP_DISABLED = 0,
    /**< Skip mode is disabled */
    CPA_DC_SKIP_AT_START = 1,
    /**< Skip region is at the start of the buffer. */
    CPA_DC_SKIP_AT_END = 2,
    /**< Skip region is at the end of the buffer. */
    CPA_DC_SKIP_STRIDE = 3
    /**< Skip region occurs at regular intervals within the buffer.
     * CpaDcSkipData.strideLength specifies the number of bytes between each
     * skip region. */
} CpaDcSkipMode;

/**
 *****************************************************************************
 * @ingroup cpaDc
 *      Service specific return codes
 *
 * @description
 *      Compression specific return codes
 *
 *****************************************************************************/
#define CPA_DC_BAD_DATA (-100)
/**< Compression-specific bad-data error code.
 * NOTE(review): the documentation between this define and the notes below
 * appears truncated in this copy of the file (the function documentation
 * header for cpaDcCompressData() is missing) -- confirm against the
 * upstream cpa_dc.h. */
/**
 * -# The implementation communicates the amount of data consumed from the
 *    source buffer list via the pResults->consumed arg.
 * -# The implementation communicates the amount of data in the
 *    destination buffer list via pResults->produced arg.
 *
 * Source Buffer Setup Rules
 * -# The buffer list must have the correct number of flat buffers. This
 *    is specified by the numBuffers element of the CpaBufferList.
 * -# Each flat buffer must have a pointer to contiguous memory that has
 *    been allocated by the calling application. The
 *    number of octets to be compressed or decompressed must be stored
 *    in the dataLenInBytes element of the flat buffer.
 * -# It is permissible to have one or more flat buffers with a zero length
 *    data store. This function will process all flat buffers until the
 *    destination buffer is full or all source data has been processed.
 *    If a buffer has zero length, then no data will be processed from
 *    that buffer.
 *
 * Source Buffer Processing Rules.
 * -# The buffer list is processed in index order - SrcBuff->pBuffers[0]
 *    will be completely processed before SrcBuff->pBuffers[1] begins to
 *    be processed.
 * -# The application must drain the destination buffers.
 *    If the source data was not completely consumed, the application
 *    must resubmit the request.
 * -# On return, the pResults->consumed will indicate the number of bytes
 *    consumed from the input buffers.
 *
 * Destination Buffer Setup Rules
 * -# The destination buffer list must have storage for processed data.
 *    This implies at least one flat buffer must exist in the buffer list.
 * -# For each flat buffer in the buffer list, the dataLenInBytes element
 *    must be set to the size of the buffer space.
* -# It is permissible to have one or more flat buffers with a zero length * data store. * If a buffer has zero length, then no data will be added to * that buffer. * * Destination Buffer Processing Rules. * -# The buffer list is processed in index order - DestBuff->pBuffers[0] * will be completely processed before DestBuff->pBuffers[1] begins to * be processed. * -# On return, the pResults->produced will indicate the number of bytes * written to the output buffers. * -# If processing has not been completed, the application must drain the * destination buffers and resubmit the request. The application must * reset the dataLenInBytes for each flat buffer in the destination * buffer list. * * Checksum rules. * If a checksum is specified in the session setup data, then: * -# For the first request for a particular data segment the checksum * is initialised internally by the implementation. * -# The checksum is maintained by the implementation between calls * until the flushFlag is set to CPA_DC_FLUSH_FINAL indicating the * end of a particular data segment. * -# Intermediate checksum values are returned to the application, * via the CpaDcRqResults structure, in response to each request. * However these checksum values are not guaranteed to the valid * until the call with flushFlag set to CPA_DC_FLUSH_FINAL * completes successfully. * * The application should set flushFlag to * CPA_DC_FLUSH_FINAL to indicate processing a particular data segment * is complete. It should be noted that this function may have to be * called more than once to process data after the flushFlag parameter has * been set to CPA_DC_FLUSH_FINAL if the destination buffer fills. Refer * to buffer processing rules. * * For stateful operations, when the function is invoked with flushFlag * set to CPA_DC_FLUSH_NONE or CPA_DC_FLUSH_SYNC, indicating more data * is yet to come, the function may or may not retain data. 
When the * function is invoked with flushFlag set to CPA_DC_FLUSH_FULL or * CPA_DC_FLUSH_FINAL, the function will process all buffered data. * * For stateless operations, CPA_DC_FLUSH_FINAL will cause the BFINAL * bit to be set for deflate compression. The initial checksum for the * stateless operation should be set to 0. CPA_DC_FLUSH_NONE and * CPA_DC_FLUSH_SYNC should not be used for stateless operations. * * It is possible to maintain checksum and length information across * cpaDcCompressData() calls with a stateless session without maintaining * the full history state that is maintained by a stateful session. In this * mode of operation, an initial checksum value of 0 is passed into the * first cpaDcCompressData() call with the flush flag set to * CPA_DC_FLUSH_FULL. On subsequent calls to cpaDcCompressData() for this * session, the checksum passed to cpaDcCompressData should be set to the * checksum value produced by the previous call to cpaDcCompressData(). * When the last block of input data is passed to cpaDcCompressData(), the * flush flag should be set to CP_DC_FLUSH_FINAL. This will cause the BFINAL * bit to be set in a deflate stream. It is the responsibility of the calling * application to maintain overall lengths across the stateless requests * and to pass the checksum produced by one request into the next request. * * When an instance supports compressAndVerifyAndRecover, it is enabled by * default when using cpaDcCompressData(). If this feature needs to be * disabled, cpaDcCompressData2() must be used. * * Synchronous or Asynchronous operation of the API is determined by * the value of the callbackFn parameter passed to cpaDcInitSession() * when the sessionHandle was setup. If a non-NULL value was specified * then the supplied callback function will be invoked asynchronously * with the response of this request. * * Response ordering: * For each session, the implementation must maintain the order of * responses. 
That is, if in asynchronous mode, the order of the callback * functions must match the order of jobs submitted by this function. * In a simple synchronous mode implementation, the practice of submitting * a request and blocking on its completion ensure ordering is preserved. * * This limitation does not apply if the application employs multiple * threads to service a single session. * * If this API is invoked asynchronous, the return code represents * the success or not of asynchronously scheduling the request. * The results of the operation, along with the amount of data consumed * and produced become available when the callback function is invoked. * As such, pResults->consumed and pResults->produced are available * only when the operation is complete. * * The application must not use either the source or destination buffers * until the callback has completed. * * @see * None * *****************************************************************************/ CpaStatus cpaDcCompressData( CpaInstanceHandle dcInstance, CpaDcSessionHandle pSessionHandle, CpaBufferList *pSrcBuff, CpaBufferList *pDestBuff, CpaDcRqResults *pResults, CpaDcFlush flushFlag, void *callbackTag ); /** ***************************************************************************** * @ingroup cpaDc * Submit a request to compress a buffer of data. * * @description * This API consumes data from the input buffer and generates compressed * data in the output buffer. This API is very similar to * cpaDcCompressData() except it provides a CpaDcOpData structure for * passing additional input parameters not covered in cpaDcCompressData(). * * @context * When called as an asynchronous function it cannot sleep. It can be * executed in a context that does not permit sleeping. * When called as a synchronous function it may sleep. It MUST NOT be * executed in a context that DOES NOT permit sleeping. * @assumptions * None * @sideEffects * None * @blocking * Yes when configured to operate in synchronous mode. 
* @reentrant * No * @threadSafe * Yes * * @param[in] dcInstance Target service instance. * @param[in,out] pSessionHandle Session handle. * @param[in] pSrcBuff Pointer to data buffer for compression. * @param[in] pDestBuff Pointer to buffer space for data after * compression. * @param[in,out] pOpData Additional parameters. * @param[in,out] pResults Pointer to results structure * @param[in] callbackTag User supplied value to help correlate * the callback with its associated * request. * * @retval CPA_STATUS_SUCCESS Function executed successfully. * @retval CPA_STATUS_FAIL Function failed. * @retval CPA_STATUS_RETRY Resubmit the request. * @retval CPA_STATUS_INVALID_PARAM Invalid parameter passed in. * @retval CPA_STATUS_RESOURCE Error related to system resources. * @retval CPA_DC_BAD_DATA The input data was not properly formed. * @retval CPA_STATUS_UNSUPPORTED Function is not supported. * @retval CPA_STATUS_RESTARTING API implementation is restarting. Resubmit * the request. * * @pre * pSessionHandle has been setup using cpaDcInitSession() * @post * pSessionHandle has session related state information * @note * This function passes control to the compression service for processing * * @see * cpaDcCompressData() * *****************************************************************************/ CpaStatus cpaDcCompressData2( CpaInstanceHandle dcInstance, CpaDcSessionHandle pSessionHandle, CpaBufferList *pSrcBuff, CpaBufferList *pDestBuff, CpaDcOpData *pOpData, CpaDcRqResults *pResults, void *callbackTag ); /** ***************************************************************************** * @ingroup cpaDc * Submit a request to compress a buffer of data without requiring a * session to be created. This is a No-Session (Ns) variant of the * cpaDcCompressData function. * * @description * This API consumes data from the input buffer and generates compressed * data in the output buffer. Unlike the other compression APIs this * does not use a previously created session. 
This is a "one-shot" API * that requests can be directly submitted to. * * @context * When called as an asynchronous function it cannot sleep. It can be * executed in a context that does not permit sleeping. * When called as a synchronous function it may sleep. It MUST NOT be * executed in a context that DOES NOT permit sleeping. * @assumptions * None * @sideEffects * None * @blocking * Yes when configured to operate in synchronous mode. * @reentrant * No * @threadSafe * Yes * * @param[in] dcInstance Target service instance. * @param[in] pSetupData Configuration structure for compression. * @param[in] pSrcBuff Pointer to data buffer for compression. * @param[in] pDestBuff Pointer to buffer space for data after * compression. * @param[in] pOpData Additional input parameters. * @param[in,out] pResults Pointer to results structure * @param[in] callbackFn For synchronous operation this callback * shall be a null pointer. * @param[in] callbackTag User supplied value to help correlate * the callback with its associated * request. * * @retval CPA_STATUS_SUCCESS Function executed successfully. * @retval CPA_STATUS_FAIL Function failed. * @retval CPA_STATUS_RETRY Resubmit the request. * @retval CPA_STATUS_INVALID_PARAM Invalid parameter passed in. * @retval CPA_STATUS_RESOURCE Error related to system resources. * @retval CPA_STATUS_UNSUPPORTED Function is not supported. * @retval CPA_STATUS_RESTARTING API implementation is restarting. Resubmit * the request. * * @pre * None * @post * None * @note * This function passes control to the compression service for processing * * Checksum rules. * The checksum rules are the same as those for the session based APIs * (cpaDcCompressData or cpaDcCompressData2) with the following exception. * -# If the algorithm specified is CPA_DC_LZ4 or CPA_DC_LZ4S the xxHash32 * checksum will not be maintained across calls to the API. 
The * implication is that the xxHash32 value will only be valid for the * output of a single request, no state will be saved. If an LZ4 frame is * required, even in recoverable error scenarios such as CPA_DC_OVERFLOW, * the checksum will not be continued. If that is required the session * based API must be used. * * @see * None * *****************************************************************************/ CpaStatus cpaDcNsCompressData( CpaInstanceHandle dcInstance, CpaDcNsSetupData *pSetupData, CpaBufferList *pSrcBuff, CpaBufferList *pDestBuff, CpaDcOpData *pOpData, CpaDcRqResults *pResults, CpaDcCallbackFn callbackFn, void *callbackTag ); /** ***************************************************************************** * @ingroup cpaDc * Submit a request to decompress a buffer of data. * * @description * This API consumes compressed data from the input buffer and generates * uncompressed data in the output buffer. * * @context * When called as an asynchronous function it cannot sleep. It can be * executed in a context that does not permit sleeping. * When called as a synchronous function it may sleep. It MUST NOT be * executed in a context that DOES NOT permit sleeping. * @assumptions * None * @sideEffects * None * @blocking * Yes when configured to operate in synchronous mode. * @reentrant * No * @threadSafe * Yes * * @param[in] dcInstance Target service instance. * @param[in,out] pSessionHandle Session handle. * @param[in] pSrcBuff Pointer to data buffer for compression. * @param[in] pDestBuff Pointer to buffer space for data * after decompression. * @param[in,out] pResults Pointer to results structure * @param[in] flushFlag When set to CPA_DC_FLUSH_FINAL, indicates * that the input buffer contains all of * the data for the compression session, * allowing the function to release * history data. * @param[in] callbackTag User supplied value to help correlate * the callback with its associated * request. 
* * @retval CPA_STATUS_SUCCESS Function executed successfully. * @retval CPA_STATUS_FAIL Function failed. * @retval CPA_STATUS_RETRY Resubmit the request. * @retval CPA_STATUS_INVALID_PARAM Invalid parameter passed in. * @retval CPA_STATUS_RESOURCE Error related to system resources. * @retval CPA_DC_BAD_DATA The input data was not properly formed. * @retval CPA_STATUS_RESTARTING API implementation is restarting. Resubmit * the request. * @retval CPA_STATUS_UNSUPPORTED Function is not supported. * * @pre * pSessionHandle has been setup using cpaDcInitSession() * @post * pSessionHandle has session related state information * @note * This function passes control to the compression service for * decompression. The function returns the status from the service. * * This function may be called repetitively with input until all of the * input has been provided and all the output has been consumed. * * This function has identical buffer processing rules as * cpaDcCompressData(). * * This function has identical checksum processing rules as * cpaDcCompressData(). * * The application should set flushFlag to * CPA_DC_FLUSH_FINAL to indicate processing a particular compressed * data segment is complete. It should be noted that this function may * have to be called more than once to process data after flushFlag * has been set if the destination buffer fills. Refer to * buffer processing rules in cpaDcCompressData(). * * Synchronous or Asynchronous operation of the API is determined by * the value of the callbackFn parameter passed to cpaDcInitSession() * when the sessionHandle was setup. If a non-NULL value was specified * then the supplied callback function will be invoked asynchronously * with the response of this request, along with the callbackTag * specified in the function. * * The same response ordering constraints identified in the * cpaDcCompressData API apply to this function. 
* * @see * cpaDcCompressData() * *****************************************************************************/ CpaStatus cpaDcDecompressData( CpaInstanceHandle dcInstance, CpaDcSessionHandle pSessionHandle, CpaBufferList *pSrcBuff, CpaBufferList *pDestBuff, CpaDcRqResults *pResults, CpaDcFlush flushFlag, void *callbackTag ); /** ***************************************************************************** * @ingroup cpaDc * Submit a request to decompress a buffer of data. * * @description * This API consumes compressed data from the input buffer and generates * uncompressed data in the output buffer. This API is very similar to * cpaDcDecompressData() except it provides a CpaDcOpData structure for * passing additional input parameters not covered in cpaDcDecompressData(). * * @context * When called as an asynchronous function it cannot sleep. It can be * executed in a context that does not permit sleeping. * When called as a synchronous function it may sleep. It MUST NOT be * executed in a context that DOES NOT permit sleeping. * @assumptions * None * @sideEffects * None * @blocking * Yes when configured to operate in synchronous mode. * @reentrant * No * @threadSafe * Yes * * @param[in] dcInstance Target service instance. * @param[in,out] pSessionHandle Session handle. * @param[in] pSrcBuff Pointer to data buffer for compression. * @param[in] pDestBuff Pointer to buffer space for data * after decompression. * @param[in] pOpData Additional input parameters. * @param[in,out] pResults Pointer to results structure * @param[in] callbackTag User supplied value to help correlate * the callback with its associated * request. * * @retval CPA_STATUS_SUCCESS Function executed successfully. * @retval CPA_STATUS_FAIL Function failed. * @retval CPA_STATUS_RETRY Resubmit the request. * @retval CPA_STATUS_INVALID_PARAM Invalid parameter passed in. * @retval CPA_STATUS_RESOURCE Error related to system resources. * @retval CPA_DC_BAD_DATA The input data was not properly formed. 
* @retval CPA_STATUS_UNSUPPORTED Function is not supported. * @retval CPA_STATUS_RESTARTING API implementation is restarting. Resubmit * the request. * * @pre * pSessionHandle has been setup using cpaDcInitSession() * @post * pSessionHandle has session related state information * @note * This function passes control to the compression service for * decompression. The function returns the status from the service. * * @see * cpaDcDecompressData() * cpaDcCompressData2() * cpaDcCompressData() * *****************************************************************************/ CpaStatus cpaDcDecompressData2( CpaInstanceHandle dcInstance, CpaDcSessionHandle pSessionHandle, CpaBufferList *pSrcBuff, CpaBufferList *pDestBuff, CpaDcOpData *pOpData, CpaDcRqResults *pResults, void *callbackTag ); /** ***************************************************************************** * @ingroup cpaDc * Submit a request to decompress a buffer of data without requiring a * session to be created. This is a No-Session (Ns) variant of the * cpaDcDecompressData function. * * @description * This API consumes data from the input buffer and generates decompressed * data in the output buffer. Unlike the other decompression APIs this * does not use a previously created session. This is a "one-shot" API * that requests can be directly submitted to. * * @context * When called as an asynchronous function it cannot sleep. It can be * executed in a context that does not permit sleeping. * When called as a synchronous function it may sleep. It MUST NOT be * executed in a context that DOES NOT permit sleeping. * @assumptions * None * @sideEffects * None * @blocking * Yes when configured to operate in synchronous mode. * @reentrant * No * @threadSafe * Yes * * @param[in] dcInstance Target service instance. * @param[in] pSetupData Configuration structure for decompression.. * @param[in] pSrcBuff Pointer to data buffer for decompression. 
* @param[in] pDestBuff Pointer to buffer space for data * after decompression. * @param[in] pOpData Additional input parameters. * @param[in,out] pResults Pointer to results structure * @param[in] callbackFn For synchronous operation this callback * shall be a null pointer. * @param[in] callbackTag User supplied value to help correlate * the callback with its associated * request. * * @retval CPA_STATUS_SUCCESS Function executed successfully. * @retval CPA_STATUS_FAIL Function failed. * @retval CPA_STATUS_RETRY Resubmit the request. * @retval CPA_STATUS_INVALID_PARAM Invalid parameter passed in. * @retval CPA_STATUS_RESOURCE Error related to system resources. * @retval CPA_STATUS_UNSUPPORTED Function is not supported. * @retval CPA_STATUS_RESTARTING API implementation is restarting. Resubmit * the request. * * @pre * None * @post * None * @note * This function passes control to the decompression service. The function * returns the status from the service. * * @see * cpaDcDecompressData() * cpaDcCompressData2() * cpaDcCompressData() * *****************************************************************************/ CpaStatus cpaDcNsDecompressData( CpaInstanceHandle dcInstance, CpaDcNsSetupData *pSetupData, CpaBufferList *pSrcBuff, CpaBufferList *pDestBuff, CpaDcOpData *pOpData, CpaDcRqResults *pResults, CpaDcCallbackFn callbackFn, void *callbackTag ); /** ***************************************************************************** * @ingroup cpaDc * Generate compression header. * * @description * This function generates the gzip header, zlib header or LZ4 frame * header and stores it in the destination buffer. The type of header * created is determined using the compression algorithm selected using * CpaDcSessionSetupData.compType, for the session associated with the * session handle. * * @context * This function may be call from any context. 
* @assumptions * None * @sideEffects * None * @blocking * No * @reentrant * No * @threadSafe * Yes * * @param[in] pSessionHandle Session handle. * @param[in] pDestBuff Pointer to data buffer where the * compression header will go. * @param[out] count Pointer to counter filled in with * header size. * * @retval CPA_STATUS_SUCCESS Function executed successfully. * @retval CPA_STATUS_FAIL Function failed. * @retval CPA_STATUS_INVALID_PARAM Invalid parameter passed in. * @retval CPA_STATUS_RESTARTING API implementation is restarting. Resubmit * the request. * @retval CPA_STATUS_UNSUPPORTED Function is not supported. * * @pre * pSessionHandle has been setup using cpaDcInitSession() * * @note * When the deflate compression algorithm is used, this function can output * a 10 byte gzip header or 2 byte zlib header to the destination buffer. * The session properties are used to determine the header type. To * output a Gzip or a Zlib header the session must have been initialized * with CpaDcCompType CPA_DC_DEFLATE. * To output a gzip header the session must have been initialized with * CpaDcChecksum CPA_DC_CRC32. To output a zlib header the session must * have been initialized with CpaDcChecksum CPA_DC_ADLER32. * For CpaDcChecksum CPA_DC_NONE no header is output. * * If the compression requires a gzip header, then this header requires * at a minimum the following fields, defined in RFC1952: * ID1: 0x1f * ID2: 0x8b * CM: Compression method = 8 for deflate * * The zlib header is defined in RFC1950 and this function must implement * as a minimum: * CM: four bit compression method - 8 is deflate with window size to * 32k * CINFO: four bit window size (see RFC1950 for details), 7 is 32k * window * FLG: defined as: * - Bits 0 - 4: check bits for CM, CINFO and FLG (see RFC1950) * - Bit 5: FDICT 0 = default, 1 is preset dictionary * - Bits 6 - 7: FLEVEL, compression level (see RFC 1950) * * When LZ4 algorithm is used, this function can output a 7 byte frame * header. 
This function will set the LZ4 frame header with: * - Magic number 0x184D2204 * - The LZ4 max block size defined in the CpaDcSessionSetupData * - Flag byte as: * * Version = 1 * * Block independence = 0 * * Block checksum = 0 * * Content size present = 0 * * Content checksum present = 1 * * Dictionary ID present = 0 * - Content size = 0 * - Dictionary ID = 0 * - Header checksum = 1 byte representing the second byte of the * XXH32 of the frame decriptor field. * * The counter parameter will be set to the number of bytes added to the * buffer. The pData will be not be changed. * * For any of the compression algorithms used, the application is * responsible to offset the pData pointer in CpaBufferList by the length * of the header before calling the CpaDcCompressData() or * CpaDcCompressData2() functions. * @see * None * *****************************************************************************/ CpaStatus cpaDcGenerateHeader( CpaDcSessionHandle pSessionHandle, CpaFlatBuffer *pDestBuff, Cpa32U *count ); /** ***************************************************************************** * @ingroup cpaDc * Generate compression footer. * * @description * This function generates the footer for gzip, zlib or LZ4. * The generated footer is stored it in the destination buffer. * The type of footer created is determined using the compression * algorithm selected for the session associated with the session handle. * * @context * This function may be call from any context. * @assumptions * None * @sideEffects * All session variables are reset * @blocking * No * @reentrant * No * @threadSafe * Yes * * @param[in,out] pSessionHandle Session handle. * @param[in] pDestBuff Pointer to data buffer where the * compression footer will go. * @param[in,out] pResults Pointer to results structure filled by * CpaDcCompressData. Updated with the * results of this API call * * @retval CPA_STATUS_SUCCESS Function executed successfully. * @retval CPA_STATUS_FAIL Function failed. 
* @retval CPA_STATUS_UNSUPPORTED Function is not supported. * @retval CPA_STATUS_INVALID_PARAM Invalid parameter passed in. * @retval CPA_STATUS_RESTARTING API implementation is restarting. Resubmit * the request. * @retval CPA_STATUS_UNSUPPORTED Function is not supported. * * @pre * pSessionHandle has been setup using cpaDcInitSession() * pResults structure has been filled by CpaDcCompressData(). * * @note * Depending on the session variables, this function can add the * alder32 footer to the zlib compressed data as defined in RFC1950. * If required, it can also add the gzip footer, which is the crc32 of the * uncompressed data and the length of the uncompressed data. * This section is defined in RFC1952. The session variables used to * determine the header type are CpaDcCompType and CpaDcChecksum, see * cpaDcGenerateHeader for more details. * * For LZ4 compression, this function adds the LZ4 frame footer * using XXH32 algorithm of the uncompressed data. The XXH32 checksum is * added after the end mark. This section is defined in the documentation * of the LZ4 frame format at: * https://github.com/lz4/lz4/blob/dev/doc/lz4_Frame_format.md * * An artifact of invoking this function for writing the footer data is * that all opaque session specific data is re-initialized. If the * compression level and file types are consistent, the upper level * application can continue processing compression requests using the * same session handle. * * The produced element of the pResults structure will be incremented by * the numbers bytes added to the buffer. The pointer to the buffer will * not be modified. It is necessary for the application to ensure that * there is always sufficient memory in the destination buffer to append * the footer. In the event that the destination buffer would be too small * to accept the footer, overflow will not be reported. 
* * @see * None * *****************************************************************************/ CpaStatus cpaDcGenerateFooter( CpaDcSessionHandle pSessionHandle, CpaFlatBuffer *pDestBuff, CpaDcRqResults *pResults ); /** ***************************************************************************** * @ingroup cpaDc * Generate compression header without requiring a session to be created. * This is a No-Session (Ns) variant of the cpaDcGenerateHeader function. * * @description * This API generates the required compression format header and stores it * in the output buffer. * * @context * This function may be called from any context. * @assumptions * None * @sideEffects * None * @blocking * No * @reentrant * No * @threadSafe * Yes * * @param[in] pSetupData Pointer to Ns Configuration structure. * @param[in] pDestBuff Pointer to data buffer where the * compression header will go. * @param[out] count Pointer to counter filled in with * header size. * * @retval CPA_STATUS_SUCCESS Function executed successfully. * @retval CPA_STATUS_FAIL Function failed. * @retval CPA_STATUS_INVALID_PARAM Invalid parameter passed in. * @retval CPA_STATUS_RESTARTING API implementation is restarting. Resubmit * the request. * @retval CPA_STATUS_UNSUPPORTED Function is not supported. * * @pre * None * * @note * This function outputs the required compression format header to * the destination buffer. The CpaDcNsSetupData structure fields are used to * determine the header type. * * To output an LZ4 header the structure must have been initialized with * with CpaDcCompType CPA_DC_LZ4. * To output a gzip or zlib header the structure must have been initialized * with CpaDcCompType CPA_DC_DEFLATE. * To output a gzip header the structure must have been initialized with * CpaDcChecksum CPA_DC_CRC32. * To output a zlib header the structure must have been initialized with * CpaDcChecksum CPA_DC_ADLER32. * For CpaDcChecksum CPA_DC_NONE no header is output. 
* * The counter parameter will be set to the number of bytes added to the * buffer. * * @see * cpaDcGenerateHeader * *****************************************************************************/ CpaStatus cpaDcNsGenerateHeader( CpaDcNsSetupData *pSetupData, CpaFlatBuffer *pDestBuff, Cpa32U *count ); /** ***************************************************************************** * @ingroup cpaDc * Generate compression footer without requiring a session to be created. * This is a No-Session (Ns) variant of the cpaDcGenerateFooter function. * * @description * This API generates the footer for the required format and stores it in * the destination buffer. * @context * This function may be call from any context. * @assumptions * None * @sideEffects * All session variables are reset * @blocking * No * @reentrant * No * @threadSafe * Yes * * @param[in] pSetupData Pointer to Ns Configuration structure. * @param[in] totalLength Total accumulated length of input data * processed. See description for formats * that make use of this parameter. * @param[in] pDestBuff Pointer to data buffer where the * compression footer will go. * @param[in,out] pResults Pointer to results structure filled by * CpaDcNsCompressData. Updated with the * results of this API call * * @retval CPA_STATUS_SUCCESS Function executed successfully. * @retval CPA_STATUS_FAIL Function failed. * @retval CPA_STATUS_UNSUPPORTED Function is not supported. * @retval CPA_STATUS_INVALID_PARAM Invalid parameter passed in. * @retval CPA_STATUS_RESTARTING API implementation is restarting. Resubmit * the request. * @retval CPA_STATUS_UNSUPPORTED Function is not supported. * * @pre * pResults structure has been filled by CpaDcNsCompressData(). * * @note * This function outputs the required compression format footer to * the destination buffer. The CpaDcNsSetupData structure fields are used to * determine the footer type created. 
* * To output an LZ4 footer the structure must have been initialized * with CpaDcCompType CPA_DC_LZ4. * To output a gzip or zlib footer the structure must have been initialized * with CpaDcCompType CPA_DC_DEFLATE. * To output a gzip footer the structure must have been initialized with * CpaDcChecksum CPA_DC_CRC32 and the totalLength parameter initialized to * the total accumulated length of data processed. * To output a zlib footer the structure must have been initialized with * CpaDcChecksum CPA_DC_ADLER32. * For CpaDcChecksum CPA_DC_NONE no footer is output. * * The produced element of the pResults structure will be incremented by the * number of bytes added to the buffer. The pointer to the buffer * will not be modified. * * @see * CpaDcNsSetupData * cpaDcNsGenerateHeader * cpaDcGenerateFooter * *****************************************************************************/ CpaStatus cpaDcNsGenerateFooter( CpaDcNsSetupData *pSetupData, Cpa64U totalLength, CpaFlatBuffer *pDestBuff, CpaDcRqResults *pResults ); /** ***************************************************************************** * @ingroup cpaDc * Retrieve statistics * * @description * This API retrieves the current statistics for a compression instance. * * @context * This function may be called from any context. * @assumptions * None * @sideEffects * None * @blocking * Yes * @reentrant * No * @threadSafe * Yes * * @param[in] dcInstance Instance handle. * @param[out] pStatistics Pointer to statistics structure. * * @retval CPA_STATUS_SUCCESS Function executed successfully. * @retval CPA_STATUS_FAIL Function failed. * @retval CPA_STATUS_INVALID_PARAM Invalid parameter passed in. * @retval CPA_STATUS_RESTARTING API implementation is restarting. Resubmit * the request. * @retval CPA_STATUS_UNSUPPORTED Function is not supported. 
* * @pre * None * @post * None * * @see * None * *****************************************************************************/ CpaStatus cpaDcGetStats( CpaInstanceHandle dcInstance, CpaDcStats *pStatistics ); /*****************************************************************************/ /* Instance Discovery Functions */ /** ***************************************************************************** * @ingroup cpaDc * Get the number of device instances that are supported by the API * implementation. * * @description * * This function will get the number of device instances that are supported * by an implementation of the compression API. This number is then used to * determine the size of the array that must be passed to * cpaDcGetInstances(). * * @context * This function MUST NOT be called from an interrupt context as it MAY * sleep. * @assumptions * None * @sideEffects * None * @blocking * This function is synchronous and blocking. * @reentrant * No * @threadSafe * Yes * * @param[out] pNumInstances Pointer to where the number of * instances will be written. * * @retval CPA_STATUS_SUCCESS Function executed successfully. * @retval CPA_STATUS_FAIL Function failed. * @retval CPA_STATUS_INVALID_PARAM Invalid parameter passed in. * @retval CPA_STATUS_UNSUPPORTED Function is not supported. * * @pre * None * @post * None * @note * This function operates in a synchronous manner and no asynchronous * callback will be generated * * @see * cpaDcGetInstances * *****************************************************************************/ CpaStatus cpaDcGetNumInstances(Cpa16U* pNumInstances); /** ***************************************************************************** * @ingroup cpaDc * Get the handles to the device instances that are supported by the * API implementation. * * @description * * This function will return handles to the device instances that are * supported by an implementation of the compression API. 
These instance * handles can then be used as input parameters with other compression API * functions. * * This function will populate an array that has been allocated by the * caller. The size of this array is determined by the * cpaDcGetNumInstances() function. * * @context * This function MUST NOT be called from an interrupt context as it MAY * sleep. * @assumptions * None * @sideEffects * None * @blocking * This function is synchronous and blocking. * @reentrant * No * @threadSafe * Yes * * @param[in] numInstances Size of the array. * @param[out] dcInstances Pointer to where the instance * handles will be written. * * @retval CPA_STATUS_SUCCESS Function executed successfully. * @retval CPA_STATUS_FAIL Function failed. * @retval CPA_STATUS_INVALID_PARAM Invalid parameter passed in. * @retval CPA_STATUS_UNSUPPORTED Function is not supported. * * @pre * None * @post * None * @note * This function operates in a synchronous manner and no asynchronous * callback will be generated * * @see * cpaDcGetNumInstances * *****************************************************************************/ CpaStatus cpaDcGetInstances(Cpa16U numInstances, CpaInstanceHandle* dcInstances); /** ***************************************************************************** * @ingroup cpaDc * Compression Component utility function to determine the number of * intermediate buffers required by an implementation. * * @description * This function will determine the number of intermediate buffer lists * required by an implementation for a compression instance. These buffers * should then be allocated and provided when calling @ref cpaDcStartInstance() * to start a compression instance that will use dynamic compression. * * @context * This function may sleep, and MUST NOT be called in interrupt context. * @assumptions * None * @sideEffects * None * @blocking * This function is synchronous and blocking. 
* @reentrant * No * @threadSafe * Yes * @param[in,out] instanceHandle Handle to an instance of this API to be * initialized. * @param[out] pNumBuffers When the function returns, this will * specify the number of buffer lists that * should be used as intermediate buffers * when calling cpaDcStartInstance(). * * @retval CPA_STATUS_SUCCESS Function executed successfully. * @retval CPA_STATUS_FAIL Function failed. Suggested course of action * is to shutdown and restart. * @retval CPA_STATUS_UNSUPPORTED Function is not supported. * * @pre * None * @post * None * @note * Note that this is a synchronous function and has no completion callback * associated with it. * * @see * cpaDcStartInstance() * *****************************************************************************/ CpaStatus cpaDcGetNumIntermediateBuffers(CpaInstanceHandle instanceHandle, Cpa16U *pNumBuffers); /** ***************************************************************************** * @ingroup cpaDc * Compression Component Initialization and Start function. * * @description * This function will initialize and start the compression component. * It MUST be called before any other compress function is called. This * function SHOULD be called only once (either for the very first time, * or after an cpaDcStopInstance call which succeeded) per instance. * Subsequent calls will have no effect. * * If required by an implementation, this function can be provided with * instance specific intermediate buffers. The intent is to provide an * instance specific location to store intermediate results during dynamic * instance Huffman tree compression requests. The memory should be * accessible by the compression engine. The buffers are to support * deflate compression with dynamic Huffman Trees. Each buffer list * should be similar in size to twice the destination buffer size passed * to the compress API. 
The number of intermediate buffer lists may vary * between implementations and so @ref cpaDcGetNumIntermediateBuffers() * should be called first to determine the number of intermediate * buffers required by the implementation. * * If not required, this parameter can be passed in as NULL. * * @context * This function may sleep, and MUST NOT be called in interrupt context. * @assumptions * None * @sideEffects * None * @blocking * This function is synchronous and blocking. * @reentrant * No * @threadSafe * Yes * @param[in,out] instanceHandle Handle to an instance of this API to be * initialized. * @param[in] numBuffers Number of buffer lists represented by * the pIntermediateBuffers parameter. * Note: @ref cpaDcGetNumIntermediateBuffers() * can be used to determine the number of * intermediate buffers that an implementation * requires. * @param[in] pIntermediateBuffers Optional pointer to Instance specific * DRAM buffer. * * @retval CPA_STATUS_SUCCESS Function executed successfully. * @retval CPA_STATUS_FAIL Function failed. Suggested course of action * is to shutdown and restart. * @retval CPA_STATUS_UNSUPPORTED Function is not supported. * * @pre * None * @post * None * @note * Note that this is a synchronous function and has no completion callback * associated with it. * * @see * cpaDcStopInstance() * cpaDcGetNumIntermediateBuffers() * *****************************************************************************/ CpaStatus cpaDcStartInstance(CpaInstanceHandle instanceHandle, Cpa16U numBuffers, CpaBufferList **pIntermediateBuffers); /** ***************************************************************************** * @ingroup cpaDc * Compress Component Stop function. * * @description * This function will stop the Compression component and free * all system resources associated with it. The client MUST ensure that * all outstanding operations have completed before calling this function. 
* The recommended approach to ensure this is to deregister all session or * callback handles before calling this function. If outstanding * operations still exist when this function is invoked, the callback * function for each of those operations will NOT be invoked and the * shutdown will continue. If the component is to be restarted, then a * call to cpaDcStartInstance is required. * * @context * This function may sleep, and so MUST NOT be called in interrupt * context. * @assumptions * None * @sideEffects * None * @blocking * This function is synchronous and blocking. * @reentrant * No * @threadSafe * Yes * @param[in] instanceHandle Handle to an instance of this API to be * shutdown. * * @retval CPA_STATUS_SUCCESS Function executed successfully. * @retval CPA_STATUS_FAIL Function failed. Suggested course of action * is to ensure requests are not still being * submitted and that all sessions are * deregistered. If this does not help, then * forcefully remove the component from the * system. * @retval CPA_STATUS_UNSUPPORTED Function is not supported. * * @pre * The component has been initialized via cpaDcStartInstance * @post * None * @note * Note that this is a synchronous function and has no completion callback * associated with it. * * @see * cpaDcStartInstance() * *****************************************************************************/ CpaStatus cpaDcStopInstance(CpaInstanceHandle instanceHandle); /** ***************************************************************************** * @ingroup cpaDc * Function to get information on a particular instance. * * @description * This function will provide instance specific information through a * @ref CpaInstanceInfo2 structure. * * @context * This function will be executed in a context that requires that sleeping * MUST NOT be permitted. 
* @assumptions * None * @sideEffects * None * @blocking * Yes * @reentrant * No * @threadSafe * Yes * * @param[in] instanceHandle Handle to an instance of this API to be * initialized. * @param[out] pInstanceInfo2 Pointer to the memory location allocated by * the client into which the CpaInstanceInfo2 * structure will be written. * * @retval CPA_STATUS_SUCCESS Function executed successfully. * @retval CPA_STATUS_FAIL Function failed. * @retval CPA_STATUS_INVALID_PARAM Invalid parameter passed in. * @retval CPA_STATUS_UNSUPPORTED Function is not supported. * * @pre * The client has retrieved an instanceHandle from successive calls to * @ref cpaDcGetNumInstances and @ref cpaDcGetInstances. * @post * None * @note * None * @see * cpaDcGetNumInstances, * cpaDcGetInstances, * CpaInstanceInfo2 * *****************************************************************************/ CpaStatus cpaDcInstanceGetInfo2(const CpaInstanceHandle instanceHandle, CpaInstanceInfo2 * pInstanceInfo2); /*****************************************************************************/ /* Instance Notification Functions */ /*****************************************************************************/ /** ***************************************************************************** * @ingroup cpaDc * Callback function for instance notification support. * * @description * This is the prototype for the instance notification callback function. * The callback function is passed in as a parameter to the * @ref cpaDcInstanceSetNotificationCb function. * * @context * This function will be executed in a context that requires that sleeping * MUST NOT be permitted. * @assumptions * None * @sideEffects * None * @blocking * No * @reentrant * No * @threadSafe * Yes * * @param[in] instanceHandle Instance handle. * @param[in] pCallbackTag Opaque value provided by user while making * individual function calls. * @param[in] instanceEvent The event that will trigger this function to * get invoked. 
* * @retval * None * @pre * Component has been initialized and the notification function has been * set via the cpaDcInstanceSetNotificationCb function. * @post * None * @note * None * @see * cpaDcInstanceSetNotificationCb(), * *****************************************************************************/ typedef void (*CpaDcInstanceNotificationCbFunc)( const CpaInstanceHandle instanceHandle, void * pCallbackTag, const CpaInstanceEvent instanceEvent); /** ***************************************************************************** * @ingroup cpaDc * Subscribe for instance notifications. * * @description * Clients of the CpaDc interface can subscribe for instance notifications * by registering a @ref CpaDcInstanceNotificationCbFunc function. * * @context * This function may be called from any context. * @assumptions * None * @sideEffects * None * @blocking * No * @reentrant * No * @threadSafe * Yes * * @param[in] instanceHandle Instance handle. * @param[in] pInstanceNotificationCb Instance notification callback * function pointer. * @param[in] pCallbackTag Opaque value provided by user while * making individual function calls. * * @retval CPA_STATUS_SUCCESS Function executed successfully. * @retval CPA_STATUS_FAIL Function failed. * @retval CPA_STATUS_INVALID_PARAM Invalid parameter passed in. * @retval CPA_STATUS_UNSUPPORTED Function is not supported. * * @pre * Instance has been initialized. * @post * None * @note * None * @see * CpaDcInstanceNotificationCbFunc * *****************************************************************************/ CpaStatus cpaDcInstanceSetNotificationCb( const CpaInstanceHandle instanceHandle, const CpaDcInstanceNotificationCbFunc pInstanceNotificationCb, void *pCallbackTag); /** ***************************************************************************** * @ingroup cpaDc * Get the size of the memory required to hold the session information. 
* * @description * * The client of the Data Compression API is responsible for * allocating sufficient memory to hold session information and the context * data. This function provides a means for determining the size of the * session information and the size of the context data. * * @context * No restrictions * @assumptions * None * @sideEffects * None * @blocking * No * @reentrant * No * @threadSafe * Yes * * @param[in] dcInstance Instance handle. * @param[in] pSessionData Pointer to a user instantiated structure * containing session data. * @param[out] pSessionSize On return, this parameter will be the size * of the memory that will be * required by cpaDcInitSession() for session * data. * @param[out] pContextSize On return, this parameter will be the size * of the memory that will be required * for context data. Context data is * save/restore data including history and * any implementation specific data that is * required for a save/restore operation. * * @retval CPA_STATUS_SUCCESS Function executed successfully. * @retval CPA_STATUS_FAIL Function failed. * @retval CPA_STATUS_INVALID_PARAM Invalid parameter passed in. * @retval CPA_STATUS_UNSUPPORTED Function is not supported. * * @pre * None * @post * None * @note * Only a synchronous version of this function is provided. * * It is expected that context data is comprised of the history and * any data stores that are specific to the history such as linked * lists or hash tables. * For stateless sessions the context size returned from this function * will be zero. For stateful sessions the context size returned will * depend on the session setup data and may be zero. * * Session data is expected to include interim checksum values, various * counters and other session related data that needs to persist * between invocations. 
* For a given implementation of this API, it is safe to assume that * cpaDcGetSessionSize() will always return the same session size and * that the size will not be different for different setup data * parameters. However, it should be noted that the size may change: * (1) between different implementations of the API (e.g. between software * and hardware implementations or between different hardware * implementations) * (2) between different releases of the same API implementation. * * @see * cpaDcInitSession() * *****************************************************************************/ CpaStatus cpaDcGetSessionSize(CpaInstanceHandle dcInstance, CpaDcSessionSetupData* pSessionData, Cpa32U* pSessionSize, Cpa32U* pContextSize ); /** ***************************************************************************** * @ingroup cpaDc * Function to return the size of the memory which must be allocated for * the pPrivateMetaData member of CpaBufferList. * * @description * This function is used to obtain the size (in bytes) required to allocate * a buffer descriptor for the pPrivateMetaData member in the * CpaBufferList structure. * Should the function return zero then no meta data is required for the * buffer list. * * @context * This function may be called from any context. * @assumptions * None * @sideEffects * None * @blocking * No * @reentrant * No * @threadSafe * Yes * * @param[in] instanceHandle Handle to an instance of this API. * @param[in] numBuffers The number of pointers in the CpaBufferList. * This is the maximum number of CpaFlatBuffers * which may be contained in this CpaBufferList. * @param[out] pSizeInBytes Pointer to the size in bytes of memory to be * allocated when the client wishes to allocate * a cpaFlatBuffer. * * @retval CPA_STATUS_SUCCESS Function executed successfully. * @retval CPA_STATUS_FAIL Function failed. * @retval CPA_STATUS_INVALID_PARAM Invalid parameter passed in. * @retval CPA_STATUS_UNSUPPORTED Function is not supported. 
* * @pre * None * @post * None * @note * None * @see * cpaDcGetInstances() * *****************************************************************************/ CpaStatus cpaDcBufferListGetMetaSize(const CpaInstanceHandle instanceHandle, Cpa32U numBuffers, Cpa32U *pSizeInBytes); /** ***************************************************************************** * @ingroup cpaDc * Function to return a string indicating the specific error that occurred * within the system. * * @description * When a function returns any error including CPA_STATUS_SUCCESS, the * client can invoke this function to get a string which describes the * general error condition, and if available additional information on * the specific error. * The Client MUST allocate CPA_STATUS_MAX_STR_LENGTH_IN_BYTES bytes for the buffer * string. * * @context * This function may be called from any context. * @assumptions * None * @sideEffects * None * @blocking * No * @reentrant * No * @threadSafe * Yes * * @param[in] dcInstance Handle to an instance of this API. * @param[in] errStatus The error condition that occurred. * @param[in,out] pStatusText Pointer to the string buffer that will * be updated with the status text. The invoking * application MUST allocate this buffer to be * exactly CPA_STATUS_MAX_STR_LENGTH_IN_BYTES. * * @retval CPA_STATUS_SUCCESS Function executed successfully. * @retval CPA_STATUS_FAIL Function failed. Note, in this scenario * it is INVALID to call this function a * second time. * @retval CPA_STATUS_INVALID_PARAM Invalid parameter passed in. * @retval CPA_STATUS_UNSUPPORTED Function is not supported. 
* * @pre * None * @post * None * @note * None * @see * CpaStatus * *****************************************************************************/ CpaStatus cpaDcGetStatusText(const CpaInstanceHandle dcInstance, const CpaStatus errStatus, Cpa8S * pStatusText); /** ***************************************************************************** * @ingroup cpaDc * Set Address Translation function * * @description * This function is used to set the virtual to physical address * translation routine for the instance. The specified routine * is used by the instance to perform any required translation of * a virtual address to a physical address. If the application * does not invoke this function, then the instance will use its * default method, such as virt2phys, for address translation. * * @assumptions * None * @sideEffects * None * @blocking * This function is synchronous and blocking. * @reentrant * No * @threadSafe * Yes * * @param[in] instanceHandle Data Compression API instance handle. * @param[in] virtual2Physical Routine that performs virtual to * physical address translation. * @retval CPA_STATUS_SUCCESS Function executed successfully. * @retval CPA_STATUS_FAIL Function failed. * @retval CPA_STATUS_INVALID_PARAM Invalid parameter passed in. * @retval CPA_STATUS_UNSUPPORTED Function is not supported. 
* * @pre * None * @post * None * @see * None * *****************************************************************************/ CpaStatus cpaDcSetAddressTranslation(const CpaInstanceHandle instanceHandle, CpaVirtualToPhysical virtual2Physical); #ifdef __cplusplus } /* close the extern "C" { */ #endif #endif /* CPA_DC_H */ diff --git a/sys/dev/qat/qat_api/include/dc/cpa_dc_dp.h b/sys/dev/qat/qat_api/include/dc/cpa_dc_dp.h index 1a08979bb941..680e021f95d6 100644 --- a/sys/dev/qat/qat_api/include/dc/cpa_dc_dp.h +++ b/sys/dev/qat/qat_api/include/dc/cpa_dc_dp.h @@ -1,819 +1,1250 @@ /*************************************************************************** * * BSD LICENSE * * Copyright(c) 2007-2023 Intel Corporation. All rights reserved. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name of Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * ***************************************************************************/ /* ***************************************************************************** * Doxygen group definitions ****************************************************************************/ /** ***************************************************************************** * @file cpa_dc_dp.h * * @defgroup cpaDcDp Data Compression Data Plane API * * @ingroup cpaDc * * @description * These data structures and functions specify the Data Plane API * for compression and decompression operations. * * This API is recommended for data plane applications, in which the * cost of offload - that is, the cycles consumed by the driver in * sending requests to the hardware, and processing responses - needs * to be minimized. In particular, use of this API is recommended * if the following constraints are acceptable to your application: * * - Thread safety is not guaranteed. Each software thread should * have access to its own unique instance (CpaInstanceHandle) to * avoid contention. * - Polling is used, rather than interrupts (which are expensive). * Implementations of this API will provide a function (not * defined as part of this API) to read responses from the hardware * response queue and dispatch callback functions, as specified on * this API. * - Buffers and buffer lists are passed using physical addresses, * to avoid virtual to physical address translation costs. 
* - The ability to enqueue one or more requests without submitting * them to the hardware allows for certain costs to be amortized * across multiple requests. * - Only asynchronous invocation is supported. * - There is no support for partial packets. * - Implementations may provide certain features as optional at * build time, such as atomic counters. * - There is no support for stateful operations. * - The "default" instance (CPA_INSTANCE_HANDLE_SINGLE) is not * supported on this API. The specific handle should be obtained * using the instance discovery functions (@ref cpaDcGetNumInstances, * @ref cpaDcGetInstances). * *****************************************************************************/ #ifndef CPA_DC_DP_H #define CPA_DC_DP_H #ifdef __cplusplus extern "C" { #endif #include "cpa_dc.h" +/** + ***************************************************************************** + * @ingroup cpaDcDp + * Decompression partial read data. + * @description + * This structure contains configuration related to requesting + * specific chunk of decompression data. + * + ****************************************************************************/ +typedef struct _CpaDcDpPartialReadData { + Cpa32U bufferOffset; + /**< Number of bytes to skip in a destination buffer (or buffers list) + * before writing. At this point only zero is supported. + */ + Cpa32U dataOffset; + /**< The offset in the decompressed data of the first byte written to + * the destination buffer. The data offset length should be an integer + * multiple of 4KB in order to achieve the best performance. + */ + Cpa32U length; + /**< Size of requested decompressed data chunk. The length should be + * an integer multiple of 4KB in order to achieve the best performance. + */ +} CpaDcDpPartialReadData; + /** ***************************************************************************** * @ingroup cpaDcDp * Operation Data for compression data plane API. 
* * @description * This structure contains data relating to a request to perform * compression processing on one or more data buffers. * * The physical memory to which this structure points should be * at least 8-byte aligned. * * All reserved fields SHOULD NOT be written or read by the * calling code. * * @see * cpaDcDpEnqueueOp, cpaDcDpEnqueueOpBatch ****************************************************************************/ typedef struct _CpaDcDpOpData { Cpa64U reserved0; /**< Reserved for internal use. Source code should not read or write * this field. */ Cpa32U bufferLenToCompress; /**< The number of bytes from the source buffer to compress. This must be * less than, or more typically equal to, the total size of the source * buffer (or buffer list). */ Cpa32U bufferLenForData; /**< The maximum number of bytes that should be written to the destination * buffer. This must be less than, or more typically equal to, the total * size of the destination buffer (or buffer list). */ Cpa64U reserved1; /**< Reserved for internal use. Source code should not read or write */ Cpa64U reserved2; /**< Reserved for internal use. Source code should not read or write */ Cpa64U reserved3; /**< Reserved for internal use. Source code should not read or write */ CpaDcRqResults results; /**< Results of the operation. Contents are valid upon completion. */ CpaInstanceHandle dcInstance; /**< Instance to which the request is to be enqueued */ CpaDcSessionHandle pSessionHandle; /**< DC Session associated with the stream of requests. * This field is only valid when using the session based API functions. * This field must be set to NULL if the application wishes to use * the No-Session (Ns) API. */ CpaPhysicalAddr srcBuffer; /**< Physical address of the source buffer on which to operate. 
* This is either the location of the data, of length srcBufferLen; or, * if srcBufferLen has the special value @ref CPA_DP_BUFLIST, then * srcBuffer contains the location where a @ref CpaPhysBufferList is * stored. */ Cpa32U srcBufferLen; /**< If the source buffer is a "flat buffer", then this field * specifies the size of the buffer, in bytes. If the source buffer * is a "buffer list" (of type @ref CpaPhysBufferList), then this field * should be set to the value @ref CPA_DP_BUFLIST. */ CpaPhysicalAddr destBuffer; /**< Physical address of the destination buffer on which to operate. * This is either the location of the data, of length destBufferLen; or, * if destBufferLen has the special value @ref CPA_DP_BUFLIST, then * destBuffer contains the location where a @ref CpaPhysBufferList is * stored. */ Cpa32U destBufferLen; /**< If the destination buffer is a "flat buffer", then this field * specifies the size of the buffer, in bytes. If the destination buffer * is a "buffer list" (of type @ref CpaPhysBufferList), then this field * should be set to the value @ref CPA_DP_BUFLIST. */ CpaDcSessionDir sessDirection; /**pSessionHandle was setup using * @ref cpaDcDpInitSession OR pOpData->pSetupData data structure was * initialized for No-Session (Ns) usage. * The instance identified by pOpData->dcInstance has had a * callback function registered via @ref cpaDcDpRegCbFunc. * * @post * None * * @note * A callback of type @ref CpaDcDpCallbackFn is generated in * response to this function call. Any errors generated during * processing are reported as part of the callback status code. * * @see * @ref cpaDcDpPerformOpNow *****************************************************************************/ CpaStatus cpaDcDpEnqueueOp(CpaDcDpOpData *pOpData, const CpaBoolean performOpNow); +/** + ***************************************************************************** + * @ingroup cpaDcDp + * Enqueue a single decompression request with partial read configuration. 
+ * See @ref CpaDcDpPartialReadData for more details. + * + * @description + * This function enqueues a single request to perform a decompression + * operation and allows the caller to specify a particular region of + * decompressed data to be placed into the destination buffer (or buffer + * list). + * + * The function is asynchronous; control is returned to the user once + * the request has been submitted. On completion of the request, the + * application may poll for responses, which will cause a callback + * function (registered via @ref cpaDcDpRegCbFunc) to be invoked. + * Callbacks within a session are guaranteed to be in the same order + * in which they were submitted. + * + * The following restrictions apply to the pOpData parameter: + * + * - The memory MUST be aligned on an 8-byte boundary. + * - The reserved fields of the structure MUST NOT be written to + * or read from. + * - The structure MUST reside in physically contiguous memory. + * + * @context + * This function will not sleep, and hence can be executed in a context + * that does not permit sleeping. + * + * @sideEffects + * None + * @blocking + * No + * @reentrant + * No + * @threadSafe + * No + * + * @param[in,out] pOpData See @ref cpaDcDpEnqueueOp pOpData description. + * + * @param[in] pPartReadData Pointer to a structure containing the partial + * read configuration parameters. + * See @ref CpaDcDpPartialReadData for more details. + * + * @param[in] performOpNow See @ref cpaDcDpEnqueueOp performOpNow input + * parameter. + * + * @retval CPA_STATUS_SUCCESS Function executed successfully. + * @retval CPA_STATUS_FAIL Function failed. + * @retval CPA_STATUS_RETRY Resubmit the request. + * @retval CPA_STATUS_INVALID_PARAM Invalid parameter passed in. + * @retval CPA_STATUS_RESTARTING API implementation is restarting. Resubmit + * the request. + * @retval CPA_STATUS_UNSUPPORTED Function is not supported. + * + * @pre + * The session identified by pOpData->pSessionHandle was setup using + * @ref cpaDcDpInitSession. 
The instance identified by pOpData->dcInstance + * has had a callback function registered via @ref cpaDcDpRegCbFunc. + * + * @post + * None + * + * @note + * A callback of type @ref CpaDcDpCallbackFn is generated in + * response to this function call. Any errors generated during + * processing are reported as part of the callback status code. + * + * @see + * @ref cpaDcDpPerformOpNow + *****************************************************************************/ +CpaStatus +cpaDcDpEnqueueOpWithPartRead(CpaDcDpOpData *pOpData, + CpaDcDpPartialReadData *pPartReadData, + const CpaBoolean performOpNow); + +/** + ***************************************************************************** + * @ingroup cpaDcDp + * Enqueue a single compression request with an option set to zero-fill + * data after the compression output in the leftover bytes. + * + * @description + * This function enqueues a single request to perform a compression + * operation with zero-filling leftover bytes with 4KB alignment + * in the destination buffer (or buffer list). + * + * The function is asynchronous; control is returned to the user once + * the request has been submitted. On completion of the request, the + * application may poll for responses, which will cause a callback + * function (registered via @ref cpaDcDpRegCbFunc) to be invoked. + * Callbacks within a session are guaranteed to be in the same order + * in which they were submitted. + * + * The following restrictions apply to the pOpData parameter: + * + * - The memory MUST be aligned on an 8-byte boundary. + * - The reserved fields of the structure MUST NOT be written to + * or read from. + * - The structure MUST reside in physically contiguous memory. + * + * @context + * This function will not sleep, and hence can be executed in a context + * that does not permit sleeping. 
+ * + * @sideEffects + * None + * @blocking + * No + * @reentrant + * No + * @threadSafe + * No + * + * @param[in,out] pOpData See @ref cpaDcDpEnqueueOp pOpData description. + * + * @param[in] performOpNow See @ref cpaDcDpEnqueueOp performOpNow input + * parameter. + * + * @retval CPA_STATUS_SUCCESS Function executed successfully. + * @retval CPA_STATUS_FAIL Function failed. + * @retval CPA_STATUS_RETRY Resubmit the request. + * @retval CPA_STATUS_INVALID_PARAM Invalid parameter passed in. + * @retval CPA_STATUS_RESTARTING API implementation is restarting. Resubmit + * the request. + * @retval CPA_STATUS_UNSUPPORTED Function is not supported. + * + * @pre + * The session identified by pOpData->pSessionHandle was setup using + * @ref cpaDcDpInitSession. The instance identified by pOpData->dcInstance + * has had a callback function registered via @ref cpaDcDpRegCbFunc. + * + * @post + * None + * + * @note + * A callback of type @ref CpaDcDpCallbackFn is generated in + * response to this function call. Any errors generated during + * processing are reported as part of the callback status code. + * + * @see + * @ref cpaDcDpPerformOpNow + *****************************************************************************/ +CpaStatus +cpaDcDpEnqueueOpWithZeroPad(CpaDcDpOpData *pOpData, + const CpaBoolean performOpNow); /** ***************************************************************************** * @ingroup cpaDcDp * Enqueue multiple requests to the compression data plane API. * * @description * This function enqueues multiple requests to perform compression or * decompression operations. * * The function is asynchronous; control is returned to the user once * the request has been submitted. On completion of the request, the * application may poll for responses, which will cause a callback * function (registered via @ref cpaDcDpRegCbFunc) to be invoked. * Separate callbacks will be invoked for each request. 
* Callbacks within a session and at the same priority are guaranteed * to be in the same order in which they were submitted. * * The following restrictions apply to each element of the pOpData * array: * * - The memory MUST be aligned on an 8-byte boundary. * - The reserved fields of the structure MUST be set to zero. * - The structure MUST reside in physically contiguous memory. * * @context * This function will not sleep, and hence can be executed in a context * that does not permit sleeping. * * @assumptions * Client MUST allocate the request parameters to 8 byte alignment. * Reserved elements of the CpaDcDpOpData structure MUST not used * The CpaDcDpOpData structure MUST reside in physically * contiguous memory. * * @sideEffects * None * @blocking * No * @reentrant * No * @threadSafe * No * * @param[in] numberRequests The number of requests in the array of * CpaDcDpOpData structures. * @param[in] pOpData An array of pointers to CpaDcDpOpData * structures. Each CpaDcDpOpData * structure contains the request parameters for * that request. The client code allocates the * memory for this structure. This component takes * ownership of the memory until it is returned in * the callback, which was registered on the * instance via @ref cpaDcDpRegCbFunc. * See the above Description for some restrictions * that apply to this parameter. * @param[in] performOpNow Flag to indicate whether the operation should be * performed immediately (CPA_TRUE), or simply * enqueued to be performed later (CPA_FALSE). * In the latter case, the request is submitted * to be performed either by calling this function * again with this flag set to CPA_TRUE, or by * invoking the function @ref * cpaDcDpPerformOpNow. * * @retval CPA_STATUS_SUCCESS Function executed successfully. * @retval CPA_STATUS_FAIL Function failed. * @retval CPA_STATUS_RETRY Resubmit the request. * @retval CPA_STATUS_INVALID_PARAM Invalid parameter passed in. * @retval CPA_STATUS_RESTARTING API implementation is restarting. 
Resubmit * the request. * @retval CPA_STATUS_UNSUPPORTED Function is not supported. * * @pre * The session identified by pOpData[i]->pSessionHandle was setup using * @ref cpaDcDpInitSession OR pOpData[i]->pSetupData data structure was * initialized for No-Session (Ns) usage. * The instance identified by pOpData[i]->dcInstance has had a * callback function registered via @ref cpaDcDpRegCbFunc. * * @post * None * * @note * Multiple callbacks of type @ref CpaDcDpCallbackFn are generated in * response to this function call (one per request). Any errors * generated during processing are reported as part of the callback * status code. * * @see * cpaDcDpEnqueueOp *****************************************************************************/ CpaStatus cpaDcDpEnqueueOpBatch(const Cpa32U numberRequests, CpaDcDpOpData *pOpData[], const CpaBoolean performOpNow); +/** + ***************************************************************************** + * @ingroup cpaDcDp + * Enqueue multiple decompression requests with partial read configuration. + * See @ref CpaDcDpPartialReadData for more details. + * + * @description + * This function enqueues multiple requests to perform decompression + * operations and allows specifying a particular region of decompressed + * data to be placed into the destination buffer (or buffer list) for + * each individual request. + * + * The function is asynchronous; control is returned to the user once + * the request has been submitted. On completion of the request, the + * application may poll for responses, which will cause a callback + * function (registered via @ref cpaDcDpRegCbFunc) to be invoked. + * Separate callbacks will be invoked for each request. + * Callbacks within a session and at the same priority are guaranteed + * to be in the same order in which they were submitted. + * + * The following restrictions apply to each element of the pOpData + * array: + * + * - The memory MUST be aligned on an 8-byte boundary. 
+ * - The reserved fields of the structure MUST be set to zero. + * - The structure MUST reside in physically contiguous memory. + * + * @context + * See @ref cpaDcDpEnqueueOpBatch context. + * + * @assumptions + * See @ref cpaDcDpEnqueueOpBatch assumptions. + * + * @sideEffects + * None + * @blocking + * No + * @reentrant + * No + * @threadSafe + * No + * + * @param[in] numberRequests The number of requests in the array of + * CpaDcDpOpData structures. + * + * @param[in,out] pOpData See @ref cpaDcDpEnqueueOpBatch pOpData for more + * details. + * + * @param[in] pPartReadData An array of pointers to structures containing + * the partial read configuration parameters. + * See @ref CpaDcDpPartialReadData for more details. + * + * @param[in] performOpNow See @ref cpaDcDpEnqueueOpBatch performOpNow + * input parameter. + * + * @retval CPA_STATUS_SUCCESS Function executed successfully. + * @retval CPA_STATUS_FAIL Function failed. + * @retval CPA_STATUS_RETRY Resubmit the request. + * @retval CPA_STATUS_INVALID_PARAM Invalid parameter passed in. + * @retval CPA_STATUS_RESTARTING API implementation is restarting. Resubmit + * the request. + * @retval CPA_STATUS_UNSUPPORTED Function is not supported. + * + * + * @pre + * The session identified by pOpData[i]->pSessionHandle was setup using + * @ref cpaDcDpInitSession. The instance identified by + * pOpData[i]->dcInstance has had a callback function registered via + * @ref cpaDcDpRegCbFunc. + * + * @post + * None + * + * @note + * Multiple callbacks of type @ref CpaDcDpCallbackFn are generated in + * response to this function call (one per request). Any errors + * generated during processing are reported as part of the callback + * status code. 
+ * + * @see + * @ref cpaDcDpEnqueueOp + *****************************************************************************/ +CpaStatus +cpaDcDpEnqueueOpWithPartReadBatch(const Cpa32U numberRequests, + CpaDcDpOpData *pOpData[], + CpaDcDpPartialReadData *pPartReadData[], + const CpaBoolean performOpNow); + +/** + ***************************************************************************** + * @ingroup cpaDcDp + * Enqueue multiple compression requests with an option set to zero-fill + * data after the compression output in the leftover bytes. + * + * @description + * This function enqueues multiple requests to perform compression + * operations with an option set to zero-fill leftover bytes in the + * destination buffer (or buffer list) for each individual request. + * Please note that optional zero-filling leftover output buffer bytes + * is aligned to 4KB. + * + * The function is asynchronous; control is returned to the user once + * the request has been submitted. On completion of the request, the + * application may poll for responses, which will cause a callback + * function (registered via @ref cpaDcDpRegCbFunc) to be invoked. + * Separate callbacks will be invoked for each request. + * Callbacks within a session and at the same priority are guaranteed + * to be in the same order in which they were submitted. + * + * The following restrictions apply to each element of the pOpData + * array: + * + * - The memory MUST be aligned on an 8-byte boundary. + * - The reserved fields of the structure MUST be set to zero. + * - The structure MUST reside in physically contiguous memory. + * + * @context + * See @ref cpaDcDpEnqueueOpBatch context. + * + * @assumptions + * See @ref cpaDcDpEnqueueOpBatch assumptions. + * + * @sideEffects + * None + * @blocking + * No + * @reentrant + * No + * @threadSafe + * No + * + * @param[in] numberRequests The number of requests in the array of + * CpaDcDpOpData structures. 
+ * + * @param[in,out] pOpData See @ref cpaDcDpEnqueueOpBatch pOpData for more + * details. + * + * @param[in] performOpNow See @ref cpaDcDpEnqueueOpBatch performOpNow + * input parameter. + * + * @retval CPA_STATUS_SUCCESS Function executed successfully. + * @retval CPA_STATUS_FAIL Function failed. + * @retval CPA_STATUS_RETRY Resubmit the request. + * @retval CPA_STATUS_INVALID_PARAM Invalid parameter passed in. + * @retval CPA_STATUS_RESTARTING API implementation is restarting. Resubmit + * the request. + * @retval CPA_STATUS_UNSUPPORTED Function is not supported. + * + * + * @pre + * The session identified by pOpData[i]->pSessionHandle was setup using + * @ref cpaDcDpInitSession. The instance identified by + * pOpData[i]->dcInstance has had a callback function registered via + * @ref cpaDcDpRegCbFunc. + * + * @post + * None + * + * @note + * Multiple callbacks of type @ref CpaDcDpCallbackFn are generated in + * response to this function call (one per request). Any errors + * generated during processing are reported as part of the callback + * status code. + * + * @see + * @ref cpaDcDpEnqueueOp + *****************************************************************************/ +CpaStatus +cpaDcDpEnqueueOpWithZeroPadBatch(const Cpa32U numberRequests, + CpaDcDpOpData *pOpData[], + const CpaBoolean performOpNow); /** ***************************************************************************** * @ingroup cpaDcDp * Submit any previously enqueued requests to be performed now on the * compression data plane API. * * @description * This function triggers processing of previously enqueued requests on the * referenced instance. * * * @context * Will not sleep. It can be executed in a context that does not * permit sleeping. * * @sideEffects * None * @blocking * No * @reentrant * No * @threadSafe * No * * @param[in] dcInstance Instance to which the requests will be * submitted. * * @retval CPA_STATUS_SUCCESS Function executed successfully. 
* @retval CPA_STATUS_FAIL Function failed. * @retval CPA_STATUS_RETRY Resubmit the request. * @retval CPA_STATUS_INVALID_PARAM Invalid parameter passed in. * @retval CPA_STATUS_RESTARTING API implementation is restarting. Resubmit * the request. * @retval CPA_STATUS_UNSUPPORTED Function is not supported. * * @pre * The component has been initialized via @ref cpaDcStartInstance function. * A compression session has been previously setup using the * @ref cpaDcDpInitSession function call. * * @post * None * * @see * cpaDcDpEnqueueOp, cpaDcDpEnqueueOpBatch *****************************************************************************/ CpaStatus cpaDcDpPerformOpNow(CpaInstanceHandle dcInstance); +/** + ***************************************************************************** + * @ingroup cpaDc + * Function to return the "partial read" feature support. + * + * @description + * This function is used to determine if given instance supports + * "partial read" feature. + * + * @context + * This function may be called from any context. + * @assumptions + * None + * @sideEffects + * None + * @blocking + * No + * @reentrant + * No + * @threadSafe + * Yes + * + * @param[in] instanceHandle Handle to an instance of this API. + * @param[out] pFlag Pointer to boolean flag which indicates + * whether a feature is supported. + * + * @retval CPA_STATUS_SUCCESS Function executed successfully. + * @retval CPA_STATUS_FAIL Function failed. + * @retval CPA_STATUS_INVALID_PARAM Invalid parameter passed in. + * @retval CPA_STATUS_UNSUPPORTED Function is not supported. 
+ * + * @pre + * None + * @post + * None + * @note + * None + * @see + * cpaDcQueryCapabilities() + * + *****************************************************************************/ +CpaStatus +cpaDcDpIsPartReadSupported(const CpaInstanceHandle instanceHandle, + CpaBoolean *pFlag); + +/** + ***************************************************************************** + * @ingroup cpaDc + * Function to return the "zero pad" feature support. + * + * @description + * This function is used to determine if given instance supports + * "zero pad" feature. + * + * @context + * This function may be called from any context. + * @assumptions + * None + * @sideEffects + * None + * @blocking + * No + * @reentrant + * No + * @threadSafe + * Yes + * + * @param[in] instanceHandle Handle to an instance of this API. + * @param[out] pFlag Pointer to boolean flag which indicates + * whether a feature is supported. + * + * @retval CPA_STATUS_SUCCESS Function executed successfully. + * @retval CPA_STATUS_FAIL Function failed. + * @retval CPA_STATUS_INVALID_PARAM Invalid parameter passed in. + * @retval CPA_STATUS_UNSUPPORTED Function is not supported. 
+ * + * @pre + * None + * @post + * None + * @note + * None + * @see + * cpaDcQueryCapabilities() + * + *****************************************************************************/ +CpaStatus +cpaDcDpIsZeroPadSupported(const CpaInstanceHandle instanceHandle, + CpaBoolean *pFlag); #ifdef __cplusplus } /* close the extern "C" { */ #endif #endif /* CPA_DC_DP_H */ diff --git a/sys/dev/qat/qat_api/include/icp_sal_versions.h b/sys/dev/qat/qat_api/include/icp_sal_versions.h index 5e46f86e3a54..db1ba297adc6 100644 --- a/sys/dev/qat/qat_api/include/icp_sal_versions.h +++ b/sys/dev/qat/qat_api/include/icp_sal_versions.h @@ -1,96 +1,96 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright(c) 2007-2022 Intel Corporation */ /** *************************************************************************** * @file icp_sal_versions.h * * @defgroup SalVersions * * @ingroup SalVersions * * API and structures definition for obtaining software and hardware versions * ***************************************************************************/ #ifndef _ICP_SAL_VERSIONS_H_ #define _ICP_SAL_VERSIONS_H_ #define ICP_SAL_VERSIONS_FW_VERSION_SIZE 16 /**< Max length of firmware version string */ #define ICP_SAL_VERSIONS_SW_VERSION_SIZE 16 /**< Max length of software version string */ #define ICP_SAL_VERSIONS_MMP_VERSION_SIZE 16 /**< Max length of MMP binary version string */ #define ICP_SAL_VERSIONS_HW_VERSION_SIZE 4 /**< Max length of hardware version string */ /* Part name and number of the accelerator device */ #define SAL_INFO2_DRIVER_SW_VERSION_MAJ_NUMBER 3 -#define SAL_INFO2_DRIVER_SW_VERSION_MIN_NUMBER 13 +#define SAL_INFO2_DRIVER_SW_VERSION_MIN_NUMBER 14 #define SAL_INFO2_DRIVER_SW_VERSION_PATCH_NUMBER 0 /** ******************************************************************************* * @ingroup SalVersions * Structure holding versions information * * @description * This structure stores information about versions of software * and hardware being run on a particular device. 
*****************************************************************************/ typedef struct icp_sal_dev_version_info_s { Cpa32U devId; /**< Number of acceleration device for which this structure holds * version * information */ Cpa8U firmwareVersion[ICP_SAL_VERSIONS_FW_VERSION_SIZE]; /**< String identifying the version of the firmware associated with * the device. */ Cpa8U mmpVersion[ICP_SAL_VERSIONS_MMP_VERSION_SIZE]; /**< String identifying the version of the MMP binary associated with * the device. */ Cpa8U softwareVersion[ICP_SAL_VERSIONS_SW_VERSION_SIZE]; /**< String identifying the version of the software associated with * the device. */ Cpa8U hardwareVersion[ICP_SAL_VERSIONS_HW_VERSION_SIZE]; /**< String identifying the version of the hardware (stepping and * revision ID) associated with the device. */ } icp_sal_dev_version_info_t; /** ******************************************************************************* * @ingroup SalVersions * Obtains the version information for a given device * @description * This function obtains hardware and software version information * associated with a given device. * * @param[in] accelId ID of the acceleration device for which version * information is to be obtained. * @param[out] pVerInfo Pointer to a structure that will hold version * information * * @context * This function might sleep. It cannot be executed in a context that * does not permit sleeping. 
* @assumptions * The system has been started * @sideEffects * None * @blocking * No * @reentrant * No * @threadSafe * Yes * * @return CPA_STATUS_SUCCESS Operation finished successfully * @return CPA_STATUS_INVALID_PARAM Invalid parameter passed to the function * @return CPA_STATUS_RESOURCE System resources problem * @return CPA_STATUS_FAIL Operation failed * *****************************************************************************/ CpaStatus icp_sal_getDevVersionInfo(Cpa32U accelId, icp_sal_dev_version_info_t *pVerInfo); #endif diff --git a/sys/dev/qat/qat_common/adf_ctl_drv.c b/sys/dev/qat/qat_common/adf_ctl_drv.c index aecc98332e72..71b1e107cb5b 100644 --- a/sys/dev/qat/qat_common/adf_ctl_drv.c +++ b/sys/dev/qat/qat_common/adf_ctl_drv.c @@ -1,490 +1,489 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright(c) 2007-2022 Intel Corporation */ #include "qat_freebsd.h" #include "adf_cfg.h" #include "adf_common_drv.h" #include "adf_accel_devices.h" #include "icp_qat_uclo.h" #include "icp_qat_fw.h" #include "icp_qat_fw_init_admin.h" #include "adf_cfg_strings.h" #include "adf_uio_control.h" #include "adf_uio_cleanup.h" #include "adf_uio.h" #include "adf_transport_access_macros.h" #include "adf_transport_internal.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "adf_accel_devices.h" #include "adf_common_drv.h" #include "adf_cfg.h" #include "adf_cfg_common.h" #include "adf_cfg_user.h" #include "adf_heartbeat.h" #include "adf_cfg_device.h" #define DEVICE_NAME "qat_adf_ctl" static struct sx adf_ctl_lock; static d_ioctl_t adf_ctl_ioctl; void *misc_counter; static struct cdevsw adf_ctl_cdevsw = { .d_version = D_VERSION, .d_ioctl = adf_ctl_ioctl, .d_name = DEVICE_NAME, }; static struct cdev *adf_ctl_dev; static void adf_chr_drv_destroy(void) { destroy_dev(adf_ctl_dev); } struct adf_user_addr_info { struct list_head list; void *user_addr; }; static int 
adf_chr_drv_create(void) { adf_ctl_dev = make_dev(&adf_ctl_cdevsw, 0, UID_ROOT, GID_WHEEL, 0600, DEVICE_NAME); if (!adf_ctl_dev) { printf("QAT: failed to create device\n"); goto err_cdev_del; } return 0; err_cdev_del: return EFAULT; } static int adf_ctl_alloc_resources(struct adf_user_cfg_ctl_data **ctl_data, caddr_t arg) { *ctl_data = (struct adf_user_cfg_ctl_data *)arg; return 0; } static int adf_copy_keyval_to_user(struct adf_accel_dev *accel_dev, struct adf_user_cfg_ctl_data *ctl_data) { struct adf_user_cfg_key_val key_val; struct adf_user_cfg_section section; char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {0}; char *user_ptr; if (copyin(ctl_data->config_section, §ion, sizeof(struct adf_user_cfg_section))) { device_printf(GET_DEV(accel_dev), "failed to copy section info\n"); return EFAULT; } if (copyin(section.params, &key_val, sizeof(struct adf_user_cfg_key_val))) { device_printf(GET_DEV(accel_dev), "failed to copy key val\n"); return EFAULT; } user_ptr = ((char *)section.params) + ADF_CFG_MAX_KEY_LEN_IN_BYTES; if (adf_cfg_get_param_value( accel_dev, section.name, key_val.key, val)) { return EFAULT; } if (copyout(val, user_ptr, ADF_CFG_MAX_VAL_LEN_IN_BYTES)) { device_printf(GET_DEV(accel_dev), "failed to copy keyvalue to user!\n"); return EFAULT; } return 0; } static int adf_ctl_ioctl_get_num_devices(unsigned int cmd, caddr_t arg) { adf_devmgr_get_num_dev((uint32_t *)arg); return 0; } static int adf_ctl_ioctl_get_status(unsigned int cmd, caddr_t arg) { struct adf_hw_device_data *hw_data; struct adf_dev_status_info *dev_info; struct adf_accel_dev *accel_dev; dev_info = (struct adf_dev_status_info *)arg; accel_dev = adf_devmgr_get_dev_by_id(dev_info->accel_id); if (!accel_dev) return ENODEV; hw_data = accel_dev->hw_device; dev_info->state = adf_dev_started(accel_dev) ? 
DEV_UP : DEV_DOWN; dev_info->num_ae = hw_data->get_num_aes(hw_data); dev_info->num_accel = hw_data->get_num_accels(hw_data); dev_info->num_logical_accel = hw_data->num_logical_accel; dev_info->banks_per_accel = hw_data->num_banks / hw_data->num_logical_accel; strlcpy(dev_info->name, hw_data->dev_class->name, sizeof(dev_info->name)); dev_info->instance_id = hw_data->instance_id; dev_info->type = hw_data->dev_class->type; dev_info->bus = pci_get_bus(accel_to_pci_dev(accel_dev)); dev_info->dev = pci_get_slot(accel_to_pci_dev(accel_dev)); dev_info->fun = pci_get_function(accel_to_pci_dev(accel_dev)); dev_info->domain = pci_get_domain(accel_to_pci_dev(accel_dev)); dev_info->pci_device_id = pci_get_device(accel_to_pci_dev(accel_dev)); dev_info->node_id = accel_dev->accel_pci_dev.node; dev_info->sku = accel_dev->accel_pci_dev.sku; dev_info->device_mem_available = accel_dev->aram_info ? accel_dev->aram_info->inter_buff_aram_region_size : 0; return 0; } static int adf_ctl_ioctl_heartbeat(unsigned int cmd, caddr_t arg) { int ret = 0; struct adf_accel_dev *accel_dev; struct adf_dev_heartbeat_status_ctl *hb_status; hb_status = (struct adf_dev_heartbeat_status_ctl *)arg; accel_dev = adf_devmgr_get_dev_by_id(hb_status->device_id); if (!accel_dev) return ENODEV; if (adf_heartbeat_status(accel_dev, &hb_status->status)) { device_printf(GET_DEV(accel_dev), "failed to get heartbeat status\n"); return EAGAIN; } return ret; } static int adf_ctl_ioctl_dev_get_value(unsigned int cmd, caddr_t arg) { int ret = 0; struct adf_user_cfg_ctl_data *ctl_data; struct adf_accel_dev *accel_dev; ret = adf_ctl_alloc_resources(&ctl_data, arg); if (ret) return ret; accel_dev = adf_devmgr_get_dev_by_id(ctl_data->device_id); if (!accel_dev) { printf("QAT: Device %d not found\n", ctl_data->device_id); ret = ENODEV; goto out; } ret = adf_copy_keyval_to_user(accel_dev, ctl_data); if (ret) { ret = ENODEV; goto out; } out: return ret; } static struct adf_uio_control_bundle *adf_ctl_ioctl_bundle(struct 
adf_user_reserve_ring reserve) { struct adf_accel_dev *accel_dev; struct adf_uio_control_accel *accel; struct adf_uio_control_bundle *bundle = NULL; u8 num_rings_per_bank = 0; accel_dev = adf_devmgr_get_dev_by_id(reserve.accel_id); if (!accel_dev) { pr_err("QAT: Failed to get accel_dev\n"); return NULL; } num_rings_per_bank = accel_dev->hw_device->num_rings_per_bank; accel = accel_dev->accel; if (!accel) { pr_err("QAT: Failed to get accel\n"); return NULL; } if (reserve.bank_nr >= GET_MAX_BANKS(accel_dev)) { pr_err("QAT: Invalid bank number %d\n", reserve.bank_nr); return NULL; } if (reserve.ring_mask & ~((1 << num_rings_per_bank) - 1)) { pr_err("QAT: Invalid ring mask %0X\n", reserve.ring_mask); return NULL; } if (accel->num_ker_bundles > reserve.bank_nr) { pr_err("QAT: Invalid user reserved bank\n"); return NULL; } bundle = &accel->bundle[reserve.bank_nr]; return bundle; } static int adf_ctl_ioctl_reserve_ring(caddr_t arg) { struct adf_user_reserve_ring reserve = {0}; struct adf_uio_control_bundle *bundle; struct adf_uio_instance_rings *instance_rings; int pid_entry_found = 0; reserve = *((struct adf_user_reserve_ring *)arg); bundle = adf_ctl_ioctl_bundle(reserve); if (!bundle) { pr_err("QAT: Failed to get bundle\n"); return -EINVAL; } mutex_lock(&bundle->lock); if (bundle->rings_used & reserve.ring_mask) { pr_err("QAT: Bundle %d, rings 0x%04X already reserved\n", reserve.bank_nr, reserve.ring_mask); mutex_unlock(&bundle->lock); return -EINVAL; } - mutex_unlock(&bundle->lock); /* Find the list entry for this process */ mutex_lock(&bundle->list_lock); list_for_each_entry(instance_rings, &bundle->list, list) { if (instance_rings->user_pid == curproc->p_pid) { pid_entry_found = 1; break; } } mutex_unlock(&bundle->list_lock); if (!pid_entry_found) { pr_err("QAT: process %d not found\n", curproc->p_pid); + mutex_unlock(&bundle->lock); return -EINVAL; } instance_rings->ring_mask |= reserve.ring_mask; - mutex_lock(&bundle->lock); bundle->rings_used |= reserve.ring_mask; 
mutex_unlock(&bundle->lock); return 0; } static int adf_ctl_ioctl_release_ring(caddr_t arg) { struct adf_user_reserve_ring reserve; struct adf_uio_control_bundle *bundle; struct adf_uio_instance_rings *instance_rings; int pid_entry_found; reserve = *((struct adf_user_reserve_ring *)arg); bundle = adf_ctl_ioctl_bundle(reserve); if (!bundle) { pr_err("QAT: Failed to get bundle\n"); return -EINVAL; } /* Find the list entry for this process */ pid_entry_found = 0; mutex_lock(&bundle->list_lock); list_for_each_entry(instance_rings, &bundle->list, list) { if (instance_rings->user_pid == curproc->p_pid) { pid_entry_found = 1; break; } } mutex_unlock(&bundle->list_lock); if (!pid_entry_found) { pr_err("QAT: No ring reservation found for PID %d\n", curproc->p_pid); return -EINVAL; } if ((instance_rings->ring_mask & reserve.ring_mask) != reserve.ring_mask) { pr_err("QAT: Attempt to release rings not reserved by this process\n"); return -EINVAL; } instance_rings->ring_mask &= ~reserve.ring_mask; mutex_lock(&bundle->lock); bundle->rings_used &= ~reserve.ring_mask; mutex_unlock(&bundle->lock); return 0; } static int adf_ctl_ioctl_enable_ring(caddr_t arg) { struct adf_user_reserve_ring reserve; struct adf_uio_control_bundle *bundle; reserve = *((struct adf_user_reserve_ring *)arg); bundle = adf_ctl_ioctl_bundle(reserve); if (!bundle) { pr_err("QAT: Failed to get bundle\n"); return -EINVAL; } mutex_lock(&bundle->lock); bundle->rings_enabled |= reserve.ring_mask; adf_update_uio_ring_arb(bundle); mutex_unlock(&bundle->lock); return 0; } static int adf_ctl_ioctl_disable_ring(caddr_t arg) { struct adf_user_reserve_ring reserve; struct adf_uio_control_bundle *bundle; reserve = *((struct adf_user_reserve_ring *)arg); bundle = adf_ctl_ioctl_bundle(reserve); if (!bundle) { pr_err("QAT: Failed to get bundle\n"); return -EINVAL; } mutex_lock(&bundle->lock); bundle->rings_enabled &= ~reserve.ring_mask; adf_update_uio_ring_arb(bundle); mutex_unlock(&bundle->lock); return 0; } static int 
adf_ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int fflag, struct thread *td) { int ret = 0; bool allowed = false; int i; static const unsigned int unrestricted_cmds[] = { IOCTL_GET_NUM_DEVICES, IOCTL_STATUS_ACCEL_DEV, IOCTL_HEARTBEAT_ACCEL_DEV, IOCTL_GET_CFG_VAL, IOCTL_RESERVE_RING, IOCTL_RELEASE_RING, IOCTL_ENABLE_RING, IOCTL_DISABLE_RING, }; if (priv_check(curthread, PRIV_DRIVER)) { for (i = 0; i < ARRAY_SIZE(unrestricted_cmds); i++) { if (cmd == unrestricted_cmds[i]) { allowed = true; break; } } if (!allowed) return EACCES; } /* All commands have an argument */ if (!arg) return EFAULT; if (sx_xlock_sig(&adf_ctl_lock)) return EINTR; switch (cmd) { case IOCTL_GET_NUM_DEVICES: ret = adf_ctl_ioctl_get_num_devices(cmd, arg); break; case IOCTL_STATUS_ACCEL_DEV: ret = adf_ctl_ioctl_get_status(cmd, arg); break; case IOCTL_GET_CFG_VAL: ret = adf_ctl_ioctl_dev_get_value(cmd, arg); break; case IOCTL_RESERVE_RING: ret = adf_ctl_ioctl_reserve_ring(arg); break; case IOCTL_RELEASE_RING: ret = adf_ctl_ioctl_release_ring(arg); break; case IOCTL_ENABLE_RING: ret = adf_ctl_ioctl_enable_ring(arg); break; case IOCTL_DISABLE_RING: ret = adf_ctl_ioctl_disable_ring(arg); break; case IOCTL_HEARTBEAT_ACCEL_DEV: ret = adf_ctl_ioctl_heartbeat(cmd, arg); break; default: printf("QAT: Invalid ioctl\n"); ret = ENOTTY; break; } sx_xunlock(&adf_ctl_lock); return ret; } int adf_register_ctl_device_driver(void) { sx_init(&adf_ctl_lock, "adf ctl"); if (adf_chr_drv_create()) goto err_chr_dev; adf_state_init(); if (adf_processes_dev_register() != 0) goto err_processes_dev_register; return 0; err_processes_dev_register: adf_chr_drv_destroy(); err_chr_dev: sx_destroy(&adf_ctl_lock); return EFAULT; } void adf_unregister_ctl_device_driver(void) { adf_processes_dev_unregister(); adf_state_destroy(); adf_chr_drv_destroy(); adf_clean_vf_map(false); sx_destroy(&adf_ctl_lock); } diff --git a/sys/dev/qat/qat_common/adf_freebsd_dev_processes.c b/sys/dev/qat/qat_common/adf_freebsd_dev_processes.c index 
6bb91c8d3c46..b8a17344bdea 100644 --- a/sys/dev/qat/qat_common/adf_freebsd_dev_processes.c +++ b/sys/dev/qat/qat_common/adf_freebsd_dev_processes.c @@ -1,676 +1,680 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright(c) 2007-2022 Intel Corporation */ #include "qat_freebsd.h" #include "adf_cfg.h" #include "adf_common_drv.h" #include "adf_accel_devices.h" #include "icp_qat_uclo.h" #include "icp_qat_fw.h" #include "icp_qat_fw_init_admin.h" #include "adf_cfg_strings.h" #include "adf_uio_control.h" #include "adf_uio_cleanup.h" #include "adf_uio.h" #include "adf_transport_access_macros.h" #include "adf_transport_internal.h" #define ADF_DEV_PROCESSES_NAME "qat_dev_processes" #define ADF_DEV_STATE_NAME "qat_dev_state" #define ADF_STATE_CALLOUT_TIME 10 -static const char *mtx_name = "state_callout_mtx"; +static const char *mtx_name = "state_mtx"; +static const char *mtx_callout_name = "callout_mtx"; static d_open_t adf_processes_open; static void adf_processes_release(void *data); static d_read_t adf_processes_read; static d_write_t adf_processes_write; static d_open_t adf_state_open; static void adf_state_release(void *data); static d_read_t adf_state_read; static int adf_state_kqfilter(struct cdev *dev, struct knote *kn); static int adf_state_kqread_event(struct knote *kn, long hint); static void adf_state_kqread_detach(struct knote *kn); static struct callout callout; static struct mtx mtx; +static struct mtx callout_mtx; static struct service_hndl adf_state_hndl; struct entry_proc_events { struct adf_state_priv_data *proc_events; SLIST_ENTRY(entry_proc_events) entries_proc_events; }; struct entry_state { struct adf_state state; STAILQ_ENTRY(entry_state) entries_state; }; SLIST_HEAD(proc_events_head, entry_proc_events); STAILQ_HEAD(state_head, entry_state); static struct proc_events_head proc_events_head; struct adf_processes_priv_data { char name[ADF_CFG_MAX_SECTION_LEN_IN_BYTES]; int read_flag; struct list_head list; }; struct adf_state_priv_data { struct cdev 
*cdev; struct selinfo rsel; struct state_head state_head; }; static struct cdevsw adf_processes_cdevsw = { .d_version = D_VERSION, .d_open = adf_processes_open, .d_read = adf_processes_read, .d_write = adf_processes_write, .d_name = ADF_DEV_PROCESSES_NAME, }; static struct cdevsw adf_state_cdevsw = { .d_version = D_VERSION, .d_open = adf_state_open, .d_read = adf_state_read, .d_kqfilter = adf_state_kqfilter, .d_name = ADF_DEV_STATE_NAME, }; static struct filterops adf_state_read_filterops = { .f_isfd = 1, .f_attach = NULL, .f_detach = adf_state_kqread_detach, .f_event = adf_state_kqread_event, }; static struct cdev *adf_processes_dev; static struct cdev *adf_state_dev; static LINUX_LIST_HEAD(processes_list); struct sx processes_list_sema; SX_SYSINIT(processes_list_sema, &processes_list_sema, "adf proc list"); static void adf_chr_drv_destroy(void) { destroy_dev(adf_processes_dev); } static int adf_chr_drv_create(void) { adf_processes_dev = make_dev(&adf_processes_cdevsw, 0, UID_ROOT, GID_WHEEL, 0600, ADF_DEV_PROCESSES_NAME); if (adf_processes_dev == NULL) { printf("QAT: failed to create device\n"); goto err_cdev_del; } return 0; err_cdev_del: return EFAULT; } static int adf_processes_open(struct cdev *dev, int oflags, int devtype, struct thread *td) { int i = 0, devices = 0; struct adf_accel_dev *accel_dev = NULL; struct adf_processes_priv_data *prv_data = NULL; int error = 0; for (i = 0; i < ADF_MAX_DEVICES; i++) { accel_dev = adf_devmgr_get_dev_by_id(i); if (!accel_dev) continue; if (!adf_dev_started(accel_dev)) continue; devices++; } if (!devices) { printf("QAT: No active devices found.\n"); return ENXIO; } prv_data = malloc(sizeof(*prv_data), M_QAT, M_WAITOK | M_ZERO); if (!prv_data) return ENOMEM; INIT_LIST_HEAD(&prv_data->list); error = devfs_set_cdevpriv(prv_data, adf_processes_release); if (error) { free(prv_data, M_QAT); return error; } return 0; } static int adf_get_first_started_dev(void) { int i = 0; struct adf_accel_dev *accel_dev = NULL; for (i = 0; i 
< ADF_MAX_DEVICES; i++) { accel_dev = adf_devmgr_get_dev_by_id(i); if (!accel_dev) continue; if (adf_dev_started(accel_dev)) return i; } return -1; } static int adf_processes_write(struct cdev *dev, struct uio *uio, int ioflag) { struct adf_processes_priv_data *prv_data = NULL; struct adf_processes_priv_data *pdata = NULL; int dev_num = 0, pr_num = 0; struct list_head *lpos = NULL; char usr_name[ADF_CFG_MAX_SECTION_LEN_IN_BYTES] = { 0 }; struct adf_accel_dev *accel_dev = NULL; struct adf_cfg_section *section_ptr = NULL; bool pr_name_available = 1; uint32_t num_accel_devs = 0; int error = 0; ssize_t count; int dev_id; error = devfs_get_cdevpriv((void **)&prv_data); if (error) { printf("QAT: invalid file descriptor\n"); return error; } if (prv_data->read_flag == 1) { printf("QAT: can only write once\n"); return EBADF; } count = uio->uio_resid; if ((count <= 0) || (count > ADF_CFG_MAX_SECTION_LEN_IN_BYTES)) { printf("QAT: wrong size %d\n", (int)count); return EIO; } error = uiomove(usr_name, count, uio); if (error) { printf("QAT: can't copy data\n"); return error; } /* Lock other processes and try to find out the process name */ if (sx_xlock_sig(&processes_list_sema)) { printf("QAT: can't aquire process info lock\n"); return EBADF; } dev_id = adf_get_first_started_dev(); if (-1 == dev_id) { pr_err("QAT: could not find started device\n"); sx_xunlock(&processes_list_sema); return -EIO; } accel_dev = adf_devmgr_get_dev_by_id(dev_id); if (!accel_dev) { pr_err("QAT: could not find started device\n"); sx_xunlock(&processes_list_sema); return -EIO; } /* If there is nothing there then take the first name and return */ if (list_empty(&processes_list)) { snprintf(prv_data->name, ADF_CFG_MAX_SECTION_LEN_IN_BYTES, "%s" ADF_INTERNAL_USERSPACE_SEC_SUFF "%d", usr_name, 0); list_add(&prv_data->list, &processes_list); sx_xunlock(&processes_list_sema); prv_data->read_flag = 1; return 0; } /* If there are processes running then search for a first free name */ 
adf_devmgr_get_num_dev(&num_accel_devs); for (dev_num = 0; dev_num < num_accel_devs; dev_num++) { accel_dev = adf_devmgr_get_dev_by_id(dev_num); if (!accel_dev) continue; if (!adf_dev_started(accel_dev)) continue; /* to next device */ for (pr_num = 0; pr_num < GET_MAX_PROCESSES(accel_dev); pr_num++) { snprintf(prv_data->name, ADF_CFG_MAX_SECTION_LEN_IN_BYTES, "%s" ADF_INTERNAL_USERSPACE_SEC_SUFF "%d", usr_name, pr_num); pr_name_available = 1; /* Figure out if section exists in the config table */ section_ptr = adf_cfg_sec_find(accel_dev, prv_data->name); if (NULL == section_ptr) { /* This section name doesn't exist */ pr_name_available = 0; /* As process_num enumerates from 0, once we get * to one which doesn't exist no further ones * will exist. On to next device */ break; } /* Figure out if it's been taken already */ list_for_each(lpos, &processes_list) { pdata = list_entry(lpos, struct adf_processes_priv_data, list); if (!strncmp( pdata->name, prv_data->name, ADF_CFG_MAX_SECTION_LEN_IN_BYTES)) { pr_name_available = 0; break; } } if (pr_name_available) break; } if (pr_name_available) break; } /* * If we have a valid name that is not on * the list take it and add to the list */ if (pr_name_available) { list_add(&prv_data->list, &processes_list); sx_xunlock(&processes_list_sema); prv_data->read_flag = 1; return 0; } /* If not then the process needs to wait */ sx_xunlock(&processes_list_sema); explicit_bzero(prv_data->name, ADF_CFG_MAX_SECTION_LEN_IN_BYTES); prv_data->read_flag = 0; return 1; } static int adf_processes_read(struct cdev *dev, struct uio *uio, int ioflag) { struct adf_processes_priv_data *prv_data = NULL; int error = 0; error = devfs_get_cdevpriv((void **)&prv_data); if (error) { printf("QAT: invalid file descriptor\n"); return error; } /* * If there is a name that the process can use then give it * to the proocess. 
*/ if (prv_data->read_flag) { error = uiomove(prv_data->name, strnlen(prv_data->name, ADF_CFG_MAX_SECTION_LEN_IN_BYTES), uio); if (error) { printf("QAT: failed to copy data to user\n"); return error; } return 0; } return EIO; } static void adf_processes_release(void *data) { struct adf_processes_priv_data *prv_data = NULL; prv_data = (struct adf_processes_priv_data *)data; sx_xlock(&processes_list_sema); list_del(&prv_data->list); sx_xunlock(&processes_list_sema); free(prv_data, M_QAT); } int adf_processes_dev_register(void) { return adf_chr_drv_create(); } void adf_processes_dev_unregister(void) { adf_chr_drv_destroy(); } static void adf_state_callout_notify_ev(void *arg) { int notified = 0; struct adf_state_priv_data *priv = NULL; struct entry_proc_events *proc_events = NULL; SLIST_FOREACH (proc_events, &proc_events_head, entries_proc_events) { if (!STAILQ_EMPTY(&proc_events->proc_events->state_head)) { notified = 1; priv = proc_events->proc_events; wakeup(priv); selwakeup(&priv->rsel); KNOTE_UNLOCKED(&priv->rsel.si_note, 0); } } if (notified) callout_schedule(&callout, ADF_STATE_CALLOUT_TIME); } static void adf_state_set(int dev, enum adf_event event) { struct adf_accel_dev *accel_dev = NULL; struct state_head *head = NULL; struct entry_proc_events *proc_events = NULL; struct entry_state *state = NULL; accel_dev = adf_devmgr_get_dev_by_id(dev); if (!accel_dev) return; mtx_lock(&mtx); SLIST_FOREACH (proc_events, &proc_events_head, entries_proc_events) { state = NULL; head = &proc_events->proc_events->state_head; state = malloc(sizeof(struct entry_state), M_QAT, M_NOWAIT | M_ZERO); if (!state) continue; state->state.dev_state = event; state->state.dev_id = dev; STAILQ_INSERT_TAIL(head, state, entries_state); if (event == ADF_EVENT_STOP) { state = NULL; state = malloc(sizeof(struct entry_state), M_QAT, M_NOWAIT | M_ZERO); if (!state) continue; state->state.dev_state = ADF_EVENT_SHUTDOWN; state->state.dev_id = dev; STAILQ_INSERT_TAIL(head, state, entries_state); } } 
- callout_schedule(&callout, ADF_STATE_CALLOUT_TIME); mtx_unlock(&mtx); + callout_schedule(&callout, ADF_STATE_CALLOUT_TIME); } static int adf_state_event_handler(struct adf_accel_dev *accel_dev, enum adf_event event) { int ret = 0; #if defined(QAT_UIO) && defined(QAT_DBG) if (event > ADF_EVENT_DBG_SHUTDOWN) return -EINVAL; #else if (event > ADF_EVENT_ERROR) return -EINVAL; #endif /* defined(QAT_UIO) && defined(QAT_DBG) */ switch (event) { case ADF_EVENT_INIT: return ret; case ADF_EVENT_SHUTDOWN: return ret; case ADF_EVENT_RESTARTING: break; case ADF_EVENT_RESTARTED: break; case ADF_EVENT_START: return ret; case ADF_EVENT_STOP: break; case ADF_EVENT_ERROR: break; #if defined(QAT_UIO) && defined(QAT_DBG) case ADF_EVENT_PROC_CRASH: break; case ADF_EVENT_MANUAL_DUMP: break; case ADF_EVENT_SLICE_HANG: break; case ADF_EVENT_DBG_SHUTDOWN: break; #endif /* defined(QAT_UIO) && defined(QAT_DBG) */ default: return -1; } adf_state_set(accel_dev->accel_id, event); return 0; } static int adf_state_kqfilter(struct cdev *dev, struct knote *kn) { struct adf_state_priv_data *priv; mtx_lock(&mtx); priv = dev->si_drv1; switch (kn->kn_filter) { case EVFILT_READ: kn->kn_fop = &adf_state_read_filterops; kn->kn_hook = priv; - knlist_add(&priv->rsel.si_note, kn, 0); + knlist_add(&priv->rsel.si_note, kn, 1); mtx_unlock(&mtx); return 0; default: mtx_unlock(&mtx); return -EINVAL; } } static int adf_state_kqread_event(struct knote *kn, long hint) { return 1; } static void adf_state_kqread_detach(struct knote *kn) { struct adf_state_priv_data *priv = NULL; mtx_lock(&mtx); if (!kn) { mtx_unlock(&mtx); return; } priv = kn->kn_hook; if (!priv) { mtx_unlock(&mtx); return; } knlist_remove(&priv->rsel.si_note, kn, 1); mtx_unlock(&mtx); } void adf_state_init(void) { adf_state_dev = make_dev(&adf_state_cdevsw, 0, UID_ROOT, GID_WHEEL, 0600, "%s", ADF_DEV_STATE_NAME); SLIST_INIT(&proc_events_head); mtx_init(&mtx, mtx_name, NULL, MTX_DEF); - callout_init_mtx(&callout, &mtx, 0); + mtx_init(&callout_mtx, 
mtx_callout_name, NULL, MTX_DEF); + callout_init_mtx(&callout, &callout_mtx, 0); explicit_bzero(&adf_state_hndl, sizeof(adf_state_hndl)); adf_state_hndl.event_hld = adf_state_event_handler; adf_state_hndl.name = "adf_state_event_handler"; - mtx_lock(&mtx); adf_service_register(&adf_state_hndl); callout_reset(&callout, ADF_STATE_CALLOUT_TIME, adf_state_callout_notify_ev, NULL); - mtx_unlock(&mtx); } void adf_state_destroy(void) { struct entry_proc_events *proc_events = NULL; - mtx_lock(&mtx); adf_service_unregister(&adf_state_hndl); + mtx_lock(&callout_mtx); callout_stop(&callout); + mtx_unlock(&callout_mtx); + mtx_destroy(&callout_mtx); + mtx_lock(&mtx); while (!SLIST_EMPTY(&proc_events_head)) { proc_events = SLIST_FIRST(&proc_events_head); SLIST_REMOVE_HEAD(&proc_events_head, entries_proc_events); free(proc_events, M_QAT); } - destroy_dev(adf_state_dev); mtx_unlock(&mtx); mtx_destroy(&mtx); + destroy_dev(adf_state_dev); } static int adf_state_open(struct cdev *dev, int oflags, int devtype, struct thread *td) { struct adf_state_priv_data *prv_data = NULL; struct entry_proc_events *entry_proc_events = NULL; int ret = 0; prv_data = malloc(sizeof(*prv_data), M_QAT, M_WAITOK | M_ZERO); if (!prv_data) return -ENOMEM; entry_proc_events = malloc(sizeof(struct entry_proc_events), M_QAT, M_WAITOK | M_ZERO); if (!entry_proc_events) { free(prv_data, M_QAT); return -ENOMEM; } mtx_lock(&mtx); prv_data->cdev = dev; prv_data->cdev->si_drv1 = prv_data; knlist_init_mtx(&prv_data->rsel.si_note, &mtx); STAILQ_INIT(&prv_data->state_head); entry_proc_events->proc_events = prv_data; SLIST_INSERT_HEAD(&proc_events_head, entry_proc_events, entries_proc_events); + mtx_unlock(&mtx); ret = devfs_set_cdevpriv(prv_data, adf_state_release); if (ret) { SLIST_REMOVE(&proc_events_head, entry_proc_events, entry_proc_events, entries_proc_events); free(entry_proc_events, M_QAT); free(prv_data, M_QAT); } callout_schedule(&callout, ADF_STATE_CALLOUT_TIME); - mtx_unlock(&mtx); return ret; } static int 
adf_state_read(struct cdev *dev, struct uio *uio, int ioflag) { int ret = 0; struct adf_state_priv_data *prv_data = NULL; struct state_head *state_head = NULL; struct entry_state *entry_state = NULL; struct adf_state *state = NULL; struct entry_proc_events *proc_events = NULL; mtx_lock(&mtx); ret = devfs_get_cdevpriv((void **)&prv_data); if (ret) { mtx_unlock(&mtx); return 0; } state_head = &prv_data->state_head; if (STAILQ_EMPTY(state_head)) { mtx_unlock(&mtx); return 0; } entry_state = STAILQ_FIRST(state_head); state = &entry_state->state; ret = uiomove(state, sizeof(struct adf_state), uio); if (!ret && !STAILQ_EMPTY(state_head)) { STAILQ_REMOVE_HEAD(state_head, entries_state); free(entry_state, M_QAT); } SLIST_FOREACH (proc_events, &proc_events_head, entries_proc_events) { if (!STAILQ_EMPTY(&proc_events->proc_events->state_head)) { prv_data = proc_events->proc_events; wakeup(prv_data); selwakeup(&prv_data->rsel); KNOTE_UNLOCKED(&prv_data->rsel.si_note, 0); } } - callout_schedule(&callout, ADF_STATE_CALLOUT_TIME); mtx_unlock(&mtx); + callout_schedule(&callout, ADF_STATE_CALLOUT_TIME); return ret; } static void adf_state_release(void *data) { struct adf_state_priv_data *prv_data = NULL; struct entry_state *entry_state = NULL; struct entry_proc_events *entry_proc_events = NULL; struct entry_proc_events *tmp = NULL; mtx_lock(&mtx); prv_data = (struct adf_state_priv_data *)data; knlist_delete(&prv_data->rsel.si_note, curthread, 1); knlist_destroy(&prv_data->rsel.si_note); seldrain(&prv_data->rsel); while (!STAILQ_EMPTY(&prv_data->state_head)) { entry_state = STAILQ_FIRST(&prv_data->state_head); STAILQ_REMOVE_HEAD(&prv_data->state_head, entries_state); free(entry_state, M_QAT); } SLIST_FOREACH_SAFE (entry_proc_events, &proc_events_head, entries_proc_events, tmp) { if (entry_proc_events->proc_events == prv_data) { SLIST_REMOVE(&proc_events_head, entry_proc_events, entry_proc_events, entries_proc_events); free(entry_proc_events, M_QAT); } } free(prv_data, M_QAT); 
mtx_unlock(&mtx); } diff --git a/sys/dev/qat/qat_common/adf_freebsd_uio_cleanup.c b/sys/dev/qat/qat_common/adf_freebsd_uio_cleanup.c index e5628bed371e..6fb4cf0bf2f7 100644 --- a/sys/dev/qat/qat_common/adf_freebsd_uio_cleanup.c +++ b/sys/dev/qat/qat_common/adf_freebsd_uio_cleanup.c @@ -1,403 +1,394 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright(c) 2007-2022 Intel Corporation */ #include "qat_freebsd.h" #include "adf_cfg.h" #include "adf_common_drv.h" #include "adf_accel_devices.h" #include "icp_qat_uclo.h" #include "icp_qat_fw.h" #include "icp_qat_fw_init_admin.h" #include "adf_cfg_strings.h" #include "adf_uio_control.h" #include "adf_uio_cleanup.h" #include "adf_uio.h" #include "adf_transport_access_macros.h" #include "adf_transport_internal.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define TX_RINGS_DISABLE 0 #define TX_RINGS_ENABLE 1 #define PKE_REQ_SIZE 64 #define BASE_ADDR_SHIFT 6 #define PKE_RX_RING_0 0 #define PKE_RX_RING_1 1 #define ADF_RING_EMPTY_RETRY_DELAY 2 #define ADF_RING_EMPTY_MAX_RETRY 15 struct bundle_orphan_ring { unsigned long tx_mask; unsigned long rx_mask; unsigned long asym_mask; int bank; struct resource *csr_base; struct adf_uio_control_bundle *bundle; }; /* * if orphan->tx_mask does not match with orphan->rx_mask */ static void check_orphan_ring(struct adf_accel_dev *accel_dev, struct bundle_orphan_ring *orphan, struct adf_hw_device_data *hw_data) { struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev); int i; int tx_rx_gap = hw_data->tx_rx_gap; u8 num_rings_per_bank = hw_data->num_rings_per_bank; struct resource *csr_base = orphan->csr_base; int bank = orphan->bank; for (i = 0; i < num_rings_per_bank; i++) { if (test_bit(i, &orphan->tx_mask)) { int rx_ring = i + tx_rx_gap; if (!test_bit(rx_ring, &orphan->rx_mask)) { __clear_bit(i, &orphan->tx_mask); /* clean up this tx ring */ csr_ops->write_csr_ring_config(csr_base, bank, i, 0); 
csr_ops->write_csr_ring_base(csr_base, bank, i, 0); } } else if (test_bit(i, &orphan->rx_mask)) { int tx_ring = i - tx_rx_gap; if (!test_bit(tx_ring, &orphan->tx_mask)) { __clear_bit(i, &orphan->rx_mask); /* clean up this rx ring */ csr_ops->write_csr_ring_config(csr_base, bank, i, 0); csr_ops->write_csr_ring_base(csr_base, bank, i, 0); } } } } static int get_orphan_bundle(int bank, struct adf_uio_control_accel *accel, struct bundle_orphan_ring **orphan_bundle_out) { int i; int ret = 0; struct resource *csr_base; unsigned long tx_mask; unsigned long asym_mask; struct adf_accel_dev *accel_dev = accel->accel_dev; struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev); struct adf_hw_device_data *hw_data = accel_dev->hw_device; u8 num_rings_per_bank = hw_data->num_rings_per_bank; struct bundle_orphan_ring *orphan_bundle = NULL; uint64_t base; struct list_head *entry; struct adf_uio_instance_rings *instance_rings; struct adf_uio_control_bundle *bundle; u16 ring_mask = 0; orphan_bundle = malloc(sizeof(*orphan_bundle), M_QAT, M_WAITOK | M_ZERO); if (!orphan_bundle) return ENOMEM; csr_base = accel->bar->virt_addr; orphan_bundle->csr_base = csr_base; orphan_bundle->bank = bank; orphan_bundle->tx_mask = 0; orphan_bundle->rx_mask = 0; tx_mask = accel_dev->hw_device->tx_rings_mask; asym_mask = accel_dev->hw_device->asym_rings_mask; /* Get ring mask for this process. */ bundle = &accel->bundle[bank]; orphan_bundle->bundle = bundle; mutex_lock(&bundle->list_lock); list_for_each(entry, &bundle->list) { instance_rings = list_entry(entry, struct adf_uio_instance_rings, list); if (instance_rings->user_pid == curproc->p_pid) { ring_mask = instance_rings->ring_mask; break; } } mutex_unlock(&bundle->list_lock); for (i = 0; i < num_rings_per_bank; i++) { base = csr_ops->read_csr_ring_base(csr_base, bank, i); if (!base) continue; if (!(ring_mask & 1 << i)) continue; /* Not reserved for this process. 
*/ if (test_bit(i, &tx_mask)) __set_bit(i, &orphan_bundle->tx_mask); else __set_bit(i, &orphan_bundle->rx_mask); if (test_bit(i, &asym_mask)) __set_bit(i, &orphan_bundle->asym_mask); } if (orphan_bundle->tx_mask || orphan_bundle->rx_mask) check_orphan_ring(accel_dev, orphan_bundle, hw_data); *orphan_bundle_out = orphan_bundle; return ret; } static void put_orphan_bundle(struct bundle_orphan_ring *bundle) { if (!bundle) return; free(bundle, M_QAT); } /* cleanup all ring */ static void cleanup_all_ring(struct adf_uio_control_accel *accel, struct bundle_orphan_ring *orphan) { int i; struct resource *csr_base = orphan->csr_base; unsigned long mask = orphan->rx_mask | orphan->tx_mask; struct adf_accel_dev *accel_dev = accel->accel_dev; struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev); struct adf_hw_device_data *hw_data = accel_dev->hw_device; u8 num_rings_per_bank = hw_data->num_rings_per_bank; int bank = orphan->bank; mutex_lock(&orphan->bundle->lock); orphan->bundle->rings_enabled &= ~mask; adf_update_uio_ring_arb(orphan->bundle); mutex_unlock(&orphan->bundle->lock); for (i = 0; i < num_rings_per_bank; i++) { if (!test_bit(i, &mask)) continue; csr_ops->write_csr_ring_config(csr_base, bank, i, 0); csr_ops->write_csr_ring_base(csr_base, bank, i, 0); } } /* * Return true, if number of messages in tx ring is equal to number * of messages in corresponding rx ring, else false. 
*/ static bool is_all_resp_recvd(struct adf_hw_csr_ops *csr_ops, struct bundle_orphan_ring *bundle, const u8 num_rings_per_bank) { u32 rx_tail = 0, tx_head = 0, rx_ring_msg_offset = 0, tx_ring_msg_offset = 0, tx_rx_offset = num_rings_per_bank / 2, idx = 0, retry = 0, delay = ADF_RING_EMPTY_RETRY_DELAY; do { for_each_set_bit(idx, &bundle->tx_mask, tx_rx_offset) { rx_tail = csr_ops->read_csr_ring_tail(bundle->csr_base, 0, (idx + tx_rx_offset)); tx_head = csr_ops->read_csr_ring_head(bundle->csr_base, 0, idx); /* * Normalize messages in tx rings to match rx ring * message size, i.e., size of response message(32). * Asym messages are 64 bytes each, so right shift * by 1 to normalize to 32. Sym and compression * messages are 128 bytes each, so right shift by 2 * to normalize to 32. */ if (bundle->asym_mask & (1 << idx)) tx_ring_msg_offset = (tx_head >> 1); else tx_ring_msg_offset = (tx_head >> 2); rx_ring_msg_offset = rx_tail; if (tx_ring_msg_offset != rx_ring_msg_offset) break; } if (idx == tx_rx_offset) /* All Tx and Rx ring message counts match */ return true; DELAY(delay); delay *= 2; } while (++retry < ADF_RING_EMPTY_MAX_RETRY); return false; } static int bundle_need_cleanup(int bank, struct adf_uio_control_accel *accel) { struct resource *csr_base = accel->bar->virt_addr; struct adf_accel_dev *accel_dev = accel->accel_dev; struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev); struct adf_hw_device_data *hw_data = accel_dev->hw_device; u8 num_rings_per_bank = hw_data->num_rings_per_bank; int i; if (!csr_base) return 0; for (i = 0; i < num_rings_per_bank; i++) { if (csr_ops->read_csr_ring_base(csr_base, bank, i)) return 1; } return 0; } static void cleanup_orphan_ring(struct bundle_orphan_ring *orphan, struct adf_uio_control_accel *accel) { struct adf_accel_dev *accel_dev = accel->accel_dev; struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev); struct adf_hw_device_data *hw_data = accel_dev->hw_device; u8 number_rings_per_bank = hw_data->num_rings_per_bank; /* 
disable the interrupt */ csr_ops->write_csr_int_col_en(orphan->csr_base, orphan->bank, 0); /* * wait firmware finish the in-process ring * 1. disable all tx rings * 2. check if all responses are received * 3. reset all rings */ adf_disable_ring_arb(accel_dev, orphan->csr_base, 0, orphan->tx_mask); if (!is_all_resp_recvd(csr_ops, orphan, number_rings_per_bank)) { device_printf(GET_DEV(accel_dev), "Failed to clean up orphan rings"); return; } /* * When the execution reaches here, it is assumed that * there is no inflight request in the rings and that * there is no in-process ring. */ cleanup_all_ring(accel, orphan); } void adf_uio_do_cleanup_orphan(int bank, struct adf_uio_control_accel *accel) { - int ret, pid_found; + int ret; struct adf_uio_instance_rings *instance_rings, *tmp; struct adf_uio_control_bundle *bundle; /* orphan is local pointer allocated and deallocated in this function */ struct bundle_orphan_ring *orphan = NULL; struct adf_accel_dev *accel_dev = accel->accel_dev; struct adf_hw_device_data *hw_data = accel_dev->hw_device; if (!bundle_need_cleanup(bank, accel)) goto release; ret = get_orphan_bundle(bank, accel, &orphan); if (ret != 0) return; /* * If driver supports ring pair reset, no matter process * exits normally or abnormally, just do ring pair reset. * ring pair reset will reset all ring pair registers to * default value. Driver only needs to reset ring mask */ if (hw_data->ring_pair_reset) { hw_data->ring_pair_reset( accel_dev, orphan->bundle->hardware_bundle_number); - mutex_lock(&orphan->bundle->lock); /* * If processes exit normally, rx_mask, tx_mask * and rings_enabled are all 0, below expression * have no impact on rings_enabled. * If processes exit abnormally, rings_enabled * will be set as 0 by below expression. 
*/ orphan->bundle->rings_enabled &= ~(orphan->rx_mask | orphan->tx_mask); - mutex_unlock(&orphan->bundle->lock); goto out; } if (!orphan->tx_mask && !orphan->rx_mask) goto out; device_printf(GET_DEV(accel_dev), "Process %d %s exit with orphan rings %lx:%lx\n", curproc->p_pid, curproc->p_comm, orphan->tx_mask, orphan->rx_mask); if (!test_bit(ADF_STATUS_RESTARTING, &accel_dev->status)) { cleanup_orphan_ring(orphan, accel); } out: put_orphan_bundle(orphan); release: bundle = &accel->bundle[bank]; /* * If the user process died without releasing the rings * then force a release here. */ mutex_lock(&bundle->list_lock); - pid_found = 0; list_for_each_entry_safe(instance_rings, tmp, &bundle->list, list) { if (instance_rings->user_pid == curproc->p_pid) { - pid_found = 1; + bundle->rings_used &= ~instance_rings->ring_mask; break; } } mutex_unlock(&bundle->list_lock); - - if (pid_found) { - mutex_lock(&bundle->lock); - bundle->rings_used &= ~instance_rings->ring_mask; - mutex_unlock(&bundle->lock); - } } diff --git a/sys/dev/qat/qat_common/adf_pfvf_vf_proto.c b/sys/dev/qat/qat_common/adf_pfvf_vf_proto.c index 044e5040f5b6..a09ddb819831 100644 --- a/sys/dev/qat/qat_common/adf_pfvf_vf_proto.c +++ b/sys/dev/qat/qat_common/adf_pfvf_vf_proto.c @@ -1,404 +1,409 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright(c) 2007-2022 Intel Corporation */ #include #include "adf_accel_devices.h" #include "adf_common_drv.h" #include "adf_pfvf_msg.h" #include "adf_pfvf_utils.h" #include "adf_pfvf_vf_msg.h" #include "adf_pfvf_vf_proto.h" #define __bf_shf(x) (__builtin_ffsll(x) - 1) #define FIELD_MAX(_mask) ({ (typeof(_mask))((_mask) >> __bf_shf(_mask)); }) #define FIELD_PREP(_mask, _val) \ ({ ((typeof(_mask))(_val) << __bf_shf(_mask)) & (_mask); }) #define FIELD_GET(_mask, _reg) \ ({ (typeof(_mask))(((_reg) & (_mask)) >> __bf_shf(_mask)); }) /** * adf_send_vf2pf_msg() - send VF to PF message * @accel_dev: Pointer to acceleration device * @msg: Message to send * * This function allows 
the VF to send a message to the PF. * * Return: 0 on success, error code otherwise. */ int adf_send_vf2pf_msg(struct adf_accel_dev *accel_dev, struct pfvf_message msg) { struct adf_pfvf_ops *pfvf_ops = GET_PFVF_OPS(accel_dev); u32 pfvf_offset = pfvf_ops->get_pf2vf_offset(0); int ret = pfvf_ops->send_msg(accel_dev, msg, pfvf_offset, &accel_dev->u1.vf.vf2pf_lock); return ret; } /** * adf_recv_pf2vf_msg() - receive a PF to VF message * @accel_dev: Pointer to acceleration device * * This function allows the VF to receive a message from the PF. * * Return: a valid message on success, zero otherwise. */ static struct pfvf_message adf_recv_pf2vf_msg(struct adf_accel_dev *accel_dev) { struct adf_pfvf_ops *pfvf_ops = GET_PFVF_OPS(accel_dev); u32 pfvf_offset = pfvf_ops->get_vf2pf_offset(0); // 1008 return pfvf_ops->recv_msg(accel_dev, pfvf_offset, accel_dev->u1.vf.pf_compat_ver); } /** * adf_send_vf2pf_req() - send VF2PF request message * @accel_dev: Pointer to acceleration device. * @msg: Request message to send * @resp: Returned PF response * * This function sends a message that requires a response from the VF to the PF * and waits for a reply. * * Return: 0 on success, error code otherwise. 
*/ int adf_send_vf2pf_req(struct adf_accel_dev *accel_dev, struct pfvf_message msg, struct pfvf_message *resp) { unsigned long timeout = msecs_to_jiffies(ADF_PFVF_MSG_RESP_TIMEOUT); unsigned int retries = ADF_PFVF_MSG_RESP_RETRIES; int ret; reinit_completion(&accel_dev->u1.vf.msg_received); /* Send request from VF to PF */ do { ret = adf_send_vf2pf_msg(accel_dev, msg); if (ret) { device_printf(GET_DEV(accel_dev), "Failed to send request msg to PF\n"); return ret; } /* Wait for response, if it times out retry */ ret = wait_for_completion_timeout(&accel_dev->u1.vf.msg_received, timeout); if (ret) { if (likely(resp)) *resp = accel_dev->u1.vf.response; /* Once copied, set to an invalid value */ accel_dev->u1.vf.response.type = 0; return 0; } device_printf(GET_DEV(accel_dev), "PFVF response message timeout\n"); } while (--retries); return -EIO; } static int adf_vf2pf_blkmsg_data_req(struct adf_accel_dev *accel_dev, bool crc, u8 *type, u8 *data) { struct pfvf_message req = { 0 }; struct pfvf_message resp = { 0 }; u8 blk_type; u8 blk_byte; u8 msg_type; u8 max_data; int err; /* Convert the block type to {small, medium, large} size category */ if (*type <= ADF_VF2PF_SMALL_BLOCK_TYPE_MAX) { msg_type = ADF_VF2PF_MSGTYPE_SMALL_BLOCK_REQ; blk_type = FIELD_PREP(ADF_VF2PF_SMALL_BLOCK_TYPE_MASK, *type); blk_byte = FIELD_PREP(ADF_VF2PF_SMALL_BLOCK_BYTE_MASK, *data); max_data = ADF_VF2PF_SMALL_BLOCK_BYTE_MAX; } else if (*type <= ADF_VF2PF_MEDIUM_BLOCK_TYPE_MAX) { msg_type = ADF_VF2PF_MSGTYPE_MEDIUM_BLOCK_REQ; blk_type = FIELD_PREP(ADF_VF2PF_MEDIUM_BLOCK_TYPE_MASK, *type - ADF_VF2PF_SMALL_BLOCK_TYPE_MAX); blk_byte = FIELD_PREP(ADF_VF2PF_MEDIUM_BLOCK_BYTE_MASK, *data); max_data = ADF_VF2PF_MEDIUM_BLOCK_BYTE_MAX; } else if (*type <= ADF_VF2PF_LARGE_BLOCK_TYPE_MAX) { msg_type = ADF_VF2PF_MSGTYPE_LARGE_BLOCK_REQ; blk_type = FIELD_PREP(ADF_VF2PF_LARGE_BLOCK_TYPE_MASK, *type - ADF_VF2PF_MEDIUM_BLOCK_TYPE_MAX); blk_byte = FIELD_PREP(ADF_VF2PF_LARGE_BLOCK_BYTE_MASK, *data); max_data = 
ADF_VF2PF_LARGE_BLOCK_BYTE_MAX; } else { device_printf(GET_DEV(accel_dev), "Invalid message type %u\n", *type); return -EINVAL; } /* Sanity check */ if (*data > max_data) { device_printf(GET_DEV(accel_dev), "Invalid byte %s %u for message type %u\n", crc ? "count" : "index", *data, *type); return -EINVAL; } /* Build the block message */ req.type = msg_type; req.data = blk_type | blk_byte | FIELD_PREP(ADF_VF2PF_BLOCK_CRC_REQ_MASK, crc); err = adf_send_vf2pf_req(accel_dev, req, &resp); if (err) return err; *type = FIELD_GET(ADF_PF2VF_BLKMSG_RESP_TYPE_MASK, resp.data); *data = FIELD_GET(ADF_PF2VF_BLKMSG_RESP_DATA_MASK, resp.data); return 0; } static int adf_vf2pf_blkmsg_get_byte(struct adf_accel_dev *accel_dev, u8 type, u8 index, u8 *data) { int ret; ret = adf_vf2pf_blkmsg_data_req(accel_dev, false, &type, &index); if (ret < 0) return ret; if (unlikely(type != ADF_PF2VF_BLKMSG_RESP_TYPE_DATA)) { device_printf(GET_DEV(accel_dev), "Unexpected BLKMSG response type %u, byte 0x%x\n", type, index); return -EFAULT; } *data = index; return 0; } static int adf_vf2pf_blkmsg_get_crc(struct adf_accel_dev *accel_dev, u8 type, u8 bytes, u8 *crc) { int ret; /* The count of bytes refers to a length, however shift it to a 0-based * count to avoid overflows. Thus, a request for 0 bytes is technically * valid. */ --bytes; ret = adf_vf2pf_blkmsg_data_req(accel_dev, true, &type, &bytes); if (ret < 0) return ret; if (unlikely(type != ADF_PF2VF_BLKMSG_RESP_TYPE_CRC)) { device_printf( GET_DEV(accel_dev), "Unexpected CRC BLKMSG response type %u, crc 0x%x\n", type, bytes); return -EFAULT; } *crc = bytes; return 0; } /** * adf_send_vf2pf_blkmsg_req() - retrieve block message * @accel_dev: Pointer to acceleration VF device. * @type: The block message type, see adf_pfvf_msg.h for allowed values * @buffer: input buffer where to place the received data * @buffer_len: buffer length as input, the amount of written bytes on output * * Request a message of type 'type' over the block message transport. 
* This function will send the required amount block message requests and * return the overall content back to the caller through the provided buffer. * The buffer should be large enough to contain the requested message type, * otherwise the response will be truncated. * * Return: 0 on success, error code otherwise. */ int adf_send_vf2pf_blkmsg_req(struct adf_accel_dev *accel_dev, u8 type, u8 *buffer, unsigned int *buffer_len) { unsigned int index; unsigned int msg_len; int ret; u8 remote_crc; u8 local_crc; if (unlikely(type > ADF_VF2PF_LARGE_BLOCK_TYPE_MAX)) { device_printf(GET_DEV(accel_dev), "Invalid block message type %d\n", type); return -EINVAL; } if (unlikely(*buffer_len < ADF_PFVF_BLKMSG_HEADER_SIZE)) { device_printf(GET_DEV(accel_dev), "Buffer size too small for a block message\n"); return -EINVAL; } ret = adf_vf2pf_blkmsg_get_byte(accel_dev, type, ADF_PFVF_BLKMSG_VER_BYTE, &buffer[ADF_PFVF_BLKMSG_VER_BYTE]); if (unlikely(ret)) return ret; if (unlikely(!buffer[ADF_PFVF_BLKMSG_VER_BYTE])) { device_printf(GET_DEV(accel_dev), "Invalid version 0 received for block request %u", type); return -EFAULT; } ret = adf_vf2pf_blkmsg_get_byte(accel_dev, type, ADF_PFVF_BLKMSG_LEN_BYTE, &buffer[ADF_PFVF_BLKMSG_LEN_BYTE]); if (unlikely(ret)) return ret; if (unlikely(!buffer[ADF_PFVF_BLKMSG_LEN_BYTE])) { device_printf(GET_DEV(accel_dev), "Invalid size 0 received for block request %u", type); return -EFAULT; } /* We need to pick the minimum since there is no way to request a * specific version. 
As a consequence any scenario is possible: * - PF has a newer (longer) version which doesn't fit in the buffer * - VF expects a newer (longer) version, so we must not ask for * bytes in excess * - PF and VF share the same version, no problem */ msg_len = ADF_PFVF_BLKMSG_HEADER_SIZE + buffer[ADF_PFVF_BLKMSG_LEN_BYTE]; msg_len = min(*buffer_len, msg_len); /* Get the payload */ for (index = ADF_PFVF_BLKMSG_HEADER_SIZE; index < msg_len; index++) { ret = adf_vf2pf_blkmsg_get_byte(accel_dev, type, index, &buffer[index]); if (unlikely(ret)) return ret; } ret = adf_vf2pf_blkmsg_get_crc(accel_dev, type, msg_len, &remote_crc); if (unlikely(ret)) return ret; local_crc = adf_pfvf_calc_blkmsg_crc(buffer, msg_len); if (unlikely(local_crc != remote_crc)) { device_printf( GET_DEV(accel_dev), "CRC error on msg type %d. Local %02X, remote %02X\n", type, local_crc, remote_crc); return -EIO; } *buffer_len = msg_len; return 0; } static bool adf_handle_pf2vf_msg(struct adf_accel_dev *accel_dev, struct pfvf_message msg) { switch (msg.type) { case ADF_PF2VF_MSGTYPE_RESTARTING: adf_pf2vf_handle_pf_restarting(accel_dev); return false; case ADF_PF2VF_MSGTYPE_RP_RESET_RESP: adf_pf2vf_handle_pf_rp_reset(accel_dev, msg); return true; case ADF_PF2VF_MSGTYPE_VERSION_RESP: case ADF_PF2VF_MSGTYPE_BLKMSG_RESP: accel_dev->u1.vf.response = msg; complete(&accel_dev->u1.vf.msg_received); return true; default: device_printf( GET_DEV(accel_dev), "Unknown message from PF (type 0x%.4x, data: 0x%.4x)\n", msg.type, msg.data); } return false; } bool adf_recv_and_handle_pf2vf_msg(struct adf_accel_dev *accel_dev) { struct pfvf_message msg; msg = adf_recv_pf2vf_msg(accel_dev); if (msg.type) /* Invalid or no message */ return adf_handle_pf2vf_msg(accel_dev, msg); /* No replies for PF->VF messages at present */ return true; } /** * adf_enable_vf2pf_comms() - Function enables communication from vf to pf * * @accel_dev: Pointer to acceleration device virtual function. * * Return: 0 on success, error code otherwise. 
*/ int adf_enable_vf2pf_comms(struct adf_accel_dev *accel_dev) { struct adf_hw_device_data *hw_data = accel_dev->hw_device; int ret; + /* init workqueue for VF */ + ret = adf_init_vf_wq(); + if (ret) + return ret; + hw_data->enable_pf2vf_interrupt(accel_dev); ret = adf_vf2pf_request_version(accel_dev); if (ret) return ret; ret = adf_vf2pf_get_capabilities(accel_dev); if (ret) return ret; ret = adf_vf2pf_get_ring_to_svc(accel_dev); return ret; } diff --git a/sys/dev/qat/qat_hw/qat_4xxx/adf_drv.c b/sys/dev/qat/qat_hw/qat_4xxx/adf_drv.c index 75100126ffa0..f4a673e25a40 100644 --- a/sys/dev/qat/qat_hw/qat_4xxx/adf_drv.c +++ b/sys/dev/qat/qat_hw/qat_4xxx/adf_drv.c @@ -1,266 +1,270 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright(c) 2007 - 2022 Intel Corporation */ #include "qat_freebsd.h" #include "adf_cfg.h" #include "adf_common_drv.h" #include "adf_accel_devices.h" #include "adf_4xxx_hw_data.h" #include "adf_gen4_hw_data.h" #include "adf_fw_counters.h" #include "adf_cfg_device.h" #include #include #include #include #include #include "adf_heartbeat_dbg.h" #include "adf_cnvnr_freq_counters.h" static MALLOC_DEFINE(M_QAT_4XXX, "qat_4xxx", "qat_4xxx"); #define ADF_SYSTEM_DEVICE(device_id) \ { \ PCI_VENDOR_ID_INTEL, device_id \ } static const struct pci_device_id adf_pci_tbl[] = { ADF_SYSTEM_DEVICE(ADF_4XXX_PCI_DEVICE_ID), ADF_SYSTEM_DEVICE(ADF_401XX_PCI_DEVICE_ID), { 0, } }; static int adf_probe(device_t dev) { const struct pci_device_id *id; for (id = adf_pci_tbl; id->vendor != 0; id++) { if (pci_get_vendor(dev) == id->vendor && pci_get_device(dev) == id->device) { device_set_desc(dev, "Intel " ADF_4XXX_DEVICE_NAME " QuickAssist"); return BUS_PROBE_GENERIC; } } return ENXIO; } static void adf_cleanup_accel(struct adf_accel_dev *accel_dev) { struct adf_accel_pci *accel_pci_dev = &accel_dev->accel_pci_dev; int i; if (accel_dev->dma_tag) bus_dma_tag_destroy(accel_dev->dma_tag); for (i = 0; i < ADF_PCI_MAX_BARS; i++) { struct adf_bar *bar = 
&accel_pci_dev->pci_bars[i]; if (bar->virt_addr) bus_free_resource(accel_pci_dev->pci_dev, SYS_RES_MEMORY, bar->virt_addr); } if (accel_dev->hw_device) { switch (pci_get_device(accel_pci_dev->pci_dev)) { case ADF_4XXX_PCI_DEVICE_ID: case ADF_401XX_PCI_DEVICE_ID: adf_clean_hw_data_4xxx(accel_dev->hw_device); break; default: break; } free(accel_dev->hw_device, M_QAT_4XXX); accel_dev->hw_device = NULL; } adf_cfg_dev_remove(accel_dev); adf_devmgr_rm_dev(accel_dev, NULL); } static int adf_attach(device_t dev) { struct adf_accel_dev *accel_dev; struct adf_accel_pci *accel_pci_dev; struct adf_hw_device_data *hw_data; - unsigned int i, bar_nr; + unsigned int bar_nr; int ret, rid; struct adf_cfg_device *cfg_dev = NULL; /* Set pci MaxPayLoad to 512. Implemented to avoid the issue of * Pci-passthrough causing Maxpayload to be reset to 128 bytes * when the device is reset. */ if (pci_get_max_payload(dev) != 512) pci_set_max_payload(dev, 512); accel_dev = device_get_softc(dev); INIT_LIST_HEAD(&accel_dev->crypto_list); accel_pci_dev = &accel_dev->accel_pci_dev; accel_pci_dev->pci_dev = dev; if (bus_get_domain(dev, &accel_pci_dev->node) != 0) accel_pci_dev->node = 0; /* Add accel device to accel table. 
* This should be called before adf_cleanup_accel is called */ if (adf_devmgr_add_dev(accel_dev, NULL)) { device_printf(dev, "Failed to add new accelerator device.\n"); return ENXIO; } /* Allocate and configure device configuration structure */ hw_data = malloc(sizeof(*hw_data), M_QAT_4XXX, M_WAITOK | M_ZERO); accel_dev->hw_device = hw_data; adf_init_hw_data_4xxx(accel_dev->hw_device, pci_get_device(dev)); accel_pci_dev->revid = pci_get_revid(dev); hw_data->fuses = pci_read_config(dev, ADF_4XXX_FUSECTL4_OFFSET, 4); if (accel_pci_dev->revid == 0x00) { device_printf(dev, "A0 stepping is not supported.\n"); ret = ENODEV; goto out_err; } /* Get PPAERUCM values and store */ ret = adf_aer_store_ppaerucm_reg(dev, hw_data); if (ret) goto out_err; /* Get Accelerators and Accelerators Engines masks */ hw_data->accel_mask = hw_data->get_accel_mask(accel_dev); hw_data->ae_mask = hw_data->get_ae_mask(accel_dev); accel_pci_dev->sku = hw_data->get_sku(hw_data); /* If the device has no acceleration engines then ignore it. 
*/ if (!hw_data->accel_mask || !hw_data->ae_mask || (~hw_data->ae_mask & 0x01)) { device_printf(dev, "No acceleration units found\n"); ret = ENXIO; goto out_err; } /* Create device configuration table */ ret = adf_cfg_dev_add(accel_dev); if (ret) goto out_err; ret = adf_clock_debugfs_add(accel_dev); if (ret) goto out_err; pci_set_max_read_req(dev, 4096); ret = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, BUS_SPACE_MAXSIZE, /* BUS_SPACE_UNRESTRICTED */ 1, BUS_SPACE_MAXSIZE, 0, NULL, NULL, &accel_dev->dma_tag); if (ret) goto out_err; if (hw_data->get_accel_cap) { hw_data->accel_capabilities_mask = hw_data->get_accel_cap(accel_dev); } /* Find and map all the device's BARS */ - i = 0; - for (bar_nr = 0; i < ADF_PCI_MAX_BARS && bar_nr < PCIR_MAX_BAR_0; - bar_nr++) { + /* Logical BARs configuration for 64bit BARs: + bar 0 and 1 - logical BAR0 + bar 2 and 3 - logical BAR1 + bar 4 and 5 - logical BAR3 + */ + for (bar_nr = 0; + bar_nr < (ADF_PCI_MAX_BARS * 2) && bar_nr < PCIR_MAX_BAR_0; + bar_nr += 2) { struct adf_bar *bar; rid = PCIR_BAR(bar_nr); - if (bus_get_resource(dev, SYS_RES_MEMORY, rid, NULL, NULL) != 0) - continue; - bar = &accel_pci_dev->pci_bars[i++]; + bar = &accel_pci_dev->pci_bars[bar_nr / 2]; + bar->virt_addr = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (!bar->virt_addr) { device_printf(dev, "Failed to map BAR %d\n", bar_nr); ret = ENXIO; goto out_err; } bar->base_addr = rman_get_start(bar->virt_addr); bar->size = rman_get_size(bar->virt_addr); } pci_enable_busmaster(dev); if (!accel_dev->hw_device->config_device) { ret = EFAULT; goto out_err; } ret = accel_dev->hw_device->config_device(accel_dev); if (ret) goto out_err; ret = adf_dev_init(accel_dev); if (ret) goto out_dev_shutdown; ret = adf_dev_start(accel_dev); if (ret) goto out_dev_stop; cfg_dev = accel_dev->cfg->dev; adf_cfg_device_clear(cfg_dev, accel_dev); free(cfg_dev, M_QAT); accel_dev->cfg->dev = NULL; return ret; 
out_dev_stop: adf_dev_stop(accel_dev); out_dev_shutdown: adf_dev_shutdown(accel_dev); out_err: adf_cleanup_accel(accel_dev); return ret; } static int adf_detach(device_t dev) { struct adf_accel_dev *accel_dev = device_get_softc(dev); if (adf_dev_stop(accel_dev)) { device_printf(dev, "Failed to stop QAT accel dev\n"); return EBUSY; } adf_dev_shutdown(accel_dev); adf_cleanup_accel(accel_dev); return 0; } static device_method_t adf_methods[] = { DEVMETHOD(device_probe, adf_probe), DEVMETHOD(device_attach, adf_attach), DEVMETHOD(device_detach, adf_detach), DEVMETHOD_END }; static driver_t adf_driver = { "qat", adf_methods, sizeof(struct adf_accel_dev) }; DRIVER_MODULE_ORDERED(qat_4xxx, pci, adf_driver, NULL, NULL, SI_ORDER_THIRD); MODULE_VERSION(qat_4xxx, 1); MODULE_DEPEND(qat_4xxx, qat_common, 1, 1, 1); MODULE_DEPEND(qat_4xxx, qat_api, 1, 1, 1); MODULE_DEPEND(qat_4xxx, linuxkpi, 1, 1, 1); diff --git a/sys/dev/qat/qat_hw/qat_4xxxvf/adf_drv.c b/sys/dev/qat/qat_hw/qat_4xxxvf/adf_drv.c index edc6c754d350..05a99ae43ab7 100644 --- a/sys/dev/qat/qat_hw/qat_4xxxvf/adf_drv.c +++ b/sys/dev/qat/qat_hw/qat_4xxxvf/adf_drv.c @@ -1,281 +1,271 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright(c) 2007-2022 Intel Corporation */ #include "qat_freebsd.h" #include #include #include #include "adf_4xxxvf_hw_data.h" #include "adf_gen4_hw_data.h" #include "adf_fw_counters.h" #include "adf_cfg_device.h" #include #include #include #include #include static MALLOC_DEFINE(M_QAT_4XXXVF, "qat_4xxxvf", "qat_4xxxvf"); #define ADF_SYSTEM_DEVICE(device_id) \ { \ PCI_VENDOR_ID_INTEL, device_id \ } static const struct pci_device_id adf_pci_tbl[] = { ADF_SYSTEM_DEVICE(ADF_4XXXIOV_PCI_DEVICE_ID), ADF_SYSTEM_DEVICE(ADF_401XXIOV_PCI_DEVICE_ID), { 0, } }; static int adf_probe(device_t dev) { const struct pci_device_id *id; for (id = adf_pci_tbl; id->vendor != 0; id++) { if (pci_get_vendor(dev) == id->vendor && pci_get_device(dev) == id->device) { device_set_desc(dev, "Intel " ADF_4XXXVF_DEVICE_NAME " 
QuickAssist"); return BUS_PROBE_GENERIC; } } return ENXIO; } static void adf_cleanup_accel(struct adf_accel_dev *accel_dev) { struct adf_accel_pci *accel_pci_dev = &accel_dev->accel_pci_dev; struct adf_accel_dev *pf; int i; if (accel_dev->dma_tag) bus_dma_tag_destroy(accel_dev->dma_tag); for (i = 0; i < ADF_PCI_MAX_BARS; i++) { struct adf_bar *bar = &accel_pci_dev->pci_bars[i]; if (bar->virt_addr) bus_free_resource(accel_pci_dev->pci_dev, SYS_RES_MEMORY, bar->virt_addr); } /* * As adf_clean_hw_data_4xxxiov() will update class index, before * index is updated, vf must be remove from accel_table. */ pf = adf_devmgr_pci_to_accel_dev(pci_find_pf(accel_pci_dev->pci_dev)); adf_devmgr_rm_dev(accel_dev, pf); if (accel_dev->hw_device) { switch (pci_get_device(accel_pci_dev->pci_dev)) { case ADF_4XXXIOV_PCI_DEVICE_ID: case ADF_401XXIOV_PCI_DEVICE_ID: adf_clean_hw_data_4xxxiov(accel_dev->hw_device); break; default: break; } free(accel_dev->hw_device, M_QAT_4XXXVF); accel_dev->hw_device = NULL; } adf_cfg_dev_remove(accel_dev); } static int adf_attach(device_t dev) { struct adf_accel_dev *accel_dev; struct adf_accel_dev *pf; struct adf_accel_pci *accel_pci_dev; struct adf_hw_device_data *hw_data; - unsigned int i, bar_nr; + unsigned int bar_nr; int ret = 0; int rid; struct adf_cfg_device *cfg_dev = NULL; accel_dev = device_get_softc(dev); accel_dev->is_vf = true; pf = adf_devmgr_pci_to_accel_dev(pci_find_pf(dev)); INIT_LIST_HEAD(&accel_dev->crypto_list); accel_pci_dev = &accel_dev->accel_pci_dev; accel_pci_dev->pci_dev = dev; if (bus_get_domain(dev, &accel_pci_dev->node) != 0) accel_pci_dev->node = 0; /* Add accel device to accel table */ if (adf_devmgr_add_dev(accel_dev, pf)) { device_printf(GET_DEV(accel_dev), "Failed to add new accelerator device.\n"); return -EFAULT; } /* Allocate and configure device configuration structure */ hw_data = malloc(sizeof(*hw_data), M_QAT_4XXXVF, M_WAITOK | M_ZERO); if (!hw_data) { ret = -ENOMEM; goto out_err; } accel_dev->hw_device = hw_data; 
adf_init_hw_data_4xxxiov(accel_dev->hw_device); accel_pci_dev->revid = pci_get_revid(dev); hw_data->fuses = pci_read_config(dev, ADF_4XXXIOV_VFFUSECTL4_OFFSET, 4); /* Get Accelerators and Accelerators Engines masks */ hw_data->accel_mask = hw_data->get_accel_mask(accel_dev); hw_data->ae_mask = hw_data->get_ae_mask(accel_dev); hw_data->admin_ae_mask = hw_data->ae_mask; accel_pci_dev->sku = hw_data->get_sku(hw_data); /* Create device configuration table */ ret = adf_cfg_dev_add(accel_dev); if (ret) goto out_err; pci_set_max_read_req(dev, 1024); ret = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, BUS_SPACE_MAXSIZE, /* BUS_SPACE_UNRESTRICTED */ 1, BUS_SPACE_MAXSIZE, 0, NULL, NULL, &accel_dev->dma_tag); hw_data->accel_capabilities_mask = adf_4xxxvf_get_hw_cap(accel_dev); /* Find and map all the device's BARS */ - i = 0; - for (bar_nr = 0; i < ADF_PCI_MAX_BARS && bar_nr < PCIR_MAX_BAR_0; - bar_nr++) { + /* Logical BARs configuration for 64bit BARs: + bar 0 and 1 - logical BAR0 + bar 2 and 3 - logical BAR1 + bar 4 and 5 - logical BAR3 + */ + for (bar_nr = 0; + bar_nr < (ADF_PCI_MAX_BARS * 2) && bar_nr < PCIR_MAX_BAR_0; + bar_nr += 2) { struct adf_bar *bar; rid = PCIR_BAR(bar_nr); - if (bus_get_resource(dev, SYS_RES_MEMORY, rid, NULL, NULL) != - 0) { - continue; - } - bar = &accel_pci_dev->pci_bars[i++]; + bar = &accel_pci_dev->pci_bars[bar_nr / 2]; bar->virt_addr = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (!bar->virt_addr) { - device_printf(GET_DEV(accel_dev), - "Failed to map BAR %d\n", - bar_nr); + device_printf(dev, "Failed to map BAR %d\n", bar_nr); ret = ENXIO; goto out_err; } bar->base_addr = rman_get_start(bar->virt_addr); bar->size = rman_get_size(bar->virt_addr); } - - if (i == 0) { - device_printf( - GET_DEV(accel_dev), - "No BARs mapped. 
Please check if PCI BARs are mapped correctly for device\n"); - ret = ENXIO; - goto out_err; - } - pci_enable_busmaster(dev); /* Completion for VF2PF request/response message exchange */ init_completion(&accel_dev->u1.vf.msg_received); mutex_init(&accel_dev->u1.vf.rpreset_lock); ret = hw_data->config_device(accel_dev); if (ret) goto out_err; ret = adf_dev_init(accel_dev); if (!ret) ret = adf_dev_start(accel_dev); if (ret) { device_printf( GET_DEV(accel_dev), "Failed to start - make sure PF enabled services match VF configuration.\n"); adf_dev_stop(accel_dev); adf_dev_shutdown(accel_dev); return 0; } cfg_dev = accel_dev->cfg->dev; adf_cfg_device_clear(cfg_dev, accel_dev); free(cfg_dev, M_QAT); accel_dev->cfg->dev = NULL; return ret; out_err: adf_cleanup_accel(accel_dev); return ret; } static int adf_detach(device_t dev) { struct adf_accel_dev *accel_dev = device_get_softc(dev); if (!accel_dev) { printf("QAT: Driver removal failed\n"); return EFAULT; } adf_flush_vf_wq(accel_dev); clear_bit(ADF_STATUS_RESTARTING, &accel_dev->status); adf_dev_stop(accel_dev); adf_dev_shutdown(accel_dev); adf_cleanup_accel(accel_dev); return 0; } static int adf_modevent(module_t mod, int type, void *data) { switch (type) { case MOD_UNLOAD: adf_clean_vf_map(true); return 0; default: return EOPNOTSUPP; } } static device_method_t adf_methods[] = { DEVMETHOD(device_probe, adf_probe), DEVMETHOD(device_attach, adf_attach), DEVMETHOD(device_detach, adf_detach), DEVMETHOD_END }; static driver_t adf_driver = { "qat", adf_methods, sizeof(struct adf_accel_dev) }; DRIVER_MODULE_ORDERED(qat_4xxxvf, pci, adf_driver, adf_modevent, NULL, SI_ORDER_THIRD); MODULE_VERSION(qat_4xxxvf, 1); MODULE_DEPEND(qat_4xxxvf, qat_common, 1, 1, 1); MODULE_DEPEND(qat_4xxxvf, qat_api, 1, 1, 1); MODULE_DEPEND(qat_4xxxvf, linuxkpi, 1, 1, 1);