diff --git a/sys/dev/qat/include/adf_cfg_device.h b/sys/dev/qat/include/adf_cfg_device.h index f2891e4eb805..1419292a262f 100644 --- a/sys/dev/qat/include/adf_cfg_device.h +++ b/sys/dev/qat/include/adf_cfg_device.h @@ -1,82 +1,84 @@ /* SPDX-License-Identifier: BSD-3-Clause */ -/* Copyright(c) 2007-2022 Intel Corporation */ +/* Copyright(c) 2007-2025 Intel Corporation */ #ifndef ADF_CFG_DEVICE_H_ #define ADF_CFG_DEVICE_H_ #include "adf_cfg.h" #include "sal_statistics_strings.h" #define ADF_CFG_STATIC_CONF_VER 2 #define ADF_CFG_STATIC_CONF_CY_ASYM_RING_SIZE 64 #define ADF_CFG_STATIC_CONF_CY_SYM_RING_SIZE 512 #define ADF_CFG_STATIC_CONF_DC_INTER_BUF_SIZE 64 #define ADF_CFG_STATIC_CONF_SAL_STATS_CFG_ENABLED 1 #define ADF_CFG_STATIC_CONF_SAL_STATS_CFG_DC 1 #define ADF_CFG_STATIC_CONF_SAL_STATS_CFG_DH 1 #define ADF_CFG_STATIC_CONF_SAL_STATS_CFG_DRBG 1 #define ADF_CFG_STATIC_CONF_SAL_STATS_CFG_DSA 1 #define ADF_CFG_STATIC_CONF_SAL_STATS_CFG_ECC 1 #define ADF_CFG_STATIC_CONF_SAL_STATS_CFG_KEYGEN 1 #define ADF_CFG_STATIC_CONF_SAL_STATS_CFG_LN 1 #define ADF_CFG_STATIC_CONF_SAL_STATS_CFG_PRIME 1 #define ADF_CFG_STATIC_CONF_SAL_STATS_CFG_RSA 1 #define ADF_CFG_STATIC_CONF_SAL_STATS_CFG_SYM 1 #define ADF_CFG_STATIC_CONF_POLL 1 #define ADF_CFG_STATIC_CONF_IRQ 0 #define ADF_CFG_STATIC_CONF_AUTO_RESET 0 #define ADF_CFG_STATIC_CONF_NUM_DC_ACCEL_UNITS 2 #define ADF_CFG_STATIC_CONF_NUM_INLINE_ACCEL_UNITS 0 #define ADF_CFG_STATIC_CONF_INST_NUM_DC 2 #define ADF_CFG_STATIC_CONF_INST_NUM_CY_POLL 6 #define ADF_CFG_STATIC_CONF_INST_NUM_CY_IRQ 2 #define ADF_CFG_STATIC_CONF_USER_PROCESSES_NUM 2 #define ADF_CFG_STATIC_CONF_USER_INST_NUM_CY 6 #define ADF_CFG_STATIC_CONF_USER_INST_NUM_DC 2 #define ADF_CFG_STATIC_CONF_INST_NUM_CY_POLL_VF 1 #define ADF_CFG_STATIC_CONF_INST_NUM_CY_IRQ_VF 1 #define ADF_CFG_STATIC_CONF_INST_NUM_DC_VF 2 #define ADF_CFG_STATIC_CONF_USER_INST_NUM_CY_VF 2 #define ADF_CFG_STATIC_CONF_USER_INST_NUM_DC_VF 2 #define ADF_CFG_FW_STRING_TO_ID(str, acc, id) \ do { \ typeof(id) id_ = (id); \ typeof(str) str_; \ memcpy(str_, (str), sizeof(str_)); \ if (!strncmp(str_, \ ADF_SERVICES_DEFAULT, \ sizeof(ADF_SERVICES_DEFAULT))) \ *id_ = ADF_FW_IMAGE_DEFAULT; \ else if (!strncmp(str_, \ ADF_SERVICES_CRYPTO, \ sizeof(ADF_SERVICES_CRYPTO))) \ *id_ = ADF_FW_IMAGE_CRYPTO; \ else if (!strncmp(str_, \ ADF_SERVICES_COMPRESSION, \ sizeof(ADF_SERVICES_COMPRESSION))) \ *id_ = ADF_FW_IMAGE_COMPRESSION; \ else if (!strncmp(str_, \ ADF_SERVICES_CUSTOM1, \ sizeof(ADF_SERVICES_CUSTOM1))) \ *id_ = ADF_FW_IMAGE_CUSTOM1; \ else { \ *id_ = ADF_FW_IMAGE_DEFAULT; \ device_printf(GET_DEV(acc), \ "Invalid SerivesProfile: %s," \ "Using DEFAULT image\n", \ str_); \ } \ } while (0) int adf_cfg_get_ring_pairs(struct adf_cfg_device *device, struct adf_cfg_instance *inst, const char *process_name, struct adf_accel_dev *accel_dev); int adf_cfg_device_init(struct adf_cfg_device *device, struct adf_accel_dev *accel_dev); void adf_cfg_device_clear(struct adf_cfg_device *device, struct adf_accel_dev *accel_dev); +void adf_cfg_device_clear_all(struct adf_accel_dev *accel_dev); + #endif diff --git a/sys/dev/qat/include/adf_gen4vf_hw_csr_data.h b/sys/dev/qat/include/adf_gen4vf_hw_csr_data.h index ed78ab54ec4e..5143b88907ba 100644 --- a/sys/dev/qat/include/adf_gen4vf_hw_csr_data.h +++ b/sys/dev/qat/include/adf_gen4vf_hw_csr_data.h @@ -1,150 +1,150 @@ /* SPDX-License-Identifier: BSD-3-Clause */ -/* Copyright(c) 2007-2022 Intel Corporation */ +/* Copyright(c) 2007-2025 Intel Corporation */ #ifndef ADF_GEN4VF_HW_CSR_DATA_H_ #define 
ADF_GEN4VF_HW_CSR_DATA_H_ #define ADF_RING_CSR_ADDR_OFFSET_GEN4VF 0x0 #define ADF_RING_BUNDLE_SIZE_GEN4 0x2000 #define ADF_RING_CSR_RING_HEAD 0x0C0 #define ADF_RING_CSR_RING_TAIL 0x100 #define ADF_RING_CSR_E_STAT 0x14C #define ADF_RING_CSR_RING_CONFIG_GEN4 0x1000 #define ADF_RING_CSR_RING_LBASE_GEN4 0x1040 #define ADF_RING_CSR_RING_UBASE_GEN4 0x1080 #define ADF_RING_CSR_INT_FLAG 0x170 #define ADF_RING_CSR_INT_FLAG_AND_COL 0x184 #define ADF_RING_CSR_NEXT_INT_SRCSEL 0x4 #define ADF_RING_CSR_INT_SRCSEL 0x174 #define ADF_RING_CSR_INT_COL_EN 0x17C #define ADF_RING_CSR_INT_COL_CTL 0x180 #define ADF_RING_CSR_RING_SRV_ARB_EN 0x19C #define ADF_BANK_INT_SRC_SEL_MASK_GEN4 0x44UL #define ADF_RING_CSR_INT_COL_CTL_ENABLE 0x80000000 #define ADF_BANK_INT_FLAG_CLEAR_MASK_GEN4 0x3 #define ADF_RINGS_PER_INT_SRCSEL_GEN4 2 #define BUILD_RING_BASE_ADDR_GEN4(addr, size) \ ((((addr) >> 6) & (0xFFFFFFFFFFFFFFFFULL << (size))) << 6) #define READ_CSR_RING_HEAD_GEN4VF(csr_base_addr, bank, ring) \ ADF_CSR_RD((csr_base_addr), \ (ADF_RING_CSR_ADDR_OFFSET_GEN4VF + \ ADF_RING_BUNDLE_SIZE_GEN4 * (bank)) + \ ADF_RING_CSR_RING_HEAD + ((ring) << 2)) #define READ_CSR_RING_TAIL_GEN4VF(csr_base_addr, bank, ring) \ ADF_CSR_RD((csr_base_addr), \ (ADF_RING_CSR_ADDR_OFFSET_GEN4VF + \ ADF_RING_BUNDLE_SIZE_GEN4 * (bank)) + \ ADF_RING_CSR_RING_TAIL + ((ring) << 2)) #define READ_CSR_E_STAT_GEN4VF(csr_base_addr, bank) \ ADF_CSR_RD((csr_base_addr), \ (ADF_RING_CSR_ADDR_OFFSET_GEN4VF + \ ADF_RING_BUNDLE_SIZE_GEN4 * (bank)) + \ ADF_RING_CSR_E_STAT) #define WRITE_CSR_RING_CONFIG_GEN4VF(csr_base_addr, bank, ring, value) \ ADF_CSR_WR((csr_base_addr), \ (ADF_RING_CSR_ADDR_OFFSET_GEN4VF + \ ADF_RING_BUNDLE_SIZE_GEN4 * (bank)) + \ ADF_RING_CSR_RING_CONFIG_GEN4 + ((ring) << 2), \ (value)) #define WRITE_CSR_RING_BASE_GEN4VF(csr_base_addr, bank, ring, value) \ do { \ struct resource *_csr_base_addr = csr_base_addr; \ u32 _bank = bank; \ u32 _ring = ring; \ - dma_addr_t _value = value; \ + bus_addr_t _value = value; \ u32 l_base = 0, u_base = 0; \ l_base = (u32)((_value)&0xFFFFFFFF); \ u_base = (u32)(((_value)&0xFFFFFFFF00000000ULL) >> 32); \ ADF_CSR_WR((_csr_base_addr), \ (ADF_RING_CSR_ADDR_OFFSET_GEN4VF + \ ADF_RING_BUNDLE_SIZE_GEN4 * (_bank)) + \ ADF_RING_CSR_RING_LBASE_GEN4 + ((_ring) << 2), \ l_base); \ ADF_CSR_WR((_csr_base_addr), \ (ADF_RING_CSR_ADDR_OFFSET_GEN4VF + \ ADF_RING_BUNDLE_SIZE_GEN4 * (_bank)) + \ ADF_RING_CSR_RING_UBASE_GEN4 + ((_ring) << 2), \ u_base); \ } while (0) static inline u64 read_base_gen4vf(struct resource *csr_base_addr, u32 bank, u32 ring) { u32 l_base, u_base; u64 addr; l_base = ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE_GEN4 * bank) + ADF_RING_CSR_RING_LBASE_GEN4 + (ring << 2)); u_base = ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE_GEN4 * bank) + ADF_RING_CSR_RING_UBASE_GEN4 + (ring << 2)); addr = (u64)l_base & 0x00000000FFFFFFFFULL; addr |= (u64)u_base << 32 & 0xFFFFFFFF00000000ULL; return addr; } #define WRITE_CSR_INT_SRCSEL_GEN4VF(csr_base_addr, bank) \ ADF_CSR_WR((csr_base_addr), \ ADF_RING_CSR_ADDR_OFFSET_GEN4VF + \ ADF_RING_BUNDLE_SIZE_GEN4 * (bank) + \ ADF_RING_CSR_INT_SRCSEL, \ ADF_BANK_INT_SRC_SEL_MASK_GEN4) #define READ_CSR_RING_BASE_GEN4VF(csr_base_addr, bank, ring) \ read_base_gen4vf((csr_base_addr), (bank), (ring)) #define WRITE_CSR_RING_HEAD_GEN4VF(csr_base_addr, bank, ring, value) \ ADF_CSR_WR((csr_base_addr), \ (ADF_RING_CSR_ADDR_OFFSET_GEN4VF + \ ADF_RING_BUNDLE_SIZE_GEN4 * (bank)) + \ ADF_RING_CSR_RING_HEAD + ((ring) << 2), \ (value)) #define WRITE_CSR_RING_TAIL_GEN4VF(csr_base_addr, bank, 
ring, value) \ ADF_CSR_WR((csr_base_addr), \ (ADF_RING_CSR_ADDR_OFFSET_GEN4VF + \ ADF_RING_BUNDLE_SIZE_GEN4 * (bank)) + \ ADF_RING_CSR_RING_TAIL + ((ring) << 2), \ (value)) #define WRITE_CSR_INT_FLAG_GEN4VF(csr_base_addr, bank, value) \ ADF_CSR_WR((csr_base_addr), \ (ADF_RING_CSR_ADDR_OFFSET_GEN4VF + \ ADF_RING_BUNDLE_SIZE_GEN4 * (bank)) + \ ADF_RING_CSR_INT_FLAG, \ (value)) #define WRITE_CSR_INT_COL_EN_GEN4VF(csr_base_addr, bank, value) \ ADF_CSR_WR((csr_base_addr), \ (ADF_RING_CSR_ADDR_OFFSET_GEN4VF + \ ADF_RING_BUNDLE_SIZE_GEN4 * (bank)) + \ ADF_RING_CSR_INT_COL_EN, \ (value)) #define WRITE_CSR_INT_COL_CTL_GEN4VF(csr_base_addr, bank, value) \ ADF_CSR_WR((csr_base_addr), \ (ADF_RING_CSR_ADDR_OFFSET_GEN4VF + \ ADF_RING_BUNDLE_SIZE_GEN4 * (bank)) + \ ADF_RING_CSR_INT_COL_CTL, \ (value)) #define WRITE_CSR_INT_FLAG_AND_COL_GEN4VF(csr_base_addr, bank, value) \ ADF_CSR_WR((csr_base_addr), \ (ADF_RING_CSR_ADDR_OFFSET_GEN4VF + \ ADF_RING_BUNDLE_SIZE_GEN4 * (bank)) + \ ADF_RING_CSR_INT_FLAG_AND_COL, \ (value)) #define READ_CSR_RING_SRV_ARB_EN_GEN4VF(csr_base_addr, bank) \ ADF_CSR_RD((csr_base_addr), \ (ADF_RING_CSR_ADDR_OFFSET_GEN4VF + \ ADF_RING_BUNDLE_SIZE_GEN4 * (bank)) + \ ADF_RING_CSR_RING_SRV_ARB_EN) #define WRITE_CSR_RING_SRV_ARB_EN_GEN4VF(csr_base_addr, bank, value) \ ADF_CSR_WR((csr_base_addr), \ (ADF_RING_CSR_ADDR_OFFSET_GEN4VF + \ ADF_RING_BUNDLE_SIZE_GEN4 * (bank)) + \ ADF_RING_CSR_RING_SRV_ARB_EN, \ (value)) struct adf_hw_csr_info; void gen4vf_init_hw_csr_info(struct adf_hw_csr_info *csr_info); #endif /* ADF_GEN4VF_HW_CSR_DATA_H_ */ diff --git a/sys/dev/qat/include/adf_pfvf_vf_msg.h b/sys/dev/qat/include/adf_pfvf_vf_msg.h index 3cf6d5ed0815..44066ca1fe85 100644 --- a/sys/dev/qat/include/adf_pfvf_vf_msg.h +++ b/sys/dev/qat/include/adf_pfvf_vf_msg.h @@ -1,12 +1,13 @@ /* SPDX-License-Identifier: BSD-3-Clause */ -/* Copyright(c) 2007-2022 Intel Corporation */ +/* Copyright(c) 2007-2025 Intel Corporation */ #ifndef ADF_PFVF_VF_MSG_H #define ADF_PFVF_VF_MSG_H int adf_vf2pf_notify_init(struct adf_accel_dev *accel_dev); void adf_vf2pf_notify_shutdown(struct adf_accel_dev *accel_dev); int adf_vf2pf_request_version(struct adf_accel_dev *accel_dev); int adf_vf2pf_get_capabilities(struct adf_accel_dev *accel_dev); int adf_vf2pf_get_ring_to_svc(struct adf_accel_dev *accel_dev); +void adf_vf2pf_restarting_complete(struct adf_accel_dev *accel_dev); #endif /* ADF_PFVF_VF_MSG_H */ diff --git a/sys/dev/qat/include/common/adf_accel_devices.h b/sys/dev/qat/include/common/adf_accel_devices.h index b3f82234bc2b..c09aee8ea4bd 100644 --- a/sys/dev/qat/include/common/adf_accel_devices.h +++ b/sys/dev/qat/include/common/adf_accel_devices.h @@ -1,725 +1,727 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright(c) 2007-2025 Intel Corporation */ #ifndef ADF_ACCEL_DEVICES_H_ #define ADF_ACCEL_DEVICES_H_ #include "qat_freebsd.h" #include "adf_cfg_common.h" #include "adf_pfvf_msg.h" #include "opt_qat.h" #define ADF_CFG_NUM_SERVICES 4 #define ADF_DH895XCC_DEVICE_NAME "dh895xcc" #define ADF_DH895XCCVF_DEVICE_NAME "dh895xccvf" #define ADF_C62X_DEVICE_NAME "c6xx" #define ADF_C62XVF_DEVICE_NAME "c6xxvf" #define ADF_C3XXX_DEVICE_NAME "c3xxx" #define ADF_C3XXXVF_DEVICE_NAME "c3xxxvf" #define ADF_200XX_DEVICE_NAME "200xx" #define ADF_200XXVF_DEVICE_NAME "200xxvf" #define ADF_C4XXX_DEVICE_NAME "c4xxx" #define ADF_C4XXXVF_DEVICE_NAME "c4xxxvf" #define ADF_4XXX_DEVICE_NAME "4xxx" #define ADF_4XXXVF_DEVICE_NAME "4xxxvf" #define ADF_DH895XCC_PCI_DEVICE_ID 0x435 #define ADF_DH895XCCIOV_PCI_DEVICE_ID 0x443 #define 
ADF_C62X_PCI_DEVICE_ID 0x37c8 #define ADF_C62XIOV_PCI_DEVICE_ID 0x37c9 #define ADF_C3XXX_PCI_DEVICE_ID 0x19e2 #define ADF_C3XXXIOV_PCI_DEVICE_ID 0x19e3 #define ADF_200XX_PCI_DEVICE_ID 0x18ee #define ADF_200XXIOV_PCI_DEVICE_ID 0x18ef #define ADF_D15XX_PCI_DEVICE_ID 0x6f54 #define ADF_D15XXIOV_PCI_DEVICE_ID 0x6f55 #define ADF_C4XXX_PCI_DEVICE_ID 0x18a0 #define ADF_C4XXXIOV_PCI_DEVICE_ID 0x18a1 #define ADF_4XXX_PCI_DEVICE_ID 0x4940 #define ADF_4XXXIOV_PCI_DEVICE_ID 0x4941 #define ADF_401XX_PCI_DEVICE_ID 0x4942 #define ADF_401XXIOV_PCI_DEVICE_ID 0x4943 #define IS_QAT_GEN3(ID) ({ (ID == ADF_C4XXX_PCI_DEVICE_ID); }) static inline bool IS_QAT_GEN4(const unsigned int id) { return (id == ADF_4XXX_PCI_DEVICE_ID || id == ADF_401XX_PCI_DEVICE_ID || id == ADF_4XXXIOV_PCI_DEVICE_ID || id == ADF_401XXIOV_PCI_DEVICE_ID); } #define IS_QAT_GEN3_OR_GEN4(ID) (IS_QAT_GEN3(ID) || IS_QAT_GEN4(ID)) #define ADF_VF2PF_SET_SIZE 32 #define ADF_MAX_VF2PF_SET 4 #define ADF_VF2PF_SET_OFFSET(set_nr) ((set_nr)*ADF_VF2PF_SET_SIZE) #define ADF_VF2PF_VFNR_TO_SET(vf_nr) ((vf_nr) / ADF_VF2PF_SET_SIZE) #define ADF_VF2PF_VFNR_TO_MASK(vf_nr) \ ({ \ u32 vf_nr_ = (vf_nr); \ BIT((vf_nr_)-ADF_VF2PF_SET_SIZE *ADF_VF2PF_VFNR_TO_SET( \ vf_nr_)); \ }) #define ADF_DEVICE_FUSECTL_OFFSET 0x40 #define ADF_DEVICE_LEGFUSE_OFFSET 0x4C #define ADF_DEVICE_FUSECTL_MASK 0x80000000 #define ADF_PCI_MAX_BARS 3 #define ADF_DEVICE_NAME_LENGTH 32 #define ADF_ETR_MAX_RINGS_PER_BANK 16 #define ADF_MAX_MSIX_VECTOR_NAME 32 #define ADF_DEVICE_NAME_PREFIX "qat_" #define ADF_STOP_RETRY 50 #define ADF_NUM_THREADS_PER_AE (8) #define ADF_AE_ADMIN_THREAD (7) #define ADF_NUM_PKE_STRAND (2) #define ADF_AE_STRAND0_THREAD (8) #define ADF_AE_STRAND1_THREAD (9) #define ADF_CFG_NUM_SERVICES 4 #define ADF_SRV_TYPE_BIT_LEN 3 #define ADF_SRV_TYPE_MASK 0x7 #define ADF_RINGS_PER_SRV_TYPE 2 #define ADF_THRD_ABILITY_BIT_LEN 4 #define ADF_THRD_ABILITY_MASK 0xf #define ADF_VF_OFFSET 0x8 #define ADF_MAX_FUNC_PER_DEV 0x7 #define ADF_PCI_DEV_OFFSET 0x3 #define ADF_SRV_TYPE_BIT_LEN 3 #define ADF_SRV_TYPE_MASK 0x7 #define GET_SRV_TYPE(ena_srv_mask, srv) \ (((ena_srv_mask) >> (ADF_SRV_TYPE_BIT_LEN * (srv))) & ADF_SRV_TYPE_MASK) #define GET_CSR_OPS(accel_dev) (&(accel_dev)->hw_device->csr_info.csr_ops) #define GET_PFVF_OPS(accel_dev) (&(accel_dev)->hw_device->csr_info.pfvf_ops) #define ADF_DEFAULT_RING_TO_SRV_MAP \ (CRYPTO | CRYPTO << ADF_CFG_SERV_RING_PAIR_1_SHIFT | \ NA << ADF_CFG_SERV_RING_PAIR_2_SHIFT | \ COMP << ADF_CFG_SERV_RING_PAIR_3_SHIFT) enum adf_accel_capabilities { ADF_ACCEL_CAPABILITIES_NULL = 0, ADF_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC = 1, ADF_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC = 2, ADF_ACCEL_CAPABILITIES_CIPHER = 4, ADF_ACCEL_CAPABILITIES_AUTHENTICATION = 8, ADF_ACCEL_CAPABILITIES_COMPRESSION = 32, ADF_ACCEL_CAPABILITIES_DEPRECATED = 64, ADF_ACCEL_CAPABILITIES_RANDOM_NUMBER = 128 }; struct adf_bar { rman_res_t base_addr; struct resource *virt_addr; rman_res_t size; } __packed; struct adf_accel_msix { struct msix_entry *entries; u32 num_entries; } __packed; struct adf_accel_pci { device_t pci_dev; struct adf_accel_msix msix_entries; struct adf_bar pci_bars[ADF_PCI_MAX_BARS]; uint8_t revid; uint8_t sku; int node; } __packed; enum dev_state { DEV_DOWN = 0, DEV_UP }; enum dev_sku_info { DEV_SKU_1 = 0, DEV_SKU_2, DEV_SKU_3, DEV_SKU_4, DEV_SKU_VF, DEV_SKU_1_CY, DEV_SKU_2_CY, DEV_SKU_3_CY, DEV_SKU_UNKNOWN }; static inline const char * get_sku_info(enum dev_sku_info info) { switch (info) { case DEV_SKU_1: return "SKU1"; case DEV_SKU_1_CY: return "SKU1CY"; case DEV_SKU_2: return 
"SKU2"; case DEV_SKU_2_CY: return "SKU2CY"; case DEV_SKU_3: return "SKU3"; case DEV_SKU_3_CY: return "SKU3CY"; case DEV_SKU_4: return "SKU4"; case DEV_SKU_VF: return "SKUVF"; case DEV_SKU_UNKNOWN: default: break; } return "Unknown SKU"; } enum adf_accel_unit_services { ADF_ACCEL_SERVICE_NULL = 0, ADF_ACCEL_INLINE_CRYPTO = 1, ADF_ACCEL_CRYPTO = 2, ADF_ACCEL_COMPRESSION = 4, ADF_ACCEL_ASYM = 8, ADF_ACCEL_ADMIN = 16 }; struct adf_ae_info { u32 num_asym_thd; u32 num_sym_thd; u32 num_dc_thd; } __packed; struct adf_accel_unit { u8 au_mask; u32 accel_mask; u64 ae_mask; u64 comp_ae_mask; u32 num_ae; enum adf_accel_unit_services services; } __packed; struct adf_accel_unit_info { u32 inline_ingress_msk; u32 inline_egress_msk; u32 sym_ae_msk; u32 asym_ae_msk; u32 dc_ae_msk; u8 num_cy_au; u8 num_dc_au; u8 num_asym_au; u8 num_inline_au; struct adf_accel_unit *au; const struct adf_ae_info *ae_info; } __packed; struct adf_hw_aram_info { /* Inline Egress mask. "1" = AE is working with egress traffic */ u32 inline_direction_egress_mask; /* Inline congestion managmenet profiles set in config file */ u32 inline_congest_mngt_profile; /* Initialise CY AE mask, "1" = AE is used for CY operations */ u32 cy_ae_mask; /* Initialise DC AE mask, "1" = AE is used for DC operations */ u32 dc_ae_mask; /* Number of long words used to define the ARAM regions */ u32 num_aram_lw_entries; /* ARAM region definitions */ u32 mmp_region_size; u32 mmp_region_offset; u32 skm_region_size; u32 skm_region_offset; /* * Defines size and offset of compression intermediate buffers stored * in ARAM (device's on-chip memory). */ u32 inter_buff_aram_region_size; u32 inter_buff_aram_region_offset; u32 sadb_region_size; u32 sadb_region_offset; } __packed; struct adf_hw_device_class { const char *name; const enum adf_device_type type; uint32_t instances; } __packed; struct arb_info { u32 arbiter_offset; u32 wrk_thd_2_srv_arb_map; u32 wrk_cfg_offset; } __packed; struct admin_info { u32 admin_msg_ur; u32 admin_msg_lr; u32 mailbox_offset; } __packed; struct adf_hw_csr_ops { u64 (*build_csr_ring_base_addr)(bus_addr_t addr, u32 size); u32 (*read_csr_ring_head)(struct resource *csr_base_addr, u32 bank, u32 ring); void (*write_csr_ring_head)(struct resource *csr_base_addr, u32 bank, u32 ring, u32 value); u32 (*read_csr_ring_tail)(struct resource *csr_base_addr, u32 bank, u32 ring); void (*write_csr_ring_tail)(struct resource *csr_base_addr, u32 bank, u32 ring, u32 value); u32 (*read_csr_e_stat)(struct resource *csr_base_addr, u32 bank); void (*write_csr_ring_config)(struct resource *csr_base_addr, u32 bank, u32 ring, u32 value); bus_addr_t (*read_csr_ring_base)(struct resource *csr_base_addr, u32 bank, u32 ring); void (*write_csr_ring_base)(struct resource *csr_base_addr, u32 bank, u32 ring, bus_addr_t addr); void (*write_csr_int_flag)(struct resource *csr_base_addr, u32 bank, u32 value); void (*write_csr_int_srcsel)(struct resource *csr_base_addr, u32 bank); void (*write_csr_int_col_en)(struct resource *csr_base_addr, u32 bank, u32 value); void (*write_csr_int_col_ctl)(struct resource *csr_base_addr, u32 bank, u32 value); void (*write_csr_int_flag_and_col)(struct resource *csr_base_addr, u32 bank, u32 value); u32 (*read_csr_ring_srv_arb_en)(struct resource *csr_base_addr, u32 bank); void (*write_csr_ring_srv_arb_en)(struct resource *csr_base_addr, u32 bank, u32 value); u32 (*get_src_sel_mask)(void); u32 (*get_int_col_ctl_enable_mask)(void); u32 (*get_bank_irq_mask)(u32 irq_mask); }; struct adf_cfg_device_data; struct adf_accel_dev; struct 
adf_etr_data; struct adf_etr_ring_data; struct adf_pfvf_ops { int (*enable_comms)(struct adf_accel_dev *accel_dev); u32 (*get_pf2vf_offset)(u32 i); u32 (*get_vf2pf_offset)(u32 i); void (*enable_vf2pf_interrupts)(struct resource *pmisc_addr, u32 vf_mask); void (*disable_all_vf2pf_interrupts)(struct resource *pmisc_addr); u32 (*disable_pending_vf2pf_interrupts)(struct resource *pmisc_addr); int (*send_msg)(struct adf_accel_dev *accel_dev, struct pfvf_message msg, u32 pfvf_offset, struct mutex *csr_lock); struct pfvf_message (*recv_msg)(struct adf_accel_dev *accel_dev, u32 pfvf_offset, u8 compat_ver); }; struct adf_hw_csr_info { struct adf_hw_csr_ops csr_ops; struct adf_pfvf_ops pfvf_ops; u32 csr_addr_offset; u32 ring_bundle_size; u32 bank_int_flag_clear_mask; u32 num_rings_per_int_srcsel; u32 arb_enable_mask; }; struct adf_hw_device_data { struct adf_hw_device_class *dev_class; uint32_t (*get_accel_mask)(struct adf_accel_dev *accel_dev); uint32_t (*get_ae_mask)(struct adf_accel_dev *accel_dev); uint32_t (*get_sram_bar_id)(struct adf_hw_device_data *self); uint32_t (*get_misc_bar_id)(struct adf_hw_device_data *self); uint32_t (*get_etr_bar_id)(struct adf_hw_device_data *self); uint32_t (*get_num_aes)(struct adf_hw_device_data *self); uint32_t (*get_num_accels)(struct adf_hw_device_data *self); void (*notify_and_wait_ethernet)(struct adf_accel_dev *accel_dev); bool (*get_eth_doorbell_msg)(struct adf_accel_dev *accel_dev); void (*get_arb_info)(struct arb_info *arb_csrs_info); void (*get_admin_info)(struct admin_info *admin_csrs_info); void (*get_errsou_offset)(u32 *errsou3, u32 *errsou5); uint32_t (*get_num_accel_units)(struct adf_hw_device_data *self); int (*init_accel_units)(struct adf_accel_dev *accel_dev); void (*exit_accel_units)(struct adf_accel_dev *accel_dev); uint32_t (*get_clock_speed)(struct adf_hw_device_data *self); enum dev_sku_info (*get_sku)(struct adf_hw_device_data *self); bool (*check_prod_sku)(struct adf_accel_dev *accel_dev); int (*alloc_irq)(struct adf_accel_dev *accel_dev); void (*free_irq)(struct adf_accel_dev *accel_dev); void (*enable_error_correction)(struct adf_accel_dev *accel_dev); int (*check_uncorrectable_error)(struct adf_accel_dev *accel_dev); void (*print_err_registers)(struct adf_accel_dev *accel_dev); void (*disable_error_interrupts)(struct adf_accel_dev *accel_dev); int (*init_ras)(struct adf_accel_dev *accel_dev); void (*exit_ras)(struct adf_accel_dev *accel_dev); void (*disable_arb)(struct adf_accel_dev *accel_dev); void (*update_ras_errors)(struct adf_accel_dev *accel_dev, int error); bool (*ras_interrupts)(struct adf_accel_dev *accel_dev, bool *reset_required); int (*init_admin_comms)(struct adf_accel_dev *accel_dev); void (*exit_admin_comms)(struct adf_accel_dev *accel_dev); int (*send_admin_init)(struct adf_accel_dev *accel_dev); void (*set_asym_rings_mask)(struct adf_accel_dev *accel_dev); int (*get_ring_to_svc_map)(struct adf_accel_dev *accel_dev, u16 *ring_to_svc_map); uint32_t (*get_accel_cap)(struct adf_accel_dev *accel_dev); int (*init_arb)(struct adf_accel_dev *accel_dev); void (*exit_arb)(struct adf_accel_dev *accel_dev); void (*get_arb_mapping)(struct adf_accel_dev *accel_dev, const uint32_t **cfg); int (*init_device)(struct adf_accel_dev *accel_dev); int (*get_heartbeat_status)(struct adf_accel_dev *accel_dev); int (*int_timer_init)(struct adf_accel_dev *accel_dev); void (*int_timer_exit)(struct adf_accel_dev *accel_dev); uint32_t (*get_ae_clock)(struct adf_hw_device_data *self); uint32_t (*get_hb_clock)(struct adf_hw_device_data *self); 
void (*disable_iov)(struct adf_accel_dev *accel_dev); void (*configure_iov_threads)(struct adf_accel_dev *accel_dev, bool enable); void (*enable_ints)(struct adf_accel_dev *accel_dev); bool (*check_slice_hang)(struct adf_accel_dev *accel_dev); int (*set_ssm_wdtimer)(struct adf_accel_dev *accel_dev); void (*enable_pf2vf_interrupt)(struct adf_accel_dev *accel_dev); void (*disable_pf2vf_interrupt)(struct adf_accel_dev *accel_dev); int (*interrupt_active_pf2vf)(struct adf_accel_dev *accel_dev); int (*get_int_active_bundles)(struct adf_accel_dev *accel_dev); void (*reset_device)(struct adf_accel_dev *accel_dev); void (*reset_hw_units)(struct adf_accel_dev *accel_dev); int (*measure_clock)(struct adf_accel_dev *accel_dev); void (*restore_device)(struct adf_accel_dev *accel_dev); uint32_t (*get_obj_cfg_ae_mask)(struct adf_accel_dev *accel_dev, enum adf_accel_unit_services services); enum adf_accel_unit_services ( *get_service_type)(struct adf_accel_dev *accel_dev, s32 obj_num); int (*add_pke_stats)(struct adf_accel_dev *accel_dev); void (*remove_pke_stats)(struct adf_accel_dev *accel_dev); int (*add_misc_error)(struct adf_accel_dev *accel_dev); int (*count_ras_event)(struct adf_accel_dev *accel_dev, u32 *ras_event, char *aeidstr); void (*remove_misc_error)(struct adf_accel_dev *accel_dev); int (*configure_accel_units)(struct adf_accel_dev *accel_dev); int (*ring_pair_reset)(struct adf_accel_dev *accel_dev, u32 bank_number); void (*config_ring_irq)(struct adf_accel_dev *accel_dev, u32 bank_number, u16 ring_mask); uint32_t (*get_objs_num)(struct adf_accel_dev *accel_dev); const char *(*get_obj_name)(struct adf_accel_dev *accel_dev, enum adf_accel_unit_services services); void (*pre_reset)(struct adf_accel_dev *accel_dev); void (*post_reset)(struct adf_accel_dev *accel_dev); void (*set_msix_rttable)(struct adf_accel_dev *accel_dev); void (*get_ring_svc_map_data)(int ring_pair_index, u16 ring_to_svc_map, u8 *serv_type, int *ring_index, int *num_rings_per_srv, int bundle_num); struct adf_hw_csr_info csr_info; const char *fw_name; const char *fw_mmp_name; bool reset_ack; uint32_t fuses; uint32_t accel_capabilities_mask; uint32_t instance_id; uint16_t accel_mask; u32 aerucm_mask; u32 ae_mask; u32 admin_ae_mask; u32 service_mask; u32 service_to_load_mask; u32 heartbeat_ctr_num; uint16_t tx_rings_mask; uint8_t tx_rx_gap; uint8_t num_banks; u8 num_rings_per_bank; uint8_t num_accel; uint8_t num_logical_accel; uint8_t num_engines; + bool get_ring_to_svc_done; int (*get_storage_enabled)(struct adf_accel_dev *accel_dev, uint32_t *storage_enabled); u8 query_storage_cap; u32 clock_frequency; u8 storage_enable; u32 extended_dc_capabilities; int (*config_device)(struct adf_accel_dev *accel_dev); u32 asym_ae_active_thd_mask; u16 asym_rings_mask; int (*get_fw_image_type)(struct adf_accel_dev *accel_dev, enum adf_cfg_fw_image_type *fw_image_type); u16 ring_to_svc_map; } __packed; /* helper enum for performing CSR operations */ enum operation { AND, OR, }; /* 32-bit CSR write macro */ #define ADF_CSR_WR(csr_base, csr_offset, val) \ bus_write_4(csr_base, csr_offset, val) /* 64-bit CSR write macro */ #ifdef __x86_64__ #define ADF_CSR_WR64(csr_base, csr_offset, val) \ bus_write_8(csr_base, csr_offset, val) #else static __inline void adf_csr_wr64(struct resource *csr_base, bus_size_t offset, uint64_t value) { bus_write_4(csr_base, offset, (uint32_t)value); bus_write_4(csr_base, offset + 4, (uint32_t)(value >> 32)); } #define ADF_CSR_WR64(csr_base, csr_offset, val) \ adf_csr_wr64(csr_base, csr_offset, val) #endif /* 32-bit 
CSR read macro */ #define ADF_CSR_RD(csr_base, csr_offset) bus_read_4(csr_base, csr_offset) /* 64-bit CSR read macro */ #ifdef __x86_64__ #define ADF_CSR_RD64(csr_base, csr_offset) bus_read_8(csr_base, csr_offset) #else static __inline uint64_t adf_csr_rd64(struct resource *csr_base, bus_size_t offset) { return (((uint64_t)bus_read_4(csr_base, offset)) | (((uint64_t)bus_read_4(csr_base, offset + 4)) << 32)); } #define ADF_CSR_RD64(csr_base, csr_offset) adf_csr_rd64(csr_base, csr_offset) #endif #define GET_DEV(accel_dev) ((accel_dev)->accel_pci_dev.pci_dev) #define GET_BARS(accel_dev) ((accel_dev)->accel_pci_dev.pci_bars) #define GET_HW_DATA(accel_dev) (accel_dev->hw_device) #define GET_MAX_BANKS(accel_dev) (GET_HW_DATA(accel_dev)->num_banks) #define GET_DEV_SKU(accel_dev) (accel_dev->accel_pci_dev.sku) #define GET_NUM_RINGS_PER_BANK(accel_dev) \ (GET_HW_DATA(accel_dev)->num_rings_per_bank) #define GET_MAX_ACCELENGINES(accel_dev) (GET_HW_DATA(accel_dev)->num_engines) #define accel_to_pci_dev(accel_ptr) accel_ptr->accel_pci_dev.pci_dev #define GET_SRV_TYPE(ena_srv_mask, srv) \ (((ena_srv_mask) >> (ADF_SRV_TYPE_BIT_LEN * (srv))) & ADF_SRV_TYPE_MASK) #define SET_ASYM_MASK(asym_mask, srv) \ ({ \ typeof(srv) srv_ = (srv); \ (asym_mask) |= ((1 << (srv_)*ADF_RINGS_PER_SRV_TYPE) | \ (1 << ((srv_)*ADF_RINGS_PER_SRV_TYPE + 1))); \ }) #define GET_NUM_RINGS_PER_BANK(accel_dev) \ (GET_HW_DATA(accel_dev)->num_rings_per_bank) #define GET_MAX_PROCESSES(accel_dev) \ ({ \ typeof(accel_dev) dev = (accel_dev); \ (GET_MAX_BANKS(dev) * (GET_NUM_RINGS_PER_BANK(dev) / 2)); \ }) #define GET_DU_TABLE(accel_dev) (accel_dev->du_table) static inline void adf_csr_fetch_and_and(struct resource *csr, size_t offs, unsigned long mask) { unsigned int val = ADF_CSR_RD(csr, offs); val &= mask; ADF_CSR_WR(csr, offs, val); } static inline void adf_csr_fetch_and_or(struct resource *csr, size_t offs, unsigned long mask) { unsigned int val = ADF_CSR_RD(csr, offs); val |= mask; ADF_CSR_WR(csr, offs, val); } static inline void adf_csr_fetch_and_update(enum operation op, struct resource *csr, size_t offs, unsigned long mask) { switch (op) { case AND: adf_csr_fetch_and_and(csr, offs, mask); break; case OR: adf_csr_fetch_and_or(csr, offs, mask); break; } } struct pfvf_stats { struct dentry *stats_file; /* Messages put in CSR */ unsigned int tx; /* Messages read from CSR */ unsigned int rx; /* Interrupt fired but int bit was clear */ unsigned int spurious; /* Block messages sent */ unsigned int blk_tx; /* Block messages received */ unsigned int blk_rx; /* Blocks received with CRC errors */ unsigned int crc_err; /* CSR in use by other side */ unsigned int busy; /* Receiver did not acknowledge */ unsigned int no_ack; /* Collision detected */ unsigned int collision; /* Couldn't send a response */ unsigned int tx_timeout; /* Didn't receive a response */ unsigned int rx_timeout; /* Responses received */ unsigned int rx_rsp; /* Messages re-transmitted */ unsigned int retry; /* Event put timeout */ unsigned int event_timeout; }; #define NUM_PFVF_COUNTERS 14 void adf_get_admin_info(struct admin_info *admin_csrs_info); struct adf_admin_comms { bus_addr_t phy_addr; bus_addr_t const_tbl_addr; bus_addr_t aram_map_phys_addr; bus_addr_t phy_hb_addr; bus_dmamap_t aram_map; bus_dmamap_t const_tbl_map; bus_dmamap_t hb_map; char *virt_addr; char *virt_hb_addr; struct resource *mailbox_addr; struct sx lock; struct bus_dmamem dma_mem; struct bus_dmamem dma_hb; }; struct icp_qat_fw_loader_handle; struct adf_fw_loader_data { struct icp_qat_fw_loader_handle 
*fw_loader; const struct firmware *uof_fw; const struct firmware *mmp_fw; }; struct adf_accel_vf_info { struct adf_accel_dev *accel_dev; struct mutex pf2vf_lock; /* protect CSR access for PF2VF messages */ u32 vf_nr; bool init; u8 compat_ver; struct pfvf_stats pfvf_counters; }; struct adf_fw_versions { u8 fw_version_major; u8 fw_version_minor; u8 fw_version_patch; u8 mmp_version_major; u8 mmp_version_minor; u8 mmp_version_patch; }; struct adf_int_timer { struct adf_accel_dev *accel_dev; struct workqueue_struct *timer_irq_wq; struct timer_list timer; u32 timeout_val; u32 int_cnt; bool enabled; }; #define ADF_COMPAT_CHECKER_MAX 8 typedef int (*adf_iov_compat_checker_t)(struct adf_accel_dev *accel_dev, u8 vf_compat_ver); struct adf_accel_compat_manager { u8 num_chker; adf_iov_compat_checker_t iov_compat_checkers[ADF_COMPAT_CHECKER_MAX]; }; struct adf_heartbeat; struct adf_accel_dev { struct adf_hw_aram_info *aram_info; struct adf_accel_unit_info *au_info; struct adf_etr_data *transport; struct adf_hw_device_data *hw_device; struct adf_cfg_device_data *cfg; struct adf_fw_loader_data *fw_loader; struct adf_admin_comms *admin; struct adf_uio_control_accel *accel; struct adf_heartbeat *heartbeat; struct adf_int_timer *int_timer; struct adf_fw_versions fw_versions; unsigned int autoreset_on_error; struct adf_fw_counters_data *fw_counters_data; struct sysctl_oid *debugfs_ae_config; struct list_head crypto_list; atomic_t *ras_counters; unsigned long status; atomic_t ref_count; bus_dma_tag_t dma_tag; struct sysctl_ctx_list sysctl_ctx; struct sysctl_oid *ras_correctable; struct sysctl_oid *ras_uncorrectable; struct sysctl_oid *ras_fatal; struct sysctl_oid *ras_reset; struct sysctl_oid *pke_replay_dbgfile; struct sysctl_oid *misc_error_dbgfile; struct sysctl_oid *fw_version_oid; struct sysctl_oid *mmp_version_oid; struct sysctl_oid *hw_version_oid; struct sysctl_oid *cnv_error_oid; struct list_head list; struct adf_accel_pci accel_pci_dev; struct adf_accel_compat_manager *cm; u8 compat_ver; #ifdef QAT_DISABLE_SAFE_DC_MODE struct sysctl_oid *safe_dc_mode; u8 disable_safe_dc_mode; #endif /* QAT_DISABLE_SAFE_DC_MODE */ union { struct { /* vf_info is non-zero when SR-IOV is init'ed */ struct adf_accel_vf_info *vf_info; int num_vfs; } pf; struct { bool irq_enabled; struct resource *irq; void *cookie; struct task pf2vf_bh_tasklet; struct mutex vf2pf_lock; /* protect CSR access */ struct completion msg_received; struct pfvf_message response; /* temp field holding pf2vf response */ enum ring_reset_result rpreset_sts; struct mutex rpreset_lock; /* protect rpreset_sts */ struct pfvf_stats pfvf_counters; u8 pf_compat_ver; } vf; } u1; bool is_vf; u32 accel_id; void *lac_dev; + struct mutex lock; /* protect accel_dev during start/stop e.t.c */ }; #endif diff --git a/sys/dev/qat/include/common/adf_cfg_common.h b/sys/dev/qat/include/common/adf_cfg_common.h index 4a85e021aeb7..eb3edec41742 100644 --- a/sys/dev/qat/include/common/adf_cfg_common.h +++ b/sys/dev/qat/include/common/adf_cfg_common.h @@ -1,231 +1,231 @@ /* SPDX-License-Identifier: BSD-3-Clause */ -/* Copyright(c) 2007-2022 Intel Corporation */ +/* Copyright(c) 2007-2025 Intel Corporation */ #ifndef ADF_CFG_COMMON_H_ #define ADF_CFG_COMMON_H_ #include #include #include #define ADF_CFG_MAX_STR_LEN 128 #define ADF_CFG_MAX_KEY_LEN_IN_BYTES ADF_CFG_MAX_STR_LEN /* * Max value length increased to 128 to support more length of values. * like Dc0CoreAffinity = 0, 1, 2,... 
config values to max cores */ #define ADF_CFG_MAX_VAL_LEN_IN_BYTES 128 #define ADF_CFG_MAX_SECTION_LEN_IN_BYTES ADF_CFG_MAX_STR_LEN #define ADF_CFG_NULL_TERM_SIZE 1 #define ADF_CFG_BASE_DEC 10 #define ADF_CFG_BASE_HEX 16 #define ADF_CFG_ALL_DEVICES 0xFFFE #define ADF_CFG_NO_DEVICE 0xFFFF #define ADF_CFG_AFFINITY_WHATEVER 0xFF #define MAX_DEVICE_NAME_SIZE 32 #define ADF_MAX_DEVICES (32 * 32) #define ADF_MAX_ACCELENGINES 12 #define ADF_CFG_STORAGE_ENABLED 1 #define ADF_DEVS_ARRAY_SIZE BITS_TO_LONGS(ADF_MAX_DEVICES) #define ADF_GEN2_SSM_WDT_PKE_DEFAULT_VALUE 0x3000000 #define ADF_WDT_TIMER_SYM_COMP_MS 3 #define ADF_MIN_HB_TIMER_MS 100 #define ADF_CFG_MAX_NUM_OF_SECTIONS 16 #define ADF_CFG_MAX_NUM_OF_TOKENS 16 #define ADF_CFG_MAX_TOKENS_IN_CONFIG 8 #define ADF_CFG_RESP_POLL 1 #define ADF_CFG_RESP_EPOLL 2 #define ADF_CFG_DEF_CY_RING_ASYM_SIZE 64 #define ADF_CFG_DEF_CY_RING_SYM_SIZE 512 #define ADF_CFG_DEF_DC_RING_SIZE 512 #define ADF_CFG_MAX_CORE_NUM 256 #define ADF_CFG_MAX_TOKENS ADF_CFG_MAX_CORE_NUM #define ADF_CFG_MAX_TOKEN_LEN 10 #define ADF_CFG_ACCEL_DEF_COALES 1 #define ADF_CFG_ACCEL_DEF_COALES_TIMER 10000 #define ADF_CFG_ACCEL_DEF_COALES_NUM_MSG 0 #define ADF_CFG_ASYM_SRV_MASK 1 #define ADF_CFG_SYM_SRV_MASK 2 #define ADF_CFG_DC_SRV_MASK 8 #define ADF_CFG_UNKNOWN_SRV_MASK 0 #define ADF_CFG_DEF_ASYM_MASK 0x03 #define ADF_CFG_MAX_SERVICES 4 #define ADF_MAX_SERVICES 3 enum adf_svc_type { ADF_SVC_ASYM = 0, ADF_SVC_SYM = 1, ADF_SVC_DC = 2, ADF_SVC_NONE = 3 }; struct adf_pci_address { unsigned char bus; unsigned char dev; unsigned char func; -} __packed; +}; #define ADF_CFG_SERV_RING_PAIR_0_SHIFT 0 #define ADF_CFG_SERV_RING_PAIR_1_SHIFT 3 #define ADF_CFG_SERV_RING_PAIR_2_SHIFT 6 #define ADF_CFG_SERV_RING_PAIR_3_SHIFT 9 enum adf_cfg_service_type { NA = 0, CRYPTO, COMP, SYM, ASYM, USED }; enum adf_cfg_bundle_type { FREE, KERNEL, USER }; enum adf_cfg_val_type { ADF_DEC, ADF_HEX, ADF_STR }; enum adf_device_type { DEV_UNKNOWN = 0, DEV_DH895XCC, DEV_DH895XCCVF, DEV_C62X, DEV_C62XVF, DEV_C3XXX, DEV_C3XXXVF, DEV_200XX, DEV_200XXVF, DEV_C4XXX, DEV_C4XXXVF, DEV_D15XX, DEV_D15XXVF, DEV_4XXX, DEV_4XXXVF }; enum adf_cfg_fw_image_type { ADF_FW_IMAGE_DEFAULT = 0, ADF_FW_IMAGE_CRYPTO, ADF_FW_IMAGE_COMPRESSION, ADF_FW_IMAGE_CUSTOM1 }; struct adf_dev_status_info { enum adf_device_type type; uint16_t accel_id; uint16_t instance_id; uint8_t num_ae; uint8_t num_accel; uint8_t num_logical_accel; uint8_t banks_per_accel; uint8_t state; uint8_t bus; uint8_t dev; uint8_t fun; int domain; char name[MAX_DEVICE_NAME_SIZE]; u8 sku; u32 node_id; u32 device_mem_available; u32 pci_device_id; }; struct adf_cfg_device { /* contains all the bundles info */ struct adf_cfg_bundle **bundles; /* contains all the instances info */ struct adf_cfg_instance **instances; int bundle_num; int instance_index; char name[ADF_CFG_MAX_STR_LEN]; int dev_id; int max_kernel_bundle_nr; u16 total_num_inst; }; enum adf_accel_serv_type { ADF_ACCEL_SERV_NA = 0x0, ADF_ACCEL_SERV_ASYM, ADF_ACCEL_SERV_SYM, ADF_ACCEL_SERV_RND, ADF_ACCEL_SERV_DC }; struct adf_cfg_ring { u8 mode : 1; enum adf_accel_serv_type serv_type; u8 number : 4; }; struct adf_cfg_bundle { /* Section(s) name this bundle is shared by */ char **sections; int max_section; int section_index; int number; enum adf_cfg_bundle_type type; cpuset_t affinity_mask; int polling_mode; int instance_num; int num_of_rings; /* contains all the info about rings */ struct adf_cfg_ring **rings; u16 in_use; u16 max_cfg_svc_num; }; struct adf_cfg_instance { enum adf_cfg_service_type stype; char 
name[ADF_CFG_MAX_STR_LEN]; int polling_mode; cpuset_t affinity_mask; /* rings within an instance for services */ int asym_tx; int asym_rx; int sym_tx; int sym_rx; int dc_tx; int dc_rx; int bundle; }; #define ADF_CFG_MAX_CORE_NUM 256 #define ADF_CFG_MAX_TOKENS_IN_CONFIG 8 #define ADF_CFG_MAX_TOKEN_LEN 10 #define ADF_CFG_MAX_TOKENS ADF_CFG_MAX_CORE_NUM #define ADF_CFG_ACCEL_DEF_COALES 1 #define ADF_CFG_ACCEL_DEF_COALES_TIMER 10000 #define ADF_CFG_ACCEL_DEF_COALES_NUM_MSG 0 #define ADF_CFG_RESP_EPOLL 2 #define ADF_CFG_SERV_RING_PAIR_1_SHIFT 3 #define ADF_CFG_SERV_RING_PAIR_2_SHIFT 6 #define ADF_CFG_SERV_RING_PAIR_3_SHIFT 9 #define ADF_CFG_RESP_POLL 1 #define ADF_CFG_ASYM_SRV_MASK 1 #define ADF_CFG_SYM_SRV_MASK 2 #define ADF_CFG_DC_SRV_MASK 8 #define ADF_CFG_UNKNOWN_SRV_MASK 0 #define ADF_CFG_DEF_ASYM_MASK 0x03 #define ADF_CFG_MAX_SERVICES 4 #define ADF_CTL_IOC_MAGIC 'a' #define IOCTL_STATUS_ACCEL_DEV \ _IOWR(ADF_CTL_IOC_MAGIC, 3, struct adf_dev_status_info) #define IOCTL_RESERVE_RING \ _IOWR(ADF_CTL_IOC_MAGIC, 10, struct adf_user_reserve_ring) #define IOCTL_RELEASE_RING \ _IOWR(ADF_CTL_IOC_MAGIC, 11, struct adf_user_reserve_ring) #define IOCTL_ENABLE_RING \ _IOWR(ADF_CTL_IOC_MAGIC, 12, struct adf_user_reserve_ring) #define IOCTL_DISABLE_RING \ _IOWR(ADF_CTL_IOC_MAGIC, 13, struct adf_user_reserve_ring) #define IOCTL_GET_NUM_DEVICES _IOR(ADF_CTL_IOC_MAGIC, 4, int32_t) #define ADF_CFG_HB_DEFAULT_VALUE 500 #define ADF_CFG_HB_COUNT_THRESHOLD 3 #define ADF_MIN_HB_TIMER_MS 100 #define IOCTL_GET_CFG_VAL \ _IOW(ADF_CTL_IOC_MAGIC, 5, struct adf_user_cfg_ctl_data) enum adf_device_heartbeat_status { DEV_HB_UNRESPONSIVE = 0, DEV_HB_ALIVE, DEV_HB_UNSUPPORTED }; struct adf_dev_heartbeat_status_ctl { uint16_t device_id; enum adf_device_heartbeat_status status; }; #define IOCTL_HEARTBEAT_ACCEL_DEV \ _IOWR(ADF_CTL_IOC_MAGIC, 9, struct adf_dev_heartbeat_status_ctl) #endif diff --git a/sys/dev/qat/include/common/adf_common_drv.h b/sys/dev/qat/include/common/adf_common_drv.h index b6bc2511bfba..f9f4463f69c3 100644 --- a/sys/dev/qat/include/common/adf_common_drv.h +++ b/sys/dev/qat/include/common/adf_common_drv.h @@ -1,334 +1,335 @@ /* SPDX-License-Identifier: BSD-3-Clause */ -/* Copyright(c) 2007-2022 Intel Corporation */ +/* Copyright(c) 2007-2025 Intel Corporation */ #ifndef ADF_DRV_H #define ADF_DRV_H #include #include "adf_accel_devices.h" #include "icp_qat_fw_loader_handle.h" #include "icp_qat_hal.h" #include "adf_cfg_user.h" #include "adf_uio.h" #include "adf_uio_control.h" #define QAT_UIO_IOC_MAGIC 'b' #define ADF_MAJOR_VERSION 0 #define ADF_MINOR_VERSION 6 #define ADF_BUILD_VERSION 0 #define ADF_DRV_VERSION \ __stringify(ADF_MAJOR_VERSION) "." __stringify( \ ADF_MINOR_VERSION) "." 
__stringify(ADF_BUILD_VERSION) #define IOCTL_GET_BUNDLE_SIZE _IOR(QAT_UIO_IOC_MAGIC, 0, int32_t) #define IOCTL_ALLOC_BUNDLE _IOW(QAT_UIO_IOC_MAGIC, 1, int) #define IOCTL_GET_ACCEL_TYPE _IOR(QAT_UIO_IOC_MAGIC, 2, uint32_t) #define IOCTL_ADD_MEM_FD _IOW(QAT_UIO_IOC_MAGIC, 3, int) #define ADF_STATUS_RESTARTING 0 #define ADF_STATUS_STARTING 1 #define ADF_STATUS_CONFIGURED 2 #define ADF_STATUS_STARTED 3 #define ADF_STATUS_AE_INITIALISED 4 #define ADF_STATUS_AE_UCODE_LOADED 5 #define ADF_STATUS_AE_STARTED 6 #define ADF_STATUS_PF_RUNNING 7 #define ADF_STATUS_IRQ_ALLOCATED 8 #define ADF_PCIE_FLR_ATTEMPT 10 #define ADF_STATUS_SYSCTL_CTX_INITIALISED 9 #define PCI_EXP_AERUCS 0x104 /* PMISC BAR upper and lower offsets in PCIe config space */ #define ADF_PMISC_L_OFFSET 0x18 #define ADF_PMISC_U_OFFSET 0x1c enum adf_dev_reset_mode { ADF_DEV_RESET_ASYNC = 0, ADF_DEV_RESET_SYNC }; enum adf_event { ADF_EVENT_INIT = 0, ADF_EVENT_START, ADF_EVENT_STOP, ADF_EVENT_SHUTDOWN, ADF_EVENT_RESTARTING, ADF_EVENT_RESTARTED, ADF_EVENT_ERROR, }; struct adf_state { enum adf_event dev_state; int dev_id; }; struct service_hndl { int (*event_hld)(struct adf_accel_dev *accel_dev, enum adf_event event); unsigned long init_status[ADF_DEVS_ARRAY_SIZE]; unsigned long start_status[ADF_DEVS_ARRAY_SIZE]; char *name; struct list_head list; }; static inline int get_current_node(void) { return PCPU_GET(domain); } int adf_service_register(struct service_hndl *service); int adf_service_unregister(struct service_hndl *service); int adf_dev_init(struct adf_accel_dev *accel_dev); int adf_dev_start(struct adf_accel_dev *accel_dev); int adf_dev_stop(struct adf_accel_dev *accel_dev); void adf_dev_shutdown(struct adf_accel_dev *accel_dev); int adf_dev_autoreset(struct adf_accel_dev *accel_dev); int adf_dev_reset(struct adf_accel_dev *accel_dev, enum adf_dev_reset_mode mode); int adf_dev_aer_schedule_reset(struct adf_accel_dev *accel_dev, enum adf_dev_reset_mode mode); void adf_error_notifier(uintptr_t arg); int adf_init_fatal_error_wq(void); void adf_exit_fatal_error_wq(void); int adf_notify_fatal_error(struct adf_accel_dev *accel_dev); void adf_devmgr_update_class_index(struct adf_hw_device_data *hw_data); void adf_clean_vf_map(bool); int adf_sysctl_add_fw_versions(struct adf_accel_dev *accel_dev); int adf_sysctl_remove_fw_versions(struct adf_accel_dev *accel_dev); int adf_ctl_dev_register(void); void adf_ctl_dev_unregister(void); int adf_register_ctl_device_driver(void); void adf_unregister_ctl_device_driver(void); int adf_processes_dev_register(void); void adf_processes_dev_unregister(void); void adf_state_init(void); void adf_state_destroy(void); int adf_devmgr_add_dev(struct adf_accel_dev *accel_dev, struct adf_accel_dev *pf); void adf_devmgr_rm_dev(struct adf_accel_dev *accel_dev, struct adf_accel_dev *pf); struct list_head *adf_devmgr_get_head(void); struct adf_accel_dev *adf_devmgr_get_dev_by_id(uint32_t id); struct adf_accel_dev *adf_devmgr_get_first(void); struct adf_accel_dev *adf_devmgr_pci_to_accel_dev(device_t pci_dev); int adf_devmgr_verify_id(uint32_t *id); void adf_devmgr_get_num_dev(uint32_t *num); int adf_devmgr_in_reset(struct adf_accel_dev *accel_dev); int adf_dev_started(struct adf_accel_dev *accel_dev); int adf_dev_restarting_notify(struct adf_accel_dev *accel_dev); int adf_dev_restarting_notify_sync(struct adf_accel_dev *accel_dev); int adf_dev_restarted_notify(struct adf_accel_dev *accel_dev); int adf_dev_stop_notify_sync(struct adf_accel_dev *accel_dev); int adf_ae_init(struct adf_accel_dev *accel_dev); int 
adf_ae_shutdown(struct adf_accel_dev *accel_dev); int adf_ae_fw_load(struct adf_accel_dev *accel_dev); void adf_ae_fw_release(struct adf_accel_dev *accel_dev); int adf_ae_start(struct adf_accel_dev *accel_dev); int adf_ae_stop(struct adf_accel_dev *accel_dev); int adf_aer_store_ppaerucm_reg(device_t pdev, struct adf_hw_device_data *hw_data); int adf_enable_aer(struct adf_accel_dev *accel_dev, device_t *adf); void adf_disable_aer(struct adf_accel_dev *accel_dev); void adf_reset_sbr(struct adf_accel_dev *accel_dev); void adf_reset_flr(struct adf_accel_dev *accel_dev); void adf_dev_pre_reset(struct adf_accel_dev *accel_dev); void adf_dev_post_reset(struct adf_accel_dev *accel_dev); void adf_dev_restore(struct adf_accel_dev *accel_dev); int adf_init_aer(void); void adf_exit_aer(void); int adf_put_admin_msg_sync(struct adf_accel_dev *accel_dev, u32 ae, void *in, void *out); struct icp_qat_fw_init_admin_req; struct icp_qat_fw_init_admin_resp; int adf_send_admin(struct adf_accel_dev *accel_dev, struct icp_qat_fw_init_admin_req *req, struct icp_qat_fw_init_admin_resp *resp, u32 ae_mask); int adf_config_device(struct adf_accel_dev *accel_dev); int adf_init_admin_comms(struct adf_accel_dev *accel_dev); void adf_exit_admin_comms(struct adf_accel_dev *accel_dev); int adf_send_admin_init(struct adf_accel_dev *accel_dev); int adf_get_fw_timestamp(struct adf_accel_dev *accel_dev, u64 *timestamp); int adf_get_fw_pke_stats(struct adf_accel_dev *accel_dev, u64 *suc_count, u64 *unsuc_count); int adf_dev_measure_clock(struct adf_accel_dev *accel_dev, u32 *frequency, u32 min, u32 max); int adf_clock_debugfs_add(struct adf_accel_dev *accel_dev); u64 adf_clock_get_current_time(void); int adf_init_arb(struct adf_accel_dev *accel_dev); int adf_init_gen2_arb(struct adf_accel_dev *accel_dev); void adf_exit_arb(struct adf_accel_dev *accel_dev); void adf_disable_arb(struct adf_accel_dev *accel_dev); void adf_update_ring_arb(struct adf_etr_ring_data *ring); void adf_enable_ring_arb(struct adf_accel_dev *accel_dev, void *csr_addr, unsigned int bank_nr, unsigned int mask); void adf_disable_ring_arb(struct adf_accel_dev *accel_dev, void *csr_addr, unsigned int bank_nr, unsigned int mask); int adf_set_ssm_wdtimer(struct adf_accel_dev *accel_dev); void adf_update_uio_ring_arb(struct adf_uio_control_bundle *bundle); struct adf_accel_dev *adf_devmgr_get_dev_by_bdf(struct adf_pci_address *addr); struct adf_accel_dev *adf_devmgr_get_dev_by_pci_bus(u8 bus); int adf_get_vf_nr(struct adf_pci_address *vf_pci_addr, int *vf_nr); u32 adf_get_slices_for_svc(struct adf_accel_dev *accel_dev, enum adf_svc_type svc); bool adf_is_bdf_equal(struct adf_pci_address *bdf1, struct adf_pci_address *bdf2); int adf_is_vf_nr_valid(struct adf_accel_dev *accel_dev, int vf_nr); void adf_dev_get(struct adf_accel_dev *accel_dev); void adf_dev_put(struct adf_accel_dev *accel_dev); int adf_dev_in_use(struct adf_accel_dev *accel_dev); int adf_init_etr_data(struct adf_accel_dev *accel_dev); void adf_cleanup_etr_data(struct adf_accel_dev *accel_dev); struct qat_crypto_instance *qat_crypto_get_instance_node(int node); void qat_crypto_put_instance(struct qat_crypto_instance *inst); void qat_alg_callback(void *resp); void qat_alg_asym_callback(void *resp); int qat_algs_register(void); void qat_algs_unregister(void); int qat_asym_algs_register(void); void qat_asym_algs_unregister(void); int adf_isr_resource_alloc(struct adf_accel_dev *accel_dev); void adf_isr_resource_free(struct adf_accel_dev *accel_dev); int adf_vf_isr_resource_alloc(struct adf_accel_dev 
*accel_dev); void adf_vf_isr_resource_free(struct adf_accel_dev *accel_dev); int adf_pfvf_comms_disabled(struct adf_accel_dev *accel_dev); int qat_hal_init(struct adf_accel_dev *accel_dev); void qat_hal_deinit(struct icp_qat_fw_loader_handle *handle); int qat_hal_start(struct icp_qat_fw_loader_handle *handle); void qat_hal_stop(struct icp_qat_fw_loader_handle *handle, unsigned char ae, unsigned int ctx_mask); void qat_hal_reset(struct icp_qat_fw_loader_handle *handle); int qat_hal_clr_reset(struct icp_qat_fw_loader_handle *handle); void qat_hal_set_live_ctx(struct icp_qat_fw_loader_handle *handle, unsigned char ae, unsigned int ctx_mask); int qat_hal_check_ae_active(struct icp_qat_fw_loader_handle *handle, unsigned int ae); int qat_hal_set_ae_lm_mode(struct icp_qat_fw_loader_handle *handle, unsigned char ae, enum icp_qat_uof_regtype lm_type, unsigned char mode); void qat_hal_set_ae_tindex_mode(struct icp_qat_fw_loader_handle *handle, unsigned char ae, unsigned char mode); void qat_hal_set_ae_scs_mode(struct icp_qat_fw_loader_handle *handle, unsigned char ae, unsigned char mode); int qat_hal_set_ae_ctx_mode(struct icp_qat_fw_loader_handle *handle, unsigned char ae, unsigned char mode); int qat_hal_set_ae_nn_mode(struct icp_qat_fw_loader_handle *handle, unsigned char ae, unsigned char mode); void qat_hal_set_pc(struct icp_qat_fw_loader_handle *handle, unsigned char ae, unsigned int ctx_mask, unsigned int upc); void qat_hal_wr_uwords(struct icp_qat_fw_loader_handle *handle, unsigned char ae, unsigned int uaddr, unsigned int words_num, const uint64_t *uword); void qat_hal_wr_coalesce_uwords(struct icp_qat_fw_loader_handle *handle, unsigned char ae, unsigned int uaddr, unsigned int words_num, uint64_t *uword); void qat_hal_wr_umem(struct icp_qat_fw_loader_handle *handle, unsigned char ae, unsigned int uword_addr, unsigned int words_num, unsigned int *data); int qat_hal_get_ins_num(void); int qat_hal_batch_wr_lm(struct icp_qat_fw_loader_handle *handle, unsigned char ae, struct icp_qat_uof_batch_init *lm_init_header); int qat_hal_init_gpr(struct icp_qat_fw_loader_handle *handle, unsigned char ae, unsigned long ctx_mask, enum icp_qat_uof_regtype reg_type, unsigned short reg_num, unsigned int regdata); int qat_hal_init_wr_xfer(struct icp_qat_fw_loader_handle *handle, unsigned char ae, unsigned long ctx_mask, enum icp_qat_uof_regtype reg_type, unsigned short reg_num, unsigned int regdata); int qat_hal_init_rd_xfer(struct icp_qat_fw_loader_handle *handle, unsigned char ae, unsigned long ctx_mask, enum icp_qat_uof_regtype reg_type, unsigned short reg_num, unsigned int regdata); int qat_hal_init_nn(struct icp_qat_fw_loader_handle *handle, unsigned char ae, unsigned long ctx_mask, unsigned short reg_num, unsigned int regdata); int qat_hal_wr_lm(struct icp_qat_fw_loader_handle *handle, unsigned char ae, unsigned short lm_addr, unsigned int value); int qat_uclo_wr_all_uimage(struct icp_qat_fw_loader_handle *handle); void qat_uclo_del_obj(struct icp_qat_fw_loader_handle *handle); void qat_uclo_del_mof(struct icp_qat_fw_loader_handle *handle); int qat_uclo_wr_mimage(struct icp_qat_fw_loader_handle *handle, const void *addr_ptr, int mem_size); int qat_uclo_map_obj(struct icp_qat_fw_loader_handle *handle, const void *addr_ptr, u32 mem_size, const char *obj_name); void qat_hal_get_scs_neigh_ae(unsigned char ae, unsigned char *ae_neigh); int qat_uclo_set_cfg_ae_mask(struct icp_qat_fw_loader_handle *handle, unsigned int cfg_ae_mask); int adf_init_vf_wq(void); void adf_exit_vf_wq(void); void 
adf_flush_vf_wq(struct adf_accel_dev *accel_dev); int adf_pf2vf_handle_pf_restarting(struct adf_accel_dev *accel_dev); int adf_pf2vf_handle_pf_rp_reset(struct adf_accel_dev *accel_dev, struct pfvf_message msg); +int adf_pf2vf_handle_pf_error(struct adf_accel_dev *accel_dev); bool adf_recv_and_handle_pf2vf_msg(struct adf_accel_dev *accel_dev); static inline int adf_sriov_configure(device_t *pdev, int numvfs) { return 0; } static inline void adf_disable_sriov(struct adf_accel_dev *accel_dev) { } static inline void adf_vf2pf_handler(struct adf_accel_vf_info *vf_info) { } static inline int adf_init_pf_wq(void) { return 0; } static inline void adf_exit_pf_wq(void) { } #endif diff --git a/sys/dev/qat/include/common/adf_gen4_hw_data.h b/sys/dev/qat/include/common/adf_gen4_hw_data.h index d0423eaa17cf..cde5ae1f4e10 100644 --- a/sys/dev/qat/include/common/adf_gen4_hw_data.h +++ b/sys/dev/qat/include/common/adf_gen4_hw_data.h @@ -1,172 +1,172 @@ -/* SPDX-License-Identifier: BSD-3-Clause */ -/* Copyright(c) 2021 Intel Corporation */ +/* SPDX-License-Identifier: BSD-3-Clause */ +/* Copyright(c) 2007-2025 Intel Corporation */ #ifndef ADF_GEN4_HW_CSR_DATA_H_ #define ADF_GEN4_HW_CSR_DATA_H_ #include "adf_accel_devices.h" /* Transport access */ #define ADF_BANK_INT_SRC_SEL_MASK 0x44UL #define ADF_RING_CSR_RING_CONFIG 0x1000 #define ADF_RING_CSR_RING_LBASE 0x1040 #define ADF_RING_CSR_RING_UBASE 0x1080 #define ADF_RING_CSR_RING_HEAD 0x0C0 #define ADF_RING_CSR_RING_TAIL 0x100 #define ADF_RING_CSR_E_STAT 0x14C #define ADF_RING_CSR_INT_FLAG 0x170 #define ADF_RING_CSR_INT_SRCSEL 0x174 #define ADF_RING_CSR_INT_COL_CTL 0x180 #define ADF_RING_CSR_INT_FLAG_AND_COL 0x184 #define ADF_RING_CSR_INT_COL_CTL_ENABLE 0x80000000 #define ADF_RING_CSR_INT_COL_EN 0x17C #define ADF_RING_CSR_ADDR_OFFSET 0x100000 #define ADF_RING_BUNDLE_SIZE 0x2000 /* Ring reset */ #define ADF_RPRESET_POLL_TIMEOUT_US (5 * USEC_PER_SEC) #define ADF_RPRESET_POLL_DELAY_US 20 #define ADF_WQM_CSR_RPRESETCTL_RESET BIT(0) #define ADF_WQM_CSR_RPRESETCTL(bank) (0x6000 + ((bank) << 3)) #define ADF_WQM_CSR_RPRESETSTS_STATUS BIT(0) #define ADF_WQM_CSR_RPRESETSTS(bank) (ADF_WQM_CSR_RPRESETCTL(bank) + 4) #define ADF_WQM_CSR_RPRESETCTL_SHIFT 0 #define ADF_WQM_CSR_RPRESETCTL_DRAIN_SHIFT 2 #define ADF_WQM_CSR_RPRESETCTL_MASK (BIT(3) - 1) #define ADF_WQM_CSR_RPRESETCTL(bank) (0x6000 + ((bank) << 3)) #define ADF_WQM_CSR_RPRESETSTS_SHIFT 0 #define ADF_WQM_CSR_RPRESETSTS_MASK (BIT(0)) #define ADF_WQM_CSR_RPRESETSTS(bank) (ADF_WQM_CSR_RPRESETCTL(bank) + 4) #define BUILD_RING_BASE_ADDR(addr, size) \ ((((addr) >> 6) & (GENMASK_ULL(63, 0) << (size))) << 6) #define READ_CSR_RING_HEAD(csr_base_addr, bank, ring) \ ADF_CSR_RD((csr_base_addr), \ ADF_RING_CSR_ADDR_OFFSET + ADF_RING_BUNDLE_SIZE * (bank) + \ ADF_RING_CSR_RING_HEAD + ((ring) << 2)) #define READ_CSR_RING_TAIL(csr_base_addr, bank, ring) \ ADF_CSR_RD((csr_base_addr), \ ADF_RING_CSR_ADDR_OFFSET + ADF_RING_BUNDLE_SIZE * (bank) + \ ADF_RING_CSR_RING_TAIL + ((ring) << 2)) #define READ_CSR_E_STAT(csr_base_addr, bank) \ ADF_CSR_RD((csr_base_addr), \ ADF_RING_CSR_ADDR_OFFSET + ADF_RING_BUNDLE_SIZE * (bank) + \ ADF_RING_CSR_E_STAT) #define WRITE_CSR_RING_CONFIG(csr_base_addr, bank, ring, value) \ ADF_CSR_WR((csr_base_addr), \ ADF_RING_CSR_ADDR_OFFSET + ADF_RING_BUNDLE_SIZE * (bank) + \ ADF_RING_CSR_RING_CONFIG + ((ring) << 2), \ value) #define WRITE_CSR_RING_BASE(csr_base_addr, bank, ring, value) \ do { \ struct resource *_csr_base_addr = csr_base_addr; \ u32 _bank = bank; \ u32 _ring = ring; \ - dma_addr_t _value = value; 
\ + bus_addr_t _value = value; \ u32 l_base = 0, u_base = 0; \ l_base = lower_32_bits(_value); \ u_base = upper_32_bits(_value); \ ADF_CSR_WR((_csr_base_addr), \ ADF_RING_CSR_ADDR_OFFSET + \ ADF_RING_BUNDLE_SIZE * (_bank) + \ ADF_RING_CSR_RING_LBASE + ((_ring) << 2), \ l_base); \ ADF_CSR_WR((_csr_base_addr), \ ADF_RING_CSR_ADDR_OFFSET + \ ADF_RING_BUNDLE_SIZE * (_bank) + \ ADF_RING_CSR_RING_UBASE + ((_ring) << 2), \ u_base); \ } while (0) static inline u64 read_base_gen4(struct resource *csr_base_addr, u32 bank, u32 ring) { u32 l_base, u_base; u64 addr; l_base = ADF_CSR_RD(csr_base_addr, ADF_RING_CSR_ADDR_OFFSET + (ADF_RING_BUNDLE_SIZE * bank) + ADF_RING_CSR_RING_LBASE + (ring << 2)); u_base = ADF_CSR_RD(csr_base_addr, ADF_RING_CSR_ADDR_OFFSET + (ADF_RING_BUNDLE_SIZE * bank) + ADF_RING_CSR_RING_UBASE + (ring << 2)); addr = (u64)l_base & 0x00000000FFFFFFFFULL; addr |= (u64)u_base << 32 & 0xFFFFFFFF00000000ULL; return addr; } #define READ_CSR_RING_BASE(csr_base_addr, bank, ring) \ read_base_gen4((csr_base_addr), (bank), (ring)) #define WRITE_CSR_RING_HEAD(csr_base_addr, bank, ring, value) \ ADF_CSR_WR((csr_base_addr), \ ADF_RING_CSR_ADDR_OFFSET + ADF_RING_BUNDLE_SIZE * (bank) + \ ADF_RING_CSR_RING_HEAD + ((ring) << 2), \ value) #define WRITE_CSR_RING_TAIL(csr_base_addr, bank, ring, value) \ ADF_CSR_WR((csr_base_addr), \ ADF_RING_CSR_ADDR_OFFSET + ADF_RING_BUNDLE_SIZE * (bank) + \ ADF_RING_CSR_RING_TAIL + ((ring) << 2), \ value) #define WRITE_CSR_INT_FLAG(csr_base_addr, bank, value) \ ADF_CSR_WR((csr_base_addr), \ ADF_RING_CSR_ADDR_OFFSET + ADF_RING_BUNDLE_SIZE * (bank) + \ ADF_RING_CSR_INT_FLAG, \ (value)) #define WRITE_CSR_INT_SRCSEL(csr_base_addr, bank) \ ADF_CSR_WR((csr_base_addr), \ ADF_RING_CSR_ADDR_OFFSET + ADF_RING_BUNDLE_SIZE * (bank) + \ ADF_RING_CSR_INT_SRCSEL, \ ADF_BANK_INT_SRC_SEL_MASK) #define WRITE_CSR_INT_COL_EN(csr_base_addr, bank, value) \ ADF_CSR_WR((csr_base_addr), \ ADF_RING_CSR_ADDR_OFFSET + ADF_RING_BUNDLE_SIZE * (bank) + \ ADF_RING_CSR_INT_COL_EN, \ (value)) #define WRITE_CSR_INT_COL_CTL(csr_base_addr, bank, value) \ ADF_CSR_WR((csr_base_addr), \ ADF_RING_CSR_ADDR_OFFSET + ADF_RING_BUNDLE_SIZE * (bank) + \ ADF_RING_CSR_INT_COL_CTL, \ ADF_RING_CSR_INT_COL_CTL_ENABLE | (value)) #define WRITE_CSR_INT_FLAG_AND_COL(csr_base_addr, bank, value) \ ADF_CSR_WR((csr_base_addr), \ ADF_RING_CSR_ADDR_OFFSET + ADF_RING_BUNDLE_SIZE * (bank) + \ ADF_RING_CSR_INT_FLAG_AND_COL, \ (value)) /* Arbiter configuration */ #define ADF_RING_CSR_RING_SRV_ARB_EN 0x19C #define READ_CSR_RING_SRV_ARB_EN(csr_base_addr, bank) \ ADF_CSR_RD((csr_base_addr), \ ADF_RING_CSR_ADDR_OFFSET + ADF_RING_BUNDLE_SIZE * (bank) + \ ADF_RING_CSR_RING_SRV_ARB_EN) #define WRITE_CSR_RING_SRV_ARB_EN(csr_base_addr, bank, value) \ ADF_CSR_WR((csr_base_addr), \ ADF_RING_CSR_ADDR_OFFSET + ADF_RING_BUNDLE_SIZE * (bank) + \ ADF_RING_CSR_RING_SRV_ARB_EN, \ (value)) /* WDT timers * * Timeout is in cycles. Clock speed may vary across products but this * value should be a few milli-seconds. 
*/ #define ADF_SSM_WDT_DEFAULT_VALUE 0x7000000ULL #define ADF_SSM_WDT_PKE_DEFAULT_VALUE 0x8000000 #define ADF_SSMWDTL_OFFSET 0x54 #define ADF_SSMWDTH_OFFSET 0x5C #define ADF_SSMWDTPKEL_OFFSET 0x58 #define ADF_SSMWDTPKEH_OFFSET 0x60 #define ADF_NUM_HB_CNT_PER_AE (ADF_NUM_THREADS_PER_AE) int adf_gen4_set_ssm_wdtimer(struct adf_accel_dev *accel_dev); void adf_gen4_init_hw_csr_info(struct adf_hw_csr_info *csr_info); int adf_gen4_ring_pair_reset(struct adf_accel_dev *accel_dev, u32 bank_number); #endif diff --git a/sys/dev/qat/include/common/adf_pfvf_msg.h b/sys/dev/qat/include/common/adf_pfvf_msg.h index 349db9a13b22..abd9cd46014f 100644 --- a/sys/dev/qat/include/common/adf_pfvf_msg.h +++ b/sys/dev/qat/include/common/adf_pfvf_msg.h @@ -1,259 +1,263 @@ /* SPDX-License-Identifier: BSD-3-Clause */ -/* Copyright(c) 2007-2022 Intel Corporation */ +/* Copyright(c) 2007-2025 Intel Corporation */ #ifndef ADF_PFVF_MSG_H #define ADF_PFVF_MSG_H /* * PF<->VF Gen2 Messaging format * * The PF has an array of 32-bit PF2VF registers, one for each VF. The * PF can access all these registers while each VF can access only the one * register associated with that particular VF. * * The register functionally is split into two parts: * The bottom half is for PF->VF messages. In particular when the first * bit of this register (bit 0) gets set an interrupt will be triggered * in the respective VF. * The top half is for VF->PF messages. In particular when the first bit * of this half of register (bit 16) gets set an interrupt will be triggered * in the PF. * * The remaining bits within this register are available to encode messages. * and implement a collision control mechanism to prevent concurrent use of * the PF2VF register by both the PF and VF. * * 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16 * _______________________________________________ * | | | | | | | | | | | | | | | | | * +-----------------------------------------------+ * \___________________________/ \_________/ ^ ^ * ^ ^ | | * | | | VF2PF Int * | | Message Origin * | Message Type * Message-specific Data/Reserved * * 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1 0 * _______________________________________________ * | | | | | | | | | | | | | | | | | * +-----------------------------------------------+ * \___________________________/ \_________/ ^ ^ * ^ ^ | | * | | | PF2VF Int * | | Message Origin * | Message Type * Message-specific Data/Reserved * * Message Origin (Should always be 1) * A legacy out-of-tree QAT driver allowed for a set of messages not supported * by this driver; these had a Msg Origin of 0 and are ignored by this driver. * * When a PF or VF attempts to send a message in the lower or upper 16 bits, * respectively, the other 16 bits are written to first with a defined * IN_USE_BY pattern as part of a collision control scheme (see function * adf_gen2_pfvf_send() in adf_pf2vf_msg.c). * * * PF<->VF Gen4 Messaging format * * Similarly to the gen2 messaging format, 32-bit long registers are used for * communication between PF and VFs. However, each VF and PF share a pair of * 32-bits register to avoid collisions: one for PV to VF messages and one * for VF to PF messages. * * Both the Interrupt bit and the Message Origin bit retain the same position * and meaning, although non-system messages are now deprecated and not * expected. * * 31 30 9 8 7 6 5 4 3 2 1 0 * _______________________________________________ * | | | . . . 
| | | | | | | | | | | * +-----------------------------------------------+ * \_____________________/ \_______________/ ^ ^ * ^ ^ | | * | | | PF/VF Int * | | Message Origin * | Message Type * Message-specific Data/Reserved * * For both formats, the message reception is acknowledged by lowering the * interrupt bit on the register where the message was sent. */ /* PFVF message common bits */ #define ADF_PFVF_INT BIT(0) #define ADF_PFVF_MSGORIGIN_SYSTEM BIT(1) /* Different generations have different CSR layouts, use this struct * to abstract these differences away */ struct pfvf_message { u8 type; u32 data; }; /* PF->VF messages */ enum pf2vf_msgtype { ADF_PF2VF_MSGTYPE_RESTARTING = 0x01, ADF_PF2VF_MSGTYPE_VERSION_RESP = 0x02, ADF_PF2VF_MSGTYPE_BLKMSG_RESP = 0x03, + ADF_PF2VF_MSGTYPE_FATAL_ERROR = 0x04, /* Values from 0x10 are Gen4 specific, message type is only 4 bits in Gen2 devices. */ ADF_PF2VF_MSGTYPE_RP_RESET_RESP = 0x10, }; /* VF->PF messages */ enum vf2pf_msgtype { ADF_VF2PF_MSGTYPE_INIT = 0x03, ADF_VF2PF_MSGTYPE_SHUTDOWN = 0x04, ADF_VF2PF_MSGTYPE_VERSION_REQ = 0x05, ADF_VF2PF_MSGTYPE_COMPAT_VER_REQ = 0x06, ADF_VF2PF_MSGTYPE_LARGE_BLOCK_REQ = 0x07, ADF_VF2PF_MSGTYPE_MEDIUM_BLOCK_REQ = 0x08, ADF_VF2PF_MSGTYPE_SMALL_BLOCK_REQ = 0x09, + ADF_VF2PF_MSGTYPE_RESTARTING_COMPLETE = 0x0a, /* Values from 0x10 are Gen4 specific, message type is only 4 bits in Gen2 devices. */ ADF_VF2PF_MSGTYPE_RP_RESET = 0x10, }; /* VF/PF compatibility version. */ enum pfvf_compatibility_version { /* Support for extended capabilities */ ADF_PFVF_COMPAT_CAPABILITIES = 0x02, /* In-use pattern cleared by receiver */ ADF_PFVF_COMPAT_FAST_ACK = 0x03, /* Ring to service mapping support for non-standard mappings */ ADF_PFVF_COMPAT_RING_TO_SVC_MAP = 0x04, + /* Fallback compat */ + ADF_PFVF_COMPAT_FALLBACK = 0x05, /* Reference to the latest version */ - ADF_PFVF_COMPAT_THIS_VERSION = 0x04, + ADF_PFVF_COMPAT_THIS_VERSION = 0x05, }; /* PF->VF Version Response */ #define ADF_PF2VF_VERSION_RESP_VERS_MASK GENMASK(7, 0) #define ADF_PF2VF_VERSION_RESP_RESULT_MASK GENMASK(9, 8) enum pf2vf_compat_response { ADF_PF2VF_VF_COMPATIBLE = 0x01, ADF_PF2VF_VF_INCOMPATIBLE = 0x02, ADF_PF2VF_VF_COMPAT_UNKNOWN = 0x03, }; enum ring_reset_result { RPRESET_SUCCESS = 0x00, RPRESET_NOT_SUPPORTED = 0x01, RPRESET_INVAL_BANK = 0x02, RPRESET_TIMEOUT = 0x03, }; #define ADF_VF2PF_RNG_RESET_RP_MASK GENMASK(1, 0) #define ADF_VF2PF_RNG_RESET_RSVD_MASK GENMASK(25, 2) /* PF->VF Block Responses */ #define ADF_PF2VF_BLKMSG_RESP_TYPE_MASK GENMASK(1, 0) #define ADF_PF2VF_BLKMSG_RESP_DATA_MASK GENMASK(9, 2) enum pf2vf_blkmsg_resp_type { ADF_PF2VF_BLKMSG_RESP_TYPE_DATA = 0x00, ADF_PF2VF_BLKMSG_RESP_TYPE_CRC = 0x01, ADF_PF2VF_BLKMSG_RESP_TYPE_ERROR = 0x02, }; /* PF->VF Block Error Code */ enum pf2vf_blkmsg_error { ADF_PF2VF_INVALID_BLOCK_TYPE = 0x00, ADF_PF2VF_INVALID_BYTE_NUM_REQ = 0x01, ADF_PF2VF_PAYLOAD_TRUNCATED = 0x02, ADF_PF2VF_UNSPECIFIED_ERROR = 0x03, }; /* VF->PF Block Requests */ #define ADF_VF2PF_LARGE_BLOCK_TYPE_MASK GENMASK(1, 0) #define ADF_VF2PF_LARGE_BLOCK_BYTE_MASK GENMASK(8, 2) #define ADF_VF2PF_MEDIUM_BLOCK_TYPE_MASK GENMASK(2, 0) #define ADF_VF2PF_MEDIUM_BLOCK_BYTE_MASK GENMASK(8, 3) #define ADF_VF2PF_SMALL_BLOCK_TYPE_MASK GENMASK(3, 0) #define ADF_VF2PF_SMALL_BLOCK_BYTE_MASK GENMASK(8, 4) #define ADF_VF2PF_BLOCK_CRC_REQ_MASK BIT(9) /* PF->VF Block Request Types * 0..15 - 32 byte message * 16..23 - 64 byte message * 24..27 - 128 byte message */ enum vf2pf_blkmsg_req_type { ADF_VF2PF_BLKMSG_REQ_CAP_SUMMARY = 0x02, ADF_VF2PF_BLKMSG_REQ_RING_SVC_MAP 
= 0x03, }; #define ADF_VF2PF_SMALL_BLOCK_TYPE_MAX \ (FIELD_MAX(ADF_VF2PF_SMALL_BLOCK_TYPE_MASK)) #define ADF_VF2PF_MEDIUM_BLOCK_TYPE_MAX \ (FIELD_MAX(ADF_VF2PF_MEDIUM_BLOCK_TYPE_MASK) + \ ADF_VF2PF_SMALL_BLOCK_TYPE_MAX + 1) #define ADF_VF2PF_LARGE_BLOCK_TYPE_MAX \ (FIELD_MAX(ADF_VF2PF_LARGE_BLOCK_TYPE_MASK) + \ ADF_VF2PF_MEDIUM_BLOCK_TYPE_MAX) #define ADF_VF2PF_SMALL_BLOCK_BYTE_MAX \ FIELD_MAX(ADF_VF2PF_SMALL_BLOCK_BYTE_MASK) #define ADF_VF2PF_MEDIUM_BLOCK_BYTE_MAX \ FIELD_MAX(ADF_VF2PF_MEDIUM_BLOCK_BYTE_MASK) #define ADF_VF2PF_LARGE_BLOCK_BYTE_MAX \ FIELD_MAX(ADF_VF2PF_LARGE_BLOCK_BYTE_MASK) struct pfvf_blkmsg_header { u8 version; u8 payload_size; } __packed; #define ADF_PFVF_BLKMSG_HEADER_SIZE (sizeof(struct pfvf_blkmsg_header)) #define ADF_PFVF_BLKMSG_PAYLOAD_SIZE(blkmsg) \ (sizeof(blkmsg) - ADF_PFVF_BLKMSG_HEADER_SIZE) #define ADF_PFVF_BLKMSG_MSG_SIZE(blkmsg) \ (ADF_PFVF_BLKMSG_HEADER_SIZE + (blkmsg)->hdr.payload_size) #define ADF_PFVF_BLKMSG_MSG_MAX_SIZE 128 /* PF->VF Block message header bytes */ #define ADF_PFVF_BLKMSG_VER_BYTE 0 #define ADF_PFVF_BLKMSG_LEN_BYTE 1 /* PF/VF Capabilities message values */ enum blkmsg_capabilities_versions { ADF_PFVF_CAPABILITIES_V1_VERSION = 0x01, ADF_PFVF_CAPABILITIES_V2_VERSION = 0x02, ADF_PFVF_CAPABILITIES_V3_VERSION = 0x03, }; struct capabilities_v1 { struct pfvf_blkmsg_header hdr; u32 ext_dc_caps; } __packed; struct capabilities_v2 { struct pfvf_blkmsg_header hdr; u32 ext_dc_caps; u32 capabilities; } __packed; struct capabilities_v3 { struct pfvf_blkmsg_header hdr; u32 ext_dc_caps; u32 capabilities; u32 frequency; } __packed; /* PF/VF Ring to service mapping values */ enum blkmsg_ring_to_svc_versions { ADF_PFVF_RING_TO_SVC_VERSION = 0x01, }; struct ring_to_svc_map_v1 { struct pfvf_blkmsg_header hdr; u16 map; } __packed; #endif /* ADF_PFVF_MSG_H */ diff --git a/sys/dev/qat/include/icp_qat_fw_init_admin.h b/sys/dev/qat/include/icp_qat_fw_init_admin.h index cd4edbdbc93b..f3e0ff9d0064 100644 --- a/sys/dev/qat/include/icp_qat_fw_init_admin.h +++ b/sys/dev/qat/include/icp_qat_fw_init_admin.h @@ -1,220 +1,220 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright(c) 2007-2025 Intel Corporation */ #ifndef _ICP_QAT_FW_INIT_ADMIN_H_ #define _ICP_QAT_FW_INIT_ADMIN_H_ #include "icp_qat_fw.h" enum icp_qat_fw_init_admin_cmd_id { ICP_QAT_FW_INIT_ME = 0, ICP_QAT_FW_TRNG_ENABLE = 1, ICP_QAT_FW_TRNG_DISABLE = 2, ICP_QAT_FW_CONSTANTS_CFG = 3, ICP_QAT_FW_STATUS_GET = 4, ICP_QAT_FW_COUNTERS_GET = 5, ICP_QAT_FW_LOOPBACK = 6, ICP_QAT_FW_HEARTBEAT_SYNC = 7, ICP_QAT_FW_HEARTBEAT_GET = 8, ICP_QAT_FW_COMP_CAPABILITY_GET = 9, ICP_QAT_FW_CRYPTO_CAPABILITY_GET = 10, ICP_QAT_FW_HEARTBEAT_TIMER_SET = 13, ICP_QAT_FW_RL_SLA_CONFIG = 14, ICP_QAT_FW_RL_INIT = 15, ICP_QAT_FW_RL_DU_START = 16, ICP_QAT_FW_RL_DU_STOP = 17, ICP_QAT_FW_TIMER_GET = 19, ICP_QAT_FW_CNV_STATS_GET = 20, ICP_QAT_FW_PKE_REPLAY_STATS_GET = 21 }; enum icp_qat_fw_init_admin_resp_status { ICP_QAT_FW_INIT_RESP_STATUS_SUCCESS = 0, ICP_QAT_FW_INIT_RESP_STATUS_FAIL = 1, ICP_QAT_FW_INIT_RESP_STATUS_UNSUPPORTED = 4 }; enum icp_qat_fw_cnv_error_type { CNV_ERR_TYPE_NO_ERROR = 0, CNV_ERR_TYPE_CHECKSUM_ERROR, CNV_ERR_TYPE_DECOMP_PRODUCED_LENGTH_ERROR, CNV_ERR_TYPE_DECOMPRESSION_ERROR, CNV_ERR_TYPE_TRANSLATION_ERROR, CNV_ERR_TYPE_DECOMP_CONSUMED_LENGTH_ERROR, CNV_ERR_TYPE_UNKNOWN_ERROR }; #define ICP_QAT_FW_INIT_DISABLE_SAFE_DC_MODE_FLAG 0x02 #define CNV_ERROR_TYPE_GET(latest_error) \ ({ \ __typeof__(latest_error) _lerror = latest_error; \ (_lerror >> 12) > CNV_ERR_TYPE_UNKNOWN_ERROR ? 
\ CNV_ERR_TYPE_UNKNOWN_ERROR : \ (enum icp_qat_fw_cnv_error_type)(_lerror >> 12); \ }) #define CNV_ERROR_LENGTH_DELTA_GET(latest_error) \ ({ \ __typeof__(latest_error) _lerror = latest_error; \ ((s16)((_lerror & 0x0FFF) | (_lerror & 0x0800 ? 0xF000 : 0))); \ }) #define CNV_ERROR_DECOMP_STATUS_GET(latest_error) ((s8)(latest_error & 0xFF)) struct icp_qat_fw_init_admin_req { u16 init_cfg_sz; u8 resrvd1; u8 cmd_id; u32 max_req_duration; u64 opaque_data; union { /* ICP_QAT_FW_INIT_ME */ struct { u64 resrvd2; u16 ibuf_size_in_kb; u8 fw_flags; u8 resrvd3; u32 resrvd4; }; /* ICP_QAT_FW_CONSTANTS_CFG */ struct { u64 init_cfg_ptr; u64 resrvd5; }; /* ICP_QAT_FW_HEARTBEAT_TIMER_SET */ struct { u64 hb_cfg_ptr; u32 heartbeat_ticks; u32 resrvd6; }; /* ICP_QAT_FW_RL_SLA_CONFIG */ struct { u32 credit_per_sla; u8 service_id; u8 vf_id; u8 resrvd7; u8 resrvd8; u32 resrvd9; u32 resrvd10; }; /* ICP_QAT_FW_RL_INIT */ struct { u32 rl_period; u8 config; u8 resrvd11; u8 num_me; u8 resrvd12; u8 pke_svc_arb_map; u8 bulk_crypto_svc_arb_map; u8 compression_svc_arb_map; u8 resrvd13; u32 resrvd14; }; /* ICP_QAT_FW_RL_DU_STOP */ struct { u64 cfg_ptr; u32 resrvd15; u32 resrvd16; }; }; } __packed; struct icp_qat_fw_init_admin_resp { u8 flags; u8 resrvd1; u8 status; u8 cmd_id; union { u32 resrvd2; u32 ras_event_count; /* ICP_QAT_FW_STATUS_GET */ struct { u16 version_minor_num; u16 version_major_num; }; /* ICP_QAT_FW_COMP_CAPABILITY_GET */ u32 extended_features; /* ICP_QAT_FW_CNV_STATS_GET */ struct { u16 error_count; u16 latest_error; }; }; u64 opaque_data; union { u32 resrvd3[4]; /* ICP_QAT_FW_STATUS_GET */ struct { u32 version_patch_num; u8 context_id; u8 ae_id; u16 resrvd4; u64 resrvd5; }; /* ICP_QAT_FW_COMP_CAPABILITY_GET */ struct { u16 compression_algos; u16 checksum_algos; u32 deflate_capabilities; u32 resrvd6; u32 deprecated; }; /* ICP_QAT_FW_CRYPTO_CAPABILITY_GET */ struct { u32 cipher_algos; u32 hash_algos; u16 keygen_algos; u16 other; u16 public_key_algos; u16 prime_algos; }; /* ICP_QAT_FW_RL_DU_STOP */ struct { u32 resrvd7; u8 granularity; u8 resrvd8; u16 resrvd9; u32 total_du_time; u32 resrvd10; }; /* ICP_QAT_FW_TIMER_GET */ struct { u64 timestamp; u64 resrvd11; }; /* ICP_QAT_FW_COUNTERS_GET */ struct { u64 req_rec_count; u64 resp_sent_count; }; /* ICP_QAT_FW_PKE_REPLAY_STATS_GET */ struct { u32 successful_count; u32 unsuccessful_count; u64 resrvd12; }; }; } __packed; enum icp_qat_fw_init_admin_init_flag { ICP_QAT_FW_INIT_FLAG_PKE_DISABLED = 0 }; struct icp_qat_fw_init_admin_hb_cnt { - u16 resp_heartbeat_cnt; u16 req_heartbeat_cnt; + u16 resp_heartbeat_cnt; }; #define ICP_QAT_FW_COMN_HEARTBEAT_OK 0 #define ICP_QAT_FW_COMN_HEARTBEAT_BLOCKED 1 #define ICP_QAT_FW_COMN_HEARTBEAT_FLAG_BITPOS 0 #define ICP_QAT_FW_COMN_HEARTBEAT_FLAG_MASK 0x1 #define ICP_QAT_FW_COMN_STATUS_RESRVD_FLD_MASK 0xFE #define ICP_QAT_FW_COMN_HEARTBEAT_HDR_FLAG_GET(hdr_t) \ ICP_QAT_FW_COMN_HEARTBEAT_FLAG_GET(hdr_t.flags) #define ICP_QAT_FW_COMN_HEARTBEAT_HDR_FLAG_SET(hdr_t, val) \ ICP_QAT_FW_COMN_HEARTBEAT_FLAG_SET(hdr_t, val) #define ICP_QAT_FW_COMN_HEARTBEAT_FLAG_GET(flags) \ QAT_FIELD_GET(flags, \ ICP_QAT_FW_COMN_HEARTBEAT_FLAG_BITPOS, \ ICP_QAT_FW_COMN_HEARTBEAT_FLAG_MASK) #endif diff --git a/sys/dev/qat/qat_api/common/compression/dc_datapath.c b/sys/dev/qat/qat_api/common/compression/dc_datapath.c index f5a8600eadfa..312b2d6749cc 100644 --- a/sys/dev/qat/qat_api/common/compression/dc_datapath.c +++ b/sys/dev/qat/qat_api/common/compression/dc_datapath.c @@ -1,1977 +1,1982 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright(c) 
2007-2025 Intel Corporation */ /** ***************************************************************************** * @file dc_datapath.c * * @defgroup Dc_DataCompression DC Data Compression * * @ingroup Dc_DataCompression * * @description * Implementation of the Data Compression datapath operations. * *****************************************************************************/ /* ******************************************************************************* * Include public/global header files ******************************************************************************* */ #include "cpa.h" #include "cpa_dc.h" #include "cpa_dc_dp.h" /* ******************************************************************************* * Include private header files ******************************************************************************* */ #include "dc_session.h" #include "dc_datapath.h" #include "sal_statistics.h" #include "lac_common.h" #include "lac_mem.h" #include "lac_mem_pools.h" #include "sal_types_compression.h" #include "dc_stats.h" #include "lac_buffer_desc.h" #include "lac_sal.h" #include "lac_log.h" #include "lac_sync.h" #include "sal_service_state.h" #include "sal_qat_cmn_msg.h" #include "sal_hw_gen.h" #include "dc_error_counter.h" #define DC_COMP_MAX_BUFF_SIZE (1024 * 64) static QatUtilsAtomic dcErrorCount[MAX_DC_ERROR_TYPE]; void dcErrorLog(CpaDcReqStatus dcError) { Cpa32U absError = 0; absError = abs(dcError); if ((dcError < CPA_DC_OK) && (absError < MAX_DC_ERROR_TYPE)) { qatUtilsAtomicInc(&(dcErrorCount[absError])); } } Cpa64U getDcErrorCounter(CpaDcReqStatus dcError) { Cpa32U absError = 0; absError = abs(dcError); if (!(dcError >= CPA_DC_OK || dcError < CPA_DC_EMPTY_DYM_BLK)) { return (Cpa64U)qatUtilsAtomicGet(&dcErrorCount[absError]); } return 0; } static inline void dcUpdateXltOverflowChecksumsGen4(const dc_compression_cookie_t *pCookie, const icp_qat_fw_resp_comp_pars_t *pRespPars, CpaDcRqResults *pDcResults) { dc_session_desc_t *pSessionDesc = DC_SESSION_DESC_FROM_CTX_GET(pCookie->pSessionHandle); /* Recompute CRC checksum when either the checksum type * is CPA_DC_CRC32 or when the integrity CRCs are enabled. */ if (CPA_DC_CRC32 == pSessionDesc->checksumType) { pDcResults->checksum = pRespPars->crc.legacy.curr_crc32; /* No need to recalculate the swCrc64I here as this will get * handled later in dcHandleIntegrityChecksumsGen4. 
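* For Adler32 sessions the running Adler checksum from the response is used directly instead.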
*/ } else if (CPA_DC_ADLER32 == pSessionDesc->checksumType) { pDcResults->checksum = pRespPars->crc.legacy.curr_adler_32; } } void dcCompression_ProcessCallback(void *pRespMsg) { CpaStatus status = CPA_STATUS_SUCCESS; icp_qat_fw_comp_resp_t *pCompRespMsg = NULL; void *callbackTag = NULL; Cpa64U *pReqData = NULL; CpaDcDpOpData *pResponse = NULL; CpaDcRqResults *pResults = NULL; CpaDcCallbackFn pCbFunc = NULL; dc_session_desc_t *pSessionDesc = NULL; sal_compression_service_t *pService = NULL; dc_compression_cookie_t *pCookie = NULL; CpaDcOpData *pOpData = NULL; CpaBoolean cmpPass = CPA_TRUE, xlatPass = CPA_TRUE; CpaBoolean isDcDp = CPA_FALSE; CpaBoolean integrityCrcCheck = CPA_FALSE; CpaBoolean verifyHwIntegrityCrcs = CPA_FALSE; Cpa8U cmpErr = ERR_CODE_NO_ERROR, xlatErr = ERR_CODE_NO_ERROR; dc_request_dir_t compDecomp = DC_COMPRESSION_REQUEST; Cpa8U opStatus = ICP_QAT_FW_COMN_STATUS_FLAG_OK; Cpa8U hdrFlags = 0; /* Cast response message to compression response message type */ pCompRespMsg = (icp_qat_fw_comp_resp_t *)pRespMsg; - + if (!(pCompRespMsg)) { + QAT_UTILS_LOG("pCompRespMsg is NULL\n"); + return; + } /* Extract request data pointer from the opaque data */ LAC_MEM_SHARED_READ_TO_PTR(pCompRespMsg->opaque_data, pReqData); + if (!(pReqData)) { + QAT_UTILS_LOG("pReqData is NULL\n"); + return; + } /* Extract fields from the request data structure */ pCookie = (dc_compression_cookie_t *)pReqData; - if (!pCookie) - return; pSessionDesc = DC_SESSION_DESC_FROM_CTX_GET(pCookie->pSessionHandle); pService = (sal_compression_service_t *)(pCookie->dcInstance); isDcDp = pSessionDesc->isDcDp; if (CPA_TRUE == isDcDp) { pResponse = (CpaDcDpOpData *)pReqData; pResults = &(pResponse->results); if (CPA_DC_DIR_DECOMPRESS == pSessionDesc->sessDirection) { compDecomp = DC_DECOMPRESSION_REQUEST; } pCookie = NULL; } else { pResults = pCookie->pResults; callbackTag = pCookie->callbackTag; pCbFunc = pCookie->pSessionDesc->pCompressionCb; compDecomp = pCookie->compDecomp; pOpData = pCookie->pDcOpData; } opStatus = pCompRespMsg->comn_resp.comn_status; if (NULL != pOpData) { verifyHwIntegrityCrcs = pOpData->verifyHwIntegrityCrcs; integrityCrcCheck = pOpData->integrityCrcCheck; } hdrFlags = pCompRespMsg->comn_resp.hdr_flags; /* Get the cmp error code */ cmpErr = pCompRespMsg->comn_resp.comn_error.s1.cmp_err_code; if (ICP_QAT_FW_COMN_RESP_UNSUPPORTED_REQUEST_STAT_GET(opStatus)) { /* Compression not supported by firmware, set produced/consumed to zero and call the cb function with status CPA_STATUS_UNSUPPORTED */ QAT_UTILS_LOG("Compression feature not supported\n"); status = CPA_STATUS_UNSUPPORTED; pResults->status = (Cpa8S)cmpErr; pResults->consumed = 0; pResults->produced = 0; if (CPA_TRUE == isDcDp) { if (pResponse) pResponse->responseStatus = CPA_STATUS_UNSUPPORTED; (pService->pDcDpCb)(pResponse); } else { /* Free the memory pool */ Lac_MemPoolEntryFree(pCookie); pCookie = NULL; if (NULL != pCbFunc) { pCbFunc(callbackTag, status); } } if (DC_COMPRESSION_REQUEST == compDecomp) { COMPRESSION_STAT_INC(numCompCompletedErrors, pService); } else { COMPRESSION_STAT_INC(numDecompCompletedErrors, pService); } return; } else { /* Check compression response status */ cmpPass = (CpaBoolean)(ICP_QAT_FW_COMN_STATUS_FLAG_OK == ICP_QAT_FW_COMN_RESP_CMP_STAT_GET(opStatus)); } if (isDcGen2x(pService)) { /* QAT1.7 and QAT 1.8 hardware */ if (CPA_DC_INCOMPLETE_FILE_ERR == (Cpa8S)cmpErr) { cmpPass = CPA_TRUE; cmpErr = ERR_CODE_NO_ERROR; } } else { /* QAT2.0 hardware cancels the incomplete file errors * only for DEFLATE algorithm. 
* Decompression direction is not tested in the callback as * the request does not allow it. */ if ((pSessionDesc->compType == CPA_DC_DEFLATE) && (CPA_DC_INCOMPLETE_FILE_ERR == (Cpa8S)cmpErr)) { cmpPass = CPA_TRUE; cmpErr = ERR_CODE_NO_ERROR; } } /* log the slice hang and endpoint push/pull error inside the response */ if (ERR_CODE_SSM_ERROR == (Cpa8S)cmpErr) { QAT_UTILS_LOG( "Slice hang detected on the compression slice.\n"); } else if (ERR_CODE_ENDPOINT_ERROR == (Cpa8S)cmpErr) { QAT_UTILS_LOG( "PCIe End Point Push/Pull or TI/RI Parity error detected.\n"); } /* We return the compression error code for now. We would need to update * the API if we decide to return both error codes */ pResults->status = (Cpa8S)cmpErr; /* Check the translator status */ if ((DC_COMPRESSION_REQUEST == compDecomp) && (CPA_DC_HT_FULL_DYNAMIC == pSessionDesc->huffType)) { /* Check translator response status */ xlatPass = (CpaBoolean)(ICP_QAT_FW_COMN_STATUS_FLAG_OK == ICP_QAT_FW_COMN_RESP_XLAT_STAT_GET(opStatus)); /* Get the translator error code */ xlatErr = pCompRespMsg->comn_resp.comn_error.s1.xlat_err_code; /* Return a fatal error or a potential error in the translator * slice if the compression slice did not return any error */ if ((CPA_DC_OK == pResults->status) || (CPA_DC_FATALERR == (Cpa8S)xlatErr)) { pResults->status = (Cpa8S)xlatErr; } } /* Update dc error counter */ dcErrorLog(pResults->status); if (CPA_FALSE == isDcDp) { /* In case of any error for an end of packet request, we need to * update * the request type for the following request */ if (CPA_DC_FLUSH_FINAL == pCookie->flushFlag && cmpPass && xlatPass) { pSessionDesc->requestType = DC_REQUEST_FIRST; } else { pSessionDesc->requestType = DC_REQUEST_SUBSEQUENT; } if ((CPA_DC_STATEFUL == pSessionDesc->sessState) || ((CPA_DC_STATELESS == pSessionDesc->sessState) && (DC_COMPRESSION_REQUEST == compDecomp))) { /* Overflow is a valid use case for Traditional API * only. Stateful Overflow is supported in both * compression and decompression direction. Stateless * Overflow is supported only in compression direction. */ if (CPA_DC_OVERFLOW == (Cpa8S)cmpErr) cmpPass = CPA_TRUE; if (CPA_DC_OVERFLOW == (Cpa8S)xlatErr) { if (isDcGen4x(pService) && (CPA_TRUE == pService->comp_device_data .translatorOverflow)) { pResults->consumed = pCompRespMsg->comp_resp_pars .input_byte_counter; dcUpdateXltOverflowChecksumsGen4( pCookie, &pCompRespMsg->comp_resp_pars, pResults); } xlatPass = CPA_TRUE; } } } else { if (CPA_DC_OVERFLOW == (Cpa8S)cmpErr) { cmpPass = CPA_FALSE; } if (CPA_DC_OVERFLOW == (Cpa8S)xlatErr) { /* XLT overflow is not valid for Data Plane requests */ xlatPass = CPA_FALSE; } } if ((CPA_TRUE == cmpPass) && (CPA_TRUE == xlatPass)) { /* Extract the response from the firmware */ pResults->consumed = pCompRespMsg->comp_resp_pars.input_byte_counter; pResults->produced = pCompRespMsg->comp_resp_pars.output_byte_counter; pSessionDesc->cumulativeConsumedBytes += pResults->consumed; /* Handle Checksum for end to end data integrity. 
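* When end-to-end integrity CRC checking is enabled only the software seed CRC is carried forward here; otherwise, provided the translator did not overflow, the legacy CRC32/Adler32 returned in the response becomes the checksum carried into the following request.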
*/ if (CPA_TRUE == pService->generic_service_info.integrityCrcCheck && CPA_TRUE == integrityCrcCheck) { pSessionDesc->previousChecksum = pSessionDesc->seedSwCrc.swCrc32I; } else if (CPA_DC_OVERFLOW != (Cpa8S)xlatErr) { if (CPA_DC_CRC32 == pSessionDesc->checksumType) { pResults->checksum = pCompRespMsg->comp_resp_pars.crc.legacy .curr_crc32; } else if (CPA_DC_ADLER32 == pSessionDesc->checksumType) { pResults->checksum = pCompRespMsg->comp_resp_pars.crc.legacy .curr_adler_32; } pSessionDesc->previousChecksum = pResults->checksum; } if (DC_DECOMPRESSION_REQUEST == compDecomp) { pResults->endOfLastBlock = (ICP_QAT_FW_COMN_STATUS_CMP_END_OF_LAST_BLK_FLAG_SET == ICP_QAT_FW_COMN_RESP_CMP_END_OF_LAST_BLK_FLAG_GET( opStatus)); } else { /* Check if returned data is a stored block * in compression direction */ pResults->dataUncompressed = ICP_QAT_FW_COMN_HDR_ST_BLK_FLAG_GET(hdrFlags); } /* Save the checksum for the next request */ if ((CPA_DC_OVERFLOW != (Cpa8S)xlatErr) && (CPA_TRUE == verifyHwIntegrityCrcs)) { pSessionDesc->previousChecksum = pSessionDesc->seedSwCrc.swCrc32I; } /* Check if a CNV recovery happened and * increase stats counter */ if ((DC_COMPRESSION_REQUEST == compDecomp) && ICP_QAT_FW_COMN_HDR_CNV_FLAG_GET(hdrFlags) && ICP_QAT_FW_COMN_HDR_CNVNR_FLAG_GET(hdrFlags)) { COMPRESSION_STAT_INC(numCompCnvErrorsRecovered, pService); } if (CPA_TRUE == isDcDp) { if (pResponse) pResponse->responseStatus = CPA_STATUS_SUCCESS; } else { if (DC_COMPRESSION_REQUEST == compDecomp) { COMPRESSION_STAT_INC(numCompCompleted, pService); } else { COMPRESSION_STAT_INC(numDecompCompleted, pService); } } } else { #ifdef ICP_DC_RETURN_COUNTERS_ON_ERROR /* Extract the response from the firmware */ pResults->consumed = pCompRespMsg->comp_resp_pars.input_byte_counter; pResults->produced = pCompRespMsg->comp_resp_pars.output_byte_counter; if (CPA_DC_STATEFUL == pSessionDesc->sessState) { pSessionDesc->cumulativeConsumedBytes += pResults->consumed; } else { /* In the stateless case all requests have both SOP and * EOP set */ pSessionDesc->cumulativeConsumedBytes = pResults->consumed; } #else pResults->consumed = 0; pResults->produced = 0; #endif if (CPA_DC_OVERFLOW == pResults->status && CPA_DC_STATELESS == pSessionDesc->sessState) { /* This error message will be returned by Data Plane API * in both * compression and decompression direction. With * Traditional API * this error message will be returned only in stateless * decompression direction */ QAT_UTILS_LOG( "Unrecoverable error: stateless overflow. 
You may need to increase the size of your destination buffer.\n"); } if (CPA_TRUE == isDcDp) { if (pResponse) pResponse->responseStatus = CPA_STATUS_FAIL; } else { if (CPA_DC_OK != pResults->status && CPA_DC_INCOMPLETE_FILE_ERR != pResults->status) { status = CPA_STATUS_FAIL; } if (DC_COMPRESSION_REQUEST == compDecomp) { COMPRESSION_STAT_INC(numCompCompletedErrors, pService); } else { COMPRESSION_STAT_INC(numDecompCompletedErrors, pService); } } } if (CPA_TRUE == isDcDp) { /* Decrement number of stateless pending callbacks for session */ pSessionDesc->pendingDpStatelessCbCount--; (pService->pDcDpCb)(pResponse); } else { /* Decrement number of pending callbacks for session */ if (CPA_DC_STATELESS == pSessionDesc->sessState) { qatUtilsAtomicDec( &(pCookie->pSessionDesc->pendingStatelessCbCount)); } else if (0 != qatUtilsAtomicGet(&pCookie->pSessionDesc ->pendingStatefulCbCount)) { qatUtilsAtomicDec( &(pCookie->pSessionDesc->pendingStatefulCbCount)); } /* Free the memory pool */ Lac_MemPoolEntryFree(pCookie); pCookie = NULL; if (NULL != pCbFunc) { pCbFunc(callbackTag, status); } } } /** ***************************************************************************** * @ingroup Dc_DataCompression * Check that all the parameters in the pOpData structure are valid * * @description * Check that all the parameters in the pOpData structure are valid * * @param[in] pService Pointer to the compression service * @param[in] pOpData Pointer to request information structure * holding parameters for cpaDcCompress2 and * CpaDcDecompressData2 * @retval CPA_STATUS_SUCCESS Function executed successfully * @retval CPA_STATUS_INVALID_PARAM Invalid parameter passed in * *****************************************************************************/ CpaStatus dcCheckOpData(sal_compression_service_t *pService, CpaDcOpData *pOpData) { CpaDcSkipMode skipMode = 0; if ((pOpData->flushFlag < CPA_DC_FLUSH_NONE) || (pOpData->flushFlag > CPA_DC_FLUSH_FULL)) { LAC_INVALID_PARAM_LOG("Invalid flushFlag value"); return CPA_STATUS_INVALID_PARAM; } skipMode = pOpData->inputSkipData.skipMode; if ((skipMode < CPA_DC_SKIP_DISABLED) || (skipMode > CPA_DC_SKIP_STRIDE)) { LAC_INVALID_PARAM_LOG("Invalid input skip mode value"); return CPA_STATUS_INVALID_PARAM; } skipMode = pOpData->outputSkipData.skipMode; if ((skipMode < CPA_DC_SKIP_DISABLED) || (skipMode > CPA_DC_SKIP_STRIDE)) { LAC_INVALID_PARAM_LOG("Invalid output skip mode value"); return CPA_STATUS_INVALID_PARAM; } if (pOpData->integrityCrcCheck == CPA_FALSE && pOpData->verifyHwIntegrityCrcs == CPA_TRUE) { LAC_INVALID_PARAM_LOG( "integrityCrcCheck must be set to true" "in order to enable verifyHwIntegrityCrcs"); return CPA_STATUS_INVALID_PARAM; } if (pOpData->integrityCrcCheck != CPA_TRUE && pOpData->integrityCrcCheck != CPA_FALSE) { LAC_INVALID_PARAM_LOG("Invalid integrityCrcCheck value"); return CPA_STATUS_INVALID_PARAM; } if (pOpData->verifyHwIntegrityCrcs != CPA_TRUE && pOpData->verifyHwIntegrityCrcs != CPA_FALSE) { LAC_INVALID_PARAM_LOG("Invalid verifyHwIntegrityCrcs value"); return CPA_STATUS_INVALID_PARAM; } if (pOpData->compressAndVerify != CPA_TRUE && pOpData->compressAndVerify != CPA_FALSE) { LAC_INVALID_PARAM_LOG("Invalid cnv decompress check value"); return CPA_STATUS_INVALID_PARAM; } if (CPA_TRUE == pOpData->integrityCrcCheck && CPA_FALSE == pService->generic_service_info.integrityCrcCheck) { LAC_INVALID_PARAM_LOG("Integrity CRC check is not " "supported on this device"); return CPA_STATUS_INVALID_PARAM; } if (CPA_TRUE == pOpData->integrityCrcCheck && NULL == 
pOpData->pCrcData) { LAC_INVALID_PARAM_LOG("Integrity CRC data structure " "not initialized in CpaDcOpData"); return CPA_STATUS_INVALID_PARAM; } return CPA_STATUS_SUCCESS; } /** ***************************************************************************** * @ingroup Dc_DataCompression * Check the compression source buffer for Batch and Pack API. * * @description * Check that all the parameters used for Pack compression * request are valid. This function essentially checks the source buffer * parameters and results structure parameters. * * @param[in] pSessionHandle Session handle * @param[in] pSrcBuff Pointer to data buffer for compression * @param[in] pDestBuff Pointer to buffer space allocated for * output data * @param[in] pResults Pointer to results structure * @param[in] flushFlag Indicates the type of flush to be * performed * @param[in] srcBuffSize Size of the source buffer * * @retval CPA_STATUS_SUCCESS Function executed successfully * @retval CPA_STATUS_INVALID_PARAM Invalid parameter passed in * *****************************************************************************/ static CpaStatus dcCheckSourceData(CpaDcSessionHandle pSessionHandle, CpaBufferList *pSrcBuff, CpaBufferList *pDestBuff, CpaDcRqResults *pResults, CpaDcFlush flushFlag, Cpa64U srcBuffSize, CpaDcSkipData *skipData) { dc_session_desc_t *pSessionDesc = NULL; LAC_CHECK_NULL_PARAM(pSessionHandle); LAC_CHECK_NULL_PARAM(pSrcBuff); LAC_CHECK_NULL_PARAM(pDestBuff); LAC_CHECK_NULL_PARAM(pResults); pSessionDesc = DC_SESSION_DESC_FROM_CTX_GET(pSessionHandle); if (NULL == pSessionDesc) { LAC_INVALID_PARAM_LOG("Session handle not as expected"); return CPA_STATUS_INVALID_PARAM; } if ((flushFlag < CPA_DC_FLUSH_NONE) || (flushFlag > CPA_DC_FLUSH_FULL)) { LAC_INVALID_PARAM_LOG("Invalid flushFlag value"); return CPA_STATUS_INVALID_PARAM; } if (pSrcBuff == pDestBuff) { LAC_INVALID_PARAM_LOG("In place operation not supported"); return CPA_STATUS_INVALID_PARAM; } /* Compressing zero bytes is not supported for stateless sessions * for non Batch and Pack requests */ if ((CPA_DC_STATELESS == pSessionDesc->sessState) && (0 == srcBuffSize) && (NULL == skipData)) { LAC_INVALID_PARAM_LOG( "The source buffer size needs to be greater than " "zero bytes for stateless sessions"); return CPA_STATUS_INVALID_PARAM; } if (srcBuffSize > DC_BUFFER_MAX_SIZE) { LAC_INVALID_PARAM_LOG( "The source buffer size needs to be less than or " "equal to 2^32-1 bytes"); return CPA_STATUS_INVALID_PARAM; } return CPA_STATUS_SUCCESS; } /** ***************************************************************************** * @ingroup Dc_DataCompression * Check the compression or decompression function parameters. * * @description * Check that all the parameters used for a Batch and Pack compression * request are valid. This function essentially checks the destination * buffer parameters and intermediate buffer parameters. 
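* For dynamic Huffman compression on Gen2 devices this includes checking that intermediate buffers have been registered for the instance.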
* * @param[in] pService Pointer to the compression service * @param[in] pSessionHandle Session handle * @param[in] pDestBuff Pointer to buffer space allocated for * output data * @param[in] compDecomp Direction of the operation * * @retval CPA_STATUS_SUCCESS Function executed successfully * @retval CPA_STATUS_INVALID_PARAM Invalid parameter passed in * *****************************************************************************/ static CpaStatus dcCheckDestinationData(sal_compression_service_t *pService, CpaDcSessionHandle pSessionHandle, CpaBufferList *pDestBuff, dc_request_dir_t compDecomp) { dc_session_desc_t *pSessionDesc = NULL; Cpa64U destBuffSize = 0; LAC_CHECK_NULL_PARAM(pSessionHandle); LAC_CHECK_NULL_PARAM(pDestBuff); pSessionDesc = DC_SESSION_DESC_FROM_CTX_GET(pSessionHandle); if (NULL == pSessionDesc) { LAC_INVALID_PARAM_LOG("Session handle not as expected"); return CPA_STATUS_INVALID_PARAM; } if (LacBuffDesc_BufferListVerify(pDestBuff, &destBuffSize, LAC_NO_ALIGNMENT_SHIFT) != CPA_STATUS_SUCCESS) { LAC_INVALID_PARAM_LOG( "Invalid destination buffer list parameter"); return CPA_STATUS_INVALID_PARAM; } if (destBuffSize > DC_BUFFER_MAX_SIZE) { LAC_INVALID_PARAM_LOG( "The destination buffer size needs to be less " "than or equal to 2^32-1 bytes"); return CPA_STATUS_INVALID_PARAM; } if (CPA_TRUE == pSessionDesc->isDcDp) { LAC_INVALID_PARAM_LOG( "The session type should not be data plane"); return CPA_STATUS_INVALID_PARAM; } if (DC_COMPRESSION_REQUEST == compDecomp) { if (CPA_DC_HT_FULL_DYNAMIC == pSessionDesc->huffType) { /* Check if intermediate buffers are supported */ if ((isDcGen2x(pService)) && ((0 == pService->pInterBuffPtrsArrayPhyAddr) || (NULL == pService->pInterBuffPtrsArray))) { LAC_LOG_ERROR( "No intermediate buffer defined for this instance " "- see cpaDcStartInstance"); return CPA_STATUS_INVALID_PARAM; } /* Ensure that the destination buffer size is greater or * equal to 128B */ if (destBuffSize < DC_DEST_BUFFER_DYN_MIN_SIZE) { LAC_INVALID_PARAM_LOG( "Destination buffer size should be " "greater or equal to 128B"); return CPA_STATUS_INVALID_PARAM; } } else { /* Ensure that the destination buffer size is greater or * equal to devices min output buff size */ if (destBuffSize < pService->comp_device_data.minOutputBuffSize) { LAC_INVALID_PARAM_LOG1( "Destination buffer size should be " "greater or equal to %d bytes", pService->comp_device_data .minOutputBuffSize); return CPA_STATUS_INVALID_PARAM; } } } else { /* Ensure that the destination buffer size is greater than * 0 bytes */ if (destBuffSize < DC_DEST_BUFFER_DEC_MIN_SIZE) { LAC_INVALID_PARAM_LOG( "Destination buffer size should be " "greater than 0 bytes"); return CPA_STATUS_INVALID_PARAM; } } return CPA_STATUS_SUCCESS; } /** ***************************************************************************** * @ingroup Dc_DataCompression * Populate the compression request parameters * * @description * This function will populate the compression request parameters * * @param[out] pCompReqParams Pointer to the compression request parameters * @param[in] pCookie Pointer to the compression cookie * *****************************************************************************/ static void dcCompRequestParamsPopulate(icp_qat_fw_comp_req_params_t *pCompReqParams, dc_compression_cookie_t *pCookie) { pCompReqParams->comp_len = pCookie->srcTotalDataLenInBytes; pCompReqParams->out_buffer_sz = pCookie->dstTotalDataLenInBytes; } /** ***************************************************************************** * @ingroup 
Dc_DataCompression * Create the requests for compression or decompression * * @description * Create the requests for compression or decompression. This function * will update the cookie will all required information. * * @param{out] pCookie Pointer to the compression cookie * @param[in] pService Pointer to the compression service * @param[in] pSessionDesc Pointer to the session descriptor * @param[in pSessionHandle Session handle * @param[in] pSrcBuff Pointer to data buffer for compression * @param[in] pDestBuff Pointer to buffer space for data after * compression * @param[in] pResults Pointer to results structure * @param[in] flushFlag Indicates the type of flush to be * performed * @param[in] pOpData Pointer to request information structure * holding parameters for cpaDcCompress2 * and CpaDcDecompressData2 * @param[in] callbackTag Pointer to the callback tag * @param[in] compDecomp Direction of the operation * @param[in] compressAndVerify Compress and Verify * * @retval CPA_STATUS_SUCCESS Function executed successfully * @retval CPA_STATUS_INVALID_PARAM Invalid parameter passed in * *****************************************************************************/ static CpaStatus dcCreateRequest(dc_compression_cookie_t *pCookie, sal_compression_service_t *pService, dc_session_desc_t *pSessionDesc, CpaDcSessionHandle pSessionHandle, CpaBufferList *pSrcBuff, CpaBufferList *pDestBuff, CpaDcRqResults *pResults, CpaDcFlush flushFlag, CpaDcOpData *pOpData, void *callbackTag, dc_request_dir_t compDecomp, dc_cnv_mode_t cnvMode) { icp_qat_fw_comp_req_t *pMsg = NULL; icp_qat_fw_comp_req_params_t *pCompReqParams = NULL; Cpa64U srcAddrPhys = 0, dstAddrPhys = 0; Cpa64U srcTotalDataLenInBytes = 0, dstTotalDataLenInBytes = 0; Cpa32U rpCmdFlags = 0; Cpa8U sop = ICP_QAT_FW_COMP_SOP; Cpa8U eop = ICP_QAT_FW_COMP_EOP; Cpa8U bFinal = ICP_QAT_FW_COMP_NOT_BFINAL; Cpa8U crcMode = ICP_QAT_FW_COMP_CRC_MODE_LEGACY; Cpa8U cnvDecompReq = ICP_QAT_FW_COMP_NO_CNV; Cpa8U cnvRecovery = ICP_QAT_FW_COMP_NO_CNV_RECOVERY; CpaBoolean cnvErrorInjection = ICP_QAT_FW_COMP_NO_CNV_DFX; CpaBoolean integrityCrcCheck = CPA_FALSE; CpaStatus status = CPA_STATUS_SUCCESS; CpaDcFlush flush = CPA_DC_FLUSH_NONE; Cpa32U initial_adler = 1; Cpa32U initial_crc32 = 0; icp_qat_fw_comp_req_t *pReqCache = NULL; /* Write the buffer descriptors */ status = LacBuffDesc_BufferListDescWriteAndGetSize( pSrcBuff, &srcAddrPhys, CPA_FALSE, &srcTotalDataLenInBytes, &(pService->generic_service_info)); if (status != CPA_STATUS_SUCCESS) { return status; } status = LacBuffDesc_BufferListDescWriteAndGetSize( pDestBuff, &dstAddrPhys, CPA_FALSE, &dstTotalDataLenInBytes, &(pService->generic_service_info)); if (status != CPA_STATUS_SUCCESS) { return status; } /* Populate the compression cookie */ pCookie->dcInstance = pService; pCookie->pSessionHandle = pSessionHandle; pCookie->callbackTag = callbackTag; pCookie->pSessionDesc = pSessionDesc; pCookie->pDcOpData = pOpData; pCookie->pResults = pResults; pCookie->compDecomp = compDecomp; pCookie->pUserSrcBuff = NULL; pCookie->pUserDestBuff = NULL; /* Extract flush flag from either the opData or from the * parameter. Opdata have been introduce with APIs * cpaDcCompressData2 and cpaDcDecompressData2 */ if (NULL != pOpData) { flush = pOpData->flushFlag; integrityCrcCheck = pOpData->integrityCrcCheck; } else { flush = flushFlag; } pCookie->flushFlag = flush; /* The firmware expects the length in bytes for source and destination * to be Cpa32U parameters. However the total data length could be * bigger as allocated by the user. 
We ensure that this is not the case * in dcCheckSourceData and cast the values to Cpa32U here */ pCookie->srcTotalDataLenInBytes = (Cpa32U)srcTotalDataLenInBytes; if ((isDcGen2x(pService)) && (DC_COMPRESSION_REQUEST == compDecomp) && (CPA_DC_HT_FULL_DYNAMIC == pSessionDesc->huffType)) { if (pService->minInterBuffSizeInBytes < (Cpa32U)dstTotalDataLenInBytes) { pCookie->dstTotalDataLenInBytes = (Cpa32U)(pService->minInterBuffSizeInBytes); } else { pCookie->dstTotalDataLenInBytes = (Cpa32U)dstTotalDataLenInBytes; } } else { pCookie->dstTotalDataLenInBytes = (Cpa32U)dstTotalDataLenInBytes; } /* Device can not decompress an odd byte decompression request * if bFinal is not set */ if (CPA_TRUE != pService->comp_device_data.oddByteDecompNobFinal) { if ((CPA_DC_STATEFUL == pSessionDesc->sessState) && (CPA_DC_FLUSH_FINAL != flushFlag) && (DC_DECOMPRESSION_REQUEST == compDecomp) && (pCookie->srcTotalDataLenInBytes & 0x1)) { pCookie->srcTotalDataLenInBytes--; } } /* Device can not decompress odd byte interim requests */ if (CPA_TRUE != pService->comp_device_data.oddByteDecompInterim) { if ((CPA_DC_STATEFUL == pSessionDesc->sessState) && (CPA_DC_FLUSH_FINAL != flushFlag) && (CPA_DC_FLUSH_FULL != flushFlag) && (DC_DECOMPRESSION_REQUEST == compDecomp) && (pCookie->srcTotalDataLenInBytes & 0x1)) { pCookie->srcTotalDataLenInBytes--; } } pMsg = (icp_qat_fw_comp_req_t *)&pCookie->request; if (DC_COMPRESSION_REQUEST == compDecomp) { pReqCache = &(pSessionDesc->reqCacheComp); } else { pReqCache = &(pSessionDesc->reqCacheDecomp); } /* Fills the msg from the template cached in the session descriptor */ memcpy((void *)pMsg, (void *)(pReqCache), LAC_QAT_DC_REQ_SZ_LW * LAC_LONG_WORD_IN_BYTES); if (DC_REQUEST_FIRST == pSessionDesc->requestType) { initial_adler = 1; initial_crc32 = 0; if (CPA_DC_ADLER32 == pSessionDesc->checksumType) { pSessionDesc->previousChecksum = initial_adler; } else { pSessionDesc->previousChecksum = initial_crc32; } } else if (CPA_DC_STATELESS == pSessionDesc->sessState) { pSessionDesc->previousChecksum = pResults->checksum; if (CPA_DC_ADLER32 == pSessionDesc->checksumType) { initial_adler = pSessionDesc->previousChecksum; } else { initial_crc32 = pSessionDesc->previousChecksum; } } /* Backup source and destination buffer addresses, * CRC calculations both for CNV and translator overflow * will be performed on them in the callback function. 
*/ pCookie->pUserSrcBuff = pSrcBuff; pCookie->pUserDestBuff = pDestBuff; /* * Due to implementation of CNV support and need for backwards * compatibility certain fields in the request and response structs had * been changed, moved or placed in unions cnvMode flag signifies fields * to be selected from req/res * * Doing extended crc checks makes sense only when we want to do the * actual CNV */ if (CPA_TRUE == pService->generic_service_info.integrityCrcCheck && CPA_TRUE == integrityCrcCheck) { pMsg->comp_pars.crc.crc_data_addr = pSessionDesc->physDataIntegrityCrcs; crcMode = ICP_QAT_FW_COMP_CRC_MODE_E2E; } else { /* Legacy request structure */ pMsg->comp_pars.crc.legacy.initial_adler = initial_adler; pMsg->comp_pars.crc.legacy.initial_crc32 = initial_crc32; crcMode = ICP_QAT_FW_COMP_CRC_MODE_LEGACY; } /* Populate the cmdFlags */ if (CPA_DC_STATEFUL == pSessionDesc->sessState) { pSessionDesc->previousRequestType = pSessionDesc->requestType; if (DC_REQUEST_FIRST == pSessionDesc->requestType) { /* Update the request type for following requests */ pSessionDesc->requestType = DC_REQUEST_SUBSEQUENT; /* Reinitialise the cumulative amount of consumed bytes */ pSessionDesc->cumulativeConsumedBytes = 0; if (DC_COMPRESSION_REQUEST == compDecomp) { pSessionDesc->isSopForCompressionProcessed = CPA_TRUE; } else if (DC_DECOMPRESSION_REQUEST == compDecomp) { pSessionDesc->isSopForDecompressionProcessed = CPA_TRUE; } } else { if (DC_COMPRESSION_REQUEST == compDecomp) { if (CPA_TRUE == pSessionDesc ->isSopForCompressionProcessed) { sop = ICP_QAT_FW_COMP_NOT_SOP; } else { pSessionDesc ->isSopForCompressionProcessed = CPA_TRUE; } } else if (DC_DECOMPRESSION_REQUEST == compDecomp) { if (CPA_TRUE == pSessionDesc ->isSopForDecompressionProcessed) { sop = ICP_QAT_FW_COMP_NOT_SOP; } else { pSessionDesc ->isSopForDecompressionProcessed = CPA_TRUE; } } } if ((CPA_DC_FLUSH_FINAL == flush) || (CPA_DC_FLUSH_FULL == flush)) { /* Update the request type for following requests */ pSessionDesc->requestType = DC_REQUEST_FIRST; } else { eop = ICP_QAT_FW_COMP_NOT_EOP; } } else { if (DC_REQUEST_FIRST == pSessionDesc->requestType) { /* Reinitialise the cumulative amount of consumed bytes */ pSessionDesc->cumulativeConsumedBytes = 0; } } /* (LW 14 - 15) */ pCompReqParams = &(pMsg->comp_pars); dcCompRequestParamsPopulate(pCompReqParams, pCookie); if (CPA_DC_FLUSH_FINAL == flush) { bFinal = ICP_QAT_FW_COMP_BFINAL; } switch (cnvMode) { case DC_CNVNR: cnvRecovery = ICP_QAT_FW_COMP_CNV_RECOVERY; /* Fall through is intended here, because for CNVNR * cnvDecompReq also needs to be set */ case DC_CNV: cnvDecompReq = ICP_QAT_FW_COMP_CNV; if (isDcGen4x(pService)) { cnvErrorInjection = pSessionDesc->cnvErrorInjection; } break; case DC_NO_CNV: cnvDecompReq = ICP_QAT_FW_COMP_NO_CNV; cnvRecovery = ICP_QAT_FW_COMP_NO_CNV_RECOVERY; break; } /* LW 18 */ rpCmdFlags = ICP_QAT_FW_COMP_REQ_PARAM_FLAGS_BUILD(sop, eop, bFinal, cnvDecompReq, cnvRecovery, cnvErrorInjection, crcMode); pMsg->comp_pars.req_par_flags = rpCmdFlags; /* Populates the QAT common request middle part of the message * (LW 6 to 11) */ SalQatMsg_CmnMidWrite((icp_qat_fw_la_bulk_req_t *)pMsg, pCookie, DC_DEFAULT_QAT_PTR_TYPE, srcAddrPhys, dstAddrPhys, 0, 0); return CPA_STATUS_SUCCESS; } /** ***************************************************************************** * @ingroup Dc_DataCompression * Send a compression request to QAT * * @description * Send the requests for compression or decompression to QAT * * @param{in] pCookie Pointer to the compression cookie * @param[in] pService 
Pointer to the compression service * @param[in] pSessionDesc Pointer to the session descriptor * @param[in] compDecomp Direction of the operation * * @retval CPA_STATUS_SUCCESS Function executed successfully * @retval CPA_STATUS_INVALID_PARAM Invalid parameter passed in * *****************************************************************************/ static CpaStatus dcSendRequest(dc_compression_cookie_t *pCookie, sal_compression_service_t *pService, dc_session_desc_t *pSessionDesc, dc_request_dir_t compDecomp) { CpaStatus status = CPA_STATUS_SUCCESS; /* Send to QAT */ status = icp_adf_transPutMsg(pService->trans_handle_compression_tx, (void *)&(pCookie->request), LAC_QAT_DC_REQ_SZ_LW); if ((CPA_DC_STATEFUL == pSessionDesc->sessState) && (CPA_STATUS_RETRY == status)) { /* reset requestType after receiving an retry on * the stateful request */ pSessionDesc->requestType = pSessionDesc->previousRequestType; } return status; } /** ***************************************************************************** * @ingroup Dc_DataCompression * Process the synchronous and asynchronous case for compression or * decompression * * @description * Process the synchronous and asynchronous case for compression or * decompression. This function will then create and send the request to * the firmware. * * @param[in] pService Pointer to the compression service * @param[in] pSessionDesc Pointer to the session descriptor * @param[in] dcInstance Instance handle derived from discovery * functions * @param[in] pSessionHandle Session handle * @param[in] numRequests Number of operations in the batch request * @param[in] pBatchOpData Address of the list of jobs to be processed * @param[in] pSrcBuff Pointer to data buffer for compression * @param[in] pDestBuff Pointer to buffer space for data after * compression * @param[in] pResults Pointer to results structure * @param[in] flushFlag Indicates the type of flush to be * performed * @param[in] pOpData Pointer to request information structure * holding parameters for cpaDcCompress2 and * CpaDcDecompressData2 * @param[in] callbackTag Pointer to the callback tag * @param[in] compDecomp Direction of the operation * @param[in] isAsyncMode Used to know if synchronous or asynchronous * mode * @param[in] cnvMode CNV Mode * * @retval CPA_STATUS_SUCCESS Function executed successfully * @retval CPA_STATUS_RETRY Retry operation * @retval CPA_STATUS_FAIL Function failed * @retval CPA_STATUS_RESOURCE Resource error * *****************************************************************************/ static CpaStatus dcCompDecompData(sal_compression_service_t *pService, dc_session_desc_t *pSessionDesc, CpaInstanceHandle dcInstance, CpaDcSessionHandle pSessionHandle, CpaBufferList *pSrcBuff, CpaBufferList *pDestBuff, CpaDcRqResults *pResults, CpaDcFlush flushFlag, CpaDcOpData *pOpData, void *callbackTag, dc_request_dir_t compDecomp, CpaBoolean isAsyncMode, dc_cnv_mode_t cnvMode) { CpaStatus status = CPA_STATUS_SUCCESS; dc_compression_cookie_t *pCookie = NULL; if ((LacSync_GenWakeupSyncCaller == pSessionDesc->pCompressionCb) && isAsyncMode == CPA_TRUE) { lac_sync_op_data_t *pSyncCallbackData = NULL; status = LacSync_CreateSyncCookie(&pSyncCallbackData); if (CPA_STATUS_SUCCESS == status) { status = dcCompDecompData(pService, pSessionDesc, dcInstance, pSessionHandle, pSrcBuff, pDestBuff, pResults, flushFlag, pOpData, pSyncCallbackData, compDecomp, CPA_FALSE, cnvMode); } else { return status; } if (CPA_STATUS_SUCCESS == status) { CpaStatus syncStatus = CPA_STATUS_SUCCESS; syncStatus = 
LacSync_WaitForCallback(pSyncCallbackData, DC_SYNC_CALLBACK_TIMEOUT, &status, NULL); /* If callback doesn't come back */ if (CPA_STATUS_SUCCESS != syncStatus) { if (DC_COMPRESSION_REQUEST == compDecomp) { COMPRESSION_STAT_INC( numCompCompletedErrors, pService); } else { COMPRESSION_STAT_INC( numDecompCompletedErrors, pService); } LAC_LOG_ERROR("Callback timed out"); status = syncStatus; } } else { /* As the Request was not sent the Callback will never * be called, so need to indicate that we're finished * with cookie so it can be destroyed. */ LacSync_SetSyncCookieComplete(pSyncCallbackData); } LacSync_DestroySyncCookie(&pSyncCallbackData); return status; } /* Allocate the compression cookie * The memory is freed in callback or in sendRequest if an error occurs */ pCookie = (dc_compression_cookie_t *)Lac_MemPoolEntryAlloc( pService->compression_mem_pool); if (NULL == pCookie) { LAC_LOG_ERROR("Cannot get mem pool entry for compression"); status = CPA_STATUS_RESOURCE; } else if ((void *)CPA_STATUS_RETRY == pCookie) { pCookie = NULL; status = CPA_STATUS_RETRY; } if (CPA_STATUS_SUCCESS == status) { status = dcCreateRequest(pCookie, pService, pSessionDesc, pSessionHandle, pSrcBuff, pDestBuff, pResults, flushFlag, pOpData, callbackTag, compDecomp, cnvMode); } if (CPA_STATUS_SUCCESS == status) { /* Increment number of pending callbacks for session */ if (CPA_DC_STATELESS == pSessionDesc->sessState) { qatUtilsAtomicInc( &(pSessionDesc->pendingStatelessCbCount)); } status = dcSendRequest(pCookie, pService, pSessionDesc, compDecomp); } if (CPA_STATUS_SUCCESS == status) { if (DC_COMPRESSION_REQUEST == compDecomp) { COMPRESSION_STAT_INC(numCompRequests, pService); } else { COMPRESSION_STAT_INC(numDecompRequests, pService); } } else { if (DC_COMPRESSION_REQUEST == compDecomp) { COMPRESSION_STAT_INC(numCompRequestsErrors, pService); } else { COMPRESSION_STAT_INC(numDecompRequestsErrors, pService); } /* Decrement number of pending callbacks for session */ if (CPA_DC_STATELESS == pSessionDesc->sessState) { qatUtilsAtomicDec( &(pSessionDesc->pendingStatelessCbCount)); } else { qatUtilsAtomicDec( &(pSessionDesc->pendingStatefulCbCount)); } /* Free the memory pool */ if (NULL != pCookie) { if (status != CPA_STATUS_UNSUPPORTED) { /* Free the memory pool */ Lac_MemPoolEntryFree(pCookie); pCookie = NULL; } } } return status; } /** ***************************************************************************** * @ingroup Dc_DataCompression * Handle zero length compression or decompression requests * * @description * Handle zero length compression or decompression requests * * @param[in] pService Pointer to the compression service * @param[in] pSessionDesc Pointer to the session descriptor * @param[in] pResults Pointer to results structure * @param[in] flushFlag Indicates the type of flush to be * performed * @param[in] callbackTag User supplied value to help correlate * the callback with its associated request * @param[in] compDecomp Direction of the operation * * @retval CPA_TRUE Zero length SOP or MOP processed * @retval CPA_FALSE Zero length EOP * *****************************************************************************/ static CpaStatus dcZeroLengthRequests(sal_compression_service_t *pService, dc_session_desc_t *pSessionDesc, CpaDcRqResults *pResults, CpaDcFlush flushFlag, void *callbackTag, dc_request_dir_t compDecomp) { CpaBoolean status = CPA_FALSE; CpaDcCallbackFn pCbFunc = pSessionDesc->pCompressionCb; if (DC_REQUEST_FIRST == pSessionDesc->requestType) { /* Reinitialise the cumulative amount of 
consumed bytes */ pSessionDesc->cumulativeConsumedBytes = 0; /* Zero length SOP */ if (CPA_DC_ADLER32 == pSessionDesc->checksumType) { pResults->checksum = 1; } else { pResults->checksum = 0; } status = CPA_TRUE; } else if ((CPA_DC_FLUSH_NONE == flushFlag) || (CPA_DC_FLUSH_SYNC == flushFlag)) { /* Zero length MOP */ pResults->checksum = pSessionDesc->previousChecksum; status = CPA_TRUE; } if (CPA_TRUE == status) { pResults->status = CPA_DC_OK; pResults->produced = 0; pResults->consumed = 0; /* Increment statistics */ if (DC_COMPRESSION_REQUEST == compDecomp) { COMPRESSION_STAT_INC(numCompRequests, pService); COMPRESSION_STAT_INC(numCompCompleted, pService); } else { COMPRESSION_STAT_INC(numDecompRequests, pService); COMPRESSION_STAT_INC(numDecompCompleted, pService); } LAC_SPINUNLOCK(&(pSessionDesc->sessionLock)); if ((NULL != pCbFunc) && (LacSync_GenWakeupSyncCaller != pCbFunc)) { pCbFunc(callbackTag, CPA_STATUS_SUCCESS); } return CPA_TRUE; } return CPA_FALSE; } static CpaStatus dcParamCheck(CpaInstanceHandle dcInstance, CpaDcSessionHandle pSessionHandle, sal_compression_service_t *pService, CpaBufferList *pSrcBuff, CpaBufferList *pDestBuff, CpaDcRqResults *pResults, dc_session_desc_t *pSessionDesc, CpaDcFlush flushFlag, Cpa64U srcBuffSize) { if (dcCheckSourceData(pSessionHandle, pSrcBuff, pDestBuff, pResults, flushFlag, srcBuffSize, NULL) != CPA_STATUS_SUCCESS) { return CPA_STATUS_INVALID_PARAM; } if (dcCheckDestinationData( pService, pSessionHandle, pDestBuff, DC_COMPRESSION_REQUEST) != CPA_STATUS_SUCCESS) { return CPA_STATUS_INVALID_PARAM; } if (CPA_DC_DIR_DECOMPRESS == pSessionDesc->sessDirection) { LAC_INVALID_PARAM_LOG("Invalid sessDirection value"); return CPA_STATUS_INVALID_PARAM; } return CPA_STATUS_SUCCESS; } CpaStatus cpaDcCompressData(CpaInstanceHandle dcInstance, CpaDcSessionHandle pSessionHandle, CpaBufferList *pSrcBuff, CpaBufferList *pDestBuff, CpaDcRqResults *pResults, CpaDcFlush flushFlag, void *callbackTag) { sal_compression_service_t *pService = NULL; dc_session_desc_t *pSessionDesc = NULL; CpaInstanceHandle insHandle = NULL; Cpa64U srcBuffSize = 0; if (CPA_INSTANCE_HANDLE_SINGLE == dcInstance) { insHandle = dcGetFirstHandle(); } else { insHandle = dcInstance; } pService = (sal_compression_service_t *)insHandle; LAC_CHECK_NULL_PARAM(insHandle); LAC_CHECK_NULL_PARAM(pSessionHandle); /* Check if SAL is initialised otherwise return an error */ SAL_RUNNING_CHECK(insHandle); /* This check is outside the parameter checking as it is needed to * manage zero length requests */ if (LacBuffDesc_BufferListVerifyNull(pSrcBuff, &srcBuffSize, LAC_NO_ALIGNMENT_SHIFT) != CPA_STATUS_SUCCESS) { LAC_INVALID_PARAM_LOG("Invalid source buffer list parameter"); return CPA_STATUS_INVALID_PARAM; } /* Ensure this is a compression instance */ SAL_CHECK_INSTANCE_TYPE(insHandle, SAL_SERVICE_TYPE_COMPRESSION); pSessionDesc = DC_SESSION_DESC_FROM_CTX_GET(pSessionHandle); if (CPA_STATUS_SUCCESS != dcParamCheck(insHandle, pSessionHandle, pService, pSrcBuff, pDestBuff, pResults, pSessionDesc, flushFlag, srcBuffSize)) { return CPA_STATUS_INVALID_PARAM; } if (CPA_DC_STATEFUL == pSessionDesc->sessState) { LAC_INVALID_PARAM_LOG( "Invalid session state, stateful sessions " "are not supported"); return CPA_STATUS_UNSUPPORTED; } if (!(pService->generic_service_info.dcExtendedFeatures & DC_CNV_EXTENDED_CAPABILITY)) { LAC_INVALID_PARAM_LOG( "CompressAndVerify feature not supported"); return CPA_STATUS_UNSUPPORTED; } if (!(pService->generic_service_info.dcExtendedFeatures & DC_CNVNR_EXTENDED_CAPABILITY)) { 
LAC_INVALID_PARAM_LOG( "CompressAndVerifyAndRecovery feature not supported"); return CPA_STATUS_UNSUPPORTED; } return dcCompDecompData(pService, pSessionDesc, insHandle, pSessionHandle, pSrcBuff, pDestBuff, pResults, flushFlag, NULL, callbackTag, DC_COMPRESSION_REQUEST, CPA_TRUE, DC_CNVNR); } CpaStatus cpaDcCompressData2(CpaInstanceHandle dcInstance, CpaDcSessionHandle pSessionHandle, CpaBufferList *pSrcBuff, CpaBufferList *pDestBuff, CpaDcOpData *pOpData, CpaDcRqResults *pResults, void *callbackTag) { sal_compression_service_t *pService = NULL; dc_session_desc_t *pSessionDesc = NULL; CpaInstanceHandle insHandle = NULL; Cpa64U srcBuffSize = 0; dc_cnv_mode_t cnvMode = DC_NO_CNV; LAC_CHECK_NULL_PARAM(pOpData); if (((CPA_TRUE != pOpData->compressAndVerify) && (CPA_FALSE != pOpData->compressAndVerify)) || ((CPA_FALSE != pOpData->compressAndVerifyAndRecover) && (CPA_TRUE != pOpData->compressAndVerifyAndRecover))) { return CPA_STATUS_INVALID_PARAM; } if ((CPA_FALSE == pOpData->compressAndVerify) && (CPA_TRUE == pOpData->compressAndVerifyAndRecover)) { return CPA_STATUS_INVALID_PARAM; } if ((CPA_TRUE == pOpData->compressAndVerify) && (CPA_TRUE == pOpData->compressAndVerifyAndRecover) && (CPA_FALSE == pOpData->integrityCrcCheck)) { return cpaDcCompressData(dcInstance, pSessionHandle, pSrcBuff, pDestBuff, pResults, pOpData->flushFlag, callbackTag); } if (CPA_FALSE == pOpData->compressAndVerify) { LAC_INVALID_PARAM_LOG( "Data compression without verification not allowed"); return CPA_STATUS_UNSUPPORTED; } if (CPA_INSTANCE_HANDLE_SINGLE == dcInstance) { insHandle = dcGetFirstHandle(); } else { insHandle = dcInstance; } pService = (sal_compression_service_t *)insHandle; LAC_CHECK_NULL_PARAM(insHandle); LAC_CHECK_NULL_PARAM(pSessionHandle); LAC_CHECK_NULL_PARAM(pOpData); /* Check if SAL is initialised otherwise return an error */ SAL_RUNNING_CHECK(insHandle); /* This check is outside the parameter checking as it is needed to * manage zero length requests */ if (LacBuffDesc_BufferListVerifyNull(pSrcBuff, &srcBuffSize, LAC_NO_ALIGNMENT_SHIFT) != CPA_STATUS_SUCCESS) { LAC_INVALID_PARAM_LOG("Invalid source buffer list parameter"); return CPA_STATUS_INVALID_PARAM; } /* Ensure this is a compression instance */ SAL_CHECK_INSTANCE_TYPE(insHandle, SAL_SERVICE_TYPE_COMPRESSION); pSessionDesc = DC_SESSION_DESC_FROM_CTX_GET(pSessionHandle); if (CPA_TRUE == pOpData->compressAndVerify && CPA_DC_STATEFUL == pSessionDesc->sessState) { LAC_INVALID_PARAM_LOG( "Invalid session state, stateful sessions " "not supported with CNV"); return CPA_STATUS_UNSUPPORTED; } if (!(pService->generic_service_info.dcExtendedFeatures & DC_CNV_EXTENDED_CAPABILITY) && (CPA_TRUE == pOpData->compressAndVerify)) { LAC_INVALID_PARAM_LOG( "CompressAndVerify feature not supported"); return CPA_STATUS_UNSUPPORTED; } if (CPA_STATUS_SUCCESS != dcParamCheck(insHandle, pSessionHandle, pService, pSrcBuff, pDestBuff, pResults, pSessionDesc, pOpData->flushFlag, srcBuffSize)) { return CPA_STATUS_INVALID_PARAM; } if (CPA_STATUS_SUCCESS != dcCheckOpData(pService, pOpData)) { return CPA_STATUS_INVALID_PARAM; } if (CPA_TRUE != pOpData->compressAndVerify) { if (srcBuffSize > DC_COMP_MAX_BUFF_SIZE) { LAC_LOG_ERROR( "Compression payload greater than 64KB is " "unsupported, when CnV is disabled\n"); return CPA_STATUS_UNSUPPORTED; } } if (CPA_DC_STATEFUL == pSessionDesc->sessState) { /* Lock the session to check if there are in-flight stateful * requests */ LAC_SPINLOCK(&(pSessionDesc->sessionLock)); /* Check if there is already one in-flight stateful request */ 
if (0 != qatUtilsAtomicGet( &(pSessionDesc->pendingStatefulCbCount))) { LAC_LOG_ERROR( "Only one in-flight stateful request supported"); LAC_SPINUNLOCK(&(pSessionDesc->sessionLock)); return CPA_STATUS_RETRY; } if (0 == srcBuffSize) { if (CPA_TRUE == dcZeroLengthRequests(pService, pSessionDesc, pResults, pOpData->flushFlag, callbackTag, DC_COMPRESSION_REQUEST)) { return CPA_STATUS_SUCCESS; } } qatUtilsAtomicInc(&(pSessionDesc->pendingStatefulCbCount)); LAC_SPINUNLOCK(&(pSessionDesc->sessionLock)); } if (CPA_TRUE == pOpData->compressAndVerify) { cnvMode = DC_CNV; } return dcCompDecompData(pService, pSessionDesc, insHandle, pSessionHandle, pSrcBuff, pDestBuff, pResults, pOpData->flushFlag, pOpData, callbackTag, DC_COMPRESSION_REQUEST, CPA_TRUE, cnvMode); } static CpaStatus dcDecompressDataCheck(CpaInstanceHandle insHandle, CpaDcSessionHandle pSessionHandle, CpaBufferList *pSrcBuff, CpaBufferList *pDestBuff, CpaDcRqResults *pResults, CpaDcFlush flushFlag, Cpa64U *srcBufferSize) { sal_compression_service_t *pService = NULL; dc_session_desc_t *pSessionDesc = NULL; Cpa64U srcBuffSize = 0; pService = (sal_compression_service_t *)insHandle; LAC_CHECK_NULL_PARAM(insHandle); /* Check if SAL is initialised otherwise return an error */ SAL_RUNNING_CHECK(insHandle); /* This check is outside the parameter checking as it is needed to * manage zero length requests */ if (LacBuffDesc_BufferListVerifyNull(pSrcBuff, &srcBuffSize, LAC_NO_ALIGNMENT_SHIFT) != CPA_STATUS_SUCCESS) { LAC_INVALID_PARAM_LOG("Invalid source buffer list parameter"); return CPA_STATUS_INVALID_PARAM; } /* Ensure this is a compression instance */ SAL_CHECK_INSTANCE_TYPE(insHandle, SAL_SERVICE_TYPE_COMPRESSION); if (dcCheckSourceData(pSessionHandle, pSrcBuff, pDestBuff, pResults, flushFlag, srcBuffSize, NULL) != CPA_STATUS_SUCCESS) { return CPA_STATUS_INVALID_PARAM; } if (dcCheckDestinationData(pService, pSessionHandle, pDestBuff, DC_DECOMPRESSION_REQUEST) != CPA_STATUS_SUCCESS) { return CPA_STATUS_INVALID_PARAM; } pSessionDesc = DC_SESSION_DESC_FROM_CTX_GET(pSessionHandle); if (CPA_DC_DIR_COMPRESS == pSessionDesc->sessDirection) { LAC_INVALID_PARAM_LOG("Invalid sessDirection value"); return CPA_STATUS_INVALID_PARAM; } *srcBufferSize = srcBuffSize; return CPA_STATUS_SUCCESS; } CpaStatus cpaDcDecompressData(CpaInstanceHandle dcInstance, CpaDcSessionHandle pSessionHandle, CpaBufferList *pSrcBuff, CpaBufferList *pDestBuff, CpaDcRqResults *pResults, CpaDcFlush flushFlag, void *callbackTag) { sal_compression_service_t *pService = NULL; dc_session_desc_t *pSessionDesc = NULL; CpaInstanceHandle insHandle = NULL; Cpa64U srcBuffSize = 0; CpaStatus status = CPA_STATUS_SUCCESS; if (CPA_INSTANCE_HANDLE_SINGLE == dcInstance) { insHandle = dcGetFirstHandle(); } else { insHandle = dcInstance; } status = dcDecompressDataCheck(insHandle, pSessionHandle, pSrcBuff, pDestBuff, pResults, flushFlag, &srcBuffSize); if (CPA_STATUS_SUCCESS != status) { return status; } pService = (sal_compression_service_t *)insHandle; /* Check if SAL is initialised otherwise return an error */ SAL_RUNNING_CHECK(insHandle); /* This check is outside the parameter checking as it is needed to * manage zero length requests */ if (CPA_STATUS_SUCCESS != LacBuffDesc_BufferListVerifyNull(pSrcBuff, &srcBuffSize, LAC_NO_ALIGNMENT_SHIFT)) { QAT_UTILS_LOG("Invalid source buffer list parameter"); return CPA_STATUS_INVALID_PARAM; } /* Ensure this is a compression instance */ SAL_CHECK_INSTANCE_TYPE(insHandle, SAL_SERVICE_TYPE_COMPRESSION); if (dcCheckSourceData(pSessionHandle, pSrcBuff, 
pDestBuff, pResults, flushFlag, srcBuffSize, NULL) != CPA_STATUS_SUCCESS) { return CPA_STATUS_INVALID_PARAM; } if (dcCheckDestinationData(pService, pSessionHandle, pDestBuff, DC_DECOMPRESSION_REQUEST) != CPA_STATUS_SUCCESS) { return CPA_STATUS_INVALID_PARAM; } pSessionDesc = DC_SESSION_DESC_FROM_CTX_GET(pSessionHandle); if (CPA_DC_DIR_COMPRESS == pSessionDesc->sessDirection) { QAT_UTILS_LOG("Invalid sessDirection value"); return CPA_STATUS_INVALID_PARAM; } if (CPA_DC_STATEFUL == pSessionDesc->sessState) { /* Lock the session to check if there are in-flight stateful * requests */ LAC_SPINLOCK(&(pSessionDesc->sessionLock)); /* Check if there is already one in-flight stateful request */ if (0 != qatUtilsAtomicGet( &(pSessionDesc->pendingStatefulCbCount))) { LAC_LOG_ERROR( "Only one in-flight stateful request supported"); LAC_SPINUNLOCK(&(pSessionDesc->sessionLock)); return CPA_STATUS_RETRY; } /* Gen 4 handle 0 len requests in FW */ if (isDcGen2x(pService)) { if ((0 == srcBuffSize) || ((1 == srcBuffSize) && (CPA_DC_FLUSH_FINAL != flushFlag) && (CPA_DC_FLUSH_FULL != flushFlag))) { if (CPA_TRUE == dcZeroLengthRequests( pService, pSessionDesc, pResults, flushFlag, callbackTag, DC_DECOMPRESSION_REQUEST)) { return CPA_STATUS_SUCCESS; } } } qatUtilsAtomicInc(&(pSessionDesc->pendingStatefulCbCount)); LAC_SPINUNLOCK(&(pSessionDesc->sessionLock)); } return dcCompDecompData(pService, pSessionDesc, insHandle, pSessionHandle, pSrcBuff, pDestBuff, pResults, flushFlag, NULL, callbackTag, DC_DECOMPRESSION_REQUEST, CPA_TRUE, DC_NO_CNV); } CpaStatus cpaDcDecompressData2(CpaInstanceHandle dcInstance, CpaDcSessionHandle pSessionHandle, CpaBufferList *pSrcBuff, CpaBufferList *pDestBuff, CpaDcOpData *pOpData, CpaDcRqResults *pResults, void *callbackTag) { sal_compression_service_t *pService = NULL; dc_session_desc_t *pSessionDesc = NULL; CpaInstanceHandle insHandle = NULL; CpaStatus status = CPA_STATUS_SUCCESS; Cpa64U srcBuffSize = 0; LAC_CHECK_NULL_PARAM(pOpData); if (CPA_FALSE == pOpData->integrityCrcCheck) { return cpaDcDecompressData(dcInstance, pSessionHandle, pSrcBuff, pDestBuff, pResults, pOpData->flushFlag, callbackTag); } if (CPA_INSTANCE_HANDLE_SINGLE == dcInstance) { insHandle = dcGetFirstHandle(); } else { insHandle = dcInstance; } status = dcDecompressDataCheck(insHandle, pSessionHandle, pSrcBuff, pDestBuff, pResults, pOpData->flushFlag, &srcBuffSize); if (CPA_STATUS_SUCCESS != status) { return status; } pService = (sal_compression_service_t *)insHandle; pSessionDesc = DC_SESSION_DESC_FROM_CTX_GET(pSessionHandle); LAC_CHECK_NULL_PARAM(insHandle); /* Check if SAL is initialised otherwise return an error */ SAL_RUNNING_CHECK(insHandle); /* This check is outside the parameter checking as it is needed to * manage zero length requests */ if (CPA_STATUS_SUCCESS != LacBuffDesc_BufferListVerifyNull(pSrcBuff, &srcBuffSize, LAC_NO_ALIGNMENT_SHIFT)) { QAT_UTILS_LOG("Invalid source buffer list parameter"); return CPA_STATUS_INVALID_PARAM; } /* Ensure this is a compression instance */ SAL_CHECK_INSTANCE_TYPE(insHandle, SAL_SERVICE_TYPE_COMPRESSION); if (CPA_STATUS_SUCCESS != dcCheckSourceData(pSessionHandle, pSrcBuff, pDestBuff, pResults, CPA_DC_FLUSH_NONE, srcBuffSize, NULL)) { return CPA_STATUS_INVALID_PARAM; } if (CPA_STATUS_SUCCESS != dcCheckDestinationData(pService, pSessionHandle, pDestBuff, DC_DECOMPRESSION_REQUEST)) { return CPA_STATUS_INVALID_PARAM; } if (CPA_STATUS_SUCCESS != dcCheckOpData(pService, pOpData)) { return CPA_STATUS_INVALID_PARAM; } if (CPA_DC_DIR_COMPRESS == pSessionDesc->sessDirection) { 
QAT_UTILS_LOG("Invalid sessDirection value"); return CPA_STATUS_INVALID_PARAM; } if (CPA_DC_STATEFUL == pSessionDesc->sessState) { /* Lock the session to check if there are in-flight stateful * requests */ LAC_SPINLOCK(&(pSessionDesc->sessionLock)); /* Check if there is already one in-flight stateful request */ if (0 != qatUtilsAtomicGet( &(pSessionDesc->pendingStatefulCbCount))) { LAC_LOG_ERROR( "Only one in-flight stateful request supported"); LAC_SPINUNLOCK(&(pSessionDesc->sessionLock)); return CPA_STATUS_RETRY; } /* Gen 4 handle 0 len requests in FW */ if (isDcGen2x(pService)) { if ((0 == srcBuffSize) || ((1 == srcBuffSize) && (CPA_DC_FLUSH_FINAL != pOpData->flushFlag) && (CPA_DC_FLUSH_FULL != pOpData->flushFlag))) { if (CPA_TRUE == dcZeroLengthRequests( pService, pSessionDesc, pResults, pOpData->flushFlag, callbackTag, DC_DECOMPRESSION_REQUEST)) { return CPA_STATUS_SUCCESS; } } } qatUtilsAtomicInc(&(pSessionDesc->pendingStatefulCbCount)); LAC_SPINUNLOCK(&(pSessionDesc->sessionLock)); } return dcCompDecompData(pService, pSessionDesc, insHandle, pSessionHandle, pSrcBuff, pDestBuff, pResults, pOpData->flushFlag, pOpData, callbackTag, DC_DECOMPRESSION_REQUEST, CPA_TRUE, DC_NO_CNV); } diff --git a/sys/dev/qat/qat_api/common/crypto/sym/key/lac_sym_key.c b/sys/dev/qat/qat_api/common/crypto/sym/key/lac_sym_key.c index 965874e7466f..36e0175f988a 100644 --- a/sys/dev/qat/qat_api/common/crypto/sym/key/lac_sym_key.c +++ b/sys/dev/qat/qat_api/common/crypto/sym/key/lac_sym_key.c @@ -1,3006 +1,3014 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright(c) 2007-2025 Intel Corporation */ /** ***************************************************************************** * @file lac_sym_key.c * * @ingroup LacSymKey * * This file contains the implementation of all keygen functionality * *****************************************************************************/ /* ******************************************************************************* * Include public/global header files ******************************************************************************* */ #include "cpa.h" #include "cpa_cy_key.h" #include "cpa_cy_im.h" /* ******************************************************************************* * Include private header files ******************************************************************************* */ #include "icp_accel_devices.h" #include "icp_adf_debug.h" #include "icp_adf_init.h" #include "icp_adf_transport.h" #include "qat_utils.h" #include "lac_log.h" #include "lac_hooks.h" #include "lac_sym.h" #include "lac_sym_qat_hash_defs_lookup.h" #include "lac_sym_qat.h" #include "lac_sal.h" #include "lac_sym_key.h" #include "lac_sal_types_crypto.h" #include "sal_service_state.h" #include "lac_sym_qat_key.h" #include "lac_sym_hash_defs.h" #include "sal_statistics.h" /* Number of statistics */ #define LAC_KEY_NUM_STATS (sizeof(CpaCyKeyGenStats64) / sizeof(Cpa64U)) #define LAC_KEY_STAT_INC(statistic, instanceHandle) \ do { \ sal_crypto_service_t *pService = NULL; \ pService = (sal_crypto_service_t *)instanceHandle; \ if (CPA_TRUE == \ pService->generic_service_info.stats \ ->bKeyGenStatsEnabled) { \ qatUtilsAtomicInc( \ &pService \ ->pLacKeyStats[offsetof(CpaCyKeyGenStats64, \ statistic) / \ sizeof(Cpa64U)]); \ } \ } while (0) /**< Macro to increment a Key stat (derives offset into array of atomics) */ #define LAC_KEY_STATS32_GET(keyStats, instanceHandle) \ do { \ int i; \ sal_crypto_service_t *pService = \ (sal_crypto_service_t *)instanceHandle; \ for (i = 0; i < LAC_KEY_NUM_STATS; i++) { 
\ ((Cpa32U *)&(keyStats))[i] = \ (Cpa32U)qatUtilsAtomicGet( \ &pService->pLacKeyStats[i]); \ } \ } while (0) /**< Macro to get all 32bit Key stats (from internal array of atomics) */ #define LAC_KEY_STATS64_GET(keyStats, instanceHandle) \ do { \ int i; \ sal_crypto_service_t *pService = \ (sal_crypto_service_t *)instanceHandle; \ for (i = 0; i < LAC_KEY_NUM_STATS; i++) { \ ((Cpa64U *)&(keyStats))[i] = \ qatUtilsAtomicGet(&pService->pLacKeyStats[i]); \ } \ } while (0) /**< Macro to get all 64bit Key stats (from internal array of atomics) */ #define IS_HKDF_UNSUPPORTED(cmdId, hkdfSupported) \ ((ICP_QAT_FW_LA_CMD_HKDF_EXTRACT <= cmdId && \ ICP_QAT_FW_LA_CMD_HKDF_EXTRACT_AND_EXPAND_LABEL >= cmdId) && \ !hkdfSupported) /**< macro to check whether the HKDF algorithm can be \ supported on the device */ /* Sublabel for HKDF TLS Key Generation, as defined in RFC8446. */ const static Cpa8U key256[HKDF_SUB_LABEL_KEY_LENGTH] = { 0, 16, 9, 't', 'l', 's', '1', '3', ' ', 'k', 'e', 'y', 0 }; const static Cpa8U key384[HKDF_SUB_LABEL_KEY_LENGTH] = { 0, 32, 9, 't', 'l', 's', '1', '3', ' ', 'k', 'e', 'y', 0 }; const static Cpa8U keyChaChaPoly[HKDF_SUB_LABEL_KEY_LENGTH] = { 0, 32, 9, 't', 'l', 's', '1', '3', ' ', 'k', 'e', 'y', 0 }; /* Sublabel for HKDF TLS IV key Generation, as defined in RFC8446. */ const static Cpa8U iv256[HKDF_SUB_LABEL_IV_LENGTH] = { 0, 12, 8, 't', 'l', 's', '1', '3', ' ', 'i', 'v', 0 }; const static Cpa8U iv384[HKDF_SUB_LABEL_IV_LENGTH] = { 0, 12, 8, 't', 'l', 's', '1', '3', ' ', 'i', 'v', 0 }; /* Sublabel for HKDF TLS RESUMPTION key Generation, as defined in RFC8446. */ const static Cpa8U resumption256[HKDF_SUB_LABEL_RESUMPTION_LENGTH] = { 0, 32, 16, 't', 'l', 's', '1', '3', ' ', 'r', 'e', 's', 'u', 'm', 'p', 't', 'i', 'o', 'n', 0 }; const static Cpa8U resumption384[HKDF_SUB_LABEL_RESUMPTION_LENGTH] = { 0, 48, 16, 't', 'l', 's', '1', '3', ' ', 'r', 'e', 's', 'u', 'm', 'p', 't', 'i', 'o', 'n', 0 }; /* Sublabel for HKDF TLS FINISHED key Generation, as defined in RFC8446. 
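 *
 * For reference, each sublabel buffer in this file appears to be a
 * pre-serialized HkdfLabel as defined in RFC 8446, section 7.1: a
 * 2-byte big-endian output length, a 1-byte label length, the
 * "tls13 "-prefixed label and a zero-length context.  Worked decode
 * of key256 above (assuming that layout):
 *
 *     0, 16                                 output length  = 16 (AES-128 key)
 *     9                                     label length   = 9
 *     't','l','s','1','3',' ','k','e','y'   label          = "tls13 key"
 *     0                                     context length = 0
 *
 * key384 and keyChaChaPoly differ only in their 32-byte output length.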
*/ const static Cpa8U finished256[HKDF_SUB_LABEL_FINISHED_LENGTH] = { 0, 32, 14, 't', 'l', 's', '1', '3', ' ', 'f', 'i', 'n', 'i', 's', 'h', 'e', 'd', 0 }; const static Cpa8U finished384[HKDF_SUB_LABEL_FINISHED_LENGTH] = { 0, 48, 14, 't', 'l', 's', '1', '3', ' ', 'f', 'i', 'n', 'i', 's', 'h', 'e', 'd', 0 }; /** ****************************************************************************** * @ingroup LacSymKey * SSL/TLS stat type * * @description * This enum determines which stat should be incremented *****************************************************************************/ typedef enum { LAC_KEY_REQUESTS = 0, /**< Key requests sent */ LAC_KEY_REQUEST_ERRORS, /**< Key requests errors */ LAC_KEY_COMPLETED, /**< Key requests which received responses */ LAC_KEY_COMPLETED_ERRORS /**< Key requests which received responses with errors */ } lac_key_stat_type_t; /*** Local functions prototypes ***/ static void LacSymKey_MgfHandleResponse(icp_qat_fw_la_cmd_id_t lacCmdId, void *pOpaqueData, icp_qat_fw_comn_flags cmnRespFlags); static CpaStatus LacSymKey_MgfSync(const CpaInstanceHandle instanceHandle, const CpaCyGenFlatBufCbFunc pKeyGenCb, void *pCallbackTag, const void *pKeyGenMgfOpData, CpaFlatBuffer *pGeneratedMaskBuffer, CpaBoolean bIsExtRequest); static void LacSymKey_SslTlsHandleResponse(icp_qat_fw_la_cmd_id_t lacCmdId, void *pOpaqueData, icp_qat_fw_comn_flags cmnRespFlags); static CpaStatus LacSymKey_SslTlsSync(CpaInstanceHandle instanceHandle, const CpaCyGenFlatBufCbFunc pKeyGenCb, void *pCallbackTag, icp_qat_fw_la_cmd_id_t lacCmdId, void *pKeyGenSslTlsOpData, Cpa8U hashAlgorithm, CpaFlatBuffer *pKeyGenOutpuData); /*** Implementation ***/ /** ****************************************************************************** * @ingroup LacSymKey * Get the instance handle. Support single handle. * @param[in] instanceHandle_in user supplied handle. * @retval CpaInstanceHandle the instance handle */ static CpaInstanceHandle LacKey_GetHandle(CpaInstanceHandle instanceHandle_in) { CpaInstanceHandle instanceHandle = NULL; if (CPA_INSTANCE_HANDLE_SINGLE == instanceHandle_in) { instanceHandle = Lac_GetFirstHandle(SAL_SERVICE_TYPE_CRYPTO_SYM); } else { instanceHandle = instanceHandle_in; } return instanceHandle; } /** ******************************************************************************* * @ingroup LacSymKey * Perform SSL/TLS key gen operation * * @description * Perform SSL/TLS key gen operation * * @param[in] instanceHandle QAT device handle. * @param[in] pKeyGenCb Pointer to callback function to be invoked * when the operation is complete. * @param[in] pCallbackTag Opaque User Data for this specific call. * @param[in] lacCmdId Lac command ID (identify SSL & TLS ops) * @param[in] pKeyGenSslTlsOpData Structure containing all the data needed to * perform the SSL/TLS key generation * operation. * @param[in] hashAlgorithm Specifies the hash algorithm to use. * According to RFC5246, this should be * "SHA-256 or a stronger standard hash * function." * @param[out] pKeyGenOutputData pointer to where output result should be * written * * @retval CPA_STATUS_SUCCESS Function executed successfully. * @retval CPA_STATUS_FAIL Function failed. * @retval CPA_STATUS_RETRY Function should be retried. * @retval CPA_STATUS_INVALID_PARAM Invalid parameter passed in. * @retval CPA_STATUS_RESOURCE Error related to system resources. 
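 *
 * For background, the TLS 1.2 key expansion requested through this
 * path is the PRF of RFC 5246, section 5 (summary only; the actual
 * computation is offloaded to the firmware):
 *
 *     PRF(secret, label, seed) = P_<hash>(secret, label + seed)
 *     P_hash(secret, seed)     = HMAC_hash(secret, A(1) + seed) +
 *                                HMAC_hash(secret, A(2) + seed) + ...
 *     A(0) = seed,   A(i) = HMAC_hash(secret, A(i-1))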
* *****************************************************************************/ static CpaStatus LacSymKey_KeyGenSslTls_GenCommon(CpaInstanceHandle instanceHandle, const CpaCyGenFlatBufCbFunc pKeyGenCb, void *pCallbackTag, icp_qat_fw_la_cmd_id_t lacCmdId, void *pKeyGenSslTlsOpData, Cpa8U hashAlgorithm, CpaFlatBuffer *pKeyGenOutputData); /** ****************************************************************************** * @ingroup LacSymKey * Increment stat for TLS or SSL operation * * @description * This is a generic function to update the stats for either a TLS or SSL * operation. * * @param[in] lacCmdId Indicate SSL or TLS operations * @param[in] statType Statistics Type * @param[in] instanceHandle Instance Handle * * @return None * *****************************************************************************/ static void LacKey_StatsInc(icp_qat_fw_la_cmd_id_t lacCmdId, lac_key_stat_type_t statType, CpaInstanceHandle instanceHandle) { if (ICP_QAT_FW_LA_CMD_SSL3_KEY_DERIVE == lacCmdId) { switch (statType) { case LAC_KEY_REQUESTS: LAC_KEY_STAT_INC(numSslKeyGenRequests, instanceHandle); break; case LAC_KEY_REQUEST_ERRORS: LAC_KEY_STAT_INC(numSslKeyGenRequestErrors, instanceHandle); break; case LAC_KEY_COMPLETED: LAC_KEY_STAT_INC(numSslKeyGenCompleted, instanceHandle); break; case LAC_KEY_COMPLETED_ERRORS: LAC_KEY_STAT_INC(numSslKeyGenCompletedErrors, instanceHandle); break; default: QAT_UTILS_LOG("Invalid statistics type\n"); break; } } else /* TLS v1.0/1.1 and 1.2 */ { switch (statType) { case LAC_KEY_REQUESTS: LAC_KEY_STAT_INC(numTlsKeyGenRequests, instanceHandle); break; case LAC_KEY_REQUEST_ERRORS: LAC_KEY_STAT_INC(numTlsKeyGenRequestErrors, instanceHandle); break; case LAC_KEY_COMPLETED: LAC_KEY_STAT_INC(numTlsKeyGenCompleted, instanceHandle); break; case LAC_KEY_COMPLETED_ERRORS: LAC_KEY_STAT_INC(numTlsKeyGenCompletedErrors, instanceHandle); break; default: QAT_UTILS_LOG("Invalid statistics type\n"); break; } } } void LacKeygen_StatsShow(CpaInstanceHandle instanceHandle) { CpaCyKeyGenStats64 keyStats = { 0 }; LAC_KEY_STATS64_GET(keyStats, instanceHandle); QAT_UTILS_LOG(SEPARATOR BORDER " Key Stats: " BORDER "\n" SEPARATOR); QAT_UTILS_LOG(BORDER " SSL Key Requests: %16llu " BORDER "\n" BORDER " SSL Key Request Errors: %16llu " BORDER "\n" BORDER " SSL Key Completed %16llu " BORDER "\n" BORDER " SSL Key Complete Errors: %16llu " BORDER "\n" SEPARATOR, (unsigned long long)keyStats.numSslKeyGenRequests, (unsigned long long)keyStats.numSslKeyGenRequestErrors, (unsigned long long)keyStats.numSslKeyGenCompleted, (unsigned long long)keyStats.numSslKeyGenCompletedErrors); QAT_UTILS_LOG(BORDER " TLS Key Requests: %16llu " BORDER "\n" BORDER " TLS Key Request Errors: %16llu " BORDER "\n" BORDER " TLS Key Completed %16llu " BORDER "\n" BORDER " TLS Key Complete Errors: %16llu " BORDER "\n" SEPARATOR, (unsigned long long)keyStats.numTlsKeyGenRequests, (unsigned long long)keyStats.numTlsKeyGenRequestErrors, (unsigned long long)keyStats.numTlsKeyGenCompleted, (unsigned long long)keyStats.numTlsKeyGenCompletedErrors); QAT_UTILS_LOG(BORDER " MGF Key Requests: %16llu " BORDER "\n" BORDER " MGF Key Request Errors: %16llu " BORDER "\n" BORDER " MGF Key Completed %16llu " BORDER "\n" BORDER " MGF Key Complete Errors: %16llu " BORDER "\n" SEPARATOR, (unsigned long long)keyStats.numMgfKeyGenRequests, (unsigned long long)keyStats.numMgfKeyGenRequestErrors, (unsigned long long)keyStats.numMgfKeyGenCompleted, (unsigned long long)keyStats.numMgfKeyGenCompletedErrors); } /** @ingroup LacSymKey */ CpaStatus 
cpaCyKeyGenQueryStats(CpaInstanceHandle instanceHandle_in, struct _CpaCyKeyGenStats *pSymKeyStats) { CpaInstanceHandle instanceHandle = NULL; if (CPA_INSTANCE_HANDLE_SINGLE == instanceHandle_in) { instanceHandle = Lac_GetFirstHandle(SAL_SERVICE_TYPE_CRYPTO_SYM); } else { instanceHandle = instanceHandle_in; } LAC_CHECK_INSTANCE_HANDLE(instanceHandle); SAL_CHECK_INSTANCE_TYPE(instanceHandle, (SAL_SERVICE_TYPE_CRYPTO | SAL_SERVICE_TYPE_CRYPTO_SYM)); LAC_CHECK_NULL_PARAM(pSymKeyStats); SAL_RUNNING_CHECK(instanceHandle); LAC_KEY_STATS32_GET(*pSymKeyStats, instanceHandle); return CPA_STATUS_SUCCESS; } /** @ingroup LacSymKey */ CpaStatus cpaCyKeyGenQueryStats64(CpaInstanceHandle instanceHandle_in, CpaCyKeyGenStats64 *pSymKeyStats) { CpaInstanceHandle instanceHandle = NULL; if (CPA_INSTANCE_HANDLE_SINGLE == instanceHandle_in) { instanceHandle = Lac_GetFirstHandle(SAL_SERVICE_TYPE_CRYPTO_SYM); } else { instanceHandle = instanceHandle_in; } LAC_CHECK_INSTANCE_HANDLE(instanceHandle); SAL_CHECK_INSTANCE_TYPE(instanceHandle, (SAL_SERVICE_TYPE_CRYPTO | SAL_SERVICE_TYPE_CRYPTO_SYM)); LAC_CHECK_NULL_PARAM(pSymKeyStats); SAL_RUNNING_CHECK(instanceHandle); LAC_KEY_STATS64_GET(*pSymKeyStats, instanceHandle); return CPA_STATUS_SUCCESS; } /** ****************************************************************************** * @ingroup LacSymKey * Return the size of the digest for a specific hash algorithm. * @description * Return the expected digest size based on the sha algorithm submitted. * The only supported value are sha256, sha384 and sha512. * * @param[in] hashAlgorithm either sha256, sha384 or sha512. * @return the expected size or 0 for an invalid hash. * *****************************************************************************/ static Cpa32U getDigestSizeFromHashAlgo(CpaCySymHashAlgorithm hashAlgorithm) { switch (hashAlgorithm) { case CPA_CY_SYM_HASH_SHA256: return LAC_HASH_SHA256_DIGEST_SIZE; case CPA_CY_SYM_HASH_SHA384: return LAC_HASH_SHA384_DIGEST_SIZE; case CPA_CY_SYM_HASH_SHA512: return LAC_HASH_SHA512_DIGEST_SIZE; case CPA_CY_SYM_HASH_SM3: return LAC_HASH_SM3_DIGEST_SIZE; default: return 0; } } /** ****************************************************************************** * @ingroup LacSymKey * Return the hash algorithm for a specific cipher. * @description * Return the hash algorithm related to the cipher suite. * Supported hash's are SHA256, and SHA384. * * @param[in] cipherSuite AES_128_GCM, AES_256_GCM, AES_128_CCM, * and CHACHA20_POLY1305. * @return the expected hash algorithm or 0 for an invalid cipher. * *****************************************************************************/ static CpaCySymHashAlgorithm getHashAlgorithmFromCipherSuiteHKDF(CpaCyKeyHKDFCipherSuite cipherSuite) { switch (cipherSuite) { case CPA_CY_HKDF_TLS_AES_128_GCM_SHA256: /* Fall through */ case CPA_CY_HKDF_TLS_CHACHA20_POLY1305_SHA256: case CPA_CY_HKDF_TLS_AES_128_CCM_SHA256: case CPA_CY_HKDF_TLS_AES_128_CCM_8_SHA256: return CPA_CY_SYM_HASH_SHA256; case CPA_CY_HKDF_TLS_AES_256_GCM_SHA384: return CPA_CY_SYM_HASH_SHA384; default: return 0; } } /** ****************************************************************************** * @ingroup LacSymKey * Return the digest size of cipher. * @description * Return the output key size of specific cipher, for specified sub label * * @param[in] cipherSuite = AES_128_GCM, AES_256_GCM, AES_128_CCM, * and CHACHA20_POLY1305. * subLabels = KEY, IV, RESUMPTION, and FINISHED. * @return the expected digest size of the cipher. 
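 *
 * Reading the table below (an inference from how it is indexed, e.g.
 * via LAC_KEY_HKDF_DIGESTS): each row lists, in order, the digest
 * size followed by the KEY, IV, RESUMPTION and FINISHED sublabel
 * output sizes.  For example the AES_256_GCM_SHA384 row
 * { 48, 32, 12, 48, 48 } reads as a 48-byte SHA-384 digest, a 32-byte
 * AES-256 key, a 12-byte GCM IV and 48-byte resumption/finished
 * secrets.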
* *****************************************************************************/ static const Cpa32U cipherSuiteHKDFHashSizes [LAC_KEY_HKDF_CIPHERS_MAX][LAC_KEY_HKDF_SUBLABELS_MAX] = { {}, /* Not used */ { 32, 16, 12, 32, 32 }, /* AES_128_GCM_SHA256 */ { 48, 32, 12, 48, 48 }, /* AES_256_GCM_SHA384 */ { 32, 32, 12, 32, 32 }, /* CHACHA20_POLY1305_SHA256 */ { 32, 16, 12, 32, 32 }, /* AES_128_CCM_SHA256 */ { 32, 16, 12, 32, 32 } /* AES_128_CCM_8_SHA256 */ }; /** ****************************************************************************** * @ingroup LacSymKey * Key Generation MGF response handler * * @description * Handles Key Generation MGF response messages from the QAT. * * @param[in] lacCmdId Command id of the original request * @param[in] pOpaqueData Pointer to opaque data that was in request * @param[in] cmnRespFlags Indicates whether request succeeded * * @return void * *****************************************************************************/ static void LacSymKey_MgfHandleResponse(icp_qat_fw_la_cmd_id_t lacCmdId, void *pOpaqueData, icp_qat_fw_comn_flags cmnRespFlags) { CpaCyKeyGenMgfOpData *pMgfOpData = NULL; lac_sym_key_cookie_t *pCookie = NULL; CpaCyGenFlatBufCbFunc pKeyGenMgfCb = NULL; void *pCallbackTag = NULL; CpaFlatBuffer *pGeneratedKeyBuffer = NULL; CpaStatus status = CPA_STATUS_SUCCESS; CpaBoolean respStatusOk = (ICP_QAT_FW_COMN_STATUS_FLAG_OK == ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(cmnRespFlags)) ? CPA_TRUE : CPA_FALSE; pCookie = (lac_sym_key_cookie_t *)pOpaqueData; if (CPA_TRUE == respStatusOk) { status = CPA_STATUS_SUCCESS; LAC_KEY_STAT_INC(numMgfKeyGenCompleted, pCookie->instanceHandle); } else { status = CPA_STATUS_FAIL; LAC_KEY_STAT_INC(numMgfKeyGenCompletedErrors, pCookie->instanceHandle); } pKeyGenMgfCb = (CpaCyGenFlatBufCbFunc)(pCookie->pKeyGenCb); pMgfOpData = pCookie->pKeyGenOpData; pCallbackTag = pCookie->pCallbackTag; pGeneratedKeyBuffer = pCookie->pKeyGenOutputData; Lac_MemPoolEntryFree(pCookie); (*pKeyGenMgfCb)(pCallbackTag, status, pMgfOpData, pGeneratedKeyBuffer); } /** ****************************************************************************** * @ingroup LacSymKey * Synchronous mode of operation wrapper function * * @description * Wrapper function to implement synchronous mode of operation for * cpaCyKeyGenMgf and cpaCyKeyGenMgfExt function. * * @param[in] instanceHandle Instance handle * @param[in] pKeyGenCb Internal callback function pointer * @param[in] pCallbackTag Callback tag * @param[in] pKeyGenMgfOpData Pointer to user provided Op Data structure * @param[in] pGeneratedMaskBuffer Pointer to a buffer where generated mask * will be stored * @param[in] bIsExtRequest Indicates origin of function call; * if CPA_TRUE then the call comes from * cpaCyKeyGenMgfExt function, otherwise * from cpaCyKeyGenMgf * * @retval CPA_STATUS_SUCCESS Function executed successfully. * @retval CPA_STATUS_FAIL Function failed. * @retval CPA_STATUS_RETRY Function should be retried. * @retval CPA_STATUS_INVALID_PARAM Invalid parameter passed in. * @retval CPA_STATUS_RESOURCE Error related to system resources. 
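 *
 * The synchronous path reuses the asynchronous entry points: a sync
 * cookie is allocated, the async API is called with the internal
 * LacSync_GenFlatBufCb callback, and the caller blocks until that
 * callback fires or the wait times out.  Simplified outline of the
 * function below:
 *
 *     LacSync_CreateSyncCookie(&pSyncCallbackData);
 *     status = cpaCyKeyGenMgf(instanceHandle, LacSync_GenFlatBufCb,
 *                             pSyncCallbackData, pKeyGenMgfOpData,
 *                             pGeneratedMaskBuffer);
 *     if (CPA_STATUS_SUCCESS == status)
 *             LacSync_WaitForCallback(pSyncCallbackData,
 *                                     LAC_SYM_SYNC_CALLBACK_TIMEOUT,
 *                                     &status, NULL);
 *     else
 *             LacSync_SetSyncCookieComplete(pSyncCallbackData);
 *     LacSync_DestroySyncCookie(&pSyncCallbackData);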
* *****************************************************************************/ static CpaStatus LacSymKey_MgfSync(const CpaInstanceHandle instanceHandle, const CpaCyGenFlatBufCbFunc pKeyGenCb, void *pCallbackTag, const void *pKeyGenMgfOpData, CpaFlatBuffer *pGeneratedMaskBuffer, CpaBoolean bIsExtRequest) { CpaStatus status = CPA_STATUS_SUCCESS; lac_sync_op_data_t *pSyncCallbackData = NULL; status = LacSync_CreateSyncCookie(&pSyncCallbackData); if (CPA_STATUS_SUCCESS == status) { if (CPA_TRUE == bIsExtRequest) { status = cpaCyKeyGenMgfExt( instanceHandle, LacSync_GenFlatBufCb, pSyncCallbackData, (const CpaCyKeyGenMgfOpDataExt *)pKeyGenMgfOpData, pGeneratedMaskBuffer); } else { status = cpaCyKeyGenMgf(instanceHandle, LacSync_GenFlatBufCb, pSyncCallbackData, (const CpaCyKeyGenMgfOpData *) pKeyGenMgfOpData, pGeneratedMaskBuffer); } } else { /* Failure allocating sync cookie */ LAC_KEY_STAT_INC(numMgfKeyGenRequestErrors, instanceHandle); return status; } if (CPA_STATUS_SUCCESS == status) { CpaStatus syncStatus = CPA_STATUS_SUCCESS; syncStatus = LacSync_WaitForCallback(pSyncCallbackData, LAC_SYM_SYNC_CALLBACK_TIMEOUT, &status, NULL); /* If callback doesn't come back */ if (CPA_STATUS_SUCCESS != syncStatus) { LAC_KEY_STAT_INC(numMgfKeyGenCompletedErrors, instanceHandle); LAC_LOG_ERROR("Callback timed out"); status = syncStatus; } } else { /* As the Request was not sent the Callback will never * be called, so need to indicate that we're finished * with cookie so it can be destroyed. */ LacSync_SetSyncCookieComplete(pSyncCallbackData); } LacSync_DestroySyncCookie(&pSyncCallbackData); return status; } /** ****************************************************************************** * @ingroup LacSymKey * Perform MGF key gen operation * * @description * This function performs MGF key gen operation. It is common for requests * coming from both cpaCyKeyGenMgf and cpaCyKeyGenMgfExt QAT API * functions. * * @param[in] instanceHandle Instance handle * @param[in] pKeyGenCb Pointer to callback function to be invoked * when the operation is complete. * @param[in] pCallbackTag Opaque User Data for this specific call. * @param[in] pOpData Pointer to the Op Data structure provided by * the user in API function call. For calls * originating from cpaCyKeyGenMgfExt it will * point to CpaCyKeyGenMgfOpDataExt type of * structure while for calls originating from * cpaCyKeyGenMgf it will point to * CpaCyKeyGenMgfOpData type of structure. * @param[in] pKeyGenMgfOpData Pointer to the user provided * CpaCyKeyGenMgfOpData structure. For calls * originating from cpaCyKeyGenMgf it will * point to the same structure as pOpData * parameter; for calls originating from * cpaCyKeyGenMgfExt it will point to the * baseOpData member of the * CpaCyKeyGenMgfOpDataExt structure passed in * as a parameter to the API function call. * @param[in] pGeneratedMaskBuffer Pointer to a buffer where generated mask * will be stored * @param[in] hashAlgorithm Indicates which hash algorithm is to be used * to perform MGF key gen operation. For calls * originating from cpaCyKeyGenMgf it will * always be CPA_CY_SYM_HASH_SHA1. * * @retval CPA_STATUS_SUCCESS Function executed successfully. * @retval CPA_STATUS_FAIL Function failed. * @retval CPA_STATUS_RETRY Function should be retried. * @retval CPA_STATUS_INVALID_PARAM Invalid parameter passed in. * @retval CPA_STATUS_RESOURCE Error related to system resources. 
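 *
 * Background on the mask generation itself (MGF1, RFC 8017, appendix
 * B.2.1): the mask is the concatenation of digests computed over the
 * seed plus a 4-byte big-endian counter, truncated to maskLenInBytes:
 *
 *     mask = Hash(seed || 0x00000000) || Hash(seed || 0x00000001) || ...
 *
 * e.g. a 100-byte mask generated with SHA-1 (20-byte digest) takes
 * ceil(100 / 20) = 5 hash iterations.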
* *****************************************************************************/ static CpaStatus LacSymKey_MgfCommon(const CpaInstanceHandle instanceHandle, const CpaCyGenFlatBufCbFunc pKeyGenCb, void *pCallbackTag, const void *pOpData, const CpaCyKeyGenMgfOpData *pKeyGenMgfOpData, CpaFlatBuffer *pGeneratedMaskBuffer, CpaCySymHashAlgorithm hashAlgorithm) { CpaStatus status = CPA_STATUS_SUCCESS; icp_qat_fw_la_bulk_req_t keyGenReq = { { 0 } }; icp_qat_la_bulk_req_hdr_t keyGenReqHdr = { { 0 } }; icp_qat_fw_la_key_gen_common_t keyGenReqMid = { { 0 } }; icp_qat_la_bulk_req_ftr_t keyGenReqFtr = { { { 0 } } }; Cpa8U *pMsgDummy = NULL; Cpa8U *pCacheDummyHdr = NULL; Cpa8U *pCacheDummyMid = NULL; Cpa8U *pCacheDummyFtr = NULL; sal_qat_content_desc_info_t contentDescInfo = { 0 }; lac_sym_key_cookie_t *pCookie = NULL; lac_sym_cookie_t *pSymCookie = NULL; sal_crypto_service_t *pService = NULL; Cpa64U inputPhysAddr = 0; Cpa64U outputPhysAddr = 0; /* Structure initializer is supported by C99, but it is * not supported by some former Intel compiler. */ CpaCySymHashSetupData hashSetupData = { 0 }; Cpa32U hashBlkSizeInBytes = 0; lac_sym_qat_hash_alg_info_t *pHashAlgInfo = NULL; icp_qat_fw_serv_specif_flags laCmdFlags = 0; icp_qat_fw_comn_flags cmnRequestFlags = ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_PTR_TYPE_FLAT, QAT_COMN_CD_FLD_TYPE_64BIT_ADR); pService = (sal_crypto_service_t *)instanceHandle; LAC_CHECK_INSTANCE_HANDLE(instanceHandle); SAL_CHECK_INSTANCE_TYPE(instanceHandle, (SAL_SERVICE_TYPE_CRYPTO | SAL_SERVICE_TYPE_CRYPTO_SYM)); SAL_RUNNING_CHECK(instanceHandle); LAC_CHECK_NULL_PARAM(pOpData); LAC_CHECK_NULL_PARAM(pKeyGenMgfOpData); LAC_CHECK_NULL_PARAM(pGeneratedMaskBuffer); LAC_CHECK_NULL_PARAM(pGeneratedMaskBuffer->pData); LAC_CHECK_NULL_PARAM(pKeyGenMgfOpData->seedBuffer.pData); /* Maximum seed length for MGF1 request */ if (pKeyGenMgfOpData->seedBuffer.dataLenInBytes > ICP_QAT_FW_LA_MGF_SEED_LEN_MAX) { LAC_INVALID_PARAM_LOG("seedBuffer.dataLenInBytes"); return CPA_STATUS_INVALID_PARAM; } /* Maximum mask length for MGF1 request */ if (pKeyGenMgfOpData->maskLenInBytes > ICP_QAT_FW_LA_MGF_MASK_LEN_MAX) { LAC_INVALID_PARAM_LOG("maskLenInBytes"); return CPA_STATUS_INVALID_PARAM; } /* check for enough space in the flat buffer */ if (pKeyGenMgfOpData->maskLenInBytes > pGeneratedMaskBuffer->dataLenInBytes) { LAC_INVALID_PARAM_LOG("pGeneratedMaskBuffer.dataLenInBytes"); return CPA_STATUS_INVALID_PARAM; } /* Get hash alg info */ LacSymQat_HashAlgLookupGet(instanceHandle, hashAlgorithm, &pHashAlgInfo); /* Allocate the cookie */ pCookie = (lac_sym_key_cookie_t *)Lac_MemPoolEntryAlloc( pService->lac_sym_cookie_pool); if (NULL == pCookie) { LAC_LOG_ERROR("Cannot get mem pool entry"); status = CPA_STATUS_RESOURCE; } else if ((void *)CPA_STATUS_RETRY == pCookie) { pCookie = NULL; status = CPA_STATUS_RETRY; } else { pSymCookie = (lac_sym_cookie_t *)pCookie; } if (CPA_STATUS_SUCCESS == status) { /* populate the cookie */ pCookie->instanceHandle = instanceHandle; pCookie->pCallbackTag = pCallbackTag; pCookie->pKeyGenOpData = (void *)LAC_CONST_PTR_CAST(pOpData); pCookie->pKeyGenCb = pKeyGenCb; pCookie->pKeyGenOutputData = pGeneratedMaskBuffer; hashSetupData.hashAlgorithm = hashAlgorithm; hashSetupData.hashMode = CPA_CY_SYM_HASH_MODE_PLAIN; hashSetupData.digestResultLenInBytes = pHashAlgInfo->digestLength; /* Populate the CD ctrl Block (LW 27 - LW 31) * and the CD Hash HW setup block */ LacSymQat_HashContentDescInit( &(keyGenReqFtr), instanceHandle, &hashSetupData, /* point to base of hw setup block */ (Cpa8U 
*)pCookie->contentDesc, LAC_SYM_KEY_NO_HASH_BLK_OFFSET_QW, ICP_QAT_FW_SLICE_DRAM_WR, ICP_QAT_HW_AUTH_MODE0, /* just a plain hash */ CPA_FALSE, /* Not using sym Constants Table in Shared SRAM */ CPA_FALSE, /* not using the optimised Content Desc */ CPA_FALSE, /* Not using the stateful SHA3 Content Desc */ NULL, &hashBlkSizeInBytes); /* Populate the Req param LW 14-26 */ LacSymQat_KeyMgfRequestPopulate( &keyGenReqHdr, &keyGenReqMid, pKeyGenMgfOpData->seedBuffer.dataLenInBytes, pKeyGenMgfOpData->maskLenInBytes, (Cpa8U)pHashAlgInfo->digestLength); contentDescInfo.pData = pCookie->contentDesc; contentDescInfo.hardwareSetupBlockPhys = LAC_MEM_CAST_PTR_TO_UINT64( pSymCookie->keyContentDescPhyAddr); contentDescInfo.hwBlkSzQuadWords = LAC_BYTES_TO_QUADWORDS(hashBlkSizeInBytes); /* Populate common request fields */ inputPhysAddr = LAC_MEM_CAST_PTR_TO_UINT64(LAC_OS_VIRT_TO_PHYS_EXTERNAL( pService->generic_service_info, pKeyGenMgfOpData->seedBuffer.pData)); if (inputPhysAddr == 0) { LAC_LOG_ERROR( "Unable to get the seed buffer physical address"); status = CPA_STATUS_FAIL; } outputPhysAddr = LAC_MEM_CAST_PTR_TO_UINT64( LAC_OS_VIRT_TO_PHYS_EXTERNAL(pService->generic_service_info, pGeneratedMaskBuffer->pData)); if (outputPhysAddr == 0) { LAC_LOG_ERROR( "Unable to get the physical address of the mask"); status = CPA_STATUS_FAIL; } } if (CPA_STATUS_SUCCESS == status) { /* Make up the full keyGenReq struct from its constituents */ pMsgDummy = (Cpa8U *)&(keyGenReq); pCacheDummyHdr = (Cpa8U *)&(keyGenReqHdr); pCacheDummyMid = (Cpa8U *)&(keyGenReqMid); pCacheDummyFtr = (Cpa8U *)&(keyGenReqFtr); memcpy(pMsgDummy, pCacheDummyHdr, (LAC_LONG_WORD_IN_BYTES * LAC_SIZE_OF_CACHE_HDR_IN_LW)); memset((pMsgDummy + (LAC_LONG_WORD_IN_BYTES * LAC_SIZE_OF_CACHE_HDR_IN_LW)), 0, (LAC_LONG_WORD_IN_BYTES * LAC_SIZE_OF_CACHE_TO_CLEAR_IN_LW)); memcpy(pMsgDummy + (LAC_LONG_WORD_IN_BYTES * LAC_START_OF_CACHE_MID_IN_LW), pCacheDummyMid, (LAC_LONG_WORD_IN_BYTES * LAC_SIZE_OF_CACHE_MID_IN_LW)); memcpy(pMsgDummy + (LAC_LONG_WORD_IN_BYTES * LAC_START_OF_CACHE_FTR_IN_LW), pCacheDummyFtr, (LAC_LONG_WORD_IN_BYTES * LAC_SIZE_OF_CACHE_FTR_IN_LW)); SalQatMsg_ContentDescHdrWrite((icp_qat_fw_comn_req_t *)&( keyGenReq), &(contentDescInfo)); SalQatMsg_CmnHdrWrite((icp_qat_fw_comn_req_t *)&keyGenReq, ICP_QAT_FW_COMN_REQ_CPM_FW_LA, ICP_QAT_FW_LA_CMD_MGF1, cmnRequestFlags, laCmdFlags); /* * MGF uses a flat buffer but we can use zero for source and * dest length because the firmware will use the seed length, * hash length and mask length to find source length. 
*/ SalQatMsg_CmnMidWrite((icp_qat_fw_la_bulk_req_t *)&(keyGenReq), pCookie, LAC_SYM_KEY_QAT_PTR_TYPE, inputPhysAddr, outputPhysAddr, 0, 0); /* Send to QAT */ status = icp_adf_transPutMsg(pService->trans_handle_sym_tx, (void *)&(keyGenReq), LAC_QAT_SYM_REQ_SZ_LW); } if (CPA_STATUS_SUCCESS == status) { /* Update stats */ LAC_KEY_STAT_INC(numMgfKeyGenRequests, instanceHandle); } else { LAC_KEY_STAT_INC(numMgfKeyGenRequestErrors, instanceHandle); /* clean up memory */ if (NULL != pCookie) { Lac_MemPoolEntryFree(pCookie); } } return status; } /** * cpaCyKeyGenMgf */ CpaStatus cpaCyKeyGenMgf(const CpaInstanceHandle instanceHandle_in, const CpaCyGenFlatBufCbFunc pKeyGenCb, void *pCallbackTag, const CpaCyKeyGenMgfOpData *pKeyGenMgfOpData, CpaFlatBuffer *pGeneratedMaskBuffer) { CpaInstanceHandle instanceHandle = NULL; if (CPA_INSTANCE_HANDLE_SINGLE == instanceHandle_in) { instanceHandle = Lac_GetFirstHandle(SAL_SERVICE_TYPE_CRYPTO_SYM); } else { instanceHandle = instanceHandle_in; } /* If synchronous Operation */ if (NULL == pKeyGenCb) { return LacSymKey_MgfSync(instanceHandle, pKeyGenCb, pCallbackTag, (const void *)pKeyGenMgfOpData, pGeneratedMaskBuffer, CPA_FALSE); } /* Asynchronous Operation */ return LacSymKey_MgfCommon(instanceHandle, pKeyGenCb, pCallbackTag, (const void *)pKeyGenMgfOpData, pKeyGenMgfOpData, pGeneratedMaskBuffer, CPA_CY_SYM_HASH_SHA1); } /** * cpaCyKeyGenMgfExt */ CpaStatus cpaCyKeyGenMgfExt(const CpaInstanceHandle instanceHandle_in, const CpaCyGenFlatBufCbFunc pKeyGenCb, void *pCallbackTag, const CpaCyKeyGenMgfOpDataExt *pKeyGenMgfOpDataExt, CpaFlatBuffer *pGeneratedMaskBuffer) { CpaInstanceHandle instanceHandle = NULL; if (CPA_INSTANCE_HANDLE_SINGLE == instanceHandle_in) { instanceHandle = Lac_GetFirstHandle(SAL_SERVICE_TYPE_CRYPTO_SYM); } else { instanceHandle = instanceHandle_in; } /* If synchronous Operation */ if (NULL == pKeyGenCb) { return LacSymKey_MgfSync(instanceHandle, pKeyGenCb, pCallbackTag, (const void *)pKeyGenMgfOpDataExt, pGeneratedMaskBuffer, CPA_TRUE); } /* Param check specific for Ext function, rest of parameters validated * in LacSymKey_MgfCommon */ LAC_CHECK_NULL_PARAM(pKeyGenMgfOpDataExt); if (CPA_CY_SYM_HASH_MD5 > pKeyGenMgfOpDataExt->hashAlgorithm || CPA_CY_SYM_HASH_SHA512 < pKeyGenMgfOpDataExt->hashAlgorithm) { LAC_INVALID_PARAM_LOG("hashAlgorithm"); return CPA_STATUS_INVALID_PARAM; } /* Asynchronous Operation */ return LacSymKey_MgfCommon(instanceHandle, pKeyGenCb, pCallbackTag, (const void *)pKeyGenMgfOpDataExt, &pKeyGenMgfOpDataExt->baseOpData, pGeneratedMaskBuffer, pKeyGenMgfOpDataExt->hashAlgorithm); } /** ****************************************************************************** * @ingroup LacSymKey * Key Generation SSL & TLS response handler * * @description * Handles Key Generation SSL & TLS response messages from the QAT. 
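 *
 * The user-supplied callback is invoked with the original op data and
 * output buffer once the response has been processed.  A hypothetical
 * application-side callback (consume_key is a placeholder, not a real
 * symbol) might look like:
 *
 *     static void keyGenDone(void *pCallbackTag, CpaStatus status,
 *                            void *pOpData, CpaFlatBuffer *pOut)
 *     {
 *             if (CPA_STATUS_SUCCESS == status)
 *                     consume_key(pOut->pData, pOut->dataLenInBytes);
 *     }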
* * @param[in] lacCmdId Command id of the original request * @param[in] pOpaqueData Pointer to opaque data that was in request * @param[in] cmnRespFlags LA response flags * * @return void * *****************************************************************************/ static void LacSymKey_SslTlsHandleResponse(icp_qat_fw_la_cmd_id_t lacCmdId, void *pOpaqueData, icp_qat_fw_comn_flags cmnRespFlags) { void *pSslTlsOpData = NULL; CpaCyGenFlatBufCbFunc pKeyGenSslTlsCb = NULL; lac_sym_key_cookie_t *pCookie = NULL; void *pCallbackTag = NULL; CpaFlatBuffer *pGeneratedKeyBuffer = NULL; CpaStatus status = CPA_STATUS_SUCCESS; CpaBoolean respStatusOk = (ICP_QAT_FW_COMN_STATUS_FLAG_OK == ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(cmnRespFlags)) ? CPA_TRUE : CPA_FALSE; pCookie = (lac_sym_key_cookie_t *)pOpaqueData; pSslTlsOpData = pCookie->pKeyGenOpData; if (CPA_TRUE == respStatusOk) { LacKey_StatsInc(lacCmdId, LAC_KEY_COMPLETED, pCookie->instanceHandle); } else { status = CPA_STATUS_FAIL; LacKey_StatsInc(lacCmdId, LAC_KEY_COMPLETED_ERRORS, pCookie->instanceHandle); } pKeyGenSslTlsCb = (CpaCyGenFlatBufCbFunc)(pCookie->pKeyGenCb); pCallbackTag = pCookie->pCallbackTag; pGeneratedKeyBuffer = pCookie->pKeyGenOutputData; Lac_MemPoolEntryFree(pCookie); (*pKeyGenSslTlsCb)(pCallbackTag, status, pSslTlsOpData, pGeneratedKeyBuffer); } /** ******************************************************************************* * @ingroup LacSymKey * Synchronous mode of operation function wrapper for performing SSL/TLS * key gen operation * * @description * Synchronous mode of operation function wrapper for performing SSL/TLS * key gen operation * * @param[in] instanceHandle QAT device handle. * @param[in] pKeyGenCb Pointer to callback function to be invoked * when the operation is complete. * @param[in] pCallbackTag Opaque User Data for this specific call. * @param[in] lacCmdId Lac command ID (identify SSL & TLS ops) * @param[in] pKeyGenSslTlsOpData Structure containing all the data needed to * perform the SSL/TLS key generation * operation. * @param[in] hashAlgorithm Specifies the hash algorithm to use. * According to RFC5246, this should be * "SHA-256 or a stronger standard hash * function." * @param[out] pKeyGenOutputData pointer to where output result should be * written * * @retval CPA_STATUS_SUCCESS Function executed successfully. * @retval CPA_STATUS_FAIL Function failed. * @retval CPA_STATUS_RETRY Function should be retried. * @retval CPA_STATUS_INVALID_PARAM Invalid parameter passed in. * @retval CPA_STATUS_RESOURCE Error related to system resources. 
* *****************************************************************************/ static CpaStatus LacSymKey_SslTlsSync(CpaInstanceHandle instanceHandle, const CpaCyGenFlatBufCbFunc pKeyGenCb, void *pCallbackTag, icp_qat_fw_la_cmd_id_t lacCmdId, void *pKeyGenSslTlsOpData, Cpa8U hashAlgorithm, CpaFlatBuffer *pKeyGenOutpuData) { lac_sync_op_data_t *pSyncCallbackData = NULL; CpaStatus status = CPA_STATUS_SUCCESS; status = LacSync_CreateSyncCookie(&pSyncCallbackData); if (CPA_STATUS_SUCCESS == status) { status = LacSymKey_KeyGenSslTls_GenCommon(instanceHandle, pKeyGenCb, pSyncCallbackData, lacCmdId, pKeyGenSslTlsOpData, hashAlgorithm, pKeyGenOutpuData); } else { /* Failure allocating sync cookie */ LacKey_StatsInc(lacCmdId, LAC_KEY_REQUEST_ERRORS, instanceHandle); return status; } if (CPA_STATUS_SUCCESS == status) { CpaStatus syncStatus = CPA_STATUS_SUCCESS; syncStatus = LacSync_WaitForCallback(pSyncCallbackData, LAC_SYM_SYNC_CALLBACK_TIMEOUT, &status, NULL); /* If callback doesn't come back */ if (CPA_STATUS_SUCCESS != syncStatus) { LacKey_StatsInc(lacCmdId, LAC_KEY_COMPLETED_ERRORS, instanceHandle); LAC_LOG_ERROR("Callback timed out"); status = syncStatus; } } else { /* As the Request was not sent the Callback will never * be called, so need to indicate that we're finished * with cookie so it can be destroyed. */ LacSync_SetSyncCookieComplete(pSyncCallbackData); } LacSync_DestroySyncCookie(&pSyncCallbackData); return status; } static CpaStatus computeHashKey(CpaFlatBuffer *secret, CpaFlatBuffer *hash, CpaCySymHashAlgorithm *hashAlgorithm) { CpaStatus status = CPA_STATUS_SUCCESS; switch (*hashAlgorithm) { case CPA_CY_SYM_HASH_MD5: status = qatUtilsHashMD5Full(secret->pData, hash->pData, secret->dataLenInBytes); break; case CPA_CY_SYM_HASH_SHA1: status = qatUtilsHashSHA1Full(secret->pData, hash->pData, secret->dataLenInBytes); break; case CPA_CY_SYM_HASH_SHA256: status = qatUtilsHashSHA256Full(secret->pData, hash->pData, secret->dataLenInBytes); break; case CPA_CY_SYM_HASH_SHA384: status = qatUtilsHashSHA384Full(secret->pData, hash->pData, secret->dataLenInBytes); break; case CPA_CY_SYM_HASH_SHA512: status = qatUtilsHashSHA512Full(secret->pData, hash->pData, secret->dataLenInBytes); break; default: status = CPA_STATUS_FAIL; } return status; } static CpaStatus LacSymKey_KeyGenSslTls_GenCommon(CpaInstanceHandle instanceHandle, const CpaCyGenFlatBufCbFunc pKeyGenCb, void *pCallbackTag, icp_qat_fw_la_cmd_id_t lacCmdId, void *pKeyGenSslTlsOpData, Cpa8U hashAlgCipher, CpaFlatBuffer *pKeyGenOutputData) { CpaStatus status = CPA_STATUS_SUCCESS; CpaBoolean precompute = CPA_FALSE; icp_qat_fw_la_bulk_req_t keyGenReq = { { 0 } }; icp_qat_la_bulk_req_hdr_t keyGenReqHdr = { { 0 } }; icp_qat_fw_la_key_gen_common_t keyGenReqMid = { { 0 } }; icp_qat_la_bulk_req_ftr_t keyGenReqFtr = { { { 0 } } }; Cpa8U *pMsgDummy = NULL; Cpa8U *pCacheDummyHdr = NULL; Cpa8U *pCacheDummyMid = NULL; Cpa8U *pCacheDummyFtr = NULL; lac_sym_key_cookie_t *pCookie = NULL; lac_sym_cookie_t *pSymCookie = NULL; Cpa64U inputPhysAddr = 0; Cpa64U outputPhysAddr = 0; /* Structure initializer is supported by C99, but it is * not supported by some former Intel compiler. 
*/ CpaCySymHashSetupData hashSetupData = { 0 }; sal_qat_content_desc_info_t contentDescInfo = { 0 }; Cpa32U hashBlkSizeInBytes = 0; Cpa32U tlsPrefixLen = 0; CpaFlatBuffer inputSecret = { 0 }; CpaFlatBuffer hashKeyOutput = { 0 }; Cpa32U uSecretLen = 0; CpaCySymHashNestedModeSetupData *pNestedModeSetupData = &(hashSetupData.nestedModeSetupData); icp_qat_fw_serv_specif_flags laCmdFlags = 0; icp_qat_fw_comn_flags cmnRequestFlags = ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_PTR_TYPE_FLAT, QAT_COMN_CD_FLD_TYPE_64BIT_ADR); sal_crypto_service_t *pService = (sal_crypto_service_t *)instanceHandle; /* If synchronous Operation */ if (NULL == pKeyGenCb) { return LacSymKey_SslTlsSync(instanceHandle, LacSync_GenFlatBufCb, pCallbackTag, lacCmdId, pKeyGenSslTlsOpData, hashAlgCipher, pKeyGenOutputData); } /* Allocate the cookie */ pCookie = (lac_sym_key_cookie_t *)Lac_MemPoolEntryAlloc( pService->lac_sym_cookie_pool); if (NULL == pCookie) { LAC_LOG_ERROR("Cannot get mem pool entry"); status = CPA_STATUS_RESOURCE; } else if ((void *)CPA_STATUS_RETRY == pCookie) { pCookie = NULL; status = CPA_STATUS_RETRY; } else { pSymCookie = (lac_sym_cookie_t *)pCookie; } if (CPA_STATUS_SUCCESS == status) { icp_qat_hw_auth_mode_t qatHashMode = 0; if (ICP_QAT_FW_LA_CMD_SSL3_KEY_DERIVE == lacCmdId) { qatHashMode = ICP_QAT_HW_AUTH_MODE0; } else /* TLS v1.1, v1.2, v1.3 */ { qatHashMode = ICP_QAT_HW_AUTH_MODE2; } pCookie->instanceHandle = pService; pCookie->pCallbackTag = pCallbackTag; pCookie->pKeyGenCb = pKeyGenCb; pCookie->pKeyGenOpData = pKeyGenSslTlsOpData; pCookie->pKeyGenOutputData = pKeyGenOutputData; hashSetupData.hashMode = CPA_CY_SYM_HASH_MODE_NESTED; /* SSL3 */ if (ICP_QAT_FW_LA_CMD_SSL3_KEY_DERIVE == lacCmdId) { hashSetupData.hashAlgorithm = CPA_CY_SYM_HASH_SHA1; hashSetupData.digestResultLenInBytes = LAC_HASH_MD5_DIGEST_SIZE; pNestedModeSetupData->outerHashAlgorithm = CPA_CY_SYM_HASH_MD5; pNestedModeSetupData->pInnerPrefixData = NULL; pNestedModeSetupData->innerPrefixLenInBytes = 0; pNestedModeSetupData->pOuterPrefixData = NULL; pNestedModeSetupData->outerPrefixLenInBytes = 0; } /* TLS v1.1 */ else if (ICP_QAT_FW_LA_CMD_TLS_V1_1_KEY_DERIVE == lacCmdId) { CpaCyKeyGenTlsOpData *pKeyGenTlsOpData = (CpaCyKeyGenTlsOpData *)pKeyGenSslTlsOpData; hashSetupData.hashAlgorithm = CPA_CY_SYM_HASH_SHA1; hashSetupData.digestResultLenInBytes = LAC_HASH_MD5_DIGEST_SIZE; pNestedModeSetupData->outerHashAlgorithm = CPA_CY_SYM_HASH_MD5; uSecretLen = pKeyGenTlsOpData->secret.dataLenInBytes; /* We want to handle pre_master_secret > 128 bytes * therefore we * only verify if the current operation is Master Secret * Derive. * The other operations remain unchanged. */ if ((uSecretLen > ICP_QAT_FW_LA_TLS_V1_1_SECRET_LEN_MAX) && (CPA_CY_KEY_TLS_OP_MASTER_SECRET_DERIVE == pKeyGenTlsOpData->tlsOp || CPA_CY_KEY_TLS_OP_USER_DEFINED == pKeyGenTlsOpData->tlsOp)) { CpaCySymHashAlgorithm hashAlgorithm = (CpaCySymHashAlgorithm)hashAlgCipher; /* secret = [s1 | s2 ] * s1 = outer prefix, s2 = inner prefix * length of s1 and s2 = ceil(secret_length / 2) * (secret length + 1)/2 will always give the * ceil as * division by 2 * (>>1) will give the smallest integral value * not less than * arg */ tlsPrefixLen = (pKeyGenTlsOpData->secret.dataLenInBytes + 1) >> 1; inputSecret.dataLenInBytes = tlsPrefixLen; inputSecret.pData = pKeyGenTlsOpData->secret.pData; /* Since the pre_master_secret is > 128, we * split the input * pre_master_secret in 2 halves and compute the * MD5 of the * first half and the SHA1 on the second half. 
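 *
 * Worked example: a 129-byte pre_master_secret gives
 * tlsPrefixLen = (129 + 1) >> 1 = 65, so MD5 is computed over bytes
 * 0..64 and SHA-1 over bytes 64..128; byte 64 is used by both halves
 * because the length is odd, matching the "last byte of s1 is the
 * first byte of s2" rule used in the non-precompute path.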
*/ hashAlgorithm = CPA_CY_SYM_HASH_MD5; /* Initialize pointer where MD5 key will go. */ hashKeyOutput.pData = &pCookie->hashKeyBuffer[0]; hashKeyOutput.dataLenInBytes = LAC_HASH_MD5_DIGEST_SIZE; computeHashKey(&inputSecret, &hashKeyOutput, &hashAlgorithm); pNestedModeSetupData->pOuterPrefixData = &pCookie->hashKeyBuffer[0]; pNestedModeSetupData->outerPrefixLenInBytes = LAC_HASH_MD5_DIGEST_SIZE; /* Point to the second half of the * pre_master_secret */ inputSecret.pData = pKeyGenTlsOpData->secret.pData + (pKeyGenTlsOpData->secret.dataLenInBytes - tlsPrefixLen); /* Compute SHA1 on the second half of the * pre_master_secret */ hashAlgorithm = CPA_CY_SYM_HASH_SHA1; /* Initialize pointer where SHA1 key will go. */ hashKeyOutput.pData = &pCookie->hashKeyBuffer [LAC_HASH_MD5_DIGEST_SIZE]; hashKeyOutput.dataLenInBytes = LAC_HASH_SHA1_DIGEST_SIZE; computeHashKey(&inputSecret, &hashKeyOutput, &hashAlgorithm); pNestedModeSetupData->pInnerPrefixData = &pCookie->hashKeyBuffer [LAC_HASH_MD5_DIGEST_SIZE]; pNestedModeSetupData->innerPrefixLenInBytes = LAC_HASH_SHA1_DIGEST_SIZE; } else { /* secret = [s1 | s2 ] * s1 = outer prefix, s2 = inner prefix * length of s1 and s2 = ceil(secret_length / 2) * (secret length + 1)/2 will always give the * ceil as * division by 2 * (>>1) will give the smallest integral value * not less than * arg */ tlsPrefixLen = (pKeyGenTlsOpData->secret.dataLenInBytes + 1) >> 1; /* last byte of s1 will be first byte of s2 if * Length is odd */ pNestedModeSetupData->pInnerPrefixData = pKeyGenTlsOpData->secret.pData + (pKeyGenTlsOpData->secret.dataLenInBytes - tlsPrefixLen); pNestedModeSetupData->pOuterPrefixData = pKeyGenTlsOpData->secret.pData; pNestedModeSetupData->innerPrefixLenInBytes = pNestedModeSetupData ->outerPrefixLenInBytes = tlsPrefixLen; } } /* TLS v1.2 */ else if (ICP_QAT_FW_LA_CMD_TLS_V1_2_KEY_DERIVE == lacCmdId) { CpaCyKeyGenTlsOpData *pKeyGenTlsOpData = (CpaCyKeyGenTlsOpData *)pKeyGenSslTlsOpData; CpaCySymHashAlgorithm hashAlgorithm = (CpaCySymHashAlgorithm)hashAlgCipher; uSecretLen = pKeyGenTlsOpData->secret.dataLenInBytes; hashSetupData.hashAlgorithm = (CpaCySymHashAlgorithm)hashAlgorithm; hashSetupData.digestResultLenInBytes = (Cpa32U)getDigestSizeFromHashAlgo(hashAlgorithm); pNestedModeSetupData->outerHashAlgorithm = (CpaCySymHashAlgorithm)hashAlgorithm; if (CPA_CY_KEY_TLS_OP_MASTER_SECRET_DERIVE == pKeyGenTlsOpData->tlsOp || CPA_CY_KEY_TLS_OP_USER_DEFINED == pKeyGenTlsOpData->tlsOp) { switch (hashAlgorithm) { case CPA_CY_SYM_HASH_SM3: precompute = CPA_FALSE; break; case CPA_CY_SYM_HASH_SHA256: if (uSecretLen > ICP_QAT_FW_LA_TLS_V1_2_SECRET_LEN_MAX) { precompute = CPA_TRUE; } break; case CPA_CY_SYM_HASH_SHA384: case CPA_CY_SYM_HASH_SHA512: if (uSecretLen > ICP_QAT_FW_LA_TLS_SECRET_LEN_MAX) { precompute = CPA_TRUE; } break; default: break; } } if (CPA_TRUE == precompute) { /* Case when secret > algorithm block size * RFC 4868: For SHA-256 Block size is 512 bits, * for SHA-384 * and SHA-512 Block size is 1024 bits * Initialize pointer * where SHAxxx key will go. 
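 *
 * In other words the oversized secret is first reduced to a single
 * digest (the same rule HMAC applies to keys longer than the hash
 * block size, RFC 2104) and that digest is then used as both the
 * inner and the outer prefix; e.g. with SHA-256 a secret longer than
 * the 64-byte block collapses to one 32-byte hashKeyBuffer value.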
*/ hashKeyOutput.pData = &pCookie->hashKeyBuffer[0]; hashKeyOutput.dataLenInBytes = hashSetupData.digestResultLenInBytes; computeHashKey(&pKeyGenTlsOpData->secret, &hashKeyOutput, &hashSetupData.hashAlgorithm); /* Outer prefix = secret , inner prefix = secret * secret < 64 bytes */ pNestedModeSetupData->pInnerPrefixData = hashKeyOutput.pData; pNestedModeSetupData->pOuterPrefixData = hashKeyOutput.pData; pNestedModeSetupData->innerPrefixLenInBytes = hashKeyOutput.dataLenInBytes; pNestedModeSetupData->outerPrefixLenInBytes = hashKeyOutput.dataLenInBytes; } else { /* Outer prefix = secret , inner prefix = secret * secret <= 64 bytes */ pNestedModeSetupData->pInnerPrefixData = pKeyGenTlsOpData->secret.pData; pNestedModeSetupData->pOuterPrefixData = pKeyGenTlsOpData->secret.pData; pNestedModeSetupData->innerPrefixLenInBytes = pKeyGenTlsOpData->secret.dataLenInBytes; pNestedModeSetupData->outerPrefixLenInBytes = pKeyGenTlsOpData->secret.dataLenInBytes; } } /* TLS v1.3 */ else if ((ICP_QAT_FW_LA_CMD_HKDF_EXTRACT <= lacCmdId) && (ICP_QAT_FW_LA_CMD_HKDF_EXTRACT_AND_EXPAND_LABEL >= lacCmdId)) { CpaCyKeyGenHKDFOpData *pKeyGenTlsOpData = (CpaCyKeyGenHKDFOpData *)pKeyGenSslTlsOpData; CpaCySymHashAlgorithm hashAlgorithm = getHashAlgorithmFromCipherSuiteHKDF(hashAlgCipher); /* Set HASH data */ hashSetupData.hashAlgorithm = hashAlgorithm; /* Calculate digest length from the HASH type */ hashSetupData.digestResultLenInBytes = cipherSuiteHKDFHashSizes[hashAlgCipher] [LAC_KEY_HKDF_DIGESTS]; /* Outer Hash type is the same as inner hash type */ pNestedModeSetupData->outerHashAlgorithm = hashAlgorithm; /* EXPAND (PRK): * Outer prefix = secret, inner prefix = secret * EXTRACT (SEED/SALT): * Outer prefix = seed, inner prefix = seed * Secret <= 64 Bytes * We do not pre compute as secret can't be larger than * 64 bytes */ if ((ICP_QAT_FW_LA_CMD_HKDF_EXPAND == lacCmdId) || (ICP_QAT_FW_LA_CMD_HKDF_EXPAND_LABEL == lacCmdId)) { pNestedModeSetupData->pInnerPrefixData = pKeyGenTlsOpData->secret; pNestedModeSetupData->pOuterPrefixData = pKeyGenTlsOpData->secret; pNestedModeSetupData->innerPrefixLenInBytes = pKeyGenTlsOpData->secretLen; pNestedModeSetupData->outerPrefixLenInBytes = pKeyGenTlsOpData->secretLen; } else { pNestedModeSetupData->pInnerPrefixData = pKeyGenTlsOpData->seed; pNestedModeSetupData->pOuterPrefixData = pKeyGenTlsOpData->seed; pNestedModeSetupData->innerPrefixLenInBytes = pKeyGenTlsOpData->seedLen; pNestedModeSetupData->outerPrefixLenInBytes = pKeyGenTlsOpData->seedLen; } } /* Set the footer Data. 
* Note that following function doesn't look at inner/outer * prefix pointers in nested digest ctx */ LacSymQat_HashContentDescInit( &keyGenReqFtr, instanceHandle, &hashSetupData, pCookie ->contentDesc, /* Pointer to base of hw setup block */ LAC_SYM_KEY_NO_HASH_BLK_OFFSET_QW, ICP_QAT_FW_SLICE_DRAM_WR, qatHashMode, CPA_FALSE, /* Not using sym Constants Table in Shared SRAM */ CPA_FALSE, /* not using the optimised content Desc */ CPA_FALSE, /* Not using the stateful SHA3 Content Desc */ NULL, /* precompute data */ &hashBlkSizeInBytes); /* SSL3 */ if (ICP_QAT_FW_LA_CMD_SSL3_KEY_DERIVE == lacCmdId) { CpaCyKeyGenSslOpData *pKeyGenSslOpData = (CpaCyKeyGenSslOpData *)pKeyGenSslTlsOpData; Cpa8U *pLabel = NULL; Cpa32U labelLen = 0; Cpa8U iterations = 0; Cpa64U labelPhysAddr = 0; /* Iterations = ceiling of output required / output per * iteration Ceiling of a / b = (a + (b-1)) / b */ iterations = (pKeyGenSslOpData->generatedKeyLenInBytes + (LAC_SYM_QAT_KEY_SSL_BYTES_PER_ITERATION - 1)) >> LAC_SYM_QAT_KEY_SSL_ITERATIONS_SHIFT; if (CPA_CY_KEY_SSL_OP_USER_DEFINED == pKeyGenSslOpData->sslOp) { pLabel = pKeyGenSslOpData->userLabel.pData; labelLen = pKeyGenSslOpData->userLabel.dataLenInBytes; labelPhysAddr = LAC_OS_VIRT_TO_PHYS_EXTERNAL( pService->generic_service_info, pLabel); if (labelPhysAddr == 0) { LAC_LOG_ERROR( "Unable to get the physical address of the" " label"); status = CPA_STATUS_FAIL; } } else { pLabel = pService->pSslLabel; /* Calculate label length. * eg. 3 iterations is ABBCCC so length is 6 */ labelLen = ((iterations * iterations) + iterations) >> 1; labelPhysAddr = LAC_OS_VIRT_TO_PHYS_INTERNAL(pLabel); } LacSymQat_KeySslRequestPopulate( &keyGenReqHdr, &keyGenReqMid, pKeyGenSslOpData->generatedKeyLenInBytes, labelLen, pKeyGenSslOpData->secret.dataLenInBytes, iterations); LacSymQat_KeySslKeyMaterialInputPopulate( &(pService->generic_service_info), &(pCookie->u.sslKeyInput), pKeyGenSslOpData->seed.pData, labelPhysAddr, pKeyGenSslOpData->secret.pData); inputPhysAddr = LAC_MEM_CAST_PTR_TO_UINT64( pSymCookie->keySslKeyInputPhyAddr); } /* TLS v1.1, v1.2 */ else if (ICP_QAT_FW_LA_CMD_TLS_V1_1_KEY_DERIVE == lacCmdId || ICP_QAT_FW_LA_CMD_TLS_V1_2_KEY_DERIVE == lacCmdId) { CpaCyKeyGenTlsOpData *pKeyGenTlsOpData = (CpaCyKeyGenTlsOpData *)pKeyGenSslTlsOpData; lac_sym_qat_hash_state_buffer_info_t hashStateBufferInfo = { 0 }; CpaBoolean hashStateBuffer = CPA_FALSE; icp_qat_fw_auth_cd_ctrl_hdr_t *pHashControlBlock = (icp_qat_fw_auth_cd_ctrl_hdr_t *)&( keyGenReqFtr.cd_ctrl); icp_qat_la_auth_req_params_t *pHashReqParams = NULL; Cpa8U *pLabel = NULL; Cpa32U labelLen = 0; Cpa64U labelPhysAddr = 0; hashStateBufferInfo.pData = pCookie->hashStateBuffer; hashStateBufferInfo.pDataPhys = LAC_MEM_CAST_PTR_TO_UINT64( pSymCookie->keyHashStateBufferPhyAddr); hashStateBufferInfo.stateStorageSzQuadWords = 0; LacSymQat_HashSetupReqParamsMetaData(&(keyGenReqFtr), instanceHandle, &(hashSetupData), hashStateBuffer, qatHashMode, CPA_FALSE); pHashReqParams = (icp_qat_la_auth_req_params_t *)&( keyGenReqFtr.serv_specif_rqpars); hashStateBufferInfo.prefixAadSzQuadWords = LAC_BYTES_TO_QUADWORDS( pHashReqParams->u2.inner_prefix_sz + pHashControlBlock->outer_prefix_sz); /* Copy prefix data into hash state buffer */ pMsgDummy = (Cpa8U *)&(keyGenReq); pCacheDummyHdr = (Cpa8U *)&(keyGenReqHdr); pCacheDummyMid = (Cpa8U *)&(keyGenReqMid); pCacheDummyFtr = (Cpa8U *)&(keyGenReqFtr); memcpy(pMsgDummy, pCacheDummyHdr, (LAC_LONG_WORD_IN_BYTES * LAC_SIZE_OF_CACHE_HDR_IN_LW)); memcpy(pMsgDummy + (LAC_LONG_WORD_IN_BYTES * 
LAC_START_OF_CACHE_MID_IN_LW), pCacheDummyMid, (LAC_LONG_WORD_IN_BYTES * LAC_SIZE_OF_CACHE_MID_IN_LW)); memcpy(pMsgDummy + (LAC_LONG_WORD_IN_BYTES * LAC_START_OF_CACHE_FTR_IN_LW), pCacheDummyFtr, (LAC_LONG_WORD_IN_BYTES * LAC_SIZE_OF_CACHE_FTR_IN_LW)); LacSymQat_HashStatePrefixAadBufferPopulate( &hashStateBufferInfo, &keyGenReqFtr, pNestedModeSetupData->pInnerPrefixData, pNestedModeSetupData->innerPrefixLenInBytes, pNestedModeSetupData->pOuterPrefixData, pNestedModeSetupData->outerPrefixLenInBytes); /* Firmware only looks at hash state buffer pointer and * the * hash state buffer size so all other fields are set to * 0 */ LacSymQat_HashRequestParamsPopulate( &(keyGenReq), 0, /* Auth offset */ 0, /* Auth length */ &(pService->generic_service_info), &hashStateBufferInfo, /* Hash state prefix buffer */ ICP_QAT_FW_LA_PARTIAL_NONE, 0, /* Hash result size */ CPA_FALSE, NULL, CPA_CY_SYM_HASH_NONE, /* Hash algorithm */ NULL); /* HKDF only */ /* Set up the labels and their length */ if (CPA_CY_KEY_TLS_OP_USER_DEFINED == pKeyGenTlsOpData->tlsOp) { pLabel = pKeyGenTlsOpData->userLabel.pData; labelLen = pKeyGenTlsOpData->userLabel.dataLenInBytes; labelPhysAddr = LAC_OS_VIRT_TO_PHYS_EXTERNAL( pService->generic_service_info, pLabel); if (labelPhysAddr == 0) { LAC_LOG_ERROR( "Unable to get the physical address of the" " label"); status = CPA_STATUS_FAIL; } } else if (CPA_CY_KEY_TLS_OP_MASTER_SECRET_DERIVE == pKeyGenTlsOpData->tlsOp) { pLabel = pService->pTlsLabel->masterSecret; labelLen = sizeof( LAC_SYM_KEY_TLS_MASTER_SECRET_LABEL) - 1; labelPhysAddr = LAC_OS_VIRT_TO_PHYS_INTERNAL(pLabel); } else if (CPA_CY_KEY_TLS_OP_KEY_MATERIAL_DERIVE == pKeyGenTlsOpData->tlsOp) { pLabel = pService->pTlsLabel->keyMaterial; labelLen = sizeof(LAC_SYM_KEY_TLS_KEY_MATERIAL_LABEL) - 1; labelPhysAddr = LAC_OS_VIRT_TO_PHYS_INTERNAL(pLabel); } else if (CPA_CY_KEY_TLS_OP_CLIENT_FINISHED_DERIVE == pKeyGenTlsOpData->tlsOp) { pLabel = pService->pTlsLabel->clientFinished; labelLen = sizeof(LAC_SYM_KEY_TLS_CLIENT_FIN_LABEL) - 1; labelPhysAddr = LAC_OS_VIRT_TO_PHYS_INTERNAL(pLabel); } else { pLabel = pService->pTlsLabel->serverFinished; labelLen = sizeof(LAC_SYM_KEY_TLS_SERVER_FIN_LABEL) - 1; labelPhysAddr = LAC_OS_VIRT_TO_PHYS_INTERNAL(pLabel); } LacSymQat_KeyTlsRequestPopulate( &keyGenReqMid, pKeyGenTlsOpData->generatedKeyLenInBytes, labelLen, pKeyGenTlsOpData->secret.dataLenInBytes, pKeyGenTlsOpData->seed.dataLenInBytes, lacCmdId); LacSymQat_KeyTlsKeyMaterialInputPopulate( &(pService->generic_service_info), &(pCookie->u.tlsKeyInput), pKeyGenTlsOpData->seed.pData, labelPhysAddr); inputPhysAddr = LAC_MEM_CAST_PTR_TO_UINT64( pSymCookie->keyTlsKeyInputPhyAddr); } /* TLS v1.3 */ else if (ICP_QAT_FW_LA_CMD_HKDF_EXTRACT <= lacCmdId && ICP_QAT_FW_LA_CMD_HKDF_EXTRACT_AND_EXPAND >= lacCmdId) { CpaCyKeyGenHKDFOpData *pKeyGenTlsOpData = (CpaCyKeyGenHKDFOpData *)pKeyGenSslTlsOpData; lac_sym_qat_hash_state_buffer_info_t hashStateBufferInfo = { 0 }; CpaBoolean hashStateBuffer = CPA_FALSE; icp_qat_fw_auth_cd_ctrl_hdr_t *pHashControlBlock = (icp_qat_fw_auth_cd_ctrl_hdr_t *)&( keyGenReqFtr.cd_ctrl); icp_qat_la_auth_req_params_t *pHashReqParams = NULL; hashStateBufferInfo.pData = pCookie->hashStateBuffer; hashStateBufferInfo.pDataPhys = LAC_MEM_CAST_PTR_TO_UINT64( pSymCookie->keyHashStateBufferPhyAddr); hashStateBufferInfo.stateStorageSzQuadWords = 0; LacSymQat_HashSetupReqParamsMetaData(&(keyGenReqFtr), instanceHandle, &(hashSetupData), hashStateBuffer, qatHashMode, CPA_FALSE); pHashReqParams = (icp_qat_la_auth_req_params_t *)&( 
keyGenReqFtr.serv_specif_rqpars); hashStateBufferInfo.prefixAadSzQuadWords = LAC_BYTES_TO_QUADWORDS( pHashReqParams->u2.inner_prefix_sz + pHashControlBlock->outer_prefix_sz); /* Copy prefix data into hash state buffer */ pMsgDummy = (Cpa8U *)&(keyGenReq); pCacheDummyHdr = (Cpa8U *)&(keyGenReqHdr); pCacheDummyMid = (Cpa8U *)&(keyGenReqMid); pCacheDummyFtr = (Cpa8U *)&(keyGenReqFtr); memcpy(pMsgDummy, pCacheDummyHdr, (LAC_LONG_WORD_IN_BYTES * LAC_SIZE_OF_CACHE_HDR_IN_LW)); memcpy(pMsgDummy + (LAC_LONG_WORD_IN_BYTES * LAC_START_OF_CACHE_MID_IN_LW), pCacheDummyMid, (LAC_LONG_WORD_IN_BYTES * LAC_SIZE_OF_CACHE_MID_IN_LW)); memcpy(pMsgDummy + (LAC_LONG_WORD_IN_BYTES * LAC_START_OF_CACHE_FTR_IN_LW), pCacheDummyFtr, (LAC_LONG_WORD_IN_BYTES * LAC_SIZE_OF_CACHE_FTR_IN_LW)); LacSymQat_HashStatePrefixAadBufferPopulate( &hashStateBufferInfo, &keyGenReqFtr, pNestedModeSetupData->pInnerPrefixData, pNestedModeSetupData->innerPrefixLenInBytes, pNestedModeSetupData->pOuterPrefixData, pNestedModeSetupData->outerPrefixLenInBytes); /* Firmware only looks at hash state buffer pointer and * the * hash state buffer size so all other fields are set to * 0 */ LacSymQat_HashRequestParamsPopulate( &(keyGenReq), 0, /* Auth offset */ 0, /* Auth length */ &(pService->generic_service_info), &hashStateBufferInfo, /* Hash state prefix buffer */ ICP_QAT_FW_LA_PARTIAL_NONE, 0, /* Hash result size */ CPA_FALSE, NULL, CPA_CY_SYM_HASH_NONE, /* Hash algorithm */ pKeyGenTlsOpData->secret); /* IKM or PRK */ LacSymQat_KeyTlsRequestPopulate( &keyGenReqMid, cipherSuiteHKDFHashSizes[hashAlgCipher] [LAC_KEY_HKDF_DIGESTS], /* For EXTRACT, EXPAND, FW expects info to be passed as label */ pKeyGenTlsOpData->infoLen, pKeyGenTlsOpData->secretLen, pKeyGenTlsOpData->seedLen, lacCmdId); LacSymQat_KeyTlsHKDFKeyMaterialInputPopulate( &(pService->generic_service_info), &(pCookie->u.tlsHKDFKeyInput), pKeyGenTlsOpData, 0, /* No subLabels used */ lacCmdId); /* Pass op being performed */ inputPhysAddr = LAC_MEM_CAST_PTR_TO_UINT64( pSymCookie->keyTlsKeyInputPhyAddr); } /* TLS v1.3 LABEL */ else if (ICP_QAT_FW_LA_CMD_HKDF_EXPAND_LABEL == lacCmdId || ICP_QAT_FW_LA_CMD_HKDF_EXTRACT_AND_EXPAND_LABEL == lacCmdId) { CpaCyKeyGenHKDFOpData *pKeyGenTlsOpData = (CpaCyKeyGenHKDFOpData *)pKeyGenSslTlsOpData; Cpa64U subLabelsPhysAddr = 0; lac_sym_qat_hash_state_buffer_info_t hashStateBufferInfo = { 0 }; CpaBoolean hashStateBuffer = CPA_FALSE; icp_qat_fw_auth_cd_ctrl_hdr_t *pHashControlBlock = (icp_qat_fw_auth_cd_ctrl_hdr_t *)&( keyGenReqFtr.cd_ctrl); icp_qat_la_auth_req_params_t *pHashReqParams = NULL; hashStateBufferInfo.pData = pCookie->hashStateBuffer; hashStateBufferInfo.pDataPhys = LAC_MEM_CAST_PTR_TO_UINT64( pSymCookie->keyHashStateBufferPhyAddr); hashStateBufferInfo.stateStorageSzQuadWords = 0; LacSymQat_HashSetupReqParamsMetaData(&(keyGenReqFtr), instanceHandle, &(hashSetupData), hashStateBuffer, qatHashMode, CPA_FALSE); pHashReqParams = (icp_qat_la_auth_req_params_t *)&( keyGenReqFtr.serv_specif_rqpars); hashStateBufferInfo.prefixAadSzQuadWords = LAC_BYTES_TO_QUADWORDS( pHashReqParams->u2.inner_prefix_sz + pHashControlBlock->outer_prefix_sz); /* Copy prefix data into hash state buffer */ pMsgDummy = (Cpa8U *)&(keyGenReq); pCacheDummyHdr = (Cpa8U *)&(keyGenReqHdr); pCacheDummyMid = (Cpa8U *)&(keyGenReqMid); pCacheDummyFtr = (Cpa8U *)&(keyGenReqFtr); memcpy(pMsgDummy, pCacheDummyHdr, (LAC_LONG_WORD_IN_BYTES * LAC_SIZE_OF_CACHE_HDR_IN_LW)); memcpy(pMsgDummy + (LAC_LONG_WORD_IN_BYTES * LAC_START_OF_CACHE_MID_IN_LW), pCacheDummyMid, (LAC_LONG_WORD_IN_BYTES * 
LAC_SIZE_OF_CACHE_MID_IN_LW)); memcpy(pMsgDummy + (LAC_LONG_WORD_IN_BYTES * LAC_START_OF_CACHE_FTR_IN_LW), pCacheDummyFtr, (LAC_LONG_WORD_IN_BYTES * LAC_SIZE_OF_CACHE_FTR_IN_LW)); LacSymQat_HashStatePrefixAadBufferPopulate( &hashStateBufferInfo, &keyGenReqFtr, pNestedModeSetupData->pInnerPrefixData, pNestedModeSetupData->innerPrefixLenInBytes, pNestedModeSetupData->pOuterPrefixData, pNestedModeSetupData->outerPrefixLenInBytes); /* Firmware only looks at hash state buffer pointer and * the * hash state buffer size so all other fields are set to * 0 */ LacSymQat_HashRequestParamsPopulate( &(keyGenReq), 0, /* Auth offset */ 0, /* Auth length */ &(pService->generic_service_info), &hashStateBufferInfo, /* Hash state prefix buffer */ ICP_QAT_FW_LA_PARTIAL_NONE, 0, /* Hash result size */ CPA_FALSE, NULL, CPA_CY_SYM_HASH_NONE, /* Hash algorithm */ pKeyGenTlsOpData->secret); /* IKM or PRK */ LacSymQat_KeyTlsRequestPopulate( &keyGenReqMid, cipherSuiteHKDFHashSizes[hashAlgCipher] [LAC_KEY_HKDF_DIGESTS], pKeyGenTlsOpData->numLabels, /* Number of Labels */ pKeyGenTlsOpData->secretLen, pKeyGenTlsOpData->seedLen, lacCmdId); /* Get physical address of subLabels */ switch (hashAlgCipher) { case CPA_CY_HKDF_TLS_AES_128_GCM_SHA256: /* Fall Through */ case CPA_CY_HKDF_TLS_AES_128_CCM_SHA256: case CPA_CY_HKDF_TLS_AES_128_CCM_8_SHA256: subLabelsPhysAddr = pService->pTlsHKDFSubLabel ->sublabelPhysAddr256; break; case CPA_CY_HKDF_TLS_CHACHA20_POLY1305_SHA256: subLabelsPhysAddr = pService->pTlsHKDFSubLabel ->sublabelPhysAddrChaChaPoly; break; case CPA_CY_HKDF_TLS_AES_256_GCM_SHA384: subLabelsPhysAddr = pService->pTlsHKDFSubLabel ->sublabelPhysAddr384; break; default: break; } LacSymQat_KeyTlsHKDFKeyMaterialInputPopulate( &(pService->generic_service_info), &(pCookie->u.tlsHKDFKeyInput), pKeyGenTlsOpData, subLabelsPhysAddr, lacCmdId); /* Pass op being performed */ inputPhysAddr = LAC_MEM_CAST_PTR_TO_UINT64( pSymCookie->keyTlsKeyInputPhyAddr); } outputPhysAddr = LAC_MEM_CAST_PTR_TO_UINT64( LAC_OS_VIRT_TO_PHYS_EXTERNAL(pService->generic_service_info, pKeyGenOutputData->pData)); if (outputPhysAddr == 0) { LAC_LOG_ERROR( "Unable to get the physical address of the" " output buffer"); status = CPA_STATUS_FAIL; } } if (CPA_STATUS_SUCCESS == status) { Cpa8U lw26[4]; char *tmp = NULL; unsigned char a; int n = 0; /* Make up the full keyGenReq struct from its constituents * before calling the SalQatMsg functions below. 
* Note: The full cache struct has been reduced to a * header, mid and footer for memory size reduction */ pMsgDummy = (Cpa8U *)&(keyGenReq); pCacheDummyHdr = (Cpa8U *)&(keyGenReqHdr); pCacheDummyMid = (Cpa8U *)&(keyGenReqMid); pCacheDummyFtr = (Cpa8U *)&(keyGenReqFtr); memcpy(pMsgDummy, pCacheDummyHdr, (LAC_LONG_WORD_IN_BYTES * LAC_SIZE_OF_CACHE_HDR_IN_LW)); memcpy(pMsgDummy + (LAC_LONG_WORD_IN_BYTES * LAC_START_OF_CACHE_MID_IN_LW), pCacheDummyMid, (LAC_LONG_WORD_IN_BYTES * LAC_SIZE_OF_CACHE_MID_IN_LW)); memcpy(&lw26, pMsgDummy + (LAC_LONG_WORD_IN_BYTES * LAC_START_OF_CACHE_FTR_IN_LW), LAC_LONG_WORD_IN_BYTES); memcpy(pMsgDummy + (LAC_LONG_WORD_IN_BYTES * LAC_START_OF_CACHE_FTR_IN_LW), pCacheDummyFtr, (LAC_LONG_WORD_IN_BYTES * LAC_SIZE_OF_CACHE_FTR_IN_LW)); tmp = (char *)(pMsgDummy + (LAC_LONG_WORD_IN_BYTES * LAC_START_OF_CACHE_FTR_IN_LW)); /* Copy LW26, or'd with what's already there, into the Msg, for * TLS */ for (n = 0; n < LAC_LONG_WORD_IN_BYTES; n++) { a = (unsigned char)*(tmp + n); lw26[n] = lw26[n] | a; } memcpy(pMsgDummy + (LAC_LONG_WORD_IN_BYTES * LAC_START_OF_CACHE_FTR_IN_LW), &lw26, LAC_LONG_WORD_IN_BYTES); contentDescInfo.pData = pCookie->contentDesc; contentDescInfo.hardwareSetupBlockPhys = LAC_MEM_CAST_PTR_TO_UINT64( pSymCookie->keyContentDescPhyAddr); contentDescInfo.hwBlkSzQuadWords = LAC_BYTES_TO_QUADWORDS(hashBlkSizeInBytes); /* Populate common request fields */ SalQatMsg_ContentDescHdrWrite((icp_qat_fw_comn_req_t *)&( keyGenReq), &(contentDescInfo)); SalQatMsg_CmnHdrWrite((icp_qat_fw_comn_req_t *)&keyGenReq, ICP_QAT_FW_COMN_REQ_CPM_FW_LA, lacCmdId, cmnRequestFlags, laCmdFlags); SalQatMsg_CmnMidWrite((icp_qat_fw_la_bulk_req_t *)&(keyGenReq), pCookie, LAC_SYM_KEY_QAT_PTR_TYPE, inputPhysAddr, outputPhysAddr, 0, 0); /* Send to QAT */ status = icp_adf_transPutMsg(pService->trans_handle_sym_tx, (void *)&(keyGenReq), LAC_QAT_SYM_REQ_SZ_LW); } if (CPA_STATUS_SUCCESS == status) { /* Update stats */ LacKey_StatsInc(lacCmdId, LAC_KEY_REQUESTS, pCookie->instanceHandle); } else { /* Clean up cookie memory */ if (NULL != pCookie) { LacKey_StatsInc(lacCmdId, LAC_KEY_REQUEST_ERRORS, pCookie->instanceHandle); Lac_MemPoolEntryFree(pCookie); } } return status; } /** * @ingroup LacSymKey * Parameters check for TLS v1.0/1.1, v1.2, v1.3 and SSL3 * @description * Check user parameters against the firmware/spec requirements. * * @param[in] pKeyGenOpData Pointer to a structure containing all * the data needed to perform the key * generation operation. * @param[in] hashAlgCipher Specifies the hash algorithm, * or cipher we are using. * According to RFC5246, this should be * "SHA-256 or a stronger standard hash * function." * @param[in] pGeneratedKeyBuffer User output buffers. * @param[in] cmdId Keygen operation to perform. 
*/ static CpaStatus LacSymKey_CheckParamSslTls(const void *pKeyGenOpData, Cpa8U hashAlgCipher, const CpaFlatBuffer *pGeneratedKeyBuffer, icp_qat_fw_la_cmd_id_t cmdId) { /* Api max value */ Cpa32U maxSecretLen = 0; Cpa32U maxSeedLen = 0; Cpa32U maxOutputLen = 0; Cpa32U maxInfoLen = 0; Cpa32U maxLabelLen = 0; /* User info */ Cpa32U uSecretLen = 0; Cpa32U uSeedLen = 0; Cpa32U uOutputLen = 0; LAC_CHECK_NULL_PARAM(pKeyGenOpData); LAC_CHECK_NULL_PARAM(pGeneratedKeyBuffer); LAC_CHECK_NULL_PARAM(pGeneratedKeyBuffer->pData); if (ICP_QAT_FW_LA_CMD_SSL3_KEY_DERIVE == cmdId) { CpaCyKeyGenSslOpData *opData = (CpaCyKeyGenSslOpData *)pKeyGenOpData; /* User info */ uSecretLen = opData->secret.dataLenInBytes; uSeedLen = opData->seed.dataLenInBytes; uOutputLen = opData->generatedKeyLenInBytes; /* Api max value */ maxSecretLen = ICP_QAT_FW_LA_SSL_SECRET_LEN_MAX; maxSeedLen = ICP_QAT_FW_LA_SSL_SEED_LEN_MAX; maxOutputLen = ICP_QAT_FW_LA_SSL_OUTPUT_LEN_MAX; /* Check user buffers */ LAC_CHECK_NULL_PARAM(opData->secret.pData); LAC_CHECK_NULL_PARAM(opData->seed.pData); /* Check operation */ if ((Cpa32U)opData->sslOp > CPA_CY_KEY_SSL_OP_USER_DEFINED) { LAC_INVALID_PARAM_LOG("opData->sslOp"); return CPA_STATUS_INVALID_PARAM; } if ((Cpa32U)opData->sslOp == CPA_CY_KEY_SSL_OP_USER_DEFINED) { LAC_CHECK_NULL_PARAM(opData->userLabel.pData); /* Maximum label length for SSL Key Gen request */ if (opData->userLabel.dataLenInBytes > ICP_QAT_FW_LA_SSL_LABEL_LEN_MAX) { LAC_INVALID_PARAM_LOG( "userLabel.dataLenInBytes"); return CPA_STATUS_INVALID_PARAM; } } + /* check 0 secret length as it is not valid for SSL3 Key Gen + * request */ + if (0 == uSecretLen) { + LAC_INVALID_PARAM_LOG1("%u secret.dataLenInBytes", + uSecretLen); + return CPA_STATUS_INVALID_PARAM; + } + /* Only seed length for SSL3 Key Gen request */ if (maxSeedLen != uSeedLen) { LAC_INVALID_PARAM_LOG("seed.dataLenInBytes"); return CPA_STATUS_INVALID_PARAM; } /* Maximum output length for SSL3 Key Gen request */ if (uOutputLen > maxOutputLen) { LAC_INVALID_PARAM_LOG("generatedKeyLenInBytes"); return CPA_STATUS_INVALID_PARAM; } } /* TLS v1.1 or TLS v1.2 */ else if (ICP_QAT_FW_LA_CMD_TLS_V1_1_KEY_DERIVE == cmdId || ICP_QAT_FW_LA_CMD_TLS_V1_2_KEY_DERIVE == cmdId) { CpaCyKeyGenTlsOpData *opData = (CpaCyKeyGenTlsOpData *)pKeyGenOpData; /* User info */ uSecretLen = opData->secret.dataLenInBytes; uSeedLen = opData->seed.dataLenInBytes; uOutputLen = opData->generatedKeyLenInBytes; if (ICP_QAT_FW_LA_CMD_TLS_V1_1_KEY_DERIVE == cmdId) { /* Api max value */ /* ICP_QAT_FW_LA_TLS_V1_1_SECRET_LEN_MAX needs to be * multiplied * by 4 in order to verify the 512-byte limit. We did * not change * ICP_QAT_FW_LA_TLS_V1_1_SECRET_LEN_MAX as it * represents * the max value that firmware can handle. */ maxSecretLen = ICP_QAT_FW_LA_TLS_V1_1_SECRET_LEN_MAX * 4; } else { /* Api max value */ /* ICP_QAT_FW_LA_TLS_V1_2_SECRET_LEN_MAX needs to be * multiplied * by 8 in order to verify the 512-byte limit. We did * not change * ICP_QAT_FW_LA_TLS_V1_2_SECRET_LEN_MAX as it * represents * the max value that firmware can handle.
*/ maxSecretLen = ICP_QAT_FW_LA_TLS_V1_2_SECRET_LEN_MAX * 8; /* Check Hash algorithm */ if (0 == getDigestSizeFromHashAlgo(hashAlgCipher)) { LAC_INVALID_PARAM_LOG("hashAlgorithm"); return CPA_STATUS_INVALID_PARAM; } } maxSeedLen = ICP_QAT_FW_LA_TLS_SEED_LEN_MAX; maxOutputLen = ICP_QAT_FW_LA_TLS_OUTPUT_LEN_MAX; /* Check user buffers */ LAC_CHECK_NULL_PARAM(opData->secret.pData); LAC_CHECK_NULL_PARAM(opData->seed.pData); /* Check operation */ if ((Cpa32U)opData->tlsOp > CPA_CY_KEY_TLS_OP_USER_DEFINED) { LAC_INVALID_PARAM_LOG("opData->tlsOp"); return CPA_STATUS_INVALID_PARAM; } else if ((Cpa32U)opData->tlsOp == CPA_CY_KEY_TLS_OP_USER_DEFINED) { LAC_CHECK_NULL_PARAM(opData->userLabel.pData); /* Maximum label length for TLS Key Gen request */ if (opData->userLabel.dataLenInBytes > ICP_QAT_FW_LA_TLS_LABEL_LEN_MAX) { LAC_INVALID_PARAM_LOG( "userLabel.dataLenInBytes"); return CPA_STATUS_INVALID_PARAM; } } /* Maximum/only seed length for TLS Key Gen request */ if (((Cpa32U)opData->tlsOp != CPA_CY_KEY_TLS_OP_MASTER_SECRET_DERIVE) && ((Cpa32U)opData->tlsOp != CPA_CY_KEY_TLS_OP_KEY_MATERIAL_DERIVE)) { if (uSeedLen > maxSeedLen) { LAC_INVALID_PARAM_LOG("seed.dataLenInBytes"); return CPA_STATUS_INVALID_PARAM; } } else { if (maxSeedLen != uSeedLen) { LAC_INVALID_PARAM_LOG("seed.dataLenInBytes"); return CPA_STATUS_INVALID_PARAM; } } /* Maximum output length for TLS Key Gen request */ if (uOutputLen > maxOutputLen) { LAC_INVALID_PARAM_LOG("generatedKeyLenInBytes"); return CPA_STATUS_INVALID_PARAM; } } /* TLS v1.3 */ else if (cmdId >= ICP_QAT_FW_LA_CMD_HKDF_EXTRACT && cmdId <= ICP_QAT_FW_LA_CMD_HKDF_EXTRACT_AND_EXPAND_LABEL) { CpaCyKeyGenHKDFOpData *HKDF_Data = (CpaCyKeyGenHKDFOpData *)pKeyGenOpData; CpaCyKeyHKDFCipherSuite cipherSuite = hashAlgCipher; CpaCySymHashAlgorithm hashAlgorithm = getHashAlgorithmFromCipherSuiteHKDF(cipherSuite); maxSeedLen = cipherSuiteHKDFHashSizes[cipherSuite][LAC_KEY_HKDF_DIGESTS]; maxSecretLen = CPA_CY_HKDF_KEY_MAX_SECRET_SZ; maxInfoLen = CPA_CY_HKDF_KEY_MAX_INFO_SZ; maxLabelLen = CPA_CY_HKDF_KEY_MAX_LABEL_SZ; uSecretLen = HKDF_Data->secretLen; /* Check using supported hash function */ if (0 == (uOutputLen = getDigestSizeFromHashAlgo(hashAlgorithm))) { LAC_INVALID_PARAM_LOG("Hash function not supported"); return CPA_STATUS_INVALID_PARAM; } /* Number of labels does not exceed the MAX */ if (HKDF_Data->numLabels > CPA_CY_HKDF_KEY_MAX_LABEL_COUNT) { LAC_INVALID_PARAM_LOG( "CpaCyKeyGenHKDFOpData.numLabels"); return CPA_STATUS_INVALID_PARAM; } switch (cmdId) { case ICP_QAT_FW_LA_CMD_HKDF_EXTRACT: if (maxSeedLen < HKDF_Data->seedLen) { LAC_INVALID_PARAM_LOG( "CpaCyKeyGenHKDFOpData.seedLen"); return CPA_STATUS_INVALID_PARAM; } break; case ICP_QAT_FW_LA_CMD_HKDF_EXPAND: maxSecretLen = cipherSuiteHKDFHashSizes[cipherSuite] [LAC_KEY_HKDF_DIGESTS]; if (maxInfoLen < HKDF_Data->infoLen) { LAC_INVALID_PARAM_LOG( "CpaCyKeyGenHKDFOpData.infoLen"); return CPA_STATUS_INVALID_PARAM; } break; case ICP_QAT_FW_LA_CMD_HKDF_EXTRACT_AND_EXPAND: uOutputLen *= 2; if (maxSeedLen < HKDF_Data->seedLen) { LAC_INVALID_PARAM_LOG( "CpaCyKeyGenHKDFOpData.seedLen"); return CPA_STATUS_INVALID_PARAM; } if (maxInfoLen < HKDF_Data->infoLen) { LAC_INVALID_PARAM_LOG( "CpaCyKeyGenHKDFOpData.infoLen"); return CPA_STATUS_INVALID_PARAM; } break; case ICP_QAT_FW_LA_CMD_HKDF_EXPAND_LABEL: /* Fall through */ case ICP_QAT_FW_LA_CMD_HKDF_EXTRACT_AND_EXPAND_LABEL: { Cpa8U subl_mask = 0, subl_number = 1; Cpa8U i = 0; if (maxSeedLen < HKDF_Data->seedLen) { LAC_INVALID_PARAM_LOG( "CpaCyKeyGenHKDFOpData.seedLen"); return 
CPA_STATUS_INVALID_PARAM; } /* If EXPAND set uOutputLen to zero */ if (ICP_QAT_FW_LA_CMD_HKDF_EXPAND_LABEL == cmdId) { uOutputLen = 0; maxSecretLen = cipherSuiteHKDFHashSizes [cipherSuite][LAC_KEY_HKDF_DIGESTS]; } for (i = 0; i < HKDF_Data->numLabels; i++) { /* Check that the labelLen does not overflow */ if (maxLabelLen < HKDF_Data->label[i].labelLen) { LAC_INVALID_PARAM_LOG1( "CpaCyKeyGenHKDFOpData.label[%d].labelLen", i); return CPA_STATUS_INVALID_PARAM; } if (HKDF_Data->label[i].sublabelFlag & ~HKDF_SUB_LABELS_ALL) { LAC_INVALID_PARAM_LOG1( "CpaCyKeyGenHKDFOpData.label[%d]." "subLabelFlag", i); return CPA_STATUS_INVALID_PARAM; } /* Calculate the appended subLabel output * lengths and * check that the output buffer that the user * has * supplied is the correct length. */ uOutputLen += cipherSuiteHKDFHashSizes [cipherSuite][LAC_KEY_HKDF_DIGESTS]; /* Get mask of subLabel */ subl_mask = HKDF_Data->label[i].sublabelFlag; for (subl_number = 1; subl_number <= LAC_KEY_HKDF_SUBLABELS_NUM; subl_number++) { /* Add the used subLabel key lengths */ if (subl_mask & 1) { uOutputLen += cipherSuiteHKDFHashSizes [cipherSuite] [subl_number]; } subl_mask >>= 1; } } } break; default: break; } } else { LAC_INVALID_PARAM_LOG("TLS/SSL operation"); return CPA_STATUS_INVALID_PARAM; } /* Maximum secret length for TLS/SSL Key Gen request */ if (uSecretLen > maxSecretLen) { LAC_INVALID_PARAM_LOG("HKDF.secretLen/secret.dataLenInBytes"); return CPA_STATUS_INVALID_PARAM; } /* Check for enough space in the flat buffer */ if (uOutputLen > pGeneratedKeyBuffer->dataLenInBytes) { LAC_INVALID_PARAM_LOG("pGeneratedKeyBuffer->dataLenInBytes"); return CPA_STATUS_INVALID_PARAM; } return CPA_STATUS_SUCCESS; } /** * */ /** * @ingroup LacSymKey * Common Keygen Code for TLS v1.0/1.1, v1.2, v1.3 and SSL3. * @description * Check user parameters and perform the required operation. * * @param[in] instanceHandle_in Instance handle. * @param[in] pKeyGenCb Pointer to callback function to be * invoked when the operation is complete. * If this is set to a NULL value the * function will operate synchronously. * @param[in] pCallbackTag Opaque User Data for this specific * call. Will be returned unchanged in the * callback. * @param[in] pKeyGenOpData Pointer to a structure containing all * the data needed to perform the key * generation operation. * @param[in] hashAlgorithm Specifies the hash algorithm to use. * According to RFC5246, this should be * "SHA-256 or a stronger standard hash * function." * @param[out] pGeneratedKeyBuffer User output buffer. * @param[in] cmdId Keygen operation to perform. */ static CpaStatus LacSymKey_KeyGenSslTls(const CpaInstanceHandle instanceHandle_in, const CpaCyGenFlatBufCbFunc pKeyGenCb, void *pCallbackTag, const void *pKeyGenOpData, Cpa8U hashAlgorithm, CpaFlatBuffer *pGeneratedKeyBuffer, icp_qat_fw_la_cmd_id_t cmdId) { CpaStatus status = CPA_STATUS_FAIL; CpaInstanceHandle instanceHandle = LacKey_GetHandle(instanceHandle_in); LAC_CHECK_INSTANCE_HANDLE(instanceHandle); SAL_CHECK_INSTANCE_TYPE(instanceHandle, (SAL_SERVICE_TYPE_CRYPTO | SAL_SERVICE_TYPE_CRYPTO_SYM)); SAL_RUNNING_CHECK(instanceHandle); status = LacSymKey_CheckParamSslTls(pKeyGenOpData, hashAlgorithm, pGeneratedKeyBuffer, cmdId); if (CPA_STATUS_SUCCESS != status) return status; return LacSymKey_KeyGenSslTls_GenCommon(instanceHandle, pKeyGenCb, pCallbackTag, cmdId, LAC_CONST_PTR_CAST( pKeyGenOpData), hashAlgorithm, pGeneratedKeyBuffer); } /** * @ingroup LacSymKey * SSL Key Generation Function.
* @description * This function is used for SSL key generation. It implements the key * generation function defined in section 6.2.2 of the SSL 3.0 * specification as described in * http://www.mozilla.org/projects/security/pki/nss/ssl/draft302.txt. * * The input seed is taken as a flat buffer and the generated key is * returned to caller in a flat destination data buffer. * * @param[in] instanceHandle_in Instance handle. * @param[in] pKeyGenCb Pointer to callback function to be * invoked when the operation is complete. * If this is set to a NULL value the * function will operate synchronously. * @param[in] pCallbackTag Opaque User Data for this specific * call. Will be returned unchanged in the * callback. * @param[in] pKeyGenSslOpData Pointer to a structure containing all * the data needed to perform the SSL key * generation operation. The client code * allocates the memory for this * structure. This component takes * ownership of the memory until it is * returned in the callback. * @param[out] pGeneratedKeyBuffer Caller MUST allocate a sufficient * buffer to hold the key generation * output. The data pointer SHOULD be * aligned on an 8-byte boundary. The * length field passed in represents the * size of the buffer in bytes. The value * that is returned is the size of the * result key in bytes. * On invocation the callback function * will contain this parameter in the * pOut parameter. * * @retval CPA_STATUS_SUCCESS Function executed successfully. * @retval CPA_STATUS_FAIL Function failed. * @retval CPA_STATUS_RETRY Resubmit the request. * @retval CPA_STATUS_INVALID_PARAM Invalid parameter passed in. * @retval CPA_STATUS_RESOURCE Error related to system resources. */ CpaStatus cpaCyKeyGenSsl(const CpaInstanceHandle instanceHandle_in, const CpaCyGenFlatBufCbFunc pKeyGenCb, void *pCallbackTag, const CpaCyKeyGenSslOpData *pKeyGenSslOpData, CpaFlatBuffer *pGeneratedKeyBuffer) { CpaInstanceHandle instanceHandle = NULL; if (CPA_INSTANCE_HANDLE_SINGLE == instanceHandle_in) { instanceHandle = Lac_GetFirstHandle(SAL_SERVICE_TYPE_CRYPTO_SYM); } else { instanceHandle = instanceHandle_in; } return LacSymKey_KeyGenSslTls(instanceHandle, pKeyGenCb, pCallbackTag, LAC_CONST_PTR_CAST(pKeyGenSslOpData), CPA_CY_SYM_HASH_NONE, /* Hash algorithm */ pGeneratedKeyBuffer, ICP_QAT_FW_LA_CMD_SSL3_KEY_DERIVE); } /** * @ingroup LacSymKey * TLS Key Generation Function. * @description * This function is used for TLS key generation. It implements the * TLS PRF (Pseudo Random Function) as defined by RFC2246 (TLS v1.0) * and RFC4346 (TLS v1.1). * * The input seed is taken as a flat buffer and the generated key is * returned to caller in a flat destination data buffer. * * @param[in] instanceHandle_in Instance handle. * @param[in] pKeyGenCb Pointer to callback function to be * invoked when the operation is complete. * If this is set to a NULL value the * function will operate synchronously. * @param[in] pCallbackTag Opaque User Data for this specific * call. Will be returned unchanged in the * callback. * @param[in] pKeyGenTlsOpData Pointer to a structure containing all * the data needed to perform the TLS key * generation operation. The client code * allocates the memory for this * structure. This component takes * ownership of the memory until it is * returned in the callback. * @param[out] pGeneratedKeyBuffer Caller MUST allocate a sufficient * buffer to hold the key generation * output. The data pointer SHOULD be * aligned on an 8-byte boundary. The * length field passed in represents the * size of the buffer in bytes. 
The value * that is returned is the size of the * result key in bytes. * On invocation the callback function * will contain this parameter in the * pOut parameter. * * @retval CPA_STATUS_SUCCESS Function executed successfully. * @retval CPA_STATUS_FAIL Function failed. * @retval CPA_STATUS_RETRY Resubmit the request. * @retval CPA_STATUS_INVALID_PARAM Invalid parameter passed in. * @retval CPA_STATUS_RESOURCE Error related to system resources. * */ CpaStatus cpaCyKeyGenTls(const CpaInstanceHandle instanceHandle_in, const CpaCyGenFlatBufCbFunc pKeyGenCb, void *pCallbackTag, const CpaCyKeyGenTlsOpData *pKeyGenTlsOpData, CpaFlatBuffer *pGeneratedKeyBuffer) { CpaInstanceHandle instanceHandle = NULL; if (CPA_INSTANCE_HANDLE_SINGLE == instanceHandle_in) { instanceHandle = Lac_GetFirstHandle(SAL_SERVICE_TYPE_CRYPTO_SYM); } else { instanceHandle = instanceHandle_in; } return LacSymKey_KeyGenSslTls(instanceHandle, pKeyGenCb, pCallbackTag, LAC_CONST_PTR_CAST(pKeyGenTlsOpData), CPA_CY_SYM_HASH_NONE, /* Hash algorithm */ pGeneratedKeyBuffer, ICP_QAT_FW_LA_CMD_TLS_V1_1_KEY_DERIVE); } /** * @ingroup LacSymKey * @description * This function is used for TLS key generation. It implements the * TLS PRF (Pseudo Random Function) as defined by RFC5246 (TLS v1.2). * * The input seed is taken as a flat buffer and the generated key is * returned to caller in a flat destination data buffer. * * @param[in] instanceHandle_in Instance handle. * @param[in] pKeyGenCb Pointer to callback function to be * invoked when the operation is complete. * If this is set to a NULL value the * function will operate synchronously. * @param[in] pCallbackTag Opaque User Data for this specific * call. Will be returned unchanged in the * callback. * @param[in] pKeyGenTlsOpData Pointer to a structure containing all * the data needed to perform the TLS key * generation operation. The client code * allocates the memory for this * structure. This component takes * ownership of the memory until it is * returned in the callback. * @param[in] hashAlgorithm Specifies the hash algorithm to use. * According to RFC5246, this should be * "SHA-256 or a stronger standard hash * function." * @param[out] pGeneratedKeyBuffer Caller MUST allocate a sufficient * buffer to hold the key generation * output. The data pointer SHOULD be * aligned on an 8-byte boundary. The * length field passed in represents the * size of the buffer in bytes. The value * that is returned is the size of the * result key in bytes. * On invocation the callback function * will contain this parameter in the * pOut parameter. * * @retval CPA_STATUS_SUCCESS Function executed successfully. * @retval CPA_STATUS_FAIL Function failed. * @retval CPA_STATUS_RETRY Resubmit the request. * @retval CPA_STATUS_INVALID_PARAM Invalid parameter passed in. * @retval CPA_STATUS_RESOURCE Error related to system resources. 
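 *
 * A minimal synchronous call sketch (illustrative only; cyInstHandle,
 * pTlsOpData and keyBuffer are placeholder caller variables referring to
 * already-populated, pinned memory). Passing NULL for the callback selects
 * the synchronous mode described above:
 *
 *     status = cpaCyKeyGenTls2(cyInstHandle, NULL, NULL, pTlsOpData,
 *                              CPA_CY_SYM_HASH_SHA256, &keyBuffer);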
*/ CpaStatus cpaCyKeyGenTls2(const CpaInstanceHandle instanceHandle_in, const CpaCyGenFlatBufCbFunc pKeyGenCb, void *pCallbackTag, const CpaCyKeyGenTlsOpData *pKeyGenTlsOpData, CpaCySymHashAlgorithm hashAlgorithm, CpaFlatBuffer *pGeneratedKeyBuffer) { CpaInstanceHandle instanceHandle = NULL; if (CPA_INSTANCE_HANDLE_SINGLE == instanceHandle_in) { instanceHandle = Lac_GetFirstHandle(SAL_SERVICE_TYPE_CRYPTO_SYM); } else { instanceHandle = instanceHandle_in; } return LacSymKey_KeyGenSslTls(instanceHandle, pKeyGenCb, pCallbackTag, LAC_CONST_PTR_CAST(pKeyGenTlsOpData), hashAlgorithm, pGeneratedKeyBuffer, ICP_QAT_FW_LA_CMD_TLS_V1_2_KEY_DERIVE); } /** * @ingroup LacSymKey * @description * This function is used for TLS1.3 HKDF key generation. It implements * the "extract-then-expand" paradigm as defined by RFC 5869. * * The input seed/secret/info is taken as a flat buffer and the generated * key(s)/labels are returned to caller in a flat data buffer. * * @param[in] instanceHandle_in Instance handle. * @param[in] pKeyGenCb Pointer to callback function to be * invoked when the operation is complete. * If this is set to a NULL value the * function will operate synchronously. * @param[in] pCallbackTag Opaque User Data for this specific * call. Will be returned unchanged in the * callback. * @param[in] pKeyGenTlsOpData Pointer to a structure containing * the data needed to perform the HKDF key * generation operation. * The client code allocates the memory * for this structure as contiguous * pinned memory. * This component takes ownership of the * memory until it is returned in the * callback. * @param[in] hashAlgorithm Specifies the hash algorithm to use. * According to RFC5246, this should be * "SHA-256 or a stronger standard hash * function." * @param[out] pGeneratedKeyBuffer Caller MUST allocate a sufficient * buffer to hold the key generation * output. The data pointer SHOULD be * aligned on an 8-byte boundary. The * length field passed in represents the * size of the buffer in bytes. The value * that is returned is the size of the * result key in bytes. * On invocation the callback function * will contain this parameter in the * pOut parameter. * * @retval CPA_STATUS_SUCCESS Function executed successfully. * @retval CPA_STATUS_FAIL Function failed. * @retval CPA_STATUS_RETRY Resubmit the request. * @retval CPA_STATUS_INVALID_PARAM Invalid parameter passed in. * @retval CPA_STATUS_RESOURCE Error related to system resources. 
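 *
 * A minimal synchronous call sketch (illustrative only; cyInstHandle,
 * pHkdfOpData and keyBuffer are placeholder caller variables, with
 * pHkdfOpData allocated as contiguous pinned memory and populated by the
 * caller, e.g. hkdfKeyOp set to CPA_CY_HKDF_KEY_EXTRACT_EXPAND). Passing
 * NULL for the callback selects synchronous operation:
 *
 *     status = cpaCyKeyGenTls3(cyInstHandle, NULL, NULL, pHkdfOpData,
 *                              CPA_CY_HKDF_TLS_AES_128_GCM_SHA256,
 *                              &keyBuffer);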
*/ CpaStatus cpaCyKeyGenTls3(const CpaInstanceHandle instanceHandle_in, const CpaCyGenFlatBufCbFunc pKeyGenCb, void *pCallbackTag, const CpaCyKeyGenHKDFOpData *pKeyGenTlsOpData, CpaCyKeyHKDFCipherSuite cipherSuite, CpaFlatBuffer *pGeneratedKeyBuffer) { LAC_CHECK_NULL_PARAM(pKeyGenTlsOpData); switch (pKeyGenTlsOpData->hkdfKeyOp) { case CPA_CY_HKDF_KEY_EXTRACT: /* Fall through */ case CPA_CY_HKDF_KEY_EXPAND: case CPA_CY_HKDF_KEY_EXTRACT_EXPAND: case CPA_CY_HKDF_KEY_EXPAND_LABEL: case CPA_CY_HKDF_KEY_EXTRACT_EXPAND_LABEL: break; default: LAC_INVALID_PARAM_LOG("HKDF operation not supported"); return CPA_STATUS_INVALID_PARAM; } return LacSymKey_KeyGenSslTls(instanceHandle_in, pKeyGenCb, pCallbackTag, LAC_CONST_PTR_CAST(pKeyGenTlsOpData), cipherSuite, pGeneratedKeyBuffer, (icp_qat_fw_la_cmd_id_t) pKeyGenTlsOpData->hkdfKeyOp); } /* * LacSymKey_Init */ CpaStatus LacSymKey_Init(CpaInstanceHandle instanceHandle_in) { CpaStatus status = CPA_STATUS_SUCCESS; CpaInstanceHandle instanceHandle = LacKey_GetHandle(instanceHandle_in); sal_crypto_service_t *pService = NULL; LAC_CHECK_INSTANCE_HANDLE(instanceHandle); pService = (sal_crypto_service_t *)instanceHandle; pService->pLacKeyStats = LAC_OS_MALLOC(LAC_KEY_NUM_STATS * sizeof(QatUtilsAtomic)); if (NULL != pService->pLacKeyStats) { LAC_OS_BZERO((void *)pService->pLacKeyStats, LAC_KEY_NUM_STATS * sizeof(QatUtilsAtomic)); status = LAC_OS_CAMALLOC(&pService->pSslLabel, ICP_QAT_FW_LA_SSL_LABEL_LEN_MAX, LAC_8BYTE_ALIGNMENT, pService->nodeAffinity); } else { status = CPA_STATUS_RESOURCE; } if (CPA_STATUS_SUCCESS == status) { Cpa32U i = 0; Cpa32U offset = 0; /* Initialise SSL label ABBCCC..... */ for (i = 0; i < ICP_QAT_FW_LA_SSL_ITERATES_LEN_MAX; i++) { memset(pService->pSslLabel + offset, 'A' + i, i + 1); offset += (i + 1); } /* Allocate memory for TLS labels */ status = LAC_OS_CAMALLOC(&pService->pTlsLabel, sizeof(lac_sym_key_tls_labels_t), LAC_8BYTE_ALIGNMENT, pService->nodeAffinity); } if (CPA_STATUS_SUCCESS == status) { /* Allocate memory for HKDF sub_labels */ status = LAC_OS_CAMALLOC(&pService->pTlsHKDFSubLabel, sizeof(lac_sym_key_tls_hkdf_sub_labels_t), LAC_8BYTE_ALIGNMENT, pService->nodeAffinity); } if (CPA_STATUS_SUCCESS == status) { LAC_OS_BZERO(pService->pTlsLabel, sizeof(lac_sym_key_tls_labels_t)); /* Copy the TLS v1.2 labels into the dynamically allocated * structure */ memcpy(pService->pTlsLabel->masterSecret, LAC_SYM_KEY_TLS_MASTER_SECRET_LABEL, sizeof(LAC_SYM_KEY_TLS_MASTER_SECRET_LABEL) - 1); memcpy(pService->pTlsLabel->keyMaterial, LAC_SYM_KEY_TLS_KEY_MATERIAL_LABEL, sizeof(LAC_SYM_KEY_TLS_KEY_MATERIAL_LABEL) - 1); memcpy(pService->pTlsLabel->clientFinished, LAC_SYM_KEY_TLS_CLIENT_FIN_LABEL, sizeof(LAC_SYM_KEY_TLS_CLIENT_FIN_LABEL) - 1); memcpy(pService->pTlsLabel->serverFinished, LAC_SYM_KEY_TLS_SERVER_FIN_LABEL, sizeof(LAC_SYM_KEY_TLS_SERVER_FIN_LABEL) - 1); LAC_OS_BZERO(pService->pTlsHKDFSubLabel, sizeof(lac_sym_key_tls_hkdf_sub_labels_t)); /* Copy the TLS v1.3 subLabels into the dynamically allocated * struct */ /* KEY SHA-256 */ memcpy(&pService->pTlsHKDFSubLabel->keySublabel256, &key256, HKDF_SUB_LABEL_KEY_LENGTH); pService->pTlsHKDFSubLabel->keySublabel256.labelLen = HKDF_SUB_LABEL_KEY_LENGTH; pService->pTlsHKDFSubLabel->keySublabel256.sublabelFlag = 1 << QAT_FW_HKDF_INNER_SUBLABEL_16_BYTE_OKM_BITPOS; /* KEY SHA-384 */ memcpy(&pService->pTlsHKDFSubLabel->keySublabel384, &key384, HKDF_SUB_LABEL_KEY_LENGTH); pService->pTlsHKDFSubLabel->keySublabel384.labelLen = HKDF_SUB_LABEL_KEY_LENGTH; 
pService->pTlsHKDFSubLabel->keySublabel384.sublabelFlag = 1 << QAT_FW_HKDF_INNER_SUBLABEL_32_BYTE_OKM_BITPOS; /* KEY CHACHAPOLY */ memcpy(&pService->pTlsHKDFSubLabel->keySublabelChaChaPoly, &keyChaChaPoly, HKDF_SUB_LABEL_KEY_LENGTH); pService->pTlsHKDFSubLabel->keySublabelChaChaPoly.labelLen = HKDF_SUB_LABEL_KEY_LENGTH; pService->pTlsHKDFSubLabel->keySublabelChaChaPoly.sublabelFlag = 1 << QAT_FW_HKDF_INNER_SUBLABEL_32_BYTE_OKM_BITPOS; /* IV SHA-256 */ memcpy(&pService->pTlsHKDFSubLabel->ivSublabel256, &iv256, HKDF_SUB_LABEL_IV_LENGTH); pService->pTlsHKDFSubLabel->ivSublabel256.labelLen = HKDF_SUB_LABEL_IV_LENGTH; pService->pTlsHKDFSubLabel->ivSublabel256.sublabelFlag = 1 << QAT_FW_HKDF_INNER_SUBLABEL_12_BYTE_OKM_BITPOS; /* IV SHA-384 */ memcpy(&pService->pTlsHKDFSubLabel->ivSublabel384, &iv384, HKDF_SUB_LABEL_IV_LENGTH); pService->pTlsHKDFSubLabel->ivSublabel384.labelLen = HKDF_SUB_LABEL_IV_LENGTH; pService->pTlsHKDFSubLabel->ivSublabel384.sublabelFlag = 1 << QAT_FW_HKDF_INNER_SUBLABEL_12_BYTE_OKM_BITPOS; /* IV CHACHAPOLY */ memcpy(&pService->pTlsHKDFSubLabel->ivSublabelChaChaPoly, &iv256, HKDF_SUB_LABEL_IV_LENGTH); pService->pTlsHKDFSubLabel->ivSublabelChaChaPoly.labelLen = HKDF_SUB_LABEL_IV_LENGTH; pService->pTlsHKDFSubLabel->ivSublabelChaChaPoly.sublabelFlag = 1 << QAT_FW_HKDF_INNER_SUBLABEL_12_BYTE_OKM_BITPOS; /* RESUMPTION SHA-256 */ memcpy(&pService->pTlsHKDFSubLabel->resumptionSublabel256, &resumption256, HKDF_SUB_LABEL_RESUMPTION_LENGTH); pService->pTlsHKDFSubLabel->resumptionSublabel256.labelLen = HKDF_SUB_LABEL_RESUMPTION_LENGTH; /* RESUMPTION SHA-384 */ memcpy(&pService->pTlsHKDFSubLabel->resumptionSublabel384, &resumption384, HKDF_SUB_LABEL_RESUMPTION_LENGTH); pService->pTlsHKDFSubLabel->resumptionSublabel384.labelLen = HKDF_SUB_LABEL_RESUMPTION_LENGTH; /* RESUMPTION CHACHAPOLY */ memcpy( &pService->pTlsHKDFSubLabel->resumptionSublabelChaChaPoly, &resumption256, HKDF_SUB_LABEL_RESUMPTION_LENGTH); pService->pTlsHKDFSubLabel->resumptionSublabelChaChaPoly .labelLen = HKDF_SUB_LABEL_RESUMPTION_LENGTH; /* FINISHED SHA-256 */ memcpy(&pService->pTlsHKDFSubLabel->finishedSublabel256, &finished256, HKDF_SUB_LABEL_FINISHED_LENGTH); pService->pTlsHKDFSubLabel->finishedSublabel256.labelLen = HKDF_SUB_LABEL_FINISHED_LENGTH; /* FINISHED SHA-384 */ memcpy(&pService->pTlsHKDFSubLabel->finishedSublabel384, &finished384, HKDF_SUB_LABEL_FINISHED_LENGTH); pService->pTlsHKDFSubLabel->finishedSublabel384.labelLen = HKDF_SUB_LABEL_FINISHED_LENGTH; /* FINISHED CHACHAPOLY */ memcpy(&pService->pTlsHKDFSubLabel->finishedSublabelChaChaPoly, &finished256, HKDF_SUB_LABEL_FINISHED_LENGTH); pService->pTlsHKDFSubLabel->finishedSublabelChaChaPoly .labelLen = HKDF_SUB_LABEL_FINISHED_LENGTH; /* Set physical address of sublabels */ pService->pTlsHKDFSubLabel->sublabelPhysAddr256 = LAC_OS_VIRT_TO_PHYS_INTERNAL( &pService->pTlsHKDFSubLabel->keySublabel256); pService->pTlsHKDFSubLabel->sublabelPhysAddr384 = LAC_OS_VIRT_TO_PHYS_INTERNAL( &pService->pTlsHKDFSubLabel->keySublabel384); pService->pTlsHKDFSubLabel->sublabelPhysAddrChaChaPoly = LAC_OS_VIRT_TO_PHYS_INTERNAL( &pService->pTlsHKDFSubLabel->keySublabelChaChaPoly); /* Register request handlers */ LacSymQat_RespHandlerRegister(ICP_QAT_FW_LA_CMD_SSL3_KEY_DERIVE, LacSymKey_SslTlsHandleResponse); LacSymQat_RespHandlerRegister( ICP_QAT_FW_LA_CMD_TLS_V1_1_KEY_DERIVE, LacSymKey_SslTlsHandleResponse); LacSymQat_RespHandlerRegister( ICP_QAT_FW_LA_CMD_TLS_V1_2_KEY_DERIVE, LacSymKey_SslTlsHandleResponse); LacSymQat_RespHandlerRegister(ICP_QAT_FW_LA_CMD_HKDF_EXTRACT, 
LacSymKey_SslTlsHandleResponse); LacSymQat_RespHandlerRegister(ICP_QAT_FW_LA_CMD_HKDF_EXPAND, LacSymKey_SslTlsHandleResponse); LacSymQat_RespHandlerRegister( ICP_QAT_FW_LA_CMD_HKDF_EXTRACT_AND_EXPAND, LacSymKey_SslTlsHandleResponse); LacSymQat_RespHandlerRegister( ICP_QAT_FW_LA_CMD_HKDF_EXPAND_LABEL, LacSymKey_SslTlsHandleResponse); LacSymQat_RespHandlerRegister( ICP_QAT_FW_LA_CMD_HKDF_EXTRACT_AND_EXPAND_LABEL, LacSymKey_SslTlsHandleResponse); LacSymQat_RespHandlerRegister(ICP_QAT_FW_LA_CMD_MGF1, LacSymKey_MgfHandleResponse); } if (CPA_STATUS_SUCCESS != status) { LAC_OS_FREE(pService->pLacKeyStats); LAC_OS_CAFREE(pService->pSslLabel); LAC_OS_CAFREE(pService->pTlsLabel); LAC_OS_CAFREE(pService->pTlsHKDFSubLabel); } return status; } /* * LacSymKey_Shutdown */ CpaStatus LacSymKey_Shutdown(CpaInstanceHandle instanceHandle_in) { CpaStatus status = CPA_STATUS_SUCCESS; CpaInstanceHandle instanceHandle = LacKey_GetHandle(instanceHandle_in); sal_crypto_service_t *pService = NULL; LAC_CHECK_INSTANCE_HANDLE(instanceHandle); pService = (sal_crypto_service_t *)instanceHandle; if (NULL != pService->pLacKeyStats) { LAC_OS_FREE(pService->pLacKeyStats); } LAC_OS_CAFREE(pService->pSslLabel); LAC_OS_CAFREE(pService->pTlsLabel); LAC_OS_CAFREE(pService->pTlsHKDFSubLabel); return status; } diff --git a/sys/dev/qat/qat_api/common/crypto/sym/lac_sym_hash.c b/sys/dev/qat/qat_api/common/crypto/sym/lac_sym_hash.c index 5ad64ca09a0b..46f652cfd5c6 100644 --- a/sys/dev/qat/qat_api/common/crypto/sym/lac_sym_hash.c +++ b/sys/dev/qat/qat_api/common/crypto/sym/lac_sym_hash.c @@ -1,764 +1,767 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright(c) 2007-2025 Intel Corporation */ /** *************************************************************************** * @file lac_sym_hash.c * * @ingroup LacHash * * Hash specific functionality ***************************************************************************/ /* ******************************************************************************* * Include public/global header files ******************************************************************************* */ #include "cpa.h" #include "cpa_cy_sym.h" #include "icp_accel_devices.h" #include "icp_adf_debug.h" /* ******************************************************************************* * Include private header files ******************************************************************************* */ #include "lac_common.h" #include "lac_mem.h" #include "lac_sym.h" #include "lac_session.h" #include "lac_sym_hash.h" #include "lac_log.h" #include "lac_sym_qat_hash.h" #include "lac_sym_qat_hash_defs_lookup.h" #include "lac_sym_cb.h" #include "lac_sync.h" #define LAC_HASH_ALG_MODE_NOT_SUPPORTED(alg, mode) \ ((((CPA_CY_SYM_HASH_KASUMI_F9 == (alg)) || \ (CPA_CY_SYM_HASH_SNOW3G_UIA2 == (alg)) || \ (CPA_CY_SYM_HASH_AES_XCBC == (alg)) || \ (CPA_CY_SYM_HASH_AES_CCM == (alg)) || \ (CPA_CY_SYM_HASH_AES_GCM == (alg)) || \ (CPA_CY_SYM_HASH_AES_GMAC == (alg)) || \ (CPA_CY_SYM_HASH_AES_CMAC == (alg)) || \ (CPA_CY_SYM_HASH_ZUC_EIA3 == (alg))) && \ (CPA_CY_SYM_HASH_MODE_AUTH != (mode))) || \ ((LAC_HASH_IS_SHA3(alg)) && (CPA_CY_SYM_HASH_MODE_NESTED == (mode)))) /**< Macro to check for valid algorithm-mode combination */ void LacSync_GenBufListVerifyCb(void *pCallbackTag, CpaStatus status, CpaCySymOp operationType, void *pOpData, CpaBufferList *pDstBuffer, CpaBoolean opResult); /** * @ingroup LacHash * This callback function will be invoked whenever a synchronous * hash precompute operation completes. 
It will set the wait * queue flag for the synchronous operation. * * @param[in] pCallbackTag Opaque value provided by user. This will * be a pointer to a wait queue flag. * * @retval * None * */ static void LacHash_SyncPrecomputeDoneCb(void *pCallbackTag) { LacSync_GenWakeupSyncCaller(pCallbackTag, CPA_STATUS_SUCCESS); } /** @ingroup LacHash */ CpaStatus LacHash_StatePrefixAadBufferInit( sal_service_t *pService, const CpaCySymHashSetupData *pHashSetupData, icp_qat_la_bulk_req_ftr_t *pReq, icp_qat_hw_auth_mode_t qatHashMode, Cpa8U *pHashStateBuffer, lac_sym_qat_hash_state_buffer_info_t *pHashStateBufferInfo) { /* set up the hash state prefix buffer info structure */ pHashStateBufferInfo->pData = pHashStateBuffer; pHashStateBufferInfo->pDataPhys = LAC_MEM_CAST_PTR_TO_UINT64( LAC_OS_VIRT_TO_PHYS_EXTERNAL((*pService), pHashStateBuffer)); if (pHashStateBufferInfo->pDataPhys == 0) { LAC_LOG_ERROR("Unable to get the physical address of " "the hash state buffer\n"); return CPA_STATUS_FAIL; } LacSymQat_HashStatePrefixAadBufferSizeGet(pReq, pHashStateBufferInfo); /* Prefix data gets copied to the hash state buffer for nested mode */ if (CPA_CY_SYM_HASH_MODE_NESTED == pHashSetupData->hashMode) { LacSymQat_HashStatePrefixAadBufferPopulate( pHashStateBufferInfo, pReq, pHashSetupData->nestedModeSetupData.pInnerPrefixData, (Cpa8U)pHashSetupData->nestedModeSetupData .innerPrefixLenInBytes, pHashSetupData->nestedModeSetupData.pOuterPrefixData, (Cpa8U)pHashSetupData->nestedModeSetupData .outerPrefixLenInBytes); } /* For mode2 HMAC the key gets copied into both the inner and * outer prefix fields */ else if (IS_HASH_MODE_2_AUTH(qatHashMode, pHashSetupData->hashMode)) { LacSymQat_HashStatePrefixAadBufferPopulate( pHashStateBufferInfo, pReq, pHashSetupData->authModeSetupData.authKey, (Cpa8U)pHashSetupData->authModeSetupData.authKeyLenInBytes, pHashSetupData->authModeSetupData.authKey, (Cpa8U)pHashSetupData->authModeSetupData.authKeyLenInBytes); } /* else do nothing for the other cases */ return CPA_STATUS_SUCCESS; } /** @ingroup LacHash */ CpaStatus LacHash_PrecomputeDataCreate(const CpaInstanceHandle instanceHandle, CpaCySymSessionSetupData *pSessionSetup, lac_hash_precompute_done_cb_t callbackFn, void *pCallbackTag, Cpa8U *pWorkingBuffer, Cpa8U *pState1, Cpa8U *pState2) { CpaStatus status = CPA_STATUS_SUCCESS; Cpa8U *pAuthKey = NULL; Cpa32U authKeyLenInBytes = 0; CpaCySymHashAlgorithm hashAlgorithm = pSessionSetup->hashSetupData.hashAlgorithm; CpaCySymHashAuthModeSetupData *pAuthModeSetupData = &pSessionSetup->hashSetupData.authModeSetupData; /* synchronous operation */ if (NULL == callbackFn) { lac_sync_op_data_t *pSyncCallbackData = NULL; status = LacSync_CreateSyncCookie(&pSyncCallbackData); if (CPA_STATUS_SUCCESS == status) { status = LacHash_PrecomputeDataCreate( instanceHandle, pSessionSetup, LacHash_SyncPrecomputeDoneCb, /* wait queue condition from sync cookie */ pSyncCallbackData, pWorkingBuffer, pState1, pState2); } else { return status; } if (CPA_STATUS_SUCCESS == status) { CpaStatus syncStatus = CPA_STATUS_SUCCESS; syncStatus = LacSync_WaitForCallback( pSyncCallbackData, LAC_SYM_SYNC_CALLBACK_TIMEOUT, &status, NULL); /* If callback doesn't come back */ if (CPA_STATUS_SUCCESS != syncStatus) { QAT_UTILS_LOG( "callback functions for precomputes did not return\n"); status = syncStatus; } } else { /* As the Request was not sent the Callback will never * be called, so need to indicate that we're finished * with cookie so it can be destroyed. 
*/ LacSync_SetSyncCookieComplete(pSyncCallbackData); } LacSync_DestroySyncCookie(&pSyncCallbackData); return status; } /* set up convenience pointers */ pAuthKey = pAuthModeSetupData->authKey; authKeyLenInBytes = pAuthModeSetupData->authKeyLenInBytes; /* Pre-compute data state pointers must already be set up * by LacSymQat_HashSetupBlockInit() */ /* state1 is not allocated for AES XCBC/CCM/GCM/Kasumi/UIA2 * so for these algorithms set state2 only */ if (CPA_CY_SYM_HASH_AES_XCBC == hashAlgorithm) { status = LacSymHash_AesECBPreCompute(instanceHandle, hashAlgorithm, authKeyLenInBytes, pAuthKey, pWorkingBuffer, pState2, callbackFn, pCallbackTag); } else if (CPA_CY_SYM_HASH_AES_CMAC == hashAlgorithm) { /* First, copy the original key to pState2 */ memcpy(pState2, pAuthKey, authKeyLenInBytes); /* Then precompute */ status = LacSymHash_AesECBPreCompute(instanceHandle, hashAlgorithm, authKeyLenInBytes, pAuthKey, pWorkingBuffer, pState2, callbackFn, pCallbackTag); } else if (CPA_CY_SYM_HASH_AES_CCM == hashAlgorithm) { /* * The Inner Hash Initial State2 block is 32 bytes long. * Therefore, for keys bigger than 128 bits (16 bytes), * there is no space for 16 zeroes. */ if (pSessionSetup->cipherSetupData.cipherKeyLenInBytes == ICP_QAT_HW_AES_128_KEY_SZ) { /* * The Inner Hash Initial State2 block must contain K * (the cipher key) and 16 zeroes which will be replaced * with EK(Ctr0) by the QAT-ME. */ /* write the auth key which for CCM is equivalent to * cipher key */ memcpy( pState2, pSessionSetup->cipherSetupData.pCipherKey, pSessionSetup->cipherSetupData.cipherKeyLenInBytes); /* initialize remaining buffer space to all zeroes */ LAC_OS_BZERO(pState2 + pSessionSetup->cipherSetupData .cipherKeyLenInBytes, ICP_QAT_HW_AES_CCM_CBC_E_CTR0_SZ); } /* There is no request sent to the QAT for this operation, * so just invoke the user's callback directly to signal * completion of the precompute */ callbackFn(pCallbackTag); } else if (CPA_CY_SYM_HASH_AES_GCM == hashAlgorithm || CPA_CY_SYM_HASH_AES_GMAC == hashAlgorithm) { /* * The Inner Hash Initial State2 block contains the following * H (the Galois Hash Multiplier) * len(A) (the length of A), (length before padding) * 16 zeroes which will be replaced with EK(Ctr0) by the * QAT. */ /* Memset state2 to 0 */ LAC_OS_BZERO(pState2, ICP_QAT_HW_GALOIS_H_SZ + ICP_QAT_HW_GALOIS_LEN_A_SZ + ICP_QAT_HW_GALOIS_E_CTR0_SZ); /* write H (the Galois Hash Multiplier) where H = E(K, 0...0) * This will only write bytes 0-15 of pState2 */ status = LacSymHash_AesECBPreCompute( instanceHandle, hashAlgorithm, pSessionSetup->cipherSetupData.cipherKeyLenInBytes, pSessionSetup->cipherSetupData.pCipherKey, pWorkingBuffer, pState2, callbackFn, pCallbackTag); if (CPA_STATUS_SUCCESS == status) { /* write len(A) (the length of A) into bytes 16-19 of * pState2 in big-endian format. This field is 8 bytes */ *(Cpa32U *)&pState2[ICP_QAT_HW_GALOIS_H_SZ] = LAC_MEM_WR_32(pAuthModeSetupData->aadLenInBytes); } } else if (CPA_CY_SYM_HASH_KASUMI_F9 == hashAlgorithm) { Cpa32U wordIndex = 0; Cpa32U *pTempKey = (Cpa32U *)(pState2 + authKeyLenInBytes); /* * The Inner Hash Initial State2 block must contain IK * (Initialisation Key), followed by IK XOR-ed with KM * (Key Modifier): IK||(IK^KM). 
*/ /* write the auth key */ memcpy(pState2, pAuthKey, authKeyLenInBytes); /* initialise temp key with auth key */ memcpy(pTempKey, pAuthKey, authKeyLenInBytes); /* XOR Key with KASUMI F9 key modifier at 4 bytes level */ for (wordIndex = 0; wordIndex < LAC_BYTES_TO_LONGWORDS(authKeyLenInBytes); wordIndex++) { pTempKey[wordIndex] ^= LAC_HASH_KASUMI_F9_KEY_MODIFIER_4_BYTES; } /* There is no request sent to the QAT for this operation, * so just invoke the user's callback directly to signal * completion of the precompute */ callbackFn(pCallbackTag); } else if (CPA_CY_SYM_HASH_SNOW3G_UIA2 == hashAlgorithm) { /* * The Inner Hash Initial State2 should be all zeros */ LAC_OS_BZERO(pState2, ICP_QAT_HW_SNOW_3G_UIA2_STATE2_SZ); /* There is no request sent to the QAT for this operation, * so just invoke the user's callback directly to signal * completion of the precompute */ callbackFn(pCallbackTag); } else if (CPA_CY_SYM_HASH_ZUC_EIA3 == hashAlgorithm) { /* * The Inner Hash Initial State2 should contain the key * and zero the rest of the state. */ LAC_OS_BZERO(pState2, ICP_QAT_HW_ZUC_3G_EIA3_STATE2_SZ); memcpy(pState2, pAuthKey, authKeyLenInBytes); /* There is no request sent to the QAT for this operation, * so just invoke the user's callback directly to signal * completion of the precompute */ callbackFn(pCallbackTag); } else if (CPA_CY_SYM_HASH_POLY == hashAlgorithm) { /* There is no request sent to the QAT for this operation, * so just invoke the user's callback directly to signal * completion of the precompute */ callbackFn(pCallbackTag); } else /* For Hmac Precomputes */ { status = LacSymHash_HmacPreComputes(instanceHandle, hashAlgorithm, authKeyLenInBytes, pAuthKey, pWorkingBuffer, pState1, pState2, callbackFn, pCallbackTag); } return status; } /** @ingroup LacHash */ CpaStatus LacHash_HashContextCheck(CpaInstanceHandle instanceHandle, const CpaCySymHashSetupData *pHashSetupData) { lac_sym_qat_hash_alg_info_t *pHashAlgInfo = NULL; lac_sym_qat_hash_alg_info_t *pOuterHashAlgInfo = NULL; CpaCySymCapabilitiesInfo capInfo; /*Protect against value of hash outside the bitmap*/ if (pHashSetupData->hashAlgorithm >= CPA_CY_SYM_HASH_CAP_BITMAP_SIZE) { LAC_INVALID_PARAM_LOG("hashAlgorithm"); return CPA_STATUS_INVALID_PARAM; } cpaCySymQueryCapabilities(instanceHandle, &capInfo); if (!CPA_BITMAP_BIT_TEST(capInfo.hashes, pHashSetupData->hashAlgorithm) && pHashSetupData->hashAlgorithm != CPA_CY_SYM_HASH_AES_CBC_MAC) { LAC_INVALID_PARAM_LOG("hashAlgorithm"); return CPA_STATUS_INVALID_PARAM; } switch (pHashSetupData->hashMode) { case CPA_CY_SYM_HASH_MODE_PLAIN: case CPA_CY_SYM_HASH_MODE_AUTH: case CPA_CY_SYM_HASH_MODE_NESTED: break; default: { LAC_INVALID_PARAM_LOG("hashMode"); return CPA_STATUS_INVALID_PARAM; } } if (LAC_HASH_ALG_MODE_NOT_SUPPORTED(pHashSetupData->hashAlgorithm, pHashSetupData->hashMode)) { LAC_UNSUPPORTED_PARAM_LOG( "hashAlgorithm and hashMode combination"); return CPA_STATUS_UNSUPPORTED; } LacSymQat_HashAlgLookupGet(instanceHandle, pHashSetupData->hashAlgorithm, &pHashAlgInfo); /* note: nested hash mode checks digest length against outer algorithm */ if ((CPA_CY_SYM_HASH_MODE_PLAIN == pHashSetupData->hashMode) || (CPA_CY_SYM_HASH_MODE_AUTH == pHashSetupData->hashMode)) { /* Check Digest Length is permitted by the algorithm */ if ((0 == pHashSetupData->digestResultLenInBytes) || (pHashSetupData->digestResultLenInBytes > pHashAlgInfo->digestLength)) { LAC_INVALID_PARAM_LOG("digestResultLenInBytes"); return CPA_STATUS_INVALID_PARAM; } } if (CPA_CY_SYM_HASH_MODE_AUTH == pHashSetupData->hashMode) { 
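/* Auth mode: apply the algorithm-specific digest length, auth key length and AAD/IV length checks below */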
if (CPA_CY_SYM_HASH_AES_GCM == pHashSetupData->hashAlgorithm || CPA_CY_SYM_HASH_AES_GMAC == pHashSetupData->hashAlgorithm) { Cpa32U aadDataSize = 0; /* RFC 4106: Implementations MUST support a full-length * 16-octet ICV, and MAY support 8 or 12 octet ICVs, and * MUST NOT support other ICV lengths. */ if ((pHashSetupData->digestResultLenInBytes != LAC_HASH_AES_GCM_ICV_SIZE_8) && (pHashSetupData->digestResultLenInBytes != LAC_HASH_AES_GCM_ICV_SIZE_12) && (pHashSetupData->digestResultLenInBytes != LAC_HASH_AES_GCM_ICV_SIZE_16)) { LAC_INVALID_PARAM_LOG("digestResultLenInBytes"); return CPA_STATUS_INVALID_PARAM; } /* ensure aadLen is within maximum limit imposed by QAT */ aadDataSize = pHashSetupData->authModeSetupData.aadLenInBytes; /* round the aad size to the multiple of GCM hash block * size. */ aadDataSize = LAC_ALIGN_POW2_ROUNDUP(aadDataSize, LAC_HASH_AES_GCM_BLOCK_SIZE); if (aadDataSize > ICP_QAT_FW_CCM_GCM_AAD_SZ_MAX && CPA_CY_SYM_HASH_AES_GMAC != pHashSetupData->hashAlgorithm) { LAC_INVALID_PARAM_LOG("aadLenInBytes"); return CPA_STATUS_INVALID_PARAM; } } else if (CPA_CY_SYM_HASH_AES_CCM == pHashSetupData->hashAlgorithm) { Cpa32U aadDataSize = 0; /* RFC 3610: Valid values are 4, 6, 8, 10, 12, 14, and * 16 octets */ if ((pHashSetupData->digestResultLenInBytes >= LAC_HASH_AES_CCM_ICV_SIZE_MIN) && (pHashSetupData->digestResultLenInBytes <= LAC_HASH_AES_CCM_ICV_SIZE_MAX)) { if ((pHashSetupData->digestResultLenInBytes & 0x01) != 0) { LAC_INVALID_PARAM_LOG( "digestResultLenInBytes must be a multiple of 2"); return CPA_STATUS_INVALID_PARAM; } } else { LAC_INVALID_PARAM_LOG("digestResultLenInBytes"); return CPA_STATUS_INVALID_PARAM; } /* ensure aadLen is within maximum limit imposed by QAT */ /* at the beginning of the buffer there is B0 block */ aadDataSize = LAC_HASH_AES_CCM_BLOCK_SIZE; /* then, if there is some 'a' data, the buffer will * store encoded length of 'a' and 'a' itself */ if (pHashSetupData->authModeSetupData.aadLenInBytes > 0) { /* as the QAT API puts the requirement on the * pAdditionalAuthData not to be bigger than 240 * bytes then we just need 2 bytes to store * encoded length of 'a' */ aadDataSize += sizeof(Cpa16U); aadDataSize += pHashSetupData->authModeSetupData .aadLenInBytes; } /* round the aad size to the multiple of CCM block * size.*/ aadDataSize = LAC_ALIGN_POW2_ROUNDUP(aadDataSize, LAC_HASH_AES_CCM_BLOCK_SIZE); if (aadDataSize > ICP_QAT_FW_CCM_GCM_AAD_SZ_MAX) { LAC_INVALID_PARAM_LOG("aadLenInBytes"); return CPA_STATUS_INVALID_PARAM; } } else if (CPA_CY_SYM_HASH_KASUMI_F9 == pHashSetupData->hashAlgorithm) { /* QAT-FW only supports 128 bit Integrity Key size for * Kasumi f9 * Ref: 3GPP TS 35.201 version 7.0.0 Release 7 */ if (pHashSetupData->authModeSetupData .authKeyLenInBytes != ICP_QAT_HW_KASUMI_KEY_SZ) { LAC_INVALID_PARAM_LOG("authKeyLenInBytes"); return CPA_STATUS_INVALID_PARAM; } } else if (CPA_CY_SYM_HASH_SNOW3G_UIA2 == pHashSetupData->hashAlgorithm) { /* QAT-FW only supports 128 bits Integrity Key size for * Snow3g */ if (pHashSetupData->authModeSetupData .authKeyLenInBytes != ICP_QAT_HW_SNOW_3G_UEA2_KEY_SZ) { LAC_INVALID_PARAM_LOG("authKeyLenInBytes"); return CPA_STATUS_INVALID_PARAM; } /* For Snow3g hash aad field contains IV - it needs to * be 16 bytes long */ if (pHashSetupData->authModeSetupData.aadLenInBytes != ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ) { LAC_INVALID_PARAM_LOG("aadLenInBytes"); return CPA_STATUS_INVALID_PARAM; } } else if (CPA_CY_SYM_HASH_AES_XCBC == pHashSetupData->hashAlgorithm || CPA_CY_SYM_HASH_AES_CMAC == pHashSetupData->hashAlgorithm || 
CPA_CY_SYM_HASH_AES_CBC_MAC == pHashSetupData->hashAlgorithm) { /* ensure auth key len is valid (128-bit keys supported) */ if ((pHashSetupData->authModeSetupData .authKeyLenInBytes != ICP_QAT_HW_AES_128_KEY_SZ)) { LAC_INVALID_PARAM_LOG("authKeyLenInBytes"); return CPA_STATUS_INVALID_PARAM; } } else if (CPA_CY_SYM_HASH_ZUC_EIA3 == pHashSetupData->hashAlgorithm) { /* QAT-FW only supports 128 bits Integrity Key size for * ZUC */ if (pHashSetupData->authModeSetupData .authKeyLenInBytes != ICP_QAT_HW_ZUC_3G_EEA3_KEY_SZ) { LAC_INVALID_PARAM_LOG("authKeyLenInBytes"); return CPA_STATUS_INVALID_PARAM; } /* For ZUC EIA3 hash aad field contains IV - it needs to * be 16 bytes long */ if (pHashSetupData->authModeSetupData.aadLenInBytes != ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ) { LAC_INVALID_PARAM_LOG("aadLenInBytes"); return CPA_STATUS_INVALID_PARAM; } } else if (CPA_CY_SYM_HASH_POLY == pHashSetupData->hashAlgorithm) { if (pHashSetupData->digestResultLenInBytes != ICP_QAT_HW_SPC_CTR_SZ) { LAC_INVALID_PARAM_LOG("Digest Length for CCP"); return CPA_STATUS_INVALID_PARAM; } if (pHashSetupData->authModeSetupData.aadLenInBytes > ICP_QAT_FW_CCM_GCM_AAD_SZ_MAX) { LAC_INVALID_PARAM_LOG("AAD Length for CCP"); return CPA_STATUS_INVALID_PARAM; } } else { /* The key size must be less than or equal the block * length */ if (pHashSetupData->authModeSetupData .authKeyLenInBytes > pHashAlgInfo->blockLength) { LAC_INVALID_PARAM_LOG("authKeyLenInBytes"); return CPA_STATUS_INVALID_PARAM; } } /* when the key size is greater than 0 check pointer is not null */ if (CPA_CY_SYM_HASH_AES_CCM != pHashSetupData->hashAlgorithm && CPA_CY_SYM_HASH_AES_GCM != pHashSetupData->hashAlgorithm && pHashSetupData->authModeSetupData.authKeyLenInBytes > 0) { LAC_CHECK_NULL_PARAM( pHashSetupData->authModeSetupData.authKey); } } else if (CPA_CY_SYM_HASH_MODE_NESTED == pHashSetupData->hashMode) { if (!CPA_BITMAP_BIT_TEST(capInfo.hashes, pHashSetupData->nestedModeSetupData .outerHashAlgorithm)) { LAC_INVALID_PARAM_LOG("outerHashAlgorithm"); return CPA_STATUS_INVALID_PARAM; } if (LAC_HASH_ALG_MODE_NOT_SUPPORTED( pHashSetupData->nestedModeSetupData.outerHashAlgorithm, pHashSetupData->hashMode)) { LAC_INVALID_PARAM_LOG( "outerHashAlgorithm and hashMode combination"); return CPA_STATUS_INVALID_PARAM; } LacSymQat_HashAlgLookupGet( instanceHandle, pHashSetupData->nestedModeSetupData.outerHashAlgorithm, &pOuterHashAlgInfo); /* Check Digest Length is permitted by the algorithm */ if ((0 == pHashSetupData->digestResultLenInBytes) || (pHashSetupData->digestResultLenInBytes > pOuterHashAlgInfo->digestLength)) { LAC_INVALID_PARAM_LOG("digestResultLenInBytes"); return CPA_STATUS_INVALID_PARAM; } if (pHashSetupData->nestedModeSetupData.innerPrefixLenInBytes > LAC_MAX_INNER_OUTER_PREFIX_SIZE_BYTES) { LAC_INVALID_PARAM_LOG("innerPrefixLenInBytes"); return CPA_STATUS_INVALID_PARAM; } if (pHashSetupData->nestedModeSetupData.innerPrefixLenInBytes > 0) { LAC_CHECK_NULL_PARAM(pHashSetupData->nestedModeSetupData .pInnerPrefixData); } if (pHashSetupData->nestedModeSetupData.outerPrefixLenInBytes > LAC_MAX_INNER_OUTER_PREFIX_SIZE_BYTES) { LAC_INVALID_PARAM_LOG("outerPrefixLenInBytes"); return CPA_STATUS_INVALID_PARAM; } if (pHashSetupData->nestedModeSetupData.outerPrefixLenInBytes > 0) { LAC_CHECK_NULL_PARAM(pHashSetupData->nestedModeSetupData .pOuterPrefixData); } } return CPA_STATUS_SUCCESS; } /** @ingroup LacHash */ CpaStatus LacHash_PerformParamCheck(CpaInstanceHandle instanceHandle, lac_session_desc_t *pSessionDesc, const CpaCySymOpData *pOpData, Cpa64U srcPktSize, const 
CpaBoolean *pVerifyResult) { CpaStatus status = CPA_STATUS_SUCCESS; lac_sym_qat_hash_alg_info_t *pHashAlgInfo = NULL; CpaBoolean digestIsAppended = pSessionDesc->digestIsAppended; CpaBoolean digestVerify = pSessionDesc->digestVerify; CpaCySymOp symOperation = pSessionDesc->symOperation; CpaCySymHashAlgorithm hashAlgorithm = pSessionDesc->hashAlgorithm; /* digestVerify and digestIsAppended on Hash-Only operation not * supported */ if (digestIsAppended && digestVerify && (CPA_CY_SYM_OP_HASH == symOperation)) { LAC_INVALID_PARAM_LOG( "digestVerify and digestIsAppended set " "on Hash-Only operation is not supported"); return CPA_STATUS_INVALID_PARAM; } /* check the digest result pointer */ if ((CPA_CY_SYM_PACKET_TYPE_PARTIAL != pOpData->packetType) && !digestIsAppended && (NULL == pOpData->pDigestResult)) { LAC_INVALID_PARAM_LOG("pDigestResult is NULL"); return CPA_STATUS_INVALID_PARAM; } /* * Check if the pVerifyResult pointer is not null for hash operation * when the packet is the last one and user has set verifyDigest flag * Also, this is only needed for synchronous operation, so check if the * callback pointer is the internal synchronous one rather than a user- * supplied one. */ if ((CPA_TRUE == digestVerify) && (CPA_CY_SYM_PACKET_TYPE_PARTIAL != pOpData->packetType) && (LacSync_GenBufListVerifyCb == pSessionDesc->pSymCb)) { if (NULL == pVerifyResult) { LAC_INVALID_PARAM_LOG( "Null pointer pVerifyResult for hash op"); return CPA_STATUS_INVALID_PARAM; } } /* verify start offset + messageLenToDigest is inside the source packet. * this also verifies that the start offset is inside the packet * Note: digest is specified as a pointer therefore it can be * written anywhere so we cannot check for this being inside a buffer * CCM/GCM specify the auth region using just the cipher params as this * region is the same for auth and cipher. It is not checked here */ if ((CPA_CY_SYM_HASH_AES_CCM == hashAlgorithm) || (CPA_CY_SYM_HASH_AES_GCM == hashAlgorithm)) { /* ensure AAD data pointer is non-NULL if AAD len > 0 */ if ((pSessionDesc->aadLenInBytes > 0) && (NULL == pOpData->pAdditionalAuthData)) { LAC_INVALID_PARAM_LOG("pAdditionalAuthData is NULL"); return CPA_STATUS_INVALID_PARAM; } } else { if ((pOpData->hashStartSrcOffsetInBytes + pOpData->messageLenToHashInBytes) > srcPktSize) { LAC_INVALID_PARAM_LOG( "hashStartSrcOffsetInBytes + " "messageLenToHashInBytes > Src Buffer Packet Length"); return CPA_STATUS_INVALID_PARAM; } } /* For Snow3g & ZUC hash pAdditionalAuthData field * of OpData should contain IV */ if ((CPA_CY_SYM_HASH_SNOW3G_UIA2 == hashAlgorithm) || (CPA_CY_SYM_HASH_ZUC_EIA3 == hashAlgorithm)) { if (NULL == pOpData->pAdditionalAuthData) { LAC_INVALID_PARAM_LOG("pAdditionalAuthData is NULL"); return CPA_STATUS_INVALID_PARAM; } } /* partial packets need to be multiples of the algorithm block size in * hash only mode (except for final partial packet) */ if ((CPA_CY_SYM_PACKET_TYPE_PARTIAL == pOpData->packetType) && (CPA_CY_SYM_OP_HASH == symOperation)) { LacSymQat_HashAlgLookupGet(instanceHandle, hashAlgorithm, &pHashAlgInfo); /* check if the message is a multiple of the block size.
*/ - if ((pOpData->messageLenToHashInBytes % - pHashAlgInfo->blockLength) != 0) { - LAC_INVALID_PARAM_LOG( - "messageLenToHashInBytes not block size"); + if (pOpData->messageLenToHashInBytes % + pHashAlgInfo->blockLength != + 0) { + LAC_INVALID_PARAM_LOG2( + "message(%d) not block-size(%d) multiple", + pOpData->messageLenToHashInBytes, + pHashAlgInfo->blockLength); return CPA_STATUS_INVALID_PARAM; } } return status; } diff --git a/sys/dev/qat/qat_api/common/crypto/sym/qat/lac_sym_qat_cipher.c b/sys/dev/qat/qat_api/common/crypto/sym/qat/lac_sym_qat_cipher.c index c6fbeb1b888a..d8a7ac75aec1 100644 --- a/sys/dev/qat/qat_api/common/crypto/sym/qat/lac_sym_qat_cipher.c +++ b/sys/dev/qat/qat_api/common/crypto/sym/qat/lac_sym_qat_cipher.c @@ -1,1008 +1,1025 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright(c) 2007-2025 Intel Corporation */ /** *************************************************************************** * @file lac_sym_qat_cipher.c QAT-related support functions for Cipher * * @ingroup LacSymQat_Cipher * * @description Functions to support the QAT related operations for Cipher ***************************************************************************/ /* ******************************************************************************* * Include public/global header files ******************************************************************************* */ #include "cpa.h" #include "icp_accel_devices.h" #include "icp_adf_debug.h" #include "lac_sym_qat.h" #include "lac_sym_qat_cipher.h" #include "lac_mem.h" #include "lac_common.h" #include "cpa_cy_sym.h" #include "lac_sym_qat.h" #include "lac_sym_cipher_defs.h" #include "icp_qat_hw.h" #include "icp_qat_fw_la.h" #include "sal_hw_gen.h" #define LAC_UNUSED_POS_MASK 0x3 /***************************************************************************** * Internal data *****************************************************************************/ typedef enum _icp_qat_hw_key_depend { IS_KEY_DEP_NO = 0, IS_KEY_DEP_YES, } icp_qat_hw_key_depend; /* LAC_CIPHER_IS_XTS_MODE */ static const uint8_t key_size_xts[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ICP_QAT_HW_CIPHER_ALGO_AES128, /* ICP_QAT_HW_AES_128_XTS_KEY_SZ */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ICP_QAT_HW_CIPHER_ALGO_AES256 /* ICP_QAT_HW_AES_256_XTS_KEY_SZ */ }; /* LAC_CIPHER_IS_AES */ static const uint8_t key_size_aes[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ICP_QAT_HW_CIPHER_ALGO_AES128, /* ICP_QAT_HW_AES_128_KEY_SZ */ 0, 0, 0, 0, 0, 0, 0, ICP_QAT_HW_CIPHER_ALGO_AES192, /* ICP_QAT_HW_AES_192_KEY_SZ */ 0, 0, 0, 0, 0, 0, 0, ICP_QAT_HW_CIPHER_ALGO_AES256 /* ICP_QAT_HW_AES_256_KEY_SZ */ }; /* LAC_CIPHER_IS_AES_F8 */ static const uint8_t key_size_f8[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ICP_QAT_HW_CIPHER_ALGO_AES128, /* ICP_QAT_HW_AES_128_F8_KEY_SZ */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ICP_QAT_HW_CIPHER_ALGO_AES192, /* ICP_QAT_HW_AES_192_F8_KEY_SZ */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ICP_QAT_HW_CIPHER_ALGO_AES256 /* ICP_QAT_HW_AES_256_F8_KEY_SZ */ }; +/* This array must be kept aligned with CpaCySymCipherAlgorithm enum but + * offset by -1 as that enum starts at 1. LacSymQat_CipherGetCfgData() + * below relies on that alignment and uses that enum -1 to index into this + * array. 
+ */ typedef struct _icp_qat_hw_cipher_info { icp_qat_hw_cipher_algo_t algorithm; icp_qat_hw_cipher_mode_t mode; icp_qat_hw_cipher_convert_t key_convert[2]; icp_qat_hw_cipher_dir_t dir[2]; icp_qat_hw_key_depend isKeyLenDepend; const uint8_t *pAlgByKeySize; } icp_qat_hw_cipher_info; static const icp_qat_hw_cipher_info icp_qat_alg_info[] = { /* CPA_CY_SYM_CIPHER_NULL */ { ICP_QAT_HW_CIPHER_ALGO_NULL, ICP_QAT_HW_CIPHER_ECB_MODE, { ICP_QAT_HW_CIPHER_NO_CONVERT, ICP_QAT_HW_CIPHER_NO_CONVERT }, { ICP_QAT_HW_CIPHER_ENCRYPT, ICP_QAT_HW_CIPHER_DECRYPT }, IS_KEY_DEP_NO, NULL, }, /* CPA_CY_SYM_CIPHER_ARC4 */ { ICP_QAT_HW_CIPHER_ALGO_ARC4, ICP_QAT_HW_CIPHER_ECB_MODE, { ICP_QAT_HW_CIPHER_NO_CONVERT, ICP_QAT_HW_CIPHER_NO_CONVERT }, /* Streaming ciphers are a special case. Decrypt = encrypt */ { ICP_QAT_HW_CIPHER_ENCRYPT, ICP_QAT_HW_CIPHER_ENCRYPT }, IS_KEY_DEP_NO, NULL, }, /* CPA_CY_SYM_CIPHER_AES_ECB */ { ICP_QAT_HW_CIPHER_ALGO_AES128, ICP_QAT_HW_CIPHER_ECB_MODE, /* AES decrypt key needs to be reversed. Instead of reversing the * key at session registration, it is instead reversed on-the-fly by * setting the KEY_CONVERT bit here */ { ICP_QAT_HW_CIPHER_NO_CONVERT, ICP_QAT_HW_CIPHER_KEY_CONVERT }, { ICP_QAT_HW_CIPHER_ENCRYPT, ICP_QAT_HW_CIPHER_DECRYPT }, IS_KEY_DEP_YES, key_size_aes, }, /* CPA_CY_SYM_CIPHER_AES_CBC */ { ICP_QAT_HW_CIPHER_ALGO_AES128, ICP_QAT_HW_CIPHER_CBC_MODE, /* AES decrypt key needs to be reversed. Instead of reversing the * key at session registration, it is instead reversed on-the-fly by * setting the KEY_CONVERT bit here */ { ICP_QAT_HW_CIPHER_NO_CONVERT, ICP_QAT_HW_CIPHER_KEY_CONVERT }, { ICP_QAT_HW_CIPHER_ENCRYPT, ICP_QAT_HW_CIPHER_DECRYPT }, IS_KEY_DEP_YES, key_size_aes, }, /* CPA_CY_SYM_CIPHER_AES_CTR */ { ICP_QAT_HW_CIPHER_ALGO_AES128, ICP_QAT_HW_CIPHER_CTR_MODE, /* AES decrypt key needs to be reversed. Instead of reversing the * key at session registration, it is instead reversed on-the-fly by * setting the KEY_CONVERT bit here */ { ICP_QAT_HW_CIPHER_NO_CONVERT, ICP_QAT_HW_CIPHER_NO_CONVERT }, /* Streaming ciphers are a special case. Decrypt = encrypt * Overriding default values previously set for AES */ { ICP_QAT_HW_CIPHER_ENCRYPT, ICP_QAT_HW_CIPHER_ENCRYPT }, IS_KEY_DEP_YES, key_size_aes, }, /* CPA_CY_SYM_CIPHER_AES_CCM */ { ICP_QAT_HW_CIPHER_ALGO_AES128, ICP_QAT_HW_CIPHER_CTR_MODE, /* AES decrypt key needs to be reversed. Instead of reversing the * key at session registration, it is instead reversed on-the-fly by * setting the KEY_CONVERT bit here */ { ICP_QAT_HW_CIPHER_NO_CONVERT, ICP_QAT_HW_CIPHER_NO_CONVERT }, /* Streaming ciphers are a special case. Decrypt = encrypt * Overriding default values previously set for AES */ { ICP_QAT_HW_CIPHER_ENCRYPT, ICP_QAT_HW_CIPHER_ENCRYPT }, IS_KEY_DEP_YES, key_size_aes, }, /* CPA_CY_SYM_CIPHER_AES_GCM */ { ICP_QAT_HW_CIPHER_ALGO_AES128, ICP_QAT_HW_CIPHER_CTR_MODE, /* AES decrypt key needs to be reversed. Instead of reversing the * key at session registration, it is instead reversed on-the-fly by * setting the KEY_CONVERT bit here */ { ICP_QAT_HW_CIPHER_NO_CONVERT, ICP_QAT_HW_CIPHER_NO_CONVERT }, /* Streaming ciphers are a special case. 
Decrypt = encrypt * Overriding default values previously set for AES */ { ICP_QAT_HW_CIPHER_ENCRYPT, ICP_QAT_HW_CIPHER_ENCRYPT }, IS_KEY_DEP_YES, key_size_aes, }, /* CPA_CY_SYM_CIPHER_DES_ECB */ { ICP_QAT_HW_CIPHER_ALGO_DES, ICP_QAT_HW_CIPHER_ECB_MODE, { ICP_QAT_HW_CIPHER_NO_CONVERT, ICP_QAT_HW_CIPHER_NO_CONVERT }, { ICP_QAT_HW_CIPHER_ENCRYPT, ICP_QAT_HW_CIPHER_DECRYPT }, IS_KEY_DEP_NO, NULL, }, /* CPA_CY_SYM_CIPHER_DES_CBC */ { ICP_QAT_HW_CIPHER_ALGO_DES, ICP_QAT_HW_CIPHER_CBC_MODE, { ICP_QAT_HW_CIPHER_NO_CONVERT, ICP_QAT_HW_CIPHER_NO_CONVERT }, { ICP_QAT_HW_CIPHER_ENCRYPT, ICP_QAT_HW_CIPHER_DECRYPT }, IS_KEY_DEP_NO, NULL, }, /* CPA_CY_SYM_CIPHER_3DES_ECB */ { ICP_QAT_HW_CIPHER_ALGO_3DES, ICP_QAT_HW_CIPHER_ECB_MODE, { ICP_QAT_HW_CIPHER_NO_CONVERT, ICP_QAT_HW_CIPHER_NO_CONVERT }, { ICP_QAT_HW_CIPHER_ENCRYPT, ICP_QAT_HW_CIPHER_DECRYPT }, IS_KEY_DEP_NO, NULL, }, /* CPA_CY_SYM_CIPHER_3DES_CBC */ { ICP_QAT_HW_CIPHER_ALGO_3DES, ICP_QAT_HW_CIPHER_CBC_MODE, { ICP_QAT_HW_CIPHER_NO_CONVERT, ICP_QAT_HW_CIPHER_NO_CONVERT }, { ICP_QAT_HW_CIPHER_ENCRYPT, ICP_QAT_HW_CIPHER_DECRYPT }, IS_KEY_DEP_NO, NULL, }, /* CPA_CY_SYM_CIPHER_3DES_CTR */ { ICP_QAT_HW_CIPHER_ALGO_3DES, ICP_QAT_HW_CIPHER_CTR_MODE, { ICP_QAT_HW_CIPHER_NO_CONVERT, ICP_QAT_HW_CIPHER_NO_CONVERT }, /* Streaming ciphers are a special case. Decrypt = encrypt * Overriding default values previously set for AES */ { ICP_QAT_HW_CIPHER_ENCRYPT, ICP_QAT_HW_CIPHER_ENCRYPT }, IS_KEY_DEP_NO, NULL, }, /* CPA_CY_SYM_CIPHER_KASUMI_F8 */ { ICP_QAT_HW_CIPHER_ALGO_KASUMI, ICP_QAT_HW_CIPHER_F8_MODE, { ICP_QAT_HW_CIPHER_NO_CONVERT, ICP_QAT_HW_CIPHER_NO_CONVERT }, /* Streaming ciphers are a special case. Decrypt = encrypt */ { ICP_QAT_HW_CIPHER_ENCRYPT, ICP_QAT_HW_CIPHER_ENCRYPT }, IS_KEY_DEP_NO, NULL, }, /* CPA_CY_SYM_CIPHER_SNOW3G_UEA2 */ { /* The KEY_CONVERT bit has to be set for Snow_3G operation */ ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2, ICP_QAT_HW_CIPHER_ECB_MODE, { ICP_QAT_HW_CIPHER_KEY_CONVERT, ICP_QAT_HW_CIPHER_KEY_CONVERT }, { ICP_QAT_HW_CIPHER_ENCRYPT, ICP_QAT_HW_CIPHER_DECRYPT }, IS_KEY_DEP_NO, NULL, }, /* CPA_CY_SYM_CIPHER_AES_F8 */ { ICP_QAT_HW_CIPHER_ALGO_AES128, ICP_QAT_HW_CIPHER_F8_MODE, { ICP_QAT_HW_CIPHER_NO_CONVERT, ICP_QAT_HW_CIPHER_NO_CONVERT }, /* Streaming ciphers are a special case. Decrypt = encrypt */ { ICP_QAT_HW_CIPHER_ENCRYPT, ICP_QAT_HW_CIPHER_ENCRYPT }, IS_KEY_DEP_YES, key_size_f8, }, /* CPA_CY_SYM_CIPHER_AES_XTS */ { ICP_QAT_HW_CIPHER_ALGO_AES128, ICP_QAT_HW_CIPHER_XTS_MODE, /* AES decrypt key needs to be reversed. 
Instead of reversing the * key at session registration, it is instead reversed on-the-fly by * setting the KEY_CONVERT bit here */ { ICP_QAT_HW_CIPHER_NO_CONVERT, ICP_QAT_HW_CIPHER_KEY_CONVERT }, { ICP_QAT_HW_CIPHER_ENCRYPT, ICP_QAT_HW_CIPHER_DECRYPT }, IS_KEY_DEP_YES, key_size_xts, }, /* CPA_CY_SYM_CIPHER_ZUC_EEA3 */ { ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3, ICP_QAT_HW_CIPHER_ECB_MODE, { ICP_QAT_HW_CIPHER_KEY_CONVERT, ICP_QAT_HW_CIPHER_KEY_CONVERT }, { ICP_QAT_HW_CIPHER_ENCRYPT, ICP_QAT_HW_CIPHER_DECRYPT }, IS_KEY_DEP_NO, NULL, }, /* CPA_CY_SYM_CIPHER_CHACHA */ { ICP_QAT_HW_CIPHER_ALGO_CHACHA20_POLY1305, ICP_QAT_HW_CIPHER_CTR_MODE, { ICP_QAT_HW_CIPHER_NO_CONVERT, ICP_QAT_HW_CIPHER_NO_CONVERT }, { ICP_QAT_HW_CIPHER_ENCRYPT, ICP_QAT_HW_CIPHER_DECRYPT }, IS_KEY_DEP_NO, NULL, }, /* CPA_CY_SYM_CIPHER_SM4_ECB */ { ICP_QAT_HW_CIPHER_ALGO_SM4, ICP_QAT_HW_CIPHER_ECB_MODE, { ICP_QAT_HW_CIPHER_NO_CONVERT, ICP_QAT_HW_CIPHER_KEY_CONVERT }, { ICP_QAT_HW_CIPHER_ENCRYPT, ICP_QAT_HW_CIPHER_DECRYPT }, IS_KEY_DEP_NO, NULL, }, /* CPA_CY_SYM_CIPHER_SM4_CBC */ { ICP_QAT_HW_CIPHER_ALGO_SM4, ICP_QAT_HW_CIPHER_CBC_MODE, { ICP_QAT_HW_CIPHER_NO_CONVERT, ICP_QAT_HW_CIPHER_KEY_CONVERT }, { ICP_QAT_HW_CIPHER_ENCRYPT, ICP_QAT_HW_CIPHER_DECRYPT }, IS_KEY_DEP_NO, NULL, }, /* CPA_CY_SYM_CIPHER_SM4_CTR */ { ICP_QAT_HW_CIPHER_ALGO_SM4, ICP_QAT_HW_CIPHER_CTR_MODE, { ICP_QAT_HW_CIPHER_NO_CONVERT, ICP_QAT_HW_CIPHER_NO_CONVERT }, { ICP_QAT_HW_CIPHER_ENCRYPT, ICP_QAT_HW_CIPHER_ENCRYPT }, IS_KEY_DEP_NO, NULL, }, }; /***************************************************************************** * Internal functions *****************************************************************************/ void LacSymQat_CipherCtrlBlockWrite(icp_qat_la_bulk_req_ftr_t *pMsg, Cpa32U cipherAlgorithm, Cpa32U targetKeyLenInBytes, Cpa32U sliceType, icp_qat_fw_slice_t nextSlice, Cpa8U cipherCfgOffsetInQuadWord) { icp_qat_fw_cipher_cd_ctrl_hdr_t *cd_ctrl = (icp_qat_fw_cipher_cd_ctrl_hdr_t *)&(pMsg->cd_ctrl); /* state_padding_sz is nonzero for f8 mode only */ cd_ctrl->cipher_padding_sz = 0; /* Special handling of AES 192 key for UCS slice. UCS requires it to have 32 bytes - set is as targetKeyLen in this case, and add padding. It makes no sense to force applications to provide such key length for couple reasons: 1. It won't be possible to distinguish between AES 192 and 256 based on key length only 2. 
Only some modes of AES will use UCS slice, then application will have to know which ones */ if (ICP_QAT_FW_LA_USE_UCS_SLICE_TYPE == sliceType && ICP_QAT_HW_AES_192_KEY_SZ == targetKeyLenInBytes) { targetKeyLenInBytes = ICP_QAT_HW_UCS_AES_192_KEY_SZ; } switch (cipherAlgorithm) { /* Base Key is not passed down to QAT in the case of ARC4 or NULL */ case CPA_CY_SYM_CIPHER_ARC4: case CPA_CY_SYM_CIPHER_NULL: cd_ctrl->cipher_key_sz = 0; break; case CPA_CY_SYM_CIPHER_KASUMI_F8: cd_ctrl->cipher_key_sz = LAC_BYTES_TO_QUADWORDS(ICP_QAT_HW_KASUMI_F8_KEY_SZ); cd_ctrl->cipher_padding_sz = ICP_QAT_HW_MODE_F8_NUM_REG_TO_CLEAR; break; /* For Snow3G UEA2 content descriptor key size is key size plus iv size */ case CPA_CY_SYM_CIPHER_SNOW3G_UEA2: cd_ctrl->cipher_key_sz = LAC_BYTES_TO_QUADWORDS(ICP_QAT_HW_SNOW_3G_UEA2_KEY_SZ + ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ); break; case CPA_CY_SYM_CIPHER_AES_F8: cd_ctrl->cipher_key_sz = LAC_BYTES_TO_QUADWORDS(targetKeyLenInBytes); cd_ctrl->cipher_padding_sz = (2 * ICP_QAT_HW_MODE_F8_NUM_REG_TO_CLEAR); break; /* For ZUC EEA3 content descriptor key size is key size plus iv size */ case CPA_CY_SYM_CIPHER_ZUC_EEA3: cd_ctrl->cipher_key_sz = LAC_BYTES_TO_QUADWORDS(ICP_QAT_HW_ZUC_3G_EEA3_KEY_SZ + ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ); break; default: cd_ctrl->cipher_key_sz = LAC_BYTES_TO_QUADWORDS(targetKeyLenInBytes); } cd_ctrl->cipher_state_sz = LAC_BYTES_TO_QUADWORDS( LacSymQat_CipherIvSizeBytesGet(cipherAlgorithm)); cd_ctrl->cipher_cfg_offset = cipherCfgOffsetInQuadWord; ICP_QAT_FW_COMN_NEXT_ID_SET(cd_ctrl, nextSlice); ICP_QAT_FW_COMN_CURR_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_CIPHER); } void LacSymQat_CipherGetCfgData(lac_session_desc_t *pSession, icp_qat_hw_cipher_algo_t *pAlgorithm, icp_qat_hw_cipher_mode_t *pMode, icp_qat_hw_cipher_dir_t *pDir, icp_qat_hw_cipher_convert_t *pKey_convert) { sal_crypto_service_t *pService = (sal_crypto_service_t *)pSession->pInstance; - CpaCySymCipherAlgorithm cipherAlgorithm = 0; + int cipherIdx = 0; icp_qat_hw_cipher_dir_t cipherDirection = 0; /* Set defaults */ *pKey_convert = ICP_QAT_HW_CIPHER_NO_CONVERT; *pAlgorithm = ICP_QAT_HW_CIPHER_ALGO_NULL; *pMode = ICP_QAT_HW_CIPHER_ECB_MODE; *pDir = ICP_QAT_HW_CIPHER_ENCRYPT; - /* decrease since it's numbered from 1 instead of 0 */ - cipherAlgorithm = pSession->cipherAlgorithm - 1; + /* offset index as CpaCySymCipherAlgorithm enum starts from 1, not from + * 0 */ + cipherIdx = pSession->cipherAlgorithm - 1; cipherDirection = pSession->cipherDirection == CPA_CY_SYM_CIPHER_DIRECTION_ENCRYPT ? 
ICP_QAT_HW_CIPHER_ENCRYPT : ICP_QAT_HW_CIPHER_DECRYPT; - *pAlgorithm = icp_qat_alg_info[cipherAlgorithm].algorithm; - *pMode = icp_qat_alg_info[cipherAlgorithm].mode; - *pDir = icp_qat_alg_info[cipherAlgorithm].dir[cipherDirection]; + /* Boundary check against the last value in the algorithm enum */ + if (!(pSession->cipherAlgorithm <= CPA_CY_SYM_CIPHER_SM4_CTR)) { + QAT_UTILS_LOG("Invalid cipherAlgorithm value\n"); + return; + } + + if (!(cipherDirection <= ICP_QAT_HW_CIPHER_DECRYPT)) { + QAT_UTILS_LOG("Invalid cipherDirection value\n"); + return; + } + + *pAlgorithm = icp_qat_alg_info[cipherIdx].algorithm; + *pMode = icp_qat_alg_info[cipherIdx].mode; + *pDir = icp_qat_alg_info[cipherIdx].dir[cipherDirection]; *pKey_convert = - icp_qat_alg_info[cipherAlgorithm].key_convert[cipherDirection]; + icp_qat_alg_info[cipherIdx].key_convert[cipherDirection]; - if (IS_KEY_DEP_NO != icp_qat_alg_info[cipherAlgorithm].isKeyLenDepend) { - *pAlgorithm = icp_qat_alg_info[cipherAlgorithm] + if (IS_KEY_DEP_NO != icp_qat_alg_info[cipherIdx].isKeyLenDepend) { + *pAlgorithm = icp_qat_alg_info[cipherIdx] .pAlgByKeySize[pSession->cipherKeyLenInBytes]; } /* CCP and AES_GCM single pass, despite being limited to CTR/AEAD mode, * support both Encrypt/Decrypt modes - this is because of the * differences in the hash computation/verification paths in * encrypt/decrypt modes respectively. * By default CCP is set as CTR Mode.Set AEAD Mode for AES_GCM. */ if (SPC == pSession->singlePassState) { if (LAC_CIPHER_IS_GCM(pSession->cipherAlgorithm)) *pMode = ICP_QAT_HW_CIPHER_AEAD_MODE; else if (isCyGen4x(pService) && LAC_CIPHER_IS_CCM(pSession->cipherAlgorithm)) *pMode = ICP_QAT_HW_CIPHER_CCM_MODE; if (cipherDirection == ICP_QAT_HW_CIPHER_DECRYPT) *pDir = ICP_QAT_HW_CIPHER_DECRYPT; } } void LacSymQat_CipherHwBlockPopulateCfgData(lac_session_desc_t *pSession, const void *pCipherHwBlock, Cpa32U *pSizeInBytes) { icp_qat_hw_cipher_algo_t algorithm = ICP_QAT_HW_CIPHER_ALGO_NULL; icp_qat_hw_cipher_mode_t mode = ICP_QAT_HW_CIPHER_ECB_MODE; icp_qat_hw_cipher_dir_t dir = ICP_QAT_HW_CIPHER_ENCRYPT; icp_qat_hw_cipher_convert_t key_convert; icp_qat_hw_cipher_config_t *pCipherConfig = (icp_qat_hw_cipher_config_t *)pCipherHwBlock; icp_qat_hw_ucs_cipher_config_t *pUCSCipherConfig = (icp_qat_hw_ucs_cipher_config_t *)pCipherHwBlock; Cpa32U val, reserved; Cpa32U aed_hash_cmp_length = 0; *pSizeInBytes = 0; LacSymQat_CipherGetCfgData( pSession, &algorithm, &mode, &dir, &key_convert); /* Build the cipher config into the hardware setup block */ if (SPC == pSession->singlePassState) { aed_hash_cmp_length = pSession->hashResultSize; reserved = ICP_QAT_HW_CIPHER_CONFIG_BUILD_UPPER( pSession->aadLenInBytes); } else { reserved = 0; } val = ICP_QAT_HW_CIPHER_CONFIG_BUILD( mode, algorithm, key_convert, dir, aed_hash_cmp_length); /* UCS slice has 128-bit configuration register. 
Leacy cipher slice has 64-bit config register */ if (ICP_QAT_FW_LA_USE_UCS_SLICE_TYPE == pSession->cipherSliceType) { pUCSCipherConfig->val = val; pUCSCipherConfig->reserved[0] = reserved; pUCSCipherConfig->reserved[1] = 0; pUCSCipherConfig->reserved[2] = 0; *pSizeInBytes = sizeof(icp_qat_hw_ucs_cipher_config_t); } else { pCipherConfig->val = val; pCipherConfig->reserved = reserved; *pSizeInBytes = sizeof(icp_qat_hw_cipher_config_t); } } void LacSymQat_CipherHwBlockPopulateKeySetup( lac_session_desc_t *pSessionDesc, const CpaCySymCipherSetupData *pCipherSetupData, Cpa32U targetKeyLenInBytes, Cpa32U sliceType, const void *pCipherHwBlock, Cpa32U *pSizeInBytes) { Cpa8U *pCipherKey = (Cpa8U *)pCipherHwBlock; Cpa32U actualKeyLenInBytes = pCipherSetupData->cipherKeyLenInBytes; *pSizeInBytes = 0; /* Key is copied into content descriptor for all cases except for * Arc4 and Null cipher */ if (!(LAC_CIPHER_IS_ARC4(pCipherSetupData->cipherAlgorithm) || LAC_CIPHER_IS_NULL(pCipherSetupData->cipherAlgorithm))) { /* Special handling of AES 192 key for UCS slice. UCS requires it to have 32 bytes - set is as targetKeyLen in this case, and add padding. It makes no sense to force applications to provide such key length for couple reasons: 1. It won't be possible to distinguish between AES 192 and 256 based on key length only 2. Only some modes of AES will use UCS slice, then application will have to know which ones */ if (ICP_QAT_FW_LA_USE_UCS_SLICE_TYPE == sliceType && ICP_QAT_HW_AES_192_KEY_SZ == targetKeyLenInBytes) { targetKeyLenInBytes = ICP_QAT_HW_UCS_AES_192_KEY_SZ; } /* Set the Cipher key field in the cipher block */ memcpy(pCipherKey, pCipherSetupData->pCipherKey, actualKeyLenInBytes); /* Pad the key with 0's if required */ if (0 < (targetKeyLenInBytes - actualKeyLenInBytes)) { LAC_OS_BZERO(pCipherKey + actualKeyLenInBytes, targetKeyLenInBytes - actualKeyLenInBytes); } *pSizeInBytes += targetKeyLenInBytes; switch (pCipherSetupData->cipherAlgorithm) { /* For Kasumi in F8 mode Cipher Key is concatenated with * Cipher Key XOR-ed with Key Modifier (CK||CK^KM) */ case CPA_CY_SYM_CIPHER_KASUMI_F8: { Cpa32U wordIndex = 0; Cpa32U *pu32CipherKey = (Cpa32U *)pCipherSetupData->pCipherKey; Cpa32U *pTempKey = (Cpa32U *)(pCipherKey + targetKeyLenInBytes); /* XOR Key with KASUMI F8 key modifier at 4 bytes level */ for (wordIndex = 0; wordIndex < LAC_BYTES_TO_LONGWORDS(targetKeyLenInBytes); wordIndex++) { pTempKey[wordIndex] = pu32CipherKey[wordIndex] ^ LAC_CIPHER_KASUMI_F8_KEY_MODIFIER_4_BYTES; } *pSizeInBytes += targetKeyLenInBytes; /* also add padding for F8 */ *pSizeInBytes += LAC_QUADWORDS_TO_BYTES( ICP_QAT_HW_MODE_F8_NUM_REG_TO_CLEAR); LAC_OS_BZERO((Cpa8U *)pTempKey + targetKeyLenInBytes, LAC_QUADWORDS_TO_BYTES( ICP_QAT_HW_MODE_F8_NUM_REG_TO_CLEAR)); } break; /* For AES in F8 mode Cipher Key is concatenated with * Cipher Key XOR-ed with Key Mask (CK||CK^KM) */ case CPA_CY_SYM_CIPHER_AES_F8: { Cpa32U index = 0; Cpa8U *pTempKey = pCipherKey + (targetKeyLenInBytes / 2); *pSizeInBytes += targetKeyLenInBytes; /* XOR Key with key Mask */ for (index = 0; index < targetKeyLenInBytes; index++) { pTempKey[index] = pCipherKey[index] ^ pTempKey[index]; } pTempKey = (pCipherKey + targetKeyLenInBytes); /* also add padding for AES F8 */ *pSizeInBytes += 2 * targetKeyLenInBytes; LAC_OS_BZERO(pTempKey, 2 * targetKeyLenInBytes); } break; case CPA_CY_SYM_CIPHER_SNOW3G_UEA2: { /* For Snow3G zero area after the key for FW */ LAC_OS_BZERO(pCipherKey + targetKeyLenInBytes, ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ); *pSizeInBytes += 
ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ; } break; case CPA_CY_SYM_CIPHER_ZUC_EEA3: { /* For ZUC zero area after the key for FW */ LAC_OS_BZERO(pCipherKey + targetKeyLenInBytes, ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ); *pSizeInBytes += ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ; } break; case CPA_CY_SYM_CIPHER_AES_XTS: { /* For AES in XTS mode Cipher Key is concatenated with * second Cipher Key which is used for tweak calculation * (CK1||CK2). For decryption Cipher Key needs to be * converted to reverse key.*/ if (ICP_QAT_FW_LA_USE_UCS_SLICE_TYPE == sliceType) { Cpa32U key_len = pCipherSetupData->cipherKeyLenInBytes / 2; memcpy(pSessionDesc->cipherAesXtsKey1Forward, pCipherSetupData->pCipherKey, key_len); qatUtilsAESKeyExpansionForward( pSessionDesc->cipherAesXtsKey1Forward, key_len, (uint32_t *) pSessionDesc->cipherAesXtsKey1Reverse); memcpy(pSessionDesc->cipherAesXtsKey2, pCipherSetupData->pCipherKey + key_len, key_len); if (CPA_CY_SYM_CIPHER_DIRECTION_DECRYPT == pCipherSetupData->cipherDirection) { memcpy(pCipherKey, pSessionDesc ->cipherAesXtsKey1Reverse, key_len); } else { memcpy(pCipherKey, pSessionDesc ->cipherAesXtsKey1Forward, key_len); } } } break; default: break; } } } /***************************************************************************** * External functions *****************************************************************************/ Cpa8U LacSymQat_CipherBlockSizeBytesGet(CpaCySymCipherAlgorithm cipherAlgorithm) { Cpa8U blockSize = 0; switch (cipherAlgorithm) { case CPA_CY_SYM_CIPHER_ARC4: blockSize = LAC_CIPHER_ARC4_BLOCK_LEN_BYTES; break; /* Handle AES or AES_F8 */ case CPA_CY_SYM_CIPHER_AES_ECB: case CPA_CY_SYM_CIPHER_AES_CBC: case CPA_CY_SYM_CIPHER_AES_CTR: case CPA_CY_SYM_CIPHER_AES_CCM: case CPA_CY_SYM_CIPHER_AES_GCM: case CPA_CY_SYM_CIPHER_AES_XTS: case CPA_CY_SYM_CIPHER_AES_F8: blockSize = ICP_QAT_HW_AES_BLK_SZ; break; /* Handle DES */ case CPA_CY_SYM_CIPHER_DES_ECB: case CPA_CY_SYM_CIPHER_DES_CBC: blockSize = ICP_QAT_HW_DES_BLK_SZ; break; /* Handle TRIPLE DES */ case CPA_CY_SYM_CIPHER_3DES_ECB: case CPA_CY_SYM_CIPHER_3DES_CBC: case CPA_CY_SYM_CIPHER_3DES_CTR: blockSize = ICP_QAT_HW_3DES_BLK_SZ; break; case CPA_CY_SYM_CIPHER_KASUMI_F8: blockSize = ICP_QAT_HW_KASUMI_BLK_SZ; break; case CPA_CY_SYM_CIPHER_SNOW3G_UEA2: blockSize = ICP_QAT_HW_SNOW_3G_BLK_SZ; break; case CPA_CY_SYM_CIPHER_ZUC_EEA3: blockSize = ICP_QAT_HW_ZUC_3G_BLK_SZ; break; case CPA_CY_SYM_CIPHER_NULL: blockSize = LAC_CIPHER_NULL_BLOCK_LEN_BYTES; break; case CPA_CY_SYM_CIPHER_CHACHA: blockSize = ICP_QAT_HW_CHACHAPOLY_BLK_SZ; break; case CPA_CY_SYM_CIPHER_SM4_ECB: case CPA_CY_SYM_CIPHER_SM4_CBC: case CPA_CY_SYM_CIPHER_SM4_CTR: blockSize = ICP_QAT_HW_SM4_BLK_SZ; break; default: QAT_UTILS_LOG("Algorithm not supported in Cipher"); } return blockSize; } Cpa32U LacSymQat_CipherIvSizeBytesGet(CpaCySymCipherAlgorithm cipherAlgorithm) { Cpa32U ivSize = 0; switch (cipherAlgorithm) { case CPA_CY_SYM_CIPHER_ARC4: ivSize = LAC_CIPHER_ARC4_STATE_LEN_BYTES; break; case CPA_CY_SYM_CIPHER_KASUMI_F8: ivSize = ICP_QAT_HW_KASUMI_BLK_SZ; break; case CPA_CY_SYM_CIPHER_SNOW3G_UEA2: ivSize = ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ; break; case CPA_CY_SYM_CIPHER_ZUC_EEA3: ivSize = ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ; break; case CPA_CY_SYM_CIPHER_CHACHA: ivSize = ICP_QAT_HW_CHACHAPOLY_IV_SZ; break; case CPA_CY_SYM_CIPHER_AES_ECB: case CPA_CY_SYM_CIPHER_DES_ECB: case CPA_CY_SYM_CIPHER_3DES_ECB: case CPA_CY_SYM_CIPHER_SM4_ECB: case CPA_CY_SYM_CIPHER_NULL: /* for all ECB Mode IV size is 0 */ break; default: ivSize = LacSymQat_CipherBlockSizeBytesGet(cipherAlgorithm); } 
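	/*
	 * Illustrative note, not driver logic: for the block-mode ciphers that
	 * fall through to the default case above, the IV length simply equals
	 * the cipher block size returned by LacSymQat_CipherBlockSizeBytesGet()
	 * (e.g. 16 bytes for AES-CBC), while the stream/AEAD algorithms use the
	 * fixed sizes handled explicitly (e.g. ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ).
	 */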
return ivSize; } inline CpaStatus LacSymQat_CipherRequestParamsPopulate(lac_session_desc_t *pSessionDesc, icp_qat_fw_la_bulk_req_t *pReq, Cpa32U cipherOffsetInBytes, Cpa32U cipherLenInBytes, Cpa64U ivBufferPhysAddr, Cpa8U *pIvBufferVirt) { icp_qat_fw_la_cipher_req_params_t *pCipherReqParams; icp_qat_fw_cipher_cd_ctrl_hdr_t *pCipherCdCtrlHdr; icp_qat_fw_serv_specif_flags *pCipherSpecificFlags; Cpa32U usedBufSize = 0; Cpa32U totalBufSize = 0; pCipherReqParams = (icp_qat_fw_la_cipher_req_params_t *)((Cpa8U *)&(pReq->serv_specif_rqpars) + ICP_QAT_FW_CIPHER_REQUEST_PARAMETERS_OFFSET); pCipherCdCtrlHdr = (icp_qat_fw_cipher_cd_ctrl_hdr_t *)&(pReq->cd_ctrl); pCipherSpecificFlags = &(pReq->comn_hdr.serv_specif_flags); pCipherReqParams->cipher_offset = cipherOffsetInBytes; pCipherReqParams->cipher_length = cipherLenInBytes; /* Don't copy the buffer into the Msg if * it's too big for the cipher_IV_array * OR if the FW needs to update it * OR if there's no buffer supplied * OR if last partial */ if ((pCipherCdCtrlHdr->cipher_state_sz > LAC_SYM_QAT_HASH_IV_REQ_MAX_SIZE_QW) || (ICP_QAT_FW_LA_UPDATE_STATE_GET(*pCipherSpecificFlags) == ICP_QAT_FW_LA_UPDATE_STATE) || (pIvBufferVirt == NULL) || (ICP_QAT_FW_LA_PARTIAL_GET(*pCipherSpecificFlags) == ICP_QAT_FW_LA_PARTIAL_END)) { /* Populate the field with a ptr to the flat buffer */ pCipherReqParams->u.s.cipher_IV_ptr = ivBufferPhysAddr; pCipherReqParams->u.s.resrvd1 = 0; /* Set the flag indicating the field format */ ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET( *pCipherSpecificFlags, ICP_QAT_FW_CIPH_IV_64BIT_PTR); } else { /* Populate the field with the contents of the buffer, * zero field first as data may be smaller than the field */ /* In case of XTS mode using UCS slice always encrypt the embedded IV. * IV provided by user needs to be encrypted to calculate initial tweak, * use pCipherReqParams->u.cipher_IV_array as destination buffer for * tweak value */ if (ICP_QAT_FW_LA_USE_UCS_SLICE_TYPE == pSessionDesc->cipherSliceType && LAC_CIPHER_IS_XTS_MODE(pSessionDesc->cipherAlgorithm)) { memset(pCipherReqParams->u.cipher_IV_array, 0, LAC_LONGWORDS_TO_BYTES( ICP_QAT_FW_NUM_LONGWORDS_4)); qatUtilsAESEncrypt( pSessionDesc->cipherAesXtsKey2, pSessionDesc->cipherKeyLenInBytes / 2, pIvBufferVirt, (Cpa8U *)pCipherReqParams->u.cipher_IV_array); } else { totalBufSize = LAC_LONGWORDS_TO_BYTES(ICP_QAT_FW_NUM_LONGWORDS_4); usedBufSize = LAC_QUADWORDS_TO_BYTES( pCipherCdCtrlHdr->cipher_state_sz); /* Only initialise unused buffer if applicable*/ if (usedBufSize < totalBufSize) { memset( (&pCipherReqParams->u.cipher_IV_array [usedBufSize & LAC_UNUSED_POS_MASK]), 0, totalBufSize - usedBufSize); } memcpy(pCipherReqParams->u.cipher_IV_array, pIvBufferVirt, usedBufSize); } /* Set the flag indicating the field format */ ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET( *pCipherSpecificFlags, ICP_QAT_FW_CIPH_IV_16BYTE_DATA); } return CPA_STATUS_SUCCESS; } void LacSymQat_CipherArc4StateInit(const Cpa8U *pKey, Cpa32U keyLenInBytes, Cpa8U *pArc4CipherState) { Cpa32U i = 0; Cpa32U j = 0; Cpa32U k = 0; for (i = 0; i < LAC_CIPHER_ARC4_KEY_MATRIX_LEN_BYTES; ++i) { pArc4CipherState[i] = (Cpa8U)i; } for (i = 0, k = 0; i < LAC_CIPHER_ARC4_KEY_MATRIX_LEN_BYTES; ++i, ++k) { Cpa8U swap = 0; if (k >= keyLenInBytes) k -= keyLenInBytes; j = (j + pArc4CipherState[i] + pKey[k]); if (j >= LAC_CIPHER_ARC4_KEY_MATRIX_LEN_BYTES) j %= LAC_CIPHER_ARC4_KEY_MATRIX_LEN_BYTES; /* Swap state[i] & state[j] */ swap = pArc4CipherState[i]; pArc4CipherState[i] = pArc4CipherState[j]; pArc4CipherState[j] = swap; } /* Initialise i & j 
values for QAT */ pArc4CipherState[LAC_CIPHER_ARC4_KEY_MATRIX_LEN_BYTES] = 0; pArc4CipherState[LAC_CIPHER_ARC4_KEY_MATRIX_LEN_BYTES + 1] = 0; } /* Update the cipher_key_sz in the Request cache prepared and stored * in the session */ void LacSymQat_CipherXTSModeUpdateKeyLen(lac_session_desc_t *pSessionDesc, Cpa32U newKeySizeInBytes) { icp_qat_fw_cipher_cd_ctrl_hdr_t *pCipherControlBlock = NULL; pCipherControlBlock = (icp_qat_fw_cipher_cd_ctrl_hdr_t *)&( pSessionDesc->reqCacheFtr.cd_ctrl); pCipherControlBlock->cipher_key_sz = LAC_BYTES_TO_QUADWORDS(newKeySizeInBytes); } diff --git a/sys/dev/qat/qat_api/common/ctrl/sal_compression.c b/sys/dev/qat/qat_api/common/ctrl/sal_compression.c index c0f5a411d87e..e8ae47f0f0d3 100644 --- a/sys/dev/qat/qat_api/common/ctrl/sal_compression.c +++ b/sys/dev/qat/qat_api/common/ctrl/sal_compression.c @@ -1,1667 +1,1665 @@ /* SPDX-License-Identifier: BSD-3-Clause */ -/* Copyright(c) 2007-2022 Intel Corporation */ +/* Copyright(c) 2007-2025 Intel Corporation */ /** ***************************************************************************** * @file sal_compression.c * * @ingroup SalCtrl * * @description * This file contains the sal implementation for compression. * *****************************************************************************/ /* QAT-API includes */ #include "cpa.h" #include "cpa_dc.h" /* QAT utils includes */ #include "qat_utils.h" /* ADF includes */ #include "icp_adf_init.h" #include "icp_adf_transport.h" #include "icp_accel_devices.h" #include "icp_adf_cfg.h" #include "icp_adf_accel_mgr.h" #include "icp_adf_poll.h" #include "icp_adf_debug.h" #include "icp_adf_esram.h" #include "icp_qat_hw.h" /* SAL includes */ #include "lac_mem.h" #include "lac_common.h" #include "lac_mem_pools.h" #include "sal_statistics.h" #include "lac_list.h" #include "icp_sal_poll.h" #include "sal_types_compression.h" #include "dc_session.h" #include "dc_datapath.h" #include "dc_stats.h" #include "lac_sal.h" #include "lac_sal_ctrl.h" #include "sal_string_parse.h" #include "sal_service_state.h" #include "lac_buffer_desc.h" #include "icp_qat_fw_comp.h" #include "icp_qat_hw_20_comp_defs.h" #include "icp_sal_versions.h" /* C string null terminator size */ #define SAL_NULL_TERM_SIZE 1 /* * Prints statistics for a compression instance */ static int SalCtrl_CompresionDebug(void *private_data, char *data, int size, int offset) { sal_compression_service_t *pCompressionService = (sal_compression_service_t *)private_data; CpaStatus status = CPA_STATUS_SUCCESS; CpaDcStats dcStats = { 0 }; Cpa32S len = 0; status = cpaDcGetStats(pCompressionService, &dcStats); if (status != CPA_STATUS_SUCCESS) { QAT_UTILS_LOG("cpaDcGetStats returned error.\n"); return (-1); } /* Engine Info */ if (NULL != pCompressionService->debug_file) { len += snprintf(data + len, size - len, SEPARATOR BORDER " Statistics for Instance %24s | \n" SEPARATOR, pCompressionService->debug_file->name); } /* Perform Info */ len += snprintf(data + len, size - len, BORDER " DC comp Requests: %16llu " BORDER "\n" BORDER " DC comp Request Errors: %16llu " BORDER "\n" BORDER " DC comp Completed: %16llu " BORDER "\n" BORDER " DC comp Completed Errors: %16llu " BORDER "\n" SEPARATOR, (long long unsigned int)dcStats.numCompRequests, (long long unsigned int)dcStats.numCompRequestsErrors, (long long unsigned int)dcStats.numCompCompleted, (long long unsigned int)dcStats.numCompCompletedErrors); /* Perform Info */ len += snprintf( data + len, size - len, BORDER " DC decomp Requests: %16llu " BORDER "\n" BORDER " DC decomp Request 
Errors: %16llu " BORDER "\n" BORDER " DC decomp Completed: %16llu " BORDER "\n" BORDER " DC decomp Completed Errors: %16llu " BORDER "\n" SEPARATOR, (long long unsigned int)dcStats.numDecompRequests, (long long unsigned int)dcStats.numDecompRequestsErrors, (long long unsigned int)dcStats.numDecompCompleted, (long long unsigned int)dcStats.numDecompCompletedErrors); return 0; } /* Initialise device specific information needed by compression service */ static CpaStatus SalCtrl_CompressionInit_CompData(icp_accel_dev_t *device, sal_compression_service_t *pCompService) { int level = 0; pCompService->comp_device_data.asbEnableSupport = CPA_FALSE; pCompService->comp_device_data.uniqueCompressionLevels[0] = CPA_FALSE; switch (device->deviceType) { case DEVICE_DH895XCC: case DEVICE_DH895XCCVF: pCompService->generic_service_info.integrityCrcCheck = CPA_FALSE; pCompService->numInterBuffs = DC_QAT_MAX_NUM_INTER_BUFFERS_6COMP_SLICES; pCompService->comp_device_data.minOutputBuffSize = DC_DEST_BUFFER_STA_MIN_SIZE; pCompService->comp_device_data.oddByteDecompNobFinal = CPA_TRUE; pCompService->comp_device_data.oddByteDecompInterim = CPA_FALSE; pCompService->comp_device_data.translatorOverflow = CPA_FALSE; pCompService->comp_device_data.useDevRam = ICP_QAT_FW_COMP_ENABLE_SECURE_RAM_USED_AS_INTMD_BUF; pCompService->comp_device_data.enableDmm = ICP_QAT_HW_COMPRESSION_DELAYED_MATCH_DISABLED; pCompService->comp_device_data.inflateContextSize = DC_INFLATE_CONTEXT_SIZE; pCompService->comp_device_data.highestHwCompressionDepth = ICP_QAT_HW_COMPRESSION_DEPTH_16; pCompService->comp_device_data.windowSizeMask = (1 << DC_8K_WINDOW_SIZE | 1 << DC_32K_WINDOW_SIZE); pCompService->comp_device_data.cnvnrSupported = CPA_FALSE; for (level = CPA_DC_L1; level <= CPA_DC_L12; level++) { switch (level) { case CPA_DC_L1: case CPA_DC_L2: case CPA_DC_L3: case CPA_DC_L4: pCompService->comp_device_data .uniqueCompressionLevels[level] = CPA_TRUE; break; default: pCompService->comp_device_data .uniqueCompressionLevels[level] = CPA_FALSE; break; } } pCompService->comp_device_data.numCompressionLevels = DC_NUM_COMPRESSION_LEVELS; break; case DEVICE_C3XXX: case DEVICE_C3XXXVF: case DEVICE_200XX: case DEVICE_200XXVF: pCompService->generic_service_info.integrityCrcCheck = CPA_FALSE; pCompService->numInterBuffs = DC_QAT_MAX_NUM_INTER_BUFFERS_6COMP_SLICES; pCompService->comp_device_data.oddByteDecompNobFinal = CPA_FALSE; pCompService->comp_device_data.oddByteDecompInterim = CPA_TRUE; pCompService->comp_device_data.translatorOverflow = CPA_FALSE; pCompService->comp_device_data.useDevRam = ICP_QAT_FW_COMP_DISABLE_SECURE_RAM_USED_AS_INTMD_BUF; pCompService->comp_device_data.inflateContextSize = DC_INFLATE_EH_CONTEXT_SIZE; pCompService->comp_device_data.highestHwCompressionDepth = ICP_QAT_HW_COMPRESSION_DEPTH_16; pCompService->comp_device_data.windowSizeMask = (1 << DC_16K_WINDOW_SIZE | 1 << DC_32K_WINDOW_SIZE); pCompService->comp_device_data.minOutputBuffSize = DC_DEST_BUFFER_STA_MIN_SIZE; pCompService->comp_device_data.enableDmm = ICP_QAT_HW_COMPRESSION_DELAYED_MATCH_ENABLED; pCompService->comp_device_data.cnvnrSupported = CPA_TRUE; for (level = CPA_DC_L1; level <= CPA_DC_L12; level++) { switch (level) { case CPA_DC_L1: case CPA_DC_L2: case CPA_DC_L3: case CPA_DC_L4: pCompService->comp_device_data .uniqueCompressionLevels[level] = CPA_TRUE; break; default: pCompService->comp_device_data .uniqueCompressionLevels[level] = CPA_FALSE; break; } } pCompService->comp_device_data.numCompressionLevels = DC_NUM_COMPRESSION_LEVELS; break; case DEVICE_C62X: 
case DEVICE_C62XVF: pCompService->generic_service_info.integrityCrcCheck = CPA_FALSE; pCompService->numInterBuffs = DC_QAT_MAX_NUM_INTER_BUFFERS_10COMP_SLICES; pCompService->comp_device_data.oddByteDecompNobFinal = CPA_FALSE; pCompService->comp_device_data.oddByteDecompInterim = CPA_TRUE; pCompService->comp_device_data.translatorOverflow = CPA_FALSE; pCompService->comp_device_data.useDevRam = ICP_QAT_FW_COMP_ENABLE_SECURE_RAM_USED_AS_INTMD_BUF; pCompService->comp_device_data.inflateContextSize = DC_INFLATE_EH_CONTEXT_SIZE; pCompService->comp_device_data.highestHwCompressionDepth = ICP_QAT_HW_COMPRESSION_DEPTH_16; pCompService->comp_device_data.windowSizeMask = (1 << DC_4K_WINDOW_SIZE | 1 << DC_8K_WINDOW_SIZE | 1 << DC_16K_WINDOW_SIZE | 1 << DC_32K_WINDOW_SIZE); pCompService->comp_device_data.minOutputBuffSize = DC_DEST_BUFFER_STA_MIN_SIZE; pCompService->comp_device_data.minOutputBuffSizeDynamic = pCompService->comp_device_data.minOutputBuffSize; pCompService->comp_device_data.enableDmm = ICP_QAT_HW_COMPRESSION_DELAYED_MATCH_ENABLED; pCompService->comp_device_data.cnvnrSupported = CPA_TRUE; for (level = CPA_DC_L1; level <= CPA_DC_L12; level++) { switch (level) { case CPA_DC_L1: case CPA_DC_L2: case CPA_DC_L3: case CPA_DC_L4: pCompService->comp_device_data .uniqueCompressionLevels[level] = CPA_TRUE; break; default: pCompService->comp_device_data .uniqueCompressionLevels[level] = CPA_FALSE; break; } } pCompService->comp_device_data.numCompressionLevels = DC_NUM_COMPRESSION_LEVELS; break; case DEVICE_C4XXX: case DEVICE_C4XXXVF: pCompService->generic_service_info.integrityCrcCheck = CPA_TRUE; pCompService->numInterBuffs = DC_QAT_MAX_NUM_INTER_BUFFERS_24COMP_SLICES; pCompService->comp_device_data.minOutputBuffSize = DC_DEST_BUFFER_MIN_SIZE; pCompService->comp_device_data.oddByteDecompNobFinal = CPA_TRUE; pCompService->comp_device_data.oddByteDecompInterim = CPA_TRUE; pCompService->comp_device_data.translatorOverflow = CPA_TRUE; if (pCompService->generic_service_info.capabilitiesMask & ICP_ACCEL_CAPABILITIES_INLINE) { pCompService->comp_device_data.useDevRam = ICP_QAT_FW_COMP_DISABLE_SECURE_RAM_USED_AS_INTMD_BUF; } else { pCompService->comp_device_data.useDevRam = ICP_QAT_FW_COMP_ENABLE_SECURE_RAM_USED_AS_INTMD_BUF; } pCompService->comp_device_data.enableDmm = ICP_QAT_HW_COMPRESSION_DELAYED_MATCH_ENABLED; pCompService->comp_device_data.inflateContextSize = DC_INFLATE_EH_CONTEXT_SIZE; pCompService->comp_device_data.highestHwCompressionDepth = ICP_QAT_HW_COMPRESSION_DEPTH_128; pCompService->comp_device_data.windowSizeMask = (1 << DC_16K_WINDOW_SIZE | 1 << DC_32K_WINDOW_SIZE); pCompService->comp_device_data.cnvnrSupported = CPA_TRUE; for (level = CPA_DC_L1; level <= CPA_DC_L12; level++) { switch (level) { case CPA_DC_L1: case CPA_DC_L2: case CPA_DC_L3: case CPA_DC_L4: case CPA_DC_L5: pCompService->comp_device_data .uniqueCompressionLevels[level] = CPA_TRUE; break; default: pCompService->comp_device_data .uniqueCompressionLevels[level] = CPA_FALSE; break; } } pCompService->comp_device_data.numCompressionLevels = DC_NUM_COMPRESSION_LEVELS; break; case DEVICE_4XXX: case DEVICE_4XXXVF: pCompService->generic_service_info.integrityCrcCheck = CPA_TRUE; pCompService->numInterBuffs = 0; pCompService->comp_device_data.minOutputBuffSize = DC_DEST_BUFFER_STA_MIN_SIZE_GEN4; pCompService->comp_device_data.minOutputBuffSizeDynamic = DC_DEST_BUFFER_DYN_MIN_SIZE_GEN4; pCompService->comp_device_data.oddByteDecompNobFinal = CPA_TRUE; pCompService->comp_device_data.oddByteDecompInterim = CPA_FALSE; 
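		/*
		 * Illustrative note: windowSizeMask set below is a bit mask of
		 * the supported history window sizes, one bit per
		 * DC_*_WINDOW_SIZE value; e.g. (1 << DC_32K_WINDOW_SIZE) being
		 * set means a 32KB window is supported on this device.
		 */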
pCompService->comp_device_data.translatorOverflow = CPA_TRUE; pCompService->comp_device_data.useDevRam = ICP_QAT_FW_COMP_ENABLE_SECURE_RAM_USED_AS_INTMD_BUF; pCompService->comp_device_data.enableDmm = ICP_QAT_HW_COMPRESSION_DELAYED_MATCH_ENABLED; pCompService->comp_device_data.inflateContextSize = DC_INFLATE_CONTEXT_SIZE; pCompService->comp_device_data.highestHwCompressionDepth = ICP_QAT_HW_COMP_20_SEARCH_DEPTH_LEVEL_9; pCompService->comp_device_data.windowSizeMask = (1 << DC_4K_WINDOW_SIZE | 1 << DC_8K_WINDOW_SIZE | 1 << DC_16K_WINDOW_SIZE | 1 << DC_32K_WINDOW_SIZE); for (level = CPA_DC_L1; level <= CPA_DC_L12; level++) { switch (level) { case CPA_DC_L1: case CPA_DC_L6: case CPA_DC_L9: pCompService->comp_device_data .uniqueCompressionLevels[level] = CPA_TRUE; break; default: pCompService->comp_device_data .uniqueCompressionLevels[level] = CPA_FALSE; break; } } pCompService->comp_device_data.numCompressionLevels = DC_NUM_COMPRESSION_LEVELS; break; default: QAT_UTILS_LOG("Unknown device type! - %d.\n", device->deviceType); return CPA_STATUS_FAIL; } return CPA_STATUS_SUCCESS; } CpaStatus SalCtrl_CompressionInit(icp_accel_dev_t *device, sal_service_t *service) { CpaStatus status = CPA_STATUS_SUCCESS; Cpa32U numCompConcurrentReq = 0; Cpa32U request_ring_id = 0; Cpa32U response_ring_id = 0; char adfGetParam[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = { 0 }; char compMemPool[SAL_CFG_MAX_VAL_LEN_IN_BYTES] = { 0 }; char temp_string[SAL_CFG_MAX_VAL_LEN_IN_BYTES] = { 0 }; char temp_string2[SAL_CFG_MAX_VAL_LEN_IN_BYTES] = { 0 }; char *instance_name = NULL; sal_statistics_collection_t *pStatsCollection = (sal_statistics_collection_t *)device->pQatStats; icp_resp_deliv_method rx_resp_type = ICP_RESP_TYPE_IRQ; sal_compression_service_t *pCompressionService = (sal_compression_service_t *)service; Cpa32U msgSize = 0; char *section = DYN_SEC; SAL_SERVICE_GOOD_FOR_INIT(pCompressionService); - pCompressionService->generic_service_info.state = - SAL_SERVICE_STATE_INITIALIZING; - if (CPA_FALSE == pCompressionService->generic_service_info.is_dyn) { section = icpGetProcessName(); } if (pStatsCollection == NULL) { return CPA_STATUS_FAIL; } /* Get Config Info: Accel Num, bank Num, packageID, coreAffinity, nodeAffinity and response mode */ pCompressionService->acceleratorNum = 0; /* Initialise device specific compression data */ SalCtrl_CompressionInit_CompData(device, pCompressionService); status = Sal_StringParsing( "Dc", pCompressionService->generic_service_info.instance, "BankNumber", temp_string); LAC_CHECK_STATUS(status); status = icp_adf_cfgGetParamValue(device, section, temp_string, adfGetParam); if (CPA_STATUS_SUCCESS != status) { QAT_UTILS_LOG("Failed to get %s from configuration.\n", temp_string); return status; } pCompressionService->bankNum = Sal_Strtoul(adfGetParam, NULL, SAL_CFG_BASE_DEC); status = Sal_StringParsing( "Dc", pCompressionService->generic_service_info.instance, "IsPolled", temp_string); LAC_CHECK_STATUS(status); status = icp_adf_cfgGetParamValue(device, section, temp_string, adfGetParam); if (CPA_STATUS_SUCCESS != status) { QAT_UTILS_LOG("Failed to get %s from configuration.\n", temp_string); return status; } pCompressionService->isPolled = (Cpa8U)Sal_Strtoul(adfGetParam, NULL, SAL_CFG_BASE_DEC); /* User instances only support poll and epoll mode */ if (SAL_RESP_POLL_CFG_FILE != pCompressionService->isPolled) { QAT_UTILS_LOG( "IsPolled %u is not supported for user instance %s.\n", pCompressionService->isPolled, temp_string); return CPA_STATUS_FAIL; } if (SAL_RESP_POLL_CFG_FILE == 
pCompressionService->isPolled) { rx_resp_type = ICP_RESP_TYPE_POLL; } status = icp_adf_cfgGetParamValue(device, LAC_CFG_SECTION_GENERAL, ADF_DEV_PKG_ID, adfGetParam); if (CPA_STATUS_SUCCESS != status) { QAT_UTILS_LOG("Failed to get %s from configuration.\n", ADF_DEV_PKG_ID); return status; } pCompressionService->pkgID = (Cpa16U)Sal_Strtoul(adfGetParam, NULL, SAL_CFG_BASE_DEC); status = icp_adf_cfgGetParamValue(device, LAC_CFG_SECTION_GENERAL, ADF_DEV_NODE_ID, adfGetParam); if (CPA_STATUS_SUCCESS != status) { QAT_UTILS_LOG("Failed to get %s from configuration.\n", ADF_DEV_NODE_ID); return status; } pCompressionService->nodeAffinity = (Cpa32U)Sal_Strtoul(adfGetParam, NULL, SAL_CFG_BASE_DEC); /* In case of interrupt instance, use the bank affinity set by adf_ctl * Otherwise, use the instance affinity for backwards compatibility */ if (SAL_RESP_POLL_CFG_FILE != pCompressionService->isPolled) { /* Next need to read the [AcceleratorX] section of the config * file */ status = Sal_StringParsing("Accelerator", pCompressionService->acceleratorNum, "", temp_string2); LAC_CHECK_STATUS(status); status = Sal_StringParsing("Bank", pCompressionService->bankNum, "CoreAffinity", temp_string); LAC_CHECK_STATUS(status); } else { strncpy(temp_string2, section, sizeof(temp_string2) - SAL_NULL_TERM_SIZE); temp_string2[SAL_CFG_MAX_VAL_LEN_IN_BYTES - SAL_NULL_TERM_SIZE] = '\0'; status = Sal_StringParsing( "Dc", pCompressionService->generic_service_info.instance, "CoreAffinity", temp_string); LAC_CHECK_STATUS(status); } status = icp_adf_cfgGetParamValue(device, temp_string2, temp_string, adfGetParam); if (CPA_STATUS_SUCCESS != status) { QAT_UTILS_LOG("Failed to get %s from configuration.\n", temp_string); return status; } pCompressionService->coreAffinity = (Cpa32U)Sal_Strtoul(adfGetParam, NULL, SAL_CFG_BASE_DEC); status = Sal_StringParsing( "Dc", pCompressionService->generic_service_info.instance, "NumConcurrentRequests", temp_string); LAC_CHECK_STATUS(status); status = icp_adf_cfgGetParamValue(device, section, temp_string, adfGetParam); if (CPA_STATUS_SUCCESS != status) { QAT_UTILS_LOG("Failed to get %s from configuration.\n", temp_string); return status; } numCompConcurrentReq = (Cpa32U)Sal_Strtoul(adfGetParam, NULL, SAL_CFG_BASE_DEC); if (validateConcurrRequest(numCompConcurrentReq)) { QAT_UTILS_LOG( "Invalid NumConcurrentRequests, valid values are: {64, 128, 256, ... 32768, 65536}.\n"); return CPA_STATUS_FAIL; } /* ADF does not allow us to completely fill the ring for batch requests */ pCompressionService->maxNumCompConcurrentReq = (numCompConcurrentReq - SAL_BATCH_SUBMIT_FREE_SPACE); /* 1. 
Create transport handles */ status = Sal_StringParsing( "Dc", pCompressionService->generic_service_info.instance, "RingTx", temp_string); LAC_CHECK_STATUS(status); msgSize = LAC_QAT_DC_REQ_SZ_LW * LAC_LONG_WORD_IN_BYTES; status = icp_adf_transCreateHandle( device, ICP_TRANS_TYPE_ETR, section, pCompressionService->acceleratorNum, pCompressionService->bankNum, temp_string, lac_getRingType(SAL_RING_TYPE_DC), NULL, ICP_RESP_TYPE_NONE, numCompConcurrentReq, msgSize, (icp_comms_trans_handle *)&( pCompressionService->trans_handle_compression_tx)); LAC_CHECK_STATUS(status); if (icp_adf_transGetRingNum( pCompressionService->trans_handle_compression_tx, &request_ring_id) != CPA_STATUS_SUCCESS) { icp_adf_transReleaseHandle( pCompressionService->trans_handle_compression_tx); QAT_UTILS_LOG("Failed to get DC TX ring number.\n"); return CPA_STATUS_FAIL; } status = Sal_StringParsing( "Dc", pCompressionService->generic_service_info.instance, "RingRx", temp_string); if (CPA_STATUS_SUCCESS != status) { icp_adf_transReleaseHandle( pCompressionService->trans_handle_compression_tx); return status; } msgSize = LAC_QAT_DC_RESP_SZ_LW * LAC_LONG_WORD_IN_BYTES; status = icp_adf_transCreateHandle( device, ICP_TRANS_TYPE_ETR, section, pCompressionService->acceleratorNum, pCompressionService->bankNum, temp_string, lac_getRingType(SAL_RING_TYPE_NONE), (icp_trans_callback)dcCompression_ProcessCallback, rx_resp_type, numCompConcurrentReq, msgSize, (icp_comms_trans_handle *)&( pCompressionService->trans_handle_compression_rx)); if (CPA_STATUS_SUCCESS != status) { icp_adf_transReleaseHandle( pCompressionService->trans_handle_compression_tx); return status; } if (icp_adf_transGetRingNum( pCompressionService->trans_handle_compression_rx, &response_ring_id) != CPA_STATUS_SUCCESS) { icp_adf_transReleaseHandle( pCompressionService->trans_handle_compression_tx); icp_adf_transReleaseHandle( pCompressionService->trans_handle_compression_rx); QAT_UTILS_LOG("Failed to get DC RX ring number.\n"); return CPA_STATUS_FAIL; } /* 2. 
Allocates memory pools */ /* Valid initialisation value for a pool ID */ pCompressionService->compression_mem_pool = LAC_MEM_POOL_INIT_POOL_ID; status = Sal_StringParsing( "Comp", pCompressionService->generic_service_info.instance, "_MemPool", compMemPool); if (CPA_STATUS_SUCCESS != status) { icp_adf_transReleaseHandle( pCompressionService->trans_handle_compression_tx); icp_adf_transReleaseHandle( pCompressionService->trans_handle_compression_rx); return status; } status = Lac_MemPoolCreate(&pCompressionService->compression_mem_pool, compMemPool, (numCompConcurrentReq + 1), sizeof(dc_compression_cookie_t), LAC_64BYTE_ALIGNMENT, CPA_FALSE, pCompressionService->nodeAffinity); if (CPA_STATUS_SUCCESS != status) { icp_adf_transReleaseHandle( pCompressionService->trans_handle_compression_tx); icp_adf_transReleaseHandle( pCompressionService->trans_handle_compression_rx); return status; } /* Init compression statistics */ status = dcStatsInit(pCompressionService); if (CPA_STATUS_SUCCESS != status) { Lac_MemPoolDestroy(pCompressionService->compression_mem_pool); icp_adf_transReleaseHandle( pCompressionService->trans_handle_compression_tx); icp_adf_transReleaseHandle( pCompressionService->trans_handle_compression_rx); return status; } if (CPA_TRUE == pStatsCollection->bDcStatsEnabled) { /* Get instance name for stats */ instance_name = LAC_OS_MALLOC(ADF_CFG_MAX_VAL_LEN_IN_BYTES); if (NULL == instance_name) { Lac_MemPoolDestroy( pCompressionService->compression_mem_pool); icp_adf_transReleaseHandle( pCompressionService->trans_handle_compression_tx); icp_adf_transReleaseHandle( pCompressionService->trans_handle_compression_rx); return CPA_STATUS_RESOURCE; } status = Sal_StringParsing( "Dc", pCompressionService->generic_service_info.instance, "Name", temp_string); if (CPA_STATUS_SUCCESS != status) { Lac_MemPoolDestroy( pCompressionService->compression_mem_pool); icp_adf_transReleaseHandle( pCompressionService->trans_handle_compression_tx); icp_adf_transReleaseHandle( pCompressionService->trans_handle_compression_rx); LAC_OS_FREE(instance_name); return status; } status = icp_adf_cfgGetParamValue(device, section, temp_string, adfGetParam); if (CPA_STATUS_SUCCESS != status) { QAT_UTILS_LOG("Failed to get %s from configuration.\n", temp_string); Lac_MemPoolDestroy( pCompressionService->compression_mem_pool); icp_adf_transReleaseHandle( pCompressionService->trans_handle_compression_tx); icp_adf_transReleaseHandle( pCompressionService->trans_handle_compression_rx); LAC_OS_FREE(instance_name); return status; } snprintf(instance_name, ADF_CFG_MAX_VAL_LEN_IN_BYTES, "%s", adfGetParam); pCompressionService->debug_file = LAC_OS_MALLOC(sizeof(debug_file_info_t)); if (NULL == pCompressionService->debug_file) { Lac_MemPoolDestroy( pCompressionService->compression_mem_pool); icp_adf_transReleaseHandle( pCompressionService->trans_handle_compression_tx); icp_adf_transReleaseHandle( pCompressionService->trans_handle_compression_rx); LAC_OS_FREE(instance_name); return CPA_STATUS_RESOURCE; } memset(pCompressionService->debug_file, 0, sizeof(debug_file_info_t)); pCompressionService->debug_file->name = instance_name; pCompressionService->debug_file->seq_read = SalCtrl_CompresionDebug; pCompressionService->debug_file->private_data = pCompressionService; pCompressionService->debug_file->parent = pCompressionService->generic_service_info.debug_parent_dir; status = icp_adf_debugAddFile(device, pCompressionService->debug_file); if (CPA_STATUS_SUCCESS != status) { Lac_MemPoolDestroy( pCompressionService->compression_mem_pool); 
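			/*
			 * Roll back everything allocated so far: both
			 * transport ring handles are released and the debug
			 * file name/structure are freed below before the
			 * failure status is returned.
			 */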
icp_adf_transReleaseHandle( pCompressionService->trans_handle_compression_tx); icp_adf_transReleaseHandle( pCompressionService->trans_handle_compression_rx); LAC_OS_FREE(instance_name); LAC_OS_FREE(pCompressionService->debug_file); return status; } } pCompressionService->generic_service_info.stats = pStatsCollection; pCompressionService->generic_service_info.state = SAL_SERVICE_STATE_INITIALIZED; return status; } CpaStatus SalCtrl_CompressionStart(icp_accel_dev_t *device, sal_service_t *service) { CpaStatus status = CPA_STATUS_SUCCESS; sal_compression_service_t *pCompressionService = (sal_compression_service_t *)service; if (SAL_SERVICE_STATE_INITIALIZED != pCompressionService->generic_service_info.state) { QAT_UTILS_LOG("Not in the correct state to call start.\n"); return CPA_STATUS_FAIL; } /**************************************************************/ /* Obtain Extended Features. I.e. Compress And Verify */ /**************************************************************/ pCompressionService->generic_service_info.dcExtendedFeatures = device->dcExtendedFeatures; pCompressionService->generic_service_info.state = SAL_SERVICE_STATE_RUNNING; return status; } CpaStatus SalCtrl_CompressionStop(icp_accel_dev_t *device, sal_service_t *service) { sal_compression_service_t *pCompressionService = (sal_compression_service_t *)service; if (SAL_SERVICE_STATE_RUNNING != pCompressionService->generic_service_info.state) { QAT_UTILS_LOG("Not in the correct state to call stop.\n"); return CPA_STATUS_FAIL; } if (icp_adf_is_dev_in_reset(device)) { pCompressionService->generic_service_info.state = SAL_SERVICE_STATE_RESTARTING; return CPA_STATUS_SUCCESS; } pCompressionService->generic_service_info.state = SAL_SERVICE_STATE_SHUTTING_DOWN; return CPA_STATUS_RETRY; } CpaStatus SalCtrl_CompressionShutdown(icp_accel_dev_t *device, sal_service_t *service) { CpaStatus status = CPA_STATUS_SUCCESS; sal_compression_service_t *pCompressionService = (sal_compression_service_t *)service; sal_statistics_collection_t *pStatsCollection = (sal_statistics_collection_t *)device->pQatStats; if ((SAL_SERVICE_STATE_INITIALIZED != pCompressionService->generic_service_info.state) && (SAL_SERVICE_STATE_SHUTTING_DOWN != pCompressionService->generic_service_info.state) && (SAL_SERVICE_STATE_RESTARTING != pCompressionService->generic_service_info.state)) { QAT_UTILS_LOG("Not in the correct state to call shutdown.\n"); return CPA_STATUS_FAIL; } Lac_MemPoolDestroy(pCompressionService->compression_mem_pool); status = icp_adf_transReleaseHandle( pCompressionService->trans_handle_compression_tx); LAC_CHECK_STATUS(status); status = icp_adf_transReleaseHandle( pCompressionService->trans_handle_compression_rx); LAC_CHECK_STATUS(status); if (CPA_TRUE == pStatsCollection->bDcStatsEnabled) { /* Clean stats */ if (NULL != pCompressionService->debug_file) { icp_adf_debugRemoveFile( pCompressionService->debug_file); LAC_OS_FREE(pCompressionService->debug_file->name); LAC_OS_FREE(pCompressionService->debug_file); pCompressionService->debug_file = NULL; } } pCompressionService->generic_service_info.stats = NULL; dcStatsFree(pCompressionService); if (icp_adf_is_dev_in_reset(device)) { pCompressionService->generic_service_info.state = SAL_SERVICE_STATE_RESTARTING; return CPA_STATUS_SUCCESS; } pCompressionService->generic_service_info.state = SAL_SERVICE_STATE_SHUTDOWN; return status; } CpaStatus cpaDcGetStatusText(const CpaInstanceHandle dcInstance, const CpaStatus errStatus, Cpa8S *pStatusText) { CpaStatus status = CPA_STATUS_SUCCESS; 
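	/*
	 * Hedged usage sketch (hypothetical caller, not part of this file).
	 * The output buffer is assumed to be at least
	 * CPA_STATUS_MAX_STR_LENGTH_IN_BYTES long, per the CPA API convention
	 * for the *GetStatusText routines:
	 *
	 *   Cpa8S text[CPA_STATUS_MAX_STR_LENGTH_IN_BYTES] = { 0 };
	 *   if (CPA_STATUS_SUCCESS ==
	 *       cpaDcGetStatusText(dcInstance, rc, text))
	 *           QAT_UTILS_LOG("%s\n", (char *)text);
	 */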
LAC_CHECK_NULL_PARAM(pStatusText); switch (errStatus) { case CPA_STATUS_SUCCESS: LAC_COPY_STRING(pStatusText, CPA_STATUS_STR_SUCCESS); break; case CPA_STATUS_FAIL: LAC_COPY_STRING(pStatusText, CPA_STATUS_STR_FAIL); break; case CPA_STATUS_RETRY: LAC_COPY_STRING(pStatusText, CPA_STATUS_STR_RETRY); break; case CPA_STATUS_RESOURCE: LAC_COPY_STRING(pStatusText, CPA_STATUS_STR_RESOURCE); break; case CPA_STATUS_INVALID_PARAM: LAC_COPY_STRING(pStatusText, CPA_STATUS_STR_INVALID_PARAM); break; case CPA_STATUS_FATAL: LAC_COPY_STRING(pStatusText, CPA_STATUS_STR_FATAL); break; case CPA_STATUS_UNSUPPORTED: LAC_COPY_STRING(pStatusText, CPA_STATUS_STR_UNSUPPORTED); break; default: status = CPA_STATUS_INVALID_PARAM; break; } return status; } CpaStatus cpaDcGetNumIntermediateBuffers(CpaInstanceHandle dcInstance, Cpa16U *pNumBuffers) { CpaInstanceHandle insHandle = NULL; sal_compression_service_t *pService = NULL; if (CPA_INSTANCE_HANDLE_SINGLE == dcInstance) { insHandle = dcGetFirstHandle(); } else { insHandle = dcInstance; } LAC_CHECK_NULL_PARAM(insHandle); LAC_CHECK_NULL_PARAM(pNumBuffers); pService = (sal_compression_service_t *)insHandle; *pNumBuffers = pService->numInterBuffs; return CPA_STATUS_SUCCESS; } CpaStatus cpaDcStartInstance(CpaInstanceHandle instanceHandle, Cpa16U numBuffers, CpaBufferList **pIntermediateBufferPtrsArray) { icp_qat_addr_width_t *pInterBuffPtrsArray = NULL; icp_qat_addr_width_t pArrayBufferListDescPhyAddr = 0; icp_qat_addr_width_t bufListDescPhyAddr; icp_qat_addr_width_t bufListAlignedPhyAddr; CpaFlatBuffer *pClientCurrFlatBuffer = NULL; icp_buffer_list_desc_t *pBufferListDesc = NULL; icp_flat_buffer_desc_t *pCurrFlatBufDesc = NULL; CpaInstanceInfo2 info = { 0 }; icp_accel_dev_t *dev = NULL; CpaStatus status = CPA_STATUS_SUCCESS; sal_compression_service_t *pService = NULL; CpaInstanceHandle insHandle = NULL; Cpa16U bufferIndex = 0; Cpa32U numFlatBuffers = 0; Cpa64U clientListSize = 0; CpaBufferList *pClientCurrentIntermediateBuffer = NULL; Cpa32U bufferIndex2 = 0; CpaBufferList **pTempIntermediateBufferPtrsArray; Cpa64U lastClientListSize = 0; if (CPA_INSTANCE_HANDLE_SINGLE == instanceHandle) { insHandle = dcGetFirstHandle(); } else { insHandle = instanceHandle; } LAC_CHECK_NULL_PARAM(insHandle); status = cpaDcInstanceGetInfo2(insHandle, &info); if (CPA_STATUS_SUCCESS != status) { QAT_UTILS_LOG("Can not get instance info.\n"); return status; } dev = icp_adf_getAccelDevByAccelId(info.physInstId.packageId); if (NULL == dev) { QAT_UTILS_LOG("Can not find device for the instance\n"); return CPA_STATUS_FAIL; } if (NULL == pIntermediateBufferPtrsArray) { /* Increment dev ref counter and return - DRAM is not used */ icp_qa_dev_get(dev); return CPA_STATUS_SUCCESS; } if (0 == numBuffers) { /* Increment dev ref counter and return - DRAM is not used */ icp_qa_dev_get(dev); return CPA_STATUS_SUCCESS; } pService = (sal_compression_service_t *)insHandle; LAC_CHECK_NULL_PARAM(insHandle); if ((numBuffers > 0) && (NULL == pIntermediateBufferPtrsArray)) { QAT_UTILS_LOG("Invalid Intermediate Buffers Array pointer\n"); return CPA_STATUS_INVALID_PARAM; } /* Check number of intermediate buffers allocated by user */ if ((pService->numInterBuffs != numBuffers)) { QAT_UTILS_LOG("Invalid number of buffers\n"); return CPA_STATUS_INVALID_PARAM; } pTempIntermediateBufferPtrsArray = pIntermediateBufferPtrsArray; for (bufferIndex = 0; bufferIndex < numBuffers; bufferIndex++) { if (NULL == *pTempIntermediateBufferPtrsArray) { QAT_UTILS_LOG( "Intermediate Buffer - Invalid Buffer List pointer\n"); return 
CPA_STATUS_INVALID_PARAM; } if (NULL == (*pTempIntermediateBufferPtrsArray)->pBuffers) { QAT_UTILS_LOG( "Intermediate Buffer - Invalid Flat Buffer descriptor pointer\n"); return CPA_STATUS_INVALID_PARAM; } if (NULL == (*pTempIntermediateBufferPtrsArray)->pPrivateMetaData) { QAT_UTILS_LOG( "Intermediate Buffer - Invalid Private MetaData descriptor pointer\n"); return CPA_STATUS_INVALID_PARAM; } clientListSize = 0; for (bufferIndex2 = 0; bufferIndex2 < (*pTempIntermediateBufferPtrsArray)->numBuffers; bufferIndex2++) { if ((0 != (*pTempIntermediateBufferPtrsArray) ->pBuffers[bufferIndex2] .dataLenInBytes) && NULL == (*pTempIntermediateBufferPtrsArray) ->pBuffers[bufferIndex2] .pData) { QAT_UTILS_LOG( "Intermediate Buffer - Invalid Flat Buffer pointer\n"); return CPA_STATUS_INVALID_PARAM; } clientListSize += (*pTempIntermediateBufferPtrsArray) ->pBuffers[bufferIndex2] .dataLenInBytes; } if (bufferIndex != 0) { if (lastClientListSize != clientListSize) { QAT_UTILS_LOG( "SGLs have to be of the same size.\n"); return CPA_STATUS_INVALID_PARAM; } } else { lastClientListSize = clientListSize; } pTempIntermediateBufferPtrsArray++; } /* Allocate array of physical pointers to icp_buffer_list_desc_t */ status = LAC_OS_CAMALLOC(&pInterBuffPtrsArray, (numBuffers * sizeof(icp_qat_addr_width_t)), LAC_64BYTE_ALIGNMENT, pService->nodeAffinity); if (CPA_STATUS_SUCCESS != status) { QAT_UTILS_LOG("Can not allocate Intermediate Buffers array.\n"); return status; } /* Get physical address of the intermediate buffer pointers array */ pArrayBufferListDescPhyAddr = LAC_MEM_CAST_PTR_TO_UINT64( LAC_OS_VIRT_TO_PHYS_INTERNAL(pInterBuffPtrsArray)); pService->pInterBuffPtrsArray = pInterBuffPtrsArray; pService->pInterBuffPtrsArrayPhyAddr = pArrayBufferListDescPhyAddr; /* Get the full size of the buffer list */ /* Assumption: all the SGLs allocated by the user have the same size */ clientListSize = 0; for (bufferIndex = 0; bufferIndex < (*pIntermediateBufferPtrsArray)->numBuffers; bufferIndex++) { clientListSize += ((*pIntermediateBufferPtrsArray) ->pBuffers[bufferIndex] .dataLenInBytes); } pService->minInterBuffSizeInBytes = clientListSize; for (bufferIndex = 0; bufferIndex < numBuffers; bufferIndex++) { /* Get pointer to the client Intermediate Buffer List * (CpaBufferList) */ pClientCurrentIntermediateBuffer = *pIntermediateBufferPtrsArray; /* Get number of flat buffers in the buffer list */ numFlatBuffers = pClientCurrentIntermediateBuffer->numBuffers; /* Get pointer to the client array of CpaFlatBuffers */ pClientCurrFlatBuffer = pClientCurrentIntermediateBuffer->pBuffers; /* Calculate Physical address of current private SGL */ bufListDescPhyAddr = LAC_OS_VIRT_TO_PHYS_EXTERNAL( (*pService), pClientCurrentIntermediateBuffer->pPrivateMetaData); if (bufListDescPhyAddr == 0) { QAT_UTILS_LOG( "Unable to get the physical address of the metadata.\n"); return CPA_STATUS_FAIL; } /* Align SGL physical address */ bufListAlignedPhyAddr = LAC_ALIGN_POW2_ROUNDUP(bufListDescPhyAddr, ICP_DESCRIPTOR_ALIGNMENT_BYTES); /* Set physical address of the Intermediate Buffer SGL in the * SGLs array */ *pInterBuffPtrsArray = LAC_MEM_CAST_PTR_TO_UINT64(bufListAlignedPhyAddr); /* Calculate (virtual) offset to the buffer list descriptor */ pBufferListDesc = (icp_buffer_list_desc_t *)((LAC_ARCH_UINT)pClientCurrentIntermediateBuffer ->pPrivateMetaData + (LAC_ARCH_UINT)(bufListAlignedPhyAddr - bufListDescPhyAddr)); /* Set number of flat buffers in the physical Buffer List * descriptor */ pBufferListDesc->numBuffers = numFlatBuffers; /* Go past the 
Buffer List descriptor to the list of buffer * descriptors */ pCurrFlatBufDesc = (icp_flat_buffer_desc_t *)((pBufferListDesc->phyBuffers)); /* Loop for each flat buffer in the SGL */ while (0 != numFlatBuffers) { /* Set length of the current flat buffer */ pCurrFlatBufDesc->dataLenInBytes = pClientCurrFlatBuffer->dataLenInBytes; /* Set physical address of the flat buffer */ pCurrFlatBufDesc->phyBuffer = LAC_MEM_CAST_PTR_TO_UINT64( LAC_OS_VIRT_TO_PHYS_EXTERNAL( (*pService), pClientCurrFlatBuffer->pData)); if (pCurrFlatBufDesc->phyBuffer == 0) { QAT_UTILS_LOG( "Unable to get the physical address of the flat buffer.\n"); return CPA_STATUS_FAIL; } pCurrFlatBufDesc++; pClientCurrFlatBuffer++; numFlatBuffers--; } pIntermediateBufferPtrsArray++; pInterBuffPtrsArray++; } pService->generic_service_info.isInstanceStarted = CPA_TRUE; /* Increment dev ref counter */ icp_qa_dev_get(dev); return CPA_STATUS_SUCCESS; } CpaStatus cpaDcStopInstance(CpaInstanceHandle instanceHandle) { CpaInstanceHandle insHandle = NULL; CpaInstanceInfo2 info = { 0 }; icp_accel_dev_t *dev = NULL; CpaStatus status = CPA_STATUS_SUCCESS; sal_compression_service_t *pService = NULL; if (CPA_INSTANCE_HANDLE_SINGLE == instanceHandle) { insHandle = dcGetFirstHandle(); } else { insHandle = instanceHandle; } LAC_CHECK_NULL_PARAM(insHandle); pService = (sal_compression_service_t *)insHandle; /* Free Intermediate Buffer Pointers Array */ if (pService->pInterBuffPtrsArray != NULL) { LAC_OS_CAFREE(pService->pInterBuffPtrsArray); pService->pInterBuffPtrsArray = 0; } pService->pInterBuffPtrsArrayPhyAddr = 0; status = cpaDcInstanceGetInfo2(insHandle, &info); if (CPA_STATUS_SUCCESS != status) { QAT_UTILS_LOG("Can not get instance info.\n"); return status; } dev = icp_adf_getAccelDevByAccelId(info.physInstId.packageId); if (NULL == dev) { QAT_UTILS_LOG("Can not find device for the instance.\n"); return CPA_STATUS_FAIL; } pService->generic_service_info.isInstanceStarted = CPA_FALSE; /* Decrement dev ref counter */ icp_qa_dev_put(dev); return CPA_STATUS_SUCCESS; } CpaStatus cpaDcGetNumInstances(Cpa16U *pNumInstances) { CpaStatus status = CPA_STATUS_SUCCESS; icp_accel_dev_t **pAdfInsts = NULL; icp_accel_dev_t *dev_addr = NULL; sal_t *base_addr = NULL; sal_list_t *list_temp = NULL; Cpa16U num_accel_dev = 0; Cpa16U num = 0; Cpa16U i = 0; LAC_CHECK_NULL_PARAM(pNumInstances); /* Get the number of accel_dev in the system */ status = icp_amgr_getNumInstances(&num_accel_dev); LAC_CHECK_STATUS(status); /* Allocate memory to store addr of accel_devs */ pAdfInsts = malloc(num_accel_dev * sizeof(icp_accel_dev_t *), M_QAT, M_WAITOK); num_accel_dev = 0; /* Get ADF to return accel_devs with dc enabled */ status = icp_amgr_getAllAccelDevByCapabilities( ICP_ACCEL_CAPABILITIES_COMPRESSION, pAdfInsts, &num_accel_dev); if (CPA_STATUS_SUCCESS == status) { for (i = 0; i < num_accel_dev; i++) { dev_addr = (icp_accel_dev_t *)pAdfInsts[i]; if (NULL != dev_addr) { base_addr = dev_addr->pSalHandle; if (NULL != base_addr) { list_temp = base_addr->compression_services; while (NULL != list_temp) { num++; list_temp = SalList_next(list_temp); } } } } *pNumInstances = num; } free(pAdfInsts, M_QAT); return status; } CpaStatus cpaDcGetInstances(Cpa16U numInstances, CpaInstanceHandle *dcInstances) { CpaStatus status = CPA_STATUS_SUCCESS; icp_accel_dev_t **pAdfInsts = NULL; icp_accel_dev_t *dev_addr = NULL; sal_t *base_addr = NULL; sal_list_t *list_temp = NULL; Cpa16U num_accel_dev = 0; Cpa16U index = 0; Cpa16U i = 0; LAC_CHECK_NULL_PARAM(dcInstances); if (0 == numInstances) { 
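		/*
		 * A request for zero handles is rejected as
		 * CPA_STATUS_INVALID_PARAM rather than treated as a no-op;
		 * callers typically size dcInstances[] from
		 * cpaDcGetNumInstances() before calling this function.
		 */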
QAT_UTILS_LOG("numInstances is 0.\n"); return CPA_STATUS_INVALID_PARAM; } /* Get the number of accel_dev in the system */ status = icp_amgr_getNumInstances(&num_accel_dev); LAC_CHECK_STATUS(status); /* Allocate memory to store addr of accel_devs */ pAdfInsts = malloc(num_accel_dev * sizeof(icp_accel_dev_t *), M_QAT, M_WAITOK); num_accel_dev = 0; /* Get ADF to return accel_devs with dc enabled */ status = icp_amgr_getAllAccelDevByCapabilities( ICP_ACCEL_CAPABILITIES_COMPRESSION, pAdfInsts, &num_accel_dev); if (CPA_STATUS_SUCCESS == status) { /* First check the number of instances in the system */ for (i = 0; i < num_accel_dev; i++) { dev_addr = (icp_accel_dev_t *)pAdfInsts[i]; if (NULL != dev_addr) { base_addr = dev_addr->pSalHandle; if (NULL != base_addr) { list_temp = base_addr->compression_services; while (NULL != list_temp) { if (index > (numInstances - 1)) { break; } dcInstances[index] = SalList_getObject( list_temp); list_temp = SalList_next(list_temp); index++; } } } } if (numInstances > index) { QAT_UTILS_LOG("Only %d dc instances available.\n", index); status = CPA_STATUS_RESOURCE; } } if (CPA_STATUS_SUCCESS == status) { index = 0; for (i = 0; i < num_accel_dev; i++) { dev_addr = (icp_accel_dev_t *)pAdfInsts[i]; /* Note dev_addr cannot be NULL here as numInstances=0 is not valid and if dev_addr=NULL then index=0 (which is less than numInstances and status is set to _RESOURCE above */ base_addr = dev_addr->pSalHandle; if (NULL != base_addr) { list_temp = base_addr->compression_services; while (NULL != list_temp) { if (index > (numInstances - 1)) { break; } dcInstances[index] = SalList_getObject(list_temp); list_temp = SalList_next(list_temp); index++; } } } } free(pAdfInsts, M_QAT); return status; } CpaStatus cpaDcInstanceGetInfo2(const CpaInstanceHandle instanceHandle, CpaInstanceInfo2 *pInstanceInfo2) { sal_compression_service_t *pCompressionService = NULL; CpaInstanceHandle insHandle = NULL; icp_accel_dev_t *dev = NULL; CpaStatus status = CPA_STATUS_SUCCESS; char keyStr[ADF_CFG_MAX_KEY_LEN_IN_BYTES] = { 0 }; char valStr[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = { 0 }; char *section = DYN_SEC; if (CPA_INSTANCE_HANDLE_SINGLE == instanceHandle) { insHandle = dcGetFirstHandle(); } else { insHandle = instanceHandle; } LAC_CHECK_NULL_PARAM(insHandle); SAL_CHECK_INSTANCE_TYPE(insHandle, SAL_SERVICE_TYPE_COMPRESSION); LAC_CHECK_NULL_PARAM(pInstanceInfo2); LAC_OS_BZERO(pInstanceInfo2, sizeof(CpaInstanceInfo2)); pInstanceInfo2->accelerationServiceType = CPA_ACC_SVC_TYPE_DATA_COMPRESSION; snprintf((char *)pInstanceInfo2->vendorName, CPA_INST_VENDOR_NAME_SIZE, "%s", SAL_INFO2_VENDOR_NAME); pInstanceInfo2->vendorName[CPA_INST_VENDOR_NAME_SIZE - 1] = '\0'; snprintf((char *)pInstanceInfo2->swVersion, CPA_INST_SW_VERSION_SIZE, "Version %d.%d", SAL_INFO2_DRIVER_SW_VERSION_MAJ_NUMBER, SAL_INFO2_DRIVER_SW_VERSION_MIN_NUMBER); pInstanceInfo2->swVersion[CPA_INST_SW_VERSION_SIZE - 1] = '\0'; /* Note we can safely read the contents of the compression service instance here because icp_amgr_getAccelDevByCapabilities() only returns devs that have started */ pCompressionService = (sal_compression_service_t *)insHandle; pInstanceInfo2->physInstId.packageId = pCompressionService->pkgID; pInstanceInfo2->physInstId.acceleratorId = pCompressionService->acceleratorNum; pInstanceInfo2->physInstId.executionEngineId = 0; pInstanceInfo2->physInstId.busAddress = icp_adf_get_busAddress(pInstanceInfo2->physInstId.packageId); /* set coreAffinity to zero before use */ LAC_OS_BZERO(pInstanceInfo2->coreAffinity, 
		     sizeof(pInstanceInfo2->coreAffinity));
	CPA_BITMAP_BIT_SET(pInstanceInfo2->coreAffinity,
			   pCompressionService->coreAffinity);

	pInstanceInfo2->nodeAffinity = pCompressionService->nodeAffinity;

	if (CPA_TRUE ==
	    pCompressionService->generic_service_info.isInstanceStarted) {
		pInstanceInfo2->operState = CPA_OPER_STATE_UP;
	} else {
		pInstanceInfo2->operState = CPA_OPER_STATE_DOWN;
	}

	pInstanceInfo2->requiresPhysicallyContiguousMemory = CPA_TRUE;
	if (SAL_RESP_POLL_CFG_FILE == pCompressionService->isPolled) {
		pInstanceInfo2->isPolled = CPA_TRUE;
	} else {
		pInstanceInfo2->isPolled = CPA_FALSE;
	}
	pInstanceInfo2->isOffloaded = CPA_TRUE;

	/* Get the instance name and part name from the config file */
	dev = icp_adf_getAccelDevByAccelId(pCompressionService->pkgID);
-	if (NULL == dev) {
+	if (NULL == dev ||
+	    0 == strnlen(dev->deviceName, ADF_DEVICE_TYPE_LENGTH + 1)) {
		QAT_UTILS_LOG("Can not find device for the instance.\n");
		LAC_OS_BZERO(pInstanceInfo2, sizeof(CpaInstanceInfo2));
		return CPA_STATUS_FAIL;
	}

	snprintf((char *)pInstanceInfo2->partName,
		 CPA_INST_PART_NAME_SIZE,
		 SAL_INFO2_PART_NAME,
		 dev->deviceName);
	pInstanceInfo2->partName[CPA_INST_PART_NAME_SIZE - 1] = '\0';

	if (CPA_FALSE == pCompressionService->generic_service_info.is_dyn) {
		section = icpGetProcessName();
	}

	status = Sal_StringParsing(
	    "Dc",
	    pCompressionService->generic_service_info.instance,
	    "Name",
	    keyStr);
	LAC_CHECK_STATUS(status);
	status = icp_adf_cfgGetParamValue(dev, section, keyStr, valStr);
	LAC_CHECK_STATUS(status);

	strncpy((char *)pInstanceInfo2->instName,
		valStr,
		sizeof(pInstanceInfo2->instName) - 1);
	pInstanceInfo2->instName[CPA_INST_NAME_SIZE - 1] = '\0';

#if __GNUC__ >= 7
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wformat-truncation"
#endif
	snprintf((char *)pInstanceInfo2->instID,
		 CPA_INST_ID_SIZE,
		 "%s_%s",
		 section,
		 valStr);
#if __GNUC__ >= 7
#pragma GCC diagnostic pop
#endif

	return CPA_STATUS_SUCCESS;
}

CpaStatus
cpaDcQueryCapabilities(CpaInstanceHandle dcInstance,
		       CpaDcInstanceCapabilities *pInstanceCapabilities)
{
	CpaInstanceHandle insHandle = NULL;
	sal_compression_service_t *pService = NULL;
	Cpa32U capabilitiesMask = 0;
	dc_extd_ftrs_t *pExtendedFtrs = NULL;

	if (CPA_INSTANCE_HANDLE_SINGLE == dcInstance) {
		insHandle = dcGetFirstHandle();
		if (NULL == insHandle) {
			QAT_UTILS_LOG("Can not get the instance.\n");
			return CPA_STATUS_FAIL;
		}
	} else {
		insHandle = dcInstance;
	}

	pService = (sal_compression_service_t *)insHandle;

	LAC_CHECK_NULL_PARAM(insHandle);
	SAL_CHECK_INSTANCE_TYPE(insHandle, SAL_SERVICE_TYPE_COMPRESSION);
	LAC_CHECK_NULL_PARAM(pInstanceCapabilities);

	memset(pInstanceCapabilities, 0, sizeof(CpaDcInstanceCapabilities));

	capabilitiesMask = pService->generic_service_info.capabilitiesMask;

	/* Set compression capabilities */
	if (capabilitiesMask & ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY) {
		pInstanceCapabilities->integrityCrcs = CPA_TRUE;
	}

	pInstanceCapabilities->endOfLastBlock = CPA_TRUE;
	pInstanceCapabilities->statefulDeflateCompression = CPA_FALSE;
	pInstanceCapabilities->statefulDeflateDecompression = CPA_TRUE;
	pInstanceCapabilities->statelessDeflateCompression = CPA_TRUE;
	pInstanceCapabilities->statelessDeflateDecompression = CPA_TRUE;
	pInstanceCapabilities->checksumCRC32 = CPA_TRUE;
	pInstanceCapabilities->checksumAdler32 = CPA_TRUE;
	pInstanceCapabilities->dynamicHuffman = CPA_TRUE;
	pInstanceCapabilities->precompiledHuffman = CPA_FALSE;
	pInstanceCapabilities->dynamicHuffmanBufferReq = CPA_TRUE;
	pInstanceCapabilities->autoSelectBestHuffmanTree = CPA_TRUE;
	pInstanceCapabilities->validWindowSizeMaskCompression =
pService->comp_device_data.windowSizeMask; pInstanceCapabilities->validWindowSizeMaskDecompression = pService->comp_device_data.windowSizeMask; pExtendedFtrs = (dc_extd_ftrs_t *)&( ((sal_service_t *)insHandle)->dcExtendedFeatures); pInstanceCapabilities->batchAndPack = CPA_FALSE; pInstanceCapabilities->compressAndVerify = (CpaBoolean)pExtendedFtrs->is_cnv; pInstanceCapabilities->compressAndVerifyStrict = CPA_TRUE; pInstanceCapabilities->compressAndVerifyAndRecover = (CpaBoolean)pExtendedFtrs->is_cnvnr; return CPA_STATUS_SUCCESS; } CpaStatus cpaDcSetAddressTranslation(const CpaInstanceHandle instanceHandle, CpaVirtualToPhysical virtual2Physical) { sal_service_t *pService = NULL; CpaInstanceHandle insHandle = NULL; if (CPA_INSTANCE_HANDLE_SINGLE == instanceHandle) { insHandle = dcGetFirstHandle(); } else { insHandle = instanceHandle; } LAC_CHECK_NULL_PARAM(insHandle); SAL_CHECK_INSTANCE_TYPE(insHandle, SAL_SERVICE_TYPE_COMPRESSION); LAC_CHECK_NULL_PARAM(virtual2Physical); pService = (sal_service_t *)insHandle; pService->virt2PhysClient = virtual2Physical; return CPA_STATUS_SUCCESS; } /** ****************************************************************************** * @ingroup cpaDcCommon * Data compression specific polling function which polls a DC instance. *****************************************************************************/ CpaStatus icp_sal_DcPollInstance(CpaInstanceHandle instanceHandle_in, Cpa32U response_quota) { CpaStatus status = CPA_STATUS_SUCCESS; sal_compression_service_t *dc_handle = NULL; sal_service_t *gen_handle = NULL; icp_comms_trans_handle trans_hndTable[DC_NUM_RX_RINGS]; if (CPA_INSTANCE_HANDLE_SINGLE == instanceHandle_in) { dc_handle = (sal_compression_service_t *)dcGetFirstHandle(); } else { dc_handle = (sal_compression_service_t *)instanceHandle_in; } LAC_CHECK_NULL_PARAM(dc_handle); SAL_RUNNING_CHECK(dc_handle); gen_handle = &(dc_handle->generic_service_info); if (SAL_SERVICE_TYPE_COMPRESSION != gen_handle->type) { QAT_UTILS_LOG("Instance handle type is incorrect.\n"); return CPA_STATUS_FAIL; } /* * From the instanceHandle we must get the trans_handle and send * down to adf for polling. * Populate our trans handle table with the appropriate handles. */ trans_hndTable[0] = dc_handle->trans_handle_compression_rx; /* Call adf to do the polling. 
*/ status = icp_adf_pollInstance(trans_hndTable, DC_NUM_RX_RINGS, response_quota); return status; } /** ****************************************************************************** * @ingroup cpaDcCommon *****************************************************************************/ CpaStatus cpaDcInstanceSetNotificationCb( const CpaInstanceHandle instanceHandle, const CpaDcInstanceNotificationCbFunc pInstanceNotificationCb, void *pCallbackTag) { CpaStatus status = CPA_STATUS_SUCCESS; sal_service_t *gen_handle = instanceHandle; LAC_CHECK_NULL_PARAM(gen_handle); gen_handle->notification_cb = pInstanceNotificationCb; gen_handle->cb_tag = pCallbackTag; return status; } CpaInstanceHandle dcGetFirstHandle(void) { CpaStatus status = CPA_STATUS_SUCCESS; static icp_accel_dev_t *adfInsts[ADF_MAX_DEVICES] = { 0 }; CpaInstanceHandle dcInst = NULL; icp_accel_dev_t *dev_addr = NULL; sal_t *base_addr = NULL; sal_list_t *list_temp = NULL; Cpa16U i, num_dc = 0; /* Only need 1 dev with compression enabled - so check all devices */ status = icp_amgr_getAllAccelDevByCapabilities( ICP_ACCEL_CAPABILITIES_COMPRESSION, adfInsts, &num_dc); if ((0 == num_dc) || (CPA_STATUS_SUCCESS != status)) { QAT_UTILS_LOG( "No compression devices enabled in the system.\n"); return dcInst; } for (i = 0; i < num_dc; i++) { dev_addr = (icp_accel_dev_t *)adfInsts[i]; if (NULL != dev_addr) { base_addr = dev_addr->pSalHandle; if (NULL != base_addr) { list_temp = base_addr->compression_services; if (NULL != list_temp) { dcInst = SalList_getObject(list_temp); break; } } } } return dcInst; } diff --git a/sys/dev/qat/qat_api/common/ctrl/sal_crypto.c b/sys/dev/qat/qat_api/common/ctrl/sal_crypto.c index 7bfc5ec3ba10..3e134f43af6e 100644 --- a/sys/dev/qat/qat_api/common/ctrl/sal_crypto.c +++ b/sys/dev/qat/qat_api/common/ctrl/sal_crypto.c @@ -1,1814 +1,1814 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright(c) 2007-2025 Intel Corporation */ /** *************************************************************************** * @file sal_crypto.c Instance handling functions for crypto * * @ingroup SalCtrl * ***************************************************************************/ /* ******************************************************************************* * Include public/global header files ******************************************************************************* */ /* QAT-API includes */ #include "cpa.h" #include "cpa_types.h" #include "cpa_cy_common.h" #include "cpa_cy_im.h" #include "cpa_cy_key.h" #include "cpa_cy_sym.h" #include "qat_utils.h" /* ADF includes */ #include "icp_adf_init.h" #include "icp_adf_transport.h" #include "icp_accel_devices.h" #include "icp_adf_cfg.h" #include "icp_adf_accel_mgr.h" #include "icp_adf_poll.h" #include "icp_adf_debug.h" /* SAL includes */ #include "lac_log.h" #include "lac_mem.h" #include "lac_mem_pools.h" #include "sal_statistics.h" #include "lac_common.h" #include "lac_list.h" #include "lac_hooks.h" #include "lac_sym_qat_hash_defs_lookup.h" #include "lac_sym.h" #include "lac_sym_key.h" #include "lac_sym_hash.h" #include "lac_sym_cb.h" #include "lac_sym_stats.h" #include "lac_sal_types_crypto.h" #include "lac_sal.h" #include "lac_sal_ctrl.h" #include "sal_string_parse.h" #include "sal_service_state.h" #include "icp_sal_poll.h" #include "lac_sync.h" #include "lac_sym_qat.h" #include "icp_sal_versions.h" #include "icp_sal_user.h" #include "sal_hw_gen.h" #define HMAC_MODE_1 1 #define HMAC_MODE_2 2 #define TH_CY_RX_0 0 #define TH_CY_RX_1 1 #define MAX_CY_RX_RINGS 2 #define DOUBLE_INCR 2 
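/*
 * The TH_CY_RX_* and TH_SINGLE_RX values name slots in the trans-handle
 * table handed to icp_adf_pollInstance(): a full "cy" instance polls both
 * its sym and asym response rings (up to MAX_CY_RX_RINGS entries), while
 * sym-only or asym-only instances populate a single slot (TH_SINGLE_RX,
 * with NUM_CRYPTO_SYM_RX_RINGS / NUM_CRYPTO_ASYM_RX_RINGS entries).
 */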
#define TH_SINGLE_RX 0 #define NUM_CRYPTO_SYM_RX_RINGS 1 #define NUM_CRYPTO_ASYM_RX_RINGS 1 #define NUM_CRYPTO_NRBG_RX_RINGS 1 CpaStatus Lac_GetCyInstancesByType( const CpaAccelerationServiceType accelerationServiceType, Cpa16U numInstances, CpaInstanceHandle *pInstances); CpaStatus Lac_GetCyNumInstancesByType( const CpaAccelerationServiceType accelerationServiceType, Cpa16U *pNumInstances); static CpaInstanceHandle Lac_CryptoGetFirstHandle(void) { CpaInstanceHandle instHandle; instHandle = Lac_GetFirstHandle(SAL_SERVICE_TYPE_CRYPTO); if (!instHandle) { instHandle = Lac_GetFirstHandle(SAL_SERVICE_TYPE_CRYPTO_SYM); if (!instHandle) { instHandle = Lac_GetFirstHandle(SAL_SERVICE_TYPE_CRYPTO_ASYM); } } return instHandle; } /* Function to release the sym handles. */ static CpaStatus SalCtrl_SymReleaseTransHandle(sal_service_t *service) { CpaStatus status = CPA_STATUS_SUCCESS; CpaStatus ret_status = CPA_STATUS_SUCCESS; sal_crypto_service_t *pCryptoService = (sal_crypto_service_t *)service; if (NULL != pCryptoService->trans_handle_sym_tx) { status = icp_adf_transReleaseHandle( pCryptoService->trans_handle_sym_tx); if (CPA_STATUS_SUCCESS != status) { ret_status = status; } } if (NULL != pCryptoService->trans_handle_sym_rx) { status = icp_adf_transReleaseHandle( pCryptoService->trans_handle_sym_rx); if (CPA_STATUS_SUCCESS != status) { ret_status = status; } } return ret_status; } /* * @ingroup sal_crypto * Frees resources (memory and transhandles) if allocated * * @param[in] pCryptoService Pointer to sym service instance * @retval SUCCESS if transhandles released * successfully. */ static CpaStatus SalCtrl_SymFreeResources(sal_crypto_service_t *pCryptoService) { CpaStatus status = CPA_STATUS_SUCCESS; /* Free memory pools if not NULL */ Lac_MemPoolDestroy(pCryptoService->lac_sym_cookie_pool); /* Free misc memory if allocated */ /* Frees memory allocated for Hmac precomputes */ LacSymHash_HmacPrecompShutdown(pCryptoService); /* Free memory allocated for key labels Also clears key stats */ LacSymKey_Shutdown(pCryptoService); /* Free hash lookup table if allocated */ if (NULL != pCryptoService->pLacHashLookupDefs) { LAC_OS_FREE(pCryptoService->pLacHashLookupDefs); } /* Free statistics */ LacSym_StatsFree(pCryptoService); /* Free transport handles */ status = SalCtrl_SymReleaseTransHandle((sal_service_t *)pCryptoService); return status; } /** *********************************************************************** * @ingroup SalCtrl * This macro verifies that the status is _SUCCESS * If status is not _SUCCESS then Sym Instance resources are * freed before the function returns the error * * @param[in] status status we are checking * * @return void status is ok (CPA_STATUS_SUCCESS) * @return status The value in the status parameter is an error one * ****************************************************************************/ #define LAC_CHECK_STATUS_SYM_INIT(status) \ do { \ if (CPA_STATUS_SUCCESS != status) { \ SalCtrl_SymFreeResources(pCryptoService); \ return status; \ } \ } while (0) /* Function that creates the Sym Handles. 
*/ static CpaStatus SalCtrl_SymCreateTransHandle(icp_accel_dev_t *device, sal_service_t *service, Cpa32U numSymRequests, char *section) { CpaStatus status = CPA_STATUS_SUCCESS; char temp_string[SAL_CFG_MAX_VAL_LEN_IN_BYTES] = { 0 }; sal_crypto_service_t *pCryptoService = (sal_crypto_service_t *)service; icp_resp_deliv_method rx_resp_type = ICP_RESP_TYPE_IRQ; Cpa32U msgSize = 0; if (SAL_RESP_POLL_CFG_FILE == pCryptoService->isPolled) { rx_resp_type = ICP_RESP_TYPE_POLL; } if (CPA_FALSE == pCryptoService->generic_service_info.is_dyn) { section = icpGetProcessName(); } /* Parse Sym ring details */ status = Sal_StringParsing("Cy", pCryptoService->generic_service_info.instance, "RingSymTx", temp_string); /* Need to free resources in case not _SUCCESS from here */ LAC_CHECK_STATUS_SYM_INIT(status); msgSize = LAC_QAT_SYM_REQ_SZ_LW * LAC_LONG_WORD_IN_BYTES; status = icp_adf_transCreateHandle(device, ICP_TRANS_TYPE_ETR, section, pCryptoService->acceleratorNum, pCryptoService->bankNumSym, temp_string, lac_getRingType(SAL_RING_TYPE_A_SYM_HI), NULL, ICP_RESP_TYPE_NONE, numSymRequests, msgSize, (icp_comms_trans_handle *)&( pCryptoService->trans_handle_sym_tx)); LAC_CHECK_STATUS_SYM_INIT(status); status = Sal_StringParsing("Cy", pCryptoService->generic_service_info.instance, "RingSymRx", temp_string); LAC_CHECK_STATUS_SYM_INIT(status); msgSize = LAC_QAT_SYM_RESP_SZ_LW * LAC_LONG_WORD_IN_BYTES; status = icp_adf_transCreateHandle( device, ICP_TRANS_TYPE_ETR, section, pCryptoService->acceleratorNum, pCryptoService->bankNumSym, temp_string, lac_getRingType(SAL_RING_TYPE_NONE), (icp_trans_callback)LacSymQat_SymRespHandler, rx_resp_type, numSymRequests, msgSize, (icp_comms_trans_handle *)&(pCryptoService->trans_handle_sym_rx)); LAC_CHECK_STATUS_SYM_INIT(status); return status; } static int SalCtrl_CryptoDebug(void *private_data, char *data, int size, int offset) { CpaStatus status = CPA_STATUS_SUCCESS; Cpa32U len = 0; sal_crypto_service_t *pCryptoService = (sal_crypto_service_t *)private_data; switch (offset) { case SAL_STATS_SYM: { CpaCySymStats64 symStats = { 0 }; if (CPA_TRUE != pCryptoService->generic_service_info.stats ->bSymStatsEnabled) { break; } status = cpaCySymQueryStats64(pCryptoService, &symStats); if (status != CPA_STATUS_SUCCESS) { LAC_LOG_ERROR("cpaCySymQueryStats64 returned error\n"); return 0; } /* Engine Info */ len += snprintf( data + len, size - len, SEPARATOR BORDER " Statistics for Instance %24s |\n" BORDER " Symmetric Stats " BORDER "\n" SEPARATOR, pCryptoService->debug_file->name); /* Session Info */ len += snprintf( data + len, size - len, BORDER " Sessions Initialized: %16llu " BORDER "\n" BORDER " Sessions Removed: %16llu " BORDER "\n" BORDER " Session Errors: %16llu " BORDER "\n" SEPARATOR, (long long unsigned int)symStats.numSessionsInitialized, (long long unsigned int)symStats.numSessionsRemoved, (long long unsigned int)symStats.numSessionErrors); /* Session info */ len += snprintf( data + len, size - len, BORDER " Symmetric Requests: %16llu " BORDER "\n" BORDER " Symmetric Request Errors: %16llu " BORDER "\n" BORDER " Symmetric Completed: %16llu " BORDER "\n" BORDER " Symmetric Completed Errors: %16llu " BORDER "\n" BORDER " Symmetric Verify Failures: %16llu " BORDER "\n", (long long unsigned int)symStats.numSymOpRequests, (long long unsigned int)symStats.numSymOpRequestErrors, (long long unsigned int)symStats.numSymOpCompleted, (long long unsigned int)symStats.numSymOpCompletedErrors, (long long unsigned int)symStats.numSymOpVerifyFailures); break; } default: { len += 
snprintf(data + len, size - len, SEPARATOR); return 0; } } return ++offset; } static CpaStatus SalCtrl_SymInit(icp_accel_dev_t *device, sal_service_t *service) { CpaStatus status = CPA_STATUS_SUCCESS; Cpa32U qatHmacMode = 0; Cpa32U numSymConcurrentReq = 0; char adfGetParam[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = { 0 }; char temp_string[SAL_CFG_MAX_VAL_LEN_IN_BYTES] = { 0 }; sal_crypto_service_t *pCryptoService = (sal_crypto_service_t *)service; char *section = DYN_SEC; /*Instance may not in the DYN section*/ if (CPA_FALSE == pCryptoService->generic_service_info.is_dyn) { section = icpGetProcessName(); } /* Register callbacks for the symmetric services * (Hash, Cipher, Algorithm-Chaining) (returns void)*/ LacSymCb_CallbacksRegister(); qatHmacMode = (Cpa32U)Sal_Strtoul(adfGetParam, NULL, SAL_CFG_BASE_DEC); switch (qatHmacMode) { case HMAC_MODE_1: pCryptoService->qatHmacMode = ICP_QAT_HW_AUTH_MODE1; break; case HMAC_MODE_2: pCryptoService->qatHmacMode = ICP_QAT_HW_AUTH_MODE2; break; default: pCryptoService->qatHmacMode = ICP_QAT_HW_AUTH_MODE1; break; } /* Get num concurrent requests from config file */ status = Sal_StringParsing("Cy", pCryptoService->generic_service_info.instance, "NumConcurrentSymRequests", temp_string); LAC_CHECK_STATUS(status); status = icp_adf_cfgGetParamValue(device, section, temp_string, adfGetParam); if (CPA_STATUS_SUCCESS != status) { QAT_UTILS_LOG("Failed to get %s from configuration file\n", temp_string); return status; } numSymConcurrentReq = (Cpa32U)Sal_Strtoul(adfGetParam, NULL, SAL_CFG_BASE_DEC); if (CPA_STATUS_FAIL == validateConcurrRequest(numSymConcurrentReq)) { LAC_LOG_ERROR("Invalid NumConcurrentSymRequests, valid " "values {64, 128, 256, ... 32768, 65536}"); return CPA_STATUS_FAIL; } /* ADF does not allow us to completely fill the ring for batch requests */ pCryptoService->maxNumSymReqBatch = (numSymConcurrentReq - SAL_BATCH_SUBMIT_FREE_SPACE); /* Create transport handles */ status = SalCtrl_SymCreateTransHandle(device, service, numSymConcurrentReq, section); LAC_CHECK_STATUS(status); /* Allocates memory pools */ /* Create and initialise symmetric cookie memory pool */ pCryptoService->lac_sym_cookie_pool = LAC_MEM_POOL_INIT_POOL_ID; status = Sal_StringParsing("Cy", pCryptoService->generic_service_info.instance, "SymPool", temp_string); LAC_CHECK_STATUS_SYM_INIT(status); /* Note we need twice (i.e. 
<<1) the number of sym cookies to support sym ring pairs (and some, for partials) */ status = Lac_MemPoolCreate(&pCryptoService->lac_sym_cookie_pool, temp_string, ((numSymConcurrentReq + numSymConcurrentReq + 1) << 1), sizeof(lac_sym_cookie_t), LAC_64BYTE_ALIGNMENT, CPA_FALSE, pCryptoService->nodeAffinity); LAC_CHECK_STATUS_SYM_INIT(status); /* For all sym cookies fill out the physical address of data that will be set to QAT */ Lac_MemPoolInitSymCookiesPhyAddr(pCryptoService->lac_sym_cookie_pool); /* Clear stats */ /* Clears Key stats and allocate memory of SSL and TLS labels These labels are initialised to standard values */ status = LacSymKey_Init(pCryptoService); LAC_CHECK_STATUS_SYM_INIT(status); /* Initialises the hash lookup table*/ status = LacSymQat_Init(pCryptoService); LAC_CHECK_STATUS_SYM_INIT(status); /* Fills out content descriptor for precomputes and registers the hash precompute callback */ status = LacSymHash_HmacPrecompInit(pCryptoService); LAC_CHECK_STATUS_SYM_INIT(status); /* Init the Sym stats */ status = LacSym_StatsInit(pCryptoService); LAC_CHECK_STATUS_SYM_INIT(status); return status; } static void SalCtrl_DebugShutdown(icp_accel_dev_t *device, sal_service_t *service) { sal_crypto_service_t *pCryptoService = (sal_crypto_service_t *)service; sal_statistics_collection_t *pStatsCollection = (sal_statistics_collection_t *)device->pQatStats; if (CPA_TRUE == pStatsCollection->bStatsEnabled) { /* Clean stats */ if (NULL != pCryptoService->debug_file) { icp_adf_debugRemoveFile(pCryptoService->debug_file); LAC_OS_FREE(pCryptoService->debug_file->name); LAC_OS_FREE(pCryptoService->debug_file); pCryptoService->debug_file = NULL; } } pCryptoService->generic_service_info.stats = NULL; } static CpaStatus SalCtrl_DebugInit(icp_accel_dev_t *device, sal_service_t *service) { char adfGetParam[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = { 0 }; char temp_string[SAL_CFG_MAX_VAL_LEN_IN_BYTES] = { 0 }; char *instance_name = NULL; sal_crypto_service_t *pCryptoService = (sal_crypto_service_t *)service; sal_statistics_collection_t *pStatsCollection = (sal_statistics_collection_t *)device->pQatStats; CpaStatus status = CPA_STATUS_SUCCESS; char *section = DYN_SEC; /*Instance may not in the DYN section*/ if (CPA_FALSE == pCryptoService->generic_service_info.is_dyn) { section = icpGetProcessName(); } if (CPA_TRUE == pStatsCollection->bStatsEnabled) { /* Get instance name for stats */ instance_name = LAC_OS_MALLOC(ADF_CFG_MAX_VAL_LEN_IN_BYTES); if (NULL == instance_name) { return CPA_STATUS_RESOURCE; } status = Sal_StringParsing( "Cy", pCryptoService->generic_service_info.instance, "Name", temp_string); if (CPA_STATUS_SUCCESS != status) { LAC_OS_FREE(instance_name); return status; } status = icp_adf_cfgGetParamValue(device, section, temp_string, adfGetParam); if (CPA_STATUS_SUCCESS != status) { QAT_UTILS_LOG( "Failed to get %s from configuration file\n", temp_string); LAC_OS_FREE(instance_name); return status; } snprintf(instance_name, ADF_CFG_MAX_VAL_LEN_IN_BYTES, "%s", adfGetParam); pCryptoService->debug_file = LAC_OS_MALLOC(sizeof(debug_file_info_t)); if (NULL == pCryptoService->debug_file) { LAC_OS_FREE(instance_name); return CPA_STATUS_RESOURCE; } memset(pCryptoService->debug_file, 0, sizeof(debug_file_info_t)); pCryptoService->debug_file->name = instance_name; pCryptoService->debug_file->seq_read = SalCtrl_CryptoDebug; pCryptoService->debug_file->private_data = pCryptoService; pCryptoService->debug_file->parent = pCryptoService->generic_service_info.debug_parent_dir; status = 
icp_adf_debugAddFile(device, pCryptoService->debug_file); if (CPA_STATUS_SUCCESS != status) { LAC_OS_FREE(instance_name); LAC_OS_FREE(pCryptoService->debug_file); return status; } } pCryptoService->generic_service_info.stats = pStatsCollection; return status; } static CpaStatus SalCtrl_GetBankNum(icp_accel_dev_t *device, Cpa32U inst, char *section, char *bank_name, Cpa16U *bank) { char adfParamValue[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = { 0 }; char adfParamName[SAL_CFG_MAX_VAL_LEN_IN_BYTES] = { 0 }; CpaStatus status = CPA_STATUS_SUCCESS; status = Sal_StringParsing("Cy", inst, bank_name, adfParamName); LAC_CHECK_STATUS(status); status = icp_adf_cfgGetParamValue(device, section, adfParamName, adfParamValue); if (CPA_STATUS_SUCCESS != status) { QAT_UTILS_LOG("Failed to get %s from configuration file\n", adfParamName); return status; } *bank = (Cpa16U)Sal_Strtoul(adfParamValue, NULL, SAL_CFG_BASE_DEC); return status; } static CpaStatus SalCtr_InstInit(icp_accel_dev_t *device, sal_service_t *service) { char adfGetParam[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = { 0 }; char temp_string[SAL_CFG_MAX_VAL_LEN_IN_BYTES] = { 0 }; char temp_string2[SAL_CFG_MAX_VAL_LEN_IN_BYTES] = { 0 }; sal_crypto_service_t *pCryptoService = (sal_crypto_service_t *)service; CpaStatus status = CPA_STATUS_SUCCESS; char *section = DYN_SEC; /*Instance may not in the DYN section*/ if (CPA_FALSE == pCryptoService->generic_service_info.is_dyn) { section = icpGetProcessName(); } /* Get Config Info: Accel Num, bank Num, packageID, coreAffinity, nodeAffinity and response mode */ pCryptoService->acceleratorNum = 0; /* Gen4, a bank only has 2 rings (1 ring pair), only one type of service can be assigned one time. asym and sym will be in different bank*/ if (isCyGen4x(pCryptoService)) { switch (service->type) { case SAL_SERVICE_TYPE_CRYPTO_ASYM: status = SalCtrl_GetBankNum( device, pCryptoService->generic_service_info.instance, section, "BankNumberAsym", &pCryptoService->bankNumAsym); if (CPA_STATUS_SUCCESS != status) return status; break; case SAL_SERVICE_TYPE_CRYPTO_SYM: status = SalCtrl_GetBankNum( device, pCryptoService->generic_service_info.instance, section, "BankNumberSym", &pCryptoService->bankNumSym); if (CPA_STATUS_SUCCESS != status) return status; break; case SAL_SERVICE_TYPE_CRYPTO: status = SalCtrl_GetBankNum( device, pCryptoService->generic_service_info.instance, section, "BankNumberAsym", &pCryptoService->bankNumAsym); if (CPA_STATUS_SUCCESS != status) return status; status = SalCtrl_GetBankNum( device, pCryptoService->generic_service_info.instance, section, "BankNumberSym", &pCryptoService->bankNumSym); if (CPA_STATUS_SUCCESS != status) return status; break; default: return CPA_STATUS_FAIL; } } else { status = SalCtrl_GetBankNum( device, pCryptoService->generic_service_info.instance, section, "BankNumber", &pCryptoService->bankNumSym); if (CPA_STATUS_SUCCESS != status) return status; pCryptoService->bankNumAsym = pCryptoService->bankNumSym; } status = Sal_StringParsing("Cy", pCryptoService->generic_service_info.instance, "IsPolled", temp_string); LAC_CHECK_STATUS(status); status = icp_adf_cfgGetParamValue(device, section, temp_string, adfGetParam); if (CPA_STATUS_SUCCESS != status) { QAT_UTILS_LOG("Failed to get %s from configuration file\n", temp_string); return status; } pCryptoService->isPolled = (Cpa8U)Sal_Strtoul(adfGetParam, NULL, SAL_CFG_BASE_DEC); /* Kernel instances do not support epoll mode */ if (SAL_RESP_EPOLL_CFG_FILE == pCryptoService->isPolled) { QAT_UTILS_LOG( "IsPolled %u is not supported for kernel instance %s", 
pCryptoService->isPolled, temp_string); return CPA_STATUS_FAIL; } status = icp_adf_cfgGetParamValue(device, LAC_CFG_SECTION_GENERAL, ADF_DEV_PKG_ID, adfGetParam); if (CPA_STATUS_SUCCESS != status) { QAT_UTILS_LOG("Failed to get %s from configuration file\n", ADF_DEV_PKG_ID); return status; } pCryptoService->pkgID = (Cpa16U)Sal_Strtoul(adfGetParam, NULL, SAL_CFG_BASE_DEC); status = icp_adf_cfgGetParamValue(device, LAC_CFG_SECTION_GENERAL, ADF_DEV_NODE_ID, adfGetParam); if (CPA_STATUS_SUCCESS != status) { QAT_UTILS_LOG("Failed to get %s from configuration file\n", ADF_DEV_NODE_ID); return status; } pCryptoService->nodeAffinity = (Cpa32U)Sal_Strtoul(adfGetParam, NULL, SAL_CFG_BASE_DEC); /* In case of interrupt instance, use the bank affinity set by adf_ctl * Otherwise, use the instance affinity for backwards compatibility */ if (SAL_RESP_POLL_CFG_FILE != pCryptoService->isPolled) { /* Next need to read the [AcceleratorX] section of the config * file */ status = Sal_StringParsing("Accelerator", pCryptoService->acceleratorNum, "", temp_string2); LAC_CHECK_STATUS(status); if (service->type == SAL_SERVICE_TYPE_CRYPTO_ASYM) status = Sal_StringParsing("Bank", pCryptoService->bankNumAsym, "CoreAffinity", temp_string); else /* For cy service, asym bank and sym bank will set the same core affinity. So Just read one*/ status = Sal_StringParsing("Bank", pCryptoService->bankNumSym, "CoreAffinity", temp_string); LAC_CHECK_STATUS(status); } else { strncpy(temp_string2, section, (strlen(section) + 1)); status = Sal_StringParsing( "Cy", pCryptoService->generic_service_info.instance, "CoreAffinity", temp_string); LAC_CHECK_STATUS(status); } status = icp_adf_cfgGetParamValue(device, temp_string2, temp_string, adfGetParam); if (CPA_STATUS_SUCCESS != status) { QAT_UTILS_LOG("Failed to get %s from configuration file\n", temp_string); return status; } pCryptoService->coreAffinity = (Cpa32U)Sal_Strtoul(adfGetParam, NULL, SAL_CFG_BASE_DEC); /*No Execution Engine in DH895xcc, so make sure it is zero*/ pCryptoService->executionEngine = 0; return status; } /* This function: * 1. Creates sym and asym transport handles * 2. Allocates memory pools required by sym and asym services .* 3. Clears the sym and asym stats counters * 4. In case service asym or sym is enabled then this function * only allocates resources for these services. i.e if the * service asym is enabled then only asym transport handles * are created and vice versa. 
*/ CpaStatus SalCtrl_CryptoInit(icp_accel_dev_t *device, sal_service_t *service) { CpaStatus status = CPA_STATUS_SUCCESS; sal_crypto_service_t *pCryptoService = (sal_crypto_service_t *)service; sal_service_type_t svc_type = service->type; SAL_SERVICE_GOOD_FOR_INIT(pCryptoService); pCryptoService->generic_service_info.state = SAL_SERVICE_STATE_INITIALIZING; /* Set up the instance parameters such as bank number, * coreAffinity, pkgId and node affinity etc */ status = SalCtr_InstInit(device, service); LAC_CHECK_STATUS(status); /* Create debug directory for service */ status = SalCtrl_DebugInit(device, service); LAC_CHECK_STATUS(status); switch (svc_type) { case SAL_SERVICE_TYPE_CRYPTO_ASYM: break; case SAL_SERVICE_TYPE_CRYPTO_SYM: status = SalCtrl_SymInit(device, service); if (CPA_STATUS_SUCCESS != status) { SalCtrl_DebugShutdown(device, service); return status; } break; case SAL_SERVICE_TYPE_CRYPTO: status = SalCtrl_SymInit(device, service); if (CPA_STATUS_SUCCESS != status) { SalCtrl_DebugShutdown(device, service); return status; } break; default: LAC_LOG_ERROR("Invalid service type\n"); status = CPA_STATUS_FAIL; break; } pCryptoService->generic_service_info.state = SAL_SERVICE_STATE_INITIALIZED; return status; } CpaStatus SalCtrl_CryptoStart(icp_accel_dev_t *device, sal_service_t *service) { sal_crypto_service_t *pCryptoService = (sal_crypto_service_t *)service; CpaStatus status = CPA_STATUS_SUCCESS; if (pCryptoService->generic_service_info.state != SAL_SERVICE_STATE_INITIALIZED) { LAC_LOG_ERROR("Not in the correct state to call start\n"); return CPA_STATUS_FAIL; } pCryptoService->generic_service_info.state = SAL_SERVICE_STATE_RUNNING; return status; } CpaStatus SalCtrl_CryptoStop(icp_accel_dev_t *device, sal_service_t *service) { sal_crypto_service_t *pCryptoService = (sal_crypto_service_t *)service; if (SAL_SERVICE_STATE_RUNNING != pCryptoService->generic_service_info.state) { LAC_LOG_ERROR("Not in the correct state to call stop"); } pCryptoService->generic_service_info.state = SAL_SERVICE_STATE_SHUTTING_DOWN; return CPA_STATUS_SUCCESS; } CpaStatus SalCtrl_CryptoShutdown(icp_accel_dev_t *device, sal_service_t *service) { sal_crypto_service_t *pCryptoService = (sal_crypto_service_t *)service; CpaStatus status = CPA_STATUS_SUCCESS; sal_service_type_t svc_type = service->type; if ((SAL_SERVICE_STATE_INITIALIZED != pCryptoService->generic_service_info.state) && (SAL_SERVICE_STATE_SHUTTING_DOWN != pCryptoService->generic_service_info.state)) { LAC_LOG_ERROR("Not in the correct state to call shutdown \n"); return CPA_STATUS_FAIL; } /* Free memory and transhandles */ switch (svc_type) { case SAL_SERVICE_TYPE_CRYPTO_ASYM: break; case SAL_SERVICE_TYPE_CRYPTO_SYM: if (SalCtrl_SymFreeResources(pCryptoService)) { status = CPA_STATUS_FAIL; } break; case SAL_SERVICE_TYPE_CRYPTO: if (SalCtrl_SymFreeResources(pCryptoService)) { status = CPA_STATUS_FAIL; } break; default: LAC_LOG_ERROR("Invalid service type\n"); status = CPA_STATUS_FAIL; break; } SalCtrl_DebugShutdown(device, service); pCryptoService->generic_service_info.state = SAL_SERVICE_STATE_SHUTDOWN; return status; } /** ****************************************************************************** * @ingroup cpaCyCommon *****************************************************************************/ CpaStatus cpaCyGetStatusText(const CpaInstanceHandle instanceHandle, CpaStatus errStatus, Cpa8S *pStatusText) { CpaStatus status = CPA_STATUS_SUCCESS; LAC_CHECK_NULL_PARAM(pStatusText); switch (errStatus) { case CPA_STATUS_SUCCESS: 
LAC_COPY_STRING(pStatusText, CPA_STATUS_STR_SUCCESS); break; case CPA_STATUS_FAIL: LAC_COPY_STRING(pStatusText, CPA_STATUS_STR_FAIL); break; case CPA_STATUS_RETRY: LAC_COPY_STRING(pStatusText, CPA_STATUS_STR_RETRY); break; case CPA_STATUS_RESOURCE: LAC_COPY_STRING(pStatusText, CPA_STATUS_STR_RESOURCE); break; case CPA_STATUS_INVALID_PARAM: LAC_COPY_STRING(pStatusText, CPA_STATUS_STR_INVALID_PARAM); break; case CPA_STATUS_FATAL: LAC_COPY_STRING(pStatusText, CPA_STATUS_STR_FATAL); break; case CPA_STATUS_UNSUPPORTED: LAC_COPY_STRING(pStatusText, CPA_STATUS_STR_UNSUPPORTED); break; default: status = CPA_STATUS_INVALID_PARAM; break; } return status; } void SalCtrl_CyQueryCapabilities(sal_service_t *pGenericService, CpaCyCapabilitiesInfo *pCapInfo) { memset(pCapInfo, 0, sizeof(CpaCyCapabilitiesInfo)); if (SAL_SERVICE_TYPE_CRYPTO == pGenericService->type || SAL_SERVICE_TYPE_CRYPTO_SYM == pGenericService->type) { pCapInfo->symSupported = CPA_TRUE; if (pGenericService->capabilitiesMask & ICP_ACCEL_CAPABILITIES_EXT_ALGCHAIN) { pCapInfo->extAlgchainSupported = CPA_TRUE; } if (pGenericService->capabilitiesMask & ICP_ACCEL_CAPABILITIES_HKDF) { pCapInfo->hkdfSupported = CPA_TRUE; } } if (pGenericService->capabilitiesMask & ICP_ACCEL_CAPABILITIES_ECEDMONT) { pCapInfo->ecEdMontSupported = CPA_TRUE; } if (pGenericService->capabilitiesMask & ICP_ACCEL_CAPABILITIES_RANDOM_NUMBER) { pCapInfo->nrbgSupported = CPA_TRUE; } pCapInfo->drbgSupported = CPA_FALSE; pCapInfo->randSupported = CPA_FALSE; pCapInfo->nrbgSupported = CPA_FALSE; } /** ****************************************************************************** * @ingroup cpaCyCommon *****************************************************************************/ CpaStatus cpaCyStartInstance(CpaInstanceHandle instanceHandle_in) { CpaInstanceHandle instanceHandle = NULL; /* Structure initializer is supported by C99, but it is * not supported by some former Intel compilers. */ CpaInstanceInfo2 info = { 0 }; icp_accel_dev_t *dev = NULL; CpaStatus status = CPA_STATUS_SUCCESS; sal_crypto_service_t *pService = NULL; if (CPA_INSTANCE_HANDLE_SINGLE == instanceHandle_in) { instanceHandle = Lac_GetFirstHandle(SAL_SERVICE_TYPE_CRYPTO); if (!instanceHandle) { instanceHandle = Lac_GetFirstHandle(SAL_SERVICE_TYPE_CRYPTO_SYM); } } else { instanceHandle = instanceHandle_in; } LAC_CHECK_NULL_PARAM(instanceHandle); SAL_CHECK_INSTANCE_TYPE(instanceHandle, (SAL_SERVICE_TYPE_CRYPTO | SAL_SERVICE_TYPE_CRYPTO_ASYM | SAL_SERVICE_TYPE_CRYPTO_SYM)); pService = (sal_crypto_service_t *)instanceHandle; status = cpaCyInstanceGetInfo2(instanceHandle, &info); if (CPA_STATUS_SUCCESS != status) { LAC_LOG_ERROR("Can not get instance info\n"); return status; } dev = icp_adf_getAccelDevByAccelId(info.physInstId.packageId); if (NULL == dev) { LAC_LOG_ERROR("Can not find device for the instance\n"); return CPA_STATUS_FAIL; } pService->generic_service_info.isInstanceStarted = CPA_TRUE; /* Increment dev ref counter */ icp_qa_dev_get(dev); return CPA_STATUS_SUCCESS; } /** ****************************************************************************** * @ingroup cpaCyCommon *****************************************************************************/ CpaStatus cpaCyStopInstance(CpaInstanceHandle instanceHandle_in) { CpaInstanceHandle instanceHandle = NULL; /* Structure initializer is supported by C99, but it is * not supported by some former Intel compilers. 
*/ CpaInstanceInfo2 info = { 0 }; icp_accel_dev_t *dev = NULL; CpaStatus status = CPA_STATUS_SUCCESS; sal_crypto_service_t *pService = NULL; if (CPA_INSTANCE_HANDLE_SINGLE == instanceHandle_in) { instanceHandle = Lac_CryptoGetFirstHandle(); } else { instanceHandle = instanceHandle_in; } LAC_CHECK_NULL_PARAM(instanceHandle); SAL_CHECK_INSTANCE_TYPE(instanceHandle, (SAL_SERVICE_TYPE_CRYPTO | SAL_SERVICE_TYPE_CRYPTO_ASYM | SAL_SERVICE_TYPE_CRYPTO_SYM)); status = cpaCyInstanceGetInfo2(instanceHandle, &info); if (CPA_STATUS_SUCCESS != status) { LAC_LOG_ERROR("Can not get instance info\n"); return status; } dev = icp_adf_getAccelDevByAccelId(info.physInstId.packageId); if (NULL == dev) { LAC_LOG_ERROR("Can not find device for the instance\n"); return CPA_STATUS_FAIL; } pService = (sal_crypto_service_t *)instanceHandle; pService->generic_service_info.isInstanceStarted = CPA_FALSE; /* Decrement dev ref counter */ icp_qa_dev_put(dev); return CPA_STATUS_SUCCESS; } /** ****************************************************************************** * @ingroup cpaCyCommon *****************************************************************************/ CpaStatus cpaCyInstanceSetNotificationCb( const CpaInstanceHandle instanceHandle, const CpaCyInstanceNotificationCbFunc pInstanceNotificationCb, void *pCallbackTag) { CpaStatus status = CPA_STATUS_SUCCESS; sal_service_t *gen_handle = instanceHandle; LAC_CHECK_NULL_PARAM(gen_handle); gen_handle->notification_cb = pInstanceNotificationCb; gen_handle->cb_tag = pCallbackTag; return status; } /** ****************************************************************************** * @ingroup cpaCyCommon *****************************************************************************/ CpaStatus cpaCyGetNumInstances(Cpa16U *pNumInstances) { return Lac_GetCyNumInstancesByType(CPA_ACC_SVC_TYPE_CRYPTO, pNumInstances); } /** ****************************************************************************** * @ingroup cpaCyCommon *****************************************************************************/ CpaStatus cpaCyGetInstances(Cpa16U numInstances, CpaInstanceHandle *pCyInstances) { return Lac_GetCyInstancesByType(CPA_ACC_SVC_TYPE_CRYPTO, numInstances, pCyInstances); } /** ****************************************************************************** * @ingroup cpaCyCommon *****************************************************************************/ CpaStatus cpaCyInstanceGetInfo(const CpaInstanceHandle instanceHandle_in, struct _CpaInstanceInfo *pInstanceInfo) { CpaInstanceHandle instanceHandle = NULL; sal_crypto_service_t *pCryptoService = NULL; sal_service_t *pGenericService = NULL; Cpa8U name[CPA_INST_NAME_SIZE] = "Intel(R) DH89XXCC instance number: %02x, type: Crypto"; if (CPA_INSTANCE_HANDLE_SINGLE == instanceHandle_in) { instanceHandle = Lac_CryptoGetFirstHandle(); } else { instanceHandle = instanceHandle_in; } LAC_CHECK_NULL_PARAM(instanceHandle); LAC_CHECK_NULL_PARAM(pInstanceInfo); SAL_CHECK_INSTANCE_TYPE(instanceHandle, (SAL_SERVICE_TYPE_CRYPTO | SAL_SERVICE_TYPE_CRYPTO_ASYM | SAL_SERVICE_TYPE_CRYPTO_SYM)); pCryptoService = (sal_crypto_service_t *)instanceHandle; pInstanceInfo->type = CPA_INSTANCE_TYPE_CRYPTO; /* According to cpa.h instance state is initialized and ready for use * or shutdown. 
Therefore need to map our running state to initialised * or shutdown */ if (SAL_SERVICE_STATE_RUNNING == pCryptoService->generic_service_info.state) { pInstanceInfo->state = CPA_INSTANCE_STATE_INITIALISED; } else { pInstanceInfo->state = CPA_INSTANCE_STATE_SHUTDOWN; } pGenericService = (sal_service_t *)instanceHandle; snprintf((char *)pInstanceInfo->name, CPA_INST_NAME_SIZE, (char *)name, pGenericService->instance); pInstanceInfo->name[CPA_INST_NAME_SIZE - 1] = '\0'; snprintf((char *)pInstanceInfo->version, CPA_INSTANCE_MAX_NAME_SIZE_IN_BYTES, "%d.%d", CPA_CY_API_VERSION_NUM_MAJOR, CPA_CY_API_VERSION_NUM_MINOR); pInstanceInfo->version[CPA_INSTANCE_MAX_VERSION_SIZE_IN_BYTES - 1] = '\0'; return CPA_STATUS_SUCCESS; } /** ****************************************************************************** * @ingroup cpaCyCommon *****************************************************************************/ CpaStatus cpaCyInstanceGetInfo2(const CpaInstanceHandle instanceHandle_in, CpaInstanceInfo2 *pInstanceInfo2) { CpaInstanceHandle instanceHandle = NULL; sal_crypto_service_t *pCryptoService = NULL; icp_accel_dev_t *dev = NULL; CpaStatus status = CPA_STATUS_SUCCESS; char keyStr[ADF_CFG_MAX_KEY_LEN_IN_BYTES] = { 0 }; char valStr[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = { 0 }; char *section = DYN_SEC; if (CPA_INSTANCE_HANDLE_SINGLE == instanceHandle_in) { instanceHandle = Lac_CryptoGetFirstHandle(); } else { instanceHandle = instanceHandle_in; } LAC_CHECK_NULL_PARAM(instanceHandle); LAC_CHECK_NULL_PARAM(pInstanceInfo2); SAL_CHECK_INSTANCE_TYPE(instanceHandle, (SAL_SERVICE_TYPE_CRYPTO | SAL_SERVICE_TYPE_CRYPTO_ASYM | SAL_SERVICE_TYPE_CRYPTO_SYM)); LAC_OS_BZERO(pInstanceInfo2, sizeof(CpaInstanceInfo2)); pInstanceInfo2->accelerationServiceType = CPA_ACC_SVC_TYPE_CRYPTO; snprintf((char *)pInstanceInfo2->vendorName, CPA_INST_VENDOR_NAME_SIZE, "%s", SAL_INFO2_VENDOR_NAME); pInstanceInfo2->vendorName[CPA_INST_VENDOR_NAME_SIZE - 1] = '\0'; snprintf((char *)pInstanceInfo2->swVersion, CPA_INST_SW_VERSION_SIZE, "Version %d.%d", SAL_INFO2_DRIVER_SW_VERSION_MAJ_NUMBER, SAL_INFO2_DRIVER_SW_VERSION_MIN_NUMBER); pInstanceInfo2->swVersion[CPA_INST_SW_VERSION_SIZE - 1] = '\0'; /* Note we can safely read the contents of the crypto service instance here because icp_amgr_getAllAccelDevByCapabilities() only returns devs that have started */ pCryptoService = (sal_crypto_service_t *)instanceHandle; pInstanceInfo2->physInstId.packageId = pCryptoService->pkgID; pInstanceInfo2->physInstId.acceleratorId = pCryptoService->acceleratorNum; pInstanceInfo2->physInstId.executionEngineId = pCryptoService->executionEngine; pInstanceInfo2->physInstId.busAddress = icp_adf_get_busAddress(pInstanceInfo2->physInstId.packageId); /*set coreAffinity to zero before use */ LAC_OS_BZERO(pInstanceInfo2->coreAffinity, sizeof(pInstanceInfo2->coreAffinity)); CPA_BITMAP_BIT_SET(pInstanceInfo2->coreAffinity, pCryptoService->coreAffinity); pInstanceInfo2->nodeAffinity = pCryptoService->nodeAffinity; if (SAL_SERVICE_STATE_RUNNING == pCryptoService->generic_service_info.state) { pInstanceInfo2->operState = CPA_OPER_STATE_UP; } else { pInstanceInfo2->operState = CPA_OPER_STATE_DOWN; } pInstanceInfo2->requiresPhysicallyContiguousMemory = CPA_TRUE; if (SAL_RESP_POLL_CFG_FILE == pCryptoService->isPolled) { pInstanceInfo2->isPolled = CPA_TRUE; } else { pInstanceInfo2->isPolled = CPA_FALSE; } pInstanceInfo2->isOffloaded = CPA_TRUE; /* Get the instance name and part name */ dev = icp_adf_getAccelDevByAccelId(pCryptoService->pkgID); - if (NULL == dev) { + if (NULL == dev || 
+ 0 == strnlen(dev->deviceName, ADF_DEVICE_TYPE_LENGTH + 1)) { LAC_LOG_ERROR("Can not find device for the instance\n"); LAC_OS_BZERO(pInstanceInfo2, sizeof(CpaInstanceInfo2)); return CPA_STATUS_FAIL; } snprintf((char *)pInstanceInfo2->partName, CPA_INST_PART_NAME_SIZE, SAL_INFO2_PART_NAME, dev->deviceName); pInstanceInfo2->partName[CPA_INST_PART_NAME_SIZE - 1] = '\0'; status = Sal_StringParsing("Cy", pCryptoService->generic_service_info.instance, "Name", keyStr); LAC_CHECK_STATUS(status); if (CPA_FALSE == pCryptoService->generic_service_info.is_dyn) { section = icpGetProcessName(); } status = icp_adf_cfgGetParamValue(dev, section, keyStr, valStr); LAC_CHECK_STATUS(status); snprintf((char *)pInstanceInfo2->instName, CPA_INST_NAME_SIZE, "%s", valStr); snprintf((char *)pInstanceInfo2->instID, CPA_INST_ID_SIZE, "%s_%s", section, valStr); return CPA_STATUS_SUCCESS; } /** ****************************************************************************** * @ingroup cpaCyCommon *****************************************************************************/ CpaStatus cpaCyQueryCapabilities(const CpaInstanceHandle instanceHandle_in, CpaCyCapabilitiesInfo *pCapInfo) { /* Verify Instance exists */ CpaInstanceHandle instanceHandle = NULL; if (CPA_INSTANCE_HANDLE_SINGLE == instanceHandle_in) { instanceHandle = Lac_CryptoGetFirstHandle(); } else { instanceHandle = instanceHandle_in; } LAC_CHECK_NULL_PARAM(instanceHandle); SAL_CHECK_INSTANCE_TYPE(instanceHandle, (SAL_SERVICE_TYPE_CRYPTO | SAL_SERVICE_TYPE_CRYPTO_ASYM | SAL_SERVICE_TYPE_CRYPTO_SYM)); LAC_CHECK_NULL_PARAM(pCapInfo); SalCtrl_CyQueryCapabilities((sal_service_t *)instanceHandle, pCapInfo); return CPA_STATUS_SUCCESS; } /** ****************************************************************************** * @ingroup cpaCySym *****************************************************************************/ CpaStatus cpaCySymQueryCapabilities(const CpaInstanceHandle instanceHandle_in, CpaCySymCapabilitiesInfo *pCapInfo) { sal_crypto_service_t *pCryptoService = NULL; sal_service_t *pGenericService = NULL; CpaInstanceHandle instanceHandle = NULL; /* Verify Instance exists */ if (CPA_INSTANCE_HANDLE_SINGLE == instanceHandle_in) { instanceHandle = Lac_GetFirstHandle(SAL_SERVICE_TYPE_CRYPTO); if (!instanceHandle) { instanceHandle = Lac_GetFirstHandle(SAL_SERVICE_TYPE_CRYPTO_SYM); } } else { instanceHandle = instanceHandle_in; } LAC_CHECK_NULL_PARAM(instanceHandle); SAL_CHECK_INSTANCE_TYPE(instanceHandle, (SAL_SERVICE_TYPE_CRYPTO | SAL_SERVICE_TYPE_CRYPTO_ASYM | SAL_SERVICE_TYPE_CRYPTO_SYM)); LAC_CHECK_NULL_PARAM(pCapInfo); pCryptoService = (sal_crypto_service_t *)instanceHandle; pGenericService = &(pCryptoService->generic_service_info); memset(pCapInfo, '\0', sizeof(CpaCySymCapabilitiesInfo)); /* An asym crypto instance does not support sym service */ if (SAL_SERVICE_TYPE_CRYPTO_ASYM == pGenericService->type) { return CPA_STATUS_SUCCESS; } CPA_BITMAP_BIT_SET(pCapInfo->ciphers, CPA_CY_SYM_CIPHER_NULL); CPA_BITMAP_BIT_SET(pCapInfo->ciphers, CPA_CY_SYM_CIPHER_AES_ECB); CPA_BITMAP_BIT_SET(pCapInfo->ciphers, CPA_CY_SYM_CIPHER_AES_CBC); CPA_BITMAP_BIT_SET(pCapInfo->ciphers, CPA_CY_SYM_CIPHER_AES_CTR); CPA_BITMAP_BIT_SET(pCapInfo->ciphers, CPA_CY_SYM_CIPHER_AES_CCM); CPA_BITMAP_BIT_SET(pCapInfo->ciphers, CPA_CY_SYM_CIPHER_AES_GCM); CPA_BITMAP_BIT_SET(pCapInfo->ciphers, CPA_CY_SYM_CIPHER_AES_XTS); if (isCyGen2x(pCryptoService)) { CPA_BITMAP_BIT_SET(pCapInfo->ciphers, CPA_CY_SYM_CIPHER_ARC4); CPA_BITMAP_BIT_SET(pCapInfo->ciphers, CPA_CY_SYM_CIPHER_DES_ECB); 
CPA_BITMAP_BIT_SET(pCapInfo->ciphers, CPA_CY_SYM_CIPHER_DES_CBC); CPA_BITMAP_BIT_SET(pCapInfo->ciphers, CPA_CY_SYM_CIPHER_3DES_ECB); CPA_BITMAP_BIT_SET(pCapInfo->ciphers, CPA_CY_SYM_CIPHER_3DES_CBC); CPA_BITMAP_BIT_SET(pCapInfo->ciphers, CPA_CY_SYM_CIPHER_3DES_CTR); CPA_BITMAP_BIT_SET(pCapInfo->ciphers, CPA_CY_SYM_CIPHER_KASUMI_F8); CPA_BITMAP_BIT_SET(pCapInfo->ciphers, CPA_CY_SYM_CIPHER_SNOW3G_UEA2); CPA_BITMAP_BIT_SET(pCapInfo->ciphers, CPA_CY_SYM_CIPHER_AES_F8); } CPA_BITMAP_BIT_SET(pCapInfo->hashes, CPA_CY_SYM_HASH_SHA1); CPA_BITMAP_BIT_SET(pCapInfo->hashes, CPA_CY_SYM_HASH_SHA224); CPA_BITMAP_BIT_SET(pCapInfo->hashes, CPA_CY_SYM_HASH_SHA256); CPA_BITMAP_BIT_SET(pCapInfo->hashes, CPA_CY_SYM_HASH_SHA384); CPA_BITMAP_BIT_SET(pCapInfo->hashes, CPA_CY_SYM_HASH_SHA512); CPA_BITMAP_BIT_SET(pCapInfo->hashes, CPA_CY_SYM_HASH_AES_XCBC); CPA_BITMAP_BIT_SET(pCapInfo->hashes, CPA_CY_SYM_HASH_AES_CCM); CPA_BITMAP_BIT_SET(pCapInfo->hashes, CPA_CY_SYM_HASH_AES_GCM); CPA_BITMAP_BIT_SET(pCapInfo->hashes, CPA_CY_SYM_HASH_AES_CMAC); CPA_BITMAP_BIT_SET(pCapInfo->hashes, CPA_CY_SYM_HASH_AES_GMAC); CPA_BITMAP_BIT_SET(pCapInfo->hashes, CPA_CY_SYM_HASH_AES_CBC_MAC); if (isCyGen2x(pCryptoService)) { CPA_BITMAP_BIT_SET(pCapInfo->hashes, CPA_CY_SYM_HASH_MD5); CPA_BITMAP_BIT_SET(pCapInfo->hashes, CPA_CY_SYM_HASH_KASUMI_F9); CPA_BITMAP_BIT_SET(pCapInfo->hashes, CPA_CY_SYM_HASH_SNOW3G_UIA2); } if (pGenericService->capabilitiesMask & ICP_ACCEL_CAPABILITIES_CRYPTO_ZUC) { CPA_BITMAP_BIT_SET(pCapInfo->ciphers, CPA_CY_SYM_CIPHER_ZUC_EEA3); CPA_BITMAP_BIT_SET(pCapInfo->hashes, CPA_CY_SYM_HASH_ZUC_EIA3); } if (pGenericService->capabilitiesMask & ICP_ACCEL_CAPABILITIES_CHACHA_POLY) { CPA_BITMAP_BIT_SET(pCapInfo->hashes, CPA_CY_SYM_HASH_POLY); CPA_BITMAP_BIT_SET(pCapInfo->ciphers, CPA_CY_SYM_CIPHER_CHACHA); } if (pGenericService->capabilitiesMask & ICP_ACCEL_CAPABILITIES_SM3) { CPA_BITMAP_BIT_SET(pCapInfo->hashes, CPA_CY_SYM_HASH_SM3); } pCapInfo->partialPacketSupported = CPA_TRUE; if (pGenericService->capabilitiesMask & ICP_ACCEL_CAPABILITIES_SHA3) { CPA_BITMAP_BIT_SET(pCapInfo->hashes, CPA_CY_SYM_HASH_SHA3_256); pCapInfo->partialPacketSupported = CPA_FALSE; } if (pGenericService->capabilitiesMask & ICP_ACCEL_CAPABILITIES_SHA3_EXT) { CPA_BITMAP_BIT_SET(pCapInfo->hashes, CPA_CY_SYM_HASH_SHA3_224); CPA_BITMAP_BIT_SET(pCapInfo->hashes, CPA_CY_SYM_HASH_SHA3_256); CPA_BITMAP_BIT_SET(pCapInfo->hashes, CPA_CY_SYM_HASH_SHA3_384); CPA_BITMAP_BIT_SET(pCapInfo->hashes, CPA_CY_SYM_HASH_SHA3_512); pCapInfo->partialPacketSupported = CPA_FALSE; } if (pGenericService->capabilitiesMask & ICP_ACCEL_CAPABILITIES_SM4) { CPA_BITMAP_BIT_SET(pCapInfo->ciphers, CPA_CY_SYM_CIPHER_SM4_ECB); CPA_BITMAP_BIT_SET(pCapInfo->ciphers, CPA_CY_SYM_CIPHER_SM4_CBC); CPA_BITMAP_BIT_SET(pCapInfo->ciphers, CPA_CY_SYM_CIPHER_SM4_CTR); pCapInfo->partialPacketSupported = CPA_FALSE; } return CPA_STATUS_SUCCESS; } /** ****************************************************************************** * @ingroup cpaCyCommon *****************************************************************************/ CpaStatus cpaCySetAddressTranslation(const CpaInstanceHandle instanceHandle_in, CpaVirtualToPhysical virtual2physical) { CpaInstanceHandle instanceHandle = NULL; sal_service_t *pService = NULL; if (CPA_INSTANCE_HANDLE_SINGLE == instanceHandle_in) { instanceHandle = Lac_CryptoGetFirstHandle(); } else { instanceHandle = instanceHandle_in; } LAC_CHECK_NULL_PARAM(instanceHandle); SAL_CHECK_INSTANCE_TYPE(instanceHandle, (SAL_SERVICE_TYPE_CRYPTO | SAL_SERVICE_TYPE_CRYPTO_ASYM | 
SAL_SERVICE_TYPE_CRYPTO_SYM)); LAC_CHECK_NULL_PARAM(virtual2physical); pService = (sal_service_t *)instanceHandle; pService->virt2PhysClient = virtual2physical; return CPA_STATUS_SUCCESS; } /** ****************************************************************************** * @ingroup cpaCyCommon * Crypto specific polling function which polls a crypto instance. *****************************************************************************/ CpaStatus icp_sal_CyPollInstance(CpaInstanceHandle instanceHandle_in, Cpa32U response_quota) { CpaStatus status = CPA_STATUS_SUCCESS; sal_crypto_service_t *crypto_handle = NULL; sal_service_t *gen_handle = NULL; icp_comms_trans_handle trans_hndTable[MAX_CY_RX_RINGS] = { 0 }; Cpa32U num_rx_rings = 0; if (CPA_INSTANCE_HANDLE_SINGLE == instanceHandle_in) { crypto_handle = (sal_crypto_service_t *)Lac_CryptoGetFirstHandle(); } else { crypto_handle = (sal_crypto_service_t *)instanceHandle_in; } LAC_CHECK_NULL_PARAM(crypto_handle); SAL_RUNNING_CHECK(crypto_handle); SAL_CHECK_INSTANCE_TYPE(crypto_handle, (SAL_SERVICE_TYPE_CRYPTO | SAL_SERVICE_TYPE_CRYPTO_ASYM | SAL_SERVICE_TYPE_CRYPTO_SYM)); gen_handle = &(crypto_handle->generic_service_info); /* * From the instanceHandle we must get the trans_handle and send * down to adf for polling. * Populate our trans handle table with the appropriate handles. */ switch (gen_handle->type) { case SAL_SERVICE_TYPE_CRYPTO_ASYM: trans_hndTable[TH_CY_RX_0] = crypto_handle->trans_handle_asym_rx; num_rx_rings = 1; break; case SAL_SERVICE_TYPE_CRYPTO_SYM: trans_hndTable[TH_CY_RX_0] = crypto_handle->trans_handle_sym_rx; num_rx_rings = 1; break; case SAL_SERVICE_TYPE_CRYPTO: trans_hndTable[TH_CY_RX_0] = crypto_handle->trans_handle_sym_rx; trans_hndTable[TH_CY_RX_1] = crypto_handle->trans_handle_asym_rx; num_rx_rings = MAX_CY_RX_RINGS; break; default: break; } /* Call adf to do the polling. */ status = icp_adf_pollInstance(trans_hndTable, num_rx_rings, response_quota); return status; } /** ****************************************************************************** * @ingroup cpaCyCommon * Crypto specific polling function which polls sym crypto ring. *****************************************************************************/ CpaStatus icp_sal_CyPollSymRing(CpaInstanceHandle instanceHandle_in, Cpa32U response_quota) { CpaStatus status = CPA_STATUS_SUCCESS; sal_crypto_service_t *crypto_handle = NULL; icp_comms_trans_handle trans_hndTable[NUM_CRYPTO_SYM_RX_RINGS] = { 0 }; if (CPA_INSTANCE_HANDLE_SINGLE == instanceHandle_in) { crypto_handle = (sal_crypto_service_t *)Lac_GetFirstHandle( SAL_SERVICE_TYPE_CRYPTO_SYM); } else { crypto_handle = (sal_crypto_service_t *)instanceHandle_in; } LAC_CHECK_NULL_PARAM(crypto_handle); SAL_CHECK_INSTANCE_TYPE(crypto_handle, (SAL_SERVICE_TYPE_CRYPTO | SAL_SERVICE_TYPE_CRYPTO_SYM)); SAL_RUNNING_CHECK(crypto_handle); /* * From the instanceHandle we must get the trans_handle and send * down to adf for polling. * Populate our trans handle table with the appropriate handles. */ trans_hndTable[TH_SINGLE_RX] = crypto_handle->trans_handle_sym_rx; /* Call adf to do the polling. */ status = icp_adf_pollInstance(trans_hndTable, NUM_CRYPTO_SYM_RX_RINGS, response_quota); return status; } /** ****************************************************************************** * @ingroup cpaCyCommon * Crypto specific polling function which polls an nrbg crypto ring. 
*****************************************************************************/ CpaStatus icp_sal_CyPollNRBGRing(CpaInstanceHandle instanceHandle_in, Cpa32U response_quota) { return CPA_STATUS_UNSUPPORTED; } /* Returns the handle to the first asym crypto instance */ static CpaInstanceHandle Lac_GetFirstAsymHandle(icp_accel_dev_t *adfInsts[ADF_MAX_DEVICES], Cpa16U num_dev) { CpaStatus status = CPA_STATUS_SUCCESS; icp_accel_dev_t *dev_addr = NULL; sal_t *base_addr = NULL; sal_list_t *list_temp = NULL; CpaInstanceHandle cyInst = NULL; CpaInstanceInfo2 info; Cpa16U i = 0; for (i = 0; i < num_dev; i++) { dev_addr = (icp_accel_dev_t *)adfInsts[i]; base_addr = dev_addr->pSalHandle; if (NULL == base_addr) { continue; } list_temp = base_addr->asym_services; while (NULL != list_temp) { cyInst = SalList_getObject(list_temp); status = cpaCyInstanceGetInfo2(cyInst, &info); list_temp = SalList_next(list_temp); if (CPA_STATUS_SUCCESS != status || CPA_TRUE != info.isPolled) { cyInst = NULL; continue; } break; } if (cyInst) { break; } } return cyInst; } /* Returns the handle to the first sym crypto instance */ static CpaInstanceHandle Lac_GetFirstSymHandle(icp_accel_dev_t *adfInsts[ADF_MAX_DEVICES], Cpa16U num_dev) { CpaStatus status = CPA_STATUS_SUCCESS; icp_accel_dev_t *dev_addr = NULL; sal_t *base_addr = NULL; sal_list_t *list_temp = NULL; CpaInstanceHandle cyInst = NULL; CpaInstanceInfo2 info; Cpa16U i = 0; for (i = 0; i < num_dev; i++) { dev_addr = (icp_accel_dev_t *)adfInsts[i]; base_addr = dev_addr->pSalHandle; if (NULL == base_addr) { continue; } list_temp = base_addr->sym_services; while (NULL != list_temp) { cyInst = SalList_getObject(list_temp); status = cpaCyInstanceGetInfo2(cyInst, &info); list_temp = SalList_next(list_temp); if (CPA_STATUS_SUCCESS != status || CPA_TRUE != info.isPolled) { cyInst = NULL; continue; } break; } if (cyInst) { break; } } return cyInst; } /* Returns the handle to the first crypto instance * Note that the crypto instance in this case supports * both asym and sym services */ static CpaInstanceHandle Lac_GetFirstCyHandle(icp_accel_dev_t *adfInsts[ADF_MAX_DEVICES], Cpa16U num_dev) { CpaStatus status = CPA_STATUS_SUCCESS; icp_accel_dev_t *dev_addr = NULL; sal_t *base_addr = NULL; sal_list_t *list_temp = NULL; CpaInstanceHandle cyInst = NULL; CpaInstanceInfo2 info; Cpa16U i = 0; for (i = 0; i < num_dev; i++) { dev_addr = (icp_accel_dev_t *)adfInsts[i]; base_addr = dev_addr->pSalHandle; if (NULL == base_addr) { continue; } list_temp = base_addr->crypto_services; while (NULL != list_temp) { cyInst = SalList_getObject(list_temp); status = cpaCyInstanceGetInfo2(cyInst, &info); list_temp = SalList_next(list_temp); if (CPA_STATUS_SUCCESS != status || CPA_TRUE != info.isPolled) { cyInst = NULL; continue; } break; } if (cyInst) { break; } } return cyInst; } CpaInstanceHandle Lac_GetFirstHandle(sal_service_type_t svc_type) { CpaStatus status = CPA_STATUS_SUCCESS; static icp_accel_dev_t *adfInsts[ADF_MAX_DEVICES] = { 0 }; CpaInstanceHandle cyInst = NULL; Cpa16U num_cy_dev = 0; Cpa32U capabilities = 0; switch (svc_type) { case SAL_SERVICE_TYPE_CRYPTO_ASYM: capabilities = ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC; break; case SAL_SERVICE_TYPE_CRYPTO_SYM: capabilities = ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC; break; case SAL_SERVICE_TYPE_CRYPTO: capabilities = ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC; capabilities |= ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC; break; default: LAC_LOG_ERROR("Invalid service type\n"); return NULL; - break; } /* Only need 1 dev with crypto enabled - so check 
all devices*/ status = icp_amgr_getAllAccelDevByEachCapability(capabilities, adfInsts, &num_cy_dev); if ((0 == num_cy_dev) || (CPA_STATUS_SUCCESS != status)) { LAC_LOG_ERROR("No crypto devices enabled in the system\n"); return NULL; } switch (svc_type) { case SAL_SERVICE_TYPE_CRYPTO_ASYM: /* Try to find an asym only instance first */ cyInst = Lac_GetFirstAsymHandle(adfInsts, num_cy_dev); /* Try to find a cy instance since it also supports asym */ if (NULL == cyInst) { cyInst = Lac_GetFirstCyHandle(adfInsts, num_cy_dev); } break; case SAL_SERVICE_TYPE_CRYPTO_SYM: /* Try to find a sym only instance first */ cyInst = Lac_GetFirstSymHandle(adfInsts, num_cy_dev); /* Try to find a cy instance since it also supports sym */ if (NULL == cyInst) { cyInst = Lac_GetFirstCyHandle(adfInsts, num_cy_dev); } break; case SAL_SERVICE_TYPE_CRYPTO: /* Try to find a cy instance */ cyInst = Lac_GetFirstCyHandle(adfInsts, num_cy_dev); break; default: break; } if (NULL == cyInst) { LAC_LOG_ERROR("No remaining crypto instances available\n"); } return cyInst; } CpaStatus icp_sal_NrbgGetInflightRequests(CpaInstanceHandle instanceHandle_in, Cpa32U *maxInflightRequests, Cpa32U *numInflightRequests) { return CPA_STATUS_UNSUPPORTED; } CpaStatus icp_sal_SymGetInflightRequests(CpaInstanceHandle instanceHandle, Cpa32U *maxInflightRequests, Cpa32U *numInflightRequests) { sal_crypto_service_t *crypto_handle = NULL; crypto_handle = (sal_crypto_service_t *)instanceHandle; LAC_CHECK_NULL_PARAM(crypto_handle); LAC_CHECK_NULL_PARAM(maxInflightRequests); LAC_CHECK_NULL_PARAM(numInflightRequests); SAL_RUNNING_CHECK(crypto_handle); return icp_adf_getInflightRequests(crypto_handle->trans_handle_sym_tx, maxInflightRequests, numInflightRequests); } CpaStatus icp_sal_dp_SymGetInflightRequests(CpaInstanceHandle instanceHandle, Cpa32U *maxInflightRequests, Cpa32U *numInflightRequests) { sal_crypto_service_t *crypto_handle = NULL; crypto_handle = (sal_crypto_service_t *)instanceHandle; return icp_adf_dp_getInflightRequests( crypto_handle->trans_handle_sym_tx, maxInflightRequests, numInflightRequests); } CpaStatus icp_sal_setForceAEADMACVerify(CpaInstanceHandle instanceHandle, CpaBoolean forceAEADMacVerify) { sal_crypto_service_t *crypto_handle = NULL; crypto_handle = (sal_crypto_service_t *)instanceHandle; LAC_CHECK_NULL_PARAM(crypto_handle); crypto_handle->forceAEADMacVerify = forceAEADMacVerify; return CPA_STATUS_SUCCESS; } diff --git a/sys/dev/qat/qat_api/common/include/sal_types_compression.h b/sys/dev/qat/qat_api/common/include/sal_types_compression.h index d7bfe33cab08..4b17438b3258 100644 --- a/sys/dev/qat/qat_api/common/include/sal_types_compression.h +++ b/sys/dev/qat/qat_api/common/include/sal_types_compression.h @@ -1,161 +1,163 @@ /* SPDX-License-Identifier: BSD-3-Clause */ -/* Copyright(c) 2007-2022 Intel Corporation */ +/* Copyright(c) 2007-2025 Intel Corporation */ /** *************************************************************************** * @file sal_types_compression.h * * @ingroup SalCtrl * * Generic compression instance type definition * ***************************************************************************/ #ifndef SAL_TYPES_COMPRESSION_H_ #define SAL_TYPES_COMPRESSION_H_ #include "cpa_dc.h" #include "cpa_dc_dp.h" #include "lac_sal_types.h" #include "icp_qat_hw.h" #include "icp_buffer_desc.h" #include "lac_mem_pools.h" #include "icp_adf_transport.h" #define DC_NUM_RX_RINGS (1) #define DC_NUM_COMPRESSION_LEVELS (CPA_DC_L12) +#define MAX_SGL_NUM 0x10000 + /** 
***************************************************************************** * @ingroup SalCtrl * Compression device specific data * * @description * Contains device specific information for a compression service. * *****************************************************************************/ typedef struct sal_compression_device_data { /* Device specific minimum output buffer size for static compression */ Cpa32U minOutputBuffSize; /* Device specific minimum output buffer size for dynamic compression */ Cpa32U minOutputBuffSizeDynamic; /* Enable/disable secureRam/acceleratorRam for intermediate buffers*/ Cpa8U useDevRam; /* When set, implies device can decompress interim odd byte length * stateful decompression requests. */ CpaBoolean oddByteDecompInterim; /* When set, implies device can decompress odd byte length * stateful decompression requests when bFinal is absent */ CpaBoolean oddByteDecompNobFinal; /* Flag to indicate if translator slice overflow is supported */ CpaBoolean translatorOverflow; /* Flag to enable/disable delayed match mode */ icp_qat_hw_compression_delayed_match_t enableDmm; Cpa32U inflateContextSize; Cpa8U highestHwCompressionDepth; /* Mask that reports supported window sizes for comp/decomp */ Cpa8U windowSizeMask; /* List representing compression levels that are the first to have a unique search depth. */ CpaBoolean uniqueCompressionLevels[DC_NUM_COMPRESSION_LEVELS + 1]; Cpa8U numCompressionLevels; /* Flag to indicate CompressAndVerifyAndRecover feature support */ CpaBoolean cnvnrSupported; /* When set, implies device supports ASB_ENABLE */ CpaBoolean asbEnableSupport; } sal_compression_device_data_t; /** ***************************************************************************** * @ingroup SalCtrl * Compression specific Service Container * * @description * Contains information required per compression service instance. * *****************************************************************************/ typedef struct sal_compression_service_s { /* An instance of the Generic Service Container */ sal_service_t generic_service_info; /* Memory pool ID used for compression */ lac_memory_pool_id_t compression_mem_pool; /* Pointer to an array of atomic stats for compression */ QatUtilsAtomic *pCompStatsArr; /* Size of the DRAM intermediate buffer in bytes */ Cpa64U minInterBuffSizeInBytes; /* Number of DRAM intermediate buffers */ Cpa16U numInterBuffs; /* Address of the array of DRAM intermediate buffers*/ icp_qat_addr_width_t *pInterBuffPtrsArray; CpaPhysicalAddr pInterBuffPtrsArrayPhyAddr; icp_comms_trans_handle trans_handle_compression_tx; icp_comms_trans_handle trans_handle_compression_rx; /* Maximum number of in flight requests */ Cpa32U maxNumCompConcurrentReq; /* Callback function defined for the DcDp API compression session */ CpaDcDpCallbackFn pDcDpCb; /* Config info */ Cpa16U acceleratorNum; Cpa16U bankNum; Cpa16U pkgID; Cpa16U isPolled; Cpa32U coreAffinity; Cpa32U nodeAffinity; sal_compression_device_data_t comp_device_data; /* Statistics handler */ debug_file_info_t *debug_file; } sal_compression_service_t; /************************************************************************* * @ingroup SalCtrl * @description * This function returns a valid compression instance handle for the system * if it exists. * * @performance * To avoid calling this function the user of the QA api should not use * instanceHandle = CPA_INSTANCE_HANDLE_SINGLE. * * @context * This function is called whenever instanceHandle = * CPA_INSTANCE_HANDLE_SINGLE at the QA Dc api. 
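 *
 * Illustrative note (not part of this change): callers can avoid this
 * lookup entirely by enumerating instances up front, e.g.
 *
 *     Cpa16U num = 0;
 *     cpaDcGetNumInstances(&num);
 *
 * and passing an explicit handle from cpaDcGetInstances() rather than
 * CPA_INSTANCE_HANDLE_SINGLE.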
* * @assumptions * None * @sideEffects * None * @reentrant * No * @threadSafe * Yes * * @retval Pointer to first compression instance handle or NULL if no * compression instances in the system. * *************************************************************************/ CpaInstanceHandle dcGetFirstHandle(void); #endif /*SAL_TYPES_COMPRESSION_H_*/ diff --git a/sys/dev/qat/qat_api/include/icp_sal_versions.h b/sys/dev/qat/qat_api/include/icp_sal_versions.h index db1ba297adc6..03bcef4fcbbb 100644 --- a/sys/dev/qat/qat_api/include/icp_sal_versions.h +++ b/sys/dev/qat/qat_api/include/icp_sal_versions.h @@ -1,96 +1,96 @@ /* SPDX-License-Identifier: BSD-3-Clause */ -/* Copyright(c) 2007-2022 Intel Corporation */ +/* Copyright(c) 2007-2025 Intel Corporation */ /** *************************************************************************** * @file icp_sal_versions.h * * @defgroup SalVersions * * @ingroup SalVersions * * API and structures definition for obtaining software and hardware versions * ***************************************************************************/ #ifndef _ICP_SAL_VERSIONS_H_ #define _ICP_SAL_VERSIONS_H_ #define ICP_SAL_VERSIONS_FW_VERSION_SIZE 16 /**< Max length of firmware version string */ #define ICP_SAL_VERSIONS_SW_VERSION_SIZE 16 /**< Max length of software version string */ #define ICP_SAL_VERSIONS_MMP_VERSION_SIZE 16 /**< Max length of MMP binary version string */ #define ICP_SAL_VERSIONS_HW_VERSION_SIZE 4 /**< Max length of hardware version string */ /* Part name and number of the accelerator device */ #define SAL_INFO2_DRIVER_SW_VERSION_MAJ_NUMBER 3 -#define SAL_INFO2_DRIVER_SW_VERSION_MIN_NUMBER 14 +#define SAL_INFO2_DRIVER_SW_VERSION_MIN_NUMBER 15 #define SAL_INFO2_DRIVER_SW_VERSION_PATCH_NUMBER 0 /** ******************************************************************************* * @ingroup SalVersions * Structure holding versions information * * @description * This structure stores information about versions of software * and hardware being run on a particular device. *****************************************************************************/ typedef struct icp_sal_dev_version_info_s { Cpa32U devId; /**< Number of acceleration device for which this structure holds * version * information */ Cpa8U firmwareVersion[ICP_SAL_VERSIONS_FW_VERSION_SIZE]; /**< String identifying the version of the firmware associated with * the device. */ Cpa8U mmpVersion[ICP_SAL_VERSIONS_MMP_VERSION_SIZE]; /**< String identifying the version of the MMP binary associated with * the device. */ Cpa8U softwareVersion[ICP_SAL_VERSIONS_SW_VERSION_SIZE]; /**< String identifying the version of the software associated with * the device. */ Cpa8U hardwareVersion[ICP_SAL_VERSIONS_HW_VERSION_SIZE]; /**< String identifying the version of the hardware (stepping and * revision ID) associated with the device. */ } icp_sal_dev_version_info_t; /** ******************************************************************************* * @ingroup SalVersions * Obtains the version information for a given device * @description * This function obtains hardware and software version information * associated with a given device. * * @param[in] accelId ID of the acceleration device for which version * information is to be obtained. * @param[out] pVerInfo Pointer to a structure that will hold version * information * * @context * This function might sleep. It cannot be executed in a context that * does not permit sleeping. 
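 *
 * Illustrative usage (a sketch, not part of this change):
 *
 *     icp_sal_dev_version_info_t info = { 0 };
 *
 *     if (icp_sal_getDevVersionInfo(0, &info) == CPA_STATUS_SUCCESS)
 *             printf("fw %s mmp %s sw %s\n",
 *                    (char *)info.firmwareVersion,
 *                    (char *)info.mmpVersion,
 *                    (char *)info.softwareVersion);
 *
 * where accelId 0 is assumed to name the first started acceleration
 * device.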
* @assumptions * The system has been started * @sideEffects * None * @blocking * No * @reentrant * No * @threadSafe * Yes * * @return CPA_STATUS_SUCCESS Operation finished successfully * @return CPA_STATUS_INVALID_PARAM Invalid parameter passed to the function * @return CPA_STATUS_RESOURCE System resources problem * @return CPA_STATUS_FAIL Operation failed * *****************************************************************************/ CpaStatus icp_sal_getDevVersionInfo(Cpa32U accelId, icp_sal_dev_version_info_t *pVerInfo); #endif diff --git a/sys/dev/qat/qat_common/adf_aer.c b/sys/dev/qat/qat_common/adf_aer.c index 7fdeba873420..123baaa244c3 100644 --- a/sys/dev/qat/qat_common/adf_aer.c +++ b/sys/dev/qat/qat_common/adf_aer.c @@ -1,339 +1,348 @@ /* SPDX-License-Identifier: BSD-3-Clause */ -/* Copyright(c) 2007-2022 Intel Corporation */ +/* Copyright(c) 2007-2025 Intel Corporation */ #include "qat_freebsd.h" #include "adf_cfg.h" #include "adf_common_drv.h" #include "adf_accel_devices.h" #include "icp_qat_uclo.h" #include "icp_qat_fw.h" #include "icp_qat_fw_init_admin.h" #include "adf_cfg_strings.h" #include "adf_transport_access_macros.h" #include "adf_transport_internal.h" #include #include #include #include #define ADF_PPAERUCM_MASK (BIT(14) | BIT(20) | BIT(22)) static struct workqueue_struct *fatal_error_wq; struct adf_fatal_error_data { struct adf_accel_dev *accel_dev; struct work_struct work; }; static struct workqueue_struct *device_reset_wq; void linux_complete_common(struct completion *c, int all) { int wakeup_swapper; sleepq_lock(c); c->done++; if (all) wakeup_swapper = sleepq_broadcast(c, SLEEPQ_SLEEP, 0, 0); else wakeup_swapper = sleepq_signal(c, SLEEPQ_SLEEP, 0, 0); sleepq_release(c); if (wakeup_swapper) kick_proc0(); } /* reset dev data */ struct adf_reset_dev_data { int mode; struct adf_accel_dev *accel_dev; struct completion compl; struct work_struct reset_work; }; int adf_aer_store_ppaerucm_reg(device_t dev, struct adf_hw_device_data *hw_data) { unsigned int aer_offset, reg_val = 0; if (!hw_data) return -EINVAL; if (pci_find_extcap(dev, PCIZ_AER, &aer_offset) == 0) { reg_val = pci_read_config(dev, aer_offset + PCIR_AER_UC_MASK, 4); hw_data->aerucm_mask = reg_val; } else { device_printf(dev, "Unable to find AER capability of the device\n"); return -ENODEV; } return 0; } void adf_reset_sbr(struct adf_accel_dev *accel_dev) { device_t pdev = accel_to_pci_dev(accel_dev); device_t parent = device_get_parent(device_get_parent(pdev)); uint16_t bridge_ctl = 0; if (accel_dev->is_vf) return; if (!parent) parent = pdev; if (!pcie_wait_for_pending_transactions(pdev, 0)) device_printf(GET_DEV(accel_dev), "Transaction still in progress. 
Proceeding\n"); device_printf(GET_DEV(accel_dev), "Secondary bus reset\n"); pci_save_state(pdev); bridge_ctl = pci_read_config(parent, PCIR_BRIDGECTL_1, 2); bridge_ctl |= PCIB_BCR_SECBUS_RESET; pci_write_config(parent, PCIR_BRIDGECTL_1, bridge_ctl, 2); pause_ms("adfrst", 100); bridge_ctl &= ~PCIB_BCR_SECBUS_RESET; pci_write_config(parent, PCIR_BRIDGECTL_1, bridge_ctl, 2); pause_ms("adfrst", 100); pci_restore_state(pdev); } void adf_reset_flr(struct adf_accel_dev *accel_dev) { device_t pdev = accel_to_pci_dev(accel_dev); pci_save_state(pdev); if (pcie_flr(pdev, max(pcie_get_max_completion_timeout(pdev) / 1000, 10), true)) { pci_restore_state(pdev); return; } pci_restore_state(pdev); device_printf(GET_DEV(accel_dev), "FLR qat_dev%d failed trying secondary bus reset\n", accel_dev->accel_id); adf_reset_sbr(accel_dev); } void adf_dev_pre_reset(struct adf_accel_dev *accel_dev) { struct adf_hw_device_data *hw_device = accel_dev->hw_device; device_t pdev = accel_to_pci_dev(accel_dev); u32 aer_offset, reg_val = 0; if (pci_find_extcap(pdev, PCIZ_AER, &aer_offset) == 0) { reg_val = pci_read_config(pdev, aer_offset + PCIR_AER_UC_MASK, 4); reg_val |= ADF_PPAERUCM_MASK; pci_write_config(pdev, aer_offset + PCIR_AER_UC_MASK, reg_val, 4); } else { device_printf(pdev, "Unable to find AER capability of the device\n"); } if (hw_device->disable_arb) { device_printf(GET_DEV(accel_dev), "Disable arbiter.\n"); hw_device->disable_arb(accel_dev); } } void adf_dev_post_reset(struct adf_accel_dev *accel_dev) { struct adf_hw_device_data *hw_device = accel_dev->hw_device; device_t pdev = accel_to_pci_dev(accel_dev); u32 aer_offset; if (pci_find_extcap(pdev, PCIZ_AER, &aer_offset) == 0) { pci_write_config(pdev, aer_offset + PCIR_AER_UC_MASK, hw_device->aerucm_mask, 4); } else { device_printf(pdev, "Unable to find AER capability of the device\n"); } } void adf_dev_restore(struct adf_accel_dev *accel_dev) { struct adf_hw_device_data *hw_device = accel_dev->hw_device; device_t pdev = accel_to_pci_dev(accel_dev); if (hw_device->pre_reset) { dev_dbg(GET_DEV(accel_dev), "Performing pre reset save\n"); hw_device->pre_reset(accel_dev); } if (hw_device->reset_device) { device_printf(GET_DEV(accel_dev), "Resetting device qat_dev%d\n", accel_dev->accel_id); hw_device->reset_device(accel_dev); pci_restore_state(pdev); pci_save_state(pdev); } if (hw_device->post_reset) { dev_dbg(GET_DEV(accel_dev), "Performing post reset restore\n"); hw_device->post_reset(accel_dev); } } static void adf_device_reset_worker(struct work_struct *work) { struct adf_reset_dev_data *reset_data = container_of(work, struct adf_reset_dev_data, reset_work); struct adf_accel_dev *accel_dev = reset_data->accel_dev; if (adf_dev_restarting_notify(accel_dev)) { device_printf(GET_DEV(accel_dev), "Unable to send RESTARTING notification.\n"); return; } if (adf_dev_stop(accel_dev)) { device_printf(GET_DEV(accel_dev), "Stopping device failed.\n"); return; } adf_dev_shutdown(accel_dev); if (adf_dev_init(accel_dev) || adf_dev_start(accel_dev)) { /* The device hanged and we can't restart it */ /* so stop here */ device_printf(GET_DEV(accel_dev), "Restart device failed\n"); if (reset_data->mode == ADF_DEV_RESET_ASYNC) kfree(reset_data); WARN(1, "QAT: device restart failed. Device is unusable\n"); return; } adf_dev_restarted_notify(accel_dev); clear_bit(ADF_STATUS_RESTARTING, &accel_dev->status); /* The dev is back alive. 
Notify the caller if in sync mode */ if (reset_data->mode == ADF_DEV_RESET_SYNC) complete(&reset_data->compl); else kfree(reset_data); } int adf_dev_aer_schedule_reset(struct adf_accel_dev *accel_dev, enum adf_dev_reset_mode mode) { struct adf_reset_dev_data *reset_data; if (!adf_dev_started(accel_dev) || test_bit(ADF_STATUS_RESTARTING, &accel_dev->status)) return 0; set_bit(ADF_STATUS_RESTARTING, &accel_dev->status); reset_data = kzalloc(sizeof(*reset_data), GFP_ATOMIC); if (!reset_data) return -ENOMEM; reset_data->accel_dev = accel_dev; init_completion(&reset_data->compl); reset_data->mode = mode; INIT_WORK(&reset_data->reset_work, adf_device_reset_worker); queue_work(device_reset_wq, &reset_data->reset_work); /* If in sync mode wait for the result */ if (mode == ADF_DEV_RESET_SYNC) { int ret = 0; /* Maximum device reset time is 10 seconds */ unsigned long wait_jiffies = msecs_to_jiffies(10000); unsigned long timeout = wait_for_completion_timeout(&reset_data->compl, wait_jiffies); if (!timeout) { device_printf(GET_DEV(accel_dev), "Reset device timeout expired\n"); ret = -EFAULT; } kfree(reset_data); return ret; } return 0; } int adf_dev_autoreset(struct adf_accel_dev *accel_dev) { if (accel_dev->autoreset_on_error) return adf_dev_reset(accel_dev, ADF_DEV_RESET_ASYNC); return 0; } static void adf_notify_fatal_error_work(struct work_struct *work) { struct adf_fatal_error_data *wq_data = container_of(work, struct adf_fatal_error_data, work); struct adf_accel_dev *accel_dev = wq_data->accel_dev; + struct adf_hw_device_data *hw_device = accel_dev->hw_device; + + if (adf_dev_in_use(accel_dev)) { + if (hw_device->pre_reset) { + device_printf(GET_DEV(accel_dev), + "Performing pre reset save\n"); + hw_device->pre_reset(accel_dev); + } + } adf_error_notifier((uintptr_t)accel_dev); if (!accel_dev->is_vf) { adf_dev_autoreset(accel_dev); } kfree(wq_data); } int adf_notify_fatal_error(struct adf_accel_dev *accel_dev) { struct adf_fatal_error_data *wq_data; wq_data = kzalloc(sizeof(*wq_data), GFP_ATOMIC); if (!wq_data) { device_printf(GET_DEV(accel_dev), "Failed to allocate memory\n"); return ENOMEM; } wq_data->accel_dev = accel_dev; INIT_WORK(&wq_data->work, adf_notify_fatal_error_work); queue_work(fatal_error_wq, &wq_data->work); return 0; } int __init adf_init_fatal_error_wq(void) { fatal_error_wq = create_workqueue("qat_fatal_error_wq"); return !fatal_error_wq ? EFAULT : 0; } void adf_exit_fatal_error_wq(void) { if (fatal_error_wq) destroy_workqueue(fatal_error_wq); fatal_error_wq = NULL; } int adf_init_aer(void) { device_reset_wq = create_workqueue("qat_device_reset_wq"); return !device_reset_wq ? 
-EFAULT : 0; } void adf_exit_aer(void) { if (device_reset_wq) destroy_workqueue(device_reset_wq); device_reset_wq = NULL; } diff --git a/sys/dev/qat/qat_common/adf_cfg_device.c b/sys/dev/qat/qat_common/adf_cfg_device.c index a26d2fdfd32e..4860a4064b97 100644 --- a/sys/dev/qat/qat_common/adf_cfg_device.c +++ b/sys/dev/qat/qat_common/adf_cfg_device.c @@ -1,1284 +1,1296 @@ /* SPDX-License-Identifier: BSD-3-Clause */ -/* Copyright(c) 2007-2022 Intel Corporation */ +/* Copyright(c) 2007-2025 Intel Corporation */ #include "adf_cfg_instance.h" #include "adf_cfg_section.h" #include "adf_cfg_device.h" #include "icp_qat_hw.h" #include "adf_common_drv.h" #define ADF_CFG_SVCS_MAX (12) #define ADF_CFG_DEPRE_PARAMS_NUM (4) #define ADF_CFG_CAP_DC ADF_ACCEL_CAPABILITIES_COMPRESSION #define ADF_CFG_CAP_ASYM ADF_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC #define ADF_CFG_CAP_SYM \ (ADF_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC | \ ADF_ACCEL_CAPABILITIES_CIPHER | \ ADF_ACCEL_CAPABILITIES_AUTHENTICATION) #define ADF_CFG_CAP_CY (ADF_CFG_CAP_ASYM | ADF_CFG_CAP_SYM) #define ADF_CFG_FW_CAP_RL ICP_ACCEL_CAPABILITIES_RL #define ADF_CFG_FW_CAP_HKDF ICP_ACCEL_CAPABILITIES_HKDF #define ADF_CFG_FW_CAP_ECEDMONT ICP_ACCEL_CAPABILITIES_ECEDMONT #define ADF_CFG_FW_CAP_EXT_ALGCHAIN ICP_ACCEL_CAPABILITIES_EXT_ALGCHAIN #define ADF_CFG_CY_RINGS \ (CRYPTO | CRYPTO << ADF_CFG_SERV_RING_PAIR_1_SHIFT | \ CRYPTO << ADF_CFG_SERV_RING_PAIR_2_SHIFT | \ CRYPTO << ADF_CFG_SERV_RING_PAIR_3_SHIFT) #define ADF_CFG_SYM_RINGS \ (SYM | SYM << ADF_CFG_SERV_RING_PAIR_1_SHIFT | \ SYM << ADF_CFG_SERV_RING_PAIR_2_SHIFT | \ SYM << ADF_CFG_SERV_RING_PAIR_3_SHIFT) #define ADF_CFG_ASYM_RINGS \ (ASYM | ASYM << ADF_CFG_SERV_RING_PAIR_1_SHIFT | \ ASYM << ADF_CFG_SERV_RING_PAIR_2_SHIFT | \ ASYM << ADF_CFG_SERV_RING_PAIR_3_SHIFT) #define ADF_CFG_CY_DC_RINGS \ (CRYPTO | CRYPTO << ADF_CFG_SERV_RING_PAIR_1_SHIFT | \ NA << ADF_CFG_SERV_RING_PAIR_2_SHIFT | \ COMP << ADF_CFG_SERV_RING_PAIR_3_SHIFT) #define ADF_CFG_ASYM_DC_RINGS \ (ASYM | ASYM << ADF_CFG_SERV_RING_PAIR_1_SHIFT | \ COMP << ADF_CFG_SERV_RING_PAIR_2_SHIFT | \ COMP << ADF_CFG_SERV_RING_PAIR_3_SHIFT) #define ADF_CFG_SYM_DC_RINGS \ (SYM | SYM << ADF_CFG_SERV_RING_PAIR_1_SHIFT | \ COMP << ADF_CFG_SERV_RING_PAIR_2_SHIFT | \ COMP << ADF_CFG_SERV_RING_PAIR_3_SHIFT) #define ADF_CFG_DC_RINGS \ (COMP | COMP << ADF_CFG_SERV_RING_PAIR_1_SHIFT | \ COMP << ADF_CFG_SERV_RING_PAIR_2_SHIFT | \ COMP << ADF_CFG_SERV_RING_PAIR_3_SHIFT) static char adf_cfg_deprecated_params[][ADF_CFG_MAX_KEY_LEN_IN_BYTES] = { ADF_DEV_KPT_ENABLE, ADF_STORAGE_FIRMWARE_ENABLED, ADF_RL_FIRMWARE_ENABLED, ADF_PKE_DISABLED }; struct adf_cfg_enabled_services { const char svcs_enabled[ADF_CFG_MAX_VAL_LEN_IN_BYTES]; u16 rng_to_svc_msk; u32 enabled_svc_cap; u32 enabled_fw_cap; }; struct adf_cfg_profile { enum adf_cfg_fw_image_type fw_image_type; struct adf_cfg_enabled_services supported_svcs[ADF_CFG_SVCS_MAX]; }; static struct adf_cfg_profile adf_profiles[] = { { ADF_FW_IMAGE_DEFAULT, { { "cy", ADF_CFG_CY_RINGS, ADF_CFG_CAP_CY, ADF_CFG_FW_CAP_ECEDMONT | ADF_CFG_FW_CAP_EXT_ALGCHAIN }, { "asym;sym", ADF_CFG_CY_RINGS, ADF_CFG_CAP_CY, ADF_CFG_FW_CAP_ECEDMONT | ADF_CFG_FW_CAP_EXT_ALGCHAIN }, { "sym;asym", ADF_CFG_CY_RINGS, ADF_CFG_CAP_CY, ADF_CFG_FW_CAP_ECEDMONT | ADF_CFG_FW_CAP_EXT_ALGCHAIN }, { "dc", ADF_CFG_DC_RINGS, ADF_CFG_CAP_DC, 0 }, { "sym", ADF_CFG_SYM_RINGS, ADF_CFG_CAP_SYM, ADF_CFG_FW_CAP_EXT_ALGCHAIN }, { "asym", ADF_CFG_ASYM_RINGS, ADF_CFG_CAP_ASYM, ADF_CFG_FW_CAP_ECEDMONT }, { "cy;dc", ADF_CFG_CY_DC_RINGS, ADF_CFG_CAP_CY | ADF_CFG_CAP_DC, 
ADF_CFG_FW_CAP_ECEDMONT | ADF_CFG_FW_CAP_EXT_ALGCHAIN }, { "dc;cy", ADF_CFG_CY_DC_RINGS, ADF_CFG_CAP_CY | ADF_CFG_CAP_DC, ADF_CFG_FW_CAP_ECEDMONT | ADF_CFG_FW_CAP_EXT_ALGCHAIN }, { "asym;dc", ADF_CFG_ASYM_DC_RINGS, ADF_CFG_CAP_ASYM | ADF_CFG_CAP_DC, ADF_CFG_FW_CAP_ECEDMONT }, { "dc;asym", ADF_CFG_ASYM_DC_RINGS, ADF_CFG_CAP_ASYM | ADF_CFG_CAP_DC, ADF_CFG_FW_CAP_ECEDMONT }, { "sym;dc", ADF_CFG_SYM_DC_RINGS, ADF_CFG_CAP_SYM | ADF_CFG_CAP_DC, ADF_CFG_FW_CAP_EXT_ALGCHAIN }, { "dc;sym", ADF_CFG_SYM_DC_RINGS, ADF_CFG_CAP_SYM | ADF_CFG_CAP_DC, ADF_CFG_FW_CAP_EXT_ALGCHAIN }, } }, { ADF_FW_IMAGE_CRYPTO, { { "cy", ADF_CFG_CY_RINGS, ADF_CFG_CAP_CY, ADF_CFG_FW_CAP_RL | ADF_CFG_FW_CAP_HKDF | ADF_CFG_FW_CAP_ECEDMONT | ADF_CFG_FW_CAP_EXT_ALGCHAIN }, { "sym", ADF_CFG_SYM_RINGS, ADF_CFG_CAP_SYM, ADF_CFG_FW_CAP_RL | ADF_CFG_FW_CAP_HKDF | ADF_CFG_FW_CAP_EXT_ALGCHAIN }, { "asym", ADF_CFG_ASYM_RINGS, ADF_CFG_CAP_ASYM, ADF_CFG_FW_CAP_RL | ADF_CFG_FW_CAP_ECEDMONT }, } }, { ADF_FW_IMAGE_COMPRESSION, { { "dc", ADF_CFG_DC_RINGS, ADF_CFG_CAP_DC, 0 }, } }, { ADF_FW_IMAGE_CUSTOM1, { { "cy", ADF_CFG_CY_RINGS, ADF_CFG_CAP_CY, ADF_CFG_FW_CAP_RL | ADF_CFG_FW_CAP_HKDF | ADF_CFG_FW_CAP_ECEDMONT | ADF_CFG_FW_CAP_EXT_ALGCHAIN }, { "dc", ADF_CFG_DC_RINGS, ADF_CFG_CAP_DC, 0 }, { "sym", ADF_CFG_SYM_RINGS, ADF_CFG_CAP_SYM, ADF_CFG_FW_CAP_RL | ADF_CFG_FW_CAP_HKDF | ADF_CFG_FW_CAP_EXT_ALGCHAIN }, { "asym", ADF_CFG_ASYM_RINGS, ADF_CFG_CAP_ASYM, ADF_CFG_FW_CAP_RL | ADF_CFG_FW_CAP_ECEDMONT }, { "cy;dc", ADF_CFG_CY_DC_RINGS, ADF_CFG_CAP_CY | ADF_CFG_CAP_DC, ADF_CFG_FW_CAP_RL | ADF_CFG_FW_CAP_HKDF | ADF_CFG_FW_CAP_ECEDMONT | ADF_CFG_FW_CAP_EXT_ALGCHAIN }, { "dc;cy", ADF_CFG_CY_DC_RINGS, ADF_CFG_CAP_CY | ADF_CFG_CAP_DC, ADF_CFG_FW_CAP_RL | ADF_CFG_FW_CAP_HKDF | ADF_CFG_FW_CAP_ECEDMONT | ADF_CFG_FW_CAP_EXT_ALGCHAIN }, { "asym;dc", ADF_CFG_ASYM_DC_RINGS, ADF_CFG_CAP_ASYM | ADF_CFG_CAP_DC, ADF_CFG_FW_CAP_RL | ADF_CFG_FW_CAP_ECEDMONT }, { "dc;asym", ADF_CFG_ASYM_DC_RINGS, ADF_CFG_CAP_ASYM | ADF_CFG_CAP_DC, ADF_CFG_FW_CAP_RL | ADF_CFG_FW_CAP_ECEDMONT }, { "sym;dc", ADF_CFG_SYM_DC_RINGS, ADF_CFG_CAP_SYM | ADF_CFG_CAP_DC, ADF_CFG_FW_CAP_RL | ADF_CFG_FW_CAP_HKDF | ADF_CFG_FW_CAP_EXT_ALGCHAIN }, { "dc;sym", ADF_CFG_SYM_DC_RINGS, ADF_CFG_CAP_SYM | ADF_CFG_CAP_DC, ADF_CFG_FW_CAP_RL | ADF_CFG_FW_CAP_HKDF | ADF_CFG_FW_CAP_EXT_ALGCHAIN }, } } }; int adf_cfg_get_ring_pairs(struct adf_cfg_device *device, struct adf_cfg_instance *inst, const char *process_name, struct adf_accel_dev *accel_dev) { int i = 0; int ret = EFAULT; struct adf_cfg_instance *free_inst = NULL; enum adf_cfg_bundle_type free_bundle_type; int first_user_bundle = 0; /* Section of user process with poll mode */ if (strcmp(ADF_KERNEL_SEC, process_name) && strcmp(ADF_KERNEL_SAL_SEC, process_name) && inst->polling_mode == ADF_CFG_RESP_POLL) { first_user_bundle = device->max_kernel_bundle_nr + 1; for (i = first_user_bundle; i < device->bundle_num; i++) { free_inst = adf_cfg_get_free_instance( device, device->bundles[i], inst, process_name); if (!free_inst) continue; ret = adf_cfg_get_ring_pairs_from_bundle( device->bundles[i], inst, process_name, free_inst); return ret; } } else { /* Section of in-tree, or kernel API or user process * with epoll mode */ if (!strcmp(ADF_KERNEL_SEC, process_name) || !strcmp(ADF_KERNEL_SAL_SEC, process_name)) free_bundle_type = KERNEL; else free_bundle_type = USER; for (i = 0; i < device->bundle_num; i++) { /* Since both in-tree and kernel API's bundle type * are kernel, use cpumask_subset to check if the * ring's affinity mask is a subset of a bundle's * one. 
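 *
 * If no bundle of the matching type covers the instance's affinity
 * mask, the second loop below falls back to the first bundle that
 * adf_cfg_is_free() reports as available.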
*/ if (free_bundle_type == device->bundles[i]->type && CPU_SUBSET(&device->bundles[i]->affinity_mask, &inst->affinity_mask)) { free_inst = adf_cfg_get_free_instance( device, device->bundles[i], inst, process_name); if (!free_inst) continue; ret = adf_cfg_get_ring_pairs_from_bundle( device->bundles[i], inst, process_name, free_inst); return ret; } } for (i = 0; i < device->bundle_num; i++) { if (adf_cfg_is_free(device->bundles[i])) { free_inst = adf_cfg_get_free_instance( device, device->bundles[i], inst, process_name); if (!free_inst) continue; ret = adf_cfg_get_ring_pairs_from_bundle( device->bundles[i], inst, process_name, free_inst); return ret; } } } pr_err("Don't have enough rings for instance %s in process %s\n", inst->name, process_name); return ret; } int adf_cfg_get_services_enabled(struct adf_accel_dev *accel_dev, u16 *ring_to_svc_map) { char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES]; char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES]; u32 i = 0; struct adf_cfg_enabled_services *svcs = NULL; enum adf_cfg_fw_image_type fw_image_type = ADF_FW_IMAGE_DEFAULT; struct adf_hw_device_data *hw_data = accel_dev->hw_device; *ring_to_svc_map = 0; /* Get the services enabled by user */ snprintf(key, sizeof(key), ADF_SERVICES_ENABLED); if (adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC, key, val)) return EFAULT; if (hw_data->get_fw_image_type) { if (hw_data->get_fw_image_type(accel_dev, &fw_image_type)) return EFAULT; } for (i = 0; i < ADF_CFG_SVCS_MAX; i++) { svcs = &adf_profiles[fw_image_type].supported_svcs[i]; if (!strncmp(svcs->svcs_enabled, "", ADF_CFG_MAX_VAL_LEN_IN_BYTES)) break; if (!strncmp(val, svcs->svcs_enabled, ADF_CFG_MAX_VAL_LEN_IN_BYTES)) { *ring_to_svc_map = svcs->rng_to_svc_msk; return 0; } } device_printf(GET_DEV(accel_dev), "Invalid ServicesEnabled %s for ServicesProfile: %d\n", val, fw_image_type); return EFAULT; } void adf_cfg_set_asym_rings_mask(struct adf_accel_dev *accel_dev) { int service; u16 ena_srv_mask; u16 service_type; u16 asym_mask = 0; struct adf_cfg_device *cfg_dev = accel_dev->cfg->dev; struct adf_hw_device_data *hw_data = accel_dev->hw_device; if (!cfg_dev) { hw_data->asym_rings_mask = ADF_CFG_DEF_ASYM_MASK; return; } ena_srv_mask = accel_dev->hw_device->ring_to_svc_map; /* parse each service */ for (service = 0; service < ADF_CFG_MAX_SERVICES; service++) { service_type = GET_SRV_TYPE(ena_srv_mask, service); switch (service_type) { case CRYPTO: case ASYM: SET_ASYM_MASK(asym_mask, service); if (service_type == CRYPTO) service++; break; } } hw_data->asym_rings_mask = asym_mask; } void adf_cfg_gen_dispatch_arbiter(struct adf_accel_dev *accel_dev, const u32 *thrd_to_arb_map, u32 *thrd_to_arb_map_gen, u32 total_engines) { int engine, thread, service, bits; u32 thread_ability, ability_map, service_mask, service_type; u16 ena_srv_mask = GET_HW_DATA(accel_dev)->ring_to_svc_map; for (engine = 0; engine < total_engines; engine++) { if (!(GET_HW_DATA(accel_dev)->ae_mask & (1 << engine))) continue; bits = 0; /* ability_map is used to indicate the threads ability */ ability_map = thrd_to_arb_map[engine]; thrd_to_arb_map_gen[engine] = 0; /* parse each thread on the engine */ for (thread = 0; thread < ADF_NUM_THREADS_PER_AE; thread++) { /* get the ability of this thread */ thread_ability = ability_map & ADF_THRD_ABILITY_MASK; ability_map >>= ADF_THRD_ABILITY_BIT_LEN; /* parse each service */ for (service = 0; service < ADF_CFG_MAX_SERVICES; service++) { service_type = GET_SRV_TYPE(ena_srv_mask, service); switch (service_type) { case CRYPTO: service_mask = ADF_CFG_ASYM_SRV_MASK; if 
(thread_ability & service_mask) thrd_to_arb_map_gen[engine] |= (1 << bits); bits++; service++; service_mask = ADF_CFG_SYM_SRV_MASK; break; case COMP: service_mask = ADF_CFG_DC_SRV_MASK; break; case SYM: service_mask = ADF_CFG_SYM_SRV_MASK; break; case ASYM: service_mask = ADF_CFG_ASYM_SRV_MASK; break; default: service_mask = ADF_CFG_UNKNOWN_SRV_MASK; } if (thread_ability & service_mask) thrd_to_arb_map_gen[engine] |= (1 << bits); bits++; } } } } int adf_cfg_get_fw_image_type(struct adf_accel_dev *accel_dev, enum adf_cfg_fw_image_type *fw_image_type) { *fw_image_type = ADF_FW_IMAGE_CUSTOM1; return 0; } static int adf_cfg_get_caps_enabled(struct adf_accel_dev *accel_dev, u32 *enabled_svc_caps, u32 *enabled_fw_caps) { char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES]; char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES]; u8 i = 0; struct adf_cfg_enabled_services *svcs = NULL; enum adf_cfg_fw_image_type fw_image_type = ADF_FW_IMAGE_DEFAULT; struct adf_hw_device_data *hw_data = accel_dev->hw_device; *enabled_svc_caps = 0; *enabled_fw_caps = 0; /* Get the services enabled by user */ snprintf(key, sizeof(key), ADF_SERVICES_ENABLED); if (adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC, key, val)) return EFAULT; /* * Only the PF driver has the hook for get_fw_image_type as the VF's * enabled service is from PFVF communication. The fw_image_type for * the VF is set to DEFAULT since this type contains all kinds of * enabled service. */ if (hw_data->get_fw_image_type) { if (hw_data->get_fw_image_type(accel_dev, &fw_image_type)) return EFAULT; } for (i = 0; i < ADF_CFG_SVCS_MAX; i++) { svcs = &adf_profiles[fw_image_type].supported_svcs[i]; if (!strncmp(svcs->svcs_enabled, "", ADF_CFG_MAX_VAL_LEN_IN_BYTES)) break; if (!strncmp(val, svcs->svcs_enabled, ADF_CFG_MAX_VAL_LEN_IN_BYTES)) { *enabled_svc_caps = svcs->enabled_svc_cap; *enabled_fw_caps = svcs->enabled_fw_cap; return 0; } } device_printf(GET_DEV(accel_dev), "Invalid ServicesEnabled %s for ServicesProfile: %d\n", val, fw_image_type); return EFAULT; } static void adf_cfg_check_deprecated_params(struct adf_accel_dev *accel_dev) { char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES]; char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES]; u8 i = 0; for (i = 0; i < ADF_CFG_DEPRE_PARAMS_NUM; i++) { /* give a warning if the deprecated params are set by user */ snprintf(key, sizeof(key), "%s", adf_cfg_deprecated_params[i]); if (!adf_cfg_get_param_value( accel_dev, ADF_GENERAL_SEC, key, val)) { device_printf(GET_DEV(accel_dev), "Parameter '%s' has been deprecated\n", key); } } } static int adf_cfg_check_enabled_services(struct adf_accel_dev *accel_dev, u32 enabled_svc_caps) { u32 hw_caps = GET_HW_DATA(accel_dev)->accel_capabilities_mask; if ((enabled_svc_caps & hw_caps) == enabled_svc_caps) return 0; device_printf(GET_DEV(accel_dev), "Unsupported device configuration\n"); return EFAULT; } static int adf_cfg_update_pf_accel_cap_mask(struct adf_accel_dev *accel_dev) { struct adf_hw_device_data *hw_data = accel_dev->hw_device; u32 enabled_svc_caps = 0; u32 enabled_fw_caps = 0; if (hw_data->get_accel_cap) { hw_data->accel_capabilities_mask = hw_data->get_accel_cap(accel_dev); } if (adf_cfg_get_caps_enabled(accel_dev, &enabled_svc_caps, &enabled_fw_caps)) return EFAULT; if (adf_cfg_check_enabled_services(accel_dev, enabled_svc_caps)) return EFAULT; if (!(enabled_svc_caps & ADF_CFG_CAP_ASYM)) hw_data->accel_capabilities_mask &= ~ADF_CFG_CAP_ASYM; if (!(enabled_svc_caps & ADF_CFG_CAP_SYM)) hw_data->accel_capabilities_mask &= ~ADF_CFG_CAP_SYM; if (!(enabled_svc_caps & ADF_CFG_CAP_DC)) 
hw_data->accel_capabilities_mask &= ~ADF_CFG_CAP_DC; /* Enable FW defined capabilities*/ if (enabled_fw_caps) hw_data->accel_capabilities_mask |= enabled_fw_caps; return 0; } static int adf_cfg_update_vf_accel_cap_mask(struct adf_accel_dev *accel_dev) { u32 enabled_svc_caps = 0; u32 enabled_fw_caps = 0; if (adf_cfg_get_caps_enabled(accel_dev, &enabled_svc_caps, &enabled_fw_caps)) return EFAULT; if (adf_cfg_check_enabled_services(accel_dev, enabled_svc_caps)) return EFAULT; return 0; } int adf_cfg_device_init(struct adf_cfg_device *device, struct adf_accel_dev *accel_dev) { int i = 0; /* max_inst indicates the max instance number one bank can hold */ int max_inst = accel_dev->hw_device->tx_rx_gap; int ret = ENOMEM; struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev); adf_cfg_check_deprecated_params(accel_dev); device->bundle_num = 0; device->bundles = (struct adf_cfg_bundle **)malloc( sizeof(struct adf_cfg_bundle *) * accel_dev->hw_device->num_banks, M_QAT, M_WAITOK | M_ZERO); device->bundle_num = accel_dev->hw_device->num_banks; device->instances = (struct adf_cfg_instance **)malloc( sizeof(struct adf_cfg_instance *) * device->bundle_num * max_inst, M_QAT, M_WAITOK | M_ZERO); device->instance_index = 0; device->max_kernel_bundle_nr = -1; ret = EFAULT; /* Update the acceleration capability mask based on User capability */ if (!accel_dev->is_vf) { if (adf_cfg_update_pf_accel_cap_mask(accel_dev)) goto failed; } else { if (adf_cfg_update_vf_accel_cap_mask(accel_dev)) goto failed; } /* Based on the svc configured, get ring_to_svc_map */ if (hw_data->get_ring_to_svc_map) { if (hw_data->get_ring_to_svc_map(accel_dev, &hw_data->ring_to_svc_map)) goto failed; } ret = ENOMEM; /* * 1) get the config information to generate the ring to service * mapping table * 2) init each bundle of this device */ for (i = 0; i < device->bundle_num; i++) { device->bundles[i] = malloc(sizeof(struct adf_cfg_bundle), M_QAT, M_WAITOK | M_ZERO); device->bundles[i]->max_section = max_inst; adf_cfg_bundle_init(device->bundles[i], device, i, accel_dev); } return 0; failed: for (i = 0; i < device->bundle_num; i++) { if (device->bundles[i]) adf_cfg_bundle_clear(device->bundles[i], accel_dev); } for (i = 0; i < (device->bundle_num * max_inst); i++) { if (device->instances && device->instances[i]) free(device->instances[i], M_QAT); } free(device->instances, M_QAT); device->instances = NULL; device_printf(GET_DEV(accel_dev), "Failed to do device init\n"); return ret; } void adf_cfg_device_clear(struct adf_cfg_device *device, struct adf_accel_dev *accel_dev) { int i = 0; for (i = 0; i < device->bundle_num; i++) { if (device->bundles && device->bundles[i]) { adf_cfg_bundle_clear(device->bundles[i], accel_dev); free(device->bundles[i], M_QAT); device->bundles[i] = NULL; } } free(device->bundles, M_QAT); device->bundles = NULL; for (i = 0; i < device->instance_index; i++) { if (device->instances && device->instances[i]) { free(device->instances[i], M_QAT); device->instances[i] = NULL; } } free(device->instances, M_QAT); device->instances = NULL; } +void +adf_cfg_device_clear_all(struct adf_accel_dev *accel_dev) +{ + sx_xlock(&accel_dev->cfg->lock); + if (accel_dev->cfg->dev) { + adf_cfg_device_clear(accel_dev->cfg->dev, accel_dev); + free(accel_dev->cfg->dev, M_QAT); + accel_dev->cfg->dev = NULL; + } + sx_xunlock(&accel_dev->cfg->lock); +} + /* * Static configuration for userspace */ static int adf_cfg_static_conf_user(struct adf_accel_dev *accel_dev, int cy_enabled, int dc_enabled) { int ret = 0; unsigned long val = 0; char 
key[ADF_CFG_MAX_KEY_LEN_IN_BYTES]; char value[ADF_CFG_MAX_VAL_LEN_IN_BYTES]; int cy_user_instances = 0; int dc_user_instances = 0; int i = 0; int cpus = num_online_cpus(); if (!(IS_QAT_GEN4(pci_get_device(GET_DEV(accel_dev))))) { device_printf( GET_DEV(accel_dev), "User space configuration supported only on QAT 4xxx devices\n"); return ENXIO; } ret |= adf_cfg_section_add(accel_dev, ADF_SAL_SEC); if (accel_dev->is_vf) { if (cy_enabled) cy_user_instances = ADF_CFG_STATIC_CONF_USER_INST_NUM_CY_VF; if (dc_enabled) dc_user_instances = ADF_CFG_STATIC_CONF_USER_INST_NUM_DC_VF; } else { if (cy_enabled) cy_user_instances = ADF_CFG_STATIC_CONF_USER_INST_NUM_CY; if (dc_enabled) dc_user_instances = ADF_CFG_STATIC_CONF_USER_INST_NUM_DC; } val = cy_user_instances; snprintf(key, ADF_CFG_MAX_KEY_LEN_IN_BYTES, ADF_NUM_CY); ret |= adf_cfg_add_key_value_param( accel_dev, ADF_SAL_SEC, key, (void *)&val, ADF_DEC); val = dc_user_instances; snprintf(key, ADF_CFG_MAX_KEY_LEN_IN_BYTES, ADF_NUM_DC); ret |= adf_cfg_add_key_value_param( accel_dev, ADF_SAL_SEC, key, (void *)&val, ADF_DEC); val = accel_dev->cfg->num_user_processes; snprintf(key, ADF_CFG_MAX_KEY_LEN_IN_BYTES, ADF_NUM_PROCESSES); ret |= adf_cfg_add_key_value_param( accel_dev, ADF_SAL_SEC, key, (void *)&val, ADF_DEC); for (i = 0; i < cy_user_instances; i++) { val = (accel_dev->accel_id * cy_user_instances + i) % cpus; snprintf(key, ADF_CFG_MAX_KEY_LEN_IN_BYTES, ADF_CY "%d" ADF_ETRMGR_CORE_AFFINITY, i); ret |= adf_cfg_add_key_value_param( accel_dev, ADF_SAL_SEC, key, (void *)&val, ADF_DEC); val = ADF_CFG_STATIC_CONF_POLL; snprintf(key, ADF_CFG_MAX_KEY_LEN_IN_BYTES, ADF_CY "%d" ADF_POLL_MODE, i); ret |= adf_cfg_add_key_value_param( accel_dev, ADF_SAL_SEC, key, (void *)&val, ADF_DEC); snprintf(value, ADF_CFG_MAX_VAL_LEN_IN_BYTES, ADF_CY "%d", i); snprintf(key, ADF_CFG_MAX_KEY_LEN_IN_BYTES, ADF_CY_NAME_FORMAT, i); ret |= adf_cfg_add_key_value_param( accel_dev, ADF_SAL_SEC, key, (void *)value, ADF_STR); } for (i = 0; i < dc_user_instances; i++) { val = (accel_dev->accel_id * dc_user_instances + i) % cpus; snprintf(key, ADF_CFG_MAX_KEY_LEN_IN_BYTES, ADF_DC "%d" ADF_ETRMGR_CORE_AFFINITY, i); ret |= adf_cfg_add_key_value_param( accel_dev, ADF_SAL_SEC, key, (void *)&val, ADF_DEC); val = ADF_CFG_STATIC_CONF_POLL; snprintf(key, ADF_CFG_MAX_KEY_LEN_IN_BYTES, ADF_DC "%d" ADF_POLL_MODE, i); ret |= adf_cfg_add_key_value_param( accel_dev, ADF_SAL_SEC, key, (void *)&val, ADF_DEC); snprintf(value, ADF_CFG_MAX_VAL_LEN_IN_BYTES, ADF_DC "%d", i); snprintf(key, ADF_CFG_MAX_KEY_LEN_IN_BYTES, ADF_DC_NAME_FORMAT, i); ret |= adf_cfg_add_key_value_param( accel_dev, ADF_SAL_SEC, key, (void *)value, ADF_STR); } return ret; } static int adf_cfg_static_conf_kernel(struct adf_accel_dev *accel_dev, int asym_enabled, int sym_enabled, int dc_enabled) { int ret = 0; char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES]; char value[ADF_CFG_MAX_VAL_LEN_IN_BYTES]; unsigned long val = 0; int i = 0; int instances = 0; int cy_poll_instances = 0; int cy_irq_instances = 0; int dc_instances = 0; int def_cy_poll_inst = ADF_CFG_STATIC_CONF_INST_NUM_CY_POLL; int def_cy_irq_inst = ADF_CFG_STATIC_CONF_INST_NUM_CY_IRQ; int def_dc_inst = ADF_CFG_STATIC_CONF_INST_NUM_DC; int cpus = num_online_cpus(); instances = GET_MAX_BANKS(accel_dev); if (!instances) return EFAULT; if (accel_dev->is_vf) { def_cy_poll_inst = ADF_CFG_STATIC_CONF_INST_NUM_CY_POLL_VF; def_cy_irq_inst = ADF_CFG_STATIC_CONF_INST_NUM_CY_IRQ_VF; def_dc_inst = ADF_CFG_STATIC_CONF_INST_NUM_DC_VF; } /* Get the mode enabled by user */ ret |= 
adf_cfg_section_add(accel_dev, ADF_KERNEL_SAL_SEC); if (dc_enabled) { if (instances >= def_dc_inst) { dc_instances = def_dc_inst; instances -= dc_instances; } else { return EFAULT; } } if (asym_enabled || sym_enabled) { if (instances >= def_cy_poll_inst) { cy_poll_instances = def_cy_poll_inst; instances -= cy_poll_instances; } else { return EFAULT; } if (sym_enabled) { if (instances >= def_cy_irq_inst) { cy_irq_instances = def_cy_irq_inst; instances -= cy_irq_instances; } else { return EFAULT; } } } val = (cy_poll_instances + cy_irq_instances); snprintf(key, ADF_CFG_MAX_KEY_LEN_IN_BYTES, ADF_NUM_CY); ret |= adf_cfg_add_key_value_param( accel_dev, ADF_KERNEL_SAL_SEC, key, (void *)&val, ADF_DEC); val = dc_instances; snprintf(key, ADF_CFG_MAX_KEY_LEN_IN_BYTES, ADF_NUM_DC); ret |= adf_cfg_add_key_value_param( accel_dev, ADF_KERNEL_SAL_SEC, key, (void *)&val, ADF_DEC); for (i = 0; i < (cy_irq_instances); i++) { val = (accel_dev->accel_id * cy_irq_instances + i) % cpus; snprintf(key, ADF_CFG_MAX_KEY_LEN_IN_BYTES, ADF_CY "%d" ADF_ETRMGR_CORE_AFFINITY, i); ret |= adf_cfg_add_key_value_param( accel_dev, ADF_KERNEL_SAL_SEC, key, (void *)&val, ADF_DEC); val = ADF_CFG_STATIC_CONF_IRQ; snprintf(key, ADF_CFG_MAX_KEY_LEN_IN_BYTES, ADF_CY "%d" ADF_POLL_MODE, i); ret |= adf_cfg_add_key_value_param( accel_dev, ADF_KERNEL_SAL_SEC, key, (void *)&val, ADF_DEC); snprintf(value, ADF_CFG_MAX_VAL_LEN_IN_BYTES, ADF_CY "%d", i); snprintf(key, ADF_CFG_MAX_KEY_LEN_IN_BYTES, ADF_CY_NAME_FORMAT, i); ret |= adf_cfg_add_key_value_param( accel_dev, ADF_KERNEL_SAL_SEC, key, (void *)value, ADF_STR); } for (i = cy_irq_instances; i < (cy_poll_instances + cy_irq_instances); i++) { val = (accel_dev->accel_id * cy_poll_instances + i) % cpus; snprintf(key, ADF_CFG_MAX_KEY_LEN_IN_BYTES, ADF_CY "%d" ADF_ETRMGR_CORE_AFFINITY, i); ret |= adf_cfg_add_key_value_param( accel_dev, ADF_KERNEL_SAL_SEC, key, (void *)&val, ADF_DEC); val = ADF_CFG_STATIC_CONF_POLL; snprintf(key, ADF_CFG_MAX_KEY_LEN_IN_BYTES, ADF_CY "%d" ADF_POLL_MODE, i); ret |= adf_cfg_add_key_value_param( accel_dev, ADF_KERNEL_SAL_SEC, key, (void *)&val, ADF_DEC); snprintf(value, ADF_CFG_MAX_VAL_LEN_IN_BYTES, ADF_CY "%d", i); snprintf(key, ADF_CFG_MAX_KEY_LEN_IN_BYTES, ADF_CY_NAME_FORMAT, i); ret |= adf_cfg_add_key_value_param( accel_dev, ADF_KERNEL_SAL_SEC, key, (void *)value, ADF_STR); } for (i = 0; i < dc_instances; i++) { val = (accel_dev->accel_id * dc_instances + i) % cpus; snprintf(key, ADF_CFG_MAX_KEY_LEN_IN_BYTES, ADF_DC "%d" ADF_ETRMGR_CORE_AFFINITY, i); ret |= adf_cfg_add_key_value_param( accel_dev, ADF_KERNEL_SAL_SEC, key, (void *)&val, ADF_DEC); val = ADF_CFG_STATIC_CONF_POLL; snprintf(key, ADF_CFG_MAX_KEY_LEN_IN_BYTES, ADF_DC "%d" ADF_POLL_MODE, i); ret |= adf_cfg_add_key_value_param( accel_dev, ADF_KERNEL_SAL_SEC, key, (void *)&val, ADF_DEC); snprintf(value, ADF_CFG_MAX_VAL_LEN_IN_BYTES, ADF_DC "%d", i); snprintf(key, ADF_CFG_MAX_KEY_LEN_IN_BYTES, ADF_DC_NAME_FORMAT, i); ret |= adf_cfg_add_key_value_param( accel_dev, ADF_KERNEL_SAL_SEC, key, (void *)value, ADF_STR); } return ret; } static int adf_cfg_static_conf(struct adf_accel_dev *accel_dev) { int ret = 0; unsigned long val = 0; char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES]; char value[ADF_CFG_MAX_VAL_LEN_IN_BYTES]; char *token, *cur_str; int ks_enabled = 0; int us_enabled = 0; int asym_enabled = 0; int sym_enabled = 0; int cy_enabled = 0; int dc_enabled = 0; strncpy(value, accel_dev->cfg->cfg_mode, ADF_CFG_MAX_VAL); cur_str = value; token = strsep(&cur_str, ADF_SERVICES_SEPARATOR); while (token) { if 
(!strncmp(token, ADF_CFG_KERNEL, strlen(ADF_CFG_KERNEL))) ks_enabled = 1; if (!strncmp(token, ADF_CFG_USER, strlen(ADF_CFG_USER))) us_enabled = 1; token = strsep(&cur_str, ADF_SERVICES_SEPARATOR); } /* Get the services enabled by user */ strncpy(value, accel_dev->cfg->cfg_services, ADF_CFG_MAX_VAL); cur_str = value; token = strsep(&cur_str, ADF_SERVICES_SEPARATOR); while (token) { if (!strncmp(token, ADF_CFG_SYM, strlen(ADF_CFG_SYM))) { sym_enabled = 1; } if (!strncmp(token, ADF_CFG_ASYM, strlen(ADF_CFG_ASYM))) { asym_enabled = 1; } /* cy means both asym & crypto should be enabled * Hardware resources allocation check will be done later */ if (!strncmp(token, ADF_CFG_CY, strlen(ADF_CFG_CY))) { asym_enabled = 1; sym_enabled = 1; } if (!strncmp(token, ADF_SERVICE_DC, strlen(ADF_SERVICE_DC))) { dc_enabled = 1; } token = strsep(&cur_str, ADF_SERVICES_SEPARATOR); } if (asym_enabled || sym_enabled) { cy_enabled = 1; } ret |= adf_cfg_section_add(accel_dev, ADF_GENERAL_SEC); snprintf(key, ADF_CFG_MAX_KEY_LEN_IN_BYTES, ADF_SERVICES_ENABLED); if (strcmp(ADF_CFG_SYM_ASYM, accel_dev->cfg->cfg_services) == 0) { strncpy(value, ADF_CFG_CY, ADF_CFG_MAX_VAL_LEN_IN_BYTES); } else { strncpy(value, accel_dev->cfg->cfg_services, ADF_CFG_MAX_VAL_LEN_IN_BYTES); } ret |= adf_cfg_add_key_value_param( accel_dev, ADF_GENERAL_SEC, key, (void *)value, ADF_STR); val = ADF_CFG_STATIC_CONF_VER; snprintf(key, ADF_CFG_MAX_KEY_LEN_IN_BYTES, ADF_CONFIG_VERSION); ret |= adf_cfg_add_key_value_param( accel_dev, ADF_GENERAL_SEC, key, (void *)&val, ADF_DEC); val = ADF_CFG_STATIC_CONF_AUTO_RESET; snprintf(key, ADF_CFG_MAX_KEY_LEN_IN_BYTES, ADF_AUTO_RESET_ON_ERROR); ret |= adf_cfg_add_key_value_param( accel_dev, ADF_GENERAL_SEC, key, (void *)&val, ADF_DEC); if (accel_dev->hw_device->get_num_accel_units) { int cy_au = 0; int dc_au = 0; int num_au = accel_dev->hw_device->get_num_accel_units( accel_dev->hw_device); if (num_au > ADF_CFG_STATIC_CONF_NUM_DC_ACCEL_UNITS) { cy_au = num_au - ADF_CFG_STATIC_CONF_NUM_DC_ACCEL_UNITS; dc_au = ADF_CFG_STATIC_CONF_NUM_DC_ACCEL_UNITS; } else if (num_au == ADF_CFG_STATIC_CONF_NUM_DC_ACCEL_UNITS) { cy_au = 1; dc_au = 1; } else { return EFAULT; } /* User defined adjustement basing on serives enabled */ if (cy_enabled && !dc_enabled) { cy_au += dc_au; dc_au = 0; } else if (!cy_enabled && dc_enabled) { dc_au += cy_au; cy_au = 0; } val = cy_au; snprintf(key, ADF_CFG_MAX_KEY_LEN_IN_BYTES, ADF_NUM_CY_ACCEL_UNITS); ret |= adf_cfg_add_key_value_param( accel_dev, ADF_GENERAL_SEC, key, (void *)&val, ADF_DEC); val = dc_au; snprintf(key, ADF_CFG_MAX_KEY_LEN_IN_BYTES, ADF_NUM_DC_ACCEL_UNITS); ret |= adf_cfg_add_key_value_param( accel_dev, ADF_GENERAL_SEC, key, (void *)&val, ADF_DEC); val = ADF_CFG_STATIC_CONF_NUM_INLINE_ACCEL_UNITS; snprintf(key, ADF_CFG_MAX_KEY_LEN_IN_BYTES, ADF_NUM_INLINE_ACCEL_UNITS); ret |= adf_cfg_add_key_value_param( accel_dev, ADF_GENERAL_SEC, key, (void *)&val, ADF_DEC); } val = ADF_CFG_STATIC_CONF_CY_ASYM_RING_SIZE; snprintf(key, ADF_CFG_MAX_KEY_LEN_IN_BYTES, ADF_CY ADF_RING_ASYM_SIZE); ret |= adf_cfg_add_key_value_param( accel_dev, ADF_GENERAL_SEC, key, (void *)&val, ADF_DEC); val = ADF_CFG_STATIC_CONF_CY_SYM_RING_SIZE; snprintf(key, ADF_CFG_MAX_KEY_LEN_IN_BYTES, ADF_CY ADF_RING_SYM_SIZE); ret |= adf_cfg_add_key_value_param( accel_dev, ADF_GENERAL_SEC, key, (void *)&val, ADF_DEC); val = ADF_CFG_STATIC_CONF_DC_INTER_BUF_SIZE; snprintf(key, ADF_CFG_MAX_KEY_LEN_IN_BYTES, ADF_INTER_BUF_SIZE); ret |= adf_cfg_add_key_value_param( accel_dev, ADF_GENERAL_SEC, key, (void *)&val, ADF_DEC); val = 
ADF_CFG_STATIC_CONF_SAL_STATS_CFG_DC; snprintf(key, ADF_CFG_MAX_KEY_LEN_IN_BYTES, SAL_STATS_CFG_DC); ret |= adf_cfg_add_key_value_param( accel_dev, ADF_GENERAL_SEC, key, (void *)&val, ADF_DEC); val = ADF_CFG_STATIC_CONF_SAL_STATS_CFG_DH; snprintf(key, ADF_CFG_MAX_KEY_LEN_IN_BYTES, SAL_STATS_CFG_DH); ret |= adf_cfg_add_key_value_param( accel_dev, ADF_GENERAL_SEC, key, (void *)&val, ADF_DEC); val = ADF_CFG_STATIC_CONF_SAL_STATS_CFG_DRBG; snprintf(key, ADF_CFG_MAX_KEY_LEN_IN_BYTES, SAL_STATS_CFG_DRBG); ret |= adf_cfg_add_key_value_param( accel_dev, ADF_GENERAL_SEC, key, (void *)&val, ADF_DEC); val = ADF_CFG_STATIC_CONF_SAL_STATS_CFG_DSA; snprintf(key, ADF_CFG_MAX_KEY_LEN_IN_BYTES, SAL_STATS_CFG_DSA); ret |= adf_cfg_add_key_value_param( accel_dev, ADF_GENERAL_SEC, key, (void *)&val, ADF_DEC); val = ADF_CFG_STATIC_CONF_SAL_STATS_CFG_ECC; snprintf(key, ADF_CFG_MAX_KEY_LEN_IN_BYTES, SAL_STATS_CFG_ECC); ret |= adf_cfg_add_key_value_param( accel_dev, ADF_GENERAL_SEC, key, (void *)&val, ADF_DEC); val = ADF_CFG_STATIC_CONF_SAL_STATS_CFG_ENABLED; snprintf(key, ADF_CFG_MAX_KEY_LEN_IN_BYTES, SAL_STATS_CFG_ENABLED); ret |= adf_cfg_add_key_value_param( accel_dev, ADF_GENERAL_SEC, key, (void *)&val, ADF_DEC); val = ADF_CFG_STATIC_CONF_SAL_STATS_CFG_KEYGEN; snprintf(key, ADF_CFG_MAX_KEY_LEN_IN_BYTES, SAL_STATS_CFG_KEYGEN); ret |= adf_cfg_add_key_value_param( accel_dev, ADF_GENERAL_SEC, key, (void *)&val, ADF_DEC); val = ADF_CFG_STATIC_CONF_SAL_STATS_CFG_LN; snprintf(key, ADF_CFG_MAX_KEY_LEN_IN_BYTES, SAL_STATS_CFG_LN); ret |= adf_cfg_add_key_value_param( accel_dev, ADF_GENERAL_SEC, key, (void *)&val, ADF_DEC); val = ADF_CFG_STATIC_CONF_SAL_STATS_CFG_PRIME; snprintf(key, ADF_CFG_MAX_KEY_LEN_IN_BYTES, SAL_STATS_CFG_PRIME); ret |= adf_cfg_add_key_value_param( accel_dev, ADF_GENERAL_SEC, key, (void *)&val, ADF_DEC); val = ADF_CFG_STATIC_CONF_SAL_STATS_CFG_RSA; snprintf(key, ADF_CFG_MAX_KEY_LEN_IN_BYTES, SAL_STATS_CFG_RSA); ret |= adf_cfg_add_key_value_param( accel_dev, ADF_GENERAL_SEC, key, (void *)&val, ADF_DEC); val = ADF_CFG_STATIC_CONF_SAL_STATS_CFG_SYM; snprintf(key, ADF_CFG_MAX_KEY_LEN_IN_BYTES, SAL_STATS_CFG_SYM); ret |= adf_cfg_add_key_value_param( accel_dev, ADF_GENERAL_SEC, key, (void *)&val, ADF_DEC); if (ks_enabled) { ret |= adf_cfg_static_conf_kernel(accel_dev, asym_enabled, sym_enabled, dc_enabled); } if (us_enabled) { ret |= adf_cfg_static_conf_user(accel_dev, cy_enabled, dc_enabled); } if (ret) ret = ENXIO; return ret; } int adf_config_device(struct adf_accel_dev *accel_dev) { struct adf_cfg_device_data *cfg = NULL; struct adf_cfg_device *cfg_device = NULL; struct adf_cfg_section *sec; struct list_head *list; int ret = ENOMEM; if (!accel_dev) return ret; ret = adf_cfg_static_conf(accel_dev); if (ret) goto failed; cfg = accel_dev->cfg; cfg->dev = NULL; cfg_device = (struct adf_cfg_device *)malloc(sizeof(*cfg_device), M_QAT, M_WAITOK | M_ZERO); ret = EFAULT; if (adf_cfg_device_init(cfg_device, accel_dev)) goto failed; cfg->dev = cfg_device; /* GENERAL and KERNEL section must be processed before others */ list_for_each(list, &cfg->sec_list) { sec = list_entry(list, struct adf_cfg_section, list); if (!strcmp(sec->name, ADF_GENERAL_SEC)) { ret = adf_cfg_process_section(accel_dev, sec->name, accel_dev->accel_id); if (ret) goto failed; sec->processed = true; break; } } list_for_each(list, &cfg->sec_list) { sec = list_entry(list, struct adf_cfg_section, list); if (!strcmp(sec->name, ADF_KERNEL_SEC)) { ret = adf_cfg_process_section(accel_dev, sec->name, accel_dev->accel_id); if (ret) goto failed; 
sec->processed = true; break; } } list_for_each(list, &cfg->sec_list) { sec = list_entry(list, struct adf_cfg_section, list); if (!strcmp(sec->name, ADF_KERNEL_SAL_SEC)) { ret = adf_cfg_process_section(accel_dev, sec->name, accel_dev->accel_id); if (ret) goto failed; sec->processed = true; break; } } list_for_each(list, &cfg->sec_list) { sec = list_entry(list, struct adf_cfg_section, list); /* avoid reprocessing one section */ if (!sec->processed && !sec->is_derived) { ret = adf_cfg_process_section(accel_dev, sec->name, accel_dev->accel_id); if (ret) goto failed; sec->processed = true; } } /* newly added accel section */ ret = adf_cfg_process_section(accel_dev, ADF_ACCEL_SEC, accel_dev->accel_id); if (ret) goto failed; /* * put item-remove task after item-process * because during process we may fetch values from those items */ list_for_each(list, &cfg->sec_list) { sec = list_entry(list, struct adf_cfg_section, list); if (!sec->is_derived) { ret = adf_cfg_cleanup_section(accel_dev, sec->name, accel_dev->accel_id); if (ret) goto failed; } } ret = 0; set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status); failed: if (ret) { if (cfg_device) { adf_cfg_device_clear(cfg_device, accel_dev); free(cfg_device, M_QAT); cfg->dev = NULL; } adf_cfg_del_all(accel_dev); device_printf(GET_DEV(accel_dev), "Failed to config device\n"); } return ret; } diff --git a/sys/dev/qat/qat_common/adf_freebsd_cnvnr_ctrs_dbg.c b/sys/dev/qat/qat_common/adf_freebsd_cnvnr_ctrs_dbg.c index 539059589bc8..e4ddbf489192 100644 --- a/sys/dev/qat/qat_common/adf_freebsd_cnvnr_ctrs_dbg.c +++ b/sys/dev/qat/qat_common/adf_freebsd_cnvnr_ctrs_dbg.c @@ -1,199 +1,200 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright(c) 2007-2025 Intel Corporation */ #include #include #include #include #include "adf_cnvnr_freq_counters.h" #include "adf_common_drv.h" #include "adf_cfg.h" #include "icp_qat_fw_init_admin.h" #define ADF_CNVNR_ERR_MASK 0xFFF #define LINE \ "+-----------------------------------------------------------------+\n" #define BANNER \ "| CNV Error Freq Statistics for Qat Device |\n" #define NEW_LINE "\n" #define REPORT_ENTRY_FORMAT \ "|[AE %2d]: TotalErrors: %5d : LastError: %s [%5d] |\n" #define MAX_LINE_LENGTH 128 #define MAX_REPORT_SIZE ((ADF_MAX_ACCELENGINES + 3) * MAX_LINE_LENGTH) #define PRINT_LINE(line) \ (snprintf( \ report_ptr, MAX_REPORT_SIZE - (report_ptr - report), "%s", line)) const char *cnvnr_err_str[] = {"No Error ", "Checksum Error", "Length Error-P", "Decomp Error ", "Xlat Error ", "Length Error-C", "Unknown Error "}; /* Handler for HB status check */ static int qat_cnvnr_ctrs_dbg_read(SYSCTL_HANDLER_ARGS) { struct adf_accel_dev *accel_dev = arg1; struct adf_hw_device_data *hw_device; struct icp_qat_fw_init_admin_req request; struct icp_qat_fw_init_admin_resp response; unsigned long dc_ae_msk = 0; u8 num_aes = 0, ae = 0, error_type = 0, bytes_written = 0; s16 latest_error = 0; char report[MAX_REPORT_SIZE]; char *report_ptr = report; if (priv_check(curthread, PRIV_DRIVER) != 0) return EPERM; /* Defensive check */ if (!accel_dev || accel_dev->accel_id > ADF_MAX_DEVICES) return EINVAL; if (!adf_dev_started(accel_dev)) { device_printf(GET_DEV(accel_dev), "QAT Device not started\n"); return EINVAL; } hw_device = accel_dev->hw_device; if (!hw_device) { device_printf(GET_DEV(accel_dev), "Failed to get hw_device.\n"); return EFAULT; } /* Clean report memory */ explicit_bzero(report, sizeof(report)); /* Adding banner to report */ bytes_written = PRINT_LINE(NEW_LINE); if (bytes_written <= 0) return EINVAL; report_ptr += 
bytes_written; bytes_written = PRINT_LINE(LINE); if (bytes_written <= 0) return EINVAL; report_ptr += bytes_written; bytes_written = PRINT_LINE(BANNER); if (bytes_written <= 0) return EINVAL; report_ptr += bytes_written; bytes_written = PRINT_LINE(LINE); if (bytes_written <= 0) return EINVAL; report_ptr += bytes_written; if (accel_dev->au_info) dc_ae_msk = accel_dev->au_info->dc_ae_msk; /* Extracting number of Acceleration Engines */ num_aes = hw_device->get_num_aes(hw_device); + explicit_bzero(&request, sizeof(struct icp_qat_fw_init_admin_req)); for (ae = 0; ae < num_aes; ae++) { if (accel_dev->au_info && !test_bit(ae, &dc_ae_msk)) continue; explicit_bzero(&response, sizeof(struct icp_qat_fw_init_admin_resp)); request.cmd_id = ICP_QAT_FW_CNV_STATS_GET; if (adf_put_admin_msg_sync( accel_dev, ae, &request, &response) || response.status) { return EFAULT; } error_type = CNV_ERROR_TYPE_GET(response.latest_error); if (error_type == CNV_ERR_TYPE_DECOMP_PRODUCED_LENGTH_ERROR || error_type == CNV_ERR_TYPE_DECOMP_CONSUMED_LENGTH_ERROR) { latest_error = CNV_ERROR_LENGTH_DELTA_GET(response.latest_error); } else if (error_type == CNV_ERR_TYPE_DECOMPRESSION_ERROR || error_type == CNV_ERR_TYPE_TRANSLATION_ERROR) { latest_error = CNV_ERROR_DECOMP_STATUS_GET(response.latest_error); } else { latest_error = response.latest_error & ADF_CNVNR_ERR_MASK; } bytes_written = snprintf(report_ptr, MAX_REPORT_SIZE - (report_ptr - report), REPORT_ENTRY_FORMAT, ae, response.error_count, cnvnr_err_str[error_type], latest_error); if (bytes_written <= 0) { device_printf( GET_DEV(accel_dev), "ERROR: No space left in CnV ctrs line buffer\n" "\tAcceleration ID: %d, Engine: %d\n", accel_dev->accel_id, ae); break; } report_ptr += bytes_written; } sysctl_handle_string(oidp, report, sizeof(report), req); return 0; } int adf_cnvnr_freq_counters_add(struct adf_accel_dev *accel_dev) { struct sysctl_ctx_list *qat_sysctl_ctx; struct sysctl_oid *qat_cnvnr_ctrs_sysctl_tree; /* Defensive checks */ if (!accel_dev) return EINVAL; /* Creating context and tree */ qat_sysctl_ctx = device_get_sysctl_ctx(accel_dev->accel_pci_dev.pci_dev); qat_cnvnr_ctrs_sysctl_tree = device_get_sysctl_tree(accel_dev->accel_pci_dev.pci_dev); /* Create "cnv_error" string type leaf - with callback */ accel_dev->cnv_error_oid = SYSCTL_ADD_PROC(qat_sysctl_ctx, SYSCTL_CHILDREN(qat_cnvnr_ctrs_sysctl_tree), OID_AUTO, "cnv_error", CTLTYPE_STRING | CTLFLAG_RD, accel_dev, 0, qat_cnvnr_ctrs_dbg_read, "IU", "QAT CnVnR status"); if (!accel_dev->cnv_error_oid) { device_printf( GET_DEV(accel_dev), "Failed to create qat cnvnr freq counters sysctl entry.\n"); return ENOMEM; } return 0; } void adf_cnvnr_freq_counters_remove(struct adf_accel_dev *accel_dev) { struct sysctl_ctx_list *qat_sysctl_ctx; if (!accel_dev) return; qat_sysctl_ctx = device_get_sysctl_ctx(accel_dev->accel_pci_dev.pci_dev); if (accel_dev->cnv_error_oid) { sysctl_ctx_entry_del(qat_sysctl_ctx, accel_dev->cnv_error_oid); sysctl_remove_oid(accel_dev->cnv_error_oid, 1, 1); accel_dev->cnv_error_oid = NULL; } } diff --git a/sys/dev/qat/qat_common/adf_freebsd_dev_processes.c b/sys/dev/qat/qat_common/adf_freebsd_dev_processes.c index 661d5bd0f14e..67e1d4ad2cab 100644 --- a/sys/dev/qat/qat_common/adf_freebsd_dev_processes.c +++ b/sys/dev/qat/qat_common/adf_freebsd_dev_processes.c @@ -1,672 +1,660 @@ /* SPDX-License-Identifier: BSD-3-Clause */ -/* Copyright(c) 2007-2022 Intel Corporation */ - +/* Copyright(c) 2007-2025 Intel Corporation */ #include "qat_freebsd.h" #include "adf_cfg.h" #include "adf_common_drv.h" 
#include "adf_accel_devices.h" #include "icp_qat_uclo.h" #include "icp_qat_fw.h" #include "icp_qat_fw_init_admin.h" #include "adf_cfg_strings.h" #include "adf_uio_control.h" #include "adf_uio_cleanup.h" #include "adf_uio.h" #include "adf_transport_access_macros.h" #include "adf_transport_internal.h" #define ADF_DEV_PROCESSES_NAME "qat_dev_processes" #define ADF_DEV_STATE_NAME "qat_dev_state" #define ADF_STATE_CALLOUT_TIME 10 static const char *mtx_name = "state_mtx"; static const char *mtx_callout_name = "callout_mtx"; static d_open_t adf_processes_open; static void adf_processes_release(void *data); static d_read_t adf_processes_read; static d_write_t adf_processes_write; static d_open_t adf_state_open; static void adf_state_release(void *data); static d_read_t adf_state_read; static int adf_state_kqfilter(struct cdev *dev, struct knote *kn); static int adf_state_kqread_event(struct knote *kn, long hint); static void adf_state_kqread_detach(struct knote *kn); static struct callout callout; static struct mtx mtx; static struct mtx callout_mtx; static struct service_hndl adf_state_hndl; struct entry_proc_events { struct adf_state_priv_data *proc_events; SLIST_ENTRY(entry_proc_events) entries_proc_events; }; struct entry_state { struct adf_state state; STAILQ_ENTRY(entry_state) entries_state; }; SLIST_HEAD(proc_events_head, entry_proc_events); STAILQ_HEAD(state_head, entry_state); static struct proc_events_head proc_events_head; struct adf_processes_priv_data { char name[ADF_CFG_MAX_SECTION_LEN_IN_BYTES]; int read_flag; struct list_head list; }; struct adf_state_priv_data { struct cdev *cdev; struct selinfo rsel; struct state_head state_head; }; static struct cdevsw adf_processes_cdevsw = { .d_version = D_VERSION, .d_open = adf_processes_open, .d_read = adf_processes_read, .d_write = adf_processes_write, .d_name = ADF_DEV_PROCESSES_NAME, }; static struct cdevsw adf_state_cdevsw = { .d_version = D_VERSION, .d_open = adf_state_open, .d_read = adf_state_read, .d_kqfilter = adf_state_kqfilter, .d_name = ADF_DEV_STATE_NAME, }; -static const struct filterops adf_state_read_filterops = { +static struct filterops adf_state_read_filterops = { .f_isfd = 1, .f_attach = NULL, .f_detach = adf_state_kqread_detach, .f_event = adf_state_kqread_event, }; static struct cdev *adf_processes_dev; static struct cdev *adf_state_dev; static LINUX_LIST_HEAD(processes_list); struct sx processes_list_sema; SX_SYSINIT(processes_list_sema, &processes_list_sema, "adf proc list"); static void adf_chr_drv_destroy(void) { destroy_dev(adf_processes_dev); } static int adf_chr_drv_create(void) { adf_processes_dev = make_dev(&adf_processes_cdevsw, 0, UID_ROOT, GID_WHEEL, 0600, ADF_DEV_PROCESSES_NAME); if (adf_processes_dev == NULL) { printf("QAT: failed to create device\n"); goto err_cdev_del; } return 0; err_cdev_del: return EFAULT; } static int adf_processes_open(struct cdev *dev, int oflags, int devtype, struct thread *td) { int i = 0, devices = 0; struct adf_accel_dev *accel_dev = NULL; struct adf_processes_priv_data *prv_data = NULL; int error = 0; for (i = 0; i < ADF_MAX_DEVICES; i++) { accel_dev = adf_devmgr_get_dev_by_id(i); if (!accel_dev) continue; if (!adf_dev_started(accel_dev)) continue; devices++; } if (!devices) { printf("QAT: No active devices found.\n"); return ENXIO; } prv_data = malloc(sizeof(*prv_data), M_QAT, M_WAITOK | M_ZERO); INIT_LIST_HEAD(&prv_data->list); error = devfs_set_cdevpriv(prv_data, adf_processes_release); if (error) { free(prv_data, M_QAT); return error; } return 0; } static int 
adf_get_first_started_dev(void) { int i = 0; struct adf_accel_dev *accel_dev = NULL; for (i = 0; i < ADF_MAX_DEVICES; i++) { accel_dev = adf_devmgr_get_dev_by_id(i); if (!accel_dev) continue; if (adf_dev_started(accel_dev)) return i; } return -1; } static int adf_processes_write(struct cdev *dev, struct uio *uio, int ioflag) { struct adf_processes_priv_data *prv_data = NULL; struct adf_processes_priv_data *pdata = NULL; int dev_num = 0, pr_num = 0; struct list_head *lpos = NULL; char usr_name[ADF_CFG_MAX_SECTION_LEN_IN_BYTES] = { 0 }; struct adf_accel_dev *accel_dev = NULL; struct adf_cfg_section *section_ptr = NULL; bool pr_name_available = 1; uint32_t num_accel_devs = 0; int error = 0; ssize_t count; int dev_id; error = devfs_get_cdevpriv((void **)&prv_data); if (error) { printf("QAT: invalid file descriptor\n"); return error; } if (prv_data->read_flag == 1) { printf("QAT: can only write once\n"); return EBADF; } count = uio->uio_resid; if ((count <= 0) || (count > ADF_CFG_MAX_SECTION_LEN_IN_BYTES)) { printf("QAT: wrong size %d\n", (int)count); return EIO; } error = uiomove(usr_name, count, uio); if (error) { printf("QAT: can't copy data\n"); return error; } /* Lock other processes and try to find out the process name */ if (sx_xlock_sig(&processes_list_sema)) { printf("QAT: can't aquire process info lock\n"); return EBADF; } dev_id = adf_get_first_started_dev(); if (-1 == dev_id) { pr_err("QAT: could not find started device\n"); sx_xunlock(&processes_list_sema); return -EIO; } accel_dev = adf_devmgr_get_dev_by_id(dev_id); if (!accel_dev) { pr_err("QAT: could not find started device\n"); sx_xunlock(&processes_list_sema); return -EIO; } /* If there is nothing there then take the first name and return */ if (list_empty(&processes_list)) { snprintf(prv_data->name, ADF_CFG_MAX_SECTION_LEN_IN_BYTES, "%s" ADF_INTERNAL_USERSPACE_SEC_SUFF "%d", usr_name, 0); list_add(&prv_data->list, &processes_list); sx_xunlock(&processes_list_sema); prv_data->read_flag = 1; return 0; } /* If there are processes running then search for a first free name */ adf_devmgr_get_num_dev(&num_accel_devs); for (dev_num = 0; dev_num < num_accel_devs; dev_num++) { accel_dev = adf_devmgr_get_dev_by_id(dev_num); if (!accel_dev) continue; if (!adf_dev_started(accel_dev)) continue; /* to next device */ for (pr_num = 0; pr_num < GET_MAX_PROCESSES(accel_dev); pr_num++) { snprintf(prv_data->name, ADF_CFG_MAX_SECTION_LEN_IN_BYTES, "%s" ADF_INTERNAL_USERSPACE_SEC_SUFF "%d", usr_name, pr_num); pr_name_available = 1; /* Figure out if section exists in the config table */ section_ptr = adf_cfg_sec_find(accel_dev, prv_data->name); if (NULL == section_ptr) { /* This section name doesn't exist */ pr_name_available = 0; /* As process_num enumerates from 0, once we get * to one which doesn't exist no further ones * will exist. 
On to next device */ break; } /* Figure out if it's been taken already */ list_for_each(lpos, &processes_list) { pdata = list_entry(lpos, struct adf_processes_priv_data, list); if (!strncmp( pdata->name, prv_data->name, ADF_CFG_MAX_SECTION_LEN_IN_BYTES)) { pr_name_available = 0; break; } } if (pr_name_available) break; } if (pr_name_available) break; } /* * If we have a valid name that is not on * the list take it and add to the list */ if (pr_name_available) { list_add(&prv_data->list, &processes_list); sx_xunlock(&processes_list_sema); prv_data->read_flag = 1; return 0; } /* If not then the process needs to wait */ sx_xunlock(&processes_list_sema); explicit_bzero(prv_data->name, ADF_CFG_MAX_SECTION_LEN_IN_BYTES); prv_data->read_flag = 0; return 1; } static int adf_processes_read(struct cdev *dev, struct uio *uio, int ioflag) { struct adf_processes_priv_data *prv_data = NULL; int error = 0; error = devfs_get_cdevpriv((void **)&prv_data); if (error) { printf("QAT: invalid file descriptor\n"); return error; } /* * If there is a name that the process can use then give it * to the proocess. */ if (prv_data->read_flag) { error = uiomove(prv_data->name, strnlen(prv_data->name, ADF_CFG_MAX_SECTION_LEN_IN_BYTES), uio); if (error) { printf("QAT: failed to copy data to user\n"); return error; } return 0; } return EIO; } static void adf_processes_release(void *data) { struct adf_processes_priv_data *prv_data = NULL; prv_data = (struct adf_processes_priv_data *)data; sx_xlock(&processes_list_sema); list_del(&prv_data->list); sx_xunlock(&processes_list_sema); free(prv_data, M_QAT); } int adf_processes_dev_register(void) { return adf_chr_drv_create(); } void adf_processes_dev_unregister(void) { adf_chr_drv_destroy(); } static void adf_state_callout_notify_ev(void *arg) { int notified = 0; struct adf_state_priv_data *priv = NULL; struct entry_proc_events *proc_events = NULL; SLIST_FOREACH (proc_events, &proc_events_head, entries_proc_events) { if (!STAILQ_EMPTY(&proc_events->proc_events->state_head)) { notified = 1; priv = proc_events->proc_events; wakeup(priv); selwakeup(&priv->rsel); KNOTE_UNLOCKED(&priv->rsel.si_note, 0); } } if (notified) callout_schedule(&callout, ADF_STATE_CALLOUT_TIME); } static void adf_state_set(int dev, enum adf_event event) { struct adf_accel_dev *accel_dev = NULL; struct state_head *head = NULL; struct entry_proc_events *proc_events = NULL; struct entry_state *state = NULL; accel_dev = adf_devmgr_get_dev_by_id(dev); if (!accel_dev) return; mtx_lock(&mtx); SLIST_FOREACH (proc_events, &proc_events_head, entries_proc_events) { state = NULL; head = &proc_events->proc_events->state_head; state = malloc(sizeof(struct entry_state), M_QAT, M_NOWAIT | M_ZERO); if (!state) continue; state->state.dev_state = event; state->state.dev_id = dev; STAILQ_INSERT_TAIL(head, state, entries_state); - if (event == ADF_EVENT_STOP) { - state = NULL; - state = malloc(sizeof(struct entry_state), - M_QAT, - M_NOWAIT | M_ZERO); - if (!state) - continue; - state->state.dev_state = ADF_EVENT_SHUTDOWN; - state->state.dev_id = dev; - STAILQ_INSERT_TAIL(head, state, entries_state); - } } mtx_unlock(&mtx); callout_schedule(&callout, ADF_STATE_CALLOUT_TIME); } static int adf_state_event_handler(struct adf_accel_dev *accel_dev, enum adf_event event) { int ret = 0; #if defined(QAT_UIO) && defined(QAT_DBG) if (event > ADF_EVENT_DBG_SHUTDOWN) return -EINVAL; #else if (event > ADF_EVENT_ERROR) return -EINVAL; #endif /* defined(QAT_UIO) && defined(QAT_DBG) */ switch (event) { case ADF_EVENT_INIT: return ret; case 
ADF_EVENT_SHUTDOWN: return ret; case ADF_EVENT_RESTARTING: break; case ADF_EVENT_RESTARTED: break; case ADF_EVENT_START: return ret; case ADF_EVENT_STOP: - break; + return ret; case ADF_EVENT_ERROR: break; #if defined(QAT_UIO) && defined(QAT_DBG) case ADF_EVENT_PROC_CRASH: break; case ADF_EVENT_MANUAL_DUMP: break; case ADF_EVENT_SLICE_HANG: break; case ADF_EVENT_DBG_SHUTDOWN: break; #endif /* defined(QAT_UIO) && defined(QAT_DBG) */ default: return -1; } adf_state_set(accel_dev->accel_id, event); return 0; } static int adf_state_kqfilter(struct cdev *dev, struct knote *kn) { struct adf_state_priv_data *priv; mtx_lock(&mtx); priv = dev->si_drv1; switch (kn->kn_filter) { case EVFILT_READ: kn->kn_fop = &adf_state_read_filterops; kn->kn_hook = priv; knlist_add(&priv->rsel.si_note, kn, 1); mtx_unlock(&mtx); return 0; default: mtx_unlock(&mtx); return -EINVAL; } } static int adf_state_kqread_event(struct knote *kn, long hint) { return 1; } static void adf_state_kqread_detach(struct knote *kn) { struct adf_state_priv_data *priv = NULL; mtx_lock(&mtx); if (!kn) { mtx_unlock(&mtx); return; } priv = kn->kn_hook; if (!priv) { mtx_unlock(&mtx); return; } knlist_remove(&priv->rsel.si_note, kn, 1); mtx_unlock(&mtx); } void adf_state_init(void) { adf_state_dev = make_dev(&adf_state_cdevsw, 0, UID_ROOT, GID_WHEEL, 0600, "%s", ADF_DEV_STATE_NAME); SLIST_INIT(&proc_events_head); mtx_init(&mtx, mtx_name, NULL, MTX_DEF); mtx_init(&callout_mtx, mtx_callout_name, NULL, MTX_DEF); callout_init_mtx(&callout, &callout_mtx, 0); explicit_bzero(&adf_state_hndl, sizeof(adf_state_hndl)); adf_state_hndl.event_hld = adf_state_event_handler; adf_state_hndl.name = "adf_state_event_handler"; adf_service_register(&adf_state_hndl); callout_reset(&callout, ADF_STATE_CALLOUT_TIME, adf_state_callout_notify_ev, NULL); } void adf_state_destroy(void) { struct entry_proc_events *proc_events = NULL; adf_service_unregister(&adf_state_hndl); + destroy_dev(adf_state_dev); mtx_lock(&callout_mtx); callout_stop(&callout); mtx_unlock(&callout_mtx); mtx_destroy(&callout_mtx); mtx_lock(&mtx); while (!SLIST_EMPTY(&proc_events_head)) { proc_events = SLIST_FIRST(&proc_events_head); SLIST_REMOVE_HEAD(&proc_events_head, entries_proc_events); free(proc_events, M_QAT); } mtx_unlock(&mtx); mtx_destroy(&mtx); - destroy_dev(adf_state_dev); } static int adf_state_open(struct cdev *dev, int oflags, int devtype, struct thread *td) { struct adf_state_priv_data *prv_data = NULL; struct entry_proc_events *entry_proc_events = NULL; int ret = 0; prv_data = malloc(sizeof(*prv_data), M_QAT, M_WAITOK | M_ZERO); entry_proc_events = malloc(sizeof(struct entry_proc_events), M_QAT, M_WAITOK | M_ZERO); mtx_lock(&mtx); prv_data->cdev = dev; prv_data->cdev->si_drv1 = prv_data; knlist_init_mtx(&prv_data->rsel.si_note, &mtx); STAILQ_INIT(&prv_data->state_head); entry_proc_events->proc_events = prv_data; SLIST_INSERT_HEAD(&proc_events_head, entry_proc_events, entries_proc_events); mtx_unlock(&mtx); ret = devfs_set_cdevpriv(prv_data, adf_state_release); if (ret) { SLIST_REMOVE(&proc_events_head, entry_proc_events, entry_proc_events, entries_proc_events); free(entry_proc_events, M_QAT); free(prv_data, M_QAT); } callout_schedule(&callout, ADF_STATE_CALLOUT_TIME); return ret; } static int adf_state_read(struct cdev *dev, struct uio *uio, int ioflag) { int ret = 0; struct adf_state_priv_data *prv_data = NULL; struct state_head *state_head = NULL; struct entry_state *entry_state = NULL; struct adf_state *state = NULL; struct entry_proc_events *proc_events = NULL; mtx_lock(&mtx); 
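	/*
	 * Dequeue a single struct adf_state event for this open instance:
	 * look up the per-open private data, copy the oldest queued event
	 * out to user space and drop it from the queue, then wake any other
	 * listeners that still have events pending.  A read that returns
	 * zero bytes means the queue is currently empty.  An illustrative
	 * consumer loop (not part of the driver) would be:
	 *
	 *	struct adf_state ev;
	 *	while (read(fd, &ev, sizeof(ev)) == sizeof(ev))
	 *		handle_event(ev.dev_id, ev.dev_state);
	 */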
ret = devfs_get_cdevpriv((void **)&prv_data); if (ret) { mtx_unlock(&mtx); return 0; } state_head = &prv_data->state_head; if (STAILQ_EMPTY(state_head)) { mtx_unlock(&mtx); return 0; } entry_state = STAILQ_FIRST(state_head); state = &entry_state->state; ret = uiomove(state, sizeof(struct adf_state), uio); if (!ret && !STAILQ_EMPTY(state_head)) { STAILQ_REMOVE_HEAD(state_head, entries_state); free(entry_state, M_QAT); } SLIST_FOREACH (proc_events, &proc_events_head, entries_proc_events) { if (!STAILQ_EMPTY(&proc_events->proc_events->state_head)) { prv_data = proc_events->proc_events; wakeup(prv_data); selwakeup(&prv_data->rsel); KNOTE_UNLOCKED(&prv_data->rsel.si_note, 0); } } mtx_unlock(&mtx); callout_schedule(&callout, ADF_STATE_CALLOUT_TIME); return ret; } static void adf_state_release(void *data) { struct adf_state_priv_data *prv_data = NULL; struct entry_state *entry_state = NULL; struct entry_proc_events *entry_proc_events = NULL; struct entry_proc_events *tmp = NULL; mtx_lock(&mtx); prv_data = (struct adf_state_priv_data *)data; knlist_delete(&prv_data->rsel.si_note, curthread, 1); knlist_destroy(&prv_data->rsel.si_note); seldrain(&prv_data->rsel); while (!STAILQ_EMPTY(&prv_data->state_head)) { entry_state = STAILQ_FIRST(&prv_data->state_head); STAILQ_REMOVE_HEAD(&prv_data->state_head, entries_state); free(entry_state, M_QAT); } SLIST_FOREACH_SAFE (entry_proc_events, &proc_events_head, entries_proc_events, tmp) { if (entry_proc_events->proc_events == prv_data) { SLIST_REMOVE(&proc_events_head, entry_proc_events, entry_proc_events, entries_proc_events); free(entry_proc_events, M_QAT); } } free(prv_data, M_QAT); mtx_unlock(&mtx); } diff --git a/sys/dev/qat/qat_common/adf_gen2_hw_data.c b/sys/dev/qat/qat_common/adf_gen2_hw_data.c index 10e86f8cd218..92fd2b9bc8e5 100644 --- a/sys/dev/qat/qat_common/adf_gen2_hw_data.c +++ b/sys/dev/qat/qat_common/adf_gen2_hw_data.c @@ -1,147 +1,147 @@ -/* SPDX-License-Identifier: BSD-3-Clause */ -/* Copyright(c) 2021 Intel Corporation */ +/* SPDX-License-Identifier: BSD-3-Clause */ +/* Copyright(c) 2007-2025 Intel Corporation */ #include "adf_gen2_hw_data.h" #include "icp_qat_hw.h" static u64 build_csr_ring_base_addr(bus_addr_t addr, u32 size) { return BUILD_RING_BASE_ADDR(addr, size); } static u32 read_csr_ring_head(struct resource *csr_base_addr, u32 bank, u32 ring) { return READ_CSR_RING_HEAD(csr_base_addr, bank, ring); } static void write_csr_ring_head(struct resource *csr_base_addr, u32 bank, u32 ring, u32 value) { WRITE_CSR_RING_HEAD(csr_base_addr, bank, ring, value); } static u32 read_csr_ring_tail(struct resource *csr_base_addr, u32 bank, u32 ring) { return READ_CSR_RING_TAIL(csr_base_addr, bank, ring); } static void write_csr_ring_tail(struct resource *csr_base_addr, u32 bank, u32 ring, u32 value) { WRITE_CSR_RING_TAIL(csr_base_addr, bank, ring, value); } static u32 read_csr_e_stat(struct resource *csr_base_addr, u32 bank) { return READ_CSR_E_STAT(csr_base_addr, bank); } static void write_csr_ring_config(struct resource *csr_base_addr, u32 bank, u32 ring, u32 value) { WRITE_CSR_RING_CONFIG(csr_base_addr, bank, ring, value); } -static dma_addr_t +static bus_addr_t read_csr_ring_base(struct resource *csr_base_addr, u32 bank, u32 ring) { return READ_CSR_RING_BASE(csr_base_addr, bank, ring); } static void write_csr_ring_base(struct resource *csr_base_addr, u32 bank, u32 ring, bus_addr_t addr) { WRITE_CSR_RING_BASE(csr_base_addr, bank, ring, addr); } static void write_csr_int_flag(struct resource *csr_base_addr, u32 bank, u32 value) { 
WRITE_CSR_INT_FLAG(csr_base_addr, bank, value); } static void write_csr_int_srcsel(struct resource *csr_base_addr, u32 bank) { WRITE_CSR_INT_SRCSEL(csr_base_addr, bank); } static void write_csr_int_col_en(struct resource *csr_base_addr, u32 bank, u32 value) { WRITE_CSR_INT_COL_EN(csr_base_addr, bank, value); } static void write_csr_int_col_ctl(struct resource *csr_base_addr, u32 bank, u32 value) { WRITE_CSR_INT_COL_CTL(csr_base_addr, bank, value); } static void write_csr_int_flag_and_col(struct resource *csr_base_addr, u32 bank, u32 value) { WRITE_CSR_INT_FLAG_AND_COL(csr_base_addr, bank, value); } static u32 read_csr_ring_srv_arb_en(struct resource *csr_base_addr, u32 bank) { return READ_CSR_RING_SRV_ARB_EN(csr_base_addr, bank); } static void write_csr_ring_srv_arb_en(struct resource *csr_base_addr, u32 bank, u32 value) { WRITE_CSR_RING_SRV_ARB_EN(csr_base_addr, bank, value); } static u32 get_int_col_ctl_enable_mask(void) { return ADF_RING_CSR_INT_COL_CTL_ENABLE; } void adf_gen2_init_hw_csr_info(struct adf_hw_csr_info *csr_info) { struct adf_hw_csr_ops *csr_ops = &csr_info->csr_ops; csr_info->arb_enable_mask = 0xFF; csr_info->csr_addr_offset = ADF_RING_CSR_ADDR_OFFSET; csr_info->ring_bundle_size = ADF_RING_BUNDLE_SIZE; csr_ops->build_csr_ring_base_addr = build_csr_ring_base_addr; csr_ops->read_csr_ring_head = read_csr_ring_head; csr_ops->write_csr_ring_head = write_csr_ring_head; csr_ops->read_csr_ring_tail = read_csr_ring_tail; csr_ops->write_csr_ring_tail = write_csr_ring_tail; csr_ops->read_csr_e_stat = read_csr_e_stat; csr_ops->write_csr_ring_config = write_csr_ring_config; csr_ops->read_csr_ring_base = read_csr_ring_base; csr_ops->write_csr_ring_base = write_csr_ring_base; csr_ops->write_csr_int_flag = write_csr_int_flag; csr_ops->write_csr_int_srcsel = write_csr_int_srcsel; csr_ops->write_csr_int_col_en = write_csr_int_col_en; csr_ops->write_csr_int_col_ctl = write_csr_int_col_ctl; csr_ops->write_csr_int_flag_and_col = write_csr_int_flag_and_col; csr_ops->read_csr_ring_srv_arb_en = read_csr_ring_srv_arb_en; csr_ops->write_csr_ring_srv_arb_en = write_csr_ring_srv_arb_en; csr_ops->get_int_col_ctl_enable_mask = get_int_col_ctl_enable_mask; } diff --git a/sys/dev/qat/qat_common/adf_gen4_pfvf.c b/sys/dev/qat/qat_common/adf_gen4_pfvf.c index 03bb90938e85..122abb301d31 100644 --- a/sys/dev/qat/qat_common/adf_gen4_pfvf.c +++ b/sys/dev/qat/qat_common/adf_gen4_pfvf.c @@ -1,130 +1,129 @@ /* SPDX-License-Identifier: BSD-3-Clause */ -/* Copyright(c) 2007-2022 Intel Corporation */ +/* Copyright(c) 2007-2025 Intel Corporation */ #include -#include #include #include "adf_accel_devices.h" #include "adf_common_drv.h" #include "adf_gen4_pfvf.h" #include "adf_pfvf_utils.h" #include "adf_pfvf_vf_proto.h" #define ADF_4XXX_PF2VM_OFFSET(i) (0x40B010 + ((i)*0x20)) #define ADF_4XXX_VM2PF_OFFSET(i) (0x40B014 + ((i)*0x20)) /* VF2PF interrupt source registers */ #define ADF_4XXX_VM2PF_SOU 0x41A180 #define ADF_4XXX_VM2PF_MSK 0x41A1C0 #define ADF_GEN4_VF_MSK 0xFFFF #define ADF_PFVF_GEN4_MSGTYPE_SHIFT 2 #define ADF_PFVF_GEN4_MSGTYPE_MASK 0x3F #define ADF_PFVF_GEN4_MSGDATA_SHIFT 8 #define ADF_PFVF_GEN4_MSGDATA_MASK 0xFFFFFF #define ADF_4XXXIOV_PF2VM_OFFSET 0x100C #define ADF_4XXXIOV_VM2PF_OFFSET 0x1008 static const struct pfvf_csr_format csr_gen4_fmt = { { ADF_PFVF_GEN4_MSGTYPE_SHIFT, ADF_PFVF_GEN4_MSGTYPE_MASK }, { ADF_PFVF_GEN4_MSGDATA_SHIFT, ADF_PFVF_GEN4_MSGDATA_MASK }, }; static u32 adf_gen4_vf_get_pfvf_offset(u32 i) { return ADF_4XXXIOV_PF2VM_OFFSET; } static u32 adf_gen4_vf_get_vfpf_offset(u32 i) { return 
ADF_4XXXIOV_VM2PF_OFFSET; } static int adf_gen4_pfvf_send(struct adf_accel_dev *accel_dev, struct pfvf_message msg, u32 pfvf_offset, struct mutex *csr_lock) { struct resource *pmisc_addr = adf_get_pmisc_base(accel_dev); u32 csr_val; int ret; csr_val = adf_pfvf_csr_msg_of(accel_dev, msg, &csr_gen4_fmt); if (unlikely(!csr_val)) return -EINVAL; mutex_lock(csr_lock); ADF_CSR_WR(pmisc_addr, pfvf_offset, csr_val | ADF_PFVF_INT); /* Wait for confirmation from remote that it received the message */ ret = read_poll_timeout(ADF_CSR_RD, csr_val, !(csr_val & ADF_PFVF_INT), ADF_PFVF_MSG_ACK_DELAY_US, ADF_PFVF_MSG_ACK_MAX_DELAY_US, true, pmisc_addr, pfvf_offset); if (ret < 0) device_printf(GET_DEV(accel_dev), "ACK not received from remote\n"); mutex_unlock(csr_lock); return ret; } static int adf_gen4_vf2pf_send(struct adf_accel_dev *accel_dev, struct pfvf_message msg, u32 pfvf_offset, struct mutex *csr_lock) { return adf_gen4_pfvf_send(accel_dev, msg, pfvf_offset, csr_lock); } static struct pfvf_message adf_gen4_pfvf_recv(struct adf_accel_dev *accel_dev, u32 pfvf_offset, u8 compat_ver) { struct resource *pmisc_addr = adf_get_pmisc_base(accel_dev); struct pfvf_message msg = { 0 }; u32 csr_val; /* Read message from the CSR */ csr_val = ADF_CSR_RD(pmisc_addr, pfvf_offset); if (!(csr_val & ADF_PFVF_INT)) { device_printf(GET_DEV(accel_dev), "Spurious PFVF interrupt, msg 0x%.8x. Ignored\n", csr_val); return msg; } /* We can now acknowledge the message reception by clearing the * interrupt bit */ ADF_CSR_WR(pmisc_addr, pfvf_offset, csr_val & ~ADF_PFVF_INT); /* Return the pfvf_message format */ return adf_pfvf_message_of(accel_dev, csr_val, &csr_gen4_fmt); } static struct pfvf_message adf_gen4_pf2vf_recv(struct adf_accel_dev *accel_dev, u32 pfvf_offset, u8 compat_ver) { return adf_gen4_pfvf_recv(accel_dev, pfvf_offset, compat_ver); } void adf_gen4_init_vf_pfvf_ops(struct adf_pfvf_ops *pfvf_ops) { pfvf_ops->enable_comms = adf_enable_vf2pf_comms; pfvf_ops->get_pf2vf_offset = adf_gen4_vf_get_pfvf_offset; pfvf_ops->get_vf2pf_offset = adf_gen4_vf_get_vfpf_offset; pfvf_ops->send_msg = adf_gen4_vf2pf_send; pfvf_ops->recv_msg = adf_gen4_pf2vf_recv; } diff --git a/sys/dev/qat/qat_common/adf_gen4vf_hw_csr_data.c b/sys/dev/qat/qat_common/adf_gen4vf_hw_csr_data.c index 5c06b413b528..bfa778ea301b 100644 --- a/sys/dev/qat/qat_common/adf_gen4vf_hw_csr_data.c +++ b/sys/dev/qat/qat_common/adf_gen4vf_hw_csr_data.c @@ -1,161 +1,161 @@ /* SPDX-License-Identifier: BSD-3-Clause */ -/* Copyright(c) 2007-2022 Intel Corporation */ +/* Copyright(c) 2007-2025 Intel Corporation */ #include "adf_accel_devices.h" #include "adf_gen4vf_hw_csr_data.h" static u64 -build_csr_ring_base_addr(dma_addr_t addr, u32 size) +build_csr_ring_base_addr(bus_addr_t addr, u32 size) { return BUILD_RING_BASE_ADDR_GEN4(addr, size); } static u32 read_csr_ring_head(struct resource *csr_base_addr, u32 bank, u32 ring) { return READ_CSR_RING_HEAD_GEN4VF(csr_base_addr, bank, ring); } static void write_csr_ring_head(struct resource *csr_base_addr, u32 bank, u32 ring, u32 value) { WRITE_CSR_RING_HEAD_GEN4VF(csr_base_addr, bank, ring, value); } static u32 read_csr_ring_tail(struct resource *csr_base_addr, u32 bank, u32 ring) { return READ_CSR_RING_TAIL_GEN4VF(csr_base_addr, bank, ring); } static void write_csr_ring_tail(struct resource *csr_base_addr, u32 bank, u32 ring, u32 value) { WRITE_CSR_RING_TAIL_GEN4VF(csr_base_addr, bank, ring, value); } static u32 read_csr_e_stat(struct resource *csr_base_addr, u32 bank) { return READ_CSR_E_STAT_GEN4VF(csr_base_addr, bank); } 
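/*
 * Ring base addresses are passed around as bus_addr_t, the FreeBSD
 * bus-space DMA address type, rather than the Linux-style dma_addr_t
 * used previously; read_csr_ring_base() and write_csr_ring_base() below
 * return and accept that type.
 */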
static void write_csr_ring_config(struct resource *csr_base_addr, u32 bank, u32 ring, u32 value) { WRITE_CSR_RING_CONFIG_GEN4VF(csr_base_addr, bank, ring, value); } -static dma_addr_t +static bus_addr_t read_csr_ring_base(struct resource *csr_base_addr, u32 bank, u32 ring) { return READ_CSR_RING_BASE_GEN4VF(csr_base_addr, bank, ring); } static void write_csr_ring_base(struct resource *csr_base_addr, u32 bank, u32 ring, - dma_addr_t addr) + bus_addr_t addr) { WRITE_CSR_RING_BASE_GEN4VF(csr_base_addr, bank, ring, addr); } static void write_csr_int_flag(struct resource *csr_base_addr, u32 bank, u32 value) { WRITE_CSR_INT_FLAG_GEN4VF(csr_base_addr, bank, value); } static void write_csr_int_srcsel(struct resource *csr_base_addr, u32 bank) { WRITE_CSR_INT_SRCSEL_GEN4VF(csr_base_addr, bank); } static void write_csr_int_col_en(struct resource *csr_base_addr, u32 bank, u32 value) { WRITE_CSR_INT_COL_EN_GEN4VF(csr_base_addr, bank, value); } static void write_csr_int_col_ctl(struct resource *csr_base_addr, u32 bank, u32 value) { WRITE_CSR_INT_COL_CTL_GEN4VF(csr_base_addr, bank, value); } static void write_csr_int_flag_and_col(struct resource *csr_base_addr, u32 bank, u32 value) { WRITE_CSR_INT_FLAG_AND_COL_GEN4VF(csr_base_addr, bank, value); } static u32 read_csr_ring_srv_arb_en(struct resource *csr_base_addr, u32 bank) { return READ_CSR_RING_SRV_ARB_EN_GEN4VF(csr_base_addr, bank); } static void write_csr_ring_srv_arb_en(struct resource *csr_base_addr, u32 bank, u32 value) { WRITE_CSR_RING_SRV_ARB_EN_GEN4VF(csr_base_addr, bank, value); } static u32 get_src_sel_mask(void) { return ADF_BANK_INT_SRC_SEL_MASK_GEN4; } static u32 get_int_col_ctl_enable_mask(void) { return ADF_RING_CSR_INT_COL_CTL_ENABLE; } static u32 get_bank_irq_mask(u32 irq_mask) { return 0x1; } void gen4vf_init_hw_csr_info(struct adf_hw_csr_info *csr_info) { struct adf_hw_csr_ops *csr_ops = &csr_info->csr_ops; csr_info->csr_addr_offset = ADF_RING_CSR_ADDR_OFFSET_GEN4VF; csr_info->ring_bundle_size = ADF_RING_BUNDLE_SIZE_GEN4; csr_info->bank_int_flag_clear_mask = ADF_BANK_INT_FLAG_CLEAR_MASK_GEN4; csr_info->num_rings_per_int_srcsel = ADF_RINGS_PER_INT_SRCSEL_GEN4; csr_info->arb_enable_mask = 0x1; csr_ops->build_csr_ring_base_addr = build_csr_ring_base_addr; csr_ops->read_csr_ring_head = read_csr_ring_head; csr_ops->write_csr_ring_head = write_csr_ring_head; csr_ops->read_csr_ring_tail = read_csr_ring_tail; csr_ops->write_csr_ring_tail = write_csr_ring_tail; csr_ops->read_csr_e_stat = read_csr_e_stat; csr_ops->write_csr_ring_config = write_csr_ring_config; csr_ops->read_csr_ring_base = read_csr_ring_base; csr_ops->write_csr_ring_base = write_csr_ring_base; csr_ops->write_csr_int_flag = write_csr_int_flag; csr_ops->write_csr_int_srcsel = write_csr_int_srcsel; csr_ops->write_csr_int_col_en = write_csr_int_col_en; csr_ops->write_csr_int_col_ctl = write_csr_int_col_ctl; csr_ops->write_csr_int_flag_and_col = write_csr_int_flag_and_col; csr_ops->read_csr_ring_srv_arb_en = read_csr_ring_srv_arb_en; csr_ops->write_csr_ring_srv_arb_en = write_csr_ring_srv_arb_en; csr_ops->get_src_sel_mask = get_src_sel_mask; csr_ops->get_int_col_ctl_enable_mask = get_int_col_ctl_enable_mask; csr_ops->get_bank_irq_mask = get_bank_irq_mask; } diff --git a/sys/dev/qat/qat_common/adf_init.c b/sys/dev/qat/qat_common/adf_init.c index 9f239b8f63d3..6bd0d85aae69 100644 --- a/sys/dev/qat/qat_common/adf_init.c +++ b/sys/dev/qat/qat_common/adf_init.c @@ -1,755 +1,819 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright(c) 2007-2025 Intel Corporation */ #include 
"qat_freebsd.h" #include "adf_cfg.h" #include "adf_common_drv.h" #include "adf_dbgfs.h" #include "adf_accel_devices.h" #include "icp_qat_uclo.h" #include "icp_qat_fw.h" #include "icp_qat_fw_init_admin.h" #include "adf_cfg_strings.h" #include "adf_dev_err.h" #include "adf_uio.h" #include "adf_transport_access_macros.h" #include "adf_transport_internal.h" #include #include #include "adf_accel_devices.h" #include "adf_cfg.h" #include "adf_common_drv.h" #include "icp_qat_fw.h" +#if defined(QAT_UIO) +#include "adf_cfg_device.h" +#endif /* QAT_UIO*/ + /* Mask used to check the CompressAndVerify capability bit */ #define DC_CNV_EXTENDED_CAPABILITY (0x01) /* Mask used to check the CompressAndVerifyAndRecover capability bit */ #define DC_CNVNR_EXTENDED_CAPABILITY (0x100) static LIST_HEAD(service_table); static DEFINE_MUTEX(service_lock); +static int adf_dev_init_locked(struct adf_accel_dev *accel_dev); +static int adf_dev_start_locked(struct adf_accel_dev *accel_dev); +static int adf_dev_stop_locked(struct adf_accel_dev *accel_dev); +static void adf_dev_shutdown_locked(struct adf_accel_dev *accel_dev); + static void adf_service_add(struct service_hndl *service) { mutex_lock(&service_lock); list_add(&service->list, &service_table); mutex_unlock(&service_lock); } int adf_service_register(struct service_hndl *service) { memset(service->init_status, 0, sizeof(service->init_status)); memset(service->start_status, 0, sizeof(service->start_status)); adf_service_add(service); return 0; } static void adf_service_remove(struct service_hndl *service) { mutex_lock(&service_lock); list_del(&service->list); mutex_unlock(&service_lock); } int adf_service_unregister(struct service_hndl *service) { int i; for (i = 0; i < ARRAY_SIZE(service->init_status); i++) { if (service->init_status[i] || service->start_status[i]) { pr_err("QAT: Could not remove active service [%d]\n", i); return EFAULT; } } adf_service_remove(service); return 0; } static int adf_cfg_add_device_params(struct adf_accel_dev *accel_dev) { char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES]; char hw_version[ADF_CFG_MAX_VAL_LEN_IN_BYTES]; char mmp_version[ADF_CFG_MAX_VAL_LEN_IN_BYTES]; struct adf_hw_device_data *hw_data = NULL; unsigned long val; if (!accel_dev) return -EINVAL; hw_data = accel_dev->hw_device; if (adf_cfg_section_add(accel_dev, ADF_GENERAL_SEC)) goto err; snprintf(key, sizeof(key), ADF_DEV_MAX_BANKS); val = GET_MAX_BANKS(accel_dev); if (adf_cfg_add_key_value_param( accel_dev, ADF_GENERAL_SEC, key, (void *)&val, ADF_DEC)) goto err; snprintf(key, sizeof(key), ADF_DEV_CAPABILITIES_MASK); val = hw_data->accel_capabilities_mask; if (adf_cfg_add_key_value_param( accel_dev, ADF_GENERAL_SEC, key, (void *)val, ADF_HEX)) goto err; snprintf(key, sizeof(key), ADF_DEV_PKG_ID); val = accel_dev->accel_id; if (adf_cfg_add_key_value_param( accel_dev, ADF_GENERAL_SEC, key, (void *)&val, ADF_DEC)) goto err; snprintf(key, sizeof(key), ADF_DEV_NODE_ID); val = dev_to_node(GET_DEV(accel_dev)); if (adf_cfg_add_key_value_param( accel_dev, ADF_GENERAL_SEC, key, (void *)&val, ADF_DEC)) goto err; snprintf(key, sizeof(key), ADF_DEV_MAX_RINGS_PER_BANK); val = hw_data->num_rings_per_bank; if (adf_cfg_add_key_value_param( accel_dev, ADF_GENERAL_SEC, key, (void *)&val, ADF_DEC)) goto err; snprintf(key, sizeof(key), ADF_HW_REV_ID_KEY); snprintf(hw_version, ADF_CFG_MAX_VAL_LEN_IN_BYTES, "%d", accel_dev->accel_pci_dev.revid); if (adf_cfg_add_key_value_param( accel_dev, ADF_GENERAL_SEC, key, (void *)hw_version, ADF_STR)) goto err; snprintf(key, sizeof(key), ADF_MMP_VER_KEY); 
snprintf(mmp_version, ADF_CFG_MAX_VAL_LEN_IN_BYTES, "%d.%d.%d", accel_dev->fw_versions.mmp_version_major, accel_dev->fw_versions.mmp_version_minor, accel_dev->fw_versions.mmp_version_patch); if (adf_cfg_add_key_value_param( accel_dev, ADF_GENERAL_SEC, key, (void *)mmp_version, ADF_STR)) goto err; return 0; err: device_printf(GET_DEV(accel_dev), "Failed to add internal values to accel_dev cfg\n"); return -EINVAL; } static int adf_cfg_add_fw_version(struct adf_accel_dev *accel_dev) { char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES]; char fw_version[ADF_CFG_MAX_VAL_LEN_IN_BYTES]; snprintf(key, sizeof(key), ADF_UOF_VER_KEY); snprintf(fw_version, ADF_CFG_MAX_VAL_LEN_IN_BYTES, "%d.%d.%d", accel_dev->fw_versions.fw_version_major, accel_dev->fw_versions.fw_version_minor, accel_dev->fw_versions.fw_version_patch); if (adf_cfg_add_key_value_param( accel_dev, ADF_GENERAL_SEC, key, (void *)fw_version, ADF_STR)) return EFAULT; return 0; } static int adf_cfg_add_ext_params(struct adf_accel_dev *accel_dev) { char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES]; struct adf_hw_device_data *hw_data = accel_dev->hw_device; unsigned long val; snprintf(key, sizeof(key), ADF_DC_EXTENDED_FEATURES); val = hw_data->extended_dc_capabilities; if (adf_cfg_add_key_value_param( accel_dev, ADF_GENERAL_SEC, key, (void *)val, ADF_HEX)) return -EINVAL; return 0; } void adf_error_notifier(uintptr_t arg) { struct adf_accel_dev *accel_dev = (struct adf_accel_dev *)arg; struct service_hndl *service; struct list_head *list_itr; list_for_each(list_itr, &service_table) { service = list_entry(list_itr, struct service_hndl, list); if (service->event_hld(accel_dev, ADF_EVENT_ERROR)) device_printf(GET_DEV(accel_dev), "Failed to send error event to %s.\n", service->name); } } /** * adf_set_ssm_wdtimer() - Initialize the slice hang watchdog timer. * * Return: 0 on success, error code otherwise. */ int adf_set_ssm_wdtimer(struct adf_accel_dev *accel_dev) { struct adf_hw_device_data *hw_data = accel_dev->hw_device; struct adf_bar *misc_bar = &GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)]; struct resource *csr = misc_bar->virt_addr; u32 i; unsigned int mask; u32 clk_per_sec = hw_data->get_clock_speed(hw_data); u32 timer_val = ADF_WDT_TIMER_SYM_COMP_MS * (clk_per_sec / 1000); u32 timer_val_pke = ADF_GEN2_SSM_WDT_PKE_DEFAULT_VALUE; char timer_str[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = { 0 }; /* Get Watch Dog Timer for CySym+Comp from the configuration */ if (!adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC, ADF_DEV_SSM_WDT_BULK, (char *)timer_str)) { if (!compat_strtouint((char *)timer_str, ADF_CFG_BASE_DEC, &timer_val)) /* Convert msec to CPP clocks */ timer_val = timer_val * (clk_per_sec / 1000); } /* Get Watch Dog Timer for CyAsym from the configuration */ if (!adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC, ADF_DEV_SSM_WDT_PKE, (char *)timer_str)) { if (!compat_strtouint((char *)timer_str, ADF_CFG_BASE_DEC, &timer_val_pke)) /* Convert msec to CPP clocks */ timer_val_pke = timer_val_pke * (clk_per_sec / 1000); } for (i = 0, mask = hw_data->accel_mask; mask; i++, mask >>= 1) { if (!(mask & 1)) continue; /* Enable Watch Dog Timer for CySym + Comp */ ADF_CSR_WR(csr, ADF_SSMWDT(i), timer_val); /* Enable Watch Dog Timer for CyAsym */ ADF_CSR_WR(csr, ADF_SSMWDTPKE(i), timer_val_pke); } return 0; } /** * adf_dev_init() - Init data structures and services for the given accel device * @accel_dev: Pointer to acceleration device. * * Initialize the ring data structures and the admin comms and arbitration * services. * * Return: 0 on success, error code otherwise. 
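 *
 * Note: init, start, stop and shutdown are serialized per device; each
 * public entry point takes accel_dev->lock and delegates to a static
 * *_locked variant.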
*/ int adf_dev_init(struct adf_accel_dev *accel_dev) +{ + int ret = 0; + + mutex_lock(&accel_dev->lock); + ret = adf_dev_init_locked(accel_dev); + mutex_unlock(&accel_dev->lock); + + return ret; +} + +static int +adf_dev_init_locked(struct adf_accel_dev *accel_dev) { struct service_hndl *service; struct list_head *list_itr; struct adf_hw_device_data *hw_data = accel_dev->hw_device; char value[ADF_CFG_MAX_VAL_LEN_IN_BYTES]; int ret = 0; sysctl_ctx_init(&accel_dev->sysctl_ctx); set_bit(ADF_STATUS_SYSCTL_CTX_INITIALISED, &accel_dev->status); if (!hw_data) { device_printf(GET_DEV(accel_dev), "Failed to init device - hw_data not set\n"); return EFAULT; } if (hw_data->reset_hw_units) hw_data->reset_hw_units(accel_dev); if (!test_bit(ADF_STATUS_CONFIGURED, &accel_dev->status) && !accel_dev->is_vf) { device_printf(GET_DEV(accel_dev), "Device not configured\n"); return EFAULT; } if (adf_init_etr_data(accel_dev)) { device_printf(GET_DEV(accel_dev), "Failed initialize etr\n"); return EFAULT; } if (hw_data->init_device && hw_data->init_device(accel_dev)) { device_printf(GET_DEV(accel_dev), "Failed to initialize device\n"); return EFAULT; } if (hw_data->init_accel_units && hw_data->init_accel_units(accel_dev)) { device_printf(GET_DEV(accel_dev), "Failed initialize accel_units\n"); return EFAULT; } if (hw_data->init_admin_comms && hw_data->init_admin_comms(accel_dev)) { device_printf(GET_DEV(accel_dev), "Failed initialize admin comms\n"); return EFAULT; } if (hw_data->init_arb && hw_data->init_arb(accel_dev)) { device_printf(GET_DEV(accel_dev), "Failed initialize hw arbiter\n"); return EFAULT; } if (hw_data->set_asym_rings_mask) hw_data->set_asym_rings_mask(accel_dev); hw_data->enable_ints(accel_dev); if (adf_ae_init(accel_dev)) { device_printf(GET_DEV(accel_dev), "Failed to initialise Acceleration Engine\n"); return EFAULT; } set_bit(ADF_STATUS_AE_INITIALISED, &accel_dev->status); if (adf_ae_fw_load(accel_dev)) { device_printf(GET_DEV(accel_dev), "Failed to load acceleration FW\n"); return EFAULT; } set_bit(ADF_STATUS_AE_UCODE_LOADED, &accel_dev->status); if (hw_data->alloc_irq(accel_dev)) { device_printf(GET_DEV(accel_dev), "Failed to allocate interrupts\n"); return EFAULT; } set_bit(ADF_STATUS_IRQ_ALLOCATED, &accel_dev->status); if (hw_data->init_ras && hw_data->init_ras(accel_dev)) { device_printf(GET_DEV(accel_dev), "Failed to init RAS\n"); return EFAULT; } hw_data->enable_ints(accel_dev); hw_data->enable_error_correction(accel_dev); ret = hw_data->csr_info.pfvf_ops.enable_comms(accel_dev); if (ret) return ret; if (adf_cfg_add_device_params(accel_dev)) return EFAULT; if (hw_data->add_pke_stats && hw_data->add_pke_stats(accel_dev)) return EFAULT; if (hw_data->add_misc_error && hw_data->add_misc_error(accel_dev)) return EFAULT; /* * Subservice initialisation is divided into two stages: init and start. * This is to facilitate any ordering dependencies between services * prior to starting any of the accelerators. 
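	 * Each handler registered via adf_service_register() receives
	 * ADF_EVENT_INIT here and ADF_EVENT_START later from adf_dev_start();
	 * the per-device init_status/start_status bitmaps record which stage
	 * each service has completed for this accel_id.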
*/ list_for_each(list_itr, &service_table) { service = list_entry(list_itr, struct service_hndl, list); if (service->event_hld(accel_dev, ADF_EVENT_INIT)) { device_printf(GET_DEV(accel_dev), "Failed to initialise service %s\n", service->name); return EFAULT; } set_bit(accel_dev->accel_id, service->init_status); } /* Read autoreset on error parameter */ ret = adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC, ADF_AUTO_RESET_ON_ERROR, value); if (!ret) { if (compat_strtouint(value, 10, &accel_dev->autoreset_on_error)) { device_printf( GET_DEV(accel_dev), "Failed converting %s to a decimal value\n", ADF_AUTO_RESET_ON_ERROR); return EFAULT; } } return 0; } /** * adf_dev_start() - Start acceleration service for the given accel device * @accel_dev: Pointer to acceleration device. * * Function notifies all the registered services that the acceleration device * is ready to be used. * To be used by QAT device specific drivers. * * Return: 0 on success, error code otherwise. */ int adf_dev_start(struct adf_accel_dev *accel_dev) +{ + int ret = 0; + + mutex_lock(&accel_dev->lock); + ret = adf_dev_start_locked(accel_dev); + mutex_unlock(&accel_dev->lock); + + return ret; +} + +static int +adf_dev_start_locked(struct adf_accel_dev *accel_dev) { struct adf_hw_device_data *hw_data = accel_dev->hw_device; struct service_hndl *service; struct list_head *list_itr; set_bit(ADF_STATUS_STARTING, &accel_dev->status); if (adf_devmgr_verify_id(&accel_dev->accel_id)) { device_printf(GET_DEV(accel_dev), "QAT: Device %d not found\n", accel_dev->accel_id); return ENODEV; } if (adf_ae_start(accel_dev)) { device_printf(GET_DEV(accel_dev), "AE Start Failed\n"); return EFAULT; } set_bit(ADF_STATUS_AE_STARTED, &accel_dev->status); if (hw_data->send_admin_init(accel_dev)) { device_printf(GET_DEV(accel_dev), "Failed to send init message\n"); return EFAULT; } if (adf_cfg_add_fw_version(accel_dev)) { device_printf(GET_DEV(accel_dev), "Failed to update configuration FW version\n"); return EFAULT; } if (hw_data->measure_clock) hw_data->measure_clock(accel_dev); /* * Set ssm watch dog timer for slice hang detection * Note! Not supported on devices older than C62x */ if (hw_data->set_ssm_wdtimer && hw_data->set_ssm_wdtimer(accel_dev)) { device_printf(GET_DEV(accel_dev), "QAT: Failed to set ssm watch dog timer\n"); return EFAULT; } if (hw_data->int_timer_init && hw_data->int_timer_init(accel_dev)) { device_printf(GET_DEV(accel_dev), "Failed to init heartbeat interrupt timer\n"); return -EFAULT; } list_for_each(list_itr, &service_table) { service = list_entry(list_itr, struct service_hndl, list); if (service->event_hld(accel_dev, ADF_EVENT_START)) { device_printf(GET_DEV(accel_dev), "Failed to start service %s\n", service->name); return EFAULT; } set_bit(accel_dev->accel_id, service->start_status); } if (accel_dev->is_vf || !accel_dev->u1.pf.vf_info) { /*Register UIO devices */ if (adf_uio_register(accel_dev)) { adf_uio_remove(accel_dev); device_printf(GET_DEV(accel_dev), "Failed to register UIO devices\n"); set_bit(ADF_STATUS_STARTING, &accel_dev->status); clear_bit(ADF_STATUS_STARTED, &accel_dev->status); return ENODEV; } } if (!test_bit(ADF_STATUS_RESTARTING, &accel_dev->status) && adf_cfg_add_ext_params(accel_dev)) return EFAULT; clear_bit(ADF_STATUS_STARTING, &accel_dev->status); set_bit(ADF_STATUS_STARTED, &accel_dev->status); adf_dbgfs_add(accel_dev); return 0; } /** * adf_dev_stop() - Stop acceleration service for the given accel device * @accel_dev: Pointer to acceleration device. 
* * Function notifies all the registered services that the acceleration device * is shuting down. * To be used by QAT device specific drivers. * * Return: 0 on success, error code otherwise. */ int adf_dev_stop(struct adf_accel_dev *accel_dev) +{ + int ret = 0; + + mutex_lock(&accel_dev->lock); + ret = adf_dev_stop_locked(accel_dev); + mutex_unlock(&accel_dev->lock); + + return ret; +} + +static int +adf_dev_stop_locked(struct adf_accel_dev *accel_dev) { struct service_hndl *service; struct list_head *list_itr; if (adf_devmgr_verify_id(&accel_dev->accel_id)) { device_printf(GET_DEV(accel_dev), "QAT: Device %d not found\n", accel_dev->accel_id); return ENODEV; } + + if (!test_bit(ADF_STATUS_CONFIGURED, &accel_dev->status)) + return 0; + if (!adf_dev_started(accel_dev) && !test_bit(ADF_STATUS_STARTING, &accel_dev->status)) { return 0; } if (adf_dev_stop_notify_sync(accel_dev)) { device_printf( GET_DEV(accel_dev), "Waiting for device un-busy failed. Retries limit reached\n"); return EBUSY; } adf_dbgfs_rm(accel_dev); clear_bit(ADF_STATUS_STARTING, &accel_dev->status); clear_bit(ADF_STATUS_STARTED, &accel_dev->status); if (accel_dev->hw_device->int_timer_exit) accel_dev->hw_device->int_timer_exit(accel_dev); list_for_each(list_itr, &service_table) { service = list_entry(list_itr, struct service_hndl, list); if (!test_bit(accel_dev->accel_id, service->start_status)) continue; clear_bit(accel_dev->accel_id, service->start_status); } if (accel_dev->is_vf || !accel_dev->u1.pf.vf_info) { /* Remove UIO Devices */ adf_uio_remove(accel_dev); } if (test_bit(ADF_STATUS_AE_STARTED, &accel_dev->status)) { if (adf_ae_stop(accel_dev)) device_printf(GET_DEV(accel_dev), "failed to stop AE\n"); else clear_bit(ADF_STATUS_AE_STARTED, &accel_dev->status); } return 0; } /** * adf_dev_shutdown() - shutdown acceleration services and data strucutures * @accel_dev: Pointer to acceleration device * * Cleanup the ring data structures and the admin comms and arbitration * services. 
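 * This releases the loaded firmware, shuts down the acceleration engines,
 * sends ADF_EVENT_SHUTDOWN to registered services, frees interrupts and,
 * unless the device is restarting, deletes the stored configuration.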
*/ void adf_dev_shutdown(struct adf_accel_dev *accel_dev) +{ + mutex_lock(&accel_dev->lock); + adf_dev_shutdown_locked(accel_dev); + mutex_unlock(&accel_dev->lock); +} + +static void +adf_dev_shutdown_locked(struct adf_accel_dev *accel_dev) { struct adf_hw_device_data *hw_data = accel_dev->hw_device; struct service_hndl *service; struct list_head *list_itr; + if (!test_bit(ADF_STATUS_CONFIGURED, &accel_dev->status)) + return; + if (test_bit(ADF_STATUS_SYSCTL_CTX_INITIALISED, &accel_dev->status)) { sysctl_ctx_free(&accel_dev->sysctl_ctx); clear_bit(ADF_STATUS_SYSCTL_CTX_INITIALISED, &accel_dev->status); } if (!hw_data) { device_printf( GET_DEV(accel_dev), "QAT: Failed to shutdown device - hw_data not set\n"); return; } if (test_bit(ADF_STATUS_AE_UCODE_LOADED, &accel_dev->status)) { adf_ae_fw_release(accel_dev); clear_bit(ADF_STATUS_AE_UCODE_LOADED, &accel_dev->status); } if (test_bit(ADF_STATUS_AE_INITIALISED, &accel_dev->status)) { if (adf_ae_shutdown(accel_dev)) device_printf(GET_DEV(accel_dev), "Failed to shutdown Accel Engine\n"); else clear_bit(ADF_STATUS_AE_INITIALISED, &accel_dev->status); } list_for_each(list_itr, &service_table) { service = list_entry(list_itr, struct service_hndl, list); if (!test_bit(accel_dev->accel_id, service->init_status)) continue; if (service->event_hld(accel_dev, ADF_EVENT_SHUTDOWN)) device_printf(GET_DEV(accel_dev), "Failed to shutdown service %s\n", service->name); else clear_bit(accel_dev->accel_id, service->init_status); } hw_data->disable_iov(accel_dev); if (test_bit(ADF_STATUS_IRQ_ALLOCATED, &accel_dev->status)) { hw_data->free_irq(accel_dev); clear_bit(ADF_STATUS_IRQ_ALLOCATED, &accel_dev->status); } /* Delete configuration only if not restarting */ - if (!test_bit(ADF_STATUS_RESTARTING, &accel_dev->status)) + if (!test_bit(ADF_STATUS_RESTARTING, &accel_dev->status)) { adf_cfg_del_all(accel_dev); +#ifdef QAT_UIO + adf_cfg_device_clear_all(accel_dev); +#endif + } if (hw_data->remove_pke_stats) hw_data->remove_pke_stats(accel_dev); if (hw_data->remove_misc_error) hw_data->remove_misc_error(accel_dev); if (hw_data->exit_ras) hw_data->exit_ras(accel_dev); if (hw_data->exit_arb) hw_data->exit_arb(accel_dev); if (hw_data->exit_admin_comms) hw_data->exit_admin_comms(accel_dev); if (hw_data->exit_accel_units) hw_data->exit_accel_units(accel_dev); adf_cleanup_etr_data(accel_dev); if (hw_data->restore_device) hw_data->restore_device(accel_dev); } /** * adf_dev_reset() - Reset acceleration service for the given accel device * @accel_dev: Pointer to acceleration device. * @mode: Specifies reset mode - synchronous or asynchronous. * Function notifies all the registered services that the acceleration device * is resetting. * To be used by QAT device specific drivers. * * Return: 0 on success, error code otherwise. 
*/ int adf_dev_reset(struct adf_accel_dev *accel_dev, enum adf_dev_reset_mode mode) { return adf_dev_aer_schedule_reset(accel_dev, mode); } int adf_dev_restarting_notify(struct adf_accel_dev *accel_dev) { struct service_hndl *service; struct list_head *list_itr; list_for_each(list_itr, &service_table) { service = list_entry(list_itr, struct service_hndl, list); if (service->event_hld(accel_dev, ADF_EVENT_RESTARTING)) device_printf(GET_DEV(accel_dev), "Failed to restart service %s.\n", service->name); } return 0; } int adf_dev_restarting_notify_sync(struct adf_accel_dev *accel_dev) { int times; adf_dev_restarting_notify(accel_dev); for (times = 0; times < ADF_STOP_RETRY; times++) { if (!adf_dev_in_use(accel_dev)) break; dev_dbg(GET_DEV(accel_dev), "retry times=%d\n", times); pause_ms("adfstop", 100); } if (adf_dev_in_use(accel_dev)) { clear_bit(ADF_STATUS_RESTARTING, &accel_dev->status); device_printf(GET_DEV(accel_dev), "Device still in use during reset sequence.\n"); return EBUSY; } return 0; } int adf_dev_stop_notify_sync(struct adf_accel_dev *accel_dev) { int times; struct service_hndl *service; struct list_head *list_itr; list_for_each(list_itr, &service_table) { service = list_entry(list_itr, struct service_hndl, list); if (service->event_hld(accel_dev, ADF_EVENT_STOP)) device_printf(GET_DEV(accel_dev), "Failed to restart service %s.\n", service->name); } for (times = 0; times < ADF_STOP_RETRY; times++) { if (!adf_dev_in_use(accel_dev)) break; dev_dbg(GET_DEV(accel_dev), "retry times=%d\n", times); pause_ms("adfstop", 100); } if (adf_dev_in_use(accel_dev)) { clear_bit(ADF_STATUS_RESTARTING, &accel_dev->status); device_printf(GET_DEV(accel_dev), "Device still in use during stop sequence.\n"); return EBUSY; } return 0; } int adf_dev_restarted_notify(struct adf_accel_dev *accel_dev) { struct service_hndl *service; struct list_head *list_itr; list_for_each(list_itr, &service_table) { service = list_entry(list_itr, struct service_hndl, list); if (service->event_hld(accel_dev, ADF_EVENT_RESTARTED)) device_printf(GET_DEV(accel_dev), "Failed to restart service %s.\n", service->name); } return 0; } diff --git a/sys/dev/qat/qat_common/adf_pfvf_vf_msg.c b/sys/dev/qat/qat_common/adf_pfvf_vf_msg.c index dc0cb0110ec3..8c0221d344ff 100644 --- a/sys/dev/qat/qat_common/adf_pfvf_vf_msg.c +++ b/sys/dev/qat/qat_common/adf_pfvf_vf_msg.c @@ -1,184 +1,202 @@ /* SPDX-License-Identifier: BSD-3-Clause */ -/* Copyright(c) 2007-2022 Intel Corporation */ +/* Copyright(c) 2007-2025 Intel Corporation */ #include #include "adf_accel_devices.h" #include "adf_common_drv.h" #include "adf_pfvf_msg.h" #include "adf_pfvf_vf_msg.h" #include "adf_pfvf_vf_proto.h" /** * adf_vf2pf_notify_init() - send init msg to PF * @accel_dev: Pointer to acceleration VF device. * * Function sends an init message from the VF to a PF * * Return: 0 on success, error code otherwise. */ int adf_vf2pf_notify_init(struct adf_accel_dev *accel_dev) { struct pfvf_message msg = { .type = ADF_VF2PF_MSGTYPE_INIT }; if (adf_send_vf2pf_msg(accel_dev, msg)) { device_printf(GET_DEV(accel_dev), "Failed to send Init event to PF\n"); return -EFAULT; } set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status); return 0; } /** * adf_vf2pf_notify_shutdown() - send shutdown msg to PF * @accel_dev: Pointer to acceleration VF device. 
* * Function sends a shutdown message from the VF to a PF * * Return: void */ void adf_vf2pf_notify_shutdown(struct adf_accel_dev *accel_dev) { struct pfvf_message msg = { .type = ADF_VF2PF_MSGTYPE_SHUTDOWN }; if (test_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status)) if (adf_send_vf2pf_msg(accel_dev, msg)) device_printf(GET_DEV(accel_dev), "Failed to send Shutdown event to PF\n"); } int adf_vf2pf_request_version(struct adf_accel_dev *accel_dev) { u8 pf_version; int compat; int ret; struct pfvf_message resp; struct pfvf_message msg = { .type = ADF_VF2PF_MSGTYPE_COMPAT_VER_REQ, .data = ADF_PFVF_COMPAT_THIS_VERSION, }; BUILD_BUG_ON(ADF_PFVF_COMPAT_THIS_VERSION > 255); ret = adf_send_vf2pf_req(accel_dev, msg, &resp); if (ret) { device_printf( GET_DEV(accel_dev), "Failed to send Compatibility Version Request.\n"); return ret; } pf_version = FIELD_GET(ADF_PF2VF_VERSION_RESP_VERS_MASK, resp.data); compat = FIELD_GET(ADF_PF2VF_VERSION_RESP_RESULT_MASK, resp.data); /* Response from PF received, check compatibility */ switch (compat) { case ADF_PF2VF_VF_COMPATIBLE: break; case ADF_PF2VF_VF_COMPAT_UNKNOWN: /* VF is newer than PF - compatible for now */ break; case ADF_PF2VF_VF_INCOMPATIBLE: device_printf( GET_DEV(accel_dev), "PF (vers %d) and VF (vers %d) are not compatible\n", pf_version, ADF_PFVF_COMPAT_THIS_VERSION); return -EINVAL; default: device_printf( GET_DEV(accel_dev), "Invalid response from PF; assume not compatible\n"); return -EINVAL; } accel_dev->u1.vf.pf_compat_ver = pf_version; return 0; } +void +adf_vf2pf_restarting_complete(struct adf_accel_dev *accel_dev) +{ + struct pfvf_message msg = { .type = + ADF_VF2PF_MSGTYPE_RESTARTING_COMPLETE }; + + if (accel_dev->u1.vf.pf_compat_ver < ADF_PFVF_COMPAT_FALLBACK) + return; + + if (adf_send_vf2pf_msg(accel_dev, msg)) { + device_printf( + GET_DEV(accel_dev), + "Failed to send Restarting complete event to PF\n"); + } +} + int adf_vf2pf_get_capabilities(struct adf_accel_dev *accel_dev) { struct adf_hw_device_data *hw_data = accel_dev->hw_device; struct capabilities_v3 cap_msg = { 0 }; unsigned int len = sizeof(cap_msg); if (accel_dev->u1.vf.pf_compat_ver < ADF_PFVF_COMPAT_CAPABILITIES) /* The PF is too old to support the extended capabilities */ return 0; if (adf_send_vf2pf_blkmsg_req(accel_dev, ADF_VF2PF_BLKMSG_REQ_CAP_SUMMARY, (u8 *)&cap_msg, &len)) { device_printf(GET_DEV(accel_dev), "QAT: Failed to get block message response\n"); return -EFAULT; } switch (cap_msg.hdr.version) { default: /* Newer version received, handle only the know parts */ fallthrough; case ADF_PFVF_CAPABILITIES_V3_VERSION: if (likely(len >= sizeof(struct capabilities_v3))) hw_data->clock_frequency = cap_msg.frequency; else device_printf(GET_DEV(accel_dev), "Could not get frequency"); fallthrough; case ADF_PFVF_CAPABILITIES_V2_VERSION: if (likely(len >= sizeof(struct capabilities_v2))) { hw_data->accel_capabilities_mask = cap_msg.capabilities; } else { device_printf(GET_DEV(accel_dev), "Could not get capabilities"); } fallthrough; case ADF_PFVF_CAPABILITIES_V1_VERSION: if (likely(len >= sizeof(struct capabilities_v1))) { hw_data->extended_dc_capabilities = cap_msg.ext_dc_caps; } else { device_printf( GET_DEV(accel_dev), "Capabilities message truncated to %d bytes\n", len); return -EFAULT; } } return 0; } int adf_vf2pf_get_ring_to_svc(struct adf_accel_dev *accel_dev) { struct ring_to_svc_map_v1 rts_map_msg = { 0 }; unsigned int len = sizeof(rts_map_msg); if (accel_dev->u1.vf.pf_compat_ver < ADF_PFVF_COMPAT_RING_TO_SVC_MAP) /* Use already set default mappings */ return 0; if 
(adf_send_vf2pf_blkmsg_req(accel_dev, ADF_VF2PF_BLKMSG_REQ_RING_SVC_MAP, (u8 *)&rts_map_msg, &len)) { device_printf(GET_DEV(accel_dev), "QAT: Failed to get block message response\n"); return -EFAULT; } if (unlikely(len < sizeof(struct ring_to_svc_map_v1))) { device_printf(GET_DEV(accel_dev), "RING_TO_SVC message truncated to %d bytes\n", len); return -EFAULT; } /* Only v1 at present */ accel_dev->hw_device->ring_to_svc_map = rts_map_msg.map; + accel_dev->hw_device->get_ring_to_svc_done = true; + return 0; } diff --git a/sys/dev/qat/qat_common/adf_pfvf_vf_proto.c b/sys/dev/qat/qat_common/adf_pfvf_vf_proto.c index a09ddb819831..2a338b96a5f3 100644 --- a/sys/dev/qat/qat_common/adf_pfvf_vf_proto.c +++ b/sys/dev/qat/qat_common/adf_pfvf_vf_proto.c @@ -1,409 +1,418 @@ /* SPDX-License-Identifier: BSD-3-Clause */ -/* Copyright(c) 2007-2022 Intel Corporation */ +/* Copyright(c) 2007-2025 Intel Corporation */ #include #include "adf_accel_devices.h" #include "adf_common_drv.h" #include "adf_pfvf_msg.h" #include "adf_pfvf_utils.h" #include "adf_pfvf_vf_msg.h" #include "adf_pfvf_vf_proto.h" #define __bf_shf(x) (__builtin_ffsll(x) - 1) #define FIELD_MAX(_mask) ({ (typeof(_mask))((_mask) >> __bf_shf(_mask)); }) #define FIELD_PREP(_mask, _val) \ ({ ((typeof(_mask))(_val) << __bf_shf(_mask)) & (_mask); }) #define FIELD_GET(_mask, _reg) \ ({ (typeof(_mask))(((_reg) & (_mask)) >> __bf_shf(_mask)); }) /** * adf_send_vf2pf_msg() - send VF to PF message * @accel_dev: Pointer to acceleration device * @msg: Message to send * * This function allows the VF to send a message to the PF. * * Return: 0 on success, error code otherwise. */ int adf_send_vf2pf_msg(struct adf_accel_dev *accel_dev, struct pfvf_message msg) { struct adf_pfvf_ops *pfvf_ops = GET_PFVF_OPS(accel_dev); u32 pfvf_offset = pfvf_ops->get_pf2vf_offset(0); int ret = pfvf_ops->send_msg(accel_dev, msg, pfvf_offset, &accel_dev->u1.vf.vf2pf_lock); return ret; } /** * adf_recv_pf2vf_msg() - receive a PF to VF message * @accel_dev: Pointer to acceleration device * * This function allows the VF to receive a message from the PF. * * Return: a valid message on success, zero otherwise. */ static struct pfvf_message adf_recv_pf2vf_msg(struct adf_accel_dev *accel_dev) { struct adf_pfvf_ops *pfvf_ops = GET_PFVF_OPS(accel_dev); u32 pfvf_offset = pfvf_ops->get_vf2pf_offset(0); // 1008 return pfvf_ops->recv_msg(accel_dev, pfvf_offset, accel_dev->u1.vf.pf_compat_ver); } /** * adf_send_vf2pf_req() - send VF2PF request message * @accel_dev: Pointer to acceleration device. * @msg: Request message to send * @resp: Returned PF response * * This function sends a message that requires a response from the VF to the PF * and waits for a reply. * * Return: 0 on success, error code otherwise. 
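 * The request is resent up to ADF_PFVF_MSG_RESP_RETRIES times, waiting
 * ADF_PFVF_MSG_RESP_TIMEOUT ms each time for the PF reply delivered
 * through u1.vf.msg_received; during early (cold) boot the wait falls
 * back to a fixed delay because timers may not be running yet.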
*/ int adf_send_vf2pf_req(struct adf_accel_dev *accel_dev, struct pfvf_message msg, struct pfvf_message *resp) { unsigned long timeout = msecs_to_jiffies(ADF_PFVF_MSG_RESP_TIMEOUT); unsigned int retries = ADF_PFVF_MSG_RESP_RETRIES; int ret; reinit_completion(&accel_dev->u1.vf.msg_received); /* Send request from VF to PF */ do { ret = adf_send_vf2pf_msg(accel_dev, msg); if (ret) { device_printf(GET_DEV(accel_dev), "Failed to send request msg to PF\n"); return ret; } /* Wait for response, if it times out retry */ - ret = - wait_for_completion_timeout(&accel_dev->u1.vf.msg_received, - timeout); + if (!cold) { + ret = wait_for_completion_timeout( + &accel_dev->u1.vf.msg_received, timeout); + } else { + /* In cold start timers may not be initialized yet */ + DELAY(ADF_PFVF_MSG_RESP_TIMEOUT * 1000); + ret = try_wait_for_completion( + &accel_dev->u1.vf.msg_received); + } if (ret) { if (likely(resp)) *resp = accel_dev->u1.vf.response; /* Once copied, set to an invalid value */ accel_dev->u1.vf.response.type = 0; return 0; } device_printf(GET_DEV(accel_dev), "PFVF response message timeout\n"); } while (--retries); return -EIO; } static int adf_vf2pf_blkmsg_data_req(struct adf_accel_dev *accel_dev, bool crc, u8 *type, u8 *data) { struct pfvf_message req = { 0 }; struct pfvf_message resp = { 0 }; u8 blk_type; u8 blk_byte; u8 msg_type; u8 max_data; int err; /* Convert the block type to {small, medium, large} size category */ if (*type <= ADF_VF2PF_SMALL_BLOCK_TYPE_MAX) { msg_type = ADF_VF2PF_MSGTYPE_SMALL_BLOCK_REQ; blk_type = FIELD_PREP(ADF_VF2PF_SMALL_BLOCK_TYPE_MASK, *type); blk_byte = FIELD_PREP(ADF_VF2PF_SMALL_BLOCK_BYTE_MASK, *data); max_data = ADF_VF2PF_SMALL_BLOCK_BYTE_MAX; } else if (*type <= ADF_VF2PF_MEDIUM_BLOCK_TYPE_MAX) { msg_type = ADF_VF2PF_MSGTYPE_MEDIUM_BLOCK_REQ; blk_type = FIELD_PREP(ADF_VF2PF_MEDIUM_BLOCK_TYPE_MASK, *type - ADF_VF2PF_SMALL_BLOCK_TYPE_MAX); blk_byte = FIELD_PREP(ADF_VF2PF_MEDIUM_BLOCK_BYTE_MASK, *data); max_data = ADF_VF2PF_MEDIUM_BLOCK_BYTE_MAX; } else if (*type <= ADF_VF2PF_LARGE_BLOCK_TYPE_MAX) { msg_type = ADF_VF2PF_MSGTYPE_LARGE_BLOCK_REQ; blk_type = FIELD_PREP(ADF_VF2PF_LARGE_BLOCK_TYPE_MASK, *type - ADF_VF2PF_MEDIUM_BLOCK_TYPE_MAX); blk_byte = FIELD_PREP(ADF_VF2PF_LARGE_BLOCK_BYTE_MASK, *data); max_data = ADF_VF2PF_LARGE_BLOCK_BYTE_MAX; } else { device_printf(GET_DEV(accel_dev), "Invalid message type %u\n", *type); return -EINVAL; } /* Sanity check */ if (*data > max_data) { device_printf(GET_DEV(accel_dev), "Invalid byte %s %u for message type %u\n", crc ? "count" : "index", *data, *type); return -EINVAL; } /* Build the block message */ req.type = msg_type; req.data = blk_type | blk_byte | FIELD_PREP(ADF_VF2PF_BLOCK_CRC_REQ_MASK, crc); err = adf_send_vf2pf_req(accel_dev, req, &resp); if (err) return err; *type = FIELD_GET(ADF_PF2VF_BLKMSG_RESP_TYPE_MASK, resp.data); *data = FIELD_GET(ADF_PF2VF_BLKMSG_RESP_DATA_MASK, resp.data); return 0; } static int adf_vf2pf_blkmsg_get_byte(struct adf_accel_dev *accel_dev, u8 type, u8 index, u8 *data) { int ret; ret = adf_vf2pf_blkmsg_data_req(accel_dev, false, &type, &index); if (ret < 0) return ret; if (unlikely(type != ADF_PF2VF_BLKMSG_RESP_TYPE_DATA)) { device_printf(GET_DEV(accel_dev), "Unexpected BLKMSG response type %u, byte 0x%x\n", type, index); return -EFAULT; } *data = index; return 0; } static int adf_vf2pf_blkmsg_get_crc(struct adf_accel_dev *accel_dev, u8 type, u8 bytes, u8 *crc) { int ret; /* The count of bytes refers to a length, however shift it to a 0-based * count to avoid overflows. 
Thus, a request for 0 bytes is technically * valid. */ --bytes; ret = adf_vf2pf_blkmsg_data_req(accel_dev, true, &type, &bytes); if (ret < 0) return ret; if (unlikely(type != ADF_PF2VF_BLKMSG_RESP_TYPE_CRC)) { device_printf( GET_DEV(accel_dev), "Unexpected CRC BLKMSG response type %u, crc 0x%x\n", type, bytes); return -EFAULT; } *crc = bytes; return 0; } /** * adf_send_vf2pf_blkmsg_req() - retrieve block message * @accel_dev: Pointer to acceleration VF device. * @type: The block message type, see adf_pfvf_msg.h for allowed values * @buffer: input buffer where to place the received data * @buffer_len: buffer length as input, the amount of written bytes on output * * Request a message of type 'type' over the block message transport. * This function will send the required amount block message requests and * return the overall content back to the caller through the provided buffer. * The buffer should be large enough to contain the requested message type, * otherwise the response will be truncated. * * Return: 0 on success, error code otherwise. */ int adf_send_vf2pf_blkmsg_req(struct adf_accel_dev *accel_dev, u8 type, u8 *buffer, unsigned int *buffer_len) { unsigned int index; unsigned int msg_len; int ret; u8 remote_crc; u8 local_crc; if (unlikely(type > ADF_VF2PF_LARGE_BLOCK_TYPE_MAX)) { device_printf(GET_DEV(accel_dev), "Invalid block message type %d\n", type); return -EINVAL; } if (unlikely(*buffer_len < ADF_PFVF_BLKMSG_HEADER_SIZE)) { device_printf(GET_DEV(accel_dev), "Buffer size too small for a block message\n"); return -EINVAL; } ret = adf_vf2pf_blkmsg_get_byte(accel_dev, type, ADF_PFVF_BLKMSG_VER_BYTE, &buffer[ADF_PFVF_BLKMSG_VER_BYTE]); if (unlikely(ret)) return ret; if (unlikely(!buffer[ADF_PFVF_BLKMSG_VER_BYTE])) { device_printf(GET_DEV(accel_dev), "Invalid version 0 received for block request %u", type); return -EFAULT; } ret = adf_vf2pf_blkmsg_get_byte(accel_dev, type, ADF_PFVF_BLKMSG_LEN_BYTE, &buffer[ADF_PFVF_BLKMSG_LEN_BYTE]); if (unlikely(ret)) return ret; if (unlikely(!buffer[ADF_PFVF_BLKMSG_LEN_BYTE])) { device_printf(GET_DEV(accel_dev), "Invalid size 0 received for block request %u", type); return -EFAULT; } /* We need to pick the minimum since there is no way to request a * specific version. As a consequence any scenario is possible: * - PF has a newer (longer) version which doesn't fit in the buffer * - VF expects a newer (longer) version, so we must not ask for * bytes in excess * - PF and VF share the same version, no problem */ msg_len = ADF_PFVF_BLKMSG_HEADER_SIZE + buffer[ADF_PFVF_BLKMSG_LEN_BYTE]; msg_len = min(*buffer_len, msg_len); /* Get the payload */ for (index = ADF_PFVF_BLKMSG_HEADER_SIZE; index < msg_len; index++) { ret = adf_vf2pf_blkmsg_get_byte(accel_dev, type, index, &buffer[index]); if (unlikely(ret)) return ret; } ret = adf_vf2pf_blkmsg_get_crc(accel_dev, type, msg_len, &remote_crc); if (unlikely(ret)) return ret; local_crc = adf_pfvf_calc_blkmsg_crc(buffer, msg_len); if (unlikely(local_crc != remote_crc)) { device_printf( GET_DEV(accel_dev), "CRC error on msg type %d. 
Local %02X, remote %02X\n", type, local_crc, remote_crc); return -EIO; } *buffer_len = msg_len; return 0; } static bool adf_handle_pf2vf_msg(struct adf_accel_dev *accel_dev, struct pfvf_message msg) { switch (msg.type) { case ADF_PF2VF_MSGTYPE_RESTARTING: adf_pf2vf_handle_pf_restarting(accel_dev); return false; case ADF_PF2VF_MSGTYPE_RP_RESET_RESP: adf_pf2vf_handle_pf_rp_reset(accel_dev, msg); return true; + case ADF_PF2VF_MSGTYPE_FATAL_ERROR: + adf_pf2vf_handle_pf_error(accel_dev); + return true; case ADF_PF2VF_MSGTYPE_VERSION_RESP: case ADF_PF2VF_MSGTYPE_BLKMSG_RESP: accel_dev->u1.vf.response = msg; complete(&accel_dev->u1.vf.msg_received); return true; default: device_printf( GET_DEV(accel_dev), "Unknown message from PF (type 0x%.4x, data: 0x%.4x)\n", msg.type, msg.data); } return false; } bool adf_recv_and_handle_pf2vf_msg(struct adf_accel_dev *accel_dev) { struct pfvf_message msg; msg = adf_recv_pf2vf_msg(accel_dev); if (msg.type) /* Invalid or no message */ return adf_handle_pf2vf_msg(accel_dev, msg); /* No replies for PF->VF messages at present */ return true; } /** * adf_enable_vf2pf_comms() - Function enables communication from vf to pf * * @accel_dev: Pointer to acceleration device virtual function. * * Return: 0 on success, error code otherwise. */ int adf_enable_vf2pf_comms(struct adf_accel_dev *accel_dev) { struct adf_hw_device_data *hw_data = accel_dev->hw_device; int ret; /* init workqueue for VF */ ret = adf_init_vf_wq(); if (ret) return ret; hw_data->enable_pf2vf_interrupt(accel_dev); ret = adf_vf2pf_request_version(accel_dev); if (ret) return ret; ret = adf_vf2pf_get_capabilities(accel_dev); if (ret) return ret; ret = adf_vf2pf_get_ring_to_svc(accel_dev); return ret; } diff --git a/sys/dev/qat/qat_common/adf_vf_isr.c b/sys/dev/qat/qat_common/adf_vf_isr.c index 10eaf9d8da1b..e61f8a57d9b8 100644 --- a/sys/dev/qat/qat_common/adf_vf_isr.c +++ b/sys/dev/qat/qat_common/adf_vf_isr.c @@ -1,390 +1,403 @@ /* SPDX-License-Identifier: BSD-3-Clause */ -/* Copyright(c) 2007-2022 Intel Corporation */ +/* Copyright(c) 2007-2025 Intel Corporation */ #include "qat_freebsd.h" #include #include #include #include #include #include #include #include #include "adf_accel_devices.h" #include "adf_common_drv.h" #include "adf_cfg.h" #include "adf_cfg_strings.h" #include "adf_cfg_common.h" #include "adf_transport_access_macros.h" #include "adf_transport_internal.h" #include "adf_pfvf_utils.h" +#include "adf_pfvf_vf_msg.h" static TASKQUEUE_DEFINE_THREAD(qat_vf); static TASKQUEUE_DEFINE_THREAD(qat_bank_handler); static struct workqueue_struct *adf_vf_stop_wq; static DEFINE_MUTEX(vf_stop_wq_lock); struct adf_vf_stop_data { struct adf_accel_dev *accel_dev; struct work_struct work; }; static int adf_enable_msi(struct adf_accel_dev *accel_dev) { int stat; int count = 1; stat = pci_alloc_msi(accel_to_pci_dev(accel_dev), &count); if (stat) { device_printf(GET_DEV(accel_dev), "Failed to enable MSI interrupts\n"); return stat; } return stat; } static void adf_disable_msi(struct adf_accel_dev *accel_dev) { device_t pdev = accel_to_pci_dev(accel_dev); pci_release_msi(pdev); } static void adf_dev_stop_async(struct work_struct *work) { struct adf_vf_stop_data *stop_data = container_of(work, struct adf_vf_stop_data, work); struct adf_accel_dev *accel_dev = stop_data->accel_dev; struct adf_hw_device_data *hw_data = accel_dev->hw_device; adf_dev_restarting_notify(accel_dev); adf_dev_stop(accel_dev); adf_dev_shutdown(accel_dev); /* Re-enable PF2VF interrupts */ hw_data->enable_pf2vf_interrupt(accel_dev); + 
adf_vf2pf_restarting_complete(accel_dev); kfree(stop_data); } int adf_pf2vf_handle_pf_restarting(struct adf_accel_dev *accel_dev) { struct adf_vf_stop_data *stop_data; clear_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status); stop_data = kzalloc(sizeof(*stop_data), GFP_ATOMIC); if (!stop_data) { device_printf(GET_DEV(accel_dev), "Couldn't schedule stop for vf_%d\n", accel_dev->accel_id); return -ENOMEM; } stop_data->accel_dev = accel_dev; INIT_WORK(&stop_data->work, adf_dev_stop_async); queue_work(adf_vf_stop_wq, &stop_data->work); return 0; } int adf_pf2vf_handle_pf_rp_reset(struct adf_accel_dev *accel_dev, struct pfvf_message msg) { accel_dev->u1.vf.rpreset_sts = msg.data; if (accel_dev->u1.vf.rpreset_sts == RPRESET_SUCCESS) device_printf( GET_DEV(accel_dev), "rpreset resp(success) from PF type:0x%x data:0x%x\n", msg.type, msg.data); else if (accel_dev->u1.vf.rpreset_sts == RPRESET_NOT_SUPPORTED) device_printf( GET_DEV(accel_dev), "rpreset resp(not supported) from PF type:0x%x data:0x%x\n", msg.type, msg.data); else if (accel_dev->u1.vf.rpreset_sts == RPRESET_INVAL_BANK) device_printf( GET_DEV(accel_dev), "rpreset resp(invalid bank) from PF type:0x%x data:0x%x\n", msg.type, msg.data); else device_printf( GET_DEV(accel_dev), "rpreset resp(timeout) from PF type:0x%x data:0x%x\nn", msg.type, msg.data); complete(&accel_dev->u1.vf.msg_received); return 0; } +int +adf_pf2vf_handle_pf_error(struct adf_accel_dev *accel_dev) +{ + device_printf(GET_DEV(accel_dev), "Fatal error received from PF\n"); + + if (adf_notify_fatal_error(accel_dev)) + device_printf(GET_DEV(accel_dev), "Couldn't notify fatal error\n"); + + return 0; +} + static void adf_pf2vf_bh_handler(void *data, int pending) { struct adf_accel_dev *accel_dev = data; struct adf_hw_device_data *hw_data = accel_dev->hw_device; if (adf_recv_and_handle_pf2vf_msg(accel_dev)) /* Re-enable PF2VF interrupts */ hw_data->enable_pf2vf_interrupt(accel_dev); return; } static int adf_setup_pf2vf_bh(struct adf_accel_dev *accel_dev) { TASK_INIT(&accel_dev->u1.vf.pf2vf_bh_tasklet, 0, adf_pf2vf_bh_handler, accel_dev); mutex_init(&accel_dev->u1.vf.vf2pf_lock); return 0; } static void adf_cleanup_pf2vf_bh(struct adf_accel_dev *accel_dev) { taskqueue_cancel(taskqueue_qat_vf, &accel_dev->u1.vf.pf2vf_bh_tasklet, NULL); taskqueue_drain(taskqueue_qat_vf, &accel_dev->u1.vf.pf2vf_bh_tasklet); mutex_destroy(&accel_dev->u1.vf.vf2pf_lock); } static void adf_bh_handler(void *data, int pending) { struct adf_etr_bank_data *bank = (void *)data; adf_response_handler((uintptr_t)bank); return; } static int adf_setup_bh(struct adf_accel_dev *accel_dev) { int i = 0; struct adf_etr_data *priv_data = accel_dev->transport; for (i = 0; i < GET_MAX_BANKS(accel_dev); i++) { TASK_INIT(&priv_data->banks[i].resp_handler, 0, adf_bh_handler, &priv_data->banks[i]); } return 0; } static void adf_cleanup_bh(struct adf_accel_dev *accel_dev) { int i = 0; struct adf_etr_data *transport; if (!accel_dev || !accel_dev->transport) return; transport = accel_dev->transport; for (i = 0; i < GET_MAX_BANKS(accel_dev); i++) { taskqueue_cancel(taskqueue_qat_bank_handler, &transport->banks[i].resp_handler, NULL); taskqueue_drain(taskqueue_qat_bank_handler, &transport->banks[i].resp_handler); } } static void adf_isr(void *privdata) { struct adf_accel_dev *accel_dev = privdata; struct adf_hw_device_data *hw_data = accel_dev->hw_device; struct adf_hw_csr_ops *csr_ops = &hw_data->csr_info.csr_ops; int int_active_bundles = 0; int i = 0; /* Check for PF2VF interrupt */ if 
(hw_data->interrupt_active_pf2vf(accel_dev)) { /* Disable PF to VF interrupt */ hw_data->disable_pf2vf_interrupt(accel_dev); /* Schedule tasklet to handle interrupt BH */ taskqueue_enqueue(taskqueue_qat_vf, &accel_dev->u1.vf.pf2vf_bh_tasklet); } if (hw_data->get_int_active_bundles) int_active_bundles = hw_data->get_int_active_bundles(accel_dev); for (i = 0; i < GET_MAX_BANKS(accel_dev); i++) { if (int_active_bundles & BIT(i)) { struct adf_etr_data *etr_data = accel_dev->transport; struct adf_etr_bank_data *bank = &etr_data->banks[i]; /* Disable Flag and Coalesce Ring Interrupts */ csr_ops->write_csr_int_flag_and_col(bank->csr_addr, bank->bank_number, 0); /* Schedule tasklet to handle interrupt BH */ taskqueue_enqueue(taskqueue_qat_bank_handler, &bank->resp_handler); } } } static int adf_request_msi_irq(struct adf_accel_dev *accel_dev) { device_t pdev = accel_to_pci_dev(accel_dev); int ret; int rid = 1; int cpu; accel_dev->u1.vf.irq = bus_alloc_resource_any(pdev, SYS_RES_IRQ, &rid, RF_ACTIVE); if (accel_dev->u1.vf.irq == NULL) { device_printf(GET_DEV(accel_dev), "failed to allocate IRQ\n"); return ENXIO; } ret = bus_setup_intr(pdev, accel_dev->u1.vf.irq, INTR_TYPE_MISC | INTR_MPSAFE, NULL, adf_isr, accel_dev, &accel_dev->u1.vf.cookie); if (ret) { device_printf(GET_DEV(accel_dev), "failed to enable irq\n"); goto errout; } cpu = accel_dev->accel_id % num_online_cpus(); ret = bus_bind_intr(pdev, accel_dev->u1.vf.irq, cpu); if (ret) { device_printf(GET_DEV(accel_dev), "failed to bind IRQ handler to cpu core\n"); goto errout; } accel_dev->u1.vf.irq_enabled = true; return ret; errout: bus_free_resource(pdev, SYS_RES_IRQ, accel_dev->u1.vf.irq); return ret; } /** * adf_vf_isr_resource_free() - Free IRQ for acceleration device * @accel_dev: Pointer to acceleration device. * * Function frees interrupts for acceleration device virtual function. */ void adf_vf_isr_resource_free(struct adf_accel_dev *accel_dev) { device_t pdev = accel_to_pci_dev(accel_dev); if (accel_dev->u1.vf.irq_enabled) { bus_teardown_intr(pdev, accel_dev->u1.vf.irq, accel_dev->u1.vf.cookie); bus_free_resource(pdev, SYS_RES_IRQ, accel_dev->u1.vf.irq); } adf_cleanup_bh(accel_dev); adf_cleanup_pf2vf_bh(accel_dev); adf_disable_msi(accel_dev); } /** * adf_vf_isr_resource_alloc() - Allocate IRQ for acceleration device * @accel_dev: Pointer to acceleration device. * * Function allocates interrupts for acceleration device virtual function. * * Return: 0 on success, error code otherwise. */ int adf_vf_isr_resource_alloc(struct adf_accel_dev *accel_dev) { if (adf_enable_msi(accel_dev)) goto err_out; if (adf_setup_pf2vf_bh(accel_dev)) goto err_disable_msi; if (adf_setup_bh(accel_dev)) goto err_out; if (adf_request_msi_irq(accel_dev)) goto err_disable_msi; return 0; err_disable_msi: adf_disable_msi(accel_dev); err_out: return -EFAULT; } /** * adf_flush_vf_wq() - Flush workqueue for VF * @accel_dev: Pointer to acceleration device. * * Function disables the PF/VF interrupts on the VF so that no new messages * are received and flushes the workqueue 'adf_vf_stop_wq'. * * Return: void. */ void adf_flush_vf_wq(struct adf_accel_dev *accel_dev) { struct adf_hw_device_data *hw_data = accel_dev->hw_device; hw_data->disable_pf2vf_interrupt(accel_dev); if (adf_vf_stop_wq) flush_workqueue(adf_vf_stop_wq); } /** * adf_init_vf_wq() - Init workqueue for VF * * Function init workqueue 'adf_vf_stop_wq' for VF. * * Return: 0 on success, error code otherwise. 
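 *
 * A hedged sketch of the expected init/teardown pairing (call sites are
 * illustrative; adf_enable_vf2pf_comms() performs the init side during VF
 * bring-up):
 *
 *	if (adf_init_vf_wq())
 *		return ENOMEM;
 *	...
 *	adf_flush_vf_wq(accel_dev);
 *	adf_exit_vf_wq();
 *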
*/ int adf_init_vf_wq(void) { int ret = 0; mutex_lock(&vf_stop_wq_lock); if (!adf_vf_stop_wq) adf_vf_stop_wq = alloc_workqueue("adf_vf_stop_wq", WQ_MEM_RECLAIM, 0); if (!adf_vf_stop_wq) ret = ENOMEM; mutex_unlock(&vf_stop_wq_lock); return ret; } void adf_exit_vf_wq(void) { if (adf_vf_stop_wq) destroy_workqueue(adf_vf_stop_wq); adf_vf_stop_wq = NULL; } diff --git a/sys/dev/qat/qat_common/qat_hal.c b/sys/dev/qat/qat_common/qat_hal.c index b1e35e77272a..75190246ee1d 100644 --- a/sys/dev/qat/qat_common/qat_hal.c +++ b/sys/dev/qat/qat_common/qat_hal.c @@ -1,1967 +1,1974 @@ /* SPDX-License-Identifier: BSD-3-Clause */ -/* Copyright(c) 2007-2022 Intel Corporation */ +/* Copyright(c) 2007-2025 Intel Corporation */ #include "qat_freebsd.h" #include "adf_cfg.h" #include "adf_common_drv.h" #include "adf_accel_devices.h" #include "icp_qat_uclo.h" #include "icp_qat_fw.h" #include "icp_qat_fw_init_admin.h" #include "adf_cfg_strings.h" #include "adf_transport_access_macros.h" #include "adf_transport_internal.h" #include #include "adf_accel_devices.h" #include "adf_common_drv.h" #include "icp_qat_hal.h" #include "icp_qat_uclo.h" #define BAD_REGADDR 0xffff #define MAX_RETRY_TIMES 1000000 #define INIT_CTX_ARB_VALUE 0x0 #define INIT_CTX_ENABLE_VALUE 0x0 #define INIT_PC_VALUE 0x0 #define INIT_WAKEUP_EVENTS_VALUE 0x1 #define INIT_SIG_EVENTS_VALUE 0x1 #define INIT_CCENABLE_VALUE 0x2000 #define RST_CSR_QAT_LSB 20 #define RST_CSR_AE_LSB 0 #define MC_TIMESTAMP_ENABLE (0x1 << 7) #define IGNORE_W1C_MASK \ ((~(1 << CE_BREAKPOINT_BITPOS)) & \ (~(1 << CE_CNTL_STORE_PARITY_ERROR_BITPOS)) & \ (~(1 << CE_REG_PAR_ERR_BITPOS))) #define INSERT_IMMED_GPRA_CONST(inst, const_val) \ (inst = ((inst & 0xFFFF00C03FFull) | \ ((((const_val) << 12) & 0x0FF00000ull) | \ (((const_val) << 10) & 0x0003FC00ull)))) #define INSERT_IMMED_GPRB_CONST(inst, const_val) \ (inst = ((inst & 0xFFFF00FFF00ull) | \ ((((const_val) << 12) & 0x0FF00000ull) | \ (((const_val) << 0) & 0x000000FFull)))) #define AE(handle, ae) ((handle)->hal_handle->aes[ae]) static const uint64_t inst_4b[] = { 0x0F0400C0000ull, 0x0F4400C0000ull, 0x0F040000300ull, 0x0F440000300ull, 0x0FC066C0000ull, 0x0F0000C0300ull, 0x0F0000C0300ull, 0x0F0000C0300ull, 0x0A021000000ull }; static const uint64_t inst[] = { 0x0F0000C0000ull, 0x0F000000380ull, 0x0D805000011ull, 0x0FC082C0300ull, 0x0F0000C0300ull, 0x0F0000C0300ull, 0x0F0000C0300ull, 0x0F0000C0300ull, 0x0A0643C0000ull, 0x0BAC0000301ull, 0x0D802000101ull, 0x0F0000C0001ull, 0x0FC066C0001ull, 0x0F0000C0300ull, 0x0F0000C0300ull, 0x0F0000C0300ull, 0x0F000400300ull, 0x0A0610C0000ull, 0x0BAC0000301ull, 0x0D804400101ull, 0x0A0580C0000ull, 0x0A0581C0000ull, 0x0A0582C0000ull, 0x0A0583C0000ull, 0x0A0584C0000ull, 0x0A0585C0000ull, 0x0A0586C0000ull, 0x0A0587C0000ull, 0x0A0588C0000ull, 0x0A0589C0000ull, 0x0A058AC0000ull, 0x0A058BC0000ull, 0x0A058CC0000ull, 0x0A058DC0000ull, 0x0A058EC0000ull, 0x0A058FC0000ull, 0x0A05C0C0000ull, 0x0A05C1C0000ull, 0x0A05C2C0000ull, 0x0A05C3C0000ull, 0x0A05C4C0000ull, 0x0A05C5C0000ull, 0x0A05C6C0000ull, 0x0A05C7C0000ull, 0x0A05C8C0000ull, 0x0A05C9C0000ull, 0x0A05CAC0000ull, 0x0A05CBC0000ull, 0x0A05CCC0000ull, 0x0A05CDC0000ull, 0x0A05CEC0000ull, 0x0A05CFC0000ull, 0x0A0400C0000ull, 0x0B0400C0000ull, 0x0A0401C0000ull, 0x0B0401C0000ull, 0x0A0402C0000ull, 0x0B0402C0000ull, 0x0A0403C0000ull, 0x0B0403C0000ull, 0x0A0404C0000ull, 0x0B0404C0000ull, 0x0A0405C0000ull, 0x0B0405C0000ull, 0x0A0406C0000ull, 0x0B0406C0000ull, 0x0A0407C0000ull, 0x0B0407C0000ull, 0x0A0408C0000ull, 0x0B0408C0000ull, 0x0A0409C0000ull, 0x0B0409C0000ull, 
0x0A040AC0000ull, 0x0B040AC0000ull, 0x0A040BC0000ull, 0x0B040BC0000ull, 0x0A040CC0000ull, 0x0B040CC0000ull, 0x0A040DC0000ull, 0x0B040DC0000ull, 0x0A040EC0000ull, 0x0B040EC0000ull, 0x0A040FC0000ull, 0x0B040FC0000ull, 0x0D81581C010ull, 0x0E000010000ull, 0x0E000010000ull, }; static const uint64_t inst_CPM2X[] = { 0x0F0000C0000ull, 0x0D802C00011ull, 0x0F0000C0001ull, 0x0FC066C0001ull, 0x0F0000C0300ull, 0x0F0000C0300ull, 0x0F0000C0300ull, 0x0F000500300ull, 0x0A0610C0000ull, 0x0BAC0000301ull, 0x0D802000101ull, 0x0A0580C0000ull, 0x0A0581C0000ull, 0x0A0582C0000ull, 0x0A0583C0000ull, 0x0A0584C0000ull, 0x0A0585C0000ull, 0x0A0586C0000ull, 0x0A0587C0000ull, 0x0A0588C0000ull, 0x0A0589C0000ull, 0x0A058AC0000ull, 0x0A058BC0000ull, 0x0A058CC0000ull, 0x0A058DC0000ull, 0x0A058EC0000ull, 0x0A058FC0000ull, 0x0A05C0C0000ull, 0x0A05C1C0000ull, 0x0A05C2C0000ull, 0x0A05C3C0000ull, 0x0A05C4C0000ull, 0x0A05C5C0000ull, 0x0A05C6C0000ull, 0x0A05C7C0000ull, 0x0A05C8C0000ull, 0x0A05C9C0000ull, 0x0A05CAC0000ull, 0x0A05CBC0000ull, 0x0A05CCC0000ull, 0x0A05CDC0000ull, 0x0A05CEC0000ull, 0x0A05CFC0000ull, 0x0A0400C0000ull, 0x0B0400C0000ull, 0x0A0401C0000ull, 0x0B0401C0000ull, 0x0A0402C0000ull, 0x0B0402C0000ull, 0x0A0403C0000ull, 0x0B0403C0000ull, 0x0A0404C0000ull, 0x0B0404C0000ull, 0x0A0405C0000ull, 0x0B0405C0000ull, 0x0A0406C0000ull, 0x0B0406C0000ull, 0x0A0407C0000ull, 0x0B0407C0000ull, 0x0A0408C0000ull, 0x0B0408C0000ull, 0x0A0409C0000ull, 0x0B0409C0000ull, 0x0A040AC0000ull, 0x0B040AC0000ull, 0x0A040BC0000ull, 0x0B040BC0000ull, 0x0A040CC0000ull, 0x0B040CC0000ull, 0x0A040DC0000ull, 0x0B040DC0000ull, 0x0A040EC0000ull, 0x0B040EC0000ull, 0x0A040FC0000ull, 0x0B040FC0000ull, 0x0D81341C010ull, 0x0E000000001ull, 0x0E000010000ull, }; void qat_hal_set_live_ctx(struct icp_qat_fw_loader_handle *handle, unsigned char ae, unsigned int ctx_mask) { AE(handle, ae).live_ctx_mask = ctx_mask; } #define CSR_RETRY_TIMES 500 static int qat_hal_rd_ae_csr(struct icp_qat_fw_loader_handle *handle, unsigned char ae, unsigned int csr, unsigned int *value) { unsigned int iterations = CSR_RETRY_TIMES; do { *value = GET_AE_CSR(handle, ae, csr); if (!(GET_AE_CSR(handle, ae, LOCAL_CSR_STATUS) & LCS_STATUS)) return 0; } while (iterations--); pr_err("QAT: Read CSR timeout\n"); return EFAULT; } static int qat_hal_wr_ae_csr(struct icp_qat_fw_loader_handle *handle, unsigned char ae, unsigned int csr, unsigned int value) { unsigned int iterations = CSR_RETRY_TIMES; do { SET_AE_CSR(handle, ae, csr, value); if (!(GET_AE_CSR(handle, ae, LOCAL_CSR_STATUS) & LCS_STATUS)) return 0; } while (iterations--); pr_err("QAT: Write CSR Timeout\n"); return EFAULT; } static void qat_hal_get_wakeup_event(struct icp_qat_fw_loader_handle *handle, unsigned char ae, unsigned char ctx, unsigned int *events) { unsigned int cur_ctx; qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER, &cur_ctx); qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx); qat_hal_rd_ae_csr(handle, ae, CTX_WAKEUP_EVENTS_INDIRECT, events); qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx); } static int qat_hal_wait_cycles(struct icp_qat_fw_loader_handle *handle, unsigned char ae, unsigned int cycles, int chk_inactive) { unsigned int base_cnt = 0, cur_cnt = 0; unsigned int csr = (1 << ACS_ABO_BITPOS); int times = MAX_RETRY_TIMES; int elapsed_cycles = 0; qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT, &base_cnt); base_cnt &= 0xffff; while ((int)cycles > elapsed_cycles && times--) { if (chk_inactive) qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS, &csr); qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT, &cur_cnt); cur_cnt 
&= 0xffff; elapsed_cycles = cur_cnt - base_cnt; if (elapsed_cycles < 0) elapsed_cycles += 0x10000; /* ensure at least 8 time cycles elapsed in wait_cycles */ if (elapsed_cycles >= 8 && !(csr & (1 << ACS_ABO_BITPOS))) return 0; } if (times < 0) { pr_err("QAT: wait_num_cycles time out\n"); return EFAULT; } return 0; } void qat_hal_get_scs_neigh_ae(unsigned char ae, unsigned char *ae_neigh) { *ae_neigh = (ae & 0x1) ? (ae - 1) : (ae + 1); } #define CLR_BIT(wrd, bit) ((wrd) & ~(1 << (bit))) #define SET_BIT(wrd, bit) ((wrd) | 1 << (bit)) int qat_hal_set_ae_ctx_mode(struct icp_qat_fw_loader_handle *handle, unsigned char ae, unsigned char mode) { unsigned int csr, new_csr; if (mode != 4 && mode != 8) { pr_err("QAT: bad ctx mode=%d\n", mode); return EINVAL; } /* Sets the accelaration engine context mode to either four or eight */ qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &csr); csr = IGNORE_W1C_MASK & csr; new_csr = (mode == 4) ? SET_BIT(csr, CE_INUSE_CONTEXTS_BITPOS) : CLR_BIT(csr, CE_INUSE_CONTEXTS_BITPOS); qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, new_csr); return 0; } int qat_hal_set_ae_nn_mode(struct icp_qat_fw_loader_handle *handle, unsigned char ae, unsigned char mode) { unsigned int csr, new_csr; if (IS_QAT_GEN4(pci_get_device(GET_DEV(handle->accel_dev)))) { pr_err("QAT: No next neigh for CPM2X\n"); return EINVAL; } qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &csr); csr &= IGNORE_W1C_MASK; new_csr = (mode) ? SET_BIT(csr, CE_NN_MODE_BITPOS) : CLR_BIT(csr, CE_NN_MODE_BITPOS); if (new_csr != csr) qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, new_csr); return 0; } int qat_hal_set_ae_lm_mode(struct icp_qat_fw_loader_handle *handle, unsigned char ae, enum icp_qat_uof_regtype lm_type, unsigned char mode) { unsigned int csr, new_csr; qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &csr); csr &= IGNORE_W1C_MASK; switch (lm_type) { case ICP_LMEM0: new_csr = (mode) ? SET_BIT(csr, CE_LMADDR_0_GLOBAL_BITPOS) : CLR_BIT(csr, CE_LMADDR_0_GLOBAL_BITPOS); break; case ICP_LMEM1: new_csr = (mode) ? SET_BIT(csr, CE_LMADDR_1_GLOBAL_BITPOS) : CLR_BIT(csr, CE_LMADDR_1_GLOBAL_BITPOS); break; case ICP_LMEM2: new_csr = (mode) ? SET_BIT(csr, CE_LMADDR_2_GLOBAL_BITPOS) : CLR_BIT(csr, CE_LMADDR_2_GLOBAL_BITPOS); break; case ICP_LMEM3: new_csr = (mode) ? SET_BIT(csr, CE_LMADDR_3_GLOBAL_BITPOS) : CLR_BIT(csr, CE_LMADDR_3_GLOBAL_BITPOS); break; default: pr_err("QAT: lmType = 0x%x\n", lm_type); return EINVAL; } if (new_csr != csr) qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, new_csr); return 0; } void qat_hal_set_ae_tindex_mode(struct icp_qat_fw_loader_handle *handle, unsigned char ae, unsigned char mode) { unsigned int csr, new_csr; qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &csr); csr &= IGNORE_W1C_MASK; new_csr = (mode) ? SET_BIT(csr, CE_T_INDEX_GLOBAL_BITPOS) : CLR_BIT(csr, CE_T_INDEX_GLOBAL_BITPOS); if (new_csr != csr) qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, new_csr); } void qat_hal_set_ae_scs_mode(struct icp_qat_fw_loader_handle *handle, unsigned char ae, unsigned char mode) { unsigned int csr, new_csr; qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL, &csr); new_csr = (mode) ? 
SET_BIT(csr, MMC_SHARE_CS_BITPOS) : CLR_BIT(csr, MMC_SHARE_CS_BITPOS); if (new_csr != csr) qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL, new_csr); } static unsigned short qat_hal_get_reg_addr(unsigned int type, unsigned short reg_num) { unsigned short reg_addr; switch (type) { case ICP_GPA_ABS: case ICP_GPB_ABS: reg_addr = 0x80 | (reg_num & 0x7f); break; case ICP_GPA_REL: case ICP_GPB_REL: reg_addr = reg_num & 0x1f; break; case ICP_SR_RD_REL: case ICP_SR_WR_REL: case ICP_SR_REL: reg_addr = 0x180 | (reg_num & 0x1f); break; case ICP_SR_ABS: reg_addr = 0x140 | ((reg_num & 0x3) << 1); break; case ICP_DR_RD_REL: case ICP_DR_WR_REL: case ICP_DR_REL: reg_addr = 0x1c0 | (reg_num & 0x1f); break; case ICP_DR_ABS: reg_addr = 0x100 | ((reg_num & 0x3) << 1); break; case ICP_NEIGH_REL: reg_addr = 0x280 | (reg_num & 0x1f); break; case ICP_LMEM0: reg_addr = 0x200; break; case ICP_LMEM1: reg_addr = 0x220; break; case ICP_LMEM2: reg_addr = 0x2c0; break; case ICP_LMEM3: reg_addr = 0x2e0; break; case ICP_NO_DEST: reg_addr = 0x300 | (reg_num & 0xff); break; default: reg_addr = BAD_REGADDR; break; } return reg_addr; } static u32 qat_hal_get_ae_mask_gen4(struct icp_qat_fw_loader_handle *handle) { u32 tg = 0, ae; u32 valid_ae_mask = 0; for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) { if (handle->hal_handle->ae_mask & (1 << ae)) { tg = ae / 4; valid_ae_mask |= (1 << (tg * 2)); } } return valid_ae_mask; } void qat_hal_reset(struct icp_qat_fw_loader_handle *handle) { unsigned int ae_reset_csr[MAX_CPP_NUM]; unsigned int ae_reset_val[MAX_CPP_NUM]; unsigned int valid_ae_mask, valid_slice_mask; unsigned int cpp_num = 1; unsigned int i; if (IS_QAT_GEN3(pci_get_device(GET_DEV(handle->accel_dev)))) { ae_reset_csr[0] = ICP_RESET_CPP0; ae_reset_csr[1] = ICP_RESET_CPP1; if (handle->hal_handle->ae_mask > 0xffff) ++cpp_num; } else if (IS_QAT_GEN4(pci_get_device(GET_DEV(handle->accel_dev)))) { ae_reset_csr[0] = ICP_RESET_CPP0; } else { ae_reset_csr[0] = ICP_RESET; } for (i = 0; i < cpp_num; i++) { if (i == 0) { if (IS_QAT_GEN4( pci_get_device(GET_DEV(handle->accel_dev)))) { valid_ae_mask = qat_hal_get_ae_mask_gen4(handle); valid_slice_mask = handle->hal_handle->slice_mask; } else { valid_ae_mask = handle->hal_handle->ae_mask & 0xFFFF; valid_slice_mask = handle->hal_handle->slice_mask & 0x3F; } } else { valid_ae_mask = (handle->hal_handle->ae_mask >> AES_PER_CPP) & 0xFFFF; valid_slice_mask = (handle->hal_handle->slice_mask >> SLICES_PER_CPP) & 0x3F; } ae_reset_val[i] = GET_GLB_CSR(handle, ae_reset_csr[i]); ae_reset_val[i] |= valid_ae_mask << RST_CSR_AE_LSB; ae_reset_val[i] |= valid_slice_mask << RST_CSR_QAT_LSB; SET_GLB_CSR(handle, ae_reset_csr[i], ae_reset_val[i]); } } static void qat_hal_wr_indr_csr(struct icp_qat_fw_loader_handle *handle, unsigned char ae, unsigned int ctx_mask, unsigned int ae_csr, unsigned int csr_val) { unsigned int ctx, cur_ctx; qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER, &cur_ctx); for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++) { if (!(ctx_mask & (1 << ctx))) continue; qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx); qat_hal_wr_ae_csr(handle, ae, ae_csr, csr_val); } qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx); } static void qat_hal_rd_indr_csr(struct icp_qat_fw_loader_handle *handle, unsigned char ae, unsigned char ctx, unsigned int ae_csr, unsigned int *csr_val) { unsigned int cur_ctx; qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER, &cur_ctx); qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx); qat_hal_rd_ae_csr(handle, ae, ae_csr, csr_val); qat_hal_wr_ae_csr(handle, ae, 
CSR_CTX_POINTER, cur_ctx); } static void qat_hal_put_sig_event(struct icp_qat_fw_loader_handle *handle, unsigned char ae, unsigned int ctx_mask, unsigned int events) { unsigned int ctx, cur_ctx; qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER, &cur_ctx); for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++) { if (!(ctx_mask & (1 << ctx))) continue; qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx); qat_hal_wr_ae_csr(handle, ae, CTX_SIG_EVENTS_INDIRECT, events); } qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx); } static void qat_hal_put_wakeup_event(struct icp_qat_fw_loader_handle *handle, unsigned char ae, unsigned int ctx_mask, unsigned int events) { unsigned int ctx, cur_ctx; qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER, &cur_ctx); for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++) { if (!(ctx_mask & (1 << ctx))) continue; qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx); qat_hal_wr_ae_csr(handle, ae, CTX_WAKEUP_EVENTS_INDIRECT, events); } qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx); } static int qat_hal_check_ae_alive(struct icp_qat_fw_loader_handle *handle) { unsigned int base_cnt, cur_cnt; unsigned char ae; unsigned long ae_mask = handle->hal_handle->ae_mask; int times = MAX_RETRY_TIMES; for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) { qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT, (unsigned int *)&base_cnt); base_cnt &= 0xffff; do { qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT, (unsigned int *)&cur_cnt); cur_cnt &= 0xffff; } while (times-- && (cur_cnt == base_cnt)); if (times < 0) { pr_err("QAT: AE%d is inactive!!\n", ae); return EFAULT; } } return 0; } int qat_hal_check_ae_active(struct icp_qat_fw_loader_handle *handle, unsigned int ae) { unsigned int enable = 0, active = 0; qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &enable); qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS, &active); if ((enable & (0xff << CE_ENABLE_BITPOS)) || (active & (1 << ACS_ABO_BITPOS))) return 1; else return 0; } static void qat_hal_reset_timestamp(struct icp_qat_fw_loader_handle *handle) { unsigned int misc_ctl_csr, misc_ctl; unsigned char ae; unsigned long ae_mask = handle->hal_handle->ae_mask; misc_ctl_csr = (IS_QAT_GEN3_OR_GEN4(pci_get_device(GET_DEV(handle->accel_dev)))) ? 
MISC_CONTROL_C4XXX : MISC_CONTROL; /* stop the timestamp timers */ misc_ctl = GET_GLB_CSR(handle, misc_ctl_csr); if (misc_ctl & MC_TIMESTAMP_ENABLE) SET_GLB_CSR(handle, misc_ctl_csr, misc_ctl & (~MC_TIMESTAMP_ENABLE)); for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) { qat_hal_wr_ae_csr(handle, ae, TIMESTAMP_LOW, 0); qat_hal_wr_ae_csr(handle, ae, TIMESTAMP_HIGH, 0); } /* start timestamp timers */ SET_GLB_CSR(handle, misc_ctl_csr, misc_ctl | MC_TIMESTAMP_ENABLE); } #define ESRAM_AUTO_TINIT BIT(2) #define ESRAM_AUTO_TINIT_DONE BIT(3) #define ESRAM_AUTO_INIT_USED_CYCLES (1640) #define ESRAM_AUTO_INIT_CSR_OFFSET 0xC1C static int qat_hal_init_esram(struct icp_qat_fw_loader_handle *handle) { uintptr_t csr_addr = ((uintptr_t)handle->hal_ep_csr_addr_v + ESRAM_AUTO_INIT_CSR_OFFSET); unsigned int csr_val; int times = 30; if (pci_get_device(GET_DEV(handle->accel_dev)) != ADF_DH895XCC_PCI_DEVICE_ID) return 0; csr_val = ADF_CSR_RD(handle->hal_misc_addr_v, csr_addr); if ((csr_val & ESRAM_AUTO_TINIT) && (csr_val & ESRAM_AUTO_TINIT_DONE)) return 0; csr_val = ADF_CSR_RD(handle->hal_misc_addr_v, csr_addr); csr_val |= ESRAM_AUTO_TINIT; ADF_CSR_WR(handle->hal_misc_addr_v, csr_addr, csr_val); do { qat_hal_wait_cycles(handle, 0, ESRAM_AUTO_INIT_USED_CYCLES, 0); csr_val = ADF_CSR_RD(handle->hal_misc_addr_v, csr_addr); } while (!(csr_val & ESRAM_AUTO_TINIT_DONE) && times--); if (times < 0) { pr_err("QAT: Fail to init eSram!\n"); return EFAULT; } return 0; } #define SHRAM_INIT_CYCLES 2060 int qat_hal_clr_reset(struct icp_qat_fw_loader_handle *handle) { unsigned int ae_reset_csr[MAX_CPP_NUM]; unsigned int ae_reset_val[MAX_CPP_NUM]; unsigned int cpp_num = 1; unsigned int valid_ae_mask, valid_slice_mask; unsigned char ae; unsigned int i; unsigned int clk_csr[MAX_CPP_NUM]; unsigned int clk_val[MAX_CPP_NUM]; unsigned int times = 100; unsigned long ae_mask = handle->hal_handle->ae_mask; if (IS_QAT_GEN3(pci_get_device(GET_DEV(handle->accel_dev)))) { ae_reset_csr[0] = ICP_RESET_CPP0; ae_reset_csr[1] = ICP_RESET_CPP1; clk_csr[0] = ICP_GLOBAL_CLK_ENABLE_CPP0; clk_csr[1] = ICP_GLOBAL_CLK_ENABLE_CPP1; if (handle->hal_handle->ae_mask > 0xffff) ++cpp_num; } else if (IS_QAT_GEN4(pci_get_device(GET_DEV(handle->accel_dev)))) { ae_reset_csr[0] = ICP_RESET_CPP0; clk_csr[0] = ICP_GLOBAL_CLK_ENABLE_CPP0; } else { ae_reset_csr[0] = ICP_RESET; clk_csr[0] = ICP_GLOBAL_CLK_ENABLE; } for (i = 0; i < cpp_num; i++) { if (i == 0) { if (IS_QAT_GEN4( pci_get_device(GET_DEV(handle->accel_dev)))) { valid_ae_mask = qat_hal_get_ae_mask_gen4(handle); valid_slice_mask = handle->hal_handle->slice_mask; } else { valid_ae_mask = handle->hal_handle->ae_mask & 0xFFFF; valid_slice_mask = handle->hal_handle->slice_mask & 0x3F; } } else { valid_ae_mask = (handle->hal_handle->ae_mask >> AES_PER_CPP) & 0xFFFF; valid_slice_mask = (handle->hal_handle->slice_mask >> SLICES_PER_CPP) & 0x3F; } /* write to the reset csr */ ae_reset_val[i] = GET_GLB_CSR(handle, ae_reset_csr[i]); ae_reset_val[i] &= ~(valid_ae_mask << RST_CSR_AE_LSB); ae_reset_val[i] &= ~(valid_slice_mask << RST_CSR_QAT_LSB); do { SET_GLB_CSR(handle, ae_reset_csr[i], ae_reset_val[i]); if (!(times--)) goto out_err; ae_reset_val[i] = GET_GLB_CSR(handle, ae_reset_csr[i]); } while ( (valid_ae_mask | (valid_slice_mask << RST_CSR_QAT_LSB)) & ae_reset_val[i]); /* enable clock */ clk_val[i] = GET_GLB_CSR(handle, clk_csr[i]); clk_val[i] |= valid_ae_mask << 0; clk_val[i] |= valid_slice_mask << 20; SET_GLB_CSR(handle, clk_csr[i], clk_val[i]); } if (qat_hal_check_ae_alive(handle)) goto out_err; /* 
Set undefined power-up/reset states to reasonable default values */ for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) { qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, INIT_CTX_ENABLE_VALUE); qat_hal_wr_indr_csr(handle, ae, ICP_QAT_UCLO_AE_ALL_CTX, CTX_STS_INDIRECT, handle->hal_handle->upc_mask & INIT_PC_VALUE); qat_hal_wr_ae_csr(handle, ae, CTX_ARB_CNTL, INIT_CTX_ARB_VALUE); qat_hal_wr_ae_csr(handle, ae, CC_ENABLE, INIT_CCENABLE_VALUE); qat_hal_put_wakeup_event(handle, ae, ICP_QAT_UCLO_AE_ALL_CTX, INIT_WAKEUP_EVENTS_VALUE); qat_hal_put_sig_event(handle, ae, ICP_QAT_UCLO_AE_ALL_CTX, INIT_SIG_EVENTS_VALUE); } if (qat_hal_init_esram(handle)) goto out_err; if (qat_hal_wait_cycles(handle, 0, SHRAM_INIT_CYCLES, 0)) goto out_err; qat_hal_reset_timestamp(handle); return 0; out_err: pr_err("QAT: failed to get device out of reset\n"); return EFAULT; } static void qat_hal_disable_ctx(struct icp_qat_fw_loader_handle *handle, unsigned char ae, unsigned int ctx_mask) { unsigned int ctx; qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx); ctx &= IGNORE_W1C_MASK & (~((ctx_mask & ICP_QAT_UCLO_AE_ALL_CTX) << CE_ENABLE_BITPOS)); qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx); } static uint64_t qat_hal_parity_64bit(uint64_t word) { word ^= word >> 1; word ^= word >> 2; word ^= word >> 4; word ^= word >> 8; word ^= word >> 16; word ^= word >> 32; return word & 1; } static uint64_t qat_hal_set_uword_ecc(uint64_t uword) { uint64_t bit0_mask = 0xff800007fffULL, bit1_mask = 0x1f801ff801fULL, bit2_mask = 0xe387e0781e1ULL, bit3_mask = 0x7cb8e388e22ULL, bit4_mask = 0xaf5b2c93244ULL, bit5_mask = 0xf56d5525488ULL, bit6_mask = 0xdaf69a46910ULL; /* clear the ecc bits */ uword &= ~(0x7fULL << 0x2C); uword |= qat_hal_parity_64bit(bit0_mask & uword) << 0x2C; uword |= qat_hal_parity_64bit(bit1_mask & uword) << 0x2D; uword |= qat_hal_parity_64bit(bit2_mask & uword) << 0x2E; uword |= qat_hal_parity_64bit(bit3_mask & uword) << 0x2F; uword |= qat_hal_parity_64bit(bit4_mask & uword) << 0x30; uword |= qat_hal_parity_64bit(bit5_mask & uword) << 0x31; uword |= qat_hal_parity_64bit(bit6_mask & uword) << 0x32; return uword; } void qat_hal_wr_uwords(struct icp_qat_fw_loader_handle *handle, unsigned char ae, unsigned int uaddr, unsigned int words_num, const uint64_t *uword) { unsigned int ustore_addr; unsigned int i, ae_in_group; if (IS_QAT_GEN4(pci_get_device(GET_DEV(handle->accel_dev)))) { ae_in_group = ae / 4 * 4; for (i = 0; i < AE_TG_NUM_CPM2X; i++) { if (ae_in_group + i == ae) continue; if (ae_in_group + i >= handle->hal_handle->ae_max_num) break; if (qat_hal_check_ae_active(handle, ae_in_group + i)) { pr_err( "ae%d in T_group is active, cannot write to ustore!\n", ae_in_group + i); return; } } } qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS, &ustore_addr); uaddr |= UA_ECS; qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr); for (i = 0; i < words_num; i++) { unsigned int uwrd_lo, uwrd_hi; uint64_t tmp; tmp = qat_hal_set_uword_ecc(uword[i]); uwrd_lo = (unsigned int)(tmp & 0xffffffff); uwrd_hi = (unsigned int)(tmp >> 0x20); qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_LOWER, uwrd_lo); qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_UPPER, uwrd_hi); } qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, ustore_addr); } void qat_hal_wr_coalesce_uwords(struct icp_qat_fw_loader_handle *handle, unsigned char ae, unsigned int uaddr, unsigned int words_num, u64 *uword) { u64 *even_uwrods, *odd_uwords; unsigned char neigh_ae, odd_ae, even_ae; int i, even_cpy_cnt = 0, odd_cpy_cnt = 0; even_uwrods = malloc(16 * 1024 * sizeof(*uword), M_QAT, M_WAITOK 
| M_ZERO); odd_uwords = malloc(16 * 1024 * sizeof(*uword), M_QAT, M_WAITOK | M_ZERO); qat_hal_get_scs_neigh_ae(ae, &neigh_ae); if (ae & 1) { odd_ae = ae; even_ae = neigh_ae; } else { odd_ae = neigh_ae; even_ae = ae; } for (i = 0; i < words_num; i++) { if ((uaddr + i) & 1) odd_uwords[odd_cpy_cnt++] = uword[i]; else even_uwrods[even_cpy_cnt++] = uword[i]; } if (even_cpy_cnt) qat_hal_wr_uwords(handle, even_ae, (uaddr + 1) / 2, even_cpy_cnt, even_uwrods); if (odd_cpy_cnt) qat_hal_wr_uwords( handle, odd_ae, uaddr / 2, odd_cpy_cnt, odd_uwords); free(even_uwrods, M_QAT); free(odd_uwords, M_QAT); } static void qat_hal_enable_ctx(struct icp_qat_fw_loader_handle *handle, unsigned char ae, unsigned int ctx_mask) { unsigned int ctx; qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx); ctx &= IGNORE_W1C_MASK; ctx_mask &= (ctx & CE_INUSE_CONTEXTS) ? 0x55 : 0xFF; ctx |= (ctx_mask << CE_ENABLE_BITPOS); qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx); } static void qat_hal_clear_xfer(struct icp_qat_fw_loader_handle *handle) { unsigned char ae; unsigned short reg; unsigned long ae_mask = handle->hal_handle->ae_mask; for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) { for (reg = 0; reg < ICP_QAT_UCLO_MAX_GPR_REG; reg++) { qat_hal_init_rd_xfer( handle, ae, 0, ICP_SR_RD_ABS, reg, 0); qat_hal_init_rd_xfer( handle, ae, 0, ICP_DR_RD_ABS, reg, 0); } } } static int qat_hal_clear_gpr(struct icp_qat_fw_loader_handle *handle) { unsigned char ae; unsigned int ctx_mask = ICP_QAT_UCLO_AE_ALL_CTX; int times = MAX_RETRY_TIMES; unsigned int csr_val = 0; unsigned int savctx = 0; unsigned int scs_flag = 0; unsigned long ae_mask = handle->hal_handle->ae_mask; int ret = 0; for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) { qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL, &csr_val); scs_flag = csr_val & (1 << MMC_SHARE_CS_BITPOS); csr_val &= ~(1 << MMC_SHARE_CS_BITPOS); qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL, csr_val); qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &csr_val); csr_val &= IGNORE_W1C_MASK; if (!IS_QAT_GEN4(pci_get_device(GET_DEV(handle->accel_dev)))) { csr_val |= CE_NN_MODE; } qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, csr_val); if (IS_QAT_GEN4(pci_get_device(GET_DEV(handle->accel_dev)))) { if (ae % 4 == 0) qat_hal_wr_uwords(handle, ae, 0, ARRAY_SIZE(inst_CPM2X), (const uint64_t *)inst_CPM2X); } else { qat_hal_wr_uwords(handle, ae, 0, ARRAY_SIZE(inst), (const uint64_t *)inst); } qat_hal_wr_indr_csr(handle, ae, ctx_mask, CTX_STS_INDIRECT, handle->hal_handle->upc_mask & INIT_PC_VALUE); qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS, &savctx); qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS, 0); qat_hal_put_wakeup_event(handle, ae, ctx_mask, XCWE_VOLUNTARY); qat_hal_wr_indr_csr( handle, ae, ctx_mask, CTX_SIG_EVENTS_INDIRECT, 0); qat_hal_wr_ae_csr(handle, ae, CTX_SIG_EVENTS_ACTIVE, 0); qat_hal_enable_ctx(handle, ae, ctx_mask); } for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) { /* wait for AE to finish */ do { ret = qat_hal_wait_cycles(handle, ae, 20, 1); } while (ret && times--); if (times < 0) { pr_err("QAT: clear GPR of AE %d failed", ae); return EINVAL; } qat_hal_disable_ctx(handle, ae, ctx_mask); qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL, &csr_val); if (scs_flag) csr_val |= (1 << MMC_SHARE_CS_BITPOS); qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL, csr_val); qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS, savctx & ACS_ACNO); qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, INIT_CTX_ENABLE_VALUE); qat_hal_wr_indr_csr(handle, ae, ctx_mask, CTX_STS_INDIRECT, 
handle->hal_handle->upc_mask & INIT_PC_VALUE); qat_hal_wr_ae_csr(handle, ae, CTX_ARB_CNTL, INIT_CTX_ARB_VALUE); qat_hal_wr_ae_csr(handle, ae, CC_ENABLE, INIT_CCENABLE_VALUE); qat_hal_put_wakeup_event(handle, ae, ctx_mask, INIT_WAKEUP_EVENTS_VALUE); qat_hal_put_sig_event(handle, ae, ctx_mask, INIT_SIG_EVENTS_VALUE); } return 0; } static int qat_hal_check_imr(struct icp_qat_fw_loader_handle *handle) { device_t dev = accel_to_pci_dev(handle->accel_dev); u8 reg_val = 0; if (pci_get_device(GET_DEV(handle->accel_dev)) != ADF_C3XXX_PCI_DEVICE_ID && pci_get_device(GET_DEV(handle->accel_dev)) != ADF_200XX_PCI_DEVICE_ID) return 0; reg_val = pci_read_config(dev, 0x04, 1); /* * PCI command register memory bit and rambaseaddr_lo address * are checked to confirm IMR2 is enabled in BIOS settings */ if ((reg_val & 0x2) && GET_FCU_CSR(handle, FCU_RAMBASE_ADDR_LO)) return 0; return EINVAL; } int qat_hal_init(struct adf_accel_dev *accel_dev) { unsigned char ae; unsigned int cap_offset, ae_offset, ep_offset; unsigned int sram_offset = 0; unsigned int max_en_ae_id = 0; int ret = 0; unsigned long ae_mask; struct icp_qat_fw_loader_handle *handle; if (!accel_dev) { return EFAULT; } struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev; struct adf_hw_device_data *hw_data = accel_dev->hw_device; struct adf_bar *misc_bar = &pci_info->pci_bars[hw_data->get_misc_bar_id(hw_data)]; struct adf_bar *sram_bar; handle = malloc(sizeof(*handle), M_QAT, M_WAITOK | M_ZERO); handle->hal_misc_addr_v = misc_bar->virt_addr; handle->accel_dev = accel_dev; if (pci_get_device(GET_DEV(handle->accel_dev)) == ADF_DH895XCC_PCI_DEVICE_ID || IS_QAT_GEN3(pci_get_device(GET_DEV(handle->accel_dev)))) { sram_bar = &pci_info->pci_bars[hw_data->get_sram_bar_id(hw_data)]; if (IS_QAT_GEN3(pci_get_device(GET_DEV(handle->accel_dev)))) sram_offset = 0x400000 + accel_dev->aram_info->mmp_region_offset; handle->hal_sram_addr_v = sram_bar->virt_addr; handle->hal_sram_offset = sram_offset; handle->hal_sram_size = sram_bar->size; } GET_CSR_OFFSET(pci_get_device(GET_DEV(handle->accel_dev)), cap_offset, ae_offset, ep_offset); handle->hal_cap_g_ctl_csr_addr_v = cap_offset; handle->hal_cap_ae_xfer_csr_addr_v = ae_offset; handle->hal_ep_csr_addr_v = ep_offset; handle->hal_cap_ae_local_csr_addr_v = - ((uintptr_t)handle->hal_cap_ae_xfer_csr_addr_v + - LOCAL_TO_XFER_REG_OFFSET); + ((uintptr_t)handle->hal_cap_ae_xfer_csr_addr_v + LOCAL_TO_XFER_REG_OFFSET); handle->fw_auth = (pci_get_device(GET_DEV(handle->accel_dev)) == ADF_DH895XCC_PCI_DEVICE_ID) ? 
false : true; if (handle->fw_auth && qat_hal_check_imr(handle)) { device_printf(GET_DEV(accel_dev), "IMR2 not enabled in BIOS\n"); ret = EINVAL; goto out_hal_handle; } handle->hal_handle = malloc(sizeof(*handle->hal_handle), M_QAT, M_WAITOK | M_ZERO); handle->hal_handle->revision_id = accel_dev->accel_pci_dev.revid; handle->hal_handle->ae_mask = hw_data->ae_mask; handle->hal_handle->admin_ae_mask = hw_data->admin_ae_mask; handle->hal_handle->slice_mask = hw_data->accel_mask; handle->cfg_ae_mask = 0xFFFFFFFF; /* create AE objects */ if (IS_QAT_GEN3(pci_get_device(GET_DEV(handle->accel_dev)))) { handle->hal_handle->upc_mask = 0xffff; handle->hal_handle->max_ustore = 0x2000; } else { handle->hal_handle->upc_mask = 0x1ffff; handle->hal_handle->max_ustore = 0x4000; } ae_mask = hw_data->ae_mask; for_each_set_bit(ae, &ae_mask, ICP_QAT_UCLO_MAX_AE) { handle->hal_handle->aes[ae].free_addr = 0; handle->hal_handle->aes[ae].free_size = handle->hal_handle->max_ustore; handle->hal_handle->aes[ae].ustore_size = handle->hal_handle->max_ustore; handle->hal_handle->aes[ae].live_ctx_mask = ICP_QAT_UCLO_AE_ALL_CTX; max_en_ae_id = ae; } handle->hal_handle->ae_max_num = max_en_ae_id + 1; /* take all AEs out of reset */ if (qat_hal_clr_reset(handle)) { device_printf(GET_DEV(accel_dev), "qat_hal_clr_reset error\n"); ret = EIO; goto out_err; } qat_hal_clear_xfer(handle); if (!handle->fw_auth) { if (qat_hal_clear_gpr(handle)) { ret = EIO; goto out_err; } } /* Set SIGNATURE_ENABLE[0] to 0x1 in order to enable ALU_OUT csr */ for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) { unsigned int csr_val = 0; qat_hal_rd_ae_csr(handle, ae, SIGNATURE_ENABLE, &csr_val); csr_val |= 0x1; qat_hal_wr_ae_csr(handle, ae, SIGNATURE_ENABLE, csr_val); } accel_dev->fw_loader->fw_loader = handle; return 0; out_err: free(handle->hal_handle, M_QAT); out_hal_handle: free(handle, M_QAT); return ret; } void qat_hal_deinit(struct icp_qat_fw_loader_handle *handle) { if (!handle) return; free(handle->hal_handle, M_QAT); free(handle, M_QAT); } int qat_hal_start(struct icp_qat_fw_loader_handle *handle) { unsigned char ae = 0; int retry = 0; unsigned int fcu_sts = 0; unsigned int fcu_ctl_csr, fcu_sts_csr; unsigned long ae_mask = handle->hal_handle->ae_mask; u32 ae_ctr = 0; if (handle->fw_auth) { for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) { ae_ctr++; } if (IS_QAT_GEN3_OR_GEN4( pci_get_device(GET_DEV(handle->accel_dev)))) { fcu_ctl_csr = FCU_CONTROL_C4XXX; fcu_sts_csr = FCU_STATUS_C4XXX; } else { fcu_ctl_csr = FCU_CONTROL; fcu_sts_csr = FCU_STATUS; } SET_FCU_CSR(handle, fcu_ctl_csr, FCU_CTRL_CMD_START); do { pause_ms("adfstop", FW_AUTH_WAIT_PERIOD); fcu_sts = GET_FCU_CSR(handle, fcu_sts_csr); if (((fcu_sts >> FCU_STS_DONE_POS) & 0x1)) return ae_ctr; } while (retry++ < FW_AUTH_MAX_RETRY); pr_err("QAT: start error (AE 0x%x FCU_STS = 0x%x)\n", ae, fcu_sts); return 0; } else { for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) { qat_hal_put_wakeup_event(handle, ae, 0, IS_QAT_GEN4( pci_get_device(GET_DEV( handle->accel_dev))) ? 
0x80000000 : 0x10000); qat_hal_enable_ctx(handle, ae, ICP_QAT_UCLO_AE_ALL_CTX); ae_ctr++; } return ae_ctr; } } void qat_hal_stop(struct icp_qat_fw_loader_handle *handle, unsigned char ae, unsigned int ctx_mask) { if (!handle->fw_auth) qat_hal_disable_ctx(handle, ae, ctx_mask); } void qat_hal_set_pc(struct icp_qat_fw_loader_handle *handle, unsigned char ae, unsigned int ctx_mask, unsigned int upc) { qat_hal_wr_indr_csr(handle, ae, ctx_mask, CTX_STS_INDIRECT, handle->hal_handle->upc_mask & upc); } static void qat_hal_get_uwords(struct icp_qat_fw_loader_handle *handle, unsigned char ae, unsigned int uaddr, unsigned int words_num, uint64_t *uword) { unsigned int i, uwrd_lo, uwrd_hi; unsigned int ustore_addr, misc_control; unsigned int scs_flag = 0; qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL, &misc_control); scs_flag = misc_control & (0x1 << MMC_SHARE_CS_BITPOS); /*disable scs*/ qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL, misc_control & 0xfffffffb); qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS, &ustore_addr); uaddr |= UA_ECS; for (i = 0; i < words_num; i++) { qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr); uaddr++; qat_hal_rd_ae_csr(handle, ae, USTORE_DATA_LOWER, &uwrd_lo); qat_hal_rd_ae_csr(handle, ae, USTORE_DATA_UPPER, &uwrd_hi); uword[i] = uwrd_hi; uword[i] = (uword[i] << 0x20) | uwrd_lo; } if (scs_flag) misc_control |= (0x1 << MMC_SHARE_CS_BITPOS); qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL, misc_control); qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, ustore_addr); } void qat_hal_wr_umem(struct icp_qat_fw_loader_handle *handle, unsigned char ae, unsigned int uaddr, unsigned int words_num, unsigned int *data) { unsigned int i, ustore_addr; qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS, &ustore_addr); uaddr |= UA_ECS; qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr); for (i = 0; i < words_num; i++) { unsigned int uwrd_lo, uwrd_hi, tmp; uwrd_lo = ((data[i] & 0xfff0000) << 4) | (0x3 << 18) | ((data[i] & 0xff00) << 2) | (0x3 << 8) | (data[i] & 0xff); uwrd_hi = (0xf << 4) | ((data[i] & 0xf0000000) >> 28); uwrd_hi |= (bitcount32(data[i] & 0xffff) & 0x1) << 8; tmp = ((data[i] >> 0x10) & 0xffff); uwrd_hi |= (bitcount32(tmp) & 0x1) << 9; qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_LOWER, uwrd_lo); qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_UPPER, uwrd_hi); } qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, ustore_addr); } #define MAX_EXEC_INST 100 static int qat_hal_exec_micro_inst(struct icp_qat_fw_loader_handle *handle, unsigned char ae, unsigned char ctx, uint64_t *micro_inst, unsigned int inst_num, int code_off, unsigned int max_cycle, unsigned int *endpc) { - uint64_t savuwords[MAX_EXEC_INST]; + u64 *savuwords = NULL; unsigned int ind_lm_addr0, ind_lm_addr1; unsigned int ind_lm_addr2, ind_lm_addr3; unsigned int ind_lm_addr_byte0, ind_lm_addr_byte1; unsigned int ind_lm_addr_byte2, ind_lm_addr_byte3; unsigned int ind_t_index, ind_t_index_byte; unsigned int ind_cnt_sig; unsigned int ind_sig, act_sig; unsigned int csr_val = 0, newcsr_val; unsigned int savctx, scs_flag; unsigned int savcc, wakeup_events, savpc; unsigned int ctxarb_ctl, ctx_enables; if (inst_num > handle->hal_handle->max_ustore || !micro_inst) { pr_err("QAT: invalid instruction num %d\n", inst_num); return EINVAL; } + + savuwords = kzalloc(sizeof(u64) * MAX_EXEC_INST, GFP_KERNEL); + if (!savuwords) + return ENOMEM; + /* save current context */ qat_hal_rd_indr_csr(handle, ae, ctx, LM_ADDR_0_INDIRECT, &ind_lm_addr0); qat_hal_rd_indr_csr(handle, ae, ctx, LM_ADDR_1_INDIRECT, &ind_lm_addr1); qat_hal_rd_indr_csr( handle, ae, 
ctx, INDIRECT_LM_ADDR_0_BYTE_INDEX, &ind_lm_addr_byte0); qat_hal_rd_indr_csr( handle, ae, ctx, INDIRECT_LM_ADDR_1_BYTE_INDEX, &ind_lm_addr_byte1); if (IS_QAT_GEN3_OR_GEN4(pci_get_device(GET_DEV(handle->accel_dev)))) { qat_hal_rd_indr_csr( handle, ae, ctx, LM_ADDR_2_INDIRECT, &ind_lm_addr2); qat_hal_rd_indr_csr( handle, ae, ctx, LM_ADDR_3_INDIRECT, &ind_lm_addr3); qat_hal_rd_indr_csr(handle, ae, ctx, INDIRECT_LM_ADDR_2_BYTE_INDEX, &ind_lm_addr_byte2); qat_hal_rd_indr_csr(handle, ae, ctx, INDIRECT_LM_ADDR_3_BYTE_INDEX, &ind_lm_addr_byte3); qat_hal_rd_indr_csr( handle, ae, ctx, INDIRECT_T_INDEX, &ind_t_index); qat_hal_rd_indr_csr(handle, ae, ctx, INDIRECT_T_INDEX_BYTE_INDEX, &ind_t_index_byte); } qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL, &csr_val); scs_flag = csr_val & (1 << MMC_SHARE_CS_BITPOS); newcsr_val = CLR_BIT(csr_val, MMC_SHARE_CS_BITPOS); qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL, newcsr_val); if (inst_num <= MAX_EXEC_INST) qat_hal_get_uwords(handle, ae, 0, inst_num, savuwords); qat_hal_get_wakeup_event(handle, ae, ctx, &wakeup_events); qat_hal_rd_indr_csr(handle, ae, ctx, CTX_STS_INDIRECT, &savpc); savpc = (savpc & handle->hal_handle->upc_mask) >> 0; qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables); ctx_enables &= IGNORE_W1C_MASK; qat_hal_rd_ae_csr(handle, ae, CC_ENABLE, &savcc); qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS, &savctx); qat_hal_rd_ae_csr(handle, ae, CTX_ARB_CNTL, &ctxarb_ctl); qat_hal_rd_indr_csr( handle, ae, ctx, FUTURE_COUNT_SIGNAL_INDIRECT, &ind_cnt_sig); qat_hal_rd_indr_csr(handle, ae, ctx, CTX_SIG_EVENTS_INDIRECT, &ind_sig); qat_hal_rd_ae_csr(handle, ae, CTX_SIG_EVENTS_ACTIVE, &act_sig); /* execute micro codes */ qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables); qat_hal_wr_uwords(handle, ae, 0, inst_num, micro_inst); qat_hal_wr_indr_csr(handle, ae, (1 << ctx), CTX_STS_INDIRECT, 0); qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS, ctx & ACS_ACNO); if (code_off) qat_hal_wr_ae_csr(handle, ae, CC_ENABLE, savcc & 0xffffdfff); qat_hal_put_wakeup_event(handle, ae, (1 << ctx), XCWE_VOLUNTARY); qat_hal_wr_indr_csr(handle, ae, (1 << ctx), CTX_SIG_EVENTS_INDIRECT, 0); qat_hal_wr_ae_csr(handle, ae, CTX_SIG_EVENTS_ACTIVE, 0); qat_hal_enable_ctx(handle, ae, (1 << ctx)); /* wait for micro codes to finish */ - if (qat_hal_wait_cycles(handle, ae, max_cycle, 1) != 0) + if (qat_hal_wait_cycles(handle, ae, max_cycle, 1) != 0) { + kfree(savuwords); return EFAULT; + } if (endpc) { unsigned int ctx_status; qat_hal_rd_indr_csr( handle, ae, ctx, CTX_STS_INDIRECT, &ctx_status); *endpc = ctx_status & handle->hal_handle->upc_mask; } /* retore to saved context */ qat_hal_disable_ctx(handle, ae, (1 << ctx)); if (inst_num <= MAX_EXEC_INST) qat_hal_wr_uwords(handle, ae, 0, inst_num, savuwords); qat_hal_put_wakeup_event(handle, ae, (1 << ctx), wakeup_events); qat_hal_wr_indr_csr(handle, ae, (1 << ctx), CTX_STS_INDIRECT, handle->hal_handle->upc_mask & savpc); qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL, &csr_val); newcsr_val = scs_flag ? 
SET_BIT(csr_val, MMC_SHARE_CS_BITPOS) : CLR_BIT(csr_val, MMC_SHARE_CS_BITPOS); qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL, newcsr_val); qat_hal_wr_ae_csr(handle, ae, CC_ENABLE, savcc); qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS, savctx & ACS_ACNO); qat_hal_wr_ae_csr(handle, ae, CTX_ARB_CNTL, ctxarb_ctl); qat_hal_wr_indr_csr( handle, ae, (1 << ctx), LM_ADDR_0_INDIRECT, ind_lm_addr0); qat_hal_wr_indr_csr( handle, ae, (1 << ctx), LM_ADDR_1_INDIRECT, ind_lm_addr1); qat_hal_wr_indr_csr(handle, ae, (1 << ctx), INDIRECT_LM_ADDR_0_BYTE_INDEX, ind_lm_addr_byte0); qat_hal_wr_indr_csr(handle, ae, (1 << ctx), INDIRECT_LM_ADDR_1_BYTE_INDEX, ind_lm_addr_byte1); if (IS_QAT_GEN3_OR_GEN4(pci_get_device(GET_DEV(handle->accel_dev)))) { qat_hal_wr_indr_csr( handle, ae, (1 << ctx), LM_ADDR_2_INDIRECT, ind_lm_addr2); qat_hal_wr_indr_csr( handle, ae, (1 << ctx), LM_ADDR_3_INDIRECT, ind_lm_addr3); qat_hal_wr_indr_csr(handle, ae, (1 << ctx), INDIRECT_LM_ADDR_2_BYTE_INDEX, ind_lm_addr_byte2); qat_hal_wr_indr_csr(handle, ae, (1 << ctx), INDIRECT_LM_ADDR_3_BYTE_INDEX, ind_lm_addr_byte3); qat_hal_wr_indr_csr( handle, ae, (1 << ctx), INDIRECT_T_INDEX, ind_t_index); qat_hal_wr_indr_csr(handle, ae, (1 << ctx), INDIRECT_T_INDEX_BYTE_INDEX, ind_t_index_byte); } qat_hal_wr_indr_csr( handle, ae, (1 << ctx), FUTURE_COUNT_SIGNAL_INDIRECT, ind_cnt_sig); qat_hal_wr_indr_csr( handle, ae, (1 << ctx), CTX_SIG_EVENTS_INDIRECT, ind_sig); qat_hal_wr_ae_csr(handle, ae, CTX_SIG_EVENTS_ACTIVE, act_sig); qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables); + kfree(savuwords); return 0; } static int qat_hal_rd_rel_reg(struct icp_qat_fw_loader_handle *handle, unsigned char ae, unsigned char ctx, enum icp_qat_uof_regtype reg_type, unsigned short reg_num, unsigned int *data) { unsigned int savctx, uaddr, uwrd_lo, uwrd_hi; unsigned int ctxarb_cntl, ustore_addr, ctx_enables; unsigned short reg_addr; int status = 0; unsigned int scs_flag = 0; unsigned int csr_val = 0, newcsr_val = 0; u64 insts, savuword; reg_addr = qat_hal_get_reg_addr(reg_type, reg_num); if (reg_addr == BAD_REGADDR) { pr_err("QAT: bad regaddr=0x%x\n", reg_addr); return EINVAL; } switch (reg_type) { case ICP_GPA_REL: insts = 0xA070000000ull | (reg_addr & 0x3ff); break; default: insts = (uint64_t)0xA030000000ull | ((reg_addr & 0x3ff) << 10); break; } qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL, &csr_val); scs_flag = csr_val & (1 << MMC_SHARE_CS_BITPOS); newcsr_val = CLR_BIT(csr_val, MMC_SHARE_CS_BITPOS); qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL, newcsr_val); qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS, &savctx); qat_hal_rd_ae_csr(handle, ae, CTX_ARB_CNTL, &ctxarb_cntl); qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables); ctx_enables &= IGNORE_W1C_MASK; if (ctx != (savctx & ACS_ACNO)) qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS, ctx & ACS_ACNO); qat_hal_get_uwords(handle, ae, 0, 1, &savuword); qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables); qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS, &ustore_addr); uaddr = UA_ECS; qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr); insts = qat_hal_set_uword_ecc(insts); uwrd_lo = (unsigned int)(insts & 0xffffffff); uwrd_hi = (unsigned int)(insts >> 0x20); qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_LOWER, uwrd_lo); qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_UPPER, uwrd_hi); qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr); /* delay for at least 8 cycles */ qat_hal_wait_cycles(handle, ae, 0x8, 0); /* * read ALU output * the instruction should have been executed * prior to clearing the ECS in putUwords 
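 * (qat_hal_init() sets SIGNATURE_ENABLE[0] on each AE, which is what makes
 * the ALU_OUT CSR readable at this point)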
*/ qat_hal_rd_ae_csr(handle, ae, ALU_OUT, data); qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, ustore_addr); qat_hal_wr_uwords(handle, ae, 0, 1, &savuword); if (ctx != (savctx & ACS_ACNO)) qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS, savctx & ACS_ACNO); qat_hal_wr_ae_csr(handle, ae, CTX_ARB_CNTL, ctxarb_cntl); qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL, &csr_val); newcsr_val = scs_flag ? SET_BIT(csr_val, MMC_SHARE_CS_BITPOS) : CLR_BIT(csr_val, MMC_SHARE_CS_BITPOS); qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL, newcsr_val); qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables); return status; } static int qat_hal_wr_rel_reg(struct icp_qat_fw_loader_handle *handle, unsigned char ae, unsigned char ctx, enum icp_qat_uof_regtype reg_type, unsigned short reg_num, unsigned int data) { unsigned short src_hiaddr, src_lowaddr, dest_addr, data16hi, data16lo; uint64_t insts[] = { 0x0F440000000ull, 0x0F040000000ull, 0x0F0000C0300ull, 0x0E000010000ull }; const int num_inst = ARRAY_SIZE(insts), code_off = 1; const int imm_w1 = 0, imm_w0 = 1; dest_addr = qat_hal_get_reg_addr(reg_type, reg_num); if (dest_addr == BAD_REGADDR) { pr_err("QAT: bad destAddr=0x%x\n", dest_addr); return EINVAL; } data16lo = 0xffff & data; data16hi = 0xffff & (data >> 0x10); src_hiaddr = qat_hal_get_reg_addr(ICP_NO_DEST, (unsigned short)(0xff & data16hi)); src_lowaddr = qat_hal_get_reg_addr(ICP_NO_DEST, (unsigned short)(0xff & data16lo)); switch (reg_type) { case ICP_GPA_REL: insts[imm_w1] = insts[imm_w1] | ((data16hi >> 8) << 20) | ((src_hiaddr & 0x3ff) << 10) | (dest_addr & 0x3ff); insts[imm_w0] = insts[imm_w0] | ((data16lo >> 8) << 20) | ((src_lowaddr & 0x3ff) << 10) | (dest_addr & 0x3ff); break; default: insts[imm_w1] = insts[imm_w1] | ((data16hi >> 8) << 20) | ((dest_addr & 0x3ff) << 10) | (src_hiaddr & 0x3ff); insts[imm_w0] = insts[imm_w0] | ((data16lo >> 8) << 20) | ((dest_addr & 0x3ff) << 10) | (src_lowaddr & 0x3ff); break; } return qat_hal_exec_micro_inst( handle, ae, ctx, insts, num_inst, code_off, num_inst * 0x5, NULL); } int qat_hal_get_ins_num(void) { return ARRAY_SIZE(inst_4b); } static int qat_hal_concat_micro_code(uint64_t *micro_inst, unsigned int inst_num, unsigned int size, unsigned int addr, unsigned int *value) { int i; unsigned int cur_value; const uint64_t *inst_arr; unsigned int fixup_offset; int usize = 0; unsigned int orig_num; unsigned int delta; orig_num = inst_num; fixup_offset = inst_num; cur_value = value[0]; inst_arr = inst_4b; usize = ARRAY_SIZE(inst_4b); for (i = 0; i < usize; i++) micro_inst[inst_num++] = inst_arr[i]; INSERT_IMMED_GPRA_CONST(micro_inst[fixup_offset], (addr)); fixup_offset++; INSERT_IMMED_GPRA_CONST(micro_inst[fixup_offset], 0); fixup_offset++; INSERT_IMMED_GPRB_CONST(micro_inst[fixup_offset], (cur_value >> 0)); fixup_offset++; INSERT_IMMED_GPRB_CONST(micro_inst[fixup_offset], (cur_value >> 0x10)); delta = inst_num - orig_num; return (int)delta; } static int qat_hal_exec_micro_init_lm(struct icp_qat_fw_loader_handle *handle, unsigned char ae, unsigned char ctx, int *pfirst_exec, uint64_t *micro_inst, unsigned int inst_num) { int stat = 0; unsigned int gpra0 = 0, gpra1 = 0, gpra2 = 0; unsigned int gprb0 = 0, gprb1 = 0; if (*pfirst_exec) { qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0, &gpra0); qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0x1, &gpra1); qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0x2, &gpra2); qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPB_REL, 0, &gprb0); qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPB_REL, 0x1, &gprb1); *pfirst_exec = 0; } stat = 
qat_hal_exec_micro_inst( handle, ae, ctx, micro_inst, inst_num, 1, inst_num * 0x5, NULL); if (stat != 0) return EFAULT; qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0, gpra0); qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0x1, gpra1); qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0x2, gpra2); qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPB_REL, 0, gprb0); qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPB_REL, 0x1, gprb1); return 0; } int qat_hal_batch_wr_lm(struct icp_qat_fw_loader_handle *handle, unsigned char ae, struct icp_qat_uof_batch_init *lm_init_header) { struct icp_qat_uof_batch_init *plm_init; uint64_t *micro_inst_arry; int micro_inst_num; int alloc_inst_size; int first_exec = 1; int stat = 0; if (!lm_init_header) return 0; plm_init = lm_init_header->next; alloc_inst_size = lm_init_header->size; if ((unsigned int)alloc_inst_size > handle->hal_handle->max_ustore) alloc_inst_size = handle->hal_handle->max_ustore; micro_inst_arry = malloc(alloc_inst_size * sizeof(uint64_t), M_QAT, M_WAITOK | M_ZERO); micro_inst_num = 0; while (plm_init) { unsigned int addr, *value, size; ae = plm_init->ae; addr = plm_init->addr; value = plm_init->value; size = plm_init->size; micro_inst_num += qat_hal_concat_micro_code( micro_inst_arry, micro_inst_num, size, addr, value); plm_init = plm_init->next; } /* exec micro codes */ if (micro_inst_arry && micro_inst_num > 0) { micro_inst_arry[micro_inst_num++] = 0x0E000010000ull; stat = qat_hal_exec_micro_init_lm(handle, ae, 0, &first_exec, micro_inst_arry, micro_inst_num); } free(micro_inst_arry, M_QAT); return stat; } static int qat_hal_put_rel_rd_xfer(struct icp_qat_fw_loader_handle *handle, unsigned char ae, unsigned char ctx, enum icp_qat_uof_regtype reg_type, unsigned short reg_num, unsigned int val) { int status = 0; unsigned int reg_addr; unsigned int ctx_enables; unsigned short mask; unsigned short dr_offset = 0x10; status = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables); if (CE_INUSE_CONTEXTS & ctx_enables) { if (ctx & 0x1) { pr_err("QAT: bad 4-ctx mode,ctx=0x%x\n", ctx); return EINVAL; } mask = 0x1f; dr_offset = 0x20; } else { mask = 0x0f; } if (reg_num & ~mask) return EINVAL; reg_addr = reg_num + (ctx << 0x5); switch (reg_type) { case ICP_SR_RD_REL: case ICP_SR_REL: SET_AE_XFER(handle, ae, reg_addr, val); break; case ICP_DR_RD_REL: case ICP_DR_REL: SET_AE_XFER(handle, ae, (reg_addr + dr_offset), val); break; default: status = EINVAL; break; } return status; } static int qat_hal_put_rel_wr_xfer(struct icp_qat_fw_loader_handle *handle, unsigned char ae, unsigned char ctx, enum icp_qat_uof_regtype reg_type, unsigned short reg_num, unsigned int data) { unsigned int gprval, ctx_enables; unsigned short src_hiaddr, src_lowaddr, gpr_addr, xfr_addr, data16hi, data16low; unsigned short reg_mask; int status = 0; uint64_t micro_inst[] = { 0x0F440000000ull, 0x0F040000000ull, 0x0A000000000ull, 0x0F0000C0300ull, 0x0E000010000ull }; const int num_inst = ARRAY_SIZE(micro_inst), code_off = 1; const unsigned short gprnum = 0, dly = num_inst * 0x5; qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables); if (CE_INUSE_CONTEXTS & ctx_enables) { if (ctx & 0x1) { pr_err("QAT: 4-ctx mode,ctx=0x%x\n", ctx); return EINVAL; } reg_mask = (unsigned short)~0x1f; } else { reg_mask = (unsigned short)~0xf; } if (reg_num & reg_mask) return EINVAL; xfr_addr = qat_hal_get_reg_addr(reg_type, reg_num); if (xfr_addr == BAD_REGADDR) { pr_err("QAT: bad xfrAddr=0x%x\n", xfr_addr); return EINVAL; } qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPB_REL, gprnum, &gprval); gpr_addr = 
qat_hal_get_reg_addr(ICP_GPB_REL, gprnum); data16low = 0xffff & data; data16hi = 0xffff & (data >> 0x10); src_hiaddr = qat_hal_get_reg_addr(ICP_NO_DEST, (unsigned short)(0xff & data16hi)); src_lowaddr = qat_hal_get_reg_addr(ICP_NO_DEST, (unsigned short)(0xff & data16low)); micro_inst[0] = micro_inst[0x0] | ((data16hi >> 8) << 20) | ((gpr_addr & 0x3ff) << 10) | (src_hiaddr & 0x3ff); micro_inst[1] = micro_inst[0x1] | ((data16low >> 8) << 20) | ((gpr_addr & 0x3ff) << 10) | (src_lowaddr & 0x3ff); micro_inst[0x2] = micro_inst[0x2] | ((xfr_addr & 0x3ff) << 20) | ((gpr_addr & 0x3ff) << 10); status = qat_hal_exec_micro_inst( handle, ae, ctx, micro_inst, num_inst, code_off, dly, NULL); qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPB_REL, gprnum, gprval); return status; } static int qat_hal_put_rel_nn(struct icp_qat_fw_loader_handle *handle, unsigned char ae, unsigned char ctx, unsigned short nn, unsigned int val) { unsigned int ctx_enables; int stat = 0; qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables); ctx_enables &= IGNORE_W1C_MASK; qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables | CE_NN_MODE); stat = qat_hal_put_rel_wr_xfer(handle, ae, ctx, ICP_NEIGH_REL, nn, val); qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables); return stat; } static int qat_hal_convert_abs_to_rel(struct icp_qat_fw_loader_handle *handle, unsigned char ae, unsigned short absreg_num, unsigned short *relreg, unsigned char *ctx) { unsigned int ctx_enables; qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables); if (ctx_enables & CE_INUSE_CONTEXTS) { /* 4-ctx mode */ *relreg = absreg_num & 0x1F; *ctx = (absreg_num >> 0x4) & 0x6; } else { /* 8-ctx mode */ *relreg = absreg_num & 0x0F; *ctx = (absreg_num >> 0x4) & 0x7; } return 0; } int qat_hal_init_gpr(struct icp_qat_fw_loader_handle *handle, unsigned char ae, unsigned long ctx_mask, enum icp_qat_uof_regtype reg_type, unsigned short reg_num, unsigned int regdata) { int stat = 0; unsigned short reg; unsigned char ctx = 0; enum icp_qat_uof_regtype type; if (reg_num >= ICP_QAT_UCLO_MAX_GPR_REG) return EINVAL; do { if (ctx_mask == 0) { qat_hal_convert_abs_to_rel( handle, ae, reg_num, &reg, &ctx); type = reg_type - 1; } else { reg = reg_num; type = reg_type; if (!test_bit(ctx, &ctx_mask)) continue; } stat = qat_hal_wr_rel_reg(handle, ae, ctx, type, reg, regdata); if (stat) { pr_err("QAT: write gpr fail\n"); return EINVAL; } } while (ctx_mask && (ctx++ < ICP_QAT_UCLO_MAX_CTX)); return 0; } int qat_hal_init_wr_xfer(struct icp_qat_fw_loader_handle *handle, unsigned char ae, unsigned long ctx_mask, enum icp_qat_uof_regtype reg_type, unsigned short reg_num, unsigned int regdata) { int stat = 0; unsigned short reg; unsigned char ctx = 0; enum icp_qat_uof_regtype type; if (reg_num >= ICP_QAT_UCLO_MAX_XFER_REG) return EINVAL; do { if (ctx_mask == 0) { qat_hal_convert_abs_to_rel( handle, ae, reg_num, &reg, &ctx); type = reg_type - 3; } else { reg = reg_num; type = reg_type; if (!test_bit(ctx, &ctx_mask)) continue; } stat = qat_hal_put_rel_wr_xfer( handle, ae, ctx, type, reg, regdata); if (stat) { pr_err("QAT: write wr xfer fail\n"); return EINVAL; } } while (ctx_mask && (ctx++ < ICP_QAT_UCLO_MAX_CTX)); return 0; } int qat_hal_init_rd_xfer(struct icp_qat_fw_loader_handle *handle, unsigned char ae, unsigned long ctx_mask, enum icp_qat_uof_regtype reg_type, unsigned short reg_num, unsigned int regdata) { int stat = 0; unsigned short reg; unsigned char ctx = 0; enum icp_qat_uof_regtype type; if (reg_num >= ICP_QAT_UCLO_MAX_XFER_REG) return EINVAL; do { if (ctx_mask == 0) {
qat_hal_convert_abs_to_rel( handle, ae, reg_num, &reg, &ctx); type = reg_type - 3; } else { reg = reg_num; type = reg_type; if (!test_bit(ctx, &ctx_mask)) continue; } stat = qat_hal_put_rel_rd_xfer( handle, ae, ctx, type, reg, regdata); if (stat) { pr_err("QAT: write rd xfer fail\n"); return EINVAL; } } while (ctx_mask && (ctx++ < ICP_QAT_UCLO_MAX_CTX)); return 0; } int qat_hal_init_nn(struct icp_qat_fw_loader_handle *handle, unsigned char ae, unsigned long ctx_mask, unsigned short reg_num, unsigned int regdata) { int stat = 0; unsigned char ctx; if (IS_QAT_GEN4(pci_get_device(GET_DEV(handle->accel_dev)))) { pr_err("QAT: No next neigh for CPM2X\n"); return EINVAL; } if (ctx_mask == 0) return EINVAL; for_each_set_bit(ctx, &ctx_mask, ICP_QAT_UCLO_MAX_CTX) { stat = qat_hal_put_rel_nn(handle, ae, ctx, reg_num, regdata); if (stat) { pr_err("QAT: write neigh error\n"); return EINVAL; } } return 0; } diff --git a/sys/dev/qat/qat_common/qat_uclo.c b/sys/dev/qat/qat_common/qat_uclo.c index 2f4556fc71d5..54e8e8eb7421 100644 --- a/sys/dev/qat/qat_common/qat_uclo.c +++ b/sys/dev/qat/qat_common/qat_uclo.c @@ -1,2414 +1,2414 @@ /* SPDX-License-Identifier: BSD-3-Clause */ -/* Copyright(c) 2007-2022 Intel Corporation */ +/* Copyright(c) 2007-2025 Intel Corporation */ #include "qat_freebsd.h" #include "adf_cfg.h" #include "adf_common_drv.h" #include "adf_accel_devices.h" #include "icp_qat_uclo.h" #include "icp_qat_fw.h" #include "icp_qat_fw_init_admin.h" #include "adf_cfg_strings.h" #include "adf_transport_access_macros.h" #include "adf_transport_internal.h" #include #include #include #include "adf_accel_devices.h" #include "adf_common_drv.h" #include "icp_qat_uclo.h" #include "icp_qat_hal.h" #include "icp_qat_fw_loader_handle.h" #define UWORD_CPYBUF_SIZE 1024 #define INVLD_UWORD 0xffffffffffull #define PID_MINOR_REV 0xf #define PID_MAJOR_REV (0xf << 4) #define MAX_UINT32_VAL 0xfffffffful static int qat_uclo_init_ae_data(struct icp_qat_uclo_objhandle *obj_handle, unsigned int ae, unsigned int image_num) { struct icp_qat_uclo_aedata *ae_data; struct icp_qat_uclo_encapme *encap_image; struct icp_qat_uclo_page *page = NULL; struct icp_qat_uclo_aeslice *ae_slice = NULL; ae_data = &obj_handle->ae_data[ae]; encap_image = &obj_handle->ae_uimage[image_num]; ae_slice = &ae_data->ae_slices[ae_data->slice_num]; ae_slice->encap_image = encap_image; if (encap_image->img_ptr) { ae_slice->ctx_mask_assigned = encap_image->img_ptr->ctx_assigned; ae_data->shareable_ustore = ICP_QAT_SHARED_USTORE_MODE(encap_image->img_ptr->ae_mode); if (obj_handle->prod_type == ICP_QAT_AC_4XXX_A_DEV_TYPE) ae_data->eff_ustore_size = obj_handle->ustore_phy_size; else { ae_data->eff_ustore_size = ae_data->shareable_ustore ?
(obj_handle->ustore_phy_size << 1) : obj_handle->ustore_phy_size; } } else { ae_slice->ctx_mask_assigned = 0; } ae_slice->region = malloc(sizeof(*ae_slice->region), M_QAT, M_WAITOK | M_ZERO); ae_slice->page = malloc(sizeof(*ae_slice->page), M_QAT, M_WAITOK | M_ZERO); page = ae_slice->page; page->encap_page = encap_image->page; ae_slice->page->region = ae_slice->region; ae_data->slice_num++; return 0; } static int qat_uclo_free_ae_data(struct icp_qat_uclo_aedata *ae_data) { unsigned int i; if (!ae_data) { pr_err("QAT: bad argument, ae_data is NULL\n "); return EINVAL; } for (i = 0; i < ae_data->slice_num; i++) { free(ae_data->ae_slices[i].region, M_QAT); ae_data->ae_slices[i].region = NULL; free(ae_data->ae_slices[i].page, M_QAT); ae_data->ae_slices[i].page = NULL; } return 0; } static char * qat_uclo_get_string(struct icp_qat_uof_strtable *str_table, unsigned int str_offset) { if (!str_table->table_len || str_offset > str_table->table_len) return NULL; return (char *)(((uintptr_t)(str_table->strings)) + str_offset); } static int qat_uclo_check_uof_format(struct icp_qat_uof_filehdr *hdr) { int maj = hdr->maj_ver & 0xff; int min = hdr->min_ver & 0xff; if (hdr->file_id != ICP_QAT_UOF_FID) { pr_err("QAT: Invalid header 0x%x\n", hdr->file_id); return EINVAL; } if (min != ICP_QAT_UOF_MINVER || maj != ICP_QAT_UOF_MAJVER) { pr_err("QAT: bad UOF version, major 0x%x, minor 0x%x\n", maj, min); return EINVAL; } return 0; } static int qat_uclo_check_suof_format(const struct icp_qat_suof_filehdr *suof_hdr) { int maj = suof_hdr->maj_ver & 0xff; int min = suof_hdr->min_ver & 0xff; if (suof_hdr->file_id != ICP_QAT_SUOF_FID) { pr_err("QAT: invalid header 0x%x\n", suof_hdr->file_id); return EINVAL; } if (suof_hdr->fw_type != 0) { pr_err("QAT: unsupported firmware type\n"); return EINVAL; } if (suof_hdr->num_chunks <= 0x1) { pr_err("QAT: SUOF chunk amount is incorrect\n"); return EINVAL; } if (maj != ICP_QAT_SUOF_MAJVER || min != ICP_QAT_SUOF_MINVER) { pr_err("QAT: bad SUOF version, major 0x%x, minor 0x%x\n", maj, min); return EINVAL; } return 0; } static int qat_uclo_wr_sram_by_words(struct icp_qat_fw_loader_handle *handle, unsigned int addr, const unsigned int *val, unsigned int num_in_bytes) { unsigned int outval; const unsigned char *ptr = (const unsigned char *)val; if (num_in_bytes > handle->hal_sram_size) { pr_err("QAT: error, mmp size overflow %d\n", num_in_bytes); return EINVAL; } while (num_in_bytes) { memcpy(&outval, ptr, 4); SRAM_WRITE(handle, addr, outval); num_in_bytes -= 4; ptr += 4; addr += 4; } return 0; } static void qat_uclo_wr_umem_by_words(struct icp_qat_fw_loader_handle *handle, unsigned char ae, unsigned int addr, unsigned int *val, unsigned int num_in_bytes) { unsigned int outval; unsigned char *ptr = (unsigned char *)val; addr >>= 0x2; /* convert to uword address */ while (num_in_bytes) { memcpy(&outval, ptr, 4); qat_hal_wr_umem(handle, ae, addr++, 1, &outval); num_in_bytes -= 4; ptr += 4; } } static void qat_uclo_batch_wr_umem(struct icp_qat_fw_loader_handle *handle, unsigned char ae, struct icp_qat_uof_batch_init *umem_init_header) { struct icp_qat_uof_batch_init *umem_init; if (!umem_init_header) return; umem_init = umem_init_header->next; while (umem_init) { unsigned int addr, *value, size; ae = umem_init->ae; addr = umem_init->addr; value = umem_init->value; size = umem_init->size; qat_uclo_wr_umem_by_words(handle, ae, addr, value, size); umem_init = umem_init->next; } } static void qat_uclo_cleanup_batch_init_list(struct icp_qat_fw_loader_handle *handle, struct 
icp_qat_uof_batch_init **base) { struct icp_qat_uof_batch_init *umem_init; umem_init = *base; while (umem_init) { struct icp_qat_uof_batch_init *pre; pre = umem_init; umem_init = umem_init->next; free(pre, M_QAT); } *base = NULL; } static int qat_uclo_parse_num(char *str, unsigned int *num) { char buf[16] = { 0 }; unsigned long ae = 0; int i; strncpy(buf, str, 15); for (i = 0; i < 16; i++) { if (!isdigit(buf[i])) { buf[i] = '\0'; break; } } if ((compat_strtoul(buf, 10, &ae))) return EFAULT; if (ae > MAX_UINT32_VAL) return EFAULT; *num = (unsigned int)ae; return 0; } static int qat_uclo_fetch_initmem_ae(struct icp_qat_fw_loader_handle *handle, struct icp_qat_uof_initmem *init_mem, unsigned int size_range, unsigned int *ae) { struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; char *str; if ((init_mem->addr + init_mem->num_in_bytes) > (size_range << 0x2)) { pr_err("QAT: initmem is out of range"); return EINVAL; } if (init_mem->scope != ICP_QAT_UOF_LOCAL_SCOPE) { pr_err("QAT: Memory scope for init_mem error\n"); return EINVAL; } str = qat_uclo_get_string(&obj_handle->str_table, init_mem->sym_name); if (!str) { pr_err("QAT: AE name assigned in UOF init table is NULL\n"); return EINVAL; } if (qat_uclo_parse_num(str, ae)) { pr_err("QAT: Parse num for AE number failed\n"); return EINVAL; } if (*ae >= ICP_QAT_UCLO_MAX_AE) { pr_err("QAT: ae %d out of range\n", *ae); return EINVAL; } return 0; } static int qat_uclo_create_batch_init_list(struct icp_qat_fw_loader_handle *handle, struct icp_qat_uof_initmem *init_mem, unsigned int ae, struct icp_qat_uof_batch_init **init_tab_base) { struct icp_qat_uof_batch_init *init_header, *tail; struct icp_qat_uof_batch_init *mem_init, *tail_old; struct icp_qat_uof_memvar_attr *mem_val_attr; unsigned int i = 0; mem_val_attr = (struct icp_qat_uof_memvar_attr *)((uintptr_t)init_mem + sizeof( struct icp_qat_uof_initmem)); init_header = *init_tab_base; if (!init_header) { init_header = malloc(sizeof(*init_header), M_QAT, M_WAITOK | M_ZERO); init_header->size = 1; *init_tab_base = init_header; } tail_old = init_header; while (tail_old->next) tail_old = tail_old->next; tail = tail_old; for (i = 0; i < init_mem->val_attr_num; i++) { mem_init = malloc(sizeof(*mem_init), M_QAT, M_WAITOK | M_ZERO); mem_init->ae = ae; mem_init->addr = init_mem->addr + mem_val_attr->offset_in_byte; mem_init->value = &mem_val_attr->value; mem_init->size = 4; mem_init->next = NULL; tail->next = mem_init; tail = mem_init; init_header->size += qat_hal_get_ins_num(); mem_val_attr++; } return 0; } static int qat_uclo_init_lmem_seg(struct icp_qat_fw_loader_handle *handle, struct icp_qat_uof_initmem *init_mem) { struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; unsigned int ae; unsigned int lmem; lmem = IS_QAT_GEN4(pci_get_device(GET_DEV(handle->accel_dev))) ? 
ICP_QAT_UCLO_MAX_LMEM_REG_2X : ICP_QAT_UCLO_MAX_LMEM_REG; if (qat_uclo_fetch_initmem_ae(handle, init_mem, lmem, &ae)) return EINVAL; if (qat_uclo_create_batch_init_list( handle, init_mem, ae, &obj_handle->lm_init_tab[ae])) return EINVAL; return 0; } static int qat_uclo_init_umem_seg(struct icp_qat_fw_loader_handle *handle, struct icp_qat_uof_initmem *init_mem) { struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; unsigned int ae, ustore_size, uaddr, i; struct icp_qat_uclo_aedata *aed; ustore_size = obj_handle->ustore_phy_size; if (qat_uclo_fetch_initmem_ae(handle, init_mem, ustore_size, &ae)) return EINVAL; if (qat_uclo_create_batch_init_list( handle, init_mem, ae, &obj_handle->umem_init_tab[ae])) return EINVAL; /* set the highest ustore address referenced */ uaddr = (init_mem->addr + init_mem->num_in_bytes) >> 0x2; aed = &obj_handle->ae_data[ae]; for (i = 0; i < aed->slice_num; i++) { if (aed->ae_slices[i].encap_image->uwords_num < uaddr) aed->ae_slices[i].encap_image->uwords_num = uaddr; } return 0; } #define ICP_DH895XCC_PESRAM_BAR_SIZE 0x80000 static int qat_uclo_init_ae_memory(struct icp_qat_fw_loader_handle *handle, struct icp_qat_uof_initmem *init_mem) { switch (init_mem->region) { case ICP_QAT_UOF_LMEM_REGION: if (qat_uclo_init_lmem_seg(handle, init_mem)) return EINVAL; break; case ICP_QAT_UOF_UMEM_REGION: if (qat_uclo_init_umem_seg(handle, init_mem)) return EINVAL; break; default: pr_err("QAT: initmem region error. region type=0x%x\n", init_mem->region); return EINVAL; } return 0; } static int qat_uclo_init_ustore(struct icp_qat_fw_loader_handle *handle, struct icp_qat_uclo_encapme *image) { unsigned int i; struct icp_qat_uclo_encap_page *page; struct icp_qat_uof_image *uof_image; unsigned char ae = 0; unsigned char neigh_ae; unsigned int ustore_size; unsigned int patt_pos; struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; uint64_t *fill_data; static unsigned int init[32] = { 0 }; unsigned long ae_mask = handle->hal_handle->ae_mask; uof_image = image->img_ptr; /*if shared CS mode, the ustore size should be 2*ustore_phy_size*/ fill_data = malloc(obj_handle->ustore_phy_size * 2 * sizeof(uint64_t), M_QAT, M_WAITOK | M_ZERO); for (i = 0; i < obj_handle->ustore_phy_size * 2; i++) memcpy(&fill_data[i], &uof_image->fill_pattern, sizeof(uint64_t)); page = image->page; for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) { unsigned long cfg_ae_mask = handle->cfg_ae_mask; unsigned long ae_assigned = uof_image->ae_assigned; const bool gen4 = IS_QAT_GEN4(pci_get_device(GET_DEV(handle->accel_dev))); if (!test_bit(ae, &cfg_ae_mask)) continue; if (!test_bit(ae, &ae_assigned)) continue; if (obj_handle->ae_data[ae].shareable_ustore && (ae & 1) && !gen4) { qat_hal_get_scs_neigh_ae(ae, &neigh_ae); if (test_bit(neigh_ae, &ae_assigned)) continue; } ustore_size = obj_handle->ae_data[ae].eff_ustore_size; patt_pos = page->beg_addr_p + page->micro_words_num; if (obj_handle->ae_data[ae].shareable_ustore && !gen4) { qat_hal_get_scs_neigh_ae(ae, &neigh_ae); if (init[ae] == 0 && page->beg_addr_p != 0) { qat_hal_wr_coalesce_uwords(handle, (unsigned char)ae, 0, page->beg_addr_p, &fill_data[0]); } qat_hal_wr_coalesce_uwords( handle, (unsigned char)ae, patt_pos, ustore_size - patt_pos, &fill_data[page->beg_addr_p]); init[ae] = 1; init[neigh_ae] = 1; } else { if (gen4 && (ae % 4 != 0)) continue; qat_hal_wr_uwords(handle, (unsigned char)ae, 0, page->beg_addr_p, &fill_data[0]); qat_hal_wr_uwords(handle, (unsigned char)ae, patt_pos, ustore_size - patt_pos + 1, 
&fill_data[page->beg_addr_p]); } } free(fill_data, M_QAT); return 0; } static int qat_uclo_init_memory(struct icp_qat_fw_loader_handle *handle) { int i; int ae = 0; struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; struct icp_qat_uof_initmem *initmem = obj_handle->init_mem_tab.init_mem; unsigned long ae_mask = handle->hal_handle->ae_mask; for (i = 0; i < obj_handle->init_mem_tab.entry_num; i++) { if (initmem->num_in_bytes) { if (qat_uclo_init_ae_memory(handle, initmem)) return EINVAL; } initmem = (struct icp_qat_uof_initmem *)((uintptr_t)((uintptr_t)initmem + sizeof(struct icp_qat_uof_initmem)) + (sizeof(struct icp_qat_uof_memvar_attr) * initmem->val_attr_num)); } for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) { if (qat_hal_batch_wr_lm(handle, ae, obj_handle->lm_init_tab[ae])) { pr_err("QAT: fail to batch init lmem for AE %d\n", ae); return EINVAL; } qat_uclo_cleanup_batch_init_list(handle, &obj_handle->lm_init_tab[ae]); qat_uclo_batch_wr_umem(handle, ae, obj_handle->umem_init_tab[ae]); qat_uclo_cleanup_batch_init_list( handle, &obj_handle->umem_init_tab[ae]); } return 0; } static void * qat_uclo_find_chunk(struct icp_qat_uof_objhdr *obj_hdr, char *chunk_id, void *cur) { int i; struct icp_qat_uof_chunkhdr *chunk_hdr = (struct icp_qat_uof_chunkhdr *)((uintptr_t)obj_hdr + sizeof(struct icp_qat_uof_objhdr)); for (i = 0; i < obj_hdr->num_chunks; i++) { if ((cur < (void *)&chunk_hdr[i]) && !strncmp(chunk_hdr[i].chunk_id, chunk_id, ICP_QAT_UOF_OBJID_LEN)) { return &chunk_hdr[i]; } } return NULL; } static unsigned int qat_uclo_calc_checksum(unsigned int reg, int ch) { int i; unsigned int topbit = 1 << 0xF; unsigned int inbyte = (unsigned int)((reg >> 0x18) ^ ch); reg ^= inbyte << 0x8; for (i = 0; i < 0x8; i++) { if (reg & topbit) reg = (reg << 1) ^ 0x1021; else reg <<= 1; } return reg & 0xFFFF; } static unsigned int qat_uclo_calc_str_checksum(const char *ptr, int num) { unsigned int chksum = 0; if (ptr) while (num--) chksum = qat_uclo_calc_checksum(chksum, *ptr++); return chksum; } static struct icp_qat_uclo_objhdr * qat_uclo_map_chunk(char *buf, struct icp_qat_uof_filehdr *file_hdr, char *chunk_id) { struct icp_qat_uof_filechunkhdr *file_chunk; struct icp_qat_uclo_objhdr *obj_hdr; char *chunk; int i; file_chunk = (struct icp_qat_uof_filechunkhdr *)(buf + sizeof(struct icp_qat_uof_filehdr)); for (i = 0; i < file_hdr->num_chunks; i++) { if (!strncmp(file_chunk->chunk_id, chunk_id, ICP_QAT_UOF_OBJID_LEN)) { chunk = buf + file_chunk->offset; if (file_chunk->checksum != qat_uclo_calc_str_checksum(chunk, file_chunk->size)) break; obj_hdr = malloc(sizeof(*obj_hdr), M_QAT, M_WAITOK | M_ZERO); obj_hdr->file_buff = chunk; obj_hdr->checksum = file_chunk->checksum; obj_hdr->size = file_chunk->size; return obj_hdr; } file_chunk++; } return NULL; } static unsigned int qat_uclo_check_image_compat(struct icp_qat_uof_encap_obj *encap_uof_obj, struct icp_qat_uof_image *image) { struct icp_qat_uof_objtable *uc_var_tab, *imp_var_tab, *imp_expr_tab; struct icp_qat_uof_objtable *neigh_reg_tab; struct icp_qat_uof_code_page *code_page; code_page = (struct icp_qat_uof_code_page *)((char *)image + sizeof(struct icp_qat_uof_image)); uc_var_tab = (struct icp_qat_uof_objtable *)(encap_uof_obj->beg_uof + code_page->uc_var_tab_offset); imp_var_tab = (struct icp_qat_uof_objtable *)(encap_uof_obj->beg_uof + code_page->imp_var_tab_offset); imp_expr_tab = (struct icp_qat_uof_objtable *)(encap_uof_obj->beg_uof + code_page->imp_expr_tab_offset); if (uc_var_tab->entry_num || imp_var_tab->entry_num || 
imp_expr_tab->entry_num) { pr_err("QAT: UOF can't contain imported variable to be parsed"); return EINVAL; } neigh_reg_tab = (struct icp_qat_uof_objtable *)(encap_uof_obj->beg_uof + code_page->neigh_reg_tab_offset); if (neigh_reg_tab->entry_num) { pr_err("QAT: UOF can't contain neighbor register table\n"); return EINVAL; } if (image->numpages > 1) { pr_err("QAT: UOF can't contain multiple pages\n"); return EINVAL; } if (RELOADABLE_CTX_SHARED_MODE(image->ae_mode)) { pr_err("QAT: UOF can't use reloadable feature\n"); return EFAULT; } return 0; } static void qat_uclo_map_image_page(struct icp_qat_uof_encap_obj *encap_uof_obj, struct icp_qat_uof_image *img, struct icp_qat_uclo_encap_page *page) { struct icp_qat_uof_code_page *code_page; struct icp_qat_uof_code_area *code_area; struct icp_qat_uof_objtable *uword_block_tab; struct icp_qat_uof_uword_block *uwblock; int i; code_page = (struct icp_qat_uof_code_page *)((char *)img + sizeof(struct icp_qat_uof_image)); page->def_page = code_page->def_page; page->page_region = code_page->page_region; page->beg_addr_v = code_page->beg_addr_v; page->beg_addr_p = code_page->beg_addr_p; code_area = (struct icp_qat_uof_code_area *)(encap_uof_obj->beg_uof + code_page->code_area_offset); page->micro_words_num = code_area->micro_words_num; uword_block_tab = (struct icp_qat_uof_objtable *)(encap_uof_obj->beg_uof + code_area->uword_block_tab); page->uwblock_num = uword_block_tab->entry_num; uwblock = (struct icp_qat_uof_uword_block *)((char *)uword_block_tab + sizeof(struct icp_qat_uof_objtable)); page->uwblock = (struct icp_qat_uclo_encap_uwblock *)uwblock; for (i = 0; i < uword_block_tab->entry_num; i++) page->uwblock[i].micro_words = (uintptr_t)encap_uof_obj->beg_uof + uwblock[i].uword_offset; } static int qat_uclo_map_uimage(struct icp_qat_uclo_objhandle *obj_handle, struct icp_qat_uclo_encapme *ae_uimage, int max_image) { int i, j; struct icp_qat_uof_chunkhdr *chunk_hdr = NULL; struct icp_qat_uof_image *image; struct icp_qat_uof_objtable *ae_regtab; struct icp_qat_uof_objtable *init_reg_sym_tab; struct icp_qat_uof_objtable *sbreak_tab; struct icp_qat_uof_encap_obj *encap_uof_obj = &obj_handle->encap_uof_obj; for (j = 0; j < max_image; j++) { chunk_hdr = qat_uclo_find_chunk(encap_uof_obj->obj_hdr, ICP_QAT_UOF_IMAG, chunk_hdr); if (!chunk_hdr) break; image = (struct icp_qat_uof_image *)(encap_uof_obj->beg_uof + chunk_hdr->offset); ae_regtab = (struct icp_qat_uof_objtable *)(image->reg_tab_offset + obj_handle->obj_hdr ->file_buff); ae_uimage[j].ae_reg_num = ae_regtab->entry_num; ae_uimage[j].ae_reg = (struct icp_qat_uof_ae_reg *)(((char *)ae_regtab) + sizeof(struct icp_qat_uof_objtable)); init_reg_sym_tab = (struct icp_qat_uof_objtable *)(image->init_reg_sym_tab + obj_handle->obj_hdr ->file_buff); ae_uimage[j].init_regsym_num = init_reg_sym_tab->entry_num; ae_uimage[j].init_regsym = (struct icp_qat_uof_init_regsym *)(((char *)init_reg_sym_tab) + sizeof(struct icp_qat_uof_objtable)); sbreak_tab = (struct icp_qat_uof_objtable *)(image->sbreak_tab + obj_handle->obj_hdr ->file_buff); ae_uimage[j].sbreak_num = sbreak_tab->entry_num; ae_uimage[j].sbreak = (struct icp_qat_uof_sbreak *)(((char *)sbreak_tab) + sizeof(struct icp_qat_uof_objtable)); ae_uimage[j].img_ptr = image; if (qat_uclo_check_image_compat(encap_uof_obj, image)) goto out_err; ae_uimage[j].page = malloc(sizeof(struct icp_qat_uclo_encap_page), M_QAT, M_WAITOK | M_ZERO); qat_uclo_map_image_page(encap_uof_obj, image, ae_uimage[j].page); } return j; out_err: for (i = 0; i < j; i++) free(ae_uimage[i].page, 
M_QAT); return 0; } static int UcLo_checkTGroupList2X(struct icp_qat_fw_loader_handle *handle) { int i; unsigned int swAe = 0; unsigned int ii, jj; struct icp_qat_uclo_aedata *ae_data0, *ae_datax; struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; for (i = 0; i < obj_handle->uimage_num; i++) { struct icp_qat_uof_image *image = obj_handle->ae_uimage[i].img_ptr; if (image->numpages > 1) { pr_err( "Only 1 page is allowed in a UOF for CPM2X; We found %d in %s\n", image->numpages, qat_uclo_get_string(&obj_handle->str_table, image->img_name)); return EINVAL; } } for (swAe = 0; (swAe < obj_handle->ae_num) && (swAe < ICP_QAT_UCLO_MAX_AE); swAe += AE_TG_NUM_CPM2X) { if (!qat_hal_check_ae_active(handle, swAe)) { continue; } for (ii = swAe; ii < (swAe + AE_TG_NUM_CPM2X); ii++) { ae_data0 = &obj_handle->ae_data[ii]; if (ae_data0->slice_num != 1) // not assigned continue; for (jj = ii + 1; jj < (swAe + AE_TG_NUM_CPM2X); jj++) { ae_datax = &obj_handle->ae_data[jj]; if (ae_datax->slice_num != 1) // not assigned continue; if (ae_data0->ae_slices[0] .encap_image->img_ptr != ae_datax->ae_slices[0] .encap_image->img_ptr) { pr_err("Only 1 list is allowed in a "); pr_err("Tgroup for CPM2X;\n"); pr_err("ME%d, %d is assigned", ii, jj); pr_err(" different list files\n"); return EINVAL; } } } } return 0; } static int qat_uclo_map_ae(struct icp_qat_fw_loader_handle *handle, int max_ae) { int i; int ae = 0; unsigned long ae_mask = handle->hal_handle->ae_mask; unsigned long cfg_ae_mask = handle->cfg_ae_mask; int mflag = 0; struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; for_each_set_bit(ae, &ae_mask, max_ae) { if (!test_bit(ae, &cfg_ae_mask)) continue; for (i = 0; i < obj_handle->uimage_num; i++) { unsigned long ae_assigned = obj_handle->ae_uimage[i].img_ptr->ae_assigned; if (!test_bit(ae, &ae_assigned)) continue; mflag = 1; if (qat_uclo_init_ae_data(obj_handle, ae, i)) return EINVAL; } } if (IS_QAT_GEN4(pci_get_device(GET_DEV(handle->accel_dev)))) { if (UcLo_checkTGroupList2X(handle)) { return EINVAL; } } if (!mflag) { pr_err("QAT: uimage uses AE not set"); return EINVAL; } return 0; } static struct icp_qat_uof_strtable * qat_uclo_map_str_table(struct icp_qat_uclo_objhdr *obj_hdr, char *tab_name, struct icp_qat_uof_strtable *str_table) { struct icp_qat_uof_chunkhdr *chunk_hdr; chunk_hdr = qat_uclo_find_chunk((struct icp_qat_uof_objhdr *)obj_hdr->file_buff, tab_name, NULL); if (chunk_hdr) { int hdr_size; memcpy(&str_table->table_len, obj_hdr->file_buff + chunk_hdr->offset, sizeof(str_table->table_len)); hdr_size = (char *)&str_table->strings - (char *)str_table; str_table->strings = (uintptr_t)obj_hdr->file_buff + chunk_hdr->offset + hdr_size; return str_table; } return NULL; } static void qat_uclo_map_initmem_table(struct icp_qat_uof_encap_obj *encap_uof_obj, struct icp_qat_uclo_init_mem_table *init_mem_tab) { struct icp_qat_uof_chunkhdr *chunk_hdr; chunk_hdr = qat_uclo_find_chunk(encap_uof_obj->obj_hdr, ICP_QAT_UOF_IMEM, NULL); if (chunk_hdr) { memmove(&init_mem_tab->entry_num, encap_uof_obj->beg_uof + chunk_hdr->offset, sizeof(unsigned int)); init_mem_tab->init_mem = (struct icp_qat_uof_initmem *)(encap_uof_obj->beg_uof + chunk_hdr->offset + sizeof(unsigned int)); } } static unsigned int qat_uclo_get_dev_type(struct icp_qat_fw_loader_handle *handle) { switch (pci_get_device(GET_DEV(handle->accel_dev))) { case ADF_DH895XCC_PCI_DEVICE_ID: return ICP_QAT_AC_895XCC_DEV_TYPE; case ADF_C62X_PCI_DEVICE_ID: return ICP_QAT_AC_C62X_DEV_TYPE; case ADF_C3XXX_PCI_DEVICE_ID: return 
ICP_QAT_AC_C3XXX_DEV_TYPE; case ADF_200XX_PCI_DEVICE_ID: return ICP_QAT_AC_200XX_DEV_TYPE; case ADF_C4XXX_PCI_DEVICE_ID: return ICP_QAT_AC_C4XXX_DEV_TYPE; case ADF_4XXX_PCI_DEVICE_ID: case ADF_401XX_PCI_DEVICE_ID: return ICP_QAT_AC_4XXX_A_DEV_TYPE; default: pr_err("QAT: unsupported device 0x%x\n", pci_get_device(GET_DEV(handle->accel_dev))); return 0; } } static int qat_uclo_check_uof_compat(struct icp_qat_uclo_objhandle *obj_handle) { unsigned int maj_ver, prod_type = obj_handle->prod_type; if (!(prod_type & obj_handle->encap_uof_obj.obj_hdr->ac_dev_type)) { pr_err("QAT: UOF type 0x%x doesn't match with platform 0x%x\n", obj_handle->encap_uof_obj.obj_hdr->ac_dev_type, prod_type); return EINVAL; } maj_ver = obj_handle->prod_rev & 0xff; if (obj_handle->encap_uof_obj.obj_hdr->max_cpu_ver < maj_ver || obj_handle->encap_uof_obj.obj_hdr->min_cpu_ver > maj_ver) { pr_err("QAT: UOF maj_ver 0x%x out of range\n", maj_ver); return EINVAL; } return 0; } static int qat_uclo_init_reg(struct icp_qat_fw_loader_handle *handle, unsigned char ae, unsigned char ctx_mask, enum icp_qat_uof_regtype reg_type, unsigned short reg_addr, unsigned int value) { switch (reg_type) { case ICP_GPA_ABS: case ICP_GPB_ABS: ctx_mask = 0; return qat_hal_init_gpr( handle, ae, ctx_mask, reg_type, reg_addr, value); case ICP_GPA_REL: case ICP_GPB_REL: return qat_hal_init_gpr( handle, ae, ctx_mask, reg_type, reg_addr, value); case ICP_SR_ABS: case ICP_DR_ABS: case ICP_SR_RD_ABS: case ICP_DR_RD_ABS: ctx_mask = 0; return qat_hal_init_rd_xfer( handle, ae, ctx_mask, reg_type, reg_addr, value); case ICP_SR_REL: case ICP_DR_REL: case ICP_SR_RD_REL: case ICP_DR_RD_REL: return qat_hal_init_rd_xfer( handle, ae, ctx_mask, reg_type, reg_addr, value); case ICP_SR_WR_ABS: case ICP_DR_WR_ABS: ctx_mask = 0; return qat_hal_init_wr_xfer( handle, ae, ctx_mask, reg_type, reg_addr, value); case ICP_SR_WR_REL: case ICP_DR_WR_REL: return qat_hal_init_wr_xfer( handle, ae, ctx_mask, reg_type, reg_addr, value); case ICP_NEIGH_REL: return qat_hal_init_nn(handle, ae, ctx_mask, reg_addr, value); default: pr_err("QAT: UOF uses unsupported reg type 0x%x\n", reg_type); return EFAULT; } return 0; } static int qat_uclo_init_reg_sym(struct icp_qat_fw_loader_handle *handle, unsigned int ae, struct icp_qat_uclo_encapme *encap_ae) { unsigned int i; unsigned char ctx_mask; struct icp_qat_uof_init_regsym *init_regsym; if (ICP_QAT_CTX_MODE(encap_ae->img_ptr->ae_mode) == ICP_QAT_UCLO_MAX_CTX) ctx_mask = 0xff; else ctx_mask = 0x55; for (i = 0; i < encap_ae->init_regsym_num; i++) { unsigned int exp_res; init_regsym = &encap_ae->init_regsym[i]; exp_res = init_regsym->value; switch (init_regsym->init_type) { case ICP_QAT_UOF_INIT_REG: qat_uclo_init_reg(handle, ae, ctx_mask, (enum icp_qat_uof_regtype) init_regsym->reg_type, (unsigned short)init_regsym->reg_addr, exp_res); break; case ICP_QAT_UOF_INIT_REG_CTX: /* check if ctx is appropriate for the ctxMode */ if (!((1 << init_regsym->ctx) & ctx_mask)) { pr_err("QAT: invalid ctx num = 0x%x\n", init_regsym->ctx); return EINVAL; } qat_uclo_init_reg( handle, ae, (unsigned char)(1 << init_regsym->ctx), (enum icp_qat_uof_regtype)init_regsym->reg_type, (unsigned short)init_regsym->reg_addr, exp_res); break; case ICP_QAT_UOF_INIT_EXPR: pr_err("QAT: INIT_EXPR feature not supported\n"); return EINVAL; case ICP_QAT_UOF_INIT_EXPR_ENDIAN_SWAP: pr_err("QAT: INIT_EXPR_ENDIAN_SWAP not supported\n"); return EINVAL; default: break; } } return 0; } static int qat_uclo_init_globals(struct icp_qat_fw_loader_handle *handle) { struct 
icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; unsigned int s; unsigned int ae = 0; struct icp_qat_uclo_aedata *aed; unsigned long ae_mask = handle->hal_handle->ae_mask; if (obj_handle->global_inited) return 0; if (obj_handle->init_mem_tab.entry_num) { if (qat_uclo_init_memory(handle)) { pr_err("QAT: initialize memory failed\n"); return EINVAL; } } for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) { aed = &obj_handle->ae_data[ae]; for (s = 0; s < aed->slice_num; s++) { if (!aed->ae_slices[s].encap_image) continue; if (qat_uclo_init_reg_sym( handle, ae, aed->ae_slices[s].encap_image)) return EINVAL; } } obj_handle->global_inited = 1; return 0; } static int qat_hal_set_modes(struct icp_qat_fw_loader_handle *handle, struct icp_qat_uclo_objhandle *obj_handle, unsigned char ae, struct icp_qat_uof_image *uof_image) { unsigned char nn_mode; char ae_mode = 0; ae_mode = (char)ICP_QAT_CTX_MODE(uof_image->ae_mode); if (qat_hal_set_ae_ctx_mode(handle, ae, ae_mode)) { pr_err("QAT: qat_hal_set_ae_ctx_mode error\n"); return EFAULT; } ae_mode = (char)ICP_QAT_SHARED_USTORE_MODE(uof_image->ae_mode); qat_hal_set_ae_scs_mode(handle, ae, ae_mode); if (!IS_QAT_GEN4(pci_get_device(GET_DEV(handle->accel_dev)))) { nn_mode = ICP_QAT_NN_MODE(uof_image->ae_mode); if (qat_hal_set_ae_nn_mode(handle, ae, nn_mode)) { pr_err("QAT: qat_hal_set_ae_nn_mode error\n"); return EFAULT; } } ae_mode = (char)ICP_QAT_LOC_MEM0_MODE(uof_image->ae_mode); if (qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM0, ae_mode)) { pr_err("QAT: qat_hal_set_ae_lm_mode LMEM0 error\n"); return EFAULT; } ae_mode = (char)ICP_QAT_LOC_MEM1_MODE(uof_image->ae_mode); if (qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM1, ae_mode)) { pr_err("QAT: qat_hal_set_ae_lm_mode LMEM1 error\n"); return EFAULT; } if (IS_QAT_GEN3_OR_GEN4(pci_get_device(GET_DEV(handle->accel_dev)))) { ae_mode = (char)ICP_QAT_LOC_MEM2_MODE(uof_image->ae_mode); if (qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM2, ae_mode)) { pr_err("QAT: qat_hal_set_ae_lm_mode LMEM2 error\n"); return EFAULT; } ae_mode = (char)ICP_QAT_LOC_MEM3_MODE(uof_image->ae_mode); if (qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM3, ae_mode)) { pr_err("QAT: qat_hal_set_ae_lm_mode LMEM3 error\n"); return EFAULT; } ae_mode = (char)ICP_QAT_LOC_TINDEX_MODE(uof_image->ae_mode); qat_hal_set_ae_tindex_mode(handle, ae, ae_mode); } return 0; } static int qat_uclo_set_ae_mode(struct icp_qat_fw_loader_handle *handle) { int error; unsigned char s; unsigned char ae = 0; struct icp_qat_uof_image *uof_image; struct icp_qat_uclo_aedata *ae_data; struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; unsigned long ae_mask = handle->hal_handle->ae_mask; for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) { unsigned long cfg_ae_mask = handle->cfg_ae_mask; if (!test_bit(ae, &cfg_ae_mask)) continue; ae_data = &obj_handle->ae_data[ae]; for (s = 0; s < min_t(unsigned int, ae_data->slice_num, ICP_QAT_UCLO_MAX_CTX); s++) { if (!obj_handle->ae_data[ae].ae_slices[s].encap_image) continue; uof_image = ae_data->ae_slices[s].encap_image->img_ptr; error = qat_hal_set_modes(handle, obj_handle, ae, uof_image); if (error) return error; } } return 0; } static void qat_uclo_init_uword_num(struct icp_qat_fw_loader_handle *handle) { struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; struct icp_qat_uclo_encapme *image; int a; for (a = 0; a < obj_handle->uimage_num; a++) { image = &obj_handle->ae_uimage[a]; image->uwords_num = image->page->beg_addr_p + image->page->micro_words_num; } } static int 
qat_uclo_parse_uof_obj(struct icp_qat_fw_loader_handle *handle) { struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; unsigned int ae; obj_handle->encap_uof_obj.beg_uof = obj_handle->obj_hdr->file_buff; obj_handle->encap_uof_obj.obj_hdr = (struct icp_qat_uof_objhdr *)obj_handle->obj_hdr->file_buff; obj_handle->uword_in_bytes = 6; obj_handle->prod_type = qat_uclo_get_dev_type(handle); obj_handle->prod_rev = PID_MAJOR_REV | (PID_MINOR_REV & handle->hal_handle->revision_id); if (qat_uclo_check_uof_compat(obj_handle)) { pr_err("QAT: UOF incompatible\n"); return EINVAL; } obj_handle->uword_buf = malloc(UWORD_CPYBUF_SIZE * sizeof(uint64_t), M_QAT, M_WAITOK | M_ZERO); obj_handle->ustore_phy_size = (obj_handle->prod_type == ICP_QAT_AC_C4XXX_DEV_TYPE) ? 0x2000 : 0x4000; if (!obj_handle->obj_hdr->file_buff || !qat_uclo_map_str_table(obj_handle->obj_hdr, ICP_QAT_UOF_STRT, &obj_handle->str_table)) { pr_err("QAT: UOF doesn't have effective images\n"); goto out_err; } obj_handle->uimage_num = qat_uclo_map_uimage(obj_handle, obj_handle->ae_uimage, ICP_QAT_UCLO_MAX_AE * ICP_QAT_UCLO_MAX_CTX); if (!obj_handle->uimage_num) goto out_err; if (qat_uclo_map_ae(handle, handle->hal_handle->ae_max_num)) { pr_err("QAT: Bad object\n"); goto out_check_uof_aemask_err; } qat_uclo_init_uword_num(handle); qat_uclo_map_initmem_table(&obj_handle->encap_uof_obj, &obj_handle->init_mem_tab); if (qat_uclo_set_ae_mode(handle)) goto out_check_uof_aemask_err; return 0; out_check_uof_aemask_err: for (ae = 0; ae < obj_handle->uimage_num; ae++) free(obj_handle->ae_uimage[ae].page, M_QAT); out_err: free(obj_handle->uword_buf, M_QAT); obj_handle->uword_buf = NULL; return EFAULT; } static int qat_uclo_map_suof_file_hdr(const struct icp_qat_fw_loader_handle *handle, const struct icp_qat_suof_filehdr *suof_ptr, int suof_size) { unsigned int check_sum = 0; unsigned int min_ver_offset = 0; struct icp_qat_suof_handle *suof_handle = handle->sobj_handle; suof_handle->file_id = ICP_QAT_SUOF_FID; suof_handle->suof_buf = (const char *)suof_ptr; suof_handle->suof_size = suof_size; min_ver_offset = suof_size - offsetof(struct icp_qat_suof_filehdr, min_ver); check_sum = qat_uclo_calc_str_checksum((const char *)&suof_ptr->min_ver, min_ver_offset); if (check_sum != suof_ptr->check_sum) { pr_err("QAT: incorrect SUOF checksum\n"); return EINVAL; } suof_handle->check_sum = suof_ptr->check_sum; suof_handle->min_ver = suof_ptr->min_ver; suof_handle->maj_ver = suof_ptr->maj_ver; suof_handle->fw_type = suof_ptr->fw_type; return 0; } static void qat_uclo_map_simg(struct icp_qat_fw_loader_handle *handle, struct icp_qat_suof_img_hdr *suof_img_hdr, struct icp_qat_suof_chunk_hdr *suof_chunk_hdr) { struct icp_qat_suof_handle *suof_handle = handle->sobj_handle; const struct icp_qat_simg_ae_mode *ae_mode; struct icp_qat_suof_objhdr *suof_objhdr; unsigned int device_id = pci_get_device(GET_DEV(handle->accel_dev)); suof_img_hdr->simg_buf = (suof_handle->suof_buf + suof_chunk_hdr->offset + sizeof(*suof_objhdr)); suof_img_hdr->simg_len = ((struct icp_qat_suof_objhdr *)(uintptr_t)(suof_handle->suof_buf + suof_chunk_hdr->offset)) ->img_length; suof_img_hdr->css_header = suof_img_hdr->simg_buf; suof_img_hdr->css_key = (suof_img_hdr->css_header + sizeof(struct icp_qat_css_hdr)); suof_img_hdr->css_signature = suof_img_hdr->css_key + ICP_QAT_CSS_FWSK_MODULUS_LEN(device_id) + ICP_QAT_CSS_FWSK_EXPONENT_LEN(device_id); suof_img_hdr->css_simg = suof_img_hdr->css_signature + ICP_QAT_CSS_SIGNATURE_LEN(device_id); ae_mode = (const struct icp_qat_simg_ae_mode 
*)(suof_img_hdr->css_simg); suof_img_hdr->ae_mask = ae_mode->ae_mask; suof_img_hdr->simg_name = (unsigned long)&ae_mode->simg_name; suof_img_hdr->appmeta_data = (unsigned long)&ae_mode->appmeta_data; suof_img_hdr->fw_type = ae_mode->fw_type; } static void qat_uclo_map_suof_symobjs(struct icp_qat_suof_handle *suof_handle, struct icp_qat_suof_chunk_hdr *suof_chunk_hdr) { char **sym_str = (char **)&suof_handle->sym_str; unsigned int *sym_size = &suof_handle->sym_size; struct icp_qat_suof_strtable *str_table_obj; *sym_size = *(unsigned int *)(uintptr_t)(suof_chunk_hdr->offset + suof_handle->suof_buf); *sym_str = (char *)(uintptr_t)(suof_handle->suof_buf + suof_chunk_hdr->offset + sizeof(str_table_obj->tab_length)); } static int qat_uclo_check_simg_compat(struct icp_qat_fw_loader_handle *handle, struct icp_qat_suof_img_hdr *img_hdr) { const struct icp_qat_simg_ae_mode *img_ae_mode = NULL; unsigned int prod_rev, maj_ver, prod_type; prod_type = qat_uclo_get_dev_type(handle); img_ae_mode = (const struct icp_qat_simg_ae_mode *)img_hdr->css_simg; prod_rev = PID_MAJOR_REV | (PID_MINOR_REV & handle->hal_handle->revision_id); if (img_ae_mode->dev_type != prod_type) { pr_err("QAT: incompatible product type %x\n", img_ae_mode->dev_type); return EINVAL; } maj_ver = prod_rev & 0xff; if (maj_ver > img_ae_mode->devmax_ver || maj_ver < img_ae_mode->devmin_ver) { pr_err("QAT: incompatible device maj_ver 0x%x\n", maj_ver); return EINVAL; } return 0; } static void qat_uclo_del_suof(struct icp_qat_fw_loader_handle *handle) { struct icp_qat_suof_handle *sobj_handle = handle->sobj_handle; free(sobj_handle->img_table.simg_hdr, M_QAT); sobj_handle->img_table.simg_hdr = NULL; free(handle->sobj_handle, M_QAT); handle->sobj_handle = NULL; } static void qat_uclo_tail_img(struct icp_qat_suof_img_hdr *suof_img_hdr, unsigned int img_id, unsigned int num_simgs) { struct icp_qat_suof_img_hdr img_header; if ((img_id != num_simgs - 1) && img_id != ICP_QAT_UCLO_MAX_AE) { memcpy(&img_header, &suof_img_hdr[num_simgs - 1], sizeof(*suof_img_hdr)); memcpy(&suof_img_hdr[num_simgs - 1], &suof_img_hdr[img_id], sizeof(*suof_img_hdr)); memcpy(&suof_img_hdr[img_id], &img_header, sizeof(*suof_img_hdr)); } } static int qat_uclo_map_suof(struct icp_qat_fw_loader_handle *handle, const struct icp_qat_suof_filehdr *suof_ptr, int suof_size) { struct icp_qat_suof_handle *suof_handle = handle->sobj_handle; struct icp_qat_suof_chunk_hdr *suof_chunk_hdr = NULL; struct icp_qat_suof_img_hdr *suof_img_hdr = NULL; int ret = 0, ae0_img = ICP_QAT_UCLO_MAX_AE, aeMax_img = ICP_QAT_UCLO_MAX_AE; unsigned int i = 0; struct icp_qat_suof_img_hdr img_header; if (!suof_ptr || suof_size == 0) { pr_err("QAT: input parameter SUOF pointer/size is NULL\n"); return EINVAL; } if (qat_uclo_check_suof_format(suof_ptr)) return EINVAL; ret = qat_uclo_map_suof_file_hdr(handle, suof_ptr, suof_size); if (ret) return ret; suof_chunk_hdr = (struct icp_qat_suof_chunk_hdr *)((uintptr_t)suof_ptr + sizeof(*suof_ptr)); qat_uclo_map_suof_symobjs(suof_handle, suof_chunk_hdr); suof_handle->img_table.num_simgs = suof_ptr->num_chunks - 1; if (suof_handle->img_table.num_simgs != 0) { suof_img_hdr = malloc(suof_handle->img_table.num_simgs * sizeof(img_header), M_QAT, M_WAITOK | M_ZERO); suof_handle->img_table.simg_hdr = suof_img_hdr; } for (i = 0; i < suof_handle->img_table.num_simgs; i++) { qat_uclo_map_simg(handle, &suof_img_hdr[i], &suof_chunk_hdr[1 + i]); ret = qat_uclo_check_simg_compat(handle, &suof_img_hdr[i]); if (ret) return ret; suof_img_hdr[i].ae_mask &= handle->cfg_ae_mask; if 
((suof_img_hdr[i].ae_mask & 0x1) != 0) ae0_img = i; } if (!IS_QAT_GEN4(pci_get_device(GET_DEV(handle->accel_dev)))) { qat_uclo_tail_img(suof_img_hdr, ae0_img, suof_handle->img_table.num_simgs); } else { if (suof_handle->img_table.num_simgs == 1) return 0; qat_uclo_tail_img(suof_img_hdr, ae0_img, suof_handle->img_table.num_simgs - 1); for (i = 0; i < suof_handle->img_table.num_simgs; i++) { if ((suof_img_hdr[i].ae_mask & (0x1 << (handle->hal_handle->ae_max_num - 1))) != 0) { aeMax_img = i; break; } } qat_uclo_tail_img(suof_img_hdr, aeMax_img, suof_handle->img_table.num_simgs); } return 0; } #define ADD_ADDR(high, low) ((((uint64_t)high) << 32) + (low)) #define BITS_IN_DWORD 32 static int qat_uclo_auth_fw(struct icp_qat_fw_loader_handle *handle, struct icp_qat_fw_auth_desc *desc) { unsigned int fcu_sts, mem_cfg_err, retry = 0; unsigned int fcu_ctl_csr, fcu_sts_csr; unsigned int fcu_dram_hi_csr, fcu_dram_lo_csr; u64 bus_addr; bus_addr = ADD_ADDR(desc->css_hdr_high, desc->css_hdr_low) - sizeof(struct icp_qat_auth_chunk); if (IS_QAT_GEN3_OR_GEN4(pci_get_device(GET_DEV(handle->accel_dev)))) { fcu_ctl_csr = FCU_CONTROL_C4XXX; fcu_sts_csr = FCU_STATUS_C4XXX; fcu_dram_hi_csr = FCU_DRAM_ADDR_HI_C4XXX; fcu_dram_lo_csr = FCU_DRAM_ADDR_LO_C4XXX; } else { fcu_ctl_csr = FCU_CONTROL; fcu_sts_csr = FCU_STATUS; fcu_dram_hi_csr = FCU_DRAM_ADDR_HI; fcu_dram_lo_csr = FCU_DRAM_ADDR_LO; } SET_FCU_CSR(handle, fcu_dram_hi_csr, (bus_addr >> BITS_IN_DWORD)); SET_FCU_CSR(handle, fcu_dram_lo_csr, bus_addr); SET_FCU_CSR(handle, fcu_ctl_csr, FCU_CTRL_CMD_AUTH); do { pause_ms("adfstop", FW_AUTH_WAIT_PERIOD); fcu_sts = GET_FCU_CSR(handle, fcu_sts_csr); if ((fcu_sts & FCU_AUTH_STS_MASK) == FCU_STS_VERI_FAIL) goto auth_fail; if (((fcu_sts >> FCU_STS_AUTHFWLD_POS) & 0x1)) if ((fcu_sts & FCU_AUTH_STS_MASK) == FCU_STS_VERI_DONE) return 0; } while (retry++ < FW_AUTH_MAX_RETRY); auth_fail: pr_err("QAT: authentication error (FCU_STATUS = 0x%x),retry = %d\n", fcu_sts & FCU_AUTH_STS_MASK, retry); if (IS_QAT_GEN3(pci_get_device(GET_DEV(handle->accel_dev)))) { mem_cfg_err = (GET_FCU_CSR(handle, FCU_STATUS1_C4XXX) & MEM_CFG_ERR_BIT); if (mem_cfg_err) pr_err("QAT: MEM_CFG_ERR\n"); } return EINVAL; } static int qat_uclo_is_broadcast(struct icp_qat_fw_loader_handle *handle, int imgid) { struct icp_qat_suof_handle *sobj_handle; if (!IS_QAT_GEN4(pci_get_device(GET_DEV(handle->accel_dev)))) return 0; sobj_handle = (struct icp_qat_suof_handle *)handle->sobj_handle; if (handle->hal_handle->admin_ae_mask & sobj_handle->img_table.simg_hdr[imgid].ae_mask) return 0; return 1; } static int qat_uclo_broadcast_load_fw(struct icp_qat_fw_loader_handle *handle, struct icp_qat_fw_auth_desc *desc) { unsigned int i = 0; unsigned int fcuSts = 0, fcuAeBroadcastMask = 0; unsigned int retry = 0; unsigned int fcuStsCsr = 0; unsigned int fcuCtlCsr = 0; unsigned int loadedAes = 0; unsigned int device_id = pci_get_device(GET_DEV(handle->accel_dev)); if (IS_QAT_GEN4(device_id)) { fcuCtlCsr = FCU_CONTROL_4XXX; fcuStsCsr = FCU_STATUS_4XXX; } else { pr_err("Uclo_BroadcastLoadFW only applicable for CPM20\n"); return EINVAL; } for (i = 0; i < ICP_QAT_UCLO_MAX_AE; i++) { if (!test_bit(i, (unsigned long *)&handle->hal_handle->ae_mask)) continue; if (qat_hal_check_ae_active(handle, (unsigned char)i)) { pr_err( "Uclo_BroadcastLoadFW error (invalid AE status)\n"); return EINVAL; } if ((desc->ae_mask >> i) & 0x1) { fcuAeBroadcastMask |= 1 << i; } } if (fcuAeBroadcastMask) { retry = 0; SET_FCU_CSR(handle, FCU_ME_BROADCAST_MASK_TYPE, fcuAeBroadcastMask); SET_FCU_CSR(handle, 
fcuCtlCsr, FCU_CTRL_CMD_LOAD); do { msleep(FW_AUTH_WAIT_PERIOD); fcuSts = GET_FCU_CSR(handle, fcuStsCsr); if ((fcuSts & FCU_AUTH_STS_MASK) == FCU_STS_LOAD_FAIL) { pr_err( "Uclo_BroadcastLoadFW fail (fcu_status = 0x%x)\n", fcuSts & FCU_AUTH_STS_MASK); return EINVAL; } else if ((fcuSts & FCU_AUTH_STS_MASK) == FCU_STS_LOAD_DONE) { if (IS_QAT_GEN4(device_id)) loadedAes = GET_FCU_CSR(handle, FCU_AE_LOADED_4XXX); else loadedAes = (fcuSts >> FCU_LOADED_AE_POS); if ((loadedAes & fcuAeBroadcastMask) == fcuAeBroadcastMask) break; } else if ((fcuSts & FCU_AUTH_STS_MASK) == FCU_STS_VERI_DONE) { SET_FCU_CSR(handle, fcuCtlCsr, FCU_CTRL_CMD_LOAD); } } while (retry++ < FW_BROADCAST_MAX_RETRY); if (retry > FW_BROADCAST_MAX_RETRY) { pr_err( "Uclo_BroadcastLoadFW fail(fcu_status = 0x%x),retry = %d\n", fcuSts & FCU_AUTH_STS_MASK, retry); return EINVAL; } } return 0; } static int qat_uclo_simg_alloc(struct icp_qat_fw_loader_handle *handle, struct icp_firml_dram_desc *dram_desc, unsigned int size) { int ret; ret = bus_dma_mem_create(&dram_desc->dram_mem, handle->accel_dev->dma_tag, 1, BUS_SPACE_MAXADDR, size, 0); if (ret != 0) return ret; dram_desc->dram_base_addr_v = dram_desc->dram_mem.dma_vaddr; dram_desc->dram_bus_addr = dram_desc->dram_mem.dma_baddr; dram_desc->dram_size = size; return 0; } static void qat_uclo_simg_free(struct icp_qat_fw_loader_handle *handle, struct icp_firml_dram_desc *dram_desc) { if (handle && dram_desc && dram_desc->dram_base_addr_v) bus_dma_mem_free(&dram_desc->dram_mem); if (dram_desc) explicit_bzero(dram_desc, sizeof(*dram_desc)); } static int qat_uclo_map_auth_fw(struct icp_qat_fw_loader_handle *handle, const char *image, unsigned int size, struct icp_firml_dram_desc *img_desc, struct icp_qat_fw_auth_desc **desc) { const struct icp_qat_css_hdr *css_hdr = (const struct icp_qat_css_hdr *)image; struct icp_qat_fw_auth_desc *auth_desc; struct icp_qat_auth_chunk *auth_chunk; u64 virt_addr, bus_addr, virt_base; unsigned int length, simg_offset = sizeof(*auth_chunk); unsigned int device_id = pci_get_device(GET_DEV(handle->accel_dev)); + if (size <= ICP_QAT_AE_IMG_OFFSET(device_id)) { + pr_err("QAT: error, input image size too small %d\n", size); + return EINVAL; + } + if (size > (ICP_QAT_AE_IMG_OFFSET(device_id) + ICP_QAT_CSS_MAX_IMAGE_LEN)) { pr_err("QAT: error, input image size overflow %d\n", size); return EINVAL; } length = (css_hdr->fw_type == CSS_AE_FIRMWARE) ? 
ICP_QAT_CSS_AE_SIMG_LEN(device_id) + simg_offset : size + ICP_QAT_CSS_FWSK_PAD_LEN(device_id) + simg_offset; if (qat_uclo_simg_alloc(handle, img_desc, length)) { pr_err("QAT: error, allocate continuous dram fail\n"); return -ENOMEM; } auth_chunk = img_desc->dram_base_addr_v; auth_chunk->chunk_size = img_desc->dram_size; auth_chunk->chunk_bus_addr = img_desc->dram_bus_addr; virt_base = (uintptr_t)img_desc->dram_base_addr_v + simg_offset; bus_addr = img_desc->dram_bus_addr + simg_offset; auth_desc = img_desc->dram_base_addr_v; auth_desc->css_hdr_high = (unsigned int)(bus_addr >> BITS_IN_DWORD); auth_desc->css_hdr_low = (unsigned int)bus_addr; virt_addr = virt_base; memcpy((void *)(uintptr_t)virt_addr, image, sizeof(*css_hdr)); /* pub key */ bus_addr = ADD_ADDR(auth_desc->css_hdr_high, auth_desc->css_hdr_low) + sizeof(*css_hdr); virt_addr = virt_addr + sizeof(*css_hdr); auth_desc->fwsk_pub_high = (unsigned int)(bus_addr >> BITS_IN_DWORD); auth_desc->fwsk_pub_low = (unsigned int)bus_addr; memcpy((void *)(uintptr_t)virt_addr, (const void *)(image + sizeof(*css_hdr)), ICP_QAT_CSS_FWSK_MODULUS_LEN(device_id)); /* padding */ explicit_bzero((void *)(uintptr_t)( virt_addr + ICP_QAT_CSS_FWSK_MODULUS_LEN(device_id)), ICP_QAT_CSS_FWSK_PAD_LEN(device_id)); /* exponent */ memcpy((void *)(uintptr_t)(virt_addr + ICP_QAT_CSS_FWSK_MODULUS_LEN(device_id) + ICP_QAT_CSS_FWSK_PAD_LEN(device_id)), (const void *)(image + sizeof(*css_hdr) + ICP_QAT_CSS_FWSK_MODULUS_LEN(device_id)), sizeof(unsigned int)); /* signature */ bus_addr = ADD_ADDR(auth_desc->fwsk_pub_high, auth_desc->fwsk_pub_low) + ICP_QAT_CSS_FWSK_PUB_LEN(device_id); virt_addr = virt_addr + ICP_QAT_CSS_FWSK_PUB_LEN(device_id); auth_desc->signature_high = (unsigned int)(bus_addr >> BITS_IN_DWORD); auth_desc->signature_low = (unsigned int)bus_addr; memcpy((void *)(uintptr_t)virt_addr, (const void *)(image + sizeof(*css_hdr) + ICP_QAT_CSS_FWSK_MODULUS_LEN(device_id) + ICP_QAT_CSS_FWSK_EXPONENT_LEN(device_id)), ICP_QAT_CSS_SIGNATURE_LEN(device_id)); bus_addr = ADD_ADDR(auth_desc->signature_high, auth_desc->signature_low) + ICP_QAT_CSS_SIGNATURE_LEN(device_id); virt_addr += ICP_QAT_CSS_SIGNATURE_LEN(device_id); auth_desc->img_high = (unsigned int)(bus_addr >> BITS_IN_DWORD); auth_desc->img_low = (unsigned int)bus_addr; auth_desc->img_len = size - ICP_QAT_AE_IMG_OFFSET(device_id); memcpy((void *)(uintptr_t)virt_addr, (const void *)(image + ICP_QAT_AE_IMG_OFFSET(device_id)), auth_desc->img_len); virt_addr = virt_base; /* AE firmware */ if (((struct icp_qat_css_hdr *)(uintptr_t)virt_addr)->fw_type == CSS_AE_FIRMWARE) { auth_desc->img_ae_mode_data_high = auth_desc->img_high; auth_desc->img_ae_mode_data_low = auth_desc->img_low; bus_addr = ADD_ADDR(auth_desc->img_ae_mode_data_high, auth_desc->img_ae_mode_data_low) + sizeof(struct icp_qat_simg_ae_mode); auth_desc->img_ae_init_data_high = (unsigned int)(bus_addr >> BITS_IN_DWORD); auth_desc->img_ae_init_data_low = (unsigned int)bus_addr; bus_addr += ICP_QAT_SIMG_AE_INIT_SEQ_LEN; auth_desc->img_ae_insts_high = (unsigned int)(bus_addr >> BITS_IN_DWORD); auth_desc->img_ae_insts_low = (unsigned int)bus_addr; virt_addr += sizeof(struct icp_qat_css_hdr) + ICP_QAT_CSS_FWSK_PUB_LEN(device_id) + ICP_QAT_CSS_SIGNATURE_LEN(device_id); auth_desc->ae_mask = ((struct icp_qat_simg_ae_mode *)virt_addr)->ae_mask & handle->cfg_ae_mask; } else { auth_desc->img_ae_insts_high = auth_desc->img_high; auth_desc->img_ae_insts_low = auth_desc->img_low; } *desc = auth_desc; return 0; } static int qat_uclo_load_fw(struct 
icp_qat_fw_loader_handle *handle, struct icp_qat_fw_auth_desc *desc) { unsigned int i = 0; unsigned int fcu_sts; unsigned int fcu_sts_csr, fcu_ctl_csr; unsigned int loaded_aes = FCU_LOADED_AE_POS; unsigned long ae_mask = handle->hal_handle->ae_mask; if (IS_QAT_GEN3_OR_GEN4(pci_get_device(GET_DEV(handle->accel_dev)))) { fcu_ctl_csr = FCU_CONTROL_C4XXX; fcu_sts_csr = FCU_STATUS_C4XXX; } else { fcu_ctl_csr = FCU_CONTROL; fcu_sts_csr = FCU_STATUS; } for_each_set_bit(i, &ae_mask, handle->hal_handle->ae_max_num) { int retry = 0; if (!((desc->ae_mask >> i) & 0x1)) continue; if (qat_hal_check_ae_active(handle, i)) { pr_err("QAT: AE %d is active\n", i); return EINVAL; } SET_FCU_CSR(handle, fcu_ctl_csr, (FCU_CTRL_CMD_LOAD | (IS_QAT_GEN4( pci_get_device(GET_DEV(handle->accel_dev))) ? (1 << FCU_CTRL_BROADCAST_POS) : 0) | (i << FCU_CTRL_AE_POS))); do { pause_ms("adfstop", FW_AUTH_WAIT_PERIOD); fcu_sts = GET_FCU_CSR(handle, fcu_sts_csr); if ((fcu_sts & FCU_AUTH_STS_MASK) == FCU_STS_LOAD_DONE) { loaded_aes = IS_QAT_GEN3_OR_GEN4(pci_get_device( GET_DEV(handle->accel_dev))) ? GET_FCU_CSR(handle, FCU_AE_LOADED_C4XXX) : (fcu_sts >> FCU_LOADED_AE_POS); if (loaded_aes & (1 << i)) break; } } while (retry++ < FW_AUTH_MAX_RETRY); if (retry > FW_AUTH_MAX_RETRY) { pr_err("QAT: firmware load failed timeout %x\n", retry); return EINVAL; } } return 0; } static int qat_uclo_map_suof_obj(struct icp_qat_fw_loader_handle *handle, const void *addr_ptr, int mem_size) { struct icp_qat_suof_handle *suof_handle; suof_handle = malloc(sizeof(*suof_handle), M_QAT, M_WAITOK | M_ZERO); handle->sobj_handle = suof_handle; if (qat_uclo_map_suof(handle, addr_ptr, mem_size)) { qat_uclo_del_suof(handle); pr_err("QAT: map SUOF failed\n"); return EINVAL; } return 0; } int qat_uclo_wr_mimage(struct icp_qat_fw_loader_handle *handle, const void *addr_ptr, int mem_size) { struct icp_qat_fw_auth_desc *desc = NULL; struct icp_firml_dram_desc img_desc; int status = 0; if (handle->fw_auth) { status = qat_uclo_map_auth_fw( handle, addr_ptr, mem_size, &img_desc, &desc); if (!status) status = qat_uclo_auth_fw(handle, desc); qat_uclo_simg_free(handle, &img_desc); } else { if (IS_QAT_GEN4(pci_get_device(GET_DEV(handle->accel_dev)))) { device_printf( NULL, "QAT: PKE service is not allowed because "); device_printf(NULL, "MMP fw will not be loaded for "); device_printf(NULL, "device 0x%x", pci_get_device( GET_DEV(handle->accel_dev))); return status; } - if (pci_get_device(GET_DEV(handle->accel_dev)) == - ADF_C3XXX_PCI_DEVICE_ID) { - pr_err("QAT: C3XXX doesn't support unsigned MMP\n"); - return EINVAL; - } status = qat_uclo_wr_sram_by_words(handle, handle->hal_sram_offset, addr_ptr, mem_size); } return status; } static int qat_uclo_map_uof_obj(struct icp_qat_fw_loader_handle *handle, const void *addr_ptr, int mem_size) { struct icp_qat_uof_filehdr *filehdr; struct icp_qat_uclo_objhandle *objhdl; objhdl = malloc(sizeof(*objhdl), M_QAT, M_WAITOK | M_ZERO); objhdl->obj_buf = malloc(mem_size, M_QAT, M_WAITOK); bcopy(addr_ptr, objhdl->obj_buf, mem_size); filehdr = (struct icp_qat_uof_filehdr *)objhdl->obj_buf; if (qat_uclo_check_uof_format(filehdr)) goto out_objhdr_err; objhdl->obj_hdr = qat_uclo_map_chunk((char *)objhdl->obj_buf, filehdr, ICP_QAT_UOF_OBJS); if (!objhdl->obj_hdr) { pr_err("QAT: object file chunk is null\n"); goto out_objhdr_err; } handle->obj_handle = objhdl; if (qat_uclo_parse_uof_obj(handle)) goto out_overlay_obj_err; return 0; out_overlay_obj_err: handle->obj_handle = NULL; free(objhdl->obj_hdr, M_QAT); out_objhdr_err: free(objhdl->obj_buf, 
M_QAT); free(objhdl, M_QAT); return ENOMEM; } static int qat_uclo_map_mof_file_hdr(struct icp_qat_fw_loader_handle *handle, const struct icp_qat_mof_file_hdr *mof_ptr, u32 mof_size) { unsigned int checksum = 0; unsigned int min_ver_offset = 0; struct icp_qat_mof_handle *mobj_handle = handle->mobj_handle; mobj_handle->file_id = ICP_QAT_MOF_FID; mobj_handle->mof_buf = (const char *)mof_ptr; mobj_handle->mof_size = mof_size; min_ver_offset = mof_size - offsetof(struct icp_qat_mof_file_hdr, min_ver); checksum = qat_uclo_calc_str_checksum((const char *)&mof_ptr->min_ver, min_ver_offset); if (checksum != mof_ptr->checksum) { pr_err("QAT: incorrect MOF checksum\n"); return EINVAL; } mobj_handle->checksum = mof_ptr->checksum; mobj_handle->min_ver = mof_ptr->min_ver; mobj_handle->maj_ver = mof_ptr->maj_ver; return 0; } void qat_uclo_del_mof(struct icp_qat_fw_loader_handle *handle) { struct icp_qat_mof_handle *mobj_handle = handle->mobj_handle; free(mobj_handle->obj_table.obj_hdr, M_QAT); mobj_handle->obj_table.obj_hdr = NULL; free(handle->mobj_handle, M_QAT); handle->mobj_handle = NULL; } static int qat_uclo_seek_obj_inside_mof(struct icp_qat_mof_handle *mobj_handle, const char *obj_name, const char **obj_ptr, unsigned int *obj_size) { unsigned int i; struct icp_qat_mof_objhdr *obj_hdr = mobj_handle->obj_table.obj_hdr; for (i = 0; i < mobj_handle->obj_table.num_objs; i++) { if (!strncmp(obj_hdr[i].obj_name, obj_name, ICP_QAT_SUOF_OBJ_NAME_LEN)) { *obj_ptr = obj_hdr[i].obj_buf; *obj_size = obj_hdr[i].obj_size; break; } } if (i >= mobj_handle->obj_table.num_objs) { pr_err("QAT: object %s is not found inside MOF\n", obj_name); return EFAULT; } return 0; } static int qat_uclo_map_obj_from_mof(struct icp_qat_mof_handle *mobj_handle, struct icp_qat_mof_objhdr *mobj_hdr, struct icp_qat_mof_obj_chunkhdr *obj_chunkhdr) { if ((strncmp((char *)obj_chunkhdr->chunk_id, ICP_QAT_UOF_IMAG, ICP_QAT_MOF_OBJ_CHUNKID_LEN)) == 0) { mobj_hdr->obj_buf = (const char *)((unsigned long)obj_chunkhdr->offset + mobj_handle->uobjs_hdr); } else if ((strncmp((char *)(obj_chunkhdr->chunk_id), ICP_QAT_SUOF_IMAG, ICP_QAT_MOF_OBJ_CHUNKID_LEN)) == 0) { mobj_hdr->obj_buf = (const char *)((unsigned long)obj_chunkhdr->offset + mobj_handle->sobjs_hdr); } else { pr_err("QAT: unsupported chunk id\n"); return EINVAL; } mobj_hdr->obj_size = (unsigned int)obj_chunkhdr->size; mobj_hdr->obj_name = (char *)(obj_chunkhdr->name + mobj_handle->sym_str); return 0; } static int qat_uclo_map_objs_from_mof(struct icp_qat_mof_handle *mobj_handle) { struct icp_qat_mof_objhdr *mof_obj_hdr; const struct icp_qat_mof_obj_hdr *uobj_hdr; const struct icp_qat_mof_obj_hdr *sobj_hdr; struct icp_qat_mof_obj_chunkhdr *uobj_chunkhdr; struct icp_qat_mof_obj_chunkhdr *sobj_chunkhdr; unsigned int uobj_chunk_num = 0, sobj_chunk_num = 0; unsigned int *valid_chunks = 0; int ret, i; uobj_hdr = (const struct icp_qat_mof_obj_hdr *)mobj_handle->uobjs_hdr; sobj_hdr = (const struct icp_qat_mof_obj_hdr *)mobj_handle->sobjs_hdr; if (uobj_hdr) uobj_chunk_num = uobj_hdr->num_chunks; if (sobj_hdr) sobj_chunk_num = sobj_hdr->num_chunks; mof_obj_hdr = (struct icp_qat_mof_objhdr *) malloc((uobj_chunk_num + sobj_chunk_num) * sizeof(*mof_obj_hdr), M_QAT, M_WAITOK | M_ZERO); mobj_handle->obj_table.obj_hdr = mof_obj_hdr; valid_chunks = &mobj_handle->obj_table.num_objs; uobj_chunkhdr = (struct icp_qat_mof_obj_chunkhdr *)((uintptr_t)uobj_hdr + sizeof(*uobj_hdr)); sobj_chunkhdr = (struct icp_qat_mof_obj_chunkhdr *)((uintptr_t)sobj_hdr + sizeof(*sobj_hdr)); /* map uof objects */ for (i = 0; i < 
uobj_chunk_num; i++) { ret = qat_uclo_map_obj_from_mof(mobj_handle, &mof_obj_hdr[*valid_chunks], &uobj_chunkhdr[i]); if (ret) return ret; (*valid_chunks)++; } /* map suof objects */ for (i = 0; i < sobj_chunk_num; i++) { ret = qat_uclo_map_obj_from_mof(mobj_handle, &mof_obj_hdr[*valid_chunks], &sobj_chunkhdr[i]); if (ret) return ret; (*valid_chunks)++; } if ((uobj_chunk_num + sobj_chunk_num) != *valid_chunks) { pr_err("QAT: inconsistent UOF/SUOF chunk amount\n"); return EINVAL; } return 0; } static void qat_uclo_map_mof_symobjs(struct icp_qat_mof_handle *mobj_handle, struct icp_qat_mof_chunkhdr *mof_chunkhdr) { char **sym_str = (char **)&mobj_handle->sym_str; unsigned int *sym_size = &mobj_handle->sym_size; struct icp_qat_mof_str_table *str_table_obj; *sym_size = *(unsigned int *)(uintptr_t)(mof_chunkhdr->offset + mobj_handle->mof_buf); *sym_str = (char *)(uintptr_t)(mobj_handle->mof_buf + mof_chunkhdr->offset + sizeof(str_table_obj->tab_len)); } static void qat_uclo_map_mof_chunk(struct icp_qat_mof_handle *mobj_handle, struct icp_qat_mof_chunkhdr *mof_chunkhdr) { if (!strncmp(mof_chunkhdr->chunk_id, ICP_QAT_MOF_SYM_OBJS, ICP_QAT_MOF_OBJ_ID_LEN)) qat_uclo_map_mof_symobjs(mobj_handle, mof_chunkhdr); else if (!strncmp(mof_chunkhdr->chunk_id, ICP_QAT_UOF_OBJS, ICP_QAT_MOF_OBJ_ID_LEN)) mobj_handle->uobjs_hdr = mobj_handle->mof_buf + (unsigned long)mof_chunkhdr->offset; else if (!strncmp(mof_chunkhdr->chunk_id, ICP_QAT_SUOF_OBJS, ICP_QAT_MOF_OBJ_ID_LEN)) mobj_handle->sobjs_hdr = mobj_handle->mof_buf + (unsigned long)mof_chunkhdr->offset; } static int qat_uclo_check_mof_format(const struct icp_qat_mof_file_hdr *mof_hdr) { int maj = mof_hdr->maj_ver & 0xff; int min = mof_hdr->min_ver & 0xff; if (mof_hdr->file_id != ICP_QAT_MOF_FID) { pr_err("QAT: invalid header 0x%x\n", mof_hdr->file_id); return EINVAL; } if (mof_hdr->num_chunks <= 0x1) { pr_err("QAT: MOF chunk amount is incorrect\n"); return EINVAL; } if (maj != ICP_QAT_MOF_MAJVER || min != ICP_QAT_MOF_MINVER) { pr_err("QAT: bad MOF version, major 0x%x, minor 0x%x\n", maj, min); return EINVAL; } return 0; } static int qat_uclo_map_mof_obj(struct icp_qat_fw_loader_handle *handle, const struct icp_qat_mof_file_hdr *mof_ptr, u32 mof_size, const char *obj_name, const char **obj_ptr, unsigned int *obj_size) { struct icp_qat_mof_handle *mobj_handle; struct icp_qat_mof_chunkhdr *mof_chunkhdr; unsigned short chunks_num; int ret; unsigned int i; if (mof_ptr->file_id == ICP_QAT_UOF_FID || mof_ptr->file_id == ICP_QAT_SUOF_FID) { if (obj_ptr) *obj_ptr = (const char *)mof_ptr; if (obj_size) *obj_size = (unsigned int)mof_size; return 0; } if (qat_uclo_check_mof_format(mof_ptr)) return EINVAL; mobj_handle = malloc(sizeof(*mobj_handle), M_QAT, M_WAITOK | M_ZERO); handle->mobj_handle = mobj_handle; ret = qat_uclo_map_mof_file_hdr(handle, mof_ptr, mof_size); if (ret) return ret; mof_chunkhdr = (struct icp_qat_mof_chunkhdr *)((uintptr_t)mof_ptr + sizeof(*mof_ptr)); chunks_num = mof_ptr->num_chunks; /*Parse MOF file chunks*/ for (i = 0; i < chunks_num; i++) qat_uclo_map_mof_chunk(mobj_handle, &mof_chunkhdr[i]); /*All sym_objs uobjs and sobjs should be available*/ if (!mobj_handle->sym_str || (!mobj_handle->uobjs_hdr && !mobj_handle->sobjs_hdr)) return EINVAL; ret = qat_uclo_map_objs_from_mof(mobj_handle); if (ret) return ret; /*Seek specified uof object in MOF*/ ret = qat_uclo_seek_obj_inside_mof(mobj_handle, obj_name, obj_ptr, obj_size); if (ret) return ret; return 0; } int qat_uclo_map_obj(struct icp_qat_fw_loader_handle *handle, const void *addr_ptr, u32 
mem_size, const char *obj_name) { const char *obj_addr; u32 obj_size; int ret; BUILD_BUG_ON(ICP_QAT_UCLO_MAX_AE > (sizeof(handle->hal_handle->ae_mask) * 8)); if (!handle || !addr_ptr || mem_size < 24) return EINVAL; if (obj_name) { ret = qat_uclo_map_mof_obj( handle, addr_ptr, mem_size, obj_name, &obj_addr, &obj_size); if (ret) return ret; } else { obj_addr = addr_ptr; obj_size = mem_size; } return (handle->fw_auth) ? qat_uclo_map_suof_obj(handle, obj_addr, obj_size) : qat_uclo_map_uof_obj(handle, obj_addr, obj_size); } void qat_uclo_del_obj(struct icp_qat_fw_loader_handle *handle) { struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; unsigned int a; unsigned long ae_mask = handle->hal_handle->ae_mask; if (handle->mobj_handle) qat_uclo_del_mof(handle); if (handle->sobj_handle) qat_uclo_del_suof(handle); if (!obj_handle) return; free(obj_handle->uword_buf, M_QAT); for (a = 0; a < obj_handle->uimage_num; a++) free(obj_handle->ae_uimage[a].page, M_QAT); for_each_set_bit(a, &ae_mask, handle->hal_handle->ae_max_num) { qat_uclo_free_ae_data(&obj_handle->ae_data[a]); } free(obj_handle->obj_hdr, M_QAT); free(obj_handle->obj_buf, M_QAT); free(obj_handle, M_QAT); handle->obj_handle = NULL; } static void qat_uclo_fill_uwords(struct icp_qat_uclo_objhandle *obj_handle, struct icp_qat_uclo_encap_page *encap_page, uint64_t *uword, unsigned int addr_p, unsigned int raddr, uint64_t fill) { uint64_t uwrd = 0; unsigned int i, addr; if (!encap_page) { *uword = fill; return; } addr = (encap_page->page_region) ? raddr : addr_p; for (i = 0; i < encap_page->uwblock_num; i++) { if (addr >= encap_page->uwblock[i].start_addr && addr <= encap_page->uwblock[i].start_addr + encap_page->uwblock[i].words_num - 1) { addr -= encap_page->uwblock[i].start_addr; addr *= obj_handle->uword_in_bytes; memcpy(&uwrd, (void *)(((uintptr_t)encap_page->uwblock[i] .micro_words) + addr), obj_handle->uword_in_bytes); uwrd = uwrd & 0xbffffffffffull; } } *uword = uwrd; if (*uword == INVLD_UWORD) *uword = fill; } static void qat_uclo_wr_uimage_raw_page(struct icp_qat_fw_loader_handle *handle, struct icp_qat_uclo_encap_page *encap_page, unsigned int ae) { unsigned int uw_physical_addr, uw_relative_addr, i, words_num, cpylen; struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; uint64_t fill_pat; /* load the page starting at appropriate ustore address */ /* get fill-pattern from an image -- they are all the same */ memcpy(&fill_pat, obj_handle->ae_uimage[0].img_ptr->fill_pattern, sizeof(uint64_t)); uw_physical_addr = encap_page->beg_addr_p; uw_relative_addr = 0; words_num = encap_page->micro_words_num; while (words_num) { if (words_num < UWORD_CPYBUF_SIZE) cpylen = words_num; else cpylen = UWORD_CPYBUF_SIZE; /* load the buffer */ for (i = 0; i < cpylen; i++) qat_uclo_fill_uwords(obj_handle, encap_page, &obj_handle->uword_buf[i], uw_physical_addr + i, uw_relative_addr + i, fill_pat); if (obj_handle->ae_data[ae].shareable_ustore && !IS_QAT_GEN4(pci_get_device(GET_DEV(handle->accel_dev)))) /* copy the buffer to ustore */ qat_hal_wr_coalesce_uwords(handle, (unsigned char)ae, uw_physical_addr, cpylen, obj_handle->uword_buf); else /* copy the buffer to ustore */ qat_hal_wr_uwords(handle, (unsigned char)ae, uw_physical_addr, cpylen, obj_handle->uword_buf); uw_physical_addr += cpylen; uw_relative_addr += cpylen; words_num -= cpylen; } } static void qat_uclo_wr_uimage_page(struct icp_qat_fw_loader_handle *handle, struct icp_qat_uof_image *image) { struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; unsigned int 
ctx_mask, s; struct icp_qat_uclo_page *page; unsigned char ae = 0; int ctx; struct icp_qat_uclo_aedata *aed; unsigned long ae_mask = handle->hal_handle->ae_mask; if (ICP_QAT_CTX_MODE(image->ae_mode) == ICP_QAT_UCLO_MAX_CTX) ctx_mask = 0xff; else ctx_mask = 0x55; /* load the default page and set assigned CTX PC * to the entrypoint address */ for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) { unsigned long cfg_ae_mask = handle->cfg_ae_mask; unsigned long ae_assigned = image->ae_assigned; if (!test_bit(ae, &cfg_ae_mask)) continue; if (!test_bit(ae, &ae_assigned)) continue; aed = &obj_handle->ae_data[ae]; /* find the slice to which this image is assigned */ for (s = 0; s < aed->slice_num; s++) { if (image->ctx_assigned & aed->ae_slices[s].ctx_mask_assigned) break; } if (s >= aed->slice_num) continue; page = aed->ae_slices[s].page; if (!page->encap_page->def_page) continue; qat_uclo_wr_uimage_raw_page(handle, page->encap_page, ae); page = aed->ae_slices[s].page; for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++) aed->ae_slices[s].cur_page[ctx] = (ctx_mask & (1 << ctx)) ? page : NULL; qat_hal_set_live_ctx(handle, (unsigned char)ae, image->ctx_assigned); qat_hal_set_pc(handle, (unsigned char)ae, image->ctx_assigned, image->entry_address); } } static int qat_uclo_wr_suof_img(struct icp_qat_fw_loader_handle *handle) { unsigned int i; struct icp_qat_fw_auth_desc *desc = NULL; struct icp_firml_dram_desc img_desc; struct icp_qat_suof_handle *sobj_handle = handle->sobj_handle; struct icp_qat_suof_img_hdr *simg_hdr = sobj_handle->img_table.simg_hdr; for (i = 0; i < sobj_handle->img_table.num_simgs; i++) { if (qat_uclo_map_auth_fw(handle, (const char *)simg_hdr[i].simg_buf, (unsigned int)(simg_hdr[i].simg_len), &img_desc, &desc)) goto wr_err; if (qat_uclo_auth_fw(handle, desc)) goto wr_err; if (qat_uclo_is_broadcast(handle, i)) { if (qat_uclo_broadcast_load_fw(handle, desc)) goto wr_err; } else { if (qat_uclo_load_fw(handle, desc)) goto wr_err; } qat_uclo_simg_free(handle, &img_desc); } return 0; wr_err: qat_uclo_simg_free(handle, &img_desc); return -EINVAL; } static int qat_uclo_wr_uof_img(struct icp_qat_fw_loader_handle *handle) { struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; unsigned int i; if (qat_uclo_init_globals(handle)) return EINVAL; for (i = 0; i < obj_handle->uimage_num; i++) { if (!obj_handle->ae_uimage[i].img_ptr) return EINVAL; if (qat_uclo_init_ustore(handle, &obj_handle->ae_uimage[i])) return EINVAL; qat_uclo_wr_uimage_page(handle, obj_handle->ae_uimage[i].img_ptr); } return 0; } int qat_uclo_wr_all_uimage(struct icp_qat_fw_loader_handle *handle) { return (handle->fw_auth) ? 
qat_uclo_wr_suof_img(handle) : qat_uclo_wr_uof_img(handle); } int qat_uclo_set_cfg_ae_mask(struct icp_qat_fw_loader_handle *handle, unsigned int cfg_ae_mask) { if (!cfg_ae_mask) return EINVAL; handle->cfg_ae_mask = cfg_ae_mask; return 0; } diff --git a/sys/dev/qat/qat_hw/qat_200xx/adf_200xx_hw_data.h b/sys/dev/qat/qat_hw/qat_200xx/adf_200xx_hw_data.h index 67560a7a7d1c..a7324509bea7 100644 --- a/sys/dev/qat/qat_hw/qat_200xx/adf_200xx_hw_data.h +++ b/sys/dev/qat/qat_hw/qat_200xx/adf_200xx_hw_data.h @@ -1,127 +1,127 @@ /* SPDX-License-Identifier: BSD-3-Clause */ -/* Copyright(c) 2007-2022 Intel Corporation */ +/* Copyright(c) 2007-2025 Intel Corporation */ #ifndef ADF_200XX_HW_DATA_H_ #define ADF_200XX_HW_DATA_H_ /* PCIe configuration space */ -#define ADF_200XX_PMISC_BAR 0 -#define ADF_200XX_ETR_BAR 1 +#define ADF_200XX_PMISC_BAR 1 +#define ADF_200XX_ETR_BAR 2 #define ADF_200XX_RX_RINGS_OFFSET 8 #define ADF_200XX_TX_RINGS_MASK 0xFF #define ADF_200XX_MAX_ACCELERATORS 3 #define ADF_200XX_MAX_ACCELENGINES 6 #define ADF_200XX_ACCELERATORS_REG_OFFSET 16 #define ADF_200XX_ACCELERATORS_MASK 0x7 #define ADF_200XX_ACCELENGINES_MASK 0x3F #define ADF_200XX_ETR_MAX_BANKS 16 #define ADF_200XX_SMIAPF0_MASK_OFFSET (0x3A000 + 0x28) #define ADF_200XX_SMIAPF1_MASK_OFFSET (0x3A000 + 0x30) #define ADF_200XX_SMIA0_MASK 0xFFFF #define ADF_200XX_SMIA1_MASK 0x1 #define ADF_200XX_SOFTSTRAP_CSR_OFFSET 0x2EC #define ADF_200XX_POWERGATE_PKE BIT(24) #define ADF_200XX_POWERGATE_CY BIT(23) #define ADF_200XX_PFIEERRUNCSTSR 0x280 /* Error detection and correction */ #define ADF_200XX_AE_CTX_ENABLES(i) ((i)*0x1000 + 0x20818) #define ADF_200XX_AE_MISC_CONTROL(i) ((i)*0x1000 + 0x20960) #define ADF_200XX_ENABLE_AE_ECC_ERR BIT(28) #define ADF_200XX_ENABLE_AE_ECC_PARITY_CORR (BIT(24) | BIT(12)) #define ADF_200XX_UERRSSMSH(i) (i * 0x4000 + 0x18) #define ADF_200XX_CERRSSMSH(i) (i * 0x4000 + 0x10) #define ADF_200XX_ERRSSMSH_EN BIT(3) #define ADF_200XX_ERRSOU3 (0x3A000 + 0x0C) #define ADF_200XX_ERRSOU5 (0x3A000 + 0xD8) /* BIT(2) enables the logging of push/pull data errors. */ #define ADF_200XX_PPERR_EN (BIT(2)) /* Mask for VF2PF interrupts */ #define ADF_200XX_VF2PF1_16 (0xFFFF << 9) #define ADF_200XX_ERRSOU3_VF2PF(errsou3) (((errsou3)&0x01FFFE00) >> 9) #define ADF_200XX_ERRMSK3_VF2PF(vf_mask) (((vf_mask)&0xFFFF) << 9) /* Masks for correctable error interrupts. */ #define ADF_200XX_ERRMSK0_CERR (BIT(24) | BIT(16) | BIT(8) | BIT(0)) #define ADF_200XX_ERRMSK1_CERR (BIT(8) | BIT(0)) #define ADF_200XX_ERRMSK5_CERR (0) /* Masks for uncorrectable error interrupts. */ #define ADF_200XX_ERRMSK0_UERR (BIT(25) | BIT(17) | BIT(9) | BIT(1)) #define ADF_200XX_ERRMSK1_UERR (BIT(9) | BIT(1)) #define ADF_200XX_ERRMSK3_UERR \ (BIT(6) | BIT(5) | BIT(4) | BIT(3) | BIT(2) | BIT(0)) #define ADF_200XX_ERRMSK5_UERR (BIT(16)) /* RI CPP control */ #define ADF_200XX_RICPPINTCTL (0x3A000 + 0x110) /* * BIT(2) enables error detection and reporting on the RI Parity Error. * BIT(1) enables error detection and reporting on the RI CPP Pull interface. * BIT(0) enables error detection and reporting on the RI CPP Push interface. */ #define ADF_200XX_RICPP_EN (BIT(2) | BIT(1) | BIT(0)) /* TI CPP control */ #define ADF_200XX_TICPPINTCTL (0x3A400 + 0x138) /* * BIT(3) enables error detection and reporting on the ETR Parity Error. * BIT(2) enables error detection and reporting on the TI Parity Error. * BIT(1) enables error detection and reporting on the TI CPP Pull interface. * BIT(0) enables error detection and reporting on the TI CPP Push interface. 
*/ #define ADF_200XX_TICPP_EN (BIT(3) | BIT(2) | BIT(1) | BIT(0)) /* CFC Uncorrectable Errors */ #define ADF_200XX_CPP_CFC_ERR_CTRL (0x30000 + 0xC00) /* * BIT(1) enables interrupt. * BIT(0) enables detecting and logging of push/pull data errors. */ #define ADF_200XX_CPP_CFC_UE (BIT(1) | BIT(0)) #define ADF_200XX_SLICEPWRDOWN(i) ((i)*0x4000 + 0x2C) /* Enabling PKE4-PKE0. */ #define ADF_200XX_MMP_PWR_UP_MSK \ (BIT(20) | BIT(19) | BIT(18) | BIT(17) | BIT(16)) /* CPM Uncorrectable Errors */ #define ADF_200XX_INTMASKSSM(i) ((i)*0x4000 + 0x0) /* Disabling interrupts for correctable errors. */ #define ADF_200XX_INTMASKSSM_UERR \ (BIT(11) | BIT(9) | BIT(7) | BIT(5) | BIT(3) | BIT(1)) /* MMP */ /* BIT(3) enables correction. */ #define ADF_200XX_CERRSSMMMP_EN (BIT(3)) /* BIT(3) enables logging. */ #define ADF_200XX_UERRSSMMMP_EN (BIT(3)) #define ADF_200XX_PF2VF_OFFSET(i) (0x3A000 + 0x280 + ((i)*0x04)) #define ADF_200XX_VINTMSK_OFFSET(i) (0x3A000 + 0x200 + ((i)*0x04)) /* Arbiter configuration */ #define ADF_200XX_ARB_OFFSET 0x30000 #define ADF_200XX_ARB_WRK_2_SER_MAP_OFFSET 0x180 #define ADF_200XX_ARB_WQCFG_OFFSET 0x100 /* Admin Interface Reg Offset */ #define ADF_200XX_ADMINMSGUR_OFFSET (0x3A000 + 0x574) #define ADF_200XX_ADMINMSGLR_OFFSET (0x3A000 + 0x578) #define ADF_200XX_MAILBOX_BASE_OFFSET 0x20970 /* Firmware Binary */ #define ADF_200XX_FW "qat_200xx_fw" #define ADF_200XX_MMP "qat_200xx_mmp_fw" void adf_init_hw_data_200xx(struct adf_hw_device_data *hw_data); void adf_clean_hw_data_200xx(struct adf_hw_device_data *hw_data); #define ADF_200XX_AE_FREQ (685 * 1000000) #define ADF_200XX_MIN_AE_FREQ (333 * 1000000) #define ADF_200XX_MAX_AE_FREQ (685 * 1000000) #endif diff --git a/sys/dev/qat/qat_hw/qat_4xxx/adf_4xxx_hw_data.c b/sys/dev/qat/qat_hw/qat_4xxx/adf_4xxx_hw_data.c index 9a84ad652282..d730efd5952b 100644 --- a/sys/dev/qat/qat_hw/qat_4xxx/adf_4xxx_hw_data.c +++ b/sys/dev/qat/qat_hw/qat_4xxx/adf_4xxx_hw_data.c @@ -1,1039 +1,1042 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright(c) 2007-2025 Intel Corporation */ #include #include #include #include #include #include #include #include #include #include "adf_4xxx_hw_data.h" #include "adf_heartbeat.h" #include "icp_qat_fw_init_admin.h" #include "icp_qat_hw.h" #define ADF_CONST_TABLE_SIZE 1024 struct adf_fw_config { u32 ae_mask; char *obj_name; }; /* Accel unit information */ static const struct adf_accel_unit adf_4xxx_au_a_ae[] = { { 0x1, 0x1, 0xF, 0x1B, 4, ADF_ACCEL_SERVICE_NULL }, { 0x2, 0x1, 0xF0, 0x6C0, 4, ADF_ACCEL_SERVICE_NULL }, { 0x4, 0x1, 0x100, 0xF000, 1, ADF_ACCEL_ADMIN }, }; /* Worker thread to service arbiter mappings */ static u32 thrd_to_arb_map[ADF_4XXX_MAX_ACCELENGINES] = { 0x5555555, 0x5555555, 0x5555555, 0x5555555, 0xAAAAAAA, 0xAAAAAAA, 0xAAAAAAA, 0xAAAAAAA, 0x0 }; /* Masks representing ME thread-service mappings. * Thread 7 carries out Admin work and is thus * left out. 
*/ static u8 default_active_thd_mask = 0x7F; static u8 dc_me_active_thd_mask = 0x03; static u32 thrd_to_arb_map_gen[ADF_4XXX_MAX_ACCELENGINES] = { 0 }; #define ADF_4XXX_ASYM_SYM \ (ASYM | SYM << ADF_CFG_SERV_RING_PAIR_1_SHIFT | \ ASYM << ADF_CFG_SERV_RING_PAIR_2_SHIFT | \ SYM << ADF_CFG_SERV_RING_PAIR_3_SHIFT) #define ADF_4XXX_DC \ (COMP | COMP << ADF_CFG_SERV_RING_PAIR_1_SHIFT | \ COMP << ADF_CFG_SERV_RING_PAIR_2_SHIFT | \ COMP << ADF_CFG_SERV_RING_PAIR_3_SHIFT) #define ADF_4XXX_SYM \ (SYM | SYM << ADF_CFG_SERV_RING_PAIR_1_SHIFT | \ SYM << ADF_CFG_SERV_RING_PAIR_2_SHIFT | \ SYM << ADF_CFG_SERV_RING_PAIR_3_SHIFT) #define ADF_4XXX_ASYM \ (ASYM | ASYM << ADF_CFG_SERV_RING_PAIR_1_SHIFT | \ ASYM << ADF_CFG_SERV_RING_PAIR_2_SHIFT | \ ASYM << ADF_CFG_SERV_RING_PAIR_3_SHIFT) #define ADF_4XXX_ASYM_DC \ (ASYM | ASYM << ADF_CFG_SERV_RING_PAIR_1_SHIFT | \ COMP << ADF_CFG_SERV_RING_PAIR_2_SHIFT | \ COMP << ADF_CFG_SERV_RING_PAIR_3_SHIFT) #define ADF_4XXX_SYM_DC \ (SYM | SYM << ADF_CFG_SERV_RING_PAIR_1_SHIFT | \ COMP << ADF_CFG_SERV_RING_PAIR_2_SHIFT | \ COMP << ADF_CFG_SERV_RING_PAIR_3_SHIFT) #define ADF_4XXX_NA \ (NA | NA << ADF_CFG_SERV_RING_PAIR_1_SHIFT | \ NA << ADF_CFG_SERV_RING_PAIR_2_SHIFT | \ NA << ADF_CFG_SERV_RING_PAIR_3_SHIFT) #define ADF_4XXX_DEFAULT_RING_TO_SRV_MAP ADF_4XXX_ASYM_SYM struct adf_enabled_services { const char svcs_enabled[ADF_CFG_MAX_VAL_LEN_IN_BYTES]; u16 rng_to_svc_msk; }; static struct adf_enabled_services adf_4xxx_svcs[] = { { "dc", ADF_4XXX_DC }, { "sym", ADF_4XXX_SYM }, { "asym", ADF_4XXX_ASYM }, { "dc;asym", ADF_4XXX_ASYM_DC }, { "asym;dc", ADF_4XXX_ASYM_DC }, { "sym;dc", ADF_4XXX_SYM_DC }, { "dc;sym", ADF_4XXX_SYM_DC }, { "asym;sym", ADF_4XXX_ASYM_SYM }, { "sym;asym", ADF_4XXX_ASYM_SYM }, { "cy", ADF_4XXX_ASYM_SYM } }; static struct adf_hw_device_class adf_4xxx_class = { .name = ADF_4XXX_DEVICE_NAME, .type = DEV_4XXX, .instances = 0, }; static u32 get_accel_mask(struct adf_accel_dev *accel_dev) { return ADF_4XXX_ACCELERATORS_MASK; } static u32 get_ae_mask(struct adf_accel_dev *accel_dev) { u32 fusectl4 = accel_dev->hw_device->fuses; return ~fusectl4 & ADF_4XXX_ACCELENGINES_MASK; } static void adf_set_asym_rings_mask(struct adf_accel_dev *accel_dev) { accel_dev->hw_device->asym_rings_mask = ADF_4XXX_DEF_ASYM_MASK; } static int get_ring_to_svc_map(struct adf_accel_dev *accel_dev, u16 *ring_to_svc_map) { char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES]; char val[ADF_CFG_MAX_KEY_LEN_IN_BYTES]; u32 i = 0; *ring_to_svc_map = 0; /* Get the services enabled by user */ snprintf(key, sizeof(key), ADF_SERVICES_ENABLED); if (adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC, key, val)) return EFAULT; for (i = 0; i < ARRAY_SIZE(adf_4xxx_svcs); i++) { if (!strncmp(val, adf_4xxx_svcs[i].svcs_enabled, ADF_CFG_MAX_KEY_LEN_IN_BYTES)) { *ring_to_svc_map = adf_4xxx_svcs[i].rng_to_svc_msk; return 0; } } device_printf(GET_DEV(accel_dev), "Invalid services enabled: %s\n", val); return EFAULT; } static u32 get_num_accels(struct adf_hw_device_data *self) { return ADF_4XXX_MAX_ACCELERATORS; } static u32 get_num_aes(struct adf_hw_device_data *self) { if (!self || !self->ae_mask) return 0; return hweight32(self->ae_mask); } static u32 get_misc_bar_id(struct adf_hw_device_data *self) { return ADF_4XXX_PMISC_BAR; } static u32 get_etr_bar_id(struct adf_hw_device_data *self) { return ADF_4XXX_ETR_BAR; } static u32 get_sram_bar_id(struct adf_hw_device_data *self) { return ADF_4XXX_SRAM_BAR; } /* * The vector routing table is used to select the MSI-X entry to use for each * interrupt source. 
* The first ADF_4XXX_ETR_MAX_BANKS entries correspond to ring interrupts. * The final entry corresponds to VF2PF or error interrupts. * This vector table could be used to configure one MSI-X entry to be shared * between multiple interrupt sources. * * The default routing is set to have a one to one correspondence between the * interrupt source and the MSI-X entry used. */ static void set_msix_default_rttable(struct adf_accel_dev *accel_dev) { struct resource *csr; int i; csr = (&GET_BARS(accel_dev)[ADF_4XXX_PMISC_BAR])->virt_addr; for (i = 0; i <= ADF_4XXX_ETR_MAX_BANKS; i++) ADF_CSR_WR(csr, ADF_4XXX_MSIX_RTTABLE_OFFSET(i), i); } static u32 adf_4xxx_get_hw_cap(struct adf_accel_dev *accel_dev) { device_t pdev = accel_dev->accel_pci_dev.pci_dev; u32 fusectl1; u32 capabilities_sym, capabilities_sym_cipher, capabilities_sym_auth, capabilities_asym, capabilities_dc, capabilities_other; capabilities_other = ICP_ACCEL_CAPABILITIES_RL; /* Read accelerator capabilities mask */ fusectl1 = pci_read_config(pdev, ADF_4XXX_FUSECTL1_OFFSET, 4); capabilities_sym_cipher = ICP_ACCEL_CAPABILITIES_HKDF | ICP_ACCEL_CAPABILITIES_SM4 | ICP_ACCEL_CAPABILITIES_CHACHA_POLY | ICP_ACCEL_CAPABILITIES_AESGCM_SPC | ICP_ACCEL_CAPABILITIES_AES_V2; capabilities_sym_auth = ICP_ACCEL_CAPABILITIES_SM3 | ICP_ACCEL_CAPABILITIES_SHA3 | ICP_ACCEL_CAPABILITIES_SHA3_EXT; /* A set bit in fusectl1 means the feature is OFF in this SKU */ if (fusectl1 & ICP_ACCEL_4XXX_MASK_CIPHER_SLICE) { capabilities_sym_cipher &= ~ICP_ACCEL_CAPABILITIES_HKDF; capabilities_sym_cipher &= ~ICP_ACCEL_CAPABILITIES_SM4; } if (fusectl1 & ICP_ACCEL_4XXX_MASK_UCS_SLICE) { capabilities_sym_cipher &= ~ICP_ACCEL_CAPABILITIES_CHACHA_POLY; capabilities_sym_cipher &= ~ICP_ACCEL_CAPABILITIES_AESGCM_SPC; capabilities_sym_cipher &= ~ICP_ACCEL_CAPABILITIES_AES_V2; } if (fusectl1 & ICP_ACCEL_4XXX_MASK_AUTH_SLICE) { capabilities_sym_auth &= ~ICP_ACCEL_CAPABILITIES_SM3; capabilities_sym_auth &= ~ICP_ACCEL_CAPABILITIES_SHA3; capabilities_sym_auth &= ~ICP_ACCEL_CAPABILITIES_SHA3_EXT; } if (fusectl1 & ICP_ACCEL_4XXX_MASK_SMX_SLICE) { capabilities_sym_cipher &= ~ICP_ACCEL_CAPABILITIES_SM4; capabilities_sym_auth &= ~ICP_ACCEL_CAPABILITIES_SM3; } if (capabilities_sym_cipher) capabilities_sym_cipher |= ICP_ACCEL_CAPABILITIES_CIPHER; if (capabilities_sym_auth) capabilities_sym_auth |= ICP_ACCEL_CAPABILITIES_AUTHENTICATION; capabilities_sym = capabilities_sym_cipher | capabilities_sym_auth; if (capabilities_sym) capabilities_sym |= ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC; capabilities_asym = ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC | ICP_ACCEL_CAPABILITIES_SM2 | ICP_ACCEL_CAPABILITIES_ECEDMONT; if (fusectl1 & ICP_ACCEL_4XXX_MASK_PKE_SLICE) { capabilities_asym &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC; capabilities_asym &= ~ICP_ACCEL_CAPABILITIES_SM2; capabilities_asym &= ~ICP_ACCEL_CAPABILITIES_ECEDMONT; } capabilities_dc = ICP_ACCEL_CAPABILITIES_COMPRESSION | ICP_ACCEL_CAPABILITIES_LZ4_COMPRESSION | ICP_ACCEL_CAPABILITIES_LZ4S_COMPRESSION | ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY64; if (fusectl1 & ICP_ACCEL_4XXX_MASK_COMPRESS_SLICE) { capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_COMPRESSION; capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_LZ4_COMPRESSION; capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_LZ4S_COMPRESSION; capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY64; } return capabilities_sym | capabilities_dc | capabilities_asym | capabilities_other; } static u32 get_hb_clock(struct adf_hw_device_data *self) { /* * 4XXX uses KPT counter for HB */ return 
ADF_4XXX_KPT_COUNTER_FREQ; } static u32 get_ae_clock(struct adf_hw_device_data *self) { /* * Clock update interval is <16> ticks for qat_4xxx. */ return self->clock_frequency / 16; } static int measure_clock(struct adf_accel_dev *accel_dev) { u32 frequency; int ret = 0; ret = adf_dev_measure_clock(accel_dev, &frequency, ADF_4XXX_MIN_AE_FREQ, ADF_4XXX_MAX_AE_FREQ); if (ret) return ret; accel_dev->hw_device->clock_frequency = frequency; return 0; } static int adf_4xxx_configure_accel_units(struct adf_accel_dev *accel_dev) { char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES] = { 0 }; char val_str[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = { 0 }; if (adf_cfg_section_add(accel_dev, ADF_GENERAL_SEC)) goto err; snprintf(key, sizeof(key), ADF_SERVICES_ENABLED); snprintf(val_str, sizeof(val_str), ADF_CFG_ASYM ADF_SERVICES_SEPARATOR ADF_CFG_SYM); if (adf_cfg_add_key_value_param( accel_dev, ADF_GENERAL_SEC, key, (void *)val_str, ADF_STR)) goto err; return 0; err: device_printf(GET_DEV(accel_dev), "Failed to configure accel units\n"); return EINVAL; } static u32 get_num_accel_units(struct adf_hw_device_data *self) { return ADF_4XXX_MAX_ACCELUNITS; } static void get_accel_unit(struct adf_hw_device_data *self, struct adf_accel_unit **accel_unit) { memcpy(*accel_unit, adf_4xxx_au_a_ae, sizeof(adf_4xxx_au_a_ae)); } static void adf_exit_accel_unit_services(struct adf_accel_dev *accel_dev) { if (accel_dev->au_info) { kfree(accel_dev->au_info->au); accel_dev->au_info->au = NULL; kfree(accel_dev->au_info); accel_dev->au_info = NULL; } } static int get_accel_unit_config(struct adf_accel_dev *accel_dev, u8 *num_sym_au, u8 *num_dc_au, u8 *num_asym_au) { struct adf_hw_device_data *hw_data = accel_dev->hw_device; char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES]; char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES]; u32 num_au = hw_data->get_num_accel_units(hw_data); /* One AU will be allocated by default if a service enabled */ u32 alloc_au = 1; /* There's always one AU that is used for Admin AE */ u32 service_mask = ADF_ACCEL_ADMIN; char *token, *cur_str; u32 disabled_caps = 0; /* Get the services enabled by user */ snprintf(key, sizeof(key), ADF_SERVICES_ENABLED); if (adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC, key, val)) return EFAULT; cur_str = val; token = strsep(&cur_str, ADF_SERVICES_SEPARATOR); while (token) { if (!strncmp(token, ADF_CFG_SYM, strlen(ADF_CFG_SYM))) service_mask |= ADF_ACCEL_CRYPTO; if (!strncmp(token, ADF_CFG_ASYM, strlen(ADF_CFG_ASYM))) service_mask |= ADF_ACCEL_ASYM; /* cy means both asym & crypto should be enabled * Hardware resources allocation check will be done later */ if (!strncmp(token, ADF_CFG_CY, strlen(ADF_CFG_CY))) service_mask |= ADF_ACCEL_ASYM | ADF_ACCEL_CRYPTO; if (!strncmp(token, ADF_SERVICE_DC, strlen(ADF_SERVICE_DC))) service_mask |= ADF_ACCEL_COMPRESSION; token = strsep(&cur_str, ADF_SERVICES_SEPARATOR); } /* Ensure the user won't enable more services than it can support */ if (hweight32(service_mask) > num_au) { device_printf(GET_DEV(accel_dev), "Can't enable more services than "); device_printf(GET_DEV(accel_dev), "%d!\n", num_au); return EFAULT; } else if (hweight32(service_mask) == 2) { /* Due to limitation, besides AU for Admin AE * only 2 more AUs can be allocated */ alloc_au = 2; } if (service_mask & ADF_ACCEL_CRYPTO) *num_sym_au = alloc_au; if (service_mask & ADF_ACCEL_ASYM) *num_asym_au = alloc_au; if (service_mask & ADF_ACCEL_COMPRESSION) *num_dc_au = alloc_au; /*update capability*/ if (!*num_sym_au || !(service_mask & ADF_ACCEL_CRYPTO)) { disabled_caps = ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC | 
ICP_ACCEL_CAPABILITIES_CIPHER | ICP_ACCEL_CAPABILITIES_SHA3 | ICP_ACCEL_CAPABILITIES_SHA3_EXT | ICP_ACCEL_CAPABILITIES_HKDF | ICP_ACCEL_CAPABILITIES_SM3 | ICP_ACCEL_CAPABILITIES_SM4 | ICP_ACCEL_CAPABILITIES_CHACHA_POLY | ICP_ACCEL_CAPABILITIES_AESGCM_SPC | ICP_ACCEL_CAPABILITIES_AES_V2 | ICP_ACCEL_CAPABILITIES_AUTHENTICATION; } if (!*num_asym_au || !(service_mask & ADF_ACCEL_ASYM)) { disabled_caps |= ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC | ICP_ACCEL_CAPABILITIES_ECEDMONT; } if (!*num_dc_au || !(service_mask & ADF_ACCEL_COMPRESSION)) { disabled_caps |= ICP_ACCEL_CAPABILITIES_COMPRESSION | ICP_ACCEL_CAPABILITIES_LZ4_COMPRESSION | ICP_ACCEL_CAPABILITIES_LZ4S_COMPRESSION | ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY64; accel_dev->hw_device->extended_dc_capabilities = 0; } accel_dev->hw_device->accel_capabilities_mask = adf_4xxx_get_hw_cap(accel_dev) & ~disabled_caps; hw_data->service_mask = service_mask; hw_data->service_to_load_mask = service_mask; return 0; } static int adf_init_accel_unit_services(struct adf_accel_dev *accel_dev) { u8 num_sym_au = 0, num_dc_au = 0, num_asym_au = 0; struct adf_hw_device_data *hw_data = accel_dev->hw_device; u32 num_au = hw_data->get_num_accel_units(hw_data); u32 au_size = num_au * sizeof(struct adf_accel_unit); u8 i; if (get_accel_unit_config( accel_dev, &num_sym_au, &num_dc_au, &num_asym_au)) return EFAULT; accel_dev->au_info = kzalloc(sizeof(*accel_dev->au_info), GFP_KERNEL); if (!accel_dev->au_info) return ENOMEM; accel_dev->au_info->au = kzalloc(au_size, GFP_KERNEL); if (!accel_dev->au_info->au) { kfree(accel_dev->au_info); accel_dev->au_info = NULL; return ENOMEM; } accel_dev->au_info->num_cy_au = num_sym_au; accel_dev->au_info->num_dc_au = num_dc_au; accel_dev->au_info->num_asym_au = num_asym_au; get_accel_unit(hw_data, &accel_dev->au_info->au); /* Enable ASYM accel units */ for (i = 0; i < num_au && num_asym_au > 0; i++) { if (accel_dev->au_info->au[i].services == ADF_ACCEL_SERVICE_NULL) { accel_dev->au_info->au[i].services = ADF_ACCEL_ASYM; num_asym_au--; } } /* Enable SYM accel units */ for (i = 0; i < num_au && num_sym_au > 0; i++) { if (accel_dev->au_info->au[i].services == ADF_ACCEL_SERVICE_NULL) { accel_dev->au_info->au[i].services = ADF_ACCEL_CRYPTO; num_sym_au--; } } /* Enable compression accel units */ for (i = 0; i < num_au && num_dc_au > 0; i++) { if (accel_dev->au_info->au[i].services == ADF_ACCEL_SERVICE_NULL) { accel_dev->au_info->au[i].services = ADF_ACCEL_COMPRESSION; num_dc_au--; } } accel_dev->au_info->dc_ae_msk |= hw_data->get_obj_cfg_ae_mask(accel_dev, ADF_ACCEL_COMPRESSION); return 0; } static int adf_init_accel_units(struct adf_accel_dev *accel_dev) { return adf_init_accel_unit_services(accel_dev); } static void adf_exit_accel_units(struct adf_accel_dev *accel_dev) { /* reset the AU service */ adf_exit_accel_unit_services(accel_dev); } static const char * get_obj_name(struct adf_accel_dev *accel_dev, enum adf_accel_unit_services service) { switch (service) { case ADF_ACCEL_ASYM: return ADF_4XXX_ASYM_OBJ; case ADF_ACCEL_CRYPTO: return ADF_4XXX_SYM_OBJ; case ADF_ACCEL_COMPRESSION: return ADF_4XXX_DC_OBJ; case ADF_ACCEL_ADMIN: return ADF_4XXX_ADMIN_OBJ; default: return NULL; } } static uint32_t get_objs_num(struct adf_accel_dev *accel_dev) { return ADF_4XXX_MAX_OBJ; } static uint32_t get_obj_cfg_ae_mask(struct adf_accel_dev *accel_dev, enum adf_accel_unit_services service) { u32 ae_mask = 0; struct adf_hw_device_data *hw_data = accel_dev->hw_device; u32 num_au = hw_data->get_num_accel_units(hw_data); struct adf_accel_unit *accel_unit = 
accel_dev->au_info->au; u32 i = 0; if (service == ADF_ACCEL_SERVICE_NULL) return 0; for (i = 0; i < num_au; i++) { if (accel_unit[i].services == service) ae_mask |= accel_unit[i].ae_mask; } return ae_mask; } static enum adf_accel_unit_services adf_4xxx_get_service_type(struct adf_accel_dev *accel_dev, s32 obj_num) { struct adf_accel_unit *accel_unit; struct adf_hw_device_data *hw_data = accel_dev->hw_device; u8 num_au = hw_data->get_num_accel_units(hw_data); int i; if (!hw_data->service_to_load_mask) return ADF_ACCEL_SERVICE_NULL; if (accel_dev->au_info && accel_dev->au_info->au) accel_unit = accel_dev->au_info->au; else return ADF_ACCEL_SERVICE_NULL; for (i = num_au - 2; i >= 0; i--) { if (hw_data->service_to_load_mask & accel_unit[i].services) { hw_data->service_to_load_mask &= ~accel_unit[i].services; return accel_unit[i].services; } } /* admin AE should be loaded last */ if (hw_data->service_to_load_mask & accel_unit[num_au - 1].services) { hw_data->service_to_load_mask &= ~accel_unit[num_au - 1].services; return accel_unit[num_au - 1].services; } return ADF_ACCEL_SERVICE_NULL; } static void get_ring_svc_map_data(int ring_pair_index, u16 ring_to_svc_map, u8 *serv_type, int *ring_index, int *num_rings_per_srv, int bundle_num) { *serv_type = GET_SRV_TYPE(ring_to_svc_map, bundle_num % ADF_CFG_NUM_SERVICES); *ring_index = 0; *num_rings_per_srv = ADF_4XXX_NUM_RINGS_PER_BANK / 2; } static int adf_get_dc_extcapabilities(struct adf_accel_dev *accel_dev, u32 *capabilities) { struct icp_qat_fw_init_admin_req req; struct icp_qat_fw_init_admin_resp resp; u8 i; struct adf_hw_device_data *hw_data = accel_dev->hw_device; u8 num_au = hw_data->get_num_accel_units(hw_data); u32 first_dc_ae = 0; for (i = 0; i < num_au; i++) { if (accel_dev->au_info->au[i].services & ADF_ACCEL_COMPRESSION) { first_dc_ae = accel_dev->au_info->au[i].ae_mask; first_dc_ae &= ~(first_dc_ae - 1); } } memset(&req, 0, sizeof(req)); memset(&resp, 0, sizeof(resp)); req.cmd_id = ICP_QAT_FW_COMP_CAPABILITY_GET; if (likely(first_dc_ae)) { if (adf_send_admin(accel_dev, &req, &resp, first_dc_ae) || resp.status) { *capabilities = 0; return EFAULT; } *capabilities = resp.extended_features; } return 0; } static int adf_get_fw_status(struct adf_accel_dev *accel_dev, u8 *major, u8 *minor, u8 *patch) { struct icp_qat_fw_init_admin_req req; struct icp_qat_fw_init_admin_resp resp; u32 ae_mask = 1; memset(&req, 0, sizeof(req)); memset(&resp, 0, sizeof(resp)); req.cmd_id = ICP_QAT_FW_STATUS_GET; if (adf_send_admin(accel_dev, &req, &resp, ae_mask)) return EFAULT; *major = resp.version_major_num; *minor = resp.version_minor_num; *patch = resp.version_patch_num; return 0; } static int adf_4xxx_send_admin_init(struct adf_accel_dev *accel_dev) { int ret = 0; struct icp_qat_fw_init_admin_req req; struct icp_qat_fw_init_admin_resp resp; struct adf_hw_device_data *hw_data = accel_dev->hw_device; u32 ae_mask = hw_data->ae_mask; u32 admin_ae_mask = hw_data->admin_ae_mask; u8 num_au = hw_data->get_num_accel_units(hw_data); u8 i; u32 dc_capabilities = 0; for (i = 0; i < num_au; i++) { if (accel_dev->au_info->au[i].services == ADF_ACCEL_SERVICE_NULL) ae_mask &= ~accel_dev->au_info->au[i].ae_mask; if (accel_dev->au_info->au[i].services != ADF_ACCEL_ADMIN) admin_ae_mask &= ~accel_dev->au_info->au[i].ae_mask; } if (!accel_dev->admin) { device_printf(GET_DEV(accel_dev), "adf_admin not available\n"); return EFAULT; } memset(&req, 0, sizeof(req)); memset(&resp, 0, sizeof(resp)); req.cmd_id = ICP_QAT_FW_CONSTANTS_CFG; req.init_cfg_sz = ADF_CONST_TABLE_SIZE; 
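/*
 * Note on the sequence below: the admin init path first sends
 * ICP_QAT_FW_CONSTANTS_CFG (pointing the admin AEs at the constants
 * table), then ICP_QAT_FW_INIT_ME to the remaining enabled AEs
 * (optionally carrying ICP_QAT_FW_INIT_DISABLE_SAFE_DC_MODE_FLAG),
 * then ICP_QAT_FW_HEARTBEAT_TIMER_SET, and finally queries the
 * extended compression capabilities and the firmware version.
 */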
req.init_cfg_ptr = accel_dev->admin->const_tbl_addr; if (adf_send_admin(accel_dev, &req, &resp, admin_ae_mask)) { device_printf(GET_DEV(accel_dev), "Error sending constants config message\n"); return EFAULT; } memset(&req, 0, sizeof(req)); memset(&resp, 0, sizeof(resp)); req.cmd_id = ICP_QAT_FW_INIT_ME; #ifdef QAT_DISABLE_SAFE_DC_MODE if (accel_dev->disable_safe_dc_mode) req.fw_flags = ICP_QAT_FW_INIT_DISABLE_SAFE_DC_MODE_FLAG; #endif /* QAT_DISABLE_SAFE_DC_MODE */ if (adf_send_admin(accel_dev, &req, &resp, ae_mask)) { device_printf(GET_DEV(accel_dev), "Error sending init message\n"); return EFAULT; } memset(&req, 0, sizeof(req)); memset(&resp, 0, sizeof(resp)); req.cmd_id = ICP_QAT_FW_HEARTBEAT_TIMER_SET; req.init_cfg_ptr = accel_dev->admin->phy_hb_addr; if (adf_get_hb_timer(accel_dev, &req.heartbeat_ticks)) return EINVAL; if (adf_send_admin(accel_dev, &req, &resp, ae_mask)) device_printf(GET_DEV(accel_dev), "Heartbeat is not supported\n"); ret = adf_get_dc_extcapabilities(accel_dev, &dc_capabilities); if (unlikely(ret)) { device_printf(GET_DEV(accel_dev), "Could not get FW ext. capabilities\n"); } accel_dev->hw_device->extended_dc_capabilities = dc_capabilities; adf_get_fw_status(accel_dev, &accel_dev->fw_versions.fw_version_major, &accel_dev->fw_versions.fw_version_minor, &accel_dev->fw_versions.fw_version_patch); device_printf(GET_DEV(accel_dev), "FW version: %d.%d.%d\n", accel_dev->fw_versions.fw_version_major, accel_dev->fw_versions.fw_version_minor, accel_dev->fw_versions.fw_version_patch); return ret; } static enum dev_sku_info get_sku(struct adf_hw_device_data *self) { return DEV_SKU_1; } static struct adf_accel_unit * get_au_by_ae(struct adf_accel_dev *accel_dev, int ae_num) { int i = 0; struct adf_accel_unit *accel_unit = accel_dev->au_info->au; if (!accel_unit) return NULL; for (i = 0; i < ADF_4XXX_MAX_ACCELUNITS; i++) if (accel_unit[i].ae_mask & BIT(ae_num)) return &accel_unit[i]; return NULL; } static bool check_accel_unit_service(enum adf_accel_unit_services au_srv, enum adf_cfg_service_type ring_srv) { if ((ADF_ACCEL_SERVICE_NULL == au_srv) && ring_srv == NA) return true; if ((au_srv & ADF_ACCEL_COMPRESSION) && ring_srv == COMP) return true; if ((au_srv & ADF_ACCEL_ASYM) && ring_srv == ASYM) return true; if ((au_srv & ADF_ACCEL_CRYPTO) && ring_srv == SYM) return true; return false; } static void adf_4xxx_cfg_gen_dispatch_arbiter(struct adf_accel_dev *accel_dev, u32 *thrd_to_arb_map_gen) { struct adf_accel_unit *au = NULL; int engine = 0; int thread = 0; int service; u16 ena_srv_mask; u16 service_type; u32 service_mask; unsigned long thd_srv_mask = default_active_thd_mask; struct adf_hw_device_data *hw_data = accel_dev->hw_device; ena_srv_mask = accel_dev->hw_device->ring_to_svc_map; /* If ring_to_svc_map is not changed, return default arbiter value */ if (ena_srv_mask == ADF_4XXX_DEFAULT_RING_TO_SRV_MAP) { memcpy(thrd_to_arb_map_gen, thrd_to_arb_map, sizeof(thrd_to_arb_map_gen[0]) * ADF_4XXX_MAX_ACCELENGINES); return; } for (engine = 0; engine < ADF_4XXX_MAX_ACCELENGINES - 1; engine++) { thrd_to_arb_map_gen[engine] = 0; service_mask = 0; au = get_au_by_ae(accel_dev, engine); if (!au) continue; for (service = 0; service < ADF_CFG_MAX_SERVICES; service++) { service_type = GET_SRV_TYPE(ena_srv_mask, service); if (check_accel_unit_service(au->services, service_type)) service_mask |= BIT(service); } if (au->services == ADF_ACCEL_COMPRESSION) thd_srv_mask = dc_me_active_thd_mask; else if (au->services == ADF_ACCEL_ASYM) thd_srv_mask = hw_data->asym_ae_active_thd_mask; else 
thd_srv_mask = default_active_thd_mask; for_each_set_bit(thread, &thd_srv_mask, 8) { thrd_to_arb_map_gen[engine] |= (service_mask << (ADF_CFG_MAX_SERVICES * thread)); } } } static void adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev, u32 const **arb_map_config) { int i; struct adf_hw_device_data *hw_device = accel_dev->hw_device; for (i = 1; i < ADF_4XXX_MAX_ACCELENGINES; i++) { if (~hw_device->ae_mask & (1 << i)) thrd_to_arb_map[i] = 0; } adf_4xxx_cfg_gen_dispatch_arbiter(accel_dev, thrd_to_arb_map_gen); *arb_map_config = thrd_to_arb_map_gen; } static void get_arb_info(struct arb_info *arb_info) { arb_info->wrk_cfg_offset = ADF_4XXX_ARB_CONFIG; arb_info->arbiter_offset = ADF_4XXX_ARB_OFFSET; arb_info->wrk_thd_2_srv_arb_map = ADF_4XXX_ARB_WRK_2_SER_MAP_OFFSET; } static void get_admin_info(struct admin_info *admin_csrs_info) { admin_csrs_info->mailbox_offset = ADF_4XXX_MAILBOX_BASE_OFFSET; admin_csrs_info->admin_msg_ur = ADF_4XXX_ADMINMSGUR_OFFSET; admin_csrs_info->admin_msg_lr = ADF_4XXX_ADMINMSGLR_OFFSET; } static void adf_enable_error_correction(struct adf_accel_dev *accel_dev) { struct adf_bar *misc_bar = &GET_BARS(accel_dev)[ADF_4XXX_PMISC_BAR]; struct resource *csr = misc_bar->virt_addr; /* Enable all in errsou3 except VFLR notification on host */ ADF_CSR_WR(csr, ADF_4XXX_ERRMSK3, ADF_4XXX_VFLNOTIFY); } static void adf_enable_ints(struct adf_accel_dev *accel_dev) { struct resource *addr; addr = (&GET_BARS(accel_dev)[ADF_4XXX_PMISC_BAR])->virt_addr; /* Enable bundle interrupts */ ADF_CSR_WR(addr, ADF_4XXX_SMIAPF_RP_X0_MASK_OFFSET, 0); ADF_CSR_WR(addr, ADF_4XXX_SMIAPF_RP_X1_MASK_OFFSET, 0); /* Enable misc interrupts */ ADF_CSR_WR(addr, ADF_4XXX_SMIAPF_MASK_OFFSET, 0); } static int adf_init_device(struct adf_accel_dev *accel_dev) { struct resource *addr; u32 status; u32 csr; int ret; addr = (&GET_BARS(accel_dev)[ADF_4XXX_PMISC_BAR])->virt_addr; /* Temporarily mask PM interrupt */ csr = ADF_CSR_RD(addr, ADF_4XXX_ERRMSK2); csr |= ADF_4XXX_PM_SOU; ADF_CSR_WR(addr, ADF_4XXX_ERRMSK2, csr); /* Set DRV_ACTIVE bit to power up the device */ ADF_CSR_WR(addr, ADF_4XXX_PM_INTERRUPT, ADF_4XXX_PM_DRV_ACTIVE); /* Poll status register to make sure the device is powered up */ status = 0; ret = read_poll_timeout(ADF_CSR_RD, status, status & ADF_4XXX_PM_INIT_STATE, ADF_4XXX_PM_POLL_DELAY_US, ADF_4XXX_PM_POLL_TIMEOUT_US, true, addr, ADF_4XXX_PM_STATUS); if (ret) device_printf(GET_DEV(accel_dev), "Failed to power up the device\n"); return ret; } void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 id) { hw_data->dev_class = &adf_4xxx_class; hw_data->instance_id = adf_4xxx_class.instances++; hw_data->num_banks = ADF_4XXX_ETR_MAX_BANKS; hw_data->num_rings_per_bank = ADF_4XXX_NUM_RINGS_PER_BANK; hw_data->num_accel = ADF_4XXX_MAX_ACCELERATORS; hw_data->num_engines = ADF_4XXX_MAX_ACCELENGINES; hw_data->num_logical_accel = 1; hw_data->tx_rx_gap = ADF_4XXX_RX_RINGS_OFFSET; hw_data->tx_rings_mask = ADF_4XXX_TX_RINGS_MASK; hw_data->alloc_irq = adf_isr_resource_alloc; hw_data->free_irq = adf_isr_resource_free; hw_data->enable_error_correction = adf_enable_error_correction; hw_data->get_accel_mask = get_accel_mask; hw_data->get_ae_mask = get_ae_mask; hw_data->get_num_accels = get_num_accels; hw_data->get_num_aes = get_num_aes; hw_data->get_sram_bar_id = get_sram_bar_id; hw_data->get_etr_bar_id = get_etr_bar_id; hw_data->get_misc_bar_id = get_misc_bar_id; hw_data->get_arb_info = get_arb_info; hw_data->get_admin_info = get_admin_info; hw_data->get_accel_cap = adf_4xxx_get_hw_cap; 
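/*
 * adf_4xxx_get_hw_cap() (assigned just above) derives the capability
 * mask from the FUSECTL1 register: a set fuse bit means the
 * corresponding slice (cipher, UCS, auth, SMx, PKE or compression) is
 * fused off in this SKU, so the matching ICP_ACCEL_CAPABILITIES_* bits
 * are cleared from the mask.
 */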
hw_data->clock_frequency = ADF_4XXX_AE_FREQ; hw_data->get_sku = get_sku; hw_data->heartbeat_ctr_num = ADF_NUM_HB_CNT_PER_AE; hw_data->fw_name = ADF_4XXX_FW; hw_data->fw_mmp_name = ADF_4XXX_MMP; hw_data->init_admin_comms = adf_init_admin_comms; hw_data->exit_admin_comms = adf_exit_admin_comms; hw_data->send_admin_init = adf_4xxx_send_admin_init; hw_data->init_arb = adf_init_gen2_arb; hw_data->exit_arb = adf_exit_arb; hw_data->get_arb_mapping = adf_get_arbiter_mapping; hw_data->enable_ints = adf_enable_ints; hw_data->init_device = adf_init_device; hw_data->reset_device = adf_reset_flr; hw_data->restore_device = adf_dev_restore; hw_data->init_accel_units = adf_init_accel_units; hw_data->exit_accel_units = adf_exit_accel_units; hw_data->get_num_accel_units = get_num_accel_units; hw_data->configure_accel_units = adf_4xxx_configure_accel_units; hw_data->get_ring_to_svc_map = get_ring_to_svc_map; hw_data->get_ring_svc_map_data = get_ring_svc_map_data; hw_data->admin_ae_mask = ADF_4XXX_ADMIN_AE_MASK; hw_data->get_objs_num = get_objs_num; hw_data->get_obj_name = get_obj_name; hw_data->get_obj_cfg_ae_mask = get_obj_cfg_ae_mask; hw_data->get_service_type = adf_4xxx_get_service_type; hw_data->set_msix_rttable = set_msix_default_rttable; hw_data->set_ssm_wdtimer = adf_gen4_set_ssm_wdtimer; hw_data->disable_iov = adf_disable_sriov; hw_data->config_device = adf_config_device; hw_data->set_asym_rings_mask = adf_set_asym_rings_mask; hw_data->get_hb_clock = get_hb_clock; hw_data->int_timer_init = adf_int_timer_init; hw_data->int_timer_exit = adf_int_timer_exit; + hw_data->pre_reset = adf_dev_pre_reset; + hw_data->post_reset = adf_dev_post_reset; + hw_data->disable_arb = adf_disable_arb; hw_data->get_heartbeat_status = adf_get_heartbeat_status; hw_data->get_ae_clock = get_ae_clock; hw_data->measure_clock = measure_clock; hw_data->query_storage_cap = 1; hw_data->ring_pair_reset = adf_gen4_ring_pair_reset; switch (id) { case ADF_401XX_PCI_DEVICE_ID: hw_data->asym_ae_active_thd_mask = DEFAULT_401XX_ASYM_AE_MASK; break; case ADF_4XXX_PCI_DEVICE_ID: default: hw_data->asym_ae_active_thd_mask = DEFAULT_4XXX_ASYM_AE_MASK; } adf_gen4_init_hw_csr_info(&hw_data->csr_info); adf_gen4_init_pf_pfvf_ops(&hw_data->csr_info.pfvf_ops); } void adf_clean_hw_data_4xxx(struct adf_hw_device_data *hw_data) { hw_data->dev_class->instances--; } diff --git a/sys/dev/qat/qat_hw/qat_4xxx/adf_drv.c b/sys/dev/qat/qat_hw/qat_4xxx/adf_drv.c index 08fbf5d989e2..0813ccf5baf0 100644 --- a/sys/dev/qat/qat_hw/qat_4xxx/adf_drv.c +++ b/sys/dev/qat/qat_hw/qat_4xxx/adf_drv.c @@ -1,347 +1,342 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright(c) 2007-2025 Intel Corporation */ #include "qat_freebsd.h" #include "adf_cfg.h" #include "adf_common_drv.h" #include "adf_accel_devices.h" #include "adf_4xxx_hw_data.h" #include "adf_gen4_hw_data.h" #include "adf_fw_counters.h" #include "adf_cfg_device.h" #include "adf_dbgfs.h" #include #include #include #include #include static MALLOC_DEFINE(M_QAT_4XXX, "qat_4xxx", "qat_4xxx"); #define ADF_SYSTEM_DEVICE(device_id) \ { \ PCI_VENDOR_ID_INTEL, device_id \ } static const struct pci_device_id adf_pci_tbl[] = { ADF_SYSTEM_DEVICE(ADF_4XXX_PCI_DEVICE_ID), ADF_SYSTEM_DEVICE(ADF_401XX_PCI_DEVICE_ID), { 0, } }; static int adf_probe(device_t dev) { const struct pci_device_id *id; for (id = adf_pci_tbl; id->vendor != 0; id++) { if (pci_get_vendor(dev) == id->vendor && pci_get_device(dev) == id->device) { device_set_desc(dev, "Intel " ADF_4XXX_DEVICE_NAME " QuickAssist"); return BUS_PROBE_GENERIC; } } return ENXIO; } 
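/*
 * The QAT_DISABLE_SAFE_DC_MODE block below adds a per-device sysctl
 * named "disable_safe_dc_mode". It can only be changed while the
 * device is down, and the stored value is later handed to the firmware
 * by adf_4xxx_send_admin_init() via
 * ICP_QAT_FW_INIT_DISABLE_SAFE_DC_MODE_FLAG.
 *
 * Minimal userland sketch (the "dev.qat.0" prefix is an assumption for
 * unit 0; only the leaf name is defined by the code below):
 *
 *   #include <sys/types.h>
 *   #include <sys/sysctl.h>
 *
 *   int one = 1;
 *   // Request safe-DC-mode to be disabled before the device is started.
 *   sysctlbyname("dev.qat.0.disable_safe_dc_mode", NULL, NULL,
 *       &one, sizeof(one));
 */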
#ifdef QAT_DISABLE_SAFE_DC_MODE static int adf_4xxx_sysctl_disable_safe_dc_mode(SYSCTL_HANDLER_ARGS) { struct adf_accel_dev *accel_dev = arg1; int error, value = accel_dev->disable_safe_dc_mode; error = sysctl_handle_int(oidp, &value, 0, req); if (error || !req->newptr) return error; if (value != 1 && value != 0) return EINVAL; if (adf_dev_started(accel_dev)) { device_printf( GET_DEV(accel_dev), "QAT: configuration can only be changed in \"down\" device state\n"); return EBUSY; } accel_dev->disable_safe_dc_mode = (u8)value; return 0; } static void adf_4xxx_disable_safe_dc_sysctl_add(struct adf_accel_dev *accel_dev) { struct sysctl_ctx_list *qat_sysctl_ctx; struct sysctl_oid *qat_sysctl_tree; qat_sysctl_ctx = device_get_sysctl_ctx(accel_dev->accel_pci_dev.pci_dev); qat_sysctl_tree = device_get_sysctl_tree(accel_dev->accel_pci_dev.pci_dev); accel_dev->safe_dc_mode = SYSCTL_ADD_OID(qat_sysctl_ctx, SYSCTL_CHILDREN(qat_sysctl_tree), OID_AUTO, "disable_safe_dc_mode", CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_TUN | CTLFLAG_SKIP, accel_dev, 0, adf_4xxx_sysctl_disable_safe_dc_mode, "LU", "Disable QAT safe data compression mode"); } static void adf_4xxx_disable_safe_dc_sysctl_remove(struct adf_accel_dev *accel_dev) { int ret; struct sysctl_ctx_list *qat_sysctl_ctx = device_get_sysctl_ctx(accel_dev->accel_pci_dev.pci_dev); ret = sysctl_ctx_entry_del(qat_sysctl_ctx, accel_dev->safe_dc_mode); if (ret) { device_printf(GET_DEV(accel_dev), "Failed to delete entry\n"); } else { ret = sysctl_remove_oid(accel_dev->safe_dc_mode, 1, 1); if (ret) device_printf(GET_DEV(accel_dev), "Failed to delete oid\n"); } } #endif /* QAT_DISABLE_SAFE_DC_MODE */ static void adf_cleanup_accel(struct adf_accel_dev *accel_dev) { struct adf_accel_pci *accel_pci_dev = &accel_dev->accel_pci_dev; int i; if (accel_dev->dma_tag) bus_dma_tag_destroy(accel_dev->dma_tag); for (i = 0; i < ADF_PCI_MAX_BARS; i++) { struct adf_bar *bar = &accel_pci_dev->pci_bars[i]; if (bar->virt_addr) bus_free_resource(accel_pci_dev->pci_dev, SYS_RES_MEMORY, bar->virt_addr); } if (accel_dev->hw_device) { switch (pci_get_device(accel_pci_dev->pci_dev)) { case ADF_4XXX_PCI_DEVICE_ID: case ADF_401XX_PCI_DEVICE_ID: adf_clean_hw_data_4xxx(accel_dev->hw_device); break; default: break; } free(accel_dev->hw_device, M_QAT_4XXX); accel_dev->hw_device = NULL; } #ifdef QAT_DISABLE_SAFE_DC_MODE adf_4xxx_disable_safe_dc_sysctl_remove(accel_dev); #endif /* QAT_DISABLE_SAFE_DC_MODE */ adf_dbgfs_exit(accel_dev); adf_cfg_dev_remove(accel_dev); adf_devmgr_rm_dev(accel_dev, NULL); } static int adf_attach(device_t dev) { struct adf_accel_dev *accel_dev; struct adf_accel_pci *accel_pci_dev; struct adf_hw_device_data *hw_data; unsigned int bar_nr; int ret, rid; struct adf_cfg_device *cfg_dev = NULL; /* Set pci MaxPayLoad to 512. Implemented to avoid the issue of * Pci-passthrough causing Maxpayload to be reset to 128 bytes * when the device is reset. */ if (pci_get_max_payload(dev) != 512) pci_set_max_payload(dev, 512); accel_dev = device_get_softc(dev); INIT_LIST_HEAD(&accel_dev->crypto_list); accel_pci_dev = &accel_dev->accel_pci_dev; accel_pci_dev->pci_dev = dev; if (bus_get_domain(dev, &accel_pci_dev->node) != 0) accel_pci_dev->node = 0; /* Add accel device to accel table. 
* This should be called before adf_cleanup_accel is called */ if (adf_devmgr_add_dev(accel_dev, NULL)) { device_printf(dev, "Failed to add new accelerator device.\n"); return ENXIO; } /* Allocate and configure device configuration structure */ hw_data = malloc(sizeof(*hw_data), M_QAT_4XXX, M_WAITOK | M_ZERO); accel_dev->hw_device = hw_data; adf_init_hw_data_4xxx(accel_dev->hw_device, pci_get_device(dev)); accel_pci_dev->revid = pci_get_revid(dev); hw_data->fuses = pci_read_config(dev, ADF_4XXX_FUSECTL4_OFFSET, 4); - if (accel_pci_dev->revid == 0x00) { - device_printf(dev, "A0 stepping is not supported.\n"); - ret = ENODEV; - goto out_err; - } /* Get PPAERUCM values and store */ ret = adf_aer_store_ppaerucm_reg(dev, hw_data); if (ret) goto out_err; /* Get Accelerators and Accelerators Engines masks */ hw_data->accel_mask = hw_data->get_accel_mask(accel_dev); hw_data->ae_mask = hw_data->get_ae_mask(accel_dev); accel_pci_dev->sku = hw_data->get_sku(hw_data); /* If the device has no acceleration engines then ignore it. */ if (!hw_data->accel_mask || !hw_data->ae_mask || (~hw_data->ae_mask & 0x01)) { device_printf(dev, "No acceleration units found\n"); ret = ENXIO; goto out_err; } /* Create device configuration table */ ret = adf_cfg_dev_add(accel_dev); if (ret) goto out_err; ret = adf_clock_debugfs_add(accel_dev); if (ret) goto out_err; #ifdef QAT_DISABLE_SAFE_DC_MODE adf_4xxx_disable_safe_dc_sysctl_add(accel_dev); #endif /* QAT_DISABLE_SAFE_DC_MODE */ pci_set_max_read_req(dev, 4096); ret = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, BUS_SPACE_MAXSIZE, /* BUS_SPACE_UNRESTRICTED */ 1, BUS_SPACE_MAXSIZE, 0, NULL, NULL, &accel_dev->dma_tag); if (ret) goto out_err; if (hw_data->get_accel_cap) { hw_data->accel_capabilities_mask = hw_data->get_accel_cap(accel_dev); } /* Find and map all the device's BARS */ /* Logical BARs configuration for 64bit BARs: bar 0 and 1 - logical BAR0 bar 2 and 3 - logical BAR1 bar 4 and 5 - logical BAR3 */ for (bar_nr = 0; bar_nr < (ADF_PCI_MAX_BARS * 2) && bar_nr < PCIR_MAX_BAR_0; bar_nr += 2) { struct adf_bar *bar; rid = PCIR_BAR(bar_nr); bar = &accel_pci_dev->pci_bars[bar_nr / 2]; bar->virt_addr = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (!bar->virt_addr) { device_printf(dev, "Failed to map BAR %d\n", bar_nr); ret = ENXIO; goto out_err; } bar->base_addr = rman_get_start(bar->virt_addr); bar->size = rman_get_size(bar->virt_addr); } pci_enable_busmaster(dev); adf_dbgfs_init(accel_dev); if (!accel_dev->hw_device->config_device) { ret = EFAULT; goto out_err; } ret = accel_dev->hw_device->config_device(accel_dev); if (ret) goto out_err; ret = adf_dev_init(accel_dev); if (ret) goto out_dev_shutdown; ret = adf_dev_start(accel_dev); if (ret) goto out_dev_stop; cfg_dev = accel_dev->cfg->dev; adf_cfg_device_clear(cfg_dev, accel_dev); free(cfg_dev, M_QAT); accel_dev->cfg->dev = NULL; return ret; out_dev_stop: adf_dev_stop(accel_dev); out_dev_shutdown: adf_dev_shutdown(accel_dev); out_err: adf_cleanup_accel(accel_dev); return ret; } static int adf_detach(device_t dev) { struct adf_accel_dev *accel_dev = device_get_softc(dev); if (adf_dev_stop(accel_dev)) { device_printf(dev, "Failed to stop QAT accel dev\n"); return EBUSY; } adf_dev_shutdown(accel_dev); adf_cleanup_accel(accel_dev); return 0; } static device_method_t adf_methods[] = { DEVMETHOD(device_probe, adf_probe), DEVMETHOD(device_attach, adf_attach), DEVMETHOD(device_detach, adf_detach), DEVMETHOD_END }; static driver_t adf_driver = { "qat", 
adf_methods, sizeof(struct adf_accel_dev) }; DRIVER_MODULE_ORDERED(qat_4xxx, pci, adf_driver, NULL, NULL, SI_ORDER_THIRD); MODULE_VERSION(qat_4xxx, 1); MODULE_DEPEND(qat_4xxx, qat_common, 1, 1, 1); MODULE_DEPEND(qat_4xxx, qat_api, 1, 1, 1); MODULE_DEPEND(qat_4xxx, linuxkpi, 1, 1, 1); diff --git a/sys/dev/qat/qat_hw/qat_4xxxvf/adf_4xxxvf_hw_data.c b/sys/dev/qat/qat_hw/qat_4xxxvf/adf_4xxxvf_hw_data.c index f3d4ae3c7b38..37de24ba9e23 100644 --- a/sys/dev/qat/qat_hw/qat_4xxxvf/adf_4xxxvf_hw_data.c +++ b/sys/dev/qat/qat_hw/qat_4xxxvf/adf_4xxxvf_hw_data.c @@ -1,418 +1,421 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright(c) 2007-2025 Intel Corporation */ #include #include #include #include #include #include #include "adf_4xxxvf_hw_data.h" #include "icp_qat_hw.h" #include "adf_transport_internal.h" #include "adf_pfvf_vf_proto.h" static struct adf_hw_device_class adf_4xxxiov_class = { .name = ADF_4XXXVF_DEVICE_NAME, .type = DEV_4XXXVF, .instances = 0 }; #define ADF_4XXXIOV_DEFAULT_RING_TO_SRV_MAP \ (ASYM | SYM << ADF_CFG_SERV_RING_PAIR_1_SHIFT | \ ASYM << ADF_CFG_SERV_RING_PAIR_2_SHIFT | \ SYM << ADF_CFG_SERV_RING_PAIR_3_SHIFT) #define ADF_4XXXIOV_ASYM_SYM ADF_4XXXIOV_DEFAULT_RING_TO_SRV_MAP #define ADF_4XXXIOV_DC \ (COMP | COMP << ADF_CFG_SERV_RING_PAIR_1_SHIFT | \ COMP << ADF_CFG_SERV_RING_PAIR_2_SHIFT | \ COMP << ADF_CFG_SERV_RING_PAIR_3_SHIFT) #define ADF_4XXXIOV_SYM \ (SYM | SYM << ADF_CFG_SERV_RING_PAIR_1_SHIFT | \ SYM << ADF_CFG_SERV_RING_PAIR_2_SHIFT | \ SYM << ADF_CFG_SERV_RING_PAIR_3_SHIFT) #define ADF_4XXXIOV_ASYM \ (ASYM | ASYM << ADF_CFG_SERV_RING_PAIR_1_SHIFT | \ ASYM << ADF_CFG_SERV_RING_PAIR_2_SHIFT | \ ASYM << ADF_CFG_SERV_RING_PAIR_3_SHIFT) #define ADF_4XXXIOV_ASYM_DC \ (ASYM | ASYM << ADF_CFG_SERV_RING_PAIR_1_SHIFT | \ COMP << ADF_CFG_SERV_RING_PAIR_2_SHIFT | \ COMP << ADF_CFG_SERV_RING_PAIR_3_SHIFT) #define ADF_4XXXIOV_SYM_DC \ (SYM | SYM << ADF_CFG_SERV_RING_PAIR_1_SHIFT | \ COMP << ADF_CFG_SERV_RING_PAIR_2_SHIFT | \ COMP << ADF_CFG_SERV_RING_PAIR_3_SHIFT) #define ADF_4XXXIOV_NA \ (NA | NA << ADF_CFG_SERV_RING_PAIR_1_SHIFT | \ NA << ADF_CFG_SERV_RING_PAIR_2_SHIFT | \ NA << ADF_CFG_SERV_RING_PAIR_3_SHIFT) struct adf_enabled_services { const char svcs_enabled[ADF_CFG_MAX_VAL_LEN_IN_BYTES]; u16 rng_to_svc_msk; }; static struct adf_enabled_services adf_4xxxiov_svcs[] = { { "dc", ADF_4XXXIOV_DC }, { "sym", ADF_4XXXIOV_SYM }, { "asym", ADF_4XXXIOV_ASYM }, { "dc;asym", ADF_4XXXIOV_ASYM_DC }, { "asym;dc", ADF_4XXXIOV_ASYM_DC }, { "sym;dc", ADF_4XXXIOV_SYM_DC }, { "dc;sym", ADF_4XXXIOV_SYM_DC }, { "asym;sym", ADF_4XXXIOV_ASYM_SYM }, { "sym;asym", ADF_4XXXIOV_ASYM_SYM }, { "cy", ADF_4XXXIOV_ASYM_SYM } }; static u32 get_accel_mask(struct adf_accel_dev *accel_dev) { return ADF_4XXXIOV_ACCELERATORS_MASK; } static u32 get_ae_mask(struct adf_accel_dev *accel_dev) { return ADF_4XXXIOV_ACCELENGINES_MASK; } static u32 get_num_accels(struct adf_hw_device_data *self) { return ADF_4XXXIOV_MAX_ACCELERATORS; } static u32 get_num_aes(struct adf_hw_device_data *self) { return ADF_4XXXIOV_MAX_ACCELENGINES; } static u32 get_misc_bar_id(struct adf_hw_device_data *self) { return ADF_4XXXIOV_PMISC_BAR; } static u32 get_etr_bar_id(struct adf_hw_device_data *self) { return ADF_4XXXIOV_ETR_BAR; } static u32 get_clock_speed(struct adf_hw_device_data *self) { /* CPP clock is half high-speed clock */ return self->clock_frequency / 2; } static enum dev_sku_info get_sku(struct adf_hw_device_data *self) { return DEV_SKU_VF; } static int adf_vf_int_noop(struct adf_accel_dev *accel_dev) { return 0; } static 
void adf_vf_void_noop(struct adf_accel_dev *accel_dev) { } u32 adf_4xxxvf_get_hw_cap(struct adf_accel_dev *accel_dev) { device_t pdev = accel_dev->accel_pci_dev.pci_dev; u32 vffusectl1; u32 capabilities_sym, capabilities_sym_cipher, capabilities_sym_auth, capabilities_asym, capabilities_dc; /* Get fused capabilities */ vffusectl1 = pci_read_config(pdev, ADF_4XXXIOV_VFFUSECTL1_OFFSET, 4); capabilities_sym_cipher = ICP_ACCEL_CAPABILITIES_HKDF | ICP_ACCEL_CAPABILITIES_SM4 | ICP_ACCEL_CAPABILITIES_CHACHA_POLY | ICP_ACCEL_CAPABILITIES_AESGCM_SPC | ICP_ACCEL_CAPABILITIES_AES_V2; capabilities_sym_auth = ICP_ACCEL_CAPABILITIES_SM3 | ICP_ACCEL_CAPABILITIES_SHA3 | ICP_ACCEL_CAPABILITIES_SHA3_EXT; /* A set bit in vffusectl1 means the feature is OFF in this SKU */ if (vffusectl1 & ICP_ACCEL_4XXXVF_MASK_CIPHER_SLICE) { capabilities_sym_cipher &= ~ICP_ACCEL_CAPABILITIES_HKDF; capabilities_sym_cipher &= ~ICP_ACCEL_CAPABILITIES_SM4; } if (vffusectl1 & ICP_ACCEL_4XXXVF_MASK_UCS_SLICE) { capabilities_sym_cipher &= ~ICP_ACCEL_CAPABILITIES_CHACHA_POLY; capabilities_sym_cipher &= ~ICP_ACCEL_CAPABILITIES_AESGCM_SPC; capabilities_sym_cipher &= ~ICP_ACCEL_CAPABILITIES_AES_V2; } if (vffusectl1 & ICP_ACCEL_4XXXVF_MASK_AUTH_SLICE) { capabilities_sym_auth &= ~ICP_ACCEL_CAPABILITIES_SM3; capabilities_sym_auth &= ~ICP_ACCEL_CAPABILITIES_SHA3; capabilities_sym_auth &= ~ICP_ACCEL_CAPABILITIES_SHA3_EXT; } if (vffusectl1 & ICP_ACCEL_4XXXVF_MASK_SMX_SLICE) { capabilities_sym_cipher &= ~ICP_ACCEL_CAPABILITIES_SM4; capabilities_sym_auth &= ~ICP_ACCEL_CAPABILITIES_SM3; } if (capabilities_sym_cipher) capabilities_sym_cipher |= ICP_ACCEL_CAPABILITIES_CIPHER; if (capabilities_sym_auth) capabilities_sym_auth |= ICP_ACCEL_CAPABILITIES_AUTHENTICATION; capabilities_sym = capabilities_sym_cipher | capabilities_sym_auth; if (capabilities_sym) capabilities_sym |= ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC; capabilities_asym = ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC | ICP_ACCEL_CAPABILITIES_SM2 | ICP_ACCEL_CAPABILITIES_ECEDMONT; if (vffusectl1 & ICP_ACCEL_4XXXVF_MASK_PKE_SLICE) { capabilities_asym &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC; capabilities_asym &= ~ICP_ACCEL_CAPABILITIES_SM2; capabilities_asym &= ~ICP_ACCEL_CAPABILITIES_ECEDMONT; } capabilities_dc = ICP_ACCEL_CAPABILITIES_COMPRESSION | ICP_ACCEL_CAPABILITIES_LZ4_COMPRESSION | ICP_ACCEL_CAPABILITIES_LZ4S_COMPRESSION | ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY64; if (vffusectl1 & ICP_ACCEL_4XXXVF_MASK_COMPRESS_SLICE) { capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_COMPRESSION; capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_LZ4_COMPRESSION; capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_LZ4S_COMPRESSION; capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY64; } return capabilities_sym | capabilities_dc | capabilities_asym; } static void adf_set_asym_rings_mask(struct adf_accel_dev *accel_dev) { accel_dev->hw_device->asym_rings_mask = ADF_4XXX_DEF_ASYM_MASK; } static void enable_pf2vm_interrupt(struct adf_accel_dev *accel_dev) { struct adf_hw_device_data *hw_data; struct adf_bar *pmisc; struct resource *pmisc_bar_addr; hw_data = accel_dev->hw_device; pmisc = &GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)]; pmisc_bar_addr = pmisc->virt_addr; ADF_CSR_WR(pmisc_bar_addr, ADF_4XXXIOV_VINTMSKPF2VM_OFFSET, 0x0); } static void disable_pf2vm_interrupt(struct adf_accel_dev *accel_dev) { struct adf_hw_device_data *hw_data; struct adf_bar *pmisc; struct resource *pmisc_bar_addr; hw_data = accel_dev->hw_device; pmisc = &GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)]; pmisc_bar_addr = 
pmisc->virt_addr; ADF_CSR_WR(pmisc_bar_addr, ADF_4XXXIOV_VINTMSKPF2VM_OFFSET, BIT(0)); } static int interrupt_active_pf2vm(struct adf_accel_dev *accel_dev) { struct adf_hw_device_data *hw_data; struct adf_bar *pmisc; struct resource *pmisc_bar_addr; u32 v_sou, v_msk; hw_data = accel_dev->hw_device; pmisc = &GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)]; pmisc_bar_addr = pmisc->virt_addr; v_sou = ADF_CSR_RD(pmisc_bar_addr, ADF_4XXXIOV_VINTSOUPF2VM_OFFSET); v_msk = ADF_CSR_RD(pmisc_bar_addr, ADF_4XXXIOV_VINTMSKPF2VM_OFFSET); return ((v_sou & ~v_msk) & BIT(0)) ? 1 : 0; } static int get_int_active_bundles(struct adf_accel_dev *accel_dev) { struct adf_hw_device_data *hw_data; struct adf_bar *pmisc; struct resource *pmisc_bar_addr; u32 v_sou, v_msk; hw_data = accel_dev->hw_device; pmisc = &GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)]; pmisc_bar_addr = pmisc->virt_addr; v_sou = ADF_CSR_RD(pmisc_bar_addr, ADF_4XXXIOV_VINTSOU_OFFSET); v_msk = ADF_CSR_RD(pmisc_bar_addr, ADF_4XXXIOV_VINTMSK_OFFSET); return v_sou & ~v_msk & 0xF; } static void get_ring_svc_map_data(int ring_pair_index, u16 ring_to_svc_map, u8 *serv_type, int *ring_index, int *num_rings_per_srv, int bank_num) { *serv_type = GET_SRV_TYPE(ring_to_svc_map, bank_num % ADF_CFG_NUM_SERVICES); *ring_index = 0; *num_rings_per_srv = ADF_4XXXIOV_NUM_RINGS_PER_BANK / 2; } static int get_ring_to_svc_map(struct adf_accel_dev *accel_dev, u16 *ring_to_svc_map) { char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES]; char val[ADF_CFG_MAX_KEY_LEN_IN_BYTES]; u32 i = 0; + if (accel_dev->hw_device->get_ring_to_svc_done) + return 0; + /* Get the services enabled by user if provided. * The function itself will also be called during the driver probe * procedure where no ServicesEnable is provided. Then the device * should still start with default configuration without * ServicesEnable. Hence it still returns 0 when the * adf_cfg_get_param_value() function returns failure. 
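 * (Editor's illustration, not part of the original comment: with a
 * user-supplied entry such as
 *	[GENERAL]
 *	ServicesEnabled = sym;dc
 * the lookup below walks adf_4xxxiov_svcs[] and hands back
 * ADF_4XXXIOV_SYM_DC as the ring-to-service mask; a string that does not
 * match any table entry is reported and rejected with EFAULT.)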
*/ snprintf(key, sizeof(key), ADF_SERVICES_ENABLED); if (adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC, key, val)) return 0; for (i = 0; i < ARRAY_SIZE(adf_4xxxiov_svcs); i++) { if (!strncmp(val, adf_4xxxiov_svcs[i].svcs_enabled, ADF_CFG_MAX_KEY_LEN_IN_BYTES)) { *ring_to_svc_map = adf_4xxxiov_svcs[i].rng_to_svc_msk; return 0; } } device_printf(GET_DEV(accel_dev), "Invalid services enabled: %s\n", val); return EFAULT; } static int adf_4xxxvf_ring_pair_reset(struct adf_accel_dev *accel_dev, u32 bank_number) { struct pfvf_message req = { 0 }; unsigned long timeout = msecs_to_jiffies(ADF_PFVF_MSG_RESP_TIMEOUT); int ret = 0; if (bank_number >= accel_dev->hw_device->num_banks) return EINVAL; req.type = ADF_VF2PF_MSGTYPE_RP_RESET; req.data = bank_number; mutex_lock(&accel_dev->u1.vf.rpreset_lock); init_completion(&accel_dev->u1.vf.msg_received); accel_dev->u1.vf.rpreset_sts = RPRESET_SUCCESS; if (adf_send_vf2pf_msg(accel_dev, req)) { device_printf(GET_DEV(accel_dev), "vf ring pair reset failure (vf2pf msg error)\n"); ret = EFAULT; goto out; } if (!wait_for_completion_timeout(&accel_dev->u1.vf.msg_received, timeout)) { device_printf( GET_DEV(accel_dev), "vf ring pair reset failure (pf2vf msg timeout)\n"); ret = EFAULT; goto out; } if (accel_dev->u1.vf.rpreset_sts != RPRESET_SUCCESS) { device_printf( GET_DEV(accel_dev), "vf ring pair reset failure (pf reports error)\n"); ret = EFAULT; goto out; } out: mutex_unlock(&accel_dev->u1.vf.rpreset_lock); return ret; } void adf_init_hw_data_4xxxiov(struct adf_hw_device_data *hw_data) { hw_data->dev_class = &adf_4xxxiov_class; hw_data->num_banks = ADF_4XXXIOV_ETR_MAX_BANKS; hw_data->num_rings_per_bank = ADF_4XXXIOV_NUM_RINGS_PER_BANK; hw_data->num_accel = ADF_4XXXIOV_MAX_ACCELERATORS; hw_data->num_logical_accel = 1; hw_data->num_engines = ADF_4XXXIOV_MAX_ACCELENGINES; hw_data->tx_rx_gap = ADF_4XXXIOV_RX_RINGS_OFFSET; hw_data->tx_rings_mask = ADF_4XXXIOV_TX_RINGS_MASK; hw_data->ring_to_svc_map = ADF_4XXXIOV_DEFAULT_RING_TO_SRV_MAP; hw_data->alloc_irq = adf_vf_isr_resource_alloc; hw_data->free_irq = adf_vf_isr_resource_free; hw_data->enable_error_correction = adf_vf_void_noop; hw_data->init_admin_comms = adf_vf_int_noop; hw_data->exit_admin_comms = adf_vf_void_noop; hw_data->send_admin_init = adf_vf2pf_notify_init; hw_data->init_arb = adf_vf_int_noop; hw_data->exit_arb = adf_vf_void_noop; hw_data->disable_iov = adf_vf2pf_notify_shutdown; hw_data->get_accel_mask = get_accel_mask; hw_data->get_ae_mask = get_ae_mask; hw_data->get_num_accels = get_num_accels; hw_data->get_num_aes = get_num_aes; hw_data->get_etr_bar_id = get_etr_bar_id; hw_data->get_misc_bar_id = get_misc_bar_id; hw_data->get_clock_speed = get_clock_speed; hw_data->get_sku = get_sku; hw_data->enable_ints = adf_vf_void_noop; hw_data->reset_device = adf_reset_flr; hw_data->restore_device = adf_dev_restore; hw_data->get_ring_svc_map_data = get_ring_svc_map_data; hw_data->get_ring_to_svc_map = get_ring_to_svc_map; hw_data->get_accel_cap = adf_4xxxvf_get_hw_cap; hw_data->config_device = adf_config_device; hw_data->set_asym_rings_mask = adf_set_asym_rings_mask; hw_data->ring_pair_reset = adf_4xxxvf_ring_pair_reset; hw_data->enable_pf2vf_interrupt = enable_pf2vm_interrupt; hw_data->disable_pf2vf_interrupt = disable_pf2vm_interrupt; hw_data->interrupt_active_pf2vf = interrupt_active_pf2vm; hw_data->get_int_active_bundles = get_int_active_bundles; hw_data->dev_class->instances++; adf_devmgr_update_class_index(hw_data); gen4vf_init_hw_csr_info(&hw_data->csr_info); 
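	/*
	 * Editor's sketch, not part of the patch: the ring_to_svc_map set up
	 * above packs one service id per ring pair, and with the default map
	 * it decodes roughly as
	 *
	 *	u16 map = ADF_4XXXIOV_DEFAULT_RING_TO_SRV_MAP;
	 *	for (int rp = 0; rp < ADF_CFG_NUM_SERVICES; rp++)
	 *		svc[rp] = GET_SRV_TYPE(map, rp);
	 *	// yields ASYM, SYM, ASYM, SYM for ring pairs 0..3
	 *
	 * which is the same unpacking get_ring_svc_map_data() performs per
	 * bank when instances are created.
	 */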
adf_gen4_init_vf_pfvf_ops(&hw_data->csr_info.pfvf_ops); } void adf_clean_hw_data_4xxxiov(struct adf_hw_device_data *hw_data) { hw_data->dev_class->instances--; adf_devmgr_update_class_index(hw_data); } diff --git a/sys/dev/qat/qat_hw/qat_c3xxx/adf_c3xxx_hw_data.h b/sys/dev/qat/qat_hw/qat_c3xxx/adf_c3xxx_hw_data.h index bfc5db1f5e5c..cddfc3f84853 100644 --- a/sys/dev/qat/qat_hw/qat_c3xxx/adf_c3xxx_hw_data.h +++ b/sys/dev/qat/qat_hw/qat_c3xxx/adf_c3xxx_hw_data.h @@ -1,126 +1,126 @@ /* SPDX-License-Identifier: BSD-3-Clause */ -/* Copyright(c) 2007-2022 Intel Corporation */ +/* Copyright(c) 2007-2025 Intel Corporation */ #ifndef ADF_C3XXX_HW_DATA_H_ #define ADF_C3XXX_HW_DATA_H_ /* PCIe configuration space */ -#define ADF_C3XXX_PMISC_BAR 0 -#define ADF_C3XXX_ETR_BAR 1 +#define ADF_C3XXX_PMISC_BAR 1 +#define ADF_C3XXX_ETR_BAR 2 #define ADF_C3XXX_RX_RINGS_OFFSET 8 #define ADF_C3XXX_TX_RINGS_MASK 0xFF #define ADF_C3XXX_MAX_ACCELERATORS 3 #define ADF_C3XXX_MAX_ACCELENGINES 6 #define ADF_C3XXX_ACCELERATORS_REG_OFFSET 16 #define ADF_C3XXX_ACCELERATORS_MASK 0x7 #define ADF_C3XXX_ACCELENGINES_MASK 0x3F #define ADF_C3XXX_ETR_MAX_BANKS 16 #define ADF_C3XXX_SMIAPF0_MASK_OFFSET (0x3A000 + 0x28) #define ADF_C3XXX_SMIAPF1_MASK_OFFSET (0x3A000 + 0x30) #define ADF_C3XXX_SMIA0_MASK 0xFFFF #define ADF_C3XXX_SMIA1_MASK 0x1 #define ADF_C3XXX_SOFTSTRAP_CSR_OFFSET 0x2EC #define ADF_C3XXX_POWERGATE_PKE BIT(24) #define ADF_C3XXX_POWERGATE_CY BIT(23) /* Error detection and correction */ #define ADF_C3XXX_AE_CTX_ENABLES(i) (i * 0x1000 + 0x20818) #define ADF_C3XXX_AE_MISC_CONTROL(i) (i * 0x1000 + 0x20960) #define ADF_C3XXX_ENABLE_AE_ECC_ERR BIT(28) #define ADF_C3XXX_ENABLE_AE_ECC_PARITY_CORR (BIT(24) | BIT(12)) #define ADF_C3XXX_UERRSSMSH(i) (i * 0x4000 + 0x18) #define ADF_C3XXX_CERRSSMSH(i) (i * 0x4000 + 0x10) #define ADF_C3XXX_ERRSSMSH_EN BIT(3) #define ADF_C3XXX_ERRSOU3 (0x3A000 + 0x0C) #define ADF_C3XXX_ERRSOU5 (0x3A000 + 0xD8) /* BIT(2) enables the logging of push/pull data errors. */ #define ADF_C3XXX_PPERR_EN (BIT(2)) /* Mask for VF2PF interrupts */ #define ADF_C3XXX_VF2PF1_16 (0xFFFF << 9) #define ADF_C3XXX_ERRSOU3_VF2PF(errsou3) (((errsou3)&0x01FFFE00) >> 9) #define ADF_C3XXX_ERRMSK3_VF2PF(vf_mask) (((vf_mask)&0xFFFF) << 9) /* Masks for correctable error interrupts. */ #define ADF_C3XXX_ERRMSK0_CERR (BIT(24) | BIT(16) | BIT(8) | BIT(0)) #define ADF_C3XXX_ERRMSK1_CERR (BIT(8) | BIT(0)) #define ADF_C3XXX_ERRMSK5_CERR (0) /* Masks for uncorrectable error interrupts. */ #define ADF_C3XXX_ERRMSK0_UERR (BIT(25) | BIT(17) | BIT(9) | BIT(1)) #define ADF_C3XXX_ERRMSK1_UERR (BIT(9) | BIT(1)) #define ADF_C3XXX_ERRMSK3_UERR \ (BIT(6) | BIT(5) | BIT(4) | BIT(3) | BIT(2) | BIT(0)) #define ADF_C3XXX_ERRMSK5_UERR (BIT(16)) /* RI CPP control */ #define ADF_C3XXX_RICPPINTCTL (0x3A000 + 0x110) /* * BIT(2) enables error detection and reporting on the RI Parity Error. * BIT(1) enables error detection and reporting on the RI CPP Pull interface. * BIT(0) enables error detection and reporting on the RI CPP Push interface. */ #define ADF_C3XXX_RICPP_EN (BIT(2) | BIT(1) | BIT(0)) /* TI CPP control */ #define ADF_C3XXX_TICPPINTCTL (0x3A400 + 0x138) /* * BIT(3) enables error detection and reporting on the ETR Parity Error. * BIT(2) enables error detection and reporting on the TI Parity Error. * BIT(1) enables error detection and reporting on the TI CPP Pull interface. * BIT(0) enables error detection and reporting on the TI CPP Push interface. 
*/ #define ADF_C3XXX_TICPP_EN (BIT(3) | BIT(2) | BIT(1) | BIT(0)) /* CFC Uncorrectable Errors */ #define ADF_C3XXX_CPP_CFC_ERR_CTRL (0x30000 + 0xC00) /* * BIT(1) enables interrupt. * BIT(0) enables detecting and logging of push/pull data errors. */ #define ADF_C3XXX_CPP_CFC_UE (BIT(1) | BIT(0)) #define ADF_C3XXX_SLICEPWRDOWN(i) ((i)*0x4000 + 0x2C) /* Enabling PKE4-PKE0. */ #define ADF_C3XXX_MMP_PWR_UP_MSK \ (BIT(20) | BIT(19) | BIT(18) | BIT(17) | BIT(16)) /* CPM Uncorrectable Errors */ #define ADF_C3XXX_INTMASKSSM(i) ((i)*0x4000 + 0x0) /* Disabling interrupts for correctable errors. */ #define ADF_C3XXX_INTMASKSSM_UERR \ (BIT(11) | BIT(9) | BIT(7) | BIT(5) | BIT(3) | BIT(1)) /* MMP */ /* BIT(3) enables correction. */ #define ADF_C3XXX_CERRSSMMMP_EN (BIT(3)) #define ADF_C3X_CLK_PER_SEC (343 * 1000000) /* BIT(3) enables logging. */ #define ADF_C3XXX_UERRSSMMMP_EN (BIT(3)) #define ADF_C3XXX_PF2VF_OFFSET(i) (0x3A000 + 0x280 + ((i)*0x04)) #define ADF_C3XXX_VINTMSK_OFFSET(i) (0x3A000 + 0x200 + ((i)*0x04)) /* Arbiter configuration */ #define ADF_C3XXX_ARB_OFFSET 0x30000 #define ADF_C3XXX_ARB_WRK_2_SER_MAP_OFFSET 0x180 #define ADF_C3XXX_ARB_WQCFG_OFFSET 0x100 /* Admin Interface Reg Offset */ #define ADF_C3XXX_ADMINMSGUR_OFFSET (0x3A000 + 0x574) #define ADF_C3XXX_ADMINMSGLR_OFFSET (0x3A000 + 0x578) #define ADF_C3XXX_MAILBOX_BASE_OFFSET 0x20970 /* Firmware Binary */ #define ADF_C3XXX_FW "qat_c3xxx_fw" #define ADF_C3XXX_MMP "qat_c3xxx_mmp_fw" void adf_init_hw_data_c3xxx(struct adf_hw_device_data *hw_data); void adf_clean_hw_data_c3xxx(struct adf_hw_device_data *hw_data); #define ADF_C3XXX_AE_FREQ (685 * 1000000) #define ADF_C3XXX_MIN_AE_FREQ (320 * 1000000) #define ADF_C3XXX_MAX_AE_FREQ (685 * 1000000) #endif diff --git a/sys/dev/qat/qat_hw/qat_c4xxx/adf_c4xxx_hw_data.c b/sys/dev/qat/qat_hw/qat_c4xxx/adf_c4xxx_hw_data.c index 36bdbe82d5a9..d2969c1b06ed 100644 --- a/sys/dev/qat/qat_hw/qat_c4xxx/adf_c4xxx_hw_data.c +++ b/sys/dev/qat/qat_hw/qat_c4xxx/adf_c4xxx_hw_data.c @@ -1,2222 +1,2222 @@ /* SPDX-License-Identifier: BSD-3-Clause */ -/* Copyright(c) 2007-2022 Intel Corporation */ +/* Copyright(c) 2007-2025 Intel Corporation */ #include #include #include #include #include #include #include #include #include #include #include "adf_c4xxx_hw_data.h" #include "adf_c4xxx_reset.h" #include "adf_c4xxx_inline.h" #include "adf_c4xxx_ras.h" #include "adf_c4xxx_misc_error_stats.h" #include "adf_c4xxx_pke_replay_stats.h" #include "adf_heartbeat.h" #include "icp_qat_fw_init_admin.h" #include "icp_qat_hw.h" /* accel unit information */ static struct adf_accel_unit adf_c4xxx_au_32_ae[] = { { 0x1, 0x3, 0x3F, 0x1B, 6, ADF_ACCEL_SERVICE_NULL }, { 0x2, 0xC, 0xFC0, 0x6C0, 6, ADF_ACCEL_SERVICE_NULL }, { 0x4, 0x30, 0xF000, 0xF000, 4, ADF_ACCEL_SERVICE_NULL }, { 0x8, 0xC0, 0x3F0000, 0x1B0000, 6, ADF_ACCEL_SERVICE_NULL }, { 0x10, 0x300, 0xFC00000, 0x6C00000, 6, ADF_ACCEL_SERVICE_NULL }, { 0x20, 0xC00, 0xF0000000, 0xF0000000, 4, ADF_ACCEL_SERVICE_NULL } }; static struct adf_accel_unit adf_c4xxx_au_24_ae[] = { { 0x1, 0x3, 0x3F, 0x1B, 6, ADF_ACCEL_SERVICE_NULL }, { 0x2, 0xC, 0xFC0, 0x6C0, 6, ADF_ACCEL_SERVICE_NULL }, { 0x8, 0xC0, 0x3F0000, 0x1B0000, 6, ADF_ACCEL_SERVICE_NULL }, { 0x10, 0x300, 0xFC00000, 0x6C00000, 6, ADF_ACCEL_SERVICE_NULL }, }; static struct adf_accel_unit adf_c4xxx_au_12_ae[] = { { 0x1, 0x3, 0x3F, 0x1B, 6, ADF_ACCEL_SERVICE_NULL }, { 0x8, 0xC0, 0x3F0000, 0x1B0000, 6, ADF_ACCEL_SERVICE_NULL }, }; static struct adf_accel_unit adf_c4xxx_au_emulation[] = { { 0x1, 0x3, 0x3F, 0x1B, 6, ADF_ACCEL_SERVICE_NULL }, { 
0x2, 0xC, 0xC0, 0xC0, 2, ADF_ACCEL_SERVICE_NULL } }; /* Accel engine threads for each of the following services * , , , */ /* Thread mapping for SKU capable of symmetric cryptography */ static const struct adf_ae_info adf_c4xxx_32_ae_sym[] = { { 2, 6, 3 }, { 2, 6, 3 }, { 1, 7, 0 }, { 2, 6, 3 }, { 2, 6, 3 }, { 1, 7, 0 }, { 2, 6, 3 }, { 2, 6, 3 }, { 1, 7, 0 }, { 2, 6, 3 }, { 2, 6, 3 }, { 1, 7, 0 }, { 2, 6, 3 }, { 2, 6, 3 }, { 2, 6, 3 }, { 2, 6, 3 }, { 2, 6, 3 }, { 2, 6, 3 }, { 1, 7, 0 }, { 2, 6, 3 }, { 2, 6, 3 }, { 1, 7, 0 }, { 2, 6, 3 }, { 2, 6, 3 }, { 1, 7, 0 }, { 2, 6, 3 }, { 2, 6, 3 }, { 1, 7, 0 }, { 2, 6, 3 }, { 2, 6, 3 }, { 2, 6, 3 }, { 2, 6, 3 } }; static const struct adf_ae_info adf_c4xxx_24_ae_sym[] = { { 2, 6, 3 }, { 2, 6, 3 }, { 1, 7, 0 }, { 2, 6, 3 }, { 2, 6, 3 }, { 1, 7, 0 }, { 2, 6, 3 }, { 2, 6, 3 }, { 1, 7, 0 }, { 2, 6, 3 }, { 2, 6, 3 }, { 1, 7, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 2, 6, 3 }, { 2, 6, 3 }, { 1, 7, 0 }, { 2, 6, 3 }, { 2, 6, 3 }, { 1, 7, 0 }, { 2, 6, 3 }, { 2, 6, 3 }, { 1, 7, 0 }, { 2, 6, 3 }, { 2, 6, 3 }, { 1, 7, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 } }; static const struct adf_ae_info adf_c4xxx_12_ae_sym[] = { { 2, 6, 3 }, { 2, 6, 3 }, { 1, 7, 0 }, { 2, 6, 3 }, { 2, 6, 3 }, { 1, 7, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 2, 6, 3 }, { 2, 6, 3 }, { 1, 7, 0 }, { 2, 6, 3 }, { 2, 6, 3 }, { 1, 7, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 } }; /* Thread mapping for SKU capable of asymmetric and symmetric cryptography */ static const struct adf_ae_info adf_c4xxx_32_ae[] = { { 2, 5, 3 }, { 2, 5, 3 }, { 1, 6, 0 }, { 2, 5, 3 }, { 2, 5, 3 }, { 1, 6, 0 }, { 2, 5, 3 }, { 2, 5, 3 }, { 1, 6, 0 }, { 2, 5, 3 }, { 2, 5, 3 }, { 1, 6, 0 }, { 2, 5, 3 }, { 2, 5, 3 }, { 2, 5, 3 }, { 2, 5, 3 }, { 2, 5, 3 }, { 2, 5, 3 }, { 1, 6, 0 }, { 2, 5, 3 }, { 2, 5, 3 }, { 1, 6, 0 }, { 2, 5, 3 }, { 2, 5, 3 }, { 1, 6, 0 }, { 2, 5, 3 }, { 2, 5, 3 }, { 1, 6, 0 }, { 2, 5, 3 }, { 2, 5, 3 }, { 2, 5, 3 }, { 2, 5, 3 } }; static const struct adf_ae_info adf_c4xxx_24_ae[] = { { 2, 5, 3 }, { 2, 5, 3 }, { 1, 6, 0 }, { 2, 5, 3 }, { 2, 5, 3 }, { 1, 6, 0 }, { 2, 5, 3 }, { 2, 5, 3 }, { 1, 6, 0 }, { 2, 5, 3 }, { 2, 5, 3 }, { 1, 6, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 2, 5, 3 }, { 2, 5, 3 }, { 1, 6, 0 }, { 2, 5, 3 }, { 2, 5, 3 }, { 1, 6, 0 }, { 2, 5, 3 }, { 2, 5, 3 }, { 1, 6, 0 }, { 2, 5, 3 }, { 2, 5, 3 }, { 1, 6, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 } }; static const struct adf_ae_info adf_c4xxx_12_ae[] = { { 2, 5, 3 }, { 2, 5, 3 }, { 1, 6, 0 }, { 2, 5, 3 }, { 2, 5, 3 }, { 1, 6, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 2, 5, 3 }, { 2, 5, 3 }, { 1, 6, 0 }, { 2, 5, 3 }, { 2, 5, 3 }, { 1, 6, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 } }; static struct adf_hw_device_class c4xxx_class = {.name = ADF_C4XXX_DEVICE_NAME, .type = DEV_C4XXX, .instances = 0 }; struct icp_qat_fw_init_c4xxx_admin_hb_stats { struct icp_qat_fw_init_admin_hb_cnt stats[ADF_NUM_THREADS_PER_AE]; }; struct adf_hb_count { u16 ae_thread[ADF_NUM_THREADS_PER_AE]; }; static const int sku_cy_au[] = ADF_C4XXX_NUM_CY_AU; static const int sku_dc_au[] = ADF_C4XXX_NUM_DC_AU; static const int sku_inline_au[] = 
ADF_C4XXX_NUM_INLINE_AU; /* * C4xxx devices introduce new fuses and soft straps and * are different from previous gen device implementations. */ static u32 get_accel_mask(struct adf_accel_dev *accel_dev) { device_t pdev = accel_dev->accel_pci_dev.pci_dev; u32 fusectl0; u32 softstrappull0; fusectl0 = pci_read_config(pdev, ADF_C4XXX_FUSECTL0_OFFSET, 4); softstrappull0 = pci_read_config(pdev, ADF_C4XXX_SOFTSTRAPPULL0_OFFSET, 4); return (~(fusectl0 | softstrappull0)) & ADF_C4XXX_ACCELERATORS_MASK; } static u32 get_ae_mask(struct adf_accel_dev *accel_dev) { device_t pdev = accel_dev->accel_pci_dev.pci_dev; u32 fusectl1; u32 softstrappull1; fusectl1 = pci_read_config(pdev, ADF_C4XXX_FUSECTL1_OFFSET, 4); softstrappull1 = pci_read_config(pdev, ADF_C4XXX_SOFTSTRAPPULL1_OFFSET, 4); /* Assume that AE and AU disable masks are consistent, so no * checks against the AU mask are performed */ return (~(fusectl1 | softstrappull1)) & ADF_C4XXX_ACCELENGINES_MASK; } static u32 get_num_accels(struct adf_hw_device_data *self) { return self ? hweight32(self->accel_mask) : 0; } static u32 get_num_aes(struct adf_hw_device_data *self) { return self ? hweight32(self->ae_mask) : 0; } static u32 get_misc_bar_id(struct adf_hw_device_data *self) { return ADF_C4XXX_PMISC_BAR; } static u32 get_etr_bar_id(struct adf_hw_device_data *self) { return ADF_C4XXX_ETR_BAR; } static u32 get_sram_bar_id(struct adf_hw_device_data *self) { return ADF_C4XXX_SRAM_BAR; } static inline void c4xxx_unpack_ssm_wdtimer(u64 value, u32 *upper, u32 *lower) { *lower = lower_32_bits(value); *upper = upper_32_bits(value); } /** * c4xxx_set_ssm_wdtimer() - Initialize the slice hang watchdog timer. * * @param accel_dev Structure holding accelerator data. * @return 0 on success, error code otherwise. */ static int c4xxx_set_ssm_wdtimer(struct adf_accel_dev *accel_dev) { struct adf_hw_device_data *hw_device = accel_dev->hw_device; struct adf_bar *misc_bar = &GET_BARS(accel_dev)[hw_device->get_misc_bar_id(hw_device)]; struct resource *csr = misc_bar->virt_addr; unsigned long accel_mask = hw_device->accel_mask; u32 accel = 0; u64 timer_val = ADF_C4XXX_SSM_WDT_64BIT_DEFAULT_VALUE; u64 timer_val_pke = ADF_C4XXX_SSM_WDT_PKE_64BIT_DEFAULT_VALUE; u32 ssm_wdt_low = 0, ssm_wdt_high = 0; u32 ssm_wdt_pke_low = 0, ssm_wdt_pke_high = 0; /* Convert 64bit Slice Hang watchdog value into 32bit values for * mmio write to 32bit CSRs. */ c4xxx_unpack_ssm_wdtimer(timer_val, &ssm_wdt_high, &ssm_wdt_low); c4xxx_unpack_ssm_wdtimer(timer_val_pke, &ssm_wdt_pke_high, &ssm_wdt_pke_low); /* Configures Slice Hang watchdogs */ for_each_set_bit(accel, &accel_mask, ADF_C4XXX_MAX_ACCELERATORS) { ADF_CSR_WR(csr, ADF_C4XXX_SSMWDTL_OFFSET(accel), ssm_wdt_low); ADF_CSR_WR(csr, ADF_C4XXX_SSMWDTH_OFFSET(accel), ssm_wdt_high); ADF_CSR_WR(csr, ADF_C4XXX_SSMWDTPKEL_OFFSET(accel), ssm_wdt_pke_low); ADF_CSR_WR(csr, ADF_C4XXX_SSMWDTPKEH_OFFSET(accel), ssm_wdt_pke_high); } return 0; } /** * c4xxx_check_slice_hang() - Check slice hang status * * Return: true if a slice hange interrupt is serviced.. 
*/ static bool c4xxx_check_slice_hang(struct adf_accel_dev *accel_dev) { struct adf_hw_device_data *hw_device = accel_dev->hw_device; struct adf_bar *misc_bar = &GET_BARS(accel_dev)[hw_device->get_misc_bar_id(hw_device)]; struct resource *csr = misc_bar->virt_addr; u32 slice_hang_offset; u32 ia_slice_hang_offset; u32 fw_irq_source; u32 ia_irq_source; u32 accel_num = 0; bool handled = false; u32 errsou10 = ADF_CSR_RD(csr, ADF_C4XXX_ERRSOU10); unsigned long accel_mask; accel_mask = hw_device->accel_mask; for_each_set_bit(accel_num, &accel_mask, ADF_C4XXX_MAX_ACCELERATORS) { if (!(errsou10 & ADF_C4XXX_IRQ_SRC_MASK(accel_num))) continue; fw_irq_source = ADF_CSR_RD(csr, ADF_INTSTATSSM(accel_num)); ia_irq_source = ADF_CSR_RD(csr, ADF_C4XXX_IAINTSTATSSM(accel_num)); ia_slice_hang_offset = ADF_C4XXX_IASLICEHANGSTATUS_OFFSET(accel_num); /* FW did not clear SliceHang error, IA logs and clears * the error */ if ((fw_irq_source & ADF_INTSTATSSM_SHANGERR) && (ia_irq_source & ADF_INTSTATSSM_SHANGERR)) { slice_hang_offset = ADF_C4XXX_SLICEHANGSTATUS_OFFSET(accel_num); /* Bring hung slice out of reset */ adf_csr_fetch_and_and(csr, slice_hang_offset, ~0); /* Log SliceHang error and clear an interrupt */ handled = adf_handle_slice_hang(accel_dev, accel_num, csr, ia_slice_hang_offset); atomic_inc(&accel_dev->ras_counters[ADF_RAS_UNCORR]); } /* FW cleared SliceHang, IA only logs an error */ else if (!(fw_irq_source & ADF_INTSTATSSM_SHANGERR) && (ia_irq_source & ADF_INTSTATSSM_SHANGERR)) { /* Log SliceHang error and clear an interrupt */ handled = adf_handle_slice_hang(accel_dev, accel_num, csr, ia_slice_hang_offset); atomic_inc(&accel_dev->ras_counters[ADF_RAS_UNCORR]); } /* Clear the associated IA interrupt */ adf_csr_fetch_and_and(csr, ADF_C4XXX_IAINTSTATSSM(accel_num), ~BIT(13)); } return handled; } static bool get_eth_doorbell_msg(struct adf_accel_dev *accel_dev) { struct resource *csr = (&GET_BARS(accel_dev)[ADF_C4XXX_PMISC_BAR])->virt_addr; struct adf_hw_device_data *hw_device = accel_dev->hw_device; u32 errsou11 = ADF_CSR_RD(csr, ADF_C4XXX_ERRSOU11); u32 doorbell_int = ADF_CSR_RD(csr, ADF_C4XXX_ETH_DOORBELL_INT); u32 eth_doorbell_reg[ADF_C4XXX_NUM_ETH_DOORBELL_REGS]; bool handled = false; u32 data_reg; u8 i; /* Reset cannot be acknowledged until the reset */ hw_device->reset_ack = false; /* Check if doorbell interrupt occurred. */ if (errsou11 & ADF_C4XXX_DOORBELL_INT_SRC) { /* Decode doorbell messages from ethernet device */ for (i = 0; i < ADF_C4XXX_NUM_ETH_DOORBELL_REGS; i++) { eth_doorbell_reg[i] = 0; if (doorbell_int & BIT(i)) { data_reg = ADF_C4XXX_ETH_DOORBELL(i); eth_doorbell_reg[i] = ADF_CSR_RD(csr, data_reg); device_printf( GET_DEV(accel_dev), "Receives Doorbell message(0x%08x)\n", eth_doorbell_reg[i]); } } /* Only need to check PF0 */ if (eth_doorbell_reg[0] == ADF_C4XXX_IOSFSB_RESET_ACK) { device_printf(GET_DEV(accel_dev), "Receives pending reset ACK\n"); hw_device->reset_ack = true; } /* Clear the interrupt source */ ADF_CSR_WR(csr, ADF_C4XXX_ETH_DOORBELL_INT, ADF_C4XXX_ETH_DOORBELL_MASK); handled = true; } return handled; } static enum dev_sku_info get_sku(struct adf_hw_device_data *self) { int aes = get_num_aes(self); u32 capabilities = self->accel_capabilities_mask; bool sym_only_sku = false; /* Check if SKU is capable only of symmetric cryptography * via device capabilities. 
*/ if ((capabilities & ADF_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC) && !(capabilities & ADF_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC) && !(capabilities & ADF_ACCEL_CAPABILITIES_COMPRESSION)) sym_only_sku = true; switch (aes) { case ADF_C4XXX_HIGH_SKU_AES: if (sym_only_sku) return DEV_SKU_1_CY; return DEV_SKU_1; case ADF_C4XXX_MED_SKU_AES: if (sym_only_sku) return DEV_SKU_2_CY; return DEV_SKU_2; case ADF_C4XXX_LOW_SKU_AES: if (sym_only_sku) return DEV_SKU_3_CY; return DEV_SKU_3; }; return DEV_SKU_UNKNOWN; } static bool c4xxx_check_prod_sku(struct adf_accel_dev *accel_dev) { device_t pdev = accel_dev->accel_pci_dev.pci_dev; u32 fusectl0 = 0; fusectl0 = pci_read_config(pdev, ADF_C4XXX_FUSECTL0_OFFSET, 4); if (fusectl0 & ADF_C4XXX_FUSE_PROD_SKU_MASK) return true; else return false; } static bool adf_check_sym_only_sku_c4xxx(struct adf_accel_dev *accel_dev) { device_t pdev = accel_dev->accel_pci_dev.pci_dev; u32 legfuse = 0; legfuse = pci_read_config(pdev, ADF_DEVICE_LEGFUSE_OFFSET, 4); if (legfuse & ADF_C4XXX_LEGFUSE_BASE_SKU_MASK) return true; else return false; } static void adf_enable_slice_hang_detection(struct adf_accel_dev *accel_dev) { struct resource *csr; struct adf_hw_device_data *hw_device = accel_dev->hw_device; u32 accel = 0; unsigned long accel_mask; csr = (&GET_BARS(accel_dev)[ADF_C4XXX_PMISC_BAR])->virt_addr; accel_mask = hw_device->accel_mask; for_each_set_bit(accel, &accel_mask, ADF_C4XXX_MAX_ACCELERATORS) { /* Unmasks Slice Hang interrupts so they can be seen by IA. */ ADF_CSR_WR(csr, ADF_C4XXX_SHINTMASKSSM_OFFSET(accel), ADF_C4XXX_SHINTMASKSSM_VAL); } } static void adf_enable_ras(struct adf_accel_dev *accel_dev) { struct resource *csr; struct adf_hw_device_data *hw_device = accel_dev->hw_device; u32 accel = 0; unsigned long accel_mask; csr = (&GET_BARS(accel_dev)[ADF_C4XXX_PMISC_BAR])->virt_addr; accel_mask = hw_device->accel_mask; for_each_set_bit(accel, &accel_mask, ADF_C4XXX_MAX_ACCELERATORS) { ADF_CSR_WR(csr, ADF_C4XXX_GET_SSMFEATREN_OFFSET(accel), ADF_C4XXX_SSMFEATREN_VAL); } } static u32 get_clock_speed(struct adf_hw_device_data *self) { /* c4xxx CPP clock is equal to high-speed clock */ return self->clock_frequency; } static void adf_enable_error_interrupts(struct adf_accel_dev *accel_dev) { struct resource *csr, *aram_csr; struct adf_hw_device_data *hw_device = accel_dev->hw_device; u32 accel = 0; unsigned long accel_mask; csr = (&GET_BARS(accel_dev)[ADF_C4XXX_PMISC_BAR])->virt_addr; aram_csr = (&GET_BARS(accel_dev)[ADF_C4XXX_SRAM_BAR])->virt_addr; accel_mask = hw_device->accel_mask; for_each_set_bit(accel, &accel_mask, ADF_C4XXX_MAX_ACCELERATORS) { /* Enable shared memory, MMP, CPP, PPERR interrupts * for a given accel */ ADF_CSR_WR(csr, ADF_C4XXX_GET_INTMASKSSM_OFFSET(accel), 0); /* Enable SPP parity error interrupts for a given accel */ ADF_CSR_WR(csr, ADF_C4XXX_GET_SPPPARERRMSK_OFFSET(accel), 0); /* Enable ssm soft parity errors on given accel */ ADF_CSR_WR(csr, ADF_C4XXX_GET_SSMSOFTERRORPARITY_MASK_OFFSET(accel), ADF_C4XXX_SSMSOFTERRORPARITY_MASK_VAL); } /* Enable interrupts for VFtoPF0_127. 
*/ ADF_CSR_WR(csr, ADF_C4XXX_ERRMSK4, ADF_C4XXX_VF2PF0_31); ADF_CSR_WR(csr, ADF_C4XXX_ERRMSK5, ADF_C4XXX_VF2PF32_63); ADF_CSR_WR(csr, ADF_C4XXX_ERRMSK6, ADF_C4XXX_VF2PF64_95); ADF_CSR_WR(csr, ADF_C4XXX_ERRMSK7, ADF_C4XXX_VF2PF96_127); /* Enable interrupts signaling ECC correctable errors for all AEs */ ADF_CSR_WR(csr, ADF_C4XXX_ERRMSK8, ADF_C4XXX_ERRMSK8_COERR); ADF_CSR_WR(csr, ADF_C4XXX_HI_ME_COR_ERRLOG_ENABLE, ADF_C4XXX_HI_ME_COR_ERRLOG_ENABLE_MASK); /* Enable error interrupts reported by ERRSOU9 */ ADF_CSR_WR(csr, ADF_C4XXX_ERRMSK9, ADF_C4XXX_ERRMSK9_IRQ_MASK); /* Enable uncorrectable errors on all the AE */ ADF_CSR_WR(csr, ADF_C4XXX_HI_ME_UNCERR_LOG_ENABLE, ADF_C4XXX_HI_ME_UNCERR_LOG_ENABLE_MASK); /* Enable CPP Agent to report command parity errors */ ADF_CSR_WR(csr, ADF_C4XXX_HI_CPP_AGENT_CMD_PAR_ERR_LOG_ENABLE, ADF_C4XXX_HI_CPP_AGENT_CMD_PAR_ERR_LOG_ENABLE_MASK); /* Enable reporting of RI memory parity errors */ ADF_CSR_WR(csr, ADF_C4XXX_RI_MEM_PAR_ERR_EN0, ADF_C4XXX_RI_MEM_PAR_ERR_EN0_MASK); /* Enable reporting of TI memory parity errors */ ADF_CSR_WR(csr, ADF_C4XXX_TI_MEM_PAR_ERR_EN0, ADF_C4XXX_TI_MEM_PAR_ERR_EN0_MASK); ADF_CSR_WR(csr, ADF_C4XXX_TI_MEM_PAR_ERR_EN1, ADF_C4XXX_TI_MEM_PAR_ERR_EN1_MASK); /* Enable SSM errors */ ADF_CSR_WR(csr, ADF_C4XXX_ERRMSK10, ADF_C4XXX_ERRMSK10_SSM_ERR); /* Enable miscellaneous errors (ethernet doorbell aram, ici, ice) */ ADF_CSR_WR(csr, ADF_C4XXX_ERRMSK11, ADF_C4XXX_ERRMSK11_ERR); /* RI CPP bus interface error detection and reporting. */ ADF_CSR_WR(csr, ADF_C4XXX_RICPPINTCTL, ADF_C4XXX_RICPP_EN); /* TI CPP bus interface error detection and reporting. */ ADF_CSR_WR(csr, ADF_C4XXX_TICPPINTCTL, ADF_C4XXX_TICPP_EN); /* Enable CFC Error interrupts and logging. */ ADF_CSR_WR(csr, ADF_C4XXX_CPP_CFC_ERR_CTRL, ADF_C4XXX_CPP_CFC_UE); /* Enable ARAM correctable error detection. */ ADF_CSR_WR(aram_csr, ADF_C4XXX_ARAMCERR, ADF_C4XXX_ARAM_CERR); /* Enable ARAM uncorrectable error detection. 
*/ ADF_CSR_WR(aram_csr, ADF_C4XXX_ARAMUERR, ADF_C4XXX_ARAM_UERR); /* Enable Push/Pull Misc Uncorrectable error interrupts and logging */ ADF_CSR_WR(aram_csr, ADF_C4XXX_CPPMEMTGTERR, ADF_C4XXX_TGT_UERR); } static void adf_enable_mmp_error_correction(struct resource *csr, struct adf_hw_device_data *hw_data) { unsigned int accel = 0, mmp; unsigned long uerrssmmmp_mask, cerrssmmmp_mask; enum operation op; unsigned long accel_mask; /* Prepare values and operation that will be performed on * UERRSSMMMP and CERRSSMMMP registers on each MMP */ if (hw_data->accel_capabilities_mask & ADF_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC) { uerrssmmmp_mask = ADF_C4XXX_UERRSSMMMP_EN; cerrssmmmp_mask = ADF_C4XXX_CERRSSMMMP_EN; op = OR; } else { uerrssmmmp_mask = ~ADF_C4XXX_UERRSSMMMP_EN; cerrssmmmp_mask = ~ADF_C4XXX_CERRSSMMMP_EN; op = AND; } accel_mask = hw_data->accel_mask; /* Enable MMP Logging */ for_each_set_bit(accel, &accel_mask, ADF_C4XXX_MAX_ACCELERATORS) { /* Set power-up */ adf_csr_fetch_and_and(csr, ADF_C4XXX_SLICEPWRDOWN(accel), ~ADF_C4XXX_MMP_PWR_UP_MSK); for (mmp = 0; mmp < ADF_C4XXX_MAX_MMP; ++mmp) { adf_csr_fetch_and_update(op, csr, ADF_C4XXX_UERRSSMMMP(accel, mmp), uerrssmmmp_mask); adf_csr_fetch_and_update(op, csr, ADF_C4XXX_CERRSSMMMP(accel, mmp), cerrssmmmp_mask); } /* Restore power-down value */ adf_csr_fetch_and_or(csr, ADF_C4XXX_SLICEPWRDOWN(accel), ADF_C4XXX_MMP_PWR_UP_MSK); } } static void get_arb_info(struct arb_info *arb_csrs_info) { arb_csrs_info->arbiter_offset = ADF_C4XXX_ARB_OFFSET; arb_csrs_info->wrk_cfg_offset = ADF_C4XXX_ARB_WQCFG_OFFSET; } static void get_admin_info(struct admin_info *admin_csrs_info) { admin_csrs_info->mailbox_offset = ADF_C4XXX_MAILBOX_BASE_OFFSET; admin_csrs_info->admin_msg_ur = ADF_C4XXX_ADMINMSGUR_OFFSET; admin_csrs_info->admin_msg_lr = ADF_C4XXX_ADMINMSGLR_OFFSET; } static void get_errsou_offset(u32 *errsou3, u32 *errsou5) { *errsou3 = ADF_C4XXX_ERRSOU3; *errsou5 = ADF_C4XXX_ERRSOU5; } static void adf_enable_error_correction(struct adf_accel_dev *accel_dev) { struct adf_hw_device_data *hw_device = accel_dev->hw_device; struct adf_bar *misc_bar = &GET_BARS(accel_dev)[ADF_C4XXX_PMISC_BAR]; struct resource *csr = misc_bar->virt_addr; unsigned int val, i = 0; unsigned long ae_mask; unsigned long accel_mask; ae_mask = hw_device->ae_mask; /* Enable Accel Engine error detection & correction */ for_each_set_bit(i, &ae_mask, ADF_C4XXX_MAX_ACCELENGINES) { val = ADF_CSR_RD(csr, ADF_C4XXX_AE_CTX_ENABLES(i)); val |= ADF_C4XXX_ENABLE_AE_ECC_ERR; ADF_CSR_WR(csr, ADF_C4XXX_AE_CTX_ENABLES(i), val); val = ADF_CSR_RD(csr, ADF_C4XXX_AE_MISC_CONTROL(i)); val |= ADF_C4XXX_ENABLE_AE_ECC_PARITY_CORR; ADF_CSR_WR(csr, ADF_C4XXX_AE_MISC_CONTROL(i), val); } accel_mask = hw_device->accel_mask; /* Enable shared memory error detection & correction */ for_each_set_bit(i, &accel_mask, ADF_C4XXX_MAX_ACCELERATORS) { val = ADF_CSR_RD(csr, ADF_C4XXX_UERRSSMSH(i)); val |= ADF_C4XXX_ERRSSMSH_EN; ADF_CSR_WR(csr, ADF_C4XXX_UERRSSMSH(i), val); val = ADF_CSR_RD(csr, ADF_C4XXX_CERRSSMSH(i)); val |= ADF_C4XXX_ERRSSMSH_EN; ADF_CSR_WR(csr, ADF_C4XXX_CERRSSMSH(i), val); } adf_enable_ras(accel_dev); adf_enable_mmp_error_correction(csr, hw_device); adf_enable_slice_hang_detection(accel_dev); adf_enable_error_interrupts(accel_dev); } static void adf_enable_ints(struct adf_accel_dev *accel_dev) { struct resource *addr; addr = (&GET_BARS(accel_dev)[ADF_C4XXX_PMISC_BAR])->virt_addr; /* Enable bundle interrupts */ ADF_CSR_WR(addr, ADF_C4XXX_SMIAPF0_MASK_OFFSET, ADF_C4XXX_SMIA0_MASK); ADF_CSR_WR(addr, 
ADF_C4XXX_SMIAPF1_MASK_OFFSET, ADF_C4XXX_SMIA1_MASK); ADF_CSR_WR(addr, ADF_C4XXX_SMIAPF2_MASK_OFFSET, ADF_C4XXX_SMIA2_MASK); ADF_CSR_WR(addr, ADF_C4XXX_SMIAPF3_MASK_OFFSET, ADF_C4XXX_SMIA3_MASK); /*Enable misc interrupts*/ ADF_CSR_WR(addr, ADF_C4XXX_SMIAPF4_MASK_OFFSET, ADF_C4XXX_SMIA4_MASK); } static u32 get_ae_clock(struct adf_hw_device_data *self) { /* Clock update interval is <16> ticks for c4xxx. */ return self->clock_frequency / 16; } static int measure_clock(struct adf_accel_dev *accel_dev) { u32 frequency; int ret = 0; ret = adf_dev_measure_clock(accel_dev, &frequency, ADF_C4XXX_MIN_AE_FREQ, ADF_C4XXX_MAX_AE_FREQ); if (ret) return ret; accel_dev->hw_device->clock_frequency = frequency; return 0; } static int get_storage_enabled(struct adf_accel_dev *accel_dev, uint32_t *storage_enabled) { if (accel_dev->au_info->num_dc_au > 0) { *storage_enabled = 1; GET_HW_DATA(accel_dev)->extended_dc_capabilities = ICP_ACCEL_CAPABILITIES_ADVANCED_COMPRESSION; } return 0; } static u32 c4xxx_get_hw_cap(struct adf_accel_dev *accel_dev) { device_t pdev = accel_dev->accel_pci_dev.pci_dev; u32 legfuses; u32 softstrappull0, softstrappull2; u32 fusectl0, fusectl2; u32 capabilities; /* Read accelerator capabilities mask */ legfuses = pci_read_config(pdev, ADF_DEVICE_LEGFUSE_OFFSET, 4); capabilities = ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC | ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC | ICP_ACCEL_CAPABILITIES_CIPHER | ICP_ACCEL_CAPABILITIES_AUTHENTICATION | ICP_ACCEL_CAPABILITIES_COMPRESSION | ICP_ACCEL_CAPABILITIES_ZUC | ICP_ACCEL_CAPABILITIES_HKDF | ICP_ACCEL_CAPABILITIES_SHA3_EXT | ICP_ACCEL_CAPABILITIES_SM3 | ICP_ACCEL_CAPABILITIES_SM4 | ICP_ACCEL_CAPABILITIES_CHACHA_POLY | ICP_ACCEL_CAPABILITIES_AESGCM_SPC | ICP_ACCEL_CAPABILITIES_ECEDMONT; if (legfuses & ICP_ACCEL_MASK_CIPHER_SLICE) { capabilities &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC; capabilities &= ~ICP_ACCEL_CAPABILITIES_CIPHER; } if (legfuses & ICP_ACCEL_MASK_AUTH_SLICE) capabilities &= ~ICP_ACCEL_CAPABILITIES_AUTHENTICATION; if (legfuses & ICP_ACCEL_MASK_PKE_SLICE) capabilities &= ~(ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC | ICP_ACCEL_CAPABILITIES_ECEDMONT); if (legfuses & ICP_ACCEL_MASK_COMPRESS_SLICE) { capabilities &= ~ICP_ACCEL_CAPABILITIES_COMPRESSION; capabilities &= ~ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY; } if (legfuses & ICP_ACCEL_MASK_EIA3_SLICE) capabilities &= ~ICP_ACCEL_CAPABILITIES_ZUC; if (legfuses & ICP_ACCEL_MASK_SM3_SLICE) capabilities &= ~ICP_ACCEL_CAPABILITIES_SM3; if (legfuses & ICP_ACCEL_MASK_SM4_SLICE) capabilities &= ~ICP_ACCEL_CAPABILITIES_SM4; /* Read fusectl0 & softstrappull0 registers to ensure inline * acceleration is not disabled */ softstrappull0 = pci_read_config(pdev, ADF_C4XXX_SOFTSTRAPPULL0_OFFSET, 4); fusectl0 = pci_read_config(pdev, ADF_C4XXX_FUSECTL0_OFFSET, 4); if ((fusectl0 | softstrappull0) & ADF_C4XXX_FUSE_DISABLE_INLINE_MASK) capabilities &= ~ICP_ACCEL_CAPABILITIES_INLINE; /* Read fusectl2 & softstrappull2 registers to check out if * PKE/DC are enabled/disabled */ softstrappull2 = pci_read_config(pdev, ADF_C4XXX_SOFTSTRAPPULL2_OFFSET, 4); fusectl2 = pci_read_config(pdev, ADF_C4XXX_FUSECTL2_OFFSET, 4); /* Disable PKE/DC cap if there are no PKE/DC-enabled AUs. 
*/ if (!(~fusectl2 & ~softstrappull2 & ADF_C4XXX_FUSE_PKE_MASK)) capabilities &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC; if (!(~fusectl2 & ~softstrappull2 & ADF_C4XXX_FUSE_COMP_MASK)) capabilities &= ~(ICP_ACCEL_CAPABILITIES_COMPRESSION | ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY); return capabilities; } static int c4xxx_configure_accel_units(struct adf_accel_dev *accel_dev) { char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES] = { 0 }; unsigned long val; char val_str[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = { 0 }; int sku; struct adf_hw_device_data *hw_data = accel_dev->hw_device; sku = get_sku(hw_data); if (adf_cfg_section_add(accel_dev, ADF_GENERAL_SEC)) goto err; snprintf(key, sizeof(key), ADF_SERVICES_ENABLED); /* Base station SKU supports symmetric cryptography only. */ if (adf_check_sym_only_sku_c4xxx(accel_dev)) snprintf(val_str, sizeof(val_str), ADF_SERVICE_SYM); else snprintf(val_str, sizeof(val_str), ADF_SERVICE_CY); val = sku_dc_au[sku]; if (val) { strncat(val_str, ADF_SERVICES_SEPARATOR ADF_SERVICE_DC, ADF_CFG_MAX_VAL_LEN_IN_BYTES - strnlen(val_str, sizeof(val_str)) - ADF_CFG_NULL_TERM_SIZE); } if (adf_cfg_add_key_value_param( accel_dev, ADF_GENERAL_SEC, key, (void *)val_str, ADF_STR)) goto err; snprintf(key, sizeof(key), ADF_NUM_CY_ACCEL_UNITS); val = sku_cy_au[sku]; if (adf_cfg_add_key_value_param( accel_dev, ADF_GENERAL_SEC, key, (void *)&val, ADF_DEC)) goto err; snprintf(key, sizeof(key), ADF_NUM_DC_ACCEL_UNITS); val = sku_dc_au[sku]; if (adf_cfg_add_key_value_param( accel_dev, ADF_GENERAL_SEC, key, (void *)&val, ADF_DEC)) goto err; snprintf(key, sizeof(key), ADF_NUM_INLINE_ACCEL_UNITS); val = sku_inline_au[sku]; if (adf_cfg_add_key_value_param( accel_dev, ADF_GENERAL_SEC, key, (void *)&val, ADF_DEC)) goto err; return 0; err: device_printf(GET_DEV(accel_dev), "Failed to configure accel units\n"); return EINVAL; } static void update_hw_capability(struct adf_accel_dev *accel_dev) { struct adf_accel_unit_info *au_info = accel_dev->au_info; struct adf_hw_device_data *hw_device = accel_dev->hw_device; u32 disabled_caps = 0; if (!au_info->asym_ae_msk) disabled_caps = ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC | - ICP_ACCEL_CAPABILITIES_AUTHENTICATION; - + ICP_ACCEL_CAPABILITIES_ECEDMONT; if (!au_info->sym_ae_msk) disabled_caps |= ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC | + ICP_ACCEL_CAPABILITIES_AUTHENTICATION | ICP_ACCEL_CAPABILITIES_CIPHER | ICP_ACCEL_CAPABILITIES_ZUC | ICP_ACCEL_CAPABILITIES_SHA3_EXT | ICP_ACCEL_CAPABILITIES_SM3 | ICP_ACCEL_CAPABILITIES_SM4 | ICP_ACCEL_CAPABILITIES_CHACHA_POLY | ICP_ACCEL_CAPABILITIES_AESGCM_SPC; if (!au_info->dc_ae_msk) { disabled_caps |= ICP_ACCEL_CAPABILITIES_COMPRESSION | ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY; hw_device->extended_dc_capabilities = 0; } if (!au_info->inline_ingress_msk && !au_info->inline_egress_msk) disabled_caps |= ICP_ACCEL_CAPABILITIES_INLINE; hw_device->accel_capabilities_mask = c4xxx_get_hw_cap(accel_dev) & ~disabled_caps; } static void c4xxx_set_sadb_size(struct adf_accel_dev *accel_dev) { u32 sadb_reg_value = 0; struct resource *aram_csr_base; aram_csr_base = (&GET_BARS(accel_dev)[ADF_C4XXX_SRAM_BAR])->virt_addr; if (accel_dev->au_info->num_inline_au) { /* REG_SA_DB_CTRL register initialisation */ sadb_reg_value = ADF_C4XXX_SADB_REG_VALUE(accel_dev); ADF_CSR_WR(aram_csr_base, ADF_C4XXX_REG_SA_DB_CTRL, sadb_reg_value); } else { /* Zero the SADB size when inline is disabled. */ adf_csr_fetch_and_and(aram_csr_base, ADF_C4XXX_REG_SA_DB_CTRL, ADF_C4XXX_SADB_SIZE_BIT); } /* REG_SA_CTRL_LOCK register initialisation. 
We set the lock * bit in order to prevent the REG_SA_DB_CTRL to be * overwritten */ ADF_CSR_WR(aram_csr_base, ADF_C4XXX_REG_SA_CTRL_LOCK, ADF_C4XXX_DEFAULT_SA_CTRL_LOCKOUT); } static void c4xxx_init_error_notification_configuration(struct adf_accel_dev *accel_dev, u32 offset) { struct resource *aram_csr_base; aram_csr_base = (&GET_BARS(accel_dev)[ADF_C4XXX_SRAM_BAR])->virt_addr; /* configure error notification configuration registers */ /* Set CD Parity error */ ADF_CSR_WR(aram_csr_base, ADF_C4XXX_IC_CD_RF_PARITY_ERR_0 + offset, ADF_C4XXX_CD_RF_PARITY_ERR_0_VAL); ADF_CSR_WR(aram_csr_base, ADF_C4XXX_IC_CD_RF_PARITY_ERR_1 + offset, ADF_C4XXX_CD_RF_PARITY_ERR_1_VAL); ADF_CSR_WR(aram_csr_base, ADF_C4XXX_IC_CD_RF_PARITY_ERR_2 + offset, ADF_C4XXX_CD_RF_PARITY_ERR_2_VAL); ADF_CSR_WR(aram_csr_base, ADF_C4XXX_IC_CD_RF_PARITY_ERR_3 + offset, ADF_C4XXX_CD_RF_PARITY_ERR_3_VAL); /* Set CD RAM ECC Correctable Error */ ADF_CSR_WR(aram_csr_base, ADF_C4XXX_IC_CD_CERR + offset, ADF_C4XXX_CD_CERR_VAL); /* Set CD RAM ECC UnCorrectable Error */ ADF_CSR_WR(aram_csr_base, ADF_C4XXX_IC_CD_UERR + offset, ADF_C4XXX_CD_UERR_VAL); /* Set Inline (excl cmd_dis) Parity Error */ ADF_CSR_WR(aram_csr_base, ADF_C4XXX_IC_INLN_RF_PARITY_ERR_0 + offset, ADF_C4XXX_INLN_RF_PARITY_ERR_0_VAL); ADF_CSR_WR(aram_csr_base, ADF_C4XXX_IC_INLN_RF_PARITY_ERR_1 + offset, ADF_C4XXX_INLN_RF_PARITY_ERR_1_VAL); ADF_CSR_WR(aram_csr_base, ADF_C4XXX_IC_INLN_RF_PARITY_ERR_2 + offset, ADF_C4XXX_INLN_RF_PARITY_ERR_2_VAL); ADF_CSR_WR(aram_csr_base, ADF_C4XXX_IC_INLN_RF_PARITY_ERR_3 + offset, ADF_C4XXX_INLN_RF_PARITY_ERR_3_VAL); ADF_CSR_WR(aram_csr_base, ADF_C4XXX_IC_INLN_RF_PARITY_ERR_4 + offset, ADF_C4XXX_INLN_RF_PARITY_ERR_4_VAL); ADF_CSR_WR(aram_csr_base, ADF_C4XXX_IC_INLN_RF_PARITY_ERR_5 + offset, ADF_C4XXX_INLN_RF_PARITY_ERR_5_VAL); /* Set Parser RAM ECC Correctable Error */ ADF_CSR_WR(aram_csr_base, ADF_C4XXX_IC_PARSER_CERR + offset, ADF_C4XXX_PARSER_CERR_VAL); /* Set Parser RAM ECC UnCorrectable Error */ ADF_CSR_WR(aram_csr_base, ADF_C4XXX_IC_PARSER_UERR + offset, ADF_C4XXX_PARSER_UERR_VAL); /* Set CTPB RAM ECC Correctable Error */ ADF_CSR_WR(aram_csr_base, ADF_C4XXX_IC_CTPB_CERR + offset, ADF_C4XXX_CTPB_CERR_VAL); /* Set CTPB RAM ECC UnCorrectable Error */ ADF_CSR_WR(aram_csr_base, ADF_C4XXX_IC_CTPB_UERR + offset, ADF_C4XXX_CTPB_UERR_VAL); /* Set CPP Interface Status */ ADF_CSR_WR(aram_csr_base, ADF_C4XXX_IC_CPPM_ERR_STAT + offset, ADF_C4XXX_CPPM_ERR_STAT_VAL); /* Set CGST_MGMT_INT */ ADF_CSR_WR(aram_csr_base, ADF_C4XXX_IC_CONGESTION_MGMT_INT + offset, ADF_C4XXX_CONGESTION_MGMT_INI_VAL); /* CPP Interface Status */ ADF_CSR_WR(aram_csr_base, ADF_C4XXX_IC_CPPT_ERR_STAT + offset, ADF_C4XXX_CPPT_ERR_STAT_VAL); /* MAC Interrupt Mask */ ADF_CSR_WR64(aram_csr_base, ADF_C4XXX_IC_MAC_IM + offset, ADF_C4XXX_MAC_IM_VAL); } static void c4xxx_enable_parse_extraction(struct adf_accel_dev *accel_dev) { struct resource *aram_csr_base; aram_csr_base = (&GET_BARS(accel_dev)[ADF_C4XXX_SRAM_BAR])->virt_addr; /* Enable Inline Parse Extraction CRSs */ /* Set IC_PARSE_CTRL register */ ADF_CSR_WR(aram_csr_base, ADF_C4XXX_IC_PARSE_CTRL_OFFSET, ADF_C4XXX_IC_PARSE_CTRL_OFFSET_DEFAULT_VALUE); /* Set IC_PARSE_FIXED_DATA(0) */ ADF_CSR_WR(aram_csr_base, ADF_C4XXX_IC_PARSE_FIXED_DATA(0), ADF_C4XXX_DEFAULT_IC_PARSE_FIXED_DATA_0); /* Set IC_PARSE_FIXED_LENGTH */ ADF_CSR_WR(aram_csr_base, ADF_C4XXX_IC_PARSE_FIXED_LENGTH, ADF_C4XXX_DEFAULT_IC_PARSE_FIXED_LEN); /* Configure ESP protocol from an IPv4 header */ ADF_CSR_WR(aram_csr_base, ADF_C4XXX_IC_PARSE_IPV4_OFFSET_0, 
ADF_C4XXX_DEFAULT_IC_PARSE_IPV4_OFFS_0_VALUE); ADF_CSR_WR(aram_csr_base, ADF_C4XXX_IC_PARSE_IPV4_LENGTH_0, ADF_C4XXX_DEFAULT_IC_PARSE_IPV4_LEN_0_VALUE); /* Configure protocol extraction field from an IPv4 header */ ADF_CSR_WR(aram_csr_base, ADF_C4XXX_IC_PARSE_IPV4_OFFSET_1, ADF_C4XXX_DEFAULT_IC_PARSE_IPV4_OFFS_1_VALUE); ADF_CSR_WR(aram_csr_base, ADF_C4XXX_IC_PARSE_IPV4_LENGTH_1, ADF_C4XXX_DEFAULT_IC_PARSE_IPV4_LEN_1_VALUE); /* Configure SPI extraction field from an IPv4 header */ ADF_CSR_WR(aram_csr_base, ADF_C4XXX_IC_PARSE_IPV4_OFFSET_2, ADF_C4XXX_DEFAULT_IC_PARSE_IPV4_OFFS_2_VALUE); ADF_CSR_WR(aram_csr_base, ADF_C4XXX_IC_PARSE_IPV4_LENGTH_2, ADF_C4XXX_DEFAULT_IC_PARSE_IPV4_LEN_2_VALUE); /* Configure destination field IP address from an IPv4 header */ ADF_CSR_WR(aram_csr_base, ADF_C4XXX_IC_PARSE_IPV4_OFFSET_3, ADF_C4XXX_DEFAULT_IC_PARSE_IPV4_OFFS_3_VALUE); ADF_CSR_WR(aram_csr_base, ADF_C4XXX_IC_PARSE_IPV4_LENGTH_3, ADF_C4XXX_DEFAULT_IC_PARSE_IPV4_LEN_3_VALUE); /* Configure function number extraction field from an IPv6 header */ ADF_CSR_WR(aram_csr_base, ADF_C4XXX_IC_PARSE_IPV6_OFFSET_0, ADF_C4XXX_DEFAULT_IC_PARSE_IPV6_OFFS_0_VALUE); ADF_CSR_WR(aram_csr_base, ADF_C4XXX_IC_PARSE_IPV6_LENGTH_0, ADF_C4XXX_DEFAULT_IC_PARSE_IPV6_LEN_0_VALUE); /* Configure protocol extraction field from an IPv6 header */ ADF_CSR_WR(aram_csr_base, ADF_C4XXX_IC_PARSE_IPV6_OFFSET_1, ADF_C4XXX_DEFAULT_IC_PARSE_IPV6_OFFS_1_VALUE); ADF_CSR_WR(aram_csr_base, ADF_C4XXX_IC_PARSE_IPV6_LENGTH_1, ADF_C4XXX_DEFAULT_IC_PARSE_IPV6_LEN_1_VALUE); /* Configure SPI extraction field from an IPv6 header */ ADF_CSR_WR(aram_csr_base, ADF_C4XXX_IC_PARSE_IPV6_OFFSET_2, ADF_C4XXX_DEFAULT_IC_PARSE_IPV6_OFFS_2_VALUE); ADF_CSR_WR(aram_csr_base, ADF_C4XXX_IC_PARSE_IPV6_LENGTH_2, ADF_C4XXX_DEFAULT_IC_PARSE_IPV6_LEN_2_VALUE); /* Configure destination field IP address from an IPv6 header */ ADF_CSR_WR(aram_csr_base, ADF_C4XXX_IC_PARSE_IPV6_OFFSET_3, ADF_C4XXX_DEFAULT_IC_PARSE_IPV6_OFFS_3_VALUE); ADF_CSR_WR(aram_csr_base, ADF_C4XXX_IC_PARSE_IPV6_LENGTH_3, ADF_C4XXX_DEFAULT_IC_PARSE_IPV6_LEN_3_VALUE); } static int adf_get_inline_ipsec_algo_group(struct adf_accel_dev *accel_dev, unsigned long *ipsec_algo_group) { char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES]; if (adf_cfg_get_param_value( accel_dev, ADF_INLINE_SEC, ADF_INLINE_IPSEC_ALGO_GROUP, val)) return EFAULT; if (kstrtoul(val, 0, ipsec_algo_group)) return EFAULT; /* Verify the ipsec_algo_group */ if (*ipsec_algo_group >= IPSEC_ALGO_GROUP_DELIMITER) { device_printf( GET_DEV(accel_dev), "Unsupported IPSEC algo group %lu in config file!\n", *ipsec_algo_group); return EFAULT; } return 0; } static int c4xxx_init_inline_hw(struct adf_accel_dev *accel_dev) { u32 sa_entry_reg_value = 0; u32 sa_fn_lim = 0; u32 supported_algo = 0; struct resource *aram_csr_base; u32 offset; unsigned long ipsec_algo_group = IPSEC_DEFAUL_ALGO_GROUP; aram_csr_base = (&GET_BARS(accel_dev)[ADF_C4XXX_SRAM_BAR])->virt_addr; if (adf_get_inline_ipsec_algo_group(accel_dev, &ipsec_algo_group)) return EFAULT; sa_entry_reg_value |= (ADF_C4XXX_DEFAULT_LU_KEY_LEN << ADF_C4XXX_LU_KEY_LEN_BIT_OFFSET); if (ipsec_algo_group == IPSEC_DEFAUL_ALGO_GROUP) { sa_entry_reg_value |= ADF_C4XXX_DEFAULT_SA_SIZE; sa_fn_lim = ADF_C4XXX_FUNC_LIMIT(accel_dev, ADF_C4XXX_DEFAULT_SA_SIZE); supported_algo = ADF_C4XXX_DEFAULT_SUPPORTED_ALGORITHMS; } else if (ipsec_algo_group == IPSEC_ALGO_GROUP1) { sa_entry_reg_value |= ADF_C4XXX_ALGO_GROUP1_SA_SIZE; sa_fn_lim = ADF_C4XXX_FUNC_LIMIT(accel_dev, ADF_C4XXX_ALGO_GROUP1_SA_SIZE); supported_algo = 
ADF_C4XXX_SUPPORTED_ALGORITHMS_GROUP1; } else { return EFAULT; } /* REG_SA_ENTRY_CTRL register initialisation */ ADF_CSR_WR(aram_csr_base, ADF_C4XXX_REG_SA_ENTRY_CTRL, sa_entry_reg_value); /* REG_SAL_FUNC_LIMITS register initialisation. Only the first register * needs to be initialised to enable as it is assigned to a physical * function. Other registers will be initialised by the LAN PF driver. * The function limits is initialised to its maximal value. */ ADF_CSR_WR(aram_csr_base, ADF_C4XXX_REG_SA_FUNC_LIMITS, sa_fn_lim); /* Initialize REG_SA_SCRATCH[0] register to * advertise supported crypto algorithms */ ADF_CSR_WR(aram_csr_base, ADF_C4XXX_REG_SA_SCRATCH_0, supported_algo); /* REG_SA_SCRATCH[2] register initialisation * to advertise supported crypto offload features. */ ADF_CSR_WR(aram_csr_base, ADF_C4XXX_REG_SA_SCRATCH_2, ADF_C4XXX_DEFAULT_CY_OFFLOAD_FEATURES); /* Overwrite default MAC_CFG register in ingress offset */ ADF_CSR_WR64(aram_csr_base, ADF_C4XXX_MAC_CFG + ADF_C4XXX_INLINE_INGRESS_OFFSET, ADF_C4XXX_MAC_CFG_VALUE); /* Overwrite default MAC_CFG register in egress offset */ ADF_CSR_WR64(aram_csr_base, ADF_C4XXX_MAC_CFG + ADF_C4XXX_INLINE_EGRESS_OFFSET, ADF_C4XXX_MAC_CFG_VALUE); /* Overwrite default MAC_PIA_CFG * (Packet Interface Adapter Configuration) registers * in ingress offset */ ADF_CSR_WR64(aram_csr_base, ADF_C4XXX_MAC_PIA_CFG + ADF_C4XXX_INLINE_INGRESS_OFFSET, ADF_C4XXX_MAC_PIA_CFG_VALUE); /* Overwrite default MAC_PIA_CFG in egress offset */ ADF_CSR_WR64(aram_csr_base, ADF_C4XXX_MAC_PIA_CFG + ADF_C4XXX_INLINE_EGRESS_OFFSET, ADF_C4XXX_MAC_PIA_CFG_VALUE); c4xxx_enable_parse_extraction(accel_dev); ADF_CSR_WR(aram_csr_base, ADF_C4XXX_INGRESS_CMD_DIS_MISC, ADF_C4XXX_REG_CMD_DIS_MISC_DEFAULT_VALUE); ADF_CSR_WR(aram_csr_base, ADF_C4XXX_EGRESS_CMD_DIS_MISC, ADF_C4XXX_REG_CMD_DIS_MISC_DEFAULT_VALUE); /* Set bits<1:0> in ADF_C4XXX_INLINE_CAPABILITY register to * advertize that both ingress and egress directions are available */ ADF_CSR_WR(aram_csr_base, ADF_C4XXX_REG_SA_INLINE_CAPABILITY, ADF_C4XXX_INLINE_CAPABILITIES); /* Set error notification configuration of ingress */ offset = ADF_C4XXX_INLINE_INGRESS_OFFSET; c4xxx_init_error_notification_configuration(accel_dev, offset); /* Set error notification configuration of egress */ offset = ADF_C4XXX_INLINE_EGRESS_OFFSET; c4xxx_init_error_notification_configuration(accel_dev, offset); return 0; } static void adf_enable_inline_notification(struct adf_accel_dev *accel_dev) { struct resource *aram_csr_base; aram_csr_base = (&GET_BARS(accel_dev)[ADF_C4XXX_SRAM_BAR])->virt_addr; /* Set bit<0> in ADF_C4XXX_REG_SA_INLINE_ENABLE to advertise * that inline is enabled. 
*/ ADF_CSR_WR(aram_csr_base, ADF_C4XXX_REG_SA_INLINE_ENABLE, ADF_C4XXX_INLINE_ENABLED); } static int c4xxx_init_aram_config(struct adf_accel_dev *accel_dev) { u32 aram_size = ADF_C4XXX_2MB_ARAM_SIZE; u32 ibuff_mem_needed = 0; u32 usable_aram_size = 0; struct adf_hw_aram_info *aram_info; u32 sa_db_ctl_value; struct resource *aram_csr_base; u8 profile = 0; u32 sadb_size = 0; u32 sa_size = 0; unsigned long ipsec_algo_group = IPSEC_DEFAUL_ALGO_GROUP; u32 i; if (accel_dev->au_info->num_inline_au > 0) if (adf_get_inline_ipsec_algo_group(accel_dev, &ipsec_algo_group)) return EFAULT; /* Allocate memory for adf_hw_aram_info */ aram_info = kzalloc(sizeof(*accel_dev->aram_info), GFP_KERNEL); if (!aram_info) return ENOMEM; /* Initialise Inline direction */ aram_info->inline_direction_egress_mask = 0; if (accel_dev->au_info->num_inline_au) { /* Set inline direction bitmap in the ARAM to * inform firmware which ME is egress */ aram_info->inline_direction_egress_mask = accel_dev->au_info->inline_egress_msk; /* User profile is valid, we can now add it * in the ARAM partition table */ aram_info->inline_congest_mngt_profile = profile; } /* Initialise DC ME mask, "1" = ME is used for DC operations */ aram_info->dc_ae_mask = accel_dev->au_info->dc_ae_msk; /* Initialise CY ME mask, "1" = ME is used for CY operations * Since asym service can also be enabled on inline AEs, here * we use the sym ae mask for configuring the cy_ae_msk */ aram_info->cy_ae_mask = accel_dev->au_info->sym_ae_msk; /* Configure number of long words in the ARAM */ aram_info->num_aram_lw_entries = ADF_C4XXX_NUM_ARAM_ENTRIES; /* Reset region offset values to 0xffffffff */ aram_info->mmp_region_offset = ~aram_info->mmp_region_offset; aram_info->skm_region_offset = ~aram_info->skm_region_offset; aram_info->inter_buff_aram_region_offset = ~aram_info->inter_buff_aram_region_offset; /* Determine ARAM size */ aram_csr_base = (&GET_BARS(accel_dev)[ADF_C4XXX_SRAM_BAR])->virt_addr; sa_db_ctl_value = ADF_CSR_RD(aram_csr_base, ADF_C4XXX_REG_SA_DB_CTRL); aram_size = (sa_db_ctl_value & ADF_C4XXX_SADB_SIZE_BIT) ? ADF_C4XXX_2MB_ARAM_SIZE : ADF_C4XXX_4MB_ARAM_SIZE; device_printf(GET_DEV(accel_dev), "Total available accelerator memory: %uMB\n", aram_size / ADF_C4XXX_1MB_SIZE); /* Compute MMP region offset */ aram_info->mmp_region_size = ADF_C4XXX_DEFAULT_MMP_REGION_SIZE; aram_info->mmp_region_offset = aram_size - aram_info->mmp_region_size; if (accel_dev->au_info->num_cy_au || accel_dev->au_info->num_inline_au) { /* Crypto is available therefore we must * include space in the ARAM for SKM. */ aram_info->skm_region_size = ADF_C4XXX_DEFAULT_SKM_REGION_SIZE; /* Compute SKM region offset */ aram_info->skm_region_offset = aram_size - (aram_info->mmp_region_size + aram_info->skm_region_size); } /* SADB always start at offset 0. */ if (accel_dev->au_info->num_inline_au) { /* Inline is available therefore we must * use remaining ARAM for the SADB. */ sadb_size = aram_size - (aram_info->mmp_region_size + aram_info->skm_region_size); /* * When the inline service is enabled, the policy is that * compression gives up it's space in ARAM to allow for a * larger SADB. Compression must use DRAM instead of ARAM. */ aram_info->inter_buff_aram_region_size = 0; /* the SADB size must be an integral multiple of the SA size */ if (ipsec_algo_group == IPSEC_DEFAUL_ALGO_GROUP) { sa_size = ADF_C4XXX_DEFAULT_SA_SIZE; } else { /* IPSEC_ALGO_GROUP1 * Total 2 algo groups. 
			sa_size = ADF_C4XXX_ALGO_GROUP1_SA_SIZE;
		}
		sadb_size = sadb_size -
		    (sadb_size % ADF_C4XXX_SA_SIZE_IN_BYTES(sa_size));
		aram_info->sadb_region_size = sadb_size;
	}

	if (accel_dev->au_info->num_dc_au &&
	    !accel_dev->au_info->num_inline_au) {
		/* Compression is available therefore we must see if there is
		 * space in the ARAM for intermediate buffers.
		 */
		aram_info->inter_buff_aram_region_size = 0;
		usable_aram_size = aram_size -
		    (aram_info->mmp_region_size + aram_info->skm_region_size);

		for (i = 1; i <= accel_dev->au_info->num_dc_au; i++) {
			if ((i * ADF_C4XXX_AU_COMPR_INTERM_SIZE) >
			    usable_aram_size)
				break;

			ibuff_mem_needed = i * ADF_C4XXX_AU_COMPR_INTERM_SIZE;
		}

		/* Set remaining ARAM to intermediate buffers. Firmware handles
		 * fallback to DRAM for cases where the number of AUs assigned
		 * to compression exceeds the available ARAM memory.
		 */
		aram_info->inter_buff_aram_region_size = ibuff_mem_needed;

		/* If ARAM is used for compression set its initial offset. */
		if (aram_info->inter_buff_aram_region_size)
			aram_info->inter_buff_aram_region_offset = 0;
	}

	accel_dev->aram_info = aram_info;

	return 0;
}

static void
c4xxx_exit_aram_config(struct adf_accel_dev *accel_dev)
{
	kfree(accel_dev->aram_info);
	accel_dev->aram_info = NULL;
}

static u32
get_num_accel_units(struct adf_hw_device_data *self)
{
	u32 i = 0, num_accel = 0;
	unsigned long accel_mask = 0;

	if (!self || !self->accel_mask)
		return 0;

	accel_mask = self->accel_mask;

	for_each_set_bit(i, &accel_mask, ADF_C4XXX_MAX_ACCELERATORS) {
		num_accel++;
	}

	return num_accel / ADF_C4XXX_NUM_ACCEL_PER_AU;
}

static int
get_accel_unit(struct adf_hw_device_data *self,
	       struct adf_accel_unit **accel_unit)
{
	enum dev_sku_info sku;

	sku = get_sku(self);

	switch (sku) {
	case DEV_SKU_1:
	case DEV_SKU_1_CY:
		*accel_unit = adf_c4xxx_au_32_ae;
		break;
	case DEV_SKU_2:
	case DEV_SKU_2_CY:
		*accel_unit = adf_c4xxx_au_24_ae;
		break;
	case DEV_SKU_3:
	case DEV_SKU_3_CY:
		*accel_unit = adf_c4xxx_au_12_ae;
		break;
	default:
		*accel_unit = adf_c4xxx_au_emulation;
		break;
	}
	return 0;
}

static int
get_ae_info(struct adf_hw_device_data *self, const struct adf_ae_info **ae_info)
{
	enum dev_sku_info sku;

	sku = get_sku(self);

	switch (sku) {
	case DEV_SKU_1:
		*ae_info = adf_c4xxx_32_ae;
		break;
	case DEV_SKU_1_CY:
		*ae_info = adf_c4xxx_32_ae_sym;
		break;
	case DEV_SKU_2:
		*ae_info = adf_c4xxx_24_ae;
		break;
	case DEV_SKU_2_CY:
		*ae_info = adf_c4xxx_24_ae_sym;
		break;
	case DEV_SKU_3:
		*ae_info = adf_c4xxx_12_ae;
		break;
	case DEV_SKU_3_CY:
		*ae_info = adf_c4xxx_12_ae_sym;
		break;
	default:
		*ae_info = adf_c4xxx_12_ae;
		break;
	}
	return 0;
}

static int
adf_add_debugfs_info(struct adf_accel_dev *accel_dev)
{
	/* Add Accel Unit configuration table to debug FS interface */
	if (c4xxx_init_ae_config(accel_dev)) {
		device_printf(GET_DEV(accel_dev),
			      "Failed to create entry for AE configuration\n");
		return EFAULT;
	}

	return 0;
}

static void
adf_remove_debugfs_info(struct adf_accel_dev *accel_dev)
{
	/* Remove Accel Unit configuration table from debug FS interface */
	c4xxx_exit_ae_config(accel_dev);
}

static int
check_svc_to_hw_capabilities(struct adf_accel_dev *accel_dev,
			     const char *svc_name,
			     enum icp_qat_capabilities_mask cap)
{
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	u32 hw_cap = hw_data->accel_capabilities_mask;

	hw_cap &= cap;
	if (hw_cap != cap) {
		device_printf(GET_DEV(accel_dev),
			      "Service not supported by accelerator: %s\n",
			      svc_name);
		return EPERM;
	}

	return 0;
}

static int
check_accel_unit_config(struct adf_accel_dev *accel_dev,
			u8 num_cy_au,
			u8 num_dc_au,
			u8 num_inline_au)
{
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
	char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
	u32 num_au = hw_data->get_num_accel_units(hw_data);
	u32 service_mask = ADF_ACCEL_SERVICE_NULL;
	char *token, *cur_str;
	int ret = 0;

	/* Get the services enabled by the user */
	snprintf(key, sizeof(key), ADF_SERVICES_ENABLED);
	if (adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC, key, val))
		return EFAULT;
	cur_str = val;
	token = strsep(&cur_str, ADF_SERVICES_SEPARATOR);
	while (token) {
		if (!strncmp(token, ADF_SERVICE_CY, strlen(ADF_SERVICE_CY))) {
			service_mask |= ADF_ACCEL_CRYPTO;
			ret |= check_svc_to_hw_capabilities(
			    accel_dev,
			    token,
			    ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC |
				ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC);
		}

		if (!strncmp(token, ADF_CFG_SYM, strlen(ADF_CFG_SYM))) {
			service_mask |= ADF_ACCEL_CRYPTO;
			ret |= check_svc_to_hw_capabilities(
			    accel_dev,
			    token,
			    ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC);
		}

		if (!strncmp(token, ADF_CFG_ASYM, strlen(ADF_CFG_ASYM))) {
			/* Handle the special case of the services
			 * 'asym;inline' being enabled, where ASYM is handled
			 * by the Inline firmware at AE level. This
			 * configuration allows enabling the ASYM service
			 * without accel units assigned to the CRYPTO
			 * service, e.g.
			 * num_inline_au = 6
			 * num_cy_au = 0
			 */
			if (num_inline_au < num_au)
				service_mask |= ADF_ACCEL_CRYPTO;

			ret |= check_svc_to_hw_capabilities(
			    accel_dev,
			    token,
			    ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC);
		}

		if (!strncmp(token, ADF_SERVICE_DC, strlen(ADF_SERVICE_DC))) {
			service_mask |= ADF_ACCEL_COMPRESSION;
			ret |= check_svc_to_hw_capabilities(
			    accel_dev,
			    token,
			    ICP_ACCEL_CAPABILITIES_COMPRESSION);
		}

		if (!strncmp(token,
			     ADF_SERVICE_INLINE,
			     strlen(ADF_SERVICE_INLINE))) {
			service_mask |= ADF_ACCEL_INLINE_CRYPTO;
			ret |= check_svc_to_hw_capabilities(
			    accel_dev, token, ICP_ACCEL_CAPABILITIES_INLINE);
		}

		token = strsep(&cur_str, ADF_SERVICES_SEPARATOR);
	}

	/* Ensure the user doesn't enable services that are not supported by
	 * the accelerator.
	 */
	if (ret) {
		device_printf(GET_DEV(accel_dev),
			      "Invalid accelerator configuration.\n");
		return EFAULT;
	}

	if (!(service_mask & ADF_ACCEL_COMPRESSION) && num_dc_au > 0) {
		device_printf(GET_DEV(accel_dev),
			      "Invalid accel unit config.\n");
		device_printf(
		    GET_DEV(accel_dev),
		    "DC accel units set when dc service not enabled\n");
		return EFAULT;
	}

	if (!(service_mask & ADF_ACCEL_CRYPTO) && num_cy_au > 0) {
		device_printf(GET_DEV(accel_dev),
			      "Invalid accel unit config.\n");
		device_printf(
		    GET_DEV(accel_dev),
		    "CY accel units set when cy service not enabled\n");
		return EFAULT;
	}

	if (!(service_mask & ADF_ACCEL_INLINE_CRYPTO) && num_inline_au > 0) {
		device_printf(GET_DEV(accel_dev),
			      "Invalid accel unit config.\n"
			      "Inline feature not supported.\n");
		return EFAULT;
	}

	hw_data->service_mask = service_mask;
	/* Ensure the user doesn't allocate more than max accel units */
	if (num_au != (num_cy_au + num_dc_au + num_inline_au)) {
		device_printf(GET_DEV(accel_dev),
			      "Invalid accel unit config.\n");
		device_printf(GET_DEV(accel_dev),
			      "Max accel units is %d\n",
			      num_au);
		return EFAULT;
	}

	/* Ensure user allocates hardware resources for enabled services */
	if (!num_cy_au && (service_mask & ADF_ACCEL_CRYPTO)) {
		device_printf(GET_DEV(accel_dev),
			      "Failed to enable cy service!\n");
		device_printf(GET_DEV(accel_dev),
			      "%s should not be 0",
			      ADF_NUM_CY_ACCEL_UNITS);
		return EFAULT;
	}
	if (!num_dc_au && (service_mask & ADF_ACCEL_COMPRESSION)) {
		device_printf(GET_DEV(accel_dev),
			      "Failed to enable dc service!\n");
		device_printf(GET_DEV(accel_dev),
			      "%s should not be 0",
			      ADF_NUM_DC_ACCEL_UNITS);
		return EFAULT;
	}
	if (!num_inline_au && (service_mask & ADF_ACCEL_INLINE_CRYPTO)) {
		device_printf(GET_DEV(accel_dev), "Failed to enable");
		device_printf(GET_DEV(accel_dev), " inline service!");
		device_printf(GET_DEV(accel_dev),
			      " %s should not be 0\n",
			      ADF_NUM_INLINE_ACCEL_UNITS);
		return EFAULT;
	}

	return 0;
}

static int
get_accel_unit_config(struct adf_accel_dev *accel_dev,
		      u8 *num_cy_au,
		      u8 *num_dc_au,
		      u8 *num_inline_au)
{
	char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
	char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];

	/* Get the number of accel units allocated for each service */
	snprintf(key, sizeof(key), ADF_NUM_CY_ACCEL_UNITS);
	if (adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC, key, val))
		return EFAULT;
	if (compat_strtou8(val, 10, num_cy_au))
		return EFAULT;

	snprintf(key, sizeof(key), ADF_NUM_DC_ACCEL_UNITS);
	if (adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC, key, val))
		return EFAULT;
	if (compat_strtou8(val, 10, num_dc_au))
		return EFAULT;

	snprintf(key, sizeof(key), ADF_NUM_INLINE_ACCEL_UNITS);
	if (adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC, key, val))
		return EFAULT;
	if (compat_strtou8(val, 10, num_inline_au))
		return EFAULT;

	return 0;
}
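/*
 * Illustrative arithmetic for the ingress/egress AE split computed in
 * adf_get_inline_config() below (example numbers only): with three inline
 * accel units of four AEs each (num_inline_aes = 12) and an ingress share
 * of 60%, the base ingress AE count is 12 * 60 / 100 = 7; the remainder
 * (720 % 100 = 20) is then compared against ADF_C4XXX_ROUND_LIMIT to decide
 * whether one more AE is moved to the ingress side.
 */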
/* Function reads the inline ingress/egress configuration
 * and returns the number of AEs reserved for ingress
 * and egress for accel units which are allocated for
 * inline service
 */
static int
adf_get_inline_config(struct adf_accel_dev *accel_dev, u32 *num_ingress_aes)
{
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
	char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
	char *value;
	u32 num_au = hw_data->get_num_accel_units(hw_data);
	unsigned long ingress, egress = 0;
	struct adf_accel_unit *accel_unit = accel_dev->au_info->au;
	u32 num_inline_aes = 0, num_ingress_ae = 0;
	u32 i = 0;

	snprintf(key, sizeof(key), ADF_INLINE_INGRESS);
	if (adf_cfg_get_param_value(accel_dev, ADF_INLINE_SEC, key, val)) {
		device_printf(GET_DEV(accel_dev), "Failed to find ingress\n");
		return EFAULT;
	}
	value = val;
	value = strsep(&value, ADF_C4XXX_PERCENTAGE);
	if (compat_strtoul(value, 10, &ingress))
		return EFAULT;

	snprintf(key, sizeof(key), ADF_INLINE_EGRESS);
	if (adf_cfg_get_param_value(accel_dev, ADF_INLINE_SEC, key, val)) {
		device_printf(GET_DEV(accel_dev), "Failed to find egress\n");
		return EFAULT;
	}
	value = val;
	value = strsep(&value, ADF_C4XXX_PERCENTAGE);
	if (compat_strtoul(value, 10, &egress))
		return EFAULT;

	if (ingress + egress != ADF_C4XXX_100) {
		device_printf(GET_DEV(accel_dev),
			      "The sum of ingress and egress should be 100\n");
		return EFAULT;
	}

	for (i = 0; i < num_au; i++) {
		if (accel_unit[i].services == ADF_ACCEL_INLINE_CRYPTO)
			num_inline_aes += accel_unit[i].num_ae;
	}

	num_ingress_ae = num_inline_aes * ingress / ADF_C4XXX_100;
	if (((num_inline_aes * ingress) % ADF_C4XXX_100) >
	    ADF_C4XXX_ROUND_LIMIT)
		num_ingress_ae++;

	*num_ingress_aes = num_ingress_ae;
	return 0;
}

static int
adf_set_inline_ae_mask(struct adf_accel_dev *accel_dev)
{
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	u32 num_au = hw_data->get_num_accel_units(hw_data);
	struct adf_accel_unit_info *au_info = accel_dev->au_info;
	struct adf_accel_unit *accel_unit = accel_dev->au_info->au;
	u32 num_ingress_ae = 0;
	u32 ingress_msk = 0;
	u32 i, j, ae_mask;

	if (adf_get_inline_config(accel_dev, &num_ingress_ae))
		return EFAULT;

	for (i = 0; i < num_au; i++) {
		j = 0;
		if (accel_unit[i].services == ADF_ACCEL_INLINE_CRYPTO) {
			/* AEs with inline service enabled are also used
			 * for asymmetric crypto
			 */
			au_info->asym_ae_msk |= accel_unit[i].ae_mask;
			ae_mask = accel_unit[i].ae_mask;
			while (num_ingress_ae && ae_mask) {
				if (ae_mask & 1) {
					ingress_msk |= BIT(j);
					num_ingress_ae--;
				}
				ae_mask = ae_mask >> 1;
				j++;
			}
			au_info->inline_ingress_msk |= ingress_msk;

			au_info->inline_egress_msk |=
			    ~(au_info->inline_ingress_msk) &
			    accel_unit[i].ae_mask;
		}
	}

	return 0;
}

static int
adf_set_ae_mask(struct adf_accel_dev *accel_dev)
{
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	u32 num_au = hw_data->get_num_accel_units(hw_data);
	struct adf_accel_unit_info *au_info = accel_dev->au_info;
	struct adf_accel_unit *accel_unit = accel_dev->au_info->au;
	char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
	char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
	char *token, *cur_str;
	bool asym_en = false, sym_en = false;
	u32 i;

	/* Get the services enabled by user */
	snprintf(key, sizeof(key), ADF_SERVICES_ENABLED);
	if (adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC, key, val))
		return EFAULT;
	cur_str = val;
	token = strsep(&cur_str, ADF_SERVICES_SEPARATOR);
	while (token) {
		if (!strncmp(token, ADF_CFG_ASYM, strlen(ADF_CFG_ASYM)))
			asym_en = true;
		if (!strncmp(token, ADF_CFG_SYM, strlen(ADF_CFG_SYM)))
			sym_en = true;
		if (!strncmp(token, ADF_CFG_CY, strlen(ADF_CFG_CY))) {
			sym_en = true;
			asym_en = true;
		}
		token = strsep(&cur_str, ADF_SERVICES_SEPARATOR);
	}

	for (i = 0; i < num_au; i++) {
		if (accel_unit[i].services == ADF_ACCEL_CRYPTO) {
			/* AEs that support crypto can perform both
			 * symmetric and asymmetric crypto, however
			 * we only enable the threads if the relevant
			 * service is also enabled
			 */
			if (asym_en)
				au_info->asym_ae_msk |= accel_unit[i].ae_mask;
			if (sym_en)
				au_info->sym_ae_msk |= accel_unit[i].ae_mask;
		} else if (accel_unit[i].services == ADF_ACCEL_COMPRESSION) {
			au_info->dc_ae_msk |= accel_unit[i].comp_ae_mask;
		}
	}

	return 0;
}
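/*
 * Accel unit assignment order used in adf_init_accel_unit_services() below:
 * compression units are claimed first (units with ADF_C4XXX_4_AE engines are
 * preferred, walking the table from the end), then inline units, then crypto
 * units, both walking from the start. Illustrative split (example numbers
 * only): with six accel units and a dc/inline/cy request of 1/1/4, the last
 * 4-AE unit becomes DC, the first free unit becomes inline and the next four
 * free units become crypto.
 */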
static int
adf_init_accel_unit_services(struct adf_accel_dev *accel_dev)
{
	u8 num_cy_au, num_dc_au, num_inline_au;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	u32 num_au = hw_data->get_num_accel_units(hw_data);
	struct adf_accel_unit *accel_unit;
	const struct adf_ae_info *ae_info;
	int i;

	if (get_accel_unit_config(
		accel_dev, &num_cy_au, &num_dc_au, &num_inline_au)) {
		device_printf(GET_DEV(accel_dev), "Invalid accel unit cfg\n");
		return EFAULT;
	}

	if (check_accel_unit_config(
		accel_dev, num_cy_au, num_dc_au, num_inline_au))
		return EFAULT;

	accel_dev->au_info = kzalloc(sizeof(*accel_dev->au_info), GFP_KERNEL);
	if (!accel_dev->au_info)
		return ENOMEM;

	accel_dev->au_info->num_cy_au = num_cy_au;
	accel_dev->au_info->num_dc_au = num_dc_au;
	accel_dev->au_info->num_inline_au = num_inline_au;

	if (get_ae_info(hw_data, &ae_info)) {
		device_printf(GET_DEV(accel_dev), "Failed to get ae info\n");
		goto err_au_info;
	}
	accel_dev->au_info->ae_info = ae_info;

	if (get_accel_unit(hw_data, &accel_unit)) {
		device_printf(GET_DEV(accel_dev),
			      "Failed to get accel unit\n");
		goto err_ae_info;
	}

	/* Enable compression accel units */
	/* Accel units with 4 AEs are reserved for compression first */
	for (i = num_au - 1; i >= 0 && num_dc_au > 0; i--) {
		if (accel_unit[i].num_ae == ADF_C4XXX_4_AE) {
			accel_unit[i].services = ADF_ACCEL_COMPRESSION;
			num_dc_au--;
		}
	}
	for (i = num_au - 1; i >= 0 && num_dc_au > 0; i--) {
		if (accel_unit[i].services == ADF_ACCEL_SERVICE_NULL) {
			accel_unit[i].services = ADF_ACCEL_COMPRESSION;
			num_dc_au--;
		}
	}

	/* Enable inline accel units */
	for (i = 0; i < num_au && num_inline_au > 0; i++) {
		if (accel_unit[i].services == ADF_ACCEL_SERVICE_NULL) {
			accel_unit[i].services = ADF_ACCEL_INLINE_CRYPTO;
			num_inline_au--;
		}
	}

	/* Enable crypto accel units */
	for (i = 0; i < num_au && num_cy_au > 0; i++) {
		if (accel_unit[i].services == ADF_ACCEL_SERVICE_NULL) {
			accel_unit[i].services = ADF_ACCEL_CRYPTO;
			num_cy_au--;
		}
	}

	accel_dev->au_info->au = accel_unit;
	return 0;

err_ae_info:
	accel_dev->au_info->ae_info = NULL;
err_au_info:
	kfree(accel_dev->au_info);
	accel_dev->au_info = NULL;
	return EFAULT;
}

static void
adf_exit_accel_unit_services(struct adf_accel_dev *accel_dev)
{
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	u32 num_au = hw_data->get_num_accel_units(hw_data);
	int i;

	if (accel_dev->au_info) {
		if (accel_dev->au_info->au) {
			for (i = 0; i < num_au; i++) {
				accel_dev->au_info->au[i].services =
				    ADF_ACCEL_SERVICE_NULL;
			}
		}
		accel_dev->au_info->au = NULL;
		accel_dev->au_info->ae_info = NULL;
		kfree(accel_dev->au_info);
		accel_dev->au_info = NULL;
	}
}

static inline void
adf_c4xxx_reset_hw_units(struct adf_accel_dev *accel_dev)
{
	struct resource *pmisc =
	    (&GET_BARS(accel_dev)[ADF_C4XXX_PMISC_BAR])->virt_addr;

	u32 global_clk_enable = ADF_C4XXX_GLOBAL_CLK_ENABLE_GENERIC_ARAM |
	    ADF_C4XXX_GLOBAL_CLK_ENABLE_GENERIC_ICI_ENABLE |
	    ADF_C4XXX_GLOBAL_CLK_ENABLE_GENERIC_ICE_ENABLE;

	u32 ixp_reset_generic = ADF_C4XXX_IXP_RESET_GENERIC_ARAM |
	    ADF_C4XXX_IXP_RESET_GENERIC_INLINE_EGRESS |
	    ADF_C4XXX_IXP_RESET_GENERIC_INLINE_INGRESS;

	/* To properly reset each of the units the driver must:
	 * 1) Assert reset using the IXP reset generic register;
	 * 2) Disable the generic clock;
	 * 3) Take the device out of reset by clearing the IXP reset
	 *    generic register;
	 * 4) Re-enable the generic clock.
	 */
	ADF_CSR_WR(pmisc, ADF_C4XXX_IXP_RESET_GENERIC, ixp_reset_generic);
	ADF_CSR_WR(pmisc,
		   ADF_C4XXX_GLOBAL_CLK_ENABLE_GENERIC,
		   ADF_C4XXX_GLOBAL_CLK_ENABLE_GENERIC_DISABLE_ALL);
	ADF_CSR_WR(pmisc,
		   ADF_C4XXX_IXP_RESET_GENERIC,
		   ADF_C4XXX_IXP_RESET_GENERIC_OUT_OF_RESET_TRIGGER);
	ADF_CSR_WR(pmisc,
		   ADF_C4XXX_GLOBAL_CLK_ENABLE_GENERIC,
		   global_clk_enable);
}

static int
adf_init_accel_units(struct adf_accel_dev *accel_dev)
{
	struct resource *csr =
	    (&GET_BARS(accel_dev)[ADF_C4XXX_PMISC_BAR])->virt_addr;

	if (adf_init_accel_unit_services(accel_dev))
		return EFAULT;

	/* Set cy and dc enabled AE masks */
	if (accel_dev->au_info->num_cy_au || accel_dev->au_info->num_dc_au) {
		if (adf_set_ae_mask(accel_dev)) {
			device_printf(GET_DEV(accel_dev),
				      "Failed to set ae masks\n");
			goto err_au;
		}
	}
	/* Set ingress/egress ae mask if inline is enabled */
	if (accel_dev->au_info->num_inline_au) {
		if (adf_set_inline_ae_mask(accel_dev)) {
			device_printf(GET_DEV(accel_dev),
				      "Failed to set inline ae masks\n");
			goto err_au;
		}
	}
	/* Define ARAM regions */
	if (c4xxx_init_aram_config(accel_dev)) {
		device_printf(GET_DEV(accel_dev),
			      "Failed to init aram config\n");
		goto err_au;
	}
	/* Configure h/w registers for inline operations */
	if (accel_dev->au_info->num_inline_au > 0)
		/* Initialise configuration parsing registers */
		if (c4xxx_init_inline_hw(accel_dev))
			goto err_au;

	c4xxx_set_sadb_size(accel_dev);

	if (accel_dev->au_info->num_inline_au > 0) {
		/* ICI/ICE interrupts shall be enabled after MSI-X is
		 * enabled
		 */
		ADF_CSR_WR(csr,
			   ADF_C4XXX_ERRMSK11,
			   ADF_C4XXX_ERRMSK11_ERR_DISABLE_ICI_ICE_INTR);
		adf_enable_inline_notification(accel_dev);
	}

	update_hw_capability(accel_dev);
	if (adf_add_debugfs_info(accel_dev)) {
		device_printf(GET_DEV(accel_dev),
			      "Failed to add debug FS information\n");
		goto err_au;
	}
	return 0;

err_au:
	/* Free and clear accel unit data structures */
	adf_exit_accel_unit_services(accel_dev);
	return EFAULT;
}

static void
adf_exit_accel_units(struct adf_accel_dev *accel_dev)
{
	adf_exit_accel_unit_services(accel_dev);
	/* Free aram mapping structure */
	c4xxx_exit_aram_config(accel_dev);
	/* Remove entries in debug FS */
	adf_remove_debugfs_info(accel_dev);
}

static const char *
get_obj_name(struct adf_accel_dev *accel_dev,
	     enum adf_accel_unit_services service)
{
	u32 capabilities = GET_HW_DATA(accel_dev)->accel_capabilities_mask;
	bool sym_only_sku = false;

	/* Check if the SKU is capable only of symmetric cryptography
	 * via device capabilities.
	 */
	if ((capabilities & ADF_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC) &&
	    !(capabilities & ADF_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC) &&
	    !(capabilities & ADF_ACCEL_CAPABILITIES_COMPRESSION))
		sym_only_sku = true;

	switch (service) {
	case ADF_ACCEL_INLINE_CRYPTO:
		return ADF_C4XXX_INLINE_OBJ;
	case ADF_ACCEL_CRYPTO:
		if (sym_only_sku)
			return ADF_C4XXX_SYM_OBJ;
		else
			return ADF_C4XXX_CY_OBJ;
		break;
	case ADF_ACCEL_COMPRESSION:
		return ADF_C4XXX_DC_OBJ;
	default:
		return NULL;
	}
}

static uint32_t
get_objs_num(struct adf_accel_dev *accel_dev)
{
	u32 srv = 0;
	u32 max_srv_id = 0;
	unsigned long service_mask = accel_dev->hw_device->service_mask;

	/* The number of objects corresponds to the number of services */
	for_each_set_bit(srv, &service_mask, ADF_C4XXX_MAX_OBJ) {
		max_srv_id = srv;
	}

	return (max_srv_id + 1);
}

static uint32_t
get_obj_cfg_ae_mask(struct adf_accel_dev *accel_dev,
		    enum adf_accel_unit_services service)
{
	u32 ae_mask = 0;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	u32 num_au = hw_data->get_num_accel_units(hw_data);
	struct adf_accel_unit *accel_unit = accel_dev->au_info->au;
	u32 i = 0;

	if (service == ADF_ACCEL_SERVICE_NULL)
		return 0;

	for (i = 0; i < num_au; i++) {
		if (accel_unit[i].services == service)
			ae_mask |= accel_unit[i].ae_mask;
	}

	return ae_mask;
}

static void
configure_iov_threads(struct adf_accel_dev *accel_dev, bool enable)
{
	struct resource *addr;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	u32 num_aes = hw_data->get_num_aes(hw_data);
	u32 reg = 0x0;
	u32 i;

	addr = (&GET_BARS(accel_dev)[ADF_C4XXX_PMISC_BAR])->virt_addr;

	/* Set/Unset Valid bits in AE Thread to PCIe Function Mapping */
	for (i = 0; i < ADF_C4XXX_AE2FUNC_REG_PER_AE * num_aes; i++) {
		reg = ADF_CSR_RD(addr + ADF_C4XXX_AE2FUNC_MAP_OFFSET,
				 i * ADF_C4XXX_AE2FUNC_MAP_REG_SIZE);
		if (enable)
			reg |= ADF_C4XXX_AE2FUNC_MAP_VALID;
		else
			reg &= ~ADF_C4XXX_AE2FUNC_MAP_VALID;
		ADF_CSR_WR(addr + ADF_C4XXX_AE2FUNC_MAP_OFFSET,
			   i * ADF_C4XXX_AE2FUNC_MAP_REG_SIZE,
			   reg);
	}
}
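/*
 * adf_init_hw_data_c4xxx() below fills in the c4xxx-specific entry points of
 * the generic adf_hw_device_data ops table; the device-independent ADF layer
 * drives the hardware through these callbacks (for example
 * hw_data->get_num_accel_units(hw_data), as used throughout this file).
 */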
void
adf_init_hw_data_c4xxx(struct adf_hw_device_data *hw_data)
{
	hw_data->dev_class = &c4xxx_class;
	hw_data->instance_id = c4xxx_class.instances++;
	hw_data->num_banks = ADF_C4XXX_ETR_MAX_BANKS;
	hw_data->num_rings_per_bank = ADF_C4XXX_NUM_RINGS_PER_BANK;
	hw_data->num_accel = ADF_C4XXX_MAX_ACCELERATORS;
	hw_data->num_engines = ADF_C4XXX_MAX_ACCELENGINES;
	hw_data->num_logical_accel = 1;
	hw_data->tx_rx_gap = ADF_C4XXX_RX_RINGS_OFFSET;
	hw_data->tx_rings_mask = ADF_C4XXX_TX_RINGS_MASK;
	hw_data->alloc_irq = adf_isr_resource_alloc;
	hw_data->free_irq = adf_isr_resource_free;
	hw_data->enable_error_correction = adf_enable_error_correction;
	hw_data->init_ras = adf_init_ras;
	hw_data->exit_ras = adf_exit_ras;
	hw_data->ras_interrupts = adf_ras_interrupts;
	hw_data->get_accel_mask = get_accel_mask;
	hw_data->get_ae_mask = get_ae_mask;
	hw_data->get_num_accels = get_num_accels;
	hw_data->get_num_aes = get_num_aes;
	hw_data->get_num_accel_units = get_num_accel_units;
	hw_data->get_sram_bar_id = get_sram_bar_id;
	hw_data->get_etr_bar_id = get_etr_bar_id;
	hw_data->get_misc_bar_id = get_misc_bar_id;
	hw_data->get_arb_info = get_arb_info;
	hw_data->get_admin_info = get_admin_info;
	hw_data->get_errsou_offset = get_errsou_offset;
	hw_data->get_clock_speed = get_clock_speed;
	hw_data->get_eth_doorbell_msg = get_eth_doorbell_msg;
	hw_data->get_sku = get_sku;
	hw_data->heartbeat_ctr_num = ADF_NUM_THREADS_PER_AE;
	hw_data->check_prod_sku = c4xxx_check_prod_sku;
	hw_data->fw_name = ADF_C4XXX_FW;
	hw_data->fw_mmp_name = ADF_C4XXX_MMP;
	hw_data->get_obj_name = get_obj_name;
	hw_data->get_objs_num = get_objs_num;
	hw_data->get_obj_cfg_ae_mask = get_obj_cfg_ae_mask;
	hw_data->init_admin_comms = adf_init_admin_comms;
	hw_data->exit_admin_comms = adf_exit_admin_comms;
	hw_data->configure_iov_threads = configure_iov_threads;
	hw_data->disable_iov = adf_disable_sriov;
	hw_data->send_admin_init = adf_send_admin_init;
	hw_data->init_arb = adf_init_arb_c4xxx;
	hw_data->exit_arb = adf_exit_arb_c4xxx;
	hw_data->disable_arb = adf_disable_arb;
	hw_data->enable_ints = adf_enable_ints;
	hw_data->set_ssm_wdtimer = c4xxx_set_ssm_wdtimer;
	hw_data->check_slice_hang = c4xxx_check_slice_hang;
	hw_data->reset_device = adf_reset_flr;
	hw_data->restore_device = adf_c4xxx_dev_restore;
	hw_data->init_accel_units = adf_init_accel_units;
	hw_data->reset_hw_units = adf_c4xxx_reset_hw_units;
	hw_data->exit_accel_units = adf_exit_accel_units;
	hw_data->ring_to_svc_map = ADF_DEFAULT_RING_TO_SRV_MAP;
	hw_data->get_heartbeat_status = adf_get_heartbeat_status;
	hw_data->get_ae_clock = get_ae_clock;
	hw_data->clock_frequency = ADF_C4XXX_AE_FREQ;
	hw_data->measure_clock = measure_clock;
	hw_data->add_pke_stats = adf_pke_replay_counters_add_c4xxx;
	hw_data->remove_pke_stats = adf_pke_replay_counters_remove_c4xxx;
	hw_data->add_misc_error = adf_misc_error_add_c4xxx;
	hw_data->remove_misc_error = adf_misc_error_remove_c4xxx;
	hw_data->extended_dc_capabilities = 0;
	hw_data->get_storage_enabled = get_storage_enabled;
	hw_data->query_storage_cap = 0;
	hw_data->get_accel_cap = c4xxx_get_hw_cap;
	hw_data->configure_accel_units = c4xxx_configure_accel_units;
	hw_data->pre_reset = adf_dev_pre_reset;
	hw_data->post_reset = adf_dev_post_reset;
	hw_data->get_ring_to_svc_map = adf_cfg_get_services_enabled;
	hw_data->count_ras_event = adf_fw_count_ras_event;
	hw_data->config_device = adf_config_device;
	hw_data->set_asym_rings_mask = adf_cfg_set_asym_rings_mask;

	adf_gen2_init_hw_csr_info(&hw_data->csr_info);
	adf_gen2_init_pf_pfvf_ops(&hw_data->csr_info.pfvf_ops);
	hw_data->csr_info.arb_enable_mask = 0xF;
}

void
adf_clean_hw_data_c4xxx(struct adf_hw_device_data *hw_data)
{
	hw_data->dev_class->instances--;
}

void
remove_oid(struct adf_accel_dev *accel_dev, struct sysctl_oid *oid)
{
	struct sysctl_ctx_list *qat_sysctl_ctx;
	int ret;

	qat_sysctl_ctx =
	    device_get_sysctl_ctx(accel_dev->accel_pci_dev.pci_dev);

	ret = sysctl_ctx_entry_del(qat_sysctl_ctx, oid);
	if (ret)
		device_printf(GET_DEV(accel_dev), "Failed to delete entry\n");

	ret = sysctl_remove_oid(oid, 1, 1);
	if (ret)
		device_printf(GET_DEV(accel_dev), "Failed to delete oid\n");
}
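/*
 * Typical usage of remove_oid() above (illustrative; debug_oid is a
 * hypothetical struct sysctl_oid pointer obtained earlier from
 * SYSCTL_ADD_NODE() or a similar sysctl creation call on the device's
 * sysctl context):
 *
 *	remove_oid(accel_dev, debug_oid);
 *
 * This deletes the context entry and then removes the oid itself, logging
 * a warning on the device if either step fails.
 */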