diff --git a/share/man/man4/qat.4 b/share/man/man4/qat.4 index c6082f873a44..e8a46a99e949 100644 --- a/share/man/man4/qat.4 +++ b/share/man/man4/qat.4 @@ -1,127 +1,131 @@ .\" SPDX-License-Identifier: BSD-3-Clause .\" Copyright(c) 2007-2022 Intel Corporation .\" $FreeBSD$ -.Dd June 30, 2022 +.Dd September 1, 2022 .Dt QAT 4 .Os .Sh NAME .Nm qat .Nd Intel (R) QuickAssist Technology (QAT) driver .Sh SYNOPSIS To load the driver, run: .Pp .Bl -item -compact .It kldload qat .El .Pp To load the driver at boot, add these lines to .Xr loader.conf 5 , selecting the firmware suitable for the installed device(s): .Pp .Bl -item -compact .It qat_200xx_fw_load="YES" .It qat_c3xxx_fw_load="YES" .It qat_c4xxx_fw_load="YES" .It qat_c62x_fw_load="YES" .It qat_dh895xcc_fw_load="YES" .It +qat_4xxx_fw_load="YES" +.It qat_load="YES" .El .Sh DESCRIPTION The .Nm driver supports cryptography and compression acceleration on Intel (R) QuickAssist Technology (QAT) devices. .Pp The .Nm driver is intended for platforms that contain: .Bl -bullet -compact .It Intel (R) C62x Chipset .It Intel (R) Atom C3000 processor product family .It Intel (R) QuickAssist Adapter 8960/Intel (R) QuickAssist Adapter 8970 (formerly known as "Lewis Hill") .It Intel (R) Communications Chipset 8925 to 8955 Series .It Intel (R) Atom P5300 processor product family +.It +Intel (R) QAT 4xxx Series .El .Pp The .Nm driver supports cryptography and compression acceleration. A complete API for offloading these operations is exposed in the kernel and may be used by any other entity directly. For details of usage, supported operations, and algorithms, refer to the following documentation available from .Lk 01.org : .Bl -bullet -compact .It .Rs .%A Intel (R) .%T QuickAssist Technology API Programmer's Guide .Re .It .Rs .%A Intel (R) .%T QuickAssist Technology Cryptographic API Reference Manual .Re .It .Rs .%A Intel (R) .%T QuickAssist Technology Data Compression API Reference Manual .Re .It .Rs .%A Intel (R) .%T QuickAssist Technology Performance Optimization Guide .Re .El .Pp In addition to exposing a complete kernel API for offloading cryptography and compression operations, the .Nm driver also integrates with .Xr crypto 4 , allowing supported cryptography operations to be offloaded to Intel (R) QuickAssist Technology (QAT) devices. For details of usage, supported operations, and algorithms, refer to the documentation mentioned above and the .Sx SEE ALSO section. .Sh COMPATIBILITY The .Nm driver replaces the previous implementation introduced in .Fx 13.0 . The current version, in addition to .Xr crypto 4 integration, also supports data compression and exposes a complete API for offloading data compression and cryptography operations. .Sh SEE ALSO .Xr crypto 4 , .Xr ipsec 4 , .Xr pci 4 , .Xr crypto 7 , .Xr crypto 9 .Sh HISTORY The current .Nm driver first appeared in .Fx 14.0 . .Fx 13.0 included a different version of the .Nm driver. .Sh AUTHORS The .Nm driver was written by .An Intel (R) Corporation .
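The .Xr crypto 4 integration above means that in-kernel consumers reach the accelerator through the standard OpenCrypto (OCF) interface rather than the QAT-specific API. As a rough illustration, a minimal crypto(9) session setup might look like the sketch below; the function name is hypothetical, the parameter values are illustrative, and error handling is elided:

#include <sys/types.h>
#include <opencrypto/cryptodev.h>

/*
 * Illustrative sketch only (not part of this patch): open an
 * AES-256-CBC session against OCF and ask for a hardware provider,
 * which is how requests can end up on a device driven by qat(4).
 */
static int
example_qat_session(crypto_session_t *sidp)
{
	struct crypto_session_params csp = {
		.csp_mode = CSP_MODE_CIPHER,
		.csp_cipher_alg = CRYPTO_AES_CBC,
		.csp_cipher_klen = 32,	/* AES-256 key length in bytes */
		.csp_ivlen = 16,
	};

	/* Restrict provider selection to hardware drivers. */
	return (crypto_newsession(sidp, &csp, CRYPTOCAP_F_HARDWARE));
}

Passing CRYPTOCAP_F_SOFTWARE as well would also permit a software provider; the key itself may be supplied via csp_cipher_key at session creation or per request, as described in crypto(9).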
diff --git a/sys/contrib/dev/qat/qat_4xxx.bin b/sys/contrib/dev/qat/qat_4xxx.bin new file mode 100644 index 000000000000..baec3ad9ca32 Binary files /dev/null and b/sys/contrib/dev/qat/qat_4xxx.bin differ diff --git a/sys/contrib/dev/qat/qat_4xxx_mmp.bin b/sys/contrib/dev/qat/qat_4xxx_mmp.bin new file mode 100644 index 000000000000..7b7c560b97d0 Binary files /dev/null and b/sys/contrib/dev/qat/qat_4xxx_mmp.bin differ diff --git a/sys/dev/qat/include/adf_cfg_device.h b/sys/dev/qat/include/adf_cfg_device.h index 40fb91119f03..9def937cc9db 100644 --- a/sys/dev/qat/include/adf_cfg_device.h +++ b/sys/dev/qat/include/adf_cfg_device.h @@ -1,75 +1,75 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright(c) 2007-2022 Intel Corporation */ /* $FreeBSD$ */ #ifndef ADF_CFG_DEVICE_H_ #define ADF_CFG_DEVICE_H_ #include "adf_cfg.h" #include "sal_statistics_strings.h" #define ADF_CFG_STATIC_CONF_VER 2 #define ADF_CFG_STATIC_CONF_CY_ASYM_RING_SIZE 64 #define ADF_CFG_STATIC_CONF_CY_SYM_RING_SIZE 512 #define ADF_CFG_STATIC_CONF_DC_INTER_BUF_SIZE 64 #define ADF_CFG_STATIC_CONF_SAL_STATS_CFG_ENABLED 1 #define ADF_CFG_STATIC_CONF_SAL_STATS_CFG_DC 1 #define ADF_CFG_STATIC_CONF_SAL_STATS_CFG_DH 0 #define ADF_CFG_STATIC_CONF_SAL_STATS_CFG_DRBG 0 #define ADF_CFG_STATIC_CONF_SAL_STATS_CFG_DSA 0 #define ADF_CFG_STATIC_CONF_SAL_STATS_CFG_ECC 0 #define ADF_CFG_STATIC_CONF_SAL_STATS_CFG_KEYGEN 0 #define ADF_CFG_STATIC_CONF_SAL_STATS_CFG_LN 0 #define ADF_CFG_STATIC_CONF_SAL_STATS_CFG_PRIME 0 #define ADF_CFG_STATIC_CONF_SAL_STATS_CFG_RSA 0 #define ADF_CFG_STATIC_CONF_SAL_STATS_CFG_SYM 1 #define ADF_CFG_STATIC_CONF_POLL 1 #define ADF_CFG_STATIC_CONF_IRQ 0 #define ADF_CFG_STATIC_CONF_AUTO_RESET 0 #define ADF_CFG_STATIC_CONF_NUM_DC_ACCEL_UNITS 2 #define ADF_CFG_STATIC_CONF_NUM_INLINE_ACCEL_UNITS 0 #define ADF_CFG_STATIC_CONF_INST_NUM_DC 2 -#define ADF_CFG_STATIC_CONF_INST_NUM_CY_POLL 2 +#define ADF_CFG_STATIC_CONF_INST_NUM_CY_POLL 6 #define ADF_CFG_STATIC_CONF_INST_NUM_CY_IRQ 2 #define ADF_CFG_FW_STRING_TO_ID(str, acc, id) \ do { \ typeof(id) id_ = (id); \ typeof(str) str_; \ memcpy(str_, (str), sizeof(str_)); \ if (!strncmp(str_, \ ADF_SERVICES_DEFAULT, \ sizeof(ADF_SERVICES_DEFAULT))) \ *id_ = ADF_FW_IMAGE_DEFAULT; \ else if (!strncmp(str_, \ ADF_SERVICES_CRYPTO, \ sizeof(ADF_SERVICES_CRYPTO))) \ *id_ = ADF_FW_IMAGE_CRYPTO; \ else if (!strncmp(str_, \ ADF_SERVICES_COMPRESSION, \ sizeof(ADF_SERVICES_COMPRESSION))) \ *id_ = ADF_FW_IMAGE_COMPRESSION; \ else if (!strncmp(str_, \ ADF_SERVICES_CUSTOM1, \ sizeof(ADF_SERVICES_CUSTOM1))) \ *id_ = ADF_FW_IMAGE_CUSTOM1; \ else { \ *id_ = ADF_FW_IMAGE_DEFAULT; \ device_printf(GET_DEV(acc), \ "Invalid ServicesProfile: %s, " \ "using DEFAULT image\n", \ str_); \ } \ } while (0) int adf_cfg_get_ring_pairs(struct adf_cfg_device *device, struct adf_cfg_instance *inst, const char *process_name, struct adf_accel_dev *accel_dev); int adf_cfg_device_init(struct adf_cfg_device *device, struct adf_accel_dev *accel_dev); void adf_cfg_device_clear(struct adf_cfg_device *device, struct adf_accel_dev *accel_dev); #endif diff --git a/sys/dev/qat/include/common/adf_accel_devices.h b/sys/dev/qat/include/common/adf_accel_devices.h index ad0e74335259..9503069ac2a2 100644 --- a/sys/dev/qat/include/common/adf_accel_devices.h +++ b/sys/dev/qat/include/common/adf_accel_devices.h @@ -1,585 +1,668 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright(c) 2007-2022 Intel Corporation */ /* $FreeBSD$ */ #ifndef ADF_ACCEL_DEVICES_H_ #define ADF_ACCEL_DEVICES_H_ #include "qat_freebsd.h" #include
"adf_cfg_common.h" #define ADF_CFG_NUM_SERVICES 4 #define ADF_DH895XCC_DEVICE_NAME "dh895xcc" #define ADF_DH895XCCVF_DEVICE_NAME "dh895xccvf" #define ADF_C62X_DEVICE_NAME "c6xx" #define ADF_C62XVF_DEVICE_NAME "c6xxvf" #define ADF_C3XXX_DEVICE_NAME "c3xxx" #define ADF_C3XXXVF_DEVICE_NAME "c3xxxvf" #define ADF_200XX_DEVICE_NAME "200xx" #define ADF_200XXVF_DEVICE_NAME "200xxvf" #define ADF_C4XXX_DEVICE_NAME "c4xxx" #define ADF_C4XXXVF_DEVICE_NAME "c4xxxvf" +#define ADF_4XXX_DEVICE_NAME "4xxx" #define ADF_DH895XCC_PCI_DEVICE_ID 0x435 #define ADF_DH895XCCIOV_PCI_DEVICE_ID 0x443 #define ADF_C62X_PCI_DEVICE_ID 0x37c8 #define ADF_C62XIOV_PCI_DEVICE_ID 0x37c9 #define ADF_C3XXX_PCI_DEVICE_ID 0x19e2 #define ADF_C3XXXIOV_PCI_DEVICE_ID 0x19e3 #define ADF_200XX_PCI_DEVICE_ID 0x18ee #define ADF_200XXIOV_PCI_DEVICE_ID 0x18ef #define ADF_D15XX_PCI_DEVICE_ID 0x6f54 #define ADF_D15XXIOV_PCI_DEVICE_ID 0x6f55 #define ADF_C4XXX_PCI_DEVICE_ID 0x18a0 #define ADF_C4XXXIOV_PCI_DEVICE_ID 0x18a1 +#define ADF_4XXX_PCI_DEVICE_ID 0x4940 +#define ADF_401XX_PCI_DEVICE_ID 0x4942 #define IS_QAT_GEN3(ID) ({ (ID == ADF_C4XXX_PCI_DEVICE_ID); }) +static inline bool +IS_QAT_GEN4(const unsigned int id) +{ + return (id == ADF_4XXX_PCI_DEVICE_ID || id == ADF_401XX_PCI_DEVICE_ID); +} + +#define IS_QAT_GEN3_OR_GEN4(ID) (IS_QAT_GEN3(ID) || IS_QAT_GEN4(ID)) #define ADF_VF2PF_SET_SIZE 32 #define ADF_MAX_VF2PF_SET 4 #define ADF_VF2PF_SET_OFFSET(set_nr) ((set_nr)*ADF_VF2PF_SET_SIZE) #define ADF_VF2PF_VFNR_TO_SET(vf_nr) ((vf_nr) / ADF_VF2PF_SET_SIZE) #define ADF_VF2PF_VFNR_TO_MASK(vf_nr) \ ({ \ u32 vf_nr_ = (vf_nr); \ BIT((vf_nr_)-ADF_VF2PF_SET_SIZE *ADF_VF2PF_VFNR_TO_SET( \ vf_nr_)); \ }) #define ADF_DEVICE_FUSECTL_OFFSET 0x40 #define ADF_DEVICE_LEGFUSE_OFFSET 0x4C #define ADF_DEVICE_FUSECTL_MASK 0x80000000 #define ADF_PCI_MAX_BARS 3 #define ADF_DEVICE_NAME_LENGTH 32 #define ADF_ETR_MAX_RINGS_PER_BANK 16 -#define ADF_MAX_MSIX_VECTOR_NAME 16 +#define ADF_MAX_MSIX_VECTOR_NAME 32 #define ADF_DEVICE_NAME_PREFIX "qat_" #define ADF_STOP_RETRY 50 #define ADF_NUM_THREADS_PER_AE (8) #define ADF_AE_ADMIN_THREAD (7) #define ADF_NUM_PKE_STRAND (2) #define ADF_AE_STRAND0_THREAD (8) #define ADF_AE_STRAND1_THREAD (9) -#define ADF_NUM_HB_CNT_PER_AE (ADF_NUM_THREADS_PER_AE + ADF_NUM_PKE_STRAND) #define ADF_CFG_NUM_SERVICES 4 #define ADF_SRV_TYPE_BIT_LEN 3 #define ADF_SRV_TYPE_MASK 0x7 #define ADF_RINGS_PER_SRV_TYPE 2 #define ADF_THRD_ABILITY_BIT_LEN 4 #define ADF_THRD_ABILITY_MASK 0xf #define ADF_VF_OFFSET 0x8 #define ADF_MAX_FUNC_PER_DEV 0x7 #define ADF_PCI_DEV_OFFSET 0x3 #define ADF_SRV_TYPE_BIT_LEN 3 #define ADF_SRV_TYPE_MASK 0x7 #define GET_SRV_TYPE(ena_srv_mask, srv) \ (((ena_srv_mask) >> (ADF_SRV_TYPE_BIT_LEN * (srv))) & ADF_SRV_TYPE_MASK) +#define GET_CSR_OPS(accel_dev) (&(accel_dev)->hw_device->csr_info.csr_ops) + #define ADF_DEFAULT_RING_TO_SRV_MAP \ (CRYPTO | CRYPTO << ADF_CFG_SERV_RING_PAIR_1_SHIFT | \ NA << ADF_CFG_SERV_RING_PAIR_2_SHIFT | \ COMP << ADF_CFG_SERV_RING_PAIR_3_SHIFT) enum adf_accel_capabilities { ADF_ACCEL_CAPABILITIES_NULL = 0, ADF_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC = 1, ADF_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC = 2, ADF_ACCEL_CAPABILITIES_CIPHER = 4, ADF_ACCEL_CAPABILITIES_AUTHENTICATION = 8, ADF_ACCEL_CAPABILITIES_COMPRESSION = 32, ADF_ACCEL_CAPABILITIES_DEPRECATED = 64, ADF_ACCEL_CAPABILITIES_RANDOM_NUMBER = 128 }; struct adf_bar { rman_res_t base_addr; struct resource *virt_addr; rman_res_t size; } __packed; struct adf_accel_msix { struct msix_entry *entries; u32 num_entries; } __packed; struct adf_accel_pci { device_t 
pci_dev; struct adf_accel_msix msix_entries; struct adf_bar pci_bars[ADF_PCI_MAX_BARS]; uint8_t revid; uint8_t sku; int node; } __packed; enum dev_state { DEV_DOWN = 0, DEV_UP }; enum dev_sku_info { DEV_SKU_1 = 0, DEV_SKU_2, DEV_SKU_3, DEV_SKU_4, DEV_SKU_VF, DEV_SKU_1_CY, DEV_SKU_2_CY, DEV_SKU_3_CY, DEV_SKU_UNKNOWN }; static inline const char * get_sku_info(enum dev_sku_info info) { switch (info) { case DEV_SKU_1: return "SKU1"; case DEV_SKU_1_CY: return "SKU1CY"; case DEV_SKU_2: return "SKU2"; case DEV_SKU_2_CY: return "SKU2CY"; case DEV_SKU_3: return "SKU3"; case DEV_SKU_3_CY: return "SKU3CY"; case DEV_SKU_4: return "SKU4"; case DEV_SKU_VF: return "SKUVF"; case DEV_SKU_UNKNOWN: default: break; } return "Unknown SKU"; } enum adf_accel_unit_services { ADF_ACCEL_SERVICE_NULL = 0, ADF_ACCEL_INLINE_CRYPTO = 1, ADF_ACCEL_CRYPTO = 2, - ADF_ACCEL_COMPRESSION = 4 + ADF_ACCEL_COMPRESSION = 4, + ADF_ACCEL_ASYM = 8, + ADF_ACCEL_ADMIN = 16 }; struct adf_ae_info { u32 num_asym_thd; u32 num_sym_thd; u32 num_dc_thd; } __packed; struct adf_accel_unit { u8 au_mask; u32 accel_mask; u64 ae_mask; u64 comp_ae_mask; u32 num_ae; enum adf_accel_unit_services services; } __packed; struct adf_accel_unit_info { u32 inline_ingress_msk; u32 inline_egress_msk; u32 sym_ae_msk; u32 asym_ae_msk; u32 dc_ae_msk; u8 num_cy_au; u8 num_dc_au; + u8 num_asym_au; u8 num_inline_au; struct adf_accel_unit *au; const struct adf_ae_info *ae_info; } __packed; struct adf_hw_aram_info { /* Inline Egress mask. "1" = AE is working with egress traffic */ u32 inline_direction_egress_mask; /* Inline congestion management profiles set in config file */ u32 inline_congest_mngt_profile; /* Initialise CY AE mask, "1" = AE is used for CY operations */ u32 cy_ae_mask; /* Initialise DC AE mask, "1" = AE is used for DC operations */ u32 dc_ae_mask; /* Number of long words used to define the ARAM regions */ u32 num_aram_lw_entries; /* ARAM region definitions */ u32 mmp_region_size; u32 mmp_region_offset; u32 skm_region_size; u32 skm_region_offset; /* * Defines size and offset of compression intermediate buffers stored * in ARAM (device's on-chip memory).
*/ u32 inter_buff_aram_region_size; u32 inter_buff_aram_region_offset; u32 sadb_region_size; u32 sadb_region_offset; } __packed; struct adf_hw_device_class { const char *name; const enum adf_device_type type; uint32_t instances; } __packed; struct arb_info { u32 arbiter_offset; u32 wrk_thd_2_srv_arb_map; u32 wrk_cfg_offset; } __packed; struct admin_info { u32 admin_msg_ur; u32 admin_msg_lr; u32 mailbox_offset; } __packed; +struct adf_hw_csr_ops { + u64 (*build_csr_ring_base_addr)(bus_addr_t addr, u32 size); + u32 (*read_csr_ring_head)(struct resource *csr_base_addr, + u32 bank, + u32 ring); + void (*write_csr_ring_head)(struct resource *csr_base_addr, + u32 bank, + u32 ring, + u32 value); + u32 (*read_csr_ring_tail)(struct resource *csr_base_addr, + u32 bank, + u32 ring); + void (*write_csr_ring_tail)(struct resource *csr_base_addr, + u32 bank, + u32 ring, + u32 value); + u32 (*read_csr_e_stat)(struct resource *csr_base_addr, u32 bank); + void (*write_csr_ring_config)(struct resource *csr_base_addr, + u32 bank, + u32 ring, + u32 value); + void (*write_csr_ring_base)(struct resource *csr_base_addr, + u32 bank, + u32 ring, + bus_addr_t addr); + void (*write_csr_int_flag)(struct resource *csr_base_addr, + u32 bank, + u32 value); + void (*write_csr_int_srcsel)(struct resource *csr_base_addr, u32 bank); + void (*write_csr_int_col_en)(struct resource *csr_base_addr, + u32 bank, + u32 value); + void (*write_csr_int_col_ctl)(struct resource *csr_base_addr, + u32 bank, + u32 value); + void (*write_csr_int_flag_and_col)(struct resource *csr_base_addr, + u32 bank, + u32 value); + u32 (*read_csr_ring_srv_arb_en)(struct resource *csr_base_addr, + u32 bank); + void (*write_csr_ring_srv_arb_en)(struct resource *csr_base_addr, + u32 bank, + u32 value); +}; + +struct adf_hw_csr_info { + struct adf_hw_csr_ops csr_ops; + u32 csr_addr_offset; + u32 ring_bundle_size; + u32 bank_int_flag_clear_mask; + u32 num_rings_per_int_srcsel; + u32 arb_enable_mask; +}; + struct adf_cfg_device_data; struct adf_accel_dev; struct adf_etr_data; struct adf_etr_ring_data; struct adf_hw_device_data { struct adf_hw_device_class *dev_class; uint32_t (*get_accel_mask)(struct adf_accel_dev *accel_dev); uint32_t (*get_ae_mask)(struct adf_accel_dev *accel_dev); uint32_t (*get_sram_bar_id)(struct adf_hw_device_data *self); uint32_t (*get_misc_bar_id)(struct adf_hw_device_data *self); uint32_t (*get_etr_bar_id)(struct adf_hw_device_data *self); uint32_t (*get_num_aes)(struct adf_hw_device_data *self); uint32_t (*get_num_accels)(struct adf_hw_device_data *self); void (*notify_and_wait_ethernet)(struct adf_accel_dev *accel_dev); bool (*get_eth_doorbell_msg)(struct adf_accel_dev *accel_dev); uint32_t (*get_pf2vf_offset)(uint32_t i); uint32_t (*get_vintmsk_offset)(uint32_t i); u32 (*get_vintsou_offset)(void); void (*get_arb_info)(struct arb_info *arb_csrs_info); void (*get_admin_info)(struct admin_info *admin_csrs_info); void (*get_errsou_offset)(u32 *errsou3, u32 *errsou5); uint32_t (*get_num_accel_units)(struct adf_hw_device_data *self); int (*init_accel_units)(struct adf_accel_dev *accel_dev); void (*exit_accel_units)(struct adf_accel_dev *accel_dev); uint32_t (*get_clock_speed)(struct adf_hw_device_data *self); enum dev_sku_info (*get_sku)(struct adf_hw_device_data *self); bool (*check_prod_sku)(struct adf_accel_dev *accel_dev); int (*alloc_irq)(struct adf_accel_dev *accel_dev); void (*free_irq)(struct adf_accel_dev *accel_dev); void (*enable_error_correction)(struct adf_accel_dev *accel_dev); int (*check_uncorrectable_error)(struct 
adf_accel_dev *accel_dev); void (*print_err_registers)(struct adf_accel_dev *accel_dev); void (*disable_error_interrupts)(struct adf_accel_dev *accel_dev); int (*init_ras)(struct adf_accel_dev *accel_dev); void (*exit_ras)(struct adf_accel_dev *accel_dev); void (*disable_arb)(struct adf_accel_dev *accel_dev); void (*update_ras_errors)(struct adf_accel_dev *accel_dev, int error); bool (*ras_interrupts)(struct adf_accel_dev *accel_dev, bool *reset_required); int (*init_admin_comms)(struct adf_accel_dev *accel_dev); void (*exit_admin_comms)(struct adf_accel_dev *accel_dev); int (*send_admin_init)(struct adf_accel_dev *accel_dev); void (*set_asym_rings_mask)(struct adf_accel_dev *accel_dev); int (*get_ring_to_svc_map)(struct adf_accel_dev *accel_dev, u16 *ring_to_svc_map); uint32_t (*get_accel_cap)(struct adf_accel_dev *accel_dev); int (*init_arb)(struct adf_accel_dev *accel_dev); void (*exit_arb)(struct adf_accel_dev *accel_dev); void (*get_arb_mapping)(struct adf_accel_dev *accel_dev, const uint32_t **cfg); + int (*init_device)(struct adf_accel_dev *accel_dev); int (*get_heartbeat_status)(struct adf_accel_dev *accel_dev); uint32_t (*get_ae_clock)(struct adf_hw_device_data *self); + uint32_t (*get_hb_clock)(struct adf_hw_device_data *self); void (*disable_iov)(struct adf_accel_dev *accel_dev); void (*configure_iov_threads)(struct adf_accel_dev *accel_dev, bool enable); void (*enable_ints)(struct adf_accel_dev *accel_dev); bool (*check_slice_hang)(struct adf_accel_dev *accel_dev); int (*set_ssm_wdtimer)(struct adf_accel_dev *accel_dev); int (*enable_vf2pf_comms)(struct adf_accel_dev *accel_dev); int (*disable_vf2pf_comms)(struct adf_accel_dev *accel_dev); void (*reset_device)(struct adf_accel_dev *accel_dev); void (*reset_hw_units)(struct adf_accel_dev *accel_dev); int (*measure_clock)(struct adf_accel_dev *accel_dev); void (*restore_device)(struct adf_accel_dev *accel_dev); uint32_t (*get_obj_cfg_ae_mask)(struct adf_accel_dev *accel_dev, enum adf_accel_unit_services services); + enum adf_accel_unit_services ( + *get_service_type)(struct adf_accel_dev *accel_dev, s32 obj_num); int (*add_pke_stats)(struct adf_accel_dev *accel_dev); void (*remove_pke_stats)(struct adf_accel_dev *accel_dev); int (*add_misc_error)(struct adf_accel_dev *accel_dev); int (*count_ras_event)(struct adf_accel_dev *accel_dev, u32 *ras_event, char *aeidstr); void (*remove_misc_error)(struct adf_accel_dev *accel_dev); int (*configure_accel_units)(struct adf_accel_dev *accel_dev); uint32_t (*get_objs_num)(struct adf_accel_dev *accel_dev); const char *(*get_obj_name)(struct adf_accel_dev *accel_dev, enum adf_accel_unit_services services); void (*pre_reset)(struct adf_accel_dev *accel_dev); void (*post_reset)(struct adf_accel_dev *accel_dev); + void (*set_msix_rttable)(struct adf_accel_dev *accel_dev); + void (*get_ring_svc_map_data)(int ring_pair_index, + u16 ring_to_svc_map, + u8 *serv_type, + int *ring_index, + int *num_rings_per_srv, + int bundle_num); + struct adf_hw_csr_info csr_info; const char *fw_name; const char *fw_mmp_name; bool reset_ack; uint32_t fuses; uint32_t accel_capabilities_mask; uint32_t instance_id; uint16_t accel_mask; u32 aerucm_mask; u32 ae_mask; + u32 admin_ae_mask; u32 service_mask; + u32 service_to_load_mask; + u32 heartbeat_ctr_num; uint16_t tx_rings_mask; uint8_t tx_rx_gap; uint8_t num_banks; u8 num_rings_per_bank; uint8_t num_accel; uint8_t num_logical_accel; uint8_t num_engines; uint8_t min_iov_compat_ver; int (*get_storage_enabled)(struct adf_accel_dev *accel_dev, uint32_t *storage_enabled); 
u8 query_storage_cap; u32 clock_frequency; u8 storage_enable; u32 extended_dc_capabilities; int (*config_device)(struct adf_accel_dev *accel_dev); u16 asym_rings_mask; int (*get_fw_image_type)(struct adf_accel_dev *accel_dev, enum adf_cfg_fw_image_type *fw_image_type); u16 ring_to_svc_map; } __packed; /* helper enum for performing CSR operations */ enum operation { AND, OR, }; /* 32-bit CSR write macro */ #define ADF_CSR_WR(csr_base, csr_offset, val) \ bus_write_4(csr_base, csr_offset, val) /* 64-bit CSR write macro */ #ifdef __x86_64__ #define ADF_CSR_WR64(csr_base, csr_offset, val) \ bus_write_8(csr_base, csr_offset, val) #else static __inline void adf_csr_wr64(struct resource *csr_base, bus_size_t offset, uint64_t value) { bus_write_4(csr_base, offset, (uint32_t)value); bus_write_4(csr_base, offset + 4, (uint32_t)(value >> 32)); } #define ADF_CSR_WR64(csr_base, csr_offset, val) \ adf_csr_wr64(csr_base, csr_offset, val) #endif /* 32-bit CSR read macro */ #define ADF_CSR_RD(csr_base, csr_offset) bus_read_4(csr_base, csr_offset) /* 64-bit CSR read macro */ #ifdef __x86_64__ #define ADF_CSR_RD64(csr_base, csr_offset) bus_read_8(csr_base, csr_offset) #else static __inline uint64_t adf_csr_rd64(struct resource *csr_base, bus_size_t offset) { return (((uint64_t)bus_read_4(csr_base, offset)) | (((uint64_t)bus_read_4(csr_base, offset + 4)) << 32)); } #define ADF_CSR_RD64(csr_base, csr_offset) adf_csr_rd64(csr_base, csr_offset) #endif #define GET_DEV(accel_dev) ((accel_dev)->accel_pci_dev.pci_dev) #define GET_BARS(accel_dev) ((accel_dev)->accel_pci_dev.pci_bars) #define GET_HW_DATA(accel_dev) (accel_dev->hw_device) #define GET_MAX_BANKS(accel_dev) (GET_HW_DATA(accel_dev)->num_banks) #define GET_DEV_SKU(accel_dev) (accel_dev->accel_pci_dev.sku) #define GET_NUM_RINGS_PER_BANK(accel_dev) \ (GET_HW_DATA(accel_dev)->num_rings_per_bank) #define GET_MAX_ACCELENGINES(accel_dev) (GET_HW_DATA(accel_dev)->num_engines) #define accel_to_pci_dev(accel_ptr) accel_ptr->accel_pci_dev.pci_dev #define GET_SRV_TYPE(ena_srv_mask, srv) \ (((ena_srv_mask) >> (ADF_SRV_TYPE_BIT_LEN * (srv))) & ADF_SRV_TYPE_MASK) #define SET_ASYM_MASK(asym_mask, srv) \ ({ \ typeof(srv) srv_ = (srv); \ (asym_mask) |= ((1 << (srv_)*ADF_RINGS_PER_SRV_TYPE) | \ (1 << ((srv_)*ADF_RINGS_PER_SRV_TYPE + 1))); \ }) #define GET_NUM_RINGS_PER_BANK(accel_dev) \ (GET_HW_DATA(accel_dev)->num_rings_per_bank) #define GET_MAX_PROCESSES(accel_dev) \ ({ \ typeof(accel_dev) dev = (accel_dev); \ (GET_MAX_BANKS(dev) * (GET_NUM_RINGS_PER_BANK(dev) / 2)); \ }) #define GET_DU_TABLE(accel_dev) (accel_dev->du_table) static inline void adf_csr_fetch_and_and(struct resource *csr, size_t offs, unsigned long mask) { unsigned int val = ADF_CSR_RD(csr, offs); val &= mask; ADF_CSR_WR(csr, offs, val); } static inline void adf_csr_fetch_and_or(struct resource *csr, size_t offs, unsigned long mask) { unsigned int val = ADF_CSR_RD(csr, offs); val |= mask; ADF_CSR_WR(csr, offs, val); } static inline void adf_csr_fetch_and_update(enum operation op, struct resource *csr, size_t offs, unsigned long mask) { switch (op) { case AND: adf_csr_fetch_and_and(csr, offs, mask); break; case OR: adf_csr_fetch_and_or(csr, offs, mask); break; } } struct pfvf_stats { struct dentry *stats_file; /* Messages put in CSR */ unsigned int tx; /* Messages read from CSR */ unsigned int rx; /* Interrupt fired but int bit was clear */ unsigned int spurious; /* Block messages sent */ unsigned int blk_tx; /* Block messages received */ unsigned int blk_rx; /* Blocks received with CRC errors */ unsigned int 
crc_err; /* CSR in use by other side */ unsigned int busy; /* Receiver did not acknowledge */ unsigned int no_ack; /* Collision detected */ unsigned int collision; /* Couldn't send a response */ unsigned int tx_timeout; /* Didn't receive a response */ unsigned int rx_timeout; /* Responses received */ unsigned int rx_rsp; /* Messages re-transmitted */ unsigned int retry; /* Event put timeout */ unsigned int event_timeout; }; #define NUM_PFVF_COUNTERS 14 void adf_get_admin_info(struct admin_info *admin_csrs_info); struct adf_admin_comms { bus_addr_t phy_addr; bus_addr_t const_tbl_addr; bus_addr_t aram_map_phys_addr; bus_addr_t phy_hb_addr; bus_dmamap_t aram_map; bus_dmamap_t const_tbl_map; bus_dmamap_t hb_map; char *virt_addr; char *virt_hb_addr; struct resource *mailbox_addr; struct sx lock; struct bus_dmamem dma_mem; struct bus_dmamem dma_hb; }; struct icp_qat_fw_loader_handle; struct adf_fw_loader_data { struct icp_qat_fw_loader_handle *fw_loader; const struct firmware *uof_fw; const struct firmware *mmp_fw; }; struct adf_accel_vf_info { struct adf_accel_dev *accel_dev; struct mutex pf2vf_lock; /* protect CSR access for PF2VF messages */ u32 vf_nr; bool init; u8 compat_ver; struct pfvf_stats pfvf_counters; }; struct adf_fw_versions { u8 fw_version_major; u8 fw_version_minor; u8 fw_version_patch; u8 mmp_version_major; u8 mmp_version_minor; u8 mmp_version_patch; }; #define ADF_COMPAT_CHECKER_MAX 8 typedef int (*adf_iov_compat_checker_t)(struct adf_accel_dev *accel_dev, u8 vf_compat_ver); struct adf_accel_compat_manager { u8 num_chker; adf_iov_compat_checker_t iov_compat_checkers[ADF_COMPAT_CHECKER_MAX]; }; struct adf_heartbeat; struct adf_accel_dev { struct adf_hw_aram_info *aram_info; struct adf_accel_unit_info *au_info; struct adf_etr_data *transport; struct adf_hw_device_data *hw_device; struct adf_cfg_device_data *cfg; struct adf_fw_loader_data *fw_loader; struct adf_admin_comms *admin; struct adf_heartbeat *heartbeat; struct adf_fw_versions fw_versions; unsigned int autoreset_on_error; struct adf_fw_counters_data *fw_counters_data; struct sysctl_oid *debugfs_ae_config; struct list_head crypto_list; atomic_t *ras_counters; unsigned long status; atomic_t ref_count; bus_dma_tag_t dma_tag; struct sysctl_ctx_list sysctl_ctx; struct sysctl_oid *ras_correctable; struct sysctl_oid *ras_uncorrectable; struct sysctl_oid *ras_fatal; struct sysctl_oid *ras_reset; struct sysctl_oid *pke_replay_dbgfile; struct sysctl_oid *misc_error_dbgfile; struct list_head list; struct adf_accel_pci accel_pci_dev; struct adf_accel_compat_manager *cm; u8 compat_ver; union { struct { /* vf_info is non-zero when SR-IOV is init'ed */ struct adf_accel_vf_info *vf_info; int num_vfs; } pf; struct { struct resource *irq; void *cookie; char *irq_name; struct task pf2vf_bh_tasklet; struct mutex vf2pf_lock; /* protect CSR access */ int iov_msg_completion; uint8_t compatible; uint8_t pf_version; u8 pf2vf_block_byte; u8 pf2vf_block_resp_type; struct pfvf_stats pfvf_counters; } vf; } u1; bool is_vf; u32 accel_id; void *lac_dev; }; #endif diff --git a/sys/dev/qat/include/common/adf_cfg.h b/sys/dev/qat/include/common/adf_cfg.h index edc4813cb69e..58502c8605b8 100644 --- a/sys/dev/qat/include/common/adf_cfg.h +++ b/sys/dev/qat/include/common/adf_cfg.h @@ -1,79 +1,89 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright(c) 2007-2022 Intel Corporation */ /* $FreeBSD$ */ #ifndef ADF_CFG_H_ #define ADF_CFG_H_ #include #include "adf_accel_devices.h" #include "adf_cfg_common.h" #include "adf_cfg_strings.h" struct adf_cfg_key_val { 
char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES]; char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES]; enum adf_cfg_val_type type; struct list_head list; }; struct adf_cfg_section { char name[ADF_CFG_MAX_SECTION_LEN_IN_BYTES]; bool processed; bool is_derived; struct list_head list; struct list_head param_head; }; struct adf_cfg_device_data { struct adf_cfg_device *dev; struct list_head sec_list; struct sysctl_oid *debug; struct sx lock; }; struct adf_cfg_depot_list { struct list_head sec_list; }; int adf_cfg_dev_add(struct adf_accel_dev *accel_dev); void adf_cfg_dev_remove(struct adf_accel_dev *accel_dev); int adf_cfg_depot_restore_all(struct adf_accel_dev *accel_dev, struct adf_cfg_depot_list *dev_hp_cfg); int adf_cfg_section_add(struct adf_accel_dev *accel_dev, const char *name); void adf_cfg_del_all(struct adf_accel_dev *accel_dev); void adf_cfg_depot_del_all(struct list_head *head); int adf_cfg_add_key_value_param(struct adf_accel_dev *accel_dev, const char *section_name, const char *key, const void *val, enum adf_cfg_val_type type); int adf_cfg_get_param_value(struct adf_accel_dev *accel_dev, const char *section, const char *name, char *value); int adf_cfg_save_section(struct adf_accel_dev *accel_dev, const char *name, struct adf_cfg_section *section); int adf_cfg_depot_save_all(struct adf_accel_dev *accel_dev, struct adf_cfg_depot_list *dev_hp_cfg); struct adf_cfg_section *adf_cfg_sec_find(struct adf_accel_dev *accel_dev, const char *sec_name); int adf_cfg_derived_section_add(struct adf_accel_dev *accel_dev, const char *name); int adf_cfg_remove_key_param(struct adf_accel_dev *accel_dev, const char *section_name, const char *key); int adf_cfg_setup_irq(struct adf_accel_dev *accel_dev); void adf_cfg_set_asym_rings_mask(struct adf_accel_dev *accel_dev); void adf_cfg_gen_dispatch_arbiter(struct adf_accel_dev *accel_dev, const u32 *thrd_to_arb_map, u32 *thrd_to_arb_map_gen, u32 total_engines); int adf_cfg_get_fw_image_type(struct adf_accel_dev *accel_dev, enum adf_cfg_fw_image_type *fw_image_type); int adf_cfg_get_services_enabled(struct adf_accel_dev *accel_dev, u16 *ring_to_svc_map); int adf_cfg_restore_section(struct adf_accel_dev *accel_dev, struct adf_cfg_section *section); void adf_cfg_keyval_del_all(struct list_head *head); + +static inline int +adf_cy_inst_cross_banks(struct adf_accel_dev *accel_dev) +{ + if (accel_dev->hw_device->num_rings_per_bank == 2) + return 1; + else + return 0; +} + #endif diff --git a/sys/dev/qat/include/common/adf_cfg_common.h b/sys/dev/qat/include/common/adf_cfg_common.h index 68fb5e8a98b3..65fc60fc8c3d 100644 --- a/sys/dev/qat/include/common/adf_cfg_common.h +++ b/sys/dev/qat/include/common/adf_cfg_common.h @@ -1,211 +1,213 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright(c) 2007-2022 Intel Corporation */ /* $FreeBSD$ */ #ifndef ADF_CFG_COMMON_H_ #define ADF_CFG_COMMON_H_ #include #include #include #define ADF_CFG_MAX_STR_LEN 128 #define ADF_CFG_MAX_KEY_LEN_IN_BYTES ADF_CFG_MAX_STR_LEN /* * Max value length increased to 128 to support longer values, e.g. * Dc0CoreAffinity = 0, 1, 2, ... config values listing up to the maximum * number of cores. */ #define ADF_CFG_MAX_VAL_LEN_IN_BYTES 128 #define ADF_CFG_MAX_SECTION_LEN_IN_BYTES ADF_CFG_MAX_STR_LEN #define ADF_CFG_NULL_TERM_SIZE 1 #define ADF_CFG_BASE_DEC 10 #define ADF_CFG_BASE_HEX 16 #define ADF_CFG_ALL_DEVICES 0xFFFE #define ADF_CFG_NO_DEVICE 0xFFFF #define ADF_CFG_AFFINITY_WHATEVER 0xFF #define MAX_DEVICE_NAME_SIZE 32 #define ADF_MAX_DEVICES (32 * 32) #define ADF_MAX_ACCELENGINES 12 #define ADF_CFG_STORAGE_ENABLED 1 #define ADF_DEVS_ARRAY_SIZE BITS_TO_LONGS(ADF_MAX_DEVICES) -#define ADF_SSM_WDT_PKE_DEFAULT_VALUE 0x3000000 +#define ADF_GEN2_SSM_WDT_PKE_DEFAULT_VALUE 0x3000000 #define ADF_WDT_TIMER_SYM_COMP_MS 3 #define ADF_MIN_HB_TIMER_MS 100 #define ADF_CFG_MAX_NUM_OF_SECTIONS 16 #define ADF_CFG_MAX_NUM_OF_TOKENS 16 #define ADF_CFG_MAX_TOKENS_IN_CONFIG 8 #define ADF_CFG_RESP_POLL 1 #define ADF_CFG_RESP_EPOLL 2 #define ADF_CFG_DEF_CY_RING_ASYM_SIZE 64 #define ADF_CFG_DEF_CY_RING_SYM_SIZE 512 #define ADF_CFG_DEF_DC_RING_SIZE 512 #define ADF_CFG_MAX_CORE_NUM 256 #define ADF_CFG_MAX_TOKENS ADF_CFG_MAX_CORE_NUM #define ADF_CFG_MAX_TOKEN_LEN 10 #define ADF_CFG_ACCEL_DEF_COALES 1 #define ADF_CFG_ACCEL_DEF_COALES_TIMER 10000 #define ADF_CFG_ACCEL_DEF_COALES_NUM_MSG 0 #define ADF_CFG_ASYM_SRV_MASK 1 #define ADF_CFG_SYM_SRV_MASK 2 #define ADF_CFG_DC_SRV_MASK 8 #define ADF_CFG_UNKNOWN_SRV_MASK 0 #define ADF_CFG_DEF_ASYM_MASK 0x03 #define ADF_CFG_MAX_SERVICES 4 #define ADF_MAX_SERVICES 3 enum adf_svc_type { ADF_SVC_ASYM = 0, ADF_SVC_SYM = 1, ADF_SVC_DC = 2, ADF_SVC_NONE = 3 }; struct adf_pci_address { unsigned char bus; unsigned char dev; unsigned char func; } __packed; #define ADF_CFG_SERV_RING_PAIR_0_SHIFT 0 #define ADF_CFG_SERV_RING_PAIR_1_SHIFT 3 #define ADF_CFG_SERV_RING_PAIR_2_SHIFT 6 #define ADF_CFG_SERV_RING_PAIR_3_SHIFT 9 enum adf_cfg_service_type { NA = 0, CRYPTO, COMP, SYM, ASYM, USED }; enum adf_cfg_bundle_type { FREE, KERNEL, USER }; enum adf_cfg_val_type { ADF_DEC, ADF_HEX, ADF_STR }; enum adf_device_type { DEV_UNKNOWN = 0, DEV_DH895XCC, DEV_DH895XCCVF, DEV_C62X, DEV_C62XVF, DEV_C3XXX, DEV_C3XXXVF, DEV_200XX, DEV_200XXVF, DEV_C4XXX, - DEV_C4XXXVF + DEV_C4XXXVF, + DEV_4XXX }; enum adf_cfg_fw_image_type { ADF_FW_IMAGE_DEFAULT = 0, ADF_FW_IMAGE_CRYPTO, ADF_FW_IMAGE_COMPRESSION, ADF_FW_IMAGE_CUSTOM1 }; struct adf_dev_status_info { enum adf_device_type type; uint16_t accel_id; uint16_t instance_id; uint8_t num_ae; uint8_t num_accel; uint8_t num_logical_accel; uint8_t banks_per_accel; uint8_t state; uint8_t bus; uint8_t dev; uint8_t fun; int domain; char name[MAX_DEVICE_NAME_SIZE]; u8 sku; u32 node_id; u32 device_mem_available; u32 pci_device_id; }; struct adf_cfg_device { /* contains all the bundles info */ struct adf_cfg_bundle **bundles; /* contains all the instances info */ struct adf_cfg_instance **instances; int bundle_num; int instance_index; char name[ADF_CFG_MAX_STR_LEN]; int dev_id; int max_kernel_bundle_nr; u16 total_num_inst; }; enum adf_accel_serv_type { ADF_ACCEL_SERV_NA = 0x0, ADF_ACCEL_SERV_ASYM, ADF_ACCEL_SERV_SYM, ADF_ACCEL_SERV_RND, ADF_ACCEL_SERV_DC }; struct adf_cfg_ring { u8 mode : 1; enum adf_accel_serv_type serv_type; u8 number : 4; }; struct adf_cfg_bundle { /* name(s) of the section(s) sharing this bundle */ char **sections; int max_section; int section_index; int number; enum adf_cfg_bundle_type type; cpuset_t affinity_mask; int polling_mode; int instance_num; int num_of_rings; /* contains all the info about rings */ struct adf_cfg_ring **rings; u16 in_use; + u16 max_cfg_svc_num; }; struct adf_cfg_instance { enum
adf_cfg_service_type stype; char name[ADF_CFG_MAX_STR_LEN]; int polling_mode; cpuset_t affinity_mask; /* rings within an instance for services */ int asym_tx; int asym_rx; int sym_tx; int sym_rx; int dc_tx; int dc_rx; int bundle; }; #define ADF_CFG_MAX_CORE_NUM 256 #define ADF_CFG_MAX_TOKENS_IN_CONFIG 8 #define ADF_CFG_MAX_TOKEN_LEN 10 #define ADF_CFG_MAX_TOKENS ADF_CFG_MAX_CORE_NUM #define ADF_CFG_ACCEL_DEF_COALES 1 #define ADF_CFG_ACCEL_DEF_COALES_TIMER 10000 #define ADF_CFG_ACCEL_DEF_COALES_NUM_MSG 0 #define ADF_CFG_RESP_EPOLL 2 #define ADF_CFG_SERV_RING_PAIR_1_SHIFT 3 #define ADF_CFG_SERV_RING_PAIR_2_SHIFT 6 #define ADF_CFG_SERV_RING_PAIR_3_SHIFT 9 #define ADF_CFG_RESP_POLL 1 #define ADF_CFG_ASYM_SRV_MASK 1 #define ADF_CFG_SYM_SRV_MASK 2 #define ADF_CFG_DC_SRV_MASK 8 #define ADF_CFG_UNKNOWN_SRV_MASK 0 #define ADF_CFG_DEF_ASYM_MASK 0x03 #define ADF_CFG_MAX_SERVICES 4 #define ADF_CFG_HB_DEFAULT_VALUE 500 #define ADF_CFG_HB_COUNT_THRESHOLD 3 #define ADF_MIN_HB_TIMER_MS 100 enum adf_device_heartbeat_status { DEV_HB_UNRESPONSIVE = 0, DEV_HB_ALIVE, DEV_HB_UNSUPPORTED }; struct adf_dev_heartbeat_status_ctl { uint16_t device_id; enum adf_device_heartbeat_status status; }; #endif diff --git a/sys/dev/qat/include/common/adf_cfg_strings.h b/sys/dev/qat/include/common/adf_cfg_strings.h index 2f05dadadc45..933ffe0ba6ad 100644 --- a/sys/dev/qat/include/common/adf_cfg_strings.h +++ b/sys/dev/qat/include/common/adf_cfg_strings.h @@ -1,132 +1,136 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright(c) 2007-2022 Intel Corporation */ /* $FreeBSD$ */ #ifndef ADF_CFG_STRINGS_H_ #define ADF_CFG_STRINGS_H_ #define ADF_GENERAL_SEC "GENERAL" #define ADF_KERNEL_SEC "KERNEL" #define ADF_ACCEL_SEC "Accelerator" #define ADF_NUM_CY "NumberCyInstances" #define ADF_NUM_DC "NumberDcInstances" #define ADF_RING_SYM_SIZE "NumConcurrentSymRequests" #define ADF_RING_ASYM_SIZE "NumConcurrentAsymRequests" #define ADF_RING_DC_SIZE "NumConcurrentRequests" #define ADF_RING_ASYM_TX "RingAsymTx" #define ADF_RING_SYM_TX "RingSymTx" #define ADF_RING_RND_TX "RingNrbgTx" #define ADF_RING_ASYM_RX "RingAsymRx" #define ADF_RING_SYM_RX "RingSymRx" #define ADF_RING_RND_RX "RingNrbgRx" #define ADF_RING_DC_TX "RingTx" #define ADF_RING_DC_RX "RingRx" #define ADF_ETRMGR_BANK "Bank" #define ADF_RING_BANK_NUM "BankNumber" +#define ADF_RING_BANK_NUM_ASYM "BankNumberAsym" +#define ADF_RING_BANK_NUM_SYM "BankNumberSym" #define ADF_CY "Cy" #define ADF_DC "Dc" #define ADF_DC_EXTENDED_FEATURES "Device_DcExtendedFeatures" #define ADF_ETRMGR_COALESCING_ENABLED "InterruptCoalescingEnabled" #define ADF_ETRMGR_COALESCING_ENABLED_FORMAT \ ADF_ETRMGR_BANK "%d" ADF_ETRMGR_COALESCING_ENABLED #define ADF_ETRMGR_COALESCE_TIMER "InterruptCoalescingTimerNs" #define ADF_ETRMGR_COALESCE_TIMER_FORMAT \ ADF_ETRMGR_BANK "%d" ADF_ETRMGR_COALESCE_TIMER #define ADF_ETRMGR_COALESCING_MSG_ENABLED "InterruptCoalescingNumResponses" #define ADF_ETRMGR_COALESCING_MSG_ENABLED_FORMAT \ ADF_ETRMGR_BANK "%d" ADF_ETRMGR_COALESCING_MSG_ENABLED #define ADF_ETRMGR_CORE_AFFINITY "CoreAffinity" #define ADF_ETRMGR_CORE_AFFINITY_FORMAT \ ADF_ETRMGR_BANK "%d" ADF_ETRMGR_CORE_AFFINITY #define ADF_ACCEL_STR "Accelerator%d" #define ADF_INLINE_SEC "INLINE" #define ADF_NUM_CY_ACCEL_UNITS "NumCyAccelUnits" #define ADF_NUM_DC_ACCEL_UNITS "NumDcAccelUnits" #define ADF_NUM_INLINE_ACCEL_UNITS "NumInlineAccelUnits" #define ADF_INLINE_INGRESS "InlineIngress" #define ADF_INLINE_EGRESS "InlineEgress" #define ADF_INLINE_CONGEST_MNGT_PROFILE "InlineCongestionManagmentProfile" #define 
ADF_INLINE_IPSEC_ALGO_GROUP "InlineIPsecAlgoGroup" #define ADF_SERVICE_CY "cy" #define ADF_SERVICE_SYM "sym" #define ADF_SERVICE_DC "dc" #define ADF_CFG_CY "cy" #define ADF_CFG_DC "dc" #define ADF_CFG_ASYM "asym" #define ADF_CFG_SYM "sym" #define ADF_SERVICE_INLINE "inline" #define ADF_SERVICES_ENABLED "ServicesEnabled" #define ADF_SERVICES_SEPARATOR ";" #define ADF_DEV_SSM_WDT_BULK "CySymAndDcWatchDogTimer" #define ADF_DEV_SSM_WDT_PKE "CyAsymWatchDogTimer" #define ADF_DH895XCC_AE_FW_NAME "icp_qat_ae.uof" #define ADF_CXXX_AE_FW_NAME "icp_qat_ae.suof" #define ADF_HEARTBEAT_TIMER "HeartbeatTimer" #define ADF_MMP_VER_KEY "Firmware_MmpVer" #define ADF_UOF_VER_KEY "Firmware_UofVer" #define ADF_HW_REV_ID_KEY "HW_RevId" #define ADF_STORAGE_FIRMWARE_ENABLED "StorageEnabled" #define ADF_DEV_MAX_BANKS "Device_Max_Banks" #define ADF_DEV_CAPABILITIES_MASK "Device_Capabilities_Mask" #define ADF_DEV_NODE_ID "Device_NodeId" #define ADF_DEV_PKG_ID "Device_PkgId" #define ADF_FIRST_USER_BUNDLE "FirstUserBundle" #define ADF_INTERNAL_USERSPACE_SEC_SUFF "_INT_" #define ADF_LIMIT_DEV_ACCESS "LimitDevAccess" #define DEV_LIMIT_CFG_ACCESS_TMPL "_D_L_ACC" #define ADF_DEV_MAX_RINGS_PER_BANK "Device_Max_Rings_Per_Bank" #define ADF_NUM_PROCESSES "NumProcesses" #define ADF_DH895XCC_AE_FW_NAME_COMPRESSION "compression.uof" #define ADF_DH895XCC_AE_FW_NAME_CRYPTO "crypto.uof" #define ADF_DH895XCC_AE_FW_NAME_CUSTOM1 "custom1.uof" #define ADF_CXXX_AE_FW_NAME_COMPRESSION "compression.suof" #define ADF_CXXX_AE_FW_NAME_CRYPTO "crypto.suof" #define ADF_CXXX_AE_FW_NAME_CUSTOM1 "custom1.suof" #define ADF_DC_EXTENDED_FEATURES "Device_DcExtendedFeatures" #define ADF_PKE_DISABLED "PkeServiceDisabled" #define ADF_INTER_BUF_SIZE "DcIntermediateBufferSizeInKB" #define ADF_AUTO_RESET_ON_ERROR "AutoResetOnError" #define ADF_KERNEL_SAL_SEC "KERNEL_QAT" #define ADF_CFG_DEF_CY_RING_ASYM_SIZE 64 #define ADF_CFG_DEF_CY_RING_SYM_SIZE 512 #define ADF_CFG_DEF_DC_RING_SIZE 512 #define ADF_NUM_PROCESSES "NumProcesses" #define ADF_SERVICES_ENABLED "ServicesEnabled" #define ADF_CFG_CY "cy" #define ADF_CFG_SYM "sym" #define ADF_CFG_ASYM "asym" #define ADF_CFG_DC "dc" #define ADF_POLL_MODE "IsPolled" #define ADF_DEV_KPT_ENABLE "KptEnabled" #define ADF_STORAGE_FIRMWARE_ENABLED "StorageEnabled" #define ADF_RL_FIRMWARE_ENABLED "RateLimitingEnabled" #define ADF_SERVICES_PROFILE "ServicesProfile" #define ADF_SERVICES_DEFAULT "DEFAULT" #define ADF_SERVICES_CRYPTO "CRYPTO" #define ADF_SERVICES_COMPRESSION "COMPRESSION" #define ADF_SERVICES_CUSTOM1 "CUSTOM1" #define ADF_DC_RING_SIZE (ADF_DC ADF_RING_DC_SIZE) #define ADF_CY_RING_SYM_SIZE (ADF_CY ADF_RING_SYM_SIZE) #define ADF_CY_RING_ASYM_SIZE (ADF_CY ADF_RING_ASYM_SIZE) #define ADF_CY_CORE_AFFINITY_FORMAT ADF_CY "%d" ADF_ETRMGR_CORE_AFFINITY #define ADF_DC_CORE_AFFINITY_FORMAT ADF_DC "%d" ADF_ETRMGR_CORE_AFFINITY #define ADF_CY_BANK_NUM_FORMAT ADF_CY "%d" ADF_RING_BANK_NUM +#define ADF_CY_ASYM_BANK_NUM_FORMAT ADF_CY "%d" ADF_RING_BANK_NUM_ASYM +#define ADF_CY_SYM_BANK_NUM_FORMAT ADF_CY "%d" ADF_RING_BANK_NUM_SYM #define ADF_DC_BANK_NUM_FORMAT ADF_DC "%d" ADF_RING_BANK_NUM #define ADF_CY_ASYM_TX_FORMAT ADF_CY "%d" ADF_RING_ASYM_TX #define ADF_CY_SYM_TX_FORMAT ADF_CY "%d" ADF_RING_SYM_TX #define ADF_CY_ASYM_RX_FORMAT ADF_CY "%d" ADF_RING_ASYM_RX #define ADF_CY_SYM_RX_FORMAT ADF_CY "%d" ADF_RING_SYM_RX #define ADF_DC_TX_FORMAT ADF_DC "%d" ADF_RING_DC_TX #define ADF_DC_RX_FORMAT ADF_DC "%d" ADF_RING_DC_RX #define ADF_CY_RING_SYM_SIZE_FORMAT ADF_CY "%d" ADF_RING_SYM_SIZE #define ADF_CY_RING_ASYM_SIZE_FORMAT ADF_CY 
"%d" ADF_RING_ASYM_SIZE #define ADF_DC_RING_SIZE_FORMAT ADF_DC "%d" ADF_RING_DC_SIZE #define ADF_CY_NAME_FORMAT ADF_CY "%dName" #define ADF_DC_NAME_FORMAT ADF_DC "%dName" #define ADF_CY_POLL_MODE_FORMAT ADF_CY "%d" ADF_POLL_MODE #define ADF_DC_POLL_MODE_FORMAT ADF_DC "%d" ADF_POLL_MODE #define ADF_USER_SECTION_NAME_FORMAT "%s_INT_%d" #define ADF_LIMITED_USER_SECTION_NAME_FORMAT "%s_DEV%d_INT_%d" #define ADF_CONFIG_VERSION "ConfigVersion" #endif diff --git a/sys/dev/qat/include/common/adf_common_drv.h b/sys/dev/qat/include/common/adf_common_drv.h index 3bb35ed55da3..7ec380540336 100644 --- a/sys/dev/qat/include/common/adf_common_drv.h +++ b/sys/dev/qat/include/common/adf_common_drv.h @@ -1,368 +1,370 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright(c) 2007-2022 Intel Corporation */ /* $FreeBSD$ */ #ifndef ADF_DRV_H #define ADF_DRV_H #include #include "adf_accel_devices.h" #include "icp_qat_fw_loader_handle.h" #include "icp_qat_hal.h" #include "adf_cfg_user.h" #define ADF_MAJOR_VERSION 0 #define ADF_MINOR_VERSION 6 #define ADF_BUILD_VERSION 0 #define ADF_DRV_VERSION \ __stringify(ADF_MAJOR_VERSION) "." __stringify( \ ADF_MINOR_VERSION) "." __stringify(ADF_BUILD_VERSION) #define ADF_STATUS_RESTARTING 0 #define ADF_STATUS_STARTING 1 #define ADF_STATUS_CONFIGURED 2 #define ADF_STATUS_STARTED 3 #define ADF_STATUS_AE_INITIALISED 4 #define ADF_STATUS_AE_UCODE_LOADED 5 #define ADF_STATUS_AE_STARTED 6 #define ADF_STATUS_PF_RUNNING 7 #define ADF_STATUS_IRQ_ALLOCATED 8 #define ADF_PCIE_FLR_ATTEMPT 10 #define ADF_STATUS_SYSCTL_CTX_INITIALISED 9 #define PCI_EXP_AERUCS 0x104 /* PMISC BAR upper and lower offsets in PCIe config space */ #define ADF_PMISC_L_OFFSET 0x18 #define ADF_PMISC_U_OFFSET 0x1c enum adf_dev_reset_mode { ADF_DEV_RESET_ASYNC = 0, ADF_DEV_RESET_SYNC }; enum adf_event { ADF_EVENT_INIT = 0, ADF_EVENT_START, ADF_EVENT_STOP, ADF_EVENT_SHUTDOWN, ADF_EVENT_RESTARTING, ADF_EVENT_RESTARTED, ADF_EVENT_ERROR, }; struct adf_state { enum adf_event dev_state; int dev_id; }; struct service_hndl { int (*event_hld)(struct adf_accel_dev *accel_dev, enum adf_event event); unsigned long init_status[ADF_DEVS_ARRAY_SIZE]; unsigned long start_status[ADF_DEVS_ARRAY_SIZE]; char *name; struct list_head list; }; static inline int get_current_node(void) { return PCPU_GET(domain); } int adf_service_register(struct service_hndl *service); int adf_service_unregister(struct service_hndl *service); int adf_dev_init(struct adf_accel_dev *accel_dev); int adf_dev_start(struct adf_accel_dev *accel_dev); int adf_dev_stop(struct adf_accel_dev *accel_dev); void adf_dev_shutdown(struct adf_accel_dev *accel_dev); int adf_dev_autoreset(struct adf_accel_dev *accel_dev); int adf_dev_reset(struct adf_accel_dev *accel_dev, enum adf_dev_reset_mode mode); int adf_dev_aer_schedule_reset(struct adf_accel_dev *accel_dev, enum adf_dev_reset_mode mode); void adf_error_notifier(uintptr_t arg); int adf_init_fatal_error_wq(void); void adf_exit_fatal_error_wq(void); int adf_iov_putmsg(struct adf_accel_dev *accel_dev, u32 msg, u8 vf_nr); int adf_iov_notify(struct adf_accel_dev *accel_dev, u32 msg, u8 vf_nr); void adf_pf2vf_notify_restarting(struct adf_accel_dev *accel_dev); int adf_notify_fatal_error(struct adf_accel_dev *accel_dev); void adf_pf2vf_notify_fatal_error(struct adf_accel_dev *accel_dev); void adf_pf2vf_notify_uncorrectable_error(struct adf_accel_dev *accel_dev); void adf_pf2vf_notify_heartbeat_error(struct adf_accel_dev *accel_dev); typedef int (*adf_iov_block_provider)(struct adf_accel_dev *accel_dev, u8 **buffer, u8 
*length, u8 *block_version, u8 compatibility, u8 byte_num); int adf_iov_block_provider_register(u8 block_type, const adf_iov_block_provider provider); u8 adf_iov_is_block_provider_registered(u8 block_type); int adf_iov_block_provider_unregister(u8 block_type, const adf_iov_block_provider provider); int adf_iov_block_get(struct adf_accel_dev *accel_dev, u8 block_type, u8 *block_version, u8 *buffer, u8 *length); u8 adf_pfvf_crc(u8 start_crc, u8 *buf, u8 len); int adf_iov_init_compat_manager(struct adf_accel_dev *accel_dev, struct adf_accel_compat_manager **cm); int adf_iov_shutdown_compat_manager(struct adf_accel_dev *accel_dev, struct adf_accel_compat_manager **cm); int adf_iov_register_compat_checker(struct adf_accel_dev *accel_dev, const adf_iov_compat_checker_t cc); int adf_iov_unregister_compat_checker(struct adf_accel_dev *accel_dev, const adf_iov_compat_checker_t cc); int adf_pf_enable_vf2pf_comms(struct adf_accel_dev *accel_dev); int adf_pf_disable_vf2pf_comms(struct adf_accel_dev *accel_dev); int adf_enable_vf2pf_comms(struct adf_accel_dev *accel_dev); int adf_disable_vf2pf_comms(struct adf_accel_dev *accel_dev); void adf_vf2pf_req_hndl(struct adf_accel_vf_info *vf_info); void adf_devmgr_update_class_index(struct adf_hw_device_data *hw_data); void adf_clean_vf_map(bool); int adf_sysctl_add_fw_versions(struct adf_accel_dev *accel_dev); int adf_sysctl_remove_fw_versions(struct adf_accel_dev *accel_dev); int adf_ctl_dev_register(void); void adf_ctl_dev_unregister(void); int adf_pf_vf_capabilities_init(struct adf_accel_dev *accel_dev); int adf_pf_ext_dc_cap_msg_provider(struct adf_accel_dev *accel_dev, u8 **buffer, u8 *length, u8 *block_version, u8 compatibility); int adf_pf_vf_ring_to_svc_init(struct adf_accel_dev *accel_dev); int adf_pf_ring_to_svc_msg_provider(struct adf_accel_dev *accel_dev, u8 **buffer, u8 *length, u8 *block_version, u8 compatibility, u8 byte_num); int adf_devmgr_add_dev(struct adf_accel_dev *accel_dev, struct adf_accel_dev *pf); void adf_devmgr_rm_dev(struct adf_accel_dev *accel_dev, struct adf_accel_dev *pf); struct list_head *adf_devmgr_get_head(void); struct adf_accel_dev *adf_devmgr_get_dev_by_id(uint32_t id); struct adf_accel_dev *adf_devmgr_get_first(void); struct adf_accel_dev *adf_devmgr_pci_to_accel_dev(device_t pci_dev); int adf_devmgr_verify_id(uint32_t *id); void adf_devmgr_get_num_dev(uint32_t *num); int adf_devmgr_in_reset(struct adf_accel_dev *accel_dev); int adf_dev_started(struct adf_accel_dev *accel_dev); int adf_dev_restarting_notify(struct adf_accel_dev *accel_dev); int adf_dev_restarting_notify_sync(struct adf_accel_dev *accel_dev); int adf_dev_restarted_notify(struct adf_accel_dev *accel_dev); int adf_dev_stop_notify_sync(struct adf_accel_dev *accel_dev); int adf_ae_init(struct adf_accel_dev *accel_dev); int adf_ae_shutdown(struct adf_accel_dev *accel_dev); int adf_ae_fw_load(struct adf_accel_dev *accel_dev); void adf_ae_fw_release(struct adf_accel_dev *accel_dev); int adf_ae_start(struct adf_accel_dev *accel_dev); int adf_ae_stop(struct adf_accel_dev *accel_dev); int adf_aer_store_ppaerucm_reg(device_t pdev, struct adf_hw_device_data *hw_data); int adf_enable_aer(struct adf_accel_dev *accel_dev, device_t *adf); void adf_disable_aer(struct adf_accel_dev *accel_dev); void adf_reset_sbr(struct adf_accel_dev *accel_dev); void adf_reset_flr(struct adf_accel_dev *accel_dev); void adf_dev_pre_reset(struct adf_accel_dev *accel_dev); void adf_dev_post_reset(struct adf_accel_dev *accel_dev); void adf_dev_restore(struct adf_accel_dev *accel_dev); int 
adf_init_aer(void); void adf_exit_aer(void); int adf_put_admin_msg_sync(struct adf_accel_dev *accel_dev, u32 ae, void *in, void *out); struct icp_qat_fw_init_admin_req; struct icp_qat_fw_init_admin_resp; int adf_send_admin(struct adf_accel_dev *accel_dev, struct icp_qat_fw_init_admin_req *req, struct icp_qat_fw_init_admin_resp *resp, u32 ae_mask); int adf_config_device(struct adf_accel_dev *accel_dev); int adf_init_admin_comms(struct adf_accel_dev *accel_dev); void adf_exit_admin_comms(struct adf_accel_dev *accel_dev); int adf_send_admin_init(struct adf_accel_dev *accel_dev); int adf_get_fw_timestamp(struct adf_accel_dev *accel_dev, u64 *timestamp); int adf_get_fw_pke_stats(struct adf_accel_dev *accel_dev, u64 *suc_count, u64 *unsuc_count); int adf_dev_measure_clock(struct adf_accel_dev *accel_dev, u32 *frequency, u32 min, u32 max); int adf_clock_debugfs_add(struct adf_accel_dev *accel_dev); u64 adf_clock_get_current_time(void); int adf_init_arb(struct adf_accel_dev *accel_dev); int adf_init_gen2_arb(struct adf_accel_dev *accel_dev); void adf_exit_arb(struct adf_accel_dev *accel_dev); void adf_disable_arb(struct adf_accel_dev *accel_dev); void adf_update_ring_arb(struct adf_etr_ring_data *ring); -void -adf_enable_ring_arb(void *csr_addr, unsigned int bank_nr, unsigned int mask); -void -adf_disable_ring_arb(void *csr_addr, unsigned int bank_nr, unsigned int mask); +void adf_enable_ring_arb(struct adf_accel_dev *accel_dev, + void *csr_addr, + unsigned int bank_nr, + unsigned int mask); +void adf_disable_ring_arb(struct adf_accel_dev *accel_dev, + void *csr_addr, + unsigned int bank_nr, + unsigned int mask); int adf_set_ssm_wdtimer(struct adf_accel_dev *accel_dev); struct adf_accel_dev *adf_devmgr_get_dev_by_bdf(struct adf_pci_address *addr); struct adf_accel_dev *adf_devmgr_get_dev_by_pci_bus(u8 bus); int adf_get_vf_nr(struct adf_pci_address *vf_pci_addr, int *vf_nr); u32 adf_get_slices_for_svc(struct adf_accel_dev *accel_dev, enum adf_svc_type svc); bool adf_is_bdf_equal(struct adf_pci_address *bdf1, struct adf_pci_address *bdf2); int adf_is_vf_nr_valid(struct adf_accel_dev *accel_dev, int vf_nr); void adf_dev_get(struct adf_accel_dev *accel_dev); void adf_dev_put(struct adf_accel_dev *accel_dev); int adf_dev_in_use(struct adf_accel_dev *accel_dev); int adf_init_etr_data(struct adf_accel_dev *accel_dev); void adf_cleanup_etr_data(struct adf_accel_dev *accel_dev); struct qat_crypto_instance *qat_crypto_get_instance_node(int node); void qat_crypto_put_instance(struct qat_crypto_instance *inst); void qat_alg_callback(void *resp); void qat_alg_asym_callback(void *resp); int qat_algs_register(void); void qat_algs_unregister(void); int qat_asym_algs_register(void); void qat_asym_algs_unregister(void); int adf_isr_resource_alloc(struct adf_accel_dev *accel_dev); void adf_isr_resource_free(struct adf_accel_dev *accel_dev); int adf_vf_isr_resource_alloc(struct adf_accel_dev *accel_dev); void adf_vf_isr_resource_free(struct adf_accel_dev *accel_dev); int qat_hal_init(struct adf_accel_dev *accel_dev); void qat_hal_deinit(struct icp_qat_fw_loader_handle *handle); -void qat_hal_start(struct icp_qat_fw_loader_handle *handle, - unsigned char ae, - unsigned int ctx_mask); +int qat_hal_start(struct icp_qat_fw_loader_handle *handle); void qat_hal_stop(struct icp_qat_fw_loader_handle *handle, unsigned char ae, unsigned int ctx_mask); void qat_hal_reset(struct icp_qat_fw_loader_handle *handle); int qat_hal_clr_reset(struct icp_qat_fw_loader_handle *handle); void qat_hal_set_live_ctx(struct 
icp_qat_fw_loader_handle *handle, unsigned char ae, unsigned int ctx_mask); int qat_hal_check_ae_active(struct icp_qat_fw_loader_handle *handle, unsigned int ae); int qat_hal_set_ae_lm_mode(struct icp_qat_fw_loader_handle *handle, unsigned char ae, enum icp_qat_uof_regtype lm_type, unsigned char mode); void qat_hal_set_ae_tindex_mode(struct icp_qat_fw_loader_handle *handle, unsigned char ae, unsigned char mode); void qat_hal_set_ae_scs_mode(struct icp_qat_fw_loader_handle *handle, unsigned char ae, unsigned char mode); int qat_hal_set_ae_ctx_mode(struct icp_qat_fw_loader_handle *handle, unsigned char ae, unsigned char mode); int qat_hal_set_ae_nn_mode(struct icp_qat_fw_loader_handle *handle, unsigned char ae, unsigned char mode); void qat_hal_set_pc(struct icp_qat_fw_loader_handle *handle, unsigned char ae, unsigned int ctx_mask, unsigned int upc); void qat_hal_wr_uwords(struct icp_qat_fw_loader_handle *handle, unsigned char ae, unsigned int uaddr, unsigned int words_num, const uint64_t *uword); void qat_hal_wr_coalesce_uwords(struct icp_qat_fw_loader_handle *handle, unsigned char ae, unsigned int uaddr, unsigned int words_num, uint64_t *uword); void qat_hal_wr_umem(struct icp_qat_fw_loader_handle *handle, unsigned char ae, unsigned int uword_addr, unsigned int words_num, unsigned int *data); int qat_hal_get_ins_num(void); int qat_hal_batch_wr_lm(struct icp_qat_fw_loader_handle *handle, unsigned char ae, struct icp_qat_uof_batch_init *lm_init_header); int qat_hal_init_gpr(struct icp_qat_fw_loader_handle *handle, unsigned char ae, unsigned long ctx_mask, enum icp_qat_uof_regtype reg_type, unsigned short reg_num, unsigned int regdata); int qat_hal_init_wr_xfer(struct icp_qat_fw_loader_handle *handle, unsigned char ae, unsigned long ctx_mask, enum icp_qat_uof_regtype reg_type, unsigned short reg_num, unsigned int regdata); int qat_hal_init_rd_xfer(struct icp_qat_fw_loader_handle *handle, unsigned char ae, unsigned long ctx_mask, enum icp_qat_uof_regtype reg_type, unsigned short reg_num, unsigned int regdata); int qat_hal_init_nn(struct icp_qat_fw_loader_handle *handle, unsigned char ae, unsigned long ctx_mask, unsigned short reg_num, unsigned int regdata); int qat_hal_wr_lm(struct icp_qat_fw_loader_handle *handle, unsigned char ae, unsigned short lm_addr, unsigned int value); int qat_uclo_wr_all_uimage(struct icp_qat_fw_loader_handle *handle); void qat_uclo_del_obj(struct icp_qat_fw_loader_handle *handle); void qat_uclo_del_mof(struct icp_qat_fw_loader_handle *handle); int qat_uclo_wr_mimage(struct icp_qat_fw_loader_handle *handle, const void *addr_ptr, int mem_size); int qat_uclo_map_obj(struct icp_qat_fw_loader_handle *handle, const void *addr_ptr, u32 mem_size, const char *obj_name); void qat_hal_get_scs_neigh_ae(unsigned char ae, unsigned char *ae_neigh); int qat_uclo_set_cfg_ae_mask(struct icp_qat_fw_loader_handle *handle, unsigned int cfg_ae_mask); void adf_enable_pf2vf_interrupts(struct adf_accel_dev *accel_dev); void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev); int adf_init_vf_wq(void); void adf_exit_vf_wq(void); void adf_flush_vf_wq(void); int adf_vf2pf_init(struct adf_accel_dev *accel_dev); void adf_vf2pf_shutdown(struct adf_accel_dev *accel_dev); static inline int adf_sriov_configure(device_t *pdev, int numvfs) { return 0; } static inline void adf_disable_sriov(struct adf_accel_dev *accel_dev) { } static inline void adf_vf2pf_handler(struct adf_accel_vf_info *vf_info) { } static inline int adf_init_pf_wq(void) { return 0; } static inline void adf_exit_pf_wq(void) 
{ } #endif diff --git a/sys/dev/qat/include/common/adf_gen2_hw_data.h b/sys/dev/qat/include/common/adf_gen2_hw_data.h new file mode 100644 index 000000000000..395abec81b9f --- /dev/null +++ b/sys/dev/qat/include/common/adf_gen2_hw_data.h @@ -0,0 +1,172 @@ +/* SPDX-License-Identifier: BSD-3-Clause */ +/* Copyright(c) 2021 Intel Corporation */ +/* $FreeBSD$ */ +#ifndef ADF_GEN2_HW_DATA_H_ +#define ADF_GEN2_HW_DATA_H_ + +#include "adf_accel_devices.h" +#include "adf_cfg_common.h" + +/* Transport access */ +#define ADF_BANK_INT_SRC_SEL_MASK_0 0x4444444CUL +#define ADF_BANK_INT_SRC_SEL_MASK_X 0x44444444UL +#define ADF_RING_CSR_RING_CONFIG 0x000 +#define ADF_RING_CSR_RING_LBASE 0x040 +#define ADF_RING_CSR_RING_UBASE 0x080 +#define ADF_RING_CSR_RING_HEAD 0x0C0 +#define ADF_RING_CSR_RING_TAIL 0x100 +#define ADF_RING_CSR_E_STAT 0x14C +#define ADF_RING_CSR_INT_FLAG 0x170 +#define ADF_RING_CSR_INT_SRCSEL 0x174 +#define ADF_RING_CSR_INT_SRCSEL_2 0x178 +#define ADF_RING_CSR_INT_COL_EN 0x17C +#define ADF_RING_CSR_INT_COL_CTL 0x180 +#define ADF_RING_CSR_INT_FLAG_AND_COL 0x184 +#define ADF_RING_CSR_INT_COL_CTL_ENABLE 0x80000000 +#define ADF_RING_BUNDLE_SIZE 0x1000 +#define ADF_GEN2_RX_RINGS_OFFSET 8 +#define ADF_GEN2_TX_RINGS_MASK 0xFF + +#define BUILD_RING_BASE_ADDR(addr, size) \ + (((addr) >> 6) & (GENMASK_ULL(63, 0) << (size))) +#define READ_CSR_RING_HEAD(csr_base_addr, bank, ring) \ + ADF_CSR_RD(csr_base_addr, \ + (ADF_RING_BUNDLE_SIZE * (bank)) + ADF_RING_CSR_RING_HEAD + \ + ((ring) << 2)) +#define READ_CSR_RING_TAIL(csr_base_addr, bank, ring) \ + ADF_CSR_RD(csr_base_addr, \ + (ADF_RING_BUNDLE_SIZE * (bank)) + ADF_RING_CSR_RING_TAIL + \ + ((ring) << 2)) +#define READ_CSR_E_STAT(csr_base_addr, bank) \ + ADF_CSR_RD(csr_base_addr, \ + (ADF_RING_BUNDLE_SIZE * (bank)) + ADF_RING_CSR_E_STAT) +#define WRITE_CSR_RING_CONFIG(csr_base_addr, bank, ring, value) \ + ADF_CSR_WR(csr_base_addr, \ + (ADF_RING_BUNDLE_SIZE * (bank)) + \ + ADF_RING_CSR_RING_CONFIG + ((ring) << 2), \ + value) +#define WRITE_CSR_RING_BASE(csr_base_addr, bank, ring, value) \ + do { \ + u32 l_base = 0, u_base = 0; \ + l_base = (u32)((value)&0xFFFFFFFF); \ + u_base = (u32)(((value)&0xFFFFFFFF00000000ULL) >> 32); \ + ADF_CSR_WR(csr_base_addr, \ + (ADF_RING_BUNDLE_SIZE * (bank)) + \ + ADF_RING_CSR_RING_LBASE + ((ring) << 2), \ + l_base); \ + ADF_CSR_WR(csr_base_addr, \ + (ADF_RING_BUNDLE_SIZE * (bank)) + \ + ADF_RING_CSR_RING_UBASE + ((ring) << 2), \ + u_base); \ + } while (0) + +#define WRITE_CSR_RING_HEAD(csr_base_addr, bank, ring, value) \ + ADF_CSR_WR(csr_base_addr, \ + (ADF_RING_BUNDLE_SIZE * (bank)) + ADF_RING_CSR_RING_HEAD + \ + ((ring) << 2), \ + value) +#define WRITE_CSR_RING_TAIL(csr_base_addr, bank, ring, value) \ + ADF_CSR_WR(csr_base_addr, \ + (ADF_RING_BUNDLE_SIZE * (bank)) + ADF_RING_CSR_RING_TAIL + \ + ((ring) << 2), \ + value) +#define WRITE_CSR_INT_FLAG(csr_base_addr, bank, value) \ + ADF_CSR_WR(csr_base_addr, \ + (ADF_RING_BUNDLE_SIZE * (bank)) + ADF_RING_CSR_INT_FLAG, \ + value) +#define WRITE_CSR_INT_SRCSEL(csr_base_addr, bank) \ + do { \ + ADF_CSR_WR(csr_base_addr, \ + (ADF_RING_BUNDLE_SIZE * (bank)) + \ + ADF_RING_CSR_INT_SRCSEL, \ + ADF_BANK_INT_SRC_SEL_MASK_0); \ + ADF_CSR_WR(csr_base_addr, \ + (ADF_RING_BUNDLE_SIZE * (bank)) + \ + ADF_RING_CSR_INT_SRCSEL_2, \ + ADF_BANK_INT_SRC_SEL_MASK_X); \ + } while (0) +#define WRITE_CSR_INT_COL_EN(csr_base_addr, bank, value) \ + ADF_CSR_WR(csr_base_addr, \ + (ADF_RING_BUNDLE_SIZE * (bank)) + ADF_RING_CSR_INT_COL_EN, \ + value) +#define WRITE_CSR_INT_COL_CTL(csr_base_addr, bank, 
value) \ + ADF_CSR_WR(csr_base_addr, \ + (ADF_RING_BUNDLE_SIZE * (bank)) + ADF_RING_CSR_INT_COL_CTL, \ + ADF_RING_CSR_INT_COL_CTL_ENABLE | (value)) +#define WRITE_CSR_INT_FLAG_AND_COL(csr_base_addr, bank, value) \ + ADF_CSR_WR(csr_base_addr, \ + (ADF_RING_BUNDLE_SIZE * (bank)) + \ + ADF_RING_CSR_INT_FLAG_AND_COL, \ + value) + +/* AE to function map */ +#define AE2FUNCTION_MAP_A_OFFSET (0x3A400 + 0x190) +#define AE2FUNCTION_MAP_B_OFFSET (0x3A400 + 0x310) +#define AE2FUNCTION_MAP_REG_SIZE 4 +#define AE2FUNCTION_MAP_VALID BIT(7) + +#define READ_CSR_AE2FUNCTION_MAP_A(pmisc_bar_addr, index) \ + ADF_CSR_RD(pmisc_bar_addr, \ + AE2FUNCTION_MAP_A_OFFSET + \ + AE2FUNCTION_MAP_REG_SIZE * (index)) +#define WRITE_CSR_AE2FUNCTION_MAP_A(pmisc_bar_addr, index, value) \ + ADF_CSR_WR(pmisc_bar_addr, \ + AE2FUNCTION_MAP_A_OFFSET + \ + AE2FUNCTION_MAP_REG_SIZE * (index), \ + value) +#define READ_CSR_AE2FUNCTION_MAP_B(pmisc_bar_addr, index) \ + ADF_CSR_RD(pmisc_bar_addr, \ + AE2FUNCTION_MAP_B_OFFSET + \ + AE2FUNCTION_MAP_REG_SIZE * (index)) +#define WRITE_CSR_AE2FUNCTION_MAP_B(pmisc_bar_addr, index, value) \ + ADF_CSR_WR(pmisc_bar_addr, \ + AE2FUNCTION_MAP_B_OFFSET + \ + AE2FUNCTION_MAP_REG_SIZE * (index), \ + value) + +/* Admin Interface Offsets */ +#define ADF_ADMINMSGUR_OFFSET (0x3A000 + 0x574) +#define ADF_ADMINMSGLR_OFFSET (0x3A000 + 0x578) +#define ADF_MAILBOX_BASE_OFFSET 0x20970 + +/* Arbiter configuration */ +#define ADF_ARB_OFFSET 0x30000 +#define ADF_ARB_WRK_2_SER_MAP_OFFSET 0x180 +#define ADF_ARB_CONFIG (BIT(31) | BIT(6) | BIT(0)) +#define ADF_ARB_REG_SLOT 0x1000 +#define ADF_ARB_RINGSRVARBEN_OFFSET 0x19C + +#define READ_CSR_RING_SRV_ARB_EN(csr_addr, index) \ + ADF_CSR_RD(csr_addr, \ + ADF_ARB_RINGSRVARBEN_OFFSET + (ADF_ARB_REG_SLOT * (index))) + +#define WRITE_CSR_RING_SRV_ARB_EN(csr_addr, index, value) \ + ADF_CSR_WR(csr_addr, \ + ADF_ARB_RINGSRVARBEN_OFFSET + (ADF_ARB_REG_SLOT * (index)), \ + value) + +/* Power gating */ +#define ADF_POWERGATE_DC BIT(23) +#define ADF_POWERGATE_PKE BIT(24) + +/* Default ring mapping */ +#define ADF_GEN2_DEFAULT_RING_TO_SRV_MAP \ + (CRYPTO << ADF_CFG_SERV_RING_PAIR_0_SHIFT | \ + CRYPTO << ADF_CFG_SERV_RING_PAIR_1_SHIFT | \ + UNUSED << ADF_CFG_SERV_RING_PAIR_2_SHIFT | \ + COMP << ADF_CFG_SERV_RING_PAIR_3_SHIFT) + +/* Error detection and correction */ +#define ADF_GEN2_AE_CTX_ENABLES(i) ((i)*0x1000 + 0x20818) +#define ADF_GEN2_AE_MISC_CONTROL(i) ((i)*0x1000 + 0x20960) +#define ADF_GEN2_ENABLE_AE_ECC_ERR BIT(28) +#define ADF_GEN2_ENABLE_AE_ECC_PARITY_CORR (BIT(24) | BIT(12)) +#define ADF_GEN2_UERRSSMSH(i) ((i)*0x4000 + 0x18) +#define ADF_GEN2_CERRSSMSH(i) ((i)*0x4000 + 0x10) +#define ADF_GEN2_ERRSSMSH_EN BIT(3) + +#define ADF_NUM_HB_CNT_PER_AE (ADF_NUM_THREADS_PER_AE + ADF_NUM_PKE_STRAND) + +void adf_gen2_init_hw_csr_info(struct adf_hw_csr_info *csr_info); + +#endif diff --git a/sys/dev/qat/include/common/adf_gen4_hw_data.h b/sys/dev/qat/include/common/adf_gen4_hw_data.h new file mode 100644 index 000000000000..c0ef0c92772e --- /dev/null +++ b/sys/dev/qat/include/common/adf_gen4_hw_data.h @@ -0,0 +1,132 @@ +/* SPDX-License-Identifier: BSD-3-Clause */ +/* Copyright(c) 2021 Intel Corporation */ +/* $FreeBSD$ */ +#ifndef ADF_GEN4_HW_CSR_DATA_H_ +#define ADF_GEN4_HW_CSR_DATA_H_ + +#include "adf_accel_devices.h" + +/* Transport access */ +#define ADF_BANK_INT_SRC_SEL_MASK 0x44UL +#define ADF_RING_CSR_RING_CONFIG 0x1000 +#define ADF_RING_CSR_RING_LBASE 0x1040 +#define ADF_RING_CSR_RING_UBASE 0x1080 +#define ADF_RING_CSR_RING_HEAD 0x0C0 +#define ADF_RING_CSR_RING_TAIL 0x100 
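+/*
+ * Illustrative address computation (a sketch using the GEN4 constants
+ * defined in this header): the tail CSR of ring 3 in bank 1 resolves to
+ *   ADF_RING_CSR_ADDR_OFFSET + ADF_RING_BUNDLE_SIZE * 1 +
+ *       ADF_RING_CSR_RING_TAIL + (3 << 2)
+ *   = 0x100000 + 0x2000 + 0x100 + 0xC = 0x10210C
+ * The GEN2 header above keeps the same per-ring stride but packs each
+ * bank into a 0x1000 slot with no extra base offset.
+ */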
+#define ADF_RING_CSR_E_STAT 0x14C +#define ADF_RING_CSR_INT_FLAG 0x170 +#define ADF_RING_CSR_INT_SRCSEL 0x174 +#define ADF_RING_CSR_INT_COL_CTL 0x180 +#define ADF_RING_CSR_INT_FLAG_AND_COL 0x184 +#define ADF_RING_CSR_INT_COL_CTL_ENABLE 0x80000000 +#define ADF_RING_CSR_INT_COL_EN 0x17C +#define ADF_RING_CSR_ADDR_OFFSET 0x100000 +#define ADF_RING_BUNDLE_SIZE 0x2000 + +#define BUILD_RING_BASE_ADDR(addr, size) \ + ((((addr) >> 6) & (GENMASK_ULL(63, 0) << (size))) << 6) +#define READ_CSR_RING_HEAD(csr_base_addr, bank, ring) \ + ADF_CSR_RD((csr_base_addr), \ + ADF_RING_CSR_ADDR_OFFSET + ADF_RING_BUNDLE_SIZE * (bank) + \ + ADF_RING_CSR_RING_HEAD + ((ring) << 2)) +#define READ_CSR_RING_TAIL(csr_base_addr, bank, ring) \ + ADF_CSR_RD((csr_base_addr), \ + ADF_RING_CSR_ADDR_OFFSET + ADF_RING_BUNDLE_SIZE * (bank) + \ + ADF_RING_CSR_RING_TAIL + ((ring) << 2)) +#define READ_CSR_E_STAT(csr_base_addr, bank) \ + ADF_CSR_RD((csr_base_addr), \ + ADF_RING_CSR_ADDR_OFFSET + ADF_RING_BUNDLE_SIZE * (bank) + \ + ADF_RING_CSR_E_STAT) +#define WRITE_CSR_RING_CONFIG(csr_base_addr, bank, ring, value) \ + ADF_CSR_WR((csr_base_addr), \ + ADF_RING_CSR_ADDR_OFFSET + ADF_RING_BUNDLE_SIZE * (bank) + \ + ADF_RING_CSR_RING_CONFIG + ((ring) << 2), \ + value) +#define WRITE_CSR_RING_BASE(csr_base_addr, bank, ring, value) \ + do { \ + struct resource *_csr_base_addr = csr_base_addr; \ + u32 _bank = bank; \ + u32 _ring = ring; \ + dma_addr_t _value = value; \ + u32 l_base = 0, u_base = 0; \ + l_base = lower_32_bits(_value); \ + u_base = upper_32_bits(_value); \ + ADF_CSR_WR((_csr_base_addr), \ + ADF_RING_CSR_ADDR_OFFSET + \ + ADF_RING_BUNDLE_SIZE * (_bank) + \ + ADF_RING_CSR_RING_LBASE + ((_ring) << 2), \ + l_base); \ + ADF_CSR_WR((_csr_base_addr), \ + ADF_RING_CSR_ADDR_OFFSET + \ + ADF_RING_BUNDLE_SIZE * (_bank) + \ + ADF_RING_CSR_RING_UBASE + ((_ring) << 2), \ + u_base); \ + } while (0) + +#define WRITE_CSR_RING_HEAD(csr_base_addr, bank, ring, value) \ + ADF_CSR_WR((csr_base_addr), \ + ADF_RING_CSR_ADDR_OFFSET + ADF_RING_BUNDLE_SIZE * (bank) + \ + ADF_RING_CSR_RING_HEAD + ((ring) << 2), \ + value) +#define WRITE_CSR_RING_TAIL(csr_base_addr, bank, ring, value) \ + ADF_CSR_WR((csr_base_addr), \ + ADF_RING_CSR_ADDR_OFFSET + ADF_RING_BUNDLE_SIZE * (bank) + \ + ADF_RING_CSR_RING_TAIL + ((ring) << 2), \ + value) +#define WRITE_CSR_INT_FLAG(csr_base_addr, bank, value) \ + ADF_CSR_WR((csr_base_addr), \ + ADF_RING_CSR_ADDR_OFFSET + ADF_RING_BUNDLE_SIZE * (bank) + \ + ADF_RING_CSR_INT_FLAG, \ + (value)) +#define WRITE_CSR_INT_SRCSEL(csr_base_addr, bank) \ + ADF_CSR_WR((csr_base_addr), \ + ADF_RING_CSR_ADDR_OFFSET + ADF_RING_BUNDLE_SIZE * (bank) + \ + ADF_RING_CSR_INT_SRCSEL, \ + ADF_BANK_INT_SRC_SEL_MASK) +#define WRITE_CSR_INT_COL_EN(csr_base_addr, bank, value) \ + ADF_CSR_WR((csr_base_addr), \ + ADF_RING_CSR_ADDR_OFFSET + ADF_RING_BUNDLE_SIZE * (bank) + \ + ADF_RING_CSR_INT_COL_EN, \ + (value)) +#define WRITE_CSR_INT_COL_CTL(csr_base_addr, bank, value) \ + ADF_CSR_WR((csr_base_addr), \ + ADF_RING_CSR_ADDR_OFFSET + ADF_RING_BUNDLE_SIZE * (bank) + \ + ADF_RING_CSR_INT_COL_CTL, \ + ADF_RING_CSR_INT_COL_CTL_ENABLE | (value)) +#define WRITE_CSR_INT_FLAG_AND_COL(csr_base_addr, bank, value) \ + ADF_CSR_WR((csr_base_addr), \ + ADF_RING_CSR_ADDR_OFFSET + ADF_RING_BUNDLE_SIZE * (bank) + \ + ADF_RING_CSR_INT_FLAG_AND_COL, \ + (value)) + +/* Arbiter configuration */ +#define ADF_RING_CSR_RING_SRV_ARB_EN 0x19C + +#define READ_CSR_RING_SRV_ARB_EN(csr_base_addr, bank) \ + ADF_CSR_RD((csr_base_addr), \ + ADF_RING_CSR_ADDR_OFFSET + 
ADF_RING_BUNDLE_SIZE * (bank) + \ + ADF_RING_CSR_RING_SRV_ARB_EN) + +#define WRITE_CSR_RING_SRV_ARB_EN(csr_base_addr, bank, value) \ + ADF_CSR_WR((csr_base_addr), \ + ADF_RING_CSR_ADDR_OFFSET + ADF_RING_BUNDLE_SIZE * (bank) + \ + ADF_RING_CSR_RING_SRV_ARB_EN, \ + (value)) + +/* WDT timers + * + * Timeout is in cycles. Clock speed may vary across products but this + * value should be a few milli-seconds. + */ +#define ADF_SSM_WDT_DEFAULT_VALUE 0x7000000ULL +#define ADF_SSM_WDT_PKE_DEFAULT_VALUE 0x8000000 +#define ADF_SSMWDTL_OFFSET 0x54 +#define ADF_SSMWDTH_OFFSET 0x5C +#define ADF_SSMWDTPKEL_OFFSET 0x58 +#define ADF_SSMWDTPKEH_OFFSET 0x60 + +#define ADF_NUM_HB_CNT_PER_AE (ADF_NUM_THREADS_PER_AE) + +int adf_gen4_set_ssm_wdtimer(struct adf_accel_dev *accel_dev); +void adf_gen4_init_hw_csr_info(struct adf_hw_csr_info *csr_info); +#endif diff --git a/sys/dev/qat/include/common/icp_qat_fw_loader_handle.h b/sys/dev/qat/include/common/icp_qat_fw_loader_handle.h index a8afb5a4b377..7f6e80eeb431 100644 --- a/sys/dev/qat/include/common/icp_qat_fw_loader_handle.h +++ b/sys/dev/qat/include/common/icp_qat_fw_loader_handle.h @@ -1,53 +1,54 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright(c) 2007-2022 Intel Corporation */ /* $FreeBSD$ */ #ifndef __ICP_QAT_FW_LOADER_HANDLE_H__ #define __ICP_QAT_FW_LOADER_HANDLE_H__ #include "icp_qat_uclo.h" struct icp_qat_fw_loader_ae_data { unsigned int state; unsigned int ustore_size; unsigned int free_addr; unsigned int free_size; unsigned int live_ctx_mask; }; struct icp_qat_fw_loader_hal_handle { struct icp_qat_fw_loader_ae_data aes[ICP_QAT_UCLO_MAX_AE]; unsigned int ae_mask; + unsigned int admin_ae_mask; unsigned int slice_mask; unsigned int revision_id; unsigned int ae_max_num; unsigned int upc_mask; unsigned int max_ustore; }; struct icp_qat_fw_loader_handle { struct icp_qat_fw_loader_hal_handle *hal_handle; struct adf_accel_dev *accel_dev; device_t pci_dev; void *obj_handle; void *sobj_handle; void *mobj_handle; bool fw_auth; unsigned int cfg_ae_mask; rman_res_t hal_sram_size; struct resource *hal_sram_addr_v; unsigned int hal_sram_offset; struct resource *hal_misc_addr_v; uintptr_t hal_cap_g_ctl_csr_addr_v; uintptr_t hal_cap_ae_xfer_csr_addr_v; uintptr_t hal_cap_ae_local_csr_addr_v; uintptr_t hal_ep_csr_addr_v; }; struct icp_firml_dram_desc { struct bus_dmamem dram_mem; struct resource *dram_base_addr; void *dram_base_addr_v; bus_addr_t dram_bus_addr; u64 dram_size; }; #endif diff --git a/sys/dev/qat/include/common/icp_qat_hal.h b/sys/dev/qat/include/common/icp_qat_hal.h index 3a7475f25333..68e12826a7e8 100644 --- a/sys/dev/qat/include/common/icp_qat_hal.h +++ b/sys/dev/qat/include/common/icp_qat_hal.h @@ -1,196 +1,223 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright(c) 2007-2022 Intel Corporation */ /* $FreeBSD$ */ #ifndef __ICP_QAT_HAL_H #define __ICP_QAT_HAL_H #include "adf_accel_devices.h" #include "icp_qat_fw_loader_handle.h" enum hal_global_csr { MISC_CONTROL = 0x04, ICP_RESET = 0x0c, ICP_GLOBAL_CLK_ENABLE = 0x50 }; enum { MISC_CONTROL_C4XXX = 0xAA0, ICP_RESET_CPP0 = 0x938, ICP_RESET_CPP1 = 0x93c, ICP_GLOBAL_CLK_ENABLE_CPP0 = 0x964, ICP_GLOBAL_CLK_ENABLE_CPP1 = 0x968 }; enum hal_ae_csr { USTORE_ADDRESS = 0x000, USTORE_DATA_LOWER = 0x004, USTORE_DATA_UPPER = 0x008, ALU_OUT = 0x010, CTX_ARB_CNTL = 0x014, CTX_ENABLES = 0x018, CC_ENABLE = 0x01c, CSR_CTX_POINTER = 0x020, CTX_STS_INDIRECT = 0x040, ACTIVE_CTX_STATUS = 0x044, CTX_SIG_EVENTS_INDIRECT = 0x048, CTX_SIG_EVENTS_ACTIVE = 0x04c, CTX_WAKEUP_EVENTS_INDIRECT = 0x050, LM_ADDR_0_INDIRECT 
= 0x060, LM_ADDR_1_INDIRECT = 0x068, LM_ADDR_2_INDIRECT = 0x0cc, LM_ADDR_3_INDIRECT = 0x0d4, INDIRECT_LM_ADDR_0_BYTE_INDEX = 0x0e0, INDIRECT_LM_ADDR_1_BYTE_INDEX = 0x0e8, INDIRECT_LM_ADDR_2_BYTE_INDEX = 0x10c, INDIRECT_LM_ADDR_3_BYTE_INDEX = 0x114, INDIRECT_T_INDEX = 0x0f8, INDIRECT_T_INDEX_BYTE_INDEX = 0x0fc, FUTURE_COUNT_SIGNAL_INDIRECT = 0x078, TIMESTAMP_LOW = 0x0c0, TIMESTAMP_HIGH = 0x0c4, PROFILE_COUNT = 0x144, SIGNATURE_ENABLE = 0x150, AE_MISC_CONTROL = 0x160, LOCAL_CSR_STATUS = 0x180, }; enum fcu_csr { - FCU_CONTROL = 0x0, - FCU_STATUS = 0x4, - FCU_DRAM_ADDR_LO = 0xc, + FCU_CONTROL = 0x00, + FCU_STATUS = 0x04, + FCU_DRAM_ADDR_LO = 0x0c, FCU_DRAM_ADDR_HI = 0x10, FCU_RAMBASE_ADDR_HI = 0x14, FCU_RAMBASE_ADDR_LO = 0x18 }; enum fcu_csr_c4xxx { - FCU_CONTROL_C4XXX = 0x0, - FCU_STATUS_C4XXX = 0x4, - FCU_STATUS1_C4XXX = 0xc, + FCU_CONTROL_C4XXX = 0x00, + FCU_STATUS_C4XXX = 0x04, + FCU_STATUS1_C4XXX = 0x0c, FCU_AE_LOADED_C4XXX = 0x10, FCU_DRAM_ADDR_LO_C4XXX = 0x14, FCU_DRAM_ADDR_HI_C4XXX = 0x18, }; +enum fcu_csr_4xxx { + FCU_CONTROL_4XXX = 0x00, + FCU_STATUS_4XXX = 0x04, + FCU_ME_BROADCAST_MASK_TYPE = 0x08, + FCU_AE_LOADED_4XXX = 0x10, + FCU_DRAM_ADDR_LO_4XXX = 0x14, + FCU_DRAM_ADDR_HI_4XXX = 0x18, +}; + enum fcu_cmd { FCU_CTRL_CMD_NOOP = 0, FCU_CTRL_CMD_AUTH = 1, FCU_CTRL_CMD_LOAD = 2, FCU_CTRL_CMD_START = 3 }; enum fcu_sts { FCU_STS_NO_STS = 0, FCU_STS_VERI_DONE = 1, FCU_STS_LOAD_DONE = 2, FCU_STS_VERI_FAIL = 3, FCU_STS_LOAD_FAIL = 4, FCU_STS_BUSY = 5 }; #define UA_ECS (0x1 << 31) #define ACS_ABO_BITPOS 31 #define ACS_ACNO 0x7 #define CE_ENABLE_BITPOS 0x8 #define CE_LMADDR_0_GLOBAL_BITPOS 16 #define CE_LMADDR_1_GLOBAL_BITPOS 17 #define CE_LMADDR_2_GLOBAL_BITPOS 22 #define CE_LMADDR_3_GLOBAL_BITPOS 23 #define CE_T_INDEX_GLOBAL_BITPOS 21 #define CE_NN_MODE_BITPOS 20 #define CE_REG_PAR_ERR_BITPOS 25 #define CE_BREAKPOINT_BITPOS 27 #define CE_CNTL_STORE_PARITY_ERROR_BITPOS 29 #define CE_INUSE_CONTEXTS_BITPOS 31 #define CE_NN_MODE (0x1 << CE_NN_MODE_BITPOS) #define CE_INUSE_CONTEXTS (0x1 << CE_INUSE_CONTEXTS_BITPOS) #define XCWE_VOLUNTARY (0x1) #define LCS_STATUS (0x1) #define MMC_SHARE_CS_BITPOS 2 #define GLOBAL_CSR 0xA00 +#define FCU_CTRL_BROADCAST_POS 0x4 #define FCU_CTRL_AE_POS 0x8 #define FCU_AUTH_STS_MASK 0x7 #define FCU_STS_DONE_POS 0x9 #define FCU_STS_AUTHFWLD_POS 0X8 #define FCU_LOADED_AE_POS 0x16 #define FW_AUTH_WAIT_PERIOD 10 #define FW_AUTH_MAX_RETRY 300 +#define FW_BROADCAST_MAX_RETRY 300 #define FCU_OFFSET 0x8c0 #define FCU_OFFSET_C4XXX 0x1000 +#define FCU_OFFSET_4XXX 0x1000 #define MAX_CPP_NUM 2 #define AE_CPP_NUM 2 #define AES_PER_CPP 16 #define SLICES_PER_CPP 6 #define ICP_QAT_AE_OFFSET 0x20000 #define ICP_QAT_AE_OFFSET_C4XXX 0x40000 +#define ICP_QAT_AE_OFFSET_4XXX 0x600000 #define ICP_QAT_CAP_OFFSET (ICP_QAT_AE_OFFSET + 0x10000) #define ICP_QAT_CAP_OFFSET_C4XXX 0x70000 +#define ICP_QAT_CAP_OFFSET_4XXX 0x640000 #define LOCAL_TO_XFER_REG_OFFSET 0x800 #define ICP_QAT_EP_OFFSET 0x3a000 #define ICP_QAT_EP_OFFSET_C4XXX 0x60000 +#define ICP_QAT_EP_OFFSET_4XXX 0x200000 /* HI MMIO CSRs */ #define MEM_CFG_ERR_BIT 0x20 +#define AE_TG_NUM_CPM2X 4 #define CAP_CSR_ADDR(csr) (csr + handle->hal_cap_g_ctl_csr_addr_v) #define SET_CAP_CSR(handle, csr, val) \ ADF_CSR_WR(handle->hal_misc_addr_v, CAP_CSR_ADDR(csr), val) #define GET_CAP_CSR(handle, csr) \ ADF_CSR_RD(handle->hal_misc_addr_v, CAP_CSR_ADDR(csr)) #define SET_GLB_CSR(handle, csr, val) \ ({ \ - typeof(handle) handle_ = (handle); \ - typeof(csr) csr_ = (csr); \ - typeof(val) val_ = (val); \ - 
(IS_QAT_GEN3(pci_get_device(GET_DEV(handle_->accel_dev)))) ? \ - SET_CAP_CSR(handle_, (csr_), (val_)) : \ - SET_CAP_CSR(handle_, csr_ + GLOBAL_CSR, val_); \ + u32 dev_id = pci_get_device(GET_DEV((handle)->accel_dev)); \ + (IS_QAT_GEN3_OR_GEN4(dev_id)) ? \ + SET_CAP_CSR((handle), (csr), (val)) : \ + SET_CAP_CSR((handle), (csr) + GLOBAL_CSR, val); \ }) #define GET_GLB_CSR(handle, csr) \ ({ \ - typeof(handle) handle_ = (handle); \ - typeof(csr) csr_ = (csr); \ - (IS_QAT_GEN3(pci_get_device(GET_DEV(handle_->accel_dev)))) ? \ - (GET_CAP_CSR(handle_, (csr_))) : \ - (GET_CAP_CSR(handle_, (GLOBAL_CSR + (csr_)))); \ + u32 dev_id = pci_get_device(GET_DEV((handle)->accel_dev)); \ + (IS_QAT_GEN3_OR_GEN4(dev_id)) ? \ + GET_CAP_CSR((handle), (csr)) : \ + GET_CAP_CSR((handle), (csr) + GLOBAL_CSR); \ }) #define SET_FCU_CSR(handle, csr, val) \ ({ \ typeof(handle) handle_ = (handle); \ typeof(csr) csr_ = (csr); \ typeof(val) val_ = (val); \ (IS_QAT_GEN3(pci_get_device(GET_DEV(handle_->accel_dev)))) ? \ SET_CAP_CSR(handle_, \ ((csr_) + FCU_OFFSET_C4XXX), \ (val_)) : \ - SET_CAP_CSR(handle_, ((csr_) + FCU_OFFSET), (val_)); \ + ((IS_QAT_GEN4( \ + pci_get_device(GET_DEV(handle_->accel_dev)))) ? \ + SET_CAP_CSR(handle_, \ + ((csr_) + FCU_OFFSET_4XXX), \ + (val_)) : \ + SET_CAP_CSR(handle_, ((csr_) + FCU_OFFSET), (val_))); \ }) #define GET_FCU_CSR(handle, csr) \ ({ \ typeof(handle) handle_ = (handle); \ typeof(csr) csr_ = (csr); \ (IS_QAT_GEN3(pci_get_device(GET_DEV(handle_->accel_dev)))) ? \ GET_CAP_CSR(handle_, (FCU_OFFSET_C4XXX + (csr_))) : \ - GET_CAP_CSR(handle_, (FCU_OFFSET + (csr_))); \ + ((IS_QAT_GEN4( \ + pci_get_device(GET_DEV(handle_->accel_dev)))) ? \ + GET_CAP_CSR(handle_, (FCU_OFFSET_4XXX + (csr_))) : \ + GET_CAP_CSR(handle_, (FCU_OFFSET + (csr_)))); \ }) #define AE_CSR(handle, ae) \ ((handle)->hal_cap_ae_local_csr_addr_v + ((ae) << 12)) #define AE_CSR_ADDR(handle, ae, csr) (AE_CSR(handle, ae) + (0x3ff & (csr))) #define SET_AE_CSR(handle, ae, csr, val) \ ADF_CSR_WR(handle->hal_misc_addr_v, AE_CSR_ADDR(handle, ae, csr), val) #define GET_AE_CSR(handle, ae, csr) \ ADF_CSR_RD(handle->hal_misc_addr_v, AE_CSR_ADDR(handle, ae, csr)) #define AE_XFER(handle, ae) \ ((handle)->hal_cap_ae_xfer_csr_addr_v + ((ae) << 12)) #define AE_XFER_ADDR(handle, ae, reg) \ (AE_XFER(handle, ae) + (((reg)&0xff) << 2)) #define SET_AE_XFER(handle, ae, reg, val) \ ADF_CSR_WR(handle->hal_misc_addr_v, AE_XFER_ADDR(handle, ae, reg), val) #define SRAM_WRITE(handle, addr, val) \ ADF_CSR_WR((handle)->hal_sram_addr_v, addr, val) #define GET_CSR_OFFSET(device_id, cap_offset_, ae_offset_, ep_offset_) \ ({ \ - int gen3 = IS_QAT_GEN3(device_id); \ - cap_offset_ = \ - (gen3 ? ICP_QAT_CAP_OFFSET_C4XXX : ICP_QAT_CAP_OFFSET); \ - ae_offset_ = \ - (gen3 ? ICP_QAT_AE_OFFSET_C4XXX : ICP_QAT_AE_OFFSET); \ - ep_offset_ = \ - (gen3 ? 
ICP_QAT_EP_OFFSET_C4XXX : ICP_QAT_EP_OFFSET); \ + if (IS_QAT_GEN3(device_id)) { \ + cap_offset_ = ICP_QAT_CAP_OFFSET_C4XXX; \ + ae_offset_ = ICP_QAT_AE_OFFSET_C4XXX; \ + ep_offset_ = ICP_QAT_EP_OFFSET_C4XXX; \ + } else if (IS_QAT_GEN4(device_id)) { \ + cap_offset_ = ICP_QAT_CAP_OFFSET_4XXX; \ + ae_offset_ = ICP_QAT_AE_OFFSET_4XXX; \ + ep_offset_ = ICP_QAT_EP_OFFSET_4XXX; \ + } else { \ + cap_offset_ = ICP_QAT_CAP_OFFSET; \ + ae_offset_ = ICP_QAT_AE_OFFSET; \ + ep_offset_ = ICP_QAT_EP_OFFSET; \ + } \ }) #endif diff --git a/sys/dev/qat/include/common/icp_qat_uclo.h b/sys/dev/qat/include/common/icp_qat_uclo.h index 21a1c2fc8ace..1bdddce1d85e 100644 --- a/sys/dev/qat/include/common/icp_qat_uclo.h +++ b/sys/dev/qat/include/common/icp_qat_uclo.h @@ -1,558 +1,580 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright(c) 2007-2022 Intel Corporation */ /* $FreeBSD$ */ #ifndef __ICP_QAT_UCLO_H__ #define __ICP_QAT_UCLO_H__ #define ICP_QAT_AC_895XCC_DEV_TYPE 0x00400000 #define ICP_QAT_AC_C62X_DEV_TYPE 0x01000000 #define ICP_QAT_AC_C3XXX_DEV_TYPE 0x02000000 #define ICP_QAT_AC_200XX_DEV_TYPE 0x02000000 #define ICP_QAT_AC_C4XXX_DEV_TYPE 0x04000000 +#define ICP_QAT_AC_4XXX_A_DEV_TYPE 0x08000000 #define ICP_QAT_UCLO_MAX_AE 32 #define ICP_QAT_UCLO_MAX_CTX 8 #define ICP_QAT_UCLO_MAX_CPPNUM 2 #define ICP_QAT_UCLO_MAX_UIMAGE (ICP_QAT_UCLO_MAX_AE * ICP_QAT_UCLO_MAX_CTX) #define ICP_QAT_UCLO_MAX_USTORE 0x4000 #define ICP_QAT_UCLO_MAX_XFER_REG 128 #define ICP_QAT_UCLO_MAX_GPR_REG 128 #define ICP_QAT_UCLO_MAX_LMEM_REG 1024 +#define ICP_QAT_UCLO_MAX_LMEM_REG_2X 1280 #define ICP_QAT_UCLO_AE_ALL_CTX 0xff #define ICP_QAT_UOF_OBJID_LEN 8 #define ICP_QAT_UOF_FID 0xc6c2 #define ICP_QAT_UOF_MAJVER 0x4 #define ICP_QAT_UOF_MINVER 0x11 #define ICP_QAT_UOF_OBJS "UOF_OBJS" #define ICP_QAT_UOF_STRT "UOF_STRT" #define ICP_QAT_UOF_IMAG "UOF_IMAG" #define ICP_QAT_UOF_IMEM "UOF_IMEM" #define ICP_QAT_UOF_LOCAL_SCOPE 1 #define ICP_QAT_UOF_INIT_EXPR 0 #define ICP_QAT_UOF_INIT_REG 1 #define ICP_QAT_UOF_INIT_REG_CTX 2 #define ICP_QAT_UOF_INIT_EXPR_ENDIAN_SWAP 3 #define ICP_QAT_SUOF_OBJ_ID_LEN 8 #define ICP_QAT_SUOF_FID 0x53554f46 #define ICP_QAT_SUOF_MAJVER 0x0 #define ICP_QAT_SUOF_MINVER 0x1 #define ICP_QAT_SUOF_OBJ_NAME_LEN 128 #define ICP_QAT_MOF_OBJ_ID_LEN 8 #define ICP_QAT_MOF_OBJ_CHUNKID_LEN 8 #define ICP_QAT_MOF_FID 0x00666f6d #define ICP_QAT_MOF_MAJVER 0x0 #define ICP_QAT_MOF_MINVER 0x1 #define ICP_QAT_MOF_SYM_OBJS "SYM_OBJS" #define ICP_QAT_SUOF_OBJS "SUF_OBJS" #define ICP_QAT_SUOF_IMAG "SUF_IMAG" #define ICP_QAT_SIMG_AE_INIT_SEQ_LEN (50 * sizeof(unsigned long long)) #define ICP_QAT_SIMG_AE_INSTS_LEN (0x4000 * sizeof(unsigned long long)) -#define ICP_QAT_CSS_FWSK_MODULUS_LEN 256 -#define ICP_QAT_CSS_FWSK_EXPONENT_LEN 4 -#define ICP_QAT_CSS_FWSK_PAD_LEN 252 -#define ICP_QAT_CSS_FWSK_PUB_LEN \ - (ICP_QAT_CSS_FWSK_MODULUS_LEN + ICP_QAT_CSS_FWSK_EXPONENT_LEN + \ - ICP_QAT_CSS_FWSK_PAD_LEN) -#define ICP_QAT_CSS_SIGNATURE_LEN 256 + +#define DSS_FWSK_MODULUS_LEN 384 // RSA3K +#define DSS_FWSK_EXPONENT_LEN 4 +#define DSS_FWSK_PADDING_LEN 380 +#define DSS_SIGNATURE_LEN 384 // RSA3K + +#define CSS_FWSK_MODULUS_LEN 256 // RSA2K +#define CSS_FWSK_EXPONENT_LEN 4 +#define CSS_FWSK_PADDING_LEN 252 +#define CSS_SIGNATURE_LEN 256 // RSA2K + +#define ICP_QAT_CSS_FWSK_MODULUS_LEN(ID) \ + (IS_QAT_GEN4(ID) ? DSS_FWSK_MODULUS_LEN : CSS_FWSK_MODULUS_LEN) + +#define ICP_QAT_CSS_FWSK_EXPONENT_LEN(ID) \ + (IS_QAT_GEN4(ID) ? DSS_FWSK_EXPONENT_LEN : CSS_FWSK_EXPONENT_LEN) + +#define ICP_QAT_CSS_FWSK_PAD_LEN(ID) \ + (IS_QAT_GEN4(ID) ? 
DSS_FWSK_PADDING_LEN : CSS_FWSK_PADDING_LEN) + +#define ICP_QAT_CSS_FWSK_PUB_LEN(ID) \ + (ICP_QAT_CSS_FWSK_MODULUS_LEN(ID) + \ + ICP_QAT_CSS_FWSK_EXPONENT_LEN(ID) + ICP_QAT_CSS_FWSK_PAD_LEN(ID)) + +#define ICP_QAT_CSS_SIGNATURE_LEN(ID) \ + (IS_QAT_GEN4(ID) ? DSS_SIGNATURE_LEN : CSS_SIGNATURE_LEN) + #define ICP_QAT_CSS_AE_IMG_LEN \ (sizeof(struct icp_qat_simg_ae_mode) + ICP_QAT_SIMG_AE_INIT_SEQ_LEN + \ ICP_QAT_SIMG_AE_INSTS_LEN) -#define ICP_QAT_CSS_AE_SIMG_LEN \ - (sizeof(struct icp_qat_css_hdr) + ICP_QAT_CSS_FWSK_PUB_LEN + \ - ICP_QAT_CSS_SIGNATURE_LEN + ICP_QAT_CSS_AE_IMG_LEN) -#define ICP_QAT_AE_IMG_OFFSET \ - (sizeof(struct icp_qat_css_hdr) + ICP_QAT_CSS_FWSK_MODULUS_LEN + \ - ICP_QAT_CSS_FWSK_EXPONENT_LEN + ICP_QAT_CSS_SIGNATURE_LEN) +#define ICP_QAT_CSS_AE_SIMG_LEN(ID) \ + (sizeof(struct icp_qat_css_hdr) + ICP_QAT_CSS_FWSK_PUB_LEN(ID) + \ + ICP_QAT_CSS_SIGNATURE_LEN(ID) + ICP_QAT_CSS_AE_IMG_LEN) +#define ICP_QAT_AE_IMG_OFFSET(ID) \ + (sizeof(struct icp_qat_css_hdr) + ICP_QAT_CSS_FWSK_MODULUS_LEN(ID) + \ + ICP_QAT_CSS_FWSK_EXPONENT_LEN(ID) + ICP_QAT_CSS_SIGNATURE_LEN(ID)) #define ICP_QAT_CSS_MAX_IMAGE_LEN 0x40000 #define ICP_QAT_CTX_MODE(ae_mode) ((ae_mode)&0xf) #define ICP_QAT_NN_MODE(ae_mode) (((ae_mode) >> 0x4) & 0xf) #define ICP_QAT_SHARED_USTORE_MODE(ae_mode) (((ae_mode) >> 0xb) & 0x1) #define RELOADABLE_CTX_SHARED_MODE(ae_mode) (((ae_mode) >> 0xc) & 0x1) #define ICP_QAT_LOC_MEM0_MODE(ae_mode) (((ae_mode) >> 0x8) & 0x1) #define ICP_QAT_LOC_MEM1_MODE(ae_mode) (((ae_mode) >> 0x9) & 0x1) #define ICP_QAT_LOC_MEM2_MODE(ae_mode) (((ae_mode) >> 0x6) & 0x1) #define ICP_QAT_LOC_MEM3_MODE(ae_mode) (((ae_mode) >> 0x7) & 0x1) #define ICP_QAT_LOC_TINDEX_MODE(ae_mode) (((ae_mode) >> 0xe) & 0x1) enum icp_qat_uof_mem_region { ICP_QAT_UOF_SRAM_REGION = 0x0, ICP_QAT_UOF_LMEM_REGION = 0x3, ICP_QAT_UOF_UMEM_REGION = 0x5 }; enum icp_qat_uof_regtype { ICP_NO_DEST = 0, ICP_GPA_REL = 1, ICP_GPA_ABS = 2, ICP_GPB_REL = 3, ICP_GPB_ABS = 4, ICP_SR_REL = 5, ICP_SR_RD_REL = 6, ICP_SR_WR_REL = 7, ICP_SR_ABS = 8, ICP_SR_RD_ABS = 9, ICP_SR_WR_ABS = 10, ICP_DR_REL = 19, ICP_DR_RD_REL = 20, ICP_DR_WR_REL = 21, ICP_DR_ABS = 22, ICP_DR_RD_ABS = 23, ICP_DR_WR_ABS = 24, ICP_LMEM = 26, ICP_LMEM0 = 27, ICP_LMEM1 = 28, ICP_NEIGH_REL = 31, ICP_LMEM2 = 61, ICP_LMEM3 = 62, }; enum icp_qat_css_fwtype { CSS_AE_FIRMWARE = 0, CSS_MMP_FIRMWARE = 1 }; struct icp_qat_uclo_page { struct icp_qat_uclo_encap_page *encap_page; struct icp_qat_uclo_region *region; unsigned int flags; }; struct icp_qat_uclo_region { struct icp_qat_uclo_page *loaded; struct icp_qat_uclo_page *page; }; struct icp_qat_uclo_aeslice { struct icp_qat_uclo_region *region; struct icp_qat_uclo_page *page; struct icp_qat_uclo_page *cur_page[ICP_QAT_UCLO_MAX_CTX]; struct icp_qat_uclo_encapme *encap_image; unsigned int ctx_mask_assigned; unsigned int new_uaddr[ICP_QAT_UCLO_MAX_CTX]; }; struct icp_qat_uclo_aedata { unsigned int slice_num; unsigned int eff_ustore_size; struct icp_qat_uclo_aeslice ae_slices[ICP_QAT_UCLO_MAX_CTX]; unsigned int shareable_ustore; }; struct icp_qat_uof_encap_obj { char *beg_uof; struct icp_qat_uof_objhdr *obj_hdr; struct icp_qat_uof_chunkhdr *chunk_hdr; struct icp_qat_uof_varmem_seg *var_mem_seg; }; struct icp_qat_uclo_encap_uwblock { unsigned int start_addr; unsigned int words_num; uint64_t micro_words; }; struct icp_qat_uclo_encap_page { unsigned int def_page; unsigned int page_region; unsigned int beg_addr_v; unsigned int beg_addr_p; unsigned int micro_words_num; unsigned int uwblock_num; struct icp_qat_uclo_encap_uwblock *uwblock; }; 
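/*
 * Worked example for the generation-dependent signed-image macros defined
 * earlier in this header (a sketch; it assumes a 4-byte unsigned int and
 * no structure padding, so sizeof(struct icp_qat_css_hdr) = 32 * 4 = 128):
 *
 *   GEN2/GEN3 (CSS, RSA2K):
 *     ICP_QAT_CSS_FWSK_PUB_LEN(id) = 256 + 4 + 252 = 512
 *     ICP_QAT_AE_IMG_OFFSET(id)    = 128 + 256 + 4 + 256 = 644
 *
 *   GEN4/4xxx (DSS, RSA3K):
 *     ICP_QAT_CSS_FWSK_PUB_LEN(id) = 384 + 4 + 380 = 768
 *     ICP_QAT_AE_IMG_OFFSET(id)    = 128 + 384 + 4 + 384 = 900
 */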
struct icp_qat_uclo_encapme { struct icp_qat_uof_image *img_ptr; struct icp_qat_uclo_encap_page *page; unsigned int ae_reg_num; struct icp_qat_uof_ae_reg *ae_reg; unsigned int init_regsym_num; struct icp_qat_uof_init_regsym *init_regsym; unsigned int sbreak_num; struct icp_qat_uof_sbreak *sbreak; unsigned int uwords_num; }; struct icp_qat_uclo_init_mem_table { unsigned int entry_num; struct icp_qat_uof_initmem *init_mem; }; struct icp_qat_uclo_objhdr { char *file_buff; unsigned int checksum; unsigned int size; }; struct icp_qat_uof_strtable { unsigned int table_len; unsigned int reserved; uint64_t strings; }; struct icp_qat_uclo_objhandle { unsigned int prod_type; unsigned int prod_rev; struct icp_qat_uclo_objhdr *obj_hdr; struct icp_qat_uof_encap_obj encap_uof_obj; struct icp_qat_uof_strtable str_table; struct icp_qat_uclo_encapme ae_uimage[ICP_QAT_UCLO_MAX_UIMAGE]; struct icp_qat_uclo_aedata ae_data[ICP_QAT_UCLO_MAX_AE]; struct icp_qat_uclo_init_mem_table init_mem_tab; struct icp_qat_uof_batch_init *lm_init_tab[ICP_QAT_UCLO_MAX_AE]; struct icp_qat_uof_batch_init *umem_init_tab[ICP_QAT_UCLO_MAX_AE]; int uimage_num; int uword_in_bytes; int global_inited; unsigned int ae_num; unsigned int ustore_phy_size; void *obj_buf; uint64_t *uword_buf; }; struct icp_qat_uof_uword_block { unsigned int start_addr; unsigned int words_num; unsigned int uword_offset; unsigned int reserved; }; struct icp_qat_uof_filehdr { unsigned short file_id; unsigned short reserved1; char min_ver; char maj_ver; unsigned short reserved2; unsigned short max_chunks; unsigned short num_chunks; }; struct icp_qat_uof_filechunkhdr { char chunk_id[ICP_QAT_UOF_OBJID_LEN]; unsigned int checksum; unsigned int offset; unsigned int size; }; struct icp_qat_uof_objhdr { unsigned int ac_dev_type; unsigned short min_cpu_ver; unsigned short max_cpu_ver; short max_chunks; short num_chunks; unsigned int reserved1; unsigned int reserved2; }; struct icp_qat_uof_chunkhdr { char chunk_id[ICP_QAT_UOF_OBJID_LEN]; unsigned int offset; unsigned int size; }; struct icp_qat_uof_memvar_attr { unsigned int offset_in_byte; unsigned int value; }; struct icp_qat_uof_initmem { unsigned int sym_name; char region; char scope; unsigned short reserved1; unsigned int addr; unsigned int num_in_bytes; unsigned int val_attr_num; }; struct icp_qat_uof_init_regsym { unsigned int sym_name; char init_type; char value_type; char reg_type; unsigned char ctx; unsigned int reg_addr; unsigned int value; }; struct icp_qat_uof_varmem_seg { unsigned int sram_base; unsigned int sram_size; unsigned int sram_alignment; unsigned int sdram_base; unsigned int sdram_size; unsigned int sdram_alignment; unsigned int sdram1_base; unsigned int sdram1_size; unsigned int sdram1_alignment; unsigned int scratch_base; unsigned int scratch_size; unsigned int scratch_alignment; }; struct icp_qat_uof_gtid { char tool_id[ICP_QAT_UOF_OBJID_LEN]; int tool_ver; unsigned int reserved1; unsigned int reserved2; }; struct icp_qat_uof_sbreak { unsigned int page_num; unsigned int virt_uaddr; unsigned char sbreak_type; unsigned char reg_type; unsigned short reserved1; unsigned int addr_offset; unsigned int reg_addr; }; struct icp_qat_uof_code_page { unsigned int page_region; unsigned int page_num; unsigned char def_page; unsigned char reserved2; unsigned short reserved1; unsigned int beg_addr_v; unsigned int beg_addr_p; unsigned int neigh_reg_tab_offset; unsigned int uc_var_tab_offset; unsigned int imp_var_tab_offset; unsigned int imp_expr_tab_offset; unsigned int code_area_offset; }; struct 
icp_qat_uof_image { unsigned int img_name; unsigned int ae_assigned; unsigned int ctx_assigned; unsigned int ac_dev_type; unsigned int entry_address; unsigned int fill_pattern[2]; unsigned int reloadable_size; unsigned char sensitivity; unsigned char reserved; unsigned short ae_mode; unsigned short max_ver; unsigned short min_ver; unsigned short image_attrib; unsigned short reserved2; unsigned short page_region_num; unsigned short numpages; unsigned int reg_tab_offset; unsigned int init_reg_sym_tab; unsigned int sbreak_tab; unsigned int app_metadata; }; struct icp_qat_uof_objtable { unsigned int entry_num; }; struct icp_qat_uof_ae_reg { unsigned int name; unsigned int vis_name; unsigned short type; unsigned short addr; unsigned short access_mode; unsigned char visible; unsigned char reserved1; unsigned short ref_count; unsigned short reserved2; unsigned int xo_id; }; struct icp_qat_uof_code_area { unsigned int micro_words_num; unsigned int uword_block_tab; }; struct icp_qat_uof_batch_init { unsigned int ae; unsigned int addr; unsigned int *value; unsigned int size; struct icp_qat_uof_batch_init *next; }; struct icp_qat_suof_img_hdr { const char *simg_buf; unsigned long simg_len; const char *css_header; const char *css_key; const char *css_signature; const char *css_simg; unsigned long simg_size; unsigned int ae_num; unsigned int ae_mask; unsigned int fw_type; unsigned long simg_name; unsigned long appmeta_data; }; struct icp_qat_suof_img_tbl { unsigned int num_simgs; struct icp_qat_suof_img_hdr *simg_hdr; }; struct icp_qat_suof_handle { unsigned int file_id; unsigned int check_sum; char min_ver; char maj_ver; char fw_type; const char *suof_buf; unsigned int suof_size; char *sym_str; unsigned int sym_size; struct icp_qat_suof_img_tbl img_table; }; struct icp_qat_fw_auth_desc { unsigned int img_len; unsigned int ae_mask; unsigned int css_hdr_high; unsigned int css_hdr_low; unsigned int img_high; unsigned int img_low; unsigned int signature_high; unsigned int signature_low; unsigned int fwsk_pub_high; unsigned int fwsk_pub_low; unsigned int img_ae_mode_data_high; unsigned int img_ae_mode_data_low; unsigned int img_ae_init_data_high; unsigned int img_ae_init_data_low; unsigned int img_ae_insts_high; unsigned int img_ae_insts_low; }; struct icp_qat_auth_chunk { struct icp_qat_fw_auth_desc fw_auth_desc; u64 chunk_size; u64 chunk_bus_addr; }; struct icp_qat_css_hdr { unsigned int module_type; unsigned int header_len; unsigned int header_ver; unsigned int module_id; unsigned int module_vendor; unsigned int date; unsigned int size; unsigned int key_size; unsigned int module_size; unsigned int exponent_size; unsigned int fw_type; unsigned int reserved[21]; }; struct icp_qat_simg_ae_mode { unsigned int file_id; unsigned short maj_ver; unsigned short min_ver; unsigned int dev_type; unsigned short devmax_ver; unsigned short devmin_ver; unsigned int ae_mask; unsigned int ctx_enables; char fw_type; char ctx_mode; char nn_mode; char lm0_mode; char lm1_mode; char scs_mode; char lm2_mode; char lm3_mode; char tindex_mode; unsigned char reserved[7]; char simg_name[256]; char appmeta_data[256]; }; struct icp_qat_suof_filehdr { unsigned int file_id; unsigned int check_sum; char min_ver; char maj_ver; char fw_type; char reserved; unsigned short max_chunks; unsigned short num_chunks; }; struct icp_qat_suof_chunk_hdr { char chunk_id[ICP_QAT_SUOF_OBJ_ID_LEN]; u64 offset; u64 size; }; struct icp_qat_suof_strtable { unsigned int tab_length; unsigned int strings; }; struct icp_qat_suof_objhdr { unsigned int 
img_length; unsigned int reserved; }; struct icp_qat_mof_file_hdr { unsigned int file_id; unsigned int checksum; char min_ver; char maj_ver; unsigned short reserved; unsigned short max_chunks; unsigned short num_chunks; }; struct icp_qat_mof_chunkhdr { char chunk_id[ICP_QAT_MOF_OBJ_ID_LEN]; u64 offset; u64 size; }; struct icp_qat_mof_str_table { unsigned int tab_len; unsigned int strings; }; struct icp_qat_mof_obj_hdr { unsigned short max_chunks; unsigned short num_chunks; unsigned int reserved; }; struct icp_qat_mof_obj_chunkhdr { char chunk_id[ICP_QAT_MOF_OBJ_CHUNKID_LEN]; u64 offset; u64 size; unsigned int name; unsigned int reserved; }; struct icp_qat_mof_objhdr { char *obj_name; const char *obj_buf; unsigned int obj_size; }; struct icp_qat_mof_table { unsigned int num_objs; struct icp_qat_mof_objhdr *obj_hdr; }; struct icp_qat_mof_handle { unsigned int file_id; unsigned int checksum; char min_ver; char maj_ver; const char *mof_buf; u32 mof_size; char *sym_str; unsigned int sym_size; const char *uobjs_hdr; const char *sobjs_hdr; struct icp_qat_mof_table obj_table; }; #endif diff --git a/sys/dev/qat/include/icp_qat_fw_init_admin.h b/sys/dev/qat/include/icp_qat_fw_init_admin.h index 6f88de144770..9c1482158443 100644 --- a/sys/dev/qat/include/icp_qat_fw_init_admin.h +++ b/sys/dev/qat/include/icp_qat_fw_init_admin.h @@ -1,222 +1,218 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright(c) 2007-2022 Intel Corporation */ /* $FreeBSD$ */ #ifndef _ICP_QAT_FW_INIT_ADMIN_H_ #define _ICP_QAT_FW_INIT_ADMIN_H_ #include "icp_qat_fw.h" enum icp_qat_fw_init_admin_cmd_id { ICP_QAT_FW_INIT_ME = 0, ICP_QAT_FW_TRNG_ENABLE = 1, ICP_QAT_FW_TRNG_DISABLE = 2, ICP_QAT_FW_CONSTANTS_CFG = 3, ICP_QAT_FW_STATUS_GET = 4, ICP_QAT_FW_COUNTERS_GET = 5, ICP_QAT_FW_LOOPBACK = 6, ICP_QAT_FW_HEARTBEAT_SYNC = 7, ICP_QAT_FW_HEARTBEAT_GET = 8, ICP_QAT_FW_COMP_CAPABILITY_GET = 9, ICP_QAT_FW_CRYPTO_CAPABILITY_GET = 10, ICP_QAT_FW_HEARTBEAT_TIMER_SET = 13, ICP_QAT_FW_RL_SLA_CONFIG = 14, ICP_QAT_FW_RL_INIT = 15, ICP_QAT_FW_RL_DU_START = 16, ICP_QAT_FW_RL_DU_STOP = 17, ICP_QAT_FW_TIMER_GET = 19, ICP_QAT_FW_CNV_STATS_GET = 20, ICP_QAT_FW_PKE_REPLAY_STATS_GET = 21 }; enum icp_qat_fw_init_admin_resp_status { ICP_QAT_FW_INIT_RESP_STATUS_SUCCESS = 0, ICP_QAT_FW_INIT_RESP_STATUS_FAIL = 1, ICP_QAT_FW_INIT_RESP_STATUS_UNSUPPORTED = 4 }; enum icp_qat_fw_cnv_error_type { CNV_ERR_TYPE_NO_ERROR = 0, CNV_ERR_TYPE_CHECKSUM_ERROR, CNV_ERR_TYPE_DECOMP_PRODUCED_LENGTH_ERROR, CNV_ERR_TYPE_DECOMPRESSION_ERROR, CNV_ERR_TYPE_TRANSLATION_ERROR, CNV_ERR_TYPE_DECOMP_CONSUMED_LENGTH_ERROR, CNV_ERR_TYPE_UNKNOWN_ERROR }; #define CNV_ERROR_TYPE_GET(latest_error) \ ({ \ __typeof__(latest_error) _lerror = latest_error; \ (_lerror >> 12) > CNV_ERR_TYPE_UNKNOWN_ERROR ? \ CNV_ERR_TYPE_UNKNOWN_ERROR : \ (enum icp_qat_fw_cnv_error_type)(_lerror >> 12); \ }) #define CNV_ERROR_LENGTH_DELTA_GET(latest_error) \ ({ \ __typeof__(latest_error) _lerror = latest_error; \ ((s16)((_lerror & 0x0FFF) | (_lerror & 0x0800 ? 
0xF000 : 0))); \ }) #define CNV_ERROR_DECOMP_STATUS_GET(latest_error) ((s8)(latest_error & 0xFF)) struct icp_qat_fw_init_admin_req { u16 init_cfg_sz; u8 resrvd1; u8 cmd_id; u32 max_req_duration; u64 opaque_data; union { /* ICP_QAT_FW_INIT_ME */ struct { u64 resrvd2; u16 ibuf_size_in_kb; u16 resrvd3; u32 resrvd4; }; /* ICP_QAT_FW_CONSTANTS_CFG */ struct { u64 init_cfg_ptr; u64 resrvd5; }; /* ICP_QAT_FW_HEARTBEAT_TIMER_SET */ struct { u64 hb_cfg_ptr; u32 heartbeat_ticks; u32 resrvd6; }; /* ICP_QAT_FW_RL_SLA_CONFIG */ struct { u32 credit_per_sla; u8 service_id; u8 vf_id; u8 resrvd7; u8 resrvd8; u32 resrvd9; u32 resrvd10; }; /* ICP_QAT_FW_RL_INIT */ struct { u32 rl_period; u8 config; u8 resrvd11; u8 num_me; u8 resrvd12; u8 pke_svc_arb_map; u8 bulk_crypto_svc_arb_map; u8 compression_svc_arb_map; u8 resrvd13; u32 resrvd14; }; /* ICP_QAT_FW_RL_DU_STOP */ struct { u64 cfg_ptr; u32 resrvd15; u32 resrvd16; }; }; } __packed; struct icp_qat_fw_init_admin_resp { u8 flags; u8 resrvd1; u8 status; u8 cmd_id; union { u32 resrvd2; u32 ras_event_count; /* ICP_QAT_FW_STATUS_GET */ struct { u16 version_minor_num; u16 version_major_num; }; /* ICP_QAT_FW_COMP_CAPABILITY_GET */ u32 extended_features; /* ICP_QAT_FW_CNV_STATS_GET */ struct { u16 error_count; u16 latest_error; }; }; u64 opaque_data; union { u32 resrvd3[4]; /* ICP_QAT_FW_STATUS_GET */ struct { u32 version_patch_num; u8 context_id; u8 ae_id; u16 resrvd4; u64 resrvd5; }; /* ICP_QAT_FW_COMP_CAPABILITY_GET */ struct { u16 compression_algos; u16 checksum_algos; u32 deflate_capabilities; u32 resrvd6; u32 deprecated; }; /* ICP_QAT_FW_CRYPTO_CAPABILITY_GET */ struct { u32 cipher_algos; u32 hash_algos; u16 keygen_algos; u16 other; u16 public_key_algos; u16 prime_algos; }; /* ICP_QAT_FW_RL_DU_STOP */ struct { u32 resrvd7; u8 granularity; u8 resrvd8; u16 resrvd9; u32 total_du_time; u32 resrvd10; }; /* ICP_QAT_FW_TIMER_GET */ struct { u64 timestamp; u64 resrvd11; }; /* ICP_QAT_FW_COUNTERS_GET */ struct { u64 req_rec_count; u64 resp_sent_count; }; /* ICP_QAT_FW_PKE_REPLAY_STATS_GET */ struct { u32 successful_count; u32 unsuccessful_count; u64 resrvd12; }; }; } __packed; enum icp_qat_fw_init_admin_init_flag { ICP_QAT_FW_INIT_FLAG_PKE_DISABLED = 0 }; struct icp_qat_fw_init_admin_hb_cnt { u16 resp_heartbeat_cnt; u16 req_heartbeat_cnt; }; -struct icp_qat_fw_init_admin_hb_stats { - struct icp_qat_fw_init_admin_hb_cnt stats[ADF_NUM_HB_CNT_PER_AE]; -}; - #define ICP_QAT_FW_COMN_HEARTBEAT_OK 0 #define ICP_QAT_FW_COMN_HEARTBEAT_BLOCKED 1 #define ICP_QAT_FW_COMN_HEARTBEAT_FLAG_BITPOS 0 #define ICP_QAT_FW_COMN_HEARTBEAT_FLAG_MASK 0x1 #define ICP_QAT_FW_COMN_STATUS_RESRVD_FLD_MASK 0xFE #define ICP_QAT_FW_COMN_HEARTBEAT_HDR_FLAG_GET(hdr_t) \ ICP_QAT_FW_COMN_HEARTBEAT_FLAG_GET(hdr_t.flags) #define ICP_QAT_FW_COMN_HEARTBEAT_HDR_FLAG_SET(hdr_t, val) \ ICP_QAT_FW_COMN_HEARTBEAT_FLAG_SET(hdr_t, val) #define ICP_QAT_FW_COMN_HEARTBEAT_FLAG_GET(flags) \ QAT_FIELD_GET(flags, \ ICP_QAT_FW_COMN_HEARTBEAT_FLAG_BITPOS, \ ICP_QAT_FW_COMN_HEARTBEAT_FLAG_MASK) #endif diff --git a/sys/dev/qat/include/qat_ocf_utils.h b/sys/dev/qat/include/qat_ocf_utils.h index 0cacd8f0a84f..30a7e9b7f8ec 100644 --- a/sys/dev/qat/include/qat_ocf_utils.h +++ b/sys/dev/qat/include/qat_ocf_utils.h @@ -1,61 +1,64 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright(c) 2007-2022 Intel Corporation */ /* $FreeBSD$ */ #ifndef _QAT_OCF_UTILS_H_ #define _QAT_OCF_UTILS_H_ /* System headers */ #include #include #include /* Cryptodev headers */ #include #include /* QAT specific headers */ #include "qat_ocf_mem_pool.h" 
#include "cpa.h" #include "cpa_cy_sym_dp.h" static inline CpaBoolean is_gmac_exception(const struct crypto_session_params *csp) { if (CSP_MODE_DIGEST == csp->csp_mode) if (CRYPTO_AES_NIST_GMAC == csp->csp_auth_alg) return CPA_TRUE; return CPA_FALSE; } static inline CpaBoolean is_sep_aad_supported(const struct crypto_session_params *csp) { if (CPA_TRUE == is_gmac_exception(csp)) return CPA_FALSE; if (CSP_MODE_AEAD == csp->csp_mode) if (CRYPTO_AES_NIST_GCM_16 == csp->csp_cipher_alg || CRYPTO_AES_NIST_GMAC == csp->csp_cipher_alg) return CPA_TRUE; return CPA_FALSE; } static inline CpaBoolean is_use_sep_digest(const struct crypto_session_params *csp) { /* Use separated digest for all digest/hash operations, - * including GMAC */ - if (CSP_MODE_DIGEST == csp->csp_mode || CSP_MODE_ETA == csp->csp_mode) + * including GMAC. ETA and AEAD use separated digest + * due to FW limitation to specify offset to digest + * appended to pay-load buffer. */ + if (CSP_MODE_DIGEST == csp->csp_mode || CSP_MODE_ETA == csp->csp_mode || + CSP_MODE_AEAD == csp->csp_mode) return CPA_TRUE; return CPA_FALSE; } int qat_ocf_handle_session_update(struct qat_ocf_dsession *ocf_dsession, struct cryptop *crp); CpaStatus qat_ocf_wait_for_session(CpaCySymSessionCtx sessionCtx, Cpa32U timeoutMS); #endif /* _QAT_OCF_UTILS_H_ */ diff --git a/sys/dev/qat/qat/qat_ocf.c b/sys/dev/qat/qat/qat_ocf.c index 2461f3134a77..7e5025b0fa28 100644 --- a/sys/dev/qat/qat/qat_ocf.c +++ b/sys/dev/qat/qat/qat_ocf.c @@ -1,1228 +1,1223 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright(c) 2007-2022 Intel Corporation */ /* $FreeBSD$ */ /* System headers */ #include #include #include #include #include #include #include #include /* Cryptodev headers */ #include #include "cryptodev_if.h" /* QAT specific headers */ #include "cpa.h" #include "cpa_cy_im.h" #include "cpa_cy_sym_dp.h" #include "adf_accel_devices.h" #include "adf_common_drv.h" #include "lac_sym_hash_defs.h" #include "lac_sym_qat_hash_defs_lookup.h" /* To get only IRQ instances */ #include "icp_accel_devices.h" #include "icp_adf_accel_mgr.h" #include "lac_sal_types.h" +/* To disable AEAD HW MAC verification */ +#include "icp_sal_user.h" + /* QAT OCF specific headers */ #include "qat_ocf_mem_pool.h" #include "qat_ocf_utils.h" #define QAT_OCF_MAX_INSTANCES (256) #define QAT_OCF_SESSION_WAIT_TIMEOUT_MS (1000) MALLOC_DEFINE(M_QAT_OCF, "qat_ocf", "qat_ocf(4) memory allocations"); /* QAT OCF internal structures */ struct qat_ocf_softc { device_t sc_dev; int32_t cryptodev_id; struct qat_ocf_instance cyInstHandles[QAT_OCF_MAX_INSTANCES]; int32_t numCyInstances; }; /* Function definitions */ static void qat_ocf_freesession(device_t dev, crypto_session_t cses); static int qat_ocf_probesession(device_t dev, const struct crypto_session_params *csp); static int qat_ocf_newsession(device_t dev, crypto_session_t cses, const struct crypto_session_params *csp); static int qat_ocf_attach(device_t dev); static int qat_ocf_detach(device_t dev); static void symDpCallback(CpaCySymDpOpData *pOpData, CpaStatus result, CpaBoolean verifyResult) { struct qat_ocf_cookie *qat_cookie; struct cryptop *crp; struct qat_ocf_dsession *qat_dsession = NULL; struct qat_ocf_session *qat_session = NULL; struct qat_ocf_instance *qat_instance = NULL; CpaStatus status; int rc = 0; qat_cookie = (struct qat_ocf_cookie *)pOpData->pCallbackTag; if (!qat_cookie) return; crp = qat_cookie->crp_op; qat_dsession = crypto_get_driver_session(crp->crp_session); qat_instance = qat_dsession->qatInstance; status = 
qat_ocf_cookie_dma_post_sync(crp, pOpData); if (CPA_STATUS_SUCCESS != status) { rc = EIO; goto exit; } status = qat_ocf_cookie_dma_unload(crp, pOpData); if (CPA_STATUS_SUCCESS != status) { rc = EIO; goto exit; } /* Verify result */ if (CPA_STATUS_SUCCESS != result) { rc = EBADMSG; goto exit; } /* Verify digest by FW (GCM and CCM only) */ if (CPA_TRUE != verifyResult) { rc = EBADMSG; goto exit; } if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) qat_session = &qat_dsession->encSession; else qat_session = &qat_dsession->decSession; /* Copy back digest result if it's stored in separated buffer */ if (pOpData->digestResult && qat_session->authLen > 0) { if ((crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) != 0) { char icv[QAT_OCF_MAX_DIGEST] = { 0 }; crypto_copydata(crp, crp->crp_digest_start, qat_session->authLen, icv); if (timingsafe_bcmp(icv, qat_cookie->qat_ocf_digest, qat_session->authLen) != 0) { rc = EBADMSG; goto exit; } } else { crypto_copyback(crp, crp->crp_digest_start, qat_session->authLen, qat_cookie->qat_ocf_digest); } } exit: qat_ocf_cookie_free(qat_instance, qat_cookie); crp->crp_etype = rc; crypto_done(crp); return; } static inline CpaPhysicalAddr qatVirtToPhys(void *virtAddr) { return (CpaPhysicalAddr)vtophys(virtAddr); } static int qat_ocf_probesession(device_t dev, const struct crypto_session_params *csp) { if ((csp->csp_flags & ~(CSP_F_SEPARATE_OUTPUT | CSP_F_SEPARATE_AAD)) != 0) { return EINVAL; } switch (csp->csp_mode) { case CSP_MODE_CIPHER: switch (csp->csp_cipher_alg) { case CRYPTO_AES_CBC: case CRYPTO_AES_ICM: if (csp->csp_ivlen != AES_BLOCK_LEN) return EINVAL; break; case CRYPTO_AES_XTS: if (csp->csp_ivlen != AES_XTS_IV_LEN) return EINVAL; break; default: return EINVAL; } break; case CSP_MODE_DIGEST: switch (csp->csp_auth_alg) { case CRYPTO_SHA1: case CRYPTO_SHA1_HMAC: case CRYPTO_SHA2_256: case CRYPTO_SHA2_256_HMAC: case CRYPTO_SHA2_384: case CRYPTO_SHA2_384_HMAC: case CRYPTO_SHA2_512: case CRYPTO_SHA2_512_HMAC: break; case CRYPTO_AES_NIST_GMAC: if (csp->csp_ivlen != AES_GCM_IV_LEN) return EINVAL; break; default: return EINVAL; } break; case CSP_MODE_AEAD: switch (csp->csp_cipher_alg) { case CRYPTO_AES_NIST_GCM_16: if (csp->csp_ivlen != AES_GCM_IV_LEN) return EINVAL; break; default: return EINVAL; } break; case CSP_MODE_ETA: switch (csp->csp_auth_alg) { case CRYPTO_SHA1_HMAC: case CRYPTO_SHA2_256_HMAC: case CRYPTO_SHA2_384_HMAC: case CRYPTO_SHA2_512_HMAC: switch (csp->csp_cipher_alg) { case CRYPTO_AES_CBC: case CRYPTO_AES_ICM: if (csp->csp_ivlen != AES_BLOCK_LEN) return EINVAL; break; case CRYPTO_AES_XTS: if (csp->csp_ivlen != AES_XTS_IV_LEN) return EINVAL; break; default: return EINVAL; } break; default: return EINVAL; } break; default: return EINVAL; } return CRYPTODEV_PROBE_HARDWARE; } static CpaStatus qat_ocf_session_init(device_t dev, struct cryptop *crp, struct qat_ocf_instance *qat_instance, struct qat_ocf_session *qat_ssession) { CpaStatus status = CPA_STATUS_SUCCESS; /* Cryptodev structures */ crypto_session_t cses; const struct crypto_session_params *csp; /* DP API Session configuration */ CpaCySymSessionSetupData sessionSetupData = { 0 }; CpaCySymSessionCtx sessionCtx = NULL; Cpa32U sessionCtxSize = 0; cses = crp->crp_session; if (NULL == cses) { device_printf(dev, "no crypto session in cryptodev request\n"); return CPA_STATUS_FAIL; } csp = crypto_get_params(cses); if (NULL == csp) { device_printf(dev, "no session parameters in cryptodev session\n"); return CPA_STATUS_FAIL; } /* Common fields */ sessionSetupData.sessionPriority = CPA_CY_PRIORITY_HIGH; /* Cipher key */ if 
(crp->crp_cipher_key) sessionSetupData.cipherSetupData.pCipherKey = crp->crp_cipher_key; else sessionSetupData.cipherSetupData.pCipherKey = csp->csp_cipher_key; sessionSetupData.cipherSetupData.cipherKeyLenInBytes = csp->csp_cipher_klen; /* Auth key */ if (crp->crp_auth_key) sessionSetupData.hashSetupData.authModeSetupData.authKey = crp->crp_auth_key; else sessionSetupData.hashSetupData.authModeSetupData.authKey = csp->csp_auth_key; sessionSetupData.hashSetupData.authModeSetupData.authKeyLenInBytes = csp->csp_auth_klen; qat_ssession->aadLen = crp->crp_aad_length; if (CPA_TRUE == is_sep_aad_supported(csp)) sessionSetupData.hashSetupData.authModeSetupData.aadLenInBytes = crp->crp_aad_length; else sessionSetupData.hashSetupData.authModeSetupData.aadLenInBytes = 0; /* Just setup algorithm - regardless of mode */ if (csp->csp_cipher_alg) { sessionSetupData.symOperation = CPA_CY_SYM_OP_CIPHER; switch (csp->csp_cipher_alg) { case CRYPTO_AES_CBC: sessionSetupData.cipherSetupData.cipherAlgorithm = CPA_CY_SYM_CIPHER_AES_CBC; break; case CRYPTO_AES_ICM: sessionSetupData.cipherSetupData.cipherAlgorithm = CPA_CY_SYM_CIPHER_AES_CTR; break; case CRYPTO_AES_XTS: sessionSetupData.cipherSetupData.cipherAlgorithm = CPA_CY_SYM_CIPHER_AES_XTS; break; case CRYPTO_AES_NIST_GCM_16: sessionSetupData.cipherSetupData.cipherAlgorithm = CPA_CY_SYM_CIPHER_AES_GCM; sessionSetupData.hashSetupData.hashAlgorithm = CPA_CY_SYM_HASH_AES_GCM; sessionSetupData.hashSetupData.hashMode = CPA_CY_SYM_HASH_MODE_AUTH; break; default: device_printf(dev, "cipher_alg: %d not supported\n", csp->csp_cipher_alg); status = CPA_STATUS_UNSUPPORTED; goto fail; } } if (csp->csp_auth_alg) { switch (csp->csp_auth_alg) { case CRYPTO_SHA1_HMAC: sessionSetupData.hashSetupData.hashAlgorithm = CPA_CY_SYM_HASH_SHA1; sessionSetupData.hashSetupData.hashMode = CPA_CY_SYM_HASH_MODE_AUTH; break; case CRYPTO_SHA1: sessionSetupData.hashSetupData.hashAlgorithm = CPA_CY_SYM_HASH_SHA1; sessionSetupData.hashSetupData.hashMode = CPA_CY_SYM_HASH_MODE_PLAIN; break; case CRYPTO_SHA2_256_HMAC: sessionSetupData.hashSetupData.hashAlgorithm = CPA_CY_SYM_HASH_SHA256; sessionSetupData.hashSetupData.hashMode = CPA_CY_SYM_HASH_MODE_AUTH; break; case CRYPTO_SHA2_256: sessionSetupData.hashSetupData.hashAlgorithm = CPA_CY_SYM_HASH_SHA256; sessionSetupData.hashSetupData.hashMode = CPA_CY_SYM_HASH_MODE_PLAIN; break; case CRYPTO_SHA2_224_HMAC: sessionSetupData.hashSetupData.hashAlgorithm = CPA_CY_SYM_HASH_SHA224; sessionSetupData.hashSetupData.hashMode = CPA_CY_SYM_HASH_MODE_AUTH; break; case CRYPTO_SHA2_224: sessionSetupData.hashSetupData.hashAlgorithm = CPA_CY_SYM_HASH_SHA224; sessionSetupData.hashSetupData.hashMode = CPA_CY_SYM_HASH_MODE_PLAIN; break; case CRYPTO_SHA2_384_HMAC: sessionSetupData.hashSetupData.hashAlgorithm = CPA_CY_SYM_HASH_SHA384; sessionSetupData.hashSetupData.hashMode = CPA_CY_SYM_HASH_MODE_AUTH; break; case CRYPTO_SHA2_384: sessionSetupData.hashSetupData.hashAlgorithm = CPA_CY_SYM_HASH_SHA384; sessionSetupData.hashSetupData.hashMode = CPA_CY_SYM_HASH_MODE_PLAIN; break; case CRYPTO_SHA2_512_HMAC: sessionSetupData.hashSetupData.hashAlgorithm = CPA_CY_SYM_HASH_SHA512; sessionSetupData.hashSetupData.hashMode = CPA_CY_SYM_HASH_MODE_AUTH; break; case CRYPTO_SHA2_512: sessionSetupData.hashSetupData.hashAlgorithm = CPA_CY_SYM_HASH_SHA512; sessionSetupData.hashSetupData.hashMode = CPA_CY_SYM_HASH_MODE_PLAIN; break; case CRYPTO_AES_NIST_GMAC: sessionSetupData.hashSetupData.hashAlgorithm = CPA_CY_SYM_HASH_AES_GMAC; break; default: status = CPA_STATUS_UNSUPPORTED; goto 
fail; } } /* csp->csp_auth_alg */ /* Setting digest-length if no cipher-only mode is set */ if (csp->csp_mode != CSP_MODE_CIPHER) { lac_sym_qat_hash_defs_t *pHashDefsInfo = NULL; if (csp->csp_auth_mlen) { sessionSetupData.hashSetupData.digestResultLenInBytes = csp->csp_auth_mlen; qat_ssession->authLen = csp->csp_auth_mlen; } else { LacSymQat_HashDefsLookupGet( qat_instance->cyInstHandle, sessionSetupData.hashSetupData.hashAlgorithm, &pHashDefsInfo); if (NULL == pHashDefsInfo) { device_printf( dev, "unable to find corresponding hash data\n"); status = CPA_STATUS_UNSUPPORTED; goto fail; } sessionSetupData.hashSetupData.digestResultLenInBytes = pHashDefsInfo->algInfo->digestLength; qat_ssession->authLen = pHashDefsInfo->algInfo->digestLength; } sessionSetupData.verifyDigest = CPA_FALSE; } switch (csp->csp_mode) { case CSP_MODE_AEAD: - sessionSetupData.symOperation = - CPA_CY_SYM_OP_ALGORITHM_CHAINING; - /* Place the digest result in a buffer unrelated to srcBuffer */ - sessionSetupData.digestIsAppended = CPA_TRUE; - /* For GCM and CCM driver forces to verify digest on HW */ - sessionSetupData.verifyDigest = CPA_TRUE; - if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) { - sessionSetupData.cipherSetupData.cipherDirection = - CPA_CY_SYM_CIPHER_DIRECTION_ENCRYPT; - sessionSetupData.algChainOrder = - CPA_CY_SYM_ALG_CHAIN_ORDER_CIPHER_THEN_HASH; - } else { - sessionSetupData.cipherSetupData.cipherDirection = - CPA_CY_SYM_CIPHER_DIRECTION_DECRYPT; - sessionSetupData.algChainOrder = - CPA_CY_SYM_ALG_CHAIN_ORDER_HASH_THEN_CIPHER; - } - break; case CSP_MODE_ETA: sessionSetupData.symOperation = CPA_CY_SYM_OP_ALGORITHM_CHAINING; /* Place the digest result in a buffer unrelated to srcBuffer */ sessionSetupData.digestIsAppended = CPA_FALSE; /* Due to FW limitation to verify only appended MACs */ sessionSetupData.verifyDigest = CPA_FALSE; if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) { sessionSetupData.cipherSetupData.cipherDirection = CPA_CY_SYM_CIPHER_DIRECTION_ENCRYPT; sessionSetupData.algChainOrder = CPA_CY_SYM_ALG_CHAIN_ORDER_CIPHER_THEN_HASH; } else { sessionSetupData.cipherSetupData.cipherDirection = CPA_CY_SYM_CIPHER_DIRECTION_DECRYPT; sessionSetupData.algChainOrder = CPA_CY_SYM_ALG_CHAIN_ORDER_HASH_THEN_CIPHER; } break; case CSP_MODE_CIPHER: if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) { sessionSetupData.cipherSetupData.cipherDirection = CPA_CY_SYM_CIPHER_DIRECTION_ENCRYPT; } else { sessionSetupData.cipherSetupData.cipherDirection = CPA_CY_SYM_CIPHER_DIRECTION_DECRYPT; } sessionSetupData.symOperation = CPA_CY_SYM_OP_CIPHER; break; case CSP_MODE_DIGEST: sessionSetupData.symOperation = CPA_CY_SYM_OP_HASH; if (csp->csp_auth_alg == CRYPTO_AES_NIST_GMAC) { sessionSetupData.symOperation = CPA_CY_SYM_OP_ALGORITHM_CHAINING; /* GMAC is always encrypt */ sessionSetupData.cipherSetupData.cipherDirection = CPA_CY_SYM_CIPHER_DIRECTION_ENCRYPT; sessionSetupData.algChainOrder = CPA_CY_SYM_ALG_CHAIN_ORDER_CIPHER_THEN_HASH; sessionSetupData.cipherSetupData.cipherAlgorithm = CPA_CY_SYM_CIPHER_AES_GCM; sessionSetupData.hashSetupData.hashAlgorithm = CPA_CY_SYM_HASH_AES_GMAC; sessionSetupData.hashSetupData.hashMode = CPA_CY_SYM_HASH_MODE_AUTH; /* Same key for cipher and auth */ sessionSetupData.cipherSetupData.pCipherKey = csp->csp_auth_key; sessionSetupData.cipherSetupData.cipherKeyLenInBytes = csp->csp_auth_klen; /* Generated GMAC stored in separated buffer */ sessionSetupData.digestIsAppended = CPA_FALSE; /* Digest verification not allowed in GMAC case */ sessionSetupData.verifyDigest = CPA_FALSE; /* No AAD allowed */ 
sessionSetupData.hashSetupData.authModeSetupData .aadLenInBytes = 0; } else { sessionSetupData.cipherSetupData.cipherDirection = CPA_CY_SYM_CIPHER_DIRECTION_ENCRYPT; sessionSetupData.symOperation = CPA_CY_SYM_OP_HASH; sessionSetupData.digestIsAppended = CPA_FALSE; } break; default: device_printf(dev, "%s: unhandled crypto algorithm %d, %d\n", __func__, csp->csp_cipher_alg, csp->csp_auth_alg); status = CPA_STATUS_FAIL; goto fail; } /* Extracting session size */ status = cpaCySymSessionCtxGetSize(qat_instance->cyInstHandle, &sessionSetupData, &sessionCtxSize); if (CPA_STATUS_SUCCESS != status) { device_printf(dev, "unable to get session size\n"); goto fail; } /* Allocating contiguous memory for session */ sessionCtx = contigmalloc(sessionCtxSize, M_QAT_OCF, M_NOWAIT, 0, ~1UL, 1 << (bsrl(sessionCtxSize - 1) + 1), 0); if (NULL == sessionCtx) { device_printf(dev, "unable to allocate memory for session\n"); status = CPA_STATUS_RESOURCE; goto fail; } status = cpaCySymDpInitSession(qat_instance->cyInstHandle, &sessionSetupData, sessionCtx); if (CPA_STATUS_SUCCESS != status) { device_printf(dev, "session initialization failed\n"); goto fail; } /* NOTE: let's keep the double-session (both directions) approach to overcome * the lack of direction updates in FreeBSD QAT. */ qat_ssession->sessionCtx = sessionCtx; qat_ssession->sessionCtxSize = sessionCtxSize; return CPA_STATUS_SUCCESS; fail: /* Release resources if any */ if (sessionCtx) contigfree(sessionCtx, sessionCtxSize, M_QAT_OCF); return status; } static int qat_ocf_newsession(device_t dev, crypto_session_t cses, const struct crypto_session_params *csp) { /* Cryptodev QAT structures */ struct qat_ocf_softc *qat_softc; struct qat_ocf_dsession *qat_dsession; struct qat_ocf_instance *qat_instance; u_int cpu_id = PCPU_GET(cpuid); /* Create cryptodev session */ qat_softc = device_get_softc(dev); qat_instance = &qat_softc->cyInstHandles[cpu_id % qat_softc->numCyInstances]; qat_dsession = crypto_get_driver_session(cses); if (NULL == qat_dsession) { device_printf(dev, "Unable to create new session\n"); return (EINVAL); } /* Only the instance is assigned at this point; the remaining operations are * moved to lazy session init */ qat_dsession->qatInstance = qat_instance; return 0; } static CpaStatus qat_ocf_remove_session(device_t dev, CpaInstanceHandle cyInstHandle, struct qat_ocf_session *qat_session) { CpaStatus status = CPA_STATUS_SUCCESS; if (NULL == qat_session->sessionCtx) return CPA_STATUS_SUCCESS; /* User callback is executed right before decrementing pending * callback atomic counter. To avoid spuriously rejecting the session * removal we have to wait a very short while for the counter update * after callback execution. 
*/ status = qat_ocf_wait_for_session(qat_session->sessionCtx, QAT_OCF_SESSION_WAIT_TIMEOUT_MS); if (CPA_STATUS_SUCCESS != status) { device_printf(dev, "waiting for session un-busy failed\n"); return CPA_STATUS_FAIL; } status = cpaCySymDpRemoveSession(cyInstHandle, qat_session->sessionCtx); if (CPA_STATUS_SUCCESS != status) { device_printf(dev, "error while removing session\n"); return CPA_STATUS_FAIL; } explicit_bzero(qat_session->sessionCtx, qat_session->sessionCtxSize); contigfree(qat_session->sessionCtx, qat_session->sessionCtxSize, M_QAT_OCF); qat_session->sessionCtx = NULL; qat_session->sessionCtxSize = 0; return CPA_STATUS_SUCCESS; } static void qat_ocf_freesession(device_t dev, crypto_session_t cses) { CpaStatus status = CPA_STATUS_SUCCESS; struct qat_ocf_dsession *qat_dsession = NULL; struct qat_ocf_instance *qat_instance = NULL; qat_dsession = crypto_get_driver_session(cses); qat_instance = qat_dsession->qatInstance; mtx_lock(&qat_instance->cyInstMtx); status = qat_ocf_remove_session(dev, qat_dsession->qatInstance->cyInstHandle, &qat_dsession->encSession); if (CPA_STATUS_SUCCESS != status) device_printf(dev, "unable to remove encrypt session\n"); status = qat_ocf_remove_session(dev, qat_dsession->qatInstance->cyInstHandle, &qat_dsession->decSession); if (CPA_STATUS_SUCCESS != status) device_printf(dev, "unable to remove decrypt session\n"); mtx_unlock(&qat_instance->cyInstMtx); } /* The QAT GCM/CCM FW APIs are the only algorithms which support separated AAD. */ static CpaStatus qat_ocf_load_aad_gcm(struct cryptop *crp, struct qat_ocf_cookie *qat_cookie) { CpaCySymDpOpData *pOpData; pOpData = &qat_cookie->pOpdata; if (NULL != crp->crp_aad) memcpy(qat_cookie->qat_ocf_gcm_aad, crp->crp_aad, crp->crp_aad_length); else crypto_copydata(crp, crp->crp_aad_start, crp->crp_aad_length, qat_cookie->qat_ocf_gcm_aad); pOpData->pAdditionalAuthData = qat_cookie->qat_ocf_gcm_aad; pOpData->additionalAuthData = qat_cookie->qat_ocf_gcm_aad_paddr; return CPA_STATUS_SUCCESS; } static CpaStatus qat_ocf_load_aad(struct cryptop *crp, struct qat_ocf_cookie *qat_cookie) { CpaStatus status = CPA_STATUS_SUCCESS; const struct crypto_session_params *csp; CpaCySymDpOpData *pOpData; struct qat_ocf_load_cb_arg args; pOpData = &qat_cookie->pOpdata; pOpData->pAdditionalAuthData = NULL; pOpData->additionalAuthData = 0UL; if (crp->crp_aad_length == 0) return CPA_STATUS_SUCCESS; if (crp->crp_aad_length > ICP_QAT_FW_CCM_GCM_AAD_SZ_MAX) return CPA_STATUS_FAIL; csp = crypto_get_params(crp->crp_session); /* Handle GCM/CCM case */ if (CPA_TRUE == is_sep_aad_supported(csp)) return qat_ocf_load_aad_gcm(crp, qat_cookie); if (NULL == crp->crp_aad) { /* AAD already embedded in source buffer */ pOpData->messageLenToCipherInBytes = crp->crp_payload_length; pOpData->cryptoStartSrcOffsetInBytes = crp->crp_payload_start; pOpData->messageLenToHashInBytes = crp->crp_aad_length + crp->crp_payload_length; pOpData->hashStartSrcOffsetInBytes = crp->crp_aad_start; return CPA_STATUS_SUCCESS; } /* Separated AAD not supported by QAT - let's place the content * of the AAD buffer at the very beginning of the source SGL */ args.crp_op = crp; args.qat_cookie = qat_cookie; args.pOpData = pOpData; args.error = 0; status = bus_dmamap_load(qat_cookie->gcm_aad_dma_mem.dma_tag, qat_cookie->gcm_aad_dma_mem.dma_map, crp->crp_aad, crp->crp_aad_length, qat_ocf_crypto_load_aadbuf_cb, &args, BUS_DMA_NOWAIT); qat_cookie->is_sep_aad_used = CPA_TRUE; /* Right after this step we have AAD placed in the first flat buffer * in source SGL */ pOpData->messageLenToCipherInBytes = 
crp->crp_payload_length; pOpData->cryptoStartSrcOffsetInBytes = crp->crp_aad_length + crp->crp_aad_start + crp->crp_payload_start; pOpData->messageLenToHashInBytes = crp->crp_aad_length + crp->crp_payload_length; pOpData->hashStartSrcOffsetInBytes = crp->crp_aad_start; return status; } static CpaStatus qat_ocf_load(struct cryptop *crp, struct qat_ocf_cookie *qat_cookie) { CpaStatus status = CPA_STATUS_SUCCESS; CpaCySymDpOpData *pOpData; struct qat_ocf_load_cb_arg args; /* cryptodev internals */ const struct crypto_session_params *csp; pOpData = &qat_cookie->pOpdata; csp = crypto_get_params(crp->crp_session); /* Load IV buffer if present */ if (csp->csp_ivlen > 0) { memset(qat_cookie->qat_ocf_iv_buf, 0, sizeof(qat_cookie->qat_ocf_iv_buf)); crypto_read_iv(crp, qat_cookie->qat_ocf_iv_buf); pOpData->iv = qat_cookie->qat_ocf_iv_buf_paddr; pOpData->pIv = qat_cookie->qat_ocf_iv_buf; pOpData->ivLenInBytes = csp->csp_ivlen; } /* GCM/CCM - load AAD to separated buffer * AES+SHA - load AAD to first flat in SGL */ status = qat_ocf_load_aad(crp, qat_cookie); if (CPA_STATUS_SUCCESS != status) goto fail; /* Load source buffer */ args.crp_op = crp; args.qat_cookie = qat_cookie; args.pOpData = pOpData; args.error = 0; status = bus_dmamap_load_crp_buffer(qat_cookie->src_dma_mem.dma_tag, qat_cookie->src_dma_mem.dma_map, &crp->crp_buf, qat_ocf_crypto_load_buf_cb, &args, BUS_DMA_NOWAIT); if (CPA_STATUS_SUCCESS != status) goto fail; pOpData->srcBuffer = qat_cookie->src_buffer_list_paddr; pOpData->srcBufferLen = CPA_DP_BUFLIST; /* Load destination buffer */ if (CRYPTO_HAS_OUTPUT_BUFFER(crp)) { status = bus_dmamap_load_crp_buffer(qat_cookie->dst_dma_mem.dma_tag, qat_cookie->dst_dma_mem.dma_map, &crp->crp_obuf, qat_ocf_crypto_load_obuf_cb, &args, BUS_DMA_NOWAIT); if (CPA_STATUS_SUCCESS != status) goto fail; pOpData->dstBuffer = qat_cookie->dst_buffer_list_paddr; pOpData->dstBufferLen = CPA_DP_BUFLIST; } else { pOpData->dstBuffer = pOpData->srcBuffer; pOpData->dstBufferLen = pOpData->srcBufferLen; } if (CPA_TRUE == is_use_sep_digest(csp)) pOpData->digestResult = qat_cookie->qat_ocf_digest_paddr; else pOpData->digestResult = 0UL; /* GMAC - aka zero length buffer */ if (CPA_TRUE == is_gmac_exception(csp)) pOpData->messageLenToCipherInBytes = 0; fail: return status; } static int qat_ocf_check_input(device_t dev, struct cryptop *crp) { const struct crypto_session_params *csp; csp = crypto_get_params(crp->crp_session); if (crypto_buffer_len(&crp->crp_buf) > QAT_OCF_MAX_LEN) return E2BIG; if (CPA_TRUE == is_sep_aad_supported(csp) && (crp->crp_aad_length > ICP_QAT_FW_CCM_GCM_AAD_SZ_MAX)) return EBADMSG; return 0; } static int qat_ocf_process(device_t dev, struct cryptop *crp, int hint) { CpaStatus status = CPA_STATUS_SUCCESS; int rc = 0; struct qat_ocf_dsession *qat_dsession = NULL; struct qat_ocf_session *qat_session = NULL; struct qat_ocf_instance *qat_instance = NULL; CpaCySymDpOpData *pOpData = NULL; struct qat_ocf_cookie *qat_cookie = NULL; CpaBoolean memLoaded = CPA_FALSE; rc = qat_ocf_check_input(dev, crp); if (rc) goto fail; qat_dsession = crypto_get_driver_session(crp->crp_session); if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) qat_session = &qat_dsession->encSession; else qat_session = &qat_dsession->decSession; qat_instance = qat_dsession->qatInstance; status = qat_ocf_cookie_alloc(qat_instance, &qat_cookie); if (CPA_STATUS_SUCCESS != status) { rc = EAGAIN; goto fail; } qat_cookie->crp_op = crp; /* Common request fields */ pOpData = &qat_cookie->pOpdata; pOpData->instanceHandle = qat_instance->cyInstHandle; 
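/* NOTE: sessionCtx is left NULL at this point on purpose: the QAT * session is created lazily on the first request (see below), so the * pointer is only filled in after qat_ocf_session_init() or a session * update has completed under the instance mutex. */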
pOpData->sessionCtx = NULL; /* Cipher fields */ pOpData->cryptoStartSrcOffsetInBytes = crp->crp_payload_start; pOpData->messageLenToCipherInBytes = crp->crp_payload_length; /* Digest fields - any exceptions from these basic rules are covered * in qat_ocf_load */ pOpData->hashStartSrcOffsetInBytes = crp->crp_payload_start; pOpData->messageLenToHashInBytes = crp->crp_payload_length; status = qat_ocf_load(crp, qat_cookie); if (CPA_STATUS_SUCCESS != status) { device_printf(dev, "unable to load OCF buffers to QAT DMA " "transaction\n"); rc = EIO; goto fail; } memLoaded = CPA_TRUE; status = qat_ocf_cookie_dma_pre_sync(crp, pOpData); if (CPA_STATUS_SUCCESS != status) { device_printf(dev, "unable to sync DMA buffers\n"); rc = EIO; goto fail; } mtx_lock(&qat_instance->cyInstMtx); /* Session initialization at the first request. It is done * this way to overcome missing QAT-specific session data, * such as the AAD length, and the limited possibility of * updating a QAT session while handling traffic. */ if (NULL == qat_session->sessionCtx) { status = qat_ocf_session_init(dev, crp, qat_instance, qat_session); if (CPA_STATUS_SUCCESS != status) { mtx_unlock(&qat_instance->cyInstMtx); device_printf(dev, "unable to init session\n"); rc = EIO; goto fail; } } else { status = qat_ocf_handle_session_update(qat_dsession, crp); if (CPA_STATUS_RESOURCE == status) { mtx_unlock(&qat_instance->cyInstMtx); rc = EAGAIN; goto fail; } else if (CPA_STATUS_SUCCESS != status) { mtx_unlock(&qat_instance->cyInstMtx); rc = EIO; goto fail; } } pOpData->sessionCtx = qat_session->sessionCtx; status = cpaCySymDpEnqueueOp(pOpData, CPA_TRUE); mtx_unlock(&qat_instance->cyInstMtx); if (CPA_STATUS_SUCCESS != status) { if (CPA_STATUS_RETRY == status) { rc = EAGAIN; goto fail; } device_printf(dev, "unable to send request. 
Status: %d\n", status); rc = EIO; goto fail; } return 0; fail: if (qat_cookie) { if (memLoaded) qat_ocf_cookie_dma_unload(crp, pOpData); qat_ocf_cookie_free(qat_instance, qat_cookie); } crp->crp_etype = rc; crypto_done(crp); return 0; } static void qat_ocf_identify(driver_t *drv, device_t parent) { if (device_find_child(parent, "qat_ocf", -1) == NULL && BUS_ADD_CHILD(parent, 200, "qat_ocf", -1) == 0) device_printf(parent, "qat_ocf: could not attach!\n"); } static int qat_ocf_probe(device_t dev) { device_set_desc(dev, "QAT engine"); return (BUS_PROBE_NOWILDCARD); } static CpaStatus qat_ocf_get_irq_instances(CpaInstanceHandle *cyInstHandles, Cpa16U cyInstHandlesSize, Cpa16U *foundInstances) { CpaStatus status = CPA_STATUS_SUCCESS; icp_accel_dev_t **pAdfInsts = NULL; icp_accel_dev_t *dev_addr = NULL; sal_t *baseAddr = NULL; sal_list_t *listTemp = NULL; CpaInstanceHandle cyInstHandle; CpaInstanceInfo2 info; Cpa16U numDevices; Cpa32U instCtr = 0; Cpa32U i; /* Get the number of devices */ status = icp_amgr_getNumInstances(&numDevices); if (CPA_STATUS_SUCCESS != status) return status; /* Allocate memory to store addr of accel_devs */ pAdfInsts = malloc(numDevices * sizeof(icp_accel_dev_t *), M_QAT_OCF, M_WAITOK); /* Get ADF to return all accel_devs that support either * symmetric or asymmetric crypto */ status = icp_amgr_getAllAccelDevByCapabilities( (ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC), pAdfInsts, &numDevices); if (CPA_STATUS_SUCCESS != status) { free(pAdfInsts, M_QAT_OCF); return status; } for (i = 0; i < numDevices; i++) { dev_addr = (icp_accel_dev_t *)pAdfInsts[i]; baseAddr = dev_addr->pSalHandle; if (NULL == baseAddr) continue; listTemp = baseAddr->sym_services; while (NULL != listTemp) { cyInstHandle = SalList_getObject(listTemp); status = cpaCyInstanceGetInfo2(cyInstHandle, &info); if (CPA_STATUS_SUCCESS != status) continue; listTemp = SalList_next(listTemp); if (CPA_TRUE == info.isPolled) continue; if (instCtr >= cyInstHandlesSize) break; cyInstHandles[instCtr++] = cyInstHandle; } } free(pAdfInsts, M_QAT_OCF); *foundInstances = instCtr; return CPA_STATUS_SUCCESS; } static CpaStatus qat_ocf_start_instances(struct qat_ocf_softc *qat_softc, device_t dev) { CpaStatus status = CPA_STATUS_SUCCESS; Cpa16U numInstances = 0; CpaInstanceHandle cyInstHandles[QAT_OCF_MAX_INSTANCES] = { 0 }; CpaInstanceHandle cyInstHandle = NULL; Cpa32U startedInstances = 0; Cpa32U i; qat_softc->numCyInstances = 0; status = qat_ocf_get_irq_instances(cyInstHandles, QAT_OCF_MAX_INSTANCES, &numInstances); if (CPA_STATUS_SUCCESS != status) return status; if (0 == numInstances) return CPA_STATUS_RESOURCE; for (i = 0; i < numInstances; i++) { struct qat_ocf_instance *qat_ocf_instance; cyInstHandle = cyInstHandles[i]; if (!cyInstHandle) continue; /* Starting instance */ status = cpaCyStartInstance(cyInstHandle); if (CPA_STATUS_SUCCESS != status) { device_printf(qat_softc->sc_dev, "unable to start instance\n"); continue; } status = cpaCySetAddressTranslation(cyInstHandle, qatVirtToPhys); if (CPA_STATUS_SUCCESS != status) { device_printf(qat_softc->sc_dev, "unable to add virt to phys callback\n"); goto fail; } status = cpaCySymDpRegCbFunc(cyInstHandle, symDpCallback); if (CPA_STATUS_SUCCESS != status) { device_printf(qat_softc->sc_dev, "unable to add user callback\n"); goto fail; } qat_ocf_instance = &qat_softc->cyInstHandles[startedInstances]; qat_ocf_instance->cyInstHandle = cyInstHandle; mtx_init(&qat_ocf_instance->cyInstMtx, "Instance MTX", NULL, MTX_DEF); /* Initialize cookie pool */ status =
qat_ocf_cookie_pool_init(qat_ocf_instance, dev); if (CPA_STATUS_SUCCESS != status) { device_printf(qat_softc->sc_dev, "unable to create cookie pool\n"); goto fail; } + /* Disable forcing HW MAC validation for AEAD */ + status = icp_sal_setForceAEADMACVerify(cyInstHandle, CPA_FALSE); + if (CPA_STATUS_SUCCESS != status) { + device_printf( + qat_softc->sc_dev, + "unable to disable AEAD HW MAC verification\n"); + goto fail; + } + qat_ocf_instance->driver_id = qat_softc->cryptodev_id; startedInstances++; continue; fail: /* Stop instance */ status = cpaCyStopInstance(cyInstHandle); if (CPA_STATUS_SUCCESS != status) device_printf(qat_softc->sc_dev, "unable to stop the instance\n"); continue; } qat_softc->numCyInstances = startedInstances; /* Success if at least one instance has been set */ if (!qat_softc->numCyInstances) return CPA_STATUS_FAIL; return CPA_STATUS_SUCCESS; } static CpaStatus qat_ocf_stop_instances(struct qat_ocf_softc *qat_softc) { CpaStatus status = CPA_STATUS_SUCCESS; int i; for (i = 0; i < qat_softc->numCyInstances; i++) { struct qat_ocf_instance *qat_instance; qat_instance = &qat_softc->cyInstHandles[i]; status = cpaCyStopInstance(qat_instance->cyInstHandle); if (CPA_STATUS_SUCCESS != status) { pr_err("QAT: stopping instance id: %d failed\n", i); mtx_unlock(&qat_instance->cyInstMtx); continue; } qat_ocf_cookie_pool_deinit(qat_instance); mtx_destroy(&qat_instance->cyInstMtx); } return status; } static int qat_ocf_attach(device_t dev) { int status; struct qat_ocf_softc *qat_softc; int32_t cryptodev_id; qat_softc = device_get_softc(dev); qat_softc->sc_dev = dev; cryptodev_id = crypto_get_driverid(dev, sizeof(struct qat_ocf_dsession), CRYPTOCAP_F_HARDWARE); if (cryptodev_id < 0) { device_printf(dev, "cannot initialize!\n"); goto fail; } qat_softc->cryptodev_id = cryptodev_id; /* Starting instances for OCF */ status = qat_ocf_start_instances(qat_softc, dev); if (status) { device_printf(dev, "no QAT IRQ instances available\n"); goto fail; } return 0; fail: qat_ocf_detach(dev); return (ENXIO); } static int qat_ocf_detach(device_t dev) { struct qat_ocf_softc *qat_softc = NULL; CpaStatus cpaStatus; int status = 0; qat_softc = device_get_softc(dev); if (qat_softc->cryptodev_id >= 0) { status = crypto_unregister_all(qat_softc->cryptodev_id); if (status) device_printf(dev, "unable to unregister QAT backend\n"); } /* Stop QAT instances */ cpaStatus = qat_ocf_stop_instances(qat_softc); if (CPA_STATUS_SUCCESS != cpaStatus) { device_printf(dev, "unable to stop instances\n"); status = EIO; } return status; } static device_method_t qat_ocf_methods[] = { DEVMETHOD(device_identify, qat_ocf_identify), DEVMETHOD(device_probe, qat_ocf_probe), DEVMETHOD(device_attach, qat_ocf_attach), DEVMETHOD(device_detach, qat_ocf_detach), /* Cryptodev interface */ DEVMETHOD(cryptodev_probesession, qat_ocf_probesession), DEVMETHOD(cryptodev_newsession, qat_ocf_newsession), DEVMETHOD(cryptodev_freesession, qat_ocf_freesession), DEVMETHOD(cryptodev_process, qat_ocf_process), DEVMETHOD_END }; static driver_t qat_ocf_driver = { .name = "qat_ocf", .methods = qat_ocf_methods, .size = sizeof(struct qat_ocf_softc), }; DRIVER_MODULE_ORDERED(qat, nexus, qat_ocf_driver, NULL, NULL, SI_ORDER_ANY); MODULE_VERSION(qat, 1); MODULE_DEPEND(qat, qat_c62x, 1, 1, 1); MODULE_DEPEND(qat, qat_200xx, 1, 1, 1); MODULE_DEPEND(qat, qat_c3xxx, 1, 1, 1); MODULE_DEPEND(qat, qat_c4xxx, 1, 1, 1); MODULE_DEPEND(qat, qat_dh895xcc, 1, 1, 1); +MODULE_DEPEND(qat, qat_4xxx, 1, 1, 1); MODULE_DEPEND(qat, crypto, 1, 1, 1); MODULE_DEPEND(qat, qat_common,
1, 1, 1); MODULE_DEPEND(qat, qat_api, 1, 1, 1); MODULE_DEPEND(qat, linuxkpi, 1, 1, 1); diff --git a/sys/dev/qat/qat_api/common/compression/dc_buffers.c b/sys/dev/qat/qat_api/common/compression/dc_buffers.c index 1a5d9bc8973e..4f4e836ccf8f 100644 --- a/sys/dev/qat/qat_api/common/compression/dc_buffers.c +++ b/sys/dev/qat/qat_api/common/compression/dc_buffers.c @@ -1,116 +1,174 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright(c) 2007-2022 Intel Corporation */ /* $FreeBSD$ */ /** ***************************************************************************** * @file dc_buffers.c * * @defgroup Dc_DataCompression DC Data Compression * * @ingroup Dc_DataCompression * * @description * Implementation of the buffer management operations for * Data Compression service. * *****************************************************************************/ /* ******************************************************************************* * Include public/global header files ******************************************************************************* */ #include "cpa.h" #include "cpa_dc.h" #include "cpa_dc_bp.h" #include "sal_types_compression.h" #include "icp_qat_fw_comp.h" +#include "sal_hw_gen.h" #define CPA_DC_CEIL_DIV(x, y) (((x) + (y)-1) / (y)) #define DC_DEST_BUFF_EXTRA_DEFLATE_GEN2 (55) +#define DC_DEST_BUFF_EXTRA_DEFLATE_GEN4_STATIC (1029) +#define DC_DEST_BUFF_EXTRA_DEFLATE_GEN4_DYN (512) +#define DC_DEST_BUFF_MIN_EXTRA_BYTES(x) ((x < 8) ? (8 - x) : 0) +#define DC_BUF_MAX_SIZE (0xFFFFFFFF) CpaStatus cpaDcBufferListGetMetaSize(const CpaInstanceHandle instanceHandle, Cpa32U numBuffers, Cpa32U *pSizeInBytes) { CpaInstanceHandle insHandle = NULL; if (CPA_INSTANCE_HANDLE_SINGLE == instanceHandle) { insHandle = dcGetFirstHandle(); } else { insHandle = instanceHandle; } LAC_CHECK_INSTANCE_HANDLE(insHandle); LAC_CHECK_NULL_PARAM(pSizeInBytes); /* Ensure this is a compression instance */ SAL_CHECK_INSTANCE_TYPE(insHandle, SAL_SERVICE_TYPE_COMPRESSION); if (0 == numBuffers) { QAT_UTILS_LOG("Number of buffers is 0.\n"); return CPA_STATUS_INVALID_PARAM; } *pSizeInBytes = (sizeof(icp_buffer_list_desc_t) + (sizeof(icp_flat_buffer_desc_t) * (numBuffers + 1)) + ICP_DESCRIPTOR_ALIGNMENT_BYTES); return CPA_STATUS_SUCCESS; } CpaStatus cpaDcBnpBufferListGetMetaSize(const CpaInstanceHandle instanceHandle, Cpa32U numJobs, Cpa32U *pSizeInBytes) { return CPA_STATUS_UNSUPPORTED; } static inline CpaStatus dcDeflateBoundGen2(CpaDcHuffType huffType, Cpa32U inputSize, Cpa32U *outputSize) { + Cpa64U inBufferSize = inputSize; + Cpa64U outBufferSize = 0; + /* Formula for GEN2 deflate: * ceil(9 * Total input bytes / 8) + 55 bytes. * 55 bytes is the skid pad value for GEN2 devices. + * Adding extra bytes = `DC_DEST_BUFF_MIN_EXTRA_BYTES(inputSize)` + * when calculated value from `CPA_DC_CEIL_DIV(9 * inputSize, 8) + + * DC_DEST_BUFF_EXTRA_DEFLATE_GEN2` is less than 64 bytes to + * achieve a safer output buffer size of 64 bytes. 
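+ * Worked example: for inputSize = 1 the base formula gives + * ceil(9/8) + 55 = 57, so DC_DEST_BUFF_MIN_EXTRA_BYTES(1) = 7 bytes are + * added to reach 64; for inputSize >= 8 the base formula already yields + * at least ceil(72/8) + 55 = 64 and no extra bytes are added.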
*/ - *outputSize = - CPA_DC_CEIL_DIV(9 * inputSize, 8) + DC_DEST_BUFF_EXTRA_DEFLATE_GEN2; + outBufferSize = CPA_DC_CEIL_DIV(9 * inBufferSize, 8) + + DC_DEST_BUFF_EXTRA_DEFLATE_GEN2 + + DC_DEST_BUFF_MIN_EXTRA_BYTES(inputSize); + + if (outBufferSize > DC_BUF_MAX_SIZE) + *outputSize = DC_BUF_MAX_SIZE; + else + *outputSize = (Cpa32U)outBufferSize; + + return CPA_STATUS_SUCCESS; +} + +static inline CpaStatus +dcDeflateBoundGen4(CpaDcHuffType huffType, Cpa32U inputSize, Cpa32U *outputSize) +{ + Cpa64U outputSizeLong; + Cpa64U inputSizeLong = (Cpa64U)inputSize; + + switch (huffType) { + case CPA_DC_HT_STATIC: + /* Formula for GEN4 static deflate: + * ceil((9*sourceLen)/8) + 5 + 1024. */ + outputSizeLong = CPA_DC_CEIL_DIV(9 * inputSizeLong, 8) + + DC_DEST_BUFF_EXTRA_DEFLATE_GEN4_STATIC; + break; + case CPA_DC_HT_FULL_DYNAMIC: + /* Formula for GEN4 dynamic deflate: + * Ceil ((9*sourceLen)/8) + + * ((((8/7) * sourceLen)/ 16KB) * (150+5)) + 512 + */ + outputSizeLong = DC_DEST_BUFF_EXTRA_DEFLATE_GEN4_DYN; + outputSizeLong += CPA_DC_CEIL_DIV(9 * inputSizeLong, 8); + outputSizeLong += ((8 * inputSizeLong * 155) / 7) / (16 * 1024); + break; + default: + return CPA_STATUS_INVALID_PARAM; + } + /* Avoid output size overflow */ + if (outputSizeLong & 0xffffffff00000000UL) + return CPA_STATUS_INVALID_PARAM; + + *outputSize = (Cpa32U)outputSizeLong; return CPA_STATUS_SUCCESS; } CpaStatus cpaDcDeflateCompressBound(const CpaInstanceHandle dcInstance, CpaDcHuffType huffType, Cpa32U inputSize, Cpa32U *outputSize) { + sal_compression_service_t *pService = NULL; CpaInstanceHandle insHandle = NULL; if (CPA_INSTANCE_HANDLE_SINGLE == dcInstance) { insHandle = dcGetFirstHandle(); } else { insHandle = dcInstance; } LAC_CHECK_INSTANCE_HANDLE(insHandle); LAC_CHECK_NULL_PARAM(outputSize); /* Ensure this is a compression instance */ SAL_CHECK_INSTANCE_TYPE(insHandle, SAL_SERVICE_TYPE_COMPRESSION); if (!inputSize) { QAT_UTILS_LOG( "The input size needs to be greater than zero.\n"); return CPA_STATUS_INVALID_PARAM; } if ((CPA_DC_HT_STATIC != huffType) && (CPA_DC_HT_FULL_DYNAMIC != huffType)) { QAT_UTILS_LOG("Invalid huffType value.\n"); return CPA_STATUS_INVALID_PARAM; } - return dcDeflateBoundGen2(huffType, inputSize, outputSize); + pService = (sal_compression_service_t *)insHandle; + if (isDcGen4x(pService)) { + return dcDeflateBoundGen4(huffType, inputSize, outputSize); + } else { + return dcDeflateBoundGen2(huffType, inputSize, outputSize); + } } diff --git a/sys/dev/qat/qat_api/common/compression/dc_datapath.c b/sys/dev/qat/qat_api/common/compression/dc_datapath.c index 0e2aa9f389e2..2e1f9ff96bd8 100644 --- a/sys/dev/qat/qat_api/common/compression/dc_datapath.c +++ b/sys/dev/qat/qat_api/common/compression/dc_datapath.c @@ -1,1790 +1,1982 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright(c) 2007-2022 Intel Corporation */ /* $FreeBSD$ */ /** ***************************************************************************** * @file dc_datapath.c * * @defgroup Dc_DataCompression DC Data Compression * * @ingroup Dc_DataCompression * * @description * Implementation of the Data Compression datapath operations.
* *****************************************************************************/ /* ******************************************************************************* * Include public/global header files ******************************************************************************* */ #include "cpa.h" #include "cpa_dc.h" #include "cpa_dc_dp.h" /* ******************************************************************************* * Include private header files ******************************************************************************* */ #include "dc_session.h" #include "dc_datapath.h" #include "sal_statistics.h" #include "lac_common.h" #include "lac_mem.h" #include "lac_mem_pools.h" #include "sal_types_compression.h" #include "dc_stats.h" #include "lac_buffer_desc.h" #include "lac_sal.h" #include "lac_log.h" #include "lac_sync.h" #include "sal_service_state.h" #include "sal_qat_cmn_msg.h" +#include "sal_hw_gen.h" #include "dc_error_counter.h" #define DC_COMP_MAX_BUFF_SIZE (1024 * 64) static QatUtilsAtomic dcErrorCount[MAX_DC_ERROR_TYPE]; void dcErrorLog(CpaDcReqStatus dcError) { Cpa32U absError = 0; absError = abs(dcError); if ((dcError < CPA_DC_OK) && (absError < MAX_DC_ERROR_TYPE)) { qatUtilsAtomicInc(&(dcErrorCount[absError])); } } Cpa64U getDcErrorCounter(CpaDcReqStatus dcError) { Cpa32U absError = 0; absError = abs(dcError); if (!(dcError >= CPA_DC_OK || dcError < CPA_DC_EMPTY_DYM_BLK)) { return (Cpa64U)qatUtilsAtomicGet(&dcErrorCount[absError]); } return 0; } +static inline void +dcUpdateXltOverflowChecksumsGen4(const dc_compression_cookie_t *pCookie, + const icp_qat_fw_resp_comp_pars_t *pRespPars, + CpaDcRqResults *pDcResults) +{ + dc_session_desc_t *pSessionDesc = + DC_SESSION_DESC_FROM_CTX_GET(pCookie->pSessionHandle); + + /* Recompute CRC checksum when either the checksum type + * is CPA_DC_CRC32 or when the integrity CRCs are enabled. + */ + if (CPA_DC_CRC32 == pSessionDesc->checksumType) { + pDcResults->checksum = pRespPars->crc.legacy.curr_crc32; + + /* No need to recalculate the swCrc64I here as this will get + * handled later in dcHandleIntegrityChecksumsGen4. 
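+ * Refreshing the running checksum here presumably lets the caller of + * this helper seed a resubmission of the unconsumed data once the + * translator overflow is reported back as a success (the caller forces + * xlatPass to CPA_TRUE).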
+ */ + } else if (CPA_DC_ADLER32 == pSessionDesc->checksumType) { + pDcResults->checksum = pRespPars->crc.legacy.curr_adler_32; + } +} + void dcCompression_ProcessCallback(void *pRespMsg) { CpaStatus status = CPA_STATUS_SUCCESS; icp_qat_fw_comp_resp_t *pCompRespMsg = NULL; void *callbackTag = NULL; Cpa64U *pReqData = NULL; CpaDcDpOpData *pResponse = NULL; CpaDcRqResults *pResults = NULL; CpaDcCallbackFn pCbFunc = NULL; dc_session_desc_t *pSessionDesc = NULL; sal_compression_service_t *pService = NULL; dc_compression_cookie_t *pCookie = NULL; CpaDcOpData *pOpData = NULL; CpaBoolean cmpPass = CPA_TRUE, xlatPass = CPA_TRUE; + CpaBoolean isDcDp = CPA_FALSE; + CpaBoolean integrityCrcCheck = CPA_FALSE; CpaBoolean verifyHwIntegrityCrcs = CPA_FALSE; Cpa8U cmpErr = ERR_CODE_NO_ERROR, xlatErr = ERR_CODE_NO_ERROR; dc_request_dir_t compDecomp = DC_COMPRESSION_REQUEST; Cpa8U opStatus = ICP_QAT_FW_COMN_STATUS_FLAG_OK; Cpa8U hdrFlags = 0; /* Cast response message to compression response message type */ pCompRespMsg = (icp_qat_fw_comp_resp_t *)pRespMsg; /* Extract request data pointer from the opaque data */ LAC_MEM_SHARED_READ_TO_PTR(pCompRespMsg->opaque_data, pReqData); /* Extract fields from the request data structure */ pCookie = (dc_compression_cookie_t *)pReqData; if (!pCookie) return; + pSessionDesc = DC_SESSION_DESC_FROM_CTX_GET(pCookie->pSessionHandle); + pService = (sal_compression_service_t *)(pCookie->dcInstance); - if (CPA_TRUE == pSessionDesc->isDcDp) { + isDcDp = pSessionDesc->isDcDp; + if (CPA_TRUE == isDcDp) { pResponse = (CpaDcDpOpData *)pReqData; pResults = &(pResponse->results); if (CPA_DC_DIR_DECOMPRESS == pSessionDesc->sessDirection) { compDecomp = DC_DECOMPRESSION_REQUEST; } + pCookie = NULL; } else { - pSessionDesc = pCookie->pSessionDesc; pResults = pCookie->pResults; callbackTag = pCookie->callbackTag; pCbFunc = pCookie->pSessionDesc->pCompressionCb; compDecomp = pCookie->compDecomp; pOpData = pCookie->pDcOpData; } - pService = (sal_compression_service_t *)(pCookie->dcInstance); - opStatus = pCompRespMsg->comn_resp.comn_status; if (NULL != pOpData) { verifyHwIntegrityCrcs = pOpData->verifyHwIntegrityCrcs; } hdrFlags = pCompRespMsg->comn_resp.hdr_flags; /* Get the cmp error code */ cmpErr = pCompRespMsg->comn_resp.comn_error.s1.cmp_err_code; if (ICP_QAT_FW_COMN_RESP_UNSUPPORTED_REQUEST_STAT_GET(opStatus)) { /* Compression not supported by firmware, set produced/consumed to zero and call the cb function with status CPA_STATUS_UNSUPPORTED */ QAT_UTILS_LOG("Compression feature not supported\n"); status = CPA_STATUS_UNSUPPORTED; pResults->status = (Cpa8S)cmpErr; pResults->consumed = 0; pResults->produced = 0; - if (CPA_TRUE == pSessionDesc->isDcDp) { + if (CPA_TRUE == isDcDp) { if (pResponse) pResponse->responseStatus = CPA_STATUS_UNSUPPORTED; (pService->pDcDpCb)(pResponse); } else { /* Free the memory pool */ - Lac_MemPoolEntryFree(pCookie); - pCookie = NULL; + if (NULL != pCookie) { + Lac_MemPoolEntryFree(pCookie); + pCookie = NULL; + } if (NULL != pCbFunc) { pCbFunc(callbackTag, status); } } if (DC_COMPRESSION_REQUEST == compDecomp) { COMPRESSION_STAT_INC(numCompCompletedErrors, pService); } else { COMPRESSION_STAT_INC(numDecompCompletedErrors, pService); } return; } else { /* Check compression response status */ cmpPass = (CpaBoolean)(ICP_QAT_FW_COMN_STATUS_FLAG_OK == ICP_QAT_FW_COMN_RESP_CMP_STAT_GET(opStatus)); } - if (CPA_DC_INCOMPLETE_FILE_ERR == (Cpa8S)cmpErr) { - cmpPass = CPA_TRUE; - cmpErr = ERR_CODE_NO_ERROR; + if (isDcGen2x(pService)) { + /* QAT1.7 and QAT 1.8 hardware 
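+ * cancels the incomplete file error regardless of the algorithm; the + * QAT2.0 branch below does so only for DEFLATE.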
*/ + if (CPA_DC_INCOMPLETE_FILE_ERR == (Cpa8S)cmpErr) { + cmpPass = CPA_TRUE; + cmpErr = ERR_CODE_NO_ERROR; + } + } else { + /* QAT2.0 hardware cancels the incomplete file errors + * only for DEFLATE algorithm. + * Decompression direction is not tested in the callback as + * the request does not allow it. + */ + if ((pSessionDesc->compType == CPA_DC_DEFLATE) && + (CPA_DC_INCOMPLETE_FILE_ERR == (Cpa8S)cmpErr)) { + cmpPass = CPA_TRUE; + cmpErr = ERR_CODE_NO_ERROR; + } } /* log the slice hang and endpoint push/pull error inside the response */ if (ERR_CODE_SSM_ERROR == (Cpa8S)cmpErr) { QAT_UTILS_LOG( "Slice hang detected on the compression slice.\n"); } else if (ERR_CODE_ENDPOINT_ERROR == (Cpa8S)cmpErr) { QAT_UTILS_LOG( "PCIe End Point Push/Pull or TI/RI Parity error detected.\n"); } /* We return the compression error code for now. We would need to update * the API if we decide to return both error codes */ pResults->status = (Cpa8S)cmpErr; /* Check the translator status */ if ((DC_COMPRESSION_REQUEST == compDecomp) && (CPA_DC_HT_FULL_DYNAMIC == pSessionDesc->huffType)) { /* Check translator response status */ xlatPass = (CpaBoolean)(ICP_QAT_FW_COMN_STATUS_FLAG_OK == ICP_QAT_FW_COMN_RESP_XLAT_STAT_GET(opStatus)); /* Get the translator error code */ xlatErr = pCompRespMsg->comn_resp.comn_error.s1.xlat_err_code; /* Return a fatal error or a potential error in the translator - * slice - * if the compression slice did not return any error */ + * slice if the compression slice did not return any error */ if ((CPA_DC_OK == pResults->status) || (CPA_DC_FATALERR == (Cpa8S)xlatErr)) { pResults->status = (Cpa8S)xlatErr; } } /* Update dc error counter */ dcErrorLog(pResults->status); - if (CPA_FALSE == pSessionDesc->isDcDp) { + if (CPA_FALSE == isDcDp) { /* In case of any error for an end of packet request, we need to * update * the request type for the following request */ if (CPA_DC_FLUSH_FINAL == pCookie->flushFlag && cmpPass && xlatPass) { pSessionDesc->requestType = DC_REQUEST_FIRST; } else { pSessionDesc->requestType = DC_REQUEST_SUBSEQUENT; } if ((CPA_DC_STATEFUL == pSessionDesc->sessState) || ((CPA_DC_STATELESS == pSessionDesc->sessState) && (DC_COMPRESSION_REQUEST == compDecomp))) { /* Overflow is a valid use case for Traditional API - * only. - * Stateful Overflow is supported in both compression - * and - * decompression direction. - * Stateless Overflow is supported only in compression - * direction. + * only. Stateful Overflow is supported in both + * compression and decompression direction. Stateless + * Overflow is supported only in compression direction. 
*/ if (CPA_DC_OVERFLOW == (Cpa8S)cmpErr) cmpPass = CPA_TRUE; if (CPA_DC_OVERFLOW == (Cpa8S)xlatErr) { + if (isDcGen4x(pService) && + (CPA_TRUE == + pService->comp_device_data + .translatorOverflow)) { + pResults->consumed = + pCompRespMsg->comp_resp_pars + .input_byte_counter; + + dcUpdateXltOverflowChecksumsGen4( + pCookie, + &pCompRespMsg->comp_resp_pars, + pResults); + } xlatPass = CPA_TRUE; } } } else { if (CPA_DC_OVERFLOW == (Cpa8S)cmpErr) { cmpPass = CPA_FALSE; } if (CPA_DC_OVERFLOW == (Cpa8S)xlatErr) { + /* XLT overflow is not valid for Data Plane requests */ xlatPass = CPA_FALSE; } } if ((CPA_TRUE == cmpPass) && (CPA_TRUE == xlatPass)) { /* Extract the response from the firmware */ pResults->consumed = pCompRespMsg->comp_resp_pars.input_byte_counter; pResults->produced = pCompRespMsg->comp_resp_pars.output_byte_counter; pSessionDesc->cumulativeConsumedBytes += pResults->consumed; - if (CPA_DC_OVERFLOW != (Cpa8S)xlatErr) { + /* Handle Checksum for end to end data integrity. */ + if (CPA_TRUE == + pService->generic_service_info.integrityCrcCheck && + CPA_TRUE == integrityCrcCheck) { + pSessionDesc->previousChecksum = + pSessionDesc->seedSwCrc.swCrc32I; + } else if (CPA_DC_OVERFLOW != (Cpa8S)xlatErr) { if (CPA_DC_CRC32 == pSessionDesc->checksumType) { pResults->checksum = pCompRespMsg->comp_resp_pars.crc.legacy .curr_crc32; } else if (CPA_DC_ADLER32 == pSessionDesc->checksumType) { pResults->checksum = pCompRespMsg->comp_resp_pars.crc.legacy .curr_adler_32; } pSessionDesc->previousChecksum = pResults->checksum; } if (DC_DECOMPRESSION_REQUEST == compDecomp) { pResults->endOfLastBlock = (ICP_QAT_FW_COMN_STATUS_CMP_END_OF_LAST_BLK_FLAG_SET == ICP_QAT_FW_COMN_RESP_CMP_END_OF_LAST_BLK_FLAG_GET( opStatus)); } /* Save the checksum for the next request */ if ((CPA_DC_OVERFLOW != (Cpa8S)xlatErr) && (CPA_TRUE == verifyHwIntegrityCrcs)) { pSessionDesc->previousChecksum = - pSessionDesc->seedSwCrc.swCrcI; + pSessionDesc->seedSwCrc.swCrc32I; } /* Check if a CNV recovery happened and * increase stats counter */ if ((DC_COMPRESSION_REQUEST == compDecomp) && ICP_QAT_FW_COMN_HDR_CNV_FLAG_GET(hdrFlags) && ICP_QAT_FW_COMN_HDR_CNVNR_FLAG_GET(hdrFlags)) { COMPRESSION_STAT_INC(numCompCnvErrorsRecovered, pService); } - if (CPA_TRUE == pSessionDesc->isDcDp) { + if (CPA_TRUE == isDcDp) { if (pResponse) pResponse->responseStatus = CPA_STATUS_SUCCESS; } else { if (DC_COMPRESSION_REQUEST == compDecomp) { COMPRESSION_STAT_INC(numCompCompleted, pService); } else { COMPRESSION_STAT_INC(numDecompCompleted, pService); } } } else { +#ifdef ICP_DC_RETURN_COUNTERS_ON_ERROR + /* Extract the response from the firmware */ + pResults->consumed = + pCompRespMsg->comp_resp_pars.input_byte_counter; + pResults->produced = + pCompRespMsg->comp_resp_pars.output_byte_counter; + + if (CPA_DC_STATEFUL == pSessionDesc->sessState) { + pSessionDesc->cumulativeConsumedBytes += + pResults->consumed; + } else { + /* In the stateless case all requests have both SOP and + * EOP set */ + pSessionDesc->cumulativeConsumedBytes = + pResults->consumed; + } +#else pResults->consumed = 0; pResults->produced = 0; +#endif if (CPA_DC_OVERFLOW == pResults->status && CPA_DC_STATELESS == pSessionDesc->sessState) { /* This error message will be returned by Data Plane API * in both * compression and decompression direction. With * Traditional API * this error message will be returned only in stateless * decompression direction */ QAT_UTILS_LOG( "Unrecoverable error: stateless overflow. 
You may need to increase the size of your destination buffer.\n"); } - if (CPA_TRUE == pSessionDesc->isDcDp) { + if (CPA_TRUE == isDcDp) { if (pResponse) pResponse->responseStatus = CPA_STATUS_FAIL; } else { if (CPA_DC_OK != pResults->status && CPA_DC_INCOMPLETE_FILE_ERR != pResults->status) { status = CPA_STATUS_FAIL; } if (DC_COMPRESSION_REQUEST == compDecomp) { COMPRESSION_STAT_INC(numCompCompletedErrors, pService); } else { COMPRESSION_STAT_INC(numDecompCompletedErrors, pService); } } } - if (CPA_TRUE == pSessionDesc->isDcDp) { + if (CPA_TRUE == isDcDp) { /* Decrement number of stateless pending callbacks for session */ pSessionDesc->pendingDpStatelessCbCount--; (pService->pDcDpCb)(pResponse); } else { /* Decrement number of pending callbacks for session */ if (CPA_DC_STATELESS == pSessionDesc->sessState) { qatUtilsAtomicDec( &(pCookie->pSessionDesc->pendingStatelessCbCount)); } else if (0 != qatUtilsAtomicGet(&pCookie->pSessionDesc ->pendingStatefulCbCount)) { qatUtilsAtomicDec( &(pCookie->pSessionDesc->pendingStatefulCbCount)); } /* Free the memory pool */ if (NULL != pCookie) { Lac_MemPoolEntryFree(pCookie); pCookie = NULL; } if (NULL != pCbFunc) { pCbFunc(callbackTag, status); } } } /** ***************************************************************************** * @ingroup Dc_DataCompression * Check that all the parameters in the pOpData structure are valid * * @description * Check that all the parameters in the pOpData structure are valid * * @param[in] pService Pointer to the compression service * @param[in] pOpData Pointer to request information structure * holding parameters for cpaDcCompress2 and * CpaDcDecompressData2 * @retval CPA_STATUS_SUCCESS Function executed successfully * @retval CPA_STATUS_INVALID_PARAM Invalid parameter passed in * *****************************************************************************/ -static CpaStatus +CpaStatus dcCheckOpData(sal_compression_service_t *pService, CpaDcOpData *pOpData) { CpaDcSkipMode skipMode = 0; if ((pOpData->flushFlag < CPA_DC_FLUSH_NONE) || (pOpData->flushFlag > CPA_DC_FLUSH_FULL)) { LAC_INVALID_PARAM_LOG("Invalid flushFlag value"); return CPA_STATUS_INVALID_PARAM; } skipMode = pOpData->inputSkipData.skipMode; if ((skipMode < CPA_DC_SKIP_DISABLED) || (skipMode > CPA_DC_SKIP_STRIDE)) { LAC_INVALID_PARAM_LOG("Invalid input skip mode value"); return CPA_STATUS_INVALID_PARAM; } skipMode = pOpData->outputSkipData.skipMode; if ((skipMode < CPA_DC_SKIP_DISABLED) || (skipMode > CPA_DC_SKIP_STRIDE)) { LAC_INVALID_PARAM_LOG("Invalid output skip mode value"); return CPA_STATUS_INVALID_PARAM; } if (pOpData->integrityCrcCheck == CPA_FALSE && pOpData->verifyHwIntegrityCrcs == CPA_TRUE) { LAC_INVALID_PARAM_LOG( "integrityCrcCheck must be set to true " "in order to enable verifyHwIntegrityCrcs"); return CPA_STATUS_INVALID_PARAM; } if (pOpData->integrityCrcCheck != CPA_TRUE && pOpData->integrityCrcCheck != CPA_FALSE) { LAC_INVALID_PARAM_LOG("Invalid integrityCrcCheck value"); return CPA_STATUS_INVALID_PARAM; } if (pOpData->verifyHwIntegrityCrcs != CPA_TRUE && pOpData->verifyHwIntegrityCrcs != CPA_FALSE) { LAC_INVALID_PARAM_LOG("Invalid verifyHwIntegrityCrcs value"); return CPA_STATUS_INVALID_PARAM; } if (pOpData->compressAndVerify != CPA_TRUE && pOpData->compressAndVerify != CPA_FALSE) { LAC_INVALID_PARAM_LOG("Invalid cnv decompress check value"); return CPA_STATUS_INVALID_PARAM; } if (CPA_TRUE == pOpData->integrityCrcCheck && CPA_FALSE == pService->generic_service_info.integrityCrcCheck) { LAC_INVALID_PARAM_LOG("Integrity CRC check is
not " "supported on this device"); return CPA_STATUS_INVALID_PARAM; } + + if (CPA_TRUE == pOpData->integrityCrcCheck && + NULL == pOpData->pCrcData) { + LAC_INVALID_PARAM_LOG("Integrity CRC data structure " + "not intialized in CpaDcOpData"); + return CPA_STATUS_INVALID_PARAM; + } + return CPA_STATUS_SUCCESS; } /** ***************************************************************************** * @ingroup Dc_DataCompression * Check the compression source buffer for Batch and Pack API. * * @description * Check that all the parameters used for Pack compression * request are valid. This function essentially checks the source buffer * parameters and results structure parameters. * * @param[in] pSessionHandle Session handle * @param[in] pSrcBuff Pointer to data buffer for compression * @param[in] pDestBuff Pointer to buffer space allocated for * output data * @param[in] pResults Pointer to results structure * @param[in] flushFlag Indicates the type of flush to be * performed * @param[in] srcBuffSize Size of the source buffer * * @retval CPA_STATUS_SUCCESS Function executed successfully * @retval CPA_STATUS_INVALID_PARAM Invalid parameter passed in * *****************************************************************************/ static CpaStatus dcCheckSourceData(CpaDcSessionHandle pSessionHandle, CpaBufferList *pSrcBuff, CpaBufferList *pDestBuff, CpaDcRqResults *pResults, CpaDcFlush flushFlag, Cpa64U srcBuffSize, CpaDcSkipData *skipData) { dc_session_desc_t *pSessionDesc = NULL; LAC_CHECK_NULL_PARAM(pSessionHandle); LAC_CHECK_NULL_PARAM(pSrcBuff); LAC_CHECK_NULL_PARAM(pDestBuff); LAC_CHECK_NULL_PARAM(pResults); pSessionDesc = DC_SESSION_DESC_FROM_CTX_GET(pSessionHandle); if (NULL == pSessionDesc) { LAC_INVALID_PARAM_LOG("Session handle not as expected"); return CPA_STATUS_INVALID_PARAM; } if ((flushFlag < CPA_DC_FLUSH_NONE) || (flushFlag > CPA_DC_FLUSH_FULL)) { LAC_INVALID_PARAM_LOG("Invalid flushFlag value"); return CPA_STATUS_INVALID_PARAM; } if (pSrcBuff == pDestBuff) { LAC_INVALID_PARAM_LOG("In place operation not supported"); return CPA_STATUS_INVALID_PARAM; } /* Compressing zero bytes is not supported for stateless sessions * for non Batch and Pack requests */ if ((CPA_DC_STATELESS == pSessionDesc->sessState) && (0 == srcBuffSize) && (NULL == skipData)) { LAC_INVALID_PARAM_LOG( "The source buffer size needs to be greater than " "zero bytes for stateless sessions"); return CPA_STATUS_INVALID_PARAM; } if (srcBuffSize > DC_BUFFER_MAX_SIZE) { LAC_INVALID_PARAM_LOG( "The source buffer size needs to be less than or " "equal to 2^32-1 bytes"); return CPA_STATUS_INVALID_PARAM; } return CPA_STATUS_SUCCESS; } /** ***************************************************************************** * @ingroup Dc_DataCompression * Check the compression or decompression function parameters. * * @description * Check that all the parameters used for a Batch and Pack compression * request are valid. This function essentially checks the destination * buffer parameters and intermediate buffer parameters. 
* * @param[in] pService Pointer to the compression service * @param[in] pSessionHandle Session handle * @param[in] pDestBuff Pointer to buffer space allocated for * output data * @param[in] compDecomp Direction of the operation * * @retval CPA_STATUS_SUCCESS Function executed successfully * @retval CPA_STATUS_INVALID_PARAM Invalid parameter passed in * *****************************************************************************/ static CpaStatus dcCheckDestinationData(sal_compression_service_t *pService, CpaDcSessionHandle pSessionHandle, CpaBufferList *pDestBuff, dc_request_dir_t compDecomp) { dc_session_desc_t *pSessionDesc = NULL; Cpa64U destBuffSize = 0; LAC_CHECK_NULL_PARAM(pSessionHandle); LAC_CHECK_NULL_PARAM(pDestBuff); pSessionDesc = DC_SESSION_DESC_FROM_CTX_GET(pSessionHandle); if (NULL == pSessionDesc) { LAC_INVALID_PARAM_LOG("Session handle not as expected"); return CPA_STATUS_INVALID_PARAM; } if (LacBuffDesc_BufferListVerify(pDestBuff, &destBuffSize, LAC_NO_ALIGNMENT_SHIFT) != CPA_STATUS_SUCCESS) { LAC_INVALID_PARAM_LOG( "Invalid destination buffer list parameter"); return CPA_STATUS_INVALID_PARAM; } if (destBuffSize > DC_BUFFER_MAX_SIZE) { LAC_INVALID_PARAM_LOG( "The destination buffer size needs to be less " "than or equal to 2^32-1 bytes"); return CPA_STATUS_INVALID_PARAM; } if (CPA_TRUE == pSessionDesc->isDcDp) { LAC_INVALID_PARAM_LOG( "The session type should not be data plane"); return CPA_STATUS_INVALID_PARAM; } if (DC_COMPRESSION_REQUEST == compDecomp) { if (CPA_DC_HT_FULL_DYNAMIC == pSessionDesc->huffType) { /* Check if intermediate buffers are supported */ - if ((0 == pService->pInterBuffPtrsArrayPhyAddr) || - (NULL == pService->pInterBuffPtrsArray)) { + if ((isDcGen2x(pService)) && + ((0 == pService->pInterBuffPtrsArrayPhyAddr) || + (NULL == pService->pInterBuffPtrsArray))) { LAC_LOG_ERROR( "No intermediate buffer defined for this instance " "- see cpaDcStartInstance"); return CPA_STATUS_INVALID_PARAM; } /* Ensure that the destination buffer size is greater or * equal to 128B */ if (destBuffSize < DC_DEST_BUFFER_DYN_MIN_SIZE) { LAC_INVALID_PARAM_LOG( "Destination buffer size should be " "greater or equal to 128B"); return CPA_STATUS_INVALID_PARAM; } } else { /* Ensure that the destination buffer size is greater or * equal to devices min output buff size */ if (destBuffSize < pService->comp_device_data.minOutputBuffSize) { LAC_INVALID_PARAM_LOG1( "Destination buffer size should be " "greater or equal to %d bytes", pService->comp_device_data .minOutputBuffSize); return CPA_STATUS_INVALID_PARAM; } } } else { /* Ensure that the destination buffer size is greater than * 0 bytes */ if (destBuffSize < DC_DEST_BUFFER_DEC_MIN_SIZE) { LAC_INVALID_PARAM_LOG( "Destination buffer size should be " "greater than 0 bytes"); return CPA_STATUS_INVALID_PARAM; } } return CPA_STATUS_SUCCESS; } /** ***************************************************************************** * @ingroup Dc_DataCompression * Populate the compression request parameters * * @description * This function will populate the compression request parameters * * @param[out] pCompReqParams Pointer to the compression request parameters * @param[in] pCookie Pointer to the compression cookie * *****************************************************************************/ static void dcCompRequestParamsPopulate(icp_qat_fw_comp_req_params_t *pCompReqParams, dc_compression_cookie_t *pCookie) { pCompReqParams->comp_len = pCookie->srcTotalDataLenInBytes; pCompReqParams->out_buffer_sz = pCookie->dstTotalDataLenInBytes; } /** 
***************************************************************************** * @ingroup Dc_DataCompression * Create the requests for compression or decompression * * @description * Create the requests for compression or decompression. This function * will update the cookie with all required information. * * @param[out] pCookie Pointer to the compression cookie * @param[in] pService Pointer to the compression service * @param[in] pSessionDesc Pointer to the session descriptor * @param[in] pSessionHandle Session handle * @param[in] pSrcBuff Pointer to data buffer for compression * @param[in] pDestBuff Pointer to buffer space for data after * compression * @param[in] pResults Pointer to results structure * @param[in] flushFlag Indicates the type of flush to be * performed * @param[in] pOpData Pointer to request information structure * holding parameters for cpaDcCompress2 * and CpaDcDecompressData2 * @param[in] callbackTag Pointer to the callback tag * @param[in] compDecomp Direction of the operation * @param[in] cnvMode CNV Mode * * @retval CPA_STATUS_SUCCESS Function executed successfully * @retval CPA_STATUS_INVALID_PARAM Invalid parameter passed in * *****************************************************************************/ static CpaStatus dcCreateRequest(dc_compression_cookie_t *pCookie, sal_compression_service_t *pService, dc_session_desc_t *pSessionDesc, CpaDcSessionHandle pSessionHandle, CpaBufferList *pSrcBuff, CpaBufferList *pDestBuff, CpaDcRqResults *pResults, CpaDcFlush flushFlag, CpaDcOpData *pOpData, void *callbackTag, dc_request_dir_t compDecomp, dc_cnv_mode_t cnvMode) { icp_qat_fw_comp_req_t *pMsg = NULL; icp_qat_fw_comp_req_params_t *pCompReqParams = NULL; Cpa64U srcAddrPhys = 0, dstAddrPhys = 0; Cpa64U srcTotalDataLenInBytes = 0, dstTotalDataLenInBytes = 0; Cpa32U rpCmdFlags = 0; Cpa8U sop = ICP_QAT_FW_COMP_SOP; Cpa8U eop = ICP_QAT_FW_COMP_EOP; Cpa8U bFinal = ICP_QAT_FW_COMP_NOT_BFINAL; Cpa8U crcMode = ICP_QAT_FW_COMP_CRC_MODE_LEGACY; Cpa8U cnvDecompReq = ICP_QAT_FW_COMP_NO_CNV; Cpa8U cnvRecovery = ICP_QAT_FW_COMP_NO_CNV_RECOVERY; + CpaBoolean cnvErrorInjection = ICP_QAT_FW_COMP_NO_CNV_DFX; CpaBoolean integrityCrcCheck = CPA_FALSE; CpaStatus status = CPA_STATUS_SUCCESS; CpaDcFlush flush = CPA_DC_FLUSH_NONE; Cpa32U initial_adler = 1; Cpa32U initial_crc32 = 0; icp_qat_fw_comp_req_t *pReqCache = NULL; /* Write the buffer descriptors */ status = LacBuffDesc_BufferListDescWriteAndGetSize( pSrcBuff, &srcAddrPhys, CPA_FALSE, &srcTotalDataLenInBytes, &(pService->generic_service_info)); if (status != CPA_STATUS_SUCCESS) { return status; } status = LacBuffDesc_BufferListDescWriteAndGetSize( pDestBuff, &dstAddrPhys, CPA_FALSE, &dstTotalDataLenInBytes, &(pService->generic_service_info)); if (status != CPA_STATUS_SUCCESS) { return status; } /* Populate the compression cookie */ pCookie->dcInstance = pService; pCookie->pSessionHandle = pSessionHandle; pCookie->callbackTag = callbackTag; pCookie->pSessionDesc = pSessionDesc; pCookie->pDcOpData = pOpData; pCookie->pResults = pResults; pCookie->compDecomp = compDecomp; pCookie->pUserSrcBuff = NULL; pCookie->pUserDestBuff = NULL; /* Extract flush flag from either the opData or from the * parameter.
OpData has been introduced with the APIs * cpaDcCompressData2 and cpaDcDecompressData2 */ if (NULL != pOpData) { flush = pOpData->flushFlag; integrityCrcCheck = pOpData->integrityCrcCheck; } else { flush = flushFlag; } pCookie->flushFlag = flush; /* The firmware expects the length in bytes for source and destination * to be Cpa32U parameters. However the total data length could be * bigger as allocated by the user. We ensure that this is not the case * in dcCheckSourceData and cast the values to Cpa32U here */ pCookie->srcTotalDataLenInBytes = (Cpa32U)srcTotalDataLenInBytes; - if ((DC_COMPRESSION_REQUEST == compDecomp) && + if ((isDcGen2x(pService)) && (DC_COMPRESSION_REQUEST == compDecomp) && (CPA_DC_HT_FULL_DYNAMIC == pSessionDesc->huffType)) { if (pService->minInterBuffSizeInBytes < (Cpa32U)dstTotalDataLenInBytes) { pCookie->dstTotalDataLenInBytes = (Cpa32U)(pService->minInterBuffSizeInBytes); } else { pCookie->dstTotalDataLenInBytes = (Cpa32U)dstTotalDataLenInBytes; } } else { pCookie->dstTotalDataLenInBytes = (Cpa32U)dstTotalDataLenInBytes; } /* The device cannot decompress an odd byte decompression request * if bFinal is not set */ if (CPA_TRUE != pService->comp_device_data.oddByteDecompNobFinal) { if ((CPA_DC_STATEFUL == pSessionDesc->sessState) && (CPA_DC_FLUSH_FINAL != flushFlag) && (DC_DECOMPRESSION_REQUEST == compDecomp) && (pCookie->srcTotalDataLenInBytes & 0x1)) { pCookie->srcTotalDataLenInBytes--; } } /* The device cannot decompress odd byte interim requests */ if (CPA_TRUE != pService->comp_device_data.oddByteDecompInterim) { if ((CPA_DC_STATEFUL == pSessionDesc->sessState) && (CPA_DC_FLUSH_FINAL != flushFlag) && (CPA_DC_FLUSH_FULL != flushFlag) && (DC_DECOMPRESSION_REQUEST == compDecomp) && (pCookie->srcTotalDataLenInBytes & 0x1)) { pCookie->srcTotalDataLenInBytes--; } } pMsg = (icp_qat_fw_comp_req_t *)&pCookie->request; if (DC_COMPRESSION_REQUEST == compDecomp) { pReqCache = &(pSessionDesc->reqCacheComp); } else { pReqCache = &(pSessionDesc->reqCacheDecomp); } /* Fills the msg from the template cached in the session descriptor */ memcpy((void *)pMsg, (void *)(pReqCache), LAC_QAT_DC_REQ_SZ_LW * LAC_LONG_WORD_IN_BYTES); if (DC_REQUEST_FIRST == pSessionDesc->requestType) { initial_adler = 1; initial_crc32 = 0; if (CPA_DC_ADLER32 == pSessionDesc->checksumType) { - pSessionDesc->previousChecksum = 1; + pSessionDesc->previousChecksum = initial_adler; } else { - pSessionDesc->previousChecksum = 0; + pSessionDesc->previousChecksum = initial_crc32; } } else if (CPA_DC_STATELESS == pSessionDesc->sessState) { pSessionDesc->previousChecksum = pResults->checksum; if (CPA_DC_ADLER32 == pSessionDesc->checksumType) { initial_adler = pSessionDesc->previousChecksum; } else { initial_crc32 = pSessionDesc->previousChecksum; } } /* Backup source and destination buffer addresses, * CRC calculations both for CNV and translator overflow * will be performed on them in the callback function.
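 * (The cookie keeps the caller's CpaBufferList pointers here, as opposed * to the physical descriptor addresses written into the request, * presumably so the callback can still walk the flat buffers when it * recomputes the CRCs.)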
*/ pCookie->pUserSrcBuff = pSrcBuff; pCookie->pUserDestBuff = pDestBuff; /* * Due to the implementation of CNV support and the need for backwards * compatibility, certain fields in the request and response structs have * been changed, moved or placed in unions. The cnvMode flag signifies the * fields to be selected from req/res. * * Doing extended crc checks makes sense only when we want to do the * actual CNV */ if (CPA_TRUE == pService->generic_service_info.integrityCrcCheck && CPA_TRUE == integrityCrcCheck) { pMsg->comp_pars.crc.crc_data_addr = pSessionDesc->physDataIntegrityCrcs; crcMode = ICP_QAT_FW_COMP_CRC_MODE_E2E; } else { /* Legacy request structure */ pMsg->comp_pars.crc.legacy.initial_adler = initial_adler; pMsg->comp_pars.crc.legacy.initial_crc32 = initial_crc32; crcMode = ICP_QAT_FW_COMP_CRC_MODE_LEGACY; } /* Populate the cmdFlags */ if (CPA_DC_STATEFUL == pSessionDesc->sessState) { pSessionDesc->previousRequestType = pSessionDesc->requestType; if (DC_REQUEST_FIRST == pSessionDesc->requestType) { /* Update the request type for following requests */ pSessionDesc->requestType = DC_REQUEST_SUBSEQUENT; /* Reinitialise the cumulative amount of consumed bytes */ pSessionDesc->cumulativeConsumedBytes = 0; if (DC_COMPRESSION_REQUEST == compDecomp) { pSessionDesc->isSopForCompressionProcessed = CPA_TRUE; } else if (DC_DECOMPRESSION_REQUEST == compDecomp) { pSessionDesc->isSopForDecompressionProcessed = CPA_TRUE; } } else { if (DC_COMPRESSION_REQUEST == compDecomp) { if (CPA_TRUE == pSessionDesc ->isSopForCompressionProcessed) { sop = ICP_QAT_FW_COMP_NOT_SOP; } else { pSessionDesc ->isSopForCompressionProcessed = CPA_TRUE; } } else if (DC_DECOMPRESSION_REQUEST == compDecomp) { if (CPA_TRUE == pSessionDesc ->isSopForDecompressionProcessed) { sop = ICP_QAT_FW_COMP_NOT_SOP; } else { pSessionDesc ->isSopForDecompressionProcessed = CPA_TRUE; } } } if ((CPA_DC_FLUSH_FINAL == flush) || (CPA_DC_FLUSH_FULL == flush)) { /* Update the request type for following requests */ pSessionDesc->requestType = DC_REQUEST_FIRST; } else { eop = ICP_QAT_FW_COMP_NOT_EOP; } } else { - if (DC_REQUEST_FIRST == pSessionDesc->requestType) { /* Reinitialise the cumulative amount of consumed bytes */ pSessionDesc->cumulativeConsumedBytes = 0; } } /* (LW 14 - 15) */ pCompReqParams = &(pMsg->comp_pars); dcCompRequestParamsPopulate(pCompReqParams, pCookie); if (CPA_DC_FLUSH_FINAL == flush) { bFinal = ICP_QAT_FW_COMP_BFINAL; } switch (cnvMode) { case DC_CNVNR: cnvRecovery = ICP_QAT_FW_COMP_CNV_RECOVERY; /* Fall through is intended here, because for CNVNR * cnvDecompReq also needs to be set */ case DC_CNV: cnvDecompReq = ICP_QAT_FW_COMP_CNV; + if (isDcGen4x(pService)) { + cnvErrorInjection = pSessionDesc->cnvErrorInjection; + } break; case DC_NO_CNV: cnvDecompReq = ICP_QAT_FW_COMP_NO_CNV; cnvRecovery = ICP_QAT_FW_COMP_NO_CNV_RECOVERY; break; } /* LW 18 */ - rpCmdFlags = ICP_QAT_FW_COMP_REQ_PARAM_FLAGS_BUILD( - sop, eop, bFinal, cnvDecompReq, cnvRecovery, crcMode); + rpCmdFlags = ICP_QAT_FW_COMP_REQ_PARAM_FLAGS_BUILD(sop, + eop, + bFinal, + cnvDecompReq, + cnvRecovery, + cnvErrorInjection, + crcMode); + pMsg->comp_pars.req_par_flags = rpCmdFlags; /* Populates the QAT common request middle part of the message * (LW 6 to 11) */ SalQatMsg_CmnMidWrite((icp_qat_fw_la_bulk_req_t *)pMsg, pCookie, DC_DEFAULT_QAT_PTR_TYPE, srcAddrPhys, dstAddrPhys, 0, 0); return CPA_STATUS_SUCCESS; } /** ***************************************************************************** * @ingroup Dc_DataCompression * Send a compression request to QAT * * @description *
Send the requests for compression or decompression to QAT * * @param[in] pCookie Pointer to the compression cookie * @param[in] pService Pointer to the compression service * @param[in] pSessionDesc Pointer to the session descriptor * @param[in] compDecomp Direction of the operation * * @retval CPA_STATUS_SUCCESS Function executed successfully * @retval CPA_STATUS_INVALID_PARAM Invalid parameter passed in * *****************************************************************************/ static CpaStatus dcSendRequest(dc_compression_cookie_t *pCookie, sal_compression_service_t *pService, dc_session_desc_t *pSessionDesc, dc_request_dir_t compDecomp) { CpaStatus status = CPA_STATUS_SUCCESS; /* Send to QAT */ status = icp_adf_transPutMsg(pService->trans_handle_compression_tx, (void *)&(pCookie->request), LAC_QAT_DC_REQ_SZ_LW); if ((CPA_DC_STATEFUL == pSessionDesc->sessState) && (CPA_STATUS_RETRY == status)) { /* reset requestType after receiving a retry on * the stateful request */ pSessionDesc->requestType = pSessionDesc->previousRequestType; } return status; } /** ***************************************************************************** * @ingroup Dc_DataCompression * Process the synchronous and asynchronous case for compression or * decompression * * @description * Process the synchronous and asynchronous case for compression or * decompression. This function will then create and send the request to * the firmware. * * @param[in] pService Pointer to the compression service * @param[in] pSessionDesc Pointer to the session descriptor * @param[in] dcInstance Instance handle derived from discovery * functions * @param[in] pSessionHandle Session handle * @param[in] numRequests Number of operations in the batch request * @param[in] pBatchOpData Address of the list of jobs to be processed * @param[in] pSrcBuff Pointer to data buffer for compression * @param[in] pDestBuff Pointer to buffer space for data after * compression * @param[in] pResults Pointer to results structure * @param[in] flushFlag Indicates the type of flush to be * performed * @param[in] pOpData Pointer to request information structure * holding parameters for cpaDcCompress2 and * CpaDcDecompressData2 * @param[in] callbackTag Pointer to the callback tag * @param[in] compDecomp Direction of the operation * @param[in] isAsyncMode Used to know if synchronous or asynchronous * mode * @param[in] cnvMode CNV Mode * * @retval CPA_STATUS_SUCCESS Function executed successfully * @retval CPA_STATUS_RETRY Retry operation * @retval CPA_STATUS_FAIL Function failed * @retval CPA_STATUS_RESOURCE Resource error * *****************************************************************************/ static CpaStatus dcCompDecompData(sal_compression_service_t *pService, dc_session_desc_t *pSessionDesc, CpaInstanceHandle dcInstance, CpaDcSessionHandle pSessionHandle, CpaBufferList *pSrcBuff, CpaBufferList *pDestBuff, CpaDcRqResults *pResults, CpaDcFlush flushFlag, CpaDcOpData *pOpData, void *callbackTag, dc_request_dir_t compDecomp, CpaBoolean isAsyncMode, dc_cnv_mode_t cnvMode) { CpaStatus status = CPA_STATUS_SUCCESS; dc_compression_cookie_t *pCookie = NULL; if ((LacSync_GenWakeupSyncCaller == pSessionDesc->pCompressionCb) && isAsyncMode == CPA_TRUE) { lac_sync_op_data_t *pSyncCallbackData = NULL; status = LacSync_CreateSyncCookie(&pSyncCallbackData); if (CPA_STATUS_SUCCESS == status) { status = dcCompDecompData(pService, pSessionDesc, dcInstance, pSessionHandle, pSrcBuff, pDestBuff, pResults, flushFlag, pOpData, pSyncCallbackData, compDecomp, CPA_FALSE,
cnvMode); } else { return status; } if (CPA_STATUS_SUCCESS == status) { CpaStatus syncStatus = CPA_STATUS_SUCCESS; syncStatus = LacSync_WaitForCallback(pSyncCallbackData, DC_SYNC_CALLBACK_TIMEOUT, &status, NULL); /* If callback doesn't come back */ if (CPA_STATUS_SUCCESS != syncStatus) { if (DC_COMPRESSION_REQUEST == compDecomp) { COMPRESSION_STAT_INC( numCompCompletedErrors, pService); } else { COMPRESSION_STAT_INC( numDecompCompletedErrors, pService); } LAC_LOG_ERROR("Callback timed out"); status = syncStatus; } } else { /* As the Request was not sent the Callback will never * be called, so need to indicate that we're finished * with cookie so it can be destroyed. */ LacSync_SetSyncCookieComplete(pSyncCallbackData); } LacSync_DestroySyncCookie(&pSyncCallbackData); return status; } /* Allocate the compression cookie * The memory is freed in callback or in sendRequest if an error occurs */ pCookie = (dc_compression_cookie_t *)Lac_MemPoolEntryAlloc( pService->compression_mem_pool); if (NULL == pCookie) { LAC_LOG_ERROR("Cannot get mem pool entry for compression"); status = CPA_STATUS_RESOURCE; } else if ((void *)CPA_STATUS_RETRY == pCookie) { pCookie = NULL; status = CPA_STATUS_RETRY; } if (CPA_STATUS_SUCCESS == status) { status = dcCreateRequest(pCookie, pService, pSessionDesc, pSessionHandle, pSrcBuff, pDestBuff, pResults, flushFlag, pOpData, callbackTag, compDecomp, cnvMode); } if (CPA_STATUS_SUCCESS == status) { /* Increment number of pending callbacks for session */ if (CPA_DC_STATELESS == pSessionDesc->sessState) { qatUtilsAtomicInc( &(pSessionDesc->pendingStatelessCbCount)); } status = dcSendRequest(pCookie, pService, pSessionDesc, compDecomp); } if (CPA_STATUS_SUCCESS == status) { if (DC_COMPRESSION_REQUEST == compDecomp) { COMPRESSION_STAT_INC(numCompRequests, pService); } else { COMPRESSION_STAT_INC(numDecompRequests, pService); } } else { if (DC_COMPRESSION_REQUEST == compDecomp) { COMPRESSION_STAT_INC(numCompRequestsErrors, pService); } else { COMPRESSION_STAT_INC(numDecompRequestsErrors, pService); } /* Decrement number of pending callbacks for session */ if (CPA_DC_STATELESS == pSessionDesc->sessState) { qatUtilsAtomicDec( &(pSessionDesc->pendingStatelessCbCount)); } else { qatUtilsAtomicDec( &(pSessionDesc->pendingStatefulCbCount)); } /* Free the memory pool */ if (NULL != pCookie) { if (status != CPA_STATUS_UNSUPPORTED) { /* Free the memory pool */ Lac_MemPoolEntryFree(pCookie); pCookie = NULL; } } } return status; } /** ***************************************************************************** * @ingroup Dc_DataCompression * Handle zero length compression or decompression requests * * @description * Handle zero length compression or decompression requests * * @param[in] pService Pointer to the compression service * @param[in] pSessionDesc Pointer to the session descriptor * @param[in] pResults Pointer to results structure * @param[in] flushFlag Indicates the type of flush to be * performed * @param[in] callbackTag User supplied value to help correlate * the callback with its associated request * @param[in] compDecomp Direction of the operation * * @retval CPA_TRUE Zero length SOP or MOP processed * @retval CPA_FALSE Zero length EOP * *****************************************************************************/ static CpaStatus dcZeroLengthRequests(sal_compression_service_t *pService, dc_session_desc_t *pSessionDesc, CpaDcRqResults *pResults, CpaDcFlush flushFlag, void *callbackTag, dc_request_dir_t compDecomp) { CpaBoolean status = CPA_FALSE; CpaDcCallbackFn pCbFunc = 
pSessionDesc->pCompressionCb; if (DC_REQUEST_FIRST == pSessionDesc->requestType) { /* Reinitialise the cumulative amount of consumed bytes */ pSessionDesc->cumulativeConsumedBytes = 0; /* Zero length SOP */ if (CPA_DC_ADLER32 == pSessionDesc->checksumType) { pResults->checksum = 1; } else { pResults->checksum = 0; } status = CPA_TRUE; } else if ((CPA_DC_FLUSH_NONE == flushFlag) || (CPA_DC_FLUSH_SYNC == flushFlag)) { /* Zero length MOP */ pResults->checksum = pSessionDesc->previousChecksum; status = CPA_TRUE; } if (CPA_TRUE == status) { pResults->status = CPA_DC_OK; pResults->produced = 0; pResults->consumed = 0; /* Increment statistics */ if (DC_COMPRESSION_REQUEST == compDecomp) { COMPRESSION_STAT_INC(numCompRequests, pService); COMPRESSION_STAT_INC(numCompCompleted, pService); } else { COMPRESSION_STAT_INC(numDecompRequests, pService); COMPRESSION_STAT_INC(numDecompCompleted, pService); } - if (CPA_STATUS_SUCCESS != - LAC_SPINUNLOCK(&(pSessionDesc->sessionLock))) { - LAC_LOG_ERROR("Cannot unlock session lock"); - } + LAC_SPINUNLOCK(&(pSessionDesc->sessionLock)); if ((NULL != pCbFunc) && (LacSync_GenWakeupSyncCaller != pCbFunc)) { pCbFunc(callbackTag, CPA_STATUS_SUCCESS); } return CPA_TRUE; } return CPA_FALSE; } static CpaStatus dcParamCheck(CpaInstanceHandle dcInstance, CpaDcSessionHandle pSessionHandle, sal_compression_service_t *pService, CpaBufferList *pSrcBuff, CpaBufferList *pDestBuff, CpaDcRqResults *pResults, dc_session_desc_t *pSessionDesc, CpaDcFlush flushFlag, Cpa64U srcBuffSize) { if (dcCheckSourceData(pSessionHandle, pSrcBuff, pDestBuff, pResults, flushFlag, srcBuffSize, NULL) != CPA_STATUS_SUCCESS) { return CPA_STATUS_INVALID_PARAM; } if (dcCheckDestinationData( pService, pSessionHandle, pDestBuff, DC_COMPRESSION_REQUEST) != CPA_STATUS_SUCCESS) { return CPA_STATUS_INVALID_PARAM; } if (CPA_DC_DIR_DECOMPRESS == pSessionDesc->sessDirection) { LAC_INVALID_PARAM_LOG("Invalid sessDirection value"); return CPA_STATUS_INVALID_PARAM; } return CPA_STATUS_SUCCESS; } CpaStatus cpaDcCompressData(CpaInstanceHandle dcInstance, CpaDcSessionHandle pSessionHandle, CpaBufferList *pSrcBuff, CpaBufferList *pDestBuff, CpaDcRqResults *pResults, CpaDcFlush flushFlag, void *callbackTag) { sal_compression_service_t *pService = NULL; dc_session_desc_t *pSessionDesc = NULL; CpaInstanceHandle insHandle = NULL; Cpa64U srcBuffSize = 0; if (CPA_INSTANCE_HANDLE_SINGLE == dcInstance) { insHandle = dcGetFirstHandle(); } else { insHandle = dcInstance; } pService = (sal_compression_service_t *)insHandle; LAC_CHECK_NULL_PARAM(insHandle); LAC_CHECK_NULL_PARAM(pSessionHandle); /* Check if SAL is initialised otherwise return an error */ SAL_RUNNING_CHECK(insHandle); /* This check is outside the parameter checking as it is needed to * manage zero length requests */ if (LacBuffDesc_BufferListVerifyNull(pSrcBuff, &srcBuffSize, LAC_NO_ALIGNMENT_SHIFT) != CPA_STATUS_SUCCESS) { LAC_INVALID_PARAM_LOG("Invalid source buffer list parameter"); return CPA_STATUS_INVALID_PARAM; } /* Ensure this is a compression instance */ SAL_CHECK_INSTANCE_TYPE(insHandle, SAL_SERVICE_TYPE_COMPRESSION); pSessionDesc = DC_SESSION_DESC_FROM_CTX_GET(pSessionHandle); if (CPA_STATUS_SUCCESS != dcParamCheck(insHandle, pSessionHandle, pService, pSrcBuff, pDestBuff, pResults, pSessionDesc, flushFlag, srcBuffSize)) { return CPA_STATUS_INVALID_PARAM; } if (CPA_DC_STATEFUL == pSessionDesc->sessState) { LAC_INVALID_PARAM_LOG( "Invalid session state, stateful sessions " "are not supported"); return CPA_STATUS_UNSUPPORTED; } if 
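/**
 *****************************************************************************
 * @ingroup Dc_DataCompression
 *      Example: checksum seeds used by the zero-length path
 *
 * @description
 *      Editor's illustration, not part of the driver. dcZeroLengthRequests()
 *      above reports a checksum of 1 for Adler-32 and 0 for CRC-32 on a
 *      zero-length SOP because those are the defined values of the two
 *      algorithms over empty input (Adler-32 starts with A = 1, B = 0), so
 *      the request can complete without touching the hardware.
 *
 * @code
 *      #include "cpa.h"
 *      #include "cpa_dc.h"
 *
 *      static Cpa32U
 *      dcEmptyInputChecksum(CpaDcChecksum type)
 *      {
 *              // Mirrors the seeding in dcZeroLengthRequests() and
 *              // dcInitSession().
 *              return (CPA_DC_ADLER32 == type) ? 1 : 0;
 *      }
 * @endcode
 *****************************************************************************/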
(!(pService->generic_service_info.dcExtendedFeatures & DC_CNV_EXTENDED_CAPABILITY)) { LAC_INVALID_PARAM_LOG( "CompressAndVerify feature not supported"); return CPA_STATUS_UNSUPPORTED; } if (!(pService->generic_service_info.dcExtendedFeatures & DC_CNVNR_EXTENDED_CAPABILITY)) { LAC_INVALID_PARAM_LOG( "CompressAndVerifyAndRecovery feature not supported"); return CPA_STATUS_UNSUPPORTED; } return dcCompDecompData(pService, pSessionDesc, - dcInstance, + insHandle, pSessionHandle, pSrcBuff, pDestBuff, pResults, flushFlag, NULL, callbackTag, DC_COMPRESSION_REQUEST, CPA_TRUE, DC_CNVNR); } CpaStatus cpaDcCompressData2(CpaInstanceHandle dcInstance, CpaDcSessionHandle pSessionHandle, CpaBufferList *pSrcBuff, CpaBufferList *pDestBuff, CpaDcOpData *pOpData, CpaDcRqResults *pResults, void *callbackTag) { sal_compression_service_t *pService = NULL; dc_session_desc_t *pSessionDesc = NULL; CpaInstanceHandle insHandle = NULL; Cpa64U srcBuffSize = 0; dc_cnv_mode_t cnvMode = DC_NO_CNV; LAC_CHECK_NULL_PARAM(pOpData); if (((CPA_TRUE != pOpData->compressAndVerify) && (CPA_FALSE != pOpData->compressAndVerify)) || ((CPA_FALSE != pOpData->compressAndVerifyAndRecover) && (CPA_TRUE != pOpData->compressAndVerifyAndRecover))) { return CPA_STATUS_INVALID_PARAM; } if ((CPA_FALSE == pOpData->compressAndVerify) && (CPA_TRUE == pOpData->compressAndVerifyAndRecover)) { return CPA_STATUS_INVALID_PARAM; } if ((CPA_TRUE == pOpData->compressAndVerify) && (CPA_TRUE == pOpData->compressAndVerifyAndRecover) && (CPA_FALSE == pOpData->integrityCrcCheck)) { return cpaDcCompressData(dcInstance, pSessionHandle, pSrcBuff, pDestBuff, pResults, pOpData->flushFlag, callbackTag); } if (CPA_FALSE == pOpData->compressAndVerify) { LAC_INVALID_PARAM_LOG( "Data compression without verification not allowed"); return CPA_STATUS_UNSUPPORTED; } if (CPA_INSTANCE_HANDLE_SINGLE == dcInstance) { insHandle = dcGetFirstHandle(); } else { insHandle = dcInstance; } pService = (sal_compression_service_t *)insHandle; LAC_CHECK_NULL_PARAM(insHandle); LAC_CHECK_NULL_PARAM(pSessionHandle); LAC_CHECK_NULL_PARAM(pOpData); /* Check if SAL is initialised otherwise return an error */ SAL_RUNNING_CHECK(insHandle); /* This check is outside the parameter checking as it is needed to * manage zero length requests */ if (LacBuffDesc_BufferListVerifyNull(pSrcBuff, &srcBuffSize, LAC_NO_ALIGNMENT_SHIFT) != CPA_STATUS_SUCCESS) { LAC_INVALID_PARAM_LOG("Invalid source buffer list parameter"); return CPA_STATUS_INVALID_PARAM; } /* Ensure this is a compression instance */ SAL_CHECK_INSTANCE_TYPE(insHandle, SAL_SERVICE_TYPE_COMPRESSION); pSessionDesc = DC_SESSION_DESC_FROM_CTX_GET(pSessionHandle); if (CPA_TRUE == pOpData->compressAndVerify && CPA_DC_STATEFUL == pSessionDesc->sessState) { LAC_INVALID_PARAM_LOG( "Invalid session state, stateful sessions " "not supported with CNV"); return CPA_STATUS_UNSUPPORTED; } if (!(pService->generic_service_info.dcExtendedFeatures & DC_CNV_EXTENDED_CAPABILITY) && (CPA_TRUE == pOpData->compressAndVerify)) { LAC_INVALID_PARAM_LOG( "CompressAndVerify feature not supported"); return CPA_STATUS_UNSUPPORTED; } if (CPA_STATUS_SUCCESS != dcParamCheck(insHandle, pSessionHandle, pService, pSrcBuff, pDestBuff, pResults, pSessionDesc, pOpData->flushFlag, srcBuffSize)) { return CPA_STATUS_INVALID_PARAM; } if (CPA_STATUS_SUCCESS != dcCheckOpData(pService, pOpData)) { return CPA_STATUS_INVALID_PARAM; } if (CPA_TRUE != pOpData->compressAndVerify) { if (srcBuffSize > DC_COMP_MAX_BUFF_SIZE) { LAC_LOG_ERROR( "Compression payload greater than 64KB is " "unsupported, 
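/**
 *****************************************************************************
 * @ingroup Dc_DataCompression
 *      Example: CnV/CnVnR flag combinations accepted by cpaDcCompressData2
 *
 * @description
 *      Editor's illustration condensing the checks above: both flags must be
 *      proper booleans; compressAndVerifyAndRecover (CnVnR) requires
 *      compressAndVerify (CnV); CnV and CnVnR set together without
 *      integrityCrcCheck are routed to the legacy cpaDcCompressData() path;
 *      and CnV disabled is rejected as unsupported.
 *
 * @code
 *      #include "cpa.h"
 *      #include "cpa_dc.h"
 *
 *      static CpaStatus
 *      dcValidateCnvFlags(const CpaDcOpData *pOpData)
 *      {
 *              if ((CPA_TRUE == pOpData->compressAndVerifyAndRecover) &&
 *                  (CPA_FALSE == pOpData->compressAndVerify))
 *                      return CPA_STATUS_INVALID_PARAM; // CnVnR needs CnV
 *              if (CPA_FALSE == pOpData->compressAndVerify)
 *                      return CPA_STATUS_UNSUPPORTED;   // CnV is mandatory
 *              return CPA_STATUS_SUCCESS;
 *      }
 * @endcode
 *****************************************************************************/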
when CnV is disabled\n"); return CPA_STATUS_UNSUPPORTED; } } if (CPA_DC_STATEFUL == pSessionDesc->sessState) { /* Lock the session to check if there are in-flight stateful * requests */ - if (CPA_STATUS_SUCCESS != - LAC_SPINLOCK(&(pSessionDesc->sessionLock))) { - LAC_LOG_ERROR("Cannot unlock session lock"); - } + LAC_SPINLOCK(&(pSessionDesc->sessionLock)); /* Check if there is already one in-flight stateful request */ if (0 != qatUtilsAtomicGet( &(pSessionDesc->pendingStatefulCbCount))) { LAC_LOG_ERROR( "Only one in-flight stateful request supported"); - if (CPA_STATUS_SUCCESS != - LAC_SPINUNLOCK(&(pSessionDesc->sessionLock))) { - LAC_LOG_ERROR("Cannot unlock session lock"); - } + LAC_SPINUNLOCK(&(pSessionDesc->sessionLock)); return CPA_STATUS_RETRY; } if (0 == srcBuffSize) { if (CPA_TRUE == dcZeroLengthRequests(pService, pSessionDesc, pResults, pOpData->flushFlag, callbackTag, DC_COMPRESSION_REQUEST)) { return CPA_STATUS_SUCCESS; } } qatUtilsAtomicInc(&(pSessionDesc->pendingStatefulCbCount)); - if (CPA_STATUS_SUCCESS != - LAC_SPINUNLOCK(&(pSessionDesc->sessionLock))) { - LAC_LOG_ERROR("Cannot unlock session lock"); - } + LAC_SPINUNLOCK(&(pSessionDesc->sessionLock)); } if (CPA_TRUE == pOpData->compressAndVerify) { cnvMode = DC_CNV; } return dcCompDecompData(pService, pSessionDesc, - dcInstance, + insHandle, pSessionHandle, pSrcBuff, pDestBuff, pResults, pOpData->flushFlag, pOpData, callbackTag, DC_COMPRESSION_REQUEST, CPA_TRUE, cnvMode); } static CpaStatus dcDecompressDataCheck(CpaInstanceHandle insHandle, CpaDcSessionHandle pSessionHandle, CpaBufferList *pSrcBuff, CpaBufferList *pDestBuff, CpaDcRqResults *pResults, CpaDcFlush flushFlag, Cpa64U *srcBufferSize) { sal_compression_service_t *pService = NULL; dc_session_desc_t *pSessionDesc = NULL; Cpa64U srcBuffSize = 0; pService = (sal_compression_service_t *)insHandle; LAC_CHECK_NULL_PARAM(insHandle); /* Check if SAL is initialised otherwise return an error */ SAL_RUNNING_CHECK(insHandle); /* This check is outside the parameter checking as it is needed to * manage zero length requests */ if (LacBuffDesc_BufferListVerifyNull(pSrcBuff, &srcBuffSize, LAC_NO_ALIGNMENT_SHIFT) != CPA_STATUS_SUCCESS) { LAC_INVALID_PARAM_LOG("Invalid source buffer list parameter"); return CPA_STATUS_INVALID_PARAM; } /* Ensure this is a compression instance */ SAL_CHECK_INSTANCE_TYPE(insHandle, SAL_SERVICE_TYPE_COMPRESSION); if (dcCheckSourceData(pSessionHandle, pSrcBuff, pDestBuff, pResults, flushFlag, srcBuffSize, NULL) != CPA_STATUS_SUCCESS) { return CPA_STATUS_INVALID_PARAM; } if (dcCheckDestinationData(pService, pSessionHandle, pDestBuff, DC_DECOMPRESSION_REQUEST) != CPA_STATUS_SUCCESS) { return CPA_STATUS_INVALID_PARAM; } pSessionDesc = DC_SESSION_DESC_FROM_CTX_GET(pSessionHandle); if (CPA_DC_DIR_COMPRESS == pSessionDesc->sessDirection) { LAC_INVALID_PARAM_LOG("Invalid sessDirection value"); return CPA_STATUS_INVALID_PARAM; } *srcBufferSize = srcBuffSize; return CPA_STATUS_SUCCESS; } CpaStatus cpaDcDecompressData(CpaInstanceHandle dcInstance, CpaDcSessionHandle pSessionHandle, CpaBufferList *pSrcBuff, CpaBufferList *pDestBuff, CpaDcRqResults *pResults, CpaDcFlush flushFlag, void *callbackTag) { sal_compression_service_t *pService = NULL; dc_session_desc_t *pSessionDesc = NULL; CpaInstanceHandle insHandle = NULL; Cpa64U srcBuffSize = 0; CpaStatus status = CPA_STATUS_SUCCESS; if (CPA_INSTANCE_HANDLE_SINGLE == dcInstance) { insHandle = dcGetFirstHandle(); } else { insHandle = dcInstance; } status = dcDecompressDataCheck(insHandle, pSessionHandle, pSrcBuff, 
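/**
 *****************************************************************************
 * @ingroup Dc_DataCompression
 *      Example: the single in-flight gate for stateful sessions
 *
 * @description
 *      Editor's illustration, not part of the driver. Stateful sessions
 *      allow only one request in flight: the code above takes the session
 *      spinlock, inspects pendingStatefulCbCount, and returns
 *      CPA_STATUS_RETRY if a request is already pending. The same
 *      admit-at-most-one semantics can be expressed with a C11
 *      compare-and-swap, as sketched below.
 *
 * @code
 *      #include <stdatomic.h>
 *      #include <stdbool.h>
 *
 *      // Succeeds only when no request is in flight (count 0 -> 1).
 *      static bool
 *      stateful_try_admit(atomic_int *pending)
 *      {
 *              int expected = 0;
 *
 *              return atomic_compare_exchange_strong(pending, &expected, 1);
 *      }
 *
 *      // Completion path, mirroring qatUtilsAtomicDec().
 *      static void
 *      stateful_complete(atomic_int *pending)
 *      {
 *              atomic_fetch_sub(pending, 1);
 *      }
 * @endcode
 *****************************************************************************/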
pDestBuff, pResults, flushFlag, &srcBuffSize); if (CPA_STATUS_SUCCESS != status) { return status; } pService = (sal_compression_service_t *)insHandle; + /* Check if SAL is initialised otherwise return an error */ + SAL_RUNNING_CHECK(insHandle); + + /* This check is outside the parameter checking as it is needed to + * manage zero length requests */ + if (CPA_STATUS_SUCCESS != + LacBuffDesc_BufferListVerifyNull(pSrcBuff, + &srcBuffSize, + LAC_NO_ALIGNMENT_SHIFT)) { + QAT_UTILS_LOG("Invalid source buffer list parameter"); + return CPA_STATUS_INVALID_PARAM; + } + + /* Ensure this is a compression instance */ + SAL_CHECK_INSTANCE_TYPE(insHandle, SAL_SERVICE_TYPE_COMPRESSION); + + if (dcCheckSourceData(pSessionHandle, + pSrcBuff, + pDestBuff, + pResults, + flushFlag, + srcBuffSize, + NULL) != CPA_STATUS_SUCCESS) { + return CPA_STATUS_INVALID_PARAM; + } + if (dcCheckDestinationData(pService, + pSessionHandle, + pDestBuff, + DC_DECOMPRESSION_REQUEST) != + CPA_STATUS_SUCCESS) { + return CPA_STATUS_INVALID_PARAM; + } pSessionDesc = DC_SESSION_DESC_FROM_CTX_GET(pSessionHandle); + if (CPA_DC_DIR_COMPRESS == pSessionDesc->sessDirection) { + QAT_UTILS_LOG("Invalid sessDirection value"); + return CPA_STATUS_INVALID_PARAM; + } + + if (CPA_DC_STATEFUL == pSessionDesc->sessState) { /* Lock the session to check if there are in-flight stateful * requests */ - if (CPA_STATUS_SUCCESS != - LAC_SPINLOCK(&(pSessionDesc->sessionLock))) { - LAC_LOG_ERROR("Cannot lock session lock"); - return CPA_STATUS_RESOURCE; - } + LAC_SPINLOCK(&(pSessionDesc->sessionLock)); /* Check if there is already one in-flight stateful request */ if (0 != qatUtilsAtomicGet( &(pSessionDesc->pendingStatefulCbCount))) { LAC_LOG_ERROR( "Only one in-flight stateful request supported"); - if (CPA_STATUS_SUCCESS != - LAC_SPINUNLOCK(&(pSessionDesc->sessionLock))) { - LAC_LOG_ERROR("Cannot unlock session lock"); - } + LAC_SPINUNLOCK(&(pSessionDesc->sessionLock)); return CPA_STATUS_RETRY; } - if ((0 == srcBuffSize) || - ((1 == srcBuffSize) && (CPA_DC_FLUSH_FINAL != flushFlag) && - (CPA_DC_FLUSH_FULL != flushFlag))) { - if (CPA_TRUE == - dcZeroLengthRequests(pService, - pSessionDesc, - pResults, - flushFlag, - callbackTag, - DC_DECOMPRESSION_REQUEST)) { - return CPA_STATUS_SUCCESS; + /* Gen 4 handle 0 len requests in FW */ + if (isDcGen2x(pService)) { + if ((0 == srcBuffSize) || + ((1 == srcBuffSize) && + (CPA_DC_FLUSH_FINAL != flushFlag) && + (CPA_DC_FLUSH_FULL != flushFlag))) { + if (CPA_TRUE == + dcZeroLengthRequests( + pService, + pSessionDesc, + pResults, + flushFlag, + callbackTag, + DC_DECOMPRESSION_REQUEST)) { + return CPA_STATUS_SUCCESS; + } } } qatUtilsAtomicInc(&(pSessionDesc->pendingStatefulCbCount)); - if (CPA_STATUS_SUCCESS != - LAC_SPINUNLOCK(&(pSessionDesc->sessionLock))) { - LAC_LOG_ERROR("Cannot unlock session lock"); - } + LAC_SPINUNLOCK(&(pSessionDesc->sessionLock)); } return dcCompDecompData(pService, pSessionDesc, - dcInstance, + insHandle, pSessionHandle, pSrcBuff, pDestBuff, pResults, flushFlag, NULL, callbackTag, DC_DECOMPRESSION_REQUEST, CPA_TRUE, DC_NO_CNV); } CpaStatus cpaDcDecompressData2(CpaInstanceHandle dcInstance, CpaDcSessionHandle pSessionHandle, CpaBufferList *pSrcBuff, CpaBufferList *pDestBuff, CpaDcOpData *pOpData, CpaDcRqResults *pResults, void *callbackTag) { sal_compression_service_t *pService = NULL; dc_session_desc_t *pSessionDesc = NULL; CpaInstanceHandle insHandle = NULL; CpaStatus status = CPA_STATUS_SUCCESS; Cpa64U srcBuffSize = 0; LAC_CHECK_NULL_PARAM(pOpData); if (CPA_FALSE == 
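/**
 *****************************************************************************
 * @ingroup Dc_DataCompression
 *      Example: driving cpaDcDecompressData2() from an application
 *
 * @description
 *      Editor's illustration, not part of the driver. When integrityCrcCheck
 *      is CPA_FALSE, cpaDcDecompressData2(), whose body begins here, simply
 *      forwards to cpaDcDecompressData(); with it enabled the full pOpData
 *      path is taken. The buffer lists, results structure and callback tag
 *      below are assumed to have been set up by the caller.
 *
 * @code
 *      #include "cpa.h"
 *      #include "cpa_dc.h"
 *
 *      static CpaStatus
 *      decompressWithCrc(CpaInstanceHandle inst, CpaDcSessionHandle sess,
 *          CpaBufferList *src, CpaBufferList *dst, CpaDcRqResults *res,
 *          void *tag)
 *      {
 *              CpaDcOpData opData = { 0 };
 *
 *              opData.flushFlag = CPA_DC_FLUSH_FINAL;
 *              opData.integrityCrcCheck = CPA_TRUE;
 *
 *              return cpaDcDecompressData2(inst, sess, src, dst, &opData,
 *                  res, tag);
 *      }
 * @endcode
 *****************************************************************************/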
pOpData->integrityCrcCheck) { return cpaDcDecompressData(dcInstance, pSessionHandle, pSrcBuff, pDestBuff, pResults, pOpData->flushFlag, callbackTag); } if (CPA_INSTANCE_HANDLE_SINGLE == dcInstance) { insHandle = dcGetFirstHandle(); } else { insHandle = dcInstance; } status = dcDecompressDataCheck(insHandle, pSessionHandle, pSrcBuff, pDestBuff, pResults, pOpData->flushFlag, &srcBuffSize); if (CPA_STATUS_SUCCESS != status) { return status; } pService = (sal_compression_service_t *)insHandle; pSessionDesc = DC_SESSION_DESC_FROM_CTX_GET(pSessionHandle); - if (CPA_DC_STATEFUL == pSessionDesc->sessState) { - LAC_INVALID_PARAM_LOG("Invalid session: Stateful session is " - "not supported"); + LAC_CHECK_NULL_PARAM(insHandle); + + /* Check if SAL is initialised otherwise return an error */ + SAL_RUNNING_CHECK(insHandle); + + /* This check is outside the parameter checking as it is needed to + * manage zero length requests */ + if (CPA_STATUS_SUCCESS != + LacBuffDesc_BufferListVerifyNull(pSrcBuff, + &srcBuffSize, + LAC_NO_ALIGNMENT_SHIFT)) { + QAT_UTILS_LOG("Invalid source buffer list parameter"); + return CPA_STATUS_INVALID_PARAM; + } + + /* Ensure this is a compression instance */ + SAL_CHECK_INSTANCE_TYPE(insHandle, SAL_SERVICE_TYPE_COMPRESSION); + + if (CPA_STATUS_SUCCESS != + dcCheckSourceData(pSessionHandle, + pSrcBuff, + pDestBuff, + pResults, + CPA_DC_FLUSH_NONE, + srcBuffSize, + NULL)) { + return CPA_STATUS_INVALID_PARAM; + } + if (CPA_STATUS_SUCCESS != + dcCheckDestinationData(pService, + pSessionHandle, + pDestBuff, + DC_DECOMPRESSION_REQUEST)) { return CPA_STATUS_INVALID_PARAM; } + if (CPA_STATUS_SUCCESS != dcCheckOpData(pService, pOpData)) { + return CPA_STATUS_INVALID_PARAM; + } + + if (CPA_DC_DIR_COMPRESS == pSessionDesc->sessDirection) { + QAT_UTILS_LOG("Invalid sessDirection value"); + return CPA_STATUS_INVALID_PARAM; + } + + + if (CPA_DC_STATEFUL == pSessionDesc->sessState) { + /* Lock the session to check if there are in-flight stateful + * requests */ + LAC_SPINLOCK(&(pSessionDesc->sessionLock)); + + /* Check if there is already one in-flight stateful request */ + if (0 != + qatUtilsAtomicGet( + &(pSessionDesc->pendingStatefulCbCount))) { + LAC_LOG_ERROR( + "Only one in-flight stateful request supported"); + LAC_SPINUNLOCK(&(pSessionDesc->sessionLock)); + return CPA_STATUS_RETRY; + } + + /* Gen 4 handle 0 len requests in FW */ + if (isDcGen2x(pService)) { + if ((0 == srcBuffSize) || + ((1 == srcBuffSize) && + (CPA_DC_FLUSH_FINAL != pOpData->flushFlag) && + (CPA_DC_FLUSH_FULL != pOpData->flushFlag))) { + if (CPA_TRUE == + dcZeroLengthRequests( + pService, + pSessionDesc, + pResults, + pOpData->flushFlag, + callbackTag, + DC_DECOMPRESSION_REQUEST)) { + return CPA_STATUS_SUCCESS; + } + } + } + qatUtilsAtomicInc(&(pSessionDesc->pendingStatefulCbCount)); + LAC_SPINUNLOCK(&(pSessionDesc->sessionLock)); + } + return dcCompDecompData(pService, pSessionDesc, insHandle, pSessionHandle, pSrcBuff, pDestBuff, pResults, pOpData->flushFlag, pOpData, callbackTag, DC_DECOMPRESSION_REQUEST, CPA_TRUE, DC_NO_CNV); } diff --git a/sys/dev/qat/qat_api/common/compression/dc_dp.c b/sys/dev/qat/qat_api/common/compression/dc_dp.c index 4a24bf17dc32..9b00c5b09d7e 100644 --- a/sys/dev/qat/qat_api/common/compression/dc_dp.c +++ b/sys/dev/qat/qat_api/common/compression/dc_dp.c @@ -1,545 +1,560 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright(c) 2007-2022 Intel Corporation */ /* $FreeBSD$ */ /** ***************************************************************************** * @file dc_dp.c * * @defgroup 
cpaDcDp Data Compression Data Plane API * * @ingroup cpaDcDp * * @description * Implementation of the Data Compression DP operations. * *****************************************************************************/ /* ******************************************************************************* * Include public/global header files ******************************************************************************* */ #include "cpa.h" #include "cpa_dc.h" #include "cpa_dc_dp.h" #include "icp_qat_fw_comp.h" /* ******************************************************************************* * Include private header files ******************************************************************************* */ #include "dc_session.h" #include "dc_datapath.h" #include "lac_common.h" #include "lac_mem.h" #include "lac_mem_pools.h" #include "sal_types_compression.h" #include "lac_sal.h" #include "lac_sync.h" #include "sal_service_state.h" #include "sal_qat_cmn_msg.h" #include "icp_sal_poll.h" +#include "sal_hw_gen.h" /** ***************************************************************************** * @ingroup cpaDcDp * Check that pOpData is valid * * @description * Check that all the parameters defined in the pOpData are valid * * @param[in] pOpData Pointer to a structure containing the * request parameters * * @retval CPA_STATUS_SUCCESS Function executed successfully * @retval CPA_STATUS_INVALID_PARAM Invalid parameter passed in * *****************************************************************************/ static CpaStatus dcDataPlaneParamCheck(const CpaDcDpOpData *pOpData) { sal_compression_service_t *pService = NULL; dc_session_desc_t *pSessionDesc = NULL; LAC_CHECK_NULL_PARAM(pOpData); LAC_CHECK_NULL_PARAM(pOpData->dcInstance); LAC_CHECK_NULL_PARAM(pOpData->pSessionHandle); /* Ensure this is a compression instance */ SAL_CHECK_INSTANCE_TYPE(pOpData->dcInstance, SAL_SERVICE_TYPE_COMPRESSION); pService = (sal_compression_service_t *)(pOpData->dcInstance); pSessionDesc = DC_SESSION_DESC_FROM_CTX_GET(pOpData->pSessionHandle); if (NULL == pSessionDesc) { QAT_UTILS_LOG("Session handle not as expected.\n"); return CPA_STATUS_INVALID_PARAM; } if (CPA_FALSE == pSessionDesc->isDcDp) { QAT_UTILS_LOG("The session type should be data plane.\n"); return CPA_STATUS_INVALID_PARAM; } /* Compressing zero byte is not supported */ if ((CPA_DC_DIR_COMPRESS == pSessionDesc->sessDirection) && (0 == pOpData->bufferLenToCompress)) { - QAT_UTILS_LOG( - "The source buffer length to compress needs to be greater than zero byte.\n"); + QAT_UTILS_LOG("The source buffer length to compress needs to " + "be greater than zero byte.\n"); return CPA_STATUS_INVALID_PARAM; } if (pOpData->sessDirection > CPA_DC_DIR_DECOMPRESS) { QAT_UTILS_LOG("Invalid direction of operation.\n"); return CPA_STATUS_INVALID_PARAM; } if (0 == pOpData->srcBuffer) { QAT_UTILS_LOG("Invalid srcBuffer\n"); return CPA_STATUS_INVALID_PARAM; } if (0 == pOpData->destBuffer) { QAT_UTILS_LOG("Invalid destBuffer\n"); return CPA_STATUS_INVALID_PARAM; } if (pOpData->srcBuffer == pOpData->destBuffer) { QAT_UTILS_LOG("In place operation is not supported.\n"); return CPA_STATUS_INVALID_PARAM; } if (0 == pOpData->thisPhys) { QAT_UTILS_LOG("Invalid thisPhys\n"); return CPA_STATUS_INVALID_PARAM; } if ((CPA_TRUE != pOpData->compressAndVerify) && (CPA_FALSE != pOpData->compressAndVerify)) { QAT_UTILS_LOG("Invalid compressAndVerify\n"); return CPA_STATUS_INVALID_PARAM; } if ((CPA_TRUE == pOpData->compressAndVerify) && !(pService->generic_service_info.dcExtendedFeatures & 
DC_CNV_EXTENDED_CAPABILITY)) { QAT_UTILS_LOG("Invalid compressAndVerify, no CNV capability\n"); return CPA_STATUS_UNSUPPORTED; } if ((CPA_TRUE != pOpData->compressAndVerifyAndRecover) && (CPA_FALSE != pOpData->compressAndVerifyAndRecover)) { QAT_UTILS_LOG("Invalid compressAndVerifyAndRecover\n"); return CPA_STATUS_INVALID_PARAM; } if ((CPA_TRUE == pOpData->compressAndVerifyAndRecover) && (CPA_FALSE == pOpData->compressAndVerify)) { QAT_UTILS_LOG("CnVnR option set without setting CnV\n"); return CPA_STATUS_INVALID_PARAM; } if ((CPA_TRUE == pOpData->compressAndVerifyAndRecover) && !(pService->generic_service_info.dcExtendedFeatures & DC_CNVNR_EXTENDED_CAPABILITY)) { QAT_UTILS_LOG( "Invalid CnVnR option set and no CnVnR capability.\n"); return CPA_STATUS_UNSUPPORTED; } if ((CPA_DP_BUFLIST == pOpData->srcBufferLen) && (CPA_DP_BUFLIST != pOpData->destBufferLen)) { QAT_UTILS_LOG( "The source and destination buffers need to be of the same type (both flat buffers or buffer lists).\n"); return CPA_STATUS_INVALID_PARAM; } if ((CPA_DP_BUFLIST != pOpData->srcBufferLen) && (CPA_DP_BUFLIST == pOpData->destBufferLen)) { QAT_UTILS_LOG( "The source and destination buffers need to be of the same type (both flat buffers or buffer lists).\n"); return CPA_STATUS_INVALID_PARAM; } if (CPA_DP_BUFLIST != pOpData->srcBufferLen) { if (pOpData->srcBufferLen < pOpData->bufferLenToCompress) { QAT_UTILS_LOG( "srcBufferLen is smaller than bufferLenToCompress.\n"); return CPA_STATUS_INVALID_PARAM; } if (pOpData->destBufferLen < pOpData->bufferLenForData) { QAT_UTILS_LOG( "destBufferLen is smaller than bufferLenForData.\n"); return CPA_STATUS_INVALID_PARAM; } } else { /* We are assuming that there is enough memory in the source and * destination buffer lists. We only receive physical addresses - * of the - * buffers so we are unable to test it here */ + * of the buffers so we are unable to test it here */ LAC_CHECK_8_BYTE_ALIGNMENT(pOpData->srcBuffer); LAC_CHECK_8_BYTE_ALIGNMENT(pOpData->destBuffer); } LAC_CHECK_8_BYTE_ALIGNMENT(pOpData->thisPhys); if ((CPA_DC_DIR_COMPRESS == pSessionDesc->sessDirection) || (CPA_DC_DIR_COMBINED == pSessionDesc->sessDirection)) { if (CPA_DC_HT_FULL_DYNAMIC == pSessionDesc->huffType) { /* Check if Intermediate Buffer Array pointer is NULL */ - if ((0 == pService->pInterBuffPtrsArrayPhyAddr) || - (NULL == pService->pInterBuffPtrsArray)) { + if (isDcGen2x(pService) && + ((0 == pService->pInterBuffPtrsArrayPhyAddr) || + (NULL == pService->pInterBuffPtrsArray))) { QAT_UTILS_LOG( "No intermediate buffer defined for this instance - see cpaDcStartInstance.\n"); return CPA_STATUS_INVALID_PARAM; } /* Ensure that the destination buffer length for data is * greater * or equal to 128B */ if (pOpData->bufferLenForData < DC_DEST_BUFFER_DYN_MIN_SIZE) { QAT_UTILS_LOG( "Destination buffer length for data should be greater or equal to 128B.\n"); return CPA_STATUS_INVALID_PARAM; } } else { /* Ensure that the destination buffer length for data is * greater * or equal to min output buffsize */ if (pOpData->bufferLenForData < pService->comp_device_data.minOutputBuffSize) { QAT_UTILS_LOG( "Destination buffer size should be greater or equal to %d bytes.\n", pService->comp_device_data .minOutputBuffSize); return CPA_STATUS_INVALID_PARAM; } } } return CPA_STATUS_SUCCESS; } CpaStatus cpaDcDpGetSessionSize(CpaInstanceHandle dcInstance, CpaDcSessionSetupData *pSessionData, Cpa32U *pSessionSize) { return dcGetSessionSize(dcInstance, pSessionData, pSessionSize, NULL); } CpaStatus cpaDcDpInitSession(CpaInstanceHandle 
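/**
 *****************************************************************************
 * @ingroup cpaDcDp
 *      Example: populating a flat-buffer CpaDcDpOpData
 *
 * @description
 *      Editor's illustration, not part of the driver, showing a request
 *      shaped to pass dcDataPlaneParamCheck() above: physical addresses for
 *      both buffers, an 8-byte aligned thisPhys, distinct source and
 *      destination, lengths consistent with bufferLenToCompress and
 *      bufferLenForData, and CnV enabled. The physical addresses are
 *      assumed to come from the application's DMA allocator.
 *
 * @code
 *      #include "cpa.h"
 *      #include "cpa_dc.h"
 *      #include "cpa_dc_dp.h"
 *
 *      static void
 *      fillDpOp(CpaDcDpOpData *op, CpaInstanceHandle inst,
 *          CpaDcSessionHandle sess, CpaPhysicalAddr opPhys,
 *          CpaPhysicalAddr srcPhys, CpaPhysicalAddr dstPhys,
 *          Cpa32U srcLen, Cpa32U dstLen)
 *      {
 *              op->dcInstance = inst;
 *              op->pSessionHandle = sess;
 *              op->thisPhys = opPhys;            // must be 8-byte aligned
 *              op->srcBuffer = srcPhys;          // flat buffer, not a list
 *              op->destBuffer = dstPhys;         // must differ from srcBuffer
 *              op->srcBufferLen = srcLen;
 *              op->destBufferLen = dstLen;
 *              op->bufferLenToCompress = srcLen; // <= srcBufferLen
 *              op->bufferLenForData = dstLen;    // <= destBufferLen
 *              op->sessDirection = CPA_DC_DIR_COMPRESS;
 *              op->compressAndVerify = CPA_TRUE;
 *      }
 * @endcode
 *****************************************************************************/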
dcInstance, CpaDcSessionHandle pSessionHandle, CpaDcSessionSetupData *pSessionData) { CpaStatus status = CPA_STATUS_SUCCESS; dc_session_desc_t *pSessionDesc = NULL; sal_compression_service_t *pService = NULL; LAC_CHECK_INSTANCE_HANDLE(dcInstance); SAL_CHECK_INSTANCE_TYPE(dcInstance, SAL_SERVICE_TYPE_COMPRESSION); pService = (sal_compression_service_t *)dcInstance; /* Check if SAL is initialised otherwise return an error */ SAL_RUNNING_CHECK(pService); /* Stateful is not supported */ if (CPA_DC_STATELESS != pSessionData->sessState) { QAT_UTILS_LOG("Invalid sessState value\n"); return CPA_STATUS_INVALID_PARAM; } status = dcInitSession(dcInstance, pSessionHandle, pSessionData, NULL, NULL); if (CPA_STATUS_SUCCESS == status) { pSessionDesc = DC_SESSION_DESC_FROM_CTX_GET(pSessionHandle); pSessionDesc->isDcDp = CPA_TRUE; ICP_QAT_FW_COMN_PTR_TYPE_SET( pSessionDesc->reqCacheDecomp.comn_hdr.comn_req_flags, DC_DP_QAT_PTR_TYPE); ICP_QAT_FW_COMN_PTR_TYPE_SET( pSessionDesc->reqCacheComp.comn_hdr.comn_req_flags, DC_DP_QAT_PTR_TYPE); } return status; } CpaStatus cpaDcDpRemoveSession(const CpaInstanceHandle dcInstance, CpaDcSessionHandle pSessionHandle) { return cpaDcRemoveSession(dcInstance, pSessionHandle); } CpaStatus cpaDcDpRegCbFunc(const CpaInstanceHandle dcInstance, const CpaDcDpCallbackFn pNewCb) { sal_compression_service_t *pService = NULL; LAC_CHECK_NULL_PARAM(dcInstance); SAL_CHECK_INSTANCE_TYPE(dcInstance, SAL_SERVICE_TYPE_COMPRESSION); LAC_CHECK_NULL_PARAM(pNewCb); /* Check if SAL is initialised otherwise return an error */ SAL_RUNNING_CHECK(dcInstance); pService = (sal_compression_service_t *)dcInstance; pService->pDcDpCb = pNewCb; return CPA_STATUS_SUCCESS; } /** ***************************************************************************** * @ingroup cpaDcDp * * @description * Writes the message to the ring * * @param[in] pOpData Pointer to a structure containing the * request parameters * @param[in] pCurrentQatMsg Pointer to current QAT message on the ring * *****************************************************************************/ static void dcDpWriteRingMsg(CpaDcDpOpData *pOpData, icp_qat_fw_comp_req_t *pCurrentQatMsg) { icp_qat_fw_comp_req_t *pReqCache = NULL; dc_session_desc_t *pSessionDesc = NULL; Cpa8U bufferFormat; Cpa8U cnvDecompReq = ICP_QAT_FW_COMP_NO_CNV; Cpa8U cnvnrCompReq = ICP_QAT_FW_COMP_NO_CNV_RECOVERY; + CpaBoolean cnvErrorInjection = ICP_QAT_FW_COMP_NO_CNV_DFX; + sal_compression_service_t *pService = NULL; + pService = (sal_compression_service_t *)(pOpData->dcInstance); pSessionDesc = DC_SESSION_DESC_FROM_CTX_GET(pOpData->pSessionHandle); if (CPA_DC_DIR_COMPRESS == pOpData->sessDirection) { pReqCache = &(pSessionDesc->reqCacheComp); /* CNV check */ if (CPA_TRUE == pOpData->compressAndVerify) { cnvDecompReq = ICP_QAT_FW_COMP_CNV; + if (isDcGen4x(pService)) { + cnvErrorInjection = + pSessionDesc->cnvErrorInjection; + } + /* CNVNR check */ if (CPA_TRUE == pOpData->compressAndVerifyAndRecover) { cnvnrCompReq = ICP_QAT_FW_COMP_CNV_RECOVERY; } } } else { pReqCache = &(pSessionDesc->reqCacheDecomp); } /* Fills in the template DC ET ring message - cached from the * session descriptor */ memcpy((void *)pCurrentQatMsg, (void *)(pReqCache), (LAC_QAT_DC_REQ_SZ_LW * LAC_LONG_WORD_IN_BYTES)); if (CPA_DP_BUFLIST == pOpData->srcBufferLen) { bufferFormat = QAT_COMN_PTR_TYPE_SGL; } else { bufferFormat = QAT_COMN_PTR_TYPE_FLAT; } pCurrentQatMsg->comp_pars.req_par_flags |= ICP_QAT_FW_COMP_REQ_PARAM_FLAGS_BUILD( - 0, 0, 0, cnvDecompReq, cnvnrCompReq, 0); + ICP_QAT_FW_COMP_NOT_SOP, + 
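/**
 *****************************************************************************
 * @ingroup cpaDcDp
 *      Example: data-plane session setup sequence
 *
 * @description
 *      Editor's illustration, not part of the driver: the call order implied
 *      by the functions above. Query the session size, allocate the session
 *      memory (owned by the application), initialise the session (which
 *      must be CPA_DC_STATELESS, as checked above) and register the
 *      instance-wide completion callback. myAlloc() and myDpCallback() are
 *      hypothetical application functions; error cleanup is elided.
 *
 * @code
 *      #include "cpa.h"
 *      #include "cpa_dc.h"
 *      #include "cpa_dc_dp.h"
 *
 *      extern void *myAlloc(Cpa32U size); // hypothetical DMA-able allocator
 *      extern void myDpCallback(CpaDcDpOpData *pOpData);
 *
 *      static CpaStatus
 *      setupDpSession(CpaInstanceHandle inst,
 *          CpaDcSessionSetupData *setup, CpaDcSessionHandle *pSess)
 *      {
 *              Cpa32U size = 0;
 *              CpaStatus status;
 *
 *              status = cpaDcDpGetSessionSize(inst, setup, &size);
 *              if (CPA_STATUS_SUCCESS != status)
 *                      return status;
 *              *pSess = (CpaDcSessionHandle)myAlloc(size);
 *              status = cpaDcDpInitSession(inst, *pSess, setup);
 *              if (CPA_STATUS_SUCCESS != status)
 *                      return status;
 *              return cpaDcDpRegCbFunc(inst, myDpCallback);
 *      }
 * @endcode
 *****************************************************************************/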
ICP_QAT_FW_COMP_NOT_EOP, + ICP_QAT_FW_COMP_NOT_BFINAL, + cnvDecompReq, + cnvnrCompReq, + cnvErrorInjection, + ICP_QAT_FW_COMP_CRC_MODE_LEGACY); SalQatMsg_CmnMidWrite((icp_qat_fw_la_bulk_req_t *)pCurrentQatMsg, pOpData, bufferFormat, pOpData->srcBuffer, pOpData->destBuffer, pOpData->srcBufferLen, pOpData->destBufferLen); pCurrentQatMsg->comp_pars.comp_len = pOpData->bufferLenToCompress; pCurrentQatMsg->comp_pars.out_buffer_sz = pOpData->bufferLenForData; } CpaStatus cpaDcDpEnqueueOp(CpaDcDpOpData *pOpData, const CpaBoolean performOpNow) { icp_qat_fw_comp_req_t *pCurrentQatMsg = NULL; icp_comms_trans_handle trans_handle = NULL; dc_session_desc_t *pSessionDesc = NULL; CpaStatus status = CPA_STATUS_SUCCESS; status = dcDataPlaneParamCheck(pOpData); if (CPA_STATUS_SUCCESS != status) { return status; } if ((CPA_FALSE == pOpData->compressAndVerify) && (CPA_DC_DIR_COMPRESS == pOpData->sessDirection)) { return CPA_STATUS_UNSUPPORTED; } /* Check if SAL is initialised otherwise return an error */ SAL_RUNNING_CHECK(pOpData->dcInstance); trans_handle = ((sal_compression_service_t *)pOpData->dcInstance) ->trans_handle_compression_tx; pSessionDesc = DC_SESSION_DESC_FROM_CTX_GET(pOpData->pSessionHandle); if ((CPA_DC_DIR_COMPRESS == pOpData->sessDirection) && (CPA_DC_DIR_DECOMPRESS == pSessionDesc->sessDirection)) { QAT_UTILS_LOG( "The session does not support this direction of operation.\n"); return CPA_STATUS_INVALID_PARAM; } else if ((CPA_DC_DIR_DECOMPRESS == pOpData->sessDirection) && (CPA_DC_DIR_COMPRESS == pSessionDesc->sessDirection)) { QAT_UTILS_LOG( "The session does not support this direction of operation.\n"); return CPA_STATUS_INVALID_PARAM; } icp_adf_getSingleQueueAddr(trans_handle, (void **)&pCurrentQatMsg); if (NULL == pCurrentQatMsg) { return CPA_STATUS_RETRY; } dcDpWriteRingMsg(pOpData, pCurrentQatMsg); pSessionDesc->pendingDpStatelessCbCount++; if (CPA_TRUE == performOpNow) { SalQatMsg_updateQueueTail(trans_handle); } return CPA_STATUS_SUCCESS; } CpaStatus cpaDcDpEnqueueOpBatch(const Cpa32U numberRequests, CpaDcDpOpData *pOpData[], const CpaBoolean performOpNow) { icp_qat_fw_comp_req_t *pCurrentQatMsg = NULL; icp_comms_trans_handle trans_handle = NULL; dc_session_desc_t *pSessionDesc = NULL; Cpa32U i = 0; CpaStatus status = CPA_STATUS_SUCCESS; sal_compression_service_t *pService = NULL; LAC_CHECK_NULL_PARAM(pOpData); LAC_CHECK_NULL_PARAM(pOpData[0]); LAC_CHECK_NULL_PARAM(pOpData[0]->dcInstance); pService = (sal_compression_service_t *)(pOpData[0]->dcInstance); if ((numberRequests == 0) || (numberRequests > pService->maxNumCompConcurrentReq)) { QAT_UTILS_LOG( "The number of requests needs to be between 1 and %d.\n", pService->maxNumCompConcurrentReq); return CPA_STATUS_INVALID_PARAM; } for (i = 0; i < numberRequests; i++) { status = dcDataPlaneParamCheck(pOpData[i]); if (CPA_STATUS_SUCCESS != status) { return status; } /* Check that all instance handles and session handles are the * same */ if (pOpData[i]->dcInstance != pOpData[0]->dcInstance) { QAT_UTILS_LOG( "All instance handles should be the same in the pOpData.\n"); return CPA_STATUS_INVALID_PARAM; } if (pOpData[i]->pSessionHandle != pOpData[0]->pSessionHandle) { QAT_UTILS_LOG( "All session handles should be the same in the pOpData.\n"); return CPA_STATUS_INVALID_PARAM; } } for (i = 0; i < numberRequests; i++) { if ((CPA_FALSE == pOpData[i]->compressAndVerify) && (CPA_DC_DIR_COMPRESS == pOpData[i]->sessDirection)) { return CPA_STATUS_UNSUPPORTED; } } /* Check if SAL is initialised otherwise return an error */ 
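/**
 *****************************************************************************
 * @ingroup cpaDcDp
 *      Example: batching enqueues and polling for responses
 *
 * @description
 *      Editor's illustration, not part of the driver. Requests can be
 *      enqueued with performOpNow set to CPA_FALSE and the queue tail
 *      updated once, either by passing CPA_TRUE on the last enqueue, as
 *      below, or by calling cpaDcDpPerformOpNow(); completions are then
 *      reaped with icp_sal_DcPollDpInstance() (both defined later in this
 *      file). The ops array is assumed to be populated as in the previous
 *      sketch.
 *
 * @code
 *      #include "cpa.h"
 *      #include "cpa_dc_dp.h"
 *      #include "icp_sal_poll.h"
 *
 *      static CpaStatus
 *      submitAndPoll(CpaInstanceHandle inst, CpaDcDpOpData *ops[], Cpa32U n)
 *      {
 *              CpaStatus status;
 *              Cpa32U i;
 *
 *              for (i = 0; i < n; i++) {
 *                      // Update the queue tail only on the last request.
 *                      status = cpaDcDpEnqueueOp(ops[i],
 *                          (i == n - 1) ? CPA_TRUE : CPA_FALSE);
 *                      if (CPA_STATUS_SUCCESS != status)
 *                              return status;
 *              }
 *              // Reap up to n responses from the response ring.
 *              return icp_sal_DcPollDpInstance(inst, n);
 *      }
 * @endcode
 *****************************************************************************/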
SAL_RUNNING_CHECK(pOpData[0]->dcInstance); trans_handle = ((sal_compression_service_t *)pOpData[0]->dcInstance) ->trans_handle_compression_tx; pSessionDesc = DC_SESSION_DESC_FROM_CTX_GET(pOpData[0]->pSessionHandle); for (i = 0; i < numberRequests; i++) { if ((CPA_DC_DIR_COMPRESS == pOpData[i]->sessDirection) && (CPA_DC_DIR_DECOMPRESS == pSessionDesc->sessDirection)) { QAT_UTILS_LOG( "The session does not support this direction of operation.\n"); return CPA_STATUS_INVALID_PARAM; } else if ((CPA_DC_DIR_DECOMPRESS == pOpData[i]->sessDirection) && (CPA_DC_DIR_COMPRESS == pSessionDesc->sessDirection)) { QAT_UTILS_LOG( "The session does not support this direction of operation.\n"); return CPA_STATUS_INVALID_PARAM; } } icp_adf_getQueueMemory(trans_handle, numberRequests, (void **)&pCurrentQatMsg); if (NULL == pCurrentQatMsg) { return CPA_STATUS_RETRY; } for (i = 0; i < numberRequests; i++) { dcDpWriteRingMsg(pOpData[i], pCurrentQatMsg); icp_adf_getQueueNext(trans_handle, (void **)&pCurrentQatMsg); } pSessionDesc->pendingDpStatelessCbCount += numberRequests; if (CPA_TRUE == performOpNow) { SalQatMsg_updateQueueTail(trans_handle); } return CPA_STATUS_SUCCESS; } CpaStatus icp_sal_DcPollDpInstance(CpaInstanceHandle dcInstance, Cpa32U responseQuota) { icp_comms_trans_handle trans_handle = NULL; LAC_CHECK_INSTANCE_HANDLE(dcInstance); SAL_CHECK_INSTANCE_TYPE(dcInstance, SAL_SERVICE_TYPE_COMPRESSION); /* Check if SAL is initialised otherwise return an error */ SAL_RUNNING_CHECK(dcInstance); trans_handle = ((sal_compression_service_t *)dcInstance) ->trans_handle_compression_rx; return icp_adf_pollQueue(trans_handle, responseQuota); } CpaStatus cpaDcDpPerformOpNow(CpaInstanceHandle dcInstance) { icp_comms_trans_handle trans_handle = NULL; LAC_CHECK_NULL_PARAM(dcInstance); SAL_CHECK_INSTANCE_TYPE(dcInstance, SAL_SERVICE_TYPE_COMPRESSION); /* Check if SAL is initialised otherwise return an error */ SAL_RUNNING_CHECK(dcInstance); trans_handle = ((sal_compression_service_t *)dcInstance) ->trans_handle_compression_tx; if (CPA_TRUE == icp_adf_queueDataToSend(trans_handle)) { SalQatMsg_updateQueueTail(trans_handle); } return CPA_STATUS_SUCCESS; } diff --git a/sys/dev/qat/qat_api/common/compression/dc_session.c b/sys/dev/qat/qat_api/common/compression/dc_session.c index 1d742e227a10..fbce72cb7bfb 100644 --- a/sys/dev/qat/qat_api/common/compression/dc_session.c +++ b/sys/dev/qat/qat_api/common/compression/dc_session.c @@ -1,957 +1,1258 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright(c) 2007-2022 Intel Corporation */ /* $FreeBSD$ */ /** ***************************************************************************** * @file dc_session.c * * @ingroup Dc_DataCompression * * @description * Implementation of the Data Compression session operations. 
* *****************************************************************************/ /* ******************************************************************************* * Include public/global header files ******************************************************************************* */ #include "cpa.h" #include "cpa_dc.h" #include "icp_qat_fw.h" #include "icp_qat_fw_comp.h" #include "icp_qat_hw.h" +#include "icp_qat_hw_20_comp.h" /* ******************************************************************************* * Include private header files ******************************************************************************* */ #include "dc_session.h" #include "dc_datapath.h" #include "lac_mem_pools.h" #include "sal_types_compression.h" #include "lac_buffer_desc.h" #include "sal_service_state.h" #include "sal_qat_cmn_msg.h" +#include "sal_hw_gen.h" /** ***************************************************************************** * @ingroup Dc_DataCompression * Check that pSessionData is valid * * @description * Check that all the parameters defined in the pSessionData are valid * * @param[in] pSessionData Pointer to a user instantiated structure * containing session data * * @retval CPA_STATUS_SUCCESS Function executed successfully * @retval CPA_STATUS_FAIL Function failed to find device * @retval CPA_STATUS_INVALID_PARAM Invalid parameter passed in * @retval CPA_STATUS_UNSUPPORTED Unsupported algorithm/feature * *****************************************************************************/ -static CpaStatus +CpaStatus dcCheckSessionData(const CpaDcSessionSetupData *pSessionData, CpaInstanceHandle dcInstance) { CpaDcInstanceCapabilities instanceCapabilities = { 0 }; cpaDcQueryCapabilities(dcInstance, &instanceCapabilities); if ((pSessionData->compLevel < CPA_DC_L1) || (pSessionData->compLevel > CPA_DC_L9)) { QAT_UTILS_LOG("Invalid compLevel value\n"); return CPA_STATUS_INVALID_PARAM; } + if ((pSessionData->autoSelectBestHuffmanTree < CPA_DC_ASB_DISABLED) || (pSessionData->autoSelectBestHuffmanTree > CPA_DC_ASB_UNCOMP_STATIC_DYNAMIC_WITH_NO_HDRS)) { QAT_UTILS_LOG("Invalid autoSelectBestHuffmanTree value\n"); return CPA_STATUS_INVALID_PARAM; } if (pSessionData->compType != CPA_DC_DEFLATE) { QAT_UTILS_LOG("Invalid compType value\n"); return CPA_STATUS_INVALID_PARAM; } if ((pSessionData->huffType < CPA_DC_HT_STATIC) || (pSessionData->huffType > CPA_DC_HT_FULL_DYNAMIC) || (CPA_DC_HT_PRECOMP == pSessionData->huffType)) { QAT_UTILS_LOG("Invalid huffType value\n"); return CPA_STATUS_INVALID_PARAM; } if ((pSessionData->sessDirection < CPA_DC_DIR_COMPRESS) || (pSessionData->sessDirection > CPA_DC_DIR_COMBINED)) { QAT_UTILS_LOG("Invalid sessDirection value\n"); return CPA_STATUS_INVALID_PARAM; } if ((pSessionData->sessState < CPA_DC_STATEFUL) || (pSessionData->sessState > CPA_DC_STATELESS)) { QAT_UTILS_LOG("Invalid sessState value\n"); return CPA_STATUS_INVALID_PARAM; } if ((pSessionData->checksum < CPA_DC_NONE) || (pSessionData->checksum > CPA_DC_ADLER32)) { QAT_UTILS_LOG("Invalid checksum value\n"); return CPA_STATUS_INVALID_PARAM; } return CPA_STATUS_SUCCESS; } /** ***************************************************************************** * @ingroup Dc_DataCompression * Populate the compression hardware block * * @description * This function will populate the compression hardware block and update * the size in bytes of the block * * @param[in] pSessionDesc Pointer to the session descriptor * @param[in] pCompConfig Pointer to slice config word * @param[in] compDecomp Direction of the operation * 
@param[in] enableDmm Delayed Match Mode * *****************************************************************************/ static void -dcCompHwBlockPopulate(dc_session_desc_t *pSessionDesc, +dcCompHwBlockPopulate(sal_compression_service_t *pService, + dc_session_desc_t *pSessionDesc, icp_qat_hw_compression_config_t *pCompConfig, - dc_request_dir_t compDecomp, - icp_qat_hw_compression_delayed_match_t enableDmm) + dc_request_dir_t compDecomp) { icp_qat_hw_compression_direction_t dir = ICP_QAT_HW_COMPRESSION_DIR_COMPRESS; icp_qat_hw_compression_algo_t algo = ICP_QAT_HW_COMPRESSION_ALGO_DEFLATE; icp_qat_hw_compression_depth_t depth = ICP_QAT_HW_COMPRESSION_DEPTH_1; icp_qat_hw_compression_file_type_t filetype = ICP_QAT_HW_COMPRESSION_FILE_TYPE_0; + icp_qat_hw_compression_delayed_match_t dmm; /* Set the direction */ if (DC_COMPRESSION_REQUEST == compDecomp) { dir = ICP_QAT_HW_COMPRESSION_DIR_COMPRESS; } else { dir = ICP_QAT_HW_COMPRESSION_DIR_DECOMPRESS; } if (CPA_DC_DEFLATE == pSessionDesc->compType) { algo = ICP_QAT_HW_COMPRESSION_ALGO_DEFLATE; } else { QAT_UTILS_LOG("Algorithm not supported for Compression\n"); } + /* Set delay match mode */ + if (CPA_TRUE == pService->comp_device_data.enableDmm) { + dmm = ICP_QAT_HW_COMPRESSION_DELAYED_MATCH_ENABLED; + } else { + dmm = ICP_QAT_HW_COMPRESSION_DELAYED_MATCH_DISABLED; + } + /* Set the depth */ if (DC_DECOMPRESSION_REQUEST == compDecomp) { depth = ICP_QAT_HW_COMPRESSION_DEPTH_1; } else { switch (pSessionDesc->compLevel) { case CPA_DC_L1: depth = ICP_QAT_HW_COMPRESSION_DEPTH_1; break; case CPA_DC_L2: depth = ICP_QAT_HW_COMPRESSION_DEPTH_4; break; case CPA_DC_L3: depth = ICP_QAT_HW_COMPRESSION_DEPTH_8; break; - default: + case CPA_DC_L4: depth = ICP_QAT_HW_COMPRESSION_DEPTH_16; + break; + default: + depth = pService->comp_device_data + .highestHwCompressionDepth; + break; } } /* The file type is set to ICP_QAT_HW_COMPRESSION_FILE_TYPE_0. The other * modes will be used in the future for precompiled huffman trees */ filetype = ICP_QAT_HW_COMPRESSION_FILE_TYPE_0; - pCompConfig->val = ICP_QAT_HW_COMPRESSION_CONFIG_BUILD( - dir, enableDmm, algo, depth, filetype); + pCompConfig->lower_val = ICP_QAT_HW_COMPRESSION_CONFIG_BUILD( + dir, dmm, algo, depth, filetype); + + /* Upper 32-bits of the configuration word do not need to be + * configured with legacy devices. + */ + pCompConfig->upper_val = 0; +} + +static void +dcCompHwBlockPopulateGen4(sal_compression_service_t *pService, + dc_session_desc_t *pSessionDesc, + icp_qat_hw_compression_config_t *pCompConfig, + dc_request_dir_t compDecomp) +{ + /* Compression related */ + if (DC_COMPRESSION_REQUEST == compDecomp) { + icp_qat_hw_comp_20_config_csr_upper_t hw_comp_upper_csr; + icp_qat_hw_comp_20_config_csr_lower_t hw_comp_lower_csr; + + memset(&hw_comp_upper_csr, 0, sizeof hw_comp_upper_csr); + memset(&hw_comp_lower_csr, 0, sizeof hw_comp_lower_csr); + + /* Disable Literal + Length Limit Block Drop by default and + * enable it only for dynamic deflate compression. 
+ */ + hw_comp_lower_csr.lllbd = + ICP_QAT_HW_COMP_20_LLLBD_CTRL_LLLBD_DISABLED; + + switch (pSessionDesc->compType) { + case CPA_DC_DEFLATE: + /* DEFLATE algorithm settings */ + hw_comp_lower_csr.skip_ctrl = + ICP_QAT_HW_COMP_20_BYTE_SKIP_3BYTE_LITERAL; + + if (CPA_DC_HT_FULL_DYNAMIC == pSessionDesc->huffType) { + hw_comp_lower_csr.algo = + ICP_QAT_HW_COMP_20_HW_COMP_FORMAT_ILZ77; + } else /* Static DEFLATE */ + { + hw_comp_lower_csr.algo = + ICP_QAT_HW_COMP_20_HW_COMP_FORMAT_DEFLATE; + hw_comp_upper_csr.scb_ctrl = + ICP_QAT_HW_COMP_20_SCB_CONTROL_DISABLE; + } + + if (CPA_DC_STATEFUL == pSessionDesc->sessState) { + hw_comp_upper_csr.som_ctrl = + ICP_QAT_HW_COMP_20_SOM_CONTROL_REPLAY_MODE; + } + break; + default: + QAT_UTILS_LOG("Compression algorithm not supported\n"); + break; + } + /* Set the search depth */ + switch (pSessionDesc->compLevel) { + case CPA_DC_L1: + case CPA_DC_L2: + case CPA_DC_L3: + case CPA_DC_L4: + case CPA_DC_L5: + hw_comp_lower_csr.sd = + ICP_QAT_HW_COMP_20_SEARCH_DEPTH_LEVEL_1; + hw_comp_lower_csr.hash_col = + ICP_QAT_HW_COMP_20_SKIP_HASH_COLLISION_DONT_ALLOW; + break; + case CPA_DC_L6: + case CPA_DC_L7: + case CPA_DC_L8: + hw_comp_lower_csr.sd = + ICP_QAT_HW_COMP_20_SEARCH_DEPTH_LEVEL_6; + break; + case CPA_DC_L9: + hw_comp_lower_csr.sd = + ICP_QAT_HW_COMP_20_SEARCH_DEPTH_LEVEL_9; + break; + default: + hw_comp_lower_csr.sd = pService->comp_device_data + .highestHwCompressionDepth; + if ((CPA_DC_HT_FULL_DYNAMIC == + pSessionDesc->huffType) && + (CPA_DC_DEFLATE == pSessionDesc->compType)) { + /* Enable Literal + Length Limit Block Drop + * with dynamic deflate compression when + * highest compression levels are selected. + */ + hw_comp_lower_csr.lllbd = + ICP_QAT_HW_COMP_20_LLLBD_CTRL_LLLBD_ENABLED; + } + break; + } + /* Same for all algorithms */ + hw_comp_lower_csr.abd = ICP_QAT_HW_COMP_20_ABD_ABD_DISABLED; + hw_comp_lower_csr.hash_update = + ICP_QAT_HW_COMP_20_SKIP_HASH_UPDATE_DONT_ALLOW; + hw_comp_lower_csr.edmm = + (CPA_TRUE == pService->comp_device_data.enableDmm) ? 
+ ICP_QAT_HW_COMP_20_EXTENDED_DELAY_MATCH_MODE_EDMM_ENABLED : + ICP_QAT_HW_COMP_20_EXTENDED_DELAY_MATCH_MODE_EDMM_DISABLED; + + /* Hard-coded HW-specific values */ + hw_comp_upper_csr.nice = + ICP_QAT_HW_COMP_20_CONFIG_CSR_NICE_PARAM_DEFAULT_VAL; + hw_comp_upper_csr.lazy = + ICP_QAT_HW_COMP_20_CONFIG_CSR_LAZY_PARAM_DEFAULT_VAL; + + pCompConfig->upper_val = + ICP_QAT_FW_COMP_20_BUILD_CONFIG_UPPER(hw_comp_upper_csr); + + pCompConfig->lower_val = + ICP_QAT_FW_COMP_20_BUILD_CONFIG_LOWER(hw_comp_lower_csr); + } else /* Decompress */ + { + icp_qat_hw_decomp_20_config_csr_lower_t hw_decomp_lower_csr; + + memset(&hw_decomp_lower_csr, 0, sizeof hw_decomp_lower_csr); + + /* Set the algorithm */ + if (CPA_DC_DEFLATE == pSessionDesc->compType) { + hw_decomp_lower_csr.algo = + ICP_QAT_HW_DECOMP_20_HW_DECOMP_FORMAT_DEFLATE; + } else { + QAT_UTILS_LOG("Algorithm not supported for " + "Decompression\n"); + } - pCompConfig->reserved = 0; + pCompConfig->upper_val = 0; + pCompConfig->lower_val = + ICP_QAT_FW_DECOMP_20_BUILD_CONFIG_LOWER( + hw_decomp_lower_csr); + } } /** ***************************************************************************** * @ingroup Dc_DataCompression * Populate the compression content descriptor * * @description * This function will populate the compression content descriptor * * @param[in] pService Pointer to the service * @param[in] pSessionDesc Pointer to the session descriptor * @param[in] contextBufferAddrPhys Physical address of the context buffer * @param[out] pMsg Pointer to the compression message * @param[in] nextSlice Next slice * @param[in] compDecomp Direction of the operation * *****************************************************************************/ static void dcCompContentDescPopulate(sal_compression_service_t *pService, dc_session_desc_t *pSessionDesc, CpaPhysicalAddr contextBufferAddrPhys, icp_qat_fw_comp_req_t *pMsg, icp_qat_fw_slice_t nextSlice, dc_request_dir_t compDecomp) { icp_qat_fw_comp_cd_hdr_t *pCompControlBlock = NULL; icp_qat_hw_compression_config_t *pCompConfig = NULL; CpaBoolean bankEnabled = CPA_FALSE; pCompControlBlock = (icp_qat_fw_comp_cd_hdr_t *)&(pMsg->comp_cd_ctrl); pCompConfig = (icp_qat_hw_compression_config_t *)(pMsg->cd_pars.sl .comp_slice_cfg_word); ICP_QAT_FW_COMN_NEXT_ID_SET(pCompControlBlock, nextSlice); ICP_QAT_FW_COMN_CURR_ID_SET(pCompControlBlock, ICP_QAT_FW_SLICE_COMP); pCompControlBlock->comp_cfg_offset = 0; if ((CPA_DC_STATEFUL == pSessionDesc->sessState) && (CPA_DC_DEFLATE == pSessionDesc->compType) && (DC_DECOMPRESSION_REQUEST == compDecomp)) { /* Enable A, B, C, D, and E (CAMs). 
*/ pCompControlBlock->ram_bank_flags = ICP_QAT_FW_COMP_RAM_FLAGS_BUILD( ICP_QAT_FW_COMP_BANK_DISABLED, /* Bank I */ ICP_QAT_FW_COMP_BANK_DISABLED, /* Bank H */ ICP_QAT_FW_COMP_BANK_DISABLED, /* Bank G */ ICP_QAT_FW_COMP_BANK_DISABLED, /* Bank F */ ICP_QAT_FW_COMP_BANK_ENABLED, /* Bank E */ ICP_QAT_FW_COMP_BANK_ENABLED, /* Bank D */ ICP_QAT_FW_COMP_BANK_ENABLED, /* Bank C */ ICP_QAT_FW_COMP_BANK_ENABLED, /* Bank B */ ICP_QAT_FW_COMP_BANK_ENABLED); /* Bank A */ bankEnabled = CPA_TRUE; } else { /* Disable all banks */ pCompControlBlock->ram_bank_flags = ICP_QAT_FW_COMP_RAM_FLAGS_BUILD( ICP_QAT_FW_COMP_BANK_DISABLED, /* Bank I */ ICP_QAT_FW_COMP_BANK_DISABLED, /* Bank H */ ICP_QAT_FW_COMP_BANK_DISABLED, /* Bank G */ ICP_QAT_FW_COMP_BANK_DISABLED, /* Bank F */ ICP_QAT_FW_COMP_BANK_DISABLED, /* Bank E */ ICP_QAT_FW_COMP_BANK_DISABLED, /* Bank D */ ICP_QAT_FW_COMP_BANK_DISABLED, /* Bank C */ ICP_QAT_FW_COMP_BANK_DISABLED, /* Bank B */ ICP_QAT_FW_COMP_BANK_DISABLED); /* Bank A */ } if (DC_COMPRESSION_REQUEST == compDecomp) { LAC_MEM_SHARED_WRITE_VIRT_TO_PHYS_PTR_EXTERNAL( pService->generic_service_info, pCompControlBlock->comp_state_addr, pSessionDesc->stateRegistersComp); } else { LAC_MEM_SHARED_WRITE_VIRT_TO_PHYS_PTR_EXTERNAL( pService->generic_service_info, pCompControlBlock->comp_state_addr, pSessionDesc->stateRegistersDecomp); } if (CPA_TRUE == bankEnabled) { pCompControlBlock->ram_banks_addr = contextBufferAddrPhys; } else { pCompControlBlock->ram_banks_addr = 0; } pCompControlBlock->resrvd = 0; /* Populate Compression Hardware Setup Block */ - dcCompHwBlockPopulate(pSessionDesc, - pCompConfig, - compDecomp, - pService->comp_device_data.enableDmm); + if (isDcGen4x(pService)) { + dcCompHwBlockPopulateGen4(pService, + pSessionDesc, + pCompConfig, + compDecomp); + } else if (isDcGen2x(pService)) { + dcCompHwBlockPopulate(pService, + pSessionDesc, + pCompConfig, + compDecomp); + } else { + QAT_UTILS_LOG("Invalid QAT generation value\n"); + } } /** ***************************************************************************** * @ingroup Dc_DataCompression * Populate the translator content descriptor * * @description * This function will populate the translator content descriptor * * @param[out] pMsg Pointer to the compression message * @param[in] nextSlice Next slice * *****************************************************************************/ -static void +void dcTransContentDescPopulate(icp_qat_fw_comp_req_t *pMsg, icp_qat_fw_slice_t nextSlice) { icp_qat_fw_xlt_cd_hdr_t *pTransControlBlock = NULL; pTransControlBlock = (icp_qat_fw_xlt_cd_hdr_t *)&(pMsg->u2.xlt_cd_ctrl); ICP_QAT_FW_COMN_NEXT_ID_SET(pTransControlBlock, nextSlice); ICP_QAT_FW_COMN_CURR_ID_SET(pTransControlBlock, ICP_QAT_FW_SLICE_XLAT); pTransControlBlock->resrvd1 = 0; pTransControlBlock->resrvd2 = 0; pTransControlBlock->resrvd3 = 0; } /** ***************************************************************************** * @ingroup Dc_DataCompression * Get the context size and the history size * * @description * This function will get the size of the context buffer and the history * buffer. The history buffer is a subset of the context buffer and its * size is needed for stateful compression. 
* @param[in] dcInstance DC Instance Handle * * @param[in] pSessionData Pointer to a user instantiated * structure containing session data * @param[out] pContextSize Pointer to the context size * * @retval CPA_STATUS_SUCCESS Function executed successfully * * *****************************************************************************/ static CpaStatus dcGetContextSize(CpaInstanceHandle dcInstance, CpaDcSessionSetupData *pSessionData, Cpa32U *pContextSize) { sal_compression_service_t *pCompService = NULL; pCompService = (sal_compression_service_t *)dcInstance; *pContextSize = 0; if ((CPA_DC_STATEFUL == pSessionData->sessState) && - (CPA_DC_DEFLATE == pSessionData->compType) && (CPA_DC_DIR_COMPRESS != pSessionData->sessDirection)) { - *pContextSize = - pCompService->comp_device_data.inflateContextSize; + switch (pSessionData->compType) { + case CPA_DC_DEFLATE: + *pContextSize = + pCompService->comp_device_data.inflateContextSize; + break; + default: + QAT_UTILS_LOG("Invalid compression algorithm."); + return CPA_STATUS_FAIL; + } } return CPA_STATUS_SUCCESS; } +CpaStatus +dcGetCompressCommandId(sal_compression_service_t *pService, + CpaDcSessionSetupData *pSessionData, + Cpa8U *pDcCmdId) +{ + CpaStatus status = CPA_STATUS_SUCCESS; + LAC_CHECK_NULL_PARAM(pService); + LAC_CHECK_NULL_PARAM(pSessionData); + LAC_CHECK_NULL_PARAM(pDcCmdId); + + switch (pSessionData->compType) { + case CPA_DC_DEFLATE: + *pDcCmdId = (CPA_DC_HT_FULL_DYNAMIC == pSessionData->huffType) ? + ICP_QAT_FW_COMP_CMD_DYNAMIC : + ICP_QAT_FW_COMP_CMD_STATIC; + break; + default: + QAT_UTILS_LOG("Algorithm not supported for " + "compression\n"); + status = CPA_STATUS_UNSUPPORTED; + break; + } + + return status; +} + +CpaStatus +dcGetDecompressCommandId(sal_compression_service_t *pService, + CpaDcSessionSetupData *pSessionData, + Cpa8U *pDcCmdId) +{ + CpaStatus status = CPA_STATUS_SUCCESS; + LAC_CHECK_NULL_PARAM(pService); + LAC_CHECK_NULL_PARAM(pSessionData); + LAC_CHECK_NULL_PARAM(pDcCmdId); + + switch (pSessionData->compType) { + case CPA_DC_DEFLATE: + *pDcCmdId = ICP_QAT_FW_COMP_CMD_DECOMPRESS; + break; + default: + QAT_UTILS_LOG("Algorithm not supported for " + "decompression\n"); + status = CPA_STATUS_UNSUPPORTED; + break; + } + + return status; +} + CpaStatus dcInitSession(CpaInstanceHandle dcInstance, CpaDcSessionHandle pSessionHandle, CpaDcSessionSetupData *pSessionData, CpaBufferList *pContextBuffer, CpaDcCallbackFn callbackFn) { CpaStatus status = CPA_STATUS_SUCCESS; sal_compression_service_t *pService = NULL; icp_qat_fw_comp_req_t *pReqCache = NULL; dc_session_desc_t *pSessionDesc = NULL; CpaPhysicalAddr contextAddrPhys = 0; CpaPhysicalAddr physAddress = 0; CpaPhysicalAddr physAddressAligned = 0; Cpa32U minContextSize = 0, historySize = 0; Cpa32U rpCmdFlags = 0; icp_qat_fw_serv_specif_flags cmdFlags = 0; Cpa8U secureRam = ICP_QAT_FW_COMP_ENABLE_SECURE_RAM_USED_AS_INTMD_BUF; Cpa8U sessType = ICP_QAT_FW_COMP_STATELESS_SESSION; Cpa8U autoSelectBest = ICP_QAT_FW_COMP_NOT_AUTO_SELECT_BEST; Cpa8U enhancedAutoSelectBest = ICP_QAT_FW_COMP_NOT_ENH_AUTO_SELECT_BEST; Cpa8U disableType0EnhancedAutoSelectBest = ICP_QAT_FW_COMP_NOT_DISABLE_TYPE0_ENH_AUTO_SELECT_BEST; icp_qat_fw_la_cmd_id_t dcCmdId = (icp_qat_fw_la_cmd_id_t)ICP_QAT_FW_COMP_CMD_STATIC; icp_qat_fw_comn_flags cmnRequestFlags = 0; dc_integrity_crc_fw_t *pDataIntegrityCrcs = NULL; cmnRequestFlags = ICP_QAT_FW_COMN_FLAGS_BUILD(DC_DEFAULT_QAT_PTR_TYPE, QAT_COMN_CD_FLD_TYPE_16BYTE_DATA); pService = (sal_compression_service_t *)dcInstance; secureRam = 
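/**
 *****************************************************************************
 * @ingroup Dc_DataCompression
 *      Example: a CpaDcSessionSetupData accepted by dcCheckSessionData
 *
 * @description
 *      Editor's illustration, not part of the driver. dcInitSession(),
 *      underway here, first validates the user's setup data with
 *      dcCheckSessionData() above. The values below satisfy every range
 *      check: a compression level between CPA_DC_L1 and CPA_DC_L9,
 *      CPA_DC_DEFLATE (the only compType accepted), a Huffman type other
 *      than CPA_DC_HT_PRECOMP, and in-range direction, state and checksum
 *      settings.
 *
 * @code
 *      #include "cpa.h"
 *      #include "cpa_dc.h"
 *
 *      static void
 *      initSetupData(CpaDcSessionSetupData *sd)
 *      {
 *              sd->compLevel = CPA_DC_L1;
 *              sd->compType = CPA_DC_DEFLATE;
 *              sd->huffType = CPA_DC_HT_STATIC;
 *              sd->autoSelectBestHuffmanTree = CPA_DC_ASB_DISABLED;
 *              sd->sessDirection = CPA_DC_DIR_COMBINED;
 *              sd->sessState = CPA_DC_STATELESS;
 *              sd->checksum = CPA_DC_ADLER32;
 *      }
 * @endcode
 *****************************************************************************/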
pService->comp_device_data.useDevRam; LAC_CHECK_NULL_PARAM(pSessionHandle); LAC_CHECK_NULL_PARAM(pSessionData); /* Check that the parameters defined in the pSessionData are valid for * the * device */ if (CPA_STATUS_SUCCESS != dcCheckSessionData(pSessionData, dcInstance)) { return CPA_STATUS_INVALID_PARAM; } if ((CPA_DC_STATEFUL == pSessionData->sessState) && (CPA_DC_DIR_DECOMPRESS != pSessionData->sessDirection)) { QAT_UTILS_LOG("Stateful sessions are not supported.\n"); return CPA_STATUS_UNSUPPORTED; } - if (CPA_DC_HT_FULL_DYNAMIC == pSessionData->huffType) { + /* Check for Gen4 and stateful, return error if both exist */ + if ((isDcGen4x(pService)) && + (CPA_DC_STATEFUL == pSessionData->sessState && + CPA_DC_DIR_DECOMPRESS != pSessionData->sessDirection)) { + QAT_UTILS_LOG("Stateful sessions are not supported for " + "compression direction"); + return CPA_STATUS_UNSUPPORTED; + } + + if ((isDcGen2x(pService)) && + (CPA_DC_HT_FULL_DYNAMIC == pSessionData->huffType)) { /* Test if DRAM is available for the intermediate buffers */ if ((NULL == pService->pInterBuffPtrsArray) && (0 == pService->pInterBuffPtrsArrayPhyAddr)) { if (CPA_DC_ASB_STATIC_DYNAMIC == pSessionData->autoSelectBestHuffmanTree) { /* Define the Huffman tree as static */ pSessionData->huffType = CPA_DC_HT_STATIC; } else { QAT_UTILS_LOG( - "No buffer defined for this instance - see cpaDcStartInstance.\n"); + "No buffer defined for this instance - " + "see cpaDcStartInstance.\n"); return CPA_STATUS_RESOURCE; } } } if ((CPA_DC_STATEFUL == pSessionData->sessState) && (CPA_DC_DEFLATE == pSessionData->compType)) { /* Get the size of the context buffer */ status = dcGetContextSize(dcInstance, pSessionData, &minContextSize); if (CPA_STATUS_SUCCESS != status) { QAT_UTILS_LOG( "Unable to get the context size of the session.\n"); return CPA_STATUS_FAIL; } /* If the minContextSize is zero it means we will not save or * restore * any history */ if (0 != minContextSize) { Cpa64U contextBuffSize = 0; LAC_CHECK_NULL_PARAM(pContextBuffer); if (LacBuffDesc_BufferListVerify( pContextBuffer, &contextBuffSize, LAC_NO_ALIGNMENT_SHIFT) != CPA_STATUS_SUCCESS) { return CPA_STATUS_INVALID_PARAM; } /* Ensure that the context buffer size is greater or * equal * to minContextSize */ if (contextBuffSize < minContextSize) { QAT_UTILS_LOG( "Context buffer size should be greater or equal to %d.\n", minContextSize); return CPA_STATUS_INVALID_PARAM; } } } /* Re-align the session structure to 64 byte alignment */ physAddress = LAC_OS_VIRT_TO_PHYS_EXTERNAL(pService->generic_service_info, (Cpa8U *)pSessionHandle + sizeof(void *)); if (physAddress == 0) { QAT_UTILS_LOG( "Unable to get the physical address of the session.\n"); return CPA_STATUS_FAIL; } physAddressAligned = (CpaPhysicalAddr)LAC_ALIGN_POW2_ROUNDUP(physAddress, LAC_64BYTE_ALIGNMENT); pSessionDesc = (dc_session_desc_t *) /* Move the session pointer by the physical offset between aligned and unaligned memory */ ((Cpa8U *)pSessionHandle + sizeof(void *) + (physAddressAligned - physAddress)); /* Save the aligned pointer in the first bytes (size of LAC_ARCH_UINT) * of the session memory */ *((LAC_ARCH_UINT *)pSessionHandle) = (LAC_ARCH_UINT)pSessionDesc; /* Zero the compression session */ LAC_OS_BZERO(pSessionDesc, sizeof(dc_session_desc_t)); /* Write the buffer descriptor for context/history */ if (0 != minContextSize) { status = LacBuffDesc_BufferListDescWrite( pContextBuffer, &contextAddrPhys, CPA_FALSE, &(pService->generic_service_info)); if (status != CPA_STATUS_SUCCESS) { return status; } 
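/**
 *****************************************************************************
 * @ingroup Dc_DataCompression
 *      Example: the 64-byte re-alignment of the session descriptor
 *
 * @description
 *      Editor's illustration, not part of the driver. dcInitSession() above
 *      reserves sizeof(void *) bytes at the start of the user-supplied
 *      session memory, rounds the remainder up to the next 64-byte boundary
 *      using the physical address, and stores the resulting descriptor
 *      pointer in those first bytes so later calls can recover it. The
 *      sketch shows the round-up arithmetic for a power-of-two alignment,
 *      equivalent to what LAC_ALIGN_POW2_ROUNDUP computes.
 *
 * @code
 *      #include <stdint.h>
 *
 *      // Round addr up to the next multiple of align (a power of two).
 *      static inline uint64_t
 *      align_pow2_roundup(uint64_t addr, uint64_t align)
 *      {
 *              return (addr + align - 1) & ~(align - 1);
 *      }
 *
 *      // align_pow2_roundup(phys, 64) - phys is the byte offset added to
 *      // the virtual pointer to reach the aligned descriptor.
 * @endcode
 *****************************************************************************/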
pSessionDesc->pContextBuffer = pContextBuffer; pSessionDesc->historyBuffSize = historySize; } pSessionDesc->cumulativeConsumedBytes = 0; /* Initialise pSessionDesc */ pSessionDesc->requestType = DC_REQUEST_FIRST; pSessionDesc->huffType = pSessionData->huffType; pSessionDesc->compType = pSessionData->compType; pSessionDesc->checksumType = pSessionData->checksum; pSessionDesc->autoSelectBestHuffmanTree = pSessionData->autoSelectBestHuffmanTree; pSessionDesc->sessDirection = pSessionData->sessDirection; pSessionDesc->sessState = pSessionData->sessState; pSessionDesc->compLevel = pSessionData->compLevel; pSessionDesc->isDcDp = CPA_FALSE; pSessionDesc->minContextSize = minContextSize; pSessionDesc->isSopForCompressionProcessed = CPA_FALSE; pSessionDesc->isSopForDecompressionProcessed = CPA_FALSE; if (CPA_DC_ADLER32 == pSessionDesc->checksumType) { pSessionDesc->previousChecksum = 1; } else { pSessionDesc->previousChecksum = 0; } if (CPA_DC_STATEFUL == pSessionData->sessState) { /* Init the spinlock used to lock the access to the number of * stateful * in-flight requests */ status = LAC_SPINLOCK_INIT(&(pSessionDesc->sessionLock)); if (CPA_STATUS_SUCCESS != status) { QAT_UTILS_LOG( "Spinlock init failed for sessionLock.\n"); return CPA_STATUS_RESOURCE; } } /* For asynchronous - use the user supplied callback * for synchronous - use the internal synchronous callback */ pSessionDesc->pCompressionCb = ((void *)NULL != (void *)callbackFn) ? callbackFn : LacSync_GenWakeupSyncCaller; /* Reset the pending callback counters */ qatUtilsAtomicSet(0, &pSessionDesc->pendingStatelessCbCount); qatUtilsAtomicSet(0, &pSessionDesc->pendingStatefulCbCount); pSessionDesc->pendingDpStatelessCbCount = 0; if (CPA_DC_DIR_DECOMPRESS != pSessionData->sessDirection) { - if (CPA_DC_HT_FULL_DYNAMIC == pSessionData->huffType) { + if ((isDcGen2x(pService)) && + CPA_DC_HT_FULL_DYNAMIC == pSessionData->huffType) { /* Populate the compression section of the content * descriptor */ dcCompContentDescPopulate(pService, pSessionDesc, contextAddrPhys, &(pSessionDesc->reqCacheComp), ICP_QAT_FW_SLICE_XLAT, DC_COMPRESSION_REQUEST); /* Populate the translator section of the content * descriptor */ dcTransContentDescPopulate( &(pSessionDesc->reqCacheComp), ICP_QAT_FW_SLICE_DRAM_WR); if (0 != pService->pInterBuffPtrsArrayPhyAddr) { pReqCache = &(pSessionDesc->reqCacheComp); pReqCache->u1.xlt_pars.inter_buff_ptr = pService->pInterBuffPtrsArrayPhyAddr; } } else { dcCompContentDescPopulate(pService, pSessionDesc, contextAddrPhys, &(pSessionDesc->reqCacheComp), ICP_QAT_FW_SLICE_DRAM_WR, DC_COMPRESSION_REQUEST); } } /* Populate the compression section of the content descriptor for * the decompression case or combined */ if (CPA_DC_DIR_COMPRESS != pSessionData->sessDirection) { dcCompContentDescPopulate(pService, pSessionDesc, contextAddrPhys, &(pSessionDesc->reqCacheDecomp), ICP_QAT_FW_SLICE_DRAM_WR, DC_DECOMPRESSION_REQUEST); } if (CPA_DC_STATEFUL == pSessionData->sessState) { sessType = ICP_QAT_FW_COMP_STATEFUL_SESSION; LAC_OS_BZERO(&pSessionDesc->stateRegistersComp, sizeof(pSessionDesc->stateRegistersComp)); LAC_OS_BZERO(&pSessionDesc->stateRegistersDecomp, sizeof(pSessionDesc->stateRegistersDecomp)); } /* Get physical address of E2E CRC buffer */ pSessionDesc->physDataIntegrityCrcs = (icp_qat_addr_width_t) LAC_OS_VIRT_TO_PHYS_EXTERNAL(pService->generic_service_info, &pSessionDesc->dataIntegrityCrcs); if (0 == pSessionDesc->physDataIntegrityCrcs) { QAT_UTILS_LOG( "Unable to get the physical address of Data Integrity buffer.\n"); return 
CPA_STATUS_FAIL; } /* Initialize default CRC parameters */ pDataIntegrityCrcs = &pSessionDesc->dataIntegrityCrcs; pDataIntegrityCrcs->crc32 = 0; pDataIntegrityCrcs->adler32 = 1; - pDataIntegrityCrcs->oCrc32Cpr = DC_INVALID_CRC; - pDataIntegrityCrcs->iCrc32Cpr = DC_INVALID_CRC; - pDataIntegrityCrcs->oCrc32Xlt = DC_INVALID_CRC; - pDataIntegrityCrcs->iCrc32Xlt = DC_INVALID_CRC; - pDataIntegrityCrcs->xorFlags = DC_XOR_FLAGS_DEFAULT; - pDataIntegrityCrcs->crcPoly = DC_CRC_POLY_DEFAULT; - pDataIntegrityCrcs->xorOut = DC_XOR_OUT_DEFAULT; - - /* Initialise seed checksums */ - pSessionDesc->seedSwCrc.swCrcI = 0; - pSessionDesc->seedSwCrc.swCrcO = 0; + + if (isDcGen2x(pService)) { + pDataIntegrityCrcs->oCrc32Cpr = DC_INVALID_CRC; + pDataIntegrityCrcs->iCrc32Cpr = DC_INVALID_CRC; + pDataIntegrityCrcs->oCrc32Xlt = DC_INVALID_CRC; + pDataIntegrityCrcs->iCrc32Xlt = DC_INVALID_CRC; + pDataIntegrityCrcs->xorFlags = DC_XOR_FLAGS_DEFAULT; + pDataIntegrityCrcs->crcPoly = DC_CRC_POLY_DEFAULT; + pDataIntegrityCrcs->xorOut = DC_XOR_OUT_DEFAULT; + } else { + pDataIntegrityCrcs->iCrc64Cpr = DC_INVALID_CRC; + pDataIntegrityCrcs->oCrc64Cpr = DC_INVALID_CRC; + pDataIntegrityCrcs->iCrc64Xlt = DC_INVALID_CRC; + pDataIntegrityCrcs->oCrc64Xlt = DC_INVALID_CRC; + pDataIntegrityCrcs->crc64Poly = DC_CRC64_POLY_DEFAULT; + pDataIntegrityCrcs->xor64Out = DC_XOR64_OUT_DEFAULT; + } + + /* Initialise seed checksums. + * Because seedSwCrc is a union, this also initialises swCrc32I and + * swCrc32O. + */ + pSessionDesc->seedSwCrc.swCrc64I = 0; + pSessionDesc->seedSwCrc.swCrc64O = 0; /* Populate the cmdFlags */ switch (pSessionDesc->autoSelectBestHuffmanTree) { case CPA_DC_ASB_DISABLED: break; case CPA_DC_ASB_STATIC_DYNAMIC: autoSelectBest = ICP_QAT_FW_COMP_AUTO_SELECT_BEST; break; case CPA_DC_ASB_UNCOMP_STATIC_DYNAMIC_WITH_STORED_HDRS: autoSelectBest = ICP_QAT_FW_COMP_AUTO_SELECT_BEST; enhancedAutoSelectBest = ICP_QAT_FW_COMP_ENH_AUTO_SELECT_BEST; break; case CPA_DC_ASB_UNCOMP_STATIC_DYNAMIC_WITH_NO_HDRS: autoSelectBest = ICP_QAT_FW_COMP_AUTO_SELECT_BEST; enhancedAutoSelectBest = ICP_QAT_FW_COMP_ENH_AUTO_SELECT_BEST; disableType0EnhancedAutoSelectBest = ICP_QAT_FW_COMP_DISABLE_TYPE0_ENH_AUTO_SELECT_BEST; break; default: break; } rpCmdFlags = ICP_QAT_FW_COMP_REQ_PARAM_FLAGS_BUILD( ICP_QAT_FW_COMP_SOP, ICP_QAT_FW_COMP_EOP, ICP_QAT_FW_COMP_BFINAL, ICP_QAT_FW_COMP_NO_CNV, ICP_QAT_FW_COMP_NO_CNV_RECOVERY, + ICP_QAT_FW_COMP_NO_CNV_DFX, ICP_QAT_FW_COMP_CRC_MODE_LEGACY); cmdFlags = ICP_QAT_FW_COMP_FLAGS_BUILD(sessType, autoSelectBest, enhancedAutoSelectBest, disableType0EnhancedAutoSelectBest, secureRam); if (CPA_DC_DIR_DECOMPRESS != pSessionData->sessDirection) { - if (CPA_DC_HT_FULL_DYNAMIC == pSessionDesc->huffType) { - dcCmdId = (icp_qat_fw_la_cmd_id_t)( - ICP_QAT_FW_COMP_CMD_DYNAMIC); - } + status = dcGetCompressCommandId(pService, + pSessionData, + (Cpa8U *)&dcCmdId); + if (CPA_STATUS_SUCCESS != status) { + QAT_UTILS_LOG( + "Couldn't get compress command ID for current " + "session data."); + return status; + } pReqCache = &(pSessionDesc->reqCacheComp); pReqCache->comp_pars.req_par_flags = rpCmdFlags; pReqCache->comp_pars.crc.legacy.initial_adler = 1; pReqCache->comp_pars.crc.legacy.initial_crc32 = 0; /* Populate header of the common request message */ SalQatMsg_CmnHdrWrite((icp_qat_fw_comn_req_t *)pReqCache, ICP_QAT_FW_COMN_REQ_CPM_FW_COMP, (uint8_t)dcCmdId, cmnRequestFlags, cmdFlags); } if (CPA_DC_DIR_COMPRESS != pSessionData->sessDirection) { - dcCmdId = - (icp_qat_fw_la_cmd_id_t)(ICP_QAT_FW_COMP_CMD_DECOMPRESS); + status = dcGetDecompressCommandId(pService, +
pSessionData, + (Cpa8U *)&dcCmdId); + if (CPA_STATUS_SUCCESS != status) { + QAT_UTILS_LOG( + "Couldn't get decompress command ID for current " + "session data."); + + return status; + } pReqCache = &(pSessionDesc->reqCacheDecomp); pReqCache->comp_pars.req_par_flags = rpCmdFlags; pReqCache->comp_pars.crc.legacy.initial_adler = 1; pReqCache->comp_pars.crc.legacy.initial_crc32 = 0; /* Populate header of the common request message */ SalQatMsg_CmnHdrWrite((icp_qat_fw_comn_req_t *)pReqCache, ICP_QAT_FW_COMN_REQ_CPM_FW_COMP, (uint8_t)dcCmdId, cmnRequestFlags, cmdFlags); } return status; } CpaStatus cpaDcInitSession(CpaInstanceHandle dcInstance, CpaDcSessionHandle pSessionHandle, CpaDcSessionSetupData *pSessionData, CpaBufferList *pContextBuffer, CpaDcCallbackFn callbackFn) { CpaInstanceHandle insHandle = NULL; sal_compression_service_t *pService = NULL; if (CPA_INSTANCE_HANDLE_SINGLE == dcInstance) { insHandle = dcGetFirstHandle(); } else { insHandle = dcInstance; } LAC_CHECK_INSTANCE_HANDLE(insHandle); SAL_CHECK_INSTANCE_TYPE(insHandle, SAL_SERVICE_TYPE_COMPRESSION); pService = (sal_compression_service_t *)insHandle; /* Check if SAL is initialised otherwise return an error */ SAL_RUNNING_CHECK(pService); return dcInitSession(insHandle, pSessionHandle, pSessionData, pContextBuffer, callbackFn); } CpaStatus cpaDcResetSession(const CpaInstanceHandle dcInstance, CpaDcSessionHandle pSessionHandle) { CpaStatus status = CPA_STATUS_SUCCESS; CpaInstanceHandle insHandle = NULL; + sal_compression_service_t *pService = NULL; dc_session_desc_t *pSessionDesc = NULL; Cpa64U numPendingStateless = 0; Cpa64U numPendingStateful = 0; icp_comms_trans_handle trans_handle = NULL; + dc_integrity_crc_fw_t *pDataIntegrityCrcs = NULL; + dc_sw_checksums_t *pSwCrcs = NULL; + LAC_CHECK_NULL_PARAM(pSessionHandle); pSessionDesc = DC_SESSION_DESC_FROM_CTX_GET(pSessionHandle); LAC_CHECK_NULL_PARAM(pSessionDesc); if (CPA_TRUE == pSessionDesc->isDcDp) { insHandle = dcInstance; } else { if (CPA_INSTANCE_HANDLE_SINGLE == dcInstance) { insHandle = dcGetFirstHandle(); } else { insHandle = dcInstance; } } LAC_CHECK_NULL_PARAM(insHandle); SAL_CHECK_INSTANCE_TYPE(insHandle, SAL_SERVICE_TYPE_COMPRESSION); /* Check if SAL is running otherwise return an error */ SAL_RUNNING_CHECK(insHandle); if (CPA_TRUE == pSessionDesc->isDcDp) { - trans_handle = ((sal_compression_service_t *)dcInstance) + trans_handle = ((sal_compression_service_t *)insHandle) ->trans_handle_compression_tx; if (CPA_TRUE == icp_adf_queueDataToSend(trans_handle)) { /* Process the remaining messages on the ring */ SalQatMsg_updateQueueTail(trans_handle); QAT_UTILS_LOG( "There are remaining messages on the ring\n"); return CPA_STATUS_RETRY; } /* Check if there are stateless pending requests */ if (0 != pSessionDesc->pendingDpStatelessCbCount) { QAT_UTILS_LOG( "There are %llu stateless DP requests pending.\n", (unsigned long long) pSessionDesc->pendingDpStatelessCbCount); return CPA_STATUS_RETRY; } } else { numPendingStateless = qatUtilsAtomicGet(&(pSessionDesc->pendingStatelessCbCount)); numPendingStateful = qatUtilsAtomicGet(&(pSessionDesc->pendingStatefulCbCount)); /* Check if there are stateless pending requests */ if (0 != numPendingStateless) { QAT_UTILS_LOG( "There are %llu stateless requests pending.\n", (unsigned long long)numPendingStateless); return CPA_STATUS_RETRY; } /* Check if there are stateful pending requests */ if (0 != numPendingStateful) { QAT_UTILS_LOG( "There are %llu stateful requests pending.\n", (unsigned long long)numPendingStateful); return 
CPA_STATUS_RETRY; } /* Reset pSessionDesc */ pSessionDesc->requestType = DC_REQUEST_FIRST; pSessionDesc->cumulativeConsumedBytes = 0; if (CPA_DC_ADLER32 == pSessionDesc->checksumType) { pSessionDesc->previousChecksum = 1; } else { pSessionDesc->previousChecksum = 0; } + pSessionDesc->cnvErrorInjection = ICP_QAT_FW_COMP_NO_CNV_DFX; + + /* Reset integrity CRCs to default parameters. */ + pDataIntegrityCrcs = &pSessionDesc->dataIntegrityCrcs; + memset(pDataIntegrityCrcs, 0, sizeof(dc_integrity_crc_fw_t)); + pDataIntegrityCrcs->adler32 = 1; + + pService = (sal_compression_service_t *)insHandle; + if (isDcGen2x(pService)) { + pDataIntegrityCrcs->xorFlags = DC_XOR_FLAGS_DEFAULT; + pDataIntegrityCrcs->crcPoly = DC_CRC_POLY_DEFAULT; + pDataIntegrityCrcs->xorOut = DC_XOR_OUT_DEFAULT; + } else { + pDataIntegrityCrcs->crc64Poly = DC_CRC64_POLY_DEFAULT; + pDataIntegrityCrcs->xor64Out = DC_XOR64_OUT_DEFAULT; + } + + /* Reset seed SW checksums. */ + pSwCrcs = &pSessionDesc->seedSwCrc; + memset(pSwCrcs, 0, sizeof(dc_sw_checksums_t)); + + /* Reset integrity SW checksums. */ + pSwCrcs = &pSessionDesc->integritySwCrc; + memset(pSwCrcs, 0, sizeof(dc_sw_checksums_t)); } + /* Reset the pending callback counters */ qatUtilsAtomicSet(0, &pSessionDesc->pendingStatelessCbCount); qatUtilsAtomicSet(0, &pSessionDesc->pendingStatefulCbCount); pSessionDesc->pendingDpStatelessCbCount = 0; if (CPA_DC_STATEFUL == pSessionDesc->sessState) { LAC_OS_BZERO(&pSessionDesc->stateRegistersComp, sizeof(pSessionDesc->stateRegistersComp)); LAC_OS_BZERO(&pSessionDesc->stateRegistersDecomp, sizeof(pSessionDesc->stateRegistersDecomp)); } return status; } CpaStatus cpaDcRemoveSession(const CpaInstanceHandle dcInstance, CpaDcSessionHandle pSessionHandle) { CpaStatus status = CPA_STATUS_SUCCESS; CpaInstanceHandle insHandle = NULL; dc_session_desc_t *pSessionDesc = NULL; Cpa64U numPendingStateless = 0; Cpa64U numPendingStateful = 0; icp_comms_trans_handle trans_handle = NULL; LAC_CHECK_NULL_PARAM(pSessionHandle); pSessionDesc = DC_SESSION_DESC_FROM_CTX_GET(pSessionHandle); LAC_CHECK_NULL_PARAM(pSessionDesc); if (CPA_TRUE == pSessionDesc->isDcDp) { insHandle = dcInstance; } else { if (CPA_INSTANCE_HANDLE_SINGLE == dcInstance) { insHandle = dcGetFirstHandle(); } else { insHandle = dcInstance; } } LAC_CHECK_NULL_PARAM(insHandle); SAL_CHECK_INSTANCE_TYPE(insHandle, SAL_SERVICE_TYPE_COMPRESSION); /* Check if SAL is running otherwise return an error */ SAL_RUNNING_CHECK(insHandle); if (CPA_TRUE == pSessionDesc->isDcDp) { trans_handle = ((sal_compression_service_t *)insHandle) ->trans_handle_compression_tx; if (CPA_TRUE == icp_adf_queueDataToSend(trans_handle)) { /* Process the remaining messages on the ring */ SalQatMsg_updateQueueTail(trans_handle); QAT_UTILS_LOG( "There are remaining messages on the ring.\n"); return CPA_STATUS_RETRY; } /* Check if there are stateless pending requests */ if (0 != pSessionDesc->pendingDpStatelessCbCount) { QAT_UTILS_LOG( "There are %llu stateless DP requests pending.\n", (unsigned long long) pSessionDesc->pendingDpStatelessCbCount); return CPA_STATUS_RETRY; } } else { numPendingStateless = qatUtilsAtomicGet(&(pSessionDesc->pendingStatelessCbCount)); numPendingStateful = qatUtilsAtomicGet(&(pSessionDesc->pendingStatefulCbCount)); /* Check if there are stateless pending requests */ if (0 != numPendingStateless) { QAT_UTILS_LOG( "There are %llu stateless requests pending.\n", (unsigned long long)numPendingStateless); status = CPA_STATUS_RETRY; } /* Check if there are stateful pending requests */ if (0 != 
numPendingStateful) { QAT_UTILS_LOG( "There are %llu stateful requests pending.\n", (unsigned long long)numPendingStateful); status = CPA_STATUS_RETRY; } if ((CPA_DC_STATEFUL == pSessionDesc->sessState) && (CPA_STATUS_SUCCESS == status)) { - if (CPA_STATUS_SUCCESS != - LAC_SPINLOCK_DESTROY( - &(pSessionDesc->sessionLock))) { - QAT_UTILS_LOG( - "Failed to destory session lock.\n"); - } + LAC_SPINLOCK_DESTROY(&(pSessionDesc->sessionLock)); } } return status; } CpaStatus dcGetSessionSize(CpaInstanceHandle dcInstance, CpaDcSessionSetupData *pSessionData, Cpa32U *pSessionSize, Cpa32U *pContextSize) { CpaStatus status = CPA_STATUS_SUCCESS; CpaInstanceHandle insHandle = NULL; if (CPA_INSTANCE_HANDLE_SINGLE == dcInstance) { insHandle = dcGetFirstHandle(); } else { insHandle = dcInstance; } /* Check parameters */ LAC_CHECK_NULL_PARAM(insHandle); LAC_CHECK_NULL_PARAM(pSessionData); LAC_CHECK_NULL_PARAM(pSessionSize); if (dcCheckSessionData(pSessionData, insHandle) != CPA_STATUS_SUCCESS) { return CPA_STATUS_INVALID_PARAM; } /* Get session size for session data */ *pSessionSize = sizeof(dc_session_desc_t) + LAC_64BYTE_ALIGNMENT + sizeof(LAC_ARCH_UINT); if (NULL != pContextSize) { status = dcGetContextSize(insHandle, pSessionData, pContextSize); if (CPA_STATUS_SUCCESS != status) { QAT_UTILS_LOG( "Unable to get the context size of the session.\n"); return CPA_STATUS_FAIL; } } return CPA_STATUS_SUCCESS; } CpaStatus cpaDcGetSessionSize(CpaInstanceHandle dcInstance, CpaDcSessionSetupData *pSessionData, Cpa32U *pSessionSize, Cpa32U *pContextSize) { LAC_CHECK_NULL_PARAM(pContextSize); return dcGetSessionSize(dcInstance, pSessionData, pSessionSize, pContextSize); } + +CpaStatus +dcSetCnvError(CpaInstanceHandle dcInstance, CpaDcSessionHandle pSessionHandle) +{ + LAC_CHECK_NULL_PARAM(pSessionHandle); + + dc_session_desc_t *pSessionDesc = NULL; + CpaInstanceHandle insHandle = NULL; + sal_compression_service_t *pService = NULL; + + if (CPA_INSTANCE_HANDLE_SINGLE == dcInstance) { + insHandle = dcGetFirstHandle(); + } else { + insHandle = dcInstance; + } + + pService = (sal_compression_service_t *)insHandle; + + if (isDcGen2x(pService)) { + QAT_UTILS_LOG("Unsupported compression feature.\n"); + return CPA_STATUS_UNSUPPORTED; + } + pSessionDesc = DC_SESSION_DESC_FROM_CTX_GET(pSessionHandle); + + LAC_CHECK_NULL_PARAM(pSessionDesc); + + pSessionDesc->cnvErrorInjection = ICP_QAT_FW_COMP_CNV_DFX; + + return CPA_STATUS_SUCCESS; +} diff --git a/sys/dev/qat/qat_api/common/compression/include/dc_datapath.h b/sys/dev/qat/qat_api/common/compression/include/dc_datapath.h index 0a6ef7191704..72cb08e4e128 100644 --- a/sys/dev/qat/qat_api/common/compression/include/dc_datapath.h +++ b/sys/dev/qat/qat_api/common/compression/include/dc_datapath.h @@ -1,186 +1,200 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright(c) 2007-2022 Intel Corporation */ /* $FreeBSD$ */ /** ***************************************************************************** * @file dc_datapath.h * * @ingroup Dc_DataCompression * * @description * Definition of the Data Compression datapath parameters. 
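 *
 * [Editorial note -- illustrative, not part of the patch] The Gen4
 * additions below raise the minimum destination buffer sizes. A hedged
 * sketch of how a caller might select the dynamic-Huffman minimum by
 * device generation, using isDcGen4x() from this driver:
 * @code
 * Cpa32U minDynSize = isDcGen4x(pService) ?
 *     DC_DEST_BUFFER_DYN_MIN_SIZE_GEN4 : DC_DEST_BUFFER_DYN_MIN_SIZE;
 * @endcode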
*****************************************************************************/ #ifndef DC_DATAPATH_H_ #define DC_DATAPATH_H_ #define LAC_QAT_DC_REQ_SZ_LW 32 #define LAC_QAT_DC_RESP_SZ_LW 8 /* Restriction on the source buffer size for compression due to the firmware * processing */ #define DC_SRC_BUFFER_MIN_SIZE (15) /* Restriction on the destination buffer size for compression due to * the management of skid buffers in the firmware */ #define DC_DEST_BUFFER_DYN_MIN_SIZE (128) #define DC_DEST_BUFFER_STA_MIN_SIZE (64) +#define DC_DEST_BUFFER_DYN_MIN_SIZE_GEN4 (512) +#define DC_DEST_BUFFER_STA_MIN_SIZE_GEN4 (1024) /* C62x and C3xxx pcie rev0 devices require an additional 32 bytes */ #define DC_DEST_BUFFER_STA_ADDITIONAL_SIZE (32) /* C4xxx device only requires 47 bytes */ #define DC_DEST_BUFFER_MIN_SIZE (47) /* Minimum destination buffer size for decompression */ #define DC_DEST_BUFFER_DEC_MIN_SIZE (1) /* Restriction on the source and destination buffer sizes for compression due * to the firmware taking 32-bit parameters. The max size is 2^32-1 */ #define DC_BUFFER_MAX_SIZE (0xFFFFFFFF) /* DC Source & Destination buffer type (FLAT/SGL) */ #define DC_DEFAULT_QAT_PTR_TYPE QAT_COMN_PTR_TYPE_SGL #define DC_DP_QAT_PTR_TYPE QAT_COMN_PTR_TYPE_FLAT /* Offset to first byte of Input Byte Counter (IBC) in state register */ #define DC_STATE_IBC_OFFSET (8) /* Size in bytes of input byte counter (IBC) in state register */ #define DC_IBC_SIZE_IN_BYTES (4) /* Offset to first byte of CRC32 in state register */ #define DC_STATE_CRC32_OFFSET (40) /* Offset to first byte of output CRC32 in state register */ #define DC_STATE_OUTPUT_CRC32_OFFSET (48) /* Offset to first byte of input CRC32 in state register */ #define DC_STATE_INPUT_CRC32_OFFSET (52) /* Offset to first byte of ADLER32 in state register */ #define DC_STATE_ADLER32_OFFSET (48) /* 8 bit mask value */ #define DC_8_BIT_MASK (0xff) /* 8 bit shift position */ #define DC_8_BIT_SHIFT_POS (8) /* Size in bytes of checksum */ #define DC_CHECKSUM_SIZE_IN_BYTES (4) /* Mask used to set the most significant bit to zero */ #define DC_STATE_REGISTER_ZERO_MSB_MASK (0x7F) /* Mask used to keep only the most significant bit and set the others to zero */ #define DC_STATE_REGISTER_KEEP_MSB_MASK (0x80) /* Compression state register word containing the parity bit */ #define DC_STATE_REGISTER_PARITY_BIT_WORD (5) /* Location of the parity bit within the compression state register word */ #define DC_STATE_REGISTER_PARITY_BIT (7) /* size which needs to be reserved before the results field to * align the results field with the API struct */ #define DC_API_ALIGNMENT_OFFSET (offsetof(CpaDcDpOpData, results)) /* Mask used to check the CompressAndVerify capability bit */ #define DC_CNV_EXTENDED_CAPABILITY (0x01) /* Mask used to check the CompressAndVerifyAndRecover capability bit */ #define DC_CNVNR_EXTENDED_CAPABILITY (0x100) /* Default values for CNV integrity checks; * these are used to inform the hardware of the CRC parameters to be used * when calculating CRCs */ #define DC_CRC_POLY_DEFAULT 0x04c11db7 +#define DC_CRC64_POLY_DEFAULT 0x42f0e1eba9ea3693ULL #define DC_XOR_FLAGS_DEFAULT 0xe0000 #define DC_XOR_OUT_DEFAULT 0xffffffff +#define DC_XOR64_OUT_DEFAULT 0x0ULL #define DC_INVALID_CRC 0x0 /** ******************************************************************************* * @ingroup cpaDc Data Compression * Compression cookie * @description * This cookie stores information for a particular compression perform op.
* This includes various user-supplied parameters for the operation which * will be needed in our callback function. * A pointer to this cookie is stored in the opaque data field of the QAT * message so that it can be accessed in the asynchronous callback. * @note * The order of the parameters within this structure is important. It needs * to match the order of the parameters in CpaDcDpOpData up to the * pSessionHandle. This allows the correct processing of the callback. *****************************************************************************/ typedef struct dc_compression_cookie_s { Cpa8U dcReqParamsBuffer[DC_API_ALIGNMENT_OFFSET]; /**< Memory block - was previously reserved for request parameters. * Now size maintained so following members align with API struct, * but no longer used for request parameters */ CpaDcRqResults reserved; /**< This is reserved for results to correctly align the structure * to match the one from the data plane API */ CpaInstanceHandle dcInstance; /**< Compression instance handle */ CpaDcSessionHandle pSessionHandle; /**< Pointer to the session handle */ icp_qat_fw_comp_req_t request; /**< Compression request */ void *callbackTag; /**< Opaque data supplied by the client */ dc_session_desc_t *pSessionDesc; /**< Pointer to the session descriptor */ CpaDcFlush flushFlag; /**< Flush flag */ CpaDcOpData *pDcOpData; /**< struct containing flags and CRC related data for this session */ CpaDcRqResults *pResults; /**< Pointer to result buffer holding consumed and produced data */ Cpa32U srcTotalDataLenInBytes; /**< Total length of the source data */ Cpa32U dstTotalDataLenInBytes; /**< Total length of the destination data */ dc_request_dir_t compDecomp; /**< Used to know whether the request is compression or decompression. * Useful when defining the session as combined */ CpaBufferList *pUserSrcBuff; /**< virtual userspace ptr to source SGL */ CpaBufferList *pUserDestBuff; /**< virtual userspace ptr to destination SGL */ + CpaDcCallbackFn pCbFunc; + /**< Callback function defined for the traditional sessionless API */ + CpaDcChecksum checksumType; + /**< Type of checksum */ + dc_integrity_crc_fw_t dataIntegrityCrcs; + /**< Data integrity table */ + } dc_compression_cookie_t; /** ***************************************************************************** * @ingroup Dc_DataCompression * Callback function called for compression and decompression requests in * asynchronous mode * * @description * Called to process compression and decompression response messages. This * callback will check for errors, update the statistics and will call the * user callback * * @param[in] pRespMsg Response message * *****************************************************************************/ void dcCompression_ProcessCallback(void *pRespMsg); +CpaStatus dcCheckOpData(sal_compression_service_t *pService, + CpaDcOpData *pOpData); + /** ***************************************************************************** * @ingroup Dc_DataCompression * Describes CNV and CNVNR modes * * @description * This enum is used to indicate the CNV modes. 
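 *
 * [Editorial sketch -- illustrative only] A caller would typically
 * pick the strongest mode the device reports; capabilityCnV and
 * capabilityCnVnR are hypothetical flags derived from the
 * DC_CNV_EXTENDED_CAPABILITY / DC_CNVNR_EXTENDED_CAPABILITY bits
 * defined above in this header.
 * @code
 * dc_cnv_mode_t cnvMode = capabilityCnVnR ? DC_CNVNR :
 *                         capabilityCnV   ? DC_CNV  : DC_NO_CNV;
 * @endcode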
* *****************************************************************************/ typedef enum dc_cnv_mode_s { DC_NO_CNV = 0, /* CNV = FALSE, CNVNR = FALSE */ DC_CNV, /* CNV = TRUE, CNVNR = FALSE */ DC_CNVNR, /* CNV = TRUE, CNVNR = TRUE */ } dc_cnv_mode_t; #endif /* DC_DATAPATH_H_ */ diff --git a/sys/dev/qat/qat_api/common/compression/include/dc_session.h b/sys/dev/qat/qat_api/common/compression/include/dc_session.h index 5a4961fadd60..041c60e5845c 100644 --- a/sys/dev/qat/qat_api/common/compression/include/dc_session.h +++ b/sys/dev/qat/qat_api/common/compression/include/dc_session.h @@ -1,278 +1,430 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright(c) 2007-2022 Intel Corporation */ /* $FreeBSD$ */ /** ***************************************************************************** * @file dc_session.h * * @ingroup Dc_DataCompression * * @description * Definition of the Data Compression session parameters. * *****************************************************************************/ #ifndef DC_SESSION_H #define DC_SESSION_H #include "cpa_dc_dp.h" #include "icp_qat_fw_comp.h" #include "sal_qat_cmn_msg.h" +#include "sal_types_compression.h" /* Maximum number of intermediate buffer SGLs for devices * with a maximum of 6 compression slices */ #define DC_QAT_MAX_NUM_INTER_BUFFERS_6COMP_SLICES (12) /* Maximum number of intermediate buffer SGLs for devices * with a maximum of 10 compression slices */ #define DC_QAT_MAX_NUM_INTER_BUFFERS_10COMP_SLICES (20) /* Maximum number of intermediate buffer SGLs for devices * with a maximum of 24 compression slices and 32 MEs */ #define DC_QAT_MAX_NUM_INTER_BUFFERS_24COMP_SLICES (64) /* Maximum size of the state registers, 64 bytes */ #define DC_QAT_STATE_REGISTERS_MAX_SIZE (64) /* Size of the history window, expressed as the base-2 logarithm of the * maximum window size minus 8 (e.g. DC_4K_WINDOW_SIZE = 4 selects a 4 KB * window: 2^(4+8) = 4096) */ +#define DC_4K_WINDOW_SIZE (4) #define DC_8K_WINDOW_SIZE (5) #define DC_16K_WINDOW_SIZE (6) #define DC_32K_WINDOW_SIZE (7) /* Context size */ #define DC_DEFLATE_MAX_CONTEXT_SIZE (49152) #define DC_INFLATE_CONTEXT_SIZE (36864) #define DC_DEFLATE_EH_MAX_CONTEXT_SIZE (65536) #define DC_DEFLATE_EH_MIN_CONTEXT_SIZE (49152) #define DC_INFLATE_EH_CONTEXT_SIZE (34032) /* Retrieve the session descriptor pointer from the session context structure * that the user allocates.
The pointer to the internally realigned address * is stored at the start of the session context that the user allocates */ #define DC_SESSION_DESC_FROM_CTX_GET(pSession) \ (dc_session_desc_t *)(*(LAC_ARCH_UINT *)pSession) /* Maximum size for the compression part of the content descriptor */ #define DC_QAT_COMP_CONTENT_DESC_SIZE sizeof(icp_qat_fw_comp_cd_hdr_t) /* Maximum size for the translator part of the content descriptor */ #define DC_QAT_TRANS_CONTENT_DESC_SIZE \ (sizeof(icp_qat_fw_xlt_cd_hdr_t) + DC_QAT_MAX_TRANS_SETUP_BLK_SZ) /* Maximum size of the decompression content descriptor */ #define DC_QAT_CONTENT_DESC_DECOMP_MAX_SIZE \ LAC_ALIGN_POW2_ROUNDUP(DC_QAT_COMP_CONTENT_DESC_SIZE, \ (1 << LAC_64BYTE_ALIGNMENT_SHIFT)) /* Maximum size of the compression content descriptor */ #define DC_QAT_CONTENT_DESC_COMP_MAX_SIZE \ LAC_ALIGN_POW2_ROUNDUP(DC_QAT_COMP_CONTENT_DESC_SIZE + \ DC_QAT_TRANS_CONTENT_DESC_SIZE, \ (1 << LAC_64BYTE_ALIGNMENT_SHIFT)) /* Direction of the request */ typedef enum dc_request_dir_e { DC_COMPRESSION_REQUEST = 1, DC_DECOMPRESSION_REQUEST } dc_request_dir_t; /* Type of the compression request */ typedef enum dc_request_type_e { DC_REQUEST_FIRST = 1, DC_REQUEST_SUBSEQUENT } dc_request_type_t; typedef enum dc_block_type_e { DC_CLEARTEXT_TYPE = 0, DC_STATIC_TYPE, DC_DYNAMIC_TYPE } dc_block_type_t; /* Internal data structure supporting end to end data integrity checks. */ typedef struct dc_integrity_crc_fw_s { Cpa32U crc32; /* CRC32 checksum returned for compressed data */ Cpa32U adler32; /* ADLER32 checksum returned for compressed data */ - Cpa32U oCrc32Cpr; - /* CRC32 checksum returned for data output by compression accelerator */ - Cpa32U iCrc32Cpr; - /* CRC32 checksum returned for input data to compression accelerator */ - Cpa32U oCrc32Xlt; - /* CRC32 checksum returned for data output by translator accelerator */ - Cpa32U iCrc32Xlt; - /* CRC32 checksum returned for input data to translator accelerator */ - Cpa32U xorFlags; - /* Initialise transactor pCRC controls in state register */ - Cpa32U crcPoly; - /* CRC32 polynomial used by hardware */ - Cpa32U xorOut; - /* CRC32 from XOR stage (Input CRC is xor'ed with value in the state) */ - Cpa32U deflateBlockType; - /* Bit 1 - Bit 0 - * 0 0 -> RAW DATA + Deflate header. - * This will not produced any CRC check because - * the output will not come from the slices. - * It will be a simple copy from input to output - * buffers list. - * 0 1 -> Static deflate block type - * 1 0 -> Dynamic deflate block type - * 1 1 -> Invalid type */ + + union { + struct { + Cpa32U oCrc32Cpr; + /* CRC32 checksum returned for data output by + * compression accelerator */ + Cpa32U iCrc32Cpr; + /* CRC32 checksum returned for input data to compression + * accelerator + */ + Cpa32U oCrc32Xlt; + /* CRC32 checksum returned for data output by translator + * accelerator + */ + Cpa32U iCrc32Xlt; + /* CRC32 checksum returned for input data to translator + * accelerator + */ + Cpa32U xorFlags; + /* Initialise transactor pCRC controls in state register + */ + Cpa32U crcPoly; + /* CRC32 polynomial used by hardware */ + Cpa32U xorOut; + /* CRC32 from XOR stage (Input CRC is xor'ed with value + * in the state) */ + Cpa32U deflateBlockType; + /* Bit 1 - Bit 0 + * 0 0 -> RAW DATA + Deflate header. + * This will not produced any CRC check + * because the output will not come + * from the slices. It will be a simple + * copy from input to output buffer + * list. 
0 1 -> Static deflate block type + * 1 0 -> Dynamic deflate block type + * 1 1 -> Invalid type + */ + }; + + struct { + Cpa64U iCrc64Cpr; + /* CRC64 checksum returned for input data to compression + * accelerator + */ + Cpa64U oCrc64Cpr; + /* CRC64 checksum returned for data output by + * compression accelerator */ + Cpa64U iCrc64Xlt; + /* CRC64 checksum returned for input data to translator + * accelerator + */ + Cpa64U oCrc64Xlt; + /* CRC64 checksum returned for data output by translator + * accelerator + */ + Cpa64U crc64Poly; + /* CRC64 polynomial used by hardware */ + Cpa64U xor64Out; + /* CRC64 from XOR stage (Input CRC is xor'ed with value + * in the state) */ + }; + }; } dc_integrity_crc_fw_t; typedef struct dc_sw_checksums_s { - Cpa32U swCrcI; - Cpa32U swCrcO; + union { + struct { + Cpa32U swCrc32I; + Cpa32U swCrc32O; + }; + + struct { + Cpa64U swCrc64I; + Cpa64U swCrc64O; + }; + }; } dc_sw_checksums_t; /* Session descriptor structure for compression */ typedef struct dc_session_desc_s { Cpa8U stateRegistersComp[DC_QAT_STATE_REGISTERS_MAX_SIZE]; /**< State registers for compression */ Cpa8U stateRegistersDecomp[DC_QAT_STATE_REGISTERS_MAX_SIZE]; /**< State registers for decompression */ icp_qat_fw_comp_req_t reqCacheComp; /**< Cache as much as possible of the compression request in a pre-built * request */ icp_qat_fw_comp_req_t reqCacheDecomp; /**< Cache as much as possible of the decompression request in a * pre-built * request */ dc_request_type_t requestType; /**< Type of the compression request. As stateful mode does not support * more than one in-flight request, there is no need to use spinlocks */ dc_request_type_t previousRequestType; /**< Type of the previous compression request. Used in cases where the * stateful operation needs to be resubmitted */ CpaDcHuffType huffType; /**< Huffman tree type */ CpaDcCompType compType; /**< Compression type */ CpaDcChecksum checksumType; /**< Type of checksum */ CpaDcAutoSelectBest autoSelectBestHuffmanTree; /**< Indicates if the implementation selects the best Huffman encoding */ CpaDcSessionDir sessDirection; /**< Session direction */ CpaDcSessionState sessState; /**< Session state */ Cpa32U deflateWindowSize; /**< Window size */ CpaDcCompLvl compLevel; /**< Compression level */ CpaDcCallbackFn pCompressionCb; /**< Callback function defined for the traditional compression session */ QatUtilsAtomic pendingStatelessCbCount; /**< Keeps track of number of pending requests on stateless session */ QatUtilsAtomic pendingStatefulCbCount; /**< Keeps track of number of pending requests on stateful session */ Cpa64U pendingDpStatelessCbCount; /**< Keeps track of number of data plane pending requests on stateless * session */ struct mtx sessionLock; /**< Lock used to provide exclusive access for number of stateful * in-flight * requests update */ CpaBoolean isDcDp; /**< Indicates if the data plane API is used */ Cpa32U minContextSize; /**< Indicates the minimum size required to allocate the context buffer */ CpaBufferList *pContextBuffer; /**< Context buffer */ Cpa32U historyBuffSize; /**< Size of the history buffer */ Cpa64U cumulativeConsumedBytes; /**< Cumulative amount of consumed bytes. Used to build the footer in * the * stateful case */ Cpa32U previousChecksum; /**< Save the previous value of the checksum.
Used to process zero byte * stateful compression or decompression requests */ CpaBoolean isSopForCompressionProcessed; /**< Indicates whether a Compression Request is received in this session */ CpaBoolean isSopForDecompressionProcessed; /**< Indicates whether a Decompression Request is received in this * session */ /**< Data integrity table */ dc_integrity_crc_fw_t dataIntegrityCrcs; /**< Physical address of Data integrity buffer */ CpaPhysicalAddr physDataIntegrityCrcs; /* Seed checksums structure used to calculate software calculated * checksums. */ dc_sw_checksums_t seedSwCrc; /* Driver calculated integrity software CRC */ dc_sw_checksums_t integritySwCrc; + /* Flag to disable or enable CnV Error Injection mechanism */ + CpaBoolean cnvErrorInjection; } dc_session_desc_t; /** ***************************************************************************** * @ingroup Dc_DataCompression * Initialise a compression session * * @description * This function will initialise a compression session * * @param[in] dcInstance Instance handle derived from discovery * functions * @param[in,out] pSessionHandle Pointer to a session handle * @param[in,out] pSessionData Pointer to a user instantiated structure * containing session data * @param[in] pContextBuffer Pointer to context buffer * * @param[in] callbackFn For synchronous operation this callback * shall be a null pointer * * @retval CPA_STATUS_SUCCESS Function executed successfully * @retval CPA_STATUS_FAIL Function failed * @retval CPA_STATUS_INVALID_PARAM Invalid parameter passed in * @retval CPA_STATUS_RESOURCE Error related to system resources *****************************************************************************/ CpaStatus dcInitSession(CpaInstanceHandle dcInstance, CpaDcSessionHandle pSessionHandle, CpaDcSessionSetupData *pSessionData, CpaBufferList *pContextBuffer, CpaDcCallbackFn callbackFn); /** ***************************************************************************** * @ingroup Dc_DataCompression * Get the size of the memory required to hold the session information * * @description * This function will get the size of the memory required to hold the * session information * * @param[in] dcInstance Instance handle derived from discovery * functions * @param[in] pSessionData Pointer to a user instantiated structure * containing session data * @param[out] pSessionSize On return, this parameter will be the size * of the memory that will be * required by cpaDcInitSession() for session * data. * @param[out] pContextSize On return, this parameter will be the size * of the memory that will be required * for context data. Context data is * save/restore data including history and * any implementation specific data that is * required for a save/restore operation. * * @retval CPA_STATUS_SUCCESS Function executed successfully * @retval CPA_STATUS_FAIL Function failed * @retval CPA_STATUS_INVALID_PARAM Invalid parameter passed in *****************************************************************************/ CpaStatus dcGetSessionSize(CpaInstanceHandle dcInstance, CpaDcSessionSetupData *pSessionData, Cpa32U *pSessionSize, Cpa32U *pContextSize); +/** + ***************************************************************************** + * @ingroup Dc_DataCompression + * Set the cnvErrorInjection flag in session descriptor + * + * @description + * This function enables the CnVError injection for the session + * passed in. All Compression requests sent within the session + * are injected with CnV errors. 
This error injection is for the + * duration of the session. Resetting the session results in + * setting being cleared. CnV error injection does not apply to + * Data Plane API. + * + * @param[in] dcInstance Instance Handle + * @param[in] pSessionHandle Pointer to a session handle + * + * @retval CPA_STATUS_SUCCESS Function executed successfully + * @retval CPA_STATUS_INVALID_PARAM Invalid parameter passed in + * @retval CPA_STATUS_UNSUPPORTED Unsupported feature + *****************************************************************************/ +CpaStatus dcSetCnvError(CpaInstanceHandle dcInstance, + CpaDcSessionHandle pSessionHandle); + +/** + ***************************************************************************** + * @ingroup Dc_DataCompression + * Check that pSessionData is valid + * + * @description + * Check that all the parameters defined in the pSessionData are valid + * + * @param[in] pSessionData Pointer to a user instantiated structure + * containing session data + * + * @retval CPA_STATUS_SUCCESS Function executed successfully + * @retval CPA_STATUS_FAIL Function failed to find device + * @retval CPA_STATUS_INVALID_PARAM Invalid parameter passed in + * @retval CPA_STATUS_UNSUPPORTED Unsupported algorithm/feature + * + *****************************************************************************/ +CpaStatus dcCheckSessionData(const CpaDcSessionSetupData *pSessionData, + CpaInstanceHandle dcInstance); + +/** + ***************************************************************************** + * @ingroup Dc_DataCompression + * Get the compression command id for the given session setup data. + * + * @description + * This function will get the compression command id based on parameters + * passed in the given session setup data. + * + * @param[in] pService Pointer to the service + * @param[in] pSessionData Pointer to a user instantiated + * structure containing session data + * @param[out] pDcCmdId Pointer to the command id + * + * @retval CPA_STATUS_SUCCESS Function executed successfully + * @retval CPA_STATUS_UNSUPPORTED Unsupported algorithm/feature + * + *****************************************************************************/ +CpaStatus dcGetCompressCommandId(sal_compression_service_t *pService, + CpaDcSessionSetupData *pSessionData, + Cpa8U *pDcCmdId); + +/** + ***************************************************************************** + * @ingroup Dc_DataCompression + * Get the decompression command id for the given session setup data. + * + * @description + * This function will get the decompression command id based on parameters + * passed in the given session setup data. 
+ * + * @param[in] pService Pointer to the service + * @param[in] pSessionData Pointer to a user instantiated + * structure containing session data + * @param[out] pDcCmdId Pointer to the command id + * + * @retval CPA_STATUS_SUCCESS Function executed successfully + * @retval CPA_STATUS_UNSUPPORTED Unsupported algorithm/feature + * + *****************************************************************************/ +CpaStatus dcGetDecompressCommandId(sal_compression_service_t *pService, + CpaDcSessionSetupData *pSessionData, + Cpa8U *pDcCmdId); + +/** + ***************************************************************************** + * @ingroup Dc_DataCompression + * Populate the translator content descriptor + * + * @description + * This function will populate the translator content descriptor + * + * @param[out] pMsg Pointer to the compression message + * @param[in] nextSlice Next slice + * + *****************************************************************************/ +void dcTransContentDescPopulate(icp_qat_fw_comp_req_t *pMsg, + icp_qat_fw_slice_t nextSlice); + #endif /* DC_SESSION_H */ diff --git a/sys/dev/qat/qat_api/common/crypto/sym/include/lac_session.h b/sys/dev/qat/qat_api/common/crypto/sym/include/lac_session.h index 6ae3c51e7766..76d0f4f08bb5 100644 --- a/sys/dev/qat/qat_api/common/crypto/sym/include/lac_session.h +++ b/sys/dev/qat/qat_api/common/crypto/sym/include/lac_session.h @@ -1,622 +1,692 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright(c) 2007-2022 Intel Corporation */ /* $FreeBSD$ */ /** ***************************************************************************** * @file lac_session.h * * @defgroup LacSym_Session Session * * @ingroup LacSym * * Definition of symmetric session descriptor structure * * @lld_start * * @lld_overview * A session is required for each symmetric operation. The session descriptor * holds information about the session from when the session is initialised to * when the session is removed. The session descriptor is used in the * subsequent perform operations in the paths for both sending the request and * receiving the response. The session descriptor and any other state * information required for processing responses from the QAT are stored in an * internal cookie. A pointer to this cookie is stored in the opaque data * field of the QAT request. * * The user allocates the memory for the session using the size returned from * \ref cpaCySymSessionCtxGetSize(). Internally this memory is re-aligned on a * 64 byte boundary for use by the QAT engine. The aligned pointer is saved in * the first bytes (size of void *) of the session memory. This address * is then dereferenced in subsequent performs to get access to the session * descriptor. * * LAC Session Init\n The session descriptor is re-aligned and * populated. This includes populating the content descriptor which contains * the hardware setup for the QAT engine. The content descriptor is a read * only structure after session init and a pointer to it is sent to the QAT * for each perform operation. * * LAC Perform \n * The address for the session descriptor is got by dereferencing the first * bytes of the session memory (size of void *). For each successful * request put on the ring, the pendingCbCount for the session is incremented. * * LAC Callback \n * For each successful response the pendingCbCount for the session is * decremented. 
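 *
 * [Editorial sketch -- condensed illustration of the accounting
 * described above; field and helper names are schematic, and
 * qatUtilsAtomicDec is assumed to exist alongside the Inc/Get helpers
 * used elsewhere in this driver]
 * @code
 * qatUtilsAtomicInc(&pSessionDesc->pendingCbCount); // perform path
 * qatUtilsAtomicDec(&pSessionDesc->pendingCbCount); // callback path
 * if (0 != qatUtilsAtomicGet(&pSessionDesc->pendingCbCount))
 *         return CPA_STATUS_RETRY;                  // remove path
 * @endcode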
See \ref LacSymCb_ProcessCallbackInternal() * * LAC Session Remove \n * The address for the session descriptor is obtained by dereferencing the * first bytes of the session memory (size of void *). * The pendingCbCount for the session is checked to see if it is 0. If it is * non-zero then there are requests in flight. An error is returned to the * user. * * Concurrency\n * A reference count is used to prevent the descriptor from being removed * while there are requests in flight. * * Reference Count\n * - The perform function increments the reference count for the session. * - The callback function decrements the reference count for the session. * - The Remove function checks the reference count to ensure that it is 0. * * @lld_dependencies * - \ref LacMem "Memory" - Inline memory functions * - QatUtils: logging, locking & virt to phys translations. * * @lld_initialisation * * @lld_module_algorithms * * @lld_process_context * * @lld_end * *****************************************************************************/ /***************************************************************************/ #ifndef LAC_SYM_SESSION_H #define LAC_SYM_SESSION_H /* * Common alignment attributes to ensure * hashStatePrefixBuffer is 64-byte aligned */ #define ALIGN_START(x) #define ALIGN_END(x) __attribute__((__aligned__(x))) /* ****************************************************************************** * Include public/global header files ****************************************************************************** */ #include "cpa.h" #include "icp_accel_devices.h" #include "lac_list.h" #include "lac_sal_types.h" #include "sal_qat_cmn_msg.h" #include "lac_sym_cipher_defs.h" #include "lac_sym.h" #include "lac_sym_hash_defs.h" #include "lac_sym_qat_hash.h" /* ******************************************************************************* * Include private header files ******************************************************************************* */ +/** +***************************************************************************** +* @ingroup LacSym +* Spc state +* +* @description +* This enum is used to indicate the Spc state. +* +*****************************************************************************/ +typedef enum lac_single_pass_state_e { + NON_SPC, /* Algorithms other than CHACHA-POLY and AES-GCM */ + LIKELY_SPC, /* AES-GCM - Likely to handle it as single pass */ + SPC /* CHACHA-POLY and AES-GCM */ +} lac_single_pass_state_t; + /** ******************************************************************************* * @ingroup LacSym_Session * Symmetric session descriptor * @description * This structure stores information about a session * Note: struct types lac_session_d1_s and lac_session_d2_s are subsets of * this structure. Elements in all three should retain the same order * Only this structure is used in the session init call. The other two are * for determining the size of memory to allocate. * The comments section of each of the other two structures below show * the conditions that determine which session context memory size to use. *****************************************************************************/ typedef struct lac_session_desc_s { Cpa8U contentDescriptor[LAC_SYM_QAT_CONTENT_DESC_MAX_SIZE]; /**< QAT Content Descriptor for this session. * NOTE: Field must be correctly aligned in memory for access by QAT * engine */ Cpa8U contentDescriptorOptimised[LAC_SYM_OPTIMISED_CD_SIZE]; /**< QAT Optimised Content Descriptor for this session.
* NOTE: Field must be correctly aligned in memory for access by QAT * engine */ CpaCySymOp symOperation; /**< type of command to be performed */ sal_qat_content_desc_info_t contentDescInfo; /**< info on the content descriptor */ sal_qat_content_desc_info_t contentDescOptimisedInfo; /**< info on the optimised content descriptor */ icp_qat_fw_la_cmd_id_t laCmdId; /**Block Vs. Stream Ciphers\n * Block ciphers are treated slightly differently than Stream ciphers by this * cipher component. Supported stream ciphers consist of AES and * TripleDES algorithms in CTR mode, and ARC4. The 2 primary differences are: * - Data buffers for block ciphers are required to be a multiple of the * block size defined for the algorithm (e.g. 8 bytes for DES). For stream * ciphers, there is no such restriction. * - For stream ciphers, decryption is performed by setting the QAT hardware * to encryption mode. * * Memory address alignment of data buffers \n * The QAT requires that most data buffers are aligned on an 8-byte memory * address boundary (64-byte boundary for optimum performance). For Cipher, * this applies to the cipher key buffer passed in the Content Descriptor, * and the IV/State buffer passed in the Request Parameters block in each * request. Both of these buffers are provided by the user. It does not * apply to the cipher source/destination data buffers. * Alignment of the key buffer is ensured because the key is always copied * from the user provided buffer into a new (aligned) buffer for the QAT * (the hardware setup block, which configures the QAT slice). This is done * once only during session registration, and the user's key buffer can be * effectively discarded after that. * The IV/State buffer is provided per-request by the user, so it is recommended * to the user to provide aligned buffers for optimal performance. In the case * where an unaligned buffer is provided, a new temporary buffer is allocated * and the user's IV/State data is copied into this buffer. The aligned buffer * is then passed to the QAT in the request. In the response callback, if the * IV was updated by the QAT, the contents are copied back to the user's buffer * and the temporary buffer is freed. 
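 *
 * [Editorial sketch -- user-side illustration, not part of the patch]
 * One way a user-space caller can guarantee the recommended 64-byte
 * alignment of the per-request IV/State buffer (kernel callers would
 * use their allocator's aligned variant instead):
 * @code
 * Cpa8U *pIv = NULL;
 * if (posix_memalign((void **)&pIv, 64, ivLenInBytes) != 0)
 *         pIv = NULL; // handle allocation failure
 * @endcode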
* * @lld_process_context * * Session Register Sequence Diagram: For ARC4 cipher algorithm * \msc * APP [label="Application"], SYM [label="Symmetric LAC"], * Achain [label="Alg chain"], Cipher, SQAT [label="Symmetric QAT"]; * * APP=>SYM [ label = "cpaCySymInitSession(cbFunc)", * URL="\ref cpaCySymInitSession()"] ; * SYM=>SYM [ label = "LacSymSession_ParamCheck()", * URL="\ref LacSymSession_ParamCheck()"]; * SYM=>Achain [ label = "LacAlgChain_SessionInit()", * URL="\ref LacAlgChain_SessionInit()"]; * Achain=>Cipher [ label = "LacCipher_SessionSetupDataCheck()", * URL="\ref LacCipher_SessionSetupDataCheck()"]; * Achain<SQAT [ label = "LacSymQat_CipherContentDescPopulate()", * URL="\ref LacSymQat_CipherContentDescPopulate()"]; * Achain<SQAT [ label = "LacSymQat_CipherArc4StateInit()", * URL="\ref LacSymQat_CipherArc4StateInit()"]; * Achain<SYM [label = "LAC_SYM_STAT_INC", URL="\ref LAC_SYM_STAT_INC"]; * APP<SYM [ label = "cpaCySymPerformOp()", * URL="\ref cpaCySymPerformOp()"] ; * SYM=>SYM [ label = "LacSym_Perform()", * URL="\ref LacSym_Perform()"]; * SYM=>SYM [ label = "LacSymPerform_BufferParamCheck()", * URL="\ref LacSymPerform_BufferParamCheck()"]; * SYM<Achain [ label = "LacAlgChain_Perform()", * URL="\ref LacCipher()"]; * Achain=>Cipher [ label = "LacCipher_PerformParamCheck()", * URL="\ref LacCipher_PerformParamCheck()"]; * Achain<LMP [label="Lac_MemPoolEntryAlloc()", * URL="\ref Lac_MemPoolEntryAlloc()"]; * Achain<Cipher [ label = "LacCipher_PerformIvCheckAndAlign()", * URL="\ref LacCipher_PerformIvCheckAndAlign()"]; * Achain<SQAT [ label = "LacSymQat_CipherRequestParamsPopulate()", * URL="\ref LacSymQat_CipherRequestParamsPopulate()"]; * Achain<BUF [ label = "LacBuffDesc_BufferListDescWrite()", * URL = "\ref LacBuffDesc_BufferListDescWrite()"]; * Achain<SQAT [ label = "SalQatMsg_CmnMsgAndReqParamsPopulate()", * URL="\ref SalQatMsg_CmnMsgAndReqParamsPopulate()"]; * Achain<SYMQ [ label = "LacSymQueue_RequestSend()", * URL="\ref LacSymQueue_RequestSend()"]; * SYMQ=>QATCOMMS [ label = "QatComms_MsgSend()", * URL="\ref QatComms_MsgSend()"]; * SYMQ<SYM [ label = "LacSym_PartialPacketStateUpdate()", * URL="\ref LacSym_PartialPacketStateUpdate()"]; * SYM<SC [label = "LAC_SYM_STAT_INC", URL="\ref LAC_SYM_STAT_INC"]; * SYM<QATCOMMS [label ="QatComms_ResponseMsgHandler()", * URL="\ref QatComms_ResponseMsgHandler()"]; * QATCOMMS=>SQAT [label ="LacSymQat_SymRespHandler()", * URL="\ref LacSymQat_SymRespHandler()"]; * SQAT=>SYMCB [label="LacSymCb_ProcessCallback()", * URL="\ref LacSymCb_ProcessCallback()"]; * SYMCB=>SYMCB [label="LacSymCb_ProcessCallbackInternal()", * URL="\ref LacSymCb_ProcessCallbackInternal()"]; * SYMCB=>LMP [label="Lac_MemPoolEntryFree()", * URL="\ref Lac_MemPoolEntryFree()"]; * SYMCB<SC [label = "LAC_SYM_STAT_INC", URL="\ref LAC_SYM_STAT_INC"]; * SYMCB<APP [label="cbFunc"]; * SYMCB< zero, then there is config data available in the constants +* table which is stored in SHRAM for use by the FW. The value is the offset +* into the constants table, it is returned to the caller in poffset. 
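+*
+* [Editorial sketch -- illustrative usage, not part of the patch;
+* useSharedRamConstants() and populateHwSetupBlock() are hypothetical
+* consumers]
+* @code
+* uint8_t offset = 0;
+* LacSymQat_ConstantsGetCipherOffset(instanceHandle, algo, mode,
+*                                    direction, convert, &offset);
+* if (offset > 0)
+*         useSharedRamConstants(offset);
+* else
+*         populateHwSetupBlock();
+* @endcode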
+* +* +* @param[in] Cipher Algorithm +* @param[in] Cipher Mode +* @param[in] Direction - encrypt/decrypt +* @param[in] convert / no convert +* @param[out] offset into constants table +* +* @return none +* +*****************************************************************************/ +void LacSymQat_ConstantsGetCipherOffset(CpaInstanceHandle instanceHandle, + uint8_t algo, + uint8_t mode, + uint8_t direction, + uint8_t convert, + uint8_t *poffset); + +/** +******************************************************************************* +* @ingroup LacSymQat +* LacSymQat_ConstantsGetAuthOffset +* +* @description +* This function looks up the auth constants lookup array for +* a specific auth algorithm, mode and nested flag. +* If the lookup table value is zero then there's no suitable config data +* available in the constants table. +* If the value > zero, then there is config data available in the constants +* table which is stored in SHRAM for use by the FW. The value is the offset +* into the constants table; it is returned to the caller in poffset. +* +* +* @param[in] auth Algorithm +* @param[in] auth Mode +* @param[in] nested / no nested +* @param[out] offset into constants table +* +* @return none +* +*****************************************************************************/ +void LacSymQat_ConstantsGetAuthOffset(CpaInstanceHandle instanceHandle, + uint8_t algo, + uint8_t mode, + uint8_t nested, + uint8_t *poffset); + +#endif /* LAC_SYM_QAT_SHRAM_CONSTANTS_TABLE_H */ diff --git a/sys/dev/qat/qat_api/common/crypto/sym/include/lac_sym_qat_hash.h b/sys/dev/qat/qat_api/common/crypto/sym/include/lac_sym_qat_hash.h index 147e10f573f0..0dfb16c8338f 100644 --- a/sys/dev/qat/qat_api/common/crypto/sym/include/lac_sym_qat_hash.h +++ b/sys/dev/qat/qat_api/common/crypto/sym/include/lac_sym_qat_hash.h @@ -1,309 +1,314 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright(c) 2007-2022 Intel Corporation */ /* $FreeBSD$ */ /** ***************************************************************************** * @file lac_sym_qat_hash.h * * @defgroup LacSymQatHash Hash QAT * * @ingroup LacSymQat * * interfaces for populating qat structures for a hash operation * *****************************************************************************/ /*****************************************************************************/ #ifndef LAC_SYM_QAT_HASH_H #define LAC_SYM_QAT_HASH_H /* ****************************************************************************** * Include public/global header files ****************************************************************************** */ #include "cpa.h" #include "cpa_cy_sym.h" #include "icp_qat_fw_la.h" #include "icp_qat_hw.h" /* ******************************************************************************* * Include private header files ******************************************************************************* */ #include "lac_common.h" /** ****************************************************************************** * @ingroup LacSymQatHash * hash precomputes * * @description * This structure contains information on the hash precomputes * *****************************************************************************/ typedef struct lac_sym_qat_hash_precompute_info_s { Cpa8U *pState1; /**< state1 pointer */ Cpa32U state1Size; /**< state1 size */ Cpa8U *pState2; /**< state2 pointer */ Cpa32U state2Size; /**< state2 size */ } lac_sym_qat_hash_precompute_info_t; /** ****************************************************************************** *
@ingroup LacSymQatHash * hash state prefix buffer info * * @description * This structure contains information on the hash state prefix aad buffer * *****************************************************************************/ typedef struct lac_sym_qat_hash_state_buffer_info_s { Cpa64U pDataPhys; /**< Physical pointer to the hash state prefix buffer */ Cpa8U *pData; /**< Virtual pointer to the hash state prefix buffer */ Cpa8U stateStorageSzQuadWords; /**< hash state storage size in quad words */ Cpa8U prefixAadSzQuadWords; /**< inner prefix/aad and outer prefix size in quad words */ } lac_sym_qat_hash_state_buffer_info_t; /** ****************************************************************************** * @ingroup LacSymQatHash * Init the hash specific part of the content descriptor. * * @description * This function populates the hash specific fields of the control block * and the hardware setup block for a digest session. This function sets * the size param to hold the size of the hash setup block. * * In the case of hash only, the content descriptor will contain just a * hash control block and hash setup block. In the case of chaining it * will contain the hash control block and setup block along with the * control block and setup blocks of additional services. * * Note: The memory for the content descriptor MUST be allocated prior to * calling this function. The memory for the hash control block and hash * setup block MUST be set to 0 prior to calling this function. * * @image html contentDescriptor.png "Content Descriptor" * * @param[in] pMsg Pointer to req Parameter Footer * * @param[in] pHashSetupData Pointer to the hash setup data as * defined in the LAC API. * * @param[in] pHwBlockBase Pointer to the base of the hardware * setup block * * @param[in] hashBlkOffsetInHwBlock Offset in quad-words from the base of * the hardware setup block where the * hash block will start. This offset * is stored in the control block. It * is used to figure out where to write * the hash setup block. * * @param[in] nextSlice SliceID for next control block * entry. This value is known only by * the calling component * * @param[in] qatHashMode QAT hash mode * * @param[in] useSymConstantsTable Indicate if Shared-SRAM constants table * is used for this session. If TRUE, the * h/w setup block is NOT populated * * @param[in] useOptimisedContentDesc Indicate if optimised content desc * is used for this session. * + * @param[in] useStatefulSha3ContentDesc + * Indicate if stateful SHA3 content desc + * is used for this session. + * * @param[in] pPrecompute For auth mode, this is the pointer * to the precompute data.
Otherwise this * should be set to NULL * * @param[out] pHashBlkSizeInBytes size in bytes of hash setup block * * @return void * *****************************************************************************/ void LacSymQat_HashContentDescInit(icp_qat_la_bulk_req_ftr_t *pMsg, CpaInstanceHandle instanceHandle, const CpaCySymHashSetupData *pHashSetupData, void *pHwBlockBase, Cpa32U hashBlkOffsetInHwBlock, icp_qat_fw_slice_t nextSlice, icp_qat_hw_auth_mode_t qatHashMode, CpaBoolean useSymConstantsTable, CpaBoolean useOptimisedContentDesc, + CpaBoolean useStatefulSha3ContentDesc, lac_sym_qat_hash_precompute_info_t *pPrecompute, Cpa32U *pHashBlkSizeInBytes); /** ****************************************************************************** * @ingroup LacSymQatHash * Calculate the size of the hash state prefix aad buffer * * @description * This function inspects the hash control block and based on the values * in the fields, it calculates the size of the hash state prefix aad * buffer. * * A partial packet processing request is possible at any stage during a * hash session. In this case, there will always be space for the hash * state storage field of the hash state prefix buffer. When there is * AAD data just the inner prefix AAD data field is used. * * @param[in] pMsg Pointer to the Request Message * * @param[out] pHashStateBuf Pointer to hash state prefix buffer info * structure. * * @return None * *****************************************************************************/ void LacSymQat_HashStatePrefixAadBufferSizeGet( icp_qat_la_bulk_req_ftr_t *pMsg, lac_sym_qat_hash_state_buffer_info_t *pHashStateBuf); /** ****************************************************************************** * @ingroup LacSymQatHash * Populate the fields of the hash state prefix buffer * * @description * This function populates the inner prefix/aad fields and/or the outer * prefix field of the hash state prefix buffer. * * @param[in] pHashStateBuf Pointer to hash state prefix buffer info * structure. * * @param[in] pMsg Pointer to the Request Message * * @param[in] pInnerPrefixAad Pointer to the Inner Prefix or Aad data. * This is NULL if the data size is 0 * * @param[in] innerPrefixSize Size of inner prefix/aad data in bytes * * @param[in] pOuterPrefix Pointer to the Outer Prefix data. This is * NULL where the data size is 0. * * @param[in] outerPrefixSize Size of the outer prefix data in bytes * * @return void * *****************************************************************************/ void LacSymQat_HashStatePrefixAadBufferPopulate( lac_sym_qat_hash_state_buffer_info_t *pHashStateBuf, icp_qat_la_bulk_req_ftr_t *pMsg, Cpa8U *pInnerPrefixAad, Cpa8U innerPrefixSize, Cpa8U *pOuterPrefix, Cpa8U outerPrefixSize); /** ****************************************************************************** * @ingroup LacSymQatHash * Populate the hash request params structure * * @description * This function is passed a pointer to the 128B Request block. * (This memory must be allocated prior to calling this function). It * populates the fields of this block using the parameters as described * below. It is also expected that this structure has been set to 0 * prior to calling this function. * * * @param[in] pReq Pointer to 128B request block. * * @param[in] authOffsetInBytes start offset of data that the digest is to * be computed on. * * @param[in] authLenInBytes Length of data the digest is calculated on * * @param[in] pService Pointer to service data * * @param[in] pHashStateBuf Pointer to hash state buffer info.
This * structure contains the pointers and sizes. * If there is no hash state prefix buffer * required, this parameter can be set to NULL * * @param[in] qatPacketType Packet type using QAT macros. The hash * state buffer pointer and state size will be * different depending on the packet type * * @param[in] hashResultSize Size of the final hash result in bytes. * * @param[in] digestVerify Indicates if verify is enabled or not * * @param[in] pAuthResult Virtual pointer to digest * * @return CPA_STATUS_SUCCESS or CPA_STATUS_FAIL * *****************************************************************************/ CpaStatus LacSymQat_HashRequestParamsPopulate( icp_qat_fw_la_bulk_req_t *pReq, Cpa32U authOffsetInBytes, Cpa32U authLenInBytes, sal_service_t *pService, lac_sym_qat_hash_state_buffer_info_t *pHashStateBuf, Cpa32U qatPacketType, Cpa32U hashResultSize, CpaBoolean digestVerify, Cpa8U *pAuthResult, CpaCySymHashAlgorithm alg, void *data); /** ****************************************************************************** * @ingroup LacSymQatHash * * * @description * This fn returns the QAT values for hash algorithm and nested fields * * * @param[in] pInstance Pointer to service instance. * * @param[in] qatHashMode value for hash mode on the fw qat *interface. * * @param[in] apiHashMode value for hash mode on the QA API. * * @param[in] apiHashAlgorithm value for hash algorithm on the QA API. * * @param[out] pQatAlgorithm Pointer to return fw qat value for *algorithm. * * @param[out] pQatNested Pointer to return fw qat value for nested. * * * @return * none * *****************************************************************************/ void LacSymQat_HashGetCfgData(CpaInstanceHandle pInstance, icp_qat_hw_auth_mode_t qatHashMode, CpaCySymHashMode apiHashMode, CpaCySymHashAlgorithm apiHashAlgorithm, icp_qat_hw_auth_algo_t *pQatAlgorithm, CpaBoolean *pQatNested); void LacSymQat_HashSetupReqParamsMetaData( icp_qat_la_bulk_req_ftr_t *pMsg, CpaInstanceHandle instanceHandle, const CpaCySymHashSetupData *pHashSetupData, CpaBoolean hashStateBuffer, icp_qat_hw_auth_mode_t qatHashMode, CpaBoolean digestVerify); #endif /* LAC_SYM_QAT_HASH_H */ diff --git a/sys/dev/qat/qat_api/common/crypto/sym/key/lac_sym_key.c b/sys/dev/qat/qat_api/common/crypto/sym/key/lac_sym_key.c index 2f27a1781876..604e5751fba9 100644 --- a/sys/dev/qat/qat_api/common/crypto/sym/key/lac_sym_key.c +++ b/sys/dev/qat/qat_api/common/crypto/sym/key/lac_sym_key.c @@ -1,3021 +1,3024 @@ /*************************************************************************** * * * ***************************************************************************/ /** ***************************************************************************** * @file lac_sym_key.c * * @ingroup LacSymKey * * This file contains the implementation of all keygen functionality * *****************************************************************************/ /* ******************************************************************************* * Include public/global header files ******************************************************************************* */ #include "cpa.h" #include "cpa_cy_key.h" #include "cpa_cy_im.h" /* ******************************************************************************* * Include private header files ******************************************************************************* */ #include "icp_accel_devices.h" #include "icp_adf_debug.h" #include "icp_adf_init.h" #include "icp_adf_transport.h" #include "qat_utils.h" #include "lac_log.h" #include 
"lac_hooks.h" #include "lac_sym.h" #include "lac_sym_qat_hash_defs_lookup.h" #include "lac_sym_qat.h" #include "lac_sal.h" #include "lac_sym_key.h" #include "lac_sal_types_crypto.h" #include "sal_service_state.h" #include "lac_sym_qat_key.h" #include "lac_sym_hash_defs.h" #include "sal_statistics.h" /* Number of statistics */ #define LAC_KEY_NUM_STATS (sizeof(CpaCyKeyGenStats64) / sizeof(Cpa64U)) #define LAC_KEY_STAT_INC(statistic, instanceHandle) \ do { \ sal_crypto_service_t *pService = NULL; \ pService = (sal_crypto_service_t *)instanceHandle; \ if (CPA_TRUE == \ pService->generic_service_info.stats \ ->bKeyGenStatsEnabled) { \ qatUtilsAtomicInc( \ &pService \ ->pLacKeyStats[offsetof(CpaCyKeyGenStats64, \ statistic) / \ sizeof(Cpa64U)]); \ } \ } while (0) /**< Macro to increment a Key stat (derives offset into array of atomics) */ #define LAC_KEY_STATS32_GET(keyStats, instanceHandle) \ do { \ int i; \ sal_crypto_service_t *pService = \ (sal_crypto_service_t *)instanceHandle; \ for (i = 0; i < LAC_KEY_NUM_STATS; i++) { \ ((Cpa32U *)&(keyStats))[i] = \ (Cpa32U)qatUtilsAtomicGet( \ &pService->pLacKeyStats[i]); \ } \ } while (0) /**< Macro to get all 32bit Key stats (from internal array of atomics) */ #define LAC_KEY_STATS64_GET(keyStats, instanceHandle) \ do { \ int i; \ sal_crypto_service_t *pService = \ (sal_crypto_service_t *)instanceHandle; \ for (i = 0; i < LAC_KEY_NUM_STATS; i++) { \ ((Cpa64U *)&(keyStats))[i] = \ qatUtilsAtomicGet(&pService->pLacKeyStats[i]); \ } \ } while (0) /**< Macro to get all 64bit Key stats (from internal array of atomics) */ #define IS_HKDF_UNSUPPORTED(cmdId, hkdfSupported) \ ((ICP_QAT_FW_LA_CMD_HKDF_EXTRACT <= cmdId && \ ICP_QAT_FW_LA_CMD_HKDF_EXTRACT_AND_EXPAND_LABEL >= cmdId) && \ !hkdfSupported) /**< macro to check whether the HKDF algorithm can be \ supported on the device */ /* Sublabel for HKDF TLS Key Generation, as defined in RFC8446. */ const static Cpa8U key256[HKDF_SUB_LABEL_KEY_LENGTH] = { 0, 16, 9, 't', 'l', 's', '1', '3', ' ', 'k', 'e', 'y', 0 }; const static Cpa8U key384[HKDF_SUB_LABEL_KEY_LENGTH] = { 0, 32, 9, 't', 'l', 's', '1', '3', ' ', 'k', 'e', 'y', 0 }; const static Cpa8U keyChaChaPoly[HKDF_SUB_LABEL_KEY_LENGTH] = { 0, 32, 9, 't', 'l', 's', '1', '3', ' ', 'k', 'e', 'y', 0 }; /* Sublabel for HKDF TLS IV key Generation, as defined in RFC8446. */ const static Cpa8U iv256[HKDF_SUB_LABEL_IV_LENGTH] = { 0, 12, 8, 't', 'l', 's', '1', '3', ' ', 'i', 'v', 0 }; const static Cpa8U iv384[HKDF_SUB_LABEL_IV_LENGTH] = { 0, 12, 8, 't', 'l', 's', '1', '3', ' ', 'i', 'v', 0 }; /* Sublabel for HKDF TLS RESUMPTION key Generation, as defined in RFC8446. */ const static Cpa8U resumption256[HKDF_SUB_LABEL_RESUMPTION_LENGTH] = { 0, 32, 16, 't', 'l', 's', '1', '3', ' ', 'r', 'e', 's', 'u', 'm', 'p', 't', 'i', 'o', 'n', 0 }; const static Cpa8U resumption384[HKDF_SUB_LABEL_RESUMPTION_LENGTH] = { 0, 48, 16, 't', 'l', 's', '1', '3', ' ', 'r', 'e', 's', 'u', 'm', 'p', 't', 'i', 'o', 'n', 0 }; /* Sublabel for HKDF TLS FINISHED key Generation, as defined in RFC8446. 
*/ const static Cpa8U finished256[HKDF_SUB_LABEL_FINISHED_LENGTH] = { 0, 32, 14, 't', 'l', 's', '1', '3', ' ', 'f', 'i', 'n', 'i', 's', 'h', 'e', 'd', 0 }; const static Cpa8U finished384[HKDF_SUB_LABEL_FINISHED_LENGTH] = { 0, 48, 14, 't', 'l', 's', '1', '3', ' ', 'f', 'i', 'n', 'i', 's', 'h', 'e', 'd', 0 }; /** ****************************************************************************** * @ingroup LacSymKey * SSL/TLS stat type * * @description * This enum determines which stat should be incremented *****************************************************************************/ typedef enum { LAC_KEY_REQUESTS = 0, /**< Key requests sent */ LAC_KEY_REQUEST_ERRORS, /**< Key requests errors */ LAC_KEY_COMPLETED, /**< Key requests which received responses */ LAC_KEY_COMPLETED_ERRORS /**< Key requests which received responses with errors */ } lac_key_stat_type_t; /*** Local functions prototypes ***/ static void LacSymKey_MgfHandleResponse(icp_qat_fw_la_cmd_id_t lacCmdId, void *pOpaqueData, icp_qat_fw_comn_flags cmnRespFlags); static CpaStatus LacSymKey_MgfSync(const CpaInstanceHandle instanceHandle, const CpaCyGenFlatBufCbFunc pKeyGenCb, void *pCallbackTag, const void *pKeyGenMgfOpData, CpaFlatBuffer *pGeneratedMaskBuffer, CpaBoolean bIsExtRequest); static void LacSymKey_SslTlsHandleResponse(icp_qat_fw_la_cmd_id_t lacCmdId, void *pOpaqueData, icp_qat_fw_comn_flags cmnRespFlags); static CpaStatus LacSymKey_SslTlsSync(CpaInstanceHandle instanceHandle, const CpaCyGenFlatBufCbFunc pKeyGenCb, void *pCallbackTag, icp_qat_fw_la_cmd_id_t lacCmdId, void *pKeyGenSslTlsOpData, Cpa8U hashAlgorithm, CpaFlatBuffer *pKeyGenOutpuData); /*** Implementation ***/ /** ****************************************************************************** * @ingroup LacSymKey * Get the instance handle. Support single handle. * @param[in] instanceHandle_in user supplied handle. * @retval CpaInstanceHandle the instance handle */ static CpaInstanceHandle LacKey_GetHandle(CpaInstanceHandle instanceHandle_in) { CpaInstanceHandle instanceHandle = NULL; if (CPA_INSTANCE_HANDLE_SINGLE == instanceHandle_in) { instanceHandle = Lac_GetFirstHandle(SAL_SERVICE_TYPE_CRYPTO_SYM); } else { instanceHandle = instanceHandle_in; } return instanceHandle; } /** ******************************************************************************* * @ingroup LacSymKey * Perform SSL/TLS key gen operation * * @description * Perform SSL/TLS key gen operation * * @param[in] instanceHandle QAT device handle. * @param[in] pKeyGenCb Pointer to callback function to be invoked * when the operation is complete. * @param[in] pCallbackTag Opaque User Data for this specific call. * @param[in] lacCmdId Lac command ID (identify SSL & TLS ops) * @param[in] pKeyGenSslTlsOpData Structure containing all the data needed to * perform the SSL/TLS key generation * operation. * @param[in] hashAlgorithm Specifies the hash algorithm to use. * According to RFC5246, this should be * "SHA-256 or a stronger standard hash * function." * @param[out] pKeyGenOutputData pointer to where output result should be * written * * @retval CPA_STATUS_SUCCESS Function executed successfully. * @retval CPA_STATUS_FAIL Function failed. * @retval CPA_STATUS_RETRY Function should be retried. * @retval CPA_STATUS_INVALID_PARAM Invalid parameter passed in. * @retval CPA_STATUS_RESOURCE Error related to system resources. 
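 * @note For the TLS 1.3 HKDF command IDs the hashAlgorithm argument
 *       actually carries a CpaCyKeyHKDFCipherSuite value (hence the
 *       hashAlgCipher naming in the implementation below); for the
 *       SSL3/TLS 1.0-1.2 commands it is a CpaCySymHashAlgorithm.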
* *****************************************************************************/ static CpaStatus LacSymKey_KeyGenSslTls_GenCommon(CpaInstanceHandle instanceHandle, const CpaCyGenFlatBufCbFunc pKeyGenCb, void *pCallbackTag, icp_qat_fw_la_cmd_id_t lacCmdId, void *pKeyGenSslTlsOpData, Cpa8U hashAlgorithm, CpaFlatBuffer *pKeyGenOutputData); /** ****************************************************************************** * @ingroup LacSymKey * Increment stat for TLS or SSL operation * * @description * This is a generic function to update the stats for either a TLS or SSL * operation. * * @param[in] lacCmdId Indicate SSL or TLS operations * @param[in] statType Statistics Type * @param[in] instanceHandle Instance Handle * * @return None * *****************************************************************************/ static void LacKey_StatsInc(icp_qat_fw_la_cmd_id_t lacCmdId, lac_key_stat_type_t statType, CpaInstanceHandle instanceHandle) { if (ICP_QAT_FW_LA_CMD_SSL3_KEY_DERIVE == lacCmdId) { switch (statType) { case LAC_KEY_REQUESTS: LAC_KEY_STAT_INC(numSslKeyGenRequests, instanceHandle); break; case LAC_KEY_REQUEST_ERRORS: LAC_KEY_STAT_INC(numSslKeyGenRequestErrors, instanceHandle); break; case LAC_KEY_COMPLETED: LAC_KEY_STAT_INC(numSslKeyGenCompleted, instanceHandle); break; case LAC_KEY_COMPLETED_ERRORS: LAC_KEY_STAT_INC(numSslKeyGenCompletedErrors, instanceHandle); break; default: QAT_UTILS_LOG("Invalid statistics type\n"); break; } } else /* TLS v1.0/1.1 and 1.2 */ { switch (statType) { case LAC_KEY_REQUESTS: LAC_KEY_STAT_INC(numTlsKeyGenRequests, instanceHandle); break; case LAC_KEY_REQUEST_ERRORS: LAC_KEY_STAT_INC(numTlsKeyGenRequestErrors, instanceHandle); break; case LAC_KEY_COMPLETED: LAC_KEY_STAT_INC(numTlsKeyGenCompleted, instanceHandle); break; case LAC_KEY_COMPLETED_ERRORS: LAC_KEY_STAT_INC(numTlsKeyGenCompletedErrors, instanceHandle); break; default: QAT_UTILS_LOG("Invalid statistics type\n"); break; } } } void LacKeygen_StatsShow(CpaInstanceHandle instanceHandle) { CpaCyKeyGenStats64 keyStats = { 0 }; LAC_KEY_STATS64_GET(keyStats, instanceHandle); QAT_UTILS_LOG(SEPARATOR BORDER " Key Stats: " BORDER "\n" SEPARATOR); QAT_UTILS_LOG(BORDER " SSL Key Requests: %16llu " BORDER "\n" BORDER " SSL Key Request Errors: %16llu " BORDER "\n" BORDER " SSL Key Completed %16llu " BORDER "\n" BORDER " SSL Key Complete Errors: %16llu " BORDER "\n" SEPARATOR, (unsigned long long)keyStats.numSslKeyGenRequests, (unsigned long long)keyStats.numSslKeyGenRequestErrors, (unsigned long long)keyStats.numSslKeyGenCompleted, (unsigned long long)keyStats.numSslKeyGenCompletedErrors); QAT_UTILS_LOG(BORDER " TLS Key Requests: %16llu " BORDER "\n" BORDER " TLS Key Request Errors: %16llu " BORDER "\n" BORDER " TLS Key Completed %16llu " BORDER "\n" BORDER " TLS Key Complete Errors: %16llu " BORDER "\n" SEPARATOR, (unsigned long long)keyStats.numTlsKeyGenRequests, (unsigned long long)keyStats.numTlsKeyGenRequestErrors, (unsigned long long)keyStats.numTlsKeyGenCompleted, (unsigned long long)keyStats.numTlsKeyGenCompletedErrors); QAT_UTILS_LOG(BORDER " MGF Key Requests: %16llu " BORDER "\n" BORDER " MGF Key Request Errors: %16llu " BORDER "\n" BORDER " MGF Key Completed %16llu " BORDER "\n" BORDER " MGF Key Complete Errors: %16llu " BORDER "\n" SEPARATOR, (unsigned long long)keyStats.numMgfKeyGenRequests, (unsigned long long)keyStats.numMgfKeyGenRequestErrors, (unsigned long long)keyStats.numMgfKeyGenCompleted, (unsigned long long)keyStats.numMgfKeyGenCompletedErrors); } /** @ingroup LacSymKey */ CpaStatus 
cpaCyKeyGenQueryStats(CpaInstanceHandle instanceHandle_in, struct _CpaCyKeyGenStats *pSymKeyStats) { CpaInstanceHandle instanceHandle = NULL; if (CPA_INSTANCE_HANDLE_SINGLE == instanceHandle_in) { instanceHandle = Lac_GetFirstHandle(SAL_SERVICE_TYPE_CRYPTO_SYM); } else { instanceHandle = instanceHandle_in; } LAC_CHECK_INSTANCE_HANDLE(instanceHandle); SAL_CHECK_INSTANCE_TYPE(instanceHandle, (SAL_SERVICE_TYPE_CRYPTO | SAL_SERVICE_TYPE_CRYPTO_SYM)); LAC_CHECK_NULL_PARAM(pSymKeyStats); SAL_RUNNING_CHECK(instanceHandle); LAC_KEY_STATS32_GET(*pSymKeyStats, instanceHandle); return CPA_STATUS_SUCCESS; } /** @ingroup LacSymKey */ CpaStatus cpaCyKeyGenQueryStats64(CpaInstanceHandle instanceHandle_in, CpaCyKeyGenStats64 *pSymKeyStats) { CpaInstanceHandle instanceHandle = NULL; if (CPA_INSTANCE_HANDLE_SINGLE == instanceHandle_in) { instanceHandle = Lac_GetFirstHandle(SAL_SERVICE_TYPE_CRYPTO_SYM); } else { instanceHandle = instanceHandle_in; } LAC_CHECK_INSTANCE_HANDLE(instanceHandle); SAL_CHECK_INSTANCE_TYPE(instanceHandle, (SAL_SERVICE_TYPE_CRYPTO | SAL_SERVICE_TYPE_CRYPTO_SYM)); LAC_CHECK_NULL_PARAM(pSymKeyStats); SAL_RUNNING_CHECK(instanceHandle); LAC_KEY_STATS64_GET(*pSymKeyStats, instanceHandle); return CPA_STATUS_SUCCESS; } /** ****************************************************************************** * @ingroup LacSymKey * Return the size of the digest for a specific hash algorithm. * @description * Return the expected digest size based on the hash algorithm submitted. * The supported values are SHA256, SHA384, SHA512 and SM3. * * @param[in] hashAlgorithm either SHA256, SHA384, SHA512 or SM3. * @return the expected size or 0 for an invalid hash. * *****************************************************************************/ static Cpa32U getDigestSizeFromHashAlgo(CpaCySymHashAlgorithm hashAlgorithm) { switch (hashAlgorithm) { case CPA_CY_SYM_HASH_SHA256: return LAC_HASH_SHA256_DIGEST_SIZE; case CPA_CY_SYM_HASH_SHA384: return LAC_HASH_SHA384_DIGEST_SIZE; case CPA_CY_SYM_HASH_SHA512: return LAC_HASH_SHA512_DIGEST_SIZE; case CPA_CY_SYM_HASH_SM3: return LAC_HASH_SM3_DIGEST_SIZE; default: return 0; } } /** ****************************************************************************** * @ingroup LacSymKey * Return the hash algorithm for a specific cipher. * @description * Return the hash algorithm related to the cipher suite. * Supported hashes are SHA256 and SHA384. * * @param[in] cipherSuite AES_128_GCM, AES_256_GCM, AES_128_CCM, * and CHACHA20_POLY1305. * @return the expected hash algorithm or 0 for an invalid cipher. * *****************************************************************************/ static CpaCySymHashAlgorithm getHashAlgorithmFromCipherSuiteHKDF(CpaCyKeyHKDFCipherSuite cipherSuite) { switch (cipherSuite) { case CPA_CY_HKDF_TLS_AES_128_GCM_SHA256: /* Fall through */ case CPA_CY_HKDF_TLS_CHACHA20_POLY1305_SHA256: case CPA_CY_HKDF_TLS_AES_128_CCM_SHA256: case CPA_CY_HKDF_TLS_AES_128_CCM_8_SHA256: return CPA_CY_SYM_HASH_SHA256; case CPA_CY_HKDF_TLS_AES_256_GCM_SHA384: return CPA_CY_SYM_HASH_SHA384; default: return 0; } } /** ****************************************************************************** * @ingroup LacSymKey * Return the digest size for a cipher. * @description * Return the output key size of a specific cipher, for the specified sub * label. * * @param[in] cipherSuite = AES_128_GCM, AES_256_GCM, AES_128_CCM, * and CHACHA20_POLY1305. * subLabels = KEY, IV, RESUMPTION, and FINISHED. * @return the expected digest size of the cipher.
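 * @note Worked example (the column order - digest, then the KEY, IV,
 *       RESUMPTION and FINISHED sublabels - is inferred from the values;
 *       only LAC_KEY_HKDF_DIGESTS is referenced by name in this file):
 *       the AES_256_GCM_SHA384 row { 48, 32, 12, 48, 48 } encodes a
 *       48-byte SHA-384 digest, a 32-byte AES-256 key, a 12-byte GCM IV
 *       and 48-byte resumption/finished secrets.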
* *****************************************************************************/ static const Cpa32U cipherSuiteHKDFHashSizes [LAC_KEY_HKDF_CIPHERS_MAX][LAC_KEY_HKDF_SUBLABELS_MAX] = { {}, /* Not used */ { 32, 16, 12, 32, 32 }, /* AES_128_GCM_SHA256 */ { 48, 32, 12, 48, 48 }, /* AES_256_GCM_SHA384 */ { 32, 32, 12, 32, 32 }, /* CHACHA20_POLY1305_SHA256 */ { 32, 16, 12, 32, 32 }, /* AES_128_CCM_SHA256 */ { 32, 16, 12, 32, 32 } /* AES_128_CCM_8_SHA256 */ }; /** ****************************************************************************** * @ingroup LacSymKey * Key Generation MGF response handler * * @description * Handles Key Generation MGF response messages from the QAT. * * @param[in] lacCmdId Command id of the original request * @param[in] pOpaqueData Pointer to opaque data that was in request * @param[in] cmnRespFlags Indicates whether request succeeded * * @return void * *****************************************************************************/ static void LacSymKey_MgfHandleResponse(icp_qat_fw_la_cmd_id_t lacCmdId, void *pOpaqueData, icp_qat_fw_comn_flags cmnRespFlags) { CpaCyKeyGenMgfOpData *pMgfOpData = NULL; lac_sym_key_cookie_t *pCookie = NULL; CpaCyGenFlatBufCbFunc pKeyGenMgfCb = NULL; void *pCallbackTag = NULL; CpaFlatBuffer *pGeneratedKeyBuffer = NULL; CpaStatus status = CPA_STATUS_SUCCESS; CpaBoolean respStatusOk = (ICP_QAT_FW_COMN_STATUS_FLAG_OK == ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(cmnRespFlags)) ? CPA_TRUE : CPA_FALSE; pCookie = (lac_sym_key_cookie_t *)pOpaqueData; if (CPA_TRUE == respStatusOk) { status = CPA_STATUS_SUCCESS; LAC_KEY_STAT_INC(numMgfKeyGenCompleted, pCookie->instanceHandle); } else { status = CPA_STATUS_FAIL; LAC_KEY_STAT_INC(numMgfKeyGenCompletedErrors, pCookie->instanceHandle); } pKeyGenMgfCb = (CpaCyGenFlatBufCbFunc)(pCookie->pKeyGenCb); pMgfOpData = pCookie->pKeyGenOpData; pCallbackTag = pCookie->pCallbackTag; pGeneratedKeyBuffer = pCookie->pKeyGenOutputData; Lac_MemPoolEntryFree(pCookie); (*pKeyGenMgfCb)(pCallbackTag, status, pMgfOpData, pGeneratedKeyBuffer); } /** ****************************************************************************** * @ingroup LacSymKey * Synchronous mode of operation wrapper function * * @description * Wrapper function to implement synchronous mode of operation for * cpaCyKeyGenMgf and cpaCyKeyGenMgfExt function. * * @param[in] instanceHandle Instance handle * @param[in] pKeyGenCb Internal callback function pointer * @param[in] pCallbackTag Callback tag * @param[in] pKeyGenMgfOpData Pointer to user provided Op Data structure * @param[in] pGeneratedMaskBuffer Pointer to a buffer where generated mask * will be stored * @param[in] bIsExtRequest Indicates origin of function call; * if CPA_TRUE then the call comes from * cpaCyKeyGenMgfExt function, otherwise * from cpaCyKeyGenMgf * * @retval CPA_STATUS_SUCCESS Function executed successfully. * @retval CPA_STATUS_FAIL Function failed. * @retval CPA_STATUS_RETRY Function should be retried. * @retval CPA_STATUS_INVALID_PARAM Invalid parameter passed in. * @retval CPA_STATUS_RESOURCE Error related to system resources. 
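 * @note This wrapper is reached when the public MGF APIs are called with
 *       a NULL callback; it blocks on an internal sync cookie until the
 *       response arrives or LAC_SYM_SYNC_CALLBACK_TIMEOUT expires, in
 *       which case the timeout status is returned instead.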
* *****************************************************************************/ static CpaStatus LacSymKey_MgfSync(const CpaInstanceHandle instanceHandle, const CpaCyGenFlatBufCbFunc pKeyGenCb, void *pCallbackTag, const void *pKeyGenMgfOpData, CpaFlatBuffer *pGeneratedMaskBuffer, CpaBoolean bIsExtRequest) { CpaStatus status = CPA_STATUS_SUCCESS; lac_sync_op_data_t *pSyncCallbackData = NULL; status = LacSync_CreateSyncCookie(&pSyncCallbackData); if (CPA_STATUS_SUCCESS == status) { if (CPA_TRUE == bIsExtRequest) { status = cpaCyKeyGenMgfExt( instanceHandle, LacSync_GenFlatBufCb, pSyncCallbackData, (const CpaCyKeyGenMgfOpDataExt *)pKeyGenMgfOpData, pGeneratedMaskBuffer); } else { status = cpaCyKeyGenMgf(instanceHandle, LacSync_GenFlatBufCb, pSyncCallbackData, (const CpaCyKeyGenMgfOpData *) pKeyGenMgfOpData, pGeneratedMaskBuffer); } } else { /* Failure allocating sync cookie */ LAC_KEY_STAT_INC(numMgfKeyGenRequestErrors, instanceHandle); return status; } if (CPA_STATUS_SUCCESS == status) { CpaStatus syncStatus = CPA_STATUS_SUCCESS; syncStatus = LacSync_WaitForCallback(pSyncCallbackData, LAC_SYM_SYNC_CALLBACK_TIMEOUT, &status, NULL); /* If callback doesn't come back */ if (CPA_STATUS_SUCCESS != syncStatus) { LAC_KEY_STAT_INC(numMgfKeyGenCompletedErrors, instanceHandle); LAC_LOG_ERROR("Callback timed out"); status = syncStatus; } } else { /* As the Request was not sent the Callback will never * be called, so need to indicate that we're finished * with cookie so it can be destroyed. */ LacSync_SetSyncCookieComplete(pSyncCallbackData); } LacSync_DestroySyncCookie(&pSyncCallbackData); return status; } /** ****************************************************************************** * @ingroup LacSymKey * Perform MGF key gen operation * * @description * This function performs MGF key gen operation. It is common for requests * coming from both cpaCyKeyGenMgf and cpaCyKeyGenMgfExt QAT API * functions. * * @param[in] instanceHandle Instance handle * @param[in] pKeyGenCb Pointer to callback function to be invoked * when the operation is complete. * @param[in] pCallbackTag Opaque User Data for this specific call. * @param[in] pOpData Pointer to the Op Data structure provided by * the user in API function call. For calls * originating from cpaCyKeyGenMgfExt it will * point to CpaCyKeyGenMgfOpDataExt type of * structure while for calls originating from * cpaCyKeyGenMgf it will point to * CpaCyKeyGenMgfOpData type of structure. * @param[in] pKeyGenMgfOpData Pointer to the user provided * CpaCyKeyGenMgfOpData structure. For calls * originating from cpaCyKeyGenMgf it will * point to the same structure as pOpData * parameter; for calls originating from * cpaCyKeyGenMgfExt it will point to the * baseOpData member of the * CpaCyKeyGenMgfOpDataExt structure passed in * as a parameter to the API function call. * @param[in] pGeneratedMaskBuffer Pointer to a buffer where generated mask * will be stored * @param[in] hashAlgorithm Indicates which hash algorithm is to be used * to perform MGF key gen operation. For calls * originating from cpaCyKeyGenMgf it will * always be CPA_CY_SYM_HASH_SHA1. * * @retval CPA_STATUS_SUCCESS Function executed successfully. * @retval CPA_STATUS_FAIL Function failed. * @retval CPA_STATUS_RETRY Function should be retried. * @retval CPA_STATUS_INVALID_PARAM Invalid parameter passed in. * @retval CPA_STATUS_RESOURCE Error related to system resources. 
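 * @note Typical caller usage, in sketch form (assumptions: pSeed and
 *       pMask point at suitably allocated, DMA-addressable buffers;
 *       error handling omitted). A NULL callback gives a blocking call:
 * @code
 *     CpaCyKeyGenMgfOpData mgfOp = { 0 };
 *     CpaFlatBuffer mask = { 0 };
 *
 *     mgfOp.seedBuffer.pData = pSeed;           // assumed buffer
 *     mgfOp.seedBuffer.dataLenInBytes = seedLen;
 *     mgfOp.maskLenInBytes = 64;                // requested mask length
 *     mask.pData = pMask;                       // assumed buffer
 *     mask.dataLenInBytes = 64;                 // must hold maskLenInBytes
 *
 *     status = cpaCyKeyGenMgf(instanceHandle, NULL, NULL, &mgfOp, &mask);
 * @endcode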
* *****************************************************************************/ static CpaStatus LacSymKey_MgfCommon(const CpaInstanceHandle instanceHandle, const CpaCyGenFlatBufCbFunc pKeyGenCb, void *pCallbackTag, const void *pOpData, const CpaCyKeyGenMgfOpData *pKeyGenMgfOpData, CpaFlatBuffer *pGeneratedMaskBuffer, CpaCySymHashAlgorithm hashAlgorithm) { CpaStatus status = CPA_STATUS_SUCCESS; icp_qat_fw_la_bulk_req_t keyGenReq = { { 0 } }; icp_qat_la_bulk_req_hdr_t keyGenReqHdr = { { 0 } }; icp_qat_fw_la_key_gen_common_t keyGenReqMid = { { 0 } }; icp_qat_la_bulk_req_ftr_t keyGenReqFtr = { { { 0 } } }; Cpa8U *pMsgDummy = NULL; Cpa8U *pCacheDummyHdr = NULL; Cpa8U *pCacheDummyMid = NULL; Cpa8U *pCacheDummyFtr = NULL; sal_qat_content_desc_info_t contentDescInfo = { 0 }; lac_sym_key_cookie_t *pCookie = NULL; lac_sym_cookie_t *pSymCookie = NULL; sal_crypto_service_t *pService = NULL; Cpa64U inputPhysAddr = 0; Cpa64U outputPhysAddr = 0; /* Structure initializer is supported by C99, but it is * not supported by some former Intel compiler. */ CpaCySymHashSetupData hashSetupData = { 0 }; Cpa32U hashBlkSizeInBytes = 0; lac_sym_qat_hash_alg_info_t *pHashAlgInfo = NULL; icp_qat_fw_serv_specif_flags laCmdFlags = 0; icp_qat_fw_comn_flags cmnRequestFlags = ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_PTR_TYPE_FLAT, QAT_COMN_CD_FLD_TYPE_64BIT_ADR); pService = (sal_crypto_service_t *)instanceHandle; LAC_CHECK_INSTANCE_HANDLE(instanceHandle); SAL_CHECK_INSTANCE_TYPE(instanceHandle, (SAL_SERVICE_TYPE_CRYPTO | SAL_SERVICE_TYPE_CRYPTO_SYM)); SAL_RUNNING_CHECK(instanceHandle); LAC_CHECK_NULL_PARAM(pOpData); LAC_CHECK_NULL_PARAM(pKeyGenMgfOpData); LAC_CHECK_NULL_PARAM(pGeneratedMaskBuffer); LAC_CHECK_NULL_PARAM(pGeneratedMaskBuffer->pData); LAC_CHECK_NULL_PARAM(pKeyGenMgfOpData->seedBuffer.pData); /* Maximum seed length for MGF1 request */ if (pKeyGenMgfOpData->seedBuffer.dataLenInBytes > ICP_QAT_FW_LA_MGF_SEED_LEN_MAX) { LAC_INVALID_PARAM_LOG("seedBuffer.dataLenInBytes"); return CPA_STATUS_INVALID_PARAM; } /* Maximum mask length for MGF1 request */ if (pKeyGenMgfOpData->maskLenInBytes > ICP_QAT_FW_LA_MGF_MASK_LEN_MAX) { LAC_INVALID_PARAM_LOG("maskLenInBytes"); return CPA_STATUS_INVALID_PARAM; } /* check for enough space in the flat buffer */ if (pKeyGenMgfOpData->maskLenInBytes > pGeneratedMaskBuffer->dataLenInBytes) { LAC_INVALID_PARAM_LOG("pGeneratedMaskBuffer.dataLenInBytes"); return CPA_STATUS_INVALID_PARAM; } /* Get hash alg info */ LacSymQat_HashAlgLookupGet(instanceHandle, hashAlgorithm, &pHashAlgInfo); /* Allocate the cookie */ pCookie = (lac_sym_key_cookie_t *)Lac_MemPoolEntryAlloc( pService->lac_sym_cookie_pool); if (NULL == pCookie) { LAC_LOG_ERROR("Cannot get mem pool entry"); status = CPA_STATUS_RESOURCE; } else if ((void *)CPA_STATUS_RETRY == pCookie) { pCookie = NULL; status = CPA_STATUS_RETRY; } else { pSymCookie = (lac_sym_cookie_t *)pCookie; } if (CPA_STATUS_SUCCESS == status) { /* populate the cookie */ pCookie->instanceHandle = instanceHandle; pCookie->pCallbackTag = pCallbackTag; pCookie->pKeyGenOpData = (void *)LAC_CONST_PTR_CAST(pOpData); pCookie->pKeyGenCb = pKeyGenCb; pCookie->pKeyGenOutputData = pGeneratedMaskBuffer; hashSetupData.hashAlgorithm = hashAlgorithm; hashSetupData.hashMode = CPA_CY_SYM_HASH_MODE_PLAIN; hashSetupData.digestResultLenInBytes = pHashAlgInfo->digestLength; /* Populate the CD ctrl Block (LW 27 - LW 31) * and the CD Hash HW setup block */ LacSymQat_HashContentDescInit( &(keyGenReqFtr), instanceHandle, &hashSetupData, /* point to base of hw setup block */ (Cpa8U 
*)pCookie->contentDesc, LAC_SYM_KEY_NO_HASH_BLK_OFFSET_QW, ICP_QAT_FW_SLICE_DRAM_WR, ICP_QAT_HW_AUTH_MODE0, /* just a plain hash */ CPA_FALSE, /* Not using sym Constants Table in Shared SRAM - */ + */ CPA_FALSE, /* not using the optimised Content Desc */ + CPA_FALSE, /* Not using the stateful SHA3 Content Desc */ NULL, &hashBlkSizeInBytes); /* Populate the Req param LW 14-26 */ LacSymQat_KeyMgfRequestPopulate( &keyGenReqHdr, &keyGenReqMid, pKeyGenMgfOpData->seedBuffer.dataLenInBytes, pKeyGenMgfOpData->maskLenInBytes, (Cpa8U)pHashAlgInfo->digestLength); contentDescInfo.pData = pCookie->contentDesc; contentDescInfo.hardwareSetupBlockPhys = LAC_MEM_CAST_PTR_TO_UINT64( pSymCookie->keyContentDescPhyAddr); contentDescInfo.hwBlkSzQuadWords = LAC_BYTES_TO_QUADWORDS(hashBlkSizeInBytes); /* Populate common request fields */ inputPhysAddr = LAC_MEM_CAST_PTR_TO_UINT64(LAC_OS_VIRT_TO_PHYS_EXTERNAL( pService->generic_service_info, pKeyGenMgfOpData->seedBuffer.pData)); if (inputPhysAddr == 0) { LAC_LOG_ERROR( "Unable to get the seed buffer physical address"); status = CPA_STATUS_FAIL; } outputPhysAddr = LAC_MEM_CAST_PTR_TO_UINT64( LAC_OS_VIRT_TO_PHYS_EXTERNAL(pService->generic_service_info, pGeneratedMaskBuffer->pData)); if (outputPhysAddr == 0) { LAC_LOG_ERROR( "Unable to get the physical address of the mask"); status = CPA_STATUS_FAIL; } } if (CPA_STATUS_SUCCESS == status) { /* Make up the full keyGenReq struct from its constituents */ pMsgDummy = (Cpa8U *)&(keyGenReq); pCacheDummyHdr = (Cpa8U *)&(keyGenReqHdr); pCacheDummyMid = (Cpa8U *)&(keyGenReqMid); pCacheDummyFtr = (Cpa8U *)&(keyGenReqFtr); memcpy(pMsgDummy, pCacheDummyHdr, (LAC_LONG_WORD_IN_BYTES * LAC_SIZE_OF_CACHE_HDR_IN_LW)); memset((pMsgDummy + (LAC_LONG_WORD_IN_BYTES * LAC_SIZE_OF_CACHE_HDR_IN_LW)), 0, (LAC_LONG_WORD_IN_BYTES * LAC_SIZE_OF_CACHE_TO_CLEAR_IN_LW)); memcpy(pMsgDummy + (LAC_LONG_WORD_IN_BYTES * LAC_START_OF_CACHE_MID_IN_LW), pCacheDummyMid, (LAC_LONG_WORD_IN_BYTES * LAC_SIZE_OF_CACHE_MID_IN_LW)); memcpy(pMsgDummy + (LAC_LONG_WORD_IN_BYTES * LAC_START_OF_CACHE_FTR_IN_LW), pCacheDummyFtr, (LAC_LONG_WORD_IN_BYTES * LAC_SIZE_OF_CACHE_FTR_IN_LW)); SalQatMsg_ContentDescHdrWrite((icp_qat_fw_comn_req_t *)&( keyGenReq), &(contentDescInfo)); SalQatMsg_CmnHdrWrite((icp_qat_fw_comn_req_t *)&keyGenReq, ICP_QAT_FW_COMN_REQ_CPM_FW_LA, ICP_QAT_FW_LA_CMD_MGF1, cmnRequestFlags, laCmdFlags); /* * MGF uses a flat buffer but we can use zero for source and * dest length because the firmware will use the seed length, * hash length and mask length to find source length. 
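	 * For reference, standard MGF1 (RFC 8017) expands the seed as
	 *   mask = Hash(seed || C0) || Hash(seed || C1) || ...
	 * with 4-byte big-endian counters, truncated to maskLenInBytes,
	 * so the seed, digest and mask lengths fully determine how much
	 * data is read and written.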
*/ SalQatMsg_CmnMidWrite((icp_qat_fw_la_bulk_req_t *)&(keyGenReq), pCookie, LAC_SYM_KEY_QAT_PTR_TYPE, inputPhysAddr, outputPhysAddr, 0, 0); /* Send to QAT */ status = icp_adf_transPutMsg(pService->trans_handle_sym_tx, (void *)&(keyGenReq), LAC_QAT_SYM_REQ_SZ_LW); } if (CPA_STATUS_SUCCESS == status) { /* Update stats */ LAC_KEY_STAT_INC(numMgfKeyGenRequests, instanceHandle); } else { LAC_KEY_STAT_INC(numMgfKeyGenRequestErrors, instanceHandle); /* clean up memory */ if (NULL != pCookie) { Lac_MemPoolEntryFree(pCookie); } } return status; } /** * cpaCyKeyGenMgf */ CpaStatus cpaCyKeyGenMgf(const CpaInstanceHandle instanceHandle_in, const CpaCyGenFlatBufCbFunc pKeyGenCb, void *pCallbackTag, const CpaCyKeyGenMgfOpData *pKeyGenMgfOpData, CpaFlatBuffer *pGeneratedMaskBuffer) { CpaInstanceHandle instanceHandle = NULL; if (CPA_INSTANCE_HANDLE_SINGLE == instanceHandle_in) { instanceHandle = Lac_GetFirstHandle(SAL_SERVICE_TYPE_CRYPTO_SYM); } else { instanceHandle = instanceHandle_in; } /* If synchronous Operation */ if (NULL == pKeyGenCb) { return LacSymKey_MgfSync(instanceHandle, pKeyGenCb, pCallbackTag, (const void *)pKeyGenMgfOpData, pGeneratedMaskBuffer, CPA_FALSE); } /* Asynchronous Operation */ return LacSymKey_MgfCommon(instanceHandle, pKeyGenCb, pCallbackTag, (const void *)pKeyGenMgfOpData, pKeyGenMgfOpData, pGeneratedMaskBuffer, CPA_CY_SYM_HASH_SHA1); } /** * cpaCyKeyGenMgfExt */ CpaStatus cpaCyKeyGenMgfExt(const CpaInstanceHandle instanceHandle_in, const CpaCyGenFlatBufCbFunc pKeyGenCb, void *pCallbackTag, const CpaCyKeyGenMgfOpDataExt *pKeyGenMgfOpDataExt, CpaFlatBuffer *pGeneratedMaskBuffer) { CpaInstanceHandle instanceHandle = NULL; if (CPA_INSTANCE_HANDLE_SINGLE == instanceHandle_in) { instanceHandle = Lac_GetFirstHandle(SAL_SERVICE_TYPE_CRYPTO_SYM); } else { instanceHandle = instanceHandle_in; } /* If synchronous Operation */ if (NULL == pKeyGenCb) { return LacSymKey_MgfSync(instanceHandle, pKeyGenCb, pCallbackTag, (const void *)pKeyGenMgfOpDataExt, pGeneratedMaskBuffer, CPA_TRUE); } /* Param check specific for Ext function, rest of parameters validated * in LacSymKey_MgfCommon */ LAC_CHECK_NULL_PARAM(pKeyGenMgfOpDataExt); if (CPA_CY_SYM_HASH_MD5 > pKeyGenMgfOpDataExt->hashAlgorithm || CPA_CY_SYM_HASH_SHA512 < pKeyGenMgfOpDataExt->hashAlgorithm) { LAC_INVALID_PARAM_LOG("hashAlgorithm"); return CPA_STATUS_INVALID_PARAM; } /* Asynchronous Operation */ return LacSymKey_MgfCommon(instanceHandle, pKeyGenCb, pCallbackTag, (const void *)pKeyGenMgfOpDataExt, &pKeyGenMgfOpDataExt->baseOpData, pGeneratedMaskBuffer, pKeyGenMgfOpDataExt->hashAlgorithm); } /** ****************************************************************************** * @ingroup LacSymKey * Key Generation SSL & TLS response handler * * @description * Handles Key Generation SSL & TLS response messages from the QAT. 
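 * @note The request cookie is returned to the memory pool before the
 *       user callback runs, on both the success and the error path.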
* * @param[in] lacCmdId Command id of the original request * @param[in] pOpaqueData Pointer to opaque data that was in request * @param[in] cmnRespFlags LA response flags * * @return void * *****************************************************************************/ static void LacSymKey_SslTlsHandleResponse(icp_qat_fw_la_cmd_id_t lacCmdId, void *pOpaqueData, icp_qat_fw_comn_flags cmnRespFlags) { void *pSslTlsOpData = NULL; CpaCyGenFlatBufCbFunc pKeyGenSslTlsCb = NULL; lac_sym_key_cookie_t *pCookie = NULL; void *pCallbackTag = NULL; CpaFlatBuffer *pGeneratedKeyBuffer = NULL; CpaStatus status = CPA_STATUS_SUCCESS; CpaBoolean respStatusOk = (ICP_QAT_FW_COMN_STATUS_FLAG_OK == ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(cmnRespFlags)) ? CPA_TRUE : CPA_FALSE; pCookie = (lac_sym_key_cookie_t *)pOpaqueData; pSslTlsOpData = pCookie->pKeyGenOpData; if (CPA_TRUE == respStatusOk) { LacKey_StatsInc(lacCmdId, LAC_KEY_COMPLETED, pCookie->instanceHandle); } else { status = CPA_STATUS_FAIL; LacKey_StatsInc(lacCmdId, LAC_KEY_COMPLETED_ERRORS, pCookie->instanceHandle); } pKeyGenSslTlsCb = (CpaCyGenFlatBufCbFunc)(pCookie->pKeyGenCb); pCallbackTag = pCookie->pCallbackTag; pGeneratedKeyBuffer = pCookie->pKeyGenOutputData; Lac_MemPoolEntryFree(pCookie); (*pKeyGenSslTlsCb)(pCallbackTag, status, pSslTlsOpData, pGeneratedKeyBuffer); } /** ******************************************************************************* * @ingroup LacSymKey * Synchronous mode of operation function wrapper for performing SSL/TLS * key gen operation * * @description * Synchronous mode of operation function wrapper for performing SSL/TLS * key gen operation * * @param[in] instanceHandle QAT device handle. * @param[in] pKeyGenCb Pointer to callback function to be invoked * when the operation is complete. * @param[in] pCallbackTag Opaque User Data for this specific call. * @param[in] lacCmdId Lac command ID (identify SSL & TLS ops) * @param[in] pKeyGenSslTlsOpData Structure containing all the data needed to * perform the SSL/TLS key generation * operation. * @param[in] hashAlgorithm Specifies the hash algorithm to use. * According to RFC5246, this should be * "SHA-256 or a stronger standard hash * function." * @param[out] pKeyGenOutputData pointer to where output result should be * written * * @retval CPA_STATUS_SUCCESS Function executed successfully. * @retval CPA_STATUS_FAIL Function failed. * @retval CPA_STATUS_RETRY Function should be retried. * @retval CPA_STATUS_INVALID_PARAM Invalid parameter passed in. * @retval CPA_STATUS_RESOURCE Error related to system resources. 
* *****************************************************************************/ static CpaStatus LacSymKey_SslTlsSync(CpaInstanceHandle instanceHandle, const CpaCyGenFlatBufCbFunc pKeyGenCb, void *pCallbackTag, icp_qat_fw_la_cmd_id_t lacCmdId, void *pKeyGenSslTlsOpData, Cpa8U hashAlgorithm, CpaFlatBuffer *pKeyGenOutpuData) { lac_sync_op_data_t *pSyncCallbackData = NULL; CpaStatus status = CPA_STATUS_SUCCESS; status = LacSync_CreateSyncCookie(&pSyncCallbackData); if (CPA_STATUS_SUCCESS == status) { status = LacSymKey_KeyGenSslTls_GenCommon(instanceHandle, pKeyGenCb, pSyncCallbackData, lacCmdId, pKeyGenSslTlsOpData, hashAlgorithm, pKeyGenOutpuData); } else { /* Failure allocating sync cookie */ LacKey_StatsInc(lacCmdId, LAC_KEY_REQUEST_ERRORS, instanceHandle); return status; } if (CPA_STATUS_SUCCESS == status) { CpaStatus syncStatus = CPA_STATUS_SUCCESS; syncStatus = LacSync_WaitForCallback(pSyncCallbackData, LAC_SYM_SYNC_CALLBACK_TIMEOUT, &status, NULL); /* If callback doesn't come back */ if (CPA_STATUS_SUCCESS != syncStatus) { LacKey_StatsInc(lacCmdId, LAC_KEY_COMPLETED_ERRORS, instanceHandle); LAC_LOG_ERROR("Callback timed out"); status = syncStatus; } } else { /* As the Request was not sent the Callback will never * be called, so need to indicate that we're finished * with cookie so it can be destroyed. */ LacSync_SetSyncCookieComplete(pSyncCallbackData); } LacSync_DestroySyncCookie(&pSyncCallbackData); return status; } static CpaStatus computeHashKey(CpaFlatBuffer *secret, CpaFlatBuffer *hash, CpaCySymHashAlgorithm *hashAlgorithm) { CpaStatus status = CPA_STATUS_SUCCESS; switch (*hashAlgorithm) { case CPA_CY_SYM_HASH_MD5: status = qatUtilsHashMD5Full(secret->pData, hash->pData, secret->dataLenInBytes); break; case CPA_CY_SYM_HASH_SHA1: status = qatUtilsHashSHA1Full(secret->pData, hash->pData, secret->dataLenInBytes); break; case CPA_CY_SYM_HASH_SHA256: status = qatUtilsHashSHA256Full(secret->pData, hash->pData, secret->dataLenInBytes); break; case CPA_CY_SYM_HASH_SHA384: status = qatUtilsHashSHA384Full(secret->pData, hash->pData, secret->dataLenInBytes); break; case CPA_CY_SYM_HASH_SHA512: status = qatUtilsHashSHA512Full(secret->pData, hash->pData, secret->dataLenInBytes); break; default: status = CPA_STATUS_FAIL; } return status; } static CpaStatus LacSymKey_KeyGenSslTls_GenCommon(CpaInstanceHandle instanceHandle, const CpaCyGenFlatBufCbFunc pKeyGenCb, void *pCallbackTag, icp_qat_fw_la_cmd_id_t lacCmdId, void *pKeyGenSslTlsOpData, Cpa8U hashAlgCipher, CpaFlatBuffer *pKeyGenOutputData) { CpaStatus status = CPA_STATUS_SUCCESS; CpaBoolean precompute = CPA_FALSE; icp_qat_fw_la_bulk_req_t keyGenReq = { { 0 } }; icp_qat_la_bulk_req_hdr_t keyGenReqHdr = { { 0 } }; icp_qat_fw_la_key_gen_common_t keyGenReqMid = { { 0 } }; icp_qat_la_bulk_req_ftr_t keyGenReqFtr = { { { 0 } } }; Cpa8U *pMsgDummy = NULL; Cpa8U *pCacheDummyHdr = NULL; Cpa8U *pCacheDummyMid = NULL; Cpa8U *pCacheDummyFtr = NULL; lac_sym_key_cookie_t *pCookie = NULL; lac_sym_cookie_t *pSymCookie = NULL; Cpa64U inputPhysAddr = 0; Cpa64U outputPhysAddr = 0; /* Structure initializer is supported by C99, but it is * not supported by some former Intel compiler. 
*/ CpaCySymHashSetupData hashSetupData = { 0 }; sal_qat_content_desc_info_t contentDescInfo = { 0 }; Cpa32U hashBlkSizeInBytes = 0; Cpa32U tlsPrefixLen = 0; CpaFlatBuffer inputSecret = { 0 }; CpaFlatBuffer hashKeyOutput = { 0 }; Cpa32U uSecretLen = 0; CpaCySymHashNestedModeSetupData *pNestedModeSetupData = &(hashSetupData.nestedModeSetupData); icp_qat_fw_serv_specif_flags laCmdFlags = 0; icp_qat_fw_comn_flags cmnRequestFlags = ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_PTR_TYPE_FLAT, QAT_COMN_CD_FLD_TYPE_64BIT_ADR); sal_crypto_service_t *pService = (sal_crypto_service_t *)instanceHandle; /* If synchronous Operation */ if (NULL == pKeyGenCb) { return LacSymKey_SslTlsSync(instanceHandle, LacSync_GenFlatBufCb, pCallbackTag, lacCmdId, pKeyGenSslTlsOpData, hashAlgCipher, pKeyGenOutputData); } /* Allocate the cookie */ pCookie = (lac_sym_key_cookie_t *)Lac_MemPoolEntryAlloc( pService->lac_sym_cookie_pool); if (NULL == pCookie) { LAC_LOG_ERROR("Cannot get mem pool entry"); status = CPA_STATUS_RESOURCE; } else if ((void *)CPA_STATUS_RETRY == pCookie) { pCookie = NULL; status = CPA_STATUS_RETRY; } else { pSymCookie = (lac_sym_cookie_t *)pCookie; } if (CPA_STATUS_SUCCESS == status) { icp_qat_hw_auth_mode_t qatHashMode = 0; if (ICP_QAT_FW_LA_CMD_SSL3_KEY_DERIVE == lacCmdId) { qatHashMode = ICP_QAT_HW_AUTH_MODE0; } else /* TLS v1.1, v1.2, v1.3 */ { qatHashMode = ICP_QAT_HW_AUTH_MODE2; } pCookie->instanceHandle = pService; pCookie->pCallbackTag = pCallbackTag; pCookie->pKeyGenCb = pKeyGenCb; pCookie->pKeyGenOpData = pKeyGenSslTlsOpData; pCookie->pKeyGenOutputData = pKeyGenOutputData; hashSetupData.hashMode = CPA_CY_SYM_HASH_MODE_NESTED; /* SSL3 */ if (ICP_QAT_FW_LA_CMD_SSL3_KEY_DERIVE == lacCmdId) { hashSetupData.hashAlgorithm = CPA_CY_SYM_HASH_SHA1; hashSetupData.digestResultLenInBytes = LAC_HASH_MD5_DIGEST_SIZE; pNestedModeSetupData->outerHashAlgorithm = CPA_CY_SYM_HASH_MD5; pNestedModeSetupData->pInnerPrefixData = NULL; pNestedModeSetupData->innerPrefixLenInBytes = 0; pNestedModeSetupData->pOuterPrefixData = NULL; pNestedModeSetupData->outerPrefixLenInBytes = 0; } /* TLS v1.1 */ else if (ICP_QAT_FW_LA_CMD_TLS_V1_1_KEY_DERIVE == lacCmdId) { CpaCyKeyGenTlsOpData *pKeyGenTlsOpData = (CpaCyKeyGenTlsOpData *)pKeyGenSslTlsOpData; hashSetupData.hashAlgorithm = CPA_CY_SYM_HASH_SHA1; hashSetupData.digestResultLenInBytes = LAC_HASH_MD5_DIGEST_SIZE; pNestedModeSetupData->outerHashAlgorithm = CPA_CY_SYM_HASH_MD5; uSecretLen = pKeyGenTlsOpData->secret.dataLenInBytes; /* We want to handle pre_master_secret > 128 bytes * therefore we * only verify if the current operation is Master Secret * Derive. * The other operations remain unchanged. */ if ((uSecretLen > ICP_QAT_FW_LA_TLS_V1_1_SECRET_LEN_MAX) && (CPA_CY_KEY_TLS_OP_MASTER_SECRET_DERIVE == pKeyGenTlsOpData->tlsOp || CPA_CY_KEY_TLS_OP_USER_DEFINED == pKeyGenTlsOpData->tlsOp)) { CpaCySymHashAlgorithm hashAlgorithm = (CpaCySymHashAlgorithm)hashAlgCipher; /* secret = [s1 | s2 ] * s1 = outer prefix, s2 = inner prefix * length of s1 and s2 = ceil(secret_length / 2) * (secret length + 1)/2 will always give the * ceil as * division by 2 * (>>1) will give the smallest integral value * not less than * arg */ tlsPrefixLen = (pKeyGenTlsOpData->secret.dataLenInBytes + 1) >> 1; inputSecret.dataLenInBytes = tlsPrefixLen; inputSecret.pData = pKeyGenTlsOpData->secret.pData; /* Since the pre_master_secret is > 128, we * split the input * pre_master_secret in 2 halves and compute the * MD5 of the * first half and the SHA1 on the second half. 
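	 * Worked example (numbers assumed): for a 140-byte
	 * pre_master_secret, tlsPrefixLen = (140 + 1) >> 1 = 70, giving
	 * s1 = bytes 0..69 (outer prefix, MD5) and s2 = bytes 70..139
	 * (inner prefix, SHA1); for an odd length the middle byte lands
	 * in both halves.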
*/ hashAlgorithm = CPA_CY_SYM_HASH_MD5; /* Initialize pointer where MD5 key will go. */ hashKeyOutput.pData = &pCookie->hashKeyBuffer[0]; hashKeyOutput.dataLenInBytes = LAC_HASH_MD5_DIGEST_SIZE; computeHashKey(&inputSecret, &hashKeyOutput, &hashAlgorithm); pNestedModeSetupData->pOuterPrefixData = &pCookie->hashKeyBuffer[0]; pNestedModeSetupData->outerPrefixLenInBytes = LAC_HASH_MD5_DIGEST_SIZE; /* Point to the second half of the * pre_master_secret */ inputSecret.pData = pKeyGenTlsOpData->secret.pData + (pKeyGenTlsOpData->secret.dataLenInBytes - tlsPrefixLen); /* Compute SHA1 on the second half of the * pre_master_secret */ hashAlgorithm = CPA_CY_SYM_HASH_SHA1; /* Initialize pointer where SHA1 key will go. */ hashKeyOutput.pData = &pCookie->hashKeyBuffer [LAC_HASH_MD5_DIGEST_SIZE]; hashKeyOutput.dataLenInBytes = LAC_HASH_SHA1_DIGEST_SIZE; computeHashKey(&inputSecret, &hashKeyOutput, &hashAlgorithm); pNestedModeSetupData->pInnerPrefixData = &pCookie->hashKeyBuffer [LAC_HASH_MD5_DIGEST_SIZE]; pNestedModeSetupData->innerPrefixLenInBytes = LAC_HASH_SHA1_DIGEST_SIZE; } else { /* secret = [s1 | s2 ] * s1 = outer prefix, s2 = inner prefix * length of s1 and s2 = ceil(secret_length / 2) * (secret length + 1)/2 will always give the * ceil as * division by 2 * (>>1) will give the smallest integral value * not less than * arg */ tlsPrefixLen = (pKeyGenTlsOpData->secret.dataLenInBytes + 1) >> 1; /* last byte of s1 will be first byte of s2 if * Length is odd */ pNestedModeSetupData->pInnerPrefixData = pKeyGenTlsOpData->secret.pData + (pKeyGenTlsOpData->secret.dataLenInBytes - tlsPrefixLen); pNestedModeSetupData->pOuterPrefixData = pKeyGenTlsOpData->secret.pData; pNestedModeSetupData->innerPrefixLenInBytes = pNestedModeSetupData ->outerPrefixLenInBytes = tlsPrefixLen; } } /* TLS v1.2 */ else if (ICP_QAT_FW_LA_CMD_TLS_V1_2_KEY_DERIVE == lacCmdId) { CpaCyKeyGenTlsOpData *pKeyGenTlsOpData = (CpaCyKeyGenTlsOpData *)pKeyGenSslTlsOpData; CpaCySymHashAlgorithm hashAlgorithm = (CpaCySymHashAlgorithm)hashAlgCipher; uSecretLen = pKeyGenTlsOpData->secret.dataLenInBytes; hashSetupData.hashAlgorithm = (CpaCySymHashAlgorithm)hashAlgorithm; hashSetupData.digestResultLenInBytes = (Cpa32U)getDigestSizeFromHashAlgo(hashAlgorithm); pNestedModeSetupData->outerHashAlgorithm = (CpaCySymHashAlgorithm)hashAlgorithm; if (CPA_CY_KEY_TLS_OP_MASTER_SECRET_DERIVE == pKeyGenTlsOpData->tlsOp || CPA_CY_KEY_TLS_OP_USER_DEFINED == pKeyGenTlsOpData->tlsOp) { switch (hashAlgorithm) { case CPA_CY_SYM_HASH_SM3: precompute = CPA_FALSE; break; case CPA_CY_SYM_HASH_SHA256: if (uSecretLen > ICP_QAT_FW_LA_TLS_V1_2_SECRET_LEN_MAX) { precompute = CPA_TRUE; } break; case CPA_CY_SYM_HASH_SHA384: case CPA_CY_SYM_HASH_SHA512: if (uSecretLen > ICP_QAT_FW_LA_TLS_SECRET_LEN_MAX) { precompute = CPA_TRUE; } break; default: break; } } if (CPA_TRUE == precompute) { /* Case when secret > algorithm block size * RFC 4868: For SHA-256 Block size is 512 bits, * for SHA-384 * and SHA-512 Block size is 1024 bits * Initialize pointer * where SHAxxx key will go. 
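	 * Worked example (sizes per RFC 2104/4868): a 200-byte SHA-256
	 * secret exceeds the 64-byte block size, so it is first reduced
	 * to its 32-byte digest via computeHashKey() below, and that
	 * digest is then used as both the inner and outer prefix.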
*/ hashKeyOutput.pData = &pCookie->hashKeyBuffer[0]; hashKeyOutput.dataLenInBytes = hashSetupData.digestResultLenInBytes; computeHashKey(&pKeyGenTlsOpData->secret, &hashKeyOutput, &hashSetupData.hashAlgorithm); /* Outer prefix = secret , inner prefix = secret * secret < 64 bytes */ pNestedModeSetupData->pInnerPrefixData = hashKeyOutput.pData; pNestedModeSetupData->pOuterPrefixData = hashKeyOutput.pData; pNestedModeSetupData->innerPrefixLenInBytes = hashKeyOutput.dataLenInBytes; pNestedModeSetupData->outerPrefixLenInBytes = hashKeyOutput.dataLenInBytes; } else { /* Outer prefix = secret , inner prefix = secret * secret <= 64 bytes */ pNestedModeSetupData->pInnerPrefixData = pKeyGenTlsOpData->secret.pData; pNestedModeSetupData->pOuterPrefixData = pKeyGenTlsOpData->secret.pData; pNestedModeSetupData->innerPrefixLenInBytes = pKeyGenTlsOpData->secret.dataLenInBytes; pNestedModeSetupData->outerPrefixLenInBytes = pKeyGenTlsOpData->secret.dataLenInBytes; } } /* TLS v1.3 */ else if ((ICP_QAT_FW_LA_CMD_HKDF_EXTRACT <= lacCmdId) && (ICP_QAT_FW_LA_CMD_HKDF_EXTRACT_AND_EXPAND_LABEL >= lacCmdId)) { CpaCyKeyGenHKDFOpData *pKeyGenTlsOpData = (CpaCyKeyGenHKDFOpData *)pKeyGenSslTlsOpData; CpaCySymHashAlgorithm hashAlgorithm = getHashAlgorithmFromCipherSuiteHKDF(hashAlgCipher); /* Set HASH data */ hashSetupData.hashAlgorithm = hashAlgorithm; /* Calculate digest length from the HASH type */ hashSetupData.digestResultLenInBytes = cipherSuiteHKDFHashSizes[hashAlgCipher] [LAC_KEY_HKDF_DIGESTS]; /* Outer Hash type is the same as inner hash type */ pNestedModeSetupData->outerHashAlgorithm = hashAlgorithm; /* EXPAND (PRK): * Outer prefix = secret, inner prefix = secret * EXTRACT (SEED/SALT): * Outer prefix = seed, inner prefix = seed * Secret <= 64 Bytes * We do not pre compute as secret can't be larger than * 64 bytes */ if ((ICP_QAT_FW_LA_CMD_HKDF_EXPAND == lacCmdId) || (ICP_QAT_FW_LA_CMD_HKDF_EXPAND_LABEL == lacCmdId)) { pNestedModeSetupData->pInnerPrefixData = pKeyGenTlsOpData->secret; pNestedModeSetupData->pOuterPrefixData = pKeyGenTlsOpData->secret; pNestedModeSetupData->innerPrefixLenInBytes = pKeyGenTlsOpData->secretLen; pNestedModeSetupData->outerPrefixLenInBytes = pKeyGenTlsOpData->secretLen; } else { pNestedModeSetupData->pInnerPrefixData = pKeyGenTlsOpData->seed; pNestedModeSetupData->pOuterPrefixData = pKeyGenTlsOpData->seed; pNestedModeSetupData->innerPrefixLenInBytes = pKeyGenTlsOpData->seedLen; pNestedModeSetupData->outerPrefixLenInBytes = pKeyGenTlsOpData->seedLen; } } /* Set the footer Data. 
* Note that following function doesn't look at inner/outer * prefix pointers in nested digest ctx */ LacSymQat_HashContentDescInit( &keyGenReqFtr, instanceHandle, &hashSetupData, pCookie ->contentDesc, /* Pointer to base of hw setup block */ LAC_SYM_KEY_NO_HASH_BLK_OFFSET_QW, ICP_QAT_FW_SLICE_DRAM_WR, qatHashMode, - CPA_FALSE, /* Not using sym Constants Table in SRAM */ - CPA_FALSE, /* Not using the optimised content Desc */ - NULL, /* Precompute data */ + CPA_FALSE, /* Not using sym Constants Table in Shared SRAM + */ + CPA_FALSE, /* not using the optimised content Desc */ + CPA_FALSE, /* Not using the stateful SHA3 Content Desc */ + NULL, /* precompute data */ &hashBlkSizeInBytes); /* SSL3 */ if (ICP_QAT_FW_LA_CMD_SSL3_KEY_DERIVE == lacCmdId) { CpaCyKeyGenSslOpData *pKeyGenSslOpData = (CpaCyKeyGenSslOpData *)pKeyGenSslTlsOpData; Cpa8U *pLabel = NULL; Cpa32U labelLen = 0; Cpa8U iterations = 0; Cpa64U labelPhysAddr = 0; /* Iterations = ceiling of output required / output per * iteration Ceiling of a / b = (a + (b-1)) / b */ iterations = (pKeyGenSslOpData->generatedKeyLenInBytes + (LAC_SYM_QAT_KEY_SSL_BYTES_PER_ITERATION - 1)) >> LAC_SYM_QAT_KEY_SSL_ITERATIONS_SHIFT; if (CPA_CY_KEY_SSL_OP_USER_DEFINED == pKeyGenSslOpData->sslOp) { pLabel = pKeyGenSslOpData->userLabel.pData; labelLen = pKeyGenSslOpData->userLabel.dataLenInBytes; labelPhysAddr = LAC_OS_VIRT_TO_PHYS_EXTERNAL( pService->generic_service_info, pLabel); if (labelPhysAddr == 0) { LAC_LOG_ERROR( "Unable to get the physical address of the" " label"); status = CPA_STATUS_FAIL; } } else { pLabel = pService->pSslLabel; /* Calculate label length. * eg. 3 iterations is ABBCCC so length is 6 */ labelLen = ((iterations * iterations) + iterations) >> 1; labelPhysAddr = LAC_OS_VIRT_TO_PHYS_INTERNAL(pLabel); } LacSymQat_KeySslRequestPopulate( &keyGenReqHdr, &keyGenReqMid, pKeyGenSslOpData->generatedKeyLenInBytes, labelLen, pKeyGenSslOpData->secret.dataLenInBytes, iterations); LacSymQat_KeySslKeyMaterialInputPopulate( &(pService->generic_service_info), &(pCookie->u.sslKeyInput), pKeyGenSslOpData->seed.pData, labelPhysAddr, pKeyGenSslOpData->secret.pData); inputPhysAddr = LAC_MEM_CAST_PTR_TO_UINT64( pSymCookie->keySslKeyInputPhyAddr); } /* TLS v1.1, v1.2 */ else if (ICP_QAT_FW_LA_CMD_TLS_V1_1_KEY_DERIVE == lacCmdId || ICP_QAT_FW_LA_CMD_TLS_V1_2_KEY_DERIVE == lacCmdId) { CpaCyKeyGenTlsOpData *pKeyGenTlsOpData = (CpaCyKeyGenTlsOpData *)pKeyGenSslTlsOpData; lac_sym_qat_hash_state_buffer_info_t hashStateBufferInfo = { 0 }; CpaBoolean hashStateBuffer = CPA_FALSE; icp_qat_fw_auth_cd_ctrl_hdr_t *pHashControlBlock = (icp_qat_fw_auth_cd_ctrl_hdr_t *)&( keyGenReqFtr.cd_ctrl); icp_qat_la_auth_req_params_t *pHashReqParams = NULL; Cpa8U *pLabel = NULL; Cpa32U labelLen = 0; Cpa64U labelPhysAddr = 0; hashStateBufferInfo.pData = pCookie->hashStateBuffer; hashStateBufferInfo.pDataPhys = LAC_MEM_CAST_PTR_TO_UINT64( pSymCookie->keyHashStateBufferPhyAddr); hashStateBufferInfo.stateStorageSzQuadWords = 0; LacSymQat_HashSetupReqParamsMetaData(&(keyGenReqFtr), instanceHandle, &(hashSetupData), hashStateBuffer, qatHashMode, CPA_FALSE); pHashReqParams = (icp_qat_la_auth_req_params_t *)&( keyGenReqFtr.serv_specif_rqpars); hashStateBufferInfo.prefixAadSzQuadWords = LAC_BYTES_TO_QUADWORDS( pHashReqParams->u2.inner_prefix_sz + pHashControlBlock->outer_prefix_sz); /* Copy prefix data into hash state buffer */ pMsgDummy = (Cpa8U *)&(keyGenReq); pCacheDummyHdr = (Cpa8U *)&(keyGenReqHdr); pCacheDummyMid = (Cpa8U *)&(keyGenReqMid); pCacheDummyFtr = (Cpa8U *)&(keyGenReqFtr); 
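			/*
			 * Stitch the cached pieces into the request image:
			 * header long-words at offset 0, mid long-words at
			 * LAC_START_OF_CACHE_MID_IN_LW and footer long-words
			 * at LAC_START_OF_CACHE_FTR_IN_LW, each sized by the
			 * matching LAC_SIZE_OF_CACHE_x_IN_LW constant.
			 */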
memcpy(pMsgDummy, pCacheDummyHdr, (LAC_LONG_WORD_IN_BYTES * LAC_SIZE_OF_CACHE_HDR_IN_LW)); memcpy(pMsgDummy + (LAC_LONG_WORD_IN_BYTES * LAC_START_OF_CACHE_MID_IN_LW), pCacheDummyMid, (LAC_LONG_WORD_IN_BYTES * LAC_SIZE_OF_CACHE_MID_IN_LW)); memcpy(pMsgDummy + (LAC_LONG_WORD_IN_BYTES * LAC_START_OF_CACHE_FTR_IN_LW), pCacheDummyFtr, (LAC_LONG_WORD_IN_BYTES * LAC_SIZE_OF_CACHE_FTR_IN_LW)); LacSymQat_HashStatePrefixAadBufferPopulate( &hashStateBufferInfo, &keyGenReqFtr, pNestedModeSetupData->pInnerPrefixData, pNestedModeSetupData->innerPrefixLenInBytes, pNestedModeSetupData->pOuterPrefixData, pNestedModeSetupData->outerPrefixLenInBytes); /* Firmware only looks at hash state buffer pointer and * the * hash state buffer size so all other fields are set to * 0 */ LacSymQat_HashRequestParamsPopulate( &(keyGenReq), 0, /* Auth offset */ 0, /* Auth length */ &(pService->generic_service_info), &hashStateBufferInfo, /* Hash state prefix buffer */ ICP_QAT_FW_LA_PARTIAL_NONE, 0, /* Hash result size */ CPA_FALSE, NULL, CPA_CY_SYM_HASH_NONE, /* Hash algorithm */ NULL); /* HKDF only */ /* Set up the labels and their length */ if (CPA_CY_KEY_TLS_OP_USER_DEFINED == pKeyGenTlsOpData->tlsOp) { pLabel = pKeyGenTlsOpData->userLabel.pData; labelLen = pKeyGenTlsOpData->userLabel.dataLenInBytes; labelPhysAddr = LAC_OS_VIRT_TO_PHYS_EXTERNAL( pService->generic_service_info, pLabel); if (labelPhysAddr == 0) { LAC_LOG_ERROR( "Unable to get the physical address of the" " label"); status = CPA_STATUS_FAIL; } } else if (CPA_CY_KEY_TLS_OP_MASTER_SECRET_DERIVE == pKeyGenTlsOpData->tlsOp) { pLabel = pService->pTlsLabel->masterSecret; labelLen = sizeof( LAC_SYM_KEY_TLS_MASTER_SECRET_LABEL) - 1; labelPhysAddr = LAC_OS_VIRT_TO_PHYS_INTERNAL(pLabel); } else if (CPA_CY_KEY_TLS_OP_KEY_MATERIAL_DERIVE == pKeyGenTlsOpData->tlsOp) { pLabel = pService->pTlsLabel->keyMaterial; labelLen = sizeof(LAC_SYM_KEY_TLS_KEY_MATERIAL_LABEL) - 1; labelPhysAddr = LAC_OS_VIRT_TO_PHYS_INTERNAL(pLabel); } else if (CPA_CY_KEY_TLS_OP_CLIENT_FINISHED_DERIVE == pKeyGenTlsOpData->tlsOp) { pLabel = pService->pTlsLabel->clientFinished; labelLen = sizeof(LAC_SYM_KEY_TLS_CLIENT_FIN_LABEL) - 1; labelPhysAddr = LAC_OS_VIRT_TO_PHYS_INTERNAL(pLabel); } else { pLabel = pService->pTlsLabel->serverFinished; labelLen = sizeof(LAC_SYM_KEY_TLS_SERVER_FIN_LABEL) - 1; labelPhysAddr = LAC_OS_VIRT_TO_PHYS_INTERNAL(pLabel); } LacSymQat_KeyTlsRequestPopulate( &keyGenReqMid, pKeyGenTlsOpData->generatedKeyLenInBytes, labelLen, pKeyGenTlsOpData->secret.dataLenInBytes, pKeyGenTlsOpData->seed.dataLenInBytes, lacCmdId); LacSymQat_KeyTlsKeyMaterialInputPopulate( &(pService->generic_service_info), &(pCookie->u.tlsKeyInput), pKeyGenTlsOpData->seed.pData, labelPhysAddr); inputPhysAddr = LAC_MEM_CAST_PTR_TO_UINT64( pSymCookie->keyTlsKeyInputPhyAddr); } /* TLS v1.3 */ else if (ICP_QAT_FW_LA_CMD_HKDF_EXTRACT <= lacCmdId && ICP_QAT_FW_LA_CMD_HKDF_EXTRACT_AND_EXPAND >= lacCmdId) { CpaCyKeyGenHKDFOpData *pKeyGenTlsOpData = (CpaCyKeyGenHKDFOpData *)pKeyGenSslTlsOpData; lac_sym_qat_hash_state_buffer_info_t hashStateBufferInfo = { 0 }; CpaBoolean hashStateBuffer = CPA_FALSE; icp_qat_fw_auth_cd_ctrl_hdr_t *pHashControlBlock = (icp_qat_fw_auth_cd_ctrl_hdr_t *)&( keyGenReqFtr.cd_ctrl); icp_qat_la_auth_req_params_t *pHashReqParams = NULL; hashStateBufferInfo.pData = pCookie->hashStateBuffer; hashStateBufferInfo.pDataPhys = LAC_MEM_CAST_PTR_TO_UINT64( pSymCookie->keyHashStateBufferPhyAddr); hashStateBufferInfo.stateStorageSzQuadWords = 0; LacSymQat_HashSetupReqParamsMetaData(&(keyGenReqFtr), 
instanceHandle, &(hashSetupData), hashStateBuffer, qatHashMode, CPA_FALSE); pHashReqParams = (icp_qat_la_auth_req_params_t *)&( keyGenReqFtr.serv_specif_rqpars); hashStateBufferInfo.prefixAadSzQuadWords = LAC_BYTES_TO_QUADWORDS( pHashReqParams->u2.inner_prefix_sz + pHashControlBlock->outer_prefix_sz); /* Copy prefix data into hash state buffer */ pMsgDummy = (Cpa8U *)&(keyGenReq); pCacheDummyHdr = (Cpa8U *)&(keyGenReqHdr); pCacheDummyMid = (Cpa8U *)&(keyGenReqMid); pCacheDummyFtr = (Cpa8U *)&(keyGenReqFtr); memcpy(pMsgDummy, pCacheDummyHdr, (LAC_LONG_WORD_IN_BYTES * LAC_SIZE_OF_CACHE_HDR_IN_LW)); memcpy(pMsgDummy + (LAC_LONG_WORD_IN_BYTES * LAC_START_OF_CACHE_MID_IN_LW), pCacheDummyMid, (LAC_LONG_WORD_IN_BYTES * LAC_SIZE_OF_CACHE_MID_IN_LW)); memcpy(pMsgDummy + (LAC_LONG_WORD_IN_BYTES * LAC_START_OF_CACHE_FTR_IN_LW), pCacheDummyFtr, (LAC_LONG_WORD_IN_BYTES * LAC_SIZE_OF_CACHE_FTR_IN_LW)); LacSymQat_HashStatePrefixAadBufferPopulate( &hashStateBufferInfo, &keyGenReqFtr, pNestedModeSetupData->pInnerPrefixData, pNestedModeSetupData->innerPrefixLenInBytes, pNestedModeSetupData->pOuterPrefixData, pNestedModeSetupData->outerPrefixLenInBytes); /* Firmware only looks at hash state buffer pointer and * the * hash state buffer size so all other fields are set to * 0 */ LacSymQat_HashRequestParamsPopulate( &(keyGenReq), 0, /* Auth offset */ 0, /* Auth length */ &(pService->generic_service_info), &hashStateBufferInfo, /* Hash state prefix buffer */ ICP_QAT_FW_LA_PARTIAL_NONE, 0, /* Hash result size */ CPA_FALSE, NULL, CPA_CY_SYM_HASH_NONE, /* Hash algorithm */ pKeyGenTlsOpData->secret); /* IKM or PRK */ LacSymQat_KeyTlsRequestPopulate( &keyGenReqMid, cipherSuiteHKDFHashSizes[hashAlgCipher] [LAC_KEY_HKDF_DIGESTS], /* For EXTRACT, EXPAND, FW expects info to be passed as label */ pKeyGenTlsOpData->infoLen, pKeyGenTlsOpData->secretLen, pKeyGenTlsOpData->seedLen, lacCmdId); LacSymQat_KeyTlsHKDFKeyMaterialInputPopulate( &(pService->generic_service_info), &(pCookie->u.tlsHKDFKeyInput), pKeyGenTlsOpData, 0, /* No subLabels used */ lacCmdId); /* Pass op being performed */ inputPhysAddr = LAC_MEM_CAST_PTR_TO_UINT64( pSymCookie->keyTlsKeyInputPhyAddr); } /* TLS v1.3 LABEL */ else if (ICP_QAT_FW_LA_CMD_HKDF_EXPAND_LABEL == lacCmdId || ICP_QAT_FW_LA_CMD_HKDF_EXTRACT_AND_EXPAND_LABEL == lacCmdId) { CpaCyKeyGenHKDFOpData *pKeyGenTlsOpData = (CpaCyKeyGenHKDFOpData *)pKeyGenSslTlsOpData; Cpa64U subLabelsPhysAddr = 0; lac_sym_qat_hash_state_buffer_info_t hashStateBufferInfo = { 0 }; CpaBoolean hashStateBuffer = CPA_FALSE; icp_qat_fw_auth_cd_ctrl_hdr_t *pHashControlBlock = (icp_qat_fw_auth_cd_ctrl_hdr_t *)&( keyGenReqFtr.cd_ctrl); icp_qat_la_auth_req_params_t *pHashReqParams = NULL; hashStateBufferInfo.pData = pCookie->hashStateBuffer; hashStateBufferInfo.pDataPhys = LAC_MEM_CAST_PTR_TO_UINT64( pSymCookie->keyHashStateBufferPhyAddr); hashStateBufferInfo.stateStorageSzQuadWords = 0; LacSymQat_HashSetupReqParamsMetaData(&(keyGenReqFtr), instanceHandle, &(hashSetupData), hashStateBuffer, qatHashMode, CPA_FALSE); pHashReqParams = (icp_qat_la_auth_req_params_t *)&( keyGenReqFtr.serv_specif_rqpars); hashStateBufferInfo.prefixAadSzQuadWords = LAC_BYTES_TO_QUADWORDS( pHashReqParams->u2.inner_prefix_sz + pHashControlBlock->outer_prefix_sz); /* Copy prefix data into hash state buffer */ pMsgDummy = (Cpa8U *)&(keyGenReq); pCacheDummyHdr = (Cpa8U *)&(keyGenReqHdr); pCacheDummyMid = (Cpa8U *)&(keyGenReqMid); pCacheDummyFtr = (Cpa8U *)&(keyGenReqFtr); memcpy(pMsgDummy, pCacheDummyHdr, (LAC_LONG_WORD_IN_BYTES * 
LAC_SIZE_OF_CACHE_HDR_IN_LW)); memcpy(pMsgDummy + (LAC_LONG_WORD_IN_BYTES * LAC_START_OF_CACHE_MID_IN_LW), pCacheDummyMid, (LAC_LONG_WORD_IN_BYTES * LAC_SIZE_OF_CACHE_MID_IN_LW)); memcpy(pMsgDummy + (LAC_LONG_WORD_IN_BYTES * LAC_START_OF_CACHE_FTR_IN_LW), pCacheDummyFtr, (LAC_LONG_WORD_IN_BYTES * LAC_SIZE_OF_CACHE_FTR_IN_LW)); LacSymQat_HashStatePrefixAadBufferPopulate( &hashStateBufferInfo, &keyGenReqFtr, pNestedModeSetupData->pInnerPrefixData, pNestedModeSetupData->innerPrefixLenInBytes, pNestedModeSetupData->pOuterPrefixData, pNestedModeSetupData->outerPrefixLenInBytes); /* Firmware only looks at hash state buffer pointer and * the * hash state buffer size so all other fields are set to * 0 */ LacSymQat_HashRequestParamsPopulate( &(keyGenReq), 0, /* Auth offset */ 0, /* Auth length */ &(pService->generic_service_info), &hashStateBufferInfo, /* Hash state prefix buffer */ ICP_QAT_FW_LA_PARTIAL_NONE, 0, /* Hash result size */ CPA_FALSE, NULL, CPA_CY_SYM_HASH_NONE, /* Hash algorithm */ pKeyGenTlsOpData->secret); /* IKM or PRK */ LacSymQat_KeyTlsRequestPopulate( &keyGenReqMid, cipherSuiteHKDFHashSizes[hashAlgCipher] [LAC_KEY_HKDF_DIGESTS], pKeyGenTlsOpData->numLabels, /* Number of Labels */ pKeyGenTlsOpData->secretLen, pKeyGenTlsOpData->seedLen, lacCmdId); /* Get physical address of subLabels */ switch (hashAlgCipher) { case CPA_CY_HKDF_TLS_AES_128_GCM_SHA256: /* Fall Through */ case CPA_CY_HKDF_TLS_AES_128_CCM_SHA256: case CPA_CY_HKDF_TLS_AES_128_CCM_8_SHA256: subLabelsPhysAddr = pService->pTlsHKDFSubLabel ->sublabelPhysAddr256; break; case CPA_CY_HKDF_TLS_CHACHA20_POLY1305_SHA256: subLabelsPhysAddr = pService->pTlsHKDFSubLabel ->sublabelPhysAddrChaChaPoly; break; case CPA_CY_HKDF_TLS_AES_256_GCM_SHA384: subLabelsPhysAddr = pService->pTlsHKDFSubLabel ->sublabelPhysAddr384; break; default: break; } LacSymQat_KeyTlsHKDFKeyMaterialInputPopulate( &(pService->generic_service_info), &(pCookie->u.tlsHKDFKeyInput), pKeyGenTlsOpData, subLabelsPhysAddr, lacCmdId); /* Pass op being performed */ inputPhysAddr = LAC_MEM_CAST_PTR_TO_UINT64( pSymCookie->keyTlsKeyInputPhyAddr); } outputPhysAddr = LAC_MEM_CAST_PTR_TO_UINT64( LAC_OS_VIRT_TO_PHYS_EXTERNAL(pService->generic_service_info, pKeyGenOutputData->pData)); if (outputPhysAddr == 0) { LAC_LOG_ERROR( "Unable to get the physical address of the" " output buffer"); status = CPA_STATUS_FAIL; } } if (CPA_STATUS_SUCCESS == status) { Cpa8U lw26[4]; char *tmp = NULL; unsigned char a; int n = 0; /* Make up the full keyGenReq struct from its constituents * before calling the SalQatMsg functions below. 
* Note: The full cache struct has been reduced to a * header, mid and footer for memory size reduction */ pMsgDummy = (Cpa8U *)&(keyGenReq); pCacheDummyHdr = (Cpa8U *)&(keyGenReqHdr); pCacheDummyMid = (Cpa8U *)&(keyGenReqMid); pCacheDummyFtr = (Cpa8U *)&(keyGenReqFtr); memcpy(pMsgDummy, pCacheDummyHdr, (LAC_LONG_WORD_IN_BYTES * LAC_SIZE_OF_CACHE_HDR_IN_LW)); memcpy(pMsgDummy + (LAC_LONG_WORD_IN_BYTES * LAC_START_OF_CACHE_MID_IN_LW), pCacheDummyMid, (LAC_LONG_WORD_IN_BYTES * LAC_SIZE_OF_CACHE_MID_IN_LW)); memcpy(&lw26, pMsgDummy + (LAC_LONG_WORD_IN_BYTES * LAC_START_OF_CACHE_FTR_IN_LW), LAC_LONG_WORD_IN_BYTES); memcpy(pMsgDummy + (LAC_LONG_WORD_IN_BYTES * LAC_START_OF_CACHE_FTR_IN_LW), pCacheDummyFtr, (LAC_LONG_WORD_IN_BYTES * LAC_SIZE_OF_CACHE_FTR_IN_LW)); tmp = (char *)(pMsgDummy + (LAC_LONG_WORD_IN_BYTES * LAC_START_OF_CACHE_FTR_IN_LW)); /* Copy LW26, or'd with what's already there, into the Msg, for * TLS */ for (n = 0; n < LAC_LONG_WORD_IN_BYTES; n++) { a = (unsigned char)*(tmp + n); lw26[n] = lw26[n] | a; } memcpy(pMsgDummy + (LAC_LONG_WORD_IN_BYTES * LAC_START_OF_CACHE_FTR_IN_LW), &lw26, LAC_LONG_WORD_IN_BYTES); contentDescInfo.pData = pCookie->contentDesc; contentDescInfo.hardwareSetupBlockPhys = LAC_MEM_CAST_PTR_TO_UINT64( pSymCookie->keyContentDescPhyAddr); contentDescInfo.hwBlkSzQuadWords = LAC_BYTES_TO_QUADWORDS(hashBlkSizeInBytes); /* Populate common request fields */ SalQatMsg_ContentDescHdrWrite((icp_qat_fw_comn_req_t *)&( keyGenReq), &(contentDescInfo)); SalQatMsg_CmnHdrWrite((icp_qat_fw_comn_req_t *)&keyGenReq, ICP_QAT_FW_COMN_REQ_CPM_FW_LA, lacCmdId, cmnRequestFlags, laCmdFlags); SalQatMsg_CmnMidWrite((icp_qat_fw_la_bulk_req_t *)&(keyGenReq), pCookie, LAC_SYM_KEY_QAT_PTR_TYPE, inputPhysAddr, outputPhysAddr, 0, 0); /* Send to QAT */ status = icp_adf_transPutMsg(pService->trans_handle_sym_tx, (void *)&(keyGenReq), LAC_QAT_SYM_REQ_SZ_LW); } if (CPA_STATUS_SUCCESS == status) { /* Update stats */ LacKey_StatsInc(lacCmdId, LAC_KEY_REQUESTS, pCookie->instanceHandle); } else { /* Clean up cookie memory */ if (NULL != pCookie) { LacKey_StatsInc(lacCmdId, LAC_KEY_REQUEST_ERRORS, pCookie->instanceHandle); Lac_MemPoolEntryFree(pCookie); } } return status; } /** * @ingroup LacSymKey * Parameters check for TLS v1.0/1.1, v1.2, v1.3 and SSL3 * @description * Check user parameters against the firmware/spec requirements. * * @param[in] pKeyGenOpData Pointer to a structure containing all * the data needed to perform the key * generation operation. * @param[in] hashAlgCipher Specifies the hash algorithm, * or cipher we are using. * According to RFC5246, this should be * "SHA-256 or a stronger standard hash * function." * @param[in] pGeneratedKeyBuffer User output buffers. * @param[in] cmdId Keygen operation to perform. 
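 *
 * A quick map of the checks below (summarising the function body;
 * the maxima are the ICP_QAT_FW_LA_* constants):
 *   SSL3:           seed length must equal the maximum; output and
 *                   user-label lengths are bounded.
 *   TLS v1.1/v1.2:  secret bounded by 4x/8x the firmware maximum;
 *                   seed and output lengths validated.
 *   TLS v1.3 HKDF:  per-operation seed/info/label checks plus a
 *                   computed expected output length.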
*/ static CpaStatus LacSymKey_CheckParamSslTls(const void *pKeyGenOpData, Cpa8U hashAlgCipher, const CpaFlatBuffer *pGeneratedKeyBuffer, icp_qat_fw_la_cmd_id_t cmdId) { /* Api max value */ Cpa32U maxSecretLen = 0; Cpa32U maxSeedLen = 0; Cpa32U maxOutputLen = 0; Cpa32U maxInfoLen = 0; Cpa32U maxLabelLen = 0; /* User info */ Cpa32U uSecretLen = 0; Cpa32U uSeedLen = 0; Cpa32U uOutputLen = 0; LAC_CHECK_NULL_PARAM(pKeyGenOpData); LAC_CHECK_NULL_PARAM(pGeneratedKeyBuffer); LAC_CHECK_NULL_PARAM(pGeneratedKeyBuffer->pData); if (ICP_QAT_FW_LA_CMD_SSL3_KEY_DERIVE == cmdId) { CpaCyKeyGenSslOpData *opData = (CpaCyKeyGenSslOpData *)pKeyGenOpData; /* User info */ uSecretLen = opData->secret.dataLenInBytes; uSeedLen = opData->seed.dataLenInBytes; uOutputLen = opData->generatedKeyLenInBytes; /* Api max value */ maxSecretLen = ICP_QAT_FW_LA_SSL_SECRET_LEN_MAX; maxSeedLen = ICP_QAT_FW_LA_SSL_SEED_LEN_MAX; maxOutputLen = ICP_QAT_FW_LA_SSL_OUTPUT_LEN_MAX; /* Check user buffers */ LAC_CHECK_NULL_PARAM(opData->secret.pData); LAC_CHECK_NULL_PARAM(opData->seed.pData); /* Check operation */ if ((Cpa32U)opData->sslOp > CPA_CY_KEY_SSL_OP_USER_DEFINED) { LAC_INVALID_PARAM_LOG("opData->sslOp"); return CPA_STATUS_INVALID_PARAM; } if ((Cpa32U)opData->sslOp == CPA_CY_KEY_SSL_OP_USER_DEFINED) { LAC_CHECK_NULL_PARAM(opData->userLabel.pData); /* Maximum label length for SSL Key Gen request */ if (opData->userLabel.dataLenInBytes > ICP_QAT_FW_LA_SSL_LABEL_LEN_MAX) { LAC_INVALID_PARAM_LOG( "userLabel.dataLenInBytes"); return CPA_STATUS_INVALID_PARAM; } } /* Only seed length for SSL3 Key Gen request */ if (maxSeedLen != uSeedLen) { LAC_INVALID_PARAM_LOG("seed.dataLenInBytes"); return CPA_STATUS_INVALID_PARAM; } /* Maximum output length for SSL3 Key Gen request */ if (uOutputLen > maxOutputLen) { LAC_INVALID_PARAM_LOG("generatedKeyLenInBytes"); return CPA_STATUS_INVALID_PARAM; } } /* TLS v1.1 or TLS v1.2 */ else if (ICP_QAT_FW_LA_CMD_TLS_V1_1_KEY_DERIVE == cmdId || ICP_QAT_FW_LA_CMD_TLS_V1_2_KEY_DERIVE == cmdId) { CpaCyKeyGenTlsOpData *opData = (CpaCyKeyGenTlsOpData *)pKeyGenOpData; /* User info */ uSecretLen = opData->secret.dataLenInBytes; uSeedLen = opData->seed.dataLenInBytes; uOutputLen = opData->generatedKeyLenInBytes; if (ICP_QAT_FW_LA_CMD_TLS_V1_1_KEY_DERIVE == cmdId) { /* Api max value */ /* ICP_QAT_FW_LA_TLS_V1_1_SECRET_LEN_MAX needs to be * multiplied * by 4 in order to verify the 512 conditions. We did * not change * ICP_QAT_FW_LA_TLS_V1_1_SECRET_LEN_MAX as it * represents * the max value the firmware can handle. */ maxSecretLen = ICP_QAT_FW_LA_TLS_V1_1_SECRET_LEN_MAX * 4; } else { /* Api max value */ /* ICP_QAT_FW_LA_TLS_V1_2_SECRET_LEN_MAX needs to be * multiplied * by 8 in order to verify the 512 conditions. We did * not change * ICP_QAT_FW_LA_TLS_V1_2_SECRET_LEN_MAX as it * represents * the max value the firmware can handle.
*/ maxSecretLen = ICP_QAT_FW_LA_TLS_V1_2_SECRET_LEN_MAX * 8; /* Check Hash algorithm */ if (0 == getDigestSizeFromHashAlgo(hashAlgCipher)) { LAC_INVALID_PARAM_LOG("hashAlgorithm"); return CPA_STATUS_INVALID_PARAM; } } maxSeedLen = ICP_QAT_FW_LA_TLS_SEED_LEN_MAX; maxOutputLen = ICP_QAT_FW_LA_TLS_OUTPUT_LEN_MAX; /* Check user buffers */ LAC_CHECK_NULL_PARAM(opData->secret.pData); LAC_CHECK_NULL_PARAM(opData->seed.pData); /* Check operation */ if ((Cpa32U)opData->tlsOp > CPA_CY_KEY_TLS_OP_USER_DEFINED) { LAC_INVALID_PARAM_LOG("opData->tlsOp"); return CPA_STATUS_INVALID_PARAM; } else if ((Cpa32U)opData->tlsOp == CPA_CY_KEY_TLS_OP_USER_DEFINED) { LAC_CHECK_NULL_PARAM(opData->userLabel.pData); /* Maximum label length for TLS Key Gen request */ if (opData->userLabel.dataLenInBytes > ICP_QAT_FW_LA_TLS_LABEL_LEN_MAX) { LAC_INVALID_PARAM_LOG( "userLabel.dataLenInBytes"); return CPA_STATUS_INVALID_PARAM; } } /* Maximum/only seed length for TLS Key Gen request */ if (((Cpa32U)opData->tlsOp != CPA_CY_KEY_TLS_OP_MASTER_SECRET_DERIVE) && ((Cpa32U)opData->tlsOp != CPA_CY_KEY_TLS_OP_KEY_MATERIAL_DERIVE)) { if (uSeedLen > maxSeedLen) { LAC_INVALID_PARAM_LOG("seed.dataLenInBytes"); return CPA_STATUS_INVALID_PARAM; } } else { if (maxSeedLen != uSeedLen) { LAC_INVALID_PARAM_LOG("seed.dataLenInBytes"); return CPA_STATUS_INVALID_PARAM; } } /* Maximum output length for TLS Key Gen request */ if (uOutputLen > maxOutputLen) { LAC_INVALID_PARAM_LOG("generatedKeyLenInBytes"); return CPA_STATUS_INVALID_PARAM; } } /* TLS v1.3 */ else if (cmdId >= ICP_QAT_FW_LA_CMD_HKDF_EXTRACT && cmdId <= ICP_QAT_FW_LA_CMD_HKDF_EXTRACT_AND_EXPAND_LABEL) { CpaCyKeyGenHKDFOpData *HKDF_Data = (CpaCyKeyGenHKDFOpData *)pKeyGenOpData; CpaCyKeyHKDFCipherSuite cipherSuite = hashAlgCipher; CpaCySymHashAlgorithm hashAlgorithm = getHashAlgorithmFromCipherSuiteHKDF(cipherSuite); maxSeedLen = cipherSuiteHKDFHashSizes[cipherSuite][LAC_KEY_HKDF_DIGESTS]; maxSecretLen = CPA_CY_HKDF_KEY_MAX_SECRET_SZ; maxInfoLen = CPA_CY_HKDF_KEY_MAX_INFO_SZ; maxLabelLen = CPA_CY_HKDF_KEY_MAX_LABEL_SZ; uSecretLen = HKDF_Data->secretLen; /* Check using supported hash function */ if (0 == (uOutputLen = getDigestSizeFromHashAlgo(hashAlgorithm))) { LAC_INVALID_PARAM_LOG("Hash function not supported"); return CPA_STATUS_INVALID_PARAM; } /* Number of labels does not exceed the MAX */ if (HKDF_Data->numLabels > CPA_CY_HKDF_KEY_MAX_LABEL_COUNT) { LAC_INVALID_PARAM_LOG( "CpaCyKeyGenHKDFOpData.numLabels"); return CPA_STATUS_INVALID_PARAM; } switch (cmdId) { case ICP_QAT_FW_LA_CMD_HKDF_EXTRACT: if (maxSeedLen < HKDF_Data->seedLen) { LAC_INVALID_PARAM_LOG( "CpaCyKeyGenHKDFOpData.seedLen"); return CPA_STATUS_INVALID_PARAM; } break; case ICP_QAT_FW_LA_CMD_HKDF_EXPAND: maxSecretLen = cipherSuiteHKDFHashSizes[cipherSuite] [LAC_KEY_HKDF_DIGESTS]; if (maxInfoLen < HKDF_Data->infoLen) { LAC_INVALID_PARAM_LOG( "CpaCyKeyGenHKDFOpData.infoLen"); return CPA_STATUS_INVALID_PARAM; } break; case ICP_QAT_FW_LA_CMD_HKDF_EXTRACT_AND_EXPAND: uOutputLen *= 2; if (maxSeedLen < HKDF_Data->seedLen) { LAC_INVALID_PARAM_LOG( "CpaCyKeyGenHKDFOpData.seedLen"); return CPA_STATUS_INVALID_PARAM; } if (maxInfoLen < HKDF_Data->infoLen) { LAC_INVALID_PARAM_LOG( "CpaCyKeyGenHKDFOpData.infoLen"); return CPA_STATUS_INVALID_PARAM; } break; case ICP_QAT_FW_LA_CMD_HKDF_EXPAND_LABEL: /* Fall through */ case ICP_QAT_FW_LA_CMD_HKDF_EXTRACT_AND_EXPAND_LABEL: { Cpa8U subl_mask = 0, subl_number = 1; Cpa8U i = 0; if (maxSeedLen < HKDF_Data->seedLen) { LAC_INVALID_PARAM_LOG( "CpaCyKeyGenHKDFOpData.seedLen"); return 
CPA_STATUS_INVALID_PARAM; } /* If EXPAND set uOutputLen to zero */ if (ICP_QAT_FW_LA_CMD_HKDF_EXPAND_LABEL == cmdId) { uOutputLen = 0; maxSecretLen = cipherSuiteHKDFHashSizes [cipherSuite][LAC_KEY_HKDF_DIGESTS]; } for (i = 0; i < HKDF_Data->numLabels; i++) { /* Check that the labelLen does not overflow */ if (maxLabelLen < HKDF_Data->label[i].labelLen) { LAC_INVALID_PARAM_LOG1( "CpaCyKeyGenHKDFOpData.label[%d].labelLen", i); return CPA_STATUS_INVALID_PARAM; } if (HKDF_Data->label[i].sublabelFlag & ~HKDF_SUB_LABELS_ALL) { LAC_INVALID_PARAM_LOG1( "CpaCyKeyGenHKDFOpData.label[%d]." "sublabelFlag", i); return CPA_STATUS_INVALID_PARAM; } /* Calculate the appended subLabel output * lengths and * check that the output buffer that the user * has * supplied is the correct length. */ uOutputLen += cipherSuiteHKDFHashSizes [cipherSuite][LAC_KEY_HKDF_DIGESTS]; /* Get mask of subLabel */ subl_mask = HKDF_Data->label[i].sublabelFlag; for (subl_number = 1; subl_number <= LAC_KEY_HKDF_SUBLABELS_NUM; subl_number++) { /* Add the used subLabel key lengths */ if (subl_mask & 1) { uOutputLen += cipherSuiteHKDFHashSizes [cipherSuite] [subl_number]; } subl_mask >>= 1; } } } break; default: break; } } else { LAC_INVALID_PARAM_LOG("TLS/SSL operation"); return CPA_STATUS_INVALID_PARAM; } /* Maximum secret length for TLS/SSL Key Gen request */ if (uSecretLen > maxSecretLen) { LAC_INVALID_PARAM_LOG("HKDF.secretLen/secret.dataLenInBytes"); return CPA_STATUS_INVALID_PARAM; } /* Check for enough space in the flat buffer */ if (uOutputLen > pGeneratedKeyBuffer->dataLenInBytes) { LAC_INVALID_PARAM_LOG("pGeneratedKeyBuffer->dataLenInBytes"); return CPA_STATUS_INVALID_PARAM; } return CPA_STATUS_SUCCESS; } /** * @ingroup LacSymKey * Common Keygen Code for TLS v1.0/1.1, v1.2, v1.3 and SSL3. * @description * Check user parameters and perform the required operation. * * @param[in] instanceHandle_in Instance handle. * @param[in] pKeyGenCb Pointer to callback function to be * invoked when the operation is complete. * If this is set to a NULL value the * function will operate synchronously. * @param[in] pCallbackTag Opaque User Data for this specific * call. Will be returned unchanged in the * callback. * @param[in] pKeyGenOpData Pointer to a structure containing all * the data needed to perform the key * generation operation. * @param[in] hashAlgorithm Specifies the hash algorithm to use. * According to RFC5246, this should be * "SHA-256 or a stronger standard hash * function." * @param[out] pGeneratedKeyBuffer User output buffer. * @param[in] cmdId Keygen operation to perform.
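 *
 * Flow note (summarising the body below): the instance capability
 * check runs first, so HKDF commands fail with CPA_STATUS_UNSUPPORTED
 * on devices without HKDF support; then LacSymKey_CheckParamSslTls()
 * validates the request, and finally
 * LacSymKey_KeyGenSslTls_GenCommon() builds and submits the firmware
 * message.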
*/ static CpaStatus LacSymKey_KeyGenSslTls(const CpaInstanceHandle instanceHandle_in, const CpaCyGenFlatBufCbFunc pKeyGenCb, void *pCallbackTag, const void *pKeyGenOpData, Cpa8U hashAlgorithm, CpaFlatBuffer *pGeneratedKeyBuffer, icp_qat_fw_la_cmd_id_t cmdId) { CpaStatus status = CPA_STATUS_FAIL; CpaInstanceHandle instanceHandle = LacKey_GetHandle(instanceHandle_in); CpaCyCapabilitiesInfo cyCapInfo; LAC_CHECK_INSTANCE_HANDLE(instanceHandle); SAL_CHECK_INSTANCE_TYPE(instanceHandle, (SAL_SERVICE_TYPE_CRYPTO | SAL_SERVICE_TYPE_CRYPTO_SYM)); SAL_RUNNING_CHECK(instanceHandle); SalCtrl_CyQueryCapabilities(instanceHandle, &cyCapInfo); if (IS_HKDF_UNSUPPORTED(cmdId, cyCapInfo.hkdfSupported)) { LAC_LOG_ERROR("The device does not support HKDF"); return CPA_STATUS_UNSUPPORTED; } status = LacSymKey_CheckParamSslTls(pKeyGenOpData, hashAlgorithm, pGeneratedKeyBuffer, cmdId); if (CPA_STATUS_SUCCESS != status) return status; return LacSymKey_KeyGenSslTls_GenCommon(instanceHandle, pKeyGenCb, pCallbackTag, cmdId, LAC_CONST_PTR_CAST( pKeyGenOpData), hashAlgorithm, pGeneratedKeyBuffer); } /** * @ingroup LacSymKey * SSL Key Generation Function. * @description * This function is used for SSL key generation. It implements the key * generation function defined in section 6.2.2 of the SSL 3.0 * specification as described in * http://www.mozilla.org/projects/security/pki/nss/ssl/draft302.txt. * * The input seed is taken as a flat buffer and the generated key is * returned to caller in a flat destination data buffer. * * @param[in] instanceHandle_in Instance handle. * @param[in] pKeyGenCb Pointer to callback function to be * invoked when the operation is complete. * If this is set to a NULL value the * function will operate synchronously. * @param[in] pCallbackTag Opaque User Data for this specific * call. Will be returned unchanged in the * callback. * @param[in] pKeyGenSslOpData Pointer to a structure containing all * the data needed to perform the SSL key * generation operation. The client code * allocates the memory for this * structure. This component takes * ownership of the memory until it is * returned in the callback. * @param[out] pGeneratedKeyBuffer Caller MUST allocate a sufficient * buffer to hold the key generation * output. The data pointer SHOULD be * aligned on an 8-byte boundary. The * length field passed in represents the * size of the buffer in bytes. The value * that is returned is the size of the * result key in bytes. * On invocation the callback function * will contain this parameter in the * pOut parameter. * * @retval CPA_STATUS_SUCCESS Function executed successfully. * @retval CPA_STATUS_FAIL Function failed. * @retval CPA_STATUS_RETRY Resubmit the request. * @retval CPA_STATUS_INVALID_PARAM Invalid parameter passed in. * @retval CPA_STATUS_RESOURCE Error related to system resources. */ CpaStatus cpaCyKeyGenSsl(const CpaInstanceHandle instanceHandle_in, const CpaCyGenFlatBufCbFunc pKeyGenCb, void *pCallbackTag, const CpaCyKeyGenSslOpData *pKeyGenSslOpData, CpaFlatBuffer *pGeneratedKeyBuffer) { CpaInstanceHandle instanceHandle = NULL; if (CPA_INSTANCE_HANDLE_SINGLE == instanceHandle_in) { instanceHandle = Lac_GetFirstHandle(SAL_SERVICE_TYPE_CRYPTO_SYM); } else { instanceHandle = instanceHandle_in; } return LacSymKey_KeyGenSslTls(instanceHandle, pKeyGenCb, pCallbackTag, LAC_CONST_PTR_CAST(pKeyGenSslOpData), CPA_CY_SYM_HASH_NONE, /* Hash algorithm */ pGeneratedKeyBuffer, ICP_QAT_FW_LA_CMD_SSL3_KEY_DERIVE); } /** * @ingroup LacSymKey * TLS Key Generation Function. 
* @description * This function is used for TLS key generation. It implements the * TLS PRF (Pseudo Random Function) as defined by RFC2246 (TLS v1.0) * and RFC4346 (TLS v1.1). * * The input seed is taken as a flat buffer and the generated key is * returned to caller in a flat destination data buffer. * * @param[in] instanceHandle_in Instance handle. * @param[in] pKeyGenCb Pointer to callback function to be * invoked when the operation is complete. * If this is set to a NULL value the * function will operate synchronously. * @param[in] pCallbackTag Opaque User Data for this specific * call. Will be returned unchanged in the * callback. * @param[in] pKeyGenTlsOpData Pointer to a structure containing all * the data needed to perform the TLS key * generation operation. The client code * allocates the memory for this * structure. This component takes * ownership of the memory until it is * returned in the callback. * @param[out] pGeneratedKeyBuffer Caller MUST allocate a sufficient * buffer to hold the key generation * output. The data pointer SHOULD be * aligned on an 8-byte boundary. The * length field passed in represents the * size of the buffer in bytes. The value * that is returned is the size of the * result key in bytes. * On invocation the callback function * will contain this parameter in the * pOut parameter. * * @retval CPA_STATUS_SUCCESS Function executed successfully. * @retval CPA_STATUS_FAIL Function failed. * @retval CPA_STATUS_RETRY Resubmit the request. * @retval CPA_STATUS_INVALID_PARAM Invalid parameter passed in. * @retval CPA_STATUS_RESOURCE Error related to system resources. * */ CpaStatus cpaCyKeyGenTls(const CpaInstanceHandle instanceHandle_in, const CpaCyGenFlatBufCbFunc pKeyGenCb, void *pCallbackTag, const CpaCyKeyGenTlsOpData *pKeyGenTlsOpData, CpaFlatBuffer *pGeneratedKeyBuffer) { CpaInstanceHandle instanceHandle = NULL; if (CPA_INSTANCE_HANDLE_SINGLE == instanceHandle_in) { instanceHandle = Lac_GetFirstHandle(SAL_SERVICE_TYPE_CRYPTO_SYM); } else { instanceHandle = instanceHandle_in; } return LacSymKey_KeyGenSslTls(instanceHandle, pKeyGenCb, pCallbackTag, LAC_CONST_PTR_CAST(pKeyGenTlsOpData), CPA_CY_SYM_HASH_NONE, /* Hash algorithm */ pGeneratedKeyBuffer, ICP_QAT_FW_LA_CMD_TLS_V1_1_KEY_DERIVE); } /** * @ingroup LacSymKey * @description * This function is used for TLS key generation. It implements the * TLS PRF (Pseudo Random Function) as defined by RFC5246 (TLS v1.2). * * The input seed is taken as a flat buffer and the generated key is * returned to caller in a flat destination data buffer. * * @param[in] instanceHandle_in Instance handle. * @param[in] pKeyGenCb Pointer to callback function to be * invoked when the operation is complete. * If this is set to a NULL value the * function will operate synchronously. * @param[in] pCallbackTag Opaque User Data for this specific * call. Will be returned unchanged in the * callback. * @param[in] pKeyGenTlsOpData Pointer to a structure containing all * the data needed to perform the TLS key * generation operation. The client code * allocates the memory for this * structure. This component takes * ownership of the memory until it is * returned in the callback. * @param[in] hashAlgorithm Specifies the hash algorithm to use. * According to RFC5246, this should be * "SHA-256 or a stronger standard hash * function." * @param[out] pGeneratedKeyBuffer Caller MUST allocate a sufficient * buffer to hold the key generation * output. The data pointer SHOULD be * aligned on an 8-byte boundary. 
The * length field passed in represents the * size of the buffer in bytes. The value * that is returned is the size of the * result key in bytes. * On invocation the callback function * will contain this parameter in the * pOut parameter. * * @retval CPA_STATUS_SUCCESS Function executed successfully. * @retval CPA_STATUS_FAIL Function failed. * @retval CPA_STATUS_RETRY Resubmit the request. * @retval CPA_STATUS_INVALID_PARAM Invalid parameter passed in. * @retval CPA_STATUS_RESOURCE Error related to system resources. */ CpaStatus cpaCyKeyGenTls2(const CpaInstanceHandle instanceHandle_in, const CpaCyGenFlatBufCbFunc pKeyGenCb, void *pCallbackTag, const CpaCyKeyGenTlsOpData *pKeyGenTlsOpData, CpaCySymHashAlgorithm hashAlgorithm, CpaFlatBuffer *pGeneratedKeyBuffer) { CpaInstanceHandle instanceHandle = NULL; if (CPA_INSTANCE_HANDLE_SINGLE == instanceHandle_in) { instanceHandle = Lac_GetFirstHandle(SAL_SERVICE_TYPE_CRYPTO_SYM); } else { instanceHandle = instanceHandle_in; } return LacSymKey_KeyGenSslTls(instanceHandle, pKeyGenCb, pCallbackTag, LAC_CONST_PTR_CAST(pKeyGenTlsOpData), hashAlgorithm, pGeneratedKeyBuffer, ICP_QAT_FW_LA_CMD_TLS_V1_2_KEY_DERIVE); } /** * @ingroup LacSymKey * @description * This function is used for TLS1.3 HKDF key generation. It implements * the "extract-then-expand" paradigm as defined by RFC 5869. * * The input seed/secret/info is taken as a flat buffer and the generated * key(s)/labels are returned to caller in a flat data buffer. * * @param[in] instanceHandle_in Instance handle. * @param[in] pKeyGenCb Pointer to callback function to be * invoked when the operation is complete. * If this is set to a NULL value the * function will operate synchronously. * @param[in] pCallbackTag Opaque User Data for this specific * call. Will be returned unchanged in the * callback. * @param[in] pKeyGenTlsOpData Pointer to a structure containing * the data needed to perform the HKDF key * generation operation. * The client code allocates the memory * for this structure as contiguous * pinned memory. * This component takes ownership of the * memory until it is returned in the * callback. * @param[in] hashAlgorithm Specifies the hash algorithm to use. * According to RFC5246, this should be * "SHA-256 or a stronger standard hash * function." * @param[out] pGeneratedKeyBuffer Caller MUST allocate a sufficient * buffer to hold the key generation * output. The data pointer SHOULD be * aligned on an 8-byte boundary. The * length field passed in represents the * size of the buffer in bytes. The value * that is returned is the size of the * result key in bytes. * On invocation the callback function * will contain this parameter in the * pOut parameter. * * @retval CPA_STATUS_SUCCESS Function executed successfully. * @retval CPA_STATUS_FAIL Function failed. * @retval CPA_STATUS_RETRY Resubmit the request. * @retval CPA_STATUS_INVALID_PARAM Invalid parameter passed in. * @retval CPA_STATUS_RESOURCE Error related to system resources. 
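 *
 * A minimal synchronous usage sketch (illustrative only; the op data
 * must be allocated as contiguous pinned memory per the note above,
 * and the names inst, pOp and out are hypothetical):
 *
 *   pOp->hkdfKeyOp = CPA_CY_HKDF_KEY_EXTRACT_EXPAND;
 *   // fill pOp->secret/seed and set pOp->secretLen/seedLen
 *   // NULL callback and tag => synchronous operation
 *   status = cpaCyKeyGenTls3(inst, NULL, NULL, pOp,
 *                            CPA_CY_HKDF_TLS_AES_128_GCM_SHA256,
 *                            &out);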
*/ CpaStatus cpaCyKeyGenTls3(const CpaInstanceHandle instanceHandle_in, const CpaCyGenFlatBufCbFunc pKeyGenCb, void *pCallbackTag, const CpaCyKeyGenHKDFOpData *pKeyGenTlsOpData, CpaCyKeyHKDFCipherSuite cipherSuite, CpaFlatBuffer *pGeneratedKeyBuffer) { LAC_CHECK_NULL_PARAM(pKeyGenTlsOpData); switch (pKeyGenTlsOpData->hkdfKeyOp) { case CPA_CY_HKDF_KEY_EXTRACT: /* Fall through */ case CPA_CY_HKDF_KEY_EXPAND: case CPA_CY_HKDF_KEY_EXTRACT_EXPAND: case CPA_CY_HKDF_KEY_EXPAND_LABEL: case CPA_CY_HKDF_KEY_EXTRACT_EXPAND_LABEL: break; default: LAC_INVALID_PARAM_LOG("HKDF operation not supported"); return CPA_STATUS_INVALID_PARAM; } return LacSymKey_KeyGenSslTls(instanceHandle_in, pKeyGenCb, pCallbackTag, LAC_CONST_PTR_CAST(pKeyGenTlsOpData), cipherSuite, pGeneratedKeyBuffer, (icp_qat_fw_la_cmd_id_t) pKeyGenTlsOpData->hkdfKeyOp); } /* * LacSymKey_Init */ CpaStatus LacSymKey_Init(CpaInstanceHandle instanceHandle_in) { CpaStatus status = CPA_STATUS_SUCCESS; CpaInstanceHandle instanceHandle = LacKey_GetHandle(instanceHandle_in); sal_crypto_service_t *pService = NULL; LAC_CHECK_INSTANCE_HANDLE(instanceHandle); pService = (sal_crypto_service_t *)instanceHandle; pService->pLacKeyStats = LAC_OS_MALLOC(LAC_KEY_NUM_STATS * sizeof(QatUtilsAtomic)); if (NULL != pService->pLacKeyStats) { LAC_OS_BZERO((void *)pService->pLacKeyStats, LAC_KEY_NUM_STATS * sizeof(QatUtilsAtomic)); status = LAC_OS_CAMALLOC(&pService->pSslLabel, ICP_QAT_FW_LA_SSL_LABEL_LEN_MAX, LAC_8BYTE_ALIGNMENT, pService->nodeAffinity); } else { status = CPA_STATUS_RESOURCE; } if (CPA_STATUS_SUCCESS == status) { Cpa32U i = 0; Cpa32U offset = 0; /* Initialise SSL label ABBCCC..... */ for (i = 0; i < ICP_QAT_FW_LA_SSL_ITERATES_LEN_MAX; i++) { memset(pService->pSslLabel + offset, 'A' + i, i + 1); offset += (i + 1); } /* Allocate memory for TLS labels */ status = LAC_OS_CAMALLOC(&pService->pTlsLabel, sizeof(lac_sym_key_tls_labels_t), LAC_8BYTE_ALIGNMENT, pService->nodeAffinity); } if (CPA_STATUS_SUCCESS == status) { /* Allocate memory for HKDF sub_labels */ status = LAC_OS_CAMALLOC(&pService->pTlsHKDFSubLabel, sizeof(lac_sym_key_tls_hkdf_sub_labels_t), LAC_8BYTE_ALIGNMENT, pService->nodeAffinity); } if (CPA_STATUS_SUCCESS == status) { LAC_OS_BZERO(pService->pTlsLabel, sizeof(lac_sym_key_tls_labels_t)); /* Copy the TLS v1.2 labels into the dynamically allocated * structure */ memcpy(pService->pTlsLabel->masterSecret, LAC_SYM_KEY_TLS_MASTER_SECRET_LABEL, sizeof(LAC_SYM_KEY_TLS_MASTER_SECRET_LABEL) - 1); memcpy(pService->pTlsLabel->keyMaterial, LAC_SYM_KEY_TLS_KEY_MATERIAL_LABEL, sizeof(LAC_SYM_KEY_TLS_KEY_MATERIAL_LABEL) - 1); memcpy(pService->pTlsLabel->clientFinished, LAC_SYM_KEY_TLS_CLIENT_FIN_LABEL, sizeof(LAC_SYM_KEY_TLS_CLIENT_FIN_LABEL) - 1); memcpy(pService->pTlsLabel->serverFinished, LAC_SYM_KEY_TLS_SERVER_FIN_LABEL, sizeof(LAC_SYM_KEY_TLS_SERVER_FIN_LABEL) - 1); LAC_OS_BZERO(pService->pTlsHKDFSubLabel, sizeof(lac_sym_key_tls_hkdf_sub_labels_t)); /* Copy the TLS v1.3 subLabels into the dynamically allocated * struct */ /* KEY SHA-256 */ memcpy(&pService->pTlsHKDFSubLabel->keySublabel256, &key256, HKDF_SUB_LABEL_KEY_LENGTH); pService->pTlsHKDFSubLabel->keySublabel256.labelLen = HKDF_SUB_LABEL_KEY_LENGTH; pService->pTlsHKDFSubLabel->keySublabel256.sublabelFlag = 1 << QAT_FW_HKDF_INNER_SUBLABEL_16_BYTE_OKM_BITPOS; /* KEY SHA-384 */ memcpy(&pService->pTlsHKDFSubLabel->keySublabel384, &key384, HKDF_SUB_LABEL_KEY_LENGTH); pService->pTlsHKDFSubLabel->keySublabel384.labelLen = HKDF_SUB_LABEL_KEY_LENGTH; 
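/* Each key/iv sub-label advertises the OKM size it yields through a
 * one-hot sublabelFlag (16- or 32-byte keys, 12-byte IVs); the
 * resumption and finished sub-labels below only set labelLen and
 * leave the flag clear. */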
pService->pTlsHKDFSubLabel->keySublabel384.sublabelFlag = 1 << QAT_FW_HKDF_INNER_SUBLABEL_32_BYTE_OKM_BITPOS; /* KEY CHACHAPOLY */ memcpy(&pService->pTlsHKDFSubLabel->keySublabelChaChaPoly, &keyChaChaPoly, HKDF_SUB_LABEL_KEY_LENGTH); pService->pTlsHKDFSubLabel->keySublabelChaChaPoly.labelLen = HKDF_SUB_LABEL_KEY_LENGTH; pService->pTlsHKDFSubLabel->keySublabelChaChaPoly.sublabelFlag = 1 << QAT_FW_HKDF_INNER_SUBLABEL_32_BYTE_OKM_BITPOS; /* IV SHA-256 */ memcpy(&pService->pTlsHKDFSubLabel->ivSublabel256, &iv256, HKDF_SUB_LABEL_IV_LENGTH); pService->pTlsHKDFSubLabel->ivSublabel256.labelLen = HKDF_SUB_LABEL_IV_LENGTH; pService->pTlsHKDFSubLabel->ivSublabel256.sublabelFlag = 1 << QAT_FW_HKDF_INNER_SUBLABEL_12_BYTE_OKM_BITPOS; /* IV SHA-384 */ memcpy(&pService->pTlsHKDFSubLabel->ivSublabel384, &iv384, HKDF_SUB_LABEL_IV_LENGTH); pService->pTlsHKDFSubLabel->ivSublabel384.labelLen = HKDF_SUB_LABEL_IV_LENGTH; pService->pTlsHKDFSubLabel->ivSublabel384.sublabelFlag = 1 << QAT_FW_HKDF_INNER_SUBLABEL_12_BYTE_OKM_BITPOS; /* IV CHACHAPOLY */ memcpy(&pService->pTlsHKDFSubLabel->ivSublabelChaChaPoly, &iv256, HKDF_SUB_LABEL_IV_LENGTH); pService->pTlsHKDFSubLabel->ivSublabelChaChaPoly.labelLen = HKDF_SUB_LABEL_IV_LENGTH; pService->pTlsHKDFSubLabel->ivSublabelChaChaPoly.sublabelFlag = 1 << QAT_FW_HKDF_INNER_SUBLABEL_12_BYTE_OKM_BITPOS; /* RESUMPTION SHA-256 */ memcpy(&pService->pTlsHKDFSubLabel->resumptionSublabel256, &resumption256, HKDF_SUB_LABEL_RESUMPTION_LENGTH); pService->pTlsHKDFSubLabel->resumptionSublabel256.labelLen = HKDF_SUB_LABEL_RESUMPTION_LENGTH; /* RESUMPTION SHA-384 */ memcpy(&pService->pTlsHKDFSubLabel->resumptionSublabel384, &resumption384, HKDF_SUB_LABEL_RESUMPTION_LENGTH); pService->pTlsHKDFSubLabel->resumptionSublabel384.labelLen = HKDF_SUB_LABEL_RESUMPTION_LENGTH; /* RESUMPTION CHACHAPOLY */ memcpy( &pService->pTlsHKDFSubLabel->resumptionSublabelChaChaPoly, &resumption256, HKDF_SUB_LABEL_RESUMPTION_LENGTH); pService->pTlsHKDFSubLabel->resumptionSublabelChaChaPoly .labelLen = HKDF_SUB_LABEL_RESUMPTION_LENGTH; /* FINISHED SHA-256 */ memcpy(&pService->pTlsHKDFSubLabel->finishedSublabel256, &finished256, HKDF_SUB_LABEL_FINISHED_LENGTH); pService->pTlsHKDFSubLabel->finishedSublabel256.labelLen = HKDF_SUB_LABEL_FINISHED_LENGTH; /* FINISHED SHA-384 */ memcpy(&pService->pTlsHKDFSubLabel->finishedSublabel384, &finished384, HKDF_SUB_LABEL_FINISHED_LENGTH); pService->pTlsHKDFSubLabel->finishedSublabel384.labelLen = HKDF_SUB_LABEL_FINISHED_LENGTH; /* FINISHED CHACHAPOLY */ memcpy(&pService->pTlsHKDFSubLabel->finishedSublabelChaChaPoly, &finished256, HKDF_SUB_LABEL_FINISHED_LENGTH); pService->pTlsHKDFSubLabel->finishedSublabelChaChaPoly .labelLen = HKDF_SUB_LABEL_FINISHED_LENGTH; /* Set physical address of sublabels */ pService->pTlsHKDFSubLabel->sublabelPhysAddr256 = LAC_OS_VIRT_TO_PHYS_INTERNAL( &pService->pTlsHKDFSubLabel->keySublabel256); pService->pTlsHKDFSubLabel->sublabelPhysAddr384 = LAC_OS_VIRT_TO_PHYS_INTERNAL( &pService->pTlsHKDFSubLabel->keySublabel384); pService->pTlsHKDFSubLabel->sublabelPhysAddrChaChaPoly = LAC_OS_VIRT_TO_PHYS_INTERNAL( &pService->pTlsHKDFSubLabel->keySublabelChaChaPoly); /* Register request handlers */ LacSymQat_RespHandlerRegister(ICP_QAT_FW_LA_CMD_SSL3_KEY_DERIVE, LacSymKey_SslTlsHandleResponse); LacSymQat_RespHandlerRegister( ICP_QAT_FW_LA_CMD_TLS_V1_1_KEY_DERIVE, LacSymKey_SslTlsHandleResponse); LacSymQat_RespHandlerRegister( ICP_QAT_FW_LA_CMD_TLS_V1_2_KEY_DERIVE, LacSymKey_SslTlsHandleResponse); LacSymQat_RespHandlerRegister(ICP_QAT_FW_LA_CMD_HKDF_EXTRACT, 
LacSymKey_SslTlsHandleResponse); LacSymQat_RespHandlerRegister(ICP_QAT_FW_LA_CMD_HKDF_EXPAND, LacSymKey_SslTlsHandleResponse); LacSymQat_RespHandlerRegister( ICP_QAT_FW_LA_CMD_HKDF_EXTRACT_AND_EXPAND, LacSymKey_SslTlsHandleResponse); LacSymQat_RespHandlerRegister( ICP_QAT_FW_LA_CMD_HKDF_EXPAND_LABEL, LacSymKey_SslTlsHandleResponse); LacSymQat_RespHandlerRegister( ICP_QAT_FW_LA_CMD_HKDF_EXTRACT_AND_EXPAND_LABEL, LacSymKey_SslTlsHandleResponse); LacSymQat_RespHandlerRegister(ICP_QAT_FW_LA_CMD_MGF1, LacSymKey_MgfHandleResponse); } if (CPA_STATUS_SUCCESS != status) { LAC_OS_FREE(pService->pLacKeyStats); LAC_OS_CAFREE(pService->pSslLabel); LAC_OS_CAFREE(pService->pTlsLabel); LAC_OS_CAFREE(pService->pTlsHKDFSubLabel); } return status; } /* * LacSymKey_Shutdown */ CpaStatus LacSymKey_Shutdown(CpaInstanceHandle instanceHandle_in) { CpaStatus status = CPA_STATUS_SUCCESS; CpaInstanceHandle instanceHandle = LacKey_GetHandle(instanceHandle_in); sal_crypto_service_t *pService = NULL; LAC_CHECK_INSTANCE_HANDLE(instanceHandle); pService = (sal_crypto_service_t *)instanceHandle; if (NULL != pService->pLacKeyStats) { LAC_OS_FREE(pService->pLacKeyStats); } LAC_OS_CAFREE(pService->pSslLabel); LAC_OS_CAFREE(pService->pTlsLabel); LAC_OS_CAFREE(pService->pTlsHKDFSubLabel); return status; } diff --git a/sys/dev/qat/qat_api/common/crypto/sym/lac_sym_alg_chain.c b/sys/dev/qat/qat_api/common/crypto/sym/lac_sym_alg_chain.c index da8b1ca4b91b..9234b649cf2f 100644 --- a/sys/dev/qat/qat_api/common/crypto/sym/lac_sym_alg_chain.c +++ b/sys/dev/qat/qat_api/common/crypto/sym/lac_sym_alg_chain.c @@ -1,1860 +1,2395 @@ /*************************************************************************** * * * ***************************************************************************/ /** *************************************************************************** * @file lac_sym_alg_chain.c Algorithm Chaining Perform * * @ingroup LacAlgChain ***************************************************************************/ /* ******************************************************************************* * Include public/global header files ******************************************************************************* */ #include "cpa.h" #include "cpa_cy_sym.h" #include "icp_accel_devices.h" #include "icp_adf_init.h" #include "icp_adf_transport.h" #include "icp_adf_debug.h" /* ******************************************************************************* * Include private header files ******************************************************************************* */ #include "lac_mem.h" #include "lac_log.h" #include "lac_sym.h" #include "lac_list.h" #include "icp_qat_fw_la.h" #include "lac_sal_types_crypto.h" #include "lac_sal.h" #include "lac_sal_ctrl.h" #include "lac_sym_alg_chain.h" #include "lac_sym_cipher.h" #include "lac_sym_cipher_defs.h" #include "lac_sym_hash.h" #include "lac_sym_hash_defs.h" #include "lac_sym_qat_cipher.h" #include "lac_sym_qat_hash.h" #include "lac_sym_stats.h" #include "lac_sym_queue.h" #include "lac_sym_cb.h" #include "sal_string_parse.h" #include "lac_sym_auth_enc.h" #include "lac_sym_qat.h" +#include "sal_hw_gen.h" + +/** + * @ingroup LacAlgChain + * This callback function will be invoked whenever a hash precompute + * operation completes. It will dequeue and send any QAT requests + * which were queued up while the precompute was in progress. + * + * @param[in] callbackTag Opaque value provided by user. This will + * be a pointer to the session descriptor. 
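+ * (Registered as the completion callback when
+ * LacHash_PrecomputeDataCreate() is invoked from this file, e.g. in
+ * LacAlgChain_SessionAuthKeyUpdate().)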
+ * + * @retval + * None + * + */ +static void +LacSymAlgChain_HashPrecomputeDoneCb(void *callbackTag) +{ + LacSymCb_PendingReqsDequeue((lac_session_desc_t *)callbackTag); +} + +/** + * @ingroup LacAlgChain + * Walk the buffer list and find the address for the given offset within + * a buffer. + * + * @param[in] pBufferList Buffer List + * @param[in] packetOffset Offset in the buffer list for which address + * is to be found. + * @param[out] ppDataPtr This is where the sought pointer will be put + * @param[out] pSpaceLeft Pointer to a variable in which information about + * available space from the given offset to the end + * of the flat buffer it is located in will be returned + * + * @retval CPA_STATUS_SUCCESS Address with a given offset is found in the list + * @retval CPA_STATUS_FAIL Address with a given offset not found in the list. + * + */ +static CpaStatus +LacSymAlgChain_PtrFromOffsetGet(const CpaBufferList *pBufferList, + const Cpa32U packetOffset, + Cpa8U **ppDataPtr) +{ + Cpa32U currentOffset = 0; + Cpa32U i = 0; + + for (i = 0; i < pBufferList->numBuffers; i++) { + Cpa8U *pCurrData = pBufferList->pBuffers[i].pData; + Cpa32U currDataSize = pBufferList->pBuffers[i].dataLenInBytes; + + /* If the offset is within the address space of the current + * buffer */ + if ((packetOffset >= currentOffset) && + (packetOffset < (currentOffset + currDataSize))) { + /* increment by offset of the address in the current + * buffer */ + *ppDataPtr = pCurrData + (packetOffset - currentOffset); + return CPA_STATUS_SUCCESS; + } + + /* Increment by the size of the buffer */ + currentOffset += currDataSize; + } + + return CPA_STATUS_FAIL; +} /** * @ingroup LacAlgChain * Function which checks for support of partial packets for symmetric * crypto operations * * @param[in] pService Pointer to service descriptor * @param[in/out] pSessionDesc Pointer to session descriptor * */ static void LacSymCheck_IsPartialSupported(Cpa32U capabilitiesMask, lac_session_desc_t *pSessionDesc) { CpaBoolean isHashPartialSupported = CPA_FALSE; CpaBoolean isCipherPartialSupported = CPA_FALSE; CpaBoolean isPartialSupported = CPA_FALSE; switch (pSessionDesc->cipherAlgorithm) { /* Following ciphers don't support partial */ case CPA_CY_SYM_CIPHER_KASUMI_F8: case CPA_CY_SYM_CIPHER_AES_F8: case CPA_CY_SYM_CIPHER_SNOW3G_UEA2: case CPA_CY_SYM_CIPHER_CHACHA: case CPA_CY_SYM_CIPHER_ZUC_EEA3: break; /* All others support partial */ default: isCipherPartialSupported = CPA_TRUE; + break; } switch (pSessionDesc->hashAlgorithm) { /* Following hash don't support partial */ case CPA_CY_SYM_HASH_KASUMI_F9: case CPA_CY_SYM_HASH_SNOW3G_UIA2: case CPA_CY_SYM_HASH_POLY: case CPA_CY_SYM_HASH_ZUC_EIA3: - case CPA_CY_SYM_HASH_SHAKE_128: - case CPA_CY_SYM_HASH_SHAKE_256: break; /* Following hash may support partial based on device capabilities */ case CPA_CY_SYM_HASH_SHA3_256: if (ICP_ACCEL_CAPABILITIES_SHA3_EXT & capabilitiesMask) { isHashPartialSupported = CPA_TRUE; } break; /* All others support partial */ default: isHashPartialSupported = CPA_TRUE; + break; } switch (pSessionDesc->symOperation) { case CPA_CY_SYM_OP_CIPHER: isPartialSupported = isCipherPartialSupported; break; case CPA_CY_SYM_OP_HASH: isPartialSupported = isHashPartialSupported; break; case CPA_CY_SYM_OP_ALGORITHM_CHAINING: if (isCipherPartialSupported && isHashPartialSupported) { isPartialSupported = CPA_TRUE; } break; case CPA_CY_SYM_OP_NONE: break; + default: + break; + } + + if (ICP_QAT_FW_LA_USE_UCS_SLICE_TYPE == pSessionDesc->cipherSliceType) { + /* UCS slice has no support 
for state flush and + * because of that is not able to do partial processing */ + isPartialSupported = CPA_FALSE; } + pSessionDesc->isPartialSupported = isPartialSupported; } -/** - * @ingroup LacAlgChain - * This callback function will be invoked whenever a hash precompute - * operation completes. It will dequeue and send any QAT requests - * which were queued up while the precompute was in progress. - * - * @param[in] callbackTag Opaque value provided by user. This will - * be a pointer to the session descriptor. - * - * @retval - * None - * - */ static void -LacSymAlgChain_HashPrecomputeDoneCb(void *callbackTag) +LacAlgChain_CipherCDBuild_ForOptimisedCD( + const CpaCySymCipherSetupData *pCipherData, + lac_session_desc_t *pSessionDesc, + icp_qat_fw_slice_t nextSlice, + Cpa8U cipherOffsetInConstantsTable, + Cpa8U *pOptimisedHwBlockBaseInDRAM, + Cpa32U *pOptimisedHwBlockOffsetInDRAM) { - LacSymCb_PendingReqsDequeue((lac_session_desc_t *)callbackTag); -} + Cpa8U *pCipherKeyField = NULL; + Cpa32U sizeInBytes = 0; + pCipherKeyField = pOptimisedHwBlockBaseInDRAM; -/** - * @ingroup LacAlgChain - * Walk the buffer list and find the address for the given offset within - * a buffer. - * - * @param[in] pBufferList Buffer List - * @param[in] packetOffset Offset in the buffer list for which address - * is to be found. - * @param[out] ppDataPtr This is where the sought pointer will be put - * @param[out] pSpaceLeft Pointer to a variable in which information about - * available space from the given offset to the end - * of the flat buffer it is located in will be returned - * - * @retval CPA_STATUS_SUCCESS Address with a given offset is found in the list - * @retval CPA_STATUS_FAIL Address with a given offset not found in the list. - * - */ -static CpaStatus -LacSymAlgChain_PtrFromOffsetGet(const CpaBufferList *pBufferList, - const Cpa32U packetOffset, - Cpa8U **ppDataPtr) -{ - Cpa32U currentOffset = 0; - Cpa32U i = 0; + /* Need to build up the alternative CD for SHRAM Constants Table use + * with an optimised content desc of 64 bytes for this case. Cipher key + * will be in the Content desc in DRAM, The cipher config data + * is now in the SHRAM constants table. 
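+ * (The cipher control block written below points the firmware at
+ * the constants-table entry via cipherOffsetInConstantsTable; only
+ * the key setup populated here remains in the DRAM descriptor.)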
*/ - for (i = 0; i < pBufferList->numBuffers; i++) { - Cpa8U *pCurrData = pBufferList->pBuffers[i].pData; - Cpa32U currDataSize = pBufferList->pBuffers[i].dataLenInBytes; + LacSymQat_CipherHwBlockPopulateKeySetup( + pSessionDesc, + pCipherData, + pCipherData->cipherKeyLenInBytes, + pSessionDesc->cipherSliceType, + pCipherKeyField, + &sizeInBytes); - /* If the offset is within the address space of the current - * buffer */ - if ((packetOffset >= currentOffset) && - (packetOffset < (currentOffset + currDataSize))) { - /* increment by offset of the address in the current - * buffer */ - *ppDataPtr = pCurrData + (packetOffset - currentOffset); - return CPA_STATUS_SUCCESS; - } + LacSymQat_CipherCtrlBlockWrite(&(pSessionDesc->shramReqCacheFtr), + pSessionDesc->cipherAlgorithm, + pSessionDesc->cipherKeyLenInBytes, + pSessionDesc->cipherSliceType, + nextSlice, + cipherOffsetInConstantsTable); - /* Increment by the size of the buffer */ - currentOffset += currDataSize; - } + *pOptimisedHwBlockOffsetInDRAM += sizeInBytes; +} - return CPA_STATUS_FAIL; +static void +LacAlgChain_CipherCDBuild_ForSHRAM(const CpaCySymCipherSetupData *pCipherData, + lac_session_desc_t *pSessionDesc, + icp_qat_fw_slice_t nextSlice, + Cpa8U cipherOffsetInConstantsTable) +{ + Cpa32U sizeInBytes = 0; + Cpa8U *pCipherKeyField = NULL; + /* Need to build up the alternative CD for SHRAM Constants Table use + * Cipher key will be in the Request, The cipher config data is now in + * the SHRAM constants table. And nothing is now stored in the content + * desc */ + pCipherKeyField = (Cpa8U *)&( + pSessionDesc->shramReqCacheHdr.cd_pars.s1.serv_specif_fields); + + LacSymQat_CipherHwBlockPopulateKeySetup( + pSessionDesc, + pCipherData, + pCipherData->cipherKeyLenInBytes, + pSessionDesc->cipherSliceType, + pCipherKeyField, + &sizeInBytes); + + LacSymQat_CipherCtrlBlockWrite(&(pSessionDesc->shramReqCacheFtr), + pSessionDesc->cipherAlgorithm, + pSessionDesc->cipherKeyLenInBytes, + pSessionDesc->cipherSliceType, + nextSlice, + cipherOffsetInConstantsTable); } static void LacAlgChain_CipherCDBuild(const CpaCySymCipherSetupData *pCipherData, lac_session_desc_t *pSessionDesc, icp_qat_fw_slice_t nextSlice, Cpa8U cipherOffsetInConstantsTable, icp_qat_fw_comn_flags *pCmnRequestFlags, icp_qat_fw_serv_specif_flags *pLaCmdFlags, Cpa8U *pHwBlockBaseInDRAM, - Cpa32U *pHwBlockOffsetInDRAM) + Cpa32U *pHwBlockOffsetInDRAM, + Cpa32U capabilitiesMask) { Cpa8U *pCipherKeyField = NULL; Cpa8U cipherOffsetInReqQW = 0; Cpa32U sizeInBytes = 0; + void *pCfgData = NULL; + Cpa32U cfgOffset = 0; /* Construct the ContentDescriptor in DRAM */ cipherOffsetInReqQW = (*pHwBlockOffsetInDRAM / LAC_QUAD_WORD_IN_BYTES); ICP_QAT_FW_LA_CIPH_AUTH_CFG_OFFSET_FLAG_SET( *pLaCmdFlags, ICP_QAT_FW_CIPH_AUTH_CFG_OFFSET_IN_CD_SETUP); /* construct cipherConfig in CD in DRAM */ + cfgOffset = *pHwBlockOffsetInDRAM; + pCfgData = pHwBlockBaseInDRAM + cfgOffset; LacSymQat_CipherHwBlockPopulateCfgData(pSessionDesc, - pHwBlockBaseInDRAM + - *pHwBlockOffsetInDRAM, + pCfgData, &sizeInBytes); + ICP_QAT_FW_LA_SLICE_TYPE_SET(*pLaCmdFlags, + pSessionDesc->cipherSliceType); + *pHwBlockOffsetInDRAM += sizeInBytes; /* Cipher key will be in CD in DRAM. * The Request contains a ptr to the CD. * This ptr will be copied into the request later once the CD is * fully constructed, but the flag is set here. 
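 * (Layout: within the DRAM content descriptor the cipher config data
 * written above comes first, at cipherOffsetInReqQW quad-words,
 * immediately followed by the key material populated below.)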
*/ pCipherKeyField = pHwBlockBaseInDRAM + *pHwBlockOffsetInDRAM; ICP_QAT_FW_COMN_CD_FLD_TYPE_SET(*pCmnRequestFlags, QAT_COMN_CD_FLD_TYPE_64BIT_ADR); LacSymQat_CipherHwBlockPopulateKeySetup( + pSessionDesc, pCipherData, pCipherData->cipherKeyLenInBytes, + pSessionDesc->cipherSliceType, pCipherKeyField, &sizeInBytes); /* update offset */ *pHwBlockOffsetInDRAM += sizeInBytes; LacSymQat_CipherCtrlBlockWrite(&(pSessionDesc->reqCacheFtr), pSessionDesc->cipherAlgorithm, pSessionDesc->cipherKeyLenInBytes, + pSessionDesc->cipherSliceType, nextSlice, cipherOffsetInReqQW); - if (LAC_CIPHER_IS_GCM(pSessionDesc->cipherAlgorithm) || - LAC_CIPHER_IS_CHACHA(pSessionDesc->cipherAlgorithm)) { + if (NON_SPC != pSessionDesc->singlePassState) { LacSymQat_CipherCtrlBlockWrite( &(pSessionDesc->reqSpcCacheFtr), pSessionDesc->cipherAlgorithm, pSessionDesc->cipherKeyLenInBytes, + pSessionDesc->cipherSliceType, ICP_QAT_FW_SLICE_DRAM_WR, cipherOffsetInReqQW); } } static void LacAlgChain_HashCDBuild( const CpaCySymHashSetupData *pHashData, CpaInstanceHandle instanceHandle, lac_session_desc_t *pSessionDesc, icp_qat_fw_slice_t nextSlice, Cpa8U hashOffsetInConstantsTable, icp_qat_fw_comn_flags *pCmnRequestFlags, icp_qat_fw_serv_specif_flags *pLaCmdFlags, lac_sym_qat_hash_precompute_info_t *pPrecomputeData, lac_sym_qat_hash_precompute_info_t *pPrecomputeDataOptimisedCd, Cpa8U *pHwBlockBaseInDRAM, Cpa32U *pHwBlockOffsetInDRAM, Cpa8U *pOptimisedHwBlockBaseInDRAM, Cpa32U *pOptimisedHwBlockOffsetInDRAM) { Cpa32U sizeInBytes = 0; Cpa32U hwBlockOffsetInQuadWords = *pHwBlockOffsetInDRAM / LAC_QUAD_WORD_IN_BYTES; /* build: * - the hash part of the ContentDescriptor in DRAM */ /* - the hash part of the CD control block in the Request template */ LacSymQat_HashContentDescInit(&(pSessionDesc->reqCacheFtr), instanceHandle, pHashData, pHwBlockBaseInDRAM, hwBlockOffsetInQuadWords, nextSlice, pSessionDesc->qatHashMode, CPA_FALSE, CPA_FALSE, + pSessionDesc->useStatefulSha3ContentDesc, pPrecomputeData, &sizeInBytes); /* Using DRAM CD so update offset */ *pHwBlockOffsetInDRAM += sizeInBytes; sizeInBytes = 0; + + if (pSessionDesc->useOptimisedContentDesc) { + LacSymQat_HashContentDescInit(&(pSessionDesc->shramReqCacheFtr), + instanceHandle, + pHashData, + pOptimisedHwBlockBaseInDRAM, + hashOffsetInConstantsTable, + nextSlice, + pSessionDesc->qatHashMode, + CPA_TRUE, + CPA_TRUE, + CPA_FALSE, + pPrecomputeDataOptimisedCd, + &sizeInBytes); + + *pOptimisedHwBlockOffsetInDRAM += sizeInBytes; + } else if (pSessionDesc->useSymConstantsTable) { + /* Need to build up the alternative CD for SHRAM Constants Table + * use */ + LacSymQat_HashContentDescInit(&(pSessionDesc->shramReqCacheFtr), + instanceHandle, + pHashData, + pHwBlockBaseInDRAM, + hashOffsetInConstantsTable, + nextSlice, + pSessionDesc->qatHashMode, + CPA_TRUE, + CPA_FALSE, + CPA_FALSE, + pPrecomputeData, + &sizeInBytes); + } +} + +static Cpa16U +LacAlgChain_GetCipherConfigSize(lac_session_desc_t *pSessionDesc) +{ + if (ICP_QAT_FW_LA_USE_UCS_SLICE_TYPE == pSessionDesc->cipherSliceType) { + return sizeof(icp_qat_hw_ucs_cipher_config_t); + } else { + return sizeof(icp_qat_hw_cipher_config_t); + } +} + +static Cpa16U +LacAlgChain_GetCipherConfigOffset(lac_session_desc_t *pSessionDesc) +{ + Cpa16U offset = 0; + + if (CPA_CY_SYM_OP_ALGORITHM_CHAINING == pSessionDesc->symOperation || + SPC == pSessionDesc->singlePassState) { + icp_qat_fw_cipher_auth_cd_ctrl_hdr_t *cd_ctrl = + (icp_qat_fw_cipher_auth_cd_ctrl_hdr_t *)&pSessionDesc + ->reqCacheFtr.cd_ctrl; + offset = cd_ctrl->cipher_cfg_offset; + 
} else if (CPA_CY_SYM_OP_CIPHER == pSessionDesc->symOperation) { + icp_qat_fw_cipher_cd_ctrl_hdr_t *cd_ctrl = + (icp_qat_fw_cipher_cd_ctrl_hdr_t *)&pSessionDesc + ->reqCacheFtr.cd_ctrl; + offset = cd_ctrl->cipher_cfg_offset; + } + + return offset * LAC_QUAD_WORD_IN_BYTES; } CpaStatus LacAlgChain_SessionAADUpdate(lac_session_desc_t *pSessionDesc, Cpa32U newAADLength) { icp_qat_la_bulk_req_ftr_t *req_ftr = &pSessionDesc->reqCacheFtr; icp_qat_la_auth_req_params_t *req_params = &req_ftr->serv_specif_rqpars; if (!pSessionDesc) return CPA_STATUS_FAIL; pSessionDesc->aadLenInBytes = newAADLength; req_params->u2.aad_sz = LAC_ALIGN_POW2_ROUNDUP(newAADLength, LAC_HASH_AES_GCM_BLOCK_SIZE); - if (CPA_TRUE == pSessionDesc->isSinglePass) { + if (SPC == pSessionDesc->singlePassState) { Cpa8U *pHwBlockBaseInDRAM = NULL; Cpa32U hwBlockOffsetInDRAM = 0; Cpa32U pSizeInBytes = 0; CpaCySymCipherAlgorithm cipher = pSessionDesc->cipherAlgorithm; pHwBlockBaseInDRAM = (Cpa8U *)pSessionDesc->contentDescInfo.pData; if (pSessionDesc->cipherDirection == CPA_CY_SYM_CIPHER_DIRECTION_DECRYPT) { if (LAC_CIPHER_IS_GCM(cipher)) { hwBlockOffsetInDRAM = LAC_QUADWORDS_TO_BYTES( - LAC_SYM_QAT_CIPHER_OFFSET_IN_DRAM_GCM_SPC); + LAC_SYM_QAT_CIPHER_GCM_SPC_OFFSET_IN_DRAM); } else { hwBlockOffsetInDRAM = LAC_QUADWORDS_TO_BYTES( - LAC_SYM_QAT_CIPHER_OFFSET_IN_DRAM_CHACHA_SPC); + LAC_SYM_QAT_CIPHER_CHACHA_SPC_OFFSET_IN_DRAM); } } LacSymQat_CipherHwBlockPopulateCfgData(pSessionDesc, pHwBlockBaseInDRAM + hwBlockOffsetInDRAM, &pSizeInBytes); } return CPA_STATUS_SUCCESS; } CpaStatus LacAlgChain_SessionCipherKeyUpdate(lac_session_desc_t *pSessionDesc, Cpa8U *pCipherKey) { CpaStatus status = CPA_STATUS_SUCCESS; if (pSessionDesc == NULL || pCipherKey == NULL) return CPA_STATUS_FAIL; if (LAC_CIPHER_IS_ARC4(pSessionDesc->cipherAlgorithm)) { LacSymQat_CipherArc4StateInit( pCipherKey, pSessionDesc->cipherKeyLenInBytes, pSessionDesc->cipherARC4InitialState); } else { CpaCySymCipherSetupData cipherSetupData = { 0 }; Cpa32U sizeInBytes; Cpa8U *pCipherKeyField; + Cpa16U cipherConfigSize; + Cpa16U cipherConfigOffset; sal_qat_content_desc_info_t *pCdInfo = &(pSessionDesc->contentDescInfo); cipherSetupData.cipherAlgorithm = pSessionDesc->cipherAlgorithm; cipherSetupData.cipherKeyLenInBytes = pSessionDesc->cipherKeyLenInBytes; cipherSetupData.pCipherKey = pCipherKey; + cipherSetupData.cipherDirection = pSessionDesc->cipherDirection; + + cipherConfigSize = + LacAlgChain_GetCipherConfigSize(pSessionDesc); + cipherConfigOffset = + LacAlgChain_GetCipherConfigOffset(pSessionDesc); + + pCipherKeyField = (Cpa8U *)pCdInfo->pData + cipherConfigOffset + + cipherConfigSize; switch (pSessionDesc->symOperation) { case CPA_CY_SYM_OP_CIPHER: { - pCipherKeyField = (Cpa8U *)pCdInfo->pData + - sizeof(icp_qat_hw_cipher_config_t); - LacSymQat_CipherHwBlockPopulateKeySetup( + pSessionDesc, &(cipherSetupData), cipherSetupData.cipherKeyLenInBytes, + pSessionDesc->cipherSliceType, pCipherKeyField, &sizeInBytes); if (pSessionDesc->useSymConstantsTable) { pCipherKeyField = (Cpa8U *)&( pSessionDesc->shramReqCacheHdr.cd_pars.s1 .serv_specif_fields); LacSymQat_CipherHwBlockPopulateKeySetup( + pSessionDesc, &(cipherSetupData), cipherSetupData.cipherKeyLenInBytes, + pSessionDesc->cipherSliceType, pCipherKeyField, &sizeInBytes); } } break; case CPA_CY_SYM_OP_ALGORITHM_CHAINING: { - icp_qat_fw_cipher_auth_cd_ctrl_hdr_t *cd_ctrl = - (icp_qat_fw_cipher_auth_cd_ctrl_hdr_t - *)&pSessionDesc->reqCacheFtr.cd_ctrl; - - pCipherKeyField = (Cpa8U *)pCdInfo->pData + - cd_ctrl->cipher_cfg_offset * 
- LAC_QUAD_WORD_IN_BYTES + - sizeof(icp_qat_hw_cipher_config_t); - LacSymQat_CipherHwBlockPopulateKeySetup( + pSessionDesc, &(cipherSetupData), cipherSetupData.cipherKeyLenInBytes, + pSessionDesc->cipherSliceType, pCipherKeyField, &sizeInBytes); } break; default: LAC_LOG_ERROR("Invalid sym operation\n"); status = CPA_STATUS_INVALID_PARAM; break; } } return status; } CpaStatus LacAlgChain_SessionAuthKeyUpdate(lac_session_desc_t *pSessionDesc, Cpa8U *pAuthKey) { CpaStatus status = CPA_STATUS_SUCCESS; Cpa8U *pHwBlockBaseInDRAM = NULL; Cpa8U *pOutHashSetup = NULL; Cpa8U *pInnerState1 = NULL; Cpa8U *pInnerState2 = NULL; CpaCySymSessionSetupData sessionSetup = { 0 }; + Cpa16U cipherConfigSize; if (pSessionDesc == NULL || pAuthKey == NULL) return CPA_STATUS_FAIL; + cipherConfigSize = LacAlgChain_GetCipherConfigSize(pSessionDesc); + icp_qat_fw_cipher_auth_cd_ctrl_hdr_t *cd_ctrl = (icp_qat_fw_cipher_auth_cd_ctrl_hdr_t *)&pSessionDesc->reqCacheFtr .cd_ctrl; pHwBlockBaseInDRAM = (Cpa8U *)pSessionDesc->contentDescInfo.pData; sessionSetup.hashSetupData.hashAlgorithm = pSessionDesc->hashAlgorithm; sessionSetup.hashSetupData.hashMode = pSessionDesc->hashMode; sessionSetup.hashSetupData.authModeSetupData.authKey = pAuthKey; sessionSetup.hashSetupData.authModeSetupData.authKeyLenInBytes = pSessionDesc->authKeyLenInBytes; sessionSetup.hashSetupData.authModeSetupData.aadLenInBytes = pSessionDesc->aadLenInBytes; sessionSetup.hashSetupData.digestResultLenInBytes = pSessionDesc->hashResultSize; sessionSetup.cipherSetupData.cipherAlgorithm = pSessionDesc->cipherAlgorithm; sessionSetup.cipherSetupData.cipherKeyLenInBytes = pSessionDesc->cipherKeyLenInBytes; /* Calculate hash states offsets */ pInnerState1 = pHwBlockBaseInDRAM + cd_ctrl->hash_cfg_offset * LAC_QUAD_WORD_IN_BYTES + sizeof(icp_qat_hw_auth_setup_t); pInnerState2 = pInnerState1 + cd_ctrl->inner_state1_sz; pOutHashSetup = pInnerState2 + cd_ctrl->inner_state2_sz; /* Calculate offset of cipher key */ if (pSessionDesc->laCmdId == ICP_QAT_FW_LA_CMD_CIPHER_HASH) { sessionSetup.cipherSetupData.pCipherKey = - (Cpa8U *)pHwBlockBaseInDRAM + - sizeof(icp_qat_hw_cipher_config_t); + (Cpa8U *)pHwBlockBaseInDRAM + cipherConfigSize; } else if (pSessionDesc->laCmdId == ICP_QAT_FW_LA_CMD_HASH_CIPHER) { sessionSetup.cipherSetupData.pCipherKey = - pOutHashSetup + sizeof(icp_qat_hw_cipher_config_t); - } else if (CPA_TRUE == pSessionDesc->isSinglePass) { + pOutHashSetup + cipherConfigSize; + } else if (SPC == pSessionDesc->singlePassState) { CpaCySymCipherAlgorithm cipher = pSessionDesc->cipherAlgorithm; Cpa32U hwBlockOffsetInDRAM = 0; if (pSessionDesc->cipherDirection == CPA_CY_SYM_CIPHER_DIRECTION_ENCRYPT) { sessionSetup.cipherSetupData.pCipherKey = - (Cpa8U *)pHwBlockBaseInDRAM + - sizeof(icp_qat_hw_cipher_config_t); + (Cpa8U *)pHwBlockBaseInDRAM + cipherConfigSize; } else { if (LAC_CIPHER_IS_GCM(cipher)) hwBlockOffsetInDRAM = LAC_QUADWORDS_TO_BYTES( - LAC_SYM_QAT_CIPHER_OFFSET_IN_DRAM_GCM_SPC); + LAC_SYM_QAT_CIPHER_GCM_SPC_OFFSET_IN_DRAM); else hwBlockOffsetInDRAM = LAC_QUADWORDS_TO_BYTES( - LAC_SYM_QAT_CIPHER_OFFSET_IN_DRAM_CHACHA_SPC); + LAC_SYM_QAT_CIPHER_CHACHA_SPC_OFFSET_IN_DRAM); sessionSetup.cipherSetupData.pCipherKey = (Cpa8U *)pHwBlockBaseInDRAM + hwBlockOffsetInDRAM + - sizeof(icp_qat_hw_cipher_config_t); + cipherConfigSize; } } if (!sessionSetup.cipherSetupData.pCipherKey) return CPA_STATUS_FAIL; if (CPA_CY_SYM_HASH_SHA3_256 == pSessionDesc->hashAlgorithm) { if (CPA_FALSE == pSessionDesc->isAuthEncryptOp) { lac_sym_qat_hash_state_buffer_info_t 
*pHashStateBufferInfo = &(pSessionDesc->hashStateBufferInfo); sal_crypto_service_t *pService = (sal_crypto_service_t *)pSessionDesc->pInstance; status = LacHash_StatePrefixAadBufferInit( &(pService->generic_service_info), &(sessionSetup.hashSetupData), &(pSessionDesc->reqCacheFtr), pSessionDesc->qatHashMode, pSessionDesc->hashStatePrefixBuffer, pHashStateBufferInfo); /* SHRAM Constants Table not used for Auth-Enc */ } } else if (CPA_CY_SYM_HASH_SNOW3G_UIA2 == pSessionDesc->hashAlgorithm) { - Cpa8U *authKey = - (Cpa8U *)pOutHashSetup + sizeof(icp_qat_hw_cipher_config_t); + Cpa8U *authKey = (Cpa8U *)pOutHashSetup + cipherConfigSize; memcpy(authKey, pAuthKey, pSessionDesc->authKeyLenInBytes); } else if (CPA_CY_SYM_HASH_ZUC_EIA3 == pSessionDesc->hashAlgorithm || CPA_CY_SYM_HASH_AES_CBC_MAC == pSessionDesc->hashAlgorithm) { memcpy(pInnerState2, pAuthKey, pSessionDesc->authKeyLenInBytes); } else if (CPA_CY_SYM_HASH_AES_CMAC == pSessionDesc->hashAlgorithm || CPA_CY_SYM_HASH_KASUMI_F9 == pSessionDesc->hashAlgorithm || IS_HASH_MODE_1(pSessionDesc->qatHashMode)) { if (CPA_CY_SYM_HASH_AES_CMAC == pSessionDesc->hashAlgorithm) { memset(pInnerState2, 0, cd_ctrl->inner_state2_sz); } /* Block messages until precompute is completed */ pSessionDesc->nonBlockingOpsInProgress = CPA_FALSE; status = LacHash_PrecomputeDataCreate( pSessionDesc->pInstance, (CpaCySymSessionSetupData *)&(sessionSetup), LacSymAlgChain_HashPrecomputeDoneCb, pSessionDesc, pSessionDesc->hashStatePrefixBuffer, pInnerState1, pInnerState2); } return status; } +static void +buildCmdData(sal_crypto_service_t *pService, + lac_session_desc_t *pSessionDesc, + CpaCySymAlgChainOrder *chainOrder, + Cpa16U *proto, + icp_qat_fw_serv_specif_flags *laCmdFlags, + icp_qat_fw_comn_flags *cmnRequestFlags) +{ + /* LW 28 is used to set hash flags for AlgChaining. */ + icp_qat_fw_cipher_auth_cd_ctrl_hdr_t *cd_ctrl = + (icp_qat_fw_cipher_auth_cd_ctrl_hdr_t *)&pSessionDesc->reqCacheFtr + .cd_ctrl; + + /* proto refers to Protocol Flags, which is legacy FW <=> IA interface + * for ZUC and Snow3G. Use extended protocol flags for AlgChaining. + */ + *proto = ICP_QAT_FW_LA_NO_PROTO; /* no CCM/GCM/Snow3G */ + + switch (pSessionDesc->symOperation) { + case CPA_CY_SYM_OP_CIPHER: + pSessionDesc->laCmdId = ICP_QAT_FW_LA_CMD_CIPHER; + + if (CPA_CY_SYM_CIPHER_SNOW3G_UEA2 == + pSessionDesc->cipherAlgorithm) { + *proto = ICP_QAT_FW_LA_SNOW_3G_PROTO; + } else if (CPA_CY_SYM_CIPHER_ZUC_EEA3 == + pSessionDesc->cipherAlgorithm) { + *proto = ICP_QAT_FW_LA_ZUC_3G_PROTO; + } + if (LAC_CIPHER_IS_CCM(pSessionDesc->cipherAlgorithm)) { + *proto = ICP_QAT_FW_LA_CCM_PROTO; + } + break; + + case CPA_CY_SYM_OP_HASH: + pSessionDesc->laCmdId = ICP_QAT_FW_LA_CMD_AUTH; + if (CPA_CY_SYM_HASH_SNOW3G_UIA2 == + pSessionDesc->hashAlgorithm) { + *proto = ICP_QAT_FW_LA_SNOW_3G_PROTO; + } else if (CPA_CY_SYM_HASH_ZUC_EIA3 == + pSessionDesc->hashAlgorithm) { + *proto = ICP_QAT_FW_LA_ZUC_3G_PROTO; + } + break; + + case CPA_CY_SYM_OP_ALGORITHM_CHAINING: + if (LAC_CIPHER_IS_CCM(pSessionDesc->cipherAlgorithm)) { + *proto = ICP_QAT_FW_LA_CCM_PROTO; + + /* Derive chainOrder from direction for isAuthEncryptOp + * cases */ + /* For CCM & GCM modes: force digest verify flag _TRUE + for decrypt and _FALSE for encrypt. 
For all other + cases use user defined value */ + + if (CPA_CY_SYM_CIPHER_DIRECTION_ENCRYPT == + pSessionDesc->cipherDirection) { + *chainOrder = + CPA_CY_SYM_ALG_CHAIN_ORDER_HASH_THEN_CIPHER; + pSessionDesc->digestVerify = CPA_FALSE; + } else { + *chainOrder = + CPA_CY_SYM_ALG_CHAIN_ORDER_CIPHER_THEN_HASH; + if (CPA_TRUE == pService->forceAEADMacVerify) { + pSessionDesc->digestVerify = CPA_TRUE; + } + } + } else if (LAC_CIPHER_IS_GCM(pSessionDesc->cipherAlgorithm)) { + *proto = ICP_QAT_FW_LA_GCM_PROTO; + + /* Derive chainOrder from direction for isAuthEncryptOp + * cases */ + /* For CCM & GCM modes: force digest verify flag _TRUE + for decrypt and _FALSE for encrypt. For all other + cases use user defined value */ + + if (CPA_CY_SYM_CIPHER_DIRECTION_ENCRYPT == + pSessionDesc->cipherDirection) { + *chainOrder = + CPA_CY_SYM_ALG_CHAIN_ORDER_CIPHER_THEN_HASH; + pSessionDesc->digestVerify = CPA_FALSE; + } else { + *chainOrder = + CPA_CY_SYM_ALG_CHAIN_ORDER_HASH_THEN_CIPHER; + if (CPA_TRUE == pService->forceAEADMacVerify) { + pSessionDesc->digestVerify = CPA_TRUE; + } + } + } else if (LAC_CIPHER_IS_CHACHA( + pSessionDesc->cipherAlgorithm)) { + if (CPA_CY_SYM_CIPHER_DIRECTION_ENCRYPT == + pSessionDesc->cipherDirection) { + *chainOrder = + CPA_CY_SYM_ALG_CHAIN_ORDER_CIPHER_THEN_HASH; + } else { + *chainOrder = + CPA_CY_SYM_ALG_CHAIN_ORDER_HASH_THEN_CIPHER; + } + } else { + pSessionDesc->isAuthEncryptOp = CPA_FALSE; + + if (CPA_CY_SYM_CIPHER_SNOW3G_UEA2 == + pSessionDesc->cipherAlgorithm) { + *proto = ICP_QAT_FW_LA_SNOW_3G_PROTO; + } else if (CPA_CY_SYM_CIPHER_ZUC_EEA3 == + pSessionDesc->cipherAlgorithm) { + *proto = ICP_QAT_FW_LA_ZUC_3G_PROTO; + } + + if (CPA_CY_SYM_HASH_SNOW3G_UIA2 == + pSessionDesc->hashAlgorithm) { + /* Need to set LW 28 hash flags as well. */ + ICP_QAT_FW_HASH_FLAG_SNOW3G_UIA2_SET( + cd_ctrl->hash_flags, QAT_FW_LA_SNOW3G_UIA2); + } else if (CPA_CY_SYM_HASH_ZUC_EIA3 == + pSessionDesc->hashAlgorithm) { + /* Need to set LW 28 hash flags as well. */ + ICP_QAT_FW_HASH_FLAG_ZUC_EIA3_SET( + cd_ctrl->hash_flags, QAT_FW_LA_ZUC_EIA3); + } + } + + if (CPA_CY_SYM_ALG_CHAIN_ORDER_CIPHER_THEN_HASH == + *chainOrder) { + pSessionDesc->laCmdId = ICP_QAT_FW_LA_CMD_CIPHER_HASH; + } else if (CPA_CY_SYM_ALG_CHAIN_ORDER_HASH_THEN_CIPHER == + *chainOrder) { + pSessionDesc->laCmdId = ICP_QAT_FW_LA_CMD_HASH_CIPHER; + } + break; + + default: + break; + } + + /* + * Build the header flags with the default settings for this session. + */ + if (pSessionDesc->isDPSession == CPA_TRUE) { + *cmnRequestFlags = + ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR, + LAC_SYM_DP_QAT_PTR_TYPE); + } else { + *cmnRequestFlags = + ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR, + LAC_SYM_DEFAULT_QAT_PTR_TYPE); + } + + LacSymQat_LaSetDefaultFlags(laCmdFlags, pSessionDesc->symOperation); + + return; +} + +static void +updateLaCmdFlags(lac_session_desc_t *pSessionDesc, + Cpa16U proto, + icp_qat_fw_serv_specif_flags *laCmdFlags) +{ + if (pSessionDesc->isAuth) { + if (pSessionDesc->digestVerify) { + ICP_QAT_FW_LA_CMP_AUTH_SET(*laCmdFlags, + ICP_QAT_FW_LA_CMP_AUTH_RES); + ICP_QAT_FW_LA_RET_AUTH_SET( + *laCmdFlags, ICP_QAT_FW_LA_NO_RET_AUTH_RES); + } else { + ICP_QAT_FW_LA_RET_AUTH_SET(*laCmdFlags, + ICP_QAT_FW_LA_RET_AUTH_RES); + ICP_QAT_FW_LA_CMP_AUTH_SET( + *laCmdFlags, ICP_QAT_FW_LA_NO_CMP_AUTH_RES); + } + } + + if ((CPA_CY_SYM_CIPHER_ZUC_EEA3 == pSessionDesc->cipherAlgorithm) || + (CPA_CY_SYM_HASH_ZUC_EIA3 == pSessionDesc->hashAlgorithm)) { + /* New bit position (12) for ZUC. 
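The CCM and GCM branches of buildCmdData() above derive both the chain order and the digest-verify flag from the cipher direction rather than from caller input. A minimal sketch of that derivation follows; dir_t, order_t and aead_t are illustrative stand-ins, not driver types. CCM authenticates the plaintext (MAC-then-encrypt) while GCM authenticates the ciphertext (encrypt-then-MAC), which is why the two derive mirrored orders.

#include <stdbool.h>

typedef enum { DIR_ENCRYPT, DIR_DECRYPT } dir_t;
typedef enum { ORDER_HASH_THEN_CIPHER, ORDER_CIPHER_THEN_HASH } order_t;
typedef enum { AEAD_CCM, AEAD_GCM } aead_t;

static void
derive_aead_order(aead_t alg, dir_t dir, bool force_mac_verify,
    order_t *order, bool *digest_verify)
{
	bool encrypt = (dir == DIR_ENCRYPT);

	/* CCM MACs the plaintext (MAC-then-encrypt) and GCM MACs the
	 * ciphertext (encrypt-then-MAC), hence the mirrored orders. */
	if (alg == AEAD_CCM)
		*order = encrypt ? ORDER_HASH_THEN_CIPHER :
		    ORDER_CIPHER_THEN_HASH;
	else
		*order = encrypt ? ORDER_CIPHER_THEN_HASH :
		    ORDER_HASH_THEN_CIPHER;

	/* Verification is forced off on encrypt; on decrypt it follows
	 * the service-level forceAEADMacVerify setting shown above. */
	*digest_verify = encrypt ? false : force_mac_verify;
}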
The FW provides a specific + * macro to use to set the ZUC proto flag. With the new FW I/F + * this needs to be set for both Cipher and Auth */ + ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(*laCmdFlags, proto); + } else { + /* Configure the common header */ + ICP_QAT_FW_LA_PROTO_SET(*laCmdFlags, proto); + } + + /* set Append flag, if digest is appended */ + if (pSessionDesc->digestIsAppended) { + ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET( + *laCmdFlags, ICP_QAT_FW_LA_DIGEST_IN_BUFFER); + } else { + ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET( + *laCmdFlags, ICP_QAT_FW_LA_NO_DIGEST_IN_BUFFER); + } +} + +static lac_single_pass_state_t +LacSymAlgChain_GetSpcState(CpaCySymCipherAlgorithm cipher, + CpaCySymHashAlgorithm hash, + Cpa32U capabilitiesMask) +{ + lac_single_pass_state_t state = NON_SPC; + if (capabilitiesMask & ICP_ACCEL_CAPABILITIES_CHACHA_POLY) { + switch (cipher) { + case CPA_CY_SYM_CIPHER_CHACHA: { + if (CPA_CY_SYM_HASH_POLY == hash) + state = SPC; + break; + } + case CPA_CY_SYM_CIPHER_AES_GCM: { + if ((CPA_CY_SYM_HASH_AES_GCM == hash) || + (CPA_CY_SYM_HASH_AES_GMAC == hash)) + state = LIKELY_SPC; + break; + } + case CPA_CY_SYM_CIPHER_AES_CCM: { + if (LAC_CIPHER_AES_V2(capabilitiesMask)) + state = SPC; + } + default: + /* Do Nothing as it is NON_SPC */ + break; + } + } + return state; +} + +static CpaBoolean +LacAlgChain_UseStatefulSha3ContentDesc(CpaBoolean partialsNotRequired, + Cpa32U capabilitiesMask, + lac_session_desc_t *pSessionDesc) +{ + CpaBoolean hasSha3Ext = + ICP_ACCEL_CAPABILITIES_SHA3_EXT & capabilitiesMask; + CpaBoolean useStatefulSha3DescFlag = CPA_FALSE; + + if (hasSha3Ext && !partialsNotRequired && + (pSessionDesc->symOperation == CPA_CY_SYM_OP_HASH) && + LAC_HASH_IS_SHA3(pSessionDesc->hashAlgorithm)) { + useStatefulSha3DescFlag = CPA_TRUE; + } + return useStatefulSha3DescFlag; +} + /** @ingroup LacAlgChain */ CpaStatus LacAlgChain_SessionInit(const CpaInstanceHandle instanceHandle, const CpaCySymSessionSetupData *pSessionSetupData, lac_session_desc_t *pSessionDesc) { CpaStatus stat, status = CPA_STATUS_SUCCESS; sal_qat_content_desc_info_t *pCdInfo = NULL; + sal_qat_content_desc_info_t *pCdInfoOptimised = NULL; sal_crypto_service_t *pService = (sal_crypto_service_t *)instanceHandle; Cpa32U capabilitiesMask = pService->generic_service_info.capabilitiesMask; Cpa8U *pHwBlockBaseInDRAM = NULL; Cpa8U *pOptimisedHwBlockBaseInDRAM = NULL; Cpa32U hwBlockOffsetInDRAM = 0; Cpa32U optimisedHwBlockOffsetInDRAM = 0; Cpa8U cipherOffsetInConstantsTable = 0; Cpa8U hashOffsetInConstantsTable = 0; + icp_qat_fw_comn_flags cmnRequestFlags = 0; icp_qat_fw_comn_req_t *pMsg = NULL; + icp_qat_fw_comn_req_t *pMsgS = NULL; const CpaCySymCipherSetupData *pCipherData; const CpaCySymHashSetupData *pHashData; Cpa16U proto = ICP_QAT_FW_LA_NO_PROTO; /* no CCM/GCM/Snow3G */ CpaCySymAlgChainOrder chainOrder = 0; lac_sym_qat_hash_precompute_info_t precomputeData = { 0 }; lac_sym_qat_hash_precompute_info_t precomputeDataOptimisedCd = { 0 }; pCipherData = &(pSessionSetupData->cipherSetupData); pHashData = &(pSessionSetupData->hashSetupData); /*------------------------------------------------------------------------- * Populate session data *-----------------------------------------------------------------------*/ /* Initialise Request Queue */ stat = LAC_SPINLOCK_INIT(&pSessionDesc->requestQueueLock); if (CPA_STATUS_SUCCESS != stat) { LAC_LOG_ERROR("Spinlock init failed for sessionLock"); return CPA_STATUS_RESOURCE; } pSessionDesc->pRequestQueueHead = NULL; pSessionDesc->pRequestQueueTail = NULL; 
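LacSymAlgChain_GetSpcState() above is a three-valued decision: ChaCha20-Poly1305 and AES-v2 CCM can be committed to single-pass at session init, but AES-GCM is only LIKELY_SPC because true single-pass additionally requires the 12-byte IV, which is only known per request (see LacAlgChain_Perform() below). A behavioral mirror, with stand-in capability bits and enums in place of the ICP_ACCEL_CAPABILITIES_* and CPA_CY_SYM_* names:

typedef enum { NON_SPC, LIKELY_SPC, SPC } spc_state_t;

enum { CAP_CHACHA_POLY = 1u << 0, CAP_AES_V2 = 1u << 1 };

typedef enum { CIPH_CHACHA, CIPH_AES_GCM, CIPH_AES_CCM, CIPH_OTHER } ciph_t;
typedef enum { HASH_POLY, HASH_AES_GCM, HASH_AES_GMAC, HASH_OTHER } hash_t;

static spc_state_t
get_spc_state(ciph_t c, hash_t h, unsigned int caps)
{
	if (!(caps & CAP_CHACHA_POLY))
		return (NON_SPC);
	switch (c) {
	case CIPH_CHACHA:
		return (h == HASH_POLY ? SPC : NON_SPC);
	case CIPH_AES_GCM:
		/* Only "likely": committing to single-pass also needs
		 * the 12-byte IV, known per request, so the final
		 * decision is deferred to perform time. */
		return (h == HASH_AES_GCM || h == HASH_AES_GMAC ?
		    LIKELY_SPC : NON_SPC);
	case CIPH_AES_CCM:
		return (caps & CAP_AES_V2 ? SPC : NON_SPC);
	default:
		return (NON_SPC);
	}
}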
pSessionDesc->nonBlockingOpsInProgress = CPA_TRUE; pSessionDesc->pInstance = instanceHandle; pSessionDesc->digestIsAppended = pSessionSetupData->digestIsAppended; pSessionDesc->digestVerify = pSessionSetupData->verifyDigest; /* Reset the pending callback counter */ qatUtilsAtomicSet(0, &pSessionDesc->u.pendingCbCount); qatUtilsAtomicSet(0, &pSessionDesc->u.pendingDpCbCount); /* Partial state must be set to full, to indicate that next packet * expected on the session is a full packet or the start of a * partial packet. */ pSessionDesc->partialState = CPA_CY_SYM_PACKET_TYPE_FULL; pSessionDesc->symOperation = pSessionSetupData->symOperation; switch (pSessionDesc->symOperation) { case CPA_CY_SYM_OP_CIPHER: - pSessionDesc->laCmdId = ICP_QAT_FW_LA_CMD_CIPHER; - pSessionDesc->isCipher = TRUE; - pSessionDesc->isAuth = FALSE; + pSessionDesc->isCipher = CPA_TRUE; + pSessionDesc->isAuth = CPA_FALSE; pSessionDesc->isAuthEncryptOp = CPA_FALSE; - - if (CPA_CY_SYM_CIPHER_SNOW3G_UEA2 == - pSessionSetupData->cipherSetupData.cipherAlgorithm) { - proto = ICP_QAT_FW_LA_SNOW_3G_PROTO; - } else if (CPA_CY_SYM_CIPHER_ZUC_EEA3 == - pSessionSetupData->cipherSetupData.cipherAlgorithm) { - proto = ICP_QAT_FW_LA_ZUC_3G_PROTO; - } + pSessionDesc->singlePassState = NON_SPC; break; case CPA_CY_SYM_OP_HASH: - pSessionDesc->laCmdId = ICP_QAT_FW_LA_CMD_AUTH; - pSessionDesc->isCipher = FALSE; - pSessionDesc->isAuth = TRUE; + pSessionDesc->isCipher = CPA_FALSE; + pSessionDesc->isAuth = CPA_TRUE; pSessionDesc->isAuthEncryptOp = CPA_FALSE; - - if (CPA_CY_SYM_HASH_SNOW3G_UIA2 == - pSessionSetupData->hashSetupData.hashAlgorithm) { - proto = ICP_QAT_FW_LA_SNOW_3G_PROTO; - } else if (CPA_CY_SYM_HASH_ZUC_EIA3 == - pSessionSetupData->hashSetupData.hashAlgorithm) { - proto = ICP_QAT_FW_LA_ZUC_3G_PROTO; - } - + pSessionDesc->singlePassState = NON_SPC; break; - case CPA_CY_SYM_OP_ALGORITHM_CHAINING: - pSessionDesc->isCipher = TRUE; - pSessionDesc->isAuth = TRUE; - - { - /* set up some useful shortcuts */ - CpaCySymCipherAlgorithm cipherAlgorithm = - pSessionSetupData->cipherSetupData.cipherAlgorithm; - CpaCySymCipherDirection cipherDir = - pSessionSetupData->cipherSetupData.cipherDirection; - - if (LAC_CIPHER_IS_CCM(cipherAlgorithm)) { - pSessionDesc->isAuthEncryptOp = CPA_TRUE; - pSessionDesc->digestIsAppended = CPA_TRUE; - proto = ICP_QAT_FW_LA_CCM_PROTO; - - /* Derive chainOrder from direction for - * isAuthEncryptOp - * cases */ - /* For CCM & GCM modes: force digest verify flag - _TRUE - for decrypt and _FALSE for encrypt. For all - other cases - use user defined value */ - - if (CPA_CY_SYM_CIPHER_DIRECTION_ENCRYPT == - cipherDir) { - chainOrder = - CPA_CY_SYM_ALG_CHAIN_ORDER_HASH_THEN_CIPHER; - pSessionDesc->digestVerify = CPA_FALSE; - } else { - chainOrder = - CPA_CY_SYM_ALG_CHAIN_ORDER_CIPHER_THEN_HASH; - pSessionDesc->digestVerify = CPA_TRUE; - } - } else if (LAC_CIPHER_IS_GCM(cipherAlgorithm)) { - pSessionDesc->isAuthEncryptOp = CPA_TRUE; - proto = ICP_QAT_FW_LA_GCM_PROTO; - - /* Derive chainOrder from direction for - * isAuthEncryptOp - * cases */ - /* For CCM & GCM modes: force digest verify flag - _TRUE - for decrypt and _FALSE for encrypt. 
For all - other cases - use user defined value */ - - if (CPA_CY_SYM_CIPHER_DIRECTION_ENCRYPT == - cipherDir) { - chainOrder = - CPA_CY_SYM_ALG_CHAIN_ORDER_CIPHER_THEN_HASH; - pSessionDesc->digestVerify = CPA_FALSE; - } else { - chainOrder = - CPA_CY_SYM_ALG_CHAIN_ORDER_HASH_THEN_CIPHER; - pSessionDesc->digestVerify = CPA_TRUE; - } - } else if (LAC_CIPHER_IS_CHACHA(cipherAlgorithm)) { - pSessionDesc->isAuthEncryptOp = CPA_TRUE; - proto = ICP_QAT_FW_LA_SINGLE_PASS_PROTO; - - if (CPA_CY_SYM_CIPHER_DIRECTION_ENCRYPT == - cipherDir) { - chainOrder = - CPA_CY_SYM_ALG_CHAIN_ORDER_CIPHER_THEN_HASH; - } else { - chainOrder = - CPA_CY_SYM_ALG_CHAIN_ORDER_HASH_THEN_CIPHER; - } - } else { - pSessionDesc->isAuthEncryptOp = CPA_FALSE; - /* Use the chainOrder passed in */ - chainOrder = pSessionSetupData->algChainOrder; - if ((chainOrder != - CPA_CY_SYM_ALG_CHAIN_ORDER_HASH_THEN_CIPHER) && - (chainOrder != - CPA_CY_SYM_ALG_CHAIN_ORDER_CIPHER_THEN_HASH)) { - LAC_INVALID_PARAM_LOG("algChainOrder"); - return CPA_STATUS_INVALID_PARAM; - } - - if (CPA_CY_SYM_HASH_SNOW3G_UIA2 == - pSessionSetupData->hashSetupData - .hashAlgorithm) { - proto = ICP_QAT_FW_LA_SNOW_3G_PROTO; - } else if (CPA_CY_SYM_HASH_ZUC_EIA3 == - pSessionSetupData->hashSetupData - .hashAlgorithm) { - proto = ICP_QAT_FW_LA_ZUC_3G_PROTO; - } - } - - if (CPA_CY_SYM_ALG_CHAIN_ORDER_CIPHER_THEN_HASH == - chainOrder) { - pSessionDesc->laCmdId = - ICP_QAT_FW_LA_CMD_CIPHER_HASH; - } else if ( - CPA_CY_SYM_ALG_CHAIN_ORDER_HASH_THEN_CIPHER == - chainOrder) { - pSessionDesc->laCmdId = - ICP_QAT_FW_LA_CMD_HASH_CIPHER; + case CPA_CY_SYM_OP_ALGORITHM_CHAINING: { + pSessionDesc->isCipher = CPA_TRUE; + pSessionDesc->isAuth = CPA_TRUE; + pSessionDesc->singlePassState = + LacSymAlgChain_GetSpcState(pCipherData->cipherAlgorithm, + pHashData->hashAlgorithm, + capabilitiesMask); + + switch (pSessionSetupData->cipherSetupData.cipherAlgorithm) { + case CPA_CY_SYM_CIPHER_AES_CCM: { + pSessionDesc->isAuthEncryptOp = CPA_TRUE; + pSessionDesc->digestIsAppended = CPA_TRUE; + } break; + case CPA_CY_SYM_CIPHER_AES_GCM: + case CPA_CY_SYM_CIPHER_CHACHA: + pSessionDesc->isAuthEncryptOp = CPA_TRUE; + break; + default: { + pSessionDesc->isAuthEncryptOp = CPA_FALSE; + /* Use the chainOrder passed in */ + chainOrder = pSessionSetupData->algChainOrder; + if ((chainOrder != + CPA_CY_SYM_ALG_CHAIN_ORDER_HASH_THEN_CIPHER) && + (chainOrder != + CPA_CY_SYM_ALG_CHAIN_ORDER_CIPHER_THEN_HASH)) { + LAC_INVALID_PARAM_LOG("algChainOrder"); + return CPA_STATUS_INVALID_PARAM; } + } break; } - break; + } break; default: + pSessionDesc->singlePassState = NON_SPC; break; } if (pSessionDesc->isCipher) { -/* Populate cipher specific session data */ + /* Populate cipher specific session data */ - status = LacCipher_SessionSetupDataCheck(pCipherData); + status = LacCipher_SessionSetupDataCheck(pCipherData, + capabilitiesMask); if (CPA_STATUS_SUCCESS == status) { pSessionDesc->cipherAlgorithm = pCipherData->cipherAlgorithm; pSessionDesc->cipherKeyLenInBytes = pCipherData->cipherKeyLenInBytes; pSessionDesc->cipherDirection = pCipherData->cipherDirection; /* ARC4 base key isn't added to the content descriptor, - * because - * we don't need to pass it directly to the QAT engine. - * Instead - * an initial cipher state & key matrix is derived from - * the - * base key and provided to the QAT through the state - * pointer - * in the request params. We'll store this initial state - * in - * the session descriptor. */ + * because we don't need to pass it directly to the QAT + * engine. 
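LacSymQat_CipherArc4StateInit() itself is not part of this hunk; the state it derives is the standard RC4 key schedule, sketched below as a textbook expansion. The exact layout of cipherARC4InitialState (for example whether the i/j stream indices are appended after the permutation) is driver-specific and not shown here.

#include <stddef.h>
#include <stdint.h>

static void
arc4_ksa(const uint8_t *key, size_t keylen, uint8_t state[256])
{
	unsigned int i, j, tmp;

	/* Identity permutation, then key-dependent swaps; the base key
	 * (1..256 bytes) repeats cyclically. */
	for (i = 0; i < 256; i++)
		state[i] = (uint8_t)i;
	for (i = 0, j = 0; i < 256; i++) {
		j = (j + state[i] + key[i % keylen]) & 0xff;
		tmp = state[i];
		state[i] = state[j];
		state[j] = (uint8_t)tmp;
	}
}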
Instead an initial cipher state & key matrix + * is derived from the base key and provided to the QAT + * through the state pointer in the request params. + * We'll store this initial state in the session + * descriptor. */ if (LAC_CIPHER_IS_ARC4(pSessionDesc->cipherAlgorithm)) { LacSymQat_CipherArc4StateInit( pCipherData->pCipherKey, pSessionDesc->cipherKeyLenInBytes, pSessionDesc->cipherARC4InitialState); pSessionDesc->cipherARC4InitialStatePhysAddr = LAC_OS_VIRT_TO_PHYS_EXTERNAL( pService->generic_service_info, pSessionDesc->cipherARC4InitialState); if (0 == pSessionDesc ->cipherARC4InitialStatePhysAddr) { LAC_LOG_ERROR( "Unable to get the physical address of " "the initial state for ARC4\n"); status = CPA_STATUS_FAIL; } } } } if ((CPA_STATUS_SUCCESS == status) && pSessionDesc->isAuth) { /* Populate auth-specific session data */ const CpaCySymHashSetupData *pHashData = &pSessionSetupData->hashSetupData; status = LacHash_HashContextCheck(instanceHandle, pHashData); if (CPA_STATUS_SUCCESS == status) { pSessionDesc->hashResultSize = pHashData->digestResultLenInBytes; pSessionDesc->hashMode = pHashData->hashMode; pSessionDesc->hashAlgorithm = pHashData->hashAlgorithm; /* Save the authentication key length for further update */ if (CPA_CY_SYM_HASH_MODE_AUTH == pHashData->hashMode) { pSessionDesc->authKeyLenInBytes = pHashData->authModeSetupData .authKeyLenInBytes; } if (CPA_TRUE == pSessionDesc->isAuthEncryptOp || (pHashData->hashAlgorithm == CPA_CY_SYM_HASH_SNOW3G_UIA2 || pHashData->hashAlgorithm == CPA_CY_SYM_HASH_ZUC_EIA3)) { pSessionDesc->aadLenInBytes = pHashData->authModeSetupData.aadLenInBytes; } /* Set the QAT hash mode */ if ((pHashData->hashMode == CPA_CY_SYM_HASH_MODE_NESTED) || (pHashData->hashMode == CPA_CY_SYM_HASH_MODE_PLAIN) || (pHashData->hashMode == CPA_CY_SYM_HASH_MODE_AUTH && pHashData->hashAlgorithm == CPA_CY_SYM_HASH_AES_CBC_MAC)) { pSessionDesc->qatHashMode = ICP_QAT_HW_AUTH_MODE0; } else /* CPA_CY_SYM_HASH_MODE_AUTH && anything except CPA_CY_SYM_HASH_AES_CBC_MAC - */ + */ { if (IS_HMAC_ALG(pHashData->hashAlgorithm)) { - /* SHA3 and SM3 HMAC do not support - * precompute, force MODE2 - * for AUTH */ - if ((CPA_CY_SYM_HASH_SHA3_224 == - pHashData->hashAlgorithm) || - (CPA_CY_SYM_HASH_SHA3_256 == - pHashData->hashAlgorithm) || - (CPA_CY_SYM_HASH_SHA3_384 == - pHashData->hashAlgorithm) || - (CPA_CY_SYM_HASH_SHA3_512 == - pHashData->hashAlgorithm) || + /* SHA3 HMAC and SM3 do not support + * precompute, force MODE2 for AUTH */ + if (LAC_HASH_IS_SHA3( + pHashData->hashAlgorithm) || (CPA_CY_SYM_HASH_SM3 == pHashData->hashAlgorithm)) { pSessionDesc->qatHashMode = ICP_QAT_HW_AUTH_MODE2; } else { pSessionDesc->qatHashMode = - ICP_QAT_HW_AUTH_MODE1; + pService->qatHmacMode; } } else if (CPA_CY_SYM_HASH_ZUC_EIA3 == pHashData->hashAlgorithm) { pSessionDesc->qatHashMode = ICP_QAT_HW_AUTH_MODE0; } else { pSessionDesc->qatHashMode = ICP_QAT_HW_AUTH_MODE1; } } } } /*------------------------------------------------------------------------- * build the message templates * create two content descriptors in the case we can support using SHRAM * constants and an optimised content descriptor. we have to do this in - *case - * of partials. - * 64 byte content descriptor is used in the SHRAM case for - *AES-128-HMAC-SHA1 + *case of partials. 
64 byte content descriptor is used in the SHRAM case + *for AES-128-HMAC-SHA1 *-----------------------------------------------------------------------*/ if (CPA_STATUS_SUCCESS == status) { + pSessionDesc->cipherSliceType = + LacCipher_GetCipherSliceType(pService, + pSessionDesc->cipherAlgorithm, + pSessionDesc->hashAlgorithm); LacSymCheck_IsPartialSupported(capabilitiesMask, pSessionDesc); + pSessionDesc->useOptimisedContentDesc = CPA_FALSE; + pSessionDesc->useStatefulSha3ContentDesc = CPA_FALSE; + + /* Build configuration data */ + buildCmdData(pService, + pSessionDesc, + &chainOrder, + &proto, + &pSessionDesc->laCmdFlags, + &cmnRequestFlags); + + if (ICP_QAT_FW_LA_USE_UCS_SLICE_TYPE == + pSessionDesc->cipherSliceType) + pSessionDesc->useSymConstantsTable = CPA_FALSE; + else + pSessionDesc->useSymConstantsTable = + LacSymQat_UseSymConstantsTable( + pSessionDesc, + &cipherOffsetInConstantsTable, + &hashOffsetInConstantsTable); + + /* for a certain combination of Algorthm Chaining we want to + use an optimised cd block */ + + if (pSessionDesc->symOperation == + CPA_CY_SYM_OP_ALGORITHM_CHAINING && + pSessionDesc->useSymConstantsTable == CPA_TRUE) { + pSessionDesc->useOptimisedContentDesc = + LacSymQat_UseOptimisedContentDesc(pSessionDesc); + } + + /* check whether we need to construct content desc for stateful + * SHA3 */ + pSessionDesc->useStatefulSha3ContentDesc = + LacAlgChain_UseStatefulSha3ContentDesc( + pSessionSetupData->partialsNotRequired, + capabilitiesMask, + pSessionDesc); /* setup some convenience pointers */ pCdInfo = &(pSessionDesc->contentDescInfo); pHwBlockBaseInDRAM = (Cpa8U *)pCdInfo->pData; hwBlockOffsetInDRAM = 0; - /* - * Build the header flags with the default settings for this - * session. - */ - if (pSessionDesc->isDPSession == CPA_TRUE) { - pSessionDesc->cmnRequestFlags = - ICP_QAT_FW_COMN_FLAGS_BUILD( - QAT_COMN_CD_FLD_TYPE_64BIT_ADR, - LAC_SYM_DP_QAT_PTR_TYPE); - } else { - pSessionDesc->cmnRequestFlags = - ICP_QAT_FW_COMN_FLAGS_BUILD( - QAT_COMN_CD_FLD_TYPE_64BIT_ADR, - LAC_SYM_DEFAULT_QAT_PTR_TYPE); + /* set up the pointer for the optimised content desc if this is + * possible we still have to support both cd types in case of + * partials so we construct both */ + if (pSessionDesc->useOptimisedContentDesc == CPA_TRUE) { + pCdInfoOptimised = + &(pSessionDesc->contentDescOptimisedInfo); + pOptimisedHwBlockBaseInDRAM = + (Cpa8U *)pCdInfoOptimised->pData; + optimisedHwBlockOffsetInDRAM = 0; } - LacSymQat_LaSetDefaultFlags(&pSessionDesc->laCmdFlags, - pSessionDesc->symOperation); - switch (pSessionDesc->symOperation) { case CPA_CY_SYM_OP_CIPHER: { LacAlgChain_CipherCDBuild( pCipherData, pSessionDesc, ICP_QAT_FW_SLICE_DRAM_WR, cipherOffsetInConstantsTable, &pSessionDesc->cmnRequestFlags, &pSessionDesc->laCmdFlags, pHwBlockBaseInDRAM, - &hwBlockOffsetInDRAM); + &hwBlockOffsetInDRAM, + capabilitiesMask); + + if (pSessionDesc->useSymConstantsTable) { + LacAlgChain_CipherCDBuild_ForSHRAM( + pCipherData, + pSessionDesc, + ICP_QAT_FW_SLICE_DRAM_WR, + cipherOffsetInConstantsTable); + } } break; case CPA_CY_SYM_OP_HASH: LacAlgChain_HashCDBuild(pHashData, instanceHandle, pSessionDesc, - ICP_QAT_FW_SLICE_DRAM_WR, + ICP_QAT_FW_SLICE_NULL, hashOffsetInConstantsTable, &pSessionDesc->cmnRequestFlags, &pSessionDesc->laCmdFlags, &precomputeData, &precomputeDataOptimisedCd, pHwBlockBaseInDRAM, &hwBlockOffsetInDRAM, NULL, NULL); break; case CPA_CY_SYM_OP_ALGORITHM_CHAINING: /* For CCM/GCM, CPM firmware currently expects the - * cipher and - * hash h/w setup blocks to be arranged 
according to the - * chain - * order (Except for GCM/CCM, order doesn't actually - * matter as - * long as the config offsets are set correctly in CD - * control - * blocks + * cipher and hash h/w setup blocks to be arranged + * according to the chain order (Except for GCM/CCM, + * order doesn't actually matter as long as the config + * offsets are set correctly in CD control blocks */ if (CPA_CY_SYM_ALG_CHAIN_ORDER_HASH_THEN_CIPHER == chainOrder) { LacAlgChain_HashCDBuild( pHashData, instanceHandle, pSessionDesc, ICP_QAT_FW_SLICE_CIPHER, hashOffsetInConstantsTable, &pSessionDesc->cmnRequestFlags, &pSessionDesc->laCmdFlags, &precomputeData, &precomputeDataOptimisedCd, pHwBlockBaseInDRAM, &hwBlockOffsetInDRAM, pOptimisedHwBlockBaseInDRAM, &optimisedHwBlockOffsetInDRAM); LacAlgChain_CipherCDBuild( pCipherData, pSessionDesc, ICP_QAT_FW_SLICE_DRAM_WR, cipherOffsetInConstantsTable, &pSessionDesc->cmnRequestFlags, &pSessionDesc->laCmdFlags, pHwBlockBaseInDRAM, - &hwBlockOffsetInDRAM); - if (LAC_CIPHER_IS_SPC( - pCipherData->cipherAlgorithm, - pHashData->hashAlgorithm, - capabilitiesMask)) { + &hwBlockOffsetInDRAM, + capabilitiesMask); + + if (pSessionDesc->useOptimisedContentDesc) { + LacAlgChain_CipherCDBuild_ForOptimisedCD( + pCipherData, + pSessionDesc, + ICP_QAT_FW_SLICE_DRAM_WR, + cipherOffsetInConstantsTable, + pOptimisedHwBlockBaseInDRAM, + &optimisedHwBlockOffsetInDRAM); + } + + if (NON_SPC != pSessionDesc->singlePassState) { pCdInfo->hwBlkSzQuadWords = (LAC_BYTES_TO_QUADWORDS( hwBlockOffsetInDRAM)); pMsg = (icp_qat_fw_comn_req_t *)&( pSessionDesc->reqSpcCacheHdr); SalQatMsg_ContentDescHdrWrite( (icp_qat_fw_comn_req_t *)pMsg, pCdInfo); } - } else { + } else /* CPA_CY_SYM_ALG_CHAIN_ORDER_CIPHER_THEN_HASH */ + { LacAlgChain_CipherCDBuild( pCipherData, pSessionDesc, ICP_QAT_FW_SLICE_AUTH, cipherOffsetInConstantsTable, &pSessionDesc->cmnRequestFlags, &pSessionDesc->laCmdFlags, pHwBlockBaseInDRAM, - &hwBlockOffsetInDRAM); + &hwBlockOffsetInDRAM, + capabilitiesMask); + + if (pSessionDesc->useOptimisedContentDesc) { + LacAlgChain_CipherCDBuild_ForOptimisedCD( + pCipherData, + pSessionDesc, + ICP_QAT_FW_SLICE_AUTH, + cipherOffsetInConstantsTable, + pOptimisedHwBlockBaseInDRAM, + &optimisedHwBlockOffsetInDRAM); + } - if (LAC_CIPHER_IS_SPC( - pCipherData->cipherAlgorithm, - pHashData->hashAlgorithm, - capabilitiesMask)) { + if (NON_SPC != pSessionDesc->singlePassState) { pCdInfo->hwBlkSzQuadWords = LAC_BYTES_TO_QUADWORDS( hwBlockOffsetInDRAM); pMsg = (icp_qat_fw_comn_req_t *)&( pSessionDesc->reqSpcCacheHdr); SalQatMsg_ContentDescHdrWrite( (icp_qat_fw_comn_req_t *)pMsg, pCdInfo); } LacAlgChain_HashCDBuild( pHashData, instanceHandle, pSessionDesc, ICP_QAT_FW_SLICE_DRAM_WR, hashOffsetInConstantsTable, &pSessionDesc->cmnRequestFlags, &pSessionDesc->laCmdFlags, &precomputeData, &precomputeDataOptimisedCd, pHwBlockBaseInDRAM, &hwBlockOffsetInDRAM, pOptimisedHwBlockBaseInDRAM, &optimisedHwBlockOffsetInDRAM); } break; default: LAC_LOG_ERROR("Invalid sym operation\n"); status = CPA_STATUS_INVALID_PARAM; } } if ((CPA_STATUS_SUCCESS == status) && pSessionDesc->isAuth) { lac_sym_qat_hash_state_buffer_info_t *pHashStateBufferInfo = &(pSessionDesc->hashStateBufferInfo); CpaBoolean hashStateBuffer = CPA_TRUE; /* set up fields in both the cd_ctrl and reqParams which - * describe - * the ReqParams block */ + * describe the ReqParams block */ LacSymQat_HashSetupReqParamsMetaData( &(pSessionDesc->reqCacheFtr), instanceHandle, pHashData, hashStateBuffer, pSessionDesc->qatHashMode, pSessionDesc->digestVerify); + if 
(pSessionDesc->useSymConstantsTable) { + /* Need to set up for SHRAM Constants Table use also */ + LacSymQat_HashSetupReqParamsMetaData( + &(pSessionDesc->shramReqCacheFtr), + instanceHandle, + pHashData, + hashStateBuffer, + pSessionDesc->qatHashMode, + pSessionDesc->digestVerify); + } + /* populate the hash state prefix buffer info structure * (part of user allocated session memory & the * buffer itself. For CCM/GCM the buffer is stored in the * cookie and is not initialised here) */ if (CPA_FALSE == pSessionDesc->isAuthEncryptOp) { LAC_CHECK_64_BYTE_ALIGNMENT( &(pSessionDesc->hashStatePrefixBuffer[0])); status = LacHash_StatePrefixAadBufferInit( &(pService->generic_service_info), pHashData, &(pSessionDesc->reqCacheFtr), pSessionDesc->qatHashMode, pSessionDesc->hashStatePrefixBuffer, pHashStateBufferInfo); /* SHRAM Constants Table not used for Auth-Enc */ } if (CPA_STATUS_SUCCESS == status) { if (IS_HASH_MODE_1(pSessionDesc->qatHashMode) || CPA_CY_SYM_HASH_ZUC_EIA3 == pHashData->hashAlgorithm) { LAC_CHECK_64_BYTE_ALIGNMENT( &(pSessionDesc->hashStatePrefixBuffer[0])); /* Block messages until precompute is completed */ pSessionDesc->nonBlockingOpsInProgress = CPA_FALSE; status = LacHash_PrecomputeDataCreate( instanceHandle, (CpaCySymSessionSetupData *) pSessionSetupData, LacSymAlgChain_HashPrecomputeDoneCb, pSessionDesc, pSessionDesc->hashStatePrefixBuffer, precomputeData.pState1, precomputeData.pState2); + if (pSessionDesc->useOptimisedContentDesc) { + status = LacHash_PrecomputeDataCreate( + instanceHandle, + (CpaCySymSessionSetupData *) + pSessionSetupData, + LacSymAlgChain_HashPrecomputeDoneCb, + pSessionDesc, + pSessionDesc->hashStatePrefixBuffer, + precomputeDataOptimisedCd.pState1, + precomputeDataOptimisedCd.pState2); + } } else if (pHashData->hashAlgorithm == CPA_CY_SYM_HASH_AES_CBC_MAC) { - LAC_OS_BZERO(precomputeData.pState2, - precomputeData.state2Size); - memcpy(precomputeData.pState2, - pHashData->authModeSetupData.authKey, - pHashData->authModeSetupData - .authKeyLenInBytes); - } - } - if (CPA_STATUS_SUCCESS == status) { - - if (pSessionDesc->digestVerify) { - - ICP_QAT_FW_LA_CMP_AUTH_SET( - pSessionDesc->laCmdFlags, - ICP_QAT_FW_LA_CMP_AUTH_RES); - ICP_QAT_FW_LA_RET_AUTH_SET( - pSessionDesc->laCmdFlags, - ICP_QAT_FW_LA_NO_RET_AUTH_RES); - } else { - - ICP_QAT_FW_LA_RET_AUTH_SET( - pSessionDesc->laCmdFlags, - ICP_QAT_FW_LA_RET_AUTH_RES); - ICP_QAT_FW_LA_CMP_AUTH_SET( - pSessionDesc->laCmdFlags, - ICP_QAT_FW_LA_NO_CMP_AUTH_RES); + if (NULL != precomputeData.pState2) { + LAC_OS_BZERO(precomputeData.pState2, + precomputeData.state2Size); + memcpy(precomputeData.pState2, + pHashData->authModeSetupData + .authKey, + pHashData->authModeSetupData + .authKeyLenInBytes); + } } } } if (CPA_STATUS_SUCCESS == status) { + /* Configure the ContentDescriptor field + in the request if not done already */ pCdInfo->hwBlkSzQuadWords = LAC_BYTES_TO_QUADWORDS(hwBlockOffsetInDRAM); pMsg = (icp_qat_fw_comn_req_t *)&(pSessionDesc->reqCacheHdr); - - /* Configure the ContentDescriptor field - * in the request if not done already */ SalQatMsg_ContentDescHdrWrite((icp_qat_fw_comn_req_t *)pMsg, pCdInfo); - if (CPA_CY_SYM_CIPHER_ZUC_EEA3 == - pSessionSetupData->cipherSetupData.cipherAlgorithm || - pHashData->hashAlgorithm == CPA_CY_SYM_HASH_ZUC_EIA3) { - /* New bit position (12) for ZUC. The FW provides a - * specific macro - * to use to set the ZUC proto flag. 
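The LacHash_PrecomputeDataCreate() calls above fill pState1/pState2 with the precomputed inner and outer hash states used by QAT hash mode 1. Those states come from the RFC 2104 construction: one compression-function pass over (key XOR ipad) and one over (key XOR opad). A sketch of just the pad preparation, under the assumption that the device then digests each padded block; hmac_prepare_pads is a hypothetical helper, and the driver's precompute path also handles over-long keys (hashed down first, not shown).

#include <stdint.h>
#include <string.h>

#define HMAC_IPAD 0x36
#define HMAC_OPAD 0x5c

static void
hmac_prepare_pads(const uint8_t *key, size_t keylen, size_t blocklen,
    uint8_t *ipad, uint8_t *opad)
{
	size_t i;

	/* Short keys are implicitly zero-padded: bytes past keylen keep
	 * the plain ipad/opad values written by memset. */
	memset(ipad, HMAC_IPAD, blocklen);
	memset(opad, HMAC_OPAD, blocklen);
	for (i = 0; i < keylen && i < blocklen; i++) {
		ipad[i] ^= key[i];
		opad[i] ^= key[i];
	}
}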
With the new FW I/F - * this needs - * to be set for both Cipher and Auth */ - ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET( - pSessionDesc->laCmdFlags, proto); - } else { - /* Configure the common header */ - ICP_QAT_FW_LA_PROTO_SET(pSessionDesc->laCmdFlags, - proto); + pMsgS = + (icp_qat_fw_comn_req_t *)&(pSessionDesc->shramReqCacheHdr); + /*If we are using the optimised CD then + we have to set this up correctly in the SHARM reqCache*/ + if (pSessionDesc->useOptimisedContentDesc) { + pCdInfoOptimised->hwBlkSzQuadWords = + LAC_BYTES_TO_QUADWORDS( + optimisedHwBlockOffsetInDRAM); + SalQatMsg_ContentDescHdrWrite( + (icp_qat_fw_comn_req_t *)pMsgS, pCdInfoOptimised); } - /* set Append flag, if digest is appended */ - if (pSessionDesc->digestIsAppended) { - ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET( - pSessionDesc->laCmdFlags, - ICP_QAT_FW_LA_DIGEST_IN_BUFFER); - } else { - ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET( - pSessionDesc->laCmdFlags, - ICP_QAT_FW_LA_NO_DIGEST_IN_BUFFER); - } + /* Updates command flags basing on configured alg */ + updateLaCmdFlags(pSessionDesc, + proto, + &pSessionDesc->laCmdFlags); SalQatMsg_CmnHdrWrite((icp_qat_fw_comn_req_t *)pMsg, ICP_QAT_FW_COMN_REQ_CPM_FW_LA, pSessionDesc->laCmdId, pSessionDesc->cmnRequestFlags, pSessionDesc->laCmdFlags); + + /* Need to duplicate if SHRAM Constants Table used */ + if (pSessionDesc->useSymConstantsTable) { + ICP_QAT_FW_LA_CIPH_AUTH_CFG_OFFSET_FLAG_SET( + pSessionDesc->laCmdFlags, + ICP_QAT_FW_CIPH_AUTH_CFG_OFFSET_IN_SHRAM_CP); + + if (pSessionDesc->isCipher && + !pSessionDesc->useOptimisedContentDesc) { + ICP_QAT_FW_COMN_CD_FLD_TYPE_SET( + cmnRequestFlags, + QAT_COMN_CD_FLD_TYPE_16BYTE_DATA); + } + + SalQatMsg_CmnHdrWrite((icp_qat_fw_comn_req_t *)pMsgS, + ICP_QAT_FW_COMN_REQ_CPM_FW_LA, + pSessionDesc->laCmdId, + cmnRequestFlags, + pSessionDesc->laCmdFlags); + } } return status; } +static void +LacAlgChain_StatefulSha3_SkipStateLoadFlags(icp_qat_fw_la_bulk_req_t *pMsg, + Cpa32U packetType, + icp_qat_hw_auth_mode_t qatHashMode) +{ + icp_qat_fw_auth_cd_ctrl_hdr_t *pAuthCdCtrlHdr = NULL; + + pAuthCdCtrlHdr = (icp_qat_fw_auth_cd_ctrl_hdr_t *)&(pMsg->cd_ctrl); + + if (IS_HASH_MODE_2(qatHashMode)) { + if ((ICP_QAT_FW_LA_PARTIAL_START == packetType) || + (ICP_QAT_FW_LA_PARTIAL_NONE == packetType)) { + ICP_QAT_FW_HASH_FLAG_SKIP_INNER_STATE1_LOAD_SET( + pAuthCdCtrlHdr->hash_flags, + QAT_FW_LA_SKIP_INNER_STATE1_LOAD); + ICP_QAT_FW_HASH_FLAG_SKIP_OUTER_STATE1_LOAD_SET( + pAuthCdCtrlHdr->hash_flags, + QAT_FW_LA_SKIP_OUTER_STATE1_LOAD); + } else if (ICP_QAT_FW_LA_PARTIAL_END == packetType) { + ICP_QAT_FW_HASH_FLAG_SKIP_OUTER_STATE1_LOAD_SET( + pAuthCdCtrlHdr->hash_flags, + QAT_FW_LA_SKIP_OUTER_STATE1_LOAD); + } + } else { + if ((ICP_QAT_FW_LA_PARTIAL_START == packetType) || + (ICP_QAT_FW_LA_PARTIAL_NONE == packetType)) { + ICP_QAT_FW_HASH_FLAG_SKIP_INNER_STATE1_LOAD_SET( + pAuthCdCtrlHdr->hash_flags, + QAT_FW_LA_SKIP_INNER_STATE1_LOAD); + } + } +} + /** @ingroup LacAlgChain */ CpaStatus LacAlgChain_Perform(const CpaInstanceHandle instanceHandle, lac_session_desc_t *pSessionDesc, void *pCallbackTag, const CpaCySymOpData *pOpData, const CpaBufferList *pSrcBuffer, CpaBufferList *pDstBuffer, CpaBoolean *pVerifyResult) { CpaStatus status = CPA_STATUS_SUCCESS; sal_crypto_service_t *pService = (sal_crypto_service_t *)instanceHandle; Cpa32U capabilitiesMask = pService->generic_service_info.capabilitiesMask; lac_sym_bulk_cookie_t *pCookie = NULL; lac_sym_cookie_t *pSymCookie = NULL; icp_qat_fw_la_bulk_req_t *pMsg = NULL; Cpa8U *pMsgDummy = NULL; Cpa8U *pCacheDummyHdr = 
NULL; Cpa8U *pCacheDummyFtr = NULL; Cpa32U qatPacketType = 0; CpaBufferList *pBufferList = NULL; Cpa8U *pDigestResult = NULL; Cpa64U srcAddrPhys = 0; Cpa64U dstAddrPhys = 0; icp_qat_fw_la_cmd_id_t laCmdId; sal_qat_content_desc_info_t *pCdInfo = NULL; Cpa8U *pHwBlockBaseInDRAM = NULL; Cpa32U hwBlockOffsetInDRAM = 0; Cpa32U sizeInBytes = 0; icp_qat_fw_cipher_cd_ctrl_hdr_t *pSpcCdCtrlHdr = NULL; CpaCySymCipherAlgorithm cipher; CpaCySymHashAlgorithm hash; Cpa8U paddingLen = 0; Cpa8U blockLen = 0; + CpaBoolean digestIsAppended = CPA_FALSE; + Cpa32U aadLenInBytes = 0; Cpa64U srcPktSize = 0; + Cpa64U dstPktSize = 0; /* Set the command id */ laCmdId = pSessionDesc->laCmdId; cipher = pSessionDesc->cipherAlgorithm; hash = pSessionDesc->hashAlgorithm; + CpaBoolean isSpCcm = + (LAC_CIPHER_IS_CCM(cipher) && LAC_CIPHER_AES_V2(capabilitiesMask)); + + if (CPA_CY_SYM_HASH_AES_GMAC == hash) { + pSessionDesc->aadLenInBytes = pOpData->messageLenToHashInBytes; + if (pOpData->messageLenToHashInBytes == 0 || + pOpData->pAdditionalAuthData != NULL) { + LAC_INVALID_PARAM_LOG( + "For AES_GMAC, AAD Length " + "(messageLenToHashInBytes) must " + "be non zero and pAdditionalAuthData " + "must be NULL"); + return CPA_STATUS_INVALID_PARAM; + } + } + + aadLenInBytes = pSessionDesc->aadLenInBytes; + /* Convert Alg Chain Request to Cipher Request for CCP and * AES_GCM single pass */ - if (!pSessionDesc->isSinglePass && - LAC_CIPHER_IS_SPC(cipher, hash, capabilitiesMask) && - (LAC_CIPHER_SPC_IV_SIZE == pOpData->ivLenInBytes)) { + if ((NON_SPC != pSessionDesc->singlePassState) && + (isSpCcm || (LAC_CIPHER_SPC_IV_SIZE == pOpData->ivLenInBytes))) { pSessionDesc->laCmdId = ICP_QAT_FW_LA_CMD_CIPHER; laCmdId = pSessionDesc->laCmdId; pSessionDesc->symOperation = CPA_CY_SYM_OP_CIPHER; - pSessionDesc->isSinglePass = CPA_TRUE; + pSessionDesc->singlePassState = SPC; pSessionDesc->isCipher = CPA_TRUE; pSessionDesc->isAuthEncryptOp = CPA_FALSE; pSessionDesc->isAuth = CPA_FALSE; - if (CPA_CY_SYM_HASH_AES_GMAC == pSessionDesc->hashAlgorithm) { - pSessionDesc->aadLenInBytes = - pOpData->messageLenToHashInBytes; - if (ICP_QAT_FW_SPC_AAD_SZ_MAX < - pSessionDesc->aadLenInBytes) { + + if (CPA_CY_SYM_HASH_AES_GMAC == hash) { + if (ICP_QAT_FW_SPC_AAD_SZ_MAX < aadLenInBytes) { LAC_INVALID_PARAM_LOG( "aadLenInBytes for AES_GMAC"); return CPA_STATUS_INVALID_PARAM; } } - /* New bit position (13) for SINGLE PASS. 
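This is the point where a LIKELY_SPC session is promoted: the algorithm-chaining request is rewritten as a plain single-pass cipher command once the IV length (or AES-v2 CCM) makes true single-pass possible. The trigger condition, mirrored with stand-in names; SPC_IV_SIZE stands in for LAC_CIPHER_SPC_IV_SIZE, assumed to be the 12-byte GCM/ChaCha IV length.

#include <stdbool.h>
#include <stdint.h>

#define SPC_IV_SIZE 12	/* stand-in for LAC_CIPHER_SPC_IV_SIZE */

typedef enum { NON_SPC, LIKELY_SPC, SPC } spc_state_t;

static bool
promote_to_spc(spc_state_t state, bool is_sp_ccm, uint32_t iv_len)
{
	/* Mirrors the test above: any single-pass-capable session is
	 * committed once the request is SPC CCM or carries the 12-byte
	 * IV. */
	return (state != NON_SPC && (is_sp_ccm || iv_len == SPC_IV_SIZE));
}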
* The FW provides a specific macro to use to set the proto flag */ ICP_QAT_FW_LA_SINGLE_PASS_PROTO_FLAG_SET( pSessionDesc->laCmdFlags, ICP_QAT_FW_LA_SINGLE_PASS_PROTO); - ICP_QAT_FW_LA_PROTO_SET(pSessionDesc->laCmdFlags, 0); + if (isCyGen2x(pService)) { + ICP_QAT_FW_LA_PROTO_SET(pSessionDesc->laCmdFlags, 0); + } pCdInfo = &(pSessionDesc->contentDescInfo); pHwBlockBaseInDRAM = (Cpa8U *)pCdInfo->pData; if (CPA_CY_SYM_CIPHER_DIRECTION_DECRYPT == pSessionDesc->cipherDirection) { if (LAC_CIPHER_IS_GCM(cipher)) hwBlockOffsetInDRAM = LAC_QUADWORDS_TO_BYTES( - LAC_SYM_QAT_CIPHER_OFFSET_IN_DRAM_GCM_SPC); - else + LAC_SYM_QAT_CIPHER_GCM_SPC_OFFSET_IN_DRAM); + else if (LAC_CIPHER_IS_CHACHA(cipher)) hwBlockOffsetInDRAM = LAC_QUADWORDS_TO_BYTES( - LAC_SYM_QAT_CIPHER_OFFSET_IN_DRAM_CHACHA_SPC); + LAC_SYM_QAT_CIPHER_CHACHA_SPC_OFFSET_IN_DRAM); + } else if (isSpCcm) { + hwBlockOffsetInDRAM = LAC_QUADWORDS_TO_BYTES( + LAC_SYM_QAT_CIPHER_CCM_SPC_OFFSET_IN_DRAM); } + + /* Update cipher slice type */ + pSessionDesc->cipherSliceType = + LacCipher_GetCipherSliceType(pService, + pSessionDesc->cipherAlgorithm, + pSessionDesc->hashAlgorithm); + + ICP_QAT_FW_LA_SLICE_TYPE_SET(pSessionDesc->laCmdFlags, + pSessionDesc->cipherSliceType); + /* construct cipherConfig in CD in DRAM */ LacSymQat_CipherHwBlockPopulateCfgData(pSessionDesc, pHwBlockBaseInDRAM + hwBlockOffsetInDRAM, &sizeInBytes); SalQatMsg_CmnHdrWrite((icp_qat_fw_comn_req_t *)&( pSessionDesc->reqSpcCacheHdr), ICP_QAT_FW_COMN_REQ_CPM_FW_LA, - pSessionDesc->laCmdId, + laCmdId, + pSessionDesc->cmnRequestFlags, + pSessionDesc->laCmdFlags); + } else if ((SPC == pSessionDesc->singlePassState) && + (LAC_CIPHER_SPC_IV_SIZE != pOpData->ivLenInBytes)) { + pSessionDesc->symOperation = CPA_CY_SYM_OP_ALGORITHM_CHAINING; + pSessionDesc->singlePassState = LIKELY_SPC; + pSessionDesc->isCipher = CPA_TRUE; + pSessionDesc->isAuthEncryptOp = CPA_TRUE; + pSessionDesc->isAuth = CPA_TRUE; + pCdInfo = &(pSessionDesc->contentDescInfo); + pHwBlockBaseInDRAM = (Cpa8U *)pCdInfo->pData; + + if (CPA_CY_SYM_CIPHER_DIRECTION_ENCRYPT == + pSessionDesc->cipherDirection) { + pSessionDesc->laCmdId = ICP_QAT_FW_LA_CMD_CIPHER_HASH; + } else { + pSessionDesc->laCmdId = ICP_QAT_FW_LA_CMD_HASH_CIPHER; + } + + laCmdId = pSessionDesc->laCmdId; + ICP_QAT_FW_LA_SINGLE_PASS_PROTO_FLAG_SET( + pSessionDesc->laCmdFlags, 0); + ICP_QAT_FW_LA_PROTO_SET(pSessionDesc->laCmdFlags, + ICP_QAT_FW_LA_GCM_PROTO); + + LacSymQat_CipherHwBlockPopulateCfgData(pSessionDesc, + pHwBlockBaseInDRAM + + hwBlockOffsetInDRAM, + &sizeInBytes); + + SalQatMsg_CmnHdrWrite((icp_qat_fw_comn_req_t *)&( + pSessionDesc->reqCacheHdr), + ICP_QAT_FW_COMN_REQ_CPM_FW_LA, + laCmdId, pSessionDesc->cmnRequestFlags, pSessionDesc->laCmdFlags); - } else if (CPA_CY_SYM_HASH_AES_GMAC == pSessionDesc->hashAlgorithm) { - pSessionDesc->aadLenInBytes = pOpData->messageLenToHashInBytes; } - if (LAC_CIPHER_IS_CHACHA(cipher) && - (LAC_CIPHER_SPC_IV_SIZE != pOpData->ivLenInBytes)) { + else if (LAC_CIPHER_IS_CHACHA(cipher) && + (LAC_CIPHER_SPC_IV_SIZE != pOpData->ivLenInBytes)) { LAC_INVALID_PARAM_LOG("IV for CHACHA"); return CPA_STATUS_INVALID_PARAM; - } else if (CPA_CY_SYM_HASH_AES_GMAC == pSessionDesc->hashAlgorithm) { - if (pOpData->messageLenToHashInBytes == 0 || - pOpData->pAdditionalAuthData != NULL) { - LAC_INVALID_PARAM_LOG( - "For AES_GMAC, AAD Length " - "(messageLenToHashInBytes) must " - "be non zero and pAdditionalAuthData " - "must be NULL"); - status = CPA_STATUS_INVALID_PARAM; - } } - if (CPA_TRUE == pSessionDesc->isAuthEncryptOp) { - if 
(CPA_CY_SYM_HASH_AES_CCM == pSessionDesc->hashAlgorithm) { + if ((CPA_TRUE == pSessionDesc->isAuthEncryptOp) || isSpCcm) { + if (CPA_CY_SYM_HASH_AES_CCM == hash) { status = LacSymAlgChain_CheckCCMData( pOpData->pAdditionalAuthData, pOpData->pIv, pOpData->messageLenToCipherInBytes, pOpData->ivLenInBytes); if (CPA_STATUS_SUCCESS == status) { LacSymAlgChain_PrepareCCMData( pSessionDesc, pOpData->pAdditionalAuthData, pOpData->pIv, pOpData->messageLenToCipherInBytes, pOpData->ivLenInBytes); } - } else if (CPA_CY_SYM_HASH_AES_GCM == - pSessionDesc->hashAlgorithm) { - if (pSessionDesc->aadLenInBytes != 0 && + } else if (CPA_CY_SYM_HASH_AES_GCM == hash) { + if (aadLenInBytes != 0 && pOpData->pAdditionalAuthData == NULL) { LAC_INVALID_PARAM_LOG("pAdditionalAuthData"); status = CPA_STATUS_INVALID_PARAM; } if (CPA_STATUS_SUCCESS == status) { LacSymAlgChain_PrepareGCMData( pSessionDesc, pOpData->pAdditionalAuthData); } } } /* allocate cookie (used by callback function) */ if (CPA_STATUS_SUCCESS == status) { pSymCookie = (lac_sym_cookie_t *)Lac_MemPoolEntryAlloc( pService->lac_sym_cookie_pool); if (pSymCookie == NULL) { LAC_LOG_ERROR("Cannot allocate cookie - NULL"); status = CPA_STATUS_RESOURCE; } else if ((void *)CPA_STATUS_RETRY == pSymCookie) { pSymCookie = NULL; status = CPA_STATUS_RETRY; } else { pCookie = &(pSymCookie->u.bulkCookie); } } if (CPA_STATUS_SUCCESS == status) { /* write the buffer descriptors */ if (IS_ZERO_LENGTH_BUFFER_SUPPORTED(cipher, hash)) { status = LacBuffDesc_BufferListDescWriteAndAllowZeroBuffer( (CpaBufferList *)pSrcBuffer, &srcAddrPhys, CPA_FALSE, &(pService->generic_service_info)); - } else { - status = LacBuffDesc_BufferListDescWrite( - (CpaBufferList *)pSrcBuffer, - &srcAddrPhys, - CPA_FALSE, - &(pService->generic_service_info)); - } - if (CPA_STATUS_SUCCESS != status) { - LAC_LOG_ERROR("Unable to write src buffer descriptors"); - } - - /* For out of place operations */ - if ((pSrcBuffer != pDstBuffer) && - (CPA_STATUS_SUCCESS == status)) { - if (IS_ZERO_LENGTH_BUFFER_SUPPORTED(cipher, hash)) { + if (CPA_STATUS_SUCCESS != status) { + LAC_LOG_ERROR( + "Unable to write src buffer descriptors"); + } + /* For out of place operations */ + if ((pSrcBuffer != pDstBuffer) && + (CPA_STATUS_SUCCESS == status)) { status = LacBuffDesc_BufferListDescWriteAndAllowZeroBuffer( pDstBuffer, &dstAddrPhys, CPA_FALSE, &(pService->generic_service_info)); - } else { + if (CPA_STATUS_SUCCESS != status) { + LAC_LOG_ERROR( + "Unable to write dest buffer descriptors"); + } + } + } else { + status = LacBuffDesc_BufferListDescWrite( + (CpaBufferList *)pSrcBuffer, + &srcAddrPhys, + CPA_FALSE, + &(pService->generic_service_info)); + if (CPA_STATUS_SUCCESS != status) { + LAC_LOG_ERROR( + "Unable to write src buffer descriptors in " + "LacBuffDesc_BufferListDescWrite"); + } + /* For out of place operations */ + if ((pSrcBuffer != pDstBuffer) && + (CPA_STATUS_SUCCESS == status)) { status = LacBuffDesc_BufferListDescWrite( pDstBuffer, &dstAddrPhys, CPA_FALSE, &(pService->generic_service_info)); - } - if (CPA_STATUS_SUCCESS != status) { - LAC_LOG_ERROR( - "Unable to write dest buffer descriptors"); + if (CPA_STATUS_SUCCESS != status) { + LAC_LOG_ERROR( + "Unable to write dest buffer descriptors in " + "LacBuffDesc_BufferListDescWrite"); + } } } } if (CPA_STATUS_SUCCESS == status) { /* populate the cookie */ pCookie->pCallbackTag = pCallbackTag; pCookie->sessionCtx = pOpData->sessionCtx; pCookie->pOpData = (const CpaCySymOpData *)pOpData; pCookie->pDstBuffer = pDstBuffer; pCookie->updateSessionIvOnSend = 
CPA_FALSE; pCookie->updateUserIvOnRecieve = CPA_FALSE; pCookie->updateKeySizeOnRecieve = CPA_FALSE; pCookie->pNext = NULL; pCookie->instanceHandle = pService; /* get the qat packet type for LAC packet type */ LacSymQat_packetTypeGet(pOpData->packetType, pSessionDesc->partialState, &qatPacketType); /* * For XTS mode, the key size must be updated after * the first partial has been sent. Set a flag here so the * response knows to do this. */ - if ((laCmdId != ICP_QAT_FW_LA_CMD_AUTH) && + if (LAC_CIPHER_IS_XTS_MODE(cipher) && + (laCmdId != ICP_QAT_FW_LA_CMD_AUTH) && (CPA_CY_SYM_PACKET_TYPE_PARTIAL == pOpData->packetType) && - (LAC_CIPHER_IS_XTS_MODE(pSessionDesc->cipherAlgorithm)) && (qatPacketType == ICP_QAT_FW_LA_PARTIAL_START)) { pCookie->updateKeySizeOnRecieve = CPA_TRUE; } /* * Now create the Request. * Start by populating it from the cache in the session * descriptor. */ pMsg = &(pCookie->qatMsg); pMsgDummy = (Cpa8U *)pMsg; - if (pSessionDesc->isSinglePass) { + if (SPC == pSessionDesc->singlePassState) { pCacheDummyHdr = (Cpa8U *)&(pSessionDesc->reqSpcCacheHdr); pCacheDummyFtr = (Cpa8U *)&(pSessionDesc->reqSpcCacheFtr); } else { /* Normally, we want to use the SHRAM Constants Table if - * possible - * for best performance (less DRAM accesses incurred by - * CPM). But - * we can't use it for partial-packet hash operations. - * This is why - * we build 2 versions of the message template at - * sessionInit, + * possible for best performance (less DRAM accesses + * incurred by CPM). But we can't use it for + * partial-packet hash operations. This is why we build + * 2 versions of the message template at sessionInit, * one for SHRAM Constants Table usage and the other - * (default) for - * Content Descriptor h/w setup data in DRAM. And we - * chose between - * them here on a per-request basis, when we know the - * packetType + * (default) for Content Descriptor h/w setup data in + * DRAM. And we chose between them here on a + * per-request basis, when we know the packetType */ if ((!pSessionDesc->useSymConstantsTable) || (pSessionDesc->isAuth && (CPA_CY_SYM_PACKET_TYPE_FULL != pOpData->packetType))) { pCacheDummyHdr = (Cpa8U *)&(pSessionDesc->reqCacheHdr); pCacheDummyFtr = (Cpa8U *)&(pSessionDesc->reqCacheFtr); } else { pCacheDummyHdr = (Cpa8U *)&(pSessionDesc->shramReqCacheHdr); pCacheDummyFtr = (Cpa8U *)&(pSessionDesc->shramReqCacheFtr); } } memcpy(pMsgDummy, pCacheDummyHdr, (LAC_LONG_WORD_IN_BYTES * LAC_SIZE_OF_CACHE_HDR_IN_LW)); memset((pMsgDummy + (LAC_LONG_WORD_IN_BYTES * LAC_SIZE_OF_CACHE_HDR_IN_LW)), 0, (LAC_LONG_WORD_IN_BYTES * LAC_SIZE_OF_CACHE_TO_CLEAR_IN_LW)); - memcpy(pMsgDummy + (LAC_LONG_WORD_IN_BYTES * - LAC_START_OF_CACHE_FTR_IN_LW), + memcpy(pMsgDummy + + (LAC_LONG_WORD_IN_BYTES * + LAC_START_OF_CACHE_FTR_IN_LW), pCacheDummyFtr, (LAC_LONG_WORD_IN_BYTES * LAC_SIZE_OF_CACHE_FTR_IN_LW)); /* * Populate the comn_mid section */ SalQatMsg_CmnMidWrite(pMsg, pCookie, LAC_SYM_DEFAULT_QAT_PTR_TYPE, srcAddrPhys, dstAddrPhys, 0, 0); /* * Populate the serv_specif_flags field of the Request header * Some of the flags are set up here. * Others are set up later when the RequestParams are set up. 
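The per-request template choice above can be read as one small predicate: the SHRAM constants-table template is preferred because it saves the CPM DRAM reads, but any partial-packet hash operation must fall back to the DRAM content descriptor. A stand-alone restatement; pkt_t is an illustrative stand-in for the LAC packet types.

#include <stdbool.h>

typedef enum { PKT_FULL, PKT_PARTIAL, PKT_LAST_PARTIAL } pkt_t;

static bool
use_shram_template(bool use_constants_table, bool is_auth, pkt_t pkt)
{
	/* No constants-table template was built for this session. */
	if (!use_constants_table)
		return (false);
	/* Partial-packet hash operations need the DRAM descriptor. */
	if (is_auth && pkt != PKT_FULL)
		return (false);
	return (true);
}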
*/ LacSymQat_LaPacketCommandFlagSet( qatPacketType, laCmdId, - pSessionDesc->cipherAlgorithm, + cipher, &pMsg->comn_hdr.serv_specif_flags, pOpData->ivLenInBytes); - if (pSessionDesc->isSinglePass) { + if (SPC == pSessionDesc->singlePassState) { ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET( pMsg->comn_hdr.serv_specif_flags, ICP_QAT_FW_LA_GCM_IV_LEN_NOT_12_OCTETS); if (CPA_CY_SYM_PACKET_TYPE_PARTIAL == pOpData->packetType) { ICP_QAT_FW_LA_RET_AUTH_SET( pMsg->comn_hdr.serv_specif_flags, ICP_QAT_FW_LA_NO_RET_AUTH_RES); ICP_QAT_FW_LA_CMP_AUTH_SET( pMsg->comn_hdr.serv_specif_flags, ICP_QAT_FW_LA_NO_CMP_AUTH_RES); } } + ICP_QAT_FW_LA_SLICE_TYPE_SET(pMsg->comn_hdr.serv_specif_flags, + pSessionDesc->cipherSliceType); + LacBuffDesc_BufferListTotalSizeGet(pSrcBuffer, &srcPktSize); + LacBuffDesc_BufferListTotalSizeGet(pDstBuffer, &dstPktSize); /* * Populate the CipherRequestParams section of the Request */ if (laCmdId != ICP_QAT_FW_LA_CMD_AUTH) { Cpa8U *pIvBuffer = NULL; - status = LacCipher_PerformParamCheck( - pSessionDesc->cipherAlgorithm, pOpData, srcPktSize); + status = LacCipher_PerformParamCheck(cipher, + pOpData, + srcPktSize); if (CPA_STATUS_SUCCESS != status) { /* free the cookie */ Lac_MemPoolEntryFree(pCookie); return status; } if (CPA_STATUS_SUCCESS == status) { /* align cipher IV */ status = LacCipher_PerformIvCheck( &(pService->generic_service_info), pCookie, qatPacketType, &pIvBuffer); } - if (pSessionDesc->isSinglePass && + if ((SPC == pSessionDesc->singlePassState) && ((ICP_QAT_FW_LA_PARTIAL_MID == qatPacketType) || (ICP_QAT_FW_LA_PARTIAL_END == qatPacketType))) { /* For SPC stateful cipher state size for mid - * and - * end partial packet is 48 bytes + * and end partial packet is 48 bytes */ pSpcCdCtrlHdr = (icp_qat_fw_cipher_cd_ctrl_hdr_t *)&( pMsg->cd_ctrl); pSpcCdCtrlHdr->cipher_state_sz = LAC_BYTES_TO_QUADWORDS( - LAC_SYM_QAT_CIPHER_STATE_SIZE_SPC); + LAC_SYM_QAT_CIPHER_SPC_STATE_SIZE); } /*populate the cipher request parameters */ if (CPA_STATUS_SUCCESS == status) { Cpa64U ivBufferPhysAddr = 0; if (pIvBuffer != NULL) { /* User OpData memory being used for IV * buffer */ /* get the physical address */ ivBufferPhysAddr = LAC_OS_VIRT_TO_PHYS_EXTERNAL( pService->generic_service_info, pIvBuffer); if (0 == ivBufferPhysAddr) { LAC_LOG_ERROR( "Unable to get the physical address " "of the IV\n"); status = CPA_STATUS_FAIL; } } if (status == CPA_STATUS_SUCCESS) { status = LacSymQat_CipherRequestParamsPopulate( + pSessionDesc, pMsg, pOpData ->cryptoStartSrcOffsetInBytes, pOpData ->messageLenToCipherInBytes, ivBufferPhysAddr, pIvBuffer); } } - if (CPA_STATUS_SUCCESS == status && - pSessionDesc->isSinglePass) { + if ((SPC == pSessionDesc->singlePassState) && + CPA_STATUS_SUCCESS == status) { Cpa64U aadBufferPhysAddr = 0; /* For CHACHA and AES-GCM there is an AAD buffer - * if - * aadLenInBytes is nonzero In case of AES-GMAC, - * AAD buffer - * passed in the src buffer. + * if aadLenInBytes is nonzero In case of + * AES-GMAC, AAD buffer passed in the src + * buffer. 
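The hunk that follows pads the caller's AAD buffer with zeros up to a whole number of cipher blocks before its physical address is handed to firmware, and for single-pass CCM it also counts the B0 block plus the 2-byte AAD-length encoding that precede the AAD proper. A sketch of that computation; CCM_AAD_OFFSET is an illustrative stand-in for LAC_CIPHER_CCM_AAD_OFFSET, assumed to be the 16-byte B0 block plus the 2-byte length field.

#include <stdint.h>
#include <string.h>

#define CCM_AAD_OFFSET	(16 + 2)  /* stand-in: B0 block + AAD-length field */

static void
pad_aad(uint8_t *aad, uint32_t aad_len, uint32_t block_len, int is_sp_ccm)
{
	uint32_t total = aad_len + (is_sp_ccm ? CCM_AAD_OFFSET : 0);
	uint32_t rem = total % block_len;

	/* The API's AAD buffer sizing rules guarantee room for the pad. */
	if (rem != 0)
		memset(&aad[total], 0, block_len - rem);
}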
*/ - if (0 != pSessionDesc->aadLenInBytes && - CPA_CY_SYM_HASH_AES_GMAC != - pSessionDesc->hashAlgorithm) { + if ((0 != aadLenInBytes && + CPA_CY_SYM_HASH_AES_GMAC != hash) || + isSpCcm) { LAC_CHECK_NULL_PARAM( pOpData->pAdditionalAuthData); + Cpa32U aadDataLen = + pSessionDesc->aadLenInBytes; + + /* In case of AES_CCM, B0 block size and + * 2 bytes of AAD len encoding need to + * be added to total AAD data len */ + if (isSpCcm) + aadDataLen += + LAC_CIPHER_CCM_AAD_OFFSET; + blockLen = LacSymQat_CipherBlockSizeBytesGet( - pSessionDesc->cipherAlgorithm); - if ((pSessionDesc->aadLenInBytes % - blockLen) != 0) { + cipher); + if ((aadDataLen % blockLen) != 0) { paddingLen = blockLen - - (pSessionDesc - ->aadLenInBytes % - blockLen); + (aadDataLen % blockLen); memset( - &pOpData->pAdditionalAuthData - [pSessionDesc - ->aadLenInBytes], + &pOpData + ->pAdditionalAuthData + [aadDataLen], 0, paddingLen); } - /* User OpData memory being used for aad + /* User OpData memory being used for AAD * buffer */ /* get the physical address */ aadBufferPhysAddr = LAC_OS_VIRT_TO_PHYS_EXTERNAL( pService->generic_service_info, pOpData->pAdditionalAuthData); if (0 == aadBufferPhysAddr) { LAC_LOG_ERROR( "Unable to get the physical address " "of the aad\n"); status = CPA_STATUS_FAIL; } } if (CPA_STATUS_SUCCESS == status) { icp_qat_fw_la_cipher_req_params_t *pCipherReqParams = (icp_qat_fw_la_cipher_req_params_t *)((Cpa8U *)&( pMsg->serv_specif_rqpars) + ICP_QAT_FW_CIPHER_REQUEST_PARAMETERS_OFFSET); - pCipherReqParams->spc_aad_addr = - aadBufferPhysAddr; - pCipherReqParams->spc_aad_sz = - pSessionDesc->aadLenInBytes; + + icp_qat_fw_la_cipher_20_req_params_t + *pCipher20ReqParams = + (void + *)((Cpa8U *)&( + pMsg->serv_specif_rqpars) + + ICP_QAT_FW_CIPHER_REQUEST_PARAMETERS_OFFSET); + + if (isCyGen4x(pService)) { + pCipher20ReqParams + ->spc_aad_addr = + aadBufferPhysAddr; + pCipher20ReqParams->spc_aad_sz = + pSessionDesc->aadLenInBytes; + pCipher20ReqParams + ->spc_aad_offset = 0; + if (isSpCcm) + pCipher20ReqParams + ->spc_aad_sz += + LAC_CIPHER_CCM_AAD_OFFSET; + } else { + pCipherReqParams->spc_aad_addr = + aadBufferPhysAddr; + pCipherReqParams->spc_aad_sz = + (Cpa16U)pSessionDesc + ->aadLenInBytes; + } if (CPA_TRUE != pSessionDesc->digestIsAppended) { Cpa64U digestBufferPhysAddr = 0; /* User OpData memory being used * for digest buffer */ /* get the physical address */ digestBufferPhysAddr = LAC_OS_VIRT_TO_PHYS_EXTERNAL( pService ->generic_service_info, pOpData->pDigestResult); if (0 != digestBufferPhysAddr) { - pCipherReqParams - ->spc_auth_res_addr = - digestBufferPhysAddr; - pCipherReqParams - ->spc_auth_res_sz = - pSessionDesc - ->hashResultSize; + if (isCyGen4x( + pService)) { + pCipher20ReqParams + ->spc_auth_res_addr = + digestBufferPhysAddr; + pCipher20ReqParams + ->spc_auth_res_sz = + (Cpa8U)pSessionDesc + ->hashResultSize; + } else { + pCipherReqParams + ->spc_auth_res_addr = + digestBufferPhysAddr; + pCipherReqParams + ->spc_auth_res_sz = + (Cpa8U)pSessionDesc + ->hashResultSize; + } } else { LAC_LOG_ERROR( "Unable to get the physical address " "of the digest\n"); status = CPA_STATUS_FAIL; } + } else { + /* Check if the dest buffer can + * handle the digest, only for + * last packet */ + if (((ICP_QAT_FW_LA_PARTIAL_NONE == + qatPacketType) || + (ICP_QAT_FW_LA_PARTIAL_END == + qatPacketType))) { + if (dstPktSize < + (pOpData + ->cryptoStartSrcOffsetInBytes + + pOpData + ->messageLenToCipherInBytes + + pSessionDesc + ->hashResultSize)) + status = + CPA_STATUS_INVALID_PARAM; + } + if (isCyGen4x(pService)) 
{ + pCipher20ReqParams + ->spc_auth_res_sz = + (Cpa8U)pSessionDesc + ->hashResultSize; + } else { + pCipherReqParams + ->spc_auth_res_sz = + (Cpa8U)pSessionDesc + ->hashResultSize; + } } } } } /* * Set up HashRequestParams part of Request */ if ((status == CPA_STATUS_SUCCESS) && (laCmdId != ICP_QAT_FW_LA_CMD_CIPHER)) { Cpa32U authOffsetInBytes = pOpData->hashStartSrcOffsetInBytes; Cpa32U authLenInBytes = pOpData->messageLenToHashInBytes; status = LacHash_PerformParamCheck(instanceHandle, pSessionDesc, pOpData, srcPktSize, pVerifyResult); if (CPA_STATUS_SUCCESS != status) { /* free the cookie */ Lac_MemPoolEntryFree(pCookie); return status; } if (CPA_STATUS_SUCCESS == status) { /* Info structure for CCM/GCM */ lac_sym_qat_hash_state_buffer_info_t hashStateBufferInfo = { 0 }; lac_sym_qat_hash_state_buffer_info_t *pHashStateBufferInfo = &(pSessionDesc->hashStateBufferInfo); if (CPA_TRUE == pSessionDesc->isAuthEncryptOp) { icp_qat_fw_la_auth_req_params_t *pHashReqParams = (icp_qat_fw_la_auth_req_params_t *)((Cpa8U *)&( pMsg->serv_specif_rqpars) + ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET); hashStateBufferInfo.pData = pOpData->pAdditionalAuthData; if (pOpData->pAdditionalAuthData == NULL) { hashStateBufferInfo.pDataPhys = 0; } else { hashStateBufferInfo .pDataPhys = LAC_MEM_CAST_PTR_TO_UINT64( LAC_OS_VIRT_TO_PHYS_EXTERNAL( pService ->generic_service_info, pOpData ->pAdditionalAuthData)); } hashStateBufferInfo .stateStorageSzQuadWords = 0; hashStateBufferInfo .prefixAadSzQuadWords = LAC_BYTES_TO_QUADWORDS( pHashReqParams->u2.aad_sz); /* Overwrite hash state buffer info - * structure pointer - * with the one created for CCM/GCM */ + * structure pointer with the one + * created for CCM/GCM */ pHashStateBufferInfo = &hashStateBufferInfo; /* Aad buffer could be null in the GCM * case */ if (0 == hashStateBufferInfo.pDataPhys && - CPA_CY_SYM_HASH_AES_GCM != - pSessionDesc->hashAlgorithm && - CPA_CY_SYM_HASH_AES_GMAC != - pSessionDesc->hashAlgorithm) { + CPA_CY_SYM_HASH_AES_GCM != hash && + CPA_CY_SYM_HASH_AES_GMAC != hash) { LAC_LOG_ERROR( "Unable to get the physical address" "of the AAD\n"); status = CPA_STATUS_FAIL; } /* for CCM/GCM the hash and cipher data - * regions - * are equal */ + * regions are equal */ authOffsetInBytes = pOpData ->cryptoStartSrcOffsetInBytes; /* For authenticated encryption, - * authentication length is - * determined by - * messageLenToCipherInBytes for AES-GCM - * and - * AES-CCM, and by + * authentication length is determined + * by messageLenToCipherInBytes for + * AES-GCM and AES-CCM, and by * messageLenToHashInBytes for AES-GMAC. * You don't see the latter here, as - * that is the initial - * value of authLenInBytes. */ - if (pSessionDesc->hashAlgorithm != - CPA_CY_SYM_HASH_AES_GMAC) + * that is the initial value of + * authLenInBytes. 
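The comment above fixes the authenticated-region semantics for AEAD: the hash and cipher regions coincide for GCM and CCM, so the auth length is taken from messageLenToCipherInBytes, while AES-GMAC authenticates only the AAD carried in the source buffer and keeps the caller's messageLenToHashInBytes. Restated with stand-in types:

#include <stdint.h>

typedef enum { H_AES_GCM, H_AES_CCM, H_AES_GMAC } aead_hash_t;

struct op_lens {
	uint32_t crypto_off;	/* cryptoStartSrcOffsetInBytes */
	uint32_t crypto_len;	/* messageLenToCipherInBytes */
	uint32_t hash_len;	/* messageLenToHashInBytes */
};

static void
aead_auth_region(aead_hash_t h, const struct op_lens *op,
    uint32_t *auth_off, uint32_t *auth_len)
{
	/* Hash and cipher regions coincide for authenticated encryption;
	 * GMAC authenticates the AAD only, so the caller's hash length
	 * is kept. */
	*auth_off = op->crypto_off;
	*auth_len = (h == H_AES_GMAC) ? op->hash_len : op->crypto_len;
}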
*/ + if (hash != CPA_CY_SYM_HASH_AES_GMAC) authLenInBytes = pOpData ->messageLenToCipherInBytes; } else if (CPA_CY_SYM_HASH_SNOW3G_UIA2 == - pSessionDesc->hashAlgorithm || - CPA_CY_SYM_HASH_ZUC_EIA3 == - pSessionDesc->hashAlgorithm) { + hash || + CPA_CY_SYM_HASH_ZUC_EIA3 == hash) { hashStateBufferInfo.pData = pOpData->pAdditionalAuthData; hashStateBufferInfo.pDataPhys = LAC_OS_VIRT_TO_PHYS_EXTERNAL( pService->generic_service_info, hashStateBufferInfo.pData); hashStateBufferInfo .stateStorageSzQuadWords = 0; hashStateBufferInfo .prefixAadSzQuadWords = LAC_BYTES_TO_QUADWORDS( - pSessionDesc->aadLenInBytes); + aadLenInBytes); pHashStateBufferInfo = &hashStateBufferInfo; if (0 == hashStateBufferInfo.pDataPhys) { LAC_LOG_ERROR( "Unable to get the physical address" "of the AAD\n"); status = CPA_STATUS_FAIL; } } - if (CPA_CY_SYM_HASH_AES_CCM == - pSessionDesc->hashAlgorithm) { + if (CPA_CY_SYM_HASH_AES_CCM == hash) { if (CPA_CY_SYM_CIPHER_DIRECTION_DECRYPT == pSessionDesc->cipherDirection) { /* On a decrypt path pSrcBuffer - * is used as this is - * where encrypted digest is - * located. Firmware - * uses encrypted digest for + * is used as this is where + * encrypted digest is located. + * Firmware uses encrypted + * digest for * compare/verification*/ pBufferList = (CpaBufferList *)pSrcBuffer; } else { /* On an encrypt path pDstBuffer - * is used as this is - * where encrypted digest will - * be written */ + * is used as this is where + * encrypted digest will be + * written */ pBufferList = (CpaBufferList *)pDstBuffer; } status = LacSymAlgChain_PtrFromOffsetGet( pBufferList, pOpData->cryptoStartSrcOffsetInBytes + pOpData ->messageLenToCipherInBytes, &pDigestResult); if (CPA_STATUS_SUCCESS != status) { LAC_LOG_ERROR( "Cannot set digest pointer within the" " buffer list - offset out of bounds"); } } else { pDigestResult = pOpData->pDigestResult; } + if (CPA_TRUE == + pSessionDesc->useStatefulSha3ContentDesc) { + LacAlgChain_StatefulSha3_SkipStateLoadFlags( + pMsg, + qatPacketType, + pSessionDesc->qatHashMode); + } + if (CPA_CY_SYM_OP_ALGORITHM_CHAINING == pSessionDesc->symOperation) { /* In alg chaining mode, packets are not - * seen as partials - * for hash operations. Override to - * NONE. + * seen as partials for hash operations. + * Override to NONE. */ qatPacketType = ICP_QAT_FW_LA_PARTIAL_NONE; } - if (CPA_TRUE == - pSessionDesc->digestIsAppended) { + digestIsAppended = + pSessionDesc->digestIsAppended; + if (CPA_TRUE == digestIsAppended) { /*Check if the destination buffer can - * handle the digest - * if digestIsAppend is true*/ + * handle the digest if digestIsAppend + * is true*/ if (srcPktSize < (authOffsetInBytes + authLenInBytes + pSessionDesc->hashResultSize)) { status = CPA_STATUS_INVALID_PARAM; } } if (CPA_STATUS_SUCCESS == status) { /* populate the hash request parameters */ status = LacSymQat_HashRequestParamsPopulate( pMsg, authOffsetInBytes, authLenInBytes, &(pService ->generic_service_info), pHashStateBufferInfo, qatPacketType, pSessionDesc->hashResultSize, pSessionDesc->digestVerify, - pSessionDesc->digestIsAppended ? + digestIsAppended ? NULL : pDigestResult, - pSessionDesc->hashAlgorithm, + hash, NULL); } } } } /* * send the message to the QAT */ if (CPA_STATUS_SUCCESS == status) { qatUtilsAtomicInc(&(pSessionDesc->u.pendingCbCount)); status = LacSymQueue_RequestSend(instanceHandle, pCookie, pSessionDesc); if (CPA_STATUS_SUCCESS != status) { /* Decrease pending callback counter on send fail. 
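Both appended-digest checks in this function (the destination-buffer test in the single-pass path and the source-buffer test above) reduce to the same bounds condition: the flat buffer must hold the authenticated region plus the digest that firmware reads or writes immediately after it. In widened arithmetic:

#include <stdbool.h>
#include <stdint.h>

static bool
digest_fits(uint64_t pkt_size, uint32_t auth_off, uint32_t auth_len,
    uint32_t digest_size)
{
	/* Widen before adding so large offsets cannot wrap. */
	return (pkt_size >= (uint64_t)auth_off + auth_len + digest_size);
}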
*/ qatUtilsAtomicDec(&(pSessionDesc->u.pendingCbCount)); } } /* Case that will catch all error status's for this function */ if (CPA_STATUS_SUCCESS != status) { /* free the cookie */ if (NULL != pSymCookie) { Lac_MemPoolEntryFree(pSymCookie); } } return status; } diff --git a/sys/dev/qat/qat_api/common/crypto/sym/lac_sym_api.c b/sys/dev/qat/qat_api/common/crypto/sym/lac_sym_api.c index 70446ef988b0..050a237208e9 100644 --- a/sys/dev/qat/qat_api/common/crypto/sym/lac_sym_api.c +++ b/sys/dev/qat/qat_api/common/crypto/sym/lac_sym_api.c @@ -1,1130 +1,1122 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright(c) 2007-2022 Intel Corporation */ /* $FreeBSD$ */ /** *************************************************************************** * @file lac_sym_api.c Implementation of the symmetric API * * @ingroup LacSym * ***************************************************************************/ /* ******************************************************************************* * Include public/global header files ******************************************************************************* */ #include "cpa.h" #include "cpa_cy_sym.h" #include "cpa_cy_im.h" #include "icp_adf_init.h" #include "icp_adf_transport.h" #include "icp_adf_transport_dp.h" #include "icp_accel_devices.h" #include "icp_adf_debug.h" #include "icp_qat_fw_la.h" /* ****************************************************************************** * Include private header files ****************************************************************************** */ #include "lac_common.h" #include "lac_log.h" #include "lac_mem.h" #include "lac_mem_pools.h" #include "lac_list.h" #include "lac_sym.h" #include "lac_sym_qat.h" #include "lac_sal.h" #include "lac_sal_ctrl.h" #include "lac_session.h" #include "lac_sym_cipher.h" #include "lac_sym_hash.h" #include "lac_sym_alg_chain.h" #include "lac_sym_stats.h" #include "lac_sym_partial.h" #include "lac_sym_qat_hash_defs_lookup.h" #include "lac_sym_cb.h" #include "lac_buffer_desc.h" #include "lac_sync.h" #include "lac_hooks.h" #include "lac_sal_types_crypto.h" #include "sal_service_state.h" -#define IS_EXT_ALG_CHAIN_UNSUPPORTED( \ - cipherAlgorithm, hashAlgorithm, extAlgchainSupported) \ +#define IS_EXT_ALG_CHAIN_UNSUPPORTED(cipherAlgorithm, \ + hashAlgorithm, \ + extAlgchainSupported) \ ((((CPA_CY_SYM_CIPHER_ZUC_EEA3 == cipherAlgorithm || \ CPA_CY_SYM_CIPHER_SNOW3G_UEA2 == cipherAlgorithm) && \ CPA_CY_SYM_HASH_AES_CMAC == hashAlgorithm) || \ ((CPA_CY_SYM_CIPHER_NULL == cipherAlgorithm || \ CPA_CY_SYM_CIPHER_AES_CTR == cipherAlgorithm || \ CPA_CY_SYM_CIPHER_ZUC_EEA3 == cipherAlgorithm) && \ CPA_CY_SYM_HASH_SNOW3G_UIA2 == hashAlgorithm) || \ ((CPA_CY_SYM_CIPHER_NULL == cipherAlgorithm || \ CPA_CY_SYM_CIPHER_AES_CTR == cipherAlgorithm || \ CPA_CY_SYM_CIPHER_SNOW3G_UEA2 == cipherAlgorithm) && \ CPA_CY_SYM_HASH_ZUC_EIA3 == hashAlgorithm)) && \ !extAlgchainSupported) /*** Local functions definitions ***/ static CpaStatus LacSymPerform_BufferParamCheck(const CpaBufferList *const pSrcBuffer, const CpaBufferList *const pDstBuffer, const lac_session_desc_t *const pSessionDesc, const CpaCySymOpData *const pOpData); +void LacDp_WriteRingMsgFull(CpaCySymDpOpData *pRequest, + icp_qat_fw_la_bulk_req_t *pCurrentQatMsg); +void LacDp_WriteRingMsgOpt(CpaCySymDpOpData *pRequest, + icp_qat_fw_la_bulk_req_t *pCurrentQatMsg); void getCtxSize(const CpaCySymSessionSetupData *pSessionSetupData, Cpa32U *pSessionCtxSizeInBytes); /** ***************************************************************************** * @ingroup 
LacSym * Generic bufferList callback function. * @description * This function is used when the API is called in synchronous mode. * It's assumed the callbackTag holds a lac_sync_op_data_t type * and when the callback is received, this callback shall set the * status and opResult element of that cookie structure and * kick the sid. * This function may be used directly as a callback function. * * @param[in] callbackTag Callback Tag * @param[in] status Status of callback * @param[in] operationType Operation Type * @param[in] pOpData Pointer to the Op Data * @param[out] pDstBuffer Pointer to destination buffer list * @param[out] opResult Boolean to indicate the result of the operation * * @return void * *****************************************************************************/ void LacSync_GenBufListVerifyCb(void *pCallbackTag, CpaStatus status, CpaCySymOp operationType, void *pOpData, CpaBufferList *pDstBuffer, CpaBoolean opResult) { LacSync_GenVerifyWakeupSyncCaller(pCallbackTag, status, opResult); } /* ******************************************************************************* * Define static function definitions ******************************************************************************* */ /** * @ingroup LacSym * Function which perform parameter checks on session setup data * * @param[in] CpaInstanceHandle Instance Handle * @param[in] pSessionSetupData Pointer to session setup data * * @retval CPA_STATUS_SUCCESS The operation succeeded * @retval CPA_STATUS_INVALID_PARAM An invalid parameter value was found */ static CpaStatus LacSymSession_ParamCheck(const CpaInstanceHandle instanceHandle, const CpaCySymSessionSetupData *pSessionSetupData) { /* initialize convenient pointers to cipher and hash contexts */ const CpaCySymCipherSetupData *const pCipherSetupData = (const CpaCySymCipherSetupData *)&pSessionSetupData ->cipherSetupData; const CpaCySymHashSetupData *const pHashSetupData = &pSessionSetupData->hashSetupData; CpaCySymCapabilitiesInfo capInfo; CpaCyCapabilitiesInfo cyCapInfo; cpaCySymQueryCapabilities(instanceHandle, &capInfo); SalCtrl_CyQueryCapabilities(instanceHandle, &cyCapInfo); /* Ensure cipher algorithm is correct and supported */ if ((CPA_CY_SYM_OP_ALGORITHM_CHAINING == pSessionSetupData->symOperation) || (CPA_CY_SYM_OP_CIPHER == pSessionSetupData->symOperation)) { /* Protect against value of cipher outside the bitmap * and check if cipher algorithm is correct */ - if ((pCipherSetupData->cipherAlgorithm >= - CPA_CY_SYM_CIPHER_CAP_BITMAP_SIZE) || - (!CPA_BITMAP_BIT_TEST(capInfo.ciphers, - pCipherSetupData->cipherAlgorithm))) { + if (pCipherSetupData->cipherAlgorithm >= + CPA_CY_SYM_CIPHER_CAP_BITMAP_SIZE) { LAC_INVALID_PARAM_LOG("cipherAlgorithm"); return CPA_STATUS_INVALID_PARAM; } + if (!CPA_BITMAP_BIT_TEST(capInfo.ciphers, + pCipherSetupData->cipherAlgorithm)) { + LAC_UNSUPPORTED_PARAM_LOG( + "UnSupported cipherAlgorithm"); + return CPA_STATUS_UNSUPPORTED; + } } /* Ensure hash algorithm is correct and supported */ if ((CPA_CY_SYM_OP_ALGORITHM_CHAINING == pSessionSetupData->symOperation) || (CPA_CY_SYM_OP_HASH == pSessionSetupData->symOperation)) { - /* Ensure SHAKE algorithms are not supported */ - if ((CPA_CY_SYM_HASH_SHAKE_128 == - pHashSetupData->hashAlgorithm) || - (CPA_CY_SYM_HASH_SHAKE_256 == - pHashSetupData->hashAlgorithm)) { - LAC_INVALID_PARAM_LOG( - "Hash algorithms SHAKE-128 and SHAKE-256 " - "are not supported."); - return CPA_STATUS_UNSUPPORTED; - } - /* Protect against value of hash outside the bitmap * and check if hash algorithm is correct */ - 
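
The validation above is worth pausing on: the old code folded "algorithm id out of range" and "algorithm absent from the capability bitmap" into a single CPA_STATUS_INVALID_PARAM, while the new code separates them, so a caller can distinguish malformed input from a capability the device simply lacks. Below is a minimal, self-contained sketch of the same two-step pattern; the bitmap type, status values, and helper names are invented for illustration and are not the real CPA_BITMAP macros.

#include <stdint.h>
#include <stdio.h>

#define CAP_BITMAP_SIZE 64

typedef struct {
	uint64_t bits; /* one bit per algorithm id */
} cap_bitmap_t;

enum { STATUS_SUCCESS = 0, STATUS_INVALID_PARAM, STATUS_UNSUPPORTED };

static int
check_algorithm(const cap_bitmap_t *caps, unsigned int alg)
{
	if (alg >= CAP_BITMAP_SIZE)
		return (STATUS_INVALID_PARAM); /* caller passed garbage */
	if (!(caps->bits & (1ULL << alg)))
		return (STATUS_UNSUPPORTED); /* valid id, device lacks it */
	return (STATUS_SUCCESS);
}

int
main(void)
{
	cap_bitmap_t caps = { .bits = (1ULL << 3) | (1ULL << 7) };

	printf("%d %d %d\n",
	    check_algorithm(&caps, 3),   /* 0: supported */
	    check_algorithm(&caps, 5),   /* 2: valid but unsupported */
	    check_algorithm(&caps, 99)); /* 1: out of range */
	return (0);
}

With the split status, a caller seeing the unsupported value can fall back to another instance or a software path instead of treating the request as a programming error.
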
if ((pHashSetupData->hashAlgorithm >= - CPA_CY_SYM_HASH_CAP_BITMAP_SIZE) || - (!CPA_BITMAP_BIT_TEST(capInfo.hashes, - pHashSetupData->hashAlgorithm))) { + if (pHashSetupData->hashAlgorithm >= + CPA_CY_SYM_HASH_CAP_BITMAP_SIZE) { LAC_INVALID_PARAM_LOG("hashAlgorithm"); return CPA_STATUS_INVALID_PARAM; } + if (!CPA_BITMAP_BIT_TEST(capInfo.hashes, + pHashSetupData->hashAlgorithm)) { + LAC_UNSUPPORTED_PARAM_LOG("UnSupported hashAlgorithm"); + return CPA_STATUS_UNSUPPORTED; + } } /* ensure CCM, GCM, Kasumi, Snow3G and ZUC cipher and hash algorithms - * are - * selected together for Algorithm Chaining */ + * are selected together for Algorithm Chaining */ if (CPA_CY_SYM_OP_ALGORITHM_CHAINING == pSessionSetupData->symOperation) { /* ensure both hash and cipher algorithms are POLY and CHACHA */ if (((CPA_CY_SYM_CIPHER_CHACHA == pCipherSetupData->cipherAlgorithm) && (CPA_CY_SYM_HASH_POLY != pHashSetupData->hashAlgorithm)) || ((CPA_CY_SYM_HASH_POLY == pHashSetupData->hashAlgorithm) && (CPA_CY_SYM_CIPHER_CHACHA != pCipherSetupData->cipherAlgorithm))) { LAC_INVALID_PARAM_LOG( "Invalid combination of Cipher/Hash " "Algorithms for CHACHA/POLY"); return CPA_STATUS_INVALID_PARAM; } /* ensure both hash and cipher algorithms are CCM */ if (((CPA_CY_SYM_CIPHER_AES_CCM == pCipherSetupData->cipherAlgorithm) && (CPA_CY_SYM_HASH_AES_CCM != pHashSetupData->hashAlgorithm)) || ((CPA_CY_SYM_HASH_AES_CCM == pHashSetupData->hashAlgorithm) && (CPA_CY_SYM_CIPHER_AES_CCM != pCipherSetupData->cipherAlgorithm))) { LAC_INVALID_PARAM_LOG( "Invalid combination of Cipher/Hash Algorithms for CCM"); return CPA_STATUS_INVALID_PARAM; } /* ensure both hash and cipher algorithms are GCM/GMAC */ if ((CPA_CY_SYM_CIPHER_AES_GCM == pCipherSetupData->cipherAlgorithm && (CPA_CY_SYM_HASH_AES_GCM != pHashSetupData->hashAlgorithm && CPA_CY_SYM_HASH_AES_GMAC != pHashSetupData->hashAlgorithm)) || ((CPA_CY_SYM_HASH_AES_GCM == pHashSetupData->hashAlgorithm || CPA_CY_SYM_HASH_AES_GMAC == pHashSetupData->hashAlgorithm) && CPA_CY_SYM_CIPHER_AES_GCM != pCipherSetupData->cipherAlgorithm)) { LAC_INVALID_PARAM_LOG( "Invalid combination of Cipher/Hash Algorithms for GCM"); return CPA_STATUS_INVALID_PARAM; } /* ensure both hash and cipher algorithms are Kasumi */ if (((CPA_CY_SYM_CIPHER_KASUMI_F8 == pCipherSetupData->cipherAlgorithm) && (CPA_CY_SYM_HASH_KASUMI_F9 != pHashSetupData->hashAlgorithm)) || ((CPA_CY_SYM_HASH_KASUMI_F9 == pHashSetupData->hashAlgorithm) && (CPA_CY_SYM_CIPHER_KASUMI_F8 != pCipherSetupData->cipherAlgorithm))) { LAC_INVALID_PARAM_LOG( "Invalid combination of Cipher/Hash Algorithms for Kasumi"); return CPA_STATUS_INVALID_PARAM; } if (IS_EXT_ALG_CHAIN_UNSUPPORTED( pCipherSetupData->cipherAlgorithm, pHashSetupData->hashAlgorithm, cyCapInfo.extAlgchainSupported)) { LAC_UNSUPPORTED_PARAM_LOG( "ExtAlgChain feature not supported"); return CPA_STATUS_UNSUPPORTED; } /* ensure both hash and cipher algorithms are Snow3G */ if (((CPA_CY_SYM_CIPHER_SNOW3G_UEA2 == pCipherSetupData->cipherAlgorithm) && (CPA_CY_SYM_HASH_SNOW3G_UIA2 != pHashSetupData->hashAlgorithm)) || ((CPA_CY_SYM_HASH_SNOW3G_UIA2 == pHashSetupData->hashAlgorithm) && (CPA_CY_SYM_CIPHER_SNOW3G_UEA2 != pCipherSetupData->cipherAlgorithm))) { LAC_INVALID_PARAM_LOG( "Invalid combination of Cipher/Hash Algorithms for Snow3G"); return CPA_STATUS_INVALID_PARAM; } /* ensure both hash and cipher algorithms are ZUC */ if (((CPA_CY_SYM_CIPHER_ZUC_EEA3 == pCipherSetupData->cipherAlgorithm) && (CPA_CY_SYM_HASH_ZUC_EIA3 != pHashSetupData->hashAlgorithm)) || ((CPA_CY_SYM_HASH_ZUC_EIA3 == 
pHashSetupData->hashAlgorithm) && (CPA_CY_SYM_CIPHER_ZUC_EEA3 != pCipherSetupData->cipherAlgorithm))) { LAC_INVALID_PARAM_LOG( "Invalid combination of Cipher/Hash Algorithms for ZUC"); return CPA_STATUS_INVALID_PARAM; } } /* not Algorithm Chaining so prevent CCM/GCM being selected */ else if (CPA_CY_SYM_OP_CIPHER == pSessionSetupData->symOperation) { /* ensure cipher algorithm is not CCM or GCM */ if ((CPA_CY_SYM_CIPHER_AES_CCM == pCipherSetupData->cipherAlgorithm) || (CPA_CY_SYM_CIPHER_AES_GCM == pCipherSetupData->cipherAlgorithm) || (CPA_CY_SYM_CIPHER_CHACHA == pCipherSetupData->cipherAlgorithm)) { LAC_INVALID_PARAM_LOG( "Invalid Cipher Algorithm for non-Algorithm " "Chaining operation"); return CPA_STATUS_INVALID_PARAM; } } else if (CPA_CY_SYM_OP_HASH == pSessionSetupData->symOperation) { /* ensure hash algorithm is not CCM or GCM/GMAC */ if ((CPA_CY_SYM_HASH_AES_CCM == pHashSetupData->hashAlgorithm) || (CPA_CY_SYM_HASH_AES_GCM == pHashSetupData->hashAlgorithm) || (CPA_CY_SYM_HASH_AES_GMAC == pHashSetupData->hashAlgorithm) || (CPA_CY_SYM_HASH_POLY == pHashSetupData->hashAlgorithm)) { LAC_INVALID_PARAM_LOG( "Invalid Hash Algorithm for non-Algorithm Chaining operation"); return CPA_STATUS_INVALID_PARAM; } } /* Unsupported operation. Return error */ else { LAC_INVALID_PARAM_LOG("symOperation"); return CPA_STATUS_INVALID_PARAM; } /* ensure that cipher direction param is * valid for cipher and algchain ops */ if (CPA_CY_SYM_OP_HASH != pSessionSetupData->symOperation) { if ((pCipherSetupData->cipherDirection != CPA_CY_SYM_CIPHER_DIRECTION_ENCRYPT) && (pCipherSetupData->cipherDirection != CPA_CY_SYM_CIPHER_DIRECTION_DECRYPT)) { LAC_INVALID_PARAM_LOG("Invalid Cipher Direction"); return CPA_STATUS_INVALID_PARAM; } } return CPA_STATUS_SUCCESS; } /** * @ingroup LacSym * Function which perform parameter checks on data buffers for symmetric * crypto operations * * @param[in] pSrcBuffer Pointer to source buffer list * @param[in] pDstBuffer Pointer to destination buffer list * @param[in] pSessionDesc Pointer to session descriptor * @param[in] pOpData Pointer to CryptoSymOpData. 
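
The chain of pairwise checks above enforces one rule stated many times: an AEAD or wireless cipher may only be chained with its matching hash, and that hash may not appear with any other cipher. A table-driven sketch of the same rule follows; the enums and rule table are illustrative stand-ins for the CPA definitions and cover only three of the pairs checked above.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

enum cipher { CIPH_AES_CBC, CIPH_AES_CCM, CIPH_AES_GCM, CIPH_CHACHA };
enum hash {
	HASH_SHA256, HASH_AES_CCM, HASH_AES_GCM, HASH_AES_GMAC, HASH_POLY
};

struct pair_rule {
	enum cipher cipher;
	enum hash hashes[2]; /* hashes permitted with this cipher */
	int nhashes;
};

static const struct pair_rule rules[] = {
	{ CIPH_AES_CCM, { HASH_AES_CCM }, 1 },
	{ CIPH_AES_GCM, { HASH_AES_GCM, HASH_AES_GMAC }, 2 },
	{ CIPH_CHACHA, { HASH_POLY }, 1 },
};

static bool
pairing_ok(enum cipher c, enum hash h)
{
	size_t i;
	int j;

	for (i = 0; i < sizeof(rules) / sizeof(rules[0]); i++) {
		bool cipher_bound = (rules[i].cipher == c);
		bool hash_bound = false;

		for (j = 0; j < rules[i].nhashes; j++)
			if (rules[i].hashes[j] == h)
				hash_bound = true;
		/* a rule that binds one side must bind both */
		if (cipher_bound != hash_bound)
			return (false);
		if (cipher_bound)
			return (true);
	}
	return (true); /* neither side is constrained by any rule */
}

int
main(void)
{
	printf("%d %d %d\n",
	    pairing_ok(CIPH_AES_GCM, HASH_AES_GMAC), /* 1 */
	    pairing_ok(CIPH_AES_GCM, HASH_SHA256),   /* 0 */
	    pairing_ok(CIPH_AES_CBC, HASH_AES_CCM)); /* 0 */
	return (0);
}

A table keeps each pairing rule in one place; the hand-rolled if-chains above are equivalent but have to state every rule twice, once from the cipher side and once from the hash side.
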
* * @retval CPA_STATUS_SUCCESS The operation succeeded * @retval CPA_STATUS_INVALID_PARAM An invalid parameter value was found */ static CpaStatus LacSymPerform_BufferParamCheck(const CpaBufferList *const pSrcBuffer, const CpaBufferList *const pDstBuffer, const lac_session_desc_t *const pSessionDesc, const CpaCySymOpData *const pOpData) { Cpa64U srcBufferLen = 0, dstBufferLen = 0; CpaStatus status = CPA_STATUS_SUCCESS; /* verify packet type is in correct range */ switch (pOpData->packetType) { case CPA_CY_SYM_PACKET_TYPE_FULL: case CPA_CY_SYM_PACKET_TYPE_PARTIAL: case CPA_CY_SYM_PACKET_TYPE_LAST_PARTIAL: break; default: { LAC_INVALID_PARAM_LOG("packetType"); return CPA_STATUS_INVALID_PARAM; } } if (!((CPA_CY_SYM_OP_CIPHER != pSessionDesc->symOperation && CPA_CY_SYM_HASH_MODE_PLAIN == pSessionDesc->hashMode) && (0 == pOpData->messageLenToHashInBytes))) { if (IS_ZERO_LENGTH_BUFFER_SUPPORTED( pSessionDesc->cipherAlgorithm, pSessionDesc->hashAlgorithm)) { status = LacBuffDesc_BufferListVerifyNull( pSrcBuffer, &srcBufferLen, LAC_NO_ALIGNMENT_SHIFT); } else { status = LacBuffDesc_BufferListVerify( pSrcBuffer, &srcBufferLen, LAC_NO_ALIGNMENT_SHIFT); } if (CPA_STATUS_SUCCESS != status) { LAC_INVALID_PARAM_LOG("Source buffer invalid"); return CPA_STATUS_INVALID_PARAM; } } else { /* check MetaData !NULL */ if (NULL == pSrcBuffer->pPrivateMetaData) { LAC_INVALID_PARAM_LOG( "Source buffer MetaData cannot be NULL"); return CPA_STATUS_INVALID_PARAM; } } /* out of place checks */ if (pSrcBuffer != pDstBuffer) { /* exception for this check is zero length hash requests to * allow */ /* for srcBufflen=DstBufferLen=0 */ if (!((CPA_CY_SYM_OP_CIPHER != pSessionDesc->symOperation && CPA_CY_SYM_HASH_MODE_PLAIN == pSessionDesc->hashMode) && (0 == pOpData->messageLenToHashInBytes))) { /* Verify buffer(s) for dest packet & return packet * length */ if (IS_ZERO_LENGTH_BUFFER_SUPPORTED( pSessionDesc->cipherAlgorithm, pSessionDesc->hashAlgorithm)) { status = LacBuffDesc_BufferListVerifyNull( pDstBuffer, &dstBufferLen, LAC_NO_ALIGNMENT_SHIFT); } else { status = LacBuffDesc_BufferListVerify( pDstBuffer, &dstBufferLen, LAC_NO_ALIGNMENT_SHIFT); } if (CPA_STATUS_SUCCESS != status) { LAC_INVALID_PARAM_LOG( "Destination buffer invalid"); return CPA_STATUS_INVALID_PARAM; } } else { /* check MetaData !NULL */ if (NULL == pDstBuffer->pPrivateMetaData) { LAC_INVALID_PARAM_LOG( "Dest buffer MetaData cannot be NULL"); return CPA_STATUS_INVALID_PARAM; } } /* Check that src Buffer and dst Buffer Lengths are equal */ - if (srcBufferLen != dstBufferLen) { + /* CCM output needs to be longer than input buffer for appending + * tag*/ + if (srcBufferLen != dstBufferLen && + pSessionDesc->cipherAlgorithm != + CPA_CY_SYM_CIPHER_AES_CCM) { LAC_INVALID_PARAM_LOG( "Source and Dest buffer lengths need to be equal "); return CPA_STATUS_INVALID_PARAM; } } /* check for partial packet suport for the session operation */ if (CPA_CY_SYM_PACKET_TYPE_FULL != pOpData->packetType) { if (CPA_FALSE == pSessionDesc->isPartialSupported) { /* return out here to simplify cleanup */ LAC_INVALID_PARAM_LOG( "Partial packets not supported for operation"); return CPA_STATUS_INVALID_PARAM; } else { /* This function checks to see if the partial packet - * sequence - * is correct */ + * sequence is correct */ if (CPA_STATUS_SUCCESS != LacSym_PartialPacketStateCheck( pOpData->packetType, pSessionDesc->partialState)) { LAC_INVALID_PARAM_LOG("Partial packet Type"); return CPA_STATUS_INVALID_PARAM; } } } return CPA_STATUS_SUCCESS; } /** @ingroup LacSym */ CpaStatus 
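
The relaxed length check above is the interesting part of this hunk: for CCM the destination may legitimately be longer than the source, because the authentication tag is appended past the ciphertext. The sketch below computes the minimum destination length under that rule; the field names only mirror the CPA ones, and the arithmetic is the general appended-tag AEAD case, not the driver's exact check.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct aead_op {
	uint32_t cipher_offset; /* cryptoStartSrcOffsetInBytes analogue */
	uint32_t cipher_len;    /* messageLenToCipherInBytes analogue */
	uint32_t digest_len;    /* hashResultSize analogue */
	bool tag_appended;      /* digestIsAppended analogue */
};

static uint64_t
min_dst_len(const struct aead_op *op)
{
	uint64_t need = (uint64_t)op->cipher_offset + op->cipher_len;

	if (op->tag_appended)
		need += op->digest_len; /* tag lands after the ciphertext */
	return (need);
}

int
main(void)
{
	struct aead_op ccm = { 0, 1024, 16, true };

	/* 1040: ciphertext plus appended tag, so dst may exceed src */
	printf("min dst = %llu\n", (unsigned long long)min_dst_len(&ccm));
	return (0);
}
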
cpaCySymInitSession(const CpaInstanceHandle instanceHandle_in, const CpaCySymCbFunc pSymCb, const CpaCySymSessionSetupData *pSessionSetupData, CpaCySymSessionCtx pSessionCtx) { CpaStatus status = CPA_STATUS_SUCCESS; CpaInstanceHandle instanceHandle = NULL; sal_service_t *pService = NULL; if (CPA_INSTANCE_HANDLE_SINGLE == instanceHandle_in) { instanceHandle = Lac_GetFirstHandle(SAL_SERVICE_TYPE_CRYPTO_SYM); } else { instanceHandle = instanceHandle_in; } LAC_CHECK_INSTANCE_HANDLE(instanceHandle); SAL_CHECK_INSTANCE_TYPE(instanceHandle, (SAL_SERVICE_TYPE_CRYPTO | SAL_SERVICE_TYPE_CRYPTO_SYM)); pService = (sal_service_t *)instanceHandle; /* check crypto service is running otherwise return an error */ SAL_RUNNING_CHECK(pService); status = LacSym_InitSession(instanceHandle, pSymCb, pSessionSetupData, CPA_FALSE, /* isDPSession */ pSessionCtx); if (CPA_STATUS_SUCCESS == status) { /* Increment the stats for a session registered successfully */ LAC_SYM_STAT_INC(numSessionsInitialized, instanceHandle); } else /* if there was an error */ { LAC_SYM_STAT_INC(numSessionErrors, instanceHandle); } return status; } CpaStatus cpaCySymSessionInUse(CpaCySymSessionCtx pSessionCtx, CpaBoolean *pSessionInUse) { CpaStatus status = CPA_STATUS_SUCCESS; lac_session_desc_t *pSessionDesc = NULL; LAC_CHECK_NULL_PARAM(pSessionInUse); LAC_CHECK_INSTANCE_HANDLE(pSessionCtx); *pSessionInUse = CPA_FALSE; pSessionDesc = LAC_SYM_SESSION_DESC_FROM_CTX_GET(pSessionCtx); /* If there are pending requests */ if (pSessionDesc->isDPSession) { if (qatUtilsAtomicGet(&(pSessionDesc->u.pendingDpCbCount))) *pSessionInUse = CPA_TRUE; } else { if (qatUtilsAtomicGet(&(pSessionDesc->u.pendingCbCount))) *pSessionInUse = CPA_TRUE; } return status; } CpaStatus LacSym_InitSession(const CpaInstanceHandle instanceHandle, const CpaCySymCbFunc pSymCb, const CpaCySymSessionSetupData *pSessionSetupData, const CpaBoolean isDPSession, CpaCySymSessionCtx pSessionCtx) { CpaStatus status = CPA_STATUS_SUCCESS; lac_session_desc_t *pSessionDesc = NULL; Cpa32U sessionCtxSizeInBytes = 0; CpaPhysicalAddr physAddress = 0; CpaPhysicalAddr physAddressAligned = 0; sal_service_t *pService = NULL; const CpaCySymCipherSetupData *pCipherSetupData = NULL; const CpaCySymHashSetupData *pHashSetupData = NULL; -/* Instance param checking done by calling function */ + /* Instance param checking done by calling function */ LAC_CHECK_NULL_PARAM(pSessionSetupData); LAC_CHECK_NULL_PARAM(pSessionCtx); status = LacSymSession_ParamCheck(instanceHandle, pSessionSetupData); LAC_CHECK_STATUS(status); /* set the session priority for QAT AL*/ if ((CPA_CY_PRIORITY_HIGH == pSessionSetupData->sessionPriority) || (CPA_CY_PRIORITY_NORMAL == pSessionSetupData->sessionPriority)) { // do nothing - clean up this code. 
use RANGE macro } else { LAC_INVALID_PARAM_LOG("sessionPriority"); return CPA_STATUS_INVALID_PARAM; } pCipherSetupData = &pSessionSetupData->cipherSetupData; pHashSetupData = &pSessionSetupData->hashSetupData; pService = (sal_service_t *)instanceHandle; /* Re-align the session structure to 64 byte alignment */ physAddress = LAC_OS_VIRT_TO_PHYS_EXTERNAL((*pService), (Cpa8U *)pSessionCtx + sizeof(void *)); if (0 == physAddress) { LAC_LOG_ERROR( - "Unable to get the physical address of the session"); + "Unable to get the physical address of the session\n"); return CPA_STATUS_FAIL; } physAddressAligned = LAC_ALIGN_POW2_ROUNDUP(physAddress, LAC_64BYTE_ALIGNMENT); pSessionDesc = (lac_session_desc_t *) /* Move the session pointer by the physical offset between aligned and unaligned memory */ ((Cpa8U *)pSessionCtx + sizeof(void *) + (physAddressAligned - physAddress)); /* save the aligned pointer in the first bytes (size of unsigned long) * of the session memory */ *((LAC_ARCH_UINT *)pSessionCtx) = (LAC_ARCH_UINT)pSessionDesc; /* start off with a clean session */ /* Choose Session Context size */ getCtxSize(pSessionSetupData, &sessionCtxSizeInBytes); switch (sessionCtxSizeInBytes) { case LAC_SYM_SESSION_D1_SIZE: memset(pSessionDesc, 0, sizeof(lac_session_desc_d1_t)); break; case LAC_SYM_SESSION_D2_SIZE: memset(pSessionDesc, 0, sizeof(lac_session_desc_d2_t)); break; default: memset(pSessionDesc, 0, sizeof(lac_session_desc_t)); break; } /* Setup content descriptor info structure * assumption that content descriptor is the first field in * in the session descriptor */ pSessionDesc->contentDescInfo.pData = (Cpa8U *)pSessionDesc; pSessionDesc->contentDescInfo.hardwareSetupBlockPhys = physAddressAligned; pSessionDesc->contentDescOptimisedInfo.pData = ((Cpa8U *)pSessionDesc + LAC_SYM_QAT_CONTENT_DESC_MAX_SIZE); pSessionDesc->contentDescOptimisedInfo.hardwareSetupBlockPhys = (physAddressAligned + LAC_SYM_QAT_CONTENT_DESC_MAX_SIZE); /* Set the Common Session Information */ pSessionDesc->symOperation = pSessionSetupData->symOperation; if (CPA_FALSE == isDPSession) { /* For asynchronous - use the user supplied callback * for synchronous - use the internal synchronous callback */ pSessionDesc->pSymCb = ((void *)NULL != (void *)pSymCb) ? - pSymCb : - LacSync_GenBufListVerifyCb; + pSymCb : + LacSync_GenBufListVerifyCb; } pSessionDesc->isDPSession = isDPSession; if ((CPA_CY_SYM_HASH_AES_GCM == pHashSetupData->hashAlgorithm) || (CPA_CY_SYM_HASH_AES_GMAC == pHashSetupData->hashAlgorithm) || (CPA_CY_SYM_HASH_AES_CCM == pHashSetupData->hashAlgorithm) || (CPA_CY_SYM_CIPHER_CHACHA == pCipherSetupData->cipherAlgorithm) || (CPA_CY_SYM_CIPHER_ARC4 == pCipherSetupData->cipherAlgorithm)) { pSessionDesc->writeRingMsgFunc = LacDp_WriteRingMsgFull; } else { pSessionDesc->writeRingMsgFunc = LacDp_WriteRingMsgOpt; } if (CPA_STATUS_SUCCESS == status) { /* Session set up via API call (not internal one) */ /* Services such as DRBG call the crypto api as part of their - * service - * hence the need to for the flag, it is needed to distinguish - * between - * an internal and external session. + * service hence the need to for the flag, it is needed to + * distinguish between an internal and external session. 
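
The physAddress/physAddressAligned handling above is a standard align-and-stash pattern: over-allocate, round the usable region up to a 64-byte boundary, and store the aligned pointer in the first pointer-sized slot so it can be recovered later (as LAC_SYM_SESSION_DESC_FROM_CTX_GET does). A user-space sketch of the same pattern, with invented macro names and virtual rather than physical addresses:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define DEMO_ALIGN 64
#define ALIGN_POW2_ROUNDUP(x, a) \
	(((x) + ((uintptr_t)(a) - 1)) & ~((uintptr_t)(a) - 1))

int
main(void)
{
	size_t payload = 256;
	void *raw, *aligned, *recovered;
	uintptr_t base;

	/* over-allocate: payload + pointer slot + worst-case padding */
	raw = malloc(payload + sizeof(void *) + DEMO_ALIGN);
	if (raw == NULL)
		return (1);

	base = (uintptr_t)raw + sizeof(void *);
	aligned = (void *)ALIGN_POW2_ROUNDUP(base, DEMO_ALIGN);

	*(void **)raw = aligned;   /* stash, as the driver does */
	recovered = *(void **)raw; /* ..._DESC_FROM_CTX_GET analogue */

	printf("mod64=%lu recovered==aligned: %d\n",
	    (unsigned long)((uintptr_t)recovered % DEMO_ALIGN),
	    recovered == aligned);
	free(raw);
	return (0);
}
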
*/ pSessionDesc->internalSession = CPA_FALSE; status = LacAlgChain_SessionInit(instanceHandle, pSessionSetupData, pSessionDesc); } return status; } /** @ingroup LacSym */ CpaStatus cpaCySymRemoveSession(const CpaInstanceHandle instanceHandle_in, CpaCySymSessionCtx pSessionCtx) { lac_session_desc_t *pSessionDesc = NULL; CpaStatus status = CPA_STATUS_SUCCESS; CpaInstanceHandle instanceHandle = NULL; Cpa64U numPendingRequests = 0; if (CPA_INSTANCE_HANDLE_SINGLE == instanceHandle_in) { instanceHandle = Lac_GetFirstHandle(SAL_SERVICE_TYPE_CRYPTO_SYM); } else { instanceHandle = instanceHandle_in; } LAC_CHECK_INSTANCE_HANDLE(instanceHandle); SAL_CHECK_INSTANCE_TYPE(instanceHandle, (SAL_SERVICE_TYPE_CRYPTO | SAL_SERVICE_TYPE_CRYPTO_SYM)); LAC_CHECK_NULL_PARAM(pSessionCtx); /* check crypto service is running otherwise return an error */ SAL_RUNNING_CHECK(instanceHandle); pSessionDesc = LAC_SYM_SESSION_DESC_FROM_CTX_GET(pSessionCtx); LAC_CHECK_NULL_PARAM(pSessionDesc); if (CPA_TRUE == pSessionDesc->isDPSession) { /* * Based on one instance, we can initialize multiple sessions. * For example, we can initialize the session "X" and session - * "Y" with - * the same instance "A". If there is no operation pending for - * session - * "X", we can remove the session "X". + * "Y" with the same instance "A". If there is no operation + * pending for session "X", we can remove the session "X". * * Now we only check the @pSessionDesc->pendingDpCbCount, if it - * becomes - * zero, we can remove the session. + * becomes zero, we can remove the session. * * Why? * (1) We increase it in the cpaCySymDpEnqueueOp/ * cpaCySymDpEnqueueOpBatch. * (2) We decrease it in the LacSymCb_ProcessCallback. * * If the @pSessionDesc->pendingDpCbCount becomes zero, it means * there is no operation pending for the session "X" anymore, so - * we can - * remove this session. Maybe there is still some requests left - * in the - * instance's ring (icp_adf_queueDataToSend() returns true), but - * the - * request does not belong to "X", it belongs to session "Y". + * we can remove this session. Maybe there is still some + * requests left in the instance's ring + * (icp_adf_queueDataToSend() returns true), but the request + * does not belong to "X", it belongs to session "Y". 
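
The removal logic explained in the comment above is, at heart, a reference-count gate: every enqueue increments the per-session pending counter, every completion callback decrements it, and teardown is refused with a retry status until the counter drains to zero. A compact C11-atomics model follows; qatUtilsAtomic* and CPA_STATUS_RETRY are replaced by invented stand-ins.

#include <stdatomic.h>
#include <stdio.h>

struct session {
	atomic_long pending; /* pendingDpCbCount analogue */
};

static void
submit(struct session *s)
{
	atomic_fetch_add(&s->pending, 1); /* on enqueue */
}

static void
complete(struct session *s)
{
	atomic_fetch_sub(&s->pending, 1); /* in the response callback */
}

static int
remove_session(struct session *s)
{
	if (atomic_load(&s->pending) != 0)
		return (-1); /* CPA_STATUS_RETRY analogue */
	/* no callback can still reference the session: safe teardown */
	return (0);
}

int
main(void)
{
	struct session s = { 0 };

	submit(&s);
	printf("%d\n", remove_session(&s)); /* -1: still pending */
	complete(&s);
	printf("%d\n", remove_session(&s)); /* 0: drained */
	return (0);
}
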
*/ numPendingRequests = qatUtilsAtomicGet(&(pSessionDesc->u.pendingDpCbCount)); } else { numPendingRequests = qatUtilsAtomicGet(&(pSessionDesc->u.pendingCbCount)); } /* If there are pending requests */ if (0 != numPendingRequests) { QAT_UTILS_LOG("There are %llu requests pending\n", (unsigned long long)numPendingRequests); status = CPA_STATUS_RETRY; if (CPA_TRUE == pSessionDesc->isDPSession) { /* Need to update tail if messages queue on tx hi ring - for - data plane api */ + for data plane api */ icp_comms_trans_handle trans_handle = ((sal_crypto_service_t *)instanceHandle) ->trans_handle_sym_tx; if (CPA_TRUE == icp_adf_queueDataToSend(trans_handle)) { /* process the remaining messages in the ring */ QAT_UTILS_LOG("Submitting enqueued requests\n"); /* * SalQatMsg_updateQueueTail */ SalQatMsg_updateQueueTail(trans_handle); return status; } } } if (CPA_STATUS_SUCCESS == status) { - if (CPA_STATUS_SUCCESS != - LAC_SPINLOCK_DESTROY(&pSessionDesc->requestQueueLock)) { - LAC_LOG_ERROR("Failed to destroy request queue lock"); - } + LAC_SPINLOCK_DESTROY(&pSessionDesc->requestQueueLock); if (CPA_FALSE == pSessionDesc->isDPSession) { LAC_SYM_STAT_INC(numSessionsRemoved, instanceHandle); } } else if (CPA_FALSE == pSessionDesc->isDPSession) { LAC_SYM_STAT_INC(numSessionErrors, instanceHandle); } return status; } /** @ingroup LacSym */ static CpaStatus LacSym_Perform(const CpaInstanceHandle instanceHandle, void *callbackTag, const CpaCySymOpData *pOpData, const CpaBufferList *pSrcBuffer, CpaBufferList *pDstBuffer, CpaBoolean *pVerifyResult, CpaBoolean isAsyncMode) { lac_session_desc_t *pSessionDesc = NULL; CpaStatus status = CPA_STATUS_SUCCESS; LAC_CHECK_INSTANCE_HANDLE(instanceHandle); SAL_CHECK_INSTANCE_TYPE(instanceHandle, (SAL_SERVICE_TYPE_CRYPTO | SAL_SERVICE_TYPE_CRYPTO_SYM)); /* check crypto service is running otherwise return an error */ SAL_RUNNING_CHECK(instanceHandle); LAC_CHECK_NULL_PARAM(pOpData); LAC_CHECK_NULL_PARAM(pOpData->sessionCtx); LAC_CHECK_NULL_PARAM(pSrcBuffer); LAC_CHECK_NULL_PARAM(pDstBuffer); pSessionDesc = LAC_SYM_SESSION_DESC_FROM_CTX_GET(pOpData->sessionCtx); LAC_CHECK_NULL_PARAM(pSessionDesc); /*check whether Payload size is zero for CHACHA-POLY*/ if ((CPA_CY_SYM_CIPHER_CHACHA == pSessionDesc->cipherAlgorithm) && (CPA_CY_SYM_HASH_POLY == pSessionDesc->hashAlgorithm) && (CPA_CY_SYM_OP_ALGORITHM_CHAINING == pSessionDesc->symOperation)) { if (!pOpData->messageLenToCipherInBytes) { LAC_INVALID_PARAM_LOG( "Invalid messageLenToCipherInBytes for CHACHA-POLY"); return CPA_STATUS_INVALID_PARAM; } } /* If synchronous Operation - Callback function stored in the session * descriptor so a flag is set in the perform to indicate that * the perform is being re-called for the synchronous operation */ if ((LacSync_GenBufListVerifyCb == pSessionDesc->pSymCb) && isAsyncMode == CPA_TRUE) { CpaBoolean opResult = CPA_FALSE; lac_sync_op_data_t *pSyncCallbackData = NULL; status = LacSync_CreateSyncCookie(&pSyncCallbackData); if (CPA_STATUS_SUCCESS == status) { status = LacSym_Perform(instanceHandle, pSyncCallbackData, pOpData, pSrcBuffer, pDstBuffer, pVerifyResult, CPA_FALSE); } else { /* Failure allocating sync cookie */ LAC_SYM_STAT_INC(numSymOpRequestErrors, instanceHandle); return status; } if (CPA_STATUS_SUCCESS == status) { CpaStatus syncStatus = CPA_STATUS_SUCCESS; syncStatus = LacSync_WaitForCallback( pSyncCallbackData, LAC_SYM_SYNC_CALLBACK_TIMEOUT, &status, &opResult); /* If callback doesn't come back */ if (CPA_STATUS_SUCCESS != syncStatus) { LAC_SYM_STAT_INC(numSymOpCompletedErrors, 
instanceHandle); LAC_LOG_ERROR("Callback timed out"); status = syncStatus; } } else { /* As the Request was not sent the Callback will never * be called, so need to indicate that we're finished * with cookie so it can be destroyed. */ LacSync_SetSyncCookieComplete(pSyncCallbackData); } if (CPA_STATUS_SUCCESS == status) { if (NULL != pVerifyResult) { *pVerifyResult = opResult; } } LacSync_DestroySyncCookie(&pSyncCallbackData); return status; } status = LacSymPerform_BufferParamCheck((const CpaBufferList *)pSrcBuffer, pDstBuffer, pSessionDesc, pOpData); LAC_CHECK_STATUS(status); if ((!pSessionDesc->digestIsAppended) && (CPA_CY_SYM_OP_ALGORITHM_CHAINING == pSessionDesc->symOperation)) { /* Check that pDigestResult is not NULL */ LAC_CHECK_NULL_PARAM(pOpData->pDigestResult); } status = LacAlgChain_Perform(instanceHandle, pSessionDesc, callbackTag, pOpData, pSrcBuffer, pDstBuffer, pVerifyResult); if (CPA_STATUS_SUCCESS == status) { /* check for partial packet suport for the session operation */ if (CPA_CY_SYM_PACKET_TYPE_FULL != pOpData->packetType) { LacSym_PartialPacketStateUpdate( pOpData->packetType, &pSessionDesc->partialState); } /* increment #requests stat */ LAC_SYM_STAT_INC(numSymOpRequests, instanceHandle); } /* Retry also results in the errors stat been incremented */ else { /* increment #errors stat */ LAC_SYM_STAT_INC(numSymOpRequestErrors, instanceHandle); } return status; } /** @ingroup LacSym */ CpaStatus cpaCySymPerformOp(const CpaInstanceHandle instanceHandle_in, void *callbackTag, const CpaCySymOpData *pOpData, const CpaBufferList *pSrcBuffer, CpaBufferList *pDstBuffer, CpaBoolean *pVerifyResult) { CpaInstanceHandle instanceHandle = NULL; if (CPA_INSTANCE_HANDLE_SINGLE == instanceHandle_in) { instanceHandle = Lac_GetFirstHandle(SAL_SERVICE_TYPE_CRYPTO_SYM); } else { instanceHandle = instanceHandle_in; } return LacSym_Perform(instanceHandle, callbackTag, pOpData, pSrcBuffer, pDstBuffer, pVerifyResult, CPA_TRUE); } /** @ingroup LacSym */ CpaStatus cpaCySymQueryStats(const CpaInstanceHandle instanceHandle_in, struct _CpaCySymStats *pSymStats) { CpaInstanceHandle instanceHandle = NULL; if (CPA_INSTANCE_HANDLE_SINGLE == instanceHandle_in) { instanceHandle = Lac_GetFirstHandle(SAL_SERVICE_TYPE_CRYPTO_SYM); } else { instanceHandle = instanceHandle_in; } LAC_CHECK_INSTANCE_HANDLE(instanceHandle); SAL_CHECK_INSTANCE_TYPE(instanceHandle, (SAL_SERVICE_TYPE_CRYPTO | SAL_SERVICE_TYPE_CRYPTO_SYM)); LAC_CHECK_NULL_PARAM(pSymStats); /* check if crypto service is running * otherwise return an error */ SAL_RUNNING_CHECK(instanceHandle); /* copy the fields from the internal structure into the api defined * structure */ LacSym_Stats32CopyGet(instanceHandle, pSymStats); return CPA_STATUS_SUCCESS; } /** @ingroup LacSym */ CpaStatus cpaCySymQueryStats64(const CpaInstanceHandle instanceHandle_in, CpaCySymStats64 *pSymStats) { CpaInstanceHandle instanceHandle = NULL; if (CPA_INSTANCE_HANDLE_SINGLE == instanceHandle_in) { instanceHandle = Lac_GetFirstHandle(SAL_SERVICE_TYPE_CRYPTO_SYM); } else { instanceHandle = instanceHandle_in; } LAC_CHECK_INSTANCE_HANDLE(instanceHandle); SAL_CHECK_INSTANCE_TYPE(instanceHandle, (SAL_SERVICE_TYPE_CRYPTO | SAL_SERVICE_TYPE_CRYPTO_SYM)); LAC_CHECK_NULL_PARAM(pSymStats); /* check if crypto service is running * otherwise return an error */ SAL_RUNNING_CHECK(instanceHandle); /* copy the fields from the internal structure into the api defined * structure */ LacSym_Stats64CopyGet(instanceHandle, pSymStats); return CPA_STATUS_SUCCESS; } /** @ingroup LacSym */ CpaStatus 
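
The synchronous path above wraps the asynchronous engine: it allocates a sync cookie, re-enters LacSym_Perform with the internal callback installed, and then blocks on the cookie until the callback fires or the wait times out. The pthreads sketch below models just the cookie (the timeout handling of LacSync_WaitForCallback is omitted, and all names are illustrative); build with -lpthread.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct sync_cookie {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	bool done;
	int status; /* status reported by the callback */
};

static void
cookie_signal(struct sync_cookie *c, int status) /* callback side */
{
	pthread_mutex_lock(&c->lock);
	c->status = status;
	c->done = true;
	pthread_cond_signal(&c->cond);
	pthread_mutex_unlock(&c->lock);
}

static int
cookie_wait(struct sync_cookie *c) /* caller side */
{
	pthread_mutex_lock(&c->lock);
	while (!c->done) /* the real code also applies a timeout */
		pthread_cond_wait(&c->cond, &c->lock);
	pthread_mutex_unlock(&c->lock);
	return (c->status);
}

static void *
worker(void *arg)
{
	cookie_signal(arg, 0); /* stands in for the QAT completion */
	return (NULL);
}

int
main(void)
{
	struct sync_cookie c = { PTHREAD_MUTEX_INITIALIZER,
		PTHREAD_COND_INITIALIZER, false, 0 };
	pthread_t t;

	pthread_create(&t, NULL, worker, &c);
	printf("status = %d\n", cookie_wait(&c));
	pthread_join(&t, NULL);
	return (0);
}

Note how the failure path above marks the cookie complete before destroying it when the request was never sent; that appears to be what the LacSync_SetSyncCookieComplete call guards, so a callback that will never come cannot leave the cookie in use.
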
cpaCySymSessionCtxGetSize(const CpaInstanceHandle instanceHandle_in, const CpaCySymSessionSetupData *pSessionSetupData, Cpa32U *pSessionCtxSizeInBytes) { CpaInstanceHandle instanceHandle = NULL; if (CPA_INSTANCE_HANDLE_SINGLE == instanceHandle_in) { instanceHandle = Lac_GetFirstHandle(SAL_SERVICE_TYPE_CRYPTO_SYM); } else { instanceHandle = instanceHandle_in; } LAC_CHECK_INSTANCE_HANDLE(instanceHandle); SAL_CHECK_INSTANCE_TYPE(instanceHandle, (SAL_SERVICE_TYPE_CRYPTO | SAL_SERVICE_TYPE_CRYPTO_SYM)); LAC_CHECK_NULL_PARAM(pSessionSetupData); LAC_CHECK_NULL_PARAM(pSessionCtxSizeInBytes); /* check crypto service is running otherwise return an error */ SAL_RUNNING_CHECK(instanceHandle); *pSessionCtxSizeInBytes = LAC_SYM_SESSION_SIZE; return CPA_STATUS_SUCCESS; } /** @ingroup LacSym */ CpaStatus cpaCySymSessionCtxGetDynamicSize( const CpaInstanceHandle instanceHandle_in, const CpaCySymSessionSetupData *pSessionSetupData, Cpa32U *pSessionCtxSizeInBytes) { CpaInstanceHandle instanceHandle = NULL; if (CPA_INSTANCE_HANDLE_SINGLE == instanceHandle_in) { instanceHandle = Lac_GetFirstHandle(SAL_SERVICE_TYPE_CRYPTO_SYM); } else { instanceHandle = instanceHandle_in; } LAC_CHECK_INSTANCE_HANDLE(instanceHandle); SAL_CHECK_INSTANCE_TYPE(instanceHandle, (SAL_SERVICE_TYPE_CRYPTO | SAL_SERVICE_TYPE_CRYPTO_SYM)); LAC_CHECK_NULL_PARAM(pSessionSetupData); LAC_CHECK_NULL_PARAM(pSessionCtxSizeInBytes); /* check crypto service is running otherwise return an error */ SAL_RUNNING_CHECK(instanceHandle); /* Choose Session Context size */ getCtxSize(pSessionSetupData, pSessionCtxSizeInBytes); return CPA_STATUS_SUCCESS; } void getCtxSize(const CpaCySymSessionSetupData *pSessionSetupData, Cpa32U *pSessionCtxSizeInBytes) { /* using lac_session_desc_d1_t */ if ((pSessionSetupData->cipherSetupData.cipherAlgorithm != CPA_CY_SYM_CIPHER_ARC4) && (pSessionSetupData->cipherSetupData.cipherAlgorithm != CPA_CY_SYM_CIPHER_SNOW3G_UEA2) && (pSessionSetupData->hashSetupData.hashAlgorithm != CPA_CY_SYM_HASH_SNOW3G_UIA2) && (pSessionSetupData->cipherSetupData.cipherAlgorithm != CPA_CY_SYM_CIPHER_AES_CCM) && (pSessionSetupData->cipherSetupData.cipherAlgorithm != CPA_CY_SYM_CIPHER_AES_GCM) && (pSessionSetupData->hashSetupData.hashMode != CPA_CY_SYM_HASH_MODE_AUTH) && (pSessionSetupData->hashSetupData.hashMode != CPA_CY_SYM_HASH_MODE_NESTED) && (pSessionSetupData->partialsNotRequired == CPA_TRUE)) { *pSessionCtxSizeInBytes = LAC_SYM_SESSION_D1_SIZE; } /* using lac_session_desc_d2_t */ else if (((pSessionSetupData->cipherSetupData.cipherAlgorithm == CPA_CY_SYM_CIPHER_AES_CCM) || (pSessionSetupData->cipherSetupData.cipherAlgorithm == CPA_CY_SYM_CIPHER_AES_GCM)) && (pSessionSetupData->partialsNotRequired == CPA_TRUE)) { *pSessionCtxSizeInBytes = LAC_SYM_SESSION_D2_SIZE; } /* using lac_session_desc_t */ else { *pSessionCtxSizeInBytes = LAC_SYM_SESSION_SIZE; } } /** ****************************************************************************** * @ingroup LacSym *****************************************************************************/ CpaStatus cpaCyBufferListGetMetaSize(const CpaInstanceHandle instanceHandle_in, Cpa32U numBuffers, Cpa32U *pSizeInBytes) { CpaInstanceHandle instanceHandle = NULL; if (CPA_INSTANCE_HANDLE_SINGLE == instanceHandle_in) { instanceHandle = Lac_GetFirstHandle(SAL_SERVICE_TYPE_CRYPTO_SYM); } else { instanceHandle = instanceHandle_in; } LAC_CHECK_INSTANCE_HANDLE(instanceHandle); SAL_CHECK_INSTANCE_TYPE(instanceHandle, (SAL_SERVICE_TYPE_CRYPTO | SAL_SERVICE_TYPE_CRYPTO_SYM)); LAC_CHECK_NULL_PARAM(pSizeInBytes); /* In the 
case of zero buffers we still need to allocate one * descriptor to pass to the firmware */ if (0 == numBuffers) { numBuffers = 1; } /* Note: icp_buffer_list_desc_t is 8 bytes in size and * icp_flat_buffer_desc_t is 16 bytes in size. Therefore if * icp_buffer_list_desc_t is aligned * so will each icp_flat_buffer_desc_t structure */ *pSizeInBytes = sizeof(icp_buffer_list_desc_t) + (sizeof(icp_flat_buffer_desc_t) * numBuffers) + ICP_DESCRIPTOR_ALIGNMENT_BYTES; return CPA_STATUS_SUCCESS; } diff --git a/sys/dev/qat/qat_api/common/crypto/sym/lac_sym_cb.c b/sys/dev/qat/qat_api/common/crypto/sym/lac_sym_cb.c index 01e9eca1f10a..2b95dd8cf6fe 100644 --- a/sys/dev/qat/qat_api/common/crypto/sym/lac_sym_cb.c +++ b/sys/dev/qat/qat_api/common/crypto/sym/lac_sym_cb.c @@ -1,545 +1,527 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright(c) 2007-2022 Intel Corporation */ /* $FreeBSD$ */ /** *************************************************************************** * @file lac_sym_cb.c Callback handler functions for symmetric components * * @ingroup LacSym * ***************************************************************************/ /* ******************************************************************************* * Include public/global header files ******************************************************************************* */ #include "cpa.h" #include "cpa_cy_sym.h" #include "icp_accel_devices.h" #include "icp_adf_init.h" #include "icp_qat_fw_la.h" #include "icp_adf_transport.h" #include "icp_adf_debug.h" #include "lac_sym.h" #include "lac_sym_cipher.h" #include "lac_common.h" #include "lac_list.h" #include "lac_sal_types_crypto.h" #include "lac_sal.h" #include "lac_sal_ctrl.h" #include "lac_session.h" #include "lac_sym_stats.h" #include "lac_log.h" #include "lac_sym_cb.h" #include "lac_sym_hash.h" #include "lac_sym_qat_cipher.h" #include "lac_sym_qat.h" #define DEQUEUE_MSGPUT_MAX_RETRIES 10000 /* ******************************************************************************* * Define static function definitions ******************************************************************************* */ /** ***************************************************************************** * @ingroup LacSymCb * Function to clean computed data. * * @description * This function cleans GCM or CCM data in the case of a failure. 
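
The sizing comment above fixes the constants: an 8-byte buffer-list descriptor, one 16-byte flat-buffer descriptor per buffer (with a minimum of one, even for zero buffers), plus slack so the whole block can be aligned. A worked version is below; the 64-byte alignment allowance is an assumption for the demo, since the value of ICP_DESCRIPTOR_ALIGNMENT_BYTES is not shown in this hunk.

#include <stdint.h>
#include <stdio.h>

#define LIST_DESC_SZ 8u  /* icp_buffer_list_desc_t per the comment */
#define FLAT_DESC_SZ 16u /* icp_flat_buffer_desc_t per the comment */
#define DESC_ALIGN 64u   /* ICP_DESCRIPTOR_ALIGNMENT_BYTES analogue */

static uint32_t
meta_size(uint32_t num_buffers)
{
	if (num_buffers == 0) /* firmware still needs one descriptor */
		num_buffers = 1;
	return (LIST_DESC_SZ + FLAT_DESC_SZ * num_buffers + DESC_ALIGN);
}

int
main(void)
{
	/* prints 88 88 136 with the assumed 64-byte allowance */
	printf("%u %u %u\n", meta_size(0), meta_size(1), meta_size(4));
	return (0);
}
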
* * @param[in] pSessionDesc pointer to the session descriptor * @param[out] pBufferList pointer to the bufferlist to clean * @param[in] pOpData pointer to operation data * @param[in] isCCM is it a CCM operation boolean * * @return None *****************************************************************************/ static void LacSymCb_CleanUserData(const lac_session_desc_t *pSessionDesc, CpaBufferList *pBufferList, const CpaCySymOpData *pOpData, CpaBoolean isCCM) { - Cpa8U authTagLen = 0; + Cpa32U authTagLen = 0; /* Retrieve authTagLen */ authTagLen = pSessionDesc->hashResultSize; /* Cleaning */ if (isCCM) { /* for CCM the digest is inside the buffer list */ LacBuffDesc_BufferListZeroFromOffset( pBufferList, pOpData->cryptoStartSrcOffsetInBytes, pOpData->messageLenToCipherInBytes + authTagLen); } else { /* clean buffer list */ LacBuffDesc_BufferListZeroFromOffset( pBufferList, pOpData->cryptoStartSrcOffsetInBytes, pOpData->messageLenToCipherInBytes); } if ((CPA_TRUE != pSessionDesc->digestIsAppended) && (NULL != pOpData->pDigestResult)) { /* clean digest */ memset(pOpData->pDigestResult, 0, authTagLen); } } /** ***************************************************************************** * @ingroup LacSymCb * Definition of callback function for processing symmetric responses * * @description * This callback is invoked to process symmetric response messages from * the QAT. It will extract some details from the message and invoke * the user's callback to complete a symmetric operation. * * @param[in] pCookie Pointer to cookie associated with this request * @param[in] qatRespStatusOkFlag Boolean indicating ok/fail status from QAT * @param[in] status Status variable indicating an error occurred * in sending the message (e.g. when dequeueing) * @param[in] pSessionDesc Session descriptor * * @return None *****************************************************************************/ static void LacSymCb_ProcessCallbackInternal(lac_sym_bulk_cookie_t *pCookie, CpaBoolean qatRespStatusOkFlag, CpaStatus status, lac_session_desc_t *pSessionDesc) { CpaCySymCbFunc pSymCb = NULL; void *pCallbackTag = NULL; CpaCySymOpData *pOpData = NULL; CpaBufferList *pDstBuffer = NULL; CpaCySymOp operationType = CPA_CY_SYM_OP_NONE; CpaStatus dequeueStatus = CPA_STATUS_SUCCESS; CpaInstanceHandle instanceHandle = CPA_INSTANCE_HANDLE_SINGLE; /* NOTE: cookie pointer validated in previous function */ instanceHandle = pCookie->instanceHandle; pOpData = (CpaCySymOpData *)LAC_CONST_PTR_CAST(pCookie->pOpData); operationType = pSessionDesc->symOperation; /* Set the destination pointer to the one supplied in the cookie. */ pDstBuffer = pCookie->pDstBuffer; /* For a digest verify operation - for full packet and final partial * only, perform a comparison with the digest generated and with the one - * supplied in the packet. */ + * supplied in the packet. 
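
LacSymCb_CleanUserData above implements the RFC 3610 requirement that a failed CCM/GCM verification must not hand the caller the bogus plaintext or the computed digest. A flat-buffer sketch of the same zeroing follows; the real code walks a CpaBufferList from an offset, which this deliberately simplifies, and the struct is invented.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct op_result {
	uint8_t *data;   /* region that held the decrypted payload */
	size_t data_len;
	uint8_t *digest; /* NULL when the tag is appended in-buffer */
	size_t digest_len;
};

static void
clean_on_verify_fail(struct op_result *r)
{
	memset(r->data, 0, r->data_len); /* drop the leaked plaintext */
	if (r->digest != NULL)
		memset(r->digest, 0, r->digest_len);
}

int
main(void)
{
	uint8_t buf[32] = { 1, 2, 3 };
	uint8_t tag[16] = { 9 };
	struct op_result r = { buf, sizeof(buf), tag, sizeof(tag) };

	clean_on_verify_fail(&r);
	printf("%d %d\n", buf[0], tag[0]); /* 0 0 */
	return (0);
}
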
In case of AES_GCM in SPC mode, destination + * buffer needs to be cleared if digest verify operation fails */ - if (((pSessionDesc->isSinglePass && - (CPA_CY_SYM_CIPHER_AES_GCM == pSessionDesc->cipherAlgorithm)) || + if (((SPC == pSessionDesc->singlePassState) || (CPA_CY_SYM_OP_CIPHER != operationType)) && (CPA_TRUE == pSessionDesc->digestVerify) && ((CPA_CY_SYM_PACKET_TYPE_FULL == pOpData->packetType) || (CPA_CY_SYM_PACKET_TYPE_LAST_PARTIAL == pOpData->packetType))) { if (CPA_FALSE == qatRespStatusOkFlag) { LAC_SYM_STAT_INC(numSymOpVerifyFailures, instanceHandle); /* The comparison has failed at this point (status is - * fail), - * need to clean any sensitive calculated data up to - * this point. - * The data calculated is no longer useful to the end - * result and - * does not need to be returned to the user so setting - * buffers to - * zero. + * fail), need to clean any sensitive calculated data up + * to this point. The data calculated is no longer + * useful to the end result and does not need to be + * returned to the user so setting buffers to zero. */ if (pSessionDesc->cipherAlgorithm == CPA_CY_SYM_CIPHER_AES_CCM) { LacSymCb_CleanUserData(pSessionDesc, pDstBuffer, pOpData, CPA_TRUE); } else if (pSessionDesc->cipherAlgorithm == CPA_CY_SYM_CIPHER_AES_GCM) { LacSymCb_CleanUserData(pSessionDesc, pDstBuffer, pOpData, CPA_FALSE); } } } else { /* Most commands have no point of failure and always return * success. This is the default response from the QAT. * If status is already set to an error value, don't overwrite * it */ if ((CPA_STATUS_SUCCESS == status) && (CPA_TRUE != qatRespStatusOkFlag)) { LAC_LOG_ERROR("Response status value not as expected"); status = CPA_STATUS_FAIL; } } pSymCb = pSessionDesc->pSymCb; pCallbackTag = pCookie->pCallbackTag; /* State returned to the client for intermediate partials packets * for hash only and cipher only partial packets. 
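
One detail worth calling out in the else-branch that follows: a status already holding an error is never overwritten by the generic "response not OK" failure, so the most specific error survives to the user callback. A tiny model of that folding rule, with invented status values:

#include <stdio.h>

enum { ST_OK = 0, ST_FAIL = -1, ST_RETRY = -2 };

static int
fold_status(int status, int resp_ok)
{
	if (status == ST_OK && !resp_ok)
		return (ST_FAIL); /* firmware flagged the failure */
	return (status); /* keep the earlier, more specific error */
}

int
main(void)
{
	printf("%d %d\n",
	    fold_status(ST_OK, 0),     /* -1: failure recorded */
	    fold_status(ST_RETRY, 0)); /* -2: not overwritten */
	return (0);
}
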
Cipher update * allow next partial through */ if (CPA_CY_SYM_PACKET_TYPE_PARTIAL == pOpData->packetType) { if ((CPA_CY_SYM_OP_CIPHER == operationType) || (CPA_CY_SYM_OP_ALGORITHM_CHAINING == operationType)) { if (CPA_TRUE == pCookie->updateUserIvOnRecieve) { /* Update the user's IV buffer * Very important to do this BEFORE dequeuing * subsequent partial requests, as the state - * buffer - * may get overwritten + * buffer may get overwritten */ memcpy(pCookie->pOpData->pIv, pSessionDesc->cipherPartialOpState, pCookie->pOpData->ivLenInBytes); } if (CPA_TRUE == pCookie->updateKeySizeOnRecieve && LAC_CIPHER_IS_XTS_MODE( pSessionDesc->cipherAlgorithm)) { LacSymQat_CipherXTSModeUpdateKeyLen( pSessionDesc, pSessionDesc->cipherKeyLenInBytes / 2); } } } else if (CPA_CY_SYM_PACKET_TYPE_LAST_PARTIAL == pOpData->packetType) { if ((CPA_CY_SYM_OP_CIPHER == operationType) || (CPA_CY_SYM_OP_ALGORITHM_CHAINING == operationType)) { - if (CPA_TRUE == LAC_CIPHER_IS_XTS_MODE( - pSessionDesc->cipherAlgorithm)) { + if (CPA_TRUE == + LAC_CIPHER_IS_XTS_MODE( + pSessionDesc->cipherAlgorithm)) { /* * For XTS mode, we replace the updated key with * the original key - for subsequent partial * requests * */ LacSymQat_CipherXTSModeUpdateKeyLen( pSessionDesc, pSessionDesc->cipherKeyLenInBytes); } } } if ((CPA_CY_SYM_PACKET_TYPE_FULL != pOpData->packetType) && (qatRespStatusOkFlag != CPA_FALSE)) { /* There may be requests blocked pending the completion of this * operation */ dequeueStatus = LacSymCb_PendingReqsDequeue(pSessionDesc); if (CPA_STATUS_SUCCESS != dequeueStatus) { LAC_SYM_STAT_INC(numSymOpCompletedErrors, instanceHandle); qatRespStatusOkFlag = CPA_FALSE; if (CPA_STATUS_SUCCESS == status) { status = dequeueStatus; } } } if (CPA_STATUS_SUCCESS == status) { /* update stats */ if (pSessionDesc->internalSession == CPA_FALSE) { LAC_SYM_STAT_INC(numSymOpCompleted, instanceHandle); if (CPA_STATUS_SUCCESS != status) { LAC_SYM_STAT_INC(numSymOpCompletedErrors, instanceHandle); } } } qatUtilsAtomicDec(&(pSessionDesc->u.pendingCbCount)); /* deallocate the memory for the internal callback cookie */ Lac_MemPoolEntryFree(pCookie); /* user callback function is the last thing to be called */ pSymCb(pCallbackTag, status, operationType, pOpData, pDstBuffer, qatRespStatusOkFlag); } /** ****************************************************************************** * @ingroup LacSymCb * Definition of callback function for processing symmetric Data Plane * responses * * @description * This callback checks the status, decrements the number of operations * pending and calls the user callback * * @param[in/out] pResponse pointer to the response structure * @param[in] qatRespStatusOkFlag status * @param[in] pSessionDesc pointer to the session descriptor * * @return None ******************************************************************************/ static void LacSymCb_ProcessDpCallback(CpaCySymDpOpData *pResponse, CpaBoolean qatRespStatusOkFlag, + CpaStatus status, lac_session_desc_t *pSessionDesc) { - CpaStatus status = CPA_STATUS_SUCCESS; + CpaCySymDpCbFunc pSymDpCb = NULL; /* For CCM and GCM, if qatRespStatusOkFlag is false, the data has to be * cleaned as stated in RFC 3610; in DP mode, it is the user - * responsability - * to do so */ - - if (CPA_FALSE == pSessionDesc->isSinglePass) { - if ((CPA_CY_SYM_OP_CIPHER == pSessionDesc->symOperation) || - (CPA_FALSE == pSessionDesc->digestVerify)) { - /* If not doing digest compare and qatRespStatusOkFlag - != - CPA_TRUE - then there is something very wrong */ - if (CPA_FALSE == 
qatRespStatusOkFlag) { - LAC_LOG_ERROR( - "Response status value not as expected"); - status = CPA_STATUS_FAIL; - } + * responsibility to do so */ + + if (((CPA_CY_SYM_OP_CIPHER == pSessionDesc->symOperation) && + SPC != pSessionDesc->singlePassState) || + (CPA_FALSE == pSessionDesc->digestVerify)) { + /* If not doing digest compare and qatRespStatusOkFlag != + CPA_TRUE then there is something very wrong */ + if ((CPA_FALSE == qatRespStatusOkFlag) && + (status != CPA_STATUS_UNSUPPORTED)) { + LAC_LOG_ERROR("Response status value not as expected"); + status = CPA_STATUS_FAIL; } } - ((sal_crypto_service_t *)pResponse->instanceHandle) - ->pSymDpCb(pResponse, status, qatRespStatusOkFlag); + pSymDpCb = + ((sal_crypto_service_t *)pResponse->instanceHandle)->pSymDpCb; + + pSymDpCb(pResponse, status, qatRespStatusOkFlag); + /* * Decrement the number of pending CB. * * If the @pendingDpCbCount becomes zero, we may remove the session, - * please - * read more information in the cpaCySymRemoveSession(). + * please read more information in the cpaCySymRemoveSession(). * * But there is a field in the @pResponse to store the session, * the "sessionCtx". In another word, in the above @->pSymDpCb() - * callback, - * it may use the session again. If we decrease the @pendingDpCbCount - * before - * the @->pSymDpCb(), there is a _risk_ the @->pSymDpCb() may reference - * to - * a deleted session. + * callback, it may use the session again. If we decrease the + * @pendingDpCbCount before the @->pSymDpCb(), there is a _risk_ the + * @->pSymDpCb() may reference a deleted session. * * So in order to avoid the risk, we decrease the @pendingDpCbCount - * after - * the @->pSymDpCb() callback. + * after the @->pSymDpCb() callback. */ qatUtilsAtomicDec(&pSessionDesc->u.pendingDpCbCount); } /** ****************************************************************************** * @ingroup LacSymCb * Definition of callback function for processing symmetric responses * * @description * This callback, which is registered with the common symmetric response * message handler, is invoked to process symmetric response messages from * the QAT. It will extract the response status from the cmnRespFlags set * by the QAT, and then will pass it to @ref * LacSymCb_ProcessCallbackInternal to complete the response processing.
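
The comment block above justifies an ordering constraint: the user callback may still dereference the session through pResponse->sessionCtx, so the pending counter must only drop after the callback returns; otherwise cpaCySymRemoveSession running on another thread could free the session while the callback is mid-flight. A compact model of that ordering, using C11 atomics in place of qatUtilsAtomic*:

#include <stdatomic.h>
#include <stdio.h>

struct dp_session {
	atomic_long pending;
	void (*user_cb)(struct dp_session *s, int status);
};

static void
process_response(struct dp_session *s, int status)
{
	s->user_cb(s, status); /* may still dereference the session */
	atomic_fetch_sub(&s->pending, 1); /* only now may removal pass */
}

static void
my_cb(struct dp_session *s, int status)
{
	printf("cb: pending=%ld status=%d\n",
	    atomic_load(&s->pending), status);
}

int
main(void)
{
	struct dp_session s = { 1, my_cb };

	process_response(&s, 0); /* prints pending=1 inside the cb */
	printf("after: pending=%ld\n", atomic_load(&s.pending)); /* 0 */
	return (0);
}
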
* * @param[in] lacCmdId ID of the symmetric QAT command of the request * message * @param[in] pOpaqueData pointer to opaque data in the request message * @param[in] cmnRespFlags Flags set by QAT to indicate response status * * @return None ******************************************************************************/ static void LacSymCb_ProcessCallback(icp_qat_fw_la_cmd_id_t lacCmdId, void *pOpaqueData, icp_qat_fw_comn_flags cmnRespFlags) { + CpaStatus status = CPA_STATUS_SUCCESS; CpaCySymDpOpData *pDpOpData = (CpaCySymDpOpData *)pOpaqueData; lac_session_desc_t *pSessionDesc = LAC_SYM_SESSION_DESC_FROM_CTX_GET(pDpOpData->sessionCtx); CpaBoolean qatRespStatusOkFlag = (CpaBoolean)(ICP_QAT_FW_COMN_STATUS_FLAG_OK == ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(cmnRespFlags)); if (CPA_TRUE == pSessionDesc->isDPSession) { /* DP session */ + if (ICP_QAT_FW_COMN_RESP_UNSUPPORTED_REQUEST_STAT_GET( + cmnRespFlags)) { + status = CPA_STATUS_UNSUPPORTED; + } LacSymCb_ProcessDpCallback(pDpOpData, qatRespStatusOkFlag, + status, pSessionDesc); } else { /* Trad session */ LacSymCb_ProcessCallbackInternal((lac_sym_bulk_cookie_t *) pOpaqueData, qatRespStatusOkFlag, CPA_STATUS_SUCCESS, pSessionDesc); } } /* ******************************************************************************* * Define public/global function definitions ******************************************************************************* */ /** * @ingroup LacSymCb * * @return CpaStatus * value returned will be the result of icp_adf_transPutMsg */ CpaStatus LacSymCb_PendingReqsDequeue(lac_session_desc_t *pSessionDesc) { CpaStatus status = CPA_STATUS_SUCCESS; sal_crypto_service_t *pService = NULL; Cpa32U retries = 0; pService = (sal_crypto_service_t *)pSessionDesc->pInstance; /* Need to protect access to queue head and tail pointers, which may * be accessed by multiple contexts simultaneously for enqueue and * dequeue operations */ - if (CPA_STATUS_SUCCESS != - LAC_SPINLOCK(&pSessionDesc->requestQueueLock)) { - LAC_LOG_ERROR("Failed to lock request queue"); - return CPA_STATUS_RESOURCE; - } + LAC_SPINLOCK(&pSessionDesc->requestQueueLock); /* Clear the blocking flag in the session descriptor */ pSessionDesc->nonBlockingOpsInProgress = CPA_TRUE; while ((NULL != pSessionDesc->pRequestQueueHead) && (CPA_TRUE == pSessionDesc->nonBlockingOpsInProgress)) { /* If we send a partial packet request, set the - * blockingOpsInProgress - * flag for the session to indicate that subsequent requests - * must be - * queued up until this request completes + * blockingOpsInProgress flag for the session to indicate that + * subsequent requests must be queued up until this request + * completes */ if (CPA_CY_SYM_PACKET_TYPE_FULL != pSessionDesc->pRequestQueueHead->pOpData->packetType) { pSessionDesc->nonBlockingOpsInProgress = CPA_FALSE; } /* At this point, we're clear to send the request. For cipher - * requests, - * we need to check if the session IV needs to be updated. This - * can - * only be done when no other partials are in flight for this - * session, - * to ensure the cipherPartialOpState buffer in the session - * descriptor - * is not currently in use + * requests, we need to check if the session IV needs to be + * updated. 
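
Before dispatching, the data-plane path above derives two things from the common response flags: the ok/fail bit, and a separate "unsupported request" indication that is surfaced as CPA_STATUS_UNSUPPORTED. The sketch below shows the shape of that decode; the bit layout is invented for the demo and is not the real ICP_QAT_FW_COMN encoding.

#include <stdbool.h>
#include <stdio.h>

#define RESP_STATUS_OK_BIT (1u << 0)   /* invented bit layout */
#define RESP_UNSUPPORTED_BIT (1u << 1)

enum { ST_SUCCESS = 0, ST_UNSUPPORTED = 2 };

static int
decode_resp(unsigned int flags, bool *resp_ok)
{
	*resp_ok = (flags & RESP_STATUS_OK_BIT) != 0;
	if (flags & RESP_UNSUPPORTED_BIT)
		return (ST_UNSUPPORTED); /* firmware rejected the request */
	return (ST_SUCCESS); /* pass/fail still travels in resp_ok */
}

int
main(void)
{
	bool ok;
	int st = decode_resp(RESP_UNSUPPORTED_BIT, &ok);

	printf("ok=%d status=%d\n", ok, st); /* ok=0 status=2 */
	return (0);
}
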
This can only be done when no other partials are in + flight for this session, to ensure the cipherPartialOpState + buffer in the session descriptor is not currently in use */ if (CPA_TRUE == pSessionDesc->pRequestQueueHead->updateSessionIvOnSend) { if (LAC_CIPHER_IS_ARC4(pSessionDesc->cipherAlgorithm)) { memcpy(pSessionDesc->cipherPartialOpState, pSessionDesc->cipherARC4InitialState, LAC_CIPHER_ARC4_STATE_LEN_BYTES); } else { memcpy(pSessionDesc->cipherPartialOpState, pSessionDesc->pRequestQueueHead->pOpData ->pIv, pSessionDesc->pRequestQueueHead->pOpData ->ivLenInBytes); } } /* * Now we'll attempt to send the message directly to QAT. We'll - * keep - * looing until it succeeds (or at least a very high number of - * retries), - * as the failure only happens when the ring is full, and this - * is only - * a temporary situation. After a few retries, space will become - * availble, allowing the putMsg to succeed. + * keep looping until it succeeds (or at least a very high number + * of retries), as the failure only happens when the ring is + * full, and this is only a temporary situation. After a few + * retries, space will become available, allowing the putMsg to + * succeed. */ retries = 0; do { /* Send to QAT */ status = icp_adf_transPutMsg( pService->trans_handle_sym_tx, (void *)&(pSessionDesc->pRequestQueueHead->qatMsg), LAC_QAT_SYM_REQ_SZ_LW); retries++; /* * Yield to allow other threads that may be on this - * session to poll - * and make some space on the ring + * session to poll and make some space on the ring */ if (CPA_STATUS_SUCCESS != status) { qatUtilsYield(); } } while ((CPA_STATUS_SUCCESS != status) && (retries < DEQUEUE_MSGPUT_MAX_RETRIES)); if ((CPA_STATUS_SUCCESS != status) || (retries >= DEQUEUE_MSGPUT_MAX_RETRIES)) { LAC_LOG_ERROR( "Failed to SalQatMsg_transPutMsg, maximum retries exceeded."); goto cleanup; } pSessionDesc->pRequestQueueHead = pSessionDesc->pRequestQueueHead->pNext; } /* If we've drained the queue, ensure the tail pointer is set to NULL */ if (NULL == pSessionDesc->pRequestQueueHead) { pSessionDesc->pRequestQueueTail = NULL; } cleanup: - if (CPA_STATUS_SUCCESS != - LAC_SPINUNLOCK(&pSessionDesc->requestQueueLock)) { - LAC_LOG_ERROR("Failed to unlock request queue"); - } + LAC_SPINUNLOCK(&pSessionDesc->requestQueueLock); return status; } /** * @ingroup LacSymCb */ void LacSymCb_CallbacksRegister(void) { /*** HASH ***/ LacSymQat_RespHandlerRegister(ICP_QAT_FW_LA_CMD_AUTH, LacSymCb_ProcessCallback); /*** ALGORITHM-CHAINING CIPHER_HASH***/ LacSymQat_RespHandlerRegister(ICP_QAT_FW_LA_CMD_CIPHER_HASH, LacSymCb_ProcessCallback); /*** ALGORITHM-CHAINING HASH_CIPHER***/ LacSymQat_RespHandlerRegister(ICP_QAT_FW_LA_CMD_HASH_CIPHER, LacSymCb_ProcessCallback); /*** CIPHER ***/ LacSymQat_RespHandlerRegister(ICP_QAT_FW_LA_CMD_CIPHER, LacSymCb_ProcessCallback); /* Call compile time param check function to ensure it is included in the build by the compiler - this compile time check ensures callbacks run as expected */ LacSym_CompileTimeAssertions(); } diff --git a/sys/dev/qat/qat_api/common/crypto/sym/lac_sym_cipher.c b/sys/dev/qat/qat_api/common/crypto/sym/lac_sym_cipher.c index 0039c1f66884..2a6cdd7aa748 100644 --- a/sys/dev/qat/qat_api/common/crypto/sym/lac_sym_cipher.c +++ b/sys/dev/qat/qat_api/common/crypto/sym/lac_sym_cipher.c @@ -1,416 +1,460 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright(c) 2007-2022 Intel Corporation */ /* $FreeBSD$ */ /** *************************************************************************** * @file lac_sym_cipher.c Cipher * * 
@ingroup LacCipher * * @description Functions specific to cipher ***************************************************************************/ /* ******************************************************************************* * Include public/global header files ******************************************************************************* */ #include "cpa.h" #include "cpa_cy_sym.h" #include "icp_adf_init.h" #include "icp_adf_transport.h" #include "icp_accel_devices.h" #include "icp_adf_debug.h" #include "icp_qat_fw_la.h" /* ******************************************************************************* * Include private header files ******************************************************************************* */ #include "lac_sym_cipher.h" #include "lac_session.h" #include "lac_mem.h" #include "lac_common.h" #include "lac_list.h" #include "lac_sym.h" #include "lac_sym_key.h" #include "lac_sym_qat_hash_defs_lookup.h" #include "lac_sal_types_crypto.h" #include "lac_sal.h" #include "lac_sal_ctrl.h" #include "lac_sym_cipher_defs.h" #include "lac_sym_cipher.h" #include "lac_sym_stats.h" #include "lac_sym.h" #include "lac_sym_qat_cipher.h" #include "lac_log.h" #include "lac_buffer_desc.h" +#include "sal_hw_gen.h" /* ******************************************************************************* * Static Variables ******************************************************************************* */ CpaStatus LacCipher_PerformIvCheck(sal_service_t *pService, lac_sym_bulk_cookie_t *pCbCookie, Cpa32U qatPacketType, Cpa8U **ppIvBuffer) { const CpaCySymOpData *pOpData = pCbCookie->pOpData; lac_session_desc_t *pSessionDesc = LAC_SYM_SESSION_DESC_FROM_CTX_GET(pOpData->sessionCtx); CpaCySymCipherAlgorithm algorithm = pSessionDesc->cipherAlgorithm; + unsigned ivLenInBytes = 0; - /* Perform IV check. */ - if (LAC_CIPHER_IS_CTR_MODE(algorithm) || - LAC_CIPHER_IS_CBC_MODE(algorithm) || - LAC_CIPHER_IS_AES_F8(algorithm) || - LAC_CIPHER_IS_XTS_MODE(algorithm)) { - unsigned ivLenInBytes = - LacSymQat_CipherIvSizeBytesGet(algorithm); + switch (algorithm) { + /* Perform IV check for CTR, CBC, XTS, F8 MODE. */ + case CPA_CY_SYM_CIPHER_AES_CTR: + case CPA_CY_SYM_CIPHER_3DES_CTR: + case CPA_CY_SYM_CIPHER_SM4_CTR: + case CPA_CY_SYM_CIPHER_AES_CCM: + case CPA_CY_SYM_CIPHER_AES_GCM: + case CPA_CY_SYM_CIPHER_CHACHA: + case CPA_CY_SYM_CIPHER_AES_CBC: + case CPA_CY_SYM_CIPHER_DES_CBC: + case CPA_CY_SYM_CIPHER_3DES_CBC: + case CPA_CY_SYM_CIPHER_SM4_CBC: + case CPA_CY_SYM_CIPHER_AES_F8: + case CPA_CY_SYM_CIPHER_AES_XTS: { + ivLenInBytes = LacSymQat_CipherIvSizeBytesGet(algorithm); LAC_CHECK_NULL_PARAM(pOpData->pIv); if (pOpData->ivLenInBytes != ivLenInBytes) { if (!(/* GCM with 12 byte IV is OK */ (LAC_CIPHER_IS_GCM(algorithm) && pOpData->ivLenInBytes == LAC_CIPHER_IV_SIZE_GCM_12) || /* IV len for CCM has been checked before */ LAC_CIPHER_IS_CCM(algorithm))) { LAC_INVALID_PARAM_LOG("invalid cipher IV size"); return CPA_STATUS_INVALID_PARAM; } } /* Always copy the user's IV into another cipher state buffer if * the request is part of a partial packet sequence * (ensures that pipelined partial requests use same * buffer) */ if (ICP_QAT_FW_LA_PARTIAL_NONE == qatPacketType) { /* Set the value of the ppIvBuffer to that supplied * by the user. * NOTE: There is no guarantee that this address is - * aligned on - * an 8 or 64 Byte address. */ + * aligned on an 8 or 64 Byte address. */ *ppIvBuffer = pOpData->pIv; } else { /* For partial packets, we use a per-session buffer to - * maintain - * the IV. 
This allows us to easily pass the updated IV - * forward - * to the next partial in the sequence. This makes - * internal - * buffering of partials easier to implement. + * maintain the IV. This allows us to easily pass the + * updated IV forward to the next partial in the + * sequence. This makes internal buffering of partials + * easier to implement. */ *ppIvBuffer = pSessionDesc->cipherPartialOpState; /* Ensure that the user's IV buffer gets updated between - * partial - * requests so that they may also see the residue from - * the - * previous partial. Not needed for final partials - * though. + * partial requests so that they may also see the + * residue from the previous partial. Not needed for + * final partials though. */ if ((ICP_QAT_FW_LA_PARTIAL_START == qatPacketType) || (ICP_QAT_FW_LA_PARTIAL_MID == qatPacketType)) { pCbCookie->updateUserIvOnRecieve = CPA_TRUE; if (ICP_QAT_FW_LA_PARTIAL_START == qatPacketType) { /* if the previous partial state was - * full, then this is - * the first partial in the sequence so - * we need to copy - * in the user's IV. But, we have to be - * very careful - * here not to overwrite the - * cipherPartialOpState just - * yet in case there's a previous - * partial sequence in - * flight, so we defer the copy for now. - * This will be - * completed in the - * LacSymQueue_RequestSend() function. + * full, then this is the first partial + * in the sequence so we need to copy in + * the user's IV. But, we have to be + * very careful here not to overwrite + * the cipherPartialOpState just yet in + * case there's a previous partial + * sequence in flight, so we defer the + * copy for now. This will be completed + * in the LacSymQueue_RequestSend() + * function. */ pCbCookie->updateSessionIvOnSend = CPA_TRUE; } /* For subsequent partials in a sequence, we'll - * re-use the - * IV that was written back by the QAT, using - * internal - * request queueing if necessary to ensure that - * the next - * partial request isn't issued to the QAT until - * the + * re-use the IV that was written back by the + * QAT, using internal request queueing if + * necessary to ensure that the next partial + * request isn't issued to the QAT until the * previous one completes */ } } - } else if (LAC_CIPHER_IS_KASUMI(algorithm)) { + } break; + case CPA_CY_SYM_CIPHER_KASUMI_F8: { LAC_CHECK_NULL_PARAM(pOpData->pIv); - if (LAC_CIPHER_IS_KASUMI(algorithm) && - (pOpData->ivLenInBytes != LAC_CIPHER_KASUMI_F8_IV_LENGTH)) { + if (pOpData->ivLenInBytes != LAC_CIPHER_KASUMI_F8_IV_LENGTH) { LAC_INVALID_PARAM_LOG("invalid cipher IV size"); return CPA_STATUS_INVALID_PARAM; } - *ppIvBuffer = pOpData->pIv; - } else if (LAC_CIPHER_IS_SNOW3G_UEA2(algorithm)) { + } break; + case CPA_CY_SYM_CIPHER_SNOW3G_UEA2: { LAC_CHECK_NULL_PARAM(pOpData->pIv); - if (LAC_CIPHER_IS_SNOW3G_UEA2(algorithm) && - (pOpData->ivLenInBytes != ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ)) { + if (pOpData->ivLenInBytes != ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ) { LAC_INVALID_PARAM_LOG("invalid cipher IV size"); return CPA_STATUS_INVALID_PARAM; } *ppIvBuffer = pOpData->pIv; - } else if (LAC_CIPHER_IS_ARC4(algorithm)) { + } break; + case CPA_CY_SYM_CIPHER_ARC4: { if (ICP_QAT_FW_LA_PARTIAL_NONE == qatPacketType) { /* For full packets, the initial ARC4 state is stored in - * the - * session descriptor. Use it directly. + * the session descriptor. Use it directly. 
*/ *ppIvBuffer = pSessionDesc->cipherARC4InitialState; } else { /* For partial packets, we maintain the running ARC4 - * state in - * dedicated buffer in the session descriptor + * state in dedicated buffer in the session descriptor */ *ppIvBuffer = pSessionDesc->cipherPartialOpState; if (ICP_QAT_FW_LA_PARTIAL_START == qatPacketType) { /* if the previous partial state was full, then - * this is the - * first partial in the sequence so we need to - * (re-)initialise - * the contents of the state buffer using the - * initial state - * that is stored in the session descriptor. - * But, we have to be - * very careful here not to overwrite the - * cipherPartialOpState - * just yet in case there's a previous partial - * sequence in + * this is the first partial in the sequence so + * we need to (re-)initialise the contents of + * the state buffer using the initial state that + * is stored in the session descriptor. But, we + * have to be very careful here not to overwrite + * the cipherPartialOpState just yet in case + * there's a previous partial sequence in * flight, so we defer the copy for now. This - * will be completed - * in the LacSymQueue_RequestSend() function - * when clear to send. + * will be completed in the + * LacSymQueue_RequestSend() function when clear + * to send. */ pCbCookie->updateSessionIvOnSend = CPA_TRUE; } } - } else if (LAC_CIPHER_IS_ZUC_EEA3(algorithm)) { + } break; + case CPA_CY_SYM_CIPHER_ZUC_EEA3: { LAC_CHECK_NULL_PARAM(pOpData->pIv); if (pOpData->ivLenInBytes != ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ) { LAC_INVALID_PARAM_LOG("invalid cipher IV size"); return CPA_STATUS_INVALID_PARAM; } *ppIvBuffer = pOpData->pIv; - } else { + } break; + default: *ppIvBuffer = NULL; } return CPA_STATUS_SUCCESS; } CpaStatus -LacCipher_SessionSetupDataCheck(const CpaCySymCipherSetupData *pCipherSetupData) +LacCipher_SessionSetupDataCheck(const CpaCySymCipherSetupData *pCipherSetupData, + Cpa32U capabilitiesMask) { /* No key required for NULL algorithm */ if (!LAC_CIPHER_IS_NULL(pCipherSetupData->cipherAlgorithm)) { LAC_CHECK_NULL_PARAM(pCipherSetupData->pCipherKey); /* Check that algorithm and keys passed in are correct size */ - if (LAC_CIPHER_IS_ARC4(pCipherSetupData->cipherAlgorithm)) { + switch (pCipherSetupData->cipherAlgorithm) { + case CPA_CY_SYM_CIPHER_ARC4: if (pCipherSetupData->cipherKeyLenInBytes > ICP_QAT_HW_ARC4_KEY_SZ) { LAC_INVALID_PARAM_LOG( "Invalid ARC4 cipher key length"); return CPA_STATUS_INVALID_PARAM; } - } else if (LAC_CIPHER_IS_CCM( - pCipherSetupData->cipherAlgorithm)) { - if (pCipherSetupData->cipherKeyLenInBytes != - ICP_QAT_HW_AES_128_KEY_SZ) { + break; + case CPA_CY_SYM_CIPHER_AES_CCM: + if (!LAC_CIPHER_AES_V2(capabilitiesMask) && + pCipherSetupData->cipherKeyLenInBytes != + ICP_QAT_HW_AES_128_KEY_SZ) { LAC_INVALID_PARAM_LOG( "Invalid AES CCM cipher key length"); return CPA_STATUS_INVALID_PARAM; } - } else if (LAC_CIPHER_IS_XTS_MODE( - pCipherSetupData->cipherAlgorithm)) { + break; + case CPA_CY_SYM_CIPHER_AES_XTS: if ((pCipherSetupData->cipherKeyLenInBytes != ICP_QAT_HW_AES_128_XTS_KEY_SZ) && (pCipherSetupData->cipherKeyLenInBytes != - ICP_QAT_HW_AES_256_XTS_KEY_SZ)) { + ICP_QAT_HW_AES_256_XTS_KEY_SZ) && + (pCipherSetupData->cipherKeyLenInBytes != + ICP_QAT_HW_UCS_AES_128_XTS_KEY_SZ) && + (pCipherSetupData->cipherKeyLenInBytes != + ICP_QAT_HW_UCS_AES_256_XTS_KEY_SZ)) { LAC_INVALID_PARAM_LOG( "Invalid AES XTS cipher key length"); return CPA_STATUS_INVALID_PARAM; } - } else if (LAC_CIPHER_IS_AES( - pCipherSetupData->cipherAlgorithm)) { + break; + case 
CPA_CY_SYM_CIPHER_AES_ECB: + case CPA_CY_SYM_CIPHER_AES_CBC: + case CPA_CY_SYM_CIPHER_AES_CTR: + case CPA_CY_SYM_CIPHER_AES_GCM: if ((pCipherSetupData->cipherKeyLenInBytes != ICP_QAT_HW_AES_128_KEY_SZ) && (pCipherSetupData->cipherKeyLenInBytes != ICP_QAT_HW_AES_192_KEY_SZ) && (pCipherSetupData->cipherKeyLenInBytes != ICP_QAT_HW_AES_256_KEY_SZ)) { LAC_INVALID_PARAM_LOG( "Invalid AES cipher key length"); return CPA_STATUS_INVALID_PARAM; } - } else if (LAC_CIPHER_IS_AES_F8( - pCipherSetupData->cipherAlgorithm)) { + break; + case CPA_CY_SYM_CIPHER_AES_F8: if ((pCipherSetupData->cipherKeyLenInBytes != ICP_QAT_HW_AES_128_F8_KEY_SZ) && (pCipherSetupData->cipherKeyLenInBytes != ICP_QAT_HW_AES_192_F8_KEY_SZ) && (pCipherSetupData->cipherKeyLenInBytes != ICP_QAT_HW_AES_256_F8_KEY_SZ)) { LAC_INVALID_PARAM_LOG( "Invalid AES cipher key length"); return CPA_STATUS_INVALID_PARAM; } - } else if (LAC_CIPHER_IS_DES( - pCipherSetupData->cipherAlgorithm)) { + break; + case CPA_CY_SYM_CIPHER_DES_ECB: + case CPA_CY_SYM_CIPHER_DES_CBC: if (pCipherSetupData->cipherKeyLenInBytes != ICP_QAT_HW_DES_KEY_SZ) { LAC_INVALID_PARAM_LOG( "Invalid DES cipher key length"); return CPA_STATUS_INVALID_PARAM; } - } else if (LAC_CIPHER_IS_TRIPLE_DES( - pCipherSetupData->cipherAlgorithm)) { + break; + case CPA_CY_SYM_CIPHER_3DES_ECB: + case CPA_CY_SYM_CIPHER_3DES_CBC: + case CPA_CY_SYM_CIPHER_3DES_CTR: if (pCipherSetupData->cipherKeyLenInBytes != ICP_QAT_HW_3DES_KEY_SZ) { LAC_INVALID_PARAM_LOG( "Invalid Triple-DES cipher key length"); return CPA_STATUS_INVALID_PARAM; } - } else if (LAC_CIPHER_IS_KASUMI( - pCipherSetupData->cipherAlgorithm)) { + break; + case CPA_CY_SYM_CIPHER_KASUMI_F8: /* QAT-FW only supports 128 bits Cipher Key size for - * Kasumi F8 - * Ref: 3GPP TS 55.216 V6.2.0 */ + * Kasumi F8 Ref: 3GPP TS 55.216 V6.2.0 */ if (pCipherSetupData->cipherKeyLenInBytes != ICP_QAT_HW_KASUMI_KEY_SZ) { LAC_INVALID_PARAM_LOG( "Invalid Kasumi cipher key length"); return CPA_STATUS_INVALID_PARAM; } - } else if (LAC_CIPHER_IS_SNOW3G_UEA2( - pCipherSetupData->cipherAlgorithm)) { + break; + case CPA_CY_SYM_CIPHER_SNOW3G_UEA2: /* QAT-FW only supports 256 bits Cipher Key size for * Snow_3G */ if (pCipherSetupData->cipherKeyLenInBytes != ICP_QAT_HW_SNOW_3G_UEA2_KEY_SZ) { LAC_INVALID_PARAM_LOG( "Invalid Snow_3G cipher key length"); return CPA_STATUS_INVALID_PARAM; } - } else if (LAC_CIPHER_IS_ZUC_EEA3( - pCipherSetupData->cipherAlgorithm)) { + break; + case CPA_CY_SYM_CIPHER_ZUC_EEA3: /* ZUC EEA3 */ if (pCipherSetupData->cipherKeyLenInBytes != ICP_QAT_HW_ZUC_3G_EEA3_KEY_SZ) { LAC_INVALID_PARAM_LOG( "Invalid ZUC cipher key length"); return CPA_STATUS_INVALID_PARAM; } - } else if (LAC_CIPHER_IS_CHACHA( - pCipherSetupData->cipherAlgorithm)) { + break; + case CPA_CY_SYM_CIPHER_CHACHA: if (pCipherSetupData->cipherKeyLenInBytes != ICP_QAT_HW_CHACHAPOLY_KEY_SZ) { LAC_INVALID_PARAM_LOG( "Invalid CHACHAPOLY cipher key length"); return CPA_STATUS_INVALID_PARAM; } - } else if (LAC_CIPHER_IS_SM4( - pCipherSetupData->cipherAlgorithm)) { + break; + case CPA_CY_SYM_CIPHER_SM4_ECB: + case CPA_CY_SYM_CIPHER_SM4_CBC: + case CPA_CY_SYM_CIPHER_SM4_CTR: if (pCipherSetupData->cipherKeyLenInBytes != ICP_QAT_HW_SM4_KEY_SZ) { LAC_INVALID_PARAM_LOG( "Invalid SM4 cipher key length"); return CPA_STATUS_INVALID_PARAM; } - } else { + break; + default: LAC_INVALID_PARAM_LOG("Invalid cipher algorithm"); return CPA_STATUS_INVALID_PARAM; } } return CPA_STATUS_SUCCESS; } CpaStatus LacCipher_PerformParamCheck(CpaCySymCipherAlgorithm algorithm, const CpaCySymOpData *pOpData, 
const Cpa64U packetLen) { CpaStatus status = CPA_STATUS_SUCCESS; /* The following check will cover the dstBuffer as well, since * the dstBuffer cannot be smaller than the srcBuffer (checked in * LacSymPerform_BufferParamCheck() called from LacSym_Perform()) */ if ((pOpData->messageLenToCipherInBytes + pOpData->cryptoStartSrcOffsetInBytes) > packetLen) { LAC_INVALID_PARAM_LOG("cipher len + offset greater than " "srcBuffer packet len"); status = CPA_STATUS_INVALID_PARAM; - } - - if (CPA_STATUS_SUCCESS == status) { + } else { + /* Perform algorithm-specific checks */ + switch (algorithm) { + case CPA_CY_SYM_CIPHER_ARC4: + case CPA_CY_SYM_CIPHER_AES_CTR: + case CPA_CY_SYM_CIPHER_3DES_CTR: + case CPA_CY_SYM_CIPHER_SM4_CTR: + case CPA_CY_SYM_CIPHER_AES_CCM: + case CPA_CY_SYM_CIPHER_AES_GCM: + case CPA_CY_SYM_CIPHER_CHACHA: + case CPA_CY_SYM_CIPHER_KASUMI_F8: + case CPA_CY_SYM_CIPHER_AES_F8: + case CPA_CY_SYM_CIPHER_SNOW3G_UEA2: + case CPA_CY_SYM_CIPHER_ZUC_EEA3: + /* No action needed */ + break; /* * XTS Mode allow for ciphers which are not multiples of * the block size. */ - /* Perform algorithm-specific checks */ - if (LAC_CIPHER_IS_XTS_MODE(algorithm) && - ((pOpData->packetType == CPA_CY_SYM_PACKET_TYPE_FULL) || - (pOpData->packetType == - CPA_CY_SYM_PACKET_TYPE_LAST_PARTIAL))) { - /* - * If this is the last of a partial request - */ - if (pOpData->messageLenToCipherInBytes < - ICP_QAT_HW_AES_BLK_SZ) { - LAC_INVALID_PARAM_LOG( - "data size must be greater than block " - "size for last XTS partial or XTS " - "full packet"); - status = CPA_STATUS_INVALID_PARAM; + case CPA_CY_SYM_CIPHER_AES_XTS: + if ((pOpData->packetType == + CPA_CY_SYM_PACKET_TYPE_FULL) || + (pOpData->packetType == + CPA_CY_SYM_PACKET_TYPE_LAST_PARTIAL)) { + /* + * If this is the last of a partial request + */ + if (pOpData->messageLenToCipherInBytes < + ICP_QAT_HW_AES_BLK_SZ) { + LAC_INVALID_PARAM_LOG( + "data size must be greater than block" + " size for last XTS partial or XTS " + "full packet"); + status = CPA_STATUS_INVALID_PARAM; + } } - } else if (!(LAC_CIPHER_IS_ARC4(algorithm) || - LAC_CIPHER_IS_CTR_MODE(algorithm) || - LAC_CIPHER_IS_F8_MODE(algorithm) || - LAC_CIPHER_IS_SNOW3G_UEA2(algorithm) || - LAC_CIPHER_IS_XTS_MODE(algorithm) || - LAC_CIPHER_IS_CHACHA(algorithm) || - LAC_CIPHER_IS_ZUC_EEA3(algorithm))) { + break; + default: /* Mask & check below is based on assumption that block - * size is - * a power of 2. If data size is not a multiple of the - * block size, - * the "remainder" bits selected by the mask be non-zero + * size is a power of 2. 
If data size is not a multiple + * of the block size, the "remainder" bits selected by + * the mask will be non-zero */ if (pOpData->messageLenToCipherInBytes & (LacSymQat_CipherBlockSizeBytesGet(algorithm) - 1)) { LAC_INVALID_PARAM_LOG( - "data size must be block size multiple"); + "data size must be block size" + " multiple"); status = CPA_STATUS_INVALID_PARAM; } } } - return status; } +Cpa32U +LacCipher_GetCipherSliceType(sal_crypto_service_t *pService, + CpaCySymCipherAlgorithm cipherAlgorithm, + CpaCySymHashAlgorithm hashAlgorithm) +{ + Cpa32U sliceType = ICP_QAT_FW_LA_USE_LEGACY_SLICE_TYPE; + Cpa32U capabilitiesMask = + pService->generic_service_info.capabilitiesMask; + + /* UCS Slice is supported only in Gen4 */ + if (isCyGen4x(pService)) { + if (LAC_CIPHER_IS_XTS_MODE(cipherAlgorithm) || + LAC_CIPHER_IS_CHACHA(cipherAlgorithm) || + LAC_CIPHER_IS_GCM(cipherAlgorithm)) { + sliceType = ICP_QAT_FW_LA_USE_UCS_SLICE_TYPE; + } else if (LAC_CIPHER_IS_CCM(cipherAlgorithm) && + LAC_CIPHER_AES_V2(capabilitiesMask)) { + sliceType = ICP_QAT_FW_LA_USE_LEGACY_SLICE_TYPE; + } else if (LAC_CIPHER_IS_AES(cipherAlgorithm) && + LAC_CIPHER_IS_CTR_MODE(cipherAlgorithm)) { + sliceType = ICP_QAT_FW_LA_USE_UCS_SLICE_TYPE; + } + } + + return sliceType; +} diff --git a/sys/dev/qat/qat_api/common/crypto/sym/lac_sym_dp.c b/sys/dev/qat/qat_api/common/crypto/sym/lac_sym_dp.c index 4ccf1f7f82d5..82add3a5dc08 100644 --- a/sys/dev/qat/qat_api/common/crypto/sym/lac_sym_dp.c +++ b/sys/dev/qat/qat_api/common/crypto/sym/lac_sym_dp.c @@ -1,1080 +1,1185 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright(c) 2007-2022 Intel Corporation */ /* $FreeBSD$ */ /** *************************************************************************** * @file lac_sym_dp.c * Implementation of the symmetric data plane API * * @ingroup cpaCySymDp ***************************************************************************/ /* ******************************************************************************* * Include public/global header files ******************************************************************************* */ #include "cpa.h" #include "cpa_cy_sym.h" #include "cpa_cy_sym_dp.h" /* ******************************************************************************* * Include private header files ******************************************************************************* */ #include "icp_accel_devices.h" #include "icp_adf_init.h" #include "icp_adf_transport.h" #include "icp_adf_transport_dp.h" #include "icp_adf_debug.h" #include "icp_sal_poll.h" #include "qat_utils.h" -#include "lac_mem.h" +#include "lac_list.h" #include "lac_log.h" +#include "lac_mem.h" +#include "lac_sal_types_crypto.h" #include "lac_sym.h" +#include "lac_sym_cipher.h" +#include "lac_sym_auth_enc.h" #include "lac_sym_qat_cipher.h" -#include "lac_list.h" -#include "lac_sal_types_crypto.h" #include "sal_service_state.h" -#include "lac_sym_auth_enc.h" +#include "sal_hw_gen.h" typedef void (*write_ringMsgFunc_t)(CpaCySymDpOpData *pRequest, icp_qat_fw_la_bulk_req_t *pCurrentQatMsg); /** ***************************************************************************** * @ingroup cpaCySymDp * Check that the operation data is valid * * @description * Check that all the parameters defined in the operation data are valid * * @param[in] pRequest Pointer to an operation data for crypto * data plane API * * @retval CPA_STATUS_SUCCESS Function executed successfully * @retval CPA_STATUS_INVALID_PARAM Invalid parameter passed in * 
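+ * @note
+ *     A minimal sketch of the data plane sequence whose per-request
+ *     arguments this routine vets; pOpData, instanceHandle and
+ *     responseQuota are illustrative names only, and a session created
+ *     via cpaCySymDpInitSession() plus a callback registered via
+ *     cpaCySymDpRegCbFunc() are assumed:
+ *
+ *         status = cpaCySymDpEnqueueOp(pOpData, CPA_FALSE);
+ *         status = cpaCySymDpPerformOpNow(instanceHandle);
+ *         icp_sal_CyPollDpInstance(instanceHandle, responseQuota);
+ *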
*****************************************************************************/ static CpaStatus LacDp_EnqueueParamCheck(const CpaCySymDpOpData *pRequest) { lac_session_desc_t *pSessionDesc = NULL; CpaCySymCipherAlgorithm cipher = 0; CpaCySymHashAlgorithm hash = 0; Cpa32U capabilitiesMask = 0; LAC_CHECK_NULL_PARAM(pRequest); LAC_CHECK_NULL_PARAM(pRequest->instanceHandle); LAC_CHECK_NULL_PARAM(pRequest->sessionCtx); /* Ensure this is a crypto instance */ SAL_CHECK_INSTANCE_TYPE(pRequest->instanceHandle, (SAL_SERVICE_TYPE_CRYPTO | SAL_SERVICE_TYPE_CRYPTO_SYM)); pSessionDesc = LAC_SYM_SESSION_DESC_FROM_CTX_GET(pRequest->sessionCtx); if (NULL == pSessionDesc) { do { qatUtilsSleep(500); pSessionDesc = LAC_SYM_SESSION_DESC_FROM_CTX_GET( pRequest->sessionCtx); } while (NULL == pSessionDesc); } if (NULL == pSessionDesc) { LAC_INVALID_PARAM_LOG("Session context not as expected"); return CPA_STATUS_INVALID_PARAM; } if (CPA_FALSE == pSessionDesc->isDPSession) { LAC_INVALID_PARAM_LOG( "Session not initialised for data plane API"); return CPA_STATUS_INVALID_PARAM; } /*check whether Payload size is zero for CHACHA-POLY */ if ((CPA_CY_SYM_CIPHER_CHACHA == pSessionDesc->cipherAlgorithm) && (CPA_CY_SYM_HASH_POLY == pSessionDesc->hashAlgorithm) && (CPA_CY_SYM_OP_ALGORITHM_CHAINING == pSessionDesc->symOperation)) { if (!pRequest->messageLenToCipherInBytes) { LAC_INVALID_PARAM_LOG( "Invalid messageLenToCipherInBytes for CHACHA-POLY"); return CPA_STATUS_INVALID_PARAM; } } if (0 == pRequest->srcBuffer) { LAC_INVALID_PARAM_LOG("Invalid srcBuffer"); return CPA_STATUS_INVALID_PARAM; } if (0 == pRequest->dstBuffer) { LAC_INVALID_PARAM_LOG("Invalid destBuffer"); return CPA_STATUS_INVALID_PARAM; } if (0 == pRequest->thisPhys) { LAC_INVALID_PARAM_LOG("Invalid thisPhys"); return CPA_STATUS_INVALID_PARAM; } /* Check that src buffer Len = dst buffer Len Note this also checks that they are of the same type */ if (pRequest->srcBufferLen != pRequest->dstBufferLen) { LAC_INVALID_PARAM_LOG( "Source and Destination buffer lengths need to be equal"); return CPA_STATUS_INVALID_PARAM; } /* digestVerify and digestIsAppended on Hash-Only operation not * supported */ if (pSessionDesc->digestIsAppended && pSessionDesc->digestVerify && - (CPA_CY_SYM_OP_HASH == pSessionDesc->symOperation)) { + (pSessionDesc->symOperation == CPA_CY_SYM_OP_HASH)) { LAC_INVALID_PARAM_LOG( "digestVerify and digestIsAppended set " "on Hash-Only operation is not supported"); return CPA_STATUS_INVALID_PARAM; } /* Cipher specific tests */ if (CPA_CY_SYM_OP_HASH != pSessionDesc->symOperation) { /* Perform IV check */ - if ((LAC_CIPHER_IS_CTR_MODE(pSessionDesc->cipherAlgorithm) || - LAC_CIPHER_IS_CBC_MODE(pSessionDesc->cipherAlgorithm) || - LAC_CIPHER_IS_AES_F8(pSessionDesc->cipherAlgorithm)) && - (!(LAC_CIPHER_IS_CCM(pSessionDesc->cipherAlgorithm)))) { + switch (pSessionDesc->cipherAlgorithm) { + case CPA_CY_SYM_CIPHER_AES_CTR: + case CPA_CY_SYM_CIPHER_3DES_CTR: + case CPA_CY_SYM_CIPHER_SM4_CTR: + case CPA_CY_SYM_CIPHER_AES_GCM: + case CPA_CY_SYM_CIPHER_CHACHA: + case CPA_CY_SYM_CIPHER_AES_CBC: + case CPA_CY_SYM_CIPHER_DES_CBC: + case CPA_CY_SYM_CIPHER_3DES_CBC: + case CPA_CY_SYM_CIPHER_SM4_CBC: + case CPA_CY_SYM_CIPHER_AES_F8: { Cpa32U ivLenInBytes = LacSymQat_CipherIvSizeBytesGet( pSessionDesc->cipherAlgorithm); if (pRequest->ivLenInBytes != ivLenInBytes) { if (!(/* GCM with 12 byte IV is OK */ (LAC_CIPHER_IS_GCM( pSessionDesc->cipherAlgorithm) && pRequest->ivLenInBytes == LAC_CIPHER_IV_SIZE_GCM_12))) { LAC_INVALID_PARAM_LOG( "invalid cipher IV size"); return 
CPA_STATUS_INVALID_PARAM; } } if (0 == pRequest->iv) { LAC_INVALID_PARAM_LOG("invalid iv of 0"); return CPA_STATUS_INVALID_PARAM; } - /* pRequest->pIv is only used for CCM so is not checked * here */ - } else if (LAC_CIPHER_IS_KASUMI( - pSessionDesc->cipherAlgorithm)) { + } break; + case CPA_CY_SYM_CIPHER_KASUMI_F8: { if (LAC_CIPHER_KASUMI_F8_IV_LENGTH != pRequest->ivLenInBytes) { LAC_INVALID_PARAM_LOG("invalid cipher IV size"); return CPA_STATUS_INVALID_PARAM; } if (0 == pRequest->iv) { LAC_INVALID_PARAM_LOG("invalid iv of 0"); return CPA_STATUS_INVALID_PARAM; } - } else if (LAC_CIPHER_IS_SNOW3G_UEA2( - pSessionDesc->cipherAlgorithm)) { + } break; + case CPA_CY_SYM_CIPHER_SNOW3G_UEA2: { if (ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ != pRequest->ivLenInBytes) { LAC_INVALID_PARAM_LOG("invalid cipher IV size"); return CPA_STATUS_INVALID_PARAM; } if (0 == pRequest->iv) { LAC_INVALID_PARAM_LOG("invalid iv of 0"); return CPA_STATUS_INVALID_PARAM; } - } else if (LAC_CIPHER_IS_ZUC_EEA3( - pSessionDesc->cipherAlgorithm)) { + } break; + case CPA_CY_SYM_CIPHER_ZUC_EEA3: { if (ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ != pRequest->ivLenInBytes) { LAC_INVALID_PARAM_LOG("invalid cipher IV size"); return CPA_STATUS_INVALID_PARAM; } if (0 == pRequest->iv) { LAC_INVALID_PARAM_LOG("invalid iv of 0"); return CPA_STATUS_INVALID_PARAM; } - } else if (LAC_CIPHER_IS_CCM(pSessionDesc->cipherAlgorithm)) { + } break; + case CPA_CY_SYM_CIPHER_AES_CCM: { if (CPA_STATUS_SUCCESS != LacSymAlgChain_CheckCCMData( pRequest->pAdditionalAuthData, pRequest->pIv, pRequest->messageLenToCipherInBytes, pRequest->ivLenInBytes)) { return CPA_STATUS_INVALID_PARAM; } + } break; + default: + break; } - /* Perform algorithm-specific checks */ - if (!(LAC_CIPHER_IS_ARC4(pSessionDesc->cipherAlgorithm) || - LAC_CIPHER_IS_CTR_MODE(pSessionDesc->cipherAlgorithm) || - LAC_CIPHER_IS_F8_MODE(pSessionDesc->cipherAlgorithm) || - LAC_CIPHER_IS_SNOW3G_UEA2( - pSessionDesc->cipherAlgorithm) || - LAC_CIPHER_IS_ZUC_EEA3(pSessionDesc->cipherAlgorithm))) { + switch (pSessionDesc->cipherAlgorithm) { + case CPA_CY_SYM_CIPHER_ARC4: + case CPA_CY_SYM_CIPHER_AES_CTR: + case CPA_CY_SYM_CIPHER_3DES_CTR: + case CPA_CY_SYM_CIPHER_SM4_CTR: + case CPA_CY_SYM_CIPHER_AES_CCM: + case CPA_CY_SYM_CIPHER_AES_GCM: + case CPA_CY_SYM_CIPHER_CHACHA: + case CPA_CY_SYM_CIPHER_KASUMI_F8: + case CPA_CY_SYM_CIPHER_AES_F8: + case CPA_CY_SYM_CIPHER_SNOW3G_UEA2: + case CPA_CY_SYM_CIPHER_ZUC_EEA3: + /* No action needed */ + break; + default: { /* Mask & check below is based on assumption that block - * size is - * a power of 2. If data size is not a multiple of the - * block size, - * the "remainder" bits selected by the mask be non-zero + * size is a power of 2. If data size is not a multiple + * of the block size, the "remainder" bits selected by + * the mask will be non-zero */ if (pRequest->messageLenToCipherInBytes & (LacSymQat_CipherBlockSizeBytesGet( pSessionDesc->cipherAlgorithm) - 1)) { LAC_INVALID_PARAM_LOG( - "Data size must be block size multiple"); + "Data size must be block size" + " multiple"); return CPA_STATUS_INVALID_PARAM; } } + } cipher = pSessionDesc->cipherAlgorithm; hash = pSessionDesc->hashAlgorithm; capabilitiesMask = ((sal_crypto_service_t *)pRequest->instanceHandle) ->generic_service_info.capabilitiesMask; if (LAC_CIPHER_IS_SPC(cipher, hash, capabilitiesMask) && (LAC_CIPHER_SPC_IV_SIZE == pRequest->ivLenInBytes)) { /* For CHACHA and AES_GCM single pass there is an AAD - * buffer - * if aadLenInBytes is nonzero. 
AES_GMAC AAD is stored - * in - * source buffer, therefore there is no separate AAD - * buffer. */ + * buffer if aadLenInBytes is nonzero. AES_GMAC AAD is + * stored in source buffer, therefore there is no + * separate AAD buffer. */ if ((0 != pSessionDesc->aadLenInBytes) && (CPA_CY_SYM_HASH_AES_GMAC != pSessionDesc->hashAlgorithm)) { LAC_CHECK_NULL_PARAM( pRequest->pAdditionalAuthData); } /* Ensure AAD length for AES_GMAC spc */ if ((CPA_CY_SYM_HASH_AES_GMAC == hash) && (ICP_QAT_FW_SPC_AAD_SZ_MAX < pRequest->messageLenToHashInBytes)) { LAC_INVALID_PARAM_LOG( "aadLenInBytes for AES_GMAC"); return CPA_STATUS_INVALID_PARAM; } } } /* Hash specific tests */ if (CPA_CY_SYM_OP_CIPHER != pSessionDesc->symOperation) { /* For CCM, snow3G and ZUC there is always an AAD buffer For GCM there is an AAD buffer if aadLenInBytes is nonzero */ if ((CPA_CY_SYM_HASH_AES_CCM == pSessionDesc->hashAlgorithm) || (CPA_CY_SYM_HASH_AES_GCM == pSessionDesc->hashAlgorithm && (0 != pSessionDesc->aadLenInBytes))) { LAC_CHECK_NULL_PARAM(pRequest->pAdditionalAuthData); if (0 == pRequest->additionalAuthData) { LAC_INVALID_PARAM_LOG( "Invalid additionalAuthData"); return CPA_STATUS_INVALID_PARAM; } } else if (CPA_CY_SYM_HASH_SNOW3G_UIA2 == pSessionDesc->hashAlgorithm || CPA_CY_SYM_HASH_ZUC_EIA3 == pSessionDesc->hashAlgorithm) { if (0 == pRequest->additionalAuthData) { LAC_INVALID_PARAM_LOG( "Invalid additionalAuthData"); return CPA_STATUS_INVALID_PARAM; } } if ((CPA_CY_SYM_HASH_AES_CCM != pSessionDesc->hashAlgorithm) && (!pSessionDesc->digestIsAppended) && (0 == pRequest->digestResult)) { LAC_INVALID_PARAM_LOG("Invalid digestResult"); return CPA_STATUS_INVALID_PARAM; } if (CPA_CY_SYM_HASH_AES_CCM == pSessionDesc->hashAlgorithm) { if ((pRequest->cryptoStartSrcOffsetInBytes + pRequest->messageLenToCipherInBytes + pSessionDesc->hashResultSize) > pRequest->dstBufferLen) { LAC_INVALID_PARAM_LOG( "CCM - Not enough room for" " digest in destination buffer"); return CPA_STATUS_INVALID_PARAM; } } else if (CPA_TRUE == pSessionDesc->digestIsAppended) { if (CPA_CY_SYM_HASH_AES_GMAC == pSessionDesc->hashAlgorithm) { if ((pRequest->hashStartSrcOffsetInBytes + pRequest->messageLenToHashInBytes + pSessionDesc->hashResultSize) > pRequest->dstBufferLen) { LAC_INVALID_PARAM_LOG( "Append Digest - Not enough room for" " digest in destination buffer for " "AES GMAC algorithm"); return CPA_STATUS_INVALID_PARAM; } } if (CPA_CY_SYM_HASH_AES_GCM == pSessionDesc->hashAlgorithm) { if ((pRequest->cryptoStartSrcOffsetInBytes + pRequest->messageLenToCipherInBytes + pSessionDesc->hashResultSize) > pRequest->dstBufferLen) { LAC_INVALID_PARAM_LOG( "Append Digest - Not enough room " "for digest in destination buffer" " for GCM algorithm"); return CPA_STATUS_INVALID_PARAM; } } if ((pRequest->hashStartSrcOffsetInBytes + pRequest->messageLenToHashInBytes + pSessionDesc->hashResultSize) > pRequest->dstBufferLen) { LAC_INVALID_PARAM_LOG( "Append Digest - Not enough room for" " digest in destination buffer"); return CPA_STATUS_INVALID_PARAM; } } if (CPA_CY_SYM_HASH_AES_GMAC == pSessionDesc->hashAlgorithm) { if (pRequest->messageLenToHashInBytes == 0 || pRequest->pAdditionalAuthData != NULL) { LAC_INVALID_PARAM_LOG( "For AES_GMAC, AAD Length " "(messageLenToHashInBytes) must be " "non zero and pAdditionalAuthData " "must be NULL"); return CPA_STATUS_INVALID_PARAM; } } } if (CPA_DP_BUFLIST != pRequest->srcBufferLen) { if ((CPA_CY_SYM_OP_HASH != pSessionDesc->symOperation) && ((pRequest->messageLenToCipherInBytes + pRequest->cryptoStartSrcOffsetInBytes) > 
pRequest->srcBufferLen)) { LAC_INVALID_PARAM_LOG( "cipher len + offset greater than " "srcBufferLen"); return CPA_STATUS_INVALID_PARAM; } else if ((CPA_CY_SYM_OP_CIPHER != pSessionDesc->symOperation) && (CPA_CY_SYM_HASH_AES_CCM != pSessionDesc->hashAlgorithm) && (CPA_CY_SYM_HASH_AES_GCM != pSessionDesc->hashAlgorithm) && (CPA_CY_SYM_HASH_AES_GMAC != pSessionDesc->hashAlgorithm) && ((pRequest->messageLenToHashInBytes + pRequest->hashStartSrcOffsetInBytes) > pRequest->srcBufferLen)) { LAC_INVALID_PARAM_LOG( "hash len + offset greater than srcBufferLen"); return CPA_STATUS_INVALID_PARAM; } } else { LAC_CHECK_8_BYTE_ALIGNMENT(pRequest->srcBuffer); LAC_CHECK_8_BYTE_ALIGNMENT(pRequest->dstBuffer); } LAC_CHECK_8_BYTE_ALIGNMENT(pRequest->thisPhys); return CPA_STATUS_SUCCESS; } /** ***************************************************************************** * @ingroup cpaCySymDp * Write Message on the ring and write request params * This is the optimized version, which should not be used for - * algorithm of CCM, GCM and RC4 + * the CCM, GCM, CHACHA and RC4 algorithms * * @description * Write Message on the ring and write request params * * @param[in/out] pRequest Pointer to operation data for crypto * data plane API * @param[in/out] pCurrentQatMsg Pointer to ring memory where msg will * be written * * @retval none * *****************************************************************************/ void LacDp_WriteRingMsgOpt(CpaCySymDpOpData *pRequest, icp_qat_fw_la_bulk_req_t *pCurrentQatMsg) { lac_session_desc_t *pSessionDesc = LAC_SYM_SESSION_DESC_FROM_CTX_GET(pRequest->sessionCtx); Cpa8U *pMsgDummy = NULL; Cpa8U *pCacheDummyHdr = NULL; Cpa8U *pCacheDummyFtr = NULL; pMsgDummy = (Cpa8U *)pCurrentQatMsg; /* Write Request */ /* * Fill in the header and footer bytes of the ET ring message - cached - * from - * the session descriptor. + * from the session descriptor. */ - pCacheDummyHdr = (Cpa8U *)&(pSessionDesc->reqCacheHdr); - pCacheDummyFtr = (Cpa8U *)&(pSessionDesc->reqCacheFtr); - + if (!pSessionDesc->useSymConstantsTable) { + pCacheDummyHdr = (Cpa8U *)&(pSessionDesc->reqCacheHdr); + pCacheDummyFtr = (Cpa8U *)&(pSessionDesc->reqCacheFtr); + } else { + pCacheDummyHdr = (Cpa8U *)&(pSessionDesc->shramReqCacheHdr); + pCacheDummyFtr = (Cpa8U *)&(pSessionDesc->shramReqCacheFtr); + } memcpy(pMsgDummy, pCacheDummyHdr, (LAC_LONG_WORD_IN_BYTES * LAC_SIZE_OF_CACHE_HDR_IN_LW)); memset((pMsgDummy + (LAC_LONG_WORD_IN_BYTES * LAC_SIZE_OF_CACHE_HDR_IN_LW)), 0, (LAC_LONG_WORD_IN_BYTES * LAC_SIZE_OF_CACHE_TO_CLEAR_IN_LW)); memcpy(pMsgDummy + (LAC_LONG_WORD_IN_BYTES * LAC_START_OF_CACHE_FTR_IN_LW), pCacheDummyFtr, (LAC_LONG_WORD_IN_BYTES * LAC_SIZE_OF_CACHE_FTR_IN_LW)); SalQatMsg_CmnMidWrite(pCurrentQatMsg, pRequest, (CPA_DP_BUFLIST == pRequest->srcBufferLen ? 
QAT_COMN_PTR_TYPE_SGL : QAT_COMN_PTR_TYPE_FLAT), pRequest->srcBuffer, pRequest->dstBuffer, pRequest->srcBufferLen, pRequest->dstBufferLen); /* Write Request Params */ if (pSessionDesc->isCipher) { LacSymQat_CipherRequestParamsPopulate( + pSessionDesc, pCurrentQatMsg, pRequest->cryptoStartSrcOffsetInBytes, pRequest->messageLenToCipherInBytes, pRequest->iv, pRequest->pIv); } if (pSessionDesc->isAuth) { lac_sym_qat_hash_state_buffer_info_t *pHashStateBufferInfo = &(pSessionDesc->hashStateBufferInfo); icp_qat_fw_la_auth_req_params_t *pAuthReqPars = (icp_qat_fw_la_auth_req_params_t *)((Cpa8U *)&(pCurrentQatMsg->serv_specif_rqpars) + ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET); if ((CPA_CY_SYM_HASH_SNOW3G_UIA2 != pSessionDesc->hashAlgorithm && CPA_CY_SYM_HASH_AES_CCM != pSessionDesc->hashAlgorithm && CPA_CY_SYM_HASH_AES_GCM != pSessionDesc->hashAlgorithm && CPA_CY_SYM_HASH_AES_GMAC != pSessionDesc->hashAlgorithm && CPA_CY_SYM_HASH_ZUC_EIA3 != pSessionDesc->hashAlgorithm) && (pHashStateBufferInfo->prefixAadSzQuadWords > 0)) { /* prefixAadSzQuadWords > 0 when there is prefix data - i.e. nested hash or HMAC no precompute cases Note partials not supported on DP api so we do not need dynamic hash state in this case */ pRequest->additionalAuthData = pHashStateBufferInfo->pDataPhys + LAC_QUADWORDS_TO_BYTES( pHashStateBufferInfo->stateStorageSzQuadWords); } /* The first 24 bytes in icp_qat_fw_la_auth_req_params_t can be * copied directly from the op request data because they share a * corresponding layout. The remaining 4 bytes are taken * from the session message template and use values - * preconfigured at - * sessionInit (updated per request for some specific cases - * below) + * preconfigured at sessionInit (updated per request for some + * specific cases below) */ - memcpy(pAuthReqPars, - (Cpa32U *)&(pRequest->hashStartSrcOffsetInBytes), - ((unsigned long)&(pAuthReqPars->u2.inner_prefix_sz) - - (unsigned long)pAuthReqPars)); + + /* We force a specific compiler optimisation here. The length + * to be copied turns out to be always 16, and by coding a + * memcpy with a literal value the compiler will compile inline + * code (in fact, only two vector instructions) to effect the + * copy. This gives us a huge performance increase. 
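+	 * (Hence the two-armed call below: the constant-length branch
+	 * is the one expected to run in practice, and the
+	 * variable-length else branch is kept only as a safety net
+	 * should the structure layout change.)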
+ */ + unsigned long cplen = + (unsigned long)&(pAuthReqPars->u2.inner_prefix_sz) - + (unsigned long)pAuthReqPars; + if (cplen == 16) + memcpy(pAuthReqPars, + (Cpa32U *)&(pRequest->hashStartSrcOffsetInBytes), + 16); + else + memcpy(pAuthReqPars, + (Cpa32U *)&(pRequest->hashStartSrcOffsetInBytes), + cplen); if (CPA_TRUE == pSessionDesc->isAuthEncryptOp) { pAuthReqPars->hash_state_sz = LAC_BYTES_TO_QUADWORDS(pAuthReqPars->u2.aad_sz); } else if (CPA_CY_SYM_HASH_SNOW3G_UIA2 == pSessionDesc->hashAlgorithm || CPA_CY_SYM_HASH_ZUC_EIA3 == pSessionDesc->hashAlgorithm) { pAuthReqPars->hash_state_sz = LAC_BYTES_TO_QUADWORDS(pSessionDesc->aadLenInBytes); } } } /** ***************************************************************************** * @ingroup cpaCySymDp * Write Message on the ring and write request params * * @description * Write Message on the ring and write request params * * @param[in/out] pRequest Pointer to operation data for crypto * data plane API * @param[in/out] pCurrentQatMsg Pointer to ring memory where msg will * be written * * @retval none * *****************************************************************************/ void LacDp_WriteRingMsgFull(CpaCySymDpOpData *pRequest, icp_qat_fw_la_bulk_req_t *pCurrentQatMsg) { lac_session_desc_t *pSessionDesc = LAC_SYM_SESSION_DESC_FROM_CTX_GET(pRequest->sessionCtx); Cpa8U *pMsgDummy = NULL; Cpa8U *pCacheDummyHdr = NULL; Cpa8U *pCacheDummyFtr = NULL; sal_qat_content_desc_info_t *pCdInfo = NULL; Cpa8U *pHwBlockBaseInDRAM = NULL; Cpa32U hwBlockOffsetInDRAM = 0; Cpa32U sizeInBytes = 0; CpaCySymCipherAlgorithm cipher = pSessionDesc->cipherAlgorithm; CpaCySymHashAlgorithm hash = pSessionDesc->hashAlgorithm; + sal_crypto_service_t *pService = + (sal_crypto_service_t *)pRequest->instanceHandle; Cpa32U capabilitiesMask = ((sal_crypto_service_t *)pRequest->instanceHandle) ->generic_service_info.capabilitiesMask; + CpaBoolean isSpCcm = LAC_CIPHER_IS_CCM(cipher) && + LAC_CIPHER_IS_SPC(cipher, hash, capabilitiesMask); + Cpa8U paddingLen = 0; Cpa8U blockLen = 0; + Cpa32U aadDataLen = 0; pMsgDummy = (Cpa8U *)pCurrentQatMsg; /* Write Request */ /* * Fill in the header and footer bytes of the ET ring message - cached - * from - * the session descriptor. + * from the session descriptor. */ - if (!pSessionDesc->isSinglePass && - LAC_CIPHER_IS_SPC(cipher, hash, capabilitiesMask) && - (LAC_CIPHER_SPC_IV_SIZE == pRequest->ivLenInBytes)) { - pSessionDesc->isSinglePass = CPA_TRUE; + if ((NON_SPC != pSessionDesc->singlePassState) && + (isSpCcm || (LAC_CIPHER_SPC_IV_SIZE == pRequest->ivLenInBytes))) { + pSessionDesc->singlePassState = SPC; pSessionDesc->isCipher = CPA_TRUE; pSessionDesc->isAuthEncryptOp = CPA_FALSE; pSessionDesc->isAuth = CPA_FALSE; pSessionDesc->symOperation = CPA_CY_SYM_OP_CIPHER; pSessionDesc->laCmdId = ICP_QAT_FW_LA_CMD_CIPHER; if (CPA_CY_SYM_HASH_AES_GMAC == pSessionDesc->hashAlgorithm) { pSessionDesc->aadLenInBytes = pRequest->messageLenToHashInBytes; } /* New bit position (13) for SINGLE PASS. 
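+		 * Single pass means the cipher and the authentication are
+		 * handled in one firmware request rather than as a chained
+		 * cipher+hash pair, which is why the session operation is
+		 * switched to plain CIPHER above.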
* The FW provides a specific macro to use to set the proto flag */ ICP_QAT_FW_LA_SINGLE_PASS_PROTO_FLAG_SET( pSessionDesc->laCmdFlags, ICP_QAT_FW_LA_SINGLE_PASS_PROTO); - ICP_QAT_FW_LA_PROTO_SET(pSessionDesc->laCmdFlags, 0); + if (isCyGen2x(pService)) + ICP_QAT_FW_LA_PROTO_SET(pSessionDesc->laCmdFlags, 0); pCdInfo = &(pSessionDesc->contentDescInfo); pHwBlockBaseInDRAM = (Cpa8U *)pCdInfo->pData; if (CPA_CY_SYM_CIPHER_DIRECTION_DECRYPT == pSessionDesc->cipherDirection) { if (LAC_CIPHER_IS_GCM(cipher)) hwBlockOffsetInDRAM = LAC_QUADWORDS_TO_BYTES( - LAC_SYM_QAT_CIPHER_OFFSET_IN_DRAM_GCM_SPC); - else + LAC_SYM_QAT_CIPHER_GCM_SPC_OFFSET_IN_DRAM); + else if (LAC_CIPHER_IS_CHACHA(cipher)) hwBlockOffsetInDRAM = LAC_QUADWORDS_TO_BYTES( - LAC_SYM_QAT_CIPHER_OFFSET_IN_DRAM_CHACHA_SPC); + LAC_SYM_QAT_CIPHER_CHACHA_SPC_OFFSET_IN_DRAM); + } else if (isSpCcm) { + hwBlockOffsetInDRAM = LAC_QUADWORDS_TO_BYTES( + LAC_SYM_QAT_CIPHER_CCM_SPC_OFFSET_IN_DRAM); } + + /* Update slice type, as used algos changed */ + pSessionDesc->cipherSliceType = + LacCipher_GetCipherSliceType(pService, cipher, hash); + + ICP_QAT_FW_LA_SLICE_TYPE_SET(pSessionDesc->laCmdFlags, + pSessionDesc->cipherSliceType); + /* construct cipherConfig in CD in DRAM */ LacSymQat_CipherHwBlockPopulateCfgData(pSessionDesc, pHwBlockBaseInDRAM + hwBlockOffsetInDRAM, &sizeInBytes); SalQatMsg_CmnHdrWrite((icp_qat_fw_comn_req_t *)&( pSessionDesc->reqSpcCacheHdr), ICP_QAT_FW_COMN_REQ_CPM_FW_LA, pSessionDesc->laCmdId, pSessionDesc->cmnRequestFlags, pSessionDesc->laCmdFlags); + } else if ((SPC == pSessionDesc->singlePassState) && + (LAC_CIPHER_SPC_IV_SIZE != pRequest->ivLenInBytes)) { + pSessionDesc->symOperation = CPA_CY_SYM_OP_ALGORITHM_CHAINING; + pSessionDesc->singlePassState = LIKELY_SPC; + pSessionDesc->isCipher = CPA_TRUE; + pSessionDesc->isAuthEncryptOp = CPA_TRUE; + pSessionDesc->isAuth = CPA_TRUE; + pCdInfo = &(pSessionDesc->contentDescInfo); + pHwBlockBaseInDRAM = (Cpa8U *)pCdInfo->pData; + + if (CPA_CY_SYM_CIPHER_DIRECTION_ENCRYPT == + pSessionDesc->cipherDirection) { + pSessionDesc->laCmdId = ICP_QAT_FW_LA_CMD_CIPHER_HASH; + } else { + pSessionDesc->laCmdId = ICP_QAT_FW_LA_CMD_HASH_CIPHER; + } + + ICP_QAT_FW_LA_SINGLE_PASS_PROTO_FLAG_SET( + pSessionDesc->laCmdFlags, 0); + ICP_QAT_FW_LA_PROTO_SET(pSessionDesc->laCmdFlags, + ICP_QAT_FW_LA_GCM_PROTO); + + LacSymQat_CipherHwBlockPopulateCfgData(pSessionDesc, + pHwBlockBaseInDRAM + + hwBlockOffsetInDRAM, + &sizeInBytes); + SalQatMsg_CmnHdrWrite((icp_qat_fw_comn_req_t *)&( + pSessionDesc->reqCacheHdr), + ICP_QAT_FW_COMN_REQ_CPM_FW_LA, + pSessionDesc->laCmdId, + pSessionDesc->cmnRequestFlags, + pSessionDesc->laCmdFlags); } else if (CPA_CY_SYM_HASH_AES_GMAC == pSessionDesc->hashAlgorithm) { pSessionDesc->aadLenInBytes = pRequest->messageLenToHashInBytes; } - if (pSessionDesc->isSinglePass) { + if (SPC == pSessionDesc->singlePassState) { pCacheDummyHdr = (Cpa8U *)&(pSessionDesc->reqSpcCacheHdr); pCacheDummyFtr = (Cpa8U *)&(pSessionDesc->reqSpcCacheFtr); } else { if (!pSessionDesc->useSymConstantsTable) { pCacheDummyHdr = (Cpa8U *)&(pSessionDesc->reqCacheHdr); pCacheDummyFtr = (Cpa8U *)&(pSessionDesc->reqCacheFtr); } else { pCacheDummyHdr = (Cpa8U *)&(pSessionDesc->shramReqCacheHdr); pCacheDummyFtr = (Cpa8U *)&(pSessionDesc->shramReqCacheFtr); } } memcpy(pMsgDummy, pCacheDummyHdr, (LAC_LONG_WORD_IN_BYTES * LAC_SIZE_OF_CACHE_HDR_IN_LW)); memset((pMsgDummy + (LAC_LONG_WORD_IN_BYTES * LAC_SIZE_OF_CACHE_HDR_IN_LW)), 0, (LAC_LONG_WORD_IN_BYTES * LAC_SIZE_OF_CACHE_TO_CLEAR_IN_LW)); memcpy(pMsgDummy + 
(LAC_LONG_WORD_IN_BYTES * LAC_START_OF_CACHE_FTR_IN_LW), pCacheDummyFtr, (LAC_LONG_WORD_IN_BYTES * LAC_SIZE_OF_CACHE_FTR_IN_LW)); SalQatMsg_CmnMidWrite(pCurrentQatMsg, pRequest, (CPA_DP_BUFLIST == pRequest->srcBufferLen ? QAT_COMN_PTR_TYPE_SGL : QAT_COMN_PTR_TYPE_FLAT), pRequest->srcBuffer, pRequest->dstBuffer, pRequest->srcBufferLen, pRequest->dstBufferLen); - if (CPA_CY_SYM_HASH_AES_CCM == pSessionDesc->hashAlgorithm && - pSessionDesc->isAuth == CPA_TRUE) { + if ((CPA_CY_SYM_HASH_AES_CCM == pSessionDesc->hashAlgorithm && + pSessionDesc->isAuth == CPA_TRUE) || + isSpCcm) { /* prepare IV and AAD for CCM */ LacSymAlgChain_PrepareCCMData( pSessionDesc, pRequest->pAdditionalAuthData, pRequest->pIv, pRequest->messageLenToCipherInBytes, pRequest->ivLenInBytes); /* According to the API, for CCM and GCM, - * messageLenToHashInBytes - * and hashStartSrcOffsetInBytes are not initialized by the - * user and must be set by the driver + * messageLenToHashInBytes and hashStartSrcOffsetInBytes are not + * initialized by the user and must be set by the driver */ pRequest->hashStartSrcOffsetInBytes = pRequest->cryptoStartSrcOffsetInBytes; pRequest->messageLenToHashInBytes = pRequest->messageLenToCipherInBytes; - } else if (!pSessionDesc->isSinglePass && + } else if ((SPC != pSessionDesc->singlePassState) && (CPA_CY_SYM_HASH_AES_GCM == pSessionDesc->hashAlgorithm || CPA_CY_SYM_HASH_AES_GMAC == pSessionDesc->hashAlgorithm)) { /* GCM case */ if (CPA_CY_SYM_HASH_AES_GMAC != pSessionDesc->hashAlgorithm) { /* According to the API, for CCM and GCM, * messageLenToHashInBytes and hashStartSrcOffsetInBytes * are not initialized by the user and must be set * by the driver */ pRequest->hashStartSrcOffsetInBytes = pRequest->cryptoStartSrcOffsetInBytes; pRequest->messageLenToHashInBytes = pRequest->messageLenToCipherInBytes; LacSymAlgChain_PrepareGCMData( pSessionDesc, pRequest->pAdditionalAuthData); } if (LAC_CIPHER_IV_SIZE_GCM_12 == pRequest->ivLenInBytes) { ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET( pCurrentQatMsg->comn_hdr.serv_specif_flags, ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS); } } /* Write Request Params */ if (pSessionDesc->isCipher) { if (CPA_CY_SYM_CIPHER_ARC4 == pSessionDesc->cipherAlgorithm) { /* ARC4 does not have an IV but the field is used to - * store the - * initial state */ + * store the initial state */ pRequest->iv = pSessionDesc->cipherARC4InitialStatePhysAddr; } + ICP_QAT_FW_LA_SLICE_TYPE_SET( + pCurrentQatMsg->comn_hdr.serv_specif_flags, + pSessionDesc->cipherSliceType); + LacSymQat_CipherRequestParamsPopulate( + pSessionDesc, pCurrentQatMsg, pRequest->cryptoStartSrcOffsetInBytes, pRequest->messageLenToCipherInBytes, pRequest->iv, pRequest->pIv); - if (pSessionDesc->isSinglePass) { + if (SPC == pSessionDesc->singlePassState) { icp_qat_fw_la_cipher_req_params_t *pCipherReqParams = (icp_qat_fw_la_cipher_req_params_t *)((Cpa8U *)&( pCurrentQatMsg->serv_specif_rqpars) + ICP_QAT_FW_CIPHER_REQUEST_PARAMETERS_OFFSET); - pCipherReqParams->spc_aad_addr = - (uint64_t)pRequest->additionalAuthData; - pCipherReqParams->spc_aad_sz = - pSessionDesc->aadLenInBytes; + icp_qat_fw_la_cipher_20_req_params_t *pCipher20ReqParams = + (void + *)((Cpa8U *)&( + pCurrentQatMsg->serv_specif_rqpars) + + ICP_QAT_FW_CIPHER_REQUEST_PARAMETERS_OFFSET); - pCipherReqParams->spc_auth_res_addr = - (uint64_t)pRequest->digestResult; - pCipherReqParams->spc_auth_res_sz = - pSessionDesc->hashResultSize; + if (isCyGen4x(pService)) { + pCipher20ReqParams->spc_aad_addr = + (uint64_t)pRequest->additionalAuthData; + pCipher20ReqParams->spc_aad_sz = + 
pSessionDesc->aadLenInBytes; + pCipher20ReqParams->spc_aad_offset = 0; + + if (isSpCcm) + pCipher20ReqParams->spc_aad_sz += + LAC_CIPHER_CCM_AAD_OFFSET; + + pCipher20ReqParams->spc_auth_res_addr = + (uint64_t)pRequest->digestResult; + pCipher20ReqParams->spc_auth_res_sz = + (Cpa8U)pSessionDesc->hashResultSize; + } else { + pCipherReqParams->spc_aad_addr = + (uint64_t)pRequest->additionalAuthData; + pCipherReqParams->spc_aad_sz = + (Cpa16U)pSessionDesc->aadLenInBytes; + + pCipherReqParams->spc_auth_res_addr = + (uint64_t)pRequest->digestResult; + pCipherReqParams->spc_auth_res_sz = + (Cpa8U)pSessionDesc->hashResultSize; + } /* For CHACHA and AES_GCM single pass AAD buffer needs - * alignment - * if aadLenInBytes is nonzero. - * In case of AES-GMAC, AAD buffer passed in the src - * buffer. + * alignment if aadLenInBytes is nonzero. In case of + * AES-GMAC, AAD buffer passed in the src buffer. */ if (0 != pSessionDesc->aadLenInBytes && CPA_CY_SYM_HASH_AES_GMAC != pSessionDesc->hashAlgorithm) { blockLen = LacSymQat_CipherBlockSizeBytesGet( pSessionDesc->cipherAlgorithm); - if ((pSessionDesc->aadLenInBytes % blockLen) != - 0) { - paddingLen = blockLen - - (pSessionDesc->aadLenInBytes % - blockLen); - memset( - &pRequest->pAdditionalAuthData - [pSessionDesc->aadLenInBytes], - 0, - paddingLen); + aadDataLen = pSessionDesc->aadLenInBytes; + + /* In case of AES_CCM, B0 block size and 2 bytes + * of AAD len + * encoding need to be added to total AAD data + * len */ + if (isSpCcm) + aadDataLen += LAC_CIPHER_CCM_AAD_OFFSET; + + if ((aadDataLen % blockLen) != 0) { + paddingLen = + blockLen - (aadDataLen % blockLen); + memset(&pRequest->pAdditionalAuthData + [aadDataLen], + 0, + paddingLen); } } } } if (pSessionDesc->isAuth) { lac_sym_qat_hash_state_buffer_info_t *pHashStateBufferInfo = &(pSessionDesc->hashStateBufferInfo); icp_qat_fw_la_auth_req_params_t *pAuthReqPars = (icp_qat_fw_la_auth_req_params_t *)((Cpa8U *)&(pCurrentQatMsg->serv_specif_rqpars) + ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET); if ((CPA_CY_SYM_HASH_SNOW3G_UIA2 != pSessionDesc->hashAlgorithm && CPA_CY_SYM_HASH_AES_CCM != pSessionDesc->hashAlgorithm && CPA_CY_SYM_HASH_AES_GCM != pSessionDesc->hashAlgorithm && CPA_CY_SYM_HASH_AES_GMAC != pSessionDesc->hashAlgorithm && CPA_CY_SYM_HASH_ZUC_EIA3 != pSessionDesc->hashAlgorithm) && (pHashStateBufferInfo->prefixAadSzQuadWords > 0)) { /* prefixAadSzQuadWords > 0 when there is prefix data - i.e. nested hash or HMAC no precompute cases Note partials not supported on DP api so we do not need dynamic hash state in this case */ pRequest->additionalAuthData = pHashStateBufferInfo->pDataPhys + LAC_QUADWORDS_TO_BYTES( pHashStateBufferInfo->stateStorageSzQuadWords); } /* The first 24 bytes in icp_qat_fw_la_auth_req_params_t can be * copied directly from the op request data because they share a * corresponding layout. The remaining 4 bytes are taken * from the session message template and use values - * preconfigured at - * sessionInit (updated per request for some specific cases - * below) + * preconfigured at sessionInit (updated per request for some + * specific cases below) */ - - /* We force a specific compiler optimisation here. The length - * to - * be copied turns out to be always 16, and by coding a memcpy - * with - * a literal value the compiler will compile inline code (in - * fact, - * only two vector instructions) to effect the copy. This gives - * us - * a huge performance increase. 
- */ - unsigned long cplen = - (unsigned long)&(pAuthReqPars->u2.inner_prefix_sz) - - (unsigned long)pAuthReqPars; - if (cplen == 16) - memcpy(pAuthReqPars, - (Cpa32U *)&(pRequest->hashStartSrcOffsetInBytes), - 16); - else - memcpy(pAuthReqPars, - (Cpa32U *)&(pRequest->hashStartSrcOffsetInBytes), - cplen); + memcpy(pAuthReqPars, + (Cpa32U *)&(pRequest->hashStartSrcOffsetInBytes), + ((uintptr_t) & + (pAuthReqPars->u2.inner_prefix_sz) - + (uintptr_t)pAuthReqPars)); if (CPA_TRUE == pSessionDesc->isAuthEncryptOp) { pAuthReqPars->hash_state_sz = LAC_BYTES_TO_QUADWORDS(pAuthReqPars->u2.aad_sz); } else if (CPA_CY_SYM_HASH_SNOW3G_UIA2 == pSessionDesc->hashAlgorithm || CPA_CY_SYM_HASH_ZUC_EIA3 == pSessionDesc->hashAlgorithm) { pAuthReqPars->hash_state_sz = LAC_BYTES_TO_QUADWORDS(pSessionDesc->aadLenInBytes); } } } CpaStatus cpaCySymDpSessionCtxGetSize(const CpaInstanceHandle instanceHandle, const CpaCySymSessionSetupData *pSessionSetupData, Cpa32U *pSessionCtxSizeInBytes) { CpaStatus status = CPA_STATUS_SUCCESS; /* CPA_INSTANCE_HANDLE_SINGLE is not supported on DP apis */ LAC_CHECK_INSTANCE_HANDLE(instanceHandle); /* All other param checks are common with trad api */ /* Check for valid pointers */ LAC_CHECK_NULL_PARAM(pSessionCtxSizeInBytes); status = cpaCySymSessionCtxGetSize(instanceHandle, pSessionSetupData, pSessionCtxSizeInBytes); return status; } CpaStatus cpaCySymDpSessionCtxGetDynamicSize( const CpaInstanceHandle instanceHandle, const CpaCySymSessionSetupData *pSessionSetupData, Cpa32U *pSessionCtxSizeInBytes) { CpaStatus status = CPA_STATUS_SUCCESS; /* CPA_INSTANCE_HANDLE_SINGLE is not supported on DP apis */ LAC_CHECK_INSTANCE_HANDLE(instanceHandle); /* All other param checks are common with trad api */ /* Check for valid pointers */ LAC_CHECK_NULL_PARAM(pSessionCtxSizeInBytes); status = cpaCySymSessionCtxGetDynamicSize(instanceHandle, pSessionSetupData, pSessionCtxSizeInBytes); return status; } /** @ingroup cpaCySymDp */ CpaStatus cpaCySymDpInitSession(CpaInstanceHandle instanceHandle, const CpaCySymSessionSetupData *pSessionSetupData, CpaCySymDpSessionCtx sessionCtx) { CpaStatus status = CPA_STATUS_FAIL; sal_service_t *pService = NULL; LAC_CHECK_INSTANCE_HANDLE(instanceHandle); SAL_CHECK_INSTANCE_TYPE(instanceHandle, (SAL_SERVICE_TYPE_CRYPTO | SAL_SERVICE_TYPE_CRYPTO_SYM)); LAC_CHECK_NULL_PARAM(pSessionSetupData); pService = (sal_service_t *)instanceHandle; /* Check crypto service is running otherwise return an error */ SAL_RUNNING_CHECK(pService); status = LacSym_InitSession(instanceHandle, NULL, /* Callback */ pSessionSetupData, CPA_TRUE, /* isDPSession */ sessionCtx); return status; } CpaStatus cpaCySymDpRemoveSession(const CpaInstanceHandle instanceHandle, CpaCySymDpSessionCtx sessionCtx) { /* CPA_INSTANCE_HANDLE_SINGLE is not supported on DP apis */ LAC_CHECK_INSTANCE_HANDLE(instanceHandle); -/* All other param checks are common with trad api */ + /* All other param checks are common with trad api */ return cpaCySymRemoveSession(instanceHandle, sessionCtx); } CpaStatus cpaCySymDpRegCbFunc(const CpaInstanceHandle instanceHandle, const CpaCySymDpCbFunc pSymDpCb) { sal_crypto_service_t *pService = (sal_crypto_service_t *)instanceHandle; LAC_CHECK_INSTANCE_HANDLE(instanceHandle); SAL_CHECK_INSTANCE_TYPE(instanceHandle, (SAL_SERVICE_TYPE_CRYPTO | SAL_SERVICE_TYPE_CRYPTO_SYM)); LAC_CHECK_NULL_PARAM(pSymDpCb); SAL_RUNNING_CHECK(instanceHandle); pService->pSymDpCb = pSymDpCb; return CPA_STATUS_SUCCESS; } CpaStatus cpaCySymDpEnqueueOp(CpaCySymDpOpData *pRequest, const CpaBoolean 
performOpNow) { icp_qat_fw_la_bulk_req_t *pCurrentQatMsg = NULL; icp_comms_trans_handle trans_handle = NULL; lac_session_desc_t *pSessionDesc = NULL; write_ringMsgFunc_t callFunc; CpaStatus status = CPA_STATUS_SUCCESS; LAC_CHECK_NULL_PARAM(pRequest); status = LacDp_EnqueueParamCheck(pRequest); if (CPA_STATUS_SUCCESS != status) { return status; } + /* Check if SAL is running in crypto data plane otherwise return an + * error */ + SAL_RUNNING_CHECK(pRequest->instanceHandle); + trans_handle = ((sal_crypto_service_t *)pRequest->instanceHandle) ->trans_handle_sym_tx; pSessionDesc = LAC_SYM_SESSION_DESC_FROM_CTX_GET(pRequest->sessionCtx); icp_adf_getSingleQueueAddr(trans_handle, (void **)&pCurrentQatMsg); if (NULL == pCurrentQatMsg) { /* * No space is available on the queue. */ return CPA_STATUS_RETRY; } callFunc = (write_ringMsgFunc_t)pSessionDesc->writeRingMsgFunc; LAC_CHECK_NULL_PARAM(callFunc); callFunc(pRequest, pCurrentQatMsg); qatUtilsAtomicInc(&(pSessionDesc->u.pendingDpCbCount)); if (CPA_TRUE == performOpNow) { SalQatMsg_updateQueueTail(trans_handle); } return CPA_STATUS_SUCCESS; } CpaStatus cpaCySymDpPerformOpNow(const CpaInstanceHandle instanceHandle) { icp_comms_trans_handle trans_handle = NULL; LAC_CHECK_INSTANCE_HANDLE(instanceHandle); SAL_CHECK_INSTANCE_TYPE(instanceHandle, (SAL_SERVICE_TYPE_CRYPTO | SAL_SERVICE_TYPE_CRYPTO_SYM)); /* Check if SAL is initialised otherwise return an error */ SAL_RUNNING_CHECK(instanceHandle); trans_handle = ((sal_crypto_service_t *)instanceHandle)->trans_handle_sym_tx; if (CPA_TRUE == icp_adf_queueDataToSend(trans_handle)) { SalQatMsg_updateQueueTail(trans_handle); } return CPA_STATUS_SUCCESS; } CpaStatus cpaCySymDpEnqueueOpBatch(const Cpa32U numberRequests, CpaCySymDpOpData *pRequests[], const CpaBoolean performOpNow) { icp_qat_fw_la_bulk_req_t *pCurrentQatMsg = NULL; icp_comms_trans_handle trans_handle = NULL; lac_session_desc_t *pSessionDesc = NULL; write_ringMsgFunc_t callFunc; Cpa32U i = 0; CpaStatus status = CPA_STATUS_SUCCESS; sal_crypto_service_t *pService = NULL; LAC_CHECK_NULL_PARAM(pRequests); LAC_CHECK_NULL_PARAM(pRequests[0]); LAC_CHECK_NULL_PARAM(pRequests[0]->instanceHandle); pService = (sal_crypto_service_t *)(pRequests[0]->instanceHandle); if ((0 == numberRequests) || (numberRequests > pService->maxNumSymReqBatch)) { LAC_INVALID_PARAM_LOG1( "The number of requests needs to be between 1 " "and %d", pService->maxNumSymReqBatch); return CPA_STATUS_INVALID_PARAM; } for (i = 0; i < numberRequests; i++) { status = LacDp_EnqueueParamCheck(pRequests[i]); if (CPA_STATUS_SUCCESS != status) { return status; } /* Check that all instance handles are the same */ if (pRequests[i]->instanceHandle != pRequests[0]->instanceHandle) { LAC_INVALID_PARAM_LOG( "All instance handles should be the same " "in the requests"); return CPA_STATUS_INVALID_PARAM; } } + /* Check if SAL is running in crypto data plane otherwise return an + * error */ + SAL_RUNNING_CHECK(pRequests[0]->instanceHandle); + trans_handle = ((sal_crypto_service_t *)pRequests[0]->instanceHandle) ->trans_handle_sym_tx; pSessionDesc = LAC_SYM_SESSION_DESC_FROM_CTX_GET(pRequests[0]->sessionCtx); icp_adf_getQueueMemory(trans_handle, numberRequests, (void **)&pCurrentQatMsg); if (NULL == pCurrentQatMsg) { /* * No space is available on the queue. 
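+		 * CPA_STATUS_RETRY is transient: the caller would typically
+		 * poll the instance (see icp_sal_CyPollDpInstance() below)
+		 * to let in-flight requests drain, then resubmit the batch.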
*/ return CPA_STATUS_RETRY; } for (i = 0; i < numberRequests; i++) { pSessionDesc = LAC_SYM_SESSION_DESC_FROM_CTX_GET(pRequests[i]->sessionCtx); callFunc = (write_ringMsgFunc_t)pSessionDesc->writeRingMsgFunc; callFunc(pRequests[i], pCurrentQatMsg); icp_adf_getQueueNext(trans_handle, (void **)&pCurrentQatMsg); qatUtilsAtomicAdd(1, &(pSessionDesc->u.pendingDpCbCount)); } if (CPA_TRUE == performOpNow) { SalQatMsg_updateQueueTail(trans_handle); } return CPA_STATUS_SUCCESS; } CpaStatus icp_sal_CyPollDpInstance(const CpaInstanceHandle instanceHandle, const Cpa32U responseQuota) { icp_comms_trans_handle trans_handle = NULL; LAC_CHECK_INSTANCE_HANDLE(instanceHandle); SAL_CHECK_INSTANCE_TYPE(instanceHandle, (SAL_SERVICE_TYPE_CRYPTO | SAL_SERVICE_TYPE_CRYPTO_SYM)); /* Check if SAL is initialised otherwise return an error */ SAL_RUNNING_CHECK(instanceHandle); trans_handle = ((sal_crypto_service_t *)instanceHandle)->trans_handle_sym_rx; return icp_adf_pollQueue(trans_handle, responseQuota); } diff --git a/sys/dev/qat/qat_api/common/crypto/sym/lac_sym_hash.c b/sys/dev/qat/qat_api/common/crypto/sym/lac_sym_hash.c index e662d0d6d220..a502c7318844 100644 --- a/sys/dev/qat/qat_api/common/crypto/sym/lac_sym_hash.c +++ b/sys/dev/qat/qat_api/common/crypto/sym/lac_sym_hash.c @@ -1,783 +1,766 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright(c) 2007-2022 Intel Corporation */ /* $FreeBSD$ */ /** *************************************************************************** * @file lac_sym_hash.c * * @ingroup LacHash * * Hash specific functionality ***************************************************************************/ /* ******************************************************************************* * Include public/global header files ******************************************************************************* */ #include "cpa.h" #include "cpa_cy_sym.h" #include "icp_accel_devices.h" #include "icp_adf_debug.h" /* ******************************************************************************* * Include private header files ******************************************************************************* */ #include "lac_common.h" #include "lac_mem.h" #include "lac_sym.h" #include "lac_session.h" #include "lac_sym_hash.h" #include "lac_log.h" #include "lac_sym_qat_hash.h" #include "lac_sym_qat_hash_defs_lookup.h" #include "lac_sym_cb.h" #include "lac_sync.h" #define LAC_HASH_ALG_MODE_NOT_SUPPORTED(alg, mode) \ ((((CPA_CY_SYM_HASH_KASUMI_F9 == (alg)) || \ (CPA_CY_SYM_HASH_SNOW3G_UIA2 == (alg)) || \ (CPA_CY_SYM_HASH_AES_XCBC == (alg)) || \ (CPA_CY_SYM_HASH_AES_CCM == (alg)) || \ (CPA_CY_SYM_HASH_AES_GCM == (alg)) || \ (CPA_CY_SYM_HASH_AES_GMAC == (alg)) || \ (CPA_CY_SYM_HASH_AES_CMAC == (alg)) || \ (CPA_CY_SYM_HASH_ZUC_EIA3 == (alg))) && \ (CPA_CY_SYM_HASH_MODE_AUTH != (mode))) || \ - (((CPA_CY_SYM_HASH_SHA3_224 == (alg)) || \ - (CPA_CY_SYM_HASH_SHA3_256 == (alg)) || \ - (CPA_CY_SYM_HASH_SHA3_384 == (alg)) || \ - (CPA_CY_SYM_HASH_SHA3_512 == (alg))) && \ - (CPA_CY_SYM_HASH_MODE_NESTED == (mode))) || \ - (((CPA_CY_SYM_HASH_SHAKE_128 == (alg)) || \ - (CPA_CY_SYM_HASH_SHAKE_256 == (alg))) && \ - (CPA_CY_SYM_HASH_MODE_AUTH == (mode)))) - + ((LAC_HASH_IS_SHA3(alg)) && (CPA_CY_SYM_HASH_MODE_NESTED == (mode)))) /**< Macro to check for valid algorithm-mode combination */ +void LacSync_GenBufListVerifyCb(void *pCallbackTag, + CpaStatus status, + CpaCySymOp operationType, + void *pOpData, + CpaBufferList *pDstBuffer, + CpaBoolean opResult); + /** * @ingroup LacHash * This callback function will be invoked 
whenever a synchronous * hash precompute operation completes. It will set the wait * queue flag for the synchronous operation. * * @param[in] pCallbackTag Opaque value provided by user. This will * be a pointer to a wait queue flag. * * @retval * None * */ static void LacHash_SyncPrecomputeDoneCb(void *pCallbackTag) { LacSync_GenWakeupSyncCaller(pCallbackTag, CPA_STATUS_SUCCESS); } /** @ingroup LacHash */ CpaStatus LacHash_StatePrefixAadBufferInit( sal_service_t *pService, const CpaCySymHashSetupData *pHashSetupData, icp_qat_la_bulk_req_ftr_t *pReq, icp_qat_hw_auth_mode_t qatHashMode, Cpa8U *pHashStateBuffer, lac_sym_qat_hash_state_buffer_info_t *pHashStateBufferInfo) { /* set up the hash state prefix buffer info structure */ pHashStateBufferInfo->pData = pHashStateBuffer; pHashStateBufferInfo->pDataPhys = LAC_MEM_CAST_PTR_TO_UINT64( LAC_OS_VIRT_TO_PHYS_EXTERNAL((*pService), pHashStateBuffer)); if (pHashStateBufferInfo->pDataPhys == 0) { LAC_LOG_ERROR("Unable to get the physical address of " - "the hash state buffer"); + "the hash state buffer\n"); return CPA_STATUS_FAIL; } LacSymQat_HashStatePrefixAadBufferSizeGet(pReq, pHashStateBufferInfo); /* Prefix data gets copied to the hash state buffer for nested mode */ if (CPA_CY_SYM_HASH_MODE_NESTED == pHashSetupData->hashMode) { LacSymQat_HashStatePrefixAadBufferPopulate( pHashStateBufferInfo, pReq, pHashSetupData->nestedModeSetupData.pInnerPrefixData, - pHashSetupData->nestedModeSetupData.innerPrefixLenInBytes, + (Cpa8U)pHashSetupData->nestedModeSetupData + .innerPrefixLenInBytes, pHashSetupData->nestedModeSetupData.pOuterPrefixData, - pHashSetupData->nestedModeSetupData.outerPrefixLenInBytes); + (Cpa8U)pHashSetupData->nestedModeSetupData + .outerPrefixLenInBytes); } /* For mode2 HMAC the key gets copied into both the inner and * outer prefix fields */ else if (IS_HASH_MODE_2_AUTH(qatHashMode, pHashSetupData->hashMode)) { LacSymQat_HashStatePrefixAadBufferPopulate( pHashStateBufferInfo, pReq, pHashSetupData->authModeSetupData.authKey, - pHashSetupData->authModeSetupData.authKeyLenInBytes, + (Cpa8U)pHashSetupData->authModeSetupData.authKeyLenInBytes, pHashSetupData->authModeSetupData.authKey, - pHashSetupData->authModeSetupData.authKeyLenInBytes); + (Cpa8U)pHashSetupData->authModeSetupData.authKeyLenInBytes); } /* else do nothing for the other cases */ return CPA_STATUS_SUCCESS; } /** @ingroup LacHash */ CpaStatus LacHash_PrecomputeDataCreate(const CpaInstanceHandle instanceHandle, CpaCySymSessionSetupData *pSessionSetup, lac_hash_precompute_done_cb_t callbackFn, void *pCallbackTag, Cpa8U *pWorkingBuffer, Cpa8U *pState1, Cpa8U *pState2) { CpaStatus status = CPA_STATUS_SUCCESS; Cpa8U *pAuthKey = NULL; Cpa32U authKeyLenInBytes = 0; CpaCySymHashAlgorithm hashAlgorithm = pSessionSetup->hashSetupData.hashAlgorithm; CpaCySymHashAuthModeSetupData *pAuthModeSetupData = &pSessionSetup->hashSetupData.authModeSetupData; /* synchronous operation */ if (NULL == callbackFn) { lac_sync_op_data_t *pSyncCallbackData = NULL; status = LacSync_CreateSyncCookie(&pSyncCallbackData); if (CPA_STATUS_SUCCESS == status) { status = LacHash_PrecomputeDataCreate( instanceHandle, pSessionSetup, LacHash_SyncPrecomputeDoneCb, /* wait queue condition from sync cookie */ pSyncCallbackData, pWorkingBuffer, pState1, pState2); } else { return status; } if (CPA_STATUS_SUCCESS == status) { CpaStatus syncStatus = CPA_STATUS_SUCCESS; syncStatus = LacSync_WaitForCallback( pSyncCallbackData, LAC_SYM_SYNC_CALLBACK_TIMEOUT, &status, NULL); /* If callback doesn't come back */ if 
(CPA_STATUS_SUCCESS != syncStatus) { QAT_UTILS_LOG( "callback functions for precomputes did not return\n"); status = syncStatus; } } else { /* As the Request was not sent the Callback will never * be called, so need to indicate that we're finished * with cookie so it can be destroyed. */ LacSync_SetSyncCookieComplete(pSyncCallbackData); } LacSync_DestroySyncCookie(&pSyncCallbackData); return status; } /* set up convenience pointers */ pAuthKey = pAuthModeSetupData->authKey; authKeyLenInBytes = pAuthModeSetupData->authKeyLenInBytes; /* Pre-compute data state pointers must already be set up * by LacSymQat_HashSetupBlockInit() */ /* state1 is not allocated for AES XCBC/CCM/GCM/Kasumi/UIA2 * so for these algorithms set state2 only */ if (CPA_CY_SYM_HASH_AES_XCBC == hashAlgorithm) { status = LacSymHash_AesECBPreCompute(instanceHandle, hashAlgorithm, authKeyLenInBytes, pAuthKey, pWorkingBuffer, pState2, callbackFn, pCallbackTag); } else if (CPA_CY_SYM_HASH_AES_CMAC == hashAlgorithm) { /* First, copy the original key to pState2 */ memcpy(pState2, pAuthKey, authKeyLenInBytes); /* Then precompute */ status = LacSymHash_AesECBPreCompute(instanceHandle, hashAlgorithm, authKeyLenInBytes, pAuthKey, pWorkingBuffer, pState2, callbackFn, pCallbackTag); } else if (CPA_CY_SYM_HASH_AES_CCM == hashAlgorithm) { /* - * The Inner Hash Initial State2 block must contain K - * (the cipher key) and 16 zeroes which will be replaced with - * EK(Ctr0) by the QAT-ME. - */ - - /* write the auth key which for CCM is equivalent to cipher key + * The Inner Hash Initial State2 block is 32 bytes long. + * Therefore, for keys bigger than 128 bits (16 bytes), + * there is no space for 16 zeroes. */ - memcpy(pState2, - pSessionSetup->cipherSetupData.pCipherKey, - pSessionSetup->cipherSetupData.cipherKeyLenInBytes); + if (pSessionSetup->cipherSetupData.cipherKeyLenInBytes == + ICP_QAT_HW_AES_128_KEY_SZ) { + /* + * The Inner Hash Initial State2 block must contain K + * (the cipher key) and 16 zeroes which will be replaced + * with EK(Ctr0) by the QAT-ME. + */ - /* initialize remaining buffer space to all zeroes */ - LAC_OS_BZERO( - pState2 + - pSessionSetup->cipherSetupData.cipherKeyLenInBytes, - ICP_QAT_HW_AES_CCM_CBC_E_CTR0_SZ); + /* write the auth key which for CCM is equivalent to + * cipher key + */ + memcpy( + pState2, + pSessionSetup->cipherSetupData.pCipherKey, + pSessionSetup->cipherSetupData.cipherKeyLenInBytes); + + /* initialize remaining buffer space to all zeroes */ + LAC_OS_BZERO(pState2 + + pSessionSetup->cipherSetupData + .cipherKeyLenInBytes, + ICP_QAT_HW_AES_CCM_CBC_E_CTR0_SZ); + } /* There is no request sent to the QAT for this operation, * so just invoke the user's callback directly to signal * completion of the precompute */ callbackFn(pCallbackTag); } else if (CPA_CY_SYM_HASH_AES_GCM == hashAlgorithm || CPA_CY_SYM_HASH_AES_GMAC == hashAlgorithm) { /* * The Inner Hash Initial State2 block contains the following * H (the Galois Hash Multiplier) * len(A) (the length of A), (length before padding) * 16 zeroes which will be replaced with EK(Ctr0) by the * QAT. 
*/ /* Memset state2 to 0 */ LAC_OS_BZERO(pState2, ICP_QAT_HW_GALOIS_H_SZ + ICP_QAT_HW_GALOIS_LEN_A_SZ + ICP_QAT_HW_GALOIS_E_CTR0_SZ); /* write H (the Galois Hash Multiplier) where H = E(K, 0...0) * This will only write bytes 0-15 of pState2 */ status = LacSymHash_AesECBPreCompute( instanceHandle, hashAlgorithm, pSessionSetup->cipherSetupData.cipherKeyLenInBytes, pSessionSetup->cipherSetupData.pCipherKey, pWorkingBuffer, pState2, callbackFn, pCallbackTag); if (CPA_STATUS_SUCCESS == status) { /* write len(A) (the length of A) into bytes 16-19 of - * pState2 - * in big-endian format. This field is 8 bytes */ + * pState2 in big-endian format. This field is 8 bytes + */ *(Cpa32U *)&pState2[ICP_QAT_HW_GALOIS_H_SZ] = LAC_MEM_WR_32(pAuthModeSetupData->aadLenInBytes); } } else if (CPA_CY_SYM_HASH_KASUMI_F9 == hashAlgorithm) { Cpa32U wordIndex = 0; Cpa32U *pTempKey = (Cpa32U *)(pState2 + authKeyLenInBytes); /* * The Inner Hash Initial State2 block must contain IK * (Initialisation Key), followed by IK XOR-ed with KM * (Key Modifier): IK||(IK^KM). */ /* write the auth key */ memcpy(pState2, pAuthKey, authKeyLenInBytes); /* initialise temp key with auth key */ memcpy(pTempKey, pAuthKey, authKeyLenInBytes); /* XOR Key with KASUMI F9 key modifier at 4 bytes level */ for (wordIndex = 0; wordIndex < LAC_BYTES_TO_LONGWORDS(authKeyLenInBytes); wordIndex++) { pTempKey[wordIndex] ^= LAC_HASH_KASUMI_F9_KEY_MODIFIER_4_BYTES; } /* There is no request sent to the QAT for this operation, * so just invoke the user's callback directly to signal * completion of the precompute */ callbackFn(pCallbackTag); } else if (CPA_CY_SYM_HASH_SNOW3G_UIA2 == hashAlgorithm) { /* * The Inner Hash Initial State2 should be all zeros */ LAC_OS_BZERO(pState2, ICP_QAT_HW_SNOW_3G_UIA2_STATE2_SZ); /* There is no request sent to the QAT for this operation, * so just invoke the user's callback directly to signal * completion of the precompute */ callbackFn(pCallbackTag); } else if (CPA_CY_SYM_HASH_ZUC_EIA3 == hashAlgorithm) { /* * The Inner Hash Initial State2 should contain the key * and zero the rest of the state. 
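 * (the LAC_OS_BZERO below clears the whole ICP_QAT_HW_ZUC_3G_EIA3_STATE2_SZ
 * region first, then the key is copied to its start)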
*/ LAC_OS_BZERO(pState2, ICP_QAT_HW_ZUC_3G_EIA3_STATE2_SZ); memcpy(pState2, pAuthKey, authKeyLenInBytes); /* There is no request sent to the QAT for this operation, * so just invoke the user's callback directly to signal * completion of the precompute */ callbackFn(pCallbackTag); } else if (CPA_CY_SYM_HASH_POLY == hashAlgorithm) { /* There is no request sent to the QAT for this operation, * so just invoke the user's callback directly to signal * completion of the precompute */ callbackFn(pCallbackTag); } else /* For Hmac Precomputes */ { status = LacSymHash_HmacPreComputes(instanceHandle, hashAlgorithm, authKeyLenInBytes, pAuthKey, pWorkingBuffer, pState1, pState2, callbackFn, pCallbackTag); } return status; } /** @ingroup LacHash */ CpaStatus LacHash_HashContextCheck(CpaInstanceHandle instanceHandle, const CpaCySymHashSetupData *pHashSetupData) { lac_sym_qat_hash_alg_info_t *pHashAlgInfo = NULL; lac_sym_qat_hash_alg_info_t *pOuterHashAlgInfo = NULL; CpaCySymCapabilitiesInfo capInfo; /*Protect against value of hash outside the bitmap*/ - if ((pHashSetupData->hashAlgorithm) >= - CPA_CY_SYM_HASH_CAP_BITMAP_SIZE) { + if (pHashSetupData->hashAlgorithm >= CPA_CY_SYM_HASH_CAP_BITMAP_SIZE) { LAC_INVALID_PARAM_LOG("hashAlgorithm"); return CPA_STATUS_INVALID_PARAM; } + cpaCySymQueryCapabilities(instanceHandle, &capInfo); if (!CPA_BITMAP_BIT_TEST(capInfo.hashes, pHashSetupData->hashAlgorithm) && pHashSetupData->hashAlgorithm != CPA_CY_SYM_HASH_AES_CBC_MAC) { - /* Ensure SHAKE algorithms are not supported */ - if ((CPA_CY_SYM_HASH_SHAKE_128 == - pHashSetupData->hashAlgorithm) || - (CPA_CY_SYM_HASH_SHAKE_256 == - pHashSetupData->hashAlgorithm)) { - LAC_INVALID_PARAM_LOG( - "Hash algorithms SHAKE-128 and SHAKE-256 " - "are not supported."); - return CPA_STATUS_UNSUPPORTED; - } - LAC_INVALID_PARAM_LOG("hashAlgorithm"); return CPA_STATUS_INVALID_PARAM; } switch (pHashSetupData->hashMode) { case CPA_CY_SYM_HASH_MODE_PLAIN: case CPA_CY_SYM_HASH_MODE_AUTH: case CPA_CY_SYM_HASH_MODE_NESTED: break; default: { LAC_INVALID_PARAM_LOG("hashMode"); return CPA_STATUS_INVALID_PARAM; } } if (LAC_HASH_ALG_MODE_NOT_SUPPORTED(pHashSetupData->hashAlgorithm, pHashSetupData->hashMode)) { - LAC_INVALID_PARAM_LOG("hashAlgorithm and hashMode combination"); - return CPA_STATUS_INVALID_PARAM; + LAC_UNSUPPORTED_PARAM_LOG( + "hashAlgorithm and hashMode combination"); + return CPA_STATUS_UNSUPPORTED; } LacSymQat_HashAlgLookupGet(instanceHandle, pHashSetupData->hashAlgorithm, &pHashAlgInfo); /* note: nested hash mode checks digest length against outer algorithm */ if ((CPA_CY_SYM_HASH_MODE_PLAIN == pHashSetupData->hashMode) || (CPA_CY_SYM_HASH_MODE_AUTH == pHashSetupData->hashMode)) { /* Check Digest Length is permitted by the algorithm */ if ((0 == pHashSetupData->digestResultLenInBytes) || (pHashSetupData->digestResultLenInBytes > pHashAlgInfo->digestLength)) { LAC_INVALID_PARAM_LOG("digestResultLenInBytes"); return CPA_STATUS_INVALID_PARAM; } } if (CPA_CY_SYM_HASH_MODE_AUTH == pHashSetupData->hashMode) { if (CPA_CY_SYM_HASH_AES_GCM == pHashSetupData->hashAlgorithm || CPA_CY_SYM_HASH_AES_GMAC == pHashSetupData->hashAlgorithm) { Cpa32U aadDataSize = 0; /* RFC 4106: Implementations MUST support a full-length - * 16-octet - * ICV, and MAY support 8 or 12 octet ICVs, and MUST NOT - * support - * other ICV lengths. */ + * 16-octet ICV, and MAY support 8 or 12 octet ICVs, and + * MUST NOT support other ICV lengths. 
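+ * (hence only ICV lengths of 8, 12 or 16 bytes pass the check below)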
*/ if ((pHashSetupData->digestResultLenInBytes != LAC_HASH_AES_GCM_ICV_SIZE_8) && (pHashSetupData->digestResultLenInBytes != LAC_HASH_AES_GCM_ICV_SIZE_12) && (pHashSetupData->digestResultLenInBytes != LAC_HASH_AES_GCM_ICV_SIZE_16)) { LAC_INVALID_PARAM_LOG("digestResultLenInBytes"); return CPA_STATUS_INVALID_PARAM; } /* ensure aadLen is within maximum limit imposed by QAT */ aadDataSize = pHashSetupData->authModeSetupData.aadLenInBytes; /* round the aad size to the multiple of GCM hash block * size. */ aadDataSize = LAC_ALIGN_POW2_ROUNDUP(aadDataSize, LAC_HASH_AES_GCM_BLOCK_SIZE); if (aadDataSize > ICP_QAT_FW_CCM_GCM_AAD_SZ_MAX && CPA_CY_SYM_HASH_AES_GMAC != pHashSetupData->hashAlgorithm) { LAC_INVALID_PARAM_LOG("aadLenInBytes"); return CPA_STATUS_INVALID_PARAM; } } else if (CPA_CY_SYM_HASH_AES_CCM == pHashSetupData->hashAlgorithm) { Cpa32U aadDataSize = 0; /* RFC 3610: Valid values are 4, 6, 8, 10, 12, 14, and * 16 octets */ if ((pHashSetupData->digestResultLenInBytes >= LAC_HASH_AES_CCM_ICV_SIZE_MIN) && (pHashSetupData->digestResultLenInBytes <= LAC_HASH_AES_CCM_ICV_SIZE_MAX)) { if ((pHashSetupData->digestResultLenInBytes & 0x01) != 0) { LAC_INVALID_PARAM_LOG( "digestResultLenInBytes must be a multiple of 2"); return CPA_STATUS_INVALID_PARAM; } } else { LAC_INVALID_PARAM_LOG("digestResultLenInBytes"); return CPA_STATUS_INVALID_PARAM; } /* ensure aadLen is within maximum limit imposed by QAT */ /* at the beginning of the buffer there is B0 block */ aadDataSize = LAC_HASH_AES_CCM_BLOCK_SIZE; /* then, if there is some 'a' data, the buffer will - * store encoded - * length of 'a' and 'a' itself */ + * store encoded length of 'a' and 'a' itself */ if (pHashSetupData->authModeSetupData.aadLenInBytes > 0) { /* as the QAT API puts the requirement on the * pAdditionalAuthData not to be bigger than 240 - * bytes then we - * just need 2 bytes to store encoded length of - * 'a' */ + * bytes then we just need 2 bytes to store + * encoded length of 'a' */ aadDataSize += sizeof(Cpa16U); aadDataSize += pHashSetupData->authModeSetupData .aadLenInBytes; } /* round the aad size to the multiple of CCM block * size.*/ aadDataSize = LAC_ALIGN_POW2_ROUNDUP(aadDataSize, LAC_HASH_AES_CCM_BLOCK_SIZE); if (aadDataSize > ICP_QAT_FW_CCM_GCM_AAD_SZ_MAX) { LAC_INVALID_PARAM_LOG("aadLenInBytes"); return CPA_STATUS_INVALID_PARAM; } } else if (CPA_CY_SYM_HASH_KASUMI_F9 == pHashSetupData->hashAlgorithm) { /* QAT-FW only supports 128 bit Integrity Key size for * Kasumi f9 * Ref: 3GPP TS 35.201 version 7.0.0 Release 7 */ if (pHashSetupData->authModeSetupData .authKeyLenInBytes != ICP_QAT_HW_KASUMI_KEY_SZ) { LAC_INVALID_PARAM_LOG("authKeyLenInBytes"); return CPA_STATUS_INVALID_PARAM; } } else if (CPA_CY_SYM_HASH_SNOW3G_UIA2 == pHashSetupData->hashAlgorithm) { /* QAT-FW only supports 128 bits Integrity Key size for * Snow3g */ if (pHashSetupData->authModeSetupData .authKeyLenInBytes != ICP_QAT_HW_SNOW_3G_UEA2_KEY_SZ) { LAC_INVALID_PARAM_LOG("authKeyLenInBytes"); return CPA_STATUS_INVALID_PARAM; } /* For Snow3g hash aad field contains IV - it needs to - * be 16 - * bytes long + * be 16 bytes long */ if (pHashSetupData->authModeSetupData.aadLenInBytes != ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ) { LAC_INVALID_PARAM_LOG("aadLenInBytes"); return CPA_STATUS_INVALID_PARAM; } } else if (CPA_CY_SYM_HASH_AES_XCBC == pHashSetupData->hashAlgorithm || CPA_CY_SYM_HASH_AES_CMAC == pHashSetupData->hashAlgorithm || CPA_CY_SYM_HASH_AES_CBC_MAC == pHashSetupData->hashAlgorithm) { /* ensure auth key len is valid (128-bit keys supported) */ if 
((pHashSetupData->authModeSetupData .authKeyLenInBytes != ICP_QAT_HW_AES_128_KEY_SZ)) { LAC_INVALID_PARAM_LOG("authKeyLenInBytes"); return CPA_STATUS_INVALID_PARAM; } } else if (CPA_CY_SYM_HASH_ZUC_EIA3 == pHashSetupData->hashAlgorithm) { /* QAT-FW only supports 128 bits Integrity Key size for * ZUC */ if (pHashSetupData->authModeSetupData .authKeyLenInBytes != ICP_QAT_HW_ZUC_3G_EEA3_KEY_SZ) { LAC_INVALID_PARAM_LOG("authKeyLenInBytes"); return CPA_STATUS_INVALID_PARAM; } /* For ZUC EIA3 hash aad field contains IV - it needs to - * be 16 - * bytes long + * be 16 bytes long */ if (pHashSetupData->authModeSetupData.aadLenInBytes != ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ) { LAC_INVALID_PARAM_LOG("aadLenInBytes"); return CPA_STATUS_INVALID_PARAM; } } else if (CPA_CY_SYM_HASH_POLY == pHashSetupData->hashAlgorithm) { if (pHashSetupData->digestResultLenInBytes != ICP_QAT_HW_SPC_CTR_SZ) { LAC_INVALID_PARAM_LOG("Digest Length for CCP"); return CPA_STATUS_INVALID_PARAM; } if (pHashSetupData->authModeSetupData.aadLenInBytes > ICP_QAT_FW_CCM_GCM_AAD_SZ_MAX) { LAC_INVALID_PARAM_LOG("AAD Length for CCP"); return CPA_STATUS_INVALID_PARAM; } } else { /* The key size must be less than or equal the block * length */ if (pHashSetupData->authModeSetupData .authKeyLenInBytes > pHashAlgInfo->blockLength) { LAC_INVALID_PARAM_LOG("authKeyLenInBytes"); return CPA_STATUS_INVALID_PARAM; } } /* when the key size is greater than 0 check pointer is not null */ if (CPA_CY_SYM_HASH_AES_CCM != pHashSetupData->hashAlgorithm && CPA_CY_SYM_HASH_AES_GCM != pHashSetupData->hashAlgorithm && pHashSetupData->authModeSetupData.authKeyLenInBytes > 0) { LAC_CHECK_NULL_PARAM( pHashSetupData->authModeSetupData.authKey); } } else if (CPA_CY_SYM_HASH_MODE_NESTED == pHashSetupData->hashMode) { if (!CPA_BITMAP_BIT_TEST(capInfo.hashes, pHashSetupData->nestedModeSetupData .outerHashAlgorithm)) { - /* Ensure SHAKE algorithms are not supported */ - if ((CPA_CY_SYM_HASH_SHAKE_128 == - pHashSetupData->nestedModeSetupData - .outerHashAlgorithm) || - (CPA_CY_SYM_HASH_SHAKE_256 == - pHashSetupData->nestedModeSetupData - .outerHashAlgorithm)) { - LAC_INVALID_PARAM_LOG( - "Hash algorithms SHAKE-128 and SHAKE-256 " - "are not supported."); - return CPA_STATUS_UNSUPPORTED; - } - LAC_INVALID_PARAM_LOG("outerHashAlgorithm"); return CPA_STATUS_INVALID_PARAM; } if (LAC_HASH_ALG_MODE_NOT_SUPPORTED( pHashSetupData->nestedModeSetupData.outerHashAlgorithm, pHashSetupData->hashMode)) { LAC_INVALID_PARAM_LOG( "outerHashAlgorithm and hashMode combination"); return CPA_STATUS_INVALID_PARAM; } LacSymQat_HashAlgLookupGet( instanceHandle, pHashSetupData->nestedModeSetupData.outerHashAlgorithm, &pOuterHashAlgInfo); /* Check Digest Length is permitted by the algorithm */ if ((0 == pHashSetupData->digestResultLenInBytes) || (pHashSetupData->digestResultLenInBytes > pOuterHashAlgInfo->digestLength)) { LAC_INVALID_PARAM_LOG("digestResultLenInBytes"); return CPA_STATUS_INVALID_PARAM; } if (pHashSetupData->nestedModeSetupData.innerPrefixLenInBytes > LAC_MAX_INNER_OUTER_PREFIX_SIZE_BYTES) { LAC_INVALID_PARAM_LOG("innerPrefixLenInBytes"); return CPA_STATUS_INVALID_PARAM; } if (pHashSetupData->nestedModeSetupData.innerPrefixLenInBytes > 0) { LAC_CHECK_NULL_PARAM(pHashSetupData->nestedModeSetupData .pInnerPrefixData); } if (pHashSetupData->nestedModeSetupData.outerPrefixLenInBytes > LAC_MAX_INNER_OUTER_PREFIX_SIZE_BYTES) { LAC_INVALID_PARAM_LOG("outerPrefixLenInBytes"); return CPA_STATUS_INVALID_PARAM; } if (pHashSetupData->nestedModeSetupData.outerPrefixLenInBytes > 0) { 
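/* a non-zero outer prefix length requires a non-NULL data pointer */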
LAC_CHECK_NULL_PARAM(pHashSetupData->nestedModeSetupData .pOuterPrefixData); } } return CPA_STATUS_SUCCESS; } /** @ingroup LacHash */ CpaStatus LacHash_PerformParamCheck(CpaInstanceHandle instanceHandle, lac_session_desc_t *pSessionDesc, const CpaCySymOpData *pOpData, Cpa64U srcPktSize, const CpaBoolean *pVerifyResult) { CpaStatus status = CPA_STATUS_SUCCESS; lac_sym_qat_hash_alg_info_t *pHashAlgInfo = NULL; + CpaBoolean digestIsAppended = pSessionDesc->digestIsAppended; + CpaBoolean digestVerify = pSessionDesc->digestVerify; + CpaCySymOp symOperation = pSessionDesc->symOperation; + CpaCySymHashAlgorithm hashAlgorithm = pSessionDesc->hashAlgorithm; /* digestVerify and digestIsAppended on Hash-Only operation not * supported */ - if (pSessionDesc->digestIsAppended && pSessionDesc->digestVerify && - (CPA_CY_SYM_OP_HASH == pSessionDesc->symOperation)) { + if (digestIsAppended && digestVerify && + (CPA_CY_SYM_OP_HASH == symOperation)) { LAC_INVALID_PARAM_LOG( "digestVerify and digestIsAppended set " "on Hash-Only operation is not supported"); return CPA_STATUS_INVALID_PARAM; } /* check the digest result pointer */ if ((CPA_CY_SYM_PACKET_TYPE_PARTIAL != pOpData->packetType) && - !pSessionDesc->digestIsAppended && - (NULL == pOpData->pDigestResult)) { + !digestIsAppended && (NULL == pOpData->pDigestResult)) { LAC_INVALID_PARAM_LOG("pDigestResult is NULL"); return CPA_STATUS_INVALID_PARAM; } /* * Check if the pVerifyResult pointer is not null for hash operation - * when - * the packet is the last one and user has set verifyDigest flag + * when the packet is the last one and user has set verifyDigest flag * Also, this is only needed for synchronous operation, so check if the * callback pointer is the internal synchronous one rather than a user- * supplied one. */ - if ((CPA_TRUE == pSessionDesc->digestVerify) && + if ((CPA_TRUE == digestVerify) && (CPA_CY_SYM_PACKET_TYPE_PARTIAL != pOpData->packetType) && (LacSync_GenBufListVerifyCb == pSessionDesc->pSymCb)) { if (NULL == pVerifyResult) { LAC_INVALID_PARAM_LOG( "Null pointer pVerifyResult for hash op"); return CPA_STATUS_INVALID_PARAM; } } /* verify start offset + messageLenToDigest is inside the source packet. * this also verifies that the start offset is inside the packet * Note: digest is specified as a pointer therefore it can be * written anywhere so we cannot check for this being inside a buffer * CCM/GCM specify the auth region using just the cipher params as this * region is the same for auth and cipher.
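 * (editorial example, hypothetical numbers: with srcPktSize = 1024, an
 * offset of 960 plus messageLenToHashInBytes = 128 fails the non-CCM/GCM
 * check below, since 960 + 128 > 1024)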
It is not checked here */ - if ((CPA_CY_SYM_HASH_AES_CCM == pSessionDesc->hashAlgorithm) || - (CPA_CY_SYM_HASH_AES_GCM == pSessionDesc->hashAlgorithm)) { + if ((CPA_CY_SYM_HASH_AES_CCM == hashAlgorithm) || + (CPA_CY_SYM_HASH_AES_GCM == hashAlgorithm)) { /* ensure AAD data pointer is non-NULL if AAD len > 0 */ if ((pSessionDesc->aadLenInBytes > 0) && (NULL == pOpData->pAdditionalAuthData)) { LAC_INVALID_PARAM_LOG("pAdditionalAuthData is NULL"); return CPA_STATUS_INVALID_PARAM; } } else { if ((pOpData->hashStartSrcOffsetInBytes + pOpData->messageLenToHashInBytes) > srcPktSize) { LAC_INVALID_PARAM_LOG( "hashStartSrcOffsetInBytes + " "messageLenToHashInBytes > Src Buffer Packet Length"); return CPA_STATUS_INVALID_PARAM; } } /* For Snow3g & ZUC hash pAdditionalAuthData field * of OpData should contain IV */ - if ((CPA_CY_SYM_HASH_SNOW3G_UIA2 == pSessionDesc->hashAlgorithm) || - (CPA_CY_SYM_HASH_ZUC_EIA3 == pSessionDesc->hashAlgorithm)) { + if ((CPA_CY_SYM_HASH_SNOW3G_UIA2 == hashAlgorithm) || + (CPA_CY_SYM_HASH_ZUC_EIA3 == hashAlgorithm)) { if (NULL == pOpData->pAdditionalAuthData) { LAC_INVALID_PARAM_LOG("pAdditionalAuthData is NULL"); return CPA_STATUS_INVALID_PARAM; } } /* partial packets need to be multiples of the algorithm block size in - * hash - * only mode (except for final partial packet) */ + * hash only mode (except for final partial packet) */ if ((CPA_CY_SYM_PACKET_TYPE_PARTIAL == pOpData->packetType) && - (CPA_CY_SYM_OP_HASH == pSessionDesc->symOperation)) { + (CPA_CY_SYM_OP_HASH == symOperation)) { LacSymQat_HashAlgLookupGet(instanceHandle, - pSessionDesc->hashAlgorithm, + hashAlgorithm, &pHashAlgInfo); /* check if the message is a multiple of the block size. */ if ((pOpData->messageLenToHashInBytes % pHashAlgInfo->blockLength) != 0) { LAC_INVALID_PARAM_LOG( "messageLenToHashInBytes not block size"); return CPA_STATUS_INVALID_PARAM; } } return status; } diff --git a/sys/dev/qat/qat_api/common/crypto/sym/lac_sym_queue.c b/sys/dev/qat/qat_api/common/crypto/sym/lac_sym_queue.c index 977fb0b84d0b..43d2f44474ee 100644 --- a/sys/dev/qat/qat_api/common/crypto/sym/lac_sym_queue.c +++ b/sys/dev/qat/qat_api/common/crypto/sym/lac_sym_queue.c @@ -1,165 +1,158 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright(c) 2007-2022 Intel Corporation */ /* $FreeBSD$ */ /** *************************************************************************** * @file lac_sym_queue.c Functions for sending/queuing symmetric requests * * @ingroup LacSym * ***************************************************************************/ /* ******************************************************************************* * Include public/global header files ******************************************************************************* */ #include "cpa.h" #include "cpa_cy_sym.h" /* ******************************************************************************* * Include private header files ******************************************************************************* */ #include "icp_accel_devices.h" #include "icp_adf_init.h" #include "icp_adf_debug.h" #include "icp_adf_transport.h" #include "lac_sym_queue.h" #include "lac_sym_qat.h" #include "lac_session.h" #include "lac_sym.h" #include "lac_log.h" #include "icp_qat_fw_la.h" #include "lac_sal_types_crypto.h" #define GetSingleBitFromByte(byte, bit) ((byte) & (1 << (bit))) /* ******************************************************************************* * Define public/global function definitions 
******************************************************************************* */ CpaStatus LacSymQueue_RequestSend(const CpaInstanceHandle instanceHandle, lac_sym_bulk_cookie_t *pRequest, lac_session_desc_t *pSessionDesc) { CpaStatus status = CPA_STATUS_SUCCESS; CpaBoolean enqueued = CPA_FALSE; sal_crypto_service_t *pService = (sal_crypto_service_t *)instanceHandle; /* Enqueue the message instead of sending directly if: * (i) a blocking operation is in progress * (ii) there are previous requests already in the queue */ if ((CPA_FALSE == pSessionDesc->nonBlockingOpsInProgress) || (NULL != pSessionDesc->pRequestQueueTail)) { - if (CPA_STATUS_SUCCESS != - LAC_SPINLOCK(&pSessionDesc->requestQueueLock)) { - LAC_LOG_ERROR("Failed to lock request queue"); - return CPA_STATUS_RESOURCE; - } + LAC_SPINLOCK(&pSessionDesc->requestQueueLock); /* Re-check blockingOpsInProgress and pRequestQueueTail in case * either * changed before the lock was acquired. The lock is shared * with * the callback context which drains this queue */ if ((CPA_FALSE == pSessionDesc->nonBlockingOpsInProgress) || (NULL != pSessionDesc->pRequestQueueTail)) { /* Enqueue the message and exit */ /* The FIFO queue is made up of a head and tail pointer. * The head pointer points to the first/oldest entry * in the queue, and the tail pointer points to the * last/newest * entry in the queue */ if (NULL != pSessionDesc->pRequestQueueTail) { /* Queue is non-empty. Add this request to the * list */ pSessionDesc->pRequestQueueTail->pNext = pRequest; } else { /* Queue is empty. Initialise the head pointer * as well */ pSessionDesc->pRequestQueueHead = pRequest; } pSessionDesc->pRequestQueueTail = pRequest; /* request is queued, don't send to QAT here */ enqueued = CPA_TRUE; } - if (CPA_STATUS_SUCCESS != - LAC_SPINUNLOCK(&pSessionDesc->requestQueueLock)) { - LAC_LOG_ERROR("Failed to unlock request queue"); - } + LAC_SPINUNLOCK(&pSessionDesc->requestQueueLock); } if (CPA_FALSE == enqueued) { /* If we send a partial packet request, set the * blockingOpsInProgress * flag for the session to indicate that subsequent requests * must be * queued up until this request completes * * @assumption * If we have got here it means that there were no previous * blocking * operations in progress and, since multiple partial packet * requests * on a given session cannot be issued concurrently, there * should be * no need for a critical section around the following code */ if (CPA_CY_SYM_PACKET_TYPE_FULL != pRequest->pOpData->packetType) { /* Select blocking operations which this request will * complete */ pSessionDesc->nonBlockingOpsInProgress = CPA_FALSE; } /* At this point, we're clear to send the request. For cipher * requests, * we need to check if the session IV needs to be updated.
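 * (for ARC4 the saved state is the initial ARC4 state rather than an IV,
 * as the memcpy below shows)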
This * can * only be done when no other partials are in flight for this * session, * to ensure the cipherPartialOpState buffer in the session * descriptor * is not currently in use */ if (CPA_TRUE == pRequest->updateSessionIvOnSend) { if (LAC_CIPHER_IS_ARC4(pSessionDesc->cipherAlgorithm)) { memcpy(pSessionDesc->cipherPartialOpState, pSessionDesc->cipherARC4InitialState, LAC_CIPHER_ARC4_STATE_LEN_BYTES); } else { memcpy(pSessionDesc->cipherPartialOpState, pRequest->pOpData->pIv, pRequest->pOpData->ivLenInBytes); } } /* Send to QAT */ status = icp_adf_transPutMsg(pService->trans_handle_sym_tx, (void *)&(pRequest->qatMsg), LAC_QAT_SYM_REQ_SZ_LW); /* if we fail to send the request, we need to change * nonBlockingOpsInProgress * to CPA_TRUE */ if ((CPA_STATUS_SUCCESS != status) && (CPA_CY_SYM_PACKET_TYPE_FULL != pRequest->pOpData->packetType)) { pSessionDesc->nonBlockingOpsInProgress = CPA_TRUE; } } return status; } diff --git a/sys/dev/qat/qat_api/common/crypto/sym/qat/lac_sym_qat.c b/sys/dev/qat/qat_api/common/crypto/sym/qat/lac_sym_qat.c index c55da0a0d531..61cb7044ada6 100644 --- a/sys/dev/qat/qat_api/common/crypto/sym/qat/lac_sym_qat.c +++ b/sys/dev/qat/qat_api/common/crypto/sym/qat/lac_sym_qat.c @@ -1,227 +1,332 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright(c) 2007-2022 Intel Corporation */ /* $FreeBSD$ */ /** ***************************************************************************** * @file lac_sym_qat.c Interfaces for populating the symmetric qat structures * * @ingroup LacSymQat * *****************************************************************************/ /* ******************************************************************************* * Include public/global header files ******************************************************************************* */ #include "cpa.h" /* ******************************************************************************* * Include private header files ******************************************************************************* */ #include "icp_accel_devices.h" #include "icp_adf_cfg.h" #include "lac_log.h" #include "lac_sym.h" #include "lac_sym_qat.h" #include "lac_sal_types_crypto.h" #include "sal_string_parse.h" #include "lac_sym_key.h" #include "lac_sym_qat_hash_defs_lookup.h" +#include "lac_sym_qat_constants_table.h" #include "lac_sym_qat_cipher.h" #include "lac_sym_qat_hash.h" #define EMBEDDED_CIPHER_KEY_MAX_SIZE 16 static void LacSymQat_SymLogSliceHangError(icp_qat_fw_la_cmd_id_t symCmdId) { Cpa8U cmdId = symCmdId; switch (cmdId) { case ICP_QAT_FW_LA_CMD_CIPHER: case ICP_QAT_FW_LA_CMD_CIPHER_PRE_COMP: LAC_LOG_ERROR("slice hang detected on CPM cipher slice."); break; case ICP_QAT_FW_LA_CMD_AUTH: case ICP_QAT_FW_LA_CMD_AUTH_PRE_COMP: LAC_LOG_ERROR("slice hang detected on CPM auth slice."); break; case ICP_QAT_FW_LA_CMD_CIPHER_HASH: case ICP_QAT_FW_LA_CMD_HASH_CIPHER: case ICP_QAT_FW_LA_CMD_SSL3_KEY_DERIVE: case ICP_QAT_FW_LA_CMD_TLS_V1_1_KEY_DERIVE: case ICP_QAT_FW_LA_CMD_TLS_V1_2_KEY_DERIVE: case ICP_QAT_FW_LA_CMD_MGF1: default: LAC_LOG_ERROR( "slice hang detected on CPM cipher or auth slice."); } return; } /* sym crypto response handlers */ static sal_qat_resp_handler_func_t respHandlerSymTbl[ICP_QAT_FW_LA_CMD_DELIMITER]; void LacSymQat_SymRespHandler(void *pRespMsg) { Cpa8U lacCmdId = 0; void *pOpaqueData = NULL; icp_qat_fw_la_resp_t *pRespMsgFn = NULL; Cpa8U opStatus = ICP_QAT_FW_COMN_STATUS_FLAG_OK; Cpa8U comnErr = ERR_CODE_NO_ERROR; pRespMsgFn = (icp_qat_fw_la_resp_t *)pRespMsg; LAC_MEM_SHARED_READ_TO_PTR(pRespMsgFn->opaque_data,
pOpaqueData); lacCmdId = pRespMsgFn->comn_resp.cmd_id; opStatus = pRespMsgFn->comn_resp.comn_status; comnErr = pRespMsgFn->comn_resp.comn_error.s.comn_err_code; /* log the slice hang and endpoint push/pull error inside the response */ if (ERR_CODE_SSM_ERROR == (Cpa8S)comnErr) { LacSymQat_SymLogSliceHangError(lacCmdId); } else if (ERR_CODE_ENDPOINT_ERROR == (Cpa8S)comnErr) { LAC_LOG_ERROR("The PCIe End Point Push/Pull or" " TI/RI Parity error detected."); } /* call the response message handler registered for the command ID */ respHandlerSymTbl[lacCmdId]((icp_qat_fw_la_cmd_id_t)lacCmdId, pOpaqueData, (icp_qat_fw_comn_flags)opStatus); } CpaStatus LacSymQat_Init(CpaInstanceHandle instanceHandle) { CpaStatus status = CPA_STATUS_SUCCESS; + /* Initialize the SHRAM constants table */ + LacSymQat_ConstantsInitLookupTables(instanceHandle); + /* Initialise the Hash lookup table */ status = LacSymQat_HashLookupInit(instanceHandle); return status; } void LacSymQat_RespHandlerRegister(icp_qat_fw_la_cmd_id_t lacCmdId, sal_qat_resp_handler_func_t pCbHandler) { if (lacCmdId >= ICP_QAT_FW_LA_CMD_DELIMITER) { QAT_UTILS_LOG("Invalid Command ID\n"); return; } /* set the response handler for the command ID */ respHandlerSymTbl[lacCmdId] = pCbHandler; } void LacSymQat_LaPacketCommandFlagSet(Cpa32U qatPacketType, icp_qat_fw_la_cmd_id_t laCmdId, CpaCySymCipherAlgorithm cipherAlgorithm, Cpa16U *pLaCommandFlags, Cpa32U ivLenInBytes) { - /* For Chacha ciphers set command flag as partial none to proceed + /* For SM4/Chacha ciphers set command flag as partial none to proceed * with stateless processing */ - if (LAC_CIPHER_IS_CHACHA(cipherAlgorithm) || - LAC_CIPHER_IS_SM4(cipherAlgorithm)) { + if (LAC_CIPHER_IS_SM4(cipherAlgorithm) || + LAC_CIPHER_IS_CHACHA(cipherAlgorithm)) { ICP_QAT_FW_LA_PARTIAL_SET(*pLaCommandFlags, ICP_QAT_FW_LA_PARTIAL_NONE); return; } ICP_QAT_FW_LA_PARTIAL_SET(*pLaCommandFlags, qatPacketType); /* For ECB-mode ciphers, IV is NULL so update-state flag * must be disabled always. 
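 * (ECB keeps no chaining state between requests, so there is nothing
 * to carry over)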
* For all other ciphers and auth * update state is disabled for full packets and final partials */ - if (((laCmdId != ICP_QAT_FW_LA_CMD_AUTH) && - LAC_CIPHER_IS_ECB_MODE(cipherAlgorithm)) || - (ICP_QAT_FW_LA_PARTIAL_NONE == qatPacketType) || - (ICP_QAT_FW_LA_PARTIAL_END == qatPacketType)) { + if ((ICP_QAT_FW_LA_PARTIAL_NONE == qatPacketType) || + (ICP_QAT_FW_LA_PARTIAL_END == qatPacketType) || + ((laCmdId != ICP_QAT_FW_LA_CMD_AUTH) && + LAC_CIPHER_IS_ECB_MODE(cipherAlgorithm))) { ICP_QAT_FW_LA_UPDATE_STATE_SET(*pLaCommandFlags, ICP_QAT_FW_LA_NO_UPDATE_STATE); } /* For first or middle partials set the update state command flag */ else { ICP_QAT_FW_LA_UPDATE_STATE_SET(*pLaCommandFlags, ICP_QAT_FW_LA_UPDATE_STATE); if (laCmdId == ICP_QAT_FW_LA_CMD_AUTH) { /* For hash only partial - verify and return auth result * are * disabled */ ICP_QAT_FW_LA_RET_AUTH_SET( *pLaCommandFlags, ICP_QAT_FW_LA_NO_RET_AUTH_RES); ICP_QAT_FW_LA_CMP_AUTH_SET( *pLaCommandFlags, ICP_QAT_FW_LA_NO_CMP_AUTH_RES); } } if ((LAC_CIPHER_IS_GCM(cipherAlgorithm)) && (LAC_CIPHER_IV_SIZE_GCM_12 == ivLenInBytes)) { ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET( *pLaCommandFlags, ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS); } } void LacSymQat_packetTypeGet(CpaCySymPacketType packetType, CpaCySymPacketType packetState, Cpa32U *pQatPacketType) { + switch (packetType) { /* partial */ - if (CPA_CY_SYM_PACKET_TYPE_PARTIAL == packetType) { + case CPA_CY_SYM_PACKET_TYPE_PARTIAL: /* if the previous state was full, then this is the first packet */ if (CPA_CY_SYM_PACKET_TYPE_FULL == packetState) { *pQatPacketType = ICP_QAT_FW_LA_PARTIAL_START; } else { *pQatPacketType = ICP_QAT_FW_LA_PARTIAL_MID; } - } + break; + /* final partial */ - else if (CPA_CY_SYM_PACKET_TYPE_LAST_PARTIAL == packetType) { + case CPA_CY_SYM_PACKET_TYPE_LAST_PARTIAL: *pQatPacketType = ICP_QAT_FW_LA_PARTIAL_END; - } + break; + /* full packet - CPA_CY_SYM_PACKET_TYPE_FULL */ - else { + default: *pQatPacketType = ICP_QAT_FW_LA_PARTIAL_NONE; } } void LacSymQat_LaSetDefaultFlags(icp_qat_fw_serv_specif_flags *laCmdFlags, CpaCySymOp symOp) { ICP_QAT_FW_LA_PARTIAL_SET(*laCmdFlags, ICP_QAT_FW_LA_PARTIAL_NONE); ICP_QAT_FW_LA_UPDATE_STATE_SET(*laCmdFlags, ICP_QAT_FW_LA_NO_UPDATE_STATE); if (symOp != CPA_CY_SYM_OP_CIPHER) { ICP_QAT_FW_LA_RET_AUTH_SET(*laCmdFlags, ICP_QAT_FW_LA_RET_AUTH_RES); } else { ICP_QAT_FW_LA_RET_AUTH_SET(*laCmdFlags, ICP_QAT_FW_LA_NO_RET_AUTH_RES); } ICP_QAT_FW_LA_CMP_AUTH_SET(*laCmdFlags, ICP_QAT_FW_LA_NO_CMP_AUTH_RES); ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET( *laCmdFlags, ICP_QAT_FW_LA_GCM_IV_LEN_NOT_12_OCTETS); } + +CpaBoolean +LacSymQat_UseSymConstantsTable(lac_session_desc_t *pSession, + Cpa8U *pCipherOffset, + Cpa8U *pHashOffset) +{ + + CpaBoolean useOptimisedContentDesc = CPA_FALSE; + CpaBoolean useSHRAMConstants = CPA_FALSE; + + *pCipherOffset = 0; + *pHashOffset = 0; + + /* for chaining can we use the optimised content descriptor */ + if (pSession->laCmdId == ICP_QAT_FW_LA_CMD_CIPHER_HASH || + pSession->laCmdId == ICP_QAT_FW_LA_CMD_HASH_CIPHER) { + useOptimisedContentDesc = + LacSymQat_UseOptimisedContentDesc(pSession); + } + + /* Cipher-only case or chaining */ + if (pSession->laCmdId == ICP_QAT_FW_LA_CMD_CIPHER || + useOptimisedContentDesc) { + icp_qat_hw_cipher_algo_t algorithm; + icp_qat_hw_cipher_mode_t mode; + icp_qat_hw_cipher_dir_t dir; + icp_qat_hw_cipher_convert_t key_convert; + + if (pSession->cipherKeyLenInBytes > + sizeof(icp_qat_fw_comn_req_hdr_cd_pars_t)) { + return CPA_FALSE; + } + + LacSymQat_CipherGetCfgData( + pSession, &algorithm, &mode, &dir,
&key_convert); + + /* Check if cipher config is available in table. */ + LacSymQat_ConstantsGetCipherOffset(pSession->pInstance, + algorithm, + mode, + dir, + key_convert, + pCipherOffset); + if (*pCipherOffset > 0) { + useSHRAMConstants = CPA_TRUE; + } else { + useSHRAMConstants = CPA_FALSE; + } + } + + /* hash only case or when chaining, cipher must be found in SHRAM table + * for + * optimised CD case */ + if (pSession->laCmdId == ICP_QAT_FW_LA_CMD_AUTH || + (useOptimisedContentDesc && useSHRAMConstants)) { + icp_qat_hw_auth_algo_t algorithm; + CpaBoolean nested; + + if (pSession->digestVerify) { + return CPA_FALSE; + } + + if ((!(useOptimisedContentDesc && useSHRAMConstants)) && + (pSession->qatHashMode == ICP_QAT_HW_AUTH_MODE1)) { + /* we can only use the SHA1-mode1 in the SHRAM constants + * table when + * we are using the optimised content desc */ + return CPA_FALSE; + } + + LacSymQat_HashGetCfgData(pSession->pInstance, + pSession->qatHashMode, + pSession->hashMode, + pSession->hashAlgorithm, + &algorithm, + &nested); + + /* Check if config data is available in table. */ + LacSymQat_ConstantsGetAuthOffset(pSession->pInstance, + algorithm, + pSession->qatHashMode, + nested, + pHashOffset); + if (*pHashOffset > 0) { + useSHRAMConstants = CPA_TRUE; + } else { + useSHRAMConstants = CPA_FALSE; + } + } + + return useSHRAMConstants; +} + +CpaBoolean +LacSymQat_UseOptimisedContentDesc(lac_session_desc_t *pSession) +{ + return CPA_FALSE; +} diff --git a/sys/dev/qat/qat_api/common/crypto/sym/qat/lac_sym_qat_cipher.c b/sys/dev/qat/qat_api/common/crypto/sym/qat/lac_sym_qat_cipher.c index b958d3723703..5c554efd61a1 100644 --- a/sys/dev/qat/qat_api/common/crypto/sym/qat/lac_sym_qat_cipher.c +++ b/sys/dev/qat/qat_api/common/crypto/sym/qat/lac_sym_qat_cipher.c @@ -1,889 +1,1010 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright(c) 2007-2022 Intel Corporation */ /* $FreeBSD$ */ /** *************************************************************************** * @file lac_sym_qat_cipher.c QAT-related support functions for Cipher * * @ingroup LacSymQat_Cipher * * @description Functions to support the QAT related operations for Cipher ***************************************************************************/ /* ******************************************************************************* * Include public/global header files ******************************************************************************* */ #include "cpa.h" #include "icp_accel_devices.h" #include "icp_adf_debug.h" #include "lac_sym_qat.h" #include "lac_sym_qat_cipher.h" #include "lac_mem.h" #include "lac_common.h" #include "cpa_cy_sym.h" #include "lac_sym_qat.h" #include "lac_sym_cipher_defs.h" #include "icp_qat_hw.h" #include "icp_qat_fw_la.h" +#include "sal_hw_gen.h" + +#define LAC_UNUSED_POS_MASK 0x3 /***************************************************************************** * Internal data *****************************************************************************/ typedef enum _icp_qat_hw_key_depend { IS_KEY_DEP_NO = 0, IS_KEY_DEP_YES, } icp_qat_hw_key_depend; /* LAC_CIPHER_IS_XTS_MODE */ static const uint8_t key_size_xts[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ICP_QAT_HW_CIPHER_ALGO_AES128, // ICP_QAT_HW_AES_128_XTS_KEY_SZ + ICP_QAT_HW_CIPHER_ALGO_AES128, /* ICP_QAT_HW_AES_128_XTS_KEY_SZ */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ICP_QAT_HW_CIPHER_ALGO_AES256 // ICP_QAT_HW_AES_256_XTS_KEY_SZ +
ICP_QAT_HW_CIPHER_ALGO_AES256 /* ICP_QAT_HW_AES_256_XTS_KEY_SZ */ }; /* LAC_CIPHER_IS_AES */ static const uint8_t key_size_aes[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ICP_QAT_HW_CIPHER_ALGO_AES128, // ICP_QAT_HW_AES_128_KEY_SZ + ICP_QAT_HW_CIPHER_ALGO_AES128, /* ICP_QAT_HW_AES_128_KEY_SZ */ 0, 0, 0, 0, 0, 0, 0, - ICP_QAT_HW_CIPHER_ALGO_AES192, // ICP_QAT_HW_AES_192_KEY_SZ + ICP_QAT_HW_CIPHER_ALGO_AES192, /* ICP_QAT_HW_AES_192_KEY_SZ */ 0, 0, 0, 0, 0, 0, 0, - ICP_QAT_HW_CIPHER_ALGO_AES256 // ICP_QAT_HW_AES_256_KEY_SZ + ICP_QAT_HW_CIPHER_ALGO_AES256 /* ICP_QAT_HW_AES_256_KEY_SZ */ }; /* LAC_CIPHER_IS_AES_F8 */ static const uint8_t key_size_f8[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ICP_QAT_HW_CIPHER_ALGO_AES128, // ICP_QAT_HW_AES_128_F8_KEY_SZ - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, + ICP_QAT_HW_CIPHER_ALGO_AES128, /* ICP_QAT_HW_AES_128_F8_KEY_SZ */ 0, 0, 0, 0, 0, 0, - ICP_QAT_HW_CIPHER_ALGO_AES192, // ICP_QAT_HW_AES_192_F8_KEY_SZ 0, 0, 0, 0, 0, 0, 0, 0, 0, + ICP_QAT_HW_CIPHER_ALGO_AES192, /* ICP_QAT_HW_AES_192_F8_KEY_SZ */ 0, 0, - 0, - 0, - 0, - 0, - ICP_QAT_HW_CIPHER_ALGO_AES256 // ICP_QAT_HW_AES_256_F8_KEY_SZ -}; -/* LAC_CIPHER_IS_SM4 */ -static const uint8_t key_size_sm4[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, - 0, - 0, - ICP_QAT_HW_CIPHER_ALGO_SM4 // ICP_QAT_HW_SM4_KEY_SZ + ICP_QAT_HW_CIPHER_ALGO_AES256 /* ICP_QAT_HW_AES_256_F8_KEY_SZ */ }; typedef struct _icp_qat_hw_cipher_info { icp_qat_hw_cipher_algo_t algorithm; icp_qat_hw_cipher_mode_t mode; icp_qat_hw_cipher_convert_t key_convert[2]; icp_qat_hw_cipher_dir_t dir[2]; icp_qat_hw_key_depend isKeyLenDepend; const uint8_t *pAlgByKeySize; } icp_qat_hw_cipher_info; -static const icp_qat_hw_cipher_info icp_qat_alg_info[] = - { - /* CPA_CY_SYM_CIPHER_NULL */ - { - ICP_QAT_HW_CIPHER_ALGO_NULL, - ICP_QAT_HW_CIPHER_ECB_MODE, - { ICP_QAT_HW_CIPHER_NO_CONVERT, ICP_QAT_HW_CIPHER_NO_CONVERT }, - { ICP_QAT_HW_CIPHER_ENCRYPT, ICP_QAT_HW_CIPHER_DECRYPT }, - IS_KEY_DEP_NO, - NULL, - }, - /* CPA_CY_SYM_CIPHER_ARC4 */ - { - ICP_QAT_HW_CIPHER_ALGO_ARC4, - ICP_QAT_HW_CIPHER_ECB_MODE, - { ICP_QAT_HW_CIPHER_NO_CONVERT, ICP_QAT_HW_CIPHER_NO_CONVERT }, - /* Streaming ciphers are a special case. Decrypt = encrypt */ - { ICP_QAT_HW_CIPHER_ENCRYPT, ICP_QAT_HW_CIPHER_ENCRYPT }, - IS_KEY_DEP_NO, - NULL, - }, - /* CPA_CY_SYM_CIPHER_AES_ECB */ - { - ICP_QAT_HW_CIPHER_ALGO_AES128, - ICP_QAT_HW_CIPHER_ECB_MODE, - /* AES decrypt key needs to be reversed. Instead of reversing the key - * at session registration, it is instead reversed on-the-fly by - * setting the KEY_CONVERT bit here - */ - { ICP_QAT_HW_CIPHER_NO_CONVERT, ICP_QAT_HW_CIPHER_KEY_CONVERT }, - { ICP_QAT_HW_CIPHER_ENCRYPT, ICP_QAT_HW_CIPHER_DECRYPT }, - IS_KEY_DEP_YES, - key_size_aes, - }, - /* CPA_CY_SYM_CIPHER_AES_CBC */ - { - ICP_QAT_HW_CIPHER_ALGO_AES128, - ICP_QAT_HW_CIPHER_CBC_MODE, - /* AES decrypt key needs to be reversed. Instead of reversing the key - * at session registration, it is instead reversed on-the-fly by - * setting the KEY_CONVERT bit here - */ - { ICP_QAT_HW_CIPHER_NO_CONVERT, ICP_QAT_HW_CIPHER_KEY_CONVERT }, - { ICP_QAT_HW_CIPHER_ENCRYPT, ICP_QAT_HW_CIPHER_DECRYPT }, - IS_KEY_DEP_YES, - key_size_aes, - }, - /* CPA_CY_SYM_CIPHER_AES_CTR */ - { - ICP_QAT_HW_CIPHER_ALGO_AES128, - ICP_QAT_HW_CIPHER_CTR_MODE, - /* AES decrypt key needs to be reversed. 
Instead of reversing the key - * at session registration, it is instead reversed on-the-fly by - * setting the KEY_CONVERT bit here - */ - { ICP_QAT_HW_CIPHER_NO_CONVERT, ICP_QAT_HW_CIPHER_NO_CONVERT }, - /* Streaming ciphers are a special case. Decrypt = encrypt - * Overriding default values previously set for AES - */ - { ICP_QAT_HW_CIPHER_ENCRYPT, ICP_QAT_HW_CIPHER_ENCRYPT }, - IS_KEY_DEP_YES, - key_size_aes, - }, - /* CPA_CY_SYM_CIPHER_AES_CCM */ - { - ICP_QAT_HW_CIPHER_ALGO_AES128, - ICP_QAT_HW_CIPHER_CTR_MODE, - /* AES decrypt key needs to be reversed. Instead of reversing the key - * at session registration, it is instead reversed on-the-fly by - * setting the KEY_CONVERT bit here - */ - { ICP_QAT_HW_CIPHER_NO_CONVERT, ICP_QAT_HW_CIPHER_NO_CONVERT }, - /* Streaming ciphers are a special case. Decrypt = encrypt - * Overriding default values previously set for AES - */ - { ICP_QAT_HW_CIPHER_ENCRYPT, ICP_QAT_HW_CIPHER_ENCRYPT }, - IS_KEY_DEP_YES, - key_size_aes, - }, - /* CPA_CY_SYM_CIPHER_AES_GCM */ - { - ICP_QAT_HW_CIPHER_ALGO_AES128, - ICP_QAT_HW_CIPHER_CTR_MODE, - /* AES decrypt key needs to be reversed. Instead of reversing the key - * at session registration, it is instead reversed on-the-fly by - * setting the KEY_CONVERT bit here - */ - { ICP_QAT_HW_CIPHER_NO_CONVERT, ICP_QAT_HW_CIPHER_NO_CONVERT }, - /* Streaming ciphers are a special case. Decrypt = encrypt - * Overriding default values previously set for AES - */ - { ICP_QAT_HW_CIPHER_ENCRYPT, ICP_QAT_HW_CIPHER_ENCRYPT }, - IS_KEY_DEP_YES, - key_size_aes, - }, - /* CPA_CY_SYM_CIPHER_DES_ECB */ - { - ICP_QAT_HW_CIPHER_ALGO_DES, - ICP_QAT_HW_CIPHER_ECB_MODE, - { ICP_QAT_HW_CIPHER_NO_CONVERT, ICP_QAT_HW_CIPHER_NO_CONVERT }, - { ICP_QAT_HW_CIPHER_ENCRYPT, ICP_QAT_HW_CIPHER_DECRYPT }, - IS_KEY_DEP_NO, - NULL, - }, - /* CPA_CY_SYM_CIPHER_DES_CBC */ - { - ICP_QAT_HW_CIPHER_ALGO_DES, - ICP_QAT_HW_CIPHER_CBC_MODE, - { ICP_QAT_HW_CIPHER_NO_CONVERT, ICP_QAT_HW_CIPHER_NO_CONVERT }, - { ICP_QAT_HW_CIPHER_ENCRYPT, ICP_QAT_HW_CIPHER_DECRYPT }, - IS_KEY_DEP_NO, - NULL, - }, - /* CPA_CY_SYM_CIPHER_3DES_ECB */ - { - ICP_QAT_HW_CIPHER_ALGO_3DES, - ICP_QAT_HW_CIPHER_ECB_MODE, - { ICP_QAT_HW_CIPHER_NO_CONVERT, ICP_QAT_HW_CIPHER_NO_CONVERT }, - { ICP_QAT_HW_CIPHER_ENCRYPT, ICP_QAT_HW_CIPHER_DECRYPT }, - IS_KEY_DEP_NO, - NULL, - }, - /* CPA_CY_SYM_CIPHER_3DES_CBC */ - { - ICP_QAT_HW_CIPHER_ALGO_3DES, - ICP_QAT_HW_CIPHER_CBC_MODE, - { ICP_QAT_HW_CIPHER_NO_CONVERT, ICP_QAT_HW_CIPHER_NO_CONVERT }, - { ICP_QAT_HW_CIPHER_ENCRYPT, ICP_QAT_HW_CIPHER_DECRYPT }, - IS_KEY_DEP_NO, - NULL, - }, - /* CPA_CY_SYM_CIPHER_3DES_CTR */ - { - ICP_QAT_HW_CIPHER_ALGO_3DES, - ICP_QAT_HW_CIPHER_CTR_MODE, - { ICP_QAT_HW_CIPHER_NO_CONVERT, ICP_QAT_HW_CIPHER_NO_CONVERT }, - /* Streaming ciphers are a special case. Decrypt = encrypt - * Overriding default values previously set for AES - */ - { ICP_QAT_HW_CIPHER_ENCRYPT, ICP_QAT_HW_CIPHER_ENCRYPT }, - IS_KEY_DEP_NO, - NULL, - }, - /* CPA_CY_SYM_CIPHER_KASUMI_F8 */ - { - ICP_QAT_HW_CIPHER_ALGO_KASUMI, - ICP_QAT_HW_CIPHER_F8_MODE, - { ICP_QAT_HW_CIPHER_NO_CONVERT, ICP_QAT_HW_CIPHER_NO_CONVERT }, - /* Streaming ciphers are a special case. 
Decrypt = encrypt */ - { ICP_QAT_HW_CIPHER_ENCRYPT, ICP_QAT_HW_CIPHER_ENCRYPT }, - IS_KEY_DEP_NO, - NULL, - }, - /* CPA_CY_SYM_CIPHER_SNOW3G_UEA2 */ - { - /* The KEY_CONVERT bit has to be set for Snow_3G operation */ - ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2, - ICP_QAT_HW_CIPHER_ECB_MODE, - { ICP_QAT_HW_CIPHER_KEY_CONVERT, ICP_QAT_HW_CIPHER_KEY_CONVERT }, - { ICP_QAT_HW_CIPHER_ENCRYPT, ICP_QAT_HW_CIPHER_DECRYPT }, - IS_KEY_DEP_NO, - NULL, - }, - /* CPA_CY_SYM_CIPHER_AES_F8 */ - { - ICP_QAT_HW_CIPHER_ALGO_AES128, - ICP_QAT_HW_CIPHER_F8_MODE, - { ICP_QAT_HW_CIPHER_NO_CONVERT, ICP_QAT_HW_CIPHER_NO_CONVERT }, - /* Streaming ciphers are a special case. Decrypt = encrypt */ - { ICP_QAT_HW_CIPHER_ENCRYPT, ICP_QAT_HW_CIPHER_ENCRYPT }, - IS_KEY_DEP_YES, - key_size_f8, - }, - /* CPA_CY_SYM_CIPHER_AES_XTS */ - { - ICP_QAT_HW_CIPHER_ALGO_AES128, - ICP_QAT_HW_CIPHER_XTS_MODE, - /* AES decrypt key needs to be reversed. Instead of reversing the key - * at session registration, it is instead reversed on-the-fly by - * setting the KEY_CONVERT bit here - */ - { ICP_QAT_HW_CIPHER_NO_CONVERT, ICP_QAT_HW_CIPHER_KEY_CONVERT }, - { ICP_QAT_HW_CIPHER_ENCRYPT, ICP_QAT_HW_CIPHER_DECRYPT }, - IS_KEY_DEP_YES, - key_size_xts, - }, - /* CPA_CY_SYM_CIPHER_ZUC_EEA3 */ - { - ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3, - ICP_QAT_HW_CIPHER_ECB_MODE, - { ICP_QAT_HW_CIPHER_KEY_CONVERT, ICP_QAT_HW_CIPHER_KEY_CONVERT }, - { ICP_QAT_HW_CIPHER_ENCRYPT, ICP_QAT_HW_CIPHER_DECRYPT }, - IS_KEY_DEP_NO, - NULL, - }, - /* CPA_CY_SYM_CIPHER_CHACHA */ - { - ICP_QAT_HW_CIPHER_ALGO_CHACHA20_POLY1305, - ICP_QAT_HW_CIPHER_CTR_MODE, - { ICP_QAT_HW_CIPHER_KEY_CONVERT, ICP_QAT_HW_CIPHER_KEY_CONVERT }, - { ICP_QAT_HW_CIPHER_ENCRYPT, ICP_QAT_HW_CIPHER_DECRYPT }, - IS_KEY_DEP_NO, - NULL, - }, - /* CPA_CY_SYM_CIPHER_SM4_ECB */ - { - ICP_QAT_HW_CIPHER_ALGO_SM4, - ICP_QAT_HW_CIPHER_ECB_MODE, - { ICP_QAT_HW_CIPHER_NO_CONVERT, ICP_QAT_HW_CIPHER_KEY_CONVERT }, - { ICP_QAT_HW_CIPHER_ENCRYPT, ICP_QAT_HW_CIPHER_DECRYPT }, - IS_KEY_DEP_YES, - key_size_sm4, - }, - /* CPA_CY_SYM_CIPHER_SM4_CBC */ - { - ICP_QAT_HW_CIPHER_ALGO_SM4, - ICP_QAT_HW_CIPHER_CBC_MODE, - { ICP_QAT_HW_CIPHER_NO_CONVERT, ICP_QAT_HW_CIPHER_KEY_CONVERT }, - { ICP_QAT_HW_CIPHER_ENCRYPT, ICP_QAT_HW_CIPHER_DECRYPT }, - IS_KEY_DEP_YES, - key_size_sm4, - }, - /* CPA_CY_SYM_CIPHER_SM4_CTR */ - { - ICP_QAT_HW_CIPHER_ALGO_SM4, - ICP_QAT_HW_CIPHER_CTR_MODE, - { ICP_QAT_HW_CIPHER_NO_CONVERT, ICP_QAT_HW_CIPHER_NO_CONVERT }, - /* Streaming ciphers are a special case. Decrypt = encrypt */ - { ICP_QAT_HW_CIPHER_ENCRYPT, ICP_QAT_HW_CIPHER_ENCRYPT }, - IS_KEY_DEP_YES, - key_size_sm4, - }, - }; +static const icp_qat_hw_cipher_info icp_qat_alg_info[] = { + /* CPA_CY_SYM_CIPHER_NULL */ + { + ICP_QAT_HW_CIPHER_ALGO_NULL, + ICP_QAT_HW_CIPHER_ECB_MODE, + { ICP_QAT_HW_CIPHER_NO_CONVERT, ICP_QAT_HW_CIPHER_NO_CONVERT }, + { ICP_QAT_HW_CIPHER_ENCRYPT, ICP_QAT_HW_CIPHER_DECRYPT }, + IS_KEY_DEP_NO, + NULL, + }, + /* CPA_CY_SYM_CIPHER_ARC4 */ + { + ICP_QAT_HW_CIPHER_ALGO_ARC4, + ICP_QAT_HW_CIPHER_ECB_MODE, + { ICP_QAT_HW_CIPHER_NO_CONVERT, ICP_QAT_HW_CIPHER_NO_CONVERT }, + /* Streaming ciphers are a special case. Decrypt = encrypt */ + { ICP_QAT_HW_CIPHER_ENCRYPT, ICP_QAT_HW_CIPHER_ENCRYPT }, + IS_KEY_DEP_NO, + NULL, + }, + /* CPA_CY_SYM_CIPHER_AES_ECB */ + { + ICP_QAT_HW_CIPHER_ALGO_AES128, + ICP_QAT_HW_CIPHER_ECB_MODE, + /* AES decrypt key needs to be reversed. 
Instead of reversing the + * key at session registration, it is instead reversed on-the-fly by + * setting the KEY_CONVERT bit here + */ + { ICP_QAT_HW_CIPHER_NO_CONVERT, ICP_QAT_HW_CIPHER_KEY_CONVERT }, + { ICP_QAT_HW_CIPHER_ENCRYPT, ICP_QAT_HW_CIPHER_DECRYPT }, + IS_KEY_DEP_YES, + key_size_aes, + }, + /* CPA_CY_SYM_CIPHER_AES_CBC */ + { + ICP_QAT_HW_CIPHER_ALGO_AES128, + ICP_QAT_HW_CIPHER_CBC_MODE, + /* AES decrypt key needs to be reversed. Instead of reversing the + * key at session registration, it is instead reversed on-the-fly by + * setting the KEY_CONVERT bit here + */ + { ICP_QAT_HW_CIPHER_NO_CONVERT, ICP_QAT_HW_CIPHER_KEY_CONVERT }, + { ICP_QAT_HW_CIPHER_ENCRYPT, ICP_QAT_HW_CIPHER_DECRYPT }, + IS_KEY_DEP_YES, + key_size_aes, + }, + /* CPA_CY_SYM_CIPHER_AES_CTR */ + { + ICP_QAT_HW_CIPHER_ALGO_AES128, + ICP_QAT_HW_CIPHER_CTR_MODE, + /* AES decrypt key needs to be reversed. Instead of reversing the + * key at session registration, it is instead reversed on-the-fly by + * setting the KEY_CONVERT bit here + */ + { ICP_QAT_HW_CIPHER_NO_CONVERT, ICP_QAT_HW_CIPHER_NO_CONVERT }, + /* Streaming ciphers are a special case. Decrypt = encrypt + * Overriding default values previously set for AES + */ + { ICP_QAT_HW_CIPHER_ENCRYPT, ICP_QAT_HW_CIPHER_ENCRYPT }, + IS_KEY_DEP_YES, + key_size_aes, + }, + /* CPA_CY_SYM_CIPHER_AES_CCM */ + { + ICP_QAT_HW_CIPHER_ALGO_AES128, + ICP_QAT_HW_CIPHER_CTR_MODE, + /* AES decrypt key needs to be reversed. Instead of reversing the + * key at session registration, it is instead reversed on-the-fly by + * setting the KEY_CONVERT bit here + */ + { ICP_QAT_HW_CIPHER_NO_CONVERT, ICP_QAT_HW_CIPHER_NO_CONVERT }, + /* Streaming ciphers are a special case. Decrypt = encrypt + * Overriding default values previously set for AES + */ + { ICP_QAT_HW_CIPHER_ENCRYPT, ICP_QAT_HW_CIPHER_ENCRYPT }, + IS_KEY_DEP_YES, + key_size_aes, + }, + /* CPA_CY_SYM_CIPHER_AES_GCM */ + { + ICP_QAT_HW_CIPHER_ALGO_AES128, + ICP_QAT_HW_CIPHER_CTR_MODE, + /* AES decrypt key needs to be reversed. Instead of reversing the + * key at session registration, it is instead reversed on-the-fly by + * setting the KEY_CONVERT bit here + */ + { ICP_QAT_HW_CIPHER_NO_CONVERT, ICP_QAT_HW_CIPHER_NO_CONVERT }, + /* Streaming ciphers are a special case. 
Decrypt = encrypt + * Overriding default values previously set for AES + */ + { ICP_QAT_HW_CIPHER_ENCRYPT, ICP_QAT_HW_CIPHER_ENCRYPT }, + IS_KEY_DEP_YES, + key_size_aes, + }, + /* CPA_CY_SYM_CIPHER_DES_ECB */ + { + ICP_QAT_HW_CIPHER_ALGO_DES, + ICP_QAT_HW_CIPHER_ECB_MODE, + { ICP_QAT_HW_CIPHER_NO_CONVERT, ICP_QAT_HW_CIPHER_NO_CONVERT }, + { ICP_QAT_HW_CIPHER_ENCRYPT, ICP_QAT_HW_CIPHER_DECRYPT }, + IS_KEY_DEP_NO, + NULL, + }, + /* CPA_CY_SYM_CIPHER_DES_CBC */ + { + ICP_QAT_HW_CIPHER_ALGO_DES, + ICP_QAT_HW_CIPHER_CBC_MODE, + { ICP_QAT_HW_CIPHER_NO_CONVERT, ICP_QAT_HW_CIPHER_NO_CONVERT }, + { ICP_QAT_HW_CIPHER_ENCRYPT, ICP_QAT_HW_CIPHER_DECRYPT }, + IS_KEY_DEP_NO, + NULL, + }, + /* CPA_CY_SYM_CIPHER_3DES_ECB */ + { + ICP_QAT_HW_CIPHER_ALGO_3DES, + ICP_QAT_HW_CIPHER_ECB_MODE, + { ICP_QAT_HW_CIPHER_NO_CONVERT, ICP_QAT_HW_CIPHER_NO_CONVERT }, + { ICP_QAT_HW_CIPHER_ENCRYPT, ICP_QAT_HW_CIPHER_DECRYPT }, + IS_KEY_DEP_NO, + NULL, + }, + /* CPA_CY_SYM_CIPHER_3DES_CBC */ + { + ICP_QAT_HW_CIPHER_ALGO_3DES, + ICP_QAT_HW_CIPHER_CBC_MODE, + { ICP_QAT_HW_CIPHER_NO_CONVERT, ICP_QAT_HW_CIPHER_NO_CONVERT }, + { ICP_QAT_HW_CIPHER_ENCRYPT, ICP_QAT_HW_CIPHER_DECRYPT }, + IS_KEY_DEP_NO, + NULL, + }, + /* CPA_CY_SYM_CIPHER_3DES_CTR */ + { + ICP_QAT_HW_CIPHER_ALGO_3DES, + ICP_QAT_HW_CIPHER_CTR_MODE, + { ICP_QAT_HW_CIPHER_NO_CONVERT, ICP_QAT_HW_CIPHER_NO_CONVERT }, + /* Streaming ciphers are a special case. Decrypt = encrypt + * Overriding default values previously set for AES + */ + { ICP_QAT_HW_CIPHER_ENCRYPT, ICP_QAT_HW_CIPHER_ENCRYPT }, + IS_KEY_DEP_NO, + NULL, + }, + /* CPA_CY_SYM_CIPHER_KASUMI_F8 */ + { + ICP_QAT_HW_CIPHER_ALGO_KASUMI, + ICP_QAT_HW_CIPHER_F8_MODE, + { ICP_QAT_HW_CIPHER_NO_CONVERT, ICP_QAT_HW_CIPHER_NO_CONVERT }, + /* Streaming ciphers are a special case. Decrypt = encrypt */ + { ICP_QAT_HW_CIPHER_ENCRYPT, ICP_QAT_HW_CIPHER_ENCRYPT }, + IS_KEY_DEP_NO, + NULL, + }, + /* CPA_CY_SYM_CIPHER_SNOW3G_UEA2 */ + { + /* The KEY_CONVERT bit has to be set for Snow_3G operation */ + ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2, + ICP_QAT_HW_CIPHER_ECB_MODE, + { ICP_QAT_HW_CIPHER_KEY_CONVERT, ICP_QAT_HW_CIPHER_KEY_CONVERT }, + { ICP_QAT_HW_CIPHER_ENCRYPT, ICP_QAT_HW_CIPHER_DECRYPT }, + IS_KEY_DEP_NO, + NULL, + }, + /* CPA_CY_SYM_CIPHER_AES_F8 */ + { + ICP_QAT_HW_CIPHER_ALGO_AES128, + ICP_QAT_HW_CIPHER_F8_MODE, + { ICP_QAT_HW_CIPHER_NO_CONVERT, ICP_QAT_HW_CIPHER_NO_CONVERT }, + /* Streaming ciphers are a special case. Decrypt = encrypt */ + { ICP_QAT_HW_CIPHER_ENCRYPT, ICP_QAT_HW_CIPHER_ENCRYPT }, + IS_KEY_DEP_YES, + key_size_f8, + }, + /* CPA_CY_SYM_CIPHER_AES_XTS */ + { + ICP_QAT_HW_CIPHER_ALGO_AES128, + ICP_QAT_HW_CIPHER_XTS_MODE, + /* AES decrypt key needs to be reversed. 
Instead of reversing the + * key at session registration, it is instead reversed on-the-fly by + * setting the KEY_CONVERT bit here + */ + { ICP_QAT_HW_CIPHER_NO_CONVERT, ICP_QAT_HW_CIPHER_KEY_CONVERT }, + { ICP_QAT_HW_CIPHER_ENCRYPT, ICP_QAT_HW_CIPHER_DECRYPT }, + IS_KEY_DEP_YES, + key_size_xts, + }, + /* CPA_CY_SYM_CIPHER_ZUC_EEA3 */ + { + ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3, + ICP_QAT_HW_CIPHER_ECB_MODE, + { ICP_QAT_HW_CIPHER_KEY_CONVERT, ICP_QAT_HW_CIPHER_KEY_CONVERT }, + { ICP_QAT_HW_CIPHER_ENCRYPT, ICP_QAT_HW_CIPHER_DECRYPT }, + IS_KEY_DEP_NO, + NULL, + }, + /* CPA_CY_SYM_CIPHER_CHACHA */ + { + ICP_QAT_HW_CIPHER_ALGO_CHACHA20_POLY1305, + ICP_QAT_HW_CIPHER_CTR_MODE, + { ICP_QAT_HW_CIPHER_NO_CONVERT, ICP_QAT_HW_CIPHER_NO_CONVERT }, + { ICP_QAT_HW_CIPHER_ENCRYPT, ICP_QAT_HW_CIPHER_DECRYPT }, + IS_KEY_DEP_NO, + NULL, + }, + /* CPA_CY_SYM_CIPHER_SM4_ECB */ + { + ICP_QAT_HW_CIPHER_ALGO_SM4, + ICP_QAT_HW_CIPHER_ECB_MODE, + { ICP_QAT_HW_CIPHER_NO_CONVERT, ICP_QAT_HW_CIPHER_KEY_CONVERT }, + { ICP_QAT_HW_CIPHER_ENCRYPT, ICP_QAT_HW_CIPHER_DECRYPT }, + IS_KEY_DEP_NO, + NULL, + }, + /* CPA_CY_SYM_CIPHER_SM4_CBC */ + { + ICP_QAT_HW_CIPHER_ALGO_SM4, + ICP_QAT_HW_CIPHER_CBC_MODE, + { ICP_QAT_HW_CIPHER_NO_CONVERT, ICP_QAT_HW_CIPHER_KEY_CONVERT }, + { ICP_QAT_HW_CIPHER_ENCRYPT, ICP_QAT_HW_CIPHER_DECRYPT }, + IS_KEY_DEP_NO, + NULL, + }, + /* CPA_CY_SYM_CIPHER_SM4_CTR */ + { + ICP_QAT_HW_CIPHER_ALGO_SM4, + ICP_QAT_HW_CIPHER_CTR_MODE, + { ICP_QAT_HW_CIPHER_NO_CONVERT, ICP_QAT_HW_CIPHER_NO_CONVERT }, + { ICP_QAT_HW_CIPHER_ENCRYPT, ICP_QAT_HW_CIPHER_ENCRYPT }, + IS_KEY_DEP_NO, + NULL, + }, +}; /***************************************************************************** * Internal functions *****************************************************************************/ void LacSymQat_CipherCtrlBlockWrite(icp_qat_la_bulk_req_ftr_t *pMsg, Cpa32U cipherAlgorithm, Cpa32U targetKeyLenInBytes, + Cpa32U sliceType, icp_qat_fw_slice_t nextSlice, Cpa8U cipherCfgOffsetInQuadWord) { icp_qat_fw_cipher_cd_ctrl_hdr_t *cd_ctrl = (icp_qat_fw_cipher_cd_ctrl_hdr_t *)&(pMsg->cd_ctrl); /* state_padding_sz is nonzero for f8 mode only */ cd_ctrl->cipher_padding_sz = 0; + /* Special handling of AES 192 key for UCS slice. + UCS requires it to have 32 bytes - set it as targetKeyLen + in this case, and add padding. It makes no sense + to force applications to provide such key length for a couple of reasons: + 1. It won't be possible to distinguish between AES 192 and 256 based + on key length only + 2.
Only some modes of AES will use UCS slice, then application will + have to know which ones */ + if (ICP_QAT_FW_LA_USE_UCS_SLICE_TYPE == sliceType && + ICP_QAT_HW_AES_192_KEY_SZ == targetKeyLenInBytes) { + targetKeyLenInBytes = ICP_QAT_HW_UCS_AES_192_KEY_SZ; + } + + switch (cipherAlgorithm) { /* Base Key is not passed down to QAT in the case of ARC4 or NULL */ - if (LAC_CIPHER_IS_ARC4(cipherAlgorithm) || - LAC_CIPHER_IS_NULL(cipherAlgorithm)) { + case CPA_CY_SYM_CIPHER_ARC4: + case CPA_CY_SYM_CIPHER_NULL: cd_ctrl->cipher_key_sz = 0; - } else if (LAC_CIPHER_IS_KASUMI(cipherAlgorithm)) { + break; + case CPA_CY_SYM_CIPHER_KASUMI_F8: cd_ctrl->cipher_key_sz = LAC_BYTES_TO_QUADWORDS(ICP_QAT_HW_KASUMI_F8_KEY_SZ); cd_ctrl->cipher_padding_sz = ICP_QAT_HW_MODE_F8_NUM_REG_TO_CLEAR; - } else if (LAC_CIPHER_IS_SNOW3G_UEA2(cipherAlgorithm)) { - /* For Snow3G UEA2 content descriptor key size is - key size plus iv size */ + break; + /* For Snow3G UEA2 content descriptor key size is + key size plus iv size */ + case CPA_CY_SYM_CIPHER_SNOW3G_UEA2: cd_ctrl->cipher_key_sz = LAC_BYTES_TO_QUADWORDS(ICP_QAT_HW_SNOW_3G_UEA2_KEY_SZ + ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ); - } else if (LAC_CIPHER_IS_AES_F8(cipherAlgorithm)) { + break; + case CPA_CY_SYM_CIPHER_AES_F8: cd_ctrl->cipher_key_sz = LAC_BYTES_TO_QUADWORDS(targetKeyLenInBytes); cd_ctrl->cipher_padding_sz = - 2 * ICP_QAT_HW_MODE_F8_NUM_REG_TO_CLEAR; - } else if (LAC_CIPHER_IS_ZUC_EEA3(cipherAlgorithm)) { - /* For ZUC EEA3 content descriptor key size is - key size plus iv size */ + (2 * ICP_QAT_HW_MODE_F8_NUM_REG_TO_CLEAR); + break; + /* For ZUC EEA3 content descriptor key size is + key size plus iv size */ + case CPA_CY_SYM_CIPHER_ZUC_EEA3: cd_ctrl->cipher_key_sz = LAC_BYTES_TO_QUADWORDS(ICP_QAT_HW_ZUC_3G_EEA3_KEY_SZ + ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ); - } else { + break; + default: cd_ctrl->cipher_key_sz = LAC_BYTES_TO_QUADWORDS(targetKeyLenInBytes); } cd_ctrl->cipher_state_sz = LAC_BYTES_TO_QUADWORDS( LacSymQat_CipherIvSizeBytesGet(cipherAlgorithm)); cd_ctrl->cipher_cfg_offset = cipherCfgOffsetInQuadWord; ICP_QAT_FW_COMN_NEXT_ID_SET(cd_ctrl, nextSlice); ICP_QAT_FW_COMN_CURR_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_CIPHER); } void LacSymQat_CipherGetCfgData(lac_session_desc_t *pSession, icp_qat_hw_cipher_algo_t *pAlgorithm, icp_qat_hw_cipher_mode_t *pMode, icp_qat_hw_cipher_dir_t *pDir, icp_qat_hw_cipher_convert_t *pKey_convert) { + sal_crypto_service_t *pService = + (sal_crypto_service_t *)pSession->pInstance; CpaCySymCipherAlgorithm cipherAlgorithm = 0; icp_qat_hw_cipher_dir_t cipherDirection = 0; /* Set defaults */ *pKey_convert = ICP_QAT_HW_CIPHER_NO_CONVERT; *pAlgorithm = ICP_QAT_HW_CIPHER_ALGO_NULL; *pMode = ICP_QAT_HW_CIPHER_ECB_MODE; *pDir = ICP_QAT_HW_CIPHER_ENCRYPT; /* decrease since it's numbered from 1 instead of 0 */ cipherAlgorithm = pSession->cipherAlgorithm - 1; cipherDirection = pSession->cipherDirection == CPA_CY_SYM_CIPHER_DIRECTION_ENCRYPT ? 
- ICP_QAT_HW_CIPHER_ENCRYPT : - ICP_QAT_HW_CIPHER_DECRYPT; + ICP_QAT_HW_CIPHER_ENCRYPT : + ICP_QAT_HW_CIPHER_DECRYPT; *pAlgorithm = icp_qat_alg_info[cipherAlgorithm].algorithm; *pMode = icp_qat_alg_info[cipherAlgorithm].mode; *pDir = icp_qat_alg_info[cipherAlgorithm].dir[cipherDirection]; *pKey_convert = icp_qat_alg_info[cipherAlgorithm].key_convert[cipherDirection]; if (IS_KEY_DEP_NO != icp_qat_alg_info[cipherAlgorithm].isKeyLenDepend) { *pAlgorithm = icp_qat_alg_info[cipherAlgorithm] .pAlgByKeySize[pSession->cipherKeyLenInBytes]; } - /* Set the mode */ - if (LAC_CIPHER_IS_CTR_MODE(pSession->cipherAlgorithm)) { - *pMode = ICP_QAT_HW_CIPHER_CTR_MODE; - *pKey_convert = ICP_QAT_HW_CIPHER_NO_CONVERT; - /* CCP and AES_GCM single pass, despite being limited to - * CTR/AEAD mode, - * support both Encrypt/Decrypt modes - this is because of the - * differences in the hash computation/verification paths in - * encrypt/decrypt modes respectively. - * By default CCP is set as CTR Mode.Set AEAD Mode for AES_GCM. - */ - if (pSession->isSinglePass) { - if (LAC_CIPHER_IS_GCM(pSession->cipherAlgorithm)) - *pMode = ICP_QAT_HW_CIPHER_AEAD_MODE; - if (cipherDirection == ICP_QAT_HW_CIPHER_DECRYPT) - *pDir = ICP_QAT_HW_CIPHER_DECRYPT; - } + + /* CCP and AES_GCM single pass, despite being limited to CTR/AEAD mode, + * support both Encrypt/Decrypt modes - this is because of the + * differences in the hash computation/verification paths in + * encrypt/decrypt modes respectively. + * By default CCP is set as CTR Mode.Set AEAD Mode for AES_GCM. + */ + if (SPC == pSession->singlePassState) { + if (LAC_CIPHER_IS_GCM(pSession->cipherAlgorithm)) + *pMode = ICP_QAT_HW_CIPHER_AEAD_MODE; + else if (isCyGen4x(pService) && + LAC_CIPHER_IS_CCM(pSession->cipherAlgorithm)) + *pMode = ICP_QAT_HW_CIPHER_CCM_MODE; + + if (cipherDirection == ICP_QAT_HW_CIPHER_DECRYPT) + *pDir = ICP_QAT_HW_CIPHER_DECRYPT; } } void LacSymQat_CipherHwBlockPopulateCfgData(lac_session_desc_t *pSession, const void *pCipherHwBlock, Cpa32U *pSizeInBytes) { icp_qat_hw_cipher_algo_t algorithm = ICP_QAT_HW_CIPHER_ALGO_NULL; icp_qat_hw_cipher_mode_t mode = ICP_QAT_HW_CIPHER_ECB_MODE; icp_qat_hw_cipher_dir_t dir = ICP_QAT_HW_CIPHER_ENCRYPT; icp_qat_hw_cipher_convert_t key_convert; icp_qat_hw_cipher_config_t *pCipherConfig = (icp_qat_hw_cipher_config_t *)pCipherHwBlock; + icp_qat_hw_ucs_cipher_config_t *pUCSCipherConfig = + (icp_qat_hw_ucs_cipher_config_t *)pCipherHwBlock; + + Cpa32U val, reserved; Cpa32U aed_hash_cmp_length = 0; *pSizeInBytes = 0; LacSymQat_CipherGetCfgData( pSession, &algorithm, &mode, &dir, &key_convert); /* Build the cipher config into the hardware setup block */ - if (pSession->isSinglePass) { + if (SPC == pSession->singlePassState) { aed_hash_cmp_length = pSession->hashResultSize; - pCipherConfig->reserved = ICP_QAT_HW_CIPHER_CONFIG_BUILD_UPPER( + reserved = ICP_QAT_HW_CIPHER_CONFIG_BUILD_UPPER( pSession->aadLenInBytes); } else { - pCipherConfig->reserved = 0; + reserved = 0; } - pCipherConfig->val = ICP_QAT_HW_CIPHER_CONFIG_BUILD( + val = ICP_QAT_HW_CIPHER_CONFIG_BUILD( mode, algorithm, key_convert, dir, aed_hash_cmp_length); - *pSizeInBytes = sizeof(icp_qat_hw_cipher_config_t); + /* UCS slice has 128-bit configuration register. 
+ Legacy cipher slice has 64-bit config register */ + if (ICP_QAT_FW_LA_USE_UCS_SLICE_TYPE == pSession->cipherSliceType) { + pUCSCipherConfig->val = val; + pUCSCipherConfig->reserved[0] = reserved; + pUCSCipherConfig->reserved[1] = 0; + pUCSCipherConfig->reserved[2] = 0; + *pSizeInBytes = sizeof(icp_qat_hw_ucs_cipher_config_t); + } else { + pCipherConfig->val = val; + pCipherConfig->reserved = reserved; + *pSizeInBytes = sizeof(icp_qat_hw_cipher_config_t); + } } void LacSymQat_CipherHwBlockPopulateKeySetup( + lac_session_desc_t *pSessionDesc, const CpaCySymCipherSetupData *pCipherSetupData, Cpa32U targetKeyLenInBytes, + Cpa32U sliceType, const void *pCipherHwBlock, Cpa32U *pSizeInBytes) { Cpa8U *pCipherKey = (Cpa8U *)pCipherHwBlock; Cpa32U actualKeyLenInBytes = pCipherSetupData->cipherKeyLenInBytes; *pSizeInBytes = 0; /* Key is copied into content descriptor for all cases except for * Arc4 and Null cipher */ if (!(LAC_CIPHER_IS_ARC4(pCipherSetupData->cipherAlgorithm) || LAC_CIPHER_IS_NULL(pCipherSetupData->cipherAlgorithm))) { + /* Special handling of AES 192 key for UCS slice. + UCS requires it to have 32 bytes - set it as targetKeyLen + in this case, and add padding. It makes no sense + to force applications to provide such key length for a couple + of reasons: + 1. It won't be possible to distinguish between AES 192 and + 256 based on key length only + 2. Only some modes of AES will use UCS slice, so the + application will have to know which ones */ + if (ICP_QAT_FW_LA_USE_UCS_SLICE_TYPE == sliceType && + ICP_QAT_HW_AES_192_KEY_SZ == targetKeyLenInBytes) { + targetKeyLenInBytes = ICP_QAT_HW_UCS_AES_192_KEY_SZ; + } + /* Set the Cipher key field in the cipher block */ memcpy(pCipherKey, pCipherSetupData->pCipherKey, actualKeyLenInBytes); /* Pad the key with 0's if required */ if (0 < (targetKeyLenInBytes - actualKeyLenInBytes)) { LAC_OS_BZERO(pCipherKey + actualKeyLenInBytes, targetKeyLenInBytes - actualKeyLenInBytes); } *pSizeInBytes += targetKeyLenInBytes; - /* For Kasumi in F8 mode Cipher Key is concatenated with - * Cipher Key XOR-ed with Key Modifier (CK||CK^KM) */ - if (LAC_CIPHER_IS_KASUMI(pCipherSetupData->cipherAlgorithm)) { + switch (pCipherSetupData->cipherAlgorithm) { + /* For Kasumi in F8 mode Cipher Key is concatenated with + * Cipher Key XOR-ed with Key Modifier (CK||CK^KM) */ + case CPA_CY_SYM_CIPHER_KASUMI_F8: { Cpa32U wordIndex = 0; Cpa32U *pu32CipherKey = (Cpa32U *)pCipherSetupData->pCipherKey; Cpa32U *pTempKey = (Cpa32U *)(pCipherKey + targetKeyLenInBytes); /* XOR Key with KASUMI F8 key modifier at 4 bytes level */ for (wordIndex = 0; wordIndex < LAC_BYTES_TO_LONGWORDS(targetKeyLenInBytes); wordIndex++) { pTempKey[wordIndex] = pu32CipherKey[wordIndex] ^ LAC_CIPHER_KASUMI_F8_KEY_MODIFIER_4_BYTES; } *pSizeInBytes += targetKeyLenInBytes; /* also add padding for F8 */ *pSizeInBytes += LAC_QUADWORDS_TO_BYTES( ICP_QAT_HW_MODE_F8_NUM_REG_TO_CLEAR); LAC_OS_BZERO((Cpa8U *)pTempKey + targetKeyLenInBytes, LAC_QUADWORDS_TO_BYTES( ICP_QAT_HW_MODE_F8_NUM_REG_TO_CLEAR)); - } - /* For AES in F8 mode Cipher Key is concatenated with - * Cipher Key XOR-ed with Key Mask (CK||CK^KM) */ - else if (LAC_CIPHER_IS_AES_F8( - pCipherSetupData->cipherAlgorithm)) { + } break; + /* For AES in F8 mode Cipher Key is concatenated with + * Cipher Key XOR-ed with Key Mask (CK||CK^KM) */ + case CPA_CY_SYM_CIPHER_AES_F8: { Cpa32U index = 0; Cpa8U *pTempKey = pCipherKey + (targetKeyLenInBytes / 2); *pSizeInBytes += targetKeyLenInBytes; /* XOR Key with key Mask */ for (index = 0; index < targetKeyLenInBytes;
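
The if/else just above only differs in how wide the hardware config register is; a sketch with the two layouts modelled on icp_qat_hw_cipher_config_t (64 bits) and icp_qat_hw_ucs_cipher_config_t (128 bits), returning how much of the setup block was consumed:

#include <stddef.h>
#include <stdint.h>

struct legacy_cipher_cfg {	/* 64-bit config register */
	uint32_t val;
	uint32_t reserved;
};

struct ucs_cipher_cfg {		/* 128-bit config register */
	uint32_t val;
	uint32_t reserved[3];
};

static size_t
cipher_cfg_write(void *blk, int use_ucs, uint32_t val, uint32_t reserved)
{
	if (use_ucs) {
		struct ucs_cipher_cfg *c = blk;

		c->val = val;
		c->reserved[0] = reserved;
		c->reserved[1] = c->reserved[2] = 0;
		return (sizeof(*c));	/* 16 bytes */
	} else {
		struct legacy_cipher_cfg *c = blk;

		c->val = val;
		c->reserved = reserved;
		return (sizeof(*c));	/* 8 bytes */
	}
}
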
index++) { pTempKey[index] = pCipherKey[index] ^ pTempKey[index]; } pTempKey = (pCipherKey + targetKeyLenInBytes); /* also add padding for AES F8 */ *pSizeInBytes += 2 * targetKeyLenInBytes; LAC_OS_BZERO(pTempKey, 2 * targetKeyLenInBytes); - } else if (LAC_CIPHER_IS_SNOW3G_UEA2( - pCipherSetupData->cipherAlgorithm)) { + } break; + case CPA_CY_SYM_CIPHER_SNOW3G_UEA2: { /* For Snow3G zero area after the key for FW */ LAC_OS_BZERO(pCipherKey + targetKeyLenInBytes, ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ); *pSizeInBytes += ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ; - } else if (LAC_CIPHER_IS_ZUC_EEA3( - pCipherSetupData->cipherAlgorithm)) { + } break; + case CPA_CY_SYM_CIPHER_ZUC_EEA3: { /* For ZUC zero area after the key for FW */ LAC_OS_BZERO(pCipherKey + targetKeyLenInBytes, ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ); *pSizeInBytes += ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ; + } break; + case CPA_CY_SYM_CIPHER_AES_XTS: { + /* For AES in XTS mode Cipher Key is concatenated with + * second Cipher Key which is used for tweak calculation + * (CK1||CK2). For decryption Cipher Key needs to be + * converted to reverse key.*/ + if (ICP_QAT_FW_LA_USE_UCS_SLICE_TYPE == sliceType) { + Cpa32U key_len = + pCipherSetupData->cipherKeyLenInBytes / 2; + memcpy(pSessionDesc->cipherAesXtsKey1Forward, + pCipherSetupData->pCipherKey, + key_len); + + qatUtilsAESKeyExpansionForward( + pSessionDesc->cipherAesXtsKey1Forward, + key_len, + (uint32_t *) + pSessionDesc->cipherAesXtsKey1Reverse); + + memcpy(pSessionDesc->cipherAesXtsKey2, + pCipherSetupData->pCipherKey + key_len, + key_len); + + if (CPA_CY_SYM_CIPHER_DIRECTION_DECRYPT == + pCipherSetupData->cipherDirection) { + memcpy(pCipherKey, + pSessionDesc + ->cipherAesXtsKey1Reverse, + key_len); + } else { + memcpy(pCipherKey, + pSessionDesc + ->cipherAesXtsKey1Forward, + key_len); + } + } + } break; + default: + break; } } } /***************************************************************************** * External functions *****************************************************************************/ Cpa8U LacSymQat_CipherBlockSizeBytesGet(CpaCySymCipherAlgorithm cipherAlgorithm) { - if (LAC_CIPHER_IS_ARC4(cipherAlgorithm)) { - return LAC_CIPHER_ARC4_BLOCK_LEN_BYTES; - } else if (LAC_CIPHER_IS_AES(cipherAlgorithm) || - LAC_CIPHER_IS_AES_F8(cipherAlgorithm)) { - return ICP_QAT_HW_AES_BLK_SZ; - } else if (LAC_CIPHER_IS_DES(cipherAlgorithm)) { - return ICP_QAT_HW_DES_BLK_SZ; - } else if (LAC_CIPHER_IS_TRIPLE_DES(cipherAlgorithm)) { - return ICP_QAT_HW_3DES_BLK_SZ; - } else if (LAC_CIPHER_IS_KASUMI(cipherAlgorithm)) { - return ICP_QAT_HW_KASUMI_BLK_SZ; - } else if (LAC_CIPHER_IS_SNOW3G_UEA2(cipherAlgorithm)) { - return ICP_QAT_HW_SNOW_3G_BLK_SZ; - } else if (LAC_CIPHER_IS_ZUC_EEA3(cipherAlgorithm)) { - return ICP_QAT_HW_ZUC_3G_BLK_SZ; - } else if (LAC_CIPHER_IS_NULL(cipherAlgorithm)) { - return LAC_CIPHER_NULL_BLOCK_LEN_BYTES; - } else if (LAC_CIPHER_IS_CHACHA(cipherAlgorithm)) { - return ICP_QAT_HW_CHACHAPOLY_BLK_SZ; - } else if (LAC_CIPHER_IS_SM4(cipherAlgorithm)) { - return ICP_QAT_HW_SM4_BLK_SZ; - } else { - QAT_UTILS_LOG("Algorithm not supported in Cipher\n"); - return 0; + Cpa8U blockSize = 0; + switch (cipherAlgorithm) { + case CPA_CY_SYM_CIPHER_ARC4: + blockSize = LAC_CIPHER_ARC4_BLOCK_LEN_BYTES; + break; + /* Handle AES or AES_F8 */ + case CPA_CY_SYM_CIPHER_AES_ECB: + case CPA_CY_SYM_CIPHER_AES_CBC: + case CPA_CY_SYM_CIPHER_AES_CTR: + case CPA_CY_SYM_CIPHER_AES_CCM: + case CPA_CY_SYM_CIPHER_AES_GCM: + case CPA_CY_SYM_CIPHER_AES_XTS: + case CPA_CY_SYM_CIPHER_AES_F8: + blockSize = ICP_QAT_HW_AES_BLK_SZ; + 
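
The XTS case above splits the session key into CK1||CK2 and, for decryption, programs the "reverse" form of CK1 produced by a forward key expansion. A sketch of the key selection, where aes_key_expand_forward() is a hypothetical stand-in for qatUtilsAESKeyExpansionForward():

#include <stdint.h>
#include <string.h>

/* Hypothetical stand-in for qatUtilsAESKeyExpansionForward(). */
void aes_key_expand_forward(const uint8_t *key, uint32_t key_len,
    uint32_t *reverse_key);

static void
xts_program_key1(uint8_t *dst, const uint8_t *xts_key, uint32_t xts_key_len,
    int decrypt)
{
	uint32_t half = xts_key_len / 2;	/* CK1||CK2: equal halves */
	uint32_t reverse[8];			/* 32 bytes, enough for AES-256 CK1 */

	if (decrypt) {
		aes_key_expand_forward(xts_key, half, reverse);
		memcpy(dst, reverse, half);
	} else {
		memcpy(dst, xts_key, half);
	}
}
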
break; + /* Handle DES */ + case CPA_CY_SYM_CIPHER_DES_ECB: + case CPA_CY_SYM_CIPHER_DES_CBC: + blockSize = ICP_QAT_HW_DES_BLK_SZ; + break; + /* Handle TRIPLE DES */ + case CPA_CY_SYM_CIPHER_3DES_ECB: + case CPA_CY_SYM_CIPHER_3DES_CBC: + case CPA_CY_SYM_CIPHER_3DES_CTR: + blockSize = ICP_QAT_HW_3DES_BLK_SZ; + break; + case CPA_CY_SYM_CIPHER_KASUMI_F8: + blockSize = ICP_QAT_HW_KASUMI_BLK_SZ; + break; + case CPA_CY_SYM_CIPHER_SNOW3G_UEA2: + blockSize = ICP_QAT_HW_SNOW_3G_BLK_SZ; + break; + case CPA_CY_SYM_CIPHER_ZUC_EEA3: + blockSize = ICP_QAT_HW_ZUC_3G_BLK_SZ; + break; + case CPA_CY_SYM_CIPHER_NULL: + blockSize = LAC_CIPHER_NULL_BLOCK_LEN_BYTES; + break; + case CPA_CY_SYM_CIPHER_CHACHA: + blockSize = ICP_QAT_HW_CHACHAPOLY_BLK_SZ; + break; + case CPA_CY_SYM_CIPHER_SM4_ECB: + case CPA_CY_SYM_CIPHER_SM4_CBC: + case CPA_CY_SYM_CIPHER_SM4_CTR: + blockSize = ICP_QAT_HW_SM4_BLK_SZ; + break; + default: + QAT_UTILS_LOG("Algorithm not supported in Cipher"); } + return blockSize; } Cpa32U LacSymQat_CipherIvSizeBytesGet(CpaCySymCipherAlgorithm cipherAlgorithm) { - if (CPA_CY_SYM_CIPHER_ARC4 == cipherAlgorithm) { - return LAC_CIPHER_ARC4_STATE_LEN_BYTES; - } else if (LAC_CIPHER_IS_KASUMI(cipherAlgorithm)) { - return ICP_QAT_HW_KASUMI_BLK_SZ; - } else if (LAC_CIPHER_IS_SNOW3G_UEA2(cipherAlgorithm)) { - return ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ; - } else if (LAC_CIPHER_IS_ZUC_EEA3(cipherAlgorithm)) { - return ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ; - } else if (LAC_CIPHER_IS_CHACHA(cipherAlgorithm)) { - return ICP_QAT_HW_CHACHAPOLY_IV_SZ; - } else if (LAC_CIPHER_IS_ECB_MODE(cipherAlgorithm)) { - return 0; - } else { - return (Cpa32U)LacSymQat_CipherBlockSizeBytesGet( - cipherAlgorithm); + Cpa32U ivSize = 0; + switch (cipherAlgorithm) { + case CPA_CY_SYM_CIPHER_ARC4: + ivSize = LAC_CIPHER_ARC4_STATE_LEN_BYTES; + break; + case CPA_CY_SYM_CIPHER_KASUMI_F8: + ivSize = ICP_QAT_HW_KASUMI_BLK_SZ; + break; + case CPA_CY_SYM_CIPHER_SNOW3G_UEA2: + ivSize = ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ; + break; + case CPA_CY_SYM_CIPHER_ZUC_EEA3: + ivSize = ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ; + break; + case CPA_CY_SYM_CIPHER_CHACHA: + ivSize = ICP_QAT_HW_CHACHAPOLY_IV_SZ; + break; + case CPA_CY_SYM_CIPHER_AES_ECB: + case CPA_CY_SYM_CIPHER_DES_ECB: + case CPA_CY_SYM_CIPHER_3DES_ECB: + case CPA_CY_SYM_CIPHER_SM4_ECB: + case CPA_CY_SYM_CIPHER_NULL: + /* for all ECB Mode IV size is 0 */ + break; + default: + ivSize = LacSymQat_CipherBlockSizeBytesGet(cipherAlgorithm); } + return ivSize; } inline CpaStatus -LacSymQat_CipherRequestParamsPopulate(icp_qat_fw_la_bulk_req_t *pReq, +LacSymQat_CipherRequestParamsPopulate(lac_session_desc_t *pSessionDesc, + icp_qat_fw_la_bulk_req_t *pReq, Cpa32U cipherOffsetInBytes, Cpa32U cipherLenInBytes, Cpa64U ivBufferPhysAddr, Cpa8U *pIvBufferVirt) { icp_qat_fw_la_cipher_req_params_t *pCipherReqParams; icp_qat_fw_cipher_cd_ctrl_hdr_t *pCipherCdCtrlHdr; icp_qat_fw_serv_specif_flags *pCipherSpecificFlags; + Cpa32U usedBufSize = 0; + Cpa32U totalBufSize = 0; pCipherReqParams = (icp_qat_fw_la_cipher_req_params_t *)((Cpa8U *)&(pReq->serv_specif_rqpars) + ICP_QAT_FW_CIPHER_REQUEST_PARAMETERS_OFFSET); pCipherCdCtrlHdr = (icp_qat_fw_cipher_cd_ctrl_hdr_t *)&(pReq->cd_ctrl); pCipherSpecificFlags = &(pReq->comn_hdr.serv_specif_flags); pCipherReqParams->cipher_offset = cipherOffsetInBytes; pCipherReqParams->cipher_length = cipherLenInBytes; /* Don't copy the buffer into the Msg if * it's too big for the cipher_IV_array * OR if the FW needs to update it * OR if there's no buffer supplied * OR if last partial */ if 
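
A usage sketch of the two lookups after the switch rewrite, assuming the QAT API headers are on the include path and the standard 16-byte AES block size; algorithms without a dedicated IV case fall back to their block size, and ECB modes report zero:

#include <assert.h>
#include "cpa.h"
#include "cpa_cy_sym.h"

Cpa8U LacSymQat_CipherBlockSizeBytesGet(CpaCySymCipherAlgorithm alg);
Cpa32U LacSymQat_CipherIvSizeBytesGet(CpaCySymCipherAlgorithm alg);

static void
iv_size_examples(void)
{
	/* AES-CBC has no dedicated case: IV size == block size. */
	assert(LacSymQat_CipherIvSizeBytesGet(CPA_CY_SYM_CIPHER_AES_CBC) == 16);
	/* ECB modes carry no IV. */
	assert(LacSymQat_CipherIvSizeBytesGet(CPA_CY_SYM_CIPHER_AES_ECB) == 0);
}
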
((pCipherCdCtrlHdr->cipher_state_sz > LAC_SYM_QAT_HASH_IV_REQ_MAX_SIZE_QW) || (ICP_QAT_FW_LA_UPDATE_STATE_GET(*pCipherSpecificFlags) == ICP_QAT_FW_LA_UPDATE_STATE) || (pIvBufferVirt == NULL) || (ICP_QAT_FW_LA_PARTIAL_GET(*pCipherSpecificFlags) == ICP_QAT_FW_LA_PARTIAL_END)) { /* Populate the field with a ptr to the flat buffer */ pCipherReqParams->u.s.cipher_IV_ptr = ivBufferPhysAddr; pCipherReqParams->u.s.resrvd1 = 0; /* Set the flag indicating the field format */ ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET( *pCipherSpecificFlags, ICP_QAT_FW_CIPH_IV_64BIT_PTR); } else { /* Populate the field with the contents of the buffer, * zero field first as data may be smaller than the field */ - memset(pCipherReqParams->u.cipher_IV_array, - 0, - LAC_LONGWORDS_TO_BYTES(ICP_QAT_FW_NUM_LONGWORDS_4)); - - /* We force a specific compiler optimisation here. The length - * to - * be copied turns out to be always 16, and by coding a memcpy - * with - * a literal value the compiler will compile inline code (in - * fact, - * only two vector instructions) to effect the copy. This gives - * us - * a huge performance increase. - */ - unsigned long cplen = - LAC_QUADWORDS_TO_BYTES(pCipherCdCtrlHdr->cipher_state_sz); - - if (cplen == 16) - memcpy(pCipherReqParams->u.cipher_IV_array, - pIvBufferVirt, - 16); - else + + /* In case of XTS mode using UCS slice always embed IV. + * IV provided by user needs to be encrypted to calculate + * initial tweak, use pCipherReqParams->u.cipher_IV_array as + * destination buffer for tweak value */ + if (ICP_QAT_FW_LA_USE_UCS_SLICE_TYPE == + pSessionDesc->cipherSliceType && + LAC_CIPHER_IS_XTS_MODE(pSessionDesc->cipherAlgorithm)) { + memset(pCipherReqParams->u.cipher_IV_array, + 0, + LAC_LONGWORDS_TO_BYTES( + ICP_QAT_FW_NUM_LONGWORDS_4)); + qatUtilsAESEncrypt( + pSessionDesc->cipherAesXtsKey2, + pSessionDesc->cipherKeyLenInBytes / 2, + pIvBufferVirt, + (Cpa8U *)pCipherReqParams->u.cipher_IV_array); + } else { + totalBufSize = + LAC_LONGWORDS_TO_BYTES(ICP_QAT_FW_NUM_LONGWORDS_4); + usedBufSize = LAC_QUADWORDS_TO_BYTES( + pCipherCdCtrlHdr->cipher_state_sz); + /* Only initialise unused buffer if applicable */ + if (usedBufSize < totalBufSize) { + memset( + (&pCipherReqParams->u.cipher_IV_array + [usedBufSize & LAC_UNUSED_POS_MASK]), + 0, + totalBufSize - usedBufSize); + } memcpy(pCipherReqParams->u.cipher_IV_array, pIvBufferVirt, - cplen); + usedBufSize); + } /* Set the flag indicating the field format */ ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET( *pCipherSpecificFlags, ICP_QAT_FW_CIPH_IV_16BYTE_DATA); } return CPA_STATUS_SUCCESS; } void LacSymQat_CipherArc4StateInit(const Cpa8U *pKey, Cpa32U keyLenInBytes, Cpa8U *pArc4CipherState) { Cpa32U i = 0; Cpa32U j = 0; Cpa32U k = 0; for (i = 0; i < LAC_CIPHER_ARC4_KEY_MATRIX_LEN_BYTES; ++i) { pArc4CipherState[i] = (Cpa8U)i; } - for (i = 0; i < LAC_CIPHER_ARC4_KEY_MATRIX_LEN_BYTES; ++i) { + for (i = 0, k = 0; i < LAC_CIPHER_ARC4_KEY_MATRIX_LEN_BYTES; ++i, ++k) { Cpa8U swap = 0; if (k >= keyLenInBytes) k -= keyLenInBytes; j = (j + pArc4CipherState[i] + pKey[k]); if (j >= LAC_CIPHER_ARC4_KEY_MATRIX_LEN_BYTES) j %= LAC_CIPHER_ARC4_KEY_MATRIX_LEN_BYTES; - ++k; /* Swap state[i] & state[j] */ swap = pArc4CipherState[i]; pArc4CipherState[i] = pArc4CipherState[j]; pArc4CipherState[j] = swap; } /* Initialise i & j values for QAT */ pArc4CipherState[LAC_CIPHER_ARC4_KEY_MATRIX_LEN_BYTES] = 0; pArc4CipherState[LAC_CIPHER_ARC4_KEY_MATRIX_LEN_BYTES + 1] = 0; } /* Update the cipher_key_sz in the Request cache prepared and stored * in the session */ void
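
In the UCS/XTS branch above, the caller's IV is not sent as-is: it is encrypted under the second key half to produce the initial tweak, and that tweak is what gets embedded in the request. A sketch, where aes_ecb_encrypt_block() is a hypothetical stand-in for qatUtilsAESEncrypt():

#include <stdint.h>
#include <string.h>

/* Hypothetical single-block AES-ECB helper standing in for
 * qatUtilsAESEncrypt(). */
void aes_ecb_encrypt_block(const uint8_t *key, uint32_t key_len,
    const uint8_t *in, uint8_t *out);

/* Initial XTS tweak: T0 = AES-enc(K2, IV), written into the request's
 * embedded cipher_IV_array field. */
static void
xts_initial_tweak(uint8_t tweak[16], const uint8_t *key2, uint32_t key2_len,
    const uint8_t iv[16])
{
	memset(tweak, 0, 16);
	aes_ecb_encrypt_block(key2, key2_len, iv, tweak);
}
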
LacSymQat_CipherXTSModeUpdateKeyLen(lac_session_desc_t *pSessionDesc, Cpa32U newKeySizeInBytes) { icp_qat_fw_cipher_cd_ctrl_hdr_t *pCipherControlBlock = NULL; pCipherControlBlock = (icp_qat_fw_cipher_cd_ctrl_hdr_t *)&( pSessionDesc->reqCacheFtr.cd_ctrl); pCipherControlBlock->cipher_key_sz = LAC_BYTES_TO_QUADWORDS(newKeySizeInBytes); } diff --git a/sys/dev/qat/qat_api/common/crypto/sym/qat/lac_sym_qat_constants_table.c b/sys/dev/qat/qat_api/common/crypto/sym/qat/lac_sym_qat_constants_table.c new file mode 100644 index 000000000000..00b54ac8842b --- /dev/null +++ b/sys/dev/qat/qat_api/common/crypto/sym/qat/lac_sym_qat_constants_table.c @@ -0,0 +1,257 @@ +/* SPDX-License-Identifier: BSD-3-Clause */ +/* Copyright(c) 2007-2022 Intel Corporation */ +/* $FreeBSD$ */ + +/** + *************************************************************************** + * @file lac_sym_qat_constants_table.c + * + * @ingroup LacSymQat + ***************************************************************************/ + +/* +******************************************************************************* +* Include public/global header files +******************************************************************************* +*/ + +#include "cpa.h" + +/* +******************************************************************************* +* Include private header files +******************************************************************************* +*/ + +#include "lac_common.h" +#include "icp_qat_fw_la.h" +#include "lac_log.h" +#include "lac_mem.h" +#include "sal_string_parse.h" +#include "lac_sal_types_crypto.h" +#include "sal_types_compression.h" + +static uint8_t icp_qat_hw_cipher_lookup_tbl[ICP_QAT_HW_CIPHER_DELIMITER] + [ICP_QAT_HW_CIPHER_MODE_DELIMITER][2] + [2]; /* IA version */ +static uint8_t icp_qat_hw_auth_lookup_tbl[ICP_QAT_HW_AUTH_ALGO_DELIMITER] + [ICP_QAT_HW_AUTH_MODE_DELIMITER] + [2]; /* IA version */ + +#define ICP_QAT_HW_FILL_LOOKUP_TBLS \ + { \ + \ + icp_qat_hw_cipher_lookup_tbl[ICP_QAT_HW_CIPHER_ALGO_DES] \ + [ICP_QAT_HW_CIPHER_ECB_MODE] \ + [ICP_QAT_HW_CIPHER_ENCRYPT] \ + [ICP_QAT_HW_CIPHER_NO_CONVERT] = \ + 9; \ + icp_qat_hw_cipher_lookup_tbl[ICP_QAT_HW_CIPHER_ALGO_DES] \ + [ICP_QAT_HW_CIPHER_ECB_MODE] \ + [ICP_QAT_HW_CIPHER_DECRYPT] \ + [ICP_QAT_HW_CIPHER_NO_CONVERT] = \ + 10; \ + icp_qat_hw_cipher_lookup_tbl[ICP_QAT_HW_CIPHER_ALGO_DES] \ + [ICP_QAT_HW_CIPHER_CBC_MODE] \ + [ICP_QAT_HW_CIPHER_ENCRYPT] \ + [ICP_QAT_HW_CIPHER_NO_CONVERT] = \ + 11; \ + icp_qat_hw_cipher_lookup_tbl[ICP_QAT_HW_CIPHER_ALGO_DES] \ + [ICP_QAT_HW_CIPHER_CBC_MODE] \ + [ICP_QAT_HW_CIPHER_DECRYPT] \ + [ICP_QAT_HW_CIPHER_NO_CONVERT] = \ + 12; \ + icp_qat_hw_cipher_lookup_tbl[ICP_QAT_HW_CIPHER_ALGO_DES] \ + [ICP_QAT_HW_CIPHER_CTR_MODE] \ + [ICP_QAT_HW_CIPHER_ENCRYPT] \ + [ICP_QAT_HW_CIPHER_NO_CONVERT] = \ + 13; \ + icp_qat_hw_cipher_lookup_tbl[ICP_QAT_HW_CIPHER_ALGO_AES128] \ + [ICP_QAT_HW_CIPHER_ECB_MODE] \ + [ICP_QAT_HW_CIPHER_ENCRYPT] \ + [ICP_QAT_HW_CIPHER_NO_CONVERT] = \ + 14; \ + icp_qat_hw_cipher_lookup_tbl[ICP_QAT_HW_CIPHER_ALGO_AES128] \ + [ICP_QAT_HW_CIPHER_ECB_MODE] \ + [ICP_QAT_HW_CIPHER_ENCRYPT] \ + [ICP_QAT_HW_CIPHER_KEY_CONVERT] = \ + 15; \ + icp_qat_hw_cipher_lookup_tbl[ICP_QAT_HW_CIPHER_ALGO_AES128] \ + [ICP_QAT_HW_CIPHER_ECB_MODE] \ + [ICP_QAT_HW_CIPHER_DECRYPT] \ + [ICP_QAT_HW_CIPHER_NO_CONVERT] = \ + 16; \ + icp_qat_hw_cipher_lookup_tbl[ICP_QAT_HW_CIPHER_ALGO_AES128] \ + [ICP_QAT_HW_CIPHER_ECB_MODE] \ + [ICP_QAT_HW_CIPHER_DECRYPT] \ + [ICP_QAT_HW_CIPHER_KEY_CONVERT] = \ + 17; \ + 
icp_qat_hw_cipher_lookup_tbl[ICP_QAT_HW_CIPHER_ALGO_AES128] \ + [ICP_QAT_HW_CIPHER_CBC_MODE] \ + [ICP_QAT_HW_CIPHER_ENCRYPT] \ + [ICP_QAT_HW_CIPHER_NO_CONVERT] = \ + 18; \ + icp_qat_hw_cipher_lookup_tbl[ICP_QAT_HW_CIPHER_ALGO_AES128] \ + [ICP_QAT_HW_CIPHER_CBC_MODE] \ + [ICP_QAT_HW_CIPHER_ENCRYPT] \ + [ICP_QAT_HW_CIPHER_KEY_CONVERT] = \ + 19; \ + icp_qat_hw_cipher_lookup_tbl[ICP_QAT_HW_CIPHER_ALGO_AES128] \ + [ICP_QAT_HW_CIPHER_CBC_MODE] \ + [ICP_QAT_HW_CIPHER_DECRYPT] \ + [ICP_QAT_HW_CIPHER_NO_CONVERT] = \ + 20; \ + icp_qat_hw_cipher_lookup_tbl[ICP_QAT_HW_CIPHER_ALGO_AES128] \ + [ICP_QAT_HW_CIPHER_CBC_MODE] \ + [ICP_QAT_HW_CIPHER_DECRYPT] \ + [ICP_QAT_HW_CIPHER_KEY_CONVERT] = \ + 21; \ + icp_qat_hw_cipher_lookup_tbl[ICP_QAT_HW_CIPHER_ALGO_AES128] \ + [ICP_QAT_HW_CIPHER_CTR_MODE] \ + [ICP_QAT_HW_CIPHER_ENCRYPT] \ + [ICP_QAT_HW_CIPHER_NO_CONVERT] = \ + 22; \ + icp_qat_hw_cipher_lookup_tbl[ICP_QAT_HW_CIPHER_ALGO_AES128] \ + [ICP_QAT_HW_CIPHER_F8_MODE] \ + [ICP_QAT_HW_CIPHER_ENCRYPT] \ + [ICP_QAT_HW_CIPHER_NO_CONVERT] = \ + 23; \ + icp_qat_hw_cipher_lookup_tbl[ICP_QAT_HW_CIPHER_ALGO_ARC4] \ + [ICP_QAT_HW_CIPHER_ECB_MODE] \ + [ICP_QAT_HW_CIPHER_ENCRYPT] \ + [ICP_QAT_HW_CIPHER_NO_CONVERT] = \ + 24; \ + icp_qat_hw_cipher_lookup_tbl[ICP_QAT_HW_CIPHER_ALGO_ARC4] \ + [ICP_QAT_HW_CIPHER_ECB_MODE] \ + [ICP_QAT_HW_CIPHER_ENCRYPT] \ + [ICP_QAT_HW_CIPHER_KEY_CONVERT] = \ + 25; \ + \ + icp_qat_hw_auth_lookup_tbl \ + [ICP_QAT_HW_AUTH_ALGO_MD5][ICP_QAT_HW_AUTH_MODE0] \ + [ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED] = 37; \ + icp_qat_hw_auth_lookup_tbl \ + [ICP_QAT_HW_AUTH_ALGO_SHA1][ICP_QAT_HW_AUTH_MODE0] \ + [ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED] = 41; \ + icp_qat_hw_auth_lookup_tbl \ + [ICP_QAT_HW_AUTH_ALGO_SHA1][ICP_QAT_HW_AUTH_MODE1] \ + [ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED] = 46; \ + icp_qat_hw_auth_lookup_tbl \ + [ICP_QAT_HW_AUTH_ALGO_SHA224][ICP_QAT_HW_AUTH_MODE0] \ + [ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED] = 48; \ + icp_qat_hw_auth_lookup_tbl \ + [ICP_QAT_HW_AUTH_ALGO_SHA256][ICP_QAT_HW_AUTH_MODE0] \ + [ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED] = 54; \ + icp_qat_hw_auth_lookup_tbl \ + [ICP_QAT_HW_AUTH_ALGO_SHA384][ICP_QAT_HW_AUTH_MODE0] \ + [ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED] = 60; \ + icp_qat_hw_auth_lookup_tbl \ + [ICP_QAT_HW_AUTH_ALGO_SHA512][ICP_QAT_HW_AUTH_MODE0] \ + [ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED] = 70; \ + } + +/** + ***************************************************************************** + * @ingroup LacSymQat + * LacSymQat_ConstantsInitLookupTables + * + * + *****************************************************************************/ +void +LacSymQat_ConstantsInitLookupTables(CpaInstanceHandle instanceHandle) +{ + sal_service_t *pService = (sal_service_t *)instanceHandle; + lac_sym_qat_constants_t *pConstantsLookupTables; + + /* Note the global tables are initialised first, then copied + * to the service which probably seems like a waste of memory + * and processing cycles as the global tables are never needed again + * but this allows use of the ICP_QAT_HW_FILL_LOOKUP_TBLS macro + * supplied by FW without modification */ + + if (SAL_SERVICE_TYPE_COMPRESSION == pService->type) { + /* DC chaining not supported yet */ + return; + } else { + pConstantsLookupTables = &( + ((sal_crypto_service_t *)pService)->constantsLookupTables); + } + + /* First fill the global lookup tables with zeroes. 
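
The macro above only fills slots that have a precomputed entry in the SHRAM constants table; untouched slots stay zero, which readers treat as "no constant available". A miniature of the same scheme (the enum index values are illustrative stand-ins, not the real ICP_QAT_HW_* encodings; the offset 11 for DES/CBC/encrypt/no-convert is taken from the table above):

#include <assert.h>
#include <stdint.h>
#include <string.h>

enum { ALGO_DES = 1, MODE_CBC = 2, DIR_ENCRYPT = 0, NO_CONVERT = 0 };

/* [algo][mode][direction][convert] -> SHRAM slot, 0 == not present. */
static uint8_t cipher_offset[8][8][2][2];

int
main(void)
{
	memset(cipher_offset, 0, sizeof(cipher_offset));
	cipher_offset[ALGO_DES][MODE_CBC][DIR_ENCRYPT][NO_CONVERT] = 11;
	assert(cipher_offset[ALGO_DES][MODE_CBC][DIR_ENCRYPT][NO_CONVERT] == 11);
	return (0);
}
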
*/ + memset(icp_qat_hw_cipher_lookup_tbl, + 0, + sizeof(icp_qat_hw_cipher_lookup_tbl)); + memset(icp_qat_hw_auth_lookup_tbl, + 0, + sizeof(icp_qat_hw_auth_lookup_tbl)); + + /* Override lookup tables with the offsets into the SHRAM table + * for supported algorithms/modes */ + ICP_QAT_HW_FILL_LOOKUP_TBLS; + + /* Copy the global tables to the service instance */ + memcpy(pConstantsLookupTables->cipher_offset, + icp_qat_hw_cipher_lookup_tbl, + sizeof(pConstantsLookupTables->cipher_offset)); + memcpy(pConstantsLookupTables->auth_offset, + icp_qat_hw_auth_lookup_tbl, + sizeof(pConstantsLookupTables->auth_offset)); +} + +/** + ***************************************************************************** + * @ingroup LacSymQat + * LacSymQat_ConstantsGetCipherOffset + * + * + *****************************************************************************/ +void +LacSymQat_ConstantsGetCipherOffset(CpaInstanceHandle instanceHandle, + uint8_t algo, + uint8_t mode, + uint8_t direction, + uint8_t convert, + uint8_t *poffset) +{ + sal_service_t *pService = (sal_service_t *)instanceHandle; + lac_sym_qat_constants_t *pConstantsLookupTables; + + if (SAL_SERVICE_TYPE_COMPRESSION == pService->type) { + /* DC chaining not supported yet */ + return; + } else { + pConstantsLookupTables = &( + ((sal_crypto_service_t *)pService)->constantsLookupTables); + } + + *poffset = pConstantsLookupTables + ->cipher_offset[algo][mode][direction][convert]; +} + +/** + ***************************************************************************** + * @ingroup LacSymQat + * LacSymQat_ConstantsGetAuthOffset + * + * + *****************************************************************************/ +void +LacSymQat_ConstantsGetAuthOffset(CpaInstanceHandle instanceHandle, + uint8_t algo, + uint8_t mode, + uint8_t nested, + uint8_t *poffset) +{ + sal_service_t *pService = (sal_service_t *)instanceHandle; + lac_sym_qat_constants_t *pConstantsLookupTables; + + if (SAL_SERVICE_TYPE_COMPRESSION == pService->type) { + /* DC chaining not supported yet */ + return; + } else { + pConstantsLookupTables = &( + ((sal_crypto_service_t *)pService)->constantsLookupTables); + } + + *poffset = pConstantsLookupTables->auth_offset[algo][mode][nested]; +} diff --git a/sys/dev/qat/qat_api/common/crypto/sym/qat/lac_sym_qat_hash.c b/sys/dev/qat/qat_api/common/crypto/sym/qat/lac_sym_qat_hash.c index a39734ad16d0..b62ecf271d80 100644 --- a/sys/dev/qat/qat_api/common/crypto/sym/qat/lac_sym_qat_hash.c +++ b/sys/dev/qat/qat_api/common/crypto/sym/qat/lac_sym_qat_hash.c @@ -1,942 +1,988 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright(c) 2007-2022 Intel Corporation */ /* $FreeBSD$ */ /** *************************************************************************** * @file lac_sym_qat_hash.c * * @ingroup LacSymQatHash * * Implementation for populating QAT data structures for hash operation ***************************************************************************/ /* ******************************************************************************* * Include public/global header files ******************************************************************************* */ #include "cpa.h" #include "cpa_cy_sym.h" #include "icp_accel_devices.h" #include "icp_adf_debug.h" #include "lac_log.h" #include "lac_mem.h" #include "lac_sym.h" #include "lac_common.h" #include "lac_sym_qat.h" #include "lac_list.h" #include "lac_sal_types.h" +#include "lac_sal_types_crypto.h" #include "lac_sym_qat_hash.h" #include "lac_sym_qat_hash_defs_lookup.h" +#include "sal_hw_gen.h" /** * 
This structure contains pointers into the hash setup block of the * security descriptor. As the hash setup block contains fields that * are of variable length, pointers must be calculated to these fields * and the hash setup block is populated using these pointers. */ typedef struct lac_hash_blk_ptrs_s { icp_qat_hw_auth_setup_t *pInHashSetup; /**< inner hash setup */ Cpa8U *pInHashInitState1; /**< inner initial state 1 */ Cpa8U *pInHashInitState2; /**< inner initial state 2 */ icp_qat_hw_auth_setup_t *pOutHashSetup; /**< outer hash setup */ Cpa8U *pOutHashInitState1; /**< outer hash initial state */ } lac_hash_blk_ptrs_t; typedef struct lac_hash_blk_ptrs_optimised_s { Cpa8U *pInHashInitState1; /**< inner initial state 1 */ Cpa8U *pInHashInitState2; /**< inner initial state 2 */ } lac_hash_blk_ptrs_optimised_t; /** * This function calculates the pointers into the hash setup block * based on the control block * * @param[in] pHashControlBlock Pointer to hash control block * @param[in] pHwBlockBase pointer to base of hardware block * @param[out] pHashBlkPtrs structure containing pointers to * various fields in the hash setup block * * @return void */ static void LacSymQat_HashHwBlockPtrsInit(icp_qat_fw_auth_cd_ctrl_hdr_t *pHashControlBlock, void *pHwBlockBase, lac_hash_blk_ptrs_t *pHashBlkPtrs); static void LacSymQat_HashSetupBlockOptimisedFormatInit( const CpaCySymHashSetupData *pHashSetupData, icp_qat_fw_auth_cd_ctrl_hdr_t *pHashControlBlock, void *pHwBlockBase, icp_qat_hw_auth_mode_t qatHashMode, lac_sym_qat_hash_precompute_info_t *pPrecompute, lac_sym_qat_hash_defs_t *pHashDefs, lac_sym_qat_hash_defs_t *pOuterHashDefs); /** * This function populates the hash setup block * * @param[in] pHashSetupData Pointer to the hash context * @param[in] pHashControlBlock Pointer to hash control block * @param[in] pHwBlockBase pointer to base of hardware block * @param[in] qatHashMode QAT hash mode * @param[in] pPrecompute For auth mode, this is the pointer * to the precompute data. Otherwise this * should be set to NULL * @param[in] pHashDefs Pointer to Hash definitions * @param[in] pOuterHashDefs Pointer to Outer Hash definitions. * Required for nested hash mode only * * @return void */ static void LacSymQat_HashSetupBlockInit(const CpaCySymHashSetupData *pHashSetupData, icp_qat_fw_auth_cd_ctrl_hdr_t *pHashControlBlock, void *pHwBlockBase, icp_qat_hw_auth_mode_t qatHashMode, lac_sym_qat_hash_precompute_info_t *pPrecompute, lac_sym_qat_hash_defs_t *pHashDefs, lac_sym_qat_hash_defs_t *pOuterHashDefs); /** @ingroup LacSymQatHash */ void LacSymQat_HashGetCfgData(CpaInstanceHandle pInstance, icp_qat_hw_auth_mode_t qatHashMode, CpaCySymHashMode apiHashMode, CpaCySymHashAlgorithm apiHashAlgorithm, icp_qat_hw_auth_algo_t *pQatAlgorithm, CpaBoolean *pQatNested) { lac_sym_qat_hash_defs_t *pHashDefs = NULL; LacSymQat_HashDefsLookupGet(pInstance, apiHashAlgorithm, &pHashDefs); *pQatAlgorithm = pHashDefs->qatInfo->algoEnc; if (IS_HASH_MODE_2(qatHashMode)) { /* set bit for nested hashing */ *pQatNested = ICP_QAT_FW_AUTH_HDR_FLAG_DO_NESTED; } /* Nested hash in mode 0. 
*/ else if (CPA_CY_SYM_HASH_MODE_NESTED == apiHashMode) { /* set bit for nested hashing */ *pQatNested = ICP_QAT_FW_AUTH_HDR_FLAG_DO_NESTED; } /* mode0 - plain or mode1 - auth */ else { *pQatNested = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED; } } /** @ingroup LacSymQatHash */ void LacSymQat_HashContentDescInit(icp_qat_la_bulk_req_ftr_t *pMsg, CpaInstanceHandle instanceHandle, const CpaCySymHashSetupData *pHashSetupData, void *pHwBlockBase, Cpa32U hwBlockOffsetInQuadWords, icp_qat_fw_slice_t nextSlice, icp_qat_hw_auth_mode_t qatHashMode, CpaBoolean useSymConstantsTable, CpaBoolean useOptimisedContentDesc, + CpaBoolean useStatefulSha3ContentDesc, lac_sym_qat_hash_precompute_info_t *pPrecompute, Cpa32U *pHashBlkSizeInBytes) { - icp_qat_fw_auth_cd_ctrl_hdr_t *cd_ctrl = (icp_qat_fw_auth_cd_ctrl_hdr_t *)&(pMsg->cd_ctrl); lac_sym_qat_hash_defs_t *pHashDefs = NULL; lac_sym_qat_hash_defs_t *pOuterHashDefs = NULL; Cpa32U hashSetupBlkSize = 0; /* setup the offset in QuadWords into the hw blk */ - cd_ctrl->hash_cfg_offset = hwBlockOffsetInQuadWords; + cd_ctrl->hash_cfg_offset = (Cpa8U)hwBlockOffsetInQuadWords; ICP_QAT_FW_COMN_NEXT_ID_SET(cd_ctrl, nextSlice); ICP_QAT_FW_COMN_CURR_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_AUTH); LacSymQat_HashDefsLookupGet(instanceHandle, pHashSetupData->hashAlgorithm, &pHashDefs); /* Hmac in mode 2 TLS */ if (IS_HASH_MODE_2(qatHashMode)) { - /* Set bit for nested hashing. - * Make sure not to overwrite other flags in hash_flags byte. - */ - ICP_QAT_FW_HASH_FLAG_AUTH_HDR_NESTED_SET( - cd_ctrl->hash_flags, ICP_QAT_FW_AUTH_HDR_FLAG_DO_NESTED); + if (isCyGen4x((sal_crypto_service_t *)instanceHandle)) { + /* CPM2.0 has a dedicated bit for HMAC mode2 */ + ICP_QAT_FW_HASH_FLAG_MODE2_SET(cd_ctrl->hash_flags, + QAT_FW_LA_MODE2); + } else { + /* Set bit for nested hashing. + * Make sure not to overwrite other flags in hash_flags + * byte. + */ + ICP_QAT_FW_HASH_FLAG_AUTH_HDR_NESTED_SET( + cd_ctrl->hash_flags, + ICP_QAT_FW_AUTH_HDR_FLAG_DO_NESTED); + } } /* Nested hash in mode 0 */ else if (CPA_CY_SYM_HASH_MODE_NESTED == pHashSetupData->hashMode) { /* Set bit for nested hashing. * Make sure not to overwrite other flags in hash_flags byte. */ ICP_QAT_FW_HASH_FLAG_AUTH_HDR_NESTED_SET( cd_ctrl->hash_flags, ICP_QAT_FW_AUTH_HDR_FLAG_DO_NESTED); } /* mode0 - plain or mode1 - auth */ else { ICP_QAT_FW_HASH_FLAG_AUTH_HDR_NESTED_SET( cd_ctrl->hash_flags, ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED); } + /* Set skip state load flags */ + if (useStatefulSha3ContentDesc) { + /* Here both skip state load flags are set. FW reads them based + * on partial packet type. 
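
The generation split above reduces to: CPM2.0 (gen4) parts carry a dedicated HMAC mode-2 bit, older parts express mode 2 via the nested-hash flag, and in both cases the other bits of hash_flags must survive. A sketch with made-up bit positions rather than the real ICP_QAT_FW_* encodings:

#include <stdint.h>

#define HASH_FLAG_NESTED	(1u << 0)	/* illustrative bit positions */
#define HASH_FLAG_MODE2		(1u << 1)

static uint8_t
hmac_mode2_hash_flags(uint8_t flags, int is_gen4)
{
	/* OR into the byte so previously set flags are preserved. */
	if (is_gen4)
		flags |= HASH_FLAG_MODE2;
	else
		flags |= HASH_FLAG_NESTED;
	return (flags);
}
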
*/ + ICP_QAT_FW_HASH_FLAG_SKIP_INNER_STATE1_LOAD_SET( + cd_ctrl->hash_flags, QAT_FW_LA_SKIP_INNER_STATE1_LOAD); + ICP_QAT_FW_HASH_FLAG_SKIP_OUTER_STATE1_LOAD_SET( + cd_ctrl->hash_flags, QAT_FW_LA_SKIP_OUTER_STATE1_LOAD); + } + /* set the final digest size */ - cd_ctrl->final_sz = pHashSetupData->digestResultLenInBytes; + cd_ctrl->final_sz = (Cpa8U)pHashSetupData->digestResultLenInBytes; /* set the state1 size */ - cd_ctrl->inner_state1_sz = - LAC_ALIGN_POW2_ROUNDUP(pHashDefs->qatInfo->state1Length, - LAC_QUAD_WORD_IN_BYTES); + if (useStatefulSha3ContentDesc) { + cd_ctrl->inner_state1_sz = + LAC_ALIGN_POW2_ROUNDUP(LAC_HASH_SHA3_STATEFUL_STATE_SIZE, + LAC_QUAD_WORD_IN_BYTES); + } else { + cd_ctrl->inner_state1_sz = + LAC_ALIGN_POW2_ROUNDUP(pHashDefs->qatInfo->state1Length, + LAC_QUAD_WORD_IN_BYTES); + } /* set the inner result size to the digest length */ - cd_ctrl->inner_res_sz = pHashDefs->algInfo->digestLength; + cd_ctrl->inner_res_sz = (Cpa8U)pHashDefs->algInfo->digestLength; /* set the state2 size - only for mode 1 Auth algos and AES CBC MAC */ if (IS_HASH_MODE_1(qatHashMode) || pHashSetupData->hashAlgorithm == CPA_CY_SYM_HASH_AES_CBC_MAC || pHashSetupData->hashAlgorithm == CPA_CY_SYM_HASH_ZUC_EIA3) { cd_ctrl->inner_state2_sz = LAC_ALIGN_POW2_ROUNDUP(pHashDefs->qatInfo->state2Length, LAC_QUAD_WORD_IN_BYTES); } else { cd_ctrl->inner_state2_sz = 0; } - cd_ctrl->inner_state2_offset = cd_ctrl->hash_cfg_offset + - LAC_BYTES_TO_QUADWORDS(sizeof(icp_qat_hw_auth_setup_t) + - cd_ctrl->inner_state1_sz); + if (useSymConstantsTable) { + cd_ctrl->inner_state2_offset = + LAC_BYTES_TO_QUADWORDS(cd_ctrl->inner_state1_sz); + + /* size of inner part of hash setup block */ + hashSetupBlkSize = + cd_ctrl->inner_state1_sz + cd_ctrl->inner_state2_sz; + } else { + cd_ctrl->inner_state2_offset = cd_ctrl->hash_cfg_offset + + LAC_BYTES_TO_QUADWORDS(sizeof(icp_qat_hw_auth_setup_t) + + cd_ctrl->inner_state1_sz); - /* size of inner part of hash setup block */ - hashSetupBlkSize = sizeof(icp_qat_hw_auth_setup_t) + - cd_ctrl->inner_state1_sz + cd_ctrl->inner_state2_sz; + /* size of inner part of hash setup block */ + hashSetupBlkSize = sizeof(icp_qat_hw_auth_setup_t) + + cd_ctrl->inner_state1_sz + cd_ctrl->inner_state2_sz; + } /* For nested hashing - Fill in the outer fields */ if (CPA_CY_SYM_HASH_MODE_NESTED == pHashSetupData->hashMode || IS_HASH_MODE_2(qatHashMode)) { /* For nested - use the outer algorithm. This covers TLS and * nested hash. For HMAC mode2 use inner algorithm again */ CpaCySymHashAlgorithm outerAlg = (CPA_CY_SYM_HASH_MODE_NESTED == pHashSetupData->hashMode) ? 
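
The state2 offset computed above depends on whether the shared constants table supplies the auth setup words: with the table, no icp_qat_hw_auth_setup_t header is written into the block, so state2 follows state1 directly. A sketch in quad-word units, assuming 8-byte quad words and auth_setup_sz standing in for sizeof(icp_qat_hw_auth_setup_t):

#include <stdint.h>

#define BYTES_TO_QW(b)	((b) / 8)	/* LAC_BYTES_TO_QUADWORDS equivalent */

static uint32_t
inner_state2_offset_qw(uint32_t hash_cfg_offset_qw, uint32_t state1_sz,
    uint32_t auth_setup_sz, int use_constants_table)
{
	if (use_constants_table)
		return (BYTES_TO_QW(state1_sz));
	return (hash_cfg_offset_qw + BYTES_TO_QW(auth_setup_sz + state1_sz));
}
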
pHashSetupData->nestedModeSetupData.outerHashAlgorithm : pHashSetupData->hashAlgorithm; LacSymQat_HashDefsLookupGet(instanceHandle, outerAlg, &pOuterHashDefs); /* outer config offset */ cd_ctrl->outer_config_offset = cd_ctrl->inner_state2_offset + LAC_BYTES_TO_QUADWORDS(cd_ctrl->inner_state2_sz); - cd_ctrl->outer_state1_sz = - LAC_ALIGN_POW2_ROUNDUP(pOuterHashDefs->algInfo->stateSize, - LAC_QUAD_WORD_IN_BYTES); + if (useStatefulSha3ContentDesc) { + cd_ctrl->outer_state1_sz = LAC_ALIGN_POW2_ROUNDUP( + LAC_HASH_SHA3_STATEFUL_STATE_SIZE, + LAC_QUAD_WORD_IN_BYTES); + } else { + cd_ctrl->outer_state1_sz = LAC_ALIGN_POW2_ROUNDUP( + pOuterHashDefs->algInfo->stateSize, + LAC_QUAD_WORD_IN_BYTES); + } /* outer result size */ - cd_ctrl->outer_res_sz = pOuterHashDefs->algInfo->digestLength; + cd_ctrl->outer_res_sz = + (Cpa8U)pOuterHashDefs->algInfo->digestLength; /* outer_prefix_offset will be the size of the inner prefix data * plus the hash state storage size. */ /* The prefix buffer is part of the ReqParams, so this param - * will be - * setup where ReqParams are set up */ + * will be setup where ReqParams are set up */ /* add on size of outer part of hash block */ hashSetupBlkSize += sizeof(icp_qat_hw_auth_setup_t) + cd_ctrl->outer_state1_sz; } else { cd_ctrl->outer_config_offset = 0; cd_ctrl->outer_state1_sz = 0; cd_ctrl->outer_res_sz = 0; } if (CPA_CY_SYM_HASH_SNOW3G_UIA2 == pHashSetupData->hashAlgorithm) { /* add the size for the cipher config word, the key and the IV*/ hashSetupBlkSize += sizeof(icp_qat_hw_cipher_config_t) + pHashSetupData->authModeSetupData.authKeyLenInBytes + ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ; } *pHashBlkSizeInBytes = hashSetupBlkSize; if (useOptimisedContentDesc) { LacSymQat_HashSetupBlockOptimisedFormatInit(pHashSetupData, cd_ctrl, pHwBlockBase, qatHashMode, pPrecompute, pHashDefs, pOuterHashDefs); } else if (!useSymConstantsTable) { /***************************************************************************** * Populate Hash Setup block * *****************************************************************************/ LacSymQat_HashSetupBlockInit(pHashSetupData, cd_ctrl, pHwBlockBase, qatHashMode, pPrecompute, pHashDefs, pOuterHashDefs); } } /* This fn populates fields in both the CD ctrl block and the ReqParams block * which describe the Hash ReqParams: * cd_ctrl.outer_prefix_offset * cd_ctrl.outer_prefix_sz * req_params.inner_prefix_sz/aad_sz * req_params.hash_state_sz * req_params.auth_res_sz * */ void LacSymQat_HashSetupReqParamsMetaData( icp_qat_la_bulk_req_ftr_t *pMsg, CpaInstanceHandle instanceHandle, const CpaCySymHashSetupData *pHashSetupData, CpaBoolean hashStateBuffer, icp_qat_hw_auth_mode_t qatHashMode, CpaBoolean digestVerify) { icp_qat_fw_auth_cd_ctrl_hdr_t *cd_ctrl = NULL; icp_qat_la_auth_req_params_t *pHashReqParams = NULL; lac_sym_qat_hash_defs_t *pHashDefs = NULL; cd_ctrl = (icp_qat_fw_auth_cd_ctrl_hdr_t *)&(pMsg->cd_ctrl); pHashReqParams = (icp_qat_la_auth_req_params_t *)(&(pMsg->serv_specif_rqpars)); LacSymQat_HashDefsLookupGet(instanceHandle, pHashSetupData->hashAlgorithm, &pHashDefs); /* Hmac in mode 2 TLS */ if (IS_HASH_MODE_2(qatHashMode)) { /* Inner and outer prefixes are the block length */ pHashReqParams->u2.inner_prefix_sz = - pHashDefs->algInfo->blockLength; - cd_ctrl->outer_prefix_sz = pHashDefs->algInfo->blockLength; + (Cpa8U)pHashDefs->algInfo->blockLength; + cd_ctrl->outer_prefix_sz = + (Cpa8U)pHashDefs->algInfo->blockLength; cd_ctrl->outer_prefix_offset = LAC_BYTES_TO_QUADWORDS( LAC_ALIGN_POW2_ROUNDUP((pHashReqParams->u2.inner_prefix_sz), 
LAC_QUAD_WORD_IN_BYTES)); } /* Nested hash in mode 0 */ else if (CPA_CY_SYM_HASH_MODE_NESTED == pHashSetupData->hashMode) { /* set inner and outer prefixes */ pHashReqParams->u2.inner_prefix_sz = - pHashSetupData->nestedModeSetupData.innerPrefixLenInBytes; + (Cpa8U)pHashSetupData->nestedModeSetupData + .innerPrefixLenInBytes; cd_ctrl->outer_prefix_sz = - pHashSetupData->nestedModeSetupData.outerPrefixLenInBytes; + (Cpa8U)pHashSetupData->nestedModeSetupData + .outerPrefixLenInBytes; cd_ctrl->outer_prefix_offset = LAC_BYTES_TO_QUADWORDS( LAC_ALIGN_POW2_ROUNDUP((pHashReqParams->u2.inner_prefix_sz), LAC_QUAD_WORD_IN_BYTES)); } /* mode0 - plain or mode1 - auth */ else { Cpa16U aadDataSize = 0; /* For Auth Encrypt set the aad size */ if (CPA_CY_SYM_HASH_AES_CCM == pHashSetupData->hashAlgorithm) { /* at the beginning of the buffer there is B0 block */ aadDataSize = LAC_HASH_AES_CCM_BLOCK_SIZE; /* then, if there is some 'a' data, the buffer will * store encoded * length of 'a' and 'a' itself */ if (pHashSetupData->authModeSetupData.aadLenInBytes > 0) { /* as the QAT API puts the requirement on the * pAdditionalAuthData not to be bigger than 240 * bytes then we * just need 2 bytes to store encoded length of * 'a' */ aadDataSize += sizeof(Cpa16U); - aadDataSize += pHashSetupData->authModeSetupData - .aadLenInBytes; + aadDataSize += + (Cpa16U)pHashSetupData->authModeSetupData + .aadLenInBytes; } /* round the aad size to the multiple of CCM block * size.*/ pHashReqParams->u2.aad_sz = LAC_ALIGN_POW2_ROUNDUP(aadDataSize, LAC_HASH_AES_CCM_BLOCK_SIZE); } else if (CPA_CY_SYM_HASH_AES_GCM == pHashSetupData->hashAlgorithm) { aadDataSize = - pHashSetupData->authModeSetupData.aadLenInBytes; + (Cpa16U) + pHashSetupData->authModeSetupData.aadLenInBytes; /* round the aad size to the multiple of GCM hash block * size. */ pHashReqParams->u2.aad_sz = LAC_ALIGN_POW2_ROUNDUP(aadDataSize, LAC_HASH_AES_GCM_BLOCK_SIZE); } else { pHashReqParams->u2.aad_sz = 0; } cd_ctrl->outer_prefix_sz = 0; cd_ctrl->outer_prefix_offset = 0; } /* If there is a hash state prefix buffer */ if (CPA_TRUE == hashStateBuffer) { /* Note, this sets up size for both aad and non-aad cases */ pHashReqParams->hash_state_sz = LAC_BYTES_TO_QUADWORDS( LAC_ALIGN_POW2_ROUNDUP(pHashReqParams->u2.inner_prefix_sz, LAC_QUAD_WORD_IN_BYTES) + LAC_ALIGN_POW2_ROUNDUP(cd_ctrl->outer_prefix_sz, LAC_QUAD_WORD_IN_BYTES)); } else { pHashReqParams->hash_state_sz = 0; } if (CPA_TRUE == digestVerify) { /* auth result size in bytes to be read in for a verify * operation */ pHashReqParams->auth_res_sz = - pHashSetupData->digestResultLenInBytes; + (Cpa8U)pHashSetupData->digestResultLenInBytes; } else { pHashReqParams->auth_res_sz = 0; } pHashReqParams->resrvd1 = 0; } void LacSymQat_HashHwBlockPtrsInit(icp_qat_fw_auth_cd_ctrl_hdr_t *cd_ctrl, void *pHwBlockBase, lac_hash_blk_ptrs_t *pHashBlkPtrs) { /* encoded offset for inner config is converted to a byte offset. 
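
Worked through for CCM, the AAD area sized above holds the B0 block, then, only when AAD is present, a two-byte encoded length plus the AAD itself, rounded up to the 16-byte CCM block: 16 bytes of AAD give 16 + 2 + 16 = 34, rounded to 48. A sketch:

#include <stdint.h>

#define CCM_BLOCK_SIZE	16	/* LAC_HASH_AES_CCM_BLOCK_SIZE */
#define ROUND_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))	/* a: power of two */

static uint16_t
ccm_aad_area_size(uint16_t aad_len)
{
	uint16_t sz = CCM_BLOCK_SIZE;	/* B0 block comes first */

	if (aad_len > 0)
		sz += (uint16_t)(sizeof(uint16_t) + aad_len); /* len(a) || a */
	return ((uint16_t)ROUND_UP(sz, CCM_BLOCK_SIZE));
}
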
*/ pHashBlkPtrs->pInHashSetup = (icp_qat_hw_auth_setup_t *)((Cpa8U *)pHwBlockBase + (cd_ctrl->hash_cfg_offset * LAC_QUAD_WORD_IN_BYTES)); pHashBlkPtrs->pInHashInitState1 = (Cpa8U *)pHashBlkPtrs->pInHashSetup + sizeof(icp_qat_hw_auth_setup_t); pHashBlkPtrs->pInHashInitState2 = (Cpa8U *)(pHashBlkPtrs->pInHashInitState1) + cd_ctrl->inner_state1_sz; pHashBlkPtrs->pOutHashSetup = (icp_qat_hw_auth_setup_t *)((Cpa8U *)(pHashBlkPtrs ->pInHashInitState2) + cd_ctrl->inner_state2_sz); pHashBlkPtrs->pOutHashInitState1 = (Cpa8U *)(pHashBlkPtrs->pOutHashSetup) + sizeof(icp_qat_hw_auth_setup_t); } static void LacSymQat_HashSetupBlockInit(const CpaCySymHashSetupData *pHashSetupData, icp_qat_fw_auth_cd_ctrl_hdr_t *pHashControlBlock, void *pHwBlockBase, icp_qat_hw_auth_mode_t qatHashMode, lac_sym_qat_hash_precompute_info_t *pPrecompute, lac_sym_qat_hash_defs_t *pHashDefs, lac_sym_qat_hash_defs_t *pOuterHashDefs) { Cpa32U innerConfig = 0; lac_hash_blk_ptrs_t hashBlkPtrs = { 0 }; - Cpa32U aed_hash_cmp_length = 0; + Cpa32U aedHashCmpLength = 0; LacSymQat_HashHwBlockPtrsInit(pHashControlBlock, pHwBlockBase, &hashBlkPtrs); innerConfig = ICP_QAT_HW_AUTH_CONFIG_BUILD( qatHashMode, pHashDefs->qatInfo->algoEnc, pHashSetupData->digestResultLenInBytes); /* Set the Inner hash configuration */ hashBlkPtrs.pInHashSetup->auth_config.config = innerConfig; hashBlkPtrs.pInHashSetup->auth_config.reserved = 0; /* For mode 1 pre-computes for auth algorithms */ if (IS_HASH_MODE_1(qatHashMode) || CPA_CY_SYM_HASH_AES_CBC_MAC == pHashSetupData->hashAlgorithm || CPA_CY_SYM_HASH_ZUC_EIA3 == pHashSetupData->hashAlgorithm) { /* for HMAC in mode 1 authCounter is the block size * else the authCounter is 0. The firmware expects the counter * to be * big endian */ LAC_MEM_SHARED_WRITE_SWAP( hashBlkPtrs.pInHashSetup->auth_counter.counter, pHashDefs->qatInfo->authCounter); /* state 1 is set to 0 for the following algorithms */ if ((CPA_CY_SYM_HASH_AES_XCBC == pHashSetupData->hashAlgorithm) || (CPA_CY_SYM_HASH_AES_CMAC == pHashSetupData->hashAlgorithm) || (CPA_CY_SYM_HASH_AES_CBC_MAC == pHashSetupData->hashAlgorithm) || (CPA_CY_SYM_HASH_KASUMI_F9 == pHashSetupData->hashAlgorithm) || (CPA_CY_SYM_HASH_SNOW3G_UIA2 == pHashSetupData->hashAlgorithm) || (CPA_CY_SYM_HASH_AES_CCM == pHashSetupData->hashAlgorithm) || (CPA_CY_SYM_HASH_AES_GMAC == pHashSetupData->hashAlgorithm) || (CPA_CY_SYM_HASH_AES_GCM == pHashSetupData->hashAlgorithm) || (CPA_CY_SYM_HASH_ZUC_EIA3 == pHashSetupData->hashAlgorithm)) { LAC_OS_BZERO(hashBlkPtrs.pInHashInitState1, pHashDefs->qatInfo->state1Length); } /* Pad remaining bytes of sha1 precomputes */ if (CPA_CY_SYM_HASH_SHA1 == pHashSetupData->hashAlgorithm) { Cpa32U state1PadLen = 0; Cpa32U state2PadLen = 0; if (pHashControlBlock->inner_state1_sz > pHashDefs->algInfo->stateSize) { state1PadLen = pHashControlBlock->inner_state1_sz - pHashDefs->algInfo->stateSize; } if (pHashControlBlock->inner_state2_sz > pHashDefs->algInfo->stateSize) { state2PadLen = pHashControlBlock->inner_state2_sz - pHashDefs->algInfo->stateSize; } if (state1PadLen > 0) { LAC_OS_BZERO(hashBlkPtrs.pInHashInitState1 + pHashDefs->algInfo->stateSize, state1PadLen); } if (state2PadLen > 0) { LAC_OS_BZERO(hashBlkPtrs.pInHashInitState2 + pHashDefs->algInfo->stateSize, state2PadLen); } } pPrecompute->state1Size = pHashDefs->qatInfo->state1Length; pPrecompute->state2Size = pHashDefs->qatInfo->state2Length; /* Set the destination for pre-compute state1 data to be written */ pPrecompute->pState1 = hashBlkPtrs.pInHashInitState1; /* Set the destination for 
pre-compute state1 data to be written */ pPrecompute->pState2 = hashBlkPtrs.pInHashInitState2; } /* For digest and nested digest */ else { Cpa32U padLen = pHashControlBlock->inner_state1_sz - pHashDefs->algInfo->stateSize; /* counter set to 0 */ hashBlkPtrs.pInHashSetup->auth_counter.counter = 0; /* set the inner hash state 1 */ memcpy(hashBlkPtrs.pInHashInitState1, pHashDefs->algInfo->initState, pHashDefs->algInfo->stateSize); if (padLen > 0) { LAC_OS_BZERO(hashBlkPtrs.pInHashInitState1 + pHashDefs->algInfo->stateSize, padLen); } } hashBlkPtrs.pInHashSetup->auth_counter.reserved = 0; /* Fill in the outer part of the hash setup block */ if ((CPA_CY_SYM_HASH_MODE_NESTED == pHashSetupData->hashMode || IS_HASH_MODE_2(qatHashMode)) && (NULL != pOuterHashDefs)) { Cpa32U outerConfig = ICP_QAT_HW_AUTH_CONFIG_BUILD( qatHashMode, pOuterHashDefs->qatInfo->algoEnc, pHashSetupData->digestResultLenInBytes); Cpa32U padLen = pHashControlBlock->outer_state1_sz - pOuterHashDefs->algInfo->stateSize; /* populate the auth config */ hashBlkPtrs.pOutHashSetup->auth_config.config = outerConfig; hashBlkPtrs.pOutHashSetup->auth_config.reserved = 0; /* outer Counter set to 0 */ hashBlkPtrs.pOutHashSetup->auth_counter.counter = 0; hashBlkPtrs.pOutHashSetup->auth_counter.reserved = 0; /* set outer hash state 1 */ memcpy(hashBlkPtrs.pOutHashInitState1, pOuterHashDefs->algInfo->initState, pOuterHashDefs->algInfo->stateSize); if (padLen > 0) { LAC_OS_BZERO(hashBlkPtrs.pOutHashInitState1 + pOuterHashDefs->algInfo->stateSize, padLen); } } if (CPA_CY_SYM_HASH_SNOW3G_UIA2 == pHashSetupData->hashAlgorithm) { icp_qat_hw_cipher_config_t *pCipherConfig = (icp_qat_hw_cipher_config_t *)hashBlkPtrs.pOutHashSetup; pCipherConfig->val = ICP_QAT_HW_CIPHER_CONFIG_BUILD( ICP_QAT_HW_CIPHER_ECB_MODE, ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2, ICP_QAT_HW_CIPHER_KEY_CONVERT, ICP_QAT_HW_CIPHER_ENCRYPT, - aed_hash_cmp_length); + aedHashCmpLength); pCipherConfig->reserved = 0; memcpy((Cpa8U *)pCipherConfig + sizeof(icp_qat_hw_cipher_config_t), pHashSetupData->authModeSetupData.authKey, pHashSetupData->authModeSetupData.authKeyLenInBytes); LAC_OS_BZERO( (Cpa8U *)pCipherConfig + sizeof(icp_qat_hw_cipher_config_t) + pHashSetupData->authModeSetupData.authKeyLenInBytes, ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ); } else if (CPA_CY_SYM_HASH_ZUC_EIA3 == pHashSetupData->hashAlgorithm) { icp_qat_hw_cipher_config_t *pCipherConfig = (icp_qat_hw_cipher_config_t *)hashBlkPtrs.pOutHashSetup; pCipherConfig->val = ICP_QAT_HW_CIPHER_CONFIG_BUILD( ICP_QAT_HW_CIPHER_ECB_MODE, ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3, ICP_QAT_HW_CIPHER_KEY_CONVERT, ICP_QAT_HW_CIPHER_ENCRYPT, - aed_hash_cmp_length); + aedHashCmpLength); pCipherConfig->reserved = 0; memcpy((Cpa8U *)pCipherConfig + sizeof(icp_qat_hw_cipher_config_t), pHashSetupData->authModeSetupData.authKey, pHashSetupData->authModeSetupData.authKeyLenInBytes); LAC_OS_BZERO( (Cpa8U *)pCipherConfig + sizeof(icp_qat_hw_cipher_config_t) + pHashSetupData->authModeSetupData.authKeyLenInBytes, ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ); } } static void LacSymQat_HashOpHwBlockPtrsInit(icp_qat_fw_auth_cd_ctrl_hdr_t *cd_ctrl, void *pHwBlockBase, lac_hash_blk_ptrs_optimised_t *pHashBlkPtrs) { pHashBlkPtrs->pInHashInitState1 = (((Cpa8U *)pHwBlockBase) + 16); pHashBlkPtrs->pInHashInitState2 = (Cpa8U *)(pHashBlkPtrs->pInHashInitState1) + cd_ctrl->inner_state1_sz; } static void LacSymQat_HashSetupBlockOptimisedFormatInit( const CpaCySymHashSetupData *pHashSetupData, icp_qat_fw_auth_cd_ctrl_hdr_t *pHashControlBlock, void *pHwBlockBase, icp_qat_hw_auth_mode_t 
qatHashMode, lac_sym_qat_hash_precompute_info_t *pPrecompute, lac_sym_qat_hash_defs_t *pHashDefs, lac_sym_qat_hash_defs_t *pOuterHashDefs) { Cpa32U state1PadLen = 0; Cpa32U state2PadLen = 0; lac_hash_blk_ptrs_optimised_t pHashBlkPtrs = { 0 }; LacSymQat_HashOpHwBlockPtrsInit(pHashControlBlock, pHwBlockBase, &pHashBlkPtrs); if (pHashControlBlock->inner_state1_sz > pHashDefs->algInfo->stateSize) { state1PadLen = pHashControlBlock->inner_state1_sz - pHashDefs->algInfo->stateSize; } if (pHashControlBlock->inner_state2_sz > pHashDefs->algInfo->stateSize) { state2PadLen = pHashControlBlock->inner_state2_sz - pHashDefs->algInfo->stateSize; } if (state1PadLen > 0) { LAC_OS_BZERO(pHashBlkPtrs.pInHashInitState1 + pHashDefs->algInfo->stateSize, state1PadLen); } if (state2PadLen > 0) { LAC_OS_BZERO(pHashBlkPtrs.pInHashInitState2 + pHashDefs->algInfo->stateSize, state2PadLen); } pPrecompute->state1Size = pHashDefs->qatInfo->state1Length; pPrecompute->state2Size = pHashDefs->qatInfo->state2Length; /* Set the destination for pre-compute state1 data to be written */ pPrecompute->pState1 = pHashBlkPtrs.pInHashInitState1; /* Set the destination for pre-compute state1 data to be written */ pPrecompute->pState2 = pHashBlkPtrs.pInHashInitState2; } void LacSymQat_HashStatePrefixAadBufferSizeGet( icp_qat_la_bulk_req_ftr_t *pMsg, lac_sym_qat_hash_state_buffer_info_t *pHashStateBuf) { const icp_qat_fw_auth_cd_ctrl_hdr_t *cd_ctrl; icp_qat_la_auth_req_params_t *pHashReqParams; cd_ctrl = (icp_qat_fw_auth_cd_ctrl_hdr_t *)&(pMsg->cd_ctrl); pHashReqParams = (icp_qat_la_auth_req_params_t *)(&(pMsg->serv_specif_rqpars)); /* hash state storage needed to support partial packets. Space reserved * for this in all cases */ pHashStateBuf->stateStorageSzQuadWords = LAC_BYTES_TO_QUADWORDS( sizeof(icp_qat_hw_auth_counter_t) + cd_ctrl->inner_state1_sz); pHashStateBuf->prefixAadSzQuadWords = pHashReqParams->hash_state_sz; } void LacSymQat_HashStatePrefixAadBufferPopulate( lac_sym_qat_hash_state_buffer_info_t *pHashStateBuf, icp_qat_la_bulk_req_ftr_t *pMsg, Cpa8U *pInnerPrefixAad, Cpa8U innerPrefixSize, Cpa8U *pOuterPrefix, Cpa8U outerPrefixSize) { const icp_qat_fw_auth_cd_ctrl_hdr_t *cd_ctrl = (icp_qat_fw_auth_cd_ctrl_hdr_t *)&(pMsg->cd_ctrl); icp_qat_la_auth_req_params_t *pHashReqParams = (icp_qat_la_auth_req_params_t *)(&(pMsg->serv_specif_rqpars)); /* * Let S be the supplied secret * S1 = S/2 if S is even and (S/2 + 1) if S is odd. * Set length S2 (inner prefix) = S1 and the start address * of S2 is S[S1/2] i.e. 
if S is odd then S2 starts at the last byte of * S1 * _____________________________________________________________ * | outer prefix | padding | * |________________| | * | | * |____________________________________________________________| * | inner prefix | padding | * |________________| | * | | * |____________________________________________________________| * */ if (NULL != pInnerPrefixAad) { Cpa8U *pLocalInnerPrefix = (Cpa8U *)(pHashStateBuf->pData) + LAC_QUADWORDS_TO_BYTES( pHashStateBuf->stateStorageSzQuadWords); Cpa8U padding = pHashReqParams->u2.inner_prefix_sz - innerPrefixSize; /* copy the inner prefix or aad data */ memcpy(pLocalInnerPrefix, pInnerPrefixAad, innerPrefixSize); /* Reset with zeroes any area reserved for padding in this block */ if (0 < padding) { LAC_OS_BZERO(pLocalInnerPrefix + innerPrefixSize, padding); } } if (NULL != pOuterPrefix) { Cpa8U *pLocalOuterPrefix = (Cpa8U *)pHashStateBuf->pData + LAC_QUADWORDS_TO_BYTES( pHashStateBuf->stateStorageSzQuadWords + cd_ctrl->outer_prefix_offset); Cpa8U padding = LAC_QUADWORDS_TO_BYTES( pHashStateBuf->prefixAadSzQuadWords) - pHashReqParams->u2.inner_prefix_sz - outerPrefixSize; /* copy the outer prefix */ memcpy(pLocalOuterPrefix, pOuterPrefix, outerPrefixSize); /* Reset with zeroes any area reserved for padding in this block */ if (0 < padding) { LAC_OS_BZERO(pLocalOuterPrefix + outerPrefixSize, padding); } } } inline CpaStatus LacSymQat_HashRequestParamsPopulate( icp_qat_fw_la_bulk_req_t *pReq, Cpa32U authOffsetInBytes, Cpa32U authLenInBytes, sal_service_t *pService, lac_sym_qat_hash_state_buffer_info_t *pHashStateBuf, Cpa32U packetType, Cpa32U hashResultSize, CpaBoolean digestVerify, Cpa8U *pAuthResult, CpaCySymHashAlgorithm alg, - void *hkdf_secret) + void *pHKDFSecret) { Cpa64U authResultPhys = 0; icp_qat_fw_la_auth_req_params_t *pHashReqParams; pHashReqParams = (icp_qat_fw_la_auth_req_params_t *)((Cpa8U *)&(pReq->serv_specif_rqpars) + ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET); pHashReqParams->auth_off = authOffsetInBytes; pHashReqParams->auth_len = authLenInBytes; /* Set the physical location of secret for HKDF */ - if (NULL != hkdf_secret) { + if (NULL != pHKDFSecret) { LAC_MEM_SHARED_WRITE_VIRT_TO_PHYS_PTR_EXTERNAL( - (*pService), pHashReqParams->u1.aad_adr, hkdf_secret); + (*pService), pHashReqParams->u1.aad_adr, pHKDFSecret); - if (pHashReqParams->u1.aad_adr == 0) { + if (0 == pHashReqParams->u1.aad_adr) { LAC_LOG_ERROR( "Unable to get the physical address of the" " HKDF secret\n"); return CPA_STATUS_FAIL; } } /* For a Full packet or last partial need to set the digest result * pointer * and the auth result field */ if (NULL != pAuthResult) { authResultPhys = LAC_OS_VIRT_TO_PHYS_EXTERNAL((*pService), (void *)pAuthResult); if (authResultPhys == 0) { LAC_LOG_ERROR( "Unable to get the physical address of the" " auth result\n"); return CPA_STATUS_FAIL; } pHashReqParams->auth_res_addr = authResultPhys; } else { pHashReqParams->auth_res_addr = 0; } if (CPA_TRUE == digestVerify) { /* auth result size in bytes to be read in for a verify * operation */ - pHashReqParams->auth_res_sz = hashResultSize; + pHashReqParams->auth_res_sz = (Cpa8U)hashResultSize; } else { pHashReqParams->auth_res_sz = 0; } /* If there is a hash state prefix buffer */ if (NULL != pHashStateBuf) { /* Only write the pointer to the buffer if the size is greater * than 0 * this will be the case for plain and auth mode due to the * state storage required for partial packets and for nested * mode (when * the prefix data is > 0) */ if 
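
Both prefix copies in the function above follow the same copy-then-zero-pad pattern into a quad-word aligned slot; a generic sketch:

#include <stdint.h>
#include <string.h>

/* Copy inner/outer prefix (or AAD) data into its aligned slot and zero
 * whatever padding rounds the slot up. */
static void
prefix_copy_and_pad(uint8_t *slot, uint32_t slot_size, const uint8_t *data,
    uint8_t data_size)
{
	memcpy(slot, data, data_size);
	if (slot_size > data_size)
		memset(slot + data_size, 0, slot_size - data_size);
}
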
((pHashStateBuf->stateStorageSzQuadWords + pHashStateBuf->prefixAadSzQuadWords) > 0) { /* For the first partial packet, the QAT expects the * pointer to the * inner prefix even if there is no memory allocated for * this. The * QAT will internally calculate where to write the * state back. */ if ((ICP_QAT_FW_LA_PARTIAL_START == packetType) || (ICP_QAT_FW_LA_PARTIAL_NONE == packetType)) { // prefix_addr changed to auth_partial_st_prefix pHashReqParams->u1.auth_partial_st_prefix = ((pHashStateBuf->pDataPhys) + LAC_QUADWORDS_TO_BYTES( pHashStateBuf ->stateStorageSzQuadWords)); } else { pHashReqParams->u1.auth_partial_st_prefix = pHashStateBuf->pDataPhys; } } /* nested mode when the prefix data is 0 */ else { pHashReqParams->u1.auth_partial_st_prefix = 0; } /* For middle & last partial, state size is the hash state * storage * if hash mode 2 this will include the prefix data */ if ((ICP_QAT_FW_LA_PARTIAL_MID == packetType) || (ICP_QAT_FW_LA_PARTIAL_END == packetType)) { pHashReqParams->hash_state_sz = (pHashStateBuf->stateStorageSzQuadWords + pHashStateBuf->prefixAadSzQuadWords); } /* For full packets and first partials set the state size to * that of * the prefix/aad. prefix includes both the inner and outer * prefix */ else { pHashReqParams->hash_state_sz = pHashStateBuf->prefixAadSzQuadWords; } } else { pHashReqParams->u1.auth_partial_st_prefix = 0; pHashReqParams->hash_state_sz = 0; } /* GMAC only */ if (CPA_CY_SYM_HASH_AES_GMAC == alg) { pHashReqParams->hash_state_sz = 0; pHashReqParams->u1.aad_adr = 0; } /* This field is only used by TLS requests */ /* In TLS case this is set after this function is called */ pHashReqParams->resrvd1 = 0; return CPA_STATUS_SUCCESS; } diff --git a/sys/dev/qat/qat_api/common/crypto/sym/qat/lac_sym_qat_hash_defs_lookup.c b/sys/dev/qat/qat_api/common/crypto/sym/qat/lac_sym_qat_hash_defs_lookup.c index 23a77f19cca6..5a13241eaf01 100644 --- a/sys/dev/qat/qat_api/common/crypto/sym/qat/lac_sym_qat_hash_defs_lookup.c +++ b/sys/dev/qat/qat_api/common/crypto/sym/qat/lac_sym_qat_hash_defs_lookup.c @@ -1,491 +1,513 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright(c) 2007-2022 Intel Corporation */ /* $FreeBSD$ */ /** *************************************************************************** * @file lac_sym_qat_hash_defs_lookup.c Hash Definitions Lookup * * @ingroup LacHashDefsLookup ***************************************************************************/ /* ******************************************************************************* * Include public/global header files ******************************************************************************* */ #include "cpa.h" /* ******************************************************************************* * Include private header files ******************************************************************************* */ #include "lac_common.h" #include "icp_accel_devices.h" #include "icp_adf_debug.h" #include "icp_adf_transport.h" #include "lac_sym.h" #include "icp_qat_fw_la.h" #include "lac_sym_qat_hash_defs_lookup.h" #include "lac_sal_types_crypto.h" #include "lac_sym_hash_defs.h" /* state size for xcbc mac consists of 3 * 16 byte keys */ #define LAC_SYM_QAT_XCBC_STATE_SIZE ((LAC_HASH_XCBC_MAC_BLOCK_SIZE)*3) #define LAC_SYM_QAT_CMAC_STATE_SIZE ((LAC_HASH_CMAC_BLOCK_SIZE)*3) /* This type is used for the mapping between the hash algorithm and * the corresponding hash definitions structure */ typedef struct lac_sym_qat_hash_def_map_s { CpaCySymHashAlgorithm hashAlgorithm; /* hash algorithm */ 
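
The hash_state_sz selection above reduces to a simple rule: middle and final partials carry the stored hash state plus the prefix, while full packets and first partials carry only the prefix/AAD portion. A sketch in quad-word units:

#include <stdint.h>

enum packet_type {
	PKT_FULL, PKT_PARTIAL_START, PKT_PARTIAL_MID, PKT_PARTIAL_END
};

static uint32_t
hash_state_sz_qw(enum packet_type type, uint32_t state_storage_qw,
    uint32_t prefix_aad_qw)
{
	if (type == PKT_PARTIAL_MID || type == PKT_PARTIAL_END)
		return (state_storage_qw + prefix_aad_qw);
	return (prefix_aad_qw);
}
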
lac_sym_qat_hash_defs_t hashDefs; /* hash defintions pointers */ } lac_sym_qat_hash_def_map_t; /* ******************************************************************************* * Static Variables ******************************************************************************* */ /* initialisers as defined in FIPS and RFCS for digest operations */ /* md5 16 bytes - Initialiser state can be found in RFC 1321*/ static Cpa8U md5InitialState[LAC_HASH_MD5_STATE_SIZE] = { 0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef, 0xfe, 0xdc, 0xba, 0x98, 0x76, 0x54, 0x32, 0x10, }; /* SHA1 - 20 bytes - Initialiser state can be found in FIPS stds 180-2 */ static Cpa8U sha1InitialState[LAC_HASH_SHA1_STATE_SIZE] = { 0x67, 0x45, 0x23, 0x01, 0xef, 0xcd, 0xab, 0x89, 0x98, 0xba, 0xdc, 0xfe, 0x10, 0x32, 0x54, 0x76, 0xc3, 0xd2, 0xe1, 0xf0 }; /* SHA 224 - 32 bytes - Initialiser state can be found in FIPS stds 180-2 */ static Cpa8U sha224InitialState[LAC_HASH_SHA224_STATE_SIZE] = { 0xc1, 0x05, 0x9e, 0xd8, 0x36, 0x7c, 0xd5, 0x07, 0x30, 0x70, 0xdd, 0x17, 0xf7, 0x0e, 0x59, 0x39, 0xff, 0xc0, 0x0b, 0x31, 0x68, 0x58, 0x15, 0x11, 0x64, 0xf9, 0x8f, 0xa7, 0xbe, 0xfa, 0x4f, 0xa4 }; /* SHA 256 - 32 bytes - Initialiser state can be found in FIPS stds 180-2 */ -static Cpa8U sha256InitialState[LAC_HASH_SHA256_STATE_SIZE] = - { 0x6a, 0x09, 0xe6, 0x67, 0xbb, 0x67, 0xae, 0x85, 0x3c, 0x6e, 0xf3, - 0x72, 0xa5, 0x4f, 0xf5, 0x3a, 0x51, 0x0e, 0x52, 0x7f, 0x9b, 0x05, - 0x68, 0x8c, 0x1f, 0x83, 0xd9, 0xab, 0x5b, 0xe0, 0xcd, 0x19 }; +static Cpa8U sha256InitialState[LAC_HASH_SHA256_STATE_SIZE] = { + 0x6a, 0x09, 0xe6, 0x67, 0xbb, 0x67, 0xae, 0x85, 0x3c, 0x6e, 0xf3, + 0x72, 0xa5, 0x4f, 0xf5, 0x3a, 0x51, 0x0e, 0x52, 0x7f, 0x9b, 0x05, + 0x68, 0x8c, 0x1f, 0x83, 0xd9, 0xab, 0x5b, 0xe0, 0xcd, 0x19 +}; /* SHA 384 - 64 bytes - Initialiser state can be found in FIPS stds 180-2 */ -static Cpa8U sha384InitialState[LAC_HASH_SHA384_STATE_SIZE] = - { 0xcb, 0xbb, 0x9d, 0x5d, 0xc1, 0x05, 0x9e, 0xd8, 0x62, 0x9a, 0x29, - 0x2a, 0x36, 0x7c, 0xd5, 0x07, 0x91, 0x59, 0x01, 0x5a, 0x30, 0x70, - 0xdd, 0x17, 0x15, 0x2f, 0xec, 0xd8, 0xf7, 0x0e, 0x59, 0x39, 0x67, - 0x33, 0x26, 0x67, 0xff, 0xc0, 0x0b, 0x31, 0x8e, 0xb4, 0x4a, 0x87, - 0x68, 0x58, 0x15, 0x11, 0xdb, 0x0c, 0x2e, 0x0d, 0x64, 0xf9, 0x8f, - 0xa7, 0x47, 0xb5, 0x48, 0x1d, 0xbe, 0xfa, 0x4f, 0xa4 }; +static Cpa8U sha384InitialState[LAC_HASH_SHA384_STATE_SIZE] = { + 0xcb, 0xbb, 0x9d, 0x5d, 0xc1, 0x05, 0x9e, 0xd8, 0x62, 0x9a, 0x29, + 0x2a, 0x36, 0x7c, 0xd5, 0x07, 0x91, 0x59, 0x01, 0x5a, 0x30, 0x70, + 0xdd, 0x17, 0x15, 0x2f, 0xec, 0xd8, 0xf7, 0x0e, 0x59, 0x39, 0x67, + 0x33, 0x26, 0x67, 0xff, 0xc0, 0x0b, 0x31, 0x8e, 0xb4, 0x4a, 0x87, + 0x68, 0x58, 0x15, 0x11, 0xdb, 0x0c, 0x2e, 0x0d, 0x64, 0xf9, 0x8f, + 0xa7, 0x47, 0xb5, 0x48, 0x1d, 0xbe, 0xfa, 0x4f, 0xa4 +}; /* SHA 512 - 64 bytes - Initialiser state can be found in FIPS stds 180-2 */ -static Cpa8U sha512InitialState[LAC_HASH_SHA512_STATE_SIZE] = - { 0x6a, 0x09, 0xe6, 0x67, 0xf3, 0xbc, 0xc9, 0x08, 0xbb, 0x67, 0xae, - 0x85, 0x84, 0xca, 0xa7, 0x3b, 0x3c, 0x6e, 0xf3, 0x72, 0xfe, 0x94, - 0xf8, 0x2b, 0xa5, 0x4f, 0xf5, 0x3a, 0x5f, 0x1d, 0x36, 0xf1, 0x51, - 0x0e, 0x52, 0x7f, 0xad, 0xe6, 0x82, 0xd1, 0x9b, 0x05, 0x68, 0x8c, - 0x2b, 0x3e, 0x6c, 0x1f, 0x1f, 0x83, 0xd9, 0xab, 0xfb, 0x41, 0xbd, - 0x6b, 0x5b, 0xe0, 0xcd, 0x19, 0x13, 0x7e, 0x21, 0x79 }; +static Cpa8U sha512InitialState[LAC_HASH_SHA512_STATE_SIZE] = { + 0x6a, 0x09, 0xe6, 0x67, 0xf3, 0xbc, 0xc9, 0x08, 0xbb, 0x67, 0xae, + 0x85, 0x84, 0xca, 0xa7, 0x3b, 0x3c, 0x6e, 0xf3, 0x72, 0xfe, 0x94, + 0xf8, 0x2b, 0xa5, 0x4f, 0xf5, 0x3a, 
0x5f, 0x1d, 0x36, 0xf1, 0x51, + 0x0e, 0x52, 0x7f, 0xad, 0xe6, 0x82, 0xd1, 0x9b, 0x05, 0x68, 0x8c, + 0x2b, 0x3e, 0x6c, 0x1f, 0x1f, 0x83, 0xd9, 0xab, 0xfb, 0x41, 0xbd, + 0x6b, 0x5b, 0xe0, 0xcd, 0x19, 0x13, 0x7e, 0x21, 0x79 +}; /* SHA3 224 - 28 bytes */ -static Cpa8U sha3_224InitialState[LAC_HASH_SHA3_224_STATE_SIZE] = - { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; +static Cpa8U sha3_224InitialState[LAC_HASH_SHA3_224_STATE_SIZE] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 +}; /* SHA3 256 - 32 bytes */ -static Cpa8U sha3_256InitialState[LAC_HASH_SHA3_256_STATE_SIZE] = - { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; +static Cpa8U sha3_256InitialState[LAC_HASH_SHA3_256_STATE_SIZE] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 +}; /* SHA3 384 - 48 bytes */ -static Cpa8U sha3_384InitialState[LAC_HASH_SHA3_384_STATE_SIZE] = - { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; +static Cpa8U sha3_384InitialState[LAC_HASH_SHA3_384_STATE_SIZE] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 +}; /* SHA3 512 - 64 bytes */ -static Cpa8U sha3_512InitialState[LAC_HASH_SHA3_512_STATE_SIZE] = - { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; +static Cpa8U sha3_512InitialState[LAC_HASH_SHA3_512_STATE_SIZE] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 +}; /* SM3 - 32 bytes */ -static Cpa8U sm3InitialState[LAC_HASH_SM3_STATE_SIZE] = - { 0x73, 0x80, 0x16, 0x6f, 0x49, 0x14, 0xb2, 0xb9, 0x17, 0x24, 0x42, - 0xd7, 0xda, 0x8a, 0x06, 0x00, 0xa9, 0x6f, 0x30, 0xbc, 0x16, 0x31, - 0x38, 0xaa, 0xe3, 0x8d, 0xee, 0x4d, 0xb0, 0xfb, 0x0e, 0x4e }; +static Cpa8U sm3InitialState[LAC_HASH_SM3_STATE_SIZE] = { + 0x73, 0x80, 0x16, 0x6f, 0x49, 0x14, 0xb2, 0xb9, 0x17, 0x24, 0x42, + 0xd7, 0xda, 0x8a, 0x06, 0x00, 0xa9, 0x6f, 0x30, 0xbc, 0x16, 0x31, + 0x38, 0xaa, 0xe3, 0x8d, 0xee, 0x4d, 0xb0, 0xfb, 
0x0e, 0x4e +}; /* Constants used in generating K1, K2, K3 from a Key for AES_XCBC_MAC * State defined in RFC 3566 */ static Cpa8U aesXcbcKeySeed[LAC_SYM_QAT_XCBC_STATE_SIZE] = { 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, }; -static Cpa8U aesCmacKeySeed[LAC_HASH_CMAC_BLOCK_SIZE] = { 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, - 0x00 }; +static Cpa8U aesCmacKeySeed[LAC_HASH_CMAC_BLOCK_SIZE] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 +}; /* Hash Algorithm specific structure */ static lac_sym_qat_hash_alg_info_t md5Info = { LAC_HASH_MD5_DIGEST_SIZE, LAC_HASH_MD5_BLOCK_SIZE, md5InitialState, LAC_HASH_MD5_STATE_SIZE }; static lac_sym_qat_hash_alg_info_t sha1Info = { LAC_HASH_SHA1_DIGEST_SIZE, LAC_HASH_SHA1_BLOCK_SIZE, sha1InitialState, LAC_HASH_SHA1_STATE_SIZE }; static lac_sym_qat_hash_alg_info_t sha224Info = { LAC_HASH_SHA224_DIGEST_SIZE, LAC_HASH_SHA224_BLOCK_SIZE, sha224InitialState, LAC_HASH_SHA224_STATE_SIZE }; static lac_sym_qat_hash_alg_info_t sha256Info = { LAC_HASH_SHA256_DIGEST_SIZE, LAC_HASH_SHA256_BLOCK_SIZE, sha256InitialState, LAC_HASH_SHA256_STATE_SIZE }; static lac_sym_qat_hash_alg_info_t sha384Info = { LAC_HASH_SHA384_DIGEST_SIZE, LAC_HASH_SHA384_BLOCK_SIZE, sha384InitialState, LAC_HASH_SHA384_STATE_SIZE }; static lac_sym_qat_hash_alg_info_t sha512Info = { LAC_HASH_SHA512_DIGEST_SIZE, LAC_HASH_SHA512_BLOCK_SIZE, sha512InitialState, LAC_HASH_SHA512_STATE_SIZE }; -static lac_sym_qat_hash_alg_info_t sha3_224Info = - { LAC_HASH_SHA3_224_DIGEST_SIZE, - LAC_HASH_SHA3_224_BLOCK_SIZE, - sha3_224InitialState, - LAC_HASH_SHA3_224_STATE_SIZE }; - -static lac_sym_qat_hash_alg_info_t sha3_256Info = - { LAC_HASH_SHA3_256_DIGEST_SIZE, - LAC_HASH_SHA3_256_BLOCK_SIZE, - sha3_256InitialState, - LAC_HASH_SHA3_256_STATE_SIZE }; - -static lac_sym_qat_hash_alg_info_t sha3_384Info = - { LAC_HASH_SHA3_384_DIGEST_SIZE, - LAC_HASH_SHA3_384_BLOCK_SIZE, - sha3_384InitialState, - LAC_HASH_SHA3_384_STATE_SIZE }; - -static lac_sym_qat_hash_alg_info_t sha3_512Info = - { LAC_HASH_SHA3_512_DIGEST_SIZE, - LAC_HASH_SHA3_512_BLOCK_SIZE, - sha3_512InitialState, - LAC_HASH_SHA3_512_STATE_SIZE }; +static lac_sym_qat_hash_alg_info_t sha3_224Info = { + LAC_HASH_SHA3_224_DIGEST_SIZE, + LAC_HASH_SHA3_224_BLOCK_SIZE, + sha3_224InitialState, + LAC_HASH_SHA3_224_STATE_SIZE +}; -static lac_sym_qat_hash_alg_info_t polyInfo = { LAC_HASH_POLY_DIGEST_SIZE, - LAC_HASH_POLY_BLOCK_SIZE, - NULL, /* intial state */ - LAC_HASH_POLY_STATE_SIZE }; +static lac_sym_qat_hash_alg_info_t sha3_256Info = { + LAC_HASH_SHA3_256_DIGEST_SIZE, + LAC_HASH_SHA3_256_BLOCK_SIZE, + sha3_256InitialState, + LAC_HASH_SHA3_256_STATE_SIZE +}; -static lac_sym_qat_hash_alg_info_t shake_128Info = - { LAC_HASH_SHAKE_128_DIGEST_SIZE, LAC_HASH_SHAKE_128_BLOCK_SIZE, NULL, 0 }; +static lac_sym_qat_hash_alg_info_t sha3_384Info = { + LAC_HASH_SHA3_384_DIGEST_SIZE, + LAC_HASH_SHA3_384_BLOCK_SIZE, + sha3_384InitialState, + LAC_HASH_SHA3_384_STATE_SIZE +}; -static lac_sym_qat_hash_alg_info_t shake_256Info = - { LAC_HASH_SHAKE_256_DIGEST_SIZE, LAC_HASH_SHAKE_256_BLOCK_SIZE, NULL, 0 }; +static lac_sym_qat_hash_alg_info_t sha3_512Info = { + LAC_HASH_SHA3_512_DIGEST_SIZE, + LAC_HASH_SHA3_512_BLOCK_SIZE, + 
sha3_512InitialState, + LAC_HASH_SHA3_512_STATE_SIZE +}; static lac_sym_qat_hash_alg_info_t sm3Info = { LAC_HASH_SM3_DIGEST_SIZE, LAC_HASH_SM3_BLOCK_SIZE, sm3InitialState, LAC_HASH_SM3_STATE_SIZE }; -static lac_sym_qat_hash_alg_info_t xcbcMacInfo = - { LAC_HASH_XCBC_MAC_128_DIGEST_SIZE, - LAC_HASH_XCBC_MAC_BLOCK_SIZE, - aesXcbcKeySeed, - LAC_SYM_QAT_XCBC_STATE_SIZE }; +static lac_sym_qat_hash_alg_info_t polyInfo = { LAC_HASH_POLY_DIGEST_SIZE, + LAC_HASH_POLY_BLOCK_SIZE, + NULL, /* intial state */ + LAC_HASH_POLY_STATE_SIZE }; -static lac_sym_qat_hash_alg_info_t aesCmacInfo = - { LAC_HASH_CMAC_128_DIGEST_SIZE, - LAC_HASH_CMAC_BLOCK_SIZE, - aesCmacKeySeed, - LAC_SYM_QAT_CMAC_STATE_SIZE }; +static lac_sym_qat_hash_alg_info_t xcbcMacInfo = { + LAC_HASH_XCBC_MAC_128_DIGEST_SIZE, + LAC_HASH_XCBC_MAC_BLOCK_SIZE, + aesXcbcKeySeed, + LAC_SYM_QAT_XCBC_STATE_SIZE +}; + +static lac_sym_qat_hash_alg_info_t aesCmacInfo = { + LAC_HASH_CMAC_128_DIGEST_SIZE, + LAC_HASH_CMAC_BLOCK_SIZE, + aesCmacKeySeed, + LAC_SYM_QAT_CMAC_STATE_SIZE +}; static lac_sym_qat_hash_alg_info_t aesCcmInfo = { LAC_HASH_AES_CCM_DIGEST_SIZE, LAC_HASH_AES_CCM_BLOCK_SIZE, NULL, /* intial state */ 0 /* state size */ }; static lac_sym_qat_hash_alg_info_t aesGcmInfo = { LAC_HASH_AES_GCM_DIGEST_SIZE, LAC_HASH_AES_GCM_BLOCK_SIZE, NULL, /* initial state */ 0 /* state size */ }; static lac_sym_qat_hash_alg_info_t kasumiF9Info = { LAC_HASH_KASUMI_F9_DIGEST_SIZE, LAC_HASH_KASUMI_F9_BLOCK_SIZE, NULL, /* initial state */ 0 /* state size */ }; static lac_sym_qat_hash_alg_info_t snow3gUia2Info = { LAC_HASH_SNOW3G_UIA2_DIGEST_SIZE, LAC_HASH_SNOW3G_UIA2_BLOCK_SIZE, NULL, /* initial state */ 0 /* state size */ }; -static lac_sym_qat_hash_alg_info_t aesCbcMacInfo = - { LAC_HASH_AES_CBC_MAC_DIGEST_SIZE, - LAC_HASH_AES_CBC_MAC_BLOCK_SIZE, - NULL, - 0 }; +static lac_sym_qat_hash_alg_info_t aesCbcMacInfo = { + LAC_HASH_AES_CBC_MAC_DIGEST_SIZE, + LAC_HASH_AES_CBC_MAC_BLOCK_SIZE, + NULL, + 0 +}; static lac_sym_qat_hash_alg_info_t zucEia3Info = { LAC_HASH_ZUC_EIA3_DIGEST_SIZE, LAC_HASH_ZUC_EIA3_BLOCK_SIZE, NULL, /* initial state */ 0 /* state size */ }; /* Hash QAT specific structures */ static lac_sym_qat_hash_qat_info_t md5Config = { ICP_QAT_HW_AUTH_ALGO_MD5, LAC_HASH_MD5_BLOCK_SIZE, ICP_QAT_HW_MD5_STATE1_SZ, ICP_QAT_HW_MD5_STATE2_SZ }; static lac_sym_qat_hash_qat_info_t sha1Config = { ICP_QAT_HW_AUTH_ALGO_SHA1, LAC_HASH_SHA1_BLOCK_SIZE, ICP_QAT_HW_SHA1_STATE1_SZ, ICP_QAT_HW_SHA1_STATE2_SZ }; -static lac_sym_qat_hash_qat_info_t sha224Config = - { ICP_QAT_HW_AUTH_ALGO_SHA224, - LAC_HASH_SHA224_BLOCK_SIZE, - ICP_QAT_HW_SHA224_STATE1_SZ, - ICP_QAT_HW_SHA224_STATE2_SZ }; - -static lac_sym_qat_hash_qat_info_t sha256Config = - { ICP_QAT_HW_AUTH_ALGO_SHA256, - LAC_HASH_SHA256_BLOCK_SIZE, - ICP_QAT_HW_SHA256_STATE1_SZ, - ICP_QAT_HW_SHA256_STATE2_SZ }; - -static lac_sym_qat_hash_qat_info_t sha384Config = - { ICP_QAT_HW_AUTH_ALGO_SHA384, - LAC_HASH_SHA384_BLOCK_SIZE, - ICP_QAT_HW_SHA384_STATE1_SZ, - ICP_QAT_HW_SHA384_STATE2_SZ }; - -static lac_sym_qat_hash_qat_info_t sha512Config = - { ICP_QAT_HW_AUTH_ALGO_SHA512, - LAC_HASH_SHA512_BLOCK_SIZE, - ICP_QAT_HW_SHA512_STATE1_SZ, - ICP_QAT_HW_SHA512_STATE2_SZ }; - -static lac_sym_qat_hash_qat_info_t sha3_224Config = - { ICP_QAT_HW_AUTH_ALGO_SHA3_224, - LAC_HASH_SHA3_224_BLOCK_SIZE, - ICP_QAT_HW_SHA3_224_STATE1_SZ, - ICP_QAT_HW_SHA3_224_STATE2_SZ }; - -static lac_sym_qat_hash_qat_info_t sha3_256Config = - { ICP_QAT_HW_AUTH_ALGO_SHA3_256, - LAC_HASH_SHA3_256_BLOCK_SIZE, - ICP_QAT_HW_SHA3_256_STATE1_SZ, - 
ICP_QAT_HW_SHA3_256_STATE2_SZ }; - -static lac_sym_qat_hash_qat_info_t sha3_384Config = - { ICP_QAT_HW_AUTH_ALGO_SHA3_384, - LAC_HASH_SHA3_384_BLOCK_SIZE, - ICP_QAT_HW_SHA3_384_STATE1_SZ, - ICP_QAT_HW_SHA3_384_STATE2_SZ }; - -static lac_sym_qat_hash_qat_info_t sha3_512Config = - { ICP_QAT_HW_AUTH_ALGO_SHA3_512, - LAC_HASH_SHA3_512_BLOCK_SIZE, - ICP_QAT_HW_SHA3_512_STATE1_SZ, - ICP_QAT_HW_SHA3_512_STATE2_SZ }; - -static lac_sym_qat_hash_qat_info_t shake_128Config = - { ICP_QAT_HW_AUTH_ALGO_SHAKE_128, LAC_HASH_SHAKE_128_BLOCK_SIZE, 0, 0 }; - -static lac_sym_qat_hash_qat_info_t shake_256Config = - { ICP_QAT_HW_AUTH_ALGO_SHAKE_256, LAC_HASH_SHAKE_256_BLOCK_SIZE, 0, 0 }; +static lac_sym_qat_hash_qat_info_t sha224Config = { + ICP_QAT_HW_AUTH_ALGO_SHA224, + LAC_HASH_SHA224_BLOCK_SIZE, + ICP_QAT_HW_SHA224_STATE1_SZ, + ICP_QAT_HW_SHA224_STATE2_SZ +}; -static lac_sym_qat_hash_qat_info_t polyConfig = { ICP_QAT_HW_AUTH_ALGO_POLY, - LAC_HASH_POLY_BLOCK_SIZE, - 0, - 0 }; +static lac_sym_qat_hash_qat_info_t sha256Config = { + ICP_QAT_HW_AUTH_ALGO_SHA256, + LAC_HASH_SHA256_BLOCK_SIZE, + ICP_QAT_HW_SHA256_STATE1_SZ, + ICP_QAT_HW_SHA256_STATE2_SZ +}; + +static lac_sym_qat_hash_qat_info_t sha384Config = { + ICP_QAT_HW_AUTH_ALGO_SHA384, + LAC_HASH_SHA384_BLOCK_SIZE, + ICP_QAT_HW_SHA384_STATE1_SZ, + ICP_QAT_HW_SHA384_STATE2_SZ +}; + +static lac_sym_qat_hash_qat_info_t sha512Config = { + ICP_QAT_HW_AUTH_ALGO_SHA512, + LAC_HASH_SHA512_BLOCK_SIZE, + ICP_QAT_HW_SHA512_STATE1_SZ, + ICP_QAT_HW_SHA512_STATE2_SZ +}; + +static lac_sym_qat_hash_qat_info_t sha3_224Config = { + ICP_QAT_HW_AUTH_ALGO_SHA3_224, + LAC_HASH_SHA3_224_BLOCK_SIZE, + ICP_QAT_HW_SHA3_224_STATE1_SZ, + ICP_QAT_HW_SHA3_224_STATE2_SZ +}; + +static lac_sym_qat_hash_qat_info_t sha3_256Config = { + ICP_QAT_HW_AUTH_ALGO_SHA3_256, + LAC_HASH_SHA3_256_BLOCK_SIZE, + ICP_QAT_HW_SHA3_256_STATE1_SZ, + ICP_QAT_HW_SHA3_256_STATE2_SZ +}; + +static lac_sym_qat_hash_qat_info_t sha3_384Config = { + ICP_QAT_HW_AUTH_ALGO_SHA3_384, + LAC_HASH_SHA3_384_BLOCK_SIZE, + ICP_QAT_HW_SHA3_384_STATE1_SZ, + ICP_QAT_HW_SHA3_384_STATE2_SZ +}; + +static lac_sym_qat_hash_qat_info_t sha3_512Config = { + ICP_QAT_HW_AUTH_ALGO_SHA3_512, + LAC_HASH_SHA3_512_BLOCK_SIZE, + ICP_QAT_HW_SHA3_512_STATE1_SZ, + ICP_QAT_HW_SHA3_512_STATE2_SZ +}; static lac_sym_qat_hash_qat_info_t sm3Config = { ICP_QAT_HW_AUTH_ALGO_SM3, LAC_HASH_SM3_BLOCK_SIZE, ICP_QAT_HW_SM3_STATE1_SZ, ICP_QAT_HW_SM3_STATE2_SZ }; -static lac_sym_qat_hash_qat_info_t xcbcMacConfig = - { ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC, - 0, - ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ, - LAC_SYM_QAT_XCBC_STATE_SIZE }; - -static lac_sym_qat_hash_qat_info_t aesCmacConfig = - { ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC, - 0, - ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ, - LAC_SYM_QAT_CMAC_STATE_SIZE }; - -static lac_sym_qat_hash_qat_info_t aesCcmConfig = - { ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC, - 0, - ICP_QAT_HW_AES_CBC_MAC_STATE1_SZ, - ICP_QAT_HW_AES_CBC_MAC_KEY_SZ + ICP_QAT_HW_AES_CCM_CBC_E_CTR0_SZ }; - -static lac_sym_qat_hash_qat_info_t aesGcmConfig = - { ICP_QAT_HW_AUTH_ALGO_GALOIS_128, - 0, - ICP_QAT_HW_GALOIS_128_STATE1_SZ, - ICP_QAT_HW_GALOIS_H_SZ + ICP_QAT_HW_GALOIS_LEN_A_SZ + - ICP_QAT_HW_GALOIS_E_CTR0_SZ }; - -static lac_sym_qat_hash_qat_info_t kasumiF9Config = - { ICP_QAT_HW_AUTH_ALGO_KASUMI_F9, - 0, - ICP_QAT_HW_KASUMI_F9_STATE1_SZ, - ICP_QAT_HW_KASUMI_F9_STATE2_SZ }; - -static lac_sym_qat_hash_qat_info_t snow3gUia2Config = - { ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2, - 0, - ICP_QAT_HW_SNOW_3G_UIA2_STATE1_SZ, - ICP_QAT_HW_SNOW_3G_UIA2_STATE2_SZ }; - -static 
lac_sym_qat_hash_qat_info_t aesCbcMacConfig = - { ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC, - 0, - ICP_QAT_HW_AES_CBC_MAC_STATE1_SZ, - ICP_QAT_HW_AES_CBC_MAC_STATE1_SZ + ICP_QAT_HW_AES_CBC_MAC_STATE1_SZ }; - -static lac_sym_qat_hash_qat_info_t zucEia3Config = - { ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3, - 0, - ICP_QAT_HW_ZUC_3G_EIA3_STATE1_SZ, - ICP_QAT_HW_ZUC_3G_EIA3_STATE2_SZ }; +static lac_sym_qat_hash_qat_info_t polyConfig = { ICP_QAT_HW_AUTH_ALGO_POLY, + LAC_HASH_POLY_BLOCK_SIZE, + 0, + 0 }; + +static lac_sym_qat_hash_qat_info_t xcbcMacConfig = { + ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC, + 0, + ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ, + LAC_SYM_QAT_XCBC_STATE_SIZE +}; + +static lac_sym_qat_hash_qat_info_t aesCmacConfig = { + ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC, + 0, + ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ, + LAC_SYM_QAT_CMAC_STATE_SIZE +}; + +static lac_sym_qat_hash_qat_info_t aesCcmConfig = { + ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC, + 0, + ICP_QAT_HW_AES_CBC_MAC_STATE1_SZ, + ICP_QAT_HW_AES_CBC_MAC_KEY_SZ + ICP_QAT_HW_AES_CCM_CBC_E_CTR0_SZ +}; + +static lac_sym_qat_hash_qat_info_t aesGcmConfig = { + ICP_QAT_HW_AUTH_ALGO_GALOIS_128, + 0, + ICP_QAT_HW_GALOIS_128_STATE1_SZ, + ICP_QAT_HW_GALOIS_H_SZ + ICP_QAT_HW_GALOIS_LEN_A_SZ + + ICP_QAT_HW_GALOIS_E_CTR0_SZ +}; + +static lac_sym_qat_hash_qat_info_t kasumiF9Config = { + ICP_QAT_HW_AUTH_ALGO_KASUMI_F9, + 0, + ICP_QAT_HW_KASUMI_F9_STATE1_SZ, + ICP_QAT_HW_KASUMI_F9_STATE2_SZ +}; + +static lac_sym_qat_hash_qat_info_t snow3gUia2Config = { + ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2, + 0, + ICP_QAT_HW_SNOW_3G_UIA2_STATE1_SZ, + ICP_QAT_HW_SNOW_3G_UIA2_STATE2_SZ +}; + +static lac_sym_qat_hash_qat_info_t aesCbcMacConfig = { + ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC, + 0, + ICP_QAT_HW_AES_CBC_MAC_STATE1_SZ, + ICP_QAT_HW_AES_CBC_MAC_STATE1_SZ + ICP_QAT_HW_AES_CBC_MAC_STATE1_SZ +}; + +static lac_sym_qat_hash_qat_info_t zucEia3Config = { + ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3, + 0, + ICP_QAT_HW_ZUC_3G_EIA3_STATE1_SZ, + ICP_QAT_HW_ZUC_3G_EIA3_STATE2_SZ +}; /* Array of mappings between algorithm and info structure * This array is used to populate the lookup table */ -static lac_sym_qat_hash_def_map_t lacHashDefsMapping[] = - { { CPA_CY_SYM_HASH_MD5, { &md5Info, &md5Config } }, - { CPA_CY_SYM_HASH_SHA1, { &sha1Info, &sha1Config } }, - { CPA_CY_SYM_HASH_SHA224, { &sha224Info, &sha224Config } }, - { CPA_CY_SYM_HASH_SHA256, { &sha256Info, &sha256Config } }, - { CPA_CY_SYM_HASH_SHA384, { &sha384Info, &sha384Config } }, - { CPA_CY_SYM_HASH_SHA512, { &sha512Info, &sha512Config } }, - { CPA_CY_SYM_HASH_SHA3_224, { &sha3_224Info, &sha3_224Config } }, - { CPA_CY_SYM_HASH_SHA3_256, { &sha3_256Info, &sha3_256Config } }, - { CPA_CY_SYM_HASH_SHA3_384, { &sha3_384Info, &sha3_384Config } }, - { CPA_CY_SYM_HASH_SHA3_512, { &sha3_512Info, &sha3_512Config } }, - { CPA_CY_SYM_HASH_SHAKE_128, { &shake_128Info, &shake_128Config } }, - { CPA_CY_SYM_HASH_SHAKE_256, { &shake_256Info, &shake_256Config } }, - { CPA_CY_SYM_HASH_POLY, { &polyInfo, &polyConfig } }, - { CPA_CY_SYM_HASH_SM3, { &sm3Info, &sm3Config } }, - { CPA_CY_SYM_HASH_AES_XCBC, { &xcbcMacInfo, &xcbcMacConfig } }, - { CPA_CY_SYM_HASH_AES_CMAC, { &aesCmacInfo, &aesCmacConfig } }, - { CPA_CY_SYM_HASH_AES_CCM, { &aesCcmInfo, &aesCcmConfig } }, - { CPA_CY_SYM_HASH_AES_GCM, { &aesGcmInfo, &aesGcmConfig } }, - { CPA_CY_SYM_HASH_KASUMI_F9, { &kasumiF9Info, &kasumiF9Config } }, - { CPA_CY_SYM_HASH_SNOW3G_UIA2, { &snow3gUia2Info, &snow3gUia2Config } }, - { CPA_CY_SYM_HASH_AES_GMAC, { &aesGcmInfo, &aesGcmConfig } }, - { CPA_CY_SYM_HASH_ZUC_EIA3, { &zucEia3Info, 
&zucEia3Config } }, - { CPA_CY_SYM_HASH_AES_CBC_MAC, { &aesCbcMacInfo, &aesCbcMacConfig } } }; +static lac_sym_qat_hash_def_map_t lacHashDefsMapping[] = { + { CPA_CY_SYM_HASH_MD5, { &md5Info, &md5Config } }, + { CPA_CY_SYM_HASH_SHA1, { &sha1Info, &sha1Config } }, + { CPA_CY_SYM_HASH_SHA224, { &sha224Info, &sha224Config } }, + { CPA_CY_SYM_HASH_SHA256, { &sha256Info, &sha256Config } }, + { CPA_CY_SYM_HASH_SHA384, { &sha384Info, &sha384Config } }, + { CPA_CY_SYM_HASH_SHA512, { &sha512Info, &sha512Config } }, + { CPA_CY_SYM_HASH_SHA3_224, { &sha3_224Info, &sha3_224Config } }, + { CPA_CY_SYM_HASH_SHA3_256, { &sha3_256Info, &sha3_256Config } }, + { CPA_CY_SYM_HASH_SHA3_384, { &sha3_384Info, &sha3_384Config } }, + { CPA_CY_SYM_HASH_SHA3_512, { &sha3_512Info, &sha3_512Config } }, + { CPA_CY_SYM_HASH_SM3, { &sm3Info, &sm3Config } }, + { CPA_CY_SYM_HASH_POLY, { &polyInfo, &polyConfig } }, + { CPA_CY_SYM_HASH_AES_XCBC, { &xcbcMacInfo, &xcbcMacConfig } }, + { CPA_CY_SYM_HASH_AES_CMAC, { &aesCmacInfo, &aesCmacConfig } }, + { CPA_CY_SYM_HASH_AES_CCM, { &aesCcmInfo, &aesCcmConfig } }, + { CPA_CY_SYM_HASH_AES_GCM, { &aesGcmInfo, &aesGcmConfig } }, + { CPA_CY_SYM_HASH_KASUMI_F9, { &kasumiF9Info, &kasumiF9Config } }, + { CPA_CY_SYM_HASH_SNOW3G_UIA2, { &snow3gUia2Info, &snow3gUia2Config } }, + { CPA_CY_SYM_HASH_AES_GMAC, { &aesGcmInfo, &aesGcmConfig } }, + { CPA_CY_SYM_HASH_ZUC_EIA3, { &zucEia3Info, &zucEia3Config } }, + { CPA_CY_SYM_HASH_AES_CBC_MAC, { &aesCbcMacInfo, &aesCbcMacConfig } } +}; /* * LacSymQat_HashLookupInit */ CpaStatus LacSymQat_HashLookupInit(CpaInstanceHandle instanceHandle) { Cpa32U entry = 0; Cpa32U numEntries = 0; Cpa32U arraySize = 0; CpaStatus status = CPA_STATUS_SUCCESS; CpaCySymHashAlgorithm hashAlg = CPA_CY_SYM_HASH_NONE; - sal_crypto_service_t *pService = (sal_crypto_service_t *)instanceHandle; + sal_service_t *pService = (sal_service_t *)instanceHandle; + lac_sym_qat_hash_defs_t **pLacHashLookupDefs; arraySize = (CPA_CY_HASH_ALG_END + 1) * sizeof(lac_sym_qat_hash_defs_t *); /* Size round up for performance */ arraySize = LAC_ALIGN_POW2_ROUNDUP(arraySize, LAC_64BYTE_ALIGNMENT); - pService->pLacHashLookupDefs = LAC_OS_MALLOC(arraySize); - - if (NULL != pService->pLacHashLookupDefs) { - LAC_OS_BZERO(pService->pLacHashLookupDefs, arraySize); + pLacHashLookupDefs = LAC_OS_MALLOC(arraySize); + if (NULL == pLacHashLookupDefs) { + return CPA_STATUS_RESOURCE; + } - numEntries = sizeof(lacHashDefsMapping) / - sizeof(lac_sym_qat_hash_def_map_t); + LAC_OS_BZERO(pLacHashLookupDefs, arraySize); + numEntries = + sizeof(lacHashDefsMapping) / sizeof(lac_sym_qat_hash_def_map_t); - /* initialise the hash lookup definitions table so that the - * algorithm - * can be used to index into the table */ - for (entry = 0; entry < numEntries; entry++) { - hashAlg = lacHashDefsMapping[entry].hashAlgorithm; + /* initialise the hash lookup definitions table so that the algorithm + * can be used to index into the table */ + for (entry = 0; entry < numEntries; entry++) { + hashAlg = lacHashDefsMapping[entry].hashAlgorithm; - pService->pLacHashLookupDefs[hashAlg] = - &(lacHashDefsMapping[entry].hashDefs); - } - } else { - status = CPA_STATUS_RESOURCE; + pLacHashLookupDefs[hashAlg] = + &(lacHashDefsMapping[entry].hashDefs); } + + ((sal_crypto_service_t *)pService)->pLacHashLookupDefs = + pLacHashLookupDefs; + return status; } /* * LacSymQat_HashAlgLookupGet */ void LacSymQat_HashAlgLookupGet(CpaInstanceHandle instanceHandle, CpaCySymHashAlgorithm hashAlgorithm, lac_sym_qat_hash_alg_info_t **ppHashAlgInfo) { - 
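/*
 * Illustrative note: this getter and LacSymQat_HashDefsLookupGet()
 * below assume LacSymQat_HashLookupInit() has already populated the
 * table for this instance; hashAlgorithm is used as a direct index
 * with no bounds check, which is why the table is sized
 * (CPA_CY_HASH_ALG_END + 1) entries.
 */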
sal_crypto_service_t *pService = (sal_crypto_service_t *)instanceHandle; + sal_service_t *pService = (sal_service_t *)instanceHandle; + lac_sym_qat_hash_defs_t **pLacHashLookupDefs = + ((sal_crypto_service_t *)pService)->pLacHashLookupDefs; - *ppHashAlgInfo = pService->pLacHashLookupDefs[hashAlgorithm]->algInfo; + *ppHashAlgInfo = pLacHashLookupDefs[hashAlgorithm]->algInfo; } /* * LacSymQat_HashDefsLookupGet */ void LacSymQat_HashDefsLookupGet(CpaInstanceHandle instanceHandle, CpaCySymHashAlgorithm hashAlgorithm, lac_sym_qat_hash_defs_t **ppHashDefsInfo) { - sal_crypto_service_t *pService = (sal_crypto_service_t *)instanceHandle; + sal_service_t *pService = (sal_service_t *)instanceHandle; + lac_sym_qat_hash_defs_t **pLacHashLookupDefs = + ((sal_crypto_service_t *)pService)->pLacHashLookupDefs; - *ppHashDefsInfo = pService->pLacHashLookupDefs[hashAlgorithm]; + *ppHashDefsInfo = pLacHashLookupDefs[hashAlgorithm]; } diff --git a/sys/dev/qat/qat_api/common/ctrl/sal_compression.c b/sys/dev/qat/qat_api/common/ctrl/sal_compression.c index d8523d23b0e0..9827015767bf 100644 --- a/sys/dev/qat/qat_api/common/ctrl/sal_compression.c +++ b/sys/dev/qat/qat_api/common/ctrl/sal_compression.c @@ -1,1554 +1,1621 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright(c) 2007-2022 Intel Corporation */ /* $FreeBSD$ */ /** ***************************************************************************** * @file sal_compression.c * * @ingroup SalCtrl * * @description * This file contains the sal implementation for compression. * *****************************************************************************/ /* QAT-API includes */ #include "cpa.h" #include "cpa_dc.h" /* QAT utils includes */ #include "qat_utils.h" /* ADF includes */ #include "icp_adf_init.h" #include "icp_adf_transport.h" #include "icp_accel_devices.h" #include "icp_adf_cfg.h" #include "icp_adf_accel_mgr.h" #include "icp_adf_poll.h" #include "icp_adf_debug.h" #include "icp_adf_esram.h" #include "icp_qat_hw.h" /* SAL includes */ #include "lac_mem.h" #include "lac_common.h" #include "lac_mem_pools.h" #include "sal_statistics.h" #include "lac_list.h" #include "icp_sal_poll.h" #include "sal_types_compression.h" #include "dc_session.h" #include "dc_datapath.h" #include "dc_stats.h" #include "lac_sal.h" #include "lac_sal_ctrl.h" #include "sal_string_parse.h" #include "sal_service_state.h" #include "lac_buffer_desc.h" #include "icp_qat_fw_comp.h" +#include "icp_qat_hw_20_comp_defs.h" #include "icp_sal_versions.h" /* C string null terminator size */ #define SAL_NULL_TERM_SIZE 1 /* Type to access extended features bit fields */ typedef struct dc_extended_features_s { unsigned is_cnv : 1; /* Bit<0> */ unsigned padding : 7; unsigned is_cnvnr : 1; /* Bit<8> */ unsigned not_used : 23; } dc_extd_ftrs_t; /* * Prints statistics for a compression instance */ static int SalCtrl_CompresionDebug(void *private_data, char *data, int size, int offset) { sal_compression_service_t *pCompressionService = (sal_compression_service_t *)private_data; CpaStatus status = CPA_STATUS_SUCCESS; CpaDcStats dcStats = { 0 }; Cpa32S len = 0; status = cpaDcGetStats(pCompressionService, &dcStats); if (status != CPA_STATUS_SUCCESS) { QAT_UTILS_LOG("cpaDcGetStats returned error.\n"); return (-1); } /* Engine Info */ if (NULL != pCompressionService->debug_file) { len += snprintf(data + len, size - len, SEPARATOR BORDER " Statistics for Instance %24s | \n" SEPARATOR, pCompressionService->debug_file->name); } /* Perform Info */ len += snprintf(data + len, size - len, BORDER " DC comp 
Requests: %16llu " BORDER "\n" BORDER " DC comp Request Errors: %16llu " BORDER "\n" BORDER " DC comp Completed: %16llu " BORDER "\n" BORDER " DC comp Completed Errors: %16llu " BORDER "\n" SEPARATOR, (long long unsigned int)dcStats.numCompRequests, (long long unsigned int)dcStats.numCompRequestsErrors, (long long unsigned int)dcStats.numCompCompleted, (long long unsigned int)dcStats.numCompCompletedErrors); /* Perform Info */ len += snprintf( data + len, size - len, BORDER " DC decomp Requests: %16llu " BORDER "\n" BORDER " DC decomp Request Errors: %16llu " BORDER "\n" BORDER " DC decomp Completed: %16llu " BORDER "\n" BORDER " DC decomp Completed Errors: %16llu " BORDER "\n" SEPARATOR, (long long unsigned int)dcStats.numDecompRequests, (long long unsigned int)dcStats.numDecompRequestsErrors, (long long unsigned int)dcStats.numDecompCompleted, (long long unsigned int)dcStats.numDecompCompletedErrors); return 0; } /* Initialise device specific information needed by compression service */ static CpaStatus SalCtrl_CompressionInit_CompData(icp_accel_dev_t *device, sal_compression_service_t *pCompService) { + int level = 0; + + pCompService->comp_device_data.uniqueCompressionLevels[0] = CPA_FALSE; + switch (device->deviceType) { case DEVICE_DH895XCC: case DEVICE_DH895XCCVF: pCompService->generic_service_info.integrityCrcCheck = CPA_FALSE; pCompService->numInterBuffs = DC_QAT_MAX_NUM_INTER_BUFFERS_6COMP_SLICES; pCompService->comp_device_data.minOutputBuffSize = DC_DEST_BUFFER_STA_MIN_SIZE; pCompService->comp_device_data.oddByteDecompNobFinal = CPA_TRUE; pCompService->comp_device_data.oddByteDecompInterim = CPA_FALSE; pCompService->comp_device_data.translatorOverflow = CPA_FALSE; pCompService->comp_device_data.useDevRam = ICP_QAT_FW_COMP_ENABLE_SECURE_RAM_USED_AS_INTMD_BUF; pCompService->comp_device_data.enableDmm = ICP_QAT_HW_COMPRESSION_DELAYED_MATCH_DISABLED; pCompService->comp_device_data.inflateContextSize = DC_INFLATE_CONTEXT_SIZE; pCompService->comp_device_data.highestHwCompressionDepth = ICP_QAT_HW_COMPRESSION_DEPTH_16; pCompService->comp_device_data.windowSizeMask = (1 << DC_8K_WINDOW_SIZE | 1 << DC_32K_WINDOW_SIZE); pCompService->comp_device_data.cnvnrSupported = CPA_FALSE; break; case DEVICE_C3XXX: case DEVICE_C3XXXVF: case DEVICE_200XX: case DEVICE_200XXVF: pCompService->generic_service_info.integrityCrcCheck = CPA_FALSE; pCompService->numInterBuffs = DC_QAT_MAX_NUM_INTER_BUFFERS_6COMP_SLICES; pCompService->comp_device_data.oddByteDecompNobFinal = CPA_FALSE; pCompService->comp_device_data.oddByteDecompInterim = CPA_TRUE; pCompService->comp_device_data.translatorOverflow = CPA_FALSE; pCompService->comp_device_data.useDevRam = ICP_QAT_FW_COMP_DISABLE_SECURE_RAM_USED_AS_INTMD_BUF; pCompService->comp_device_data.inflateContextSize = DC_INFLATE_EH_CONTEXT_SIZE; pCompService->comp_device_data.highestHwCompressionDepth = ICP_QAT_HW_COMPRESSION_DEPTH_16; pCompService->comp_device_data.windowSizeMask = (1 << DC_16K_WINDOW_SIZE | 1 << DC_32K_WINDOW_SIZE); pCompService->comp_device_data.minOutputBuffSize = DC_DEST_BUFFER_STA_MIN_SIZE; pCompService->comp_device_data.enableDmm = ICP_QAT_HW_COMPRESSION_DELAYED_MATCH_ENABLED; pCompService->comp_device_data.cnvnrSupported = CPA_TRUE; break; case DEVICE_C62X: case DEVICE_C62XVF: pCompService->generic_service_info.integrityCrcCheck = CPA_FALSE; pCompService->numInterBuffs = DC_QAT_MAX_NUM_INTER_BUFFERS_10COMP_SLICES; pCompService->comp_device_data.oddByteDecompNobFinal = CPA_FALSE; pCompService->comp_device_data.oddByteDecompInterim = CPA_TRUE; 
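/*
 * Illustrative note: the windowSizeMask fields set in these switch
 * cases encode the supported history window sizes as one bit per
 * size. A minimal sketch of a capability test built on the mask
 * (the constants are the ones used in this file; the check itself
 * is not code from the patch):
 *
 *	if (pCompService->comp_device_data.windowSizeMask &
 *	    (1 << DC_32K_WINDOW_SIZE)) {
 *		... 32KB history window supported on this device ...
 *	}
 */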
pCompService->comp_device_data.translatorOverflow = CPA_FALSE; pCompService->comp_device_data.useDevRam = ICP_QAT_FW_COMP_ENABLE_SECURE_RAM_USED_AS_INTMD_BUF; pCompService->comp_device_data.inflateContextSize = DC_INFLATE_EH_CONTEXT_SIZE; + pCompService->comp_device_data.highestHwCompressionDepth = + ICP_QAT_HW_COMPRESSION_DEPTH_16; pCompService->comp_device_data.windowSizeMask = - (1 << DC_16K_WINDOW_SIZE | 1 << DC_32K_WINDOW_SIZE); + (1 << DC_4K_WINDOW_SIZE | 1 << DC_8K_WINDOW_SIZE | + 1 << DC_16K_WINDOW_SIZE | 1 << DC_32K_WINDOW_SIZE); pCompService->comp_device_data.minOutputBuffSize = DC_DEST_BUFFER_STA_MIN_SIZE; + pCompService->comp_device_data.minOutputBuffSizeDynamic = + pCompService->comp_device_data.minOutputBuffSize; pCompService->comp_device_data.enableDmm = ICP_QAT_HW_COMPRESSION_DELAYED_MATCH_ENABLED; pCompService->comp_device_data.cnvnrSupported = CPA_TRUE; + + for (level = CPA_DC_L1; level <= CPA_DC_L9; level++) { + switch (level) { + case CPA_DC_L1: + case CPA_DC_L2: + case CPA_DC_L3: + case CPA_DC_L4: + pCompService->comp_device_data + .uniqueCompressionLevels[level] = CPA_TRUE; + break; + default: + pCompService->comp_device_data + .uniqueCompressionLevels[level] = CPA_FALSE; + break; + } + } + pCompService->comp_device_data.numCompressionLevels = + DC_NUM_COMPRESSION_LEVELS; break; case DEVICE_C4XXX: case DEVICE_C4XXXVF: pCompService->generic_service_info.integrityCrcCheck = CPA_TRUE; pCompService->numInterBuffs = DC_QAT_MAX_NUM_INTER_BUFFERS_24COMP_SLICES; pCompService->comp_device_data.minOutputBuffSize = DC_DEST_BUFFER_MIN_SIZE; pCompService->comp_device_data.oddByteDecompNobFinal = CPA_TRUE; pCompService->comp_device_data.oddByteDecompInterim = CPA_TRUE; pCompService->comp_device_data.translatorOverflow = CPA_TRUE; if (pCompService->generic_service_info.capabilitiesMask & ICP_ACCEL_CAPABILITIES_INLINE) { pCompService->comp_device_data.useDevRam = ICP_QAT_FW_COMP_DISABLE_SECURE_RAM_USED_AS_INTMD_BUF; } else { pCompService->comp_device_data.useDevRam = ICP_QAT_FW_COMP_ENABLE_SECURE_RAM_USED_AS_INTMD_BUF; } pCompService->comp_device_data.enableDmm = ICP_QAT_HW_COMPRESSION_DELAYED_MATCH_ENABLED; pCompService->comp_device_data.inflateContextSize = DC_INFLATE_EH_CONTEXT_SIZE; pCompService->comp_device_data.highestHwCompressionDepth = ICP_QAT_HW_COMPRESSION_DEPTH_128; pCompService->comp_device_data.windowSizeMask = (1 << DC_16K_WINDOW_SIZE | 1 << DC_32K_WINDOW_SIZE); pCompService->comp_device_data.cnvnrSupported = CPA_TRUE; break; + case DEVICE_GEN4: + pCompService->generic_service_info.integrityCrcCheck = CPA_TRUE; + pCompService->numInterBuffs = 0; + pCompService->comp_device_data.minOutputBuffSize = + DC_DEST_BUFFER_STA_MIN_SIZE_GEN4; + pCompService->comp_device_data.minOutputBuffSizeDynamic = + DC_DEST_BUFFER_DYN_MIN_SIZE_GEN4; + pCompService->comp_device_data.oddByteDecompNobFinal = CPA_TRUE; + pCompService->comp_device_data.oddByteDecompInterim = CPA_FALSE; + pCompService->comp_device_data.translatorOverflow = CPA_TRUE; + pCompService->comp_device_data.useDevRam = + ICP_QAT_FW_COMP_ENABLE_SECURE_RAM_USED_AS_INTMD_BUF; + pCompService->comp_device_data.enableDmm = + ICP_QAT_HW_COMPRESSION_DELAYED_MATCH_ENABLED; + + pCompService->comp_device_data.inflateContextSize = + DC_INFLATE_CONTEXT_SIZE; + pCompService->comp_device_data.highestHwCompressionDepth = + ICP_QAT_HW_COMP_20_SEARCH_DEPTH_LEVEL_9; + pCompService->comp_device_data.windowSizeMask = + (1 << DC_4K_WINDOW_SIZE | 1 << DC_8K_WINDOW_SIZE | + 1 << DC_16K_WINDOW_SIZE | 1 << DC_32K_WINDOW_SIZE); + for (level = 
CPA_DC_L1; level <= CPA_DC_L9; level++) { + switch (level) { + case CPA_DC_L1: + case CPA_DC_L6: + case CPA_DC_L9: + pCompService->comp_device_data + .uniqueCompressionLevels[level] = CPA_TRUE; + break; + default: + pCompService->comp_device_data + .uniqueCompressionLevels[level] = CPA_FALSE; + break; + } + } + pCompService->comp_device_data.numCompressionLevels = + DC_NUM_COMPRESSION_LEVELS; + break; default: QAT_UTILS_LOG("Unknown device type! - %d.\n", device->deviceType); return CPA_STATUS_FAIL; } return CPA_STATUS_SUCCESS; } CpaStatus SalCtrl_CompressionInit(icp_accel_dev_t *device, sal_service_t *service) { CpaStatus status = CPA_STATUS_SUCCESS; Cpa32U numCompConcurrentReq = 0; Cpa32U request_ring_id = 0; Cpa32U response_ring_id = 0; char adfGetParam[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = { 0 }; char compMemPool[SAL_CFG_MAX_VAL_LEN_IN_BYTES] = { 0 }; char temp_string[SAL_CFG_MAX_VAL_LEN_IN_BYTES] = { 0 }; char temp_string2[SAL_CFG_MAX_VAL_LEN_IN_BYTES] = { 0 }; char *instance_name = NULL; sal_statistics_collection_t *pStatsCollection = (sal_statistics_collection_t *)device->pQatStats; icp_resp_deliv_method rx_resp_type = ICP_RESP_TYPE_IRQ; sal_compression_service_t *pCompressionService = (sal_compression_service_t *)service; Cpa32U msgSize = 0; char *section = DYN_SEC; SAL_SERVICE_GOOD_FOR_INIT(pCompressionService); pCompressionService->generic_service_info.state = SAL_SERVICE_STATE_INITIALIZING; if (CPA_FALSE == pCompressionService->generic_service_info.is_dyn) { section = icpGetProcessName(); } if (pStatsCollection == NULL) { return CPA_STATUS_FAIL; } /* Get Config Info: Accel Num, bank Num, packageID, coreAffinity, nodeAffinity and response mode */ pCompressionService->acceleratorNum = 0; /* Initialise device specific compression data */ SalCtrl_CompressionInit_CompData(device, pCompressionService); status = Sal_StringParsing( "Dc", pCompressionService->generic_service_info.instance, "BankNumber", temp_string); LAC_CHECK_STATUS(status); status = icp_adf_cfgGetParamValue(device, section, temp_string, adfGetParam); if (CPA_STATUS_SUCCESS != status) { QAT_UTILS_LOG("Failed to get %s from configuration.\n", temp_string); return status; } pCompressionService->bankNum = Sal_Strtoul(adfGetParam, NULL, SAL_CFG_BASE_DEC); status = Sal_StringParsing( "Dc", pCompressionService->generic_service_info.instance, "IsPolled", temp_string); LAC_CHECK_STATUS(status); status = icp_adf_cfgGetParamValue(device, section, temp_string, adfGetParam); if (CPA_STATUS_SUCCESS != status) { QAT_UTILS_LOG("Failed to get %s from configuration.\n", temp_string); return status; } pCompressionService->isPolled = (Cpa8U)Sal_Strtoul(adfGetParam, NULL, SAL_CFG_BASE_DEC); /* User instances only support poll and epoll mode */ if (SAL_RESP_POLL_CFG_FILE != pCompressionService->isPolled) { QAT_UTILS_LOG( "IsPolled %u is not supported for user instance %s.\n", pCompressionService->isPolled, temp_string); return CPA_STATUS_FAIL; } if (SAL_RESP_POLL_CFG_FILE == pCompressionService->isPolled) { rx_resp_type = ICP_RESP_TYPE_POLL; } status = icp_adf_cfgGetParamValue(device, LAC_CFG_SECTION_GENERAL, ADF_DEV_PKG_ID, adfGetParam); if (CPA_STATUS_SUCCESS != status) { QAT_UTILS_LOG("Failed to get %s from configuration.\n", ADF_DEV_PKG_ID); return status; } pCompressionService->pkgID = (Cpa16U)Sal_Strtoul(adfGetParam, NULL, SAL_CFG_BASE_DEC); status = icp_adf_cfgGetParamValue(device, LAC_CFG_SECTION_GENERAL, ADF_DEV_NODE_ID, adfGetParam); if (CPA_STATUS_SUCCESS != status) { QAT_UTILS_LOG("Failed to get %s from configuration.\n", 
ADF_DEV_NODE_ID); return status; } pCompressionService->nodeAffinity = (Cpa32U)Sal_Strtoul(adfGetParam, NULL, SAL_CFG_BASE_DEC); /* In case of interrupt instance, use the bank affinity set by adf_ctl * Otherwise, use the instance affinity for backwards compatibility */ if (SAL_RESP_POLL_CFG_FILE != pCompressionService->isPolled) { /* Next need to read the [AcceleratorX] section of the config * file */ status = Sal_StringParsing("Accelerator", pCompressionService->acceleratorNum, "", temp_string2); LAC_CHECK_STATUS(status); status = Sal_StringParsing("Bank", pCompressionService->bankNum, "CoreAffinity", temp_string); LAC_CHECK_STATUS(status); } else { strncpy(temp_string2, section, sizeof(temp_string2) - SAL_NULL_TERM_SIZE); temp_string2[SAL_CFG_MAX_VAL_LEN_IN_BYTES - SAL_NULL_TERM_SIZE] = '\0'; status = Sal_StringParsing( "Dc", pCompressionService->generic_service_info.instance, "CoreAffinity", temp_string); LAC_CHECK_STATUS(status); } status = icp_adf_cfgGetParamValue(device, temp_string2, temp_string, adfGetParam); if (CPA_STATUS_SUCCESS != status) { QAT_UTILS_LOG("Failed to get %s from configuration.\n", temp_string); return status; } pCompressionService->coreAffinity = (Cpa32U)Sal_Strtoul(adfGetParam, NULL, SAL_CFG_BASE_DEC); status = Sal_StringParsing( "Dc", pCompressionService->generic_service_info.instance, "NumConcurrentRequests", temp_string); LAC_CHECK_STATUS(status); status = icp_adf_cfgGetParamValue(device, section, temp_string, adfGetParam); if (CPA_STATUS_SUCCESS != status) { QAT_UTILS_LOG("Failed to get %s from configuration.\n", temp_string); return status; } numCompConcurrentReq = (Cpa32U)Sal_Strtoul(adfGetParam, NULL, SAL_CFG_BASE_DEC); if (validateConcurrRequest(numCompConcurrentReq)) { QAT_UTILS_LOG( "Invalid NumConcurrentRequests, valid values are: {64, 128, 256, ... 32768, 65536}.\n"); return CPA_STATUS_FAIL; } /* ADF does not allow us to completely fill the ring for batch requests */ pCompressionService->maxNumCompConcurrentReq = (numCompConcurrentReq - SAL_BATCH_SUBMIT_FREE_SPACE); /* 1. 
Create transport handles */ status = Sal_StringParsing( "Dc", pCompressionService->generic_service_info.instance, "RingTx", temp_string); LAC_CHECK_STATUS(status); msgSize = LAC_QAT_DC_REQ_SZ_LW * LAC_LONG_WORD_IN_BYTES; status = icp_adf_transCreateHandle( device, ICP_TRANS_TYPE_ETR, section, pCompressionService->acceleratorNum, pCompressionService->bankNum, temp_string, lac_getRingType(SAL_RING_TYPE_DC), NULL, ICP_RESP_TYPE_NONE, numCompConcurrentReq, msgSize, (icp_comms_trans_handle *)&( pCompressionService->trans_handle_compression_tx)); LAC_CHECK_STATUS(status); if (icp_adf_transGetRingNum( pCompressionService->trans_handle_compression_tx, &request_ring_id) != CPA_STATUS_SUCCESS) { icp_adf_transReleaseHandle( pCompressionService->trans_handle_compression_tx); QAT_UTILS_LOG("Failed to get DC TX ring number.\n"); return CPA_STATUS_FAIL; } status = Sal_StringParsing( "Dc", pCompressionService->generic_service_info.instance, "RingRx", temp_string); if (CPA_STATUS_SUCCESS != status) { icp_adf_transReleaseHandle( pCompressionService->trans_handle_compression_tx); return status; } msgSize = LAC_QAT_DC_RESP_SZ_LW * LAC_LONG_WORD_IN_BYTES; status = icp_adf_transCreateHandle( device, ICP_TRANS_TYPE_ETR, section, pCompressionService->acceleratorNum, pCompressionService->bankNum, temp_string, lac_getRingType(SAL_RING_TYPE_NONE), (icp_trans_callback)dcCompression_ProcessCallback, rx_resp_type, numCompConcurrentReq, msgSize, (icp_comms_trans_handle *)&( pCompressionService->trans_handle_compression_rx)); if (CPA_STATUS_SUCCESS != status) { icp_adf_transReleaseHandle( pCompressionService->trans_handle_compression_tx); return status; } if (icp_adf_transGetRingNum( pCompressionService->trans_handle_compression_rx, &response_ring_id) != CPA_STATUS_SUCCESS) { icp_adf_transReleaseHandle( pCompressionService->trans_handle_compression_tx); icp_adf_transReleaseHandle( pCompressionService->trans_handle_compression_rx); QAT_UTILS_LOG("Failed to get DC RX ring number.\n"); return CPA_STATUS_FAIL; } /* 2. 
Allocates memory pools */ /* Valid initialisation value for a pool ID */ pCompressionService->compression_mem_pool = LAC_MEM_POOL_INIT_POOL_ID; status = Sal_StringParsing( "Comp", pCompressionService->generic_service_info.instance, "_MemPool", compMemPool); if (CPA_STATUS_SUCCESS != status) { icp_adf_transReleaseHandle( pCompressionService->trans_handle_compression_tx); icp_adf_transReleaseHandle( pCompressionService->trans_handle_compression_rx); return status; } status = Lac_MemPoolCreate(&pCompressionService->compression_mem_pool, compMemPool, (numCompConcurrentReq + 1), sizeof(dc_compression_cookie_t), LAC_64BYTE_ALIGNMENT, CPA_FALSE, pCompressionService->nodeAffinity); if (CPA_STATUS_SUCCESS != status) { icp_adf_transReleaseHandle( pCompressionService->trans_handle_compression_tx); icp_adf_transReleaseHandle( pCompressionService->trans_handle_compression_rx); return status; } /* Init compression statistics */ status = dcStatsInit(pCompressionService); if (CPA_STATUS_SUCCESS != status) { Lac_MemPoolDestroy(pCompressionService->compression_mem_pool); icp_adf_transReleaseHandle( pCompressionService->trans_handle_compression_tx); icp_adf_transReleaseHandle( pCompressionService->trans_handle_compression_rx); return status; } if (CPA_TRUE == pStatsCollection->bDcStatsEnabled) { /* Get instance name for stats */ instance_name = LAC_OS_MALLOC(ADF_CFG_MAX_VAL_LEN_IN_BYTES); if (NULL == instance_name) { Lac_MemPoolDestroy( pCompressionService->compression_mem_pool); icp_adf_transReleaseHandle( pCompressionService->trans_handle_compression_tx); icp_adf_transReleaseHandle( pCompressionService->trans_handle_compression_rx); return CPA_STATUS_RESOURCE; } status = Sal_StringParsing( "Dc", pCompressionService->generic_service_info.instance, "Name", temp_string); if (CPA_STATUS_SUCCESS != status) { Lac_MemPoolDestroy( pCompressionService->compression_mem_pool); icp_adf_transReleaseHandle( pCompressionService->trans_handle_compression_tx); icp_adf_transReleaseHandle( pCompressionService->trans_handle_compression_rx); LAC_OS_FREE(instance_name); return status; } status = icp_adf_cfgGetParamValue(device, section, temp_string, adfGetParam); if (CPA_STATUS_SUCCESS != status) { QAT_UTILS_LOG("Failed to get %s from configuration.\n", temp_string); Lac_MemPoolDestroy( pCompressionService->compression_mem_pool); icp_adf_transReleaseHandle( pCompressionService->trans_handle_compression_tx); icp_adf_transReleaseHandle( pCompressionService->trans_handle_compression_rx); LAC_OS_FREE(instance_name); return status; } snprintf(instance_name, ADF_CFG_MAX_VAL_LEN_IN_BYTES, "%s", adfGetParam); pCompressionService->debug_file = LAC_OS_MALLOC(sizeof(debug_file_info_t)); if (NULL == pCompressionService->debug_file) { Lac_MemPoolDestroy( pCompressionService->compression_mem_pool); icp_adf_transReleaseHandle( pCompressionService->trans_handle_compression_tx); icp_adf_transReleaseHandle( pCompressionService->trans_handle_compression_rx); LAC_OS_FREE(instance_name); return CPA_STATUS_RESOURCE; } memset(pCompressionService->debug_file, 0, sizeof(debug_file_info_t)); pCompressionService->debug_file->name = instance_name; pCompressionService->debug_file->seq_read = SalCtrl_CompresionDebug; pCompressionService->debug_file->private_data = pCompressionService; pCompressionService->debug_file->parent = pCompressionService->generic_service_info.debug_parent_dir; status = icp_adf_debugAddFile(device, pCompressionService->debug_file); if (CPA_STATUS_SUCCESS != status) { Lac_MemPoolDestroy( pCompressionService->compression_mem_pool); 
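/*
 * As in the earlier failure paths of this function, everything
 * acquired so far is unwound before returning: the cookie memory
 * pool, both transport handles, the instance-name buffer and the
 * debug-file structure allocated above.
 */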
icp_adf_transReleaseHandle( pCompressionService->trans_handle_compression_tx); icp_adf_transReleaseHandle( pCompressionService->trans_handle_compression_rx); LAC_OS_FREE(instance_name); LAC_OS_FREE(pCompressionService->debug_file); return status; } } pCompressionService->generic_service_info.stats = pStatsCollection; pCompressionService->generic_service_info.state = SAL_SERVICE_STATE_INITIALIZED; return status; } CpaStatus SalCtrl_CompressionStart(icp_accel_dev_t *device, sal_service_t *service) { CpaStatus status = CPA_STATUS_SUCCESS; sal_compression_service_t *pCompressionService = (sal_compression_service_t *)service; if (SAL_SERVICE_STATE_INITIALIZED != pCompressionService->generic_service_info.state) { QAT_UTILS_LOG("Not in the correct state to call start.\n"); return CPA_STATUS_FAIL; } /**************************************************************/ /* Obtain Extended Features. I.e. Compress And Verify */ /**************************************************************/ pCompressionService->generic_service_info.dcExtendedFeatures = device->dcExtendedFeatures; pCompressionService->generic_service_info.state = SAL_SERVICE_STATE_RUNNING; return status; } CpaStatus SalCtrl_CompressionStop(icp_accel_dev_t *device, sal_service_t *service) { sal_compression_service_t *pCompressionService = (sal_compression_service_t *)service; if (SAL_SERVICE_STATE_RUNNING != pCompressionService->generic_service_info.state) { QAT_UTILS_LOG("Not in the correct state to call stop.\n"); return CPA_STATUS_FAIL; } if (icp_adf_is_dev_in_reset(device)) { pCompressionService->generic_service_info.state = SAL_SERVICE_STATE_RESTARTING; return CPA_STATUS_SUCCESS; } pCompressionService->generic_service_info.state = SAL_SERVICE_STATE_SHUTTING_DOWN; return CPA_STATUS_RETRY; } CpaStatus SalCtrl_CompressionShutdown(icp_accel_dev_t *device, sal_service_t *service) { CpaStatus status = CPA_STATUS_SUCCESS; sal_compression_service_t *pCompressionService = (sal_compression_service_t *)service; sal_statistics_collection_t *pStatsCollection = (sal_statistics_collection_t *)device->pQatStats; if ((SAL_SERVICE_STATE_INITIALIZED != pCompressionService->generic_service_info.state) && (SAL_SERVICE_STATE_SHUTTING_DOWN != pCompressionService->generic_service_info.state) && (SAL_SERVICE_STATE_RESTARTING != pCompressionService->generic_service_info.state)) { QAT_UTILS_LOG("Not in the correct state to call shutdown.\n"); return CPA_STATUS_FAIL; } Lac_MemPoolDestroy(pCompressionService->compression_mem_pool); status = icp_adf_transReleaseHandle( pCompressionService->trans_handle_compression_tx); LAC_CHECK_STATUS(status); status = icp_adf_transReleaseHandle( pCompressionService->trans_handle_compression_rx); LAC_CHECK_STATUS(status); if (CPA_TRUE == pStatsCollection->bDcStatsEnabled) { /* Clean stats */ if (NULL != pCompressionService->debug_file) { icp_adf_debugRemoveFile( pCompressionService->debug_file); LAC_OS_FREE(pCompressionService->debug_file->name); LAC_OS_FREE(pCompressionService->debug_file); pCompressionService->debug_file = NULL; } } pCompressionService->generic_service_info.stats = NULL; dcStatsFree(pCompressionService); if (icp_adf_is_dev_in_reset(device)) { pCompressionService->generic_service_info.state = SAL_SERVICE_STATE_RESTARTING; return CPA_STATUS_SUCCESS; } pCompressionService->generic_service_info.state = SAL_SERVICE_STATE_SHUTDOWN; return status; } CpaStatus cpaDcGetStatusText(const CpaInstanceHandle dcInstance, const CpaStatus errStatus, Cpa8S *pStatusText) { CpaStatus status = CPA_STATUS_SUCCESS; 
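/*
 * A minimal usage sketch for this API (hypothetical caller; it
 * assumes the output buffer is sized with the cpa.h bound
 * CPA_STATUS_MAX_STR_LENGTH_IN_BYTES):
 *
 *	Cpa8S text[CPA_STATUS_MAX_STR_LENGTH_IN_BYTES];
 *
 *	if (CPA_STATUS_SUCCESS ==
 *	    cpaDcGetStatusText(dcInstance, CPA_STATUS_RETRY, text))
 *		printf("%s\n", (char *)text);
 */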
LAC_CHECK_NULL_PARAM(pStatusText); switch (errStatus) { case CPA_STATUS_SUCCESS: LAC_COPY_STRING(pStatusText, CPA_STATUS_STR_SUCCESS); break; case CPA_STATUS_FAIL: LAC_COPY_STRING(pStatusText, CPA_STATUS_STR_FAIL); break; case CPA_STATUS_RETRY: LAC_COPY_STRING(pStatusText, CPA_STATUS_STR_RETRY); break; case CPA_STATUS_RESOURCE: LAC_COPY_STRING(pStatusText, CPA_STATUS_STR_RESOURCE); break; case CPA_STATUS_INVALID_PARAM: LAC_COPY_STRING(pStatusText, CPA_STATUS_STR_INVALID_PARAM); break; case CPA_STATUS_FATAL: LAC_COPY_STRING(pStatusText, CPA_STATUS_STR_FATAL); break; case CPA_STATUS_UNSUPPORTED: LAC_COPY_STRING(pStatusText, CPA_STATUS_STR_UNSUPPORTED); break; default: status = CPA_STATUS_INVALID_PARAM; break; } return status; } CpaStatus cpaDcGetNumIntermediateBuffers(CpaInstanceHandle dcInstance, Cpa16U *pNumBuffers) { CpaInstanceHandle insHandle = NULL; sal_compression_service_t *pService = NULL; if (CPA_INSTANCE_HANDLE_SINGLE == dcInstance) { insHandle = dcGetFirstHandle(); } else { insHandle = dcInstance; } LAC_CHECK_NULL_PARAM(insHandle); LAC_CHECK_NULL_PARAM(pNumBuffers); pService = (sal_compression_service_t *)insHandle; *pNumBuffers = pService->numInterBuffs; return CPA_STATUS_SUCCESS; } CpaStatus cpaDcStartInstance(CpaInstanceHandle instanceHandle, Cpa16U numBuffers, CpaBufferList **pIntermediateBufferPtrsArray) { icp_qat_addr_width_t *pInterBuffPtrsArray = NULL; icp_qat_addr_width_t pArrayBufferListDescPhyAddr = 0; icp_qat_addr_width_t bufListDescPhyAddr; icp_qat_addr_width_t bufListAlignedPhyAddr; CpaFlatBuffer *pClientCurrFlatBuffer = NULL; icp_buffer_list_desc_t *pBufferListDesc = NULL; icp_flat_buffer_desc_t *pCurrFlatBufDesc = NULL; CpaInstanceInfo2 info = { 0 }; icp_accel_dev_t *dev = NULL; CpaStatus status = CPA_STATUS_SUCCESS; sal_compression_service_t *pService = NULL; CpaInstanceHandle insHandle = NULL; Cpa16U bufferIndex = 0; Cpa32U numFlatBuffers = 0; Cpa64U clientListSize = 0; CpaBufferList *pClientCurrentIntermediateBuffer = NULL; Cpa32U bufferIndex2 = 0; CpaBufferList **pTempIntermediateBufferPtrsArray; Cpa64U lastClientListSize = 0; if (CPA_INSTANCE_HANDLE_SINGLE == instanceHandle) { insHandle = dcGetFirstHandle(); } else { insHandle = instanceHandle; } LAC_CHECK_NULL_PARAM(insHandle); status = cpaDcInstanceGetInfo2(insHandle, &info); if (CPA_STATUS_SUCCESS != status) { QAT_UTILS_LOG("Can not get instance info.\n"); return status; } dev = icp_adf_getAccelDevByAccelId(info.physInstId.packageId); if (NULL == dev) { QAT_UTILS_LOG("Can not find device for the instance\n"); return CPA_STATUS_FAIL; } if (NULL == pIntermediateBufferPtrsArray) { /* Increment dev ref counter and return - DRAM is not used */ icp_qa_dev_get(dev); return CPA_STATUS_SUCCESS; } if (0 == numBuffers) { /* Increment dev ref counter and return - DRAM is not used */ icp_qa_dev_get(dev); return CPA_STATUS_SUCCESS; } pService = (sal_compression_service_t *)insHandle; LAC_CHECK_NULL_PARAM(insHandle); if ((numBuffers > 0) && (NULL == pIntermediateBufferPtrsArray)) { QAT_UTILS_LOG("Invalid Intermediate Buffers Array pointer\n"); return CPA_STATUS_INVALID_PARAM; } /* Check number of intermediate buffers allocated by user */ if ((pService->numInterBuffs != numBuffers)) { QAT_UTILS_LOG("Invalid number of buffers\n"); return CPA_STATUS_INVALID_PARAM; } pTempIntermediateBufferPtrsArray = pIntermediateBufferPtrsArray; for (bufferIndex = 0; bufferIndex < numBuffers; bufferIndex++) { if (NULL == *pTempIntermediateBufferPtrsArray) { QAT_UTILS_LOG( "Intermediate Buffer - Invalid Buffer List pointer\n"); return 
CPA_STATUS_INVALID_PARAM; } if (NULL == (*pTempIntermediateBufferPtrsArray)->pBuffers) { QAT_UTILS_LOG( "Intermediate Buffer - Invalid Flat Buffer descriptor pointer\n"); return CPA_STATUS_INVALID_PARAM; } if (NULL == (*pTempIntermediateBufferPtrsArray)->pPrivateMetaData) { QAT_UTILS_LOG( "Intermediate Buffer - Invalid Private MetaData descriptor pointer\n"); return CPA_STATUS_INVALID_PARAM; } clientListSize = 0; for (bufferIndex2 = 0; bufferIndex2 < (*pTempIntermediateBufferPtrsArray)->numBuffers; bufferIndex2++) { if ((0 != (*pTempIntermediateBufferPtrsArray) ->pBuffers[bufferIndex2] .dataLenInBytes) && NULL == (*pTempIntermediateBufferPtrsArray) ->pBuffers[bufferIndex2] .pData) { QAT_UTILS_LOG( "Intermediate Buffer - Invalid Flat Buffer pointer\n"); return CPA_STATUS_INVALID_PARAM; } clientListSize += (*pTempIntermediateBufferPtrsArray) ->pBuffers[bufferIndex2] .dataLenInBytes; } if (bufferIndex != 0) { if (lastClientListSize != clientListSize) { QAT_UTILS_LOG( "SGLs have to be of the same size.\n"); return CPA_STATUS_INVALID_PARAM; } } else { lastClientListSize = clientListSize; } pTempIntermediateBufferPtrsArray++; } /* Allocate array of physical pointers to icp_buffer_list_desc_t */ status = LAC_OS_CAMALLOC(&pInterBuffPtrsArray, (numBuffers * sizeof(icp_qat_addr_width_t)), LAC_64BYTE_ALIGNMENT, pService->nodeAffinity); if (CPA_STATUS_SUCCESS != status) { QAT_UTILS_LOG("Can not allocate Intermediate Buffers array.\n"); return status; } /* Get physical address of the intermediate buffer pointers array */ pArrayBufferListDescPhyAddr = LAC_MEM_CAST_PTR_TO_UINT64( LAC_OS_VIRT_TO_PHYS_INTERNAL(pInterBuffPtrsArray)); pService->pInterBuffPtrsArray = pInterBuffPtrsArray; pService->pInterBuffPtrsArrayPhyAddr = pArrayBufferListDescPhyAddr; /* Get the full size of the buffer list */ /* Assumption: all the SGLs allocated by the user have the same size */ clientListSize = 0; for (bufferIndex = 0; bufferIndex < (*pIntermediateBufferPtrsArray)->numBuffers; bufferIndex++) { clientListSize += ((*pIntermediateBufferPtrsArray) ->pBuffers[bufferIndex] .dataLenInBytes); } pService->minInterBuffSizeInBytes = clientListSize; for (bufferIndex = 0; bufferIndex < numBuffers; bufferIndex++) { /* Get pointer to the client Intermediate Buffer List * (CpaBufferList) */ pClientCurrentIntermediateBuffer = *pIntermediateBufferPtrsArray; /* Get number of flat buffers in the buffer list */ numFlatBuffers = pClientCurrentIntermediateBuffer->numBuffers; /* Get pointer to the client array of CpaFlatBuffers */ pClientCurrFlatBuffer = pClientCurrentIntermediateBuffer->pBuffers; /* Calculate Physical address of current private SGL */ bufListDescPhyAddr = LAC_OS_VIRT_TO_PHYS_EXTERNAL( (*pService), pClientCurrentIntermediateBuffer->pPrivateMetaData); if (bufListDescPhyAddr == 0) { QAT_UTILS_LOG( "Unable to get the physical address of the metadata.\n"); return CPA_STATUS_FAIL; } /* Align SGL physical address */ bufListAlignedPhyAddr = LAC_ALIGN_POW2_ROUNDUP(bufListDescPhyAddr, ICP_DESCRIPTOR_ALIGNMENT_BYTES); /* Set physical address of the Intermediate Buffer SGL in the * SGLs array */ *pInterBuffPtrsArray = LAC_MEM_CAST_PTR_TO_UINT64(bufListAlignedPhyAddr); /* Calculate (virtual) offset to the buffer list descriptor */ pBufferListDesc = (icp_buffer_list_desc_t *)((LAC_ARCH_UINT)pClientCurrentIntermediateBuffer ->pPrivateMetaData + (LAC_ARCH_UINT)(bufListAlignedPhyAddr - bufListDescPhyAddr)); /* Set number of flat buffers in the physical Buffer List * descriptor */ pBufferListDesc->numBuffers = numFlatBuffers; /* Go past the 
Buffer List descriptor to the list of buffer * descriptors */ pCurrFlatBufDesc = (icp_flat_buffer_desc_t *)((pBufferListDesc->phyBuffers)); /* Loop for each flat buffer in the SGL */ while (0 != numFlatBuffers) { /* Set length of the current flat buffer */ pCurrFlatBufDesc->dataLenInBytes = pClientCurrFlatBuffer->dataLenInBytes; /* Set physical address of the flat buffer */ pCurrFlatBufDesc->phyBuffer = LAC_MEM_CAST_PTR_TO_UINT64( LAC_OS_VIRT_TO_PHYS_EXTERNAL( (*pService), pClientCurrFlatBuffer->pData)); if (pCurrFlatBufDesc->phyBuffer == 0) { QAT_UTILS_LOG( "Unable to get the physical address of the flat buffer.\n"); return CPA_STATUS_FAIL; } pCurrFlatBufDesc++; pClientCurrFlatBuffer++; numFlatBuffers--; } pIntermediateBufferPtrsArray++; pInterBuffPtrsArray++; } pService->generic_service_info.isInstanceStarted = CPA_TRUE; /* Increment dev ref counter */ icp_qa_dev_get(dev); return CPA_STATUS_SUCCESS; } CpaStatus cpaDcStopInstance(CpaInstanceHandle instanceHandle) { CpaInstanceHandle insHandle = NULL; CpaInstanceInfo2 info = { 0 }; icp_accel_dev_t *dev = NULL; CpaStatus status = CPA_STATUS_SUCCESS; sal_compression_service_t *pService = NULL; if (CPA_INSTANCE_HANDLE_SINGLE == instanceHandle) { insHandle = dcGetFirstHandle(); } else { insHandle = instanceHandle; } LAC_CHECK_NULL_PARAM(insHandle); pService = (sal_compression_service_t *)insHandle; /* Free Intermediate Buffer Pointers Array */ if (pService->pInterBuffPtrsArray != NULL) { LAC_OS_CAFREE(pService->pInterBuffPtrsArray); pService->pInterBuffPtrsArray = 0; } pService->pInterBuffPtrsArrayPhyAddr = 0; status = cpaDcInstanceGetInfo2(insHandle, &info); if (CPA_STATUS_SUCCESS != status) { QAT_UTILS_LOG("Can not get instance info.\n"); return status; } dev = icp_adf_getAccelDevByAccelId(info.physInstId.packageId); if (NULL == dev) { QAT_UTILS_LOG("Can not find device for the instance.\n"); return CPA_STATUS_FAIL; } pService->generic_service_info.isInstanceStarted = CPA_FALSE; /* Decrement dev ref counter */ icp_qa_dev_put(dev); return CPA_STATUS_SUCCESS; } CpaStatus cpaDcGetNumInstances(Cpa16U *pNumInstances) { CpaStatus status = CPA_STATUS_SUCCESS; icp_accel_dev_t **pAdfInsts = NULL; icp_accel_dev_t *dev_addr = NULL; sal_t *base_addr = NULL; sal_list_t *list_temp = NULL; Cpa16U num_accel_dev = 0; Cpa16U num = 0; Cpa16U i = 0; LAC_CHECK_NULL_PARAM(pNumInstances); /* Get the number of accel_dev in the system */ status = icp_amgr_getNumInstances(&num_accel_dev); LAC_CHECK_STATUS(status); /* Allocate memory to store addr of accel_devs */ pAdfInsts = malloc(num_accel_dev * sizeof(icp_accel_dev_t *), M_QAT, M_WAITOK); num_accel_dev = 0; /* Get ADF to return accel_devs with dc enabled */ status = icp_amgr_getAllAccelDevByCapabilities( ICP_ACCEL_CAPABILITIES_COMPRESSION, pAdfInsts, &num_accel_dev); if (CPA_STATUS_SUCCESS == status) { for (i = 0; i < num_accel_dev; i++) { dev_addr = (icp_accel_dev_t *)pAdfInsts[i]; if (NULL != dev_addr) { base_addr = dev_addr->pSalHandle; if (NULL != base_addr) { list_temp = base_addr->compression_services; while (NULL != list_temp) { num++; list_temp = SalList_next(list_temp); } } } } *pNumInstances = num; } free(pAdfInsts, M_QAT); return status; } CpaStatus cpaDcGetInstances(Cpa16U numInstances, CpaInstanceHandle *dcInstances) { CpaStatus status = CPA_STATUS_SUCCESS; icp_accel_dev_t **pAdfInsts = NULL; icp_accel_dev_t *dev_addr = NULL; sal_t *base_addr = NULL; sal_list_t *list_temp = NULL; Cpa16U num_accel_dev = 0; Cpa16U index = 0; Cpa16U i = 0; LAC_CHECK_NULL_PARAM(dcInstances); if (0 == numInstances) { 
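/*
 * A zero numInstances is rejected here. The expected calling
 * pattern (a sketch mirroring how this API pairs with
 * cpaDcGetNumInstances() above, using the same malloc(9) idiom as
 * this file; error handling omitted) sizes the handle array first:
 *
 *	Cpa16U n = 0;
 *	CpaInstanceHandle *handles;
 *
 *	cpaDcGetNumInstances(&n);
 *	handles = malloc(n * sizeof(*handles), M_QAT, M_WAITOK);
 *	cpaDcGetInstances(n, handles);
 */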
QAT_UTILS_LOG("numInstances is 0.\n"); return CPA_STATUS_INVALID_PARAM; } /* Get the number of accel_dev in the system */ status = icp_amgr_getNumInstances(&num_accel_dev); LAC_CHECK_STATUS(status); /* Allocate memory to store addr of accel_devs */ pAdfInsts = malloc(num_accel_dev * sizeof(icp_accel_dev_t *), M_QAT, M_WAITOK); num_accel_dev = 0; /* Get ADF to return accel_devs with dc enabled */ status = icp_amgr_getAllAccelDevByCapabilities( ICP_ACCEL_CAPABILITIES_COMPRESSION, pAdfInsts, &num_accel_dev); if (CPA_STATUS_SUCCESS == status) { /* First check the number of instances in the system */ for (i = 0; i < num_accel_dev; i++) { dev_addr = (icp_accel_dev_t *)pAdfInsts[i]; if (NULL != dev_addr) { base_addr = dev_addr->pSalHandle; if (NULL != base_addr) { list_temp = base_addr->compression_services; while (NULL != list_temp) { if (index > (numInstances - 1)) { break; } dcInstances[index] = SalList_getObject( list_temp); list_temp = SalList_next(list_temp); index++; } } } } if (numInstances > index) { QAT_UTILS_LOG("Only %d dc instances available.\n", index); status = CPA_STATUS_RESOURCE; } } if (CPA_STATUS_SUCCESS == status) { index = 0; for (i = 0; i < num_accel_dev; i++) { dev_addr = (icp_accel_dev_t *)pAdfInsts[i]; /* Note dev_addr cannot be NULL here as numInstances=0 is not valid and if dev_addr=NULL then index=0 (which is less than numInstances and status is set to _RESOURCE above */ base_addr = dev_addr->pSalHandle; if (NULL != base_addr) { list_temp = base_addr->compression_services; while (NULL != list_temp) { if (index > (numInstances - 1)) { break; } dcInstances[index] = SalList_getObject(list_temp); list_temp = SalList_next(list_temp); index++; } } } } free(pAdfInsts, M_QAT); return status; } CpaStatus cpaDcInstanceGetInfo2(const CpaInstanceHandle instanceHandle, CpaInstanceInfo2 *pInstanceInfo2) { sal_compression_service_t *pCompressionService = NULL; CpaInstanceHandle insHandle = NULL; icp_accel_dev_t *dev = NULL; CpaStatus status = CPA_STATUS_SUCCESS; char keyStr[ADF_CFG_MAX_KEY_LEN_IN_BYTES] = { 0 }; char valStr[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = { 0 }; char *section = DYN_SEC; if (CPA_INSTANCE_HANDLE_SINGLE == instanceHandle) { insHandle = dcGetFirstHandle(); } else { insHandle = instanceHandle; } LAC_CHECK_NULL_PARAM(insHandle); SAL_CHECK_INSTANCE_TYPE(insHandle, SAL_SERVICE_TYPE_COMPRESSION); LAC_CHECK_NULL_PARAM(pInstanceInfo2); LAC_OS_BZERO(pInstanceInfo2, sizeof(CpaInstanceInfo2)); pInstanceInfo2->accelerationServiceType = CPA_ACC_SVC_TYPE_DATA_COMPRESSION; snprintf((char *)pInstanceInfo2->vendorName, CPA_INST_VENDOR_NAME_SIZE, "%s", SAL_INFO2_VENDOR_NAME); pInstanceInfo2->vendorName[CPA_INST_VENDOR_NAME_SIZE - 1] = '\0'; snprintf((char *)pInstanceInfo2->swVersion, CPA_INST_SW_VERSION_SIZE, "Version %d.%d", SAL_INFO2_DRIVER_SW_VERSION_MAJ_NUMBER, SAL_INFO2_DRIVER_SW_VERSION_MIN_NUMBER); pInstanceInfo2->swVersion[CPA_INST_SW_VERSION_SIZE - 1] = '\0'; /* Note we can safely read the contents of the compression service instance here because icp_amgr_getAccelDevByCapabilities() only returns devs that have started */ pCompressionService = (sal_compression_service_t *)insHandle; pInstanceInfo2->physInstId.packageId = pCompressionService->pkgID; pInstanceInfo2->physInstId.acceleratorId = pCompressionService->acceleratorNum; pInstanceInfo2->physInstId.executionEngineId = 0; pInstanceInfo2->physInstId.busAddress = icp_adf_get_busAddress(pInstanceInfo2->physInstId.packageId); /* set coreAffinity to zero before use */ LAC_OS_BZERO(pInstanceInfo2->coreAffinity, 
sizeof(pInstanceInfo2->coreAffinity)); CPA_BITMAP_BIT_SET(pInstanceInfo2->coreAffinity, pCompressionService->coreAffinity); pInstanceInfo2->nodeAffinity = pCompressionService->nodeAffinity; if (CPA_TRUE == pCompressionService->generic_service_info.isInstanceStarted) { pInstanceInfo2->operState = CPA_OPER_STATE_UP; } else { pInstanceInfo2->operState = CPA_OPER_STATE_DOWN; } pInstanceInfo2->requiresPhysicallyContiguousMemory = CPA_TRUE; if (SAL_RESP_POLL_CFG_FILE == pCompressionService->isPolled) { pInstanceInfo2->isPolled = CPA_TRUE; } else { pInstanceInfo2->isPolled = CPA_FALSE; } pInstanceInfo2->isOffloaded = CPA_TRUE; /* Get the instance name and part name from the config file */ dev = icp_adf_getAccelDevByAccelId(pCompressionService->pkgID); if (NULL == dev) { QAT_UTILS_LOG("Can not find device for the instance.\n"); LAC_OS_BZERO(pInstanceInfo2, sizeof(CpaInstanceInfo2)); return CPA_STATUS_FAIL; } snprintf((char *)pInstanceInfo2->partName, CPA_INST_PART_NAME_SIZE, SAL_INFO2_PART_NAME, dev->deviceName); pInstanceInfo2->partName[CPA_INST_PART_NAME_SIZE - 1] = '\0'; if (CPA_FALSE == pCompressionService->generic_service_info.is_dyn) { section = icpGetProcessName(); } status = Sal_StringParsing( "Dc", pCompressionService->generic_service_info.instance, "Name", keyStr); LAC_CHECK_STATUS(status); status = icp_adf_cfgGetParamValue(dev, section, keyStr, valStr); LAC_CHECK_STATUS(status); strncpy((char *)pInstanceInfo2->instName, valStr, sizeof(pInstanceInfo2->instName) - 1); pInstanceInfo2->instName[CPA_INST_NAME_SIZE - 1] = '\0'; #if __GNUC__ >= 7 #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wformat-truncation" #endif snprintf((char *)pInstanceInfo2->instID, CPA_INST_ID_SIZE, "%s_%s", section, valStr); #if __GNUC__ >= 7 #pragma GCC diagnostic pop #endif return CPA_STATUS_SUCCESS; } CpaStatus cpaDcQueryCapabilities(CpaInstanceHandle dcInstance, CpaDcInstanceCapabilities *pInstanceCapabilities) { CpaInstanceHandle insHandle = NULL; sal_compression_service_t *pService = NULL; Cpa32U capabilitiesMask = 0; dc_extd_ftrs_t *pExtendedFtrs = NULL; if (CPA_INSTANCE_HANDLE_SINGLE == dcInstance) { insHandle = dcGetFirstHandle(); if (NULL == insHandle) { QAT_UTILS_LOG("Can not get the instance.\n"); return CPA_STATUS_FAIL; } } else { insHandle = dcInstance; } pService = (sal_compression_service_t *)insHandle; LAC_CHECK_NULL_PARAM(insHandle); SAL_CHECK_INSTANCE_TYPE(insHandle, SAL_SERVICE_TYPE_COMPRESSION); LAC_CHECK_NULL_PARAM(pInstanceCapabilities); memset(pInstanceCapabilities, 0, sizeof(CpaDcInstanceCapabilities)); capabilitiesMask = pService->generic_service_info.capabilitiesMask; /* Set compression capabilities */ if (capabilitiesMask & ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY) { pInstanceCapabilities->integrityCrcs = CPA_TRUE; } pInstanceCapabilities->endOfLastBlock = CPA_TRUE; pInstanceCapabilities->statefulDeflateCompression = CPA_FALSE; pInstanceCapabilities->statefulDeflateDecompression = CPA_TRUE; pInstanceCapabilities->statelessDeflateCompression = CPA_TRUE; pInstanceCapabilities->statelessDeflateDecompression = CPA_TRUE; pInstanceCapabilities->checksumCRC32 = CPA_TRUE; pInstanceCapabilities->checksumAdler32 = CPA_TRUE; pInstanceCapabilities->dynamicHuffman = CPA_TRUE; pInstanceCapabilities->precompiledHuffman = CPA_FALSE; pInstanceCapabilities->dynamicHuffmanBufferReq = CPA_TRUE; pInstanceCapabilities->autoSelectBestHuffmanTree = CPA_TRUE; pInstanceCapabilities->validWindowSizeMaskCompression = pService->comp_device_data.windowSizeMask; 
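
Taken together, cpaDcGetNumInstances(), cpaDcGetInstances() and cpaDcInstanceGetInfo2() above give callers everything needed for instance discovery. A minimal user-space sketch follows (error handling trimmed; the header names and calloc-based allocation are assumptions, not part of this driver):

#include "cpa.h"
#include "cpa_dc.h"
#include <stdlib.h>

static CpaInstanceHandle
pick_first_dc_instance(void)
{
	Cpa16U num = 0;
	CpaInstanceHandle *handles = NULL;
	CpaInstanceHandle chosen = NULL;
	CpaInstanceInfo2 info = { 0 };

	if (CPA_STATUS_SUCCESS != cpaDcGetNumInstances(&num) || 0 == num)
		return NULL;
	handles = calloc(num, sizeof(*handles));
	if (NULL == handles)
		return NULL;
	/* Returns CPA_STATUS_RESOURCE if fewer than num are available. */
	if (CPA_STATUS_SUCCESS == cpaDcGetInstances(num, handles) &&
	    CPA_STATUS_SUCCESS == cpaDcInstanceGetInfo2(handles[0], &info))
		chosen = handles[0]; /* info.instName, info.isPolled, ... */
	free(handles);
	return chosen;
}
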
pInstanceCapabilities->validWindowSizeMaskDecompression = pService->comp_device_data.windowSizeMask; pExtendedFtrs = (dc_extd_ftrs_t *)&( ((sal_service_t *)insHandle)->dcExtendedFeatures); pInstanceCapabilities->batchAndPack = CPA_FALSE; pInstanceCapabilities->compressAndVerify = (CpaBoolean)pExtendedFtrs->is_cnv; pInstanceCapabilities->compressAndVerifyStrict = CPA_TRUE; pInstanceCapabilities->compressAndVerifyAndRecover = (CpaBoolean)pExtendedFtrs->is_cnvnr; return CPA_STATUS_SUCCESS; } CpaStatus cpaDcSetAddressTranslation(const CpaInstanceHandle instanceHandle, CpaVirtualToPhysical virtual2Physical) { sal_service_t *pService = NULL; CpaInstanceHandle insHandle = NULL; if (CPA_INSTANCE_HANDLE_SINGLE == instanceHandle) { insHandle = dcGetFirstHandle(); } else { insHandle = instanceHandle; } LAC_CHECK_NULL_PARAM(insHandle); SAL_CHECK_INSTANCE_TYPE(insHandle, SAL_SERVICE_TYPE_COMPRESSION); LAC_CHECK_NULL_PARAM(virtual2Physical); pService = (sal_service_t *)insHandle; pService->virt2PhysClient = virtual2Physical; return CPA_STATUS_SUCCESS; } /** ****************************************************************************** * @ingroup cpaDcCommon * Data compression specific polling function which polls a DC instance. *****************************************************************************/ CpaStatus icp_sal_DcPollInstance(CpaInstanceHandle instanceHandle_in, Cpa32U response_quota) { CpaStatus status = CPA_STATUS_SUCCESS; sal_compression_service_t *dc_handle = NULL; sal_service_t *gen_handle = NULL; icp_comms_trans_handle trans_hndTable[DC_NUM_RX_RINGS]; if (CPA_INSTANCE_HANDLE_SINGLE == instanceHandle_in) { dc_handle = (sal_compression_service_t *)dcGetFirstHandle(); } else { dc_handle = (sal_compression_service_t *)instanceHandle_in; } LAC_CHECK_NULL_PARAM(dc_handle); SAL_RUNNING_CHECK(dc_handle); gen_handle = &(dc_handle->generic_service_info); if (SAL_SERVICE_TYPE_COMPRESSION != gen_handle->type) { QAT_UTILS_LOG("Instance handle type is incorrect.\n"); return CPA_STATUS_FAIL; } /* * From the instanceHandle we must get the trans_handle and send * down to adf for polling. * Populate our trans handle table with the appropriate handles. */ trans_hndTable[0] = dc_handle->trans_handle_compression_rx; /* Call adf to do the polling. 
*/ status = icp_adf_pollInstance(trans_hndTable, DC_NUM_RX_RINGS, response_quota); return status; } /** ****************************************************************************** * @ingroup cpaDcCommon *****************************************************************************/ CpaStatus cpaDcInstanceSetNotificationCb( const CpaInstanceHandle instanceHandle, const CpaDcInstanceNotificationCbFunc pInstanceNotificationCb, void *pCallbackTag) { CpaStatus status = CPA_STATUS_SUCCESS; sal_service_t *gen_handle = instanceHandle; LAC_CHECK_NULL_PARAM(gen_handle); gen_handle->notification_cb = pInstanceNotificationCb; gen_handle->cb_tag = pCallbackTag; return status; } CpaInstanceHandle dcGetFirstHandle(void) { CpaStatus status = CPA_STATUS_SUCCESS; static icp_accel_dev_t *adfInsts[ADF_MAX_DEVICES] = { 0 }; CpaInstanceHandle dcInst = NULL; icp_accel_dev_t *dev_addr = NULL; sal_t *base_addr = NULL; sal_list_t *list_temp = NULL; Cpa16U i, num_dc = 0; /* Only need 1 dev with compression enabled - so check all devices */ status = icp_amgr_getAllAccelDevByCapabilities( ICP_ACCEL_CAPABILITIES_COMPRESSION, adfInsts, &num_dc); if ((0 == num_dc) || (CPA_STATUS_SUCCESS != status)) { QAT_UTILS_LOG( "No compression devices enabled in the system.\n"); return dcInst; } for (i = 0; i < num_dc; i++) { dev_addr = (icp_accel_dev_t *)adfInsts[i]; if (NULL != dev_addr) { base_addr = dev_addr->pSalHandle; if (NULL != base_addr) { list_temp = base_addr->compression_services; if (NULL != list_temp) { dcInst = SalList_getObject(list_temp); break; } } } } return dcInst; } diff --git a/sys/dev/qat/qat_api/common/ctrl/sal_create_services.c b/sys/dev/qat/qat_api/common/ctrl/sal_create_services.c index d51cc1bcace0..a512028ab9fe 100644 --- a/sys/dev/qat/qat_api/common/ctrl/sal_create_services.c +++ b/sys/dev/qat/qat_api/common/ctrl/sal_create_services.c @@ -1,105 +1,108 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright(c) 2007-2022 Intel Corporation */ /* $FreeBSD$ */ /** ***************************************************************************** * @file sal_create_services.c * * @defgroup SalCtrl Service Access Layer Controller * * @ingroup SalCtrl * * @description * This file contains the main function to create a specific service. 
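
For a polled DC instance (IsPolled set in the configuration), responses are delivered only when the caller drives icp_sal_DcPollInstance() defined above. A hedged sketch of the usual completion loop; `done` stands in for a flag set by the request callback, and treating a response_quota of 0 as "no limit" is an assumption to verify against the API guide:

#include "cpa.h"
#include "cpa_dc.h"
#include "icp_sal_poll.h"

static CpaStatus
wait_for_dc_completion(CpaInstanceHandle dcInst, volatile int *done)
{
	CpaStatus status;

	while (!*done) {
		status = icp_sal_DcPollInstance(dcInst, 0);
		/* CPA_STATUS_RETRY only means the rings were empty. */
		if (CPA_STATUS_SUCCESS != status &&
		    CPA_STATUS_RETRY != status)
			return status;
	}
	return CPA_STATUS_SUCCESS;
}
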
* *****************************************************************************/ #include "cpa.h" #include "lac_mem.h" #include "lac_mem_pools.h" #include "qat_utils.h" #include "lac_list.h" #include "icp_adf_transport.h" #include "icp_accel_devices.h" #include "icp_adf_debug.h" #include "icp_qat_fw_la.h" #include "lac_sym_qat.h" #include "sal_types_compression.h" #include "lac_sal_types_crypto.h" #include "icp_adf_init.h" #include "lac_sal.h" #include "lac_sal_ctrl.h" CpaStatus SalCtrl_ServiceCreate(sal_service_type_t serviceType, Cpa32U instance, sal_service_t **ppInst) { sal_crypto_service_t *pCrypto_service = NULL; sal_compression_service_t *pCompression_service = NULL; switch ((sal_service_type_t)serviceType) { case SAL_SERVICE_TYPE_CRYPTO_ASYM: case SAL_SERVICE_TYPE_CRYPTO_SYM: case SAL_SERVICE_TYPE_CRYPTO: { pCrypto_service = malloc(sizeof(sal_crypto_service_t), M_QAT, M_WAITOK); /* Zero memory */ memset(pCrypto_service, 0, sizeof(sal_crypto_service_t)); pCrypto_service->generic_service_info.type = (sal_service_type_t)serviceType; pCrypto_service->generic_service_info.state = SAL_SERVICE_STATE_UNINITIALIZED; pCrypto_service->generic_service_info.instance = instance; pCrypto_service->generic_service_info.init = SalCtrl_CryptoInit; pCrypto_service->generic_service_info.start = SalCtrl_CryptoStart; pCrypto_service->generic_service_info.stop = SalCtrl_CryptoStop; pCrypto_service->generic_service_info.shutdown = SalCtrl_CryptoShutdown; + /* Force HW MAC validation for GCM and CCM */ + pCrypto_service->forceAEADMacVerify = CPA_TRUE; + *(ppInst) = &(pCrypto_service->generic_service_info); return CPA_STATUS_SUCCESS; } case SAL_SERVICE_TYPE_COMPRESSION: { pCompression_service = malloc(sizeof(sal_compression_service_t), M_QAT, M_WAITOK); /* Zero memory */ memset(pCompression_service, 0, sizeof(sal_compression_service_t)); pCompression_service->generic_service_info.type = (sal_service_type_t)serviceType; pCompression_service->generic_service_info.state = SAL_SERVICE_STATE_UNINITIALIZED; pCompression_service->generic_service_info.instance = instance; pCompression_service->generic_service_info.init = SalCtrl_CompressionInit; pCompression_service->generic_service_info.start = SalCtrl_CompressionStart; pCompression_service->generic_service_info.stop = SalCtrl_CompressionStop; pCompression_service->generic_service_info.shutdown = SalCtrl_CompressionShutdown; *(ppInst) = &(pCompression_service->generic_service_info); return CPA_STATUS_SUCCESS; } default: { QAT_UTILS_LOG("Not a valid service type\n"); (*ppInst) = NULL; return CPA_STATUS_FAIL; } } } diff --git a/sys/dev/qat/qat_api/common/ctrl/sal_crypto.c b/sys/dev/qat/qat_api/common/ctrl/sal_crypto.c index c97d36103c2b..78a559eba891 100644 --- a/sys/dev/qat/qat_api/common/ctrl/sal_crypto.c +++ b/sys/dev/qat/qat_api/common/ctrl/sal_crypto.c @@ -1,1837 +1,2010 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright(c) 2007-2022 Intel Corporation */ /* $FreeBSD$ */ /** *************************************************************************** * @file sal_crypto.c Instance handling functions for crypto * * @ingroup SalCtrl * ***************************************************************************/ /* ******************************************************************************* * Include public/global header files ******************************************************************************* */ /* QAT-API includes */ #include "cpa.h" #include "cpa_types.h" #include "cpa_cy_common.h" #include "cpa_cy_im.h" #include "cpa_cy_key.h" #include 
"cpa_cy_sym.h" #include "qat_utils.h" /* ADF includes */ #include "icp_adf_init.h" #include "icp_adf_transport.h" #include "icp_accel_devices.h" #include "icp_adf_cfg.h" #include "icp_adf_accel_mgr.h" #include "icp_adf_poll.h" #include "icp_adf_debug.h" /* SAL includes */ #include "lac_log.h" #include "lac_mem.h" #include "lac_mem_pools.h" #include "sal_statistics.h" #include "lac_common.h" #include "lac_list.h" #include "lac_hooks.h" #include "lac_sym_qat_hash_defs_lookup.h" #include "lac_sym.h" #include "lac_sym_key.h" #include "lac_sym_hash.h" #include "lac_sym_cb.h" #include "lac_sym_stats.h" #include "lac_sal_types_crypto.h" #include "lac_sal.h" #include "lac_sal_ctrl.h" #include "sal_string_parse.h" #include "sal_service_state.h" #include "icp_sal_poll.h" #include "lac_sync.h" #include "lac_sym_qat.h" #include "icp_sal_versions.h" #include "icp_sal_user.h" +#include "sal_hw_gen.h" +#define HMAC_MODE_1 1 +#define HMAC_MODE_2 2 #define TH_CY_RX_0 0 #define TH_CY_RX_1 1 #define MAX_CY_RX_RINGS 2 #define DOUBLE_INCR 2 #define TH_SINGLE_RX 0 #define NUM_CRYPTO_SYM_RX_RINGS 1 #define NUM_CRYPTO_ASYM_RX_RINGS 1 #define NUM_CRYPTO_NRBG_RX_RINGS 1 static CpaInstanceHandle Lac_CryptoGetFirstHandle(void) { CpaInstanceHandle instHandle; instHandle = Lac_GetFirstHandle(SAL_SERVICE_TYPE_CRYPTO); if (!instHandle) { instHandle = Lac_GetFirstHandle(SAL_SERVICE_TYPE_CRYPTO_SYM); if (!instHandle) { instHandle = Lac_GetFirstHandle(SAL_SERVICE_TYPE_CRYPTO_ASYM); } } return instHandle; } /* Function to release the sym handles. */ static CpaStatus SalCtrl_SymReleaseTransHandle(sal_service_t *service) { CpaStatus status = CPA_STATUS_SUCCESS; CpaStatus ret_status = CPA_STATUS_SUCCESS; sal_crypto_service_t *pCryptoService = (sal_crypto_service_t *)service; if (NULL != pCryptoService->trans_handle_sym_tx) { status = icp_adf_transReleaseHandle( pCryptoService->trans_handle_sym_tx); if (CPA_STATUS_SUCCESS != status) { ret_status = status; } } if (NULL != pCryptoService->trans_handle_sym_rx) { status = icp_adf_transReleaseHandle( pCryptoService->trans_handle_sym_rx); if (CPA_STATUS_SUCCESS != status) { ret_status = status; } } return ret_status; } /* * @ingroup sal_crypto * Frees resources (memory and transhandles) if allocated * * @param[in] pCryptoService Pointer to sym service instance * @retval SUCCESS if transhandles released * successfully. 
*/ static CpaStatus SalCtrl_SymFreeResources(sal_crypto_service_t *pCryptoService) { CpaStatus status = CPA_STATUS_SUCCESS; /* Free memory pools if not NULL */ Lac_MemPoolDestroy(pCryptoService->lac_sym_cookie_pool); /* Free misc memory if allocated */ /* Frees memory allocated for Hmac precomputes */ LacSymHash_HmacPrecompShutdown(pCryptoService); /* Free memory allocated for key labels Also clears key stats */ LacSymKey_Shutdown(pCryptoService); /* Free hash lookup table if allocated */ if (NULL != pCryptoService->pLacHashLookupDefs) { LAC_OS_FREE(pCryptoService->pLacHashLookupDefs); } /* Free statistics */ LacSym_StatsFree(pCryptoService); /* Free transport handles */ status = SalCtrl_SymReleaseTransHandle((sal_service_t *)pCryptoService); return status; } /** *********************************************************************** * @ingroup SalCtrl * This macro verifies that the status is _SUCCESS * If status is not _SUCCESS then Sym Instance resources are * freed before the function returns the error * * @param[in] status status we are checking * * @return void status is ok (CPA_STATUS_SUCCESS) * @return status The value in the status parameter is an error one * ****************************************************************************/ #define LAC_CHECK_STATUS_SYM_INIT(status) \ do { \ if (CPA_STATUS_SUCCESS != status) { \ SalCtrl_SymFreeResources(pCryptoService); \ return status; \ } \ } while (0) /* Function that creates the Sym Handles. */ static CpaStatus SalCtrl_SymCreateTransHandle(icp_accel_dev_t *device, sal_service_t *service, Cpa32U numSymRequests, char *section) { CpaStatus status = CPA_STATUS_SUCCESS; char temp_string[SAL_CFG_MAX_VAL_LEN_IN_BYTES] = { 0 }; sal_crypto_service_t *pCryptoService = (sal_crypto_service_t *)service; icp_resp_deliv_method rx_resp_type = ICP_RESP_TYPE_IRQ; Cpa32U msgSize = 0; if (SAL_RESP_POLL_CFG_FILE == pCryptoService->isPolled) { rx_resp_type = ICP_RESP_TYPE_POLL; } if (CPA_FALSE == pCryptoService->generic_service_info.is_dyn) { section = icpGetProcessName(); } /* Parse Sym ring details */ status = Sal_StringParsing("Cy", pCryptoService->generic_service_info.instance, "RingSymTx", temp_string); /* Need to free resources in case not _SUCCESS from here */ LAC_CHECK_STATUS_SYM_INIT(status); msgSize = LAC_QAT_SYM_REQ_SZ_LW * LAC_LONG_WORD_IN_BYTES; status = icp_adf_transCreateHandle(device, ICP_TRANS_TYPE_ETR, section, pCryptoService->acceleratorNum, - pCryptoService->bankNum, + pCryptoService->bankNumSym, temp_string, lac_getRingType(SAL_RING_TYPE_A_SYM_HI), NULL, ICP_RESP_TYPE_NONE, numSymRequests, msgSize, (icp_comms_trans_handle *)&( pCryptoService->trans_handle_sym_tx)); LAC_CHECK_STATUS_SYM_INIT(status); status = Sal_StringParsing("Cy", pCryptoService->generic_service_info.instance, "RingSymRx", temp_string); LAC_CHECK_STATUS_SYM_INIT(status); msgSize = LAC_QAT_SYM_RESP_SZ_LW * LAC_LONG_WORD_IN_BYTES; status = icp_adf_transCreateHandle( device, ICP_TRANS_TYPE_ETR, section, pCryptoService->acceleratorNum, - pCryptoService->bankNum, + pCryptoService->bankNumSym, temp_string, lac_getRingType(SAL_RING_TYPE_NONE), (icp_trans_callback)LacSymQat_SymRespHandler, rx_resp_type, numSymRequests, msgSize, (icp_comms_trans_handle *)&(pCryptoService->trans_handle_sym_rx)); LAC_CHECK_STATUS_SYM_INIT(status); return status; } static int SalCtrl_CryptoDebug(void *private_data, char *data, int size, int offset) { CpaStatus status = CPA_STATUS_SUCCESS; Cpa32U len = 0; sal_crypto_service_t *pCryptoService = (sal_crypto_service_t *)private_data; switch (offset) 
{ case SAL_STATS_SYM: { CpaCySymStats64 symStats = { 0 }; if (CPA_TRUE != pCryptoService->generic_service_info.stats ->bSymStatsEnabled) { break; } status = cpaCySymQueryStats64(pCryptoService, &symStats); if (status != CPA_STATUS_SUCCESS) { LAC_LOG_ERROR("cpaCySymQueryStats64 returned error\n"); return 0; } /* Engine Info */ len += snprintf( data + len, size - len, SEPARATOR BORDER " Statistics for Instance %24s |\n" BORDER " Symmetric Stats " BORDER "\n" SEPARATOR, pCryptoService->debug_file->name); /* Session Info */ len += snprintf( data + len, size - len, BORDER " Sessions Initialized: %16llu " BORDER "\n" BORDER " Sessions Removed: %16llu " BORDER "\n" BORDER " Session Errors: %16llu " BORDER "\n" SEPARATOR, (long long unsigned int)symStats.numSessionsInitialized, (long long unsigned int)symStats.numSessionsRemoved, (long long unsigned int)symStats.numSessionErrors); /* Session info */ len += snprintf( data + len, size - len, BORDER " Symmetric Requests: %16llu " BORDER "\n" BORDER " Symmetric Request Errors: %16llu " BORDER "\n" BORDER " Symmetric Completed: %16llu " BORDER "\n" BORDER " Symmetric Completed Errors: %16llu " BORDER "\n" BORDER " Symmetric Verify Failures: %16llu " BORDER "\n", (long long unsigned int)symStats.numSymOpRequests, (long long unsigned int)symStats.numSymOpRequestErrors, (long long unsigned int)symStats.numSymOpCompleted, (long long unsigned int)symStats.numSymOpCompletedErrors, (long long unsigned int)symStats.numSymOpVerifyFailures); break; } default: { len += snprintf(data + len, size - len, SEPARATOR); return 0; } } return ++offset; } static CpaStatus SalCtrl_SymInit(icp_accel_dev_t *device, sal_service_t *service) { CpaStatus status = CPA_STATUS_SUCCESS; + Cpa32U qatHmacMode = 0; Cpa32U numSymConcurrentReq = 0; char adfGetParam[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = { 0 }; char temp_string[SAL_CFG_MAX_VAL_LEN_IN_BYTES] = { 0 }; sal_crypto_service_t *pCryptoService = (sal_crypto_service_t *)service; char *section = DYN_SEC; /*Instance may not in the DYN section*/ if (CPA_FALSE == pCryptoService->generic_service_info.is_dyn) { section = icpGetProcessName(); } /* Register callbacks for the symmetric services * (Hash, Cipher, Algorithm-Chaining) (returns void)*/ LacSymCb_CallbacksRegister(); + qatHmacMode = (Cpa32U)Sal_Strtoul(adfGetParam, NULL, SAL_CFG_BASE_DEC); + switch (qatHmacMode) { + case HMAC_MODE_1: + pCryptoService->qatHmacMode = ICP_QAT_HW_AUTH_MODE1; + break; + case HMAC_MODE_2: + pCryptoService->qatHmacMode = ICP_QAT_HW_AUTH_MODE2; + break; + default: + pCryptoService->qatHmacMode = ICP_QAT_HW_AUTH_MODE1; + break; + } + /* Get num concurrent requests from config file */ status = Sal_StringParsing("Cy", pCryptoService->generic_service_info.instance, "NumConcurrentSymRequests", temp_string); LAC_CHECK_STATUS(status); status = icp_adf_cfgGetParamValue(device, section, temp_string, adfGetParam); if (CPA_STATUS_SUCCESS != status) { QAT_UTILS_LOG("Failed to get %s from configuration file\n", temp_string); return status; } numSymConcurrentReq = (Cpa32U)Sal_Strtoul(adfGetParam, NULL, SAL_CFG_BASE_DEC); if (CPA_STATUS_FAIL == validateConcurrRequest(numSymConcurrentReq)) { LAC_LOG_ERROR("Invalid NumConcurrentSymRequests, valid " "values {64, 128, 256, ... 
32768, 65536}"); return CPA_STATUS_FAIL; } /* ADF does not allow us to completely fill the ring for batch requests */ pCryptoService->maxNumSymReqBatch = (numSymConcurrentReq - SAL_BATCH_SUBMIT_FREE_SPACE); /* Create transport handles */ status = SalCtrl_SymCreateTransHandle(device, service, numSymConcurrentReq, section); LAC_CHECK_STATUS(status); /* Allocates memory pools */ /* Create and initialise symmetric cookie memory pool */ pCryptoService->lac_sym_cookie_pool = LAC_MEM_POOL_INIT_POOL_ID; status = Sal_StringParsing("Cy", pCryptoService->generic_service_info.instance, "SymPool", temp_string); LAC_CHECK_STATUS_SYM_INIT(status); /* Note we need twice (i.e. <<1) the number of sym cookies to support sym ring pairs (and some, for partials) */ status = Lac_MemPoolCreate(&pCryptoService->lac_sym_cookie_pool, temp_string, ((numSymConcurrentReq + numSymConcurrentReq + 1) << 1), sizeof(lac_sym_cookie_t), LAC_64BYTE_ALIGNMENT, CPA_FALSE, pCryptoService->nodeAffinity); LAC_CHECK_STATUS_SYM_INIT(status); /* For all sym cookies fill out the physical address of data that will be sent to QAT */ Lac_MemPoolInitSymCookiesPhyAddr(pCryptoService->lac_sym_cookie_pool); /* Clear stats */ /* Clears Key stats and allocates memory for SSL and TLS labels. These labels are initialised to standard values */ status = LacSymKey_Init(pCryptoService); LAC_CHECK_STATUS_SYM_INIT(status); /* Initialises the hash lookup table */ status = LacSymQat_Init(pCryptoService); LAC_CHECK_STATUS_SYM_INIT(status); /* Fills out content descriptor for precomputes and registers the hash precompute callback */ status = LacSymHash_HmacPrecompInit(pCryptoService); LAC_CHECK_STATUS_SYM_INIT(status); /* Init the Sym stats */ status = LacSym_StatsInit(pCryptoService); LAC_CHECK_STATUS_SYM_INIT(status); return status; } static void SalCtrl_DebugShutdown(icp_accel_dev_t *device, sal_service_t *service) { sal_crypto_service_t *pCryptoService = (sal_crypto_service_t *)service; sal_statistics_collection_t *pStatsCollection = (sal_statistics_collection_t *)device->pQatStats; if (CPA_TRUE == pStatsCollection->bStatsEnabled) { /* Clean stats */ if (NULL != pCryptoService->debug_file) { icp_adf_debugRemoveFile(pCryptoService->debug_file); LAC_OS_FREE(pCryptoService->debug_file->name); LAC_OS_FREE(pCryptoService->debug_file); pCryptoService->debug_file = NULL; } } pCryptoService->generic_service_info.stats = NULL; } static CpaStatus SalCtrl_DebugInit(icp_accel_dev_t *device, sal_service_t *service) { char adfGetParam[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = { 0 }; char temp_string[SAL_CFG_MAX_VAL_LEN_IN_BYTES] = { 0 }; char *instance_name = NULL; sal_crypto_service_t *pCryptoService = (sal_crypto_service_t *)service; sal_statistics_collection_t *pStatsCollection = (sal_statistics_collection_t *)device->pQatStats; CpaStatus status = CPA_STATUS_SUCCESS; char *section = DYN_SEC; /* Instance may not be in the DYN section */ if (CPA_FALSE == pCryptoService->generic_service_info.is_dyn) { section = icpGetProcessName(); } if (CPA_TRUE == pStatsCollection->bStatsEnabled) { /* Get instance name for stats */ instance_name = LAC_OS_MALLOC(ADF_CFG_MAX_VAL_LEN_IN_BYTES); if (NULL == instance_name) { return CPA_STATUS_RESOURCE; } status = Sal_StringParsing( "Cy", pCryptoService->generic_service_info.instance, "Name", temp_string); if (CPA_STATUS_SUCCESS != status) { LAC_OS_FREE(instance_name); return status; } status = icp_adf_cfgGetParamValue(device, section, temp_string, adfGetParam); if (CPA_STATUS_SUCCESS != status) { QAT_UTILS_LOG( "Failed to get %s from configuration file\n", temp_string); LAC_OS_FREE(instance_name); return status; } snprintf(instance_name, ADF_CFG_MAX_VAL_LEN_IN_BYTES, "%s", adfGetParam); pCryptoService->debug_file = LAC_OS_MALLOC(sizeof(debug_file_info_t)); if (NULL == pCryptoService->debug_file) { LAC_OS_FREE(instance_name); return CPA_STATUS_RESOURCE; } memset(pCryptoService->debug_file, 0, sizeof(debug_file_info_t)); pCryptoService->debug_file->name = instance_name; pCryptoService->debug_file->seq_read = SalCtrl_CryptoDebug; pCryptoService->debug_file->private_data = pCryptoService; pCryptoService->debug_file->parent = pCryptoService->generic_service_info.debug_parent_dir; status = icp_adf_debugAddFile(device, pCryptoService->debug_file); if (CPA_STATUS_SUCCESS != status) { LAC_OS_FREE(instance_name); LAC_OS_FREE(pCryptoService->debug_file); return status; } } pCryptoService->generic_service_info.stats = pStatsCollection; return status; }
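
SalCtrl_GetBankNum(), added below, wraps the recurring pattern of composing a per-instance key with Sal_StringParsing() and fetching it with icp_adf_cfgGetParamValue(). A sketch of a call for instance 0; `accel_dev` and the "SSL" section name are hypothetical stand-ins, not values from this driver:

static void
example_bank_lookup(icp_accel_dev_t *accel_dev)
{
	Cpa16U bank = 0;

	/* Resolves the key "Cy0BankNumberSym" in section [SSL] and parses
	 * the value as a decimal bank number (illustration only). */
	if (CPA_STATUS_SUCCESS ==
	    SalCtrl_GetBankNum(accel_dev, 0, "SSL", "BankNumberSym", &bank))
		QAT_UTILS_LOG("sym bank %u\n", (unsigned)bank);
}
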
+static CpaStatus +SalCtrl_GetBankNum(icp_accel_dev_t *device, + Cpa32U inst, + char *section, + char *bank_name, + Cpa16U *bank) +{ + char adfParamValue[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = { 0 }; + char adfParamName[SAL_CFG_MAX_VAL_LEN_IN_BYTES] = { 0 }; + CpaStatus status = CPA_STATUS_SUCCESS; + + status = Sal_StringParsing("Cy", inst, bank_name, adfParamName); + LAC_CHECK_STATUS(status); + status = icp_adf_cfgGetParamValue(device, + section, + adfParamName, + adfParamValue); + if (CPA_STATUS_SUCCESS != status) { + QAT_UTILS_LOG("Failed to get %s from configuration file\n", + adfParamName); + return status; + } + *bank = (Cpa16U)Sal_Strtoul(adfParamValue, NULL, SAL_CFG_BASE_DEC); + return status; +} + static CpaStatus SalCtr_InstInit(icp_accel_dev_t *device, sal_service_t *service) { char adfGetParam[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = { 0 }; char temp_string[SAL_CFG_MAX_VAL_LEN_IN_BYTES] = { 0 }; char temp_string2[SAL_CFG_MAX_VAL_LEN_IN_BYTES] = { 0 }; sal_crypto_service_t *pCryptoService = (sal_crypto_service_t *)service; CpaStatus status = CPA_STATUS_SUCCESS; char *section = DYN_SEC; /* Instance may not be in the DYN section */ if (CPA_FALSE == pCryptoService->generic_service_info.is_dyn) { section = icpGetProcessName(); } /* Get Config Info: Accel Num, bank Num, packageID, coreAffinity, nodeAffinity and response mode */ pCryptoService->acceleratorNum = 0; - status = - Sal_StringParsing("Cy", - pCryptoService->generic_service_info.instance, - "BankNumber", - temp_string); - LAC_CHECK_STATUS(status); - status = - icp_adf_cfgGetParamValue(device, section, temp_string, adfGetParam); - if (CPA_STATUS_SUCCESS != status) { - QAT_UTILS_LOG("Failed to get %s from configuration file\n", - temp_string); - return status; + /* On Gen4 a bank only has 2 rings (1 ring pair), so only one type of service + can be assigned at a time;
asym and sym will be in different bank*/ + if (isCyGen4x(pCryptoService)) { + switch (service->type) { + case SAL_SERVICE_TYPE_CRYPTO_ASYM: + status = SalCtrl_GetBankNum( + device, + pCryptoService->generic_service_info.instance, + section, + "BankNumberAsym", + &pCryptoService->bankNumAsym); + if (CPA_STATUS_SUCCESS != status) + return status; + break; + case SAL_SERVICE_TYPE_CRYPTO_SYM: + status = SalCtrl_GetBankNum( + device, + pCryptoService->generic_service_info.instance, + section, + "BankNumberSym", + &pCryptoService->bankNumSym); + if (CPA_STATUS_SUCCESS != status) + return status; + break; + case SAL_SERVICE_TYPE_CRYPTO: + status = SalCtrl_GetBankNum( + device, + pCryptoService->generic_service_info.instance, + section, + "BankNumberAsym", + &pCryptoService->bankNumAsym); + if (CPA_STATUS_SUCCESS != status) + return status; + status = SalCtrl_GetBankNum( + device, + pCryptoService->generic_service_info.instance, + section, + "BankNumberSym", + &pCryptoService->bankNumSym); + if (CPA_STATUS_SUCCESS != status) + return status; + break; + default: + return CPA_STATUS_FAIL; + } + } else { + status = SalCtrl_GetBankNum( + device, + pCryptoService->generic_service_info.instance, + section, + "BankNumber", + &pCryptoService->bankNumSym); + if (CPA_STATUS_SUCCESS != status) + return status; + pCryptoService->bankNumAsym = pCryptoService->bankNumSym; } - pCryptoService->bankNum = - (Cpa16U)Sal_Strtoul(adfGetParam, NULL, SAL_CFG_BASE_DEC); status = Sal_StringParsing("Cy", pCryptoService->generic_service_info.instance, "IsPolled", temp_string); LAC_CHECK_STATUS(status); status = icp_adf_cfgGetParamValue(device, section, temp_string, adfGetParam); if (CPA_STATUS_SUCCESS != status) { QAT_UTILS_LOG("Failed to get %s from configuration file\n", temp_string); return status; } pCryptoService->isPolled = (Cpa8U)Sal_Strtoul(adfGetParam, NULL, SAL_CFG_BASE_DEC); /* Kernel instances do not support epoll mode */ if (SAL_RESP_EPOLL_CFG_FILE == pCryptoService->isPolled) { QAT_UTILS_LOG( "IsPolled %u is not supported for kernel instance %s", pCryptoService->isPolled, temp_string); return CPA_STATUS_FAIL; } status = icp_adf_cfgGetParamValue(device, LAC_CFG_SECTION_GENERAL, ADF_DEV_PKG_ID, adfGetParam); if (CPA_STATUS_SUCCESS != status) { QAT_UTILS_LOG("Failed to get %s from configuration file\n", ADF_DEV_PKG_ID); return status; } pCryptoService->pkgID = (Cpa16U)Sal_Strtoul(adfGetParam, NULL, SAL_CFG_BASE_DEC); status = icp_adf_cfgGetParamValue(device, LAC_CFG_SECTION_GENERAL, ADF_DEV_NODE_ID, adfGetParam); if (CPA_STATUS_SUCCESS != status) { QAT_UTILS_LOG("Failed to get %s from configuration file\n", ADF_DEV_NODE_ID); return status; } pCryptoService->nodeAffinity = (Cpa32U)Sal_Strtoul(adfGetParam, NULL, SAL_CFG_BASE_DEC); /* In case of interrupt instance, use the bank affinity set by adf_ctl * Otherwise, use the instance affinity for backwards compatibility */ if (SAL_RESP_POLL_CFG_FILE != pCryptoService->isPolled) { /* Next need to read the [AcceleratorX] section of the config * file */ status = Sal_StringParsing("Accelerator", pCryptoService->acceleratorNum, "", temp_string2); LAC_CHECK_STATUS(status); - status = Sal_StringParsing("Bank", - pCryptoService->bankNum, - "CoreAffinity", - temp_string); + if (service->type == SAL_SERVICE_TYPE_CRYPTO_ASYM) + status = Sal_StringParsing("Bank", + pCryptoService->bankNumAsym, + "CoreAffinity", + temp_string); + else + /* For cy service, asym bank and sym bank will set the + same core affinity. 
So Just read one*/ + status = Sal_StringParsing("Bank", + pCryptoService->bankNumSym, + "CoreAffinity", + temp_string); LAC_CHECK_STATUS(status); } else { strncpy(temp_string2, section, (strlen(section) + 1)); status = Sal_StringParsing( "Cy", pCryptoService->generic_service_info.instance, "CoreAffinity", temp_string); LAC_CHECK_STATUS(status); } status = icp_adf_cfgGetParamValue(device, temp_string2, temp_string, adfGetParam); if (CPA_STATUS_SUCCESS != status) { QAT_UTILS_LOG("Failed to get %s from configuration file\n", temp_string); return status; } pCryptoService->coreAffinity = (Cpa32U)Sal_Strtoul(adfGetParam, NULL, SAL_CFG_BASE_DEC); /*No Execution Engine in DH895xcc, so make sure it is zero*/ pCryptoService->executionEngine = 0; return status; } /* This function: * 1. Creates sym and asym transport handles * 2. Allocates memory pools required by sym and asym services .* 3. Clears the sym and asym stats counters * 4. In case service asym or sym is enabled then this function * only allocates resources for these services. i.e if the * service asym is enabled then only asym transport handles * are created and vice versa. */ CpaStatus SalCtrl_CryptoInit(icp_accel_dev_t *device, sal_service_t *service) { CpaStatus status = CPA_STATUS_SUCCESS; sal_crypto_service_t *pCryptoService = (sal_crypto_service_t *)service; sal_service_type_t svc_type = service->type; SAL_SERVICE_GOOD_FOR_INIT(pCryptoService); pCryptoService->generic_service_info.state = SAL_SERVICE_STATE_INITIALIZING; /* Set up the instance parameters such as bank number, * coreAffinity, pkgId and node affinity etc */ status = SalCtr_InstInit(device, service); LAC_CHECK_STATUS(status); /* Create debug directory for service */ status = SalCtrl_DebugInit(device, service); LAC_CHECK_STATUS(status); switch (svc_type) { case SAL_SERVICE_TYPE_CRYPTO_ASYM: break; case SAL_SERVICE_TYPE_CRYPTO_SYM: status = SalCtrl_SymInit(device, service); if (CPA_STATUS_SUCCESS != status) { SalCtrl_DebugShutdown(device, service); return status; } break; case SAL_SERVICE_TYPE_CRYPTO: status = SalCtrl_SymInit(device, service); if (CPA_STATUS_SUCCESS != status) { SalCtrl_DebugShutdown(device, service); return status; } break; default: LAC_LOG_ERROR("Invalid service type\n"); status = CPA_STATUS_FAIL; break; } pCryptoService->generic_service_info.state = SAL_SERVICE_STATE_INITIALIZED; return status; } CpaStatus SalCtrl_CryptoStart(icp_accel_dev_t *device, sal_service_t *service) { sal_crypto_service_t *pCryptoService = (sal_crypto_service_t *)service; CpaStatus status = CPA_STATUS_SUCCESS; if (pCryptoService->generic_service_info.state != SAL_SERVICE_STATE_INITIALIZED) { LAC_LOG_ERROR("Not in the correct state to call start\n"); return CPA_STATUS_FAIL; } pCryptoService->generic_service_info.state = SAL_SERVICE_STATE_RUNNING; return status; } CpaStatus SalCtrl_CryptoStop(icp_accel_dev_t *device, sal_service_t *service) { sal_crypto_service_t *pCryptoService = (sal_crypto_service_t *)service; if (SAL_SERVICE_STATE_RUNNING != pCryptoService->generic_service_info.state) { LAC_LOG_ERROR("Not in the correct state to call stop"); } pCryptoService->generic_service_info.state = SAL_SERVICE_STATE_SHUTTING_DOWN; return CPA_STATUS_SUCCESS; } CpaStatus SalCtrl_CryptoShutdown(icp_accel_dev_t *device, sal_service_t *service) { sal_crypto_service_t *pCryptoService = (sal_crypto_service_t *)service; CpaStatus status = CPA_STATUS_SUCCESS; sal_service_type_t svc_type = service->type; if ((SAL_SERVICE_STATE_INITIALIZED != pCryptoService->generic_service_info.state) && 
(SAL_SERVICE_STATE_SHUTTING_DOWN != pCryptoService->generic_service_info.state)) { LAC_LOG_ERROR("Not in the correct state to call shutdown \n"); return CPA_STATUS_FAIL; } /* Free memory and transhandles */ switch (svc_type) { case SAL_SERVICE_TYPE_CRYPTO_ASYM: break; case SAL_SERVICE_TYPE_CRYPTO_SYM: if (SalCtrl_SymFreeResources(pCryptoService)) { status = CPA_STATUS_FAIL; } break; case SAL_SERVICE_TYPE_CRYPTO: if (SalCtrl_SymFreeResources(pCryptoService)) { status = CPA_STATUS_FAIL; } break; default: LAC_LOG_ERROR("Invalid service type\n"); status = CPA_STATUS_FAIL; break; } SalCtrl_DebugShutdown(device, service); pCryptoService->generic_service_info.state = SAL_SERVICE_STATE_SHUTDOWN; return status; } /** ****************************************************************************** * @ingroup cpaCyCommon *****************************************************************************/ CpaStatus cpaCyGetStatusText(const CpaInstanceHandle instanceHandle, CpaStatus errStatus, Cpa8S *pStatusText) { CpaStatus status = CPA_STATUS_SUCCESS; LAC_CHECK_NULL_PARAM(pStatusText); switch (errStatus) { case CPA_STATUS_SUCCESS: LAC_COPY_STRING(pStatusText, CPA_STATUS_STR_SUCCESS); break; case CPA_STATUS_FAIL: LAC_COPY_STRING(pStatusText, CPA_STATUS_STR_FAIL); break; case CPA_STATUS_RETRY: LAC_COPY_STRING(pStatusText, CPA_STATUS_STR_RETRY); break; case CPA_STATUS_RESOURCE: LAC_COPY_STRING(pStatusText, CPA_STATUS_STR_RESOURCE); break; case CPA_STATUS_INVALID_PARAM: LAC_COPY_STRING(pStatusText, CPA_STATUS_STR_INVALID_PARAM); break; case CPA_STATUS_FATAL: LAC_COPY_STRING(pStatusText, CPA_STATUS_STR_FATAL); break; + case CPA_STATUS_UNSUPPORTED: + LAC_COPY_STRING(pStatusText, CPA_STATUS_STR_UNSUPPORTED); + break; default: status = CPA_STATUS_INVALID_PARAM; break; } return status; } void SalCtrl_CyQueryCapabilities(sal_service_t *pGenericService, CpaCyCapabilitiesInfo *pCapInfo) { memset(pCapInfo, 0, sizeof(CpaCyCapabilitiesInfo)); if (SAL_SERVICE_TYPE_CRYPTO == pGenericService->type || SAL_SERVICE_TYPE_CRYPTO_SYM == pGenericService->type) { pCapInfo->symSupported = CPA_TRUE; if (pGenericService->capabilitiesMask & ICP_ACCEL_CAPABILITIES_EXT_ALGCHAIN) { pCapInfo->extAlgchainSupported = CPA_TRUE; } if (pGenericService->capabilitiesMask & ICP_ACCEL_CAPABILITIES_HKDF) { pCapInfo->hkdfSupported = CPA_TRUE; } } if (pGenericService->capabilitiesMask & ICP_ACCEL_CAPABILITIES_ECEDMONT) { pCapInfo->ecEdMontSupported = CPA_TRUE; } if (pGenericService->capabilitiesMask & ICP_ACCEL_CAPABILITIES_RANDOM_NUMBER) { pCapInfo->nrbgSupported = CPA_TRUE; } pCapInfo->drbgSupported = CPA_FALSE; pCapInfo->randSupported = CPA_FALSE; pCapInfo->nrbgSupported = CPA_FALSE; } /** ****************************************************************************** * @ingroup cpaCyCommon *****************************************************************************/ CpaStatus cpaCyStartInstance(CpaInstanceHandle instanceHandle_in) { CpaInstanceHandle instanceHandle = NULL; /* Structure initializer is supported by C99, but it is * not supported by some former Intel compilers. 
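
cpaCyGetStatusText() above now also covers CPA_STATUS_UNSUPPORTED. A typical error-reporting sketch; CPA_STATUS_MAX_STR_LENGTH_IN_BYTES is the CPA API's string-buffer size, and `cyInst`/`status` are assumed to come from the caller's context:

static void
log_cy_failure(CpaInstanceHandle cyInst, CpaStatus status)
{
	Cpa8S text[CPA_STATUS_MAX_STR_LENGTH_IN_BYTES] = { 0 };

	if (CPA_STATUS_SUCCESS == cpaCyGetStatusText(cyInst, status, text))
		QAT_UTILS_LOG("request failed: %s\n", (char *)text);
	else
		/* Unknown code: cpaCyGetStatusText() itself failed. */
		QAT_UTILS_LOG("request failed, status %d\n", (int)status);
}
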
*/ CpaInstanceInfo2 info = { 0 }; icp_accel_dev_t *dev = NULL; CpaStatus status = CPA_STATUS_SUCCESS; sal_crypto_service_t *pService = NULL; if (CPA_INSTANCE_HANDLE_SINGLE == instanceHandle_in) { instanceHandle = Lac_GetFirstHandle(SAL_SERVICE_TYPE_CRYPTO); if (!instanceHandle) { instanceHandle = Lac_GetFirstHandle(SAL_SERVICE_TYPE_CRYPTO_SYM); } } else { instanceHandle = instanceHandle_in; } LAC_CHECK_NULL_PARAM(instanceHandle); + SAL_CHECK_INSTANCE_TYPE(instanceHandle, + (SAL_SERVICE_TYPE_CRYPTO | + SAL_SERVICE_TYPE_CRYPTO_ASYM | + SAL_SERVICE_TYPE_CRYPTO_SYM)); pService = (sal_crypto_service_t *)instanceHandle; status = cpaCyInstanceGetInfo2(instanceHandle, &info); if (CPA_STATUS_SUCCESS != status) { LAC_LOG_ERROR("Can not get instance info\n"); return status; } dev = icp_adf_getAccelDevByAccelId(info.physInstId.packageId); if (NULL == dev) { LAC_LOG_ERROR("Can not find device for the instance\n"); return CPA_STATUS_FAIL; } pService->generic_service_info.isInstanceStarted = CPA_TRUE; /* Increment dev ref counter */ icp_qa_dev_get(dev); return CPA_STATUS_SUCCESS; } /** ****************************************************************************** * @ingroup cpaCyCommon *****************************************************************************/ CpaStatus cpaCyStopInstance(CpaInstanceHandle instanceHandle_in) { CpaInstanceHandle instanceHandle = NULL; /* Structure initializer is supported by C99, but it is * not supported by some former Intel compilers. */ CpaInstanceInfo2 info = { 0 }; icp_accel_dev_t *dev = NULL; CpaStatus status = CPA_STATUS_SUCCESS; sal_crypto_service_t *pService = NULL; if (CPA_INSTANCE_HANDLE_SINGLE == instanceHandle_in) { instanceHandle = Lac_CryptoGetFirstHandle(); } else { instanceHandle = instanceHandle_in; } LAC_CHECK_NULL_PARAM(instanceHandle); + SAL_CHECK_INSTANCE_TYPE(instanceHandle, + (SAL_SERVICE_TYPE_CRYPTO | + SAL_SERVICE_TYPE_CRYPTO_ASYM | + SAL_SERVICE_TYPE_CRYPTO_SYM)); status = cpaCyInstanceGetInfo2(instanceHandle, &info); if (CPA_STATUS_SUCCESS != status) { LAC_LOG_ERROR("Can not get instance info\n"); return status; } dev = icp_adf_getAccelDevByAccelId(info.physInstId.packageId); if (NULL == dev) { LAC_LOG_ERROR("Can not find device for the instance\n"); return CPA_STATUS_FAIL; } pService = (sal_crypto_service_t *)instanceHandle; pService->generic_service_info.isInstanceStarted = CPA_FALSE; /* Decrement dev ref counter */ icp_qa_dev_put(dev); return CPA_STATUS_SUCCESS; } /** ****************************************************************************** * @ingroup cpaCyCommon *****************************************************************************/ CpaStatus cpaCyInstanceSetNotificationCb( const CpaInstanceHandle instanceHandle, const CpaCyInstanceNotificationCbFunc pInstanceNotificationCb, void *pCallbackTag) { CpaStatus status = CPA_STATUS_SUCCESS; sal_service_t *gen_handle = instanceHandle; LAC_CHECK_NULL_PARAM(gen_handle); gen_handle->notification_cb = pInstanceNotificationCb; gen_handle->cb_tag = pCallbackTag; return status; } /** ****************************************************************************** * @ingroup cpaCyCommon *****************************************************************************/ CpaStatus cpaCyGetNumInstances(Cpa16U *pNumInstances) { CpaStatus status = CPA_STATUS_SUCCESS; CpaInstanceHandle cyInstanceHandle; CpaInstanceInfo2 info; icp_accel_dev_t **pAdfInsts = NULL; icp_accel_dev_t *dev_addr = NULL; sal_t *base_addr = NULL; sal_list_t *list_temp = NULL; Cpa16U num_accel_dev = 0; Cpa16U num_inst = 0; Cpa16U i 
= 0; LAC_CHECK_NULL_PARAM(pNumInstances); /* Get the number of accel_dev in the system */ status = icp_amgr_getNumInstances(&num_accel_dev); LAC_CHECK_STATUS(status); /* Allocate memory to store addr of accel_devs */ pAdfInsts = malloc(num_accel_dev * sizeof(icp_accel_dev_t *), M_QAT, M_WAITOK); num_accel_dev = 0; /* Get ADF to return all accel_devs that support either * symmetric or asymmetric crypto */ status = icp_amgr_getAllAccelDevByCapabilities( (ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC | ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC), pAdfInsts, &num_accel_dev); if (CPA_STATUS_SUCCESS != status) { LAC_LOG_ERROR("No support for crypto\n"); *pNumInstances = 0; free(pAdfInsts, M_QAT); return status; } for (i = 0; i < num_accel_dev; i++) { dev_addr = (icp_accel_dev_t *)pAdfInsts[i]; if (NULL == dev_addr || NULL == dev_addr->pSalHandle) { continue; } base_addr = dev_addr->pSalHandle; list_temp = base_addr->crypto_services; while (NULL != list_temp) { cyInstanceHandle = SalList_getObject(list_temp); status = cpaCyInstanceGetInfo2(cyInstanceHandle, &info); if (CPA_STATUS_SUCCESS == status && CPA_TRUE == info.isPolled) { num_inst++; } list_temp = SalList_next(list_temp); } list_temp = base_addr->asym_services; while (NULL != list_temp) { cyInstanceHandle = SalList_getObject(list_temp); status = cpaCyInstanceGetInfo2(cyInstanceHandle, &info); if (CPA_STATUS_SUCCESS == status && CPA_TRUE == info.isPolled) { num_inst++; } list_temp = SalList_next(list_temp); } list_temp = base_addr->sym_services; while (NULL != list_temp) { cyInstanceHandle = SalList_getObject(list_temp); status = cpaCyInstanceGetInfo2(cyInstanceHandle, &info); if (CPA_STATUS_SUCCESS == status && CPA_TRUE == info.isPolled) { num_inst++; } list_temp = SalList_next(list_temp); } } *pNumInstances = num_inst; free(pAdfInsts, M_QAT); return status; } /** ****************************************************************************** * @ingroup cpaCyCommon *****************************************************************************/ CpaStatus cpaCyGetInstances(Cpa16U numInstances, CpaInstanceHandle *pCyInstances) { CpaStatus status = CPA_STATUS_SUCCESS; CpaInstanceHandle cyInstanceHandle; CpaInstanceInfo2 info; icp_accel_dev_t **pAdfInsts = NULL; icp_accel_dev_t *dev_addr = NULL; sal_t *base_addr = NULL; sal_list_t *list_temp = NULL; Cpa16U num_accel_dev = 0; Cpa16U num_allocated_instances = 0; Cpa16U index = 0; Cpa16U i = 0; LAC_CHECK_NULL_PARAM(pCyInstances); if (0 == numInstances) { LAC_INVALID_PARAM_LOG("NumInstances is 0"); return CPA_STATUS_INVALID_PARAM; } /* Get the number of crypto instances */ status = cpaCyGetNumInstances(&num_allocated_instances); if (CPA_STATUS_SUCCESS != status) { return status; } if (numInstances > num_allocated_instances) { QAT_UTILS_LOG("Only %d crypto instances available\n", num_allocated_instances); return CPA_STATUS_RESOURCE; } /* Get the number of accel devices in the system */ status = icp_amgr_getNumInstances(&num_accel_dev); LAC_CHECK_STATUS(status); /* Allocate memory to store addr of accel_devs */ pAdfInsts = malloc(num_accel_dev * sizeof(icp_accel_dev_t *), M_QAT, M_WAITOK); num_accel_dev = 0; /* Get ADF to return all accel_devs that support either * symmetric or asymmetric crypto */ status = icp_amgr_getAllAccelDevByCapabilities( (ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC | ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC), pAdfInsts, &num_accel_dev); if (CPA_STATUS_SUCCESS != status) { LAC_LOG_ERROR("No support for crypto\n"); free(pAdfInsts, M_QAT); return status; } for (i = 0; i < num_accel_dev; i++) 
{ dev_addr = (icp_accel_dev_t *)pAdfInsts[i]; /* Note dev_addr cannot be NULL here as numInstances = 0 * is not valid and if dev_addr = NULL then index = 0 (which * is less than numInstances and status is set to _RESOURCE * above */ base_addr = dev_addr->pSalHandle; if (NULL == base_addr) { continue; } list_temp = base_addr->crypto_services; while (NULL != list_temp) { if (index > (numInstances - 1)) { break; } cyInstanceHandle = SalList_getObject(list_temp); status = cpaCyInstanceGetInfo2(cyInstanceHandle, &info); list_temp = SalList_next(list_temp); if (CPA_STATUS_SUCCESS != status || CPA_TRUE != info.isPolled) { continue; } pCyInstances[index] = cyInstanceHandle; index++; } list_temp = base_addr->asym_services; while (NULL != list_temp) { if (index > (numInstances - 1)) { break; } cyInstanceHandle = SalList_getObject(list_temp); status = cpaCyInstanceGetInfo2(cyInstanceHandle, &info); list_temp = SalList_next(list_temp); if (CPA_STATUS_SUCCESS != status || CPA_TRUE != info.isPolled) { continue; } pCyInstances[index] = cyInstanceHandle; index++; } list_temp = base_addr->sym_services; while (NULL != list_temp) { if (index > (numInstances - 1)) { break; } cyInstanceHandle = SalList_getObject(list_temp); status = cpaCyInstanceGetInfo2(cyInstanceHandle, &info); list_temp = SalList_next(list_temp); if (CPA_STATUS_SUCCESS != status || CPA_TRUE != info.isPolled) { continue; } pCyInstances[index] = cyInstanceHandle; index++; } } free(pAdfInsts, M_QAT); return status; } /** ****************************************************************************** * @ingroup cpaCyCommon *****************************************************************************/ CpaStatus cpaCyInstanceGetInfo(const CpaInstanceHandle instanceHandle_in, struct _CpaInstanceInfo *pInstanceInfo) { CpaInstanceHandle instanceHandle = NULL; sal_crypto_service_t *pCryptoService = NULL; sal_service_t *pGenericService = NULL; Cpa8U name[CPA_INST_NAME_SIZE] = "Intel(R) DH89XXCC instance number: %02x, type: Crypto"; if (CPA_INSTANCE_HANDLE_SINGLE == instanceHandle_in) { instanceHandle = Lac_CryptoGetFirstHandle(); } else { instanceHandle = instanceHandle_in; } LAC_CHECK_NULL_PARAM(instanceHandle); LAC_CHECK_NULL_PARAM(pInstanceInfo); SAL_CHECK_INSTANCE_TYPE(instanceHandle, (SAL_SERVICE_TYPE_CRYPTO | SAL_SERVICE_TYPE_CRYPTO_ASYM | SAL_SERVICE_TYPE_CRYPTO_SYM)); pCryptoService = (sal_crypto_service_t *)instanceHandle; pInstanceInfo->type = CPA_INSTANCE_TYPE_CRYPTO; /* According to cpa.h instance state is initialized and ready for use * or shutdown. 
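
Note that, unlike the DC variants, cpaCyGetNumInstances() and cpaCyGetInstances() above count only polled instances (info.isPolled) across the crypto, asym and sym service lists. From the caller's side, discovery is symmetrical with compression; a sketch under the same assumptions as the earlier example:

#include "cpa.h"
#include "cpa_cy_im.h"
#include <stdlib.h>

static Cpa16U
list_cy_instances(CpaInstanceHandle **out)
{
	Cpa16U n = 0;

	*out = NULL;
	if (CPA_STATUS_SUCCESS != cpaCyGetNumInstances(&n) || 0 == n)
		return 0;
	*out = calloc(n, sizeof(**out));
	if (NULL == *out)
		return 0;
	if (CPA_STATUS_SUCCESS != cpaCyGetInstances(n, *out)) {
		free(*out);
		*out = NULL;
		return 0;
	}
	return n; /* every returned handle is a polled instance */
}
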
Therefore need to map our running state to initialised * or shutdown */ if (SAL_SERVICE_STATE_RUNNING == pCryptoService->generic_service_info.state) { pInstanceInfo->state = CPA_INSTANCE_STATE_INITIALISED; } else { pInstanceInfo->state = CPA_INSTANCE_STATE_SHUTDOWN; } pGenericService = (sal_service_t *)instanceHandle; snprintf((char *)pInstanceInfo->name, CPA_INST_NAME_SIZE, (char *)name, pGenericService->instance); pInstanceInfo->name[CPA_INST_NAME_SIZE - 1] = '\0'; snprintf((char *)pInstanceInfo->version, CPA_INSTANCE_MAX_NAME_SIZE_IN_BYTES, "%d.%d", CPA_CY_API_VERSION_NUM_MAJOR, CPA_CY_API_VERSION_NUM_MINOR); pInstanceInfo->version[CPA_INSTANCE_MAX_VERSION_SIZE_IN_BYTES - 1] = '\0'; return CPA_STATUS_SUCCESS; } /** ****************************************************************************** * @ingroup cpaCyCommon *****************************************************************************/ CpaStatus cpaCyInstanceGetInfo2(const CpaInstanceHandle instanceHandle_in, CpaInstanceInfo2 *pInstanceInfo2) { CpaInstanceHandle instanceHandle = NULL; sal_crypto_service_t *pCryptoService = NULL; icp_accel_dev_t *dev = NULL; CpaStatus status = CPA_STATUS_SUCCESS; char keyStr[ADF_CFG_MAX_KEY_LEN_IN_BYTES] = { 0 }; char valStr[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = { 0 }; char *section = DYN_SEC; if (CPA_INSTANCE_HANDLE_SINGLE == instanceHandle_in) { instanceHandle = Lac_CryptoGetFirstHandle(); } else { instanceHandle = instanceHandle_in; } LAC_CHECK_NULL_PARAM(instanceHandle); LAC_CHECK_NULL_PARAM(pInstanceInfo2); SAL_CHECK_INSTANCE_TYPE(instanceHandle, (SAL_SERVICE_TYPE_CRYPTO | SAL_SERVICE_TYPE_CRYPTO_ASYM | SAL_SERVICE_TYPE_CRYPTO_SYM)); LAC_OS_BZERO(pInstanceInfo2, sizeof(CpaInstanceInfo2)); pInstanceInfo2->accelerationServiceType = CPA_ACC_SVC_TYPE_CRYPTO; snprintf((char *)pInstanceInfo2->vendorName, CPA_INST_VENDOR_NAME_SIZE, "%s", SAL_INFO2_VENDOR_NAME); pInstanceInfo2->vendorName[CPA_INST_VENDOR_NAME_SIZE - 1] = '\0'; snprintf((char *)pInstanceInfo2->swVersion, CPA_INST_SW_VERSION_SIZE, "Version %d.%d", SAL_INFO2_DRIVER_SW_VERSION_MAJ_NUMBER, SAL_INFO2_DRIVER_SW_VERSION_MIN_NUMBER); pInstanceInfo2->swVersion[CPA_INST_SW_VERSION_SIZE - 1] = '\0'; /* Note we can safely read the contents of the crypto service instance here because icp_amgr_getAllAccelDevByCapabilities() only returns devs that have started */ pCryptoService = (sal_crypto_service_t *)instanceHandle; pInstanceInfo2->physInstId.packageId = pCryptoService->pkgID; pInstanceInfo2->physInstId.acceleratorId = pCryptoService->acceleratorNum; pInstanceInfo2->physInstId.executionEngineId = pCryptoService->executionEngine; pInstanceInfo2->physInstId.busAddress = icp_adf_get_busAddress(pInstanceInfo2->physInstId.packageId); /*set coreAffinity to zero before use */ LAC_OS_BZERO(pInstanceInfo2->coreAffinity, sizeof(pInstanceInfo2->coreAffinity)); CPA_BITMAP_BIT_SET(pInstanceInfo2->coreAffinity, pCryptoService->coreAffinity); pInstanceInfo2->nodeAffinity = pCryptoService->nodeAffinity; if (SAL_SERVICE_STATE_RUNNING == pCryptoService->generic_service_info.state) { pInstanceInfo2->operState = CPA_OPER_STATE_UP; } else { pInstanceInfo2->operState = CPA_OPER_STATE_DOWN; } pInstanceInfo2->requiresPhysicallyContiguousMemory = CPA_TRUE; if (SAL_RESP_POLL_CFG_FILE == pCryptoService->isPolled) { pInstanceInfo2->isPolled = CPA_TRUE; } else { pInstanceInfo2->isPolled = CPA_FALSE; } pInstanceInfo2->isOffloaded = CPA_TRUE; /* Get the instance name and part name*/ dev = icp_adf_getAccelDevByAccelId(pCryptoService->pkgID); if (NULL == dev) { LAC_LOG_ERROR("Can not 
find device for the instance\n"); LAC_OS_BZERO(pInstanceInfo2, sizeof(CpaInstanceInfo2)); return CPA_STATUS_FAIL; } snprintf((char *)pInstanceInfo2->partName, CPA_INST_PART_NAME_SIZE, SAL_INFO2_PART_NAME, dev->deviceName); pInstanceInfo2->partName[CPA_INST_PART_NAME_SIZE - 1] = '\0'; status = Sal_StringParsing("Cy", pCryptoService->generic_service_info.instance, "Name", keyStr); LAC_CHECK_STATUS(status); if (CPA_FALSE == pCryptoService->generic_service_info.is_dyn) { section = icpGetProcessName(); } status = icp_adf_cfgGetParamValue(dev, section, keyStr, valStr); LAC_CHECK_STATUS(status); snprintf((char *)pInstanceInfo2->instName, CPA_INST_NAME_SIZE, "%s", valStr); snprintf((char *)pInstanceInfo2->instID, CPA_INST_ID_SIZE, "%s_%s", section, valStr); return CPA_STATUS_SUCCESS; } /** ****************************************************************************** * @ingroup cpaCyCommon *****************************************************************************/ CpaStatus cpaCyQueryCapabilities(const CpaInstanceHandle instanceHandle_in, CpaCyCapabilitiesInfo *pCapInfo) { /* Verify Instance exists */ CpaInstanceHandle instanceHandle = NULL; if (CPA_INSTANCE_HANDLE_SINGLE == instanceHandle_in) { instanceHandle = Lac_CryptoGetFirstHandle(); } else { instanceHandle = instanceHandle_in; } LAC_CHECK_NULL_PARAM(instanceHandle); SAL_CHECK_INSTANCE_TYPE(instanceHandle, (SAL_SERVICE_TYPE_CRYPTO | SAL_SERVICE_TYPE_CRYPTO_ASYM | SAL_SERVICE_TYPE_CRYPTO_SYM)); LAC_CHECK_NULL_PARAM(pCapInfo); SalCtrl_CyQueryCapabilities((sal_service_t *)instanceHandle, pCapInfo); return CPA_STATUS_SUCCESS; } /** ****************************************************************************** * @ingroup cpaCySym *****************************************************************************/ CpaStatus cpaCySymQueryCapabilities(const CpaInstanceHandle instanceHandle_in, CpaCySymCapabilitiesInfo *pCapInfo) { sal_crypto_service_t *pCryptoService = NULL; sal_service_t *pGenericService = NULL; CpaInstanceHandle instanceHandle = NULL; /* Verify Instance exists */ if (CPA_INSTANCE_HANDLE_SINGLE == instanceHandle_in) { instanceHandle = Lac_GetFirstHandle(SAL_SERVICE_TYPE_CRYPTO); if (!instanceHandle) { instanceHandle = Lac_GetFirstHandle(SAL_SERVICE_TYPE_CRYPTO_SYM); } } else { instanceHandle = instanceHandle_in; } LAC_CHECK_NULL_PARAM(instanceHandle); SAL_CHECK_INSTANCE_TYPE(instanceHandle, (SAL_SERVICE_TYPE_CRYPTO | SAL_SERVICE_TYPE_CRYPTO_ASYM | SAL_SERVICE_TYPE_CRYPTO_SYM)); LAC_CHECK_NULL_PARAM(pCapInfo); pCryptoService = (sal_crypto_service_t *)instanceHandle; pGenericService = &(pCryptoService->generic_service_info); memset(pCapInfo, '\0', sizeof(CpaCySymCapabilitiesInfo)); /* An asym crypto instance does not support sym service */ if (SAL_SERVICE_TYPE_CRYPTO_ASYM == pGenericService->type) { return CPA_STATUS_SUCCESS; } CPA_BITMAP_BIT_SET(pCapInfo->ciphers, CPA_CY_SYM_CIPHER_NULL); - CPA_BITMAP_BIT_SET(pCapInfo->ciphers, CPA_CY_SYM_CIPHER_ARC4); CPA_BITMAP_BIT_SET(pCapInfo->ciphers, CPA_CY_SYM_CIPHER_AES_ECB); CPA_BITMAP_BIT_SET(pCapInfo->ciphers, CPA_CY_SYM_CIPHER_AES_CBC); CPA_BITMAP_BIT_SET(pCapInfo->ciphers, CPA_CY_SYM_CIPHER_AES_CTR); CPA_BITMAP_BIT_SET(pCapInfo->ciphers, CPA_CY_SYM_CIPHER_AES_CCM); CPA_BITMAP_BIT_SET(pCapInfo->ciphers, CPA_CY_SYM_CIPHER_AES_GCM); - CPA_BITMAP_BIT_SET(pCapInfo->ciphers, CPA_CY_SYM_CIPHER_DES_ECB); - CPA_BITMAP_BIT_SET(pCapInfo->ciphers, CPA_CY_SYM_CIPHER_DES_CBC); - CPA_BITMAP_BIT_SET(pCapInfo->ciphers, CPA_CY_SYM_CIPHER_3DES_ECB); - CPA_BITMAP_BIT_SET(pCapInfo->ciphers, 
CPA_CY_SYM_CIPHER_3DES_CBC); - CPA_BITMAP_BIT_SET(pCapInfo->ciphers, CPA_CY_SYM_CIPHER_3DES_CTR); - CPA_BITMAP_BIT_SET(pCapInfo->ciphers, CPA_CY_SYM_CIPHER_KASUMI_F8); - CPA_BITMAP_BIT_SET(pCapInfo->ciphers, CPA_CY_SYM_CIPHER_SNOW3G_UEA2); - CPA_BITMAP_BIT_SET(pCapInfo->ciphers, CPA_CY_SYM_CIPHER_AES_F8); CPA_BITMAP_BIT_SET(pCapInfo->ciphers, CPA_CY_SYM_CIPHER_AES_XTS); + if (isCyGen2x(pCryptoService)) { + CPA_BITMAP_BIT_SET(pCapInfo->ciphers, CPA_CY_SYM_CIPHER_ARC4); + CPA_BITMAP_BIT_SET(pCapInfo->ciphers, + CPA_CY_SYM_CIPHER_DES_ECB); + CPA_BITMAP_BIT_SET(pCapInfo->ciphers, + CPA_CY_SYM_CIPHER_DES_CBC); + CPA_BITMAP_BIT_SET(pCapInfo->ciphers, + CPA_CY_SYM_CIPHER_3DES_ECB); + CPA_BITMAP_BIT_SET(pCapInfo->ciphers, + CPA_CY_SYM_CIPHER_3DES_CBC); + CPA_BITMAP_BIT_SET(pCapInfo->ciphers, + CPA_CY_SYM_CIPHER_3DES_CTR); + CPA_BITMAP_BIT_SET(pCapInfo->ciphers, + CPA_CY_SYM_CIPHER_KASUMI_F8); + CPA_BITMAP_BIT_SET(pCapInfo->ciphers, + CPA_CY_SYM_CIPHER_SNOW3G_UEA2); + CPA_BITMAP_BIT_SET(pCapInfo->ciphers, CPA_CY_SYM_CIPHER_AES_F8); + } - CPA_BITMAP_BIT_SET(pCapInfo->hashes, CPA_CY_SYM_HASH_MD5); CPA_BITMAP_BIT_SET(pCapInfo->hashes, CPA_CY_SYM_HASH_SHA1); CPA_BITMAP_BIT_SET(pCapInfo->hashes, CPA_CY_SYM_HASH_SHA224); CPA_BITMAP_BIT_SET(pCapInfo->hashes, CPA_CY_SYM_HASH_SHA256); CPA_BITMAP_BIT_SET(pCapInfo->hashes, CPA_CY_SYM_HASH_SHA384); CPA_BITMAP_BIT_SET(pCapInfo->hashes, CPA_CY_SYM_HASH_SHA512); CPA_BITMAP_BIT_SET(pCapInfo->hashes, CPA_CY_SYM_HASH_AES_XCBC); CPA_BITMAP_BIT_SET(pCapInfo->hashes, CPA_CY_SYM_HASH_AES_CCM); CPA_BITMAP_BIT_SET(pCapInfo->hashes, CPA_CY_SYM_HASH_AES_GCM); - CPA_BITMAP_BIT_SET(pCapInfo->hashes, CPA_CY_SYM_HASH_KASUMI_F9); - CPA_BITMAP_BIT_SET(pCapInfo->hashes, CPA_CY_SYM_HASH_SNOW3G_UIA2); CPA_BITMAP_BIT_SET(pCapInfo->hashes, CPA_CY_SYM_HASH_AES_CMAC); CPA_BITMAP_BIT_SET(pCapInfo->hashes, CPA_CY_SYM_HASH_AES_GMAC); CPA_BITMAP_BIT_SET(pCapInfo->hashes, CPA_CY_SYM_HASH_AES_CBC_MAC); + if (isCyGen2x(pCryptoService)) { + CPA_BITMAP_BIT_SET(pCapInfo->hashes, CPA_CY_SYM_HASH_MD5); + CPA_BITMAP_BIT_SET(pCapInfo->hashes, CPA_CY_SYM_HASH_KASUMI_F9); + CPA_BITMAP_BIT_SET(pCapInfo->hashes, + CPA_CY_SYM_HASH_SNOW3G_UIA2); + } if (pGenericService->capabilitiesMask & ICP_ACCEL_CAPABILITIES_CRYPTO_ZUC) { CPA_BITMAP_BIT_SET(pCapInfo->ciphers, CPA_CY_SYM_CIPHER_ZUC_EEA3); CPA_BITMAP_BIT_SET(pCapInfo->hashes, CPA_CY_SYM_HASH_ZUC_EIA3); } if (pGenericService->capabilitiesMask & ICP_ACCEL_CAPABILITIES_CHACHA_POLY) { CPA_BITMAP_BIT_SET(pCapInfo->hashes, CPA_CY_SYM_HASH_POLY); CPA_BITMAP_BIT_SET(pCapInfo->ciphers, CPA_CY_SYM_CIPHER_CHACHA); } if (pGenericService->capabilitiesMask & ICP_ACCEL_CAPABILITIES_SM3) { CPA_BITMAP_BIT_SET(pCapInfo->hashes, CPA_CY_SYM_HASH_SM3); } pCapInfo->partialPacketSupported = CPA_TRUE; if (pGenericService->capabilitiesMask & ICP_ACCEL_CAPABILITIES_SHA3) { CPA_BITMAP_BIT_SET(pCapInfo->hashes, CPA_CY_SYM_HASH_SHA3_256); pCapInfo->partialPacketSupported = CPA_FALSE; } if (pGenericService->capabilitiesMask & ICP_ACCEL_CAPABILITIES_SHA3_EXT) { CPA_BITMAP_BIT_SET(pCapInfo->hashes, CPA_CY_SYM_HASH_SHA3_224); CPA_BITMAP_BIT_SET(pCapInfo->hashes, CPA_CY_SYM_HASH_SHA3_256); CPA_BITMAP_BIT_SET(pCapInfo->hashes, CPA_CY_SYM_HASH_SHA3_384); CPA_BITMAP_BIT_SET(pCapInfo->hashes, CPA_CY_SYM_HASH_SHA3_512); pCapInfo->partialPacketSupported = CPA_FALSE; } if (pGenericService->capabilitiesMask & ICP_ACCEL_CAPABILITIES_SM4) { CPA_BITMAP_BIT_SET(pCapInfo->ciphers, CPA_CY_SYM_CIPHER_SM4_ECB); CPA_BITMAP_BIT_SET(pCapInfo->ciphers, CPA_CY_SYM_CIPHER_SM4_CBC); 
CPA_BITMAP_BIT_SET(pCapInfo->ciphers, CPA_CY_SYM_CIPHER_SM4_CTR); pCapInfo->partialPacketSupported = CPA_FALSE; } return CPA_STATUS_SUCCESS; } /** ****************************************************************************** * @ingroup cpaCyCommon *****************************************************************************/ CpaStatus cpaCySetAddressTranslation(const CpaInstanceHandle instanceHandle_in, CpaVirtualToPhysical virtual2physical) { CpaInstanceHandle instanceHandle = NULL; sal_service_t *pService = NULL; if (CPA_INSTANCE_HANDLE_SINGLE == instanceHandle_in) { instanceHandle = Lac_CryptoGetFirstHandle(); } else { instanceHandle = instanceHandle_in; } LAC_CHECK_NULL_PARAM(instanceHandle); SAL_CHECK_INSTANCE_TYPE(instanceHandle, (SAL_SERVICE_TYPE_CRYPTO | SAL_SERVICE_TYPE_CRYPTO_ASYM | SAL_SERVICE_TYPE_CRYPTO_SYM)); LAC_CHECK_NULL_PARAM(virtual2physical); pService = (sal_service_t *)instanceHandle; pService->virt2PhysClient = virtual2physical; return CPA_STATUS_SUCCESS; } /** ****************************************************************************** * @ingroup cpaCyCommon * Crypto specific polling function which polls a crypto instance. *****************************************************************************/ CpaStatus icp_sal_CyPollInstance(CpaInstanceHandle instanceHandle_in, Cpa32U response_quota) { CpaStatus status = CPA_STATUS_SUCCESS; sal_crypto_service_t *crypto_handle = NULL; sal_service_t *gen_handle = NULL; icp_comms_trans_handle trans_hndTable[MAX_CY_RX_RINGS] = { 0 }; Cpa32U num_rx_rings = 0; if (CPA_INSTANCE_HANDLE_SINGLE == instanceHandle_in) { crypto_handle = (sal_crypto_service_t *)Lac_CryptoGetFirstHandle(); } else { crypto_handle = (sal_crypto_service_t *)instanceHandle_in; } LAC_CHECK_NULL_PARAM(crypto_handle); SAL_RUNNING_CHECK(crypto_handle); SAL_CHECK_INSTANCE_TYPE(crypto_handle, (SAL_SERVICE_TYPE_CRYPTO | SAL_SERVICE_TYPE_CRYPTO_ASYM | SAL_SERVICE_TYPE_CRYPTO_SYM)); gen_handle = &(crypto_handle->generic_service_info); /* * From the instanceHandle we must get the trans_handle and send * down to adf for polling. * Populate our trans handle table with the appropriate handles. */ switch (gen_handle->type) { case SAL_SERVICE_TYPE_CRYPTO_ASYM: trans_hndTable[TH_CY_RX_0] = crypto_handle->trans_handle_asym_rx; num_rx_rings = 1; break; case SAL_SERVICE_TYPE_CRYPTO_SYM: trans_hndTable[TH_CY_RX_0] = crypto_handle->trans_handle_sym_rx; num_rx_rings = 1; break; case SAL_SERVICE_TYPE_CRYPTO: trans_hndTable[TH_CY_RX_0] = crypto_handle->trans_handle_sym_rx; trans_hndTable[TH_CY_RX_1] = crypto_handle->trans_handle_asym_rx; num_rx_rings = MAX_CY_RX_RINGS; break; default: break; } /* Call adf to do the polling. */ status = icp_adf_pollInstance(trans_hndTable, num_rx_rings, response_quota); return status; } /** ****************************************************************************** * @ingroup cpaCyCommon * Crypto specific polling function which polls sym crypto ring. 
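 *
 * As an illustration (a sketch only; the instance handle and the quota
 * value are hypothetical), a polled user would typically drain
 * responses in a loop:
 *
 *   CpaStatus st;
 *   do {
 *       st = icp_sal_CyPollSymRing(cyInstHandle, 8);
 *   } while (CPA_STATUS_SUCCESS == st);
 *
 * where response_quota bounds how many responses are processed per call
 * (0 conventionally means "no limit") and a non-SUCCESS status means
 * either that there was nothing left to poll or that an error occurred.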
*****************************************************************************/ CpaStatus icp_sal_CyPollSymRing(CpaInstanceHandle instanceHandle_in, Cpa32U response_quota) { CpaStatus status = CPA_STATUS_SUCCESS; sal_crypto_service_t *crypto_handle = NULL; icp_comms_trans_handle trans_hndTable[NUM_CRYPTO_SYM_RX_RINGS] = { 0 }; if (CPA_INSTANCE_HANDLE_SINGLE == instanceHandle_in) { crypto_handle = (sal_crypto_service_t *)Lac_GetFirstHandle( SAL_SERVICE_TYPE_CRYPTO_SYM); } else { crypto_handle = (sal_crypto_service_t *)instanceHandle_in; } LAC_CHECK_NULL_PARAM(crypto_handle); SAL_CHECK_INSTANCE_TYPE(crypto_handle, (SAL_SERVICE_TYPE_CRYPTO | SAL_SERVICE_TYPE_CRYPTO_SYM)); SAL_RUNNING_CHECK(crypto_handle); /* * From the instanceHandle we must get the trans_handle and send * down to adf for polling. * Populate our trans handle table with the appropriate handles. */ trans_hndTable[TH_SINGLE_RX] = crypto_handle->trans_handle_sym_rx; /* Call adf to do the polling. */ status = icp_adf_pollInstance(trans_hndTable, NUM_CRYPTO_SYM_RX_RINGS, response_quota); return status; } /** ****************************************************************************** * @ingroup cpaCyCommon * Crypto specific polling function which polls an nrbg crypto ring. *****************************************************************************/ CpaStatus icp_sal_CyPollNRBGRing(CpaInstanceHandle instanceHandle_in, Cpa32U response_quota) { return CPA_STATUS_UNSUPPORTED; } /* Returns the handle to the first asym crypto instance */ static CpaInstanceHandle Lac_GetFirstAsymHandle(icp_accel_dev_t *adfInsts[ADF_MAX_DEVICES], Cpa16U num_dev) { + CpaStatus status = CPA_STATUS_SUCCESS; icp_accel_dev_t *dev_addr = NULL; sal_t *base_addr = NULL; sal_list_t *list_temp = NULL; CpaInstanceHandle cyInst = NULL; + CpaInstanceInfo2 info; Cpa16U i = 0; for (i = 0; i < num_dev; i++) { dev_addr = (icp_accel_dev_t *)adfInsts[i]; base_addr = dev_addr->pSalHandle; - if ((NULL != base_addr) && (NULL != base_addr->asym_services)) { - list_temp = base_addr->asym_services; + if (NULL == base_addr) { + continue; + } + list_temp = base_addr->asym_services; + while (NULL != list_temp) { cyInst = SalList_getObject(list_temp); + status = cpaCyInstanceGetInfo2(cyInst, &info); + list_temp = SalList_next(list_temp); + if (CPA_STATUS_SUCCESS != status || + CPA_TRUE != info.isPolled) { + cyInst = NULL; + continue; + } + break; + } + if (cyInst) { break; } } return cyInst; } /* Returns the handle to the first sym crypto instance */ static CpaInstanceHandle Lac_GetFirstSymHandle(icp_accel_dev_t *adfInsts[ADF_MAX_DEVICES], Cpa16U num_dev) { + CpaStatus status = CPA_STATUS_SUCCESS; icp_accel_dev_t *dev_addr = NULL; sal_t *base_addr = NULL; sal_list_t *list_temp = NULL; CpaInstanceHandle cyInst = NULL; + CpaInstanceInfo2 info; Cpa16U i = 0; for (i = 0; i < num_dev; i++) { dev_addr = (icp_accel_dev_t *)adfInsts[i]; base_addr = dev_addr->pSalHandle; - if ((NULL != base_addr) && (NULL != base_addr->sym_services)) { - list_temp = base_addr->sym_services; + if (NULL == base_addr) { + continue; + } + list_temp = base_addr->sym_services; + while (NULL != list_temp) { cyInst = SalList_getObject(list_temp); + status = cpaCyInstanceGetInfo2(cyInst, &info); + list_temp = SalList_next(list_temp); + if (CPA_STATUS_SUCCESS != status || + CPA_TRUE != info.isPolled) { + cyInst = NULL; + continue; + } + break; + } + if (cyInst) { break; } } return cyInst; } /* Returns the handle to the first crypto instance * Note that the crypto instance in this case supports * both asym and sym 
services */ static CpaInstanceHandle Lac_GetFirstCyHandle(icp_accel_dev_t *adfInsts[ADF_MAX_DEVICES], Cpa16U num_dev) { + CpaStatus status = CPA_STATUS_SUCCESS; icp_accel_dev_t *dev_addr = NULL; sal_t *base_addr = NULL; sal_list_t *list_temp = NULL; CpaInstanceHandle cyInst = NULL; + CpaInstanceInfo2 info; Cpa16U i = 0; for (i = 0; i < num_dev; i++) { dev_addr = (icp_accel_dev_t *)adfInsts[i]; base_addr = dev_addr->pSalHandle; - if ((NULL != base_addr) && - (NULL != base_addr->crypto_services)) { - list_temp = base_addr->crypto_services; + if (NULL == base_addr) { + continue; + } + list_temp = base_addr->crypto_services; + while (NULL != list_temp) { cyInst = SalList_getObject(list_temp); + status = cpaCyInstanceGetInfo2(cyInst, &info); + list_temp = SalList_next(list_temp); + if (CPA_STATUS_SUCCESS != status || + CPA_TRUE != info.isPolled) { + cyInst = NULL; + continue; + } + break; + } + if (cyInst) { break; } } + return cyInst; } CpaInstanceHandle Lac_GetFirstHandle(sal_service_type_t svc_type) { CpaStatus status = CPA_STATUS_SUCCESS; static icp_accel_dev_t *adfInsts[ADF_MAX_DEVICES] = { 0 }; CpaInstanceHandle cyInst = NULL; Cpa16U num_cy_dev = 0; Cpa32U capabilities = 0; switch (svc_type) { case SAL_SERVICE_TYPE_CRYPTO_ASYM: capabilities = ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC; break; case SAL_SERVICE_TYPE_CRYPTO_SYM: capabilities = ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC; break; case SAL_SERVICE_TYPE_CRYPTO: capabilities = ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC; capabilities |= ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC; break; default: LAC_LOG_ERROR("Invalid service type\n"); return NULL; break; } /* Only need 1 dev with crypto enabled - so check all devices*/ status = icp_amgr_getAllAccelDevByEachCapability(capabilities, adfInsts, &num_cy_dev); if ((0 == num_cy_dev) || (CPA_STATUS_SUCCESS != status)) { LAC_LOG_ERROR("No crypto devices enabled in the system\n"); return NULL; } switch (svc_type) { case SAL_SERVICE_TYPE_CRYPTO_ASYM: /* Try to find an asym only instance first */ cyInst = Lac_GetFirstAsymHandle(adfInsts, num_cy_dev); /* Try to find a cy instance since it also supports asym */ if (NULL == cyInst) { cyInst = Lac_GetFirstCyHandle(adfInsts, num_cy_dev); } break; case SAL_SERVICE_TYPE_CRYPTO_SYM: /* Try to find a sym only instance first */ cyInst = Lac_GetFirstSymHandle(adfInsts, num_cy_dev); /* Try to find a cy instance since it also supports sym */ if (NULL == cyInst) { cyInst = Lac_GetFirstCyHandle(adfInsts, num_cy_dev); } break; case SAL_SERVICE_TYPE_CRYPTO: /* Try to find a cy instance */ cyInst = Lac_GetFirstCyHandle(adfInsts, num_cy_dev); break; default: break; } if (NULL == cyInst) { LAC_LOG_ERROR("No remaining crypto instances available\n"); } return cyInst; } CpaStatus icp_sal_NrbgGetInflightRequests(CpaInstanceHandle instanceHandle_in, Cpa32U *maxInflightRequests, Cpa32U *numInflightRequests) { return CPA_STATUS_UNSUPPORTED; } CpaStatus icp_sal_SymGetInflightRequests(CpaInstanceHandle instanceHandle, Cpa32U *maxInflightRequests, Cpa32U *numInflightRequests) { sal_crypto_service_t *crypto_handle = NULL; crypto_handle = (sal_crypto_service_t *)instanceHandle; LAC_CHECK_NULL_PARAM(crypto_handle); LAC_CHECK_NULL_PARAM(maxInflightRequests); LAC_CHECK_NULL_PARAM(numInflightRequests); SAL_RUNNING_CHECK(crypto_handle); return icp_adf_getInflightRequests(crypto_handle->trans_handle_sym_tx, maxInflightRequests, numInflightRequests); } CpaStatus icp_sal_dp_SymGetInflightRequests(CpaInstanceHandle instanceHandle, Cpa32U *maxInflightRequests, Cpa32U *numInflightRequests) { 
sal_crypto_service_t *crypto_handle = NULL; crypto_handle = (sal_crypto_service_t *)instanceHandle; return icp_adf_dp_getInflightRequests( crypto_handle->trans_handle_sym_tx, maxInflightRequests, numInflightRequests); } + +CpaStatus +icp_sal_setForceAEADMACVerify(CpaInstanceHandle instanceHandle, + CpaBoolean forceAEADMacVerify) +{ + sal_crypto_service_t *crypto_handle = NULL; + + crypto_handle = (sal_crypto_service_t *)instanceHandle; + LAC_CHECK_NULL_PARAM(crypto_handle); + crypto_handle->forceAEADMacVerify = forceAEADMacVerify; + + return CPA_STATUS_SUCCESS; +} diff --git a/sys/dev/qat/qat_api/common/ctrl/sal_ctrl_services.c b/sys/dev/qat/qat_api/common/ctrl/sal_ctrl_services.c index 4a41340dfabb..10ce54c0ce43 100644 --- a/sys/dev/qat/qat_api/common/ctrl/sal_ctrl_services.c +++ b/sys/dev/qat/qat_api/common/ctrl/sal_ctrl_services.c @@ -1,1344 +1,1380 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright(c) 2007-2022 Intel Corporation */ /* $FreeBSD$ */ /** ***************************************************************************** * @file sal_ctrl_services.c * * @ingroup SalCtrl * * @description * This file contains the core of the service controller implementation. * *****************************************************************************/ /* QAT-API includes */ #include "cpa.h" #include "cpa_cy_key.h" #include "cpa_cy_ln.h" #include "cpa_cy_dh.h" #include "cpa_cy_dsa.h" #include "cpa_cy_rsa.h" #include "cpa_cy_ec.h" #include "cpa_cy_prime.h" #include "cpa_cy_sym.h" #include "cpa_dc.h" /* QAT utils includes */ #include "qat_utils.h" /* ADF includes */ #include "icp_adf_init.h" #include "icp_adf_transport.h" #include "icp_accel_devices.h" #include "icp_adf_cfg.h" #include "icp_adf_init.h" #include "icp_adf_accel_mgr.h" #include "icp_adf_debug.h" /* FW includes */ #include "icp_qat_fw_la.h" /* SAL includes */ #include "lac_mem.h" #include "lac_mem_pools.h" #include "lac_list.h" #include "lac_hooks.h" #include "sal_string_parse.h" #include "lac_common.h" #include "lac_sal_types.h" #include "lac_sal.h" #include "lac_sal_ctrl.h" #include "icp_sal_versions.h" #define MAX_SUBSYSTEM_RETRY 64 static char *subsystem_name = "SAL"; /**< Name used by ADF to identify this component. */ static char *cy_dir_name = "cy"; static char *asym_dir_name = "asym"; static char *sym_dir_name = "sym"; static char *dc_dir_name = "dc"; /**< Stats dir names. */ static char *ver_file_name = "version"; static subservice_registation_handle_t sal_service_reg_handle; /**< Data structure used by ADF to keep a reference to this component. */ /* * @ingroup SalCtrl * @description * This function is used to parse the results from ADF * in response to the ServicesEnabled query. The results are * semi-colon separated. Internally, the bitmask represented * by the enabled_service is used to track which features are enabled. * * @context * This function is called from the SalCtrl_ServiceEventInit function. * * @assumptions * None * @sideEffects * None * @reentrant * No * @threadSafe * Yes * * @param[in] device pointer to icp_accel_dev_t structure * @param[in] pEnabledServices pointer to memory where enabled services will * be written.
* @retval Status */ CpaStatus SalCtrl_GetEnabledServices(icp_accel_dev_t *device, Cpa32U *pEnabledServices) { CpaStatus status = CPA_STATUS_SUCCESS; char param_value[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = { 0 }; char *token = NULL; char *running = NULL; *pEnabledServices = 0; memset(param_value, 0, ADF_CFG_MAX_VAL_LEN_IN_BYTES); status = icp_adf_cfgGetParamValue(device, LAC_CFG_SECTION_GENERAL, "ServicesEnabled", param_value); if (CPA_STATUS_SUCCESS == status) { running = param_value; token = strsep(&running, ";"); while (NULL != token) { do { if (strncmp(token, "asym", strlen("asym")) == 0) { *pEnabledServices |= SAL_SERVICE_TYPE_CRYPTO_ASYM; break; } if (strncmp(token, "sym", strlen("sym")) == 0) { *pEnabledServices |= SAL_SERVICE_TYPE_CRYPTO_SYM; break; } if (strncmp(token, "cy", strlen("cy")) == 0) { *pEnabledServices |= SAL_SERVICE_TYPE_CRYPTO; break; } if (strncmp(token, "dc", strlen("dc")) == 0) { *pEnabledServices |= SAL_SERVICE_TYPE_COMPRESSION; break; } QAT_UTILS_LOG( "Error parsing enabled services from ADF.\n"); return CPA_STATUS_FAIL; } while (0); token = strsep(&running, ";"); } } else { QAT_UTILS_LOG("Failed to get enabled services from ADF.\n"); } return status; } /* * @ingroup SalCtrl * @description * This function is used to check whether a service is enabled * * @context * This function is called from the SalCtrl_ServiceEventInit function. * * @assumptions * None * @sideEffects * None * @reentrant * No * @threadSafe * Yes * * param[in] enabled_services It is the bitmask for the enabled services * param[in] service It is the service we want to check for */ CpaBoolean SalCtrl_IsServiceEnabled(Cpa32U enabled_services, sal_service_type_t service) { return (CpaBoolean)((enabled_services & (Cpa32U)(service)) != 0); } /* * @ingroup SalCtrl * @description * This function is used to check whether enabled services have associated * hardware capability support * * @context * This function is called from the SalCtrl_ServiceEventInit function.
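 *
 * For illustration: with ServicesEnabled = "sym;dc" in the device
 * configuration, SalCtrl_GetEnabledServices() above yields the bitmask
 *
 *   SAL_SERVICE_TYPE_CRYPTO_SYM | SAL_SERVICE_TYPE_COMPRESSION
 *
 * and this function then fails unless the device capability mask has
 * both ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC and
 * ICP_ACCEL_CAPABILITIES_COMPRESSION set.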
* * @assumptions * None * @sideEffects * None * @reentrant * No * @threadSafe * Yes * * param[in] device A pointer to an icp_accel_dev_t * param[in] enabled_services It is the bitmask for the enabled services */ CpaStatus SalCtrl_GetSupportedServices(icp_accel_dev_t *device, Cpa32U enabled_services) { CpaStatus status = CPA_STATUS_SUCCESS; Cpa32U capabilitiesMask = 0; status = icp_amgr_getAccelDevCapabilities(device, &capabilitiesMask); if (CPA_STATUS_SUCCESS == status) { if (SalCtrl_IsServiceEnabled(enabled_services, SAL_SERVICE_TYPE_CRYPTO)) { if (!(capabilitiesMask & ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC) || !(capabilitiesMask & ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC)) { QAT_UTILS_LOG( "Device does not support Crypto service\n"); status = CPA_STATUS_FAIL; } } if (SalCtrl_IsServiceEnabled(enabled_services, SAL_SERVICE_TYPE_CRYPTO_ASYM)) { if (!(capabilitiesMask & ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC)) { QAT_UTILS_LOG( "Device does not support Asym service\n"); status = CPA_STATUS_FAIL; } } if (SalCtrl_IsServiceEnabled(enabled_services, SAL_SERVICE_TYPE_CRYPTO_SYM)) { if (!(capabilitiesMask & ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC)) { QAT_UTILS_LOG( "Device does not support Sym service\n"); status = CPA_STATUS_FAIL; } } if (SalCtrl_IsServiceEnabled(enabled_services, SAL_SERVICE_TYPE_COMPRESSION)) { if (!(capabilitiesMask & ICP_ACCEL_CAPABILITIES_COMPRESSION)) { QAT_UTILS_LOG( "Device does not support Compression service.\n"); status = CPA_STATUS_FAIL; } } } return status; } /************************************************************************* * @ingroup SalCtrl * @description * This function is used to check if a service is supported * on the device. The key difference between this and * SalCtrl_GetSupportedServices() is that the latter treats it as * an error if the service is unsupported. * * @context * This can be called anywhere. 
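 *
 * Typical usage is a simple guard (a sketch; the caller context is
 * hypothetical):
 *
 *   if (CPA_TRUE != SalCtrl_IsServiceSupported(device,
 *                                              SAL_SERVICE_TYPE_CRYPTO_SYM)) {
 *       return CPA_STATUS_UNSUPPORTED;
 *   }
 *
 * i.e. a CPA_FALSE result here is a valid answer, not an error.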
* * @assumptions * None * @sideEffects * None * @reentrant * No * @threadSafe * Yes * * param[in] device * param[in] service service or services to check * *************************************************************************/ CpaBoolean SalCtrl_IsServiceSupported(icp_accel_dev_t *device, sal_service_type_t service_to_check) { CpaStatus status = CPA_STATUS_SUCCESS; Cpa32U capabilitiesMask = 0; CpaBoolean service_supported = CPA_TRUE; if (!(SalCtrl_IsServiceEnabled((Cpa32U)service_to_check, SAL_SERVICE_TYPE_CRYPTO)) && !(SalCtrl_IsServiceEnabled((Cpa32U)service_to_check, SAL_SERVICE_TYPE_CRYPTO_ASYM)) && !(SalCtrl_IsServiceEnabled((Cpa32U)service_to_check, SAL_SERVICE_TYPE_CRYPTO_SYM)) && !(SalCtrl_IsServiceEnabled((Cpa32U)service_to_check, SAL_SERVICE_TYPE_COMPRESSION))) { QAT_UTILS_LOG("Invalid service type\n"); service_supported = CPA_FALSE; } status = icp_amgr_getAccelDevCapabilities(device, &capabilitiesMask); if (CPA_STATUS_SUCCESS != status) { QAT_UTILS_LOG("Cannot get device capabilities.\n"); return CPA_FALSE; } if (SalCtrl_IsServiceEnabled((Cpa32U)service_to_check, SAL_SERVICE_TYPE_CRYPTO)) { if (!(capabilitiesMask & ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC) || !(capabilitiesMask & ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC)) { QAT_UTILS_LOG( "Device does not support Crypto service\n"); service_supported = CPA_FALSE; } } if (SalCtrl_IsServiceEnabled((Cpa32U)service_to_check, SAL_SERVICE_TYPE_CRYPTO_ASYM)) { if (!(capabilitiesMask & ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC)) { QAT_UTILS_LOG("Device does not support Asym service\n"); service_supported = CPA_FALSE; } } if (SalCtrl_IsServiceEnabled((Cpa32U)service_to_check, SAL_SERVICE_TYPE_CRYPTO_SYM)) { if (!(capabilitiesMask & ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC)) { QAT_UTILS_LOG("Device does not support Sym service\n"); service_supported = CPA_FALSE; } } if (SalCtrl_IsServiceEnabled((Cpa32U)service_to_check, SAL_SERVICE_TYPE_COMPRESSION)) { if (!(capabilitiesMask & ICP_ACCEL_CAPABILITIES_COMPRESSION)) { QAT_UTILS_LOG( "Device does not support Compression service.\n"); service_supported = CPA_FALSE; } } return service_supported; } /* * @ingroup SalCtrl * @description * This function is used to retrieve how many instances are * to be configured for a process-specific service. * * @context * This function is called from the SalCtrl_ServiceEventInit function. * * @assumptions * None * @sideEffects * None * @reentrant * No * @threadSafe * Yes * * @param[in] device A pointer to an icp_accel_dev_t * @param[in] key Represents the parameter's name we want to query * @param[out] pCount Pointer to memory where num instances will be stored * @retval status returned status from ADF or _FAIL if number of instances * is out of range for the device. */ static CpaStatus SalCtrl_GetInstanceCount(icp_accel_dev_t *device, char *key, Cpa32U *pCount) { CpaStatus status = CPA_STATUS_FAIL; char param_value[ADF_CFG_MAX_VAL_LEN_IN_BYTES]; memset(param_value, 0, ADF_CFG_MAX_VAL_LEN_IN_BYTES); status = icp_adf_cfgGetParamValue(device, icpGetProcessName(), key, param_value); if (CPA_STATUS_SUCCESS == status) { *pCount = (Cpa32U)(Sal_Strtoul(param_value, NULL, SAL_CFG_BASE_DEC)); if (*pCount > SAL_MAX_NUM_INSTANCES_PER_DEV) { QAT_UTILS_LOG("Number of instances is out of range.\n"); status = CPA_STATUS_FAIL; } } return status; } /************************************************************************** * @ingroup SalCtrl * @description * This function calls the shutdown function on all the * service instances.
* It also frees all service instance memory allocated at Init. * * @context * This function is called from the SalCtrl_ServiceEventShutdown * function. * * @assumptions * params[in] should not be NULL * @sideEffects * None * @reentrant * No * @threadSafe * Yes * * @param[in] device An icp_accel_dev_t* type * @param[in] services A pointer to the container of services * @param[in] dbg_dir A pointer to the debug directory * @param[in] svc_type The type of the service instance * ****************************************************************************/ static CpaStatus SalCtrl_ServiceShutdown(icp_accel_dev_t *device, sal_list_t **services, debug_dir_info_t **debug_dir, sal_service_type_t svc_type) { CpaStatus status = CPA_STATUS_SUCCESS; sal_list_t *dyn_service = NULL; sal_service_t *inst = NULL; /* Call Shutdown function for each service instance */ SAL_FOR_EACH(*services, sal_service_t, device, shutdown, status); if (*debug_dir) { icp_adf_debugRemoveDir(*debug_dir); LAC_OS_FREE(*debug_dir); *debug_dir = NULL; } if (!icp_adf_is_dev_in_reset(device)) { dyn_service = *services; while (dyn_service) { inst = (sal_service_t *)SalList_getObject(dyn_service); if (CPA_TRUE == inst->is_dyn) { icp_adf_putDynInstance(device, (adf_service_type_t) svc_type, inst->instance); } dyn_service = SalList_next(dyn_service); } /* Free Sal services controller memory */ SalList_free(services); } else { sal_list_t *curr_element = NULL; sal_service_t *service = NULL; curr_element = *services; while (NULL != curr_element) { service = (sal_service_t *)SalList_getObject(curr_element); service->state = SAL_SERVICE_STATE_RESTARTING; curr_element = SalList_next(curr_element); } } return status; } +static CpaStatus +selectGeneration(device_type_t deviceType, sal_service_t *pInst) +{ + switch (deviceType) { + case DEVICE_C62X: + case DEVICE_C62XVF: + case DEVICE_DH895XCC: + case DEVICE_DH895XCCVF: + case DEVICE_C3XXX: + case DEVICE_C3XXXVF: + case DEVICE_200XX: + case DEVICE_200XXVF: + pInst->gen = GEN2; + break; + + case DEVICE_C4XXX: + case DEVICE_C4XXXVF: + pInst->gen = GEN3; + break; + + case DEVICE_GEN4: + pInst->gen = GEN4; + break; + + default: + QAT_UTILS_LOG("deviceType not initialised\n"); + return CPA_STATUS_FAIL; + } + return CPA_STATUS_SUCCESS; +} + /************************************************************************* * @ingroup SalCtrl * @description * This function is used to initialise the service instances. * It allocates memory for service instances and invokes the * Init function on them. * * @context * This function is called from the SalCtrl_ServiceEventInit function. 
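 *
 * Each instance allocated here is also tagged with the hardware
 * generation of its parent device via selectGeneration() before being
 * added to the service list, e.g. (sketch):
 *
 *   DEVICE_C62X / DEVICE_DH895XCC / DEVICE_C3XXX / DEVICE_200XX -> GEN2
 *   DEVICE_C4XXX                                                -> GEN3
 *   DEVICE_GEN4                                                 -> GEN4
 *
 * which lets helpers such as isCyGen2x() gate generation-specific
 * capabilities later on.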
* * @assumptions * None * @sideEffects * None * @reentrant * No * @threadSafe * Yes * * @param[in] device An icp_accel_dev_t* type * @param[in] services A pointer to the container of services * @param[in] dbg_dir A pointer to the debug directory * @param[in] dbg_dir_name Name of the debug directory * @param[in] tail_list SAL's list of services * @param[in] instance_count Number of instances * @param[in] svc_type The type of the service instance * *************************************************************************/ static CpaStatus SalCtrl_ServiceInit(icp_accel_dev_t *device, sal_list_t **services, debug_dir_info_t **dbg_dir, char *dbg_dir_name, sal_list_t *tail_list, Cpa32U instance_count, sal_service_type_t svc_type) { CpaStatus status = CPA_STATUS_SUCCESS; sal_service_t *pInst = NULL; Cpa32U i = 0; debug_dir_info_t *debug_dir = NULL; debug_dir = LAC_OS_MALLOC(sizeof(debug_dir_info_t)); if (NULL == debug_dir) { QAT_UTILS_LOG("Failed to allocate memory for debug dir.\n"); return CPA_STATUS_RESOURCE; } debug_dir->name = dbg_dir_name; debug_dir->parent = NULL; status = icp_adf_debugAddDir(device, debug_dir); if (CPA_STATUS_SUCCESS != status) { QAT_UTILS_LOG("Failed to add debug dir.\n"); LAC_OS_FREE(debug_dir); debug_dir = NULL; return status; } if (!icp_adf_is_dev_in_reset(device)) { for (i = 0; i < instance_count; i++) { status = SalCtrl_ServiceCreate(svc_type, i, &pInst); if (CPA_STATUS_SUCCESS != status) { break; } pInst->debug_parent_dir = debug_dir; pInst->capabilitiesMask = device->accelCapabilitiesMask; - status = SalList_add(services, &tail_list, pInst); + + status = selectGeneration(device->deviceType, pInst); + if (CPA_STATUS_SUCCESS == status) { + status = + SalList_add(services, &tail_list, pInst); + } if (CPA_STATUS_SUCCESS != status) { free(pInst, M_QAT); } } } else { sal_list_t *curr_element = *services; sal_service_t *service = NULL; while (NULL != curr_element) { service = (sal_service_t *)SalList_getObject(curr_element); service->debug_parent_dir = debug_dir; if (CPA_TRUE == service->isInstanceStarted) { icp_qa_dev_get(device); } curr_element = SalList_next(curr_element); } } if (CPA_STATUS_SUCCESS != status) { QAT_UTILS_LOG("Failed to allocate all instances.\n"); icp_adf_debugRemoveDir(debug_dir); LAC_OS_FREE(debug_dir); debug_dir = NULL; SalList_free(services); return status; } /* Call init function for each service instance */ SAL_FOR_EACH(*services, sal_service_t, device, init, status); if (CPA_STATUS_SUCCESS != status) { QAT_UTILS_LOG("Failed to initialise all service instances.\n"); /* shutdown all instances initialised before error */ SAL_FOR_EACH_STATE(*services, sal_service_t, device, shutdown, SAL_SERVICE_STATE_INITIALIZED); icp_adf_debugRemoveDir(debug_dir); LAC_OS_FREE(debug_dir); debug_dir = NULL; SalList_free(services); return status; } /* initialize the debug directory for relevant service */ *dbg_dir = debug_dir; return status; } /************************************************************************** * @ingroup SalCtrl * @description * This function calls the start function on all the service instances. * * @context * This function is called from the SalCtrl_ServiceEventStart function. 
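 *
 * When the device is coming back from a reset, this path additionally
 * re-notifies registered users: every service with a notification_cb
 * set receives (verbatim from the body below)
 *
 *   service->notification_cb(service, service->cb_tag,
 *                            CPA_INSTANCE_EVENT_RESTARTED);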
* * @assumptions * None * @sideEffects * None * @reentrant * No * @threadSafe * Yes * * @param[in] device An icp_accel_dev_t* type * @param[in] services A pointer to the container of services * **************************************************************************/ static CpaStatus SalCtrl_ServiceStart(icp_accel_dev_t *device, sal_list_t *services) { CpaStatus status = CPA_STATUS_SUCCESS; /* Call Start function for each service instance */ SAL_FOR_EACH(services, sal_service_t, device, start, status); if (CPA_STATUS_SUCCESS != status) { QAT_UTILS_LOG("Failed to start all instances.\n"); /* stop all instances started before error */ SAL_FOR_EACH_STATE(services, sal_service_t, device, stop, SAL_SERVICE_STATE_RUNNING); return status; } if (icp_adf_is_dev_in_reset(device)) { sal_list_t *curr_element = services; sal_service_t *service = NULL; while (NULL != curr_element) { service = (sal_service_t *)SalList_getObject(curr_element); if (service->notification_cb) { service->notification_cb( service, service->cb_tag, CPA_INSTANCE_EVENT_RESTARTED); } curr_element = SalList_next(curr_element); } } return status; } /**************************************************************************** * @ingroup SalCtrl * @description * This function calls the stop function on all the * service instances. * * @context * This function is called from the SalCtrl_ServiceEventStop function. * * @assumptions * None * @sideEffects * None * @reentrant * No * @threadSafe * Yes * * @param[in] device An icp_accel_dev_t* type * @param[in] services A pointer to the container of services * *************************************************************************/ static CpaStatus SalCtrl_ServiceStop(icp_accel_dev_t *device, sal_list_t *services) { CpaStatus status = CPA_STATUS_SUCCESS; /* Calling restarting functions */ if (icp_adf_is_dev_in_reset(device)) { sal_list_t *curr_element = services; sal_service_t *service = NULL; while (NULL != curr_element) { service = (sal_service_t *)SalList_getObject(curr_element); if (service->notification_cb) { service->notification_cb( service, service->cb_tag, CPA_INSTANCE_EVENT_RESTARTING); } curr_element = SalList_next(curr_element); } } /* Call Stop function for each service instance */ SAL_FOR_EACH(services, sal_service_t, device, stop, status); return status; } /* * @ingroup SalCtrl * @description * This function is used to print hardware and software versions in a proc * filesystem entry via the ADF Debug interface * * @context * This function is called from the proc filesystem interface * * @assumptions * None * @sideEffects * None * @reentrant * No * @threadSafe * Yes * * @param[in] private_data A pointer to private data passed to the * function while adding a debug file. * @param[out] data Pointer to a buffer where version information * needs to be printed to. * @param[in] size Size of a buffer pointed by data.
* @param[in] offset Offset in a debug file * * @retval 0 This function always returns 0 */ static int SalCtrl_VersionDebug(void *private_data, char *data, int size, int offset) { CpaStatus status = CPA_STATUS_SUCCESS; Cpa32U len = 0; icp_accel_dev_t *device = (icp_accel_dev_t *)private_data; char param_value[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = { 0 }; len += snprintf( data + len, size - len, SEPARATOR BORDER " Hardware and Software versions for device %d " BORDER "\n" SEPARATOR, device->accelId); memset(param_value, 0, ADF_CFG_MAX_VAL_LEN_IN_BYTES); status = icp_adf_cfgGetParamValue(device, LAC_CFG_SECTION_GENERAL, ICP_CFG_HW_REV_ID_KEY, param_value); LAC_CHECK_STATUS(status); len += snprintf(data + len, size - len, " Hardware Version: %s %s \n", param_value, get_sku_info(device->sku)); memset(param_value, 0, ADF_CFG_MAX_VAL_LEN_IN_BYTES); status = icp_adf_cfgGetParamValue(device, LAC_CFG_SECTION_GENERAL, ICP_CFG_UOF_VER_KEY, param_value); LAC_CHECK_STATUS(status); len += snprintf(data + len, size - len, " Firmware Version: %s \n", param_value); memset(param_value, 0, ADF_CFG_MAX_VAL_LEN_IN_BYTES); status = icp_adf_cfgGetParamValue(device, LAC_CFG_SECTION_GENERAL, ICP_CFG_MMP_VER_KEY, param_value); LAC_CHECK_STATUS(status); len += snprintf(data + len, size - len, " MMP Version: %s \n", param_value); len += snprintf(data + len, size - len, " Driver Version: %d.%d.%d \n", SAL_INFO2_DRIVER_SW_VERSION_MAJ_NUMBER, SAL_INFO2_DRIVER_SW_VERSION_MIN_NUMBER, SAL_INFO2_DRIVER_SW_VERSION_PATCH_NUMBER); memset(param_value, 0, ADF_CFG_MAX_VAL_LEN_IN_BYTES); status = icp_adf_cfgGetParamValue(device, LAC_CFG_SECTION_GENERAL, ICP_CFG_LO_COMPATIBLE_DRV_KEY, param_value); LAC_CHECK_STATUS(status); len += snprintf(data + len, size - len, " Lowest Compatible Driver: %s \n", param_value); len += snprintf(data + len, size - len, " QuickAssist API CY Version: %d.%d \n", CPA_CY_API_VERSION_NUM_MAJOR, CPA_CY_API_VERSION_NUM_MINOR); len += snprintf(data + len, size - len, " QuickAssist API DC Version: %d.%d \n", CPA_DC_API_VERSION_NUM_MAJOR, CPA_DC_API_VERSION_NUM_MINOR); len += snprintf(data + len, size - len, SEPARATOR); return 0; } /************************************************************************** * @ingroup SalCtrl * @description * This function calls the shutdown function on all the service * instances. It also frees all service instance memory * allocated at Init. * * @context * This function is called from the SalCtrl_ServiceEventHandler function. 
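 *
 * For orientation, shutdown is the final step in the event sequence
 * that ADF delivers through SalCtrl_ServiceEventHandler() (sketch):
 *
 *   ICP_ADF_EVENT_INIT     -> SalCtrl_ServiceEventInit()
 *   ICP_ADF_EVENT_START    -> SalCtrl_ServiceEventStart()
 *   ICP_ADF_EVENT_STOP     -> SalCtrl_ServiceEventStop()
 *   ICP_ADF_EVENT_SHUTDOWN -> this function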
* * @assumptions * None * @sideEffects * None * @reentrant * No * @threadSafe * Yes * * @param[in] device An icp_accel_dev_t* type * @param[in] enabled_services Services enabled by user * ****************************************************************************/ static CpaStatus SalCtrl_ServiceEventShutdown(icp_accel_dev_t *device, Cpa32U enabled_services) { CpaStatus status = CPA_STATUS_SUCCESS; CpaStatus ret_status = CPA_STATUS_SUCCESS; sal_t *service_container = (sal_t *)device->pSalHandle; if (NULL == service_container) { QAT_UTILS_LOG("Private data is NULL\n"); return CPA_STATUS_FATAL; } if (SalCtrl_IsServiceEnabled(enabled_services, SAL_SERVICE_TYPE_CRYPTO)) { status = SalCtrl_ServiceShutdown(device, &service_container->crypto_services, &service_container->cy_dir, SAL_SERVICE_TYPE_CRYPTO); if (CPA_STATUS_SUCCESS != status) { ret_status = status; } } if (SalCtrl_IsServiceEnabled(enabled_services, SAL_SERVICE_TYPE_CRYPTO_ASYM)) { status = SalCtrl_ServiceShutdown(device, &service_container->asym_services, &service_container->asym_dir, SAL_SERVICE_TYPE_CRYPTO_ASYM); if (CPA_STATUS_SUCCESS != status) { ret_status = status; } } if (SalCtrl_IsServiceEnabled(enabled_services, SAL_SERVICE_TYPE_CRYPTO_SYM)) { status = SalCtrl_ServiceShutdown(device, &service_container->sym_services, &service_container->sym_dir, SAL_SERVICE_TYPE_CRYPTO_SYM); if (CPA_STATUS_SUCCESS != status) { ret_status = status; } } if (SalCtrl_IsServiceEnabled(enabled_services, SAL_SERVICE_TYPE_COMPRESSION)) { status = SalCtrl_ServiceShutdown( device, &service_container->compression_services, &service_container->dc_dir, SAL_SERVICE_TYPE_COMPRESSION); if (CPA_STATUS_SUCCESS != status) { ret_status = status; } } if (service_container->ver_file) { icp_adf_debugRemoveFile(service_container->ver_file); LAC_OS_FREE(service_container->ver_file); service_container->ver_file = NULL; } if (!icp_adf_is_dev_in_reset(device)) { /* Free container also */ free(service_container, M_QAT); device->pSalHandle = NULL; } return ret_status; } /************************************************************************* * @ingroup SalCtrl * @description * This function is used to initialize the service instances. * It first checks (via ADF query) which services are enabled in the * system and the number of each service. * It then invokes the init function on them which creates the * instances and allocates memory for them. * * @context * This function is called from the SalCtrl_ServiceEventHandler function.
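 *
 * The per-service instance counts come from the calling process's
 * section of the device configuration; e.g. (hypothetical values)
 *
 *   NumberCyInstances = 2
 *   NumberDcInstances = 2
 *
 * read via SalCtrl_GetInstanceCount(), would create two instances for
 * each enabled cy (or asym/sym) service and two for dc.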
* * @assumptions * None * @sideEffects * None * @reentrant * No * @threadSafe * Yes * * @param[in] device An icp_accel_dev_t* type * @param[in] enabled_services Services enabled by user * *************************************************************************/ static CpaStatus SalCtrl_ServiceEventInit(icp_accel_dev_t *device, Cpa32U enabled_services) { sal_t *service_container = NULL; CpaStatus status = CPA_STATUS_SUCCESS; sal_list_t *tail_list = NULL; Cpa32U instance_count = 0; status = SalCtrl_GetSupportedServices(device, enabled_services); if (CPA_STATUS_SUCCESS != status) { QAT_UTILS_LOG("Failed to get supported services.\n"); return status; } if (!icp_adf_is_dev_in_reset(device)) { service_container = malloc(sizeof(sal_t), M_QAT, M_WAITOK); device->pSalHandle = service_container; service_container->asym_services = NULL; service_container->sym_services = NULL; service_container->crypto_services = NULL; service_container->compression_services = NULL; } else { service_container = device->pSalHandle; } service_container->asym_dir = NULL; service_container->sym_dir = NULL; service_container->cy_dir = NULL; service_container->dc_dir = NULL; service_container->ver_file = NULL; service_container->ver_file = LAC_OS_MALLOC(sizeof(debug_file_info_t)); if (NULL == service_container->ver_file) { free(service_container, M_QAT); return CPA_STATUS_RESOURCE; } memset(service_container->ver_file, 0, sizeof(debug_file_info_t)); service_container->ver_file->name = ver_file_name; service_container->ver_file->seq_read = SalCtrl_VersionDebug; service_container->ver_file->private_data = device; service_container->ver_file->parent = NULL; status = icp_adf_debugAddFile(device, service_container->ver_file); if (CPA_STATUS_SUCCESS != status) { LAC_OS_FREE(service_container->ver_file); free(service_container, M_QAT); return status; } if (SalCtrl_IsServiceEnabled(enabled_services, SAL_SERVICE_TYPE_CRYPTO_ASYM)) { status = SalCtrl_GetInstanceCount(device, "NumberCyInstances", &instance_count); if (CPA_STATUS_SUCCESS != status) { instance_count = 0; } status = SalCtrl_ServiceInit(device, &service_container->asym_services, &service_container->asym_dir, asym_dir_name, tail_list, instance_count, SAL_SERVICE_TYPE_CRYPTO_ASYM); if (CPA_STATUS_SUCCESS != status) { goto err_init; } } if (SalCtrl_IsServiceEnabled(enabled_services, SAL_SERVICE_TYPE_CRYPTO_SYM)) { status = SalCtrl_GetInstanceCount(device, "NumberCyInstances", &instance_count); if (CPA_STATUS_SUCCESS != status) { instance_count = 0; } status = SalCtrl_ServiceInit(device, &service_container->sym_services, &service_container->sym_dir, sym_dir_name, tail_list, instance_count, SAL_SERVICE_TYPE_CRYPTO_SYM); if (CPA_STATUS_SUCCESS != status) { goto err_init; } } if (SalCtrl_IsServiceEnabled(enabled_services, SAL_SERVICE_TYPE_CRYPTO)) { status = SalCtrl_GetInstanceCount(device, "NumberCyInstances", &instance_count); if (CPA_STATUS_SUCCESS != status) { instance_count = 0; } status = SalCtrl_ServiceInit(device, &service_container->crypto_services, &service_container->cy_dir, cy_dir_name, tail_list, instance_count, SAL_SERVICE_TYPE_CRYPTO); if (CPA_STATUS_SUCCESS != status) { goto err_init; } } if (SalCtrl_IsServiceEnabled(enabled_services, SAL_SERVICE_TYPE_COMPRESSION)) { status = SalCtrl_GetInstanceCount(device, "NumberDcInstances", &instance_count); if (CPA_STATUS_SUCCESS != status) { instance_count = 0; } status = SalCtrl_ServiceInit( device, &service_container->compression_services, &service_container->dc_dir, dc_dir_name, tail_list, instance_count, 
SAL_SERVICE_TYPE_COMPRESSION); if (CPA_STATUS_SUCCESS != status) { goto err_init; } } return status; err_init: SalCtrl_ServiceEventShutdown(device, enabled_services); return status; } /**************************************************************************** * @ingroup SalCtrl * @description * This function calls the stop function on all the service instances. * * @context * This function is called from the SalCtrl_ServiceEventHandler function. * * @assumptions * None * @sideEffects * None * @reentrant * No * @threadSafe * Yes * * @param[in] device An icp_accel_dev_t* type * @param[in] enabled_services Enabled services by user * *************************************************************************/ static CpaStatus SalCtrl_ServiceEventStop(icp_accel_dev_t *device, Cpa32U enabled_services) { CpaStatus status = CPA_STATUS_SUCCESS; CpaStatus ret_status = CPA_STATUS_SUCCESS; sal_t *service_container = device->pSalHandle; if (service_container == NULL) { QAT_UTILS_LOG("Private data is NULL.\n"); return CPA_STATUS_FATAL; } if (SalCtrl_IsServiceEnabled(enabled_services, SAL_SERVICE_TYPE_CRYPTO_ASYM)) { status = SalCtrl_ServiceStop(device, service_container->asym_services); if (CPA_STATUS_SUCCESS != status) { ret_status = status; } } if (SalCtrl_IsServiceEnabled(enabled_services, SAL_SERVICE_TYPE_CRYPTO_SYM)) { status = SalCtrl_ServiceStop(device, service_container->sym_services); if (CPA_STATUS_SUCCESS != status) { ret_status = status; } } if (SalCtrl_IsServiceEnabled(enabled_services, SAL_SERVICE_TYPE_CRYPTO)) { status = SalCtrl_ServiceStop(device, service_container->crypto_services); if (CPA_STATUS_SUCCESS != status) { ret_status = status; } } if (SalCtrl_IsServiceEnabled(enabled_services, SAL_SERVICE_TYPE_COMPRESSION)) { status = SalCtrl_ServiceStop( device, service_container->compression_services); if (CPA_STATUS_SUCCESS != status) { ret_status = status; } } return ret_status; } /************************************************************************** * @ingroup SalCtrl * @description * This function calls the start function on all the service instances. * * @context * This function is called from the SalCtrl_ServiceEventHandler function. 
* * @assumptions * None * @sideEffects * None * @reentrant * No * @threadSafe * Yes * * @param[in] device An icp_accel_dev_t* type * @param[in] enabled_services Enabled services by user * **************************************************************************/ static CpaStatus SalCtrl_ServiceEventStart(icp_accel_dev_t *device, Cpa32U enabled_services) { CpaStatus status = CPA_STATUS_SUCCESS; sal_t *service_container = device->pSalHandle; if (service_container == NULL) { QAT_UTILS_LOG("Private data is NULL.\n"); return CPA_STATUS_FATAL; } if (SalCtrl_IsServiceEnabled(enabled_services, SAL_SERVICE_TYPE_CRYPTO_ASYM)) { status = SalCtrl_ServiceStart(device, service_container->asym_services); if (CPA_STATUS_SUCCESS != status) { goto err_start; } } if (SalCtrl_IsServiceEnabled(enabled_services, SAL_SERVICE_TYPE_CRYPTO_SYM)) { status = SalCtrl_ServiceStart(device, service_container->sym_services); if (CPA_STATUS_SUCCESS != status) { goto err_start; } } if (SalCtrl_IsServiceEnabled(enabled_services, SAL_SERVICE_TYPE_CRYPTO)) { status = SalCtrl_ServiceStart(device, service_container->crypto_services); if (CPA_STATUS_SUCCESS != status) { goto err_start; } } if (SalCtrl_IsServiceEnabled(enabled_services, SAL_SERVICE_TYPE_COMPRESSION)) { status = SalCtrl_ServiceStart( device, service_container->compression_services); if (CPA_STATUS_SUCCESS != status) { goto err_start; } } return status; err_start: SalCtrl_ServiceEventStop(device, enabled_services); return status; } /************************************************************************* * @ingroup SalCtrl * @description * This function is the event handler registered with ADF * for the QA API services (cy, dc) - kernel and user * * @context * This function is called from an ADF context. * * @assumptions * None * @sideEffects * None * @reentrant * No * @threadSafe * Yes * * @param[in] device An icp_accel_dev_t* type * @param[in] event Event from ADF * @param[in] param Parameter used for backward compatibility * ***********************************************************************/ static CpaStatus SalCtrl_ServiceEventHandler(icp_accel_dev_t *device, icp_adf_subsystemEvent_t event, void *param) { CpaStatus status = CPA_STATUS_SUCCESS; CpaStatus stats_status = CPA_STATUS_SUCCESS; Cpa32U enabled_services = 0; status = SalCtrl_GetEnabledServices(device, &enabled_services); if (CPA_STATUS_SUCCESS != status) { QAT_UTILS_LOG("Failed to get enabled services.\n"); return status; } switch (event) { case ICP_ADF_EVENT_INIT: { /* In case there are no QAT stats yet, SAL needs to call InitStats */ if (NULL == device->pQatStats) { status = SalStatistics_InitStatisticsCollection(device); } if (CPA_STATUS_SUCCESS != status) { return status; } status = SalCtrl_ServiceEventInit(device, enabled_services); break; } case ICP_ADF_EVENT_START: { status = SalCtrl_ServiceEventStart(device, enabled_services); break; } case ICP_ADF_EVENT_STOP: { status = SalCtrl_ServiceEventStop(device, enabled_services); break; } case ICP_ADF_EVENT_SHUTDOWN: { status = SalCtrl_ServiceEventShutdown(device, enabled_services); stats_status = SalStatistics_CleanStatisticsCollection(device); if (CPA_STATUS_SUCCESS != status || CPA_STATUS_SUCCESS != stats_status) { return CPA_STATUS_FAIL; } break; } default: status = CPA_STATUS_SUCCESS; break; } return status; } CpaStatus SalCtrl_AdfServicesRegister(void) { /* Fill out the global sal_service_reg_handle structure */ sal_service_reg_handle.subserviceEventHandler = SalCtrl_ServiceEventHandler; /* Set subsystem name to globally defined name */
sal_service_reg_handle.subsystem_name = subsystem_name; return icp_adf_subsystemRegister(&sal_service_reg_handle); } CpaStatus SalCtrl_AdfServicesUnregister(void) { return icp_adf_subsystemUnregister(&sal_service_reg_handle); } CpaStatus SalCtrl_AdfServicesStartedCheck(void) { CpaStatus status = CPA_STATUS_SUCCESS; Cpa32U retry_num = 0; CpaBoolean state = CPA_FALSE; do { state = icp_adf_isSubsystemStarted(&sal_service_reg_handle); retry_num++; } while ((CPA_FALSE == state) && (retry_num < MAX_SUBSYSTEM_RETRY)); if (CPA_FALSE == state) { QAT_UTILS_LOG("Sal Ctrl failed to start in the given time.\n"); status = CPA_STATUS_FAIL; } return status; } CpaStatus validateConcurrRequest(Cpa32U numConcurrRequests) { Cpa32U baseReq = SAL_64_CONCURR_REQUESTS; if (SAL_64_CONCURR_REQUESTS > numConcurrRequests) { QAT_UTILS_LOG( "Invalid numConcurrRequests, it is less than min value.\n"); return CPA_STATUS_FAIL; } while (SAL_MAX_CONCURR_REQUESTS >= baseReq) { if (baseReq != numConcurrRequests) { baseReq = baseReq << 1; } else { break; } } if (SAL_MAX_CONCURR_REQUESTS < baseReq) { QAT_UTILS_LOG( "Invalid baseReq, it is greater than max value.\n"); return CPA_STATUS_FAIL; } return CPA_STATUS_SUCCESS; } diff --git a/sys/dev/qat/qat_api/common/include/lac_common.h b/sys/dev/qat/qat_api/common/include/lac_common.h index fb2fdd95300e..dacf43c3a072 100644 --- a/sys/dev/qat/qat_api/common/include/lac_common.h +++ b/sys/dev/qat/qat_api/common/include/lac_common.h @@ -1,847 +1,844 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright(c) 2007-2022 Intel Corporation */ /* $FreeBSD$ */ /** ***************************************************************************** * @file lac_common.h Common macros * * @defgroup Lac Look Aside Crypto LLD Doc * *****************************************************************************/ /** ***************************************************************************** * @defgroup LacCommon LAC Common * Common code for Lac which includes init/shutdown, memory, logging and * hooks. * * @ingroup Lac * *****************************************************************************/ /***************************************************************************/ #ifndef LAC_COMMON_H #define LAC_COMMON_H /* ****************************************************************************** * Include public/global header files ****************************************************************************** */ #include "cpa.h" #include "qat_utils.h" #include "cpa_cy_common.h" #include "icp_adf_init.h" #define LAC_ARCH_UINT uintptr_t #define LAC_ARCH_INT intptr_t /* ***************************************************************************** * Max range values for some primitive param checking ***************************************************************************** */ /**< Maximum number of instances */ #define SAL_MAX_NUM_INSTANCES_PER_DEV 512 #define SAL_DEFAULT_RING_SIZE 256 /**< Default ring size */ #define SAL_64_CONCURR_REQUESTS 64 #define SAL_128_CONCURR_REQUESTS 128 #define SAL_256_CONCURR_REQUESTS 256 #define SAL_512_CONCURR_REQUESTS 512 #define SAL_1024_CONCURR_REQUESTS 1024 #define SAL_2048_CONCURR_REQUESTS 2048 #define SAL_4096_CONCURR_REQUESTS 4096 #define SAL_MAX_CONCURR_REQUESTS 65536 /**< Valid options for the num of concurrent requests per ring pair read from the config file.
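Each valid option is a power of two in that range; validateConcurrRequest(),
for example, accepts 512 but rejects 100 (not a power of two) and anything
above SAL_MAX_CONCURR_REQUESTS.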
These values are used to size the rings */ #define SAL_BATCH_SUBMIT_FREE_SPACE 2 /**< For data plane batch submissions ADF leaves 2 spaces free on the ring */ /* ****************************************************************************** * Some common settings for QA API queries ****************************************************************************** */ #define SAL_INFO2_VENDOR_NAME "Intel(R)" /**< @ingroup LacCommon * Name of vendor of this driver */ #define SAL_INFO2_PART_NAME "%s with Intel(R) QuickAssist Technology" /**< @ingroup LacCommon */ /* ******************************************************************************** * User process name defines and functions ******************************************************************************** */ #define LAC_USER_PROCESS_NAME_MAX_LEN 32 /**< @ingroup LacCommon * Max length of user process name */ #define LAC_KERNEL_PROCESS_NAME "KERNEL_QAT" /**< @ingroup LacCommon * Default name for kernel process */ /* ******************************************************************************** * response mode indicator from Config file ******************************************************************************** */ #define SAL_RESP_POLL_CFG_FILE 1 #define SAL_RESP_EPOLL_CFG_FILE 2 /* * @ingroup LacCommon * @description * This function sets the process name * * @context * This function is called from module_init or from a user space process * initialization function * * @assumptions * None * @sideEffects * None * @reentrant * No * @threadSafe * No * * param[in] processName Process name to be set */ CpaStatus icpSetProcessName(const char *processName); /* * @ingroup LacCommon * @description * This function gets the process name * * @context * This function is called from LAC context * * @assumptions * None * @sideEffects * None * @reentrant * Yes * @threadSafe * Yes * */ char *icpGetProcessName(void); /* Sections of the config file */ #define LAC_CFG_SECTION_GENERAL "GENERAL" #define LAC_CFG_SECTION_INTERNAL "INTERNAL" /* ******************************************************************************** * Debug Macros and settings ******************************************************************************** */ #define SEPARATOR "+--------------------------------------------------+\n" /**< @ingroup LacCommon * separator used for printing stats to standard output*/ #define BORDER "|" /**< @ingroup LacCommon * separator used for printing stats to standard output*/ /** ***************************************************************************** * @ingroup LacCommon * Component state * * @description * This enum is used to indicate the state that the component is in. Its * purpose is to prevent components from being initialised or shutdown * incorrectly.
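 *
 * The intended flow (a reading of the state names, not a formal
 * contract) is
 *
 *   LAC_COMP_SHUT_DOWN -> LAC_COMP_INITIALISING -> LAC_COMP_INITIALISED
 *
 * on the way up, and back through LAC_COMP_SHUTTING_DOWN to
 * LAC_COMP_SHUT_DOWN on the way down.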
* *****************************************************************************/ typedef enum { LAC_COMP_SHUT_DOWN = 0, /**< Component in the Shut Down state */ LAC_COMP_SHUTTING_DOWN, /**< Component in the Process of Shutting down */ LAC_COMP_INITIALISING, /**< Component in the Process of being initialised */ LAC_COMP_INITIALISED, /**< Component in the initialised state */ } lac_comp_state_t; /** ******************************************************************************* * @ingroup LacCommon * This macro checks if a parameter is NULL * * @param[in] param Parameter * * @return CPA_STATUS_INVALID_PARAM Parameter is NULL * @return void Parameter is not NULL ******************************************************************************/ #define LAC_CHECK_NULL_PARAM(param) \ do { \ if (NULL == (param)) { \ return CPA_STATUS_INVALID_PARAM; \ } \ } while (0) /** ******************************************************************************* * @ingroup LacCommon * This macro checks if a parameter is within a specified range * * @param[in] param Parameter * @param[in] min Parameter must be greater than OR equal to *min * @param[in] max Parameter must be less than max * * @return CPA_STATUS_INVALID_PARAM Parameter is outside range * @return void Parameter is within range ******************************************************************************/ #define LAC_CHECK_PARAM_RANGE(param, min, max) \ do { \ if (((param) < (min)) || ((param) >= (max))) { \ return CPA_STATUS_INVALID_PARAM; \ } \ } while (0) /** ******************************************************************************* * @ingroup LacCommon * This checks if a param is 8 byte aligned. * ******************************************************************************/ #define LAC_CHECK_8_BYTE_ALIGNMENT(param) \ do { \ if ((Cpa64U)param % 8 != 0) { \ return CPA_STATUS_INVALID_PARAM; \ } \ } while (0) /** ******************************************************************************* * @ingroup LacCommon * This checks if a param is 64 byte aligned. * ******************************************************************************/ #define LAC_CHECK_64_BYTE_ALIGNMENT(param) \ do { \ if ((LAC_ARCH_UINT)param % 64 != 0) { \ return CPA_STATUS_INVALID_PARAM; \ } \ } while (0) /** ******************************************************************************* * @ingroup LacCommon * This macro returns the size of the buffer list structure given the * number of elements in the buffer list - note: only the sizeof the * buffer list structure is returned. * * @param[in] numBuffers The number of flatbuffers in a buffer list * * @return size of the buffer list structure ******************************************************************************/ #define LAC_BUFFER_LIST_SIZE_GET(numBuffers) \ (sizeof(CpaBufferList) + (numBuffers * sizeof(CpaFlatBuffer))) /** ******************************************************************************* * @ingroup LacCommon * This macro checks that a flatbuffer is valid i.e. 
that it is not * null and the data it points to is not null * * @param[in] pFlatBuffer Pointer to flatbuffer * * @return CPA_STATUS_INVALID_PARAM Invalid flatbuffer pointer * @return void flatbuffer is ok ******************************************************************************/ #define LAC_CHECK_FLAT_BUFFER(pFlatBuffer) \ do { \ LAC_CHECK_NULL_PARAM((pFlatBuffer)); \ LAC_CHECK_NULL_PARAM((pFlatBuffer)->pData); \ } while (0) /** ******************************************************************************* * @ingroup LacCommon * This macro verifies that the status is ok i.e. equal to CPA_STATUS_SUCCESS * * @param[in] status status we are checking * * @return void status is ok (CPA_STATUS_SUCCESS) * @return status The value in the status parameter is an error one * ******************************************************************************/ #define LAC_CHECK_STATUS(status) \ do { \ if (CPA_STATUS_SUCCESS != (status)) { \ return status; \ } \ } while (0) /** ******************************************************************************* * @ingroup LacCommon * This macro verifies that the Instance Handle is valid. * * @param[in] instanceHandle Instance Handle * * @return CPA_STATUS_INVALID_PARAM Parameter is NULL * @return void Parameter is not NULL * ******************************************************************************/ #define LAC_CHECK_INSTANCE_HANDLE(instanceHandle) \ do { \ if (NULL == (instanceHandle)) { \ return CPA_STATUS_INVALID_PARAM; \ } \ } while (0) /** ******************************************************************************* * @ingroup LacCommon * This macro copies a string from one location to another * * @param[out] pDestinationBuffer Pointer to destination buffer * @param[in] pSource Pointer to source buffer * ******************************************************************************/ #define LAC_COPY_STRING(pDestinationBuffer, pSource) \ do { \ memcpy(pDestinationBuffer, pSource, (sizeof(pSource) - 1)); \ pDestinationBuffer[(sizeof(pSource) - 1)] = '\0'; \ } while (0) /** ******************************************************************************* * @ingroup LacCommon * This macro fills a memory zone with ZEROES * * @param[in] pBuffer Pointer to buffer * @param[in] count Buffer length * * @return void * ******************************************************************************/ #define LAC_OS_BZERO(pBuffer, count) memset(pBuffer, 0, count); /** ******************************************************************************* * @ingroup LacCommon * This macro calculates the position of the given member in a struct * Only for use on a struct where all members are of equal size to map * the struct member position to an array index * * @param[in] structType the struct * @param[in] member the member of the given struct * ******************************************************************************/ #define LAC_IDX_OF(structType, member) \ (offsetof(structType, member) / sizeof(((structType *)0)->member)) /* ******************************************************************************** * Alignment, Bit define and Bit Operation Macros ******************************************************************************** */ #define LAC_BIT31_SET 0x80000000 /**< bit 31 == 1 */ #define LAC_BIT7_SET 0x80 /**< bit 7 == 1 */ #define LAC_BIT6_SET 0x40 /**< bit 6 == 1 */ #define LAC_BIT5_SET 0x20 /**< bit 5 == 1 */ #define LAC_BIT4_SET 0x10 /**< bit 4 == 1 */ #define LAC_BIT3_SET 0x08 /**< bit 3 == 1 */ #define LAC_BIT2_SET 0x04 /**< bit 2 == 1 */ #define
LAC_BIT1_SET 0x02 /**< bit 1 == 1 */ #define LAC_BIT0_SET 0x01 /**< bit 0 == 1 */ #define LAC_NUM_BITS_IN_BYTE (8) /**< @ingroup LacCommon * Number of bits in a byte */ #define LAC_LONG_WORD_IN_BYTES (4) /**< @ingroup LacCommon * Number of bytes in an IA word */ #define LAC_QUAD_WORD_IN_BYTES (8) /**< @ingroup LacCommon * Number of bytes in a QUAD word */ #define LAC_QAT_MAX_MSG_SZ_LW (32) /**< @ingroup LacCommon * Maximum size in Long Words for a QAT message */ /** ***************************************************************************** * @ingroup LacCommon * Alignment shift requirements of a buffer. * * @description * This enum is used to indicate the alignment shift of a buffer. * All alignments are to power of 2 * *****************************************************************************/ typedef enum lac_aligment_shift_s { LAC_NO_ALIGNMENT_SHIFT = 0, /**< No alignment shift (to a power of 2)*/ LAC_8BYTE_ALIGNMENT_SHIFT = 3, /**< 8 byte alignment shift (to a power of 2)*/ LAC_16BYTE_ALIGNMENT_SHIFT = 4, /**< 16 byte alignment shift (to a power of 2)*/ LAC_64BYTE_ALIGNMENT_SHIFT = 6, /**< 64 byte alignment shift (to a power of 2)*/ LAC_4KBYTE_ALIGNMENT_SHIFT = 12, /**< 4k byte alignment shift (to a power of 2)*/ } lac_aligment_shift_t; /** ***************************************************************************** * @ingroup LacCommon * Alignment of a buffer. * * @description * This enum is used to indicate the alignment requirements of a buffer. * *****************************************************************************/ typedef enum lac_aligment_s { LAC_NO_ALIGNMENT = 0, /**< No alignment */ LAC_1BYTE_ALIGNMENT = 1, /**< 1 byte alignment */ LAC_8BYTE_ALIGNMENT = 8, /**< 8 byte alignment*/ LAC_64BYTE_ALIGNMENT = 64, /**< 64 byte alignment*/ LAC_4KBYTE_ALIGNMENT = 4096, /**< 4k byte alignment */ } lac_aligment_t; /** ***************************************************************************** * @ingroup LacCommon * Size of a buffer. * * @description * This enum is used to indicate the required size. * The buffer must be a multiple of the required size. 
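* For example (an illustrative reading, not an additional requirement): with LAC_4KBYTE_MULTIPLE_REQUIRED an 8192 byte buffer satisfies the constraint, while a 6000 byte buffer does not.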
* *****************************************************************************/ typedef enum lac_expected_size_s { LAC_NO_LENGTH_REQUIREMENTS = 0, /**< No requirement for size */ LAC_4KBYTE_MULTIPLE_REQUIRED = 4096, /**< 4k multiple requirement for size */ } lac_expected_size_t; #define LAC_OPTIMAL_ALIGNMENT_SHIFT LAC_64BYTE_ALIGNMENT_SHIFT /**< @ingroup LacCommon * optimal alignment to a power of 2 */ #define LAC_SHIFT_8 (1 << LAC_8BYTE_ALIGNMENT_SHIFT) /**< shift by 8 bits */ #define LAC_SHIFT_24 \ ((1 << LAC_8BYTE_ALIGNMENT_SHIFT) + (1 << LAC_16BYTE_ALIGNMENT_SHIFT)) /**< shift by 24 bits */ #define LAC_MAX_16_BIT_VALUE ((1 << 16) - 1) /**< @ingroup LacCommon * maximum value a 16 bit type can hold */ /** ******************************************************************************* * @ingroup LacCommon * This macro can be used to avoid an unused variable warning from the * compiler * * @param[in] variable unused variable * ******************************************************************************/ #define LAC_UNUSED_VARIABLE(x) (void)(x) /** ******************************************************************************* * @ingroup LacCommon * This macro checks if an address is aligned to the specified power of 2 * Evaluates to non-zero if the alignment is ok, or to 0 otherwise * * @param[in] address the address we are checking * * @param[in] alignment the byte alignment to check (specified as power of 2) * ******************************************************************************/ #define LAC_ADDRESS_ALIGNED(address, alignment) \ (!((LAC_ARCH_UINT)(address) & ((1 << (alignment)) - 1))) /** ******************************************************************************* * @ingroup LacCommon * This macro rounds up a number to be a multiple of the alignment when * the alignment is a power of 2.
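* For example (illustrative): LAC_ALIGN_POW2_ROUNDUP(10, 8) evaluates to 16, and LAC_ALIGN_POW2_ROUNDUP(16, 8) stays 16.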
* * @param[in] num Number * @param[in] align Alignment (must be a power of 2) * ******************************************************************************/ #define LAC_ALIGN_POW2_ROUNDUP(num, align) (((num) + (align)-1) & ~((align)-1)) /** ******************************************************************************* * @ingroup LacCommon * This macro generates a bit mask to select a particular bit * * @param[in] bitPos Bit position to select * ******************************************************************************/ #define LAC_BIT(bitPos) (0x1 << (bitPos)) /** ******************************************************************************* * @ingroup LacCommon * This macro converts a size in bits to the equivalent size in bytes, * using a bit shift to divide by 8 * * @param[in] x size in bits * ******************************************************************************/ #define LAC_BITS_TO_BYTES(x) ((x) >> 3) /** ******************************************************************************* * @ingroup LacCommon * This macro converts a size in bytes to the equivalent size in bits, * using a bit shift to multiply by 8 * * @param[in] x size in bytes * ******************************************************************************/ #define LAC_BYTES_TO_BITS(x) ((x) << 3) /** ******************************************************************************* * @ingroup LacCommon * This macro converts a size in bytes to the equivalent size in longwords, * using a bit shift to divide by 4 * * @param[in] x size in bytes * ******************************************************************************/ #define LAC_BYTES_TO_LONGWORDS(x) ((x) >> 2) /** ******************************************************************************* * @ingroup LacCommon * This macro converts a size in longwords to the equivalent size in bytes, * using a bit shift to multiply by 4 * * @param[in] x size in long words * ******************************************************************************/ #define LAC_LONGWORDS_TO_BYTES(x) ((x) << 2) /** ******************************************************************************* * @ingroup LacCommon * This macro converts a size in bytes to the equivalent size in quadwords, * dividing by 8 and rounding up any remainder * * @param[in] x size in bytes * ******************************************************************************/ #define LAC_BYTES_TO_QUADWORDS(x) (((x) >> 3) + (((x) % 8) ? 1 : 0)) /** ******************************************************************************* * @ingroup LacCommon * This macro converts a size in quadwords to the equivalent size in bytes, * using a bit shift to multiply by 8 * * @param[in] x size in quad words * ******************************************************************************/ #define LAC_QUADWORDS_TO_BYTES(x) ((x) << 3) /******************************************************************************/ /* ******************************************************************************* * Mutex Macros ******************************************************************************* */ /** ******************************************************************************* * @ingroup LacCommon * This macro tries to acquire a mutex and returns the status * * @param[in] pLock Pointer to Lock * @param[in] timeout Timeout * * @retval CPA_STATUS_SUCCESS Function executed successfully.
* @retval CPA_STATUS_RESOURCE Error with Mutex ******************************************************************************/ #define LAC_LOCK_MUTEX(pLock, timeout) \ ((CPA_STATUS_SUCCESS != qatUtilsMutexLock((pLock), (timeout))) ? \ CPA_STATUS_RESOURCE : \ CPA_STATUS_SUCCESS) /** ******************************************************************************* * @ingroup LacCommon * This macro unlocks a mutex and returns the status * * @param[in] pLock Pointer to Lock * * @retval CPA_STATUS_SUCCESS Function executed successfully. * @retval CPA_STATUS_RESOURCE Error with Mutex ******************************************************************************/ #define LAC_UNLOCK_MUTEX(pLock) \ ((CPA_STATUS_SUCCESS != qatUtilsMutexUnlock((pLock))) ? \ CPA_STATUS_RESOURCE : \ CPA_STATUS_SUCCESS) /** ******************************************************************************* * @ingroup LacCommon * This macro initialises a mutex and returns the status * * @param[in] pLock Pointer to Lock * * @retval CPA_STATUS_SUCCESS Function executed successfully. * @retval CPA_STATUS_RESOURCE Error with Mutex ******************************************************************************/ #define LAC_INIT_MUTEX(pLock) \ ((CPA_STATUS_SUCCESS != qatUtilsMutexInit((pLock))) ? \ CPA_STATUS_RESOURCE : \ CPA_STATUS_SUCCESS) /** ******************************************************************************* * @ingroup LacCommon * This macro destroys a mutex and returns the status * * @param[in] pLock Pointer to Lock * * @retval CPA_STATUS_SUCCESS Function executed successfully. * @retval CPA_STATUS_RESOURCE Error with Mutex ******************************************************************************/ #define LAC_DESTROY_MUTEX(pLock) \ ((CPA_STATUS_SUCCESS != qatUtilsMutexDestroy((pLock))) ? \ CPA_STATUS_RESOURCE : \ CPA_STATUS_SUCCESS) /** ******************************************************************************* * @ingroup LacCommon * This macro attempts to acquire a mutex without blocking and returns * the status * * @param[in] pLock Pointer to Lock * * @retval CPA_STATUS_SUCCESS Function executed successfully. * @retval CPA_STATUS_RESOURCE Error with Mutex ******************************************************************************/ #define LAC_TRYLOCK_MUTEX(pLock) \ ((CPA_STATUS_SUCCESS != \ qatUtilsMutexTryLock((pLock), QAT_UTILS_WAIT_NONE)) ? \ CPA_STATUS_RESOURCE : \ CPA_STATUS_SUCCESS) /* ******************************************************************************* * Semaphore Macros ******************************************************************************* */ /** ******************************************************************************* * @ingroup LacCommon * This macro waits on a semaphore and returns the status * * @param[in] sid The semaphore * @param[in] timeout Timeout * * @retval CPA_STATUS_SUCCESS Function executed successfully. * @retval CPA_STATUS_RESOURCE Error with semaphore ******************************************************************************/ #define LAC_WAIT_SEMAPHORE(sid, timeout) \ ((CPA_STATUS_SUCCESS != qatUtilsSemaphoreWait(&sid, (timeout))) ? \ CPA_STATUS_RESOURCE : \ CPA_STATUS_SUCCESS) /** ******************************************************************************* * @ingroup LacCommon * This macro checks a semaphore and returns the status * * @param[in] sid The semaphore * * @retval CPA_STATUS_SUCCESS Function executed successfully.
* @retval CPA_STATUS_RETRY Semaphore not yet posted ******************************************************************************/ #define LAC_CHECK_SEMAPHORE(sid) \ ((CPA_STATUS_SUCCESS != qatUtilsSemaphoreTryWait(&sid)) ? \ CPA_STATUS_RETRY : \ CPA_STATUS_SUCCESS) /** ******************************************************************************* * @ingroup LacCommon * This macro posts a semaphore and returns the status * * @param[in] sid The semaphore * * @retval CPA_STATUS_SUCCESS Function executed successfully. * @retval CPA_STATUS_RESOURCE Error with semaphore ******************************************************************************/ #define LAC_POST_SEMAPHORE(sid) \ ((CPA_STATUS_SUCCESS != qatUtilsSemaphorePost(&sid)) ? \ CPA_STATUS_RESOURCE : \ CPA_STATUS_SUCCESS) /** ******************************************************************************* * @ingroup LacCommon * This macro initialises a semaphore and returns the status * * @param[in] sid The semaphore * @param[in] semValue Initial semaphore value * * @retval CPA_STATUS_SUCCESS Function executed successfully. * @retval CPA_STATUS_RESOURCE Error with semaphore ******************************************************************************/ #define LAC_INIT_SEMAPHORE(sid, semValue) \ ((CPA_STATUS_SUCCESS != qatUtilsSemaphoreInit(&sid, semValue)) ? \ CPA_STATUS_RESOURCE : \ CPA_STATUS_SUCCESS) /** ******************************************************************************* * @ingroup LacCommon * This macro destroys a semaphore and returns the status * * @param[in] sid The semaphore * * @retval CPA_STATUS_SUCCESS Function executed successfully. * @retval CPA_STATUS_RESOURCE Error with semaphore ******************************************************************************/ #define LAC_DESTROY_SEMAPHORE(sid) \ ((CPA_STATUS_SUCCESS != qatUtilsSemaphoreDestroy(&sid)) ? \ CPA_STATUS_RESOURCE : \ CPA_STATUS_SUCCESS) /* ******************************************************************************* * Spinlock Macros ******************************************************************************* */ typedef struct mtx *lac_lock_t; #define LAC_SPINLOCK_INIT(lock) \ ((CPA_STATUS_SUCCESS != qatUtilsLockInit(lock)) ?
\ CPA_STATUS_RESOURCE : \ CPA_STATUS_SUCCESS) #define LAC_SPINLOCK(lock) \ ({ \ (void)qatUtilsLock(lock); \ - CPA_STATUS_SUCCESS; \ }) #define LAC_SPINUNLOCK(lock) \ ({ \ (void)qatUtilsUnlock(lock); \ - CPA_STATUS_SUCCESS; \ }) #define LAC_SPINLOCK_DESTROY(lock) \ ({ \ (void)qatUtilsLockDestroy(lock); \ - CPA_STATUS_SUCCESS; \ }) #define LAC_CONST_PTR_CAST(castee) ((void *)(LAC_ARCH_UINT)(castee)) #define LAC_CONST_VOLATILE_PTR_CAST(castee) ((void *)(LAC_ARCH_UINT)(castee)) /* Type of ring */ #define SAL_RING_TYPE_NONE 0 #define SAL_RING_TYPE_A_SYM_HI 1 #define SAL_RING_TYPE_A_SYM_LO 2 #define SAL_RING_TYPE_A_ASYM 3 #define SAL_RING_TYPE_B_SYM_HI 4 #define SAL_RING_TYPE_B_SYM_LO 5 #define SAL_RING_TYPE_B_ASYM 6 #define SAL_RING_TYPE_DC 7 #define SAL_RING_TYPE_ADMIN 8 #define SAL_RING_TYPE_TRNG 9 /* Maps Ring Service to generic service type */ static inline icp_adf_ringInfoService_t lac_getRingType(int type) { switch (type) { case SAL_RING_TYPE_NONE: return ICP_ADF_RING_SERVICE_0; case SAL_RING_TYPE_A_SYM_HI: return ICP_ADF_RING_SERVICE_1; case SAL_RING_TYPE_A_SYM_LO: return ICP_ADF_RING_SERVICE_2; case SAL_RING_TYPE_A_ASYM: return ICP_ADF_RING_SERVICE_3; case SAL_RING_TYPE_B_SYM_HI: return ICP_ADF_RING_SERVICE_4; case SAL_RING_TYPE_B_SYM_LO: return ICP_ADF_RING_SERVICE_5; case SAL_RING_TYPE_B_ASYM: return ICP_ADF_RING_SERVICE_6; case SAL_RING_TYPE_DC: return ICP_ADF_RING_SERVICE_7; case SAL_RING_TYPE_ADMIN: return ICP_ADF_RING_SERVICE_8; case SAL_RING_TYPE_TRNG: return ICP_ADF_RING_SERVICE_9; default: return ICP_ADF_RING_SERVICE_0; } return ICP_ADF_RING_SERVICE_0; } /* Maps generic service type to Ring Service type */ static inline int lac_getServiceType(icp_adf_ringInfoService_t type) { switch (type) { case ICP_ADF_RING_SERVICE_0: return SAL_RING_TYPE_NONE; case ICP_ADF_RING_SERVICE_1: return SAL_RING_TYPE_A_SYM_HI; case ICP_ADF_RING_SERVICE_2: return SAL_RING_TYPE_A_SYM_LO; case ICP_ADF_RING_SERVICE_3: return SAL_RING_TYPE_A_ASYM; case ICP_ADF_RING_SERVICE_4: return SAL_RING_TYPE_B_SYM_HI; case ICP_ADF_RING_SERVICE_5: return SAL_RING_TYPE_B_SYM_LO; case ICP_ADF_RING_SERVICE_6: return SAL_RING_TYPE_B_ASYM; case ICP_ADF_RING_SERVICE_7: return SAL_RING_TYPE_DC; case ICP_ADF_RING_SERVICE_8: return SAL_RING_TYPE_ADMIN; default: return SAL_RING_TYPE_NONE; } return SAL_RING_TYPE_NONE; } #endif /* LAC_COMMON_H */ diff --git a/sys/dev/qat/qat_api/common/include/lac_sal.h b/sys/dev/qat/qat_api/common/include/lac_sal.h index 031cb4204084..69d47628496e 100644 --- a/sys/dev/qat/qat_api/common/include/lac_sal.h +++ b/sys/dev/qat/qat_api/common/include/lac_sal.h @@ -1,498 +1,497 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright(c) 2007-2022 Intel Corporation */ /* $FreeBSD$ */ /** ***************************************************************************** * @file lac_sal.h * * @defgroup SalCtrl Service Access Layer Controller * * @ingroup SalCtrl * * @description * These are the functions to be executed for each state * of the state machine for each service. * *****************************************************************************/ #ifndef LAC_SAL_H #define LAC_SAL_H #include "cpa_cy_im.h" /** ******************************************************************************* * @ingroup SalCtrl * @description * This function allocates memory for a specific instance type. * Zeros this memory and sets the generic service section of * the instance memory. * * @context * This function is called from the generic services init.
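* An illustrative call (local names assumed, not taken from this header): sal_service_t *pInst = NULL; CpaStatus status = SalCtrl_ServiceCreate(SAL_SERVICE_TYPE_CRYPTO, 0, &pInst);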
* * @assumptions * None * @sideEffects * None * @reentrant * No * @threadSafe * Yes * * @param[in] service The type of the service to be created * (e.g. CRYPTO) * @param[in] instance_num The logical instance number which will * run the service * @param[out] pObj Pointer to specific service instance memory * @retval CPA_STATUS_SUCCESS Instance memory successfully allocated * @retval CPA_STATUS_RESOURCE Instance memory not successfully allocated * @retval CPA_STATUS_FAIL Unsupported service type * *****************************************************************************/ CpaStatus SalCtrl_ServiceCreate(sal_service_type_t service, Cpa32U instance_num, sal_service_t **pObj); -/** -******************************************************************************* +/****************************************************************************** * @ingroup SalCtrl * @description * This macro goes through the 'list' passed in as a parameter. For each * element found in the list, it performs a cast to the type of the element * given by the 'type' parameter. Finally, it calls the function given by * the 'function' parameter, passing itself and the device as parameters. * * In case of error (i.e. 'function' does not return _SUCCESS or _RETRY) * processing of the 'list' elements will stop and the status_ret will be * updated. * * In case of _RETRY status_ret will be updated but the 'list' * will continue to be processed. _RETRY is only expected when * 'function' is stop. * * @context * This macro is used by both the service and qat event handlers. * * @assumptions * None * @sideEffects * None * * @param[in] list The list of services or qats as a type of list_t * @param[in] type It identifies the type of the object inside the * list: service or qat * @param[in] device The ADF accelerator handle for the device * @param[in] function The function pointer to call * @param[in/out] status_ret If an error occurred (i.e. status returned from * function is not _SUCCESS) then status_ret is * overwritten with status returned from function. * *****************************************************************************/ #define SAL_FOR_EACH(list, type, device, function, status_ret) \ do { \ sal_list_t *curr_element = list; \ CpaStatus status_temp = CPA_STATUS_SUCCESS; \ typeof(type) *process = NULL; \ while (NULL != curr_element) { \ process = \ (typeof(type) *)SalList_getObject(curr_element); \ status_temp = process->function(device, process); \ if ((CPA_STATUS_SUCCESS != status_temp) && \ (CPA_STATUS_RETRY != status_temp)) { \ status_ret = status_temp; \ break; \ } else { \ if (CPA_STATUS_RETRY == status_temp) { \ status_ret = status_temp; \ } \ } \ curr_element = SalList_next(curr_element); \ } \ } while (0) /** ******************************************************************************* * @ingroup SalCtrl * @description * This macro goes through the 'list' passed in as a parameter. For each * element found in the list, it performs a cast to the type of the element * given by the 'type' parameter. Finally, it checks the state of the * element and if it is in state 'state_check' then it calls the * function given by the 'function' parameter, passing itself * and the device as parameters. * If the element is not in 'state_check', processing of the list stops. * * In case of error (i.e. 'function' does not return _SUCCESS) * processing of the 'list' elements will continue. * * @context * This macro is used by both the service and qat event handlers.
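* An illustrative use (container fields assumed from the sal_t structure defined later in this patch): stopping every running crypto instance might look like SAL_FOR_EACH_STATE(sal->crypto_services, sal_service_t, device, stop, SAL_SERVICE_STATE_RUNNING);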
* * @assumptions * None * @sideEffects * None * * @param[in] list The list of services or qats as a type of list_t * @param[in] type It identifies the type of the object * inside the list: service or qat * @param[in] device The ADF accelerator handle for the device * @param[in] function The function pointer to call * @param[in] state_check The state to check for * *****************************************************************************/ #define SAL_FOR_EACH_STATE(list, type, device, function, state_check) \ do { \ sal_list_t *curr_element = list; \ typeof(type) *process = NULL; \ while (NULL != curr_element) { \ process = \ (typeof(type) *)SalList_getObject(curr_element); \ if (process->state == state_check) { \ process->function(device, process); \ } else { \ break; \ } \ curr_element = SalList_next(curr_element); \ } \ } while (0) /************************************************************************* * @ingroup SalCtrl * @description * This function is used to initialize an instance of crypto service. * It creates a crypto instance's memory pools. It calls ADF to create * its required transport handles. It calls the sub crypto service init * functions. Resets the stats. * * @context * This function is called from the SalCtrl_ServiceEventInit function. * * @assumptions * None * @sideEffects * None * @reentrant * No * @threadSafe * No (ADF ensures that this function doesn't need to be thread safe) * * @param[in] device An icp_accel_dev_t* type * @param[in] service A crypto instance * *************************************************************************/ CpaStatus SalCtrl_CryptoInit(icp_accel_dev_t *device, sal_service_t *service); /************************************************************************* * @ingroup SalCtrl * @description * This function is used to start an instance of crypto service. * It sends the first messages to FW on its crypto instance transport * handles. For asymmetric crypto it verifies the header on the downloaded * MMP library. * * @context * This function is called from the SalCtrl_ServiceEventStart function. * * @assumptions * None * @sideEffects * None * @reentrant * No * @threadSafe * No (ADF ensures that this function doesn't need to be thread safe) * * @param[in] device An icp_accel_dev_t* type * @param[in] service A crypto instance * *************************************************************************/ CpaStatus SalCtrl_CryptoStart(icp_accel_dev_t *device, sal_service_t *service); /************************************************************************* * @ingroup SalCtrl * @description * This function is used to stop an instance of crypto service. * It checks for inflight messages to the FW. If no messages are pending * it returns success. If messages are pending it returns retry. * * @context * This function is called from the SalCtrl_ServiceEventStop function. * * @assumptions * None * @sideEffects * None * @reentrant * No * @threadSafe * No (ADF ensures that this function doesn't need to be thread safe) * * @param[in] device An icp_accel_dev_t* type * @param[in] service A crypto instance * *************************************************************************/ CpaStatus SalCtrl_CryptoStop(icp_accel_dev_t *device, sal_service_t *service); /************************************************************************* * @ingroup SalCtrl * @description * This function is used to shutdown an instance of crypto service. * It frees resources allocated at initialisation - e.g. frees the * memory pools and ADF transport handles. 
* * @context * This function is called from the SalCtrl_ServiceEventShutdown function. * * @assumptions * None * @sideEffects * None * @reentrant * No * @threadSafe * No (ADF ensures that this function doesn't need to be thread safe) * * @param[in] device An icp_accel_dev_t* type * @param[in] service A crypto instance * *************************************************************************/ CpaStatus SalCtrl_CryptoShutdown(icp_accel_dev_t *device, sal_service_t *service); /************************************************************************* * @ingroup SalCtrl * @description * This function sets the capability info of crypto instances. * * @context * This function is called from the cpaCyQueryCapabilities and * LacSymSession_ParamCheck function. * * @assumptions * None * @sideEffects * None * @reentrant * No * @threadSafe * No (ADF ensures that this function doesn't need to be thread safe) * * @param[in] service A sal_service_t* type * @param[in] cyCapabilityInfo A CpaCyCapabilitiesInfo* type * *************************************************************************/ void SalCtrl_CyQueryCapabilities(sal_service_t *pGenericService, CpaCyCapabilitiesInfo *pCapInfo); /************************************************************************* * @ingroup SalCtrl * @description * This function is used to initialize an instance of compression service. * It creates a compression instance's memory pools. It calls ADF to create * its required transport handles. It zeros an instance's stats. * * @context * This function is called from the SalCtrl_ServiceEventInit function. * * @assumptions * None * @sideEffects * None * @reentrant * No * @threadSafe * No (ADF ensures that this function doesn't need to be thread safe) * * @param[in] device An icp_accel_dev_t* type * @param[in] service A compression instance * *************************************************************************/ CpaStatus SalCtrl_CompressionInit(icp_accel_dev_t *device, sal_service_t *service); /************************************************************************* * @ingroup SalCtrl * @description * This function is used to start an instance of compression service. * * @context * This function is called from the SalCtrl_ServiceEventStart function. * * @assumptions * None * @sideEffects * None * @reentrant * No * @threadSafe * No (ADF ensures that this function doesn't need to be thread safe) * * @param[in] device An icp_accel_dev_t* type * @param[in] service A compression instance * *************************************************************************/ CpaStatus SalCtrl_CompressionStart(icp_accel_dev_t *device, sal_service_t *service); /************************************************************************* * @ingroup SalCtrl * @description * This function is used to stop an instance of compression service. * It checks for inflight messages to the FW. If no messages are pending * it returns success. If messages are pending it returns retry. * * @context * This function is called from the SalCtrl_ServiceEventStop function.
* * @assumptions * None * @sideEffects * None * @reentrant * No * @threadSafe * No (ADF ensures that this function doesn't need to be thread safe) * * @param[in] device An icp_accel_dev_t* type * @param[in] service A compression instance * *************************************************************************/ CpaStatus SalCtrl_CompressionStop(icp_accel_dev_t *device, sal_service_t *service); /************************************************************************* * @ingroup SalCtrl * @description * This function is used to shutdown an instance of compression service. * It frees resources allocated at initialisation - e.g. frees the * memory pools and ADF transport handles. * * @context * This function is called from the SalCtrl_ServiceEventShutdown function. * * @assumptions * None * @sideEffects * None * @reentrant * No * @threadSafe * No (ADF ensures that this function doesn't need to be thread safe) * * @param[in] device An icp_accel_dev_t* type * @param[in] service A compression instance * *************************************************************************/ CpaStatus SalCtrl_CompressionShutdown(icp_accel_dev_t *device, sal_service_t *service); /************************************************************************* * @ingroup SalCtrl * @description * This function is used to get the number of services enabled * from the config table. * * @context * This function is called from the SalCtrl_QatInit * * @assumptions * None * @sideEffects * None * @reentrant * No * @threadSafe * No * * @param[in] device An icp_accel_dev_t* type * @param[out] pEnabledServices pointer to a variable used to store * the enabled services * *************************************************************************/ CpaStatus SalCtrl_GetEnabledServices(icp_accel_dev_t *device, Cpa32U *pEnabledServices); /************************************************************************* * @ingroup SalCtrl * @description * This function is used to check if a service is enabled * * @context * This function is called from the SalCtrl_QatInit * * @assumptions * None * @sideEffects * None * @reentrant * No * @threadSafe * Yes * * @param[in] enabled_services * @param[in] service * *************************************************************************/ CpaBoolean SalCtrl_IsServiceEnabled(Cpa32U enabled_services, sal_service_type_t service); /************************************************************************* * @ingroup SalCtrl * @description * This function is used to check if a service is supported on the device * The key difference between this and SalCtrl_GetSupportedServices() is * that the latter treats it as an error if the service is unsupported. * * @context * This can be called anywhere. * * @assumptions * None * @sideEffects * None * @reentrant * No * @threadSafe * Yes * * @param[in] device * @param[in] service service or services to check * *************************************************************************/ CpaBoolean SalCtrl_IsServiceSupported(icp_accel_dev_t *device, sal_service_type_t service); /************************************************************************* * @ingroup SalCtrl * @description * This function is used to check whether enabled services have associated * hardware capability support * * @context * This function is called from the SalCtrl_ServiceEventInit function.
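* An illustrative flow (variable name assumed): an event handler would first call SalCtrl_GetEnabledServices(device, &enabled_services) and then SalCtrl_GetSupportedServices(device, enabled_services) to confirm the hardware can back every enabled service.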
* * @assumptions * None * @sideEffects * None * @reentrant * No * @threadSafe * Yes * * @param[in] device A pointer to an icp_accel_dev_t * @param[in] enabled_services Bitmask of the enabled services *************************************************************************/ CpaStatus SalCtrl_GetSupportedServices(icp_accel_dev_t *device, Cpa32U enabled_services); #endif diff --git a/sys/dev/qat/qat_api/common/include/lac_sal_types.h b/sys/dev/qat/qat_api/common/include/lac_sal_types.h index 36e146665787..eb2edf586438 100644 --- a/sys/dev/qat/qat_api/common/include/lac_sal_types.h +++ b/sys/dev/qat/qat_api/common/include/lac_sal_types.h @@ -1,198 +1,212 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright(c) 2007-2022 Intel Corporation */ /* $FreeBSD$ */ /** *************************************************************************** * @file lac_sal_types.h * * @ingroup SalCtrl * * Generic instance type definitions of SAL controller * ***************************************************************************/ #ifndef LAC_SAL_TYPES_H #define LAC_SAL_TYPES_H #include "lac_sync.h" #include "lac_list.h" #include "icp_accel_devices.h" #include "sal_statistics.h" #include "icp_adf_debug.h" #define SAL_CFG_BASE_DEC 10 #define SAL_CFG_BASE_HEX 16 /** ***************************************************************************** * @ingroup SalCtrl * Instance States * * @description * An enumeration containing the possible states for an instance. * *****************************************************************************/ typedef enum sal_service_state_s { SAL_SERVICE_STATE_UNINITIALIZED = 0, SAL_SERVICE_STATE_INITIALIZING, SAL_SERVICE_STATE_INITIALIZED, SAL_SERVICE_STATE_RUNNING, SAL_SERVICE_STATE_SHUTTING_DOWN, SAL_SERVICE_STATE_SHUTDOWN, SAL_SERVICE_STATE_RESTARTING, SAL_SERVICE_STATE_END } sal_service_state_t; /** ***************************************************************************** * @ingroup SalCtrl * Service Instance Types * * @description * An enumeration containing the possible types for a service. * *****************************************************************************/ typedef enum { SAL_SERVICE_TYPE_UNKNOWN = 0, /* symmetric and asymmetric crypto service */ SAL_SERVICE_TYPE_CRYPTO = 1, /* compression service */ SAL_SERVICE_TYPE_COMPRESSION = 2, /* inline service */ SAL_SERVICE_TYPE_INLINE = 4, /* asymmetric crypto only service*/ SAL_SERVICE_TYPE_CRYPTO_ASYM = 8, /* symmetric crypto only service*/ SAL_SERVICE_TYPE_CRYPTO_SYM = 16, SAL_SERVICE_TYPE_QAT = 32 } sal_service_type_t; +/** + ***************************************************************************** + * @ingroup SalCtrl + * Device generations + * + * @description + * Lists all the QAT device generations in an enum. + * + *****************************************************************************/ +typedef enum { GEN2, GEN3, GEN4 } sal_generation_t; + /** ***************************************************************************** * @ingroup SalCtrl * Generic Instance Container * * @description * Contains all the common information across the different instances. * *****************************************************************************/ typedef struct sal_service_s { sal_service_type_t type; /**< Service type (e.g. SAL_SERVICE_TYPE_CRYPTO)*/ Cpa8U state; /**< Status of the service instance (e.g.
SAL_SERVICE_STATE_INITIALIZED) */ Cpa32U instance; /**< Instance number */ CpaVirtualToPhysical virt2PhysClient; /**< Function pointer to client supplied virt_to_phys */ CpaStatus (*init)(icp_accel_dev_t *device, struct sal_service_s *service); /**< Function pointer for instance INIT function */ CpaStatus (*start)(icp_accel_dev_t *device, struct sal_service_s *service); /**< Function pointer for instance START function */ CpaStatus (*stop)(icp_accel_dev_t *device, struct sal_service_s *service); /**< Function pointer for instance STOP function */ CpaStatus (*shutdown)(icp_accel_dev_t *device, struct sal_service_s *service); /**< Function pointer for instance SHUTDOWN function */ CpaCyInstanceNotificationCbFunc notification_cb; /**< Function pointer for instance restarting handler */ void *cb_tag; /**< Restarting handler priv data */ sal_statistics_collection_t *stats; /**< Pointer to device statistics configuration */ void *debug_parent_dir; /**< Pointer to parent proc dir entry */ CpaBoolean is_dyn; Cpa32U capabilitiesMask; /**< Capabilities mask of the device */ Cpa32U dcExtendedFeatures; /**< Bit field of features. I.e. Compress And Verify */ CpaBoolean isInstanceStarted; /**< True if user called StartInstance on this instance */ CpaBoolean integrityCrcCheck; /**< True if the device supports end to end data integrity checks */ + + sal_generation_t gen; + /** Generation of devices */ } sal_service_t; /** ***************************************************************************** * @ingroup SalCtrl * SAL structure * * @description * Contains lists of crypto and compression instances. * *****************************************************************************/ typedef struct sal_s { sal_list_t *crypto_services; /**< Container of sal_crypto_service_t */ sal_list_t *asym_services; /**< Container of sal_asym_service_t */ sal_list_t *sym_services; /**< Container of sal_sym_service_t */ sal_list_t *compression_services; /**< Container of sal_compression_service_t */ debug_dir_info_t *cy_dir; /**< Container for crypto proc debug */ debug_dir_info_t *asym_dir; /**< Container for asym proc debug */ debug_dir_info_t *sym_dir; /**< Container for sym proc debug */ debug_dir_info_t *dc_dir; /**< Container for compression proc debug */ debug_file_info_t *ver_file; /**< Container for version debug file */ } sal_t; /** ***************************************************************************** * @ingroup SalCtrl * SAL debug structure * * @description * Service debug handler * *****************************************************************************/ typedef struct sal_service_debug_s { icp_accel_dev_t *accel_dev; debug_file_info_t debug_file; } sal_service_debug_t; /** ******************************************************************************* * @ingroup SalCtrl * This macro verifies that the right service type has been passed in. * * @param[in] pService pointer to service instance * @param[in] service_type service type to check against.
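* An illustrative use: a symmetric-only entry point might begin with SAL_CHECK_INSTANCE_TYPE(instanceHandle, (SAL_SERVICE_TYPE_CRYPTO | SAL_SERVICE_TYPE_CRYPTO_SYM)); the macro returns from the calling function on a mismatch.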
* * @return CPA_STATUS_FAIL Parameter is incorrect type * ******************************************************************************/ #define SAL_CHECK_INSTANCE_TYPE(pService, service_type) \ do { \ sal_service_t *pGenericService = NULL; \ pGenericService = (sal_service_t *)pService; \ if (!(service_type & pGenericService->type)) { \ QAT_UTILS_LOG("Instance handle type is incorrect.\n"); \ return CPA_STATUS_FAIL; \ } \ } while (0) #endif diff --git a/sys/dev/qat/qat_api/common/include/lac_sal_types_crypto.h b/sys/dev/qat/qat_api/common/include/lac_sal_types_crypto.h index c7db231260ed..68213278672e 100644 --- a/sys/dev/qat/qat_api/common/include/lac_sal_types_crypto.h +++ b/sys/dev/qat/qat_api/common/include/lac_sal_types_crypto.h @@ -1,179 +1,191 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright(c) 2007-2022 Intel Corporation */ /* $FreeBSD$ */ /** *************************************************************************** * @file lac_sal_types_crypto.h * * @ingroup SalCtrl * * Generic crypto instance type definition * ***************************************************************************/ #ifndef LAC_SAL_TYPES_CRYPTO_H_ #define LAC_SAL_TYPES_CRYPTO_H_ #include "lac_sym_qat_hash_defs_lookup.h" +#include "lac_sym_qat_constants_table.h" #include "lac_sym_key.h" #include "cpa_cy_sym_dp.h" #include "icp_adf_debug.h" #include "lac_sal_types.h" #include "icp_adf_transport.h" #include "lac_mem_pools.h" #define LAC_PKE_FLOW_ID_TAG 0xFFFFFFFC #define LAC_PKE_ACCEL_ID_BIT_POS 1 #define LAC_PKE_SLICE_ID_BIT_POS 0 /** ***************************************************************************** * @ingroup SalCtrl * Crypto specific Service Container * * @description * Contains information required per crypto service instance. * *****************************************************************************/ typedef struct sal_crypto_service_s { sal_service_t generic_service_info; /**< An instance of the Generic Service Container */ lac_memory_pool_id_t lac_sym_cookie_pool; /**< Memory pool ID used for symmetric operations */ lac_memory_pool_id_t lac_ec_pool; /**< Memory pool ID used for asymmetric operations */ lac_memory_pool_id_t lac_prime_pool; /**< Memory pool ID used for asymmetric operations */ lac_memory_pool_id_t lac_pke_req_pool; /**< Memory pool ID used for asymmetric operations */ lac_memory_pool_id_t lac_pke_align_pool; /**< Memory pool ID used for asymmetric operations */ QatUtilsAtomic *pLacSymStatsArr; /**< pointer to an array of atomic stats for symmetric */ QatUtilsAtomic *pLacKeyStats; /**< pointer to an array of atomic stats for key */ QatUtilsAtomic *pLacDhStatsArr; /**< pointer to an array of atomic stats for DH */ QatUtilsAtomic *pLacDsaStatsArr; /**< pointer to an array of atomic stats for Dsa */ QatUtilsAtomic *pLacRsaStatsArr; /**< pointer to an array of atomic stats for Rsa */ QatUtilsAtomic *pLacEcStatsArr; /**< pointer to an array of atomic stats for Ecc */ QatUtilsAtomic *pLacEcdhStatsArr; /**< pointer to an array of atomic stats for Ecc DH */ QatUtilsAtomic *pLacEcdsaStatsArr; /**< pointer to an array of atomic stats for Ecc DSA */ QatUtilsAtomic *pLacPrimeStatsArr; /**< pointer to an array of atomic stats for prime */ QatUtilsAtomic *pLacLnStatsArr; /**< pointer to an array of atomic stats for large number */ QatUtilsAtomic *pLacDrbgStatsArr; /**< pointer to an array of atomic stats for DRBG */ + icp_qat_hw_auth_mode_t qatHmacMode; + /**< Hmac Mode */ Cpa32U pkeFlowId; /**< Flow ID for all pke requests from this instance - identifies accelerator and 
execution engine to use */ icp_comms_trans_handle trans_handle_sym_tx; icp_comms_trans_handle trans_handle_sym_rx; icp_comms_trans_handle trans_handle_asym_tx; icp_comms_trans_handle trans_handle_asym_rx; icp_comms_trans_handle trans_handle_nrbg_tx; icp_comms_trans_handle trans_handle_nrbg_rx; Cpa32U maxNumSymReqBatch; /**< Maximum number of requests that can be placed on the sym tx ring for any one batch request (DP api) */ Cpa16U acceleratorNum; Cpa16U bankNum; + Cpa16U bankNumAsym; + Cpa16U bankNumSym; Cpa16U pkgID; Cpa8U isPolled; Cpa8U executionEngine; Cpa32U coreAffinity; Cpa32U nodeAffinity; /**< Config Info */ CpaCySymDpCbFunc pSymDpCb; /**< Sym DP Callback */ lac_sym_qat_hash_defs_t **pLacHashLookupDefs; /**< table of pointers to standard defined information for all hash algorithms. We support an extra hash algo that is not exported by cy api which is why we need the extra +1 */ + + lac_sym_qat_constants_t constantsLookupTables; + Cpa8U **ppHmacContentDesc; /**< table of pointers to CD for Hmac precomputes - used at session init */ Cpa8U *pSslLabel; /**< pointer to memory holding the standard SSL label ABBCCC.. */ lac_sym_key_tls_labels_t *pTlsLabel; /**< pointer to memory holding the 4 standard TLS labels */ QatUtilsAtomic drbgErrorState; /**< DRBG related variables */ lac_sym_key_tls_hkdf_sub_labels_t *pTlsHKDFSubLabel; /**< pointer to memory holding the 4 HKDFLabels sublabels */ debug_file_info_t *debug_file; /**< Statistics handler */ + + CpaBoolean forceAEADMacVerify; + /**< internal flag to enable/disable forcing HW digest verification for + GCM and CCM algorithms */ } sal_crypto_service_t; /************************************************************************* * @ingroup cpaCyCommon * @description * This function returns a valid asym/sym/crypto instance handle for the * system if it exists. When requesting an instance handle of type sym or * asym, if either is not found then a crypto instance handle is returned * if found, since a crypto handle supports both sym and asym services. * Similarly when requesting a crypto instance handle, if it is not found * then an asym or sym crypto instance handle is returned. * * @performance * To avoid calling this function the user of the QA api should not use * instanceHandle = CPA_INSTANCE_HANDLE_SINGLE. * * @context * This function is called whenever instanceHandle = *CPA_INSTANCE_HANDLE_SINGLE * at the QA Cy api. * * @assumptions * None * @sideEffects * None * @reentrant * No * @threadSafe * Yes * * @param[in] svc_type Type of crypto service requested. * * @retval Pointer to first crypto instance handle or NULL if no crypto * instances in the system. 
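* An illustrative call: CpaInstanceHandle handle = Lac_GetFirstHandle(SAL_SERVICE_TYPE_CRYPTO_SYM); a NULL result means no suitable instance exists.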
* *****************************************************************************/ CpaInstanceHandle Lac_GetFirstHandle(sal_service_type_t svc_type); #endif /*LAC_SAL_TYPES_CRYPTO_H_*/ diff --git a/sys/dev/qat/qat_api/common/include/lac_sync.h b/sys/dev/qat/qat_api/common/include/lac_sync.h index b842cce30d87..7052c656396a 100644 --- a/sys/dev/qat/qat_api/common/include/lac_sync.h +++ b/sys/dev/qat/qat_api/common/include/lac_sync.h @@ -1,376 +1,376 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright(c) 2007-2022 Intel Corporation */ /* $FreeBSD$ */ /** *************************************************************************** * @file lac_sync.h * * @defgroup LacSync LAC synchronous * * @ingroup LacCommon * * Function prototypes and defines for synchronous support * ***************************************************************************/ #ifndef LAC_SYNC_H #define LAC_SYNC_H #include "cpa.h" #include "qat_utils.h" #include "lac_mem.h" /** ***************************************************************************** * @ingroup LacSync * * @description * LAC cookie for synchronous support * *****************************************************************************/ typedef struct lac_sync_op_data_s { struct sema *sid; /**< Semaphore to signal */ CpaStatus status; /**< Output - Status of the QAT response */ CpaBoolean opResult; /**< Output - Verification of the operation/protocol status */ CpaBoolean complete; /**< Output - Operation is complete */ CpaBoolean canceled; /**< Output - Operation canceled */ } lac_sync_op_data_t; #define LAC_PKE_SYNC_CALLBACK_TIMEOUT (5000) /**< @ingroup LacSync * Timeout waiting for async callbacks in msecs. * This is derived from the max latency of a PKE request + 1 sec */ #define LAC_SYM_DRBG_POLL_AND_WAIT_TIME_MS (10) /**< @ingroup LacSync * Default DRBG polling interval in msecs */ #define LAC_SYM_SYNC_CALLBACK_TIMEOUT (300) /**< @ingroup LacSync * Timeout to wait for a symmetric response in msecs */ #define LAC_INIT_MSG_CALLBACK_TIMEOUT (1922) /**< @ingroup LacSync * Timeout to wait for init message responses in msecs */ -#define DC_SYNC_CALLBACK_TIMEOUT (1000) +#define DC_SYNC_CALLBACK_TIMEOUT (2000) /**< @ingroup LacSync * Timeout to wait for a compression response in msecs */ #define LAC_SYN_INITIAL_SEM_VALUE (0) /**< @ingroup LacSync * Initial value of the sync waiting semaphore */ /** ******************************************************************************* * @ingroup LacSync * This function allocates a sync op data cookie * and creates and initialises the QAT Utils semaphore * * @param[in] ppSyncCallbackCookie Pointer to sync op data * * @retval CPA_STATUS_RESOURCE Failed to allocate the memory for the cookie.
* @retval CPA_STATUS_SUCCESS Success * ******************************************************************************/ static __inline CpaStatus LacSync_CreateSyncCookie(lac_sync_op_data_t **ppSyncCallbackCookie) { CpaStatus status = CPA_STATUS_SUCCESS; *ppSyncCallbackCookie = malloc(sizeof(lac_sync_op_data_t), M_QAT, M_WAITOK); if (CPA_STATUS_SUCCESS == status) { status = LAC_INIT_SEMAPHORE((*ppSyncCallbackCookie)->sid, LAC_SYN_INITIAL_SEM_VALUE); (*ppSyncCallbackCookie)->complete = CPA_FALSE; (*ppSyncCallbackCookie)->canceled = CPA_FALSE; } if (CPA_STATUS_SUCCESS != status) { LAC_OS_FREE(*ppSyncCallbackCookie); } return status; } /** ******************************************************************************* * @ingroup LacSync * This function frees a sync op data cookie and destroys the QAT Utils *semaphore * * @param[in] ppSyncCallbackCookie Pointer to sync op data * * @retval CPA_STATUS_SUCCESS Success * @retval CPA_STATUS_FAIL The operation has not completed, so the * cookie is canceled instead of destroyed ******************************************************************************/ static __inline CpaStatus LacSync_DestroySyncCookie(lac_sync_op_data_t **ppSyncCallbackCookie) { CpaStatus status = CPA_STATUS_SUCCESS; /* * If the operation has not completed, cancel it instead of destroying * the * cookie. Otherwise, the callback might panic. In this case, the cookie * will leak, but it's better than a panic. */ if (!(*ppSyncCallbackCookie)->complete) { QAT_UTILS_LOG( "Attempting to destroy an incomplete sync cookie\n"); (*ppSyncCallbackCookie)->canceled = CPA_TRUE; return CPA_STATUS_FAIL; } status = LAC_DESTROY_SEMAPHORE((*ppSyncCallbackCookie)->sid); LAC_OS_FREE(*ppSyncCallbackCookie); return status; } /** ***************************************************************************** * @ingroup LacSync * Function which will wait for a sync callback on a given cookie. * * @param[in] pSyncCallbackCookie Pointer to sync op data * @param[in] timeOut Time to wait for callback (msec) * @param[out] pStatus Status returned by the callback * @param[out] pOpStatus Operation status returned by callback. * * @retval CPA_STATUS_SUCCESS Success * @retval CPA_STATUS_RESOURCE Fail waiting for a callback * *****************************************************************************/ static __inline CpaStatus LacSync_WaitForCallback(lac_sync_op_data_t *pSyncCallbackCookie, Cpa32S timeOut, CpaStatus *pStatus, CpaBoolean *pOpStatus) { CpaStatus status = CPA_STATUS_SUCCESS; status = LAC_WAIT_SEMAPHORE(pSyncCallbackCookie->sid, timeOut); if (CPA_STATUS_SUCCESS == status) { *pStatus = pSyncCallbackCookie->status; if (NULL != pOpStatus) { *pOpStatus = pSyncCallbackCookie->opResult; } pSyncCallbackCookie->complete = CPA_TRUE; } return status; } /** ***************************************************************************** * @ingroup LacSync * Function which will check for a sync callback on a given cookie. * Returns whether the callback has happened or not, no timeout. * * @param[in] pSyncCallbackCookie Pointer to sync op data * @param[out] pStatus Status returned by the callback * @param[out] pOpStatus Operation status returned by callback.
* * @retval CPA_STATUS_SUCCESS Success * @retval CPA_STATUS_RETRY The callback has not happened yet * *****************************************************************************/ static __inline CpaStatus LacSync_CheckForCallback(lac_sync_op_data_t *pSyncCallbackCookie, CpaStatus *pStatus, CpaBoolean *pOpStatus) { CpaStatus status = CPA_STATUS_SUCCESS; status = LAC_CHECK_SEMAPHORE(pSyncCallbackCookie->sid); if (CPA_STATUS_SUCCESS == status) { *pStatus = pSyncCallbackCookie->status; if (NULL != pOpStatus) { *pOpStatus = pSyncCallbackCookie->opResult; } pSyncCallbackCookie->complete = CPA_TRUE; } return status; } /** ***************************************************************************** * @ingroup LacSync * Function which will mark a sync cookie as complete. * If it's known that the callback will not happen it's necessary * to call this, else the cookie can't be destroyed. * * @param[in] pSyncCallbackCookie Pointer to sync op data * * @retval CPA_STATUS_SUCCESS Success * @retval CPA_STATUS_FAIL Failed to mark as complete * *****************************************************************************/ static __inline CpaStatus LacSync_SetSyncCookieComplete(lac_sync_op_data_t *pSyncCallbackCookie) { CpaStatus status = CPA_STATUS_FAIL; if (NULL != pSyncCallbackCookie) { pSyncCallbackCookie->complete = CPA_TRUE; status = CPA_STATUS_SUCCESS; } return status; } /** ***************************************************************************** * @ingroup LacSync * Generic verify callback function. * @description * This function is used when the API is called in synchronous mode. * It's assumed the callbackTag holds a lac_sync_op_data_t type * and when the callback is received, this callback shall set the * status element of that cookie structure and kick the sid. * This function may be used directly as a callback function. * * @param[in] callbackTag Callback Tag * @param[in] status Status of callback * @param[out] pOpdata Pointer to the Op Data * @param[out] opResult Boolean to indicate the result of the operation * * @return void *****************************************************************************/ void LacSync_GenVerifyCb(void *callbackTag, CpaStatus status, void *pOpdata, CpaBoolean opResult); /** ***************************************************************************** * @ingroup LacSync * Generic flatbuffer callback function. * @description * This function is used when the API is called in synchronous mode. * It's assumed the callbackTag holds a lac_sync_op_data_t type * and when the callback is received, this callback shall set the * status element of that cookie structure and kick the sid. * This function may be used directly as a callback function. * * @param[in] callbackTag Callback Tag * @param[in] status Status of callback * @param[in] pOpdata Pointer to the Op Data * @param[out] pOut Pointer to the flat buffer * * @return void *****************************************************************************/ void LacSync_GenFlatBufCb(void *callbackTag, CpaStatus status, void *pOpdata, CpaFlatBuffer *pOut); /** ***************************************************************************** * @ingroup LacSync * Generic flatbuffer verify callback function. * @description * This function is used when the API is called in synchronous mode. * It's assumed the callbackTag holds a lac_sync_op_data_t type * and when the callback is received, this callback shall set the * status and opResult element of that cookie structure and * kick the sid.
* This function may be used directly as a callback function. * * @param[in] callbackTag Callback Tag * @param[in] status Status of callback * @param[in] pOpdata Pointer to the Op Data * @param[out] opResult Boolean to indicate the result of the operation * @param[out] pOut Pointer to the flat buffer * * @return void *****************************************************************************/ void LacSync_GenFlatBufVerifyCb(void *callbackTag, CpaStatus status, void *pOpdata, CpaBoolean opResult, CpaFlatBuffer *pOut); /** ***************************************************************************** * @ingroup LacSync * Generic dual flatbuffer verify callback function. * @description * This function is used when the API is called in synchronous mode. * It's assumed the callbackTag holds a lac_sync_op_data_t type * and when the callback is received, this callback shall set the * status and opResult element of that cookie structure and * kick the sid. * This function may be used directly as a callback function. * * @param[in] callbackTag Callback Tag * @param[in] status Status of callback * @param[in] pOpdata Pointer to the Op Data * @param[out] opResult Boolean to indicate the result of the operation * @param[out] pOut0 Pointer to the flat buffer * @param[out] pOut1 Pointer to the flat buffer * * @return void *****************************************************************************/ void LacSync_GenDualFlatBufVerifyCb(void *callbackTag, CpaStatus status, void *pOpdata, CpaBoolean opResult, CpaFlatBuffer *pOut0, CpaFlatBuffer *pOut1); /** ***************************************************************************** * @ingroup LacSync * Generic wake up function. * @description * This function is used when the API is called in synchronous * mode. * It's assumed the callbackTag holds a lac_sync_op_data_t type * and when the callback is received, this callback shall set * the status element of that cookie structure and kick the * sid. * This function may be called from an async callback. * * @param[in] callbackTag Callback Tag * @param[in] status Status of callback * * @return void *****************************************************************************/ void LacSync_GenWakeupSyncCaller(void *callbackTag, CpaStatus status); /** ***************************************************************************** * @ingroup LacSync * Generic wake up verify function. * @description * This function is used when the API is called in synchronous * mode. * It's assumed the callbackTag holds a lac_sync_op_data_t type * and when the callback is received, this callback shall set * the status element and the opResult of that cookie structure * and kick the sid. * This function may be called from an async callback.
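* A sketch of the synchronous-mode pattern these helpers assume (illustrative, error handling elided): lac_sync_op_data_t *pCookie = NULL; LacSync_CreateSyncCookie(&pCookie); submit the request with this function as the callback and pCookie as the callbackTag; LacSync_WaitForCallback(pCookie, LAC_SYM_SYNC_CALLBACK_TIMEOUT, &status, &opResult); LacSync_DestroySyncCookie(&pCookie);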
* * @param[in] callbackTag Callback Tag * @param[in] status Status of callback * @param[out] opResult Boolean to indicate the result of the operation * * @return void *****************************************************************************/ void LacSync_GenVerifyWakeupSyncCaller(void *callbackTag, CpaStatus status, CpaBoolean opResult); #endif /*LAC_SYNC_H*/ diff --git a/sys/dev/qat/qat_api/common/include/sal_hw_gen.h b/sys/dev/qat/qat_api/common/include/sal_hw_gen.h new file mode 100644 index 000000000000..38deb3cbc013 --- /dev/null +++ b/sys/dev/qat/qat_api/common/include/sal_hw_gen.h @@ -0,0 +1,88 @@ +/* SPDX-License-Identifier: BSD-3-Clause */ +/* Copyright(c) 2007-2022 Intel Corporation */ +/* $FreeBSD$ */ +/** + *************************************************************************** + * @file sal_hw_gen.h + * + * @ingroup SalHwGen + * + * @description + * Functions which return a value corresponding to qat device generation + * + ***************************************************************************/ + +#ifndef SAL_HW_GEN_H +#define SAL_HW_GEN_H + +#include "cpa.h" +#include "sal_types_compression.h" +#include "lac_sal_types_crypto.h" + +/** + *************************************************************************** + * @ingroup SalHwGen + * + * @description This function returns whether qat device is gen 4 or not + * + * @param[in] pService pointer to compression service + * + ***************************************************************************/ + +static inline CpaBoolean +isDcGen4x(const sal_compression_service_t *pService) +{ + return (pService->generic_service_info.gen == GEN4); +} + +/** + *************************************************************************** + * @ingroup SalHwGen + * + * @description This function returns whether qat device is gen 2/3 or not + * + * @param[in] pService pointer to compression service + * + ***************************************************************************/ + +static inline CpaBoolean +isDcGen2x(const sal_compression_service_t *pService) +{ + return ((pService->generic_service_info.gen == GEN2) || + (pService->generic_service_info.gen == GEN3)); +} + +/** + *************************************************************************** + * @ingroup SalHwGen + * + * @description This function returns whether qat device is gen 4 or not + * + * @param[in] pService pointer to crypto service + * + ***************************************************************************/ + +static inline CpaBoolean +isCyGen4x(const sal_crypto_service_t *pService) +{ + return (pService->generic_service_info.gen == GEN4); +} + +/** + *************************************************************************** + * @ingroup SalHwGen + * + * @description This function returns whether qat device is gen 2/3 or not + * + * @param[in] pService pointer to crypto service + * + ***************************************************************************/ + +static inline CpaBoolean +isCyGen2x(const sal_crypto_service_t *pService) +{ + return ((pService->generic_service_info.gen == GEN2) || + (pService->generic_service_info.gen == GEN3)); +} + +#endif /* SAL_HW_GEN_H */ diff --git a/sys/dev/qat/qat_api/common/include/sal_types_compression.h b/sys/dev/qat/qat_api/common/include/sal_types_compression.h index 12f9f673ac43..80e3c89ea699 100644 --- a/sys/dev/qat/qat_api/common/include/sal_types_compression.h +++ b/sys/dev/qat/qat_api/common/include/sal_types_compression.h @@ -1,150 +1,159 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright(c) 
2007-2022 Intel Corporation */ /* $FreeBSD$ */ /** *************************************************************************** * @file sal_types_compression.h * * @ingroup SalCtrl * * Generic compression instance type definition * ***************************************************************************/ #ifndef SAL_TYPES_COMPRESSION_H_ #define SAL_TYPES_COMPRESSION_H_ #include "cpa_dc.h" #include "cpa_dc_dp.h" #include "lac_sal_types.h" #include "icp_qat_hw.h" #include "icp_buffer_desc.h" #include "lac_mem_pools.h" #include "icp_adf_transport.h" #define DC_NUM_RX_RINGS (1) +#define DC_NUM_COMPRESSION_LEVELS (CPA_DC_L9) /** ***************************************************************************** * @ingroup SalCtrl * Compression device specific data * * @description * Contains device specific information for a compression service. * *****************************************************************************/ typedef struct sal_compression_device_data { /* Device specific minimum output buffer size for static compression */ Cpa32U minOutputBuffSize; + /* Device specific minimum output buffer size for dynamic compression */ + Cpa32U minOutputBuffSizeDynamic; + /* Enable/disable secureRam/acceleratorRam for intermediate buffers*/ Cpa8U useDevRam; /* When set, implies device can decompress interim odd byte length * stateful decompression requests. */ CpaBoolean oddByteDecompInterim; /* When set, implies device can decompress odd byte length * stateful decompression requests when bFinal is absent */ CpaBoolean oddByteDecompNobFinal; /* Flag to indicate if translator slice overflow is supported */ CpaBoolean translatorOverflow; /* Flag to enable/disable delayed match mode */ icp_qat_hw_compression_delayed_match_t enableDmm; Cpa32U inflateContextSize; Cpa8U highestHwCompressionDepth; /* Mask that reports supported window sizes for comp/decomp */ Cpa8U windowSizeMask; + /* List representing compression levels that are the first to have + a unique search depth. */ + CpaBoolean uniqueCompressionLevels[DC_NUM_COMPRESSION_LEVELS + 1]; + Cpa8U numCompressionLevels; + /* Flag to indicate CompressAndVerifyAndRecover feature support */ CpaBoolean cnvnrSupported; } sal_compression_device_data_t; /** ***************************************************************************** * @ingroup SalCtrl * Compression specific Service Container * * @description * Contains information required per compression service instance. 
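* * As a sketch of how the generation helpers added in sal_hw_gen.h are * meant to be used with this container (illustrative only; pService is an * assumed local): * * sal_compression_service_t *pService = ...; * if (isDcGen4x(pService)) * ... build a QAT2.0 (gen 4) request ... * else if (isDcGen2x(pService)) * ... build a QAT1.x (gen 2/3) request ...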
* *****************************************************************************/ typedef struct sal_compression_service_s { /* An instance of the Generic Service Container */ sal_service_t generic_service_info; /* Memory pool ID used for compression */ lac_memory_pool_id_t compression_mem_pool; /* Pointer to an array of atomic stats for compression */ QatUtilsAtomic *pCompStatsArr; /* Size of the DRAM intermediate buffer in bytes */ Cpa64U minInterBuffSizeInBytes; /* Number of DRAM intermediate buffers */ Cpa16U numInterBuffs; /* Address of the array of DRAM intermediate buffers*/ icp_qat_addr_width_t *pInterBuffPtrsArray; CpaPhysicalAddr pInterBuffPtrsArrayPhyAddr; icp_comms_trans_handle trans_handle_compression_tx; icp_comms_trans_handle trans_handle_compression_rx; /* Maximum number of in flight requests */ Cpa32U maxNumCompConcurrentReq; /* Callback function defined for the DcDp API compression session */ CpaDcDpCallbackFn pDcDpCb; /* Config info */ Cpa16U acceleratorNum; Cpa16U bankNum; Cpa16U pkgID; Cpa16U isPolled; Cpa32U coreAffinity; Cpa32U nodeAffinity; sal_compression_device_data_t comp_device_data; /* Statistics handler */ debug_file_info_t *debug_file; } sal_compression_service_t; /************************************************************************* * @ingroup SalCtrl * @description * This function returns a valid compression instance handle for the system * if it exists. * * @performance * To avoid calling this function the user of the QA api should not use * instanceHandle = CPA_INSTANCE_HANDLE_SINGLE. * * @context * This function is called whenever instanceHandle = * CPA_INSTANCE_HANDLE_SINGLE at the QA Dc api. * * @assumptions * None * @sideEffects * None * @reentrant * No * @threadSafe * Yes * * @retval Pointer to first compression instance handle or NULL if no * compression instances in the system. * *************************************************************************/ CpaInstanceHandle dcGetFirstHandle(void); #endif /*SAL_TYPES_COMPRESSION_H_*/ diff --git a/sys/dev/qat/qat_api/firmware/include/icp_qat_fw.h b/sys/dev/qat/qat_api/firmware/include/icp_qat_fw.h index 7fe1bc3d1056..1280f3c1c8a9 100644 --- a/sys/dev/qat/qat_api/firmware/include/icp_qat_fw.h +++ b/sys/dev/qat/qat_api/firmware/include/icp_qat_fw.h @@ -1,1333 +1,1437 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright(c) 2007-2022 Intel Corporation */ /* $FreeBSD$ */ /** ***************************************************************************** * @file icp_qat_fw.h * @defgroup icp_qat_fw_comn ICP QAT FW Common Processing Definitions * @ingroup icp_qat_fw * * @description * This file documents the common interfaces that the QAT FW running on * the QAT AE exports. This common layer is used by a number of services * to export content processing services. * *****************************************************************************/ #ifndef _ICP_QAT_FW_H_ #define _ICP_QAT_FW_H_ /* * ============================== * General Notes on the Interface */ /* * * ============================== * * Introduction * * Data movement and slice chaining * * Endianness * - Unless otherwise stated, all structures are defined in LITTLE ENDIAN * MODE * * Alignment * - In general all data structures provided to a request should be aligned * on the 64 byte boundary so as to allow optimal memory transfers. 
At the * minimum they must be aligned to the 8 byte boundary * * Sizes * Quad words = 8 bytes * * Terminology * * ============================== */ /* ****************************************************************************** * Include public/global header files ****************************************************************************** */ #include "icp_qat_hw.h" /* Big assumptions that both bitpos and mask are constants */ #define QAT_FIELD_SET(flags, val, bitpos, mask) \ (flags) = (((flags) & (~((mask) << (bitpos)))) | \ (((val) & (mask)) << (bitpos))) #define QAT_FIELD_GET(flags, bitpos, mask) (((flags) >> (bitpos)) & (mask)) #define QAT_FLAG_SET(flags, val, bitpos) \ ((flags) = (((flags) & (~(1 << (bitpos)))) | (((val)&1) << (bitpos)))) #define QAT_FLAG_CLEAR(flags, bitpos) (flags) = ((flags) & (~(1 << (bitpos)))) #define QAT_FLAG_GET(flags, bitpos) (((flags) >> (bitpos)) & 1) /**< @ingroup icp_qat_fw_comn * Default request and response ring size in bytes */ #define ICP_QAT_FW_REQ_DEFAULT_SZ 128 #define ICP_QAT_FW_RESP_DEFAULT_SZ 32 #define ICP_QAT_FW_COMN_ONE_BYTE_SHIFT 8 #define ICP_QAT_FW_COMN_SINGLE_BYTE_MASK 0xFF /**< @ingroup icp_qat_fw_comn * Common Request - Block sizes definitions in multiples of individual long * words */ #define ICP_QAT_FW_NUM_LONGWORDS_1 1 #define ICP_QAT_FW_NUM_LONGWORDS_2 2 #define ICP_QAT_FW_NUM_LONGWORDS_3 3 #define ICP_QAT_FW_NUM_LONGWORDS_4 4 #define ICP_QAT_FW_NUM_LONGWORDS_5 5 #define ICP_QAT_FW_NUM_LONGWORDS_6 6 #define ICP_QAT_FW_NUM_LONGWORDS_7 7 #define ICP_QAT_FW_NUM_LONGWORDS_10 10 #define ICP_QAT_FW_NUM_LONGWORDS_13 13 /**< @ingroup icp_qat_fw_comn * Definition of the associated service Id for NULL service type. * Note: the response is expected to use ICP_QAT_FW_COMN_RESP_SERV_CPM_FW */ #define ICP_QAT_FW_NULL_REQ_SERV_ID 1 /** ***************************************************************************** * @ingroup icp_qat_fw_comn * Definition of the firmware interface service users, for * responses. * @description * Enumeration which is used to indicate the ids of the services * for responses using the external firmware interfaces. 
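* * For example, a response dispatcher may check the originating service * before decoding further (a sketch; resp is assumed to be an * icp_qat_fw_comn_resp_t, defined later in this file): * * if (resp.comn_hdr.service_id == ICP_QAT_FW_COMN_RESP_SERV_CPM_FW) * ... decode as a CPM FW service response ...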
* *****************************************************************************/ typedef enum { ICP_QAT_FW_COMN_RESP_SERV_NULL, /**< NULL service id type */ ICP_QAT_FW_COMN_RESP_SERV_CPM_FW, /**< CPM FW Service ID */ ICP_QAT_FW_COMN_RESP_SERV_DELIMITER /**< Delimiter service id type */ } icp_qat_fw_comn_resp_serv_id_t; /** ***************************************************************************** * @ingroup icp_qat_fw_comn * Definition of the request types * @description * Enumeration which is used to indicate the ids of the request * types used in each of the external firmware interfaces * *****************************************************************************/ typedef enum { ICP_QAT_FW_COMN_REQ_NULL = 0, /**< NULL request type */ ICP_QAT_FW_COMN_REQ_CPM_FW_PKE = 3, /**< CPM FW PKE Request */ ICP_QAT_FW_COMN_REQ_CPM_FW_LA = 4, /**< CPM FW Lookaside Request */ ICP_QAT_FW_COMN_REQ_CPM_FW_DMA = 7, /**< CPM FW DMA Request */ ICP_QAT_FW_COMN_REQ_CPM_FW_COMP = 9, /**< CPM FW Compression Request */ ICP_QAT_FW_COMN_REQ_DELIMITER /**< End delimiter */ } icp_qat_fw_comn_request_id_t; /* ========================================================================= */ /* QAT FW REQUEST STRUCTURES */ /* ========================================================================= */ /** ***************************************************************************** * @ingroup icp_qat_fw_comn * Common request flags type * * @description * Definition of the common request flags. * *****************************************************************************/ typedef uint8_t icp_qat_fw_comn_flags; /** ***************************************************************************** * @ingroup icp_qat_fw_comn * Common request - Service specific flags type * * @description * Definition of the common request service specific flags. * *****************************************************************************/ typedef uint16_t icp_qat_fw_serv_specif_flags; /** ***************************************************************************** * @ingroup icp_qat_fw_comn * Common request - Extended service specific flags type * * @description * Definition of the common request extended service specific flags. * *****************************************************************************/ typedef uint8_t icp_qat_fw_ext_serv_specif_flags; /** ***************************************************************************** * @ingroup icp_qat_fw_comn * Definition of the common QAT FW request content descriptor field - * points to the content descriptor parameters or itself contains service- * specific data. Also specifies content descriptor parameter size. * Contains reserved fields. * @description * Common section of the request used across all of the services exposed * by the QAT FW. Each of the services inherit these common fields * *****************************************************************************/ typedef union icp_qat_fw_comn_req_hdr_cd_pars_s { /**< LWs 2-5 */ struct { uint64_t content_desc_addr; /**< Address of the content descriptor */ uint16_t content_desc_resrvd1; /**< Content descriptor reserved field */ uint8_t content_desc_params_sz; /**< Size of the content descriptor parameters in quad words. * These * parameters describe the session setup configuration info for * the * slices that this request relies upon i.e. the configuration * word and * cipher key needed by the cipher slice if there is a request * for * cipher processing. 
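* * For example, session setup code might fill this union as follows * (illustrative only; req, cd_phys_addr and cd_size_qw are assumed names): * * req.cd_pars.s.content_desc_addr = cd_phys_addr; * req.cd_pars.s.content_desc_params_sz = cd_size_qw; * * where req is an icp_qat_fw_comn_req_t (defined later in this file) and * cd_size_qw is the parameter block size in quad words.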
*/ uint8_t content_desc_hdr_resrvd2; /**< Content descriptor reserved field */ uint32_t content_desc_resrvd3; /**< Content descriptor reserved field */ } s; struct { uint32_t serv_specif_fields[ICP_QAT_FW_NUM_LONGWORDS_4]; } s1; } icp_qat_fw_comn_req_hdr_cd_pars_t; /** ***************************************************************************** * @ingroup icp_qat_fw_comn * Definition of the common QAT FW request middle block. * @description * Common section of the request used across all of the services exposed * by the QAT FW. Each of the services inherit these common fields * *****************************************************************************/ typedef struct icp_qat_fw_comn_req_mid_s { /**< LWs 6-13 */ uint64_t opaque_data; /**< Opaque data passed unmodified from the request to response messages * by * firmware (fw) */ uint64_t src_data_addr; /**< Generic definition of the source data supplied to the QAT AE. The * common flags are used to further describe the attributes of this * field */ uint64_t dest_data_addr; /**< Generic definition of the destination data supplied to the QAT AE. * The * common flags are used to further describe the attributes of this * field */ uint32_t src_length; /**< Length of source flat buffer in case src buffer * type is flat */ uint32_t dst_length; /**< Length of destination flat buffer in case dst buffer * type is flat */ } icp_qat_fw_comn_req_mid_t; /** ***************************************************************************** * @ingroup icp_qat_fw_comn * Definition of the common QAT FW request content descriptor control * block. * * @description * Service specific section of the request used across all of the services * exposed by the QAT FW. Each of the services populates this block * uniquely. Refer to the service-specific header structures e.g. * 'icp_qat_fw_cipher_hdr_s' (for Cipher) etc. * *****************************************************************************/ typedef struct icp_qat_fw_comn_req_cd_ctrl_s { /**< LWs 27-31 */ uint32_t content_desc_ctrl_lw[ICP_QAT_FW_NUM_LONGWORDS_5]; } icp_qat_fw_comn_req_cd_ctrl_t; /** ***************************************************************************** * @ingroup icp_qat_fw_comn * Definition of the common QAT FW request header. * @description * Common section of the request used across all of the services exposed * by the QAT FW. Each of the services inherit these common fields. The * reserved field of 7 bits and the service command Id field are all * service-specific fields, along with the service specific flags. * *****************************************************************************/ typedef struct icp_qat_fw_comn_req_hdr_s { /**< LW0 */ uint8_t resrvd1; /**< reserved field */ uint8_t service_cmd_id; /**< Service Command Id - this field is service-specific * Please use service-specific command Id here e.g. Crypto Command Id * or Compression Command Id etc. */ uint8_t service_type; /**< Service type */ uint8_t hdr_flags; /**< This represents a flags field for the Service Request. * The most significant bit is the 'valid' flag and the only * one used. All remaining bit positions are unused and * are therefore reserved and need to be set to 0. */ /**< LW1 */ icp_qat_fw_serv_specif_flags serv_specif_flags; /**< Common Request service-specific flags * e.g.
Symmetric Crypto Command Flags */ icp_qat_fw_comn_flags comn_req_flags; /**< Common Request Flags consisting of * - 6 reserved bits, * - 1 Content Descriptor field type bit and * - 1 Source/destination pointer type bit */ icp_qat_fw_ext_serv_specif_flags extended_serv_specif_flags; /**< An extension of serv_specif_flags */ } icp_qat_fw_comn_req_hdr_t; /** ***************************************************************************** * @ingroup icp_qat_fw_comn * Definition of the common QAT FW request parameter field. * * @description * Service specific section of the request used across all of the services * exposed by the QAT FW. Each of the services populates this block * uniquely. Refer to service-specific header structures e.g. * 'icp_qat_fw_comn_req_cipher_rqpars_s' (for Cipher) etc. * *****************************************************************************/ typedef struct icp_qat_fw_comn_req_rqpars_s { /**< LWs 14-26 */ uint32_t serv_specif_rqpars_lw[ICP_QAT_FW_NUM_LONGWORDS_13]; } icp_qat_fw_comn_req_rqpars_t; /** ***************************************************************************** * @ingroup icp_qat_fw_comn * Definition of the common request structure with service specific * fields * @description * This is a definition of the full qat request structure used by all * services. Each service is free to use the service fields in its own * way. This struct is useful as a message passing argument before the * service contained within the request is determined. * *****************************************************************************/ typedef struct icp_qat_fw_comn_req_s { /**< LWs 0-1 */ icp_qat_fw_comn_req_hdr_t comn_hdr; /**< Common request header */ /**< LWs 2-5 */ icp_qat_fw_comn_req_hdr_cd_pars_t cd_pars; /**< Common Request content descriptor field which points either to a * content descriptor * parameter block or contains the service-specific data itself. */ /**< LWs 6-13 */ icp_qat_fw_comn_req_mid_t comn_mid; /**< Common request middle section */ /**< LWs 14-26 */ icp_qat_fw_comn_req_rqpars_t serv_specif_rqpars; /**< Common request service-specific parameter field */ /**< LWs 27-31 */ icp_qat_fw_comn_req_cd_ctrl_t cd_ctrl; /**< Common request content descriptor control block - * this field is service-specific */ } icp_qat_fw_comn_req_t; /* ========================================================================= */ /* QAT FW RESPONSE STRUCTURES */ /* ========================================================================= */ /** ***************************************************************************** * @ingroup icp_qat_fw_comn * Error code field * * @description * Overloaded field with 8 bit common error field or two * 8 bit compression error fields for compression and translator slices * *****************************************************************************/ typedef union icp_qat_fw_comn_error_s { struct { uint8_t resrvd; /**< 8 bit reserved field */ uint8_t comn_err_code; /**< 8 bit common error code */ } s; /**< Structure which is used for non-compression responses */ struct { uint8_t xlat_err_code; /**< 8 bit translator error field */ uint8_t cmp_err_code; /**< 8 bit compression error field */ } s1; /** Structure which is used for compression responses */ } icp_qat_fw_comn_error_t; /** ***************************************************************************** * @ingroup icp_qat_fw_comn * Definition of the common QAT FW response header. 
* @description * This section of the response is common across all of the services * that generate a firmware interface response * *****************************************************************************/ typedef struct icp_qat_fw_comn_resp_hdr_s { /**< LW0 */ uint8_t resrvd1; /**< Reserved field - this field is service-specific - * Note: The Response Destination Id has been removed * from first QWord */ uint8_t service_id; /**< Service Id returned by service block */ uint8_t response_type; /**< Response type - copied from the request to * the response message */ uint8_t hdr_flags; /**< This represents a flags field for the Response. * Bit<7> = 'valid' flag * Bit<6> = 'CNV' flag indicating that CNV was executed * on the current request * Bit<5> = 'CNVNR' flag indicating that a recovery happened * on the current request following a CNV error * All remaining bits are unused and are therefore reserved. * They must be set to 0. */ /**< LW 1 */ icp_qat_fw_comn_error_t comn_error; /**< This field is overloaded to allow for one 8 bit common error field * or two 8 bit error fields from compression and translator */ uint8_t comn_status; /**< Status field which specifies which slice(s) report an error */ uint8_t cmd_id; /**< Command Id - passed from the request to the response message */ } icp_qat_fw_comn_resp_hdr_t; /** ***************************************************************************** * @ingroup icp_qat_fw_comn * Definition of the common response structure with service specific * fields * @description * This is a definition of the full qat response structure used by all * services. * *****************************************************************************/ typedef struct icp_qat_fw_comn_resp_s { /**< LWs 0-1 */ icp_qat_fw_comn_resp_hdr_t comn_hdr; /**< Common header fields */ /**< LWs 2-3 */ uint64_t opaque_data; /**< Opaque data passed from the request to the response message */ /**< LWs 4-7 */ uint32_t resrvd[ICP_QAT_FW_NUM_LONGWORDS_4]; /**< Reserved */ } icp_qat_fw_comn_resp_t; /* ========================================================================= */ /* MACRO DEFINITIONS */ /* ========================================================================= */ /* Common QAT FW request header - structure of LW0 - * + ===== + ---- + ----------- + ----------- + ----------- + ----------- + - * | Bit | 31 | 30 - 24 | 21 - 16 | 15 - 8 | 7 - 0 | - * + ===== + ---- + ----------- + ----------- + ----------- + ----------- + - * | Flags | V | Reserved | Serv Type | Serv Cmd Id | Reserved | - * + ===== + ---- + ----------- + ----------- + ----------- + ----------- + -*/ + * + ===== + ------- + ----------- + ----------- + ----------- + -------- + + * | Bit | 31/30 | 29 - 24 | 21 - 16 | 15 - 8 | 7 - 0 | + * + ===== + ------- + ----------- + ----------- + ----------- + -------- + + * | Flags | V/Gen | Reserved | Serv Type | Serv Cmd Id | Rsv | + * + ===== + ------- + ----------- + ----------- + ----------- + -------- + + */ /**< @ingroup icp_qat_fw_comn * Definition of the setting of the header's valid flag */ #define ICP_QAT_FW_COMN_REQ_FLAG_SET 1 /**< @ingroup icp_qat_fw_comn * Definition of the clearing of the header's valid flag */ #define ICP_QAT_FW_COMN_REQ_FLAG_CLR 0 /**< @ingroup icp_qat_fw_comn * Macros defining the bit position and mask of the 'valid' flag, within the * hdr_flags field of LW0 (service request and response) */ #define ICP_QAT_FW_COMN_VALID_FLAG_BITPOS 7 #define ICP_QAT_FW_COMN_VALID_FLAG_MASK 0x1 + +/**< @ingroup icp_qat_fw_comn + * Macros defining the bit
position and mask of the 'generation' flag, within + * the hdr_flags field of LW0 (service request and response) */ +#define ICP_QAT_FW_COMN_GEN_FLAG_BITPOS 6 +#define ICP_QAT_FW_COMN_GEN_FLAG_MASK 0x1 +/**< @ingroup icp_qat_fw_comn + * The request is targeted for QAT2.0 */ +#define ICP_QAT_FW_COMN_GEN_2 1 +/**< @ingroup icp_qat_fw_comn +* The request is targeted for QAT1.x. QAT2.0 FW will return + 'unsupported request' if GEN1 request type is sent to QAT2.0 FW */ +#define ICP_QAT_FW_COMN_GEN_1 0 + #define ICP_QAT_FW_COMN_HDR_RESRVD_FLD_MASK 0x7F /* Common QAT FW response header - structure of LW0 * + ===== + --- + --- + ----- + ----- + --------- + ----------- + ----- + * | Bit | 31 | 30 | 29 | 28-24 | 21 - 16 | 15 - 8 | 7-0 | * + ===== + --- + ----+ ----- + ----- + --------- + ----------- + ----- + * | Flags | V | CNV | CNVNR | Rsvd | Serv Type | Serv Cmd Id | Rsvd | * + ===== + --- + --- + ----- + ----- + --------- + ----------- + ----- + */ /**< @ingroup icp_qat_fw_comn * Macros defining the bit position and mask of 'CNV' flag * within the hdr_flags field of LW0 (service response only) */ #define ICP_QAT_FW_COMN_CNV_FLAG_BITPOS 6 #define ICP_QAT_FW_COMN_CNV_FLAG_MASK 0x1 /**< @ingroup icp_qat_fw_comn * Macros defining the bit position and mask of CNVNR flag * within the hdr_flags field of LW0 (service response only) */ #define ICP_QAT_FW_COMN_CNVNR_FLAG_BITPOS 5 #define ICP_QAT_FW_COMN_CNVNR_FLAG_MASK 0x1 +/**< @ingroup icp_qat_fw_comn + * Macros defining the bit position and mask of Stored Blocks flag + * within the hdr_flags field of LW0 (service response only) + */ +#define ICP_QAT_FW_COMN_ST_BLK_FLAG_BITPOS 4 +#define ICP_QAT_FW_COMN_ST_BLK_FLAG_MASK 0x1 + /** ****************************************************************************** * @ingroup icp_qat_fw_comn * * @description * Macro for extraction of Service Type Field * * @param icp_qat_fw_comn_req_hdr_t Structure 'icp_qat_fw_comn_req_hdr_t' * to extract the Service Type Field * *****************************************************************************/ #define ICP_QAT_FW_COMN_OV_SRV_TYPE_GET(icp_qat_fw_comn_req_hdr_t) \ icp_qat_fw_comn_req_hdr_t.service_type /** ****************************************************************************** * @ingroup icp_qat_fw_comn * * @description * Macro for setting of Service Type Field * * @param 'icp_qat_fw_comn_req_hdr_t' structure to set the Service * Type Field * @param val Value of the Service Type Field * *****************************************************************************/ #define ICP_QAT_FW_COMN_OV_SRV_TYPE_SET(icp_qat_fw_comn_req_hdr_t, val) \ icp_qat_fw_comn_req_hdr_t.service_type = val /** ****************************************************************************** * @ingroup icp_qat_fw_comn * * @description * Macro for extraction of Service Command Id Field * * @param icp_qat_fw_comn_req_hdr_t Structure 'icp_qat_fw_comn_req_hdr_t' * to extract the Service Command Id Field * *****************************************************************************/ #define ICP_QAT_FW_COMN_OV_SRV_CMD_ID_GET(icp_qat_fw_comn_req_hdr_t) \ icp_qat_fw_comn_req_hdr_t.service_cmd_id /** ****************************************************************************** * @ingroup icp_qat_fw_comn * * @description * Macro for setting of Service Command Id Field * * @param 'icp_qat_fw_comn_req_hdr_t' structure to set the * Service Command Id Field * @param val Value of the Service Command Id Field * *****************************************************************************/ #define 
ICP_QAT_FW_COMN_OV_SRV_CMD_ID_SET(icp_qat_fw_comn_req_hdr_t, val) \ icp_qat_fw_comn_req_hdr_t.service_cmd_id = val /** ****************************************************************************** * @ingroup icp_qat_fw_comn * * @description * Extract the valid flag from the request or response's header flags. * * @param hdr_t Request or Response 'hdr_t' structure to extract the valid bit * from the 'hdr_flags' field. * *****************************************************************************/ #define ICP_QAT_FW_COMN_HDR_VALID_FLAG_GET(hdr_t) \ ICP_QAT_FW_COMN_VALID_FLAG_GET(hdr_t.hdr_flags) /** ****************************************************************************** * @ingroup icp_qat_fw_comn * * @description * Extract the CNVNR flag from the header flags in the response only. * * @param hdr_t Response 'hdr_t' structure to extract the CNVNR bit * from the 'hdr_flags' field. * *****************************************************************************/ #define ICP_QAT_FW_COMN_HDR_CNVNR_FLAG_GET(hdr_flags) \ QAT_FIELD_GET(hdr_flags, \ ICP_QAT_FW_COMN_CNVNR_FLAG_BITPOS, \ ICP_QAT_FW_COMN_CNVNR_FLAG_MASK) /** ****************************************************************************** * @ingroup icp_qat_fw_comn * * @description * Extract the CNV flag from the header flags in the response only. * * @param hdr_t Response 'hdr_t' structure to extract the CNV bit * from the 'hdr_flags' field. * *****************************************************************************/ #define ICP_QAT_FW_COMN_HDR_CNV_FLAG_GET(hdr_flags) \ QAT_FIELD_GET(hdr_flags, \ ICP_QAT_FW_COMN_CNV_FLAG_BITPOS, \ ICP_QAT_FW_COMN_CNV_FLAG_MASK) /** ****************************************************************************** * @ingroup icp_qat_fw_comn * * @description * Set the valid bit in the request's header flags. * * @param hdr_t Request or Response 'hdr_t' structure to set the valid bit * @param val Value of the valid bit flag. * *****************************************************************************/ #define ICP_QAT_FW_COMN_HDR_VALID_FLAG_SET(hdr_t, val) \ ICP_QAT_FW_COMN_VALID_FLAG_SET(hdr_t, val) /** ****************************************************************************** * @ingroup icp_qat_fw_comn * * @description * Common macro to extract the valid flag from the header flags field * within the header structure (request or response). * * @param hdr_t Structure (request or response) to extract the * valid bit from the 'hdr_flags' field. * *****************************************************************************/ #define ICP_QAT_FW_COMN_VALID_FLAG_GET(hdr_flags) \ QAT_FIELD_GET(hdr_flags, \ ICP_QAT_FW_COMN_VALID_FLAG_BITPOS, \ ICP_QAT_FW_COMN_VALID_FLAG_MASK) +/** + ****************************************************************************** + * @ingroup icp_qat_fw_comn + * + * @description + * Extract the Stored Block flag from the header flags in the + * response only. + * + * @param hdr_flags Response 'hdr' structure to extract the + * Stored Block bit from the 'hdr_flags' field. + * + *****************************************************************************/ +#define ICP_QAT_FW_COMN_HDR_ST_BLK_FLAG_GET(hdr_flags) \ + QAT_FIELD_GET(hdr_flags, \ + ICP_QAT_FW_COMN_ST_BLK_FLAG_BITPOS, \ + ICP_QAT_FW_COMN_ST_BLK_FLAG_MASK) + +/** + ****************************************************************************** + * @ingroup icp_qat_fw_comn + * + * @description + * Set the Stored Block bit in the response's header flags. 
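+ * + * Illustrative use together with the getter above (resp_hdr is an + * assumed local of type icp_qat_fw_comn_resp_hdr_t): + * + * ICP_QAT_FW_COMN_HDR_ST_BLK_FLAG_SET(resp_hdr, 1); + * if (ICP_QAT_FW_COMN_HDR_ST_BLK_FLAG_GET(resp_hdr.hdr_flags)) + * ... a stored block was emitted for this request ...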
+ * + * @param hdr_t Response 'hdr_t' structure to set the ST_BLK bit + * @param val Value of the ST_BLK bit flag. + * + *****************************************************************************/ +#define ICP_QAT_FW_COMN_HDR_ST_BLK_FLAG_SET(hdr_t, val) \ + QAT_FIELD_SET((hdr_t.hdr_flags), \ + (val), \ + ICP_QAT_FW_COMN_ST_BLK_FLAG_BITPOS, \ + ICP_QAT_FW_COMN_ST_BLK_FLAG_MASK) + +/** + ****************************************************************************** + * @ingroup icp_qat_fw_comn + * + * @description + * Set the generation bit in the request's header flags. + * + * @param hdr_t Request or Response 'hdr_t' structure to set the gen bit + * @param val Value of the generation bit flag. + * + *****************************************************************************/ +#define ICP_QAT_FW_COMN_HDR_GENERATION_FLAG_SET(hdr_t, val) \ + ICP_QAT_FW_COMN_GENERATION_FLAG_SET(hdr_t, val) + +/** +****************************************************************************** +* @ingroup icp_qat_fw_comn +* +* @description +* Common macro to set the generation bit in the common header +* +* @param hdr_t Structure (request or response) containing the header +* flags field, to allow the generation bit to be set. +* @param val Value of the generation bit flag. +* +*****************************************************************************/ +#define ICP_QAT_FW_COMN_GENERATION_FLAG_SET(hdr_t, val) \ + QAT_FIELD_SET((hdr_t.hdr_flags), \ + (val), \ + ICP_QAT_FW_COMN_GEN_FLAG_BITPOS, \ + ICP_QAT_FW_COMN_GEN_FLAG_MASK) + +/** +****************************************************************************** +* @ingroup icp_qat_fw_comn +* +* @description +* Common macro to extract the generation flag from the header flags field +* within the header structure (request or response). +* +* @param hdr_t Structure (request or response) to extract the +* generation bit from the 'hdr_flags' field. +* +*****************************************************************************/ + +#define ICP_QAT_FW_COMN_HDR_GENERATION_FLAG_GET(hdr_flags) \ + QAT_FIELD_GET(hdr_flags, \ + ICP_QAT_FW_COMN_GEN_FLAG_BITPOS, \ + ICP_QAT_FW_COMN_GEN_FLAG_MASK) /** ****************************************************************************** * @ingroup icp_qat_fw_comn * * @description * Common macro to extract the remaining reserved flags from the header flags field within the header structure (request or response). * * @param hdr_t Structure (request or response) to extract the * remaining bits from the 'hdr_flags' field (excluding the * valid flag). * *****************************************************************************/ #define ICP_QAT_FW_COMN_HDR_RESRVD_FLD_GET(hdr_flags) \ (hdr_flags & ICP_QAT_FW_COMN_HDR_RESRVD_FLD_MASK) /** ****************************************************************************** * @ingroup icp_qat_fw_comn * * @description * Common macro to set the valid bit in the header flags field within * the header structure (request or response). * * @param hdr_t Structure (request or response) containing the header * flags field, to allow the valid bit to be set. * @param val Value of the valid bit flag. 
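* * Taken together with the generation macros above, gen-aware request setup * reduces to a sketch like the following (req is assumed to be an * icp_qat_fw_comn_req_t): * * ICP_QAT_FW_COMN_VALID_FLAG_SET(req.comn_hdr, ICP_QAT_FW_COMN_REQ_FLAG_SET); * ICP_QAT_FW_COMN_HDR_GENERATION_FLAG_SET(req.comn_hdr, ICP_QAT_FW_COMN_GEN_2);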
* *****************************************************************************/ #define ICP_QAT_FW_COMN_VALID_FLAG_SET(hdr_t, val) \ QAT_FIELD_SET((hdr_t.hdr_flags), \ (val), \ ICP_QAT_FW_COMN_VALID_FLAG_BITPOS, \ ICP_QAT_FW_COMN_VALID_FLAG_MASK) /** ****************************************************************************** * @ingroup icp_qat_fw_comn * * @description * Macro that must be used when building the common header flags. * Note that all reserved field bits 0-6 (LW0) need to be forced to 0. * * @param valid Value of the valid flag *****************************************************************************/ #define ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(valid) \ (((valid)&ICP_QAT_FW_COMN_VALID_FLAG_MASK) \ << ICP_QAT_FW_COMN_VALID_FLAG_BITPOS) /* * < @ingroup icp_qat_fw_comn * Common Request Flags Definition * The bit offsets below are within the flags field. These are NOT relative to * the memory word. Unused fields, e.g. reserved bits, must be zeroed. * * + ===== + ------ + --- + --- + --- + --- + --- + --- + --- + --- + * | Bits [15:8] | 15 | 14 | 13 | 12 | 11 | 10 | 9 | 8 | * + ===== + ------ + --- + --- + --- + --- + --- + --- + --- + --- + * | Flags[15:8] | Rsv | Rsv | Rsv | Rsv | Rsv | Rsv | Rsv | Rsv | * + ===== + ------ + --- + --- + --- + --- + --- + --- + --- + --- + * | Bits [7:0] | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0 | * + ===== + ------ + --- + --- + --- + --- + --- + --- + --- + --- + * | Flags [7:0] | Rsv | Rsv | Rsv | Rsv | Rsv | BnP | Cdt | Ptr | * + ===== + ------ + --- + --- + --- + --- + --- + --- + --- + --- + */ #define QAT_COMN_PTR_TYPE_BITPOS 0 /**< @ingroup icp_qat_fw_comn * Common Request Flags - Starting bit position indicating * Src&Dst Buffer Pointer type */ #define QAT_COMN_PTR_TYPE_MASK 0x1 /**< @ingroup icp_qat_fw_comn * Common Request Flags - One bit mask used to determine * Src&Dst Buffer Pointer type */ #define QAT_COMN_CD_FLD_TYPE_BITPOS 1 /**< @ingroup icp_qat_fw_comn * Common Request Flags - Starting bit position indicating * CD Field type */ #define QAT_COMN_CD_FLD_TYPE_MASK 0x1 /**< @ingroup icp_qat_fw_comn * Common Request Flags - One bit mask used to determine * CD Field type */ #define QAT_COMN_BNP_ENABLED_BITPOS 2 /**< @ingroup icp_qat_fw_comn * Common Request Flags - Starting bit position indicating * the source buffer contains a batch of requests. If this * bit is set, the source buffer is of type Batch And Pack OpData List * and the Ptr Type Bit only applies to the Destination buffer. */ #define QAT_COMN_BNP_ENABLED_MASK 0x1 /**< @ingroup icp_qat_fw_comn * Batch And Pack Enabled Flag Mask - One bit mask used to determine * whether the source buffer is in Batch and Pack OpData Link List Mode.
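* * (Illustrative check using the getter defined further below: if * ICP_QAT_FW_COMN_BNP_ENABLED_GET(flags) is set, the source buffer is a * Batch And Pack OpData list and the pointer type bit then describes the * destination buffer only.)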
*/ /* ========================================================================= */ /* Pointer Type Flag definitions */ /* ========================================================================= */ #define QAT_COMN_PTR_TYPE_FLAT 0x0 /**< @ingroup icp_qat_fw_comn * Constant value indicating Src&Dst Buffer Pointer type is flat * If Batch and Pack mode is enabled, only applies to Destination buffer.*/ #define QAT_COMN_PTR_TYPE_SGL 0x1 /**< @ingroup icp_qat_fw_comn * Constant value indicating Src&Dst Buffer Pointer type is SGL type * If Batch and Pack mode is enabled, only applies to Destination buffer.*/ #define QAT_COMN_PTR_TYPE_BATCH 0x2 /**< @ingroup icp_qat_fw_comn * Constant value indicating Src is a batch request * and Dst Buffer Pointer type is SGL type */ /* ========================================================================= */ /* CD Field Flag definitions */ /* ========================================================================= */ #define QAT_COMN_CD_FLD_TYPE_64BIT_ADR 0x0 /**< @ingroup icp_qat_fw_comn * Constant value indicating CD Field contains 64-bit address */ #define QAT_COMN_CD_FLD_TYPE_16BYTE_DATA 0x1 /**< @ingroup icp_qat_fw_comn * Constant value indicating CD Field contains 16 bytes of setup data */ /* ========================================================================= */ /* Batch And Pack Enable/Disable Definitions */ /* ========================================================================= */ #define QAT_COMN_BNP_ENABLED 0x1 /**< @ingroup icp_qat_fw_comn * Constant value indicating Source buffer will point to Batch And Pack OpData * List */ #define QAT_COMN_BNP_DISABLED 0x0 /**< @ingroup icp_qat_fw_comn * Constant value indicating Source buffer will not point to Batch And Pack * OpData List */ /** ****************************************************************************** * @ingroup icp_qat_fw_comn * * @description * Macro that must be used when building the common request flags (for all * requests but comp BnP). * Note that all reserved field bits 2-15 (LW1) need to be forced to 0. * * @param ptr Value of the pointer type flag * @param cdt Value of the cd field type flag *****************************************************************************/ #define ICP_QAT_FW_COMN_FLAGS_BUILD(cdt, ptr) \ ((((cdt)&QAT_COMN_CD_FLD_TYPE_MASK) << QAT_COMN_CD_FLD_TYPE_BITPOS) | \ (((ptr)&QAT_COMN_PTR_TYPE_MASK) << QAT_COMN_PTR_TYPE_BITPOS)) /** ****************************************************************************** * @ingroup icp_qat_fw_comn * * @description * Macro that must be used when building the common request flags for comp * BnP service. * Note that all reserved field bits 3-15 (LW1) need to be forced to 0.
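* * e.g. (illustrative values): * * icp_qat_fw_comn_flags flags = * ICP_QAT_FW_COMN_FLAGS_BUILD_BNP(QAT_COMN_CD_FLD_TYPE_64BIT_ADR, * QAT_COMN_PTR_TYPE_SGL, * QAT_COMN_BNP_ENABLED);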
* * @param ptr Value of the pointer type flag * @param cdt Value of the cd field type flag * @param bnp Value of the bnp enabled flag *****************************************************************************/ #define ICP_QAT_FW_COMN_FLAGS_BUILD_BNP(cdt, ptr, bnp) \ ((((cdt)&QAT_COMN_CD_FLD_TYPE_MASK) << QAT_COMN_CD_FLD_TYPE_BITPOS) | \ (((ptr)&QAT_COMN_PTR_TYPE_MASK) << QAT_COMN_PTR_TYPE_BITPOS) | \ (((bnp)&QAT_COMN_BNP_ENABLED_MASK) << QAT_COMN_BNP_ENABLED_BITPOS)) /** ****************************************************************************** * @ingroup icp_qat_fw_comn * * @description * Macro for extraction of the pointer type bit from the common flags * * @param flags Flags to extract the pointer type bit from * *****************************************************************************/ #define ICP_QAT_FW_COMN_PTR_TYPE_GET(flags) \ QAT_FIELD_GET(flags, QAT_COMN_PTR_TYPE_BITPOS, QAT_COMN_PTR_TYPE_MASK) /** ****************************************************************************** * @ingroup icp_qat_fw_comn * * @description * Macro for extraction of the cd field type bit from the common flags * * @param flags Flags to extract the cd field type bit from * *****************************************************************************/ #define ICP_QAT_FW_COMN_CD_FLD_TYPE_GET(flags) \ QAT_FIELD_GET(flags, \ QAT_COMN_CD_FLD_TYPE_BITPOS, \ QAT_COMN_CD_FLD_TYPE_MASK) /** ****************************************************************************** * @ingroup icp_qat_fw_comn * * @description * Macro for extraction of the bnp field type bit from the common flags * * @param flags Flags to extract the bnp field type bit from * *****************************************************************************/ #define ICP_QAT_FW_COMN_BNP_ENABLED_GET(flags) \ QAT_FIELD_GET(flags, \ QAT_COMN_BNP_ENABLED_BITPOS, \ QAT_COMN_BNP_ENABLED_MASK) /** ****************************************************************************** * @ingroup icp_qat_fw_comn * * @description * Macro for setting the pointer type bit in the common flags * * @param flags Flags in which Pointer Type bit will be set * @param val Value of the bit to be set in flags * *****************************************************************************/ #define ICP_QAT_FW_COMN_PTR_TYPE_SET(flags, val) \ QAT_FIELD_SET(flags, \ val, \ QAT_COMN_PTR_TYPE_BITPOS, \ QAT_COMN_PTR_TYPE_MASK) /** ****************************************************************************** * @ingroup icp_qat_fw_comn * * @description * Macro for setting the cd field type bit in the common flags * * @param flags Flags in which Cd Field Type bit will be set * @param val Value of the bit to be set in flags * *****************************************************************************/ #define ICP_QAT_FW_COMN_CD_FLD_TYPE_SET(flags, val) \ QAT_FIELD_SET(flags, \ val, \ QAT_COMN_CD_FLD_TYPE_BITPOS, \ QAT_COMN_CD_FLD_TYPE_MASK) /** ****************************************************************************** * @ingroup icp_qat_fw_comn * * @description * Macro for setting the bnp field type bit in the common flags * * @param flags Flags in which Bnp Field Type bit will be set * @param val Value of the bit to be set in flags * *****************************************************************************/ #define ICP_QAT_FW_COMN_BNP_ENABLE_SET(flags, val) \ QAT_FIELD_SET(flags, \ val, \ QAT_COMN_BNP_ENABLED_BITPOS, \ QAT_COMN_BNP_ENABLED_MASK) /** ****************************************************************************** * @ingroup icp_qat_fw_comn * *
@description * Macros using the bit position and mask to set/extract the next * and current id nibbles within the next_curr_id field of the * content descriptor header block. Note that these are defined * in the common header file, as they are used by compression, cipher * and authentication. * * @param cd_ctrl_hdr_t Content descriptor control block header pointer. * @param val Value of the field being set. * *****************************************************************************/ #define ICP_QAT_FW_COMN_NEXT_ID_BITPOS 4 #define ICP_QAT_FW_COMN_NEXT_ID_MASK 0xF0 #define ICP_QAT_FW_COMN_CURR_ID_BITPOS 0 #define ICP_QAT_FW_COMN_CURR_ID_MASK 0x0F #define ICP_QAT_FW_COMN_NEXT_ID_GET(cd_ctrl_hdr_t) \ ((((cd_ctrl_hdr_t)->next_curr_id) & ICP_QAT_FW_COMN_NEXT_ID_MASK) >> \ (ICP_QAT_FW_COMN_NEXT_ID_BITPOS)) #define ICP_QAT_FW_COMN_NEXT_ID_SET(cd_ctrl_hdr_t, val) \ ((cd_ctrl_hdr_t)->next_curr_id) = \ ((((cd_ctrl_hdr_t)->next_curr_id) & \ ICP_QAT_FW_COMN_CURR_ID_MASK) | \ ((val << ICP_QAT_FW_COMN_NEXT_ID_BITPOS) & \ ICP_QAT_FW_COMN_NEXT_ID_MASK)) #define ICP_QAT_FW_COMN_CURR_ID_GET(cd_ctrl_hdr_t) \ (((cd_ctrl_hdr_t)->next_curr_id) & ICP_QAT_FW_COMN_CURR_ID_MASK) #define ICP_QAT_FW_COMN_CURR_ID_SET(cd_ctrl_hdr_t, val) \ ((cd_ctrl_hdr_t)->next_curr_id) = \ ((((cd_ctrl_hdr_t)->next_curr_id) & \ ICP_QAT_FW_COMN_NEXT_ID_MASK) | \ ((val)&ICP_QAT_FW_COMN_CURR_ID_MASK)) /* * < @ingroup icp_qat_fw_comn * Common Status Field Definition The bit offsets below are within the COMMON * RESPONSE status field, assumed to be 8 bits wide. In the case of the PKE * response (which follows the CPM 1.5 message format), the status field is 16 * bits wide. * The status flags are contained within the most significant byte and align * with the diagram below. Please therefore refer to the service-specific PKE * header file for the appropriate macro definition to extract the PKE status * flag from the PKE response, which assumes that a word is passed to the * macro. * + ===== + ------ + --- + --- + ---- + ---- + -------- + ---- + ---------- + * | Bit | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0 | * + ===== + ------ + --- + --- + ---- + ---- + -------- + ---- + ---------- + * | Flags | Crypto | Pke | Cmp | Xlat | EOLB | UnSupReq | Rsvd | XltWaApply | * + ===== + ------ + --- + --- + ---- + ---- + -------- + ---- + ---------- + * Note: * For the service specific status bit definitions refer to service header files * Eg. Crypto Status bit refers to Symmetric Crypto, Key Generation, and NRBG * Requests' Status. Unused bits e.g. reserved bits need to have been forced to * 0. 
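* * For instance, using the getters defined below, a compression response * can be checked as follows (sketch; resp is assumed to be an * icp_qat_fw_comn_resp_t): * * if (ICP_QAT_FW_COMN_RESP_CMP_STAT_GET(resp.comn_hdr.comn_status) == * ICP_QAT_FW_COMN_STATUS_FLAG_ERROR) * ... the compression slice reported an error ...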
*/ #define QAT_COMN_RESP_CRYPTO_STATUS_BITPOS 7 /**< @ingroup icp_qat_fw_comn * Starting bit position indicating Response for Crypto service Flag */ #define QAT_COMN_RESP_CRYPTO_STATUS_MASK 0x1 /**< @ingroup icp_qat_fw_comn * One bit mask used to determine Crypto status mask */ #define QAT_COMN_RESP_PKE_STATUS_BITPOS 6 /**< @ingroup icp_qat_fw_comn * Starting bit position indicating Response for PKE service Flag */ #define QAT_COMN_RESP_PKE_STATUS_MASK 0x1 /**< @ingroup icp_qat_fw_comn * One bit mask used to determine PKE status mask */ #define QAT_COMN_RESP_CMP_STATUS_BITPOS 5 /**< @ingroup icp_qat_fw_comn * Starting bit position indicating Response for Compression service Flag */ #define QAT_COMN_RESP_CMP_STATUS_MASK 0x1 /**< @ingroup icp_qat_fw_comn * One bit mask used to determine Compression status mask */ #define QAT_COMN_RESP_XLAT_STATUS_BITPOS 4 /**< @ingroup icp_qat_fw_comn * Starting bit position indicating Response for Xlat service Flag */ #define QAT_COMN_RESP_XLAT_STATUS_MASK 0x1 /**< @ingroup icp_qat_fw_comn * One bit mask used to determine Translator status mask */ #define QAT_COMN_RESP_CMP_END_OF_LAST_BLK_BITPOS 3 /**< @ingroup icp_qat_fw_comn * Starting bit position indicating the last block in a deflate stream for the compression service Flag */ #define QAT_COMN_RESP_CMP_END_OF_LAST_BLK_MASK 0x1 /**< @ingroup icp_qat_fw_comn * One bit mask used to determine the last block in a deflate stream status mask */ #define QAT_COMN_RESP_UNSUPPORTED_REQUEST_BITPOS 2 /**< @ingroup icp_qat_fw_comn * Starting bit position indicating an unsupported service request Flag */ #define QAT_COMN_RESP_UNSUPPORTED_REQUEST_MASK 0x1 /**< @ingroup icp_qat_fw_comn * One bit mask used to determine the unsupported service request status mask */ #define QAT_COMN_RESP_XLT_INV_APPLIED_BITPOS 0 /**< @ingroup icp_qat_fw_comn * Bit position indicating that firmware detected an invalid translation during * dynamic compression and took measures to overcome this * */ #define QAT_COMN_RESP_XLT_INV_APPLIED_MASK 0x1 /**< @ingroup icp_qat_fw_comn * One bit mask */ /** ****************************************************************************** * @description * Macro that must be used when building the status * for the common response * * @param crypto Value of the Crypto Service status flag * @param pke Value of the PKE Service status flag * @param comp Value of the Compression Service Status flag * @param xlat Value of the Xlator Status flag * @param eolb Value of the Compression End of Last Block Status flag * @param unsupp Value of the Unsupported Request flag * @param xlt_inv Value of the Invalid Translation flag *****************************************************************************/ #define ICP_QAT_FW_COMN_RESP_STATUS_BUILD( \ crypto, pke, comp, xlat, eolb, unsupp, xlt_inv) \ ((((crypto)&QAT_COMN_RESP_CRYPTO_STATUS_MASK) \ << QAT_COMN_RESP_CRYPTO_STATUS_BITPOS) | \ (((pke)&QAT_COMN_RESP_PKE_STATUS_MASK) \ << QAT_COMN_RESP_PKE_STATUS_BITPOS) | \ (((xlt_inv)&QAT_COMN_RESP_XLT_INV_APPLIED_MASK) \ << QAT_COMN_RESP_XLT_INV_APPLIED_BITPOS) | \ (((comp)&QAT_COMN_RESP_CMP_STATUS_MASK) \ << QAT_COMN_RESP_CMP_STATUS_BITPOS) | \ (((xlat)&QAT_COMN_RESP_XLAT_STATUS_MASK) \ << QAT_COMN_RESP_XLAT_STATUS_BITPOS) | \ (((eolb)&QAT_COMN_RESP_CMP_END_OF_LAST_BLK_MASK) \ << QAT_COMN_RESP_CMP_END_OF_LAST_BLK_BITPOS) | \ (((unsupp)&QAT_COMN_RESP_UNSUPPORTED_REQUEST_MASK) \ << QAT_COMN_RESP_UNSUPPORTED_REQUEST_BITPOS)) /* ========================================================================= */ /* GETTERS */ /*
========================================================================= */ /** ****************************************************************************** * @ingroup icp_qat_fw_comn * * @description * Macro for extraction of the Crypto bit from the status * * @param status * Status to extract the status bit from * *****************************************************************************/ #define ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(status) \ QAT_FIELD_GET(status, \ QAT_COMN_RESP_CRYPTO_STATUS_BITPOS, \ QAT_COMN_RESP_CRYPTO_STATUS_MASK) /** ****************************************************************************** * @ingroup icp_qat_fw_comn * * @description * Macro for extraction of the PKE bit from the status * * @param status * Status to extract the status bit from * *****************************************************************************/ #define ICP_QAT_FW_COMN_RESP_PKE_STAT_GET(status) \ QAT_FIELD_GET(status, \ QAT_COMN_RESP_PKE_STATUS_BITPOS, \ QAT_COMN_RESP_PKE_STATUS_MASK) /** ****************************************************************************** * @ingroup icp_qat_fw_comn * * @description * Macro for extraction of the Compression bit from the status * * @param status * Status to extract the status bit from * *****************************************************************************/ #define ICP_QAT_FW_COMN_RESP_CMP_STAT_GET(status) \ QAT_FIELD_GET(status, \ QAT_COMN_RESP_CMP_STATUS_BITPOS, \ QAT_COMN_RESP_CMP_STATUS_MASK) /** ****************************************************************************** * @ingroup icp_qat_fw_comn * * @description * Macro for extraction of the Translator bit from the status * * @param status * Status to extract the status bit from * *****************************************************************************/ #define ICP_QAT_FW_COMN_RESP_XLAT_STAT_GET(status) \ QAT_FIELD_GET(status, \ QAT_COMN_RESP_XLAT_STATUS_BITPOS, \ QAT_COMN_RESP_XLAT_STATUS_MASK) /** ****************************************************************************** * @ingroup icp_qat_fw_comn * * @description * Macro for extraction of the Translation Invalid bit * from the status * * @param status * Status to extract the status bit from * *****************************************************************************/ #define ICP_QAT_FW_COMN_RESP_XLT_INV_APPLIED_GET(status) \ QAT_FIELD_GET(status, \ QAT_COMN_RESP_XLT_INV_APPLIED_BITPOS, \ QAT_COMN_RESP_XLT_INV_APPLIED_MASK) /** ****************************************************************************** * @ingroup icp_qat_fw_comn * * @description * Macro for extraction of the end of compression block bit from the * status * * @param status * Status to extract the status bit from * *****************************************************************************/ #define ICP_QAT_FW_COMN_RESP_CMP_END_OF_LAST_BLK_FLAG_GET(status) \ QAT_FIELD_GET(status, \ QAT_COMN_RESP_CMP_END_OF_LAST_BLK_BITPOS, \ QAT_COMN_RESP_CMP_END_OF_LAST_BLK_MASK) /** ****************************************************************************** * @ingroup icp_qat_fw_comn * * @description * Macro for extraction of the Unsupported request from the status * * @param status * Status to extract the status bit from * *****************************************************************************/ #define ICP_QAT_FW_COMN_RESP_UNSUPPORTED_REQUEST_STAT_GET(status) \ QAT_FIELD_GET(status, \ QAT_COMN_RESP_UNSUPPORTED_REQUEST_BITPOS, \ QAT_COMN_RESP_UNSUPPORTED_REQUEST_MASK) /* 
========================================================================= */ /* Status Flag definitions */ /* ========================================================================= */ #define ICP_QAT_FW_COMN_STATUS_FLAG_OK 0 /**< @ingroup icp_qat_fw_comn * Definition of successful processing of a request */ #define ICP_QAT_FW_COMN_STATUS_FLAG_ERROR 1 /**< @ingroup icp_qat_fw_comn * Definition of erroneous processing of a request */ #define ICP_QAT_FW_COMN_STATUS_CMP_END_OF_LAST_BLK_FLAG_CLR 0 /**< @ingroup icp_qat_fw_comn * Final Deflate block of a compression request not completed */ #define ICP_QAT_FW_COMN_STATUS_CMP_END_OF_LAST_BLK_FLAG_SET 1 /**< @ingroup icp_qat_fw_comn * Final Deflate block of a compression request completed */ #define ERR_CODE_NO_ERROR 0 /**< Error Code constant value for no error */ #define ERR_CODE_INVALID_BLOCK_TYPE -1 /* Invalid block type (type == 3)*/ #define ERR_CODE_NO_MATCH_ONES_COMP -2 /* Stored block length does not match one's complement */ #define ERR_CODE_TOO_MANY_LEN_OR_DIS -3 /* Too many length or distance codes */ #define ERR_CODE_INCOMPLETE_LEN -4 /* Code lengths codes incomplete */ #define ERR_CODE_RPT_LEN_NO_FIRST_LEN -5 /* Repeat lengths with no first length */ #define ERR_CODE_RPT_GT_SPEC_LEN -6 /* Repeat more than specified lengths */ #define ERR_CODE_INV_LIT_LEN_CODE_LEN -7 /* Invalid lit/len code lengths */ #define ERR_CODE_INV_DIS_CODE_LEN -8 /* Invalid distance code lengths */ #define ERR_CODE_INV_LIT_LEN_DIS_IN_BLK -9 /* Invalid lit/len or distance code in fixed/dynamic block */ #define ERR_CODE_DIS_TOO_FAR_BACK -10 /* Distance too far back in fixed or dynamic block */ /* Common Error code definitions */ #define ERR_CODE_OVERFLOW_ERROR -11 /**< Error Code constant value for overflow error */ #define ERR_CODE_SOFT_ERROR -12 /**< Error Code constant value for soft error */ #define ERR_CODE_FATAL_ERROR -13 /**< Error Code constant value for hard/fatal error */ #define ERR_CODE_COMP_OUTPUT_CORRUPTION -14 /**< Error Code constant for compression output corruption */ #define ERR_CODE_HW_INCOMPLETE_FILE -15 /**< Error Code constant value for incomplete file hardware error */ #define ERR_CODE_SSM_ERROR -16 /**< Error Code constant value for error detected by SSM e.g. slice hang */ #define ERR_CODE_ENDPOINT_ERROR -17 /**< Error Code constant value for error detected by PCIe Endpoint, e.g. 
push * data error */ #define ERR_CODE_CNV_ERROR -18 /**< Error Code constant value for cnv failure */ #define ERR_CODE_EMPTY_DYM_BLOCK -19 /**< Error Code constant value for submission of empty dynamic stored block to * slice */ #define ERR_CODE_EXCEED_MAX_REQ_TIME -24 /**< Error Code constant for exceeding max request time */ #define ERR_CODE_KPT_CRYPTO_SERVICE_FAIL_INVALID_HANDLE -20 /**< Error Code constant for invalid handle in kpt crypto service */ #define ERR_CODE_KPT_CRYPTO_SERVICE_FAIL_HMAC_FAILED -21 /**< Error Code constant for failed hmac in kpt crypto service */ #define ERR_CODE_KPT_CRYPTO_SERVICE_FAIL_INVALID_WRAPPING_ALGO -22 /**< Error Code constant for invalid wrapping algo in kpt crypto service */ #define ERR_CODE_KPT_DRNG_SEED_NOT_LOAD -23 /**< Error Code constant for DRNG seed not loaded in kpt ecdsa signrs * service */ #define ERR_CODE_MISC_ERROR -50 /**< Error Code constant for error detected but the source * of error is not recognized */ /** ***************************************************************************** * @ingroup icp_qat_fw_comn * Slice types for building of the processing chain within the content * descriptor * * @description * Enumeration used to indicate the ids of the slice types through which * data will pass. * * A logical slice is not a hardware slice but is a software FSM * performing the actions of a slice * *****************************************************************************/ typedef enum { ICP_QAT_FW_SLICE_NULL = 0, /**< NULL slice type */ ICP_QAT_FW_SLICE_CIPHER = 1, /**< CIPHER slice type */ ICP_QAT_FW_SLICE_AUTH = 2, /**< AUTH slice type */ ICP_QAT_FW_SLICE_DRAM_RD = 3, /**< DRAM_RD Logical slice type */ ICP_QAT_FW_SLICE_DRAM_WR = 4, /**< DRAM_WR Logical slice type */ ICP_QAT_FW_SLICE_COMP = 5, /**< Compression slice type */ ICP_QAT_FW_SLICE_XLAT = 6, /**< Translator slice type */ ICP_QAT_FW_SLICE_DELIMITER /**< End delimiter */ } icp_qat_fw_slice_t; #endif /* _ICP_QAT_FW_H_ */ diff --git a/sys/dev/qat/qat_api/firmware/include/icp_qat_fw_comp.h b/sys/dev/qat/qat_api/firmware/include/icp_qat_fw_comp.h index 1a9d7e727bd0..c4bb5632b318 100644 --- a/sys/dev/qat/qat_api/firmware/include/icp_qat_fw_comp.h +++ b/sys/dev/qat/qat_api/firmware/include/icp_qat_fw_comp.h @@ -1,1029 +1,1147 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright(c) 2007-2022 Intel Corporation */ /* $FreeBSD$ */ /** ***************************************************************************** * @file icp_qat_fw_comp.h * @defgroup icp_qat_fw_comp ICP QAT FW Compression Service * Interface Definitions * @ingroup icp_qat_fw * @description * This file documents structs used to provide the interface to the * Compression QAT FW service * *****************************************************************************/ #ifndef _ICP_QAT_FW_COMP_H_ #define _ICP_QAT_FW_COMP_H_ /* ****************************************************************************** * Include local header files ****************************************************************************** */ #include "icp_qat_fw.h" /** ***************************************************************************** * @ingroup icp_qat_fw_comp * Definition of the Compression command types * @description * Enumeration which is used to indicate the ids of functions * that are exposed by the Compression QAT FW service * *****************************************************************************/ typedef enum { ICP_QAT_FW_COMP_CMD_STATIC = 0, /*!< Static Compress Request */ ICP_QAT_FW_COMP_CMD_DYNAMIC = 1, /*!<
Dynamic Compress Request */ ICP_QAT_FW_COMP_CMD_DECOMPRESS = 2, /*!< Decompress Request */ ICP_QAT_FW_COMP_CMD_DELIMITER /**< Delimiter type */ } icp_qat_fw_comp_cmd_id_t; /* * REQUEST FLAGS IN COMMON COMPRESSION * In common message it is named as SERVICE SPECIFIC FLAGS. * * + ===== + ------ + ------ + --- + ----- + ----- + ----- + -- + ---- + --- + * | Bit | 15 - 8 | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0 | * + ===== + ------ + ----- + --- + ----- + ----- + ----- + -- + ---- + --- + * | Flags | Rsvd | Dis. |Resvd| Dis. | Enh. |Auto |Sess| Rsvd | Rsvd| * | | Bits | secure | =0 | Type0 | ASB |Select |Type| = 0 | = 0 | * | | = 0 |RAM use | | Header | |Best | | | | * | | |as intmd| | | | | | | | * | | | buf | | | | | | | | * + ===== + ------ + ----- + --- + ------ + ----- + ----- + -- + ---- + --- + + * Note: For QAT 2.0 Disable Secure Ram, DisType0 Header and Enhanced ASB bits + * are don't care. i.e., these features are removed from QAT 2.0. */ /** Flag usage */ #define ICP_QAT_FW_COMP_STATELESS_SESSION 0 /**< @ingroup icp_qat_fw_comp * Flag representing that session is stateless */ #define ICP_QAT_FW_COMP_STATEFUL_SESSION 1 /**< @ingroup icp_qat_fw_comp * Flag representing that session is stateful */ #define ICP_QAT_FW_COMP_NOT_AUTO_SELECT_BEST 0 /**< @ingroup icp_qat_fw_comp * Flag representing that autoselectbest is NOT used */ #define ICP_QAT_FW_COMP_AUTO_SELECT_BEST 1 /**< @ingroup icp_qat_fw_comp * Flag representing that autoselectbest is used */ #define ICP_QAT_FW_COMP_NOT_ENH_AUTO_SELECT_BEST 0 /**< @ingroup icp_qat_fw_comp * Flag representing that enhanced autoselectbest is NOT used */ #define ICP_QAT_FW_COMP_ENH_AUTO_SELECT_BEST 1 /**< @ingroup icp_qat_fw_comp * Flag representing that enhanced autoselectbest is used */ #define ICP_QAT_FW_COMP_NOT_DISABLE_TYPE0_ENH_AUTO_SELECT_BEST 0 /**< @ingroup icp_qat_fw_comp * Flag representing that type zero header write back is NOT disabled when * enhanced autoselectbest is used */ #define ICP_QAT_FW_COMP_DISABLE_TYPE0_ENH_AUTO_SELECT_BEST 1 /**< @ingroup icp_qat_fw_comp * Flag representing that type zero header write back is disabled when * enhanced autoselectbest is used */ #define ICP_QAT_FW_COMP_DISABLE_SECURE_RAM_USED_AS_INTMD_BUF 1 /**< @ingroup icp_qat_fw_comp * Flag representing that use of secure RAM as * an intermediate buffer is DISABLED. */ #define ICP_QAT_FW_COMP_ENABLE_SECURE_RAM_USED_AS_INTMD_BUF 0 /**< @ingroup icp_qat_fw_comp * Flag representing that use of secure RAM as * an intermediate buffer is ENABLED. */ /** Flag mask & bit position */ #define ICP_QAT_FW_COMP_SESSION_TYPE_BITPOS 2 /**< @ingroup icp_qat_fw_comp * Starting bit position for the session type */ #define ICP_QAT_FW_COMP_SESSION_TYPE_MASK 0x1 /**< @ingroup icp_qat_fw_comp * One bit mask used to determine the session type */ #define ICP_QAT_FW_COMP_AUTO_SELECT_BEST_BITPOS 3 /**< @ingroup icp_qat_fw_comp * Starting bit position for auto select best */ #define ICP_QAT_FW_COMP_AUTO_SELECT_BEST_MASK 0x1 /**< @ingroup icp_qat_fw_comp * One bit mask for auto select best */ #define ICP_QAT_FW_COMP_ENHANCED_AUTO_SELECT_BEST_BITPOS 4 /**< @ingroup icp_qat_fw_comp * Starting bit position for enhanced auto select best */ #define ICP_QAT_FW_COMP_ENHANCED_AUTO_SELECT_BEST_MASK 0x1 /**< @ingroup icp_qat_fw_comp * One bit mask for enhanced auto select best */ #define ICP_QAT_FW_COMP_RET_DISABLE_TYPE0_HEADER_DATA_BITPOS 5 /**< @ingroup icp_qat_fw_comp * Starting bit position for disabling type zero header write back when Enhanced autoselect best is enabled. If set firmware does not return type0 store block header, only copies src to dest.
(if best output is Type0). */ #define ICP_QAT_FW_COMP_RET_DISABLE_TYPE0_HEADER_DATA_MASK 0x1 /**< @ingroup icp_qat_fw_comp * One bit mask for the ret disable type0 header data flag */ #define ICP_QAT_FW_COMP_DISABLE_SECURE_RAM_AS_INTMD_BUF_BITPOS 7 /**< @ingroup icp_qat_fw_comp * Starting bit position for flag used to disable secure ram from * being used as an intermediate buffer. */ #define ICP_QAT_FW_COMP_DISABLE_SECURE_RAM_AS_INTMD_BUF_MASK 0x1 /**< @ingroup icp_qat_fw_comp * One bit mask for disable secure ram for use as an intermediate buffer. */ /** ****************************************************************************** * @ingroup icp_qat_fw_comp * * @description * Macro used for the generation of the command flags for Compression Request. * This should always be used for the generation of the flags. No direct sets or * masks should be performed on the flags data * * @param sesstype Session Type * @param autoselect AutoSelectBest * @param enhanced_asb Enhanced AutoSelectBest * @param ret_uncomp RetUnCompressed * @param secure_ram Secure Ram usage * *********************************************************************************/ #define ICP_QAT_FW_COMP_FLAGS_BUILD( \ sesstype, autoselect, enhanced_asb, ret_uncomp, secure_ram) \ (((sesstype & ICP_QAT_FW_COMP_SESSION_TYPE_MASK) \ << ICP_QAT_FW_COMP_SESSION_TYPE_BITPOS) | \ ((autoselect & ICP_QAT_FW_COMP_AUTO_SELECT_BEST_MASK) \ << ICP_QAT_FW_COMP_AUTO_SELECT_BEST_BITPOS) | \ ((enhanced_asb & ICP_QAT_FW_COMP_ENHANCED_AUTO_SELECT_BEST_MASK) \ << ICP_QAT_FW_COMP_ENHANCED_AUTO_SELECT_BEST_BITPOS) | \ ((ret_uncomp & ICP_QAT_FW_COMP_RET_DISABLE_TYPE0_HEADER_DATA_MASK) \ << ICP_QAT_FW_COMP_RET_DISABLE_TYPE0_HEADER_DATA_BITPOS) | \ ((secure_ram & ICP_QAT_FW_COMP_DISABLE_SECURE_RAM_AS_INTMD_BUF_MASK) \ << ICP_QAT_FW_COMP_DISABLE_SECURE_RAM_AS_INTMD_BUF_BITPOS)) +/** +****************************************************************************** +* @ingroup icp_qat_fw_comp +* +* @description +* Macro used for the generation of the command flags for Compression Request. +* This should always be used for the generation of the flags. No direct sets or +* masks should be performed on the flags data +* +* @param sesstype Session Type +* @param autoselect AutoSelectBest +* Selects between compressed and uncompressed output. +* No distinction made between static and dynamic +* compressed data.
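+* +*********************************************************************************/ +#define ICP_QAT_FW_COMP_20_FLAGS_BUILD(sesstype, autoselect) \ + (((sesstype & ICP_QAT_FW_COMP_SESSION_TYPE_MASK) \ + << ICP_QAT_FW_COMP_SESSION_TYPE_BITPOS) | \ + ((autoselect & ICP_QAT_FW_COMP_AUTO_SELECT_BEST_MASK) \ + << ICP_QAT_FW_COMP_AUTO_SELECT_BEST_BITPOS)) +

/*
 * Editor's illustration (not part of the patch): a minimal sketch of how a
 * caller might build the service-specific flags, first with the legacy
 * macro and then with the QAT 2.0 variant. The variable names are
 * hypothetical; the flag constants are the ones defined above.
 */
uint16_t legacy_flags = ICP_QAT_FW_COMP_FLAGS_BUILD(
    ICP_QAT_FW_COMP_STATELESS_SESSION,
    ICP_QAT_FW_COMP_AUTO_SELECT_BEST,
    ICP_QAT_FW_COMP_NOT_ENH_AUTO_SELECT_BEST,
    ICP_QAT_FW_COMP_NOT_DISABLE_TYPE0_ENH_AUTO_SELECT_BEST,
    ICP_QAT_FW_COMP_ENABLE_SECURE_RAM_USED_AS_INTMD_BUF);
uint16_t qat20_flags = ICP_QAT_FW_COMP_20_FLAGS_BUILD(
    ICP_QAT_FW_COMP_STATELESS_SESSION,
    ICP_QAT_FW_COMP_AUTO_SELECT_BEST);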
/** ****************************************************************************** * @ingroup icp_qat_fw_comp * * @description * Macro for extraction of the session type bit * * @param flags Flags to extract the session type bit from * ******************************************************************************/ #define ICP_QAT_FW_COMP_SESSION_TYPE_GET(flags) \ QAT_FIELD_GET(flags, \ ICP_QAT_FW_COMP_SESSION_TYPE_BITPOS, \ ICP_QAT_FW_COMP_SESSION_TYPE_MASK) /** ****************************************************************************** * @ingroup icp_qat_fw_comp * * @description * Macro for extraction of the autoSelectBest bit * * @param flags Flags to extract the autoSelectBest bit from * ******************************************************************************/ #define ICP_QAT_FW_COMP_AUTO_SELECT_BEST_GET(flags) \ QAT_FIELD_GET(flags, \ ICP_QAT_FW_COMP_AUTO_SELECT_BEST_BITPOS, \ ICP_QAT_FW_COMP_AUTO_SELECT_BEST_MASK) /** ****************************************************************************** * @ingroup icp_qat_fw_comp * * @description * Macro for extraction of the enhanced asb bit * * @param flags Flags to extract the enhanced asb bit from * ******************************************************************************/ #define ICP_QAT_FW_COMP_EN_ASB_GET(flags) \ QAT_FIELD_GET(flags, \ ICP_QAT_FW_COMP_ENHANCED_AUTO_SELECT_BEST_BITPOS, \ ICP_QAT_FW_COMP_ENHANCED_AUTO_SELECT_BEST_MASK) /** ****************************************************************************** * @ingroup icp_qat_fw_comp * * @description * Macro for extraction of the RetUncomp bit * * @param flags Flags to extract the Ret Uncomp bit from * ******************************************************************************/ #define ICP_QAT_FW_COMP_RET_UNCOMP_GET(flags) \ QAT_FIELD_GET(flags, \ ICP_QAT_FW_COMP_RET_DISABLE_TYPE0_HEADER_DATA_BITPOS, \ ICP_QAT_FW_COMP_RET_DISABLE_TYPE0_HEADER_DATA_MASK) /** ****************************************************************************** * @ingroup icp_qat_fw_comp * * @description * Macro for extraction of the Secure Ram usage bit * * @param flags Flags to extract the Secure Ram usage from * ******************************************************************************/ #define ICP_QAT_FW_COMP_SECURE_RAM_USE_GET(flags) \ QAT_FIELD_GET(flags, \ ICP_QAT_FW_COMP_DISABLE_SECURE_RAM_AS_INTMD_BUF_BITPOS, \ ICP_QAT_FW_COMP_DISABLE_SECURE_RAM_AS_INTMD_BUF_MASK) /** ****************************************************************************** * @ingroup icp_qat_fw_comp * Definition of the compression header cd pars block * @description * Definition of the compression processing cd pars block. * The structure is a service-specific implementation of the common * structure. ******************************************************************************/ typedef union icp_qat_fw_comp_req_hdr_cd_pars_s { /**< LWs 2-5 */ struct { uint64_t content_desc_addr; /**< Address of the content descriptor */ uint16_t content_desc_resrvd1; /**< Content descriptor reserved field */ uint8_t content_desc_params_sz; /**< Size of the content descriptor parameters in quad words.
* These * parameters describe the session setup configuration info for * the * slices that this request relies upon i.e. the configuration * word and * cipher key needed by the cipher slice if there is a request * for * cipher * processing. */ uint8_t content_desc_hdr_resrvd2; /**< Content descriptor reserved field */ uint32_t content_desc_resrvd3; /**< Content descriptor reserved field */ } s; struct { uint32_t comp_slice_cfg_word[ICP_QAT_FW_NUM_LONGWORDS_2]; /* Compression Slice Config Word */ uint32_t content_desc_resrvd4; /**< Content descriptor reserved field */ } sl; } icp_qat_fw_comp_req_hdr_cd_pars_t; /** ****************************************************************************** * @ingroup icp_qat_fw_comp * Definition of the compression request parameters block * @description * Definition of the compression processing request parameters block. * The structure below forms part of the Compression + Translation * Parameters block spanning LWs 14-21, thus differing from the common * base Parameters block structure. Unused fields must be set to 0. * ******************************************************************************/ typedef struct icp_qat_fw_comp_req_params_s { /**< LW 14 */ uint32_t comp_len; /**< Size of input to process in bytes Note: Only EOP requests can be * odd * for decompression. IA must set LSB to zero for odd sized intermediate * inputs */ /**< LW 15 */ uint32_t out_buffer_sz; /**< Size of output buffer in bytes */ /**< LW 16 */ union { struct { /** LW 16 */ uint32_t initial_crc32; /**< CRC for processed bytes (input byte count) */ /** LW 17 */ uint32_t initial_adler; /**< Adler for processed bytes (input byte count) */ } legacy; /** LW 16-17 */ uint64_t crc_data_addr; /**< CRC data structure pointer */ } crc; /** LW 18 */ uint32_t req_par_flags; /** LW 19 */ uint32_t rsrvd; } icp_qat_fw_comp_req_params_t; /** ****************************************************************************** * @ingroup icp_qat_fw_comp * * @description * Macro used for the generation of the request parameter flags. * This should always be used for the generation of the flags. 
No direct sets or * masks should be performed on the flags data * * @param sop SOP Flag, 0 restore, 1 don't restore * @param eop EOP Flag, 0 restore, 1 don't restore * @param bfinal Set bfinal in this block or not * @param cnv Whether internal CNV check is to be performed * * ICP_QAT_FW_COMP_NO_CNV * * ICP_QAT_FW_COMP_CNV * @param cnvnr Whether internal CNV recovery is to be performed * * ICP_QAT_FW_COMP_NO_CNV_RECOVERY * * ICP_QAT_FW_COMP_CNV_RECOVERY + * @param cnvdfx Whether CNV DFX error injection is to be performed + * * ICP_QAT_FW_COMP_NO_CNV_DFX + * * ICP_QAT_FW_COMP_CNV_DFX * @param crc CRC Mode Flag - 0 legacy, 1 crc data struct * *****************************************************************************/ #define ICP_QAT_FW_COMP_REQ_PARAM_FLAGS_BUILD( \ - sop, eop, bfinal, cnv, cnvnr, crc) \ + sop, eop, bfinal, cnv, cnvnr, cnvdfx, crc) \ (((sop & ICP_QAT_FW_COMP_SOP_MASK) << ICP_QAT_FW_COMP_SOP_BITPOS) | \ ((eop & ICP_QAT_FW_COMP_EOP_MASK) << ICP_QAT_FW_COMP_EOP_BITPOS) | \ ((bfinal & ICP_QAT_FW_COMP_BFINAL_MASK) \ << ICP_QAT_FW_COMP_BFINAL_BITPOS) | \ ((cnv & ICP_QAT_FW_COMP_CNV_MASK) << ICP_QAT_FW_COMP_CNV_BITPOS) | \ - ((cnvnr & ICP_QAT_FW_COMP_CNV_RECOVERY_MASK) \ - << ICP_QAT_FW_COMP_CNV_RECOVERY_BITPOS) | \ + ((cnvnr & ICP_QAT_FW_COMP_CNVNR_MASK) \ + << ICP_QAT_FW_COMP_CNVNR_BITPOS) | \ + ((cnvdfx & ICP_QAT_FW_COMP_CNV_DFX_MASK) \ + << ICP_QAT_FW_COMP_CNV_DFX_BITPOS) | \ ((crc & ICP_QAT_FW_COMP_CRC_MODE_MASK) \ << ICP_QAT_FW_COMP_CRC_MODE_BITPOS)) /* * REQUEST FLAGS IN REQUEST PARAMETERS COMPRESSION * * + ===== + ----- + --- +-----+-------+ --- + ---------+ --- + ---- + --- + * --- + * | Bit | 31-20 | 19 | 18 | 17 | 16 | 15 - 7 | 6 | 5-2 | 1 | 0 * | * + ===== + ----- + --- +-----+-------+ --- + ---------+ --- + ---- + --- + * --- + * | Flags | Resvd | CRC |Resvd| CNVNR | CNV |Resvd Bits|BFin |Resvd | EOP | * SOP | * | | =0 | Mode| =0 | | | =0 | | =0 | | | * | | | | | | | | | | | | * + ===== + ----- + --- +-----+-------+ --- + ---------+ --- + ---- + --- + * --- + */ #define ICP_QAT_FW_COMP_NOT_SOP 0 /**< @ingroup icp_qat_fw_comp * Flag representing that a request is NOT Start of Packet */ #define ICP_QAT_FW_COMP_SOP 1 /**< @ingroup icp_qat_fw_comp * Flag representing that a request IS Start of Packet */ #define ICP_QAT_FW_COMP_NOT_EOP 0 /**< @ingroup icp_qat_fw_comp * Flag representing that a request is NOT End of Packet */ #define ICP_QAT_FW_COMP_EOP 1 /**< @ingroup icp_qat_fw_comp * Flag representing that a request IS End of Packet */ #define ICP_QAT_FW_COMP_NOT_BFINAL 0 /**< @ingroup icp_qat_fw_comp * Flag indicating to firmware that this is not the last block */ #define ICP_QAT_FW_COMP_BFINAL 1 /**< @ingroup icp_qat_fw_comp * Flag indicating to firmware that this is the last block */ #define ICP_QAT_FW_COMP_NO_CNV 0 /**< @ingroup icp_qat_fw_comp * Flag indicating that NO cnv check is to be performed on the request */ #define ICP_QAT_FW_COMP_CNV 1 /**< @ingroup icp_qat_fw_comp * Flag indicating that a cnv check IS to be performed on the request */ #define ICP_QAT_FW_COMP_NO_CNV_RECOVERY 0 /**< @ingroup icp_qat_fw_comp * Flag indicating that NO cnv recovery is to be performed on the request */ #define ICP_QAT_FW_COMP_CNV_RECOVERY 1 /**< @ingroup icp_qat_fw_comp * Flag indicating that a cnv recovery is to be performed on the request */ +#define ICP_QAT_FW_COMP_NO_CNV_DFX 0 +/**< @ingroup icp_qat_fw_comp + * Flag indicating that NO CNV inject error is to be performed on the request */ + +#define ICP_QAT_FW_COMP_CNV_DFX 1 +/**< @ingroup icp_qat_fw_comp + * Flag indicating that CNV inject error is to be performed on the request */ + #define ICP_QAT_FW_COMP_CRC_MODE_LEGACY 0 /**< @ingroup
icp_qat_fw_comp * Flag representing to use the legacy CRC mode */ #define ICP_QAT_FW_COMP_CRC_MODE_E2E 1 /**< @ingroup icp_qat_fw_comp * Flag representing to use the external CRC data struct */ #define ICP_QAT_FW_COMP_SOP_BITPOS 0 /**< @ingroup icp_qat_fw_comp * Starting bit position for SOP */ #define ICP_QAT_FW_COMP_SOP_MASK 0x1 /**< @ingroup icp_qat_fw_comp * One bit mask used to determine SOP */ #define ICP_QAT_FW_COMP_EOP_BITPOS 1 /**< @ingroup icp_qat_fw_comp * Starting bit position for EOP */ #define ICP_QAT_FW_COMP_EOP_MASK 0x1 /**< @ingroup icp_qat_fw_comp * One bit mask used to determine EOP */ #define ICP_QAT_FW_COMP_BFINAL_MASK 0x1 /**< @ingroup icp_qat_fw_comp * One bit mask for the bfinal bit */ #define ICP_QAT_FW_COMP_BFINAL_BITPOS 6 /**< @ingroup icp_qat_fw_comp * Starting bit position for the bfinal bit */ #define ICP_QAT_FW_COMP_CNV_MASK 0x1 /**< @ingroup icp_qat_fw_comp * One bit mask for the CNV bit */ #define ICP_QAT_FW_COMP_CNV_BITPOS 16 /**< @ingroup icp_qat_fw_comp * Starting bit position for the CNV bit */ #define ICP_QAT_FW_COMP_CNV_RECOVERY_MASK 0x1 /**< @ingroup icp_qat_fw_comp * One bit mask for the CNV Recovery bit */ #define ICP_QAT_FW_COMP_CNV_RECOVERY_BITPOS 17 /**< @ingroup icp_qat_fw_comp * Starting bit position for the CNV Recovery bit */ +#define ICP_QAT_FW_COMP_CNVNR_MASK 0x1 +/**< @ingroup icp_qat_fw_comp + * One bit mask for the CNV Recovery bit */ + +#define ICP_QAT_FW_COMP_CNVNR_BITPOS 17 +/**< @ingroup icp_qat_fw_comp + * Starting bit position for the CNV Recovery bit */ + +#define ICP_QAT_FW_COMP_CNV_DFX_BITPOS 18 +/**< @ingroup icp_qat_fw_comp + * Starting bit position for the CNV DFX bit */ + +#define ICP_QAT_FW_COMP_CNV_DFX_MASK 0x1 +/**< @ingroup icp_qat_fw_comp + * One bit mask for the CNV DFX bit */ + #define ICP_QAT_FW_COMP_CRC_MODE_BITPOS 19 /**< @ingroup icp_qat_fw_comp * Starting bit position for CRC mode */ #define ICP_QAT_FW_COMP_CRC_MODE_MASK 0x1 /**< @ingroup icp_qat_fw_comp * One bit mask used to determine CRC mode */ +#define ICP_QAT_FW_COMP_XXHASH_ACC_MODE_BITPOS 20 +/**< @ingroup icp_qat_fw_comp + * Starting bit position for xxHash accumulate mode */ + +#define ICP_QAT_FW_COMP_XXHASH_ACC_MODE_MASK 0x1 +/**< @ingroup icp_qat_fw_comp + * One bit mask used to determine xxHash accumulate mode */ + /** ****************************************************************************** * @ingroup icp_qat_fw_comp * * @description * Macro for extraction of the SOP bit * * @param flags Flags to extract the SOP bit from * ******************************************************************************/ #define ICP_QAT_FW_COMP_SOP_GET(flags) \ QAT_FIELD_GET(flags, \ ICP_QAT_FW_COMP_SOP_BITPOS, \ ICP_QAT_FW_COMP_SOP_MASK) /** ****************************************************************************** * @ingroup icp_qat_fw_comp * * @description * Macro for extraction of the EOP bit * * @param flags Flags to extract the EOP bit from * *****************************************************************************/ #define ICP_QAT_FW_COMP_EOP_GET(flags) \ QAT_FIELD_GET(flags, \ ICP_QAT_FW_COMP_EOP_BITPOS, \ ICP_QAT_FW_COMP_EOP_MASK) /** ****************************************************************************** * @ingroup icp_qat_fw_comp * * @description * Macro for extraction of the bfinal bit * * @param flags Flags to extract the bfinal bit from * ******************************************************************************/ #define ICP_QAT_FW_COMP_BFINAL_GET(flags) \ QAT_FIELD_GET(flags, \ ICP_QAT_FW_COMP_BFINAL_BITPOS, \ 
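ICP_QAT_FW_COMP_BFINAL_MASK)

/*
 * Editor's illustration (not part of the patch): a minimal sketch of
 * building the request-parameter flags for a single stateless request
 * that is both start and end of packet, marks the final block, enables
 * the CNV check with recovery, leaves CNV DFX error injection off and
 * uses legacy CRC mode; one field is then read back. Variable names are
 * hypothetical.
 */
uint32_t req_par_flags = ICP_QAT_FW_COMP_REQ_PARAM_FLAGS_BUILD(
    ICP_QAT_FW_COMP_SOP, ICP_QAT_FW_COMP_EOP, ICP_QAT_FW_COMP_BFINAL,
    ICP_QAT_FW_COMP_CNV, ICP_QAT_FW_COMP_CNV_RECOVERY,
    ICP_QAT_FW_COMP_NO_CNV_DFX, ICP_QAT_FW_COMP_CRC_MODE_LEGACY);
uint32_t bfinal_set = ICP_QAT_FW_COMP_BFINAL_GET(req_par_flags);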
/** ****************************************************************************** * @ingroup icp_qat_fw_comp * * @description * Macro for extraction of the CNV bit * * @param flags Flag set containing the CNV flag * *****************************************************************************/ #define ICP_QAT_FW_COMP_CNV_GET(flags) \ QAT_FIELD_GET(flags, \ ICP_QAT_FW_COMP_CNV_BITPOS, \ ICP_QAT_FW_COMP_CNV_MASK) /** ****************************************************************************** * @ingroup icp_qat_fw_comp * * @description * Macro for extraction of the crc mode bit * * @param flags Flags to extract the crc mode bit from * ******************************************************************************/ #define ICP_QAT_FW_COMP_CRC_MODE_GET(flags) \ QAT_FIELD_GET(flags, \ ICP_QAT_FW_COMP_CRC_MODE_BITPOS, \ ICP_QAT_FW_COMP_CRC_MODE_MASK) +/** + ****************************************************************************** + * @ingroup icp_qat_fw_comp + * + * @description + * Macro for extraction of the xxHash accumulate mode bit + * + * @param flags Flags to extract the xxHash accumulate mode bit from + * + *****************************************************************************/ +#define ICP_QAT_FW_COMP_XXHASH_ACC_MODE_GET(flags) \ + QAT_FIELD_GET(flags, \ + ICP_QAT_FW_COMP_XXHASH_ACC_MODE_BITPOS, \ + ICP_QAT_FW_COMP_XXHASH_ACC_MODE_MASK) + +/** + ****************************************************************************** + * @ingroup icp_qat_fw_comp + * + * @description + * Macro for setting of the xxHash accumulate mode bit + * + * @param flags Flags to set the xxHash accumulate mode bit to + * @param val xxHash accumulate mode to set + * + *****************************************************************************/ +#define ICP_QAT_FW_COMP_XXHASH_ACC_MODE_SET(flags, val) \ + QAT_FIELD_SET(flags, \ + val, \ + ICP_QAT_FW_COMP_XXHASH_ACC_MODE_BITPOS, \ + ICP_QAT_FW_COMP_XXHASH_ACC_MODE_MASK) + /** ****************************************************************************** * @ingroup icp_qat_fw_comp * Definition of the translator request parameters block * @description * Definition of the translator processing request parameters block * The structure below forms part of the Compression + Translation * Parameters block spanning LWs 20-21, thus differing from the common * base Parameters block structure. Unused fields must be set to 0. * ******************************************************************************/ typedef struct icp_qat_fw_xlt_req_params_s { /**< LWs 20-21 */ uint64_t inter_buff_ptr; /**< This field specifies the physical address of an intermediate - * buffer SGL array. The array contains a pair of 64-bit - * intermediate buffer pointers to SGL buffer descriptors, one pair - * per CPM. Please refer to the CPM1.6 Firmware Interface HLD - * specification for more details. */ + * buffer SGL array. The array contains a pair of 64-bit + * intermediate buffer pointers to SGL buffer descriptors, one pair + * per CPM. Please refer to the CPM1.6 Firmware Interface HLD + * specification for more details. + * Placeholder for QAT2.0. */ } icp_qat_fw_xlt_req_params_t; /** ****************************************************************************** * @ingroup icp_qat_fw_comp * Compression header of the content descriptor block * @description * Definition of the service-specific compression control block header * structure. The compression parameters are defined per algorithm * and are located in the icp_qat_hw.h file. This compression
This compression * cd block spans LWs 24-29, forming part of the compression + translation * cd block, thus differing from the common base content descriptor * structure. * ******************************************************************************/ typedef struct icp_qat_fw_comp_cd_hdr_s { /**< LW 24 */ uint16_t ram_bank_flags; /**< Flags to show which ram banks to access */ uint8_t comp_cfg_offset; /**< Quad word offset from the content descriptor parameters address to * the * parameters for the compression processing */ uint8_t next_curr_id; /**< This field combines the next and current id (each four bits) - * the next id is the most significant nibble. * Next Id: Set to the next slice to pass the compressed data through. * Set to ICP_QAT_FW_SLICE_DRAM_WR if the data is not to go through * anymore slices after compression * Current Id: Initialised with the compression slice type */ /**< LW 25 */ uint32_t resrvd; /**< LWs 26-27 */ uint64_t comp_state_addr; /**< Pointer to compression state */ /**< LWs 28-29 */ uint64_t ram_banks_addr; /**< Pointer to banks */ } icp_qat_fw_comp_cd_hdr_t; #define COMP_CPR_INITIAL_CRC 0 #define COMP_CPR_INITIAL_ADLER 1 /** ****************************************************************************** * @ingroup icp_qat_fw_comp * Translator content descriptor header block * @description * Definition of the structure used to describe the translation processing * to perform on data. The translator parameters are defined per algorithm * and are located in the icp_qat_hw.h file. This translation cd block * spans LWs 30-31, forming part of the compression + translation cd block, * thus differing from the common base content descriptor structure. * ******************************************************************************/ typedef struct icp_qat_fw_xlt_cd_hdr_s { /**< LW 30 */ uint16_t resrvd1; /**< Reserved field and assumed set to 0 */ uint8_t resrvd2; /**< Reserved field and assumed set to 0 */ uint8_t next_curr_id; /**< This field combines the next and current id (each four bits) - * the next id is the most significant nibble. * Next Id: Set to the next slice to pass the translated data through. * Set to ICP_QAT_FW_SLICE_DRAM_WR if the data is not to go through * any more slices after compression * Current Id: Initialised with the translation slice type */ /**< LW 31 */ uint32_t resrvd3; /**< Reserved and should be set to zero, needed for quadword alignment */ } icp_qat_fw_xlt_cd_hdr_t; /** ****************************************************************************** * @ingroup icp_qat_fw_comp * Definition of the common Compression QAT FW request * @description * This is a definition of the full request structure for * compression and translation. * ******************************************************************************/ typedef struct icp_qat_fw_comp_req_s { /**< LWs 0-1 */ icp_qat_fw_comn_req_hdr_t comn_hdr; /**< Common request header - for Service Command Id, * use service-specific Compression Command Id. * Service Specific Flags - use Compression Command Flags */ /**< LWs 2-5 */ icp_qat_fw_comp_req_hdr_cd_pars_t cd_pars; /**< Compression service-specific content descriptor field which points * either to a content descriptor parameter block or contains the * compression slice config word. 
*/ /**< LWs 6-13 */ icp_qat_fw_comn_req_mid_t comn_mid; /**< Common request middle section */ /**< LWs 14-19 */ icp_qat_fw_comp_req_params_t comp_pars; /**< Compression request Parameters block */ /**< LWs 20-21 */ union { icp_qat_fw_xlt_req_params_t xlt_pars; /**< Translation request Parameters block */ uint32_t resrvd1[ICP_QAT_FW_NUM_LONGWORDS_2]; /**< Reserved if not used for translation */ } u1; /**< LWs 22-23 */ union { uint32_t resrvd2[ICP_QAT_FW_NUM_LONGWORDS_2]; /**< Reserved - not used if Batch and Pack is disabled.*/ uint64_t bnp_res_table_addr; /**< A generic pointer to the unbounded list of * icp_qat_fw_resp_comp_pars_t members. This pointer is only * used when the Batch and Pack is enabled. */ } u3; /**< LWs 24-29 */ icp_qat_fw_comp_cd_hdr_t comp_cd_ctrl; /**< Compression request content descriptor control * block header */ /**< LWs 30-31 */ union { icp_qat_fw_xlt_cd_hdr_t xlt_cd_ctrl; /**< Translation request content descriptor * control block header */ uint32_t resrvd3[ICP_QAT_FW_NUM_LONGWORDS_2]; /**< Reserved if not used for translation */ } u2; } icp_qat_fw_comp_req_t; /** ****************************************************************************** * @ingroup icp_qat_fw_comp * Definition of the compression QAT FW response descriptor * parameters * @description * This part of the response is specific to the compression response. * ******************************************************************************/ typedef struct icp_qat_fw_resp_comp_pars_s { /**< LW 4 */ uint32_t input_byte_counter; /**< Input byte counter */ /**< LW 5 */ uint32_t output_byte_counter; /**< Output byte counter */ /** LW 6-7 */ union { struct { /** LW 6 */ uint32_t curr_crc32; /**< Current CRC32 */ /** LW 7 */ uint32_t curr_adler_32; /**< Current Adler32 */ } legacy; uint32_t resrvd[ICP_QAT_FW_NUM_LONGWORDS_2]; /**< Reserved if not in legacy mode */ } crc; } icp_qat_fw_resp_comp_pars_t; /** ***************************************************************************** * @ingroup icp_qat_fw_comp * Definition of a single result metadata structure inside Batch and Pack * results table array. It describes the output of a single job in the * batch and pack jobs. * Total number of entries in BNP Out table shall be equal to total * number of requests in the 'batch'. * @description * This structure is specific to the compression output. * *****************************************************************************/ typedef struct icp_qat_fw_comp_bnp_out_tbl_entry_s { /**< LWs 0-3 */ icp_qat_fw_resp_comp_pars_t comp_out_pars; /**< Common output params (checksums and byte counts) */ /**< LW 4 */ icp_qat_fw_comn_error_t comn_error; /**< This field is overloaded to allow for one 8 bit common error field * or two 8 bit error fields from compression and translator */ uint8_t comn_status; /**< Status field which specifies which slice(s) report an error */ uint8_t reserved0; /**< Reserved, shall be set to zero */ uint32_t reserved1; /**< Reserved, shall be set to zero, added for aligning entries to quadword boundary */ } icp_qat_fw_comp_bnp_out_tbl_entry_t; /** ***************************************************************************** * @ingroup icp_qat_fw_comp * Supported modes for skipping regions of input or output buffers. * * @description * This enumeration lists the supported modes for skipping regions of * input or output buffers.
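* *****************************************************************************/ typedef enum icp_qat_fw_comp_bnp_skip_mode_s { ICP_QAT_FW_SKIP_DISABLED = 0, /**< Skip mode is disabled */ ICP_QAT_FW_SKIP_AT_START = 1, /**< Skip region is at the start of the buffer. */ ICP_QAT_FW_SKIP_AT_END = 2, /**< Skip region is at the end of the buffer. */ ICP_QAT_FW_SKIP_STRIDE = 3 /**< Skip region occurs at regular intervals within the buffer. The stride * length specifies the number of bytes between each skip region. */ } icp_qat_fw_comp_bnp_skip_mode_t;

/*
 * Editor's illustration (not part of the patch): a minimal sketch of
 * selecting a skip mode for the source side of a Batch and Pack job; the
 * chosen value would be carried in the job's skip-region configuration.
 * The variable name is hypothetical.
 */
icp_qat_fw_comp_bnp_skip_mode_t src_skip_mode = ICP_QAT_FW_SKIP_AT_START;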
/** ***************************************************************************** * @ingroup icp_qat_fw_comn * Flags describing the skip and compression job behaviour. Refer to the flag * definitions on skip mode and reset/flush types. * Note: compression behaviour flags are ignored for destination skip info. * @description * Definition of the common request flags. * *****************************************************************************/ typedef uint8_t icp_qat_fw_comp_bnp_flags_t; /** ***************************************************************************** * @ingroup icp_qat_fw_comn * Skip Region Data. * @description * This structure contains data relating to configuring skip region * behaviour. A skip region is a region of an input buffer that * should be omitted from processing or a region that should be inserted * into the output buffer. * *****************************************************************************/ typedef struct icp_qat_fw_comp_bnp_skip_info_s { /**< LW 0 */ uint16_t skip_length; /**next_curr_id_cipher) & \ ICP_QAT_FW_COMN_NEXT_ID_MASK) >> \ (ICP_QAT_FW_COMN_NEXT_ID_BITPOS)) #define ICP_QAT_FW_CIPHER_NEXT_ID_SET(cd_ctrl_hdr_t, val) \ (cd_ctrl_hdr_t)->next_curr_id_cipher = \ ((((cd_ctrl_hdr_t)->next_curr_id_cipher) & \ ICP_QAT_FW_COMN_CURR_ID_MASK) | \ ((val << ICP_QAT_FW_COMN_NEXT_ID_BITPOS) & \ ICP_QAT_FW_COMN_NEXT_ID_MASK)) #define ICP_QAT_FW_CIPHER_CURR_ID_GET(cd_ctrl_hdr_t) \ (((cd_ctrl_hdr_t)->next_curr_id_cipher) & ICP_QAT_FW_COMN_CURR_ID_MASK) #define ICP_QAT_FW_CIPHER_CURR_ID_SET(cd_ctrl_hdr_t, val) \ (cd_ctrl_hdr_t)->next_curr_id_cipher = \ ((((cd_ctrl_hdr_t)->next_curr_id_cipher) & \ ICP_QAT_FW_COMN_NEXT_ID_MASK) | \ ((val)&ICP_QAT_FW_COMN_CURR_ID_MASK)) /** Authentication fields within Cipher + Authentication structure */ #define ICP_QAT_FW_AUTH_NEXT_ID_GET(cd_ctrl_hdr_t) \ ((((cd_ctrl_hdr_t)->next_curr_id_auth) & \ ICP_QAT_FW_COMN_NEXT_ID_MASK) >> \ (ICP_QAT_FW_COMN_NEXT_ID_BITPOS)) #define ICP_QAT_FW_AUTH_NEXT_ID_SET(cd_ctrl_hdr_t, val) \ (cd_ctrl_hdr_t)->next_curr_id_auth = \ ((((cd_ctrl_hdr_t)->next_curr_id_auth) & \ ICP_QAT_FW_COMN_CURR_ID_MASK) | \ ((val << ICP_QAT_FW_COMN_NEXT_ID_BITPOS) & \ ICP_QAT_FW_COMN_NEXT_ID_MASK)) #define ICP_QAT_FW_AUTH_CURR_ID_GET(cd_ctrl_hdr_t) \ (((cd_ctrl_hdr_t)->next_curr_id_auth) & ICP_QAT_FW_COMN_CURR_ID_MASK) #define ICP_QAT_FW_AUTH_CURR_ID_SET(cd_ctrl_hdr_t, val) \ (cd_ctrl_hdr_t)->next_curr_id_auth = \ ((((cd_ctrl_hdr_t)->next_curr_id_auth) & \ ICP_QAT_FW_COMN_NEXT_ID_MASK) | \ ((val)&ICP_QAT_FW_COMN_CURR_ID_MASK)) /* Definitions of the bits in the test_status_info of the TRNG_TEST response. * The values returned by the Lookaside service are given below * The Test result and Test Fail Count values are only valid if the Test * Results Valid (Tv) is set.
* * TRNG Test Status Info * + ===== + ------------------------------------------------ + --- + --- + * | Bit | 31 - 2 | 1 | 0 | * + ===== + ------------------------------------------------ + --- + --- + * | Flags | RESERVED = 0 | Tv | Ts | * + ===== + ------------------------------------------------------------ + */ /****************************************************************************** * @ingroup icp_qat_fw_la * Definition of the Lookaside TRNG Test Status Information received as * a part of icp_qat_fw_la_trng_test_result_t * *****************************************************************************/ #define QAT_FW_LA_TRNG_TEST_STATUS_TS_BITPOS 0 /**< @ingroup icp_qat_fw_la * TRNG Test Result t_status field bit pos definition.*/ #define QAT_FW_LA_TRNG_TEST_STATUS_TS_MASK 0x1 /**< @ingroup icp_qat_fw_la * TRNG Test Result t_status field mask definition.*/ #define QAT_FW_LA_TRNG_TEST_STATUS_TV_BITPOS 1 /**< @ingroup icp_qat_fw_la * TRNG Test Result test results valid field bit pos definition.*/ #define QAT_FW_LA_TRNG_TEST_STATUS_TV_MASK 0x1 /**< @ingroup icp_qat_fw_la * TRNG Test Result test results valid field mask definition.*/ /****************************************************************************** * @ingroup icp_qat_fw_la * Definition of the Lookaside TRNG test_status values. * * *****************************************************************************/ #define QAT_FW_LA_TRNG_TEST_STATUS_TV_VALID 1 /**< @ingroup icp_qat_fw_la * TRNG TEST Response Test Results Valid Value.*/ #define QAT_FW_LA_TRNG_TEST_STATUS_TV_NOT_VALID 0 /**< @ingroup icp_qat_fw_la * TRNG TEST Response Test Results are NOT Valid Value.*/ #define QAT_FW_LA_TRNG_TEST_STATUS_TS_NO_FAILS 1 /**< @ingroup icp_qat_fw_la * Value for TRNG Test status tests have NO FAILs Value.*/ #define QAT_FW_LA_TRNG_TEST_STATUS_TS_HAS_FAILS 0 /**< @ingroup icp_qat_fw_la * Value for TRNG Test status tests have one or more FAILS Value.*/ /** ****************************************************************************** * @ingroup icp_qat_fw_la * * @description * Macro for extraction of the Test Status Field returned in the response * to TRNG TEST command. * * @param test_status 8 bit test_status value to extract the status bit * *****************************************************************************/ #define ICP_QAT_FW_LA_TRNG_TEST_STATUS_TS_FLD_GET(test_status) \ QAT_FIELD_GET(test_status, \ QAT_FW_LA_TRNG_TEST_STATUS_TS_BITPOS, \ QAT_FW_LA_TRNG_TEST_STATUS_TS_MASK) /** ****************************************************************************** * @ingroup icp_qat_fw_la * * @description * Macro for extraction of the Test Results Valid Field returned in the * response to TRNG TEST command. 
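* * @param test_status 8 bit test_status value to extract the Tests * Results valid bit * *****************************************************************************/ #define ICP_QAT_FW_LA_TRNG_TEST_STATUS_TV_FLD_GET(test_status) \ QAT_FIELD_GET(test_status, \ QAT_FW_LA_TRNG_TEST_STATUS_TV_BITPOS, \ QAT_FW_LA_TRNG_TEST_STATUS_TV_MASK)

/*
 * Editor's illustration (not part of the patch): a minimal sketch of
 * interpreting the TRNG TEST status byte with the two extraction macros
 * above. The helper name is hypothetical; the Ts bit is only meaningful
 * when the Tv bit reports valid results.
 */
static inline int
qat_trng_tests_passed(uint8_t test_status)
{
	return (ICP_QAT_FW_LA_TRNG_TEST_STATUS_TV_FLD_GET(test_status) ==
		    QAT_FW_LA_TRNG_TEST_STATUS_TV_VALID &&
	    ICP_QAT_FW_LA_TRNG_TEST_STATUS_TS_FLD_GET(test_status) ==
		    QAT_FW_LA_TRNG_TEST_STATUS_TS_NO_FAILS);
}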
/* ****************************************************************************** * MGF Max supported input parameters ****************************************************************************** */ #define ICP_QAT_FW_LA_MGF_SEED_LEN_MAX 255 /**< @ingroup icp_qat_fw_la * Maximum seed length for MGF1 request in bytes * Typical values may be 48, 64, 128 bytes (or any).*/ #define ICP_QAT_FW_LA_MGF_MASK_LEN_MAX 65528 /**< @ingroup icp_qat_fw_la * Maximum mask length for MGF1 request in bytes * Typical values may be 8 (64-bit), 16 (128-bit). MUST be quad word multiple */ /* ****************************************************************************** * SSL Max supported input parameters ****************************************************************************** */ #define ICP_QAT_FW_LA_SSL_SECRET_LEN_MAX 512 /**< @ingroup icp_qat_fw_la * Maximum secret length for SSL3 Key Gen request (bytes) */ #define ICP_QAT_FW_LA_SSL_ITERATES_LEN_MAX 16 /**< @ingroup icp_qat_fw_la * Maximum iterations for SSL3 Key Gen request (integer) */ #define ICP_QAT_FW_LA_SSL_LABEL_LEN_MAX 136 /**< @ingroup icp_qat_fw_la * Maximum label length for SSL3 Key Gen request (bytes) */ #define ICP_QAT_FW_LA_SSL_SEED_LEN_MAX 64 /**< @ingroup icp_qat_fw_la * Maximum seed length for SSL3 Key Gen request (bytes) */ #define ICP_QAT_FW_LA_SSL_OUTPUT_LEN_MAX 248 /**< @ingroup icp_qat_fw_la * Maximum output length for SSL3 Key Gen request (bytes) */ /* ****************************************************************************** * TLS Max supported input parameters ****************************************************************************** */ #define ICP_QAT_FW_LA_TLS_SECRET_LEN_MAX 128 /**< @ingroup icp_qat_fw_la * Maximum secret length for TLS Key Gen request (bytes) */ #define ICP_QAT_FW_LA_TLS_V1_1_SECRET_LEN_MAX 128 /**< @ingroup icp_qat_fw_la * Maximum secret length for TLS Key Gen request (bytes) */ #define ICP_QAT_FW_LA_TLS_V1_2_SECRET_LEN_MAX 64 /**< @ingroup icp_qat_fw_la * Maximum secret length for TLS Key Gen request (bytes) */ #define ICP_QAT_FW_LA_TLS_LABEL_LEN_MAX 255 /**< @ingroup icp_qat_fw_la * Maximum label length for TLS Key Gen request (bytes) */ #define ICP_QAT_FW_LA_TLS_SEED_LEN_MAX 64 /**< @ingroup icp_qat_fw_la * Maximum seed length for TLS Key Gen request (bytes) */ #define ICP_QAT_FW_LA_TLS_OUTPUT_LEN_MAX 248 /**< @ingroup icp_qat_fw_la * Maximum output length for TLS Key Gen request (bytes) */ /* ****************************************************************************** * HKDF input parameters ****************************************************************************** */ #define QAT_FW_HKDF_LABEL_BUFFER_SZ 78 #define QAT_FW_HKDF_LABEL_LEN_SZ 1 #define QAT_FW_HKDF_LABEL_FLAGS_SZ 1 #define QAT_FW_HKDF_LABEL_STRUCT_SZ \ (QAT_FW_HKDF_LABEL_BUFFER_SZ + QAT_FW_HKDF_LABEL_LEN_SZ + \ QAT_FW_HKDF_LABEL_FLAGS_SZ) /** ***************************************************************************** * @ingroup icp_qat_fw_la * * @description * Wraps an RFC 8446 HkdfLabel with metadata for use in HKDF Expand-Label * operations.
* *****************************************************************************/ struct icp_qat_fw_hkdf_label { uint8_t label[QAT_FW_HKDF_LABEL_BUFFER_SZ]; /**< Buffer containing an HkdfLabel as specified in RFC 8446 */ uint8_t label_length; /**< The size of the HkdfLabel */ union { uint8_t label_flags; /**< For first-level labels: each bit in [0..3] will trigger a - * child - * Expand-Label operation on the corresponding sublabel. Bits - * [4..7] - * are reserved. + * child Expand-Label operation on the corresponding sublabel. + * Bits [4..7] are reserved. */ uint8_t sublabel_flags; /**< For sublabels the following flags are defined: * - QAT_FW_HKDF_INNER_SUBLABEL_12_BYTE_OKM_BITPOS * - QAT_FW_HKDF_INNER_SUBLABEL_16_BYTE_OKM_BITPOS * - QAT_FW_HKDF_INNER_SUBLABEL_32_BYTE_OKM_BITPOS */ } u; }; #define ICP_QAT_FW_LA_HKDF_SECRET_LEN_MAX 64 /**< Maximum secret length for HKDF request (bytes) */ #define ICP_QAT_FW_LA_HKDF_IKM_LEN_MAX 64 /**< Maximum IKM length for HKDF request (bytes) */ #define QAT_FW_HKDF_MAX_LABELS 4 /**< Maximum number of label structures allowed in the labels buffer */ #define QAT_FW_HKDF_MAX_SUBLABELS 4 /**< Maximum number of label structures allowed in the sublabels buffer */ /* ****************************************************************************** * HKDF inner sublabel flags ****************************************************************************** */ #define QAT_FW_HKDF_INNER_SUBLABEL_12_BYTE_OKM_BITPOS 0 /**< Limit sublabel expand output to 12 bytes -- used with the "iv" sublabel */ #define QAT_FW_HKDF_INNER_SUBLABEL_16_BYTE_OKM_BITPOS 1 /**< Limit sublabel expand output to 16 bytes -- used with SHA-256 "key" */ #define QAT_FW_HKDF_INNER_SUBLABEL_32_BYTE_OKM_BITPOS 2 /**< Limit sublabel expand output to 32 bytes -- used with SHA-384 "key" */ #endif /* _ICP_QAT_FW_LA_H_ */ diff --git a/sys/dev/qat/qat_api/firmware/include/icp_qat_hw.h b/sys/dev/qat/qat_api/firmware/include/icp_qat_hw.h index 5ed607aa3c3b..629b6a3d675c 100644 --- a/sys/dev/qat/qat_api/firmware/include/icp_qat_hw.h +++ b/sys/dev/qat/qat_api/firmware/include/icp_qat_hw.h @@ -1,1552 +1,1619 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright(c) 2007-2022 Intel Corporation */ /* $FreeBSD$ */ /** ***************************************************************************** * @file icp_qat_hw.h * @defgroup icp_qat_hw_defs ICP QAT HW definitions * @ingroup icp_qat_hw * @description * This file documents definitions for the QAT HW * *****************************************************************************/ #ifndef _ICP_QAT_HW_H_ #define _ICP_QAT_HW_H_ /* ****************************************************************************** * Include public/global header files ****************************************************************************** */ /* ========================================================================= */ /* AccelerationEngine */ /* ========================================================================= */ typedef enum { - ICP_QAT_HW_AE_0 = 0, /*!< ID of AE0 */ - ICP_QAT_HW_AE_1 = 1, /*!< ID of AE1 */ - ICP_QAT_HW_AE_2 = 2, /*!< ID of AE2 */ - ICP_QAT_HW_AE_3 = 3, /*!< ID of AE3 */ - ICP_QAT_HW_AE_4 = 4, /*!< ID of AE4 */ - ICP_QAT_HW_AE_5 = 5, /*!< ID of AE5 */ - ICP_QAT_HW_AE_6 = 6, /*!< ID of AE6 */ - ICP_QAT_HW_AE_7 = 7, /*!< ID of AE7 */ - ICP_QAT_HW_AE_8 = 8, /*!< ID of AE8 */ - ICP_QAT_HW_AE_9 = 9, /*!< ID of AE9 */ - ICP_QAT_HW_AE_10 = 10, /*!< ID of AE10 */ - ICP_QAT_HW_AE_11 = 11, /*!< ID of AE11 */ - ICP_QAT_HW_AE_12 = 12, /*!< ID of AE12 */ - 
ICP_QAT_HW_AE_13 = 13, /*!< ID of AE13 */ - ICP_QAT_HW_AE_14 = 14, /*!< ID of AE14 */ - ICP_QAT_HW_AE_15 = 15, /*!< ID of AE15 */ + ICP_QAT_HW_AE_0 = 0, /*!< ID of AE0 */ + ICP_QAT_HW_AE_1 = 1, /*!< ID of AE1 */ + ICP_QAT_HW_AE_2 = 2, /*!< ID of AE2 */ + ICP_QAT_HW_AE_3 = 3, /*!< ID of AE3 */ + ICP_QAT_HW_AE_4 = 4, /*!< ID of AE4 */ + ICP_QAT_HW_AE_5 = 5, /*!< ID of AE5 */ + ICP_QAT_HW_AE_6 = 6, /*!< ID of AE6 */ + ICP_QAT_HW_AE_7 = 7, /*!< ID of AE7 */ + ICP_QAT_HW_AE_8 = 8, /*!< ID of AE8 */ + ICP_QAT_HW_AE_9 = 9, /*!< ID of AE9 */ + ICP_QAT_HW_AE_10 = 10, /*!< ID of AE10 */ + ICP_QAT_HW_AE_11 = 11, /*!< ID of AE11 */ + ICP_QAT_HW_AE_12 = 12, /*!< ID of AE12 */ + ICP_QAT_HW_AE_13 = 13, /*!< ID of AE13 */ + ICP_QAT_HW_AE_14 = 14, /*!< ID of AE14 */ + ICP_QAT_HW_AE_15 = 15, /*!< ID of AE15 */ ICP_QAT_HW_AE_DELIMITER = 16 /**< Delimiter type */ } icp_qat_hw_ae_id_t; /* ========================================================================= */ /* QAT */ /* ========================================================================= */ typedef enum { - ICP_QAT_HW_QAT_0 = 0, /*!< ID of QAT0 */ - ICP_QAT_HW_QAT_1 = 1, /*!< ID of QAT1 */ - ICP_QAT_HW_QAT_2 = 2, /*!< ID of QAT2 */ - ICP_QAT_HW_QAT_3 = 3, /*!< ID of QAT3 */ - ICP_QAT_HW_QAT_4 = 4, /*!< ID of QAT4 */ - ICP_QAT_HW_QAT_5 = 5, /*!< ID of QAT5 */ + ICP_QAT_HW_QAT_0 = 0, /*!< ID of QAT0 */ + ICP_QAT_HW_QAT_1 = 1, /*!< ID of QAT1 */ + ICP_QAT_HW_QAT_2 = 2, /*!< ID of QAT2 */ + ICP_QAT_HW_QAT_3 = 3, /*!< ID of QAT3 */ + ICP_QAT_HW_QAT_4 = 4, /*!< ID of QAT4 */ + ICP_QAT_HW_QAT_5 = 5, /*!< ID of QAT5 */ ICP_QAT_HW_QAT_DELIMITER = 6 /**< Delimiter type */ } icp_qat_hw_qat_id_t; /* ========================================================================= */ /* AUTH SLICE */ /* ========================================================================= */ /** ***************************************************************************** * @ingroup icp_qat_hw_defs * Supported Authentication Algorithm types * @description * Enumeration which is used to define the authentication algorithms * *****************************************************************************/ typedef enum { ICP_QAT_HW_AUTH_ALGO_NULL = 0, /*!< Null hashing */ ICP_QAT_HW_AUTH_ALGO_SHA1 = 1, /*!< SHA1 hashing */ ICP_QAT_HW_AUTH_ALGO_MD5 = 2, /*!< MD5 hashing */ ICP_QAT_HW_AUTH_ALGO_SHA224 = 3, /*!< SHA-224 hashing */ ICP_QAT_HW_AUTH_ALGO_SHA256 = 4, /*!< SHA-256 hashing */ ICP_QAT_HW_AUTH_ALGO_SHA384 = 5, /*!< SHA-384 hashing */ ICP_QAT_HW_AUTH_ALGO_SHA512 = 6, /*!< SHA-512 hashing */ - ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC = 7, /*!< AES-XCBC-MAC hashing */ - ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC = 8, /*!< AES-CBC-MAC hashing */ + ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC = 7, /*!< AES-XCBC-MAC hashing */ + ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC = 8, /*!< AES-CBC-MAC hashing */ ICP_QAT_HW_AUTH_ALGO_AES_F9 = 9, /*!< AES F9 hashing */ - ICP_QAT_HW_AUTH_ALGO_GALOIS_128 = 10, /*!< Galois 128 bit hashing */ - ICP_QAT_HW_AUTH_ALGO_GALOIS_64 = 11, /*!< Galois 64 hashing */ - ICP_QAT_HW_AUTH_ALGO_KASUMI_F9 = 12, /*!< Kasumi F9 hashing */ + ICP_QAT_HW_AUTH_ALGO_GALOIS_128 = 10, /*!< Galois 128 bit hashing */ + ICP_QAT_HW_AUTH_ALGO_GALOIS_64 = 11, /*!< Galois 64 hashing */ + ICP_QAT_HW_AUTH_ALGO_KASUMI_F9 = 12, /*!< Kasumi F9 hashing */ ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 = 13, /*!< UIA2/SNOW_3G F9 hashing */ ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 = - 14, /*!< 128_EIA3/ZUC_3G hashing */ - ICP_QAT_HW_AUTH_ALGO_SM3 = 15, /*!< SM3 hashing */ - ICP_QAT_HW_AUTH_ALGO_SHA3_224 = 16, /*!< SHA3-224 hashing */ -
ICP_QAT_HW_AUTH_ALGO_SHA3_256 = 17, /*!< SHA3-256 hashing */ - ICP_QAT_HW_AUTH_ALGO_SHA3_384 = 18, /*!< SHA3-384 hashing */ - ICP_QAT_HW_AUTH_ALGO_SHA3_512 = 19, /*!< SHA3-512 hashing */ - ICP_QAT_HW_AUTH_ALGO_SHAKE_128 = 20, /*!< SHAKE-128 hashing */ - ICP_QAT_HW_AUTH_ALGO_SHAKE_256 = 21, /*!< SHAKE-256 hashing */ - ICP_QAT_HW_AUTH_ALGO_POLY = 22, /*!< POLY hashing */ - ICP_QAT_HW_AUTH_ALGO_DELIMITER = 23 /**< Delimiter type */ + 14, /*!< 128_EIA3/ZUC_3G hashing */ + ICP_QAT_HW_AUTH_ALGO_SM3 = 15, /*!< SM3 hashing */ + ICP_QAT_HW_AUTH_ALGO_SHA3_224 = 16, /*!< SHA3-224 hashing */ + ICP_QAT_HW_AUTH_ALGO_SHA3_256 = 17, /*!< SHA3-256 hashing */ + ICP_QAT_HW_AUTH_ALGO_SHA3_384 = 18, /*!< SHA3-384 hashing */ + ICP_QAT_HW_AUTH_ALGO_SHA3_512 = 19, /*!< SHA3-512 hashing */ + ICP_QAT_HW_AUTH_RESERVED_4 = 20, /*!< Reserved */ + ICP_QAT_HW_AUTH_RESERVED_5 = 21, /*!< Reserved */ + ICP_QAT_HW_AUTH_ALGO_POLY = 22, /*!< POLY hashing */ + ICP_QAT_HW_AUTH_ALGO_DELIMITER = 23 /**< Delimiter type */ } icp_qat_hw_auth_algo_t; /** ***************************************************************************** * @ingroup icp_qat_hw_defs * Definition of the supported Authentication modes * @description * Enumeration which is used to define the authentication slice modes. * The concept of modes is very specific to the QAT implementation. Its * main use is to differentiate how the algorithms are used i.e. mode0 SHA1 * will configure the QAT Auth Slice to do plain SHA1 hashing while mode1 * configures it to do SHA1 HMAC with precomputes and mode2 sets up the * slice to do SHA1 HMAC with no precomputes (uses key directly) * * @Note * Only some algorithms are valid in some of the modes. If you don't know * what you are doing, then refer back to the HW documentation * *****************************************************************************/ typedef enum { - ICP_QAT_HW_AUTH_MODE0 = 0, /*!< QAT Auth Mode0 configuration */ - ICP_QAT_HW_AUTH_MODE1 = 1, /*!< QAT Auth Mode1 configuration */ - ICP_QAT_HW_AUTH_MODE2 = 2, /*!< QAT AuthMode2 configuration */ + ICP_QAT_HW_AUTH_MODE0 = 0, /*!< QAT Auth Mode0 configuration */ + ICP_QAT_HW_AUTH_MODE1 = 1, /*!< QAT Auth Mode1 configuration */ + ICP_QAT_HW_AUTH_MODE2 = 2, /*!< QAT AuthMode2 configuration */ ICP_QAT_HW_AUTH_MODE_DELIMITER = 3 /**< Delimiter type */ } icp_qat_hw_auth_mode_t; /** ***************************************************************************** * @ingroup icp_qat_hw_defs * Auth configuration structure * * @description * Definition of the format of the authentication slice configuration * *****************************************************************************/ typedef struct icp_qat_hw_auth_config_s { uint32_t config; /**< Configuration used for setting up the slice */ uint32_t reserved; /**< Reserved */ } icp_qat_hw_auth_config_t; /* Private defines */ /* Note: Bit positions have been defined for little endian ordering */ /* * AUTH CONFIG WORD BITMAP * + ===== + ------ + ------ + ------- + ------ + ------ + ----- + ----- + ------ + ------ + ---- + ----- + ----- + ----- + * | Bit | 63:56 | 55:52 | 51:48 | 47:32 | 31:24 | 23:22 | 21:18 | 17 | 16 | 15 | 14:8 | 7:4 | 3:0 | * + ===== + ------ + ------ + ------- + ------ + ------ + ----- + ----- + ------ + ------ + ---- + ----- + ------+ ----- + * | Usage | Prog | Resvd | Prog | Resvd | Resvd | Algo | Rsvrd | SHA3 | SHA3 |Rsvrd | Cmp | Mode | Algo | * | |padding | Bits=0 | padding | Bits=0 | Bits=0 | SHA3 | |Padding |Padding | | | | | * | | SHA3 | | SHA3 | | | | |Override|Disable | | | | | * | |(prefix)|
|(postfix)| | | | | | | | | | * + ===== + ------ + ------ + ------- + ------ + ------ + ----- + ----- + ------ + ------ + ---- + ----- + ----- + ------+ */ /**< Flag mask & bit position */ #define QAT_AUTH_MODE_BITPOS 4 /**< @ingroup icp_qat_hw_defs * Starting bit position indicating the Auth mode */ #define QAT_AUTH_MODE_MASK 0xF /**< @ingroup icp_qat_hw_defs * Four bit mask used for determining the Auth mode */ #define QAT_AUTH_ALGO_BITPOS 0 /**< @ingroup icp_qat_hw_defs * Starting bit position indicating the Auth Algo */ #define QAT_AUTH_ALGO_MASK 0xF /**< @ingroup icp_qat_hw_defs * Four bit mask used for determining the Auth algo */ #define QAT_AUTH_CMP_BITPOS 8 /**< @ingroup icp_qat_hw_defs * Starting bit position indicating the Auth Compare */ #define QAT_AUTH_CMP_MASK 0x7F /**< @ingroup icp_qat_hw_defs * Seven bit mask used to determine the Auth Compare */ #define QAT_AUTH_SHA3_PADDING_DISABLE_BITPOS 16 /**< @ingroup icp_qat_hw_defs * Starting bit position indicating the Auth h/w * padding disable for SHA3. * Flag set to 0 => h/w is required to pad (default) * Flag set to 1 => No padding in h/w */ #define QAT_AUTH_SHA3_PADDING_DISABLE_MASK 0x1 /**< @ingroup icp_qat_hw_defs * Single bit mask used to determine the Auth h/w * padding disable for SHA3. */ #define QAT_AUTH_SHA3_PADDING_OVERRIDE_BITPOS 17 /**< @ingroup icp_qat_hw_defs * Starting bit position indicating the Auth h/w * padding override for SHA3. * Flag set to 0 => default padding behaviour * implemented in SHA3-256 slice will take effect * (default hardware setting upon h/w reset) * Flag set to 1 => SHA3-core will not use the padding * sequence built into the SHA3 core. Instead, the * padding sequence specified in bits 48-51 and 56-63 * of the 64-bit auth config word will apply * (corresponds with EAS bits 32-43). */ #define QAT_AUTH_SHA3_PADDING_OVERRIDE_MASK 0x1 /**< @ingroup icp_qat_hw_defs * Single bit mask used to determine the Auth h/w * padding override for SHA3. */ #define QAT_AUTH_ALGO_SHA3_BITPOS 22 /**< @ingroup icp_qat_hw_defs * Starting bit position for indicating the * SHA3 Auth Algo */ #define QAT_AUTH_ALGO_SHA3_MASK 0x3 /**< @ingroup icp_qat_hw_defs * Two bit mask used for determining the * SHA3 Auth algo */ /**< Flag mask & bit position */ #define QAT_AUTH_SHA3_PROG_PADDING_POSTFIX_BITPOS 16 /**< @ingroup icp_qat_hw_defs * Starting bit position indicating the SHA3 * flexible programmable padding postfix. * Note that these bits are set using macro * ICP_QAT_HW_AUTH_CONFIG_BUILD_UPPER and are * defined relative to the 32-bit value that * this macro returns. In effect, therefore, this * defines starting bit position 48 within the * 64-bit auth config word. */ #define QAT_AUTH_SHA3_PROG_PADDING_POSTFIX_MASK 0xF /**< @ingroup icp_qat_hw_defs * Four-bit mask used to determine the SHA3 * flexible programmable padding postfix */ #define QAT_AUTH_SHA3_PROG_PADDING_PREFIX_BITPOS 24 /**< @ingroup icp_qat_hw_defs * Starting bit position indicating the SHA3 * flexible programmable padding prefix * Note that these bits are set using macro * ICP_QAT_HW_AUTH_CONFIG_BUILD_UPPER and are * defined relative to the 32-bit value that * this macro returns. In effect, therefore, this * defines starting bit position 56 within the * 64-bit auth config word.
*/ #define QAT_AUTH_SHA3_PROG_PADDING_PREFIX_MASK 0xFF /**< @ingroup icp_qat_hw_defs * Eight-bit mask used to determine the SHA3 * flexible programmable padding prefix */ /**< Flag usage - see additional notes @description for * ICP_QAT_HW_AUTH_CONFIG_BUILD and * ICP_QAT_HW_AUTH_CONFIG_BUILD_UPPER macros. -*/ + */ #define QAT_AUTH_SHA3_HW_PADDING_ENABLE 0 /**< @ingroup icp_qat_hw_defs * This setting enables h/w padding for SHA3. */ #define QAT_AUTH_SHA3_HW_PADDING_DISABLE 1 /**< @ingroup icp_qat_hw_defs * This setting disables h/w padding for SHA3. */ #define QAT_AUTH_SHA3_PADDING_DISABLE_USE_DEFAULT 0 /**< @ingroup icp_qat_hw_defs * Default value for the Auth h/w padding disable. * If set to 0 for SHA3-256, h/w padding is enabled. * Padding_Disable is undefined for all non-SHA3-256 * algos and is consequently set to the default of 0. */ #define QAT_AUTH_SHA3_PADDING_OVERRIDE_USE_DEFAULT 0 /**< @ingroup icp_qat_hw_defs * Value for the Auth h/w padding override for SHA3. * Flag set to 0 => default padding behaviour * implemented in SHA3-256 slice will take effect * (default hardware setting upon h/w reset) * For this setting of the override flag, all the * bits of the padding sequence specified * in bits 48-51 and 56-63 of the 64-bit * auth config word are set to 0 (reserved). */ #define QAT_AUTH_SHA3_PADDING_OVERRIDE_PROGRAMMABLE 1 /**< @ingroup icp_qat_hw_defs * Value for the Auth h/w padding override for SHA3. * Flag set to 1 => SHA3-core will not use the padding * sequence built into the SHA3 core. Instead, the * padding sequence specified in bits 48-51 and 56-63 * of the 64-bit auth config word will apply * (corresponds with EAS bits 32-43). */ #define QAT_AUTH_SHA3_PROG_PADDING_POSTFIX_RESERVED 0 /**< @ingroup icp_qat_hw_defs * All the bits of the padding sequence specified in * bits 48-51 of the 64-bit auth config word are set * to 0 (reserved) if the padding override bit is set * to 0, indicating default padding. */ #define QAT_AUTH_SHA3_PROG_PADDING_PREFIX_RESERVED 0 /**< @ingroup icp_qat_hw_defs * All the bits of the padding sequence specified in * bits 56-63 of the 64-bit auth config word are set * to 0 (reserved) if the padding override bit is set * to 0, indicating default padding. */ /** *************************************************************************************** * @ingroup icp_qat_hw_defs * * @description * The derived configuration word for the auth slice is based on the inputs * of mode, algorithm type and compare length. The total size of the auth * config word in the setup block is 64 bits; however, the size of the value * returned by this macro is assumed to be only 32 bits (for now) and sets * the lower 32 bits of the auth config word. Unfortunately, changing the * size of the returned value to 64 bits will also require changes to the * shared RAM constants table so the macro size will remain at 32 bits. * This means that the padding sequence bits specified in bits 48-51 and * 56-63 of the 64-bit auth config word are NOT included in the * ICP_QAT_HW_AUTH_CONFIG_BUILD macro and are defined in a * separate macro, namely, ICP_QAT_HW_AUTH_CONFIG_BUILD_UPPER. * * For the digest generation case the compare length is a don't care value. * Furthermore, if the client will be doing the digest validation, the * compare_length will not be used. * The padding and padding override bits for SHA3 are set internally * by the macro. * Padding_Disable is set to 0 for the SHA3-256 algo only, i.e. we want to * enable this to provide the ability to test with h/w padding enabled.
* Padding_Disable has no meaning for all non-SHA3-256 algos and is * consequently set to the default of 0. * Padding Override is set to 0, implying that the padding behaviour * implemented in the SHA3-256 slice will take effect (default hardware * setting upon h/w reset). * This flag has no meaning for other algos, so is also set to the default * for non-SHA3-256 algos. * * @param mode Authentication mode to use * @param algo Auth Algorithm to use * @param cmp_len The length of the digest if the QAT is to do the check * ****************************************************************************************/ #define ICP_QAT_HW_AUTH_CONFIG_BUILD(mode, algo, cmp_len) \ ((((mode)&QAT_AUTH_MODE_MASK) << QAT_AUTH_MODE_BITPOS) | \ (((algo)&QAT_AUTH_ALGO_MASK) << QAT_AUTH_ALGO_BITPOS) | \ (((algo >> 4) & QAT_AUTH_ALGO_SHA3_MASK) \ << QAT_AUTH_ALGO_SHA3_BITPOS) | \ (((QAT_AUTH_SHA3_PADDING_DISABLE_USE_DEFAULT)&QAT_AUTH_SHA3_PADDING_DISABLE_MASK) \ << QAT_AUTH_SHA3_PADDING_DISABLE_BITPOS) | \ (((QAT_AUTH_SHA3_PADDING_OVERRIDE_USE_DEFAULT)&QAT_AUTH_SHA3_PADDING_OVERRIDE_MASK) \ << QAT_AUTH_SHA3_PADDING_OVERRIDE_BITPOS) | \ (((cmp_len)&QAT_AUTH_CMP_MASK) << QAT_AUTH_CMP_BITPOS)) /** *************************************************************************************** * @ingroup icp_qat_hw_defs * * @description * This macro sets the upper 32 bits of the 64-bit auth config word. * The sequence bits specified in bits 48-51 and 56-63 of the 64-bit auth * config word are included in this macro, which is therefore assumed to * return a 32-bit value. * Note that the Padding Override bit is set in macro * ICP_QAT_HW_AUTH_CONFIG_BUILD. * Since the Padding Override is set to 0 regardless, for now, all the bits * of the padding sequence specified in bits 48-51 and 56-63 of the 64-bit * auth config word are set to 0 (reserved). Note that the bit positions of * the padding sequence bits are defined relative to the 32-bit value that * this macro returns. * ****************************************************************************************/ #define ICP_QAT_HW_AUTH_CONFIG_BUILD_UPPER \ ((((QAT_AUTH_SHA3_PROG_PADDING_POSTFIX_RESERVED)&QAT_AUTH_SHA3_PROG_PADDING_POSTFIX_MASK) \ << QAT_AUTH_SHA3_PROG_PADDING_POSTFIX_BITPOS) | \ (((QAT_AUTH_SHA3_PROG_PADDING_PREFIX_RESERVED)&QAT_AUTH_SHA3_PROG_PADDING_PREFIX_MASK) \ << QAT_AUTH_SHA3_PROG_PADDING_PREFIX_BITPOS)) /** ***************************************************************************** * @ingroup icp_qat_hw_defs * Auth Counter structure * * @description * 32 bit counter that tracks the number of data bytes passed through * the slice. This is used by the padding logic for some algorithms. Note * only the upper 32 bits are set. * *****************************************************************************/ typedef struct icp_qat_hw_auth_counter_s { uint32_t counter; /**< Counter value */ uint32_t reserved; /**< Reserved */ } icp_qat_hw_auth_counter_t; /* Private defines */ #define QAT_AUTH_COUNT_MASK 0xFFFFFFFF /**< @ingroup icp_qat_hw_defs * Thirty two bit mask used for determining the Auth count */ #define QAT_AUTH_COUNT_BITPOS 0 /**< @ingroup icp_qat_hw_defs * Starting bit position indicating the Auth count.
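*/

/*
 * Editor's illustration (not part of the patch): a minimal sketch of
 * deriving both halves of the 64-bit auth config word for SHA-256 in
 * mode 1 (HMAC with precomputes) with a 32-byte digest compare length.
 * Variable names are hypothetical.
 */
uint32_t auth_cfg_lower = ICP_QAT_HW_AUTH_CONFIG_BUILD(
    ICP_QAT_HW_AUTH_MODE1, ICP_QAT_HW_AUTH_ALGO_SHA256, 32);
uint32_t auth_cfg_upper = ICP_QAT_HW_AUTH_CONFIG_BUILD_UPPER;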
/** ****************************************************************************** * @ingroup icp_qat_hw_defs * * @description * Macro to build the auth counter quad word * * @param val Counter value to set * *****************************************************************************/ #define ICP_QAT_HW_AUTH_COUNT_BUILD(val) \ (((val)&QAT_AUTH_COUNT_MASK) << QAT_AUTH_COUNT_BITPOS) /** ***************************************************************************** * @ingroup icp_qat_hw_defs * Definition of the common auth parameters * @description * This part of the configuration is constant for each service * *****************************************************************************/ typedef struct icp_qat_hw_auth_setup_s { icp_qat_hw_auth_config_t auth_config; /**< Configuration word for the auth slice */ icp_qat_hw_auth_counter_t auth_counter; /**< Auth counter value for this request */ } icp_qat_hw_auth_setup_t; /* ************************************************************************* */ /* ************************************************************************* */ #define QAT_HW_DEFAULT_ALIGNMENT 8 #define QAT_HW_ROUND_UP(val, n) (((val) + ((n)-1)) & (~(n - 1))) /* State1 */ -#define ICP_QAT_HW_NULL_STATE1_SZ 64 +#define ICP_QAT_HW_NULL_STATE1_SZ 32 /**< @ingroup icp_qat_hw_defs * State1 block size for NULL hashing */ #define ICP_QAT_HW_MD5_STATE1_SZ 16 /**< @ingroup icp_qat_hw_defs * State1 block size for MD5 */ #define ICP_QAT_HW_SHA1_STATE1_SZ 20 /**< @ingroup icp_qat_hw_defs * Define the state1 block size for SHA1 - Note that for the QAT HW the state * is rounded to the nearest 8 byte multiple */ #define ICP_QAT_HW_SHA224_STATE1_SZ 32 /**< @ingroup icp_qat_hw_defs * State1 block size for SHA224 */ -#define ICP_QAT_HW_SHA3_224_STATE1_SZ 32 +#define ICP_QAT_HW_SHA3_224_STATE1_SZ 28 /**< @ingroup icp_qat_hw_defs * State1 block size for SHA3_224 */ #define ICP_QAT_HW_SHA256_STATE1_SZ 32 /**< @ingroup icp_qat_hw_defs * State1 block size for SHA256 */ #define ICP_QAT_HW_SHA3_256_STATE1_SZ 32 /**< @ingroup icp_qat_hw_defs * State1 block size for SHA3_256 */ #define ICP_QAT_HW_SHA384_STATE1_SZ 64 /**< @ingroup icp_qat_hw_defs * State1 block size for SHA384 */ -#define ICP_QAT_HW_SHA3_384_STATE1_SZ 64 +#define ICP_QAT_HW_SHA3_384_STATE1_SZ 48 /**< @ingroup icp_qat_hw_defs * State1 block size for SHA3_384 */ #define ICP_QAT_HW_SHA512_STATE1_SZ 64 /**< @ingroup icp_qat_hw_defs * State1 block size for SHA512 */ #define ICP_QAT_HW_SHA3_512_STATE1_SZ 64 /**< @ingroup icp_qat_hw_defs * State1 block size for SHA3_512 */ #define ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ 16 /**< @ingroup icp_qat_hw_defs * State1 block size for XCBC */ #define ICP_QAT_HW_AES_CBC_MAC_STATE1_SZ 16 /**< @ingroup icp_qat_hw_defs * State1 block size for CBC */ #define ICP_QAT_HW_AES_F9_STATE1_SZ 32 /**< @ingroup icp_qat_hw_defs * State1 block size for AES F9 */ #define ICP_QAT_HW_KASUMI_F9_STATE1_SZ 16 /**< @ingroup icp_qat_hw_defs * State1 block size for Kasumi F9 */ #define ICP_QAT_HW_GALOIS_128_STATE1_SZ 16 /**< @ingroup icp_qat_hw_defs * State1 block size for Galois128 */ #define ICP_QAT_HW_SNOW_3G_UIA2_STATE1_SZ 8 /**< @ingroup icp_cpm_hw_defs * State1 block size for UIA2 */ #define ICP_QAT_HW_ZUC_3G_EIA3_STATE1_SZ 8 /**< @ingroup icp_cpm_hw_defs * State1 block size for EIA3 */ +#define ICP_QAT_HW_SM3_STATE1_SZ 32 +/**< @ingroup icp_qat_hw_defs + * State1 block size for SM3 */ #define ICP_QAT_HW_SHA3_STATEFUL_STATE1_SZ 200 /**< @ingroup icp_cpm_hw_defs * State1 block size for stateful SHA3 processing */ -#define
/* State2 */
-#define ICP_QAT_HW_NULL_STATE2_SZ 64
+#define ICP_QAT_HW_NULL_STATE2_SZ 32
/**< @ingroup icp_qat_hw_defs
 * State2 block size for NULL hashing */
#define ICP_QAT_HW_MD5_STATE2_SZ 16
/**< @ingroup icp_qat_hw_defs
 * State2 block size for MD5 */
#define ICP_QAT_HW_SHA1_STATE2_SZ 20
/**< @ingroup icp_qat_hw_defs
 * State2 block size for SHA1 - Note that for the QAT HW the state is rounded
 * to the nearest 8 byte multiple */
#define ICP_QAT_HW_SHA224_STATE2_SZ 32
/**< @ingroup icp_qat_hw_defs
 * State2 block size for SHA224 */
-#define ICP_QAT_HW_SHA3_224_STATE2_SZ 32
+#define ICP_QAT_HW_SHA3_224_STATE2_SZ 0
/**< @ingroup icp_qat_hw_defs
 * State2 block size for SHA3_224 */
#define ICP_QAT_HW_SHA256_STATE2_SZ 32
/**< @ingroup icp_qat_hw_defs
 * State2 block size for SHA256 */
-#define ICP_QAT_HW_SHA3_256_STATE2_SZ 32
+#define ICP_QAT_HW_SHA3_256_STATE2_SZ 0
/**< @ingroup icp_qat_hw_defs
 * State2 block size for SHA3_256 */
#define ICP_QAT_HW_SHA384_STATE2_SZ 64
/**< @ingroup icp_qat_hw_defs
 * State2 block size for SHA384 */
-#define ICP_QAT_HW_SHA3_384_STATE2_SZ 64
+#define ICP_QAT_HW_SHA3_384_STATE2_SZ 0
/**< @ingroup icp_qat_hw_defs
 * State2 block size for SHA3_384 */
#define ICP_QAT_HW_SHA512_STATE2_SZ 64
/**< @ingroup icp_qat_hw_defs
 * State2 block size for SHA512 */
-#define ICP_QAT_HW_SHA3_512_STATE2_SZ 64
+#define ICP_QAT_HW_SHA3_512_STATE2_SZ 0
/**< @ingroup icp_qat_hw_defs
 * State2 block size for SHA3_512 */
#define ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ 16
/**< @ingroup icp_qat_hw_defs
 * State2 block size for XCBC */
#define ICP_QAT_HW_AES_CBC_MAC_KEY_SZ 16
/**< @ingroup icp_qat_hw_defs
 * State2 block size for CBC */
#define ICP_QAT_HW_AES_CCM_CBC_E_CTR0_SZ 16
/**< @ingroup icp_qat_hw_defs
 * State2 block size for AES Encrypted Counter 0 */
#define ICP_QAT_HW_F9_IK_SZ 16
/**< @ingroup icp_qat_hw_defs
 * State2 block size for F9 IK */
#define ICP_QAT_HW_F9_FK_SZ 16
/**< @ingroup icp_qat_hw_defs
 * State2 block size for F9 FK */
#define ICP_QAT_HW_KASUMI_F9_STATE2_SZ \
	(ICP_QAT_HW_F9_IK_SZ + ICP_QAT_HW_F9_FK_SZ)
/**< @ingroup icp_qat_hw_defs
 * State2 complete size for Kasumi F9 */
#define ICP_QAT_HW_AES_F9_STATE2_SZ ICP_QAT_HW_KASUMI_F9_STATE2_SZ
/**< @ingroup icp_qat_hw_defs
 * State2 complete size for AES F9 */
#define ICP_QAT_HW_SNOW_3G_UIA2_STATE2_SZ 24
/**< @ingroup icp_cpm_hw_defs
 * State2 block size for UIA2 */
#define ICP_QAT_HW_ZUC_3G_EIA3_STATE2_SZ 32
/**< @ingroup icp_cpm_hw_defs
 * State2 block size for EIA3 */
#define ICP_QAT_HW_GALOIS_H_SZ 16
/**< @ingroup icp_qat_hw_defs
 * State2 block size for Galois Multiplier H */
#define ICP_QAT_HW_GALOIS_LEN_A_SZ 8
/**< @ingroup icp_qat_hw_defs
 * State2 block size for Galois AAD length */
#define ICP_QAT_HW_GALOIS_E_CTR0_SZ 16
/**< @ingroup icp_qat_hw_defs
 * State2 block size for Galois Encrypted Counter 0 */
#define ICP_QAT_HW_SM3_STATE2_SZ 32
/**< @ingroup icp_qat_hw_defs
 * State2 block size for SM3 */
+#define ICP_QAT_HW_SHA3_STATEFUL_STATE2_SZ 208
+/**< @ingroup icp_cpm_hw_defs
+ * State2 block size for stateful SHA3 processing */

/* ************************************************************************* */
/* ************************************************************************* */

/**
 *****************************************************************************
 * @ingroup icp_qat_hw_defs
 *      Definition of SHA512 auth algorithm processing struct
 * @description
 *      This struct describes the parameters to pass to the slice for
 *      configuring it for SHA512 processing. This is the largest possible
 *      setup block for authentication
 *
 *****************************************************************************/
typedef struct icp_qat_hw_auth_sha512_s {
	icp_qat_hw_auth_setup_t inner_setup;
	/**< Inner loop configuration word for the slice */

	uint8_t state1[ICP_QAT_HW_SHA512_STATE1_SZ];
	/**< Slice state1 variable */

	icp_qat_hw_auth_setup_t outer_setup;
	/**< Outer configuration word for the slice */

	uint8_t state2[ICP_QAT_HW_SHA512_STATE2_SZ];
	/**< Slice state2 variable */

} icp_qat_hw_auth_sha512_t;

/**
 *****************************************************************************
 * @ingroup icp_qat_hw_defs
 *      Definition of SHA3_512 auth algorithm processing struct
 * @description
 *      This struct describes the parameters to pass to the slice for
 *      configuring it for SHA3_512 processing. This is the largest possible
 *      setup block for authentication
 *
 *****************************************************************************/
typedef struct icp_qat_hw_auth_sha3_512_s {
	icp_qat_hw_auth_setup_t inner_setup;
	/**< Inner loop configuration word for the slice */

	uint8_t state1[ICP_QAT_HW_SHA3_512_STATE1_SZ];
	/**< Slice state1 variable */

	icp_qat_hw_auth_setup_t outer_setup;
	/**< Outer configuration word for the slice */

-	/* State2 size is zero - this may change for future implementations */
-	uint8_t state2[ICP_QAT_HW_SHA3_512_STATE2_SZ];
} icp_qat_hw_auth_sha3_512_t;

+/**
+ *****************************************************************************
+ * @ingroup icp_qat_hw_defs
+ *      Definition of stateful SHA3 auth algorithm processing struct
+ * @description
+ *      This struct describes the parameters to pass to the slice for
+ *      configuring it for stateful SHA3 processing. This is the largest
+ *      possible setup block for authentication
+ *
+ *****************************************************************************/
+typedef struct icp_qat_hw_auth_sha3_stateful_s {
+	icp_qat_hw_auth_setup_t inner_setup;
+	/**< Inner loop configuration word for the slice */
+
+	uint8_t inner_state1[ICP_QAT_HW_SHA3_STATEFUL_STATE1_SZ];
+	/**< Inner hash block */
+
+	icp_qat_hw_auth_setup_t outer_setup;
+	/**< Outer configuration word for the slice */
+
+	uint8_t outer_state1[ICP_QAT_HW_SHA3_STATEFUL_STATE1_SZ];
+	/**< Outer hash block */
+
+} icp_qat_hw_auth_sha3_stateful_t;
+
/**
 *****************************************************************************
 * @ingroup icp_qat_hw_defs
 *      Supported hardware authentication algorithms
 * @description
 *      Common grouping of the auth algorithm types supported by the QAT
 *
 *****************************************************************************/
typedef union icp_qat_hw_auth_algo_blk_u {
	icp_qat_hw_auth_sha512_t sha512;
	/**< SHA512 Hashing */

+	icp_qat_hw_auth_sha3_stateful_t sha3_stateful;
+	/**< Stateful SHA3 Hashing */
+
} icp_qat_hw_auth_algo_blk_t;

#define ICP_QAT_HW_GALOIS_LEN_A_BITPOS 0
/**< @ingroup icp_qat_hw_defs
 * Bit position of the 32 bit A value in the 64 bit A configuration sent to
 * the QAT */
#define ICP_QAT_HW_GALOIS_LEN_A_MASK 0xFFFFFFFF
/**< @ingroup icp_qat_hw_defs
 * Mask value for A value */

/* ========================================================================= */
/* CIPHER SLICE */
/* ========================================================================= */

/**
 *****************************************************************************
 * @ingroup icp_qat_hw_defs
 *      Definition of the supported Cipher Algorithm types
 * @description
 *      Enumeration used to define the cipher algorithms
 *
 *****************************************************************************/
typedef enum {
	ICP_QAT_HW_CIPHER_ALGO_NULL = 0,	       /*!< Null ciphering */
	ICP_QAT_HW_CIPHER_ALGO_DES = 1,		       /*!< DES ciphering */
	ICP_QAT_HW_CIPHER_ALGO_3DES = 2,	       /*!< 3DES ciphering */
-	ICP_QAT_HW_CIPHER_ALGO_AES128 = 3, /*!< AES-128 ciphering */
-	ICP_QAT_HW_CIPHER_ALGO_AES192 = 4, /*!< AES-192 ciphering */
-	ICP_QAT_HW_CIPHER_ALGO_AES256 = 5, /*!< AES-256 ciphering */
+	ICP_QAT_HW_CIPHER_ALGO_AES128 = 3,	       /*!< AES-128 ciphering */
+	ICP_QAT_HW_CIPHER_ALGO_AES192 = 4,	       /*!< AES-192 ciphering */
+	ICP_QAT_HW_CIPHER_ALGO_AES256 = 5,	       /*!< AES-256 ciphering */
	ICP_QAT_HW_CIPHER_ALGO_ARC4 = 6,	       /*!< ARC4 ciphering */
-	ICP_QAT_HW_CIPHER_ALGO_KASUMI = 7, /*!< Kasumi */
+	ICP_QAT_HW_CIPHER_ALGO_KASUMI = 7,	       /*!< Kasumi */
	ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 = 8,       /*!< Snow_3G */
	ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3 = 9,    /*!< ZUC_3G */
	ICP_QAT_HW_CIPHER_ALGO_SM4 = 10,	       /*!< SM4 ciphering */
	ICP_QAT_HW_CIPHER_ALGO_CHACHA20_POLY1305 = 11, /*!< CHACHA POLY SPC AEAD */
	ICP_QAT_HW_CIPHER_DELIMITER = 12	       /**< Delimiter type */
} icp_qat_hw_cipher_algo_t;

/**
 *****************************************************************************
 * @ingroup icp_qat_hw_defs
 *      Definition of the supported cipher modes of operation
 * @description
 *      Enumeration used to define the cipher slice modes.
 *
 * @Note
 *      Only some algorithms are valid in some of the modes. If you don't know
 *      what you are doing then refer back to the EAS
 *
 *****************************************************************************/
typedef enum {
	ICP_QAT_HW_CIPHER_ECB_MODE = 0,	     /*!< ECB mode */
	ICP_QAT_HW_CIPHER_CBC_MODE = 1,	     /*!< CBC mode */
	ICP_QAT_HW_CIPHER_CTR_MODE = 2,	     /*!< CTR mode */
	ICP_QAT_HW_CIPHER_F8_MODE = 3,	     /*!< F8 mode */
	ICP_QAT_HW_CIPHER_AEAD_MODE = 4,     /*!< AES-GCM SPC AEAD mode */
-	ICP_QAT_HW_CIPHER_RESERVED_MODE = 5, /*!< Reserved */
+	ICP_QAT_HW_CIPHER_CCM_MODE = 5,	     /*!< AES-CCM SPC AEAD mode */
	ICP_QAT_HW_CIPHER_XTS_MODE = 6,	     /*!< XTS mode */
	ICP_QAT_HW_CIPHER_MODE_DELIMITER = 7 /**< Delimiter type */
} icp_qat_hw_cipher_mode_t;

/**
 *****************************************************************************
 * @ingroup icp_qat_hw_defs
 *      Cipher Configuration Struct
 *
 * @description
 *      Configuration data used for setting up the QAT Cipher Slice
 *
 *****************************************************************************/
typedef struct icp_qat_hw_cipher_config_s {
	uint32_t val;
	/**< Cipher slice configuration */

	uint32_t reserved;
	/**< Reserved */
} icp_qat_hw_cipher_config_t;

+/**
+ *****************************************************************************
+ * @ingroup icp_qat_hw_defs
+ *      Cipher Configuration Struct
+ *
+ * @description
+ *      Configuration data used for setting up the QAT UCS Cipher Slice
+ *
+ *****************************************************************************/
+typedef struct icp_qat_hw_ucs_cipher_config_s {
+	uint32_t val;
+	/**< Cipher slice configuration */
+
+	uint32_t reserved[3];
+	/**< Reserved */
+} icp_qat_hw_ucs_cipher_config_t;
+
/**
 *****************************************************************************
 * @ingroup icp_qat_hw_defs
 *      Definition of the cipher direction
 * @description
 *      Enumeration which is used to define the cipher direction to apply
 *
 *****************************************************************************/
typedef enum {
	/*!< Flag to indicate that encryption is required */
	ICP_QAT_HW_CIPHER_ENCRYPT = 0,
	/*!< Flag to indicate that decryption is required */
	ICP_QAT_HW_CIPHER_DECRYPT = 1,
} icp_qat_hw_cipher_dir_t;

/**
 *****************************************************************************
 * @ingroup icp_qat_hw_defs
 *      Definition of the cipher key conversion modes
 * @description
 *      Enumeration which is used to define if cipher key conversion is needed
 *
 *****************************************************************************/
typedef enum {
	/*!< Flag to indicate that no key convert is required */
	ICP_QAT_HW_CIPHER_NO_CONVERT = 0,
	/*!< Flag to indicate that key conversion is required */
	ICP_QAT_HW_CIPHER_KEY_CONVERT = 1,
} icp_qat_hw_cipher_convert_t;

/* Private defines */

/* Note: Bit positions have been arranged for little endian ordering */

#define QAT_CIPHER_MODE_BITPOS 4
/**< @ingroup icp_qat_hw_defs
 * Define for the cipher mode bit position */
#define QAT_CIPHER_MODE_MASK 0xF
/**< @ingroup icp_qat_hw_defs
 * Define for the cipher mode mask (four bits) */
#define QAT_CIPHER_ALGO_BITPOS 0
/**< @ingroup icp_qat_hw_defs
 * Define for the cipher algo bit position */
#define QAT_CIPHER_ALGO_MASK 0xF
/**< @ingroup icp_qat_hw_defs
 * Define for the cipher algo mask (four bits) */
#define QAT_CIPHER_CONVERT_BITPOS 9
/**< @ingroup icp_qat_hw_defs
 * Define the cipher convert key bit position */
#define QAT_CIPHER_CONVERT_MASK 0x1
/**< @ingroup icp_qat_hw_defs
 * Define for the cipher convert key mask (one bit) */
#define QAT_CIPHER_DIR_BITPOS 8
/**< @ingroup icp_qat_hw_defs
 * Define for the cipher direction bit position */
#define QAT_CIPHER_DIR_MASK 0x1
/**< @ingroup icp_qat_hw_defs
 * Define for the cipher direction mask (one bit) */
#define QAT_CIPHER_AEAD_HASH_CMP_LEN_MASK 0x1F
/**< @ingroup icp_qat_hw_defs
 * Define for the cipher AEAD Hash compare length mask (5 bits) */
#define QAT_CIPHER_AEAD_HASH_CMP_LEN_BITPOS 10
/**< @ingroup icp_qat_hw_defs
 * Define for the cipher AEAD Hash compare length (5 bits) */
#define QAT_CIPHER_AEAD_AAD_SIZE_LOWER_MASK 0xFF
/**< @ingroup icp_qat_hw_defs
 * Define for the cipher AEAD AAD size lower byte mask */
#define QAT_CIPHER_AEAD_AAD_SIZE_UPPER_MASK 0x3F
/**< @ingroup icp_qat_hw_defs
 * Define for the cipher AEAD AAD size upper 6 bits mask */
#define QAT_CIPHER_AEAD_AAD_UPPER_SHIFT 8
/**< @ingroup icp_qat_hw_defs
 * Define for the cipher AEAD AAD size Upper byte shift */
#define QAT_CIPHER_AEAD_AAD_LOWER_SHIFT 24
/**< @ingroup icp_qat_hw_defs
 * Define for the cipher AEAD AAD size Lower byte shift */
#define QAT_CIPHER_AEAD_AAD_SIZE_BITPOS 16
/**< @ingroup icp_qat_hw_defs
 * Define for the cipher AEAD AAD size (14 bits) */
#define QAT_CIPHER_MODE_F8_KEY_SZ_MULT 2
/**< @ingroup icp_qat_hw_defs
 * Define for the cipher mode F8 key size */
#define QAT_CIPHER_MODE_XTS_KEY_SZ_MULT 2
/**< @ingroup icp_qat_hw_defs
 * Define for the cipher XTS mode key size */
+#define QAT_CIPHER_MODE_UCS_XTS_KEY_SZ_MULT 1
+/**< @ingroup icp_qat_hw_defs
+ * Define for the UCS cipher XTS mode key size */
+
/**
 ******************************************************************************
 * @ingroup icp_qat_hw_defs
 *
 * @description
 *      Build the cipher configuration field
 *
 * @param mode Cipher Mode to use
 * @param algo Cipher Algorithm to use
 * @param convert Specify if the key is to be converted
 * @param dir Specify the cipher direction either encrypt or decrypt
 * @param aead_hash_cmp_len Digest compare length for single-pass AEAD,
 *        0 otherwise
 *
 *****************************************************************************/
#define ICP_QAT_HW_CIPHER_CONFIG_BUILD( \
	mode, algo, convert, dir, aead_hash_cmp_len) \
	((((mode)&QAT_CIPHER_MODE_MASK) << QAT_CIPHER_MODE_BITPOS) | \
	 (((algo)&QAT_CIPHER_ALGO_MASK) << QAT_CIPHER_ALGO_BITPOS) | \
	 (((convert)&QAT_CIPHER_CONVERT_MASK) << QAT_CIPHER_CONVERT_BITPOS) | \
	 (((dir)&QAT_CIPHER_DIR_MASK) << QAT_CIPHER_DIR_BITPOS) | \
	 (((aead_hash_cmp_len)&QAT_CIPHER_AEAD_HASH_CMP_LEN_MASK) \
	  << QAT_CIPHER_AEAD_HASH_CMP_LEN_BITPOS))

/**
 ******************************************************************************
 * @ingroup icp_qat_hw_defs
 *
 * @description
 *      Build the second QW of cipher slice config
 *
 * @param aad_size Specify the size of associated authentication data
 *        for AEAD processing
 *
 ******************************************************************************/
#define ICP_QAT_HW_CIPHER_CONFIG_BUILD_UPPER(aad_size) \
	(((((aad_size) >> QAT_CIPHER_AEAD_AAD_UPPER_SHIFT) & \
	   QAT_CIPHER_AEAD_AAD_SIZE_UPPER_MASK) \
	  << QAT_CIPHER_AEAD_AAD_SIZE_BITPOS) | \
	 (((aad_size)&QAT_CIPHER_AEAD_AAD_SIZE_LOWER_MASK) \
	  << QAT_CIPHER_AEAD_AAD_LOWER_SHIFT))

#define ICP_QAT_HW_DES_BLK_SZ 8
/**< @ingroup icp_qat_hw_defs
 * Define the block size for DES.
 * This is used as either the size of the IV or CTR input value */
#define ICP_QAT_HW_3DES_BLK_SZ 8
/**< @ingroup icp_qat_hw_defs
 * Define the processing block size for 3DES */
#define ICP_QAT_HW_NULL_BLK_SZ 8
/**< @ingroup icp_qat_hw_defs
 * Define the processing block size for NULL */
#define ICP_QAT_HW_AES_BLK_SZ 16
/**< @ingroup icp_qat_hw_defs
 * Define the processing block size for AES 128, 192 and 256 */
#define ICP_QAT_HW_KASUMI_BLK_SZ 8
/**< @ingroup icp_qat_hw_defs
 * Define the processing block size for KASUMI */
#define ICP_QAT_HW_SNOW_3G_BLK_SZ 8
/**< @ingroup icp_qat_hw_defs
 * Define the processing block size for SNOW_3G */
#define ICP_QAT_HW_ZUC_3G_BLK_SZ 8
/**< @ingroup icp_qat_hw_defs
 * Define the processing block size for ZUC_3G */
#define ICP_QAT_HW_NULL_KEY_SZ 256
/**< @ingroup icp_qat_hw_defs
 * Define the key size for NULL */
#define ICP_QAT_HW_DES_KEY_SZ 8
/**< @ingroup icp_qat_hw_defs
 * Define the key size for DES */
#define ICP_QAT_HW_3DES_KEY_SZ 24
/**< @ingroup icp_qat_hw_defs
 * Define the key size for 3DES */
#define ICP_QAT_HW_AES_128_KEY_SZ 16
/**< @ingroup icp_qat_hw_defs
 * Define the key size for AES128 */
#define ICP_QAT_HW_AES_192_KEY_SZ 24
/**< @ingroup icp_qat_hw_defs
 * Define the key size for AES192 */
#define ICP_QAT_HW_AES_256_KEY_SZ 32
/**< @ingroup icp_qat_hw_defs
 * Define the key size for AES256 */
+/* AES UCS */
+#define ICP_QAT_HW_UCS_AES_128_KEY_SZ ICP_QAT_HW_AES_128_KEY_SZ
+/**< @ingroup icp_qat_hw_defs
+ * Define the key size for AES128 for UCS slice */
+#define ICP_QAT_HW_UCS_AES_192_KEY_SZ 32
+/**< @ingroup icp_qat_hw_defs
+ * Define the key size for AES192 for UCS slice */
+#define ICP_QAT_HW_UCS_AES_256_KEY_SZ ICP_QAT_HW_AES_256_KEY_SZ
+/**< @ingroup icp_qat_hw_defs
+ * Define the key size for AES256 for UCS slice */
#define ICP_QAT_HW_AES_128_F8_KEY_SZ \
	(ICP_QAT_HW_AES_128_KEY_SZ * QAT_CIPHER_MODE_F8_KEY_SZ_MULT)
/**< @ingroup icp_qat_hw_defs
 * Define the key size for AES128 F8 */
#define ICP_QAT_HW_AES_192_F8_KEY_SZ \
	(ICP_QAT_HW_AES_192_KEY_SZ * QAT_CIPHER_MODE_F8_KEY_SZ_MULT)
/**< @ingroup icp_qat_hw_defs
 * Define the key size for AES192 F8 */
#define ICP_QAT_HW_AES_256_F8_KEY_SZ \
	(ICP_QAT_HW_AES_256_KEY_SZ * QAT_CIPHER_MODE_F8_KEY_SZ_MULT)
/**< @ingroup icp_qat_hw_defs
 * Define the key size for AES256 F8 */
#define ICP_QAT_HW_AES_128_XTS_KEY_SZ \
	(ICP_QAT_HW_AES_128_KEY_SZ * QAT_CIPHER_MODE_XTS_KEY_SZ_MULT)
/**< @ingroup icp_qat_hw_defs
 * Define the key size for AES128 XTS */
#define ICP_QAT_HW_AES_256_XTS_KEY_SZ \
	(ICP_QAT_HW_AES_256_KEY_SZ * QAT_CIPHER_MODE_XTS_KEY_SZ_MULT)
/**< @ingroup icp_qat_hw_defs
 * Define the key size for AES256 XTS */
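/*
 * Usage sketch (illustrative, not part of the original header): packing the
 * cipher config words for a hypothetical AES-128-CBC encrypt session with no
 * key conversion and no AEAD digest compare. The function and variable names
 * are invented for the example; the constants are defined above.
 */
static inline void
example_build_cipher_config(icp_qat_hw_cipher_config_t *cfg)
{
	cfg->val = ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_CBC_MODE,
						  ICP_QAT_HW_CIPHER_ALGO_AES128,
						  ICP_QAT_HW_CIPHER_NO_CONVERT,
						  ICP_QAT_HW_CIPHER_ENCRYPT,
						  0);
	/* The upper LW only matters for single-pass AEAD; zero AAD here. */
	cfg->reserved = ICP_QAT_HW_CIPHER_CONFIG_BUILD_UPPER(0);
}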
+#define ICP_QAT_HW_UCS_AES_128_XTS_KEY_SZ \
+	(ICP_QAT_HW_UCS_AES_128_KEY_SZ * QAT_CIPHER_MODE_UCS_XTS_KEY_SZ_MULT)
+/**< @ingroup icp_qat_hw_defs
+ * Define the key size for AES128 XTS for the UCS Slice */
+#define ICP_QAT_HW_UCS_AES_256_XTS_KEY_SZ \
+	(ICP_QAT_HW_UCS_AES_256_KEY_SZ * QAT_CIPHER_MODE_UCS_XTS_KEY_SZ_MULT)
+/**< @ingroup icp_qat_hw_defs
+ * Define the key size for AES256 XTS for the UCS Slice */
#define ICP_QAT_HW_KASUMI_KEY_SZ 16
/**< @ingroup icp_qat_hw_defs
 * Define the key size for Kasumi */
#define ICP_QAT_HW_KASUMI_F8_KEY_SZ \
	(ICP_QAT_HW_KASUMI_KEY_SZ * QAT_CIPHER_MODE_F8_KEY_SZ_MULT)
/**< @ingroup icp_qat_hw_defs
 * Define the key size for Kasumi F8 */
#define ICP_QAT_HW_AES_128_XTS_KEY_SZ \
	(ICP_QAT_HW_AES_128_KEY_SZ * QAT_CIPHER_MODE_XTS_KEY_SZ_MULT)
/**< @ingroup icp_qat_hw_defs
 * Define the key size for AES128 XTS */
#define ICP_QAT_HW_AES_256_XTS_KEY_SZ \
	(ICP_QAT_HW_AES_256_KEY_SZ * QAT_CIPHER_MODE_XTS_KEY_SZ_MULT)
/**< @ingroup icp_qat_hw_defs
 * Define the key size for AES256 XTS */
#define ICP_QAT_HW_ARC4_KEY_SZ 256
/**< @ingroup icp_qat_hw_defs
 * Define the key size for ARC4 */
#define ICP_QAT_HW_SNOW_3G_UEA2_KEY_SZ 16
/**< @ingroup icp_cpm_hw_defs
 * Define the key size for SNOW_3G_UEA2 */
#define ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ 16
/**< @ingroup icp_cpm_hw_defs
 * Define the iv size for SNOW_3G_UEA2 */
#define ICP_QAT_HW_ZUC_3G_EEA3_KEY_SZ 16
/**< @ingroup icp_cpm_hw_defs
 * Define the key size for ZUC_3G_EEA3 */
#define ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ 16
/**< @ingroup icp_cpm_hw_defs
 * Define the iv size for ZUC_3G_EEA3 */
#define ICP_QAT_HW_MODE_F8_NUM_REG_TO_CLEAR 2
/**< @ingroup icp_cpm_hw_defs
 * Number of the HW register to clear in F8 mode */
#define ICP_QAT_HW_CHACHAPOLY_KEY_SZ 32
/**< @ingroup icp_qat_hw_defs
 * Define the key size for CHACHA20-Poly1305 */
#define ICP_QAT_HW_CHACHAPOLY_IV_SZ 12
/**< @ingroup icp_qat_hw_defs
 * Define the IV size for CHACHA20-Poly1305 */
#define ICP_QAT_HW_CHACHAPOLY_BLK_SZ 64
/**< @ingroup icp_qat_hw_defs
 * Define the processing block size for CHACHA20-Poly1305 */
#define ICP_QAT_HW_CHACHAPOLY_CTR_SZ 16
/**< @ingroup icp_qat_hw_defs
 * Define the counter block size for CHACHA20-Poly1305 */
#define ICP_QAT_HW_SPC_CTR_SZ 16
/**< @ingroup icp_qat_hw_defs
 * Define the Single Pass counter size */
#define ICP_QAT_HW_CHACHAPOLY_ICV__SZ 16
/**< @ingroup icp_qat_hw_defs
 * Define the ICV (tag) size for CHACHA20-Poly1305 */
#define ICP_QAT_HW_CHACHAPOLY_AAD_MAX_LOG 14
/**< @ingroup icp_qat_hw_defs
 * Define the maximum AAD size (log2) for CHACHA20-Poly1305 */
#define ICP_QAT_HW_SM4_BLK_SZ 16
/**< @ingroup icp_qat_hw_defs
 * Define the processing block size for SM4 */
#define ICP_QAT_HW_SM4_KEY_SZ 16
/**< @ingroup icp_qat_hw_defs
 * Define the key size for SM4 */
#define ICP_QAT_HW_SM4_IV_SZ 16
/**< @ingroup icp_qat_hw_defs
 * Define the IV size for SM4 */

/*
 * SHRAM constants definitions
 */
#define INIT_SHRAM_CONSTANTS_TABLE_SZ (1024)
#define SHRAM_CONSTANTS_TABLE_SIZE_QWS (INIT_SHRAM_CONSTANTS_TABLE_SZ / 4 / 2)

/**
 *****************************************************************************
 * @ingroup icp_qat_hw_defs
 *      Definition of AES-256 F8 cipher algorithm processing struct
 * @description
 *      This struct describes the parameters to pass to the slice for
 *      configuring it for AES-256 F8 processing
 *
 *****************************************************************************/
typedef struct
icp_qat_hw_cipher_aes256_f8_s { icp_qat_hw_cipher_config_t cipher_config; /**< Cipher configuration word for the slice set to * AES-256 and the F8 mode */ uint8_t key[ICP_QAT_HW_AES_256_F8_KEY_SZ]; /**< Cipher key */ } icp_qat_hw_cipher_aes256_f8_t; /** ***************************************************************************** * @ingroup icp_qat_hw_defs * Supported hardware cipher algorithms * @description * Common grouping of the cipher algorithm types supported by the QAT. * This is the largest possible cipher setup block size * *****************************************************************************/ typedef union icp_qat_hw_cipher_algo_blk_u { icp_qat_hw_cipher_aes256_f8_t aes256_f8; /**< AES-256 F8 Cipher */ } icp_qat_hw_cipher_algo_blk_t; /* ========================================================================= */ /* TRNG SLICE */ /* ========================================================================= */ /** ***************************************************************************** * @ingroup icp_qat_hw_defs * Definition of the supported TRNG configuration modes * @description * Enumeration used to define the TRNG modes. Used by clients when * configuring the TRNG for use * *****************************************************************************/ typedef enum { ICP_QAT_HW_TRNG_DBL = 0, /*!< TRNG Disabled mode */ ICP_QAT_HW_TRNG_NHT = 1, /*!< TRNG Normal Health Test mode */ ICP_QAT_HW_TRNG_KAT = 4, /*!< TRNG Known Answer Test mode */ ICP_QAT_HW_TRNG_DELIMITER = 8 /**< Delimiter type */ } icp_qat_hw_trng_cfg_mode_t; /** ***************************************************************************** * @ingroup icp_qat_hw_defs * Definition of the supported TRNG KAT (known answer test) modes * @description * Enumeration which is used to define the TRNG KAT modes. Used by clients * when configuring the TRNG for testing * *****************************************************************************/ typedef enum { - ICP_QAT_HW_TRNG_NEG_0 = 0, /*!< TRNG Neg Zero Test */ - ICP_QAT_HW_TRNG_NEG_1 = 1, /*!< TRNG Neg One Test */ + ICP_QAT_HW_TRNG_NEG_0 = 0, /*!< TRNG Neg Zero Test */ + ICP_QAT_HW_TRNG_NEG_1 = 1, /*!< TRNG Neg One Test */ ICP_QAT_HW_TRNG_POS = 2, /*!< TRNG POS Test */ - ICP_QAT_HW_TRNG_POS_VNC = 3, /*!< TRNG POS VNC Test */ + ICP_QAT_HW_TRNG_POS_VNC = 3, /*!< TRNG POS VNC Test */ ICP_QAT_HW_TRNG_KAT_DELIMITER = 4 /**< Delimiter type */ } icp_qat_hw_trng_kat_mode_t; /** ***************************************************************************** * @ingroup icp_qat_hw_defs * TRNG mode configuration structure. * * @description * Definition of the format of the TRNG slice configuration. Used * internally by the QAT FW for configuration of the KAT unit or the * TRNG depending on the slice command i.e. either a set_slice_config or * slice_wr_KAT_type * *****************************************************************************/ typedef struct icp_qat_hw_trng_config_s { uint32_t val; /**< Configuration used for setting up the TRNG slice */ uint32_t reserved; /**< Reserved */ } icp_qat_hw_trng_config_t; /* Private Defines */ /* Note: Bit positions have been arranged for little endian ordering */ #define QAT_TRNG_CONFIG_MODE_MASK 0x7 /**< @ingroup icp_qat_hw_defs * Mask for the TRNG configuration mode. 
(Three bits) */
#define QAT_TRNG_CONFIG_MODE_BITPOS 5
/**< @ingroup icp_qat_hw_defs
 * TRNG configuration mode bit positions start */
#define QAT_TRNG_KAT_MODE_MASK 0x3
/**< @ingroup icp_qat_hw_defs
 * Mask of two bits for the TRNG known answer test mode */
#define QAT_TRNG_KAT_MODE_BITPOS 6
/**< @ingroup icp_qat_hw_defs
 * TRNG known answer test mode bit positions start */

/**
 ******************************************************************************
 * @ingroup icp_qat_hw_defs
 *
 * @description
 *      Build the configuration byte for the TRNG slice based on the mode
 *
 * @param mode Configuration mode parameter
 *
 *****************************************************************************/
#define ICP_QAT_HW_TRNG_CONFIG_MODE_BUILD(mode) \
	(((mode)&QAT_TRNG_CONFIG_MODE_MASK) << QAT_TRNG_CONFIG_MODE_BITPOS)

/**
 ******************************************************************************
 * @ingroup icp_qat_hw_defs
 *
 * @description
 *      Build the configuration byte for the TRNG KAT based on the mode
 *
 * @param mode Configuration mode parameter
 *
 *****************************************************************************/
#define ICP_QAT_HW_TRNG_KAT_MODE_BUILD(mode) \
	((((mode)&QAT_TRNG_KAT_MODE_MASK) << QAT_TRNG_KAT_MODE_BITPOS))

/**
 *****************************************************************************
 * @ingroup icp_qat_hw_defs
 *      TRNG test status structure.
 *
 * @description
 *      Definition of the format of the TRNG slice test status structure. Used
 *      internally by the QAT FW.
 *
 *****************************************************************************/
typedef struct icp_qat_hw_trng_test_status_s {
	uint32_t status;
	/**< Status used for setting up the TRNG slice */

	uint32_t fail_count;
	/**< Comparator fail count */
} icp_qat_hw_trng_test_status_t;

#define ICP_QAT_HW_TRNG_TEST_NO_FAILURES 1
/**< @ingroup icp_qat_hw_defs
 * Flag to indicate that there were no Test Failures */
#define ICP_QAT_HW_TRNG_TEST_FAILURES_FOUND 0
/**< @ingroup icp_qat_hw_defs
 * Flag to indicate that there were Test Failures */
#define ICP_QAT_HW_TRNG_TEST_STATUS_VALID 1
/**< @ingroup icp_qat_hw_defs
 * Flag to indicate that the Test output is valid */
#define ICP_QAT_HW_TRNG_TEST_STATUS_INVALID 0
/**< @ingroup icp_qat_hw_defs
 * Flag to indicate that the Test output is still invalid */

/* Private defines */
#define QAT_TRNG_TEST_FAILURE_FLAG_MASK 0x1
/**< @ingroup icp_qat_hw_defs
 * Mask of one bit used to determine the TRNG Test pass/fail */
#define QAT_TRNG_TEST_FAILURE_FLAG_BITPOS 4
/**< @ingroup icp_qat_hw_defs
 * Flag position to indicate that the TRNG Test status is pass or fail */
#define QAT_TRNG_TEST_STATUS_MASK 0x1
/**< @ingroup icp_qat_hw_defs
 * Mask of one bit used to determine the TRNG Test status */
#define QAT_TRNG_TEST_STATUS_BITPOS 1
/**< @ingroup icp_qat_hw_defs
 * Flag position to indicate the TRNG Test status */

/**
 ******************************************************************************
 * @ingroup icp_qat_hw_defs
 *
 * @description
 *      Extract the fail bit for the TRNG slice
 *
 * @param status TRNG status value
 *
 *****************************************************************************/
#define ICP_QAT_HW_TRNG_FAIL_FLAG_GET(status) \
	(((status) >> QAT_TRNG_TEST_FAILURE_FLAG_BITPOS) & \
	 QAT_TRNG_TEST_FAILURE_FLAG_MASK)
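/*
 * Usage sketch (illustrative, not part of the original header): programming
 * the TRNG slice for a known answer test and checking a returned test status
 * word. The status argument stands in for data read back from the slice;
 * the function name is invented for the example.
 */
static inline uint32_t
example_trng_kat(uint32_t status)
{
	uint32_t cfg;

	/* Select KAT mode, then request the positive known answer test. */
	cfg = ICP_QAT_HW_TRNG_CONFIG_MODE_BUILD(ICP_QAT_HW_TRNG_KAT) |
	    ICP_QAT_HW_TRNG_KAT_MODE_BUILD(ICP_QAT_HW_TRNG_POS);
	(void)cfg; /* would be written to the TRNG slice configuration */

	/* Extract the pass/fail flag from the returned test status word. */
	return (ICP_QAT_HW_TRNG_FAIL_FLAG_GET(status));
}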
/**
 ******************************************************************************
 * @ingroup icp_qat_hw_defs
 *
 * @description
 *      Extract the status valid bit for the TRNG slice
 *
 * @param status TRNG status value
 *
 *****************************************************************************/
#define ICP_QAT_HW_TRNG_STATUS_VALID_GET(status) \
	(((status) >> QAT_TRNG_TEST_STATUS_BITPOS) & QAT_TRNG_TEST_STATUS_MASK)

/**
 *****************************************************************************
 * @ingroup icp_qat_hw_defs
 *      TRNG entropy counters
 *
 * @description
 *      Definition of the format of the TRNG entropy counters. Used internally
 *      by the QAT FW.
 *
 *****************************************************************************/
typedef struct icp_qat_hw_trng_entropy_counts_s {
	uint64_t raw_ones_count;
	/**< Count of raw ones of entropy */

	uint64_t raw_zeros_count;
	/**< Count of raw zeros of entropy */

	uint64_t cond_ones_count;
	/**< Count of conditioned ones entropy */

	uint64_t cond_zeros_count;
	/**< Count of conditioned zeros entropy */
} icp_qat_hw_trng_entropy_counts_t;

/* Private defines */
#define QAT_HW_TRNG_ENTROPY_STS_RSVD_SZ 4
/**< @ingroup icp_qat_hw_defs
 * TRNG entropy status reserved size in bytes */

/**
 *****************************************************************************
 * @ingroup icp_qat_hw_defs
 *      TRNG entropy available status.
 *
 * @description
 *      Definition of the format of the TRNG slice entropy-available status
 *      struct. Used internally by the QAT FW.
 *
 *****************************************************************************/
typedef struct icp_qat_hw_trng_entropy_status_s {
	uint32_t status;
	/**< Entropy status in the TRNG */

	uint8_t reserved[QAT_HW_TRNG_ENTROPY_STS_RSVD_SZ];
	/**< Reserved */
} icp_qat_hw_trng_entropy_status_t;

#define ICP_QAT_HW_TRNG_ENTROPY_AVAIL 1
/**< @ingroup icp_qat_hw_defs
 * Flag indicating that entropy data is available in the QAT TRNG slice */
#define ICP_QAT_HW_TRNG_ENTROPY_NOT_AVAIL 0
/**< @ingroup icp_qat_hw_defs
 * Flag indicating that no entropy data is available in the QAT TRNG slice */

/* Private defines */
#define QAT_TRNG_ENTROPY_STATUS_MASK 1
/**< @ingroup icp_qat_hw_defs
 * Mask of one bit used to determine the TRNG Entropy status */
#define QAT_TRNG_ENTROPY_STATUS_BITPOS 0
/**< @ingroup icp_qat_hw_defs
 * Starting bit position for TRNG Entropy status.
*/ /** ****************************************************************************** * @ingroup icp_qat_hw_defs * * @description * Extract the entropy available status bit * * @param status TRNG status value * *****************************************************************************/ #define ICP_QAT_HW_TRNG_ENTROPY_STATUS_GET(status) \ (((status) >> QAT_TRNG_ENTROPY_STATUS_BITPOS) & \ QAT_TRNG_ENTROPY_STATUS_MASK) /** ***************************************************************************** * @ingroup icp_qat_hw_defs * Entropy seed data * * @description * This type is used for the definition of the entropy generated by a read * of the TRNG slice * *****************************************************************************/ typedef uint64_t icp_qat_hw_trng_entropy; /* ========================================================================= */ /* COMPRESSION SLICE */ /* ========================================================================= */ /** ***************************************************************************** * @ingroup icp_qat_hw_defs * Definition of the supported compression directions * @description * Enumeration used to define the compression directions * *****************************************************************************/ typedef enum { ICP_QAT_HW_COMPRESSION_DIR_COMPRESS = 0, /*!< Compression */ ICP_QAT_HW_COMPRESSION_DIR_DECOMPRESS = 1, /*!< Decompression */ ICP_QAT_HW_COMPRESSION_DIR_DELIMITER = 2 /**< Delimiter type */ } icp_qat_hw_compression_direction_t; /** ***************************************************************************** * @ingroup icp_qat_hw_defs * Definition of the supported delayed match modes * @description * Enumeration used to define whether delayed match is enabled * *****************************************************************************/ typedef enum { ICP_QAT_HW_COMPRESSION_DELAYED_MATCH_DISABLED = 0, /*!< Delayed match disabled */ ICP_QAT_HW_COMPRESSION_DELAYED_MATCH_ENABLED = 1, /*!< Delayed match enabled Note: This is the only valid mode - refer to CPM1.6 SAS */ ICP_QAT_HW_COMPRESSION_DELAYED_MATCH_DELIMITER = 2 /**< Delimiter type */ } icp_qat_hw_compression_delayed_match_t; /** ***************************************************************************** * @ingroup icp_qat_hw_defs * Definition of the supported compression algorithms * @description * Enumeration used to define the compression algorithms * *****************************************************************************/ typedef enum { ICP_QAT_HW_COMPRESSION_ALGO_DEFLATE = 0, /*!< Deflate compression */ - ICP_QAT_HW_COMPRESSION_DEPRECATED = 1, /*!< Deprecated */ + ICP_QAT_HW_COMPRESSION_DEPRECATED = 1, /*!< Deprecated */ ICP_QAT_HW_COMPRESSION_ALGO_DELIMITER = 2 /**< Delimiter type */ } icp_qat_hw_compression_algo_t; /** ***************************************************************************** * @ingroup icp_qat_hw_defs * Definition of the supported compression depths * @description * Enumeration used to define the compression slice depths. 
* *****************************************************************************/ typedef enum { ICP_QAT_HW_COMPRESSION_DEPTH_1 = 0, /*!< Search depth 1 (Fastest least exhaustive) */ ICP_QAT_HW_COMPRESSION_DEPTH_4 = 1, /*!< Search depth 4 */ ICP_QAT_HW_COMPRESSION_DEPTH_8 = 2, /*!< Search depth 8 */ ICP_QAT_HW_COMPRESSION_DEPTH_16 = 3, /*!< Search depth 16 */ ICP_QAT_HW_COMPRESSION_DEPTH_128 = 4, /*!< Search depth 128 (Slowest, most exhaustive) */ ICP_QAT_HW_COMPRESSION_DEPTH_DELIMITER = 5 /**< Delimiter type */ } icp_qat_hw_compression_depth_t; /** ***************************************************************************** * @ingroup icp_qat_hw_defs * Definition of the supported file types * @description * Enumeration used to define the compression file types. * *****************************************************************************/ typedef enum { ICP_QAT_HW_COMPRESSION_FILE_TYPE_0 = 0, /*!< Use Static Trees */ ICP_QAT_HW_COMPRESSION_FILE_TYPE_1 = 1, /*!< Use Semi-Dynamic Trees at offset 0 */ ICP_QAT_HW_COMPRESSION_FILE_TYPE_2 = 2, /*!< Use Semi-Dynamic Trees at offset 320 */ ICP_QAT_HW_COMPRESSION_FILE_TYPE_3 = 3, /*!< Use Semi-Dynamic Trees at offset 640 */ ICP_QAT_HW_COMPRESSION_FILE_TYPE_4 = 4, /*!< Use Semi-Dynamic Trees at offset 960 */ ICP_QAT_HW_COMPRESSION_FILE_TYPE_DELIMITER = 5 /**< Delimiter type */ } icp_qat_hw_compression_file_type_t; typedef enum { BNP_SKIP_MODE_DISABLED = 0, BNP_SKIP_MODE_AT_START = 1, BNP_SKIP_MODE_AT_END = 2, BNP_SKIP_MODE_STRIDE = 3 } icp_qat_bnp_skip_mode_t; /** ***************************************************************************** * @ingroup icp_qat_hw_defs * Compression Configuration Struct * * @description * Configuration data used for setting up the QAT Compression Slice * *****************************************************************************/ typedef struct icp_qat_hw_compression_config_s { - uint32_t val; - /**< Compression slice configuration */ + uint32_t lower_val; + /**< Compression slice configuration lower LW */ - uint32_t reserved; - /**< Reserved */ + uint32_t upper_val; + /**< Compression slice configuration upper LW */ } icp_qat_hw_compression_config_t; /* Private defines */ #define QAT_COMPRESSION_DIR_BITPOS 4 /**< @ingroup icp_qat_hw_defs * Define for the compression direction bit position */ #define QAT_COMPRESSION_DIR_MASK 0x7 /**< @ingroup icp_qat_hw_defs * Define for the compression direction mask (three bits) */ #define QAT_COMPRESSION_DELAYED_MATCH_BITPOS 16 /**< @ingroup icp_qat_hw_defs * Define for the compression delayed match bit position */ #define QAT_COMPRESSION_DELAYED_MATCH_MASK 0x1 /**< @ingroup icp_qat_hw_defs * Define for the delayed match mask (one bit) */ #define QAT_COMPRESSION_ALGO_BITPOS 31 /**< @ingroup icp_qat_hw_defs * Define for the compression algorithm bit position */ #define QAT_COMPRESSION_ALGO_MASK 0x1 /**< @ingroup icp_qat_hw_defs * Define for the compression algorithm mask (one bit) */ #define QAT_COMPRESSION_DEPTH_BITPOS 28 /**< @ingroup icp_qat_hw_defs * Define for the compression depth bit position */ #define QAT_COMPRESSION_DEPTH_MASK 0x7 /**< @ingroup icp_qat_hw_defs * Define for the compression depth mask (three bits) */ #define QAT_COMPRESSION_FILE_TYPE_BITPOS 24 /**< @ingroup icp_qat_hw_defs * Define for the compression file type bit position */ #define QAT_COMPRESSION_FILE_TYPE_MASK 0xF /**< @ingroup icp_qat_hw_defs * Define for the compression file type mask (four bits) */ /** ****************************************************************************** * @ingroup 
icp_qat_hw_defs
 *
 * @description
 *      Build the compression slice configuration field
 *
 * @param dir Compression Direction to use, compress or decompress
 * @param delayed Specify if delayed match should be enabled
 * @param algo Compression algorithm to use
 * @param depth Compression search depth to use
 * @param filetype Compression file type to use, static or semi dynamic trees
 *
 *****************************************************************************/
#define ICP_QAT_HW_COMPRESSION_CONFIG_BUILD( \
	dir, delayed, algo, depth, filetype) \
	((((dir)&QAT_COMPRESSION_DIR_MASK) << QAT_COMPRESSION_DIR_BITPOS) | \
	 (((delayed)&QAT_COMPRESSION_DELAYED_MATCH_MASK) \
	  << QAT_COMPRESSION_DELAYED_MATCH_BITPOS) | \
	 (((algo)&QAT_COMPRESSION_ALGO_MASK) << QAT_COMPRESSION_ALGO_BITPOS) | \
	 (((depth)&QAT_COMPRESSION_DEPTH_MASK) \
	  << QAT_COMPRESSION_DEPTH_BITPOS) | \
	 (((filetype)&QAT_COMPRESSION_FILE_TYPE_MASK) \
	  << QAT_COMPRESSION_FILE_TYPE_BITPOS))
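/*
 * Usage sketch (illustrative, not part of the original header): building a
 * static DEFLATE compression config word with search depth 8, delayed match
 * enabled (the only valid mode per the enum above) and static trees. The
 * function name is invented; all constants are defined earlier in this file.
 */
static inline uint32_t
example_build_compression_config(void)
{
	return ICP_QAT_HW_COMPRESSION_CONFIG_BUILD(
	    ICP_QAT_HW_COMPRESSION_DIR_COMPRESS,
	    ICP_QAT_HW_COMPRESSION_DELAYED_MATCH_ENABLED,
	    ICP_QAT_HW_COMPRESSION_ALGO_DEFLATE,
	    ICP_QAT_HW_COMPRESSION_DEPTH_8,
	    ICP_QAT_HW_COMPRESSION_FILE_TYPE_0);
}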
/* ========================================================================= */
/* TRANSLATOR SLICE */
/* ========================================================================= */

/**< Translator slice configuration is set internally by the firmware */

#endif /* _ICP_QAT_HW_H_ */
diff --git a/sys/dev/qat/qat_api/firmware/include/icp_qat_hw_20_comp.h b/sys/dev/qat/qat_api/firmware/include/icp_qat_hw_20_comp.h
new file mode 100644
index 000000000000..6e3ee4ce0446
--- /dev/null
+++ b/sys/dev/qat/qat_api/firmware/include/icp_qat_hw_20_comp.h
@@ -0,0 +1,292 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright(c) 2007-2022 Intel Corporation */
+/* $FreeBSD$ */
+/**
+ *****************************************************************************
+ * @file icp_qat_hw_20_comp.h
+ * @defgroup ICP QAT HW accessors for using the 2.x Compression Slice
+ * definitions
+ * @ingroup icp_qat_hw_2x_comp
+ * @description
+ *      This file documents definitions for the QAT HW COMP SLICE
+ *
+ *****************************************************************************/
+
+#ifndef _ICP_QAT_HW_20_COMP_H_
+#define _ICP_QAT_HW_20_COMP_H_
+
+#include "icp_qat_hw_20_comp_defs.h" // For HW definitions
+#include "icp_qat_fw.h"		     // For Set Field Macros.
+
+#ifdef WIN32
+#include <stdlib.h> // built in support for _byteswap_ulong
+#define BYTE_SWAP_32 _byteswap_ulong
+#else
+#define BYTE_SWAP_32 __builtin_bswap32
+#endif
+
+/**
+ *****************************************************************************
+ * @ingroup icp_qat_fw_comn
+ *
+ * @description
+ *      Definition of the hw config csr. This representation has to be further
+ *      processed by the corresponding config build function.
+ *
+ *****************************************************************************/
+typedef struct icp_qat_hw_comp_20_config_csr_lower_s {
+	// Fields programmable directly by the SW.
+	icp_qat_hw_comp_20_extended_delay_match_mode_t edmm;
+	icp_qat_hw_comp_20_hw_comp_format_t algo;
+	icp_qat_hw_comp_20_search_depth_t sd;
+	icp_qat_hw_comp_20_hbs_control_t hbs;
+	// Fields programmable directly by the FW.
+	// Block Drop enable. (Set by FW)
+	icp_qat_hw_comp_20_abd_t abd;
+	icp_qat_hw_comp_20_lllbd_ctrl_t lllbd;
+	// Advanced HW control (Set to default vals)
+	icp_qat_hw_comp_20_skip_hash_collision_t hash_col;
+	icp_qat_hw_comp_20_skip_hash_update_t hash_update;
+	icp_qat_hw_comp_20_byte_skip_t skip_ctrl;
+
+} icp_qat_hw_comp_20_config_csr_lower_t;
+
+/**
+ *****************************************************************************
+ * @ingroup icp_qat_fw_comn
+ *
+ * @description
+ *      Build the longword as expected by the HW
+ *
+ *****************************************************************************/
+static inline uint32_t
+ICP_QAT_FW_COMP_20_BUILD_CONFIG_LOWER(icp_qat_hw_comp_20_config_csr_lower_t csr)
+{
+	uint32_t val32 = 0;
+	// Programmable values
+	QAT_FIELD_SET(val32,
+		      csr.algo,
+		      ICP_QAT_HW_COMP_20_CONFIG_CSR_HW_COMP_FORMAT_BITPOS,
+		      ICP_QAT_HW_COMP_20_CONFIG_CSR_HW_COMP_FORMAT_MASK);
+
+	QAT_FIELD_SET(val32,
+		      csr.sd,
+		      ICP_QAT_HW_COMP_20_CONFIG_CSR_SEARCH_DEPTH_BITPOS,
+		      ICP_QAT_HW_COMP_20_CONFIG_CSR_SEARCH_DEPTH_MASK);
+
+	QAT_FIELD_SET(
+	    val32,
+	    csr.edmm,
+	    ICP_QAT_HW_COMP_20_CONFIG_CSR_EXTENDED_DELAY_MATCH_MODE_BITPOS,
+	    ICP_QAT_HW_COMP_20_CONFIG_CSR_EXTENDED_DELAY_MATCH_MODE_MASK);
+
+	QAT_FIELD_SET(val32,
+		      csr.hbs,
+		      ICP_QAT_HW_COMP_20_CONFIG_CSR_HBS_CONTROL_BITPOS,
+		      ICP_QAT_HW_COMP_20_CONFIG_CSR_HBS_CONTROL_MASK);
+
+	QAT_FIELD_SET(val32,
+		      csr.lllbd,
+		      ICP_QAT_HW_COMP_20_CONFIG_CSR_LLLBD_CTRL_BITPOS,
+		      ICP_QAT_HW_COMP_20_CONFIG_CSR_LLLBD_CTRL_MASK);
+
+	QAT_FIELD_SET(val32,
+		      csr.hash_col,
+		      ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_COLLISION_BITPOS,
+		      ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_COLLISION_MASK);
+
+	QAT_FIELD_SET(val32,
+		      csr.hash_update,
+		      ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_UPDATE_BITPOS,
+		      ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_UPDATE_MASK);
+
+	QAT_FIELD_SET(val32,
+		      csr.skip_ctrl,
+		      ICP_QAT_HW_COMP_20_CONFIG_CSR_BYTE_SKIP_BITPOS,
+		      ICP_QAT_HW_COMP_20_CONFIG_CSR_BYTE_SKIP_MASK);
+	// Default values.
+
+	QAT_FIELD_SET(val32,
+		      csr.abd,
+		      ICP_QAT_HW_COMP_20_CONFIG_CSR_ABD_BITPOS,
+		      ICP_QAT_HW_COMP_20_CONFIG_CSR_ABD_MASK);
+
+	QAT_FIELD_SET(val32,
+		      csr.lllbd,
+		      ICP_QAT_HW_COMP_20_CONFIG_CSR_LLLBD_CTRL_BITPOS,
+		      ICP_QAT_HW_COMP_20_CONFIG_CSR_LLLBD_CTRL_MASK);
+
+	return BYTE_SWAP_32(val32);
+}
+
+/**
+ *****************************************************************************
+ * @ingroup icp_qat_fw_comn
+ *
+ * @description
+ *      Definition of the hw config csr. This representation has to be further
+ *      processed by the corresponding config build function.
+ *
+ *****************************************************************************/
+typedef struct icp_qat_hw_comp_20_config_csr_upper_s {
+	icp_qat_hw_comp_20_scb_control_t scb_ctrl;
+	icp_qat_hw_comp_20_rmb_control_t rmb_ctrl;
+	icp_qat_hw_comp_20_som_control_t som_ctrl;
+	icp_qat_hw_comp_20_skip_hash_rd_control_t skip_hash_ctrl;
+	icp_qat_hw_comp_20_scb_unload_control_t scb_unload_ctrl;
+	icp_qat_hw_comp_20_disable_token_fusion_control_t
+	    disable_token_fusion_ctrl;
+	icp_qat_hw_comp_20_scb_mode_reset_mask_t scb_mode_reset;
+	uint16_t lazy;
+	uint16_t nice;
+} icp_qat_hw_comp_20_config_csr_upper_t;
+
+/**
+ *****************************************************************************
+ * @ingroup icp_qat_fw_comn
+ *
+ * @description
+ *      Build the longword as expected by the HW
+ *
+ *****************************************************************************/
+static inline uint32_t
+ICP_QAT_FW_COMP_20_BUILD_CONFIG_UPPER(icp_qat_hw_comp_20_config_csr_upper_t csr)
+{
+	uint32_t val32 = 0;
+
+	QAT_FIELD_SET(val32,
+		      csr.scb_ctrl,
+		      ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_CONTROL_BITPOS,
+		      ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_CONTROL_MASK);
+
+	QAT_FIELD_SET(val32,
+		      csr.rmb_ctrl,
+		      ICP_QAT_HW_COMP_20_CONFIG_CSR_RMB_CONTROL_BITPOS,
+		      ICP_QAT_HW_COMP_20_CONFIG_CSR_RMB_CONTROL_MASK);
+
+	QAT_FIELD_SET(val32,
+		      csr.som_ctrl,
+		      ICP_QAT_HW_COMP_20_CONFIG_CSR_SOM_CONTROL_BITPOS,
+		      ICP_QAT_HW_COMP_20_CONFIG_CSR_SOM_CONTROL_MASK);
+
+	QAT_FIELD_SET(val32,
+		      csr.skip_hash_ctrl,
+		      ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_RD_CONTROL_BITPOS,
+		      ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_RD_CONTROL_MASK);
+
+	QAT_FIELD_SET(val32,
+		      csr.scb_unload_ctrl,
+		      ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_UNLOAD_CONTROL_BITPOS,
+		      ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_UNLOAD_CONTROL_MASK);
+
+	QAT_FIELD_SET(
+	    val32,
+	    csr.disable_token_fusion_ctrl,
+	    ICP_QAT_HW_COMP_20_CONFIG_CSR_DISABLE_TOKEN_FUSION_CONTROL_BITPOS,
+	    ICP_QAT_HW_COMP_20_CONFIG_CSR_DISABLE_TOKEN_FUSION_CONTROL_MASK);
+
+	QAT_FIELD_SET(val32,
+		      csr.scb_mode_reset,
+		      ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_MODE_RESET_MASK_BITPOS,
+		      ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_MODE_RESET_MASK_MASK);
+
+	QAT_FIELD_SET(val32,
+		      csr.lazy,
+		      ICP_QAT_HW_COMP_20_CONFIG_CSR_LAZY_PARAM_BITPOS,
+		      ICP_QAT_HW_COMP_20_CONFIG_CSR_LAZY_PARAM_MASK);
+
+	QAT_FIELD_SET(val32,
+		      csr.nice,
+		      ICP_QAT_HW_COMP_20_CONFIG_CSR_NICE_PARAM_BITPOS,
+		      ICP_QAT_HW_COMP_20_CONFIG_CSR_NICE_PARAM_MASK);
+
+	return BYTE_SWAP_32(val32);
+}
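/*
 * Usage sketch (illustrative, not part of the committed header): populating
 * the two CSR views with documented default values and producing the two
 * config longwords. The *_DEFAULT_VAL names come from
 * icp_qat_hw_20_comp_defs.h, included above; the function name is invented.
 */
static inline void
example_build_comp_20_config(uint32_t *lower, uint32_t *upper)
{
	icp_qat_hw_comp_20_config_csr_lower_t lo = { 0 };
	icp_qat_hw_comp_20_config_csr_upper_t up = { 0 };

	lo.algo = ICP_QAT_HW_COMP_20_HW_COMP_FORMAT_DEFLATE;
	lo.sd = ICP_QAT_HW_COMP_20_CONFIG_CSR_SEARCH_DEPTH_DEFAULT_VAL;
	lo.hbs = ICP_QAT_HW_COMP_20_CONFIG_CSR_HBS_CONTROL_DEFAULT_VAL;
	lo.abd = ICP_QAT_HW_COMP_20_CONFIG_CSR_ABD_DEFAULT_VAL;
	lo.lllbd = ICP_QAT_HW_COMP_20_CONFIG_CSR_LLLBD_CTRL_DEFAULT_VAL;

	up.lazy = ICP_QAT_HW_COMP_20_CONFIG_CSR_LAZY_PARAM_DEFAULT_VAL;
	up.nice = ICP_QAT_HW_COMP_20_CONFIG_CSR_NICE_PARAM_DEFAULT_VAL;

	*lower = ICP_QAT_FW_COMP_20_BUILD_CONFIG_LOWER(lo);
	*upper = ICP_QAT_FW_COMP_20_BUILD_CONFIG_UPPER(up);
}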
+
+/**
+ *****************************************************************************
+ * @ingroup icp_qat_fw_comn
+ *
+ * @description
+ *      Definition of the hw config csr. This representation has to be further
+ *      processed by the corresponding config build function.
+ *
+ *****************************************************************************/
+typedef struct icp_qat_hw_decomp_20_config_csr_lower_s {
+	/* Fields programmable directly by the SW. */
+	icp_qat_hw_decomp_20_hbs_control_t hbs;
+	/* Advanced HW control (Set to default vals) */
+	icp_qat_hw_decomp_20_hw_comp_format_t algo;
+} icp_qat_hw_decomp_20_config_csr_lower_t;
+
+/**
+ *****************************************************************************
+ * @ingroup icp_qat_fw_comn
+ *
+ * @description
+ *      Build the longword as expected by the HW
+ *
+ *****************************************************************************/
+static inline uint32_t
+ICP_QAT_FW_DECOMP_20_BUILD_CONFIG_LOWER(
+    icp_qat_hw_decomp_20_config_csr_lower_t csr)
+{
+	uint32_t val32 = 0;
+
+	QAT_FIELD_SET(val32,
+		      csr.hbs,
+		      ICP_QAT_HW_DECOMP_20_CONFIG_CSR_HBS_CONTROL_BITPOS,
+		      ICP_QAT_HW_DECOMP_20_CONFIG_CSR_HBS_CONTROL_MASK);
+
+	QAT_FIELD_SET(val32,
+		      csr.algo,
+		      ICP_QAT_HW_DECOMP_20_CONFIG_CSR_HW_DECOMP_FORMAT_BITPOS,
+		      ICP_QAT_HW_DECOMP_20_CONFIG_CSR_HW_DECOMP_FORMAT_MASK);
+
+	return BYTE_SWAP_32(val32);
+}
+
+/**
+ *****************************************************************************
+ * @ingroup icp_qat_fw_comn
+ *
+ * @description
+ *      Definition of the hw config csr. This representation has to be further
+ *      processed by the corresponding config build function.
+ *
+ *****************************************************************************/
+typedef struct icp_qat_hw_decomp_20_config_csr_upper_s {
+	/* Advanced HW control (Set to default vals) */
+	icp_qat_hw_decomp_20_speculative_decoder_control_t sdc;
+	icp_qat_hw_decomp_20_mini_cam_control_t mcc;
+} icp_qat_hw_decomp_20_config_csr_upper_t;
+
+/**
+ *****************************************************************************
+ * @ingroup icp_qat_fw_comn
+ *
+ * @description
+ *      Build the longword as expected by the HW
+ *
+ *****************************************************************************/
+static inline uint32_t
+ICP_QAT_FW_DECOMP_20_BUILD_CONFIG_UPPER(
+    icp_qat_hw_decomp_20_config_csr_upper_t csr)
+{
+	uint32_t val32 = 0;
+
+	QAT_FIELD_SET(
+	    val32,
+	    csr.sdc,
+	    ICP_QAT_HW_DECOMP_20_CONFIG_CSR_SPECULATIVE_DECODER_CONTROL_BITPOS,
+	    ICP_QAT_HW_DECOMP_20_CONFIG_CSR_SPECULATIVE_DECODER_CONTROL_MASK);
+
+	QAT_FIELD_SET(val32,
+		      csr.mcc,
+		      ICP_QAT_HW_DECOMP_20_CONFIG_CSR_MINI_CAM_CONTROL_BITPOS,
+		      ICP_QAT_HW_DECOMP_20_CONFIG_CSR_MINI_CAM_CONTROL_MASK);
+
+	return BYTE_SWAP_32(val32);
+}
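/*
 * Usage sketch (illustrative, not part of the committed header): the
 * decompression builders follow the same pattern as the compression ones
 * above. Zero is used for both fields as a stand-in here; real configurations
 * should use the named field-value constants from icp_qat_hw_20_comp_defs.h.
 */
static inline uint32_t
example_build_decomp_20_config_lower(void)
{
	icp_qat_hw_decomp_20_config_csr_lower_t lo = { 0 };

	/* 0 selects each field's first enumerated value. */
	return (ICP_QAT_FW_DECOMP_20_BUILD_CONFIG_LOWER(lo));
}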
+
+#endif /* _ICP_QAT_HW_20_COMP_H_ */
diff --git a/sys/dev/qat/qat_api/firmware/include/icp_qat_hw_20_comp_defs.h b/sys/dev/qat/qat_api/firmware/include/icp_qat_hw_20_comp_defs.h
new file mode 100644
index 000000000000..496f2e5c8de5
--- /dev/null
+++ b/sys/dev/qat/qat_api/firmware/include/icp_qat_hw_20_comp_defs.h
@@ -0,0 +1,443 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright(c) 2007-2022 Intel Corporation */
+/* $FreeBSD$ */
+/*
+ ****************************************************************************
+ * @file icp_qat_hw_20_comp_defs.h, (autogenerated at 04-19-18 16:06)
+ * @defgroup icp_qat_hw_comp_20
+ * @ingroup icp_qat_hw_comp_20
+ * @description
+ *      This file represents the HW configuration CSR definitions
+ ****************************************************************************
+ */
+
+#ifndef _ICP_QAT_HW_20_COMP_DEFS_H
+#define _ICP_QAT_HW_20_COMP_DEFS_H
+
+/*****************************************************************************/
+/* SCB Disabled - Set by FW, located in upper 32bit */
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_CONTROL_BITPOS 31
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_CONTROL_MASK 0x1
+/*
+ ****************************************************************************
+ * @ingroup icp_qat_hw_defs
+ * @description
+ *      Enumeration of possible SCB_CONTROL field values
+ *****************************************************************************/
+typedef enum {
+	ICP_QAT_HW_COMP_20_SCB_CONTROL_ENABLE = 0x0,
+	/* Normal Mode using SCB (Default) */
+	ICP_QAT_HW_COMP_20_SCB_CONTROL_DISABLE = 0x1,
+	/* Legacy CPM1.x Mode with SCB disabled. */
+} icp_qat_hw_comp_20_scb_control_t;
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_CONTROL_DEFAULT_VAL \
+	ICP_QAT_HW_COMP_20_SCB_CONTROL_DISABLE
+
+/*****************************************************************************/
+/* Reset Bit Mask Disabled - Set by FW, located in upper 32bit */
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_RMB_CONTROL_BITPOS 30
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_RMB_CONTROL_MASK 0x1
+/*
+ ****************************************************************************
+ * @ingroup icp_qat_hw_defs
+ * @description
+ *      Enumeration of possible RMB_CONTROL field values
+ *****************************************************************************/
+typedef enum {
+	ICP_QAT_HW_COMP_20_RMB_CONTROL_RESET_ALL = 0x0,
+	/* Reset all data structures with a set_config command. (Set by FW) */
+	ICP_QAT_HW_COMP_20_RMB_CONTROL_RESET_FC_ONLY = 0x1,
+	/* Reset only the Frequency Counters (LFCT) with a set_config command.
+	 */
+} icp_qat_hw_comp_20_rmb_control_t;
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_RMB_CONTROL_DEFAULT_VAL \
+	ICP_QAT_HW_COMP_20_RMB_CONTROL_RESET_ALL
+
+/*****************************************************************************/
+/* Slice Operation Mode (SOM) - Set By FW, located in upper 32bit */
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SOM_CONTROL_BITPOS 28
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SOM_CONTROL_MASK 0x3
+/*
+ ****************************************************************************
+ * @ingroup icp_qat_hw_defs
+ * @description
+ *      Enumeration of possible SOM_CONTROL field values
+ *****************************************************************************/
+typedef enum {
+	ICP_QAT_HW_COMP_20_SOM_CONTROL_NORMAL_MODE = 0x0,
+	/* Normal mode. */
+	ICP_QAT_HW_COMP_20_SOM_CONTROL_REPLAY_MODE = 0x1,
+	/* Replay mode */
+	ICP_QAT_HW_COMP_20_SOM_CONTROL_INPUT_CRC = 0x2,
+	/* Input CRC Mode */
+	ICP_QAT_HW_COMP_20_SOM_CONTROL_RESERVED_MODE = 0x3,
+	/* Reserved. */
+} icp_qat_hw_comp_20_som_control_t;
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SOM_CONTROL_DEFAULT_VAL \
+	ICP_QAT_HW_COMP_20_SOM_CONTROL_NORMAL_MODE
+
+/*****************************************************************************/
+/* Skip Hash Read (Set By FW), located in upper 32bit */
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_RD_CONTROL_BITPOS 27
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_RD_CONTROL_MASK 0x1
+/*
+ ****************************************************************************
+ * @ingroup icp_qat_hw_defs
+ * @description
+ *      Enumeration of possible SKIP_HASH_RD_CONTROL field values
+ *****************************************************************************/
+typedef enum {
+	ICP_QAT_HW_COMP_20_SKIP_HASH_RD_CONTROL_NO_SKIP = 0x0,
+	/* When set to 0, hash reads are not skipped. */
+	ICP_QAT_HW_COMP_20_SKIP_HASH_RD_CONTROL_SKIP_HASH_READS = 0x1,
+	/* Hash reads are skipped.
*/ +} icp_qat_hw_comp_20_skip_hash_rd_control_t; + +#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_RD_CONTROL_DEFAULT_VAL \ + ICP_QAT_HW_COMP_20_SKIP_HASH_RD_CONTROL_NO_SKIP + +/*****************************************************************************/ +/* SCB Unload Disable, located in upper 32bit */ +#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_UNLOAD_CONTROL_BITPOS 26 +#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_UNLOAD_CONTROL_MASK 0x1 +/* + **************************************************************************** + * @ingroup icp_qat_hw_defs + * @description + * Enumeration of possible SCB_UNLOAD_CONTROL field values + *****************************************************************************/ +typedef enum { + ICP_QAT_HW_COMP_20_SCB_UNLOAD_CONTROL_UNLOAD = 0x0, + /* Unloads the LFCT and flushes the State Registers. */ + ICP_QAT_HW_COMP_20_SCB_UNLOAD_CONTROL_NO_UNLOAD = 0x1, + /* Does not unload the LFCT, but flushes the State Registers. */ +} icp_qat_hw_comp_20_scb_unload_control_t; + +#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_UNLOAD_CONTROL_DEFAULT_VAL \ + ICP_QAT_HW_COMP_20_SCB_UNLOAD_CONTROL_UNLOAD + +/*****************************************************************************/ +/* Disable token fusion, located in upper 32bit */ +#define ICP_QAT_HW_COMP_20_CONFIG_CSR_DISABLE_TOKEN_FUSION_CONTROL_BITPOS 21 +#define ICP_QAT_HW_COMP_20_CONFIG_CSR_DISABLE_TOKEN_FUSION_CONTROL_MASK 0x1 +/* + **************************************************************************** + * @ingroup icp_qat_hw_defs + * @description + * Enumeration of possible DISABLE_TOKEN_FUSION_CONTROL field values + *****************************************************************************/ +typedef enum { + ICP_QAT_HW_COMP_20_DISABLE_TOKEN_FUSION_CONTROL_ENABLE = 0x0, + /* Enables token fusion. */ + ICP_QAT_HW_COMP_20_DISABLE_TOKEN_FUSION_CONTROL_DISABLE = 0x1, + /* Disables token fusion. 
*/ +} icp_qat_hw_comp_20_disable_token_fusion_control_t; + +#define ICP_QAT_HW_COMP_20_CONFIG_CSR_DISABLE_TOKEN_FUSION_CONTROL_DEFAULT_VAL \ + ICP_QAT_HW_COMP_20_DISABLE_TOKEN_FUSION_CONTROL_ENABLE + +/*****************************************************************************/ +/* SCB Mode Reset Mask (Set By FW) , located in upper 32bit */ +#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_MODE_RESET_MASK_BITPOS 18 +#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_MODE_RESET_MASK_MASK 0x1 +/* + **************************************************************************** + * @ingroup icp_qat_hw_defs + * @description + * Enumeration of possible SCB_MODE_RESET_MASK field values + *****************************************************************************/ +typedef enum { + ICP_QAT_HW_COMP_20_SCB_MODE_RESET_MASK_RESET_COUNTERS = 0x0, + /* iLZ77 mode: Reset LFCT, OBC */ + ICP_QAT_HW_COMP_20_SCB_MODE_RESET_MASK_RESET_COUNTERS_AND_HISTORY = 0x1, + /* iLZ77 mode: Reset LFCT, OBC, HB, HT */ +} icp_qat_hw_comp_20_scb_mode_reset_mask_t; + +#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_MODE_RESET_MASK_DEFAULT_VAL \ + ICP_QAT_HW_COMP_20_SCB_MODE_RESET_MASK_RESET_COUNTERS + +/*****************************************************************************/ +/* Lazy - For iLZ77 and Static DEFLATE, Lazy = 102h , located in upper + * 32bit */ +#define ICP_QAT_HW_COMP_20_CONFIG_CSR_LAZY_PARAM_BITPOS 9 +#define ICP_QAT_HW_COMP_20_CONFIG_CSR_LAZY_PARAM_MASK 0x1ff +#define ICP_QAT_HW_COMP_20_CONFIG_CSR_LAZY_PARAM_DEFAULT_VAL 258 + +/*****************************************************************************/ +/* Nice - For iLZ77 and Static DEFLATE, Nice = 103h , located in upper + * 32bit */ +#define ICP_QAT_HW_COMP_20_CONFIG_CSR_NICE_PARAM_BITPOS 0 +#define ICP_QAT_HW_COMP_20_CONFIG_CSR_NICE_PARAM_MASK 0x1ff +#define ICP_QAT_HW_COMP_20_CONFIG_CSR_NICE_PARAM_DEFAULT_VAL 259 + +/*****************************************************************************/ +/* History Buffer Size (Set By the Driver/ Application), located in lower 32bit + */ +#define ICP_QAT_HW_COMP_20_CONFIG_CSR_HBS_CONTROL_BITPOS 14 +#define ICP_QAT_HW_COMP_20_CONFIG_CSR_HBS_CONTROL_MASK 0x7 +/* + **************************************************************************** + * @ingroup icp_qat_hw_defs + * @description + * Enumeration of possible HBS_CONTROL field values + *****************************************************************************/ +typedef enum { + ICP_QAT_HW_COMP_20_HBS_CONTROL_HBS_IS_32KB = 0x0, + /* 000b - 32KB */ + ICP_QAT_HW_COMP_20_HBS_CONTROL_HBS_IS_64KB = 0x1, + /* 001b - 64KB */ +} icp_qat_hw_comp_20_hbs_control_t; + +#define ICP_QAT_HW_COMP_20_CONFIG_CSR_HBS_CONTROL_DEFAULT_VAL \ + ICP_QAT_HW_COMP_20_HBS_CONTROL_HBS_IS_32KB + +/*****************************************************************************/ +/* Adaptive Block Drop (Set By FW if Dynamic), located in lower 32bit */ +#define ICP_QAT_HW_COMP_20_CONFIG_CSR_ABD_BITPOS 13 +#define ICP_QAT_HW_COMP_20_CONFIG_CSR_ABD_MASK 0x1 +/* + **************************************************************************** + * @ingroup icp_qat_hw_defs + * @description + * Enumeration of possible ABD field values + *****************************************************************************/ +typedef enum { + ICP_QAT_HW_COMP_20_ABD_ABD_ENABLED = 0x0, + /* 0b - Feature enabled. */ + ICP_QAT_HW_COMP_20_ABD_ABD_DISABLED = 0x1, + /* 1b - Feature disabled. 
*/ +} icp_qat_hw_comp_20_abd_t; + +#define ICP_QAT_HW_COMP_20_CONFIG_CSR_ABD_DEFAULT_VAL \ + ICP_QAT_HW_COMP_20_ABD_ABD_ENABLED + +/*****************************************************************************/ +/* Literal+Length Limit Block Drop (Set By FW if Dynamic), located + * in lower 32bit */ +#define ICP_QAT_HW_COMP_20_CONFIG_CSR_LLLBD_CTRL_BITPOS 12 +#define ICP_QAT_HW_COMP_20_CONFIG_CSR_LLLBD_CTRL_MASK 0x1 +/* + **************************************************************************** + * @ingroup icp_qat_hw_defs + * @description + * Enumeration of possible LLLBD_CTRL field values + *****************************************************************************/ +typedef enum { + ICP_QAT_HW_COMP_20_LLLBD_CTRL_LLLBD_ENABLED = 0x0, + /* 0b - Feature enabled. */ + ICP_QAT_HW_COMP_20_LLLBD_CTRL_LLLBD_DISABLED = 0x1, + /* 1b - Feature disabled. */ +} icp_qat_hw_comp_20_lllbd_ctrl_t; + +#define ICP_QAT_HW_COMP_20_CONFIG_CSR_LLLBD_CTRL_DEFAULT_VAL \ + ICP_QAT_HW_COMP_20_LLLBD_CTRL_LLLBD_ENABLED + +/*****************************************************************************/ +/* Search Depth (SD) (Set By Driver/Application), located in lower 32bit */ +#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SEARCH_DEPTH_BITPOS 8 +#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SEARCH_DEPTH_MASK 0xf +/* + **************************************************************************** + * @ingroup icp_qat_hw_defs + * @description + * Enumeration of possible SEARCH_DEPTH field values + *****************************************************************************/ +typedef enum { + ICP_QAT_HW_COMP_20_SEARCH_DEPTH_LEVEL_1 = 0x1, + /* 0001b - Level 1 (search depth = 2^1 = 2) */ + ICP_QAT_HW_COMP_20_SEARCH_DEPTH_LEVEL_6 = 0x3, + /* 0011b - Level 6 (search depth = 2^3 = 8) */ + ICP_QAT_HW_COMP_20_SEARCH_DEPTH_LEVEL_9 = 0x4, + /* 0100b - Level 9 (search depth = 2^4 = 16) */ +} icp_qat_hw_comp_20_search_depth_t; + +#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SEARCH_DEPTH_DEFAULT_VAL \ + ICP_QAT_HW_COMP_20_SEARCH_DEPTH_LEVEL_1 + +/*****************************************************************************/ +/* Compression Format (Set By Driver/Application. Also See CMD ID), located in + * lower 32bit */ +#define ICP_QAT_HW_COMP_20_CONFIG_CSR_HW_COMP_FORMAT_BITPOS 5 +#define ICP_QAT_HW_COMP_20_CONFIG_CSR_HW_COMP_FORMAT_MASK 0x7 +/* + **************************************************************************** + * @ingroup icp_qat_hw_defs + * @description + * Enumeration of possible HW_COMP_FORMAT field values + *****************************************************************************/ +typedef enum { + ICP_QAT_HW_COMP_20_HW_COMP_FORMAT_ILZ77 = 0x0, + /* 000 - iLZ77. (Must set Min_Match = 3 bytes and HB size = 32KB.) */ + ICP_QAT_HW_COMP_20_HW_COMP_FORMAT_DEFLATE = 0x1, + /* 001 - Static DEFLATE. (Must set Min_Match = 3 bytes and HB size = + 32KB.)
*/ +} icp_qat_hw_comp_20_hw_comp_format_t; + +#define ICP_QAT_HW_COMP_20_CONFIG_CSR_HW_COMP_FORMAT_DEFAULT_VAL \ + ICP_QAT_HW_COMP_20_HW_COMP_FORMAT_DEFLATE + +/*****************************************************************************/ +/* Skip Hash Collision (Set By FW to default value), located in lower 32bit */ +#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_COLLISION_BITPOS 3 +#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_COLLISION_MASK 0x1 +/* + **************************************************************************** + * @ingroup icp_qat_hw_defs + * @description + * Enumeration of possible SKIP_HASH_COLLISION field values + *****************************************************************************/ +typedef enum { + ICP_QAT_HW_COMP_20_SKIP_HASH_COLLISION_ALLOW = 0x0, + /* When set to 0, hash collisions are allowed. */ + ICP_QAT_HW_COMP_20_SKIP_HASH_COLLISION_DONT_ALLOW = 0x1, + /* When set to 1, hash collisions are not allowed. */ +} icp_qat_hw_comp_20_skip_hash_collision_t; + +#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_COLLISION_DEFAULT_VAL \ + ICP_QAT_HW_COMP_20_SKIP_HASH_COLLISION_ALLOW + +/*****************************************************************************/ +/* Skip Hash Update (Set By FW to default value) , located in lower 32bit */ +#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_UPDATE_BITPOS 2 +#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_UPDATE_MASK 0x1 +/* + **************************************************************************** + * @ingroup icp_qat_hw_defs + * @description + * Enumeration of possible SKIP_HASH_UPDATE field values + *****************************************************************************/ +typedef enum { + ICP_QAT_HW_COMP_20_SKIP_HASH_UPDATE_ALLOW = 0x0, + /* 0 - hash updates are not skipped. */ + ICP_QAT_HW_COMP_20_SKIP_HASH_UPDATE_DONT_ALLOW = 0x1, + /* 1 - hash updates are skipped.
*/ +} icp_qat_hw_comp_20_skip_hash_update_t; + +#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_UPDATE_DEFAULT_VAL \ + ICP_QAT_HW_COMP_20_SKIP_HASH_UPDATE_ALLOW + +/*****************************************************************************/ +/* 3-Byte Match Skip (Set By FW to default value), located in lower 32bit */ +#define ICP_QAT_HW_COMP_20_CONFIG_CSR_BYTE_SKIP_BITPOS 1 +#define ICP_QAT_HW_COMP_20_CONFIG_CSR_BYTE_SKIP_MASK 0x1 +/* + **************************************************************************** + * @ingroup icp_qat_hw_defs + * @description + * Enumeration of possible BYTE_SKIP field values + *****************************************************************************/ +typedef enum { + ICP_QAT_HW_COMP_20_BYTE_SKIP_3BYTE_TOKEN = 0x0, + /* 0 - Use 3-byte token */ + ICP_QAT_HW_COMP_20_BYTE_SKIP_3BYTE_LITERAL = 0x1, + /* 1 - Use 3-byte literal */ +} icp_qat_hw_comp_20_byte_skip_t; + +#define ICP_QAT_HW_COMP_20_CONFIG_CSR_BYTE_SKIP_DEFAULT_VAL \ + ICP_QAT_HW_COMP_20_BYTE_SKIP_3BYTE_TOKEN + +/*****************************************************************************/ +/* Extended Delayed Match Mode enabled (Set By the Driver), located in lower + * 32bit */ +#define ICP_QAT_HW_COMP_20_CONFIG_CSR_EXTENDED_DELAY_MATCH_MODE_BITPOS 0 +#define ICP_QAT_HW_COMP_20_CONFIG_CSR_EXTENDED_DELAY_MATCH_MODE_MASK 0x1 +/* + **************************************************************************** + * @ingroup icp_qat_hw_defs + * @description + * Enumeration of possible EXTENDED_DELAY_MATCH_MODE field values + *****************************************************************************/ +typedef enum { + ICP_QAT_HW_COMP_20_EXTENDED_DELAY_MATCH_MODE_EDMM_DISABLED = 0x0, + /* 0 - EXTENDED_DELAY_MATCH_MODE disabled */ + ICP_QAT_HW_COMP_20_EXTENDED_DELAY_MATCH_MODE_EDMM_ENABLED = 0x1, + /* 1 - EXTENDED_DELAY_MATCH_MODE enabled */ +} icp_qat_hw_comp_20_extended_delay_match_mode_t; + +#define ICP_QAT_HW_COMP_20_CONFIG_CSR_EXTENDED_DELAY_MATCH_MODE_DEFAULT_VAL \ + ICP_QAT_HW_COMP_20_EXTENDED_DELAY_MATCH_MODE_EDMM_DISABLED + +/*****************************************************************************/ +/* Speculative Decoder Disable (Set By the Driver/ Application), located in + * upper 32bit */ +#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_SPECULATIVE_DECODER_CONTROL_BITPOS 31 +#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_SPECULATIVE_DECODER_CONTROL_MASK 0x1 +/* + **************************************************************************** + * @ingroup icp_qat_hw_defs + * @description + * Enumeration of possible SPECULATIVE_DECODER_CONTROL field values + *****************************************************************************/ +typedef enum { + ICP_QAT_HW_DECOMP_20_SPECULATIVE_DECODER_CONTROL_ENABLE = 0x0, + /* 0b - Enabled */ + ICP_QAT_HW_DECOMP_20_SPECULATIVE_DECODER_CONTROL_DISABLE = 0x1, + /* 1b - Disabled */ +} icp_qat_hw_decomp_20_speculative_decoder_control_t; + +#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_SPECULATIVE_DECODER_CONTROL_DEFAULT_VAL \ + ICP_QAT_HW_DECOMP_20_SPECULATIVE_DECODER_CONTROL_ENABLE + +/*****************************************************************************/ +/* Mini CAM Disable (Set By the Driver/ Application), located in upper 32bit */ +#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_MINI_CAM_CONTROL_BITPOS 30 +#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_MINI_CAM_CONTROL_MASK 0x1 +/* + **************************************************************************** + * @ingroup icp_qat_hw_defs + * @description + * Enumeration of possible MINI_CAM_CONTROL field values + 
*****************************************************************************/ +typedef enum { + ICP_QAT_HW_DECOMP_20_MINI_CAM_CONTROL_ENABLE = 0x0, + /* 0b - Enabled */ + ICP_QAT_HW_DECOMP_20_MINI_CAM_CONTROL_DISABLE = 0x1, + /* 1b - Disabled */ +} icp_qat_hw_decomp_20_mini_cam_control_t; + +#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_MINI_CAM_CONTROL_DEFAULT_VAL \ + ICP_QAT_HW_DECOMP_20_MINI_CAM_CONTROL_ENABLE + +/*****************************************************************************/ +/* History Buffer Size (Set By the Driver/ Application), located in lower 32bit + */ +#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_HBS_CONTROL_BITPOS 14 +#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_HBS_CONTROL_MASK 0x7 +/* + **************************************************************************** + * @ingroup icp_qat_hw_defs + * @description + * Enumeration of possible HBS_CONTROL field values + *****************************************************************************/ +typedef enum { + ICP_QAT_HW_DECOMP_20_HBS_CONTROL_HBS_IS_32KB = 0x0, + /* 000b - 32KB */ +} icp_qat_hw_decomp_20_hbs_control_t; + +#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_HBS_CONTROL_DEFAULT_VAL \ + ICP_QAT_HW_DECOMP_20_HBS_CONTROL_HBS_IS_32KB + +/*****************************************************************************/ +/* Decompression Format (Set By Driver/Application. Also See CMD ID), located in + * lower 32bit */ +#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_HW_DECOMP_FORMAT_BITPOS 5 +#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_HW_DECOMP_FORMAT_MASK 0x7 +/* + **************************************************************************** + * @ingroup icp_qat_hw_defs + * @description + * Enumeration of possible HW_DECOMP_FORMAT field values + *****************************************************************************/ +typedef enum { + ICP_QAT_HW_DECOMP_20_HW_DECOMP_FORMAT_DEFLATE = 0x1, + /* 001 - Static DEFLATE. (Must set Min_Match = 3 bytes and HB size = + 32KB.) */ +} icp_qat_hw_decomp_20_hw_comp_format_t; + +#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_HW_DECOMP_FORMAT_DEFAULT_VAL \ + ICP_QAT_HW_DECOMP_20_HW_DECOMP_FORMAT_DEFLATE + +#endif //_ICP_QAT_HW_20_COMP_DEFS_H diff --git a/sys/dev/qat/qat_api/include/icp_sal_user.h b/sys/dev/qat/qat_api/include/icp_sal_user.h index af34d5aa194a..6c46210f6a1f 100644 --- a/sys/dev/qat/qat_api/include/icp_sal_user.h +++ b/sys/dev/qat/qat_api/include/icp_sal_user.h @@ -1,871 +1,908 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright(c) 2007-2022 Intel Corporation */ /* $FreeBSD$ */ /** *************************************************************************** * @file icp_sal_user.h * * @ingroup SalUser * * User space process init and shutdown functions. * ***************************************************************************/ #ifndef ICP_SAL_USER_H #define ICP_SAL_USER_H /************************************************************************* * @ingroup SalUser * @description * This function initialises and starts user space service access layer * (SAL) - it registers SAL with ADF and initialises the ADF proxy. * This function must only be called once per user space process. 
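For orientation, here is a minimal user-space skeleton pairing icp_sal_userStart() with icp_sal_userStop() as described above. This is a sketch only: the section name "SSL" and the work done in the middle are assumptions for illustration, and must match the device configuration file in a real application.

#include "cpa.h"
#include "icp_sal_user.h"

/* Hypothetical per-process SAL lifecycle: start once, use the services,
 * then stop.  "SSL" is an assumed section name from the config file. */
int
example_sal_lifecycle(void)
{
	if (icp_sal_userStart("SSL") != CPA_STATUS_SUCCESS)
		return (-1);

	/* ... obtain instances and submit requests here ... */

	(void)icp_sal_userStop();
	return (0);
}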
* * @context * This function is called from the user process context * * @assumptions * None * @sideEffects * None * @reentrant * No * @threadSafe * Yes * * @param[in] pProcessName Process address space name described in * the config file for this device * * @retval CPA_STATUS_SUCCESS No error * @retval CPA_STATUS_FAIL Operation failed * *************************************************************************/ CpaStatus icp_sal_userStart(const char *pProcessName); /************************************************************************* * @ingroup SalUser * @description * This function is to be used with a simplified config file, where the user * defines many user space processes. The driver generates unique * process names based on the pProcessName provided. * For example: * If a config file in simplified format contains: * [SSL] * NumProcesses = 3 * * Then three internal sections will be generated and the three * applications can be started at a given time. Each application can call * icp_sal_userStartMultiProcess("SSL"). In this case the driver will * figure out the unique name to use for each process. * * @context * This function is called from the user process context * * @assumptions * None * @sideEffects * None * @reentrant * No * @threadSafe * Yes * * @param[in] pProcessName Process address space name described in * the new format of the config file * for this device. * * @param[in] limitDevAccess Specifies if the address space is limited * to one device (true) or if it spans * across multiple devices. * * @retval CPA_STATUS_SUCCESS No error * @retval CPA_STATUS_FAIL Operation failed. In this case user * can wait and retry. * *************************************************************************/ CpaStatus icp_sal_userStartMultiProcess(const char *pProcessName, CpaBoolean limitDevAccess); /************************************************************************* * @ingroup SalUser * @description * This function stops and shuts down user space SAL * - it deregisters SAL with ADF and shuts down ADF proxy * * @context * This function is called from the user process context * * @assumptions * None * @sideEffects * None * @reentrant * No * @threadSafe * Yes * * @retval CPA_STATUS_SUCCESS No error * @retval CPA_STATUS_FAIL Operation failed * ************************************************************************/ CpaStatus icp_sal_userStop(void); /************************************************************************* * @ingroup SalUser * @description * This function gets the number of the available dynamic allocated * crypto instances * * @context * This function is called from the user process context * * @assumptions * None * @sideEffects * None * @reentrant * No * @threadSafe * Yes * * @retval CPA_STATUS_SUCCESS No error * @retval CPA_STATUS_FAIL Operation failed * ************************************************************************/ CpaStatus icp_sal_userCyGetAvailableNumDynInstances(Cpa32U *pNumCyInstances); /************************************************************************* * @ingroup SalUser * @description * This function gets the number of the available dynamic allocated * compression instances * * @context * This function is called from the user process context * * @assumptions * None * @sideEffects * None * @reentrant * No * @threadSafe * Yes * * @retval CPA_STATUS_SUCCESS No error * @retval CPA_STATUS_FAIL Operation failed * ************************************************************************/ CpaStatus icp_sal_userDcGetAvailableNumDynInstances(Cpa32U
*pNumDcInstances); /************************************************************************* * @ingroup SalUser * @description * This function gets the number of the available dynamic allocated * crypto instances which are from the specific device package. * * @context * This function is called from the user process context * * @assumptions * None * @sideEffects * None * @reentrant * No * @threadSafe * Yes * * @retval CPA_STATUS_SUCCESS No error * @retval CPA_STATUS_FAIL Operation failed * ************************************************************************/ CpaStatus icp_sal_userCyGetAvailableNumDynInstancesByDevPkg(Cpa32U *pNumCyInstances, Cpa32U devPkgID); /************************************************************************* * @ingroup SalUser * @description * This function gets the number of the available dynamic allocated * crypto instances which are from the specific device package and specific * accelerator. * * @context * This function is called from the user process context * * @assumptions * None * @sideEffects * None * @reentrant * No * @threadSafe * Yes * * @retval CPA_STATUS_SUCCESS No error * @retval CPA_STATUS_FAIL Operation failed * ************************************************************************/ CpaStatus icp_sal_userCyGetAvailableNumDynInstancesByPkgAccel(Cpa32U *pNumCyInstances, Cpa32U devPkgID, Cpa32U accelerator_number); /************************************************************************* * @ingroup SalUser * @description * This function gets the number of the available dynamic allocated * compression instances which are from the specific device package. * * @context * This function is called from the user process context * * @assumptions * None * @sideEffects * None * @reentrant * No * @threadSafe * Yes * * @retval CPA_STATUS_SUCCESS No error * @retval CPA_STATUS_FAIL Operation failed * ************************************************************************/ CpaStatus icp_sal_userDcGetAvailableNumDynInstancesByDevPkg(Cpa32U *pNumDcInstances, Cpa32U devPkgID); /************************************************************************* * @ingroup SalUser * @description * This function allocates crypto instances * from dynamic crypto instance pool * - it adds new allocated instances into crypto_services * - it initializes new allocated instances * - it starts new allocated instances * * @context * This function is called from the user process context * * @assumptions * None * @sideEffects * None * @reentrant * No * @threadSafe * Yes * * @retval CPA_STATUS_SUCCESS No error * @retval CPA_STATUS_FAIL Operation failed * ************************************************************************/ CpaStatus icp_sal_userCyInstancesAlloc(Cpa32U numCyInstances, CpaInstanceHandle *pCyInstances); /************************************************************************* * @ingroup SalUser * @description * This function allocates crypto instances * from dynamic crypto instance pool * which are from the specific device package. 
* - it adds new allocated instances into crypto_services * - it initializes new allocated instances * - it starts new allocated instances * * @context * This function is called from the user process context * * @assumptions * None * @sideEffects * None * @reentrant * No * @threadSafe * Yes * * @retval CPA_STATUS_SUCCESS No error * @retval CPA_STATUS_FAIL Operation failed * ************************************************************************/ CpaStatus icp_sal_userCyInstancesAllocByDevPkg(Cpa32U numCyInstances, CpaInstanceHandle *pCyInstances, Cpa32U devPkgID); /************************************************************************* * @ingroup SalUser * @description * This function allocates crypto instances * from dynamic crypto instance pool * which are from the specific device package and specific accelerator * - it adds new allocated instances into crypto_services * - it initializes new allocated instances * - it starts new allocated instances * * @context * This function is called from the user process context * * @assumptions * None * @sideEffects * None * @reentrant * No * @threadSafe * Yes * * @retval CPA_STATUS_SUCCESS No error * @retval CPA_STATUS_FAIL Operation failed * ************************************************************************/ CpaStatus icp_sal_userCyInstancesAllocByPkgAccel(Cpa32U numCyInstances, CpaInstanceHandle *pCyInstances, Cpa32U devPkgID, Cpa32U accelerator_number); /************************************************************************* * @ingroup SalUser * @description * This function frees crypto instances allocated * from dynamic crypto instance pool * - it stops the instances * - it shutdowns the instances * - it removes the instances from crypto_services * * @context * This function is called from the user process context * * @assumptions * None * @sideEffects * None * @reentrant * No * @threadSafe * Yes * * @retval CPA_STATUS_SUCCESS No error * @retval CPA_STATUS_FAIL Operation failed * ************************************************************************/ CpaStatus icp_sal_userCyFreeInstances(Cpa32U numCyInstances, CpaInstanceHandle *pCyInstances); /************************************************************************* * @ingroup SalUser * @description * This function allocates compression instances * from dynamic compression instance pool * - it adds new allocated instances into compression_services * - it initializes new allocated instances * - it starts new allocated instances * * @context * This function is called from the user process context * * @assumptions * None * @sideEffects * None * @reentrant * No * @threadSafe * Yes * * @retval CPA_STATUS_SUCCESS No error * @retval CPA_STATUS_FAIL Operation failed * ************************************************************************/ CpaStatus icp_sal_userDcInstancesAlloc(Cpa32U numDcInstances, CpaInstanceHandle *pDcInstances); /************************************************************************* * @ingroup SalUser * @description * This function allocates compression instances * from dynamic compression instance pool * which are from the specific device package. 
* - it adds new allocated instances into compression_services * - it initializes new allocated instances * - it starts new allocated instances * * @context * This function is called from the user process context * * @assumptions * None * @sideEffects * None * @reentrant * No * @threadSafe * Yes * * @retval CPA_STATUS_SUCCESS No error * @retval CPA_STATUS_FAIL Operation failed * ************************************************************************/ CpaStatus icp_sal_userDcInstancesAllocByDevPkg(Cpa32U numDcInstances, CpaInstanceHandle *pDcInstances, Cpa32U devPkgID); /************************************************************************* * @ingroup SalUser * @description * This function frees compression instances allocated * from dynamic compression instance pool * - it stops the instances * - it shutdowns the instances * - it removes the instances from compression_services * * @context * This function is called from the user process context * * @assumptions * None * @sideEffects * None * @reentrant * No * @threadSafe * Yes * * @retval CPA_STATUS_SUCCESS No error * @retval CPA_STATUS_FAIL Operation failed * ************************************************************************/ CpaStatus icp_sal_userDcFreeInstances(Cpa32U numDcInstances, CpaInstanceHandle *pDcInstances); /************************************************************************* * @ingroup SalUser * @description * This function checks if new devices have been started and if so * starts to use them. * * @context * This function is called from the user process context * in threadless mode * * @assumptions * None * @sideEffects * None * @reentrant * No * @threadSafe * No * * @retval CPA_STATUS_SUCCESS No error * @retval CPA_STATUS_FAIL Operation failed * ************************************************************************/ CpaStatus icp_sal_find_new_devices(void); /************************************************************************* * @ingroup SalUser * @description * This function polls device events. * * @context * This function is called from the user process context * in threadless mode * * @assumptions * None * @sideEffects * In case a device has been stopped or restarted the application * will get restarting/stop/shutdown events * @reentrant * No * @threadSafe * No * * @retval CPA_STATUS_SUCCESS No error * @retval CPA_STATUS_FAIL Operation failed * ************************************************************************/ CpaStatus icp_sal_poll_device_events(void); /* * icp_sal_check_device * * @description: * This function checks the status of the firmware/hardware for a given device. * This function is used as part of the heartbeat functionality. * * @context * This function is called from the user process context * @assumptions * None * @sideEffects * In case a device is unresponsive the device will * be restarted. * @reentrant * No * @threadSafe * Yes * * @param[in] accelId Device Id. * @retval CPA_STATUS_SUCCESS No error * @retval CPA_STATUS_FAIL Operation failed */ CpaStatus icp_sal_check_device(Cpa32U accelId); /* * icp_sal_check_all_devices * * @description: * This function checks the status of the firmware/hardware for all devices. * This function is used as part of the heartbeat functionality. * * @context * This function is called from the user process context * @assumptions * None * @sideEffects * In case a device is unresponsive the device will * be restarted.
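A possible heartbeat tick built on the three calls documented here: icp_sal_check_all_devices(), icp_sal_poll_device_events(), and icp_sal_find_new_devices(). The ordering and calling cadence are application policy, assumed for this sketch, not mandated by the API.

/* Hypothetical threadless heartbeat tick: check device health, drain
 * any restart/stop/shutdown events, then adopt newly started devices. */
static void
example_heartbeat_tick(void)
{
	if (icp_sal_check_all_devices() != CPA_STATUS_SUCCESS) {
		/* An unresponsive device is restarted by the driver;
		 * polling events lets us observe that transition. */
		(void)icp_sal_poll_device_events();
	}
	(void)icp_sal_find_new_devices();
}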
* @reentrant * No * @threadSafe * Yes * * @retval CPA_STATUS_SUCCESS No error * @retval CPA_STATUS_FAIL Operation failed */ CpaStatus icp_sal_check_all_devices(void); /* * @ingroup icp_sal_user * @description * This is a stub function to send messages to VF * * @context * None * * @assumptions * None * @sideEffects * None * @reentrant * Yes * @threadSafe * Yes * */ CpaStatus icp_sal_userSendMsgToVf(Cpa32U accelId, Cpa32U vfNum, Cpa32U message); /* * @ingroup icp_sal_user * @description * This is a stub function to send messages to PF * * @context * None * * @assumptions * None * @sideEffects * None * @reentrant * Yes * @threadSafe * Yes * */ CpaStatus icp_sal_userSendMsgToPf(Cpa32U accelId, Cpa32U message); /* * @ingroup icp_sal_user * @description * This is a stub function to get messages from VF * * @context * None * * @assumptions * None * @sideEffects * None * @reentrant * Yes * @threadSafe * Yes * */ CpaStatus icp_sal_userGetMsgFromVf(Cpa32U accelId, Cpa32U vfNum, Cpa32U *message, Cpa32U *messageCounter); /* * @ingroup icp_sal_user * @description * This is a stub function to get messages from PF * * @context * None * * @assumptions * None * @sideEffects * None * @reentrant * Yes * @threadSafe * Yes * */ CpaStatus icp_sal_userGetMsgFromPf(Cpa32U accelId, Cpa32U *message, Cpa32U *messageCounter); /* * @ingroup icp_sal_user * @description * This is a stub function to get pfvf comms status * * @context * None * * @assumptions * None * @sideEffects * None * @reentrant * Yes * @threadSafe * Yes * */ CpaStatus icp_sal_userGetPfVfcommsStatus(CpaBoolean *unreadMessage); /* * @ingroup icp_sal_user * @description * This is a stub function to reset the device * * @context * None * * @assumptions * None * @sideEffects * None * @reentrant * Yes * @threadSafe * Yes * */ CpaStatus icp_sal_reset_device(Cpa32U accelId); /** ***************************************************************************** * @ingroup icp_sal_user * Retrieve number of in flight requests for a nrbg tx ring * from a crypto instance (Traditional API). * * @description * This function is a part of back-pressure mechanism. * Applications can query for inflight requests in * the appropriate service/ring on each instance * and select any instance with sufficient space or * the instance with the lowest number. * * @assumptions * None * @sideEffects * None * @blocking * None * @reentrant * No * @threadSafe * Yes * * @param[in] instanceHandle Crypto API instance handle. * @param[out] maxInflightRequests Maximal number of in flight requests. * @param[out] numInflightRequests Current number of in flight requests. * * @retval CPA_STATUS_SUCCESS Function executed successfully. * @retval CPA_STATUS_FAIL Function failed. * @pre * None * @post * None * @see * None * *****************************************************************************/ CpaStatus icp_sal_NrbgGetInflightRequests(CpaInstanceHandle instanceHandle, Cpa32U *maxInflightRequests, Cpa32U *numInflightRequests); /** ***************************************************************************** * @ingroup icp_sal_user * Retrieve number of in flight requests for a symmetric tx ring * from a crypto instance (Traditional API). * * @description * This function is a part of back-pressure mechanism. * Applications can query for inflight requests in * the appropriate service/ring on each instance * and select any instance with sufficient space or * the instance with the lowest number. 
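To make the back-pressure idea concrete, here is a sketch that treats an instance as busy once its symmetric TX ring passes an assumed seven-eighths fill threshold; the threshold is application policy, not part of the API.

/* Hypothetical admission check using icp_sal_SymGetInflightRequests();
 * the 7/8 fill threshold is an assumed policy for illustration. */
static CpaBoolean
example_instance_has_room(CpaInstanceHandle inst)
{
	Cpa32U maxReqs = 0;
	Cpa32U curReqs = 0;

	if (icp_sal_SymGetInflightRequests(inst, &maxReqs, &curReqs) !=
	    CPA_STATUS_SUCCESS)
		return (CPA_FALSE);
	return ((curReqs < maxReqs - maxReqs / 8) ? CPA_TRUE : CPA_FALSE);
}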
* * @assumptions * None * @sideEffects * None * @blocking * None * @reentrant * No * @threadSafe * Yes * * @param[in] instanceHandle Crypto API instance handle. * @param[out] maxInflightRequests Maximal number of in flight requests. * @param[out] numInflightRequests Current number of in flight requests. * * @retval CPA_STATUS_SUCCESS Function executed successfully. * @retval CPA_STATUS_FAIL Function failed. * @pre * None * @post * None * @see * None * *****************************************************************************/ CpaStatus icp_sal_SymGetInflightRequests(CpaInstanceHandle instanceHandle, Cpa32U *maxInflightRequests, Cpa32U *numInflightRequests); /** ***************************************************************************** * @ingroup icp_sal_user * Retrieve number of in flight requests for an asymmetric tx ring * from a crypto instance (Traditional API). * * @description * This function is a part of back-pressure mechanism. * Applications can query the appropriate service/ring on each instance * and select any instance with sufficient space or * the instance with the lowest number. * * @assumptions * None * @sideEffects * None * @blocking * None * @reentrant * No * @threadSafe * Yes * * @param[in] instanceHandle Crypto API instance handle. * @param[out] maxInflightRequests Maximal number of in flight requests. * @param[out] numInflightRequests Current number of in flight requests. * * @retval CPA_STATUS_SUCCESS Function executed successfully. * @retval CPA_STATUS_FAIL Function failed. * @pre * None * @post * None * @see * None * *****************************************************************************/ CpaStatus icp_sal_AsymGetInflightRequests(CpaInstanceHandle instanceHandle, Cpa32U *maxInflightRequests, Cpa32U *numInflightRequests); /** ***************************************************************************** * @ingroup icp_sal_user * Retrieve number of in flight requests for a symmetric tx ring * from a crypto instance (Data Plane API). * * @description * This function is a part of back-pressure mechanism. * Applications can query the appropriate service/ring on each instance * and select any instance with sufficient space or * the instance with the lowest number. * * @assumptions * None * @sideEffects * None * @blocking * None * @reentrant * No * @threadSafe * Yes * * @param[in] instanceHandle Crypto API instance handle. * @param[out] maxInflightRequests Maximal number of in flight requests. * @param[out] numInflightRequests Current number of in flight requests. * * @retval CPA_STATUS_SUCCESS Function executed successfully. * @retval CPA_STATUS_FAIL Function failed. * @pre * None * @post * None * @see * None * *****************************************************************************/ CpaStatus icp_sal_dp_SymGetInflightRequests(CpaInstanceHandle instanceHandle, Cpa32U *maxInflightRequests, Cpa32U *numInflightRequests); /** ***************************************************************************** * @ingroup icp_sal_user * Updates the CSR with queued requests in the asymmetric tx ring. * * @description * The function writes current shadow tail pointer of the asymmetric * TX ring into ring's CSR. Updating the CSR will notify the HW that * there are request(s) queued to be processed. The CSR is updated * always, disregarding the current value of shadow tail pointer and * the current CSR's tail value.
* * @assumptions * None * @sideEffects * None * @blocking * None * @reentrant * No * @threadSafe * Yes * * @param[in] instanceHandle Crypto API instance handle. * * @retval CPA_STATUS_SUCCESS Function executed successfully. * @retval CPA_STATUS_FAIL Function failed. * @pre * None * @post * None * @see * None * *****************************************************************************/ CpaStatus icp_sal_AsymPerformOpNow(CpaInstanceHandle instanceHandle); + +/** + ***************************************************************************** + * @ingroup icp_sal_setForceAEADMACVerify + * Sets forceAEADMacVerify for a particular instance to force HW MAC + * validation. + * + * @description + * By default HW MAC verification is set to CPA_TRUE - this utility + * function allows changing the default behavior. + * + * @assumptions + * None + * @sideEffects + * None + * @blocking + * None + * @reentrant + * No + * @threadSafe + * No + * + * @param[in] instanceHandle Crypto API instance handle. + * @param[in] forceAEADMacVerify new value + * + * @retval CPA_STATUS_SUCCESS Function executed successfully. + * @retval CPA_STATUS_FAIL Function failed. + * @pre + * None + * @post + * None + * @see + * None + * + *****************************************************************************/ +CpaStatus icp_sal_setForceAEADMACVerify(CpaInstanceHandle instanceHandle, + CpaBoolean forceAEADMacVerify); #endif diff --git a/sys/dev/qat/qat_api/include/icp_sal_versions.h b/sys/dev/qat/qat_api/include/icp_sal_versions.h index 0a013fe46b81..91719587445d 100644 --- a/sys/dev/qat/qat_api/include/icp_sal_versions.h +++ b/sys/dev/qat/qat_api/include/icp_sal_versions.h @@ -1,97 +1,97 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright(c) 2007-2022 Intel Corporation */ /* $FreeBSD$ */ /** *************************************************************************** * @file icp_sal_versions.h * * @defgroup SalVersions * * @ingroup SalVersions * * API and structures definition for obtaining software and hardware versions * ***************************************************************************/ #ifndef _ICP_SAL_VERSIONS_H_ #define _ICP_SAL_VERSIONS_H_ #define ICP_SAL_VERSIONS_FW_VERSION_SIZE 16 /**< Max length of firmware version string */ #define ICP_SAL_VERSIONS_SW_VERSION_SIZE 16 /**< Max length of software version string */ #define ICP_SAL_VERSIONS_MMP_VERSION_SIZE 16 /**< Max length of MMP binary version string */ #define ICP_SAL_VERSIONS_HW_VERSION_SIZE 4 /**< Max length of hardware version string */ /* Part name and number of the accelerator device */ -#define SAL_INFO2_DRIVER_SW_VERSION_MAJ_NUMBER 3 -#define SAL_INFO2_DRIVER_SW_VERSION_MIN_NUMBER 11 +#define SAL_INFO2_DRIVER_SW_VERSION_MAJ_NUMBER 3 +#define SAL_INFO2_DRIVER_SW_VERSION_MIN_NUMBER 12 #define SAL_INFO2_DRIVER_SW_VERSION_PATCH_NUMBER 0 /** ******************************************************************************* * @ingroup SalVersions * Structure holding versions information * * @description * This structure stores information about versions of software * and hardware being run on a particular device. *****************************************************************************/ typedef struct icp_sal_dev_version_info_s { Cpa32U devId; /**< Number of acceleration device for which this structure holds * version * information */ Cpa8U firmwareVersion[ICP_SAL_VERSIONS_FW_VERSION_SIZE]; /**< String identifying the version of the firmware associated with * the device.
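As a usage sketch for the icp_sal_setForceAEADMACVerify() call added just above: whether relaxing the CPA_TRUE default is appropriate is entirely an application decision assumed here for illustration.

/* Hypothetical opt-out: disable forced HW MAC verification on one
 * instance.  Safety of this depends on the application's threat model. */
static CpaStatus
example_relax_aead_mac_verify(CpaInstanceHandle inst)
{
	return (icp_sal_setForceAEADMACVerify(inst, CPA_FALSE));
}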
*/ Cpa8U mmpVersion[ICP_SAL_VERSIONS_MMP_VERSION_SIZE]; /**< String identifying the version of the MMP binary associated with * the device. */ Cpa8U softwareVersion[ICP_SAL_VERSIONS_SW_VERSION_SIZE]; /**< String identifying the version of the software associated with * the device. */ Cpa8U hardwareVersion[ICP_SAL_VERSIONS_HW_VERSION_SIZE]; /**< String identifying the version of the hardware (stepping and * revision ID) associated with the device. */ } icp_sal_dev_version_info_t; /** ******************************************************************************* * @ingroup SalVersions * Obtains the version information for a given device * @description * This function obtains hardware and software version information * associated with a given device. * * @param[in] accelId ID of the acceleration device for which version * information is to be obtained. * @param[out] pVerInfo Pointer to a structure that will hold version * information * * @context * This function might sleep. It cannot be executed in a context that * does not permit sleeping. * @assumptions * The system has been started * @sideEffects * None * @blocking * No * @reentrant * No * @threadSafe * Yes * * @return CPA_STATUS_SUCCESS Operation finished successfully * @return CPA_STATUS_INVALID_PARAM Invalid parameter passed to the function * @return CPA_STATUS_RESOURCE System resources problem * @return CPA_STATUS_FAIL Operation failed * *****************************************************************************/ CpaStatus icp_sal_getDevVersionInfo(Cpa32U accelId, icp_sal_dev_version_info_t *pVerInfo); #endif diff --git a/sys/dev/qat/qat_api/qat_direct/include/icp_accel_devices.h b/sys/dev/qat/qat_api/qat_direct/include/icp_accel_devices.h index 3733fec2c9ea..82df3bb00d19 100644 --- a/sys/dev/qat/qat_api/qat_direct/include/icp_accel_devices.h +++ b/sys/dev/qat/qat_api/qat_direct/include/icp_accel_devices.h @@ -1,157 +1,158 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright(c) 2007-2022 Intel Corporation */ /* $FreeBSD$ */ /***************************************************************************** * @file icp_accel_devices.h * * @defgroup Acceleration Driver Framework * * @ingroup icp_Adf * * @description * This is the top level header file that contains the layout of the ADF * icp_accel_dev_t structure and related macros/definitions. * It can be used to dereference the icp_accel_dev_t *passed into upper * layers. 
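Before leaving icp_sal_versions.h, a short sketch of icp_sal_getDevVersionInfo() usage for accelerator 0; the casts reflect that the version fields are Cpa8U arrays, and the printing is illustrative only.

#include <stdio.h>

/* Hypothetical version dump for accelerator 0. */
static void
example_print_versions(void)
{
	icp_sal_dev_version_info_t info = { 0 };

	if (icp_sal_getDevVersionInfo(0, &info) != CPA_STATUS_SUCCESS)
		return;
	printf("QAT dev %u: fw %s, mmp %s, sw %s\n",
	    info.devId,
	    (const char *)info.firmwareVersion,
	    (const char *)info.mmpVersion,
	    (const char *)info.softwareVersion);
}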
* *****************************************************************************/ #ifndef ICP_ACCEL_DEVICES_H_ #define ICP_ACCEL_DEVICES_H_ #include "cpa.h" #include "qat_utils.h" #include "adf_accel_devices.h" #define ADF_CFG_NO_INSTANCE 0xFFFFFFFF #define ICP_DC_TX_RING_0 6 #define ICP_DC_TX_RING_1 7 #define ICP_RX_RINGS_OFFSET 8 #define ICP_RINGS_PER_BANK 16 /* Number of worker threads per AE */ #define ICP_ARB_WRK_THREAD_TO_SARB 12 #define MAX_ACCEL_NAME_LEN 16 #define ADF_DEVICE_NAME_LENGTH 32 #define ADF_DEVICE_TYPE_LENGTH 8 #define ADF_CTL_DEVICE_NAME "/dev/qat_adf_ctl" /** ***************************************************************************** * @ingroup icp_AdfAccelHandle * * @description * Accelerator capabilities * *****************************************************************************/ typedef enum { ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC = 0x01, ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC = 0x02, ICP_ACCEL_CAPABILITIES_CIPHER = 0x04, ICP_ACCEL_CAPABILITIES_AUTHENTICATION = 0x08, ICP_ACCEL_CAPABILITIES_RESERVED_1 = 0x10, ICP_ACCEL_CAPABILITIES_COMPRESSION = 0x20, ICP_ACCEL_CAPABILITIES_DEPRECATED = 0x40, ICP_ACCEL_CAPABILITIES_RANDOM_NUMBER = 0x80, ICP_ACCEL_CAPABILITIES_CRYPTO_ZUC = 0x100, ICP_ACCEL_CAPABILITIES_SHA3 = 0x200, ICP_ACCEL_CAPABILITIES_KPT = 0x400, ICP_ACCEL_CAPABILITIES_RL = 0x800, ICP_ACCEL_CAPABILITIES_HKDF = 0x1000, ICP_ACCEL_CAPABILITIES_ECEDMONT = 0x2000, ICP_ACCEL_CAPABILITIES_EXT_ALGCHAIN = 0x4000, ICP_ACCEL_CAPABILITIES_SHA3_EXT = 0x8000, ICP_ACCEL_CAPABILITIES_AESGCM_SPC = 0x10000, ICP_ACCEL_CAPABILITIES_CHACHA_POLY = 0x20000, ICP_ACCEL_CAPABILITIES_SM2 = 0x40000, ICP_ACCEL_CAPABILITIES_SM3 = 0x80000, ICP_ACCEL_CAPABILITIES_SM4 = 0x100000, ICP_ACCEL_CAPABILITIES_INLINE = 0x200000, ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY = 0x400000, ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY64 = 0x800000, ICP_ACCEL_CAPABILITIES_LZ4_COMPRESSION = 0x1000000, ICP_ACCEL_CAPABILITIES_LZ4S_COMPRESSION = 0x2000000, ICP_ACCEL_CAPABILITIES_AES_V2 = 0x4000000, ICP_ACCEL_CAPABILITIES_KPT2 = 0x8000000, } icp_accel_capabilities_t; /** ***************************************************************************** * @ingroup icp_AdfAccelHandle * * @description * Device Configuration Data Structure * *****************************************************************************/ typedef enum device_type_e { DEVICE_UNKNOWN = 0, DEVICE_DH895XCC, DEVICE_DH895XCCVF, DEVICE_C62X, DEVICE_C62XVF, DEVICE_C3XXX, DEVICE_C3XXXVF, DEVICE_200XX, DEVICE_200XXVF, DEVICE_C4XXX, - DEVICE_C4XXXVF + DEVICE_C4XXXVF, + DEVICE_GEN4 } device_type_t; /* * Enumeration on Service Type */ typedef enum adf_service_type_s { ADF_SERVICE_CRYPTO, ADF_SERVICE_COMPRESS, ADF_SERVICE_MAX /* this is always the last one */ } adf_service_type_t; typedef struct accel_dev_s { /* Some generic information */ Cpa32U accelId; Cpa8U *pAccelName; /* Name given to accelerator */ Cpa32U aeMask; /* Acceleration Engine mask */ device_type_t deviceType; /* Device Type */ /* Device name for SAL */ char deviceName[ADF_DEVICE_NAME_LENGTH + 1]; Cpa32U accelCapabilitiesMask; /* Accelerator's capabilities mask */ Cpa32U dcExtendedFeatures; /* bit field of features */ QatUtilsAtomic usageCounter; /* Usage counter. 
Prevents shutting down the dev if not 0*/ Cpa32U deviceMemAvail; /* Device memory for intermediate buffers */ /* Component specific fields - cast to relevant layer */ void *pRingInflight; /* For offload optimization */ void *pSalHandle; /* For SAL*/ void *pQatStats; /* For QATAL/SAL stats */ void *ringInfoCallBack; /* Callback for user space ring enabling */ void *pShramConstants; /* Virtual address of Shram constants page */ Cpa64U pShramConstantsDma; /* Bus address of Shram constants page */ /* Status of ADF and registered subsystems */ Cpa32U adfSubsystemStatus; /* Physical processor to which the dev is connected */ Cpa8U pkg_id; enum dev_sku_info sku; Cpa32U pciDevId; Cpa8U devFileName[ADF_DEVICE_NAME_LENGTH]; Cpa32S csrFileHdl; Cpa32S ringFileHdl; void *accel; Cpa32U maxNumBanks; Cpa32U maxNumRingsPerBank; /* pointer to dynamic instance resource manager */ void *pInstMgr; void *banks; /* banks information */ struct adf_accel_dev *accel_dev; struct accel_dev_s *pPrev; struct accel_dev_s *pNext; } icp_accel_dev_t; #endif /* ICP_ACCEL_HANDLE_H */ diff --git a/sys/dev/qat/qat_api/qat_kernel/src/qat_transport.c b/sys/dev/qat/qat_api/qat_kernel/src/qat_transport.c index 739cd026eedf..910854446442 100644 --- a/sys/dev/qat/qat_api/qat_kernel/src/qat_transport.c +++ b/sys/dev/qat/qat_api/qat_kernel/src/qat_transport.c @@ -1,429 +1,436 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright(c) 2007-2022 Intel Corporation */ /* $FreeBSD$ */ #include "adf_transport_access_macros.h" #include "adf_transport_internal.h" #include "cpa.h" #include "icp_adf_init.h" #include "icp_adf_transport.h" #include "icp_adf_poll.h" #include "icp_adf_transport_dp.h" #include "icp_sal_poll.h" /* * adf_modulo * result = data % ( 2 ^ shift ) */ static inline Cpa32U adf_modulo(Cpa32U data, Cpa32U shift) { Cpa32U div = data >> shift; Cpa32U mult = div << shift; return data - mult; } /* * icp_adf_transCreateHandle * create transport handle for a service * call adf_create_ring from adf driver directly with same parameters */ CpaStatus icp_adf_transCreateHandle(icp_accel_dev_t *adf, icp_transport_type trans_type, const char *section, const uint32_t accel_nr, const uint32_t bank_nr, const char *service_name, const icp_adf_ringInfoService_t info, icp_trans_callback callback, icp_resp_deliv_method resp, const uint32_t num_msgs, const uint32_t msg_size, icp_comms_trans_handle *trans_handle) { CpaStatus status; int error; ICP_CHECK_FOR_NULL_PARAM(trans_handle); ICP_CHECK_FOR_NULL_PARAM(adf); error = adf_create_ring(adf->accel_dev, section, bank_nr, num_msgs, msg_size, service_name, callback, ((resp == ICP_RESP_TYPE_IRQ) ?
0 : 1), (struct adf_etr_ring_data **)trans_handle); if (!error) status = CPA_STATUS_SUCCESS; else status = CPA_STATUS_FAIL; return status; } /* * icp_adf_transReinitHandle * Reinitialize transport handle for a service */ CpaStatus icp_adf_transReinitHandle(icp_accel_dev_t *adf, icp_transport_type trans_type, const char *section, const uint32_t accel_nr, const uint32_t bank_nr, const char *service_name, const icp_adf_ringInfoService_t info, icp_trans_callback callback, icp_resp_deliv_method resp, const uint32_t num_msgs, const uint32_t msg_size, icp_comms_trans_handle *trans_handle) { return CPA_STATUS_SUCCESS; } /* * icp_adf_transReleaseHandle * destroy a transport handle, call adf_remove_ring from adf driver directly */ CpaStatus icp_adf_transReleaseHandle(icp_comms_trans_handle trans_handle) { struct adf_etr_ring_data *ring = trans_handle; ICP_CHECK_FOR_NULL_PARAM(ring); adf_remove_ring(ring); return CPA_STATUS_SUCCESS; } /* * icp_adf_transResetHandle * clean a transport handle, call adf_remove_ring from adf driver directly */ CpaStatus icp_adf_transResetHandle(icp_comms_trans_handle trans_handle) { return CPA_STATUS_SUCCESS; } /* * icp_adf_transGetRingNum * get ring number from a transport handle */ CpaStatus icp_adf_transGetRingNum(icp_comms_trans_handle trans_handle, uint32_t *ringNum) { struct adf_etr_ring_data *ring = trans_handle; ICP_CHECK_FOR_NULL_PARAM(ring); ICP_CHECK_FOR_NULL_PARAM(ringNum); *ringNum = (uint32_t)(ring->ring_number); return CPA_STATUS_SUCCESS; } /* * icp_adf_transPutMsg * send a request to transport handle * call adf_send_message from adf driver directly */ CpaStatus icp_adf_transPutMsg(icp_comms_trans_handle trans_handle, uint32_t *inBuf, uint32_t bufLen) { struct adf_etr_ring_data *ring = trans_handle; CpaStatus status = CPA_STATUS_FAIL; int error = EFAULT; ICP_CHECK_FOR_NULL_PARAM(ring); error = adf_send_message(ring, inBuf); if (EAGAIN == error) status = CPA_STATUS_RETRY; else if (0 == error) status = CPA_STATUS_SUCCESS; else status = CPA_STATUS_FAIL; return status; } CpaStatus icp_adf_getInflightRequests(icp_comms_trans_handle trans_handle, Cpa32U *maxInflightRequests, Cpa32U *numInflightRequests) { struct adf_etr_ring_data *ring = trans_handle; ICP_CHECK_FOR_NULL_PARAM(ring); ICP_CHECK_FOR_NULL_PARAM(maxInflightRequests); ICP_CHECK_FOR_NULL_PARAM(numInflightRequests); /* * XXX: The qat_direct version of this routine returns max - 1, not * the absolute max. */ *numInflightRequests = (*(uint32_t *)ring->inflights); *maxInflightRequests = ADF_MAX_INFLIGHTS(ring->ring_size, ring->msg_size); return CPA_STATUS_SUCCESS; } CpaStatus icp_adf_dp_getInflightRequests(icp_comms_trans_handle trans_handle, Cpa32U *maxInflightRequests, Cpa32U *numInflightRequests) { ICP_CHECK_FOR_NULL_PARAM(trans_handle); ICP_CHECK_FOR_NULL_PARAM(maxInflightRequests); ICP_CHECK_FOR_NULL_PARAM(numInflightRequests); return icp_adf_getInflightRequests(trans_handle, maxInflightRequests, numInflightRequests); } /* * This function allows the user to poll the response ring. The * ring number to be polled is supplied by the user via the * trans handle for that ring. The trans_hnd is a pointer * to an array of trans handles. This ring is * only polled if it contains data. * This method is used as an alternative to the reading messages * via the ISR method. * This function will return RETRY if the ring is empty. 
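Given that RETRY simply means there was nothing to process, a caller might drain an instance as sketched below; the response quota of 8 and the handle array are assumed to come from the instance setup code.

/* Hypothetical drain loop over one instance's response rings: keep
 * polling while responses arrive; CPA_STATUS_RETRY means idle. */
static void
example_drain_instance(icp_comms_trans_handle *trans_hnd,
    Cpa32U num_handles)
{
	while (icp_adf_pollInstance(trans_hnd, num_handles, 8) ==
	    CPA_STATUS_SUCCESS)
		continue;	/* drained one quota; try again */
}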
*/ CpaStatus icp_adf_pollInstance(icp_comms_trans_handle *trans_hnd, Cpa32U num_transHandles, Cpa32U response_quota) { Cpa32U resp_total = 0; Cpa32U num_resp; struct adf_etr_ring_data *ring = NULL; struct adf_etr_bank_data *bank = NULL; Cpa32U i; ICP_CHECK_FOR_NULL_PARAM(trans_hnd); for (i = 0; i < num_transHandles; i++) { ring = trans_hnd[i]; if (!ring) continue; bank = ring->bank; /* If the ring in question is empty try the next ring.*/ if (!bank || !bank->ring_mask) { continue; } num_resp = adf_handle_response(ring, response_quota); resp_total += num_resp; } /* If any of the rings in the instance had data and were polled * return SUCCESS. */ if (resp_total) return CPA_STATUS_SUCCESS; else return CPA_STATUS_RETRY; } /* * This function allows the user to check the response ring. The * ring number to be polled is supplied by the user via the * trans handle for that ring. The trans_hnd is a pointer * to an array of trans handles. * This function is now an empty function. */ CpaStatus icp_adf_check_RespInstance(icp_comms_trans_handle *trans_hnd, Cpa32U num_transHandles) { return CPA_STATUS_SUCCESS; } /* * icp_sal_pollBank * poll bank with id bank_number inside acceleration device with id @accelId */ CpaStatus icp_sal_pollBank(Cpa32U accelId, Cpa32U bank_number, Cpa32U response_quota) { int ret; ret = adf_poll_bank(accelId, bank_number, response_quota); if (!ret) return CPA_STATUS_SUCCESS; else if (EAGAIN == ret) return CPA_STATUS_RETRY; return CPA_STATUS_FAIL; } /* * icp_sal_pollAllBanks * poll all banks inside acceleration device with id @accelId */ CpaStatus icp_sal_pollAllBanks(Cpa32U accelId, Cpa32U response_quota) { int ret = 0; ret = adf_poll_all_banks(accelId, response_quota); if (!ret) return CPA_STATUS_SUCCESS; else if (ret == EAGAIN) return CPA_STATUS_RETRY; return CPA_STATUS_FAIL; } /* * icp_adf_getQueueMemory * Data plane support function - returns the pointer to next message on the ring * or NULL if there is not enough space. */ void icp_adf_getQueueMemory(icp_comms_trans_handle trans_handle, Cpa32U numberRequests, void **pCurrentQatMsg) { struct adf_etr_ring_data *ring = trans_handle; Cpa64U flight; ICP_CHECK_FOR_NULL_PARAM_VOID(ring); /* Check if there is enough space in the ring */ flight = atomic_add_return(numberRequests, ring->inflights); if (flight > ADF_MAX_INFLIGHTS(ring->ring_size, ring->msg_size)) { atomic_sub(numberRequests, ring->inflights); *pCurrentQatMsg = NULL; return; } /* We have enough space - get the address of next message */ *pCurrentQatMsg = (void *)((uintptr_t)ring->base_addr + ring->tail); } /* * icp_adf_getSingleQueueAddr * Data plane support function - returns the pointer to next message on the ring * or NULL if there is not enough space - it also updates the shadow tail copy.
*/ void icp_adf_getSingleQueueAddr(icp_comms_trans_handle trans_handle, void **pCurrentQatMsg) { struct adf_etr_ring_data *ring = trans_handle; Cpa64U flight; ICP_CHECK_FOR_NULL_PARAM_VOID(ring); ICP_CHECK_FOR_NULL_PARAM_VOID(pCurrentQatMsg); /* Check if there is enough space in the ring */ flight = atomic_add_return(1, ring->inflights); if (flight > ADF_MAX_INFLIGHTS(ring->ring_size, ring->msg_size)) { atomic_dec(ring->inflights); *pCurrentQatMsg = NULL; return; } /* We have enough space - get the address of next message */ *pCurrentQatMsg = (void *)((uintptr_t)ring->base_addr + ring->tail); /* Update the shadow tail */ ring->tail = adf_modulo(ring->tail + ADF_MSG_SIZE_TO_BYTES(ring->msg_size), ADF_RING_SIZE_MODULO(ring->ring_size)); } /* * icp_adf_getQueueNext * Data plane support function - increments the tail pointer and returns * the pointer to next message on the ring. */ void icp_adf_getQueueNext(icp_comms_trans_handle trans_handle, void **pCurrentQatMsg) { struct adf_etr_ring_data *ring = trans_handle; ICP_CHECK_FOR_NULL_PARAM_VOID(ring); ICP_CHECK_FOR_NULL_PARAM_VOID(pCurrentQatMsg); /* Increment tail to next message */ ring->tail = adf_modulo(ring->tail + ADF_MSG_SIZE_TO_BYTES(ring->msg_size), ADF_RING_SIZE_MODULO(ring->ring_size)); /* Get the address of next message */ *pCurrentQatMsg = (void *)((uintptr_t)ring->base_addr + ring->tail); } /* * icp_adf_updateQueueTail * Data plane support function - Writes the tail shadow copy to the device. */ void icp_adf_updateQueueTail(icp_comms_trans_handle trans_handle) { struct adf_etr_ring_data *ring = trans_handle; + struct adf_hw_csr_ops *csr_ops; ICP_CHECK_FOR_NULL_PARAM_VOID(ring); + ICP_CHECK_FOR_NULL_PARAM_VOID(ring->bank); + ICP_CHECK_FOR_NULL_PARAM_VOID(ring->bank->accel_dev); - WRITE_CSR_RING_TAIL(ring->bank->csr_addr, - ring->bank->bank_number, - ring->ring_number, - ring->tail); + csr_ops = GET_CSR_OPS(ring->bank->accel_dev); + + ICP_CHECK_FOR_NULL_PARAM_VOID(csr_ops); + + csr_ops->write_csr_ring_tail(ring->bank->csr_addr, + ring->bank->bank_number, + ring->ring_number, + ring->tail); ring->csr_tail_offset = ring->tail; } /* * icp_adf_pollQueue * Data plane support function - Poll messages from the queue. */ CpaStatus icp_adf_pollQueue(icp_comms_trans_handle trans_handle, Cpa32U response_quota) { Cpa32U num_resp; struct adf_etr_ring_data *ring = trans_handle; ICP_CHECK_FOR_NULL_PARAM(ring); num_resp = adf_handle_response(ring, response_quota); if (num_resp) return CPA_STATUS_SUCCESS; else return CPA_STATUS_RETRY; } /* * icp_adf_queueDataToSend * Data-plane support function - Indicates if there is data on the ring to be * sent. This should only be called on request rings. If the function returns * true then it is ok to call icp_adf_updateQueueTail() function on this ring. 
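The data-plane helpers above are designed to be used in a reserve/fill/kick order; a sketch follows, where fill_request() stands in for an application routine (an assumption, not part of this API) that writes the message in place.

/* Assumed application routine that builds a request in the ring slot. */
extern void fill_request(void *msg);

/* Hypothetical data-plane send: reserve a slot, build the message in
 * place, then push the shadow tail to the CSR if anything is queued. */
static CpaStatus
example_dp_send(icp_comms_trans_handle handle)
{
	void *msg = NULL;

	icp_adf_getSingleQueueAddr(handle, &msg);
	if (msg == NULL)
		return (CPA_STATUS_RETRY);	/* ring is full */

	fill_request(msg);

	if (icp_adf_queueDataToSend(handle))
		icp_adf_updateQueueTail(handle);
	return (CPA_STATUS_SUCCESS);
}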
*/ CpaBoolean icp_adf_queueDataToSend(icp_comms_trans_handle trans_handle) { struct adf_etr_ring_data *ring = trans_handle; if (ring->tail != ring->csr_tail_offset) return CPA_TRUE; else return CPA_FALSE; } /* * This icp API won't be supported in kernel space currently */ CpaStatus icp_adf_transGetFdForHandle(icp_comms_trans_handle trans_hnd, int *fd) { return CPA_STATUS_UNSUPPORTED; } diff --git a/sys/dev/qat/qat_api/qat_utils/include/qat_utils.h b/sys/dev/qat/qat_api/qat_utils/include/qat_utils.h index c30a19785980..a9874eb89215 100644 --- a/sys/dev/qat/qat_api/qat_utils/include/qat_utils.h +++ b/sys/dev/qat/qat_api/qat_utils/include/qat_utils.h @@ -1,851 +1,872 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright(c) 2007-2022 Intel Corporation */ /* $FreeBSD$ */ #ifndef QAT_UTILS_H #define QAT_UTILS_H #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef __x86_64__ #include #else #include #endif #include #include #include #include #include #include #include #include #include #include #include "cpa.h" #define QAT_UTILS_LOG(...) printf("QAT: "__VA_ARGS__) #define QAT_UTILS_WAIT_FOREVER (-1) #define QAT_UTILS_WAIT_NONE 0 #define QAT_UTILS_HOST_TO_NW_16(uData) QAT_UTILS_OS_HOST_TO_NW_16(uData) #define QAT_UTILS_HOST_TO_NW_32(uData) QAT_UTILS_OS_HOST_TO_NW_32(uData) #define QAT_UTILS_HOST_TO_NW_64(uData) QAT_UTILS_OS_HOST_TO_NW_64(uData) #define QAT_UTILS_NW_TO_HOST_16(uData) QAT_UTILS_OS_NW_TO_HOST_16(uData) #define QAT_UTILS_NW_TO_HOST_32(uData) QAT_UTILS_OS_NW_TO_HOST_32(uData) #define QAT_UTILS_NW_TO_HOST_64(uData) QAT_UTILS_OS_NW_TO_HOST_64(uData) #define QAT_UTILS_UDIV64_32(dividend, divisor) \ QAT_UTILS_OS_UDIV64_32(dividend, divisor) #define QAT_UTILS_UMOD64_32(dividend, divisor) \ QAT_UTILS_OS_UMOD64_32(dividend, divisor) #define ICP_CHECK_FOR_NULL_PARAM(param) \ do { \ if (NULL == param) { \ QAT_UTILS_LOG("%s(): invalid param: %s\n", \ __FUNCTION__, \ #param); \ return CPA_STATUS_INVALID_PARAM; \ } \ } while (0) #define ICP_CHECK_FOR_NULL_PARAM_VOID(param) \ do { \ if (NULL == param) { \ QAT_UTILS_LOG("%s(): invalid param: %s\n", \ __FUNCTION__, \ #param); \ return; \ } \ } while (0) /*Macro for adding an element to the tail of a doubly linked list*/ /*The currentptr tracks the tail, and the headptr tracks the head.*/ #define ICP_ADD_ELEMENT_TO_END_OF_LIST(elementtoadd, currentptr, headptr) \ do { \ if (NULL == currentptr) { \ currentptr = elementtoadd; \ elementtoadd->pNext = NULL; \ elementtoadd->pPrev = NULL; \ headptr = currentptr; \ } else { \ elementtoadd->pPrev = currentptr; \ currentptr->pNext = elementtoadd; \ elementtoadd->pNext = NULL; \ currentptr = elementtoadd; \ } \ } while (0) /*currentptr is not used in this case since we don't track the tail. 
*/ #define ICP_ADD_ELEMENT_TO_HEAD_OF_LIST(elementtoadd, currentptr, headptr) \ do { \ if (NULL == headptr) { \ elementtoadd->pNext = NULL; \ elementtoadd->pPrev = NULL; \ headptr = elementtoadd; \ } else { \ elementtoadd->pPrev = NULL; \ elementtoadd->pNext = headptr; \ headptr->pPrev = elementtoadd; \ headptr = elementtoadd; \ } \ } while (0) #define ICP_REMOVE_ELEMENT_FROM_LIST(elementtoremove, currentptr, headptr) \ do { \ /*If the previous pointer is not NULL*/ \ if (NULL != elementtoremove->pPrev) { \ elementtoremove->pPrev->pNext = \ elementtoremove->pNext; \ if (elementtoremove->pNext) { \ elementtoremove->pNext->pPrev = \ elementtoremove->pPrev; \ } else { \ /* Move the tail pointer backwards */ \ currentptr = elementtoremove->pPrev; \ } \ } else if (NULL != elementtoremove->pNext) { \ /*Remove the head pointer.*/ \ elementtoremove->pNext->pPrev = NULL; \ /*Hence move the head forward.*/ \ headptr = elementtoremove->pNext; \ } else { \ /*Remove the final entry in the list. */ \ currentptr = NULL; \ headptr = NULL; \ } \ } while (0) MALLOC_DECLARE(M_QAT); #ifdef __x86_64__ typedef atomic64_t QatUtilsAtomic; #else typedef atomic_t QatUtilsAtomic; #endif #define QAT_UTILS_OS_NW_TO_HOST_16(uData) be16toh(uData) #define QAT_UTILS_OS_NW_TO_HOST_32(uData) be32toh(uData) #define QAT_UTILS_OS_NW_TO_HOST_64(uData) be64toh(uData) #define QAT_UTILS_OS_HOST_TO_NW_16(uData) htobe16(uData) #define QAT_UTILS_OS_HOST_TO_NW_32(uData) htobe32(uData) #define QAT_UTILS_OS_HOST_TO_NW_64(uData) htobe64(uData) /** * @ingroup QatUtils * * @brief Atomically read the value of atomic variable * * @param pAtomicVar IN - atomic variable * * Atomically reads the value of pAtomicVar to the outValue * * @li Reentrant: yes * @li IRQ safe: yes * * @return pAtomicVar value */ int64_t qatUtilsAtomicGet(QatUtilsAtomic *pAtomicVar); /** * @ingroup QatUtils * * @brief Atomically set the value of atomic variable * * @param inValue IN - atomic variable to be set equal to inValue * * @param pAtomicVar OUT - atomic variable * * Atomically sets the value of pAtomicVar to the value given * * @li Reentrant: yes * @li IRQ safe: yes * * @return none */ void qatUtilsAtomicSet(int64_t inValue, QatUtilsAtomic *pAtomicVar); /** * @ingroup QatUtils * * @brief add the value to atomic variable * * @param inValue (in) - value to be added to the atomic variable * * @param pAtomicVar (in & out) - atomic variable * * Atomically adds the value of inValue to the pAtomicVar * * @li Reentrant: yes * @li IRQ safe: yes * * @return pAtomicVar value after the addition */ int64_t qatUtilsAtomicAdd(int64_t inValue, QatUtilsAtomic *pAtomicVar); /** * @ingroup QatUtils * * @brief subtract the value from atomic variable * * @param inValue IN - atomic variable value to be subtracted by value * * @param pAtomicVar IN/OUT - atomic variable * * Atomically subtracts the value of pAtomicVar by inValue * * @li Reentrant: yes * @li IRQ safe: yes * * @return pAtomicVar value after the subtraction */ int64_t qatUtilsAtomicSub(int64_t inValue, QatUtilsAtomic *pAtomicVar); /** * @ingroup QatUtils * * @brief increment value of atomic variable by 1 * * @param pAtomicVar IN/OUT - atomic variable * * Atomically increments the value of pAtomicVar by 1. 
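Stepping back to the list macros defined a little above: they assume nodes expose pNext/pPrev members with exactly those names, as in this sketch (example_node and its payload are hypothetical).

/* Hypothetical node type for the ICP_*_LIST macros above. */
struct example_node {
	struct example_node *pNext;
	struct example_node *pPrev;
	int payload;
};

static void
example_list_usage(struct example_node *node)
{
	struct example_node *head = NULL;
	struct example_node *tail = NULL;	/* the "currentptr" (tail) */

	ICP_ADD_ELEMENT_TO_END_OF_LIST(node, tail, head);
	/* ... walk the list via head->pNext ... */
	ICP_REMOVE_ELEMENT_FROM_LIST(node, tail, head);
}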
 *
 * @li Reentrant: yes
 * @li IRQ safe: yes
 *
 * @return pAtomicVar value after the increment
 */
int64_t qatUtilsAtomicInc(QatUtilsAtomic *pAtomicVar);

/**
 * @ingroup QatUtils
 *
 * @brief decrement value of atomic variable by 1
 *
 * @param pAtomicVar IN/OUT - atomic variable
 *
 * Atomically decrements the value of pAtomicVar by 1.
 *
 * @li Reentrant: yes
 * @li IRQ safe: yes
 *
 * @return pAtomicVar value after the decrement
 */
int64_t qatUtilsAtomicDec(QatUtilsAtomic *pAtomicVar);

/**
 * @ingroup QatUtils
 *
 * @brief NUMA aware memory allocation; available on Linux OS only.
 *
 * @param size - memory size to allocate, in bytes
 * @param node - node
 * @param alignment - memory boundary alignment (alignment cannot be 0)
 *
 * Allocates a memory zone of a given size on the specified node.
 * The returned memory is guaranteed to be physically contiguous if the
 * given size is less than 128KB and to belong to the specified node.
 *
 * @li Reentrant: yes
 * @li IRQ safe: no
 *
 * @return Pointer to the allocated zone or NULL if the allocation failed
 */
void *qatUtilsMemAllocContiguousNUMA(uint32_t size,
				     uint32_t node,
				     uint32_t alignment);

/**
 * @ingroup QatUtils
 *
 * @brief Frees memory allocated by qatUtilsMemAllocContiguousNUMA.
 *
 * @param ptr - pointer to the memory zone
 *
 * Frees a previously allocated memory zone.
 *
 * @li Reentrant: yes
 * @li IRQ safe: no
 *
 * @return - none
 */
void qatUtilsMemFreeNUMA(void *ptr);

/**
 * @ingroup QatUtils
 *
 * @brief virtual to physical address translation
 *
 * @param virtAddr - virtual address
 *
 * Converts a virtual address into its equivalent MMU-mapped physical address
 *
 * @li Reentrant: yes
 * @li IRQ safe: yes
 *
 * @return Corresponding physical address
 */
#define QAT_UTILS_MMU_VIRT_TO_PHYS(virtAddr) \
	((uint64_t)((virtAddr) ? vtophys(virtAddr) : 0))

/**
 * @ingroup QatUtils
 *
 * @brief Initializes the SpinLock object
 *
 * @param pLock - Spinlock handle
 *
 * Initializes the SpinLock object.
 *
 * @li Reentrant: yes
 * @li IRQ safe: yes
 *
 * @return - CPA_STATUS_SUCCESS/CPA_STATUS_FAIL
 */
CpaStatus qatUtilsLockInit(struct mtx *pLock);

/**
 * @ingroup QatUtils
 *
 * @brief Acquires a spin lock
 *
 * @param pLock - Spinlock handle
 *
 * This routine acquires a spin lock so the caller can synchronize
 * access to shared data in a multiprocessor-safe way by raising IRQL.
 *
 * @li Reentrant: yes
 * @li IRQ safe: yes
 *
 * @return - Returns CPA_STATUS_SUCCESS if the spinlock is acquired. Returns
 *           CPA_STATUS_FAIL if the spinlock handle is NULL. If the spinlock
 *           is already held by another thread of execution, this routine
 *           spins in a busy loop until it acquires the lock.
 */
CpaStatus qatUtilsLock(struct mtx *pLock);

/**
 * @ingroup QatUtils
 *
 * @brief Releases the spin lock
 *
 * @param pLock - Spinlock handle
 *
 * This routine releases the spin lock which the thread had acquired.
 *
 * @li Reentrant: yes
 * @li IRQ safe: yes
 *
 * @return - Returns CPA_STATUS_SUCCESS if the spinlock is released. Returns
 *           CPA_STATUS_FAIL if the spinlock handle passed is NULL.
 */
CpaStatus qatUtilsUnlock(struct mtx *pLock);

/**
 * @ingroup QatUtils
 *
 * @brief Destroy the spin lock object
 *
 * @param pLock - Spinlock handle
 *
 * @li Reentrant: yes
 * @li IRQ safe: yes
 *
 * @return - Returns CPA_STATUS_SUCCESS if pLock is destroyed.
 *           Returns CPA_STATUS_FAIL if pLock is NULL.
 */
CpaStatus qatUtilsLockDestroy(struct mtx *pLock);

/**
 * @ingroup QatUtils
 *
 * @brief Initializes a semaphore
 *
 * @param pSid - semaphore handle
 * @param start_value - initial semaphore value
 *
 * Initializes a semaphore object.
 * Note: qatUtilsSemaphoreInit must be called before any other
 * QAT Utils semaphore API is used on a given semaphore.
 *
 * @li Reentrant: yes
 * @li IRQ safe: no
 *
 * @return - CPA_STATUS_SUCCESS/CPA_STATUS_FAIL
 */
CpaStatus qatUtilsSemaphoreInit(struct sema **pSid, uint32_t start_value);

/**
 * @ingroup QatUtils
 *
 * @brief Destroys a semaphore object
 *
 * @param pSid - semaphore handle
 *
 * Destroys a semaphore object; the caller should ensure that no thread is
 * blocked on this semaphore. If this call is made while a thread is blocked
 * on the semaphore, the behaviour is unpredictable.
 *
 * @li Reentrant: yes
 * @li IRQ safe: no
 *
 * @return - CPA_STATUS_SUCCESS/CPA_STATUS_FAIL
 */
CpaStatus qatUtilsSemaphoreDestroy(struct sema **pSid);

/**
 * @ingroup QatUtils
 *
 * @brief Waits on (decrements) a semaphore
 *
 * @param pSid - semaphore handle
 * @param timeout - timeout, in ms; QAT_UTILS_WAIT_FOREVER (-1) if the thread
 * is to block indefinitely or QAT_UTILS_WAIT_NONE (0) if the thread is to
 * return immediately even if the call fails
 *
 * Decrements a semaphore, blocking if the semaphore is
 * unavailable (value is 0).
 *
 * @li Reentrant: yes
 * @li IRQ safe: no
 *
 * @return - CPA_STATUS_SUCCESS/CPA_STATUS_FAIL
 */
CpaStatus qatUtilsSemaphoreWait(struct sema **pSid, int32_t timeout);

/**
 * @ingroup QatUtils
 *
 * @brief Non-blocking wait on semaphore
 *
 * @param semaphore - semaphore handle
 *
 * Decrements a semaphore, not blocking the calling thread if the semaphore
 * is unavailable.
 *
 * @li Reentrant: yes
 * @li IRQ safe: no
 *
 * @return - CPA_STATUS_SUCCESS/CPA_STATUS_FAIL
 */
CpaStatus qatUtilsSemaphoreTryWait(struct sema **semaphore);

/**
 * @ingroup QatUtils
 *
 * @brief Posts to (increments) a semaphore
 *
 * @param pSid - semaphore handle
 *
 * Increments a semaphore object.
 *
 * @li Reentrant: yes
 * @li IRQ safe: no
 *
 * @return - CPA_STATUS_SUCCESS/CPA_STATUS_FAIL
 */
CpaStatus qatUtilsSemaphorePost(struct sema **pSid);

/**
 * @ingroup QatUtils
 *
 * @brief Initializes a mutex
 *
 * @param pMutex - mutex handle
 *
 * Initializes a mutex object.
 * @note qatUtilsMutexInit must be called before any other
 * QAT Utils mutex API is used on a given mutex.
 *
 * @li Reentrant: yes
 * @li IRQ safe: no
 *
 * @return - CPA_STATUS_SUCCESS/CPA_STATUS_FAIL
 */
CpaStatus qatUtilsMutexInit(struct mtx **pMutex);

/**
 * @ingroup QatUtils
 *
 * @brief Locks a mutex
 *
 * @param pMutex - mutex handle
 * @param timeout - timeout in ms; QAT_UTILS_WAIT_FOREVER (-1) to wait forever
 * or QAT_UTILS_WAIT_NONE to return immediately
 *
 * Locks a mutex object.
 *
 * @li Reentrant: yes
 * @li IRQ safe: no
 *
 * @return - CPA_STATUS_SUCCESS/CPA_STATUS_FAIL
 */
CpaStatus qatUtilsMutexLock(struct mtx **pMutex, int32_t timeout);

/**
 * @ingroup QatUtils
 *
 * @brief Unlocks a mutex
 *
 * @param pMutex - mutex handle
 *
 * Unlocks a mutex object.
 *
 * @li Reentrant: yes
 * @li IRQ safe: no
 *
 * @return - CPA_STATUS_SUCCESS/CPA_STATUS_FAIL
 */
CpaStatus qatUtilsMutexUnlock(struct mtx **pMutex);

/**
 * @ingroup QatUtils
 *
 * @brief Destroys a mutex object
 *
 * @param pMutex - mutex handle
 *
 * Destroys a mutex object; the caller should ensure that no thread is
 * blocked on this mutex.
If call made when thread blocked on pMutex the * behaviour is unpredictable * * @li Reentrant: yes * @li IRQ safe: no * * @return - CPA_STATUS_SUCCESS/CPA_STATUS_FAIL */ CpaStatus qatUtilsMutexDestroy(struct mtx **pMutex); /** * @ingroup QatUtils * * @brief Non-blocking attempt to lock a pMutex * * @param pMutex - pMutex handle * * Attempts to lock a pMutex object, returning immediately with * CPA_STATUS_SUCCESS if * the lock was successful or CPA_STATUS_FAIL if the lock failed * * @li Reentrant: yes * @li IRQ safe: no * * @return - CPA_STATUS_SUCCESS/CPA_STATUS_FAIL */ CpaStatus qatUtilsMutexTryLock(struct mtx **pMutex); /** * @ingroup QatUtils * * @brief Yielding sleep for a number of milliseconds * * @param milliseconds - number of milliseconds to sleep * * The calling thread will sleep for the specified number of milliseconds. * This sleep is yielding, hence other tasks will be scheduled by the * operating system during the sleep period. Calling this function with an * argument of 0 will place the thread at the end of the current scheduling * loop. * * @li Reentrant: yes * @li IRQ safe: no * * @return - CPA_STATUS_SUCCESS/CPA_STATUS_FAIL */ CpaStatus qatUtilsSleep(uint32_t milliseconds); /** * @ingroup QatUtils * * @brief Yields execution of current thread * * Yields the execution of the current thread * * @li Reentrant: yes * @li IRQ safe: no * * @return - none */ void qatUtilsYield(void); /** * @ingroup QatUtils * * @brief Calculate MD5 transform operation * * @param in - pointer to data to be processed. * The buffer needs to be at least md5 block size long as defined in * rfc1321 (64 bytes) * out - output pointer for state data after single md5 transform * operation. * The buffer needs to be at least md5 state size long as defined in * rfc1321 (16 bytes) * * @li Reentrant: yes * @li IRQ safe: yes * * @return - CPA_STATUS_SUCCESS/CPA_STATUS_FAIL * */ CpaStatus qatUtilsHashMD5(uint8_t *in, uint8_t *out); /** * @ingroup QatUtils * * @brief Calculate MD5 transform operation * * @param in - pointer to data to be processed. * The buffer needs to be at least md5 block size long as defined in * rfc1321 (64 bytes) * out - output pointer for state data after single md5 transform * operation. * The buffer needs to be at least md5 state size long as defined in * rfc1321 (16 bytes) * len - Length on the input to be processed. * * @li Reentrant: yes * @li IRQ safe: yes * * @return - CPA_STATUS_SUCCESS/CPA_STATUS_FAIL * */ CpaStatus qatUtilsHashMD5Full(uint8_t *in, uint8_t *out, uint32_t len); /** * @ingroup QatUtils * * @brief Calculate SHA1 transform operation * * @param in - pointer to data to be processed. * The buffer needs to be at least sha1 block size long as defined in * rfc3174 (64 bytes) * out - output pointer for state data after single sha1 transform * operation. * The buffer needs to be at least sha1 state size long as defined in * rfc3174 (20 bytes) * * @li Reentrant: yes * @li IRQ safe: yes * * @return - CPA_STATUS_SUCCESS/CPA_STATUS_FAIL * */ CpaStatus qatUtilsHashSHA1(uint8_t *in, uint8_t *out); /** * @ingroup QatUtils * * @brief Calculate SHA1 transform operation * * @param in - pointer to data to be processed. * The buffer needs to be at least sha1 block size long as defined in * rfc3174 (64 bytes) * out - output pointer for state data after single sha1 transform * operation. * The buffer needs to be at least sha1 state size long as defined in * rfc3174 (20 bytes) * len - Length on the input to be processed. 
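Taken together, the mutex routines documented above follow a conventional init/lock/unlock/destroy life cycle. A minimal sketch, not part of this change and with a hypothetical function name, assuming "qat_utils.h" provides the prototypes:

#include "qat_utils.h"

/*
 * Illustration only: init/use/teardown sequence for the mutex wrappers.
 * Error handling is abbreviated; a real caller would propagate failures.
 */
static CpaStatus
example_mutex_usage(void)
{
	struct mtx *lock = NULL;
	CpaStatus status;

	status = qatUtilsMutexInit(&lock);
	if (CPA_STATUS_SUCCESS != status)
		return status;

	/* Block until the mutex is available. */
	if (CPA_STATUS_SUCCESS ==
	    qatUtilsMutexLock(&lock, QAT_UTILS_WAIT_FOREVER)) {
		/* ... critical section ... */
		qatUtilsMutexUnlock(&lock);
	}

	/* Non-blocking attempt: CPA_STATUS_FAIL can simply mean "busy". */
	if (CPA_STATUS_SUCCESS == qatUtilsMutexTryLock(&lock)) {
		/* ... critical section ... */
		qatUtilsMutexUnlock(&lock);
	}

	return qatUtilsMutexDestroy(&lock);
}

Per the contracts above, a CPA_STATUS_FAIL from qatUtilsMutexTryLock is not treated as a hard error here, since it is also the documented result when the lock is merely contended.
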
* * @li Reentrant: yes * @li IRQ safe: yes * * @return - CPA_STATUS_SUCCESS/CPA_STATUS_FAIL * */ CpaStatus qatUtilsHashSHA1Full(uint8_t *in, uint8_t *out, uint32_t len); /** * @ingroup QatUtils * * @brief Calculate SHA224 transform operation * * @param in - pointer to data to be processed. * The buffer needs to be at least sha224 block size long as defined in * rfc3874 and rfc4868 (64 bytes) * out - output pointer for state data after single sha224 transform * operation. * The buffer needs to be at least sha224 state size long as defined in * rfc3874 and rfc4868 (32 bytes) * @li Reentrant: yes * @li IRQ safe: yes * * @return - CPA_STATUS_SUCCESS/CPA_STATUS_FAIL * */ CpaStatus qatUtilsHashSHA224(uint8_t *in, uint8_t *out); /** * @ingroup QatUtils * * @brief Calculate SHA256 transform operation * * * @param in - pointer to data to be processed. * The buffer needs to be at least sha256 block size long as defined in * rfc4868 (64 bytes) * out - output pointer for state data after single sha256 transform * operation. * The buffer needs to be at least sha256 state size long as defined in * rfc4868 (32 bytes) * @li Reentrant: yes * @li IRQ safe: yes * * @return - CPA_STATUS_SUCCESS/CPA_STATUS_FAIL * */ CpaStatus qatUtilsHashSHA256(uint8_t *in, uint8_t *out); /** * @ingroup QatUtils * * @brief Calculate SHA256 transform operation * * * @param in - pointer to data to be processed. * The buffer needs to be at least sha256 block size long as defined in * rfc4868 (64 bytes) * out - output pointer for state data after single sha256 transform * operation. * The buffer needs to be at least sha256 state size long as defined in * rfc4868 (32 bytes) * len - Length on the input to be processed. * @li Reentrant: yes * @li IRQ safe: yes * * @return - CPA_STATUS_SUCCESS/CPA_STATUS_FAIL * */ CpaStatus qatUtilsHashSHA256Full(uint8_t *in, uint8_t *out, uint32_t len); /** * @ingroup QatUtils * * @brief Calculate SHA384 transform operation * * @param in - pointer to data to be processed. * The buffer needs to be at least sha384 block size long as defined in * rfc4868 (128 bytes) * out - output pointer for state data after single sha384 transform * operation. * The buffer needs to be at least sha384 state size long as defined in * rfc4868 (64 bytes) * @li Reentrant: yes * @li IRQ safe: yes * * @return - CPA_STATUS_SUCCESS/CPA_STATUS_FAIL * */ CpaStatus qatUtilsHashSHA384(uint8_t *in, uint8_t *out); /** * @ingroup QatUtils * * @brief Calculate SHA384 transform operation * * @param in - pointer to data to be processed. * The buffer needs to be at least sha384 block size long as defined in * rfc4868 (128 bytes) * out - output pointer for state data after single sha384 transform * operation. * The buffer needs to be at least sha384 state size long as defined in * rfc4868 (64 bytes) * len - Length on the input to be processed. * @li Reentrant: yes * @li IRQ safe: yes * * @return - CPA_STATUS_SUCCESS/CPA_STATUS_FAIL * */ CpaStatus qatUtilsHashSHA384Full(uint8_t *in, uint8_t *out, uint32_t len); /** * @ingroup QatUtils * * @brief Calculate SHA512 transform operation * * @param in - pointer to data to be processed. * The buffer needs to be at least sha512 block size long as defined in * rfc4868 (128 bytes) * out - output pointer for state data after single sha512 transform * operation. 
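A usage sketch for the one-shot hash entry points may be clearer than the parameter lists above. Note the distinction visible in the implementations later in this diff: the *Full variants hash an arbitrary-length buffer with proper finalization, while the plain variants run a single block transform and copy out the leading context state with no padding. This example is illustrative only (hypothetical function, arbitrary input), assuming "qat_utils.h":

#include "qat_utils.h"

/*
 * Illustration only: one-shot SHA-256 of a short buffer. The digest
 * buffer is 32 bytes, matching the state size documented above.
 */
static CpaStatus
example_sha256(void)
{
	char msg[] = "abc";	/* arbitrary example input */
	uint8_t digest[32];

	/* len is the number of input bytes to process. */
	return qatUtilsHashSHA256Full((uint8_t *)msg, digest,
				      sizeof(msg) - 1);
}
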
* The buffer needs to be at least sha512 state size long as defined in * rfc4868 (64 bytes) * @li Reentrant: yes * @li IRQ safe: yes * * @return - CPA_STATUS_SUCCESS/CPA_STATUS_FAIL * */ CpaStatus qatUtilsHashSHA512(uint8_t *in, uint8_t *out); /** * @ingroup QatUtils * * @brief Calculate SHA512 transform operation * * @param in - pointer to data to be processed. * The buffer needs to be at least sha512 block size long as defined in * rfc4868 (128 bytes) * out - output pointer for state data after single sha512 transform * operation. * The buffer needs to be at least sha512 state size long as defined in * rfc4868 (64 bytes) * len - Length on the input to be processed. * @li Reentrant: yes * @li IRQ safe: yes * * @return - CPA_STATUS_SUCCESS/CPA_STATUS_FAIL * */ CpaStatus qatUtilsHashSHA512Full(uint8_t *in, uint8_t *out, uint32_t len); /** * @ingroup QatUtils * * @brief Single block AES encrypt * * @param key - pointer to symetric key. * keyLenInBytes - key length * in - pointer to data to encrypt * out - pointer to output buffer for encrypted text * The in and out buffers need to be at least AES block size long * as defined in rfc3686 (16 bytes) * * @li Reentrant: yes * @li IRQ safe: yes * * @return - CPA_STATUS_SUCCESS/CPA_STATUS_FAIL * */ CpaStatus qatUtilsAESEncrypt(uint8_t *key, uint32_t keyLenInBytes, uint8_t *in, uint8_t *out); + +/** + * @ingroup QatUtils + * + * @brief Converts AES forward key to reverse key + * + * @param key - pointer to symetric key. + * keyLenInBytes - key length + * out - pointer to output buffer for reversed key + * The in and out buffers need to be at least AES block size long + * as defined in rfc3686 (16 bytes) + * + * @li Reentrant: yes + * @li IRQ safe: yes + * + * @return - CPA_STATUS_SUCCESS/CPA_STATUS_FAIL + * + */ +CpaStatus qatUtilsAESKeyExpansionForward(uint8_t *key, + uint32_t keyLenInBytes, + uint32_t *out); #endif diff --git a/sys/dev/qat/qat_api/qat_utils/src/QatUtilsCrypto.c b/sys/dev/qat/qat_api/qat_utils/src/QatUtilsCrypto.c index 3be2b8c362a0..6026faa52fa5 100644 --- a/sys/dev/qat/qat_api/qat_utils/src/QatUtilsCrypto.c +++ b/sys/dev/qat/qat_api/qat_utils/src/QatUtilsCrypto.c @@ -1,152 +1,191 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright(c) 2007-2022 Intel Corporation */ /* $FreeBSD$ */ #include "qat_utils.h" +#define AES_128_KEY_LEN_BYTES 16 +#define AES_192_KEY_LEN_BYTES 24 +#define AES_256_KEY_LEN_BYTES 32 + CpaStatus qatUtilsHashMD5(uint8_t *in, uint8_t *out) { MD5_CTX ctx; MD5Init(&ctx); MD5Update(&ctx, in, MD5_BLOCK_LENGTH); bcopy(&ctx, out, MD5_DIGEST_LENGTH); return CPA_STATUS_SUCCESS; } CpaStatus qatUtilsHashSHA1(uint8_t *in, uint8_t *out) { SHA1_CTX ctx; SHA1Init(&ctx); SHA1Update(&ctx, in, SHA1_BLOCK_LEN); bcopy(&ctx, out, SHA1_HASH_LEN); return CPA_STATUS_SUCCESS; } CpaStatus qatUtilsHashSHA224(uint8_t *in, uint8_t *out) { SHA224_CTX ctx; SHA224_Init(&ctx); SHA224_Update(&ctx, in, SHA224_BLOCK_LENGTH); bcopy(&ctx, out, SHA256_DIGEST_LENGTH); return CPA_STATUS_SUCCESS; } CpaStatus qatUtilsHashSHA256(uint8_t *in, uint8_t *out) { SHA256_CTX ctx; SHA256_Init(&ctx); SHA256_Update(&ctx, in, SHA256_BLOCK_LENGTH); bcopy(&ctx, out, SHA256_DIGEST_LENGTH); return CPA_STATUS_SUCCESS; } CpaStatus qatUtilsHashSHA384(uint8_t *in, uint8_t *out) { SHA384_CTX ctx; SHA384_Init(&ctx); SHA384_Update(&ctx, in, SHA384_BLOCK_LENGTH); bcopy(&ctx, out, SHA512_DIGEST_LENGTH); return CPA_STATUS_SUCCESS; } CpaStatus qatUtilsHashSHA512(uint8_t *in, uint8_t *out) { SHA512_CTX ctx; SHA512_Init(&ctx); SHA512_Update(&ctx, in, SHA512_BLOCK_LENGTH); 
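The AES helpers, including the qatUtilsAESKeyExpansionForward routine added by this change, can be exercised as below. This is a sketch, not part of the patch: the function name and key/plaintext values are made up, and the reversed-key buffer is sized to keyLenInBytes bytes on the strength of the copy loop in the implementation that follows.

#include "qat_utils.h"

/*
 * Illustration only: single-block AES-128 encrypt followed by the new
 * forward key expansion helper. All values are arbitrary example data.
 */
static CpaStatus
example_aes(void)
{
	uint8_t key[16] = { 0 };	/* AES-128 key (16, 24 or 32 bytes) */
	uint8_t plain[16] = { 0 };	/* exactly one AES block */
	uint8_t cipher[16];
	/* Holds keyLenInBytes bytes of reversed round-key material. */
	uint32_t reversed_key[sizeof(key) / sizeof(uint32_t)];
	CpaStatus status;

	status = qatUtilsAESEncrypt(key, sizeof(key), plain, cipher);
	if (CPA_STATUS_SUCCESS != status)
		return status;

	/* Any key length other than 16/24/32 bytes is rejected with
	 * CPA_STATUS_INVALID_PARAM. */
	return qatUtilsAESKeyExpansionForward(key, sizeof(key), reversed_key);
}
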
bcopy(&ctx, out, SHA512_DIGEST_LENGTH); return CPA_STATUS_SUCCESS; } CpaStatus qatUtilsHashMD5Full(uint8_t *in, uint8_t *out, uint32_t len) { MD5_CTX ctx; MD5Init(&ctx); MD5Update(&ctx, in, len); MD5Final(out, &ctx); return CPA_STATUS_SUCCESS; } CpaStatus qatUtilsHashSHA1Full(uint8_t *in, uint8_t *out, uint32_t len) { SHA1_CTX ctx; SHA1Init(&ctx); SHA1Update(&ctx, in, len); SHA1Final((caddr_t)out, &ctx); return CPA_STATUS_SUCCESS; } CpaStatus qatUtilsHashSHA256Full(uint8_t *in, uint8_t *out, uint32_t len) { SHA256_CTX ctx; SHA256_Init(&ctx); SHA256_Update(&ctx, in, len); SHA256_Final(out, &ctx); return CPA_STATUS_SUCCESS; } CpaStatus qatUtilsHashSHA384Full(uint8_t *in, uint8_t *out, uint32_t len) { SHA384_CTX ctx; SHA384_Init(&ctx); SHA384_Update(&ctx, in, len); SHA384_Final(out, &ctx); return CPA_STATUS_SUCCESS; } CpaStatus qatUtilsHashSHA512Full(uint8_t *in, uint8_t *out, uint32_t len) { SHA512_CTX ctx; SHA512_Init(&ctx); SHA512_Update(&ctx, in, len); SHA512_Final(out, &ctx); return CPA_STATUS_SUCCESS; } #define BYTE_TO_BITS_SHIFT 3 CpaStatus qatUtilsAESEncrypt(uint8_t *key, uint32_t keyLenInBytes, uint8_t *in, uint8_t *out) { rijndael_ctx ctx; rijndael_set_key(&ctx, key, keyLenInBytes << BYTE_TO_BITS_SHIFT); rijndael_encrypt(&ctx, in, out); return CPA_STATUS_SUCCESS; } + +CpaStatus +qatUtilsAESKeyExpansionForward(uint8_t *key, + uint32_t keyLenInBytes, + uint32_t *out) +{ + rijndael_ctx ctx; + uint32_t i = 0, j = 0; + uint32_t lw_per_round = 4; + int32_t lw_left_to_copy = keyLenInBytes / lw_per_round; + uint32_t *key_pointer = NULL; + + /* Error check for wrong input key len */ + if (AES_128_KEY_LEN_BYTES != keyLenInBytes && + AES_192_KEY_LEN_BYTES != keyLenInBytes && + AES_256_KEY_LEN_BYTES != keyLenInBytes) { + return CPA_STATUS_INVALID_PARAM; + } + + rijndael_set_key(&ctx, key, keyLenInBytes << BYTE_TO_BITS_SHIFT); + + /* Pointer to the last round of expanded key. 
*/ + key_pointer = &ctx.ek[lw_per_round * ctx.Nr]; + + while (lw_left_to_copy > 0) { + for (i = 0; i < MIN(lw_left_to_copy, lw_per_round); i++, j++) { + out[j] = __builtin_bswap32(key_pointer[i]); + } + + lw_left_to_copy -= lw_per_round; + key_pointer -= lw_left_to_copy; + } + + return CPA_STATUS_SUCCESS; +} diff --git a/sys/dev/qat/qat_common/adf_accel_engine.c b/sys/dev/qat/qat_common/adf_accel_engine.c index 3b41ab5764e8..d430605be522 100644 --- a/sys/dev/qat/qat_common/adf_accel_engine.c +++ b/sys/dev/qat/qat_common/adf_accel_engine.c @@ -1,267 +1,273 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright(c) 2007-2022 Intel Corporation */ /* $FreeBSD$ */ #include "qat_freebsd.h" #include "adf_cfg.h" #include "adf_common_drv.h" #include "adf_accel_devices.h" #include "icp_qat_uclo.h" #include "icp_qat_fw.h" #include "icp_qat_fw_init_admin.h" #include "adf_cfg_strings.h" #include "adf_transport_access_macros.h" #include "adf_transport_internal.h" #include #include #include "adf_cfg.h" #include "adf_accel_devices.h" #include "adf_common_drv.h" #include "icp_qat_uclo.h" #include "icp_qat_hw.h" #define MMP_VERSION_LEN 4 struct adf_mmp_version_s { u8 ver_val[MMP_VERSION_LEN]; }; static int request_firmware(const struct firmware **firmware_p, const char *name) { int retval = 0; if (NULL == firmware_p) { return -1; } *firmware_p = firmware_get(name); if (NULL == *firmware_p) { retval = -1; } return retval; } int adf_ae_fw_load(struct adf_accel_dev *accel_dev) { struct adf_fw_loader_data *loader_data = accel_dev->fw_loader; struct adf_hw_device_data *hw_device = accel_dev->hw_device; const void *fw_addr, *mmp_addr; u32 fw_size, mmp_size; s32 i = 0; u32 max_objs = 1; const char *obj_name = NULL; struct adf_mmp_version_s mmp_ver = { { 0 } }; unsigned int cfg_ae_mask = 0; if (!hw_device->fw_name) return 0; if (request_firmware(&loader_data->uof_fw, hw_device->fw_name)) { device_printf(GET_DEV(accel_dev), "Failed to load UOF FW %s\n", hw_device->fw_name); goto out_err; } if (request_firmware(&loader_data->mmp_fw, hw_device->fw_mmp_name)) { device_printf(GET_DEV(accel_dev), "Failed to load MMP FW %s\n", hw_device->fw_mmp_name); goto out_err; } fw_size = loader_data->uof_fw->datasize; fw_addr = loader_data->uof_fw->data; mmp_size = loader_data->mmp_fw->datasize; mmp_addr = loader_data->mmp_fw->data; memcpy(&mmp_ver, mmp_addr, MMP_VERSION_LEN); accel_dev->fw_versions.mmp_version_major = mmp_ver.ver_val[0]; accel_dev->fw_versions.mmp_version_minor = mmp_ver.ver_val[1]; accel_dev->fw_versions.mmp_version_patch = mmp_ver.ver_val[2]; if (hw_device->accel_capabilities_mask & ADF_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC) if (qat_uclo_wr_mimage(loader_data->fw_loader, mmp_addr, mmp_size)) { device_printf(GET_DEV(accel_dev), "Failed to load MMP\n"); goto out_err; } if (hw_device->get_objs_num) max_objs = hw_device->get_objs_num(accel_dev); for (i = max_objs - 1; i >= 0; i--) { /* obj_name is used to indicate the firmware name in MOF, * config unit0 must be loaded at end for authentication */ if (hw_device->get_obj_name && hw_device->get_obj_cfg_ae_mask) { unsigned long service_mask = hw_device->service_mask; + enum adf_accel_unit_services service_type = + ADF_ACCEL_SERVICE_NULL; - if (hw_device->service_mask && - !(test_bit(i, &service_mask))) + if (hw_device->get_service_type) + service_type = + hw_device->get_service_type(accel_dev, i); + else + service_type = BIT(i); + + if (service_mask && !(service_mask & service_type)) continue; - obj_name = hw_device->get_obj_name(accel_dev, BIT(i)); + + obj_name = + 
hw_device->get_obj_name(accel_dev, service_type); + cfg_ae_mask = + hw_device->get_obj_cfg_ae_mask(accel_dev, + service_type); + if (!obj_name) { device_printf( GET_DEV(accel_dev), "Invalid object (service = %lx)\n", BIT(i)); goto out_err; } - if (!hw_device->get_obj_cfg_ae_mask(accel_dev, BIT(i))) + if (!cfg_ae_mask) continue; - cfg_ae_mask = - hw_device->get_obj_cfg_ae_mask(accel_dev, BIT(i)); if (qat_uclo_set_cfg_ae_mask(loader_data->fw_loader, cfg_ae_mask)) { device_printf(GET_DEV(accel_dev), "Invalid config AE mask\n"); goto out_err; } } if (qat_uclo_map_obj( loader_data->fw_loader, fw_addr, fw_size, obj_name)) { device_printf(GET_DEV(accel_dev), "Failed to map UOF firmware\n"); goto out_err; } if (qat_uclo_wr_all_uimage(loader_data->fw_loader)) { device_printf(GET_DEV(accel_dev), "Failed to load UOF firmware\n"); goto out_err; } qat_uclo_del_obj(loader_data->fw_loader); obj_name = NULL; } return 0; out_err: adf_ae_fw_release(accel_dev); return EFAULT; } void adf_ae_fw_release(struct adf_accel_dev *accel_dev) { struct adf_fw_loader_data *loader_data = accel_dev->fw_loader; struct adf_hw_device_data *hw_device = accel_dev->hw_device; if (!hw_device->fw_name) return; if (loader_data->fw_loader) qat_uclo_del_obj(loader_data->fw_loader); if (loader_data->fw_loader && loader_data->fw_loader->mobj_handle) qat_uclo_del_mof(loader_data->fw_loader); qat_hal_deinit(loader_data->fw_loader); if (loader_data->uof_fw) firmware_put(loader_data->uof_fw, FIRMWARE_UNLOAD); if (loader_data->mmp_fw) firmware_put(loader_data->mmp_fw, FIRMWARE_UNLOAD); loader_data->uof_fw = NULL; loader_data->mmp_fw = NULL; loader_data->fw_loader = NULL; } int adf_ae_start(struct adf_accel_dev *accel_dev) { struct adf_fw_loader_data *loader_data = accel_dev->fw_loader; struct adf_hw_device_data *hw_data = accel_dev->hw_device; - uint32_t ae_ctr, ae, max_aes = GET_MAX_ACCELENGINES(accel_dev); + uint32_t ae_ctr; if (!hw_data->fw_name) return 0; - for (ae = 0, ae_ctr = 0; ae < max_aes; ae++) { - if (hw_data->ae_mask & (1 << ae)) { - qat_hal_start(loader_data->fw_loader, ae, 0xFF); - ae_ctr++; - } - } + ae_ctr = qat_hal_start(loader_data->fw_loader); device_printf(GET_DEV(accel_dev), "qat_dev%d started %d acceleration engines\n", accel_dev->accel_id, ae_ctr); return 0; } int adf_ae_stop(struct adf_accel_dev *accel_dev) { struct adf_fw_loader_data *loader_data = accel_dev->fw_loader; struct adf_hw_device_data *hw_data = accel_dev->hw_device; uint32_t ae_ctr, ae, max_aes = GET_MAX_ACCELENGINES(accel_dev); if (!hw_data->fw_name) return 0; for (ae = 0, ae_ctr = 0; ae < max_aes; ae++) { if (hw_data->ae_mask & (1 << ae)) { qat_hal_stop(loader_data->fw_loader, ae, 0xFF); ae_ctr++; } } device_printf(GET_DEV(accel_dev), "qat_dev%d stopped %d acceleration engines\n", accel_dev->accel_id, ae_ctr); return 0; } static int adf_ae_reset(struct adf_accel_dev *accel_dev, int ae) { struct adf_fw_loader_data *loader_data = accel_dev->fw_loader; qat_hal_reset(loader_data->fw_loader); if (qat_hal_clr_reset(loader_data->fw_loader)) return EFAULT; return 0; } int adf_ae_init(struct adf_accel_dev *accel_dev) { struct adf_fw_loader_data *loader_data; struct adf_hw_device_data *hw_device = accel_dev->hw_device; if (!hw_device->fw_name) return 0; loader_data = malloc(sizeof(*loader_data), M_QAT, M_WAITOK | M_ZERO); accel_dev->fw_loader = loader_data; if (qat_hal_init(accel_dev)) { device_printf(GET_DEV(accel_dev), "Failed to init the AEs\n"); free(loader_data, M_QAT); return EFAULT; } if (adf_ae_reset(accel_dev, 0)) { device_printf(GET_DEV(accel_dev), 
"Failed to reset the AEs\n"); qat_hal_deinit(loader_data->fw_loader); free(loader_data, M_QAT); return EFAULT; } return 0; } int adf_ae_shutdown(struct adf_accel_dev *accel_dev) { struct adf_fw_loader_data *loader_data = accel_dev->fw_loader; struct adf_hw_device_data *hw_device = accel_dev->hw_device; if (!hw_device->fw_name) return 0; qat_hal_deinit(loader_data->fw_loader); free(accel_dev->fw_loader, M_QAT); accel_dev->fw_loader = NULL; return 0; } diff --git a/sys/dev/qat/qat_common/adf_cfg_bundle.c b/sys/dev/qat/qat_common/adf_cfg_bundle.c index edb2ef942417..a7faf6c05730 100644 --- a/sys/dev/qat/qat_common/adf_cfg_bundle.c +++ b/sys/dev/qat/qat_common/adf_cfg_bundle.c @@ -1,377 +1,396 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright(c) 2007-2022 Intel Corporation */ /* $FreeBSD$ */ #include "adf_cfg_bundle.h" #include "adf_cfg_strings.h" #include "adf_cfg_instance.h" #include static bool adf_cfg_is_interrupt_mode(struct adf_cfg_bundle *bundle) { return (bundle->polling_mode == ADF_CFG_RESP_EPOLL) || (bundle->type == KERNEL && (bundle->polling_mode != ADF_CFG_RESP_POLL)); } static bool adf_cfg_can_be_shared(struct adf_cfg_bundle *bundle, const char *process_name, int polling_mode) { if (adf_cfg_is_free(bundle)) return true; if (bundle->polling_mode != polling_mode) return false; return !adf_cfg_is_interrupt_mode(bundle) || !strncmp(process_name, bundle->sections[0], ADF_CFG_MAX_SECTION_LEN_IN_BYTES); } bool adf_cfg_is_free(struct adf_cfg_bundle *bundle) { return bundle->type == FREE; } struct adf_cfg_instance * adf_cfg_get_free_instance(struct adf_cfg_device *device, struct adf_cfg_bundle *bundle, struct adf_cfg_instance *inst, const char *process_name) { int i = 0; struct adf_cfg_instance *ret_instance = NULL; if (adf_cfg_can_be_shared(bundle, process_name, inst->polling_mode)) { for (i = 0; i < device->instance_index; i++) { /* * the selected instance must match two criteria * 1) instance is from the bundle * 2) instance type is same */ if (bundle->number == device->instances[i]->bundle && inst->stype == device->instances[i]->stype) { ret_instance = device->instances[i]; break; } /* * no opportunity to match, * quit the loop as early as possible */ if ((bundle->number + 1) == device->instances[i]->bundle) break; } } return ret_instance; } int adf_cfg_get_ring_pairs_from_bundle(struct adf_cfg_bundle *bundle, struct adf_cfg_instance *inst, const char *process_name, struct adf_cfg_instance *bundle_inst) { if (inst->polling_mode == ADF_CFG_RESP_POLL && adf_cfg_is_interrupt_mode(bundle)) { pr_err("Trying to get ring pairs for a non-interrupt"); pr_err(" bundle from an interrupt bundle\n"); return EFAULT; } if (inst->stype != bundle_inst->stype) { pr_err("Got an instance of different type (cy/dc) than the"); pr_err(" one request\n"); return EFAULT; } if (strcmp(ADF_KERNEL_SEC, process_name) && strcmp(ADF_KERNEL_SAL_SEC, process_name) && inst->polling_mode != ADF_CFG_RESP_EPOLL && inst->polling_mode != ADF_CFG_RESP_POLL) { pr_err("User instance %s needs to be configured", inst->name); pr_err(" with IsPolled 1 or 2 for poll and epoll mode,"); pr_err(" respectively\n"); return EFAULT; } strlcpy(bundle->sections[bundle->section_index], process_name, ADF_CFG_MAX_STR_LEN); bundle->section_index++; if (adf_cfg_is_free(bundle)) { bundle->polling_mode = inst->polling_mode; bundle->type = (!strcmp(ADF_KERNEL_SEC, process_name) || !strcmp(ADF_KERNEL_SAL_SEC, process_name)) ? 
KERNEL : USER; if (adf_cfg_is_interrupt_mode(bundle)) { CPU_ZERO(&bundle->affinity_mask); CPU_COPY(&inst->affinity_mask, &bundle->affinity_mask); } } switch (inst->stype) { case CRYPTO: inst->asym_tx = bundle_inst->asym_tx; inst->asym_rx = bundle_inst->asym_rx; inst->sym_tx = bundle_inst->sym_tx; inst->sym_rx = bundle_inst->sym_rx; break; case COMP: inst->dc_tx = bundle_inst->dc_tx; inst->dc_rx = bundle_inst->dc_rx; break; case ASYM: inst->asym_tx = bundle_inst->asym_tx; inst->asym_rx = bundle_inst->asym_rx; break; case SYM: inst->sym_tx = bundle_inst->sym_tx; inst->sym_rx = bundle_inst->sym_rx; break; default: /* unknown service type of instance */ pr_err("1 Unknown service type %d of instance\n", inst->stype); } /* mark it as used */ bundle_inst->stype = USED; inst->bundle = bundle->number; return 0; } static void adf_cfg_init_and_insert_inst(struct adf_cfg_bundle *bundle, struct adf_cfg_device *device, int bank_num, struct adf_accel_dev *accel_dev) { struct adf_cfg_instance *cfg_instance = NULL; int ring_pair_index = 0; + int ring_index = 0; int i = 0; u8 serv_type; - int num_req_rings = bundle->num_of_rings / 2; - int num_rings_per_srv = num_req_rings / ADF_CFG_NUM_SERVICES; + int num_rings_per_srv = 0; + struct adf_hw_device_data *hw_data = accel_dev->hw_device; u16 ring_to_svc_map = GET_HW_DATA(accel_dev)->ring_to_svc_map; /* init the bundle with instance information */ - for (ring_pair_index = 0; ring_pair_index < ADF_CFG_NUM_SERVICES; + for (ring_pair_index = 0; ring_pair_index < bundle->max_cfg_svc_num; ring_pair_index++) { - serv_type = GET_SRV_TYPE(ring_to_svc_map, ring_pair_index); + adf_get_ring_svc_map_data(hw_data, + bundle->number, + ring_pair_index, + &serv_type, + &ring_index, + &num_rings_per_srv); + for (i = 0; i < num_rings_per_srv; i++) { cfg_instance = malloc(sizeof(*cfg_instance), M_QAT, M_WAITOK | M_ZERO); switch (serv_type) { case CRYPTO: crypto_instance_init(cfg_instance, bundle); break; case COMP: dc_instance_init(cfg_instance, bundle); break; case ASYM: asym_instance_init(cfg_instance, bundle); break; case SYM: sym_instance_init(cfg_instance, bundle); break; case NA: break; default: /* Unknown service type */ device_printf( GET_DEV(accel_dev), "Unknown service type %d of instance, mask is 0x%x\n", serv_type, ring_to_svc_map); } cfg_instance->bundle = bank_num; device->instances[device->instance_index++] = cfg_instance; cfg_instance = NULL; } if (serv_type == CRYPTO) { ring_pair_index++; serv_type = GET_SRV_TYPE(ring_to_svc_map, ring_pair_index); } } return; } int adf_cfg_bundle_init(struct adf_cfg_bundle *bundle, struct adf_cfg_device *device, int bank_num, struct adf_accel_dev *accel_dev) { int i = 0; + bundle->number = bank_num; /* init ring to service mapping for this bundle */ - adf_cfg_init_ring2serv_mapping(accel_dev, bundle); + adf_cfg_init_ring2serv_mapping(accel_dev, bundle, device); /* init the bundle with instance information */ adf_cfg_init_and_insert_inst(bundle, device, bank_num, accel_dev); CPU_FILL(&bundle->affinity_mask); bundle->type = FREE; bundle->polling_mode = -1; bundle->section_index = 0; - bundle->number = bank_num; bundle->sections = malloc(sizeof(char *) * bundle->max_section, M_QAT, M_WAITOK | M_ZERO); for (i = 0; i < bundle->max_section; i++) { bundle->sections[i] = malloc(ADF_CFG_MAX_STR_LEN, M_QAT, M_WAITOK | M_ZERO); } return 0; } void adf_cfg_bundle_clear(struct adf_cfg_bundle *bundle, struct adf_accel_dev *accel_dev) { int i = 0; for (i = 0; i < bundle->max_section; i++) { if (bundle->sections && bundle->sections[i]) { 
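The CRYPTO/COMP/SYM/ASYM switch above recurs throughout this file, always driven by the per-ring-pair fields of ring_to_svc_map. A purely diagnostic sketch of that decoding follows; it is not part of the change, and it assumes GET_SRV_TYPE, ADF_CFG_NUM_SERVICES, and the service enum are in scope via the headers this file already includes.

#include "adf_cfg_bundle.h"	/* assumed to pull in GET_SRV_TYPE et al. */

/*
 * Illustration only: walk each ring pair in a bank and report which
 * service its field in ring_to_svc_map selects.
 */
static void
example_dump_ring_to_svc_map(u16 ring_to_svc_map)
{
	int pair;
	u8 serv_type;

	for (pair = 0; pair < ADF_CFG_NUM_SERVICES; pair++) {
		serv_type = GET_SRV_TYPE(ring_to_svc_map, pair);
		switch (serv_type) {
		case CRYPTO:
			printf("pair %d: cy (asym+sym)\n", pair);
			break;
		case COMP:
			printf("pair %d: dc\n", pair);
			break;
		case ASYM:
			printf("pair %d: asym\n", pair);
			break;
		case SYM:
			printf("pair %d: sym\n", pair);
			break;
		default:
			printf("pair %d: unused\n", pair);
			break;
		}
	}
}
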
free(bundle->sections[i], M_QAT); bundle->sections[i] = NULL; } } free(bundle->sections, M_QAT); bundle->sections = NULL; adf_cfg_rel_ring2serv_mapping(bundle); } static void -adf_cfg_assign_serv_to_rings(struct adf_cfg_bundle *bundle, u16 ring_to_svc_map) +adf_cfg_assign_serv_to_rings(struct adf_hw_device_data *hw_data, + struct adf_cfg_bundle *bundle, + struct adf_cfg_device *device) { int ring_pair_index = 0; int ring_index = 0; u8 serv_type = 0; int num_req_rings = bundle->num_of_rings / 2; - int num_rings_per_srv = num_req_rings / ADF_CFG_NUM_SERVICES; + int num_rings_per_srv = 0; - for (ring_pair_index = 0; ring_pair_index < ADF_CFG_NUM_SERVICES; + for (ring_pair_index = 0; ring_pair_index < bundle->max_cfg_svc_num; ring_pair_index++) { - serv_type = GET_SRV_TYPE(ring_to_svc_map, ring_pair_index); - ring_index = num_rings_per_srv * ring_pair_index; + adf_get_ring_svc_map_data(hw_data, + bundle->number, + ring_pair_index, + &serv_type, + &ring_index, + &num_rings_per_srv); + switch (serv_type) { case CRYPTO: ASSIGN_SERV_TO_RINGS(bundle, ring_index, num_req_rings, ADF_ACCEL_SERV_ASYM, num_rings_per_srv); ring_pair_index++; ring_index = num_rings_per_srv * ring_pair_index; - if (ring_pair_index == ADF_CFG_NUM_SERVICES) + if (ring_pair_index == bundle->max_cfg_svc_num) break; ASSIGN_SERV_TO_RINGS(bundle, ring_index, num_req_rings, ADF_ACCEL_SERV_SYM, num_rings_per_srv); break; case COMP: ASSIGN_SERV_TO_RINGS(bundle, ring_index, num_req_rings, ADF_ACCEL_SERV_DC, num_rings_per_srv); break; case SYM: ASSIGN_SERV_TO_RINGS(bundle, ring_index, num_req_rings, ADF_ACCEL_SERV_SYM, num_rings_per_srv); break; case ASYM: ASSIGN_SERV_TO_RINGS(bundle, ring_index, num_req_rings, ADF_ACCEL_SERV_ASYM, num_rings_per_srv); break; case NA: ASSIGN_SERV_TO_RINGS(bundle, ring_index, num_req_rings, ADF_ACCEL_SERV_NA, num_rings_per_srv); break; default: /* unknown service type */ pr_err("Unknown service type %d, mask 0x%x.\n", serv_type, - ring_to_svc_map); + hw_data->ring_to_svc_map); } } return; } void adf_cfg_init_ring2serv_mapping(struct adf_accel_dev *accel_dev, - struct adf_cfg_bundle *bundle) + struct adf_cfg_bundle *bundle, + struct adf_cfg_device *device) { struct adf_hw_device_data *hw_data = accel_dev->hw_device; struct adf_cfg_ring *ring_in_bundle; int ring_num = 0; bundle->num_of_rings = hw_data->num_rings_per_bank; + if (hw_data->num_rings_per_bank >= (2 * ADF_CFG_NUM_SERVICES)) + bundle->max_cfg_svc_num = ADF_CFG_NUM_SERVICES; + else + bundle->max_cfg_svc_num = 1; bundle->rings = malloc(bundle->num_of_rings * sizeof(struct adf_cfg_ring *), M_QAT, M_WAITOK | M_ZERO); for (ring_num = 0; ring_num < bundle->num_of_rings; ring_num++) { ring_in_bundle = malloc(sizeof(struct adf_cfg_ring), M_QAT, M_WAITOK | M_ZERO); ring_in_bundle->mode = (ring_num < bundle->num_of_rings / 2) ? 
TX : RX; ring_in_bundle->number = ring_num; bundle->rings[ring_num] = ring_in_bundle; } - adf_cfg_assign_serv_to_rings(bundle, hw_data->ring_to_svc_map); + adf_cfg_assign_serv_to_rings(hw_data, bundle, device); return; } int adf_cfg_rel_ring2serv_mapping(struct adf_cfg_bundle *bundle) { int i = 0; if (bundle->rings) { for (i = 0; i < bundle->num_of_rings; i++) free(bundle->rings[i], M_QAT); free(bundle->rings, M_QAT); } return 0; } diff --git a/sys/dev/qat/qat_common/adf_cfg_bundle.h b/sys/dev/qat/qat_common/adf_cfg_bundle.h index 50ad7b007ef7..b9ec5e7a0af5 100644 --- a/sys/dev/qat/qat_common/adf_cfg_bundle.h +++ b/sys/dev/qat/qat_common/adf_cfg_bundle.h @@ -1,55 +1,77 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright(c) 2007-2022 Intel Corporation */ /* $FreeBSD$ */ #ifndef ADF_CFG_BUNDLE_H_ #define ADF_CFG_BUNDLE_H_ #include "adf_accel_devices.h" #include "adf_cfg_common.h" #define MAX_SECTIONS_PER_BUNDLE 8 #define MAX_SECTION_NAME_LEN 64 #define TX 0x0 #define RX 0x1 #define ASSIGN_SERV_TO_RINGS(bund, index, base, stype, rng_per_srv) \ do { \ int j = 0; \ typeof(bund) b = (bund); \ typeof(index) i = (index); \ typeof(base) s = (base); \ typeof(stype) t = (stype); \ typeof(rng_per_srv) rps = (rng_per_srv); \ for (j = 0; j < rps; j++) { \ b->rings[i + j]->serv_type = t; \ b->rings[i + j + s]->serv_type = t; \ } \ } while (0) bool adf_cfg_is_free(struct adf_cfg_bundle *bundle); int adf_cfg_get_ring_pairs_from_bundle(struct adf_cfg_bundle *bundle, struct adf_cfg_instance *inst, const char *process_name, struct adf_cfg_instance *bundle_inst); struct adf_cfg_instance * adf_cfg_get_free_instance(struct adf_cfg_device *device, struct adf_cfg_bundle *bundle, struct adf_cfg_instance *inst, const char *process_name); int adf_cfg_bundle_init(struct adf_cfg_bundle *bundle, struct adf_cfg_device *device, int bank_num, struct adf_accel_dev *accel_dev); void adf_cfg_bundle_clear(struct adf_cfg_bundle *bundle, struct adf_accel_dev *accel_dev); void adf_cfg_init_ring2serv_mapping(struct adf_accel_dev *accel_dev, - struct adf_cfg_bundle *bundle); + struct adf_cfg_bundle *bundle, + struct adf_cfg_device *device); int adf_cfg_rel_ring2serv_mapping(struct adf_cfg_bundle *bundle); + +static inline void +adf_get_ring_svc_map_data(struct adf_hw_device_data *hw_data, + int bundle_num, + int ring_pair_index, + u8 *serv_type, + int *ring_index, + int *num_rings_per_srv) +{ + if (hw_data->get_ring_svc_map_data) + return hw_data->get_ring_svc_map_data(ring_pair_index, + hw_data->ring_to_svc_map, + serv_type, + ring_index, + num_rings_per_srv, + bundle_num); + *serv_type = GET_SRV_TYPE(hw_data->ring_to_svc_map, ring_pair_index); + *num_rings_per_srv = + hw_data->num_rings_per_bank / (2 * ADF_CFG_NUM_SERVICES); + *ring_index = (*num_rings_per_srv) * ring_pair_index; +} #endif diff --git a/sys/dev/qat/qat_common/adf_cfg_device.c b/sys/dev/qat/qat_common/adf_cfg_device.c index ecd8e1599eeb..9e59c038f2f3 100644 --- a/sys/dev/qat/qat_common/adf_cfg_device.c +++ b/sys/dev/qat/qat_common/adf_cfg_device.c @@ -1,1102 +1,1097 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright(c) 2007-2022 Intel Corporation */ /* $FreeBSD$ */ #include "adf_cfg_instance.h" #include "adf_cfg_section.h" #include "adf_cfg_device.h" #include "icp_qat_hw.h" #include "adf_common_drv.h" #define ADF_CFG_SVCS_MAX (25) #define ADF_CFG_DEPRE_PARAMS_NUM (4) #define ADF_CFG_CAP_DC ADF_ACCEL_CAPABILITIES_COMPRESSION #define ADF_CFG_CAP_ASYM ADF_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC #define ADF_CFG_CAP_SYM \ 
(ADF_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC | \ ADF_ACCEL_CAPABILITIES_CIPHER | \ ADF_ACCEL_CAPABILITIES_AUTHENTICATION) #define ADF_CFG_CAP_CY (ADF_CFG_CAP_ASYM | ADF_CFG_CAP_SYM) #define ADF_CFG_FW_CAP_RL ICP_ACCEL_CAPABILITIES_RL #define ADF_CFG_FW_CAP_HKDF ICP_ACCEL_CAPABILITIES_HKDF #define ADF_CFG_FW_CAP_ECEDMONT ICP_ACCEL_CAPABILITIES_ECEDMONT #define ADF_CFG_FW_CAP_EXT_ALGCHAIN ICP_ACCEL_CAPABILITIES_EXT_ALGCHAIN #define ADF_CFG_CY_RINGS \ (CRYPTO | CRYPTO << ADF_CFG_SERV_RING_PAIR_1_SHIFT | \ CRYPTO << ADF_CFG_SERV_RING_PAIR_2_SHIFT | \ CRYPTO << ADF_CFG_SERV_RING_PAIR_3_SHIFT) #define ADF_CFG_SYM_RINGS \ (SYM | SYM << ADF_CFG_SERV_RING_PAIR_1_SHIFT | \ SYM << ADF_CFG_SERV_RING_PAIR_2_SHIFT | \ SYM << ADF_CFG_SERV_RING_PAIR_3_SHIFT) #define ADF_CFG_ASYM_RINGS \ (ASYM | ASYM << ADF_CFG_SERV_RING_PAIR_1_SHIFT | \ ASYM << ADF_CFG_SERV_RING_PAIR_2_SHIFT | \ ASYM << ADF_CFG_SERV_RING_PAIR_3_SHIFT) #define ADF_CFG_CY_DC_RINGS \ (CRYPTO | CRYPTO << ADF_CFG_SERV_RING_PAIR_1_SHIFT | \ NA << ADF_CFG_SERV_RING_PAIR_2_SHIFT | \ COMP << ADF_CFG_SERV_RING_PAIR_3_SHIFT) #define ADF_CFG_ASYM_DC_RINGS \ (ASYM | ASYM << ADF_CFG_SERV_RING_PAIR_1_SHIFT | \ COMP << ADF_CFG_SERV_RING_PAIR_2_SHIFT | \ COMP << ADF_CFG_SERV_RING_PAIR_3_SHIFT) #define ADF_CFG_SYM_DC_RINGS \ (SYM | SYM << ADF_CFG_SERV_RING_PAIR_1_SHIFT | \ COMP << ADF_CFG_SERV_RING_PAIR_2_SHIFT | \ COMP << ADF_CFG_SERV_RING_PAIR_3_SHIFT) #define ADF_CFG_DC_RINGS \ (COMP | COMP << ADF_CFG_SERV_RING_PAIR_1_SHIFT | \ COMP << ADF_CFG_SERV_RING_PAIR_2_SHIFT | \ COMP << ADF_CFG_SERV_RING_PAIR_3_SHIFT) static char adf_cfg_deprecated_params[][ADF_CFG_MAX_KEY_LEN_IN_BYTES] = { ADF_DEV_KPT_ENABLE, ADF_STORAGE_FIRMWARE_ENABLED, ADF_RL_FIRMWARE_ENABLED, ADF_PKE_DISABLED }; struct adf_cfg_enabled_services { const char svcs_enabled[ADF_CFG_MAX_VAL_LEN_IN_BYTES]; u16 rng_to_svc_msk; u32 enabled_svc_cap; u32 enabled_fw_cap; }; struct adf_cfg_profile { enum adf_cfg_fw_image_type fw_image_type; struct adf_cfg_enabled_services supported_svcs[ADF_CFG_SVCS_MAX]; }; static struct adf_cfg_profile adf_profiles[] = { { ADF_FW_IMAGE_DEFAULT, { { "cy", ADF_CFG_CY_RINGS, ADF_CFG_CAP_CY, ADF_CFG_FW_CAP_ECEDMONT | ADF_CFG_FW_CAP_EXT_ALGCHAIN }, { "dc", ADF_CFG_DC_RINGS, ADF_CFG_CAP_DC, 0 }, { "sym", ADF_CFG_SYM_RINGS, ADF_CFG_CAP_SYM, ADF_CFG_FW_CAP_EXT_ALGCHAIN }, { "asym", ADF_CFG_ASYM_RINGS, ADF_CFG_CAP_ASYM, ADF_CFG_FW_CAP_ECEDMONT }, { "cy;dc", ADF_CFG_CY_DC_RINGS, ADF_CFG_CAP_CY | ADF_CFG_CAP_DC, ADF_CFG_FW_CAP_ECEDMONT | ADF_CFG_FW_CAP_EXT_ALGCHAIN }, { "dc;cy", ADF_CFG_CY_DC_RINGS, ADF_CFG_CAP_CY | ADF_CFG_CAP_DC, ADF_CFG_FW_CAP_ECEDMONT | ADF_CFG_FW_CAP_EXT_ALGCHAIN }, { "asym;dc", ADF_CFG_ASYM_DC_RINGS, ADF_CFG_CAP_ASYM | ADF_CFG_CAP_DC, ADF_CFG_FW_CAP_ECEDMONT }, { "dc;asym", ADF_CFG_ASYM_DC_RINGS, ADF_CFG_CAP_ASYM | ADF_CFG_CAP_DC, ADF_CFG_FW_CAP_ECEDMONT }, { "sym;dc", ADF_CFG_SYM_DC_RINGS, ADF_CFG_CAP_SYM | ADF_CFG_CAP_DC, ADF_CFG_FW_CAP_EXT_ALGCHAIN }, { "dc;sym", ADF_CFG_SYM_DC_RINGS, ADF_CFG_CAP_SYM | ADF_CFG_CAP_DC, ADF_CFG_FW_CAP_EXT_ALGCHAIN }, { "inline;sym", ADF_CFG_SYM_RINGS, ADF_CFG_CAP_SYM, ADF_CFG_FW_CAP_EXT_ALGCHAIN }, { "sym;inline", ADF_CFG_SYM_RINGS, ADF_CFG_CAP_SYM, ADF_CFG_FW_CAP_EXT_ALGCHAIN }, { "inline;asym", ADF_CFG_SYM_RINGS, ADF_CFG_CAP_SYM, ADF_CFG_FW_CAP_EXT_ALGCHAIN }, { "asym;inline", ADF_CFG_ASYM_RINGS, ADF_CFG_CAP_ASYM, ADF_CFG_FW_CAP_ECEDMONT }, { "inline", 0, 0, 0 }, { "inline;cy", ADF_CFG_CY_RINGS, ADF_CFG_CAP_CY, ADF_CFG_FW_CAP_ECEDMONT | ADF_CFG_FW_CAP_EXT_ALGCHAIN }, { "cy;inline", ADF_CFG_CY_RINGS, ADF_CFG_CAP_CY, 
ADF_CFG_FW_CAP_ECEDMONT | ADF_CFG_FW_CAP_EXT_ALGCHAIN }, { "dc;inline", ADF_CFG_DC_RINGS, ADF_CFG_CAP_DC, 0 }, { "inline;dc", ADF_CFG_DC_RINGS, ADF_CFG_CAP_DC, 0 }, { "cy;dc;inline", ADF_CFG_CY_DC_RINGS, ADF_CFG_CAP_CY | ADF_CFG_CAP_DC, ADF_CFG_FW_CAP_ECEDMONT | ADF_CFG_FW_CAP_EXT_ALGCHAIN }, { "cy;inline;dc", ADF_CFG_CY_DC_RINGS, ADF_CFG_CAP_CY | ADF_CFG_CAP_DC, ADF_CFG_FW_CAP_ECEDMONT | ADF_CFG_FW_CAP_EXT_ALGCHAIN }, { "dc;inline;cy", ADF_CFG_CY_DC_RINGS, ADF_CFG_CAP_CY | ADF_CFG_CAP_DC, ADF_CFG_FW_CAP_ECEDMONT | ADF_CFG_FW_CAP_EXT_ALGCHAIN }, { "dc;cy;inline", ADF_CFG_CY_DC_RINGS, ADF_CFG_CAP_CY | ADF_CFG_CAP_DC, ADF_CFG_FW_CAP_ECEDMONT | ADF_CFG_FW_CAP_EXT_ALGCHAIN }, { "inline;cy;dc", ADF_CFG_CY_DC_RINGS, ADF_CFG_CAP_CY | ADF_CFG_CAP_DC, ADF_CFG_FW_CAP_ECEDMONT | ADF_CFG_FW_CAP_EXT_ALGCHAIN }, { "inline;dc;cy", ADF_CFG_CY_DC_RINGS, ADF_CFG_CAP_CY | ADF_CFG_CAP_DC, ADF_CFG_FW_CAP_ECEDMONT | ADF_CFG_FW_CAP_EXT_ALGCHAIN }, } }, { ADF_FW_IMAGE_CRYPTO, { { "cy", ADF_CFG_CY_RINGS, ADF_CFG_CAP_CY, ADF_CFG_FW_CAP_RL | ADF_CFG_FW_CAP_HKDF | ADF_CFG_FW_CAP_ECEDMONT | ADF_CFG_FW_CAP_EXT_ALGCHAIN }, { "sym", ADF_CFG_SYM_RINGS, ADF_CFG_CAP_SYM, ADF_CFG_FW_CAP_RL | ADF_CFG_FW_CAP_HKDF | ADF_CFG_FW_CAP_EXT_ALGCHAIN }, { "asym", ADF_CFG_ASYM_RINGS, ADF_CFG_CAP_ASYM, ADF_CFG_FW_CAP_RL | ADF_CFG_FW_CAP_ECEDMONT }, } }, { ADF_FW_IMAGE_COMPRESSION, { { "dc", ADF_CFG_DC_RINGS, ADF_CFG_CAP_DC, 0 }, } }, { ADF_FW_IMAGE_CUSTOM1, { { "cy", ADF_CFG_CY_RINGS, ADF_CFG_CAP_CY, ADF_CFG_FW_CAP_RL | ADF_CFG_FW_CAP_HKDF | ADF_CFG_FW_CAP_ECEDMONT | ADF_CFG_FW_CAP_EXT_ALGCHAIN }, { "dc", ADF_CFG_DC_RINGS, ADF_CFG_CAP_DC, 0 }, { "sym", ADF_CFG_SYM_RINGS, ADF_CFG_CAP_SYM, ADF_CFG_FW_CAP_RL | ADF_CFG_FW_CAP_HKDF | ADF_CFG_FW_CAP_EXT_ALGCHAIN }, { "asym", ADF_CFG_ASYM_RINGS, ADF_CFG_CAP_ASYM, ADF_CFG_FW_CAP_RL | ADF_CFG_FW_CAP_ECEDMONT }, { "cy;dc", ADF_CFG_CY_DC_RINGS, ADF_CFG_CAP_CY | ADF_CFG_CAP_DC, ADF_CFG_FW_CAP_RL | ADF_CFG_FW_CAP_HKDF | ADF_CFG_FW_CAP_ECEDMONT | ADF_CFG_FW_CAP_EXT_ALGCHAIN }, { "dc;cy", ADF_CFG_CY_DC_RINGS, ADF_CFG_CAP_CY | ADF_CFG_CAP_DC, ADF_CFG_FW_CAP_RL | ADF_CFG_FW_CAP_HKDF | ADF_CFG_FW_CAP_ECEDMONT | ADF_CFG_FW_CAP_EXT_ALGCHAIN }, { "asym;dc", ADF_CFG_ASYM_DC_RINGS, ADF_CFG_CAP_ASYM | ADF_CFG_CAP_DC, ADF_CFG_FW_CAP_RL | ADF_CFG_FW_CAP_ECEDMONT }, { "dc;asym", ADF_CFG_ASYM_DC_RINGS, ADF_CFG_CAP_ASYM | ADF_CFG_CAP_DC, ADF_CFG_FW_CAP_RL | ADF_CFG_FW_CAP_ECEDMONT }, { "sym;dc", ADF_CFG_SYM_DC_RINGS, ADF_CFG_CAP_SYM | ADF_CFG_CAP_DC, ADF_CFG_FW_CAP_RL | ADF_CFG_FW_CAP_HKDF | ADF_CFG_FW_CAP_EXT_ALGCHAIN }, { "dc;sym", ADF_CFG_SYM_DC_RINGS, ADF_CFG_CAP_SYM | ADF_CFG_CAP_DC, ADF_CFG_FW_CAP_RL | ADF_CFG_FW_CAP_HKDF | ADF_CFG_FW_CAP_EXT_ALGCHAIN }, } } }; int adf_cfg_get_ring_pairs(struct adf_cfg_device *device, struct adf_cfg_instance *inst, const char *process_name, struct adf_accel_dev *accel_dev) { int i = 0; int ret = EFAULT; struct adf_cfg_instance *free_inst = NULL; - struct adf_cfg_bundle *first_free_bundle = NULL; enum adf_cfg_bundle_type free_bundle_type; int first_user_bundle = 0; /* Section of user process with poll mode */ if (strcmp(ADF_KERNEL_SEC, process_name) && strcmp(ADF_KERNEL_SAL_SEC, process_name) && inst->polling_mode == ADF_CFG_RESP_POLL) { first_user_bundle = device->max_kernel_bundle_nr + 1; for (i = first_user_bundle; i < device->bundle_num; i++) { free_inst = adf_cfg_get_free_instance( device, device->bundles[i], inst, process_name); if (!free_inst) continue; ret = adf_cfg_get_ring_pairs_from_bundle( device->bundles[i], inst, process_name, free_inst); return ret; } } else { 
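The adf_profiles table above is consumed by adf_cfg_get_services_enabled and adf_cfg_get_caps_enabled further down; both perform the same linear scan. A condensed sketch of that lookup, for illustration only (the function name is hypothetical; the types are the file-local ones defined above):

/*
 * Condensed sketch of the table lookup performed by
 * adf_cfg_get_services_enabled: linear-scan the profile's supported_svcs
 * entries for an exact ServicesEnabled match and return the ring mask.
 */
static int
example_lookup_ring_mask(const struct adf_cfg_profile *profile,
			 const char *services_enabled, u16 *ring_mask)
{
	u32 i;
	const struct adf_cfg_enabled_services *svcs;

	for (i = 0; i < ADF_CFG_SVCS_MAX; i++) {
		svcs = &profile->supported_svcs[i];
		/* An empty string terminates the table early. */
		if (!strncmp(svcs->svcs_enabled, "",
			     ADF_CFG_MAX_VAL_LEN_IN_BYTES))
			break;
		if (!strncmp(services_enabled, svcs->svcs_enabled,
			     ADF_CFG_MAX_VAL_LEN_IN_BYTES)) {
			*ring_mask = svcs->rng_to_svc_msk;
			return 0;
		}
	}
	return EFAULT;	/* unknown ServicesEnabled value */
}

For example, under the ADF_FW_IMAGE_DEFAULT profile a ServicesEnabled value of "cy;dc" resolves to ADF_CFG_CY_DC_RINGS, i.e. crypto on ring pairs 0 and 1 and compression on ring pair 3.
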
/* Section of in-tree, or kernel API or user process * with epoll mode */ if (!strcmp(ADF_KERNEL_SEC, process_name) || !strcmp(ADF_KERNEL_SAL_SEC, process_name)) free_bundle_type = KERNEL; else free_bundle_type = USER; for (i = 0; i < device->bundle_num; i++) { /* Since both in-tree and kernel API's bundle type * are kernel, use cpumask_subset to check if the * ring's affinity mask is a subset of a bundle's * one. */ if (free_bundle_type == device->bundles[i]->type && CPU_SUBSET(&device->bundles[i]->affinity_mask, &inst->affinity_mask)) { free_inst = adf_cfg_get_free_instance( device, device->bundles[i], inst, process_name); if (!free_inst) continue; ret = adf_cfg_get_ring_pairs_from_bundle( device->bundles[i], inst, process_name, free_inst); return ret; - } else if (!first_free_bundle && - adf_cfg_is_free(device->bundles[i])) { - first_free_bundle = device->bundles[i]; } } + for (i = 0; i < device->bundle_num; i++) { + if (adf_cfg_is_free(device->bundles[i])) { + free_inst = adf_cfg_get_free_instance( + device, + device->bundles[i], + inst, + process_name); + if (!free_inst) + continue; - if (first_free_bundle) { - free_inst = adf_cfg_get_free_instance(device, - first_free_bundle, - inst, - process_name); - - if (!free_inst) + ret = adf_cfg_get_ring_pairs_from_bundle( + device->bundles[i], + inst, + process_name, + free_inst); return ret; - - ret = adf_cfg_get_ring_pairs_from_bundle( - first_free_bundle, inst, process_name, free_inst); - - if (free_bundle_type == KERNEL) { - device->max_kernel_bundle_nr = - first_free_bundle->number; } - return ret; } } pr_err("Don't have enough rings for instance %s in process %s\n", inst->name, process_name); return ret; } int adf_cfg_get_services_enabled(struct adf_accel_dev *accel_dev, u16 *ring_to_svc_map) { char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES]; char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES]; u32 i = 0; struct adf_cfg_enabled_services *svcs = NULL; enum adf_cfg_fw_image_type fw_image_type = ADF_FW_IMAGE_DEFAULT; struct adf_hw_device_data *hw_data = accel_dev->hw_device; *ring_to_svc_map = 0; /* Get the services enabled by user */ snprintf(key, sizeof(key), ADF_SERVICES_ENABLED); if (adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC, key, val)) return EFAULT; if (hw_data->get_fw_image_type) { if (hw_data->get_fw_image_type(accel_dev, &fw_image_type)) return EFAULT; } for (i = 0; i < ADF_CFG_SVCS_MAX; i++) { svcs = &adf_profiles[fw_image_type].supported_svcs[i]; if (!strncmp(svcs->svcs_enabled, "", ADF_CFG_MAX_VAL_LEN_IN_BYTES)) break; if (!strncmp(val, svcs->svcs_enabled, ADF_CFG_MAX_VAL_LEN_IN_BYTES)) { *ring_to_svc_map = svcs->rng_to_svc_msk; return 0; } } device_printf(GET_DEV(accel_dev), "Invalid ServicesEnabled %s for ServicesProfile: %d\n", val, fw_image_type); return EFAULT; } void adf_cfg_set_asym_rings_mask(struct adf_accel_dev *accel_dev) { struct adf_hw_device_data *hw_data = accel_dev->hw_device; hw_data->asym_rings_mask = 0; } void adf_cfg_gen_dispatch_arbiter(struct adf_accel_dev *accel_dev, const u32 *thrd_to_arb_map, u32 *thrd_to_arb_map_gen, u32 total_engines) { int engine, thread, service, bits; u32 thread_ability, ability_map, service_mask, service_type; u16 ena_srv_mask = GET_HW_DATA(accel_dev)->ring_to_svc_map; for (engine = 0; engine < total_engines; engine++) { if (!(GET_HW_DATA(accel_dev)->ae_mask & (1 << engine))) continue; bits = 0; /* ability_map is used to indicate the threads ability */ ability_map = thrd_to_arb_map[engine]; thrd_to_arb_map_gen[engine] = 0; /* parse each thread on the engine */ for (thread = 0; thread < 
ADF_NUM_THREADS_PER_AE; thread++) { /* get the ability of this thread */ thread_ability = ability_map & ADF_THRD_ABILITY_MASK; ability_map >>= ADF_THRD_ABILITY_BIT_LEN; /* parse each service */ for (service = 0; service < ADF_CFG_MAX_SERVICES; service++) { service_type = GET_SRV_TYPE(ena_srv_mask, service); switch (service_type) { case CRYPTO: service_mask = ADF_CFG_ASYM_SRV_MASK; if (thread_ability & service_mask) thrd_to_arb_map_gen[engine] |= (1 << bits); bits++; service++; service_mask = ADF_CFG_SYM_SRV_MASK; break; case COMP: service_mask = ADF_CFG_DC_SRV_MASK; break; case SYM: service_mask = ADF_CFG_SYM_SRV_MASK; break; case ASYM: service_mask = ADF_CFG_ASYM_SRV_MASK; break; default: service_mask = ADF_CFG_UNKNOWN_SRV_MASK; } if (thread_ability & service_mask) thrd_to_arb_map_gen[engine] |= (1 << bits); bits++; } } } } int adf_cfg_get_fw_image_type(struct adf_accel_dev *accel_dev, enum adf_cfg_fw_image_type *fw_image_type) { *fw_image_type = ADF_FW_IMAGE_CUSTOM1; return 0; } static int adf_cfg_get_caps_enabled(struct adf_accel_dev *accel_dev, u32 *enabled_svc_caps, u32 *enabled_fw_caps) { char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES]; char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES]; u8 i = 0; struct adf_cfg_enabled_services *svcs = NULL; enum adf_cfg_fw_image_type fw_image_type = ADF_FW_IMAGE_DEFAULT; struct adf_hw_device_data *hw_data = accel_dev->hw_device; *enabled_svc_caps = 0; *enabled_fw_caps = 0; /* Get the services enabled by user */ snprintf(key, sizeof(key), ADF_SERVICES_ENABLED); if (adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC, key, val)) return EFAULT; /* * Only the PF driver has the hook for get_fw_image_type as the VF's * enabled service is from PFVF communication. The fw_image_type for * the VF is set to DEFAULT since this type contains all kinds of * enabled service. 
*/ if (hw_data->get_fw_image_type) { if (hw_data->get_fw_image_type(accel_dev, &fw_image_type)) return EFAULT; } for (i = 0; i < ADF_CFG_SVCS_MAX; i++) { svcs = &adf_profiles[fw_image_type].supported_svcs[i]; if (!strncmp(svcs->svcs_enabled, "", ADF_CFG_MAX_VAL_LEN_IN_BYTES)) break; if (!strncmp(val, svcs->svcs_enabled, ADF_CFG_MAX_VAL_LEN_IN_BYTES)) { *enabled_svc_caps = svcs->enabled_svc_cap; *enabled_fw_caps = svcs->enabled_fw_cap; return 0; } } device_printf(GET_DEV(accel_dev), "Invalid ServicesEnabled %s for ServicesProfile: %d\n", val, fw_image_type); return EFAULT; } static void adf_cfg_check_deprecated_params(struct adf_accel_dev *accel_dev) { char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES]; char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES]; u8 i = 0; for (i = 0; i < ADF_CFG_DEPRE_PARAMS_NUM; i++) { /* give a warning if the deprecated params are set by user */ snprintf(key, sizeof(key), "%s", adf_cfg_deprecated_params[i]); if (!adf_cfg_get_param_value( accel_dev, ADF_GENERAL_SEC, key, val)) { device_printf(GET_DEV(accel_dev), "Parameter '%s' has been deprecated\n", key); } } } static int adf_cfg_check_enabled_services(struct adf_accel_dev *accel_dev, u32 enabled_svc_caps) { u32 hw_caps = GET_HW_DATA(accel_dev)->accel_capabilities_mask; if ((enabled_svc_caps & hw_caps) == enabled_svc_caps) return 0; device_printf(GET_DEV(accel_dev), "Unsupported device configuration\n"); return EFAULT; } static int adf_cfg_update_pf_accel_cap_mask(struct adf_accel_dev *accel_dev) { struct adf_hw_device_data *hw_data = accel_dev->hw_device; u32 enabled_svc_caps = 0; u32 enabled_fw_caps = 0; if (hw_data->get_accel_cap) { hw_data->accel_capabilities_mask = hw_data->get_accel_cap(accel_dev); } if (adf_cfg_get_caps_enabled(accel_dev, &enabled_svc_caps, &enabled_fw_caps)) return EFAULT; if (adf_cfg_check_enabled_services(accel_dev, enabled_svc_caps)) return EFAULT; if (!(enabled_svc_caps & ADF_CFG_CAP_ASYM)) hw_data->accel_capabilities_mask &= ~ADF_CFG_CAP_ASYM; if (!(enabled_svc_caps & ADF_CFG_CAP_SYM)) hw_data->accel_capabilities_mask &= ~ADF_CFG_CAP_SYM; if (!(enabled_svc_caps & ADF_CFG_CAP_DC)) hw_data->accel_capabilities_mask &= ~ADF_CFG_CAP_DC; /* Enable FW defined capabilities*/ if (enabled_fw_caps) hw_data->accel_capabilities_mask |= enabled_fw_caps; return 0; } static int adf_cfg_update_vf_accel_cap_mask(struct adf_accel_dev *accel_dev) { u32 enabled_svc_caps = 0; u32 enabled_fw_caps = 0; if (adf_cfg_get_caps_enabled(accel_dev, &enabled_svc_caps, &enabled_fw_caps)) return EFAULT; if (adf_cfg_check_enabled_services(accel_dev, enabled_svc_caps)) return EFAULT; return 0; } int adf_cfg_device_init(struct adf_cfg_device *device, struct adf_accel_dev *accel_dev) { int i = 0; /* max_inst indicates the max instance number one bank can hold */ int max_inst = accel_dev->hw_device->tx_rx_gap; int ret = ENOMEM; struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev); adf_cfg_check_deprecated_params(accel_dev); device->bundle_num = 0; device->bundles = (struct adf_cfg_bundle **)malloc( sizeof(struct adf_cfg_bundle *) * accel_dev->hw_device->num_banks, M_QAT, M_WAITOK | M_ZERO); device->bundle_num = accel_dev->hw_device->num_banks; device->instances = (struct adf_cfg_instance **)malloc( sizeof(struct adf_cfg_instance *) * device->bundle_num * max_inst, M_QAT, M_WAITOK | M_ZERO); device->instance_index = 0; device->max_kernel_bundle_nr = -1; ret = EFAULT; /* Update the acceleration capability mask based on User capability */ if (!accel_dev->is_vf) { if (adf_cfg_update_pf_accel_cap_mask(accel_dev)) goto failed; } else { if 
(adf_cfg_update_vf_accel_cap_mask(accel_dev)) goto failed; } /* Based on the svc configured, get ring_to_svc_map */ if (hw_data->get_ring_to_svc_map) { if (hw_data->get_ring_to_svc_map(accel_dev, &hw_data->ring_to_svc_map)) goto failed; } ret = ENOMEM; /* * 1) get the config information to generate the ring to service * mapping table * 2) init each bundle of this device */ for (i = 0; i < device->bundle_num; i++) { device->bundles[i] = malloc(sizeof(struct adf_cfg_bundle), M_QAT, M_WAITOK | M_ZERO); device->bundles[i]->max_section = max_inst; adf_cfg_bundle_init(device->bundles[i], device, i, accel_dev); } return 0; failed: for (i = 0; i < device->bundle_num; i++) { if (device->bundles[i]) adf_cfg_bundle_clear(device->bundles[i], accel_dev); } for (i = 0; i < (device->bundle_num * max_inst); i++) { if (device->instances && device->instances[i]) free(device->instances[i], M_QAT); } free(device->instances, M_QAT); device->instances = NULL; device_printf(GET_DEV(accel_dev), "Failed to do device init\n"); return ret; } void adf_cfg_device_clear(struct adf_cfg_device *device, struct adf_accel_dev *accel_dev) { int i = 0; for (i = 0; i < device->bundle_num; i++) { if (device->bundles && device->bundles[i]) { adf_cfg_bundle_clear(device->bundles[i], accel_dev); free(device->bundles[i], M_QAT); device->bundles[i] = NULL; } } free(device->bundles, M_QAT); device->bundles = NULL; for (i = 0; i < device->instance_index; i++) { if (device->instances && device->instances[i]) { free(device->instances[i], M_QAT); device->instances[i] = NULL; } } free(device->instances, M_QAT); device->instances = NULL; } static int adf_cfg_static_conf(struct adf_accel_dev *accel_dev) { int ret = 0; unsigned long val = 0; char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES]; char value[ADF_CFG_MAX_VAL_LEN_IN_BYTES]; int cpus; int instances = 0; int cy_poll_instances; int cy_irq_instances; int dc_instances; int i = 0; cpus = num_online_cpus(); instances = GET_MAX_BANKS(accel_dev) > cpus ? 
GET_MAX_BANKS(accel_dev) : cpus; if (!instances) return EFAULT; if (instances >= ADF_CFG_STATIC_CONF_INST_NUM_DC) dc_instances = ADF_CFG_STATIC_CONF_INST_NUM_DC; else return EFAULT; instances -= dc_instances; if (instances >= ADF_CFG_STATIC_CONF_INST_NUM_CY_POLL) cy_poll_instances = ADF_CFG_STATIC_CONF_INST_NUM_CY_POLL; else return EFAULT; instances -= cy_poll_instances; if (instances >= ADF_CFG_STATIC_CONF_INST_NUM_CY_IRQ) cy_irq_instances = ADF_CFG_STATIC_CONF_INST_NUM_CY_IRQ; else return EFAULT; instances -= cy_irq_instances; ret |= adf_cfg_section_add(accel_dev, ADF_GENERAL_SEC); ret |= adf_cfg_section_add(accel_dev, ADF_KERNEL_SAL_SEC); val = ADF_CFG_STATIC_CONF_VER; snprintf(key, ADF_CFG_MAX_KEY_LEN_IN_BYTES, ADF_CONFIG_VERSION); ret |= adf_cfg_add_key_value_param( accel_dev, ADF_GENERAL_SEC, key, (void *)&val, ADF_DEC); val = ADF_CFG_STATIC_CONF_AUTO_RESET; snprintf(key, ADF_CFG_MAX_KEY_LEN_IN_BYTES, ADF_AUTO_RESET_ON_ERROR); ret |= adf_cfg_add_key_value_param( accel_dev, ADF_GENERAL_SEC, key, (void *)&val, ADF_DEC); if (accel_dev->hw_device->get_num_accel_units) { int cy_au = 0; int dc_au = 0; int num_au = accel_dev->hw_device->get_num_accel_units( accel_dev->hw_device); if (num_au > ADF_CFG_STATIC_CONF_NUM_DC_ACCEL_UNITS) { cy_au = num_au - ADF_CFG_STATIC_CONF_NUM_DC_ACCEL_UNITS; dc_au = ADF_CFG_STATIC_CONF_NUM_DC_ACCEL_UNITS; } else if (num_au == ADF_CFG_STATIC_CONF_NUM_DC_ACCEL_UNITS) { cy_au = 1; dc_au = 1; } else { return EFAULT; } val = cy_au; snprintf(key, ADF_CFG_MAX_KEY_LEN_IN_BYTES, ADF_NUM_CY_ACCEL_UNITS); ret |= adf_cfg_add_key_value_param( accel_dev, ADF_GENERAL_SEC, key, (void *)&val, ADF_DEC); val = dc_au; snprintf(key, ADF_CFG_MAX_KEY_LEN_IN_BYTES, ADF_NUM_DC_ACCEL_UNITS); ret |= adf_cfg_add_key_value_param( accel_dev, ADF_GENERAL_SEC, key, (void *)&val, ADF_DEC); val = ADF_CFG_STATIC_CONF_NUM_INLINE_ACCEL_UNITS; snprintf(key, ADF_CFG_MAX_KEY_LEN_IN_BYTES, ADF_NUM_INLINE_ACCEL_UNITS); ret |= adf_cfg_add_key_value_param( accel_dev, ADF_GENERAL_SEC, key, (void *)&val, ADF_DEC); } val = ADF_CFG_STATIC_CONF_CY_ASYM_RING_SIZE; snprintf(key, ADF_CFG_MAX_KEY_LEN_IN_BYTES, ADF_CY ADF_RING_ASYM_SIZE); ret |= adf_cfg_add_key_value_param( accel_dev, ADF_GENERAL_SEC, key, (void *)&val, ADF_DEC); val = ADF_CFG_STATIC_CONF_CY_SYM_RING_SIZE; snprintf(key, ADF_CFG_MAX_KEY_LEN_IN_BYTES, ADF_CY ADF_RING_SYM_SIZE); ret |= adf_cfg_add_key_value_param( accel_dev, ADF_GENERAL_SEC, key, (void *)&val, ADF_DEC); val = ADF_CFG_STATIC_CONF_DC_INTER_BUF_SIZE; snprintf(key, ADF_CFG_MAX_KEY_LEN_IN_BYTES, ADF_INTER_BUF_SIZE); ret |= adf_cfg_add_key_value_param( accel_dev, ADF_GENERAL_SEC, key, (void *)&val, ADF_DEC); snprintf(key, ADF_CFG_MAX_KEY_LEN_IN_BYTES, ADF_SERVICES_ENABLED); if ((cy_poll_instances + cy_irq_instances) == 0 && dc_instances > 0) { snprintf(value, ADF_CFG_MAX_VAL_LEN_IN_BYTES, ADF_CFG_DC); } else if (((cy_poll_instances + cy_irq_instances)) > 0 && dc_instances == 0) { snprintf(value, ADF_CFG_MAX_VAL_LEN_IN_BYTES, ADF_CFG_SYM); } else { snprintf(value, ADF_CFG_MAX_VAL_LEN_IN_BYTES, "%s;%s", ADF_CFG_SYM, ADF_CFG_DC); } ret |= adf_cfg_add_key_value_param( accel_dev, ADF_GENERAL_SEC, key, (void *)value, ADF_STR); val = ADF_CFG_STATIC_CONF_SAL_STATS_CFG_DC; snprintf(key, ADF_CFG_MAX_KEY_LEN_IN_BYTES, SAL_STATS_CFG_DC); ret |= adf_cfg_add_key_value_param( accel_dev, ADF_GENERAL_SEC, key, (void *)&val, ADF_DEC); val = ADF_CFG_STATIC_CONF_SAL_STATS_CFG_DH; snprintf(key, ADF_CFG_MAX_KEY_LEN_IN_BYTES, SAL_STATS_CFG_DH); ret |= adf_cfg_add_key_value_param( accel_dev, ADF_GENERAL_SEC, 
key, (void *)&val, ADF_DEC); val = ADF_CFG_STATIC_CONF_SAL_STATS_CFG_DRBG; snprintf(key, ADF_CFG_MAX_KEY_LEN_IN_BYTES, SAL_STATS_CFG_DRBG); ret |= adf_cfg_add_key_value_param( accel_dev, ADF_GENERAL_SEC, key, (void *)&val, ADF_DEC); val = ADF_CFG_STATIC_CONF_SAL_STATS_CFG_DSA; snprintf(key, ADF_CFG_MAX_KEY_LEN_IN_BYTES, SAL_STATS_CFG_DSA); ret |= adf_cfg_add_key_value_param( accel_dev, ADF_GENERAL_SEC, key, (void *)&val, ADF_DEC); val = ADF_CFG_STATIC_CONF_SAL_STATS_CFG_ECC; snprintf(key, ADF_CFG_MAX_KEY_LEN_IN_BYTES, SAL_STATS_CFG_ECC); ret |= adf_cfg_add_key_value_param( accel_dev, ADF_GENERAL_SEC, key, (void *)&val, ADF_DEC); val = ADF_CFG_STATIC_CONF_SAL_STATS_CFG_ENABLED; snprintf(key, ADF_CFG_MAX_KEY_LEN_IN_BYTES, SAL_STATS_CFG_ENABLED); ret |= adf_cfg_add_key_value_param( accel_dev, ADF_GENERAL_SEC, key, (void *)&val, ADF_DEC); val = ADF_CFG_STATIC_CONF_SAL_STATS_CFG_KEYGEN; snprintf(key, ADF_CFG_MAX_KEY_LEN_IN_BYTES, SAL_STATS_CFG_KEYGEN); ret |= adf_cfg_add_key_value_param( accel_dev, ADF_GENERAL_SEC, key, (void *)&val, ADF_DEC); val = ADF_CFG_STATIC_CONF_SAL_STATS_CFG_LN; snprintf(key, ADF_CFG_MAX_KEY_LEN_IN_BYTES, SAL_STATS_CFG_LN); ret |= adf_cfg_add_key_value_param( accel_dev, ADF_GENERAL_SEC, key, (void *)&val, ADF_DEC); val = ADF_CFG_STATIC_CONF_SAL_STATS_CFG_PRIME; snprintf(key, ADF_CFG_MAX_KEY_LEN_IN_BYTES, SAL_STATS_CFG_PRIME); ret |= adf_cfg_add_key_value_param( accel_dev, ADF_GENERAL_SEC, key, (void *)&val, ADF_DEC); val = ADF_CFG_STATIC_CONF_SAL_STATS_CFG_RSA; snprintf(key, ADF_CFG_MAX_KEY_LEN_IN_BYTES, SAL_STATS_CFG_RSA); ret |= adf_cfg_add_key_value_param( accel_dev, ADF_GENERAL_SEC, key, (void *)&val, ADF_DEC); val = ADF_CFG_STATIC_CONF_SAL_STATS_CFG_SYM; snprintf(key, ADF_CFG_MAX_KEY_LEN_IN_BYTES, SAL_STATS_CFG_SYM); ret |= adf_cfg_add_key_value_param( accel_dev, ADF_GENERAL_SEC, key, (void *)&val, ADF_DEC); val = (cy_poll_instances + cy_irq_instances); snprintf(key, ADF_CFG_MAX_KEY_LEN_IN_BYTES, ADF_NUM_CY); ret |= adf_cfg_add_key_value_param( accel_dev, ADF_KERNEL_SAL_SEC, key, (void *)&val, ADF_DEC); val = dc_instances; snprintf(key, ADF_CFG_MAX_KEY_LEN_IN_BYTES, ADF_NUM_DC); ret |= adf_cfg_add_key_value_param( accel_dev, ADF_KERNEL_SAL_SEC, key, (void *)&val, ADF_DEC); for (i = 0; i < (cy_irq_instances); i++) { val = i; snprintf(key, ADF_CFG_MAX_KEY_LEN_IN_BYTES, ADF_CY "%d" ADF_ETRMGR_CORE_AFFINITY, i); ret |= adf_cfg_add_key_value_param( accel_dev, ADF_KERNEL_SAL_SEC, key, (void *)&val, ADF_DEC); val = ADF_CFG_STATIC_CONF_IRQ; snprintf(key, ADF_CFG_MAX_KEY_LEN_IN_BYTES, ADF_CY "%d" ADF_POLL_MODE, i); ret |= adf_cfg_add_key_value_param( accel_dev, ADF_KERNEL_SAL_SEC, key, (void *)&val, ADF_DEC); snprintf(value, ADF_CFG_MAX_VAL_LEN_IN_BYTES, ADF_CY "%d", i); snprintf(key, ADF_CFG_MAX_KEY_LEN_IN_BYTES, ADF_CY_NAME_FORMAT, i); ret |= adf_cfg_add_key_value_param( accel_dev, ADF_KERNEL_SAL_SEC, key, (void *)value, ADF_STR); } for (i = cy_irq_instances; i < (cy_poll_instances + cy_irq_instances); i++) { val = i; snprintf(key, ADF_CFG_MAX_KEY_LEN_IN_BYTES, ADF_CY "%d" ADF_ETRMGR_CORE_AFFINITY, i); ret |= adf_cfg_add_key_value_param( accel_dev, ADF_KERNEL_SAL_SEC, key, (void *)&val, ADF_DEC); val = ADF_CFG_STATIC_CONF_POLL; snprintf(key, ADF_CFG_MAX_KEY_LEN_IN_BYTES, ADF_CY "%d" ADF_POLL_MODE, i); ret |= adf_cfg_add_key_value_param( accel_dev, ADF_KERNEL_SAL_SEC, key, (void *)&val, ADF_DEC); snprintf(value, ADF_CFG_MAX_VAL_LEN_IN_BYTES, ADF_CY "%d", i); snprintf(key, ADF_CFG_MAX_KEY_LEN_IN_BYTES, ADF_CY_NAME_FORMAT, i); ret |= adf_cfg_add_key_value_param( accel_dev, 
ADF_KERNEL_SAL_SEC, key, (void *)value, ADF_STR); } for (i = 0; i < dc_instances; i++) { val = i; snprintf(key, ADF_CFG_MAX_KEY_LEN_IN_BYTES, ADF_DC "%d" ADF_ETRMGR_CORE_AFFINITY, i); ret |= adf_cfg_add_key_value_param( accel_dev, ADF_KERNEL_SAL_SEC, key, (void *)&val, ADF_DEC); val = ADF_CFG_STATIC_CONF_POLL; snprintf(key, ADF_CFG_MAX_KEY_LEN_IN_BYTES, ADF_DC "%d" ADF_POLL_MODE, i); ret |= adf_cfg_add_key_value_param( accel_dev, ADF_KERNEL_SAL_SEC, key, (void *)&val, ADF_DEC); snprintf(value, ADF_CFG_MAX_VAL_LEN_IN_BYTES, ADF_DC "%d", i); snprintf(key, ADF_CFG_MAX_KEY_LEN_IN_BYTES, ADF_DC_NAME_FORMAT, i); ret |= adf_cfg_add_key_value_param( accel_dev, ADF_KERNEL_SAL_SEC, key, (void *)value, ADF_STR); } if (ret) ret = EFAULT; return ret; } int adf_config_device(struct adf_accel_dev *accel_dev) { struct adf_cfg_device_data *cfg = NULL; struct adf_cfg_device *cfg_device = NULL; struct adf_cfg_section *sec; struct list_head *list; int ret = ENOMEM; if (!accel_dev) return ret; ret = adf_cfg_static_conf(accel_dev); if (ret) goto failed; cfg = accel_dev->cfg; cfg->dev = NULL; cfg_device = (struct adf_cfg_device *)malloc(sizeof(*cfg_device), M_QAT, M_WAITOK | M_ZERO); ret = EFAULT; if (adf_cfg_device_init(cfg_device, accel_dev)) goto failed; cfg->dev = cfg_device; /* GENERAL and KERNEL section must be processed before others */ list_for_each(list, &cfg->sec_list) { sec = list_entry(list, struct adf_cfg_section, list); if (!strcmp(sec->name, ADF_GENERAL_SEC)) { ret = adf_cfg_process_section(accel_dev, sec->name, accel_dev->accel_id); if (ret) goto failed; sec->processed = true; break; } } list_for_each(list, &cfg->sec_list) { sec = list_entry(list, struct adf_cfg_section, list); if (!strcmp(sec->name, ADF_KERNEL_SEC)) { ret = adf_cfg_process_section(accel_dev, sec->name, accel_dev->accel_id); if (ret) goto failed; sec->processed = true; break; } } list_for_each(list, &cfg->sec_list) { sec = list_entry(list, struct adf_cfg_section, list); if (!strcmp(sec->name, ADF_KERNEL_SAL_SEC)) { ret = adf_cfg_process_section(accel_dev, sec->name, accel_dev->accel_id); if (ret) goto failed; sec->processed = true; break; } } list_for_each(list, &cfg->sec_list) { sec = list_entry(list, struct adf_cfg_section, list); /* avoid reprocessing one section */ if (!sec->processed && !sec->is_derived) { ret = adf_cfg_process_section(accel_dev, sec->name, accel_dev->accel_id); if (ret) goto failed; sec->processed = true; } } /* newly added accel section */ ret = adf_cfg_process_section(accel_dev, ADF_ACCEL_SEC, accel_dev->accel_id); if (ret) goto failed; /* * put item-remove task after item-process * because during process we may fetch values from those items */ list_for_each(list, &cfg->sec_list) { sec = list_entry(list, struct adf_cfg_section, list); if (!sec->is_derived) { ret = adf_cfg_cleanup_section(accel_dev, sec->name, accel_dev->accel_id); if (ret) goto failed; } } ret = 0; set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status); failed: if (ret) { if (cfg_device) { adf_cfg_device_clear(cfg_device, accel_dev); free(cfg_device, M_QAT); cfg->dev = NULL; } adf_cfg_del_all(accel_dev); device_printf(GET_DEV(accel_dev), "Failed to config device\n"); } return ret; } diff --git a/sys/dev/qat/qat_common/adf_cfg_section.c b/sys/dev/qat/qat_common/adf_cfg_section.c index efb07784c57a..7cadd32bfc9c 100644 --- a/sys/dev/qat/qat_common/adf_cfg_section.c +++ b/sys/dev/qat/qat_common/adf_cfg_section.c @@ -1,1144 +1,1157 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright(c) 2007-2022 Intel Corporation */ /* $FreeBSD$ */ 
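/*
 * Editor's note: the first helper below, adf_cfg_set_core_number_for_instance(),
 * parses CoreAffinity-style values in which each comma-separated token is
 * either a single core number or an inclusive range "N-M" (e.g. "0,2,4-7"),
 * then pins process number P to list[P % list_len].  The following
 * stand-alone sketch reproduces that parsing scheme in user space, assuming
 * well-formed input; parse_cores() and its fixed-size buffers are
 * illustrative only and are not part of the driver.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int
parse_cores(const char *spec, unsigned long *out, int max)
{
	char buf[128], *tok, *save, *dash;
	unsigned long lo, hi, c;
	int n = 0;

	strlcpy(buf, spec, sizeof(buf));	/* strlcpy, as in the driver */
	for (tok = strtok_r(buf, ",", &save); tok != NULL && n < max;
	    tok = strtok_r(NULL, ",", &save)) {
		dash = strchr(tok, '-');
		if (dash != NULL) {
			/* Range token "N-M": expand to N..M inclusive. */
			*dash = '\0';
			lo = strtoul(tok, NULL, 10);
			hi = strtoul(dash + 1, NULL, 10);
			if (lo > hi)
				return (-1);
			for (c = lo; c <= hi && n < max; c++)
				out[n++] = c;
		} else {
			/* Single core number. */
			out[n++] = strtoul(tok, NULL, 10);
		}
	}
	return (n);
}

int
main(void)
{
	unsigned long cores[64];
	int n = parse_cores("0,2,4-7", cores, 64);	/* -> 0 2 4 5 6 7 */
	int process_num = 5;

	if (n > 0)
		printf("process %d -> core %lu\n", process_num,
		    cores[process_num % n]);	/* prints "core 7" */
	return (0);
}
/* End of editorial sketch; driver source resumes below. */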
#include "adf_cfg_instance.h" #include "adf_cfg_device.h" #include "adf_cfg_section.h" static bool adf_cfg_is_svc_enabled(struct adf_accel_dev *accel_dev, const u8 svc) { int ring_pair_index = 0; u8 serv_type = NA; struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev); for (ring_pair_index = 0; ring_pair_index < ADF_CFG_NUM_SERVICES; ring_pair_index++) { serv_type = GET_SRV_TYPE(hw_data->ring_to_svc_map, ring_pair_index); if (serv_type == svc) return true; } return false; } static int adf_cfg_set_core_number_for_instance(struct adf_accel_dev *accel_dev, const char *sec_name, const char *inst_name, int process_num, unsigned long *core_number) { char *core_val = NULL; char *pos = NULL; char **tokens = NULL; int token_index = 0; int core_arr_index = 0; int i = 0; int ret = EFAULT; unsigned long *core_num_arr = NULL; unsigned long core_num; unsigned long start, end; /* do memory allocation */ core_val = malloc(ADF_CFG_MAX_VAL_LEN_IN_BYTES, M_QAT, M_WAITOK | M_ZERO); tokens = malloc(sizeof(char *) * ADF_CFG_MAX_TOKENS, M_QAT, M_WAITOK | M_ZERO); for (i = 0; i < ADF_CFG_MAX_TOKENS; i++) { tokens[i] = malloc(ADF_CFG_MAX_TOKEN_LEN, M_QAT, M_WAITOK | M_ZERO); } core_num_arr = malloc(sizeof(unsigned long) * ADF_CFG_MAX_CORE_NUM, M_QAT, M_WAITOK | M_ZERO); /* parse the core_val */ ret = EFAULT; if (adf_cfg_get_param_value(accel_dev, sec_name, inst_name, core_val)) goto failed; pos = strchr(core_val, ','); while (pos) { pos[0] = '\0'; strlcpy(tokens[token_index++], core_val, ADF_CFG_MAX_TOKEN_LEN); strlcpy(core_val, pos + 1, ADF_CFG_MAX_VAL_LEN_IN_BYTES); pos = strchr(core_val, ','); if (!pos) strlcpy(tokens[token_index++], core_val, ADF_CFG_MAX_VAL_LEN_IN_BYTES); } /* in case there is only N-M */ if (token_index == 0) strlcpy(tokens[token_index++], core_val, ADF_CFG_MAX_VAL_LEN_IN_BYTES); /* parse the tokens such as N-M */ for (i = 0; i < token_index; i++) { pos = strchr(tokens[i], '-'); if (pos) { pos[0] = '\0'; ret = compat_strtoul(tokens[i], 10, &start); if (ret) goto failed; ret = compat_strtoul(pos + 1, 10, &end); if (ret) goto failed; if (start > end) { ret = EFAULT; goto failed; } for (core_num = start; core_num < end + 1; core_num++) core_num_arr[core_arr_index++] = core_num; } else { ret = compat_strtoul(tokens[i], 10, &core_num); if (ret) goto failed; core_num_arr[core_arr_index++] = core_num; } } if (core_arr_index == 0) { ret = compat_strtoul(core_val, 10, &core_num); if (ret) goto failed; else core_num_arr[core_arr_index++] = core_num; } *core_number = core_num_arr[process_num % core_arr_index]; ret = 0; failed: free(core_val, M_QAT); if (tokens) { for (i = 0; i < ADF_CFG_MAX_TOKENS; i++) free(tokens[i], M_QAT); free(tokens, M_QAT); } free(core_num_arr, M_QAT); if (ret) device_printf(GET_DEV(accel_dev), "Get core number failed with error %d\n", ret); return ret; } static int adf_cfg_set_value(struct adf_accel_dev *accel_dev, const char *sec, const char *key, unsigned long *value) { char *val = NULL; int ret = EFAULT; val = malloc(ADF_CFG_MAX_VAL_LEN_IN_BYTES, M_QAT, M_WAITOK | M_ZERO); if (adf_cfg_get_param_value(accel_dev, sec, key, val)) goto out; /* as the key type can be either ADF_DEC or ADF_HEX */ if (compat_strtoul(val, 10, value) && compat_strtoul(val, 16, value)) goto out; ret = 0; out: free(val, M_QAT); return ret; } static void adf_cfg_add_cy_inst_info(struct adf_accel_dev *accel_dev, struct adf_cfg_instance *crypto_inst, const char *derived_sec, int inst_index) { char *key = NULL; unsigned long bank_number = 0; unsigned long ring_number = 0; unsigned long asym_req = 0; 
unsigned long sym_req = 0; key = malloc(ADF_CFG_MAX_KEY_LEN_IN_BYTES, M_QAT, M_WAITOK | M_ZERO); snprintf(key, ADF_CFG_MAX_KEY_LEN_IN_BYTES, ADF_CY_BANK_NUM_FORMAT, inst_index); bank_number = crypto_inst->bundle; adf_cfg_add_key_value_param( accel_dev, derived_sec, key, (void *)&bank_number, ADF_DEC); snprintf(key, ADF_CFG_MAX_KEY_LEN_IN_BYTES, ADF_CY_ASYM_TX_FORMAT, inst_index); ring_number = crypto_inst->asym_tx; adf_cfg_add_key_value_param( accel_dev, derived_sec, key, (void *)&ring_number, ADF_DEC); snprintf(key, ADF_CFG_MAX_KEY_LEN_IN_BYTES, ADF_CY_SYM_TX_FORMAT, inst_index); ring_number = crypto_inst->sym_tx; adf_cfg_add_key_value_param( accel_dev, derived_sec, key, (void *)&ring_number, ADF_DEC); snprintf(key, ADF_CFG_MAX_KEY_LEN_IN_BYTES, ADF_CY_ASYM_RX_FORMAT, inst_index); ring_number = crypto_inst->asym_rx; adf_cfg_add_key_value_param( accel_dev, derived_sec, key, (void *)&ring_number, ADF_DEC); snprintf(key, ADF_CFG_MAX_KEY_LEN_IN_BYTES, ADF_CY_SYM_RX_FORMAT, inst_index); ring_number = crypto_inst->sym_rx; adf_cfg_add_key_value_param( accel_dev, derived_sec, key, (void *)&ring_number, ADF_DEC); strlcpy(key, ADF_CY_RING_ASYM_SIZE, ADF_CFG_MAX_KEY_LEN_IN_BYTES); if (adf_cfg_set_value(accel_dev, ADF_GENERAL_SEC, key, &asym_req)) asym_req = ADF_CFG_DEF_CY_RING_ASYM_SIZE; snprintf(key, ADF_CFG_MAX_KEY_LEN_IN_BYTES, ADF_CY_RING_ASYM_SIZE_FORMAT, inst_index); adf_cfg_add_key_value_param( accel_dev, derived_sec, key, (void *)&asym_req, ADF_DEC); strlcpy(key, ADF_CY_RING_SYM_SIZE, ADF_CFG_MAX_KEY_LEN_IN_BYTES); if (adf_cfg_set_value(accel_dev, ADF_GENERAL_SEC, key, &sym_req)) sym_req = ADF_CFG_DEF_CY_RING_SYM_SIZE; snprintf(key, ADF_CFG_MAX_KEY_LEN_IN_BYTES, ADF_CY_RING_SYM_SIZE_FORMAT, inst_index); adf_cfg_add_key_value_param( accel_dev, derived_sec, key, (void *)&sym_req, ADF_DEC); free(key, M_QAT); } static void adf_cfg_add_dc_inst_info(struct adf_accel_dev *accel_dev, struct adf_cfg_instance *dc_inst, const char *derived_sec, int inst_index) { char *key = NULL; unsigned long bank_number = 0; unsigned long ring_number = 0; unsigned long dc_req = 0; key = malloc(ADF_CFG_MAX_KEY_LEN_IN_BYTES, M_QAT, M_WAITOK | M_ZERO); snprintf(key, ADF_CFG_MAX_STR_LEN, ADF_DC_BANK_NUM_FORMAT, inst_index); bank_number = dc_inst->bundle; adf_cfg_add_key_value_param( accel_dev, derived_sec, key, (void *)&bank_number, ADF_DEC); snprintf(key, ADF_CFG_MAX_STR_LEN, ADF_DC_TX_FORMAT, inst_index); ring_number = dc_inst->dc_tx; adf_cfg_add_key_value_param( accel_dev, derived_sec, key, (void *)&ring_number, ADF_DEC); snprintf(key, ADF_CFG_MAX_STR_LEN, ADF_DC_RX_FORMAT, inst_index); ring_number = dc_inst->dc_rx; adf_cfg_add_key_value_param( accel_dev, derived_sec, key, (void *)&ring_number, ADF_DEC); strlcpy(key, ADF_DC_RING_SIZE, ADF_CFG_MAX_KEY_LEN_IN_BYTES); if (adf_cfg_set_value(accel_dev, ADF_GENERAL_SEC, key, &dc_req)) dc_req = ADF_CFG_DEF_DC_RING_SIZE; snprintf(key, ADF_CFG_MAX_STR_LEN, ADF_DC_RING_SIZE_FORMAT, inst_index); adf_cfg_add_key_value_param( accel_dev, derived_sec, key, (void *)&dc_req, ADF_DEC); free(key, M_QAT); } static void adf_cfg_add_asym_inst_info(struct adf_accel_dev *accel_dev, struct adf_cfg_instance *asym_inst, const char *derived_sec, int inst_index) { char *key = NULL; unsigned long bank_number = 0; unsigned long ring_number = 0; unsigned long asym_req = 0; key = malloc(ADF_CFG_MAX_KEY_LEN_IN_BYTES, M_QAT, M_WAITOK | M_ZERO); - snprintf(key, - ADF_CFG_MAX_KEY_LEN_IN_BYTES, - ADF_CY_BANK_NUM_FORMAT, - inst_index); + if (adf_cy_inst_cross_banks(accel_dev)) + snprintf(key, + 
ADF_CFG_MAX_KEY_LEN_IN_BYTES, + ADF_CY_ASYM_BANK_NUM_FORMAT, + inst_index); + else + snprintf(key, + ADF_CFG_MAX_KEY_LEN_IN_BYTES, + ADF_CY_BANK_NUM_FORMAT, + inst_index); bank_number = asym_inst->bundle; adf_cfg_add_key_value_param( accel_dev, derived_sec, key, (void *)&bank_number, ADF_DEC); snprintf(key, ADF_CFG_MAX_KEY_LEN_IN_BYTES, ADF_CY_ASYM_TX_FORMAT, inst_index); ring_number = asym_inst->asym_tx; adf_cfg_add_key_value_param( accel_dev, derived_sec, key, (void *)&ring_number, ADF_DEC); snprintf(key, ADF_CFG_MAX_KEY_LEN_IN_BYTES, ADF_CY_ASYM_RX_FORMAT, inst_index); ring_number = asym_inst->asym_rx; adf_cfg_add_key_value_param( accel_dev, derived_sec, key, (void *)&ring_number, ADF_DEC); strlcpy(key, ADF_CY_RING_ASYM_SIZE, ADF_CFG_MAX_KEY_LEN_IN_BYTES); if (adf_cfg_set_value(accel_dev, ADF_GENERAL_SEC, key, &asym_req)) asym_req = ADF_CFG_DEF_CY_RING_ASYM_SIZE; snprintf(key, ADF_CFG_MAX_KEY_LEN_IN_BYTES, ADF_CY_RING_ASYM_SIZE_FORMAT, inst_index); adf_cfg_add_key_value_param( accel_dev, derived_sec, key, (void *)&asym_req, ADF_DEC); free(key, M_QAT); } static void adf_cfg_add_sym_inst_info(struct adf_accel_dev *accel_dev, struct adf_cfg_instance *sym_inst, const char *derived_sec, int inst_index) { char *key = NULL; unsigned long bank_number = 0; unsigned long ring_number = 0; unsigned long sym_req = 0; key = malloc(ADF_CFG_MAX_KEY_LEN_IN_BYTES, M_QAT, M_WAITOK | M_ZERO); - snprintf(key, - ADF_CFG_MAX_KEY_LEN_IN_BYTES, - ADF_CY_BANK_NUM_FORMAT, - inst_index); + if (adf_cy_inst_cross_banks(accel_dev)) + snprintf(key, + ADF_CFG_MAX_KEY_LEN_IN_BYTES, + ADF_CY_SYM_BANK_NUM_FORMAT, + inst_index); + else + snprintf(key, + ADF_CFG_MAX_KEY_LEN_IN_BYTES, + ADF_CY_BANK_NUM_FORMAT, + inst_index); + bank_number = sym_inst->bundle; adf_cfg_add_key_value_param( accel_dev, derived_sec, key, (void *)&bank_number, ADF_DEC); snprintf(key, ADF_CFG_MAX_KEY_LEN_IN_BYTES, ADF_CY_SYM_TX_FORMAT, inst_index); ring_number = sym_inst->sym_tx; adf_cfg_add_key_value_param( accel_dev, derived_sec, key, (void *)&ring_number, ADF_DEC); snprintf(key, ADF_CFG_MAX_KEY_LEN_IN_BYTES, ADF_CY_SYM_RX_FORMAT, inst_index); ring_number = sym_inst->sym_rx; adf_cfg_add_key_value_param( accel_dev, derived_sec, key, (void *)&ring_number, ADF_DEC); strlcpy(key, ADF_CY_RING_SYM_SIZE, ADF_CFG_MAX_KEY_LEN_IN_BYTES); if (adf_cfg_set_value(accel_dev, ADF_GENERAL_SEC, key, &sym_req)) sym_req = ADF_CFG_DEF_CY_RING_SYM_SIZE; snprintf(key, ADF_CFG_MAX_KEY_LEN_IN_BYTES, ADF_CY_RING_SYM_SIZE_FORMAT, inst_index); adf_cfg_add_key_value_param( accel_dev, derived_sec, key, (void *)&sym_req, ADF_DEC); free(key, M_QAT); } static int adf_cfg_section_copy(struct adf_accel_dev *accel_dev, const char *processed_sec, const char *derived_sec) { unsigned long val = 0; struct list_head *list; struct adf_cfg_section *sec_process = adf_cfg_sec_find(accel_dev, processed_sec); if (!sec_process) return EFAULT; list_for_each(list, &sec_process->param_head) { struct adf_cfg_key_val *ptr = list_entry(list, struct adf_cfg_key_val, list); /* * ignore CoreAffinity since it will be generated later, and * there is no need to keep NumProcesses and LimitDevAccess. 
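* (CoreAffinity is re-derived per instance by the rings-entries helpers
* below, and NumProcesses/LimitDevAccess only control how many derived
* sections are generated in adf_cfg_process_user_section().)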
*/ if (strstr(ptr->key, ADF_ETRMGR_CORE_AFFINITY) || strstr(ptr->key, ADF_NUM_PROCESSES) || strstr(ptr->key, ADF_LIMIT_DEV_ACCESS)) continue; if (ptr->type == ADF_DEC) { if (!compat_strtoul(ptr->val, 10, &val)) adf_cfg_add_key_value_param(accel_dev, derived_sec, ptr->key, (void *)&val, ptr->type); } else if (ptr->type == ADF_STR) { adf_cfg_add_key_value_param(accel_dev, derived_sec, ptr->key, (void *)ptr->val, ptr->type); } else if (ptr->type == ADF_HEX) { if (!compat_strtoul(ptr->val, 16, &val)) adf_cfg_add_key_value_param(accel_dev, derived_sec, ptr->key, (void *)val, ptr->type); } } return 0; } static int adf_cfg_create_rings_entries_for_cy_inst(struct adf_accel_dev *accel_dev, const char *processed_sec, const char *derived_sec, int process_num, enum adf_cfg_service_type serv_type) { int i = 0; int ret = EFAULT; unsigned long num_inst = 0, num_dc_inst = 0; unsigned long core_number = 0; unsigned long polling_mode = 0; struct adf_cfg_instance *crypto_inst = NULL; char *key = NULL; char *val = NULL; key = malloc(ADF_CFG_MAX_KEY_LEN_IN_BYTES, M_QAT, M_WAITOK | M_ZERO); val = malloc(ADF_CFG_MAX_VAL_LEN_IN_BYTES, M_QAT, M_WAITOK | M_ZERO); snprintf(key, ADF_CFG_MAX_KEY_LEN_IN_BYTES, ADF_SERVICES_ENABLED); if (adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC, key, val)) goto failed; if ((!strncmp(val, ADF_CFG_CY, ADF_CFG_MAX_VAL_LEN_IN_BYTES)) || (!strncmp(val, ADF_CFG_ASYM, ADF_CFG_MAX_VAL_LEN_IN_BYTES)) || (!strncmp(val, ADF_CFG_SYM, ADF_CFG_MAX_VAL_LEN_IN_BYTES))) { strlcpy(key, ADF_NUM_DC, ADF_CFG_MAX_KEY_LEN_IN_BYTES); if (adf_cfg_set_value( accel_dev, processed_sec, key, &num_dc_inst)) goto failed; if (num_dc_inst > 0) { device_printf( GET_DEV(accel_dev), "NumDcInstances > 0 when only CY is enabled\n"); goto failed; } } ret = EFAULT; strlcpy(key, ADF_NUM_CY, ADF_CFG_MAX_KEY_LEN_IN_BYTES); if (adf_cfg_set_value(accel_dev, processed_sec, key, &num_inst)) goto failed; crypto_inst = malloc(sizeof(*crypto_inst), M_QAT, M_WAITOK | M_ZERO); for (i = 0; i < num_inst; i++) { memset(crypto_inst, 0, sizeof(*crypto_inst)); crypto_inst->stype = serv_type; snprintf(key, ADF_CFG_MAX_KEY_LEN_IN_BYTES, ADF_CY_CORE_AFFINITY_FORMAT, i); if (adf_cfg_set_core_number_for_instance(accel_dev, processed_sec, key, process_num, &core_number)) goto failed; if (strcmp(processed_sec, ADF_KERNEL_SEC) && strcmp(processed_sec, ADF_KERNEL_SAL_SEC)) adf_cfg_add_key_value_param(accel_dev, derived_sec, key, (void *)&core_number, ADF_DEC); snprintf(key, ADF_CFG_MAX_KEY_LEN_IN_BYTES, ADF_CY_NAME_FORMAT, i); if (adf_cfg_get_param_value(accel_dev, processed_sec, key, val)) goto failed; strlcpy(crypto_inst->name, val, sizeof(crypto_inst->name)); snprintf(key, ADF_CFG_MAX_KEY_LEN_IN_BYTES, ADF_CY_POLL_MODE_FORMAT, i); if (adf_cfg_set_value( accel_dev, processed_sec, key, &polling_mode)) goto failed; crypto_inst->polling_mode = polling_mode; CPU_ZERO(&crypto_inst->affinity_mask); CPU_SET(core_number, &crypto_inst->affinity_mask); if (adf_cfg_get_ring_pairs(accel_dev->cfg->dev, crypto_inst, derived_sec, accel_dev)) goto failed; switch (serv_type) { case CRYPTO: adf_cfg_add_cy_inst_info(accel_dev, crypto_inst, derived_sec, i); break; case ASYM: adf_cfg_add_asym_inst_info(accel_dev, crypto_inst, derived_sec, i); break; case SYM: adf_cfg_add_sym_inst_info(accel_dev, crypto_inst, derived_sec, i); break; default: pr_err("Unknown crypto instance type %d.\n", serv_type); goto failed; } } ret = 0; failed: free(crypto_inst, M_QAT); free(val, M_QAT); free(key, M_QAT); if (ret) device_printf(GET_DEV(accel_dev), "Failed to create rings for
cy\n"); return ret; } static int adf_cfg_create_rings_entries_for_dc_inst(struct adf_accel_dev *accel_dev, const char *processed_sec, const char *derived_sec, int process_num) { int i = 0; int ret = EFAULT; unsigned long num_inst = 0, num_cy_inst = 0; unsigned long core_number = 0; unsigned long polling_mode = 0; struct adf_cfg_instance *dc_inst = NULL; char *key = NULL; char *val = NULL; key = malloc(ADF_CFG_MAX_KEY_LEN_IN_BYTES, M_QAT, M_WAITOK | M_ZERO); val = malloc(ADF_CFG_MAX_VAL_LEN_IN_BYTES, M_QAT, M_WAITOK | M_ZERO); ret = EFAULT; snprintf(key, ADF_CFG_MAX_STR_LEN, ADF_SERVICES_ENABLED); if (adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC, key, val)) goto failed; if (!strncmp(val, ADF_CFG_DC, ADF_CFG_MAX_VAL_LEN_IN_BYTES)) { strlcpy(key, ADF_NUM_CY, ADF_CFG_MAX_KEY_LEN_IN_BYTES); if (adf_cfg_set_value( accel_dev, processed_sec, key, &num_cy_inst)) goto failed; if (num_cy_inst > 0) { device_printf( GET_DEV(accel_dev), "NumCyInstances > 0,when DC only is enabled\n"); goto failed; } } strlcpy(key, ADF_NUM_DC, ADF_CFG_MAX_KEY_LEN_IN_BYTES); if (adf_cfg_set_value(accel_dev, processed_sec, key, &num_inst)) goto failed; dc_inst = malloc(sizeof(*dc_inst), M_QAT, M_WAITOK | M_ZERO); for (i = 0; i < num_inst; i++) { memset(dc_inst, 0, sizeof(*dc_inst)); dc_inst->stype = COMP; snprintf(key, ADF_CFG_MAX_STR_LEN, ADF_DC_CORE_AFFINITY_FORMAT, i); if (adf_cfg_set_core_number_for_instance(accel_dev, processed_sec, key, process_num, &core_number)) goto failed; if (strcmp(processed_sec, ADF_KERNEL_SEC) && strcmp(processed_sec, ADF_KERNEL_SAL_SEC)) { adf_cfg_add_key_value_param(accel_dev, derived_sec, key, (void *)&core_number, ADF_DEC); } snprintf(key, ADF_CFG_MAX_KEY_LEN_IN_BYTES, ADF_DC_NAME_FORMAT, i); if (adf_cfg_get_param_value(accel_dev, processed_sec, key, val)) goto failed; strlcpy(dc_inst->name, val, sizeof(dc_inst->name)); snprintf(key, ADF_CFG_MAX_KEY_LEN_IN_BYTES, ADF_DC_POLL_MODE_FORMAT, i); if (adf_cfg_set_value( accel_dev, processed_sec, key, &polling_mode)) goto failed; dc_inst->polling_mode = polling_mode; CPU_ZERO(&dc_inst->affinity_mask); CPU_SET(core_number, &dc_inst->affinity_mask); if (adf_cfg_get_ring_pairs( accel_dev->cfg->dev, dc_inst, derived_sec, accel_dev)) goto failed; adf_cfg_add_dc_inst_info(accel_dev, dc_inst, derived_sec, i); } ret = 0; failed: free(dc_inst, M_QAT); free(val, M_QAT); free(key, M_QAT); if (ret) device_printf(GET_DEV(accel_dev), "Failed to create rings for dc\n"); return ret; } static int adf_cfg_process_user_section(struct adf_accel_dev *accel_dev, const char *sec_name, int dev) { int i = 0; int ret = EFAULT; unsigned long num_processes = 0; unsigned long limit_dev_acc = 0; u8 serv_type = 0; char *key = NULL; char *val = NULL; char *derived_sec_name = NULL; key = malloc(ADF_CFG_MAX_KEY_LEN_IN_BYTES, M_QAT, M_WAITOK | M_ZERO); val = malloc(ADF_CFG_MAX_VAL_LEN_IN_BYTES, M_QAT, M_WAITOK | M_ZERO); derived_sec_name = malloc(ADF_CFG_MAX_STR_LEN, M_QAT, M_WAITOK | M_ZERO); strlcpy(key, ADF_NUM_PROCESSES, ADF_CFG_MAX_KEY_LEN_IN_BYTES); if (adf_cfg_set_value(accel_dev, sec_name, key, &num_processes)) num_processes = 0; strlcpy(key, ADF_LIMIT_DEV_ACCESS, ADF_CFG_MAX_KEY_LEN_IN_BYTES); if (adf_cfg_set_value(accel_dev, sec_name, key, &limit_dev_acc)) limit_dev_acc = 0; for (i = 0; i < num_processes; i++) { if (limit_dev_acc) snprintf(derived_sec_name, ADF_CFG_MAX_STR_LEN, ADF_LIMITED_USER_SECTION_NAME_FORMAT, sec_name, dev, i); else snprintf(derived_sec_name, ADF_CFG_MAX_STR_LEN, ADF_USER_SECTION_NAME_FORMAT, sec_name, i); if 
(adf_cfg_derived_section_add(accel_dev, derived_sec_name)) goto failed; /* copy items to the derived section */ adf_cfg_section_copy(accel_dev, sec_name, derived_sec_name); for (serv_type = NA; serv_type <= USED; serv_type++) { switch (serv_type) { case NA: break; case CRYPTO: case ASYM: case SYM: if (adf_cfg_is_svc_enabled(accel_dev, serv_type)) if (adf_cfg_create_rings_entries_for_cy_inst( accel_dev, sec_name, derived_sec_name, i, (enum adf_cfg_service_type) serv_type)) goto failed; break; case COMP: if (adf_cfg_is_svc_enabled(accel_dev, serv_type)) if (adf_cfg_create_rings_entries_for_dc_inst( accel_dev, sec_name, derived_sec_name, i)) goto failed; break; case USED: break; default: pr_err("Unknown service type %d.\n", serv_type); } } } ret = 0; failed: free(val, M_QAT); free(key, M_QAT); free(derived_sec_name, M_QAT); if (ret) device_printf(GET_DEV(accel_dev), "Failed to process user section %s\n", sec_name); return ret; } static int adf_cfg_cleanup_user_section(struct adf_accel_dev *accel_dev, const char *sec_name) { struct adf_cfg_section *sec = adf_cfg_sec_find(accel_dev, sec_name); struct list_head *head; struct list_head *list_ptr, *tmp; if (!sec) return EFAULT; if (sec->is_derived) return 0; head = &sec->param_head; list_for_each_prev_safe(list_ptr, tmp, head) { struct adf_cfg_key_val *ptr = list_entry(list_ptr, struct adf_cfg_key_val, list); if (!strcmp(ptr->key, ADF_LIMIT_DEV_ACCESS)) continue; list_del(list_ptr); free(ptr, M_QAT); } return 0; } static int adf_cfg_process_section_no_op(struct adf_accel_dev *accel_dev, const char *sec_name) { return 0; } static int adf_cfg_cleanup_general_section(struct adf_accel_dev *accel_dev, const char *sec_name) { unsigned long first_used_bundle = 0; int ret = EFAULT; char *key = NULL; char *val = NULL; key = malloc(ADF_CFG_MAX_KEY_LEN_IN_BYTES, M_QAT, M_WAITOK | M_ZERO); val = malloc(ADF_CFG_MAX_VAL_LEN_IN_BYTES, M_QAT, M_WAITOK | M_ZERO); /* Remove keys that are not needed after processing */ strlcpy(key, ADF_CONFIG_VERSION, ADF_CFG_MAX_KEY_LEN_IN_BYTES); if (adf_cfg_remove_key_param(accel_dev, sec_name, key)) goto failed; strlcpy(key, ADF_CY ADF_RING_ASYM_SIZE, ADF_CFG_MAX_KEY_LEN_IN_BYTES); if (adf_cfg_remove_key_param(accel_dev, sec_name, key)) goto failed; strlcpy(key, ADF_CY ADF_RING_SYM_SIZE, ADF_CFG_MAX_KEY_LEN_IN_BYTES); if (adf_cfg_remove_key_param(accel_dev, sec_name, key)) goto failed; strlcpy(key, ADF_DC ADF_RING_DC_SIZE, ADF_CFG_MAX_KEY_LEN_IN_BYTES); if (adf_cfg_remove_key_param(accel_dev, sec_name, key)) goto failed; /* After all processing is done, set the "FirstUserBundle" value */ first_used_bundle = accel_dev->cfg->dev->max_kernel_bundle_nr + 1; strlcpy(key, ADF_FIRST_USER_BUNDLE, ADF_CFG_MAX_KEY_LEN_IN_BYTES); if (adf_cfg_add_key_value_param( accel_dev, sec_name, key, (void *)&first_used_bundle, ADF_DEC)) goto failed; ret = 0; failed: free(key, M_QAT); free(val, M_QAT); if (ret) device_printf(GET_DEV(accel_dev), "Failed to clean up general section\n"); return ret; } static int adf_cfg_process_kernel_section(struct adf_accel_dev *accel_dev, const char *sec_name) { u8 serv_type = 0; for (serv_type = NA; serv_type <= USED; serv_type++) { switch (serv_type) { case NA: break; case CRYPTO: case ASYM: case SYM: if (adf_cfg_is_svc_enabled(accel_dev, serv_type)) if (adf_cfg_create_rings_entries_for_cy_inst( accel_dev, sec_name, sec_name, 0, (enum adf_cfg_service_type)serv_type)) goto failed; break; case COMP: if (adf_cfg_is_svc_enabled(accel_dev, serv_type)) if (adf_cfg_create_rings_entries_for_dc_inst( accel_dev, sec_name,
sec_name, 0)) goto failed; break; case USED: break; default: pr_err("Unknown service type of instance %d.\n", serv_type); } } return 0; failed: return EFAULT; } static int adf_cfg_cleanup_kernel_section(struct adf_accel_dev *accel_dev, const char *sec_name) { return 0; } static int adf_cfg_create_accel_section(struct adf_accel_dev *accel_dev, const char *sec_name) { /* Find global settings for coalescing. Use defaults if not found */ unsigned long accel_coales = 0; unsigned long accel_coales_timer = 0; unsigned long accel_coales_num_msg = 0; unsigned long cpu; char *key = NULL; char *val = NULL; int ret = EFAULT; int index = 0; struct adf_hw_device_data *hw_device = accel_dev->hw_device; if (!hw_device) goto failed; key = malloc(ADF_CFG_MAX_KEY_LEN_IN_BYTES, M_QAT, M_WAITOK | M_ZERO); val = malloc(ADF_CFG_MAX_VAL_LEN_IN_BYTES, M_QAT, M_WAITOK | M_ZERO); strlcpy(key, ADF_ETRMGR_COALESCING_ENABLED, ADF_CFG_MAX_KEY_LEN_IN_BYTES); if (adf_cfg_set_value(accel_dev, ADF_GENERAL_SEC, key, &accel_coales)) accel_coales = ADF_CFG_ACCEL_DEF_COALES; strlcpy(key, ADF_ETRMGR_COALESCE_TIMER, ADF_CFG_MAX_KEY_LEN_IN_BYTES); if (adf_cfg_set_value( accel_dev, ADF_GENERAL_SEC, key, &accel_coales_timer)) accel_coales_timer = ADF_CFG_ACCEL_DEF_COALES_TIMER; strlcpy(key, ADF_ETRMGR_COALESCING_MSG_ENABLED, ADF_CFG_MAX_KEY_LEN_IN_BYTES); if (adf_cfg_set_value( accel_dev, ADF_GENERAL_SEC, key, &accel_coales_num_msg)) accel_coales_num_msg = ADF_CFG_ACCEL_DEF_COALES_NUM_MSG; for (index = 0; index < hw_device->num_banks; index++) { snprintf(key, ADF_CFG_MAX_KEY_LEN_IN_BYTES, ADF_ETRMGR_COALESCING_ENABLED_FORMAT, index); ret = adf_cfg_add_key_value_param( accel_dev, sec_name, key, &accel_coales, ADF_DEC); if (ret != 0) goto failed; snprintf(key, ADF_CFG_MAX_KEY_LEN_IN_BYTES, ADF_ETRMGR_COALESCE_TIMER_FORMAT, index); ret = adf_cfg_add_key_value_param( accel_dev, sec_name, key, &accel_coales_timer, ADF_DEC); if (ret != 0) goto failed; snprintf(key, ADF_CFG_MAX_KEY_LEN_IN_BYTES, ADF_ETRMGR_COALESCING_MSG_ENABLED_FORMAT, index); ret = adf_cfg_add_key_value_param( accel_dev, sec_name, key, &accel_coales_num_msg, ADF_DEC); if (ret != 0) goto failed; cpu = ADF_CFG_AFFINITY_WHATEVER; snprintf(key, ADF_CFG_MAX_KEY_LEN_IN_BYTES, ADF_ETRMGR_CORE_AFFINITY_FORMAT, index); ret = adf_cfg_add_key_value_param( accel_dev, sec_name, key, &cpu, ADF_DEC); if (ret != 0) goto failed; } ret = 0; failed: free(key, M_QAT); free(val, M_QAT); if (ret) device_printf(GET_DEV(accel_dev), "Failed to create accel section\n"); return ret; } static int adf_cfg_cleanup_accel_section(struct adf_accel_dev *accel_dev, const char *sec_name) { return 0; } static int adf_cfg_process_accel_section(struct adf_accel_dev *accel_dev, const char *sec_name) { int accel_num = 0; struct adf_hw_device_data *hw_device = accel_dev->hw_device; char *derived_name = NULL; int ret = EFAULT; if (!hw_device) goto failed; if (hw_device->num_logical_accel == 0) goto failed; derived_name = malloc(ADF_CFG_MAX_SECTION_LEN_IN_BYTES, M_QAT, M_WAITOK | M_ZERO); for (accel_num = 0; accel_num < hw_device->num_logical_accel; accel_num++) { snprintf(derived_name, ADF_CFG_MAX_SECTION_LEN_IN_BYTES, ADF_ACCEL_STR, accel_num); ret = adf_cfg_section_add(accel_dev, derived_name); if (ret != 0) goto failed; ret = adf_cfg_create_accel_section(accel_dev, derived_name); if (ret != 0) goto failed; } ret = 0; failed: free(derived_name, M_QAT); if (ret) device_printf(GET_DEV(accel_dev), "Failed to process accel section\n"); return ret; } int adf_cfg_process_section(struct adf_accel_dev *accel_dev, const 
char *sec_name, int dev) { if (!strcmp(sec_name, ADF_GENERAL_SEC) || !strcmp(sec_name, ADF_INLINE_SEC)) return adf_cfg_process_section_no_op(accel_dev, sec_name); else if (!strcmp(sec_name, ADF_KERNEL_SEC) || !strcmp(sec_name, ADF_KERNEL_SAL_SEC)) return adf_cfg_process_kernel_section(accel_dev, sec_name); else if (!strcmp(sec_name, ADF_ACCEL_SEC)) return adf_cfg_process_accel_section(accel_dev, sec_name); else return adf_cfg_process_user_section(accel_dev, sec_name, dev); } int adf_cfg_cleanup_section(struct adf_accel_dev *accel_dev, const char *sec_name, int dev) { if (!strcmp(sec_name, ADF_GENERAL_SEC)) return adf_cfg_cleanup_general_section(accel_dev, sec_name); else if (!strcmp(sec_name, ADF_INLINE_SEC)) return adf_cfg_process_section_no_op(accel_dev, sec_name); else if (!strcmp(sec_name, ADF_KERNEL_SEC) || !strcmp(sec_name, ADF_KERNEL_SAL_SEC)) return adf_cfg_cleanup_kernel_section(accel_dev, sec_name); else if (strstr(sec_name, ADF_ACCEL_SEC)) return adf_cfg_cleanup_accel_section(accel_dev, sec_name); else return adf_cfg_cleanup_user_section(accel_dev, sec_name); } int adf_cfg_setup_irq(struct adf_accel_dev *accel_dev) { int ret = EFAULT; struct adf_accel_pci *info_pci_dev = &accel_dev->accel_pci_dev; struct adf_cfg_device *cfg_dev = NULL; struct msix_entry *msixe = NULL; u32 num_msix = 0; int index = 0; int computed_core = 0; if (!accel_dev || !accel_dev->cfg || !accel_dev->hw_device) goto failed; cfg_dev = accel_dev->cfg->dev; if (!cfg_dev) goto failed; msixe = (struct msix_entry *)accel_dev->accel_pci_dev.msix_entries.entries; num_msix = accel_dev->accel_pci_dev.msix_entries.num_entries; if (!msixe) goto cleanup_and_fail; /* * Set the affinity of kernel and epoll-mode bundles to the * user-defined value. adf_isr.c assigns core affinity * round-robin at setup, so it has to be re-applied here once * the device is up.
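* Only KERNEL-type bundles and bundles in epoll response mode are
* re-bound below; all other bundles are skipped.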
*/ for (index = 0; index < accel_dev->hw_device->num_banks; index++) { struct adf_cfg_bundle *bundle = cfg_dev->bundles[index]; if (!bundle) continue; if (bundle->type != KERNEL && bundle->polling_mode != ADF_CFG_RESP_EPOLL) continue; if (bundle->number >= num_msix) goto cleanup_and_fail; computed_core = CPU_FFS(&bundle->affinity_mask) - 1; bus_bind_intr(info_pci_dev->pci_dev, msixe[index].irq, computed_core); } ret = 0; cleanup_and_fail: adf_cfg_device_clear(cfg_dev, accel_dev); free(cfg_dev, M_QAT); accel_dev->cfg->dev = NULL; failed: return ret; } diff --git a/sys/dev/qat/qat_common/adf_freebsd_admin.c b/sys/dev/qat/qat_common/adf_freebsd_admin.c index 2ad919887944..403fbcec3835 100644 --- a/sys/dev/qat/qat_common/adf_freebsd_admin.c +++ b/sys/dev/qat/qat_common/adf_freebsd_admin.c @@ -1,602 +1,599 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright(c) 2007-2022 Intel Corporation */ /* $FreeBSD$ */ #include "qat_freebsd.h" #include "adf_cfg.h" #include "adf_common_drv.h" #include "adf_accel_devices.h" #include "icp_qat_uclo.h" #include "icp_qat_fw.h" #include "icp_qat_fw_init_admin.h" #include "adf_cfg_strings.h" #include "adf_transport_access_macros.h" #include "adf_transport_internal.h" #include "adf_heartbeat.h" #include #include #include #include #include #include #include #include #define ADF_CONST_TABLE_VERSION_BYTE (0) /* Keep version number in range 0-255 */ #define ADF_CONST_TABLE_VERSION (1) /* Admin Messages Registers */ -#define ADF_DH895XCC_ADMINMSGUR_OFFSET (0x3A000 + 0x574) -#define ADF_DH895XCC_ADMINMSGLR_OFFSET (0x3A000 + 0x578) -#define ADF_DH895XCC_MAILBOX_BASE_OFFSET 0x20970 -#define ADF_DH895XCC_MAILBOX_STRIDE 0x1000 +#define ADF_MAILBOX_STRIDE 0x1000 #define ADF_ADMINMSG_LEN 32 #define FREEBSD_ALLIGNMENT_SIZE 64 #define ADF_INIT_CONFIG_SIZE 1024 static u8 const_tab[1024] __aligned(1024) = { ADF_CONST_TABLE_VERSION, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x11, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x11, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x13, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x13, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x13, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x13, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x23, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x33, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef, 0xfe, 0xdc, 0xba, 0x98, 0x76, 0x54, 0x32, 0x10, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x67, 0x45, 0x23, 0x01, 0xef, 0xcd, 0xab, 0x89, 0x98, 0xba, 0xdc, 0xfe, 0x10, 0x32, 0x54, 0x76, 0xc3, 0xd2, 0xe1, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x11, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc1, 0x05, 0x9e, 0xd8, 0x36, 0x7c, 0xd5, 0x07, 0x30, 0x70, 0xdd, 0x17, 0xf7, 0x0e, 0x59, 0x39, 0xff, 0xc0, 0x0b, 0x31, 0x68, 0x58, 0x15, 0x11, 0x64, 0xf9, 0x8f, 0xa7, 0xbe, 0xfa, 0x4f, 0xa4, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6a, 0x09, 0xe6, 0x67, 0xbb, 0x67, 0xae, 0x85, 0x3c, 0x6e, 0xf3, 0x72, 0xa5, 0x4f, 0xf5, 0x3a, 0x51, 0x0e, 0x52, 0x7f, 0x9b, 0x05, 0x68, 0x8c, 0x1f, 0x83, 0xd9, 0xab, 0x5b, 0xe0, 0xcd, 0x19, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xcb, 0xbb, 0x9d, 0x5d, 0xc1, 0x05, 0x9e, 0xd8, 0x62, 0x9a, 0x29, 0x2a, 0x36, 0x7c, 0xd5, 0x07, 0x91, 0x59, 0x01, 0x5a, 0x30, 0x70, 0xdd, 0x17, 0x15, 0x2f, 0xec, 0xd8, 0xf7, 0x0e, 0x59, 0x39, 0x67, 0x33, 0x26, 0x67, 0xff, 0xc0, 0x0b, 0x31, 0x8e, 0xb4, 0x4a, 0x87, 0x68, 0x58, 0x15, 0x11, 0xdb, 0x0c, 0x2e, 0x0d, 0x64, 0xf9, 0x8f, 0xa7, 0x47, 0xb5, 0x48, 0x1d, 0xbe, 0xfa, 0x4f, 0xa4, 0x06, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6a, 0x09, 0xe6, 0x67, 0xf3, 0xbc, 0xc9, 0x08, 0xbb, 0x67, 0xae, 0x85, 0x84, 0xca, 0xa7, 0x3b, 0x3c, 0x6e, 0xf3, 0x72, 0xfe, 0x94, 0xf8, 0x2b, 0xa5, 0x4f, 0xf5, 0x3a, 0x5f, 0x1d, 0x36, 0xf1, 0x51, 0x0e, 0x52, 0x7f, 0xad, 0xe6, 0x82, 0xd1, 0x9b, 0x05, 0x68, 0x8c, 0x2b, 0x3e, 0x6c, 0x1f, 0x1f, 0x83, 0xd9, 0xab, 0xfb, 0x41, 0xbd, 0x6b, 0x5b, 0xe0, 0xcd, 0x19, 0x13, 0x7e, 0x21, 0x79, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0x00, 0x16, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x12, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x14, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x15, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x15, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x14, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x14, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x15, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x15, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x24, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x25, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x24, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x25, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x12, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x12, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x43, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x43, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x45, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x45, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x44, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x44, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2B, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x2B, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x15, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x17, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}; #define ADF_ADMIN_POLL_INTERVAL_US 20 #define ADF_ADMIN_POLL_RETRIES 5000 static void dma_callback(void *arg, bus_dma_segment_t *segs, int nseg, int error) { bus_addr_t *addr; addr = arg; if (error == 0 && nseg == 1) *addr = segs[0].ds_addr; else *addr = 0; } int adf_put_admin_msg_sync(struct adf_accel_dev *accel_dev, u32 ae, void *in, void *out) { struct adf_admin_comms *admin = accel_dev->admin; struct adf_hw_device_data *hw_data = accel_dev->hw_device; struct resource *mailbox = admin->mailbox_addr; struct admin_info admin_csrs_info; hw_data->get_admin_info(&admin_csrs_info); int offset = ae * ADF_ADMINMSG_LEN * 2; int mb_offset = - ae * ADF_DH895XCC_MAILBOX_STRIDE + admin_csrs_info.mailbox_offset; + ae * ADF_MAILBOX_STRIDE + admin_csrs_info.mailbox_offset; int times, received; struct icp_qat_fw_init_admin_req *request = in; sx_xlock(&admin->lock); if (ADF_CSR_RD(mailbox, mb_offset) == 1) { sx_xunlock(&admin->lock); return EAGAIN; } memcpy(admin->virt_addr + offset, in, ADF_ADMINMSG_LEN); ADF_CSR_WR(mailbox, mb_offset, 1); received = 0; for (times = 0; times < ADF_ADMIN_POLL_RETRIES; times++) { usleep_range(ADF_ADMIN_POLL_INTERVAL_US, ADF_ADMIN_POLL_INTERVAL_US * 2); if (ADF_CSR_RD(mailbox, mb_offset) == 0) { received = 1; break; } } if (received) memcpy(out, admin->virt_addr + offset + ADF_ADMINMSG_LEN, ADF_ADMINMSG_LEN); else device_printf(GET_DEV(accel_dev), "Failed to send admin msg %d to accelerator %d\n", request->cmd_id, ae); sx_xunlock(&admin->lock); return received ? 
0 : EFAULT; } static inline int adf_set_dc_ibuf(struct adf_accel_dev *accel_dev, struct icp_qat_fw_init_admin_req *req) { char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = { 0 }; unsigned long ibuf_size = 0; if (!adf_cfg_get_param_value( accel_dev, ADF_GENERAL_SEC, ADF_INTER_BUF_SIZE, val)) { if (compat_strtoul(val, 0, &ibuf_size)) return EFAULT; } if (ibuf_size != 32 && ibuf_size != 64) ibuf_size = 64; req->ibuf_size_in_kb = ibuf_size; return 0; } int adf_send_admin(struct adf_accel_dev *accel_dev, struct icp_qat_fw_init_admin_req *req, struct icp_qat_fw_init_admin_resp *resp, u32 ae_mask) { int i; unsigned int mask; for (i = 0, mask = ae_mask; mask; i++, mask >>= 1) { if (!(mask & 1)) continue; if (adf_put_admin_msg_sync(accel_dev, i, req, resp) || resp->status) return EFAULT; } return 0; } static int adf_init_me(struct adf_accel_dev *accel_dev) { struct icp_qat_fw_init_admin_req req; struct icp_qat_fw_init_admin_resp resp; struct adf_hw_device_data *hw_device = accel_dev->hw_device; u32 ae_mask = hw_device->ae_mask; explicit_bzero(&req, sizeof(req)); explicit_bzero(&resp, sizeof(resp)); req.cmd_id = ICP_QAT_FW_INIT_ME; if (adf_set_dc_ibuf(accel_dev, &req)) return EFAULT; if (accel_dev->aram_info) { req.init_cfg_sz = sizeof(*accel_dev->aram_info); req.init_cfg_ptr = (u64)accel_dev->admin->aram_map_phys_addr; } if (adf_send_admin(accel_dev, &req, &resp, ae_mask)) return EFAULT; return 0; } static int adf_set_heartbeat_timer(struct adf_accel_dev *accel_dev) { struct icp_qat_fw_init_admin_req req; struct icp_qat_fw_init_admin_resp resp; struct adf_hw_device_data *hw_device = accel_dev->hw_device; u32 ae_mask = hw_device->ae_mask; u32 heartbeat_ticks; explicit_bzero(&req, sizeof(req)); req.cmd_id = ICP_QAT_FW_HEARTBEAT_TIMER_SET; req.hb_cfg_ptr = accel_dev->admin->phy_hb_addr; if (adf_get_hb_timer(accel_dev, &heartbeat_ticks)) return EINVAL; req.heartbeat_ticks = heartbeat_ticks; if (adf_send_admin(accel_dev, &req, &resp, ae_mask)) return EFAULT; return 0; } static int adf_get_dc_capabilities(struct adf_accel_dev *accel_dev, u32 *capabilities) { struct icp_qat_fw_init_admin_req req; struct icp_qat_fw_init_admin_resp resp; u32 ae_mask = 1; explicit_bzero(&req, sizeof(req)); req.cmd_id = ICP_QAT_FW_COMP_CAPABILITY_GET; if (adf_send_admin(accel_dev, &req, &resp, ae_mask)) return EFAULT; *capabilities = resp.extended_features; return 0; } static int adf_set_fw_constants(struct adf_accel_dev *accel_dev) { struct icp_qat_fw_init_admin_req req; struct icp_qat_fw_init_admin_resp resp; struct adf_hw_device_data *hw_device = accel_dev->hw_device; - u32 ae_mask = hw_device->ae_mask; + u32 ae_mask = hw_device->admin_ae_mask; explicit_bzero(&req, sizeof(req)); req.cmd_id = ICP_QAT_FW_CONSTANTS_CFG; req.init_cfg_sz = sizeof(const_tab); req.init_cfg_ptr = accel_dev->admin->const_tbl_addr; if (adf_send_admin(accel_dev, &req, &resp, ae_mask)) return EFAULT; return 0; } static int adf_get_fw_status(struct adf_accel_dev *accel_dev, u8 *major, u8 *minor, u8 *patch) { struct icp_qat_fw_init_admin_req req; struct icp_qat_fw_init_admin_resp resp; u32 ae_mask = 1; explicit_bzero(&req, sizeof(req)); req.cmd_id = ICP_QAT_FW_STATUS_GET; if (adf_send_admin(accel_dev, &req, &resp, ae_mask)) return EFAULT; *major = resp.version_major_num; *minor = resp.version_minor_num; *patch = resp.version_patch_num; return 0; } int adf_get_fw_timestamp(struct adf_accel_dev *accel_dev, u64 *timestamp) { struct icp_qat_fw_init_admin_req req; struct icp_qat_fw_init_admin_resp rsp; unsigned int ae_mask = 1; if (!accel_dev || !timestamp) return 
EFAULT; explicit_bzero(&req, sizeof(req)); req.cmd_id = ICP_QAT_FW_TIMER_GET; if (adf_send_admin(accel_dev, &req, &rsp, ae_mask)) return EFAULT; *timestamp = rsp.timestamp; return 0; } int adf_get_fw_pke_stats(struct adf_accel_dev *accel_dev, u64 *suc_count, u64 *unsuc_count) { struct icp_qat_fw_init_admin_req req = { 0 }; struct icp_qat_fw_init_admin_resp resp = { 0 }; unsigned long sym_ae_msk = 0; u8 sym_ae_msk_size = 0; u8 i = 0; if (!suc_count || !unsuc_count) return EFAULT; sym_ae_msk = accel_dev->au_info->sym_ae_msk; sym_ae_msk_size = sizeof(accel_dev->au_info->sym_ae_msk) * BITS_PER_BYTE; req.cmd_id = ICP_QAT_FW_PKE_REPLAY_STATS_GET; for_each_set_bit(i, &sym_ae_msk, sym_ae_msk_size) { memset(&resp, 0, sizeof(struct icp_qat_fw_init_admin_resp)); if (adf_put_admin_msg_sync(accel_dev, i, &req, &resp) || resp.status) { return EFAULT; } *suc_count += resp.successful_count; *unsuc_count += resp.unsuccessful_count; } return 0; } /** * adf_send_admin_init() - Function sends init message to FW * @accel_dev: Pointer to acceleration device. * * Function sends admin init message to the FW * * Return: 0 on success, error code otherwise. */ int adf_send_admin_init(struct adf_accel_dev *accel_dev) { int ret; u32 dc_capabilities = 0; unsigned int storage_enabled = 0; if (GET_HW_DATA(accel_dev)->query_storage_cap) { ret = adf_get_dc_capabilities(accel_dev, &dc_capabilities); if (ret) { device_printf(GET_DEV(accel_dev), "Cannot get dc capabilities\n"); return ret; } accel_dev->hw_device->extended_dc_capabilities = dc_capabilities; } else { ret = GET_HW_DATA(accel_dev)->get_storage_enabled( accel_dev, &storage_enabled); if (ret) { device_printf(GET_DEV(accel_dev), "Cannot get storage enabled\n"); return ret; } } ret = adf_set_heartbeat_timer(accel_dev); if (ret) { if (ret == EINVAL) { device_printf(GET_DEV(accel_dev), "Cannot set heartbeat timer\n"); return ret; } device_printf(GET_DEV(accel_dev), "Heartbeat is not supported\n"); } ret = adf_get_fw_status(accel_dev, &accel_dev->fw_versions.fw_version_major, &accel_dev->fw_versions.fw_version_minor, &accel_dev->fw_versions.fw_version_patch); if (ret) { device_printf(GET_DEV(accel_dev), "Cannot get fw version\n"); return ret; } device_printf(GET_DEV(accel_dev), "FW version: %d.%d.%d\n", accel_dev->fw_versions.fw_version_major, accel_dev->fw_versions.fw_version_minor, accel_dev->fw_versions.fw_version_patch); ret = adf_set_fw_constants(accel_dev); if (ret) { device_printf(GET_DEV(accel_dev), "Cannot set fw constants\n"); return ret; } ret = adf_init_me(accel_dev); if (ret) device_printf(GET_DEV(accel_dev), "Cannot init AE\n"); return ret; } int adf_init_admin_comms(struct adf_accel_dev *accel_dev) { struct adf_admin_comms *admin = NULL; struct adf_hw_device_data *hw_data = NULL; struct adf_bar *pmisc = NULL; struct resource *csr = NULL; struct admin_info admin_csrs_info; unsigned int adminmsg_u, adminmsg_l; u64 reg_val = 0; int ret = 0; admin = kzalloc_node(sizeof(*accel_dev->admin), M_WAITOK | M_ZERO, dev_to_node(GET_DEV(accel_dev))); hw_data = accel_dev->hw_device; pmisc = &GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)]; csr = pmisc->virt_addr; ret = bus_dma_mem_create(&admin->dma_mem, accel_dev->dma_tag, FREEBSD_ALLIGNMENT_SIZE, BUS_SPACE_MAXADDR, PAGE_SIZE, 0); if (ret != 0) { device_printf(GET_DEV(accel_dev), "Failed to allocate dma buff\n"); kfree(admin); return ret; } admin->virt_addr = admin->dma_mem.dma_vaddr; admin->phy_addr = admin->dma_mem.dma_baddr; bzero(admin->virt_addr, PAGE_SIZE); ret = bus_dmamap_create(accel_dev->dma_tag, 0, 
&admin->const_tbl_map); if (ret != 0) { device_printf(GET_DEV(accel_dev), "Failed to create DMA map\n"); bus_dma_mem_free(&admin->dma_mem); kfree(admin); return ret; } ret = bus_dmamap_load(accel_dev->dma_tag, admin->const_tbl_map, (void *)const_tab, 1024, dma_callback, &admin->const_tbl_addr, BUS_DMA_NOWAIT); if (ret == 0 && admin->const_tbl_addr == 0) ret = EFBIG; if (ret != 0) { device_printf(GET_DEV(accel_dev), "Failed to map const table for DMA\n"); bus_dmamap_destroy(accel_dev->dma_tag, admin->const_tbl_map); bus_dma_mem_free(&admin->dma_mem); kfree(admin); return ret; } /* DMA ARAM address map */ if (accel_dev->aram_info) { ret = bus_dmamap_create(accel_dev->dma_tag, 0, &admin->aram_map); if (ret != 0) { device_printf(GET_DEV(accel_dev), "Failed to create DMA map\n"); bus_dma_mem_free(&admin->dma_mem); kfree(admin); return ret; } ret = bus_dmamap_load(accel_dev->dma_tag, admin->aram_map, (void *)accel_dev->aram_info, sizeof(*accel_dev->aram_info), dma_callback, &admin->aram_map_phys_addr, BUS_DMA_NOWAIT); if (ret == 0 && admin->aram_map_phys_addr == 0) ret = EFBIG; if (ret != 0) { device_printf(GET_DEV(accel_dev), "Failed to map aram phys addr for DMA\n"); bus_dmamap_destroy(accel_dev->dma_tag, admin->aram_map); bus_dma_mem_free(&admin->dma_mem); kfree(admin); return ret; } } ret = bus_dma_mem_create(&admin->dma_hb, accel_dev->dma_tag, FREEBSD_ALLIGNMENT_SIZE, BUS_SPACE_MAXADDR, PAGE_SIZE, 0); if (ret != 0) { device_printf(GET_DEV(accel_dev), "Failed to allocate dma buff\n"); bus_dmamap_unload(accel_dev->dma_tag, admin->const_tbl_map); bus_dmamap_destroy(accel_dev->dma_tag, admin->const_tbl_map); bus_dma_mem_free(&admin->dma_mem); kfree(admin); return ret; } admin->virt_hb_addr = admin->dma_hb.dma_vaddr; admin->phy_hb_addr = admin->dma_hb.dma_baddr; bzero(admin->virt_hb_addr, PAGE_SIZE); hw_data->get_admin_info(&admin_csrs_info); adminmsg_u = admin_csrs_info.admin_msg_ur; adminmsg_l = admin_csrs_info.admin_msg_lr; reg_val = (u64)admin->phy_addr; ADF_CSR_WR(csr, adminmsg_u, reg_val >> 32); ADF_CSR_WR(csr, adminmsg_l, reg_val); sx_init(&admin->lock, "qat admin"); admin->mailbox_addr = csr; accel_dev->admin = admin; return 0; } void adf_exit_admin_comms(struct adf_accel_dev *accel_dev) { struct adf_admin_comms *admin = accel_dev->admin; if (!admin) return; if (admin->virt_addr) bus_dma_mem_free(&admin->dma_mem); if (admin->virt_hb_addr) bus_dma_mem_free(&admin->dma_hb); bus_dmamap_unload(accel_dev->dma_tag, admin->const_tbl_map); bus_dmamap_destroy(accel_dev->dma_tag, admin->const_tbl_map); sx_destroy(&admin->lock); kfree(admin); accel_dev->admin = NULL; } diff --git a/sys/dev/qat/qat_common/adf_freebsd_transport_debug.c b/sys/dev/qat/qat_common/adf_freebsd_transport_debug.c index b8e2bf2ca336..2a597601ffb6 100644 --- a/sys/dev/qat/qat_common/adf_freebsd_transport_debug.c +++ b/sys/dev/qat/qat_common/adf_freebsd_transport_debug.c @@ -1,209 +1,212 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright(c) 2007-2022 Intel Corporation */ /* $FreeBSD$ */ #include "qat_freebsd.h" #include "adf_cfg.h" #include "adf_common_drv.h" #include "adf_accel_devices.h" #include "icp_qat_uclo.h" #include "icp_qat_fw.h" #include "icp_qat_fw_init_admin.h" #include "adf_cfg_strings.h" #include "adf_transport_access_macros.h" #include "adf_transport_internal.h" #include #include #include #include static int adf_ring_show(SYSCTL_HANDLER_ARGS) { struct adf_etr_ring_data *ring = arg1; struct adf_etr_bank_data *bank = ring->bank; + struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(bank->accel_dev); struct 
resource *csr = ring->bank->csr_addr; struct sbuf sb; int error, word; uint32_t *wp, *end; sbuf_new_for_sysctl(&sb, NULL, 128, req); { int head, tail, empty; - head = READ_CSR_RING_HEAD(csr, - bank->bank_number, - ring->ring_number); - tail = READ_CSR_RING_TAIL(csr, - bank->bank_number, - ring->ring_number); - empty = READ_CSR_E_STAT(csr, bank->bank_number); + head = csr_ops->read_csr_ring_head(csr, + bank->bank_number, + ring->ring_number); + tail = csr_ops->read_csr_ring_tail(csr, + bank->bank_number, + ring->ring_number); + empty = csr_ops->read_csr_e_stat(csr, bank->bank_number); sbuf_cat(&sb, "\n------- Ring configuration -------\n"); sbuf_printf(&sb, "ring name: %s\n", ring->ring_debug->ring_name); sbuf_printf(&sb, "ring num %d, bank num %d\n", ring->ring_number, ring->bank->bank_number); sbuf_printf(&sb, "head %x, tail %x, empty: %d\n", head, tail, (empty & 1 << ring->ring_number) >> ring->ring_number); sbuf_printf(&sb, "ring size %d, msg size %d\n", ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size), ADF_MSG_SIZE_TO_BYTES(ring->msg_size)); sbuf_cat(&sb, "----------- Ring data ------------\n"); } wp = ring->base_addr; end = (uint32_t *)((char *)ring->base_addr + ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size)); while (wp < end) { sbuf_printf(&sb, "%p:", wp); for (word = 0; word < 32 / 4; word++, wp++) sbuf_printf(&sb, " %08x", *wp); sbuf_printf(&sb, "\n"); } error = sbuf_finish(&sb); sbuf_delete(&sb); return (error); } int adf_ring_debugfs_add(struct adf_etr_ring_data *ring, const char *name) { struct adf_etr_ring_debug_entry *ring_debug; char entry_name[8]; ring_debug = malloc(sizeof(*ring_debug), M_QAT, M_WAITOK | M_ZERO); strlcpy(ring_debug->ring_name, name, sizeof(ring_debug->ring_name)); snprintf(entry_name, sizeof(entry_name), "ring_%02d", ring->ring_number); ring_debug->debug = SYSCTL_ADD_PROC(&ring->bank->accel_dev->sysctl_ctx, SYSCTL_CHILDREN(ring->bank->bank_debug_dir), OID_AUTO, entry_name, CTLFLAG_RD | CTLTYPE_STRING, ring, 0, adf_ring_show, "A", "Ring configuration"); if (!ring_debug->debug) { printf("QAT: Failed to create ring debug entry.\n"); free(ring_debug, M_QAT); return EFAULT; } ring->ring_debug = ring_debug; return 0; } void adf_ring_debugfs_rm(struct adf_etr_ring_data *ring) { if (ring->ring_debug) { free(ring->ring_debug, M_QAT); ring->ring_debug = NULL; } } static int adf_bank_show(SYSCTL_HANDLER_ARGS) { struct adf_etr_bank_data *bank; struct adf_accel_dev *accel_dev = NULL; + struct adf_hw_csr_ops *csr_ops = NULL; struct adf_hw_device_data *hw_data = NULL; u8 num_rings_per_bank = 0; struct sbuf sb; int error, ring_id; sbuf_new_for_sysctl(&sb, NULL, 128, req); bank = arg1; accel_dev = bank->accel_dev; + csr_ops = GET_CSR_OPS(bank->accel_dev); hw_data = accel_dev->hw_device; num_rings_per_bank = hw_data->num_rings_per_bank; sbuf_printf(&sb, "\n------- Bank %d configuration -------\n", bank->bank_number); for (ring_id = 0; ring_id < num_rings_per_bank; ring_id++) { struct adf_etr_ring_data *ring = &bank->rings[ring_id]; struct resource *csr = bank->csr_addr; int head, tail, empty; if (!(bank->ring_mask & 1 << ring_id)) continue; - head = READ_CSR_RING_HEAD(csr, - bank->bank_number, - ring->ring_number); - tail = READ_CSR_RING_TAIL(csr, - bank->bank_number, - ring->ring_number); - empty = READ_CSR_E_STAT(csr, bank->bank_number); + head = csr_ops->read_csr_ring_head(csr, + bank->bank_number, + ring->ring_number); + tail = csr_ops->read_csr_ring_tail(csr, + bank->bank_number, + ring->ring_number); + empty = csr_ops->read_csr_e_stat(csr, bank->bank_number); 
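/*
 * e_stat is a per-bank bitmask in which a set bit marks an empty ring;
 * the expression below isolates this ring's bit and shifts it down to
 * 0 or 1 for display. adf_poll_bank() relies on the same encoding when
 * it complements the register to find rings with data.
 */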
sbuf_printf(&sb, "ring num %02d, head %04x, tail %04x, empty: %d\n", ring->ring_number, head, tail, (empty & 1 << ring->ring_number) >> ring->ring_number); } error = sbuf_finish(&sb); sbuf_delete(&sb); return (error); } int adf_bank_debugfs_add(struct adf_etr_bank_data *bank) { struct adf_accel_dev *accel_dev = bank->accel_dev; struct sysctl_oid *parent = accel_dev->transport->debug; char name[9]; snprintf(name, sizeof(name), "bank_%03d", bank->bank_number); bank->bank_debug_dir = SYSCTL_ADD_NODE(&accel_dev->sysctl_ctx, SYSCTL_CHILDREN(parent), OID_AUTO, name, CTLFLAG_RD | CTLFLAG_SKIP, NULL, ""); if (!bank->bank_debug_dir) { printf("QAT: Failed to create bank debug dir.\n"); return EFAULT; } bank->bank_debug_cfg = SYSCTL_ADD_PROC(&accel_dev->sysctl_ctx, SYSCTL_CHILDREN(bank->bank_debug_dir), OID_AUTO, "config", CTLFLAG_RD | CTLTYPE_STRING, bank, 0, adf_bank_show, "A", "Bank configuration"); if (!bank->bank_debug_cfg) { printf("QAT: Failed to create bank debug entry.\n"); return EFAULT; } return 0; } void adf_bank_debugfs_rm(struct adf_etr_bank_data *bank) { } diff --git a/sys/dev/qat/qat_common/adf_gen2_hw_data.c b/sys/dev/qat/qat_common/adf_gen2_hw_data.c new file mode 100644 index 000000000000..d3babf8800ba --- /dev/null +++ b/sys/dev/qat/qat_common/adf_gen2_hw_data.c @@ -0,0 +1,132 @@ +/* SPDX-License-Identifier: BSD-3-Clause */ +/* Copyright(c) 2021 Intel Corporation */ +/* $FreeBSD$ */ +#include "adf_gen2_hw_data.h" +#include "icp_qat_hw.h" + +static u64 +build_csr_ring_base_addr(bus_addr_t addr, u32 size) +{ + return BUILD_RING_BASE_ADDR(addr, size); +} + +static u32 +read_csr_ring_head(struct resource *csr_base_addr, u32 bank, u32 ring) +{ + return READ_CSR_RING_HEAD(csr_base_addr, bank, ring); +} + +static void +write_csr_ring_head(struct resource *csr_base_addr, + u32 bank, + u32 ring, + u32 value) +{ + WRITE_CSR_RING_HEAD(csr_base_addr, bank, ring, value); +} + +static u32 +read_csr_ring_tail(struct resource *csr_base_addr, u32 bank, u32 ring) +{ + return READ_CSR_RING_TAIL(csr_base_addr, bank, ring); +} + +static void +write_csr_ring_tail(struct resource *csr_base_addr, + u32 bank, + u32 ring, + u32 value) +{ + WRITE_CSR_RING_TAIL(csr_base_addr, bank, ring, value); +} + +static u32 +read_csr_e_stat(struct resource *csr_base_addr, u32 bank) +{ + return READ_CSR_E_STAT(csr_base_addr, bank); +} + +static void +write_csr_ring_config(struct resource *csr_base_addr, + u32 bank, + u32 ring, + u32 value) +{ + WRITE_CSR_RING_CONFIG(csr_base_addr, bank, ring, value); +} + +static void +write_csr_ring_base(struct resource *csr_base_addr, + u32 bank, + u32 ring, + bus_addr_t addr) +{ + WRITE_CSR_RING_BASE(csr_base_addr, bank, ring, addr); +} + +static void +write_csr_int_flag(struct resource *csr_base_addr, u32 bank, u32 value) +{ + WRITE_CSR_INT_FLAG(csr_base_addr, bank, value); +} + +static void +write_csr_int_srcsel(struct resource *csr_base_addr, u32 bank) +{ + WRITE_CSR_INT_SRCSEL(csr_base_addr, bank); +} + +static void +write_csr_int_col_en(struct resource *csr_base_addr, u32 bank, u32 value) +{ + WRITE_CSR_INT_COL_EN(csr_base_addr, bank, value); +} + +static void +write_csr_int_col_ctl(struct resource *csr_base_addr, u32 bank, u32 value) +{ + WRITE_CSR_INT_COL_CTL(csr_base_addr, bank, value); +} + +static void +write_csr_int_flag_and_col(struct resource *csr_base_addr, u32 bank, u32 value) +{ + WRITE_CSR_INT_FLAG_AND_COL(csr_base_addr, bank, value); +} + +static u32 +read_csr_ring_srv_arb_en(struct resource *csr_base_addr, u32 bank) +{ + return 
READ_CSR_RING_SRV_ARB_EN(csr_base_addr, bank); +} + +static void +write_csr_ring_srv_arb_en(struct resource *csr_base_addr, u32 bank, u32 value) +{ + WRITE_CSR_RING_SRV_ARB_EN(csr_base_addr, bank, value); +} + +void +adf_gen2_init_hw_csr_info(struct adf_hw_csr_info *csr_info) +{ + struct adf_hw_csr_ops *csr_ops = &csr_info->csr_ops; + + csr_info->arb_enable_mask = 0xFF; + + csr_ops->build_csr_ring_base_addr = build_csr_ring_base_addr; + csr_ops->read_csr_ring_head = read_csr_ring_head; + csr_ops->write_csr_ring_head = write_csr_ring_head; + csr_ops->read_csr_ring_tail = read_csr_ring_tail; + csr_ops->write_csr_ring_tail = write_csr_ring_tail; + csr_ops->read_csr_e_stat = read_csr_e_stat; + csr_ops->write_csr_ring_config = write_csr_ring_config; + csr_ops->write_csr_ring_base = write_csr_ring_base; + csr_ops->write_csr_int_flag = write_csr_int_flag; + csr_ops->write_csr_int_srcsel = write_csr_int_srcsel; + csr_ops->write_csr_int_col_en = write_csr_int_col_en; + csr_ops->write_csr_int_col_ctl = write_csr_int_col_ctl; + csr_ops->write_csr_int_flag_and_col = write_csr_int_flag_and_col; + csr_ops->read_csr_ring_srv_arb_en = read_csr_ring_srv_arb_en; + csr_ops->write_csr_ring_srv_arb_en = write_csr_ring_srv_arb_en; +} +EXPORT_SYMBOL_GPL(adf_gen2_init_hw_csr_info); diff --git a/sys/dev/qat/qat_common/adf_gen4_hw_data.c b/sys/dev/qat/qat_common/adf_gen4_hw_data.c new file mode 100644 index 000000000000..aae54898afb1 --- /dev/null +++ b/sys/dev/qat/qat_common/adf_gen4_hw_data.c @@ -0,0 +1,176 @@ +/* SPDX-License-Identifier: BSD-3-Clause */ +/* Copyright(c) 2021 Intel Corporation */ +/* $FreeBSD$ */ +#include "adf_accel_devices.h" +#include "adf_gen4_hw_data.h" + +static u64 +build_csr_ring_base_addr(bus_addr_t addr, u32 size) +{ + return BUILD_RING_BASE_ADDR(addr, size); +} + +static u32 +read_csr_ring_head(struct resource *csr_base_addr, u32 bank, u32 ring) +{ + return READ_CSR_RING_HEAD(csr_base_addr, bank, ring); +} + +static void +write_csr_ring_head(struct resource *csr_base_addr, + u32 bank, + u32 ring, + u32 value) +{ + WRITE_CSR_RING_HEAD(csr_base_addr, bank, ring, value); +} + +static u32 +read_csr_ring_tail(struct resource *csr_base_addr, u32 bank, u32 ring) +{ + return READ_CSR_RING_TAIL(csr_base_addr, bank, ring); +} + +static void +write_csr_ring_tail(struct resource *csr_base_addr, + u32 bank, + u32 ring, + u32 value) +{ + WRITE_CSR_RING_TAIL(csr_base_addr, bank, ring, value); +} + +static u32 +read_csr_e_stat(struct resource *csr_base_addr, u32 bank) +{ + return READ_CSR_E_STAT(csr_base_addr, bank); +} + +static void +write_csr_ring_config(struct resource *csr_base_addr, + u32 bank, + u32 ring, + u32 value) +{ + WRITE_CSR_RING_CONFIG(csr_base_addr, bank, ring, value); +} + +static void +write_csr_ring_base(struct resource *csr_base_addr, + u32 bank, + u32 ring, + bus_addr_t addr) +{ + WRITE_CSR_RING_BASE(csr_base_addr, bank, ring, addr); +} + +static void +write_csr_int_flag(struct resource *csr_base_addr, u32 bank, u32 value) +{ + WRITE_CSR_INT_FLAG(csr_base_addr, bank, value); +} + +static void +write_csr_int_srcsel(struct resource *csr_base_addr, u32 bank) +{ + WRITE_CSR_INT_SRCSEL(csr_base_addr, bank); +} + +static void +write_csr_int_col_en(struct resource *csr_base_addr, u32 bank, u32 value) +{ + WRITE_CSR_INT_COL_EN(csr_base_addr, bank, value); +} + +static void +write_csr_int_col_ctl(struct resource *csr_base_addr, u32 bank, u32 value) +{ + WRITE_CSR_INT_COL_CTL(csr_base_addr, bank, value); +} + +static void +write_csr_int_flag_and_col(struct resource *csr_base_addr, u32 
bank, u32 value) +{ + WRITE_CSR_INT_FLAG_AND_COL(csr_base_addr, bank, value); +} + +static u32 +read_csr_ring_srv_arb_en(struct resource *csr_base_addr, u32 bank) +{ + return READ_CSR_RING_SRV_ARB_EN(csr_base_addr, bank); +} + +static void +write_csr_ring_srv_arb_en(struct resource *csr_base_addr, u32 bank, u32 value) +{ + WRITE_CSR_RING_SRV_ARB_EN(csr_base_addr, bank, value); +} + +void +adf_gen4_init_hw_csr_info(struct adf_hw_csr_info *csr_info) +{ + struct adf_hw_csr_ops *csr_ops = &csr_info->csr_ops; + + csr_info->arb_enable_mask = 0x1; + + csr_ops->build_csr_ring_base_addr = build_csr_ring_base_addr; + csr_ops->read_csr_ring_head = read_csr_ring_head; + csr_ops->write_csr_ring_head = write_csr_ring_head; + csr_ops->read_csr_ring_tail = read_csr_ring_tail; + csr_ops->write_csr_ring_tail = write_csr_ring_tail; + csr_ops->read_csr_e_stat = read_csr_e_stat; + csr_ops->write_csr_ring_config = write_csr_ring_config; + csr_ops->write_csr_ring_base = write_csr_ring_base; + csr_ops->write_csr_int_flag = write_csr_int_flag; + csr_ops->write_csr_int_srcsel = write_csr_int_srcsel; + csr_ops->write_csr_int_col_en = write_csr_int_col_en; + csr_ops->write_csr_int_col_ctl = write_csr_int_col_ctl; + csr_ops->write_csr_int_flag_and_col = write_csr_int_flag_and_col; + csr_ops->read_csr_ring_srv_arb_en = read_csr_ring_srv_arb_en; + csr_ops->write_csr_ring_srv_arb_en = write_csr_ring_srv_arb_en; +} +EXPORT_SYMBOL_GPL(adf_gen4_init_hw_csr_info); + +static inline void +adf_gen4_unpack_ssm_wdtimer(u64 value, u32 *upper, u32 *lower) +{ + *lower = lower_32_bits(value); + *upper = upper_32_bits(value); +} + +int +adf_gen4_set_ssm_wdtimer(struct adf_accel_dev *accel_dev) +{ + struct adf_hw_device_data *hw_data = accel_dev->hw_device; + u64 timer_val_pke = ADF_SSM_WDT_PKE_DEFAULT_VALUE; + u64 timer_val = ADF_SSM_WDT_DEFAULT_VALUE; + u32 ssm_wdt_pke_high = 0; + u32 ssm_wdt_pke_low = 0; + u32 ssm_wdt_high = 0; + u32 ssm_wdt_low = 0; + struct resource *pmisc_addr; + struct adf_bar *pmisc; + int pmisc_id; + + pmisc_id = hw_data->get_misc_bar_id(hw_data); + pmisc = &GET_BARS(accel_dev)[pmisc_id]; + pmisc_addr = pmisc->virt_addr; + + /* Convert 64bit WDT timer value into 32bit values for + * mmio write to 32bit CSRs. 
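+ * The same split is applied to both the generic (sym/dc)
+ * watchdog value and the PKE watchdog value below.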
+ */ + adf_gen4_unpack_ssm_wdtimer(timer_val, &ssm_wdt_high, &ssm_wdt_low); + adf_gen4_unpack_ssm_wdtimer(timer_val_pke, + &ssm_wdt_pke_high, + &ssm_wdt_pke_low); + + /* Enable WDT for sym and dc */ + ADF_CSR_WR(pmisc_addr, ADF_SSMWDTL_OFFSET, ssm_wdt_low); + ADF_CSR_WR(pmisc_addr, ADF_SSMWDTH_OFFSET, ssm_wdt_high); + /* Enable WDT for pke */ + ADF_CSR_WR(pmisc_addr, ADF_SSMWDTPKEL_OFFSET, ssm_wdt_pke_low); + ADF_CSR_WR(pmisc_addr, ADF_SSMWDTPKEH_OFFSET, ssm_wdt_pke_high); + + return 0; +} +EXPORT_SYMBOL_GPL(adf_gen4_set_ssm_wdtimer); diff --git a/sys/dev/qat/qat_common/adf_heartbeat.c b/sys/dev/qat/qat_common/adf_heartbeat.c index cf56ca98af33..1feaa37d48bf 100644 --- a/sys/dev/qat/qat_common/adf_heartbeat.c +++ b/sys/dev/qat/qat_common/adf_heartbeat.c @@ -1,213 +1,220 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright(c) 2007-2022 Intel Corporation */ /* $FreeBSD$ */ #include #include #include "qat_freebsd.h" #include "adf_heartbeat.h" #include "adf_common_drv.h" #include "adf_cfg.h" #include "adf_cfg_strings.h" #include "icp_qat_fw_init_admin.h" #include "adf_transport_internal.h" #define MAX_HB_TICKS 0xFFFFFFFF static int adf_check_hb_poll_freq(struct adf_accel_dev *accel_dev) { u64 curr_hb_check_time = 0; char timer_str[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = { 0 }; unsigned int timer_val = ADF_CFG_HB_DEFAULT_VALUE; curr_hb_check_time = adf_clock_get_current_time(); if (!adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC, ADF_HEARTBEAT_TIMER, (char *)timer_str)) { if (compat_strtouint((char *)timer_str, ADF_CFG_BASE_DEC, &timer_val)) timer_val = ADF_CFG_HB_DEFAULT_VALUE; } if ((curr_hb_check_time - accel_dev->heartbeat->last_hb_check_time) < timer_val) { return EINVAL; } accel_dev->heartbeat->last_hb_check_time = curr_hb_check_time; return 0; } int adf_heartbeat_init(struct adf_accel_dev *accel_dev) { if (accel_dev->heartbeat) adf_heartbeat_clean(accel_dev); accel_dev->heartbeat = malloc(sizeof(*accel_dev->heartbeat), M_QAT, M_WAITOK | M_ZERO); return 0; } void adf_heartbeat_clean(struct adf_accel_dev *accel_dev) { free(accel_dev->heartbeat, M_QAT); accel_dev->heartbeat = NULL; } int adf_get_hb_timer(struct adf_accel_dev *accel_dev, unsigned int *value) { struct adf_hw_device_data *hw_data = accel_dev->hw_device; char timer_str[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = { 0 }; unsigned int timer_val = ADF_CFG_HB_DEFAULT_VALUE; u32 clk_per_sec = 0; - if (!hw_data->get_ae_clock) + /* HB clock may be different than AE clock */ + if (hw_data->get_hb_clock) { + clk_per_sec = (u32)hw_data->get_hb_clock(hw_data); + } else if (hw_data->get_ae_clock) { + clk_per_sec = (u32)hw_data->get_ae_clock(hw_data); + } else { return EINVAL; - - clk_per_sec = (u32)hw_data->get_ae_clock(hw_data); + } /* Get Heartbeat Timer value from the configuration */ if (!adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC, ADF_HEARTBEAT_TIMER, (char *)timer_str)) { if (compat_strtouint((char *)timer_str, ADF_CFG_BASE_DEC, &timer_val)) timer_val = ADF_CFG_HB_DEFAULT_VALUE; } if (timer_val < ADF_MIN_HB_TIMER_MS) { device_printf(GET_DEV(accel_dev), "%s value cannot be lesser than %u\n", ADF_HEARTBEAT_TIMER, ADF_MIN_HB_TIMER_MS); return EINVAL; } /* Convert msec to clocks */ clk_per_sec = clk_per_sec / 1000; *value = timer_val * clk_per_sec; return 0; } -struct adf_hb_count { - u16 ae_thread[ADF_NUM_HB_CNT_PER_AE]; -}; - int adf_get_heartbeat_status(struct adf_accel_dev *accel_dev) { + struct icp_qat_fw_init_admin_hb_cnt *live_s, *last_s, *curr_s; struct adf_hw_device_data *hw_device = accel_dev->hw_device; - struct 
icp_qat_fw_init_admin_hb_stats *live_s = - (struct icp_qat_fw_init_admin_hb_stats *) - accel_dev->admin->virt_hb_addr; const size_t max_aes = hw_device->get_num_aes(hw_device); + const size_t hb_ctrs = hw_device->heartbeat_ctr_num; const size_t stats_size = - max_aes * sizeof(struct icp_qat_fw_init_admin_hb_stats); + max_aes * hb_ctrs * sizeof(struct icp_qat_fw_init_admin_hb_cnt); int ret = 0; size_t ae, thr; + u16 *count_s; unsigned long ae_mask = 0; - int num_threads_per_ae = ADF_NUM_HB_CNT_PER_AE; /* * Memory layout of Heartbeat * * +----------------+----------------+---------+ * | Live value | Last value | Count | * +----------------+----------------+---------+ * \_______________/\_______________/\________/ * ^ ^ ^ * | | | - * | | max_aes * sizeof(adf_hb_count) - * | max_aes * sizeof(icp_qat_fw_init_admin_hb_stats) - * max_aes * sizeof(icp_qat_fw_init_admin_hb_stats) + * | | max_aes * hb_ctrs * + * | | sizeof(u16) + * | | + * | max_aes * hb_ctrs * + * | sizeof(icp_qat_fw_init_admin_hb_cnt) + * | + * max_aes * hb_ctrs * + * sizeof(icp_qat_fw_init_admin_hb_cnt) */ - struct icp_qat_fw_init_admin_hb_stats *curr_s; - struct icp_qat_fw_init_admin_hb_stats *last_s = live_s + max_aes; - struct adf_hb_count *count = (struct adf_hb_count *)(last_s + max_aes); + live_s = (struct icp_qat_fw_init_admin_hb_cnt *) + accel_dev->admin->virt_hb_addr; + last_s = live_s + (max_aes * hb_ctrs); + count_s = (u16 *)(last_s + (max_aes * hb_ctrs)); curr_s = malloc(stats_size, M_QAT, M_WAITOK | M_ZERO); memcpy(curr_s, live_s, stats_size); ae_mask = hw_device->ae_mask; for_each_set_bit(ae, &ae_mask, max_aes) { - for (thr = 0; thr < num_threads_per_ae; ++thr) { - struct icp_qat_fw_init_admin_hb_cnt *curr = - &curr_s[ae].stats[thr]; - struct icp_qat_fw_init_admin_hb_cnt *prev = - &last_s[ae].stats[thr]; - u16 req = curr->req_heartbeat_cnt; - u16 resp = curr->resp_heartbeat_cnt; - u16 last = prev->resp_heartbeat_cnt; + struct icp_qat_fw_init_admin_hb_cnt *curr = + curr_s + ae * hb_ctrs; + struct icp_qat_fw_init_admin_hb_cnt *prev = + last_s + ae * hb_ctrs; + u16 *count = count_s + ae * hb_ctrs; + + for (thr = 0; thr < hb_ctrs; ++thr) { + u16 req = curr[thr].req_heartbeat_cnt; + u16 resp = curr[thr].resp_heartbeat_cnt; + u16 last = prev[thr].resp_heartbeat_cnt; if ((thr == ADF_AE_ADMIN_THREAD || req != resp) && resp == last) { - u16 retry = ++count[ae].ae_thread[thr]; + u16 retry = ++count[thr]; if (retry >= ADF_CFG_HB_COUNT_THRESHOLD) ret = EIO; } else { - count[ae].ae_thread[thr] = 0; + count[thr] = 0; } } } /* Copy current stats for the next iteration */ memcpy(last_s, curr_s, stats_size); free(curr_s, M_QAT); return ret; } int adf_heartbeat_status(struct adf_accel_dev *accel_dev, enum adf_device_heartbeat_status *hb_status) { /* Heartbeat is not implemented in VFs at the moment so they do not * set get_heartbeat_status. 
Also, in case the device is not up, * unsupported should be returned */ if (!accel_dev || !accel_dev->hw_device || !accel_dev->hw_device->get_heartbeat_status || !accel_dev->heartbeat) { *hb_status = DEV_HB_UNSUPPORTED; return 0; } if (!adf_dev_started(accel_dev) || test_bit(ADF_STATUS_RESTARTING, &accel_dev->status)) { *hb_status = DEV_HB_UNRESPONSIVE; accel_dev->heartbeat->last_hb_status = DEV_HB_UNRESPONSIVE; return 0; } if (adf_check_hb_poll_freq(accel_dev) == EINVAL) { *hb_status = accel_dev->heartbeat->last_hb_status; return 0; } accel_dev->heartbeat->hb_sent_counter++; if (unlikely(accel_dev->hw_device->get_heartbeat_status(accel_dev))) { device_printf(GET_DEV(accel_dev), "ERROR: QAT is not responding.\n"); *hb_status = DEV_HB_UNRESPONSIVE; accel_dev->heartbeat->last_hb_status = DEV_HB_UNRESPONSIVE; accel_dev->heartbeat->hb_failed_counter++; return adf_notify_fatal_error(accel_dev); } *hb_status = DEV_HB_ALIVE; accel_dev->heartbeat->last_hb_status = DEV_HB_ALIVE; return 0; } diff --git a/sys/dev/qat/qat_common/adf_hw_arbiter.c b/sys/dev/qat/qat_common/adf_hw_arbiter.c index 586b871f2dfd..e89a5f6ce68c 100644 --- a/sys/dev/qat/qat_common/adf_hw_arbiter.c +++ b/sys/dev/qat/qat_common/adf_hw_arbiter.c @@ -1,186 +1,208 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright(c) 2007-2022 Intel Corporation */ /* $FreeBSD$ */ #include "qat_freebsd.h" #include "adf_cfg.h" #include "adf_common_drv.h" #include "adf_accel_devices.h" #include "icp_qat_uclo.h" #include "icp_qat_fw.h" #include "icp_qat_fw_init_admin.h" #include "adf_cfg_strings.h" #include "adf_transport_access_macros.h" #include "adf_transport_internal.h" #include "adf_accel_devices.h" #include "adf_common_drv.h" #include "adf_transport_internal.h" #define ADF_ARB_NUM 4 #define ADF_ARB_REG_SIZE 0x4 #define ADF_ARB_WTR_SIZE 0x20 #define ADF_ARB_OFFSET 0x30000 #define ADF_ARB_REG_SLOT 0x1000 #define ADF_ARB_WTR_OFFSET 0x010 #define ADF_ARB_RO_EN_OFFSET 0x090 #define ADF_ARB_WQCFG_OFFSET 0x100 #define ADF_ARB_WRK_2_SER_MAP_OFFSET 0x180 #define ADF_ARB_RINGSRVARBEN_OFFSET 0x19C #define WRITE_CSR_ARB_RINGSRVARBEN(csr_addr, index, value) \ ADF_CSR_WR(csr_addr, \ ADF_ARB_RINGSRVARBEN_OFFSET + (ADF_ARB_REG_SLOT * (index)), \ value) #define WRITE_CSR_ARB_SARCONFIG(csr_addr, csr_offset, index, value) \ ADF_CSR_WR(csr_addr, (csr_offset) + (ADF_ARB_REG_SIZE * (index)), value) #define READ_CSR_ARB_RINGSRVARBEN(csr_addr, index) \ ADF_CSR_RD(csr_addr, \ ADF_ARB_RINGSRVARBEN_OFFSET + (ADF_ARB_REG_SLOT * (index))) static DEFINE_MUTEX(csr_arb_lock); #define WRITE_CSR_ARB_WRK_2_SER_MAP( \ csr_addr, csr_offset, wrk_to_ser_map_offset, index, value) \ ADF_CSR_WR(csr_addr, \ ((csr_offset) + (wrk_to_ser_map_offset)) + \ (ADF_ARB_REG_SIZE * (index)), \ value) int adf_init_arb(struct adf_accel_dev *accel_dev) { struct adf_hw_device_data *hw_data = accel_dev->hw_device; struct arb_info info; struct resource *csr = accel_dev->transport->banks[0].csr_addr; u32 arb_cfg = 0x1 << 31 | 0x4 << 4 | 0x1; u32 arb; hw_data->get_arb_info(&info); /* Service arb configured for 32 bytes responses and * ring flow control check enabled. 
*/ for (arb = 0; arb < ADF_ARB_NUM; arb++) WRITE_CSR_ARB_SARCONFIG(csr, info.arbiter_offset, arb, arb_cfg); return 0; } int adf_init_gen2_arb(struct adf_accel_dev *accel_dev) { struct adf_hw_device_data *hw_data = accel_dev->hw_device; struct arb_info info; struct resource *csr = accel_dev->transport->banks[0].csr_addr; u32 i; const u32 *thd_2_arb_cfg; /* invoke common adf_init_arb */ adf_init_arb(accel_dev); hw_data->get_arb_info(&info); /* Map worker threads to service arbiters */ hw_data->get_arb_mapping(accel_dev, &thd_2_arb_cfg); if (!thd_2_arb_cfg) return EFAULT; for (i = 0; i < hw_data->num_engines; i++) WRITE_CSR_ARB_WRK_2_SER_MAP(csr, info.arbiter_offset, info.wrk_thd_2_srv_arb_map, i, *(thd_2_arb_cfg + i)); return 0; } void adf_update_ring_arb(struct adf_etr_ring_data *ring) { - WRITE_CSR_ARB_RINGSRVARBEN(ring->bank->csr_addr, - ring->bank->bank_number, - ring->bank->ring_mask & 0xFF); + int shift; + u32 arben, arben_tx, arben_rx, arb_mask; + struct adf_accel_dev *accel_dev = ring->bank->accel_dev; + struct adf_hw_csr_info *csr_info = &accel_dev->hw_device->csr_info; + struct adf_hw_csr_ops *csr_ops = &csr_info->csr_ops; + + arb_mask = csr_info->arb_enable_mask; + shift = hweight32(arb_mask); + + arben_tx = ring->bank->ring_mask & arb_mask; + arben_rx = (ring->bank->ring_mask >> shift) & arb_mask; + arben = arben_tx & arben_rx; + csr_ops->write_csr_ring_srv_arb_en(ring->bank->csr_addr, + ring->bank->bank_number, + arben); } void -adf_enable_ring_arb(void *csr_addr, unsigned int bank_nr, unsigned int mask) +adf_enable_ring_arb(struct adf_accel_dev *accel_dev, + void *csr_addr, + unsigned int bank_nr, + unsigned int mask) { + struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev); struct resource *csr = csr_addr; u32 arbenable; if (!csr) return; mutex_lock(&csr_arb_lock); - arbenable = READ_CSR_ARB_RINGSRVARBEN(csr, bank_nr); + arbenable = csr_ops->read_csr_ring_srv_arb_en(csr, bank_nr); arbenable |= mask & 0xFF; - WRITE_CSR_ARB_RINGSRVARBEN(csr, bank_nr, arbenable); + csr_ops->write_csr_ring_srv_arb_en(csr, bank_nr, arbenable); mutex_unlock(&csr_arb_lock); } void -adf_disable_ring_arb(void *csr_addr, unsigned int bank_nr, unsigned int mask) +adf_disable_ring_arb(struct adf_accel_dev *accel_dev, + void *csr_addr, + unsigned int bank_nr, + unsigned int mask) { + struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev); struct resource *csr = csr_addr; u32 arbenable; if (!csr_addr) return; mutex_lock(&csr_arb_lock); - arbenable = READ_CSR_ARB_RINGSRVARBEN(csr, bank_nr); + arbenable = csr_ops->read_csr_ring_srv_arb_en(csr, bank_nr); arbenable &= ~mask & 0xFF; - WRITE_CSR_ARB_RINGSRVARBEN(csr, bank_nr, arbenable); + csr_ops->write_csr_ring_srv_arb_en(csr, bank_nr, arbenable); mutex_unlock(&csr_arb_lock); } void adf_exit_arb(struct adf_accel_dev *accel_dev) { + struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev); struct adf_hw_device_data *hw_data = accel_dev->hw_device; struct arb_info info; struct resource *csr; unsigned int i; if (!accel_dev->transport) return; csr = accel_dev->transport->banks[0].csr_addr; hw_data->get_arb_info(&info); /* Reset arbiter configuration */ for (i = 0; i < ADF_ARB_NUM; i++) WRITE_CSR_ARB_SARCONFIG(csr, info.arbiter_offset, i, 0); /* Unmap worker threads to service arbiters */ if (hw_data->get_arb_mapping) { for (i = 0; i < hw_data->num_engines; i++) WRITE_CSR_ARB_WRK_2_SER_MAP(csr, info.arbiter_offset, info.wrk_thd_2_srv_arb_map, i, 0); } /* Disable arbitration on all rings */ for (i = 0; i < GET_MAX_BANKS(accel_dev); i++) - 
WRITE_CSR_ARB_RINGSRVARBEN(csr, i, 0); + csr_ops->write_csr_ring_srv_arb_en(csr, i, 0); } void adf_disable_arb(struct adf_accel_dev *accel_dev) { + struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev); struct resource *csr; unsigned int i; if (!accel_dev || !accel_dev->transport) return; csr = accel_dev->transport->banks[0].csr_addr; /* Disable arbitration on all rings */ for (i = 0; i < GET_MAX_BANKS(accel_dev); i++) - WRITE_CSR_ARB_RINGSRVARBEN(csr, i, 0); + csr_ops->write_csr_ring_srv_arb_en(csr, i, 0); } diff --git a/sys/dev/qat/qat_common/adf_init.c b/sys/dev/qat/qat_common/adf_init.c index 2e5f77d22ea6..0fb8618b1f32 100644 --- a/sys/dev/qat/qat_common/adf_init.c +++ b/sys/dev/qat/qat_common/adf_init.c @@ -1,730 +1,737 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright(c) 2007-2022 Intel Corporation */ /* $FreeBSD$ */ #include "qat_freebsd.h" #include "adf_cfg.h" #include "adf_common_drv.h" #include "adf_accel_devices.h" #include "icp_qat_uclo.h" #include "icp_qat_fw.h" #include "icp_qat_fw_init_admin.h" #include "adf_cfg_strings.h" #include "adf_dev_err.h" #include "adf_transport_access_macros.h" #include "adf_transport_internal.h" #include #include #include "adf_accel_devices.h" #include "adf_cfg.h" #include "adf_common_drv.h" #include "icp_qat_fw.h" /* Mask used to check the CompressAndVerify capability bit */ #define DC_CNV_EXTENDED_CAPABILITY (0x01) /* Mask used to check the CompressAndVerifyAndRecover capability bit */ #define DC_CNVNR_EXTENDED_CAPABILITY (0x100) static LIST_HEAD(service_table); static DEFINE_MUTEX(service_lock); static void adf_service_add(struct service_hndl *service) { mutex_lock(&service_lock); list_add(&service->list, &service_table); mutex_unlock(&service_lock); } int adf_service_register(struct service_hndl *service) { memset(service->init_status, 0, sizeof(service->init_status)); memset(service->start_status, 0, sizeof(service->start_status)); adf_service_add(service); return 0; } static void adf_service_remove(struct service_hndl *service) { mutex_lock(&service_lock); list_del(&service->list); mutex_unlock(&service_lock); } int adf_service_unregister(struct service_hndl *service) { int i; for (i = 0; i < ARRAY_SIZE(service->init_status); i++) { if (service->init_status[i] || service->start_status[i]) { pr_err("QAT: Could not remove active service [%d]\n", i); return EFAULT; } } adf_service_remove(service); return 0; } static int adf_cfg_add_device_params(struct adf_accel_dev *accel_dev) { char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES]; char hw_version[ADF_CFG_MAX_VAL_LEN_IN_BYTES]; char mmp_version[ADF_CFG_MAX_VAL_LEN_IN_BYTES]; struct adf_hw_device_data *hw_data = NULL; unsigned long val; if (!accel_dev) return -EINVAL; hw_data = accel_dev->hw_device; if (adf_cfg_section_add(accel_dev, ADF_GENERAL_SEC)) goto err; snprintf(key, sizeof(key), ADF_DEV_MAX_BANKS); val = GET_MAX_BANKS(accel_dev); if (adf_cfg_add_key_value_param( accel_dev, ADF_GENERAL_SEC, key, (void *)&val, ADF_DEC)) goto err; snprintf(key, sizeof(key), ADF_DEV_CAPABILITIES_MASK); val = hw_data->accel_capabilities_mask; if (adf_cfg_add_key_value_param( accel_dev, ADF_GENERAL_SEC, key, (void *)val, ADF_HEX)) goto err; snprintf(key, sizeof(key), ADF_DEV_PKG_ID); val = accel_dev->accel_id; if (adf_cfg_add_key_value_param( accel_dev, ADF_GENERAL_SEC, key, (void *)&val, ADF_DEC)) goto err; snprintf(key, sizeof(key), ADF_DEV_NODE_ID); val = dev_to_node(GET_DEV(accel_dev)); if (adf_cfg_add_key_value_param( accel_dev, ADF_GENERAL_SEC, key, (void *)&val, ADF_DEC)) goto err; snprintf(key, 
sizeof(key), ADF_DEV_MAX_RINGS_PER_BANK); val = hw_data->num_rings_per_bank; if (adf_cfg_add_key_value_param( accel_dev, ADF_GENERAL_SEC, key, (void *)&val, ADF_DEC)) goto err; snprintf(key, sizeof(key), ADF_HW_REV_ID_KEY); snprintf(hw_version, ADF_CFG_MAX_VAL_LEN_IN_BYTES, "%d", accel_dev->accel_pci_dev.revid); if (adf_cfg_add_key_value_param( accel_dev, ADF_GENERAL_SEC, key, (void *)hw_version, ADF_STR)) goto err; snprintf(key, sizeof(key), ADF_MMP_VER_KEY); snprintf(mmp_version, ADF_CFG_MAX_VAL_LEN_IN_BYTES, "%d.%d.%d", accel_dev->fw_versions.mmp_version_major, accel_dev->fw_versions.mmp_version_minor, accel_dev->fw_versions.mmp_version_patch); if (adf_cfg_add_key_value_param( accel_dev, ADF_GENERAL_SEC, key, (void *)mmp_version, ADF_STR)) goto err; return 0; err: device_printf(GET_DEV(accel_dev), "Failed to add internal values to accel_dev cfg\n"); return -EINVAL; } static int adf_cfg_add_fw_version(struct adf_accel_dev *accel_dev) { char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES]; char fw_version[ADF_CFG_MAX_VAL_LEN_IN_BYTES]; snprintf(key, sizeof(key), ADF_UOF_VER_KEY); snprintf(fw_version, ADF_CFG_MAX_VAL_LEN_IN_BYTES, "%d.%d.%d", accel_dev->fw_versions.fw_version_major, accel_dev->fw_versions.fw_version_minor, accel_dev->fw_versions.fw_version_patch); if (adf_cfg_add_key_value_param( accel_dev, ADF_GENERAL_SEC, key, (void *)fw_version, ADF_STR)) return EFAULT; return 0; } static int adf_cfg_add_ext_params(struct adf_accel_dev *accel_dev) { char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES]; struct adf_hw_device_data *hw_data = accel_dev->hw_device; unsigned long val; snprintf(key, sizeof(key), ADF_DC_EXTENDED_FEATURES); val = hw_data->extended_dc_capabilities; if (adf_cfg_add_key_value_param( accel_dev, ADF_GENERAL_SEC, key, (void *)val, ADF_HEX)) return -EINVAL; return 0; } void adf_error_notifier(uintptr_t arg) { struct adf_accel_dev *accel_dev = (struct adf_accel_dev *)arg; struct service_hndl *service; struct list_head *list_itr; list_for_each(list_itr, &service_table) { service = list_entry(list_itr, struct service_hndl, list); if (service->event_hld(accel_dev, ADF_EVENT_ERROR)) device_printf(GET_DEV(accel_dev), "Failed to send error event to %s.\n", service->name); } } /** * adf_set_ssm_wdtimer() - Initialize the slice hang watchdog timer. * * Return: 0 on success, error code otherwise. 
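 *
 * The timeout defaults to ADF_WDT_TIMER_SYM_COMP_MS for sym/comp and
 * ADF_GEN2_SSM_WDT_PKE_DEFAULT_VALUE for PKE; both may be overridden
 * through the ADF_DEV_SSM_WDT_BULK and ADF_DEV_SSM_WDT_PKE
 * configuration entries, with millisecond values converted to CPP
 * clock cycles before they are written to the per-accelerator CSRs.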
*/ int adf_set_ssm_wdtimer(struct adf_accel_dev *accel_dev) { struct adf_hw_device_data *hw_data = accel_dev->hw_device; struct adf_bar *misc_bar = &GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)]; struct resource *csr = misc_bar->virt_addr; u32 i; unsigned int mask; u32 clk_per_sec = hw_data->get_clock_speed(hw_data); u32 timer_val = ADF_WDT_TIMER_SYM_COMP_MS * (clk_per_sec / 1000); - u32 timer_val_pke = ADF_SSM_WDT_PKE_DEFAULT_VALUE; + u32 timer_val_pke = ADF_GEN2_SSM_WDT_PKE_DEFAULT_VALUE; char timer_str[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = { 0 }; /* Get Watch Dog Timer for CySym+Comp from the configuration */ if (!adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC, ADF_DEV_SSM_WDT_BULK, (char *)timer_str)) { if (!compat_strtouint((char *)timer_str, ADF_CFG_BASE_DEC, &timer_val)) /* Convert msec to CPP clocks */ timer_val = timer_val * (clk_per_sec / 1000); } /* Get Watch Dog Timer for CyAsym from the configuration */ if (!adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC, ADF_DEV_SSM_WDT_PKE, (char *)timer_str)) { if (!compat_strtouint((char *)timer_str, ADF_CFG_BASE_DEC, &timer_val_pke)) /* Convert msec to CPP clocks */ timer_val_pke = timer_val_pke * (clk_per_sec / 1000); } for (i = 0, mask = hw_data->accel_mask; mask; i++, mask >>= 1) { if (!(mask & 1)) continue; /* Enable Watch Dog Timer for CySym + Comp */ ADF_CSR_WR(csr, ADF_SSMWDT(i), timer_val); /* Enable Watch Dog Timer for CyAsym */ ADF_CSR_WR(csr, ADF_SSMWDTPKE(i), timer_val_pke); } return 0; } /** * adf_dev_init() - Init data structures and services for the given accel device * @accel_dev: Pointer to acceleration device. * * Initialize the ring data structures and the admin comms and arbitration * services. * * Return: 0 on success, error code otherwise. */ int adf_dev_init(struct adf_accel_dev *accel_dev) { struct service_hndl *service; struct list_head *list_itr; struct adf_hw_device_data *hw_data = accel_dev->hw_device; char value[ADF_CFG_MAX_VAL_LEN_IN_BYTES]; int ret = 0; sysctl_ctx_init(&accel_dev->sysctl_ctx); set_bit(ADF_STATUS_SYSCTL_CTX_INITIALISED, &accel_dev->status); if (!hw_data) { device_printf(GET_DEV(accel_dev), "Failed to init device - hw_data not set\n"); return EFAULT; } if (hw_data->reset_hw_units) hw_data->reset_hw_units(accel_dev); if (!test_bit(ADF_STATUS_CONFIGURED, &accel_dev->status) && !accel_dev->is_vf) { device_printf(GET_DEV(accel_dev), "Device not configured\n"); return EFAULT; } if (adf_init_etr_data(accel_dev)) { device_printf(GET_DEV(accel_dev), "Failed initialize etr\n"); return EFAULT; } + if (hw_data->init_device && hw_data->init_device(accel_dev)) { + device_printf(GET_DEV(accel_dev), + "Failed to initialize device\n"); + return EFAULT; + } + if (hw_data->init_accel_units && hw_data->init_accel_units(accel_dev)) { device_printf(GET_DEV(accel_dev), "Failed initialize accel_units\n"); return EFAULT; } if (hw_data->init_admin_comms && hw_data->init_admin_comms(accel_dev)) { device_printf(GET_DEV(accel_dev), "Failed initialize admin comms\n"); return EFAULT; } if (hw_data->init_arb && hw_data->init_arb(accel_dev)) { device_printf(GET_DEV(accel_dev), "Failed initialize hw arbiter\n"); return EFAULT; } if (hw_data->set_asym_rings_mask) hw_data->set_asym_rings_mask(accel_dev); hw_data->enable_ints(accel_dev); if (adf_ae_init(accel_dev)) { device_printf(GET_DEV(accel_dev), "Failed to initialise Acceleration Engine\n"); return EFAULT; } set_bit(ADF_STATUS_AE_INITIALISED, &accel_dev->status); if (adf_ae_fw_load(accel_dev)) { device_printf(GET_DEV(accel_dev), "Failed to load acceleration 
FW\n"); return EFAULT; } set_bit(ADF_STATUS_AE_UCODE_LOADED, &accel_dev->status); if (hw_data->alloc_irq(accel_dev)) { device_printf(GET_DEV(accel_dev), "Failed to allocate interrupts\n"); return EFAULT; } set_bit(ADF_STATUS_IRQ_ALLOCATED, &accel_dev->status); if (hw_data->init_ras && hw_data->init_ras(accel_dev)) { device_printf(GET_DEV(accel_dev), "Failed to init RAS\n"); return EFAULT; } hw_data->enable_ints(accel_dev); hw_data->enable_error_correction(accel_dev); - if (hw_data->enable_vf2pf_comms(accel_dev)) { + if (hw_data->enable_vf2pf_comms && + hw_data->enable_vf2pf_comms(accel_dev)) { device_printf(GET_DEV(accel_dev), "QAT: Failed to enable vf2pf comms\n"); return EFAULT; } if (adf_pf_vf_capabilities_init(accel_dev)) return EFAULT; if (adf_pf_vf_ring_to_svc_init(accel_dev)) return EFAULT; if (adf_cfg_add_device_params(accel_dev)) return EFAULT; if (hw_data->add_pke_stats && hw_data->add_pke_stats(accel_dev)) return EFAULT; if (hw_data->add_misc_error && hw_data->add_misc_error(accel_dev)) return EFAULT; /* * Subservice initialisation is divided into two stages: init and start. * This is to facilitate any ordering dependencies between services * prior to starting any of the accelerators. */ list_for_each(list_itr, &service_table) { service = list_entry(list_itr, struct service_hndl, list); if (service->event_hld(accel_dev, ADF_EVENT_INIT)) { device_printf(GET_DEV(accel_dev), "Failed to initialise service %s\n", service->name); return EFAULT; } set_bit(accel_dev->accel_id, service->init_status); } /* Read autoreset on error parameter */ ret = adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC, ADF_AUTO_RESET_ON_ERROR, value); if (!ret) { if (compat_strtouint(value, 10, &accel_dev->autoreset_on_error)) { device_printf( GET_DEV(accel_dev), "Failed converting %s to a decimal value\n", ADF_AUTO_RESET_ON_ERROR); return EFAULT; } } return 0; } /** * adf_dev_start() - Start acceleration service for the given accel device * @accel_dev: Pointer to acceleration device. * * Function notifies all the registered services that the acceleration device * is ready to be used. * To be used by QAT device specific drivers. * * Return: 0 on success, error code otherwise. */ int adf_dev_start(struct adf_accel_dev *accel_dev) { struct adf_hw_device_data *hw_data = accel_dev->hw_device; struct service_hndl *service; struct list_head *list_itr; set_bit(ADF_STATUS_STARTING, &accel_dev->status); if (adf_devmgr_verify_id(&accel_dev->accel_id)) { device_printf(GET_DEV(accel_dev), "QAT: Device %d not found\n", accel_dev->accel_id); return ENODEV; } if (adf_ae_start(accel_dev)) { device_printf(GET_DEV(accel_dev), "AE Start Failed\n"); return EFAULT; } set_bit(ADF_STATUS_AE_STARTED, &accel_dev->status); if (hw_data->send_admin_init(accel_dev)) { device_printf(GET_DEV(accel_dev), "Failed to send init message\n"); return EFAULT; } if (adf_cfg_add_fw_version(accel_dev)) { device_printf(GET_DEV(accel_dev), "Failed to update configuration FW version\n"); return EFAULT; } if (hw_data->measure_clock) hw_data->measure_clock(accel_dev); /* * Set ssm watch dog timer for slice hang detection * Note! 
Not supported on devices older than C62x */ if (hw_data->set_ssm_wdtimer && hw_data->set_ssm_wdtimer(accel_dev)) { device_printf(GET_DEV(accel_dev), "QAT: Failed to set ssm watch dog timer\n"); return EFAULT; } list_for_each(list_itr, &service_table) { service = list_entry(list_itr, struct service_hndl, list); if (service->event_hld(accel_dev, ADF_EVENT_START)) { device_printf(GET_DEV(accel_dev), "Failed to start service %s\n", service->name); return EFAULT; } set_bit(accel_dev->accel_id, service->start_status); } if (!test_bit(ADF_STATUS_RESTARTING, &accel_dev->status) && adf_cfg_add_ext_params(accel_dev)) return EFAULT; clear_bit(ADF_STATUS_STARTING, &accel_dev->status); set_bit(ADF_STATUS_STARTED, &accel_dev->status); return 0; } /** * adf_dev_stop() - Stop acceleration service for the given accel device * @accel_dev: Pointer to acceleration device. * * Function notifies all the registered services that the acceleration device * is shutting down. * To be used by QAT device specific drivers. * * Return: 0 on success, error code otherwise. */ int adf_dev_stop(struct adf_accel_dev *accel_dev) { struct service_hndl *service; struct list_head *list_itr; if (adf_devmgr_verify_id(&accel_dev->accel_id)) { device_printf(GET_DEV(accel_dev), "QAT: Device %d not found\n", accel_dev->accel_id); return ENODEV; } if (!adf_dev_started(accel_dev) && !test_bit(ADF_STATUS_STARTING, &accel_dev->status)) { return 0; } if (adf_dev_stop_notify_sync(accel_dev)) { device_printf( GET_DEV(accel_dev), "Waiting for device un-busy failed. Retries limit reached\n"); return EBUSY; } clear_bit(ADF_STATUS_STARTING, &accel_dev->status); clear_bit(ADF_STATUS_STARTED, &accel_dev->status); list_for_each(list_itr, &service_table) { service = list_entry(list_itr, struct service_hndl, list); if (!test_bit(accel_dev->accel_id, service->start_status)) continue; clear_bit(accel_dev->accel_id, service->start_status); } if (test_bit(ADF_STATUS_AE_STARTED, &accel_dev->status)) { if (adf_ae_stop(accel_dev)) device_printf(GET_DEV(accel_dev), "failed to stop AE\n"); else clear_bit(ADF_STATUS_AE_STARTED, &accel_dev->status); } return 0; } /** * adf_dev_shutdown() - shutdown acceleration services and data structures * @accel_dev: Pointer to acceleration device * * Cleanup the ring data structures and the admin comms and arbitration * services.
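 *
 * Teardown runs roughly in the reverse order of adf_dev_init():
 * firmware release and AE shutdown, ADF_EVENT_SHUTDOWN notification of
 * registered services, IRQ release, then RAS, arbiter, admin comms and
 * accel unit cleanup.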
*/ void adf_dev_shutdown(struct adf_accel_dev *accel_dev) { struct adf_hw_device_data *hw_data = accel_dev->hw_device; struct service_hndl *service; struct list_head *list_itr; if (test_bit(ADF_STATUS_SYSCTL_CTX_INITIALISED, &accel_dev->status)) { sysctl_ctx_free(&accel_dev->sysctl_ctx); clear_bit(ADF_STATUS_SYSCTL_CTX_INITIALISED, &accel_dev->status); } if (!hw_data) { device_printf( GET_DEV(accel_dev), "QAT: Failed to shutdown device - hw_data not set\n"); return; } if (test_bit(ADF_STATUS_AE_UCODE_LOADED, &accel_dev->status)) { adf_ae_fw_release(accel_dev); clear_bit(ADF_STATUS_AE_UCODE_LOADED, &accel_dev->status); } if (test_bit(ADF_STATUS_AE_INITIALISED, &accel_dev->status)) { if (adf_ae_shutdown(accel_dev)) device_printf(GET_DEV(accel_dev), "Failed to shutdown Accel Engine\n"); else clear_bit(ADF_STATUS_AE_INITIALISED, &accel_dev->status); } list_for_each(list_itr, &service_table) { service = list_entry(list_itr, struct service_hndl, list); if (!test_bit(accel_dev->accel_id, service->init_status)) continue; if (service->event_hld(accel_dev, ADF_EVENT_SHUTDOWN)) device_printf(GET_DEV(accel_dev), "Failed to shutdown service %s\n", service->name); else clear_bit(accel_dev->accel_id, service->init_status); } hw_data->disable_iov(accel_dev); if (hw_data->disable_vf2pf_comms) hw_data->disable_vf2pf_comms(accel_dev); if (test_bit(ADF_STATUS_IRQ_ALLOCATED, &accel_dev->status)) { hw_data->free_irq(accel_dev); clear_bit(ADF_STATUS_IRQ_ALLOCATED, &accel_dev->status); } /* Delete configuration only if not restarting */ if (!test_bit(ADF_STATUS_RESTARTING, &accel_dev->status)) adf_cfg_del_all(accel_dev); if (hw_data->remove_pke_stats) hw_data->remove_pke_stats(accel_dev); if (hw_data->remove_misc_error) hw_data->remove_misc_error(accel_dev); if (hw_data->exit_ras) hw_data->exit_ras(accel_dev); if (hw_data->exit_arb) hw_data->exit_arb(accel_dev); if (hw_data->exit_admin_comms) hw_data->exit_admin_comms(accel_dev); if (hw_data->exit_accel_units) hw_data->exit_accel_units(accel_dev); adf_cleanup_etr_data(accel_dev); if (hw_data->restore_device) hw_data->restore_device(accel_dev); } /** * adf_dev_reset() - Reset acceleration service for the given accel device * @accel_dev: Pointer to acceleration device. * @mode: Specifies reset mode - synchronous or asynchronous. * Function notifies all the registered services that the acceleration device * is resetting. * To be used by QAT device specific drivers. * * Return: 0 on success, error code otherwise. 
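 *
 * The reset itself is scheduled through adf_dev_aer_schedule_reset(),
 * which honours the requested synchronous or asynchronous mode.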
*/ int adf_dev_reset(struct adf_accel_dev *accel_dev, enum adf_dev_reset_mode mode) { return adf_dev_aer_schedule_reset(accel_dev, mode); } int adf_dev_restarting_notify(struct adf_accel_dev *accel_dev) { struct service_hndl *service; struct list_head *list_itr; list_for_each(list_itr, &service_table) { service = list_entry(list_itr, struct service_hndl, list); if (service->event_hld(accel_dev, ADF_EVENT_RESTARTING)) device_printf(GET_DEV(accel_dev), "Failed to restart service %s.\n", service->name); } return 0; } int adf_dev_restarting_notify_sync(struct adf_accel_dev *accel_dev) { int times; adf_dev_restarting_notify(accel_dev); for (times = 0; times < ADF_STOP_RETRY; times++) { if (!adf_dev_in_use(accel_dev)) break; dev_dbg(GET_DEV(accel_dev), "retry times=%d\n", times); pause_ms("adfstop", 100); } if (adf_dev_in_use(accel_dev)) { clear_bit(ADF_STATUS_RESTARTING, &accel_dev->status); device_printf(GET_DEV(accel_dev), "Device still in use during reset sequence.\n"); return EBUSY; } return 0; } int adf_dev_stop_notify_sync(struct adf_accel_dev *accel_dev) { int times; struct service_hndl *service; struct list_head *list_itr; list_for_each(list_itr, &service_table) { service = list_entry(list_itr, struct service_hndl, list); if (service->event_hld(accel_dev, ADF_EVENT_STOP)) device_printf(GET_DEV(accel_dev), "Failed to restart service %s.\n", service->name); } for (times = 0; times < ADF_STOP_RETRY; times++) { if (!adf_dev_in_use(accel_dev)) break; dev_dbg(GET_DEV(accel_dev), "retry times=%d\n", times); pause_ms("adfstop", 100); } if (adf_dev_in_use(accel_dev)) { clear_bit(ADF_STATUS_RESTARTING, &accel_dev->status); device_printf(GET_DEV(accel_dev), "Device still in use during stop sequence.\n"); return EBUSY; } return 0; } int adf_dev_restarted_notify(struct adf_accel_dev *accel_dev) { struct service_hndl *service; struct list_head *list_itr; list_for_each(list_itr, &service_table) { service = list_entry(list_itr, struct service_hndl, list); if (service->event_hld(accel_dev, ADF_EVENT_RESTARTED)) device_printf(GET_DEV(accel_dev), "Failed to restart service %s.\n", service->name); } return 0; } diff --git a/sys/dev/qat/qat_common/adf_isr.c b/sys/dev/qat/qat_common/adf_isr.c index 31e439ee60e6..1b6f232f2c4d 100644 --- a/sys/dev/qat/qat_common/adf_isr.c +++ b/sys/dev/qat/qat_common/adf_isr.c @@ -1,345 +1,351 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright(c) 2007-2022 Intel Corporation */ /* $FreeBSD$ */ #include "qat_freebsd.h" #include "adf_cfg.h" #include "adf_common_drv.h" #include "adf_accel_devices.h" #include "icp_qat_uclo.h" #include "icp_qat_fw.h" #include "icp_qat_fw_init_admin.h" #include "adf_cfg_strings.h" #include "adf_transport_access_macros.h" #include "adf_transport_internal.h" #include #include #include #include #include #include "adf_accel_devices.h" #include "adf_common_drv.h" #include "adf_cfg.h" #include "adf_cfg_strings.h" #include "adf_cfg_common.h" #include "adf_transport_access_macros.h" #include "adf_transport_internal.h" #include "adf_dev_err.h" TASKQUEUE_DEFINE_THREAD(qat_pf); static int adf_enable_msix(struct adf_accel_dev *accel_dev) { struct adf_accel_pci *info_pci_dev = &accel_dev->accel_pci_dev; struct adf_hw_device_data *hw_data = accel_dev->hw_device; int msix_num_entries = 1; int count = 0; int error = 0; int num_vectors = 0; u_int *vectors; + if (hw_data->set_msix_rttable) + hw_data->set_msix_rttable(accel_dev); + /* If SR-IOV is disabled, add entries for each bank */ if (!accel_dev->u1.pf.vf_info) { msix_num_entries += hw_data->num_banks; 
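/*
 * Vector layout without SR-IOV: one MSI-X vector per ring bank plus a
 * final vector for the AE cluster, matching the rid numbering used in
 * adf_request_irqs() (banks at 1..num_banks, AE at num_banks + 1).
 */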
num_vectors = 0; vectors = NULL; } else { num_vectors = hw_data->num_banks + 1; vectors = malloc(num_vectors * sizeof(u_int), M_QAT, M_WAITOK | M_ZERO); vectors[hw_data->num_banks] = 1; } count = msix_num_entries; error = pci_alloc_msix(info_pci_dev->pci_dev, &count); if (error == 0 && count != msix_num_entries) { pci_release_msi(info_pci_dev->pci_dev); error = EFBIG; } if (error) { device_printf(GET_DEV(accel_dev), "Failed to enable MSI-X IRQ(s)\n"); free(vectors, M_QAT); return error; } if (vectors != NULL) { error = pci_remap_msix(info_pci_dev->pci_dev, num_vectors, vectors); free(vectors, M_QAT); if (error) { device_printf(GET_DEV(accel_dev), "Failed to remap MSI-X IRQ(s)\n"); pci_release_msi(info_pci_dev->pci_dev); return error; } } return 0; } static void adf_disable_msix(struct adf_accel_pci *info_pci_dev) { pci_release_msi(info_pci_dev->pci_dev); } static void adf_msix_isr_bundle(void *bank_ptr) { struct adf_etr_bank_data *bank = bank_ptr; struct adf_etr_data *priv_data = bank->accel_dev->transport; + struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(bank->accel_dev); - WRITE_CSR_INT_FLAG_AND_COL(bank->csr_addr, bank->bank_number, 0); + csr_ops->write_csr_int_flag_and_col(bank->csr_addr, + bank->bank_number, + 0); adf_response_handler((uintptr_t)&priv_data->banks[bank->bank_number]); return; } static void adf_msix_isr_ae(void *dev_ptr) { struct adf_accel_dev *accel_dev = dev_ptr; struct adf_hw_device_data *hw_data = accel_dev->hw_device; struct adf_bar *pmisc = &GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)]; struct resource *pmisc_bar_addr = pmisc->virt_addr; u32 errsou3; u32 errsou5; bool reset_required = false; if (hw_data->ras_interrupts && hw_data->ras_interrupts(accel_dev, &reset_required)) if (reset_required) { adf_notify_fatal_error(accel_dev); goto exit; } if (hw_data->check_slice_hang && hw_data->check_slice_hang(accel_dev)) { } exit: errsou3 = ADF_CSR_RD(pmisc_bar_addr, ADF_ERRSOU3); errsou5 = ADF_CSR_RD(pmisc_bar_addr, ADF_ERRSOU5); if (errsou3 | errsou5) adf_print_err_registers(accel_dev); else device_printf(GET_DEV(accel_dev), "spurious AE interrupt\n"); return; } static int adf_get_irq_affinity(struct adf_accel_dev *accel_dev, int bank) { int core = CPU_FIRST(); char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES]; char bankName[ADF_CFG_MAX_KEY_LEN_IN_BYTES]; snprintf(bankName, ADF_CFG_MAX_KEY_LEN_IN_BYTES - 1, ADF_ETRMGR_CORE_AFFINITY_FORMAT, bank); bankName[ADF_CFG_MAX_KEY_LEN_IN_BYTES - 1] = '\0'; if (adf_cfg_get_param_value(accel_dev, "Accelerator0", bankName, val)) { device_printf(GET_DEV(accel_dev), "No CoreAffinity Set - using default core: %d\n", core); } else { if (compat_strtouint(val, 10, &core)) { device_printf(GET_DEV(accel_dev), "Can't get cpu core ID\n"); } } return (core); } static int adf_request_irqs(struct adf_accel_dev *accel_dev) { struct adf_accel_pci *info_pci_dev = &accel_dev->accel_pci_dev; struct adf_hw_device_data *hw_data = accel_dev->hw_device; struct msix_entry *msixe = info_pci_dev->msix_entries.entries; int ret = 0, rid = 0, i = 0; struct adf_etr_data *etr_data = accel_dev->transport; int computed_core = 0; /* Request msix irq for all banks unless SR-IOV enabled */ if (!accel_dev->u1.pf.vf_info) { for (i = 0; i < hw_data->num_banks; i++) { struct adf_etr_bank_data *bank = &etr_data->banks[i]; rid = i + 1; msixe[i].irq = bus_alloc_resource_any(info_pci_dev->pci_dev, SYS_RES_IRQ, &rid, RF_ACTIVE); if (msixe[i].irq == NULL) { device_printf( GET_DEV(accel_dev), "failed to allocate IRQ for bundle %d\n", i); return ENXIO; } ret = 
bus_setup_intr(info_pci_dev->pci_dev, msixe[i].irq, INTR_TYPE_MISC | INTR_MPSAFE, NULL, adf_msix_isr_bundle, bank, &msixe[i].cookie); if (ret) { device_printf( GET_DEV(accel_dev), "failed to enable IRQ for bundle %d\n", i); bus_release_resource(info_pci_dev->pci_dev, SYS_RES_IRQ, rid, msixe[i].irq); msixe[i].irq = NULL; return ret; } computed_core = adf_get_irq_affinity(accel_dev, i); bus_describe_intr(info_pci_dev->pci_dev, msixe[i].irq, msixe[i].cookie, "b%d", i); bus_bind_intr(info_pci_dev->pci_dev, msixe[i].irq, computed_core); } } /* Request msix irq for AE */ rid = hw_data->num_banks + 1; msixe[i].irq = bus_alloc_resource_any(info_pci_dev->pci_dev, SYS_RES_IRQ, &rid, RF_ACTIVE); if (msixe[i].irq == NULL) { device_printf(GET_DEV(accel_dev), "failed to allocate IRQ for ae-cluster\n"); return ENXIO; } ret = bus_setup_intr(info_pci_dev->pci_dev, msixe[i].irq, INTR_TYPE_MISC | INTR_MPSAFE, NULL, adf_msix_isr_ae, accel_dev, &msixe[i].cookie); if (ret) { device_printf(GET_DEV(accel_dev), "failed to enable IRQ for ae-cluster\n"); bus_release_resource(info_pci_dev->pci_dev, SYS_RES_IRQ, rid, msixe[i].irq); msixe[i].irq = NULL; return ret; } bus_describe_intr(info_pci_dev->pci_dev, msixe[i].irq, msixe[i].cookie, "ae"); return ret; } static void adf_free_irqs(struct adf_accel_dev *accel_dev) { struct adf_accel_pci *info_pci_dev = &accel_dev->accel_pci_dev; struct msix_entry *msixe = info_pci_dev->msix_entries.entries; int i = 0; if (info_pci_dev->msix_entries.num_entries > 0) { for (i = 0; i < info_pci_dev->msix_entries.num_entries; i++) { if (msixe[i].irq != NULL && msixe[i].cookie != NULL) { bus_teardown_intr(info_pci_dev->pci_dev, msixe[i].irq, msixe[i].cookie); bus_free_resource(info_pci_dev->pci_dev, SYS_RES_IRQ, msixe[i].irq); } } } } static int adf_isr_alloc_msix_entry_table(struct adf_accel_dev *accel_dev) { struct msix_entry *entries; u32 msix_num_entries = 1; struct adf_hw_device_data *hw_data = accel_dev->hw_device; /* If SR-IOV is disabled (vf_info is NULL), add entries for each bank */ if (!accel_dev->u1.pf.vf_info) msix_num_entries += hw_data->num_banks; entries = malloc(msix_num_entries * sizeof(struct msix_entry), M_QAT, M_WAITOK | M_ZERO); accel_dev->accel_pci_dev.msix_entries.num_entries = msix_num_entries; accel_dev->accel_pci_dev.msix_entries.entries = entries; return 0; } static void adf_isr_free_msix_entry_table(struct adf_accel_dev *accel_dev) { free(accel_dev->accel_pci_dev.msix_entries.entries, M_QAT); accel_dev->accel_pci_dev.msix_entries.entries = NULL; } /** * adf_isr_resource_free() - Free IRQ for acceleration device * @accel_dev: Pointer to acceleration device. * * Function frees interrupts for acceleration device. */ void adf_isr_resource_free(struct adf_accel_dev *accel_dev) { adf_free_irqs(accel_dev); adf_disable_msix(&accel_dev->accel_pci_dev); adf_isr_free_msix_entry_table(accel_dev); } /** * adf_isr_resource_alloc() - Allocate IRQ for acceleration device * @accel_dev: Pointer to acceleration device. * * Function allocates interrupts for acceleration device. * * Return: 0 on success, error code otherwise.
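 *
 * On failure, any partially allocated MSI-X state is released through
 * adf_isr_resource_free() before EFAULT is returned.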
*/ int adf_isr_resource_alloc(struct adf_accel_dev *accel_dev) { int ret; ret = adf_isr_alloc_msix_entry_table(accel_dev); if (ret) return ret; if (adf_enable_msix(accel_dev)) goto err_out; if (adf_request_irqs(accel_dev)) goto err_out; return 0; err_out: adf_isr_resource_free(accel_dev); return EFAULT; } diff --git a/sys/dev/qat/qat_common/adf_transport.c b/sys/dev/qat/qat_common/adf_transport.c index e896f1d70e08..a608cb1c217c 100644 --- a/sys/dev/qat/qat_common/adf_transport.c +++ b/sys/dev/qat/qat_common/adf_transport.c @@ -1,747 +1,770 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright(c) 2007-2022 Intel Corporation */ /* $FreeBSD$ */ #include "qat_freebsd.h" #include "adf_cfg.h" #include "adf_common_drv.h" #include "adf_accel_devices.h" #include "icp_qat_uclo.h" #include "icp_qat_fw.h" #include "icp_qat_fw_init_admin.h" #include "adf_cfg_strings.h" #include "adf_transport_access_macros.h" #include "adf_transport_internal.h" #include #include "adf_accel_devices.h" #include "adf_transport_internal.h" #include "adf_transport_access_macros.h" #include "adf_cfg.h" #include "adf_common_drv.h" #define QAT_RING_ALIGNMENT 64 static inline u32 adf_modulo(u32 data, u32 shift) { u32 div = data >> shift; u32 mult = div << shift; return data - mult; } static inline int adf_check_ring_alignment(u64 addr, u64 size) { if (((size - 1) & addr) != 0) return EFAULT; return 0; } static int adf_verify_ring_size(u32 msg_size, u32 msg_num) { int i = ADF_MIN_RING_SIZE; for (; i <= ADF_MAX_RING_SIZE; i++) if ((msg_size * msg_num) == ADF_SIZE_TO_RING_SIZE_IN_BYTES(i)) return i; return ADF_DEFAULT_RING_SIZE; } static int adf_reserve_ring(struct adf_etr_bank_data *bank, u32 ring) { mtx_lock(&bank->lock); if (bank->ring_mask & (1 << ring)) { mtx_unlock(&bank->lock); return EFAULT; } bank->ring_mask |= (1 << ring); mtx_unlock(&bank->lock); return 0; } static void adf_unreserve_ring(struct adf_etr_bank_data *bank, u32 ring) { mtx_lock(&bank->lock); bank->ring_mask &= ~(1 << ring); mtx_unlock(&bank->lock); } static void adf_enable_ring_irq(struct adf_etr_bank_data *bank, u32 ring) { + struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(bank->accel_dev); + mtx_lock(&bank->lock); bank->irq_mask |= (1 << ring); mtx_unlock(&bank->lock); - WRITE_CSR_INT_COL_EN(bank->csr_addr, bank->bank_number, bank->irq_mask); - WRITE_CSR_INT_COL_CTL(bank->csr_addr, - bank->bank_number, - bank->irq_coalesc_timer); + csr_ops->write_csr_int_col_en(bank->csr_addr, + bank->bank_number, + bank->irq_mask); + csr_ops->write_csr_int_col_ctl(bank->csr_addr, + bank->bank_number, + bank->irq_coalesc_timer); } static void adf_disable_ring_irq(struct adf_etr_bank_data *bank, u32 ring) { + struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(bank->accel_dev); + mtx_lock(&bank->lock); bank->irq_mask &= ~(1 << ring); mtx_unlock(&bank->lock); - WRITE_CSR_INT_COL_EN(bank->csr_addr, bank->bank_number, bank->irq_mask); + csr_ops->write_csr_int_col_en(bank->csr_addr, + bank->bank_number, + bank->irq_mask); } int adf_send_message(struct adf_etr_ring_data *ring, u32 *msg) { + struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(ring->bank->accel_dev); u32 msg_size = 0; if (atomic_add_return(1, ring->inflights) > ring->max_inflights) { atomic_dec(ring->inflights); return EAGAIN; } msg_size = ADF_MSG_SIZE_TO_BYTES(ring->msg_size); mtx_lock(&ring->lock); memcpy((void *)((uintptr_t)ring->base_addr + ring->tail), msg, msg_size); ring->tail = adf_modulo(ring->tail + msg_size, ADF_RING_SIZE_MODULO(ring->ring_size)); - WRITE_CSR_RING_TAIL(ring->bank->csr_addr, - 
ring->bank->bank_number,
-                   ring->ring_number,
-                   ring->tail);
+       csr_ops->write_csr_ring_tail(ring->bank->csr_addr,
+                                    ring->bank->bank_number,
+                                    ring->ring_number,
+                                    ring->tail);
        ring->csr_tail_offset = ring->tail;
        mtx_unlock(&ring->lock);
        return 0;
}

int
adf_handle_response(struct adf_etr_ring_data *ring, u32 quota)
{
+       struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(ring->bank->accel_dev);
        u32 msg_counter = 0;
        u32 *msg = (u32 *)((uintptr_t)ring->base_addr + ring->head);

        if (!quota)
                quota = ADF_NO_RESPONSE_QUOTA;
        while ((*msg != ADF_RING_EMPTY_SIG) && (msg_counter < quota)) {
                ring->callback((u32 *)msg);
                atomic_dec(ring->inflights);
                *msg = ADF_RING_EMPTY_SIG;
                ring->head =
                    adf_modulo(ring->head +
                                   ADF_MSG_SIZE_TO_BYTES(ring->msg_size),
                               ADF_RING_SIZE_MODULO(ring->ring_size));
                msg_counter++;
                msg = (u32 *)((uintptr_t)ring->base_addr + ring->head);
        }
        if (msg_counter > 0)
-               WRITE_CSR_RING_HEAD(ring->bank->csr_addr,
-                                   ring->bank->bank_number,
-                                   ring->ring_number,
-                                   ring->head);
+               csr_ops->write_csr_ring_head(ring->bank->csr_addr,
+                                            ring->bank->bank_number,
+                                            ring->ring_number,
+                                            ring->head);
        return msg_counter;
}

int
adf_poll_bank(u32 accel_id, u32 bank_num, u32 quota)
{
        int num_resp;
        struct adf_accel_dev *accel_dev;
        struct adf_etr_data *trans_data;
        struct adf_etr_bank_data *bank;
        struct adf_etr_ring_data *ring;
+       struct adf_hw_csr_ops *csr_ops;
        u32 rings_not_empty;
        u32 ring_num;
        u32 resp_total = 0;
        u32 num_rings_per_bank;

        /* Find the accel device associated with the accelId
         * passed in.
         */
        accel_dev = adf_devmgr_get_dev_by_id(accel_id);
        if (!accel_dev) {
                pr_err("There is no device with id: %d\n", accel_id);
                return EINVAL;
        }

+       csr_ops = GET_CSR_OPS(accel_dev);
        trans_data = accel_dev->transport;
        bank = &trans_data->banks[bank_num];
        mtx_lock(&bank->lock);

        /* Read the ring status CSR to determine which rings are empty. */
-       rings_not_empty = READ_CSR_E_STAT(bank->csr_addr, bank->bank_number);
+       rings_not_empty =
+           csr_ops->read_csr_e_stat(bank->csr_addr, bank->bank_number);
        /* Complement to find which rings have data to be processed. */
        rings_not_empty = (~rings_not_empty) & bank->ring_mask;

        /* Return RETRY if the bank polling rings
         * are all empty.
         */
        if (!(rings_not_empty & bank->ring_mask)) {
                mtx_unlock(&bank->lock);
                return EAGAIN;
        }

        /*
         * Loop over all rings within this bank.
         * The ring structure is global to all
         * rings, hence while we loop over all rings in the
         * bank we use ring_number to get the global ring.
         */
        num_rings_per_bank = accel_dev->hw_device->num_rings_per_bank;
        for (ring_num = 0; ring_num < num_rings_per_bank; ring_num++) {
                ring = &bank->rings[ring_num];

                /* AND with the polling ring mask.
                 * If there is no data on this ring
                 * move to the next one.
                 */
                if (!(rings_not_empty & (1 << ring->ring_number)))
                        continue;

                /* Poll the ring. */
                num_resp = adf_handle_response(ring, quota);
                resp_total += num_resp;
        }

        mtx_unlock(&bank->lock);

        /* Return SUCCESS if any response messages
         * were returned.
         */
        if (resp_total)
                return 0;
        return EAGAIN;
}

int
adf_poll_all_banks(u32 accel_id, u32 quota)
{
        int status = EAGAIN;
        struct adf_accel_dev *accel_dev;
        struct adf_etr_data *trans_data;
        struct adf_etr_bank_data *bank;
        u32 bank_num;
        u32 stat_total = 0;

        /* Find the accel device associated with the accelId
         * passed in.
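         * Unlike adf_poll_bank(), every bank that has polling rings
         * configured is polled here; EAGAIN means no bank had responses
         * pending.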
*/ accel_dev = adf_devmgr_get_dev_by_id(accel_id); if (!accel_dev) { pr_err("There is no device with id: %d\n", accel_id); return EINVAL; } /* Loop over banks and call adf_poll_bank */ trans_data = accel_dev->transport; for (bank_num = 0; bank_num < GET_MAX_BANKS(accel_dev); bank_num++) { bank = &trans_data->banks[bank_num]; /* if there are no polling rings on this bank * continue to the next bank number. */ if (bank->ring_mask == 0) continue; status = adf_poll_bank(accel_id, bank_num, quota); /* The successful status should be AGAIN or 0 */ if (status == 0) stat_total++; else if (status != EAGAIN) return status; } /* Return SUCCESS if adf_poll_bank returned SUCCESS * at any stage. adf_poll_bank cannot * return fail in the above case. */ if (stat_total) return 0; return EAGAIN; } static void adf_configure_tx_ring(struct adf_etr_ring_data *ring) { + struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(ring->bank->accel_dev); u32 ring_config = BUILD_RING_CONFIG(ring->ring_size); - WRITE_CSR_RING_CONFIG(ring->bank->csr_addr, - ring->bank->bank_number, - ring->ring_number, - ring_config); + csr_ops->write_csr_ring_config(ring->bank->csr_addr, + ring->bank->bank_number, + ring->ring_number, + ring_config); } static void adf_configure_rx_ring(struct adf_etr_ring_data *ring) { + struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(ring->bank->accel_dev); u32 ring_config = BUILD_RESP_RING_CONFIG(ring->ring_size, ADF_RING_NEAR_WATERMARK_512, ADF_RING_NEAR_WATERMARK_0); - WRITE_CSR_RING_CONFIG(ring->bank->csr_addr, - ring->bank->bank_number, - ring->ring_number, - ring_config); + csr_ops->write_csr_ring_config(ring->bank->csr_addr, + ring->bank->bank_number, + ring->ring_number, + ring_config); } static int adf_init_ring(struct adf_etr_ring_data *ring) { struct adf_etr_bank_data *bank = ring->bank; struct adf_accel_dev *accel_dev = bank->accel_dev; struct adf_hw_device_data *hw_data = accel_dev->hw_device; + struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev); u64 ring_base; u32 ring_size_bytes = ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size); ring_size_bytes = ADF_RING_SIZE_BYTES_MIN(ring_size_bytes); int ret; ret = bus_dma_mem_create(&ring->dma_mem, accel_dev->dma_tag, ring_size_bytes, BUS_SPACE_MAXADDR, ring_size_bytes, M_WAITOK | M_ZERO); if (ret) return ret; ring->base_addr = ring->dma_mem.dma_vaddr; ring->dma_addr = ring->dma_mem.dma_baddr; memset(ring->base_addr, 0x7F, ring_size_bytes); /* The base_addr has to be aligned to the size of the buffer */ if (adf_check_ring_alignment(ring->dma_addr, ring_size_bytes)) { device_printf(GET_DEV(accel_dev), "Ring address not aligned\n"); bus_dma_mem_free(&ring->dma_mem); ring->base_addr = NULL; return EFAULT; } if (hw_data->tx_rings_mask & (1 << ring->ring_number)) adf_configure_tx_ring(ring); else adf_configure_rx_ring(ring); - ring_base = BUILD_RING_BASE_ADDR(ring->dma_addr, ring->ring_size); - WRITE_CSR_RING_BASE(ring->bank->csr_addr, - ring->bank->bank_number, - ring->ring_number, - ring_base); + ring_base = + csr_ops->build_csr_ring_base_addr(ring->dma_addr, ring->ring_size); + csr_ops->write_csr_ring_base(ring->bank->csr_addr, + ring->bank->bank_number, + ring->ring_number, + ring_base); mtx_init(&ring->lock, "adf bank", NULL, MTX_DEF); return 0; } static void adf_cleanup_ring(struct adf_etr_ring_data *ring) { u32 ring_size_bytes = ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size); ring_size_bytes = ADF_RING_SIZE_BYTES_MIN(ring_size_bytes); if (ring->base_addr) { explicit_bzero(ring->base_addr, ring_size_bytes); bus_dma_mem_free(&ring->dma_mem); } 
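        /* The ring lock was initialized in adf_init_ring(); destroy it last. */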
mtx_destroy(&ring->lock); } int adf_create_ring(struct adf_accel_dev *accel_dev, const char *section, u32 bank_num, u32 num_msgs, u32 msg_size, const char *ring_name, adf_callback_fn callback, int poll_mode, struct adf_etr_ring_data **ring_ptr) { struct adf_etr_data *transport_data = accel_dev->transport; struct adf_etr_bank_data *bank; struct adf_etr_ring_data *ring; char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES]; u32 ring_num; int ret; u8 num_rings_per_bank = accel_dev->hw_device->num_rings_per_bank; if (bank_num >= GET_MAX_BANKS(accel_dev)) { device_printf(GET_DEV(accel_dev), "Invalid bank number\n"); return EFAULT; } if (msg_size > ADF_MSG_SIZE_TO_BYTES(ADF_MAX_MSG_SIZE)) { device_printf(GET_DEV(accel_dev), "Invalid msg size\n"); return EFAULT; } if (ADF_MAX_INFLIGHTS(adf_verify_ring_size(msg_size, num_msgs), ADF_BYTES_TO_MSG_SIZE(msg_size)) < 2) { device_printf(GET_DEV(accel_dev), "Invalid ring size for given msg size\n"); return EFAULT; } if (adf_cfg_get_param_value(accel_dev, section, ring_name, val)) { device_printf(GET_DEV(accel_dev), "Section %s, no such entry : %s\n", section, ring_name); return EFAULT; } if (compat_strtouint(val, 10, &ring_num)) { device_printf(GET_DEV(accel_dev), "Can't get ring number\n"); return EFAULT; } if (ring_num >= num_rings_per_bank) { device_printf(GET_DEV(accel_dev), "Invalid ring number\n"); return EFAULT; } bank = &transport_data->banks[bank_num]; if (adf_reserve_ring(bank, ring_num)) { device_printf(GET_DEV(accel_dev), "Ring %d, %s already exists.\n", ring_num, ring_name); return EFAULT; } ring = &bank->rings[ring_num]; ring->ring_number = ring_num; ring->bank = bank; ring->callback = callback; ring->msg_size = ADF_BYTES_TO_MSG_SIZE(msg_size); ring->ring_size = adf_verify_ring_size(msg_size, num_msgs); ring->max_inflights = ADF_MAX_INFLIGHTS(ring->ring_size, ring->msg_size); ring->head = 0; ring->tail = 0; ring->csr_tail_offset = 0; ret = adf_init_ring(ring); if (ret) goto err; /* Enable HW arbitration for the given ring */ adf_update_ring_arb(ring); if (adf_ring_debugfs_add(ring, ring_name)) { device_printf(GET_DEV(accel_dev), "Couldn't add ring debugfs entry\n"); ret = EFAULT; goto err; } /* Enable interrupts if needed */ if (callback && !poll_mode) adf_enable_ring_irq(bank, ring->ring_number); *ring_ptr = ring; return 0; err: adf_cleanup_ring(ring); adf_unreserve_ring(bank, ring_num); adf_update_ring_arb(ring); return ret; } void adf_remove_ring(struct adf_etr_ring_data *ring) { struct adf_etr_bank_data *bank = ring->bank; + struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(bank->accel_dev); /* Disable interrupts for the given ring */ adf_disable_ring_irq(bank, ring->ring_number); /* Clear PCI config space */ - WRITE_CSR_RING_CONFIG(bank->csr_addr, - bank->bank_number, - ring->ring_number, - 0); - WRITE_CSR_RING_BASE(bank->csr_addr, - bank->bank_number, - ring->ring_number, - 0); + csr_ops->write_csr_ring_config(bank->csr_addr, + bank->bank_number, + ring->ring_number, + 0); + csr_ops->write_csr_ring_base(bank->csr_addr, + bank->bank_number, + ring->ring_number, + 0); adf_ring_debugfs_rm(ring); adf_unreserve_ring(bank, ring->ring_number); /* Disable HW arbitration for the given ring */ adf_update_ring_arb(ring); adf_cleanup_ring(ring); } static void adf_ring_response_handler(struct adf_etr_bank_data *bank) { struct adf_accel_dev *accel_dev = bank->accel_dev; struct adf_hw_device_data *hw_data = accel_dev->hw_device; + struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev); u8 num_rings_per_bank = hw_data->num_rings_per_bank; u32 empty_rings, i; - 
empty_rings = READ_CSR_E_STAT(bank->csr_addr, bank->bank_number); + empty_rings = + csr_ops->read_csr_e_stat(bank->csr_addr, bank->bank_number); empty_rings = ~empty_rings & bank->irq_mask; for (i = 0; i < num_rings_per_bank; ++i) { if (empty_rings & (1 << i)) adf_handle_response(&bank->rings[i], 0); } } void adf_response_handler(uintptr_t bank_addr) { struct adf_etr_bank_data *bank = (void *)bank_addr; + struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(bank->accel_dev); /* Handle all the responses and re-enable IRQs */ adf_ring_response_handler(bank); - WRITE_CSR_INT_FLAG_AND_COL(bank->csr_addr, - bank->bank_number, - bank->irq_mask); + csr_ops->write_csr_int_flag_and_col(bank->csr_addr, + bank->bank_number, + bank->irq_mask); } static inline int adf_get_cfg_int(struct adf_accel_dev *accel_dev, const char *section, const char *format, u32 key, u32 *value) { char key_buf[ADF_CFG_MAX_KEY_LEN_IN_BYTES]; char val_buf[ADF_CFG_MAX_VAL_LEN_IN_BYTES]; snprintf(key_buf, ADF_CFG_MAX_KEY_LEN_IN_BYTES, format, key); if (adf_cfg_get_param_value(accel_dev, section, key_buf, val_buf)) return EFAULT; if (compat_strtouint(val_buf, 10, value)) return EFAULT; return 0; } static void adf_get_coalesc_timer(struct adf_etr_bank_data *bank, const char *section, u32 bank_num_in_accel) { struct adf_accel_dev *accel_dev = bank->accel_dev; struct adf_hw_device_data *hw_data = accel_dev->hw_device; u32 coalesc_timer = ADF_COALESCING_DEF_TIME; adf_get_cfg_int(accel_dev, section, ADF_ETRMGR_COALESCE_TIMER_FORMAT, bank_num_in_accel, &coalesc_timer); if (hw_data->get_clock_speed) bank->irq_coalesc_timer = (coalesc_timer * (hw_data->get_clock_speed(hw_data) / USEC_PER_SEC)) / NSEC_PER_USEC; else bank->irq_coalesc_timer = coalesc_timer; if (bank->irq_coalesc_timer > ADF_COALESCING_MAX_TIME) bank->irq_coalesc_timer = ADF_COALESCING_MAX_TIME; else if (bank->irq_coalesc_timer < ADF_COALESCING_MIN_TIME) bank->irq_coalesc_timer = ADF_COALESCING_MIN_TIME; } static int adf_init_bank(struct adf_accel_dev *accel_dev, struct adf_etr_bank_data *bank, u32 bank_num, struct resource *csr_addr) { struct adf_hw_device_data *hw_data = accel_dev->hw_device; + struct adf_hw_csr_ops *csr_ops = &hw_data->csr_info.csr_ops; struct adf_etr_ring_data *ring; struct adf_etr_ring_data *tx_ring; u32 i, coalesc_enabled = 0; u8 num_rings_per_bank = hw_data->num_rings_per_bank; + u32 irq_mask = BIT(num_rings_per_bank) - 1; u32 size = 0; explicit_bzero(bank, sizeof(*bank)); bank->bank_number = bank_num; bank->csr_addr = csr_addr; bank->accel_dev = accel_dev; mtx_init(&bank->lock, "adf bank", NULL, MTX_DEF); /* Allocate the rings in the bank */ size = num_rings_per_bank * sizeof(struct adf_etr_ring_data); bank->rings = kzalloc_node(size, M_WAITOK | M_ZERO, dev_to_node(GET_DEV(accel_dev))); /* Enable IRQ coalescing always. This will allow to use * the optimised flag and coalesc register. 
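         * The timeout read from the configuration file is converted by
         * adf_get_coalesc_timer() from microseconds into internal clock
         * ticks and clamped to the supported range.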
* If it is disabled in the config file just use min time value */ if ((adf_get_cfg_int(accel_dev, "Accelerator0", ADF_ETRMGR_COALESCING_ENABLED_FORMAT, bank_num, &coalesc_enabled) == 0) && coalesc_enabled) adf_get_coalesc_timer(bank, "Accelerator0", bank_num); else bank->irq_coalesc_timer = ADF_COALESCING_MIN_TIME; for (i = 0; i < num_rings_per_bank; i++) { - WRITE_CSR_RING_CONFIG(csr_addr, bank_num, i, 0); - WRITE_CSR_RING_BASE(csr_addr, bank_num, i, 0); + csr_ops->write_csr_ring_config(csr_addr, bank_num, i, 0); + csr_ops->write_csr_ring_base(csr_addr, bank_num, i, 0); ring = &bank->rings[i]; if (hw_data->tx_rings_mask & (1 << i)) { ring->inflights = kzalloc_node(sizeof(atomic_t), M_WAITOK | M_ZERO, dev_to_node(GET_DEV(accel_dev))); } else { if (i < hw_data->tx_rx_gap) { device_printf(GET_DEV(accel_dev), "Invalid tx rings mask config\n"); goto err; } tx_ring = &bank->rings[i - hw_data->tx_rx_gap]; ring->inflights = tx_ring->inflights; } } if (adf_bank_debugfs_add(bank)) { device_printf(GET_DEV(accel_dev), "Failed to add bank debugfs entry\n"); goto err; } - WRITE_CSR_INT_FLAG(csr_addr, bank_num, ADF_BANK_INT_FLAG_CLEAR_MASK); - WRITE_CSR_INT_SRCSEL(csr_addr, bank_num); + csr_ops->write_csr_int_flag(csr_addr, bank_num, irq_mask); + csr_ops->write_csr_int_srcsel(csr_addr, bank_num); return 0; err: for (i = 0; i < num_rings_per_bank; i++) { ring = &bank->rings[i]; if (hw_data->tx_rings_mask & (1 << i)) { kfree(ring->inflights); ring->inflights = NULL; } } kfree(bank->rings); return ENOMEM; } /** * adf_init_etr_data() - Initialize transport rings for acceleration device * @accel_dev: Pointer to acceleration device. * * Function initializes the communications channels (rings) to the * acceleration device accel_dev. * To be used by QAT device specific drivers. * * Return: 0 on success, error code otherwise. 
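 *
 * Illustrative usage from a device-specific driver (sketch only,
 * error handling abbreviated):
 *
 *      if (adf_init_etr_data(accel_dev))
 *              goto err;
 *      ...
 *      adf_cleanup_etr_data(accel_dev);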
 */
int
adf_init_etr_data(struct adf_accel_dev *accel_dev)
{
        struct adf_etr_data *etr_data;
        struct adf_hw_device_data *hw_data = accel_dev->hw_device;
        struct resource *csr_addr;
        u32 size;
        u32 num_banks = 0;
        int i, ret;

        etr_data = kzalloc_node(sizeof(*etr_data),
                                M_WAITOK | M_ZERO,
                                dev_to_node(GET_DEV(accel_dev)));

        num_banks = GET_MAX_BANKS(accel_dev);
        size = num_banks * sizeof(struct adf_etr_bank_data);
        etr_data->banks = kzalloc_node(size,
                                       M_WAITOK | M_ZERO,
                                       dev_to_node(GET_DEV(accel_dev)));

        accel_dev->transport = etr_data;
        i = hw_data->get_etr_bar_id(hw_data);
        csr_addr = accel_dev->accel_pci_dev.pci_bars[i].virt_addr;

        etr_data->debug = SYSCTL_ADD_NODE(
            &accel_dev->sysctl_ctx,
            SYSCTL_CHILDREN(device_get_sysctl_tree(GET_DEV(accel_dev))),
            OID_AUTO,
            "transport",
            CTLFLAG_RD,
            NULL,
            "Transport parameters");
        if (!etr_data->debug) {
                device_printf(GET_DEV(accel_dev),
                              "Unable to create transport debugfs entry\n");
                ret = ENOENT;
                goto err_bank_all;
        }

        for (i = 0; i < num_banks; i++) {
                ret = adf_init_bank(accel_dev, &etr_data->banks[i], i, csr_addr);
                if (ret)
                        goto err_bank_all;
        }

        return 0;

err_bank_all:
        kfree(etr_data->banks);
        kfree(etr_data);
        accel_dev->transport = NULL;
        return ret;
}

static void
cleanup_bank(struct adf_etr_bank_data *bank)
{
        u32 i;
        struct adf_accel_dev *accel_dev = bank->accel_dev;
        struct adf_hw_device_data *hw_data = accel_dev->hw_device;
        u8 num_rings_per_bank = hw_data->num_rings_per_bank;

        for (i = 0; i < num_rings_per_bank; i++) {
                struct adf_accel_dev *accel_dev = bank->accel_dev;
                struct adf_hw_device_data *hw_data = accel_dev->hw_device;
                struct adf_etr_ring_data *ring = &bank->rings[i];

                if (bank->ring_mask & (1 << i))
                        adf_cleanup_ring(ring);

                if (hw_data->tx_rings_mask & (1 << i)) {
                        kfree(ring->inflights);
                        ring->inflights = NULL;
                }
        }
        kfree(bank->rings);
        adf_bank_debugfs_rm(bank);
        mtx_destroy(&bank->lock);
        explicit_bzero(bank, sizeof(*bank));
}

static void
adf_cleanup_etr_handles(struct adf_accel_dev *accel_dev)
{
        struct adf_etr_data *etr_data = accel_dev->transport;
        u32 i, num_banks = GET_MAX_BANKS(accel_dev);

        for (i = 0; i < num_banks; i++)
                cleanup_bank(&etr_data->banks[i]);
}

/**
 * adf_cleanup_etr_data() - Clear transport rings for acceleration device
 * @accel_dev: Pointer to acceleration device.
 *
 * Function clears the communications channels (rings) of the
 * acceleration device accel_dev.
 * To be used by QAT device specific drivers.
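 *
 * Safe to call even when initialization failed part-way:
 * accel_dev->transport is left NULL in that case and is checked below.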
* * Return: void */ void adf_cleanup_etr_data(struct adf_accel_dev *accel_dev) { struct adf_etr_data *etr_data = accel_dev->transport; if (etr_data) { adf_cleanup_etr_handles(accel_dev); kfree(etr_data->banks); kfree(etr_data); accel_dev->transport = NULL; } } diff --git a/sys/dev/qat/qat_common/qat_hal.c b/sys/dev/qat/qat_common/qat_hal.c index 0d711f5997f8..2da0e1141d79 100644 --- a/sys/dev/qat/qat_common/qat_hal.c +++ b/sys/dev/qat/qat_common/qat_hal.c @@ -1,1848 +1,1968 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright(c) 2007-2022 Intel Corporation */ /* $FreeBSD$ */ #include "qat_freebsd.h" #include "adf_cfg.h" #include "adf_common_drv.h" #include "adf_accel_devices.h" #include "icp_qat_uclo.h" #include "icp_qat_fw.h" #include "icp_qat_fw_init_admin.h" #include "adf_cfg_strings.h" #include "adf_transport_access_macros.h" #include "adf_transport_internal.h" #include #include "adf_accel_devices.h" #include "adf_common_drv.h" #include "icp_qat_hal.h" #include "icp_qat_uclo.h" #define BAD_REGADDR 0xffff #define MAX_RETRY_TIMES 1000000 #define INIT_CTX_ARB_VALUE 0x0 #define INIT_CTX_ENABLE_VALUE 0x0 #define INIT_PC_VALUE 0x0 #define INIT_WAKEUP_EVENTS_VALUE 0x1 #define INIT_SIG_EVENTS_VALUE 0x1 #define INIT_CCENABLE_VALUE 0x2000 #define RST_CSR_QAT_LSB 20 #define RST_CSR_AE_LSB 0 #define MC_TIMESTAMP_ENABLE (0x1 << 7) #define IGNORE_W1C_MASK \ ((~(1 << CE_BREAKPOINT_BITPOS)) & \ (~(1 << CE_CNTL_STORE_PARITY_ERROR_BITPOS)) & \ (~(1 << CE_REG_PAR_ERR_BITPOS))) #define INSERT_IMMED_GPRA_CONST(inst, const_val) \ (inst = ((inst & 0xFFFF00C03FFull) | \ ((((const_val) << 12) & 0x0FF00000ull) | \ (((const_val) << 10) & 0x0003FC00ull)))) #define INSERT_IMMED_GPRB_CONST(inst, const_val) \ (inst = ((inst & 0xFFFF00FFF00ull) | \ ((((const_val) << 12) & 0x0FF00000ull) | \ (((const_val) << 0) & 0x000000FFull)))) #define AE(handle, ae) ((handle)->hal_handle->aes[ae]) static const uint64_t inst_4b[] = { 0x0F0400C0000ull, 0x0F4400C0000ull, 0x0F040000300ull, 0x0F440000300ull, 0x0FC066C0000ull, 0x0F0000C0300ull, 0x0F0000C0300ull, 0x0F0000C0300ull, 0x0A021000000ull }; static const uint64_t inst[] = { 0x0F0000C0000ull, 0x0F000000380ull, 0x0D805000011ull, 0x0FC082C0300ull, 0x0F0000C0300ull, 0x0F0000C0300ull, 0x0F0000C0300ull, 0x0F0000C0300ull, 0x0A0643C0000ull, 0x0BAC0000301ull, 0x0D802000101ull, 0x0F0000C0001ull, 0x0FC066C0001ull, 0x0F0000C0300ull, 0x0F0000C0300ull, 0x0F0000C0300ull, 0x0F000400300ull, 0x0A0610C0000ull, 0x0BAC0000301ull, 0x0D804400101ull, 0x0A0580C0000ull, 0x0A0581C0000ull, 0x0A0582C0000ull, 0x0A0583C0000ull, 0x0A0584C0000ull, 0x0A0585C0000ull, 0x0A0586C0000ull, 0x0A0587C0000ull, 0x0A0588C0000ull, 0x0A0589C0000ull, 0x0A058AC0000ull, 0x0A058BC0000ull, 0x0A058CC0000ull, 0x0A058DC0000ull, 0x0A058EC0000ull, 0x0A058FC0000ull, 0x0A05C0C0000ull, 0x0A05C1C0000ull, 0x0A05C2C0000ull, 0x0A05C3C0000ull, 0x0A05C4C0000ull, 0x0A05C5C0000ull, 0x0A05C6C0000ull, 0x0A05C7C0000ull, 0x0A05C8C0000ull, 0x0A05C9C0000ull, 0x0A05CAC0000ull, 0x0A05CBC0000ull, 0x0A05CCC0000ull, 0x0A05CDC0000ull, 0x0A05CEC0000ull, 0x0A05CFC0000ull, 0x0A0400C0000ull, 0x0B0400C0000ull, 0x0A0401C0000ull, 0x0B0401C0000ull, 0x0A0402C0000ull, 0x0B0402C0000ull, 0x0A0403C0000ull, 0x0B0403C0000ull, 0x0A0404C0000ull, 0x0B0404C0000ull, 0x0A0405C0000ull, 0x0B0405C0000ull, 0x0A0406C0000ull, 0x0B0406C0000ull, 0x0A0407C0000ull, 0x0B0407C0000ull, 0x0A0408C0000ull, 0x0B0408C0000ull, 0x0A0409C0000ull, 0x0B0409C0000ull, 0x0A040AC0000ull, 0x0B040AC0000ull, 0x0A040BC0000ull, 0x0B040BC0000ull, 0x0A040CC0000ull, 0x0B040CC0000ull, 0x0A040DC0000ull, 
0x0B040DC0000ull, 0x0A040EC0000ull, 0x0B040EC0000ull, 0x0A040FC0000ull, 0x0B040FC0000ull, 0x0D81581C010ull, 0x0E000010000ull, 0x0E000010000ull, }; +static const uint64_t inst_CPM2X[] = { + 0x0F0000C0000ull, 0x0D802C00011ull, 0x0F0000C0001ull, 0x0FC066C0001ull, + 0x0F0000C0300ull, 0x0F0000C0300ull, 0x0F0000C0300ull, 0x0F000500300ull, + 0x0A0610C0000ull, 0x0BAC0000301ull, 0x0D802000101ull, 0x0A0580C0000ull, + 0x0A0581C0000ull, 0x0A0582C0000ull, 0x0A0583C0000ull, 0x0A0584C0000ull, + 0x0A0585C0000ull, 0x0A0586C0000ull, 0x0A0587C0000ull, 0x0A0588C0000ull, + 0x0A0589C0000ull, 0x0A058AC0000ull, 0x0A058BC0000ull, 0x0A058CC0000ull, + 0x0A058DC0000ull, 0x0A058EC0000ull, 0x0A058FC0000ull, 0x0A05C0C0000ull, + 0x0A05C1C0000ull, 0x0A05C2C0000ull, 0x0A05C3C0000ull, 0x0A05C4C0000ull, + 0x0A05C5C0000ull, 0x0A05C6C0000ull, 0x0A05C7C0000ull, 0x0A05C8C0000ull, + 0x0A05C9C0000ull, 0x0A05CAC0000ull, 0x0A05CBC0000ull, 0x0A05CCC0000ull, + 0x0A05CDC0000ull, 0x0A05CEC0000ull, 0x0A05CFC0000ull, 0x0A0400C0000ull, + 0x0B0400C0000ull, 0x0A0401C0000ull, 0x0B0401C0000ull, 0x0A0402C0000ull, + 0x0B0402C0000ull, 0x0A0403C0000ull, 0x0B0403C0000ull, 0x0A0404C0000ull, + 0x0B0404C0000ull, 0x0A0405C0000ull, 0x0B0405C0000ull, 0x0A0406C0000ull, + 0x0B0406C0000ull, 0x0A0407C0000ull, 0x0B0407C0000ull, 0x0A0408C0000ull, + 0x0B0408C0000ull, 0x0A0409C0000ull, 0x0B0409C0000ull, 0x0A040AC0000ull, + 0x0B040AC0000ull, 0x0A040BC0000ull, 0x0B040BC0000ull, 0x0A040CC0000ull, + 0x0B040CC0000ull, 0x0A040DC0000ull, 0x0B040DC0000ull, 0x0A040EC0000ull, + 0x0B040EC0000ull, 0x0A040FC0000ull, 0x0B040FC0000ull, 0x0D81341C010ull, + 0x0E000000001ull, 0x0E000010000ull, +}; + void qat_hal_set_live_ctx(struct icp_qat_fw_loader_handle *handle, unsigned char ae, unsigned int ctx_mask) { AE(handle, ae).live_ctx_mask = ctx_mask; } #define CSR_RETRY_TIMES 500 static int qat_hal_rd_ae_csr(struct icp_qat_fw_loader_handle *handle, unsigned char ae, unsigned int csr, unsigned int *value) { unsigned int iterations = CSR_RETRY_TIMES; do { *value = GET_AE_CSR(handle, ae, csr); if (!(GET_AE_CSR(handle, ae, LOCAL_CSR_STATUS) & LCS_STATUS)) return 0; } while (iterations--); pr_err("QAT: Read CSR timeout\n"); return EFAULT; } static int qat_hal_wr_ae_csr(struct icp_qat_fw_loader_handle *handle, unsigned char ae, unsigned int csr, unsigned int value) { unsigned int iterations = CSR_RETRY_TIMES; do { SET_AE_CSR(handle, ae, csr, value); if (!(GET_AE_CSR(handle, ae, LOCAL_CSR_STATUS) & LCS_STATUS)) return 0; } while (iterations--); pr_err("QAT: Write CSR Timeout\n"); return EFAULT; } static void qat_hal_get_wakeup_event(struct icp_qat_fw_loader_handle *handle, unsigned char ae, unsigned char ctx, unsigned int *events) { unsigned int cur_ctx; qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER, &cur_ctx); qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx); qat_hal_rd_ae_csr(handle, ae, CTX_WAKEUP_EVENTS_INDIRECT, events); qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx); } static int qat_hal_wait_cycles(struct icp_qat_fw_loader_handle *handle, unsigned char ae, unsigned int cycles, int chk_inactive) { unsigned int base_cnt = 0, cur_cnt = 0; unsigned int csr = (1 << ACS_ABO_BITPOS); int times = MAX_RETRY_TIMES; int elapsed_cycles = 0; qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT, &base_cnt); base_cnt &= 0xffff; while ((int)cycles > elapsed_cycles && times--) { if (chk_inactive) qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS, &csr); qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT, &cur_cnt); cur_cnt &= 0xffff; elapsed_cycles = cur_cnt - base_cnt; if (elapsed_cycles < 0) 
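                        /* PROFILE_COUNT is only 16 bits wide; undo the wrap-around. */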
elapsed_cycles += 0x10000;
                /* ensure at least 8 cycles have elapsed in wait_cycles */
                if (elapsed_cycles >= 8 && !(csr & (1 << ACS_ABO_BITPOS)))
                        return 0;
        }
        if (times < 0) {
                pr_err("QAT: wait_num_cycles time out\n");
                return EFAULT;
        }
        return 0;
}

void
qat_hal_get_scs_neigh_ae(unsigned char ae, unsigned char *ae_neigh)
{
        *ae_neigh = (ae & 0x1) ? (ae - 1) : (ae + 1);
}

#define CLR_BIT(wrd, bit) ((wrd) & ~(1 << (bit)))
#define SET_BIT(wrd, bit) ((wrd) | 1 << (bit))

int
qat_hal_set_ae_ctx_mode(struct icp_qat_fw_loader_handle *handle,
                        unsigned char ae,
                        unsigned char mode)
{
        unsigned int csr, new_csr;

        if (mode != 4 && mode != 8) {
                pr_err("QAT: bad ctx mode=%d\n", mode);
                return EINVAL;
        }

        /* Set the acceleration engine context mode to either four or eight */
        qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &csr);
        csr = IGNORE_W1C_MASK & csr;
        new_csr = (mode == 4) ? SET_BIT(csr, CE_INUSE_CONTEXTS_BITPOS) :
                                CLR_BIT(csr, CE_INUSE_CONTEXTS_BITPOS);
        qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, new_csr);
        return 0;
}

int
qat_hal_set_ae_nn_mode(struct icp_qat_fw_loader_handle *handle,
                       unsigned char ae,
                       unsigned char mode)
{
        unsigned int csr, new_csr;

+       if (IS_QAT_GEN4(pci_get_device(GET_DEV(handle->accel_dev)))) {
+               pr_err("QAT: No next neigh for CPM2X\n");
+               return EINVAL;
+       }
+
        qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &csr);
        csr &= IGNORE_W1C_MASK;
        new_csr = (mode) ? SET_BIT(csr, CE_NN_MODE_BITPOS) :
                           CLR_BIT(csr, CE_NN_MODE_BITPOS);
        if (new_csr != csr)
                qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, new_csr);
        return 0;
}

int
qat_hal_set_ae_lm_mode(struct icp_qat_fw_loader_handle *handle,
                       unsigned char ae,
                       enum icp_qat_uof_regtype lm_type,
                       unsigned char mode)
{
        unsigned int csr, new_csr;

        qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &csr);
        csr &= IGNORE_W1C_MASK;
        switch (lm_type) {
        case ICP_LMEM0:
                new_csr = (mode) ? SET_BIT(csr, CE_LMADDR_0_GLOBAL_BITPOS) :
                                   CLR_BIT(csr, CE_LMADDR_0_GLOBAL_BITPOS);
                break;
        case ICP_LMEM1:
                new_csr = (mode) ? SET_BIT(csr, CE_LMADDR_1_GLOBAL_BITPOS) :
                                   CLR_BIT(csr, CE_LMADDR_1_GLOBAL_BITPOS);
                break;
        case ICP_LMEM2:
                new_csr = (mode) ? SET_BIT(csr, CE_LMADDR_2_GLOBAL_BITPOS) :
                                   CLR_BIT(csr, CE_LMADDR_2_GLOBAL_BITPOS);
                break;
        case ICP_LMEM3:
                new_csr = (mode) ? SET_BIT(csr, CE_LMADDR_3_GLOBAL_BITPOS) :
                                   CLR_BIT(csr, CE_LMADDR_3_GLOBAL_BITPOS);
                break;
        default:
                pr_err("QAT: lmType = 0x%x\n", lm_type);
                return EINVAL;
        }
        if (new_csr != csr)
                qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, new_csr);
        return 0;
}

void
qat_hal_set_ae_tindex_mode(struct icp_qat_fw_loader_handle *handle,
                           unsigned char ae,
                           unsigned char mode)
{
        unsigned int csr, new_csr;

        qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &csr);
        csr &= IGNORE_W1C_MASK;
        new_csr = (mode) ? SET_BIT(csr, CE_T_INDEX_GLOBAL_BITPOS) :
                           CLR_BIT(csr, CE_T_INDEX_GLOBAL_BITPOS);
        if (new_csr != csr)
                qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, new_csr);
}

void
qat_hal_set_ae_scs_mode(struct icp_qat_fw_loader_handle *handle,
                        unsigned char ae,
                        unsigned char mode)
{
        unsigned int csr, new_csr;

        qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL, &csr);
        new_csr = (mode) ?
SET_BIT(csr, MMC_SHARE_CS_BITPOS) : CLR_BIT(csr, MMC_SHARE_CS_BITPOS); if (new_csr != csr) qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL, new_csr); } static unsigned short qat_hal_get_reg_addr(unsigned int type, unsigned short reg_num) { unsigned short reg_addr; switch (type) { case ICP_GPA_ABS: case ICP_GPB_ABS: reg_addr = 0x80 | (reg_num & 0x7f); break; case ICP_GPA_REL: case ICP_GPB_REL: reg_addr = reg_num & 0x1f; break; case ICP_SR_RD_REL: case ICP_SR_WR_REL: case ICP_SR_REL: reg_addr = 0x180 | (reg_num & 0x1f); break; case ICP_SR_ABS: reg_addr = 0x140 | ((reg_num & 0x3) << 1); break; case ICP_DR_RD_REL: case ICP_DR_WR_REL: case ICP_DR_REL: reg_addr = 0x1c0 | (reg_num & 0x1f); break; case ICP_DR_ABS: reg_addr = 0x100 | ((reg_num & 0x3) << 1); break; case ICP_NEIGH_REL: reg_addr = 0x280 | (reg_num & 0x1f); break; case ICP_LMEM0: reg_addr = 0x200; break; case ICP_LMEM1: reg_addr = 0x220; break; case ICP_LMEM2: reg_addr = 0x2c0; break; case ICP_LMEM3: reg_addr = 0x2e0; break; case ICP_NO_DEST: reg_addr = 0x300 | (reg_num & 0xff); break; default: reg_addr = BAD_REGADDR; break; } return reg_addr; } +static u32 +qat_hal_get_ae_mask_gen4(struct icp_qat_fw_loader_handle *handle) +{ + u32 tg = 0, ae; + u32 valid_ae_mask = 0; + + for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) { + if (handle->hal_handle->ae_mask & (1 << ae)) { + tg = ae / 4; + valid_ae_mask |= (1 << (tg * 2)); + } + } + return valid_ae_mask; +} + void qat_hal_reset(struct icp_qat_fw_loader_handle *handle) { unsigned int ae_reset_csr[MAX_CPP_NUM]; unsigned int ae_reset_val[MAX_CPP_NUM]; unsigned int valid_ae_mask, valid_slice_mask; unsigned int cpp_num = 1; unsigned int i; if (IS_QAT_GEN3(pci_get_device(GET_DEV(handle->accel_dev)))) { ae_reset_csr[0] = ICP_RESET_CPP0; ae_reset_csr[1] = ICP_RESET_CPP1; if (handle->hal_handle->ae_mask > 0xffff) ++cpp_num; + } else if (IS_QAT_GEN4(pci_get_device(GET_DEV(handle->accel_dev)))) { + ae_reset_csr[0] = ICP_RESET_CPP0; } else { ae_reset_csr[0] = ICP_RESET; } for (i = 0; i < cpp_num; i++) { if (i == 0) { - valid_ae_mask = handle->hal_handle->ae_mask & 0xFFFF; - valid_slice_mask = - handle->hal_handle->slice_mask & 0x3F; + if (IS_QAT_GEN4( + pci_get_device(GET_DEV(handle->accel_dev)))) { + valid_ae_mask = + qat_hal_get_ae_mask_gen4(handle); + valid_slice_mask = + handle->hal_handle->slice_mask; + } else { + valid_ae_mask = + handle->hal_handle->ae_mask & 0xFFFF; + valid_slice_mask = + handle->hal_handle->slice_mask & 0x3F; + } } else { valid_ae_mask = (handle->hal_handle->ae_mask >> AES_PER_CPP) & 0xFFFF; valid_slice_mask = (handle->hal_handle->slice_mask >> SLICES_PER_CPP) & 0x3F; } ae_reset_val[i] = GET_GLB_CSR(handle, ae_reset_csr[i]); ae_reset_val[i] |= valid_ae_mask << RST_CSR_AE_LSB; ae_reset_val[i] |= valid_slice_mask << RST_CSR_QAT_LSB; SET_GLB_CSR(handle, ae_reset_csr[i], ae_reset_val[i]); } } static void qat_hal_wr_indr_csr(struct icp_qat_fw_loader_handle *handle, unsigned char ae, unsigned int ctx_mask, unsigned int ae_csr, unsigned int csr_val) { unsigned int ctx, cur_ctx; qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER, &cur_ctx); for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++) { if (!(ctx_mask & (1 << ctx))) continue; qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx); qat_hal_wr_ae_csr(handle, ae, ae_csr, csr_val); } qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx); } static void qat_hal_rd_indr_csr(struct icp_qat_fw_loader_handle *handle, unsigned char ae, unsigned char ctx, unsigned int ae_csr, unsigned int *csr_val) { unsigned int cur_ctx; 
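        /*
         * Save the current context pointer, select the requested context,
         * read the indirect CSR through it, then restore the pointer.
         */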
qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER, &cur_ctx); qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx); qat_hal_rd_ae_csr(handle, ae, ae_csr, csr_val); qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx); } static void qat_hal_put_sig_event(struct icp_qat_fw_loader_handle *handle, unsigned char ae, unsigned int ctx_mask, unsigned int events) { unsigned int ctx, cur_ctx; qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER, &cur_ctx); for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++) { if (!(ctx_mask & (1 << ctx))) continue; qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx); qat_hal_wr_ae_csr(handle, ae, CTX_SIG_EVENTS_INDIRECT, events); } qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx); } static void qat_hal_put_wakeup_event(struct icp_qat_fw_loader_handle *handle, unsigned char ae, unsigned int ctx_mask, unsigned int events) { unsigned int ctx, cur_ctx; qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER, &cur_ctx); for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++) { if (!(ctx_mask & (1 << ctx))) continue; qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx); qat_hal_wr_ae_csr(handle, ae, CTX_WAKEUP_EVENTS_INDIRECT, events); } qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx); } static int qat_hal_check_ae_alive(struct icp_qat_fw_loader_handle *handle) { unsigned int base_cnt, cur_cnt; unsigned char ae; unsigned long ae_mask = handle->hal_handle->ae_mask; int times = MAX_RETRY_TIMES; for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) { qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT, (unsigned int *)&base_cnt); base_cnt &= 0xffff; do { qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT, (unsigned int *)&cur_cnt); cur_cnt &= 0xffff; } while (times-- && (cur_cnt == base_cnt)); if (times < 0) { pr_err("QAT: AE%d is inactive!!\n", ae); return EFAULT; } } return 0; } int qat_hal_check_ae_active(struct icp_qat_fw_loader_handle *handle, unsigned int ae) { unsigned int enable = 0, active = 0; qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &enable); qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS, &active); if ((enable & (0xff << CE_ENABLE_BITPOS)) || (active & (1 << ACS_ABO_BITPOS))) return 1; else return 0; } static void qat_hal_reset_timestamp(struct icp_qat_fw_loader_handle *handle) { unsigned int misc_ctl_csr, misc_ctl; unsigned char ae; unsigned long ae_mask = handle->hal_handle->ae_mask; misc_ctl_csr = - (IS_QAT_GEN3(pci_get_device(GET_DEV(handle->accel_dev)))) ? + (IS_QAT_GEN3_OR_GEN4(pci_get_device(GET_DEV(handle->accel_dev)))) ? 
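        /* Gen3/Gen4 parts expose the timestamp control in MISC_CONTROL_C4XXX. */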
MISC_CONTROL_C4XXX : MISC_CONTROL; /* stop the timestamp timers */ misc_ctl = GET_GLB_CSR(handle, misc_ctl_csr); if (misc_ctl & MC_TIMESTAMP_ENABLE) SET_GLB_CSR(handle, misc_ctl_csr, misc_ctl & (~MC_TIMESTAMP_ENABLE)); for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) { qat_hal_wr_ae_csr(handle, ae, TIMESTAMP_LOW, 0); qat_hal_wr_ae_csr(handle, ae, TIMESTAMP_HIGH, 0); } /* start timestamp timers */ SET_GLB_CSR(handle, misc_ctl_csr, misc_ctl | MC_TIMESTAMP_ENABLE); } #define ESRAM_AUTO_TINIT BIT(2) #define ESRAM_AUTO_TINIT_DONE BIT(3) #define ESRAM_AUTO_INIT_USED_CYCLES (1640) #define ESRAM_AUTO_INIT_CSR_OFFSET 0xC1C static int qat_hal_init_esram(struct icp_qat_fw_loader_handle *handle) { uintptr_t csr_addr = ((uintptr_t)handle->hal_ep_csr_addr_v + ESRAM_AUTO_INIT_CSR_OFFSET); unsigned int csr_val; int times = 30; if (pci_get_device(GET_DEV(handle->accel_dev)) != ADF_DH895XCC_PCI_DEVICE_ID) return 0; csr_val = ADF_CSR_RD(handle->hal_misc_addr_v, csr_addr); if ((csr_val & ESRAM_AUTO_TINIT) && (csr_val & ESRAM_AUTO_TINIT_DONE)) return 0; csr_val = ADF_CSR_RD(handle->hal_misc_addr_v, csr_addr); csr_val |= ESRAM_AUTO_TINIT; ADF_CSR_WR(handle->hal_misc_addr_v, csr_addr, csr_val); do { qat_hal_wait_cycles(handle, 0, ESRAM_AUTO_INIT_USED_CYCLES, 0); csr_val = ADF_CSR_RD(handle->hal_misc_addr_v, csr_addr); } while (!(csr_val & ESRAM_AUTO_TINIT_DONE) && times--); if (times < 0) { pr_err("QAT: Fail to init eSram!\n"); return EFAULT; } return 0; } #define SHRAM_INIT_CYCLES 2060 int qat_hal_clr_reset(struct icp_qat_fw_loader_handle *handle) { unsigned int ae_reset_csr[MAX_CPP_NUM]; unsigned int ae_reset_val[MAX_CPP_NUM]; unsigned int cpp_num = 1; unsigned int valid_ae_mask, valid_slice_mask; unsigned char ae; unsigned int i; unsigned int clk_csr[MAX_CPP_NUM]; unsigned int clk_val[MAX_CPP_NUM]; unsigned int times = 100; unsigned long ae_mask = handle->hal_handle->ae_mask; if (IS_QAT_GEN3(pci_get_device(GET_DEV(handle->accel_dev)))) { ae_reset_csr[0] = ICP_RESET_CPP0; ae_reset_csr[1] = ICP_RESET_CPP1; clk_csr[0] = ICP_GLOBAL_CLK_ENABLE_CPP0; clk_csr[1] = ICP_GLOBAL_CLK_ENABLE_CPP1; if (handle->hal_handle->ae_mask > 0xffff) ++cpp_num; + } else if (IS_QAT_GEN4(pci_get_device(GET_DEV(handle->accel_dev)))) { + ae_reset_csr[0] = ICP_RESET_CPP0; + clk_csr[0] = ICP_GLOBAL_CLK_ENABLE_CPP0; } else { ae_reset_csr[0] = ICP_RESET; clk_csr[0] = ICP_GLOBAL_CLK_ENABLE; } for (i = 0; i < cpp_num; i++) { if (i == 0) { - valid_ae_mask = handle->hal_handle->ae_mask & 0xFFFF; - valid_slice_mask = - handle->hal_handle->slice_mask & 0x3F; + if (IS_QAT_GEN4( + pci_get_device(GET_DEV(handle->accel_dev)))) { + valid_ae_mask = + qat_hal_get_ae_mask_gen4(handle); + valid_slice_mask = + handle->hal_handle->slice_mask; + } else { + valid_ae_mask = + handle->hal_handle->ae_mask & 0xFFFF; + valid_slice_mask = + handle->hal_handle->slice_mask & 0x3F; + } } else { valid_ae_mask = (handle->hal_handle->ae_mask >> AES_PER_CPP) & 0xFFFF; valid_slice_mask = (handle->hal_handle->slice_mask >> SLICES_PER_CPP) & 0x3F; } /* write to the reset csr */ ae_reset_val[i] = GET_GLB_CSR(handle, ae_reset_csr[i]); ae_reset_val[i] &= ~(valid_ae_mask << RST_CSR_AE_LSB); ae_reset_val[i] &= ~(valid_slice_mask << RST_CSR_QAT_LSB); do { SET_GLB_CSR(handle, ae_reset_csr[i], ae_reset_val[i]); if (!(times--)) goto out_err; ae_reset_val[i] = GET_GLB_CSR(handle, ae_reset_csr[i]); } while ( (valid_ae_mask | (valid_slice_mask << RST_CSR_QAT_LSB)) & ae_reset_val[i]); /* enable clock */ clk_val[i] = GET_GLB_CSR(handle, clk_csr[i]); clk_val[i] |= 
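            /* AE clock enables occupy the low bits; slice clocks start at bit 20. */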
valid_ae_mask << 0; clk_val[i] |= valid_slice_mask << 20; SET_GLB_CSR(handle, clk_csr[i], clk_val[i]); } if (qat_hal_check_ae_alive(handle)) goto out_err; /* Set undefined power-up/reset states to reasonable default values */ for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) { qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, INIT_CTX_ENABLE_VALUE); qat_hal_wr_indr_csr(handle, ae, ICP_QAT_UCLO_AE_ALL_CTX, CTX_STS_INDIRECT, handle->hal_handle->upc_mask & INIT_PC_VALUE); qat_hal_wr_ae_csr(handle, ae, CTX_ARB_CNTL, INIT_CTX_ARB_VALUE); qat_hal_wr_ae_csr(handle, ae, CC_ENABLE, INIT_CCENABLE_VALUE); qat_hal_put_wakeup_event(handle, ae, ICP_QAT_UCLO_AE_ALL_CTX, INIT_WAKEUP_EVENTS_VALUE); qat_hal_put_sig_event(handle, ae, ICP_QAT_UCLO_AE_ALL_CTX, INIT_SIG_EVENTS_VALUE); } if (qat_hal_init_esram(handle)) goto out_err; if (qat_hal_wait_cycles(handle, 0, SHRAM_INIT_CYCLES, 0)) goto out_err; qat_hal_reset_timestamp(handle); return 0; out_err: pr_err("QAT: failed to get device out of reset\n"); return EFAULT; } static void qat_hal_disable_ctx(struct icp_qat_fw_loader_handle *handle, unsigned char ae, unsigned int ctx_mask) { unsigned int ctx; qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx); ctx &= IGNORE_W1C_MASK & (~((ctx_mask & ICP_QAT_UCLO_AE_ALL_CTX) << CE_ENABLE_BITPOS)); qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx); } static uint64_t qat_hal_parity_64bit(uint64_t word) { word ^= word >> 1; word ^= word >> 2; word ^= word >> 4; word ^= word >> 8; word ^= word >> 16; word ^= word >> 32; return word & 1; } static uint64_t qat_hal_set_uword_ecc(uint64_t uword) { uint64_t bit0_mask = 0xff800007fffULL, bit1_mask = 0x1f801ff801fULL, bit2_mask = 0xe387e0781e1ULL, bit3_mask = 0x7cb8e388e22ULL, bit4_mask = 0xaf5b2c93244ULL, bit5_mask = 0xf56d5525488ULL, bit6_mask = 0xdaf69a46910ULL; /* clear the ecc bits */ uword &= ~(0x7fULL << 0x2C); uword |= qat_hal_parity_64bit(bit0_mask & uword) << 0x2C; uword |= qat_hal_parity_64bit(bit1_mask & uword) << 0x2D; uword |= qat_hal_parity_64bit(bit2_mask & uword) << 0x2E; uword |= qat_hal_parity_64bit(bit3_mask & uword) << 0x2F; uword |= qat_hal_parity_64bit(bit4_mask & uword) << 0x30; uword |= qat_hal_parity_64bit(bit5_mask & uword) << 0x31; uword |= qat_hal_parity_64bit(bit6_mask & uword) << 0x32; return uword; } void qat_hal_wr_uwords(struct icp_qat_fw_loader_handle *handle, unsigned char ae, unsigned int uaddr, unsigned int words_num, const uint64_t *uword) { unsigned int ustore_addr; - unsigned int i; + unsigned int i, ae_in_group; + + if (IS_QAT_GEN4(pci_get_device(GET_DEV(handle->accel_dev)))) { + ae_in_group = ae / 4 * 4; + + for (i = 0; i < AE_TG_NUM_CPM2X; i++) { + if (ae_in_group + i == ae) + continue; + if (ae_in_group + i >= handle->hal_handle->ae_max_num) + break; + if (qat_hal_check_ae_active(handle, ae_in_group + i)) { + pr_err( + "ae%d in T_group is active, cannot write to ustore!\n", + ae_in_group + i); + return; + } + } + } qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS, &ustore_addr); uaddr |= UA_ECS; qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr); for (i = 0; i < words_num; i++) { unsigned int uwrd_lo, uwrd_hi; uint64_t tmp; tmp = qat_hal_set_uword_ecc(uword[i]); uwrd_lo = (unsigned int)(tmp & 0xffffffff); uwrd_hi = (unsigned int)(tmp >> 0x20); qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_LOWER, uwrd_lo); qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_UPPER, uwrd_hi); } qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, ustore_addr); } void qat_hal_wr_coalesce_uwords(struct icp_qat_fw_loader_handle *handle, unsigned char ae, unsigned int uaddr, 
unsigned int words_num, u64 *uword) { u64 *even_uwrods, *odd_uwords; unsigned char neigh_ae, odd_ae, even_ae; int i, even_cpy_cnt = 0, odd_cpy_cnt = 0; even_uwrods = malloc(16 * 1024 * sizeof(*uword), M_QAT, M_WAITOK | M_ZERO); odd_uwords = malloc(16 * 1024 * sizeof(*uword), M_QAT, M_WAITOK | M_ZERO); qat_hal_get_scs_neigh_ae(ae, &neigh_ae); if (ae & 1) { odd_ae = ae; even_ae = neigh_ae; } else { odd_ae = neigh_ae; even_ae = ae; } for (i = 0; i < words_num; i++) { if ((uaddr + i) & 1) odd_uwords[odd_cpy_cnt++] = uword[i]; else even_uwrods[even_cpy_cnt++] = uword[i]; } if (even_cpy_cnt) qat_hal_wr_uwords(handle, even_ae, (uaddr + 1) / 2, even_cpy_cnt, even_uwrods); if (odd_cpy_cnt) qat_hal_wr_uwords( handle, odd_ae, uaddr / 2, odd_cpy_cnt, odd_uwords); free(even_uwrods, M_QAT); free(odd_uwords, M_QAT); } static void qat_hal_enable_ctx(struct icp_qat_fw_loader_handle *handle, unsigned char ae, unsigned int ctx_mask) { unsigned int ctx; qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx); ctx &= IGNORE_W1C_MASK; ctx_mask &= (ctx & CE_INUSE_CONTEXTS) ? 0x55 : 0xFF; ctx |= (ctx_mask << CE_ENABLE_BITPOS); qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx); } static void qat_hal_clear_xfer(struct icp_qat_fw_loader_handle *handle) { unsigned char ae; unsigned short reg; unsigned long ae_mask = handle->hal_handle->ae_mask; for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) { for (reg = 0; reg < ICP_QAT_UCLO_MAX_GPR_REG; reg++) { qat_hal_init_rd_xfer( handle, ae, 0, ICP_SR_RD_ABS, reg, 0); qat_hal_init_rd_xfer( handle, ae, 0, ICP_DR_RD_ABS, reg, 0); } } } static int qat_hal_clear_gpr(struct icp_qat_fw_loader_handle *handle) { unsigned char ae; unsigned int ctx_mask = ICP_QAT_UCLO_AE_ALL_CTX; int times = MAX_RETRY_TIMES; unsigned int csr_val = 0; unsigned int savctx = 0; unsigned int scs_flag = 0; unsigned long ae_mask = handle->hal_handle->ae_mask; int ret = 0; for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) { qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL, &csr_val); scs_flag = csr_val & (1 << MMC_SHARE_CS_BITPOS); csr_val &= ~(1 << MMC_SHARE_CS_BITPOS); qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL, csr_val); qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &csr_val); csr_val &= IGNORE_W1C_MASK; - csr_val |= CE_NN_MODE; + if (!IS_QAT_GEN4(pci_get_device(GET_DEV(handle->accel_dev)))) { + csr_val |= CE_NN_MODE; + } qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, csr_val); - qat_hal_wr_uwords( - handle, ae, 0, ARRAY_SIZE(inst), (const uint64_t *)inst); + + if (IS_QAT_GEN4(pci_get_device(GET_DEV(handle->accel_dev)))) { + if (ae % 4 == 0) + qat_hal_wr_uwords(handle, + ae, + 0, + ARRAY_SIZE(inst_CPM2X), + (const uint64_t *)inst_CPM2X); + } else { + qat_hal_wr_uwords(handle, + ae, + 0, + ARRAY_SIZE(inst), + (const uint64_t *)inst); + } qat_hal_wr_indr_csr(handle, ae, ctx_mask, CTX_STS_INDIRECT, handle->hal_handle->upc_mask & INIT_PC_VALUE); qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS, &savctx); qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS, 0); qat_hal_put_wakeup_event(handle, ae, ctx_mask, XCWE_VOLUNTARY); qat_hal_wr_indr_csr( handle, ae, ctx_mask, CTX_SIG_EVENTS_INDIRECT, 0); qat_hal_wr_ae_csr(handle, ae, CTX_SIG_EVENTS_ACTIVE, 0); qat_hal_enable_ctx(handle, ae, ctx_mask); } for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) { /* wait for AE to finish */ do { ret = qat_hal_wait_cycles(handle, ae, 20, 1); } while (ret && times--); if (times < 0) { pr_err("QAT: clear GPR of AE %d failed", ae); return EINVAL; } qat_hal_disable_ctx(handle, ae, ctx_mask); qat_hal_rd_ae_csr(handle, 
ae, AE_MISC_CONTROL, &csr_val); if (scs_flag) csr_val |= (1 << MMC_SHARE_CS_BITPOS); qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL, csr_val); qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS, savctx & ACS_ACNO); qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, INIT_CTX_ENABLE_VALUE); qat_hal_wr_indr_csr(handle, ae, ctx_mask, CTX_STS_INDIRECT, handle->hal_handle->upc_mask & INIT_PC_VALUE); qat_hal_wr_ae_csr(handle, ae, CTX_ARB_CNTL, INIT_CTX_ARB_VALUE); qat_hal_wr_ae_csr(handle, ae, CC_ENABLE, INIT_CCENABLE_VALUE); qat_hal_put_wakeup_event(handle, ae, ctx_mask, INIT_WAKEUP_EVENTS_VALUE); qat_hal_put_sig_event(handle, ae, ctx_mask, INIT_SIG_EVENTS_VALUE); } return 0; } static int qat_hal_check_imr(struct icp_qat_fw_loader_handle *handle) { device_t dev = accel_to_pci_dev(handle->accel_dev); u8 reg_val = 0; if (pci_get_device(GET_DEV(handle->accel_dev)) != ADF_C3XXX_PCI_DEVICE_ID && pci_get_device(GET_DEV(handle->accel_dev)) != ADF_200XX_PCI_DEVICE_ID) return 0; reg_val = pci_read_config(dev, 0x04, 1); /* * PCI command register memory bit and rambaseaddr_lo address * are checked to confirm IMR2 is enabled in BIOS settings */ if ((reg_val & 0x2) && GET_FCU_CSR(handle, FCU_RAMBASE_ADDR_LO)) return 0; return EINVAL; } int qat_hal_init(struct adf_accel_dev *accel_dev) { unsigned char ae; unsigned int cap_offset, ae_offset, ep_offset; unsigned int sram_offset = 0; unsigned int max_en_ae_id = 0; int ret = 0; unsigned long ae_mask; struct icp_qat_fw_loader_handle *handle; if (!accel_dev) { return EFAULT; } struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev; struct adf_hw_device_data *hw_data = accel_dev->hw_device; struct adf_bar *misc_bar = &pci_info->pci_bars[hw_data->get_misc_bar_id(hw_data)]; struct adf_bar *sram_bar; handle = malloc(sizeof(*handle), M_QAT, M_WAITOK | M_ZERO); handle->hal_misc_addr_v = misc_bar->virt_addr; handle->accel_dev = accel_dev; if (pci_get_device(GET_DEV(handle->accel_dev)) == ADF_DH895XCC_PCI_DEVICE_ID || IS_QAT_GEN3(pci_get_device(GET_DEV(handle->accel_dev)))) { sram_bar = &pci_info->pci_bars[hw_data->get_sram_bar_id(hw_data)]; if (IS_QAT_GEN3(pci_get_device(GET_DEV(handle->accel_dev)))) sram_offset = 0x400000 + accel_dev->aram_info->mmp_region_offset; handle->hal_sram_addr_v = sram_bar->virt_addr; handle->hal_sram_offset = sram_offset; handle->hal_sram_size = sram_bar->size; } GET_CSR_OFFSET(pci_get_device(GET_DEV(handle->accel_dev)), cap_offset, ae_offset, ep_offset); handle->hal_cap_g_ctl_csr_addr_v = cap_offset; handle->hal_cap_ae_xfer_csr_addr_v = ae_offset; handle->hal_ep_csr_addr_v = ep_offset; handle->hal_cap_ae_local_csr_addr_v = ((uintptr_t)handle->hal_cap_ae_xfer_csr_addr_v + LOCAL_TO_XFER_REG_OFFSET); handle->fw_auth = (pci_get_device(GET_DEV(handle->accel_dev)) == ADF_DH895XCC_PCI_DEVICE_ID) ? 
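            /* DH895xCC is the only device handled here that loads
             * unauthenticated firmware. */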
false : true; if (handle->fw_auth && qat_hal_check_imr(handle)) { device_printf(GET_DEV(accel_dev), "IMR2 not enabled in BIOS\n"); ret = EINVAL; goto out_hal_handle; } handle->hal_handle = malloc(sizeof(*handle->hal_handle), M_QAT, M_WAITOK | M_ZERO); handle->hal_handle->revision_id = accel_dev->accel_pci_dev.revid; handle->hal_handle->ae_mask = hw_data->ae_mask; + handle->hal_handle->admin_ae_mask = hw_data->admin_ae_mask; handle->hal_handle->slice_mask = hw_data->accel_mask; handle->cfg_ae_mask = 0xFFFFFFFF; /* create AE objects */ if (IS_QAT_GEN3(pci_get_device(GET_DEV(handle->accel_dev)))) { handle->hal_handle->upc_mask = 0xffff; handle->hal_handle->max_ustore = 0x2000; } else { handle->hal_handle->upc_mask = 0x1ffff; handle->hal_handle->max_ustore = 0x4000; } ae_mask = hw_data->ae_mask; for_each_set_bit(ae, &ae_mask, ICP_QAT_UCLO_MAX_AE) { handle->hal_handle->aes[ae].free_addr = 0; handle->hal_handle->aes[ae].free_size = handle->hal_handle->max_ustore; handle->hal_handle->aes[ae].ustore_size = handle->hal_handle->max_ustore; handle->hal_handle->aes[ae].live_ctx_mask = ICP_QAT_UCLO_AE_ALL_CTX; max_en_ae_id = ae; } handle->hal_handle->ae_max_num = max_en_ae_id + 1; /* take all AEs out of reset */ if (qat_hal_clr_reset(handle)) { device_printf(GET_DEV(accel_dev), "qat_hal_clr_reset error\n"); ret = EIO; goto out_err; } qat_hal_clear_xfer(handle); if (!handle->fw_auth) { if (qat_hal_clear_gpr(handle)) { ret = EIO; goto out_err; } } /* Set SIGNATURE_ENABLE[0] to 0x1 in order to enable ALU_OUT csr */ for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) { unsigned int csr_val = 0; qat_hal_rd_ae_csr(handle, ae, SIGNATURE_ENABLE, &csr_val); csr_val |= 0x1; qat_hal_wr_ae_csr(handle, ae, SIGNATURE_ENABLE, csr_val); } accel_dev->fw_loader->fw_loader = handle; return 0; out_err: free(handle->hal_handle, M_QAT); out_hal_handle: free(handle, M_QAT); return ret; } void qat_hal_deinit(struct icp_qat_fw_loader_handle *handle) { if (!handle) return; free(handle->hal_handle, M_QAT); free(handle, M_QAT); } -void -qat_hal_start(struct icp_qat_fw_loader_handle *handle, - unsigned char ae, - unsigned int ctx_mask) +int +qat_hal_start(struct icp_qat_fw_loader_handle *handle) { + unsigned char ae = 0; int retry = 0; unsigned int fcu_sts = 0; unsigned int fcu_ctl_csr, fcu_sts_csr; + unsigned long ae_mask = handle->hal_handle->ae_mask; + u32 ae_ctr = 0; if (handle->fw_auth) { - if (IS_QAT_GEN3(pci_get_device(GET_DEV(handle->accel_dev)))) { + for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) + { + ae_ctr++; + } + if (IS_QAT_GEN3_OR_GEN4( + pci_get_device(GET_DEV(handle->accel_dev)))) { fcu_ctl_csr = FCU_CONTROL_C4XXX; fcu_sts_csr = FCU_STATUS_C4XXX; } else { fcu_ctl_csr = FCU_CONTROL; fcu_sts_csr = FCU_STATUS; } SET_FCU_CSR(handle, fcu_ctl_csr, FCU_CTRL_CMD_START); do { pause_ms("adfstop", FW_AUTH_WAIT_PERIOD); fcu_sts = GET_FCU_CSR(handle, fcu_sts_csr); if (((fcu_sts >> FCU_STS_DONE_POS) & 0x1)) - return; + return ae_ctr; } while (retry++ < FW_AUTH_MAX_RETRY); pr_err("QAT: start error (AE 0x%x FCU_STS = 0x%x)\n", ae, fcu_sts); + return 0; } else { - qat_hal_put_wakeup_event(handle, - ae, - (~ctx_mask) & ICP_QAT_UCLO_AE_ALL_CTX, - 0x10000); - qat_hal_enable_ctx(handle, ae, ctx_mask); + for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) + { + qat_hal_put_wakeup_event(handle, + ae, + 0, + IS_QAT_GEN4( + pci_get_device(GET_DEV( + handle->accel_dev))) ? 
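                                             /* Gen4 (CPM2X) encodes the
                                              * wake-up event differently
                                              * from older parts. */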
+ 0x80000000 : + 0x10000); + qat_hal_enable_ctx(handle, ae, ICP_QAT_UCLO_AE_ALL_CTX); + ae_ctr++; + } + return ae_ctr; } } void qat_hal_stop(struct icp_qat_fw_loader_handle *handle, unsigned char ae, unsigned int ctx_mask) { if (!handle->fw_auth) qat_hal_disable_ctx(handle, ae, ctx_mask); } void qat_hal_set_pc(struct icp_qat_fw_loader_handle *handle, unsigned char ae, unsigned int ctx_mask, unsigned int upc) { qat_hal_wr_indr_csr(handle, ae, ctx_mask, CTX_STS_INDIRECT, handle->hal_handle->upc_mask & upc); } static void qat_hal_get_uwords(struct icp_qat_fw_loader_handle *handle, unsigned char ae, unsigned int uaddr, unsigned int words_num, uint64_t *uword) { unsigned int i, uwrd_lo, uwrd_hi; unsigned int ustore_addr, misc_control; unsigned int scs_flag = 0; qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL, &misc_control); scs_flag = misc_control & (0x1 << MMC_SHARE_CS_BITPOS); /*disable scs*/ qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL, misc_control & 0xfffffffb); qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS, &ustore_addr); uaddr |= UA_ECS; for (i = 0; i < words_num; i++) { qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr); uaddr++; qat_hal_rd_ae_csr(handle, ae, USTORE_DATA_LOWER, &uwrd_lo); qat_hal_rd_ae_csr(handle, ae, USTORE_DATA_UPPER, &uwrd_hi); uword[i] = uwrd_hi; uword[i] = (uword[i] << 0x20) | uwrd_lo; } if (scs_flag) misc_control |= (0x1 << MMC_SHARE_CS_BITPOS); qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL, misc_control); qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, ustore_addr); } void qat_hal_wr_umem(struct icp_qat_fw_loader_handle *handle, unsigned char ae, unsigned int uaddr, unsigned int words_num, unsigned int *data) { unsigned int i, ustore_addr; qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS, &ustore_addr); uaddr |= UA_ECS; qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr); for (i = 0; i < words_num; i++) { unsigned int uwrd_lo, uwrd_hi, tmp; uwrd_lo = ((data[i] & 0xfff0000) << 4) | (0x3 << 18) | ((data[i] & 0xff00) << 2) | (0x3 << 8) | (data[i] & 0xff); uwrd_hi = (0xf << 4) | ((data[i] & 0xf0000000) >> 28); uwrd_hi |= (bitcount32(data[i] & 0xffff) & 0x1) << 8; tmp = ((data[i] >> 0x10) & 0xffff); uwrd_hi |= (bitcount32(tmp) & 0x1) << 9; qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_LOWER, uwrd_lo); qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_UPPER, uwrd_hi); } qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, ustore_addr); } #define MAX_EXEC_INST 100 static int qat_hal_exec_micro_inst(struct icp_qat_fw_loader_handle *handle, unsigned char ae, unsigned char ctx, uint64_t *micro_inst, unsigned int inst_num, int code_off, unsigned int max_cycle, unsigned int *endpc) { uint64_t savuwords[MAX_EXEC_INST]; unsigned int ind_lm_addr0, ind_lm_addr1; unsigned int ind_lm_addr2, ind_lm_addr3; unsigned int ind_lm_addr_byte0, ind_lm_addr_byte1; unsigned int ind_lm_addr_byte2, ind_lm_addr_byte3; unsigned int ind_t_index, ind_t_index_byte; unsigned int ind_cnt_sig; unsigned int ind_sig, act_sig; unsigned int csr_val = 0, newcsr_val; unsigned int savctx, scs_flag; unsigned int savcc, wakeup_events, savpc; unsigned int ctxarb_ctl, ctx_enables; if (inst_num > handle->hal_handle->max_ustore || !micro_inst) { pr_err("QAT: invalid instruction num %d\n", inst_num); return EINVAL; } /* save current context */ qat_hal_rd_indr_csr(handle, ae, ctx, LM_ADDR_0_INDIRECT, &ind_lm_addr0); qat_hal_rd_indr_csr(handle, ae, ctx, LM_ADDR_1_INDIRECT, &ind_lm_addr1); qat_hal_rd_indr_csr( handle, ae, ctx, INDIRECT_LM_ADDR_0_BYTE_INDEX, &ind_lm_addr_byte0); qat_hal_rd_indr_csr( handle, ae, ctx, 
INDIRECT_LM_ADDR_1_BYTE_INDEX,
                            &ind_lm_addr_byte1);
-       if (IS_QAT_GEN3(pci_get_device(GET_DEV(handle->accel_dev)))) {
+       if (IS_QAT_GEN3_OR_GEN4(pci_get_device(GET_DEV(handle->accel_dev)))) {
                qat_hal_rd_indr_csr(
                    handle, ae, ctx, LM_ADDR_2_INDIRECT, &ind_lm_addr2);
                qat_hal_rd_indr_csr(
                    handle, ae, ctx, LM_ADDR_3_INDIRECT, &ind_lm_addr3);
                qat_hal_rd_indr_csr(handle,
                                    ae,
                                    ctx,
                                    INDIRECT_LM_ADDR_2_BYTE_INDEX,
                                    &ind_lm_addr_byte2);
                qat_hal_rd_indr_csr(handle,
                                    ae,
                                    ctx,
                                    INDIRECT_LM_ADDR_3_BYTE_INDEX,
                                    &ind_lm_addr_byte3);
                qat_hal_rd_indr_csr(
                    handle, ae, ctx, INDIRECT_T_INDEX, &ind_t_index);
                qat_hal_rd_indr_csr(handle,
                                    ae,
                                    ctx,
                                    INDIRECT_T_INDEX_BYTE_INDEX,
                                    &ind_t_index_byte);
        }
        qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL, &csr_val);
        scs_flag = csr_val & (1 << MMC_SHARE_CS_BITPOS);
        newcsr_val = CLR_BIT(csr_val, MMC_SHARE_CS_BITPOS);
        qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL, newcsr_val);
        if (inst_num <= MAX_EXEC_INST)
                qat_hal_get_uwords(handle, ae, 0, inst_num, savuwords);
        qat_hal_get_wakeup_event(handle, ae, ctx, &wakeup_events);
        qat_hal_rd_indr_csr(handle, ae, ctx, CTX_STS_INDIRECT, &savpc);
        savpc = (savpc & handle->hal_handle->upc_mask) >> 0;
        qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables);
        ctx_enables &= IGNORE_W1C_MASK;
        qat_hal_rd_ae_csr(handle, ae, CC_ENABLE, &savcc);
        qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS, &savctx);
        qat_hal_rd_ae_csr(handle, ae, CTX_ARB_CNTL, &ctxarb_ctl);
        qat_hal_rd_indr_csr(
            handle, ae, ctx, FUTURE_COUNT_SIGNAL_INDIRECT, &ind_cnt_sig);
        qat_hal_rd_indr_csr(handle, ae, ctx, CTX_SIG_EVENTS_INDIRECT, &ind_sig);
        qat_hal_rd_ae_csr(handle, ae, CTX_SIG_EVENTS_ACTIVE, &act_sig);
        /* execute microcode */
        qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables);
        qat_hal_wr_uwords(handle, ae, 0, inst_num, micro_inst);
        qat_hal_wr_indr_csr(handle, ae, (1 << ctx), CTX_STS_INDIRECT, 0);
        qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS, ctx & ACS_ACNO);
        if (code_off)
                qat_hal_wr_ae_csr(handle, ae, CC_ENABLE, savcc & 0xffffdfff);
        qat_hal_put_wakeup_event(handle, ae, (1 << ctx), XCWE_VOLUNTARY);
        qat_hal_wr_indr_csr(handle, ae, (1 << ctx), CTX_SIG_EVENTS_INDIRECT, 0);
        qat_hal_wr_ae_csr(handle, ae, CTX_SIG_EVENTS_ACTIVE, 0);
        qat_hal_enable_ctx(handle, ae, (1 << ctx));
        /* wait for the microcode to finish */
        if (qat_hal_wait_cycles(handle, ae, max_cycle, 1) != 0)
                return EFAULT;
        if (endpc) {
                unsigned int ctx_status;

                qat_hal_rd_indr_csr(
                    handle, ae, ctx, CTX_STS_INDIRECT, &ctx_status);
                *endpc = ctx_status & handle->hal_handle->upc_mask;
        }
        /* restore the saved context */
        qat_hal_disable_ctx(handle, ae, (1 << ctx));
        if (inst_num <= MAX_EXEC_INST)
                qat_hal_wr_uwords(handle, ae, 0, inst_num, savuwords);
        qat_hal_put_wakeup_event(handle, ae, (1 << ctx), wakeup_events);
        qat_hal_wr_indr_csr(handle,
                            ae,
                            (1 << ctx),
                            CTX_STS_INDIRECT,
                            handle->hal_handle->upc_mask & savpc);
        qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL, &csr_val);
        newcsr_val = scs_flag ?
SET_BIT(csr_val, MMC_SHARE_CS_BITPOS) : CLR_BIT(csr_val, MMC_SHARE_CS_BITPOS); qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL, newcsr_val); qat_hal_wr_ae_csr(handle, ae, CC_ENABLE, savcc); qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS, savctx & ACS_ACNO); qat_hal_wr_ae_csr(handle, ae, CTX_ARB_CNTL, ctxarb_ctl); qat_hal_wr_indr_csr( handle, ae, (1 << ctx), LM_ADDR_0_INDIRECT, ind_lm_addr0); qat_hal_wr_indr_csr( handle, ae, (1 << ctx), LM_ADDR_1_INDIRECT, ind_lm_addr1); qat_hal_wr_indr_csr(handle, ae, (1 << ctx), INDIRECT_LM_ADDR_0_BYTE_INDEX, ind_lm_addr_byte0); qat_hal_wr_indr_csr(handle, ae, (1 << ctx), INDIRECT_LM_ADDR_1_BYTE_INDEX, ind_lm_addr_byte1); - if (IS_QAT_GEN3(pci_get_device(GET_DEV(handle->accel_dev)))) { + if (IS_QAT_GEN3_OR_GEN4(pci_get_device(GET_DEV(handle->accel_dev)))) { qat_hal_wr_indr_csr( handle, ae, (1 << ctx), LM_ADDR_2_INDIRECT, ind_lm_addr2); qat_hal_wr_indr_csr( handle, ae, (1 << ctx), LM_ADDR_3_INDIRECT, ind_lm_addr3); qat_hal_wr_indr_csr(handle, ae, (1 << ctx), INDIRECT_LM_ADDR_2_BYTE_INDEX, ind_lm_addr_byte2); qat_hal_wr_indr_csr(handle, ae, (1 << ctx), INDIRECT_LM_ADDR_3_BYTE_INDEX, ind_lm_addr_byte3); qat_hal_wr_indr_csr( handle, ae, (1 << ctx), INDIRECT_T_INDEX, ind_t_index); qat_hal_wr_indr_csr(handle, ae, (1 << ctx), INDIRECT_T_INDEX_BYTE_INDEX, ind_t_index_byte); } qat_hal_wr_indr_csr( handle, ae, (1 << ctx), FUTURE_COUNT_SIGNAL_INDIRECT, ind_cnt_sig); qat_hal_wr_indr_csr( handle, ae, (1 << ctx), CTX_SIG_EVENTS_INDIRECT, ind_sig); qat_hal_wr_ae_csr(handle, ae, CTX_SIG_EVENTS_ACTIVE, act_sig); qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables); return 0; } static int qat_hal_rd_rel_reg(struct icp_qat_fw_loader_handle *handle, unsigned char ae, unsigned char ctx, enum icp_qat_uof_regtype reg_type, unsigned short reg_num, unsigned int *data) { unsigned int savctx, uaddr, uwrd_lo, uwrd_hi; unsigned int ctxarb_cntl, ustore_addr, ctx_enables; unsigned short reg_addr; int status = 0; unsigned int scs_flag = 0; unsigned int csr_val = 0, newcsr_val = 0; u64 insts, savuword; reg_addr = qat_hal_get_reg_addr(reg_type, reg_num); if (reg_addr == BAD_REGADDR) { pr_err("QAT: bad regaddr=0x%x\n", reg_addr); return EINVAL; } switch (reg_type) { case ICP_GPA_REL: insts = 0xA070000000ull | (reg_addr & 0x3ff); break; default: insts = (uint64_t)0xA030000000ull | ((reg_addr & 0x3ff) << 10); break; } qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL, &csr_val); scs_flag = csr_val & (1 << MMC_SHARE_CS_BITPOS); newcsr_val = CLR_BIT(csr_val, MMC_SHARE_CS_BITPOS); qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL, newcsr_val); qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS, &savctx); qat_hal_rd_ae_csr(handle, ae, CTX_ARB_CNTL, &ctxarb_cntl); qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables); ctx_enables &= IGNORE_W1C_MASK; if (ctx != (savctx & ACS_ACNO)) qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS, ctx & ACS_ACNO); qat_hal_get_uwords(handle, ae, 0, 1, &savuword); qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables); qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS, &ustore_addr); uaddr = UA_ECS; qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr); insts = qat_hal_set_uword_ecc(insts); uwrd_lo = (unsigned int)(insts & 0xffffffff); uwrd_hi = (unsigned int)(insts >> 0x20); qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_LOWER, uwrd_lo); qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_UPPER, uwrd_hi); qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr); /* delay for at least 8 cycles */ qat_hal_wait_cycles(handle, ae, 0x8, 0); /* * read ALU output * the instruction should have been 
executed * prior to clearing the ECS in putUwords */ qat_hal_rd_ae_csr(handle, ae, ALU_OUT, data); qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, ustore_addr); qat_hal_wr_uwords(handle, ae, 0, 1, &savuword); if (ctx != (savctx & ACS_ACNO)) qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS, savctx & ACS_ACNO); qat_hal_wr_ae_csr(handle, ae, CTX_ARB_CNTL, ctxarb_cntl); qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL, &csr_val); newcsr_val = scs_flag ? SET_BIT(csr_val, MMC_SHARE_CS_BITPOS) : CLR_BIT(csr_val, MMC_SHARE_CS_BITPOS); qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL, newcsr_val); qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables); return status; } static int qat_hal_wr_rel_reg(struct icp_qat_fw_loader_handle *handle, unsigned char ae, unsigned char ctx, enum icp_qat_uof_regtype reg_type, unsigned short reg_num, unsigned int data) { unsigned short src_hiaddr, src_lowaddr, dest_addr, data16hi, data16lo; uint64_t insts[] = { 0x0F440000000ull, 0x0F040000000ull, 0x0F0000C0300ull, 0x0E000010000ull }; const int num_inst = ARRAY_SIZE(insts), code_off = 1; const int imm_w1 = 0, imm_w0 = 1; dest_addr = qat_hal_get_reg_addr(reg_type, reg_num); if (dest_addr == BAD_REGADDR) { pr_err("QAT: bad destAddr=0x%x\n", dest_addr); return EINVAL; } data16lo = 0xffff & data; data16hi = 0xffff & (data >> 0x10); src_hiaddr = qat_hal_get_reg_addr(ICP_NO_DEST, (unsigned short)(0xff & data16hi)); src_lowaddr = qat_hal_get_reg_addr(ICP_NO_DEST, (unsigned short)(0xff & data16lo)); switch (reg_type) { case ICP_GPA_REL: insts[imm_w1] = insts[imm_w1] | ((data16hi >> 8) << 20) | ((src_hiaddr & 0x3ff) << 10) | (dest_addr & 0x3ff); insts[imm_w0] = insts[imm_w0] | ((data16lo >> 8) << 20) | ((src_lowaddr & 0x3ff) << 10) | (dest_addr & 0x3ff); break; default: insts[imm_w1] = insts[imm_w1] | ((data16hi >> 8) << 20) | ((dest_addr & 0x3ff) << 10) | (src_hiaddr & 0x3ff); insts[imm_w0] = insts[imm_w0] | ((data16lo >> 8) << 20) | ((dest_addr & 0x3ff) << 10) | (src_lowaddr & 0x3ff); break; } return qat_hal_exec_micro_inst( handle, ae, ctx, insts, num_inst, code_off, num_inst * 0x5, NULL); } int qat_hal_get_ins_num(void) { return ARRAY_SIZE(inst_4b); } static int qat_hal_concat_micro_code(uint64_t *micro_inst, unsigned int inst_num, unsigned int size, unsigned int addr, unsigned int *value) { int i; unsigned int cur_value; const uint64_t *inst_arr; unsigned int fixup_offset; int usize = 0; unsigned int orig_num; unsigned int delta; orig_num = inst_num; fixup_offset = inst_num; cur_value = value[0]; inst_arr = inst_4b; usize = ARRAY_SIZE(inst_4b); for (i = 0; i < usize; i++) micro_inst[inst_num++] = inst_arr[i]; INSERT_IMMED_GPRA_CONST(micro_inst[fixup_offset], (addr)); fixup_offset++; INSERT_IMMED_GPRA_CONST(micro_inst[fixup_offset], 0); fixup_offset++; INSERT_IMMED_GPRB_CONST(micro_inst[fixup_offset], (cur_value >> 0)); fixup_offset++; INSERT_IMMED_GPRB_CONST(micro_inst[fixup_offset], (cur_value >> 0x10)); delta = inst_num - orig_num; return (int)delta; } static int qat_hal_exec_micro_init_lm(struct icp_qat_fw_loader_handle *handle, unsigned char ae, unsigned char ctx, int *pfirst_exec, uint64_t *micro_inst, unsigned int inst_num) { int stat = 0; unsigned int gpra0 = 0, gpra1 = 0, gpra2 = 0; unsigned int gprb0 = 0, gprb1 = 0; if (*pfirst_exec) { qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0, &gpra0); qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0x1, &gpra1); qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0x2, &gpra2); qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPB_REL, 0, &gprb0); qat_hal_rd_rel_reg(handle, ae, ctx, 
ICP_GPB_REL, 0x1, &gprb1); *pfirst_exec = 0; } stat = qat_hal_exec_micro_inst( handle, ae, ctx, micro_inst, inst_num, 1, inst_num * 0x5, NULL); if (stat != 0) return EFAULT; qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0, gpra0); qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0x1, gpra1); qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0x2, gpra2); qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPB_REL, 0, gprb0); qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPB_REL, 0x1, gprb1); return 0; } int qat_hal_batch_wr_lm(struct icp_qat_fw_loader_handle *handle, unsigned char ae, struct icp_qat_uof_batch_init *lm_init_header) { struct icp_qat_uof_batch_init *plm_init; uint64_t *micro_inst_arry; int micro_inst_num; int alloc_inst_size; int first_exec = 1; int stat = 0; if (!lm_init_header) return 0; plm_init = lm_init_header->next; alloc_inst_size = lm_init_header->size; if ((unsigned int)alloc_inst_size > handle->hal_handle->max_ustore) alloc_inst_size = handle->hal_handle->max_ustore; micro_inst_arry = malloc(alloc_inst_size * sizeof(uint64_t), M_QAT, M_WAITOK | M_ZERO); micro_inst_num = 0; while (plm_init) { unsigned int addr, *value, size; ae = plm_init->ae; addr = plm_init->addr; value = plm_init->value; size = plm_init->size; micro_inst_num += qat_hal_concat_micro_code( micro_inst_arry, micro_inst_num, size, addr, value); plm_init = plm_init->next; } /* exec micro codes */ if (micro_inst_arry && micro_inst_num > 0) { micro_inst_arry[micro_inst_num++] = 0x0E000010000ull; stat = qat_hal_exec_micro_init_lm(handle, ae, 0, &first_exec, micro_inst_arry, micro_inst_num); } free(micro_inst_arry, M_QAT); return stat; } static int qat_hal_put_rel_rd_xfer(struct icp_qat_fw_loader_handle *handle, unsigned char ae, unsigned char ctx, enum icp_qat_uof_regtype reg_type, unsigned short reg_num, unsigned int val) { int status = 0; unsigned int reg_addr; unsigned int ctx_enables; unsigned short mask; unsigned short dr_offset = 0x10; status = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables); if (CE_INUSE_CONTEXTS & ctx_enables) { if (ctx & 0x1) { pr_err("QAT: bad 4-ctx mode,ctx=0x%x\n", ctx); return EINVAL; } mask = 0x1f; dr_offset = 0x20; } else { mask = 0x0f; } if (reg_num & ~mask) return EINVAL; reg_addr = reg_num + (ctx << 0x5); switch (reg_type) { case ICP_SR_RD_REL: case ICP_SR_REL: SET_AE_XFER(handle, ae, reg_addr, val); break; case ICP_DR_RD_REL: case ICP_DR_REL: SET_AE_XFER(handle, ae, (reg_addr + dr_offset), val); break; default: status = EINVAL; break; } return status; } static int qat_hal_put_rel_wr_xfer(struct icp_qat_fw_loader_handle *handle, unsigned char ae, unsigned char ctx, enum icp_qat_uof_regtype reg_type, unsigned short reg_num, unsigned int data) { unsigned int gprval, ctx_enables; unsigned short src_hiaddr, src_lowaddr, gpr_addr, xfr_addr, data16hi, data16low; unsigned short reg_mask; int status = 0; uint64_t micro_inst[] = { 0x0F440000000ull, 0x0F040000000ull, 0x0A000000000ull, 0x0F0000C0300ull, 0x0E000010000ull }; const int num_inst = ARRAY_SIZE(micro_inst), code_off = 1; const unsigned short gprnum = 0, dly = num_inst * 0x5; qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables); if (CE_INUSE_CONTEXTS & ctx_enables) { if (ctx & 0x1) { pr_err("QAT: 4-ctx mode,ctx=0x%x\n", ctx); return EINVAL; } reg_mask = (unsigned short)~0x1f; } else { reg_mask = (unsigned short)~0xf; } if (reg_num & reg_mask) return EINVAL; xfr_addr = qat_hal_get_reg_addr(reg_type, reg_num); if (xfr_addr == BAD_REGADDR) { pr_err("QAT: bad xfrAddr=0x%x\n", xfr_addr); return EINVAL; } qat_hal_rd_rel_reg(handle, 
ae, ctx, ICP_GPB_REL, gprnum, &gprval); gpr_addr = qat_hal_get_reg_addr(ICP_GPB_REL, gprnum); data16low = 0xffff & data; data16hi = 0xffff & (data >> 0x10); src_hiaddr = qat_hal_get_reg_addr(ICP_NO_DEST, (unsigned short)(0xff & data16hi)); src_lowaddr = qat_hal_get_reg_addr(ICP_NO_DEST, (unsigned short)(0xff & data16low)); micro_inst[0] = micro_inst[0x0] | ((data16hi >> 8) << 20) | ((gpr_addr & 0x3ff) << 10) | (src_hiaddr & 0x3ff); micro_inst[1] = micro_inst[0x1] | ((data16low >> 8) << 20) | ((gpr_addr & 0x3ff) << 10) | (src_lowaddr & 0x3ff); micro_inst[0x2] = micro_inst[0x2] | ((xfr_addr & 0x3ff) << 20) | ((gpr_addr & 0x3ff) << 10); status = qat_hal_exec_micro_inst( handle, ae, ctx, micro_inst, num_inst, code_off, dly, NULL); qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPB_REL, gprnum, gprval); return status; } static int qat_hal_put_rel_nn(struct icp_qat_fw_loader_handle *handle, unsigned char ae, unsigned char ctx, unsigned short nn, unsigned int val) { unsigned int ctx_enables; int stat = 0; qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables); ctx_enables &= IGNORE_W1C_MASK; qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables | CE_NN_MODE); stat = qat_hal_put_rel_wr_xfer(handle, ae, ctx, ICP_NEIGH_REL, nn, val); qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables); return stat; } static int qat_hal_convert_abs_to_rel(struct icp_qat_fw_loader_handle *handle, unsigned char ae, unsigned short absreg_num, unsigned short *relreg, unsigned char *ctx) { unsigned int ctx_enables; qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables); if (ctx_enables & CE_INUSE_CONTEXTS) { /* 4-ctx mode */ *relreg = absreg_num & 0x1F; *ctx = (absreg_num >> 0x4) & 0x6; } else { /* 8-ctx mode */ *relreg = absreg_num & 0x0F; *ctx = (absreg_num >> 0x4) & 0x7; } return 0; } int qat_hal_init_gpr(struct icp_qat_fw_loader_handle *handle, unsigned char ae, unsigned long ctx_mask, enum icp_qat_uof_regtype reg_type, unsigned short reg_num, unsigned int regdata) { int stat = 0; unsigned short reg; unsigned char ctx = 0; enum icp_qat_uof_regtype type; if (reg_num >= ICP_QAT_UCLO_MAX_GPR_REG) return EINVAL; do { if (ctx_mask == 0) { qat_hal_convert_abs_to_rel( handle, ae, reg_num, ®, &ctx); type = reg_type - 1; } else { reg = reg_num; type = reg_type; if (!test_bit(ctx, &ctx_mask)) continue; } stat = qat_hal_wr_rel_reg(handle, ae, ctx, type, reg, regdata); if (stat) { pr_err("QAT: write gpr fail\n"); return EINVAL; } } while (ctx_mask && (ctx++ < ICP_QAT_UCLO_MAX_CTX)); return 0; } int qat_hal_init_wr_xfer(struct icp_qat_fw_loader_handle *handle, unsigned char ae, unsigned long ctx_mask, enum icp_qat_uof_regtype reg_type, unsigned short reg_num, unsigned int regdata) { int stat = 0; unsigned short reg; unsigned char ctx = 0; enum icp_qat_uof_regtype type; if (reg_num >= ICP_QAT_UCLO_MAX_XFER_REG) return EINVAL; do { if (ctx_mask == 0) { qat_hal_convert_abs_to_rel( handle, ae, reg_num, ®, &ctx); type = reg_type - 3; } else { reg = reg_num; type = reg_type; if (!test_bit(ctx, &ctx_mask)) continue; } stat = qat_hal_put_rel_wr_xfer( handle, ae, ctx, type, reg, regdata); if (stat) { pr_err("QAT: write wr xfer fail\n"); return EINVAL; } } while (ctx_mask && (ctx++ < ICP_QAT_UCLO_MAX_CTX)); return 0; } int qat_hal_init_rd_xfer(struct icp_qat_fw_loader_handle *handle, unsigned char ae, unsigned long ctx_mask, enum icp_qat_uof_regtype reg_type, unsigned short reg_num, unsigned int regdata) { int stat = 0; unsigned short reg; unsigned char ctx = 0; enum icp_qat_uof_regtype type; if (reg_num >= ICP_QAT_UCLO_MAX_XFER_REG) 
return EINVAL; do { if (ctx_mask == 0) { qat_hal_convert_abs_to_rel( handle, ae, reg_num, ®, &ctx); type = reg_type - 3; } else { reg = reg_num; type = reg_type; if (!test_bit(ctx, &ctx_mask)) continue; } stat = qat_hal_put_rel_rd_xfer( handle, ae, ctx, type, reg, regdata); if (stat) { pr_err("QAT: write rd xfer fail\n"); return EINVAL; } } while (ctx_mask && (ctx++ < ICP_QAT_UCLO_MAX_CTX)); return 0; } int qat_hal_init_nn(struct icp_qat_fw_loader_handle *handle, unsigned char ae, unsigned long ctx_mask, unsigned short reg_num, unsigned int regdata) { int stat = 0; unsigned char ctx; + if (IS_QAT_GEN4(pci_get_device(GET_DEV(handle->accel_dev)))) { + pr_err("QAT: No next neigh for CPM2X\n"); + return EINVAL; + } + if (ctx_mask == 0) return EINVAL; for_each_set_bit(ctx, &ctx_mask, ICP_QAT_UCLO_MAX_CTX) { stat = qat_hal_put_rel_nn(handle, ae, ctx, reg_num, regdata); if (stat) { pr_err("QAT: write neigh error\n"); return EINVAL; } } return 0; } diff --git a/sys/dev/qat/qat_common/qat_uclo.c b/sys/dev/qat/qat_common/qat_uclo.c index 4232fb0ad095..12b5dafbf73f 100644 --- a/sys/dev/qat/qat_common/qat_uclo.c +++ b/sys/dev/qat/qat_common/qat_uclo.c @@ -1,2188 +1,2414 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright(c) 2007-2022 Intel Corporation */ /* $FreeBSD$ */ #include "qat_freebsd.h" #include "adf_cfg.h" #include "adf_common_drv.h" #include "adf_accel_devices.h" #include "icp_qat_uclo.h" #include "icp_qat_fw.h" #include "icp_qat_fw_init_admin.h" #include "adf_cfg_strings.h" #include "adf_transport_access_macros.h" #include "adf_transport_internal.h" #include #include #include #include "adf_accel_devices.h" #include "adf_common_drv.h" #include "icp_qat_uclo.h" #include "icp_qat_hal.h" #include "icp_qat_fw_loader_handle.h" #define UWORD_CPYBUF_SIZE 1024 #define INVLD_UWORD 0xffffffffffull #define PID_MINOR_REV 0xf #define PID_MAJOR_REV (0xf << 4) #define MAX_UINT32_VAL 0xfffffffful static int qat_uclo_init_ae_data(struct icp_qat_uclo_objhandle *obj_handle, unsigned int ae, unsigned int image_num) { struct icp_qat_uclo_aedata *ae_data; struct icp_qat_uclo_encapme *encap_image; struct icp_qat_uclo_page *page = NULL; struct icp_qat_uclo_aeslice *ae_slice = NULL; ae_data = &obj_handle->ae_data[ae]; encap_image = &obj_handle->ae_uimage[image_num]; ae_slice = &ae_data->ae_slices[ae_data->slice_num]; ae_slice->encap_image = encap_image; if (encap_image->img_ptr) { ae_slice->ctx_mask_assigned = encap_image->img_ptr->ctx_assigned; ae_data->shareable_ustore = ICP_QAT_SHARED_USTORE_MODE(encap_image->img_ptr->ae_mode); - ae_data->eff_ustore_size = ae_data->shareable_ustore ? - (obj_handle->ustore_phy_size << 1) : - obj_handle->ustore_phy_size; + if (obj_handle->prod_type == ICP_QAT_AC_4XXX_A_DEV_TYPE) + ae_data->eff_ustore_size = obj_handle->ustore_phy_size; + else { + ae_data->eff_ustore_size = ae_data->shareable_ustore ? 
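/*
 * On 4xxx (GEN4) parts the physical ustore size is taken as-is in the
 * branch above; otherwise a shareable control store lets a pair of AEs
 * combine their stores, doubling the effective size (the "<< 1" below).
 */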
+ (obj_handle->ustore_phy_size << 1) : + obj_handle->ustore_phy_size; + } } else { ae_slice->ctx_mask_assigned = 0; } ae_slice->region = malloc(sizeof(*ae_slice->region), M_QAT, M_WAITOK | M_ZERO); ae_slice->page = malloc(sizeof(*ae_slice->page), M_QAT, M_WAITOK | M_ZERO); page = ae_slice->page; page->encap_page = encap_image->page; ae_slice->page->region = ae_slice->region; ae_data->slice_num++; return 0; } static int qat_uclo_free_ae_data(struct icp_qat_uclo_aedata *ae_data) { unsigned int i; if (!ae_data) { pr_err("QAT: bad argument, ae_data is NULL\n"); return EINVAL; } for (i = 0; i < ae_data->slice_num; i++) { free(ae_data->ae_slices[i].region, M_QAT); ae_data->ae_slices[i].region = NULL; free(ae_data->ae_slices[i].page, M_QAT); ae_data->ae_slices[i].page = NULL; } return 0; } static char * qat_uclo_get_string(struct icp_qat_uof_strtable *str_table, unsigned int str_offset) { if (!str_table->table_len || str_offset > str_table->table_len) return NULL; return (char *)(((uintptr_t)(str_table->strings)) + str_offset); } static int qat_uclo_check_uof_format(struct icp_qat_uof_filehdr *hdr) { int maj = hdr->maj_ver & 0xff; int min = hdr->min_ver & 0xff; if (hdr->file_id != ICP_QAT_UOF_FID) { pr_err("QAT: Invalid header 0x%x\n", hdr->file_id); return EINVAL; } if (min != ICP_QAT_UOF_MINVER || maj != ICP_QAT_UOF_MAJVER) { pr_err("QAT: bad UOF version, major 0x%x, minor 0x%x\n", maj, min); return EINVAL; } return 0; } static int qat_uclo_check_suof_format(const struct icp_qat_suof_filehdr *suof_hdr) { int maj = suof_hdr->maj_ver & 0xff; int min = suof_hdr->min_ver & 0xff; if (suof_hdr->file_id != ICP_QAT_SUOF_FID) { pr_err("QAT: invalid header 0x%x\n", suof_hdr->file_id); return EINVAL; } if (suof_hdr->fw_type != 0) { pr_err("QAT: unsupported firmware type\n"); return EINVAL; } if (suof_hdr->num_chunks <= 0x1) { pr_err("QAT: SUOF chunk count is incorrect\n"); return EINVAL; } if (maj != ICP_QAT_SUOF_MAJVER || min != ICP_QAT_SUOF_MINVER) { pr_err("QAT: bad SUOF version, major 0x%x, minor 0x%x\n", maj, min); return EINVAL; } return 0; } static int qat_uclo_wr_sram_by_words(struct icp_qat_fw_loader_handle *handle, unsigned int addr, const unsigned int *val, unsigned int num_in_bytes) { unsigned int outval; const unsigned char *ptr = (const unsigned char *)val; if (num_in_bytes > handle->hal_sram_size) { pr_err("QAT: error, mmp size overflow %d\n", num_in_bytes); return EINVAL; } while (num_in_bytes) { memcpy(&outval, ptr, 4); SRAM_WRITE(handle, addr, outval); num_in_bytes -= 4; ptr += 4; addr += 4; } return 0; } static void qat_uclo_wr_umem_by_words(struct icp_qat_fw_loader_handle *handle, unsigned char ae, unsigned int addr, unsigned int *val, unsigned int num_in_bytes) { unsigned int outval; unsigned char *ptr = (unsigned char *)val; addr >>= 0x2; /* convert to uword address */ while (num_in_bytes) { memcpy(&outval, ptr, 4); qat_hal_wr_umem(handle, ae, addr++, 1, &outval); num_in_bytes -= 4; ptr += 4; } } static void qat_uclo_batch_wr_umem(struct icp_qat_fw_loader_handle *handle, unsigned char ae, struct icp_qat_uof_batch_init *umem_init_header) { struct icp_qat_uof_batch_init *umem_init; if (!umem_init_header) return; umem_init = umem_init_header->next; while (umem_init) { unsigned int addr, *value, size; ae = umem_init->ae; addr = umem_init->addr; value = umem_init->value; size = umem_init->size; qat_uclo_wr_umem_by_words(handle, ae, addr, value, size); umem_init = umem_init->next; } } static void qat_uclo_cleanup_batch_init_list(struct icp_qat_fw_loader_handle *handle, struct
icp_qat_uof_batch_init **base) { struct icp_qat_uof_batch_init *umem_init; umem_init = *base; while (umem_init) { struct icp_qat_uof_batch_init *pre; pre = umem_init; umem_init = umem_init->next; free(pre, M_QAT); } *base = NULL; } static int qat_uclo_parse_num(char *str, unsigned int *num) { char buf[16] = { 0 }; unsigned long ae = 0; int i; strncpy(buf, str, 15); for (i = 0; i < 16; i++) { if (!isdigit(buf[i])) { buf[i] = '\0'; break; } } if ((compat_strtoul(buf, 10, &ae))) return EFAULT; if (ae > MAX_UINT32_VAL) return EFAULT; *num = (unsigned int)ae; return 0; } static int qat_uclo_fetch_initmem_ae(struct icp_qat_fw_loader_handle *handle, struct icp_qat_uof_initmem *init_mem, unsigned int size_range, unsigned int *ae) { struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; char *str; if ((init_mem->addr + init_mem->num_in_bytes) > (size_range << 0x2)) { pr_err("QAT: initmem is out of range"); return EINVAL; } if (init_mem->scope != ICP_QAT_UOF_LOCAL_SCOPE) { pr_err("QAT: Memory scope for init_mem error\n"); return EINVAL; } str = qat_uclo_get_string(&obj_handle->str_table, init_mem->sym_name); if (!str) { pr_err("QAT: AE name assigned in UOF init table is NULL\n"); return EINVAL; } if (qat_uclo_parse_num(str, ae)) { pr_err("QAT: Parse num for AE number failed\n"); return EINVAL; } if (*ae >= ICP_QAT_UCLO_MAX_AE) { pr_err("QAT: ae %d out of range\n", *ae); return EINVAL; } return 0; } static int qat_uclo_create_batch_init_list(struct icp_qat_fw_loader_handle *handle, struct icp_qat_uof_initmem *init_mem, unsigned int ae, struct icp_qat_uof_batch_init **init_tab_base) { struct icp_qat_uof_batch_init *init_header, *tail; struct icp_qat_uof_batch_init *mem_init, *tail_old; struct icp_qat_uof_memvar_attr *mem_val_attr; unsigned int i = 0; mem_val_attr = (struct icp_qat_uof_memvar_attr *)((uintptr_t)init_mem + sizeof( struct icp_qat_uof_initmem)); init_header = *init_tab_base; if (!init_header) { init_header = malloc(sizeof(*init_header), M_QAT, M_WAITOK | M_ZERO); init_header->size = 1; *init_tab_base = init_header; } tail_old = init_header; while (tail_old->next) tail_old = tail_old->next; tail = tail_old; for (i = 0; i < init_mem->val_attr_num; i++) { mem_init = malloc(sizeof(*mem_init), M_QAT, M_WAITOK | M_ZERO); mem_init->ae = ae; mem_init->addr = init_mem->addr + mem_val_attr->offset_in_byte; mem_init->value = &mem_val_attr->value; mem_init->size = 4; mem_init->next = NULL; tail->next = mem_init; tail = mem_init; init_header->size += qat_hal_get_ins_num(); mem_val_attr++; } return 0; } static int qat_uclo_init_lmem_seg(struct icp_qat_fw_loader_handle *handle, struct icp_qat_uof_initmem *init_mem) { struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; unsigned int ae; + unsigned int lmem; + + lmem = IS_QAT_GEN4(pci_get_device(GET_DEV(handle->accel_dev))) ? 
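/*
 * CPM2X (GEN4) AEs expose a larger local-memory window; the bound
 * selected here is handed to qat_uclo_fetch_initmem_ae(), which
 * range-checks each LMEM init entry against it.
 */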
+ ICP_QAT_UCLO_MAX_LMEM_REG_2X : + ICP_QAT_UCLO_MAX_LMEM_REG; - if (qat_uclo_fetch_initmem_ae( - handle, init_mem, ICP_QAT_UCLO_MAX_LMEM_REG, &ae)) + if (qat_uclo_fetch_initmem_ae(handle, init_mem, lmem, &ae)) return EINVAL; if (qat_uclo_create_batch_init_list( handle, init_mem, ae, &obj_handle->lm_init_tab[ae])) return EINVAL; return 0; } static int qat_uclo_init_umem_seg(struct icp_qat_fw_loader_handle *handle, struct icp_qat_uof_initmem *init_mem) { struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; unsigned int ae, ustore_size, uaddr, i; struct icp_qat_uclo_aedata *aed; ustore_size = obj_handle->ustore_phy_size; if (qat_uclo_fetch_initmem_ae(handle, init_mem, ustore_size, &ae)) return EINVAL; if (qat_uclo_create_batch_init_list( handle, init_mem, ae, &obj_handle->umem_init_tab[ae])) return EINVAL; /* set the highest ustore address referenced */ uaddr = (init_mem->addr + init_mem->num_in_bytes) >> 0x2; aed = &obj_handle->ae_data[ae]; for (i = 0; i < aed->slice_num; i++) { if (aed->ae_slices[i].encap_image->uwords_num < uaddr) aed->ae_slices[i].encap_image->uwords_num = uaddr; } return 0; } #define ICP_DH895XCC_PESRAM_BAR_SIZE 0x80000 static int qat_uclo_init_ae_memory(struct icp_qat_fw_loader_handle *handle, struct icp_qat_uof_initmem *init_mem) { switch (init_mem->region) { case ICP_QAT_UOF_LMEM_REGION: if (qat_uclo_init_lmem_seg(handle, init_mem)) return EINVAL; break; case ICP_QAT_UOF_UMEM_REGION: if (qat_uclo_init_umem_seg(handle, init_mem)) return EINVAL; break; default: pr_err("QAT: initmem region error. region type=0x%x\n", init_mem->region); return EINVAL; } return 0; } static int qat_uclo_init_ustore(struct icp_qat_fw_loader_handle *handle, struct icp_qat_uclo_encapme *image) { unsigned int i; struct icp_qat_uclo_encap_page *page; struct icp_qat_uof_image *uof_image; unsigned char ae = 0; unsigned char neigh_ae; unsigned int ustore_size; unsigned int patt_pos; struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; uint64_t *fill_data; static unsigned int init[32] = { 0 }; unsigned long ae_mask = handle->hal_handle->ae_mask; uof_image = image->img_ptr; /*if shared CS mode, the ustore size should be 2*ustore_phy_size*/ fill_data = malloc(obj_handle->ustore_phy_size * 2 * sizeof(uint64_t), M_QAT, M_WAITOK | M_ZERO); for (i = 0; i < obj_handle->ustore_phy_size * 2; i++) memcpy(&fill_data[i], &uof_image->fill_pattern, sizeof(uint64_t)); page = image->page; for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) { unsigned long cfg_ae_mask = handle->cfg_ae_mask; unsigned long ae_assigned = uof_image->ae_assigned; + const bool gen4 = + IS_QAT_GEN4(pci_get_device(GET_DEV(handle->accel_dev))); if (!test_bit(ae, &cfg_ae_mask)) continue; if (!test_bit(ae, &ae_assigned)) continue; - if (obj_handle->ae_data[ae].shareable_ustore && (ae & 1)) { + if (obj_handle->ae_data[ae].shareable_ustore && (ae & 1) && + !gen4) { qat_hal_get_scs_neigh_ae(ae, &neigh_ae); if (test_bit(neigh_ae, &ae_assigned)) continue; } ustore_size = obj_handle->ae_data[ae].eff_ustore_size; patt_pos = page->beg_addr_p + page->micro_words_num; - if (obj_handle->ae_data[ae].shareable_ustore) { + if (obj_handle->ae_data[ae].shareable_ustore && !gen4) { qat_hal_get_scs_neigh_ae(ae, &neigh_ae); if (init[ae] == 0 && page->beg_addr_p != 0) { qat_hal_wr_coalesce_uwords(handle, (unsigned char)ae, 0, page->beg_addr_p, &fill_data[0]); } qat_hal_wr_coalesce_uwords( handle, (unsigned char)ae, patt_pos, ustore_size - patt_pos, &fill_data[page->beg_addr_p]); init[ae] = 1; init[neigh_ae] = 1; } else { + if 
(gen4 && (ae % 4 != 0)) + continue; + qat_hal_wr_uwords(handle, (unsigned char)ae, 0, page->beg_addr_p, &fill_data[0]); qat_hal_wr_uwords(handle, (unsigned char)ae, patt_pos, ustore_size - patt_pos + 1, &fill_data[page->beg_addr_p]); } } free(fill_data, M_QAT); return 0; } static int qat_uclo_init_memory(struct icp_qat_fw_loader_handle *handle) { int i; int ae = 0; struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; struct icp_qat_uof_initmem *initmem = obj_handle->init_mem_tab.init_mem; unsigned long ae_mask = handle->hal_handle->ae_mask; for (i = 0; i < obj_handle->init_mem_tab.entry_num; i++) { if (initmem->num_in_bytes) { if (qat_uclo_init_ae_memory(handle, initmem)) return EINVAL; } initmem = (struct icp_qat_uof_initmem *)((uintptr_t)((uintptr_t)initmem + sizeof(struct icp_qat_uof_initmem)) + (sizeof(struct icp_qat_uof_memvar_attr) * initmem->val_attr_num)); } for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) { if (qat_hal_batch_wr_lm(handle, ae, obj_handle->lm_init_tab[ae])) { pr_err("QAT: fail to batch init lmem for AE %d\n", ae); return EINVAL; } qat_uclo_cleanup_batch_init_list(handle, &obj_handle->lm_init_tab[ae]); qat_uclo_batch_wr_umem(handle, ae, obj_handle->umem_init_tab[ae]); qat_uclo_cleanup_batch_init_list( handle, &obj_handle->umem_init_tab[ae]); } return 0; } static void * qat_uclo_find_chunk(struct icp_qat_uof_objhdr *obj_hdr, char *chunk_id, void *cur) { int i; struct icp_qat_uof_chunkhdr *chunk_hdr = (struct icp_qat_uof_chunkhdr *)((uintptr_t)obj_hdr + sizeof(struct icp_qat_uof_objhdr)); for (i = 0; i < obj_hdr->num_chunks; i++) { if ((cur < (void *)&chunk_hdr[i]) && !strncmp(chunk_hdr[i].chunk_id, chunk_id, ICP_QAT_UOF_OBJID_LEN)) { return &chunk_hdr[i]; } } return NULL; } static unsigned int qat_uclo_calc_checksum(unsigned int reg, int ch) { int i; unsigned int topbit = 1 << 0xF; unsigned int inbyte = (unsigned int)((reg >> 0x18) ^ ch); reg ^= inbyte << 0x8; for (i = 0; i < 0x8; i++) { if (reg & topbit) reg = (reg << 1) ^ 0x1021; else reg <<= 1; } return reg & 0xFFFF; } static unsigned int qat_uclo_calc_str_checksum(const char *ptr, int num) { unsigned int chksum = 0; if (ptr) while (num--) chksum = qat_uclo_calc_checksum(chksum, *ptr++); return chksum; } static struct icp_qat_uclo_objhdr * qat_uclo_map_chunk(char *buf, struct icp_qat_uof_filehdr *file_hdr, char *chunk_id) { struct icp_qat_uof_filechunkhdr *file_chunk; struct icp_qat_uclo_objhdr *obj_hdr; char *chunk; int i; file_chunk = (struct icp_qat_uof_filechunkhdr *)(buf + sizeof(struct icp_qat_uof_filehdr)); for (i = 0; i < file_hdr->num_chunks; i++) { if (!strncmp(file_chunk->chunk_id, chunk_id, ICP_QAT_UOF_OBJID_LEN)) { chunk = buf + file_chunk->offset; if (file_chunk->checksum != qat_uclo_calc_str_checksum(chunk, file_chunk->size)) break; obj_hdr = malloc(sizeof(*obj_hdr), M_QAT, M_WAITOK | M_ZERO); obj_hdr->file_buff = chunk; obj_hdr->checksum = file_chunk->checksum; obj_hdr->size = file_chunk->size; return obj_hdr; } file_chunk++; } return NULL; } static unsigned int qat_uclo_check_image_compat(struct icp_qat_uof_encap_obj *encap_uof_obj, struct icp_qat_uof_image *image) { struct icp_qat_uof_objtable *uc_var_tab, *imp_var_tab, *imp_expr_tab; struct icp_qat_uof_objtable *neigh_reg_tab; struct icp_qat_uof_code_page *code_page; code_page = (struct icp_qat_uof_code_page *)((char *)image + sizeof(struct icp_qat_uof_image)); uc_var_tab = (struct icp_qat_uof_objtable *)(encap_uof_obj->beg_uof + code_page->uc_var_tab_offset); imp_var_tab = (struct icp_qat_uof_objtable 
*)(encap_uof_obj->beg_uof + code_page->imp_var_tab_offset); imp_expr_tab = (struct icp_qat_uof_objtable *)(encap_uof_obj->beg_uof + code_page->imp_expr_tab_offset); if (uc_var_tab->entry_num || imp_var_tab->entry_num || imp_expr_tab->entry_num) { pr_err("QAT: UOF can't contain imported variable to be parsed"); return EINVAL; } neigh_reg_tab = (struct icp_qat_uof_objtable *)(encap_uof_obj->beg_uof + code_page->neigh_reg_tab_offset); if (neigh_reg_tab->entry_num) { pr_err("QAT: UOF can't contain neighbor register table\n"); return EINVAL; } if (image->numpages > 1) { pr_err("QAT: UOF can't contain multiple pages\n"); return EINVAL; } if (RELOADABLE_CTX_SHARED_MODE(image->ae_mode)) { pr_err("QAT: UOF can't use reloadable feature\n"); return EFAULT; } return 0; } static void qat_uclo_map_image_page(struct icp_qat_uof_encap_obj *encap_uof_obj, struct icp_qat_uof_image *img, struct icp_qat_uclo_encap_page *page) { struct icp_qat_uof_code_page *code_page; struct icp_qat_uof_code_area *code_area; struct icp_qat_uof_objtable *uword_block_tab; struct icp_qat_uof_uword_block *uwblock; int i; code_page = (struct icp_qat_uof_code_page *)((char *)img + sizeof(struct icp_qat_uof_image)); page->def_page = code_page->def_page; page->page_region = code_page->page_region; page->beg_addr_v = code_page->beg_addr_v; page->beg_addr_p = code_page->beg_addr_p; code_area = (struct icp_qat_uof_code_area *)(encap_uof_obj->beg_uof + code_page->code_area_offset); page->micro_words_num = code_area->micro_words_num; uword_block_tab = (struct icp_qat_uof_objtable *)(encap_uof_obj->beg_uof + code_area->uword_block_tab); page->uwblock_num = uword_block_tab->entry_num; uwblock = (struct icp_qat_uof_uword_block *)((char *)uword_block_tab + sizeof(struct icp_qat_uof_objtable)); page->uwblock = (struct icp_qat_uclo_encap_uwblock *)uwblock; for (i = 0; i < uword_block_tab->entry_num; i++) page->uwblock[i].micro_words = (uintptr_t)encap_uof_obj->beg_uof + uwblock[i].uword_offset; } static int qat_uclo_map_uimage(struct icp_qat_uclo_objhandle *obj_handle, struct icp_qat_uclo_encapme *ae_uimage, int max_image) { int i, j; struct icp_qat_uof_chunkhdr *chunk_hdr = NULL; struct icp_qat_uof_image *image; struct icp_qat_uof_objtable *ae_regtab; struct icp_qat_uof_objtable *init_reg_sym_tab; struct icp_qat_uof_objtable *sbreak_tab; struct icp_qat_uof_encap_obj *encap_uof_obj = &obj_handle->encap_uof_obj; for (j = 0; j < max_image; j++) { chunk_hdr = qat_uclo_find_chunk(encap_uof_obj->obj_hdr, ICP_QAT_UOF_IMAG, chunk_hdr); if (!chunk_hdr) break; image = (struct icp_qat_uof_image *)(encap_uof_obj->beg_uof + chunk_hdr->offset); ae_regtab = (struct icp_qat_uof_objtable *)(image->reg_tab_offset + obj_handle->obj_hdr ->file_buff); ae_uimage[j].ae_reg_num = ae_regtab->entry_num; ae_uimage[j].ae_reg = (struct icp_qat_uof_ae_reg *)(((char *)ae_regtab) + sizeof(struct icp_qat_uof_objtable)); init_reg_sym_tab = (struct icp_qat_uof_objtable *)(image->init_reg_sym_tab + obj_handle->obj_hdr ->file_buff); ae_uimage[j].init_regsym_num = init_reg_sym_tab->entry_num; ae_uimage[j].init_regsym = (struct icp_qat_uof_init_regsym *)(((char *)init_reg_sym_tab) + sizeof(struct icp_qat_uof_objtable)); sbreak_tab = (struct icp_qat_uof_objtable *)(image->sbreak_tab + obj_handle->obj_hdr ->file_buff); ae_uimage[j].sbreak_num = sbreak_tab->entry_num; ae_uimage[j].sbreak = (struct icp_qat_uof_sbreak *)(((char *)sbreak_tab) + sizeof(struct icp_qat_uof_objtable)); ae_uimage[j].img_ptr = image; if (qat_uclo_check_image_compat(encap_uof_obj, image)) goto out_err; 
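/*
 * A compatibility failure aborts the whole mapping; the out_err path
 * that follows frees the page structures allocated for the images
 * already mapped in this loop.
 */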
ae_uimage[j].page = malloc(sizeof(struct icp_qat_uclo_encap_page), M_QAT, M_WAITOK | M_ZERO); qat_uclo_map_image_page(encap_uof_obj, image, ae_uimage[j].page); } return j; out_err: for (i = 0; i < j; i++) free(ae_uimage[i].page, M_QAT); return 0; } +static int +UcLo_checkTGroupList2X(struct icp_qat_fw_loader_handle *handle) +{ + int i; + unsigned int swAe = 0; + unsigned int ii, jj; + struct icp_qat_uclo_aedata *ae_data0, *ae_datax; + struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; + + for (i = 0; i < obj_handle->uimage_num; i++) { + struct icp_qat_uof_image *image = + obj_handle->ae_uimage[i].img_ptr; + if (image->numpages > 1) { + pr_err( + "Only 1 page is allowed in a UOF for CPM2X; found %d in %s\n", + image->numpages, + qat_uclo_get_string(&obj_handle->str_table, + image->img_name)); + return EINVAL; + } + } + + for (swAe = 0; + (swAe < obj_handle->ae_num) && (swAe < ICP_QAT_UCLO_MAX_AE); + swAe += AE_TG_NUM_CPM2X) { + if (!qat_hal_check_ae_active(handle, swAe)) { + continue; + } + + for (ii = swAe; ii < (swAe + AE_TG_NUM_CPM2X); ii++) { + ae_data0 = &obj_handle->ae_data[ii]; + if (ae_data0->slice_num != 1) /* not assigned */ + continue; + + for (jj = ii + 1; jj < (swAe + AE_TG_NUM_CPM2X); jj++) { + ae_datax = &obj_handle->ae_data[jj]; + if (ae_datax->slice_num != 1) /* not assigned */ + continue; + if (ae_data0->ae_slices[0] + .encap_image->img_ptr != + ae_datax->ae_slices[0] + .encap_image->img_ptr) { + pr_err( + "Only 1 list is allowed in a Tgroup for CPM2X; ME%d and %d are assigned different list files\n", + ii, + jj); + return EINVAL; + } + } + } + } + + return 0; +} + static int qat_uclo_map_ae(struct icp_qat_fw_loader_handle *handle, int max_ae) { int i; int ae = 0; unsigned long ae_mask = handle->hal_handle->ae_mask; unsigned long cfg_ae_mask = handle->cfg_ae_mask; int mflag = 0; struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; for_each_set_bit(ae, &ae_mask, max_ae) { if (!test_bit(ae, &cfg_ae_mask)) continue; for (i = 0; i < obj_handle->uimage_num; i++) { unsigned long ae_assigned = obj_handle->ae_uimage[i].img_ptr->ae_assigned; if (!test_bit(ae, &ae_assigned)) continue; mflag = 1; if (qat_uclo_init_ae_data(obj_handle, ae, i)) return EINVAL; } } + if (IS_QAT_GEN4(pci_get_device(GET_DEV(handle->accel_dev)))) { + if (UcLo_checkTGroupList2X(handle)) { + return EINVAL; + } + } if (!mflag) { pr_err("QAT: uimage uses AE not set"); return EINVAL; } return 0; } static struct icp_qat_uof_strtable * qat_uclo_map_str_table(struct icp_qat_uclo_objhdr *obj_hdr, char *tab_name, struct icp_qat_uof_strtable *str_table) { struct icp_qat_uof_chunkhdr *chunk_hdr; chunk_hdr = qat_uclo_find_chunk((struct icp_qat_uof_objhdr *)obj_hdr->file_buff, tab_name, NULL); if (chunk_hdr) { int hdr_size; memcpy(&str_table->table_len, obj_hdr->file_buff + chunk_hdr->offset, sizeof(str_table->table_len)); hdr_size = (char *)&str_table->strings - (char *)str_table; str_table->strings = (uintptr_t)obj_hdr->file_buff + chunk_hdr->offset + hdr_size; return str_table; } return NULL; } static void qat_uclo_map_initmem_table(struct icp_qat_uof_encap_obj *encap_uof_obj, struct icp_qat_uclo_init_mem_table *init_mem_tab) { struct icp_qat_uof_chunkhdr *chunk_hdr; chunk_hdr = qat_uclo_find_chunk(encap_uof_obj->obj_hdr, ICP_QAT_UOF_IMEM, NULL); if (chunk_hdr) { memmove(&init_mem_tab->entry_num, encap_uof_obj->beg_uof + chunk_hdr->offset, sizeof(unsigned int)); init_mem_tab->init_mem = (struct icp_qat_uof_initmem *)(encap_uof_obj->beg_uof + chunk_hdr->offset +
sizeof(unsigned int)); } } static unsigned int qat_uclo_get_dev_type(struct icp_qat_fw_loader_handle *handle) { switch (pci_get_device(GET_DEV(handle->accel_dev))) { case ADF_DH895XCC_PCI_DEVICE_ID: return ICP_QAT_AC_895XCC_DEV_TYPE; case ADF_C62X_PCI_DEVICE_ID: return ICP_QAT_AC_C62X_DEV_TYPE; case ADF_C3XXX_PCI_DEVICE_ID: return ICP_QAT_AC_C3XXX_DEV_TYPE; case ADF_200XX_PCI_DEVICE_ID: return ICP_QAT_AC_200XX_DEV_TYPE; case ADF_C4XXX_PCI_DEVICE_ID: return ICP_QAT_AC_C4XXX_DEV_TYPE; + case ADF_4XXX_PCI_DEVICE_ID: + case ADF_401XX_PCI_DEVICE_ID: + return ICP_QAT_AC_4XXX_A_DEV_TYPE; default: pr_err("QAT: unsupported device 0x%x\n", pci_get_device(GET_DEV(handle->accel_dev))); return 0; } } static int qat_uclo_check_uof_compat(struct icp_qat_uclo_objhandle *obj_handle) { unsigned int maj_ver, prod_type = obj_handle->prod_type; if (!(prod_type & obj_handle->encap_uof_obj.obj_hdr->ac_dev_type)) { pr_err("QAT: UOF type 0x%x doesn't match with platform 0x%x\n", obj_handle->encap_uof_obj.obj_hdr->ac_dev_type, prod_type); return EINVAL; } maj_ver = obj_handle->prod_rev & 0xff; if (obj_handle->encap_uof_obj.obj_hdr->max_cpu_ver < maj_ver || obj_handle->encap_uof_obj.obj_hdr->min_cpu_ver > maj_ver) { pr_err("QAT: UOF maj_ver 0x%x out of range\n", maj_ver); return EINVAL; } return 0; } static int qat_uclo_init_reg(struct icp_qat_fw_loader_handle *handle, unsigned char ae, unsigned char ctx_mask, enum icp_qat_uof_regtype reg_type, unsigned short reg_addr, unsigned int value) { switch (reg_type) { case ICP_GPA_ABS: case ICP_GPB_ABS: ctx_mask = 0; return qat_hal_init_gpr( handle, ae, ctx_mask, reg_type, reg_addr, value); case ICP_GPA_REL: case ICP_GPB_REL: return qat_hal_init_gpr( handle, ae, ctx_mask, reg_type, reg_addr, value); case ICP_SR_ABS: case ICP_DR_ABS: case ICP_SR_RD_ABS: case ICP_DR_RD_ABS: ctx_mask = 0; return qat_hal_init_rd_xfer( handle, ae, ctx_mask, reg_type, reg_addr, value); case ICP_SR_REL: case ICP_DR_REL: case ICP_SR_RD_REL: case ICP_DR_RD_REL: return qat_hal_init_rd_xfer( handle, ae, ctx_mask, reg_type, reg_addr, value); case ICP_SR_WR_ABS: case ICP_DR_WR_ABS: ctx_mask = 0; return qat_hal_init_wr_xfer( handle, ae, ctx_mask, reg_type, reg_addr, value); case ICP_SR_WR_REL: case ICP_DR_WR_REL: return qat_hal_init_wr_xfer( handle, ae, ctx_mask, reg_type, reg_addr, value); case ICP_NEIGH_REL: return qat_hal_init_nn(handle, ae, ctx_mask, reg_addr, value); default: pr_err("QAT: UOF uses unsupported reg type 0x%x\n", reg_type); return EFAULT; } return 0; } static int qat_uclo_init_reg_sym(struct icp_qat_fw_loader_handle *handle, unsigned int ae, struct icp_qat_uclo_encapme *encap_ae) { unsigned int i; unsigned char ctx_mask; struct icp_qat_uof_init_regsym *init_regsym; if (ICP_QAT_CTX_MODE(encap_ae->img_ptr->ae_mode) == ICP_QAT_UCLO_MAX_CTX) ctx_mask = 0xff; else ctx_mask = 0x55; for (i = 0; i < encap_ae->init_regsym_num; i++) { unsigned int exp_res; init_regsym = &encap_ae->init_regsym[i]; exp_res = init_regsym->value; switch (init_regsym->init_type) { case ICP_QAT_UOF_INIT_REG: qat_uclo_init_reg(handle, ae, ctx_mask, (enum icp_qat_uof_regtype) init_regsym->reg_type, (unsigned short)init_regsym->reg_addr, exp_res); break; case ICP_QAT_UOF_INIT_REG_CTX: /* check if ctx is appropriate for the ctxMode */ if (!((1 << init_regsym->ctx) & ctx_mask)) { pr_err("QAT: invalid ctx num = 0x%x\n", init_regsym->ctx); return EINVAL; } qat_uclo_init_reg( handle, ae, (unsigned char)(1 << init_regsym->ctx), (enum icp_qat_uof_regtype)init_regsym->reg_type, (unsigned short)init_regsym->reg_addr, exp_res); 
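/*
 * INIT_REG_CTX writes a single context: the (1 << ctx) mask passed
 * above was already validated against ctx_mask, which is 0xff in
 * 8-context mode and 0x55 in 4-context mode (only even contexts run).
 */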
break; case ICP_QAT_UOF_INIT_EXPR: pr_err("QAT: INIT_EXPR feature not supported\n"); return EINVAL; case ICP_QAT_UOF_INIT_EXPR_ENDIAN_SWAP: pr_err("QAT: INIT_EXPR_ENDIAN_SWAP not supported\n"); return EINVAL; default: break; } } return 0; } static int qat_uclo_init_globals(struct icp_qat_fw_loader_handle *handle) { struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; unsigned int s; unsigned int ae = 0; struct icp_qat_uclo_aedata *aed; unsigned long ae_mask = handle->hal_handle->ae_mask; if (obj_handle->global_inited) return 0; if (obj_handle->init_mem_tab.entry_num) { if (qat_uclo_init_memory(handle)) { pr_err("QAT: initialize memory failed\n"); return EINVAL; } } for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) { aed = &obj_handle->ae_data[ae]; for (s = 0; s < aed->slice_num; s++) { if (!aed->ae_slices[s].encap_image) continue; if (qat_uclo_init_reg_sym( handle, ae, aed->ae_slices[s].encap_image)) return EINVAL; } } obj_handle->global_inited = 1; return 0; } static int qat_hal_set_modes(struct icp_qat_fw_loader_handle *handle, struct icp_qat_uclo_objhandle *obj_handle, unsigned char ae, struct icp_qat_uof_image *uof_image) { unsigned char nn_mode; char ae_mode = 0; ae_mode = (char)ICP_QAT_CTX_MODE(uof_image->ae_mode); if (qat_hal_set_ae_ctx_mode(handle, ae, ae_mode)) { pr_err("QAT: qat_hal_set_ae_ctx_mode error\n"); return EFAULT; } ae_mode = (char)ICP_QAT_SHARED_USTORE_MODE(uof_image->ae_mode); qat_hal_set_ae_scs_mode(handle, ae, ae_mode); - nn_mode = ICP_QAT_NN_MODE(uof_image->ae_mode); + if (!IS_QAT_GEN4(pci_get_device(GET_DEV(handle->accel_dev)))) { + nn_mode = ICP_QAT_NN_MODE(uof_image->ae_mode); - if (qat_hal_set_ae_nn_mode(handle, ae, nn_mode)) { - pr_err("QAT: qat_hal_set_ae_nn_mode error\n"); - return EFAULT; + if (qat_hal_set_ae_nn_mode(handle, ae, nn_mode)) { + pr_err("QAT: qat_hal_set_ae_nn_mode error\n"); + return EFAULT; + } } ae_mode = (char)ICP_QAT_LOC_MEM0_MODE(uof_image->ae_mode); if (qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM0, ae_mode)) { pr_err("QAT: qat_hal_set_ae_lm_mode LMEM0 error\n"); return EFAULT; } ae_mode = (char)ICP_QAT_LOC_MEM1_MODE(uof_image->ae_mode); if (qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM1, ae_mode)) { pr_err("QAT: qat_hal_set_ae_lm_mode LMEM1 error\n"); return EFAULT; } - if (obj_handle->prod_type == ICP_QAT_AC_C4XXX_DEV_TYPE) { + if (IS_QAT_GEN3_OR_GEN4(pci_get_device(GET_DEV(handle->accel_dev)))) { ae_mode = (char)ICP_QAT_LOC_MEM2_MODE(uof_image->ae_mode); if (qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM2, ae_mode)) { pr_err("QAT: qat_hal_set_ae_lm_mode LMEM2 error\n"); return EFAULT; } ae_mode = (char)ICP_QAT_LOC_MEM3_MODE(uof_image->ae_mode); if (qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM3, ae_mode)) { pr_err("QAT: qat_hal_set_ae_lm_mode LMEM3 error\n"); return EFAULT; } ae_mode = (char)ICP_QAT_LOC_TINDEX_MODE(uof_image->ae_mode); qat_hal_set_ae_tindex_mode(handle, ae, ae_mode); } return 0; } static int qat_uclo_set_ae_mode(struct icp_qat_fw_loader_handle *handle) { int error; unsigned char s; unsigned char ae = 0; struct icp_qat_uof_image *uof_image; struct icp_qat_uclo_aedata *ae_data; struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; unsigned long ae_mask = handle->hal_handle->ae_mask; for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) { unsigned long cfg_ae_mask = handle->cfg_ae_mask; if (!test_bit(ae, &cfg_ae_mask)) continue; ae_data = &obj_handle->ae_data[ae]; for (s = 0; s < min_t(unsigned int, ae_data->slice_num, ICP_QAT_UCLO_MAX_CTX); s++) { if 
(!obj_handle->ae_data[ae].ae_slices[s].encap_image) continue; uof_image = ae_data->ae_slices[s].encap_image->img_ptr; error = qat_hal_set_modes(handle, obj_handle, ae, uof_image); if (error) return error; } } return 0; } static void qat_uclo_init_uword_num(struct icp_qat_fw_loader_handle *handle) { struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; struct icp_qat_uclo_encapme *image; int a; for (a = 0; a < obj_handle->uimage_num; a++) { image = &obj_handle->ae_uimage[a]; image->uwords_num = image->page->beg_addr_p + image->page->micro_words_num; } } static int qat_uclo_parse_uof_obj(struct icp_qat_fw_loader_handle *handle) { struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; unsigned int ae; obj_handle->encap_uof_obj.beg_uof = obj_handle->obj_hdr->file_buff; obj_handle->encap_uof_obj.obj_hdr = (struct icp_qat_uof_objhdr *)obj_handle->obj_hdr->file_buff; obj_handle->uword_in_bytes = 6; obj_handle->prod_type = qat_uclo_get_dev_type(handle); obj_handle->prod_rev = PID_MAJOR_REV | (PID_MINOR_REV & handle->hal_handle->revision_id); if (qat_uclo_check_uof_compat(obj_handle)) { pr_err("QAT: UOF incompatible\n"); return EINVAL; } obj_handle->uword_buf = malloc(UWORD_CPYBUF_SIZE * sizeof(uint64_t), M_QAT, M_WAITOK | M_ZERO); obj_handle->ustore_phy_size = (obj_handle->prod_type == ICP_QAT_AC_C4XXX_DEV_TYPE) ? 0x2000 : 0x4000; if (!obj_handle->obj_hdr->file_buff || !qat_uclo_map_str_table(obj_handle->obj_hdr, ICP_QAT_UOF_STRT, &obj_handle->str_table)) { pr_err("QAT: UOF doesn't have effective images\n"); goto out_err; } obj_handle->uimage_num = qat_uclo_map_uimage(obj_handle, obj_handle->ae_uimage, ICP_QAT_UCLO_MAX_AE * ICP_QAT_UCLO_MAX_CTX); if (!obj_handle->uimage_num) goto out_err; if (qat_uclo_map_ae(handle, handle->hal_handle->ae_max_num)) { pr_err("QAT: Bad object\n"); goto out_check_uof_aemask_err; } qat_uclo_init_uword_num(handle); qat_uclo_map_initmem_table(&obj_handle->encap_uof_obj, &obj_handle->init_mem_tab); if (qat_uclo_set_ae_mode(handle)) goto out_check_uof_aemask_err; return 0; out_check_uof_aemask_err: for (ae = 0; ae < obj_handle->uimage_num; ae++) free(obj_handle->ae_uimage[ae].page, M_QAT); out_err: free(obj_handle->uword_buf, M_QAT); obj_handle->uword_buf = NULL; return EFAULT; } static int qat_uclo_map_suof_file_hdr(const struct icp_qat_fw_loader_handle *handle, const struct icp_qat_suof_filehdr *suof_ptr, int suof_size) { unsigned int check_sum = 0; unsigned int min_ver_offset = 0; struct icp_qat_suof_handle *suof_handle = handle->sobj_handle; suof_handle->file_id = ICP_QAT_SUOF_FID; suof_handle->suof_buf = (const char *)suof_ptr; suof_handle->suof_size = suof_size; min_ver_offset = suof_size - offsetof(struct icp_qat_suof_filehdr, min_ver); check_sum = qat_uclo_calc_str_checksum((const char *)&suof_ptr->min_ver, min_ver_offset); if (check_sum != suof_ptr->check_sum) { pr_err("QAT: incorrect SUOF checksum\n"); return EINVAL; } suof_handle->check_sum = suof_ptr->check_sum; suof_handle->min_ver = suof_ptr->min_ver; suof_handle->maj_ver = suof_ptr->maj_ver; suof_handle->fw_type = suof_ptr->fw_type; return 0; } static void -qat_uclo_map_simg(struct icp_qat_suof_handle *suof_handle, +qat_uclo_map_simg(struct icp_qat_fw_loader_handle *handle, struct icp_qat_suof_img_hdr *suof_img_hdr, struct icp_qat_suof_chunk_hdr *suof_chunk_hdr) { + struct icp_qat_suof_handle *suof_handle = handle->sobj_handle; const struct icp_qat_simg_ae_mode *ae_mode; struct icp_qat_suof_objhdr *suof_objhdr; + unsigned int device_id = pci_get_device(GET_DEV(handle->accel_dev)); 
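/*
 * Sketch of the signed-image (SIMG) layout walked below; every section
 * length is now selected per device through the *_LEN(device_id)
 * macros, which is what lets GEN4 parts carry larger key material:
 *
 *   css_header | fwsk modulus | fwsk exponent | css signature | css_simg
 */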
suof_img_hdr->simg_buf = (suof_handle->suof_buf + suof_chunk_hdr->offset + sizeof(*suof_objhdr)); suof_img_hdr->simg_len = ((struct icp_qat_suof_objhdr *)(uintptr_t)(suof_handle->suof_buf + suof_chunk_hdr->offset)) ->img_length; suof_img_hdr->css_header = suof_img_hdr->simg_buf; suof_img_hdr->css_key = (suof_img_hdr->css_header + sizeof(struct icp_qat_css_hdr)); suof_img_hdr->css_signature = suof_img_hdr->css_key + - ICP_QAT_CSS_FWSK_MODULUS_LEN + ICP_QAT_CSS_FWSK_EXPONENT_LEN; + ICP_QAT_CSS_FWSK_MODULUS_LEN(device_id) + + ICP_QAT_CSS_FWSK_EXPONENT_LEN(device_id); suof_img_hdr->css_simg = - suof_img_hdr->css_signature + ICP_QAT_CSS_SIGNATURE_LEN; + suof_img_hdr->css_signature + ICP_QAT_CSS_SIGNATURE_LEN(device_id); ae_mode = (const struct icp_qat_simg_ae_mode *)(suof_img_hdr->css_simg); suof_img_hdr->ae_mask = ae_mode->ae_mask; suof_img_hdr->simg_name = (unsigned long)&ae_mode->simg_name; suof_img_hdr->appmeta_data = (unsigned long)&ae_mode->appmeta_data; suof_img_hdr->fw_type = ae_mode->fw_type; } static void qat_uclo_map_suof_symobjs(struct icp_qat_suof_handle *suof_handle, struct icp_qat_suof_chunk_hdr *suof_chunk_hdr) { char **sym_str = (char **)&suof_handle->sym_str; unsigned int *sym_size = &suof_handle->sym_size; struct icp_qat_suof_strtable *str_table_obj; *sym_size = *(unsigned int *)(uintptr_t)(suof_chunk_hdr->offset + suof_handle->suof_buf); *sym_str = (char *)(uintptr_t)(suof_handle->suof_buf + suof_chunk_hdr->offset + sizeof(str_table_obj->tab_length)); } static int qat_uclo_check_simg_compat(struct icp_qat_fw_loader_handle *handle, struct icp_qat_suof_img_hdr *img_hdr) { const struct icp_qat_simg_ae_mode *img_ae_mode = NULL; unsigned int prod_rev, maj_ver, prod_type; prod_type = qat_uclo_get_dev_type(handle); img_ae_mode = (const struct icp_qat_simg_ae_mode *)img_hdr->css_simg; prod_rev = PID_MAJOR_REV | (PID_MINOR_REV & handle->hal_handle->revision_id); if (img_ae_mode->dev_type != prod_type) { pr_err("QAT: incompatible product type %x\n", img_ae_mode->dev_type); return EINVAL; } maj_ver = prod_rev & 0xff; if (maj_ver > img_ae_mode->devmax_ver || maj_ver < img_ae_mode->devmin_ver) { pr_err("QAT: incompatible device maj_ver 0x%x\n", maj_ver); return EINVAL; } return 0; } static void qat_uclo_del_suof(struct icp_qat_fw_loader_handle *handle) { struct icp_qat_suof_handle *sobj_handle = handle->sobj_handle; free(sobj_handle->img_table.simg_hdr, M_QAT); sobj_handle->img_table.simg_hdr = NULL; free(handle->sobj_handle, M_QAT); handle->sobj_handle = NULL; } static void qat_uclo_tail_img(struct icp_qat_suof_img_hdr *suof_img_hdr, unsigned int img_id, unsigned int num_simgs) { struct icp_qat_suof_img_hdr img_header; if ((img_id != num_simgs - 1) && img_id != ICP_QAT_UCLO_MAX_AE) { memcpy(&img_header, &suof_img_hdr[num_simgs - 1], sizeof(*suof_img_hdr)); memcpy(&suof_img_hdr[num_simgs - 1], &suof_img_hdr[img_id], sizeof(*suof_img_hdr)); memcpy(&suof_img_hdr[img_id], &img_header, sizeof(*suof_img_hdr)); } } static int qat_uclo_map_suof(struct icp_qat_fw_loader_handle *handle, const struct icp_qat_suof_filehdr *suof_ptr, int suof_size) { struct icp_qat_suof_handle *suof_handle = handle->sobj_handle; struct icp_qat_suof_chunk_hdr *suof_chunk_hdr = NULL; struct icp_qat_suof_img_hdr *suof_img_hdr = NULL; - int ret = 0, ae0_img = ICP_QAT_UCLO_MAX_AE; + int ret = 0, ae0_img = ICP_QAT_UCLO_MAX_AE, + aeMax_img = ICP_QAT_UCLO_MAX_AE; unsigned int i = 0; struct icp_qat_suof_img_hdr img_header; if (!suof_ptr || suof_size == 0) { pr_err("QAT: SUOF pointer is NULL or size is 0\n"); return
EINVAL; } if (qat_uclo_check_suof_format(suof_ptr)) return EINVAL; ret = qat_uclo_map_suof_file_hdr(handle, suof_ptr, suof_size); if (ret) return ret; suof_chunk_hdr = (struct icp_qat_suof_chunk_hdr *)((uintptr_t)suof_ptr + sizeof(*suof_ptr)); qat_uclo_map_suof_symobjs(suof_handle, suof_chunk_hdr); suof_handle->img_table.num_simgs = suof_ptr->num_chunks - 1; if (suof_handle->img_table.num_simgs != 0) { suof_img_hdr = malloc(suof_handle->img_table.num_simgs * sizeof(img_header), M_QAT, M_WAITOK | M_ZERO); suof_handle->img_table.simg_hdr = suof_img_hdr; } for (i = 0; i < suof_handle->img_table.num_simgs; i++) { - qat_uclo_map_simg(handle->sobj_handle, + qat_uclo_map_simg(handle, &suof_img_hdr[i], &suof_chunk_hdr[1 + i]); ret = qat_uclo_check_simg_compat(handle, &suof_img_hdr[i]); if (ret) return ret; suof_img_hdr[i].ae_mask &= handle->cfg_ae_mask; if ((suof_img_hdr[i].ae_mask & 0x1) != 0) ae0_img = i; } - qat_uclo_tail_img(suof_img_hdr, - ae0_img, - suof_handle->img_table.num_simgs); + + if (!IS_QAT_GEN4(pci_get_device(GET_DEV(handle->accel_dev)))) { + qat_uclo_tail_img(suof_img_hdr, + ae0_img, + suof_handle->img_table.num_simgs); + } else { + if (suof_handle->img_table.num_simgs == 1) + return 0; + qat_uclo_tail_img(suof_img_hdr, + ae0_img, + suof_handle->img_table.num_simgs - 1); + for (i = 0; i < suof_handle->img_table.num_simgs; i++) { + if ((suof_img_hdr[i].ae_mask & + (0x1 << (handle->hal_handle->ae_max_num - 1))) != + 0) { + aeMax_img = i; + break; + } + } + qat_uclo_tail_img(suof_img_hdr, + aeMax_img, + suof_handle->img_table.num_simgs); + } return 0; } #define ADD_ADDR(high, low) ((((uint64_t)high) << 32) + (low)) #define BITS_IN_DWORD 32 static int qat_uclo_auth_fw(struct icp_qat_fw_loader_handle *handle, struct icp_qat_fw_auth_desc *desc) { unsigned int fcu_sts, mem_cfg_err, retry = 0; unsigned int fcu_ctl_csr, fcu_sts_csr; unsigned int fcu_dram_hi_csr, fcu_dram_lo_csr; u64 bus_addr; bus_addr = ADD_ADDR(desc->css_hdr_high, desc->css_hdr_low) - sizeof(struct icp_qat_auth_chunk); - if (IS_QAT_GEN3(pci_get_device(GET_DEV(handle->accel_dev)))) { + if (IS_QAT_GEN3_OR_GEN4(pci_get_device(GET_DEV(handle->accel_dev)))) { fcu_ctl_csr = FCU_CONTROL_C4XXX; fcu_sts_csr = FCU_STATUS_C4XXX; fcu_dram_hi_csr = FCU_DRAM_ADDR_HI_C4XXX; fcu_dram_lo_csr = FCU_DRAM_ADDR_LO_C4XXX; } else { fcu_ctl_csr = FCU_CONTROL; fcu_sts_csr = FCU_STATUS; fcu_dram_hi_csr = FCU_DRAM_ADDR_HI; fcu_dram_lo_csr = FCU_DRAM_ADDR_LO; } SET_FCU_CSR(handle, fcu_dram_hi_csr, (bus_addr >> BITS_IN_DWORD)); SET_FCU_CSR(handle, fcu_dram_lo_csr, bus_addr); SET_FCU_CSR(handle, fcu_ctl_csr, FCU_CTRL_CMD_AUTH); do { pause_ms("adfstop", FW_AUTH_WAIT_PERIOD); fcu_sts = GET_FCU_CSR(handle, fcu_sts_csr); if ((fcu_sts & FCU_AUTH_STS_MASK) == FCU_STS_VERI_FAIL) goto auth_fail; if (((fcu_sts >> FCU_STS_AUTHFWLD_POS) & 0x1)) if ((fcu_sts & FCU_AUTH_STS_MASK) == FCU_STS_VERI_DONE) return 0; } while (retry++ < FW_AUTH_MAX_RETRY); auth_fail: pr_err("QAT: authentication error (FCU_STATUS = 0x%x),retry = %d\n", fcu_sts & FCU_AUTH_STS_MASK, retry); if (IS_QAT_GEN3(pci_get_device(GET_DEV(handle->accel_dev)))) { mem_cfg_err = (GET_FCU_CSR(handle, FCU_STATUS1_C4XXX) & MEM_CFG_ERR_BIT); if (mem_cfg_err) pr_err("QAT: MEM_CFG_ERR\n"); } return EINVAL; } +static int +qat_uclo_is_broadcast(struct icp_qat_fw_loader_handle *handle, int imgid) +{ + struct icp_qat_suof_handle *sobj_handle; + + if (!IS_QAT_GEN4(pci_get_device(GET_DEV(handle->accel_dev)))) + return 0; + + sobj_handle = (struct icp_qat_suof_handle *)handle->sobj_handle; + if 
(handle->hal_handle->admin_ae_mask & + sobj_handle->img_table.simg_hdr[imgid].ae_mask) + return 0; + + return 1; +} + +static int +qat_uclo_broadcast_load_fw(struct icp_qat_fw_loader_handle *handle, + struct icp_qat_fw_auth_desc *desc) +{ + unsigned int i = 0; + unsigned int fcuSts = 0, fcuAeBroadcastMask = 0; + unsigned int retry = 0; + unsigned int fcuStsCsr = 0; + unsigned int fcuCtlCsr = 0; + unsigned int loadedAes = 0; + unsigned int device_id = pci_get_device(GET_DEV(handle->accel_dev)); + + if (IS_QAT_GEN4(device_id)) { + fcuCtlCsr = FCU_CONTROL_4XXX; + fcuStsCsr = FCU_STATUS_4XXX; + } else { + pr_err("Uclo_BroadcastLoadFW only applicable for CPM20\n"); + return EINVAL; + } + + for (i = 0; i < ICP_QAT_UCLO_MAX_AE; i++) { + if (!test_bit(i, (unsigned long *)&handle->hal_handle->ae_mask)) + continue; + + if (qat_hal_check_ae_active(handle, (unsigned char)i)) { + pr_err( + "Uclo_BroadcastLoadFW error (invalid AE status)\n"); + return EINVAL; + } + + if ((desc->ae_mask >> i) & 0x1) { + fcuAeBroadcastMask |= 1 << i; + } + } + + if (fcuAeBroadcastMask) { + retry = 0; + SET_FCU_CSR(handle, + FCU_ME_BROADCAST_MASK_TYPE, + fcuAeBroadcastMask); + SET_FCU_CSR(handle, fcuCtlCsr, FCU_CTRL_CMD_LOAD); + do { + msleep(FW_AUTH_WAIT_PERIOD); + fcuSts = GET_FCU_CSR(handle, fcuStsCsr); + + if ((fcuSts & FCU_AUTH_STS_MASK) == FCU_STS_LOAD_FAIL) { + pr_err( + "Uclo_BroadcastLoadFW fail (fcu_status = 0x%x)\n", + fcuSts & FCU_AUTH_STS_MASK); + return EINVAL; + } else if ((fcuSts & FCU_AUTH_STS_MASK) == + FCU_STS_LOAD_DONE) { + if (IS_QAT_GEN4(device_id)) + loadedAes = + GET_FCU_CSR(handle, + FCU_AE_LOADED_4XXX); + else + loadedAes = + (fcuSts >> FCU_LOADED_AE_POS); + + if ((loadedAes & fcuAeBroadcastMask) == + fcuAeBroadcastMask) + break; + } else if ((fcuSts & FCU_AUTH_STS_MASK) == + FCU_STS_VERI_DONE) { + SET_FCU_CSR(handle, + fcuCtlCsr, + FCU_CTRL_CMD_LOAD); + } + } while (retry++ < FW_BROADCAST_MAX_RETRY); + if (retry > FW_BROADCAST_MAX_RETRY) { + pr_err( + "Uclo_BroadcastLoadFW fail(fcu_status = 0x%x),retry = %d\n", + fcuSts & FCU_AUTH_STS_MASK, + retry); + return EINVAL; + } + } + return 0; +} + static int qat_uclo_simg_alloc(struct icp_qat_fw_loader_handle *handle, struct icp_firml_dram_desc *dram_desc, unsigned int size) { int ret; ret = bus_dma_mem_create(&dram_desc->dram_mem, handle->accel_dev->dma_tag, 1, BUS_SPACE_MAXADDR, size, 0); if (ret != 0) return ret; dram_desc->dram_base_addr_v = dram_desc->dram_mem.dma_vaddr; dram_desc->dram_bus_addr = dram_desc->dram_mem.dma_baddr; dram_desc->dram_size = size; return 0; } static void qat_uclo_simg_free(struct icp_qat_fw_loader_handle *handle, struct icp_firml_dram_desc *dram_desc) { if (handle && dram_desc && dram_desc->dram_base_addr_v) bus_dma_mem_free(&dram_desc->dram_mem); if (dram_desc) explicit_bzero(dram_desc, sizeof(*dram_desc)); } static int qat_uclo_map_auth_fw(struct icp_qat_fw_loader_handle *handle, const char *image, unsigned int size, struct icp_firml_dram_desc *img_desc, struct icp_qat_fw_auth_desc **desc) { const struct icp_qat_css_hdr *css_hdr = (const struct icp_qat_css_hdr *)image; struct icp_qat_fw_auth_desc *auth_desc; struct icp_qat_auth_chunk *auth_chunk; u64 virt_addr, bus_addr, virt_base; unsigned int length, simg_offset = sizeof(*auth_chunk); + unsigned int device_id = pci_get_device(GET_DEV(handle->accel_dev)); - if (size > (ICP_QAT_AE_IMG_OFFSET + ICP_QAT_CSS_MAX_IMAGE_LEN)) { + if (size > + (ICP_QAT_AE_IMG_OFFSET(device_id) + ICP_QAT_CSS_MAX_IMAGE_LEN)) { pr_err("QAT: error, input image size overflow %d\n", size); return 
EINVAL; } length = (css_hdr->fw_type == CSS_AE_FIRMWARE) ? - ICP_QAT_CSS_AE_SIMG_LEN + simg_offset : - size + ICP_QAT_CSS_FWSK_PAD_LEN + simg_offset; + ICP_QAT_CSS_AE_SIMG_LEN(device_id) + simg_offset : + size + ICP_QAT_CSS_FWSK_PAD_LEN(device_id) + simg_offset; if (qat_uclo_simg_alloc(handle, img_desc, length)) { pr_err("QAT: error, allocate continuous dram fail\n"); return -ENOMEM; } auth_chunk = img_desc->dram_base_addr_v; auth_chunk->chunk_size = img_desc->dram_size; auth_chunk->chunk_bus_addr = img_desc->dram_bus_addr; virt_base = (uintptr_t)img_desc->dram_base_addr_v + simg_offset; bus_addr = img_desc->dram_bus_addr + simg_offset; auth_desc = img_desc->dram_base_addr_v; auth_desc->css_hdr_high = (unsigned int)(bus_addr >> BITS_IN_DWORD); auth_desc->css_hdr_low = (unsigned int)bus_addr; virt_addr = virt_base; memcpy((void *)(uintptr_t)virt_addr, image, sizeof(*css_hdr)); /* pub key */ bus_addr = ADD_ADDR(auth_desc->css_hdr_high, auth_desc->css_hdr_low) + sizeof(*css_hdr); virt_addr = virt_addr + sizeof(*css_hdr); auth_desc->fwsk_pub_high = (unsigned int)(bus_addr >> BITS_IN_DWORD); auth_desc->fwsk_pub_low = (unsigned int)bus_addr; memcpy((void *)(uintptr_t)virt_addr, (const void *)(image + sizeof(*css_hdr)), - ICP_QAT_CSS_FWSK_MODULUS_LEN); + ICP_QAT_CSS_FWSK_MODULUS_LEN(device_id)); /* padding */ - explicit_bzero((void *)(uintptr_t)(virt_addr + - ICP_QAT_CSS_FWSK_MODULUS_LEN), - ICP_QAT_CSS_FWSK_PAD_LEN); + explicit_bzero((void *)(uintptr_t)( + virt_addr + ICP_QAT_CSS_FWSK_MODULUS_LEN(device_id)), + ICP_QAT_CSS_FWSK_PAD_LEN(device_id)); /* exponent */ - memcpy((void *)(uintptr_t)(virt_addr + ICP_QAT_CSS_FWSK_MODULUS_LEN + - ICP_QAT_CSS_FWSK_PAD_LEN), + memcpy((void *)(uintptr_t)(virt_addr + + ICP_QAT_CSS_FWSK_MODULUS_LEN(device_id) + + ICP_QAT_CSS_FWSK_PAD_LEN(device_id)), (const void *)(image + sizeof(*css_hdr) + - ICP_QAT_CSS_FWSK_MODULUS_LEN), + ICP_QAT_CSS_FWSK_MODULUS_LEN(device_id)), sizeof(unsigned int)); /* signature */ bus_addr = ADD_ADDR(auth_desc->fwsk_pub_high, auth_desc->fwsk_pub_low) + - ICP_QAT_CSS_FWSK_PUB_LEN; - virt_addr = virt_addr + ICP_QAT_CSS_FWSK_PUB_LEN; + ICP_QAT_CSS_FWSK_PUB_LEN(device_id); + virt_addr = virt_addr + ICP_QAT_CSS_FWSK_PUB_LEN(device_id); auth_desc->signature_high = (unsigned int)(bus_addr >> BITS_IN_DWORD); auth_desc->signature_low = (unsigned int)bus_addr; memcpy((void *)(uintptr_t)virt_addr, (const void *)(image + sizeof(*css_hdr) + - ICP_QAT_CSS_FWSK_MODULUS_LEN + - ICP_QAT_CSS_FWSK_EXPONENT_LEN), - ICP_QAT_CSS_SIGNATURE_LEN); + ICP_QAT_CSS_FWSK_MODULUS_LEN(device_id) + + ICP_QAT_CSS_FWSK_EXPONENT_LEN(device_id)), + ICP_QAT_CSS_SIGNATURE_LEN(device_id)); bus_addr = ADD_ADDR(auth_desc->signature_high, auth_desc->signature_low) + - ICP_QAT_CSS_SIGNATURE_LEN; - virt_addr += ICP_QAT_CSS_SIGNATURE_LEN; + ICP_QAT_CSS_SIGNATURE_LEN(device_id); + virt_addr += ICP_QAT_CSS_SIGNATURE_LEN(device_id); auth_desc->img_high = (unsigned int)(bus_addr >> BITS_IN_DWORD); auth_desc->img_low = (unsigned int)bus_addr; - auth_desc->img_len = size - ICP_QAT_AE_IMG_OFFSET; + auth_desc->img_len = size - ICP_QAT_AE_IMG_OFFSET(device_id); memcpy((void *)(uintptr_t)virt_addr, - (const void *)(image + ICP_QAT_AE_IMG_OFFSET), + (const void *)(image + ICP_QAT_AE_IMG_OFFSET(device_id)), auth_desc->img_len); virt_addr = virt_base; /* AE firmware */ if (((struct icp_qat_css_hdr *)(uintptr_t)virt_addr)->fw_type == CSS_AE_FIRMWARE) { auth_desc->img_ae_mode_data_high = auth_desc->img_high; auth_desc->img_ae_mode_data_low = auth_desc->img_low; bus_addr = 
ADD_ADDR(auth_desc->img_ae_mode_data_high, auth_desc->img_ae_mode_data_low) + sizeof(struct icp_qat_simg_ae_mode); auth_desc->img_ae_init_data_high = (unsigned int)(bus_addr >> BITS_IN_DWORD); auth_desc->img_ae_init_data_low = (unsigned int)bus_addr; bus_addr += ICP_QAT_SIMG_AE_INIT_SEQ_LEN; auth_desc->img_ae_insts_high = (unsigned int)(bus_addr >> BITS_IN_DWORD); auth_desc->img_ae_insts_low = (unsigned int)bus_addr; virt_addr += sizeof(struct icp_qat_css_hdr) + - ICP_QAT_CSS_FWSK_PUB_LEN + ICP_QAT_CSS_SIGNATURE_LEN; + ICP_QAT_CSS_FWSK_PUB_LEN(device_id) + + ICP_QAT_CSS_SIGNATURE_LEN(device_id); auth_desc->ae_mask = ((struct icp_qat_simg_ae_mode *)virt_addr)->ae_mask & handle->cfg_ae_mask; } else { auth_desc->img_ae_insts_high = auth_desc->img_high; auth_desc->img_ae_insts_low = auth_desc->img_low; } *desc = auth_desc; return 0; } static int qat_uclo_load_fw(struct icp_qat_fw_loader_handle *handle, struct icp_qat_fw_auth_desc *desc) { unsigned int i = 0; unsigned int fcu_sts; unsigned int fcu_sts_csr, fcu_ctl_csr; unsigned int loaded_aes = FCU_LOADED_AE_POS; unsigned long ae_mask = handle->hal_handle->ae_mask; - if (IS_QAT_GEN3(pci_get_device(GET_DEV(handle->accel_dev)))) { + if (IS_QAT_GEN3_OR_GEN4(pci_get_device(GET_DEV(handle->accel_dev)))) { fcu_ctl_csr = FCU_CONTROL_C4XXX; fcu_sts_csr = FCU_STATUS_C4XXX; } else { fcu_ctl_csr = FCU_CONTROL; fcu_sts_csr = FCU_STATUS; } for_each_set_bit(i, &ae_mask, handle->hal_handle->ae_max_num) { int retry = 0; if (!((desc->ae_mask >> i) & 0x1)) continue; if (qat_hal_check_ae_active(handle, i)) { pr_err("QAT: AE %d is active\n", i); return EINVAL; } SET_FCU_CSR(handle, fcu_ctl_csr, - (FCU_CTRL_CMD_LOAD | (i << FCU_CTRL_AE_POS))); + (FCU_CTRL_CMD_LOAD | + (IS_QAT_GEN4( + pci_get_device(GET_DEV(handle->accel_dev))) ? + (1 << FCU_CTRL_BROADCAST_POS) : + 0) | + (i << FCU_CTRL_AE_POS))); do { pause_ms("adfstop", FW_AUTH_WAIT_PERIOD); fcu_sts = GET_FCU_CSR(handle, fcu_sts_csr); if ((fcu_sts & FCU_AUTH_STS_MASK) == FCU_STS_LOAD_DONE) { - loaded_aes = IS_QAT_GEN3(pci_get_device( + loaded_aes = IS_QAT_GEN3_OR_GEN4(pci_get_device( GET_DEV(handle->accel_dev))) ? 
GET_FCU_CSR(handle, FCU_AE_LOADED_C4XXX) : (fcu_sts >> FCU_LOADED_AE_POS); if (loaded_aes & (1 << i)) break; } } while (retry++ < FW_AUTH_MAX_RETRY); if (retry > FW_AUTH_MAX_RETRY) { pr_err("QAT: firmware load failed timeout %x\n", retry); return EINVAL; } } return 0; } static int qat_uclo_map_suof_obj(struct icp_qat_fw_loader_handle *handle, const void *addr_ptr, int mem_size) { struct icp_qat_suof_handle *suof_handle; suof_handle = malloc(sizeof(*suof_handle), M_QAT, M_WAITOK | M_ZERO); handle->sobj_handle = suof_handle; if (qat_uclo_map_suof(handle, addr_ptr, mem_size)) { qat_uclo_del_suof(handle); pr_err("QAT: map SUOF failed\n"); return EINVAL; } return 0; } int qat_uclo_wr_mimage(struct icp_qat_fw_loader_handle *handle, const void *addr_ptr, int mem_size) { struct icp_qat_fw_auth_desc *desc = NULL; struct icp_firml_dram_desc img_desc; int status = 0; if (handle->fw_auth) { status = qat_uclo_map_auth_fw( handle, addr_ptr, mem_size, &img_desc, &desc); if (!status) status = qat_uclo_auth_fw(handle, desc); qat_uclo_simg_free(handle, &img_desc); } else { + if (IS_QAT_GEN4(pci_get_device(GET_DEV(handle->accel_dev)))) { + device_printf( + NULL, "QAT: PKE service is not allowed because "); + device_printf(NULL, "MMP fw will not be loaded for "); + device_printf(NULL, + "device 0x%x", + pci_get_device( + GET_DEV(handle->accel_dev))); + return status; + } if (pci_get_device(GET_DEV(handle->accel_dev)) == ADF_C3XXX_PCI_DEVICE_ID) { pr_err("QAT: C3XXX doesn't support unsigned MMP\n"); return EINVAL; } status = qat_uclo_wr_sram_by_words(handle, handle->hal_sram_offset, addr_ptr, mem_size); } return status; } static int qat_uclo_map_uof_obj(struct icp_qat_fw_loader_handle *handle, const void *addr_ptr, int mem_size) { struct icp_qat_uof_filehdr *filehdr; struct icp_qat_uclo_objhandle *objhdl; objhdl = malloc(sizeof(*objhdl), M_QAT, M_WAITOK | M_ZERO); objhdl->obj_buf = malloc(mem_size, M_QAT, M_WAITOK); bcopy(addr_ptr, objhdl->obj_buf, mem_size); filehdr = (struct icp_qat_uof_filehdr *)objhdl->obj_buf; if (qat_uclo_check_uof_format(filehdr)) goto out_objhdr_err; objhdl->obj_hdr = qat_uclo_map_chunk((char *)objhdl->obj_buf, filehdr, ICP_QAT_UOF_OBJS); if (!objhdl->obj_hdr) { pr_err("QAT: object file chunk is null\n"); goto out_objhdr_err; } handle->obj_handle = objhdl; if (qat_uclo_parse_uof_obj(handle)) goto out_overlay_obj_err; return 0; out_overlay_obj_err: handle->obj_handle = NULL; free(objhdl->obj_hdr, M_QAT); out_objhdr_err: free(objhdl->obj_buf, M_QAT); free(objhdl, M_QAT); return ENOMEM; } static int qat_uclo_map_mof_file_hdr(struct icp_qat_fw_loader_handle *handle, const struct icp_qat_mof_file_hdr *mof_ptr, u32 mof_size) { unsigned int checksum = 0; unsigned int min_ver_offset = 0; struct icp_qat_mof_handle *mobj_handle = handle->mobj_handle; mobj_handle->file_id = ICP_QAT_MOF_FID; mobj_handle->mof_buf = (const char *)mof_ptr; mobj_handle->mof_size = mof_size; min_ver_offset = mof_size - offsetof(struct icp_qat_mof_file_hdr, min_ver); checksum = qat_uclo_calc_str_checksum((const char *)&mof_ptr->min_ver, min_ver_offset); if (checksum != mof_ptr->checksum) { pr_err("QAT: incorrect MOF checksum\n"); return EINVAL; } mobj_handle->checksum = mof_ptr->checksum; mobj_handle->min_ver = mof_ptr->min_ver; mobj_handle->maj_ver = mof_ptr->maj_ver; return 0; } void qat_uclo_del_mof(struct icp_qat_fw_loader_handle *handle) { struct icp_qat_mof_handle *mobj_handle = handle->mobj_handle; free(mobj_handle->obj_table.obj_hdr, M_QAT); mobj_handle->obj_table.obj_hdr = NULL; free(handle->mobj_handle, 
M_QAT); handle->mobj_handle = NULL; } static int qat_uclo_seek_obj_inside_mof(struct icp_qat_mof_handle *mobj_handle, const char *obj_name, const char **obj_ptr, unsigned int *obj_size) { unsigned int i; struct icp_qat_mof_objhdr *obj_hdr = mobj_handle->obj_table.obj_hdr; for (i = 0; i < mobj_handle->obj_table.num_objs; i++) { if (!strncmp(obj_hdr[i].obj_name, obj_name, ICP_QAT_SUOF_OBJ_NAME_LEN)) { *obj_ptr = obj_hdr[i].obj_buf; *obj_size = obj_hdr[i].obj_size; break; } } if (i >= mobj_handle->obj_table.num_objs) { pr_err("QAT: object %s is not found inside MOF\n", obj_name); return EFAULT; } return 0; } static int qat_uclo_map_obj_from_mof(struct icp_qat_mof_handle *mobj_handle, struct icp_qat_mof_objhdr *mobj_hdr, struct icp_qat_mof_obj_chunkhdr *obj_chunkhdr) { if ((strncmp((char *)obj_chunkhdr->chunk_id, ICP_QAT_UOF_IMAG, ICP_QAT_MOF_OBJ_CHUNKID_LEN)) == 0) { mobj_hdr->obj_buf = (const char *)((unsigned long)obj_chunkhdr->offset + mobj_handle->uobjs_hdr); } else if ((strncmp((char *)(obj_chunkhdr->chunk_id), ICP_QAT_SUOF_IMAG, ICP_QAT_MOF_OBJ_CHUNKID_LEN)) == 0) { mobj_hdr->obj_buf = (const char *)((unsigned long)obj_chunkhdr->offset + mobj_handle->sobjs_hdr); } else { pr_err("QAT: unsupported chunk id\n"); return EINVAL; } mobj_hdr->obj_size = (unsigned int)obj_chunkhdr->size; mobj_hdr->obj_name = (char *)(obj_chunkhdr->name + mobj_handle->sym_str); return 0; } static int qat_uclo_map_objs_from_mof(struct icp_qat_mof_handle *mobj_handle) { struct icp_qat_mof_objhdr *mof_obj_hdr; const struct icp_qat_mof_obj_hdr *uobj_hdr; const struct icp_qat_mof_obj_hdr *sobj_hdr; struct icp_qat_mof_obj_chunkhdr *uobj_chunkhdr; struct icp_qat_mof_obj_chunkhdr *sobj_chunkhdr; unsigned int uobj_chunk_num = 0, sobj_chunk_num = 0; unsigned int *valid_chunks = 0; int ret, i; uobj_hdr = (const struct icp_qat_mof_obj_hdr *)mobj_handle->uobjs_hdr; sobj_hdr = (const struct icp_qat_mof_obj_hdr *)mobj_handle->sobjs_hdr; if (uobj_hdr) uobj_chunk_num = uobj_hdr->num_chunks; if (sobj_hdr) sobj_chunk_num = sobj_hdr->num_chunks; mof_obj_hdr = (struct icp_qat_mof_objhdr *) malloc((uobj_chunk_num + sobj_chunk_num) * sizeof(*mof_obj_hdr), M_QAT, M_WAITOK | M_ZERO); mobj_handle->obj_table.obj_hdr = mof_obj_hdr; valid_chunks = &mobj_handle->obj_table.num_objs; uobj_chunkhdr = (struct icp_qat_mof_obj_chunkhdr *)((uintptr_t)uobj_hdr + sizeof(*uobj_hdr)); sobj_chunkhdr = (struct icp_qat_mof_obj_chunkhdr *)((uintptr_t)sobj_hdr + sizeof(*sobj_hdr)); /* map uof objects */ for (i = 0; i < uobj_chunk_num; i++) { ret = qat_uclo_map_obj_from_mof(mobj_handle, &mof_obj_hdr[*valid_chunks], &uobj_chunkhdr[i]); if (ret) return ret; (*valid_chunks)++; } /* map suof objects */ for (i = 0; i < sobj_chunk_num; i++) { ret = qat_uclo_map_obj_from_mof(mobj_handle, &mof_obj_hdr[*valid_chunks], &sobj_chunkhdr[i]); if (ret) return ret; (*valid_chunks)++; } if ((uobj_chunk_num + sobj_chunk_num) != *valid_chunks) { pr_err("QAT: inconsistent UOF/SUOF chunk amount\n"); return EINVAL; } return 0; } static void qat_uclo_map_mof_symobjs(struct icp_qat_mof_handle *mobj_handle, struct icp_qat_mof_chunkhdr *mof_chunkhdr) { char **sym_str = (char **)&mobj_handle->sym_str; unsigned int *sym_size = &mobj_handle->sym_size; struct icp_qat_mof_str_table *str_table_obj; *sym_size = *(unsigned int *)(uintptr_t)(mof_chunkhdr->offset + mobj_handle->mof_buf); *sym_str = (char *)(uintptr_t)(mobj_handle->mof_buf + mof_chunkhdr->offset + sizeof(str_table_obj->tab_len)); } static void qat_uclo_map_mof_chunk(struct icp_qat_mof_handle *mobj_handle, struct 
icp_qat_mof_chunkhdr *mof_chunkhdr) { if (!strncmp(mof_chunkhdr->chunk_id, ICP_QAT_MOF_SYM_OBJS, ICP_QAT_MOF_OBJ_ID_LEN)) qat_uclo_map_mof_symobjs(mobj_handle, mof_chunkhdr); else if (!strncmp(mof_chunkhdr->chunk_id, ICP_QAT_UOF_OBJS, ICP_QAT_MOF_OBJ_ID_LEN)) mobj_handle->uobjs_hdr = mobj_handle->mof_buf + (unsigned long)mof_chunkhdr->offset; else if (!strncmp(mof_chunkhdr->chunk_id, ICP_QAT_SUOF_OBJS, ICP_QAT_MOF_OBJ_ID_LEN)) mobj_handle->sobjs_hdr = mobj_handle->mof_buf + (unsigned long)mof_chunkhdr->offset; } static int qat_uclo_check_mof_format(const struct icp_qat_mof_file_hdr *mof_hdr) { int maj = mof_hdr->maj_ver & 0xff; int min = mof_hdr->min_ver & 0xff; if (mof_hdr->file_id != ICP_QAT_MOF_FID) { pr_err("QAT: invalid header 0x%x\n", mof_hdr->file_id); return EINVAL; } if (mof_hdr->num_chunks <= 0x1) { pr_err("QAT: MOF chunk amount is incorrect\n"); return EINVAL; } if (maj != ICP_QAT_MOF_MAJVER || min != ICP_QAT_MOF_MINVER) { pr_err("QAT: bad MOF version, major 0x%x, minor 0x%x\n", maj, min); return EINVAL; } return 0; } static int qat_uclo_map_mof_obj(struct icp_qat_fw_loader_handle *handle, const struct icp_qat_mof_file_hdr *mof_ptr, u32 mof_size, const char *obj_name, const char **obj_ptr, unsigned int *obj_size) { struct icp_qat_mof_handle *mobj_handle; struct icp_qat_mof_chunkhdr *mof_chunkhdr; unsigned short chunks_num; int ret; unsigned int i; if (mof_ptr->file_id == ICP_QAT_UOF_FID || mof_ptr->file_id == ICP_QAT_SUOF_FID) { if (obj_ptr) *obj_ptr = (const char *)mof_ptr; if (obj_size) *obj_size = (unsigned int)mof_size; return 0; } if (qat_uclo_check_mof_format(mof_ptr)) return EINVAL; mobj_handle = malloc(sizeof(*mobj_handle), M_QAT, M_WAITOK | M_ZERO); handle->mobj_handle = mobj_handle; ret = qat_uclo_map_mof_file_hdr(handle, mof_ptr, mof_size); if (ret) return ret; mof_chunkhdr = (struct icp_qat_mof_chunkhdr *)((uintptr_t)mof_ptr + sizeof(*mof_ptr)); chunks_num = mof_ptr->num_chunks; /*Parse MOF file chunks*/ for (i = 0; i < chunks_num; i++) qat_uclo_map_mof_chunk(mobj_handle, &mof_chunkhdr[i]); /*All sym_objs uobjs and sobjs should be available*/ if (!mobj_handle->sym_str || (!mobj_handle->uobjs_hdr && !mobj_handle->sobjs_hdr)) return EINVAL; ret = qat_uclo_map_objs_from_mof(mobj_handle); if (ret) return ret; /*Seek specified uof object in MOF*/ ret = qat_uclo_seek_obj_inside_mof(mobj_handle, obj_name, obj_ptr, obj_size); if (ret) return ret; return 0; } int qat_uclo_map_obj(struct icp_qat_fw_loader_handle *handle, const void *addr_ptr, u32 mem_size, const char *obj_name) { const char *obj_addr; u32 obj_size; int ret; BUILD_BUG_ON(ICP_QAT_UCLO_MAX_AE > (sizeof(handle->hal_handle->ae_mask) * 8)); if (!handle || !addr_ptr || mem_size < 24) return EINVAL; if (obj_name) { ret = qat_uclo_map_mof_obj( handle, addr_ptr, mem_size, obj_name, &obj_addr, &obj_size); if (ret) return ret; } else { obj_addr = addr_ptr; obj_size = mem_size; } return (handle->fw_auth) ? 
qat_uclo_map_suof_obj(handle, obj_addr, obj_size) : qat_uclo_map_uof_obj(handle, obj_addr, obj_size); } void qat_uclo_del_obj(struct icp_qat_fw_loader_handle *handle) { struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; unsigned int a; unsigned long ae_mask = handle->hal_handle->ae_mask; if (handle->mobj_handle) qat_uclo_del_mof(handle); if (handle->sobj_handle) qat_uclo_del_suof(handle); if (!obj_handle) return; free(obj_handle->uword_buf, M_QAT); for (a = 0; a < obj_handle->uimage_num; a++) free(obj_handle->ae_uimage[a].page, M_QAT); for_each_set_bit(a, &ae_mask, handle->hal_handle->ae_max_num) { qat_uclo_free_ae_data(&obj_handle->ae_data[a]); } free(obj_handle->obj_hdr, M_QAT); free(obj_handle->obj_buf, M_QAT); free(obj_handle, M_QAT); handle->obj_handle = NULL; } static void qat_uclo_fill_uwords(struct icp_qat_uclo_objhandle *obj_handle, struct icp_qat_uclo_encap_page *encap_page, uint64_t *uword, unsigned int addr_p, unsigned int raddr, uint64_t fill) { uint64_t uwrd = 0; unsigned int i, addr; if (!encap_page) { *uword = fill; return; } addr = (encap_page->page_region) ? raddr : addr_p; for (i = 0; i < encap_page->uwblock_num; i++) { if (addr >= encap_page->uwblock[i].start_addr && addr <= encap_page->uwblock[i].start_addr + encap_page->uwblock[i].words_num - 1) { addr -= encap_page->uwblock[i].start_addr; addr *= obj_handle->uword_in_bytes; memcpy(&uwrd, (void *)(((uintptr_t)encap_page->uwblock[i] .micro_words) + addr), obj_handle->uword_in_bytes); uwrd = uwrd & 0xbffffffffffull; } } *uword = uwrd; if (*uword == INVLD_UWORD) *uword = fill; } static void qat_uclo_wr_uimage_raw_page(struct icp_qat_fw_loader_handle *handle, struct icp_qat_uclo_encap_page *encap_page, unsigned int ae) { unsigned int uw_physical_addr, uw_relative_addr, i, words_num, cpylen; struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; uint64_t fill_pat; /* load the page starting at appropriate ustore address */ /* get fill-pattern from an image -- they are all the same */ memcpy(&fill_pat, obj_handle->ae_uimage[0].img_ptr->fill_pattern, sizeof(uint64_t)); uw_physical_addr = encap_page->beg_addr_p; uw_relative_addr = 0; words_num = encap_page->micro_words_num; while (words_num) { if (words_num < UWORD_CPYBUF_SIZE) cpylen = words_num; else cpylen = UWORD_CPYBUF_SIZE; /* load the buffer */ for (i = 0; i < cpylen; i++) qat_uclo_fill_uwords(obj_handle, encap_page, &obj_handle->uword_buf[i], uw_physical_addr + i, uw_relative_addr + i, fill_pat); - if (obj_handle->ae_data[ae].shareable_ustore) + if (obj_handle->ae_data[ae].shareable_ustore && + !IS_QAT_GEN4(pci_get_device(GET_DEV(handle->accel_dev)))) /* copy the buffer to ustore */ qat_hal_wr_coalesce_uwords(handle, (unsigned char)ae, uw_physical_addr, cpylen, obj_handle->uword_buf); else /* copy the buffer to ustore */ qat_hal_wr_uwords(handle, (unsigned char)ae, uw_physical_addr, cpylen, obj_handle->uword_buf); uw_physical_addr += cpylen; uw_relative_addr += cpylen; words_num -= cpylen; } } static void qat_uclo_wr_uimage_page(struct icp_qat_fw_loader_handle *handle, struct icp_qat_uof_image *image) { struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; unsigned int ctx_mask, s; struct icp_qat_uclo_page *page; unsigned char ae = 0; int ctx; struct icp_qat_uclo_aedata *aed; unsigned long ae_mask = handle->hal_handle->ae_mask; if (ICP_QAT_CTX_MODE(image->ae_mode) == ICP_QAT_UCLO_MAX_CTX) ctx_mask = 0xff; else ctx_mask = 0x55; /* load the default page and set assigned CTX PC * to the entrypoint address */ for_each_set_bit(ae, &ae_mask, 
handle->hal_handle->ae_max_num) { unsigned long cfg_ae_mask = handle->cfg_ae_mask; unsigned long ae_assigned = image->ae_assigned; if (!test_bit(ae, &cfg_ae_mask)) continue; if (!test_bit(ae, &ae_assigned)) continue; aed = &obj_handle->ae_data[ae]; /* find the slice to which this image is assigned */ for (s = 0; s < aed->slice_num; s++) { if (image->ctx_assigned & aed->ae_slices[s].ctx_mask_assigned) break; } if (s >= aed->slice_num) continue; page = aed->ae_slices[s].page; if (!page->encap_page->def_page) continue; qat_uclo_wr_uimage_raw_page(handle, page->encap_page, ae); page = aed->ae_slices[s].page; for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++) aed->ae_slices[s].cur_page[ctx] = (ctx_mask & (1 << ctx)) ? page : NULL; qat_hal_set_live_ctx(handle, (unsigned char)ae, image->ctx_assigned); qat_hal_set_pc(handle, (unsigned char)ae, image->ctx_assigned, image->entry_address); } } static int qat_uclo_wr_suof_img(struct icp_qat_fw_loader_handle *handle) { unsigned int i; struct icp_qat_fw_auth_desc *desc = NULL; struct icp_firml_dram_desc img_desc; struct icp_qat_suof_handle *sobj_handle = handle->sobj_handle; struct icp_qat_suof_img_hdr *simg_hdr = sobj_handle->img_table.simg_hdr; for (i = 0; i < sobj_handle->img_table.num_simgs; i++) { if (qat_uclo_map_auth_fw(handle, (const char *)simg_hdr[i].simg_buf, (unsigned int)(simg_hdr[i].simg_len), &img_desc, &desc)) goto wr_err; if (qat_uclo_auth_fw(handle, desc)) goto wr_err; - if (qat_uclo_load_fw(handle, desc)) - goto wr_err; + if (qat_uclo_is_broadcast(handle, i)) { + if (qat_uclo_broadcast_load_fw(handle, desc)) + goto wr_err; + } else { + if (qat_uclo_load_fw(handle, desc)) + goto wr_err; + } qat_uclo_simg_free(handle, &img_desc); } + return 0; wr_err: qat_uclo_simg_free(handle, &img_desc); return -EINVAL; } static int qat_uclo_wr_uof_img(struct icp_qat_fw_loader_handle *handle) { struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; unsigned int i; if (qat_uclo_init_globals(handle)) return EINVAL; for (i = 0; i < obj_handle->uimage_num; i++) { if (!obj_handle->ae_uimage[i].img_ptr) return EINVAL; if (qat_uclo_init_ustore(handle, &obj_handle->ae_uimage[i])) return EINVAL; qat_uclo_wr_uimage_page(handle, obj_handle->ae_uimage[i].img_ptr); } return 0; } int qat_uclo_wr_all_uimage(struct icp_qat_fw_loader_handle *handle) { return (handle->fw_auth) ? 
qat_uclo_wr_suof_img(handle) : qat_uclo_wr_uof_img(handle); } int qat_uclo_set_cfg_ae_mask(struct icp_qat_fw_loader_handle *handle, unsigned int cfg_ae_mask) { if (!cfg_ae_mask) return EINVAL; handle->cfg_ae_mask = cfg_ae_mask; return 0; } diff --git a/sys/dev/qat/qat_hw/qat_200xx/adf_200xx_hw_data.c b/sys/dev/qat/qat_hw/qat_200xx/adf_200xx_hw_data.c index 22e1464cff12..5bfe1c7f40b3 100644 --- a/sys/dev/qat/qat_hw/qat_200xx/adf_200xx_hw_data.c +++ b/sys/dev/qat/qat_hw/qat_200xx/adf_200xx_hw_data.c @@ -1,541 +1,545 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright(c) 2007-2022 Intel Corporation */ /* $FreeBSD$ */ #include #include #include #include #include +#include #include "adf_200xx_hw_data.h" #include "icp_qat_hw.h" #include "adf_heartbeat.h" /* Worker thread to service arbiter mappings */ static const u32 thrd_to_arb_map[ADF_200XX_MAX_ACCELENGINES] = { 0x12222AAA, 0x11222AAA, 0x12222AAA, 0x11222AAA, 0x12222AAA, 0x11222AAA }; enum { DEV_200XX_SKU_1 = 0, DEV_200XX_SKU_2 = 1, DEV_200XX_SKU_3 = 2 }; static u32 thrd_to_arb_map_gen[ADF_200XX_MAX_ACCELENGINES] = { 0 }; static struct adf_hw_device_class qat_200xx_class = {.name = ADF_200XX_DEVICE_NAME, .type = DEV_200XX, .instances = 0 }; static u32 get_accel_mask(struct adf_accel_dev *accel_dev) { device_t pdev = accel_dev->accel_pci_dev.pci_dev; u32 fuse; u32 straps; fuse = pci_read_config(pdev, ADF_DEVICE_FUSECTL_OFFSET, 4); straps = pci_read_config(pdev, ADF_200XX_SOFTSTRAP_CSR_OFFSET, 4); return (~(fuse | straps)) >> ADF_200XX_ACCELERATORS_REG_OFFSET & ADF_200XX_ACCELERATORS_MASK; } static u32 get_ae_mask(struct adf_accel_dev *accel_dev) { device_t pdev = accel_dev->accel_pci_dev.pci_dev; u32 fuse; u32 me_straps; u32 me_disable; u32 ssms_disabled; fuse = pci_read_config(pdev, ADF_DEVICE_FUSECTL_OFFSET, 4); me_straps = pci_read_config(pdev, ADF_200XX_SOFTSTRAP_CSR_OFFSET, 4); /* If SSMs are disabled, then disable the corresponding MEs */ ssms_disabled = (~get_accel_mask(accel_dev)) & ADF_200XX_ACCELERATORS_MASK; me_disable = 0x3; while (ssms_disabled) { if (ssms_disabled & 1) me_straps |= me_disable; ssms_disabled >>= 1; me_disable <<= 2; } return (~(fuse | me_straps)) & ADF_200XX_ACCELENGINES_MASK; } static u32 get_num_accels(struct adf_hw_device_data *self) { u32 i, ctr = 0; if (!self || !self->accel_mask) return 0; for (i = 0; i < ADF_200XX_MAX_ACCELERATORS; i++) { if (self->accel_mask & (1 << i)) ctr++; } return ctr; } static u32 get_num_aes(struct adf_hw_device_data *self) { u32 i, ctr = 0; if (!self || !self->ae_mask) return 0; for (i = 0; i < ADF_200XX_MAX_ACCELENGINES; i++) { if (self->ae_mask & (1 << i)) ctr++; } return ctr; } static u32 get_misc_bar_id(struct adf_hw_device_data *self) { return ADF_200XX_PMISC_BAR; } static u32 get_etr_bar_id(struct adf_hw_device_data *self) { return ADF_200XX_ETR_BAR; } static u32 get_sram_bar_id(struct adf_hw_device_data *self) { return 0; } static enum dev_sku_info get_sku(struct adf_hw_device_data *self) { int aes = get_num_aes(self); if (aes == 6) return DEV_SKU_4; return DEV_SKU_UNKNOWN; } static void adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev, u32 const **arb_map_config) { int i; struct adf_hw_device_data *hw_device = accel_dev->hw_device; for (i = 0; i < ADF_200XX_MAX_ACCELENGINES; i++) { thrd_to_arb_map_gen[i] = 0; if (hw_device->ae_mask & (1 << i)) thrd_to_arb_map_gen[i] = thrd_to_arb_map[i]; } adf_cfg_gen_dispatch_arbiter(accel_dev, thrd_to_arb_map, thrd_to_arb_map_gen, ADF_200XX_MAX_ACCELENGINES); *arb_map_config = thrd_to_arb_map_gen; } static u32 
get_pf2vf_offset(u32 i) { return ADF_200XX_PF2VF_OFFSET(i); } static u32 get_vintmsk_offset(u32 i) { return ADF_200XX_VINTMSK_OFFSET(i); } static void get_arb_info(struct arb_info *arb_csrs_info) { arb_csrs_info->arbiter_offset = ADF_200XX_ARB_OFFSET; arb_csrs_info->wrk_thd_2_srv_arb_map = ADF_200XX_ARB_WRK_2_SER_MAP_OFFSET; arb_csrs_info->wrk_cfg_offset = ADF_200XX_ARB_WQCFG_OFFSET; } static void get_admin_info(struct admin_info *admin_csrs_info) { admin_csrs_info->mailbox_offset = ADF_200XX_MAILBOX_BASE_OFFSET; admin_csrs_info->admin_msg_ur = ADF_200XX_ADMINMSGUR_OFFSET; admin_csrs_info->admin_msg_lr = ADF_200XX_ADMINMSGLR_OFFSET; } static void get_errsou_offset(u32 *errsou3, u32 *errsou5) { *errsou3 = ADF_200XX_ERRSOU3; *errsou5 = ADF_200XX_ERRSOU5; } static u32 get_clock_speed(struct adf_hw_device_data *self) { /* CPP clock is half high-speed clock */ return self->clock_frequency / 2; } static void adf_enable_error_interrupts(struct resource *csr) { ADF_CSR_WR(csr, ADF_ERRMSK0, ADF_200XX_ERRMSK0_CERR); /* ME0-ME3 */ ADF_CSR_WR(csr, ADF_ERRMSK1, ADF_200XX_ERRMSK1_CERR); /* ME4-ME5 */ ADF_CSR_WR(csr, ADF_ERRMSK5, ADF_200XX_ERRMSK5_CERR); /* SSM2 */ /* Reset everything except VFtoPF1_16. */ adf_csr_fetch_and_and(csr, ADF_ERRMSK3, ADF_200XX_VF2PF1_16); /* RI CPP bus interface error detection and reporting. */ ADF_CSR_WR(csr, ADF_200XX_RICPPINTCTL, ADF_200XX_RICPP_EN); /* TI CPP bus interface error detection and reporting. */ ADF_CSR_WR(csr, ADF_200XX_TICPPINTCTL, ADF_200XX_TICPP_EN); /* Enable CFC Error interrupts and logging. */ ADF_CSR_WR(csr, ADF_200XX_CPP_CFC_ERR_CTRL, ADF_200XX_CPP_CFC_UE); } static void adf_disable_error_interrupts(struct adf_accel_dev *accel_dev) { struct adf_bar *misc_bar = &GET_BARS(accel_dev)[ADF_200XX_PMISC_BAR]; struct resource *csr = misc_bar->virt_addr; /* ME0-ME3 */ ADF_CSR_WR(csr, ADF_ERRMSK0, ADF_200XX_ERRMSK0_UERR | ADF_200XX_ERRMSK0_CERR); /* ME4-ME5 */ ADF_CSR_WR(csr, ADF_ERRMSK1, ADF_200XX_ERRMSK1_UERR | ADF_200XX_ERRMSK1_CERR); /* CPP Push Pull, RI, TI, SSM0-SSM1, CFC */ ADF_CSR_WR(csr, ADF_ERRMSK3, ADF_200XX_ERRMSK3_UERR); /* SSM2 */ ADF_CSR_WR(csr, ADF_ERRMSK5, ADF_200XX_ERRMSK5_UERR); } static int adf_check_uncorrectable_error(struct adf_accel_dev *accel_dev) { struct adf_bar *misc_bar = &GET_BARS(accel_dev)[ADF_200XX_PMISC_BAR]; struct resource *csr = misc_bar->virt_addr; u32 errsou0 = ADF_CSR_RD(csr, ADF_ERRSOU0) & ADF_200XX_ERRMSK0_UERR; u32 errsou1 = ADF_CSR_RD(csr, ADF_ERRSOU1) & ADF_200XX_ERRMSK1_UERR; u32 errsou3 = ADF_CSR_RD(csr, ADF_ERRSOU3) & ADF_200XX_ERRMSK3_UERR; u32 errsou5 = ADF_CSR_RD(csr, ADF_ERRSOU5) & ADF_200XX_ERRMSK5_UERR; return (errsou0 | errsou1 | errsou3 | errsou5); } static void adf_enable_mmp_error_correction(struct resource *csr, struct adf_hw_device_data *hw_data) { unsigned int dev, mmp; unsigned int mask; /* Enable MMP Logging */ for (dev = 0, mask = hw_data->accel_mask; mask; dev++, mask >>= 1) { if (!(mask & 1)) continue; /* Set power-up */ adf_csr_fetch_and_and(csr, ADF_200XX_SLICEPWRDOWN(dev), ~ADF_200XX_MMP_PWR_UP_MSK); if (hw_data->accel_capabilities_mask & ADF_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC) { for (mmp = 0; mmp < ADF_MAX_MMP; ++mmp) { /* * The device supports PKE, * so enable error reporting from MMP memory */ adf_csr_fetch_and_or(csr, ADF_UERRSSMMMP(dev, mmp), ADF_200XX_UERRSSMMMP_EN); /* * The device supports PKE, * so enable error correction from MMP memory */ adf_csr_fetch_and_or(csr, ADF_CERRSSMMMP(dev, mmp), ADF_200XX_CERRSSMMMP_EN); } } else { for (mmp = 0; mmp < ADF_MAX_MMP; ++mmp) { /* * The 
device doesn't support PKE, * so disable error reporting from MMP memory */ adf_csr_fetch_and_and(csr, ADF_UERRSSMMMP(dev, mmp), ~ADF_200XX_UERRSSMMMP_EN); /* * The device doesn't support PKE, * so disable error correction from MMP memory */ adf_csr_fetch_and_and(csr, ADF_CERRSSMMMP(dev, mmp), ~ADF_200XX_CERRSSMMMP_EN); } } /* Restore power-down value */ adf_csr_fetch_and_or(csr, ADF_200XX_SLICEPWRDOWN(dev), ADF_200XX_MMP_PWR_UP_MSK); /* Disabling correctable error interrupts. */ ADF_CSR_WR(csr, ADF_200XX_INTMASKSSM(dev), ADF_200XX_INTMASKSSM_UERR); } } static void adf_enable_error_correction(struct adf_accel_dev *accel_dev) { struct adf_hw_device_data *hw_device = accel_dev->hw_device; struct adf_bar *misc_bar = &GET_BARS(accel_dev)[ADF_200XX_PMISC_BAR]; struct resource *csr = misc_bar->virt_addr; unsigned int val, i; unsigned int mask; /* Enable Accel Engine error detection & correction */ mask = hw_device->ae_mask; for (i = 0; mask; i++, mask >>= 1) { if (!(mask & 1)) continue; val = ADF_CSR_RD(csr, ADF_200XX_AE_CTX_ENABLES(i)); val |= ADF_200XX_ENABLE_AE_ECC_ERR; ADF_CSR_WR(csr, ADF_200XX_AE_CTX_ENABLES(i), val); val = ADF_CSR_RD(csr, ADF_200XX_AE_MISC_CONTROL(i)); val |= ADF_200XX_ENABLE_AE_ECC_PARITY_CORR; ADF_CSR_WR(csr, ADF_200XX_AE_MISC_CONTROL(i), val); } /* Enable shared memory error detection & correction */ mask = hw_device->accel_mask; for (i = 0; mask; i++, mask >>= 1) { if (!(mask & 1)) continue; val = ADF_CSR_RD(csr, ADF_200XX_UERRSSMSH(i)); val |= ADF_200XX_ERRSSMSH_EN; ADF_CSR_WR(csr, ADF_200XX_UERRSSMSH(i), val); val = ADF_CSR_RD(csr, ADF_200XX_CERRSSMSH(i)); val |= ADF_200XX_ERRSSMSH_EN; ADF_CSR_WR(csr, ADF_200XX_CERRSSMSH(i), val); val = ADF_CSR_RD(csr, ADF_PPERR(i)); val |= ADF_200XX_PPERR_EN; ADF_CSR_WR(csr, ADF_PPERR(i), val); } adf_enable_error_interrupts(csr); adf_enable_mmp_error_correction(csr, hw_device); } static void adf_enable_ints(struct adf_accel_dev *accel_dev) { struct resource *addr; addr = (&GET_BARS(accel_dev)[ADF_200XX_PMISC_BAR])->virt_addr; /* Enable bundle and misc interrupts */ ADF_CSR_WR(addr, ADF_200XX_SMIAPF0_MASK_OFFSET, ADF_200XX_SMIA0_MASK); ADF_CSR_WR(addr, ADF_200XX_SMIAPF1_MASK_OFFSET, ADF_200XX_SMIA1_MASK); } static u32 get_ae_clock(struct adf_hw_device_data *self) { /* * Clock update interval is <16> ticks for 200xx. 
*/ return self->clock_frequency / 16; } static int get_storage_enabled(struct adf_accel_dev *accel_dev, uint32_t *storage_enabled) { char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES]; char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES]; strlcpy(key, ADF_STORAGE_FIRMWARE_ENABLED, sizeof(key)); if (!adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC, key, val)) { if (kstrtouint(val, 0, storage_enabled)) return -EFAULT; } return 0; } static int measure_clock(struct adf_accel_dev *accel_dev) { u32 frequency; int ret = 0; ret = adf_dev_measure_clock(accel_dev, &frequency, ADF_200XX_MIN_AE_FREQ, ADF_200XX_MAX_AE_FREQ); if (ret) return ret; accel_dev->hw_device->clock_frequency = frequency; return 0; } static u32 adf_200xx_get_hw_cap(struct adf_accel_dev *accel_dev) { device_t pdev = accel_dev->accel_pci_dev.pci_dev; u32 legfuses; u32 capabilities; u32 straps; struct adf_hw_device_data *hw_data = accel_dev->hw_device; u32 fuses = hw_data->fuses; /* Read accelerator capabilities mask */ legfuses = pci_read_config(pdev, ADF_DEVICE_LEGFUSE_OFFSET, 4); capabilities = ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC + ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC + ICP_ACCEL_CAPABILITIES_CIPHER + ICP_ACCEL_CAPABILITIES_AUTHENTICATION + ICP_ACCEL_CAPABILITIES_COMPRESSION + ICP_ACCEL_CAPABILITIES_ZUC + ICP_ACCEL_CAPABILITIES_SHA3 + ICP_ACCEL_CAPABILITIES_HKDF + ICP_ACCEL_CAPABILITIES_ECEDMONT + ICP_ACCEL_CAPABILITIES_EXT_ALGCHAIN; if (legfuses & ICP_ACCEL_MASK_CIPHER_SLICE) capabilities &= ~(ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC | ICP_ACCEL_CAPABILITIES_CIPHER | ICP_ACCEL_CAPABILITIES_EXT_ALGCHAIN); if (legfuses & ICP_ACCEL_MASK_AUTH_SLICE) capabilities &= ~ICP_ACCEL_CAPABILITIES_AUTHENTICATION; if (legfuses & ICP_ACCEL_MASK_PKE_SLICE) capabilities &= ~(ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC | ICP_ACCEL_CAPABILITIES_ECEDMONT); if (legfuses & ICP_ACCEL_MASK_COMPRESS_SLICE) capabilities &= ~ICP_ACCEL_CAPABILITIES_COMPRESSION; if (legfuses & ICP_ACCEL_MASK_EIA3_SLICE) capabilities &= ~ICP_ACCEL_CAPABILITIES_ZUC; if (legfuses & ICP_ACCEL_MASK_SHA3_SLICE) capabilities &= ~ICP_ACCEL_CAPABILITIES_SHA3; straps = pci_read_config(pdev, ADF_200XX_SOFTSTRAP_CSR_OFFSET, 4); if ((straps | fuses) & ADF_200XX_POWERGATE_PKE) capabilities &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC; if ((straps | fuses) & ADF_200XX_POWERGATE_CY) capabilities &= ~ICP_ACCEL_CAPABILITIES_COMPRESSION; return capabilities; } static const char * get_obj_name(struct adf_accel_dev *accel_dev, enum adf_accel_unit_services service) { return ADF_CXXX_AE_FW_NAME_CUSTOM1; } static uint32_t get_objs_num(struct adf_accel_dev *accel_dev) { return 1; } static uint32_t get_obj_cfg_ae_mask(struct adf_accel_dev *accel_dev, enum adf_accel_unit_services services) { return accel_dev->hw_device->ae_mask; } void adf_init_hw_data_200xx(struct adf_hw_device_data *hw_data) { hw_data->dev_class = &qat_200xx_class; hw_data->instance_id = qat_200xx_class.instances++; hw_data->num_banks = ADF_200XX_ETR_MAX_BANKS; hw_data->num_rings_per_bank = ADF_ETR_MAX_RINGS_PER_BANK; hw_data->num_accel = ADF_200XX_MAX_ACCELERATORS; hw_data->num_logical_accel = 1; hw_data->num_engines = ADF_200XX_MAX_ACCELENGINES; hw_data->tx_rx_gap = ADF_200XX_RX_RINGS_OFFSET; hw_data->tx_rings_mask = ADF_200XX_TX_RINGS_MASK; hw_data->alloc_irq = adf_isr_resource_alloc; hw_data->free_irq = adf_isr_resource_free; hw_data->enable_error_correction = adf_enable_error_correction; hw_data->check_uncorrectable_error = adf_check_uncorrectable_error; hw_data->print_err_registers = adf_print_err_registers; hw_data->disable_error_interrupts = 
adf_disable_error_interrupts; hw_data->get_accel_mask = get_accel_mask; hw_data->get_ae_mask = get_ae_mask; hw_data->get_num_accels = get_num_accels; hw_data->get_num_aes = get_num_aes; hw_data->get_sram_bar_id = get_sram_bar_id; hw_data->get_etr_bar_id = get_etr_bar_id; hw_data->get_misc_bar_id = get_misc_bar_id; hw_data->get_pf2vf_offset = get_pf2vf_offset; hw_data->get_vintmsk_offset = get_vintmsk_offset; hw_data->get_arb_info = get_arb_info; hw_data->get_admin_info = get_admin_info; hw_data->get_errsou_offset = get_errsou_offset; hw_data->get_clock_speed = get_clock_speed; hw_data->get_sku = get_sku; + hw_data->heartbeat_ctr_num = ADF_NUM_HB_CNT_PER_AE; hw_data->fw_name = ADF_200XX_FW; hw_data->fw_mmp_name = ADF_200XX_MMP; hw_data->init_admin_comms = adf_init_admin_comms; hw_data->exit_admin_comms = adf_exit_admin_comms; hw_data->disable_iov = adf_disable_sriov; hw_data->send_admin_init = adf_send_admin_init; hw_data->init_arb = adf_init_gen2_arb; hw_data->exit_arb = adf_exit_arb; hw_data->get_arb_mapping = adf_get_arbiter_mapping; hw_data->enable_ints = adf_enable_ints; hw_data->set_ssm_wdtimer = adf_set_ssm_wdtimer; hw_data->check_slice_hang = adf_check_slice_hang; hw_data->enable_vf2pf_comms = adf_pf_enable_vf2pf_comms; hw_data->disable_vf2pf_comms = adf_pf_disable_vf2pf_comms; hw_data->restore_device = adf_dev_restore; hw_data->reset_device = adf_reset_flr; hw_data->min_iov_compat_ver = ADF_PFVF_COMPATIBILITY_VERSION; hw_data->measure_clock = measure_clock; hw_data->get_ae_clock = get_ae_clock; hw_data->reset_device = adf_reset_flr; hw_data->get_objs_num = get_objs_num; hw_data->get_obj_name = get_obj_name; hw_data->get_obj_cfg_ae_mask = get_obj_cfg_ae_mask; hw_data->get_accel_cap = adf_200xx_get_hw_cap; hw_data->clock_frequency = ADF_200XX_AE_FREQ; hw_data->extended_dc_capabilities = 0; hw_data->get_storage_enabled = get_storage_enabled; hw_data->query_storage_cap = 1; hw_data->get_heartbeat_status = adf_get_heartbeat_status; hw_data->get_ae_clock = get_ae_clock; hw_data->storage_enable = 0; hw_data->get_ring_to_svc_map = adf_cfg_get_services_enabled; hw_data->config_device = adf_config_device; hw_data->set_asym_rings_mask = adf_cfg_set_asym_rings_mask; hw_data->ring_to_svc_map = ADF_DEFAULT_RING_TO_SRV_MAP; hw_data->pre_reset = adf_dev_pre_reset; hw_data->post_reset = adf_dev_post_reset; + + adf_gen2_init_hw_csr_info(&hw_data->csr_info); } void adf_clean_hw_data_200xx(struct adf_hw_device_data *hw_data) { hw_data->dev_class->instances--; } diff --git a/sys/dev/qat/qat_hw/qat_200xx/adf_drv.c b/sys/dev/qat/qat_hw/qat_200xx/adf_drv.c index 65626ac4b56f..75b9778ab84f 100644 --- a/sys/dev/qat/qat_hw/qat_200xx/adf_drv.c +++ b/sys/dev/qat/qat_hw/qat_200xx/adf_drv.c @@ -1,280 +1,281 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright(c) 2007-2022 Intel Corporation */ /* $FreeBSD$ */ #include "qat_freebsd.h" #include "adf_cfg.h" #include "adf_common_drv.h" #include "adf_accel_devices.h" #include "adf_200xx_hw_data.h" #include "adf_fw_counters.h" #include "adf_cfg_device.h" #include #include #include #include #include #include "adf_heartbeat_dbg.h" #include "adf_cnvnr_freq_counters.h" static MALLOC_DEFINE(M_QAT_200XX, "qat_200xx", "qat_200xx"); #define ADF_SYSTEM_DEVICE(device_id) \ { \ PCI_VENDOR_ID_INTEL, device_id \ } static const struct pci_device_id adf_pci_tbl[] = { ADF_SYSTEM_DEVICE(ADF_200XX_PCI_DEVICE_ID), { 0, } }; static int adf_probe(device_t dev) { const struct pci_device_id *id; for (id = adf_pci_tbl; id->vendor != 0; id++) { if (pci_get_vendor(dev) == id->vendor && 
pci_get_device(dev) == id->device) { device_set_desc(dev, "Intel " ADF_200XX_DEVICE_NAME " QuickAssist"); return BUS_PROBE_GENERIC; } } return ENXIO; } static void adf_cleanup_accel(struct adf_accel_dev *accel_dev) { struct adf_accel_pci *accel_pci_dev = &accel_dev->accel_pci_dev; int i; if (accel_dev->dma_tag) bus_dma_tag_destroy(accel_dev->dma_tag); for (i = 0; i < ADF_PCI_MAX_BARS; i++) { struct adf_bar *bar = &accel_pci_dev->pci_bars[i]; if (bar->virt_addr) bus_free_resource(accel_pci_dev->pci_dev, SYS_RES_MEMORY, bar->virt_addr); } if (accel_dev->hw_device) { switch (pci_get_device(accel_pci_dev->pci_dev)) { case ADF_200XX_PCI_DEVICE_ID: adf_clean_hw_data_200xx(accel_dev->hw_device); break; default: break; } free(accel_dev->hw_device, M_QAT_200XX); accel_dev->hw_device = NULL; } adf_cfg_dev_remove(accel_dev); adf_devmgr_rm_dev(accel_dev, NULL); } static int adf_attach(device_t dev) { struct adf_accel_dev *accel_dev; struct adf_accel_pci *accel_pci_dev; struct adf_hw_device_data *hw_data; unsigned int i = 0, bar_nr = 0, reg_val = 0; int ret, rid; struct adf_cfg_device *cfg_dev = NULL; /* Set pci MaxPayLoad to 256. Implemented to avoid the issue of * Pci-passthrough causing Maxpayload to be reset to 128 bytes * when the device is reset. */ if (pci_get_max_payload(dev) != 256) pci_set_max_payload(dev, 256); accel_dev = device_get_softc(dev); INIT_LIST_HEAD(&accel_dev->crypto_list); accel_pci_dev = &accel_dev->accel_pci_dev; accel_pci_dev->pci_dev = dev; if (bus_get_domain(dev, &accel_pci_dev->node) != 0) accel_pci_dev->node = 0; /* XXX: Revisit if we actually need a devmgr table at all. */ /* Add accel device to accel table. * This should be called before adf_cleanup_accel is called */ if (adf_devmgr_add_dev(accel_dev, NULL)) { device_printf(dev, "Failed to add new accelerator device.\n"); return ENXIO; } /* Allocate and configure device configuration structure */ hw_data = malloc(sizeof(*hw_data), M_QAT_200XX, M_WAITOK | M_ZERO); accel_dev->hw_device = hw_data; adf_init_hw_data_200xx(accel_dev->hw_device); accel_pci_dev->revid = pci_get_revid(dev); hw_data->fuses = pci_read_config(dev, ADF_DEVICE_FUSECTL_OFFSET, 4); if (accel_pci_dev->revid == 0x00) { device_printf(dev, "A0 stepping is not supported.\n"); ret = ENODEV; goto out_err; } /* Get PPAERUCM values and store */ ret = adf_aer_store_ppaerucm_reg(dev, hw_data); if (ret) goto out_err; /* Clear PFIEERRUNCSTSR register bits if they are set */ reg_val = pci_read_config(dev, ADF_200XX_PFIEERRUNCSTSR, 4); if (reg_val) { device_printf( dev, "Clearing PFIEERRUNCSTSR, previous status : %0x\n", reg_val); pci_write_config(dev, ADF_200XX_PFIEERRUNCSTSR, reg_val, 4); } /* Get Accelerators and Accelerators Engines masks */ hw_data->accel_mask = hw_data->get_accel_mask(accel_dev); hw_data->ae_mask = hw_data->get_ae_mask(accel_dev); + hw_data->admin_ae_mask = hw_data->ae_mask; accel_pci_dev->sku = hw_data->get_sku(hw_data); /* If the device has no acceleration engines then ignore it. 
*/ if (!hw_data->accel_mask || !hw_data->ae_mask || (~hw_data->ae_mask & 0x01)) { device_printf(dev, "No acceleration units found\n"); ret = ENXIO; goto out_err; } /* Create device configuration table */ ret = adf_cfg_dev_add(accel_dev); if (ret) goto out_err; ret = adf_clock_debugfs_add(accel_dev); if (ret) goto out_err; pci_set_max_read_req(dev, 1024); ret = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, BUS_SPACE_MAXSIZE, /* BUS_SPACE_UNRESTRICTED */ 1, BUS_SPACE_MAXSIZE, 0, NULL, NULL, &accel_dev->dma_tag); if (ret) goto out_err; if (hw_data->get_accel_cap) { hw_data->accel_capabilities_mask = hw_data->get_accel_cap(accel_dev); } /* Find and map all the device's BARS */ for (bar_nr = 0; i < ADF_PCI_MAX_BARS && bar_nr < PCIR_MAX_BAR_0; bar_nr++) { struct adf_bar *bar; /* * XXX: This isn't quite right as it will ignore a BAR * that wasn't assigned a valid resource range by the * firmware. */ rid = PCIR_BAR(bar_nr); if (bus_get_resource(dev, SYS_RES_MEMORY, rid, NULL, NULL) != 0) continue; bar = &accel_pci_dev->pci_bars[i++]; bar->virt_addr = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (!bar->virt_addr) { device_printf(dev, "Failed to map BAR %d\n", bar_nr); ret = ENXIO; goto out_err; } bar->base_addr = rman_get_start(bar->virt_addr); bar->size = rman_get_size(bar->virt_addr); } pci_enable_busmaster(dev); if (!accel_dev->hw_device->config_device) { ret = EFAULT; goto out_err; } ret = accel_dev->hw_device->config_device(accel_dev); if (ret) goto out_err; ret = adf_dev_init(accel_dev); if (ret) goto out_dev_shutdown; ret = adf_dev_start(accel_dev); if (ret) goto out_dev_stop; cfg_dev = accel_dev->cfg->dev; adf_cfg_device_clear(cfg_dev, accel_dev); free(cfg_dev, M_QAT); accel_dev->cfg->dev = NULL; return ret; out_dev_stop: adf_dev_stop(accel_dev); out_dev_shutdown: adf_dev_shutdown(accel_dev); out_err: adf_cleanup_accel(accel_dev); return ret; } static int adf_detach(device_t dev) { struct adf_accel_dev *accel_dev = device_get_softc(dev); if (adf_dev_stop(accel_dev)) { device_printf(dev, "Failed to stop QAT accel dev\n"); return EBUSY; } adf_dev_shutdown(accel_dev); adf_cleanup_accel(accel_dev); return 0; } static device_method_t adf_methods[] = { DEVMETHOD(device_probe, adf_probe), DEVMETHOD(device_attach, adf_attach), DEVMETHOD(device_detach, adf_detach), DEVMETHOD_END }; static driver_t adf_driver = { "qat", adf_methods, sizeof(struct adf_accel_dev) }; DRIVER_MODULE_ORDERED(qat_200xx, pci, adf_driver, NULL, NULL, SI_ORDER_THIRD); MODULE_VERSION(qat_200xx, 1); MODULE_DEPEND(qat_200xx, qat_common, 1, 1, 1); MODULE_DEPEND(qat_200xx, qat_api, 1, 1, 1); MODULE_DEPEND(qat_200xx, linuxkpi, 1, 1, 1); diff --git a/sys/dev/qat/qat_hw/qat_4xxx/adf_4xxx_hw_data.c b/sys/dev/qat/qat_hw/qat_4xxx/adf_4xxx_hw_data.c new file mode 100644 index 000000000000..1c8c9a2fda4c --- /dev/null +++ b/sys/dev/qat/qat_hw/qat_4xxx/adf_4xxx_hw_data.c @@ -0,0 +1,973 @@ +/* SPDX-License-Identifier: BSD-3-Clause */ +/* Copyright(c) 2007-2022 Intel Corporation */ +/* $FreeBSD$ */ +#include +#include +#include +#include +#include +#include +#include +#include "adf_4xxx_hw_data.h" +#include "adf_heartbeat.h" +#include "icp_qat_fw_init_admin.h" +#include "icp_qat_hw.h" + +#define ADF_CONST_TABLE_SIZE 1024 + +struct adf_fw_config { + u32 ae_mask; + char *obj_name; +}; + +/* Accel unit information */ +static const struct adf_accel_unit adf_4xxx_au_a_ae[] = { + { 0x1, 0x1, 0xF, 0x1B, 4, ADF_ACCEL_SERVICE_NULL }, + { 0x2, 0x1, 0xF0, 0x6C0, 4, 
ADF_ACCEL_SERVICE_NULL }, + { 0x4, 0x1, 0x100, 0xF000, 1, ADF_ACCEL_ADMIN }, +}; + +/* Worker thread to service arbiter mappings */ +static u32 thrd_to_arb_map[ADF_4XXX_MAX_ACCELENGINES] = { 0x5555555, 0x5555555, + 0x5555555, 0x5555555, + 0xAAAAAAA, 0xAAAAAAA, + 0xAAAAAAA, 0xAAAAAAA, + 0x0 }; + +/* Masks representing ME thread-service mappings. + * Thread 7 carries out Admin work and is thus + * left out. + */ +static u8 default_active_thd_mask = 0x7F; +static u8 dc_me_active_thd_mask = 0x03; + +static u32 thrd_to_arb_map_gen[ADF_4XXX_MAX_ACCELENGINES] = { 0 }; + +#define ADF_4XXX_ASYM_SYM \ + (ASYM | SYM << ADF_CFG_SERV_RING_PAIR_1_SHIFT | \ + ASYM << ADF_CFG_SERV_RING_PAIR_2_SHIFT | \ + SYM << ADF_CFG_SERV_RING_PAIR_3_SHIFT) + +#define ADF_4XXX_DC \ + (COMP | COMP << ADF_CFG_SERV_RING_PAIR_1_SHIFT | \ + COMP << ADF_CFG_SERV_RING_PAIR_2_SHIFT | \ + COMP << ADF_CFG_SERV_RING_PAIR_3_SHIFT) + +#define ADF_4XXX_SYM \ + (SYM | SYM << ADF_CFG_SERV_RING_PAIR_1_SHIFT | \ + SYM << ADF_CFG_SERV_RING_PAIR_2_SHIFT | \ + SYM << ADF_CFG_SERV_RING_PAIR_3_SHIFT) + +#define ADF_4XXX_ASYM \ + (ASYM | ASYM << ADF_CFG_SERV_RING_PAIR_1_SHIFT | \ + ASYM << ADF_CFG_SERV_RING_PAIR_2_SHIFT | \ + ASYM << ADF_CFG_SERV_RING_PAIR_3_SHIFT) + +#define ADF_4XXX_ASYM_DC \ + (ASYM | ASYM << ADF_CFG_SERV_RING_PAIR_1_SHIFT | \ + COMP << ADF_CFG_SERV_RING_PAIR_2_SHIFT | \ + COMP << ADF_CFG_SERV_RING_PAIR_3_SHIFT) + +#define ADF_4XXX_SYM_DC \ + (SYM | SYM << ADF_CFG_SERV_RING_PAIR_1_SHIFT | \ + COMP << ADF_CFG_SERV_RING_PAIR_2_SHIFT | \ + COMP << ADF_CFG_SERV_RING_PAIR_3_SHIFT) + +#define ADF_4XXX_NA \ + (NA | NA << ADF_CFG_SERV_RING_PAIR_1_SHIFT | \ + NA << ADF_CFG_SERV_RING_PAIR_2_SHIFT | \ + NA << ADF_CFG_SERV_RING_PAIR_3_SHIFT) + +#define ADF_4XXX_DEFAULT_RING_TO_SRV_MAP ADF_4XXX_ASYM_SYM + +struct adf_enabled_services { + const char svcs_enabled[ADF_CFG_MAX_VAL_LEN_IN_BYTES]; + u16 rng_to_svc_msk; +}; + +static struct adf_enabled_services adf_4xxx_svcs[] = { + { "dc", ADF_4XXX_DC }, + { "sym", ADF_4XXX_SYM }, + { "asym", ADF_4XXX_ASYM }, + { "dc;asym", ADF_4XXX_ASYM_DC }, + { "asym;dc", ADF_4XXX_ASYM_DC }, + { "sym;dc", ADF_4XXX_SYM_DC }, + { "dc;sym", ADF_4XXX_SYM_DC }, + { "asym;sym", ADF_4XXX_ASYM_SYM }, + { "sym;asym", ADF_4XXX_ASYM_SYM }, +}; + +static struct adf_hw_device_class adf_4xxx_class = { + .name = ADF_4XXX_DEVICE_NAME, + .type = DEV_4XXX, + .instances = 0, +}; + +static u32 +get_accel_mask(struct adf_accel_dev *accel_dev) +{ + return ADF_4XXX_ACCELERATORS_MASK; +} + +static u32 +get_ae_mask(struct adf_accel_dev *accel_dev) +{ + u32 fusectl4 = accel_dev->hw_device->fuses; + + return ~fusectl4 & ADF_4XXX_ACCELENGINES_MASK; +} + +static int +get_ring_to_svc_map(struct adf_accel_dev *accel_dev, u16 *ring_to_svc_map) +{ + char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES]; + char val[ADF_CFG_MAX_KEY_LEN_IN_BYTES]; + u32 i = 0; + + *ring_to_svc_map = 0; + /* Get the services enabled by user */ + snprintf(key, sizeof(key), ADF_SERVICES_ENABLED); + if (adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC, key, val)) + return EFAULT; + + for (i = 0; i < ARRAY_SIZE(adf_4xxx_svcs); i++) { + if (!strncmp(val, + adf_4xxx_svcs[i].svcs_enabled, + ADF_CFG_MAX_KEY_LEN_IN_BYTES)) { + *ring_to_svc_map = adf_4xxx_svcs[i].rng_to_svc_msk; + return 0; + } + } + + device_printf(GET_DEV(accel_dev), + "Invalid services enabled: %s\n", + val); + return EFAULT; +} + +static u32 +get_num_accels(struct adf_hw_device_data *self) +{ + return ADF_4XXX_MAX_ACCELERATORS; +} + +static u32 +get_num_aes(struct adf_hw_device_data *self) +{ + if (!self || 
!self->ae_mask) + return 0; + + return hweight32(self->ae_mask); +} + +static u32 +get_misc_bar_id(struct adf_hw_device_data *self) +{ + return ADF_4XXX_PMISC_BAR; +} + +static u32 +get_etr_bar_id(struct adf_hw_device_data *self) +{ + return ADF_4XXX_ETR_BAR; +} + +static u32 +get_sram_bar_id(struct adf_hw_device_data *self) +{ + return ADF_4XXX_SRAM_BAR; +} + +/* + * The vector routing table is used to select the MSI-X entry to use for each + * interrupt source. + * The first ADF_4XXX_ETR_MAX_BANKS entries correspond to ring interrupts. + * The final entry corresponds to VF2PF or error interrupts. + * This vector table could be used to configure one MSI-X entry to be shared + * between multiple interrupt sources. + * + * The default routing is set to have a one to one correspondence between the + * interrupt source and the MSI-X entry used. + */ +static void +set_msix_default_rttable(struct adf_accel_dev *accel_dev) +{ + struct resource *csr; + int i; + + csr = (&GET_BARS(accel_dev)[ADF_4XXX_PMISC_BAR])->virt_addr; + for (i = 0; i <= ADF_4XXX_ETR_MAX_BANKS; i++) + ADF_CSR_WR(csr, ADF_4XXX_MSIX_RTTABLE_OFFSET(i), i); +} + +static u32 +adf_4xxx_get_hw_cap(struct adf_accel_dev *accel_dev) +{ + device_t pdev = accel_dev->accel_pci_dev.pci_dev; + u32 fusectl1; + u32 capabilities; + + /* Read accelerator capabilities mask */ + fusectl1 = pci_read_config(pdev, ADF_4XXX_FUSECTL1_OFFSET, 4); + capabilities = ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC | + ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC | + ICP_ACCEL_CAPABILITIES_CIPHER | + ICP_ACCEL_CAPABILITIES_AUTHENTICATION | + ICP_ACCEL_CAPABILITIES_COMPRESSION | + ICP_ACCEL_CAPABILITIES_LZ4_COMPRESSION | + ICP_ACCEL_CAPABILITIES_LZ4S_COMPRESSION | + ICP_ACCEL_CAPABILITIES_HKDF | ICP_ACCEL_CAPABILITIES_SHA3_EXT | + ICP_ACCEL_CAPABILITIES_SM3 | ICP_ACCEL_CAPABILITIES_SM4 | + ICP_ACCEL_CAPABILITIES_CHACHA_POLY | + ICP_ACCEL_CAPABILITIES_AESGCM_SPC | + ICP_ACCEL_CAPABILITIES_AES_V2 | ICP_ACCEL_CAPABILITIES_RL; + + if (fusectl1 & ICP_ACCEL_4XXX_MASK_CIPHER_SLICE) { + capabilities &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC; + capabilities &= ~ICP_ACCEL_CAPABILITIES_CIPHER; + } + if (fusectl1 & ICP_ACCEL_4XXX_MASK_AUTH_SLICE) + capabilities &= ~ICP_ACCEL_CAPABILITIES_AUTHENTICATION; + if (fusectl1 & ICP_ACCEL_4XXX_MASK_PKE_SLICE) + capabilities &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC; + if (fusectl1 & ICP_ACCEL_4XXX_MASK_COMPRESS_SLICE) { + capabilities &= ~ICP_ACCEL_CAPABILITIES_COMPRESSION; + capabilities &= ~ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY64; + } + if (fusectl1 & ICP_ACCEL_4XXX_MASK_SMX_SLICE) { + capabilities &= ~ICP_ACCEL_CAPABILITIES_SM3; + capabilities &= ~ICP_ACCEL_CAPABILITIES_SM4; + } + return capabilities; +} + +static u32 +get_hb_clock(struct adf_hw_device_data *self) +{ + /* + * 4XXX uses KPT counter for HB + */ + return ADF_4XXX_KPT_COUNTER_FREQ; +} + +static u32 +get_ae_clock(struct adf_hw_device_data *self) +{ + /* + * Clock update interval is <16> ticks for qat_4xxx. 
+ */ + return self->clock_frequency / 16; +} + +static int +measure_clock(struct adf_accel_dev *accel_dev) +{ + u32 frequency; + int ret = 0; + + ret = adf_dev_measure_clock(accel_dev, + &frequency, + ADF_4XXX_MIN_AE_FREQ, + ADF_4XXX_MAX_AE_FREQ); + if (ret) + return ret; + + accel_dev->hw_device->clock_frequency = frequency; + return 0; +} + +static int +adf_4xxx_configure_accel_units(struct adf_accel_dev *accel_dev) +{ + char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES] = { 0 }; + char val_str[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = { 0 }; + + if (adf_cfg_section_add(accel_dev, ADF_GENERAL_SEC)) + goto err; + + snprintf(key, sizeof(key), ADF_SERVICES_ENABLED); + snprintf(val_str, + sizeof(val_str), + ADF_CFG_ASYM ADF_SERVICES_SEPARATOR ADF_CFG_SYM); + + if (adf_cfg_add_key_value_param( + accel_dev, ADF_GENERAL_SEC, key, (void *)val_str, ADF_STR)) + goto err; + + return 0; +err: + device_printf(GET_DEV(accel_dev), "Failed to configure accel units\n"); + return EINVAL; +} + +static u32 +get_num_accel_units(struct adf_hw_device_data *self) +{ + return ADF_4XXX_MAX_ACCELUNITS; +} + +static void +get_accel_unit(struct adf_hw_device_data *self, + struct adf_accel_unit **accel_unit) +{ + memcpy(*accel_unit, adf_4xxx_au_a_ae, sizeof(adf_4xxx_au_a_ae)); +} + +static void +adf_exit_accel_unit_services(struct adf_accel_dev *accel_dev) +{ + if (accel_dev->au_info) { + kfree(accel_dev->au_info->au); + accel_dev->au_info->au = NULL; + kfree(accel_dev->au_info); + accel_dev->au_info = NULL; + } +} + +static int +get_accel_unit_config(struct adf_accel_dev *accel_dev, + u8 *num_sym_au, + u8 *num_dc_au, + u8 *num_asym_au) +{ + struct adf_hw_device_data *hw_data = accel_dev->hw_device; + char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES]; + char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES]; + u32 num_au = hw_data->get_num_accel_units(hw_data); + /* One AU will be allocated by default if a service enabled */ + u32 alloc_au = 1; + /* There's always one AU that is used for Admin AE */ + u32 service_mask = ADF_ACCEL_ADMIN; + char *token, *cur_str; + u32 disabled_caps = 0; + + /* Get the services enabled by user */ + snprintf(key, sizeof(key), ADF_SERVICES_ENABLED); + if (adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC, key, val)) + return EFAULT; + cur_str = val; + token = strsep(&cur_str, ADF_SERVICES_SEPARATOR); + while (token) { + if (!strncmp(token, ADF_CFG_SYM, strlen(ADF_CFG_SYM))) + service_mask |= ADF_ACCEL_CRYPTO; + if (!strncmp(token, ADF_CFG_ASYM, strlen(ADF_CFG_ASYM))) + service_mask |= ADF_ACCEL_ASYM; + + /* cy means both asym & crypto should be enabled + * Hardware resources allocation check will be done later + */ + if (!strncmp(token, ADF_CFG_CY, strlen(ADF_CFG_CY))) + service_mask |= ADF_ACCEL_ASYM | ADF_ACCEL_CRYPTO; + if (!strncmp(token, ADF_SERVICE_DC, strlen(ADF_SERVICE_DC))) + service_mask |= ADF_ACCEL_COMPRESSION; + + token = strsep(&cur_str, ADF_SERVICES_SEPARATOR); + } + + /* Ensure the user won't enable more services than it can support */ + if (hweight32(service_mask) > num_au) { + device_printf(GET_DEV(accel_dev), + "Can't enable more services than "); + device_printf(GET_DEV(accel_dev), "%d!\n", num_au); + return EFAULT; + } else if (hweight32(service_mask) == 2) { + /* Due to limitation, besides AU for Admin AE + * only 2 more AUs can be allocated + */ + alloc_au = 2; + } + + if (service_mask & ADF_ACCEL_CRYPTO) + *num_sym_au = alloc_au; + if (service_mask & ADF_ACCEL_ASYM) + *num_asym_au = alloc_au; + if (service_mask & ADF_ACCEL_COMPRESSION) + *num_dc_au = alloc_au; + + /*update capability*/ + if (!*num_sym_au || 
!(service_mask & ADF_ACCEL_CRYPTO)) { + disabled_caps = ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC | + ICP_ACCEL_CAPABILITIES_CIPHER | + ICP_ACCEL_CAPABILITIES_SHA3_EXT | + ICP_ACCEL_CAPABILITIES_SM3 | ICP_ACCEL_CAPABILITIES_SM4 | + ICP_ACCEL_CAPABILITIES_CHACHA_POLY | + ICP_ACCEL_CAPABILITIES_AESGCM_SPC | + ICP_ACCEL_CAPABILITIES_AES_V2; + } + if (!*num_asym_au || !(service_mask & ADF_ACCEL_ASYM)) { + disabled_caps |= ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC | + ICP_ACCEL_CAPABILITIES_AUTHENTICATION; + } + if (!*num_dc_au || !(service_mask & ADF_ACCEL_COMPRESSION)) { + disabled_caps |= ICP_ACCEL_CAPABILITIES_COMPRESSION | + ICP_ACCEL_CAPABILITIES_LZ4_COMPRESSION | + ICP_ACCEL_CAPABILITIES_LZ4S_COMPRESSION | + ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY64; + accel_dev->hw_device->extended_dc_capabilities = 0; + } + accel_dev->hw_device->accel_capabilities_mask = + adf_4xxx_get_hw_cap(accel_dev) & ~disabled_caps; + + hw_data->service_mask = service_mask; + hw_data->service_to_load_mask = service_mask; + + return 0; +} + +static int +adf_init_accel_unit_services(struct adf_accel_dev *accel_dev) +{ + u8 num_sym_au = 0, num_dc_au = 0, num_asym_au = 0; + struct adf_hw_device_data *hw_data = accel_dev->hw_device; + u32 num_au = hw_data->get_num_accel_units(hw_data); + u32 au_size = num_au * sizeof(struct adf_accel_unit); + u8 i; + + if (get_accel_unit_config( + accel_dev, &num_sym_au, &num_dc_au, &num_asym_au)) + return EFAULT; + + accel_dev->au_info = kzalloc(sizeof(*accel_dev->au_info), GFP_KERNEL); + if (!accel_dev->au_info) + return ENOMEM; + + accel_dev->au_info->au = kzalloc(au_size, GFP_KERNEL); + if (!accel_dev->au_info->au) { + kfree(accel_dev->au_info); + accel_dev->au_info = NULL; + return ENOMEM; + } + + accel_dev->au_info->num_cy_au = num_sym_au; + accel_dev->au_info->num_dc_au = num_dc_au; + accel_dev->au_info->num_asym_au = num_asym_au; + + get_accel_unit(hw_data, &accel_dev->au_info->au); + + /* Enable ASYM accel units */ + for (i = 0; i < num_au && num_asym_au > 0; i++) { + if (accel_dev->au_info->au[i].services == + ADF_ACCEL_SERVICE_NULL) { + accel_dev->au_info->au[i].services = ADF_ACCEL_ASYM; + num_asym_au--; + } + } + /* Enable SYM accel units */ + for (i = 0; i < num_au && num_sym_au > 0; i++) { + if (accel_dev->au_info->au[i].services == + ADF_ACCEL_SERVICE_NULL) { + accel_dev->au_info->au[i].services = ADF_ACCEL_CRYPTO; + num_sym_au--; + } + } + /* Enable compression accel units */ + for (i = 0; i < num_au && num_dc_au > 0; i++) { + if (accel_dev->au_info->au[i].services == + ADF_ACCEL_SERVICE_NULL) { + accel_dev->au_info->au[i].services = + ADF_ACCEL_COMPRESSION; + num_dc_au--; + } + } + accel_dev->au_info->dc_ae_msk |= + hw_data->get_obj_cfg_ae_mask(accel_dev, ADF_ACCEL_COMPRESSION); + + return 0; +} + +static int +adf_init_accel_units(struct adf_accel_dev *accel_dev) +{ + return adf_init_accel_unit_services(accel_dev); +} + +static void +adf_exit_accel_units(struct adf_accel_dev *accel_dev) +{ + /* reset the AU service */ + adf_exit_accel_unit_services(accel_dev); +} + +static const char * +get_obj_name(struct adf_accel_dev *accel_dev, + enum adf_accel_unit_services service) +{ + switch (service) { + case ADF_ACCEL_ASYM: + return ADF_4XXX_ASYM_OBJ; + case ADF_ACCEL_CRYPTO: + return ADF_4XXX_SYM_OBJ; + case ADF_ACCEL_COMPRESSION: + return ADF_4XXX_DC_OBJ; + case ADF_ACCEL_ADMIN: + return ADF_4XXX_ADMIN_OBJ; + default: + return NULL; + } +} + +static uint32_t +get_objs_num(struct adf_accel_dev *accel_dev) +{ + return ADF_4XXX_MAX_OBJ; +} + +static uint32_t 
+get_obj_cfg_ae_mask(struct adf_accel_dev *accel_dev, + enum adf_accel_unit_services service) +{ + u32 ae_mask = 0; + struct adf_hw_device_data *hw_data = accel_dev->hw_device; + u32 num_au = hw_data->get_num_accel_units(hw_data); + struct adf_accel_unit *accel_unit = accel_dev->au_info->au; + u32 i = 0; + + if (service == ADF_ACCEL_SERVICE_NULL) + return 0; + + for (i = 0; i < num_au; i++) { + if (accel_unit[i].services == service) + ae_mask |= accel_unit[i].ae_mask; + } + + return ae_mask; +} + +static enum adf_accel_unit_services +adf_4xxx_get_service_type(struct adf_accel_dev *accel_dev, s32 obj_num) +{ + struct adf_accel_unit *accel_unit; + struct adf_hw_device_data *hw_data = accel_dev->hw_device; + u8 num_au = hw_data->get_num_accel_units(hw_data); + int i; + + if (!hw_data->service_to_load_mask) + return ADF_ACCEL_SERVICE_NULL; + + if (accel_dev->au_info && accel_dev->au_info->au) + accel_unit = accel_dev->au_info->au; + else + return ADF_ACCEL_SERVICE_NULL; + + for (i = num_au - 2; i >= 0; i--) { + if (hw_data->service_to_load_mask & accel_unit[i].services) { + hw_data->service_to_load_mask &= + ~accel_unit[i].services; + return accel_unit[i].services; + } + } + + /* admin AE should be loaded last */ + if (hw_data->service_to_load_mask & accel_unit[num_au - 1].services) { + hw_data->service_to_load_mask &= + ~accel_unit[num_au - 1].services; + return accel_unit[num_au - 1].services; + } + + return ADF_ACCEL_SERVICE_NULL; +} + +static void +get_ring_svc_map_data(int ring_pair_index, + u16 ring_to_svc_map, + u8 *serv_type, + int *ring_index, + int *num_rings_per_srv, + int bundle_num) +{ + *serv_type = + GET_SRV_TYPE(ring_to_svc_map, bundle_num % ADF_CFG_NUM_SERVICES); + *ring_index = 0; + *num_rings_per_srv = ADF_4XXX_NUM_RINGS_PER_BANK / 2; +} + +static int +adf_get_dc_extcapabilities(struct adf_accel_dev *accel_dev, u32 *capabilities) +{ + struct icp_qat_fw_init_admin_req req; + struct icp_qat_fw_init_admin_resp resp; + u8 i; + struct adf_hw_device_data *hw_data = accel_dev->hw_device; + u8 num_au = hw_data->get_num_accel_units(hw_data); + u32 first_dc_ae = 0; + + for (i = 0; i < num_au; i++) { + if (accel_dev->au_info->au[i].services & + ADF_ACCEL_COMPRESSION) { + first_dc_ae = accel_dev->au_info->au[i].ae_mask; + first_dc_ae &= ~(first_dc_ae - 1); + } + } + + memset(&req, 0, sizeof(req)); + memset(&resp, 0, sizeof(resp)); + req.cmd_id = ICP_QAT_FW_COMP_CAPABILITY_GET; + + if (likely(first_dc_ae)) { + if (adf_send_admin(accel_dev, &req, &resp, first_dc_ae) || + resp.status) { + *capabilities = 0; + return EFAULT; + } + + *capabilities = resp.extended_features; + } + + return 0; +} + +static int +adf_get_fw_status(struct adf_accel_dev *accel_dev, + u8 *major, + u8 *minor, + u8 *patch) +{ + struct icp_qat_fw_init_admin_req req; + struct icp_qat_fw_init_admin_resp resp; + u32 ae_mask = 1; + + memset(&req, 0, sizeof(req)); + memset(&resp, 0, sizeof(resp)); + req.cmd_id = ICP_QAT_FW_STATUS_GET; + + if (adf_send_admin(accel_dev, &req, &resp, ae_mask)) + return EFAULT; + + *major = resp.version_major_num; + *minor = resp.version_minor_num; + *patch = resp.version_patch_num; + + return 0; +} + +static int +adf_4xxx_send_admin_init(struct adf_accel_dev *accel_dev) +{ + int ret = 0; + struct icp_qat_fw_init_admin_req req; + struct icp_qat_fw_init_admin_resp resp; + struct adf_hw_device_data *hw_data = accel_dev->hw_device; + u32 ae_mask = hw_data->ae_mask; + u32 admin_ae_mask = hw_data->admin_ae_mask; + u8 num_au = hw_data->get_num_accel_units(hw_data); + u8 i; + u32 dc_capabilities = 0; 
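+	/*
+	 * Editor's sketch (illustrative, not part of the original patch):
+	 * what the loop below computes.  Assuming the third field of
+	 * adf_4xxx_au_a_ae above is each unit's AE mask (0xF, 0xF0 and
+	 * 0x100, i.e. AEs 0-3, 4-7 and 8), then with the default
+	 * "asym;sym" configuration:
+	 *
+	 *   - ae_mask starts from the device AE mask (0x1FF when no AEs
+	 *     are fused off) and only drops units still marked
+	 *     ADF_ACCEL_SERVICE_NULL, so here it keeps all nine AEs;
+	 *   - admin_ae_mask drops every unit whose service is not
+	 *     ADF_ACCEL_ADMIN and reduces to 0x100, i.e. AE 8 only.
+	 *
+	 * ICP_QAT_FW_CONSTANTS_CFG is then sent to the admin AE alone,
+	 * while ICP_QAT_FW_INIT_ME and the heartbeat timer setup go to
+	 * the full pruned ae_mask.
+	 */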
+ + for (i = 0; i < num_au; i++) { + if (accel_dev->au_info->au[i].services == + ADF_ACCEL_SERVICE_NULL) + ae_mask &= ~accel_dev->au_info->au[i].ae_mask; + + if (accel_dev->au_info->au[i].services != ADF_ACCEL_ADMIN) + admin_ae_mask &= ~accel_dev->au_info->au[i].ae_mask; + } + + if (!accel_dev->admin) { + device_printf(GET_DEV(accel_dev), "adf_admin not available\n"); + return EFAULT; + } + + memset(&req, 0, sizeof(req)); + memset(&resp, 0, sizeof(resp)); + + req.cmd_id = ICP_QAT_FW_CONSTANTS_CFG; + req.init_cfg_sz = ADF_CONST_TABLE_SIZE; + req.init_cfg_ptr = accel_dev->admin->const_tbl_addr; + if (adf_send_admin(accel_dev, &req, &resp, admin_ae_mask)) { + device_printf(GET_DEV(accel_dev), + "Error sending constants config message\n"); + return EFAULT; + } + + memset(&req, 0, sizeof(req)); + memset(&resp, 0, sizeof(resp)); + req.cmd_id = ICP_QAT_FW_INIT_ME; + if (adf_send_admin(accel_dev, &req, &resp, ae_mask)) { + device_printf(GET_DEV(accel_dev), + "Error sending init message\n"); + return EFAULT; + } + + memset(&req, 0, sizeof(req)); + memset(&resp, 0, sizeof(resp)); + req.cmd_id = ICP_QAT_FW_HEARTBEAT_TIMER_SET; + req.init_cfg_ptr = accel_dev->admin->phy_hb_addr; + if (adf_get_hb_timer(accel_dev, &req.heartbeat_ticks)) + return EINVAL; + + if (adf_send_admin(accel_dev, &req, &resp, ae_mask)) + device_printf(GET_DEV(accel_dev), + "Heartbeat is not supported\n"); + + ret = adf_get_dc_extcapabilities(accel_dev, &dc_capabilities); + if (unlikely(ret)) { + device_printf(GET_DEV(accel_dev), + "Could not get FW ext. capabilities\n"); + } + + accel_dev->hw_device->extended_dc_capabilities = dc_capabilities; + + adf_get_fw_status(accel_dev, + &accel_dev->fw_versions.fw_version_major, + &accel_dev->fw_versions.fw_version_minor, + &accel_dev->fw_versions.fw_version_patch); + + device_printf(GET_DEV(accel_dev), + "FW version: %d.%d.%d\n", + accel_dev->fw_versions.fw_version_major, + accel_dev->fw_versions.fw_version_minor, + accel_dev->fw_versions.fw_version_patch); + + return ret; +} + +static enum dev_sku_info +get_sku(struct adf_hw_device_data *self) +{ + return DEV_SKU_1; +} + +static struct adf_accel_unit * +get_au_by_ae(struct adf_accel_dev *accel_dev, int ae_num) +{ + int i = 0; + struct adf_accel_unit *accel_unit = accel_dev->au_info->au; + + if (!accel_unit) + return NULL; + + for (i = 0; i < ADF_4XXX_MAX_ACCELUNITS; i++) + if (accel_unit[i].ae_mask & BIT(ae_num)) + return &accel_unit[i]; + + return NULL; +} + +static bool +check_accel_unit_service(enum adf_accel_unit_services au_srv, + enum adf_cfg_service_type ring_srv) +{ + if ((au_srv & ADF_ACCEL_SERVICE_NULL) && ring_srv == NA) + return true; + if ((au_srv & ADF_ACCEL_COMPRESSION) && ring_srv == COMP) + return true; + if ((au_srv & ADF_ACCEL_ASYM) && ring_srv == ASYM) + return true; + if ((au_srv & ADF_ACCEL_CRYPTO) && ring_srv == SYM) + return true; + + return false; +} + +static void +adf_4xxx_cfg_gen_dispatch_arbiter(struct adf_accel_dev *accel_dev, + u32 *thrd_to_arb_map_gen) +{ + struct adf_accel_unit *au = NULL; + int engine = 0; + int thread = 0; + int service; + u16 ena_srv_mask; + u16 service_type; + u32 service_mask; + unsigned long thd_srv_mask = default_active_thd_mask; + + ena_srv_mask = accel_dev->hw_device->ring_to_svc_map; + /* If ring_to_svc_map is not changed, return default arbiter value */ + if (ena_srv_mask == ADF_4XXX_DEFAULT_RING_TO_SRV_MAP) { + memcpy(thrd_to_arb_map_gen, + thrd_to_arb_map, + sizeof(thrd_to_arb_map_gen[0]) * + ADF_4XXX_MAX_ACCELENGINES); + return; + } + + for (engine = 0; engine < 
ADF_4XXX_MAX_ACCELENGINES - 1; engine++) { + thrd_to_arb_map_gen[engine] = 0; + service_mask = 0; + au = get_au_by_ae(accel_dev, engine); + if (!au) + continue; + + for (service = 0; service < ADF_CFG_MAX_SERVICES; service++) { + service_type = GET_SRV_TYPE(ena_srv_mask, service); + if (check_accel_unit_service(au->services, + service_type)) + service_mask |= BIT(service); + } + + if (au->services == ADF_ACCEL_COMPRESSION) + thd_srv_mask = dc_me_active_thd_mask; + else + thd_srv_mask = default_active_thd_mask; + + for_each_set_bit(thread, &thd_srv_mask, 8) + { + thrd_to_arb_map_gen[engine] |= + (service_mask << (ADF_CFG_MAX_SERVICES * thread)); + } + } +} + +static void +adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev, + u32 const **arb_map_config) +{ + int i; + struct adf_hw_device_data *hw_device = accel_dev->hw_device; + + for (i = 1; i < ADF_4XXX_MAX_ACCELENGINES; i++) { + if (~hw_device->ae_mask & (1 << i)) + thrd_to_arb_map[i] = 0; + } + adf_4xxx_cfg_gen_dispatch_arbiter(accel_dev, thrd_to_arb_map_gen); + *arb_map_config = thrd_to_arb_map_gen; +} + +static void +get_arb_info(struct arb_info *arb_info) +{ + arb_info->wrk_cfg_offset = ADF_4XXX_ARB_CONFIG; + arb_info->arbiter_offset = ADF_4XXX_ARB_OFFSET; + arb_info->wrk_thd_2_srv_arb_map = ADF_4XXX_ARB_WRK_2_SER_MAP_OFFSET; +} + +static void +get_admin_info(struct admin_info *admin_csrs_info) +{ + admin_csrs_info->mailbox_offset = ADF_4XXX_MAILBOX_BASE_OFFSET; + admin_csrs_info->admin_msg_ur = ADF_4XXX_ADMINMSGUR_OFFSET; + admin_csrs_info->admin_msg_lr = ADF_4XXX_ADMINMSGLR_OFFSET; +} + +static void +adf_enable_error_correction(struct adf_accel_dev *accel_dev) +{ + struct adf_bar *misc_bar = &GET_BARS(accel_dev)[ADF_4XXX_PMISC_BAR]; + struct resource *csr = misc_bar->virt_addr; + + /* Enable all in errsou3 except VFLR notification on host */ + ADF_CSR_WR(csr, ADF_4XXX_ERRMSK3, ADF_4XXX_VFLNOTIFY); +} + +static void +adf_enable_ints(struct adf_accel_dev *accel_dev) +{ + struct resource *addr; + + addr = (&GET_BARS(accel_dev)[ADF_4XXX_PMISC_BAR])->virt_addr; + + /* Enable bundle interrupts */ + ADF_CSR_WR(addr, ADF_4XXX_SMIAPF_RP_X0_MASK_OFFSET, 0); + ADF_CSR_WR(addr, ADF_4XXX_SMIAPF_RP_X1_MASK_OFFSET, 0); + + /* Enable misc interrupts */ + ADF_CSR_WR(addr, ADF_4XXX_SMIAPF_MASK_OFFSET, 0); +} + +static int +adf_init_device(struct adf_accel_dev *accel_dev) +{ + struct resource *addr; + u32 status; + u32 csr; + int ret; + + addr = (&GET_BARS(accel_dev)[ADF_4XXX_PMISC_BAR])->virt_addr; + + /* Temporarily mask PM interrupt */ + csr = ADF_CSR_RD(addr, ADF_4XXX_ERRMSK2); + csr |= ADF_4XXX_PM_SOU; + ADF_CSR_WR(addr, ADF_4XXX_ERRMSK2, csr); + + /* Set DRV_ACTIVE bit to power up the device */ + ADF_CSR_WR(addr, ADF_4XXX_PM_INTERRUPT, ADF_4XXX_PM_DRV_ACTIVE); + + /* Poll status register to make sure the device is powered up */ + status = 0; + ret = read_poll_timeout(ADF_CSR_RD, + status, + status & ADF_4XXX_PM_INIT_STATE, + ADF_4XXX_PM_POLL_DELAY_US, + ADF_4XXX_PM_POLL_TIMEOUT_US, + true, + addr, + ADF_4XXX_PM_STATUS); + if (ret) + device_printf(GET_DEV(accel_dev), + "Failed to power up the device\n"); + + return ret; +} + +void +adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data) +{ + hw_data->dev_class = &adf_4xxx_class; + hw_data->instance_id = adf_4xxx_class.instances++; + hw_data->num_banks = ADF_4XXX_ETR_MAX_BANKS; + hw_data->num_rings_per_bank = ADF_4XXX_NUM_RINGS_PER_BANK; + hw_data->num_accel = ADF_4XXX_MAX_ACCELERATORS; + hw_data->num_engines = ADF_4XXX_MAX_ACCELENGINES; + hw_data->num_logical_accel = 1; + 
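/*
 * Illustration (not part of the driver): in
 * adf_4xxx_cfg_gen_dispatch_arbiter() above, each engine's arbiter
 * word packs one per-thread service mask per field, shifted by
 * ADF_CFG_MAX_SERVICES bits per thread.  Assuming a 4-bit field per
 * thread (an assumption made here purely for the example), a
 * service_mask of 0x3 over eight active threads packs to 0x33333333:
 *
 *   #include <stdio.h>
 *
 *   int main(void)
 *   {
 *           unsigned int word = 0;
 *           unsigned int service_mask = 0x3;         // hypothetical
 *           int thread;
 *
 *           for (thread = 0; thread < 8; thread++)
 *                   word |= service_mask << (4 * thread);
 *
 *           printf("0x%08X\n", word);                // 0x33333333
 *           return 0;
 *   }
 */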
hw_data->tx_rx_gap = ADF_4XXX_RX_RINGS_OFFSET; + hw_data->tx_rings_mask = ADF_4XXX_TX_RINGS_MASK; + hw_data->alloc_irq = adf_isr_resource_alloc; + hw_data->free_irq = adf_isr_resource_free; + hw_data->enable_error_correction = adf_enable_error_correction; + hw_data->get_accel_mask = get_accel_mask; + hw_data->get_ae_mask = get_ae_mask; + hw_data->get_num_accels = get_num_accels; + hw_data->get_num_aes = get_num_aes; + hw_data->get_sram_bar_id = get_sram_bar_id; + hw_data->get_etr_bar_id = get_etr_bar_id; + hw_data->get_misc_bar_id = get_misc_bar_id; + hw_data->get_arb_info = get_arb_info; + hw_data->get_admin_info = get_admin_info; + hw_data->get_accel_cap = adf_4xxx_get_hw_cap; + hw_data->clock_frequency = ADF_4XXX_AE_FREQ; + hw_data->get_sku = get_sku; + hw_data->heartbeat_ctr_num = ADF_NUM_HB_CNT_PER_AE; + hw_data->fw_name = ADF_4XXX_FW; + hw_data->fw_mmp_name = ADF_4XXX_MMP; + hw_data->init_admin_comms = adf_init_admin_comms; + hw_data->exit_admin_comms = adf_exit_admin_comms; + hw_data->send_admin_init = adf_4xxx_send_admin_init; + hw_data->init_arb = adf_init_gen2_arb; + hw_data->exit_arb = adf_exit_arb; + hw_data->get_arb_mapping = adf_get_arbiter_mapping; + hw_data->enable_ints = adf_enable_ints; + hw_data->init_device = adf_init_device; + hw_data->reset_device = adf_reset_flr; + hw_data->restore_device = adf_dev_restore; + hw_data->init_accel_units = adf_init_accel_units; + hw_data->exit_accel_units = adf_exit_accel_units; + hw_data->get_num_accel_units = get_num_accel_units; + hw_data->configure_accel_units = adf_4xxx_configure_accel_units; + hw_data->get_ring_to_svc_map = get_ring_to_svc_map; + hw_data->get_ring_svc_map_data = get_ring_svc_map_data; + hw_data->admin_ae_mask = ADF_4XXX_ADMIN_AE_MASK; + hw_data->get_objs_num = get_objs_num; + hw_data->get_obj_name = get_obj_name; + hw_data->get_obj_cfg_ae_mask = get_obj_cfg_ae_mask; + hw_data->get_service_type = adf_4xxx_get_service_type; + hw_data->set_msix_rttable = set_msix_default_rttable; + hw_data->set_ssm_wdtimer = adf_gen4_set_ssm_wdtimer; + hw_data->disable_iov = adf_disable_sriov; + hw_data->min_iov_compat_ver = ADF_PFVF_COMPATIBILITY_VERSION; + hw_data->config_device = adf_config_device; + hw_data->set_asym_rings_mask = adf_cfg_set_asym_rings_mask; + hw_data->get_hb_clock = get_hb_clock; + hw_data->get_heartbeat_status = adf_get_heartbeat_status; + hw_data->get_ae_clock = get_ae_clock; + hw_data->measure_clock = measure_clock; + hw_data->query_storage_cap = 1; + + adf_gen4_init_hw_csr_info(&hw_data->csr_info); +} + +void +adf_clean_hw_data_4xxx(struct adf_hw_device_data *hw_data) +{ + hw_data->dev_class->instances--; +} diff --git a/sys/dev/qat/qat_hw/qat_4xxx/adf_4xxx_hw_data.h b/sys/dev/qat/qat_hw/qat_4xxx/adf_4xxx_hw_data.h new file mode 100644 index 000000000000..c3e9750e2b17 --- /dev/null +++ b/sys/dev/qat/qat_hw/qat_4xxx/adf_4xxx_hw_data.h @@ -0,0 +1,111 @@ +/* SPDX-License-Identifier: BSD-3-Clause */ +/* Copyright(c) 2007 - 2022 Intel Corporation */ +/* $FreeBSD$ */ +#ifndef ADF_4XXX_HW_DATA_H_ +#define ADF_4XXX_HW_DATA_H_ + +#include + +/* PCIe configuration space */ +#define ADF_4XXX_SRAM_BAR 0 +#define ADF_4XXX_PMISC_BAR 1 +#define ADF_4XXX_ETR_BAR 2 +#define ADF_4XXX_RX_RINGS_OFFSET 1 +#define ADF_4XXX_TX_RINGS_MASK 0x1 +#define ADF_4XXX_MAX_ACCELERATORS 1 +#define ADF_4XXX_MAX_ACCELENGINES 9 +#define ADF_4XXX_BAR_MASK (BIT(0) | BIT(2) | BIT(4)) + +/* 2 Accel units dedicated to services and */ +/* 1 Accel unit dedicated to Admin AE */ +#define ADF_4XXX_MAX_ACCELUNITS 3 + +/* Physical function fuses */ 
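/*
 * Illustration (not part of the driver): the fuse registers defined
 * below are the raw material for the accelerator/engine masks;
 * adf_attach() later in this patch reads ADF_4XXX_FUSECTL4_OFFSET into
 * hw_data->fuses, and the get_accel_mask()/get_ae_mask() helpers for
 * the other generations in this patch derive enabled-unit masks as
 * "~(fuse | strap) & hw_mask".  A self-contained sketch with
 * hypothetical register values:
 *
 *   #include <stdio.h>
 *
 *   int main(void)
 *   {
 *           unsigned int fuse = 0x100;               // hypothetical
 *           unsigned int strap = 0x002;              // hypothetical
 *           unsigned int hw_mask = 0x1FF;            // 9 engines
 *           unsigned int enabled = ~(fuse | strap) & hw_mask;
 *
 *           printf("0x%X\n", enabled);               // 0xFD
 *           return 0;
 *   }
 */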
+#define ADF_4XXX_FUSECTL0_OFFSET (0x2C8) +#define ADF_4XXX_FUSECTL1_OFFSET (0x2CC) +#define ADF_4XXX_FUSECTL2_OFFSET (0x2D0) +#define ADF_4XXX_FUSECTL3_OFFSET (0x2D4) +#define ADF_4XXX_FUSECTL4_OFFSET (0x2D8) +#define ADF_4XXX_FUSECTL5_OFFSET (0x2DC) + +#define ADF_4XXX_ACCELERATORS_MASK (0x1) +#define ADF_4XXX_ACCELENGINES_MASK (0x1FF) +#define ADF_4XXX_ADMIN_AE_MASK (0x100) + +#define ADF_4XXX_ETR_MAX_BANKS 64 + +/* MSIX interrupt */ +#define ADF_4XXX_SMIAPF_RP_X0_MASK_OFFSET (0x41A040) +#define ADF_4XXX_SMIAPF_RP_X1_MASK_OFFSET (0x41A044) +#define ADF_4XXX_SMIAPF_MASK_OFFSET (0x41A084) +#define ADF_4XXX_MSIX_RTTABLE_OFFSET(i) (0x409000 + ((i)*0x04)) + +/* Bank and ring configuration */ +#define ADF_4XXX_NUM_RINGS_PER_BANK 2 + +/* Error source registers */ +#define ADF_4XXX_ERRSOU0 (0x41A200) +#define ADF_4XXX_ERRSOU1 (0x41A204) +#define ADF_4XXX_ERRSOU2 (0x41A208) +#define ADF_4XXX_ERRSOU3 (0x41A20C) + +/* Error source mask registers */ +#define ADF_4XXX_ERRMSK0 (0x41A210) +#define ADF_4XXX_ERRMSK1 (0x41A214) +#define ADF_4XXX_ERRMSK2 (0x41A218) +#define ADF_4XXX_ERRMSK3 (0x41A21C) + +#define ADF_4XXX_VFLNOTIFY BIT(7) + +/* Arbiter configuration */ +#define ADF_4XXX_ARB_CONFIG (BIT(31) | BIT(6) | BIT(0)) +#define ADF_4XXX_ARB_OFFSET (0x0) +#define ADF_4XXX_ARB_WRK_2_SER_MAP_OFFSET (0x400) + +/* Admin Interface Reg Offset */ +#define ADF_4XXX_ADMINMSGUR_OFFSET (0x500574) +#define ADF_4XXX_ADMINMSGLR_OFFSET (0x500578) +#define ADF_4XXX_MAILBOX_BASE_OFFSET (0x600970) + +/* Power management */ +#define ADF_4XXX_PM_POLL_DELAY_US 20 +#define ADF_4XXX_PM_POLL_TIMEOUT_US USEC_PER_SEC +#define ADF_4XXX_PM_STATUS (0x50A00C) +#define ADF_4XXX_PM_INTERRUPT (0x50A028) +#define ADF_4XXX_PM_DRV_ACTIVE BIT(20) +#define ADF_4XXX_PM_INIT_STATE BIT(21) +/* Power management source in ERRSOU2 and ERRMSK2 */ +#define ADF_4XXX_PM_SOU BIT(18) + +/* Firmware Binaries */ +#define ADF_4XXX_FW "qat_4xxx_fw" +#define ADF_4XXX_MMP "qat_4xxx_mmp_fw" +#define ADF_4XXX_DC_OBJ "qat_4xxx_dc.bin" +#define ADF_4XXX_SYM_OBJ "qat_4xxx_sym.bin" +#define ADF_4XXX_ASYM_OBJ "qat_4xxx_asym.bin" +#define ADF_4XXX_ADMIN_OBJ "qat_4xxx_admin.bin" + +/* Only 3 types of images can be loaded including the admin image */ +#define ADF_4XXX_MAX_OBJ 3 + +#define ADF_4XXX_AE_FREQ (1000 * 1000000) +#define ADF_4XXX_KPT_COUNTER_FREQ (100 * 1000000) + +#define ADF_4XXX_MIN_AE_FREQ (9 * 1000000) +#define ADF_4XXX_MAX_AE_FREQ (1100 * 1000000) + +/* qat_4xxx fuse bits are different from old GENs, redefine them */ +enum icp_qat_4xxx_slice_mask { + ICP_ACCEL_4XXX_MASK_CIPHER_SLICE = BIT(0), + ICP_ACCEL_4XXX_MASK_AUTH_SLICE = BIT(1), + ICP_ACCEL_4XXX_MASK_PKE_SLICE = BIT(2), + ICP_ACCEL_4XXX_MASK_COMPRESS_SLICE = BIT(3), + ICP_ACCEL_4XXX_MASK_UCS_SLICE = BIT(4), + ICP_ACCEL_4XXX_MASK_EIA3_SLICE = BIT(5), + ICP_ACCEL_4XXX_MASK_SMX_SLICE = BIT(6), +}; + +void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data); +void adf_clean_hw_data_4xxx(struct adf_hw_device_data *hw_data); + +#endif diff --git a/sys/dev/qat/qat_hw/qat_c4xxx/adf_drv.c b/sys/dev/qat/qat_hw/qat_4xxx/adf_drv.c similarity index 83% copy from sys/dev/qat/qat_hw/qat_c4xxx/adf_drv.c copy to sys/dev/qat/qat_hw/qat_4xxx/adf_drv.c index 3470e4e8a8a0..76dcf7b37dee 100644 --- a/sys/dev/qat/qat_hw/qat_c4xxx/adf_drv.c +++ b/sys/dev/qat/qat_hw/qat_4xxx/adf_drv.c @@ -1,268 +1,267 @@ /* SPDX-License-Identifier: BSD-3-Clause */ -/* Copyright(c) 2007-2022 Intel Corporation */ +/* Copyright(c) 2007 - 2022 Intel Corporation */ /* $FreeBSD$ */ #include "qat_freebsd.h" #include "adf_cfg.h" #include 
"adf_common_drv.h" #include "adf_accel_devices.h" -#include "adf_c4xxx_hw_data.h" +#include "adf_4xxx_hw_data.h" +#include "adf_gen4_hw_data.h" #include "adf_fw_counters.h" #include "adf_cfg_device.h" #include #include #include #include #include #include "adf_heartbeat_dbg.h" #include "adf_cnvnr_freq_counters.h" -static MALLOC_DEFINE(M_QAT_C4XXX, "qat_c4xx", "qat_c4xx"); +static MALLOC_DEFINE(M_QAT_4XXX, "qat_4xxx", "qat_4xxx"); #define ADF_SYSTEM_DEVICE(device_id) \ { \ PCI_VENDOR_ID_INTEL, device_id \ } static const struct pci_device_id adf_pci_tbl[] = - { ADF_SYSTEM_DEVICE(ADF_C4XXX_PCI_DEVICE_ID), + { ADF_SYSTEM_DEVICE(ADF_4XXX_PCI_DEVICE_ID), + ADF_SYSTEM_DEVICE(ADF_401XX_PCI_DEVICE_ID), { 0, } }; static int adf_probe(device_t dev) { const struct pci_device_id *id; for (id = adf_pci_tbl; id->vendor != 0; id++) { if (pci_get_vendor(dev) == id->vendor && pci_get_device(dev) == id->device) { device_set_desc(dev, - "Intel " ADF_C4XXX_DEVICE_NAME + "Intel " ADF_4XXX_DEVICE_NAME " QuickAssist"); return BUS_PROBE_GENERIC; } } return ENXIO; } static void adf_cleanup_accel(struct adf_accel_dev *accel_dev) { struct adf_accel_pci *accel_pci_dev = &accel_dev->accel_pci_dev; int i; if (accel_dev->dma_tag) bus_dma_tag_destroy(accel_dev->dma_tag); for (i = 0; i < ADF_PCI_MAX_BARS; i++) { struct adf_bar *bar = &accel_pci_dev->pci_bars[i]; if (bar->virt_addr) bus_free_resource(accel_pci_dev->pci_dev, SYS_RES_MEMORY, bar->virt_addr); } if (accel_dev->hw_device) { switch (pci_get_device(accel_pci_dev->pci_dev)) { - case ADF_C4XXX_PCI_DEVICE_ID: - adf_clean_hw_data_c4xxx(accel_dev->hw_device); + case ADF_4XXX_PCI_DEVICE_ID: + case ADF_401XX_PCI_DEVICE_ID: + adf_clean_hw_data_4xxx(accel_dev->hw_device); break; default: break; } - free(accel_dev->hw_device, M_QAT_C4XXX); + free(accel_dev->hw_device, M_QAT_4XXX); accel_dev->hw_device = NULL; } adf_cfg_dev_remove(accel_dev); adf_devmgr_rm_dev(accel_dev, NULL); } static int adf_attach(device_t dev) { struct adf_accel_dev *accel_dev; struct adf_accel_pci *accel_pci_dev; struct adf_hw_device_data *hw_data; unsigned int i, bar_nr; int ret, rid; struct adf_cfg_device *cfg_dev = NULL; /* Set pci MaxPayLoad to 256. Implemented to avoid the issue of * Pci-passthrough causing Maxpayload to be reset to 128 bytes * when the device is reset. */ if (pci_get_max_payload(dev) != 256) pci_set_max_payload(dev, 256); accel_dev = device_get_softc(dev); INIT_LIST_HEAD(&accel_dev->crypto_list); accel_pci_dev = &accel_dev->accel_pci_dev; accel_pci_dev->pci_dev = dev; if (bus_get_domain(dev, &accel_pci_dev->node) != 0) accel_pci_dev->node = 0; - /* XXX: Revisit if we actually need a devmgr table at all. */ - /* Add accel device to accel table. 
* This should be called before adf_cleanup_accel is called */ if (adf_devmgr_add_dev(accel_dev, NULL)) { device_printf(dev, "Failed to add new accelerator device.\n"); return ENXIO; } /* Allocate and configure device configuration structure */ - hw_data = malloc(sizeof(*hw_data), M_QAT_C4XXX, M_WAITOK | M_ZERO); + hw_data = malloc(sizeof(*hw_data), M_QAT_4XXX, M_WAITOK | M_ZERO); accel_dev->hw_device = hw_data; - adf_init_hw_data_c4xxx(accel_dev->hw_device); + adf_init_hw_data_4xxx(accel_dev->hw_device); accel_pci_dev->revid = pci_get_revid(dev); - hw_data->fuses = pci_read_config(dev, ADF_DEVICE_FUSECTL_OFFSET, 4); + hw_data->fuses = pci_read_config(dev, ADF_4XXX_FUSECTL4_OFFSET, 4); + if (accel_pci_dev->revid == 0x00) { + device_printf(dev, "A0 stepping is not supported.\n"); + ret = ENODEV; + goto out_err; + } /* Get PPAERUCM values and store */ ret = adf_aer_store_ppaerucm_reg(dev, hw_data); if (ret) goto out_err; /* Get Accelerators and Accelerators Engines masks */ hw_data->accel_mask = hw_data->get_accel_mask(accel_dev); hw_data->ae_mask = hw_data->get_ae_mask(accel_dev); + accel_pci_dev->sku = hw_data->get_sku(hw_data); /* If the device has no acceleration engines then ignore it. */ if (!hw_data->accel_mask || !hw_data->ae_mask || (~hw_data->ae_mask & 0x01)) { device_printf(dev, "No acceleration units found\n"); ret = ENXIO; goto out_err; } /* Create device configuration table */ ret = adf_cfg_dev_add(accel_dev); if (ret) goto out_err; - ret = adf_clock_debugfs_add(accel_dev); if (ret) goto out_err; pci_set_max_read_req(dev, 1024); ret = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, BUS_SPACE_MAXSIZE, - /*BUS_SPACE_UNRESTRICTED*/ 1, + /* BUS_SPACE_UNRESTRICTED */ 1, BUS_SPACE_MAXSIZE, 0, NULL, NULL, &accel_dev->dma_tag); if (ret) goto out_err; if (hw_data->get_accel_cap) { hw_data->accel_capabilities_mask = hw_data->get_accel_cap(accel_dev); } - accel_pci_dev->sku = hw_data->get_sku(hw_data); - /* Find and map all the device's BARS */ i = 0; for (bar_nr = 0; i < ADF_PCI_MAX_BARS && bar_nr < PCIR_MAX_BAR_0; bar_nr++) { struct adf_bar *bar; - /* - * XXX: This isn't quite right as it will ignore a BAR - * that wasn't assigned a valid resource range by the - * firmware. 
- */ rid = PCIR_BAR(bar_nr); if (bus_get_resource(dev, SYS_RES_MEMORY, rid, NULL, NULL) != 0) continue; bar = &accel_pci_dev->pci_bars[i++]; bar->virt_addr = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (!bar->virt_addr) { device_printf(dev, "Failed to map BAR %d\n", bar_nr); ret = ENXIO; goto out_err; } bar->base_addr = rman_get_start(bar->virt_addr); - bar->size = rman_get_start(bar->virt_addr); + bar->size = rman_get_size(bar->virt_addr); } pci_enable_busmaster(dev); if (!accel_dev->hw_device->config_device) { ret = EFAULT; goto out_err; } ret = accel_dev->hw_device->config_device(accel_dev); if (ret) goto out_err; ret = adf_dev_init(accel_dev); if (ret) goto out_dev_shutdown; ret = adf_dev_start(accel_dev); if (ret) goto out_dev_stop; cfg_dev = accel_dev->cfg->dev; adf_cfg_device_clear(cfg_dev, accel_dev); free(cfg_dev, M_QAT); accel_dev->cfg->dev = NULL; return ret; out_dev_stop: adf_dev_stop(accel_dev); out_dev_shutdown: adf_dev_shutdown(accel_dev); out_err: adf_cleanup_accel(accel_dev); return ret; } static int adf_detach(device_t dev) { struct adf_accel_dev *accel_dev = device_get_softc(dev); if (adf_dev_stop(accel_dev)) { device_printf(dev, "Failed to stop QAT accel dev\n"); return EBUSY; } adf_dev_shutdown(accel_dev); adf_cleanup_accel(accel_dev); return 0; } static device_method_t adf_methods[] = { DEVMETHOD(device_probe, adf_probe), DEVMETHOD(device_attach, adf_attach), DEVMETHOD(device_detach, adf_detach), DEVMETHOD_END }; static driver_t adf_driver = { "qat", adf_methods, sizeof(struct adf_accel_dev) }; -DRIVER_MODULE_ORDERED(qat_c4xxx, pci, adf_driver, NULL, NULL, SI_ORDER_THIRD); -MODULE_VERSION(qat_c4xxx, 1); -MODULE_DEPEND(qat_c4xxx, qat_common, 1, 1, 1); -MODULE_DEPEND(qat_c4xxx, qat_api, 1, 1, 1); -MODULE_DEPEND(qat_c4xxx, linuxkpi, 1, 1, 1); +DRIVER_MODULE_ORDERED(qat_4xxx, pci, adf_driver, NULL, NULL, SI_ORDER_THIRD); +MODULE_VERSION(qat_4xxx, 1); +MODULE_DEPEND(qat_4xxx, qat_common, 1, 1, 1); +MODULE_DEPEND(qat_4xxx, qat_api, 1, 1, 1); +MODULE_DEPEND(qat_4xxx, linuxkpi, 1, 1, 1); diff --git a/sys/dev/qat/qat_hw/qat_c3xxx/adf_c3xxx_hw_data.c b/sys/dev/qat/qat_hw/qat_c3xxx/adf_c3xxx_hw_data.c index 5f7fe3249353..a13683800c8e 100644 --- a/sys/dev/qat/qat_hw/qat_c3xxx/adf_c3xxx_hw_data.c +++ b/sys/dev/qat/qat_hw/qat_c3xxx/adf_c3xxx_hw_data.c @@ -1,415 +1,419 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright(c) 2007-2022 Intel Corporation */ /* $FreeBSD$ */ #include #include #include #include #include +#include #include "adf_c3xxx_hw_data.h" #include "icp_qat_hw.h" #include "adf_heartbeat.h" /* Worker thread to service arbiter mappings */ static const u32 thrd_to_arb_map[ADF_C3XXX_MAX_ACCELENGINES] = { 0x12222AAA, 0x11222AAA, 0x12222AAA, 0x11222AAA, 0x12222AAA, 0x11222AAA }; enum { DEV_C3XXX_SKU_1 = 0, DEV_C3XXX_SKU_2 = 1, DEV_C3XXX_SKU_3 = 2 }; static u32 thrd_to_arb_map_gen[ADF_C3XXX_MAX_ACCELENGINES] = { 0 }; static struct adf_hw_device_class c3xxx_class = {.name = ADF_C3XXX_DEVICE_NAME, .type = DEV_C3XXX, .instances = 0 }; static u32 get_accel_mask(struct adf_accel_dev *accel_dev) { device_t pdev = accel_dev->accel_pci_dev.pci_dev; u32 fuse; u32 straps; fuse = pci_read_config(pdev, ADF_DEVICE_FUSECTL_OFFSET, 4); straps = pci_read_config(pdev, ADF_C3XXX_SOFTSTRAP_CSR_OFFSET, 4); return (~(fuse | straps)) >> ADF_C3XXX_ACCELERATORS_REG_OFFSET & ADF_C3XXX_ACCELERATORS_MASK; } static u32 get_ae_mask(struct adf_accel_dev *accel_dev) { device_t pdev = accel_dev->accel_pci_dev.pci_dev; u32 fuse; u32 me_straps; u32 me_disable; u32 ssms_disabled; 
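/*
 * Illustration (not part of the driver): the loop below expands the
 * SSM (accelerator) disable mask into an ME (accel engine) disable
 * mask; each disabled SSM knocks out a pair of MEs, so bit i of
 * ssms_disabled becomes bits 2i and 2i+1 of the strap mask.
 * Self-contained sketch with a hypothetical input:
 *
 *   #include <stdio.h>
 *
 *   int main(void)
 *   {
 *           unsigned int ssms_disabled = 0x2;        // SSM 1 fused off
 *           unsigned int me_straps = 0;
 *           unsigned int me_disable = 0x3;
 *
 *           while (ssms_disabled) {
 *                   if (ssms_disabled & 1)
 *                           me_straps |= me_disable;
 *                   ssms_disabled >>= 1;
 *                   me_disable <<= 2;
 *           }
 *           printf("0x%X\n", me_straps);             // 0xC: MEs 2, 3
 *           return 0;
 *   }
 */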
fuse = pci_read_config(pdev, ADF_DEVICE_FUSECTL_OFFSET, 4); me_straps = pci_read_config(pdev, ADF_C3XXX_SOFTSTRAP_CSR_OFFSET, 4); /* If SSMs are disabled, then disable the corresponding MEs */ ssms_disabled = (~get_accel_mask(accel_dev)) & ADF_C3XXX_ACCELERATORS_MASK; me_disable = 0x3; while (ssms_disabled) { if (ssms_disabled & 1) me_straps |= me_disable; ssms_disabled >>= 1; me_disable <<= 2; } return (~(fuse | me_straps)) & ADF_C3XXX_ACCELENGINES_MASK; } static u32 get_num_accels(struct adf_hw_device_data *self) { u32 i, ctr = 0; if (!self || !self->accel_mask) return 0; for (i = 0; i < ADF_C3XXX_MAX_ACCELERATORS; i++) { if (self->accel_mask & (1 << i)) ctr++; } return ctr; } static u32 get_num_aes(struct adf_hw_device_data *self) { u32 i, ctr = 0; if (!self || !self->ae_mask) return 0; for (i = 0; i < ADF_C3XXX_MAX_ACCELENGINES; i++) { if (self->ae_mask & (1 << i)) ctr++; } return ctr; } static u32 get_misc_bar_id(struct adf_hw_device_data *self) { return ADF_C3XXX_PMISC_BAR; } static u32 get_etr_bar_id(struct adf_hw_device_data *self) { return ADF_C3XXX_ETR_BAR; } static u32 get_sram_bar_id(struct adf_hw_device_data *self) { return 0; } static enum dev_sku_info get_sku(struct adf_hw_device_data *self) { int aes = get_num_aes(self); if (aes == 6) return DEV_SKU_4; return DEV_SKU_UNKNOWN; } static void adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev, u32 const **arb_map_config) { int i; struct adf_hw_device_data *hw_device = accel_dev->hw_device; for (i = 0; i < ADF_C3XXX_MAX_ACCELENGINES; i++) { thrd_to_arb_map_gen[i] = 0; if (hw_device->ae_mask & (1 << i)) thrd_to_arb_map_gen[i] = thrd_to_arb_map[i]; } adf_cfg_gen_dispatch_arbiter(accel_dev, thrd_to_arb_map, thrd_to_arb_map_gen, ADF_C3XXX_MAX_ACCELENGINES); *arb_map_config = thrd_to_arb_map_gen; } static u32 get_pf2vf_offset(u32 i) { return ADF_C3XXX_PF2VF_OFFSET(i); } static u32 get_vintmsk_offset(u32 i) { return ADF_C3XXX_VINTMSK_OFFSET(i); } static void get_arb_info(struct arb_info *arb_csrs_info) { arb_csrs_info->arbiter_offset = ADF_C3XXX_ARB_OFFSET; arb_csrs_info->wrk_thd_2_srv_arb_map = ADF_C3XXX_ARB_WRK_2_SER_MAP_OFFSET; arb_csrs_info->wrk_cfg_offset = ADF_C3XXX_ARB_WQCFG_OFFSET; } static void get_admin_info(struct admin_info *admin_csrs_info) { admin_csrs_info->mailbox_offset = ADF_C3XXX_MAILBOX_BASE_OFFSET; admin_csrs_info->admin_msg_ur = ADF_C3XXX_ADMINMSGUR_OFFSET; admin_csrs_info->admin_msg_lr = ADF_C3XXX_ADMINMSGLR_OFFSET; } static void get_errsou_offset(u32 *errsou3, u32 *errsou5) { *errsou3 = ADF_C3XXX_ERRSOU3; *errsou5 = ADF_C3XXX_ERRSOU5; } static u32 get_clock_speed(struct adf_hw_device_data *self) { /* CPP clock is half high-speed clock */ return self->clock_frequency / 2; } static void adf_enable_error_correction(struct adf_accel_dev *accel_dev) { struct adf_hw_device_data *hw_device = accel_dev->hw_device; struct adf_bar *misc_bar = &GET_BARS(accel_dev)[ADF_C3XXX_PMISC_BAR]; struct resource *csr = misc_bar->virt_addr; unsigned int val, i; unsigned int mask; /* Enable Accel Engine error detection & correction */ mask = hw_device->ae_mask; for (i = 0; mask; i++, mask >>= 1) { if (!(mask & 1)) continue; val = ADF_CSR_RD(csr, ADF_C3XXX_AE_CTX_ENABLES(i)); val |= ADF_C3XXX_ENABLE_AE_ECC_ERR; ADF_CSR_WR(csr, ADF_C3XXX_AE_CTX_ENABLES(i), val); val = ADF_CSR_RD(csr, ADF_C3XXX_AE_MISC_CONTROL(i)); val |= ADF_C3XXX_ENABLE_AE_ECC_PARITY_CORR; ADF_CSR_WR(csr, ADF_C3XXX_AE_MISC_CONTROL(i), val); } /* Enable shared memory error detection & correction */ mask = hw_device->accel_mask; for (i = 0; mask; i++, mask >>= 1) { 
if (!(mask & 1)) continue; val = ADF_CSR_RD(csr, ADF_C3XXX_UERRSSMSH(i)); val |= ADF_C3XXX_ERRSSMSH_EN; ADF_CSR_WR(csr, ADF_C3XXX_UERRSSMSH(i), val); val = ADF_CSR_RD(csr, ADF_C3XXX_CERRSSMSH(i)); val |= ADF_C3XXX_ERRSSMSH_EN; ADF_CSR_WR(csr, ADF_C3XXX_CERRSSMSH(i), val); } } static void adf_enable_ints(struct adf_accel_dev *accel_dev) { struct resource *addr; addr = (&GET_BARS(accel_dev)[ADF_C3XXX_PMISC_BAR])->virt_addr; /* Enable bundle and misc interrupts */ ADF_CSR_WR(addr, ADF_C3XXX_SMIAPF0_MASK_OFFSET, ADF_C3XXX_SMIA0_MASK); ADF_CSR_WR(addr, ADF_C3XXX_SMIAPF1_MASK_OFFSET, ADF_C3XXX_SMIA1_MASK); } static u32 get_ae_clock(struct adf_hw_device_data *self) { /* * Clock update interval is <16> ticks for c3xxx. */ return self->clock_frequency / 16; } static int get_storage_enabled(struct adf_accel_dev *accel_dev, uint32_t *storage_enabled) { char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES]; char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES]; strlcpy(key, ADF_STORAGE_FIRMWARE_ENABLED, sizeof(key)); if (!adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC, key, val)) { if (kstrtouint(val, 0, storage_enabled)) return -EFAULT; } return 0; } static int measure_clock(struct adf_accel_dev *accel_dev) { u32 frequency; int ret = 0; ret = adf_dev_measure_clock(accel_dev, &frequency, ADF_C3XXX_MIN_AE_FREQ, ADF_C3XXX_MAX_AE_FREQ); if (ret) return ret; accel_dev->hw_device->clock_frequency = frequency; return 0; } static u32 c3xxx_get_hw_cap(struct adf_accel_dev *accel_dev) { device_t pdev = accel_dev->accel_pci_dev.pci_dev; u32 legfuses; u32 capabilities; u32 straps; struct adf_hw_device_data *hw_data = accel_dev->hw_device; u32 fuses = hw_data->fuses; /* Read accelerator capabilities mask */ legfuses = pci_read_config(pdev, ADF_DEVICE_LEGFUSE_OFFSET, 4); capabilities = ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC + ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC + ICP_ACCEL_CAPABILITIES_CIPHER + ICP_ACCEL_CAPABILITIES_AUTHENTICATION + ICP_ACCEL_CAPABILITIES_COMPRESSION + ICP_ACCEL_CAPABILITIES_ZUC + ICP_ACCEL_CAPABILITIES_SHA3 + ICP_ACCEL_CAPABILITIES_HKDF + ICP_ACCEL_CAPABILITIES_ECEDMONT + ICP_ACCEL_CAPABILITIES_EXT_ALGCHAIN; if (legfuses & ICP_ACCEL_MASK_CIPHER_SLICE) capabilities &= ~(ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC | ICP_ACCEL_CAPABILITIES_CIPHER | ICP_ACCEL_CAPABILITIES_HKDF | ICP_ACCEL_CAPABILITIES_EXT_ALGCHAIN); if (legfuses & ICP_ACCEL_MASK_AUTH_SLICE) capabilities &= ~ICP_ACCEL_CAPABILITIES_AUTHENTICATION; if (legfuses & ICP_ACCEL_MASK_PKE_SLICE) capabilities &= ~(ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC | ICP_ACCEL_CAPABILITIES_ECEDMONT); if (legfuses & ICP_ACCEL_MASK_COMPRESS_SLICE) capabilities &= ~ICP_ACCEL_CAPABILITIES_COMPRESSION; if (legfuses & ICP_ACCEL_MASK_EIA3_SLICE) capabilities &= ~ICP_ACCEL_CAPABILITIES_ZUC; if (legfuses & ICP_ACCEL_MASK_SHA3_SLICE) capabilities &= ~ICP_ACCEL_CAPABILITIES_SHA3; straps = pci_read_config(pdev, ADF_C3XXX_SOFTSTRAP_CSR_OFFSET, 4); if ((straps | fuses) & ADF_C3XXX_POWERGATE_PKE) capabilities &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC; if ((straps | fuses) & ADF_C3XXX_POWERGATE_CY) capabilities &= ~ICP_ACCEL_CAPABILITIES_COMPRESSION; return capabilities; } static const char * get_obj_name(struct adf_accel_dev *accel_dev, enum adf_accel_unit_services service) { return ADF_CXXX_AE_FW_NAME_CUSTOM1; } static uint32_t get_objs_num(struct adf_accel_dev *accel_dev) { return 1; } static uint32_t get_obj_cfg_ae_mask(struct adf_accel_dev *accel_dev, enum adf_accel_unit_services services) { return accel_dev->hw_device->ae_mask; } void adf_init_hw_data_c3xxx(struct adf_hw_device_data 
*hw_data) { hw_data->dev_class = &c3xxx_class; hw_data->instance_id = c3xxx_class.instances++; hw_data->num_banks = ADF_C3XXX_ETR_MAX_BANKS; hw_data->num_rings_per_bank = ADF_ETR_MAX_RINGS_PER_BANK; hw_data->num_accel = ADF_C3XXX_MAX_ACCELERATORS; hw_data->num_logical_accel = 1; hw_data->num_engines = ADF_C3XXX_MAX_ACCELENGINES; hw_data->tx_rx_gap = ADF_C3XXX_RX_RINGS_OFFSET; hw_data->tx_rings_mask = ADF_C3XXX_TX_RINGS_MASK; hw_data->alloc_irq = adf_isr_resource_alloc; hw_data->free_irq = adf_isr_resource_free; hw_data->enable_error_correction = adf_enable_error_correction; hw_data->print_err_registers = adf_print_err_registers; hw_data->get_accel_mask = get_accel_mask; hw_data->get_ae_mask = get_ae_mask; hw_data->get_num_accels = get_num_accels; hw_data->get_num_aes = get_num_aes; hw_data->get_sram_bar_id = get_sram_bar_id; hw_data->get_etr_bar_id = get_etr_bar_id; hw_data->get_misc_bar_id = get_misc_bar_id; hw_data->get_pf2vf_offset = get_pf2vf_offset; hw_data->get_vintmsk_offset = get_vintmsk_offset; hw_data->get_arb_info = get_arb_info; hw_data->get_admin_info = get_admin_info; hw_data->get_errsou_offset = get_errsou_offset; hw_data->get_clock_speed = get_clock_speed; hw_data->get_sku = get_sku; + hw_data->heartbeat_ctr_num = ADF_NUM_HB_CNT_PER_AE; hw_data->fw_name = ADF_C3XXX_FW; hw_data->fw_mmp_name = ADF_C3XXX_MMP; hw_data->init_admin_comms = adf_init_admin_comms; hw_data->exit_admin_comms = adf_exit_admin_comms; hw_data->disable_iov = adf_disable_sriov; hw_data->send_admin_init = adf_send_admin_init; hw_data->init_arb = adf_init_gen2_arb; hw_data->exit_arb = adf_exit_arb; hw_data->get_arb_mapping = adf_get_arbiter_mapping; hw_data->enable_ints = adf_enable_ints; hw_data->set_ssm_wdtimer = adf_set_ssm_wdtimer; hw_data->check_slice_hang = adf_check_slice_hang; hw_data->enable_vf2pf_comms = adf_pf_enable_vf2pf_comms; hw_data->disable_vf2pf_comms = adf_pf_disable_vf2pf_comms; hw_data->restore_device = adf_dev_restore; hw_data->reset_device = adf_reset_flr; hw_data->min_iov_compat_ver = ADF_PFVF_COMPATIBILITY_VERSION; hw_data->measure_clock = measure_clock; hw_data->get_ae_clock = get_ae_clock; hw_data->reset_device = adf_reset_flr; hw_data->get_objs_num = get_objs_num; hw_data->get_obj_name = get_obj_name; hw_data->get_obj_cfg_ae_mask = get_obj_cfg_ae_mask; hw_data->get_accel_cap = c3xxx_get_hw_cap; hw_data->clock_frequency = ADF_C3XXX_AE_FREQ; hw_data->extended_dc_capabilities = 0; hw_data->get_storage_enabled = get_storage_enabled; hw_data->query_storage_cap = 1; hw_data->get_heartbeat_status = adf_get_heartbeat_status; hw_data->get_ae_clock = get_ae_clock; hw_data->storage_enable = 0; hw_data->get_fw_image_type = adf_cfg_get_fw_image_type; hw_data->get_ring_to_svc_map = adf_cfg_get_services_enabled; hw_data->config_device = adf_config_device; hw_data->set_asym_rings_mask = adf_cfg_set_asym_rings_mask; hw_data->ring_to_svc_map = ADF_DEFAULT_RING_TO_SRV_MAP; hw_data->pre_reset = adf_dev_pre_reset; hw_data->post_reset = adf_dev_post_reset; + + adf_gen2_init_hw_csr_info(&hw_data->csr_info); } void adf_clean_hw_data_c3xxx(struct adf_hw_device_data *hw_data) { hw_data->dev_class->instances--; } diff --git a/sys/dev/qat/qat_hw/qat_c3xxx/adf_drv.c b/sys/dev/qat/qat_hw/qat_c3xxx/adf_drv.c index 307be8d31879..6d17b3216a29 100644 --- a/sys/dev/qat/qat_hw/qat_c3xxx/adf_drv.c +++ b/sys/dev/qat/qat_hw/qat_c3xxx/adf_drv.c @@ -1,269 +1,270 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright(c) 2007-2022 Intel Corporation */ /* $FreeBSD$ */ #include "qat_freebsd.h" #include "adf_cfg.h"
#include "adf_common_drv.h" #include "adf_accel_devices.h" #include "adf_c3xxx_hw_data.h" #include "adf_fw_counters.h" #include "adf_cfg_device.h" #include #include #include #include #include #include "adf_heartbeat_dbg.h" #include "adf_cnvnr_freq_counters.h" static MALLOC_DEFINE(M_QAT_C3XXX, "qat_c3xxx", "qat_c3xxx"); #define ADF_SYSTEM_DEVICE(device_id) \ { \ PCI_VENDOR_ID_INTEL, device_id \ } static const struct pci_device_id adf_pci_tbl[] = { ADF_SYSTEM_DEVICE(ADF_C3XXX_PCI_DEVICE_ID), { 0, } }; static int adf_probe(device_t dev) { const struct pci_device_id *id; for (id = adf_pci_tbl; id->vendor != 0; id++) { if (pci_get_vendor(dev) == id->vendor && pci_get_device(dev) == id->device) { device_set_desc(dev, "Intel " ADF_C3XXX_DEVICE_NAME " QuickAssist"); return BUS_PROBE_GENERIC; } } return ENXIO; } static void adf_cleanup_accel(struct adf_accel_dev *accel_dev) { struct adf_accel_pci *accel_pci_dev = &accel_dev->accel_pci_dev; int i; if (accel_dev->dma_tag) bus_dma_tag_destroy(accel_dev->dma_tag); for (i = 0; i < ADF_PCI_MAX_BARS; i++) { struct adf_bar *bar = &accel_pci_dev->pci_bars[i]; if (bar->virt_addr) bus_free_resource(accel_pci_dev->pci_dev, SYS_RES_MEMORY, bar->virt_addr); } if (accel_dev->hw_device) { switch (pci_get_device(accel_pci_dev->pci_dev)) { case ADF_C3XXX_PCI_DEVICE_ID: adf_clean_hw_data_c3xxx(accel_dev->hw_device); break; default: break; } free(accel_dev->hw_device, M_QAT_C3XXX); accel_dev->hw_device = NULL; } adf_cfg_dev_remove(accel_dev); adf_devmgr_rm_dev(accel_dev, NULL); } static int adf_attach(device_t dev) { struct adf_accel_dev *accel_dev; struct adf_accel_pci *accel_pci_dev; struct adf_hw_device_data *hw_data; unsigned int i, bar_nr; int ret, rid; struct adf_cfg_device *cfg_dev = NULL; /* Set pci MaxPayLoad to 256. Implemented to avoid the issue of * Pci-passthrough causing Maxpayload to be reset to 128 bytes * when the device is reset. */ if (pci_get_max_payload(dev) != 256) pci_set_max_payload(dev, 256); accel_dev = device_get_softc(dev); INIT_LIST_HEAD(&accel_dev->crypto_list); accel_pci_dev = &accel_dev->accel_pci_dev; accel_pci_dev->pci_dev = dev; if (bus_get_domain(dev, &accel_pci_dev->node) != 0) accel_pci_dev->node = 0; /* XXX: Revisit if we actually need a devmgr table at all. */ /* Add accel device to accel table. * This should be called before adf_cleanup_accel is called */ if (adf_devmgr_add_dev(accel_dev, NULL)) { device_printf(dev, "Failed to add new accelerator device.\n"); return ENXIO; } /* Allocate and configure device configuration structure */ hw_data = malloc(sizeof(*hw_data), M_QAT_C3XXX, M_WAITOK | M_ZERO); accel_dev->hw_device = hw_data; adf_init_hw_data_c3xxx(accel_dev->hw_device); accel_pci_dev->revid = pci_get_revid(dev); hw_data->fuses = pci_read_config(dev, ADF_DEVICE_FUSECTL_OFFSET, 4); if (accel_pci_dev->revid == 0x00) { device_printf(dev, "A0 stepping is not supported.\n"); ret = ENODEV; goto out_err; } /* Get PPAERUCM values and store */ ret = adf_aer_store_ppaerucm_reg(dev, hw_data); if (ret) goto out_err; /* Get Accelerators and Accelerators Engines masks */ hw_data->accel_mask = hw_data->get_accel_mask(accel_dev); hw_data->ae_mask = hw_data->get_ae_mask(accel_dev); + hw_data->admin_ae_mask = hw_data->ae_mask; accel_pci_dev->sku = hw_data->get_sku(hw_data); /* If the device has no acceleration engines then ignore it. 
*/ if (!hw_data->accel_mask || !hw_data->ae_mask || ((~hw_data->ae_mask) & 0x01)) { device_printf(dev, "No acceleration units found\n"); ret = ENXIO; goto out_err; } /* Create device configuration table */ ret = adf_cfg_dev_add(accel_dev); if (ret) goto out_err; ret = adf_clock_debugfs_add(accel_dev); if (ret) goto out_err; pci_set_max_read_req(dev, 1024); ret = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, BUS_SPACE_MAXSIZE, /* BUS_SPACE_UNRESTRICTED */ 1, BUS_SPACE_MAXSIZE, 0, NULL, NULL, &accel_dev->dma_tag); if (ret) goto out_err; if (hw_data->get_accel_cap) { hw_data->accel_capabilities_mask = hw_data->get_accel_cap(accel_dev); } /* Find and map all the device's BARS */ i = 0; for (bar_nr = 0; i < ADF_PCI_MAX_BARS && bar_nr < PCIR_MAX_BAR_0; bar_nr++) { struct adf_bar *bar; /* * XXX: This isn't quite right as it will ignore a BAR * that wasn't assigned a valid resource range by the * firmware. */ rid = PCIR_BAR(bar_nr); if (bus_get_resource(dev, SYS_RES_MEMORY, rid, NULL, NULL) != 0) continue; bar = &accel_pci_dev->pci_bars[i++]; bar->virt_addr = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (bar->virt_addr == NULL) { device_printf(dev, "Failed to map BAR %d\n", bar_nr); ret = ENXIO; goto out_err; } bar->base_addr = rman_get_start(bar->virt_addr); bar->size = rman_get_size(bar->virt_addr); } pci_enable_busmaster(dev); if (!accel_dev->hw_device->config_device) { ret = EFAULT; goto out_err; } ret = accel_dev->hw_device->config_device(accel_dev); if (ret) goto out_err; ret = adf_dev_init(accel_dev); if (ret) goto out_dev_shutdown; ret = adf_dev_start(accel_dev); if (ret) goto out_dev_stop; cfg_dev = accel_dev->cfg->dev; adf_cfg_device_clear(cfg_dev, accel_dev); free(cfg_dev, M_QAT); accel_dev->cfg->dev = NULL; return ret; out_dev_stop: adf_dev_stop(accel_dev); out_dev_shutdown: adf_dev_shutdown(accel_dev); out_err: adf_cleanup_accel(accel_dev); return ret; } static int adf_detach(device_t dev) { struct adf_accel_dev *accel_dev = device_get_softc(dev); if (adf_dev_stop(accel_dev)) { device_printf(dev, "Failed to stop QAT accel dev\n"); return EBUSY; } adf_dev_shutdown(accel_dev); adf_cleanup_accel(accel_dev); return 0; } static device_method_t adf_methods[] = { DEVMETHOD(device_probe, adf_probe), DEVMETHOD(device_attach, adf_attach), DEVMETHOD(device_detach, adf_detach), DEVMETHOD_END }; static driver_t adf_driver = { "qat", adf_methods, sizeof(struct adf_accel_dev) }; DRIVER_MODULE_ORDERED(qat_c3xxx, pci, adf_driver, NULL, NULL, SI_ORDER_THIRD); MODULE_VERSION(qat_c3xxx, 1); MODULE_DEPEND(qat_c3xxx, qat_common, 1, 1, 1); MODULE_DEPEND(qat_c3xxx, qat_api, 1, 1, 1); MODULE_DEPEND(qat_c3xxx, linuxkpi, 1, 1, 1); diff --git a/sys/dev/qat/qat_hw/qat_c4xxx/adf_c4xxx_hw_data.c b/sys/dev/qat/qat_hw/qat_c4xxx/adf_c4xxx_hw_data.c index c8ff19d00bde..28dafa68a357 100644 --- a/sys/dev/qat/qat_hw/qat_c4xxx/adf_c4xxx_hw_data.c +++ b/sys/dev/qat/qat_hw/qat_c4xxx/adf_c4xxx_hw_data.c @@ -1,2302 +1,2238 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright(c) 2007-2022 Intel Corporation */ /* $FreeBSD$ */ #include #include #include #include #include #include #include #include +#include #include "adf_c4xxx_hw_data.h" #include "adf_c4xxx_reset.h" #include "adf_c4xxx_inline.h" #include "adf_c4xxx_ras.h" #include "adf_c4xxx_misc_error_stats.h" #include "adf_c4xxx_pke_replay_stats.h" #include "adf_heartbeat.h" #include "icp_qat_fw_init_admin.h" #include "icp_qat_hw.h" /* accel unit information */ static struct adf_accel_unit 
adf_c4xxx_au_32_ae[] = { { 0x1, 0x3, 0x3F, 0x1B, 6, ADF_ACCEL_SERVICE_NULL }, { 0x2, 0xC, 0xFC0, 0x6C0, 6, ADF_ACCEL_SERVICE_NULL }, { 0x4, 0x30, 0xF000, 0xF000, 4, ADF_ACCEL_SERVICE_NULL }, { 0x8, 0xC0, 0x3F0000, 0x1B0000, 6, ADF_ACCEL_SERVICE_NULL }, { 0x10, 0x300, 0xFC00000, 0x6C00000, 6, ADF_ACCEL_SERVICE_NULL }, { 0x20, 0xC00, 0xF0000000, 0xF0000000, 4, ADF_ACCEL_SERVICE_NULL } }; static struct adf_accel_unit adf_c4xxx_au_24_ae[] = { { 0x1, 0x3, 0x3F, 0x1B, 6, ADF_ACCEL_SERVICE_NULL }, { 0x2, 0xC, 0xFC0, 0x6C0, 6, ADF_ACCEL_SERVICE_NULL }, { 0x8, 0xC0, 0x3F0000, 0x1B0000, 6, ADF_ACCEL_SERVICE_NULL }, { 0x10, 0x300, 0xFC00000, 0x6C00000, 6, ADF_ACCEL_SERVICE_NULL }, }; static struct adf_accel_unit adf_c4xxx_au_12_ae[] = { { 0x1, 0x3, 0x3F, 0x1B, 6, ADF_ACCEL_SERVICE_NULL }, { 0x8, 0xC0, 0x3F0000, 0x1B0000, 6, ADF_ACCEL_SERVICE_NULL }, }; static struct adf_accel_unit adf_c4xxx_au_emulation[] = { { 0x1, 0x3, 0x3F, 0x1B, 6, ADF_ACCEL_SERVICE_NULL }, { 0x2, 0xC, 0xC0, 0xC0, 2, ADF_ACCEL_SERVICE_NULL } }; /* Accel engine threads for each of the following services * , , , */ /* Thread mapping for SKU capable of symmetric cryptography */ static const struct adf_ae_info adf_c4xxx_32_ae_sym[] = { { 2, 6, 3 }, { 2, 6, 3 }, { 1, 7, 0 }, { 2, 6, 3 }, { 2, 6, 3 }, { 1, 7, 0 }, { 2, 6, 3 }, { 2, 6, 3 }, { 1, 7, 0 }, { 2, 6, 3 }, { 2, 6, 3 }, { 1, 7, 0 }, { 2, 6, 3 }, { 2, 6, 3 }, { 2, 6, 3 }, { 2, 6, 3 }, { 2, 6, 3 }, { 2, 6, 3 }, { 1, 7, 0 }, { 2, 6, 3 }, { 2, 6, 3 }, { 1, 7, 0 }, { 2, 6, 3 }, { 2, 6, 3 }, { 1, 7, 0 }, { 2, 6, 3 }, { 2, 6, 3 }, { 1, 7, 0 }, { 2, 6, 3 }, { 2, 6, 3 }, { 2, 6, 3 }, { 2, 6, 3 } }; static const struct adf_ae_info adf_c4xxx_24_ae_sym[] = { { 2, 6, 3 }, { 2, 6, 3 }, { 1, 7, 0 }, { 2, 6, 3 }, { 2, 6, 3 }, { 1, 7, 0 }, { 2, 6, 3 }, { 2, 6, 3 }, { 1, 7, 0 }, { 2, 6, 3 }, { 2, 6, 3 }, { 1, 7, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 2, 6, 3 }, { 2, 6, 3 }, { 1, 7, 0 }, { 2, 6, 3 }, { 2, 6, 3 }, { 1, 7, 0 }, { 2, 6, 3 }, { 2, 6, 3 }, { 1, 7, 0 }, { 2, 6, 3 }, { 2, 6, 3 }, { 1, 7, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 } }; static const struct adf_ae_info adf_c4xxx_12_ae_sym[] = { { 2, 6, 3 }, { 2, 6, 3 }, { 1, 7, 0 }, { 2, 6, 3 }, { 2, 6, 3 }, { 1, 7, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 2, 6, 3 }, { 2, 6, 3 }, { 1, 7, 0 }, { 2, 6, 3 }, { 2, 6, 3 }, { 1, 7, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 } }; /* Thread mapping for SKU capable of asymmetric and symmetric cryptography */ static const struct adf_ae_info adf_c4xxx_32_ae[] = { { 2, 5, 3 }, { 2, 5, 3 }, { 1, 6, 0 }, { 2, 5, 3 }, { 2, 5, 3 }, { 1, 6, 0 }, { 2, 5, 3 }, { 2, 5, 3 }, { 1, 6, 0 }, { 2, 5, 3 }, { 2, 5, 3 }, { 1, 6, 0 }, { 2, 5, 3 }, { 2, 5, 3 }, { 2, 5, 3 }, { 2, 5, 3 }, { 2, 5, 3 }, { 2, 5, 3 }, { 1, 6, 0 }, { 2, 5, 3 }, { 2, 5, 3 }, { 1, 6, 0 }, { 2, 5, 3 }, { 2, 5, 3 }, { 1, 6, 0 }, { 2, 5, 3 }, { 2, 5, 3 }, { 1, 6, 0 }, { 2, 5, 3 }, { 2, 5, 3 }, { 2, 5, 3 }, { 2, 5, 3 } }; static const struct adf_ae_info adf_c4xxx_24_ae[] = { { 2, 5, 3 }, { 2, 5, 3 }, { 1, 6, 0 }, { 2, 5, 3 }, { 2, 5, 3 }, { 1, 6, 0 }, { 2, 5, 3 }, { 2, 5, 3 }, { 1, 6, 0 }, { 2, 5, 3 }, { 2, 5, 3 }, { 1, 6, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 2, 5, 3 }, { 2, 5, 3 }, { 1, 6, 0 }, { 2, 5, 3 }, { 2, 5, 3 }, { 1, 6, 0 }, { 2, 5, 3 }, { 2, 5, 3 }, { 1, 6, 0 }, { 2, 5, 3 }, { 2, 5, 3 }, { 1, 6, 0 }, { 0, 
0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 } }; static const struct adf_ae_info adf_c4xxx_12_ae[] = { { 2, 5, 3 }, { 2, 5, 3 }, { 1, 6, 0 }, { 2, 5, 3 }, { 2, 5, 3 }, { 1, 6, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 2, 5, 3 }, { 2, 5, 3 }, { 1, 6, 0 }, { 2, 5, 3 }, { 2, 5, 3 }, { 1, 6, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 } }; static struct adf_hw_device_class c4xxx_class = {.name = ADF_C4XXX_DEVICE_NAME, .type = DEV_C4XXX, .instances = 0 }; struct icp_qat_fw_init_c4xxx_admin_hb_stats { struct icp_qat_fw_init_admin_hb_cnt stats[ADF_NUM_THREADS_PER_AE]; }; struct adf_hb_count { u16 ae_thread[ADF_NUM_THREADS_PER_AE]; }; static const int sku_cy_au[] = ADF_C4XXX_NUM_CY_AU; static const int sku_dc_au[] = ADF_C4XXX_NUM_DC_AU; static const int sku_inline_au[] = ADF_C4XXX_NUM_INLINE_AU; /* * C4xxx devices introduce new fuses and soft straps and * are different from previous gen device implementations. */ static u32 get_accel_mask(struct adf_accel_dev *accel_dev) { device_t pdev = accel_dev->accel_pci_dev.pci_dev; u32 fusectl0; u32 softstrappull0; fusectl0 = pci_read_config(pdev, ADF_C4XXX_FUSECTL0_OFFSET, 4); softstrappull0 = pci_read_config(pdev, ADF_C4XXX_SOFTSTRAPPULL0_OFFSET, 4); return (~(fusectl0 | softstrappull0)) & ADF_C4XXX_ACCELERATORS_MASK; } static u32 get_ae_mask(struct adf_accel_dev *accel_dev) { device_t pdev = accel_dev->accel_pci_dev.pci_dev; u32 fusectl1; u32 softstrappull1; fusectl1 = pci_read_config(pdev, ADF_C4XXX_FUSECTL1_OFFSET, 4); softstrappull1 = pci_read_config(pdev, ADF_C4XXX_SOFTSTRAPPULL1_OFFSET, 4); /* Assume that AE and AU disable masks are consistent, so no * checks against the AU mask are performed */ return (~(fusectl1 | softstrappull1)) & ADF_C4XXX_ACCELENGINES_MASK; } static u32 get_num_accels(struct adf_hw_device_data *self) { return self ? hweight32(self->accel_mask) : 0; } static u32 get_num_aes(struct adf_hw_device_data *self) { return self ? hweight32(self->ae_mask) : 0; } static u32 get_misc_bar_id(struct adf_hw_device_data *self) { return ADF_C4XXX_PMISC_BAR; } static u32 get_etr_bar_id(struct adf_hw_device_data *self) { return ADF_C4XXX_ETR_BAR; } static u32 get_sram_bar_id(struct adf_hw_device_data *self) { return ADF_C4XXX_SRAM_BAR; } static inline void c4xxx_unpack_ssm_wdtimer(u64 value, u32 *upper, u32 *lower) { *lower = lower_32_bits(value); *upper = upper_32_bits(value); } /** * c4xxx_set_ssm_wdtimer() - Initialize the slice hang watchdog timer. * * @param accel_dev Structure holding accelerator data. * @return 0 on success, error code otherwise. */ static int c4xxx_set_ssm_wdtimer(struct adf_accel_dev *accel_dev) { struct adf_hw_device_data *hw_device = accel_dev->hw_device; struct adf_bar *misc_bar = &GET_BARS(accel_dev)[hw_device->get_misc_bar_id(hw_device)]; struct resource *csr = misc_bar->virt_addr; unsigned long accel_mask = hw_device->accel_mask; u32 accel = 0; u64 timer_val = ADF_C4XXX_SSM_WDT_64BIT_DEFAULT_VALUE; u64 timer_val_pke = ADF_C4XXX_SSM_WDT_PKE_64BIT_DEFAULT_VALUE; u32 ssm_wdt_low = 0, ssm_wdt_high = 0; u32 ssm_wdt_pke_low = 0, ssm_wdt_pke_high = 0; /* Convert 64bit Slice Hang watchdog value into 32bit values for * mmio write to 32bit CSRs. 
*/ c4xxx_unpack_ssm_wdtimer(timer_val, &ssm_wdt_high, &ssm_wdt_low); c4xxx_unpack_ssm_wdtimer(timer_val_pke, &ssm_wdt_pke_high, &ssm_wdt_pke_low); /* Configures Slice Hang watchdogs */ for_each_set_bit(accel, &accel_mask, ADF_C4XXX_MAX_ACCELERATORS) { ADF_CSR_WR(csr, ADF_C4XXX_SSMWDTL_OFFSET(accel), ssm_wdt_low); ADF_CSR_WR(csr, ADF_C4XXX_SSMWDTH_OFFSET(accel), ssm_wdt_high); ADF_CSR_WR(csr, ADF_C4XXX_SSMWDTPKEL_OFFSET(accel), ssm_wdt_pke_low); ADF_CSR_WR(csr, ADF_C4XXX_SSMWDTPKEH_OFFSET(accel), ssm_wdt_pke_high); } return 0; } /** * c4xxx_check_slice_hang() - Check slice hang status * * Return: true if a slice hang interrupt is serviced. */ static bool c4xxx_check_slice_hang(struct adf_accel_dev *accel_dev) { struct adf_hw_device_data *hw_device = accel_dev->hw_device; struct adf_bar *misc_bar = &GET_BARS(accel_dev)[hw_device->get_misc_bar_id(hw_device)]; struct resource *csr = misc_bar->virt_addr; u32 slice_hang_offset; u32 ia_slice_hang_offset; u32 fw_irq_source; u32 ia_irq_source; u32 accel_num = 0; bool handled = false; u32 errsou10 = ADF_CSR_RD(csr, ADF_C4XXX_ERRSOU10); unsigned long accel_mask; accel_mask = hw_device->accel_mask; for_each_set_bit(accel_num, &accel_mask, ADF_C4XXX_MAX_ACCELERATORS) { if (!(errsou10 & ADF_C4XXX_IRQ_SRC_MASK(accel_num))) continue; fw_irq_source = ADF_CSR_RD(csr, ADF_INTSTATSSM(accel_num)); ia_irq_source = ADF_CSR_RD(csr, ADF_C4XXX_IAINTSTATSSM(accel_num)); ia_slice_hang_offset = ADF_C4XXX_IASLICEHANGSTATUS_OFFSET(accel_num); /* FW did not clear SliceHang error, IA logs and clears * the error */ if ((fw_irq_source & ADF_INTSTATSSM_SHANGERR) && (ia_irq_source & ADF_INTSTATSSM_SHANGERR)) { slice_hang_offset = ADF_C4XXX_SLICEHANGSTATUS_OFFSET(accel_num); /* Bring hung slice out of reset */ adf_csr_fetch_and_and(csr, slice_hang_offset, ~0); /* Log SliceHang error and clear an interrupt */ handled = adf_handle_slice_hang(accel_dev, accel_num, csr, ia_slice_hang_offset); atomic_inc(&accel_dev->ras_counters[ADF_RAS_UNCORR]); } /* FW cleared SliceHang, IA only logs an error */ else if (!(fw_irq_source & ADF_INTSTATSSM_SHANGERR) && (ia_irq_source & ADF_INTSTATSSM_SHANGERR)) { /* Log SliceHang error and clear an interrupt */ handled = adf_handle_slice_hang(accel_dev, accel_num, csr, ia_slice_hang_offset); atomic_inc(&accel_dev->ras_counters[ADF_RAS_UNCORR]); } /* Clear the associated IA interrupt */ adf_csr_fetch_and_and(csr, ADF_C4XXX_IAINTSTATSSM(accel_num), ~BIT(13)); } return handled; } static bool get_eth_doorbell_msg(struct adf_accel_dev *accel_dev) { struct resource *csr = (&GET_BARS(accel_dev)[ADF_C4XXX_PMISC_BAR])->virt_addr; struct adf_hw_device_data *hw_device = accel_dev->hw_device; u32 errsou11 = ADF_CSR_RD(csr, ADF_C4XXX_ERRSOU11); u32 doorbell_int = ADF_CSR_RD(csr, ADF_C4XXX_ETH_DOORBELL_INT); u32 eth_doorbell_reg[ADF_C4XXX_NUM_ETH_DOORBELL_REGS]; bool handled = false; u32 data_reg; u8 i; /* Reset cannot be acknowledged until the reset ACK doorbell message is received */ hw_device->reset_ack = false; /* Check if doorbell interrupt occurred. */
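/*
 * Illustration (not part of the driver): c4xxx_unpack_ssm_wdtimer()
 * above splits a 64-bit watchdog value into the two 32-bit halves
 * written to the SSMWDTL and SSMWDTH CSRs.  The lower_32_bits() and
 * upper_32_bits() helpers reduce to plain masking and shifting:
 *
 *   #include <stdint.h>
 *   #include <stdio.h>
 *
 *   int main(void)
 *   {
 *           uint64_t timer_val = 0x123456789ABCDEF0ULL;  // hypothetical
 *           uint32_t lo = (uint32_t)(timer_val & 0xFFFFFFFFU);
 *           uint32_t hi = (uint32_t)(timer_val >> 32);
 *
 *           printf("hi=0x%08X lo=0x%08X\n", hi, lo);
 *           return 0;
 *   }
 */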
if (errsou11 & ADF_C4XXX_DOORBELL_INT_SRC) { /* Decode doorbell messages from ethernet device */ for (i = 0; i < ADF_C4XXX_NUM_ETH_DOORBELL_REGS; i++) { eth_doorbell_reg[i] = 0; if (doorbell_int & BIT(i)) { data_reg = ADF_C4XXX_ETH_DOORBELL(i); eth_doorbell_reg[i] = ADF_CSR_RD(csr, data_reg); device_printf( GET_DEV(accel_dev), "Received Doorbell message (0x%08x)\n", eth_doorbell_reg[i]); } } /* Only need to check PF0 */ if (eth_doorbell_reg[0] == ADF_C4XXX_IOSFSB_RESET_ACK) { device_printf(GET_DEV(accel_dev), "Received pending reset ACK\n"); hw_device->reset_ack = true; } /* Clear the interrupt source */ ADF_CSR_WR(csr, ADF_C4XXX_ETH_DOORBELL_INT, ADF_C4XXX_ETH_DOORBELL_MASK); handled = true; } return handled; } static enum dev_sku_info get_sku(struct adf_hw_device_data *self) { int aes = get_num_aes(self); u32 capabilities = self->accel_capabilities_mask; bool sym_only_sku = false; /* Check if SKU is capable only of symmetric cryptography * via device capabilities. */ if ((capabilities & ADF_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC) && !(capabilities & ADF_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC) && !(capabilities & ADF_ACCEL_CAPABILITIES_COMPRESSION)) sym_only_sku = true; switch (aes) { case ADF_C4XXX_HIGH_SKU_AES: if (sym_only_sku) return DEV_SKU_1_CY; return DEV_SKU_1; case ADF_C4XXX_MED_SKU_AES: if (sym_only_sku) return DEV_SKU_2_CY; return DEV_SKU_2; case ADF_C4XXX_LOW_SKU_AES: if (sym_only_sku) return DEV_SKU_3_CY; return DEV_SKU_3; } return DEV_SKU_UNKNOWN; } static bool c4xxx_check_prod_sku(struct adf_accel_dev *accel_dev) { device_t pdev = accel_dev->accel_pci_dev.pci_dev; u32 fusectl0 = 0; fusectl0 = pci_read_config(pdev, ADF_C4XXX_FUSECTL0_OFFSET, 4); if (fusectl0 & ADF_C4XXX_FUSE_PROD_SKU_MASK) return true; else return false; } static bool adf_check_sym_only_sku_c4xxx(struct adf_accel_dev *accel_dev) { device_t pdev = accel_dev->accel_pci_dev.pci_dev; u32 legfuse = 0; legfuse = pci_read_config(pdev, ADF_DEVICE_LEGFUSE_OFFSET, 4); if (legfuse & ADF_C4XXX_LEGFUSE_BASE_SKU_MASK) return true; else return false; } static void adf_enable_slice_hang_detection(struct adf_accel_dev *accel_dev) { struct resource *csr; struct adf_hw_device_data *hw_device = accel_dev->hw_device; u32 accel = 0; unsigned long accel_mask; csr = (&GET_BARS(accel_dev)[ADF_C4XXX_PMISC_BAR])->virt_addr; accel_mask = hw_device->accel_mask; for_each_set_bit(accel, &accel_mask, ADF_C4XXX_MAX_ACCELERATORS) { /* Unmasks Slice Hang interrupts so they can be seen by IA.
*/ ADF_CSR_WR(csr, ADF_C4XXX_SHINTMASKSSM_OFFSET(accel), ADF_C4XXX_SHINTMASKSSM_VAL); } } static void adf_enable_ras(struct adf_accel_dev *accel_dev) { struct resource *csr; struct adf_hw_device_data *hw_device = accel_dev->hw_device; u32 accel = 0; unsigned long accel_mask; csr = (&GET_BARS(accel_dev)[ADF_C4XXX_PMISC_BAR])->virt_addr; accel_mask = hw_device->accel_mask; for_each_set_bit(accel, &accel_mask, ADF_C4XXX_MAX_ACCELERATORS) { ADF_CSR_WR(csr, ADF_C4XXX_GET_SSMFEATREN_OFFSET(accel), ADF_C4XXX_SSMFEATREN_VAL); } } static u32 get_clock_speed(struct adf_hw_device_data *self) { /* c4xxx CPP clock is equal to high-speed clock */ return self->clock_frequency; } static void adf_enable_error_interrupts(struct adf_accel_dev *accel_dev) { struct resource *csr, *aram_csr; struct adf_hw_device_data *hw_device = accel_dev->hw_device; u32 accel = 0; unsigned long accel_mask; csr = (&GET_BARS(accel_dev)[ADF_C4XXX_PMISC_BAR])->virt_addr; aram_csr = (&GET_BARS(accel_dev)[ADF_C4XXX_SRAM_BAR])->virt_addr; accel_mask = hw_device->accel_mask; for_each_set_bit(accel, &accel_mask, ADF_C4XXX_MAX_ACCELERATORS) { /* Enable shared memory, MMP, CPP, PPERR interrupts * for a given accel */ ADF_CSR_WR(csr, ADF_C4XXX_GET_INTMASKSSM_OFFSET(accel), 0); /* Enable SPP parity error interrupts for a given accel */ ADF_CSR_WR(csr, ADF_C4XXX_GET_SPPPARERRMSK_OFFSET(accel), 0); /* Enable ssm soft parity errors on given accel */ ADF_CSR_WR(csr, ADF_C4XXX_GET_SSMSOFTERRORPARITY_MASK_OFFSET(accel), ADF_C4XXX_SSMSOFTERRORPARITY_MASK_VAL); } /* Enable interrupts for VFtoPF0_127. */ ADF_CSR_WR(csr, ADF_C4XXX_ERRMSK4, ADF_C4XXX_VF2PF0_31); ADF_CSR_WR(csr, ADF_C4XXX_ERRMSK5, ADF_C4XXX_VF2PF32_63); ADF_CSR_WR(csr, ADF_C4XXX_ERRMSK6, ADF_C4XXX_VF2PF64_95); ADF_CSR_WR(csr, ADF_C4XXX_ERRMSK7, ADF_C4XXX_VF2PF96_127); /* Enable interrupts signaling ECC correctable errors for all AEs */ ADF_CSR_WR(csr, ADF_C4XXX_ERRMSK8, ADF_C4XXX_ERRMSK8_COERR); ADF_CSR_WR(csr, ADF_C4XXX_HI_ME_COR_ERRLOG_ENABLE, ADF_C4XXX_HI_ME_COR_ERRLOG_ENABLE_MASK); /* Enable error interrupts reported by ERRSOU9 */ ADF_CSR_WR(csr, ADF_C4XXX_ERRMSK9, ADF_C4XXX_ERRMSK9_IRQ_MASK); /* Enable uncorrectable errors on all the AE */ ADF_CSR_WR(csr, ADF_C4XXX_HI_ME_UNCERR_LOG_ENABLE, ADF_C4XXX_HI_ME_UNCERR_LOG_ENABLE_MASK); /* Enable CPP Agent to report command parity errors */ ADF_CSR_WR(csr, ADF_C4XXX_HI_CPP_AGENT_CMD_PAR_ERR_LOG_ENABLE, ADF_C4XXX_HI_CPP_AGENT_CMD_PAR_ERR_LOG_ENABLE_MASK); /* Enable reporting of RI memory parity errors */ ADF_CSR_WR(csr, ADF_C4XXX_RI_MEM_PAR_ERR_EN0, ADF_C4XXX_RI_MEM_PAR_ERR_EN0_MASK); /* Enable reporting of TI memory parity errors */ ADF_CSR_WR(csr, ADF_C4XXX_TI_MEM_PAR_ERR_EN0, ADF_C4XXX_TI_MEM_PAR_ERR_EN0_MASK); ADF_CSR_WR(csr, ADF_C4XXX_TI_MEM_PAR_ERR_EN1, ADF_C4XXX_TI_MEM_PAR_ERR_EN1_MASK); /* Enable SSM errors */ ADF_CSR_WR(csr, ADF_C4XXX_ERRMSK10, ADF_C4XXX_ERRMSK10_SSM_ERR); /* Enable miscellaneous errors (ethernet doorbell aram, ici, ice) */ ADF_CSR_WR(csr, ADF_C4XXX_ERRMSK11, ADF_C4XXX_ERRMSK11_ERR); /* RI CPP bus interface error detection and reporting. */ ADF_CSR_WR(csr, ADF_C4XXX_RICPPINTCTL, ADF_C4XXX_RICPP_EN); /* TI CPP bus interface error detection and reporting. */ ADF_CSR_WR(csr, ADF_C4XXX_TICPPINTCTL, ADF_C4XXX_TICPP_EN); /* Enable CFC Error interrupts and logging. */ ADF_CSR_WR(csr, ADF_C4XXX_CPP_CFC_ERR_CTRL, ADF_C4XXX_CPP_CFC_UE); /* Enable ARAM correctable error detection. */ ADF_CSR_WR(aram_csr, ADF_C4XXX_ARAMCERR, ADF_C4XXX_ARAM_CERR); /* Enable ARAM uncorrectable error detection. 
*/ ADF_CSR_WR(aram_csr, ADF_C4XXX_ARAMUERR, ADF_C4XXX_ARAM_UERR); /* Enable Push/Pull Misc Uncorrectable error interrupts and logging */ ADF_CSR_WR(aram_csr, ADF_C4XXX_CPPMEMTGTERR, ADF_C4XXX_TGT_UERR); } static void adf_enable_mmp_error_correction(struct resource *csr, struct adf_hw_device_data *hw_data) { unsigned int accel = 0, mmp; unsigned long uerrssmmmp_mask, cerrssmmmp_mask; enum operation op; unsigned long accel_mask; /* Prepare values and operation that will be performed on * UERRSSMMMP and CERRSSMMMP registers on each MMP */ if (hw_data->accel_capabilities_mask & ADF_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC) { uerrssmmmp_mask = ADF_C4XXX_UERRSSMMMP_EN; cerrssmmmp_mask = ADF_C4XXX_CERRSSMMMP_EN; op = OR; } else { uerrssmmmp_mask = ~ADF_C4XXX_UERRSSMMMP_EN; cerrssmmmp_mask = ~ADF_C4XXX_CERRSSMMMP_EN; op = AND; } accel_mask = hw_data->accel_mask; /* Enable MMP Logging */ for_each_set_bit(accel, &accel_mask, ADF_C4XXX_MAX_ACCELERATORS) { /* Set power-up */ adf_csr_fetch_and_and(csr, ADF_C4XXX_SLICEPWRDOWN(accel), ~ADF_C4XXX_MMP_PWR_UP_MSK); for (mmp = 0; mmp < ADF_C4XXX_MAX_MMP; ++mmp) { adf_csr_fetch_and_update(op, csr, ADF_C4XXX_UERRSSMMMP(accel, mmp), uerrssmmmp_mask); adf_csr_fetch_and_update(op, csr, ADF_C4XXX_CERRSSMMMP(accel, mmp), cerrssmmmp_mask); } /* Restore power-down value */ adf_csr_fetch_and_or(csr, ADF_C4XXX_SLICEPWRDOWN(accel), ADF_C4XXX_MMP_PWR_UP_MSK); } } static u32 get_pf2vf_offset(u32 i) { return ADF_C4XXX_PF2VF_OFFSET(i); } static u32 get_vintmsk_offset(u32 i) { return ADF_C4XXX_VINTMSK_OFFSET(i); } static void get_arb_info(struct arb_info *arb_csrs_info) { arb_csrs_info->arbiter_offset = ADF_C4XXX_ARB_OFFSET; arb_csrs_info->wrk_cfg_offset = ADF_C4XXX_ARB_WQCFG_OFFSET; } static void get_admin_info(struct admin_info *admin_csrs_info) { admin_csrs_info->mailbox_offset = ADF_C4XXX_MAILBOX_BASE_OFFSET; admin_csrs_info->admin_msg_ur = ADF_C4XXX_ADMINMSGUR_OFFSET; admin_csrs_info->admin_msg_lr = ADF_C4XXX_ADMINMSGLR_OFFSET; } static void get_errsou_offset(u32 *errsou3, u32 *errsou5) { *errsou3 = ADF_C4XXX_ERRSOU3; *errsou5 = ADF_C4XXX_ERRSOU5; } static void adf_enable_error_correction(struct adf_accel_dev *accel_dev) { struct adf_hw_device_data *hw_device = accel_dev->hw_device; struct adf_bar *misc_bar = &GET_BARS(accel_dev)[ADF_C4XXX_PMISC_BAR]; struct resource *csr = misc_bar->virt_addr; unsigned int val, i = 0; unsigned long ae_mask; unsigned long accel_mask; ae_mask = hw_device->ae_mask; /* Enable Accel Engine error detection & correction */ for_each_set_bit(i, &ae_mask, ADF_C4XXX_MAX_ACCELENGINES) { val = ADF_CSR_RD(csr, ADF_C4XXX_AE_CTX_ENABLES(i)); val |= ADF_C4XXX_ENABLE_AE_ECC_ERR; ADF_CSR_WR(csr, ADF_C4XXX_AE_CTX_ENABLES(i), val); val = ADF_CSR_RD(csr, ADF_C4XXX_AE_MISC_CONTROL(i)); val |= ADF_C4XXX_ENABLE_AE_ECC_PARITY_CORR; ADF_CSR_WR(csr, ADF_C4XXX_AE_MISC_CONTROL(i), val); } accel_mask = hw_device->accel_mask; /* Enable shared memory error detection & correction */ for_each_set_bit(i, &accel_mask, ADF_C4XXX_MAX_ACCELERATORS) { val = ADF_CSR_RD(csr, ADF_C4XXX_UERRSSMSH(i)); val |= ADF_C4XXX_ERRSSMSH_EN; ADF_CSR_WR(csr, ADF_C4XXX_UERRSSMSH(i), val); val = ADF_CSR_RD(csr, ADF_C4XXX_CERRSSMSH(i)); val |= ADF_C4XXX_ERRSSMSH_EN; ADF_CSR_WR(csr, ADF_C4XXX_CERRSSMSH(i), val); } adf_enable_ras(accel_dev); adf_enable_mmp_error_correction(csr, hw_device); adf_enable_slice_hang_detection(accel_dev); adf_enable_error_interrupts(accel_dev); } static void adf_enable_ints(struct adf_accel_dev *accel_dev) { struct resource *addr; addr = 
(&GET_BARS(accel_dev)[ADF_C4XXX_PMISC_BAR])->virt_addr; /* Enable bundle interrupts */ ADF_CSR_WR(addr, ADF_C4XXX_SMIAPF0_MASK_OFFSET, ADF_C4XXX_SMIA0_MASK); ADF_CSR_WR(addr, ADF_C4XXX_SMIAPF1_MASK_OFFSET, ADF_C4XXX_SMIA1_MASK); ADF_CSR_WR(addr, ADF_C4XXX_SMIAPF2_MASK_OFFSET, ADF_C4XXX_SMIA2_MASK); ADF_CSR_WR(addr, ADF_C4XXX_SMIAPF3_MASK_OFFSET, ADF_C4XXX_SMIA3_MASK); /* Enable misc interrupts */ ADF_CSR_WR(addr, ADF_C4XXX_SMIAPF4_MASK_OFFSET, ADF_C4XXX_SMIA4_MASK); } static u32 get_ae_clock(struct adf_hw_device_data *self) { /* Clock update interval is <16> ticks for c4xxx. */ return self->clock_frequency / 16; } static int measure_clock(struct adf_accel_dev *accel_dev) { u32 frequency; int ret = 0; ret = adf_dev_measure_clock(accel_dev, &frequency, ADF_C4XXX_MIN_AE_FREQ, ADF_C4XXX_MAX_AE_FREQ); if (ret) return ret; accel_dev->hw_device->clock_frequency = frequency; return 0; } static int get_storage_enabled(struct adf_accel_dev *accel_dev, uint32_t *storage_enabled) { if (accel_dev->au_info->num_dc_au > 0) { *storage_enabled = 1; GET_HW_DATA(accel_dev)->extended_dc_capabilities = ICP_ACCEL_CAPABILITIES_ADVANCED_COMPRESSION; } return 0; } static u32 c4xxx_get_hw_cap(struct adf_accel_dev *accel_dev) { device_t pdev = accel_dev->accel_pci_dev.pci_dev; u32 legfuses; u32 softstrappull0, softstrappull2; u32 fusectl0, fusectl2; u32 capabilities; /* Read accelerator capabilities mask */ legfuses = pci_read_config(pdev, ADF_DEVICE_LEGFUSE_OFFSET, 4); capabilities = ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC | ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC | ICP_ACCEL_CAPABILITIES_CIPHER | ICP_ACCEL_CAPABILITIES_AUTHENTICATION | ICP_ACCEL_CAPABILITIES_COMPRESSION | ICP_ACCEL_CAPABILITIES_ZUC | ICP_ACCEL_CAPABILITIES_HKDF | ICP_ACCEL_CAPABILITIES_SHA3_EXT | ICP_ACCEL_CAPABILITIES_SM3 | ICP_ACCEL_CAPABILITIES_SM4 | ICP_ACCEL_CAPABILITIES_CHACHA_POLY | ICP_ACCEL_CAPABILITIES_AESGCM_SPC | - ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY | ICP_ACCEL_CAPABILITIES_ECEDMONT; if (legfuses & ICP_ACCEL_MASK_CIPHER_SLICE) { capabilities &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC; capabilities &= ~ICP_ACCEL_CAPABILITIES_CIPHER; } if (legfuses & ICP_ACCEL_MASK_AUTH_SLICE) capabilities &= ~ICP_ACCEL_CAPABILITIES_AUTHENTICATION; if (legfuses & ICP_ACCEL_MASK_PKE_SLICE) capabilities &= ~(ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC | ICP_ACCEL_CAPABILITIES_ECEDMONT); if (legfuses & ICP_ACCEL_MASK_COMPRESS_SLICE) { capabilities &= ~ICP_ACCEL_CAPABILITIES_COMPRESSION; capabilities &= ~ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY; } if (legfuses & ICP_ACCEL_MASK_EIA3_SLICE) capabilities &= ~ICP_ACCEL_CAPABILITIES_ZUC; if (legfuses & ICP_ACCEL_MASK_SM3_SLICE) capabilities &= ~ICP_ACCEL_CAPABILITIES_SM3; if (legfuses & ICP_ACCEL_MASK_SM4_SLICE) capabilities &= ~ICP_ACCEL_CAPABILITIES_SM4; /* Read fusectl0 & softstrappull0 registers to ensure inline * acceleration is not disabled */ softstrappull0 = pci_read_config(pdev, ADF_C4XXX_SOFTSTRAPPULL0_OFFSET, 4); fusectl0 = pci_read_config(pdev, ADF_C4XXX_FUSECTL0_OFFSET, 4); if ((fusectl0 | softstrappull0) & ADF_C4XXX_FUSE_DISABLE_INLINE_MASK) capabilities &= ~ICP_ACCEL_CAPABILITIES_INLINE; /* Read fusectl2 & softstrappull2 registers to check whether * PKE/DC are enabled/disabled */ softstrappull2 = pci_read_config(pdev, ADF_C4XXX_SOFTSTRAPPULL2_OFFSET, 4); fusectl2 = pci_read_config(pdev, ADF_C4XXX_FUSECTL2_OFFSET, 4); /* Disable PKE/DC cap if there are no PKE/DC-enabled AUs.
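 * An accel unit is usable only when its bit is clear in both fusectl2
 * and softstrappull2, so ~fusectl2 & ~softstrappull2 is the mask of
 * usable units; if that mask has no bit in common with the PKE (or
 * compression) fuse mask, the matching capability is dropped. With
 * illustrative values fusectl2 = 0 and softstrappull2 = ~0u, no PKE
 * unit survives and CRYPTO_ASYMMETRIC is cleared.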
*/ if (!(~fusectl2 & ~softstrappull2 & ADF_C4XXX_FUSE_PKE_MASK)) capabilities &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC; if (!(~fusectl2 & ~softstrappull2 & ADF_C4XXX_FUSE_COMP_MASK)) capabilities &= ~(ICP_ACCEL_CAPABILITIES_COMPRESSION | ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY); return capabilities; } static int c4xxx_configure_accel_units(struct adf_accel_dev *accel_dev) { char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES] = { 0 }; unsigned long val; char val_str[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = { 0 }; int sku; struct adf_hw_device_data *hw_data = accel_dev->hw_device; sku = get_sku(hw_data); if (adf_cfg_section_add(accel_dev, ADF_GENERAL_SEC)) goto err; snprintf(key, sizeof(key), ADF_SERVICES_ENABLED); /* Base station SKU supports symmetric cryptography only. */ if (adf_check_sym_only_sku_c4xxx(accel_dev)) snprintf(val_str, sizeof(val_str), ADF_SERVICE_SYM); else snprintf(val_str, sizeof(val_str), ADF_SERVICE_CY); val = sku_dc_au[sku]; if (val) { strncat(val_str, ADF_SERVICES_SEPARATOR ADF_SERVICE_DC, ADF_CFG_MAX_VAL_LEN_IN_BYTES - strnlen(val_str, sizeof(val_str)) - ADF_CFG_NULL_TERM_SIZE); } if (adf_cfg_add_key_value_param( accel_dev, ADF_GENERAL_SEC, key, (void *)val_str, ADF_STR)) goto err; snprintf(key, sizeof(key), ADF_NUM_CY_ACCEL_UNITS); val = sku_cy_au[sku]; if (adf_cfg_add_key_value_param( accel_dev, ADF_GENERAL_SEC, key, (void *)&val, ADF_DEC)) goto err; snprintf(key, sizeof(key), ADF_NUM_DC_ACCEL_UNITS); val = sku_dc_au[sku]; if (adf_cfg_add_key_value_param( accel_dev, ADF_GENERAL_SEC, key, (void *)&val, ADF_DEC)) goto err; snprintf(key, sizeof(key), ADF_NUM_INLINE_ACCEL_UNITS); val = sku_inline_au[sku]; if (adf_cfg_add_key_value_param( accel_dev, ADF_GENERAL_SEC, key, (void *)&val, ADF_DEC)) goto err; return 0; err: device_printf(GET_DEV(accel_dev), "Failed to configure accel units\n"); return EINVAL; } static void update_hw_capability(struct adf_accel_dev *accel_dev) { struct adf_accel_unit_info *au_info = accel_dev->au_info; struct adf_hw_device_data *hw_device = accel_dev->hw_device; u32 disabled_caps = 0; if (!au_info->asym_ae_msk) disabled_caps = ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC | ICP_ACCEL_CAPABILITIES_AUTHENTICATION; if (!au_info->sym_ae_msk) disabled_caps |= ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC | ICP_ACCEL_CAPABILITIES_CIPHER | ICP_ACCEL_CAPABILITIES_ZUC | ICP_ACCEL_CAPABILITIES_SHA3_EXT | ICP_ACCEL_CAPABILITIES_SM3 | ICP_ACCEL_CAPABILITIES_SM4 | ICP_ACCEL_CAPABILITIES_CHACHA_POLY | ICP_ACCEL_CAPABILITIES_AESGCM_SPC; if (!au_info->dc_ae_msk) { disabled_caps |= ICP_ACCEL_CAPABILITIES_COMPRESSION | ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY; hw_device->extended_dc_capabilities = 0; } if (!au_info->inline_ingress_msk && !au_info->inline_egress_msk) disabled_caps |= ICP_ACCEL_CAPABILITIES_INLINE; hw_device->accel_capabilities_mask = c4xxx_get_hw_cap(accel_dev) & ~disabled_caps; } static void c4xxx_set_sadb_size(struct adf_accel_dev *accel_dev) { u32 sadb_reg_value = 0; struct resource *aram_csr_base; aram_csr_base = (&GET_BARS(accel_dev)[ADF_C4XXX_SRAM_BAR])->virt_addr; if (accel_dev->au_info->num_inline_au) { /* REG_SA_DB_CTRL register initialisation */ sadb_reg_value = ADF_C4XXX_SADB_REG_VALUE(accel_dev); ADF_CSR_WR(aram_csr_base, ADF_C4XXX_REG_SA_DB_CTRL, sadb_reg_value); } else { /* Zero the SADB size when inline is disabled. */ adf_csr_fetch_and_and(aram_csr_base, ADF_C4XXX_REG_SA_DB_CTRL, ADF_C4XXX_SADB_SIZE_BIT); } /* REG_SA_CTRL_LOCK register initialisation. 
We set the lock * bit in order to prevent REG_SA_DB_CTRL from being * overwritten */ ADF_CSR_WR(aram_csr_base, ADF_C4XXX_REG_SA_CTRL_LOCK, ADF_C4XXX_DEFAULT_SA_CTRL_LOCKOUT); } static void c4xxx_init_error_notification_configuration(struct adf_accel_dev *accel_dev, u32 offset) { struct resource *aram_csr_base; aram_csr_base = (&GET_BARS(accel_dev)[ADF_C4XXX_SRAM_BAR])->virt_addr; /* Configure the error notification registers */ /* Set CD Parity error */ ADF_CSR_WR(aram_csr_base, ADF_C4XXX_IC_CD_RF_PARITY_ERR_0 + offset, ADF_C4XXX_CD_RF_PARITY_ERR_0_VAL); ADF_CSR_WR(aram_csr_base, ADF_C4XXX_IC_CD_RF_PARITY_ERR_1 + offset, ADF_C4XXX_CD_RF_PARITY_ERR_1_VAL); ADF_CSR_WR(aram_csr_base, ADF_C4XXX_IC_CD_RF_PARITY_ERR_2 + offset, ADF_C4XXX_CD_RF_PARITY_ERR_2_VAL); ADF_CSR_WR(aram_csr_base, ADF_C4XXX_IC_CD_RF_PARITY_ERR_3 + offset, ADF_C4XXX_CD_RF_PARITY_ERR_3_VAL); /* Set CD RAM ECC Correctable Error */ ADF_CSR_WR(aram_csr_base, ADF_C4XXX_IC_CD_CERR + offset, ADF_C4XXX_CD_CERR_VAL); /* Set CD RAM ECC UnCorrectable Error */ ADF_CSR_WR(aram_csr_base, ADF_C4XXX_IC_CD_UERR + offset, ADF_C4XXX_CD_UERR_VAL); /* Set Inline (excl cmd_dis) Parity Error */ ADF_CSR_WR(aram_csr_base, ADF_C4XXX_IC_INLN_RF_PARITY_ERR_0 + offset, ADF_C4XXX_INLN_RF_PARITY_ERR_0_VAL); ADF_CSR_WR(aram_csr_base, ADF_C4XXX_IC_INLN_RF_PARITY_ERR_1 + offset, ADF_C4XXX_INLN_RF_PARITY_ERR_1_VAL); ADF_CSR_WR(aram_csr_base, ADF_C4XXX_IC_INLN_RF_PARITY_ERR_2 + offset, ADF_C4XXX_INLN_RF_PARITY_ERR_2_VAL); ADF_CSR_WR(aram_csr_base, ADF_C4XXX_IC_INLN_RF_PARITY_ERR_3 + offset, ADF_C4XXX_INLN_RF_PARITY_ERR_3_VAL); ADF_CSR_WR(aram_csr_base, ADF_C4XXX_IC_INLN_RF_PARITY_ERR_4 + offset, ADF_C4XXX_INLN_RF_PARITY_ERR_4_VAL); ADF_CSR_WR(aram_csr_base, ADF_C4XXX_IC_INLN_RF_PARITY_ERR_5 + offset, ADF_C4XXX_INLN_RF_PARITY_ERR_5_VAL); /* Set Parser RAM ECC Correctable Error */ ADF_CSR_WR(aram_csr_base, ADF_C4XXX_IC_PARSER_CERR + offset, ADF_C4XXX_PARSER_CERR_VAL); /* Set Parser RAM ECC UnCorrectable Error */ ADF_CSR_WR(aram_csr_base, ADF_C4XXX_IC_PARSER_UERR + offset, ADF_C4XXX_PARSER_UERR_VAL); /* Set CTPB RAM ECC Correctable Error */ ADF_CSR_WR(aram_csr_base, ADF_C4XXX_IC_CTPB_CERR + offset, ADF_C4XXX_CTPB_CERR_VAL); /* Set CTPB RAM ECC UnCorrectable Error */ ADF_CSR_WR(aram_csr_base, ADF_C4XXX_IC_CTPB_UERR + offset, ADF_C4XXX_CTPB_UERR_VAL); /* Set CPP Interface Status */ ADF_CSR_WR(aram_csr_base, ADF_C4XXX_IC_CPPM_ERR_STAT + offset, ADF_C4XXX_CPPM_ERR_STAT_VAL); /* Set CGST_MGMT_INT */ ADF_CSR_WR(aram_csr_base, ADF_C4XXX_IC_CONGESTION_MGMT_INT + offset, ADF_C4XXX_CONGESTION_MGMT_INI_VAL); /* CPP Interface Status */ ADF_CSR_WR(aram_csr_base, ADF_C4XXX_IC_CPPT_ERR_STAT + offset, ADF_C4XXX_CPPT_ERR_STAT_VAL); /* MAC Interrupt Mask */ ADF_CSR_WR64(aram_csr_base, ADF_C4XXX_IC_MAC_IM + offset, ADF_C4XXX_MAC_IM_VAL); } static void c4xxx_enable_parse_extraction(struct adf_accel_dev *accel_dev) { struct resource *aram_csr_base; aram_csr_base = (&GET_BARS(accel_dev)[ADF_C4XXX_SRAM_BAR])->virt_addr; /* Enable Inline Parse Extraction CSRs */ /* Set IC_PARSE_CTRL register */ ADF_CSR_WR(aram_csr_base, ADF_C4XXX_IC_PARSE_CTRL_OFFSET, ADF_C4XXX_IC_PARSE_CTRL_OFFSET_DEFAULT_VALUE); /* Set IC_PARSE_FIXED_DATA(0) */ ADF_CSR_WR(aram_csr_base, ADF_C4XXX_IC_PARSE_FIXED_DATA(0), ADF_C4XXX_DEFAULT_IC_PARSE_FIXED_DATA_0); /* Set IC_PARSE_FIXED_LENGTH */ ADF_CSR_WR(aram_csr_base, ADF_C4XXX_IC_PARSE_FIXED_LENGTH, ADF_C4XXX_DEFAULT_IC_PARSE_FIXED_LEN); /* Configure ESP protocol from an IPv4 header */ ADF_CSR_WR(aram_csr_base, ADF_C4XXX_IC_PARSE_IPV4_OFFSET_0,
ADF_C4XXX_DEFAULT_IC_PARSE_IPV4_OFFS_0_VALUE); ADF_CSR_WR(aram_csr_base, ADF_C4XXX_IC_PARSE_IPV4_LENGTH_0, ADF_C4XXX_DEFAULT_IC_PARSE_IPV4_LEN_0_VALUE); /* Configure protocol extraction field from an IPv4 header */ ADF_CSR_WR(aram_csr_base, ADF_C4XXX_IC_PARSE_IPV4_OFFSET_1, ADF_C4XXX_DEFAULT_IC_PARSE_IPV4_OFFS_1_VALUE); ADF_CSR_WR(aram_csr_base, ADF_C4XXX_IC_PARSE_IPV4_LENGTH_1, ADF_C4XXX_DEFAULT_IC_PARSE_IPV4_LEN_1_VALUE); /* Configure SPI extraction field from an IPv4 header */ ADF_CSR_WR(aram_csr_base, ADF_C4XXX_IC_PARSE_IPV4_OFFSET_2, ADF_C4XXX_DEFAULT_IC_PARSE_IPV4_OFFS_2_VALUE); ADF_CSR_WR(aram_csr_base, ADF_C4XXX_IC_PARSE_IPV4_LENGTH_2, ADF_C4XXX_DEFAULT_IC_PARSE_IPV4_LEN_2_VALUE); /* Configure destination field IP address from an IPv4 header */ ADF_CSR_WR(aram_csr_base, ADF_C4XXX_IC_PARSE_IPV4_OFFSET_3, ADF_C4XXX_DEFAULT_IC_PARSE_IPV4_OFFS_3_VALUE); ADF_CSR_WR(aram_csr_base, ADF_C4XXX_IC_PARSE_IPV4_LENGTH_3, ADF_C4XXX_DEFAULT_IC_PARSE_IPV4_LEN_3_VALUE); /* Configure function number extraction field from an IPv6 header */ ADF_CSR_WR(aram_csr_base, ADF_C4XXX_IC_PARSE_IPV6_OFFSET_0, ADF_C4XXX_DEFAULT_IC_PARSE_IPV6_OFFS_0_VALUE); ADF_CSR_WR(aram_csr_base, ADF_C4XXX_IC_PARSE_IPV6_LENGTH_0, ADF_C4XXX_DEFAULT_IC_PARSE_IPV6_LEN_0_VALUE); /* Configure protocol extraction field from an IPv6 header */ ADF_CSR_WR(aram_csr_base, ADF_C4XXX_IC_PARSE_IPV6_OFFSET_1, ADF_C4XXX_DEFAULT_IC_PARSE_IPV6_OFFS_1_VALUE); ADF_CSR_WR(aram_csr_base, ADF_C4XXX_IC_PARSE_IPV6_LENGTH_1, ADF_C4XXX_DEFAULT_IC_PARSE_IPV6_LEN_1_VALUE); /* Configure SPI extraction field from an IPv6 header */ ADF_CSR_WR(aram_csr_base, ADF_C4XXX_IC_PARSE_IPV6_OFFSET_2, ADF_C4XXX_DEFAULT_IC_PARSE_IPV6_OFFS_2_VALUE); ADF_CSR_WR(aram_csr_base, ADF_C4XXX_IC_PARSE_IPV6_LENGTH_2, ADF_C4XXX_DEFAULT_IC_PARSE_IPV6_LEN_2_VALUE); /* Configure destination field IP address from an IPv6 header */ ADF_CSR_WR(aram_csr_base, ADF_C4XXX_IC_PARSE_IPV6_OFFSET_3, ADF_C4XXX_DEFAULT_IC_PARSE_IPV6_OFFS_3_VALUE); ADF_CSR_WR(aram_csr_base, ADF_C4XXX_IC_PARSE_IPV6_LENGTH_3, ADF_C4XXX_DEFAULT_IC_PARSE_IPV6_LEN_3_VALUE); } static int adf_get_inline_ipsec_algo_group(struct adf_accel_dev *accel_dev, unsigned long *ipsec_algo_group) { char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES]; if (adf_cfg_get_param_value( accel_dev, ADF_INLINE_SEC, ADF_INLINE_IPSEC_ALGO_GROUP, val)) return EFAULT; if (kstrtoul(val, 0, ipsec_algo_group)) return EFAULT; /* Verify the ipsec_algo_group */ if (*ipsec_algo_group >= IPSEC_ALGO_GROUP_DELIMITER) { device_printf( GET_DEV(accel_dev), "Unsupported IPSEC algo group %lu in config file!\n", *ipsec_algo_group); return EFAULT; } return 0; } static int c4xxx_init_inline_hw(struct adf_accel_dev *accel_dev) { u32 sa_entry_reg_value = 0; u32 sa_fn_lim = 0; u32 supported_algo = 0; struct resource *aram_csr_base; u32 offset; unsigned long ipsec_algo_group = IPSEC_DEFAUL_ALGO_GROUP; aram_csr_base = (&GET_BARS(accel_dev)[ADF_C4XXX_SRAM_BAR])->virt_addr; if (adf_get_inline_ipsec_algo_group(accel_dev, &ipsec_algo_group)) return EFAULT; sa_entry_reg_value |= (ADF_C4XXX_DEFAULT_LU_KEY_LEN << ADF_C4XXX_LU_KEY_LEN_BIT_OFFSET); if (ipsec_algo_group == IPSEC_DEFAUL_ALGO_GROUP) { sa_entry_reg_value |= ADF_C4XXX_DEFAULT_SA_SIZE; sa_fn_lim = ADF_C4XXX_FUNC_LIMIT(accel_dev, ADF_C4XXX_DEFAULT_SA_SIZE); supported_algo = ADF_C4XXX_DEFAULT_SUPPORTED_ALGORITHMS; } else if (ipsec_algo_group == IPSEC_ALGO_GROUP1) { sa_entry_reg_value |= ADF_C4XXX_ALGO_GROUP1_SA_SIZE; sa_fn_lim = ADF_C4XXX_FUNC_LIMIT(accel_dev, ADF_C4XXX_ALGO_GROUP1_SA_SIZE); supported_algo = 
ADF_C4XXX_SUPPORTED_ALGORITHMS_GROUP1; } else { return EFAULT; } /* REG_SA_ENTRY_CTRL register initialisation */ ADF_CSR_WR(aram_csr_base, ADF_C4XXX_REG_SA_ENTRY_CTRL, sa_entry_reg_value); /* REG_SAL_FUNC_LIMITS register initialisation. Only the first register * needs to be initialised here, as it is assigned to a physical * function. Other registers will be initialised by the LAN PF driver. * The function limit is initialised to its maximal value. */ ADF_CSR_WR(aram_csr_base, ADF_C4XXX_REG_SA_FUNC_LIMITS, sa_fn_lim); /* Initialize REG_SA_SCRATCH[0] register to * advertise supported crypto algorithms */ ADF_CSR_WR(aram_csr_base, ADF_C4XXX_REG_SA_SCRATCH_0, supported_algo); /* REG_SA_SCRATCH[2] register initialisation * to advertise supported crypto offload features. */ ADF_CSR_WR(aram_csr_base, ADF_C4XXX_REG_SA_SCRATCH_2, ADF_C4XXX_DEFAULT_CY_OFFLOAD_FEATURES); /* Overwrite default MAC_CFG register in ingress offset */ ADF_CSR_WR64(aram_csr_base, ADF_C4XXX_MAC_CFG + ADF_C4XXX_INLINE_INGRESS_OFFSET, ADF_C4XXX_MAC_CFG_VALUE); /* Overwrite default MAC_CFG register in egress offset */ ADF_CSR_WR64(aram_csr_base, ADF_C4XXX_MAC_CFG + ADF_C4XXX_INLINE_EGRESS_OFFSET, ADF_C4XXX_MAC_CFG_VALUE); /* Overwrite default MAC_PIA_CFG * (Packet Interface Adapter Configuration) registers * in ingress offset */ ADF_CSR_WR64(aram_csr_base, ADF_C4XXX_MAC_PIA_CFG + ADF_C4XXX_INLINE_INGRESS_OFFSET, ADF_C4XXX_MAC_PIA_CFG_VALUE); /* Overwrite default MAC_PIA_CFG in egress offset */ ADF_CSR_WR64(aram_csr_base, ADF_C4XXX_MAC_PIA_CFG + ADF_C4XXX_INLINE_EGRESS_OFFSET, ADF_C4XXX_MAC_PIA_CFG_VALUE); c4xxx_enable_parse_extraction(accel_dev); ADF_CSR_WR(aram_csr_base, ADF_C4XXX_INGRESS_CMD_DIS_MISC, ADF_C4XXX_REG_CMD_DIS_MISC_DEFAULT_VALUE); ADF_CSR_WR(aram_csr_base, ADF_C4XXX_EGRESS_CMD_DIS_MISC, ADF_C4XXX_REG_CMD_DIS_MISC_DEFAULT_VALUE); /* Set bits<1:0> in ADF_C4XXX_INLINE_CAPABILITY register to * advertise that both ingress and egress directions are available */ ADF_CSR_WR(aram_csr_base, ADF_C4XXX_REG_SA_INLINE_CAPABILITY, ADF_C4XXX_INLINE_CAPABILITIES); /* Set error notification configuration of ingress */ offset = ADF_C4XXX_INLINE_INGRESS_OFFSET; c4xxx_init_error_notification_configuration(accel_dev, offset); /* Set error notification configuration of egress */ offset = ADF_C4XXX_INLINE_EGRESS_OFFSET; c4xxx_init_error_notification_configuration(accel_dev, offset); return 0; } static void adf_enable_inline_notification(struct adf_accel_dev *accel_dev) { struct resource *aram_csr_base; aram_csr_base = (&GET_BARS(accel_dev)[ADF_C4XXX_SRAM_BAR])->virt_addr; /* Set bit<0> in ADF_C4XXX_REG_SA_INLINE_ENABLE to advertise * that inline is enabled.
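 * Note that this flag is only written after the SADB, parser
 * extraction and error notification CSRs above have been programmed,
 * so inline is never advertised on a half-configured device.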
*/ ADF_CSR_WR(aram_csr_base, ADF_C4XXX_REG_SA_INLINE_ENABLE, ADF_C4XXX_INLINE_ENABLED); } static int c4xxx_init_aram_config(struct adf_accel_dev *accel_dev) { u32 aram_size = ADF_C4XXX_2MB_ARAM_SIZE; u32 ibuff_mem_needed = 0; u32 usable_aram_size = 0; struct adf_hw_aram_info *aram_info; u32 sa_db_ctl_value; struct resource *aram_csr_base; u8 profile = 0; u32 sadb_size = 0; u32 sa_size = 0; unsigned long ipsec_algo_group = IPSEC_DEFAUL_ALGO_GROUP; u32 i; if (accel_dev->au_info->num_inline_au > 0) if (adf_get_inline_ipsec_algo_group(accel_dev, &ipsec_algo_group)) return EFAULT; /* Allocate memory for adf_hw_aram_info */ aram_info = kzalloc(sizeof(*accel_dev->aram_info), GFP_KERNEL); if (!aram_info) return ENOMEM; /* Initialise Inline direction */ aram_info->inline_direction_egress_mask = 0; if (accel_dev->au_info->num_inline_au) { /* Set inline direction bitmap in the ARAM to * inform firmware which ME is egress */ aram_info->inline_direction_egress_mask = accel_dev->au_info->inline_egress_msk; /* User profile is valid, we can now add it * in the ARAM partition table */ aram_info->inline_congest_mngt_profile = profile; } /* Initialise DC ME mask, "1" = ME is used for DC operations */ aram_info->dc_ae_mask = accel_dev->au_info->dc_ae_msk; /* Initialise CY ME mask, "1" = ME is used for CY operations * Since asym service can also be enabled on inline AEs, here * we use the sym ae mask for configuring the cy_ae_msk */ aram_info->cy_ae_mask = accel_dev->au_info->sym_ae_msk; /* Configure number of long words in the ARAM */ aram_info->num_aram_lw_entries = ADF_C4XXX_NUM_ARAM_ENTRIES; /* Reset region offset values to 0xffffffff */ aram_info->mmp_region_offset = ~aram_info->mmp_region_offset; aram_info->skm_region_offset = ~aram_info->skm_region_offset; aram_info->inter_buff_aram_region_offset = ~aram_info->inter_buff_aram_region_offset; /* Determine ARAM size */ aram_csr_base = (&GET_BARS(accel_dev)[ADF_C4XXX_SRAM_BAR])->virt_addr; sa_db_ctl_value = ADF_CSR_RD(aram_csr_base, ADF_C4XXX_REG_SA_DB_CTRL); aram_size = (sa_db_ctl_value & ADF_C4XXX_SADB_SIZE_BIT) ? ADF_C4XXX_2MB_ARAM_SIZE : ADF_C4XXX_4MB_ARAM_SIZE; device_printf(GET_DEV(accel_dev), "Total available accelerator memory: %uMB\n", aram_size / ADF_C4XXX_1MB_SIZE); /* Compute MMP region offset */ aram_info->mmp_region_size = ADF_C4XXX_DEFAULT_MMP_REGION_SIZE; aram_info->mmp_region_offset = aram_size - aram_info->mmp_region_size; if (accel_dev->au_info->num_cy_au || accel_dev->au_info->num_inline_au) { /* Crypto is available, therefore we must * include space in the ARAM for SKM. */ aram_info->skm_region_size = ADF_C4XXX_DEFAULT_SKM_REGION_SIZE; /* Compute SKM region offset */ aram_info->skm_region_offset = aram_size - (aram_info->mmp_region_size + aram_info->skm_region_size); } /* SADB always starts at offset 0. */ if (accel_dev->au_info->num_inline_au) { /* Inline is available, therefore we must * use remaining ARAM for the SADB. */ sadb_size = aram_size - (aram_info->mmp_region_size + aram_info->skm_region_size); /* * When the inline service is enabled, the policy is that * compression gives up its space in ARAM to allow for a * larger SADB. Compression must use DRAM instead of ARAM. */ aram_info->inter_buff_aram_region_size = 0; /* The SADB size must be an integral multiple of the SA size */ if (ipsec_algo_group == IPSEC_DEFAUL_ALGO_GROUP) { sa_size = ADF_C4XXX_DEFAULT_SA_SIZE; } else { /* IPSEC_ALGO_GROUP1 * Total 2 algo groups.
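 * Worked example with illustrative figures: if 1000000 bytes of ARAM
 * remain for the SADB and one SA entry occupies 2048 bytes, the modulo
 * below trims the SADB to 1000000 - (1000000 % 2048) = 999424 bytes,
 * i.e. 488 whole entries.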
*/ sa_size = ADF_C4XXX_ALGO_GROUP1_SA_SIZE; } sadb_size = sadb_size - (sadb_size % ADF_C4XXX_SA_SIZE_IN_BYTES(sa_size)); aram_info->sadb_region_size = sadb_size; } if (accel_dev->au_info->num_dc_au && !accel_dev->au_info->num_inline_au) { /* Compression is available, therefore we must see if there is * space in the ARAM for intermediate buffers. */ aram_info->inter_buff_aram_region_size = 0; usable_aram_size = aram_size - (aram_info->mmp_region_size + aram_info->skm_region_size); for (i = 1; i <= accel_dev->au_info->num_dc_au; i++) { if ((i * ADF_C4XXX_AU_COMPR_INTERM_SIZE) > usable_aram_size) break; ibuff_mem_needed = i * ADF_C4XXX_AU_COMPR_INTERM_SIZE; } /* Set remaining ARAM to intermediate buffers. Firmware handles * fallback to DRAM for cases where the number of AUs assigned * to compression exceeds the available ARAM memory. */ aram_info->inter_buff_aram_region_size = ibuff_mem_needed; /* If ARAM is used for compression, set its initial offset. */ if (aram_info->inter_buff_aram_region_size) aram_info->inter_buff_aram_region_offset = 0; } accel_dev->aram_info = aram_info; return 0; } static void c4xxx_exit_aram_config(struct adf_accel_dev *accel_dev) { kfree(accel_dev->aram_info); accel_dev->aram_info = NULL; } static u32 get_num_accel_units(struct adf_hw_device_data *self) { u32 i = 0, num_accel = 0; unsigned long accel_mask = 0; if (!self || !self->accel_mask) return 0; accel_mask = self->accel_mask; for_each_set_bit(i, &accel_mask, ADF_C4XXX_MAX_ACCELERATORS) { num_accel++; } return num_accel / ADF_C4XXX_NUM_ACCEL_PER_AU; } static int get_accel_unit(struct adf_hw_device_data *self, struct adf_accel_unit **accel_unit) { enum dev_sku_info sku; sku = get_sku(self); switch (sku) { case DEV_SKU_1: case DEV_SKU_1_CY: *accel_unit = adf_c4xxx_au_32_ae; break; case DEV_SKU_2: case DEV_SKU_2_CY: *accel_unit = adf_c4xxx_au_24_ae; break; case DEV_SKU_3: case DEV_SKU_3_CY: *accel_unit = adf_c4xxx_au_12_ae; break; default: *accel_unit = adf_c4xxx_au_emulation; break; } return 0; } static int get_ae_info(struct adf_hw_device_data *self, const struct adf_ae_info **ae_info) { enum dev_sku_info sku; sku = get_sku(self); switch (sku) { case DEV_SKU_1: *ae_info = adf_c4xxx_32_ae; break; case DEV_SKU_1_CY: *ae_info = adf_c4xxx_32_ae_sym; break; case DEV_SKU_2: *ae_info = adf_c4xxx_24_ae; break; case DEV_SKU_2_CY: *ae_info = adf_c4xxx_24_ae_sym; break; case DEV_SKU_3: *ae_info = adf_c4xxx_12_ae; break; case DEV_SKU_3_CY: *ae_info = adf_c4xxx_12_ae_sym; break; default: *ae_info = adf_c4xxx_12_ae; break; } return 0; } static int adf_add_debugfs_info(struct adf_accel_dev *accel_dev) { /* Add Accel Unit configuration table to debug FS interface */ if (c4xxx_init_ae_config(accel_dev)) { device_printf(GET_DEV(accel_dev), "Failed to create entry for AE configuration\n"); return EFAULT; } return 0; } static void adf_remove_debugfs_info(struct adf_accel_dev *accel_dev) { /* Remove Accel Unit configuration table from debug FS interface */ c4xxx_exit_ae_config(accel_dev); } static int check_svc_to_hw_capabilities(struct adf_accel_dev *accel_dev, const char *svc_name, enum icp_qat_capabilities_mask cap) { struct adf_hw_device_data *hw_data = accel_dev->hw_device; u32 hw_cap = hw_data->accel_capabilities_mask; hw_cap &= cap; if (hw_cap != cap) { device_printf(GET_DEV(accel_dev), "Service not supported by accelerator: %s\n", svc_name); return EPERM; } return 0; } static int check_accel_unit_config(struct adf_accel_dev *accel_dev, u8 num_cy_au, u8 num_dc_au, u8 num_inline_au) { struct adf_hw_device_data *hw_data =
accel_dev->hw_device; char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES]; char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES]; u32 num_au = hw_data->get_num_accel_units(hw_data); u32 service_mask = ADF_ACCEL_SERVICE_NULL; char *token, *cur_str; int ret = 0; /* Get the services enabled by the user */ snprintf(key, sizeof(key), ADF_SERVICES_ENABLED); if (adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC, key, val)) return EFAULT; cur_str = val; token = strsep(&cur_str, ADF_SERVICES_SEPARATOR); while (token) { if (!strncmp(token, ADF_SERVICE_CY, strlen(ADF_SERVICE_CY))) { service_mask |= ADF_ACCEL_CRYPTO; ret |= check_svc_to_hw_capabilities( accel_dev, token, ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC | ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC); } if (!strncmp(token, ADF_CFG_SYM, strlen(ADF_CFG_SYM))) { service_mask |= ADF_ACCEL_CRYPTO; ret |= check_svc_to_hw_capabilities( accel_dev, token, ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC); } if (!strncmp(token, ADF_CFG_ASYM, strlen(ADF_CFG_ASYM))) { /* Handle a special case of services 'asym;inline' * enabled where ASYM is handled by Inline firmware * at AE level. This configuration allows enabling the * ASYM service without accel units assigned to * the CRYPTO service, e.g. * num_inline_au = 6 * num_cy_au = 0 */ if (num_inline_au < num_au) service_mask |= ADF_ACCEL_CRYPTO; ret |= check_svc_to_hw_capabilities( accel_dev, token, ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC); } if (!strncmp(token, ADF_SERVICE_DC, strlen(ADF_SERVICE_DC))) { service_mask |= ADF_ACCEL_COMPRESSION; ret |= check_svc_to_hw_capabilities( accel_dev, token, ICP_ACCEL_CAPABILITIES_COMPRESSION); } if (!strncmp(token, ADF_SERVICE_INLINE, strlen(ADF_SERVICE_INLINE))) { service_mask |= ADF_ACCEL_INLINE_CRYPTO; ret |= check_svc_to_hw_capabilities( accel_dev, token, ICP_ACCEL_CAPABILITIES_INLINE); } token = strsep(&cur_str, ADF_SERVICES_SEPARATOR); } /* Ensure the user doesn't enable services that are not supported by * the accelerator.
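 * Each check_svc_to_hw_capabilities() call above has already printed
 * which service failed and ORed its result into ret, so a single
 * EFAULT below is enough to reject the whole configuration.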
*/ if (ret) { device_printf(GET_DEV(accel_dev), "Invalid accelerator configuration.\n"); return EFAULT; } if (!(service_mask & ADF_ACCEL_COMPRESSION) && num_dc_au > 0) { device_printf(GET_DEV(accel_dev), "Invalid accel unit config.\n"); device_printf( GET_DEV(accel_dev), "DC accel units set when dc service not enabled\n"); return EFAULT; } if (!(service_mask & ADF_ACCEL_CRYPTO) && num_cy_au > 0) { device_printf(GET_DEV(accel_dev), "Invalid accel unit config.\n"); device_printf( GET_DEV(accel_dev), "CY accel units set when cy service not enabled\n"); return EFAULT; } if (!(service_mask & ADF_ACCEL_INLINE_CRYPTO) && num_inline_au > 0) { device_printf(GET_DEV(accel_dev), "Invalid accel unit config.\n" "Inline feature not supported.\n"); return EFAULT; } hw_data->service_mask = service_mask; /* Ensure the user doesn't allocate more than max accel units */ if (num_au != (num_cy_au + num_dc_au + num_inline_au)) { device_printf(GET_DEV(accel_dev), "Invalid accel unit config.\n"); device_printf(GET_DEV(accel_dev), "Max accel units is %d\n", num_au); return EFAULT; } /* Ensure user allocates hardware resources for enabled services */ if (!num_cy_au && (service_mask & ADF_ACCEL_CRYPTO)) { device_printf(GET_DEV(accel_dev), "Failed to enable cy service!\n"); device_printf(GET_DEV(accel_dev), "%s should not be 0", ADF_NUM_CY_ACCEL_UNITS); return EFAULT; } if (!num_dc_au && (service_mask & ADF_ACCEL_COMPRESSION)) { device_printf(GET_DEV(accel_dev), "Failed to enable dc service!\n"); device_printf(GET_DEV(accel_dev), "%s should not be 0", ADF_NUM_DC_ACCEL_UNITS); return EFAULT; } if (!num_inline_au && (service_mask & ADF_ACCEL_INLINE_CRYPTO)) { device_printf(GET_DEV(accel_dev), "Failed to enable"); device_printf(GET_DEV(accel_dev), " inline service!"); device_printf(GET_DEV(accel_dev), " %s should not be 0\n", ADF_NUM_INLINE_ACCEL_UNITS); return EFAULT; } return 0; } static int get_accel_unit_config(struct adf_accel_dev *accel_dev, u8 *num_cy_au, u8 *num_dc_au, u8 *num_inline_au) { char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES]; char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES]; /* Get the number of accel units allocated for each service */ snprintf(key, sizeof(key), ADF_NUM_CY_ACCEL_UNITS); if (adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC, key, val)) return EFAULT; if (compat_strtou8(val, 10, num_cy_au)) return EFAULT; snprintf(key, sizeof(key), ADF_NUM_DC_ACCEL_UNITS); if (adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC, key, val)) return EFAULT; if (compat_strtou8(val, 10, num_dc_au)) return EFAULT; snprintf(key, sizeof(key), ADF_NUM_INLINE_ACCEL_UNITS); if (adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC, key, val)) return EFAULT; if (compat_strtou8(val, 10, num_inline_au)) return EFAULT; return 0; } /* Function reads the inline ingress/egress configuration * and returns the number of AEs reserved for ingress * and egress for accel units which are allocated for * inline service */ static int adf_get_inline_config(struct adf_accel_dev *accel_dev, u32 *num_ingress_aes) { struct adf_hw_device_data *hw_data = accel_dev->hw_device; char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES]; char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES]; char *value; u32 num_au = hw_data->get_num_accel_units(hw_data); unsigned long ingress, egress = 0; struct adf_accel_unit *accel_unit = accel_dev->au_info->au; u32 num_inline_aes = 0, num_ingress_ae = 0; u32 i = 0; snprintf(key, sizeof(key), ADF_INLINE_INGRESS); if (adf_cfg_get_param_value(accel_dev, ADF_INLINE_SEC, key, val)) { device_printf(GET_DEV(accel_dev), "Failed to find ingress\n"); return EFAULT; 
} value = val; value = strsep(&value, ADF_C4XXX_PERCENTAGE); if (compat_strtoul(value, 10, &ingress)) return EFAULT; snprintf(key, sizeof(key), ADF_INLINE_EGRESS); if (adf_cfg_get_param_value(accel_dev, ADF_INLINE_SEC, key, val)) { device_printf(GET_DEV(accel_dev), "Failed to find egress\n"); return EFAULT; } value = val; value = strsep(&value, ADF_C4XXX_PERCENTAGE); if (compat_strtoul(value, 10, &egress)) return EFAULT; if (ingress + egress != ADF_C4XXX_100) { device_printf(GET_DEV(accel_dev), "The sum of ingress and egress should be 100\n"); return EFAULT; } for (i = 0; i < num_au; i++) { if (accel_unit[i].services == ADF_ACCEL_INLINE_CRYPTO) num_inline_aes += accel_unit[i].num_ae; } num_ingress_ae = num_inline_aes * ingress / ADF_C4XXX_100; if (((num_inline_aes * ingress) % ADF_C4XXX_100) > ADF_C4XXX_ROUND_LIMIT) num_ingress_ae++; *num_ingress_aes = num_ingress_ae; return 0; } static int adf_set_inline_ae_mask(struct adf_accel_dev *accel_dev) { struct adf_hw_device_data *hw_data = accel_dev->hw_device; u32 num_au = hw_data->get_num_accel_units(hw_data); struct adf_accel_unit_info *au_info = accel_dev->au_info; struct adf_accel_unit *accel_unit = accel_dev->au_info->au; u32 num_ingress_ae = 0; u32 ingress_msk = 0; u32 i, j, ae_mask; if (adf_get_inline_config(accel_dev, &num_ingress_ae)) return EFAULT; for (i = 0; i < num_au; i++) { j = 0; if (accel_unit[i].services == ADF_ACCEL_INLINE_CRYPTO) { /* AEs with inline service enabled are also used * for asymmetric crypto */ au_info->asym_ae_msk |= accel_unit[i].ae_mask; ae_mask = accel_unit[i].ae_mask; while (num_ingress_ae && ae_mask) { if (ae_mask & 1) { ingress_msk |= BIT(j); num_ingress_ae--; } ae_mask = ae_mask >> 1; j++; } au_info->inline_ingress_msk |= ingress_msk; au_info->inline_egress_msk |= ~(au_info->inline_ingress_msk) & accel_unit[i].ae_mask; } } return 0; } static int adf_set_ae_mask(struct adf_accel_dev *accel_dev) { struct adf_hw_device_data *hw_data = accel_dev->hw_device; u32 num_au = hw_data->get_num_accel_units(hw_data); struct adf_accel_unit_info *au_info = accel_dev->au_info; struct adf_accel_unit *accel_unit = accel_dev->au_info->au; char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES]; char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES]; char *token, *cur_str; bool asym_en = false, sym_en = false; u32 i; /* Get the services enabled by user */ snprintf(key, sizeof(key), ADF_SERVICES_ENABLED); if (adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC, key, val)) return EFAULT; cur_str = val; token = strsep(&cur_str, ADF_SERVICES_SEPARATOR); while (token) { if (!strncmp(token, ADF_CFG_ASYM, strlen(ADF_CFG_ASYM))) asym_en = true; if (!strncmp(token, ADF_CFG_SYM, strlen(ADF_CFG_SYM))) sym_en = true; if (!strncmp(token, ADF_CFG_CY, strlen(ADF_CFG_CY))) { sym_en = true; asym_en = true; } token = strsep(&cur_str, ADF_SERVICES_SEPARATOR); } for (i = 0; i < num_au; i++) { if (accel_unit[i].services == ADF_ACCEL_CRYPTO) { /* AEs that support crypto can perform both * symmetric and asymmetric crypto, however * we only enable the threads if the relevant * service is also enabled */ if (asym_en) au_info->asym_ae_msk |= accel_unit[i].ae_mask; if (sym_en) au_info->sym_ae_msk |= accel_unit[i].ae_mask; } else if (accel_unit[i].services == ADF_ACCEL_COMPRESSION) { au_info->dc_ae_msk |= accel_unit[i].comp_ae_mask; } } return 0; } static int adf_init_accel_unit_services(struct adf_accel_dev *accel_dev) { u8 num_cy_au, num_dc_au, num_inline_au; struct adf_hw_device_data *hw_data = accel_dev->hw_device; u32 num_au = hw_data->get_num_accel_units(hw_data); struct 
adf_accel_unit *accel_unit; const struct adf_ae_info *ae_info; int i; if (get_accel_unit_config( accel_dev, &num_cy_au, &num_dc_au, &num_inline_au)) { device_printf(GET_DEV(accel_dev), "Invalid accel unit cfg\n"); return EFAULT; } if (check_accel_unit_config( accel_dev, num_cy_au, num_dc_au, num_inline_au)) return EFAULT; accel_dev->au_info = kzalloc(sizeof(*accel_dev->au_info), GFP_KERNEL); if (!accel_dev->au_info) return ENOMEM; accel_dev->au_info->num_cy_au = num_cy_au; accel_dev->au_info->num_dc_au = num_dc_au; accel_dev->au_info->num_inline_au = num_inline_au; if (get_ae_info(hw_data, &ae_info)) { device_printf(GET_DEV(accel_dev), "Failed to get ae info\n"); goto err_au_info; } accel_dev->au_info->ae_info = ae_info; if (get_accel_unit(hw_data, &accel_unit)) { device_printf(GET_DEV(accel_dev), "Failed to get accel unit\n"); goto err_ae_info; } /* Enable compression accel units */ /* Accel units with 4 AEs are reserved for compression first */ for (i = num_au - 1; i >= 0 && num_dc_au > 0; i--) { if (accel_unit[i].num_ae == ADF_C4XXX_4_AE) { accel_unit[i].services = ADF_ACCEL_COMPRESSION; num_dc_au--; } } for (i = num_au - 1; i >= 0 && num_dc_au > 0; i--) { if (accel_unit[i].services == ADF_ACCEL_SERVICE_NULL) { accel_unit[i].services = ADF_ACCEL_COMPRESSION; num_dc_au--; } } /* Enable inline accel units */ for (i = 0; i < num_au && num_inline_au > 0; i++) { if (accel_unit[i].services == ADF_ACCEL_SERVICE_NULL) { accel_unit[i].services = ADF_ACCEL_INLINE_CRYPTO; num_inline_au--; } } /* Enable crypto accel units */ for (i = 0; i < num_au && num_cy_au > 0; i++) { if (accel_unit[i].services == ADF_ACCEL_SERVICE_NULL) { accel_unit[i].services = ADF_ACCEL_CRYPTO; num_cy_au--; } } accel_dev->au_info->au = accel_unit; return 0; err_ae_info: accel_dev->au_info->ae_info = NULL; err_au_info: kfree(accel_dev->au_info); accel_dev->au_info = NULL; return EFAULT; } static void adf_exit_accel_unit_services(struct adf_accel_dev *accel_dev) { struct adf_hw_device_data *hw_data = accel_dev->hw_device; u32 num_au = hw_data->get_num_accel_units(hw_data); int i; if (accel_dev->au_info) { if (accel_dev->au_info->au) { for (i = 0; i < num_au; i++) { accel_dev->au_info->au[i].services = ADF_ACCEL_SERVICE_NULL; } } accel_dev->au_info->au = NULL; accel_dev->au_info->ae_info = NULL; kfree(accel_dev->au_info); accel_dev->au_info = NULL; } } static inline void adf_c4xxx_reset_hw_units(struct adf_accel_dev *accel_dev) { struct resource *pmisc = (&GET_BARS(accel_dev)[ADF_C4XXX_PMISC_BAR])->virt_addr; u32 global_clk_enable = ADF_C4XXX_GLOBAL_CLK_ENABLE_GENERIC_ARAM | ADF_C4XXX_GLOBAL_CLK_ENABLE_GENERIC_ICI_ENABLE | ADF_C4XXX_GLOBAL_CLK_ENABLE_GENERIC_ICE_ENABLE; u32 ixp_reset_generic = ADF_C4XXX_IXP_RESET_GENERIC_ARAM | ADF_C4XXX_IXP_RESET_GENERIC_INLINE_EGRESS | ADF_C4XXX_IXP_RESET_GENERIC_INLINE_INGRESS; /* To properly reset each of the units the driver must: * 1) Assert the reset-active state using the IXP reset * generic register; * 2) Disable the generic clock; * 3) Take the device out of reset by clearing the IXP reset * generic register; * 4) Re-enable the generic clock. */ ADF_CSR_WR(pmisc, ADF_C4XXX_IXP_RESET_GENERIC, ixp_reset_generic); ADF_CSR_WR(pmisc, ADF_C4XXX_GLOBAL_CLK_ENABLE_GENERIC, ADF_C4XXX_GLOBAL_CLK_ENABLE_GENERIC_DISABLE_ALL); ADF_CSR_WR(pmisc, ADF_C4XXX_IXP_RESET_GENERIC, ADF_C4XXX_IXP_RESET_GENERIC_OUT_OF_RESET_TRIGGER); ADF_CSR_WR(pmisc, ADF_C4XXX_GLOBAL_CLK_ENABLE_GENERIC, global_clk_enable); } static int adf_init_accel_units(struct adf_accel_dev *accel_dev) { struct resource *csr =
(&GET_BARS(accel_dev)[ADF_C4XXX_PMISC_BAR])->virt_addr; if (adf_init_accel_unit_services(accel_dev)) return EFAULT; /* Set cy and dc enabled AE masks */ if (accel_dev->au_info->num_cy_au || accel_dev->au_info->num_dc_au) { if (adf_set_ae_mask(accel_dev)) { device_printf(GET_DEV(accel_dev), "Failed to set ae masks\n"); goto err_au; } } /* Set ingress/egress ae mask if inline is enabled */ if (accel_dev->au_info->num_inline_au) { if (adf_set_inline_ae_mask(accel_dev)) { device_printf(GET_DEV(accel_dev), "Failed to set inline ae masks\n"); goto err_au; } } /* Define ARAM regions */ if (c4xxx_init_aram_config(accel_dev)) { device_printf(GET_DEV(accel_dev), "Failed to init aram config\n"); goto err_au; } /* Configure h/w registers for inline operations */ if (accel_dev->au_info->num_inline_au > 0) /* Initialise configuration parsing registers */ if (c4xxx_init_inline_hw(accel_dev)) goto err_au; c4xxx_set_sadb_size(accel_dev); if (accel_dev->au_info->num_inline_au > 0) { /* The ICI/ICE interrupts shall be enabled after MSI-X is enabled */ ADF_CSR_WR(csr, ADF_C4XXX_ERRMSK11, ADF_C4XXX_ERRMSK11_ERR_DISABLE_ICI_ICE_INTR); adf_enable_inline_notification(accel_dev); } update_hw_capability(accel_dev); if (adf_add_debugfs_info(accel_dev)) { device_printf(GET_DEV(accel_dev), "Failed to add debug FS information\n"); goto err_au; } return 0; err_au: /* Free and clear accel unit data structures */ adf_exit_accel_unit_services(accel_dev); return EFAULT; } static void adf_exit_accel_units(struct adf_accel_dev *accel_dev) { adf_exit_accel_unit_services(accel_dev); /* Free aram mapping structure */ c4xxx_exit_aram_config(accel_dev); /* Remove entries in debug FS */ adf_remove_debugfs_info(accel_dev); } static const char * get_obj_name(struct adf_accel_dev *accel_dev, enum adf_accel_unit_services service) { u32 capabilities = GET_HW_DATA(accel_dev)->accel_capabilities_mask; bool sym_only_sku = false; /* Check if SKU is capable only of symmetric cryptography * via device capabilities.
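 * That is, the SYM capability bit is set while both the ASYM and
 * COMPRESSION bits are clear; such SKUs load the dedicated
 * ADF_C4XXX_SYM_OBJ firmware object below.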
*/ if ((capabilities & ADF_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC) && !(capabilities & ADF_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC) && !(capabilities & ADF_ACCEL_CAPABILITIES_COMPRESSION)) sym_only_sku = true; switch (service) { case ADF_ACCEL_INLINE_CRYPTO: return ADF_C4XXX_INLINE_OBJ; case ADF_ACCEL_CRYPTO: if (sym_only_sku) return ADF_C4XXX_SYM_OBJ; else return ADF_C4XXX_CY_OBJ; break; case ADF_ACCEL_COMPRESSION: return ADF_C4XXX_DC_OBJ; default: return NULL; } } static uint32_t get_objs_num(struct adf_accel_dev *accel_dev) { u32 srv = 0; u32 max_srv_id = 0; unsigned long service_mask = accel_dev->hw_device->service_mask; /* The objects number corresponds to the number of services */ for_each_set_bit(srv, &service_mask, ADF_C4XXX_MAX_OBJ) { max_srv_id = srv; } return (max_srv_id + 1); } static uint32_t get_obj_cfg_ae_mask(struct adf_accel_dev *accel_dev, enum adf_accel_unit_services service) { u32 ae_mask = 0; struct adf_hw_device_data *hw_data = accel_dev->hw_device; u32 num_au = hw_data->get_num_accel_units(hw_data); struct adf_accel_unit *accel_unit = accel_dev->au_info->au; u32 i = 0; if (service == ADF_ACCEL_SERVICE_NULL) return 0; for (i = 0; i < num_au; i++) { if (accel_unit[i].services == service) ae_mask |= accel_unit[i].ae_mask; } return ae_mask; } static void configure_iov_threads(struct adf_accel_dev *accel_dev, bool enable) { struct resource *addr; struct adf_hw_device_data *hw_data = accel_dev->hw_device; u32 num_aes = hw_data->get_num_aes(hw_data); u32 reg = 0x0; u32 i; addr = (&GET_BARS(accel_dev)[ADF_C4XXX_PMISC_BAR])->virt_addr; /* Set/Unset Valid bits in AE Thread to PCIe Function Mapping */ for (i = 0; i < ADF_C4XXX_AE2FUNC_REG_PER_AE * num_aes; i++) { reg = ADF_CSR_RD(addr + ADF_C4XXX_AE2FUNC_MAP_OFFSET, i * ADF_C4XXX_AE2FUNC_MAP_REG_SIZE); if (enable) reg |= ADF_C4XXX_AE2FUNC_MAP_VALID; else reg &= ~ADF_C4XXX_AE2FUNC_MAP_VALID; ADF_CSR_WR(addr + ADF_C4XXX_AE2FUNC_MAP_OFFSET, i * ADF_C4XXX_AE2FUNC_MAP_REG_SIZE, reg); } } -static int -adf_get_heartbeat_status_c4xxx(struct adf_accel_dev *accel_dev) -{ - struct adf_hw_device_data *hw_device = accel_dev->hw_device; - struct icp_qat_fw_init_c4xxx_admin_hb_stats *live_s = - (struct icp_qat_fw_init_c4xxx_admin_hb_stats *) - accel_dev->admin->virt_hb_addr; - const size_t max_aes = hw_device->get_num_aes(hw_device); - const size_t stats_size = - max_aes * sizeof(struct icp_qat_fw_init_c4xxx_admin_hb_stats); - int ret = 0; - size_t ae = 0, thr; - unsigned long ae_mask = 0; - int num_threads_per_ae = ADF_NUM_THREADS_PER_AE; - - /* - * Memory layout of Heartbeat - * - * +----------------+----------------+---------+ - * | Live value | Last value | Count | - * +----------------+----------------+---------+ - * \_______________/\_______________/\________/ - * ^ ^ ^ - * | | | - * | | max_aes * sizeof(adf_hb_count) - * | max_aes * - * sizeof(icp_qat_fw_init_c4xxx_admin_hb_stats) - * max_aes * sizeof(icp_qat_fw_init_c4xxx_admin_hb_stats) - */ - struct icp_qat_fw_init_c4xxx_admin_hb_stats *curr_s; - struct icp_qat_fw_init_c4xxx_admin_hb_stats *last_s = live_s + max_aes; - struct adf_hb_count *count = (struct adf_hb_count *)(last_s + max_aes); - - curr_s = malloc(stats_size, M_QAT, M_WAITOK | M_ZERO); - - memcpy(curr_s, live_s, stats_size); - ae_mask = hw_device->ae_mask; - - for_each_set_bit(ae, &ae_mask, max_aes) - { - for (thr = 0; thr < num_threads_per_ae; ++thr) { - struct icp_qat_fw_init_admin_hb_cnt *curr = - &curr_s[ae].stats[thr]; - struct icp_qat_fw_init_admin_hb_cnt *prev = - &last_s[ae].stats[thr]; - u16 req = 
curr->req_heartbeat_cnt; - u16 resp = curr->resp_heartbeat_cnt; - u16 last = prev->resp_heartbeat_cnt; - - if ((thr == ADF_AE_ADMIN_THREAD || req != resp) && - resp == last) { - u16 retry = ++count[ae].ae_thread[thr]; - - if (retry >= ADF_CFG_HB_COUNT_THRESHOLD) - ret = EIO; - } else { - count[ae].ae_thread[thr] = 0; - } - } - } - - /* Copy current stats for the next iteration */ - memcpy(last_s, curr_s, stats_size); - free(curr_s, M_QAT); - - return ret; -} - void adf_init_hw_data_c4xxx(struct adf_hw_device_data *hw_data) { hw_data->dev_class = &c4xxx_class; hw_data->instance_id = c4xxx_class.instances++; hw_data->num_banks = ADF_C4XXX_ETR_MAX_BANKS; hw_data->num_rings_per_bank = ADF_C4XXX_NUM_RINGS_PER_BANK; hw_data->num_accel = ADF_C4XXX_MAX_ACCELERATORS; hw_data->num_engines = ADF_C4XXX_MAX_ACCELENGINES; hw_data->num_logical_accel = 1; hw_data->tx_rx_gap = ADF_C4XXX_RX_RINGS_OFFSET; hw_data->tx_rings_mask = ADF_C4XXX_TX_RINGS_MASK; hw_data->alloc_irq = adf_isr_resource_alloc; hw_data->free_irq = adf_isr_resource_free; hw_data->enable_error_correction = adf_enable_error_correction; hw_data->init_ras = adf_init_ras; hw_data->exit_ras = adf_exit_ras; hw_data->ras_interrupts = adf_ras_interrupts; hw_data->get_accel_mask = get_accel_mask; hw_data->get_ae_mask = get_ae_mask; hw_data->get_num_accels = get_num_accels; hw_data->get_num_aes = get_num_aes; hw_data->get_num_accel_units = get_num_accel_units; hw_data->get_sram_bar_id = get_sram_bar_id; hw_data->get_etr_bar_id = get_etr_bar_id; hw_data->get_misc_bar_id = get_misc_bar_id; hw_data->get_pf2vf_offset = get_pf2vf_offset; hw_data->get_vintmsk_offset = get_vintmsk_offset; hw_data->get_arb_info = get_arb_info; hw_data->get_admin_info = get_admin_info; hw_data->get_errsou_offset = get_errsou_offset; hw_data->get_clock_speed = get_clock_speed; hw_data->get_eth_doorbell_msg = get_eth_doorbell_msg; hw_data->get_sku = get_sku; + hw_data->heartbeat_ctr_num = ADF_NUM_THREADS_PER_AE; hw_data->check_prod_sku = c4xxx_check_prod_sku; hw_data->fw_name = ADF_C4XXX_FW; hw_data->fw_mmp_name = ADF_C4XXX_MMP; hw_data->get_obj_name = get_obj_name; hw_data->get_objs_num = get_objs_num; hw_data->get_obj_cfg_ae_mask = get_obj_cfg_ae_mask; hw_data->init_admin_comms = adf_init_admin_comms; hw_data->exit_admin_comms = adf_exit_admin_comms; hw_data->configure_iov_threads = configure_iov_threads; hw_data->disable_iov = adf_disable_sriov; hw_data->send_admin_init = adf_send_admin_init; hw_data->init_arb = adf_init_arb_c4xxx; hw_data->exit_arb = adf_exit_arb_c4xxx; hw_data->disable_arb = adf_disable_arb; hw_data->enable_ints = adf_enable_ints; hw_data->set_ssm_wdtimer = c4xxx_set_ssm_wdtimer; hw_data->check_slice_hang = c4xxx_check_slice_hang; hw_data->enable_vf2pf_comms = adf_pf_enable_vf2pf_comms; hw_data->disable_vf2pf_comms = adf_pf_disable_vf2pf_comms; hw_data->reset_device = adf_reset_flr; hw_data->restore_device = adf_c4xxx_dev_restore; hw_data->min_iov_compat_ver = ADF_PFVF_COMPATIBILITY_VERSION; hw_data->init_accel_units = adf_init_accel_units; hw_data->reset_hw_units = adf_c4xxx_reset_hw_units; hw_data->exit_accel_units = adf_exit_accel_units; hw_data->ring_to_svc_map = ADF_DEFAULT_RING_TO_SRV_MAP; - hw_data->get_heartbeat_status = adf_get_heartbeat_status_c4xxx; + hw_data->get_heartbeat_status = adf_get_heartbeat_status; hw_data->get_ae_clock = get_ae_clock; hw_data->clock_frequency = ADF_C4XXX_AE_FREQ; hw_data->measure_clock = measure_clock; hw_data->add_pke_stats = adf_pke_replay_counters_add_c4xxx; hw_data->remove_pke_stats = 
adf_pke_replay_counters_remove_c4xxx; hw_data->add_misc_error = adf_misc_error_add_c4xxx; hw_data->remove_misc_error = adf_misc_error_remove_c4xxx; hw_data->extended_dc_capabilities = 0; hw_data->get_storage_enabled = get_storage_enabled; hw_data->query_storage_cap = 0; hw_data->get_accel_cap = c4xxx_get_hw_cap; hw_data->configure_accel_units = c4xxx_configure_accel_units; hw_data->pre_reset = adf_dev_pre_reset; hw_data->post_reset = adf_dev_post_reset; hw_data->get_ring_to_svc_map = adf_cfg_get_services_enabled; hw_data->count_ras_event = adf_fw_count_ras_event; hw_data->config_device = adf_config_device; hw_data->set_asym_rings_mask = adf_cfg_set_asym_rings_mask; + + adf_gen2_init_hw_csr_info(&hw_data->csr_info); + hw_data->csr_info.arb_enable_mask = 0xF; } void adf_clean_hw_data_c4xxx(struct adf_hw_device_data *hw_data) { hw_data->dev_class->instances--; } void remove_oid(struct adf_accel_dev *accel_dev, struct sysctl_oid *oid) { struct sysctl_ctx_list *qat_sysctl_ctx; int ret; qat_sysctl_ctx = device_get_sysctl_ctx(accel_dev->accel_pci_dev.pci_dev); ret = sysctl_ctx_entry_del(qat_sysctl_ctx, oid); if (ret) device_printf(GET_DEV(accel_dev), "Failed to delete entry\n"); ret = sysctl_remove_oid(oid, 1, 1); if (ret) device_printf(GET_DEV(accel_dev), "Failed to delete oid\n"); } diff --git a/sys/dev/qat/qat_hw/qat_c4xxx/adf_drv.c b/sys/dev/qat/qat_hw/qat_c4xxx/adf_drv.c index 3470e4e8a8a0..1a116ef4acb0 100644 --- a/sys/dev/qat/qat_hw/qat_c4xxx/adf_drv.c +++ b/sys/dev/qat/qat_hw/qat_c4xxx/adf_drv.c @@ -1,268 +1,269 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright(c) 2007-2022 Intel Corporation */ /* $FreeBSD$ */ #include "qat_freebsd.h" #include "adf_cfg.h" #include "adf_common_drv.h" #include "adf_accel_devices.h" #include "adf_c4xxx_hw_data.h" #include "adf_fw_counters.h" #include "adf_cfg_device.h" #include #include #include #include #include #include "adf_heartbeat_dbg.h" #include "adf_cnvnr_freq_counters.h" static MALLOC_DEFINE(M_QAT_C4XXX, "qat_c4xx", "qat_c4xx"); #define ADF_SYSTEM_DEVICE(device_id) \ { \ PCI_VENDOR_ID_INTEL, device_id \ } static const struct pci_device_id adf_pci_tbl[] = { ADF_SYSTEM_DEVICE(ADF_C4XXX_PCI_DEVICE_ID), { 0, } }; static int adf_probe(device_t dev) { const struct pci_device_id *id; for (id = adf_pci_tbl; id->vendor != 0; id++) { if (pci_get_vendor(dev) == id->vendor && pci_get_device(dev) == id->device) { device_set_desc(dev, "Intel " ADF_C4XXX_DEVICE_NAME " QuickAssist"); return BUS_PROBE_GENERIC; } } return ENXIO; } static void adf_cleanup_accel(struct adf_accel_dev *accel_dev) { struct adf_accel_pci *accel_pci_dev = &accel_dev->accel_pci_dev; int i; if (accel_dev->dma_tag) bus_dma_tag_destroy(accel_dev->dma_tag); for (i = 0; i < ADF_PCI_MAX_BARS; i++) { struct adf_bar *bar = &accel_pci_dev->pci_bars[i]; if (bar->virt_addr) bus_free_resource(accel_pci_dev->pci_dev, SYS_RES_MEMORY, bar->virt_addr); } if (accel_dev->hw_device) { switch (pci_get_device(accel_pci_dev->pci_dev)) { case ADF_C4XXX_PCI_DEVICE_ID: adf_clean_hw_data_c4xxx(accel_dev->hw_device); break; default: break; } free(accel_dev->hw_device, M_QAT_C4XXX); accel_dev->hw_device = NULL; } adf_cfg_dev_remove(accel_dev); adf_devmgr_rm_dev(accel_dev, NULL); } static int adf_attach(device_t dev) { struct adf_accel_dev *accel_dev; struct adf_accel_pci *accel_pci_dev; struct adf_hw_device_data *hw_data; unsigned int i, bar_nr; int ret, rid; struct adf_cfg_device *cfg_dev = NULL; /* Set pci MaxPayLoad to 256. 
This avoids the issue of * PCI passthrough causing MaxPayload to be reset to 128 bytes * when the device is reset. */ if (pci_get_max_payload(dev) != 256) pci_set_max_payload(dev, 256); accel_dev = device_get_softc(dev); INIT_LIST_HEAD(&accel_dev->crypto_list); accel_pci_dev = &accel_dev->accel_pci_dev; accel_pci_dev->pci_dev = dev; if (bus_get_domain(dev, &accel_pci_dev->node) != 0) accel_pci_dev->node = 0; /* XXX: Revisit if we actually need a devmgr table at all. */ /* Add accel device to accel table. * This must be called before adf_cleanup_accel() is called */ if (adf_devmgr_add_dev(accel_dev, NULL)) { device_printf(dev, "Failed to add new accelerator device.\n"); return ENXIO; } /* Allocate and configure device configuration structure */ hw_data = malloc(sizeof(*hw_data), M_QAT_C4XXX, M_WAITOK | M_ZERO); accel_dev->hw_device = hw_data; adf_init_hw_data_c4xxx(accel_dev->hw_device); accel_pci_dev->revid = pci_get_revid(dev); hw_data->fuses = pci_read_config(dev, ADF_DEVICE_FUSECTL_OFFSET, 4); /* Get PPAERUCM values and store */ ret = adf_aer_store_ppaerucm_reg(dev, hw_data); if (ret) goto out_err; /* Get Accelerator and Accelerator Engine masks */ hw_data->accel_mask = hw_data->get_accel_mask(accel_dev); hw_data->ae_mask = hw_data->get_ae_mask(accel_dev); + hw_data->admin_ae_mask = hw_data->ae_mask; /* If the device has no acceleration engines then ignore it. */ if (!hw_data->accel_mask || !hw_data->ae_mask || (~hw_data->ae_mask & 0x01)) { device_printf(dev, "No acceleration units found\n"); ret = ENXIO; goto out_err; } /* Create device configuration table */ ret = adf_cfg_dev_add(accel_dev); if (ret) goto out_err; ret = adf_clock_debugfs_add(accel_dev); if (ret) goto out_err; pci_set_max_read_req(dev, 1024); ret = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, BUS_SPACE_MAXSIZE, /*BUS_SPACE_UNRESTRICTED*/ 1, BUS_SPACE_MAXSIZE, 0, NULL, NULL, &accel_dev->dma_tag); if (ret) goto out_err; if (hw_data->get_accel_cap) { hw_data->accel_capabilities_mask = hw_data->get_accel_cap(accel_dev); } accel_pci_dev->sku = hw_data->get_sku(hw_data); /* Find and map all the device's BARs */ i = 0; for (bar_nr = 0; i < ADF_PCI_MAX_BARS && bar_nr < PCIR_MAX_BAR_0; bar_nr++) { struct adf_bar *bar; /* * XXX: This isn't quite right as it will ignore a BAR * that wasn't assigned a valid resource range by the * firmware.
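 * Note that bar_nr walks the PCI BAR registers while i only advances
 * for BARs that are actually present, keeping pci_bars[] densely
 * packed.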
*/ rid = PCIR_BAR(bar_nr); if (bus_get_resource(dev, SYS_RES_MEMORY, rid, NULL, NULL) != 0) continue; bar = &accel_pci_dev->pci_bars[i++]; bar->virt_addr = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (!bar->virt_addr) { device_printf(dev, "Failed to map BAR %d\n", bar_nr); ret = ENXIO; goto out_err; } bar->base_addr = rman_get_start(bar->virt_addr); bar->size = rman_get_size(bar->virt_addr); } pci_enable_busmaster(dev); if (!accel_dev->hw_device->config_device) { ret = EFAULT; goto out_err; } ret = accel_dev->hw_device->config_device(accel_dev); if (ret) goto out_err; ret = adf_dev_init(accel_dev); if (ret) goto out_dev_shutdown; ret = adf_dev_start(accel_dev); if (ret) goto out_dev_stop; cfg_dev = accel_dev->cfg->dev; adf_cfg_device_clear(cfg_dev, accel_dev); free(cfg_dev, M_QAT); accel_dev->cfg->dev = NULL; return ret; out_dev_stop: adf_dev_stop(accel_dev); out_dev_shutdown: adf_dev_shutdown(accel_dev); out_err: adf_cleanup_accel(accel_dev); return ret; } static int adf_detach(device_t dev) { struct adf_accel_dev *accel_dev = device_get_softc(dev); if (adf_dev_stop(accel_dev)) { device_printf(dev, "Failed to stop QAT accel dev\n"); return EBUSY; } adf_dev_shutdown(accel_dev); adf_cleanup_accel(accel_dev); return 0; } static device_method_t adf_methods[] = { DEVMETHOD(device_probe, adf_probe), DEVMETHOD(device_attach, adf_attach), DEVMETHOD(device_detach, adf_detach), DEVMETHOD_END }; static driver_t adf_driver = { "qat", adf_methods, sizeof(struct adf_accel_dev) }; DRIVER_MODULE_ORDERED(qat_c4xxx, pci, adf_driver, NULL, NULL, SI_ORDER_THIRD); MODULE_VERSION(qat_c4xxx, 1); MODULE_DEPEND(qat_c4xxx, qat_common, 1, 1, 1); MODULE_DEPEND(qat_c4xxx, qat_api, 1, 1, 1); MODULE_DEPEND(qat_c4xxx, linuxkpi, 1, 1, 1); diff --git a/sys/dev/qat/qat_hw/qat_c62x/adf_c62x_hw_data.c b/sys/dev/qat/qat_hw/qat_c62x/adf_c62x_hw_data.c index 2ff72b94fa53..bf73b60adc39 100644 --- a/sys/dev/qat/qat_hw/qat_c62x/adf_c62x_hw_data.c +++ b/sys/dev/qat/qat_hw/qat_c62x/adf_c62x_hw_data.c @@ -1,420 +1,424 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright(c) 2007-2022 Intel Corporation */ /* $FreeBSD$ */ #include #include #include #include #include +#include #include "adf_c62x_hw_data.h" #include "icp_qat_hw.h" #include "adf_cfg.h" #include "adf_heartbeat.h" /* Worker thread to service arbiter mappings */ static const u32 thrd_to_arb_map[ADF_C62X_MAX_ACCELENGINES] = { 0x12222AAA, 0x11222AAA, 0x12222AAA, 0x11222AAA, 0x12222AAA, 0x11222AAA, 0x12222AAA, 0x11222AAA, 0x12222AAA, 0x11222AAA }; enum { DEV_C62X_SKU_1 = 0, DEV_C62X_SKU_2 = 1 }; static u32 thrd_to_arb_map_gen[ADF_C62X_MAX_ACCELENGINES] = { 0 }; static struct adf_hw_device_class c62x_class = {.name = ADF_C62X_DEVICE_NAME, .type = DEV_C62X, .instances = 0 }; static u32 get_accel_mask(struct adf_accel_dev *accel_dev) { device_t pdev = accel_dev->accel_pci_dev.pci_dev; u32 fuse; u32 straps; fuse = pci_read_config(pdev, ADF_DEVICE_FUSECTL_OFFSET, 4); straps = pci_read_config(pdev, ADF_C62X_SOFTSTRAP_CSR_OFFSET, 4); return (~(fuse | straps)) >> ADF_C62X_ACCELERATORS_REG_OFFSET & ADF_C62X_ACCELERATORS_MASK; } static u32 get_ae_mask(struct adf_accel_dev *accel_dev) { device_t pdev = accel_dev->accel_pci_dev.pci_dev; u32 fuse; u32 me_straps; u32 me_disable; u32 ssms_disabled; fuse = pci_read_config(pdev, ADF_DEVICE_FUSECTL_OFFSET, 4); me_straps = pci_read_config(pdev, ADF_C62X_SOFTSTRAP_CSR_OFFSET, 4); /* If SSMs are disabled, then disable the corresponding MEs */ ssms_disabled = (~get_accel_mask(accel_dev)) & ADF_C62X_ACCELERATORS_MASK;
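/* Each SSM feeds a pair of MEs: the loop below walks ssms_disabled bit * by bit while shifting a two-bit mask (0x3, 0xc, 0x30, ...) so that * both MEs behind every disabled SSM are strapped off as well. */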
me_disable = 0x3; while (ssms_disabled) { if (ssms_disabled & 1) me_straps |= me_disable; ssms_disabled >>= 1; me_disable <<= 2; } return (~(fuse | me_straps)) & ADF_C62X_ACCELENGINES_MASK; } static u32 get_num_accels(struct adf_hw_device_data *self) { u32 i, ctr = 0; if (!self || !self->accel_mask) return 0; for (i = 0; i < ADF_C62X_MAX_ACCELERATORS; i++) { if (self->accel_mask & (1 << i)) ctr++; } return ctr; } static u32 get_num_aes(struct adf_hw_device_data *self) { u32 i, ctr = 0; if (!self || !self->ae_mask) return 0; for (i = 0; i < ADF_C62X_MAX_ACCELENGINES; i++) { if (self->ae_mask & (1 << i)) ctr++; } return ctr; } static u32 get_misc_bar_id(struct adf_hw_device_data *self) { return ADF_C62X_PMISC_BAR; } static u32 get_etr_bar_id(struct adf_hw_device_data *self) { return ADF_C62X_ETR_BAR; } static u32 get_sram_bar_id(struct adf_hw_device_data *self) { return ADF_C62X_SRAM_BAR; } static enum dev_sku_info get_sku(struct adf_hw_device_data *self) { int aes = get_num_aes(self); if (aes == 8) return DEV_SKU_2; else if (aes == 10) return DEV_SKU_4; return DEV_SKU_UNKNOWN; } static void adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev, u32 const **arb_map_config) { int i; struct adf_hw_device_data *hw_device = accel_dev->hw_device; for (i = 0; i < ADF_C62X_MAX_ACCELENGINES; i++) { thrd_to_arb_map_gen[i] = 0; if (hw_device->ae_mask & (1 << i)) thrd_to_arb_map_gen[i] = thrd_to_arb_map[i]; } adf_cfg_gen_dispatch_arbiter(accel_dev, thrd_to_arb_map, thrd_to_arb_map_gen, ADF_C62X_MAX_ACCELENGINES); *arb_map_config = thrd_to_arb_map_gen; } static u32 get_pf2vf_offset(u32 i) { return ADF_C62X_PF2VF_OFFSET(i); } static u32 get_vintmsk_offset(u32 i) { return ADF_C62X_VINTMSK_OFFSET(i); } static void get_arb_info(struct arb_info *arb_csrs_info) { arb_csrs_info->arbiter_offset = ADF_C62X_ARB_OFFSET; arb_csrs_info->wrk_thd_2_srv_arb_map = ADF_C62X_ARB_WRK_2_SER_MAP_OFFSET; arb_csrs_info->wrk_cfg_offset = ADF_C62X_ARB_WQCFG_OFFSET; } static void get_admin_info(struct admin_info *admin_csrs_info) { admin_csrs_info->mailbox_offset = ADF_C62X_MAILBOX_BASE_OFFSET; admin_csrs_info->admin_msg_ur = ADF_C62X_ADMINMSGUR_OFFSET; admin_csrs_info->admin_msg_lr = ADF_C62X_ADMINMSGLR_OFFSET; } static void get_errsou_offset(u32 *errsou3, u32 *errsou5) { *errsou3 = ADF_C62X_ERRSOU3; *errsou5 = ADF_C62X_ERRSOU5; } static u32 get_clock_speed(struct adf_hw_device_data *self) { /* CPP clock is half high-speed clock */ return self->clock_frequency / 2; } static void adf_enable_error_correction(struct adf_accel_dev *accel_dev) { struct adf_hw_device_data *hw_device = accel_dev->hw_device; struct adf_bar *misc_bar = &GET_BARS(accel_dev)[ADF_C62X_PMISC_BAR]; struct resource *csr = misc_bar->virt_addr; unsigned int val, i; unsigned int mask; /* Enable Accel Engine error detection & correction */ mask = hw_device->ae_mask; for (i = 0; mask; i++, mask >>= 1) { if (!(mask & 1)) continue; val = ADF_CSR_RD(csr, ADF_C62X_AE_CTX_ENABLES(i)); val |= ADF_C62X_ENABLE_AE_ECC_ERR; ADF_CSR_WR(csr, ADF_C62X_AE_CTX_ENABLES(i), val); val = ADF_CSR_RD(csr, ADF_C62X_AE_MISC_CONTROL(i)); val |= ADF_C62X_ENABLE_AE_ECC_PARITY_CORR; ADF_CSR_WR(csr, ADF_C62X_AE_MISC_CONTROL(i), val); } /* Enable shared memory error detection & correction */ mask = hw_device->accel_mask; for (i = 0; mask; i++, mask >>= 1) { if (!(mask & 1)) continue; val = ADF_CSR_RD(csr, ADF_C62X_UERRSSMSH(i)); val |= ADF_C62X_ERRSSMSH_EN; ADF_CSR_WR(csr, ADF_C62X_UERRSSMSH(i), val); val = ADF_CSR_RD(csr, ADF_C62X_CERRSSMSH(i)); val |= ADF_C62X_ERRSSMSH_EN; ADF_CSR_WR(csr, 
ADF_C62X_CERRSSMSH(i), val); } } static void adf_enable_ints(struct adf_accel_dev *accel_dev) { struct resource *addr; addr = (&GET_BARS(accel_dev)[ADF_C62X_PMISC_BAR])->virt_addr; /* Enable bundle and misc interrupts */ ADF_CSR_WR(addr, ADF_C62X_SMIAPF0_MASK_OFFSET, ADF_C62X_SMIA0_MASK); ADF_CSR_WR(addr, ADF_C62X_SMIAPF1_MASK_OFFSET, ADF_C62X_SMIA1_MASK); } static u32 get_ae_clock(struct adf_hw_device_data *self) { /* * Clock update interval is <16> ticks for c62x. */ return self->clock_frequency / 16; } static int get_storage_enabled(struct adf_accel_dev *accel_dev, uint32_t *storage_enabled) { char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES]; char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES]; strlcpy(key, ADF_STORAGE_FIRMWARE_ENABLED, sizeof(key)); if (!adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC, key, val)) { if (kstrtouint(val, 0, storage_enabled)) return -EFAULT; } return 0; } static int measure_clock(struct adf_accel_dev *accel_dev) { u32 frequency; int ret = 0; ret = adf_dev_measure_clock(accel_dev, &frequency, ADF_C62X_MIN_AE_FREQ, ADF_C62X_MAX_AE_FREQ); if (ret) return ret; accel_dev->hw_device->clock_frequency = frequency; return 0; } static u32 c62x_get_hw_cap(struct adf_accel_dev *accel_dev) { device_t pdev = accel_dev->accel_pci_dev.pci_dev; u32 legfuses; u32 capabilities; u32 straps; struct adf_hw_device_data *hw_data = accel_dev->hw_device; u32 fuses = hw_data->fuses; /* Read accelerator capabilities mask */ legfuses = pci_read_config(pdev, ADF_DEVICE_LEGFUSE_OFFSET, 4); capabilities = ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC + ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC + ICP_ACCEL_CAPABILITIES_CIPHER + ICP_ACCEL_CAPABILITIES_AUTHENTICATION + ICP_ACCEL_CAPABILITIES_COMPRESSION + ICP_ACCEL_CAPABILITIES_ZUC + ICP_ACCEL_CAPABILITIES_SHA3 + ICP_ACCEL_CAPABILITIES_HKDF + ICP_ACCEL_CAPABILITIES_ECEDMONT + ICP_ACCEL_CAPABILITIES_EXT_ALGCHAIN; if (legfuses & ICP_ACCEL_MASK_CIPHER_SLICE) capabilities &= ~(ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC | ICP_ACCEL_CAPABILITIES_CIPHER | ICP_ACCEL_CAPABILITIES_HKDF | ICP_ACCEL_CAPABILITIES_EXT_ALGCHAIN); if (legfuses & ICP_ACCEL_MASK_AUTH_SLICE) capabilities &= ~ICP_ACCEL_CAPABILITIES_AUTHENTICATION; if (legfuses & ICP_ACCEL_MASK_PKE_SLICE) capabilities &= ~(ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC | ICP_ACCEL_CAPABILITIES_ECEDMONT); if (legfuses & ICP_ACCEL_MASK_COMPRESS_SLICE) capabilities &= ~ICP_ACCEL_CAPABILITIES_COMPRESSION; if (legfuses & ICP_ACCEL_MASK_EIA3_SLICE) capabilities &= ~ICP_ACCEL_CAPABILITIES_ZUC; if (legfuses & ICP_ACCEL_MASK_SHA3_SLICE) capabilities &= ~ICP_ACCEL_CAPABILITIES_SHA3; straps = pci_read_config(pdev, ADF_C62X_SOFTSTRAP_CSR_OFFSET, 4); if ((straps | fuses) & ADF_C62X_POWERGATE_PKE) capabilities &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC; if ((straps | fuses) & ADF_C62X_POWERGATE_DC) capabilities &= ~ICP_ACCEL_CAPABILITIES_COMPRESSION; return capabilities; } static const char * get_obj_name(struct adf_accel_dev *accel_dev, enum adf_accel_unit_services service) { return ADF_CXXX_AE_FW_NAME_CUSTOM1; } static uint32_t get_objs_num(struct adf_accel_dev *accel_dev) { return 1; } static uint32_t get_obj_cfg_ae_mask(struct adf_accel_dev *accel_dev, enum adf_accel_unit_services services) { return accel_dev->hw_device->ae_mask; } void adf_init_hw_data_c62x(struct adf_hw_device_data *hw_data) { hw_data->dev_class = &c62x_class; hw_data->instance_id = c62x_class.instances++; hw_data->num_banks = ADF_C62X_ETR_MAX_BANKS; hw_data->num_rings_per_bank = ADF_ETR_MAX_RINGS_PER_BANK; hw_data->num_accel = ADF_C62X_MAX_ACCELERATORS; 
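/*
 * The remainder of this function fills in the per-device operations
 * table: fixed ring and bank geometry constants first, then the
 * callbacks through which the device-independent qat_common code
 * drives the c62x hardware at run time.
 */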
hw_data->num_logical_accel = 1; hw_data->num_engines = ADF_C62X_MAX_ACCELENGINES; hw_data->tx_rx_gap = ADF_C62X_RX_RINGS_OFFSET; hw_data->tx_rings_mask = ADF_C62X_TX_RINGS_MASK; hw_data->alloc_irq = adf_isr_resource_alloc; hw_data->free_irq = adf_isr_resource_free; hw_data->enable_error_correction = adf_enable_error_correction; hw_data->print_err_registers = adf_print_err_registers; hw_data->get_accel_mask = get_accel_mask; hw_data->get_ae_mask = get_ae_mask; hw_data->get_num_accels = get_num_accels; hw_data->get_num_aes = get_num_aes; hw_data->get_sram_bar_id = get_sram_bar_id; hw_data->get_etr_bar_id = get_etr_bar_id; hw_data->get_misc_bar_id = get_misc_bar_id; hw_data->get_pf2vf_offset = get_pf2vf_offset; hw_data->get_vintmsk_offset = get_vintmsk_offset; hw_data->get_arb_info = get_arb_info; hw_data->get_admin_info = get_admin_info; hw_data->get_errsou_offset = get_errsou_offset; hw_data->get_clock_speed = get_clock_speed; hw_data->get_sku = get_sku; + hw_data->heartbeat_ctr_num = ADF_NUM_HB_CNT_PER_AE; hw_data->fw_name = ADF_C62X_FW; hw_data->fw_mmp_name = ADF_C62X_MMP; hw_data->init_admin_comms = adf_init_admin_comms; hw_data->exit_admin_comms = adf_exit_admin_comms; hw_data->disable_iov = adf_disable_sriov; hw_data->send_admin_init = adf_send_admin_init; hw_data->init_arb = adf_init_gen2_arb; hw_data->exit_arb = adf_exit_arb; hw_data->get_arb_mapping = adf_get_arbiter_mapping; hw_data->enable_ints = adf_enable_ints; hw_data->set_ssm_wdtimer = adf_set_ssm_wdtimer; hw_data->check_slice_hang = adf_check_slice_hang; hw_data->enable_vf2pf_comms = adf_pf_enable_vf2pf_comms; hw_data->disable_vf2pf_comms = adf_pf_disable_vf2pf_comms; hw_data->restore_device = adf_dev_restore; hw_data->reset_device = adf_reset_flr; hw_data->min_iov_compat_ver = ADF_PFVF_COMPATIBILITY_VERSION; hw_data->get_objs_num = get_objs_num; hw_data->get_obj_name = get_obj_name; hw_data->get_obj_cfg_ae_mask = get_obj_cfg_ae_mask; hw_data->clock_frequency = ADF_C62X_AE_FREQ; hw_data->measure_clock = measure_clock; hw_data->get_ae_clock = get_ae_clock; hw_data->get_accel_cap = c62x_get_hw_cap; hw_data->extended_dc_capabilities = 0; hw_data->get_storage_enabled = get_storage_enabled; hw_data->query_storage_cap = 1; hw_data->get_heartbeat_status = adf_get_heartbeat_status; hw_data->storage_enable = 0; hw_data->get_fw_image_type = adf_cfg_get_fw_image_type; hw_data->config_device = adf_config_device; hw_data->get_ring_to_svc_map = adf_cfg_get_services_enabled; hw_data->set_asym_rings_mask = adf_cfg_set_asym_rings_mask; hw_data->ring_to_svc_map = ADF_DEFAULT_RING_TO_SRV_MAP; hw_data->pre_reset = adf_dev_pre_reset; hw_data->post_reset = adf_dev_post_reset; + + adf_gen2_init_hw_csr_info(&hw_data->csr_info); } void adf_clean_hw_data_c62x(struct adf_hw_device_data *hw_data) { hw_data->dev_class->instances--; } diff --git a/sys/dev/qat/qat_hw/qat_c62x/adf_drv.c b/sys/dev/qat/qat_hw/qat_c62x/adf_drv.c index ebeb4949a8c2..198bd1e0a78d 100644 --- a/sys/dev/qat/qat_hw/qat_c62x/adf_drv.c +++ b/sys/dev/qat/qat_hw/qat_c62x/adf_drv.c @@ -1,270 +1,271 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright(c) 2007-2022 Intel Corporation */ /* $FreeBSD$ */ #include "qat_freebsd.h" #include "adf_cfg.h" #include "adf_common_drv.h" #include "adf_accel_devices.h" #include "adf_c62x_hw_data.h" #include "adf_fw_counters.h" #include "adf_cfg_device.h" #include #include #include #include #include #include "adf_heartbeat_dbg.h" #include "adf_cnvnr_freq_counters.h" static
MALLOC_DEFINE(M_QAT_C62X, "qat_c62x", "qat_c62x"); #define ADF_SYSTEM_DEVICE(device_id) \ { \ PCI_VENDOR_ID_INTEL, device_id \ } static const struct pci_device_id adf_pci_tbl[] = { ADF_SYSTEM_DEVICE( ADF_C62X_PCI_DEVICE_ID), { 0, } }; static int adf_probe(device_t dev) { const struct pci_device_id *id; for (id = adf_pci_tbl; id->vendor != 0; id++) { if (pci_get_vendor(dev) == id->vendor && pci_get_device(dev) == id->device) { device_set_desc(dev, "Intel " ADF_C62X_DEVICE_NAME " QuickAssist"); return BUS_PROBE_GENERIC; } } return ENXIO; } static void adf_cleanup_accel(struct adf_accel_dev *accel_dev) { struct adf_accel_pci *accel_pci_dev = &accel_dev->accel_pci_dev; int i; if (accel_dev->dma_tag) bus_dma_tag_destroy(accel_dev->dma_tag); for (i = 0; i < ADF_PCI_MAX_BARS; i++) { struct adf_bar *bar = &accel_pci_dev->pci_bars[i]; if (bar->virt_addr) bus_free_resource(accel_pci_dev->pci_dev, SYS_RES_MEMORY, bar->virt_addr); } if (accel_dev->hw_device) { switch (pci_get_device(accel_pci_dev->pci_dev)) { case ADF_C62X_PCI_DEVICE_ID: adf_clean_hw_data_c62x(accel_dev->hw_device); break; default: break; } free(accel_dev->hw_device, M_QAT_C62X); accel_dev->hw_device = NULL; } adf_cfg_dev_remove(accel_dev); adf_devmgr_rm_dev(accel_dev, NULL); } static int adf_attach(device_t dev) { struct adf_accel_dev *accel_dev; struct adf_accel_pci *accel_pci_dev; struct adf_hw_device_data *hw_data; unsigned int i, bar_nr; int ret, rid; struct adf_cfg_device *cfg_dev = NULL; /* Set pci MaxPayLoad to 256. Implemented to avoid the issue of * Pci-passthrough causing Maxpayload to be reset to 128 bytes * when the device is reset. */ if (pci_get_max_payload(dev) != 256) pci_set_max_payload(dev, 256); accel_dev = device_get_softc(dev); INIT_LIST_HEAD(&accel_dev->crypto_list); accel_pci_dev = &accel_dev->accel_pci_dev; accel_pci_dev->pci_dev = dev; if (bus_get_domain(dev, &accel_pci_dev->node) != 0) accel_pci_dev->node = 0; /* XXX: Revisit if we actually need a devmgr table at all. */ /* Add accel device to accel table. * This should be called before adf_cleanup_accel is called */ if (adf_devmgr_add_dev(accel_dev, NULL)) { device_printf(dev, "Failed to add new accelerator device.\n"); return ENXIO; } /* Allocate and configure device configuration structure */ hw_data = malloc(sizeof(*hw_data), M_QAT_C62X, M_WAITOK | M_ZERO); accel_dev->hw_device = hw_data; adf_init_hw_data_c62x(accel_dev->hw_device); accel_pci_dev->revid = pci_get_revid(dev); hw_data->fuses = pci_read_config(dev, ADF_DEVICE_FUSECTL_OFFSET, 4); if (accel_pci_dev->revid == 0x00) { device_printf(dev, "A0 stepping is not supported.\n"); ret = ENODEV; goto out_err; } /* Get PPAERUCM values and store */ ret = adf_aer_store_ppaerucm_reg(dev, hw_data); if (ret) goto out_err; /* Get Accelerators and Accelerators Engines masks */ hw_data->accel_mask = hw_data->get_accel_mask(accel_dev); hw_data->ae_mask = hw_data->get_ae_mask(accel_dev); + hw_data->admin_ae_mask = hw_data->ae_mask; accel_pci_dev->sku = hw_data->get_sku(hw_data); /* If the device has no acceleration engines then ignore it. 
*/ if (!hw_data->accel_mask || !hw_data->ae_mask || ((~hw_data->ae_mask) & 0x01)) { device_printf(dev, "No acceleration units found\n"); ret = ENXIO; goto out_err; } /* Create device configuration table */ ret = adf_cfg_dev_add(accel_dev); if (ret) goto out_err; ret = adf_clock_debugfs_add(accel_dev); if (ret) goto out_err; pci_set_max_read_req(dev, 1024); ret = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, BUS_SPACE_MAXSIZE, /* BUS_SPACE_UNRESTRICTED */ 1, BUS_SPACE_MAXSIZE, 0, NULL, NULL, &accel_dev->dma_tag); if (ret) goto out_err; if (hw_data->get_accel_cap) { hw_data->accel_capabilities_mask = hw_data->get_accel_cap(accel_dev); } /* Find and map all the device's BARS */ i = (hw_data->fuses & ADF_DEVICE_FUSECTL_MASK) ? 1 : 0; for (bar_nr = 0; i < ADF_PCI_MAX_BARS && bar_nr < PCIR_MAX_BAR_0; bar_nr++) { struct adf_bar *bar; /* * XXX: This isn't quite right as it will ignore a BAR * that wasn't assigned a valid resource range by the * firmware. */ rid = PCIR_BAR(bar_nr); if (bus_get_resource(dev, SYS_RES_MEMORY, rid, NULL, NULL) != 0) continue; bar = &accel_pci_dev->pci_bars[i++]; bar->virt_addr = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (bar->virt_addr == NULL) { device_printf(dev, "Failed to map BAR %d\n", bar_nr); ret = ENXIO; goto out_err; } bar->base_addr = rman_get_start(bar->virt_addr); bar->size = rman_get_size(bar->virt_addr); } pci_enable_busmaster(dev); if (!accel_dev->hw_device->config_device) { ret = EFAULT; goto out_err; } ret = accel_dev->hw_device->config_device(accel_dev); if (ret) goto out_err; ret = adf_dev_init(accel_dev); if (ret) goto out_dev_shutdown; ret = adf_dev_start(accel_dev); if (ret) goto out_dev_stop; cfg_dev = accel_dev->cfg->dev; adf_cfg_device_clear(cfg_dev, accel_dev); free(cfg_dev, M_QAT); accel_dev->cfg->dev = NULL; return ret; out_dev_stop: adf_dev_stop(accel_dev); out_dev_shutdown: adf_dev_shutdown(accel_dev); out_err: adf_cleanup_accel(accel_dev); return ret; } static int adf_detach(device_t dev) { struct adf_accel_dev *accel_dev = device_get_softc(dev); if (adf_dev_stop(accel_dev)) { device_printf(dev, "Failed to stop QAT accel dev\n"); return EBUSY; } adf_dev_shutdown(accel_dev); adf_cleanup_accel(accel_dev); return 0; } static device_method_t adf_methods[] = { DEVMETHOD(device_probe, adf_probe), DEVMETHOD(device_attach, adf_attach), DEVMETHOD(device_detach, adf_detach), DEVMETHOD_END }; static driver_t adf_driver = { "qat", adf_methods, sizeof(struct adf_accel_dev) }; DRIVER_MODULE_ORDERED(qat_c62x, pci, adf_driver, NULL, NULL, SI_ORDER_THIRD); MODULE_VERSION(qat_c62x, 1); MODULE_DEPEND(qat_c62x, qat_common, 1, 1, 1); MODULE_DEPEND(qat_c62x, qat_api, 1, 1, 1); MODULE_DEPEND(qat_c62x, linuxkpi, 1, 1, 1); diff --git a/sys/dev/qat/qat_hw/qat_dh895xcc/adf_dh895xcc_hw_data.c b/sys/dev/qat/qat_hw/qat_dh895xcc/adf_dh895xcc_hw_data.c index 44dde0750792..59c53db3947a 100644 --- a/sys/dev/qat/qat_hw/qat_dh895xcc/adf_dh895xcc_hw_data.c +++ b/sys/dev/qat/qat_hw/qat_dh895xcc/adf_dh895xcc_hw_data.c @@ -1,405 +1,409 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright(c) 2007-2022 Intel Corporation */ /* $FreeBSD$ */ #include "qat_freebsd.h" #include "adf_cfg.h" #include #include #include #include +#include #include "adf_dh895xcc_hw_data.h" #include "icp_qat_hw.h" #include "adf_heartbeat.h" /* Worker thread to service arbiter mappings based on dev SKUs */ static const u32 thrd_to_arb_map_sku4[] = { 0x12222AAA, 0x11666666, 0x12222AAA, 0x11666666, 0x12222AAA, 0x11222222, 
0x12222AAA, 0x11222222, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }; static const u32 thrd_to_arb_map_sku6[] = { 0x12222AAA, 0x11666666, 0x12222AAA, 0x11666666, 0x12222AAA, 0x11222222, 0x12222AAA, 0x11222222, 0x12222AAA, 0x11222222, 0x12222AAA, 0x11222222 }; static const u32 thrd_to_arb_map_sku3[] = { 0x00000888, 0x00000000, 0x00000888, 0x00000000, 0x00000888, 0x00000000, 0x00000888, 0x00000000, 0x00000888, 0x00000000, 0x00000888, 0x00000000 }; static u32 thrd_to_arb_map_gen[ADF_DH895XCC_MAX_ACCELENGINES] = { 0 }; static struct adf_hw_device_class dh895xcc_class = {.name = ADF_DH895XCC_DEVICE_NAME, .type = DEV_DH895XCC, .instances = 0 }; static u32 get_accel_mask(struct adf_accel_dev *accel_dev) { device_t pdev = accel_dev->accel_pci_dev.pci_dev; u32 fuse; fuse = pci_read_config(pdev, ADF_DEVICE_FUSECTL_OFFSET, 4); return (~fuse) >> ADF_DH895XCC_ACCELERATORS_REG_OFFSET & ADF_DH895XCC_ACCELERATORS_MASK; } static u32 get_ae_mask(struct adf_accel_dev *accel_dev) { device_t pdev = accel_dev->accel_pci_dev.pci_dev; u32 fuse; fuse = pci_read_config(pdev, ADF_DEVICE_FUSECTL_OFFSET, 4); return (~fuse) & ADF_DH895XCC_ACCELENGINES_MASK; } static uint32_t get_num_accels(struct adf_hw_device_data *self) { uint32_t i, ctr = 0; if (!self || !self->accel_mask) return 0; for (i = 0; i < ADF_DH895XCC_MAX_ACCELERATORS; i++) { if (self->accel_mask & (1 << i)) ctr++; } return ctr; } static uint32_t get_num_aes(struct adf_hw_device_data *self) { uint32_t i, ctr = 0; if (!self || !self->ae_mask) return 0; for (i = 0; i < ADF_DH895XCC_MAX_ACCELENGINES; i++) { if (self->ae_mask & (1 << i)) ctr++; } return ctr; } static uint32_t get_misc_bar_id(struct adf_hw_device_data *self) { return ADF_DH895XCC_PMISC_BAR; } static uint32_t get_etr_bar_id(struct adf_hw_device_data *self) { return ADF_DH895XCC_ETR_BAR; } static uint32_t get_sram_bar_id(struct adf_hw_device_data *self) { return ADF_DH895XCC_SRAM_BAR; } static enum dev_sku_info get_sku(struct adf_hw_device_data *self) { int sku = (self->fuses & ADF_DH895XCC_FUSECTL_SKU_MASK) >> ADF_DH895XCC_FUSECTL_SKU_SHIFT; switch (sku) { case ADF_DH895XCC_FUSECTL_SKU_1: return DEV_SKU_1; case ADF_DH895XCC_FUSECTL_SKU_2: return DEV_SKU_2; case ADF_DH895XCC_FUSECTL_SKU_3: return DEV_SKU_3; case ADF_DH895XCC_FUSECTL_SKU_4: return DEV_SKU_4; default: return DEV_SKU_UNKNOWN; } return DEV_SKU_UNKNOWN; } static void adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev, u32 const **arb_map_config) { switch (accel_dev->accel_pci_dev.sku) { case DEV_SKU_1: adf_cfg_gen_dispatch_arbiter(accel_dev, thrd_to_arb_map_sku4, thrd_to_arb_map_gen, ADF_DH895XCC_MAX_ACCELENGINES); *arb_map_config = thrd_to_arb_map_gen; break; case DEV_SKU_2: case DEV_SKU_4: adf_cfg_gen_dispatch_arbiter(accel_dev, thrd_to_arb_map_sku6, thrd_to_arb_map_gen, ADF_DH895XCC_MAX_ACCELENGINES); *arb_map_config = thrd_to_arb_map_gen; break; case DEV_SKU_3: adf_cfg_gen_dispatch_arbiter(accel_dev, thrd_to_arb_map_sku3, thrd_to_arb_map_gen, ADF_DH895XCC_MAX_ACCELENGINES); *arb_map_config = thrd_to_arb_map_gen; break; default: device_printf(GET_DEV(accel_dev), "The configuration doesn't match any SKU"); *arb_map_config = NULL; } } static uint32_t get_pf2vf_offset(uint32_t i) { return ADF_DH895XCC_PF2VF_OFFSET(i); } static uint32_t get_vintmsk_offset(uint32_t i) { return ADF_DH895XCC_VINTMSK_OFFSET(i); } static void get_arb_info(struct arb_info *arb_csrs_info) { arb_csrs_info->arbiter_offset = ADF_DH895XCC_ARB_OFFSET; arb_csrs_info->wrk_thd_2_srv_arb_map = ADF_DH895XCC_ARB_WRK_2_SER_MAP_OFFSET; 
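/*
 * Offset of the arbiter work-queue configuration (WQCFG) CSRs,
 * programmed later by the common arbiter initialization code.
 */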
arb_csrs_info->wrk_cfg_offset = ADF_DH895XCC_ARB_WQCFG_OFFSET; } static void get_admin_info(struct admin_info *admin_csrs_info) { admin_csrs_info->mailbox_offset = ADF_DH895XCC_MAILBOX_BASE_OFFSET; admin_csrs_info->admin_msg_ur = ADF_DH895XCC_ADMINMSGUR_OFFSET; admin_csrs_info->admin_msg_lr = ADF_DH895XCC_ADMINMSGLR_OFFSET; } static void get_errsou_offset(u32 *errsou3, u32 *errsou5) { *errsou3 = ADF_DH895XCC_ERRSOU3; *errsou5 = ADF_DH895XCC_ERRSOU5; } static u32 get_clock_speed(struct adf_hw_device_data *self) { /* CPP clock is half high-speed clock */ return self->clock_frequency / 2; } static void adf_enable_error_correction(struct adf_accel_dev *accel_dev) { struct adf_hw_device_data *hw_device = accel_dev->hw_device; struct adf_bar *misc_bar = &GET_BARS(accel_dev)[ADF_DH895XCC_PMISC_BAR]; struct resource *csr = misc_bar->virt_addr; unsigned int val, i; unsigned int mask; /* Enable Accel Engine error detection & correction */ mask = hw_device->ae_mask; for (i = 0; mask; i++, mask >>= 1) { if (!(mask & 1)) continue; val = ADF_CSR_RD(csr, ADF_DH895XCC_AE_CTX_ENABLES(i)); val |= ADF_DH895XCC_ENABLE_AE_ECC_ERR; ADF_CSR_WR(csr, ADF_DH895XCC_AE_CTX_ENABLES(i), val); val = ADF_CSR_RD(csr, ADF_DH895XCC_AE_MISC_CONTROL(i)); val |= ADF_DH895XCC_ENABLE_AE_ECC_PARITY_CORR; ADF_CSR_WR(csr, ADF_DH895XCC_AE_MISC_CONTROL(i), val); } /* Enable shared memory error detection & correction */ mask = hw_device->accel_mask; for (i = 0; mask; i++, mask >>= 1) { if (!(mask & 1)) continue; val = ADF_CSR_RD(csr, ADF_DH895XCC_UERRSSMSH(i)); val |= ADF_DH895XCC_ERRSSMSH_EN; ADF_CSR_WR(csr, ADF_DH895XCC_UERRSSMSH(i), val); val = ADF_CSR_RD(csr, ADF_DH895XCC_CERRSSMSH(i)); val |= ADF_DH895XCC_ERRSSMSH_EN; ADF_CSR_WR(csr, ADF_DH895XCC_CERRSSMSH(i), val); } } static void adf_enable_ints(struct adf_accel_dev *accel_dev) { struct resource *addr; addr = (&GET_BARS(accel_dev)[ADF_DH895XCC_PMISC_BAR])->virt_addr; /* Enable bundle and misc interrupts */ ADF_CSR_WR(addr, ADF_DH895XCC_SMIAPF0_MASK_OFFSET, accel_dev->u1.pf.vf_info ? 0 : (1ULL << GET_MAX_BANKS(accel_dev)) - 1); ADF_CSR_WR(addr, ADF_DH895XCC_SMIAPF1_MASK_OFFSET, ADF_DH895XCC_SMIA1_MASK); } static u32 get_ae_clock(struct adf_hw_device_data *self) { /* * Clock update interval is <16> ticks for dh895xcc. 
*/ return self->clock_frequency / 16; } static int get_storage_enabled(struct adf_accel_dev *accel_dev, u32 *storage_enabled) { char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES]; char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES]; strlcpy(key, ADF_STORAGE_FIRMWARE_ENABLED, sizeof(key)); if (!adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC, key, val)) { if (kstrtouint(val, 0, storage_enabled)) return -EFAULT; } return 0; } static u32 dh895xcc_get_hw_cap(struct adf_accel_dev *accel_dev) { device_t pdev = accel_dev->accel_pci_dev.pci_dev; u32 legfuses; u32 capabilities; /* Read accelerator capabilities mask */ legfuses = pci_read_config(pdev, ADF_DEVICE_LEGFUSE_OFFSET, 4); capabilities = ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC + ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC + ICP_ACCEL_CAPABILITIES_CIPHER + ICP_ACCEL_CAPABILITIES_AUTHENTICATION + ICP_ACCEL_CAPABILITIES_COMPRESSION + ICP_ACCEL_CAPABILITIES_RAND + ICP_ACCEL_CAPABILITIES_HKDF + ICP_ACCEL_CAPABILITIES_ECEDMONT + ICP_ACCEL_CAPABILITIES_EXT_ALGCHAIN; if (legfuses & ICP_ACCEL_MASK_CIPHER_SLICE) capabilities &= ~(ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC | ICP_ACCEL_CAPABILITIES_CIPHER | ICP_ACCEL_CAPABILITIES_HKDF | ICP_ACCEL_CAPABILITIES_EXT_ALGCHAIN); if (legfuses & ICP_ACCEL_MASK_AUTH_SLICE) capabilities &= ~ICP_ACCEL_CAPABILITIES_AUTHENTICATION; if (legfuses & ICP_ACCEL_MASK_PKE_SLICE) capabilities &= ~(ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC | ICP_ACCEL_CAPABILITIES_ECEDMONT); if (legfuses & ICP_ACCEL_MASK_COMPRESS_SLICE) capabilities &= ~ICP_ACCEL_CAPABILITIES_COMPRESSION; return capabilities; } static const char * get_obj_name(struct adf_accel_dev *accel_dev, enum adf_accel_unit_services service) { return ADF_DH895XCC_AE_FW_NAME_CUSTOM1; } static u32 get_objs_num(struct adf_accel_dev *accel_dev) { return 1; } static u32 get_obj_cfg_ae_mask(struct adf_accel_dev *accel_dev, enum adf_accel_unit_services services) { return accel_dev->hw_device->ae_mask; } void adf_init_hw_data_dh895xcc(struct adf_hw_device_data *hw_data) { hw_data->dev_class = &dh895xcc_class; hw_data->instance_id = dh895xcc_class.instances++; hw_data->num_banks = ADF_DH895XCC_ETR_MAX_BANKS; hw_data->num_rings_per_bank = ADF_ETR_MAX_RINGS_PER_BANK; hw_data->num_accel = ADF_DH895XCC_MAX_ACCELERATORS; hw_data->num_logical_accel = 1; hw_data->num_engines = ADF_DH895XCC_MAX_ACCELENGINES; hw_data->tx_rx_gap = ADF_DH895XCC_RX_RINGS_OFFSET; hw_data->tx_rings_mask = ADF_DH895XCC_TX_RINGS_MASK; hw_data->alloc_irq = adf_isr_resource_alloc; hw_data->free_irq = adf_isr_resource_free; hw_data->enable_error_correction = adf_enable_error_correction; hw_data->print_err_registers = adf_print_err_registers; hw_data->get_accel_mask = get_accel_mask; hw_data->get_ae_mask = get_ae_mask; hw_data->get_num_accels = get_num_accels; hw_data->get_num_aes = get_num_aes; hw_data->get_etr_bar_id = get_etr_bar_id; hw_data->get_misc_bar_id = get_misc_bar_id; hw_data->get_pf2vf_offset = get_pf2vf_offset; hw_data->get_vintmsk_offset = get_vintmsk_offset; hw_data->get_arb_info = get_arb_info; hw_data->get_admin_info = get_admin_info; hw_data->get_errsou_offset = get_errsou_offset; hw_data->get_clock_speed = get_clock_speed; hw_data->get_sram_bar_id = get_sram_bar_id; hw_data->get_sku = get_sku; + hw_data->heartbeat_ctr_num = ADF_NUM_HB_CNT_PER_AE; hw_data->fw_name = ADF_DH895XCC_FW; hw_data->fw_mmp_name = ADF_DH895XCC_MMP; hw_data->init_admin_comms = adf_init_admin_comms; hw_data->exit_admin_comms = adf_exit_admin_comms; hw_data->disable_iov = adf_disable_sriov; hw_data->send_admin_init = adf_send_admin_init; 
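/*
 * Arbiter setup is shared generation 2 code; the same
 * adf_init_gen2_arb and adf_exit_arb helpers are installed by the
 * c62x support above.
 */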
hw_data->init_arb = adf_init_gen2_arb; hw_data->exit_arb = adf_exit_arb; hw_data->get_arb_mapping = adf_get_arbiter_mapping; hw_data->enable_ints = adf_enable_ints; hw_data->enable_vf2pf_comms = adf_pf_enable_vf2pf_comms; hw_data->disable_vf2pf_comms = adf_pf_disable_vf2pf_comms; hw_data->reset_device = adf_reset_sbr; hw_data->restore_device = adf_dev_restore; hw_data->min_iov_compat_ver = ADF_PFVF_COMPATIBILITY_VERSION; hw_data->get_accel_cap = dh895xcc_get_hw_cap; hw_data->get_heartbeat_status = adf_get_heartbeat_status; hw_data->get_ae_clock = get_ae_clock; hw_data->get_objs_num = get_objs_num; hw_data->get_obj_name = get_obj_name; hw_data->get_obj_cfg_ae_mask = get_obj_cfg_ae_mask; hw_data->clock_frequency = ADF_DH895XCC_AE_FREQ; hw_data->extended_dc_capabilities = 0; hw_data->get_storage_enabled = get_storage_enabled; hw_data->query_storage_cap = 1; hw_data->storage_enable = 0; hw_data->get_fw_image_type = adf_cfg_get_fw_image_type; hw_data->config_device = adf_config_device; hw_data->get_ring_to_svc_map = adf_cfg_get_services_enabled; hw_data->set_asym_rings_mask = adf_cfg_set_asym_rings_mask; hw_data->ring_to_svc_map = ADF_DEFAULT_RING_TO_SRV_MAP; hw_data->pre_reset = adf_dev_pre_reset; hw_data->post_reset = adf_dev_post_reset; + + adf_gen2_init_hw_csr_info(&hw_data->csr_info); } void adf_clean_hw_data_dh895xcc(struct adf_hw_device_data *hw_data) { hw_data->dev_class->instances--; } diff --git a/sys/dev/qat/qat_hw/qat_dh895xcc/adf_drv.c b/sys/dev/qat/qat_hw/qat_dh895xcc/adf_drv.c index 2ddff279e4e2..0ab733bab18f 100644 --- a/sys/dev/qat/qat_hw/qat_dh895xcc/adf_drv.c +++ b/sys/dev/qat/qat_hw/qat_dh895xcc/adf_drv.c @@ -1,262 +1,263 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright(c) 2007-2022 Intel Corporation */ /* $FreeBSD$ */ #include "qat_freebsd.h" #include "adf_cfg.h" #include "adf_common_drv.h" #include "adf_accel_devices.h" #include "adf_dh895xcc_hw_data.h" #include "adf_fw_counters.h" #include "adf_cfg_device.h" #include #include #include #include #include #include "adf_heartbeat_dbg.h" #include "adf_cnvnr_freq_counters.h" static MALLOC_DEFINE(M_QAT_DH895XCC, "qat_dh895xcc", "qat_dh895xcc"); #define ADF_SYSTEM_DEVICE(device_id) \ { \ PCI_VENDOR_ID_INTEL, device_id \ } static const struct pci_device_id adf_pci_tbl[] = { ADF_SYSTEM_DEVICE(ADF_DH895XCC_PCI_DEVICE_ID), { 0, } }; static int adf_probe(device_t dev) { const struct pci_device_id *id; for (id = adf_pci_tbl; id->vendor != 0; id++) { if (pci_get_vendor(dev) == id->vendor && pci_get_device(dev) == id->device) { device_set_desc(dev, "Intel " ADF_DH895XCC_DEVICE_NAME " QuickAssist"); return BUS_PROBE_DEFAULT; } } return ENXIO; } static void adf_cleanup_accel(struct adf_accel_dev *accel_dev) { struct adf_accel_pci *accel_pci_dev = &accel_dev->accel_pci_dev; int i; if (accel_dev->dma_tag) bus_dma_tag_destroy(accel_dev->dma_tag); for (i = 0; i < ADF_PCI_MAX_BARS; i++) { struct adf_bar *bar = &accel_pci_dev->pci_bars[i]; if (bar->virt_addr) bus_free_resource(accel_pci_dev->pci_dev, SYS_RES_MEMORY, bar->virt_addr); } if (accel_dev->hw_device) { switch (pci_get_device(accel_pci_dev->pci_dev)) { case ADF_DH895XCC_PCI_DEVICE_ID: adf_clean_hw_data_dh895xcc(accel_dev->hw_device); break; default: break; } free(accel_dev->hw_device, M_QAT_DH895XCC); accel_dev->hw_device = NULL; } adf_cfg_dev_remove(accel_dev); adf_devmgr_rm_dev(accel_dev, NULL); } static int adf_attach(device_t dev) { struct adf_accel_dev *accel_dev; struct
adf_accel_pci *accel_pci_dev; struct adf_hw_device_data *hw_data; unsigned int i, bar_nr; int ret, rid; struct adf_cfg_device *cfg_dev = NULL; /* Set pci MaxPayLoad to 256. Implemented to avoid the issue of * Pci-passthrough causing Maxpayload to be reset to 128 bytes * when the device is reset. */ if (pci_get_max_payload(dev) != 256) pci_set_max_payload(dev, 256); accel_dev = device_get_softc(dev); INIT_LIST_HEAD(&accel_dev->crypto_list); accel_pci_dev = &accel_dev->accel_pci_dev; accel_pci_dev->pci_dev = dev; if (bus_get_domain(dev, &accel_pci_dev->node) != 0) accel_pci_dev->node = 0; /* Add accel device to accel table. * This should be called before adf_cleanup_accel is called */ if (adf_devmgr_add_dev(accel_dev, NULL)) { device_printf(dev, "Failed to add new accelerator device.\n"); return ENXIO; } /* Allocate and configure device configuration structure */ hw_data = malloc(sizeof(*hw_data), M_QAT_DH895XCC, M_WAITOK | M_ZERO); accel_dev->hw_device = hw_data; adf_init_hw_data_dh895xcc(accel_dev->hw_device); accel_pci_dev->revid = pci_get_revid(dev); hw_data->fuses = pci_read_config(dev, ADF_DEVICE_FUSECTL_OFFSET, 4); /* Get PPAERUCM values and store */ ret = adf_aer_store_ppaerucm_reg(dev, hw_data); if (ret) goto out_err; /* Get Accelerators and Accelerators Engines masks */ hw_data->accel_mask = hw_data->get_accel_mask(accel_dev); hw_data->ae_mask = hw_data->get_ae_mask(accel_dev); + hw_data->admin_ae_mask = hw_data->ae_mask; accel_pci_dev->sku = hw_data->get_sku(hw_data); /* If the device has no acceleration engines then ignore it. */ if (!hw_data->accel_mask || !hw_data->ae_mask || ((~hw_data->ae_mask) & 0x01)) { device_printf(dev, "No acceleration units found\n"); ret = ENXIO; goto out_err; } /* Create device configuration table */ ret = adf_cfg_dev_add(accel_dev); if (ret) goto out_err; pci_set_max_read_req(dev, 1024); ret = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, BUS_SPACE_MAXSIZE, /* BUS_SPACE_UNRESTRICTED */ 1, BUS_SPACE_MAXSIZE, 0, NULL, NULL, &accel_dev->dma_tag); if (ret) goto out_err; if (hw_data->get_accel_cap) { hw_data->accel_capabilities_mask = hw_data->get_accel_cap(accel_dev); } /* Find and map all the device's BARS */ i = 0; for (bar_nr = 0; i < ADF_PCI_MAX_BARS && bar_nr < PCIR_MAX_BAR_0; bar_nr++) { struct adf_bar *bar; /* * This will ignore a BAR * that wasn't assigned a valid resource range by the * firmware. 
*/ rid = PCIR_BAR(bar_nr); if (bus_get_resource(dev, SYS_RES_MEMORY, rid, NULL, NULL) != 0) continue; bar = &accel_pci_dev->pci_bars[i++]; bar->virt_addr = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (bar->virt_addr == NULL) { device_printf(dev, "Failed to map BAR %d\n", bar_nr); ret = ENXIO; goto out_err; } bar->base_addr = rman_get_start(bar->virt_addr); bar->size = rman_get_size(bar->virt_addr); } pci_enable_busmaster(dev); if (!accel_dev->hw_device->config_device) { ret = EFAULT; goto out_err; } ret = accel_dev->hw_device->config_device(accel_dev); if (ret) goto out_err; ret = adf_dev_init(accel_dev); if (ret) goto out_dev_shutdown; ret = adf_dev_start(accel_dev); if (ret) goto out_dev_stop; cfg_dev = accel_dev->cfg->dev; adf_cfg_device_clear(cfg_dev, accel_dev); free(cfg_dev, M_QAT); accel_dev->cfg->dev = NULL; return ret; out_dev_stop: adf_dev_stop(accel_dev); out_dev_shutdown: adf_dev_shutdown(accel_dev); out_err: adf_cleanup_accel(accel_dev); return ret; } static int adf_detach(device_t dev) { struct adf_accel_dev *accel_dev = device_get_softc(dev); if (adf_dev_stop(accel_dev)) { device_printf(dev, "Failed to stop QAT accel dev\n"); return EBUSY; } adf_dev_shutdown(accel_dev); adf_cleanup_accel(accel_dev); return 0; } static device_method_t adf_methods[] = { DEVMETHOD(device_probe, adf_probe), DEVMETHOD(device_attach, adf_attach), DEVMETHOD(device_detach, adf_detach), DEVMETHOD_END }; static driver_t adf_driver = { "qat", adf_methods, sizeof(struct adf_accel_dev) }; DRIVER_MODULE_ORDERED(qat_dh895xcc, pci, adf_driver, NULL, NULL, SI_ORDER_THIRD); MODULE_VERSION(qat_dh895xcc, 1); MODULE_DEPEND(qat_dh895xcc, qat_common, 1, 1, 1); MODULE_DEPEND(qat_dh895xcc, qat_api, 1, 1, 1); MODULE_DEPEND(qat_dh895xcc, linuxkpi, 1, 1, 1); diff --git a/sys/modules/qat/qat_api/Makefile b/sys/modules/qat/qat_api/Makefile index 65da6fa52fad..70886968ab56 100644 --- a/sys/modules/qat/qat_api/Makefile +++ b/sys/modules/qat/qat_api/Makefile @@ -1,75 +1,76 @@ # SPDX-License-Identifier: BSD-3-Clause # Copyright(c) 2007-2022 Intel Corporation # $FreeBSD$ .PATH: ${SRCTOP}/sys/dev/qat/qat_api KMOD= qat_api SRCS+= freebsd_module.c SRCS+= common/compression/dc_datapath.c SRCS+= common/compression/dc_header_footer.c SRCS+= common/compression/dc_session.c SRCS+= common/compression/dc_stats.c SRCS+= common/compression/dc_buffers.c SRCS+= common/compression/dc_dp.c SRCS+= common/compression/icp_sal_dc_err.c SRCS+= common/utils/lac_buffer_desc.c SRCS+= common/utils/lac_mem.c SRCS+= common/utils/lac_mem_pools.c SRCS+= common/utils/lac_sync.c SRCS+= common/utils/sal_service_state.c SRCS+= common/utils/sal_statistics.c SRCS+= common/utils/sal_string_parse.c SRCS+= common/utils/sal_versions.c SRCS+= common/utils/sal_user_process.c SRCS+= common/ctrl/sal_list.c SRCS+= common/ctrl/sal_compression.c SRCS+= common/ctrl/sal_ctrl_services.c SRCS+= common/ctrl/sal_create_services.c SRCS+= common/ctrl/sal_crypto.c SRCS+= common/qat_comms/sal_qat_cmn_msg.c SRCS+= common/crypto/sym/lac_sym_api.c SRCS+= common/crypto/sym/lac_sym_cb.c SRCS+= common/crypto/sym/lac_sym_queue.c SRCS+= common/crypto/sym/lac_sym_cipher.c SRCS+= common/crypto/sym/lac_sym_alg_chain.c SRCS+= common/crypto/sym/lac_sym_auth_enc.c SRCS+= common/crypto/sym/lac_sym_hash.c SRCS+= common/crypto/sym/lac_sym_hash_sw_precomputes.c SRCS+= common/crypto/sym/lac_sym_stats.c SRCS+= common/crypto/sym/lac_sym_compile_check.c SRCS+= common/crypto/sym/lac_sym_partial.c SRCS+= common/crypto/sym/lac_sym_dp.c SRCS+= common/crypto/sym/qat/lac_sym_qat.c SRCS+= 
common/crypto/sym/qat/lac_sym_qat_hash.c SRCS+= common/crypto/sym/qat/lac_sym_qat_hash_defs_lookup.c SRCS+= common/crypto/sym/qat/lac_sym_qat_cipher.c +SRCS+= common/crypto/sym/qat/lac_sym_qat_constants_table.c SRCS+= common/crypto/sym/qat/lac_sym_qat_key.c SRCS+= common/crypto/sym/key/lac_sym_key.c SRCS+= common/stubs/lac_stubs.c SRCS+= device/dev_info.c SRCS+= qat_kernel/src/lac_adf_interface_freebsd.c SRCS+= qat_kernel/src/qat_transport.c SRCS+= qat_kernel/src/lac_symbols.c SRCS+= qat_utils/src/QatUtilsServices.c SRCS+= qat_utils/src/QatUtilsSemaphore.c SRCS+= qat_utils/src/QatUtilsSpinLock.c SRCS+= qat_utils/src/QatUtilsAtomic.c SRCS+= qat_utils/src/QatUtilsCrypto.c SRCS+= bus_if.h cryptodev_if.h device_if.h pci_if.h vnode_if.h CFLAGS+= -I${SRCTOP}/sys/dev/qat/qat_api/include CFLAGS+= -I${SRCTOP}/sys/dev/qat/qat_api/include/lac CFLAGS+= -I${SRCTOP}/sys/dev/qat/qat_api/include/dc CFLAGS+= -I${SRCTOP}/sys/dev/qat/qat_api/qat_direct/include CFLAGS+= -I${SRCTOP}/sys/dev/qat/qat_api/qat_utils/include CFLAGS+= -I${SRCTOP}/sys/dev/qat/qat_api/common/include CFLAGS+= -I${SRCTOP}/sys/dev/qat/qat_api/common/compression/include CFLAGS+= -I${SRCTOP}/sys/dev/qat/qat_api/common/crypto/sym/include CFLAGS+= -I${SRCTOP}/sys/dev/qat/qat_api/firmware/include CFLAGS+= -I${SRCTOP}/sys/dev/qat/include/common CFLAGS+= ${LINUXKPI_INCLUDES} .include <bsd.kmod.mk> CWARNFLAGS+= -Wno-cast-qual diff --git a/sys/modules/qat/qat_common/Makefile b/sys/modules/qat/qat_common/Makefile index 9645d3b765db..22ea235fdce6 100644 --- a/sys/modules/qat/qat_common/Makefile +++ b/sys/modules/qat/qat_common/Makefile @@ -1,29 +1,31 @@ # SPDX-License-Identifier: BSD-3-Clause # Copyright(c) 2007-2022 Intel Corporation # $FreeBSD$ .PATH: ${SRCTOP}/sys/dev/qat/qat_common KMOD= qat_common SRCS+= adf_accel_engine.c adf_freebsd_admin.c adf_aer.c adf_cfg.c qat_common_module.c SRCS+= adf_heartbeat.c adf_freebsd_heartbeat_dbg.c SRCS+= adf_dev_mgr.c adf_hw_arbiter.c SRCS+= adf_init.c adf_transport.c adf_isr.c adf_fw_counters.c adf_dev_err.c +SRCS+= adf_gen2_hw_data.c +SRCS+= adf_gen4_hw_data.c SRCS+= qat_freebsd.c SRCS+= adf_freebsd_cfg_dev_dbg.c adf_freebsd_ver_dbg.c SRCS+= adf_cfg_device.c adf_cfg_section.c adf_cfg_instance.c adf_cfg_bundle.c SRCS+= qat_hal.c qat_uclo.c SRCS+= adf_vf_isr.c adf_pf2vf_msg.c SRCS+= adf_vf2pf_msg.c SRCS+= adf_pf2vf_capabilities.c SRCS+= adf_pf2vf_ring_to_svc_map.c SRCS+= adf_freebsd_transport_debug.c adf_clock.c SRCS+= adf_freebsd_cnvnr_ctrs_dbg.c SRCS+= adf_freebsd_pfvf_ctrs_dbg.c SRCS+= bus_if.h device_if.h pci_if.h vnode_if.h CFLAGS+= -I${SRCTOP}/sys/dev/qat/include CFLAGS+= -I${SRCTOP}/sys/dev/qat/include/common CFLAGS+= ${LINUXKPI_INCLUDES} .include <bsd.kmod.mk> diff --git a/sys/modules/qat/qat_hw/Makefile b/sys/modules/qat/qat_hw/Makefile index 820af989b536..40c1d26b4687 100644 --- a/sys/modules/qat/qat_hw/Makefile +++ b/sys/modules/qat/qat_hw/Makefile @@ -1,27 +1,28 @@ # SPDX-License-Identifier: BSD-3-Clause # Copyright(c) 2007-2022 Intel Corporation # $FreeBSD$ .PATH: ${SRCTOP}/sys/dev/qat/qat_hw KMOD= qat_hw SRCS+= qat_c62x/adf_c62x_hw_data.c qat_c62x/adf_drv.c SRCS+= qat_200xx/adf_200xx_hw_data.c qat_200xx/adf_drv.c +SRCS+= qat_4xxx/adf_4xxx_hw_data.c qat_4xxx/adf_drv.c SRCS+= qat_c3xxx/adf_c3xxx_hw_data.c qat_c3xxx/adf_drv.c SRCS+= qat_dh895xcc/adf_dh895xcc_hw_data.c qat_dh895xcc/adf_drv.c SRCS+= qat_c4xxx/adf_c4xxx_hw_data.c qat_c4xxx/adf_drv.c qat_c4xxx/adf_c4xxx_ae_config.c qat_c4xxx/adf_c4xxx_misc_error_stats.c SRCS+= qat_c4xxx/adf_c4xxx_pke_replay_stats.c qat_c4xxx/adf_c4xxx_ras.c qat_c4xxx/adf_c4xxx_res_part.c SRCS+=
qat_c4xxx/adf_c4xxx_reset.c SRCS+= device_if.h bus_if.h vnode_if.h pci_if.h cryptodev_if.h CFLAGS+= ${LINUXKPI_INCLUDES} CFLAGS+= -I${SRCTOP}/sys/dev/qat/include CFLAGS+= -I${SRCTOP}/sys/dev/qat/include/common CFLAGS+= -I${SRCTOP}/sys/dev/qat/qat_api/include CFLAGS+= -I${SRCTOP}/sys/dev/qat/qat_api/common/include CFLAGS+= -I${SRCTOP}/sys/dev/qat/qat_api/include/lac CFLAGS+= -I${SRCTOP}/sys/dev/qat/qat_api/qat_utils/include CFLAGS+= -I${SRCTOP}/sys/dev/qat/qat_api/qat_direct/include CFLAGS+= -I${SRCTOP}/sys/dev/qat/qat_api/firmware/include CFLAGS+= -I${SRCTOP}/sys/dev/qat/qat_api/common/crypto/sym/include .include <bsd.kmod.mk> diff --git a/sys/modules/qatfw/Makefile b/sys/modules/qatfw/Makefile index 6c87a28c6527..ac7bddd18858 100644 --- a/sys/modules/qatfw/Makefile +++ b/sys/modules/qatfw/Makefile @@ -1,10 +1,11 @@ # SPDX-License-Identifier: BSD-3-Clause # Copyright(c) 2007-2022 Intel Corporation # $FreeBSD$ SUBDIR= qat_c62x \ qat_200xx \ qat_c3xxx \ qat_c4xxx \ - qat_dh895xcc + qat_dh895xcc \ + qat_4xxx .include <bsd.subdir.mk> diff --git a/sys/modules/qatfw/qat_4xxx/Makefile b/sys/modules/qatfw/qat_4xxx/Makefile new file mode 100644 index 000000000000..895bb4d0cb9f --- /dev/null +++ b/sys/modules/qatfw/qat_4xxx/Makefile @@ -0,0 +1,10 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2007-2022 Intel Corporation +# $FreeBSD$ +.PATH: ${SRCTOP}/sys/contrib/dev/qat + +KMOD= qat_4xxx_fw + +FIRMWS= ${SRCTOP}/sys/contrib/dev/qat/qat_4xxx.bin:qat_4xxx_fw:111 ${SRCTOP}/sys/contrib/dev/qat/qat_4xxx_mmp.bin:qat_4xxx_mmp_fw:111 + +.include <bsd.kmod.mk>
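
To make the firmware packaging above concrete: building the qat_4xxx_fw module embeds qat_4xxx.bin under the firmware(9) image name "qat_4xxx_fw" (and the MMP image as "qat_4xxx_mmp_fw", both registered at version 111), which the driver can then look up at attach time. The sketch below is illustrative only, not code from this commit; qat_4xxx_fw_fetch() is a hypothetical helper, and a real consumer would hand the bytes to the QAT firmware loader instead of printing their size.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/firmware.h>

/*
 * Hypothetical example: fetch the image registered by the qat_4xxx_fw
 * module via firmware(9).  The name matches the second field of the
 * FIRMWS entry in the Makefile above.
 */
static int
qat_4xxx_fw_fetch(void)
{
	const struct firmware *fw;

	fw = firmware_get("qat_4xxx_fw");
	if (fw == NULL)
		return (ENOENT);

	/*
	 * fw->data and fw->datasize describe the embedded qat_4xxx.bin;
	 * the real driver would pass them to its firmware loader here.
	 */
	printf("qat_4xxx firmware image: %zu bytes\n", fw->datasize);

	firmware_put(fw, FIRMWARE_UNLOAD);
	return (0);
}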