diff --git a/sys/dev/qat/include/adf_dbgfs.h b/sys/dev/qat/include/adf_dbgfs.h new file mode 100644 index 000000000000..a07933c0f02d --- /dev/null +++ b/sys/dev/qat/include/adf_dbgfs.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: BSD-3-Clause */ +/* Copyright(c) 2007-2025 Intel Corporation */ + +#ifndef ADF_DBGFS_H +#define ADF_DBGFS_H + +void adf_dbgfs_init(struct adf_accel_dev *accel_dev); +void adf_dbgfs_add(struct adf_accel_dev *accel_dev); +void adf_dbgfs_rm(struct adf_accel_dev *accel_dev); +void adf_dbgfs_exit(struct adf_accel_dev *accel_dev); +#endif diff --git a/sys/dev/qat/include/adf_heartbeat.h b/sys/dev/qat/include/adf_heartbeat.h index b2eab7139c1f..7a72678e77de 100644 --- a/sys/dev/qat/include/adf_heartbeat.h +++ b/sys/dev/qat/include/adf_heartbeat.h @@ -1,32 +1,32 @@ /* SPDX-License-Identifier: BSD-3-Clause */ -/* Copyright(c) 2007-2022 Intel Corporation */ +/* Copyright(c) 2007-2025 Intel Corporation */ #ifndef ADF_HEARTBEAT_H_ #define ADF_HEARTBEAT_H_ #include "adf_cfg_common.h" struct adf_accel_dev; struct qat_sysctl { unsigned int hb_sysctlvar; struct sysctl_oid *oid; }; struct adf_heartbeat { unsigned int hb_sent_counter; unsigned int hb_failed_counter; u64 last_hb_check_time; enum adf_device_heartbeat_status last_hb_status; struct qat_sysctl heartbeat; - struct qat_sysctl *heartbeat_sent; - struct qat_sysctl *heartbeat_failed; + struct qat_sysctl heartbeat_sent; + struct qat_sysctl heartbeat_failed; }; int adf_heartbeat_init(struct adf_accel_dev *accel_dev); void adf_heartbeat_clean(struct adf_accel_dev *accel_dev); int adf_get_hb_timer(struct adf_accel_dev *accel_dev, unsigned int *value); int adf_get_heartbeat_status(struct adf_accel_dev *accel_dev); int adf_heartbeat_status(struct adf_accel_dev *accel_dev, enum adf_device_heartbeat_status *hb_status); #endif /* ADF_HEARTBEAT_H_ */ diff --git a/sys/dev/qat/include/common/adf_accel_devices.h b/sys/dev/qat/include/common/adf_accel_devices.h index 3731cf6587b2..b3f82234bc2b 100644 --- 
a/sys/dev/qat/include/common/adf_accel_devices.h +++ b/sys/dev/qat/include/common/adf_accel_devices.h @@ -1,721 +1,725 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright(c) 2007-2025 Intel Corporation */ #ifndef ADF_ACCEL_DEVICES_H_ #define ADF_ACCEL_DEVICES_H_ #include "qat_freebsd.h" #include "adf_cfg_common.h" #include "adf_pfvf_msg.h" #include "opt_qat.h" #define ADF_CFG_NUM_SERVICES 4 #define ADF_DH895XCC_DEVICE_NAME "dh895xcc" #define ADF_DH895XCCVF_DEVICE_NAME "dh895xccvf" #define ADF_C62X_DEVICE_NAME "c6xx" #define ADF_C62XVF_DEVICE_NAME "c6xxvf" #define ADF_C3XXX_DEVICE_NAME "c3xxx" #define ADF_C3XXXVF_DEVICE_NAME "c3xxxvf" #define ADF_200XX_DEVICE_NAME "200xx" #define ADF_200XXVF_DEVICE_NAME "200xxvf" #define ADF_C4XXX_DEVICE_NAME "c4xxx" #define ADF_C4XXXVF_DEVICE_NAME "c4xxxvf" #define ADF_4XXX_DEVICE_NAME "4xxx" #define ADF_4XXXVF_DEVICE_NAME "4xxxvf" #define ADF_DH895XCC_PCI_DEVICE_ID 0x435 #define ADF_DH895XCCIOV_PCI_DEVICE_ID 0x443 #define ADF_C62X_PCI_DEVICE_ID 0x37c8 #define ADF_C62XIOV_PCI_DEVICE_ID 0x37c9 #define ADF_C3XXX_PCI_DEVICE_ID 0x19e2 #define ADF_C3XXXIOV_PCI_DEVICE_ID 0x19e3 #define ADF_200XX_PCI_DEVICE_ID 0x18ee #define ADF_200XXIOV_PCI_DEVICE_ID 0x18ef #define ADF_D15XX_PCI_DEVICE_ID 0x6f54 #define ADF_D15XXIOV_PCI_DEVICE_ID 0x6f55 #define ADF_C4XXX_PCI_DEVICE_ID 0x18a0 #define ADF_C4XXXIOV_PCI_DEVICE_ID 0x18a1 #define ADF_4XXX_PCI_DEVICE_ID 0x4940 #define ADF_4XXXIOV_PCI_DEVICE_ID 0x4941 #define ADF_401XX_PCI_DEVICE_ID 0x4942 #define ADF_401XXIOV_PCI_DEVICE_ID 0x4943 #define IS_QAT_GEN3(ID) ({ (ID == ADF_C4XXX_PCI_DEVICE_ID); }) static inline bool IS_QAT_GEN4(const unsigned int id) { return (id == ADF_4XXX_PCI_DEVICE_ID || id == ADF_401XX_PCI_DEVICE_ID || id == ADF_4XXXIOV_PCI_DEVICE_ID || id == ADF_401XXIOV_PCI_DEVICE_ID); } #define IS_QAT_GEN3_OR_GEN4(ID) (IS_QAT_GEN3(ID) || IS_QAT_GEN4(ID)) #define ADF_VF2PF_SET_SIZE 32 #define ADF_MAX_VF2PF_SET 4 #define ADF_VF2PF_SET_OFFSET(set_nr) ((set_nr)*ADF_VF2PF_SET_SIZE) #define 
ADF_VF2PF_VFNR_TO_SET(vf_nr) ((vf_nr) / ADF_VF2PF_SET_SIZE) #define ADF_VF2PF_VFNR_TO_MASK(vf_nr) \ ({ \ u32 vf_nr_ = (vf_nr); \ BIT((vf_nr_)-ADF_VF2PF_SET_SIZE *ADF_VF2PF_VFNR_TO_SET( \ vf_nr_)); \ }) #define ADF_DEVICE_FUSECTL_OFFSET 0x40 #define ADF_DEVICE_LEGFUSE_OFFSET 0x4C #define ADF_DEVICE_FUSECTL_MASK 0x80000000 #define ADF_PCI_MAX_BARS 3 #define ADF_DEVICE_NAME_LENGTH 32 #define ADF_ETR_MAX_RINGS_PER_BANK 16 #define ADF_MAX_MSIX_VECTOR_NAME 32 #define ADF_DEVICE_NAME_PREFIX "qat_" #define ADF_STOP_RETRY 50 #define ADF_NUM_THREADS_PER_AE (8) #define ADF_AE_ADMIN_THREAD (7) #define ADF_NUM_PKE_STRAND (2) #define ADF_AE_STRAND0_THREAD (8) #define ADF_AE_STRAND1_THREAD (9) #define ADF_CFG_NUM_SERVICES 4 #define ADF_SRV_TYPE_BIT_LEN 3 #define ADF_SRV_TYPE_MASK 0x7 #define ADF_RINGS_PER_SRV_TYPE 2 #define ADF_THRD_ABILITY_BIT_LEN 4 #define ADF_THRD_ABILITY_MASK 0xf #define ADF_VF_OFFSET 0x8 #define ADF_MAX_FUNC_PER_DEV 0x7 #define ADF_PCI_DEV_OFFSET 0x3 #define ADF_SRV_TYPE_BIT_LEN 3 #define ADF_SRV_TYPE_MASK 0x7 #define GET_SRV_TYPE(ena_srv_mask, srv) \ (((ena_srv_mask) >> (ADF_SRV_TYPE_BIT_LEN * (srv))) & ADF_SRV_TYPE_MASK) #define GET_CSR_OPS(accel_dev) (&(accel_dev)->hw_device->csr_info.csr_ops) #define GET_PFVF_OPS(accel_dev) (&(accel_dev)->hw_device->csr_info.pfvf_ops) #define ADF_DEFAULT_RING_TO_SRV_MAP \ (CRYPTO | CRYPTO << ADF_CFG_SERV_RING_PAIR_1_SHIFT | \ NA << ADF_CFG_SERV_RING_PAIR_2_SHIFT | \ COMP << ADF_CFG_SERV_RING_PAIR_3_SHIFT) enum adf_accel_capabilities { ADF_ACCEL_CAPABILITIES_NULL = 0, ADF_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC = 1, ADF_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC = 2, ADF_ACCEL_CAPABILITIES_CIPHER = 4, ADF_ACCEL_CAPABILITIES_AUTHENTICATION = 8, ADF_ACCEL_CAPABILITIES_COMPRESSION = 32, ADF_ACCEL_CAPABILITIES_DEPRECATED = 64, ADF_ACCEL_CAPABILITIES_RANDOM_NUMBER = 128 }; struct adf_bar { rman_res_t base_addr; struct resource *virt_addr; rman_res_t size; } __packed; struct adf_accel_msix { struct msix_entry *entries; u32 num_entries; } 
__packed; struct adf_accel_pci { device_t pci_dev; struct adf_accel_msix msix_entries; struct adf_bar pci_bars[ADF_PCI_MAX_BARS]; uint8_t revid; uint8_t sku; int node; } __packed; enum dev_state { DEV_DOWN = 0, DEV_UP }; enum dev_sku_info { DEV_SKU_1 = 0, DEV_SKU_2, DEV_SKU_3, DEV_SKU_4, DEV_SKU_VF, DEV_SKU_1_CY, DEV_SKU_2_CY, DEV_SKU_3_CY, DEV_SKU_UNKNOWN }; static inline const char * get_sku_info(enum dev_sku_info info) { switch (info) { case DEV_SKU_1: return "SKU1"; case DEV_SKU_1_CY: return "SKU1CY"; case DEV_SKU_2: return "SKU2"; case DEV_SKU_2_CY: return "SKU2CY"; case DEV_SKU_3: return "SKU3"; case DEV_SKU_3_CY: return "SKU3CY"; case DEV_SKU_4: return "SKU4"; case DEV_SKU_VF: return "SKUVF"; case DEV_SKU_UNKNOWN: default: break; } return "Unknown SKU"; } enum adf_accel_unit_services { ADF_ACCEL_SERVICE_NULL = 0, ADF_ACCEL_INLINE_CRYPTO = 1, ADF_ACCEL_CRYPTO = 2, ADF_ACCEL_COMPRESSION = 4, ADF_ACCEL_ASYM = 8, ADF_ACCEL_ADMIN = 16 }; struct adf_ae_info { u32 num_asym_thd; u32 num_sym_thd; u32 num_dc_thd; } __packed; struct adf_accel_unit { u8 au_mask; u32 accel_mask; u64 ae_mask; u64 comp_ae_mask; u32 num_ae; enum adf_accel_unit_services services; } __packed; struct adf_accel_unit_info { u32 inline_ingress_msk; u32 inline_egress_msk; u32 sym_ae_msk; u32 asym_ae_msk; u32 dc_ae_msk; u8 num_cy_au; u8 num_dc_au; u8 num_asym_au; u8 num_inline_au; struct adf_accel_unit *au; const struct adf_ae_info *ae_info; } __packed; struct adf_hw_aram_info { /* Inline Egress mask. 
"1" = AE is working with egress traffic */ u32 inline_direction_egress_mask; /* Inline congestion managmenet profiles set in config file */ u32 inline_congest_mngt_profile; /* Initialise CY AE mask, "1" = AE is used for CY operations */ u32 cy_ae_mask; /* Initialise DC AE mask, "1" = AE is used for DC operations */ u32 dc_ae_mask; /* Number of long words used to define the ARAM regions */ u32 num_aram_lw_entries; /* ARAM region definitions */ u32 mmp_region_size; u32 mmp_region_offset; u32 skm_region_size; u32 skm_region_offset; /* * Defines size and offset of compression intermediate buffers stored * in ARAM (device's on-chip memory). */ u32 inter_buff_aram_region_size; u32 inter_buff_aram_region_offset; u32 sadb_region_size; u32 sadb_region_offset; } __packed; struct adf_hw_device_class { const char *name; const enum adf_device_type type; uint32_t instances; } __packed; struct arb_info { u32 arbiter_offset; u32 wrk_thd_2_srv_arb_map; u32 wrk_cfg_offset; } __packed; struct admin_info { u32 admin_msg_ur; u32 admin_msg_lr; u32 mailbox_offset; } __packed; struct adf_hw_csr_ops { u64 (*build_csr_ring_base_addr)(bus_addr_t addr, u32 size); u32 (*read_csr_ring_head)(struct resource *csr_base_addr, u32 bank, u32 ring); void (*write_csr_ring_head)(struct resource *csr_base_addr, u32 bank, u32 ring, u32 value); u32 (*read_csr_ring_tail)(struct resource *csr_base_addr, u32 bank, u32 ring); void (*write_csr_ring_tail)(struct resource *csr_base_addr, u32 bank, u32 ring, u32 value); u32 (*read_csr_e_stat)(struct resource *csr_base_addr, u32 bank); void (*write_csr_ring_config)(struct resource *csr_base_addr, u32 bank, u32 ring, u32 value); bus_addr_t (*read_csr_ring_base)(struct resource *csr_base_addr, u32 bank, u32 ring); void (*write_csr_ring_base)(struct resource *csr_base_addr, u32 bank, u32 ring, bus_addr_t addr); void (*write_csr_int_flag)(struct resource *csr_base_addr, u32 bank, u32 value); void (*write_csr_int_srcsel)(struct resource *csr_base_addr, u32 bank); void 
(*write_csr_int_col_en)(struct resource *csr_base_addr, u32 bank, u32 value); void (*write_csr_int_col_ctl)(struct resource *csr_base_addr, u32 bank, u32 value); void (*write_csr_int_flag_and_col)(struct resource *csr_base_addr, u32 bank, u32 value); u32 (*read_csr_ring_srv_arb_en)(struct resource *csr_base_addr, u32 bank); void (*write_csr_ring_srv_arb_en)(struct resource *csr_base_addr, u32 bank, u32 value); u32 (*get_src_sel_mask)(void); u32 (*get_int_col_ctl_enable_mask)(void); u32 (*get_bank_irq_mask)(u32 irq_mask); }; struct adf_cfg_device_data; struct adf_accel_dev; struct adf_etr_data; struct adf_etr_ring_data; struct adf_pfvf_ops { int (*enable_comms)(struct adf_accel_dev *accel_dev); u32 (*get_pf2vf_offset)(u32 i); u32 (*get_vf2pf_offset)(u32 i); void (*enable_vf2pf_interrupts)(struct resource *pmisc_addr, u32 vf_mask); void (*disable_all_vf2pf_interrupts)(struct resource *pmisc_addr); u32 (*disable_pending_vf2pf_interrupts)(struct resource *pmisc_addr); int (*send_msg)(struct adf_accel_dev *accel_dev, struct pfvf_message msg, u32 pfvf_offset, struct mutex *csr_lock); struct pfvf_message (*recv_msg)(struct adf_accel_dev *accel_dev, u32 pfvf_offset, u8 compat_ver); }; struct adf_hw_csr_info { struct adf_hw_csr_ops csr_ops; struct adf_pfvf_ops pfvf_ops; u32 csr_addr_offset; u32 ring_bundle_size; u32 bank_int_flag_clear_mask; u32 num_rings_per_int_srcsel; u32 arb_enable_mask; }; struct adf_hw_device_data { struct adf_hw_device_class *dev_class; uint32_t (*get_accel_mask)(struct adf_accel_dev *accel_dev); uint32_t (*get_ae_mask)(struct adf_accel_dev *accel_dev); uint32_t (*get_sram_bar_id)(struct adf_hw_device_data *self); uint32_t (*get_misc_bar_id)(struct adf_hw_device_data *self); uint32_t (*get_etr_bar_id)(struct adf_hw_device_data *self); uint32_t (*get_num_aes)(struct adf_hw_device_data *self); uint32_t (*get_num_accels)(struct adf_hw_device_data *self); void (*notify_and_wait_ethernet)(struct adf_accel_dev *accel_dev); bool 
(*get_eth_doorbell_msg)(struct adf_accel_dev *accel_dev); void (*get_arb_info)(struct arb_info *arb_csrs_info); void (*get_admin_info)(struct admin_info *admin_csrs_info); void (*get_errsou_offset)(u32 *errsou3, u32 *errsou5); uint32_t (*get_num_accel_units)(struct adf_hw_device_data *self); int (*init_accel_units)(struct adf_accel_dev *accel_dev); void (*exit_accel_units)(struct adf_accel_dev *accel_dev); uint32_t (*get_clock_speed)(struct adf_hw_device_data *self); enum dev_sku_info (*get_sku)(struct adf_hw_device_data *self); bool (*check_prod_sku)(struct adf_accel_dev *accel_dev); int (*alloc_irq)(struct adf_accel_dev *accel_dev); void (*free_irq)(struct adf_accel_dev *accel_dev); void (*enable_error_correction)(struct adf_accel_dev *accel_dev); int (*check_uncorrectable_error)(struct adf_accel_dev *accel_dev); void (*print_err_registers)(struct adf_accel_dev *accel_dev); void (*disable_error_interrupts)(struct adf_accel_dev *accel_dev); int (*init_ras)(struct adf_accel_dev *accel_dev); void (*exit_ras)(struct adf_accel_dev *accel_dev); void (*disable_arb)(struct adf_accel_dev *accel_dev); void (*update_ras_errors)(struct adf_accel_dev *accel_dev, int error); bool (*ras_interrupts)(struct adf_accel_dev *accel_dev, bool *reset_required); int (*init_admin_comms)(struct adf_accel_dev *accel_dev); void (*exit_admin_comms)(struct adf_accel_dev *accel_dev); int (*send_admin_init)(struct adf_accel_dev *accel_dev); void (*set_asym_rings_mask)(struct adf_accel_dev *accel_dev); int (*get_ring_to_svc_map)(struct adf_accel_dev *accel_dev, u16 *ring_to_svc_map); uint32_t (*get_accel_cap)(struct adf_accel_dev *accel_dev); int (*init_arb)(struct adf_accel_dev *accel_dev); void (*exit_arb)(struct adf_accel_dev *accel_dev); void (*get_arb_mapping)(struct adf_accel_dev *accel_dev, const uint32_t **cfg); int (*init_device)(struct adf_accel_dev *accel_dev); int (*get_heartbeat_status)(struct adf_accel_dev *accel_dev); int (*int_timer_init)(struct adf_accel_dev *accel_dev); void 
(*int_timer_exit)(struct adf_accel_dev *accel_dev); uint32_t (*get_ae_clock)(struct adf_hw_device_data *self); uint32_t (*get_hb_clock)(struct adf_hw_device_data *self); void (*disable_iov)(struct adf_accel_dev *accel_dev); void (*configure_iov_threads)(struct adf_accel_dev *accel_dev, bool enable); void (*enable_ints)(struct adf_accel_dev *accel_dev); bool (*check_slice_hang)(struct adf_accel_dev *accel_dev); int (*set_ssm_wdtimer)(struct adf_accel_dev *accel_dev); void (*enable_pf2vf_interrupt)(struct adf_accel_dev *accel_dev); void (*disable_pf2vf_interrupt)(struct adf_accel_dev *accel_dev); int (*interrupt_active_pf2vf)(struct adf_accel_dev *accel_dev); int (*get_int_active_bundles)(struct adf_accel_dev *accel_dev); void (*reset_device)(struct adf_accel_dev *accel_dev); void (*reset_hw_units)(struct adf_accel_dev *accel_dev); int (*measure_clock)(struct adf_accel_dev *accel_dev); void (*restore_device)(struct adf_accel_dev *accel_dev); uint32_t (*get_obj_cfg_ae_mask)(struct adf_accel_dev *accel_dev, enum adf_accel_unit_services services); enum adf_accel_unit_services ( *get_service_type)(struct adf_accel_dev *accel_dev, s32 obj_num); int (*add_pke_stats)(struct adf_accel_dev *accel_dev); void (*remove_pke_stats)(struct adf_accel_dev *accel_dev); int (*add_misc_error)(struct adf_accel_dev *accel_dev); int (*count_ras_event)(struct adf_accel_dev *accel_dev, u32 *ras_event, char *aeidstr); void (*remove_misc_error)(struct adf_accel_dev *accel_dev); int (*configure_accel_units)(struct adf_accel_dev *accel_dev); int (*ring_pair_reset)(struct adf_accel_dev *accel_dev, u32 bank_number); void (*config_ring_irq)(struct adf_accel_dev *accel_dev, u32 bank_number, u16 ring_mask); uint32_t (*get_objs_num)(struct adf_accel_dev *accel_dev); const char *(*get_obj_name)(struct adf_accel_dev *accel_dev, enum adf_accel_unit_services services); void (*pre_reset)(struct adf_accel_dev *accel_dev); void (*post_reset)(struct adf_accel_dev *accel_dev); void (*set_msix_rttable)(struct 
adf_accel_dev *accel_dev); void (*get_ring_svc_map_data)(int ring_pair_index, u16 ring_to_svc_map, u8 *serv_type, int *ring_index, int *num_rings_per_srv, int bundle_num); struct adf_hw_csr_info csr_info; const char *fw_name; const char *fw_mmp_name; bool reset_ack; uint32_t fuses; uint32_t accel_capabilities_mask; uint32_t instance_id; uint16_t accel_mask; u32 aerucm_mask; u32 ae_mask; u32 admin_ae_mask; u32 service_mask; u32 service_to_load_mask; u32 heartbeat_ctr_num; uint16_t tx_rings_mask; uint8_t tx_rx_gap; uint8_t num_banks; u8 num_rings_per_bank; uint8_t num_accel; uint8_t num_logical_accel; uint8_t num_engines; int (*get_storage_enabled)(struct adf_accel_dev *accel_dev, uint32_t *storage_enabled); u8 query_storage_cap; u32 clock_frequency; u8 storage_enable; u32 extended_dc_capabilities; int (*config_device)(struct adf_accel_dev *accel_dev); u32 asym_ae_active_thd_mask; u16 asym_rings_mask; int (*get_fw_image_type)(struct adf_accel_dev *accel_dev, enum adf_cfg_fw_image_type *fw_image_type); u16 ring_to_svc_map; } __packed; /* helper enum for performing CSR operations */ enum operation { AND, OR, }; /* 32-bit CSR write macro */ #define ADF_CSR_WR(csr_base, csr_offset, val) \ bus_write_4(csr_base, csr_offset, val) /* 64-bit CSR write macro */ #ifdef __x86_64__ #define ADF_CSR_WR64(csr_base, csr_offset, val) \ bus_write_8(csr_base, csr_offset, val) #else static __inline void adf_csr_wr64(struct resource *csr_base, bus_size_t offset, uint64_t value) { bus_write_4(csr_base, offset, (uint32_t)value); bus_write_4(csr_base, offset + 4, (uint32_t)(value >> 32)); } #define ADF_CSR_WR64(csr_base, csr_offset, val) \ adf_csr_wr64(csr_base, csr_offset, val) #endif /* 32-bit CSR read macro */ #define ADF_CSR_RD(csr_base, csr_offset) bus_read_4(csr_base, csr_offset) /* 64-bit CSR read macro */ #ifdef __x86_64__ #define ADF_CSR_RD64(csr_base, csr_offset) bus_read_8(csr_base, csr_offset) #else static __inline uint64_t adf_csr_rd64(struct resource *csr_base, bus_size_t 
offset) { return (((uint64_t)bus_read_4(csr_base, offset)) | (((uint64_t)bus_read_4(csr_base, offset + 4)) << 32)); } #define ADF_CSR_RD64(csr_base, csr_offset) adf_csr_rd64(csr_base, csr_offset) #endif #define GET_DEV(accel_dev) ((accel_dev)->accel_pci_dev.pci_dev) #define GET_BARS(accel_dev) ((accel_dev)->accel_pci_dev.pci_bars) #define GET_HW_DATA(accel_dev) (accel_dev->hw_device) #define GET_MAX_BANKS(accel_dev) (GET_HW_DATA(accel_dev)->num_banks) #define GET_DEV_SKU(accel_dev) (accel_dev->accel_pci_dev.sku) #define GET_NUM_RINGS_PER_BANK(accel_dev) \ (GET_HW_DATA(accel_dev)->num_rings_per_bank) #define GET_MAX_ACCELENGINES(accel_dev) (GET_HW_DATA(accel_dev)->num_engines) #define accel_to_pci_dev(accel_ptr) accel_ptr->accel_pci_dev.pci_dev #define GET_SRV_TYPE(ena_srv_mask, srv) \ (((ena_srv_mask) >> (ADF_SRV_TYPE_BIT_LEN * (srv))) & ADF_SRV_TYPE_MASK) #define SET_ASYM_MASK(asym_mask, srv) \ ({ \ typeof(srv) srv_ = (srv); \ (asym_mask) |= ((1 << (srv_)*ADF_RINGS_PER_SRV_TYPE) | \ (1 << ((srv_)*ADF_RINGS_PER_SRV_TYPE + 1))); \ }) #define GET_NUM_RINGS_PER_BANK(accel_dev) \ (GET_HW_DATA(accel_dev)->num_rings_per_bank) #define GET_MAX_PROCESSES(accel_dev) \ ({ \ typeof(accel_dev) dev = (accel_dev); \ (GET_MAX_BANKS(dev) * (GET_NUM_RINGS_PER_BANK(dev) / 2)); \ }) #define GET_DU_TABLE(accel_dev) (accel_dev->du_table) static inline void adf_csr_fetch_and_and(struct resource *csr, size_t offs, unsigned long mask) { unsigned int val = ADF_CSR_RD(csr, offs); val &= mask; ADF_CSR_WR(csr, offs, val); } static inline void adf_csr_fetch_and_or(struct resource *csr, size_t offs, unsigned long mask) { unsigned int val = ADF_CSR_RD(csr, offs); val |= mask; ADF_CSR_WR(csr, offs, val); } static inline void adf_csr_fetch_and_update(enum operation op, struct resource *csr, size_t offs, unsigned long mask) { switch (op) { case AND: adf_csr_fetch_and_and(csr, offs, mask); break; case OR: adf_csr_fetch_and_or(csr, offs, mask); break; } } struct pfvf_stats { struct dentry *stats_file; 
/* Messages put in CSR */ unsigned int tx; /* Messages read from CSR */ unsigned int rx; /* Interrupt fired but int bit was clear */ unsigned int spurious; /* Block messages sent */ unsigned int blk_tx; /* Block messages received */ unsigned int blk_rx; /* Blocks received with CRC errors */ unsigned int crc_err; /* CSR in use by other side */ unsigned int busy; /* Receiver did not acknowledge */ unsigned int no_ack; /* Collision detected */ unsigned int collision; /* Couldn't send a response */ unsigned int tx_timeout; /* Didn't receive a response */ unsigned int rx_timeout; /* Responses received */ unsigned int rx_rsp; /* Messages re-transmitted */ unsigned int retry; /* Event put timeout */ unsigned int event_timeout; }; #define NUM_PFVF_COUNTERS 14 void adf_get_admin_info(struct admin_info *admin_csrs_info); struct adf_admin_comms { bus_addr_t phy_addr; bus_addr_t const_tbl_addr; bus_addr_t aram_map_phys_addr; bus_addr_t phy_hb_addr; bus_dmamap_t aram_map; bus_dmamap_t const_tbl_map; bus_dmamap_t hb_map; char *virt_addr; char *virt_hb_addr; struct resource *mailbox_addr; struct sx lock; struct bus_dmamem dma_mem; struct bus_dmamem dma_hb; }; struct icp_qat_fw_loader_handle; struct adf_fw_loader_data { struct icp_qat_fw_loader_handle *fw_loader; const struct firmware *uof_fw; const struct firmware *mmp_fw; }; struct adf_accel_vf_info { struct adf_accel_dev *accel_dev; struct mutex pf2vf_lock; /* protect CSR access for PF2VF messages */ u32 vf_nr; bool init; u8 compat_ver; struct pfvf_stats pfvf_counters; }; struct adf_fw_versions { u8 fw_version_major; u8 fw_version_minor; u8 fw_version_patch; u8 mmp_version_major; u8 mmp_version_minor; u8 mmp_version_patch; }; struct adf_int_timer { struct adf_accel_dev *accel_dev; struct workqueue_struct *timer_irq_wq; struct timer_list timer; u32 timeout_val; u32 int_cnt; bool enabled; }; #define ADF_COMPAT_CHECKER_MAX 8 typedef int (*adf_iov_compat_checker_t)(struct adf_accel_dev *accel_dev, u8 vf_compat_ver); struct 
adf_accel_compat_manager { u8 num_chker; adf_iov_compat_checker_t iov_compat_checkers[ADF_COMPAT_CHECKER_MAX]; }; struct adf_heartbeat; struct adf_accel_dev { struct adf_hw_aram_info *aram_info; struct adf_accel_unit_info *au_info; struct adf_etr_data *transport; struct adf_hw_device_data *hw_device; struct adf_cfg_device_data *cfg; struct adf_fw_loader_data *fw_loader; struct adf_admin_comms *admin; struct adf_uio_control_accel *accel; struct adf_heartbeat *heartbeat; struct adf_int_timer *int_timer; struct adf_fw_versions fw_versions; unsigned int autoreset_on_error; struct adf_fw_counters_data *fw_counters_data; struct sysctl_oid *debugfs_ae_config; struct list_head crypto_list; atomic_t *ras_counters; unsigned long status; atomic_t ref_count; bus_dma_tag_t dma_tag; struct sysctl_ctx_list sysctl_ctx; struct sysctl_oid *ras_correctable; struct sysctl_oid *ras_uncorrectable; struct sysctl_oid *ras_fatal; struct sysctl_oid *ras_reset; struct sysctl_oid *pke_replay_dbgfile; struct sysctl_oid *misc_error_dbgfile; + struct sysctl_oid *fw_version_oid; + struct sysctl_oid *mmp_version_oid; + struct sysctl_oid *hw_version_oid; + struct sysctl_oid *cnv_error_oid; struct list_head list; struct adf_accel_pci accel_pci_dev; struct adf_accel_compat_manager *cm; u8 compat_ver; #ifdef QAT_DISABLE_SAFE_DC_MODE struct sysctl_oid *safe_dc_mode; u8 disable_safe_dc_mode; #endif /* QAT_DISABLE_SAFE_DC_MODE */ union { struct { /* vf_info is non-zero when SR-IOV is init'ed */ struct adf_accel_vf_info *vf_info; int num_vfs; } pf; struct { bool irq_enabled; struct resource *irq; void *cookie; struct task pf2vf_bh_tasklet; struct mutex vf2pf_lock; /* protect CSR access */ struct completion msg_received; struct pfvf_message response; /* temp field holding pf2vf response */ enum ring_reset_result rpreset_sts; struct mutex rpreset_lock; /* protect rpreset_sts */ struct pfvf_stats pfvf_counters; u8 pf_compat_ver; } vf; } u1; bool is_vf; u32 accel_id; void *lac_dev; }; #endif diff --git 
a/sys/dev/qat/qat_common/adf_cfg.c b/sys/dev/qat/qat_common/adf_cfg.c index 736ede860840..37ab44fdb1f6 100644 --- a/sys/dev/qat/qat_common/adf_cfg.c +++ b/sys/dev/qat/qat_common/adf_cfg.c @@ -1,622 +1,592 @@ /* SPDX-License-Identifier: BSD-3-Clause */ -/* Copyright(c) 2007-2022 Intel Corporation */ +/* Copyright(c) 2007-2025 Intel Corporation */ #include "adf_accel_devices.h" #include "adf_cfg.h" #include "adf_common_drv.h" -#include "adf_cfg_dev_dbg.h" #include "adf_cfg_device.h" #include "adf_cfg_sysctl.h" -#include "adf_heartbeat_dbg.h" -#include "adf_ver_dbg.h" -#include "adf_fw_counters.h" -#include "adf_cnvnr_freq_counters.h" /** * adf_cfg_dev_add() - Create an acceleration device configuration table. * @accel_dev: Pointer to acceleration device. * * Function creates a configuration table for the given acceleration device. * The table stores device specific config values. * To be used by QAT device specific drivers. * * Return: 0 on success, error code otherwise. */ int adf_cfg_dev_add(struct adf_accel_dev *accel_dev) { struct adf_cfg_device_data *dev_cfg_data; dev_cfg_data = malloc(sizeof(*dev_cfg_data), M_QAT, M_WAITOK | M_ZERO); INIT_LIST_HEAD(&dev_cfg_data->sec_list); sx_init(&dev_cfg_data->lock, "qat cfg data"); accel_dev->cfg = dev_cfg_data; /* Default device configuration initialization */ if (!accel_dev->is_vf) { if (IS_QAT_GEN4(pci_get_device(GET_DEV(accel_dev)))) { dev_cfg_data->num_user_processes = ADF_CFG_STATIC_CONF_USER_PROCESSES_NUM; strncpy(dev_cfg_data->cfg_mode, ADF_CFG_KERNEL_USER, ADF_CFG_MAX_VAL); if (accel_dev->accel_id % 2 == 0) { strncpy(dev_cfg_data->cfg_services, ADF_CFG_SYM_ASYM, ADF_CFG_MAX_VAL); } else { strncpy(dev_cfg_data->cfg_services, ADF_CFG_DC, ADF_CFG_MAX_VAL); } } else { strncpy(dev_cfg_data->cfg_mode, ADF_CFG_KERNEL, ADF_CFG_MAX_VAL); dev_cfg_data->num_user_processes = 0; strncpy(dev_cfg_data->cfg_services, ADF_CFG_SYM_DC, ADF_CFG_MAX_VAL); } } else { dev_cfg_data->num_user_processes = 
ADF_CFG_STATIC_CONF_USER_PROCESSES_NUM; strncpy(dev_cfg_data->cfg_mode, ADF_CFG_KERNEL, ADF_CFG_MAX_VAL); strncpy(dev_cfg_data->cfg_services, "sym;asym", ADF_CFG_MAX_VAL); } - if (adf_cfg_sysctl_add(accel_dev)) - goto err; - - if (adf_cfg_dev_dbg_add(accel_dev)) - goto err; - - if (!accel_dev->is_vf) { - if (adf_heartbeat_dbg_add(accel_dev)) - goto err; - - if (adf_ver_dbg_add(accel_dev)) - goto err; - - if (adf_fw_counters_add(accel_dev)) - goto err; - - if (adf_cnvnr_freq_counters_add(accel_dev)) - goto err; + if (adf_cfg_sysctl_add(accel_dev)) { + free(dev_cfg_data, M_QAT); + accel_dev->cfg = NULL; + return EFAULT; } - return 0; -err: - free(dev_cfg_data, M_QAT); - accel_dev->cfg = NULL; - return EFAULT; + return 0; } static void adf_cfg_section_del_all(struct list_head *head); void adf_cfg_del_all(struct adf_accel_dev *accel_dev) { struct adf_cfg_device_data *dev_cfg_data = accel_dev->cfg; sx_xlock(&dev_cfg_data->lock); adf_cfg_section_del_all(&dev_cfg_data->sec_list); sx_xunlock(&dev_cfg_data->lock); clear_bit(ADF_STATUS_CONFIGURED, &accel_dev->status); } void adf_cfg_depot_del_all(struct list_head *head) { adf_cfg_section_del_all(head); } /** * adf_cfg_dev_remove() - Clears acceleration device configuration table. * @accel_dev: Pointer to acceleration device. * * Function removes configuration table from the given acceleration device * and frees all allocated memory. * To be used by QAT device specific drivers. 
* * Return: void */ void adf_cfg_dev_remove(struct adf_accel_dev *accel_dev) { struct adf_cfg_device_data *dev_cfg_data = accel_dev->cfg; if (!dev_cfg_data) return; sx_xlock(&dev_cfg_data->lock); adf_cfg_section_del_all(&dev_cfg_data->sec_list); sx_xunlock(&dev_cfg_data->lock); adf_cfg_sysctl_remove(accel_dev); - adf_cfg_dev_dbg_remove(accel_dev); - if (!accel_dev->is_vf) { - adf_ver_dbg_del(accel_dev); - adf_heartbeat_dbg_del(accel_dev); - adf_fw_counters_remove(accel_dev); - adf_cnvnr_freq_counters_remove(accel_dev); - } free(dev_cfg_data, M_QAT); accel_dev->cfg = NULL; } static void adf_cfg_keyval_add(struct adf_cfg_key_val *new, struct adf_cfg_section *sec) { list_add_tail(&new->list, &sec->param_head); } static void adf_cfg_keyval_remove(const char *key, struct adf_cfg_section *sec) { struct list_head *list_ptr, *tmp; struct list_head *head = &sec->param_head; list_for_each_prev_safe(list_ptr, tmp, head) { struct adf_cfg_key_val *ptr = list_entry(list_ptr, struct adf_cfg_key_val, list); if (strncmp(ptr->key, key, sizeof(ptr->key)) != 0) continue; list_del(list_ptr); free(ptr, M_QAT); break; } } static int adf_cfg_section_restore_all(struct adf_accel_dev *accel_dev, struct adf_cfg_depot_list *cfg_depot_list) { struct adf_cfg_section *ptr_sec, *iter_sec; struct adf_cfg_key_val *ptr_key; struct list_head *list, *tmp; struct list_head *restore_list = &accel_dev->cfg->sec_list; struct list_head *head = &cfg_depot_list[accel_dev->accel_id].sec_list; INIT_LIST_HEAD(restore_list); list_for_each_prev_safe(list, tmp, head) { ptr_sec = list_entry(list, struct adf_cfg_section, list); iter_sec = malloc(sizeof(*iter_sec), M_QAT, M_WAITOK | M_ZERO); strlcpy(iter_sec->name, ptr_sec->name, sizeof(iter_sec->name)); INIT_LIST_HEAD(&iter_sec->param_head); /* now we restore all the parameters */ list_for_each_entry(ptr_key, &ptr_sec->param_head, list) { struct adf_cfg_key_val *key_val; key_val = malloc(sizeof(*key_val), M_QAT, M_WAITOK | M_ZERO); memcpy(key_val, ptr_key, 
sizeof(*key_val)); list_add_tail(&key_val->list, &iter_sec->param_head); } list_add_tail(&iter_sec->list, restore_list); } adf_cfg_section_del_all(head); return 0; } int adf_cfg_depot_restore_all(struct adf_accel_dev *accel_dev, struct adf_cfg_depot_list *cfg_depot_list) { struct adf_cfg_device_data *dev_cfg_data = accel_dev->cfg; int ret = 0; sx_xlock(&dev_cfg_data->lock); ret = adf_cfg_section_restore_all(accel_dev, cfg_depot_list); sx_xunlock(&dev_cfg_data->lock); return ret; } /** * adf_cfg_section_del() - Delete config section entry to config table. * @accel_dev: Pointer to acceleration device. * @name: Name of the section * * Function deletes configuration section where key - value entries * will be stored. * To be used by QAT device specific drivers. */ static void adf_cfg_section_del(struct adf_accel_dev *accel_dev, const char *name) { struct adf_cfg_section *sec = adf_cfg_sec_find(accel_dev, name); if (!sec) return; adf_cfg_keyval_del_all(&sec->param_head); list_del(&sec->list); free(sec, M_QAT); } void adf_cfg_keyval_del_all(struct list_head *head) { struct list_head *list_ptr, *tmp; list_for_each_prev_safe(list_ptr, tmp, head) { struct adf_cfg_key_val *ptr = list_entry(list_ptr, struct adf_cfg_key_val, list); list_del(list_ptr); free(ptr, M_QAT); } } static void adf_cfg_section_del_all(struct list_head *head) { struct adf_cfg_section *ptr; struct list_head *list, *tmp; list_for_each_prev_safe(list, tmp, head) { ptr = list_entry(list, struct adf_cfg_section, list); adf_cfg_keyval_del_all(&ptr->param_head); list_del(list); free(ptr, M_QAT); } } static struct adf_cfg_key_val * adf_cfg_key_value_find(struct adf_cfg_section *s, const char *key) { struct list_head *list; list_for_each(list, &s->param_head) { struct adf_cfg_key_val *ptr = list_entry(list, struct adf_cfg_key_val, list); if (!strncmp(ptr->key, key, sizeof(ptr->key))) return ptr; } return NULL; } struct adf_cfg_section * adf_cfg_sec_find(struct adf_accel_dev *accel_dev, const char *sec_name) { 
struct adf_cfg_device_data *cfg = accel_dev->cfg; struct list_head *list; list_for_each(list, &cfg->sec_list) { struct adf_cfg_section *ptr = list_entry(list, struct adf_cfg_section, list); if (!strncmp(ptr->name, sec_name, sizeof(ptr->name))) return ptr; } return NULL; } static int adf_cfg_key_val_get(struct adf_accel_dev *accel_dev, const char *sec_name, const char *key_name, char *val) { struct adf_cfg_section *sec = adf_cfg_sec_find(accel_dev, sec_name); struct adf_cfg_key_val *keyval = NULL; if (sec) keyval = adf_cfg_key_value_find(sec, key_name); if (keyval) { memcpy(val, keyval->val, ADF_CFG_MAX_VAL_LEN_IN_BYTES); return 0; } return -1; } /** * adf_cfg_add_key_value_param() - Add key-value config entry to config table. * @accel_dev: Pointer to acceleration device. * @section_name: Name of the section where the param will be added * @key: The key string * @val: Value pain for the given @key * @type: Type - string, int or address * * Function adds configuration key - value entry in the appropriate section * in the given acceleration device * To be used by QAT device specific drivers. * * Return: 0 on success, error code otherwise. 
*/ int adf_cfg_add_key_value_param(struct adf_accel_dev *accel_dev, const char *section_name, const char *key, const void *val, enum adf_cfg_val_type type) { char temp_val[ADF_CFG_MAX_VAL_LEN_IN_BYTES]; struct adf_cfg_device_data *cfg = accel_dev->cfg; struct adf_cfg_key_val *key_val; struct adf_cfg_section *section = adf_cfg_sec_find(accel_dev, section_name); if (!section) return EFAULT; key_val = malloc(sizeof(*key_val), M_QAT, M_WAITOK | M_ZERO); INIT_LIST_HEAD(&key_val->list); strlcpy(key_val->key, key, sizeof(key_val->key)); if (type == ADF_DEC) { snprintf(key_val->val, ADF_CFG_MAX_VAL_LEN_IN_BYTES, "%ld", (*((const long *)val))); } else if (type == ADF_STR) { strlcpy(key_val->val, (const char *)val, sizeof(key_val->val)); } else if (type == ADF_HEX) { snprintf(key_val->val, ADF_CFG_MAX_VAL_LEN_IN_BYTES, "0x%lx", (unsigned long)val); } else { device_printf(GET_DEV(accel_dev), "Unknown type given.\n"); free(key_val, M_QAT); return -1; } key_val->type = type; /* Add the key-value pair as below policy: * 1. If the key doesn't exist, add it, * 2. If the key already exists with a different value * then delete it, * 3. If the key exists with the same value, then return * without doing anything. 
*/ if (adf_cfg_key_val_get(accel_dev, section_name, key, temp_val) == 0) { if (strncmp(temp_val, key_val->val, sizeof(temp_val)) != 0) { adf_cfg_keyval_remove(key, section); } else { free(key_val, M_QAT); return 0; } } sx_xlock(&cfg->lock); adf_cfg_keyval_add(key_val, section); sx_xunlock(&cfg->lock); return 0; } int adf_cfg_save_section(struct adf_accel_dev *accel_dev, const char *name, struct adf_cfg_section *section) { struct adf_cfg_key_val *ptr; struct adf_cfg_section *sec = adf_cfg_sec_find(accel_dev, name); if (!sec) { device_printf(GET_DEV(accel_dev), "Couldn't find section %s\n", name); return EFAULT; } strlcpy(section->name, name, sizeof(section->name)); INIT_LIST_HEAD(§ion->param_head); /* now we save all the parameters */ list_for_each_entry(ptr, &sec->param_head, list) { struct adf_cfg_key_val *key_val; key_val = malloc(sizeof(*key_val), M_QAT, M_WAITOK | M_ZERO); memcpy(key_val, ptr, sizeof(*key_val)); list_add_tail(&key_val->list, §ion->param_head); } return 0; } static int adf_cfg_section_save_all(struct adf_accel_dev *accel_dev, struct adf_cfg_depot_list *cfg_depot_list) { struct adf_cfg_section *ptr_sec, *iter_sec; struct list_head *list, *tmp, *save_list; struct list_head *head = &accel_dev->cfg->sec_list; save_list = &cfg_depot_list[accel_dev->accel_id].sec_list; list_for_each_prev_safe(list, tmp, head) { ptr_sec = list_entry(list, struct adf_cfg_section, list); iter_sec = malloc(sizeof(*iter_sec), M_QAT, M_WAITOK | M_ZERO); adf_cfg_save_section(accel_dev, ptr_sec->name, iter_sec); list_add_tail(&iter_sec->list, save_list); } return 0; } int adf_cfg_depot_save_all(struct adf_accel_dev *accel_dev, struct adf_cfg_depot_list *cfg_depot_list) { struct adf_cfg_device_data *dev_cfg_data = accel_dev->cfg; int ret = 0; sx_xlock(&dev_cfg_data->lock); ret = adf_cfg_section_save_all(accel_dev, cfg_depot_list); sx_xunlock(&dev_cfg_data->lock); return ret; } /** * adf_cfg_remove_key_param() - remove config entry in config table. 
* @accel_dev: Pointer to acceleration device. * @section_name: Name of the section where the param will be added * @key: The key string * * Function remove configuration key * To be used by QAT device specific drivers. * * Return: 0 on success, error code otherwise. */ int adf_cfg_remove_key_param(struct adf_accel_dev *accel_dev, const char *section_name, const char *key) { struct adf_cfg_device_data *cfg = accel_dev->cfg; struct adf_cfg_section *section = adf_cfg_sec_find(accel_dev, section_name); if (!section) return EFAULT; sx_xlock(&cfg->lock); adf_cfg_keyval_remove(key, section); sx_xunlock(&cfg->lock); return 0; } /** * adf_cfg_section_add() - Add config section entry to config table. * @accel_dev: Pointer to acceleration device. * @name: Name of the section * * Function adds configuration section where key - value entries * will be stored. * To be used by QAT device specific drivers. * * Return: 0 on success, error code otherwise. */ int adf_cfg_section_add(struct adf_accel_dev *accel_dev, const char *name) { struct adf_cfg_device_data *cfg = accel_dev->cfg; struct adf_cfg_section *sec = adf_cfg_sec_find(accel_dev, name); if (sec) return 0; sec = malloc(sizeof(*sec), M_QAT, M_WAITOK | M_ZERO); strlcpy(sec->name, name, sizeof(sec->name)); INIT_LIST_HEAD(&sec->param_head); sx_xlock(&cfg->lock); list_add_tail(&sec->list, &cfg->sec_list); sx_xunlock(&cfg->lock); return 0; } /* need to differentiate derived section with the original section */ int adf_cfg_derived_section_add(struct adf_accel_dev *accel_dev, const char *name) { struct adf_cfg_device_data *cfg = accel_dev->cfg; struct adf_cfg_section *sec = NULL; if (adf_cfg_section_add(accel_dev, name)) return EFAULT; sec = adf_cfg_sec_find(accel_dev, name); if (!sec) return EFAULT; sx_xlock(&cfg->lock); sec->is_derived = true; sx_xunlock(&cfg->lock); return 0; } static int adf_cfg_restore_key_value_param(struct adf_accel_dev *accel_dev, const char *section_name, const char *key, const char *val, enum 
adf_cfg_val_type type) { struct adf_cfg_device_data *cfg = accel_dev->cfg; struct adf_cfg_key_val *key_val; struct adf_cfg_section *section = adf_cfg_sec_find(accel_dev, section_name); if (!section) return EFAULT; key_val = malloc(sizeof(*key_val), M_QAT, M_WAITOK | M_ZERO); INIT_LIST_HEAD(&key_val->list); strlcpy(key_val->key, key, sizeof(key_val->key)); strlcpy(key_val->val, val, sizeof(key_val->val)); key_val->type = type; sx_xlock(&cfg->lock); adf_cfg_keyval_add(key_val, section); sx_xunlock(&cfg->lock); return 0; } int adf_cfg_restore_section(struct adf_accel_dev *accel_dev, struct adf_cfg_section *section) { struct adf_cfg_key_val *ptr; int ret = 0; ret = adf_cfg_section_add(accel_dev, section->name); if (ret) goto err; list_for_each_entry(ptr, §ion->param_head, list) { ret = adf_cfg_restore_key_value_param( accel_dev, section->name, ptr->key, ptr->val, ptr->type); if (ret) goto err_remove_sec; } return 0; err_remove_sec: adf_cfg_section_del(accel_dev, section->name); err: device_printf(GET_DEV(accel_dev), "Failed to restore section %s\n", section->name); return ret; } int adf_cfg_get_param_value(struct adf_accel_dev *accel_dev, const char *section, const char *name, char *value) { struct adf_cfg_device_data *cfg = accel_dev->cfg; int ret; sx_slock(&cfg->lock); ret = adf_cfg_key_val_get(accel_dev, section, name, value); sx_sunlock(&cfg->lock); return ret; } diff --git a/sys/dev/qat/qat_common/adf_freebsd_cnvnr_ctrs_dbg.c b/sys/dev/qat/qat_common/adf_freebsd_cnvnr_ctrs_dbg.c index 960c71472bc8..ead172635e59 100644 --- a/sys/dev/qat/qat_common/adf_freebsd_cnvnr_ctrs_dbg.c +++ b/sys/dev/qat/qat_common/adf_freebsd_cnvnr_ctrs_dbg.c @@ -1,178 +1,195 @@ /* SPDX-License-Identifier: BSD-3-Clause */ -/* Copyright(c) 2007-2022 Intel Corporation */ +/* Copyright(c) 2007-2025 Intel Corporation */ #include #include #include #include "adf_cnvnr_freq_counters.h" #include "adf_common_drv.h" #include "adf_cfg.h" #include "icp_qat_fw_init_admin.h" #define ADF_CNVNR_ERR_MASK 
0xFFF #define LINE \ "+-----------------------------------------------------------------+\n" #define BANNER \ "| CNV Error Freq Statistics for Qat Device |\n" #define NEW_LINE "\n" #define REPORT_ENTRY_FORMAT \ "|[AE %2d]: TotalErrors: %5d : LastError: %s [%5d] |\n" #define MAX_LINE_LENGTH 128 #define MAX_REPORT_SIZE ((ADF_MAX_ACCELENGINES + 3) * MAX_LINE_LENGTH) #define PRINT_LINE(line) \ (snprintf( \ report_ptr, MAX_REPORT_SIZE - (report_ptr - report), "%s", line)) const char *cnvnr_err_str[] = {"No Error ", "Checksum Error", "Length Error-P", "Decomp Error ", "Xlat Error ", "Length Error-C", "Unknown Error "}; /* Handler for HB status check */ static int qat_cnvnr_ctrs_dbg_read(SYSCTL_HANDLER_ARGS) { struct adf_accel_dev *accel_dev = arg1; struct adf_hw_device_data *hw_device; struct icp_qat_fw_init_admin_req request; struct icp_qat_fw_init_admin_resp response; unsigned long dc_ae_msk = 0; u8 num_aes = 0, ae = 0, error_type = 0, bytes_written = 0; s16 latest_error = 0; char report[MAX_REPORT_SIZE]; char *report_ptr = report; /* Defensive check */ if (!accel_dev || accel_dev->accel_id > ADF_MAX_DEVICES) return EINVAL; if (!adf_dev_started(accel_dev)) { device_printf(GET_DEV(accel_dev), "QAT Device not started\n"); return EINVAL; } hw_device = accel_dev->hw_device; if (!hw_device) { device_printf(GET_DEV(accel_dev), "Failed to get hw_device.\n"); return EFAULT; } /* Clean report memory */ explicit_bzero(report, sizeof(report)); /* Adding banner to report */ bytes_written = PRINT_LINE(NEW_LINE); if (bytes_written <= 0) return EINVAL; report_ptr += bytes_written; bytes_written = PRINT_LINE(LINE); if (bytes_written <= 0) return EINVAL; report_ptr += bytes_written; bytes_written = PRINT_LINE(BANNER); if (bytes_written <= 0) return EINVAL; report_ptr += bytes_written; bytes_written = PRINT_LINE(LINE); if (bytes_written <= 0) return EINVAL; report_ptr += bytes_written; if (accel_dev->au_info) dc_ae_msk = accel_dev->au_info->dc_ae_msk; /* Extracting number of 
Acceleration Engines */ num_aes = hw_device->get_num_aes(hw_device); for (ae = 0; ae < num_aes; ae++) { if (accel_dev->au_info && !test_bit(ae, &dc_ae_msk)) continue; explicit_bzero(&response, sizeof(struct icp_qat_fw_init_admin_resp)); request.cmd_id = ICP_QAT_FW_CNV_STATS_GET; if (adf_put_admin_msg_sync( accel_dev, ae, &request, &response) || response.status) { return EFAULT; } error_type = CNV_ERROR_TYPE_GET(response.latest_error); if (error_type == CNV_ERR_TYPE_DECOMP_PRODUCED_LENGTH_ERROR || error_type == CNV_ERR_TYPE_DECOMP_CONSUMED_LENGTH_ERROR) { latest_error = CNV_ERROR_LENGTH_DELTA_GET(response.latest_error); } else if (error_type == CNV_ERR_TYPE_DECOMPRESSION_ERROR || error_type == CNV_ERR_TYPE_TRANSLATION_ERROR) { latest_error = CNV_ERROR_DECOMP_STATUS_GET(response.latest_error); } else { latest_error = response.latest_error & ADF_CNVNR_ERR_MASK; } bytes_written = snprintf(report_ptr, MAX_REPORT_SIZE - (report_ptr - report), REPORT_ENTRY_FORMAT, ae, response.error_count, cnvnr_err_str[error_type], latest_error); if (bytes_written <= 0) { - printf("ERROR: No space left in CnV ctrs line buffer\n" - "\tAcceleration ID: %d, Engine: %d\n", - accel_dev->accel_id, - ae); + device_printf( + GET_DEV(accel_dev), + "ERROR: No space left in CnV ctrs line buffer\n" + "\tAcceleration ID: %d, Engine: %d\n", + accel_dev->accel_id, + ae); break; } report_ptr += bytes_written; } sysctl_handle_string(oidp, report, sizeof(report), req); return 0; } int adf_cnvnr_freq_counters_add(struct adf_accel_dev *accel_dev) { struct sysctl_ctx_list *qat_sysctl_ctx; struct sysctl_oid *qat_cnvnr_ctrs_sysctl_tree; - struct sysctl_oid *oid_rc; /* Defensive checks */ if (!accel_dev) return EINVAL; /* Creating context and tree */ qat_sysctl_ctx = device_get_sysctl_ctx(accel_dev->accel_pci_dev.pci_dev); qat_cnvnr_ctrs_sysctl_tree = device_get_sysctl_tree(accel_dev->accel_pci_dev.pci_dev); /* Create "cnv_error" string type leaf - with callback */ - oid_rc = SYSCTL_ADD_PROC(qat_sysctl_ctx, - 
SYSCTL_CHILDREN(qat_cnvnr_ctrs_sysctl_tree), - OID_AUTO, - "cnv_error", - CTLTYPE_STRING | CTLFLAG_RD, - accel_dev, - 0, - qat_cnvnr_ctrs_dbg_read, - "IU", - "QAT CnVnR status"); - - if (!oid_rc) { - printf("ERROR: Memory allocation failed\n"); + accel_dev->cnv_error_oid = + SYSCTL_ADD_PROC(qat_sysctl_ctx, + SYSCTL_CHILDREN(qat_cnvnr_ctrs_sysctl_tree), + OID_AUTO, + "cnv_error", + CTLTYPE_STRING | CTLFLAG_RD, + accel_dev, + 0, + qat_cnvnr_ctrs_dbg_read, + "IU", + "QAT CnVnR status"); + + if (!accel_dev->cnv_error_oid) { + device_printf( + GET_DEV(accel_dev), + "Failed to create qat cnvnr freq counters sysctl entry.\n"); return ENOMEM; } return 0; } void adf_cnvnr_freq_counters_remove(struct adf_accel_dev *accel_dev) { + struct sysctl_ctx_list *qat_sysctl_ctx; + + if (!accel_dev) + return; + + qat_sysctl_ctx = + device_get_sysctl_ctx(accel_dev->accel_pci_dev.pci_dev); + + if (accel_dev->cnv_error_oid) { + sysctl_ctx_entry_del(qat_sysctl_ctx, accel_dev->cnv_error_oid); + sysctl_remove_oid(accel_dev->cnv_error_oid, 1, 1); + accel_dev->cnv_error_oid = NULL; + } } diff --git a/sys/dev/qat/qat_common/adf_freebsd_dbgfs.c b/sys/dev/qat/qat_common/adf_freebsd_dbgfs.c new file mode 100644 index 000000000000..21ffb0adf559 --- /dev/null +++ b/sys/dev/qat/qat_common/adf_freebsd_dbgfs.c @@ -0,0 +1,68 @@ +/* SPDX-License-Identifier: BSD-3-Clause */ +/* Copyright(c) 2007-2025 Intel Corporation */ + +#include "adf_accel_devices.h" +#include "adf_cfg_dev_dbg.h" +#include "adf_cnvnr_freq_counters.h" +#include "adf_common_drv.h" +#include "adf_dbgfs.h" +#include "adf_fw_counters.h" +#include "adf_freebsd_pfvf_ctrs_dbg.h" +#include "adf_heartbeat_dbg.h" +#include "adf_ver_dbg.h" + +/** + * adf_dbgfs_init() - add persistent debugfs entries + * @accel_dev: Pointer to acceleration device. + * + * This function creates debugfs entries that are persistent through a device + * state change (from up to down or vice versa). 
+ */ +void +adf_dbgfs_init(struct adf_accel_dev *accel_dev) +{ + adf_cfg_dev_dbg_add(accel_dev); +} + +/** + * adf_dbgfs_exit() - remove persistent debugfs entries + * @accel_dev: Pointer to acceleration device. + */ +void +adf_dbgfs_exit(struct adf_accel_dev *accel_dev) +{ + adf_cfg_dev_dbg_remove(accel_dev); +} + +/** + * adf_dbgfs_add() - add non-persistent debugfs entries + * @accel_dev: Pointer to acceleration device. + * + * This function creates debugfs entries that are not persistent through + * a device state change (from up to down or vice versa). + */ +void +adf_dbgfs_add(struct adf_accel_dev *accel_dev) +{ + if (!accel_dev->is_vf) { + adf_heartbeat_dbg_add(accel_dev); + adf_ver_dbg_add(accel_dev); + adf_fw_counters_add(accel_dev); + adf_cnvnr_freq_counters_add(accel_dev); + } +} + +/** + * adf_dbgfs_rm() - remove non-persistent debugfs entries + * @accel_dev: Pointer to acceleration device. + */ +void +adf_dbgfs_rm(struct adf_accel_dev *accel_dev) +{ + if (!accel_dev->is_vf) { + adf_cnvnr_freq_counters_remove(accel_dev); + adf_fw_counters_remove(accel_dev); + adf_ver_dbg_del(accel_dev); + adf_heartbeat_dbg_del(accel_dev); + } +} diff --git a/sys/dev/qat/qat_common/adf_freebsd_heartbeat_dbg.c b/sys/dev/qat/qat_common/adf_freebsd_heartbeat_dbg.c index c22640045fda..8690c000760c 100644 --- a/sys/dev/qat/qat_common/adf_freebsd_heartbeat_dbg.c +++ b/sys/dev/qat/qat_common/adf_freebsd_heartbeat_dbg.c @@ -1,105 +1,137 @@ /* SPDX-License-Identifier: BSD-3-Clause */ -/* Copyright(c) 2007-2022 Intel Corporation */ +/* Copyright(c) 2007-2025 Intel Corporation */ #include #include #include #include "adf_heartbeat_dbg.h" #include "adf_common_drv.h" #include "adf_cfg.h" #include "adf_heartbeat.h" #define HB_SYSCTL_ERR(RC) \ do { \ if (RC == NULL) { \ printf( \ "Memory allocation failed in adf_heartbeat_dbg_add\n"); \ return ENOMEM; \ } \ } while (0) /* Handler for HB status check */ static int qat_dev_hb_read(SYSCTL_HANDLER_ARGS) { enum adf_device_heartbeat_status 
hb_status = DEV_HB_UNRESPONSIVE; struct adf_accel_dev *accel_dev = arg1; struct adf_heartbeat *hb; int ret = 0; if (accel_dev == NULL) { return EINVAL; } hb = accel_dev->heartbeat; /* if FW is loaded, proceed else set heartbeat down */ if (test_bit(ADF_STATUS_AE_UCODE_LOADED, &accel_dev->status)) { adf_heartbeat_status(accel_dev, &hb_status); } if (hb_status == DEV_HB_ALIVE) { hb->heartbeat.hb_sysctlvar = 1; } else { hb->heartbeat.hb_sysctlvar = 0; } ret = sysctl_handle_int(oidp, &hb->heartbeat.hb_sysctlvar, 0, req); return ret; } int adf_heartbeat_dbg_add(struct adf_accel_dev *accel_dev) { struct sysctl_ctx_list *qat_hb_sysctl_ctx; struct sysctl_oid *qat_hb_sysctl_tree; struct adf_heartbeat *hb; - struct sysctl_oid *rc = 0; if (accel_dev == NULL) { return EINVAL; } if (adf_heartbeat_init(accel_dev)) return EINVAL; hb = accel_dev->heartbeat; qat_hb_sysctl_ctx = device_get_sysctl_ctx(accel_dev->accel_pci_dev.pci_dev); qat_hb_sysctl_tree = device_get_sysctl_tree(accel_dev->accel_pci_dev.pci_dev); - rc = SYSCTL_ADD_UINT(qat_hb_sysctl_ctx, - SYSCTL_CHILDREN(qat_hb_sysctl_tree), - OID_AUTO, - "heartbeat_sent", - CTLFLAG_RD, - &hb->hb_sent_counter, - 0, - "HB sent count"); - HB_SYSCTL_ERR(rc); - - rc = SYSCTL_ADD_UINT(qat_hb_sysctl_ctx, - SYSCTL_CHILDREN(qat_hb_sysctl_tree), - OID_AUTO, - "heartbeat_failed", - CTLFLAG_RD, - &hb->hb_failed_counter, - 0, - "HB failed count"); - HB_SYSCTL_ERR(rc); - - rc = SYSCTL_ADD_PROC(qat_hb_sysctl_ctx, - SYSCTL_CHILDREN(qat_hb_sysctl_tree), - OID_AUTO, - "heartbeat", - CTLTYPE_INT | CTLFLAG_RD, - accel_dev, - 0, - qat_dev_hb_read, - "IU", - "QAT device status"); - HB_SYSCTL_ERR(rc); + hb->heartbeat_sent.oid = + SYSCTL_ADD_UINT(qat_hb_sysctl_ctx, + SYSCTL_CHILDREN(qat_hb_sysctl_tree), + OID_AUTO, + "heartbeat_sent", + CTLFLAG_RD, + &hb->hb_sent_counter, + 0, + "HB sent count"); + HB_SYSCTL_ERR(hb->heartbeat_sent.oid); + + hb->heartbeat_failed.oid = + SYSCTL_ADD_UINT(qat_hb_sysctl_ctx, + SYSCTL_CHILDREN(qat_hb_sysctl_tree), + OID_AUTO, + 
"heartbeat_failed", + CTLFLAG_RD, + &hb->hb_failed_counter, + 0, + "HB failed count"); + HB_SYSCTL_ERR(hb->heartbeat_failed.oid); + + hb->heartbeat.oid = SYSCTL_ADD_PROC(qat_hb_sysctl_ctx, + SYSCTL_CHILDREN(qat_hb_sysctl_tree), + OID_AUTO, + "heartbeat", + CTLTYPE_INT | CTLFLAG_RD, + accel_dev, + 0, + qat_dev_hb_read, + "IU", + "QAT device status"); + HB_SYSCTL_ERR(hb->heartbeat.oid); return 0; } int adf_heartbeat_dbg_del(struct adf_accel_dev *accel_dev) { + struct sysctl_ctx_list *qat_sysctl_ctx; + struct adf_heartbeat *hb; + + if (!accel_dev) { + return EINVAL; + } + + hb = accel_dev->heartbeat; + + qat_sysctl_ctx = + device_get_sysctl_ctx(accel_dev->accel_pci_dev.pci_dev); + + if (hb->heartbeat.oid) { + sysctl_ctx_entry_del(qat_sysctl_ctx, hb->heartbeat.oid); + sysctl_remove_oid(hb->heartbeat.oid, 1, 1); + hb->heartbeat.oid = NULL; + } + + if (hb->heartbeat_failed.oid) { + sysctl_ctx_entry_del(qat_sysctl_ctx, hb->heartbeat_failed.oid); + sysctl_remove_oid(hb->heartbeat_failed.oid, 1, 1); + hb->heartbeat_failed.oid = NULL; + } + + if (hb->heartbeat_sent.oid) { + sysctl_ctx_entry_del(qat_sysctl_ctx, hb->heartbeat_sent.oid); + sysctl_remove_oid(hb->heartbeat_sent.oid, 1, 1); + hb->heartbeat_sent.oid = NULL; + } + adf_heartbeat_clean(accel_dev); + return 0; } diff --git a/sys/dev/qat/qat_common/adf_freebsd_ver_dbg.c b/sys/dev/qat/qat_common/adf_freebsd_ver_dbg.c index 31805d5fb91e..98cde6a742c1 100644 --- a/sys/dev/qat/qat_common/adf_freebsd_ver_dbg.c +++ b/sys/dev/qat/qat_common/adf_freebsd_ver_dbg.c @@ -1,148 +1,176 @@ /* SPDX-License-Identifier: BSD-3-Clause */ -/* Copyright(c) 2007-2022 Intel Corporation */ +/* Copyright(c) 2007-2025 Intel Corporation */ #include "qat_freebsd.h" #include "adf_cfg.h" #include "adf_common_drv.h" #include "adf_accel_devices.h" #include "adf_ver_dbg.h" static int adf_sysctl_read_fw_versions(SYSCTL_HANDLER_ARGS) { struct adf_accel_dev *accel_dev = arg1; char fw_version[ADF_CFG_MAX_VAL_LEN_IN_BYTES]; if (!accel_dev) return -EINVAL; if 
(adf_dev_started(accel_dev)) snprintf(fw_version, ADF_CFG_MAX_VAL_LEN_IN_BYTES, "%d.%d.%d", accel_dev->fw_versions.fw_version_major, accel_dev->fw_versions.fw_version_minor, accel_dev->fw_versions.fw_version_patch); else snprintf(fw_version, ADF_CFG_MAX_VAL_LEN_IN_BYTES, ""); return SYSCTL_OUT(req, fw_version, strnlen(fw_version, ADF_CFG_MAX_VAL_LEN_IN_BYTES)); } static int adf_sysctl_read_hw_versions(SYSCTL_HANDLER_ARGS) { struct adf_accel_dev *accel_dev = arg1; char hw_version[ADF_CFG_MAX_VAL_LEN_IN_BYTES]; if (!accel_dev) return -EINVAL; if (adf_dev_started(accel_dev)) snprintf(hw_version, ADF_CFG_MAX_VAL_LEN_IN_BYTES, "%d", accel_dev->accel_pci_dev.revid); else snprintf(hw_version, ADF_CFG_MAX_VAL_LEN_IN_BYTES, ""); return SYSCTL_OUT(req, hw_version, strnlen(hw_version, ADF_CFG_MAX_VAL_LEN_IN_BYTES)); } static int adf_sysctl_read_mmp_versions(SYSCTL_HANDLER_ARGS) { struct adf_accel_dev *accel_dev = arg1; char mmp_version[ADF_CFG_MAX_VAL_LEN_IN_BYTES]; if (!accel_dev) return -EINVAL; if (adf_dev_started(accel_dev)) snprintf(mmp_version, ADF_CFG_MAX_VAL_LEN_IN_BYTES, "%d.%d.%d", accel_dev->fw_versions.mmp_version_major, accel_dev->fw_versions.mmp_version_minor, accel_dev->fw_versions.mmp_version_patch); if (adf_dev_started(accel_dev)) snprintf(mmp_version, ADF_CFG_MAX_VAL_LEN_IN_BYTES, "%d.%d.%d", accel_dev->fw_versions.mmp_version_major, accel_dev->fw_versions.mmp_version_minor, accel_dev->fw_versions.mmp_version_patch); else snprintf(mmp_version, ADF_CFG_MAX_VAL_LEN_IN_BYTES, ""); return SYSCTL_OUT(req, mmp_version, strnlen(mmp_version, ADF_CFG_MAX_VAL_LEN_IN_BYTES)); } int adf_ver_dbg_add(struct adf_accel_dev *accel_dev) { struct sysctl_ctx_list *qat_sysctl_ctx; struct sysctl_oid *qat_sysctl_tree; - struct sysctl_oid *rc = 0; if (!accel_dev) return -EINVAL; qat_sysctl_ctx = device_get_sysctl_ctx(accel_dev->accel_pci_dev.pci_dev); qat_sysctl_tree = device_get_sysctl_tree(accel_dev->accel_pci_dev.pci_dev); - rc = SYSCTL_ADD_OID(qat_sysctl_ctx, - 
SYSCTL_CHILDREN(qat_sysctl_tree), - OID_AUTO, - "fw_version", - CTLTYPE_STRING | CTLFLAG_RD, - accel_dev, - 0, - adf_sysctl_read_fw_versions, - "A", - "QAT FW version"); - if (!rc) + accel_dev->fw_version_oid = + SYSCTL_ADD_OID(qat_sysctl_ctx, + SYSCTL_CHILDREN(qat_sysctl_tree), + OID_AUTO, + "fw_version", + CTLTYPE_STRING | CTLFLAG_RD, + accel_dev, + 0, + adf_sysctl_read_fw_versions, + "A", + "QAT FW version"); + if (!accel_dev->fw_version_oid) goto err; - rc = SYSCTL_ADD_OID(qat_sysctl_ctx, - SYSCTL_CHILDREN(qat_sysctl_tree), - OID_AUTO, - "hw_version", - CTLTYPE_STRING | CTLFLAG_RD, - accel_dev, - 0, - adf_sysctl_read_hw_versions, - "A", - "QAT HW version"); - if (!rc) + accel_dev->hw_version_oid = + SYSCTL_ADD_OID(qat_sysctl_ctx, + SYSCTL_CHILDREN(qat_sysctl_tree), + OID_AUTO, + "hw_version", + CTLTYPE_STRING | CTLFLAG_RD, + accel_dev, + 0, + adf_sysctl_read_hw_versions, + "A", + "QAT HW version"); + if (!accel_dev->hw_version_oid) goto err; - rc = SYSCTL_ADD_OID(qat_sysctl_ctx, - SYSCTL_CHILDREN(qat_sysctl_tree), - OID_AUTO, - "mmp_version", - CTLTYPE_STRING | CTLFLAG_RD, - accel_dev, - 0, - adf_sysctl_read_mmp_versions, - "A", - "QAT MMP version"); - if (!rc) + accel_dev->mmp_version_oid = + SYSCTL_ADD_OID(qat_sysctl_ctx, + SYSCTL_CHILDREN(qat_sysctl_tree), + OID_AUTO, + "mmp_version", + CTLTYPE_STRING | CTLFLAG_RD, + accel_dev, + 0, + adf_sysctl_read_mmp_versions, + "A", + "QAT MMP version"); + if (!accel_dev->mmp_version_oid) goto err; return 0; err: device_printf(GET_DEV(accel_dev), "Failed to add firmware versions to sysctl\n"); return -EINVAL; } void adf_ver_dbg_del(struct adf_accel_dev *accel_dev) { + struct sysctl_ctx_list *qat_sysctl_ctx; + + if (!accel_dev) + return; + + qat_sysctl_ctx = + device_get_sysctl_ctx(accel_dev->accel_pci_dev.pci_dev); + + if (accel_dev->mmp_version_oid) { + sysctl_ctx_entry_del(qat_sysctl_ctx, + accel_dev->mmp_version_oid); + sysctl_remove_oid(accel_dev->mmp_version_oid, 1, 1); + accel_dev->mmp_version_oid = NULL; + } + + 
if (accel_dev->hw_version_oid) { + sysctl_ctx_entry_del(qat_sysctl_ctx, accel_dev->hw_version_oid); + sysctl_remove_oid(accel_dev->hw_version_oid, 1, 1); + accel_dev->hw_version_oid = NULL; + } + + if (accel_dev->fw_version_oid) { + sysctl_ctx_entry_del(qat_sysctl_ctx, accel_dev->fw_version_oid); + sysctl_remove_oid(accel_dev->fw_version_oid, 1, 1); + accel_dev->fw_version_oid = NULL; + } } diff --git a/sys/dev/qat/qat_common/adf_fw_counters.c b/sys/dev/qat/qat_common/adf_fw_counters.c index ea674b27bd0f..1acabe4c9364 100644 --- a/sys/dev/qat/qat_common/adf_fw_counters.c +++ b/sys/dev/qat/qat_common/adf_fw_counters.c @@ -1,410 +1,422 @@ /* SPDX-License-Identifier: BSD-3-Clause */ -/* Copyright(c) 2007-2022 Intel Corporation */ +/* Copyright(c) 2007-2025 Intel Corporation */ #include #include #include #include "adf_accel_devices.h" #include "adf_fw_counters.h" #include "adf_common_drv.h" #include "icp_qat_fw_init_admin.h" #include #include #define ADF_FW_COUNTERS_BUF_SZ 4096 #define ADF_RAS_EVENT_STR "RAS events" #define ADF_FW_REQ_STR "Firmware Requests" #define ADF_FW_RESP_STR "Firmware Responses" static void adf_fw_counters_section_del_all(struct list_head *head); static void adf_fw_counters_del_all(struct adf_accel_dev *accel_dev); static int adf_fw_counters_add_key_value_param(struct adf_accel_dev *accel_dev, const char *section_name, const unsigned long sec_name_max_size, const char *key, const void *val); static int adf_fw_counters_section_add(struct adf_accel_dev *accel_dev, const char *name, const unsigned long name_max_size); int adf_get_fw_counters(struct adf_accel_dev *accel_dev); int adf_read_fw_counters(SYSCTL_HANDLER_ARGS); int adf_get_fw_counters(struct adf_accel_dev *accel_dev) { struct icp_qat_fw_init_admin_req req; struct icp_qat_fw_init_admin_resp resp; unsigned long ae_mask; int i; int ret = 0; char aeidstr[16] = { 0 }; struct adf_hw_device_data *hw_device; if (!accel_dev) { ret = EFAULT; goto fail_clean; } if (!adf_dev_started(accel_dev)) { 
device_printf(GET_DEV(accel_dev), "Qat Device not started\n"); ret = EFAULT; goto fail_clean; } hw_device = accel_dev->hw_device; if (!hw_device) { ret = EFAULT; goto fail_clean; } adf_fw_counters_del_all(accel_dev); explicit_bzero(&req, sizeof(struct icp_qat_fw_init_admin_req)); req.cmd_id = ICP_QAT_FW_COUNTERS_GET; ae_mask = hw_device->ae_mask; for_each_set_bit(i, &ae_mask, GET_MAX_ACCELENGINES(accel_dev)) { explicit_bzero(&resp, sizeof(struct icp_qat_fw_init_admin_resp)); if (adf_put_admin_msg_sync(accel_dev, i, &req, &resp) || resp.status) { resp.req_rec_count = ADF_FW_COUNTERS_NO_RESPONSE; resp.resp_sent_count = ADF_FW_COUNTERS_NO_RESPONSE; resp.ras_event_count = ADF_FW_COUNTERS_NO_RESPONSE; } explicit_bzero(aeidstr, sizeof(aeidstr)); snprintf(aeidstr, sizeof(aeidstr), "AE %2d", i); if (adf_fw_counters_section_add(accel_dev, aeidstr, sizeof(aeidstr))) { ret = ENOMEM; goto fail_clean; } if (adf_fw_counters_add_key_value_param( accel_dev, aeidstr, sizeof(aeidstr), ADF_FW_REQ_STR, (void *)&resp.req_rec_count)) { adf_fw_counters_del_all(accel_dev); ret = ENOMEM; goto fail_clean; } if (adf_fw_counters_add_key_value_param( accel_dev, aeidstr, sizeof(aeidstr), ADF_FW_RESP_STR, (void *)&resp.resp_sent_count)) { adf_fw_counters_del_all(accel_dev); ret = ENOMEM; goto fail_clean; } if (hw_device->count_ras_event && hw_device->count_ras_event(accel_dev, (void *)&resp.ras_event_count, aeidstr)) { adf_fw_counters_del_all(accel_dev); ret = ENOMEM; goto fail_clean; } } fail_clean: return ret; } int adf_read_fw_counters(SYSCTL_HANDLER_ARGS) { struct adf_accel_dev *accel_dev = arg1; struct adf_fw_counters_section *ptr = NULL; struct list_head *list = NULL, *list_ptr = NULL; struct list_head *tmp = NULL, *tmp_val = NULL; int ret = 0; struct sbuf *sbuf = NULL; char *cbuf = NULL; if (accel_dev == NULL) { return EINVAL; } cbuf = malloc(ADF_FW_COUNTERS_BUF_SZ, M_QAT, M_WAITOK | M_ZERO); sbuf = sbuf_new(NULL, cbuf, ADF_FW_COUNTERS_BUF_SZ, SBUF_FIXEDLEN); if (sbuf == NULL) { 
free(cbuf, M_QAT); return ENOMEM; } ret = adf_get_fw_counters(accel_dev); if (ret) { sbuf_delete(sbuf); free(cbuf, M_QAT); return ret; } sbuf_printf(sbuf, "\n+------------------------------------------------+\n"); sbuf_printf( sbuf, "| FW Statistics for Qat Device |\n"); sbuf_printf(sbuf, "+------------------------------------------------+\n"); list_for_each_prev_safe(list, tmp, &accel_dev->fw_counters_data->ae_sec_list) { ptr = list_entry(list, struct adf_fw_counters_section, list); sbuf_printf(sbuf, "%s\n", ptr->name); list_for_each_prev_safe(list_ptr, tmp_val, &ptr->param_head) { struct adf_fw_counters_val *count = list_entry(list_ptr, struct adf_fw_counters_val, list); sbuf_printf(sbuf, "%s:%s\n", count->key, count->val); } } sbuf_finish(sbuf); ret = SYSCTL_OUT(req, sbuf_data(sbuf), sbuf_len(sbuf)); sbuf_delete(sbuf); free(cbuf, M_QAT); return ret; } int adf_fw_count_ras_event(struct adf_accel_dev *accel_dev, u32 *ras_event, char *aeidstr) { unsigned long count = 0; if (!accel_dev || !ras_event || !aeidstr) return EINVAL; count = (*ras_event == ADF_FW_COUNTERS_NO_RESPONSE ? ADF_FW_COUNTERS_NO_RESPONSE : (unsigned long)*ras_event); return adf_fw_counters_add_key_value_param( accel_dev, aeidstr, 16, ADF_RAS_EVENT_STR, (void *)&count); } /** * adf_fw_counters_add() - Create an acceleration device FW counters table. * @accel_dev: Pointer to acceleration device. * * Function creates a FW counters statistics table for the given * acceleration device. * The table stores device specific values of FW Requests sent to the FW and * FW Responses received from the FW. * To be used by QAT device specific drivers. * * Return: 0 on success, error code otherwise. 
*/ int adf_fw_counters_add(struct adf_accel_dev *accel_dev) { struct adf_fw_counters_data *fw_counters_data; struct sysctl_ctx_list *qat_sysctl_ctx; struct sysctl_oid *qat_sysctl_tree; - struct sysctl_oid *rc = 0; fw_counters_data = malloc(sizeof(*fw_counters_data), M_QAT, M_WAITOK | M_ZERO); INIT_LIST_HEAD(&fw_counters_data->ae_sec_list); init_rwsem(&fw_counters_data->lock); accel_dev->fw_counters_data = fw_counters_data; qat_sysctl_ctx = device_get_sysctl_ctx(accel_dev->accel_pci_dev.pci_dev); qat_sysctl_tree = device_get_sysctl_tree(accel_dev->accel_pci_dev.pci_dev); - rc = SYSCTL_ADD_OID(qat_sysctl_ctx, - SYSCTL_CHILDREN(qat_sysctl_tree), - OID_AUTO, - "fw_counters", - CTLTYPE_STRING | CTLFLAG_RD, - accel_dev, - 0, - adf_read_fw_counters, - "A", - "QAT FW counters"); - if (!rc) + fw_counters_data->debug = + SYSCTL_ADD_OID(qat_sysctl_ctx, + SYSCTL_CHILDREN(qat_sysctl_tree), + OID_AUTO, + "fw_counters", + CTLTYPE_STRING | CTLFLAG_RD, + accel_dev, + 0, + adf_read_fw_counters, + "A", + "QAT FW counters"); + if (!fw_counters_data->debug) { + free(fw_counters_data, M_QAT); + accel_dev->fw_counters_data = NULL; return ENOMEM; - else - return 0; + } + + return 0; } static void adf_fw_counters_del_all(struct adf_accel_dev *accel_dev) { struct adf_fw_counters_data *fw_counters_data = accel_dev->fw_counters_data; down_write(&fw_counters_data->lock); adf_fw_counters_section_del_all(&fw_counters_data->ae_sec_list); up_write(&fw_counters_data->lock); } static void adf_fw_counters_keyval_add(struct adf_fw_counters_val *new, struct adf_fw_counters_section *sec) { list_add_tail(&new->list, &sec->param_head); } static void adf_fw_counters_keyval_del_all(struct list_head *head) { struct list_head *list_ptr = NULL, *tmp = NULL; list_for_each_prev_safe(list_ptr, tmp, head) { struct adf_fw_counters_val *ptr = list_entry(list_ptr, struct adf_fw_counters_val, list); list_del(list_ptr); free(ptr, M_QAT); } } static void adf_fw_counters_section_del_all(struct list_head *head) { struct 
adf_fw_counters_section *ptr = NULL; struct list_head *list = NULL, *tmp = NULL; list_for_each_prev_safe(list, tmp, head) { ptr = list_entry(list, struct adf_fw_counters_section, list); adf_fw_counters_keyval_del_all(&ptr->param_head); list_del(list); free(ptr, M_QAT); } } static struct adf_fw_counters_section * adf_fw_counters_sec_find(struct adf_accel_dev *accel_dev, const char *sec_name, const unsigned long sec_name_max_size) { struct adf_fw_counters_data *fw_counters_data = accel_dev->fw_counters_data; struct list_head *list = NULL; list_for_each(list, &fw_counters_data->ae_sec_list) { struct adf_fw_counters_section *ptr = list_entry(list, struct adf_fw_counters_section, list); if (!strncmp(ptr->name, sec_name, sec_name_max_size)) return ptr; } return NULL; } static int adf_fw_counters_add_key_value_param(struct adf_accel_dev *accel_dev, const char *section_name, const unsigned long sec_name_max_size, const char *key, const void *val) { struct adf_fw_counters_data *fw_counters_data = accel_dev->fw_counters_data; struct adf_fw_counters_val *key_val; struct adf_fw_counters_section *section = adf_fw_counters_sec_find(accel_dev, section_name, sec_name_max_size); long tmp = *((const long *)val); if (!section) return EFAULT; key_val = malloc(sizeof(*key_val), M_QAT, M_WAITOK | M_ZERO); INIT_LIST_HEAD(&key_val->list); if (tmp == ADF_FW_COUNTERS_NO_RESPONSE) { snprintf(key_val->val, FW_COUNTERS_MAX_VAL_LEN_IN_BYTES, "No Response"); } else { snprintf(key_val->val, FW_COUNTERS_MAX_VAL_LEN_IN_BYTES, "%ld", tmp); } strlcpy(key_val->key, key, sizeof(key_val->key)); down_write(&fw_counters_data->lock); adf_fw_counters_keyval_add(key_val, section); up_write(&fw_counters_data->lock); return 0; } /** * adf_fw_counters_section_add() - Add AE section entry to FW counters table. * @accel_dev: Pointer to acceleration device. * @name: Name of the section * * Function adds a section for each AE where FW Requests/Responses and their * values will be stored. 
* To be used by QAT device specific drivers. * * Return: 0 on success, error code otherwise. */ static int adf_fw_counters_section_add(struct adf_accel_dev *accel_dev, const char *name, const unsigned long name_max_size) { struct adf_fw_counters_data *fw_counters_data = accel_dev->fw_counters_data; struct adf_fw_counters_section *sec = adf_fw_counters_sec_find(accel_dev, name, name_max_size); if (sec) return 0; sec = malloc(sizeof(*sec), M_QAT, M_WAITOK | M_ZERO); strlcpy(sec->name, name, sizeof(sec->name)); INIT_LIST_HEAD(&sec->param_head); down_write(&fw_counters_data->lock); list_add_tail(&sec->list, &fw_counters_data->ae_sec_list); up_write(&fw_counters_data->lock); return 0; } /** * adf_fw_counters_remove() - Clears acceleration device FW counters table. * @accel_dev: Pointer to acceleration device. * * Function removes FW counters table from the given acceleration device * and frees all allocated memory. * To be used by QAT device specific drivers. * * Return: void */ void adf_fw_counters_remove(struct adf_accel_dev *accel_dev) { + struct sysctl_ctx_list *qat_sysctl_ctx; struct adf_fw_counters_data *fw_counters_data = accel_dev->fw_counters_data; if (!fw_counters_data) return; + if (fw_counters_data->debug) { + qat_sysctl_ctx = + device_get_sysctl_ctx(accel_dev->accel_pci_dev.pci_dev); + sysctl_ctx_entry_del(qat_sysctl_ctx, fw_counters_data->debug); + sysctl_remove_oid(fw_counters_data->debug, 1, 1); + fw_counters_data->debug = NULL; + } + down_write(&fw_counters_data->lock); adf_fw_counters_section_del_all(&fw_counters_data->ae_sec_list); up_write(&fw_counters_data->lock); free(fw_counters_data, M_QAT); accel_dev->fw_counters_data = NULL; } diff --git a/sys/dev/qat/qat_common/adf_init.c b/sys/dev/qat/qat_common/adf_init.c index f0b75db3f6ed..9f239b8f63d3 100644 --- a/sys/dev/qat/qat_common/adf_init.c +++ b/sys/dev/qat/qat_common/adf_init.c @@ -1,750 +1,755 @@ /* SPDX-License-Identifier: BSD-3-Clause */ -/* Copyright(c) 2007-2022 Intel Corporation */ +/* 
Copyright(c) 2007-2025 Intel Corporation */ #include "qat_freebsd.h" #include "adf_cfg.h" #include "adf_common_drv.h" +#include "adf_dbgfs.h" #include "adf_accel_devices.h" #include "icp_qat_uclo.h" #include "icp_qat_fw.h" #include "icp_qat_fw_init_admin.h" #include "adf_cfg_strings.h" #include "adf_dev_err.h" #include "adf_uio.h" #include "adf_transport_access_macros.h" #include "adf_transport_internal.h" #include #include #include "adf_accel_devices.h" #include "adf_cfg.h" #include "adf_common_drv.h" #include "icp_qat_fw.h" /* Mask used to check the CompressAndVerify capability bit */ #define DC_CNV_EXTENDED_CAPABILITY (0x01) /* Mask used to check the CompressAndVerifyAndRecover capability bit */ #define DC_CNVNR_EXTENDED_CAPABILITY (0x100) static LIST_HEAD(service_table); static DEFINE_MUTEX(service_lock); static void adf_service_add(struct service_hndl *service) { mutex_lock(&service_lock); list_add(&service->list, &service_table); mutex_unlock(&service_lock); } int adf_service_register(struct service_hndl *service) { memset(service->init_status, 0, sizeof(service->init_status)); memset(service->start_status, 0, sizeof(service->start_status)); adf_service_add(service); return 0; } static void adf_service_remove(struct service_hndl *service) { mutex_lock(&service_lock); list_del(&service->list); mutex_unlock(&service_lock); } int adf_service_unregister(struct service_hndl *service) { int i; for (i = 0; i < ARRAY_SIZE(service->init_status); i++) { if (service->init_status[i] || service->start_status[i]) { pr_err("QAT: Could not remove active service [%d]\n", i); return EFAULT; } } adf_service_remove(service); return 0; } static int adf_cfg_add_device_params(struct adf_accel_dev *accel_dev) { char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES]; char hw_version[ADF_CFG_MAX_VAL_LEN_IN_BYTES]; char mmp_version[ADF_CFG_MAX_VAL_LEN_IN_BYTES]; struct adf_hw_device_data *hw_data = NULL; unsigned long val; if (!accel_dev) return -EINVAL; hw_data = accel_dev->hw_device; if 
(adf_cfg_section_add(accel_dev, ADF_GENERAL_SEC)) goto err; snprintf(key, sizeof(key), ADF_DEV_MAX_BANKS); val = GET_MAX_BANKS(accel_dev); if (adf_cfg_add_key_value_param( accel_dev, ADF_GENERAL_SEC, key, (void *)&val, ADF_DEC)) goto err; snprintf(key, sizeof(key), ADF_DEV_CAPABILITIES_MASK); val = hw_data->accel_capabilities_mask; if (adf_cfg_add_key_value_param( accel_dev, ADF_GENERAL_SEC, key, (void *)val, ADF_HEX)) goto err; snprintf(key, sizeof(key), ADF_DEV_PKG_ID); val = accel_dev->accel_id; if (adf_cfg_add_key_value_param( accel_dev, ADF_GENERAL_SEC, key, (void *)&val, ADF_DEC)) goto err; snprintf(key, sizeof(key), ADF_DEV_NODE_ID); val = dev_to_node(GET_DEV(accel_dev)); if (adf_cfg_add_key_value_param( accel_dev, ADF_GENERAL_SEC, key, (void *)&val, ADF_DEC)) goto err; snprintf(key, sizeof(key), ADF_DEV_MAX_RINGS_PER_BANK); val = hw_data->num_rings_per_bank; if (adf_cfg_add_key_value_param( accel_dev, ADF_GENERAL_SEC, key, (void *)&val, ADF_DEC)) goto err; snprintf(key, sizeof(key), ADF_HW_REV_ID_KEY); snprintf(hw_version, ADF_CFG_MAX_VAL_LEN_IN_BYTES, "%d", accel_dev->accel_pci_dev.revid); if (adf_cfg_add_key_value_param( accel_dev, ADF_GENERAL_SEC, key, (void *)hw_version, ADF_STR)) goto err; snprintf(key, sizeof(key), ADF_MMP_VER_KEY); snprintf(mmp_version, ADF_CFG_MAX_VAL_LEN_IN_BYTES, "%d.%d.%d", accel_dev->fw_versions.mmp_version_major, accel_dev->fw_versions.mmp_version_minor, accel_dev->fw_versions.mmp_version_patch); if (adf_cfg_add_key_value_param( accel_dev, ADF_GENERAL_SEC, key, (void *)mmp_version, ADF_STR)) goto err; return 0; err: device_printf(GET_DEV(accel_dev), "Failed to add internal values to accel_dev cfg\n"); return -EINVAL; } static int adf_cfg_add_fw_version(struct adf_accel_dev *accel_dev) { char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES]; char fw_version[ADF_CFG_MAX_VAL_LEN_IN_BYTES]; snprintf(key, sizeof(key), ADF_UOF_VER_KEY); snprintf(fw_version, ADF_CFG_MAX_VAL_LEN_IN_BYTES, "%d.%d.%d", accel_dev->fw_versions.fw_version_major, 
accel_dev->fw_versions.fw_version_minor, accel_dev->fw_versions.fw_version_patch); if (adf_cfg_add_key_value_param( accel_dev, ADF_GENERAL_SEC, key, (void *)fw_version, ADF_STR)) return EFAULT; return 0; } static int adf_cfg_add_ext_params(struct adf_accel_dev *accel_dev) { char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES]; struct adf_hw_device_data *hw_data = accel_dev->hw_device; unsigned long val; snprintf(key, sizeof(key), ADF_DC_EXTENDED_FEATURES); val = hw_data->extended_dc_capabilities; if (adf_cfg_add_key_value_param( accel_dev, ADF_GENERAL_SEC, key, (void *)val, ADF_HEX)) return -EINVAL; return 0; } void adf_error_notifier(uintptr_t arg) { struct adf_accel_dev *accel_dev = (struct adf_accel_dev *)arg; struct service_hndl *service; struct list_head *list_itr; list_for_each(list_itr, &service_table) { service = list_entry(list_itr, struct service_hndl, list); if (service->event_hld(accel_dev, ADF_EVENT_ERROR)) device_printf(GET_DEV(accel_dev), "Failed to send error event to %s.\n", service->name); } } /** * adf_set_ssm_wdtimer() - Initialize the slice hang watchdog timer. * * Return: 0 on success, error code otherwise. 
*/ int adf_set_ssm_wdtimer(struct adf_accel_dev *accel_dev) { struct adf_hw_device_data *hw_data = accel_dev->hw_device; struct adf_bar *misc_bar = &GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)]; struct resource *csr = misc_bar->virt_addr; u32 i; unsigned int mask; u32 clk_per_sec = hw_data->get_clock_speed(hw_data); u32 timer_val = ADF_WDT_TIMER_SYM_COMP_MS * (clk_per_sec / 1000); u32 timer_val_pke = ADF_GEN2_SSM_WDT_PKE_DEFAULT_VALUE; char timer_str[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = { 0 }; /* Get Watch Dog Timer for CySym+Comp from the configuration */ if (!adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC, ADF_DEV_SSM_WDT_BULK, (char *)timer_str)) { if (!compat_strtouint((char *)timer_str, ADF_CFG_BASE_DEC, &timer_val)) /* Convert msec to CPP clocks */ timer_val = timer_val * (clk_per_sec / 1000); } /* Get Watch Dog Timer for CyAsym from the configuration */ if (!adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC, ADF_DEV_SSM_WDT_PKE, (char *)timer_str)) { if (!compat_strtouint((char *)timer_str, ADF_CFG_BASE_DEC, &timer_val_pke)) /* Convert msec to CPP clocks */ timer_val_pke = timer_val_pke * (clk_per_sec / 1000); } for (i = 0, mask = hw_data->accel_mask; mask; i++, mask >>= 1) { if (!(mask & 1)) continue; /* Enable Watch Dog Timer for CySym + Comp */ ADF_CSR_WR(csr, ADF_SSMWDT(i), timer_val); /* Enable Watch Dog Timer for CyAsym */ ADF_CSR_WR(csr, ADF_SSMWDTPKE(i), timer_val_pke); } return 0; } /** * adf_dev_init() - Init data structures and services for the given accel device * @accel_dev: Pointer to acceleration device. * * Initialize the ring data structures and the admin comms and arbitration * services. * * Return: 0 on success, error code otherwise. 
*/ int adf_dev_init(struct adf_accel_dev *accel_dev) { struct service_hndl *service; struct list_head *list_itr; struct adf_hw_device_data *hw_data = accel_dev->hw_device; char value[ADF_CFG_MAX_VAL_LEN_IN_BYTES]; int ret = 0; sysctl_ctx_init(&accel_dev->sysctl_ctx); set_bit(ADF_STATUS_SYSCTL_CTX_INITIALISED, &accel_dev->status); if (!hw_data) { device_printf(GET_DEV(accel_dev), "Failed to init device - hw_data not set\n"); return EFAULT; } if (hw_data->reset_hw_units) hw_data->reset_hw_units(accel_dev); if (!test_bit(ADF_STATUS_CONFIGURED, &accel_dev->status) && !accel_dev->is_vf) { device_printf(GET_DEV(accel_dev), "Device not configured\n"); return EFAULT; } if (adf_init_etr_data(accel_dev)) { device_printf(GET_DEV(accel_dev), "Failed initialize etr\n"); return EFAULT; } if (hw_data->init_device && hw_data->init_device(accel_dev)) { device_printf(GET_DEV(accel_dev), "Failed to initialize device\n"); return EFAULT; } if (hw_data->init_accel_units && hw_data->init_accel_units(accel_dev)) { device_printf(GET_DEV(accel_dev), "Failed initialize accel_units\n"); return EFAULT; } if (hw_data->init_admin_comms && hw_data->init_admin_comms(accel_dev)) { device_printf(GET_DEV(accel_dev), "Failed initialize admin comms\n"); return EFAULT; } if (hw_data->init_arb && hw_data->init_arb(accel_dev)) { device_printf(GET_DEV(accel_dev), "Failed initialize hw arbiter\n"); return EFAULT; } if (hw_data->set_asym_rings_mask) hw_data->set_asym_rings_mask(accel_dev); hw_data->enable_ints(accel_dev); if (adf_ae_init(accel_dev)) { device_printf(GET_DEV(accel_dev), "Failed to initialise Acceleration Engine\n"); return EFAULT; } set_bit(ADF_STATUS_AE_INITIALISED, &accel_dev->status); if (adf_ae_fw_load(accel_dev)) { device_printf(GET_DEV(accel_dev), "Failed to load acceleration FW\n"); return EFAULT; } set_bit(ADF_STATUS_AE_UCODE_LOADED, &accel_dev->status); if (hw_data->alloc_irq(accel_dev)) { device_printf(GET_DEV(accel_dev), "Failed to allocate interrupts\n"); return EFAULT; } 
set_bit(ADF_STATUS_IRQ_ALLOCATED, &accel_dev->status); if (hw_data->init_ras && hw_data->init_ras(accel_dev)) { device_printf(GET_DEV(accel_dev), "Failed to init RAS\n"); return EFAULT; } hw_data->enable_ints(accel_dev); hw_data->enable_error_correction(accel_dev); ret = hw_data->csr_info.pfvf_ops.enable_comms(accel_dev); if (ret) return ret; if (adf_cfg_add_device_params(accel_dev)) return EFAULT; if (hw_data->add_pke_stats && hw_data->add_pke_stats(accel_dev)) return EFAULT; if (hw_data->add_misc_error && hw_data->add_misc_error(accel_dev)) return EFAULT; /* * Subservice initialisation is divided into two stages: init and start. * This is to facilitate any ordering dependencies between services * prior to starting any of the accelerators. */ list_for_each(list_itr, &service_table) { service = list_entry(list_itr, struct service_hndl, list); if (service->event_hld(accel_dev, ADF_EVENT_INIT)) { device_printf(GET_DEV(accel_dev), "Failed to initialise service %s\n", service->name); return EFAULT; } set_bit(accel_dev->accel_id, service->init_status); } /* Read autoreset on error parameter */ ret = adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC, ADF_AUTO_RESET_ON_ERROR, value); if (!ret) { if (compat_strtouint(value, 10, &accel_dev->autoreset_on_error)) { device_printf( GET_DEV(accel_dev), "Failed converting %s to a decimal value\n", ADF_AUTO_RESET_ON_ERROR); return EFAULT; } } return 0; } /** * adf_dev_start() - Start acceleration service for the given accel device * @accel_dev: Pointer to acceleration device. * * Function notifies all the registered services that the acceleration device * is ready to be used. * To be used by QAT device specific drivers. * * Return: 0 on success, error code otherwise. 
*/ int adf_dev_start(struct adf_accel_dev *accel_dev) { struct adf_hw_device_data *hw_data = accel_dev->hw_device; struct service_hndl *service; struct list_head *list_itr; set_bit(ADF_STATUS_STARTING, &accel_dev->status); if (adf_devmgr_verify_id(&accel_dev->accel_id)) { device_printf(GET_DEV(accel_dev), "QAT: Device %d not found\n", accel_dev->accel_id); return ENODEV; } if (adf_ae_start(accel_dev)) { device_printf(GET_DEV(accel_dev), "AE Start Failed\n"); return EFAULT; } set_bit(ADF_STATUS_AE_STARTED, &accel_dev->status); if (hw_data->send_admin_init(accel_dev)) { device_printf(GET_DEV(accel_dev), "Failed to send init message\n"); return EFAULT; } if (adf_cfg_add_fw_version(accel_dev)) { device_printf(GET_DEV(accel_dev), "Failed to update configuration FW version\n"); return EFAULT; } if (hw_data->measure_clock) hw_data->measure_clock(accel_dev); /* * Set ssm watch dog timer for slice hang detection * Note! Not supported on devices older than C62x */ if (hw_data->set_ssm_wdtimer && hw_data->set_ssm_wdtimer(accel_dev)) { device_printf(GET_DEV(accel_dev), "QAT: Failed to set ssm watch dog timer\n"); return EFAULT; } if (hw_data->int_timer_init && hw_data->int_timer_init(accel_dev)) { device_printf(GET_DEV(accel_dev), "Failed to init heartbeat interrupt timer\n"); return -EFAULT; } list_for_each(list_itr, &service_table) { service = list_entry(list_itr, struct service_hndl, list); if (service->event_hld(accel_dev, ADF_EVENT_START)) { device_printf(GET_DEV(accel_dev), "Failed to start service %s\n", service->name); return EFAULT; } set_bit(accel_dev->accel_id, service->start_status); } if (accel_dev->is_vf || !accel_dev->u1.pf.vf_info) { /*Register UIO devices */ if (adf_uio_register(accel_dev)) { adf_uio_remove(accel_dev); device_printf(GET_DEV(accel_dev), "Failed to register UIO devices\n"); set_bit(ADF_STATUS_STARTING, &accel_dev->status); clear_bit(ADF_STATUS_STARTED, &accel_dev->status); return ENODEV; } } if (!test_bit(ADF_STATUS_RESTARTING, 
&accel_dev->status) && adf_cfg_add_ext_params(accel_dev)) return EFAULT; clear_bit(ADF_STATUS_STARTING, &accel_dev->status); set_bit(ADF_STATUS_STARTED, &accel_dev->status); + adf_dbgfs_add(accel_dev); + return 0; } /** * adf_dev_stop() - Stop acceleration service for the given accel device * @accel_dev: Pointer to acceleration device. * * Function notifies all the registered services that the acceleration device * is shuting down. * To be used by QAT device specific drivers. * * Return: 0 on success, error code otherwise. */ int adf_dev_stop(struct adf_accel_dev *accel_dev) { struct service_hndl *service; struct list_head *list_itr; if (adf_devmgr_verify_id(&accel_dev->accel_id)) { device_printf(GET_DEV(accel_dev), "QAT: Device %d not found\n", accel_dev->accel_id); return ENODEV; } if (!adf_dev_started(accel_dev) && !test_bit(ADF_STATUS_STARTING, &accel_dev->status)) { return 0; } if (adf_dev_stop_notify_sync(accel_dev)) { device_printf( GET_DEV(accel_dev), "Waiting for device un-busy failed. 
Retries limit reached\n"); return EBUSY; } + adf_dbgfs_rm(accel_dev); + clear_bit(ADF_STATUS_STARTING, &accel_dev->status); clear_bit(ADF_STATUS_STARTED, &accel_dev->status); if (accel_dev->hw_device->int_timer_exit) accel_dev->hw_device->int_timer_exit(accel_dev); list_for_each(list_itr, &service_table) { service = list_entry(list_itr, struct service_hndl, list); if (!test_bit(accel_dev->accel_id, service->start_status)) continue; clear_bit(accel_dev->accel_id, service->start_status); } if (accel_dev->is_vf || !accel_dev->u1.pf.vf_info) { /* Remove UIO Devices */ adf_uio_remove(accel_dev); } if (test_bit(ADF_STATUS_AE_STARTED, &accel_dev->status)) { if (adf_ae_stop(accel_dev)) device_printf(GET_DEV(accel_dev), "failed to stop AE\n"); else clear_bit(ADF_STATUS_AE_STARTED, &accel_dev->status); } return 0; } /** * adf_dev_shutdown() - shutdown acceleration services and data strucutures * @accel_dev: Pointer to acceleration device * * Cleanup the ring data structures and the admin comms and arbitration * services. 
*/ void adf_dev_shutdown(struct adf_accel_dev *accel_dev) { struct adf_hw_device_data *hw_data = accel_dev->hw_device; struct service_hndl *service; struct list_head *list_itr; if (test_bit(ADF_STATUS_SYSCTL_CTX_INITIALISED, &accel_dev->status)) { sysctl_ctx_free(&accel_dev->sysctl_ctx); clear_bit(ADF_STATUS_SYSCTL_CTX_INITIALISED, &accel_dev->status); } if (!hw_data) { device_printf( GET_DEV(accel_dev), "QAT: Failed to shutdown device - hw_data not set\n"); return; } if (test_bit(ADF_STATUS_AE_UCODE_LOADED, &accel_dev->status)) { adf_ae_fw_release(accel_dev); clear_bit(ADF_STATUS_AE_UCODE_LOADED, &accel_dev->status); } if (test_bit(ADF_STATUS_AE_INITIALISED, &accel_dev->status)) { if (adf_ae_shutdown(accel_dev)) device_printf(GET_DEV(accel_dev), "Failed to shutdown Accel Engine\n"); else clear_bit(ADF_STATUS_AE_INITIALISED, &accel_dev->status); } list_for_each(list_itr, &service_table) { service = list_entry(list_itr, struct service_hndl, list); if (!test_bit(accel_dev->accel_id, service->init_status)) continue; if (service->event_hld(accel_dev, ADF_EVENT_SHUTDOWN)) device_printf(GET_DEV(accel_dev), "Failed to shutdown service %s\n", service->name); else clear_bit(accel_dev->accel_id, service->init_status); } hw_data->disable_iov(accel_dev); if (test_bit(ADF_STATUS_IRQ_ALLOCATED, &accel_dev->status)) { hw_data->free_irq(accel_dev); clear_bit(ADF_STATUS_IRQ_ALLOCATED, &accel_dev->status); } /* Delete configuration only if not restarting */ if (!test_bit(ADF_STATUS_RESTARTING, &accel_dev->status)) adf_cfg_del_all(accel_dev); if (hw_data->remove_pke_stats) hw_data->remove_pke_stats(accel_dev); if (hw_data->remove_misc_error) hw_data->remove_misc_error(accel_dev); if (hw_data->exit_ras) hw_data->exit_ras(accel_dev); if (hw_data->exit_arb) hw_data->exit_arb(accel_dev); if (hw_data->exit_admin_comms) hw_data->exit_admin_comms(accel_dev); if (hw_data->exit_accel_units) hw_data->exit_accel_units(accel_dev); adf_cleanup_etr_data(accel_dev); if (hw_data->restore_device) 
hw_data->restore_device(accel_dev); } /** * adf_dev_reset() - Reset acceleration service for the given accel device * @accel_dev: Pointer to acceleration device. * @mode: Specifies reset mode - synchronous or asynchronous. * Function notifies all the registered services that the acceleration device * is resetting. * To be used by QAT device specific drivers. * * Return: 0 on success, error code otherwise. */ int adf_dev_reset(struct adf_accel_dev *accel_dev, enum adf_dev_reset_mode mode) { return adf_dev_aer_schedule_reset(accel_dev, mode); } int adf_dev_restarting_notify(struct adf_accel_dev *accel_dev) { struct service_hndl *service; struct list_head *list_itr; list_for_each(list_itr, &service_table) { service = list_entry(list_itr, struct service_hndl, list); if (service->event_hld(accel_dev, ADF_EVENT_RESTARTING)) device_printf(GET_DEV(accel_dev), "Failed to restart service %s.\n", service->name); } return 0; } int adf_dev_restarting_notify_sync(struct adf_accel_dev *accel_dev) { int times; adf_dev_restarting_notify(accel_dev); for (times = 0; times < ADF_STOP_RETRY; times++) { if (!adf_dev_in_use(accel_dev)) break; dev_dbg(GET_DEV(accel_dev), "retry times=%d\n", times); pause_ms("adfstop", 100); } if (adf_dev_in_use(accel_dev)) { clear_bit(ADF_STATUS_RESTARTING, &accel_dev->status); device_printf(GET_DEV(accel_dev), "Device still in use during reset sequence.\n"); return EBUSY; } return 0; } int adf_dev_stop_notify_sync(struct adf_accel_dev *accel_dev) { int times; struct service_hndl *service; struct list_head *list_itr; list_for_each(list_itr, &service_table) { service = list_entry(list_itr, struct service_hndl, list); if (service->event_hld(accel_dev, ADF_EVENT_STOP)) device_printf(GET_DEV(accel_dev), "Failed to restart service %s.\n", service->name); } for (times = 0; times < ADF_STOP_RETRY; times++) { if (!adf_dev_in_use(accel_dev)) break; dev_dbg(GET_DEV(accel_dev), "retry times=%d\n", times); pause_ms("adfstop", 100); } if (adf_dev_in_use(accel_dev)) { 
clear_bit(ADF_STATUS_RESTARTING, &accel_dev->status); device_printf(GET_DEV(accel_dev), "Device still in use during stop sequence.\n"); return EBUSY; } return 0; } int adf_dev_restarted_notify(struct adf_accel_dev *accel_dev) { struct service_hndl *service; struct list_head *list_itr; list_for_each(list_itr, &service_table) { service = list_entry(list_itr, struct service_hndl, list); if (service->event_hld(accel_dev, ADF_EVENT_RESTARTED)) device_printf(GET_DEV(accel_dev), "Failed to restart service %s.\n", service->name); } return 0; } diff --git a/sys/dev/qat/qat_hw/qat_200xx/adf_drv.c b/sys/dev/qat/qat_hw/qat_200xx/adf_drv.c index f9b8c742d339..9e2462c05657 100644 --- a/sys/dev/qat/qat_hw/qat_200xx/adf_drv.c +++ b/sys/dev/qat/qat_hw/qat_200xx/adf_drv.c @@ -1,280 +1,282 @@ /* SPDX-License-Identifier: BSD-3-Clause */ -/* Copyright(c) 2007-2022 Intel Corporation */ +/* Copyright(c) 2007-2025 Intel Corporation */ #include "qat_freebsd.h" #include "adf_cfg.h" #include "adf_common_drv.h" #include "adf_accel_devices.h" #include "adf_200xx_hw_data.h" #include "adf_fw_counters.h" #include "adf_cfg_device.h" +#include "adf_dbgfs.h" #include #include #include #include #include -#include "adf_heartbeat_dbg.h" -#include "adf_cnvnr_freq_counters.h" static MALLOC_DEFINE(M_QAT_200XX, "qat_200xx", "qat_200xx"); #define ADF_SYSTEM_DEVICE(device_id) \ { \ PCI_VENDOR_ID_INTEL, device_id \ } static const struct pci_device_id adf_pci_tbl[] = { ADF_SYSTEM_DEVICE(ADF_200XX_PCI_DEVICE_ID), { 0, } }; static int adf_probe(device_t dev) { const struct pci_device_id *id; for (id = adf_pci_tbl; id->vendor != 0; id++) { if (pci_get_vendor(dev) == id->vendor && pci_get_device(dev) == id->device) { device_set_desc(dev, "Intel " ADF_200XX_DEVICE_NAME " QuickAssist"); return BUS_PROBE_GENERIC; } } return ENXIO; } static void adf_cleanup_accel(struct adf_accel_dev *accel_dev) { struct adf_accel_pci *accel_pci_dev = &accel_dev->accel_pci_dev; int i; if (accel_dev->dma_tag) 
bus_dma_tag_destroy(accel_dev->dma_tag); for (i = 0; i < ADF_PCI_MAX_BARS; i++) { struct adf_bar *bar = &accel_pci_dev->pci_bars[i]; if (bar->virt_addr) bus_free_resource(accel_pci_dev->pci_dev, SYS_RES_MEMORY, bar->virt_addr); } if (accel_dev->hw_device) { switch (pci_get_device(accel_pci_dev->pci_dev)) { case ADF_200XX_PCI_DEVICE_ID: adf_clean_hw_data_200xx(accel_dev->hw_device); break; default: break; } free(accel_dev->hw_device, M_QAT_200XX); accel_dev->hw_device = NULL; } + adf_dbgfs_exit(accel_dev); adf_cfg_dev_remove(accel_dev); adf_devmgr_rm_dev(accel_dev, NULL); } static int adf_attach(device_t dev) { struct adf_accel_dev *accel_dev; struct adf_accel_pci *accel_pci_dev; struct adf_hw_device_data *hw_data; unsigned int i = 0, bar_nr = 0, reg_val = 0; int ret, rid; struct adf_cfg_device *cfg_dev = NULL; /* Set pci MaxPayLoad to 256. Implemented to avoid the issue of * Pci-passthrough causing Maxpayload to be reset to 128 bytes * when the device is reset. */ if (pci_get_max_payload(dev) != 256) pci_set_max_payload(dev, 256); accel_dev = device_get_softc(dev); INIT_LIST_HEAD(&accel_dev->crypto_list); accel_pci_dev = &accel_dev->accel_pci_dev; accel_pci_dev->pci_dev = dev; if (bus_get_domain(dev, &accel_pci_dev->node) != 0) accel_pci_dev->node = 0; /* XXX: Revisit if we actually need a devmgr table at all. */ /* Add accel device to accel table. 
* This should be called before adf_cleanup_accel is called */ if (adf_devmgr_add_dev(accel_dev, NULL)) { device_printf(dev, "Failed to add new accelerator device.\n"); return ENXIO; } /* Allocate and configure device configuration structure */ hw_data = malloc(sizeof(*hw_data), M_QAT_200XX, M_WAITOK | M_ZERO); accel_dev->hw_device = hw_data; adf_init_hw_data_200xx(accel_dev->hw_device); accel_pci_dev->revid = pci_get_revid(dev); hw_data->fuses = pci_read_config(dev, ADF_DEVICE_FUSECTL_OFFSET, 4); if (accel_pci_dev->revid == 0x00) { device_printf(dev, "A0 stepping is not supported.\n"); ret = ENODEV; goto out_err; } /* Get PPAERUCM values and store */ ret = adf_aer_store_ppaerucm_reg(dev, hw_data); if (ret) goto out_err; /* Clear PFIEERRUNCSTSR register bits if they are set */ reg_val = pci_read_config(dev, ADF_200XX_PFIEERRUNCSTSR, 4); if (reg_val) { device_printf( dev, "Clearing PFIEERRUNCSTSR, previous status : %0x\n", reg_val); pci_write_config(dev, ADF_200XX_PFIEERRUNCSTSR, reg_val, 4); } /* Get Accelerators and Accelerators Engines masks */ hw_data->accel_mask = hw_data->get_accel_mask(accel_dev); hw_data->ae_mask = hw_data->get_ae_mask(accel_dev); hw_data->admin_ae_mask = hw_data->ae_mask; accel_pci_dev->sku = hw_data->get_sku(hw_data); /* If the device has no acceleration engines then ignore it. 
*/ if (!hw_data->accel_mask || !hw_data->ae_mask || (~hw_data->ae_mask & 0x01)) { device_printf(dev, "No acceleration units found\n"); ret = ENXIO; goto out_err; } /* Create device configuration table */ ret = adf_cfg_dev_add(accel_dev); if (ret) goto out_err; ret = adf_clock_debugfs_add(accel_dev); if (ret) goto out_err; pci_set_max_read_req(dev, 1024); ret = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, BUS_SPACE_MAXSIZE, /* BUS_SPACE_UNRESTRICTED */ 1, BUS_SPACE_MAXSIZE, 0, NULL, NULL, &accel_dev->dma_tag); if (ret) goto out_err; if (hw_data->get_accel_cap) { hw_data->accel_capabilities_mask = hw_data->get_accel_cap(accel_dev); } /* Find and map all the device's BARS */ for (bar_nr = 0; i < ADF_PCI_MAX_BARS && bar_nr < PCIR_MAX_BAR_0; bar_nr++) { struct adf_bar *bar; /* * XXX: This isn't quite right as it will ignore a BAR * that wasn't assigned a valid resource range by the * firmware. */ rid = PCIR_BAR(bar_nr); if (bus_get_resource(dev, SYS_RES_MEMORY, rid, NULL, NULL) != 0) continue; bar = &accel_pci_dev->pci_bars[i++]; bar->virt_addr = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (!bar->virt_addr) { device_printf(dev, "Failed to map BAR %d\n", bar_nr); ret = ENXIO; goto out_err; } bar->base_addr = rman_get_start(bar->virt_addr); bar->size = rman_get_size(bar->virt_addr); } pci_enable_busmaster(dev); + adf_dbgfs_init(accel_dev); + if (!accel_dev->hw_device->config_device) { ret = EFAULT; goto out_err; } ret = accel_dev->hw_device->config_device(accel_dev); if (ret) goto out_err; ret = adf_dev_init(accel_dev); if (ret) goto out_dev_shutdown; ret = adf_dev_start(accel_dev); if (ret) goto out_dev_stop; cfg_dev = accel_dev->cfg->dev; adf_cfg_device_clear(cfg_dev, accel_dev); free(cfg_dev, M_QAT); accel_dev->cfg->dev = NULL; return ret; out_dev_stop: adf_dev_stop(accel_dev); out_dev_shutdown: adf_dev_shutdown(accel_dev); out_err: adf_cleanup_accel(accel_dev); return ret; } static int 
adf_detach(device_t dev) { struct adf_accel_dev *accel_dev = device_get_softc(dev); if (adf_dev_stop(accel_dev)) { device_printf(dev, "Failed to stop QAT accel dev\n"); return EBUSY; } adf_dev_shutdown(accel_dev); adf_cleanup_accel(accel_dev); return 0; } static device_method_t adf_methods[] = { DEVMETHOD(device_probe, adf_probe), DEVMETHOD(device_attach, adf_attach), DEVMETHOD(device_detach, adf_detach), DEVMETHOD_END }; static driver_t adf_driver = { "qat", adf_methods, sizeof(struct adf_accel_dev) }; DRIVER_MODULE_ORDERED(qat_200xx, pci, adf_driver, NULL, NULL, SI_ORDER_THIRD); MODULE_VERSION(qat_200xx, 1); MODULE_DEPEND(qat_200xx, qat_common, 1, 1, 1); MODULE_DEPEND(qat_200xx, qat_api, 1, 1, 1); MODULE_DEPEND(qat_200xx, linuxkpi, 1, 1, 1); diff --git a/sys/dev/qat/qat_hw/qat_4xxx/adf_drv.c b/sys/dev/qat/qat_hw/qat_4xxx/adf_drv.c index 9b585fe7d3d1..08fbf5d989e2 100644 --- a/sys/dev/qat/qat_hw/qat_4xxx/adf_drv.c +++ b/sys/dev/qat/qat_hw/qat_4xxx/adf_drv.c @@ -1,345 +1,347 @@ /* SPDX-License-Identifier: BSD-3-Clause */ -/* Copyright(c) 2007 - 2022 Intel Corporation */ +/* Copyright(c) 2007-2025 Intel Corporation */ #include "qat_freebsd.h" #include "adf_cfg.h" #include "adf_common_drv.h" #include "adf_accel_devices.h" #include "adf_4xxx_hw_data.h" #include "adf_gen4_hw_data.h" #include "adf_fw_counters.h" #include "adf_cfg_device.h" +#include "adf_dbgfs.h" #include #include #include #include #include -#include "adf_heartbeat_dbg.h" -#include "adf_cnvnr_freq_counters.h" static MALLOC_DEFINE(M_QAT_4XXX, "qat_4xxx", "qat_4xxx"); #define ADF_SYSTEM_DEVICE(device_id) \ { \ PCI_VENDOR_ID_INTEL, device_id \ } static const struct pci_device_id adf_pci_tbl[] = { ADF_SYSTEM_DEVICE(ADF_4XXX_PCI_DEVICE_ID), ADF_SYSTEM_DEVICE(ADF_401XX_PCI_DEVICE_ID), { 0, } }; static int adf_probe(device_t dev) { const struct pci_device_id *id; for (id = adf_pci_tbl; id->vendor != 0; id++) { if (pci_get_vendor(dev) == id->vendor && pci_get_device(dev) == id->device) { device_set_desc(dev, 
"Intel " ADF_4XXX_DEVICE_NAME " QuickAssist"); return BUS_PROBE_GENERIC; } } return ENXIO; } #ifdef QAT_DISABLE_SAFE_DC_MODE static int adf_4xxx_sysctl_disable_safe_dc_mode(SYSCTL_HANDLER_ARGS) { struct adf_accel_dev *accel_dev = arg1; int error, value = accel_dev->disable_safe_dc_mode; error = sysctl_handle_int(oidp, &value, 0, req); if (error || !req->newptr) return error; if (value != 1 && value != 0) return EINVAL; if (adf_dev_started(accel_dev)) { device_printf( GET_DEV(accel_dev), "QAT: configuration can only be changed in \"down\" device state\n"); return EBUSY; } accel_dev->disable_safe_dc_mode = (u8)value; return 0; } static void adf_4xxx_disable_safe_dc_sysctl_add(struct adf_accel_dev *accel_dev) { struct sysctl_ctx_list *qat_sysctl_ctx; struct sysctl_oid *qat_sysctl_tree; qat_sysctl_ctx = device_get_sysctl_ctx(accel_dev->accel_pci_dev.pci_dev); qat_sysctl_tree = device_get_sysctl_tree(accel_dev->accel_pci_dev.pci_dev); accel_dev->safe_dc_mode = SYSCTL_ADD_OID(qat_sysctl_ctx, SYSCTL_CHILDREN(qat_sysctl_tree), OID_AUTO, "disable_safe_dc_mode", CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_TUN | CTLFLAG_SKIP, accel_dev, 0, adf_4xxx_sysctl_disable_safe_dc_mode, "LU", "Disable QAT safe data compression mode"); } static void adf_4xxx_disable_safe_dc_sysctl_remove(struct adf_accel_dev *accel_dev) { int ret; struct sysctl_ctx_list *qat_sysctl_ctx = device_get_sysctl_ctx(accel_dev->accel_pci_dev.pci_dev); ret = sysctl_ctx_entry_del(qat_sysctl_ctx, accel_dev->safe_dc_mode); if (ret) { device_printf(GET_DEV(accel_dev), "Failed to delete entry\n"); } else { ret = sysctl_remove_oid(accel_dev->safe_dc_mode, 1, 1); if (ret) device_printf(GET_DEV(accel_dev), "Failed to delete oid\n"); } } #endif /* QAT_DISABLE_SAFE_DC_MODE */ static void adf_cleanup_accel(struct adf_accel_dev *accel_dev) { struct adf_accel_pci *accel_pci_dev = &accel_dev->accel_pci_dev; int i; if (accel_dev->dma_tag) bus_dma_tag_destroy(accel_dev->dma_tag); for (i = 0; i < ADF_PCI_MAX_BARS; i++) { struct adf_bar 
*bar = &accel_pci_dev->pci_bars[i]; if (bar->virt_addr) bus_free_resource(accel_pci_dev->pci_dev, SYS_RES_MEMORY, bar->virt_addr); } if (accel_dev->hw_device) { switch (pci_get_device(accel_pci_dev->pci_dev)) { case ADF_4XXX_PCI_DEVICE_ID: case ADF_401XX_PCI_DEVICE_ID: adf_clean_hw_data_4xxx(accel_dev->hw_device); break; default: break; } free(accel_dev->hw_device, M_QAT_4XXX); accel_dev->hw_device = NULL; } #ifdef QAT_DISABLE_SAFE_DC_MODE adf_4xxx_disable_safe_dc_sysctl_remove(accel_dev); #endif /* QAT_DISABLE_SAFE_DC_MODE */ + adf_dbgfs_exit(accel_dev); adf_cfg_dev_remove(accel_dev); adf_devmgr_rm_dev(accel_dev, NULL); } static int adf_attach(device_t dev) { struct adf_accel_dev *accel_dev; struct adf_accel_pci *accel_pci_dev; struct adf_hw_device_data *hw_data; unsigned int bar_nr; int ret, rid; struct adf_cfg_device *cfg_dev = NULL; /* Set pci MaxPayLoad to 512. Implemented to avoid the issue of * Pci-passthrough causing Maxpayload to be reset to 128 bytes * when the device is reset. */ if (pci_get_max_payload(dev) != 512) pci_set_max_payload(dev, 512); accel_dev = device_get_softc(dev); INIT_LIST_HEAD(&accel_dev->crypto_list); accel_pci_dev = &accel_dev->accel_pci_dev; accel_pci_dev->pci_dev = dev; if (bus_get_domain(dev, &accel_pci_dev->node) != 0) accel_pci_dev->node = 0; /* Add accel device to accel table. 
* This should be called before adf_cleanup_accel is called */ if (adf_devmgr_add_dev(accel_dev, NULL)) { device_printf(dev, "Failed to add new accelerator device.\n"); return ENXIO; } /* Allocate and configure device configuration structure */ hw_data = malloc(sizeof(*hw_data), M_QAT_4XXX, M_WAITOK | M_ZERO); accel_dev->hw_device = hw_data; adf_init_hw_data_4xxx(accel_dev->hw_device, pci_get_device(dev)); accel_pci_dev->revid = pci_get_revid(dev); hw_data->fuses = pci_read_config(dev, ADF_4XXX_FUSECTL4_OFFSET, 4); if (accel_pci_dev->revid == 0x00) { device_printf(dev, "A0 stepping is not supported.\n"); ret = ENODEV; goto out_err; } /* Get PPAERUCM values and store */ ret = adf_aer_store_ppaerucm_reg(dev, hw_data); if (ret) goto out_err; /* Get Accelerators and Accelerators Engines masks */ hw_data->accel_mask = hw_data->get_accel_mask(accel_dev); hw_data->ae_mask = hw_data->get_ae_mask(accel_dev); accel_pci_dev->sku = hw_data->get_sku(hw_data); /* If the device has no acceleration engines then ignore it. 
*/ if (!hw_data->accel_mask || !hw_data->ae_mask || (~hw_data->ae_mask & 0x01)) { device_printf(dev, "No acceleration units found\n"); ret = ENXIO; goto out_err; } /* Create device configuration table */ ret = adf_cfg_dev_add(accel_dev); if (ret) goto out_err; ret = adf_clock_debugfs_add(accel_dev); if (ret) goto out_err; #ifdef QAT_DISABLE_SAFE_DC_MODE adf_4xxx_disable_safe_dc_sysctl_add(accel_dev); #endif /* QAT_DISABLE_SAFE_DC_MODE */ pci_set_max_read_req(dev, 4096); ret = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, BUS_SPACE_MAXSIZE, /* BUS_SPACE_UNRESTRICTED */ 1, BUS_SPACE_MAXSIZE, 0, NULL, NULL, &accel_dev->dma_tag); if (ret) goto out_err; if (hw_data->get_accel_cap) { hw_data->accel_capabilities_mask = hw_data->get_accel_cap(accel_dev); } /* Find and map all the device's BARS */ /* Logical BARs configuration for 64bit BARs: bar 0 and 1 - logical BAR0 bar 2 and 3 - logical BAR1 bar 4 and 5 - logical BAR3 */ for (bar_nr = 0; bar_nr < (ADF_PCI_MAX_BARS * 2) && bar_nr < PCIR_MAX_BAR_0; bar_nr += 2) { struct adf_bar *bar; rid = PCIR_BAR(bar_nr); bar = &accel_pci_dev->pci_bars[bar_nr / 2]; bar->virt_addr = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (!bar->virt_addr) { device_printf(dev, "Failed to map BAR %d\n", bar_nr); ret = ENXIO; goto out_err; } bar->base_addr = rman_get_start(bar->virt_addr); bar->size = rman_get_size(bar->virt_addr); } pci_enable_busmaster(dev); + adf_dbgfs_init(accel_dev); + if (!accel_dev->hw_device->config_device) { ret = EFAULT; goto out_err; } ret = accel_dev->hw_device->config_device(accel_dev); if (ret) goto out_err; ret = adf_dev_init(accel_dev); if (ret) goto out_dev_shutdown; ret = adf_dev_start(accel_dev); if (ret) goto out_dev_stop; cfg_dev = accel_dev->cfg->dev; adf_cfg_device_clear(cfg_dev, accel_dev); free(cfg_dev, M_QAT); accel_dev->cfg->dev = NULL; return ret; out_dev_stop: adf_dev_stop(accel_dev); out_dev_shutdown: adf_dev_shutdown(accel_dev); 
out_err: adf_cleanup_accel(accel_dev); return ret; } static int adf_detach(device_t dev) { struct adf_accel_dev *accel_dev = device_get_softc(dev); if (adf_dev_stop(accel_dev)) { device_printf(dev, "Failed to stop QAT accel dev\n"); return EBUSY; } adf_dev_shutdown(accel_dev); adf_cleanup_accel(accel_dev); return 0; } static device_method_t adf_methods[] = { DEVMETHOD(device_probe, adf_probe), DEVMETHOD(device_attach, adf_attach), DEVMETHOD(device_detach, adf_detach), DEVMETHOD_END }; static driver_t adf_driver = { "qat", adf_methods, sizeof(struct adf_accel_dev) }; DRIVER_MODULE_ORDERED(qat_4xxx, pci, adf_driver, NULL, NULL, SI_ORDER_THIRD); MODULE_VERSION(qat_4xxx, 1); MODULE_DEPEND(qat_4xxx, qat_common, 1, 1, 1); MODULE_DEPEND(qat_4xxx, qat_api, 1, 1, 1); MODULE_DEPEND(qat_4xxx, linuxkpi, 1, 1, 1); diff --git a/sys/dev/qat/qat_hw/qat_4xxxvf/adf_drv.c b/sys/dev/qat/qat_hw/qat_4xxxvf/adf_drv.c index 9b66ae4b2370..42189c28b21a 100644 --- a/sys/dev/qat/qat_hw/qat_4xxxvf/adf_drv.c +++ b/sys/dev/qat/qat_hw/qat_4xxxvf/adf_drv.c @@ -1,266 +1,270 @@ /* SPDX-License-Identifier: BSD-3-Clause */ -/* Copyright(c) 2007-2022 Intel Corporation */ +/* Copyright(c) 2007-2025 Intel Corporation */ #include "qat_freebsd.h" #include #include #include #include "adf_4xxxvf_hw_data.h" #include "adf_gen4_hw_data.h" #include "adf_fw_counters.h" #include "adf_cfg_device.h" +#include "adf_dbgfs.h" #include #include #include #include #include static MALLOC_DEFINE(M_QAT_4XXXVF, "qat_4xxxvf", "qat_4xxxvf"); #define ADF_SYSTEM_DEVICE(device_id) \ { \ PCI_VENDOR_ID_INTEL, device_id \ } static const struct pci_device_id adf_pci_tbl[] = { ADF_SYSTEM_DEVICE(ADF_4XXXIOV_PCI_DEVICE_ID), ADF_SYSTEM_DEVICE(ADF_401XXIOV_PCI_DEVICE_ID), { 0, } }; static int adf_probe(device_t dev) { const struct pci_device_id *id; for (id = adf_pci_tbl; id->vendor != 0; id++) { if (pci_get_vendor(dev) == id->vendor && pci_get_device(dev) == id->device) { device_set_desc(dev, "Intel " ADF_4XXXVF_DEVICE_NAME " 
QuickAssist"); return BUS_PROBE_GENERIC; } } return ENXIO; } static void adf_cleanup_accel(struct adf_accel_dev *accel_dev) { struct adf_accel_pci *accel_pci_dev = &accel_dev->accel_pci_dev; struct adf_accel_dev *pf; int i; if (accel_dev->dma_tag) bus_dma_tag_destroy(accel_dev->dma_tag); for (i = 0; i < ADF_PCI_MAX_BARS; i++) { struct adf_bar *bar = &accel_pci_dev->pci_bars[i]; if (bar->virt_addr) bus_free_resource(accel_pci_dev->pci_dev, SYS_RES_MEMORY, bar->virt_addr); } /* * As adf_clean_hw_data_4xxxiov() will update class index, before * index is updated, vf must be remove from accel_table. */ pf = adf_devmgr_pci_to_accel_dev(pci_find_pf(accel_pci_dev->pci_dev)); adf_devmgr_rm_dev(accel_dev, pf); if (accel_dev->hw_device) { switch (pci_get_device(accel_pci_dev->pci_dev)) { case ADF_4XXXIOV_PCI_DEVICE_ID: case ADF_401XXIOV_PCI_DEVICE_ID: adf_clean_hw_data_4xxxiov(accel_dev->hw_device); break; default: break; } free(accel_dev->hw_device, M_QAT_4XXXVF); accel_dev->hw_device = NULL; } + adf_dbgfs_exit(accel_dev); adf_cfg_dev_remove(accel_dev); } static int adf_attach(device_t dev) { struct adf_accel_dev *accel_dev; struct adf_accel_dev *pf; struct adf_accel_pci *accel_pci_dev; struct adf_hw_device_data *hw_data; unsigned int bar_nr; int ret = 0; int rid; struct adf_cfg_device *cfg_dev = NULL; accel_dev = device_get_softc(dev); accel_dev->is_vf = true; pf = adf_devmgr_pci_to_accel_dev(pci_find_pf(dev)); INIT_LIST_HEAD(&accel_dev->crypto_list); accel_pci_dev = &accel_dev->accel_pci_dev; accel_pci_dev->pci_dev = dev; if (bus_get_domain(dev, &accel_pci_dev->node) != 0) accel_pci_dev->node = 0; /* Add accel device to accel table */ if (adf_devmgr_add_dev(accel_dev, pf)) { device_printf(GET_DEV(accel_dev), "Failed to add new accelerator device.\n"); return -EFAULT; } /* Allocate and configure device configuration structure */ hw_data = malloc(sizeof(*hw_data), M_QAT_4XXXVF, M_WAITOK | M_ZERO); accel_dev->hw_device = hw_data; 
adf_init_hw_data_4xxxiov(accel_dev->hw_device); accel_pci_dev->revid = pci_get_revid(dev); hw_data->fuses = pci_read_config(dev, ADF_4XXXIOV_VFFUSECTL4_OFFSET, 4); /* Get Accelerators and Accelerators Engines masks */ hw_data->accel_mask = hw_data->get_accel_mask(accel_dev); hw_data->ae_mask = hw_data->get_ae_mask(accel_dev); hw_data->admin_ae_mask = hw_data->ae_mask; accel_pci_dev->sku = hw_data->get_sku(hw_data); /* Create device configuration table */ ret = adf_cfg_dev_add(accel_dev); if (ret) goto out_err; pci_set_max_read_req(dev, 1024); ret = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, BUS_SPACE_MAXSIZE, /* BUS_SPACE_UNRESTRICTED */ 1, BUS_SPACE_MAXSIZE, 0, NULL, NULL, &accel_dev->dma_tag); hw_data->accel_capabilities_mask = adf_4xxxvf_get_hw_cap(accel_dev); /* Find and map all the device's BARS */ /* Logical BARs configuration for 64bit BARs: bar 0 and 1 - logical BAR0 bar 2 and 3 - logical BAR1 bar 4 and 5 - logical BAR3 */ for (bar_nr = 0; bar_nr < (ADF_PCI_MAX_BARS * 2) && bar_nr < PCIR_MAX_BAR_0; bar_nr += 2) { struct adf_bar *bar; rid = PCIR_BAR(bar_nr); bar = &accel_pci_dev->pci_bars[bar_nr / 2]; bar->virt_addr = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (!bar->virt_addr) { device_printf(dev, "Failed to map BAR %d\n", bar_nr); ret = ENXIO; goto out_err; } bar->base_addr = rman_get_start(bar->virt_addr); bar->size = rman_get_size(bar->virt_addr); } pci_enable_busmaster(dev); + adf_dbgfs_init(accel_dev); + /* Completion for VF2PF request/response message exchange */ init_completion(&accel_dev->u1.vf.msg_received); mutex_init(&accel_dev->u1.vf.rpreset_lock); ret = hw_data->config_device(accel_dev); if (ret) goto out_err; ret = adf_dev_init(accel_dev); if (!ret) ret = adf_dev_start(accel_dev); if (ret) { device_printf( GET_DEV(accel_dev), "Failed to start - make sure PF enabled services match VF configuration.\n"); adf_dev_stop(accel_dev); adf_dev_shutdown(accel_dev); return 0; } 
cfg_dev = accel_dev->cfg->dev; adf_cfg_device_clear(cfg_dev, accel_dev); free(cfg_dev, M_QAT); accel_dev->cfg->dev = NULL; return ret; out_err: adf_cleanup_accel(accel_dev); return ret; } static int adf_detach(device_t dev) { struct adf_accel_dev *accel_dev = device_get_softc(dev); if (!accel_dev) { printf("QAT: Driver removal failed\n"); return EFAULT; } adf_flush_vf_wq(accel_dev); clear_bit(ADF_STATUS_RESTARTING, &accel_dev->status); adf_dev_stop(accel_dev); adf_dev_shutdown(accel_dev); adf_cleanup_accel(accel_dev); return 0; } static int adf_modevent(module_t mod, int type, void *data) { switch (type) { case MOD_UNLOAD: adf_clean_vf_map(true); return 0; default: return EOPNOTSUPP; } } static device_method_t adf_methods[] = { DEVMETHOD(device_probe, adf_probe), DEVMETHOD(device_attach, adf_attach), DEVMETHOD(device_detach, adf_detach), DEVMETHOD_END }; static driver_t adf_driver = { "qat", adf_methods, sizeof(struct adf_accel_dev) }; DRIVER_MODULE_ORDERED(qat_4xxxvf, pci, adf_driver, adf_modevent, NULL, SI_ORDER_THIRD); MODULE_VERSION(qat_4xxxvf, 1); MODULE_DEPEND(qat_4xxxvf, qat_common, 1, 1, 1); MODULE_DEPEND(qat_4xxxvf, qat_api, 1, 1, 1); MODULE_DEPEND(qat_4xxxvf, linuxkpi, 1, 1, 1); diff --git a/sys/dev/qat/qat_hw/qat_c3xxx/adf_drv.c b/sys/dev/qat/qat_hw/qat_c3xxx/adf_drv.c index 74e9e2292623..9f2c9374e968 100644 --- a/sys/dev/qat/qat_hw/qat_c3xxx/adf_drv.c +++ b/sys/dev/qat/qat_hw/qat_c3xxx/adf_drv.c @@ -1,269 +1,271 @@ /* SPDX-License-Identifier: BSD-3-Clause */ -/* Copyright(c) 2007-2022 Intel Corporation */ +/* Copyright(c) 2007-2025 Intel Corporation */ #include "qat_freebsd.h" #include "adf_cfg.h" #include "adf_common_drv.h" #include "adf_accel_devices.h" #include "adf_c3xxx_hw_data.h" #include "adf_fw_counters.h" #include "adf_cfg_device.h" +#include "adf_dbgfs.h" #include #include #include #include #include -#include "adf_heartbeat_dbg.h" -#include "adf_cnvnr_freq_counters.h" static MALLOC_DEFINE(M_QAT_C3XXX, "qat_c3xxx", "qat_c3xxx"); #define 
ADF_SYSTEM_DEVICE(device_id) \ { \ PCI_VENDOR_ID_INTEL, device_id \ } static const struct pci_device_id adf_pci_tbl[] = { ADF_SYSTEM_DEVICE(ADF_C3XXX_PCI_DEVICE_ID), { 0, } }; static int adf_probe(device_t dev) { const struct pci_device_id *id; for (id = adf_pci_tbl; id->vendor != 0; id++) { if (pci_get_vendor(dev) == id->vendor && pci_get_device(dev) == id->device) { device_set_desc(dev, "Intel " ADF_C3XXX_DEVICE_NAME " QuickAssist"); return BUS_PROBE_GENERIC; } } return ENXIO; } static void adf_cleanup_accel(struct adf_accel_dev *accel_dev) { struct adf_accel_pci *accel_pci_dev = &accel_dev->accel_pci_dev; int i; if (accel_dev->dma_tag) bus_dma_tag_destroy(accel_dev->dma_tag); for (i = 0; i < ADF_PCI_MAX_BARS; i++) { struct adf_bar *bar = &accel_pci_dev->pci_bars[i]; if (bar->virt_addr) bus_free_resource(accel_pci_dev->pci_dev, SYS_RES_MEMORY, bar->virt_addr); } if (accel_dev->hw_device) { switch (pci_get_device(accel_pci_dev->pci_dev)) { case ADF_C3XXX_PCI_DEVICE_ID: adf_clean_hw_data_c3xxx(accel_dev->hw_device); break; default: break; } free(accel_dev->hw_device, M_QAT_C3XXX); accel_dev->hw_device = NULL; } + adf_dbgfs_exit(accel_dev); adf_cfg_dev_remove(accel_dev); adf_devmgr_rm_dev(accel_dev, NULL); } static int adf_attach(device_t dev) { struct adf_accel_dev *accel_dev; struct adf_accel_pci *accel_pci_dev; struct adf_hw_device_data *hw_data; unsigned int i, bar_nr; int ret, rid; struct adf_cfg_device *cfg_dev = NULL; /* Set pci MaxPayLoad to 256. Implemented to avoid the issue of * Pci-passthrough causing Maxpayload to be reset to 128 bytes * when the device is reset. */ if (pci_get_max_payload(dev) != 256) pci_set_max_payload(dev, 256); accel_dev = device_get_softc(dev); INIT_LIST_HEAD(&accel_dev->crypto_list); accel_pci_dev = &accel_dev->accel_pci_dev; accel_pci_dev->pci_dev = dev; if (bus_get_domain(dev, &accel_pci_dev->node) != 0) accel_pci_dev->node = 0; /* XXX: Revisit if we actually need a devmgr table at all. */ /* Add accel device to accel table. 
* This should be called before adf_cleanup_accel is called */ if (adf_devmgr_add_dev(accel_dev, NULL)) { device_printf(dev, "Failed to add new accelerator device.\n"); return ENXIO; } /* Allocate and configure device configuration structure */ hw_data = malloc(sizeof(*hw_data), M_QAT_C3XXX, M_WAITOK | M_ZERO); accel_dev->hw_device = hw_data; adf_init_hw_data_c3xxx(accel_dev->hw_device); accel_pci_dev->revid = pci_get_revid(dev); hw_data->fuses = pci_read_config(dev, ADF_DEVICE_FUSECTL_OFFSET, 4); if (accel_pci_dev->revid == 0x00) { device_printf(dev, "A0 stepping is not supported.\n"); ret = ENODEV; goto out_err; } /* Get PPAERUCM values and store */ ret = adf_aer_store_ppaerucm_reg(dev, hw_data); if (ret) goto out_err; /* Get Accelerators and Accelerators Engines masks */ hw_data->accel_mask = hw_data->get_accel_mask(accel_dev); hw_data->ae_mask = hw_data->get_ae_mask(accel_dev); hw_data->admin_ae_mask = hw_data->ae_mask; accel_pci_dev->sku = hw_data->get_sku(hw_data); /* If the device has no acceleration engines then ignore it. 
*/ if (!hw_data->accel_mask || !hw_data->ae_mask || ((~hw_data->ae_mask) & 0x01)) { device_printf(dev, "No acceleration units found\n"); ret = ENXIO; goto out_err; } /* Create device configuration table */ ret = adf_cfg_dev_add(accel_dev); if (ret) goto out_err; ret = adf_clock_debugfs_add(accel_dev); if (ret) goto out_err; pci_set_max_read_req(dev, 1024); ret = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, BUS_SPACE_MAXSIZE, /* BUS_SPACE_UNRESTRICTED */ 1, BUS_SPACE_MAXSIZE, 0, NULL, NULL, &accel_dev->dma_tag); if (ret) goto out_err; if (hw_data->get_accel_cap) { hw_data->accel_capabilities_mask = hw_data->get_accel_cap(accel_dev); } /* Find and map all the device's BARS */ i = 0; for (bar_nr = 0; i < ADF_PCI_MAX_BARS && bar_nr < PCIR_MAX_BAR_0; bar_nr++) { struct adf_bar *bar; /* * XXX: This isn't quite right as it will ignore a BAR * that wasn't assigned a valid resource range by the * firmware. */ rid = PCIR_BAR(bar_nr); if (bus_get_resource(dev, SYS_RES_MEMORY, rid, NULL, NULL) != 0) continue; bar = &accel_pci_dev->pci_bars[i++]; bar->virt_addr = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (bar->virt_addr == NULL) { device_printf(dev, "Failed to map BAR %d\n", bar_nr); ret = ENXIO; goto out_err; } bar->base_addr = rman_get_start(bar->virt_addr); bar->size = rman_get_size(bar->virt_addr); } pci_enable_busmaster(dev); + adf_dbgfs_init(accel_dev); + if (!accel_dev->hw_device->config_device) { ret = EFAULT; goto out_err; } ret = accel_dev->hw_device->config_device(accel_dev); if (ret) goto out_err; ret = adf_dev_init(accel_dev); if (ret) goto out_dev_shutdown; ret = adf_dev_start(accel_dev); if (ret) goto out_dev_stop; cfg_dev = accel_dev->cfg->dev; adf_cfg_device_clear(cfg_dev, accel_dev); free(cfg_dev, M_QAT); accel_dev->cfg->dev = NULL; return ret; out_dev_stop: adf_dev_stop(accel_dev); out_dev_shutdown: adf_dev_shutdown(accel_dev); out_err: adf_cleanup_accel(accel_dev); return ret; } 
static int adf_detach(device_t dev) { struct adf_accel_dev *accel_dev = device_get_softc(dev); if (adf_dev_stop(accel_dev)) { device_printf(dev, "Failed to stop QAT accel dev\n"); return EBUSY; } adf_dev_shutdown(accel_dev); adf_cleanup_accel(accel_dev); return 0; } static device_method_t adf_methods[] = { DEVMETHOD(device_probe, adf_probe), DEVMETHOD(device_attach, adf_attach), DEVMETHOD(device_detach, adf_detach), DEVMETHOD_END }; static driver_t adf_driver = { "qat", adf_methods, sizeof(struct adf_accel_dev) }; DRIVER_MODULE_ORDERED(qat_c3xxx, pci, adf_driver, NULL, NULL, SI_ORDER_THIRD); MODULE_VERSION(qat_c3xxx, 1); MODULE_DEPEND(qat_c3xxx, qat_common, 1, 1, 1); MODULE_DEPEND(qat_c3xxx, qat_api, 1, 1, 1); MODULE_DEPEND(qat_c3xxx, linuxkpi, 1, 1, 1); diff --git a/sys/dev/qat/qat_hw/qat_c4xxx/adf_drv.c b/sys/dev/qat/qat_hw/qat_c4xxx/adf_drv.c index 0e206e960a6c..697f5b1ce786 100644 --- a/sys/dev/qat/qat_hw/qat_c4xxx/adf_drv.c +++ b/sys/dev/qat/qat_hw/qat_c4xxx/adf_drv.c @@ -1,268 +1,270 @@ /* SPDX-License-Identifier: BSD-3-Clause */ -/* Copyright(c) 2007-2022 Intel Corporation */ +/* Copyright(c) 2007-2025 Intel Corporation */ #include "qat_freebsd.h" #include "adf_cfg.h" #include "adf_common_drv.h" #include "adf_accel_devices.h" #include "adf_c4xxx_hw_data.h" #include "adf_fw_counters.h" #include "adf_cfg_device.h" +#include "adf_dbgfs.h" #include #include #include #include #include -#include "adf_heartbeat_dbg.h" -#include "adf_cnvnr_freq_counters.h" static MALLOC_DEFINE(M_QAT_C4XXX, "qat_c4xx", "qat_c4xx"); #define ADF_SYSTEM_DEVICE(device_id) \ { \ PCI_VENDOR_ID_INTEL, device_id \ } static const struct pci_device_id adf_pci_tbl[] = { ADF_SYSTEM_DEVICE(ADF_C4XXX_PCI_DEVICE_ID), { 0, } }; static int adf_probe(device_t dev) { const struct pci_device_id *id; for (id = adf_pci_tbl; id->vendor != 0; id++) { if (pci_get_vendor(dev) == id->vendor && pci_get_device(dev) == id->device) { device_set_desc(dev, "Intel " ADF_C4XXX_DEVICE_NAME " QuickAssist"); return 
BUS_PROBE_GENERIC; } } return ENXIO; } static void adf_cleanup_accel(struct adf_accel_dev *accel_dev) { struct adf_accel_pci *accel_pci_dev = &accel_dev->accel_pci_dev; int i; if (accel_dev->dma_tag) bus_dma_tag_destroy(accel_dev->dma_tag); for (i = 0; i < ADF_PCI_MAX_BARS; i++) { struct adf_bar *bar = &accel_pci_dev->pci_bars[i]; if (bar->virt_addr) bus_free_resource(accel_pci_dev->pci_dev, SYS_RES_MEMORY, bar->virt_addr); } if (accel_dev->hw_device) { switch (pci_get_device(accel_pci_dev->pci_dev)) { case ADF_C4XXX_PCI_DEVICE_ID: adf_clean_hw_data_c4xxx(accel_dev->hw_device); break; default: break; } free(accel_dev->hw_device, M_QAT_C4XXX); accel_dev->hw_device = NULL; } + adf_dbgfs_exit(accel_dev); adf_cfg_dev_remove(accel_dev); adf_devmgr_rm_dev(accel_dev, NULL); } static int adf_attach(device_t dev) { struct adf_accel_dev *accel_dev; struct adf_accel_pci *accel_pci_dev; struct adf_hw_device_data *hw_data; unsigned int i, bar_nr; int ret, rid; struct adf_cfg_device *cfg_dev = NULL; /* Set pci MaxPayLoad to 256. Implemented to avoid the issue of * Pci-passthrough causing Maxpayload to be reset to 128 bytes * when the device is reset. */ if (pci_get_max_payload(dev) != 256) pci_set_max_payload(dev, 256); accel_dev = device_get_softc(dev); INIT_LIST_HEAD(&accel_dev->crypto_list); accel_pci_dev = &accel_dev->accel_pci_dev; accel_pci_dev->pci_dev = dev; if (bus_get_domain(dev, &accel_pci_dev->node) != 0) accel_pci_dev->node = 0; /* XXX: Revisit if we actually need a devmgr table at all. */ /* Add accel device to accel table. 
* This should be called before adf_cleanup_accel is called */ if (adf_devmgr_add_dev(accel_dev, NULL)) { device_printf(dev, "Failed to add new accelerator device.\n"); return ENXIO; } /* Allocate and configure device configuration structure */ hw_data = malloc(sizeof(*hw_data), M_QAT_C4XXX, M_WAITOK | M_ZERO); accel_dev->hw_device = hw_data; adf_init_hw_data_c4xxx(accel_dev->hw_device); accel_pci_dev->revid = pci_get_revid(dev); hw_data->fuses = pci_read_config(dev, ADF_DEVICE_FUSECTL_OFFSET, 4); /* Get PPAERUCM values and store */ ret = adf_aer_store_ppaerucm_reg(dev, hw_data); if (ret) goto out_err; /* Get Accelerators and Accelerators Engines masks */ hw_data->accel_mask = hw_data->get_accel_mask(accel_dev); hw_data->ae_mask = hw_data->get_ae_mask(accel_dev); hw_data->admin_ae_mask = hw_data->ae_mask; /* If the device has no acceleration engines then ignore it. */ if (!hw_data->accel_mask || !hw_data->ae_mask || (~hw_data->ae_mask & 0x01)) { device_printf(dev, "No acceleration units found\n"); ret = ENXIO; goto out_err; } /* Create device configuration table */ ret = adf_cfg_dev_add(accel_dev); if (ret) goto out_err; ret = adf_clock_debugfs_add(accel_dev); if (ret) goto out_err; pci_set_max_read_req(dev, 1024); ret = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, BUS_SPACE_MAXSIZE, /*BUS_SPACE_UNRESTRICTED*/ 1, BUS_SPACE_MAXSIZE, 0, NULL, NULL, &accel_dev->dma_tag); if (ret) goto out_err; if (hw_data->get_accel_cap) { hw_data->accel_capabilities_mask = hw_data->get_accel_cap(accel_dev); } accel_pci_dev->sku = hw_data->get_sku(hw_data); /* Find and map all the device's BARS */ i = 0; for (bar_nr = 0; i < ADF_PCI_MAX_BARS && bar_nr < PCIR_MAX_BAR_0; bar_nr++) { struct adf_bar *bar; /* * XXX: This isn't quite right as it will ignore a BAR * that wasn't assigned a valid resource range by the * firmware. 
*/ rid = PCIR_BAR(bar_nr); if (bus_get_resource(dev, SYS_RES_MEMORY, rid, NULL, NULL) != 0) continue; bar = &accel_pci_dev->pci_bars[i++]; bar->virt_addr = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (!bar->virt_addr) { device_printf(dev, "Failed to map BAR %d\n", bar_nr); ret = ENXIO; goto out_err; } bar->base_addr = rman_get_start(bar->virt_addr); bar->size = rman_get_size(bar->virt_addr); } pci_enable_busmaster(dev); + adf_dbgfs_init(accel_dev); + if (!accel_dev->hw_device->config_device) { ret = EFAULT; goto out_err; } ret = accel_dev->hw_device->config_device(accel_dev); if (ret) goto out_err; ret = adf_dev_init(accel_dev); if (ret) goto out_dev_shutdown; ret = adf_dev_start(accel_dev); if (ret) goto out_dev_stop; cfg_dev = accel_dev->cfg->dev; adf_cfg_device_clear(cfg_dev, accel_dev); free(cfg_dev, M_QAT); accel_dev->cfg->dev = NULL; return ret; out_dev_stop: adf_dev_stop(accel_dev); out_dev_shutdown: adf_dev_shutdown(accel_dev); out_err: adf_cleanup_accel(accel_dev); return ret; } static int adf_detach(device_t dev) { struct adf_accel_dev *accel_dev = device_get_softc(dev); if (adf_dev_stop(accel_dev)) { device_printf(dev, "Failed to stop QAT accel dev\n"); return EBUSY; } adf_dev_shutdown(accel_dev); adf_cleanup_accel(accel_dev); return 0; } static device_method_t adf_methods[] = { DEVMETHOD(device_probe, adf_probe), DEVMETHOD(device_attach, adf_attach), DEVMETHOD(device_detach, adf_detach), DEVMETHOD_END }; static driver_t adf_driver = { "qat", adf_methods, sizeof(struct adf_accel_dev) }; DRIVER_MODULE_ORDERED(qat_c4xxx, pci, adf_driver, NULL, NULL, SI_ORDER_THIRD); MODULE_VERSION(qat_c4xxx, 1); MODULE_DEPEND(qat_c4xxx, qat_common, 1, 1, 1); MODULE_DEPEND(qat_c4xxx, qat_api, 1, 1, 1); MODULE_DEPEND(qat_c4xxx, linuxkpi, 1, 1, 1); diff --git a/sys/dev/qat/qat_hw/qat_c62x/adf_drv.c b/sys/dev/qat/qat_hw/qat_c62x/adf_drv.c index 4035a8385bd5..1d48b85b2e12 100644 --- a/sys/dev/qat/qat_hw/qat_c62x/adf_drv.c +++ 
b/sys/dev/qat/qat_hw/qat_c62x/adf_drv.c @@ -1,270 +1,272 @@ /* SPDX-License-Identifier: BSD-3-Clause */ -/* Copyright(c) 2007-2022 Intel Corporation */ +/* Copyright(c) 2007-2025 Intel Corporation */ #include "qat_freebsd.h" #include "adf_cfg.h" #include "adf_common_drv.h" #include "adf_accel_devices.h" #include "adf_c62x_hw_data.h" #include "adf_fw_counters.h" #include "adf_cfg_device.h" +#include "adf_dbgfs.h" #include #include #include #include #include -#include "adf_heartbeat_dbg.h" -#include "adf_cnvnr_freq_counters.h" static MALLOC_DEFINE(M_QAT_C62X, "qat_c62x", "qat_c62x"); #define ADF_SYSTEM_DEVICE(device_id) \ { \ PCI_VENDOR_ID_INTEL, device_id \ } static const struct pci_device_id adf_pci_tbl[] = { ADF_SYSTEM_DEVICE( ADF_C62X_PCI_DEVICE_ID), { 0, } }; static int adf_probe(device_t dev) { const struct pci_device_id *id; for (id = adf_pci_tbl; id->vendor != 0; id++) { if (pci_get_vendor(dev) == id->vendor && pci_get_device(dev) == id->device) { device_set_desc(dev, "Intel " ADF_C62X_DEVICE_NAME " QuickAssist"); return BUS_PROBE_GENERIC; } } return ENXIO; } static void adf_cleanup_accel(struct adf_accel_dev *accel_dev) { struct adf_accel_pci *accel_pci_dev = &accel_dev->accel_pci_dev; int i; if (accel_dev->dma_tag) bus_dma_tag_destroy(accel_dev->dma_tag); for (i = 0; i < ADF_PCI_MAX_BARS; i++) { struct adf_bar *bar = &accel_pci_dev->pci_bars[i]; if (bar->virt_addr) bus_free_resource(accel_pci_dev->pci_dev, SYS_RES_MEMORY, bar->virt_addr); } if (accel_dev->hw_device) { switch (pci_get_device(accel_pci_dev->pci_dev)) { case ADF_C62X_PCI_DEVICE_ID: adf_clean_hw_data_c62x(accel_dev->hw_device); break; default: break; } free(accel_dev->hw_device, M_QAT_C62X); accel_dev->hw_device = NULL; } + adf_dbgfs_exit(accel_dev); adf_cfg_dev_remove(accel_dev); adf_devmgr_rm_dev(accel_dev, NULL); } static int adf_attach(device_t dev) { struct adf_accel_dev *accel_dev; struct adf_accel_pci *accel_pci_dev; struct adf_hw_device_data *hw_data; unsigned int i, bar_nr; int ret, 
rid; struct adf_cfg_device *cfg_dev = NULL; /* Set pci MaxPayLoad to 256. Implemented to avoid the issue of * Pci-passthrough causing Maxpayload to be reset to 128 bytes * when the device is reset. */ if (pci_get_max_payload(dev) != 256) pci_set_max_payload(dev, 256); accel_dev = device_get_softc(dev); INIT_LIST_HEAD(&accel_dev->crypto_list); accel_pci_dev = &accel_dev->accel_pci_dev; accel_pci_dev->pci_dev = dev; if (bus_get_domain(dev, &accel_pci_dev->node) != 0) accel_pci_dev->node = 0; /* XXX: Revisit if we actually need a devmgr table at all. */ /* Add accel device to accel table. * This should be called before adf_cleanup_accel is called */ if (adf_devmgr_add_dev(accel_dev, NULL)) { device_printf(dev, "Failed to add new accelerator device.\n"); return ENXIO; } /* Allocate and configure device configuration structure */ hw_data = malloc(sizeof(*hw_data), M_QAT_C62X, M_WAITOK | M_ZERO); accel_dev->hw_device = hw_data; adf_init_hw_data_c62x(accel_dev->hw_device); accel_pci_dev->revid = pci_get_revid(dev); hw_data->fuses = pci_read_config(dev, ADF_DEVICE_FUSECTL_OFFSET, 4); if (accel_pci_dev->revid == 0x00) { device_printf(dev, "A0 stepping is not supported.\n"); ret = ENODEV; goto out_err; } /* Get PPAERUCM values and store */ ret = adf_aer_store_ppaerucm_reg(dev, hw_data); if (ret) goto out_err; /* Get Accelerators and Accelerators Engines masks */ hw_data->accel_mask = hw_data->get_accel_mask(accel_dev); hw_data->ae_mask = hw_data->get_ae_mask(accel_dev); hw_data->admin_ae_mask = hw_data->ae_mask; accel_pci_dev->sku = hw_data->get_sku(hw_data); /* If the device has no acceleration engines then ignore it. 
*/ if (!hw_data->accel_mask || !hw_data->ae_mask || ((~hw_data->ae_mask) & 0x01)) { device_printf(dev, "No acceleration units found\n"); ret = ENXIO; goto out_err; } /* Create device configuration table */ ret = adf_cfg_dev_add(accel_dev); if (ret) goto out_err; ret = adf_clock_debugfs_add(accel_dev); if (ret) goto out_err; pci_set_max_read_req(dev, 1024); ret = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, BUS_SPACE_MAXSIZE, /* BUS_SPACE_UNRESTRICTED */ 1, BUS_SPACE_MAXSIZE, 0, NULL, NULL, &accel_dev->dma_tag); if (ret) goto out_err; if (hw_data->get_accel_cap) { hw_data->accel_capabilities_mask = hw_data->get_accel_cap(accel_dev); } /* Find and map all the device's BARS */ i = (hw_data->fuses & ADF_DEVICE_FUSECTL_MASK) ? 1 : 0; for (bar_nr = 0; i < ADF_PCI_MAX_BARS && bar_nr < PCIR_MAX_BAR_0; bar_nr++) { struct adf_bar *bar; /* * XXX: This isn't quite right as it will ignore a BAR * that wasn't assigned a valid resource range by the * firmware. 
*/ rid = PCIR_BAR(bar_nr); if (bus_get_resource(dev, SYS_RES_MEMORY, rid, NULL, NULL) != 0) continue; bar = &accel_pci_dev->pci_bars[i++]; bar->virt_addr = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (bar->virt_addr == NULL) { device_printf(dev, "Failed to map BAR %d\n", bar_nr); ret = ENXIO; goto out_err; } bar->base_addr = rman_get_start(bar->virt_addr); bar->size = rman_get_size(bar->virt_addr); } pci_enable_busmaster(dev); + adf_dbgfs_init(accel_dev); + if (!accel_dev->hw_device->config_device) { ret = EFAULT; goto out_err; } ret = accel_dev->hw_device->config_device(accel_dev); if (ret) goto out_err; ret = adf_dev_init(accel_dev); if (ret) goto out_dev_shutdown; ret = adf_dev_start(accel_dev); if (ret) goto out_dev_stop; cfg_dev = accel_dev->cfg->dev; adf_cfg_device_clear(cfg_dev, accel_dev); free(cfg_dev, M_QAT); accel_dev->cfg->dev = NULL; return ret; out_dev_stop: adf_dev_stop(accel_dev); out_dev_shutdown: adf_dev_shutdown(accel_dev); out_err: adf_cleanup_accel(accel_dev); return ret; } static int adf_detach(device_t dev) { struct adf_accel_dev *accel_dev = device_get_softc(dev); if (adf_dev_stop(accel_dev)) { device_printf(dev, "Failed to stop QAT accel dev\n"); return EBUSY; } adf_dev_shutdown(accel_dev); adf_cleanup_accel(accel_dev); return 0; } static device_method_t adf_methods[] = { DEVMETHOD(device_probe, adf_probe), DEVMETHOD(device_attach, adf_attach), DEVMETHOD(device_detach, adf_detach), DEVMETHOD_END }; static driver_t adf_driver = { "qat", adf_methods, sizeof(struct adf_accel_dev) }; DRIVER_MODULE_ORDERED(qat_c62x, pci, adf_driver, NULL, NULL, SI_ORDER_THIRD); MODULE_VERSION(qat_c62x, 1); MODULE_DEPEND(qat_c62x, qat_common, 1, 1, 1); MODULE_DEPEND(qat_c62x, qat_api, 1, 1, 1); MODULE_DEPEND(qat_c62x, linuxkpi, 1, 1, 1); diff --git a/sys/dev/qat/qat_hw/qat_dh895xcc/adf_drv.c b/sys/dev/qat/qat_hw/qat_dh895xcc/adf_drv.c index f7a31da23ba9..d8f2ba0a99b7 100644 --- a/sys/dev/qat/qat_hw/qat_dh895xcc/adf_drv.c +++ 
b/sys/dev/qat/qat_hw/qat_dh895xcc/adf_drv.c @@ -1,262 +1,264 @@ /* SPDX-License-Identifier: BSD-3-Clause */ -/* Copyright(c) 2007-2022 Intel Corporation */ +/* Copyright(c) 2007-2025 Intel Corporation */ #include "qat_freebsd.h" #include "adf_cfg.h" #include "adf_common_drv.h" #include "adf_accel_devices.h" #include "adf_dh895xcc_hw_data.h" #include "adf_fw_counters.h" #include "adf_cfg_device.h" +#include "adf_dbgfs.h" #include #include #include #include #include -#include "adf_heartbeat_dbg.h" -#include "adf_cnvnr_freq_counters.h" static MALLOC_DEFINE(M_QAT_DH895XCC, "qat_dh895xcc", "qat_dh895xcc"); #define ADF_SYSTEM_DEVICE(device_id) \ { \ PCI_VENDOR_ID_INTEL, device_id \ } static const struct pci_device_id adf_pci_tbl[] = { ADF_SYSTEM_DEVICE(ADF_DH895XCC_PCI_DEVICE_ID), { 0, } }; static int adf_probe(device_t dev) { const struct pci_device_id *id; for (id = adf_pci_tbl; id->vendor != 0; id++) { if (pci_get_vendor(dev) == id->vendor && pci_get_device(dev) == id->device) { device_set_desc(dev, "Intel " ADF_DH895XCC_DEVICE_NAME " QuickAssist"); return BUS_PROBE_DEFAULT; } } return ENXIO; } static void adf_cleanup_accel(struct adf_accel_dev *accel_dev) { struct adf_accel_pci *accel_pci_dev = &accel_dev->accel_pci_dev; int i; if (accel_dev->dma_tag) bus_dma_tag_destroy(accel_dev->dma_tag); for (i = 0; i < ADF_PCI_MAX_BARS; i++) { struct adf_bar *bar = &accel_pci_dev->pci_bars[i]; if (bar->virt_addr) bus_free_resource(accel_pci_dev->pci_dev, SYS_RES_MEMORY, bar->virt_addr); } if (accel_dev->hw_device) { switch (pci_get_device(accel_pci_dev->pci_dev)) { case ADF_DH895XCC_PCI_DEVICE_ID: adf_clean_hw_data_dh895xcc(accel_dev->hw_device); break; default: break; } free(accel_dev->hw_device, M_QAT_DH895XCC); accel_dev->hw_device = NULL; } + adf_dbgfs_exit(accel_dev); adf_cfg_dev_remove(accel_dev); adf_devmgr_rm_dev(accel_dev, NULL); } static int adf_attach(device_t dev) { struct adf_accel_dev *accel_dev; struct adf_accel_pci *accel_pci_dev; struct adf_hw_device_data 
*hw_data; unsigned int i, bar_nr; int ret, rid; struct adf_cfg_device *cfg_dev = NULL; /* Set pci MaxPayLoad to 256. Implemented to avoid the issue of * Pci-passthrough causing Maxpayload to be reset to 128 bytes * when the device is reset. */ if (pci_get_max_payload(dev) != 256) pci_set_max_payload(dev, 256); accel_dev = device_get_softc(dev); INIT_LIST_HEAD(&accel_dev->crypto_list); accel_pci_dev = &accel_dev->accel_pci_dev; accel_pci_dev->pci_dev = dev; if (bus_get_domain(dev, &accel_pci_dev->node) != 0) accel_pci_dev->node = 0; /* Add accel device to accel table. * This should be called before adf_cleanup_accel is called */ if (adf_devmgr_add_dev(accel_dev, NULL)) { device_printf(dev, "Failed to add new accelerator device.\n"); return ENXIO; } /* Allocate and configure device configuration structure */ hw_data = malloc(sizeof(*hw_data), M_QAT_DH895XCC, M_WAITOK | M_ZERO); accel_dev->hw_device = hw_data; adf_init_hw_data_dh895xcc(accel_dev->hw_device); accel_pci_dev->revid = pci_get_revid(dev); hw_data->fuses = pci_read_config(dev, ADF_DEVICE_FUSECTL_OFFSET, 4); /* Get PPAERUCM values and store */ ret = adf_aer_store_ppaerucm_reg(dev, hw_data); if (ret) goto out_err; /* Get Accelerators and Accelerators Engines masks */ hw_data->accel_mask = hw_data->get_accel_mask(accel_dev); hw_data->ae_mask = hw_data->get_ae_mask(accel_dev); hw_data->admin_ae_mask = hw_data->ae_mask; accel_pci_dev->sku = hw_data->get_sku(hw_data); /* If the device has no acceleration engines then ignore it. 
*/ if (!hw_data->accel_mask || !hw_data->ae_mask || ((~hw_data->ae_mask) & 0x01)) { device_printf(dev, "No acceleration units found\n"); ret = ENXIO; goto out_err; } /* Create device configuration table */ ret = adf_cfg_dev_add(accel_dev); if (ret) goto out_err; pci_set_max_read_req(dev, 1024); ret = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, BUS_SPACE_MAXSIZE, /* BUS_SPACE_UNRESTRICTED */ 1, BUS_SPACE_MAXSIZE, 0, NULL, NULL, &accel_dev->dma_tag); if (ret) goto out_err; if (hw_data->get_accel_cap) { hw_data->accel_capabilities_mask = hw_data->get_accel_cap(accel_dev); } /* Find and map all the device's BARS */ i = 0; for (bar_nr = 0; i < ADF_PCI_MAX_BARS && bar_nr < PCIR_MAX_BAR_0; bar_nr++) { struct adf_bar *bar; /* * This will ignore a BAR * that wasn't assigned a valid resource range by the * firmware. */ rid = PCIR_BAR(bar_nr); if (bus_get_resource(dev, SYS_RES_MEMORY, rid, NULL, NULL) != 0) continue; bar = &accel_pci_dev->pci_bars[i++]; bar->virt_addr = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (bar->virt_addr == NULL) { device_printf(dev, "Failed to map BAR %d\n", bar_nr); ret = ENXIO; goto out_err; } bar->base_addr = rman_get_start(bar->virt_addr); bar->size = rman_get_size(bar->virt_addr); } pci_enable_busmaster(dev); + adf_dbgfs_init(accel_dev); + if (!accel_dev->hw_device->config_device) { ret = EFAULT; goto out_err; } ret = accel_dev->hw_device->config_device(accel_dev); if (ret) goto out_err; ret = adf_dev_init(accel_dev); if (ret) goto out_dev_shutdown; ret = adf_dev_start(accel_dev); if (ret) goto out_dev_stop; cfg_dev = accel_dev->cfg->dev; adf_cfg_device_clear(cfg_dev, accel_dev); free(cfg_dev, M_QAT); accel_dev->cfg->dev = NULL; return ret; out_dev_stop: adf_dev_stop(accel_dev); out_dev_shutdown: adf_dev_shutdown(accel_dev); out_err: adf_cleanup_accel(accel_dev); return ret; } static int adf_detach(device_t dev) { struct adf_accel_dev *accel_dev = device_get_softc(dev); if 
(adf_dev_stop(accel_dev)) { device_printf(dev, "Failed to stop QAT accel dev\n"); return EBUSY; } adf_dev_shutdown(accel_dev); adf_cleanup_accel(accel_dev); return 0; } static device_method_t adf_methods[] = { DEVMETHOD(device_probe, adf_probe), DEVMETHOD(device_attach, adf_attach), DEVMETHOD(device_detach, adf_detach), DEVMETHOD_END }; static driver_t adf_driver = { "qat", adf_methods, sizeof(struct adf_accel_dev) }; DRIVER_MODULE_ORDERED(qat_dh895xcc, pci, adf_driver, NULL, NULL, SI_ORDER_THIRD); MODULE_VERSION(qat_dh895xcc, 1); MODULE_DEPEND(qat_dh895xcc, qat_common, 1, 1, 1); MODULE_DEPEND(qat_dh895xcc, qat_api, 1, 1, 1); MODULE_DEPEND(qat_dh895xcc, linuxkpi, 1, 1, 1); diff --git a/sys/modules/qat/qat_common/Makefile b/sys/modules/qat/qat_common/Makefile index c2131cc1e24b..af8ce86d6be2 100644 --- a/sys/modules/qat/qat_common/Makefile +++ b/sys/modules/qat/qat_common/Makefile @@ -1,43 +1,44 @@ # SPDX-License-Identifier: BSD-3-Clause # Copyright(c) 2007-2022 Intel Corporation .PATH: ${SRCTOP}/sys/dev/qat/qat_common KMOD= qat_common SRCS+= adf_accel_engine.c adf_freebsd_admin.c adf_aer.c adf_cfg.c qat_common_module.c SRCS+= adf_heartbeat.c adf_freebsd_heartbeat_dbg.c SRCS+= adf_freebsd_dev_processes.c adf_freebsd_uio.c adf_freebsd_uio_cleanup.c SRCS+= adf_ctl_drv.c adf_dev_mgr.c adf_hw_arbiter.c SRCS+= adf_init.c adf_transport.c adf_isr.c adf_fw_counters.c adf_dev_err.c SRCS+= adf_gen2_hw_data.c SRCS+= adf_gen4_hw_data.c SRCS+= qat_freebsd.c SRCS+= adf_freebsd_cfg_dev_dbg.c adf_freebsd_ver_dbg.c SRCS+= adf_cfg_device.c adf_cfg_section.c adf_cfg_instance.c adf_cfg_bundle.c adf_cfg_sysctl.c SRCS+= qat_hal.c qat_uclo.c SRCS+= adf_vf_isr.c SRCS+= adf_gen4_pfvf.c SRCS+= adf_gen4_timer.c SRCS+= adf_pfvf_utils.c adf_pfvf_vf_msg.c adf_pfvf_vf_proto.c SRCS+= adf_gen4vf_hw_csr_data.c SRCS+= adf_freebsd_transport_debug.c adf_clock.c SRCS+= adf_freebsd_cnvnr_ctrs_dbg.c +SRCS+= adf_freebsd_dbgfs.c SRCS+= adf_freebsd_pfvf_ctrs_dbg.c SRCS+= bus_if.h device_if.h pci_if.h 
vnode_if.h opt_qat.h CFLAGS+= -I${SRCTOP}/sys/dev/qat/include CFLAGS+= -I${SRCTOP}/sys/dev/qat/include/common CFLAGS+= ${LINUXKPI_INCLUDES} .if !defined(KERNBUILDDIR) CFLAGS+= -include opt_qat.h MKDEP= -include opt_qat.h opt_qat.h: :> ${.TARGET} .if defined(QAT_DISABLE_SAFE_DC_MODE) && ${QAT_DISABLE_SAFE_DC_MODE} == "1" @echo "#define QAT_DISABLE_SAFE_DC_MODE 1" >> ${.TARGET} .endif .endif .include <bsd.kmod.mk>