diff --git a/sys/dev/qat/qat_common/adf_cfg_sysctl.c b/sys/dev/qat/qat_common/adf_cfg_sysctl.c index 621c3cc5b6c6..1a836765c94a 100644 --- a/sys/dev/qat/qat_common/adf_cfg_sysctl.c +++ b/sys/dev/qat/qat_common/adf_cfg_sysctl.c @@ -1,342 +1,355 @@ /* SPDX-License-Identifier: BSD-3-Clause */ -/* Copyright(c) 2007-2022 Intel Corporation */ +/* Copyright(c) 2007-2025 Intel Corporation */ #include #include #include #include "adf_accel_devices.h" #include "adf_cfg.h" #include "adf_cfg_sysctl.h" #include "adf_cfg_device.h" #include "adf_common_drv.h" #include #include +#include #define ADF_CFG_SYSCTL_BUF_SZ ADF_CFG_MAX_VAL #define ADF_CFG_UP_STR "up" #define ADF_CFG_DOWN_STR "down" #define ADF_CFG_MAX_USER_PROCESSES 64 static int adf_cfg_down(struct adf_accel_dev *accel_dev) { int ret = 0; if (!adf_dev_started(accel_dev)) { device_printf(GET_DEV(accel_dev), "Device qat_dev%d already down\n", accel_dev->accel_id); return 0; } if (adf_dev_in_use(accel_dev)) { pr_err("QAT: Device %d in use\n", accel_dev->accel_id); goto out; } if (adf_dev_stop(accel_dev)) { device_printf(GET_DEV(accel_dev), "Failed to stop qat_dev%d\n", accel_dev->accel_id); ret = EFAULT; goto out; } adf_dev_shutdown(accel_dev); out: return ret; } static int adf_cfg_up(struct adf_accel_dev *accel_dev) { int ret; if (adf_dev_started(accel_dev)) return 0; if (NULL == accel_dev->hw_device->config_device) return ENXIO; ret = accel_dev->hw_device->config_device(accel_dev); if (ret) { device_printf(GET_DEV(accel_dev), "Failed to start qat_dev%d\n", accel_dev->accel_id); return ret; } ret = adf_dev_init(accel_dev); if (!ret) ret = adf_dev_start(accel_dev); if (ret) { device_printf(GET_DEV(accel_dev), "Failed to start qat_dev%d\n", accel_dev->accel_id); adf_dev_stop(accel_dev); adf_dev_shutdown(accel_dev); } if (!ret) { struct adf_cfg_device *cfg_dev = NULL; cfg_dev = accel_dev->cfg->dev; adf_cfg_device_clear(cfg_dev, accel_dev); free(cfg_dev, M_QAT); accel_dev->cfg->dev = NULL; } return 0; } static const char 
*const cfg_serv[] = { "sym;asym", "sym", "asym", "dc", "sym;dc", "asym;dc", "cy", "cy;dc" }; static const char *const cfg_mode[] = { "ks;us", "us", "ks" }; static int adf_cfg_sysctl_services_handle(SYSCTL_HANDLER_ARGS) { struct adf_cfg_device_data *dev_cfg_data; struct adf_accel_dev *accel_dev; char buf[ADF_CFG_SYSCTL_BUF_SZ]; unsigned int len; int ret = 0; int i = 0; + if (priv_check(curthread, PRIV_DRIVER) != 0) + return EPERM; + accel_dev = arg1; if (!accel_dev) return ENXIO; dev_cfg_data = accel_dev->cfg; if (!dev_cfg_data) return ENXIO; strlcpy(buf, dev_cfg_data->cfg_services, sizeof(buf)); ret = sysctl_handle_string(oidp, buf, sizeof(buf), req); if (ret != 0 || req->newptr == NULL) return ret; /* Handle config change */ if (adf_dev_started(accel_dev)) { device_printf( GET_DEV(accel_dev), "QAT: configuration could be changed in down state only\n"); return EINVAL; } len = strlen(buf); for (i = 0; i < ARRAY_SIZE(cfg_serv); i++) { if ((len > 0 && strncasecmp(cfg_serv[i], buf, len) == 0)) { strlcpy(dev_cfg_data->cfg_services, buf, ADF_CFG_MAX_VAL); break; } } if (i == ARRAY_SIZE(cfg_serv)) { device_printf(GET_DEV(accel_dev), "Unknown service configuration\n"); ret = EINVAL; } return ret; } static int adf_cfg_sysctl_mode_handle(SYSCTL_HANDLER_ARGS) { struct adf_cfg_device_data *dev_cfg_data; struct adf_accel_dev *accel_dev; char buf[ADF_CFG_SYSCTL_BUF_SZ]; unsigned int len; int ret = 0; int i = 0; + if (priv_check(curthread, PRIV_DRIVER) != 0) + return EPERM; + accel_dev = arg1; if (!accel_dev) return ENXIO; dev_cfg_data = accel_dev->cfg; if (!dev_cfg_data) return ENXIO; strlcpy(buf, dev_cfg_data->cfg_mode, sizeof(buf)); ret = sysctl_handle_string(oidp, buf, sizeof(buf), req); if (ret != 0 || req->newptr == NULL) return ret; /* Handle config change */ if (adf_dev_started(accel_dev)) { device_printf( GET_DEV(accel_dev), "QAT: configuration could be changed in down state only\n"); return EBUSY; } len = strlen(buf); for (i = 0; i < ARRAY_SIZE(cfg_mode); i++) { if 
((len > 0 && strncasecmp(cfg_mode[i], buf, len) == 0)) { strlcpy(dev_cfg_data->cfg_mode, buf, ADF_CFG_MAX_VAL); break; } } if (i == ARRAY_SIZE(cfg_mode)) { device_printf(GET_DEV(accel_dev), "Unknown configuration mode\n"); ret = EINVAL; } return ret; } static int adf_cfg_sysctl_handle(SYSCTL_HANDLER_ARGS) { struct adf_cfg_device_data *dev_cfg_data; struct adf_accel_dev *accel_dev; char buf[ADF_CFG_SYSCTL_BUF_SZ] = { 0 }; unsigned int len; int ret = 0; + if (priv_check(curthread, PRIV_DRIVER) != 0) + return EPERM; + accel_dev = arg1; if (!accel_dev) return ENXIO; dev_cfg_data = accel_dev->cfg; if (!dev_cfg_data) return ENXIO; if (adf_dev_started(accel_dev)) { strlcpy(buf, ADF_CFG_UP_STR, sizeof(buf)); } else { strlcpy(buf, ADF_CFG_DOWN_STR, sizeof(buf)); } ret = sysctl_handle_string(oidp, buf, sizeof(buf), req); if (ret != 0 || req->newptr == NULL) return ret; len = strlen(buf); if ((len > 0 && strncasecmp(ADF_CFG_UP_STR, buf, len) == 0)) { ret = adf_cfg_up(accel_dev); } else if (len > 0 && strncasecmp(ADF_CFG_DOWN_STR, buf, len) == 0) { ret = adf_cfg_down(accel_dev); } else { device_printf(GET_DEV(accel_dev), "QAT: Invalid operation\n"); ret = EINVAL; } return ret; } static int adf_cfg_sysctl_num_processes_handle(SYSCTL_HANDLER_ARGS) { struct adf_cfg_device_data *dev_cfg_data; struct adf_accel_dev *accel_dev; uint32_t num_user_processes = 0; int ret = 0; + if (priv_check(curthread, PRIV_DRIVER) != 0) + return EPERM; + accel_dev = arg1; if (!accel_dev) return ENXIO; dev_cfg_data = accel_dev->cfg; if (!dev_cfg_data) return ENXIO; num_user_processes = dev_cfg_data->num_user_processes; ret = sysctl_handle_int(oidp, &num_user_processes, 0, req); if (ret != 0 || req->newptr == NULL) return ret; if (adf_dev_started(accel_dev)) { device_printf( GET_DEV(accel_dev), "QAT: configuration could be changed in down state only\n"); return EBUSY; } if (num_user_processes > ADF_CFG_MAX_USER_PROCESSES) { return EINVAL; } dev_cfg_data->num_user_processes = num_user_processes; return 
ret; } int adf_cfg_sysctl_add(struct adf_accel_dev *accel_dev) { struct sysctl_ctx_list *qat_sysctl_ctx; struct sysctl_oid *qat_sysctl_tree; if (!accel_dev) return EINVAL; qat_sysctl_ctx = device_get_sysctl_ctx(accel_dev->accel_pci_dev.pci_dev); qat_sysctl_tree = device_get_sysctl_tree(accel_dev->accel_pci_dev.pci_dev); SYSCTL_ADD_PROC(qat_sysctl_ctx, SYSCTL_CHILDREN(qat_sysctl_tree), OID_AUTO, "state", CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_NEEDGIANT, accel_dev, 0, adf_cfg_sysctl_handle, "A", "QAT State"); SYSCTL_ADD_PROC(qat_sysctl_ctx, SYSCTL_CHILDREN(qat_sysctl_tree), OID_AUTO, "cfg_services", CTLTYPE_STRING | CTLFLAG_RWTUN | CTLFLAG_NEEDGIANT, accel_dev, 0, adf_cfg_sysctl_services_handle, "A", "QAT services confguration"); SYSCTL_ADD_PROC(qat_sysctl_ctx, SYSCTL_CHILDREN(qat_sysctl_tree), OID_AUTO, "cfg_mode", CTLTYPE_STRING | CTLFLAG_RWTUN | CTLFLAG_NEEDGIANT, accel_dev, 0, adf_cfg_sysctl_mode_handle, "A", "QAT mode configuration"); SYSCTL_ADD_PROC(qat_sysctl_ctx, SYSCTL_CHILDREN(qat_sysctl_tree), OID_AUTO, "num_user_processes", CTLTYPE_U32 | CTLFLAG_RWTUN | CTLFLAG_NEEDGIANT, accel_dev, 0, adf_cfg_sysctl_num_processes_handle, "I", "QAT user processes number "); return 0; } void adf_cfg_sysctl_remove(struct adf_accel_dev *accel_dev) { } diff --git a/sys/dev/qat/qat_common/adf_clock.c b/sys/dev/qat/qat_common/adf_clock.c index 36204c9939ac..f5d4116505b6 100644 --- a/sys/dev/qat/qat_common/adf_clock.c +++ b/sys/dev/qat/qat_common/adf_clock.c @@ -1,186 +1,208 @@ /* SPDX-License-Identifier: BSD-3-Clause */ -/* Copyright(c) 2007-2022 Intel Corporation */ +/* Copyright(c) 2007-2025 Intel Corporation */ #include "adf_accel_devices.h" #include "adf_common_drv.h" #include +#include #define MEASURE_CLOCK_RETRIES 10 #define MEASURE_CLOCK_DELTA_THRESHOLD 100 #define MEASURE_CLOCK_DELAY 10000 #define ME_CLK_DIVIDER 16 #define CLK_DBGFS_FILE "frequency" #define HB_SYSCTL_ERR(RC) \ do { \ if (!RC) { \ device_printf(GET_DEV(accel_dev), \ "Memory allocation failed in \ 
adf_heartbeat_dbg_add\n"); \ return ENOMEM; \ } \ } while (0) +static int adf_clock_read_frequency(SYSCTL_HANDLER_ARGS) +{ + struct adf_accel_dev *accel_dev = arg1; + struct adf_hw_device_data *hw_data; + int error = EFAULT; + + if (priv_check(curthread, PRIV_DRIVER) != 0) + return EPERM; + + if (accel_dev == NULL) + return EINVAL; + + hw_data = accel_dev->hw_device; + + error = sysctl_handle_int(oidp, &hw_data->clock_frequency, 0, req); + if (error || !req->newptr) + return error; + + return (0); +} + int adf_clock_debugfs_add(struct adf_accel_dev *accel_dev) { - struct adf_hw_device_data *hw_data = accel_dev->hw_device; - struct sysctl_ctx_list *qat_sysctl_ctx; struct sysctl_oid *qat_sysctl_tree; struct sysctl_oid *rc = 0; qat_sysctl_ctx = device_get_sysctl_ctx(accel_dev->accel_pci_dev.pci_dev); qat_sysctl_tree = device_get_sysctl_tree(accel_dev->accel_pci_dev.pci_dev); - rc = SYSCTL_ADD_UINT(qat_sysctl_ctx, + rc = SYSCTL_ADD_PROC(qat_sysctl_ctx, SYSCTL_CHILDREN(qat_sysctl_tree), OID_AUTO, CLK_DBGFS_FILE, - CTLFLAG_RD, - &hw_data->clock_frequency, + CTLTYPE_INT | CTLFLAG_RD, + accel_dev, 0, + adf_clock_read_frequency, + "IU", "clock frequency"); HB_SYSCTL_ERR(rc); return 0; } /** * adf_dev_measure_clock() -- Measure the CPM clock frequency * @accel_dev: Pointer to acceleration device. * @frequency: Pointer to returned frequency in Hz. * * Return: 0 on success, error code otherwise. 
*/ static int measure_clock(struct adf_accel_dev *accel_dev, u32 *frequency) { struct timespec ts1; struct timespec ts2; struct timespec ts3; struct timespec ts4; struct timespec delta; u64 delta_us = 0; u64 timestamp1 = 0; u64 timestamp2 = 0; u64 temp = 0; int tries = 0; if (!accel_dev || !frequency) return EIO; do { nanotime(&ts1); if (adf_get_fw_timestamp(accel_dev, ×tamp1)) { device_printf(GET_DEV(accel_dev), "Failed to get fw timestamp\n"); return EIO; } nanotime(&ts2); delta = timespec_sub(ts2, ts1); temp = delta.tv_nsec; do_div(temp, NSEC_PER_USEC); delta_us = delta.tv_sec * USEC_PER_SEC + temp; } while (delta_us > MEASURE_CLOCK_DELTA_THRESHOLD && ++tries < MEASURE_CLOCK_RETRIES); if (tries >= MEASURE_CLOCK_RETRIES) { device_printf(GET_DEV(accel_dev), "Excessive clock measure delay\n"); return EIO; } usleep_range(MEASURE_CLOCK_DELAY, MEASURE_CLOCK_DELAY * 2); tries = 0; do { nanotime(&ts3); if (adf_get_fw_timestamp(accel_dev, ×tamp2)) { device_printf(GET_DEV(accel_dev), "Failed to get fw timestamp\n"); return EIO; } nanotime(&ts4); delta = timespec_sub(ts4, ts3); temp = delta.tv_nsec; do_div(temp, NSEC_PER_USEC); delta_us = delta.tv_sec * USEC_PER_SEC + temp; } while (delta_us > MEASURE_CLOCK_DELTA_THRESHOLD && ++tries < MEASURE_CLOCK_RETRIES); if (tries >= MEASURE_CLOCK_RETRIES) { device_printf(GET_DEV(accel_dev), "Excessive clock measure delay\n"); return EIO; } delta = timespec_sub(ts3, ts1); temp = delta.tv_sec * NSEC_PER_SEC + delta.tv_nsec + (NSEC_PER_USEC / 2); do_div(temp, NSEC_PER_USEC); delta_us = temp; /* Don't pretend that this gives better than 100KHz resolution */ temp = (timestamp2 - timestamp1) * ME_CLK_DIVIDER * 10 + (delta_us / 2); do_div(temp, delta_us); *frequency = temp * 100000; return 0; } /** * adf_dev_measure_clock() -- Measure the CPM clock frequency * @accel_dev: Pointer to acceleration device. * @frequency: Pointer to returned frequency in Hz. 
* @min: Minimum expected frequency * @max: Maximum expected frequency * * Return: 0 on success, error code otherwise. */ int adf_dev_measure_clock(struct adf_accel_dev *accel_dev, u32 *frequency, u32 min, u32 max) { int ret; u32 freq; ret = measure_clock(accel_dev, &freq); if (ret) return ret; if (freq < min) { device_printf(GET_DEV(accel_dev), "Slow clock %d MHz measured, assuming %d\n", freq, min); freq = min; } else if (freq > max) { device_printf(GET_DEV(accel_dev), "Fast clock %d MHz measured, assuming %d\n", freq, max); freq = max; } *frequency = freq; return 0; } static inline u64 timespec_to_ms(const struct timespec *ts) { return (uint64_t)(ts->tv_sec * (1000)) + (ts->tv_nsec / NSEC_PER_MSEC); } u64 adf_clock_get_current_time(void) { struct timespec ts; getnanotime(&ts); return timespec_to_ms(&ts); } diff --git a/sys/dev/qat/qat_common/adf_freebsd_cfg_dev_dbg.c b/sys/dev/qat/qat_common/adf_freebsd_cfg_dev_dbg.c index 7585dd9b29d4..6068d7d99496 100644 --- a/sys/dev/qat/qat_common/adf_freebsd_cfg_dev_dbg.c +++ b/sys/dev/qat/qat_common/adf_freebsd_cfg_dev_dbg.c @@ -1,77 +1,81 @@ /* SPDX-License-Identifier: BSD-3-Clause */ -/* Copyright(c) 2007-2022 Intel Corporation */ +/* Copyright(c) 2007-2025 Intel Corporation */ #include "qat_freebsd.h" #include "adf_common_drv.h" #include "adf_cfg_device.h" #include "adf_cfg_dev_dbg.h" #include #include #include #include #include #include #include #include +#include static int qat_dev_cfg_show(SYSCTL_HANDLER_ARGS) { struct adf_cfg_device_data *dev_cfg; struct adf_cfg_section *sec; struct adf_cfg_key_val *ptr; struct sbuf sb; int error; + if (priv_check(curthread, PRIV_DRIVER) != 0) + return EPERM; + sbuf_new_for_sysctl(&sb, NULL, 128, req); dev_cfg = arg1; sx_slock(&dev_cfg->lock); list_for_each_entry(sec, &dev_cfg->sec_list, list) { sbuf_printf(&sb, "[%s]\n", sec->name); list_for_each_entry(ptr, &sec->param_head, list) { sbuf_printf(&sb, "%s = %s\n", ptr->key, ptr->val); } } sx_sunlock(&dev_cfg->lock); error = 
sbuf_finish(&sb); sbuf_delete(&sb); return error; } int adf_cfg_dev_dbg_add(struct adf_accel_dev *accel_dev) { struct adf_cfg_device_data *dev_cfg_data = accel_dev->cfg; device_t dev; dev = GET_DEV(accel_dev); dev_cfg_data->debug = SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "dev_cfg", CTLFLAG_RD | CTLTYPE_STRING, dev_cfg_data, 0, qat_dev_cfg_show, "A", "Device configuration"); if (!dev_cfg_data->debug) { device_printf(dev, "Failed to create qat cfg sysctl.\n"); return ENXIO; } return 0; } void adf_cfg_dev_dbg_remove(struct adf_accel_dev *accel_dev) { struct adf_cfg_device_data *dev_cfg_data = accel_dev->cfg; if (dev_cfg_data->dev) { adf_cfg_device_clear(dev_cfg_data->dev, accel_dev); free(dev_cfg_data->dev, M_QAT); dev_cfg_data->dev = NULL; } } diff --git a/sys/dev/qat/qat_common/adf_freebsd_cnvnr_ctrs_dbg.c b/sys/dev/qat/qat_common/adf_freebsd_cnvnr_ctrs_dbg.c index ead172635e59..539059589bc8 100644 --- a/sys/dev/qat/qat_common/adf_freebsd_cnvnr_ctrs_dbg.c +++ b/sys/dev/qat/qat_common/adf_freebsd_cnvnr_ctrs_dbg.c @@ -1,195 +1,199 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright(c) 2007-2025 Intel Corporation */ #include #include #include +#include #include "adf_cnvnr_freq_counters.h" #include "adf_common_drv.h" #include "adf_cfg.h" #include "icp_qat_fw_init_admin.h" #define ADF_CNVNR_ERR_MASK 0xFFF #define LINE \ "+-----------------------------------------------------------------+\n" #define BANNER \ "| CNV Error Freq Statistics for Qat Device |\n" #define NEW_LINE "\n" #define REPORT_ENTRY_FORMAT \ "|[AE %2d]: TotalErrors: %5d : LastError: %s [%5d] |\n" #define MAX_LINE_LENGTH 128 #define MAX_REPORT_SIZE ((ADF_MAX_ACCELENGINES + 3) * MAX_LINE_LENGTH) #define PRINT_LINE(line) \ (snprintf( \ report_ptr, MAX_REPORT_SIZE - (report_ptr - report), "%s", line)) const char *cnvnr_err_str[] = {"No Error ", "Checksum Error", "Length Error-P", "Decomp Error ", "Xlat Error ", "Length Error-C", "Unknown Error 
"}; /* Handler for HB status check */ static int qat_cnvnr_ctrs_dbg_read(SYSCTL_HANDLER_ARGS) { struct adf_accel_dev *accel_dev = arg1; struct adf_hw_device_data *hw_device; struct icp_qat_fw_init_admin_req request; struct icp_qat_fw_init_admin_resp response; unsigned long dc_ae_msk = 0; u8 num_aes = 0, ae = 0, error_type = 0, bytes_written = 0; s16 latest_error = 0; char report[MAX_REPORT_SIZE]; char *report_ptr = report; + if (priv_check(curthread, PRIV_DRIVER) != 0) + return EPERM; + /* Defensive check */ if (!accel_dev || accel_dev->accel_id > ADF_MAX_DEVICES) return EINVAL; if (!adf_dev_started(accel_dev)) { device_printf(GET_DEV(accel_dev), "QAT Device not started\n"); return EINVAL; } hw_device = accel_dev->hw_device; if (!hw_device) { device_printf(GET_DEV(accel_dev), "Failed to get hw_device.\n"); return EFAULT; } /* Clean report memory */ explicit_bzero(report, sizeof(report)); /* Adding banner to report */ bytes_written = PRINT_LINE(NEW_LINE); if (bytes_written <= 0) return EINVAL; report_ptr += bytes_written; bytes_written = PRINT_LINE(LINE); if (bytes_written <= 0) return EINVAL; report_ptr += bytes_written; bytes_written = PRINT_LINE(BANNER); if (bytes_written <= 0) return EINVAL; report_ptr += bytes_written; bytes_written = PRINT_LINE(LINE); if (bytes_written <= 0) return EINVAL; report_ptr += bytes_written; if (accel_dev->au_info) dc_ae_msk = accel_dev->au_info->dc_ae_msk; /* Extracting number of Acceleration Engines */ num_aes = hw_device->get_num_aes(hw_device); for (ae = 0; ae < num_aes; ae++) { if (accel_dev->au_info && !test_bit(ae, &dc_ae_msk)) continue; explicit_bzero(&response, sizeof(struct icp_qat_fw_init_admin_resp)); request.cmd_id = ICP_QAT_FW_CNV_STATS_GET; if (adf_put_admin_msg_sync( accel_dev, ae, &request, &response) || response.status) { return EFAULT; } error_type = CNV_ERROR_TYPE_GET(response.latest_error); if (error_type == CNV_ERR_TYPE_DECOMP_PRODUCED_LENGTH_ERROR || error_type == CNV_ERR_TYPE_DECOMP_CONSUMED_LENGTH_ERROR) { 
latest_error = CNV_ERROR_LENGTH_DELTA_GET(response.latest_error); } else if (error_type == CNV_ERR_TYPE_DECOMPRESSION_ERROR || error_type == CNV_ERR_TYPE_TRANSLATION_ERROR) { latest_error = CNV_ERROR_DECOMP_STATUS_GET(response.latest_error); } else { latest_error = response.latest_error & ADF_CNVNR_ERR_MASK; } bytes_written = snprintf(report_ptr, MAX_REPORT_SIZE - (report_ptr - report), REPORT_ENTRY_FORMAT, ae, response.error_count, cnvnr_err_str[error_type], latest_error); if (bytes_written <= 0) { device_printf( GET_DEV(accel_dev), "ERROR: No space left in CnV ctrs line buffer\n" "\tAcceleration ID: %d, Engine: %d\n", accel_dev->accel_id, ae); break; } report_ptr += bytes_written; } sysctl_handle_string(oidp, report, sizeof(report), req); return 0; } int adf_cnvnr_freq_counters_add(struct adf_accel_dev *accel_dev) { struct sysctl_ctx_list *qat_sysctl_ctx; struct sysctl_oid *qat_cnvnr_ctrs_sysctl_tree; /* Defensive checks */ if (!accel_dev) return EINVAL; /* Creating context and tree */ qat_sysctl_ctx = device_get_sysctl_ctx(accel_dev->accel_pci_dev.pci_dev); qat_cnvnr_ctrs_sysctl_tree = device_get_sysctl_tree(accel_dev->accel_pci_dev.pci_dev); /* Create "cnv_error" string type leaf - with callback */ accel_dev->cnv_error_oid = SYSCTL_ADD_PROC(qat_sysctl_ctx, SYSCTL_CHILDREN(qat_cnvnr_ctrs_sysctl_tree), OID_AUTO, "cnv_error", CTLTYPE_STRING | CTLFLAG_RD, accel_dev, 0, qat_cnvnr_ctrs_dbg_read, "IU", "QAT CnVnR status"); if (!accel_dev->cnv_error_oid) { device_printf( GET_DEV(accel_dev), "Failed to create qat cnvnr freq counters sysctl entry.\n"); return ENOMEM; } return 0; } void adf_cnvnr_freq_counters_remove(struct adf_accel_dev *accel_dev) { struct sysctl_ctx_list *qat_sysctl_ctx; if (!accel_dev) return; qat_sysctl_ctx = device_get_sysctl_ctx(accel_dev->accel_pci_dev.pci_dev); if (accel_dev->cnv_error_oid) { sysctl_ctx_entry_del(qat_sysctl_ctx, accel_dev->cnv_error_oid); sysctl_remove_oid(accel_dev->cnv_error_oid, 1, 1); accel_dev->cnv_error_oid = NULL; } } diff 
--git a/sys/dev/qat/qat_common/adf_freebsd_heartbeat_dbg.c b/sys/dev/qat/qat_common/adf_freebsd_heartbeat_dbg.c index 8690c000760c..e7b4840600e1 100644 --- a/sys/dev/qat/qat_common/adf_freebsd_heartbeat_dbg.c +++ b/sys/dev/qat/qat_common/adf_freebsd_heartbeat_dbg.c @@ -1,137 +1,189 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright(c) 2007-2025 Intel Corporation */ #include #include #include +#include #include "adf_heartbeat_dbg.h" #include "adf_common_drv.h" #include "adf_cfg.h" #include "adf_heartbeat.h" #define HB_SYSCTL_ERR(RC) \ do { \ if (RC == NULL) { \ printf( \ "Memory allocation failed in adf_heartbeat_dbg_add\n"); \ return ENOMEM; \ } \ } while (0) + +static int qat_dev_hb_read_sent(SYSCTL_HANDLER_ARGS) +{ + struct adf_accel_dev *accel_dev = arg1; + struct adf_heartbeat *hb; + int error = EFAULT; + + if (priv_check(curthread, PRIV_DRIVER) != 0) + return EPERM; + + if (accel_dev == NULL) + return EINVAL; + + hb = accel_dev->heartbeat; + + error = sysctl_handle_int(oidp, &hb->hb_sent_counter, 0, req); + if (error || !req->newptr) + return error; + + return (0); +} + +static int qat_dev_hb_read_failed(SYSCTL_HANDLER_ARGS) +{ + struct adf_accel_dev *accel_dev = arg1; + struct adf_heartbeat *hb; + int error = EFAULT; + + if (priv_check(curthread, PRIV_DRIVER) != 0) + return EPERM; + + if (accel_dev == NULL) + return EINVAL; + + hb = accel_dev->heartbeat; + + error = sysctl_handle_int(oidp, &hb->hb_failed_counter, 0, req); + if (error || !req->newptr) + return error; + + return (0); +} + /* Handler for HB status check */ static int qat_dev_hb_read(SYSCTL_HANDLER_ARGS) { enum adf_device_heartbeat_status hb_status = DEV_HB_UNRESPONSIVE; struct adf_accel_dev *accel_dev = arg1; struct adf_heartbeat *hb; int ret = 0; + + if (priv_check(curthread, PRIV_DRIVER) != 0) + return EPERM; + if (accel_dev == NULL) { return EINVAL; } hb = accel_dev->heartbeat; /* if FW is loaded, proceed else set heartbeat down */ if (test_bit(ADF_STATUS_AE_UCODE_LOADED, 
&accel_dev->status)) { adf_heartbeat_status(accel_dev, &hb_status); } if (hb_status == DEV_HB_ALIVE) { hb->heartbeat.hb_sysctlvar = 1; } else { hb->heartbeat.hb_sysctlvar = 0; } ret = sysctl_handle_int(oidp, &hb->heartbeat.hb_sysctlvar, 0, req); return ret; } int adf_heartbeat_dbg_add(struct adf_accel_dev *accel_dev) { struct sysctl_ctx_list *qat_hb_sysctl_ctx; struct sysctl_oid *qat_hb_sysctl_tree; struct adf_heartbeat *hb; if (accel_dev == NULL) { return EINVAL; } if (adf_heartbeat_init(accel_dev)) return EINVAL; hb = accel_dev->heartbeat; qat_hb_sysctl_ctx = device_get_sysctl_ctx(accel_dev->accel_pci_dev.pci_dev); qat_hb_sysctl_tree = device_get_sysctl_tree(accel_dev->accel_pci_dev.pci_dev); hb->heartbeat_sent.oid = - SYSCTL_ADD_UINT(qat_hb_sysctl_ctx, + SYSCTL_ADD_PROC(qat_hb_sysctl_ctx, SYSCTL_CHILDREN(qat_hb_sysctl_tree), OID_AUTO, "heartbeat_sent", - CTLFLAG_RD, - &hb->hb_sent_counter, + CTLTYPE_INT | CTLFLAG_RD, + accel_dev, 0, - "HB sent count"); + qat_dev_hb_read_sent, + "IU", + "HB sent count"); HB_SYSCTL_ERR(hb->heartbeat_sent.oid); hb->heartbeat_failed.oid = - SYSCTL_ADD_UINT(qat_hb_sysctl_ctx, + SYSCTL_ADD_PROC(qat_hb_sysctl_ctx, SYSCTL_CHILDREN(qat_hb_sysctl_tree), OID_AUTO, "heartbeat_failed", - CTLFLAG_RD, - &hb->hb_failed_counter, + CTLTYPE_INT | CTLFLAG_RD, + accel_dev, 0, + qat_dev_hb_read_failed, + "IU", "HB failed count"); HB_SYSCTL_ERR(hb->heartbeat_failed.oid); hb->heartbeat.oid = SYSCTL_ADD_PROC(qat_hb_sysctl_ctx, SYSCTL_CHILDREN(qat_hb_sysctl_tree), OID_AUTO, "heartbeat", CTLTYPE_INT | CTLFLAG_RD, accel_dev, 0, qat_dev_hb_read, "IU", "QAT device status"); HB_SYSCTL_ERR(hb->heartbeat.oid); return 0; } int adf_heartbeat_dbg_del(struct adf_accel_dev *accel_dev) { struct sysctl_ctx_list *qat_sysctl_ctx; struct adf_heartbeat *hb; if (!accel_dev) { return EINVAL; } hb = accel_dev->heartbeat; qat_sysctl_ctx = device_get_sysctl_ctx(accel_dev->accel_pci_dev.pci_dev); if (hb->heartbeat.oid) { sysctl_ctx_entry_del(qat_sysctl_ctx, 
hb->heartbeat.oid); sysctl_remove_oid(hb->heartbeat.oid, 1, 1); hb->heartbeat.oid = NULL; } if (hb->heartbeat_failed.oid) { sysctl_ctx_entry_del(qat_sysctl_ctx, hb->heartbeat_failed.oid); sysctl_remove_oid(hb->heartbeat_failed.oid, 1, 1); hb->heartbeat_failed.oid = NULL; } if (hb->heartbeat_sent.oid) { sysctl_ctx_entry_del(qat_sysctl_ctx, hb->heartbeat_sent.oid); sysctl_remove_oid(hb->heartbeat_sent.oid, 1, 1); hb->heartbeat_sent.oid = NULL; } adf_heartbeat_clean(accel_dev); return 0; } diff --git a/sys/dev/qat/qat_common/adf_freebsd_pfvf_ctrs_dbg.c b/sys/dev/qat/qat_common/adf_freebsd_pfvf_ctrs_dbg.c index 76830e2920c3..a50e5fa62a18 100644 --- a/sys/dev/qat/qat_common/adf_freebsd_pfvf_ctrs_dbg.c +++ b/sys/dev/qat/qat_common/adf_freebsd_pfvf_ctrs_dbg.c @@ -1,136 +1,140 @@ /* SPDX-License-Identifier: BSD-3-Clause */ -/* Copyright(c) 2007-2022 Intel Corporation */ +/* Copyright(c) 2007-2025 Intel Corporation */ #include "adf_accel_devices.h" #include "adf_common_drv.h" #include "adf_dev_err.h" #include "adf_freebsd_pfvf_ctrs_dbg.h" +#include #define MAX_REPORT_LINES (14) #define MAX_REPORT_LINE_LEN (64) #define MAX_REPORT_SIZE (MAX_REPORT_LINES * MAX_REPORT_LINE_LEN) static void adf_pfvf_ctrs_prepare_report(char *rep, struct pfvf_stats *pfvf_counters) { unsigned int value = 0; char *string = "unknown"; unsigned int pos = 0; char *ptr = rep; for (pos = 0; pos < MAX_REPORT_LINES; pos++) { switch (pos) { case 0: string = "Messages written to CSR"; value = pfvf_counters->tx; break; case 1: string = "Messages read from CSR"; value = pfvf_counters->rx; break; case 2: string = "Spurious Interrupt"; value = pfvf_counters->spurious; break; case 3: string = "Block messages sent"; value = pfvf_counters->blk_tx; break; case 4: string = "Block messages received"; value = pfvf_counters->blk_rx; break; case 5: string = "Blocks received with CRC errors"; value = pfvf_counters->crc_err; break; case 6: string = "CSR in use"; value = pfvf_counters->busy; break; case 7: string = "No 
acknowledgment"; value = pfvf_counters->no_ack; break; case 8: string = "Collisions"; value = pfvf_counters->collision; break; case 9: string = "Put msg timeout"; value = pfvf_counters->tx_timeout; break; case 10: string = "No response received"; value = pfvf_counters->rx_timeout; break; case 11: string = "Responses received"; value = pfvf_counters->rx_rsp; break; case 12: string = "Messages re-transmitted"; value = pfvf_counters->retry; break; case 13: string = "Put event timeout"; value = pfvf_counters->event_timeout; break; default: value = 0; } if (value) ptr += snprintf(ptr, (MAX_REPORT_SIZE - (ptr - rep)), "%s %u\n", string, value); } } static int adf_pfvf_ctrs_show(SYSCTL_HANDLER_ARGS) { struct pfvf_stats *pfvf_counters = arg1; char report[MAX_REPORT_SIZE]; + if (priv_check(curthread, PRIV_DRIVER) != 0) + return EPERM; + if (!pfvf_counters) return EINVAL; explicit_bzero(report, sizeof(report)); adf_pfvf_ctrs_prepare_report(report, pfvf_counters); sysctl_handle_string(oidp, report, sizeof(report), req); return 0; } int adf_pfvf_ctrs_dbg_add(struct adf_accel_dev *accel_dev) { struct sysctl_ctx_list *qat_sysctl_ctx; struct sysctl_oid *qat_pfvf_ctrs_sysctl_tree; struct sysctl_oid *oid_pfvf; device_t dev; if (!accel_dev || accel_dev->accel_id > ADF_MAX_DEVICES) return EINVAL; dev = GET_DEV(accel_dev); qat_sysctl_ctx = device_get_sysctl_ctx(dev); qat_pfvf_ctrs_sysctl_tree = device_get_sysctl_tree(dev); oid_pfvf = SYSCTL_ADD_PROC(qat_sysctl_ctx, SYSCTL_CHILDREN(qat_pfvf_ctrs_sysctl_tree), OID_AUTO, "pfvf_counters", CTLTYPE_STRING | CTLFLAG_RD, &accel_dev->u1.vf.pfvf_counters, 0, adf_pfvf_ctrs_show, "A", "QAT PFVF counters"); if (!oid_pfvf) { device_printf(dev, "Failure creating PFVF counters sysctl\n"); return ENOMEM; } return 0; } diff --git a/sys/dev/qat/qat_common/adf_freebsd_transport_debug.c b/sys/dev/qat/qat_common/adf_freebsd_transport_debug.c index 35375bb20014..78ea6a7a5083 100644 --- a/sys/dev/qat/qat_common/adf_freebsd_transport_debug.c +++ 
b/sys/dev/qat/qat_common/adf_freebsd_transport_debug.c @@ -1,211 +1,218 @@ /* SPDX-License-Identifier: BSD-3-Clause */ -/* Copyright(c) 2007-2022 Intel Corporation */ +/* Copyright(c) 2007-2025 Intel Corporation */ #include "qat_freebsd.h" #include "adf_cfg.h" #include "adf_common_drv.h" #include "adf_accel_devices.h" #include "icp_qat_uclo.h" #include "icp_qat_fw.h" #include "icp_qat_fw_init_admin.h" #include "adf_cfg_strings.h" #include "adf_transport_access_macros.h" #include "adf_transport_internal.h" #include #include #include #include +#include static int adf_ring_show(SYSCTL_HANDLER_ARGS) { struct adf_etr_ring_data *ring = arg1; struct adf_etr_bank_data *bank = ring->bank; struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(bank->accel_dev); struct resource *csr = ring->bank->csr_addr; struct sbuf sb; int error, word; uint32_t *wp, *end; + if (priv_check(curthread, PRIV_DRIVER) != 0) + return EPERM; + sbuf_new_for_sysctl(&sb, NULL, 128, req); { int head, tail, empty; head = csr_ops->read_csr_ring_head(csr, bank->bank_number, ring->ring_number); tail = csr_ops->read_csr_ring_tail(csr, bank->bank_number, ring->ring_number); empty = csr_ops->read_csr_e_stat(csr, bank->bank_number); sbuf_cat(&sb, "\n------- Ring configuration -------\n"); sbuf_printf(&sb, "ring name: %s\n", ring->ring_debug->ring_name); sbuf_printf(&sb, "ring num %d, bank num %d\n", ring->ring_number, ring->bank->bank_number); sbuf_printf(&sb, "head %x, tail %x, empty: %d\n", head, tail, (empty & 1 << ring->ring_number) >> ring->ring_number); sbuf_printf(&sb, "ring size %d, msg size %d\n", ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size), ADF_MSG_SIZE_TO_BYTES(ring->msg_size)); sbuf_cat(&sb, "----------- Ring data ------------\n"); } wp = ring->base_addr; end = (uint32_t *)((char *)ring->base_addr + ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size)); while (wp < end) { sbuf_printf(&sb, "%p:", wp); for (word = 0; word < 32 / 4; word++, wp++) sbuf_printf(&sb, " %08x", *wp); sbuf_printf(&sb, "\n"); } error = 
sbuf_finish(&sb); sbuf_delete(&sb); return (error); } int adf_ring_debugfs_add(struct adf_etr_ring_data *ring, const char *name) { struct adf_etr_ring_debug_entry *ring_debug; char entry_name[8]; ring_debug = malloc(sizeof(*ring_debug), M_QAT, M_WAITOK | M_ZERO); strlcpy(ring_debug->ring_name, name, sizeof(ring_debug->ring_name)); snprintf(entry_name, sizeof(entry_name), "ring_%02d", ring->ring_number); ring_debug->debug = SYSCTL_ADD_PROC(&ring->bank->accel_dev->sysctl_ctx, SYSCTL_CHILDREN(ring->bank->bank_debug_dir), OID_AUTO, entry_name, CTLFLAG_RD | CTLTYPE_STRING, ring, 0, adf_ring_show, "A", "Ring configuration"); if (!ring_debug->debug) { printf("QAT: Failed to create ring debug entry.\n"); free(ring_debug, M_QAT); return EFAULT; } ring->ring_debug = ring_debug; return 0; } void adf_ring_debugfs_rm(struct adf_etr_ring_data *ring) { if (ring->ring_debug) { free(ring->ring_debug, M_QAT); ring->ring_debug = NULL; } } static int adf_bank_show(SYSCTL_HANDLER_ARGS) { struct adf_etr_bank_data *bank; struct adf_accel_dev *accel_dev = NULL; struct adf_hw_csr_ops *csr_ops = NULL; struct adf_hw_device_data *hw_data = NULL; u8 num_rings_per_bank = 0; struct sbuf sb; int error, ring_id; + if (priv_check(curthread, PRIV_DRIVER) != 0) + return EPERM; + sbuf_new_for_sysctl(&sb, NULL, 128, req); bank = arg1; accel_dev = bank->accel_dev; csr_ops = GET_CSR_OPS(bank->accel_dev); hw_data = accel_dev->hw_device; num_rings_per_bank = hw_data->num_rings_per_bank; sbuf_printf(&sb, "\n------- Bank %d configuration -------\n", bank->bank_number); for (ring_id = 0; ring_id < num_rings_per_bank; ring_id++) { struct adf_etr_ring_data *ring = &bank->rings[ring_id]; struct resource *csr = bank->csr_addr; int head, tail, empty; if (!(bank->ring_mask & 1 << ring_id)) continue; head = csr_ops->read_csr_ring_head(csr, bank->bank_number, ring->ring_number); tail = csr_ops->read_csr_ring_tail(csr, bank->bank_number, ring->ring_number); empty = csr_ops->read_csr_e_stat(csr, bank->bank_number); 
sbuf_printf(&sb, "ring num %02d, head %04x, tail %04x, empty: %d\n", ring->ring_number, head, tail, (empty & 1 << ring->ring_number) >> ring->ring_number); } error = sbuf_finish(&sb); sbuf_delete(&sb); return (error); } int adf_bank_debugfs_add(struct adf_etr_bank_data *bank) { struct adf_accel_dev *accel_dev = bank->accel_dev; struct sysctl_oid *parent = accel_dev->transport->debug; char name[9]; snprintf(name, sizeof(name), "bank_%03d", bank->bank_number); bank->bank_debug_dir = SYSCTL_ADD_NODE(&accel_dev->sysctl_ctx, SYSCTL_CHILDREN(parent), OID_AUTO, name, CTLFLAG_RD | CTLFLAG_SKIP, NULL, ""); if (!bank->bank_debug_dir) { printf("QAT: Failed to create bank debug dir.\n"); return EFAULT; } bank->bank_debug_cfg = SYSCTL_ADD_PROC(&accel_dev->sysctl_ctx, SYSCTL_CHILDREN(bank->bank_debug_dir), OID_AUTO, "config", CTLFLAG_RD | CTLTYPE_STRING, bank, 0, adf_bank_show, "A", "Bank configuration"); if (!bank->bank_debug_cfg) { printf("QAT: Failed to create bank debug entry.\n"); return EFAULT; } return 0; } void adf_bank_debugfs_rm(struct adf_etr_bank_data *bank) { } diff --git a/sys/dev/qat/qat_common/adf_freebsd_ver_dbg.c b/sys/dev/qat/qat_common/adf_freebsd_ver_dbg.c index 98cde6a742c1..041481435426 100644 --- a/sys/dev/qat/qat_common/adf_freebsd_ver_dbg.c +++ b/sys/dev/qat/qat_common/adf_freebsd_ver_dbg.c @@ -1,176 +1,186 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright(c) 2007-2025 Intel Corporation */ #include "qat_freebsd.h" #include "adf_cfg.h" #include "adf_common_drv.h" #include "adf_accel_devices.h" #include "adf_ver_dbg.h" +#include static int adf_sysctl_read_fw_versions(SYSCTL_HANDLER_ARGS) { struct adf_accel_dev *accel_dev = arg1; char fw_version[ADF_CFG_MAX_VAL_LEN_IN_BYTES]; + if (priv_check(curthread, PRIV_DRIVER) != 0) + return EPERM; + if (!accel_dev) return -EINVAL; if (adf_dev_started(accel_dev)) snprintf(fw_version, ADF_CFG_MAX_VAL_LEN_IN_BYTES, "%d.%d.%d", accel_dev->fw_versions.fw_version_major, accel_dev->fw_versions.fw_version_minor, 
accel_dev->fw_versions.fw_version_patch); else snprintf(fw_version, ADF_CFG_MAX_VAL_LEN_IN_BYTES, ""); return SYSCTL_OUT(req, fw_version, strnlen(fw_version, ADF_CFG_MAX_VAL_LEN_IN_BYTES)); } static int adf_sysctl_read_hw_versions(SYSCTL_HANDLER_ARGS) { struct adf_accel_dev *accel_dev = arg1; char hw_version[ADF_CFG_MAX_VAL_LEN_IN_BYTES]; + if (priv_check(curthread, PRIV_DRIVER) != 0) + return EPERM; + if (!accel_dev) return -EINVAL; if (adf_dev_started(accel_dev)) snprintf(hw_version, ADF_CFG_MAX_VAL_LEN_IN_BYTES, "%d", accel_dev->accel_pci_dev.revid); else snprintf(hw_version, ADF_CFG_MAX_VAL_LEN_IN_BYTES, ""); return SYSCTL_OUT(req, hw_version, strnlen(hw_version, ADF_CFG_MAX_VAL_LEN_IN_BYTES)); } static int adf_sysctl_read_mmp_versions(SYSCTL_HANDLER_ARGS) { struct adf_accel_dev *accel_dev = arg1; char mmp_version[ADF_CFG_MAX_VAL_LEN_IN_BYTES]; + if (priv_check(curthread, PRIV_DRIVER) != 0) + return EPERM; + if (!accel_dev) return -EINVAL; /* Format MMP FW version once; duplicated copy-paste if-block removed. */ if (adf_dev_started(accel_dev)) snprintf(mmp_version, ADF_CFG_MAX_VAL_LEN_IN_BYTES, "%d.%d.%d", accel_dev->fw_versions.mmp_version_major, accel_dev->fw_versions.mmp_version_minor, accel_dev->fw_versions.mmp_version_patch); else snprintf(mmp_version, ADF_CFG_MAX_VAL_LEN_IN_BYTES, ""); return SYSCTL_OUT(req, mmp_version, strnlen(mmp_version, ADF_CFG_MAX_VAL_LEN_IN_BYTES)); } int adf_ver_dbg_add(struct adf_accel_dev *accel_dev) { struct sysctl_ctx_list *qat_sysctl_ctx; struct sysctl_oid *qat_sysctl_tree; if (!accel_dev) return -EINVAL; qat_sysctl_ctx = device_get_sysctl_ctx(accel_dev->accel_pci_dev.pci_dev); qat_sysctl_tree = device_get_sysctl_tree(accel_dev->accel_pci_dev.pci_dev); accel_dev->fw_version_oid = SYSCTL_ADD_OID(qat_sysctl_ctx, SYSCTL_CHILDREN(qat_sysctl_tree), OID_AUTO, "fw_version",
CTLTYPE_STRING | CTLFLAG_RD, accel_dev, 0, adf_sysctl_read_fw_versions, "A", "QAT FW version"); if (!accel_dev->fw_version_oid) goto err; accel_dev->hw_version_oid = SYSCTL_ADD_OID(qat_sysctl_ctx, SYSCTL_CHILDREN(qat_sysctl_tree), OID_AUTO, "hw_version", CTLTYPE_STRING | CTLFLAG_RD, accel_dev, 0, adf_sysctl_read_hw_versions, "A", "QAT HW version"); if (!accel_dev->hw_version_oid) goto err; accel_dev->mmp_version_oid = SYSCTL_ADD_OID(qat_sysctl_ctx, SYSCTL_CHILDREN(qat_sysctl_tree), OID_AUTO, "mmp_version", CTLTYPE_STRING | CTLFLAG_RD, accel_dev, 0, adf_sysctl_read_mmp_versions, "A", "QAT MMP version"); if (!accel_dev->mmp_version_oid) goto err; return 0; err: device_printf(GET_DEV(accel_dev), "Failed to add firmware versions to sysctl\n"); return -EINVAL; } void adf_ver_dbg_del(struct adf_accel_dev *accel_dev) { struct sysctl_ctx_list *qat_sysctl_ctx; if (!accel_dev) return; qat_sysctl_ctx = device_get_sysctl_ctx(accel_dev->accel_pci_dev.pci_dev); if (accel_dev->mmp_version_oid) { sysctl_ctx_entry_del(qat_sysctl_ctx, accel_dev->mmp_version_oid); sysctl_remove_oid(accel_dev->mmp_version_oid, 1, 1); accel_dev->mmp_version_oid = NULL; } if (accel_dev->hw_version_oid) { sysctl_ctx_entry_del(qat_sysctl_ctx, accel_dev->hw_version_oid); sysctl_remove_oid(accel_dev->hw_version_oid, 1, 1); accel_dev->hw_version_oid = NULL; } if (accel_dev->fw_version_oid) { sysctl_ctx_entry_del(qat_sysctl_ctx, accel_dev->fw_version_oid); sysctl_remove_oid(accel_dev->fw_version_oid, 1, 1); accel_dev->fw_version_oid = NULL; } } diff --git a/sys/dev/qat/qat_common/adf_fw_counters.c b/sys/dev/qat/qat_common/adf_fw_counters.c index 1acabe4c9364..1356fa89e775 100644 --- a/sys/dev/qat/qat_common/adf_fw_counters.c +++ b/sys/dev/qat/qat_common/adf_fw_counters.c @@ -1,422 +1,426 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright(c) 2007-2025 Intel Corporation */ #include #include #include #include "adf_accel_devices.h" #include "adf_fw_counters.h" #include "adf_common_drv.h" #include 
"icp_qat_fw_init_admin.h" #include #include +#include #define ADF_FW_COUNTERS_BUF_SZ 4096 #define ADF_RAS_EVENT_STR "RAS events" #define ADF_FW_REQ_STR "Firmware Requests" #define ADF_FW_RESP_STR "Firmware Responses" static void adf_fw_counters_section_del_all(struct list_head *head); static void adf_fw_counters_del_all(struct adf_accel_dev *accel_dev); static int adf_fw_counters_add_key_value_param(struct adf_accel_dev *accel_dev, const char *section_name, const unsigned long sec_name_max_size, const char *key, const void *val); static int adf_fw_counters_section_add(struct adf_accel_dev *accel_dev, const char *name, const unsigned long name_max_size); int adf_get_fw_counters(struct adf_accel_dev *accel_dev); int adf_read_fw_counters(SYSCTL_HANDLER_ARGS); int adf_get_fw_counters(struct adf_accel_dev *accel_dev) { struct icp_qat_fw_init_admin_req req; struct icp_qat_fw_init_admin_resp resp; unsigned long ae_mask; int i; int ret = 0; char aeidstr[16] = { 0 }; struct adf_hw_device_data *hw_device; if (!accel_dev) { ret = EFAULT; goto fail_clean; } if (!adf_dev_started(accel_dev)) { device_printf(GET_DEV(accel_dev), "Qat Device not started\n"); ret = EFAULT; goto fail_clean; } hw_device = accel_dev->hw_device; if (!hw_device) { ret = EFAULT; goto fail_clean; } adf_fw_counters_del_all(accel_dev); explicit_bzero(&req, sizeof(struct icp_qat_fw_init_admin_req)); req.cmd_id = ICP_QAT_FW_COUNTERS_GET; ae_mask = hw_device->ae_mask; for_each_set_bit(i, &ae_mask, GET_MAX_ACCELENGINES(accel_dev)) { explicit_bzero(&resp, sizeof(struct icp_qat_fw_init_admin_resp)); if (adf_put_admin_msg_sync(accel_dev, i, &req, &resp) || resp.status) { resp.req_rec_count = ADF_FW_COUNTERS_NO_RESPONSE; resp.resp_sent_count = ADF_FW_COUNTERS_NO_RESPONSE; resp.ras_event_count = ADF_FW_COUNTERS_NO_RESPONSE; } explicit_bzero(aeidstr, sizeof(aeidstr)); snprintf(aeidstr, sizeof(aeidstr), "AE %2d", i); if (adf_fw_counters_section_add(accel_dev, aeidstr, sizeof(aeidstr))) { ret = ENOMEM; goto fail_clean; 
} if (adf_fw_counters_add_key_value_param( accel_dev, aeidstr, sizeof(aeidstr), ADF_FW_REQ_STR, (void *)&resp.req_rec_count)) { adf_fw_counters_del_all(accel_dev); ret = ENOMEM; goto fail_clean; } if (adf_fw_counters_add_key_value_param( accel_dev, aeidstr, sizeof(aeidstr), ADF_FW_RESP_STR, (void *)&resp.resp_sent_count)) { adf_fw_counters_del_all(accel_dev); ret = ENOMEM; goto fail_clean; } if (hw_device->count_ras_event && hw_device->count_ras_event(accel_dev, (void *)&resp.ras_event_count, aeidstr)) { adf_fw_counters_del_all(accel_dev); ret = ENOMEM; goto fail_clean; } } fail_clean: return ret; } int adf_read_fw_counters(SYSCTL_HANDLER_ARGS) { struct adf_accel_dev *accel_dev = arg1; struct adf_fw_counters_section *ptr = NULL; struct list_head *list = NULL, *list_ptr = NULL; struct list_head *tmp = NULL, *tmp_val = NULL; int ret = 0; struct sbuf *sbuf = NULL; char *cbuf = NULL; + if (priv_check(curthread, PRIV_DRIVER) != 0) + return EPERM; + if (accel_dev == NULL) { return EINVAL; } cbuf = malloc(ADF_FW_COUNTERS_BUF_SZ, M_QAT, M_WAITOK | M_ZERO); sbuf = sbuf_new(NULL, cbuf, ADF_FW_COUNTERS_BUF_SZ, SBUF_FIXEDLEN); if (sbuf == NULL) { free(cbuf, M_QAT); return ENOMEM; } ret = adf_get_fw_counters(accel_dev); if (ret) { sbuf_delete(sbuf); free(cbuf, M_QAT); return ret; } sbuf_printf(sbuf, "\n+------------------------------------------------+\n"); sbuf_printf( sbuf, "| FW Statistics for Qat Device |\n"); sbuf_printf(sbuf, "+------------------------------------------------+\n"); list_for_each_prev_safe(list, tmp, &accel_dev->fw_counters_data->ae_sec_list) { ptr = list_entry(list, struct adf_fw_counters_section, list); sbuf_printf(sbuf, "%s\n", ptr->name); list_for_each_prev_safe(list_ptr, tmp_val, &ptr->param_head) { struct adf_fw_counters_val *count = list_entry(list_ptr, struct adf_fw_counters_val, list); sbuf_printf(sbuf, "%s:%s\n", count->key, count->val); } } sbuf_finish(sbuf); ret = SYSCTL_OUT(req, sbuf_data(sbuf), sbuf_len(sbuf)); sbuf_delete(sbuf); free(cbuf, 
M_QAT); return ret; } int adf_fw_count_ras_event(struct adf_accel_dev *accel_dev, u32 *ras_event, char *aeidstr) { unsigned long count = 0; if (!accel_dev || !ras_event || !aeidstr) return EINVAL; count = (*ras_event == ADF_FW_COUNTERS_NO_RESPONSE ? ADF_FW_COUNTERS_NO_RESPONSE : (unsigned long)*ras_event); return adf_fw_counters_add_key_value_param( accel_dev, aeidstr, 16, ADF_RAS_EVENT_STR, (void *)&count); } /** * adf_fw_counters_add() - Create an acceleration device FW counters table. * @accel_dev: Pointer to acceleration device. * * Function creates a FW counters statistics table for the given * acceleration device. * The table stores device specific values of FW Requests sent to the FW and * FW Responses received from the FW. * To be used by QAT device specific drivers. * * Return: 0 on success, error code otherwise. */ int adf_fw_counters_add(struct adf_accel_dev *accel_dev) { struct adf_fw_counters_data *fw_counters_data; struct sysctl_ctx_list *qat_sysctl_ctx; struct sysctl_oid *qat_sysctl_tree; fw_counters_data = malloc(sizeof(*fw_counters_data), M_QAT, M_WAITOK | M_ZERO); INIT_LIST_HEAD(&fw_counters_data->ae_sec_list); init_rwsem(&fw_counters_data->lock); accel_dev->fw_counters_data = fw_counters_data; qat_sysctl_ctx = device_get_sysctl_ctx(accel_dev->accel_pci_dev.pci_dev); qat_sysctl_tree = device_get_sysctl_tree(accel_dev->accel_pci_dev.pci_dev); fw_counters_data->debug = SYSCTL_ADD_OID(qat_sysctl_ctx, SYSCTL_CHILDREN(qat_sysctl_tree), OID_AUTO, "fw_counters", CTLTYPE_STRING | CTLFLAG_RD, accel_dev, 0, adf_read_fw_counters, "A", "QAT FW counters"); if (!fw_counters_data->debug) { free(fw_counters_data, M_QAT); accel_dev->fw_counters_data = NULL; return ENOMEM; } return 0; } static void adf_fw_counters_del_all(struct adf_accel_dev *accel_dev) { struct adf_fw_counters_data *fw_counters_data = accel_dev->fw_counters_data; down_write(&fw_counters_data->lock); adf_fw_counters_section_del_all(&fw_counters_data->ae_sec_list); 
up_write(&fw_counters_data->lock); } static void adf_fw_counters_keyval_add(struct adf_fw_counters_val *new, struct adf_fw_counters_section *sec) { list_add_tail(&new->list, &sec->param_head); } static void adf_fw_counters_keyval_del_all(struct list_head *head) { struct list_head *list_ptr = NULL, *tmp = NULL; list_for_each_prev_safe(list_ptr, tmp, head) { struct adf_fw_counters_val *ptr = list_entry(list_ptr, struct adf_fw_counters_val, list); list_del(list_ptr); free(ptr, M_QAT); } } static void adf_fw_counters_section_del_all(struct list_head *head) { struct adf_fw_counters_section *ptr = NULL; struct list_head *list = NULL, *tmp = NULL; list_for_each_prev_safe(list, tmp, head) { ptr = list_entry(list, struct adf_fw_counters_section, list); adf_fw_counters_keyval_del_all(&ptr->param_head); list_del(list); free(ptr, M_QAT); } } static struct adf_fw_counters_section * adf_fw_counters_sec_find(struct adf_accel_dev *accel_dev, const char *sec_name, const unsigned long sec_name_max_size) { struct adf_fw_counters_data *fw_counters_data = accel_dev->fw_counters_data; struct list_head *list = NULL; list_for_each(list, &fw_counters_data->ae_sec_list) { struct adf_fw_counters_section *ptr = list_entry(list, struct adf_fw_counters_section, list); if (!strncmp(ptr->name, sec_name, sec_name_max_size)) return ptr; } return NULL; } static int adf_fw_counters_add_key_value_param(struct adf_accel_dev *accel_dev, const char *section_name, const unsigned long sec_name_max_size, const char *key, const void *val) { struct adf_fw_counters_data *fw_counters_data = accel_dev->fw_counters_data; struct adf_fw_counters_val *key_val; struct adf_fw_counters_section *section = adf_fw_counters_sec_find(accel_dev, section_name, sec_name_max_size); long tmp = *((const long *)val); if (!section) return EFAULT; key_val = malloc(sizeof(*key_val), M_QAT, M_WAITOK | M_ZERO); INIT_LIST_HEAD(&key_val->list); if (tmp == ADF_FW_COUNTERS_NO_RESPONSE) { snprintf(key_val->val, 
FW_COUNTERS_MAX_VAL_LEN_IN_BYTES, "No Response"); } else { snprintf(key_val->val, FW_COUNTERS_MAX_VAL_LEN_IN_BYTES, "%ld", tmp); } strlcpy(key_val->key, key, sizeof(key_val->key)); down_write(&fw_counters_data->lock); adf_fw_counters_keyval_add(key_val, section); up_write(&fw_counters_data->lock); return 0; } /** * adf_fw_counters_section_add() - Add AE section entry to FW counters table. * @accel_dev: Pointer to acceleration device. * @name: Name of the section * * Function adds a section for each AE where FW Requests/Responses and their * values will be stored. * To be used by QAT device specific drivers. * * Return: 0 on success, error code otherwise. */ static int adf_fw_counters_section_add(struct adf_accel_dev *accel_dev, const char *name, const unsigned long name_max_size) { struct adf_fw_counters_data *fw_counters_data = accel_dev->fw_counters_data; struct adf_fw_counters_section *sec = adf_fw_counters_sec_find(accel_dev, name, name_max_size); if (sec) return 0; sec = malloc(sizeof(*sec), M_QAT, M_WAITOK | M_ZERO); strlcpy(sec->name, name, sizeof(sec->name)); INIT_LIST_HEAD(&sec->param_head); down_write(&fw_counters_data->lock); list_add_tail(&sec->list, &fw_counters_data->ae_sec_list); up_write(&fw_counters_data->lock); return 0; } /** * adf_fw_counters_remove() - Clears acceleration device FW counters table. * @accel_dev: Pointer to acceleration device. * * Function removes FW counters table from the given acceleration device * and frees all allocated memory. * To be used by QAT device specific drivers. 
* * Return: void */ void adf_fw_counters_remove(struct adf_accel_dev *accel_dev) { struct sysctl_ctx_list *qat_sysctl_ctx; struct adf_fw_counters_data *fw_counters_data = accel_dev->fw_counters_data; if (!fw_counters_data) return; if (fw_counters_data->debug) { qat_sysctl_ctx = device_get_sysctl_ctx(accel_dev->accel_pci_dev.pci_dev); sysctl_ctx_entry_del(qat_sysctl_ctx, fw_counters_data->debug); sysctl_remove_oid(fw_counters_data->debug, 1, 1); fw_counters_data->debug = NULL; } down_write(&fw_counters_data->lock); adf_fw_counters_section_del_all(&fw_counters_data->ae_sec_list); up_write(&fw_counters_data->lock); free(fw_counters_data, M_QAT); accel_dev->fw_counters_data = NULL; } diff --git a/sys/dev/qat/qat_hw/qat_c4xxx/adf_c4xxx_ae_config.c b/sys/dev/qat/qat_hw/qat_c4xxx/adf_c4xxx_ae_config.c index e68d0bca80fc..a2bb36727fd4 100644 --- a/sys/dev/qat/qat_hw/qat_c4xxx/adf_c4xxx_ae_config.c +++ b/sys/dev/qat/qat_hw/qat_c4xxx/adf_c4xxx_ae_config.c @@ -1,166 +1,171 @@ /* SPDX-License-Identifier: BSD-3-Clause */ -/* Copyright(c) 2007-2022 Intel Corporation */ +/* Copyright(c) 2007-2025 Intel Corporation */ #include "adf_c4xxx_hw_data.h" #include #include #include #include #include #include #include #include +#include #include #include #include /* String buffer size */ #define AE_INFO_BUFFER_SIZE 50 #define AE_CONFIG_DBG_FILE "ae_config" static u8 find_first_me_index(const u32 au_mask) { u8 i; u32 mask = au_mask; /* Retrieve the index of the first ME of an accel unit */ for (i = 0; i < ADF_C4XXX_MAX_ACCELENGINES; i++) { if (mask & BIT(i)) return i; } return 0; } static u8 get_au_index(u8 au_mask) { u8 au_index = 0; while (au_mask) { if (au_mask == BIT(0)) return au_index; au_index++; au_mask = au_mask >> 1; } return 0; } static int adf_ae_config_show(SYSCTL_HANDLER_ARGS) { struct sbuf sb; struct adf_accel_dev *accel_dev = arg1; struct adf_hw_device_data *hw_data = accel_dev->hw_device; struct adf_accel_unit *accel_unit = accel_dev->au_info->au; u8 i, j; u8 au_index; u8 
ae_index; u8 num_aes; int ret = 0; + + if (priv_check(curthread, PRIV_DRIVER) != 0) + return EPERM; + u32 num_au = hw_data->get_num_accel_units(hw_data); sbuf_new_for_sysctl(&sb, NULL, 2048, req); sbuf_printf(&sb, "\n"); for (i = 0; i < num_au; i++) { /* Retrieve accel unit index */ au_index = get_au_index(accel_unit[i].au_mask); /* Retrieve index of fist ME in current accel unit */ ae_index = find_first_me_index(accel_unit[i].ae_mask); num_aes = accel_unit[i].num_ae; /* Retrieve accel unit type */ switch (accel_unit[i].services) { case ADF_ACCEL_CRYPTO: sbuf_printf(&sb, "\tAccel unit %d - CRYPTO\n", au_index); /* Display ME assignment for a particular accel unit */ for (j = ae_index; j < (num_aes + ae_index); j++) sbuf_printf(&sb, "\t\tAE[%d]: crypto\n", j); break; case ADF_ACCEL_COMPRESSION: sbuf_printf(&sb, "\tAccel unit %d - COMPRESSION\n", au_index); /* Display ME assignment for a particular accel unit */ for (j = ae_index; j < (num_aes + ae_index); j++) sbuf_printf(&sb, "\t\tAE[%d]: compression\n", j); break; case ADF_ACCEL_SERVICE_NULL: default: break; } } sbuf_finish(&sb); ret = SYSCTL_OUT(req, sbuf_data(&sb), sbuf_len(&sb)); sbuf_delete(&sb); return ret; } static int c4xxx_add_debugfs_ae_config(struct adf_accel_dev *accel_dev) { struct sysctl_ctx_list *qat_sysctl_ctx = NULL; struct sysctl_oid *qat_sysctl_tree = NULL; struct sysctl_oid *ae_conf_ctl = NULL; qat_sysctl_ctx = device_get_sysctl_ctx(accel_dev->accel_pci_dev.pci_dev); qat_sysctl_tree = device_get_sysctl_tree(accel_dev->accel_pci_dev.pci_dev); ae_conf_ctl = SYSCTL_ADD_PROC(qat_sysctl_ctx, SYSCTL_CHILDREN(qat_sysctl_tree), OID_AUTO, AE_CONFIG_DBG_FILE, CTLTYPE_STRING | CTLFLAG_RD, accel_dev, 0, adf_ae_config_show, "A", "AE config"); accel_dev->debugfs_ae_config = ae_conf_ctl; if (!accel_dev->debugfs_ae_config) { device_printf(GET_DEV(accel_dev), "Could not create debug ae config entry.\n"); return EFAULT; } return 0; } int c4xxx_init_ae_config(struct adf_accel_dev *accel_dev) { int ret = 0; /* Add 
a new file in debug file system with h/w version. */ ret = c4xxx_add_debugfs_ae_config(accel_dev); if (ret) { c4xxx_exit_ae_config(accel_dev); device_printf(GET_DEV(accel_dev), "Could not create debugfs ae config file\n"); return EINVAL; } return 0; } void c4xxx_exit_ae_config(struct adf_accel_dev *accel_dev) { if (!accel_dev->debugfs_ae_config) return; /* Delete ae configuration file */ remove_oid(accel_dev, accel_dev->debugfs_ae_config); accel_dev->debugfs_ae_config = NULL; } diff --git a/sys/dev/qat/qat_hw/qat_c4xxx/adf_c4xxx_misc_error_stats.c b/sys/dev/qat/qat_hw/qat_c4xxx/adf_c4xxx_misc_error_stats.c index 3821e60df746..4fdbec791ce6 100644 --- a/sys/dev/qat/qat_hw/qat_c4xxx/adf_c4xxx_misc_error_stats.c +++ b/sys/dev/qat/qat_hw/qat_c4xxx/adf_c4xxx_misc_error_stats.c @@ -1,105 +1,109 @@ /* SPDX-License-Identifier: BSD-3-Clause */ -/* Copyright(c) 2007-2022 Intel Corporation */ +/* Copyright(c) 2007-2025 Intel Corporation */ #include "adf_c4xxx_hw_data.h" #include "adf_c4xxx_misc_error_stats.h" #include "adf_common_drv.h" #include "adf_cfg_common.h" #include #include +#include #define MISC_ERROR_DBG_FILE "misc_error_stats" #define LINE \ "+-----------------------------------------------------------------+\n" #define BANNER \ "| Miscellaneous Error Statistics for Qat Device |\n" static void *misc_counter; struct adf_dev_miscellaneous_stats { u64 misc_counter; }; static int qat_misc_error_show(SYSCTL_HANDLER_ARGS) { struct sbuf sb; + if (priv_check(curthread, PRIV_DRIVER) != 0) + return EPERM; + sbuf_new_for_sysctl(&sb, NULL, 256, req); sbuf_printf(&sb, "\n"); sbuf_printf(&sb, LINE); sbuf_printf(&sb, "| Miscellaneous Error: %40llu |\n", (unsigned long long)((struct adf_dev_miscellaneous_stats *) misc_counter) ->misc_counter); sbuf_finish(&sb); SYSCTL_OUT(req, sbuf_data(&sb), sbuf_len(&sb)); sbuf_delete(&sb); return 0; } /** * adf_misc_error_add_c4xxx() - Create debugfs entry for * acceleration device Freq counters. * @accel_dev: Pointer to acceleration device. 
* * Return: 0 on success, error code otherwise. */ int adf_misc_error_add_c4xxx(struct adf_accel_dev *accel_dev) { struct sysctl_ctx_list *qat_sysctl_ctx = NULL; struct sysctl_oid *qat_sysctl_tree = NULL; struct sysctl_oid *misc_er_file = NULL; qat_sysctl_ctx = device_get_sysctl_ctx(accel_dev->accel_pci_dev.pci_dev); qat_sysctl_tree = device_get_sysctl_tree(accel_dev->accel_pci_dev.pci_dev); misc_er_file = SYSCTL_ADD_PROC(qat_sysctl_ctx, SYSCTL_CHILDREN(qat_sysctl_tree), OID_AUTO, MISC_ERROR_DBG_FILE, CTLTYPE_STRING | CTLFLAG_RD, accel_dev, 0, qat_misc_error_show, "A", "QAT Miscellaneous Error Statistics"); accel_dev->misc_error_dbgfile = misc_er_file; if (!accel_dev->misc_error_dbgfile) { device_printf( GET_DEV(accel_dev), "Failed to create qat miscellaneous error debugfs entry.\n"); return ENOENT; } misc_counter = kmalloc(PAGE_SIZE, GFP_KERNEL); if (!misc_counter) return ENOMEM; memset(misc_counter, 0, PAGE_SIZE); return 0; } /** * adf_misc_error_remove_c4xxx() - Remove debugfs entry for * acceleration device misc error counter. * @accel_dev: Pointer to acceleration device. 
* * Return: void */ void adf_misc_error_remove_c4xxx(struct adf_accel_dev *accel_dev) { if (accel_dev->misc_error_dbgfile) { remove_oid(accel_dev, accel_dev->misc_error_dbgfile); accel_dev->misc_error_dbgfile = NULL; } kfree(misc_counter); misc_counter = NULL; } diff --git a/sys/dev/qat/qat_hw/qat_c4xxx/adf_c4xxx_pke_replay_stats.c b/sys/dev/qat/qat_hw/qat_c4xxx/adf_c4xxx_pke_replay_stats.c index 61a879900f9c..06145a3d7906 100644 --- a/sys/dev/qat/qat_hw/qat_c4xxx/adf_c4xxx_pke_replay_stats.c +++ b/sys/dev/qat/qat_hw/qat_c4xxx/adf_c4xxx_pke_replay_stats.c @@ -1,99 +1,103 @@ /* SPDX-License-Identifier: BSD-3-Clause */ -/* Copyright(c) 2007-2022 Intel Corporation */ +/* Copyright(c) 2007-2025 Intel Corporation */ #include "adf_c4xxx_hw_data.h" #include "adf_c4xxx_pke_replay_stats.h" #include "adf_common_drv.h" #include "icp_qat_fw_init_admin.h" #include #include +#include #define PKE_REPLAY_DBG_FILE "pke_replay_stats" #define LINE \ "+-----------------------------------------------------------------+\n" #define BANNER \ "| PKE Replay Statistics for Qat Device |\n" static int qat_pke_replay_counters_show(SYSCTL_HANDLER_ARGS) { struct sbuf sb; struct adf_accel_dev *accel_dev = arg1; int ret = 0; u64 suc_counter = 0; u64 unsuc_counter = 0; + if (priv_check(curthread, PRIV_DRIVER) != 0) + return EPERM; + sbuf_new_for_sysctl(&sb, NULL, 256, req); sbuf_printf(&sb, "\n"); sbuf_printf(&sb, LINE); ret = adf_get_fw_pke_stats(accel_dev, &suc_counter, &unsuc_counter); if (ret) return ret; sbuf_printf( &sb, "| Successful Replays: %40llu |\n| Unsuccessful Replays: %40llu |\n", (unsigned long long)suc_counter, (unsigned long long)unsuc_counter); sbuf_finish(&sb); SYSCTL_OUT(req, sbuf_data(&sb), sbuf_len(&sb)); sbuf_delete(&sb); return 0; } /** * adf_pke_replay_counters_add_c4xxx() - Create debugfs entry for * acceleration device Freq counters. * @accel_dev: Pointer to acceleration device. * * Return: 0 on success, error code otherwise. 
*/ int adf_pke_replay_counters_add_c4xxx(struct adf_accel_dev *accel_dev) { struct sysctl_ctx_list *qat_sysctl_ctx = NULL; struct sysctl_oid *qat_sysctl_tree = NULL; struct sysctl_oid *pke_rep_file = NULL; qat_sysctl_ctx = device_get_sysctl_ctx(accel_dev->accel_pci_dev.pci_dev); qat_sysctl_tree = device_get_sysctl_tree(accel_dev->accel_pci_dev.pci_dev); pke_rep_file = SYSCTL_ADD_PROC(qat_sysctl_ctx, SYSCTL_CHILDREN(qat_sysctl_tree), OID_AUTO, PKE_REPLAY_DBG_FILE, CTLTYPE_STRING | CTLFLAG_RD, accel_dev, 0, qat_pke_replay_counters_show, "A", "QAT PKE Replay Statistics"); accel_dev->pke_replay_dbgfile = pke_rep_file; if (!accel_dev->pke_replay_dbgfile) { device_printf( GET_DEV(accel_dev), "Failed to create qat pke replay debugfs entry.\n"); return ENOENT; } return 0; } /** * adf_pke_replay_counters_remove_c4xxx() - Remove debugfs entry for * acceleration device Freq counters. * @accel_dev: Pointer to acceleration device. * * Return: void */ void adf_pke_replay_counters_remove_c4xxx(struct adf_accel_dev *accel_dev) { if (accel_dev->pke_replay_dbgfile) { remove_oid(accel_dev, accel_dev->pke_replay_dbgfile); accel_dev->pke_replay_dbgfile = NULL; } } diff --git a/sys/dev/qat/qat_hw/qat_c4xxx/adf_c4xxx_ras.c b/sys/dev/qat/qat_hw/qat_c4xxx/adf_c4xxx_ras.c index d7cf8e350fa4..af4c6d123c84 100644 --- a/sys/dev/qat/qat_hw/qat_c4xxx/adf_c4xxx_ras.c +++ b/sys/dev/qat/qat_hw/qat_c4xxx/adf_c4xxx_ras.c @@ -1,1344 +1,1357 @@ /* SPDX-License-Identifier: BSD-3-Clause */ -/* Copyright(c) 2007-2022 Intel Corporation */ +/* Copyright(c) 2007-2025 Intel Corporation */ #include "adf_c4xxx_ras.h" #include "adf_accel_devices.h" #include "adf_c4xxx_hw_data.h" #include #include "adf_c4xxx_inline.h" +#include #define ADF_RAS_STR_LEN 64 static int adf_sysctl_read_ras_correctable(SYSCTL_HANDLER_ARGS) { struct adf_accel_dev *accel_dev = arg1; unsigned long counter = 0; + if (priv_check(curthread, PRIV_DRIVER) != 0) + return EPERM; + if (accel_dev->ras_counters) counter = 
atomic_read(&accel_dev->ras_counters[ADF_RAS_CORR]); return SYSCTL_OUT(req, &counter, sizeof(counter)); } static int adf_sysctl_read_ras_uncorrectable(SYSCTL_HANDLER_ARGS) { struct adf_accel_dev *accel_dev = arg1; unsigned long counter = 0; + if (priv_check(curthread, PRIV_DRIVER) != 0) + return EPERM; + if (accel_dev->ras_counters) counter = atomic_read(&accel_dev->ras_counters[ADF_RAS_UNCORR]); return SYSCTL_OUT(req, &counter, sizeof(counter)); } static int adf_sysctl_read_ras_fatal(SYSCTL_HANDLER_ARGS) { struct adf_accel_dev *accel_dev = arg1; unsigned long counter = 0; + if (priv_check(curthread, PRIV_DRIVER) != 0) + return EPERM; + if (accel_dev->ras_counters) counter = atomic_read(&accel_dev->ras_counters[ADF_RAS_FATAL]); return SYSCTL_OUT(req, &counter, sizeof(counter)); } static int adf_sysctl_write_ras_reset(SYSCTL_HANDLER_ARGS) { struct adf_accel_dev *accel_dev = arg1; int value = 0; int ret = SYSCTL_IN(req, &value, sizeof(value)); + if (priv_check(curthread, PRIV_DRIVER) != 0) + return EPERM; + if (!ret && value != 0 && accel_dev->ras_counters) { int i; /* A nonzero write clears every RAS counter; the branch body was previously empty, making the reset sysctl a no-op. */ for (i = 0; i < ADF_RAS_ERRORS; ++i) atomic_set(&accel_dev->ras_counters[i], 0); } return SYSCTL_OUT(req, &value, sizeof(value)); } int adf_init_ras(struct adf_accel_dev *accel_dev) { struct sysctl_ctx_list *qat_sysctl_ctx; struct sysctl_oid *qat_sysctl_tree; struct sysctl_oid *ras_corr; struct sysctl_oid *ras_uncor; struct sysctl_oid *ras_fat; struct sysctl_oid *ras_res; int i; accel_dev->ras_counters = kcalloc(ADF_RAS_ERRORS, sizeof(*accel_dev->ras_counters), GFP_KERNEL); if (!accel_dev->ras_counters) return -ENOMEM; /* Zero each counter explicitly; previously the loop had no body and re-executed the following sysctl-ctx assignment ADF_RAS_ERRORS times. */ for (i = 0; i < ADF_RAS_ERRORS; ++i) atomic_set(&accel_dev->ras_counters[i], 0); qat_sysctl_ctx = device_get_sysctl_ctx(accel_dev->accel_pci_dev.pci_dev); qat_sysctl_tree = device_get_sysctl_tree(accel_dev->accel_pci_dev.pci_dev); ras_corr = SYSCTL_ADD_OID(qat_sysctl_ctx, SYSCTL_CHILDREN(qat_sysctl_tree), OID_AUTO, "ras_correctable", CTLTYPE_ULONG | CTLFLAG_RD | CTLFLAG_DYN, accel_dev, 0, adf_sysctl_read_ras_correctable, "LU", "QAT RAS correctable"); accel_dev->ras_correctable = ras_corr; if
(!accel_dev->ras_correctable) { device_printf(GET_DEV(accel_dev), "Failed to register ras_correctable sysctl\n"); return -EINVAL; } ras_uncor = SYSCTL_ADD_OID(qat_sysctl_ctx, SYSCTL_CHILDREN(qat_sysctl_tree), OID_AUTO, "ras_uncorrectable", CTLTYPE_ULONG | CTLFLAG_RD | CTLFLAG_DYN, accel_dev, 0, adf_sysctl_read_ras_uncorrectable, "LU", "QAT RAS uncorrectable"); accel_dev->ras_uncorrectable = ras_uncor; if (!accel_dev->ras_uncorrectable) { device_printf(GET_DEV(accel_dev), "Failed to register ras_uncorrectable sysctl\n"); return -EINVAL; } ras_fat = SYSCTL_ADD_OID(qat_sysctl_ctx, SYSCTL_CHILDREN(qat_sysctl_tree), OID_AUTO, "ras_fatal", CTLTYPE_ULONG | CTLFLAG_RD | CTLFLAG_DYN, accel_dev, 0, adf_sysctl_read_ras_fatal, "LU", "QAT RAS fatal"); accel_dev->ras_fatal = ras_fat; if (!accel_dev->ras_fatal) { device_printf(GET_DEV(accel_dev), "Failed to register ras_fatal sysctl\n"); return -EINVAL; } ras_res = SYSCTL_ADD_OID(qat_sysctl_ctx, SYSCTL_CHILDREN(qat_sysctl_tree), OID_AUTO, "ras_reset", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_DYN, accel_dev, 0, adf_sysctl_write_ras_reset, "I", "QAT RAS reset"); accel_dev->ras_reset = ras_res; if (!accel_dev->ras_reset) { device_printf(GET_DEV(accel_dev), "Failed to register ras_reset sysctl\n"); return -EINVAL; } return 0; } void adf_exit_ras(struct adf_accel_dev *accel_dev) { if (accel_dev->ras_counters) { remove_oid(accel_dev, accel_dev->ras_correctable); remove_oid(accel_dev, accel_dev->ras_uncorrectable); remove_oid(accel_dev, accel_dev->ras_fatal); remove_oid(accel_dev, accel_dev->ras_reset); accel_dev->ras_correctable = NULL; accel_dev->ras_uncorrectable = NULL; accel_dev->ras_fatal = NULL; accel_dev->ras_reset = NULL; kfree(accel_dev->ras_counters); accel_dev->ras_counters = NULL; } } static inline void adf_log_source_iastatssm(struct adf_accel_dev *accel_dev, struct resource *pmisc, u32 iastatssm, u32 accel_num) { if (iastatssm & ADF_C4XXX_IASTATSSM_UERRSSMSH_MASK) device_printf( GET_DEV(accel_dev), "Uncorrectable error shared 
memory detected in accel: %u\n", accel_num); if (iastatssm & ADF_C4XXX_IASTATSSM_CERRSSMSH_MASK) device_printf( GET_DEV(accel_dev), "Correctable error shared memory detected in accel: %u\n", accel_num); if (iastatssm & ADF_C4XXX_IASTATSSM_UERRSSMMMP0_MASK) device_printf( GET_DEV(accel_dev), "Uncorrectable error MMP0 detected in accel: %u\n", accel_num); if (iastatssm & ADF_C4XXX_IASTATSSM_CERRSSMMMP0_MASK) device_printf(GET_DEV(accel_dev), "Correctable error MMP0 detected in accel: %u\n", accel_num); if (iastatssm & ADF_C4XXX_IASTATSSM_UERRSSMMMP1_MASK) device_printf( GET_DEV(accel_dev), "Uncorrectable error MMP1 detected in accel: %u\n", accel_num); if (iastatssm & ADF_C4XXX_IASTATSSM_CERRSSMMMP1_MASK) device_printf(GET_DEV(accel_dev), "Correctable error MMP1 detected in accel: %u\n", accel_num); if (iastatssm & ADF_C4XXX_IASTATSSM_UERRSSMMMP2_MASK) device_printf( GET_DEV(accel_dev), "Uncorrectable error MMP2 detected in accel: %u\n", accel_num); if (iastatssm & ADF_C4XXX_IASTATSSM_CERRSSMMMP2_MASK) device_printf(GET_DEV(accel_dev), "Correctable error MMP2 detected in accel: %u\n", accel_num); if (iastatssm & ADF_C4XXX_IASTATSSM_UERRSSMMMP3_MASK) device_printf( GET_DEV(accel_dev), "Uncorrectable error MMP3 detected in accel: %u\n", accel_num); if (iastatssm & ADF_C4XXX_IASTATSSM_CERRSSMMMP3_MASK) device_printf(GET_DEV(accel_dev), "Correctable error MMP3 detected in accel: %u\n", accel_num); if (iastatssm & ADF_C4XXX_IASTATSSM_UERRSSMMMP4_MASK) device_printf( GET_DEV(accel_dev), "Uncorrectable error MMP4 detected in accel: %u\n", accel_num); if (iastatssm & ADF_C4XXX_IASTATSSM_CERRSSMMMP4_MASK) device_printf(GET_DEV(accel_dev), "Correctable error MMP4 detected in accel: %u\n", accel_num); if (iastatssm & ADF_C4XXX_IASTATSSM_PPERR_MASK) device_printf( GET_DEV(accel_dev), "Uncorrectable error Push or Pull detected in accel: %u\n", accel_num); if (iastatssm & ADF_C4XXX_IASTATSSM_CPPPAR_ERR_MASK) device_printf( GET_DEV(accel_dev), "Uncorrectable CPP parity error 
detected in accel: %u\n", accel_num); if (iastatssm & ADF_C4XXX_IASTATSSM_RFPAR_ERR_MASK) device_printf( GET_DEV(accel_dev), "Uncorrectable SSM RF parity error detected in accel: %u\n", accel_num); } static inline void adf_clear_source_statssm(struct adf_accel_dev *accel_dev, struct resource *pmisc, u32 statssm, u32 accel_num) { if (statssm & ADF_C4XXX_IASTATSSM_UERRSSMSH_MASK) adf_csr_fetch_and_and(pmisc, ADF_C4XXX_UERRSSMSH(accel_num), ADF_C4XXX_UERRSSMSH_INTS_CLEAR_MASK); if (statssm & ADF_C4XXX_IASTATSSM_CERRSSMSH_MASK) adf_csr_fetch_and_and(pmisc, ADF_C4XXX_CERRSSMSH(accel_num), ADF_C4XXX_CERRSSMSH_INTS_CLEAR_MASK); if (statssm & ADF_C4XXX_IASTATSSM_UERRSSMMMP0_MASK) adf_csr_fetch_and_and(pmisc, ADF_C4XXX_UERRSSMMMP(accel_num, 0), ~ADF_C4XXX_UERRSSMMMP_INTS_CLEAR_MASK); if (statssm & ADF_C4XXX_IASTATSSM_CERRSSMMMP0_MASK) adf_csr_fetch_and_and(pmisc, ADF_C4XXX_CERRSSMMMP(accel_num, 0), ~ADF_C4XXX_CERRSSMMMP_INTS_CLEAR_MASK); if (statssm & ADF_C4XXX_IASTATSSM_UERRSSMMMP1_MASK) adf_csr_fetch_and_and(pmisc, ADF_C4XXX_UERRSSMMMP(accel_num, 1), ~ADF_C4XXX_UERRSSMMMP_INTS_CLEAR_MASK); if (statssm & ADF_C4XXX_IASTATSSM_CERRSSMMMP1_MASK) adf_csr_fetch_and_and(pmisc, ADF_C4XXX_CERRSSMMMP(accel_num, 1), ~ADF_C4XXX_CERRSSMMMP_INTS_CLEAR_MASK); if (statssm & ADF_C4XXX_IASTATSSM_UERRSSMMMP2_MASK) adf_csr_fetch_and_and(pmisc, ADF_C4XXX_UERRSSMMMP(accel_num, 2), ~ADF_C4XXX_UERRSSMMMP_INTS_CLEAR_MASK); if (statssm & ADF_C4XXX_IASTATSSM_CERRSSMMMP2_MASK) adf_csr_fetch_and_and(pmisc, ADF_C4XXX_CERRSSMMMP(accel_num, 2), ~ADF_C4XXX_CERRSSMMMP_INTS_CLEAR_MASK); if (statssm & ADF_C4XXX_IASTATSSM_UERRSSMMMP3_MASK) adf_csr_fetch_and_and(pmisc, ADF_C4XXX_UERRSSMMMP(accel_num, 3), ~ADF_C4XXX_UERRSSMMMP_INTS_CLEAR_MASK); if (statssm & ADF_C4XXX_IASTATSSM_CERRSSMMMP3_MASK) adf_csr_fetch_and_and(pmisc, ADF_C4XXX_CERRSSMMMP(accel_num, 3), ~ADF_C4XXX_CERRSSMMMP_INTS_CLEAR_MASK); if (statssm & ADF_C4XXX_IASTATSSM_UERRSSMMMP4_MASK) adf_csr_fetch_and_and(pmisc, ADF_C4XXX_UERRSSMMMP(accel_num, 
4), ~ADF_C4XXX_UERRSSMMMP_INTS_CLEAR_MASK); if (statssm & ADF_C4XXX_IASTATSSM_CERRSSMMMP4_MASK) adf_csr_fetch_and_and(pmisc, ADF_C4XXX_CERRSSMMMP(accel_num, 4), ~ADF_C4XXX_CERRSSMMMP_INTS_CLEAR_MASK); if (statssm & ADF_C4XXX_IASTATSSM_PPERR_MASK) adf_csr_fetch_and_and(pmisc, ADF_PPERR(accel_num), ~ADF_C4XXX_PPERR_INTS_CLEAR_MASK); if (statssm & ADF_C4XXX_IASTATSSM_RFPAR_ERR_MASK) adf_csr_fetch_and_or(pmisc, ADF_C4XXX_SSMSOFTERRORPARITY(accel_num), 0UL); if (statssm & ADF_C4XXX_IASTATSSM_CPPPAR_ERR_MASK) adf_csr_fetch_and_or(pmisc, ADF_C4XXX_SSMCPPERR(accel_num), 0UL); } static inline void adf_process_errsou8(struct adf_accel_dev *accel_dev, struct resource *pmisc) { int i; u32 mecorrerr = ADF_CSR_RD(pmisc, ADF_C4XXX_HI_ME_COR_ERRLOG); const unsigned long tmp_mecorrerr = mecorrerr; /* For each correctable error in ME increment RAS counter */ for_each_set_bit(i, &tmp_mecorrerr, ADF_C4XXX_HI_ME_COR_ERRLOG_SIZE_IN_BITS) { atomic_inc(&accel_dev->ras_counters[ADF_RAS_CORR]); device_printf(GET_DEV(accel_dev), "Correctable error detected in AE%d\n", i); } /* Clear interrupt from errsou8 (RW1C) */ ADF_CSR_WR(pmisc, ADF_C4XXX_HI_ME_COR_ERRLOG, mecorrerr); } static inline void adf_handle_ae_uncorr_err(struct adf_accel_dev *accel_dev, struct resource *pmisc) { int i; u32 me_uncorr_err = ADF_CSR_RD(pmisc, ADF_C4XXX_HI_ME_UNCERR_LOG); const unsigned long tmp_me_uncorr_err = me_uncorr_err; /* For each uncorrectable fatal error in AE increment RAS error * counter. 
*/ for_each_set_bit(i, &tmp_me_uncorr_err, ADF_C4XXX_HI_ME_UNCOR_ERRLOG_BITS) { atomic_inc(&accel_dev->ras_counters[ADF_RAS_FATAL]); device_printf(GET_DEV(accel_dev), "Uncorrectable error detected in AE%d\n", i); } /* Clear interrupt from me_uncorr_err (RW1C) */ ADF_CSR_WR(pmisc, ADF_C4XXX_HI_ME_UNCERR_LOG, me_uncorr_err); } static inline void adf_handle_ri_mem_par_err(struct adf_accel_dev *accel_dev, struct resource *pmisc, bool *reset_required) { u32 ri_mem_par_err_sts = 0; u32 ri_mem_par_err_ferr = 0; ri_mem_par_err_sts = ADF_CSR_RD(pmisc, ADF_C4XXX_RI_MEM_PAR_ERR_STS); ri_mem_par_err_ferr = ADF_CSR_RD(pmisc, ADF_C4XXX_RI_MEM_PAR_ERR_FERR); if (ri_mem_par_err_sts & ADF_C4XXX_RI_MEM_PAR_ERR_STS_MASK) { atomic_inc(&accel_dev->ras_counters[ADF_RAS_UNCORR]); device_printf( GET_DEV(accel_dev), "Uncorrectable RI memory parity error detected.\n"); } if (ri_mem_par_err_sts & ADF_C4XXX_RI_MEM_MSIX_TBL_INT_MASK) { atomic_inc(&accel_dev->ras_counters[ADF_RAS_FATAL]); device_printf( GET_DEV(accel_dev), "Uncorrectable fatal MSIX table parity error detected.\n"); *reset_required = true; } device_printf(GET_DEV(accel_dev), "ri_mem_par_err_sts=0x%X\tri_mem_par_err_ferr=%u\n", ri_mem_par_err_sts, ri_mem_par_err_ferr); ADF_CSR_WR(pmisc, ADF_C4XXX_RI_MEM_PAR_ERR_STS, ri_mem_par_err_sts); } static inline void adf_handle_ti_mem_par_err(struct adf_accel_dev *accel_dev, struct resource *pmisc) { u32 ti_mem_par_err_sts0 = 0; u32 ti_mem_par_err_sts1 = 0; u32 ti_mem_par_err_ferr = 0; ti_mem_par_err_sts0 = ADF_CSR_RD(pmisc, ADF_C4XXX_TI_MEM_PAR_ERR_STS0); ti_mem_par_err_sts1 = ADF_CSR_RD(pmisc, ADF_C4XXX_TI_MEM_PAR_ERR_STS1); ti_mem_par_err_ferr = ADF_CSR_RD(pmisc, ADF_C4XXX_TI_MEM_PAR_ERR_FIRST_ERROR); atomic_inc(&accel_dev->ras_counters[ADF_RAS_FATAL]); ti_mem_par_err_sts1 &= ADF_C4XXX_TI_MEM_PAR_ERR_STS1_MASK; device_printf(GET_DEV(accel_dev), "Uncorrectable TI memory parity error detected.\n"); device_printf(GET_DEV(accel_dev), "ti_mem_par_err_sts0=0x%X\tti_mem_par_err_sts1=0x%X\t" 
"ti_mem_par_err_ferr=0x%X\n", ti_mem_par_err_sts0, ti_mem_par_err_sts1, ti_mem_par_err_ferr); ADF_CSR_WR(pmisc, ADF_C4XXX_TI_MEM_PAR_ERR_STS0, ti_mem_par_err_sts0); ADF_CSR_WR(pmisc, ADF_C4XXX_TI_MEM_PAR_ERR_STS1, ti_mem_par_err_sts1); } static inline void adf_log_fatal_cmd_par_err(struct adf_accel_dev *accel_dev, char *err_type) { atomic_inc(&accel_dev->ras_counters[ADF_RAS_FATAL]); device_printf(GET_DEV(accel_dev), "Fatal error detected: %s command parity\n", err_type); } static inline void adf_handle_host_cpp_par_err(struct adf_accel_dev *accel_dev, struct resource *pmisc) { u32 host_cpp_par_err = 0; host_cpp_par_err = ADF_CSR_RD(pmisc, ADF_C4XXX_HI_CPP_AGENT_CMD_PAR_ERR_LOG); if (host_cpp_par_err & ADF_C4XXX_TI_CMD_PAR_ERR) adf_log_fatal_cmd_par_err(accel_dev, "TI"); if (host_cpp_par_err & ADF_C4XXX_RI_CMD_PAR_ERR) adf_log_fatal_cmd_par_err(accel_dev, "RI"); if (host_cpp_par_err & ADF_C4XXX_ICI_CMD_PAR_ERR) adf_log_fatal_cmd_par_err(accel_dev, "ICI"); if (host_cpp_par_err & ADF_C4XXX_ICE_CMD_PAR_ERR) adf_log_fatal_cmd_par_err(accel_dev, "ICE"); if (host_cpp_par_err & ADF_C4XXX_ARAM_CMD_PAR_ERR) adf_log_fatal_cmd_par_err(accel_dev, "ARAM"); if (host_cpp_par_err & ADF_C4XXX_CFC_CMD_PAR_ERR) adf_log_fatal_cmd_par_err(accel_dev, "CFC"); if (ADF_C4XXX_SSM_CMD_PAR_ERR(host_cpp_par_err)) adf_log_fatal_cmd_par_err(accel_dev, "SSM"); /* Clear interrupt from host_cpp_par_err (RW1C) */ ADF_CSR_WR(pmisc, ADF_C4XXX_HI_CPP_AGENT_CMD_PAR_ERR_LOG, host_cpp_par_err); } static inline void adf_process_errsou9(struct adf_accel_dev *accel_dev, struct resource *pmisc, u32 errsou, bool *reset_required) { if (errsou & ADF_C4XXX_ME_UNCORR_ERROR) { adf_handle_ae_uncorr_err(accel_dev, pmisc); /* Notify caller that function level reset is required. 
*/ *reset_required = true; } if (errsou & ADF_C4XXX_CPP_CMD_PAR_ERR) { adf_handle_host_cpp_par_err(accel_dev, pmisc); *reset_required = true; } /* RI memory parity errors are uncorrectable non-fatal errors * with exception of bit 22 MSIX table parity error, which should * be treated as fatal error, followed by device restart. */ if (errsou & ADF_C4XXX_RI_MEM_PAR_ERR) adf_handle_ri_mem_par_err(accel_dev, pmisc, reset_required); if (errsou & ADF_C4XXX_TI_MEM_PAR_ERR) { adf_handle_ti_mem_par_err(accel_dev, pmisc); *reset_required = true; } } static inline void adf_process_exprpssmcpr(struct adf_accel_dev *accel_dev, struct resource *pmisc, u32 accel) { u32 exprpssmcpr; /* CPR0 */ exprpssmcpr = ADF_CSR_RD(pmisc, ADF_C4XXX_EXPRPSSMCPR0(accel)); if (exprpssmcpr & ADF_C4XXX_EXPRPSSM_FATAL_MASK) { device_printf(GET_DEV(accel_dev), "Uncorrectable error CPR0 detected in accel %u\n", accel); atomic_inc(&accel_dev->ras_counters[ADF_RAS_UNCORR]); } if (exprpssmcpr & ADF_C4XXX_EXPRPSSM_SOFT_MASK) { device_printf(GET_DEV(accel_dev), "Correctable error CPR0 detected in accel %u\n", accel); atomic_inc(&accel_dev->ras_counters[ADF_RAS_CORR]); } ADF_CSR_WR(pmisc, ADF_C4XXX_EXPRPSSMCPR0(accel), 0); /* CPR1 */ exprpssmcpr = ADF_CSR_RD(pmisc, ADF_C4XXX_EXPRPSSMCPR1(accel)); if (exprpssmcpr & ADF_C4XXX_EXPRPSSM_FATAL_MASK) { device_printf(GET_DEV(accel_dev), "Uncorrectable error CPR1 detected in accel %u\n", accel); atomic_inc(&accel_dev->ras_counters[ADF_RAS_UNCORR]); } if (exprpssmcpr & ADF_C4XXX_EXPRPSSM_SOFT_MASK) { device_printf(GET_DEV(accel_dev), "Correctable error CPR1 detected in accel %u\n", accel); atomic_inc(&accel_dev->ras_counters[ADF_RAS_CORR]); } ADF_CSR_WR(pmisc, ADF_C4XXX_EXPRPSSMCPR1(accel), 0); } static inline void adf_process_exprpssmxlt(struct adf_accel_dev *accel_dev, struct resource *pmisc, u32 accel) { u32 exprpssmxlt; /* XTL0 */ exprpssmxlt = ADF_CSR_RD(pmisc, ADF_C4XXX_EXPRPSSMXLT0(accel)); if (exprpssmxlt & ADF_C4XXX_EXPRPSSM_FATAL_MASK) { 
device_printf(GET_DEV(accel_dev), "Uncorrectable error XLT0 detected in accel %u\n", accel); atomic_inc(&accel_dev->ras_counters[ADF_RAS_UNCORR]); } if (exprpssmxlt & ADF_C4XXX_EXPRPSSM_SOFT_MASK) { device_printf(GET_DEV(accel_dev), "Correctable error XLT0 detected in accel %u\n", accel); atomic_inc(&accel_dev->ras_counters[ADF_RAS_CORR]); } ADF_CSR_WR(pmisc, ADF_C4XXX_EXPRPSSMXLT0(accel), 0); /* XTL1 */ exprpssmxlt = ADF_CSR_RD(pmisc, ADF_C4XXX_EXPRPSSMXLT1(accel)); if (exprpssmxlt & ADF_C4XXX_EXPRPSSM_FATAL_MASK) { device_printf(GET_DEV(accel_dev), "Uncorrectable error XLT1 detected in accel %u\n", accel); atomic_inc(&accel_dev->ras_counters[ADF_RAS_UNCORR]); } if (exprpssmxlt & ADF_C4XXX_EXPRPSSM_SOFT_MASK) { device_printf(GET_DEV(accel_dev), "Correctable error XLT1 detected in accel %u\n", accel); atomic_inc(&accel_dev->ras_counters[ADF_RAS_CORR]); } ADF_CSR_WR(pmisc, ADF_C4XXX_EXPRPSSMXLT0(accel), 0); } static inline void adf_process_spp_par_err(struct adf_accel_dev *accel_dev, struct resource *pmisc, u32 accel, bool *reset_required) { /* All SPP parity errors are treated as uncorrectable fatal errors */ atomic_inc(&accel_dev->ras_counters[ADF_RAS_FATAL]); *reset_required = true; device_printf(GET_DEV(accel_dev), "Uncorrectable fatal SPP parity error detected\n"); } static inline void adf_process_statssm(struct adf_accel_dev *accel_dev, struct resource *pmisc, u32 accel, bool *reset_required) { u32 i; u32 statssm = ADF_CSR_RD(pmisc, ADF_INTSTATSSM(accel)); u32 iastatssm = ADF_CSR_RD(pmisc, ADF_C4XXX_IAINTSTATSSM(accel)); bool type; const unsigned long tmp_iastatssm = iastatssm; /* First collect all errors */ for_each_set_bit(i, &tmp_iastatssm, ADF_C4XXX_IASTATSSM_BITS) { if (i == ADF_C4XXX_IASTATSSM_SLICE_HANG_ERR_BIT) { /* Slice Hang error is being handled in * separate function adf_check_slice_hang_c4xxx(), * which also increments RAS counters for * SliceHang error. 
*/ continue; } if (i == ADF_C4XXX_IASTATSSM_SPP_PAR_ERR_BIT) { adf_process_spp_par_err(accel_dev, pmisc, accel, reset_required); continue; } type = (i % 2) ? ADF_RAS_CORR : ADF_RAS_UNCORR; if (i == ADF_C4XXX_IASTATSSM_CPP_PAR_ERR_BIT) type = ADF_RAS_UNCORR; atomic_inc(&accel_dev->ras_counters[type]); } /* If iastatssm is set, we need to log the error */ if (iastatssm & ADF_C4XXX_IASTATSSM_MASK) adf_log_source_iastatssm(accel_dev, pmisc, iastatssm, accel); /* If statssm is set, we need to clear the error sources */ if (statssm & ADF_C4XXX_IASTATSSM_MASK) adf_clear_source_statssm(accel_dev, pmisc, statssm, accel); /* Clear the iastatssm after clearing error sources */ if (iastatssm & ADF_C4XXX_IASTATSSM_MASK) adf_csr_fetch_and_and(pmisc, ADF_C4XXX_IAINTSTATSSM(accel), ADF_C4XXX_IASTATSSM_CLR_MASK); } static inline void adf_process_errsou10(struct adf_accel_dev *accel_dev, struct resource *pmisc, u32 errsou, u32 num_accels, bool *reset_required) { int accel; const unsigned long tmp_errsou = errsou; for_each_set_bit(accel, &tmp_errsou, num_accels) { adf_process_statssm(accel_dev, pmisc, accel, reset_required); adf_process_exprpssmcpr(accel_dev, pmisc, accel); adf_process_exprpssmxlt(accel_dev, pmisc, accel); } } /* ERRSOU 11 */ static inline void adf_handle_ti_misc_err(struct adf_accel_dev *accel_dev, struct resource *pmisc) { u32 ti_misc_sts = 0; u32 err_type = 0; ti_misc_sts = ADF_CSR_RD(pmisc, ADF_C4XXX_TI_MISC_STS); dev_dbg(GET_DEV(accel_dev), "ti_misc_sts = 0x%X\n", ti_misc_sts); if (ti_misc_sts & ADF_C4XXX_TI_MISC_ERR_MASK) { atomic_inc(&accel_dev->ras_counters[ADF_RAS_UNCORR]); /* If TI misc error occurred then check its type */ err_type = ADF_C4XXX_GET_TI_MISC_ERR_TYPE(ti_misc_sts); if (err_type == ADF_C4XXX_TI_BME_RESP_ORDER_ERR) { device_printf( GET_DEV(accel_dev), "Uncorrectable non-fatal BME response order error.\n"); } else if (err_type == ADF_C4XXX_TI_RESP_ORDER_ERR) { device_printf( GET_DEV(accel_dev), "Uncorrectable non-fatal response order error.\n"); 
} /* Clear the interrupt and allow the next error to be * logged. */ ADF_CSR_WR(pmisc, ADF_C4XXX_TI_MISC_STS, BIT(0)); } } static inline void adf_handle_ri_push_pull_par_err(struct adf_accel_dev *accel_dev, struct resource *pmisc) { u32 ri_cpp_int_sts = 0; u32 err_clear_mask = 0; ri_cpp_int_sts = ADF_CSR_RD(pmisc, ADF_C4XXX_RI_CPP_INT_STS); dev_dbg(GET_DEV(accel_dev), "ri_cpp_int_sts = 0x%X\n", ri_cpp_int_sts); if (ri_cpp_int_sts & ADF_C4XXX_RI_CPP_INT_STS_PUSH_ERR) { atomic_inc(&accel_dev->ras_counters[ADF_RAS_UNCORR]); device_printf( GET_DEV(accel_dev), "CPP%d: Uncorrectable non-fatal RI push error detected.\n", ADF_C4XXX_GET_CPP_BUS_FROM_STS(ri_cpp_int_sts)); err_clear_mask |= ADF_C4XXX_RI_CPP_INT_STS_PUSH_ERR; } if (ri_cpp_int_sts & ADF_C4XXX_RI_CPP_INT_STS_PULL_ERR) { atomic_inc(&accel_dev->ras_counters[ADF_RAS_UNCORR]); device_printf( GET_DEV(accel_dev), "CPP%d: Uncorrectable non-fatal RI pull error detected.\n", ADF_C4XXX_GET_CPP_BUS_FROM_STS(ri_cpp_int_sts)); err_clear_mask |= ADF_C4XXX_RI_CPP_INT_STS_PULL_ERR; } /* Clear the interrupt for handled errors and allow the next error * to be logged. 
*/ ADF_CSR_WR(pmisc, ADF_C4XXX_RI_CPP_INT_STS, err_clear_mask); } static inline void adf_handle_ti_push_pull_par_err(struct adf_accel_dev *accel_dev, struct resource *pmisc) { u32 ti_cpp_int_sts = 0; u32 err_clear_mask = 0; ti_cpp_int_sts = ADF_CSR_RD(pmisc, ADF_C4XXX_TI_CPP_INT_STS); dev_dbg(GET_DEV(accel_dev), "ti_cpp_int_sts = 0x%X\n", ti_cpp_int_sts); if (ti_cpp_int_sts & ADF_C4XXX_TI_CPP_INT_STS_PUSH_ERR) { atomic_inc(&accel_dev->ras_counters[ADF_RAS_UNCORR]); device_printf( GET_DEV(accel_dev), "CPP%d: Uncorrectable non-fatal TI push error detected.\n", ADF_C4XXX_GET_CPP_BUS_FROM_STS(ti_cpp_int_sts)); err_clear_mask |= ADF_C4XXX_TI_CPP_INT_STS_PUSH_ERR; } if (ti_cpp_int_sts & ADF_C4XXX_TI_CPP_INT_STS_PULL_ERR) { atomic_inc(&accel_dev->ras_counters[ADF_RAS_UNCORR]); device_printf( GET_DEV(accel_dev), "CPP%d: Uncorrectable non-fatal TI pull error detected.\n", ADF_C4XXX_GET_CPP_BUS_FROM_STS(ti_cpp_int_sts)); err_clear_mask |= ADF_C4XXX_TI_CPP_INT_STS_PULL_ERR; } /* Clear the interrupt for handled errors and allow the next error * to be logged. */ ADF_CSR_WR(pmisc, ADF_C4XXX_TI_CPP_INT_STS, err_clear_mask); } static inline void adf_handle_aram_corr_err(struct adf_accel_dev *accel_dev, struct resource *aram_base_addr) { u32 aram_cerr = 0; aram_cerr = ADF_CSR_RD(aram_base_addr, ADF_C4XXX_ARAMCERR); dev_dbg(GET_DEV(accel_dev), "aram_cerr = 0x%X\n", aram_cerr); if (aram_cerr & ADF_C4XXX_ARAM_CORR_ERR_MASK) { atomic_inc(&accel_dev->ras_counters[ADF_RAS_CORR]); device_printf(GET_DEV(accel_dev), "Correctable ARAM error detected.\n"); } /* Clear correctable ARAM error interrupt. 
*/ ADF_C4XXX_CLEAR_CSR_BIT(aram_cerr, 0); ADF_CSR_WR(aram_base_addr, ADF_C4XXX_ARAMCERR, aram_cerr); } static inline void adf_handle_aram_uncorr_err(struct adf_accel_dev *accel_dev, struct resource *aram_base_addr) { u32 aram_uerr = 0; aram_uerr = ADF_CSR_RD(aram_base_addr, ADF_C4XXX_ARAMUERR); dev_dbg(GET_DEV(accel_dev), "aram_uerr = 0x%X\n", aram_uerr); if (aram_uerr & ADF_C4XXX_ARAM_UNCORR_ERR_MASK) { atomic_inc(&accel_dev->ras_counters[ADF_RAS_UNCORR]); device_printf(GET_DEV(accel_dev), "Uncorrectable non-fatal ARAM error detected.\n"); } /* Clear uncorrectable ARAM error interrupt. */ ADF_C4XXX_CLEAR_CSR_BIT(aram_uerr, 0); ADF_CSR_WR(aram_base_addr, ADF_C4XXX_ARAMUERR, aram_uerr); } static inline void adf_handle_ti_pull_par_err(struct adf_accel_dev *accel_dev, struct resource *pmisc) { u32 ti_cpp_int_sts = 0; ti_cpp_int_sts = ADF_CSR_RD(pmisc, ADF_C4XXX_TI_CPP_INT_STS); dev_dbg(GET_DEV(accel_dev), "ti_cpp_int_sts = 0x%X\n", ti_cpp_int_sts); if (ti_cpp_int_sts & ADF_C4XXX_TI_CPP_INT_STS_PUSH_DATA_PAR_ERR) { atomic_inc(&accel_dev->ras_counters[ADF_RAS_UNCORR]); device_printf( GET_DEV(accel_dev), "CPP%d: Uncorrectable non-fatal TI pull data parity error detected.\n", ADF_C4XXX_GET_CPP_BUS_FROM_STS(ti_cpp_int_sts)); } /* Clear the interrupt and allow the next error to be logged. */ ADF_CSR_WR(pmisc, ADF_C4XXX_TI_CPP_INT_STS, ADF_C4XXX_TI_CPP_INT_STS_PUSH_DATA_PAR_ERR); } static inline void adf_handle_ri_push_par_err(struct adf_accel_dev *accel_dev, struct resource *pmisc) { u32 ri_cpp_int_sts = 0; ri_cpp_int_sts = ADF_CSR_RD(pmisc, ADF_C4XXX_RI_CPP_INT_STS); dev_dbg(GET_DEV(accel_dev), "ri_cpp_int_sts = 0x%X\n", ri_cpp_int_sts); if (ri_cpp_int_sts & ADF_C4XXX_RI_CPP_INT_STS_PUSH_DATA_PAR_ERR) { atomic_inc(&accel_dev->ras_counters[ADF_RAS_UNCORR]); device_printf( GET_DEV(accel_dev), "CPP%d: Uncorrectable non-fatal RI push data parity error detected.\n", ADF_C4XXX_GET_CPP_BUS_FROM_STS(ri_cpp_int_sts)); } /* Clear the interrupt and allow the next error to be logged. 
*/ ADF_CSR_WR(pmisc, ADF_C4XXX_RI_CPP_INT_STS, ADF_C4XXX_RI_CPP_INT_STS_PUSH_DATA_PAR_ERR); } static inline void adf_log_inln_err(struct adf_accel_dev *accel_dev, u32 offset, u8 ras_type, char *msg) { if (ras_type >= ADF_RAS_ERRORS) { device_printf(GET_DEV(accel_dev), "Invalid ras type %u\n", ras_type); return; } if (offset == ADF_C4XXX_INLINE_INGRESS_OFFSET) { if (ras_type == ADF_RAS_CORR) dev_dbg(GET_DEV(accel_dev), "Detect ici %s\n", msg); else device_printf(GET_DEV(accel_dev), "Detect ici %s\n", msg); } else { if (ras_type == ADF_RAS_CORR) dev_dbg(GET_DEV(accel_dev), "Detect ice %s\n", msg); else device_printf(GET_DEV(accel_dev), "Detect ice %s\n", msg); } atomic_inc(&accel_dev->ras_counters[ras_type]); } static inline void adf_handle_parser_uerr(struct adf_accel_dev *accel_dev, struct resource *aram_base_addr, u32 offset, bool *reset_required) { u32 reg_val = 0; reg_val = ADF_CSR_RD(aram_base_addr, ADF_C4XXX_IC_PARSER_UERR + offset); if (reg_val & ADF_C4XXX_PARSER_UERR_INTR) { /* Mask inten */ reg_val &= ~ADF_C4XXX_PARSER_DESC_UERR_INTR_ENA; ADF_CSR_WR(aram_base_addr, ADF_C4XXX_IC_PARSER_UERR + offset, reg_val); /* Fatal error then increase RAS error counter * and reset CPM */ adf_log_inln_err(accel_dev, offset, ADF_RAS_FATAL, "parser uncorr fatal err"); *reset_required = true; } } static inline void adf_handle_mac_intr(struct adf_accel_dev *accel_dev, struct resource *aram_base_addr, u32 offset, bool *reset_required) { u64 reg_val; reg_val = ADF_CSR_RD64(aram_base_addr, ADF_C4XXX_MAC_IP + offset); /* Handle the MAC interrupts masked out in MAC_IM */ if (reg_val & ADF_C4XXX_MAC_ERROR_TX_UNDERRUN) adf_log_inln_err(accel_dev, offset, ADF_RAS_CORR, "err tx underrun"); if (reg_val & ADF_C4XXX_MAC_ERROR_TX_FCS) adf_log_inln_err(accel_dev, offset, ADF_RAS_CORR, "err tx fcs"); if (reg_val & ADF_C4XXX_MAC_ERROR_TX_DATA_CORRUPT) adf_log_inln_err(accel_dev, offset, ADF_RAS_CORR, "err tx data corrupt"); if (reg_val & ADF_C4XXX_MAC_ERROR_RX_OVERRUN) { *reset_required = 
true; adf_log_inln_err(accel_dev, offset, ADF_RAS_FATAL, "err rx overrun fatal err"); } if (reg_val & ADF_C4XXX_MAC_ERROR_RX_RUNT) { *reset_required = true; adf_log_inln_err(accel_dev, offset, ADF_RAS_FATAL, "err rx runt fatal err"); } if (reg_val & ADF_C4XXX_MAC_ERROR_RX_UNDERSIZE) { *reset_required = true; adf_log_inln_err(accel_dev, offset, ADF_RAS_FATAL, "err rx undersize fatal err"); } if (reg_val & ADF_C4XXX_MAC_ERROR_RX_JABBER) { *reset_required = true; adf_log_inln_err(accel_dev, offset, ADF_RAS_FATAL, "err rx jabber fatal err"); } if (reg_val & ADF_C4XXX_MAC_ERROR_RX_OVERSIZE) { *reset_required = true; adf_log_inln_err(accel_dev, offset, ADF_RAS_FATAL, "err rx oversize fatal err"); } if (reg_val & ADF_C4XXX_MAC_ERROR_RX_FCS) adf_log_inln_err(accel_dev, offset, ADF_RAS_CORR, "err rx fcs"); if (reg_val & ADF_C4XXX_MAC_ERROR_RX_FRAME) adf_log_inln_err(accel_dev, offset, ADF_RAS_CORR, "err rx frame"); if (reg_val & ADF_C4XXX_MAC_ERROR_RX_CODE) adf_log_inln_err(accel_dev, offset, ADF_RAS_CORR, "err rx code"); if (reg_val & ADF_C4XXX_MAC_ERROR_RX_PREAMBLE) adf_log_inln_err(accel_dev, offset, ADF_RAS_CORR, "err rx preamble"); if (reg_val & ADF_C4XXX_MAC_RX_LINK_UP) adf_log_inln_err(accel_dev, offset, ADF_RAS_CORR, "rx link up"); if (reg_val & ADF_C4XXX_MAC_INVALID_SPEED) adf_log_inln_err(accel_dev, offset, ADF_RAS_CORR, "invalid speed"); if (reg_val & ADF_C4XXX_MAC_PIA_RX_FIFO_OVERRUN) { *reset_required = true; adf_log_inln_err(accel_dev, offset, ADF_RAS_FATAL, "pia rx fifo overrun fatal err"); } if (reg_val & ADF_C4XXX_MAC_PIA_TX_FIFO_OVERRUN) { *reset_required = true; adf_log_inln_err(accel_dev, offset, ADF_RAS_FATAL, "pia tx fifo overrun fatal err"); } if (reg_val & ADF_C4XXX_MAC_PIA_TX_FIFO_UNDERRUN) { *reset_required = true; adf_log_inln_err(accel_dev, offset, ADF_RAS_FATAL, "pia tx fifo underrun fatal err"); } /* Clear the interrupt and allow the next error to be logged. 
*/ ADF_CSR_WR64(aram_base_addr, ADF_C4XXX_MAC_IP + offset, reg_val); } static inline bool adf_handle_rf_par_err(struct adf_accel_dev *accel_dev, struct resource *aram_base_addr, u32 rf_par_addr, u32 rf_par_msk, u32 offset, char *msg) { u32 reg_val; unsigned long intr_status; int i; char strbuf[ADF_C4XXX_MAX_STR_LEN]; /* Handle rf parity error */ reg_val = ADF_CSR_RD(aram_base_addr, rf_par_addr + offset); intr_status = reg_val & rf_par_msk; if (intr_status) { for_each_set_bit(i, &intr_status, ADF_C4XXX_RF_PAR_ERR_BITS) { if (i % 2 == 0) snprintf(strbuf, sizeof(strbuf), "%s mul par %u uncorr fatal err", msg, RF_PAR_MUL_MAP(i)); else snprintf(strbuf, sizeof(strbuf), "%s par %u uncorr fatal err", msg, RF_PAR_MAP(i)); adf_log_inln_err(accel_dev, offset, ADF_RAS_FATAL, strbuf); } /* Clear the interrupt and allow the next error to be logged. */ ADF_CSR_WR(aram_base_addr, rf_par_addr + offset, reg_val); return true; } return false; } static inline void adf_handle_cd_rf_par_err(struct adf_accel_dev *accel_dev, struct resource *aram_base_addr, u32 offset, bool *reset_required) { /* Handle reg_cd_rf_parity_err[1] */ *reset_required |= adf_handle_rf_par_err(accel_dev, aram_base_addr, ADF_C4XXX_IC_CD_RF_PARITY_ERR_1, ADF_C4XXX_CD_RF_PAR_ERR_1_INTR, offset, "cd rf par[1]:") ? true : false; } static inline void adf_handle_inln_rf_par_err(struct adf_accel_dev *accel_dev, struct resource *aram_base_addr, u32 offset, bool *reset_required) { /* Handle reg_inln_rf_parity_err[0] */ *reset_required |= adf_handle_rf_par_err(accel_dev, aram_base_addr, ADF_C4XXX_IC_INLN_RF_PARITY_ERR_0, ADF_C4XXX_INLN_RF_PAR_ERR_0_INTR, offset, "inln rf par[0]:") ? true : false; /* Handle reg_inln_rf_parity_err[1] */ *reset_required |= adf_handle_rf_par_err(accel_dev, aram_base_addr, ADF_C4XXX_IC_INLN_RF_PARITY_ERR_1, ADF_C4XXX_INLN_RF_PAR_ERR_1_INTR, offset, "inln rf par[1]:") ? 
true : false; /* Handle reg_inln_rf_parity_err[2] */ *reset_required |= adf_handle_rf_par_err(accel_dev, aram_base_addr, ADF_C4XXX_IC_INLN_RF_PARITY_ERR_2, ADF_C4XXX_INLN_RF_PAR_ERR_2_INTR, offset, "inln rf par[2]:") ? true : false; /* Handle reg_inln_rf_parity_err[5] */ *reset_required |= adf_handle_rf_par_err(accel_dev, aram_base_addr, ADF_C4XXX_IC_INLN_RF_PARITY_ERR_5, ADF_C4XXX_INLN_RF_PAR_ERR_5_INTR, offset, "inln rf par[5]:") ? true : false; } static inline void adf_handle_congest_mngt_intr(struct adf_accel_dev *accel_dev, struct resource *aram_base_addr, u32 offset, bool *reset_required) { u32 reg_val; reg_val = ADF_CSR_RD(aram_base_addr, ADF_C4XXX_IC_CONGESTION_MGMT_INT + offset); /* A mis-configuration of CPM, a mis-configuration of the Ethernet * Complex or that the traffic profile has deviated from that for * which the resources were configured */ if (reg_val & ADF_C4XXX_CONGESTION_MGMT_CTPB_GLOBAL_CROSSED) { adf_log_inln_err( accel_dev, offset, ADF_RAS_FATAL, "congestion mgmt ctpb global crossed fatal err"); *reset_required = true; } if (reg_val & ADF_C4XXX_CONGESTION_MGMT_XOFF_CIRQ_OUT) { adf_log_inln_err(accel_dev, offset, ADF_RAS_CORR, "congestion mgmt XOFF cirq out err"); } if (reg_val & ADF_C4XXX_CONGESTION_MGMT_XOFF_CIRQ_IN) { adf_log_inln_err(accel_dev, offset, ADF_RAS_CORR, "congestion mgmt XOFF cirq in err"); } /* Clear the interrupt and allow the next error to be logged */ ADF_CSR_WR(aram_base_addr, ADF_C4XXX_IC_CONGESTION_MGMT_INT + offset, reg_val); } static inline void adf_handle_inline_intr(struct adf_accel_dev *accel_dev, struct resource *aram_base_addr, u32 csr_offset, bool *reset_required) { adf_handle_cd_rf_par_err(accel_dev, aram_base_addr, csr_offset, reset_required); adf_handle_parser_uerr(accel_dev, aram_base_addr, csr_offset, reset_required); adf_handle_inln_rf_par_err(accel_dev, aram_base_addr, csr_offset, reset_required); adf_handle_congest_mngt_intr(accel_dev, aram_base_addr, csr_offset, reset_required); 
adf_handle_mac_intr(accel_dev, aram_base_addr, csr_offset, reset_required); } static inline void adf_process_errsou11(struct adf_accel_dev *accel_dev, struct resource *pmisc, u32 errsou, bool *reset_required) { struct resource *aram_base_addr = (&GET_BARS(accel_dev)[ADF_C4XXX_SRAM_BAR])->virt_addr; if (errsou & ADF_C4XXX_TI_MISC) adf_handle_ti_misc_err(accel_dev, pmisc); if (errsou & ADF_C4XXX_RI_PUSH_PULL_PAR_ERR) adf_handle_ri_push_pull_par_err(accel_dev, pmisc); if (errsou & ADF_C4XXX_TI_PUSH_PULL_PAR_ERR) adf_handle_ti_push_pull_par_err(accel_dev, pmisc); if (errsou & ADF_C4XXX_ARAM_CORR_ERR) adf_handle_aram_corr_err(accel_dev, aram_base_addr); if (errsou & ADF_C4XXX_ARAM_UNCORR_ERR) adf_handle_aram_uncorr_err(accel_dev, aram_base_addr); if (errsou & ADF_C4XXX_TI_PULL_PAR_ERR) adf_handle_ti_pull_par_err(accel_dev, pmisc); if (errsou & ADF_C4XXX_RI_PUSH_PAR_ERR) adf_handle_ri_push_par_err(accel_dev, pmisc); if (errsou & ADF_C4XXX_INLINE_INGRESS_INTR) adf_handle_inline_intr(accel_dev, aram_base_addr, ADF_C4XXX_INLINE_INGRESS_OFFSET, reset_required); if (errsou & ADF_C4XXX_INLINE_EGRESS_INTR) adf_handle_inline_intr(accel_dev, aram_base_addr, ADF_C4XXX_INLINE_EGRESS_OFFSET, reset_required); } bool adf_ras_interrupts(struct adf_accel_dev *accel_dev, bool *reset_required) { u32 errsou = 0; bool handled = false; struct adf_hw_device_data *hw_data = accel_dev->hw_device; u32 num_accels = hw_data->get_num_accels(hw_data); struct resource *pmisc = (&GET_BARS(accel_dev)[ADF_C4XXX_PMISC_BAR])->virt_addr; if (unlikely(!reset_required)) { device_printf(GET_DEV(accel_dev), "Invalid pointer reset_required\n"); return false; } /* errsou8 */ errsou = ADF_CSR_RD(pmisc, ADF_C4XXX_ERRSOU8); if (errsou & ADF_C4XXX_ERRSOU8_MECORR_MASK) { adf_process_errsou8(accel_dev, pmisc); handled = true; } /* errsou9 */ errsou = ADF_CSR_RD(pmisc, ADF_C4XXX_ERRSOU9); if (errsou & ADF_C4XXX_ERRSOU9_ERROR_MASK) { adf_process_errsou9(accel_dev, pmisc, errsou, reset_required); handled = true; } /* 
errsou10 */ errsou = ADF_CSR_RD(pmisc, ADF_C4XXX_ERRSOU10); if (errsou & ADF_C4XXX_ERRSOU10_RAS_MASK) { adf_process_errsou10( accel_dev, pmisc, errsou, num_accels, reset_required); handled = true; } /* errsou11 */ errsou = ADF_CSR_RD(pmisc, ADF_C4XXX_ERRSOU11); if (errsou & ADF_C4XXX_ERRSOU11_ERROR_MASK) { adf_process_errsou11(accel_dev, pmisc, errsou, reset_required); handled = true; } return handled; }