D54304.id168386.diff

Index: sys/dev/aq/aq_common.h
===================================================================
--- sys/dev/aq/aq_common.h
+++ sys/dev/aq/aq_common.h
@@ -37,14 +37,6 @@
#include <sys/types.h>
-#define s8 __int8_t
-#define u8 __uint8_t
-#define u16 __uint16_t
-#define s16 __int16_t
-#define u32 __uint32_t
-#define u64 __uint64_t
-#define s64 __int64_t
-#define s32 int
#define ETIME ETIMEDOUT
#define EOK 0
@@ -59,18 +51,18 @@
#endif
#define AQ_HW_WAIT_FOR(_B_, _US_, _N_) \
- do { \
+do { \
unsigned int i; \
- for (i = _N_; (!(_B_)) && i; --i) { \
- usec_delay(_US_); \
- } \
- if (!i) { \
- err = -1; \
- } \
- } while (0)
+ for (i = _N_; (!(_B_)) && i; --i) { \
+ usec_delay(_US_); \
+ } \
+ if (!i) { \
+ err = -1; \
+ } \
+} while (0)
-#define LOWORD(a) ((u16)(a))
+#define LOWORD(a) ((uint16_t)(a))
#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
#define AQ_VER "0.0.5"
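A minimal usage sketch of the reindented AQ_HW_WAIT_FOR polling macro, for review context only (the wrapper function below is hypothetical and not part of this patch). The macro evaluates the condition up to _N_ times, sleeping _US_ microseconds between tries, and sets a local err to -1 on timeout, so the caller must have an int err in scope:

    static int
    aq_wait_fw_ready_example(struct aq_hw *hw)
    {
            int err = 0;

            /* Poll the FW version register (0x18) every 10 us, at most
             * 1000 times; err becomes -1 if it never reads non-zero. */
            AQ_HW_WAIT_FOR(AQ_READ_REG(hw, 0x18) != 0, 10, 1000);

            return (err);
    }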
Index: sys/dev/aq/aq_dbg.h
===================================================================
--- sys/dev/aq/aq_dbg.h
+++ sys/dev/aq/aq_dbg.h
@@ -38,8 +38,8 @@
#ifndef AQ_DBG_H
#define AQ_DBG_H
-#include <sys/syslog.h>
#include <sys/systm.h>
+#include <sys/syslog.h>
/*
Debug levels:
0 - no debug
@@ -78,10 +78,10 @@
#if AQ_CFG_DEBUG_LVL > 2
#define AQ_DBG_DUMP_DESC(desc) { \
- volatile u8 *raw = (volatile u8*)(desc); \
- printf( "07-00 %02X%02X%02X%02X %02X%02X%02X%02X 15-08 %02X%02X%02X%02X %02X%02X%02X%02X\n", \
- raw[7], raw[6], raw[5], raw[4], raw[3], raw[2], raw[1], raw[0], \
- raw[15], raw[14], raw[13], raw[12], raw[11], raw[10], raw[9], raw[8]); \
+ volatile uint8_t *raw = (volatile uint8_t*)(desc); \
+ printf( "07-00 %02X%02X%02X%02X %02X%02X%02X%02X 15-08 %02X%02X%02X%02X %02X%02X%02X%02X\n", \
+ raw[7], raw[6], raw[5], raw[4], raw[3], raw[2], raw[1], raw[0], \
+ raw[15], raw[14], raw[13], raw[12], raw[11], raw[10], raw[9], raw[8]); \
}\
#else
@@ -90,27 +90,27 @@
typedef enum aq_debug_level
{
- lvl_error = LOG_ERR,
- lvl_warn = LOG_WARNING,
- lvl_trace = LOG_NOTICE,
- lvl_detail = LOG_INFO,
+ lvl_error = LOG_ERR,
+ lvl_warn = LOG_WARNING,
+ lvl_trace = LOG_NOTICE,
+ lvl_detail = LOG_INFO,
} aq_debug_level;
typedef enum aq_debug_category
{
- dbg_init = 1,
- dbg_config = 1 << 1,
- dbg_tx = 1 << 2,
- dbg_rx = 1 << 3,
- dbg_intr = 1 << 4,
- dbg_fw = 1 << 5,
+ dbg_init = 1,
+ dbg_config = 1 << 1,
+ dbg_tx = 1 << 2,
+ dbg_rx = 1 << 3,
+ dbg_intr = 1 << 4,
+ dbg_fw = 1 << 5,
} aq_debug_category;
#define __FILENAME__ (__builtin_strrchr(__FILE__, '/') ? __builtin_strrchr(__FILE__, '/') + 1 : __FILE__)
extern const aq_debug_level dbg_level_;
-extern const u32 dbg_categories_;
+extern const uint32_t dbg_categories_;
#define log_base_(_lvl, _fmt, args...) printf( "atlantic: " _fmt "\n", ##args)
@@ -130,9 +130,9 @@
#define trace(_cat, _fmt, args...) trace_base_(lvl_trace, _cat, _fmt, ##args)
#define trace_detail(_cat, _fmt, args...) trace_base_(lvl_detail, _cat, _fmt, ##args)
-void trace_aq_tx_descr(int ring_idx, unsigned int pointer, volatile u64 descr[2]);
-void trace_aq_rx_descr(int ring_idx, unsigned int pointer, volatile u64 descr[2]);
-void trace_aq_tx_context_descr(int ring_idx, unsigned int pointer, volatile u64 descr[2]);
+void trace_aq_tx_descr(int ring_idx, unsigned int pointer, volatile uint64_t descr[2]);
+void trace_aq_rx_descr(int ring_idx, unsigned int pointer, volatile uint64_t descr[2]);
+void trace_aq_tx_context_descr(int ring_idx, unsigned int pointer, volatile uint64_t descr[2]);
void DumpHex(const void* data, size_t size);
#endif // AQ_DBG_H
Index: sys/dev/aq/aq_dbg.c
===================================================================
--- sys/dev/aq/aq_dbg.c
+++ sys/dev/aq/aq_dbg.c
@@ -46,7 +46,7 @@
const aq_debug_level dbg_level_ = lvl_detail;
-const u32 dbg_categories_ = dbg_init | dbg_config | dbg_fw;
+const uint32_t dbg_categories_ = dbg_init | dbg_config | dbg_fw;
@@ -55,24 +55,25 @@
(BIT(BIT_BEGIN - BIT_END + 1) -1))
#define __field(TYPE, VAR) TYPE VAR;
-void trace_aq_tx_descr(int ring_idx, unsigned int pointer, volatile u64 descr[2])
+void
+trace_aq_tx_descr(int ring_idx, unsigned int pointer, volatile uint64_t descr[2])
{
#if AQ_CFG_DEBUG_LVL > 2
struct __entry{
__field(unsigned int, ring_idx)
__field(unsigned int, pointer)
/* Tx Descriptor */
- __field(u64, data_buf_addr)
- __field(u32, pay_len)
- __field(u8, ct_en)
- __field(u8, ct_idx)
- __field(u16, rsvd2)
- __field(u8, tx_cmd)
- __field(u8, eop)
- __field(u8, dd)
- __field(u16, buf_len)
- __field(u8, rsvd1)
- __field(u8, des_typ)
+ __field(uint64_t, data_buf_addr)
+ __field(uint32_t, pay_len)
+ __field(uint8_t, ct_en)
+ __field(uint8_t, ct_idx)
+ __field(uint16_t, rsvd2)
+ __field(uint8_t, tx_cmd)
+ __field(uint8_t, eop)
+ __field(uint8_t, dd)
+ __field(uint16_t, buf_len)
+ __field(uint8_t, rsvd1)
+ __field(uint8_t, des_typ)
} entry;
entry.ring_idx = ring_idx;
@@ -98,27 +99,28 @@
#endif
}
-void trace_aq_rx_descr(int ring_idx, unsigned int pointer, volatile u64 descr[2])
+void
+trace_aq_rx_descr(int ring_idx, unsigned int pointer, volatile uint64_t descr[2])
{
#if AQ_CFG_DEBUG_LVL > 2
- u8 dd;
- u8 eop;
- u8 rx_stat;
- u8 rx_estat;
- u8 rsc_cnt;
- u16 pkt_len;
- u16 next_desp;
- u16 vlan_tag;
-
- u8 rss_type;
- u8 pkt_type;
- u8 rdm_err;
- u8 avb_ts;
- u8 rsvd;
- u8 rx_cntl;
- u8 sph;
- u16 hdr_len;
- u32 rss_hash;
+ uint8_t dd;
+ uint8_t eop;
+ uint8_t rx_stat;
+ uint8_t rx_estat;
+ uint8_t rsc_cnt;
+ uint16_t pkt_len;
+ uint16_t next_desp;
+ uint16_t vlan_tag;
+
+ uint8_t rss_type;
+ uint8_t pkt_type;
+ uint8_t rdm_err;
+ uint8_t avb_ts;
+ uint8_t rsvd;
+ uint8_t rx_cntl;
+ uint8_t sph;
+ uint16_t hdr_len;
+ uint32_t rss_hash;
rss_hash = DESCR_FIELD(descr[0], 63, 32);
hdr_len = DESCR_FIELD(descr[0], 31, 22);
@@ -149,24 +151,26 @@
#endif
}
-void trace_aq_tx_context_descr(int ring_idx, unsigned int pointer, volatile u64 descr[2])
+void
+trace_aq_tx_context_descr(int ring_idx, unsigned int pointer,
+ volatile uint64_t descr[2])
{
#if AQ_CFG_DEBUG_LVL > 2
struct __entry_s{
__field(unsigned int, ring_idx)
__field(unsigned int, pointer)
/* Tx Context Descriptor */
- __field(u16, out_len)
- __field(u8, tun_len)
- __field(u64, resvd3)
- __field(u16, mss_len)
- __field(u8, l4_len)
- __field(u8, l3_len)
- __field(u8, l2_len)
- __field(u8, ct_cmd)
- __field(u16, vlan_tag)
- __field(u8, ct_idx)
- __field(u8, des_typ)
+ __field(uint16_t, out_len)
+ __field(uint8_t, tun_len)
+ __field(uint64_t, resvd3)
+ __field(uint16_t, mss_len)
+ __field(uint8_t, l4_len)
+ __field(uint8_t, l3_len)
+ __field(uint8_t, l2_len)
+ __field(uint8_t, ct_cmd)
+ __field(uint16_t, vlan_tag)
+ __field(uint8_t, ct_idx)
+ __field(uint8_t, des_typ)
} entry;
struct __entry_s *__entry = &entry;
__entry->ring_idx = ring_idx;
@@ -192,7 +196,8 @@
#endif
}
-void DumpHex(const void* data, size_t size) {
+void
+DumpHex(const void* data, size_t size) {
#if AQ_CFG_DEBUG_LVL > 3
char ascii[17];
size_t i, j;
@@ -234,4 +239,4 @@
}
}
#endif
-}
\ No newline at end of file
+}
Index: sys/dev/aq/aq_device.h
===================================================================
--- sys/dev/aq/aq_device.h
+++ sys/dev/aq/aq_device.h
@@ -54,40 +54,40 @@
AQ_LINK_10G )
struct aq_stats_s {
- u64 prc;
- u64 uprc;
- u64 mprc;
- u64 bprc;
- u64 cprc;
- u64 erpr;
- u64 dpc;
- u64 brc;
- u64 ubrc;
- u64 mbrc;
- u64 bbrc;
-
- u64 ptc;
- u64 uptc;
- u64 mptc;
- u64 bptc;
- u64 erpt;
- u64 btc;
- u64 ubtc;
- u64 mbtc;
- u64 bbtc;
+ uint64_t prc;
+ uint64_t uprc;
+ uint64_t mprc;
+ uint64_t bprc;
+ uint64_t cprc;
+ uint64_t erpr;
+ uint64_t dpc;
+ uint64_t brc;
+ uint64_t ubrc;
+ uint64_t mbrc;
+ uint64_t bbrc;
+
+ uint64_t ptc;
+ uint64_t uptc;
+ uint64_t mptc;
+ uint64_t bptc;
+ uint64_t erpt;
+ uint64_t btc;
+ uint64_t ubtc;
+ uint64_t mbtc;
+ uint64_t bbtc;
};
enum aq_dev_state_e {
- AQ_DEV_STATE_UNLOAD,
- AQ_DEV_STATE_PCI_STOP,
- AQ_DEV_STATE_DOWN,
- AQ_DEV_STATE_UP,
+ AQ_DEV_STATE_UNLOAD,
+ AQ_DEV_STATE_PCI_STOP,
+ AQ_DEV_STATE_DOWN,
+ AQ_DEV_STATE_UP,
};
struct aq_rx_filters {
- unsigned int rule_cnt;
- struct aq_rx_filter_vlan vlan_filters[AQ_HW_VLAN_MAX_FILTERS];
- struct aq_rx_filter_l2 etype_filters[AQ_HW_ETYPE_MAX_FILTERS];
+ unsigned int rule_cnt;
+ struct aq_rx_filter_vlan vlan_filters[AQ_HW_VLAN_MAX_FILTERS];
+ struct aq_rx_filter_l2 etype_filters[AQ_HW_ETYPE_MAX_FILTERS];
};
struct aq_vlan_tag {
@@ -102,7 +102,7 @@
if_shared_ctx_t sctx;
struct ifmedia * media;
- struct aq_hw hw;
+ struct aq_hw hw;
enum aq_media_type media_type;
uint32_t link_speeds;
@@ -142,7 +142,7 @@
void aq_initmedia(aq_dev_t *aq_dev);
int aq_linkstat_isr(void *arg);
int aq_isr_rx(void *arg);
-void aq_mediastatus_update(aq_dev_t *aq_dev, u32 link_speed, const struct aq_hw_fc_info *fc_neg);
+void aq_mediastatus_update(aq_dev_t *aq_dev, uint32_t link_speed, const struct aq_hw_fc_info *fc_neg);
void aq_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr);
int aq_mediachange(struct ifnet *ifp);
void aq_if_update_admin_status(if_ctx_t ctx);
Index: sys/dev/aq/aq_fw.h
===================================================================
--- sys/dev/aq/aq_fw.h
+++ sys/dev/aq/aq_fw.h
@@ -38,35 +38,36 @@
typedef enum aq_fw_link_speed
{
- aq_fw_none = 0,
- aq_fw_100M = (1 << 0),
- aq_fw_1G = (1 << 1),
- aq_fw_2G5 = (1 << 2),
- aq_fw_5G = (1 << 3),
- aq_fw_10G = (1 << 4),
+ aq_fw_none = 0,
+ aq_fw_100M = (1 << 0),
+ aq_fw_1G = (1 << 1),
+ aq_fw_2G5 = (1 << 2),
+ aq_fw_5G = (1 << 3),
+ aq_fw_10G = (1 << 4),
} aq_fw_link_speed_t;
typedef enum aq_fw_link_fc
{
- aq_fw_fc_none = 0,
- aq_fw_fc_ENABLE_RX = BIT(0),
- aq_fw_fc_ENABLE_TX = BIT(1),
- aq_fw_fc_ENABLE_ALL = aq_fw_fc_ENABLE_RX | aq_fw_fc_ENABLE_TX,
+ aq_fw_fc_none = 0,
+ aq_fw_fc_ENABLE_RX = BIT(0),
+ aq_fw_fc_ENABLE_TX = BIT(1),
+ aq_fw_fc_ENABLE_ALL = aq_fw_fc_ENABLE_RX | aq_fw_fc_ENABLE_TX,
} aq_fw_link_fc_t;
-#define aq_fw_speed_auto (aq_fw_100M | aq_fw_1G | aq_fw_2G5 | aq_fw_5G | aq_fw_10G)
+#define aq_fw_speed_auto \
+ (aq_fw_100M | aq_fw_1G | aq_fw_2G5 | aq_fw_5G | aq_fw_10G)
struct aq_firmware_ops
{
- int (*reset)(struct aq_hw* hal);
+ int (*reset)(struct aq_hw* hal);
- int (*set_mode)(struct aq_hw* hal, enum aq_hw_fw_mpi_state_e mode, aq_fw_link_speed_t speed);
- int (*get_mode)(struct aq_hw* hal, enum aq_hw_fw_mpi_state_e* mode, aq_fw_link_speed_t* speed, aq_fw_link_fc_t* fc);
+ int (*set_mode)(struct aq_hw* hal, enum aq_hw_fw_mpi_state_e mode, aq_fw_link_speed_t speed);
+ int (*get_mode)(struct aq_hw* hal, enum aq_hw_fw_mpi_state_e* mode, aq_fw_link_speed_t* speed, aq_fw_link_fc_t* fc);
- int (*get_mac_addr)(struct aq_hw* hal, u8* mac_addr);
- int (*get_stats)(struct aq_hw* hal, struct aq_hw_stats_s* stats);
+ int (*get_mac_addr)(struct aq_hw* hal, uint8_t* mac_addr);
+ int (*get_stats)(struct aq_hw* hal, struct aq_hw_stats_s* stats);
- int (*led_control)(struct aq_hw* hal, u32 mode);
+ int (*led_control)(struct aq_hw* hal, uint32_t mode);
};
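A minimal sketch (not in the patch) of how a caller dispatches through struct aq_firmware_ops once aq_fw_ops_init() has pointed hw->fw_ops at aq_fw1x_ops or aq_fw2x_ops; the wrapper name is hypothetical, and optional members such as led_control (left NULL by the 1.x table) should be checked before use:

    static int
    aq_fw_get_mac_addr_example(struct aq_hw *hw, uint8_t *mac)
    {
            /* hw->fw_ops is selected by the firmware major version. */
            if (hw->fw_ops == NULL || hw->fw_ops->get_mac_addr == NULL)
                    return (-ENOTSUP);
            return (hw->fw_ops->get_mac_addr(hw, mac));
    }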
Index: sys/dev/aq/aq_fw.c
===================================================================
--- sys/dev/aq/aq_fw.c
+++ sys/dev/aq/aq_fw.c
@@ -55,10 +55,10 @@
typedef enum aq_fw_bootloader_mode
{
- boot_mode_unknown = 0,
- boot_mode_flb,
- boot_mode_rbl_flash,
- boot_mode_rbl_host_bootload,
+ boot_mode_unknown = 0,
+ boot_mode_flb,
+ boot_mode_rbl_flash,
+ boot_mode_rbl_host_bootload,
} aq_fw_bootloader_mode;
#define AQ_CFG_HOST_BOOT_DISABLE 0
@@ -67,22 +67,22 @@
#define MAC_FW_START_TIMEOUT_MS 10000
#define FW_LOADER_START_TIMEOUT_MS 10000
-const u32 NO_RESET_SCRATCHPAD_ADDRESS = 0;
-const u32 NO_RESET_SCRATCHPAD_LEN_RES = 1;
-const u32 NO_RESET_SCRATCHPAD_RBL_STATUS = 2;
-const u32 NO_RESET_SCRATCHPAD_RBL_STATUS_2 = 3;
-const u32 WRITE_DATA_COMPLETE = 0x55555555;
-const u32 WRITE_DATA_CHUNK_DONE = 0xaaaaaaaa;
-const u32 WRITE_DATA_FAIL_WRONG_ADDRESS = 0x66666666;
+const uint32_t NO_RESET_SCRATCHPAD_ADDRESS = 0;
+const uint32_t NO_RESET_SCRATCHPAD_LEN_RES = 1;
+const uint32_t NO_RESET_SCRATCHPAD_RBL_STATUS = 2;
+const uint32_t NO_RESET_SCRATCHPAD_RBL_STATUS_2 = 3;
+const uint32_t WRITE_DATA_COMPLETE = 0x55555555;
+const uint32_t WRITE_DATA_CHUNK_DONE = 0xaaaaaaaa;
+const uint32_t WRITE_DATA_FAIL_WRONG_ADDRESS = 0x66666666;
-const u32 WAIT_WRITE_TIMEOUT = 1;
-const u32 WAIT_WRITE_TIMEOUT_COUNT = 1000;
+const uint32_t WAIT_WRITE_TIMEOUT = 1;
+const uint32_t WAIT_WRITE_TIMEOUT_COUNT = 1000;
-const u32 RBL_STATUS_SUCCESS = 0xabba;
-const u32 RBL_STATUS_FAILURE = 0xbad;
-const u32 RBL_STATUS_HOST_BOOT = 0xf1a7;
+const uint32_t RBL_STATUS_SUCCESS = 0xabba;
+const uint32_t RBL_STATUS_FAILURE = 0xbad;
+const uint32_t RBL_STATUS_HOST_BOOT = 0xf1a7;
-const u32 SCRATCHPAD_FW_LOADER_STATUS = (0x40 / sizeof(u32));
+const uint32_t SCRATCHPAD_FW_LOADER_STATUS = (0x40 / sizeof(uint32_t));
extern struct aq_firmware_ops aq_fw1x_ops;
@@ -95,258 +95,268 @@
int wait_init_mac_firmware_(struct aq_hw* hw);
-int aq_fw_reset(struct aq_hw* hw)
+int
+aq_fw_reset(struct aq_hw* hw)
{
- int ver = AQ_READ_REG(hw, 0x18);
- u32 bootExitCode = 0;
- int k;
-
- for (k = 0; k < 1000; ++k) {
- u32 flbStatus = reg_glb_daisy_chain_status1_get(hw);
- bootExitCode = AQ_READ_REG(hw, 0x388);
- if (flbStatus != 0x06000000 || bootExitCode != 0)
- break;
- }
-
- if (k == 1000) {
- aq_log_error("Neither RBL nor FLB started");
- return (-EBUSY);
- }
-
- hw->rbl_enabled = bootExitCode != 0;
-
- trace(dbg_init, "RBL enabled = %d", hw->rbl_enabled);
-
- /* Having FW version 0 is an indicator that cold start
- * is in progress. This means two things:
- * 1) Driver have to wait for FW/HW to finish boot (500ms giveup)
- * 2) Driver may skip reset sequence and save time.
- */
- if (hw->fast_start_enabled && !ver) {
- int err = wait_init_mac_firmware_(hw);
- /* Skip reset as it just completed */
- if (!err)
- return (0);
- }
-
- aq_fw_bootloader_mode mode = boot_mode_unknown;
- int err = mac_soft_reset_(hw, &mode);
- if (err < 0) {
- aq_log_error("MAC reset failed: %d", err);
- return (err);
- }
-
- switch (mode) {
- case boot_mode_flb:
- aq_log("FLB> F/W successfully loaded from flash.");
- hw->flash_present = true;
- return wait_init_mac_firmware_(hw);
-
- case boot_mode_rbl_flash:
- aq_log("RBL> F/W loaded from flash. Host Bootload disabled.");
- hw->flash_present = true;
- return wait_init_mac_firmware_(hw);
-
- case boot_mode_unknown:
- aq_log_error("F/W bootload error: unknown bootloader type");
- return (-ENOTSUP);
-
- case boot_mode_rbl_host_bootload:
+ int ver = AQ_READ_REG(hw, 0x18);
+ uint32_t bootExitCode = 0;
+ int k;
+
+ for (k = 0; k < 1000; ++k) {
+ uint32_t flbStatus = reg_glb_daisy_chain_status1_get(hw);
+ bootExitCode = AQ_READ_REG(hw, 0x388);
+ if (flbStatus != 0x06000000 || bootExitCode != 0)
+ break;
+ }
+
+ if (k == 1000) {
+ aq_log_error("Neither RBL nor FLB started");
+ return (-EBUSY);
+ }
+
+ hw->rbl_enabled = bootExitCode != 0;
+
+ trace(dbg_init, "RBL enabled = %d", hw->rbl_enabled);
+
+ /* Having FW version 0 is an indicator that cold start
+ * is in progress. This means two things:
+ * 1) Driver have to wait for FW/HW to finish boot (500ms giveup)
+ * 2) Driver may skip reset sequence and save time.
+ */
+ if (hw->fast_start_enabled && !ver) {
+ int err = wait_init_mac_firmware_(hw);
+ /* Skip reset as it just completed */
+ if (!err)
+ return (0);
+ }
+
+ aq_fw_bootloader_mode mode = boot_mode_unknown;
+ int err = mac_soft_reset_(hw, &mode);
+ if (err < 0) {
+ aq_log_error("MAC reset failed: %d", err);
+ return (err);
+ }
+
+ switch (mode) {
+ case boot_mode_flb:
+ aq_log("FLB> F/W successfully loaded from flash.");
+ hw->flash_present = true;
+ return wait_init_mac_firmware_(hw);
+
+ case boot_mode_rbl_flash:
+ aq_log("RBL> F/W loaded from flash. Host Bootload disabled.");
+ hw->flash_present = true;
+ return wait_init_mac_firmware_(hw);
+
+ case boot_mode_unknown:
+ aq_log_error("F/W bootload error: unknown bootloader type");
+ return (-ENOTSUP);
+
+ case boot_mode_rbl_host_bootload:
#if AQ_CFG_HOST_BOOT_DISABLE
- aq_log_error("RBL> Host Bootload mode: this driver does not support Host Boot");
- return (-ENOTSUP);
+ aq_log_error("RBL> Host Bootload mode: this driver does not support Host Boot");
+ return (-ENOTSUP);
#else
- trace(dbg_init, "RBL> Host Bootload mode");
- break;
+ trace(dbg_init, "RBL> Host Bootload mode");
+ break;
#endif // HOST_BOOT_DISABLE
- }
+ }
- /*
- * #todo: Host Boot
- */
- aq_log_error("RBL> F/W Host Bootload not implemented");
+ /*
+ * #todo: Host Boot
+ */
+ aq_log_error("RBL> F/W Host Bootload not implemented");
- return (-ENOTSUP);
+ return (-ENOTSUP);
}
-int aq_fw_ops_init(struct aq_hw* hw)
+int
+aq_fw_ops_init(struct aq_hw* hw)
{
- if (hw->fw_version.raw == 0)
- hw->fw_version.raw = AQ_READ_REG(hw, 0x18);
-
- aq_log("MAC F/W version is %d.%d.%d",
- hw->fw_version.major_version, hw->fw_version.minor_version,
- hw->fw_version.build_number);
-
- if (hw->fw_version.major_version == 1) {
- trace(dbg_init, "using F/W ops v1.x");
- hw->fw_ops = &aq_fw1x_ops;
- return (EOK);
- } else if (hw->fw_version.major_version >= 2) {
- trace(dbg_init, "using F/W ops v2.x");
- hw->fw_ops = &aq_fw2x_ops;
- return (EOK);
- }
-
- aq_log_error("aq_fw_ops_init(): invalid F/W version %#x", hw->fw_version.raw);
- return (-ENOTSUP);
+ if (hw->fw_version.raw == 0)
+ hw->fw_version.raw = AQ_READ_REG(hw, 0x18);
+
+ aq_log("MAC F/W version is %d.%d.%d",
+ hw->fw_version.major_version, hw->fw_version.minor_version,
+ hw->fw_version.build_number);
+
+ if (hw->fw_version.major_version == 1) {
+ trace(dbg_init, "using F/W ops v1.x");
+ hw->fw_ops = &aq_fw1x_ops;
+ return (EOK);
+ } else if (hw->fw_version.major_version >= 2) {
+ trace(dbg_init, "using F/W ops v2.x");
+ hw->fw_ops = &aq_fw2x_ops;
+ return (EOK);
+ }
+
+ aq_log_error("aq_fw_ops_init(): invalid F/W version %#x",
+ hw->fw_version.raw);
+ return (-ENOTSUP);
}
-int mac_soft_reset_(struct aq_hw* hw, aq_fw_bootloader_mode* mode /*= nullptr*/)
+int
+mac_soft_reset_(struct aq_hw* hw, aq_fw_bootloader_mode* mode /*= nullptr*/)
{
- if (hw->rbl_enabled) {
- return mac_soft_reset_rbl_(hw, mode);
- } else {
- if (mode)
- *mode = boot_mode_flb;
-
- return mac_soft_reset_flb_(hw);
- }
+ if (hw->rbl_enabled) {
+ return mac_soft_reset_rbl_(hw, mode);
+ } else {
+ if (mode)
+ *mode = boot_mode_flb;
+
+ return mac_soft_reset_flb_(hw);
+ }
}
-int mac_soft_reset_flb_(struct aq_hw* hw)
+int
+mac_soft_reset_flb_(struct aq_hw* hw)
{
- int k;
-
- reg_global_ctl2_set(hw, 0x40e1);
- // Let Felicity hardware to complete SMBUS transaction before Global software reset.
- msec_delay(50);
-
- /*
- * If SPI burst transaction was interrupted(before running the script), global software
- * reset may not clear SPI interface. Clean it up manually before global reset.
- */
- reg_glb_nvr_provisioning2_set(hw, 0xa0);
- reg_glb_nvr_interface1_set(hw, 0x9f);
- reg_glb_nvr_interface1_set(hw, 0x809f);
- msec_delay(50);
-
- reg_glb_standard_ctl1_set(hw, (reg_glb_standard_ctl1_get(hw) & ~glb_reg_res_dis_msk) | glb_soft_res_msk);
-
- // Kickstart.
- reg_global_ctl2_set(hw, 0x80e0);
- reg_mif_power_gating_enable_control_set(hw, 0);
- if (!hw->fast_start_enabled)
- reg_glb_general_provisioning9_set(hw, 1);
-
- /*
- * For the case SPI burst transaction was interrupted (by MCP reset above),
- * wait until it is completed by hardware.
- */
- msec_delay(50); // Sleep for 10 ms.
-
- /* MAC Kickstart */
- if (!hw->fast_start_enabled) {
- reg_global_ctl2_set(hw, 0x180e0);
-
- u32 flb_status = 0;
- int k;
- for (k = 0; k < 1000; ++k) {
- flb_status = reg_glb_daisy_chain_status1_get(hw) & 0x10;
- if (flb_status != 0)
- break;
- msec_delay(10); // Sleep for 10 ms.
- }
-
- if (flb_status == 0) {
- trace_error(dbg_init, "FLB> MAC kickstart failed: timed out");
- return (false);
- }
-
- trace(dbg_init, "FLB> MAC kickstart done, %d ms", k);
- /* FW reset */
- reg_global_ctl2_set(hw, 0x80e0);
- // Let Felicity hardware complete SMBUS transaction before Global software reset.
- msec_delay(50);
- }
- reg_glb_cpu_sem_set(hw, 1, 0);
-
- // PHY Kickstart: #undone
-
- // Global software reset
- rx_rx_reg_res_dis_set(hw, 0);
- tx_tx_reg_res_dis_set(hw, 0);
- mpi_tx_reg_res_dis_set(hw, 0);
- reg_glb_standard_ctl1_set(hw, (reg_glb_standard_ctl1_get(hw) & ~glb_reg_res_dis_msk) | glb_soft_res_msk);
-
- bool restart_completed = false;
- for (k = 0; k < 1000; ++k) {
- restart_completed = reg_glb_fw_image_id1_get(hw) != 0;
- if (restart_completed)
- break;
- msec_delay(10);
- }
-
- if (!restart_completed) {
- trace_error(dbg_init, "FLB> Global Soft Reset failed");
- return (false);
- }
-
- trace(dbg_init, "FLB> F/W restart: %d ms", k * 10);
- return (true);
+ int k;
+
+ reg_global_ctl2_set(hw, 0x40e1);
+ // Let Felicity hardware to complete SMBUS transaction before Global software reset.
+ msec_delay(50);
+
+ /*
+ * If SPI burst transaction was interrupted(before running the script), global software
+ * reset may not clear SPI interface. Clean it up manually before global reset.
+ */
+ reg_glb_nvr_provisioning2_set(hw, 0xa0);
+ reg_glb_nvr_interface1_set(hw, 0x9f);
+ reg_glb_nvr_interface1_set(hw, 0x809f);
+ msec_delay(50);
+
+ reg_glb_standard_ctl1_set(hw, (reg_glb_standard_ctl1_get(hw) & ~glb_reg_res_dis_msk) | glb_soft_res_msk);
+
+ // Kickstart.
+ reg_global_ctl2_set(hw, 0x80e0);
+ reg_mif_power_gating_enable_control_set(hw, 0);
+ if (!hw->fast_start_enabled)
+ reg_glb_general_provisioning9_set(hw, 1);
+
+ /*
+ * For the case SPI burst transaction was interrupted (by MCP reset above),
+ * wait until it is completed by hardware.
+ */
+ msec_delay(50); // Sleep for 10 ms.
+
+ /* MAC Kickstart */
+ if (!hw->fast_start_enabled) {
+ reg_global_ctl2_set(hw, 0x180e0);
+
+ uint32_t flb_status = 0;
+ int k;
+ for (k = 0; k < 1000; ++k) {
+ flb_status = reg_glb_daisy_chain_status1_get(hw) & 0x10;
+ if (flb_status != 0)
+ break;
+ msec_delay(10); // Sleep for 10 ms.
+ }
+
+ if (flb_status == 0) {
+ trace_error(dbg_init,
+ "FLB> MAC kickstart failed: timed out");
+ return (false);
+ }
+
+ trace(dbg_init, "FLB> MAC kickstart done, %d ms", k);
+ /* FW reset */
+ reg_global_ctl2_set(hw, 0x80e0);
+ // Let Felicity hardware complete SMBUS transaction before Global software reset.
+ msec_delay(50);
+ }
+ reg_glb_cpu_sem_set(hw, 1, 0);
+
+ // PHY Kickstart: #undone
+
+ // Global software reset
+ rx_rx_reg_res_dis_set(hw, 0);
+ tx_tx_reg_res_dis_set(hw, 0);
+ mpi_tx_reg_res_dis_set(hw, 0);
+ reg_glb_standard_ctl1_set(hw, (reg_glb_standard_ctl1_get(hw) & ~glb_reg_res_dis_msk) | glb_soft_res_msk);
+
+ bool restart_completed = false;
+ for (k = 0; k < 1000; ++k) {
+ restart_completed = reg_glb_fw_image_id1_get(hw) != 0;
+ if (restart_completed)
+ break;
+ msec_delay(10);
+ }
+
+ if (!restart_completed) {
+ trace_error(dbg_init, "FLB> Global Soft Reset failed");
+ return (false);
+ }
+
+ trace(dbg_init, "FLB> F/W restart: %d ms", k * 10);
+ return (true);
}
-int mac_soft_reset_rbl_(struct aq_hw* hw, aq_fw_bootloader_mode* mode)
+int
+mac_soft_reset_rbl_(struct aq_hw* hw, aq_fw_bootloader_mode* mode)
{
- trace(dbg_init, "RBL> MAC reset STARTED!");
-
- reg_global_ctl2_set(hw, 0x40e1);
- reg_glb_cpu_sem_set(hw, 1, 0);
- reg_mif_power_gating_enable_control_set(hw, 0);
-
- // MAC FW will reload PHY FW if 1E.1000.3 was cleaned - #undone
-
- reg_glb_cpu_no_reset_scratchpad_set(hw, 0xDEAD, NO_RESET_SCRATCHPAD_RBL_STATUS);
-
- // Global software reset
- rx_rx_reg_res_dis_set(hw, 0);
- tx_tx_reg_res_dis_set(hw, 0);
- mpi_tx_reg_res_dis_set(hw, 0);
- reg_glb_standard_ctl1_set(hw, (reg_glb_standard_ctl1_get(hw) & ~glb_reg_res_dis_msk) | glb_soft_res_msk);
-
- reg_global_ctl2_set(hw, 0x40e0);
-
- // Wait for RBL to finish boot process.
- u16 rbl_status = 0;
- for (int k = 0; k < RBL_TIMEOUT_MS; ++k) {
- rbl_status = LOWORD(reg_glb_cpu_no_reset_scratchpad_get(hw, NO_RESET_SCRATCHPAD_RBL_STATUS));
- if (rbl_status != 0 && rbl_status != 0xDEAD)
- break;
-
- msec_delay(1);
- }
-
- if (rbl_status == 0 || rbl_status == 0xDEAD) {
- trace_error(dbg_init, "RBL> RBL restart failed: timeout");
- return (-EBUSY);
- }
-
- if (rbl_status == RBL_STATUS_SUCCESS) {
- if (mode)
- *mode = boot_mode_rbl_flash;
- trace(dbg_init, "RBL> reset complete! [Flash]");
- } else if (rbl_status == RBL_STATUS_HOST_BOOT) {
- if (mode)
- *mode = boot_mode_rbl_host_bootload;
- trace(dbg_init, "RBL> reset complete! [Host Bootload]");
- } else {
- trace_error(dbg_init, "unknown RBL status 0x%x", rbl_status);
- return (-EBUSY);
- }
-
- return (EOK);
+ trace(dbg_init, "RBL> MAC reset STARTED!");
+
+ reg_global_ctl2_set(hw, 0x40e1);
+ reg_glb_cpu_sem_set(hw, 1, 0);
+ reg_mif_power_gating_enable_control_set(hw, 0);
+
+ // MAC FW will reload PHY FW if 1E.1000.3 was cleaned - #undone
+
+ reg_glb_cpu_no_reset_scratchpad_set(hw, 0xDEAD,
+ NO_RESET_SCRATCHPAD_RBL_STATUS);
+
+ // Global software reset
+ rx_rx_reg_res_dis_set(hw, 0);
+ tx_tx_reg_res_dis_set(hw, 0);
+ mpi_tx_reg_res_dis_set(hw, 0);
+ reg_glb_standard_ctl1_set(hw, (reg_glb_standard_ctl1_get(hw) & ~glb_reg_res_dis_msk) | glb_soft_res_msk);
+
+ reg_global_ctl2_set(hw, 0x40e0);
+
+ // Wait for RBL to finish boot process.
+ uint16_t rbl_status = 0;
+ for (int k = 0; k < RBL_TIMEOUT_MS; ++k) {
+ rbl_status = LOWORD(reg_glb_cpu_no_reset_scratchpad_get(hw, NO_RESET_SCRATCHPAD_RBL_STATUS));
+ if (rbl_status != 0 && rbl_status != 0xDEAD)
+ break;
+
+ msec_delay(1);
+ }
+
+ if (rbl_status == 0 || rbl_status == 0xDEAD) {
+ trace_error(dbg_init, "RBL> RBL restart failed: timeout");
+ return (-EBUSY);
+ }
+
+ if (rbl_status == RBL_STATUS_SUCCESS) {
+ if (mode)
+ *mode = boot_mode_rbl_flash;
+ trace(dbg_init, "RBL> reset complete! [Flash]");
+ } else if (rbl_status == RBL_STATUS_HOST_BOOT) {
+ if (mode)
+ *mode = boot_mode_rbl_host_bootload;
+ trace(dbg_init, "RBL> reset complete! [Host Bootload]");
+ } else {
+ trace_error(dbg_init, "unknown RBL status 0x%x", rbl_status);
+ return (-EBUSY);
+ }
+
+ return (EOK);
}
-int wait_init_mac_firmware_(struct aq_hw* hw)
+int
+wait_init_mac_firmware_(struct aq_hw* hw)
{
- for (int i = 0; i < MAC_FW_START_TIMEOUT_MS; ++i) {
- if ((hw->fw_version.raw = AQ_READ_REG(hw, 0x18)) != 0)
- return (EOK);
+ for (int i = 0; i < MAC_FW_START_TIMEOUT_MS; ++i) {
+ if ((hw->fw_version.raw = AQ_READ_REG(hw, 0x18)) != 0)
+ return (EOK);
- msec_delay(1);
- }
+ msec_delay(1);
+ }
- trace_error(dbg_init, "timeout waiting for reg 0x18. MAC f/w NOT READY");
- return (-EBUSY);
+ trace_error(dbg_init,
+ "timeout waiting for reg 0x18. MAC f/w NOT READY");
+ return (-EBUSY);
}
Index: sys/dev/aq/aq_fw1x.c
===================================================================
--- sys/dev/aq/aq_fw1x.c
+++ sys/dev/aq/aq_fw1x.c
@@ -31,7 +31,7 @@
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-
+
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
@@ -50,264 +50,275 @@
typedef enum fw1x_mode {
- FW1X_MPI_DEINIT = 0,
- FW1X_MPI_RESERVED = 1,
- FW1X_MPI_INIT = 2,
- FW1X_MPI_POWER = 4,
+ FW1X_MPI_DEINIT = 0,
+ FW1X_MPI_RESERVED = 1,
+ FW1X_MPI_INIT = 2,
+ FW1X_MPI_POWER = 4,
} fw1x_mode;
typedef enum aq_fw1x_rate {
- FW1X_RATE_10G = 1 << 0,
- FW1X_RATE_5G = 1 << 1,
- FW1X_RATE_5GSR = 1 << 2,
- FW1X_RATE_2G5 = 1 << 3,
- FW1X_RATE_1G = 1 << 4,
- FW1X_RATE_100M = 1 << 5,
- FW1X_RATE_INVALID = 1 << 6,
+ FW1X_RATE_10G = 1 << 0,
+ FW1X_RATE_5G = 1 << 1,
+ FW1X_RATE_5GSR = 1 << 2,
+ FW1X_RATE_2G5 = 1 << 3,
+ FW1X_RATE_1G = 1 << 4,
+ FW1X_RATE_100M = 1 << 5,
+ FW1X_RATE_INVALID = 1 << 6,
} aq_fw1x_rate;
typedef union fw1x_state_reg {
- u32 val;
- struct {
- u8 mode;
- u8 reserved1;
- u8 speed;
- u8 reserved2 : 1;
- u8 disableDirtyWake : 1;
- u8 reserved3 : 2;
- u8 downshift : 4;
- };
+ uint32_t val;
+ struct {
+ uint8_t mode;
+ uint8_t reserved1;
+ uint8_t speed;
+ uint8_t reserved2 : 1;
+ uint8_t disableDirtyWake : 1;
+ uint8_t reserved3 : 2;
+ uint8_t downshift : 4;
+ };
} fw1x_state_reg;
int fw1x_reset(struct aq_hw* hw);
-int fw1x_set_mode(struct aq_hw* hw, enum aq_hw_fw_mpi_state_e mode, aq_fw_link_speed_t speed);
-int fw1x_get_mode(struct aq_hw* hw, enum aq_hw_fw_mpi_state_e* mode, aq_fw_link_speed_t* speed, aq_fw_link_fc_t* fc);
-int fw1x_get_mac_addr(struct aq_hw* hw, u8* mac_addr);
+int fw1x_set_mode(struct aq_hw* hw, enum aq_hw_fw_mpi_state_e mode,
+ aq_fw_link_speed_t speed);
+int fw1x_get_mode(struct aq_hw* hw, enum aq_hw_fw_mpi_state_e* mode,
+ aq_fw_link_speed_t* speed, aq_fw_link_fc_t* fc);
+int fw1x_get_mac_addr(struct aq_hw* hw, uint8_t* mac_addr);
int fw1x_get_stats(struct aq_hw* hw, struct aq_hw_stats_s* stats);
-static
-fw1x_mode mpi_mode_to_fw1x_(enum aq_hw_fw_mpi_state_e mode)
+static fw1x_mode
+mpi_mode_to_fw1x_(enum aq_hw_fw_mpi_state_e mode)
{
- switch (mode) {
- case MPI_DEINIT:
- return (FW1X_MPI_DEINIT);
+ switch (mode) {
+ case MPI_DEINIT:
+ return (FW1X_MPI_DEINIT);
- case MPI_INIT:
- return (FW1X_MPI_INIT);
+ case MPI_INIT:
+ return (FW1X_MPI_INIT);
- case MPI_POWER:
- return (FW1X_MPI_POWER);
+ case MPI_POWER:
+ return (FW1X_MPI_POWER);
- case MPI_RESET:
- return (FW1X_MPI_RESERVED);
- }
+ case MPI_RESET:
+ return (FW1X_MPI_RESERVED);
+ }
- /*
- * We shouldn't get here.
- */
+ /*
+ * We shouldn't get here.
+ */
- return (FW1X_MPI_RESERVED);
+ return (FW1X_MPI_RESERVED);
}
-static
-aq_fw1x_rate link_speed_mask_to_fw1x_(u32 /*aq_fw_link_speed*/ speed)
+static aq_fw1x_rate
+link_speed_mask_to_fw1x_(uint32_t /*aq_fw_link_speed*/ speed)
{
- u32 rate = 0;
- if (speed & aq_fw_10G)
- rate |= FW1X_RATE_10G;
+ uint32_t rate = 0;
+ if (speed & aq_fw_10G)
+ rate |= FW1X_RATE_10G;
- if (speed & aq_fw_5G) {
- rate |= FW1X_RATE_5G;
- rate |= FW1X_RATE_5GSR;
- }
+ if (speed & aq_fw_5G) {
+ rate |= FW1X_RATE_5G;
+ rate |= FW1X_RATE_5GSR;
+ }
- if (speed & aq_fw_2G5)
- rate |= FW1X_RATE_2G5;
+ if (speed & aq_fw_2G5)
+ rate |= FW1X_RATE_2G5;
- if (speed & aq_fw_1G)
- rate |= FW1X_RATE_1G;
+ if (speed & aq_fw_1G)
+ rate |= FW1X_RATE_1G;
- if (speed & aq_fw_100M)
- rate |= FW1X_RATE_100M;
+ if (speed & aq_fw_100M)
+ rate |= FW1X_RATE_100M;
- return ((aq_fw1x_rate)rate);
+ return ((aq_fw1x_rate)rate);
}
-static
-aq_fw_link_speed_t fw1x_rate_to_link_speed_(aq_fw1x_rate rate)
+static aq_fw_link_speed_t
+fw1x_rate_to_link_speed_(aq_fw1x_rate rate)
{
- switch (rate) {
- case FW1X_RATE_10G:
- return (aq_fw_10G);
- case FW1X_RATE_5G:
- case FW1X_RATE_5GSR:
- return (aq_fw_5G);
- case FW1X_RATE_2G5:
- return (aq_fw_2G5);
- case FW1X_RATE_1G:
- return (aq_fw_1G);
- case FW1X_RATE_100M:
- return (aq_fw_100M);
- case FW1X_RATE_INVALID:
- return (aq_fw_none);
- }
-
- /*
- * We should never get here.
- */
-
- return (aq_fw_none);
+ switch (rate) {
+ case FW1X_RATE_10G:
+ return (aq_fw_10G);
+ case FW1X_RATE_5G:
+ case FW1X_RATE_5GSR:
+ return (aq_fw_5G);
+ case FW1X_RATE_2G5:
+ return (aq_fw_2G5);
+ case FW1X_RATE_1G:
+ return (aq_fw_1G);
+ case FW1X_RATE_100M:
+ return (aq_fw_100M);
+ case FW1X_RATE_INVALID:
+ return (aq_fw_none);
+ }
+
+ /*
+ * We should never get here.
+ */
+
+ return (aq_fw_none);
}
-int fw1x_reset(struct aq_hw* hal)
+int
+fw1x_reset(struct aq_hw* hal)
{
- u32 tid0 = ~0u; /*< Initial value of MBOX transactionId. */
- struct aq_hw_fw_mbox mbox;
- const int retryCount = 1000;
-
- for (int i = 0; i < retryCount; ++i) {
- // Read the beginning of Statistics structure to capture the Transaction ID.
- aq_hw_fw_downld_dwords(hal, hal->mbox_addr, (u32*)&mbox,
- (u32)((char*)&mbox.stats - (char*)&mbox) / sizeof(u32));
-
- // Successfully read the stats.
- if (tid0 == ~0U) {
- // We have read the initial value.
- tid0 = mbox.transaction_id;
- continue;
- } else if (mbox.transaction_id != tid0) {
- /*
- * Compare transaction ID to initial value.
- * If it's different means f/w is alive. We're done.
- */
-
- return (EOK);
- }
-
- /*
- * Transaction ID value haven't changed since last time.
- * Try reading the stats again.
- */
- usec_delay(10);
- }
-
- trace_error(dbg_init, "F/W 1.x reset finalize timeout");
- return (-EBUSY);
+ uint32_t tid0 = ~0u; /*< Initial value of MBOX transactionId. */
+ struct aq_hw_fw_mbox mbox;
+ const int retryCount = 1000;
+
+ for (int i = 0; i < retryCount; ++i) {
+ // Read the beginning of Statistics structure to capture the Transaction ID.
+ aq_hw_fw_downld_dwords(hal, hal->mbox_addr, (uint32_t*)&mbox,
+ (uint32_t)((char*)&mbox.stats - (char*)&mbox) / sizeof(uint32_t));
+
+ // Successfully read the stats.
+ if (tid0 == ~0U) {
+ // We have read the initial value.
+ tid0 = mbox.transaction_id;
+ continue;
+ } else if (mbox.transaction_id != tid0) {
+ /*
+ * Compare transaction ID to initial value.
+ * If it's different means f/w is alive. We're done.
+ */
+
+ return (EOK);
+ }
+
+ /*
+ * Transaction ID value haven't changed since last time.
+ * Try reading the stats again.
+ */
+ usec_delay(10);
+ }
+
+ trace_error(dbg_init, "F/W 1.x reset finalize timeout");
+ return (-EBUSY);
}
-int fw1x_set_mode(struct aq_hw* hw, enum aq_hw_fw_mpi_state_e mode, aq_fw_link_speed_t speed)
+int
+fw1x_set_mode(struct aq_hw* hw, enum aq_hw_fw_mpi_state_e mode,
+ aq_fw_link_speed_t speed)
{
- union fw1x_state_reg state = {0};
- state.mode = mpi_mode_to_fw1x_(mode);
- state.speed = link_speed_mask_to_fw1x_(speed);
+ union fw1x_state_reg state = {0};
+ state.mode = mpi_mode_to_fw1x_(mode);
+ state.speed = link_speed_mask_to_fw1x_(speed);
- trace(dbg_init, "fw1x> set mode %d, rate mask = %#x; raw = %#x", state.mode, state.speed, state.val);
+ trace(dbg_init, "fw1x> set mode %d, rate mask = %#x; raw = %#x",
+ state.mode, state.speed, state.val);
- AQ_WRITE_REG(hw, FW1X_MPI_CONTROL_ADR, state.val);
+ AQ_WRITE_REG(hw, FW1X_MPI_CONTROL_ADR, state.val);
- return (EOK);
+ return (EOK);
}
-int fw1x_get_mode(struct aq_hw* hw, enum aq_hw_fw_mpi_state_e* mode, aq_fw_link_speed_t* speed, aq_fw_link_fc_t* fc)
+int
+fw1x_get_mode(struct aq_hw* hw, enum aq_hw_fw_mpi_state_e* mode,
+ aq_fw_link_speed_t* speed, aq_fw_link_fc_t* fc)
{
- union fw1x_state_reg state = { .val = AQ_READ_REG(hw, AQ_HW_MPI_STATE_ADR) };
-
- trace(dbg_init, "fw1x> get_mode(): 0x36c -> %x, 0x368 -> %x", state.val, AQ_READ_REG(hw, AQ_HW_MPI_CONTROL_ADR));
-
- enum aq_hw_fw_mpi_state_e md = MPI_DEINIT;
-
- switch (state.mode) {
- case FW1X_MPI_DEINIT:
- md = MPI_DEINIT;
- break;
- case FW1X_MPI_RESERVED:
- md = MPI_RESET;
- break;
- case FW1X_MPI_INIT:
- md = MPI_INIT;
- break;
- case FW1X_MPI_POWER:
- md = MPI_POWER;
- break;
- }
-
- if (mode)
- *mode = md;
-
- if (speed)
- *speed = fw1x_rate_to_link_speed_(state.speed);
-
- *fc = aq_fw_fc_none;
-
- AQ_DBG_EXIT(EOK);
- return (EOK);
+ union fw1x_state_reg state = { .val = AQ_READ_REG(hw, AQ_HW_MPI_STATE_ADR) };
+
+ trace(dbg_init, "fw1x> get_mode(): 0x36c -> %x, 0x368 -> %x",
+ state.val, AQ_READ_REG(hw, AQ_HW_MPI_CONTROL_ADR));
+
+ enum aq_hw_fw_mpi_state_e md = MPI_DEINIT;
+
+ switch (state.mode) {
+ case FW1X_MPI_DEINIT:
+ md = MPI_DEINIT;
+ break;
+ case FW1X_MPI_RESERVED:
+ md = MPI_RESET;
+ break;
+ case FW1X_MPI_INIT:
+ md = MPI_INIT;
+ break;
+ case FW1X_MPI_POWER:
+ md = MPI_POWER;
+ break;
+ }
+
+ if (mode)
+ *mode = md;
+
+ if (speed)
+ *speed = fw1x_rate_to_link_speed_(state.speed);
+
+ *fc = aq_fw_fc_none;
+
+ AQ_DBG_EXIT(EOK);
+ return (EOK);
}
-int fw1x_get_mac_addr(struct aq_hw* hw, u8* mac)
+int
+fw1x_get_mac_addr(struct aq_hw* hw, uint8_t* mac)
{
- int err = -EFAULT;
- u32 mac_addr[2];
+ int err = -EFAULT;
+ uint32_t mac_addr[2];
- AQ_DBG_ENTER();
+ AQ_DBG_ENTER();
- u32 efuse_shadow_addr = AQ_READ_REG(hw, 0x374);
- if (efuse_shadow_addr == 0) {
- trace_error(dbg_init, "couldn't read eFUSE Shadow Address");
- AQ_DBG_EXIT(-EFAULT);
- return (-EFAULT);
- }
+ uint32_t efuse_shadow_addr = AQ_READ_REG(hw, 0x374);
+ if (efuse_shadow_addr == 0) {
+ trace_error(dbg_init, "couldn't read eFUSE Shadow Address");
+ AQ_DBG_EXIT(-EFAULT);
+ return (-EFAULT);
+ }
- err = aq_hw_fw_downld_dwords(hw, efuse_shadow_addr + (40 * 4),
- mac_addr, ARRAY_SIZE(mac_addr));
- if (err < 0) {
- mac_addr[0] = 0;
- mac_addr[1] = 0;
- AQ_DBG_EXIT(err);
- return (err);
- }
+ err = aq_hw_fw_downld_dwords(hw, efuse_shadow_addr + (40 * 4),
+ mac_addr, ARRAY_SIZE(mac_addr));
+ if (err < 0) {
+ mac_addr[0] = 0;
+ mac_addr[1] = 0;
+ AQ_DBG_EXIT(err);
+ return (err);
+ }
- mac_addr[0] = bswap32(mac_addr[0]);
- mac_addr[1] = bswap32(mac_addr[1]);
+ mac_addr[0] = bswap32(mac_addr[0]);
+ mac_addr[1] = bswap32(mac_addr[1]);
- memcpy(mac, (u8*)mac_addr, ETHER_ADDR_LEN);
+ memcpy(mac, (uint8_t*)mac_addr, ETHER_ADDR_LEN);
- trace(dbg_init, "fw1x> eFUSE MAC addr -> %02x-%02x-%02x-%02x-%02x-%02x",
- mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
+ trace(dbg_init, "fw1x> eFUSE MAC addr -> %02x-%02x-%02x-%02x-%02x-%02x",
+ mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
- AQ_DBG_EXIT(EOK);
- return (EOK);
+ AQ_DBG_EXIT(EOK);
+ return (EOK);
}
-int fw1x_get_stats(struct aq_hw* hw, struct aq_hw_stats_s* stats)
+int
+fw1x_get_stats(struct aq_hw* hw, struct aq_hw_stats_s* stats)
{
- int err = 0;
+ int err = 0;
- AQ_DBG_ENTER();
- err = aq_hw_fw_downld_dwords(hw, hw->mbox_addr, (u32*)(void*)&hw->mbox,
- sizeof hw->mbox / sizeof(u32));
+ AQ_DBG_ENTER();
+ err = aq_hw_fw_downld_dwords(hw, hw->mbox_addr,
+ (uint32_t*)(void*)&hw->mbox, sizeof hw->mbox / sizeof(uint32_t));
- if (err >= 0) {
- if (stats != &hw->mbox.stats)
- memcpy(stats, &hw->mbox.stats, sizeof *stats);
+ if (err >= 0) {
+ if (stats != &hw->mbox.stats)
+ memcpy(stats, &hw->mbox.stats, sizeof *stats);
- stats->dpc = reg_rx_dma_stat_counter7get(hw);
- }
+ stats->dpc = reg_rx_dma_stat_counter7get(hw);
+ }
- AQ_DBG_EXIT(err);
- return (err);
+ AQ_DBG_EXIT(err);
+ return (err);
}
struct aq_firmware_ops aq_fw1x_ops =
{
- .reset = fw1x_reset,
+ .reset = fw1x_reset,
- .set_mode = fw1x_set_mode,
- .get_mode = fw1x_get_mode,
+ .set_mode = fw1x_set_mode,
+ .get_mode = fw1x_get_mode,
- .get_mac_addr = fw1x_get_mac_addr,
- .get_stats = fw1x_get_stats,
+ .get_mac_addr = fw1x_get_mac_addr,
+ .get_stats = fw1x_get_stats,
};
Index: sys/dev/aq/aq_fw2x.c
===================================================================
--- sys/dev/aq/aq_fw2x.c
+++ sys/dev/aq/aq_fw2x.c
@@ -52,112 +52,112 @@
#include "aq_dbg.h"
typedef enum {
- CAPS_LO_10BASET_HD = 0x00,
- CAPS_LO_10BASET_FD,
- CAPS_LO_100BASETX_HD,
- CAPS_LO_100BASET4_HD,
- CAPS_LO_100BASET2_HD,
- CAPS_LO_100BASETX_FD,
- CAPS_LO_100BASET2_FD,
- CAPS_LO_1000BASET_HD,
- CAPS_LO_1000BASET_FD,
- CAPS_LO_2P5GBASET_FD,
- CAPS_LO_5GBASET_FD,
- CAPS_LO_10GBASET_FD,
+ CAPS_LO_10BASET_HD = 0x00,
+ CAPS_LO_10BASET_FD,
+ CAPS_LO_100BASETX_HD,
+ CAPS_LO_100BASET4_HD,
+ CAPS_LO_100BASET2_HD,
+ CAPS_LO_100BASETX_FD,
+ CAPS_LO_100BASET2_FD,
+ CAPS_LO_1000BASET_HD,
+ CAPS_LO_1000BASET_FD,
+ CAPS_LO_2P5GBASET_FD,
+ CAPS_LO_5GBASET_FD,
+ CAPS_LO_10GBASET_FD,
} fw2x_caps_lo;
typedef enum {
- CAPS_HI_RESERVED1 = 0x00,
- CAPS_HI_10BASET_EEE,
- CAPS_HI_RESERVED2,
- CAPS_HI_PAUSE,
- CAPS_HI_ASYMMETRIC_PAUSE,
- CAPS_HI_100BASETX_EEE,
- CAPS_HI_RESERVED3,
- CAPS_HI_RESERVED4,
- CAPS_HI_1000BASET_FD_EEE,
- CAPS_HI_2P5GBASET_FD_EEE,
- CAPS_HI_5GBASET_FD_EEE,
- CAPS_HI_10GBASET_FD_EEE,
- CAPS_HI_RESERVED5,
- CAPS_HI_RESERVED6,
- CAPS_HI_RESERVED7,
- CAPS_HI_RESERVED8,
- CAPS_HI_RESERVED9,
- CAPS_HI_CABLE_DIAG,
- CAPS_HI_TEMPERATURE,
- CAPS_HI_DOWNSHIFT,
- CAPS_HI_PTP_AVB_EN,
- CAPS_HI_MEDIA_DETECT,
- CAPS_HI_LINK_DROP,
- CAPS_HI_SLEEP_PROXY,
- CAPS_HI_WOL,
- CAPS_HI_MAC_STOP,
- CAPS_HI_EXT_LOOPBACK,
- CAPS_HI_INT_LOOPBACK,
- CAPS_HI_EFUSE_AGENT,
- CAPS_HI_WOL_TIMER,
- CAPS_HI_STATISTICS,
- CAPS_HI_TRANSACTION_ID,
+ CAPS_HI_RESERVED1 = 0x00,
+ CAPS_HI_10BASET_EEE,
+ CAPS_HI_RESERVED2,
+ CAPS_HI_PAUSE,
+ CAPS_HI_ASYMMETRIC_PAUSE,
+ CAPS_HI_100BASETX_EEE,
+ CAPS_HI_RESERVED3,
+ CAPS_HI_RESERVED4,
+ CAPS_HI_1000BASET_FD_EEE,
+ CAPS_HI_2P5GBASET_FD_EEE,
+ CAPS_HI_5GBASET_FD_EEE,
+ CAPS_HI_10GBASET_FD_EEE,
+ CAPS_HI_RESERVED5,
+ CAPS_HI_RESERVED6,
+ CAPS_HI_RESERVED7,
+ CAPS_HI_RESERVED8,
+ CAPS_HI_RESERVED9,
+ CAPS_HI_CABLE_DIAG,
+ CAPS_HI_TEMPERATURE,
+ CAPS_HI_DOWNSHIFT,
+ CAPS_HI_PTP_AVB_EN,
+ CAPS_HI_MEDIA_DETECT,
+ CAPS_HI_LINK_DROP,
+ CAPS_HI_SLEEP_PROXY,
+ CAPS_HI_WOL,
+ CAPS_HI_MAC_STOP,
+ CAPS_HI_EXT_LOOPBACK,
+ CAPS_HI_INT_LOOPBACK,
+ CAPS_HI_EFUSE_AGENT,
+ CAPS_HI_WOL_TIMER,
+ CAPS_HI_STATISTICS,
+ CAPS_HI_TRANSACTION_ID,
} fw2x_caps_hi;
typedef enum aq_fw2x_rate
{
- FW2X_RATE_100M = 0x20,
- FW2X_RATE_1G = 0x100,
- FW2X_RATE_2G5 = 0x200,
- FW2X_RATE_5G = 0x400,
- FW2X_RATE_10G = 0x800,
+ FW2X_RATE_100M = 0x20,
+ FW2X_RATE_1G = 0x100,
+ FW2X_RATE_2G5 = 0x200,
+ FW2X_RATE_5G = 0x400,
+ FW2X_RATE_10G = 0x800,
} aq_fw2x_rate;
typedef struct fw2x_msm_statistics
{
- uint32_t uprc;
- uint32_t mprc;
- uint32_t bprc;
- uint32_t erpt;
- uint32_t uptc;
- uint32_t mptc;
- uint32_t bptc;
- uint32_t erpr;
- uint32_t mbtc;
- uint32_t bbtc;
- uint32_t mbrc;
- uint32_t bbrc;
- uint32_t ubrc;
- uint32_t ubtc;
- uint32_t ptc;
- uint32_t prc;
+ uint32_t uprc;
+ uint32_t mprc;
+ uint32_t bprc;
+ uint32_t erpt;
+ uint32_t uptc;
+ uint32_t mptc;
+ uint32_t bptc;
+ uint32_t erpr;
+ uint32_t mbtc;
+ uint32_t bbtc;
+ uint32_t mbrc;
+ uint32_t bbrc;
+ uint32_t ubrc;
+ uint32_t ubtc;
+ uint32_t ptc;
+ uint32_t prc;
} fw2x_msm_statistics;
typedef struct fw2x_phy_cable_diag_data
{
- u32 lane_data[4];
+ uint32_t lane_data[4];
} fw2x_phy_cable_diag_data;
typedef struct fw2x_capabilities {
- u32 caps_lo;
- u32 caps_hi;
+ uint32_t caps_lo;
+ uint32_t caps_hi;
} fw2x_capabilities;
typedef struct fw2x_mailbox // struct fwHostInterface
{
- u32 version;
- u32 transaction_id;
- s32 error;
- fw2x_msm_statistics msm; // msmStatistics_t msm;
- u16 phy_h_bit;
- u16 phy_fault_code;
- s16 phy_temperature;
- u8 cable_len;
- u8 reserved1;
- fw2x_phy_cable_diag_data diag_data;
- u32 reserved[8];
-
- fw2x_capabilities caps;
-
- /* ... */
+ uint32_t version;
+ uint32_t transaction_id;
+ int32_t error;
+ fw2x_msm_statistics msm; // msmStatistics_t msm;
+ uint16_t phy_h_bit;
+ uint16_t phy_fault_code;
+ int16_t phy_temperature;
+ uint8_t cable_len;
+ uint8_t reserved1;
+ fw2x_phy_cable_diag_data diag_data;
+ uint32_t reserved[8];
+
+ fw2x_capabilities caps;
+
+ /* ... */
} fw2x_mailbox;
@@ -196,303 +196,331 @@
//@{
int fw2x_reset(struct aq_hw* hw);
-int fw2x_set_mode(struct aq_hw* hw, enum aq_hw_fw_mpi_state_e mode, aq_fw_link_speed_t speed);
-int fw2x_get_mode(struct aq_hw* hw, enum aq_hw_fw_mpi_state_e* mode, aq_fw_link_speed_t* speed, aq_fw_link_fc_t* fc);
+int fw2x_set_mode(struct aq_hw* hw, enum aq_hw_fw_mpi_state_e mode,
+ aq_fw_link_speed_t speed);
+int fw2x_get_mode(struct aq_hw* hw, enum aq_hw_fw_mpi_state_e* mode,
+ aq_fw_link_speed_t* speed, aq_fw_link_fc_t* fc);
-int fw2x_get_mac_addr(struct aq_hw* hw, u8* mac);
+int fw2x_get_mac_addr(struct aq_hw* hw, uint8_t* mac);
int fw2x_get_stats(struct aq_hw* hw, struct aq_hw_stats_s* stats);
//@}
-static u64 read64_(struct aq_hw* hw, u32 addr)
+static uint64_t
+read64_(struct aq_hw* hw, uint32_t addr)
{
- u64 lo = AQ_READ_REG(hw, addr);
- u64 hi = AQ_READ_REG(hw, addr + 4);
- return (lo | (hi << 32));
+ uint64_t lo = AQ_READ_REG(hw, addr);
+ uint64_t hi = AQ_READ_REG(hw, addr + 4);
+ return (lo | (hi << 32));
}
-static uint64_t get_mpi_ctrl_(struct aq_hw* hw)
+static uint64_t
+get_mpi_ctrl_(struct aq_hw* hw)
{
- return read64_(hw, FW2X_MPI_CONTROL_ADDR);
+ return read64_(hw, FW2X_MPI_CONTROL_ADDR);
}
-static uint64_t get_mpi_state_(struct aq_hw* hw)
+static uint64_t
+get_mpi_state_(struct aq_hw* hw)
{
- return read64_(hw, FW2X_MPI_STATE_ADDR);
+ return read64_(hw, FW2X_MPI_STATE_ADDR);
}
-static void set_mpi_ctrl_(struct aq_hw* hw, u64 value)
+static void
+set_mpi_ctrl_(struct aq_hw* hw, uint64_t value)
{
- AQ_WRITE_REG(hw, FW2X_MPI_CONTROL_ADDR, (u32)value);
- AQ_WRITE_REG(hw, FW2X_MPI_CONTROL_ADDR + 4, (u32)(value >> 32));
+ AQ_WRITE_REG(hw, FW2X_MPI_CONTROL_ADDR, (uint32_t)value);
+ AQ_WRITE_REG(hw, FW2X_MPI_CONTROL_ADDR + 4, (uint32_t)(value >> 32));
}
-int fw2x_reset(struct aq_hw* hw)
+int
+fw2x_reset(struct aq_hw* hw)
{
- fw2x_capabilities caps = {0};
- AQ_DBG_ENTER();
- int err = aq_hw_fw_downld_dwords(hw, hw->mbox_addr + offsetof(fw2x_mailbox, caps), (u32*)&caps, sizeof caps/sizeof(u32));
- if (err == EOK) {
- hw->fw_caps = caps.caps_lo | ((u64)caps.caps_hi << 32);
- trace(dbg_init, "fw2x> F/W capabilities mask = %llx", (unsigned long long)hw->fw_caps);
- } else {
- trace_error(dbg_init, "fw2x> can't get F/W capabilities mask, error %d", err);
- }
+ fw2x_capabilities caps = {0};
+ AQ_DBG_ENTER();
+ int err = aq_hw_fw_downld_dwords(hw,
+ hw->mbox_addr + offsetof(fw2x_mailbox, caps),
+ (uint32_t*)&caps, sizeof caps/sizeof(uint32_t));
+ if (err == EOK) {
+ hw->fw_caps = caps.caps_lo | ((uint64_t)caps.caps_hi << 32);
+ trace(dbg_init,
+ "fw2x> F/W capabilities mask = %llx",
+ (unsigned long long)hw->fw_caps);
+ } else {
+ trace_error(dbg_init,
+ "fw2x> can't get F/W capabilities mask, error %d", err);
+ }
AQ_DBG_EXIT(EOK);
return (EOK);
}
-static
-aq_fw2x_rate link_speed_mask_to_fw2x_(u32 speed)
+static aq_fw2x_rate
+link_speed_mask_to_fw2x_(uint32_t speed)
{
- u32 rate = 0;
+ uint32_t rate = 0;
- AQ_DBG_ENTER();
- if (speed & aq_fw_10G)
- rate |= FW2X_RATE_10G;
+ AQ_DBG_ENTER();
+ if (speed & aq_fw_10G)
+ rate |= FW2X_RATE_10G;
- if (speed & aq_fw_5G)
- rate |= FW2X_RATE_5G;
+ if (speed & aq_fw_5G)
+ rate |= FW2X_RATE_5G;
- if (speed & aq_fw_2G5)
- rate |= FW2X_RATE_2G5;
+ if (speed & aq_fw_2G5)
+ rate |= FW2X_RATE_2G5;
- if (speed & aq_fw_1G)
- rate |= FW2X_RATE_1G;
+ if (speed & aq_fw_1G)
+ rate |= FW2X_RATE_1G;
- if (speed & aq_fw_100M)
- rate |= FW2X_RATE_100M;
+ if (speed & aq_fw_100M)
+ rate |= FW2X_RATE_100M;
- AQ_DBG_EXIT(rate);
- return ((aq_fw2x_rate)rate);
+ AQ_DBG_EXIT(rate);
+ return ((aq_fw2x_rate)rate);
}
-int fw2x_set_mode(struct aq_hw* hw, enum aq_hw_fw_mpi_state_e mode, aq_fw_link_speed_t speed)
+int
+fw2x_set_mode(struct aq_hw* hw, enum aq_hw_fw_mpi_state_e mode,
+ aq_fw_link_speed_t speed)
{
- u64 mpi_ctrl = get_mpi_ctrl_(hw);
-
- AQ_DBG_ENTERA("speed=%d", speed);
- switch (mode) {
- case MPI_INIT:
- mpi_ctrl &= ~FW2X_RATE_MASK;
- mpi_ctrl |= link_speed_mask_to_fw2x_(speed);
- mpi_ctrl &= ~FW2X_CAP_LINK_DROP;
+ uint64_t mpi_ctrl = get_mpi_ctrl_(hw);
+
+ AQ_DBG_ENTERA("speed=%d", speed);
+ switch (mode) {
+ case MPI_INIT:
+ mpi_ctrl &= ~FW2X_RATE_MASK;
+ mpi_ctrl |= link_speed_mask_to_fw2x_(speed);
+ mpi_ctrl &= ~FW2X_CAP_LINK_DROP;
#if 0 // #todo #flowcontrol #pause #eee
- if (pHal->pCfg->eee)
- mpi_ctrl |= FW2X_EEE_MASK;
+ if (pHal->pCfg->eee)
+ mpi_ctrl |= FW2X_EEE_MASK;
#endif
- if (hw->fc.fc_rx)
- mpi_ctrl |= FW2X_FW_CAP_PAUSE;
- if (hw->fc.fc_tx)
- mpi_ctrl |= FW2X_FW_CAP_ASYM_PAUSE;
- break;
-
- case MPI_DEINIT:
- mpi_ctrl &= ~(FW2X_RATE_MASK | FW2X_EEE_MASK);
- mpi_ctrl &= ~(FW2X_FW_CAP_PAUSE | FW2X_FW_CAP_ASYM_PAUSE);
- break;
-
- default:
- trace_error(dbg_init, "fw2x> unknown MPI state %d", mode);
- return (-EINVAL);
- }
-
- set_mpi_ctrl_(hw, mpi_ctrl);
- AQ_DBG_EXIT(EOK);
- return (EOK);
+ if (hw->fc.fc_rx)
+ mpi_ctrl |= FW2X_FW_CAP_PAUSE;
+ if (hw->fc.fc_tx)
+ mpi_ctrl |= FW2X_FW_CAP_ASYM_PAUSE;
+ break;
+
+ case MPI_DEINIT:
+ mpi_ctrl &= ~(FW2X_RATE_MASK | FW2X_EEE_MASK);
+ mpi_ctrl &= ~(FW2X_FW_CAP_PAUSE | FW2X_FW_CAP_ASYM_PAUSE);
+ break;
+
+ default:
+ trace_error(dbg_init, "fw2x> unknown MPI state %d", mode);
+ return (-EINVAL);
+ }
+
+ set_mpi_ctrl_(hw, mpi_ctrl);
+ AQ_DBG_EXIT(EOK);
+ return (EOK);
}
-int fw2x_get_mode(struct aq_hw* hw, enum aq_hw_fw_mpi_state_e* mode, aq_fw_link_speed_t* link_speed, aq_fw_link_fc_t* fc)
+int
+fw2x_get_mode(struct aq_hw* hw, enum aq_hw_fw_mpi_state_e* mode,
+ aq_fw_link_speed_t* link_speed, aq_fw_link_fc_t* fc)
{
- u64 mpi_state = get_mpi_state_(hw);
- u32 rates = mpi_state & FW2X_RATE_MASK;
+ uint64_t mpi_state = get_mpi_state_(hw);
+ uint32_t rates = mpi_state & FW2X_RATE_MASK;
// AQ_DBG_ENTER();
- if (mode) {
- u64 mpi_ctrl = get_mpi_ctrl_(hw);
- if (mpi_ctrl & FW2X_RATE_MASK)
- *mode = MPI_INIT;
- else
- *mode = MPI_DEINIT;
- }
-
- aq_fw_link_speed_t speed = aq_fw_none;
-
- if (rates & FW2X_RATE_10G)
- speed = aq_fw_10G;
- else if (rates & FW2X_RATE_5G)
- speed = aq_fw_5G;
- else if (rates & FW2X_RATE_2G5)
- speed = aq_fw_2G5;
- else if (rates & FW2X_RATE_1G)
- speed = aq_fw_1G;
- else if (rates & FW2X_RATE_100M)
- speed = aq_fw_100M;
-
- if (link_speed)
- *link_speed = speed;
-
- *fc = (mpi_state & (FW2X_FW_CAP_PAUSE | FW2X_FW_CAP_ASYM_PAUSE)) >> (32 + CAPS_HI_PAUSE);
-
+ if (mode) {
+ uint64_t mpi_ctrl = get_mpi_ctrl_(hw);
+ if (mpi_ctrl & FW2X_RATE_MASK)
+ *mode = MPI_INIT;
+ else
+ *mode = MPI_DEINIT;
+ }
+
+ aq_fw_link_speed_t speed = aq_fw_none;
+
+ if (rates & FW2X_RATE_10G)
+ speed = aq_fw_10G;
+ else if (rates & FW2X_RATE_5G)
+ speed = aq_fw_5G;
+ else if (rates & FW2X_RATE_2G5)
+ speed = aq_fw_2G5;
+ else if (rates & FW2X_RATE_1G)
+ speed = aq_fw_1G;
+ else if (rates & FW2X_RATE_100M)
+ speed = aq_fw_100M;
+
+ if (link_speed)
+ *link_speed = speed;
+
+ *fc = (mpi_state & (FW2X_FW_CAP_PAUSE | FW2X_FW_CAP_ASYM_PAUSE)) >>
+ (32 + CAPS_HI_PAUSE);
// AQ_DBG_EXIT(0);
- return (EOK);
+ return (EOK);
}
-int fw2x_get_mac_addr(struct aq_hw* hw, u8* mac)
+int
+fw2x_get_mac_addr(struct aq_hw* hw, uint8_t* mac)
{
- int err = -EFAULT;
- u32 mac_addr[2];
-
- AQ_DBG_ENTER();
-
- u32 efuse_shadow_addr = AQ_READ_REG(hw, 0x364);
- if (efuse_shadow_addr == 0) {
- trace_error(dbg_init, "couldn't read eFUSE Shadow Address");
- AQ_DBG_EXIT(-EFAULT);
- return (-EFAULT);
- }
-
- err = aq_hw_fw_downld_dwords(hw, efuse_shadow_addr + (40 * 4),
- mac_addr, ARRAY_SIZE(mac_addr));
- if (err < 0) {
- mac_addr[0] = 0;
- mac_addr[1] = 0;
- AQ_DBG_EXIT(err);
- return (err);
- }
-
- mac_addr[0] = bswap32(mac_addr[0]);
- mac_addr[1] = bswap32(mac_addr[1]);
-
- memcpy(mac, (u8*)mac_addr, ETHER_ADDR_LEN);
-
- AQ_DBG_EXIT(EOK);
- return (EOK);
+ int err = -EFAULT;
+ uint32_t mac_addr[2];
+
+ AQ_DBG_ENTER();
+
+ uint32_t efuse_shadow_addr = AQ_READ_REG(hw, 0x364);
+ if (efuse_shadow_addr == 0) {
+ trace_error(dbg_init, "couldn't read eFUSE Shadow Address");
+ AQ_DBG_EXIT(-EFAULT);
+ return (-EFAULT);
+ }
+
+ err = aq_hw_fw_downld_dwords(hw, efuse_shadow_addr + (40 * 4), mac_addr,
+ ARRAY_SIZE(mac_addr));
+ if (err < 0) {
+ mac_addr[0] = 0;
+ mac_addr[1] = 0;
+ AQ_DBG_EXIT(err);
+ return (err);
+ }
+
+ mac_addr[0] = bswap32(mac_addr[0]);
+ mac_addr[1] = bswap32(mac_addr[1]);
+
+ memcpy(mac, (uint8_t*)mac_addr, ETHER_ADDR_LEN);
+
+ AQ_DBG_EXIT(EOK);
+ return (EOK);
}
-static inline
-void fw2x_stats_to_fw_stats_(struct aq_hw_stats_s* dst, const fw2x_msm_statistics* src)
+static inline void
+fw2x_stats_to_fw_stats_(struct aq_hw_stats_s* dst,
+ const fw2x_msm_statistics* src)
{
- dst->uprc = src->uprc;
- dst->mprc = src->mprc;
- dst->bprc = src->bprc;
- dst->erpt = src->erpt;
- dst->uptc = src->uptc;
- dst->mptc = src->mptc;
- dst->bptc = src->bptc;
- dst->erpr = src->erpr;
- dst->mbtc = src->mbtc;
- dst->bbtc = src->bbtc;
- dst->mbrc = src->mbrc;
- dst->bbrc = src->bbrc;
- dst->ubrc = src->ubrc;
- dst->ubtc = src->ubtc;
- dst->ptc = src->ptc;
- dst->prc = src->prc;
+ dst->uprc = src->uprc;
+ dst->mprc = src->mprc;
+ dst->bprc = src->bprc;
+ dst->erpt = src->erpt;
+ dst->uptc = src->uptc;
+ dst->mptc = src->mptc;
+ dst->bptc = src->bptc;
+ dst->erpr = src->erpr;
+ dst->mbtc = src->mbtc;
+ dst->bbtc = src->bbtc;
+ dst->mbrc = src->mbrc;
+ dst->bbrc = src->bbrc;
+ dst->ubrc = src->ubrc;
+ dst->ubtc = src->ubtc;
+ dst->ptc = src->ptc;
+ dst->prc = src->prc;
}
-static bool toggle_mpi_ctrl_and_wait_(struct aq_hw* hw, u64 mask, u32 timeout_ms, u32 try_count)
+static bool
+toggle_mpi_ctrl_and_wait_(struct aq_hw* hw, uint64_t mask, uint32_t timeout_ms,
+ uint32_t try_count)
{
- u64 ctrl = get_mpi_ctrl_(hw);
- u64 state = get_mpi_state_(hw);
+ uint64_t ctrl = get_mpi_ctrl_(hw);
+ uint64_t state = get_mpi_state_(hw);
// AQ_DBG_ENTER();
- // First, check that control and state values are consistent
- if ((ctrl & mask) != (state & mask)) {
- trace_warn(dbg_fw, "fw2x> MPI control (%#llx) and state (%#llx) are not consistent for mask %#llx!",
- (unsigned long long)ctrl, (unsigned long long)state, (unsigned long long)mask);
+ // First, check that control and state values are consistent
+ if ((ctrl & mask) != (state & mask)) {
+ trace_warn(dbg_fw,
+ "fw2x> MPI control (%#llx) and state (%#llx) are not consistent for mask %#llx!",
+ (unsigned long long)ctrl, (unsigned long long)state,
+ (unsigned long long)mask);
AQ_DBG_EXIT(false);
- return (false);
- }
+ return (false);
+ }
- // Invert bits (toggle) in control register
- ctrl ^= mask;
- set_mpi_ctrl_(hw, ctrl);
+ // Invert bits (toggle) in control register
+ ctrl ^= mask;
+ set_mpi_ctrl_(hw, ctrl);
- // Clear all bits except masked
- ctrl &= mask;
+ // Clear all bits except masked
+ ctrl &= mask;
- // Wait for FW reflecting change in state register
- while (try_count-- != 0) {
- if ((get_mpi_state_(hw) & mask) == ctrl)
+ // Wait for FW reflecting change in state register
+ while (try_count-- != 0) {
+ if ((get_mpi_state_(hw) & mask) == ctrl)
{
// AQ_DBG_EXIT(true);
- return (true);
+ return (true);
}
- msec_delay(timeout_ms);
- }
+ msec_delay(timeout_ms);
+ }
- trace_detail(dbg_fw, "f/w2x> timeout while waiting for response in state register for bit %#llx!", (unsigned long long)mask);
+ trace_detail(dbg_fw,
+ "f/w2x> timeout while waiting for response in state register for bit %#llx!",
+ (unsigned long long)mask);
// AQ_DBG_EXIT(false);
- return (false);
+ return (false);
}
-int fw2x_get_stats(struct aq_hw* hw, struct aq_hw_stats_s* stats)
+int
+fw2x_get_stats(struct aq_hw* hw, struct aq_hw_stats_s* stats)
{
- int err = 0;
- fw2x_msm_statistics fw2x_stats = {0};
+ int err = 0;
+ fw2x_msm_statistics fw2x_stats = {0};
// AQ_DBG_ENTER();
- if ((hw->fw_caps & FW2X_CAP_STATISTICS) == 0) {
- trace_warn(dbg_fw, "fw2x> statistics not supported by F/W");
- return (-ENOTSUP);
- }
+ if ((hw->fw_caps & FW2X_CAP_STATISTICS) == 0) {
+ trace_warn(dbg_fw, "fw2x> statistics not supported by F/W");
+ return (-ENOTSUP);
+ }
- // Say to F/W to update the statistics
- if (!toggle_mpi_ctrl_and_wait_(hw, FW2X_CAP_STATISTICS, 1, 25)) {
- trace_error(dbg_fw, "fw2x> statistics update timeout");
+ // Say to F/W to update the statistics
+ if (!toggle_mpi_ctrl_and_wait_(hw, FW2X_CAP_STATISTICS, 1, 25)) {
+ trace_error(dbg_fw, "fw2x> statistics update timeout");
AQ_DBG_EXIT(-ETIME);
- return (-ETIME);
- }
+ return (-ETIME);
+ }
- err = aq_hw_fw_downld_dwords(hw, hw->mbox_addr + offsetof(fw2x_mailbox, msm),
- (u32*)&fw2x_stats, sizeof fw2x_stats/sizeof(u32));
+ err = aq_hw_fw_downld_dwords(hw,
+ hw->mbox_addr + offsetof(fw2x_mailbox, msm),
+ (uint32_t*)&fw2x_stats, sizeof fw2x_stats/sizeof(uint32_t));
- fw2x_stats_to_fw_stats_(stats, &fw2x_stats);
+ fw2x_stats_to_fw_stats_(stats, &fw2x_stats);
- if (err != EOK)
- trace_error(dbg_fw, "fw2x> download statistics data FAILED, error %d", err);
+ if (err != EOK)
+ trace_error(dbg_fw,
+ "fw2x> download statistics data FAILED, error %d", err);
// AQ_DBG_EXIT(err);
- return (err);
+ return (err);
}
-static int fw2x_led_control(struct aq_hw* hw, u32 onoff)
+static int
+fw2x_led_control(struct aq_hw* hw, uint32_t onoff)
{
- int err = 0;
+ int err = 0;
- AQ_DBG_ENTER();
+ AQ_DBG_ENTER();
- aq_hw_fw_version ver_expected = { .raw = FW2X_FW_MIN_VER_LED};
- if (aq_hw_ver_match(&ver_expected, &hw->fw_version))
- AQ_WRITE_REG(hw, FW2X_MPI_LED_ADDR, (onoff)?
- ((FW2X_LED_BLINK) | (FW2X_LED_BLINK << 2) | (FW2X_LED_BLINK << 4)):
- (FW2X_LED_DEFAULT));
+ aq_hw_fw_version ver_expected = { .raw = FW2X_FW_MIN_VER_LED};
+ if (aq_hw_ver_match(&ver_expected, &hw->fw_version))
+ AQ_WRITE_REG(hw, FW2X_MPI_LED_ADDR,
+ (onoff) ? ((FW2X_LED_BLINK) | (FW2X_LED_BLINK << 2) | (FW2X_LED_BLINK << 4)):
+ (FW2X_LED_DEFAULT));
- AQ_DBG_EXIT(err);
- return (err);
+ AQ_DBG_EXIT(err);
+ return (err);
}
struct aq_firmware_ops aq_fw2x_ops =
{
- .reset = fw2x_reset,
+ .reset = fw2x_reset,
- .set_mode = fw2x_set_mode,
- .get_mode = fw2x_get_mode,
+ .set_mode = fw2x_set_mode,
+ .get_mode = fw2x_get_mode,
- .get_mac_addr = fw2x_get_mac_addr,
- .get_stats = fw2x_get_stats,
+ .get_mac_addr = fw2x_get_mac_addr,
+ .get_stats = fw2x_get_stats,
- .led_control = fw2x_led_control,
+ .led_control = fw2x_led_control,
};
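On the *fc assignment in fw2x_get_mode() above: assuming FW2X_FW_CAP_PAUSE and FW2X_FW_CAP_ASYM_PAUSE (defined outside this hunk) are the 64-bit masks for the CAPS_HI_PAUSE and CAPS_HI_ASYMMETRIC_PAUSE bits, those capabilities sit in the upper half of the MPI state word, so the shift by (32 + CAPS_HI_PAUSE) drops RX pause onto bit 0 and asymmetric (TX) pause onto bit 1, matching aq_fw_fc_ENABLE_RX / aq_fw_fc_ENABLE_TX from aq_fw.h:

    /* CAPS_HI_PAUSE == 3 and CAPS_HI_ASYMMETRIC_PAUSE == 4, so bits 35/36
     * of the state word land on bits 0/1 of the aq_fw_link_fc_t result. */
    *fc = (mpi_state & (FW2X_FW_CAP_PAUSE | FW2X_FW_CAP_ASYM_PAUSE)) >>
        (32 + CAPS_HI_PAUSE);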
Index: sys/dev/aq/aq_hw.h
===================================================================
--- sys/dev/aq/aq_hw.h
+++ sys/dev/aq/aq_hw.h
@@ -43,24 +43,25 @@
#include "aq_common.h"
#define AQ_WRITE_REG(hw, reg, value) writel(((hw)->hw_addr + (reg)), htole32(value))
-
+
#define AQ_READ_REG(hw, reg) le32toh(readl((hw)->hw_addr + reg))
#define AQ_WRITE_REG_BIT(hw, reg, msk, shift, value) do { \
- if (msk ^ ~0) { \
- u32 reg_old, reg_new = 0U; \
- reg_old = AQ_READ_REG(hw, reg); \
- reg_new = (reg_old & (~msk)) | (value << shift); \
- if (reg_old != reg_new) \
- AQ_WRITE_REG(hw, reg, reg_new); \
- } else { \
- AQ_WRITE_REG(hw, reg, value); \
- } } while(0)
+ if (msk ^ ~0) { \
+ uint32_t reg_old, reg_new = 0U; \
+ reg_old = AQ_READ_REG(hw, reg); \
+ reg_new = (reg_old & (~msk)) | (value << shift); \
+ if (reg_old != reg_new) \
+ AQ_WRITE_REG(hw, reg, reg_new); \
+ } else { \
+ AQ_WRITE_REG(hw, reg, value); \
+ } \
+} while(0)
#define AQ_READ_REG_BIT(a, reg, msk, shift) ( \
- ((AQ_READ_REG(a, reg) & msk) >> shift))
+ ((AQ_READ_REG(a, reg) & msk) >> shift))
#define AQ_HW_FLUSH() { (void)AQ_READ_REG(hw, 0x10); }
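A hedged usage sketch of the reflowed AQ_WRITE_REG_BIT read-modify-write macro; the register offset, mask, shift, and value are illustrative only, not real registers of this device:

    /* Update a hypothetical 2-bit field at bits [5:4] of register 0x100:
     * read the register, clear the masked bits, OR in (2 << 4), and write
     * back only if the value actually changed. */
    AQ_WRITE_REG_BIT(hw, 0x100, 0x30U, 4, 2U);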
@@ -70,104 +71,104 @@
/* Statistics */
struct aq_hw_stats {
- u64 crcerrs;
+ uint64_t crcerrs;
};
struct aq_hw_stats_s {
- u32 uprc;
- u32 mprc;
- u32 bprc;
- u32 erpt;
- u32 uptc;
- u32 mptc;
- u32 bptc;
- u32 erpr;
- u32 mbtc;
- u32 bbtc;
- u32 mbrc;
- u32 bbrc;
- u32 ubrc;
- u32 ubtc;
- u32 ptc;
- u32 prc;
- u32 dpc;
- u32 cprc;
+ uint32_t uprc;
+ uint32_t mprc;
+ uint32_t bprc;
+ uint32_t erpt;
+ uint32_t uptc;
+ uint32_t mptc;
+ uint32_t bptc;
+ uint32_t erpr;
+ uint32_t mbtc;
+ uint32_t bbtc;
+ uint32_t mbrc;
+ uint32_t bbrc;
+ uint32_t ubrc;
+ uint32_t ubtc;
+ uint32_t ptc;
+ uint32_t prc;
+ uint32_t dpc;
+ uint32_t cprc;
} __attribute__((__packed__));
union ip_addr {
- struct {
- u8 addr[16];
- } v6;
- struct {
- u8 padding[12];
- u8 addr[4];
- } v4;
+ struct {
+ uint8_t addr[16];
+ } v6;
+ struct {
+ uint8_t padding[12];
+ uint8_t addr[4];
+ } v4;
} __attribute__((__packed__));
struct aq_hw_fw_mbox {
- u32 version;
- u32 transaction_id;
- int error;
- struct aq_hw_stats_s stats;
+ uint32_t version;
+ uint32_t transaction_id;
+ int error;
+ struct aq_hw_stats_s stats;
} __attribute__((__packed__));
typedef struct aq_hw_fw_version {
- union {
- struct {
- u16 build_number;
- u8 minor_version;
- u8 major_version;
- };
- u32 raw;
- };
+ union {
+ struct {
+ uint16_t build_number;
+ uint8_t minor_version;
+ uint8_t major_version;
+ };
+ uint32_t raw;
+ };
} aq_hw_fw_version;
enum aq_hw_irq_type {
- aq_irq_invalid = 0,
- aq_irq_legacy = 1,
- aq_irq_msi = 2,
- aq_irq_msix = 3,
+ aq_irq_invalid = 0,
+ aq_irq_legacy = 1,
+ aq_irq_msi = 2,
+ aq_irq_msix = 3,
};
struct aq_hw_fc_info {
- bool fc_rx;
- bool fc_tx;
+ bool fc_rx;
+ bool fc_tx;
};
struct aq_hw {
- void *aq_dev;
- u8 *hw_addr;
- u32 regs_size;
-
- u8 mac_addr[ETHER_ADDR_LEN];
-
- enum aq_hw_irq_type irq_type;
-
- struct aq_hw_fc_info fc;
- u16 link_rate;
-
- u16 device_id;
- u16 subsystem_vendor_id;
- u16 subsystem_device_id;
- u16 vendor_id;
- u8 revision_id;
-
- /* Interrupt Moderation value. */
- int itr;
-
- /* Firmware-related stuff. */
- aq_hw_fw_version fw_version;
- const struct aq_firmware_ops* fw_ops;
- bool rbl_enabled;
- bool fast_start_enabled;
- bool flash_present;
- u32 chip_features;
- u64 fw_caps;
+ void *aq_dev;
+ uint8_t *hw_addr;
+ uint32_t regs_size;
+
+ uint8_t mac_addr[ETHER_ADDR_LEN];
+
+ enum aq_hw_irq_type irq_type;
+
+ struct aq_hw_fc_info fc;
+ uint16_t link_rate;
+
+ uint16_t device_id;
+ uint16_t subsystem_vendor_id;
+ uint16_t subsystem_device_id;
+ uint16_t vendor_id;
+ uint8_t revision_id;
+
+ /* Interrupt Moderation value. */
+ int itr;
+
+ /* Firmware-related stuff. */
+ aq_hw_fw_version fw_version;
+ const struct aq_firmware_ops* fw_ops;
+ bool rbl_enabled;
+ bool fast_start_enabled;
+ bool flash_present;
+ uint32_t chip_features;
+ uint64_t fw_caps;
bool lro_enabled;
- u32 mbox_addr;
- struct aq_hw_fw_mbox mbox;
+ uint32_t mbox_addr;
+ struct aq_hw_fw_mbox mbox;
};
#define aq_hw_s aq_hw
@@ -217,7 +218,7 @@
#define AQ_HW_CHIP_REVISION_B0 0x02000000U
#define AQ_HW_CHIP_REVISION_B1 0x04000000U
#define IS_CHIP_FEATURE(HW, _F_) (AQ_HW_CHIP_##_F_ & \
- (HW)->chip_features)
+ (HW)->chip_features)
#define AQ_HW_FW_VER_EXPECTED 0x01050006U
@@ -238,22 +239,22 @@
};
struct aq_rx_filter_vlan {
- u8 enable;
- u8 location;
- u16 vlan_id;
- u8 queue;
+ uint8_t enable;
+ uint8_t location;
+ uint16_t vlan_id;
+ uint8_t queue;
};
#define AQ_HW_VLAN_MAX_FILTERS 16U
#define AQ_HW_ETYPE_MAX_FILTERS 16U
struct aq_rx_filter_l2 {
- u8 enable;
- s8 queue;
- u8 location;
- u8 user_priority_en;
- u8 user_priority;
- u16 ethertype;
+ uint8_t enable;
+ int8_t queue;
+ uint8_t location;
+ uint8_t user_priority_en;
+ uint8_t user_priority;
+ uint16_t ethertype;
};
enum hw_atl_rx_ctrl_registers_l2 {
@@ -262,12 +263,12 @@
};
struct aq_rx_filter_l3l4 {
- u32 cmd;
- u8 location;
- u32 ip_dst[4];
- u32 ip_src[4];
- u16 p_dst;
- u16 p_src;
+ uint32_t cmd;
+ uint8_t location;
+ uint32_t ip_dst[4];
+ uint32_t ip_src[4];
+ uint16_t p_dst;
+ uint16_t p_src;
bool is_ipv6;
};
@@ -301,22 +302,23 @@
((location) - AQ_RX_FIRST_LOC_FL3L4)
enum aq_hw_fw_mpi_state_e {
- MPI_DEINIT = 0,
- MPI_RESET = 1,
- MPI_INIT = 2,
- MPI_POWER = 4,
+ MPI_DEINIT = 0,
+ MPI_RESET = 1,
+ MPI_INIT = 2,
+ MPI_POWER = 4,
};
-int aq_hw_get_mac_permanent(struct aq_hw *hw, u8 *mac);
+int aq_hw_get_mac_permanent(struct aq_hw *hw, uint8_t *mac);
-int aq_hw_mac_addr_set(struct aq_hw *hw, u8 *mac_addr, u8 index);
+int aq_hw_mac_addr_set(struct aq_hw *hw, uint8_t *mac_addr, uint8_t index);
/* link speed in mbps. "0" - no link detected */
-int aq_hw_get_link_state(struct aq_hw *hw, u32 *link_speed, struct aq_hw_fc_info *fc_neg);
+int aq_hw_get_link_state(struct aq_hw *hw, uint32_t *link_speed,
+ struct aq_hw_fc_info *fc_neg);
-int aq_hw_set_link_speed(struct aq_hw *hw, u32 speed);
+int aq_hw_set_link_speed(struct aq_hw *hw, uint32_t speed);
-int aq_hw_fw_downld_dwords(struct aq_hw *hw, u32 a, u32 *p, u32 cnt);
+int aq_hw_fw_downld_dwords(struct aq_hw *hw, uint32_t a, uint32_t *p, uint32_t cnt);
int aq_hw_reset(struct aq_hw *hw);
@@ -324,19 +326,21 @@
int aq_hw_mpi_read_stats(struct aq_hw *hw, struct aq_hw_fw_mbox *pmbox);
-int aq_hw_init(struct aq_hw *hw, u8 *mac_addr, u8 adm_irq, bool msix);
+int aq_hw_init(struct aq_hw *hw, uint8_t *mac_addr, uint8_t adm_irq, bool msix);
int aq_hw_start(struct aq_hw *hw);
int aq_hw_interrupt_moderation_set(struct aq_hw *hw);
-int aq_hw_get_fw_version(struct aq_hw *hw, u32 *fw_version);
+int aq_hw_get_fw_version(struct aq_hw *hw, uint32_t *fw_version);
int aq_hw_deinit(struct aq_hw *hw);
-int aq_hw_ver_match(const aq_hw_fw_version* ver_expected, const aq_hw_fw_version* ver_actual);
+int aq_hw_ver_match(const aq_hw_fw_version* ver_expected,
+ const aq_hw_fw_version* ver_actual);
-void aq_hw_set_promisc(struct aq_hw_s *self, bool l2_promisc, bool vlan_promisc, bool mc_promisc);
+void aq_hw_set_promisc(struct aq_hw_s *self, bool l2_promisc, bool vlan_promisc,
+ bool mc_promisc);
int aq_hw_set_power(struct aq_hw *hw, unsigned int power_state);
@@ -345,11 +349,11 @@
int hw_atl_b0_hw_vlan_promisc_set(struct aq_hw_s *self, bool promisc);
int hw_atl_b0_hw_vlan_set(struct aq_hw_s *self,
- struct aq_rx_filter_vlan *aq_vlans);
+ struct aq_rx_filter_vlan *aq_vlans);
-int aq_hw_rss_hash_set(struct aq_hw_s *self, u8 rss_key[HW_ATL_RSS_HASHKEY_SIZE]);
-int aq_hw_rss_hash_get(struct aq_hw_s *self, u8 rss_key[HW_ATL_RSS_HASHKEY_SIZE]);
-int aq_hw_rss_set(struct aq_hw_s *self, u8 rss_table[HW_ATL_RSS_INDIRECTION_TABLE_MAX]);
+int aq_hw_rss_hash_set(struct aq_hw_s *self, uint8_t rss_key[HW_ATL_RSS_HASHKEY_SIZE]);
+int aq_hw_rss_hash_get(struct aq_hw_s *self, uint8_t rss_key[HW_ATL_RSS_HASHKEY_SIZE]);
+int aq_hw_rss_set(struct aq_hw_s *self, uint8_t rss_table[HW_ATL_RSS_INDIRECTION_TABLE_MAX]);
int aq_hw_udp_rss_enable(struct aq_hw_s *self, bool enable);
#endif //_AQ_HW_H_
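
A note on the aq_hw_fw_version union above: the 32-bit raw view overlays a 16-bit build number and 8-bit minor and major fields, so on the little-endian targets this driver runs on, a constant such as AQ_HW_FW_VER_EXPECTED (0x01050006U) decodes as major 1, minor 5, build 6. A small sketch of that decode, assuming little-endian layout:

#include <stdint.h>
#include <stdio.h>

/* Mirrors the aq_hw_fw_version layout declared above. */
typedef union {
	struct {
		uint16_t build_number;
		uint8_t  minor_version;
		uint8_t  major_version;
	};
	uint32_t raw;
} demo_fw_version;

int
main(void)
{
	demo_fw_version v = { .raw = 0x01050006U };	/* AQ_HW_FW_VER_EXPECTED */

	/* On little-endian this prints 1.5.6. */
	printf("%u.%u.%u\n", (unsigned)v.major_version,
	    (unsigned)v.minor_version, (unsigned)v.build_number);
	return (0);
}
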
Index: sys/dev/aq/aq_hw.c
===================================================================
--- sys/dev/aq/aq_hw.c
+++ sys/dev/aq/aq_hw.c
@@ -32,11 +32,11 @@
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#include <sys/endian.h>
#include <sys/param.h>
#include <sys/systm.h>
-#include <machine/cpu.h>
+#include <sys/endian.h>
#include <sys/socket.h>
+#include <machine/cpu.h>
#include <net/if.h>
#include "aq_hw.h"
@@ -48,700 +48,707 @@
#define AQ_CFG_FW_MIN_VER_EXPECTED 0x01050006U
-int aq_hw_err_from_flags(struct aq_hw *hw)
+int
+aq_hw_err_from_flags(struct aq_hw *hw)
{
- return (0);
+ return (0);
}
-static void aq_hw_chip_features_init(struct aq_hw *hw, u32 *p)
+static void
+aq_hw_chip_features_init(struct aq_hw *hw, uint32_t *p)
{
- u32 chip_features = 0U;
- u32 val = reg_glb_mif_id_get(hw);
- u32 mif_rev = val & 0xFFU;
-
- if ((0xFU & mif_rev) == 1U) {
- chip_features |= AQ_HW_CHIP_REVISION_A0 |
- AQ_HW_CHIP_MPI_AQ |
- AQ_HW_CHIP_MIPS;
- } else if ((0xFU & mif_rev) == 2U) {
- chip_features |= AQ_HW_CHIP_REVISION_B0 |
- AQ_HW_CHIP_MPI_AQ |
- AQ_HW_CHIP_MIPS |
- AQ_HW_CHIP_TPO2 |
- AQ_HW_CHIP_RPF2;
- } else if ((0xFU & mif_rev) == 0xAU) {
- chip_features |= AQ_HW_CHIP_REVISION_B1 |
- AQ_HW_CHIP_MPI_AQ |
- AQ_HW_CHIP_MIPS |
- AQ_HW_CHIP_TPO2 |
- AQ_HW_CHIP_RPF2;
- }
-
- *p = chip_features;
+ uint32_t chip_features = 0U;
+ uint32_t val = reg_glb_mif_id_get(hw);
+ uint32_t mif_rev = val & 0xFFU;
+
+ if ((0xFU & mif_rev) == 1U) {
+ chip_features |= AQ_HW_CHIP_REVISION_A0 | AQ_HW_CHIP_MPI_AQ |
+ AQ_HW_CHIP_MIPS;
+ } else if ((0xFU & mif_rev) == 2U) {
+ chip_features |= AQ_HW_CHIP_REVISION_B0 | AQ_HW_CHIP_MPI_AQ |
+ AQ_HW_CHIP_MIPS | AQ_HW_CHIP_TPO2 | AQ_HW_CHIP_RPF2;
+ } else if ((0xFU & mif_rev) == 0xAU) {
+ chip_features |= AQ_HW_CHIP_REVISION_B1 | AQ_HW_CHIP_MPI_AQ |
+ AQ_HW_CHIP_MIPS | AQ_HW_CHIP_TPO2 | AQ_HW_CHIP_RPF2;
+ }
+
+ *p = chip_features;
}
-int aq_hw_fw_downld_dwords(struct aq_hw *hw, u32 a, u32 *p, u32 cnt)
+int
+aq_hw_fw_downld_dwords(struct aq_hw *hw, uint32_t a, uint32_t *p, uint32_t cnt)
{
- int err = 0;
+ int err = 0;
// AQ_DBG_ENTER();
- AQ_HW_WAIT_FOR(reg_glb_cpu_sem_get(hw,
- AQ_HW_FW_SM_RAM) == 1U,
- 1U, 10000U);
+ AQ_HW_WAIT_FOR(reg_glb_cpu_sem_get(hw, AQ_HW_FW_SM_RAM) == 1U, 1U,
+ 10000U);
- if (err < 0) {
- bool is_locked;
+ if (err < 0) {
+ bool is_locked;
- reg_glb_cpu_sem_set(hw, 1U, AQ_HW_FW_SM_RAM);
- is_locked = reg_glb_cpu_sem_get(hw, AQ_HW_FW_SM_RAM);
- if (!is_locked) {
- err = -ETIME;
- goto err_exit;
- }
- }
+ reg_glb_cpu_sem_set(hw, 1U, AQ_HW_FW_SM_RAM);
+ is_locked = reg_glb_cpu_sem_get(hw, AQ_HW_FW_SM_RAM);
+ if (!is_locked) {
+ err = -ETIME;
+ goto err_exit;
+ }
+ }
- mif_mcp_up_mailbox_addr_set(hw, a);
+ mif_mcp_up_mailbox_addr_set(hw, a);
- for (++cnt; --cnt && !err;) {
- mif_mcp_up_mailbox_execute_operation_set(hw, 1);
+ for (++cnt; --cnt && !err;) {
+ mif_mcp_up_mailbox_execute_operation_set(hw, 1);
- if (IS_CHIP_FEATURE(hw, REVISION_B1))
- AQ_HW_WAIT_FOR(a != mif_mcp_up_mailbox_addr_get(hw), 1U, 1000U);
- else
- AQ_HW_WAIT_FOR(!mif_mcp_up_mailbox_busy_get(hw), 1, 1000U);
+ if (IS_CHIP_FEATURE(hw, REVISION_B1))
+ AQ_HW_WAIT_FOR(a != mif_mcp_up_mailbox_addr_get(hw),
+ 1U, 1000U);
+ else
+ AQ_HW_WAIT_FOR(!mif_mcp_up_mailbox_busy_get(hw), 1,
+ 1000U);
- *(p++) = mif_mcp_up_mailbox_data_get(hw);
- }
+ *(p++) = mif_mcp_up_mailbox_data_get(hw);
+ }
- reg_glb_cpu_sem_set(hw, 1U, AQ_HW_FW_SM_RAM);
+ reg_glb_cpu_sem_set(hw, 1U, AQ_HW_FW_SM_RAM);
err_exit:
// AQ_DBG_EXIT(err);
- return (err);
+ return (err);
}
-int aq_hw_ver_match(const aq_hw_fw_version* ver_expected, const aq_hw_fw_version* ver_actual)
+int
+aq_hw_ver_match(const aq_hw_fw_version* ver_expected,
+ const aq_hw_fw_version* ver_actual)
{
- AQ_DBG_ENTER();
+ AQ_DBG_ENTER();
- if (ver_actual->major_version >= ver_expected->major_version)
- return (true);
- if (ver_actual->minor_version >= ver_expected->minor_version)
- return (true);
- if (ver_actual->build_number >= ver_expected->build_number)
- return (true);
+ if (ver_actual->major_version >= ver_expected->major_version)
+ return (true);
+ if (ver_actual->minor_version >= ver_expected->minor_version)
+ return (true);
+ if (ver_actual->build_number >= ver_expected->build_number)
+ return (true);
- return (false);
+ return (false);
}
-static int aq_hw_init_ucp(struct aq_hw *hw)
+static int
+aq_hw_init_ucp(struct aq_hw *hw)
{
- int err = 0;
- AQ_DBG_ENTER();
+ int err = 0;
+ AQ_DBG_ENTER();
- hw->fw_version.raw = 0;
+ hw->fw_version.raw = 0;
- err = aq_fw_reset(hw);
- if (err != EOK) {
- aq_log_error("aq_hw_init_ucp(): F/W reset failed, err %d", err);
- return (err);
- }
+ err = aq_fw_reset(hw);
+ if (err != EOK) {
+ aq_log_error("aq_hw_init_ucp(): F/W reset failed, err %d", err);
+ return (err);
+ }
- aq_hw_chip_features_init(hw, &hw->chip_features);
- err = aq_fw_ops_init(hw);
- if (err < 0) {
- aq_log_error("could not initialize F/W ops, err %d", err);
- return (-1);
- }
+ aq_hw_chip_features_init(hw, &hw->chip_features);
+ err = aq_fw_ops_init(hw);
+ if (err < 0) {
+ aq_log_error("could not initialize F/W ops, err %d", err);
+ return (-1);
+ }
- if (hw->fw_version.major_version == 1) {
- if (!AQ_READ_REG(hw, 0x370)) {
- unsigned int rnd = 0;
- unsigned int ucp_0x370 = 0;
+ if (hw->fw_version.major_version == 1) {
+ if (!AQ_READ_REG(hw, 0x370)) {
+ unsigned int rnd = 0;
+ unsigned int ucp_0x370 = 0;
- rnd = arc4random();
+ rnd = arc4random();
- ucp_0x370 = 0x02020202 | (0xFEFEFEFE & rnd);
- AQ_WRITE_REG(hw, AQ_HW_UCP_0X370_REG, ucp_0x370);
- }
+ ucp_0x370 = 0x02020202 | (0xFEFEFEFE & rnd);
+ AQ_WRITE_REG(hw, AQ_HW_UCP_0X370_REG, ucp_0x370);
+ }
- reg_glb_cpu_scratch_scp_set(hw, 0, 25);
- }
+ reg_glb_cpu_scratch_scp_set(hw, 0, 25);
+ }
- /* check 10 times by 1ms */
- AQ_HW_WAIT_FOR((hw->mbox_addr = AQ_READ_REG(hw, 0x360)) != 0, 400U, 20);
+ /* check 10 times by 1ms */
+ AQ_HW_WAIT_FOR((hw->mbox_addr = AQ_READ_REG(hw, 0x360)) != 0, 400U, 20);
- aq_hw_fw_version ver_expected = { .raw = AQ_CFG_FW_MIN_VER_EXPECTED };
- if (!aq_hw_ver_match(&ver_expected, &hw->fw_version))
- aq_log_error("atlantic: aq_hw_init_ucp(), wrong FW version: expected:%x actual:%x",
- AQ_CFG_FW_MIN_VER_EXPECTED, hw->fw_version.raw);
+ aq_hw_fw_version ver_expected = { .raw = AQ_CFG_FW_MIN_VER_EXPECTED };
+ if (!aq_hw_ver_match(&ver_expected, &hw->fw_version))
+ aq_log_error("atlantic: aq_hw_init_ucp(), wrong FW version: expected:%x actual:%x",
+ AQ_CFG_FW_MIN_VER_EXPECTED, hw->fw_version.raw);
- AQ_DBG_EXIT(err);
- return (err);
+ AQ_DBG_EXIT(err);
+ return (err);
}
-int aq_hw_mpi_create(struct aq_hw *hw)
+int
+aq_hw_mpi_create(struct aq_hw *hw)
{
- int err = 0;
+ int err = 0;
- AQ_DBG_ENTER();
- err = aq_hw_init_ucp(hw);
- if (err < 0)
- goto err_exit;
+ AQ_DBG_ENTER();
+ err = aq_hw_init_ucp(hw);
+ if (err < 0)
+ goto err_exit;
err_exit:
- AQ_DBG_EXIT(err);
- return (err);
+ AQ_DBG_EXIT(err);
+ return (err);
}
-int aq_hw_mpi_read_stats(struct aq_hw *hw, struct aq_hw_fw_mbox *pmbox)
+int
+aq_hw_mpi_read_stats(struct aq_hw *hw, struct aq_hw_fw_mbox *pmbox)
{
- int err = 0;
+ int err = 0;
// AQ_DBG_ENTER();
- if (hw->fw_ops && hw->fw_ops->get_stats) {
- err = hw->fw_ops->get_stats(hw, &pmbox->stats);
- } else {
- err = -ENOTSUP;
- aq_log_error("get_stats() not supported by F/W");
- }
+ if (hw->fw_ops && hw->fw_ops->get_stats) {
+ err = hw->fw_ops->get_stats(hw, &pmbox->stats);
+ } else {
+ err = -ENOTSUP;
+ aq_log_error("get_stats() not supported by F/W");
+ }
- if (err == EOK) {
- pmbox->stats.dpc = reg_rx_dma_stat_counter7get(hw);
- pmbox->stats.cprc = stats_rx_lro_coalesced_pkt_count0_get(hw);
- }
+ if (err == EOK) {
+ pmbox->stats.dpc = reg_rx_dma_stat_counter7get(hw);
+ pmbox->stats.cprc = stats_rx_lro_coalesced_pkt_count0_get(hw);
+ }
// AQ_DBG_EXIT(err);
- return (err);
+ return (err);
}
-static int aq_hw_mpi_set(struct aq_hw *hw,
- enum aq_hw_fw_mpi_state_e state, u32 speed)
+static int
+aq_hw_mpi_set(struct aq_hw *hw, enum aq_hw_fw_mpi_state_e state, uint32_t speed)
{
- int err = -ENOTSUP;
- AQ_DBG_ENTERA("speed %d", speed);
+ int err = -ENOTSUP;
+ AQ_DBG_ENTERA("speed %d", speed);
- if (hw->fw_ops && hw->fw_ops->set_mode) {
- err = hw->fw_ops->set_mode(hw, state, speed);
- } else {
- aq_log_error("set_mode() not supported by F/W");
- }
+ if (hw->fw_ops && hw->fw_ops->set_mode) {
+ err = hw->fw_ops->set_mode(hw, state, speed);
+ } else {
+ aq_log_error("set_mode() not supported by F/W");
+ }
- AQ_DBG_EXIT(err);
- return (err);
+ AQ_DBG_EXIT(err);
+ return (err);
}
-int aq_hw_set_link_speed(struct aq_hw *hw, u32 speed)
+int
+aq_hw_set_link_speed(struct aq_hw *hw, uint32_t speed)
{
- return aq_hw_mpi_set(hw, MPI_INIT, speed);
+ return aq_hw_mpi_set(hw, MPI_INIT, speed);
}
-int aq_hw_get_link_state(struct aq_hw *hw, u32 *link_speed, struct aq_hw_fc_info *fc_neg)
+int
+aq_hw_get_link_state(struct aq_hw *hw, uint32_t *link_speed, struct aq_hw_fc_info *fc_neg)
{
- int err = EOK;
+ int err = EOK;
// AQ_DBG_ENTER();
- enum aq_hw_fw_mpi_state_e mode;
- aq_fw_link_speed_t speed = aq_fw_none;
- aq_fw_link_fc_t fc;
+ enum aq_hw_fw_mpi_state_e mode;
+ aq_fw_link_speed_t speed = aq_fw_none;
+ aq_fw_link_fc_t fc;
- if (hw->fw_ops && hw->fw_ops->get_mode) {
- err = hw->fw_ops->get_mode(hw, &mode, &speed, &fc);
- } else {
- aq_log_error("get_mode() not supported by F/W");
+ if (hw->fw_ops && hw->fw_ops->get_mode) {
+ err = hw->fw_ops->get_mode(hw, &mode, &speed, &fc);
+ } else {
+ aq_log_error("get_mode() not supported by F/W");
AQ_DBG_EXIT(-ENOTSUP);
- return (-ENOTSUP);
- }
+ return (-ENOTSUP);
+ }
- if (err < 0) {
- aq_log_error("get_mode() failed, err %d", err);
+ if (err < 0) {
+ aq_log_error("get_mode() failed, err %d", err);
AQ_DBG_EXIT(err);
- return (err);
- }
+ return (err);
+ }
*link_speed = 0;
- if (mode != MPI_INIT)
- return (0);
+ if (mode != MPI_INIT)
+ return (0);
+
+ switch (speed) {
+ case aq_fw_10G:
+ *link_speed = 10000U;
+ break;
+ case aq_fw_5G:
+ *link_speed = 5000U;
+ break;
+ case aq_fw_2G5:
+ *link_speed = 2500U;
+ break;
+ case aq_fw_1G:
+ *link_speed = 1000U;
+ break;
+ case aq_fw_100M:
+ *link_speed = 100U;
+ break;
+ default:
+ *link_speed = 0U;
+ break;
+ }
- switch (speed) {
- case aq_fw_10G:
- *link_speed = 10000U;
- break;
+ fc_neg->fc_rx = !!(fc & aq_fw_fc_ENABLE_RX);
+ fc_neg->fc_tx = !!(fc & aq_fw_fc_ENABLE_TX);
- case aq_fw_5G:
- *link_speed = 5000U;
- break;
+ // AQ_DBG_EXIT(0);
+ return (0);
+}
- case aq_fw_2G5:
- *link_speed = 2500U;
- break;
+int
+aq_hw_get_mac_permanent(struct aq_hw *hw, uint8_t *mac)
+{
+ int err = -ENOTSUP;
+ AQ_DBG_ENTER();
- case aq_fw_1G:
- *link_speed = 1000U;
- break;
+ if (hw->fw_ops && hw->fw_ops->get_mac_addr)
+ err = hw->fw_ops->get_mac_addr(hw, mac);
- case aq_fw_100M:
- *link_speed = 100U;
- break;
+ /* Couldn't get MAC address from HW. Use auto-generated one. */
+ if ((mac[0] & 1) || ((mac[0] | mac[1] | mac[2]) == 0)) {
+ uint16_t rnd;
+ uint32_t h = 0;
+ uint32_t l = 0;
- default:
- *link_speed = 0U;
- break;
- }
+		printf("atlantic: HW MAC address %x:%x:%x:%x:%x:%x is multicast or empty MAC\n",
+		    mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
+		printf("atlantic: Use random MAC address\n");
- fc_neg->fc_rx = !!(fc & aq_fw_fc_ENABLE_RX);
- fc_neg->fc_tx = !!(fc & aq_fw_fc_ENABLE_TX);
+ rnd = arc4random();
- // AQ_DBG_EXIT(0);
- return (0);
-}
+ /* chip revision */
+ l = 0xE3000000U | (0xFFFFU & rnd) | (0x00 << 16);
+ h = 0x8001300EU;
-int aq_hw_get_mac_permanent(struct aq_hw *hw, u8 *mac)
-{
- int err = -ENOTSUP;
- AQ_DBG_ENTER();
-
- if (hw->fw_ops && hw->fw_ops->get_mac_addr)
- err = hw->fw_ops->get_mac_addr(hw, mac);
-
- /* Couldn't get MAC address from HW. Use auto-generated one. */
- if ((mac[0] & 1) || ((mac[0] | mac[1] | mac[2]) == 0)) {
- u16 rnd;
- u32 h = 0;
- u32 l = 0;
-
- printf("atlantic: HW MAC address %x:%x:%x:%x:%x:%x is multicast or empty MAC", mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
- printf("atlantic: Use random MAC address");
-
- rnd = arc4random();
-
- /* chip revision */
- l = 0xE3000000U
- | (0xFFFFU & rnd)
- | (0x00 << 16);
- h = 0x8001300EU;
-
- mac[5] = (u8)(0xFFU & l);
- l >>= 8;
- mac[4] = (u8)(0xFFU & l);
- l >>= 8;
- mac[3] = (u8)(0xFFU & l);
- l >>= 8;
- mac[2] = (u8)(0xFFU & l);
- mac[1] = (u8)(0xFFU & h);
- h >>= 8;
- mac[0] = (u8)(0xFFU & h);
-
- err = EOK;
- }
-
- AQ_DBG_EXIT(err);
- return (err);
+ mac[5] = (uint8_t)(0xFFU & l);
+ l >>= 8;
+ mac[4] = (uint8_t)(0xFFU & l);
+ l >>= 8;
+ mac[3] = (uint8_t)(0xFFU & l);
+ l >>= 8;
+ mac[2] = (uint8_t)(0xFFU & l);
+ mac[1] = (uint8_t)(0xFFU & h);
+ h >>= 8;
+ mac[0] = (uint8_t)(0xFFU & h);
+
+ err = EOK;
+ }
+
+ AQ_DBG_EXIT(err);
+ return (err);
}
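
The fallback in aq_hw_get_mac_permanent() above keys off two conditions: a first octet with the I/G bit set is a group (multicast) address and cannot be used as a station MAC, and an all-zero OUI means the firmware returned nothing useful. A standalone sketch of that validity test, with a hypothetical helper name:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical helper mirroring the check in aq_hw_get_mac_permanent(). */
static bool
mac_is_unusable(const uint8_t mac[6])
{
	if (mac[0] & 1)				/* I/G bit: multicast/group address */
		return (true);
	if ((mac[0] | mac[1] | mac[2]) == 0)	/* empty OUI: no address provided */
		return (true);
	return (false);
}

int
main(void)
{
	const uint8_t good[6]  = { 0x00, 0x17, 0xB6, 0x01, 0x02, 0x03 };
	const uint8_t mcast[6] = { 0x01, 0x00, 0x5E, 0x00, 0x00, 0xFB };

	printf("%d %d\n", mac_is_unusable(good), mac_is_unusable(mcast));	/* 0 1 */
	return (0);
}
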
-int aq_hw_deinit(struct aq_hw *hw)
+int
+aq_hw_deinit(struct aq_hw *hw)
{
- AQ_DBG_ENTER();
- aq_hw_mpi_set(hw, MPI_DEINIT, 0);
- AQ_DBG_EXIT(0);
- return (0);
+ AQ_DBG_ENTER();
+ aq_hw_mpi_set(hw, MPI_DEINIT, 0);
+ AQ_DBG_EXIT(0);
+ return (0);
}
-int aq_hw_set_power(struct aq_hw *hw, unsigned int power_state)
+int
+aq_hw_set_power(struct aq_hw *hw, unsigned int power_state)
{
- AQ_DBG_ENTER();
- aq_hw_mpi_set(hw, MPI_POWER, 0);
- AQ_DBG_EXIT(0);
- return (0);
+ AQ_DBG_ENTER();
+ aq_hw_mpi_set(hw, MPI_POWER, 0);
+ AQ_DBG_EXIT(0);
+ return (0);
}
/* HW NIC functions */
-int aq_hw_reset(struct aq_hw *hw)
+int
+aq_hw_reset(struct aq_hw *hw)
{
- int err = 0;
+ int err = 0;
- AQ_DBG_ENTER();
+ AQ_DBG_ENTER();
- err = aq_fw_reset(hw);
- if (err < 0)
- goto err_exit;
+ err = aq_fw_reset(hw);
+ if (err < 0)
+ goto err_exit;
- itr_irq_reg_res_dis_set(hw, 0);
- itr_res_irq_set(hw, 1);
+ itr_irq_reg_res_dis_set(hw, 0);
+ itr_res_irq_set(hw, 1);
- /* check 10 times by 1ms */
- AQ_HW_WAIT_FOR(itr_res_irq_get(hw) == 0, 1000, 10);
- if (err < 0) {
- printf("atlantic: IRQ reset failed: %d", err);
- goto err_exit;
- }
+ /* check 10 times by 1ms */
+ AQ_HW_WAIT_FOR(itr_res_irq_get(hw) == 0, 1000, 10);
+ if (err < 0) {
+ printf("atlantic: IRQ reset failed: %d", err);
+ goto err_exit;
+ }
- if (hw->fw_ops && hw->fw_ops->reset)
- hw->fw_ops->reset(hw);
+ if (hw->fw_ops && hw->fw_ops->reset)
+ hw->fw_ops->reset(hw);
- err = aq_hw_err_from_flags(hw);
+ err = aq_hw_err_from_flags(hw);
err_exit:
- AQ_DBG_EXIT(err);
- return (err);
+ AQ_DBG_EXIT(err);
+ return (err);
}
-static int aq_hw_qos_set(struct aq_hw *hw)
+static int
+aq_hw_qos_set(struct aq_hw *hw)
{
- u32 tc = 0U;
- u32 buff_size = 0U;
- unsigned int i_priority = 0U;
- int err = 0;
-
- AQ_DBG_ENTER();
- /* TPS Descriptor rate init */
- tps_tx_pkt_shed_desc_rate_curr_time_res_set(hw, 0x0U);
- tps_tx_pkt_shed_desc_rate_lim_set(hw, 0xA);
-
- /* TPS VM init */
- tps_tx_pkt_shed_desc_vm_arb_mode_set(hw, 0U);
-
- /* TPS TC credits init */
- tps_tx_pkt_shed_desc_tc_arb_mode_set(hw, 0U);
- tps_tx_pkt_shed_data_arb_mode_set(hw, 0U);
-
- tps_tx_pkt_shed_tc_data_max_credit_set(hw, 0xFFF, 0U);
- tps_tx_pkt_shed_tc_data_weight_set(hw, 0x64, 0U);
- tps_tx_pkt_shed_desc_tc_max_credit_set(hw, 0x50, 0U);
- tps_tx_pkt_shed_desc_tc_weight_set(hw, 0x1E, 0U);
-
- /* Tx buf size */
- buff_size = AQ_HW_TXBUF_MAX;
-
- tpb_tx_pkt_buff_size_per_tc_set(hw, buff_size, tc);
- tpb_tx_buff_hi_threshold_per_tc_set(hw,
- (buff_size * (1024 / 32U) * 66U) /
- 100U, tc);
- tpb_tx_buff_lo_threshold_per_tc_set(hw,
- (buff_size * (1024 / 32U) * 50U) /
- 100U, tc);
-
- /* QoS Rx buf size per TC */
- tc = 0;
- buff_size = AQ_HW_RXBUF_MAX;
-
- rpb_rx_pkt_buff_size_per_tc_set(hw, buff_size, tc);
- rpb_rx_buff_hi_threshold_per_tc_set(hw,
- (buff_size *
- (1024U / 32U) * 66U) /
- 100U, tc);
- rpb_rx_buff_lo_threshold_per_tc_set(hw,
- (buff_size *
- (1024U / 32U) * 50U) /
- 100U, tc);
-
- /* QoS 802.1p priority -> TC mapping */
- for (i_priority = 8U; i_priority--;)
- rpf_rpb_user_priority_tc_map_set(hw, i_priority, 0U);
-
- err = aq_hw_err_from_flags(hw);
- AQ_DBG_EXIT(err);
- return (err);
+ uint32_t tc = 0U;
+ uint32_t buff_size = 0U;
+ unsigned int i_priority = 0U;
+ int err = 0;
+
+ AQ_DBG_ENTER();
+ /* TPS Descriptor rate init */
+ tps_tx_pkt_shed_desc_rate_curr_time_res_set(hw, 0x0U);
+ tps_tx_pkt_shed_desc_rate_lim_set(hw, 0xA);
+
+ /* TPS VM init */
+ tps_tx_pkt_shed_desc_vm_arb_mode_set(hw, 0U);
+
+ /* TPS TC credits init */
+ tps_tx_pkt_shed_desc_tc_arb_mode_set(hw, 0U);
+ tps_tx_pkt_shed_data_arb_mode_set(hw, 0U);
+
+ tps_tx_pkt_shed_tc_data_max_credit_set(hw, 0xFFF, 0U);
+ tps_tx_pkt_shed_tc_data_weight_set(hw, 0x64, 0U);
+ tps_tx_pkt_shed_desc_tc_max_credit_set(hw, 0x50, 0U);
+ tps_tx_pkt_shed_desc_tc_weight_set(hw, 0x1E, 0U);
+
+ /* Tx buf size */
+ buff_size = AQ_HW_TXBUF_MAX;
+
+ tpb_tx_pkt_buff_size_per_tc_set(hw, buff_size, tc);
+ tpb_tx_buff_hi_threshold_per_tc_set(hw,
+ (buff_size * (1024 / 32U) * 66U) / 100U, tc);
+ tpb_tx_buff_lo_threshold_per_tc_set(hw,
+ (buff_size * (1024 / 32U) * 50U) / 100U, tc);
+
+ /* QoS Rx buf size per TC */
+ tc = 0;
+ buff_size = AQ_HW_RXBUF_MAX;
+
+ rpb_rx_pkt_buff_size_per_tc_set(hw, buff_size, tc);
+ rpb_rx_buff_hi_threshold_per_tc_set(hw,
+ (buff_size * (1024U / 32U) * 66U) / 100U, tc);
+ rpb_rx_buff_lo_threshold_per_tc_set(hw,
+ (buff_size * (1024U / 32U) * 50U) / 100U, tc);
+
+ /* QoS 802.1p priority -> TC mapping */
+ for (i_priority = 8U; i_priority--;)
+ rpf_rpb_user_priority_tc_map_set(hw, i_priority, 0U);
+
+ err = aq_hw_err_from_flags(hw);
+ AQ_DBG_EXIT(err);
+ return (err);
}
-static int aq_hw_offload_set(struct aq_hw *hw)
+static int
+aq_hw_offload_set(struct aq_hw *hw)
{
- int err = 0;
-
- AQ_DBG_ENTER();
- /* TX checksums offloads*/
- tpo_ipv4header_crc_offload_en_set(hw, 1);
- tpo_tcp_udp_crc_offload_en_set(hw, 1);
- if (err < 0)
- goto err_exit;
-
- /* RX checksums offloads*/
- rpo_ipv4header_crc_offload_en_set(hw, 1);
- rpo_tcp_udp_crc_offload_en_set(hw, 1);
- if (err < 0)
- goto err_exit;
-
- /* LSO offloads*/
- tdm_large_send_offload_en_set(hw, 0xFFFFFFFFU);
- if (err < 0)
- goto err_exit;
-
-/* LRO offloads */
- {
- u32 i = 0;
- u32 val = (8U < HW_ATL_B0_LRO_RXD_MAX) ? 0x3U :
- ((4U < HW_ATL_B0_LRO_RXD_MAX) ? 0x2U :
- ((2U < HW_ATL_B0_LRO_RXD_MAX) ? 0x1U : 0x0));
+ int err = 0;
- for (i = 0; i < HW_ATL_B0_RINGS_MAX; i++)
- rpo_lro_max_num_of_descriptors_set(hw, val, i);
+ AQ_DBG_ENTER();
+ /* TX checksums offloads*/
+ tpo_ipv4header_crc_offload_en_set(hw, 1);
+ tpo_tcp_udp_crc_offload_en_set(hw, 1);
+ if (err < 0)
+ goto err_exit;
+
+ /* RX checksums offloads*/
+ rpo_ipv4header_crc_offload_en_set(hw, 1);
+ rpo_tcp_udp_crc_offload_en_set(hw, 1);
+ if (err < 0)
+ goto err_exit;
+
+ /* LSO offloads*/
+ tdm_large_send_offload_en_set(hw, 0xFFFFFFFFU);
+ if (err < 0)
+ goto err_exit;
- rpo_lro_time_base_divider_set(hw, 0x61AU);
- rpo_lro_inactive_interval_set(hw, 0);
- /* the LRO timebase divider is 5 uS (0x61a),
- * to get a maximum coalescing interval of 250 uS,
- * we need to multiply by 50(0x32) to get
- * the default value 250 uS
- */
- rpo_lro_max_coalescing_interval_set(hw, 50);
+/* LRO offloads */
+ {
+ uint32_t i = 0;
+ uint32_t val = (8U < HW_ATL_B0_LRO_RXD_MAX) ? 0x3U :
+ ((4U < HW_ATL_B0_LRO_RXD_MAX) ? 0x2U :
+ ((2U < HW_ATL_B0_LRO_RXD_MAX) ? 0x1U : 0x0));
+
+ for (i = 0; i < HW_ATL_B0_RINGS_MAX; i++)
+ rpo_lro_max_num_of_descriptors_set(hw, val, i);
+
+ rpo_lro_time_base_divider_set(hw, 0x61AU);
+ rpo_lro_inactive_interval_set(hw, 0);
+ /* the LRO timebase divider is 5 uS (0x61a),
+ * to get a maximum coalescing interval of 250 uS,
+ * we need to multiply by 50(0x32) to get
+ * the default value 250 uS
+ */
+ rpo_lro_max_coalescing_interval_set(hw, 50);
- rpo_lro_qsessions_lim_set(hw, 1U);
+ rpo_lro_qsessions_lim_set(hw, 1U);
- rpo_lro_total_desc_lim_set(hw, 2U);
+ rpo_lro_total_desc_lim_set(hw, 2U);
- rpo_lro_patch_optimization_en_set(hw, 0U);
+ rpo_lro_patch_optimization_en_set(hw, 0U);
- rpo_lro_min_pay_of_first_pkt_set(hw, 10U);
+ rpo_lro_min_pay_of_first_pkt_set(hw, 10U);
- rpo_lro_pkt_lim_set(hw, 1U);
+ rpo_lro_pkt_lim_set(hw, 1U);
- rpo_lro_en_set(hw, (hw->lro_enabled ? 0xFFFFFFFFU : 0U));
- }
+ rpo_lro_en_set(hw, (hw->lro_enabled ? 0xFFFFFFFFU : 0U));
+ }
- err = aq_hw_err_from_flags(hw);
+ err = aq_hw_err_from_flags(hw);
err_exit:
- AQ_DBG_EXIT(err);
- return (err);
+ AQ_DBG_EXIT(err);
+ return (err);
}
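
The LRO comment in aq_hw_offload_set() carries a unit conversion: with the time base divider set to 0x61A, the LRO timer ticks every 5 microseconds, so programming a maximum coalescing value of 50 ticks yields the intended 250 microsecond interval. A one-line check of that arithmetic, with the 5 us tick taken from the comment rather than from separate hardware documentation:

#include <stdio.h>

int
main(void)
{
	const unsigned tick_us = 5;			/* LRO timebase tick, per the comment */
	const unsigned max_coalescing_ticks = 50;	/* value written by the driver */

	printf("%u us\n", tick_us * max_coalescing_ticks);	/* 250 us */
	return (0);
}
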
-static int aq_hw_init_tx_path(struct aq_hw *hw)
+static int
+aq_hw_init_tx_path(struct aq_hw *hw)
{
- int err = 0;
+ int err = 0;
- AQ_DBG_ENTER();
+ AQ_DBG_ENTER();
- /* Tx TC/RSS number config */
- tpb_tx_tc_mode_set(hw, 1U);
+ /* Tx TC/RSS number config */
+ tpb_tx_tc_mode_set(hw, 1U);
- thm_lso_tcp_flag_of_first_pkt_set(hw, 0x0FF6U);
- thm_lso_tcp_flag_of_middle_pkt_set(hw, 0x0FF6U);
- thm_lso_tcp_flag_of_last_pkt_set(hw, 0x0F7FU);
+ thm_lso_tcp_flag_of_first_pkt_set(hw, 0x0FF6U);
+ thm_lso_tcp_flag_of_middle_pkt_set(hw, 0x0FF6U);
+ thm_lso_tcp_flag_of_last_pkt_set(hw, 0x0F7FU);
- /* Tx interrupts */
- tdm_tx_desc_wr_wb_irq_en_set(hw, 1U);
+ /* Tx interrupts */
+ tdm_tx_desc_wr_wb_irq_en_set(hw, 1U);
- /* misc */
- AQ_WRITE_REG(hw, 0x00007040U, 0x00010000U);//IS_CHIP_FEATURE(TPO2) ? 0x00010000U : 0x00000000U);
- tdm_tx_dca_en_set(hw, 0U);
- tdm_tx_dca_mode_set(hw, 0U);
+ /* misc */
+ AQ_WRITE_REG(hw, 0x00007040U, 0x00010000U);//IS_CHIP_FEATURE(TPO2) ? 0x00010000U : 0x00000000U);
+ tdm_tx_dca_en_set(hw, 0U);
+ tdm_tx_dca_mode_set(hw, 0U);
- tpb_tx_path_scp_ins_en_set(hw, 1U);
+ tpb_tx_path_scp_ins_en_set(hw, 1U);
- err = aq_hw_err_from_flags(hw);
- AQ_DBG_EXIT(err);
- return (err);
+ err = aq_hw_err_from_flags(hw);
+ AQ_DBG_EXIT(err);
+ return (err);
}
-static int aq_hw_init_rx_path(struct aq_hw *hw)
+static int
+aq_hw_init_rx_path(struct aq_hw *hw)
{
- //struct aq_nic_cfg_s *cfg = hw->aq_nic_cfg;
- unsigned int control_reg_val = 0U;
- int i;
- int err;
+ //struct aq_nic_cfg_s *cfg = hw->aq_nic_cfg;
+ unsigned int control_reg_val = 0U;
+ int i;
+ int err;
- AQ_DBG_ENTER();
- /* Rx TC/RSS number config */
- rpb_rpf_rx_traf_class_mode_set(hw, 1U);
+ AQ_DBG_ENTER();
+ /* Rx TC/RSS number config */
+ rpb_rpf_rx_traf_class_mode_set(hw, 1U);
- /* Rx flow control */
- rpb_rx_flow_ctl_mode_set(hw, 1U);
+ /* Rx flow control */
+ rpb_rx_flow_ctl_mode_set(hw, 1U);
- /* RSS Ring selection */
- reg_rx_flr_rss_control1set(hw, 0xB3333333U);
+ /* RSS Ring selection */
+ reg_rx_flr_rss_control1set(hw, 0xB3333333U);
- /* Multicast filters */
- for (i = AQ_HW_MAC_MAX; i--;) {
- rpfl2_uc_flr_en_set(hw, (i == 0U) ? 1U : 0U, i);
- rpfl2unicast_flr_act_set(hw, 1U, i);
- }
+ /* Multicast filters */
+ for (i = AQ_HW_MAC_MAX; i--;) {
+ rpfl2_uc_flr_en_set(hw, (i == 0U) ? 1U : 0U, i);
+ rpfl2unicast_flr_act_set(hw, 1U, i);
+ }
- reg_rx_flr_mcst_flr_msk_set(hw, 0x00000000U);
- reg_rx_flr_mcst_flr_set(hw, 0x00010FFFU, 0U);
+ reg_rx_flr_mcst_flr_msk_set(hw, 0x00000000U);
+ reg_rx_flr_mcst_flr_set(hw, 0x00010FFFU, 0U);
- /* Vlan filters */
- rpf_vlan_outer_etht_set(hw, 0x88A8U);
- rpf_vlan_inner_etht_set(hw, 0x8100U);
+ /* Vlan filters */
+ rpf_vlan_outer_etht_set(hw, 0x88A8U);
+ rpf_vlan_inner_etht_set(hw, 0x8100U);
rpf_vlan_accept_untagged_packets_set(hw, true);
rpf_vlan_untagged_act_set(hw, HW_ATL_RX_HOST);
- rpf_vlan_prom_mode_en_set(hw, 1);
-
- /* Rx Interrupts */
- rdm_rx_desc_wr_wb_irq_en_set(hw, 1U);
+ rpf_vlan_prom_mode_en_set(hw, 1);
+
+ /* Rx Interrupts */
+ rdm_rx_desc_wr_wb_irq_en_set(hw, 1U);
- /* misc */
- control_reg_val = 0x000F0000U; //RPF2
+ /* misc */
+ control_reg_val = 0x000F0000U; //RPF2
- /* RSS hash type set for IP/TCP */
- control_reg_val |= 0x1EU;
+ /* RSS hash type set for IP/TCP */
+ control_reg_val |= 0x1EU;
- AQ_WRITE_REG(hw, 0x00005040U, control_reg_val);
+ AQ_WRITE_REG(hw, 0x00005040U, control_reg_val);
- rpfl2broadcast_en_set(hw, 1U);
- rpfl2broadcast_flr_act_set(hw, 1U);
- rpfl2broadcast_count_threshold_set(hw, 0xFFFFU & (~0U / 256U));
+ rpfl2broadcast_en_set(hw, 1U);
+ rpfl2broadcast_flr_act_set(hw, 1U);
+ rpfl2broadcast_count_threshold_set(hw, 0xFFFFU & (~0U / 256U));
- rdm_rx_dca_en_set(hw, 0U);
- rdm_rx_dca_mode_set(hw, 0U);
+ rdm_rx_dca_en_set(hw, 0U);
+ rdm_rx_dca_mode_set(hw, 0U);
- err = aq_hw_err_from_flags(hw);
- AQ_DBG_EXIT(err);
- return (err);
+ err = aq_hw_err_from_flags(hw);
+ AQ_DBG_EXIT(err);
+ return (err);
}
-int aq_hw_mac_addr_set(struct aq_hw *hw, u8 *mac_addr, u8 index)
+int
+aq_hw_mac_addr_set(struct aq_hw *hw, uint8_t *mac_addr, uint8_t index)
{
- int err = 0;
- unsigned int h = 0U;
- unsigned int l = 0U;
-
- AQ_DBG_ENTER();
- if (!mac_addr) {
- err = -EINVAL;
- goto err_exit;
- }
- h = (mac_addr[0] << 8) | (mac_addr[1]);
- l = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
- (mac_addr[4] << 8) | mac_addr[5];
-
- rpfl2_uc_flr_en_set(hw, 0U, index);
- rpfl2unicast_dest_addresslsw_set(hw, l, index);
- rpfl2unicast_dest_addressmsw_set(hw, h, index);
- rpfl2_uc_flr_en_set(hw, 1U, index);
-
- err = aq_hw_err_from_flags(hw);
+ int err = 0;
+ unsigned int h = 0U;
+ unsigned int l = 0U;
+
+ AQ_DBG_ENTER();
+ if (!mac_addr) {
+ err = -EINVAL;
+ goto err_exit;
+ }
+ h = (mac_addr[0] << 8) | (mac_addr[1]);
+ l = (mac_addr[2] << 24) | (mac_addr[3] << 16) | (mac_addr[4] << 8) |
+ mac_addr[5];
+
+ rpfl2_uc_flr_en_set(hw, 0U, index);
+ rpfl2unicast_dest_addresslsw_set(hw, l, index);
+ rpfl2unicast_dest_addressmsw_set(hw, h, index);
+ rpfl2_uc_flr_en_set(hw, 1U, index);
+
+ err = aq_hw_err_from_flags(hw);
err_exit:
- AQ_DBG_EXIT(err);
- return (err);
+ AQ_DBG_EXIT(err);
+ return (err);
}
-int aq_hw_init(struct aq_hw *hw, u8 *mac_addr, u8 adm_irq, bool msix)
+int
+aq_hw_init(struct aq_hw *hw, uint8_t *mac_addr, uint8_t adm_irq, bool msix)
{
- int err = 0;
- u32 val = 0;
+ int err = 0;
+ uint32_t val = 0;
- AQ_DBG_ENTER();
+ AQ_DBG_ENTER();
- /* Force limit MRRS on RDM/TDM to 2K */
- val = AQ_READ_REG(hw, AQ_HW_PCI_REG_CONTROL_6_ADR);
- AQ_WRITE_REG(hw, AQ_HW_PCI_REG_CONTROL_6_ADR, (val & ~0x707) | 0x404);
+ /* Force limit MRRS on RDM/TDM to 2K */
+ val = AQ_READ_REG(hw, AQ_HW_PCI_REG_CONTROL_6_ADR);
+ AQ_WRITE_REG(hw, AQ_HW_PCI_REG_CONTROL_6_ADR, (val & ~0x707) | 0x404);
- /* TX DMA total request limit. B0 hardware is not capable to
- * handle more than (8K-MRRS) incoming DMA data.
- * Value 24 in 256byte units
- */
- AQ_WRITE_REG(hw, AQ_HW_TX_DMA_TOTAL_REQ_LIMIT_ADR, 24);
+	/* TX DMA total request limit. B0 hardware is not capable of
+	 * handling more than (8K-MRRS) of incoming DMA data.
+	 * Value of 24 is in 256-byte units.
+ */
+ AQ_WRITE_REG(hw, AQ_HW_TX_DMA_TOTAL_REQ_LIMIT_ADR, 24);
- aq_hw_init_tx_path(hw);
- aq_hw_init_rx_path(hw);
+ aq_hw_init_tx_path(hw);
+ aq_hw_init_rx_path(hw);
- aq_hw_mac_addr_set(hw, mac_addr, AQ_HW_MAC);
+ aq_hw_mac_addr_set(hw, mac_addr, AQ_HW_MAC);
- aq_hw_mpi_set(hw, MPI_INIT, hw->link_rate);
+ aq_hw_mpi_set(hw, MPI_INIT, hw->link_rate);
- aq_hw_qos_set(hw);
+ aq_hw_qos_set(hw);
- err = aq_hw_err_from_flags(hw);
- if (err < 0)
- goto err_exit;
+ err = aq_hw_err_from_flags(hw);
+ if (err < 0)
+ goto err_exit;
- /* Interrupts */
- //Enable interrupt
- itr_irq_status_cor_en_set(hw, 0); //Disable clear-on-read for status
- itr_irq_auto_mask_clr_en_set(hw, 1); // Enable auto-mask clear.
+ /* Interrupts */
+ //Enable interrupt
+ itr_irq_status_cor_en_set(hw, 0); //Disable clear-on-read for status
+ itr_irq_auto_mask_clr_en_set(hw, 1); // Enable auto-mask clear.
if (msix)
itr_irq_mode_set(hw, 0x6); //MSIX + multi vector
else
itr_irq_mode_set(hw, 0x5); //MSI + multi vector
- reg_gen_irq_map_set(hw, 0x80 | adm_irq, 3);
+ reg_gen_irq_map_set(hw, 0x80 | adm_irq, 3);
- aq_hw_offload_set(hw);
+ aq_hw_offload_set(hw);
err_exit:
- AQ_DBG_EXIT(err);
- return (err);
+ AQ_DBG_EXIT(err);
+ return (err);
}
-int aq_hw_start(struct aq_hw *hw)
+int
+aq_hw_start(struct aq_hw *hw)
{
- int err;
-
- AQ_DBG_ENTER();
- tpb_tx_buff_en_set(hw, 1U);
- rpb_rx_buff_en_set(hw, 1U);
- err = aq_hw_err_from_flags(hw);
- AQ_DBG_EXIT(err);
- return (err);
+ int err;
+
+ AQ_DBG_ENTER();
+ tpb_tx_buff_en_set(hw, 1U);
+ rpb_rx_buff_en_set(hw, 1U);
+ err = aq_hw_err_from_flags(hw);
+ AQ_DBG_EXIT(err);
+ return (err);
}
-int aq_hw_interrupt_moderation_set(struct aq_hw *hw)
+int
+aq_hw_interrupt_moderation_set(struct aq_hw *hw)
{
- static unsigned int AQ_HW_NIC_timers_table_rx_[][2] = {
- {80, 120},//{0x6U, 0x38U},/* 10Gbit */
- {0xCU, 0x70U},/* 5Gbit */
- {0xCU, 0x70U},/* 5Gbit 5GS */
- {0x18U, 0xE0U},/* 2.5Gbit */
- {0x30U, 0x80U},/* 1Gbit */
- {0x4U, 0x50U},/* 100Mbit */
- };
- static unsigned int AQ_HW_NIC_timers_table_tx_[][2] = {
- {0x4fU, 0x1ff},//{0xffU, 0xffU}, /* 10Gbit */
- {0x4fU, 0xffU}, /* 5Gbit */
- {0x4fU, 0xffU}, /* 5Gbit 5GS */
- {0x4fU, 0xffU}, /* 2.5Gbit */
- {0x4fU, 0xffU}, /* 1Gbit */
- {0x4fU, 0xffU}, /* 100Mbit */
- };
-
- u32 speed_index = 0U; //itr settings for 10 g
- u32 itr_rx = 2U;
- u32 itr_tx = 2U;
- int custom_itr = hw->itr;
- int active = custom_itr != 0;
- int err;
-
-
- AQ_DBG_ENTER();
-
- if (custom_itr == -1) {
- itr_rx |= AQ_HW_NIC_timers_table_rx_[speed_index][0] << 0x8U; /* set min timer value */
- itr_rx |= AQ_HW_NIC_timers_table_rx_[speed_index][1] << 0x10U; /* set max timer value */
-
- itr_tx |= AQ_HW_NIC_timers_table_tx_[speed_index][0] << 0x8U; /* set min timer value */
- itr_tx |= AQ_HW_NIC_timers_table_tx_[speed_index][1] << 0x10U; /* set max timer value */
- }else{
- if (custom_itr > 0x1FF)
- custom_itr = 0x1FF;
-
- itr_rx |= (custom_itr/2) << 0x8U; /* set min timer value */
- itr_rx |= custom_itr << 0x10U; /* set max timer value */
-
- itr_tx |= (custom_itr/2) << 0x8U; /* set min timer value */
- itr_tx |= custom_itr << 0x10U; /* set max timer value */
- }
-
- tdm_tx_desc_wr_wb_irq_en_set(hw, !active);
- tdm_tdm_intr_moder_en_set(hw, active);
- rdm_rx_desc_wr_wb_irq_en_set(hw, !active);
- rdm_rdm_intr_moder_en_set(hw, active);
-
- for (int i = HW_ATL_B0_RINGS_MAX; i--;) {
- reg_tx_intr_moder_ctrl_set(hw, itr_tx, i);
- reg_rx_intr_moder_ctrl_set(hw, itr_rx, i);
- }
-
- err = aq_hw_err_from_flags(hw);
- AQ_DBG_EXIT(err);
- return (err);
+ static unsigned int AQ_HW_NIC_timers_table_rx_[][2] = {
+ {80, 120},//{0x6U, 0x38U},/* 10Gbit */
+ {0xCU, 0x70U},/* 5Gbit */
+ {0xCU, 0x70U},/* 5Gbit 5GS */
+ {0x18U, 0xE0U},/* 2.5Gbit */
+ {0x30U, 0x80U},/* 1Gbit */
+ {0x4U, 0x50U},/* 100Mbit */
+ };
+ static unsigned int AQ_HW_NIC_timers_table_tx_[][2] = {
+ {0x4fU, 0x1ff},//{0xffU, 0xffU}, /* 10Gbit */
+ {0x4fU, 0xffU}, /* 5Gbit */
+ {0x4fU, 0xffU}, /* 5Gbit 5GS */
+ {0x4fU, 0xffU}, /* 2.5Gbit */
+ {0x4fU, 0xffU}, /* 1Gbit */
+ {0x4fU, 0xffU}, /* 100Mbit */
+ };
+
+ uint32_t speed_index = 0U; //itr settings for 10 g
+ uint32_t itr_rx = 2U;
+ uint32_t itr_tx = 2U;
+ int custom_itr = hw->itr;
+ int active = custom_itr != 0;
+ int err;
+
+
+ AQ_DBG_ENTER();
+
+ if (custom_itr == -1) {
+ /* set min timer value */
+ itr_rx |= AQ_HW_NIC_timers_table_rx_[speed_index][0] << 0x8U;
+ /* set max timer value */
+ itr_rx |= AQ_HW_NIC_timers_table_rx_[speed_index][1] << 0x10U;
+
+ /* set min timer value */
+ itr_tx |= AQ_HW_NIC_timers_table_tx_[speed_index][0] << 0x8U;
+ /* set max timer value */
+ itr_tx |= AQ_HW_NIC_timers_table_tx_[speed_index][1] << 0x10U;
+ } else {
+ if (custom_itr > 0x1FF)
+ custom_itr = 0x1FF;
+
+ itr_rx |= (custom_itr/2) << 0x8U; /* set min timer value */
+ itr_rx |= custom_itr << 0x10U; /* set max timer value */
+
+ itr_tx |= (custom_itr/2) << 0x8U; /* set min timer value */
+ itr_tx |= custom_itr << 0x10U; /* set max timer value */
+ }
+
+ tdm_tx_desc_wr_wb_irq_en_set(hw, !active);
+ tdm_tdm_intr_moder_en_set(hw, active);
+ rdm_rx_desc_wr_wb_irq_en_set(hw, !active);
+ rdm_rdm_intr_moder_en_set(hw, active);
+
+ for (int i = HW_ATL_B0_RINGS_MAX; i--;) {
+ reg_tx_intr_moder_ctrl_set(hw, itr_tx, i);
+ reg_rx_intr_moder_ctrl_set(hw, itr_rx, i);
+ }
+
+ err = aq_hw_err_from_flags(hw);
+ AQ_DBG_EXIT(err);
+ return (err);
}
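
In the rewritten moderation setup above, each ring's control word starts from a base value of 2, then packs the minimum timer at bit 8 and the maximum timer at bit 16 (the 0x8U and 0x10U shifts). A small sketch that composes such a word the same way; the exact register bit layout is inferred from those shifts, not restated from documentation:

#include <stdint.h>
#include <stdio.h>

static uint32_t
pack_itr(uint32_t min_timer, uint32_t max_timer)
{
	uint32_t itr = 2U;		/* same base value as itr_rx/itr_tx above */

	itr |= min_timer << 0x8U;	/* set min timer value */
	itr |= max_timer << 0x10U;	/* set max timer value */
	return (itr);
}

int
main(void)
{
	/* 10 Gbit RX row from AQ_HW_NIC_timers_table_rx_: {80, 120}. */
	printf("0x%08X\n", (unsigned)pack_itr(80, 120));
	return (0);
}
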
/**
@@ -754,8 +761,8 @@
* @param aq_rx_filter_vlan VLAN filter configuration
* @return 0 - OK, <0 - error
*/
-int hw_atl_b0_hw_vlan_set(struct aq_hw_s *self,
- struct aq_rx_filter_vlan *aq_vlans)
+int
+hw_atl_b0_hw_vlan_set(struct aq_hw_s *self, struct aq_rx_filter_vlan *aq_vlans)
{
int i;
@@ -780,14 +787,16 @@
return aq_hw_err_from_flags(self);
}
-int hw_atl_b0_hw_vlan_promisc_set(struct aq_hw_s *self, bool promisc)
+int
+hw_atl_b0_hw_vlan_promisc_set(struct aq_hw_s *self, bool promisc)
{
hw_atl_rpf_vlan_prom_mode_en_set(self, promisc);
return aq_hw_err_from_flags(self);
}
-void aq_hw_set_promisc(struct aq_hw_s *self, bool l2_promisc, bool vlan_promisc, bool mc_promisc)
+void
+aq_hw_set_promisc(struct aq_hw_s *self, bool l2_promisc, bool vlan_promisc, bool mc_promisc)
{
AQ_DBG_ENTERA("promisc %d, vlan_promisc %d, allmulti %d", l2_promisc, vlan_promisc, mc_promisc);
@@ -801,11 +810,12 @@
AQ_DBG_EXIT(0);
}
-int aq_hw_rss_hash_set(struct aq_hw_s *self, u8 rss_key[HW_ATL_RSS_HASHKEY_SIZE])
+int
+aq_hw_rss_hash_set(struct aq_hw_s *self, uint8_t rss_key[HW_ATL_RSS_HASHKEY_SIZE])
{
- u32 rss_key_dw[HW_ATL_RSS_HASHKEY_SIZE / 4];
- u32 addr = 0U;
- u32 i = 0U;
+ uint32_t rss_key_dw[HW_ATL_RSS_HASHKEY_SIZE / 4];
+ uint32_t addr = 0U;
+ uint32_t i = 0U;
int err = 0;
AQ_DBG_ENTER();
@@ -813,7 +823,7 @@
memcpy(rss_key_dw, rss_key, HW_ATL_RSS_HASHKEY_SIZE);
for (i = 10, addr = 0U; i--; ++addr) {
- u32 key_data = bswap32(rss_key_dw[i]);
+ uint32_t key_data = bswap32(rss_key_dw[i]);
rpf_rss_key_wr_data_set(self, key_data);
rpf_rss_key_addr_set(self, addr);
rpf_rss_key_wr_en_set(self, 1U);
@@ -830,11 +840,12 @@
return (err);
}
-int aq_hw_rss_hash_get(struct aq_hw_s *self, u8 rss_key[HW_ATL_RSS_HASHKEY_SIZE])
+int
+aq_hw_rss_hash_get(struct aq_hw_s *self, uint8_t rss_key[HW_ATL_RSS_HASHKEY_SIZE])
{
- u32 rss_key_dw[HW_ATL_RSS_HASHKEY_SIZE / 4];
- u32 addr = 0U;
- u32 i = 0U;
+ uint32_t rss_key_dw[HW_ATL_RSS_HASHKEY_SIZE / 4];
+ uint32_t addr = 0U;
+ uint32_t i = 0U;
int err = 0;
AQ_DBG_ENTER();
@@ -851,17 +862,18 @@
return (err);
}
-int aq_hw_rss_set(struct aq_hw_s *self, u8 rss_table[HW_ATL_RSS_INDIRECTION_TABLE_MAX])
+int
+aq_hw_rss_set(struct aq_hw_s *self, uint8_t rss_table[HW_ATL_RSS_INDIRECTION_TABLE_MAX])
{
- u16 bitary[(HW_ATL_RSS_INDIRECTION_TABLE_MAX *
+ uint16_t bitary[(HW_ATL_RSS_INDIRECTION_TABLE_MAX *
3 / 16U)];
int err = 0;
- u32 i = 0U;
+ uint32_t i = 0U;
memset(bitary, 0, sizeof(bitary));
for (i = HW_ATL_RSS_INDIRECTION_TABLE_MAX; i--;) {
- (*(u32 *)(bitary + ((i * 3U) / 16U))) |=
+ (*(uint32_t *)(bitary + ((i * 3U) / 16U))) |=
((rss_table[i]) << ((i * 3U) & 0xFU));
}
@@ -881,10 +893,11 @@
return (err);
}
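
aq_hw_rss_set() above packs one 3-bit queue index per indirection-table entry into an array of 16-bit words, OR-ing each entry in through a 32-bit pointer at word (i * 3) / 16 with shift (i * 3) & 0xF, so the result is a flat 3-bits-per-entry stream. The sketch below builds the identical little-endian bit layout with plain 16-bit stores instead of the type-punned 32-bit write; sizes and values are hypothetical:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define TABLE_MAX 64U	/* hypothetical indirection-table size */

/* Pack one 3-bit queue index at entry i into the bit array. */
static void
set_rss_entry(uint16_t *bitary, uint32_t i, uint32_t queue)
{
	uint32_t shift = (i * 3U) & 0xFU;
	uint32_t word = (i * 3U) / 16U;

	bitary[word] |= (uint16_t)(queue << shift);
	if (shift > 13U)	/* entry straddles into the next 16-bit word */
		bitary[word + 1] |= (uint16_t)(queue >> (16U - shift));
}

int
main(void)
{
	uint16_t bitary[TABLE_MAX * 3U / 16U];
	uint32_t i;

	memset(bitary, 0, sizeof(bitary));
	for (i = 0; i < TABLE_MAX; i++)
		set_rss_entry(bitary, i, i % 8U);	/* spread entries over 8 queues */

	printf("bitary[0] = 0x%04X\n", (unsigned)bitary[0]);
	return (0);
}
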
-int aq_hw_udp_rss_enable(struct aq_hw_s *self, bool enable)
+int
+aq_hw_udp_rss_enable(struct aq_hw_s *self, bool enable)
{
int err = 0;
- if(!enable) {
+ if (!enable) {
/* HW bug workaround:
* Disable RSS for UDP using rx flow filter 0.
	 * HW does not track RSS stream for fragmented UDP,
Index: sys/dev/aq/aq_hw_llh.h
===================================================================
--- sys/dev/aq/aq_hw_llh.h
+++ sys/dev/aq/aq_hw_llh.h
@@ -46,1131 +46,1164 @@
/* global */
-void reg_glb_fw_image_id1_set(struct aq_hw* hw, u32 value);
-u32 reg_glb_fw_image_id1_get(struct aq_hw* hw);
+void reg_glb_fw_image_id1_set(struct aq_hw* hw, uint32_t value);
+uint32_t reg_glb_fw_image_id1_get(struct aq_hw* hw);
/* set global microprocessor semaphore */
-void reg_glb_cpu_sem_set(struct aq_hw *aq_hw, u32 sem_value, u32 sem_index);
+void reg_glb_cpu_sem_set(struct aq_hw *aq_hw, uint32_t sem_value,
+ uint32_t sem_index);
/* get global microprocessor semaphore */
-u32 reg_glb_cpu_sem_get(struct aq_hw *aq_hw, u32 sem_index);
+uint32_t reg_glb_cpu_sem_get(struct aq_hw *aq_hw, uint32_t sem_index);
/*
-* \brief Get Global Standard Control 1
-* \return GlobalStandardControl1
-*/
-u32 reg_glb_standard_ctl1_get(struct aq_hw* hw);
+ * \brief Get Global Standard Control 1
+ * \return GlobalStandardControl1
+ */
+uint32_t reg_glb_standard_ctl1_get(struct aq_hw* hw);
/*
-* \brief Set Global Standard Control 1
-*/
-void reg_glb_standard_ctl1_set(struct aq_hw* hw, u32 glb_standard_ctl1);
+ * \brief Set Global Standard Control 1
+ */
+void reg_glb_standard_ctl1_set(struct aq_hw* hw, uint32_t glb_standard_ctl1);
/*
-* \brief Set Global Control 2
-*/
-void reg_global_ctl2_set(struct aq_hw* hw, u32 global_ctl2);
+ * \brief Set Global Control 2
+ */
+void reg_global_ctl2_set(struct aq_hw* hw, uint32_t global_ctl2);
/*
-* \brief Get Global Control 2
-* \return GlobalControl2
-*/
-u32 reg_global_ctl2_get(struct aq_hw* hw);
+ * \brief Get Global Control 2
+ * \return GlobalControl2
+ */
+uint32_t reg_global_ctl2_get(struct aq_hw* hw);
/*
-* \brief Set Global Daisy Chain Status 1
-*/
-void reg_glb_daisy_chain_status1_set(struct aq_hw* hw, u32 glb_daisy_chain_status1);
+ * \brief Set Global Daisy Chain Status 1
+ */
+void reg_glb_daisy_chain_status1_set(struct aq_hw* hw,
+ uint32_t glb_daisy_chain_status1);
/*
-* \brief Get Global Daisy Chain Status 1
-* \return glb_daisy_chain_status1
-*/
-u32 reg_glb_daisy_chain_status1_get(struct aq_hw* hw);
+ * \brief Get Global Daisy Chain Status 1
+ * \return glb_daisy_chain_status1
+ */
+uint32_t reg_glb_daisy_chain_status1_get(struct aq_hw* hw);
/*
-* \brief Set Global General Provisioning 9
-*/
-void reg_glb_general_provisioning9_set(struct aq_hw* hw, u32 value);
+ * \brief Set Global General Provisioning 9
+ */
+void reg_glb_general_provisioning9_set(struct aq_hw* hw, uint32_t value);
/*
-* \brief Get Global General Provisioning 9
-* \return GlobalGeneralProvisioning9
-*/
-u32 reg_glb_general_provisioning9_get(struct aq_hw* hw);
+ * \brief Get Global General Provisioning 9
+ * \return GlobalGeneralProvisioning9
+ */
+uint32_t reg_glb_general_provisioning9_get(struct aq_hw* hw);
/*
-* \brief Set Global NVR Provisioning 2
-*/
-void reg_glb_nvr_provisioning2_set(struct aq_hw* hw, u32 value);
+ * \brief Set Global NVR Provisioning 2
+ */
+void reg_glb_nvr_provisioning2_set(struct aq_hw* hw, uint32_t value);
/*
-* \brief Get Global NVR Provisioning 2
-* \return GlobalNvrProvisioning2
-*/
-u32 reg_glb_nvr_provisioning2_get(struct aq_hw* hw);
+ * \brief Get Global NVR Provisioning 2
+ * \return GlobalNvrProvisioning2
+ */
+uint32_t reg_glb_nvr_provisioning2_get(struct aq_hw* hw);
/*
-* \brief Set Global NVR Interface 1
-*/
-void reg_glb_nvr_interface1_set(struct aq_hw* hw, u32 value);
+ * \brief Set Global NVR Interface 1
+ */
+void reg_glb_nvr_interface1_set(struct aq_hw* hw, uint32_t value);
/*
-* \brief Get Global NVR Interface 1
-* \return GlobalNvrInterface1
-*/
-u32 reg_glb_nvr_interface1_get(struct aq_hw* hw);
+ * \brief Get Global NVR Interface 1
+ * \return GlobalNvrInterface1
+ */
+uint32_t reg_glb_nvr_interface1_get(struct aq_hw* hw);
/* set global register reset disable */
-void glb_glb_reg_res_dis_set(struct aq_hw *aq_hw, u32 glb_reg_res_dis);
+void glb_glb_reg_res_dis_set(struct aq_hw *aq_hw, uint32_t glb_reg_res_dis);
/* set soft reset */
-void glb_soft_res_set(struct aq_hw *aq_hw, u32 soft_res);
+void glb_soft_res_set(struct aq_hw *aq_hw, uint32_t soft_res);
/* get soft reset */
-u32 glb_soft_res_get(struct aq_hw *aq_hw);
+uint32_t glb_soft_res_get(struct aq_hw *aq_hw);
/* stats */
-u32 rpb_rx_dma_drop_pkt_cnt_get(struct aq_hw *aq_hw);
+uint32_t rpb_rx_dma_drop_pkt_cnt_get(struct aq_hw *aq_hw);
/* get rx dma good octet counter lsw */
-u32 stats_rx_dma_good_octet_counterlsw_get(struct aq_hw *aq_hw);
+uint32_t stats_rx_dma_good_octet_counterlsw_get(struct aq_hw *aq_hw);
/* get rx dma good packet counter lsw */
-u32 stats_rx_dma_good_pkt_counterlsw_get(struct aq_hw *aq_hw);
+uint32_t stats_rx_dma_good_pkt_counterlsw_get(struct aq_hw *aq_hw);
/* get tx dma good octet counter lsw */
-u32 stats_tx_dma_good_octet_counterlsw_get(struct aq_hw *aq_hw);
+uint32_t stats_tx_dma_good_octet_counterlsw_get(struct aq_hw *aq_hw);
/* get tx dma good packet counter lsw */
-u32 stats_tx_dma_good_pkt_counterlsw_get(struct aq_hw *aq_hw);
+uint32_t stats_tx_dma_good_pkt_counterlsw_get(struct aq_hw *aq_hw);
/* get rx dma good octet counter msw */
-u32 stats_rx_dma_good_octet_countermsw_get(struct aq_hw *aq_hw);
+uint32_t stats_rx_dma_good_octet_countermsw_get(struct aq_hw *aq_hw);
/* get rx dma good packet counter msw */
-u32 stats_rx_dma_good_pkt_countermsw_get(struct aq_hw *aq_hw);
+uint32_t stats_rx_dma_good_pkt_countermsw_get(struct aq_hw *aq_hw);
/* get tx dma good octet counter msw */
-u32 stats_tx_dma_good_octet_countermsw_get(struct aq_hw *aq_hw);
+uint32_t stats_tx_dma_good_octet_countermsw_get(struct aq_hw *aq_hw);
/* get tx dma good packet counter msw */
-u32 stats_tx_dma_good_pkt_countermsw_get(struct aq_hw *aq_hw);
+uint32_t stats_tx_dma_good_pkt_countermsw_get(struct aq_hw *aq_hw);
/* get rx lro coalesced packet count lsw */
-u32 stats_rx_lro_coalesced_pkt_count0_get(struct aq_hw *aq_hw);
+uint32_t stats_rx_lro_coalesced_pkt_count0_get(struct aq_hw *aq_hw);
/* get msm rx errors counter register */
-u32 reg_mac_msm_rx_errs_cnt_get(struct aq_hw *aq_hw);
+uint32_t reg_mac_msm_rx_errs_cnt_get(struct aq_hw *aq_hw);
/* get msm rx unicast frames counter register */
-u32 reg_mac_msm_rx_ucst_frm_cnt_get(struct aq_hw *aq_hw);
+uint32_t reg_mac_msm_rx_ucst_frm_cnt_get(struct aq_hw *aq_hw);
/* get msm rx multicast frames counter register */
-u32 reg_mac_msm_rx_mcst_frm_cnt_get(struct aq_hw *aq_hw);
+uint32_t reg_mac_msm_rx_mcst_frm_cnt_get(struct aq_hw *aq_hw);
/* get msm rx broadcast frames counter register */
-u32 reg_mac_msm_rx_bcst_frm_cnt_get(struct aq_hw *aq_hw);
+uint32_t reg_mac_msm_rx_bcst_frm_cnt_get(struct aq_hw *aq_hw);
/* get msm rx broadcast octets counter register 1 */
-u32 reg_mac_msm_rx_bcst_octets_counter1get(struct aq_hw *aq_hw);
+uint32_t reg_mac_msm_rx_bcst_octets_counter1get(struct aq_hw *aq_hw);
/* get msm rx unicast octets counter register 0 */
-u32 reg_mac_msm_rx_ucst_octets_counter0get(struct aq_hw *aq_hw);
+uint32_t reg_mac_msm_rx_ucst_octets_counter0get(struct aq_hw *aq_hw);
/* get rx dma statistics counter 7 */
-u32 reg_rx_dma_stat_counter7get(struct aq_hw *aq_hw);
+uint32_t reg_rx_dma_stat_counter7get(struct aq_hw *aq_hw);
/* get msm tx errors counter register */
-u32 reg_mac_msm_tx_errs_cnt_get(struct aq_hw *aq_hw);
+uint32_t reg_mac_msm_tx_errs_cnt_get(struct aq_hw *aq_hw);
/* get msm tx unicast frames counter register */
-u32 reg_mac_msm_tx_ucst_frm_cnt_get(struct aq_hw *aq_hw);
+uint32_t reg_mac_msm_tx_ucst_frm_cnt_get(struct aq_hw *aq_hw);
/* get msm tx multicast frames counter register */
-u32 reg_mac_msm_tx_mcst_frm_cnt_get(struct aq_hw *aq_hw);
+uint32_t reg_mac_msm_tx_mcst_frm_cnt_get(struct aq_hw *aq_hw);
/* get msm tx broadcast frames counter register */
-u32 reg_mac_msm_tx_bcst_frm_cnt_get(struct aq_hw *aq_hw);
+uint32_t reg_mac_msm_tx_bcst_frm_cnt_get(struct aq_hw *aq_hw);
/* get msm tx multicast octets counter register 1 */
-u32 reg_mac_msm_tx_mcst_octets_counter1get(struct aq_hw *aq_hw);
+uint32_t reg_mac_msm_tx_mcst_octets_counter1get(struct aq_hw *aq_hw);
/* get msm tx broadcast octets counter register 1 */
-u32 reg_mac_msm_tx_bcst_octets_counter1get(struct aq_hw *aq_hw);
+uint32_t reg_mac_msm_tx_bcst_octets_counter1get(struct aq_hw *aq_hw);
/* get msm tx unicast octets counter register 0 */
-u32 reg_mac_msm_tx_ucst_octets_counter0get(struct aq_hw *aq_hw);
+uint32_t reg_mac_msm_tx_ucst_octets_counter0get(struct aq_hw *aq_hw);
/* get global mif identification */
-u32 reg_glb_mif_id_get(struct aq_hw *aq_hw);
+uint32_t reg_glb_mif_id_get(struct aq_hw *aq_hw);
/** \brief Set Tx Register Reset Disable
-* \param txRegisterResetDisable 1 = Disable the S/W reset to MAC-PHY registers, 0 = Enable the S/W reset to MAC-PHY registers
-* \note Default value: 0x1
-* \note PORT="pif_mpi_reg_reset_dsbl_i"
-*/
-void mpi_tx_reg_res_dis_set(struct aq_hw* hw, u32 mpi_tx_reg_res_dis);
+ * \param txRegisterResetDisable 1 = Disable the S/W reset to MAC-PHY registers, 0 = Enable the S/W reset to MAC-PHY registers
+ * \note Default value: 0x1
+ * \note PORT="pif_mpi_reg_reset_dsbl_i"
+ */
+void mpi_tx_reg_res_dis_set(struct aq_hw* hw, uint32_t mpi_tx_reg_res_dis);
/** \brief Get Tx Register Reset Disable
-* \return 1 = Disable the S/W reset to MAC-PHY registers, 0 = Enable the S/W reset to MAC-PHY registers
-* \note Default value: 0x1
-* \note PORT="pif_mpi_reg_reset_dsbl_i"
-*/
-u32 mpi_tx_reg_res_dis_get(struct aq_hw* hw);
+ * \return 1 = Disable the S/W reset to MAC-PHY registers, 0 = Enable the S/W reset to MAC-PHY registers
+ * \note Default value: 0x1
+ * \note PORT="pif_mpi_reg_reset_dsbl_i"
+ */
+uint32_t mpi_tx_reg_res_dis_get(struct aq_hw* hw);
/* interrupt */
/* set interrupt auto mask lsw */
-void itr_irq_auto_masklsw_set(struct aq_hw *aq_hw, u32 irq_auto_masklsw);
+void itr_irq_auto_masklsw_set(struct aq_hw *aq_hw, uint32_t irq_auto_masklsw);
/* set interrupt mapping enable rx */
-void itr_irq_map_en_rx_set(struct aq_hw *aq_hw, u32 irq_map_en_rx, u32 rx);
+void itr_irq_map_en_rx_set(struct aq_hw *aq_hw, uint32_t irq_map_en_rx,
+ uint32_t rx);
/* set interrupt mapping enable tx */
-void itr_irq_map_en_tx_set(struct aq_hw *aq_hw, u32 irq_map_en_tx, u32 tx);
+void itr_irq_map_en_tx_set(struct aq_hw *aq_hw, uint32_t irq_map_en_tx,
+ uint32_t tx);
/* set interrupt mapping rx */
-void itr_irq_map_rx_set(struct aq_hw *aq_hw, u32 irq_map_rx, u32 rx);
+void itr_irq_map_rx_set(struct aq_hw *aq_hw, uint32_t irq_map_rx, uint32_t rx);
/* set interrupt mapping tx */
-void itr_irq_map_tx_set(struct aq_hw *aq_hw, u32 irq_map_tx, u32 tx);
+void itr_irq_map_tx_set(struct aq_hw *aq_hw, uint32_t irq_map_tx, uint32_t tx);
/* set interrupt mask clear lsw */
-void itr_irq_msk_clearlsw_set(struct aq_hw *aq_hw, u32 irq_msk_clearlsw);
+void itr_irq_msk_clearlsw_set(struct aq_hw *aq_hw, uint32_t irq_msk_clearlsw);
/* set interrupt mask set lsw */
-void itr_irq_msk_setlsw_set(struct aq_hw *aq_hw, u32 irq_msk_setlsw);
+void itr_irq_msk_setlsw_set(struct aq_hw *aq_hw, uint32_t irq_msk_setlsw);
/* set interrupt register reset disable */
-void itr_irq_reg_res_dis_set(struct aq_hw *aq_hw, u32 irq_reg_res_dis);
+void itr_irq_reg_res_dis_set(struct aq_hw *aq_hw, uint32_t irq_reg_res_dis);
/* set interrupt status clear lsw */
void itr_irq_status_clearlsw_set(struct aq_hw *aq_hw,
- u32 irq_status_clearlsw);
+ uint32_t irq_status_clearlsw);
/* get interrupt status lsw */
-u32 itr_irq_statuslsw_get(struct aq_hw *aq_hw);
+uint32_t itr_irq_statuslsw_get(struct aq_hw *aq_hw);
/* get reset interrupt */
-u32 itr_res_irq_get(struct aq_hw *aq_hw);
+uint32_t itr_res_irq_get(struct aq_hw *aq_hw);
/* set reset interrupt */
-void itr_res_irq_set(struct aq_hw *aq_hw, u32 res_irq);
+void itr_res_irq_set(struct aq_hw *aq_hw, uint32_t res_irq);
-void itr_irq_mode_set(struct aq_hw *aq_hw, u32 irq_mode);
+void itr_irq_mode_set(struct aq_hw *aq_hw, uint32_t irq_mode);
/* Set Link Interrupt Mapping Enable */
-void itr_link_int_map_en_set(struct aq_hw *aq_hw, u32 link_int_en_map_en);
+void itr_link_int_map_en_set(struct aq_hw *aq_hw, uint32_t link_int_en_map_en);
/* Get Link Interrupt Mapping Enable */
-u32 itr_link_int_map_en_get(struct aq_hw *aq_hw);
+uint32_t itr_link_int_map_en_get(struct aq_hw *aq_hw);
/* Set Link Interrupt Mapping */
-void itr_link_int_map_set(struct aq_hw *aq_hw, u32 link_int_map);
+void itr_link_int_map_set(struct aq_hw *aq_hw, uint32_t link_int_map);
/* Get Link Interrupt Mapping */
-u32 itr_link_int_map_get(struct aq_hw *aq_hw);
+uint32_t itr_link_int_map_get(struct aq_hw *aq_hw);
/* Set MIF Interrupt Mapping Enable */
-void itr_mif_int_map_en_set(struct aq_hw *aq_hw, u32 mif_int_map_en, u32 mif);
+void itr_mif_int_map_en_set(struct aq_hw *aq_hw, uint32_t mif_int_map_en,
+ uint32_t mif);
/* Get MIF Interrupt Mapping Enable */
-u32 itr_mif_int_map_en_get(struct aq_hw *aq_hw, u32 mif);
+uint32_t itr_mif_int_map_en_get(struct aq_hw *aq_hw, uint32_t mif);
/* Set MIF Interrupt Mapping */
-void itr_mif_int_map_set(struct aq_hw *aq_hw, u32 mif_int_map, u32 mif);
+void itr_mif_int_map_set(struct aq_hw *aq_hw, uint32_t mif_int_map,
+ uint32_t mif);
/* Get MIF Interrupt Mapping */
-u32 itr_mif_int_map_get(struct aq_hw *aq_hw, u32 mif);
+uint32_t itr_mif_int_map_get(struct aq_hw *aq_hw, uint32_t mif);
-void itr_irq_status_cor_en_set(struct aq_hw *aq_hw, u32 irq_status_cor_enable);
+void itr_irq_status_cor_en_set(struct aq_hw *aq_hw,
+ uint32_t irq_status_cor_enable);
-void itr_irq_auto_mask_clr_en_set(struct aq_hw *aq_hw, u32 irq_auto_mask_clr_en);
+void itr_irq_auto_mask_clr_en_set(struct aq_hw *aq_hw,
+ uint32_t irq_auto_mask_clr_en);
/* rdm */
/* set cpu id */
-void rdm_cpu_id_set(struct aq_hw *aq_hw, u32 cpuid, u32 dca);
+void rdm_cpu_id_set(struct aq_hw *aq_hw, uint32_t cpuid, uint32_t dca);
/* set rx dca enable */
-void rdm_rx_dca_en_set(struct aq_hw *aq_hw, u32 rx_dca_en);
+void rdm_rx_dca_en_set(struct aq_hw *aq_hw, uint32_t rx_dca_en);
/* set rx dca mode */
-void rdm_rx_dca_mode_set(struct aq_hw *aq_hw, u32 rx_dca_mode);
+void rdm_rx_dca_mode_set(struct aq_hw *aq_hw, uint32_t rx_dca_mode);
/* set rx descriptor data buffer size */
void rdm_rx_desc_data_buff_size_set(struct aq_hw *aq_hw,
- u32 rx_desc_data_buff_size,
- u32 descriptor);
+ uint32_t rx_desc_data_buff_size, uint32_t descriptor);
/* set rx descriptor dca enable */
-void rdm_rx_desc_dca_en_set(struct aq_hw *aq_hw, u32 rx_desc_dca_en,
- u32 dca);
+void rdm_rx_desc_dca_en_set(struct aq_hw *aq_hw, uint32_t rx_desc_dca_en,
+ uint32_t dca);
/* set rx descriptor enable */
-void rdm_rx_desc_en_set(struct aq_hw *aq_hw, u32 rx_desc_en,
- u32 descriptor);
+void rdm_rx_desc_en_set(struct aq_hw *aq_hw, uint32_t rx_desc_en,
+ uint32_t descriptor);
/* set rx descriptor header splitting */
void rdm_rx_desc_head_splitting_set(struct aq_hw *aq_hw,
- u32 rx_desc_head_splitting,
- u32 descriptor);
+ uint32_t rx_desc_head_splitting, uint32_t descriptor);
/* get rx descriptor head pointer */
-u32 rdm_rx_desc_head_ptr_get(struct aq_hw *aq_hw, u32 descriptor);
+uint32_t rdm_rx_desc_head_ptr_get(struct aq_hw *aq_hw, uint32_t descriptor);
/* set rx descriptor length */
-void rdm_rx_desc_len_set(struct aq_hw *aq_hw, u32 rx_desc_len,
- u32 descriptor);
+void rdm_rx_desc_len_set(struct aq_hw *aq_hw, uint32_t rx_desc_len,
+ uint32_t descriptor);
/* set rx descriptor write-back interrupt enable */
void rdm_rx_desc_wr_wb_irq_en_set(struct aq_hw *aq_hw,
- u32 rx_desc_wr_wb_irq_en);
+ uint32_t rx_desc_wr_wb_irq_en);
/* set rx header dca enable */
-void rdm_rx_head_dca_en_set(struct aq_hw *aq_hw, u32 rx_head_dca_en,
- u32 dca);
+void rdm_rx_head_dca_en_set(struct aq_hw *aq_hw, uint32_t rx_head_dca_en,
+ uint32_t dca);
/* set rx payload dca enable */
-void rdm_rx_pld_dca_en_set(struct aq_hw *aq_hw, u32 rx_pld_dca_en, u32 dca);
+void rdm_rx_pld_dca_en_set(struct aq_hw *aq_hw, uint32_t rx_pld_dca_en,
+ uint32_t dca);
/* set rx descriptor header buffer size */
void rdm_rx_desc_head_buff_size_set(struct aq_hw *aq_hw,
- u32 rx_desc_head_buff_size,
- u32 descriptor);
+ uint32_t rx_desc_head_buff_size, uint32_t descriptor);
/* set rx descriptor reset */
-void rdm_rx_desc_res_set(struct aq_hw *aq_hw, u32 rx_desc_res,
- u32 descriptor);
+void rdm_rx_desc_res_set(struct aq_hw *aq_hw, uint32_t rx_desc_res,
+ uint32_t descriptor);
/* Set RDM Interrupt Moderation Enable */
-void rdm_rdm_intr_moder_en_set(struct aq_hw *aq_hw, u32 rdm_intr_moder_en);
+void rdm_rdm_intr_moder_en_set(struct aq_hw *aq_hw, uint32_t rdm_intr_moder_en);
/* reg */
/* set general interrupt mapping register */
-void reg_gen_irq_map_set(struct aq_hw *aq_hw, u32 gen_intr_map, u32 regidx);
+void reg_gen_irq_map_set(struct aq_hw *aq_hw, uint32_t gen_intr_map,
+ uint32_t regidx);
/* get general interrupt status register */
-u32 reg_gen_irq_status_get(struct aq_hw *aq_hw);
+uint32_t reg_gen_irq_status_get(struct aq_hw *aq_hw);
/* set interrupt global control register */
-void reg_irq_glb_ctl_set(struct aq_hw *aq_hw, u32 intr_glb_ctl);
+void reg_irq_glb_ctl_set(struct aq_hw *aq_hw, uint32_t intr_glb_ctl);
/* set interrupt throttle register */
-void reg_irq_thr_set(struct aq_hw *aq_hw, u32 intr_thr, u32 throttle);
+void reg_irq_thr_set(struct aq_hw *aq_hw, uint32_t intr_thr, uint32_t throttle);
/* set rx dma descriptor base address lsw */
void reg_rx_dma_desc_base_addresslswset(struct aq_hw *aq_hw,
- u32 rx_dma_desc_base_addrlsw,
- u32 descriptor);
+ uint32_t rx_dma_desc_base_addrlsw, uint32_t descriptor);
/* set rx dma descriptor base address msw */
void reg_rx_dma_desc_base_addressmswset(struct aq_hw *aq_hw,
- u32 rx_dma_desc_base_addrmsw,
- u32 descriptor);
+ uint32_t rx_dma_desc_base_addrmsw, uint32_t descriptor);
/* get rx dma descriptor status register */
-u32 reg_rx_dma_desc_status_get(struct aq_hw *aq_hw, u32 descriptor);
+uint32_t reg_rx_dma_desc_status_get(struct aq_hw *aq_hw, uint32_t descriptor);
/* set rx dma descriptor tail pointer register */
void reg_rx_dma_desc_tail_ptr_set(struct aq_hw *aq_hw,
- u32 rx_dma_desc_tail_ptr,
- u32 descriptor);
+ uint32_t rx_dma_desc_tail_ptr, uint32_t descriptor);
/* get rx dma descriptor tail pointer register */
-u32 reg_rx_dma_desc_tail_ptr_get(struct aq_hw *aq_hw, u32 descriptor);
+uint32_t reg_rx_dma_desc_tail_ptr_get(struct aq_hw *aq_hw, uint32_t descriptor);
/* set rx filter multicast filter mask register */
void reg_rx_flr_mcst_flr_msk_set(struct aq_hw *aq_hw,
- u32 rx_flr_mcst_flr_msk);
+ uint32_t rx_flr_mcst_flr_msk);
/* set rx filter multicast filter register */
-void reg_rx_flr_mcst_flr_set(struct aq_hw *aq_hw, u32 rx_flr_mcst_flr,
- u32 filter);
+void reg_rx_flr_mcst_flr_set(struct aq_hw *aq_hw, uint32_t rx_flr_mcst_flr,
+ uint32_t filter);
/* set rx filter rss control register 1 */
void reg_rx_flr_rss_control1set(struct aq_hw *aq_hw,
- u32 rx_flr_rss_control1);
+ uint32_t rx_flr_rss_control1);
/* Set RX Filter Control Register 2 */
-void reg_rx_flr_control2_set(struct aq_hw *aq_hw, u32 rx_flr_control2);
+void reg_rx_flr_control2_set(struct aq_hw *aq_hw, uint32_t rx_flr_control2);
/* Set RX Interrupt Moderation Control Register */
void reg_rx_intr_moder_ctrl_set(struct aq_hw *aq_hw,
- u32 rx_intr_moderation_ctl,
- u32 queue);
+ uint32_t rx_intr_moderation_ctl, uint32_t queue);
/* set tx dma debug control */
-void reg_tx_dma_debug_ctl_set(struct aq_hw *aq_hw, u32 tx_dma_debug_ctl);
+void reg_tx_dma_debug_ctl_set(struct aq_hw *aq_hw, uint32_t tx_dma_debug_ctl);
/* set tx dma descriptor base address lsw */
void reg_tx_dma_desc_base_addresslswset(struct aq_hw *aq_hw,
- u32 tx_dma_desc_base_addrlsw,
- u32 descriptor);
+ uint32_t tx_dma_desc_base_addrlsw, uint32_t descriptor);
/* set tx dma descriptor base address msw */
void reg_tx_dma_desc_base_addressmswset(struct aq_hw *aq_hw,
- u32 tx_dma_desc_base_addrmsw,
- u32 descriptor);
+ uint32_t tx_dma_desc_base_addrmsw, uint32_t descriptor);
/* set tx dma descriptor tail pointer register */
void reg_tx_dma_desc_tail_ptr_set(struct aq_hw *aq_hw,
- u32 tx_dma_desc_tail_ptr,
- u32 descriptor);
+ uint32_t tx_dma_desc_tail_ptr, uint32_t descriptor);
/* get tx dma descriptor tail pointer register */
-u32 reg_tx_dma_desc_tail_ptr_get(struct aq_hw *aq_hw, u32 descriptor);
+uint32_t reg_tx_dma_desc_tail_ptr_get(struct aq_hw *aq_hw, uint32_t descriptor);
/* Set TX Interrupt Moderation Control Register */
void reg_tx_intr_moder_ctrl_set(struct aq_hw *aq_hw,
- u32 tx_intr_moderation_ctl,
- u32 queue);
+ uint32_t tx_intr_moderation_ctl, uint32_t queue);
/* get global microprocessor scratch pad */
-u32 reg_glb_cpu_scratch_scp_get(struct aq_hw *hw, u32 glb_cpu_scratch_scp_idx);
+uint32_t reg_glb_cpu_scratch_scp_get(struct aq_hw *hw,
+ uint32_t glb_cpu_scratch_scp_idx);
/* set global microprocessor scratch pad */
void reg_glb_cpu_scratch_scp_set(struct aq_hw *aq_hw,
- u32 glb_cpu_scratch_scp, u32 scratch_scp);
+ uint32_t glb_cpu_scratch_scp, uint32_t scratch_scp);
/* get global microprocessor no reset scratch pad */
-u32 reg_glb_cpu_no_reset_scratchpad_get(struct aq_hw* hw, u32 index);
+uint32_t reg_glb_cpu_no_reset_scratchpad_get(struct aq_hw* hw, uint32_t index);
/* set global microprocessor no reset scratch pad */
-void reg_glb_cpu_no_reset_scratchpad_set(struct aq_hw* aq_hw, u32 value,
- u32 index);
+void reg_glb_cpu_no_reset_scratchpad_set(struct aq_hw* aq_hw, uint32_t value,
+ uint32_t index);
/* rpb */
/* set dma system loopback */
-void rpb_dma_sys_lbk_set(struct aq_hw *aq_hw, u32 dma_sys_lbk);
+void rpb_dma_sys_lbk_set(struct aq_hw *aq_hw, uint32_t dma_sys_lbk);
/* set rx traffic class mode */
void rpb_rpf_rx_traf_class_mode_set(struct aq_hw *aq_hw,
- u32 rx_traf_class_mode);
+ uint32_t rx_traf_class_mode);
/* set rx buffer enable */
-void rpb_rx_buff_en_set(struct aq_hw *aq_hw, u32 rx_buff_en);
+void rpb_rx_buff_en_set(struct aq_hw *aq_hw, uint32_t rx_buff_en);
/* set rx buffer high threshold (per tc) */
void rpb_rx_buff_hi_threshold_per_tc_set(struct aq_hw *aq_hw,
- u32 rx_buff_hi_threshold_per_tc,
- u32 buffer);
+ uint32_t rx_buff_hi_threshold_per_tc, uint32_t buffer);
/* set rx buffer low threshold (per tc) */
void rpb_rx_buff_lo_threshold_per_tc_set(struct aq_hw *aq_hw,
- u32 rx_buff_lo_threshold_per_tc,
- u32 buffer);
+ uint32_t rx_buff_lo_threshold_per_tc, uint32_t buffer);
/* set rx flow control mode */
-void rpb_rx_flow_ctl_mode_set(struct aq_hw *aq_hw, u32 rx_flow_ctl_mode);
+void rpb_rx_flow_ctl_mode_set(struct aq_hw *aq_hw, uint32_t rx_flow_ctl_mode);
/* set rx packet buffer size (per tc) */
void rpb_rx_pkt_buff_size_per_tc_set(struct aq_hw *aq_hw,
- u32 rx_pkt_buff_size_per_tc,
- u32 buffer);
+ uint32_t rx_pkt_buff_size_per_tc, uint32_t buffer);
/* set rx xoff enable (per tc) */
-void rpb_rx_xoff_en_per_tc_set(struct aq_hw *aq_hw, u32 rx_xoff_en_per_tc,
- u32 buffer);
+void rpb_rx_xoff_en_per_tc_set(struct aq_hw *aq_hw, uint32_t rx_xoff_en_per_tc,
+ uint32_t buffer);
/* rpf */
/* set l2 broadcast count threshold */
void rpfl2broadcast_count_threshold_set(struct aq_hw *aq_hw,
- u32 l2broadcast_count_threshold);
+ uint32_t l2broadcast_count_threshold);
/* set l2 broadcast enable */
-void rpfl2broadcast_en_set(struct aq_hw *aq_hw, u32 l2broadcast_en);
+void rpfl2broadcast_en_set(struct aq_hw *aq_hw, uint32_t l2broadcast_en);
/* set l2 broadcast filter action */
void rpfl2broadcast_flr_act_set(struct aq_hw *aq_hw,
- u32 l2broadcast_flr_act);
+ uint32_t l2broadcast_flr_act);
/* set l2 multicast filter enable */
-void rpfl2multicast_flr_en_set(struct aq_hw *aq_hw, u32 l2multicast_flr_en,
- u32 filter);
+void rpfl2multicast_flr_en_set(struct aq_hw *aq_hw, uint32_t l2multicast_flr_en,
+ uint32_t filter);
/* set l2 promiscuous mode enable */
void rpfl2promiscuous_mode_en_set(struct aq_hw *aq_hw,
- u32 l2promiscuous_mode_en);
+ uint32_t l2promiscuous_mode_en);
/* set l2 unicast filter action */
-void rpfl2unicast_flr_act_set(struct aq_hw *aq_hw, u32 l2unicast_flr_act,
- u32 filter);
+void rpfl2unicast_flr_act_set(struct aq_hw *aq_hw, uint32_t l2unicast_flr_act,
+ uint32_t filter);
/* set l2 unicast filter enable */
-void rpfl2_uc_flr_en_set(struct aq_hw *aq_hw, u32 l2unicast_flr_en,
- u32 filter);
+void rpfl2_uc_flr_en_set(struct aq_hw *aq_hw, uint32_t l2unicast_flr_en,
+ uint32_t filter);
/* set l2 unicast destination address lsw */
void rpfl2unicast_dest_addresslsw_set(struct aq_hw *aq_hw,
- u32 l2unicast_dest_addresslsw,
- u32 filter);
+ uint32_t l2unicast_dest_addresslsw, uint32_t filter);
/* set l2 unicast destination address msw */
void rpfl2unicast_dest_addressmsw_set(struct aq_hw *aq_hw,
- u32 l2unicast_dest_addressmsw,
- u32 filter);
+ uint32_t l2unicast_dest_addressmsw, uint32_t filter);
/* Set L2 Accept all Multicast packets */
void rpfl2_accept_all_mc_packets_set(struct aq_hw *aq_hw,
- u32 l2_accept_all_mc_packets);
+ uint32_t l2_accept_all_mc_packets);
/* set user-priority tc mapping */
void rpf_rpb_user_priority_tc_map_set(struct aq_hw *aq_hw,
- u32 user_priority_tc_map, u32 tc);
+ uint32_t user_priority_tc_map, uint32_t tc);
/* set rss key address */
-void rpf_rss_key_addr_set(struct aq_hw *aq_hw, u32 rss_key_addr);
+void rpf_rss_key_addr_set(struct aq_hw *aq_hw, uint32_t rss_key_addr);
/* set rss key write data */
-void rpf_rss_key_wr_data_set(struct aq_hw *aq_hw, u32 rss_key_wr_data);
+void rpf_rss_key_wr_data_set(struct aq_hw *aq_hw, uint32_t rss_key_wr_data);
/* get rss key read data */
-u32 rpf_rss_key_rd_data_get(struct aq_hw *aq_hw);
+uint32_t rpf_rss_key_rd_data_get(struct aq_hw *aq_hw);
/* get rss key write enable */
-u32 rpf_rss_key_wr_en_get(struct aq_hw *aq_hw);
+uint32_t rpf_rss_key_wr_en_get(struct aq_hw *aq_hw);
/* set rss key write enable */
-void rpf_rss_key_wr_en_set(struct aq_hw *aq_hw, u32 rss_key_wr_en);
+void rpf_rss_key_wr_en_set(struct aq_hw *aq_hw, uint32_t rss_key_wr_en);
/* set rss redirection table address */
void rpf_rss_redir_tbl_addr_set(struct aq_hw *aq_hw,
- u32 rss_redir_tbl_addr);
+ uint32_t rss_redir_tbl_addr);
/* set rss redirection table write data */
void rpf_rss_redir_tbl_wr_data_set(struct aq_hw *aq_hw,
- u32 rss_redir_tbl_wr_data);
+ uint32_t rss_redir_tbl_wr_data);
/* get rss redirection write enable */
-u32 rpf_rss_redir_wr_en_get(struct aq_hw *aq_hw);
+uint32_t rpf_rss_redir_wr_en_get(struct aq_hw *aq_hw);
/* set rss redirection write enable */
-void rpf_rss_redir_wr_en_set(struct aq_hw *aq_hw, u32 rss_redir_wr_en);
+void rpf_rss_redir_wr_en_set(struct aq_hw *aq_hw, uint32_t rss_redir_wr_en);
/* set tpo to rpf system loopback */
void rpf_tpo_to_rpf_sys_lbk_set(struct aq_hw *aq_hw,
- u32 tpo_to_rpf_sys_lbk);
+ uint32_t tpo_to_rpf_sys_lbk);
/* set vlan inner ethertype */
-void hw_atl_rpf_vlan_inner_etht_set(struct aq_hw *aq_hw, u32 vlan_inner_etht);
+void hw_atl_rpf_vlan_inner_etht_set(struct aq_hw *aq_hw,
+ uint32_t vlan_inner_etht);
/* set vlan outer ethertype */
-void hw_atl_rpf_vlan_outer_etht_set(struct aq_hw *aq_hw, u32 vlan_outer_etht);
+void hw_atl_rpf_vlan_outer_etht_set(struct aq_hw *aq_hw,
+ uint32_t vlan_outer_etht);
/* set vlan promiscuous mode enable */
void hw_atl_rpf_vlan_prom_mode_en_set(struct aq_hw *aq_hw,
- u32 vlan_prom_mode_en);
+ uint32_t vlan_prom_mode_en);
/* Set VLAN untagged action */
void hw_atl_rpf_vlan_untagged_act_set(struct aq_hw *aq_hw,
- u32 vlan_untagged_act);
+ uint32_t vlan_untagged_act);
/* Set VLAN accept untagged packets */
void hw_atl_rpf_vlan_accept_untagged_packets_set(struct aq_hw *aq_hw,
- u32 vlan_acc_untagged_packets);
+ uint32_t vlan_acc_untagged_packets);
/* Set VLAN filter enable */
-void hw_atl_rpf_vlan_flr_en_set(struct aq_hw *aq_hw, u32 vlan_flr_en,
- u32 filter);
+void hw_atl_rpf_vlan_flr_en_set(struct aq_hw *aq_hw, uint32_t vlan_flr_en,
+ uint32_t filter);
/* Set VLAN Filter Action */
-void hw_atl_rpf_vlan_flr_act_set(struct aq_hw *aq_hw, u32 vlan_filter_act,
- u32 filter);
+void hw_atl_rpf_vlan_flr_act_set(struct aq_hw *aq_hw, uint32_t vlan_filter_act,
+ uint32_t filter);
/* Set VLAN ID Filter */
-void hw_atl_rpf_vlan_id_flr_set(struct aq_hw *aq_hw, u32 vlan_id_flr,
- u32 filter);
+void hw_atl_rpf_vlan_id_flr_set(struct aq_hw *aq_hw, uint32_t vlan_id_flr,
+ uint32_t filter);
/* Set VLAN RX queue assignment enable */
-void hw_atl_rpf_vlan_rxq_en_flr_set(struct aq_hw *aq_hw, u32 vlan_rxq_en,
- u32 filter);
+void hw_atl_rpf_vlan_rxq_en_flr_set(struct aq_hw *aq_hw, uint32_t vlan_rxq_en,
+ uint32_t filter);
/* Set VLAN RX queue */
-void hw_atl_rpf_vlan_rxq_flr_set(struct aq_hw *aq_hw, u32 vlan_rxq,
- u32 filter);
+void hw_atl_rpf_vlan_rxq_flr_set(struct aq_hw *aq_hw, uint32_t vlan_rxq,
+ uint32_t filter);
/* set ethertype filter enable */
-void hw_atl_rpf_etht_flr_en_set(struct aq_hw *aq_hw, u32 etht_flr_en,
- u32 filter);
+void hw_atl_rpf_etht_flr_en_set(struct aq_hw *aq_hw, uint32_t etht_flr_en,
+ uint32_t filter);
/* set ethertype user-priority enable */
void hw_atl_rpf_etht_user_priority_en_set(struct aq_hw *aq_hw,
- u32 etht_user_priority_en,
- u32 filter);
+ uint32_t etht_user_priority_en, uint32_t filter);
/* set ethertype rx queue enable */
void hw_atl_rpf_etht_rx_queue_en_set(struct aq_hw *aq_hw,
- u32 etht_rx_queue_en,
- u32 filter);
+ uint32_t etht_rx_queue_en, uint32_t filter);
/* set ethertype rx queue */
-void hw_atl_rpf_etht_rx_queue_set(struct aq_hw *aq_hw, u32 etht_rx_queue,
- u32 filter);
+void hw_atl_rpf_etht_rx_queue_set(struct aq_hw *aq_hw, uint32_t etht_rx_queue,
+ uint32_t filter);
/* set ethertype user-priority */
void hw_atl_rpf_etht_user_priority_set(struct aq_hw *aq_hw,
- u32 etht_user_priority,
- u32 filter);
+ uint32_t etht_user_priority, uint32_t filter);
/* set ethertype management queue */
-void hw_atl_rpf_etht_mgt_queue_set(struct aq_hw *aq_hw, u32 etht_mgt_queue,
- u32 filter);
+void hw_atl_rpf_etht_mgt_queue_set(struct aq_hw *aq_hw, uint32_t etht_mgt_queue,
+ uint32_t filter);
/* set ethertype filter action */
-void hw_atl_rpf_etht_flr_act_set(struct aq_hw *aq_hw, u32 etht_flr_act,
- u32 filter);
+void hw_atl_rpf_etht_flr_act_set(struct aq_hw *aq_hw, uint32_t etht_flr_act,
+ uint32_t filter);
/* set ethertype filter */
-void hw_atl_rpf_etht_flr_set(struct aq_hw *aq_hw, u32 etht_flr, u32 filter);
+void hw_atl_rpf_etht_flr_set(struct aq_hw *aq_hw, uint32_t etht_flr,
+ uint32_t filter);
/* set L3/L4 filter enable */
-void hw_atl_rpf_l3_l4_enf_set(struct aq_hw *aq_hw, u32 val, u32 filter);
+void hw_atl_rpf_l3_l4_enf_set(struct aq_hw *aq_hw, uint32_t val,
+ uint32_t filter);
/* set L3 IPv6 enable */
-void hw_atl_rpf_l3_v6_enf_set(struct aq_hw *aq_hw, u32 val, u32 filter);
+void hw_atl_rpf_l3_v6_enf_set(struct aq_hw *aq_hw, uint32_t val,
+ uint32_t filter);
/* set L3 source address enable */
-void hw_atl_rpf_l3_saf_en_set(struct aq_hw *aq_hw, u32 val, u32 filter);
+void hw_atl_rpf_l3_saf_en_set(struct aq_hw *aq_hw, uint32_t val,
+ uint32_t filter);
/* set L3 destination address enable */
-void hw_atl_rpf_l3_daf_en_set(struct aq_hw *aq_hw, u32 val, u32 filter);
+void hw_atl_rpf_l3_daf_en_set(struct aq_hw *aq_hw, uint32_t val,
+ uint32_t filter);
/* set L4 source port enable */
-void hw_atl_rpf_l4_spf_en_set(struct aq_hw *aq_hw, u32 val, u32 filter);
+void hw_atl_rpf_l4_spf_en_set(struct aq_hw *aq_hw, uint32_t val,
+ uint32_t filter);
/* set L4 destination port enable */
-void hw_atl_rpf_l4_dpf_en_set(struct aq_hw *aq_hw, u32 val, u32 filter);
+void hw_atl_rpf_l4_dpf_en_set(struct aq_hw *aq_hw, uint32_t val,
+ uint32_t filter);
/* set L4 protocol enable */
-void hw_atl_rpf_l4_protf_en_set(struct aq_hw *aq_hw, u32 val, u32 filter);
+void hw_atl_rpf_l4_protf_en_set(struct aq_hw *aq_hw, uint32_t val,
+ uint32_t filter);
/* set L3 ARP filter enable */
-void hw_atl_rpf_l3_arpf_en_set(struct aq_hw *aq_hw, u32 val, u32 filter);
+void hw_atl_rpf_l3_arpf_en_set(struct aq_hw *aq_hw, uint32_t val,
+ uint32_t filter);
/* set L3/L4 rx queue enable */
-void hw_atl_rpf_l3_l4_rxqf_en_set(struct aq_hw *aq_hw, u32 val, u32 filter);
+void hw_atl_rpf_l3_l4_rxqf_en_set(struct aq_hw *aq_hw, uint32_t val,
+ uint32_t filter);
/* set L3/L4 management queue */
-void hw_atl_rpf_l3_l4_mng_rxqf_set(struct aq_hw *aq_hw, u32 val, u32 filter);
+void hw_atl_rpf_l3_l4_mng_rxqf_set(struct aq_hw *aq_hw, uint32_t val,
+ uint32_t filter);
/* set L3/L4 filter action */
-void hw_atl_rpf_l3_l4_actf_set(struct aq_hw *aq_hw, u32 val, u32 filter);
+void hw_atl_rpf_l3_l4_actf_set(struct aq_hw *aq_hw, uint32_t val,
+ uint32_t filter);
/* set L3/L4 rx queue */
-void hw_atl_rpf_l3_l4_rxqf_set(struct aq_hw *aq_hw, u32 val, u32 filter);
+void hw_atl_rpf_l3_l4_rxqf_set(struct aq_hw *aq_hw, uint32_t val,
+ uint32_t filter);
/* set L4 protocol value */
-void hw_atl_rpf_l4_protf_set(struct aq_hw *aq_hw, u32 val, u32 filter);
+void hw_atl_rpf_l4_protf_set(struct aq_hw *aq_hw, uint32_t val,
+ uint32_t filter);
/* set L4 source port */
-void hw_atl_rpf_l4_spd_set(struct aq_hw *aq_hw, u32 val, u32 filter);
+void hw_atl_rpf_l4_spd_set(struct aq_hw *aq_hw, uint32_t val, uint32_t filter);
/* set L4 destination port */
-void hw_atl_rpf_l4_dpd_set(struct aq_hw *aq_hw, u32 val, u32 filter);
+void hw_atl_rpf_l4_dpd_set(struct aq_hw *aq_hw, uint32_t val, uint32_t filter);
/* set vlan inner ethertype */
-void rpf_vlan_inner_etht_set(struct aq_hw *aq_hw, u32 vlan_inner_etht);
+void rpf_vlan_inner_etht_set(struct aq_hw *aq_hw, uint32_t vlan_inner_etht);
/* set vlan outer ethertype */
-void rpf_vlan_outer_etht_set(struct aq_hw *aq_hw, u32 vlan_outer_etht);
+void rpf_vlan_outer_etht_set(struct aq_hw *aq_hw, uint32_t vlan_outer_etht);
/* set vlan promiscuous mode enable */
-void rpf_vlan_prom_mode_en_set(struct aq_hw *aq_hw, u32 vlan_prom_mode_en);
+void rpf_vlan_prom_mode_en_set(struct aq_hw *aq_hw, uint32_t vlan_prom_mode_en);
/* Set VLAN untagged action */
-void rpf_vlan_untagged_act_set(struct aq_hw *aq_hw, u32 vlan_untagged_act);
+void rpf_vlan_untagged_act_set(struct aq_hw *aq_hw, uint32_t vlan_untagged_act);
/* Set VLAN accept untagged packets */
void rpf_vlan_accept_untagged_packets_set(struct aq_hw *aq_hw,
- u32 vlan_accept_untagged_packets);
+ uint32_t vlan_accept_untagged_packets);
/* Set VLAN filter enable */
-void rpf_vlan_flr_en_set(struct aq_hw *aq_hw, u32 vlan_flr_en, u32 filter);
+void rpf_vlan_flr_en_set(struct aq_hw *aq_hw, uint32_t vlan_flr_en,
+ uint32_t filter);
/* Set VLAN Filter Action */
-void rpf_vlan_flr_act_set(struct aq_hw *aq_hw, u32 vlan_filter_act,
- u32 filter);
+void rpf_vlan_flr_act_set(struct aq_hw *aq_hw, uint32_t vlan_filter_act,
+ uint32_t filter);
/* Set VLAN ID Filter */
-void rpf_vlan_id_flr_set(struct aq_hw *aq_hw, u32 vlan_id_flr, u32 filter);
+void rpf_vlan_id_flr_set(struct aq_hw *aq_hw, uint32_t vlan_id_flr,
+ uint32_t filter);
/* set ethertype filter enable */
-void rpf_etht_flr_en_set(struct aq_hw *aq_hw, u32 etht_flr_en, u32 filter);
+void rpf_etht_flr_en_set(struct aq_hw *aq_hw, uint32_t etht_flr_en,
+ uint32_t filter);
/* set ethertype user-priority enable */
void rpf_etht_user_priority_en_set(struct aq_hw *aq_hw,
- u32 etht_user_priority_en, u32 filter);
+ uint32_t etht_user_priority_en, uint32_t filter);
/* set ethertype rx queue enable */
-void rpf_etht_rx_queue_en_set(struct aq_hw *aq_hw, u32 etht_rx_queue_en,
- u32 filter);
+void rpf_etht_rx_queue_en_set(struct aq_hw *aq_hw, uint32_t etht_rx_queue_en,
+ uint32_t filter);
/* set ethertype rx queue */
-void rpf_etht_rx_queue_set(struct aq_hw *aq_hw, u32 etht_rx_queue,
- u32 filter);
+void rpf_etht_rx_queue_set(struct aq_hw *aq_hw, uint32_t etht_rx_queue,
+ uint32_t filter);
/* set ethertype user-priority */
-void rpf_etht_user_priority_set(struct aq_hw *aq_hw, u32 etht_user_priority,
- u32 filter);
+void rpf_etht_user_priority_set(struct aq_hw *aq_hw,
+ uint32_t etht_user_priority, uint32_t filter);
/* set ethertype management queue */
-void rpf_etht_mgt_queue_set(struct aq_hw *aq_hw, u32 etht_mgt_queue,
- u32 filter);
+void rpf_etht_mgt_queue_set(struct aq_hw *aq_hw, uint32_t etht_mgt_queue,
+ uint32_t filter);
/* set ethertype filter action */
-void rpf_etht_flr_act_set(struct aq_hw *aq_hw, u32 etht_flr_act,
- u32 filter);
+void rpf_etht_flr_act_set(struct aq_hw *aq_hw, uint32_t etht_flr_act,
+ uint32_t filter);
/* set ethertype filter */
-void rpf_etht_flr_set(struct aq_hw *aq_hw, u32 etht_flr, u32 filter);
+void rpf_etht_flr_set(struct aq_hw *aq_hw, uint32_t etht_flr, uint32_t filter);
/* set L3/L4 filter enable */
-void hw_atl_rpf_l3_l4_enf_set(struct aq_hw *aq_hw, u32 val, u32 filter);
+void hw_atl_rpf_l3_l4_enf_set(struct aq_hw *aq_hw, uint32_t val,
+ uint32_t filter);
/* set L3 IPv6 enable */
-void hw_atl_rpf_l3_v6_enf_set(struct aq_hw *aq_hw, u32 val, u32 filter);
+void hw_atl_rpf_l3_v6_enf_set(struct aq_hw *aq_hw, uint32_t val,
+ uint32_t filter);
/* set L3 source address enable */
-void hw_atl_rpf_l3_saf_en_set(struct aq_hw *aq_hw, u32 val, u32 filter);
+void hw_atl_rpf_l3_saf_en_set(struct aq_hw *aq_hw, uint32_t val,
+ uint32_t filter);
/* set L3 destination address enable */
-void hw_atl_rpf_l3_daf_en_set(struct aq_hw *aq_hw, u32 val, u32 filter);
+void hw_atl_rpf_l3_daf_en_set(struct aq_hw *aq_hw, uint32_t val,
+ uint32_t filter);
/* set L4 source port enable */
-void hw_atl_rpf_l4_spf_en_set(struct aq_hw *aq_hw, u32 val, u32 filter);
+void hw_atl_rpf_l4_spf_en_set(struct aq_hw *aq_hw, uint32_t val,
+ uint32_t filter);
/* set L4 destination port enable */
-void hw_atl_rpf_l4_dpf_en_set(struct aq_hw *aq_hw, u32 val, u32 filter);
+void hw_atl_rpf_l4_dpf_en_set(struct aq_hw *aq_hw, uint32_t val,
+ uint32_t filter);
/* set L4 protocol enable */
-void hw_atl_rpf_l4_protf_en_set(struct aq_hw *aq_hw, u32 val, u32 filter);
+void hw_atl_rpf_l4_protf_en_set(struct aq_hw *aq_hw, uint32_t val,
+ uint32_t filter);
/* set L3 ARP filter enable */
-void hw_atl_rpf_l3_arpf_en_set(struct aq_hw *aq_hw, u32 val, u32 filter);
+void hw_atl_rpf_l3_arpf_en_set(struct aq_hw *aq_hw, uint32_t val,
+ uint32_t filter);
/* set L3/L4 rx queue enable */
-void hw_atl_rpf_l3_l4_rxqf_en_set(struct aq_hw *aq_hw, u32 val, u32 filter);
+void hw_atl_rpf_l3_l4_rxqf_en_set(struct aq_hw *aq_hw, uint32_t val,
+ uint32_t filter);
/* set L3/L4 management queue */
-void hw_atl_rpf_l3_l4_mng_rxqf_set(struct aq_hw *aq_hw, u32 val, u32 filter);
+void hw_atl_rpf_l3_l4_mng_rxqf_set(struct aq_hw *aq_hw, uint32_t val,
+ uint32_t filter);
/* set L3/L4 filter action */
-void hw_atl_rpf_l3_l4_actf_set(struct aq_hw *aq_hw, u32 val, u32 filter);
+void hw_atl_rpf_l3_l4_actf_set(struct aq_hw *aq_hw, uint32_t val,
+ uint32_t filter);
/* set L3/L4 rx queue */
-void hw_atl_rpf_l3_l4_rxqf_set(struct aq_hw *aq_hw, u32 val, u32 filter);
+void hw_atl_rpf_l3_l4_rxqf_set(struct aq_hw *aq_hw, uint32_t val,
+ uint32_t filter);
/* set L4 protocol value */
-void hw_atl_rpf_l4_protf_set(struct aq_hw *aq_hw, u32 val, u32 filter);
+void hw_atl_rpf_l4_protf_set(struct aq_hw *aq_hw, uint32_t val,
+ uint32_t filter);
/* set L4 source port */
-void hw_atl_rpf_l4_spd_set(struct aq_hw *aq_hw, u32 val, u32 filter);
+void hw_atl_rpf_l4_spd_set(struct aq_hw *aq_hw, uint32_t val, uint32_t filter);
/* set L4 destination port */
-void hw_atl_rpf_l4_dpd_set(struct aq_hw *aq_hw, u32 val, u32 filter);
+void hw_atl_rpf_l4_dpd_set(struct aq_hw *aq_hw, uint32_t val, uint32_t filter);
/* rpo */
/* set ipv4 header checksum offload enable */
void rpo_ipv4header_crc_offload_en_set(struct aq_hw *aq_hw,
- u32 ipv4header_crc_offload_en);
+ uint32_t ipv4header_crc_offload_en);
/* set rx descriptor vlan stripping */
void rpo_rx_desc_vlan_stripping_set(struct aq_hw *aq_hw,
- u32 rx_desc_vlan_stripping,
- u32 descriptor);
+ uint32_t rx_desc_vlan_stripping, uint32_t descriptor);
/* set tcp/udp checksum offload enable */
void rpo_tcp_udp_crc_offload_en_set(struct aq_hw *aq_hw,
- u32 tcp_udp_crc_offload_en);
+ uint32_t tcp_udp_crc_offload_en);
/* Set LRO Patch Optimization Enable. */
void rpo_lro_patch_optimization_en_set(struct aq_hw *aq_hw,
- u32 lro_patch_optimization_en);
+ uint32_t lro_patch_optimization_en);
/* Set Large Receive Offload Enable */
-void rpo_lro_en_set(struct aq_hw *aq_hw, u32 lro_en);
+void rpo_lro_en_set(struct aq_hw *aq_hw, uint32_t lro_en);
/* Set LRO Q Sessions Limit */
-void rpo_lro_qsessions_lim_set(struct aq_hw *aq_hw, u32 lro_qsessions_lim);
+void rpo_lro_qsessions_lim_set(struct aq_hw *aq_hw, uint32_t lro_qsessions_lim);
/* Set LRO Total Descriptor Limit */
-void rpo_lro_total_desc_lim_set(struct aq_hw *aq_hw, u32 lro_total_desc_lim);
+void rpo_lro_total_desc_lim_set(struct aq_hw *aq_hw,
+ uint32_t lro_total_desc_lim);
/* Set LRO Min Payload of First Packet */
void rpo_lro_min_pay_of_first_pkt_set(struct aq_hw *aq_hw,
- u32 lro_min_pld_of_first_pkt);
+ uint32_t lro_min_pld_of_first_pkt);
/* Set LRO Packet Limit */
-void rpo_lro_pkt_lim_set(struct aq_hw *aq_hw, u32 lro_packet_lim);
+void rpo_lro_pkt_lim_set(struct aq_hw *aq_hw, uint32_t lro_packet_lim);
/* Set LRO Max Number of Descriptors */
void rpo_lro_max_num_of_descriptors_set(struct aq_hw *aq_hw,
- u32 lro_max_desc_num, u32 lro);
+ uint32_t lro_max_desc_num, uint32_t lro);
/* Set LRO Time Base Divider */
void rpo_lro_time_base_divider_set(struct aq_hw *aq_hw,
- u32 lro_time_base_divider);
-
+ uint32_t lro_time_base_divider);
/*Set LRO Inactive Interval */
void rpo_lro_inactive_interval_set(struct aq_hw *aq_hw,
- u32 lro_inactive_interval);
+ uint32_t lro_inactive_interval);
/*Set LRO Max Coalescing Interval */
void rpo_lro_max_coalescing_interval_set(struct aq_hw *aq_hw,
- u32 lro_max_coalescing_interval);
+ uint32_t lro_max_coalescing_interval);
/* rx */
/* set rx register reset disable */
-void rx_rx_reg_res_dis_set(struct aq_hw *aq_hw, u32 rx_reg_res_dis);
+void rx_rx_reg_res_dis_set(struct aq_hw *aq_hw, uint32_t rx_reg_res_dis);
/* tdm */
/* set cpu id */
-void tdm_cpu_id_set(struct aq_hw *aq_hw, u32 cpuid, u32 dca);
+void tdm_cpu_id_set(struct aq_hw *aq_hw, uint32_t cpuid, uint32_t dca);
/* set large send offload enable */
void tdm_large_send_offload_en_set(struct aq_hw *aq_hw,
- u32 large_send_offload_en);
+ uint32_t large_send_offload_en);
/* set tx descriptor enable */
-void tdm_tx_desc_en_set(struct aq_hw *aq_hw, u32 tx_desc_en, u32 descriptor);
+void tdm_tx_desc_en_set(struct aq_hw *aq_hw, uint32_t tx_desc_en,
+ uint32_t descriptor);
/* set tx dca enable */
-void tdm_tx_dca_en_set(struct aq_hw *aq_hw, u32 tx_dca_en);
+void tdm_tx_dca_en_set(struct aq_hw *aq_hw, uint32_t tx_dca_en);
/* set tx dca mode */
-void tdm_tx_dca_mode_set(struct aq_hw *aq_hw, u32 tx_dca_mode);
+void tdm_tx_dca_mode_set(struct aq_hw *aq_hw, uint32_t tx_dca_mode);
/* set tx descriptor dca enable */
-void tdm_tx_desc_dca_en_set(struct aq_hw *aq_hw, u32 tx_desc_dca_en, u32 dca);
+void tdm_tx_desc_dca_en_set(struct aq_hw *aq_hw, uint32_t tx_desc_dca_en,
+ uint32_t dca);
/* get tx descriptor head pointer */
-u32 tdm_tx_desc_head_ptr_get(struct aq_hw *aq_hw, u32 descriptor);
+uint32_t tdm_tx_desc_head_ptr_get(struct aq_hw *aq_hw, uint32_t descriptor);
/* set tx descriptor length */
-void tdm_tx_desc_len_set(struct aq_hw *aq_hw, u32 tx_desc_len,
- u32 descriptor);
+void tdm_tx_desc_len_set(struct aq_hw *aq_hw, uint32_t tx_desc_len,
+ uint32_t descriptor);
/* set tx descriptor write-back interrupt enable */
void tdm_tx_desc_wr_wb_irq_en_set(struct aq_hw *aq_hw,
- u32 tx_desc_wr_wb_irq_en);
+ uint32_t tx_desc_wr_wb_irq_en);
/* set tx descriptor write-back threshold */
void tdm_tx_desc_wr_wb_threshold_set(struct aq_hw *aq_hw,
- u32 tx_desc_wr_wb_threshold,
- u32 descriptor);
+ uint32_t tx_desc_wr_wb_threshold, uint32_t descriptor);
/* Set TDM Interrupt Moderation Enable */
void tdm_tdm_intr_moder_en_set(struct aq_hw *aq_hw,
- u32 tdm_irq_moderation_en);
+ uint32_t tdm_irq_moderation_en);
/* thm */
/* set lso tcp flag of first packet */
void thm_lso_tcp_flag_of_first_pkt_set(struct aq_hw *aq_hw,
- u32 lso_tcp_flag_of_first_pkt);
+ uint32_t lso_tcp_flag_of_first_pkt);
/* set lso tcp flag of last packet */
void thm_lso_tcp_flag_of_last_pkt_set(struct aq_hw *aq_hw,
- u32 lso_tcp_flag_of_last_pkt);
+ uint32_t lso_tcp_flag_of_last_pkt);
/* set lso tcp flag of middle packet */
void thm_lso_tcp_flag_of_middle_pkt_set(struct aq_hw *aq_hw,
- u32 lso_tcp_flag_of_middle_pkt);
+ uint32_t lso_tcp_flag_of_middle_pkt);
/* tpb */
/* set tx buffer enable */
-void tpb_tx_buff_en_set(struct aq_hw *aq_hw, u32 tx_buff_en);
+void tpb_tx_buff_en_set(struct aq_hw *aq_hw, uint32_t tx_buff_en);
/* set tx tc mode */
-void tpb_tx_tc_mode_set(struct aq_hw *aq_hw, u32 tc_mode);
+void tpb_tx_tc_mode_set(struct aq_hw *aq_hw, uint32_t tc_mode);
/* set tx buffer high threshold (per tc) */
void tpb_tx_buff_hi_threshold_per_tc_set(struct aq_hw *aq_hw,
- u32 tx_buff_hi_threshold_per_tc,
- u32 buffer);
+ uint32_t tx_buff_hi_threshold_per_tc, uint32_t buffer);
/* set tx buffer low threshold (per tc) */
void tpb_tx_buff_lo_threshold_per_tc_set(struct aq_hw *aq_hw,
- u32 tx_buff_lo_threshold_per_tc,
- u32 buffer);
+ uint32_t tx_buff_lo_threshold_per_tc, uint32_t buffer);
/* set tx dma system loopback enable */
-void tpb_tx_dma_sys_lbk_en_set(struct aq_hw *aq_hw, u32 tx_dma_sys_lbk_en);
+void tpb_tx_dma_sys_lbk_en_set(struct aq_hw *aq_hw, uint32_t tx_dma_sys_lbk_en);
/* set tx packet buffer size (per tc) */
void tpb_tx_pkt_buff_size_per_tc_set(struct aq_hw *aq_hw,
- u32 tx_pkt_buff_size_per_tc, u32 buffer);
+ uint32_t tx_pkt_buff_size_per_tc, uint32_t buffer);
/* toggle rdm rx dma descriptor cache init */
void rdm_rx_dma_desc_cache_init_tgl(struct aq_hw *aq_hw);
/* set tx path pad insert enable */
-void tpb_tx_path_scp_ins_en_set(struct aq_hw *aq_hw, u32 tx_path_scp_ins_en);
+void tpb_tx_path_scp_ins_en_set(struct aq_hw *aq_hw,
+ uint32_t tx_path_scp_ins_en);
/* tpo */
/* set ipv4 header checksum offload enable */
void tpo_ipv4header_crc_offload_en_set(struct aq_hw *aq_hw,
- u32 ipv4header_crc_offload_en);
+ uint32_t ipv4header_crc_offload_en);
/* set tcp/udp checksum offload enable */
void tpo_tcp_udp_crc_offload_en_set(struct aq_hw *aq_hw,
- u32 tcp_udp_crc_offload_en);
+ uint32_t tcp_udp_crc_offload_en);
/* set tx pkt system loopback enable */
-void tpo_tx_pkt_sys_lbk_en_set(struct aq_hw *aq_hw, u32 tx_pkt_sys_lbk_en);
+void tpo_tx_pkt_sys_lbk_en_set(struct aq_hw *aq_hw, uint32_t tx_pkt_sys_lbk_en);
/* tps */
/* set tx packet scheduler data arbitration mode */
void tps_tx_pkt_shed_data_arb_mode_set(struct aq_hw *aq_hw,
- u32 tx_pkt_shed_data_arb_mode);
+ uint32_t tx_pkt_shed_data_arb_mode);
/* set tx packet scheduler descriptor rate current time reset */
void tps_tx_pkt_shed_desc_rate_curr_time_res_set(struct aq_hw *aq_hw,
- u32 curr_time_res);
+ uint32_t curr_time_res);
/* set tx packet scheduler descriptor rate limit */
void tps_tx_pkt_shed_desc_rate_lim_set(struct aq_hw *aq_hw,
- u32 tx_pkt_shed_desc_rate_lim);
+ uint32_t tx_pkt_shed_desc_rate_lim);
/* set tx packet scheduler descriptor tc arbitration mode */
void tps_tx_pkt_shed_desc_tc_arb_mode_set(struct aq_hw *aq_hw,
- u32 tx_pkt_shed_desc_tc_arb_mode);
+ uint32_t tx_pkt_shed_desc_tc_arb_mode);
/* set tx packet scheduler descriptor tc max credit */
void tps_tx_pkt_shed_desc_tc_max_credit_set(struct aq_hw *aq_hw,
- u32 tx_pkt_shed_desc_tc_max_credit,
- u32 tc);
+ uint32_t tx_pkt_shed_desc_tc_max_credit, uint32_t tc);
/* set tx packet scheduler descriptor tc weight */
void tps_tx_pkt_shed_desc_tc_weight_set(struct aq_hw *aq_hw,
- u32 tx_pkt_shed_desc_tc_weight,
- u32 tc);
+ uint32_t tx_pkt_shed_desc_tc_weight, uint32_t tc);
/* set tx packet scheduler descriptor vm arbitration mode */
void tps_tx_pkt_shed_desc_vm_arb_mode_set(struct aq_hw *aq_hw,
- u32 tx_pkt_shed_desc_vm_arb_mode);
+ uint32_t tx_pkt_shed_desc_vm_arb_mode);
/* set tx packet scheduler tc data max credit */
void tps_tx_pkt_shed_tc_data_max_credit_set(struct aq_hw *aq_hw,
- u32 tx_pkt_shed_tc_data_max_credit,
- u32 tc);
+ uint32_t tx_pkt_shed_tc_data_max_credit, uint32_t tc);
/* set tx packet scheduler tc data weight */
void tps_tx_pkt_shed_tc_data_weight_set(struct aq_hw *aq_hw,
- u32 tx_pkt_shed_tc_data_weight,
- u32 tc);
+ uint32_t tx_pkt_shed_tc_data_weight, uint32_t tc);
/* tx */
/* set tx register reset disable */
-void tx_tx_reg_res_dis_set(struct aq_hw *aq_hw, u32 tx_reg_res_dis);
+void tx_tx_reg_res_dis_set(struct aq_hw *aq_hw, uint32_t tx_reg_res_dis);
/* msm */
/* get register access status */
-u32 msm_reg_access_status_get(struct aq_hw *aq_hw);
+uint32_t msm_reg_access_status_get(struct aq_hw *aq_hw);
/* set register address for indirect address */
void msm_reg_addr_for_indirect_addr_set(struct aq_hw *aq_hw,
- u32 reg_addr_for_indirect_addr);
+ uint32_t reg_addr_for_indirect_addr);
/* set register read strobe */
-void msm_reg_rd_strobe_set(struct aq_hw *aq_hw, u32 reg_rd_strobe);
+void msm_reg_rd_strobe_set(struct aq_hw *aq_hw, uint32_t reg_rd_strobe);
/* get register read data */
-u32 msm_reg_rd_data_get(struct aq_hw *aq_hw);
+uint32_t msm_reg_rd_data_get(struct aq_hw *aq_hw);
/* set register write data */
-void msm_reg_wr_data_set(struct aq_hw *aq_hw, u32 reg_wr_data);
+void msm_reg_wr_data_set(struct aq_hw *aq_hw, uint32_t reg_wr_data);
/* set register write strobe */
-void msm_reg_wr_strobe_set(struct aq_hw *aq_hw, u32 reg_wr_strobe);
+void msm_reg_wr_strobe_set(struct aq_hw *aq_hw, uint32_t reg_wr_strobe);
/* pci */
/* set pci register reset disable */
-void pci_pci_reg_res_dis_set(struct aq_hw *aq_hw, u32 pci_reg_res_dis);
+void pci_pci_reg_res_dis_set(struct aq_hw *aq_hw, uint32_t pci_reg_res_dis);
/*
-* \brief Set MIF Power Gating Enable Control
-*/
-void reg_mif_power_gating_enable_control_set(struct aq_hw* hw, u32 value);
+ * \brief Set MIF Power Gating Enable Control
+ */
+void reg_mif_power_gating_enable_control_set(struct aq_hw* hw, uint32_t value);
/*
-* \brief Get MIF Power Gating Enable Control
-* \return MifPowerGatingEnableControl
-*/
-u32 reg_mif_power_gating_enable_control_get(struct aq_hw* hw);
+ * \brief Get MIF Power Gating Enable Control
+ * \return MifPowerGatingEnableControl
+ */
+uint32_t reg_mif_power_gating_enable_control_get(struct aq_hw* hw);
/* get mif up mailbox busy */
-u32 mif_mcp_up_mailbox_busy_get(struct aq_hw *aq_hw);
+uint32_t mif_mcp_up_mailbox_busy_get(struct aq_hw *aq_hw);
/* set mif up mailbox execute operation */
-void mif_mcp_up_mailbox_execute_operation_set(struct aq_hw* hw, u32 value);
+void mif_mcp_up_mailbox_execute_operation_set(struct aq_hw* hw, uint32_t value);
/* get mif uP mailbox address */
-u32 mif_mcp_up_mailbox_addr_get(struct aq_hw *aq_hw);
+uint32_t mif_mcp_up_mailbox_addr_get(struct aq_hw *aq_hw);
/* set mif uP mailbox address */
-void mif_mcp_up_mailbox_addr_set(struct aq_hw *hw, u32 value);
+void mif_mcp_up_mailbox_addr_set(struct aq_hw *hw, uint32_t value);
/* get mif uP mailbox data */
-u32 mif_mcp_up_mailbox_data_get(struct aq_hw *aq_hw);
+uint32_t mif_mcp_up_mailbox_data_get(struct aq_hw *aq_hw);
/* clear ipv4 filter destination address */
-void hw_atl_rpfl3l4_ipv4_dest_addr_clear(struct aq_hw *aq_hw, u8 location);
+void hw_atl_rpfl3l4_ipv4_dest_addr_clear(struct aq_hw *aq_hw, uint8_t location);
/* clear ipv4 filter source address */
-void hw_atl_rpfl3l4_ipv4_src_addr_clear(struct aq_hw *aq_hw, u8 location);
+void hw_atl_rpfl3l4_ipv4_src_addr_clear(struct aq_hw *aq_hw, uint8_t location);
/* clear command for filter l3-l4 */
-void hw_atl_rpfl3l4_cmd_clear(struct aq_hw *aq_hw, u8 location);
+void hw_atl_rpfl3l4_cmd_clear(struct aq_hw *aq_hw, uint8_t location);
/* clear ipv6 filter destination address */
-void hw_atl_rpfl3l4_ipv6_dest_addr_clear(struct aq_hw *aq_hw, u8 location);
+void hw_atl_rpfl3l4_ipv6_dest_addr_clear(struct aq_hw *aq_hw, uint8_t location);
/* clear ipv6 filter source address */
-void hw_atl_rpfl3l4_ipv6_src_addr_clear(struct aq_hw *aq_hw, u8 location);
+void hw_atl_rpfl3l4_ipv6_src_addr_clear(struct aq_hw *aq_hw, uint8_t location);
/* set ipv4 filter destination address */
-void hw_atl_rpfl3l4_ipv4_dest_addr_set(struct aq_hw *aq_hw, u8 location,
- u32 ipv4_dest);
+void hw_atl_rpfl3l4_ipv4_dest_addr_set(struct aq_hw *aq_hw, uint8_t location,
+ uint32_t ipv4_dest);
/* set ipv4 filter source address */
-void hw_atl_rpfl3l4_ipv4_src_addr_set(struct aq_hw *aq_hw, u8 location,
- u32 ipv4_src);
+void hw_atl_rpfl3l4_ipv4_src_addr_set(struct aq_hw *aq_hw, uint8_t location,
+ uint32_t ipv4_src);
/* set command for filter l3-l4 */
-void hw_atl_rpfl3l4_cmd_set(struct aq_hw *aq_hw, u8 location, u32 cmd);
+void hw_atl_rpfl3l4_cmd_set(struct aq_hw *aq_hw, uint8_t location,
+ uint32_t cmd);
/* set ipv6 filter source address */
-void hw_atl_rpfl3l4_ipv6_src_addr_set(struct aq_hw *aq_hw, u8 location,
- u32 *ipv6_src);
+void hw_atl_rpfl3l4_ipv6_src_addr_set(struct aq_hw *aq_hw, uint8_t location,
+ uint32_t *ipv6_src);
/* set ipv6 filter destination address */
-void hw_atl_rpfl3l4_ipv6_dest_addr_set(struct aq_hw *aq_hw, u8 location,
- u32 *ipv6_dest);
+void hw_atl_rpfl3l4_ipv6_dest_addr_set(struct aq_hw *aq_hw, uint8_t location,
+ uint32_t *ipv6_dest);
/* set vlan inner ethertype */
-void hw_atl_rpf_vlan_inner_etht_set(struct aq_hw *aq_hw, u32 vlan_inner_etht);
+void hw_atl_rpf_vlan_inner_etht_set(struct aq_hw *aq_hw,
+ uint32_t vlan_inner_etht);
/* set vlan outer ethertype */
-void hw_atl_rpf_vlan_outer_etht_set(struct aq_hw *aq_hw, u32 vlan_outer_etht);
+void hw_atl_rpf_vlan_outer_etht_set(struct aq_hw *aq_hw,
+ uint32_t vlan_outer_etht);
/* set vlan promiscuous mode enable */
void hw_atl_rpf_vlan_prom_mode_en_set(struct aq_hw *aq_hw,
- u32 vlan_prom_mode_en);
+ uint32_t vlan_prom_mode_en);
/* Set VLAN untagged action */
void hw_atl_rpf_vlan_untagged_act_set(struct aq_hw *aq_hw,
- u32 vlan_untagged_act);
+ uint32_t vlan_untagged_act);
/* Set VLAN accept untagged packets */
void hw_atl_rpf_vlan_accept_untagged_packets_set(struct aq_hw *aq_hw,
- u32 vlan_acc_untagged_packets);
+ uint32_t vlan_acc_untagged_packets);
/* Set VLAN filter enable */
-void hw_atl_rpf_vlan_flr_en_set(struct aq_hw *aq_hw, u32 vlan_flr_en,
- u32 filter);
+void hw_atl_rpf_vlan_flr_en_set(struct aq_hw *aq_hw, uint32_t vlan_flr_en,
+ uint32_t filter);
/* Set VLAN Filter Action */
-void hw_atl_rpf_vlan_flr_act_set(struct aq_hw *aq_hw, u32 vlan_filter_act,
- u32 filter);
+void hw_atl_rpf_vlan_flr_act_set(struct aq_hw *aq_hw, uint32_t vlan_filter_act,
+ uint32_t filter);
/* Set VLAN ID Filter */
-void hw_atl_rpf_vlan_id_flr_set(struct aq_hw *aq_hw, u32 vlan_id_flr,
- u32 filter);
+void hw_atl_rpf_vlan_id_flr_set(struct aq_hw *aq_hw, uint32_t vlan_id_flr,
+ uint32_t filter);
/* Set VLAN RX queue assignment enable */
-void hw_atl_rpf_vlan_rxq_en_flr_set(struct aq_hw *aq_hw, u32 vlan_rxq_en,
- u32 filter);
+void hw_atl_rpf_vlan_rxq_en_flr_set(struct aq_hw *aq_hw, uint32_t vlan_rxq_en,
+ uint32_t filter);
/* Set VLAN RX queue */
-void hw_atl_rpf_vlan_rxq_flr_set(struct aq_hw *aq_hw, u32 vlan_rxq,
- u32 filter);
+void hw_atl_rpf_vlan_rxq_flr_set(struct aq_hw *aq_hw, uint32_t vlan_rxq,
+ uint32_t filter);
/* set ethertype filter enable */
-void hw_atl_rpf_etht_flr_en_set(struct aq_hw *aq_hw, u32 etht_flr_en,
- u32 filter);
+void hw_atl_rpf_etht_flr_en_set(struct aq_hw *aq_hw, uint32_t etht_flr_en,
+ uint32_t filter);
/* set ethertype user-priority enable */
void hw_atl_rpf_etht_user_priority_en_set(struct aq_hw *aq_hw,
- u32 etht_user_priority_en,
- u32 filter);
+ uint32_t etht_user_priority_en, uint32_t filter);
/* set ethertype rx queue enable */
void hw_atl_rpf_etht_rx_queue_en_set(struct aq_hw *aq_hw,
- u32 etht_rx_queue_en,
- u32 filter);
+ uint32_t etht_rx_queue_en, uint32_t filter);
/* set ethertype rx queue */
-void hw_atl_rpf_etht_rx_queue_set(struct aq_hw *aq_hw, u32 etht_rx_queue,
- u32 filter);
+void hw_atl_rpf_etht_rx_queue_set(struct aq_hw *aq_hw, uint32_t etht_rx_queue,
+ uint32_t filter);
/* set ethertype user-priority */
void hw_atl_rpf_etht_user_priority_set(struct aq_hw *aq_hw,
- u32 etht_user_priority,
- u32 filter);
+ uint32_t etht_user_priority, uint32_t filter);
/* set ethertype management queue */
-void hw_atl_rpf_etht_mgt_queue_set(struct aq_hw *aq_hw, u32 etht_mgt_queue,
- u32 filter);
+void hw_atl_rpf_etht_mgt_queue_set(struct aq_hw *aq_hw, uint32_t etht_mgt_queue,
+ uint32_t filter);
/* set ethertype filter action */
-void hw_atl_rpf_etht_flr_act_set(struct aq_hw *aq_hw, u32 etht_flr_act,
- u32 filter);
+void hw_atl_rpf_etht_flr_act_set(struct aq_hw *aq_hw, uint32_t etht_flr_act,
+ uint32_t filter);
/* set ethertype filter */
-void hw_atl_rpf_etht_flr_set(struct aq_hw *aq_hw, u32 etht_flr, u32 filter);
+void hw_atl_rpf_etht_flr_set(struct aq_hw *aq_hw, uint32_t etht_flr,
+ uint32_t filter);
/* set L3/L4 filter enable */
-void hw_atl_rpf_l3_l4_enf_set(struct aq_hw *aq_hw, u32 val, u32 filter);
+void hw_atl_rpf_l3_l4_enf_set(struct aq_hw *aq_hw, uint32_t val,
+ uint32_t filter);
/* set L3 IPv6 enable */
-void hw_atl_rpf_l3_v6_enf_set(struct aq_hw *aq_hw, u32 val, u32 filter);
+void hw_atl_rpf_l3_v6_enf_set(struct aq_hw *aq_hw, uint32_t val,
+ uint32_t filter);
/* set L3 source address enable */
-void hw_atl_rpf_l3_saf_en_set(struct aq_hw *aq_hw, u32 val, u32 filter);
+void hw_atl_rpf_l3_saf_en_set(struct aq_hw *aq_hw, uint32_t val,
+ uint32_t filter);
/* set L3 destination address enable */
-void hw_atl_rpf_l3_daf_en_set(struct aq_hw *aq_hw, u32 val, u32 filter);
+void hw_atl_rpf_l3_daf_en_set(struct aq_hw *aq_hw, uint32_t val,
+ uint32_t filter);
/* set L4 source port enable */
-void hw_atl_rpf_l4_spf_en_set(struct aq_hw *aq_hw, u32 val, u32 filter);
+void hw_atl_rpf_l4_spf_en_set(struct aq_hw *aq_hw, uint32_t val,
+ uint32_t filter);
/* set L4 destination port enable */
-void hw_atl_rpf_l4_dpf_en_set(struct aq_hw *aq_hw, u32 val, u32 filter);
+void hw_atl_rpf_l4_dpf_en_set(struct aq_hw *aq_hw, uint32_t val,
+ uint32_t filter);
/* set L4 protocol enable */
-void hw_atl_rpf_l4_protf_en_set(struct aq_hw *aq_hw, u32 val, u32 filter);
+void hw_atl_rpf_l4_protf_en_set(struct aq_hw *aq_hw, uint32_t val,
+ uint32_t filter);
/* set L3 ARP filter enable */
-void hw_atl_rpf_l3_arpf_en_set(struct aq_hw *aq_hw, u32 val, u32 filter);
+void hw_atl_rpf_l3_arpf_en_set(struct aq_hw *aq_hw, uint32_t val,
+ uint32_t filter);
/* set L3/L4 rx queue enable */
-void hw_atl_rpf_l3_l4_rxqf_en_set(struct aq_hw *aq_hw, u32 val, u32 filter);
+void hw_atl_rpf_l3_l4_rxqf_en_set(struct aq_hw *aq_hw, uint32_t val,
+ uint32_t filter);
/* set L3/L4 management queue */
-void hw_atl_rpf_l3_l4_mng_rxqf_set(struct aq_hw *aq_hw, u32 val, u32 filter);
+void hw_atl_rpf_l3_l4_mng_rxqf_set(struct aq_hw *aq_hw, uint32_t val,
+ uint32_t filter);
/* set L3/L4 filter action */
-void hw_atl_rpf_l3_l4_actf_set(struct aq_hw *aq_hw, u32 val, u32 filter);
+void hw_atl_rpf_l3_l4_actf_set(struct aq_hw *aq_hw, uint32_t val,
+ uint32_t filter);
/* set L3/L4 rx queue */
-void hw_atl_rpf_l3_l4_rxqf_set(struct aq_hw *aq_hw, u32 val, u32 filter);
+void hw_atl_rpf_l3_l4_rxqf_set(struct aq_hw *aq_hw, uint32_t val,
+ uint32_t filter);
/* set L4 protocol value */
-void hw_atl_rpf_l4_protf_set(struct aq_hw *aq_hw, u32 val, u32 filter);
+void hw_atl_rpf_l4_protf_set(struct aq_hw *aq_hw, uint32_t val,
+ uint32_t filter);
/* set L4 source port */
-void hw_atl_rpf_l4_spd_set(struct aq_hw *aq_hw, u32 val, u32 filter);
+void hw_atl_rpf_l4_spd_set(struct aq_hw *aq_hw, uint32_t val, uint32_t filter);
/* set L4 destination port */
-void hw_atl_rpf_l4_dpd_set(struct aq_hw *aq_hw, u32 val, u32 filter);
+void hw_atl_rpf_l4_dpd_set(struct aq_hw *aq_hw, uint32_t val, uint32_t filter);
#endif /* HW_ATL_LLH_H */
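The setters and getters declared above, and the aq_hw_llh.c hunk that follows, all share one register-access convention: each bitfield is described by a register address, a bitmask, and a shift, and accesses go through AQ_WRITE_REG_BIT()/AQ_READ_REG_BIT() style read-modify-write helpers. The following is a minimal standalone sketch of that idiom, assuming a plain in-memory register array in place of the driver's real MMIO accessors; the fake_* names are illustrative only and are not part of this patch.

/*
 * Sketch of the adr/msk/shift bitfield idiom used throughout aq_hw_llh.c,
 * with an in-memory register file standing in for BAR0.  Illustrative only.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t fake_regs[0x4000 / 4];	/* stand-in for the register space */

static uint32_t
fake_read_reg(uint32_t adr)
{
	return (fake_regs[adr / 4]);
}

static void
fake_write_reg(uint32_t adr, uint32_t value)
{
	fake_regs[adr / 4] = value;
}

/* Read-modify-write one bitfield, analogous to AQ_WRITE_REG_BIT(). */
static void
fake_write_reg_bit(uint32_t adr, uint32_t msk, uint32_t shift, uint32_t value)
{
	uint32_t reg = fake_read_reg(adr);

	reg &= ~msk;
	reg |= (value << shift) & msk;
	fake_write_reg(adr, reg);
}

/* Extract one bitfield, analogous to AQ_READ_REG_BIT(). */
static uint32_t
fake_read_reg_bit(uint32_t adr, uint32_t msk, uint32_t shift)
{
	return ((fake_read_reg(adr) & msk) >> shift);
}

int
main(void)
{
	/*
	 * Example using the imr_rx{r}[4:0] field from the tables below:
	 * register 0x2100, mask 0x00001f00, shift 8 (RX ring 0 mapping).
	 */
	fake_write_reg_bit(0x00002100U, 0x00001f00U, 8U, 5U);
	printf("imr_rx0 = %u\n",
	    fake_read_reg_bit(0x00002100U, 0x00001f00U, 8U));
	return (0);
}

The address/mask/shift triplets used in the call above match the itr_imr_rxr_adr/msk/shift tables in itr_irq_map_rx_set() in the next hunk; only the surrounding helper functions are invented for the sketch.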
Index: sys/dev/aq/aq_hw_llh.c
===================================================================
--- sys/dev/aq/aq_hw_llh.c
+++ sys/dev/aq/aq_hw_llh.c
@@ -43,896 +43,956 @@
/* global */
-void reg_glb_fw_image_id1_set(struct aq_hw* hw, u32 value)
+void
+reg_glb_fw_image_id1_set(struct aq_hw* hw, uint32_t value)
{
- AQ_WRITE_REG(hw, glb_fw_image_id1_adr, value);
+ AQ_WRITE_REG(hw, glb_fw_image_id1_adr, value);
}
-u32 reg_glb_fw_image_id1_get(struct aq_hw* hw)
+uint32_t
+reg_glb_fw_image_id1_get(struct aq_hw* hw)
{
- return AQ_READ_REG(hw, glb_fw_image_id1_adr);
+ return AQ_READ_REG(hw, glb_fw_image_id1_adr);
}
-void reg_glb_cpu_sem_set(struct aq_hw *aq_hw, u32 sem_value, u32 sem_index)
+void
+reg_glb_cpu_sem_set(struct aq_hw *aq_hw, uint32_t sem_value, uint32_t sem_index)
{
- AQ_WRITE_REG(aq_hw, glb_cpu_sem_adr(sem_index), sem_value);
+ AQ_WRITE_REG(aq_hw, glb_cpu_sem_adr(sem_index), sem_value);
}
-u32 reg_glb_cpu_sem_get(struct aq_hw *aq_hw, u32 sem_index)
+uint32_t
+reg_glb_cpu_sem_get(struct aq_hw *aq_hw, uint32_t sem_index)
{
- return AQ_READ_REG(aq_hw, glb_cpu_sem_adr(sem_index));
+ return AQ_READ_REG(aq_hw, glb_cpu_sem_adr(sem_index));
}
-u32 reg_glb_standard_ctl1_get(struct aq_hw* hw)
+uint32_t
+reg_glb_standard_ctl1_get(struct aq_hw* hw)
{
- return AQ_READ_REG(hw, glb_standard_ctl1_adr);
+ return AQ_READ_REG(hw, glb_standard_ctl1_adr);
}
-void reg_glb_standard_ctl1_set(struct aq_hw* hw, u32 glb_standard_ctl1)
+void
+reg_glb_standard_ctl1_set(struct aq_hw* hw, uint32_t glb_standard_ctl1)
{
- AQ_WRITE_REG(hw, glb_standard_ctl1_adr, glb_standard_ctl1);
+ AQ_WRITE_REG(hw, glb_standard_ctl1_adr, glb_standard_ctl1);
}
-void reg_global_ctl2_set(struct aq_hw* hw, u32 global_ctl2)
+void
+reg_global_ctl2_set(struct aq_hw* hw, uint32_t global_ctl2)
{
- AQ_WRITE_REG(hw, glb_ctl2_adr, global_ctl2);
+ AQ_WRITE_REG(hw, glb_ctl2_adr, global_ctl2);
}
-u32 reg_global_ctl2_get(struct aq_hw* hw)
+uint32_t
+reg_global_ctl2_get(struct aq_hw* hw)
{
- return AQ_READ_REG(hw, glb_ctl2_adr);
+ return AQ_READ_REG(hw, glb_ctl2_adr);
}
-void reg_glb_daisy_chain_status1_set(struct aq_hw* hw, u32 glb_daisy_chain_status1)
+void
+reg_glb_daisy_chain_status1_set(struct aq_hw* hw, uint32_t glb_daisy_chain_status1)
{
- AQ_WRITE_REG(hw, glb_daisy_chain_status1_adr, glb_daisy_chain_status1);
+ AQ_WRITE_REG(hw, glb_daisy_chain_status1_adr, glb_daisy_chain_status1);
}
-u32 reg_glb_daisy_chain_status1_get(struct aq_hw* hw)
+uint32_t
+reg_glb_daisy_chain_status1_get(struct aq_hw* hw)
{
- return AQ_READ_REG(hw, glb_daisy_chain_status1_adr);
+ return AQ_READ_REG(hw, glb_daisy_chain_status1_adr);
}
-void glb_glb_reg_res_dis_set(struct aq_hw *aq_hw, u32 glb_reg_res_dis)
+void
+glb_glb_reg_res_dis_set(struct aq_hw *aq_hw, uint32_t glb_reg_res_dis)
{
- AQ_WRITE_REG_BIT(aq_hw, glb_reg_res_dis_adr,
- glb_reg_res_dis_msk,
- glb_reg_res_dis_shift,
- glb_reg_res_dis);
+ AQ_WRITE_REG_BIT(aq_hw, glb_reg_res_dis_adr, glb_reg_res_dis_msk,
+ glb_reg_res_dis_shift, glb_reg_res_dis);
}
-void glb_soft_res_set(struct aq_hw *aq_hw, u32 soft_res)
+void
+glb_soft_res_set(struct aq_hw *aq_hw, uint32_t soft_res)
{
- AQ_WRITE_REG_BIT(aq_hw, glb_soft_res_adr, glb_soft_res_msk,
- glb_soft_res_shift, soft_res);
+ AQ_WRITE_REG_BIT(aq_hw, glb_soft_res_adr, glb_soft_res_msk,
+ glb_soft_res_shift, soft_res);
}
-u32 glb_soft_res_get(struct aq_hw *aq_hw)
+uint32_t
+glb_soft_res_get(struct aq_hw *aq_hw)
{
- return AQ_READ_REG_BIT(aq_hw, glb_soft_res_adr,
- glb_soft_res_msk,
- glb_soft_res_shift);
+ return AQ_READ_REG_BIT(aq_hw, glb_soft_res_adr, glb_soft_res_msk,
+ glb_soft_res_shift);
}
-u32 reg_rx_dma_stat_counter7get(struct aq_hw *aq_hw)
+uint32_t
+reg_rx_dma_stat_counter7get(struct aq_hw *aq_hw)
{
- return AQ_READ_REG(aq_hw, rx_dma_stat_counter7_adr);
+ return AQ_READ_REG(aq_hw, rx_dma_stat_counter7_adr);
}
-u32 reg_glb_mif_id_get(struct aq_hw *aq_hw)
+uint32_t
+reg_glb_mif_id_get(struct aq_hw *aq_hw)
{
- return AQ_READ_REG(aq_hw, glb_mif_id_adr);
+ return AQ_READ_REG(aq_hw, glb_mif_id_adr);
}
-void mpi_tx_reg_res_dis_set(struct aq_hw* hw, u32 mpi_tx_reg_res_dis)
+void
+mpi_tx_reg_res_dis_set(struct aq_hw* hw, uint32_t mpi_tx_reg_res_dis)
{
- AQ_WRITE_REG_BIT(hw, mpi_tx_reg_res_dis_adr,
- mpi_tx_reg_res_dis_msk, mpi_tx_reg_res_dis_shift, mpi_tx_reg_res_dis);
+ AQ_WRITE_REG_BIT(hw, mpi_tx_reg_res_dis_adr, mpi_tx_reg_res_dis_msk,
+ mpi_tx_reg_res_dis_shift, mpi_tx_reg_res_dis);
}
-u32 mpi_tx_reg_res_dis_get(struct aq_hw* hw)
+uint32_t
+mpi_tx_reg_res_dis_get(struct aq_hw* hw)
{
- return AQ_READ_REG_BIT(hw, mpi_tx_reg_res_dis_adr,
- mpi_tx_reg_res_dis_msk, mpi_tx_reg_res_dis_shift);
+ return AQ_READ_REG_BIT(hw, mpi_tx_reg_res_dis_adr,
+ mpi_tx_reg_res_dis_msk, mpi_tx_reg_res_dis_shift);
}
/* stats */
-u32 rpb_rx_dma_drop_pkt_cnt_get(struct aq_hw *aq_hw)
+uint32_t
+rpb_rx_dma_drop_pkt_cnt_get(struct aq_hw *aq_hw)
{
- return AQ_READ_REG(aq_hw, rpb_rx_dma_drop_pkt_cnt_adr);
+ return AQ_READ_REG(aq_hw, rpb_rx_dma_drop_pkt_cnt_adr);
}
-u32 stats_rx_dma_good_octet_counterlsw_get(struct aq_hw *aq_hw)
+uint32_t
+stats_rx_dma_good_octet_counterlsw_get(struct aq_hw *aq_hw)
{
- return AQ_READ_REG(aq_hw, stats_rx_dma_good_octet_counterlsw__adr);
+ return AQ_READ_REG(aq_hw, stats_rx_dma_good_octet_counterlsw__adr);
}
-u32 stats_rx_dma_good_pkt_counterlsw_get(struct aq_hw *aq_hw)
+uint32_t
+stats_rx_dma_good_pkt_counterlsw_get(struct aq_hw *aq_hw)
{
- return AQ_READ_REG(aq_hw, stats_rx_dma_good_pkt_counterlsw__adr);
+ return AQ_READ_REG(aq_hw, stats_rx_dma_good_pkt_counterlsw__adr);
}
-u32 stats_tx_dma_good_octet_counterlsw_get(struct aq_hw *aq_hw)
+uint32_t
+stats_tx_dma_good_octet_counterlsw_get(struct aq_hw *aq_hw)
{
- return AQ_READ_REG(aq_hw, stats_tx_dma_good_octet_counterlsw__adr);
+ return AQ_READ_REG(aq_hw, stats_tx_dma_good_octet_counterlsw__adr);
}
-u32 stats_tx_dma_good_pkt_counterlsw_get(struct aq_hw *aq_hw)
+uint32_t
+stats_tx_dma_good_pkt_counterlsw_get(struct aq_hw *aq_hw)
{
- return AQ_READ_REG(aq_hw, stats_tx_dma_good_pkt_counterlsw__adr);
+ return AQ_READ_REG(aq_hw, stats_tx_dma_good_pkt_counterlsw__adr);
}
-u32 stats_rx_dma_good_octet_countermsw_get(struct aq_hw *aq_hw)
+uint32_t
+stats_rx_dma_good_octet_countermsw_get(struct aq_hw *aq_hw)
{
- return AQ_READ_REG(aq_hw, stats_rx_dma_good_octet_countermsw__adr);
+ return AQ_READ_REG(aq_hw, stats_rx_dma_good_octet_countermsw__adr);
}
-u32 stats_rx_dma_good_pkt_countermsw_get(struct aq_hw *aq_hw)
+uint32_t
+stats_rx_dma_good_pkt_countermsw_get(struct aq_hw *aq_hw)
{
- return AQ_READ_REG(aq_hw, stats_rx_dma_good_pkt_countermsw__adr);
+ return AQ_READ_REG(aq_hw, stats_rx_dma_good_pkt_countermsw__adr);
}
-u32 stats_tx_dma_good_octet_countermsw_get(struct aq_hw *aq_hw)
+uint32_t
+stats_tx_dma_good_octet_countermsw_get(struct aq_hw *aq_hw)
{
- return AQ_READ_REG(aq_hw, stats_tx_dma_good_octet_countermsw__adr);
+ return AQ_READ_REG(aq_hw, stats_tx_dma_good_octet_countermsw__adr);
}
-u32 stats_tx_dma_good_pkt_countermsw_get(struct aq_hw *aq_hw)
+uint32_t
+stats_tx_dma_good_pkt_countermsw_get(struct aq_hw *aq_hw)
{
- return AQ_READ_REG(aq_hw, stats_tx_dma_good_pkt_countermsw__adr);
+ return AQ_READ_REG(aq_hw, stats_tx_dma_good_pkt_countermsw__adr);
}
-u32 stats_rx_lro_coalesced_pkt_count0_get(struct aq_hw *aq_hw)
+uint32_t
+stats_rx_lro_coalesced_pkt_count0_get(struct aq_hw *aq_hw)
{
- return AQ_READ_REG(aq_hw, stats_rx_lo_coalesced_pkt_count0__addr);
+ return AQ_READ_REG(aq_hw, stats_rx_lo_coalesced_pkt_count0__addr);
}
/* interrupt */
-void itr_irq_auto_masklsw_set(struct aq_hw *aq_hw, u32 irq_auto_masklsw)
+void
+itr_irq_auto_masklsw_set(struct aq_hw *aq_hw, uint32_t irq_auto_masklsw)
{
- AQ_WRITE_REG(aq_hw, itr_iamrlsw_adr, irq_auto_masklsw);
+ AQ_WRITE_REG(aq_hw, itr_iamrlsw_adr, irq_auto_masklsw);
}
-void itr_irq_map_en_rx_set(struct aq_hw *aq_hw, u32 irq_map_en_rx, u32 rx)
+void
+itr_irq_map_en_rx_set(struct aq_hw *aq_hw, uint32_t irq_map_en_rx, uint32_t rx)
{
/* register address for bitfield imr_rx{r}_en */
- static u32 itr_imr_rxren_adr[32] = {
- 0x00002100U, 0x00002100U, 0x00002104U, 0x00002104U,
- 0x00002108U, 0x00002108U, 0x0000210cU, 0x0000210cU,
- 0x00002110U, 0x00002110U, 0x00002114U, 0x00002114U,
- 0x00002118U, 0x00002118U, 0x0000211cU, 0x0000211cU,
- 0x00002120U, 0x00002120U, 0x00002124U, 0x00002124U,
- 0x00002128U, 0x00002128U, 0x0000212cU, 0x0000212cU,
- 0x00002130U, 0x00002130U, 0x00002134U, 0x00002134U,
- 0x00002138U, 0x00002138U, 0x0000213cU, 0x0000213cU
- };
+ static uint32_t itr_imr_rxren_adr[32] = {
+ 0x00002100U, 0x00002100U, 0x00002104U, 0x00002104U,
+ 0x00002108U, 0x00002108U, 0x0000210cU, 0x0000210cU,
+ 0x00002110U, 0x00002110U, 0x00002114U, 0x00002114U,
+ 0x00002118U, 0x00002118U, 0x0000211cU, 0x0000211cU,
+ 0x00002120U, 0x00002120U, 0x00002124U, 0x00002124U,
+ 0x00002128U, 0x00002128U, 0x0000212cU, 0x0000212cU,
+ 0x00002130U, 0x00002130U, 0x00002134U, 0x00002134U,
+ 0x00002138U, 0x00002138U, 0x0000213cU, 0x0000213cU
+ };
/* bitmask for bitfield imr_rx{r}_en */
- static u32 itr_imr_rxren_msk[32] = {
- 0x00008000U, 0x00000080U, 0x00008000U, 0x00000080U,
- 0x00008000U, 0x00000080U, 0x00008000U, 0x00000080U,
- 0x00008000U, 0x00000080U, 0x00008000U, 0x00000080U,
- 0x00008000U, 0x00000080U, 0x00008000U, 0x00000080U,
- 0x00008000U, 0x00000080U, 0x00008000U, 0x00000080U,
- 0x00008000U, 0x00000080U, 0x00008000U, 0x00000080U,
- 0x00008000U, 0x00000080U, 0x00008000U, 0x00000080U,
- 0x00008000U, 0x00000080U, 0x00008000U, 0x00000080U
- };
+ static uint32_t itr_imr_rxren_msk[32] = {
+ 0x00008000U, 0x00000080U, 0x00008000U, 0x00000080U,
+ 0x00008000U, 0x00000080U, 0x00008000U, 0x00000080U,
+ 0x00008000U, 0x00000080U, 0x00008000U, 0x00000080U,
+ 0x00008000U, 0x00000080U, 0x00008000U, 0x00000080U,
+ 0x00008000U, 0x00000080U, 0x00008000U, 0x00000080U,
+ 0x00008000U, 0x00000080U, 0x00008000U, 0x00000080U,
+ 0x00008000U, 0x00000080U, 0x00008000U, 0x00000080U,
+ 0x00008000U, 0x00000080U, 0x00008000U, 0x00000080U
+ };
/* lower bit position of bitfield imr_rx{r}_en */
- static u32 itr_imr_rxren_shift[32] = {
- 15U, 7U, 15U, 7U, 15U, 7U, 15U, 7U,
- 15U, 7U, 15U, 7U, 15U, 7U, 15U, 7U,
- 15U, 7U, 15U, 7U, 15U, 7U, 15U, 7U,
- 15U, 7U, 15U, 7U, 15U, 7U, 15U, 7U
- };
+ static uint32_t itr_imr_rxren_shift[32] = {
+ 15U, 7U, 15U, 7U, 15U, 7U, 15U, 7U,
+ 15U, 7U, 15U, 7U, 15U, 7U, 15U, 7U,
+ 15U, 7U, 15U, 7U, 15U, 7U, 15U, 7U,
+ 15U, 7U, 15U, 7U, 15U, 7U, 15U, 7U
+ };
- AQ_WRITE_REG_BIT(aq_hw, itr_imr_rxren_adr[rx],
- itr_imr_rxren_msk[rx],
- itr_imr_rxren_shift[rx],
- irq_map_en_rx);
+ AQ_WRITE_REG_BIT(aq_hw, itr_imr_rxren_adr[rx], itr_imr_rxren_msk[rx],
+ itr_imr_rxren_shift[rx], irq_map_en_rx);
}
-void itr_irq_map_en_tx_set(struct aq_hw *aq_hw, u32 irq_map_en_tx, u32 tx)
+void
+itr_irq_map_en_tx_set(struct aq_hw *aq_hw, uint32_t irq_map_en_tx, uint32_t tx)
{
/* register address for bitfield imr_tx{t}_en */
- static u32 itr_imr_txten_adr[32] = {
- 0x00002100U, 0x00002100U, 0x00002104U, 0x00002104U,
- 0x00002108U, 0x00002108U, 0x0000210cU, 0x0000210cU,
- 0x00002110U, 0x00002110U, 0x00002114U, 0x00002114U,
- 0x00002118U, 0x00002118U, 0x0000211cU, 0x0000211cU,
- 0x00002120U, 0x00002120U, 0x00002124U, 0x00002124U,
- 0x00002128U, 0x00002128U, 0x0000212cU, 0x0000212cU,
- 0x00002130U, 0x00002130U, 0x00002134U, 0x00002134U,
- 0x00002138U, 0x00002138U, 0x0000213cU, 0x0000213cU
- };
+ static uint32_t itr_imr_txten_adr[32] = {
+ 0x00002100U, 0x00002100U, 0x00002104U, 0x00002104U,
+ 0x00002108U, 0x00002108U, 0x0000210cU, 0x0000210cU,
+ 0x00002110U, 0x00002110U, 0x00002114U, 0x00002114U,
+ 0x00002118U, 0x00002118U, 0x0000211cU, 0x0000211cU,
+ 0x00002120U, 0x00002120U, 0x00002124U, 0x00002124U,
+ 0x00002128U, 0x00002128U, 0x0000212cU, 0x0000212cU,
+ 0x00002130U, 0x00002130U, 0x00002134U, 0x00002134U,
+ 0x00002138U, 0x00002138U, 0x0000213cU, 0x0000213cU
+ };
/* bitmask for bitfield imr_tx{t}_en */
- static u32 itr_imr_txten_msk[32] = {
- 0x80000000U, 0x00800000U, 0x80000000U, 0x00800000U,
- 0x80000000U, 0x00800000U, 0x80000000U, 0x00800000U,
- 0x80000000U, 0x00800000U, 0x80000000U, 0x00800000U,
- 0x80000000U, 0x00800000U, 0x80000000U, 0x00800000U,
- 0x80000000U, 0x00800000U, 0x80000000U, 0x00800000U,
- 0x80000000U, 0x00800000U, 0x80000000U, 0x00800000U,
- 0x80000000U, 0x00800000U, 0x80000000U, 0x00800000U,
- 0x80000000U, 0x00800000U, 0x80000000U, 0x00800000U
- };
+ static uint32_t itr_imr_txten_msk[32] = {
+ 0x80000000U, 0x00800000U, 0x80000000U, 0x00800000U,
+ 0x80000000U, 0x00800000U, 0x80000000U, 0x00800000U,
+ 0x80000000U, 0x00800000U, 0x80000000U, 0x00800000U,
+ 0x80000000U, 0x00800000U, 0x80000000U, 0x00800000U,
+ 0x80000000U, 0x00800000U, 0x80000000U, 0x00800000U,
+ 0x80000000U, 0x00800000U, 0x80000000U, 0x00800000U,
+ 0x80000000U, 0x00800000U, 0x80000000U, 0x00800000U,
+ 0x80000000U, 0x00800000U, 0x80000000U, 0x00800000U
+ };
/* lower bit position of bitfield imr_tx{t}_en */
- static u32 itr_imr_txten_shift[32] = {
- 31U, 23U, 31U, 23U, 31U, 23U, 31U, 23U,
- 31U, 23U, 31U, 23U, 31U, 23U, 31U, 23U,
- 31U, 23U, 31U, 23U, 31U, 23U, 31U, 23U,
- 31U, 23U, 31U, 23U, 31U, 23U, 31U, 23U
- };
+ static uint32_t itr_imr_txten_shift[32] = {
+ 31U, 23U, 31U, 23U, 31U, 23U, 31U, 23U,
+ 31U, 23U, 31U, 23U, 31U, 23U, 31U, 23U,
+ 31U, 23U, 31U, 23U, 31U, 23U, 31U, 23U,
+ 31U, 23U, 31U, 23U, 31U, 23U, 31U, 23U
+ };
- AQ_WRITE_REG_BIT(aq_hw, itr_imr_txten_adr[tx],
- itr_imr_txten_msk[tx],
- itr_imr_txten_shift[tx],
- irq_map_en_tx);
+ AQ_WRITE_REG_BIT(aq_hw, itr_imr_txten_adr[tx], itr_imr_txten_msk[tx],
+ itr_imr_txten_shift[tx], irq_map_en_tx);
}
-void itr_irq_map_rx_set(struct aq_hw *aq_hw, u32 irq_map_rx, u32 rx)
+void
+itr_irq_map_rx_set(struct aq_hw *aq_hw, uint32_t irq_map_rx, uint32_t rx)
{
/* register address for bitfield imr_rx{r}[4:0] */
- static u32 itr_imr_rxr_adr[32] = {
- 0x00002100U, 0x00002100U, 0x00002104U, 0x00002104U,
- 0x00002108U, 0x00002108U, 0x0000210cU, 0x0000210cU,
- 0x00002110U, 0x00002110U, 0x00002114U, 0x00002114U,
- 0x00002118U, 0x00002118U, 0x0000211cU, 0x0000211cU,
- 0x00002120U, 0x00002120U, 0x00002124U, 0x00002124U,
- 0x00002128U, 0x00002128U, 0x0000212cU, 0x0000212cU,
- 0x00002130U, 0x00002130U, 0x00002134U, 0x00002134U,
- 0x00002138U, 0x00002138U, 0x0000213cU, 0x0000213cU
- };
+ static uint32_t itr_imr_rxr_adr[32] = {
+ 0x00002100U, 0x00002100U, 0x00002104U, 0x00002104U,
+ 0x00002108U, 0x00002108U, 0x0000210cU, 0x0000210cU,
+ 0x00002110U, 0x00002110U, 0x00002114U, 0x00002114U,
+ 0x00002118U, 0x00002118U, 0x0000211cU, 0x0000211cU,
+ 0x00002120U, 0x00002120U, 0x00002124U, 0x00002124U,
+ 0x00002128U, 0x00002128U, 0x0000212cU, 0x0000212cU,
+ 0x00002130U, 0x00002130U, 0x00002134U, 0x00002134U,
+ 0x00002138U, 0x00002138U, 0x0000213cU, 0x0000213cU
+ };
/* bitmask for bitfield imr_rx{r}[4:0] */
- static u32 itr_imr_rxr_msk[32] = {
- 0x00001f00U, 0x0000001fU, 0x00001f00U, 0x0000001fU,
- 0x00001f00U, 0x0000001fU, 0x00001f00U, 0x0000001fU,
- 0x00001f00U, 0x0000001fU, 0x00001f00U, 0x0000001fU,
- 0x00001f00U, 0x0000001fU, 0x00001f00U, 0x0000001fU,
- 0x00001f00U, 0x0000001fU, 0x00001f00U, 0x0000001fU,
- 0x00001f00U, 0x0000001fU, 0x00001f00U, 0x0000001fU,
- 0x00001f00U, 0x0000001fU, 0x00001f00U, 0x0000001fU,
- 0x00001f00U, 0x0000001fU, 0x00001f00U, 0x0000001fU
- };
+ static uint32_t itr_imr_rxr_msk[32] = {
+ 0x00001f00U, 0x0000001fU, 0x00001f00U, 0x0000001fU,
+ 0x00001f00U, 0x0000001fU, 0x00001f00U, 0x0000001fU,
+ 0x00001f00U, 0x0000001fU, 0x00001f00U, 0x0000001fU,
+ 0x00001f00U, 0x0000001fU, 0x00001f00U, 0x0000001fU,
+ 0x00001f00U, 0x0000001fU, 0x00001f00U, 0x0000001fU,
+ 0x00001f00U, 0x0000001fU, 0x00001f00U, 0x0000001fU,
+ 0x00001f00U, 0x0000001fU, 0x00001f00U, 0x0000001fU,
+ 0x00001f00U, 0x0000001fU, 0x00001f00U, 0x0000001fU
+ };
/* lower bit position of bitfield imr_rx{r}[4:0] */
- static u32 itr_imr_rxr_shift[32] = {
- 8U, 0U, 8U, 0U, 8U, 0U, 8U, 0U,
- 8U, 0U, 8U, 0U, 8U, 0U, 8U, 0U,
- 8U, 0U, 8U, 0U, 8U, 0U, 8U, 0U,
- 8U, 0U, 8U, 0U, 8U, 0U, 8U, 0U
- };
+ static uint32_t itr_imr_rxr_shift[32] = {
+ 8U, 0U, 8U, 0U, 8U, 0U, 8U, 0U,
+ 8U, 0U, 8U, 0U, 8U, 0U, 8U, 0U,
+ 8U, 0U, 8U, 0U, 8U, 0U, 8U, 0U,
+ 8U, 0U, 8U, 0U, 8U, 0U, 8U, 0U
+ };
- AQ_WRITE_REG_BIT(aq_hw, itr_imr_rxr_adr[rx],
- itr_imr_rxr_msk[rx],
- itr_imr_rxr_shift[rx],
- irq_map_rx);
+ AQ_WRITE_REG_BIT(aq_hw, itr_imr_rxr_adr[rx], itr_imr_rxr_msk[rx],
+ itr_imr_rxr_shift[rx], irq_map_rx);
}
-void itr_irq_map_tx_set(struct aq_hw *aq_hw, u32 irq_map_tx, u32 tx)
+void
+itr_irq_map_tx_set(struct aq_hw *aq_hw, uint32_t irq_map_tx, uint32_t tx)
{
/* register address for bitfield imr_tx{t}[4:0] */
- static u32 itr_imr_txt_adr[32] = {
- 0x00002100U, 0x00002100U, 0x00002104U, 0x00002104U,
- 0x00002108U, 0x00002108U, 0x0000210cU, 0x0000210cU,
- 0x00002110U, 0x00002110U, 0x00002114U, 0x00002114U,
- 0x00002118U, 0x00002118U, 0x0000211cU, 0x0000211cU,
- 0x00002120U, 0x00002120U, 0x00002124U, 0x00002124U,
- 0x00002128U, 0x00002128U, 0x0000212cU, 0x0000212cU,
- 0x00002130U, 0x00002130U, 0x00002134U, 0x00002134U,
- 0x00002138U, 0x00002138U, 0x0000213cU, 0x0000213cU
- };
+ static uint32_t itr_imr_txt_adr[32] = {
+ 0x00002100U, 0x00002100U, 0x00002104U, 0x00002104U,
+ 0x00002108U, 0x00002108U, 0x0000210cU, 0x0000210cU,
+ 0x00002110U, 0x00002110U, 0x00002114U, 0x00002114U,
+ 0x00002118U, 0x00002118U, 0x0000211cU, 0x0000211cU,
+ 0x00002120U, 0x00002120U, 0x00002124U, 0x00002124U,
+ 0x00002128U, 0x00002128U, 0x0000212cU, 0x0000212cU,
+ 0x00002130U, 0x00002130U, 0x00002134U, 0x00002134U,
+ 0x00002138U, 0x00002138U, 0x0000213cU, 0x0000213cU
+ };
/* bitmask for bitfield imr_tx{t}[4:0] */
- static u32 itr_imr_txt_msk[32] = {
- 0x1f000000U, 0x001f0000U, 0x1f000000U, 0x001f0000U,
- 0x1f000000U, 0x001f0000U, 0x1f000000U, 0x001f0000U,
- 0x1f000000U, 0x001f0000U, 0x1f000000U, 0x001f0000U,
- 0x1f000000U, 0x001f0000U, 0x1f000000U, 0x001f0000U,
- 0x1f000000U, 0x001f0000U, 0x1f000000U, 0x001f0000U,
- 0x1f000000U, 0x001f0000U, 0x1f000000U, 0x001f0000U,
- 0x1f000000U, 0x001f0000U, 0x1f000000U, 0x001f0000U,
- 0x1f000000U, 0x001f0000U, 0x1f000000U, 0x001f0000U
- };
+ static uint32_t itr_imr_txt_msk[32] = {
+ 0x1f000000U, 0x001f0000U, 0x1f000000U, 0x001f0000U,
+ 0x1f000000U, 0x001f0000U, 0x1f000000U, 0x001f0000U,
+ 0x1f000000U, 0x001f0000U, 0x1f000000U, 0x001f0000U,
+ 0x1f000000U, 0x001f0000U, 0x1f000000U, 0x001f0000U,
+ 0x1f000000U, 0x001f0000U, 0x1f000000U, 0x001f0000U,
+ 0x1f000000U, 0x001f0000U, 0x1f000000U, 0x001f0000U,
+ 0x1f000000U, 0x001f0000U, 0x1f000000U, 0x001f0000U,
+ 0x1f000000U, 0x001f0000U, 0x1f000000U, 0x001f0000U
+ };
/* lower bit position of bitfield imr_tx{t}[4:0] */
- static u32 itr_imr_txt_shift[32] = {
- 24U, 16U, 24U, 16U, 24U, 16U, 24U, 16U,
- 24U, 16U, 24U, 16U, 24U, 16U, 24U, 16U,
- 24U, 16U, 24U, 16U, 24U, 16U, 24U, 16U,
- 24U, 16U, 24U, 16U, 24U, 16U, 24U, 16U
- };
+ static uint32_t itr_imr_txt_shift[32] = {
+ 24U, 16U, 24U, 16U, 24U, 16U, 24U, 16U,
+ 24U, 16U, 24U, 16U, 24U, 16U, 24U, 16U,
+ 24U, 16U, 24U, 16U, 24U, 16U, 24U, 16U,
+ 24U, 16U, 24U, 16U, 24U, 16U, 24U, 16U
+ };
- AQ_WRITE_REG_BIT(aq_hw, itr_imr_txt_adr[tx],
- itr_imr_txt_msk[tx],
- itr_imr_txt_shift[tx],
- irq_map_tx);
+ AQ_WRITE_REG_BIT(aq_hw, itr_imr_txt_adr[tx], itr_imr_txt_msk[tx],
+ itr_imr_txt_shift[tx], irq_map_tx);
}
-void itr_irq_msk_clearlsw_set(struct aq_hw *aq_hw, u32 irq_msk_clearlsw)
+void
+itr_irq_msk_clearlsw_set(struct aq_hw *aq_hw, uint32_t irq_msk_clearlsw)
{
- AQ_WRITE_REG(aq_hw, itr_imcrlsw_adr, irq_msk_clearlsw);
+ AQ_WRITE_REG(aq_hw, itr_imcrlsw_adr, irq_msk_clearlsw);
}
-void itr_irq_msk_setlsw_set(struct aq_hw *aq_hw, u32 irq_msk_setlsw)
+void
+itr_irq_msk_setlsw_set(struct aq_hw *aq_hw, uint32_t irq_msk_setlsw)
{
- AQ_WRITE_REG(aq_hw, itr_imsrlsw_adr, irq_msk_setlsw);
+ AQ_WRITE_REG(aq_hw, itr_imsrlsw_adr, irq_msk_setlsw);
}
-void itr_irq_reg_res_dis_set(struct aq_hw *aq_hw, u32 irq_reg_res_dis)
+void
+itr_irq_reg_res_dis_set(struct aq_hw *aq_hw, uint32_t irq_reg_res_dis)
{
- AQ_WRITE_REG_BIT(aq_hw, itr_reg_res_dsbl_adr,
- itr_reg_res_dsbl_msk,
- itr_reg_res_dsbl_shift, irq_reg_res_dis);
+ AQ_WRITE_REG_BIT(aq_hw, itr_reg_res_dsbl_adr, itr_reg_res_dsbl_msk,
+ itr_reg_res_dsbl_shift, irq_reg_res_dis);
}
-void itr_irq_status_clearlsw_set(struct aq_hw *aq_hw,
- u32 irq_status_clearlsw)
+void
+itr_irq_status_clearlsw_set(struct aq_hw *aq_hw, uint32_t irq_status_clearlsw)
{
- AQ_WRITE_REG(aq_hw, itr_iscrlsw_adr, irq_status_clearlsw);
+ AQ_WRITE_REG(aq_hw, itr_iscrlsw_adr, irq_status_clearlsw);
}
-u32 itr_irq_statuslsw_get(struct aq_hw *aq_hw)
+uint32_t
+itr_irq_statuslsw_get(struct aq_hw *aq_hw)
{
- return AQ_READ_REG(aq_hw, itr_isrlsw_adr);
+ return AQ_READ_REG(aq_hw, itr_isrlsw_adr);
}
-u32 itr_res_irq_get(struct aq_hw *aq_hw)
+uint32_t
+itr_res_irq_get(struct aq_hw *aq_hw)
{
- return AQ_READ_REG_BIT(aq_hw, itr_res_adr, itr_res_msk,
- itr_res_shift);
+ return AQ_READ_REG_BIT(aq_hw, itr_res_adr, itr_res_msk, itr_res_shift);
}
-void itr_res_irq_set(struct aq_hw *aq_hw, u32 res_irq)
+void
+itr_res_irq_set(struct aq_hw *aq_hw, uint32_t res_irq)
{
- AQ_WRITE_REG_BIT(aq_hw, itr_res_adr, itr_res_msk,
- itr_res_shift, res_irq);
+ AQ_WRITE_REG_BIT(aq_hw, itr_res_adr, itr_res_msk, itr_res_shift,
+ res_irq);
}
-void itr_link_int_map_en_set(struct aq_hw *aq_hw, u32 link_int_en_map_en)
+void
+itr_link_int_map_en_set(struct aq_hw *aq_hw, uint32_t link_int_en_map_en)
{
- AQ_WRITE_REG_BIT(aq_hw, itrImrLinkEn_ADR, itrImrLinkEn_MSK, itrImrLinkEn_SHIFT, link_int_en_map_en);
+ AQ_WRITE_REG_BIT(aq_hw, itrImrLinkEn_ADR, itrImrLinkEn_MSK,
+ itrImrLinkEn_SHIFT, link_int_en_map_en);
}
-u32 itr_link_int_map_en_get(struct aq_hw *aq_hw)
+uint32_t
+itr_link_int_map_en_get(struct aq_hw *aq_hw)
{
- return AQ_READ_REG_BIT(aq_hw, itrImrLinkEn_ADR, itrImrLinkEn_MSK, itrImrLinkEn_SHIFT);
+ return AQ_READ_REG_BIT(aq_hw, itrImrLinkEn_ADR, itrImrLinkEn_MSK,
+ itrImrLinkEn_SHIFT);
}
-void itr_link_int_map_set(struct aq_hw *aq_hw, u32 link_int_map)
+void
+itr_link_int_map_set(struct aq_hw *aq_hw, uint32_t link_int_map)
{
- AQ_WRITE_REG_BIT(aq_hw, itrImrLink_ADR, itrImrLink_MSK, itrImrLink_SHIFT, link_int_map);
+ AQ_WRITE_REG_BIT(aq_hw, itrImrLink_ADR, itrImrLink_MSK,
+ itrImrLink_SHIFT, link_int_map);
}
-u32 itr_link_int_map_get(struct aq_hw *aq_hw)
+uint32_t
+itr_link_int_map_get(struct aq_hw *aq_hw)
{
- return AQ_READ_REG_BIT(aq_hw, itrImrLink_ADR, itrImrLink_MSK, itrImrLink_SHIFT);
+ return AQ_READ_REG_BIT(aq_hw, itrImrLink_ADR, itrImrLink_MSK,
+ itrImrLink_SHIFT);
}
-void itr_mif_int_map_en_set(struct aq_hw *aq_hw, u32 mifInterruptMappingEnable, u32 mif)
+void
+itr_mif_int_map_en_set(struct aq_hw *aq_hw, uint32_t mifInterruptMappingEnable,
+ uint32_t mif)
{
- AQ_WRITE_REG_BIT(aq_hw, itrImrMifMEn_ADR(mif), itrImrMifMEn_MSK(mif), itrImrMifMEn_SHIFT(mif), mifInterruptMappingEnable);
+ AQ_WRITE_REG_BIT(aq_hw, itrImrMifMEn_ADR(mif), itrImrMifMEn_MSK(mif),
+ itrImrMifMEn_SHIFT(mif), mifInterruptMappingEnable);
}
-u32 itr_mif_int_map_en_get(struct aq_hw *aq_hw, u32 mif)
+uint32_t
+itr_mif_int_map_en_get(struct aq_hw *aq_hw, uint32_t mif)
{
- return AQ_READ_REG_BIT(aq_hw, itrImrMifMEn_ADR(mif), itrImrMifMEn_MSK(mif), itrImrMifMEn_SHIFT(mif));
+ return AQ_READ_REG_BIT(aq_hw, itrImrMifMEn_ADR(mif),
+ itrImrMifMEn_MSK(mif), itrImrMifMEn_SHIFT(mif));
}
-void itr_mif_int_map_set(struct aq_hw *aq_hw, u32 mifInterruptMapping, u32 mif)
+void
+itr_mif_int_map_set(struct aq_hw *aq_hw, uint32_t mifInterruptMapping, uint32_t mif)
{
- AQ_WRITE_REG_BIT(aq_hw, itrImrMifM_ADR(mif), itrImrMifM_MSK(mif), itrImrMifM_SHIFT(mif), mifInterruptMapping);
+ AQ_WRITE_REG_BIT(aq_hw, itrImrMifM_ADR(mif), itrImrMifM_MSK(mif),
+ itrImrMifM_SHIFT(mif), mifInterruptMapping);
}
-u32 itr_mif_int_map_get(struct aq_hw *aq_hw, u32 mif)
+uint32_t
+itr_mif_int_map_get(struct aq_hw *aq_hw, uint32_t mif)
{
- return AQ_READ_REG_BIT(aq_hw, itrImrMifM_ADR(mif), itrImrMifM_MSK(mif), itrImrMifM_SHIFT(mif));
-}
+ return AQ_READ_REG_BIT(aq_hw, itrImrMifM_ADR(mif), itrImrMifM_MSK(mif),
+ itrImrMifM_SHIFT(mif));
+}
-void itr_irq_mode_set(struct aq_hw *aq_hw, u32 irq_mode)
+void
+itr_irq_mode_set(struct aq_hw *aq_hw, uint32_t irq_mode)
{
- AQ_WRITE_REG_BIT(aq_hw, itrIntMode_ADR, itrIntMode_MSK, itrIntMode_SHIFT, irq_mode);
+ AQ_WRITE_REG_BIT(aq_hw, itrIntMode_ADR, itrIntMode_MSK,
+ itrIntMode_SHIFT, irq_mode);
}
-void itr_irq_status_cor_en_set(struct aq_hw *aq_hw, u32 irq_status_cor_en)
+void
+itr_irq_status_cor_en_set(struct aq_hw *aq_hw, uint32_t irq_status_cor_en)
{
- AQ_WRITE_REG_BIT(aq_hw, itrIsrCorEn_ADR, itrIsrCorEn_MSK, itrIsrCorEn_SHIFT, irq_status_cor_en);
+ AQ_WRITE_REG_BIT(aq_hw, itrIsrCorEn_ADR, itrIsrCorEn_MSK,
+ itrIsrCorEn_SHIFT, irq_status_cor_en);
}
-void itr_irq_auto_mask_clr_en_set(struct aq_hw *aq_hw, u32 irq_auto_mask_clr_en)
+void
+itr_irq_auto_mask_clr_en_set(struct aq_hw *aq_hw, uint32_t irq_auto_mask_clr_en)
{
- AQ_WRITE_REG_BIT(aq_hw, itrIamrClrEn_ADR, itrIamrClrEn_MSK, itrIamrClrEn_SHIFT, irq_auto_mask_clr_en);
+ AQ_WRITE_REG_BIT(aq_hw, itrIamrClrEn_ADR, itrIamrClrEn_MSK,
+ itrIamrClrEn_SHIFT, irq_auto_mask_clr_en);
}
/* rdm */
-void rdm_cpu_id_set(struct aq_hw *aq_hw, u32 cpuid, u32 dca)
+void
+rdm_cpu_id_set(struct aq_hw *aq_hw, uint32_t cpuid, uint32_t dca)
{
- AQ_WRITE_REG_BIT(aq_hw, rdm_dcadcpuid_adr(dca),
- rdm_dcadcpuid_msk,
- rdm_dcadcpuid_shift, cpuid);
+ AQ_WRITE_REG_BIT(aq_hw, rdm_dcadcpuid_adr(dca), rdm_dcadcpuid_msk,
+ rdm_dcadcpuid_shift, cpuid);
}
-void rdm_rx_dca_en_set(struct aq_hw *aq_hw, u32 rx_dca_en)
+void
+rdm_rx_dca_en_set(struct aq_hw *aq_hw, uint32_t rx_dca_en)
{
- AQ_WRITE_REG_BIT(aq_hw, rdm_dca_en_adr, rdm_dca_en_msk,
- rdm_dca_en_shift, rx_dca_en);
+ AQ_WRITE_REG_BIT(aq_hw, rdm_dca_en_adr, rdm_dca_en_msk,
+ rdm_dca_en_shift, rx_dca_en);
}
-void rdm_rx_dca_mode_set(struct aq_hw *aq_hw, u32 rx_dca_mode)
+void
+rdm_rx_dca_mode_set(struct aq_hw *aq_hw, uint32_t rx_dca_mode)
{
- AQ_WRITE_REG_BIT(aq_hw, rdm_dca_mode_adr, rdm_dca_mode_msk,
- rdm_dca_mode_shift, rx_dca_mode);
+ AQ_WRITE_REG_BIT(aq_hw, rdm_dca_mode_adr, rdm_dca_mode_msk,
+ rdm_dca_mode_shift, rx_dca_mode);
}
-void rdm_rx_desc_data_buff_size_set(struct aq_hw *aq_hw,
- u32 rx_desc_data_buff_size, u32 descriptor)
+void
+rdm_rx_desc_data_buff_size_set(struct aq_hw *aq_hw, uint32_t rx_desc_data_buff_size,
+ uint32_t descriptor)
{
- AQ_WRITE_REG_BIT(aq_hw, rdm_descddata_size_adr(descriptor),
- rdm_descddata_size_msk,
- rdm_descddata_size_shift,
- rx_desc_data_buff_size);
+ AQ_WRITE_REG_BIT(aq_hw, rdm_descddata_size_adr(descriptor),
+ rdm_descddata_size_msk, rdm_descddata_size_shift,
+ rx_desc_data_buff_size);
}
-void rdm_rx_desc_dca_en_set(struct aq_hw *aq_hw, u32 rx_desc_dca_en, u32 dca)
+void
+rdm_rx_desc_dca_en_set(struct aq_hw *aq_hw, uint32_t rx_desc_dca_en, uint32_t dca)
{
- AQ_WRITE_REG_BIT(aq_hw, rdm_dcaddesc_en_adr(dca),
- rdm_dcaddesc_en_msk,
- rdm_dcaddesc_en_shift,
- rx_desc_dca_en);
+ AQ_WRITE_REG_BIT(aq_hw, rdm_dcaddesc_en_adr(dca), rdm_dcaddesc_en_msk,
+ rdm_dcaddesc_en_shift, rx_desc_dca_en);
}
-void rdm_rx_desc_en_set(struct aq_hw *aq_hw, u32 rx_desc_en, u32 descriptor)
+void
+rdm_rx_desc_en_set(struct aq_hw *aq_hw, uint32_t rx_desc_en, uint32_t descriptor)
{
- AQ_WRITE_REG_BIT(aq_hw, rdm_descden_adr(descriptor),
- rdm_descden_msk,
- rdm_descden_shift,
- rx_desc_en);
+ AQ_WRITE_REG_BIT(aq_hw, rdm_descden_adr(descriptor), rdm_descden_msk,
+ rdm_descden_shift, rx_desc_en);
}
-void rdm_rx_desc_head_buff_size_set(struct aq_hw *aq_hw,
- u32 rx_desc_head_buff_size, u32 descriptor)
+void
+rdm_rx_desc_head_buff_size_set(struct aq_hw *aq_hw,
+ uint32_t rx_desc_head_buff_size, uint32_t descriptor)
{
- AQ_WRITE_REG_BIT(aq_hw, rdm_descdhdr_size_adr(descriptor),
- rdm_descdhdr_size_msk,
- rdm_descdhdr_size_shift,
- rx_desc_head_buff_size);
+ AQ_WRITE_REG_BIT(aq_hw, rdm_descdhdr_size_adr(descriptor),
+ rdm_descdhdr_size_msk, rdm_descdhdr_size_shift,
+ rx_desc_head_buff_size);
}
-void rdm_rx_desc_head_splitting_set(struct aq_hw *aq_hw,
- u32 rx_desc_head_splitting, u32 descriptor)
+void
+rdm_rx_desc_head_splitting_set(struct aq_hw *aq_hw,
+ uint32_t rx_desc_head_splitting, uint32_t descriptor)
{
- AQ_WRITE_REG_BIT(aq_hw, rdm_descdhdr_split_adr(descriptor),
- rdm_descdhdr_split_msk,
- rdm_descdhdr_split_shift,
- rx_desc_head_splitting);
+ AQ_WRITE_REG_BIT(aq_hw, rdm_descdhdr_split_adr(descriptor),
+ rdm_descdhdr_split_msk, rdm_descdhdr_split_shift,
+ rx_desc_head_splitting);
}
-u32 rdm_rx_desc_head_ptr_get(struct aq_hw *aq_hw, u32 descriptor)
+uint32_t
+rdm_rx_desc_head_ptr_get(struct aq_hw *aq_hw, uint32_t descriptor)
{
- return AQ_READ_REG_BIT(aq_hw, rdm_descdhd_adr(descriptor),
- rdm_descdhd_msk, rdm_descdhd_shift);
+ return AQ_READ_REG_BIT(aq_hw, rdm_descdhd_adr(descriptor),
+ rdm_descdhd_msk, rdm_descdhd_shift);
}
-void rdm_rx_desc_len_set(struct aq_hw *aq_hw, u32 rx_desc_len, u32 descriptor)
+void
+rdm_rx_desc_len_set(struct aq_hw *aq_hw, uint32_t rx_desc_len, uint32_t descriptor)
{
- AQ_WRITE_REG_BIT(aq_hw, rdm_descdlen_adr(descriptor),
- rdm_descdlen_msk, rdm_descdlen_shift,
- rx_desc_len);
+ AQ_WRITE_REG_BIT(aq_hw, rdm_descdlen_adr(descriptor), rdm_descdlen_msk,
+ rdm_descdlen_shift, rx_desc_len);
}
-void rdm_rx_desc_res_set(struct aq_hw *aq_hw, u32 rx_desc_res, u32 descriptor)
+void
+rdm_rx_desc_res_set(struct aq_hw *aq_hw, uint32_t rx_desc_res, uint32_t descriptor)
{
- AQ_WRITE_REG_BIT(aq_hw, rdm_descdreset_adr(descriptor),
- rdm_descdreset_msk, rdm_descdreset_shift,
- rx_desc_res);
+ AQ_WRITE_REG_BIT(aq_hw, rdm_descdreset_adr(descriptor),
+ rdm_descdreset_msk, rdm_descdreset_shift, rx_desc_res);
}
-void rdm_rx_desc_wr_wb_irq_en_set(struct aq_hw *aq_hw,
- u32 rx_desc_wr_wb_irq_en)
+void
+rdm_rx_desc_wr_wb_irq_en_set(struct aq_hw *aq_hw,
+ uint32_t rx_desc_wr_wb_irq_en)
{
- AQ_WRITE_REG_BIT(aq_hw, rdm_int_desc_wrb_en_adr,
- rdm_int_desc_wrb_en_msk,
- rdm_int_desc_wrb_en_shift,
- rx_desc_wr_wb_irq_en);
+ AQ_WRITE_REG_BIT(aq_hw, rdm_int_desc_wrb_en_adr,
+ rdm_int_desc_wrb_en_msk, rdm_int_desc_wrb_en_shift,
+ rx_desc_wr_wb_irq_en);
}
-void rdm_rx_head_dca_en_set(struct aq_hw *aq_hw, u32 rx_head_dca_en, u32 dca)
+void
+rdm_rx_head_dca_en_set(struct aq_hw *aq_hw, uint32_t rx_head_dca_en, uint32_t dca)
{
- AQ_WRITE_REG_BIT(aq_hw, rdm_dcadhdr_en_adr(dca),
- rdm_dcadhdr_en_msk,
- rdm_dcadhdr_en_shift,
- rx_head_dca_en);
+ AQ_WRITE_REG_BIT(aq_hw, rdm_dcadhdr_en_adr(dca), rdm_dcadhdr_en_msk,
+ rdm_dcadhdr_en_shift, rx_head_dca_en);
}
-void rdm_rx_pld_dca_en_set(struct aq_hw *aq_hw, u32 rx_pld_dca_en, u32 dca)
+void
+rdm_rx_pld_dca_en_set(struct aq_hw *aq_hw, uint32_t rx_pld_dca_en, uint32_t dca)
{
- AQ_WRITE_REG_BIT(aq_hw, rdm_dcadpay_en_adr(dca),
- rdm_dcadpay_en_msk, rdm_dcadpay_en_shift,
- rx_pld_dca_en);
+ AQ_WRITE_REG_BIT(aq_hw, rdm_dcadpay_en_adr(dca), rdm_dcadpay_en_msk,
+ rdm_dcadpay_en_shift, rx_pld_dca_en);
}
-void rdm_rdm_intr_moder_en_set(struct aq_hw *aq_hw, u32 rdm_intr_moder_en)
+void
+rdm_rdm_intr_moder_en_set(struct aq_hw *aq_hw, uint32_t rdm_intr_moder_en)
{
- AQ_WRITE_REG_BIT(aq_hw, rdm_int_rim_en_adr,
- rdm_int_rim_en_msk,
- rdm_int_rim_en_shift,
- rdm_intr_moder_en);
+ AQ_WRITE_REG_BIT(aq_hw, rdm_int_rim_en_adr, rdm_int_rim_en_msk,
+ rdm_int_rim_en_shift, rdm_intr_moder_en);
}
/* reg */
-void reg_gen_irq_map_set(struct aq_hw *aq_hw, u32 gen_intr_map, u32 regidx)
+void
+reg_gen_irq_map_set(struct aq_hw *aq_hw, uint32_t gen_intr_map, uint32_t regidx)
{
- AQ_WRITE_REG(aq_hw, gen_intr_map_adr(regidx), gen_intr_map);
+ AQ_WRITE_REG(aq_hw, gen_intr_map_adr(regidx), gen_intr_map);
}
-u32 reg_gen_irq_status_get(struct aq_hw *aq_hw)
+uint32_t
+reg_gen_irq_status_get(struct aq_hw *aq_hw)
{
- return AQ_READ_REG(aq_hw, gen_intr_stat_adr);
+ return AQ_READ_REG(aq_hw, gen_intr_stat_adr);
}
-void reg_irq_glb_ctl_set(struct aq_hw *aq_hw, u32 intr_glb_ctl)
+void
+reg_irq_glb_ctl_set(struct aq_hw *aq_hw, uint32_t intr_glb_ctl)
{
- AQ_WRITE_REG(aq_hw, intr_glb_ctl_adr, intr_glb_ctl);
+ AQ_WRITE_REG(aq_hw, intr_glb_ctl_adr, intr_glb_ctl);
}
-void reg_irq_thr_set(struct aq_hw *aq_hw, u32 intr_thr, u32 throttle)
+void
+reg_irq_thr_set(struct aq_hw *aq_hw, uint32_t intr_thr, uint32_t throttle)
{
- AQ_WRITE_REG(aq_hw, intr_thr_adr(throttle), intr_thr);
+ AQ_WRITE_REG(aq_hw, intr_thr_adr(throttle), intr_thr);
}
-void reg_rx_dma_desc_base_addresslswset(struct aq_hw *aq_hw,
- u32 rx_dma_desc_base_addrlsw,
- u32 descriptor)
+void
+reg_rx_dma_desc_base_addresslswset(struct aq_hw *aq_hw,
+ uint32_t rx_dma_desc_base_addrlsw, uint32_t descriptor)
{
- AQ_WRITE_REG(aq_hw, rx_dma_desc_base_addrlsw_adr(descriptor),
- rx_dma_desc_base_addrlsw);
+ AQ_WRITE_REG(aq_hw, rx_dma_desc_base_addrlsw_adr(descriptor),
+ rx_dma_desc_base_addrlsw);
}
-void reg_rx_dma_desc_base_addressmswset(struct aq_hw *aq_hw,
- u32 rx_dma_desc_base_addrmsw,
- u32 descriptor)
+void
+reg_rx_dma_desc_base_addressmswset(struct aq_hw *aq_hw,
+ uint32_t rx_dma_desc_base_addrmsw, uint32_t descriptor)
{
- AQ_WRITE_REG(aq_hw, rx_dma_desc_base_addrmsw_adr(descriptor),
- rx_dma_desc_base_addrmsw);
+ AQ_WRITE_REG(aq_hw, rx_dma_desc_base_addrmsw_adr(descriptor),
+ rx_dma_desc_base_addrmsw);
}
-u32 reg_rx_dma_desc_status_get(struct aq_hw *aq_hw, u32 descriptor)
+uint32_t
+reg_rx_dma_desc_status_get(struct aq_hw *aq_hw, uint32_t descriptor)
{
- return AQ_READ_REG(aq_hw, rx_dma_desc_stat_adr(descriptor));
+ return AQ_READ_REG(aq_hw, rx_dma_desc_stat_adr(descriptor));
}
-void reg_rx_dma_desc_tail_ptr_set(struct aq_hw *aq_hw,
- u32 rx_dma_desc_tail_ptr, u32 descriptor)
+void
+reg_rx_dma_desc_tail_ptr_set(struct aq_hw *aq_hw,
+ uint32_t rx_dma_desc_tail_ptr, uint32_t descriptor)
{
- AQ_WRITE_REG(aq_hw, rx_dma_desc_tail_ptr_adr(descriptor),
- rx_dma_desc_tail_ptr);
+ AQ_WRITE_REG(aq_hw, rx_dma_desc_tail_ptr_adr(descriptor),
+ rx_dma_desc_tail_ptr);
}
-u32 reg_rx_dma_desc_tail_ptr_get(struct aq_hw *aq_hw, u32 descriptor)
+uint32_t
+reg_rx_dma_desc_tail_ptr_get(struct aq_hw *aq_hw, uint32_t descriptor)
{
- return AQ_READ_REG(aq_hw, rx_dma_desc_tail_ptr_adr(descriptor));
+ return AQ_READ_REG(aq_hw, rx_dma_desc_tail_ptr_adr(descriptor));
}
-void reg_rx_flr_mcst_flr_msk_set(struct aq_hw *aq_hw, u32 rx_flr_mcst_flr_msk)
+void
+reg_rx_flr_mcst_flr_msk_set(struct aq_hw *aq_hw, uint32_t rx_flr_mcst_flr_msk)
{
- AQ_WRITE_REG(aq_hw, rx_flr_mcst_flr_msk_adr, rx_flr_mcst_flr_msk);
+ AQ_WRITE_REG(aq_hw, rx_flr_mcst_flr_msk_adr, rx_flr_mcst_flr_msk);
}
-void reg_rx_flr_mcst_flr_set(struct aq_hw *aq_hw, u32 rx_flr_mcst_flr,
- u32 filter)
+void
+reg_rx_flr_mcst_flr_set(struct aq_hw *aq_hw, uint32_t rx_flr_mcst_flr,
+ uint32_t filter)
{
- AQ_WRITE_REG(aq_hw, rx_flr_mcst_flr_adr(filter), rx_flr_mcst_flr);
+ AQ_WRITE_REG(aq_hw, rx_flr_mcst_flr_adr(filter), rx_flr_mcst_flr);
}
-void reg_rx_flr_rss_control1set(struct aq_hw *aq_hw, u32 rx_flr_rss_control1)
+void
+reg_rx_flr_rss_control1set(struct aq_hw *aq_hw, uint32_t rx_flr_rss_control1)
{
- AQ_WRITE_REG(aq_hw, rx_flr_rss_control1_adr, rx_flr_rss_control1);
+ AQ_WRITE_REG(aq_hw, rx_flr_rss_control1_adr, rx_flr_rss_control1);
}
-void reg_rx_flr_control2_set(struct aq_hw *aq_hw, u32 rx_filter_control2)
+void
+reg_rx_flr_control2_set(struct aq_hw *aq_hw, uint32_t rx_filter_control2)
{
- AQ_WRITE_REG(aq_hw, rx_flr_control2_adr, rx_filter_control2);
+ AQ_WRITE_REG(aq_hw, rx_flr_control2_adr, rx_filter_control2);
}
-void reg_rx_intr_moder_ctrl_set(struct aq_hw *aq_hw,
- u32 rx_intr_moderation_ctl,
- u32 queue)
+void
+reg_rx_intr_moder_ctrl_set(struct aq_hw *aq_hw,
+ uint32_t rx_intr_moderation_ctl, uint32_t queue)
{
- AQ_WRITE_REG(aq_hw, rx_intr_moderation_ctl_adr(queue),
- rx_intr_moderation_ctl);
+ AQ_WRITE_REG(aq_hw, rx_intr_moderation_ctl_adr(queue),
+ rx_intr_moderation_ctl);
}
-void reg_tx_dma_debug_ctl_set(struct aq_hw *aq_hw, u32 tx_dma_debug_ctl)
+void
+reg_tx_dma_debug_ctl_set(struct aq_hw *aq_hw, uint32_t tx_dma_debug_ctl)
{
- AQ_WRITE_REG(aq_hw, tx_dma_debug_ctl_adr, tx_dma_debug_ctl);
+ AQ_WRITE_REG(aq_hw, tx_dma_debug_ctl_adr, tx_dma_debug_ctl);
}
-void reg_tx_dma_desc_base_addresslswset(struct aq_hw *aq_hw,
- u32 tx_dma_desc_base_addrlsw,
- u32 descriptor)
+void
+reg_tx_dma_desc_base_addresslswset(struct aq_hw *aq_hw,
+ uint32_t tx_dma_desc_base_addrlsw, uint32_t descriptor)
{
- AQ_WRITE_REG(aq_hw, tx_dma_desc_base_addrlsw_adr(descriptor),
- tx_dma_desc_base_addrlsw);
+ AQ_WRITE_REG(aq_hw, tx_dma_desc_base_addrlsw_adr(descriptor),
+ tx_dma_desc_base_addrlsw);
}
-void reg_tx_dma_desc_base_addressmswset(struct aq_hw *aq_hw,
- u32 tx_dma_desc_base_addrmsw,
- u32 descriptor)
+void
+reg_tx_dma_desc_base_addressmswset(struct aq_hw *aq_hw,
+ uint32_t tx_dma_desc_base_addrmsw, uint32_t descriptor)
{
- AQ_WRITE_REG(aq_hw, tx_dma_desc_base_addrmsw_adr(descriptor),
- tx_dma_desc_base_addrmsw);
+ AQ_WRITE_REG(aq_hw, tx_dma_desc_base_addrmsw_adr(descriptor),
+ tx_dma_desc_base_addrmsw);
}
-void reg_tx_dma_desc_tail_ptr_set(struct aq_hw *aq_hw,
- u32 tx_dma_desc_tail_ptr, u32 descriptor)
+void
+reg_tx_dma_desc_tail_ptr_set(struct aq_hw *aq_hw,
+ uint32_t tx_dma_desc_tail_ptr, uint32_t descriptor)
{
- //wmb();
+ //wmb();
- AQ_WRITE_REG(aq_hw, tx_dma_desc_tail_ptr_adr(descriptor),
- tx_dma_desc_tail_ptr);
+ AQ_WRITE_REG(aq_hw, tx_dma_desc_tail_ptr_adr(descriptor),
+ tx_dma_desc_tail_ptr);
}
-u32 reg_tx_dma_desc_tail_ptr_get(struct aq_hw *aq_hw, u32 descriptor)
+uint32_t
+reg_tx_dma_desc_tail_ptr_get(struct aq_hw *aq_hw, uint32_t descriptor)
{
- return AQ_READ_REG(aq_hw, tx_dma_desc_tail_ptr_adr(descriptor));
+ return AQ_READ_REG(aq_hw, tx_dma_desc_tail_ptr_adr(descriptor));
}
-void reg_tx_intr_moder_ctrl_set(struct aq_hw *aq_hw,
- u32 tx_intr_moderation_ctl,
- u32 queue)
+void
+reg_tx_intr_moder_ctrl_set(struct aq_hw *aq_hw,
+ uint32_t tx_intr_moderation_ctl, uint32_t queue)
{
- AQ_WRITE_REG(aq_hw, tx_intr_moderation_ctl_adr(queue),
- tx_intr_moderation_ctl);
+ AQ_WRITE_REG(aq_hw, tx_intr_moderation_ctl_adr(queue),
+ tx_intr_moderation_ctl);
}
/* RPB: rx packet buffer */
-void rpb_dma_sys_lbk_set(struct aq_hw *aq_hw, u32 dma_sys_lbk)
+void
+rpb_dma_sys_lbk_set(struct aq_hw *aq_hw, uint32_t dma_sys_lbk)
{
- AQ_WRITE_REG_BIT(aq_hw, rpb_dma_sys_lbk_adr,
- rpb_dma_sys_lbk_msk,
- rpb_dma_sys_lbk_shift, dma_sys_lbk);
+ AQ_WRITE_REG_BIT(aq_hw, rpb_dma_sys_lbk_adr, rpb_dma_sys_lbk_msk,
+ rpb_dma_sys_lbk_shift, dma_sys_lbk);
}
-void rpb_rpf_rx_traf_class_mode_set(struct aq_hw *aq_hw,
- u32 rx_traf_class_mode)
+void
+rpb_rpf_rx_traf_class_mode_set(struct aq_hw *aq_hw,
+ uint32_t rx_traf_class_mode)
{
- AQ_WRITE_REG_BIT(aq_hw, rpb_rpf_rx_tc_mode_adr,
- rpb_rpf_rx_tc_mode_msk,
- rpb_rpf_rx_tc_mode_shift,
- rx_traf_class_mode);
+ AQ_WRITE_REG_BIT(aq_hw, rpb_rpf_rx_tc_mode_adr, rpb_rpf_rx_tc_mode_msk,
+ rpb_rpf_rx_tc_mode_shift, rx_traf_class_mode);
}
-void rpb_rx_buff_en_set(struct aq_hw *aq_hw, u32 rx_buff_en)
+void
+rpb_rx_buff_en_set(struct aq_hw *aq_hw, uint32_t rx_buff_en)
{
- AQ_WRITE_REG_BIT(aq_hw, rpb_rx_buf_en_adr, rpb_rx_buf_en_msk,
- rpb_rx_buf_en_shift, rx_buff_en);
+ AQ_WRITE_REG_BIT(aq_hw, rpb_rx_buf_en_adr, rpb_rx_buf_en_msk,
+ rpb_rx_buf_en_shift, rx_buff_en);
}
-void rpb_rx_buff_hi_threshold_per_tc_set(struct aq_hw *aq_hw,
- u32 rx_buff_hi_threshold_per_tc,
- u32 buffer)
+void
+rpb_rx_buff_hi_threshold_per_tc_set(struct aq_hw *aq_hw,
+ uint32_t rx_buff_hi_threshold_per_tc, uint32_t buffer)
{
- AQ_WRITE_REG_BIT(aq_hw, rpb_rxbhi_thresh_adr(buffer),
- rpb_rxbhi_thresh_msk, rpb_rxbhi_thresh_shift,
- rx_buff_hi_threshold_per_tc);
+ AQ_WRITE_REG_BIT(aq_hw, rpb_rxbhi_thresh_adr(buffer),
+ rpb_rxbhi_thresh_msk, rpb_rxbhi_thresh_shift,
+ rx_buff_hi_threshold_per_tc);
}
-void rpb_rx_buff_lo_threshold_per_tc_set(struct aq_hw *aq_hw,
- u32 rx_buff_lo_threshold_per_tc,
- u32 buffer)
+void
+rpb_rx_buff_lo_threshold_per_tc_set(struct aq_hw *aq_hw,
+ uint32_t rx_buff_lo_threshold_per_tc, uint32_t buffer)
{
- AQ_WRITE_REG_BIT(aq_hw, rpb_rxblo_thresh_adr(buffer),
- rpb_rxblo_thresh_msk,
- rpb_rxblo_thresh_shift,
- rx_buff_lo_threshold_per_tc);
+ AQ_WRITE_REG_BIT(aq_hw, rpb_rxblo_thresh_adr(buffer),
+ rpb_rxblo_thresh_msk, rpb_rxblo_thresh_shift,
+ rx_buff_lo_threshold_per_tc);
}
-void rpb_rx_flow_ctl_mode_set(struct aq_hw *aq_hw, u32 rx_flow_ctl_mode)
+void
+rpb_rx_flow_ctl_mode_set(struct aq_hw *aq_hw, uint32_t rx_flow_ctl_mode)
{
- AQ_WRITE_REG_BIT(aq_hw, rpb_rx_fc_mode_adr,
- rpb_rx_fc_mode_msk,
- rpb_rx_fc_mode_shift, rx_flow_ctl_mode);
+ AQ_WRITE_REG_BIT(aq_hw, rpb_rx_fc_mode_adr, rpb_rx_fc_mode_msk,
+ rpb_rx_fc_mode_shift, rx_flow_ctl_mode);
}
-void rpb_rx_pkt_buff_size_per_tc_set(struct aq_hw *aq_hw,
- u32 rx_pkt_buff_size_per_tc, u32 buffer)
+void
+rpb_rx_pkt_buff_size_per_tc_set(struct aq_hw *aq_hw,
+ uint32_t rx_pkt_buff_size_per_tc, uint32_t buffer)
{
- AQ_WRITE_REG_BIT(aq_hw, rpb_rxbbuf_size_adr(buffer),
- rpb_rxbbuf_size_msk, rpb_rxbbuf_size_shift,
- rx_pkt_buff_size_per_tc);
+ AQ_WRITE_REG_BIT(aq_hw, rpb_rxbbuf_size_adr(buffer),
+ rpb_rxbbuf_size_msk, rpb_rxbbuf_size_shift,
+ rx_pkt_buff_size_per_tc);
}
-void rpb_rx_xoff_en_per_tc_set(struct aq_hw *aq_hw, u32 rx_xoff_en_per_tc,
- u32 buffer)
+void
+rpb_rx_xoff_en_per_tc_set(struct aq_hw *aq_hw, uint32_t rx_xoff_en_per_tc,
+ uint32_t buffer)
{
- AQ_WRITE_REG_BIT(aq_hw, rpb_rxbxoff_en_adr(buffer),
- rpb_rxbxoff_en_msk, rpb_rxbxoff_en_shift,
- rx_xoff_en_per_tc);
+ AQ_WRITE_REG_BIT(aq_hw, rpb_rxbxoff_en_adr(buffer),
+ rpb_rxbxoff_en_msk, rpb_rxbxoff_en_shift, rx_xoff_en_per_tc);
}
/* rpf */
-void rpfl2broadcast_count_threshold_set(struct aq_hw *aq_hw,
- u32 l2broadcast_count_threshold)
+void
+rpfl2broadcast_count_threshold_set(struct aq_hw *aq_hw,
+ uint32_t l2broadcast_count_threshold)
{
- AQ_WRITE_REG_BIT(aq_hw, rpfl2bc_thresh_adr,
- rpfl2bc_thresh_msk,
- rpfl2bc_thresh_shift,
- l2broadcast_count_threshold);
+ AQ_WRITE_REG_BIT(aq_hw, rpfl2bc_thresh_adr, rpfl2bc_thresh_msk,
+ rpfl2bc_thresh_shift, l2broadcast_count_threshold);
}
-void rpfl2broadcast_en_set(struct aq_hw *aq_hw, u32 l2broadcast_en)
+void
+rpfl2broadcast_en_set(struct aq_hw *aq_hw, uint32_t l2broadcast_en)
{
- AQ_WRITE_REG_BIT(aq_hw, rpfl2bc_en_adr, rpfl2bc_en_msk,
- rpfl2bc_en_shift, l2broadcast_en);
+ AQ_WRITE_REG_BIT(aq_hw, rpfl2bc_en_adr, rpfl2bc_en_msk,
+ rpfl2bc_en_shift, l2broadcast_en);
}
-void rpfl2broadcast_flr_act_set(struct aq_hw *aq_hw, u32 l2broadcast_flr_act)
+void
+rpfl2broadcast_flr_act_set(struct aq_hw *aq_hw, uint32_t l2broadcast_flr_act)
{
- AQ_WRITE_REG_BIT(aq_hw, rpfl2bc_act_adr, rpfl2bc_act_msk,
- rpfl2bc_act_shift, l2broadcast_flr_act);
+ AQ_WRITE_REG_BIT(aq_hw, rpfl2bc_act_adr, rpfl2bc_act_msk,
+ rpfl2bc_act_shift, l2broadcast_flr_act);
}
-void rpfl2multicast_flr_en_set(struct aq_hw *aq_hw, u32 l2multicast_flr_en,
- u32 filter)
+void
+rpfl2multicast_flr_en_set(struct aq_hw *aq_hw, uint32_t l2multicast_flr_en,
+ uint32_t filter)
{
- AQ_WRITE_REG_BIT(aq_hw, rpfl2mc_enf_adr(filter),
- rpfl2mc_enf_msk,
- rpfl2mc_enf_shift, l2multicast_flr_en);
+ AQ_WRITE_REG_BIT(aq_hw, rpfl2mc_enf_adr(filter), rpfl2mc_enf_msk,
+ rpfl2mc_enf_shift, l2multicast_flr_en);
}
-void rpfl2promiscuous_mode_en_set(struct aq_hw *aq_hw,
- u32 l2promiscuous_mode_en)
+void
+rpfl2promiscuous_mode_en_set(struct aq_hw *aq_hw,
+ uint32_t l2promiscuous_mode_en)
{
- AQ_WRITE_REG_BIT(aq_hw, rpfl2promis_mode_adr,
- rpfl2promis_mode_msk,
- rpfl2promis_mode_shift,
- l2promiscuous_mode_en);
+ AQ_WRITE_REG_BIT(aq_hw, rpfl2promis_mode_adr, rpfl2promis_mode_msk,
+ rpfl2promis_mode_shift, l2promiscuous_mode_en);
}
-void rpfl2unicast_flr_act_set(struct aq_hw *aq_hw, u32 l2unicast_flr_act,
- u32 filter)
+void
+rpfl2unicast_flr_act_set(struct aq_hw *aq_hw, uint32_t l2unicast_flr_act,
+ uint32_t filter)
{
- AQ_WRITE_REG_BIT(aq_hw, rpfl2uc_actf_adr(filter),
- rpfl2uc_actf_msk, rpfl2uc_actf_shift,
- l2unicast_flr_act);
+ AQ_WRITE_REG_BIT(aq_hw, rpfl2uc_actf_adr(filter), rpfl2uc_actf_msk,
+ rpfl2uc_actf_shift, l2unicast_flr_act);
}
-void rpfl2_uc_flr_en_set(struct aq_hw *aq_hw, u32 l2unicast_flr_en,
- u32 filter)
+void
+rpfl2_uc_flr_en_set(struct aq_hw *aq_hw, uint32_t l2unicast_flr_en,
+ uint32_t filter)
{
- AQ_WRITE_REG_BIT(aq_hw, rpfl2uc_enf_adr(filter),
- rpfl2uc_enf_msk,
- rpfl2uc_enf_shift, l2unicast_flr_en);
+ AQ_WRITE_REG_BIT(aq_hw, rpfl2uc_enf_adr(filter), rpfl2uc_enf_msk,
+ rpfl2uc_enf_shift, l2unicast_flr_en);
}
-void rpfl2unicast_dest_addresslsw_set(struct aq_hw *aq_hw,
- u32 l2unicast_dest_addresslsw,
- u32 filter)
+void
+rpfl2unicast_dest_addresslsw_set(struct aq_hw *aq_hw,
+ uint32_t l2unicast_dest_addresslsw, uint32_t filter)
{
- AQ_WRITE_REG(aq_hw, rpfl2uc_daflsw_adr(filter),
- l2unicast_dest_addresslsw);
+ AQ_WRITE_REG(aq_hw, rpfl2uc_daflsw_adr(filter),
+ l2unicast_dest_addresslsw);
}
-void rpfl2unicast_dest_addressmsw_set(struct aq_hw *aq_hw,
- u32 l2unicast_dest_addressmsw,
- u32 filter)
+void
+rpfl2unicast_dest_addressmsw_set(struct aq_hw *aq_hw,
+ uint32_t l2unicast_dest_addressmsw, uint32_t filter)
{
- AQ_WRITE_REG_BIT(aq_hw, rpfl2uc_dafmsw_adr(filter),
- rpfl2uc_dafmsw_msk, rpfl2uc_dafmsw_shift,
- l2unicast_dest_addressmsw);
+ AQ_WRITE_REG_BIT(aq_hw, rpfl2uc_dafmsw_adr(filter), rpfl2uc_dafmsw_msk,
+ rpfl2uc_dafmsw_shift, l2unicast_dest_addressmsw);
}
-void rpfl2_accept_all_mc_packets_set(struct aq_hw *aq_hw,
- u32 l2_accept_all_mc_packets)
+void
+rpfl2_accept_all_mc_packets_set(struct aq_hw *aq_hw,
+ uint32_t l2_accept_all_mc_packets)
{
- AQ_WRITE_REG_BIT(aq_hw, rpfl2mc_accept_all_adr,
- rpfl2mc_accept_all_msk,
- rpfl2mc_accept_all_shift,
- l2_accept_all_mc_packets);
+ AQ_WRITE_REG_BIT(aq_hw, rpfl2mc_accept_all_adr, rpfl2mc_accept_all_msk,
+ rpfl2mc_accept_all_shift, l2_accept_all_mc_packets);
}
-void rpf_rpb_user_priority_tc_map_set(struct aq_hw *aq_hw,
- u32 user_priority_tc_map, u32 tc)
+void
+rpf_rpb_user_priority_tc_map_set(struct aq_hw *aq_hw,
+ uint32_t user_priority_tc_map, uint32_t tc)
{
/* register address for bitfield rx_tc_up{t}[2:0] */
- static u32 rpf_rpb_rx_tc_upt_adr[8] = {
- 0x000054c4U, 0x000054c4U, 0x000054c4U, 0x000054c4U,
- 0x000054c4U, 0x000054c4U, 0x000054c4U, 0x000054c4U
- };
+ static uint32_t rpf_rpb_rx_tc_upt_adr[8] = {
+ 0x000054c4U, 0x000054c4U, 0x000054c4U, 0x000054c4U,
+ 0x000054c4U, 0x000054c4U, 0x000054c4U, 0x000054c4U
+ };
/* bitmask for bitfield rx_tc_up{t}[2:0] */
- static u32 rpf_rpb_rx_tc_upt_msk[8] = {
- 0x00000007U, 0x00000070U, 0x00000700U, 0x00007000U,
- 0x00070000U, 0x00700000U, 0x07000000U, 0x70000000U
- };
+ static uint32_t rpf_rpb_rx_tc_upt_msk[8] = {
+ 0x00000007U, 0x00000070U, 0x00000700U, 0x00007000U,
+ 0x00070000U, 0x00700000U, 0x07000000U, 0x70000000U
+ };
/* lower bit position of bitfield rx_tc_up{t}[2:0] */
- static u32 rpf_rpb_rx_tc_upt_shft[8] = {
- 0U, 4U, 8U, 12U, 16U, 20U, 24U, 28U
- };
+ static uint32_t rpf_rpb_rx_tc_upt_shft[8] = {
+ 0U, 4U, 8U, 12U, 16U, 20U, 24U, 28U
+ };
- AQ_WRITE_REG_BIT(aq_hw, rpf_rpb_rx_tc_upt_adr[tc],
- rpf_rpb_rx_tc_upt_msk[tc],
- rpf_rpb_rx_tc_upt_shft[tc],
- user_priority_tc_map);
+ AQ_WRITE_REG_BIT(aq_hw, rpf_rpb_rx_tc_upt_adr[tc],
+ rpf_rpb_rx_tc_upt_msk[tc], rpf_rpb_rx_tc_upt_shft[tc],
+ user_priority_tc_map);
}
-void rpf_rss_key_addr_set(struct aq_hw *aq_hw, u32 rss_key_addr)
+void
+rpf_rss_key_addr_set(struct aq_hw *aq_hw, uint32_t rss_key_addr)
{
- AQ_WRITE_REG_BIT(aq_hw, rpf_rss_key_addr_adr,
- rpf_rss_key_addr_msk,
- rpf_rss_key_addr_shift,
- rss_key_addr);
+ AQ_WRITE_REG_BIT(aq_hw, rpf_rss_key_addr_adr, rpf_rss_key_addr_msk,
+ rpf_rss_key_addr_shift, rss_key_addr);
}
-void rpf_rss_key_wr_data_set(struct aq_hw *aq_hw, u32 rss_key_wr_data)
+void
+rpf_rss_key_wr_data_set(struct aq_hw *aq_hw, uint32_t rss_key_wr_data)
{
- AQ_WRITE_REG(aq_hw, rpf_rss_key_wr_data_adr,
- rss_key_wr_data);
+ AQ_WRITE_REG(aq_hw, rpf_rss_key_wr_data_adr, rss_key_wr_data);
}
-u32 rpf_rss_key_rd_data_get(struct aq_hw *aq_hw)
+uint32_t
+rpf_rss_key_rd_data_get(struct aq_hw *aq_hw)
{
- return AQ_READ_REG(aq_hw, rpf_rss_key_rd_data_adr);
+ return AQ_READ_REG(aq_hw, rpf_rss_key_rd_data_adr);
}
-u32 rpf_rss_key_wr_en_get(struct aq_hw *aq_hw)
+uint32_t
+rpf_rss_key_wr_en_get(struct aq_hw *aq_hw)
{
- return AQ_READ_REG_BIT(aq_hw, rpf_rss_key_wr_eni_adr,
- rpf_rss_key_wr_eni_msk,
- rpf_rss_key_wr_eni_shift);
+ return AQ_READ_REG_BIT(aq_hw, rpf_rss_key_wr_eni_adr,
+ rpf_rss_key_wr_eni_msk, rpf_rss_key_wr_eni_shift);
}
-void rpf_rss_key_wr_en_set(struct aq_hw *aq_hw, u32 rss_key_wr_en)
+void
+rpf_rss_key_wr_en_set(struct aq_hw *aq_hw, uint32_t rss_key_wr_en)
{
- AQ_WRITE_REG_BIT(aq_hw, rpf_rss_key_wr_eni_adr,
- rpf_rss_key_wr_eni_msk,
- rpf_rss_key_wr_eni_shift,
- rss_key_wr_en);
+ AQ_WRITE_REG_BIT(aq_hw, rpf_rss_key_wr_eni_adr,
+ rpf_rss_key_wr_eni_msk, rpf_rss_key_wr_eni_shift,
+ rss_key_wr_en);
}
-void rpf_rss_redir_tbl_addr_set(struct aq_hw *aq_hw, u32 rss_redir_tbl_addr)
+void
+rpf_rss_redir_tbl_addr_set(struct aq_hw *aq_hw, uint32_t rss_redir_tbl_addr)
{
- AQ_WRITE_REG_BIT(aq_hw, rpf_rss_redir_addr_adr,
- rpf_rss_redir_addr_msk,
- rpf_rss_redir_addr_shift, rss_redir_tbl_addr);
+ AQ_WRITE_REG_BIT(aq_hw, rpf_rss_redir_addr_adr, rpf_rss_redir_addr_msk,
+ rpf_rss_redir_addr_shift, rss_redir_tbl_addr);
}
-void rpf_rss_redir_tbl_wr_data_set(struct aq_hw *aq_hw,
- u32 rss_redir_tbl_wr_data)
+void
+rpf_rss_redir_tbl_wr_data_set(struct aq_hw *aq_hw, uint32_t rss_redir_tbl_wr_data)
{
- AQ_WRITE_REG_BIT(aq_hw, rpf_rss_redir_wr_data_adr,
- rpf_rss_redir_wr_data_msk,
- rpf_rss_redir_wr_data_shift,
- rss_redir_tbl_wr_data);
+ AQ_WRITE_REG_BIT(aq_hw, rpf_rss_redir_wr_data_adr,
+ rpf_rss_redir_wr_data_msk, rpf_rss_redir_wr_data_shift,
+ rss_redir_tbl_wr_data);
}
-u32 rpf_rss_redir_wr_en_get(struct aq_hw *aq_hw)
+uint32_t
+rpf_rss_redir_wr_en_get(struct aq_hw *aq_hw)
{
- return AQ_READ_REG_BIT(aq_hw, rpf_rss_redir_wr_eni_adr,
- rpf_rss_redir_wr_eni_msk,
- rpf_rss_redir_wr_eni_shift);
+ return AQ_READ_REG_BIT(aq_hw, rpf_rss_redir_wr_eni_adr,
+ rpf_rss_redir_wr_eni_msk, rpf_rss_redir_wr_eni_shift);
}
-void rpf_rss_redir_wr_en_set(struct aq_hw *aq_hw, u32 rss_redir_wr_en)
+void
+rpf_rss_redir_wr_en_set(struct aq_hw *aq_hw, uint32_t rss_redir_wr_en)
{
- AQ_WRITE_REG_BIT(aq_hw, rpf_rss_redir_wr_eni_adr,
- rpf_rss_redir_wr_eni_msk,
- rpf_rss_redir_wr_eni_shift, rss_redir_wr_en);
+ AQ_WRITE_REG_BIT(aq_hw, rpf_rss_redir_wr_eni_adr,
+ rpf_rss_redir_wr_eni_msk, rpf_rss_redir_wr_eni_shift,
+ rss_redir_wr_en);
}
-void rpf_tpo_to_rpf_sys_lbk_set(struct aq_hw *aq_hw, u32 tpo_to_rpf_sys_lbk)
+void
+rpf_tpo_to_rpf_sys_lbk_set(struct aq_hw *aq_hw, uint32_t tpo_to_rpf_sys_lbk)
{
- AQ_WRITE_REG_BIT(aq_hw, rpf_tpo_rpf_sys_lbk_adr,
- rpf_tpo_rpf_sys_lbk_msk,
- rpf_tpo_rpf_sys_lbk_shift,
- tpo_to_rpf_sys_lbk);
+ AQ_WRITE_REG_BIT(aq_hw, rpf_tpo_rpf_sys_lbk_adr,
+ rpf_tpo_rpf_sys_lbk_msk, rpf_tpo_rpf_sys_lbk_shift,
+ tpo_to_rpf_sys_lbk);
}
-void hw_atl_rpf_vlan_inner_etht_set(struct aq_hw_s *aq_hw, u32 vlan_inner_etht)
+void
+hw_atl_rpf_vlan_inner_etht_set(struct aq_hw_s *aq_hw, uint32_t vlan_inner_etht)
{
aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_VL_INNER_TPID_ADR,
HW_ATL_RPF_VL_INNER_TPID_MSK,
@@ -940,7 +1000,8 @@
vlan_inner_etht);
}
-void hw_atl_rpf_vlan_outer_etht_set(struct aq_hw_s *aq_hw, u32 vlan_outer_etht)
+void
+hw_atl_rpf_vlan_outer_etht_set(struct aq_hw_s *aq_hw, uint32_t vlan_outer_etht)
{
aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_VL_OUTER_TPID_ADR,
HW_ATL_RPF_VL_OUTER_TPID_MSK,
@@ -948,8 +1009,8 @@
vlan_outer_etht);
}
-void hw_atl_rpf_vlan_prom_mode_en_set(struct aq_hw_s *aq_hw,
- u32 vlan_prom_mode_en)
+void
+hw_atl_rpf_vlan_prom_mode_en_set(struct aq_hw_s *aq_hw, uint32_t vlan_prom_mode_en)
{
aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_VL_PROMIS_MODE_ADR,
HW_ATL_RPF_VL_PROMIS_MODE_MSK,
@@ -957,8 +1018,9 @@
vlan_prom_mode_en);
}
-void hw_atl_rpf_vlan_accept_untagged_packets_set(struct aq_hw_s *aq_hw,
- u32 vlan_acc_untagged_packets)
+void
+hw_atl_rpf_vlan_accept_untagged_packets_set(struct aq_hw_s *aq_hw,
+ uint32_t vlan_acc_untagged_packets)
{
aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_VL_ACCEPT_UNTAGGED_MODE_ADR,
HW_ATL_RPF_VL_ACCEPT_UNTAGGED_MODE_MSK,
@@ -966,8 +1028,8 @@
vlan_acc_untagged_packets);
}
-void hw_atl_rpf_vlan_untagged_act_set(struct aq_hw_s *aq_hw,
- u32 vlan_untagged_act)
+void
+hw_atl_rpf_vlan_untagged_act_set(struct aq_hw_s *aq_hw, uint32_t vlan_untagged_act)
{
aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_VL_UNTAGGED_ACT_ADR,
HW_ATL_RPF_VL_UNTAGGED_ACT_MSK,
@@ -975,8 +1037,8 @@
vlan_untagged_act);
}
-void hw_atl_rpf_vlan_flr_en_set(struct aq_hw_s *aq_hw, u32 vlan_flr_en,
- u32 filter)
+void
+hw_atl_rpf_vlan_flr_en_set(struct aq_hw_s *aq_hw, uint32_t vlan_flr_en, uint32_t filter)
{
aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_VL_EN_F_ADR(filter),
HW_ATL_RPF_VL_EN_F_MSK,
@@ -984,8 +1046,8 @@
vlan_flr_en);
}
-void hw_atl_rpf_vlan_flr_act_set(struct aq_hw_s *aq_hw, u32 vlan_flr_act,
- u32 filter)
+void
+hw_atl_rpf_vlan_flr_act_set(struct aq_hw_s *aq_hw, uint32_t vlan_flr_act, uint32_t filter)
{
aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_VL_ACT_F_ADR(filter),
HW_ATL_RPF_VL_ACT_F_MSK,
@@ -993,8 +1055,8 @@
vlan_flr_act);
}
-void hw_atl_rpf_vlan_id_flr_set(struct aq_hw_s *aq_hw, u32 vlan_id_flr,
- u32 filter)
+void
+hw_atl_rpf_vlan_id_flr_set(struct aq_hw_s *aq_hw, uint32_t vlan_id_flr, uint32_t filter)
{
aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_VL_ID_F_ADR(filter),
HW_ATL_RPF_VL_ID_F_MSK,
@@ -1002,8 +1064,9 @@
vlan_id_flr);
}
-void hw_atl_rpf_vlan_rxq_en_flr_set(struct aq_hw_s *aq_hw, u32 vlan_rxq_en,
- u32 filter)
+void
+hw_atl_rpf_vlan_rxq_en_flr_set(struct aq_hw_s *aq_hw, uint32_t vlan_rxq_en,
+ uint32_t filter)
{
aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_VL_RXQ_EN_F_ADR(filter),
HW_ATL_RPF_VL_RXQ_EN_F_MSK,
@@ -1011,8 +1074,8 @@
vlan_rxq_en);
}
-void hw_atl_rpf_vlan_rxq_flr_set(struct aq_hw_s *aq_hw, u32 vlan_rxq,
- u32 filter)
+void
+hw_atl_rpf_vlan_rxq_flr_set(struct aq_hw_s *aq_hw, uint32_t vlan_rxq, uint32_t filter)
{
aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_VL_RXQ_F_ADR(filter),
HW_ATL_RPF_VL_RXQ_F_MSK,
@@ -1020,25 +1083,26 @@
vlan_rxq);
};
-void hw_atl_rpf_etht_flr_en_set(struct aq_hw_s *aq_hw, u32 etht_flr_en,
- u32 filter)
+void
+hw_atl_rpf_etht_flr_en_set(struct aq_hw_s *aq_hw, uint32_t etht_flr_en, uint32_t filter)
{
aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_ET_ENF_ADR(filter),
HW_ATL_RPF_ET_ENF_MSK,
HW_ATL_RPF_ET_ENF_SHIFT, etht_flr_en);
}
-void hw_atl_rpf_etht_user_priority_en_set(struct aq_hw_s *aq_hw,
- u32 etht_user_priority_en, u32 filter)
+void
+hw_atl_rpf_etht_user_priority_en_set(struct aq_hw_s *aq_hw,
+ uint32_t etht_user_priority_en, uint32_t filter)
{
aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_ET_UPFEN_ADR(filter),
HW_ATL_RPF_ET_UPFEN_MSK, HW_ATL_RPF_ET_UPFEN_SHIFT,
etht_user_priority_en);
}
-void hw_atl_rpf_etht_rx_queue_en_set(struct aq_hw_s *aq_hw,
- u32 etht_rx_queue_en,
- u32 filter)
+void
+hw_atl_rpf_etht_rx_queue_en_set(struct aq_hw_s *aq_hw, uint32_t etht_rx_queue_en,
+ uint32_t filter)
{
aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_ET_RXQFEN_ADR(filter),
HW_ATL_RPF_ET_RXQFEN_MSK,
@@ -1046,25 +1110,27 @@
etht_rx_queue_en);
}
-void hw_atl_rpf_etht_user_priority_set(struct aq_hw_s *aq_hw,
- u32 etht_user_priority,
- u32 filter)
+void
+hw_atl_rpf_etht_user_priority_set(struct aq_hw_s *aq_hw, uint32_t etht_user_priority,
+ uint32_t filter)
{
aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_ET_UPF_ADR(filter),
HW_ATL_RPF_ET_UPF_MSK,
HW_ATL_RPF_ET_UPF_SHIFT, etht_user_priority);
}
-void hw_atl_rpf_etht_rx_queue_set(struct aq_hw_s *aq_hw, u32 etht_rx_queue,
- u32 filter)
+void
+hw_atl_rpf_etht_rx_queue_set(struct aq_hw_s *aq_hw, uint32_t etht_rx_queue,
+ uint32_t filter)
{
aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_ET_RXQF_ADR(filter),
HW_ATL_RPF_ET_RXQF_MSK,
HW_ATL_RPF_ET_RXQF_SHIFT, etht_rx_queue);
}
-void hw_atl_rpf_etht_mgt_queue_set(struct aq_hw_s *aq_hw, u32 etht_mgt_queue,
- u32 filter)
+void
+hw_atl_rpf_etht_mgt_queue_set(struct aq_hw_s *aq_hw, uint32_t etht_mgt_queue,
+ uint32_t filter)
{
aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_ET_MNG_RXQF_ADR(filter),
HW_ATL_RPF_ET_MNG_RXQF_MSK,
@@ -1072,915 +1138,925 @@
etht_mgt_queue);
}
-void hw_atl_rpf_etht_flr_act_set(struct aq_hw_s *aq_hw, u32 etht_flr_act,
- u32 filter)
+void
+hw_atl_rpf_etht_flr_act_set(struct aq_hw_s *aq_hw, uint32_t etht_flr_act,
+ uint32_t filter)
{
aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_ET_ACTF_ADR(filter),
HW_ATL_RPF_ET_ACTF_MSK,
HW_ATL_RPF_ET_ACTF_SHIFT, etht_flr_act);
}
-void hw_atl_rpf_etht_flr_set(struct aq_hw_s *aq_hw, u32 etht_flr, u32 filter)
+void
+hw_atl_rpf_etht_flr_set(struct aq_hw_s *aq_hw, uint32_t etht_flr, uint32_t filter)
{
aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_ET_VALF_ADR(filter),
HW_ATL_RPF_ET_VALF_MSK,
HW_ATL_RPF_ET_VALF_SHIFT, etht_flr);
}
-void hw_atl_rpf_l3_l4_enf_set(struct aq_hw_s *aq_hw, u32 val, u32 filter)
+void
+hw_atl_rpf_l3_l4_enf_set(struct aq_hw_s *aq_hw, uint32_t val, uint32_t filter)
{
aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_L3_L4_ENF_ADR(filter),
HW_ATL_RPF_L3_L4_ENF_MSK,
HW_ATL_RPF_L3_L4_ENF_SHIFT, val);
}
-void hw_atl_rpf_l3_v6_enf_set(struct aq_hw_s *aq_hw, u32 val, u32 filter)
+void
+hw_atl_rpf_l3_v6_enf_set(struct aq_hw_s *aq_hw, uint32_t val, uint32_t filter)
{
aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_L3_V6_ENF_ADR(filter),
HW_ATL_RPF_L3_V6_ENF_MSK,
HW_ATL_RPF_L3_V6_ENF_SHIFT, val);
}
-void hw_atl_rpf_l3_saf_en_set(struct aq_hw_s *aq_hw, u32 val, u32 filter)
+void
+hw_atl_rpf_l3_saf_en_set(struct aq_hw_s *aq_hw, uint32_t val, uint32_t filter)
{
aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_L3_SAF_EN_ADR(filter),
HW_ATL_RPF_L3_SAF_EN_MSK,
HW_ATL_RPF_L3_SAF_EN_SHIFT, val);
}
-void hw_atl_rpf_l3_daf_en_set(struct aq_hw_s *aq_hw, u32 val, u32 filter)
+void
+hw_atl_rpf_l3_daf_en_set(struct aq_hw_s *aq_hw, uint32_t val, uint32_t filter)
{
aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_L3_DAF_EN_ADR(filter),
HW_ATL_RPF_L3_DAF_EN_MSK,
HW_ATL_RPF_L3_DAF_EN_SHIFT, val);
}
-void hw_atl_rpf_l4_spf_en_set(struct aq_hw_s *aq_hw, u32 val, u32 filter)
+void
+hw_atl_rpf_l4_spf_en_set(struct aq_hw_s *aq_hw, uint32_t val, uint32_t filter)
{
aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_L4_SPF_EN_ADR(filter),
HW_ATL_RPF_L4_SPF_EN_MSK,
HW_ATL_RPF_L4_SPF_EN_SHIFT, val);
}
-void hw_atl_rpf_l4_dpf_en_set(struct aq_hw_s *aq_hw, u32 val, u32 filter)
+void
+hw_atl_rpf_l4_dpf_en_set(struct aq_hw_s *aq_hw, uint32_t val, uint32_t filter)
{
aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_L4_DPF_EN_ADR(filter),
HW_ATL_RPF_L4_DPF_EN_MSK,
HW_ATL_RPF_L4_DPF_EN_SHIFT, val);
}
-void hw_atl_rpf_l4_protf_en_set(struct aq_hw_s *aq_hw, u32 val, u32 filter)
+void
+hw_atl_rpf_l4_protf_en_set(struct aq_hw_s *aq_hw, uint32_t val, uint32_t filter)
{
aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_L4_PROTF_EN_ADR(filter),
HW_ATL_RPF_L4_PROTF_EN_MSK,
HW_ATL_RPF_L4_PROTF_EN_SHIFT, val);
}
-void hw_atl_rpf_l3_arpf_en_set(struct aq_hw_s *aq_hw, u32 val, u32 filter)
+void
+hw_atl_rpf_l3_arpf_en_set(struct aq_hw_s *aq_hw, uint32_t val, uint32_t filter)
{
aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_L3_ARPF_EN_ADR(filter),
HW_ATL_RPF_L3_ARPF_EN_MSK,
HW_ATL_RPF_L3_ARPF_EN_SHIFT, val);
}
-void hw_atl_rpf_l3_l4_rxqf_en_set(struct aq_hw_s *aq_hw, u32 val, u32 filter)
+void
+hw_atl_rpf_l3_l4_rxqf_en_set(struct aq_hw_s *aq_hw, uint32_t val, uint32_t filter)
{
aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_L3_L4_RXQF_EN_ADR(filter),
HW_ATL_RPF_L3_L4_RXQF_EN_MSK,
HW_ATL_RPF_L3_L4_RXQF_EN_SHIFT, val);
}
-void hw_atl_rpf_l3_l4_mng_rxqf_set(struct aq_hw_s *aq_hw, u32 val, u32 filter)
+void
+hw_atl_rpf_l3_l4_mng_rxqf_set(struct aq_hw_s *aq_hw, uint32_t val, uint32_t filter)
{
aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_L3_L4_MNG_RXQF_ADR(filter),
HW_ATL_RPF_L3_L4_MNG_RXQF_MSK,
HW_ATL_RPF_L3_L4_MNG_RXQF_SHIFT, val);
}
-void hw_atl_rpf_l3_l4_actf_set(struct aq_hw_s *aq_hw, u32 val, u32 filter)
+void
+hw_atl_rpf_l3_l4_actf_set(struct aq_hw_s *aq_hw, uint32_t val, uint32_t filter)
{
aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_L3_L4_ACTF_ADR(filter),
HW_ATL_RPF_L3_L4_ACTF_MSK,
HW_ATL_RPF_L3_L4_ACTF_SHIFT, val);
}
-void hw_atl_rpf_l3_l4_rxqf_set(struct aq_hw_s *aq_hw, u32 val, u32 filter)
+void
+hw_atl_rpf_l3_l4_rxqf_set(struct aq_hw_s *aq_hw, uint32_t val, uint32_t filter)
{
aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_L3_L4_RXQF_ADR(filter),
HW_ATL_RPF_L3_L4_RXQF_MSK,
HW_ATL_RPF_L3_L4_RXQF_SHIFT, val);
}
-void hw_atl_rpf_l4_protf_set(struct aq_hw_s *aq_hw, u32 val, u32 filter)
+void
+hw_atl_rpf_l4_protf_set(struct aq_hw_s *aq_hw, uint32_t val, uint32_t filter)
{
aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_L4_PROTF_ADR(filter),
HW_ATL_RPF_L4_PROTF_MSK,
HW_ATL_RPF_L4_PROTF_SHIFT, val);
}
-void hw_atl_rpf_l4_spd_set(struct aq_hw_s *aq_hw, u32 val, u32 filter)
+void
+hw_atl_rpf_l4_spd_set(struct aq_hw_s *aq_hw, uint32_t val, uint32_t filter)
{
aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_L4_SPD_ADR(filter),
HW_ATL_RPF_L4_SPD_MSK,
HW_ATL_RPF_L4_SPD_SHIFT, val);
}
-void hw_atl_rpf_l4_dpd_set(struct aq_hw_s *aq_hw, u32 val, u32 filter)
+void
+hw_atl_rpf_l4_dpd_set(struct aq_hw_s *aq_hw, uint32_t val, uint32_t filter)
{
aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_L4_DPD_ADR(filter),
HW_ATL_RPF_L4_DPD_MSK,
HW_ATL_RPF_L4_DPD_SHIFT, val);
}
-void rpf_vlan_inner_etht_set(struct aq_hw *aq_hw, u32 vlan_inner_etht)
+void
+rpf_vlan_inner_etht_set(struct aq_hw *aq_hw, uint32_t vlan_inner_etht)
{
- AQ_WRITE_REG_BIT(aq_hw, rpf_vl_inner_tpid_adr,
- rpf_vl_inner_tpid_msk,
- rpf_vl_inner_tpid_shift,
- vlan_inner_etht);
+ AQ_WRITE_REG_BIT(aq_hw, rpf_vl_inner_tpid_adr, rpf_vl_inner_tpid_msk,
+ rpf_vl_inner_tpid_shift, vlan_inner_etht);
}
-void rpf_vlan_outer_etht_set(struct aq_hw *aq_hw, u32 vlan_outer_etht)
+void
+rpf_vlan_outer_etht_set(struct aq_hw *aq_hw, uint32_t vlan_outer_etht)
{
- AQ_WRITE_REG_BIT(aq_hw, rpf_vl_outer_tpid_adr,
- rpf_vl_outer_tpid_msk,
- rpf_vl_outer_tpid_shift,
- vlan_outer_etht);
+ AQ_WRITE_REG_BIT(aq_hw, rpf_vl_outer_tpid_adr, rpf_vl_outer_tpid_msk,
+ rpf_vl_outer_tpid_shift, vlan_outer_etht);
}
-void rpf_vlan_prom_mode_en_set(struct aq_hw *aq_hw, u32 vlan_prom_mode_en)
+void
+rpf_vlan_prom_mode_en_set(struct aq_hw *aq_hw, uint32_t vlan_prom_mode_en)
{
- AQ_WRITE_REG_BIT(aq_hw, rpf_vl_promis_mode_adr,
- rpf_vl_promis_mode_msk,
- rpf_vl_promis_mode_shift,
- vlan_prom_mode_en);
+ AQ_WRITE_REG_BIT(aq_hw, rpf_vl_promis_mode_adr, rpf_vl_promis_mode_msk,
+ rpf_vl_promis_mode_shift, vlan_prom_mode_en);
}
-void rpf_vlan_accept_untagged_packets_set(struct aq_hw *aq_hw,
- u32 vlan_accept_untagged_packets)
+void
+rpf_vlan_accept_untagged_packets_set(struct aq_hw *aq_hw,
+ uint32_t vlan_accept_untagged_packets)
{
- AQ_WRITE_REG_BIT(aq_hw, rpf_vl_accept_untagged_mode_adr,
- rpf_vl_accept_untagged_mode_msk,
- rpf_vl_accept_untagged_mode_shift,
- vlan_accept_untagged_packets);
+ AQ_WRITE_REG_BIT(aq_hw, rpf_vl_accept_untagged_mode_adr,
+ rpf_vl_accept_untagged_mode_msk, rpf_vl_accept_untagged_mode_shift,
+ vlan_accept_untagged_packets);
}
-void rpf_vlan_untagged_act_set(struct aq_hw *aq_hw, u32 vlan_untagged_act)
+void
+rpf_vlan_untagged_act_set(struct aq_hw *aq_hw, uint32_t vlan_untagged_act)
{
- AQ_WRITE_REG_BIT(aq_hw, rpf_vl_untagged_act_adr,
- rpf_vl_untagged_act_msk,
- rpf_vl_untagged_act_shift,
- vlan_untagged_act);
+ AQ_WRITE_REG_BIT(aq_hw, rpf_vl_untagged_act_adr,
+ rpf_vl_untagged_act_msk, rpf_vl_untagged_act_shift,
+ vlan_untagged_act);
}
-void rpf_vlan_flr_en_set(struct aq_hw *aq_hw, u32 vlan_flr_en, u32 filter)
+void
+rpf_vlan_flr_en_set(struct aq_hw *aq_hw, uint32_t vlan_flr_en, uint32_t filter)
{
- AQ_WRITE_REG_BIT(aq_hw, rpf_vl_en_f_adr(filter),
- rpf_vl_en_f_msk,
- rpf_vl_en_f_shift,
- vlan_flr_en);
+ AQ_WRITE_REG_BIT(aq_hw, rpf_vl_en_f_adr(filter), rpf_vl_en_f_msk,
+ rpf_vl_en_f_shift, vlan_flr_en);
}
-void rpf_vlan_flr_act_set(struct aq_hw *aq_hw, u32 vlan_flr_act, u32 filter)
+void
+rpf_vlan_flr_act_set(struct aq_hw *aq_hw, uint32_t vlan_flr_act, uint32_t filter)
{
- AQ_WRITE_REG_BIT(aq_hw, rpf_vl_act_f_adr(filter),
- rpf_vl_act_f_msk,
- rpf_vl_act_f_shift,
- vlan_flr_act);
+ AQ_WRITE_REG_BIT(aq_hw, rpf_vl_act_f_adr(filter), rpf_vl_act_f_msk,
+ rpf_vl_act_f_shift, vlan_flr_act);
}
-void rpf_vlan_id_flr_set(struct aq_hw *aq_hw, u32 vlan_id_flr, u32 filter)
+void
+rpf_vlan_id_flr_set(struct aq_hw *aq_hw, uint32_t vlan_id_flr, uint32_t filter)
{
- AQ_WRITE_REG_BIT(aq_hw, rpf_vl_id_f_adr(filter),
- rpf_vl_id_f_msk,
- rpf_vl_id_f_shift,
- vlan_id_flr);
+ AQ_WRITE_REG_BIT(aq_hw, rpf_vl_id_f_adr(filter), rpf_vl_id_f_msk,
+ rpf_vl_id_f_shift, vlan_id_flr);
}
-void rpf_etht_flr_en_set(struct aq_hw *aq_hw, u32 etht_flr_en, u32 filter)
+void
+rpf_etht_flr_en_set(struct aq_hw *aq_hw, uint32_t etht_flr_en, uint32_t filter)
{
- AQ_WRITE_REG_BIT(aq_hw, rpf_et_enf_adr(filter),
- rpf_et_enf_msk,
- rpf_et_enf_shift, etht_flr_en);
+ AQ_WRITE_REG_BIT(aq_hw, rpf_et_enf_adr(filter), rpf_et_enf_msk,
+ rpf_et_enf_shift, etht_flr_en);
}
-void rpf_etht_user_priority_en_set(struct aq_hw *aq_hw,
- u32 etht_user_priority_en, u32 filter)
+void
+rpf_etht_user_priority_en_set(struct aq_hw *aq_hw,
+ uint32_t etht_user_priority_en, uint32_t filter)
{
- AQ_WRITE_REG_BIT(aq_hw, rpf_et_upfen_adr(filter),
- rpf_et_upfen_msk, rpf_et_upfen_shift,
- etht_user_priority_en);
+ AQ_WRITE_REG_BIT(aq_hw, rpf_et_upfen_adr(filter), rpf_et_upfen_msk,
+ rpf_et_upfen_shift, etht_user_priority_en);
}
-void rpf_etht_rx_queue_en_set(struct aq_hw *aq_hw, u32 etht_rx_queue_en,
- u32 filter)
+void
+rpf_etht_rx_queue_en_set(struct aq_hw *aq_hw, uint32_t etht_rx_queue_en,
+ uint32_t filter)
{
- AQ_WRITE_REG_BIT(aq_hw, rpf_et_rxqfen_adr(filter),
- rpf_et_rxqfen_msk, rpf_et_rxqfen_shift,
- etht_rx_queue_en);
+ AQ_WRITE_REG_BIT(aq_hw, rpf_et_rxqfen_adr(filter), rpf_et_rxqfen_msk,
+ rpf_et_rxqfen_shift, etht_rx_queue_en);
}
-void rpf_etht_user_priority_set(struct aq_hw *aq_hw, u32 etht_user_priority,
- u32 filter)
+void
+rpf_etht_user_priority_set(struct aq_hw *aq_hw, uint32_t etht_user_priority,
+ uint32_t filter)
{
- AQ_WRITE_REG_BIT(aq_hw, rpf_et_upf_adr(filter),
- rpf_et_upf_msk,
- rpf_et_upf_shift, etht_user_priority);
+ AQ_WRITE_REG_BIT(aq_hw, rpf_et_upf_adr(filter), rpf_et_upf_msk,
+ rpf_et_upf_shift, etht_user_priority);
}
-void rpf_etht_rx_queue_set(struct aq_hw *aq_hw, u32 etht_rx_queue,
- u32 filter)
+void
+rpf_etht_rx_queue_set(struct aq_hw *aq_hw, uint32_t etht_rx_queue, uint32_t filter)
{
- AQ_WRITE_REG_BIT(aq_hw, rpf_et_rxqf_adr(filter),
- rpf_et_rxqf_msk,
- rpf_et_rxqf_shift, etht_rx_queue);
+ AQ_WRITE_REG_BIT(aq_hw, rpf_et_rxqf_adr(filter), rpf_et_rxqf_msk,
+ rpf_et_rxqf_shift, etht_rx_queue);
}
-void rpf_etht_mgt_queue_set(struct aq_hw *aq_hw, u32 etht_mgt_queue,
- u32 filter)
+void
+rpf_etht_mgt_queue_set(struct aq_hw *aq_hw, uint32_t etht_mgt_queue, uint32_t filter)
{
- AQ_WRITE_REG_BIT(aq_hw, rpf_et_mng_rxqf_adr(filter),
- rpf_et_mng_rxqf_msk, rpf_et_mng_rxqf_shift,
- etht_mgt_queue);
+ AQ_WRITE_REG_BIT(aq_hw, rpf_et_mng_rxqf_adr(filter),
+ rpf_et_mng_rxqf_msk, rpf_et_mng_rxqf_shift, etht_mgt_queue);
}
-void rpf_etht_flr_act_set(struct aq_hw *aq_hw, u32 etht_flr_act, u32 filter)
+void
+rpf_etht_flr_act_set(struct aq_hw *aq_hw, uint32_t etht_flr_act, uint32_t filter)
{
- AQ_WRITE_REG_BIT(aq_hw, rpf_et_actf_adr(filter),
- rpf_et_actf_msk,
- rpf_et_actf_shift, etht_flr_act);
+ AQ_WRITE_REG_BIT(aq_hw, rpf_et_actf_adr(filter), rpf_et_actf_msk,
+ rpf_et_actf_shift, etht_flr_act);
}
-void rpf_etht_flr_set(struct aq_hw *aq_hw, u32 etht_flr, u32 filter)
+void
+rpf_etht_flr_set(struct aq_hw *aq_hw, uint32_t etht_flr, uint32_t filter)
{
- AQ_WRITE_REG_BIT(aq_hw, rpf_et_valf_adr(filter),
- rpf_et_valf_msk,
- rpf_et_valf_shift, etht_flr);
+ AQ_WRITE_REG_BIT(aq_hw, rpf_et_valf_adr(filter), rpf_et_valf_msk,
+ rpf_et_valf_shift, etht_flr);
}
/* RPO: rx packet offload */
-void rpo_ipv4header_crc_offload_en_set(struct aq_hw *aq_hw,
- u32 ipv4header_crc_offload_en)
+void
+rpo_ipv4header_crc_offload_en_set(struct aq_hw *aq_hw,
+ uint32_t ipv4header_crc_offload_en)
{
- AQ_WRITE_REG_BIT(aq_hw, rpo_ipv4chk_en_adr,
- rpo_ipv4chk_en_msk,
- rpo_ipv4chk_en_shift,
- ipv4header_crc_offload_en);
+ AQ_WRITE_REG_BIT(aq_hw, rpo_ipv4chk_en_adr, rpo_ipv4chk_en_msk,
+ rpo_ipv4chk_en_shift, ipv4header_crc_offload_en);
}
-void rpo_rx_desc_vlan_stripping_set(struct aq_hw *aq_hw,
- u32 rx_desc_vlan_stripping, u32 descriptor)
+void
+rpo_rx_desc_vlan_stripping_set(struct aq_hw *aq_hw,
+ uint32_t rx_desc_vlan_stripping, uint32_t descriptor)
{
- AQ_WRITE_REG_BIT(aq_hw, rpo_descdvl_strip_adr(descriptor),
- rpo_descdvl_strip_msk,
- rpo_descdvl_strip_shift,
- rx_desc_vlan_stripping);
+ AQ_WRITE_REG_BIT(aq_hw, rpo_descdvl_strip_adr(descriptor),
+ rpo_descdvl_strip_msk, rpo_descdvl_strip_shift,
+ rx_desc_vlan_stripping);
}
-void rpo_tcp_udp_crc_offload_en_set(struct aq_hw *aq_hw,
- u32 tcp_udp_crc_offload_en)
+void
+rpo_tcp_udp_crc_offload_en_set(struct aq_hw *aq_hw,
+ uint32_t tcp_udp_crc_offload_en)
{
- AQ_WRITE_REG_BIT(aq_hw, rpol4chk_en_adr, rpol4chk_en_msk,
- rpol4chk_en_shift, tcp_udp_crc_offload_en);
+ AQ_WRITE_REG_BIT(aq_hw, rpol4chk_en_adr, rpol4chk_en_msk,
+ rpol4chk_en_shift, tcp_udp_crc_offload_en);
}
-void rpo_lro_en_set(struct aq_hw *aq_hw, u32 lro_en)
+void
+rpo_lro_en_set(struct aq_hw *aq_hw, uint32_t lro_en)
{
- AQ_WRITE_REG(aq_hw, rpo_lro_en_adr, lro_en);
+ AQ_WRITE_REG(aq_hw, rpo_lro_en_adr, lro_en);
}
-void rpo_lro_patch_optimization_en_set(struct aq_hw *aq_hw,
- u32 lro_patch_optimization_en)
+void
+rpo_lro_patch_optimization_en_set(struct aq_hw *aq_hw,
+ uint32_t lro_patch_optimization_en)
{
- AQ_WRITE_REG_BIT(aq_hw, rpo_lro_ptopt_en_adr,
- rpo_lro_ptopt_en_msk,
- rpo_lro_ptopt_en_shift,
- lro_patch_optimization_en);
+ AQ_WRITE_REG_BIT(aq_hw, rpo_lro_ptopt_en_adr, rpo_lro_ptopt_en_msk,
+ rpo_lro_ptopt_en_shift, lro_patch_optimization_en);
}
-void rpo_lro_qsessions_lim_set(struct aq_hw *aq_hw,
- u32 lro_qsessions_lim)
+void
+rpo_lro_qsessions_lim_set(struct aq_hw *aq_hw, uint32_t lro_qsessions_lim)
{
- AQ_WRITE_REG_BIT(aq_hw, rpo_lro_qses_lmt_adr,
- rpo_lro_qses_lmt_msk,
- rpo_lro_qses_lmt_shift,
- lro_qsessions_lim);
+ AQ_WRITE_REG_BIT(aq_hw, rpo_lro_qses_lmt_adr, rpo_lro_qses_lmt_msk,
+ rpo_lro_qses_lmt_shift, lro_qsessions_lim);
}
-void rpo_lro_total_desc_lim_set(struct aq_hw *aq_hw, u32 lro_total_desc_lim)
+void
+rpo_lro_total_desc_lim_set(struct aq_hw *aq_hw, uint32_t lro_total_desc_lim)
{
- AQ_WRITE_REG_BIT(aq_hw, rpo_lro_tot_dsc_lmt_adr,
- rpo_lro_tot_dsc_lmt_msk,
- rpo_lro_tot_dsc_lmt_shift,
- lro_total_desc_lim);
+ AQ_WRITE_REG_BIT(aq_hw, rpo_lro_tot_dsc_lmt_adr,
+ rpo_lro_tot_dsc_lmt_msk, rpo_lro_tot_dsc_lmt_shift,
+ lro_total_desc_lim);
}
-void rpo_lro_min_pay_of_first_pkt_set(struct aq_hw *aq_hw,
- u32 lro_min_pld_of_first_pkt)
+void
+rpo_lro_min_pay_of_first_pkt_set(struct aq_hw *aq_hw,
+ uint32_t lro_min_pld_of_first_pkt)
{
- AQ_WRITE_REG_BIT(aq_hw, rpo_lro_pkt_min_adr,
- rpo_lro_pkt_min_msk,
- rpo_lro_pkt_min_shift,
- lro_min_pld_of_first_pkt);
+ AQ_WRITE_REG_BIT(aq_hw, rpo_lro_pkt_min_adr, rpo_lro_pkt_min_msk,
+ rpo_lro_pkt_min_shift, lro_min_pld_of_first_pkt);
}
-void rpo_lro_pkt_lim_set(struct aq_hw *aq_hw, u32 lro_pkt_lim)
+void
+rpo_lro_pkt_lim_set(struct aq_hw *aq_hw, uint32_t lro_pkt_lim)
{
- AQ_WRITE_REG(aq_hw, rpo_lro_rsc_max_adr, lro_pkt_lim);
+ AQ_WRITE_REG(aq_hw, rpo_lro_rsc_max_adr, lro_pkt_lim);
}
-void rpo_lro_max_num_of_descriptors_set(struct aq_hw *aq_hw,
- u32 lro_max_number_of_descriptors,
- u32 lro)
+void
+rpo_lro_max_num_of_descriptors_set(struct aq_hw *aq_hw,
+ uint32_t lro_max_number_of_descriptors, uint32_t lro)
{
/* Register address for bitfield lro{L}_des_max[1:0] */
- static u32 rpo_lro_ldes_max_adr[32] = {
- 0x000055A0U, 0x000055A0U, 0x000055A0U, 0x000055A0U,
- 0x000055A0U, 0x000055A0U, 0x000055A0U, 0x000055A0U,
- 0x000055A4U, 0x000055A4U, 0x000055A4U, 0x000055A4U,
- 0x000055A4U, 0x000055A4U, 0x000055A4U, 0x000055A4U,
- 0x000055A8U, 0x000055A8U, 0x000055A8U, 0x000055A8U,
- 0x000055A8U, 0x000055A8U, 0x000055A8U, 0x000055A8U,
- 0x000055ACU, 0x000055ACU, 0x000055ACU, 0x000055ACU,
- 0x000055ACU, 0x000055ACU, 0x000055ACU, 0x000055ACU
- };
+ static uint32_t rpo_lro_ldes_max_adr[32] = {
+ 0x000055A0U, 0x000055A0U, 0x000055A0U, 0x000055A0U,
+ 0x000055A0U, 0x000055A0U, 0x000055A0U, 0x000055A0U,
+ 0x000055A4U, 0x000055A4U, 0x000055A4U, 0x000055A4U,
+ 0x000055A4U, 0x000055A4U, 0x000055A4U, 0x000055A4U,
+ 0x000055A8U, 0x000055A8U, 0x000055A8U, 0x000055A8U,
+ 0x000055A8U, 0x000055A8U, 0x000055A8U, 0x000055A8U,
+ 0x000055ACU, 0x000055ACU, 0x000055ACU, 0x000055ACU,
+ 0x000055ACU, 0x000055ACU, 0x000055ACU, 0x000055ACU
+ };
/* Bitmask for bitfield lro{L}_des_max[1:0] */
- static u32 rpo_lro_ldes_max_msk[32] = {
- 0x00000003U, 0x00000030U, 0x00000300U, 0x00003000U,
- 0x00030000U, 0x00300000U, 0x03000000U, 0x30000000U,
- 0x00000003U, 0x00000030U, 0x00000300U, 0x00003000U,
- 0x00030000U, 0x00300000U, 0x03000000U, 0x30000000U,
- 0x00000003U, 0x00000030U, 0x00000300U, 0x00003000U,
- 0x00030000U, 0x00300000U, 0x03000000U, 0x30000000U,
- 0x00000003U, 0x00000030U, 0x00000300U, 0x00003000U,
- 0x00030000U, 0x00300000U, 0x03000000U, 0x30000000U
- };
+ static uint32_t rpo_lro_ldes_max_msk[32] = {
+ 0x00000003U, 0x00000030U, 0x00000300U, 0x00003000U,
+ 0x00030000U, 0x00300000U, 0x03000000U, 0x30000000U,
+ 0x00000003U, 0x00000030U, 0x00000300U, 0x00003000U,
+ 0x00030000U, 0x00300000U, 0x03000000U, 0x30000000U,
+ 0x00000003U, 0x00000030U, 0x00000300U, 0x00003000U,
+ 0x00030000U, 0x00300000U, 0x03000000U, 0x30000000U,
+ 0x00000003U, 0x00000030U, 0x00000300U, 0x00003000U,
+ 0x00030000U, 0x00300000U, 0x03000000U, 0x30000000U
+ };
/* Lower bit position of bitfield lro{L}_des_max[1:0] */
- static u32 rpo_lro_ldes_max_shift[32] = {
- 0U, 4U, 8U, 12U, 16U, 20U, 24U, 28U,
- 0U, 4U, 8U, 12U, 16U, 20U, 24U, 28U,
- 0U, 4U, 8U, 12U, 16U, 20U, 24U, 28U,
- 0U, 4U, 8U, 12U, 16U, 20U, 24U, 28U
- };
+ static uint32_t rpo_lro_ldes_max_shift[32] = {
+ 0U, 4U, 8U, 12U, 16U, 20U, 24U, 28U,
+ 0U, 4U, 8U, 12U, 16U, 20U, 24U, 28U,
+ 0U, 4U, 8U, 12U, 16U, 20U, 24U, 28U,
+ 0U, 4U, 8U, 12U, 16U, 20U, 24U, 28U
+ };
- AQ_WRITE_REG_BIT(aq_hw, rpo_lro_ldes_max_adr[lro],
- rpo_lro_ldes_max_msk[lro],
- rpo_lro_ldes_max_shift[lro],
- lro_max_number_of_descriptors);
+ AQ_WRITE_REG_BIT(aq_hw, rpo_lro_ldes_max_adr[lro],
+ rpo_lro_ldes_max_msk[lro], rpo_lro_ldes_max_shift[lro],
+ lro_max_number_of_descriptors);
}
-void rpo_lro_time_base_divider_set(struct aq_hw *aq_hw,
- u32 lro_time_base_divider)
+void
+rpo_lro_time_base_divider_set(struct aq_hw *aq_hw,
+ uint32_t lro_time_base_divider)
{
- AQ_WRITE_REG_BIT(aq_hw, rpo_lro_tb_div_adr,
- rpo_lro_tb_div_msk,
- rpo_lro_tb_div_shift,
- lro_time_base_divider);
+ AQ_WRITE_REG_BIT(aq_hw, rpo_lro_tb_div_adr, rpo_lro_tb_div_msk,
+ rpo_lro_tb_div_shift, lro_time_base_divider);
}
-void rpo_lro_inactive_interval_set(struct aq_hw *aq_hw,
- u32 lro_inactive_interval)
+void
+rpo_lro_inactive_interval_set(struct aq_hw *aq_hw,
+ uint32_t lro_inactive_interval)
{
- AQ_WRITE_REG_BIT(aq_hw, rpo_lro_ina_ival_adr,
- rpo_lro_ina_ival_msk,
- rpo_lro_ina_ival_shift,
- lro_inactive_interval);
+ AQ_WRITE_REG_BIT(aq_hw, rpo_lro_ina_ival_adr, rpo_lro_ina_ival_msk,
+ rpo_lro_ina_ival_shift, lro_inactive_interval);
}
-void rpo_lro_max_coalescing_interval_set(struct aq_hw *aq_hw,
- u32 lro_max_coalescing_interval)
+void
+rpo_lro_max_coalescing_interval_set(struct aq_hw *aq_hw,
+ uint32_t lro_max_coalescing_interval)
{
- AQ_WRITE_REG_BIT(aq_hw, rpo_lro_max_ival_adr,
- rpo_lro_max_ival_msk,
- rpo_lro_max_ival_shift,
- lro_max_coalescing_interval);
+ AQ_WRITE_REG_BIT(aq_hw, rpo_lro_max_ival_adr, rpo_lro_max_ival_msk,
+ rpo_lro_max_ival_shift, lro_max_coalescing_interval);
}
/* rx */
-void rx_rx_reg_res_dis_set(struct aq_hw *aq_hw, u32 rx_reg_res_dis)
+void
+rx_rx_reg_res_dis_set(struct aq_hw *aq_hw, uint32_t rx_reg_res_dis)
{
- AQ_WRITE_REG_BIT(aq_hw, rx_reg_res_dsbl_adr,
- rx_reg_res_dsbl_msk,
- rx_reg_res_dsbl_shift,
- rx_reg_res_dis);
+ AQ_WRITE_REG_BIT(aq_hw, rx_reg_res_dsbl_adr, rx_reg_res_dsbl_msk,
+ rx_reg_res_dsbl_shift, rx_reg_res_dis);
}
/* tdm */
-void tdm_cpu_id_set(struct aq_hw *aq_hw, u32 cpuid, u32 dca)
+void
+tdm_cpu_id_set(struct aq_hw *aq_hw, uint32_t cpuid, uint32_t dca)
{
- AQ_WRITE_REG_BIT(aq_hw, tdm_dcadcpuid_adr(dca),
- tdm_dcadcpuid_msk,
- tdm_dcadcpuid_shift, cpuid);
+ AQ_WRITE_REG_BIT(aq_hw, tdm_dcadcpuid_adr(dca), tdm_dcadcpuid_msk,
+ tdm_dcadcpuid_shift, cpuid);
}
-void tdm_large_send_offload_en_set(struct aq_hw *aq_hw,
- u32 large_send_offload_en)
+void
+tdm_large_send_offload_en_set(struct aq_hw *aq_hw,
+ uint32_t large_send_offload_en)
{
- AQ_WRITE_REG(aq_hw, tdm_lso_en_adr, large_send_offload_en);
+ AQ_WRITE_REG(aq_hw, tdm_lso_en_adr, large_send_offload_en);
}
-void tdm_tx_dca_en_set(struct aq_hw *aq_hw, u32 tx_dca_en)
+void
+tdm_tx_dca_en_set(struct aq_hw *aq_hw, uint32_t tx_dca_en)
{
- AQ_WRITE_REG_BIT(aq_hw, tdm_dca_en_adr, tdm_dca_en_msk,
- tdm_dca_en_shift, tx_dca_en);
+ AQ_WRITE_REG_BIT(aq_hw, tdm_dca_en_adr, tdm_dca_en_msk,
+ tdm_dca_en_shift, tx_dca_en);
}
-void tdm_tx_dca_mode_set(struct aq_hw *aq_hw, u32 tx_dca_mode)
+void
+tdm_tx_dca_mode_set(struct aq_hw *aq_hw, uint32_t tx_dca_mode)
{
- AQ_WRITE_REG_BIT(aq_hw, tdm_dca_mode_adr, tdm_dca_mode_msk,
- tdm_dca_mode_shift, tx_dca_mode);
+ AQ_WRITE_REG_BIT(aq_hw, tdm_dca_mode_adr, tdm_dca_mode_msk,
+ tdm_dca_mode_shift, tx_dca_mode);
}
-void tdm_tx_desc_dca_en_set(struct aq_hw *aq_hw, u32 tx_desc_dca_en, u32 dca)
+void
+tdm_tx_desc_dca_en_set(struct aq_hw *aq_hw, uint32_t tx_desc_dca_en, uint32_t dca)
{
- AQ_WRITE_REG_BIT(aq_hw, tdm_dcaddesc_en_adr(dca),
- tdm_dcaddesc_en_msk, tdm_dcaddesc_en_shift,
- tx_desc_dca_en);
+ AQ_WRITE_REG_BIT(aq_hw, tdm_dcaddesc_en_adr(dca), tdm_dcaddesc_en_msk,
+ tdm_dcaddesc_en_shift, tx_desc_dca_en);
}
-void tdm_tx_desc_en_set(struct aq_hw *aq_hw, u32 tx_desc_en, u32 descriptor)
+void
+tdm_tx_desc_en_set(struct aq_hw *aq_hw, uint32_t tx_desc_en, uint32_t descriptor)
{
- AQ_WRITE_REG_BIT(aq_hw, tdm_descden_adr(descriptor),
- tdm_descden_msk,
- tdm_descden_shift,
- tx_desc_en);
+ AQ_WRITE_REG_BIT(aq_hw, tdm_descden_adr(descriptor), tdm_descden_msk,
+ tdm_descden_shift, tx_desc_en);
}
-u32 tdm_tx_desc_head_ptr_get(struct aq_hw *aq_hw, u32 descriptor)
+uint32_t
+tdm_tx_desc_head_ptr_get(struct aq_hw *aq_hw, uint32_t descriptor)
{
- return AQ_READ_REG_BIT(aq_hw, tdm_descdhd_adr(descriptor),
- tdm_descdhd_msk, tdm_descdhd_shift);
+ return AQ_READ_REG_BIT(aq_hw, tdm_descdhd_adr(descriptor),
+ tdm_descdhd_msk, tdm_descdhd_shift);
}
-void tdm_tx_desc_len_set(struct aq_hw *aq_hw, u32 tx_desc_len,
- u32 descriptor)
+void
+tdm_tx_desc_len_set(struct aq_hw *aq_hw, uint32_t tx_desc_len, uint32_t descriptor)
{
- AQ_WRITE_REG_BIT(aq_hw, tdm_descdlen_adr(descriptor),
- tdm_descdlen_msk,
- tdm_descdlen_shift,
- tx_desc_len);
+ AQ_WRITE_REG_BIT(aq_hw, tdm_descdlen_adr(descriptor), tdm_descdlen_msk,
+ tdm_descdlen_shift, tx_desc_len);
}
-void tdm_tx_desc_wr_wb_irq_en_set(struct aq_hw *aq_hw,
- u32 tx_desc_wr_wb_irq_en)
+void
+tdm_tx_desc_wr_wb_irq_en_set(struct aq_hw *aq_hw, uint32_t tx_desc_wr_wb_irq_en)
{
- AQ_WRITE_REG_BIT(aq_hw, tdm_int_desc_wrb_en_adr,
- tdm_int_desc_wrb_en_msk,
- tdm_int_desc_wrb_en_shift,
- tx_desc_wr_wb_irq_en);
+ AQ_WRITE_REG_BIT(aq_hw, tdm_int_desc_wrb_en_adr,
+ tdm_int_desc_wrb_en_msk, tdm_int_desc_wrb_en_shift,
+ tx_desc_wr_wb_irq_en);
}
-void tdm_tx_desc_wr_wb_threshold_set(struct aq_hw *aq_hw,
- u32 tx_desc_wr_wb_threshold,
- u32 descriptor)
+void
+tdm_tx_desc_wr_wb_threshold_set(struct aq_hw *aq_hw,
+ uint32_t tx_desc_wr_wb_threshold, uint32_t descriptor)
{
- AQ_WRITE_REG_BIT(aq_hw, tdm_descdwrb_thresh_adr(descriptor),
- tdm_descdwrb_thresh_msk,
- tdm_descdwrb_thresh_shift,
- tx_desc_wr_wb_threshold);
+ AQ_WRITE_REG_BIT(aq_hw, tdm_descdwrb_thresh_adr(descriptor),
+ tdm_descdwrb_thresh_msk, tdm_descdwrb_thresh_shift,
+ tx_desc_wr_wb_threshold);
}
-void tdm_tdm_intr_moder_en_set(struct aq_hw *aq_hw,
- u32 tdm_irq_moderation_en)
+void
+tdm_tdm_intr_moder_en_set(struct aq_hw *aq_hw, uint32_t tdm_irq_moderation_en)
{
- AQ_WRITE_REG_BIT(aq_hw, tdm_int_mod_en_adr,
- tdm_int_mod_en_msk,
- tdm_int_mod_en_shift,
- tdm_irq_moderation_en);
+ AQ_WRITE_REG_BIT(aq_hw, tdm_int_mod_en_adr, tdm_int_mod_en_msk,
+ tdm_int_mod_en_shift, tdm_irq_moderation_en);
}
/* thm */
-void thm_lso_tcp_flag_of_first_pkt_set(struct aq_hw *aq_hw,
- u32 lso_tcp_flag_of_first_pkt)
+void
+thm_lso_tcp_flag_of_first_pkt_set(struct aq_hw *aq_hw,
+ uint32_t lso_tcp_flag_of_first_pkt)
{
- AQ_WRITE_REG_BIT(aq_hw, thm_lso_tcp_flag_first_adr,
- thm_lso_tcp_flag_first_msk,
- thm_lso_tcp_flag_first_shift,
- lso_tcp_flag_of_first_pkt);
+ AQ_WRITE_REG_BIT(aq_hw, thm_lso_tcp_flag_first_adr,
+ thm_lso_tcp_flag_first_msk, thm_lso_tcp_flag_first_shift,
+ lso_tcp_flag_of_first_pkt);
}
-void thm_lso_tcp_flag_of_last_pkt_set(struct aq_hw *aq_hw,
- u32 lso_tcp_flag_of_last_pkt)
+void
+thm_lso_tcp_flag_of_last_pkt_set(struct aq_hw *aq_hw,
+ uint32_t lso_tcp_flag_of_last_pkt)
{
- AQ_WRITE_REG_BIT(aq_hw, thm_lso_tcp_flag_last_adr,
- thm_lso_tcp_flag_last_msk,
- thm_lso_tcp_flag_last_shift,
- lso_tcp_flag_of_last_pkt);
+ AQ_WRITE_REG_BIT(aq_hw, thm_lso_tcp_flag_last_adr,
+ thm_lso_tcp_flag_last_msk, thm_lso_tcp_flag_last_shift,
+ lso_tcp_flag_of_last_pkt);
}
-void thm_lso_tcp_flag_of_middle_pkt_set(struct aq_hw *aq_hw,
- u32 lso_tcp_flag_of_middle_pkt)
+void
+thm_lso_tcp_flag_of_middle_pkt_set(struct aq_hw *aq_hw,
+ uint32_t lso_tcp_flag_of_middle_pkt)
{
- AQ_WRITE_REG_BIT(aq_hw, thm_lso_tcp_flag_mid_adr,
- thm_lso_tcp_flag_mid_msk,
- thm_lso_tcp_flag_mid_shift,
- lso_tcp_flag_of_middle_pkt);
+ AQ_WRITE_REG_BIT(aq_hw, thm_lso_tcp_flag_mid_adr,
+ thm_lso_tcp_flag_mid_msk, thm_lso_tcp_flag_mid_shift,
+ lso_tcp_flag_of_middle_pkt);
}
/* TPB: tx packet buffer */
-void tpb_tx_buff_en_set(struct aq_hw *aq_hw, u32 tx_buff_en)
+void
+tpb_tx_buff_en_set(struct aq_hw *aq_hw, uint32_t tx_buff_en)
{
- AQ_WRITE_REG_BIT(aq_hw, tpb_tx_buf_en_adr, tpb_tx_buf_en_msk,
- tpb_tx_buf_en_shift, tx_buff_en);
+ AQ_WRITE_REG_BIT(aq_hw, tpb_tx_buf_en_adr, tpb_tx_buf_en_msk,
+ tpb_tx_buf_en_shift, tx_buff_en);
}
-void tpb_tx_tc_mode_set(struct aq_hw *aq_hw, u32 tc_mode)
+void
+tpb_tx_tc_mode_set(struct aq_hw *aq_hw, uint32_t tc_mode)
{
- AQ_WRITE_REG_BIT(aq_hw, tpb_tx_tc_mode_adr, tpb_tx_tc_mode_msk,
- tpb_tx_tc_mode_shift, tc_mode);
+ AQ_WRITE_REG_BIT(aq_hw, tpb_tx_tc_mode_adr, tpb_tx_tc_mode_msk,
+ tpb_tx_tc_mode_shift, tc_mode);
}
-void tpb_tx_buff_hi_threshold_per_tc_set(struct aq_hw *aq_hw,
- u32 tx_buff_hi_threshold_per_tc,
- u32 buffer)
+void
+tpb_tx_buff_hi_threshold_per_tc_set(struct aq_hw *aq_hw,
+ uint32_t tx_buff_hi_threshold_per_tc, uint32_t buffer)
{
- AQ_WRITE_REG_BIT(aq_hw, tpb_txbhi_thresh_adr(buffer),
- tpb_txbhi_thresh_msk, tpb_txbhi_thresh_shift,
- tx_buff_hi_threshold_per_tc);
+ AQ_WRITE_REG_BIT(aq_hw, tpb_txbhi_thresh_adr(buffer),
+ tpb_txbhi_thresh_msk, tpb_txbhi_thresh_shift,
+ tx_buff_hi_threshold_per_tc);
}
-void tpb_tx_buff_lo_threshold_per_tc_set(struct aq_hw *aq_hw,
- u32 tx_buff_lo_threshold_per_tc,
- u32 buffer)
+void
+tpb_tx_buff_lo_threshold_per_tc_set(struct aq_hw *aq_hw,
+ uint32_t tx_buff_lo_threshold_per_tc, uint32_t buffer)
{
- AQ_WRITE_REG_BIT(aq_hw, tpb_txblo_thresh_adr(buffer),
- tpb_txblo_thresh_msk, tpb_txblo_thresh_shift,
- tx_buff_lo_threshold_per_tc);
+ AQ_WRITE_REG_BIT(aq_hw, tpb_txblo_thresh_adr(buffer),
+ tpb_txblo_thresh_msk, tpb_txblo_thresh_shift,
+ tx_buff_lo_threshold_per_tc);
}
-void tpb_tx_dma_sys_lbk_en_set(struct aq_hw *aq_hw, u32 tx_dma_sys_lbk_en)
+void
+tpb_tx_dma_sys_lbk_en_set(struct aq_hw *aq_hw, uint32_t tx_dma_sys_lbk_en)
{
- AQ_WRITE_REG_BIT(aq_hw, tpb_dma_sys_lbk_adr,
- tpb_dma_sys_lbk_msk,
- tpb_dma_sys_lbk_shift,
- tx_dma_sys_lbk_en);
+ AQ_WRITE_REG_BIT(aq_hw, tpb_dma_sys_lbk_adr, tpb_dma_sys_lbk_msk,
+ tpb_dma_sys_lbk_shift, tx_dma_sys_lbk_en);
}
-void rdm_rx_dma_desc_cache_init_tgl(struct aq_hw *aq_hw)
+void
+rdm_rx_dma_desc_cache_init_tgl(struct aq_hw *aq_hw)
{
- AQ_WRITE_REG_BIT(aq_hw, rdm_rx_dma_desc_cache_init_adr,
- rdm_rx_dma_desc_cache_init_msk,
- rdm_rx_dma_desc_cache_init_shift,
- AQ_READ_REG_BIT(aq_hw, rdm_rx_dma_desc_cache_init_adr,
- rdm_rx_dma_desc_cache_init_msk,
- rdm_rx_dma_desc_cache_init_shift) ^ 1
- );
+ AQ_WRITE_REG_BIT(aq_hw, rdm_rx_dma_desc_cache_init_adr,
+ rdm_rx_dma_desc_cache_init_msk, rdm_rx_dma_desc_cache_init_shift,
+ AQ_READ_REG_BIT(aq_hw, rdm_rx_dma_desc_cache_init_adr,
+ rdm_rx_dma_desc_cache_init_msk,
+ rdm_rx_dma_desc_cache_init_shift) ^ 1
+ );
}
-void tpb_tx_pkt_buff_size_per_tc_set(struct aq_hw *aq_hw,
- u32 tx_pkt_buff_size_per_tc, u32 buffer)
+void
+tpb_tx_pkt_buff_size_per_tc_set(struct aq_hw *aq_hw,
+ uint32_t tx_pkt_buff_size_per_tc, uint32_t buffer)
{
- AQ_WRITE_REG_BIT(aq_hw, tpb_txbbuf_size_adr(buffer),
- tpb_txbbuf_size_msk,
- tpb_txbbuf_size_shift,
- tx_pkt_buff_size_per_tc);
+ AQ_WRITE_REG_BIT(aq_hw, tpb_txbbuf_size_adr(buffer),
+ tpb_txbbuf_size_msk, tpb_txbbuf_size_shift,
+ tx_pkt_buff_size_per_tc);
}
-void tpb_tx_path_scp_ins_en_set(struct aq_hw *aq_hw, u32 tx_path_scp_ins_en)
+void
+tpb_tx_path_scp_ins_en_set(struct aq_hw *aq_hw, uint32_t tx_path_scp_ins_en)
{
- AQ_WRITE_REG_BIT(aq_hw, tpb_tx_scp_ins_en_adr,
- tpb_tx_scp_ins_en_msk,
- tpb_tx_scp_ins_en_shift,
- tx_path_scp_ins_en);
+ AQ_WRITE_REG_BIT(aq_hw, tpb_tx_scp_ins_en_adr, tpb_tx_scp_ins_en_msk,
+ tpb_tx_scp_ins_en_shift, tx_path_scp_ins_en);
}
/* TPO: tx packet offload */
-void tpo_ipv4header_crc_offload_en_set(struct aq_hw *aq_hw,
- u32 ipv4header_crc_offload_en)
+void
+tpo_ipv4header_crc_offload_en_set(struct aq_hw *aq_hw,
+ uint32_t ipv4header_crc_offload_en)
{
- AQ_WRITE_REG_BIT(aq_hw, tpo_ipv4chk_en_adr,
- tpo_ipv4chk_en_msk,
- tpo_ipv4chk_en_shift,
- ipv4header_crc_offload_en);
+ AQ_WRITE_REG_BIT(aq_hw, tpo_ipv4chk_en_adr, tpo_ipv4chk_en_msk,
+ tpo_ipv4chk_en_shift, ipv4header_crc_offload_en);
}
-void tpo_tcp_udp_crc_offload_en_set(struct aq_hw *aq_hw,
- u32 tcp_udp_crc_offload_en)
+void
+tpo_tcp_udp_crc_offload_en_set(struct aq_hw *aq_hw, uint32_t tcp_udp_crc_offload_en)
{
- AQ_WRITE_REG_BIT(aq_hw, tpol4chk_en_adr,
- tpol4chk_en_msk,
- tpol4chk_en_shift,
- tcp_udp_crc_offload_en);
+ AQ_WRITE_REG_BIT(aq_hw, tpol4chk_en_adr, tpol4chk_en_msk,
+ tpol4chk_en_shift, tcp_udp_crc_offload_en);
}
-void tpo_tx_pkt_sys_lbk_en_set(struct aq_hw *aq_hw, u32 tx_pkt_sys_lbk_en)
+void
+tpo_tx_pkt_sys_lbk_en_set(struct aq_hw *aq_hw, uint32_t tx_pkt_sys_lbk_en)
{
- AQ_WRITE_REG_BIT(aq_hw, tpo_pkt_sys_lbk_adr,
- tpo_pkt_sys_lbk_msk,
- tpo_pkt_sys_lbk_shift,
- tx_pkt_sys_lbk_en);
+ AQ_WRITE_REG_BIT(aq_hw, tpo_pkt_sys_lbk_adr, tpo_pkt_sys_lbk_msk,
+ tpo_pkt_sys_lbk_shift, tx_pkt_sys_lbk_en);
}
/* TPS: tx packet scheduler */
-void tps_tx_pkt_shed_data_arb_mode_set(struct aq_hw *aq_hw,
- u32 tx_pkt_shed_data_arb_mode)
+void
+tps_tx_pkt_shed_data_arb_mode_set(struct aq_hw *aq_hw,
+ uint32_t tx_pkt_shed_data_arb_mode)
{
- AQ_WRITE_REG_BIT(aq_hw, tps_data_tc_arb_mode_adr,
- tps_data_tc_arb_mode_msk,
- tps_data_tc_arb_mode_shift,
- tx_pkt_shed_data_arb_mode);
+ AQ_WRITE_REG_BIT(aq_hw, tps_data_tc_arb_mode_adr,
+ tps_data_tc_arb_mode_msk, tps_data_tc_arb_mode_shift,
+ tx_pkt_shed_data_arb_mode);
}
-void tps_tx_pkt_shed_desc_rate_curr_time_res_set(struct aq_hw *aq_hw,
- u32 curr_time_res)
+void
+tps_tx_pkt_shed_desc_rate_curr_time_res_set(struct aq_hw *aq_hw,
+ uint32_t curr_time_res)
{
- AQ_WRITE_REG_BIT(aq_hw, tps_desc_rate_ta_rst_adr,
- tps_desc_rate_ta_rst_msk,
- tps_desc_rate_ta_rst_shift,
- curr_time_res);
+ AQ_WRITE_REG_BIT(aq_hw, tps_desc_rate_ta_rst_adr,
+ tps_desc_rate_ta_rst_msk, tps_desc_rate_ta_rst_shift,
+ curr_time_res);
}
-void tps_tx_pkt_shed_desc_rate_lim_set(struct aq_hw *aq_hw,
- u32 tx_pkt_shed_desc_rate_lim)
+void
+tps_tx_pkt_shed_desc_rate_lim_set(struct aq_hw *aq_hw,
+ uint32_t tx_pkt_shed_desc_rate_lim)
{
- AQ_WRITE_REG_BIT(aq_hw, tps_desc_rate_lim_adr,
- tps_desc_rate_lim_msk,
- tps_desc_rate_lim_shift,
- tx_pkt_shed_desc_rate_lim);
+ AQ_WRITE_REG_BIT(aq_hw, tps_desc_rate_lim_adr, tps_desc_rate_lim_msk,
+ tps_desc_rate_lim_shift, tx_pkt_shed_desc_rate_lim);
}
-void tps_tx_pkt_shed_desc_tc_arb_mode_set(struct aq_hw *aq_hw,
- u32 tx_pkt_shed_desc_tc_arb_mode)
+void
+tps_tx_pkt_shed_desc_tc_arb_mode_set(struct aq_hw *aq_hw,
+ uint32_t tx_pkt_shed_desc_tc_arb_mode)
{
- AQ_WRITE_REG_BIT(aq_hw, tps_desc_tc_arb_mode_adr,
- tps_desc_tc_arb_mode_msk,
- tps_desc_tc_arb_mode_shift,
- tx_pkt_shed_desc_tc_arb_mode);
+ AQ_WRITE_REG_BIT(aq_hw, tps_desc_tc_arb_mode_adr,
+ tps_desc_tc_arb_mode_msk, tps_desc_tc_arb_mode_shift,
+ tx_pkt_shed_desc_tc_arb_mode);
}
-void tps_tx_pkt_shed_desc_tc_max_credit_set(struct aq_hw *aq_hw,
- u32 tx_pkt_shed_desc_tc_max_credit,
- u32 tc)
+void
+tps_tx_pkt_shed_desc_tc_max_credit_set(struct aq_hw *aq_hw,
+ uint32_t tx_pkt_shed_desc_tc_max_credit, uint32_t tc)
{
- AQ_WRITE_REG_BIT(aq_hw, tps_desc_tctcredit_max_adr(tc),
- tps_desc_tctcredit_max_msk,
- tps_desc_tctcredit_max_shift,
- tx_pkt_shed_desc_tc_max_credit);
+ AQ_WRITE_REG_BIT(aq_hw, tps_desc_tctcredit_max_adr(tc),
+ tps_desc_tctcredit_max_msk, tps_desc_tctcredit_max_shift,
+ tx_pkt_shed_desc_tc_max_credit);
}
-void tps_tx_pkt_shed_desc_tc_weight_set(struct aq_hw *aq_hw,
- u32 tx_pkt_shed_desc_tc_weight, u32 tc)
+void
+tps_tx_pkt_shed_desc_tc_weight_set(struct aq_hw *aq_hw,
+ uint32_t tx_pkt_shed_desc_tc_weight, uint32_t tc)
{
- AQ_WRITE_REG_BIT(aq_hw, tps_desc_tctweight_adr(tc),
- tps_desc_tctweight_msk,
- tps_desc_tctweight_shift,
- tx_pkt_shed_desc_tc_weight);
+ AQ_WRITE_REG_BIT(aq_hw, tps_desc_tctweight_adr(tc),
+ tps_desc_tctweight_msk, tps_desc_tctweight_shift,
+ tx_pkt_shed_desc_tc_weight);
}
-void tps_tx_pkt_shed_desc_vm_arb_mode_set(struct aq_hw *aq_hw,
- u32 tx_pkt_shed_desc_vm_arb_mode)
+void
+tps_tx_pkt_shed_desc_vm_arb_mode_set(struct aq_hw *aq_hw,
+ uint32_t tx_pkt_shed_desc_vm_arb_mode)
{
- AQ_WRITE_REG_BIT(aq_hw, tps_desc_vm_arb_mode_adr,
- tps_desc_vm_arb_mode_msk,
- tps_desc_vm_arb_mode_shift,
- tx_pkt_shed_desc_vm_arb_mode);
+ AQ_WRITE_REG_BIT(aq_hw, tps_desc_vm_arb_mode_adr,
+ tps_desc_vm_arb_mode_msk, tps_desc_vm_arb_mode_shift,
+ tx_pkt_shed_desc_vm_arb_mode);
}
-void tps_tx_pkt_shed_tc_data_max_credit_set(struct aq_hw *aq_hw,
- u32 tx_pkt_shed_tc_data_max_credit,
- u32 tc)
+void
+tps_tx_pkt_shed_tc_data_max_credit_set(struct aq_hw *aq_hw,
+ uint32_t tx_pkt_shed_tc_data_max_credit, uint32_t tc)
{
- AQ_WRITE_REG_BIT(aq_hw, tps_data_tctcredit_max_adr(tc),
- tps_data_tctcredit_max_msk,
- tps_data_tctcredit_max_shift,
- tx_pkt_shed_tc_data_max_credit);
+ AQ_WRITE_REG_BIT(aq_hw, tps_data_tctcredit_max_adr(tc),
+ tps_data_tctcredit_max_msk, tps_data_tctcredit_max_shift,
+ tx_pkt_shed_tc_data_max_credit);
}
-void tps_tx_pkt_shed_tc_data_weight_set(struct aq_hw *aq_hw,
- u32 tx_pkt_shed_tc_data_weight, u32 tc)
+void
+tps_tx_pkt_shed_tc_data_weight_set(struct aq_hw *aq_hw,
+ uint32_t tx_pkt_shed_tc_data_weight, uint32_t tc)
{
- AQ_WRITE_REG_BIT(aq_hw, tps_data_tctweight_adr(tc),
- tps_data_tctweight_msk,
- tps_data_tctweight_shift,
- tx_pkt_shed_tc_data_weight);
+ AQ_WRITE_REG_BIT(aq_hw, tps_data_tctweight_adr(tc),
+ tps_data_tctweight_msk, tps_data_tctweight_shift,
+ tx_pkt_shed_tc_data_weight);
}
/* tx */
-void tx_tx_reg_res_dis_set(struct aq_hw *aq_hw, u32 tx_reg_res_dis)
+void
+tx_tx_reg_res_dis_set(struct aq_hw *aq_hw, uint32_t tx_reg_res_dis)
{
- AQ_WRITE_REG_BIT(aq_hw, tx_reg_res_dsbl_adr,
- tx_reg_res_dsbl_msk,
- tx_reg_res_dsbl_shift, tx_reg_res_dis);
+ AQ_WRITE_REG_BIT(aq_hw, tx_reg_res_dsbl_adr, tx_reg_res_dsbl_msk,
+ tx_reg_res_dsbl_shift, tx_reg_res_dis);
}
/* msm */
-u32 msm_reg_access_status_get(struct aq_hw *aq_hw)
+uint32_t
+msm_reg_access_status_get(struct aq_hw *aq_hw)
{
- return AQ_READ_REG_BIT(aq_hw, msm_reg_access_busy_adr,
- msm_reg_access_busy_msk,
- msm_reg_access_busy_shift);
+ return AQ_READ_REG_BIT(aq_hw, msm_reg_access_busy_adr,
+ msm_reg_access_busy_msk, msm_reg_access_busy_shift);
}
-void msm_reg_addr_for_indirect_addr_set(struct aq_hw *aq_hw,
- u32 reg_addr_for_indirect_addr)
+void
+msm_reg_addr_for_indirect_addr_set(struct aq_hw *aq_hw,
+ uint32_t reg_addr_for_indirect_addr)
{
- AQ_WRITE_REG_BIT(aq_hw, msm_reg_addr_adr,
- msm_reg_addr_msk,
- msm_reg_addr_shift,
- reg_addr_for_indirect_addr);
+ AQ_WRITE_REG_BIT(aq_hw, msm_reg_addr_adr, msm_reg_addr_msk,
+ msm_reg_addr_shift, reg_addr_for_indirect_addr);
}
-void msm_reg_rd_strobe_set(struct aq_hw *aq_hw, u32 reg_rd_strobe)
+void
+msm_reg_rd_strobe_set(struct aq_hw *aq_hw, uint32_t reg_rd_strobe)
{
- AQ_WRITE_REG_BIT(aq_hw, msm_reg_rd_strobe_adr,
- msm_reg_rd_strobe_msk,
- msm_reg_rd_strobe_shift,
- reg_rd_strobe);
+ AQ_WRITE_REG_BIT(aq_hw, msm_reg_rd_strobe_adr, msm_reg_rd_strobe_msk,
+ msm_reg_rd_strobe_shift, reg_rd_strobe);
}
-u32 msm_reg_rd_data_get(struct aq_hw *aq_hw)
+uint32_t
+msm_reg_rd_data_get(struct aq_hw *aq_hw)
{
- return AQ_READ_REG(aq_hw, msm_reg_rd_data_adr);
+ return AQ_READ_REG(aq_hw, msm_reg_rd_data_adr);
}
-void msm_reg_wr_data_set(struct aq_hw *aq_hw, u32 reg_wr_data)
+void
+msm_reg_wr_data_set(struct aq_hw *aq_hw, uint32_t reg_wr_data)
{
- AQ_WRITE_REG(aq_hw, msm_reg_wr_data_adr, reg_wr_data);
+ AQ_WRITE_REG(aq_hw, msm_reg_wr_data_adr, reg_wr_data);
}
-void msm_reg_wr_strobe_set(struct aq_hw *aq_hw, u32 reg_wr_strobe)
+void
+msm_reg_wr_strobe_set(struct aq_hw *aq_hw, uint32_t reg_wr_strobe)
{
- AQ_WRITE_REG_BIT(aq_hw, msm_reg_wr_strobe_adr,
- msm_reg_wr_strobe_msk,
- msm_reg_wr_strobe_shift,
- reg_wr_strobe);
+ AQ_WRITE_REG_BIT(aq_hw, msm_reg_wr_strobe_adr, msm_reg_wr_strobe_msk,
+ msm_reg_wr_strobe_shift, reg_wr_strobe);
}
/* pci */
-void pci_pci_reg_res_dis_set(struct aq_hw *aq_hw, u32 pci_reg_res_dis)
+void
+pci_pci_reg_res_dis_set(struct aq_hw *aq_hw, uint32_t pci_reg_res_dis)
{
- AQ_WRITE_REG_BIT(aq_hw, pci_reg_res_dsbl_adr,
- pci_reg_res_dsbl_msk,
- pci_reg_res_dsbl_shift,
- pci_reg_res_dis);
+ AQ_WRITE_REG_BIT(aq_hw, pci_reg_res_dsbl_adr, pci_reg_res_dsbl_msk,
+ pci_reg_res_dsbl_shift, pci_reg_res_dis);
}
-u32 reg_glb_cpu_scratch_scp_get(struct aq_hw *hw, u32 glb_cpu_scratch_scp_idx)
+uint32_t
+reg_glb_cpu_scratch_scp_get(struct aq_hw *hw, uint32_t glb_cpu_scratch_scp_idx)
{
- return AQ_READ_REG(hw, glb_cpu_scratch_scp_adr(glb_cpu_scratch_scp_idx));
+ return AQ_READ_REG(hw, glb_cpu_scratch_scp_adr(glb_cpu_scratch_scp_idx));
}
-void reg_glb_cpu_scratch_scp_set(struct aq_hw *aq_hw, u32 glb_cpu_scratch_scp,
- u32 scratch_scp)
+void
+reg_glb_cpu_scratch_scp_set(struct aq_hw *aq_hw, uint32_t glb_cpu_scratch_scp,
+ uint32_t scratch_scp)
{
- AQ_WRITE_REG(aq_hw, glb_cpu_scratch_scp_adr(scratch_scp),
- glb_cpu_scratch_scp);
+ AQ_WRITE_REG(aq_hw, glb_cpu_scratch_scp_adr(scratch_scp),
+ glb_cpu_scratch_scp);
}
-u32 reg_glb_cpu_no_reset_scratchpad_get(struct aq_hw *hw, u32 index)
+uint32_t
+reg_glb_cpu_no_reset_scratchpad_get(struct aq_hw *hw, uint32_t index)
{
- return AQ_READ_REG(hw, glb_cpu_no_reset_scratchpad_adr(index));
+ return AQ_READ_REG(hw, glb_cpu_no_reset_scratchpad_adr(index));
}
-void reg_glb_cpu_no_reset_scratchpad_set(struct aq_hw* hw, u32 value, u32 index)
+void
+reg_glb_cpu_no_reset_scratchpad_set(struct aq_hw* hw, uint32_t value, uint32_t index)
{
- AQ_WRITE_REG(hw, glb_cpu_no_reset_scratchpad_adr(index), value);
+ AQ_WRITE_REG(hw, glb_cpu_no_reset_scratchpad_adr(index), value);
}
-void reg_mif_power_gating_enable_control_set(struct aq_hw* hw, u32 value)
+void
+reg_mif_power_gating_enable_control_set(struct aq_hw* hw, uint32_t value)
{
- AQ_WRITE_REG(hw, mif_power_gating_enable_control_adr, value);
+ AQ_WRITE_REG(hw, mif_power_gating_enable_control_adr, value);
}
-u32 reg_mif_power_gating_enable_control_get(struct aq_hw* hw)
+uint32_t
+reg_mif_power_gating_enable_control_get(struct aq_hw* hw)
{
- return AQ_READ_REG(hw, mif_power_gating_enable_control_adr);
+ return AQ_READ_REG(hw, mif_power_gating_enable_control_adr);
}
-void reg_glb_general_provisioning9_set(struct aq_hw* hw, u32 value)
+void
+reg_glb_general_provisioning9_set(struct aq_hw* hw, uint32_t value)
{
- AQ_WRITE_REG(hw, glb_general_provisioning9_adr, value);
+ AQ_WRITE_REG(hw, glb_general_provisioning9_adr, value);
}
-u32 reg_glb_general_provisioning9_get(struct aq_hw* hw)
+uint32_t
+reg_glb_general_provisioning9_get(struct aq_hw* hw)
{
- return AQ_READ_REG(hw, glb_general_provisioning9_adr);
+ return AQ_READ_REG(hw, glb_general_provisioning9_adr);
}
-void reg_glb_nvr_provisioning2_set(struct aq_hw* hw, u32 value)
+void
+reg_glb_nvr_provisioning2_set(struct aq_hw* hw, uint32_t value)
{
- AQ_WRITE_REG(hw, glb_nvr_provisioning2_adr, value);
+ AQ_WRITE_REG(hw, glb_nvr_provisioning2_adr, value);
}
-u32 reg_glb_nvr_provisioning2_get(struct aq_hw* hw)
+uint32_t
+reg_glb_nvr_provisioning2_get(struct aq_hw* hw)
{
- return AQ_READ_REG(hw, glb_nvr_provisioning2_adr);
+ return AQ_READ_REG(hw, glb_nvr_provisioning2_adr);
}
-void reg_glb_nvr_interface1_set(struct aq_hw* hw, u32 value)
+void
+reg_glb_nvr_interface1_set(struct aq_hw* hw, uint32_t value)
{
- AQ_WRITE_REG(hw, glb_nvr_interface1_adr, value);
+ AQ_WRITE_REG(hw, glb_nvr_interface1_adr, value);
}
-u32 reg_glb_nvr_interface1_get(struct aq_hw* hw)
+uint32_t
+reg_glb_nvr_interface1_get(struct aq_hw* hw)
{
- return AQ_READ_REG(hw, glb_nvr_interface1_adr);
+ return AQ_READ_REG(hw, glb_nvr_interface1_adr);
}
/* get mif up mailbox busy */
-u32 mif_mcp_up_mailbox_busy_get(struct aq_hw *hw)
+uint32_t
+mif_mcp_up_mailbox_busy_get(struct aq_hw *hw)
{
- return AQ_READ_REG_BIT(hw, mif_mcp_up_mailbox_busy_adr,
- mif_mcp_up_mailbox_busy_msk,
- mif_mcp_up_mailbox_busy_shift);
+ return AQ_READ_REG_BIT(hw, mif_mcp_up_mailbox_busy_adr,
+ mif_mcp_up_mailbox_busy_msk, mif_mcp_up_mailbox_busy_shift);
}
/* set mif up mailbox execute operation */
-void mif_mcp_up_mailbox_execute_operation_set(struct aq_hw* hw, u32 value)
+void
+mif_mcp_up_mailbox_execute_operation_set(struct aq_hw* hw, uint32_t value)
{
- AQ_WRITE_REG_BIT(hw, mif_mcp_up_mailbox_execute_operation_adr,
- mif_mcp_up_mailbox_execute_operation_msk,
- mif_mcp_up_mailbox_execute_operation_shift,
- value);
+ AQ_WRITE_REG_BIT(hw, mif_mcp_up_mailbox_execute_operation_adr,
+ mif_mcp_up_mailbox_execute_operation_msk,
+ mif_mcp_up_mailbox_execute_operation_shift, value);
}
/* get mif uP mailbox address */
-u32 mif_mcp_up_mailbox_addr_get(struct aq_hw *hw)
+uint32_t
+mif_mcp_up_mailbox_addr_get(struct aq_hw *hw)
{
- return AQ_READ_REG(hw, mif_mcp_up_mailbox_addr_adr);
+ return AQ_READ_REG(hw, mif_mcp_up_mailbox_addr_adr);
}
/* set mif uP mailbox address */
-void mif_mcp_up_mailbox_addr_set(struct aq_hw *hw, u32 value)
+void
+mif_mcp_up_mailbox_addr_set(struct aq_hw *hw, uint32_t value)
{
- AQ_WRITE_REG(hw, mif_mcp_up_mailbox_addr_adr, value);
+ AQ_WRITE_REG(hw, mif_mcp_up_mailbox_addr_adr, value);
}
/* get mif uP mailbox data */
-u32 mif_mcp_up_mailbox_data_get(struct aq_hw *hw)
+uint32_t
+mif_mcp_up_mailbox_data_get(struct aq_hw *hw)
{
- return AQ_READ_REG(hw, mif_mcp_up_mailbox_data_adr);
+ return AQ_READ_REG(hw, mif_mcp_up_mailbox_data_adr);
}
-void hw_atl_rpfl3l4_ipv4_dest_addr_clear(struct aq_hw_s *aq_hw, u8 location)
+void
+hw_atl_rpfl3l4_ipv4_dest_addr_clear(struct aq_hw_s *aq_hw, uint8_t location)
{
aq_hw_write_reg(aq_hw, HW_ATL_RX_GET_ADDR_DESTA_FL3L4(location), 0U);
}
-void hw_atl_rpfl3l4_ipv4_src_addr_clear(struct aq_hw_s *aq_hw, u8 location)
+void
+hw_atl_rpfl3l4_ipv4_src_addr_clear(struct aq_hw_s *aq_hw, uint8_t location)
{
aq_hw_write_reg(aq_hw, HW_ATL_RX_GET_ADDR_SRCA_FL3L4(location), 0U);
}
-void hw_atl_rpfl3l4_cmd_clear(struct aq_hw_s *aq_hw, u8 location)
+void
+hw_atl_rpfl3l4_cmd_clear(struct aq_hw_s *aq_hw, uint8_t location)
{
aq_hw_write_reg(aq_hw, HW_ATL_RX_GET_ADDR_CTRL_FL3L4(location), 0U);
}
-void hw_atl_rpfl3l4_ipv6_dest_addr_clear(struct aq_hw_s *aq_hw, u8 location)
+void
+hw_atl_rpfl3l4_ipv6_dest_addr_clear(struct aq_hw_s *aq_hw, uint8_t location)
{
int i;
for (i = 0; i < 4; ++i)
aq_hw_write_reg(aq_hw,
- HW_ATL_RX_GET_ADDR_DESTA_FL3L4(location + i),
- 0U);
+ HW_ATL_RX_GET_ADDR_DESTA_FL3L4(location + i), 0U);
}
-void hw_atl_rpfl3l4_ipv6_src_addr_clear(struct aq_hw_s *aq_hw, u8 location)
+void
+hw_atl_rpfl3l4_ipv6_src_addr_clear(struct aq_hw_s *aq_hw, uint8_t location)
{
int i;
for (i = 0; i < 4; ++i)
aq_hw_write_reg(aq_hw,
- HW_ATL_RX_GET_ADDR_SRCA_FL3L4(location + i),
- 0U);
+ HW_ATL_RX_GET_ADDR_SRCA_FL3L4(location + i), 0U);
}
-void hw_atl_rpfl3l4_ipv4_dest_addr_set(struct aq_hw_s *aq_hw, u8 location,
- u32 ipv4_dest)
+void
+hw_atl_rpfl3l4_ipv4_dest_addr_set(struct aq_hw_s *aq_hw, uint8_t location,
+ uint32_t ipv4_dest)
{
aq_hw_write_reg(aq_hw, HW_ATL_RX_GET_ADDR_DESTA_FL3L4(location),
- ipv4_dest);
+ ipv4_dest);
}
-void hw_atl_rpfl3l4_ipv4_src_addr_set(struct aq_hw_s *aq_hw, u8 location,
- u32 ipv4_src)
+void
+hw_atl_rpfl3l4_ipv4_src_addr_set(struct aq_hw_s *aq_hw, uint8_t location,
+ uint32_t ipv4_src)
{
- aq_hw_write_reg(aq_hw,
- HW_ATL_RX_GET_ADDR_SRCA_FL3L4(location),
- ipv4_src);
+ aq_hw_write_reg(aq_hw, HW_ATL_RX_GET_ADDR_SRCA_FL3L4(location),
+ ipv4_src);
}
-void hw_atl_rpfl3l4_cmd_set(struct aq_hw_s *aq_hw, u8 location, u32 cmd)
+void
+hw_atl_rpfl3l4_cmd_set(struct aq_hw_s *aq_hw, uint8_t location, uint32_t cmd)
{
aq_hw_write_reg(aq_hw, HW_ATL_RX_GET_ADDR_CTRL_FL3L4(location), cmd);
}
-void hw_atl_rpfl3l4_ipv6_src_addr_set(struct aq_hw_s *aq_hw, u8 location,
- u32 *ipv6_src)
+void
+hw_atl_rpfl3l4_ipv6_src_addr_set(struct aq_hw_s *aq_hw, uint8_t location,
+ uint32_t *ipv6_src)
{
int i;
for (i = 0; i < 4; ++i)
aq_hw_write_reg(aq_hw,
- HW_ATL_RX_GET_ADDR_SRCA_FL3L4(location + i),
- ipv6_src[i]);
+ HW_ATL_RX_GET_ADDR_SRCA_FL3L4(location + i), ipv6_src[i]);
}
-void hw_atl_rpfl3l4_ipv6_dest_addr_set(struct aq_hw_s *aq_hw, u8 location,
- u32 *ipv6_dest)
+void
+hw_atl_rpfl3l4_ipv6_dest_addr_set(struct aq_hw_s *aq_hw, uint8_t location,
+ uint32_t *ipv6_dest)
{
int i;
for (i = 0; i < 4; ++i)
aq_hw_write_reg(aq_hw,
- HW_ATL_RX_GET_ADDR_DESTA_FL3L4(location + i),
- ipv6_dest[i]);
+ HW_ATL_RX_GET_ADDR_DESTA_FL3L4(location + i), ipv6_dest[i]);
}
Index: sys/dev/aq/aq_hw_llh_internal.h
===================================================================
--- sys/dev/aq/aq_hw_llh_internal.h
+++ sys/dev/aq/aq_hw_llh_internal.h
@@ -3248,7 +3248,7 @@
/* tx dma descriptor base address msw definitions */
#define tx_dma_desc_base_addrmsw_adr(descriptor) \
- (0x00007c04u + (descriptor) * 0x40)
+ (0x00007c04u + (descriptor) * 0x40)
/* tx interrupt moderation control register definitions
* Preprocessor definitions for TX Interrupt Moderation Control Register
Index: sys/dev/aq/aq_irq.c
===================================================================
--- sys/dev/aq/aq_irq.c
+++ sys/dev/aq/aq_irq.c
@@ -37,14 +37,14 @@
__FBSDID("$FreeBSD$");
#include <sys/param.h>
+#include <sys/bitstring.h>
#include <sys/kernel.h>
#include <sys/socket.h>
-#include <sys/bitstring.h>
+#include <net/ethernet.h>
#include <net/if.h>
+#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_var.h>
-#include <net/if_dl.h>
-#include <net/ethernet.h>
#include <net/iflib.h>
#include "aq_common.h"
@@ -54,60 +54,60 @@
#include "aq_hw.h"
#include "aq_hw_llh.h"
-int aq_update_hw_stats(aq_dev_t *aq_dev)
+int
+aq_update_hw_stats(aq_dev_t *aq_dev)
{
- struct aq_hw *hw = &aq_dev->hw;
- struct aq_hw_fw_mbox mbox;
+ struct aq_hw *hw = &aq_dev->hw;
+ struct aq_hw_fw_mbox mbox;
- aq_hw_mpi_read_stats(hw, &mbox);
+ aq_hw_mpi_read_stats(hw, &mbox);
#define AQ_SDELTA(_N_) (aq_dev->curr_stats._N_ += \
- mbox.stats._N_ - aq_dev->last_stats._N_)
- if (aq_dev->linkup) {
- AQ_SDELTA(uprc);
- AQ_SDELTA(mprc);
- AQ_SDELTA(bprc);
- AQ_SDELTA(cprc);
- AQ_SDELTA(erpt);
-
- AQ_SDELTA(uptc);
- AQ_SDELTA(mptc);
- AQ_SDELTA(bptc);
- AQ_SDELTA(erpr);
-
- AQ_SDELTA(ubrc);
- AQ_SDELTA(ubtc);
- AQ_SDELTA(mbrc);
- AQ_SDELTA(mbtc);
- AQ_SDELTA(bbrc);
- AQ_SDELTA(bbtc);
-
- AQ_SDELTA(ptc);
- AQ_SDELTA(prc);
-
- AQ_SDELTA(dpc);
-
- aq_dev->curr_stats.brc = aq_dev->curr_stats.ubrc +
- aq_dev->curr_stats.mbrc +
- aq_dev->curr_stats.bbrc;
- aq_dev->curr_stats.btc = aq_dev->curr_stats.ubtc +
- aq_dev->curr_stats.mbtc +
- aq_dev->curr_stats.bbtc;
-
- }
+ mbox.stats._N_ - aq_dev->last_stats._N_)
+ if (aq_dev->linkup) {
+ AQ_SDELTA(uprc);
+ AQ_SDELTA(mprc);
+ AQ_SDELTA(bprc);
+ AQ_SDELTA(cprc);
+ AQ_SDELTA(erpt);
+
+ AQ_SDELTA(uptc);
+ AQ_SDELTA(mptc);
+ AQ_SDELTA(bptc);
+ AQ_SDELTA(erpr);
+
+ AQ_SDELTA(ubrc);
+ AQ_SDELTA(ubtc);
+ AQ_SDELTA(mbrc);
+ AQ_SDELTA(mbtc);
+ AQ_SDELTA(bbrc);
+ AQ_SDELTA(bbtc);
+
+ AQ_SDELTA(ptc);
+ AQ_SDELTA(prc);
+
+ AQ_SDELTA(dpc);
+
+ aq_dev->curr_stats.brc = aq_dev->curr_stats.ubrc +
+ aq_dev->curr_stats.mbrc + aq_dev->curr_stats.bbrc;
+ aq_dev->curr_stats.btc = aq_dev->curr_stats.ubtc +
+ aq_dev->curr_stats.mbtc + aq_dev->curr_stats.bbtc;
+
+ }
#undef AQ_SDELTA
- memcpy(&aq_dev->last_stats, &mbox.stats, sizeof(mbox.stats));
+ memcpy(&aq_dev->last_stats, &mbox.stats, sizeof(mbox.stats));
- return (0);
+ return (0);
}
-void aq_if_update_admin_status(if_ctx_t ctx)
+void
+aq_if_update_admin_status(if_ctx_t ctx)
{
aq_dev_t *aq_dev = iflib_get_softc(ctx);
struct aq_hw *hw = &aq_dev->hw;
- u32 link_speed;
+ uint32_t link_speed;
// AQ_DBG_ENTER();
@@ -156,7 +156,8 @@
/**************************************************************************/
/* interrupt service routine (Top half) */
/**************************************************************************/
-int aq_isr_rx(void *arg)
+int
+aq_isr_rx(void *arg)
{
struct aq_ring *ring = arg;
struct aq_dev *aq_dev = ring->dev;
@@ -171,7 +172,8 @@
/**************************************************************************/
/* interrupt service routine (Top half) */
/**************************************************************************/
-int aq_linkstat_isr(void *arg)
+int
+aq_linkstat_isr(void *arg)
{
aq_dev_t *aq_dev = arg;
struct aq_hw *hw = &aq_dev->hw;
Index: sys/dev/aq/aq_main.c
===================================================================
--- sys/dev/aq/aq_main.c
+++ sys/dev/aq/aq_main.c
@@ -32,23 +32,26 @@
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
+#include "opt_inet.h"
+#include "opt_inet6.h"
+#include "opt_rss.h"
+
#include <sys/param.h>
-#include <sys/malloc.h>
-#include <sys/socket.h>
-#include <sys/kernel.h>
+#include <sys/bitstring.h>
#include <sys/bus.h>
+#include <sys/endian.h>
+#include <sys/kernel.h>
+#include <sys/malloc.h>
#include <sys/module.h>
+#include <sys/priv.h>
#include <sys/rman.h>
-#include <sys/endian.h>
+#include <sys/sbuf.h>
+#include <sys/socket.h>
#include <sys/sockio.h>
-#include <sys/priv.h>
#include <sys/sysctl.h>
-#include <sys/sbuf.h>
-#include <sys/bitstring.h>
#include <machine/bus.h>
#include <machine/resource.h>
@@ -56,18 +59,14 @@
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
+#include <net/ethernet.h>
#include <net/if.h>
+#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_var.h>
-#include <net/if_dl.h>
-#include <net/ethernet.h>
#include <net/iflib.h>
#include <net/rss_config.h>
-#include "opt_inet.h"
-#include "opt_inet6.h"
-#include "opt_rss.h"
-
#include "ifdi_if.h"
#include "aq_device.h"
@@ -110,25 +109,41 @@
#define AQ_DEVICE_ID_AQC112S 0x92B1
static pci_vendor_info_t aq_vendor_info_array[] = {
- PVID(AQUANTIA_VENDOR_ID, AQ_DEVICE_ID_0001, "Aquantia AQtion 10Gbit Network Adapter"),
- PVID(AQUANTIA_VENDOR_ID, AQ_DEVICE_ID_D107, "Aquantia AQtion 10Gbit Network Adapter"),
- PVID(AQUANTIA_VENDOR_ID, AQ_DEVICE_ID_D108, "Aquantia AQtion 5Gbit Network Adapter"),
- PVID(AQUANTIA_VENDOR_ID, AQ_DEVICE_ID_D109, "Aquantia AQtion 2.5Gbit Network Adapter"),
-
- PVID(AQUANTIA_VENDOR_ID, AQ_DEVICE_ID_AQC107, "Aquantia AQtion 10Gbit Network Adapter"),
- PVID(AQUANTIA_VENDOR_ID, AQ_DEVICE_ID_AQC108, "Aquantia AQtion 5Gbit Network Adapter"),
- PVID(AQUANTIA_VENDOR_ID, AQ_DEVICE_ID_AQC109, "Aquantia AQtion 2.5Gbit Network Adapter"),
- PVID(AQUANTIA_VENDOR_ID, AQ_DEVICE_ID_AQC100, "Aquantia AQtion 10Gbit Network Adapter"),
-
- PVID(AQUANTIA_VENDOR_ID, AQ_DEVICE_ID_AQC107S, "Aquantia AQtion 10Gbit Network Adapter"),
- PVID(AQUANTIA_VENDOR_ID, AQ_DEVICE_ID_AQC108S, "Aquantia AQtion 5Gbit Network Adapter"),
- PVID(AQUANTIA_VENDOR_ID, AQ_DEVICE_ID_AQC109S, "Aquantia AQtion 2.5Gbit Network Adapter"),
- PVID(AQUANTIA_VENDOR_ID, AQ_DEVICE_ID_AQC100S, "Aquantia AQtion 10Gbit Network Adapter"),
-
- PVID(AQUANTIA_VENDOR_ID, AQ_DEVICE_ID_AQC111, "Aquantia AQtion 5Gbit Network Adapter"),
- PVID(AQUANTIA_VENDOR_ID, AQ_DEVICE_ID_AQC112, "Aquantia AQtion 2.5Gbit Network Adapter"),
- PVID(AQUANTIA_VENDOR_ID, AQ_DEVICE_ID_AQC111S, "Aquantia AQtion 5Gbit Network Adapter"),
- PVID(AQUANTIA_VENDOR_ID, AQ_DEVICE_ID_AQC112S, "Aquantia AQtion 2.5Gbit Network Adapter"),
+ PVID(AQUANTIA_VENDOR_ID, AQ_DEVICE_ID_0001,
+ "Aquantia AQtion 10Gbit Network Adapter"),
+ PVID(AQUANTIA_VENDOR_ID, AQ_DEVICE_ID_D107,
+ "Aquantia AQtion 10Gbit Network Adapter"),
+ PVID(AQUANTIA_VENDOR_ID, AQ_DEVICE_ID_D108,
+ "Aquantia AQtion 5Gbit Network Adapter"),
+ PVID(AQUANTIA_VENDOR_ID, AQ_DEVICE_ID_D109,
+ "Aquantia AQtion 2.5Gbit Network Adapter"),
+
+ PVID(AQUANTIA_VENDOR_ID, AQ_DEVICE_ID_AQC107,
+ "Aquantia AQtion 10Gbit Network Adapter"),
+ PVID(AQUANTIA_VENDOR_ID, AQ_DEVICE_ID_AQC108,
+ "Aquantia AQtion 5Gbit Network Adapter"),
+ PVID(AQUANTIA_VENDOR_ID, AQ_DEVICE_ID_AQC109,
+ "Aquantia AQtion 2.5Gbit Network Adapter"),
+ PVID(AQUANTIA_VENDOR_ID, AQ_DEVICE_ID_AQC100,
+ "Aquantia AQtion 10Gbit Network Adapter"),
+
+ PVID(AQUANTIA_VENDOR_ID, AQ_DEVICE_ID_AQC107S,
+ "Aquantia AQtion 10Gbit Network Adapter"),
+ PVID(AQUANTIA_VENDOR_ID, AQ_DEVICE_ID_AQC108S,
+ "Aquantia AQtion 5Gbit Network Adapter"),
+ PVID(AQUANTIA_VENDOR_ID, AQ_DEVICE_ID_AQC109S,
+ "Aquantia AQtion 2.5Gbit Network Adapter"),
+ PVID(AQUANTIA_VENDOR_ID, AQ_DEVICE_ID_AQC100S,
+ "Aquantia AQtion 10Gbit Network Adapter"),
+
+ PVID(AQUANTIA_VENDOR_ID, AQ_DEVICE_ID_AQC111,
+ "Aquantia AQtion 5Gbit Network Adapter"),
+ PVID(AQUANTIA_VENDOR_ID, AQ_DEVICE_ID_AQC112,
+ "Aquantia AQtion 2.5Gbit Network Adapter"),
+ PVID(AQUANTIA_VENDOR_ID, AQ_DEVICE_ID_AQC111S,
+ "Aquantia AQtion 5Gbit Network Adapter"),
+ PVID(AQUANTIA_VENDOR_ID, AQ_DEVICE_ID_AQC112S,
+ "Aquantia AQtion 2.5Gbit Network Adapter"),
PVID_END
};
@@ -292,19 +307,21 @@
static SYSCTL_NODE(_hw, OID_AUTO, aq, CTLFLAG_RD, 0, "Atlantic driver parameters");
/* UDP Receive-Side Scaling */
static int aq_enable_rss_udp = 1;
-SYSCTL_INT(_hw_aq, OID_AUTO, enable_rss_udp, CTLFLAG_RDTUN, &aq_enable_rss_udp, 0,
- "Enable Receive-Side Scaling (RSS) for UDP");
+SYSCTL_INT(_hw_aq, OID_AUTO, enable_rss_udp, CTLFLAG_RDTUN, &aq_enable_rss_udp,
+ 0, "Enable Receive-Side Scaling (RSS) for UDP");
/*
* Device Methods
*/
-static void *aq_register(device_t dev)
+static void *
+aq_register(device_t dev)
{
return (&aq_sctx_init);
}
-static int aq_if_attach_pre(if_ctx_t ctx)
+static int
+aq_if_attach_pre(if_ctx_t ctx)
{
struct aq_dev *softc;
struct aq_hw *hw;
@@ -335,7 +352,7 @@
softc->mmio_tag = rman_get_bustag(softc->mmio_res);
softc->mmio_handle = rman_get_bushandle(softc->mmio_res);
softc->mmio_size = rman_get_size(softc->mmio_res);
- softc->hw.hw_addr = (u8*) softc->mmio_handle;
+	softc->hw.hw_addr = (uint8_t *)softc->mmio_handle;
hw = &softc->hw;
hw->link_rate = aq_fw_speed_auto;
hw->itr = -1;
@@ -371,22 +388,21 @@
#endif
scctx->isc_tx_csum_flags = CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_TSO;
#if __FreeBSD__ >= 12
- scctx->isc_capabilities = IFCAP_RXCSUM | IFCAP_TXCSUM | IFCAP_HWCSUM | IFCAP_TSO |
- IFCAP_JUMBO_MTU | IFCAP_VLAN_HWFILTER |
- IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING |
- IFCAP_VLAN_HWCSUM;
+ scctx->isc_capabilities = IFCAP_RXCSUM | IFCAP_TXCSUM | IFCAP_HWCSUM |
+ IFCAP_TSO | IFCAP_JUMBO_MTU | IFCAP_VLAN_HWFILTER | IFCAP_VLAN_MTU |
+ IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWCSUM;
scctx->isc_capenable = scctx->isc_capabilities;
#else
if_t ifp;
ifp = iflib_get_ifp(ctx);
- if_setcapenable(ifp, IFCAP_RXCSUM | IFCAP_TXCSUM | IFCAP_HWCSUM | IFCAP_TSO |
- IFCAP_JUMBO_MTU | IFCAP_VLAN_HWFILTER |
- IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING |
- IFCAP_VLAN_HWCSUM;
+ if_setcapenable(ifp, IFCAP_RXCSUM | IFCAP_TXCSUM | IFCAP_HWCSUM |
+ IFCAP_TSO | IFCAP_JUMBO_MTU | IFCAP_VLAN_HWFILTER | IFCAP_VLAN_MTU |
+	    IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWCSUM);
#endif
scctx->isc_tx_nsegments = 31,
scctx->isc_tx_tso_segments_max = 31;
- scctx->isc_tx_tso_size_max = HW_ATL_B0_TSO_SIZE - sizeof(struct ether_vlan_header);
+ scctx->isc_tx_tso_size_max =
+ HW_ATL_B0_TSO_SIZE - sizeof(struct ether_vlan_header);
scctx->isc_tx_tso_segsize_max = HW_ATL_B0_MTU_JUMBO;
scctx->isc_min_frame_size = 52;
scctx->isc_txrx = &aq_txrx;
@@ -415,7 +431,8 @@
}
-static int aq_if_attach_post(if_ctx_t ctx)
+static int
+aq_if_attach_post(if_ctx_t ctx)
{
struct aq_dev *softc;
int rc;
@@ -434,7 +451,7 @@
case IFLIB_INTR_LEGACY:
rc = EOPNOTSUPP;
goto exit;
- goto exit;
+ goto exit;
break;
case IFLIB_INTR_MSI:
break;
@@ -458,7 +475,8 @@
}
-static int aq_if_detach(if_ctx_t ctx)
+static int
+aq_if_detach(if_ctx_t ctx)
{
struct aq_dev *softc;
int i;
@@ -483,7 +501,8 @@
return (0);
}
-static int aq_if_shutdown(if_ctx_t ctx)
+static int
+aq_if_shutdown(if_ctx_t ctx)
{
AQ_DBG_ENTER();
@@ -494,7 +513,8 @@
return (0);
}
-static int aq_if_suspend(if_ctx_t ctx)
+static int
+aq_if_suspend(if_ctx_t ctx)
{
AQ_DBG_ENTER();
@@ -504,7 +524,8 @@
return (0);
}
-static int aq_if_resume(if_ctx_t ctx)
+static int
+aq_if_resume(if_ctx_t ctx)
{
AQ_DBG_ENTER();
@@ -515,8 +536,9 @@
}
/* Soft queue setup and teardown */
-static int aq_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs,
- uint64_t *paddrs, int ntxqs, int ntxqsets)
+static int
+aq_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
+ int ntxqs, int ntxqsets)
{
struct aq_dev *softc;
struct aq_ring *ring;
@@ -553,8 +575,9 @@
return (rc);
}
-static int aq_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs,
- uint64_t *paddrs, int nrxqs, int nrxqsets)
+static int
+aq_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
+ int nrxqs, int nrxqsets)
{
struct aq_dev *softc;
struct aq_ring *ring;
@@ -568,7 +591,8 @@
M_AQ, M_NOWAIT | M_ZERO);
if (!ring){
rc = ENOMEM;
- device_printf(softc->dev, "atlantic: rx_ring malloc fail\n");
+ device_printf(softc->dev,
+ "atlantic: rx_ring malloc fail\n");
goto fail;
}
@@ -601,7 +625,8 @@
return (rc);
}
-static void aq_if_queues_free(if_ctx_t ctx)
+static void
+aq_if_queues_free(if_ctx_t ctx)
{
struct aq_dev *softc;
int i;
@@ -629,7 +654,8 @@
}
/* Device configuration */
-static void aq_if_init(if_ctx_t ctx)
+static void
+aq_if_init(if_ctx_t ctx)
{
struct aq_dev *softc;
struct aq_hw *hw;
@@ -641,7 +667,7 @@
hw = &softc->hw;
err = aq_hw_init(&softc->hw, softc->hw.mac_addr, softc->msix,
- softc->scctx->isc_intr == IFLIB_INTR_MSIX);
+ softc->scctx->isc_intr == IFLIB_INTR_MSIX);
if (err != EOK) {
device_printf(softc->dev, "atlantic: aq_hw_init: %d", err);
}
@@ -654,22 +680,26 @@
struct aq_ring *ring = softc->tx_rings[i];
err = aq_ring_tx_init(&softc->hw, ring);
if (err) {
- device_printf(softc->dev, "atlantic: aq_ring_tx_init: %d", err);
+ device_printf(softc->dev,
+ "atlantic: aq_ring_tx_init: %d", err);
}
err = aq_ring_tx_start(hw, ring);
if (err != EOK) {
- device_printf(softc->dev, "atlantic: aq_ring_tx_start: %d", err);
+ device_printf(softc->dev,
+ "atlantic: aq_ring_tx_start: %d", err);
}
}
for (i = 0; i < softc->rx_rings_count; i++) {
struct aq_ring *ring = softc->rx_rings[i];
err = aq_ring_rx_init(&softc->hw, ring);
if (err) {
- device_printf(softc->dev, "atlantic: aq_ring_rx_init: %d", err);
+ device_printf(softc->dev,
+ "atlantic: aq_ring_rx_init: %d", err);
}
err = aq_ring_rx_start(hw, ring);
if (err != EOK) {
- device_printf(softc->dev, "atlantic: aq_ring_rx_start: %d", err);
+ device_printf(softc->dev,
+ "atlantic: aq_ring_rx_start: %d", err);
}
aq_if_rx_queue_intr_enable(ctx, i);
}
@@ -685,7 +715,8 @@
}
-static void aq_if_stop(if_ctx_t ctx)
+static void
+aq_if_stop(if_ctx_t ctx)
{
struct aq_dev *softc;
struct aq_hw *hw;
@@ -715,7 +746,8 @@
AQ_DBG_EXIT(0);
}
-static uint64_t aq_if_get_counter(if_ctx_t ctx, ift_counter cnt)
+static uint64_t
+aq_if_get_counter(if_ctx_t ctx, ift_counter cnt)
{
struct aq_dev *softc = iflib_get_softc(ctx);
if_t ifp = iflib_get_ifp(ctx);
@@ -733,11 +765,12 @@
}
#if __FreeBSD_version >= 1300054
-static u_int aq_mc_filter_apply(void *arg, struct sockaddr_dl *dl, u_int count)
+static u_int
+aq_mc_filter_apply(void *arg, struct sockaddr_dl *dl, u_int count)
{
struct aq_dev *softc = arg;
struct aq_hw *hw = &softc->hw;
- u8 *mac_addr = NULL;
+ uint8_t *mac_addr = NULL;
if (count == AQ_HW_MAC_MAX)
return (0);
@@ -749,11 +782,12 @@
return (1);
}
#else
-static int aq_mc_filter_apply(void *arg, struct ifmultiaddr *ifma, int count)
+static int
+aq_mc_filter_apply(void *arg, struct ifmultiaddr *ifma, int count)
{
struct aq_dev *softc = arg;
struct aq_hw *hw = &softc->hw;
- u8 *mac_addr = NULL;
+ uint8_t *mac_addr = NULL;
if (ifma->ifma_addr->sa_family != AF_LINK)
return (0);
@@ -768,12 +802,14 @@
}
#endif
-static bool aq_is_mc_promisc_required(struct aq_dev *softc)
+static bool
+aq_is_mc_promisc_required(struct aq_dev *softc)
{
return (softc->mcnt >= AQ_HW_MAC_MAX);
}
-static void aq_if_multi_set(if_ctx_t ctx)
+static void
+aq_if_multi_set(if_ctx_t ctx)
{
struct aq_dev *softc = iflib_get_softc(ctx);
if_t ifp = iflib_get_ifp(ctx);
@@ -784,12 +820,11 @@
#else
softc->mcnt = if_multiaddr_count(iflib_get_ifp(ctx), AQ_HW_MAC_MAX);
#endif
- if (softc->mcnt >= AQ_HW_MAC_MAX)
- {
+ if (softc->mcnt >= AQ_HW_MAC_MAX) {
aq_hw_set_promisc(hw, !!(if_getflags(ifp) & IFF_PROMISC),
- aq_is_vlan_promisc_required(softc),
- !!(if_getflags(ifp) & IFF_ALLMULTI) || aq_is_mc_promisc_required(softc));
- }else{
+ aq_is_vlan_promisc_required(softc),
+ !!(if_getflags(ifp) & IFF_ALLMULTI) || aq_is_mc_promisc_required(softc));
+ } else {
#if __FreeBSD_version >= 1300054
if_foreach_llmaddr(iflib_get_ifp(ctx), &aq_mc_filter_apply, softc);
#else
@@ -799,7 +834,8 @@
AQ_DBG_EXIT(0);
}
-static int aq_if_mtu_set(if_ctx_t ctx, uint32_t mtu)
+static int
+aq_if_mtu_set(if_ctx_t ctx, uint32_t mtu)
{
int err = 0;
AQ_DBG_ENTER();
@@ -808,7 +844,8 @@
return (err);
}
-static void aq_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr)
+static void
+aq_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr)
{
if_t ifp;
@@ -821,7 +858,8 @@
AQ_DBG_EXIT(0);
}
-static int aq_if_media_change(if_ctx_t ctx)
+static int
+aq_if_media_change(if_ctx_t ctx)
{
struct aq_dev *softc = iflib_get_softc(ctx);
if_t ifp = iflib_get_ifp(ctx);
@@ -844,7 +882,8 @@
return (rc);
}
-static int aq_if_promisc_set(if_ctx_t ctx, int flags)
+static int
+aq_if_promisc_set(if_ctx_t ctx, int flags)
{
struct aq_dev *softc;
@@ -853,14 +892,15 @@
softc = iflib_get_softc(ctx);
aq_hw_set_promisc(&softc->hw, !!(flags & IFF_PROMISC),
- aq_is_vlan_promisc_required(softc),
- !!(flags & IFF_ALLMULTI) || aq_is_mc_promisc_required(softc));
+ aq_is_vlan_promisc_required(softc),
+ !!(flags & IFF_ALLMULTI) || aq_is_mc_promisc_required(softc));
AQ_DBG_EXIT(0);
return (0);
}
-static void aq_if_timer(if_ctx_t ctx, uint16_t qid)
+static void
+aq_if_timer(if_ctx_t ctx, uint16_t qid)
{
struct aq_dev *softc;
uint64_t ticks_now;
@@ -882,7 +922,8 @@
}
/* Interrupt enable / disable */
-static void aq_if_enable_intr(if_ctx_t ctx)
+static void
+aq_if_enable_intr(if_ctx_t ctx)
{
struct aq_dev *softc = iflib_get_softc(ctx);
struct aq_hw *hw = &softc->hw;
@@ -895,7 +936,8 @@
AQ_DBG_EXIT(0);
}
-static void aq_if_disable_intr(if_ctx_t ctx)
+static void
+aq_if_disable_intr(if_ctx_t ctx)
{
struct aq_dev *softc = iflib_get_softc(ctx);
struct aq_hw *hw = &softc->hw;
@@ -908,7 +950,8 @@
AQ_DBG_EXIT(0);
}
-static int aq_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid)
+static int
+aq_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid)
{
struct aq_dev *softc = iflib_get_softc(ctx);
struct aq_hw *hw = &softc->hw;
@@ -921,7 +964,8 @@
return (0);
}
-static int aq_if_msix_intr_assign(if_ctx_t ctx, int msix)
+static int
+aq_if_msix_intr_assign(if_ctx_t ctx, int msix)
{
struct aq_dev *softc;
int i, vector = 0, rc;
@@ -952,22 +996,22 @@
for (i = 0; i < softc->tx_rings_count; i++, vector++) {
snprintf(irq_name, sizeof(irq_name), "txq%d", i);
- iflib_softirq_alloc_generic(ctx, &softc->rx_rings[i]->irq, IFLIB_INTR_TX,
- softc->tx_rings[i], i, irq_name);
+ iflib_softirq_alloc_generic(ctx, &softc->rx_rings[i]->irq,
+ IFLIB_INTR_TX, softc->tx_rings[i], i, irq_name);
softc->tx_rings[i]->msix = (vector % softc->rx_rings_count);
device_printf(softc->dev, "Assign IRQ %u to tx ring %u\n",
- softc->tx_rings[i]->msix, softc->tx_rings[i]->index);
+ softc->tx_rings[i]->msix, softc->tx_rings[i]->index);
}
rc = iflib_irq_alloc_generic(ctx, &softc->irq, rx_vectors + 1,
- IFLIB_INTR_ADMIN, aq_linkstat_isr,
- softc, 0, "aq");
+ IFLIB_INTR_ADMIN, aq_linkstat_isr, softc, 0, "aq");
softc->msix = rx_vectors;
device_printf(softc->dev, "Assign IRQ %u to admin proc \n",
- rx_vectors);
+ rx_vectors);
if (rc) {
- device_printf(iflib_get_dev(ctx), "Failed to register admin handler");
+ device_printf(iflib_get_dev(ctx),
+ "Failed to register admin handler");
i = softc->rx_rings_count;
goto fail;
}
@@ -981,7 +1025,8 @@
return (rc);
}
-static bool aq_is_vlan_promisc_required(struct aq_dev *softc)
+static bool
+aq_is_vlan_promisc_required(struct aq_dev *softc)
{
int vlan_tag_count;
@@ -994,7 +1039,8 @@
}
-static void aq_update_vlan_filters(struct aq_dev *softc)
+static void
+aq_update_vlan_filters(struct aq_dev *softc)
{
struct aq_rx_filter_vlan aq_vlans[AQ_HW_VLAN_MAX_FILTERS];
struct aq_hw *hw = &softc->hw;
@@ -1021,7 +1067,8 @@
}
/* VLAN support */
-static void aq_if_vlan_register(if_ctx_t ctx, uint16_t vtag)
+static void
+aq_if_vlan_register(if_ctx_t ctx, uint16_t vtag)
{
struct aq_dev *softc = iflib_get_softc(ctx);
@@ -1034,7 +1081,8 @@
AQ_DBG_EXIT(0);
}
-static void aq_if_vlan_unregister(if_ctx_t ctx, uint16_t vtag)
+static void
+aq_if_vlan_unregister(if_ctx_t ctx, uint16_t vtag)
{
struct aq_dev *softc = iflib_get_softc(ctx);
@@ -1047,7 +1095,8 @@
AQ_DBG_EXIT(0);
}
-static void aq_if_led_func(if_ctx_t ctx, int onoff)
+static void
+aq_if_led_func(if_ctx_t ctx, int onoff)
{
struct aq_dev *softc = iflib_get_softc(ctx);
struct aq_hw *hw = &softc->hw;
@@ -1059,7 +1108,8 @@
AQ_DBG_EXIT(0);
}
-static int aq_hw_capabilities(struct aq_dev *softc)
+static int
+aq_hw_capabilities(struct aq_dev *softc)
{
if (pci_get_vendor(softc->dev) != AQUANTIA_VENDOR_ID)
@@ -1106,7 +1156,8 @@
return (0);
}
-static int aq_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS)
+static int
+aq_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS)
{
struct aq_dev *softc = (struct aq_dev *)arg1;
device_t dev = softc->dev;
@@ -1142,7 +1193,8 @@
return (0);
}
-static int aq_sysctl_print_tx_head(SYSCTL_HANDLER_ARGS)
+static int
+aq_sysctl_print_tx_head(SYSCTL_HANDLER_ARGS)
{
struct aq_ring *ring = arg1;
int error = 0;
@@ -1160,7 +1212,8 @@
return (0);
}
-static int aq_sysctl_print_tx_tail(SYSCTL_HANDLER_ARGS)
+static int
+aq_sysctl_print_tx_tail(SYSCTL_HANDLER_ARGS)
{
struct aq_ring *ring = arg1;
int error = 0;
@@ -1178,7 +1231,8 @@
return (0);
}
-static int aq_sysctl_print_rx_head(SYSCTL_HANDLER_ARGS)
+static int
+aq_sysctl_print_rx_head(SYSCTL_HANDLER_ARGS)
{
struct aq_ring *ring = arg1;
int error = 0;
@@ -1196,7 +1250,8 @@
return (0);
}
-static int aq_sysctl_print_rx_tail(SYSCTL_HANDLER_ARGS)
+static int
+aq_sysctl_print_rx_tail(SYSCTL_HANDLER_ARGS)
{
struct aq_ring *ring = arg1;
int error = 0;
@@ -1214,116 +1269,117 @@
return (0);
}
-static void aq_add_stats_sysctls(struct aq_dev *softc)
+static void
+aq_add_stats_sysctls(struct aq_dev *softc)
{
- device_t dev = softc->dev;
- struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
- struct sysctl_oid *tree = device_get_sysctl_tree(dev);
- struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
- struct aq_stats_s *stats = &softc->curr_stats;
- struct sysctl_oid *stat_node, *queue_node;
- struct sysctl_oid_list *stat_list, *queue_list;
+ device_t dev = softc->dev;
+ struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
+ struct sysctl_oid *tree = device_get_sysctl_tree(dev);
+ struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
+ struct aq_stats_s *stats = &softc->curr_stats;
+ struct sysctl_oid *stat_node, *queue_node;
+ struct sysctl_oid_list *stat_list, *queue_list;
#define QUEUE_NAME_LEN 32
- char namebuf[QUEUE_NAME_LEN];
+ char namebuf[QUEUE_NAME_LEN];
/* RSS configuration */
SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "print_rss_config",
- CTLTYPE_STRING | CTLFLAG_RD, softc, 0,
- aq_sysctl_print_rss_config, "A", "Prints RSS Configuration");
-
- /* Driver Statistics */
- for (int i = 0; i < softc->tx_rings_count; i++) {
- struct aq_ring *ring = softc->tx_rings[i];
- snprintf(namebuf, QUEUE_NAME_LEN, "tx_queue%d", i);
- queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
- CTLFLAG_RD, NULL, "Queue Name");
- queue_list = SYSCTL_CHILDREN(queue_node);
-
- SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_pkts",
- CTLFLAG_RD, &(ring->stats.tx_pkts), "TX Packets");
- SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_bytes",
- CTLFLAG_RD, &(ring->stats.tx_bytes), "TX Octets");
- SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_drops",
- CTLFLAG_RD, &(ring->stats.tx_drops), "TX Drops");
- SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_queue_full",
- CTLFLAG_RD, &(ring->stats.tx_queue_full), "TX Queue Full");
- SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "tx_head",
- CTLTYPE_UINT | CTLFLAG_RD, ring, 0,
- aq_sysctl_print_tx_head, "IU", "ring head pointer");
- SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "tx_tail",
- CTLTYPE_UINT | CTLFLAG_RD, ring, 0,
+ CTLTYPE_STRING | CTLFLAG_RD, softc, 0,
+ aq_sysctl_print_rss_config, "A", "Prints RSS Configuration");
+
+ /* Driver Statistics */
+ for (int i = 0; i < softc->tx_rings_count; i++) {
+ struct aq_ring *ring = softc->tx_rings[i];
+ snprintf(namebuf, QUEUE_NAME_LEN, "tx_queue%d", i);
+ queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
+ CTLFLAG_RD, NULL, "Queue Name");
+ queue_list = SYSCTL_CHILDREN(queue_node);
+
+ SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_pkts",
+ CTLFLAG_RD, &(ring->stats.tx_pkts), "TX Packets");
+ SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_bytes",
+ CTLFLAG_RD, &(ring->stats.tx_bytes), "TX Octets");
+ SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_drops",
+ CTLFLAG_RD, &(ring->stats.tx_drops), "TX Drops");
+ SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_queue_full",
+ CTLFLAG_RD, &(ring->stats.tx_queue_full), "TX Queue Full");
+ SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "tx_head",
+ CTLTYPE_UINT | CTLFLAG_RD, ring, 0,
+ aq_sysctl_print_tx_head, "IU", "ring head pointer");
+ SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "tx_tail",
+ CTLTYPE_UINT | CTLFLAG_RD, ring, 0,
aq_sysctl_print_tx_tail, "IU", "ring tail pointer");
- }
-
- for (int i = 0; i < softc->rx_rings_count; i++) {
- struct aq_ring *ring = softc->rx_rings[i];
- snprintf(namebuf, QUEUE_NAME_LEN, "rx_queue%d", i);
- queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
- CTLFLAG_RD, NULL, "Queue Name");
- queue_list = SYSCTL_CHILDREN(queue_node);
-
- SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_pkts",
- CTLFLAG_RD, &(ring->stats.rx_pkts), "RX Packets");
- SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
- CTLFLAG_RD, &(ring->stats.rx_bytes), "TX Octets");
- SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "jumbo_pkts",
- CTLFLAG_RD, &(ring->stats.jumbo_pkts), "Jumbo Packets");
- SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_err",
- CTLFLAG_RD, &(ring->stats.rx_err), "RX Errors");
- SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irq",
- CTLFLAG_RD, &(ring->stats.irq), "RX interrupts");
- SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rx_head",
- CTLTYPE_UINT | CTLFLAG_RD, ring, 0,
+ }
+
+ for (int i = 0; i < softc->rx_rings_count; i++) {
+ struct aq_ring *ring = softc->rx_rings[i];
+ snprintf(namebuf, QUEUE_NAME_LEN, "rx_queue%d", i);
+ queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
+ CTLFLAG_RD, NULL, "Queue Name");
+ queue_list = SYSCTL_CHILDREN(queue_node);
+
+ SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_pkts",
+ CTLFLAG_RD, &(ring->stats.rx_pkts), "RX Packets");
+ SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
+		    CTLFLAG_RD, &(ring->stats.rx_bytes), "RX Octets");
+ SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "jumbo_pkts",
+ CTLFLAG_RD, &(ring->stats.jumbo_pkts), "Jumbo Packets");
+ SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_err",
+ CTLFLAG_RD, &(ring->stats.rx_err), "RX Errors");
+ SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irq",
+ CTLFLAG_RD, &(ring->stats.irq), "RX interrupts");
+ SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rx_head",
+ CTLTYPE_UINT | CTLFLAG_RD, ring, 0,
aq_sysctl_print_rx_head, "IU", "ring head pointer");
- SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rx_tail",
- CTLTYPE_UINT | CTLFLAG_RD, ring, 0,
+ SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rx_tail",
+ CTLTYPE_UINT | CTLFLAG_RD, ring, 0,
aq_sysctl_print_rx_tail, "IU", " ring tail pointer");
- }
-
- stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac",
- CTLFLAG_RD, NULL, "Statistics (read from HW registers)");
- stat_list = SYSCTL_CHILDREN(stat_node);
-
- SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd",
- CTLFLAG_RD, &stats->prc, "Good Packets Received");
- SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "ucast_pkts_rcvd",
- CTLFLAG_RD, &stats->uprc, "Unicast Packets Received");
- SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd",
- CTLFLAG_RD, &stats->mprc, "Multicast Packets Received");
- SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_rcvd",
- CTLFLAG_RD, &stats->bprc, "Broadcast Packets Received");
- SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rsc_pkts_rcvd",
- CTLFLAG_RD, &stats->cprc, "Coalesced Packets Received");
- SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "err_pkts_rcvd",
- CTLFLAG_RD, &stats->erpr, "Errors of Packet Receive");
- SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "drop_pkts_dma",
- CTLFLAG_RD, &stats->dpc, "Dropped Packets in DMA");
- SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd",
- CTLFLAG_RD, &stats->brc, "Good Octets Received");
- SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "ucast_octets_rcvd",
- CTLFLAG_RD, &stats->ubrc, "Unicast Octets Received");
- SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_octets_rcvd",
- CTLFLAG_RD, &stats->mbrc, "Multicast Octets Received");
- SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_octets_rcvd",
- CTLFLAG_RD, &stats->bbrc, "Broadcast Octets Received");
-
- SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
- CTLFLAG_RD, &stats->ptc, "Good Packets Transmitted");
- SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "ucast_pkts_txd",
- CTLFLAG_RD, &stats->uptc, "Unicast Packets Transmitted");
- SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd",
- CTLFLAG_RD, &stats->mptc, "Multicast Packets Transmitted");
- SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd",
- CTLFLAG_RD, &stats->bptc, "Broadcast Packets Transmitted");
-
- SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "err_pkts_txd",
- CTLFLAG_RD, &stats->erpt, "Errors of Packet Transmit");
- SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
- CTLFLAG_RD, &stats->btc, "Good Octets Transmitted");
- SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "ucast_octets_txd",
- CTLFLAG_RD, &stats->ubtc, "Unicast Octets Transmitted");
- SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_octets_txd",
- CTLFLAG_RD, &stats->mbtc, "Multicast Octets Transmitted");
- SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_octets_txd",
- CTLFLAG_RD, &stats->bbtc, "Broadcast Octets Transmitted");
+ }
+
+ stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac",
+ CTLFLAG_RD, NULL, "Statistics (read from HW registers)");
+ stat_list = SYSCTL_CHILDREN(stat_node);
+
+ SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd",
+ CTLFLAG_RD, &stats->prc, "Good Packets Received");
+ SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "ucast_pkts_rcvd",
+ CTLFLAG_RD, &stats->uprc, "Unicast Packets Received");
+ SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd",
+ CTLFLAG_RD, &stats->mprc, "Multicast Packets Received");
+ SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_rcvd",
+ CTLFLAG_RD, &stats->bprc, "Broadcast Packets Received");
+ SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rsc_pkts_rcvd",
+ CTLFLAG_RD, &stats->cprc, "Coalesced Packets Received");
+ SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "err_pkts_rcvd",
+ CTLFLAG_RD, &stats->erpr, "Errors of Packet Receive");
+ SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "drop_pkts_dma",
+ CTLFLAG_RD, &stats->dpc, "Dropped Packets in DMA");
+ SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd",
+ CTLFLAG_RD, &stats->brc, "Good Octets Received");
+ SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "ucast_octets_rcvd",
+ CTLFLAG_RD, &stats->ubrc, "Unicast Octets Received");
+ SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_octets_rcvd",
+ CTLFLAG_RD, &stats->mbrc, "Multicast Octets Received");
+ SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_octets_rcvd",
+ CTLFLAG_RD, &stats->bbrc, "Broadcast Octets Received");
+
+ SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
+ CTLFLAG_RD, &stats->ptc, "Good Packets Transmitted");
+ SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "ucast_pkts_txd",
+ CTLFLAG_RD, &stats->uptc, "Unicast Packets Transmitted");
+ SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd",
+ CTLFLAG_RD, &stats->mptc, "Multicast Packets Transmitted");
+ SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd",
+ CTLFLAG_RD, &stats->bptc, "Broadcast Packets Transmitted");
+
+ SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "err_pkts_txd",
+ CTLFLAG_RD, &stats->erpt, "Errors of Packet Transmit");
+ SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
+ CTLFLAG_RD, &stats->btc, "Good Octets Transmitted");
+ SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "ucast_octets_txd",
+ CTLFLAG_RD, &stats->ubtc, "Unicast Octets Transmitted");
+ SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_octets_txd",
+ CTLFLAG_RD, &stats->mbtc, "Multicast Octets Transmitted");
+ SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_octets_txd",
+ CTLFLAG_RD, &stats->bbtc, "Broadcast Octets Transmitted");
}
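
[Context note, not part of the diff: the CTLTYPE_UINT | CTLFLAG_RD handlers registered above (aq_sysctl_print_tx_head and friends) are defined outside the lines shown in this hunk. A minimal sketch of such a handler is given below, assuming the driver's usual headers (<sys/sysctl.h> plus aq_device.h/aq_ring.h); tdm_tx_desc_head_ptr_get() is an assumed register-access helper, not confirmed by this diff.]

static int
aq_sysctl_print_tx_head(SYSCTL_HANDLER_ARGS)
{
	/* arg1 is the struct aq_ring pointer passed to SYSCTL_ADD_PROC. */
	struct aq_ring *ring = arg1;
	u_int head;

	/* Assumed helper: read the hardware TX head pointer for this ring. */
	head = tdm_tx_desc_head_ptr_get(&ring->dev->hw, ring->index);

	/* sysctl(9) copies the unsigned value out to the requesting process. */
	return (sysctl_handle_int(oidp, &head, 0, req));
}
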
Index: sys/dev/aq/aq_media.c
===================================================================
--- sys/dev/aq/aq_media.c
+++ sys/dev/aq/aq_media.c
@@ -36,14 +36,14 @@
__FBSDID("$FreeBSD$");
#include <sys/param.h>
+#include <sys/bitstring.h>
#include <sys/kernel.h>
#include <sys/socket.h>
-#include <sys/bitstring.h>
+#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_media.h>
#include <net/if_var.h>
#include <net/if_dl.h>
-#include <net/ethernet.h>
#include <net/iflib.h>
#include "aq_device.h"
@@ -53,48 +53,46 @@
#define AQ_HW_SUPPORT_SPEED(softc, s) ((softc)->link_speeds & s)
-void aq_mediastatus_update(aq_dev_t *aq_dev, u32 link_speed, const struct aq_hw_fc_info *fc_neg)
+void
+aq_mediastatus_update(aq_dev_t *aq_dev, uint32_t link_speed,
+const struct aq_hw_fc_info *fc_neg)
{
struct aq_hw *hw = &aq_dev->hw;
aq_dev->media_active = 0;
if (fc_neg->fc_rx)
- aq_dev->media_active |= IFM_ETH_RXPAUSE;
+ aq_dev->media_active |= IFM_ETH_RXPAUSE;
if (fc_neg->fc_tx)
- aq_dev->media_active |= IFM_ETH_TXPAUSE;
+ aq_dev->media_active |= IFM_ETH_TXPAUSE;
switch(link_speed) {
case 100:
aq_dev->media_active |= IFM_100_TX | IFM_FDX;
- break;
-
+ break;
case 1000:
aq_dev->media_active |= IFM_1000_T | IFM_FDX;
- break;
-
+ break;
case 2500:
aq_dev->media_active |= IFM_2500_T | IFM_FDX;
- break;
-
+ break;
case 5000:
aq_dev->media_active |= IFM_5000_T | IFM_FDX;
- break;
-
+ break;
case 10000:
aq_dev->media_active |= IFM_10G_T | IFM_FDX;
- break;
-
+ break;
case 0:
default:
aq_dev->media_active |= IFM_NONE;
- break;
+ break;
}
if (hw->link_rate == aq_fw_speed_auto)
aq_dev->media_active |= IFM_AUTO;
}
-void aq_mediastatus(if_t ifp, struct ifmediareq *ifmr)
+void
+aq_mediastatus(if_t ifp, struct ifmediareq *ifmr)
{
aq_dev_t *aq_dev = iflib_get_softc(if_getsoftc(ifp));
@@ -107,7 +105,8 @@
ifmr->ifm_active |= aq_dev->media_active;
}
-int aq_mediachange(if_t ifp)
+int
+aq_mediachange(if_t ifp)
{
aq_dev_t *aq_dev = iflib_get_softc(if_getsoftc(ifp));
struct aq_hw *hw = &aq_dev->hw;
@@ -172,7 +171,8 @@
if (!(if_getflags(ifp) & IFF_UP))
return (0);
- if ((media_rate != old_media_rate) || (hw->link_rate != old_link_speed)) {
+ if ((media_rate != old_media_rate) ||
+ (hw->link_rate != old_link_speed)) {
// re-initialize hardware with new parameters
aq_hw_set_link_speed(hw, hw->link_rate);
}
@@ -181,17 +181,20 @@
return (0);
}
-static void aq_add_media_types(aq_dev_t *aq_dev, int media_link_speed)
+static void
+aq_add_media_types(aq_dev_t *aq_dev, int media_link_speed)
{
- ifmedia_add(aq_dev->media, IFM_ETHER | media_link_speed | IFM_FDX, 0, NULL);
+ ifmedia_add(aq_dev->media, IFM_ETHER | media_link_speed | IFM_FDX, 0,
+ NULL);
ifmedia_add(aq_dev->media, IFM_ETHER | media_link_speed | IFM_FDX |
- IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE, 0, NULL);
+ IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE, 0, NULL);
ifmedia_add(aq_dev->media, IFM_ETHER | media_link_speed | IFM_FDX |
- IFM_ETH_RXPAUSE, 0, NULL);
+ IFM_ETH_RXPAUSE, 0, NULL);
ifmedia_add(aq_dev->media, IFM_ETHER | media_link_speed | IFM_FDX |
- IFM_ETH_TXPAUSE, 0, NULL);
+ IFM_ETH_TXPAUSE, 0, NULL);
}
-void aq_initmedia(aq_dev_t *aq_dev)
+void
+aq_initmedia(aq_dev_t *aq_dev)
{
AQ_DBG_ENTER();
@@ -213,7 +216,8 @@
aq_add_media_types(aq_dev, IFM_10G_T);
// link is initially autoselect
- ifmedia_set(aq_dev->media, IFM_ETHER | IFM_AUTO | IFM_FDX | IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE);
+ ifmedia_set(aq_dev->media,
+ IFM_ETHER | IFM_AUTO | IFM_FDX | IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE);
AQ_DBG_EXIT(0);
}
Index: sys/dev/aq/aq_ring.h
===================================================================
--- sys/dev/aq/aq_ring.h
+++ sys/dev/aq/aq_ring.h
@@ -41,130 +41,130 @@
typedef volatile struct {
- u32 rss_type:4;
- u32 pkt_type:8;
- u32 rdm_err:1;
- u32 rsvd:6;
- u32 rx_cntl:2;
- u32 sph:1;
- u32 hdr_len:10;
- u32 rss_hash;
- u16 dd:1;
- u16 eop:1;
- u16 rx_stat:4;
- u16 rx_estat:6;
- u16 rsc_cnt:4;
- u16 pkt_len;
- u16 next_desp;
- u16 vlan;
+ uint32_t rss_type:4;
+ uint32_t pkt_type:8;
+ uint32_t rdm_err:1;
+ uint32_t rsvd:6;
+ uint32_t rx_cntl:2;
+ uint32_t sph:1;
+ uint32_t hdr_len:10;
+ uint32_t rss_hash;
+ uint16_t dd:1;
+ uint16_t eop:1;
+ uint16_t rx_stat:4;
+ uint16_t rx_estat:6;
+ uint16_t rsc_cnt:4;
+ uint16_t pkt_len;
+ uint16_t next_desp;
+ uint16_t vlan;
} __attribute__((__packed__)) aq_rx_wb_t;
typedef volatile struct {
- union {
- /* HW RX descriptor */
- struct __packed {
- u64 buf_addr;
- u64 hdr_addr;
- } read;
-
- /* HW RX descriptor writeback */
- aq_rx_wb_t wb;
- };
+ union {
+ /* HW RX descriptor */
+ struct __packed {
+ uint64_t buf_addr;
+ uint64_t hdr_addr;
+ } read;
+
+ /* HW RX descriptor writeback */
+ aq_rx_wb_t wb;
+ };
} __attribute__((__packed__)) aq_rx_desc_t;
/* Hardware tx descriptor */
typedef volatile struct {
- u64 buf_addr;
-
- union {
- struct {
- u32 type:3;
- u32 :1;
- u32 len:16;
- u32 dd:1;
- u32 eop:1;
- u32 cmd:8;
- u32 :14;
- u32 ct_idx:1;
- u32 ct_en:1;
- u32 pay_len:18;
- } __attribute__((__packed__));
- u64 flags;
- };
+ uint64_t buf_addr;
+
+ union {
+ struct {
+ uint32_t type:3;
+ uint32_t :1;
+ uint32_t len:16;
+ uint32_t dd:1;
+ uint32_t eop:1;
+ uint32_t cmd:8;
+ uint32_t :14;
+ uint32_t ct_idx:1;
+ uint32_t ct_en:1;
+ uint32_t pay_len:18;
+ } __attribute__((__packed__));
+ uint64_t flags;
+ };
} __attribute__((__packed__)) aq_tx_desc_t;
enum aq_tx_desc_type {
- tx_desc_type_desc = 1,
- tx_desc_type_ctx = 2,
+ tx_desc_type_desc = 1,
+ tx_desc_type_ctx = 2,
};
enum aq_tx_desc_cmd {
- tx_desc_cmd_vlan = 1,
- tx_desc_cmd_fcs = 2,
- tx_desc_cmd_ipv4 = 4,
- tx_desc_cmd_l4cs = 8,
- tx_desc_cmd_lso = 0x10,
- tx_desc_cmd_wb = 0x20,
+ tx_desc_cmd_vlan = 1,
+ tx_desc_cmd_fcs = 2,
+ tx_desc_cmd_ipv4 = 4,
+ tx_desc_cmd_l4cs = 8,
+ tx_desc_cmd_lso = 0x10,
+ tx_desc_cmd_wb = 0x20,
};
/* Hardware tx context descriptor */
typedef volatile union {
- struct __packed {
- u64 flags1;
- u64 flags2;
- };
-
- struct __packed {
- u64 :40;
- u32 tun_len:8;
- u32 out_len:16;
- u32 type:3;
- u32 idx:1;
- u32 vlan_tag:16;
- u32 cmd:4;
- u32 l2_len:7;
- u32 l3_len:9;
- u32 l4_len:8;
- u32 mss_len:16;
- };
+ struct __packed {
+ uint64_t flags1;
+ uint64_t flags2;
+ };
+
+ struct __packed {
+ uint64_t :40;
+ uint32_t tun_len:8;
+ uint32_t out_len:16;
+ uint32_t type:3;
+ uint32_t idx:1;
+ uint32_t vlan_tag:16;
+ uint32_t cmd:4;
+ uint32_t l2_len:7;
+ uint32_t l3_len:9;
+ uint32_t l4_len:8;
+ uint32_t mss_len:16;
+ };
} __attribute__((__packed__)) aq_txc_desc_t;
struct aq_ring_stats {
- u64 rx_pkts;
- u64 rx_bytes;
- u64 jumbo_pkts;
- u64 rx_err;
- u64 irq;
-
- u64 tx_pkts;
- u64 tx_bytes;
- u64 tx_drops;
- u64 tx_queue_full;
+ uint64_t rx_pkts;
+ uint64_t rx_bytes;
+ uint64_t jumbo_pkts;
+ uint64_t rx_err;
+ uint64_t irq;
+
+ uint64_t tx_pkts;
+ uint64_t tx_bytes;
+ uint64_t tx_drops;
+ uint64_t tx_queue_full;
};
struct aq_dev;
struct aq_ring {
- struct aq_dev *dev;
- int index;
+ struct aq_dev *dev;
+ int index;
- struct if_irq irq;
- int msix;
+ struct if_irq irq;
+ int msix;
/* RX */
- qidx_t rx_size;
- int rx_max_frame_size;
- void *rx_desc_area_ptr;
- aq_rx_desc_t *rx_descs;
- uint64_t rx_descs_phys;
+ qidx_t rx_size;
+ int rx_max_frame_size;
+ void *rx_desc_area_ptr;
+ aq_rx_desc_t *rx_descs;
+ uint64_t rx_descs_phys;
/* TX */
- int tx_head, tx_tail;
- qidx_t tx_size;
- void *tx_desc_area_ptr;
- aq_tx_desc_t *tx_descs;
- uint64_t tx_descs_phys;
+ int tx_head, tx_tail;
+ qidx_t tx_size;
+ void *tx_desc_area_ptr;
+ aq_tx_desc_t *tx_descs;
+ uint64_t tx_descs_phys;
- struct aq_ring_stats stats;
+ struct aq_ring_stats stats;
};
int aq_ring_rx_init(struct aq_hw *hw, struct aq_ring *ring);
@@ -175,7 +175,7 @@
int aq_ring_rx_start(struct aq_hw *hw, struct aq_ring *ring);
int aq_ring_rx_stop(struct aq_hw *hw, struct aq_ring *ring);
-int aq_ring_tx_tail_update(struct aq_hw *hw, struct aq_ring *ring, u32 tail);
+int aq_ring_tx_tail_update(struct aq_hw *hw, struct aq_ring *ring, uint32_t tail);
extern struct if_txrx aq_txrx;
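
[Context note, not part of the diff: the aq_txrx object declared above is the iflib binding that hands these rings to the network stack. Its initializer is outside the lines shown here; a typical wiring to the aq_isc_* callbacks defined in aq_ring.c below would look roughly like the sketch that follows. The field names are the stock struct if_txrx members from net/iflib.h and match the callback signatures in this diff, but the exact mapping is an assumption.]

struct if_txrx aq_txrx = {
	.ift_txd_encap = aq_isc_txd_encap,
	.ift_txd_flush = aq_isc_txd_flush,
	.ift_txd_credits_update = aq_isc_txd_credits_update,
	.ift_rxd_available = aq_isc_rxd_available,
	.ift_rxd_pkt_get = aq_isc_rxd_pkt_get,
	.ift_rxd_refill = aq_ring_rx_refill,
	.ift_rxd_flush = aq_isc_rxd_flush,
	.ift_legacy_intr = NULL,	/* assumed: MSI-X interrupts only */
};
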
Index: sys/dev/aq/aq_ring.c
===================================================================
--- sys/dev/aq/aq_ring.c
+++ sys/dev/aq/aq_ring.c
@@ -36,15 +36,15 @@
__FBSDID("$FreeBSD$");
#include <sys/param.h>
-#include <machine/param.h>
+#include <sys/bitstring.h>
#include <sys/kernel.h>
#include <sys/socket.h>
-#include <sys/bitstring.h>
+#include <machine/param.h>
+#include <net/ethernet.h>
#include <net/if.h>
+#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_var.h>
-#include <net/if_dl.h>
-#include <net/ethernet.h>
#include <net/iflib.h>
#include <netinet/in.h>
@@ -80,151 +80,159 @@
static inline uint32_t
aq_next(uint32_t i, uint32_t lim)
{
- return (i == lim) ? 0 : i + 1;
+ return (i == lim) ? 0 : i + 1;
}
-int aq_ring_rx_init(struct aq_hw *hw, struct aq_ring *ring)
+int
+aq_ring_rx_init(struct aq_hw *hw, struct aq_ring *ring)
/* uint64_t ring_addr,
- u32 ring_size,
- u32 ring_idx,
- u32 interrupt_cause,
- u32 cpu_idx) */
+ uint32_t ring_size,
+ uint32_t ring_idx,
+ uint32_t interrupt_cause,
+ uint32_t cpu_idx) */
{
- int err;
- u32 dma_desc_addr_lsw = (u32)ring->rx_descs_phys & 0xffffffff;
- u32 dma_desc_addr_msw = (u32)(ring->rx_descs_phys >> 32);
+ int err;
+ uint32_t dma_desc_addr_lsw = (uint32_t)ring->rx_descs_phys & 0xffffffff;
+ uint32_t dma_desc_addr_msw = (uint32_t)(ring->rx_descs_phys >> 32);
- AQ_DBG_ENTERA("[%d]", ring->index);
+ AQ_DBG_ENTERA("[%d]", ring->index);
- rdm_rx_desc_en_set(hw, false, ring->index);
+ rdm_rx_desc_en_set(hw, false, ring->index);
- rdm_rx_desc_head_splitting_set(hw, 0U, ring->index);
+ rdm_rx_desc_head_splitting_set(hw, 0U, ring->index);
- reg_rx_dma_desc_base_addresslswset(hw, dma_desc_addr_lsw, ring->index);
+ reg_rx_dma_desc_base_addresslswset(hw, dma_desc_addr_lsw, ring->index);
- reg_rx_dma_desc_base_addressmswset(hw, dma_desc_addr_msw, ring->index);
+ reg_rx_dma_desc_base_addressmswset(hw, dma_desc_addr_msw, ring->index);
- rdm_rx_desc_len_set(hw, ring->rx_size / 8U, ring->index);
+ rdm_rx_desc_len_set(hw, ring->rx_size / 8U, ring->index);
- device_printf(ring->dev->dev, "ring %d: __PAGESIZE=%d MCLBYTES=%d hw->max_frame_size=%d\n",
+ device_printf(ring->dev->dev, "ring %d: __PAGESIZE=%d MCLBYTES=%d hw->max_frame_size=%d\n",
ring->index, PAGE_SIZE, MCLBYTES, ring->rx_max_frame_size);
- rdm_rx_desc_data_buff_size_set(hw, ring->rx_max_frame_size / 1024U, ring->index);
+ rdm_rx_desc_data_buff_size_set(hw, ring->rx_max_frame_size / 1024U, ring->index);
- rdm_rx_desc_head_buff_size_set(hw, 0U, ring->index);
- rdm_rx_desc_head_splitting_set(hw, 0U, ring->index);
- rpo_rx_desc_vlan_stripping_set(hw, 0U, ring->index);
+ rdm_rx_desc_head_buff_size_set(hw, 0U, ring->index);
+ rdm_rx_desc_head_splitting_set(hw, 0U, ring->index);
+ rpo_rx_desc_vlan_stripping_set(hw, 0U, ring->index);
- /* Rx ring set mode */
+ /* Rx ring set mode */
- /* Mapping interrupt vector */
- itr_irq_map_rx_set(hw, ring->msix, ring->index);
- itr_irq_map_en_rx_set(hw, true, ring->index);
+ /* Mapping interrupt vector */
+ itr_irq_map_rx_set(hw, ring->msix, ring->index);
+ itr_irq_map_en_rx_set(hw, true, ring->index);
- rdm_cpu_id_set(hw, 0, ring->index);
- rdm_rx_desc_dca_en_set(hw, 0U, ring->index);
- rdm_rx_head_dca_en_set(hw, 0U, ring->index);
- rdm_rx_pld_dca_en_set(hw, 0U, ring->index);
+ rdm_cpu_id_set(hw, 0, ring->index);
+ rdm_rx_desc_dca_en_set(hw, 0U, ring->index);
+ rdm_rx_head_dca_en_set(hw, 0U, ring->index);
+ rdm_rx_pld_dca_en_set(hw, 0U, ring->index);
- err = aq_hw_err_from_flags(hw);
- AQ_DBG_EXIT(err);
- return (err);
+ err = aq_hw_err_from_flags(hw);
+ AQ_DBG_EXIT(err);
+ return (err);
}
-int aq_ring_tx_init(struct aq_hw *hw, struct aq_ring *ring)
+int
+aq_ring_tx_init(struct aq_hw *hw, struct aq_ring *ring)
/* uint64_t ring_addr,
- u32 ring_size,
- u32 ring_idx,
- u32 interrupt_cause,
- u32 cpu_idx) */
+ uint32_t ring_size,
+ uint32_t ring_idx,
+ uint32_t interrupt_cause,
+ uint32_t cpu_idx) */
{
- int err;
- u32 dma_desc_addr_lsw = (u32)ring->tx_descs_phys & 0xffffffff;
- u32 dma_desc_addr_msw = (u64)(ring->tx_descs_phys >> 32);
+ int err;
+ uint32_t dma_desc_addr_lsw = (uint32_t)ring->tx_descs_phys & 0xffffffff;
+ uint32_t dma_desc_addr_msw = (uint64_t)(ring->tx_descs_phys >> 32);
- AQ_DBG_ENTERA("[%d]", ring->index);
+ AQ_DBG_ENTERA("[%d]", ring->index);
- tdm_tx_desc_en_set(hw, 0U, ring->index);
+ tdm_tx_desc_en_set(hw, 0U, ring->index);
- reg_tx_dma_desc_base_addresslswset(hw, dma_desc_addr_lsw, ring->index);
+ reg_tx_dma_desc_base_addresslswset(hw, dma_desc_addr_lsw, ring->index);
- reg_tx_dma_desc_base_addressmswset(hw, dma_desc_addr_msw, ring->index);
+ reg_tx_dma_desc_base_addressmswset(hw, dma_desc_addr_msw, ring->index);
- tdm_tx_desc_len_set(hw, ring->tx_size / 8U, ring->index);
+ tdm_tx_desc_len_set(hw, ring->tx_size / 8U, ring->index);
- aq_ring_tx_tail_update(hw, ring, 0U);
+ aq_ring_tx_tail_update(hw, ring, 0U);
- /* Set Tx threshold */
- tdm_tx_desc_wr_wb_threshold_set(hw, 0U, ring->index);
+ /* Set Tx threshold */
+ tdm_tx_desc_wr_wb_threshold_set(hw, 0U, ring->index);
- /* Mapping interrupt vector */
- itr_irq_map_tx_set(hw, ring->msix, ring->index);
- itr_irq_map_en_tx_set(hw, true, ring->index);
+ /* Mapping interrupt vector */
+ itr_irq_map_tx_set(hw, ring->msix, ring->index);
+ itr_irq_map_en_tx_set(hw, true, ring->index);
- tdm_cpu_id_set(hw, 0, ring->index);
- tdm_tx_desc_dca_en_set(hw, 0U, ring->index);
+ tdm_cpu_id_set(hw, 0, ring->index);
+ tdm_tx_desc_dca_en_set(hw, 0U, ring->index);
- err = aq_hw_err_from_flags(hw);
- AQ_DBG_EXIT(err);
- return (err);
+ err = aq_hw_err_from_flags(hw);
+ AQ_DBG_EXIT(err);
+ return (err);
}
-int aq_ring_tx_tail_update(struct aq_hw *hw, struct aq_ring *ring, u32 tail)
+int
+aq_ring_tx_tail_update(struct aq_hw *hw, struct aq_ring *ring, uint32_t tail)
{
- AQ_DBG_ENTERA("[%d]", ring->index);
- reg_tx_dma_desc_tail_ptr_set(hw, tail, ring->index);
- AQ_DBG_EXIT(0);
- return (0);
+ AQ_DBG_ENTERA("[%d]", ring->index);
+ reg_tx_dma_desc_tail_ptr_set(hw, tail, ring->index);
+ AQ_DBG_EXIT(0);
+ return (0);
}
-int aq_ring_tx_start(struct aq_hw *hw, struct aq_ring *ring)
+int
+aq_ring_tx_start(struct aq_hw *hw, struct aq_ring *ring)
{
- int err;
+ int err;
- AQ_DBG_ENTERA("[%d]", ring->index);
- tdm_tx_desc_en_set(hw, 1U, ring->index);
- err = aq_hw_err_from_flags(hw);
- AQ_DBG_EXIT(err);
- return (err);
+ AQ_DBG_ENTERA("[%d]", ring->index);
+ tdm_tx_desc_en_set(hw, 1U, ring->index);
+ err = aq_hw_err_from_flags(hw);
+ AQ_DBG_EXIT(err);
+ return (err);
}
-int aq_ring_rx_start(struct aq_hw *hw, struct aq_ring *ring)
+int
+aq_ring_rx_start(struct aq_hw *hw, struct aq_ring *ring)
{
- int err;
+ int err;
- AQ_DBG_ENTERA("[%d]", ring->index);
- rdm_rx_desc_en_set(hw, 1U, ring->index);
- err = aq_hw_err_from_flags(hw);
- AQ_DBG_EXIT(err);
- return (err);
+ AQ_DBG_ENTERA("[%d]", ring->index);
+ rdm_rx_desc_en_set(hw, 1U, ring->index);
+ err = aq_hw_err_from_flags(hw);
+ AQ_DBG_EXIT(err);
+ return (err);
}
-int aq_ring_tx_stop(struct aq_hw *hw, struct aq_ring *ring)
+int
+aq_ring_tx_stop(struct aq_hw *hw, struct aq_ring *ring)
{
- int err;
+ int err;
- AQ_DBG_ENTERA("[%d]", ring->index);
- tdm_tx_desc_en_set(hw, 0U, ring->index);
- err = aq_hw_err_from_flags(hw);
- AQ_DBG_EXIT(err);
- return (err);
+ AQ_DBG_ENTERA("[%d]", ring->index);
+ tdm_tx_desc_en_set(hw, 0U, ring->index);
+ err = aq_hw_err_from_flags(hw);
+ AQ_DBG_EXIT(err);
+ return (err);
}
-int aq_ring_rx_stop(struct aq_hw *hw, struct aq_ring *ring)
+int
+aq_ring_rx_stop(struct aq_hw *hw, struct aq_ring *ring)
{
- int err;
-
- AQ_DBG_ENTERA("[%d]", ring->index);
- rdm_rx_desc_en_set(hw, 0U, ring->index);
- /* Invalidate Descriptor Cache to prevent writing to the cached
- * descriptors and to the data pointer of those descriptors
- */
- rdm_rx_dma_desc_cache_init_tgl(hw);
- err = aq_hw_err_from_flags(hw);
- AQ_DBG_EXIT(err);
- return (err);
+ int err;
+
+ AQ_DBG_ENTERA("[%d]", ring->index);
+ rdm_rx_desc_en_set(hw, 0U, ring->index);
+ /* Invalidate Descriptor Cache to prevent writing to the cached
+ * descriptors and to the data pointer of those descriptors
+ */
+ rdm_rx_dma_desc_cache_init_tgl(hw);
+ err = aq_hw_err_from_flags(hw);
+ AQ_DBG_EXIT(err);
+ return (err);
}
-static void aq_ring_rx_refill(void* arg, if_rxd_update_t iru)
+static void
+aq_ring_rx_refill(void* arg, if_rxd_update_t iru)
{
aq_dev_t *aq_dev = arg;
aq_rx_desc_t *rx_desc;
@@ -232,7 +240,7 @@
qidx_t i, pidx;
AQ_DBG_ENTERA("ring=%d iru_pidx=%d iru_count=%d iru->iru_buf_size=%d",
- iru->iru_qsidx, iru->iru_pidx, iru->iru_count, iru->iru_buf_size);
+ iru->iru_qsidx, iru->iru_pidx, iru->iru_count, iru->iru_buf_size);
ring = aq_dev->rx_rings[iru->iru_qsidx];
pidx = iru->iru_pidx;
@@ -248,8 +256,8 @@
AQ_DBG_EXIT(0);
}
-static void aq_isc_rxd_flush(void *arg, uint16_t rxqid, uint8_t flid __unused,
- qidx_t pidx)
+static void
+aq_isc_rxd_flush(void *arg, uint16_t rxqid, uint8_t flid __unused, qidx_t pidx)
{
aq_dev_t *aq_dev = arg;
struct aq_ring *ring = aq_dev->rx_rings[rxqid];
@@ -259,7 +267,8 @@
AQ_DBG_EXIT(0);
}
-static int aq_isc_rxd_available(void *arg, uint16_t rxqid, qidx_t idx, qidx_t budget)
+static int
+aq_isc_rxd_available(void *arg, uint16_t rxqid, qidx_t idx, qidx_t budget)
{
aq_dev_t *aq_dev = arg;
struct aq_ring *ring = aq_dev->rx_rings[rxqid];
@@ -268,8 +277,9 @@
AQ_DBG_ENTERA("[%d] head=%u, budget %d", ring->index, idx, budget);
- for (iter = 0, cnt = 0, i = idx; iter < ring->rx_size && cnt <= budget;) {
- trace_aq_rx_descr(ring->index, i, (volatile u64*)&rx_desc[i]);
+ for (iter = 0, cnt = 0, i = idx;
+ iter < ring->rx_size && cnt <= budget;) {
+ trace_aq_rx_descr(ring->index, i, (volatile uint64_t*)&rx_desc[i]);
if (!rx_desc[i].wb.dd)
break;
@@ -296,7 +306,8 @@
return (cnt);
}
-static void aq_rx_set_cso_flags(aq_rx_desc_t *rx_desc, if_rxd_info_t ri)
+static void
+aq_rx_set_cso_flags(aq_rx_desc_t *rx_desc, if_rxd_info_t ri)
{
if ((rx_desc->wb.pkt_type & 0x3) == 0) { //IPv4
if (rx_desc->wb.rx_cntl & BIT(0)){ // IPv4 csum checked
@@ -316,17 +327,18 @@
}
static uint8_t bsd_rss_type[16] = {
- [AQ_RX_RSS_TYPE_IPV4]=M_HASHTYPE_RSS_IPV4,
- [AQ_RX_RSS_TYPE_IPV6]=M_HASHTYPE_RSS_IPV6,
- [AQ_RX_RSS_TYPE_IPV4_TCP]=M_HASHTYPE_RSS_TCP_IPV4,
- [AQ_RX_RSS_TYPE_IPV6_TCP]=M_HASHTYPE_RSS_TCP_IPV6,
- [AQ_RX_RSS_TYPE_IPV4_UDP]=M_HASHTYPE_RSS_UDP_IPV4,
- [AQ_RX_RSS_TYPE_IPV6_UDP]=M_HASHTYPE_RSS_UDP_IPV6,
+ [AQ_RX_RSS_TYPE_IPV4] = M_HASHTYPE_RSS_IPV4,
+ [AQ_RX_RSS_TYPE_IPV6] = M_HASHTYPE_RSS_IPV6,
+ [AQ_RX_RSS_TYPE_IPV4_TCP] = M_HASHTYPE_RSS_TCP_IPV4,
+ [AQ_RX_RSS_TYPE_IPV6_TCP] = M_HASHTYPE_RSS_TCP_IPV6,
+ [AQ_RX_RSS_TYPE_IPV4_UDP] = M_HASHTYPE_RSS_UDP_IPV4,
+ [AQ_RX_RSS_TYPE_IPV6_UDP] = M_HASHTYPE_RSS_UDP_IPV6,
};
-static int aq_isc_rxd_pkt_get(void *arg, if_rxd_info_t ri)
+static int
+aq_isc_rxd_pkt_get(void *arg, if_rxd_info_t ri)
{
aq_dev_t *aq_dev = arg;
struct aq_ring *ring = aq_dev->rx_rings[ri->iri_qsidx];
@@ -343,7 +355,7 @@
do {
rx_desc = (aq_rx_desc_t *) &ring->rx_descs[cidx];
- trace_aq_rx_descr(ring->index, cidx, (volatile u64*)rx_desc);
+ trace_aq_rx_descr(ring->index, cidx, (volatile uint64_t*)rx_desc);
if ((rx_desc->wb.rx_stat & BIT(0)) != 0) {
ring->stats.rx_err++;
@@ -393,20 +405,24 @@
/* */
/*****************************************************************************/
-static void aq_setup_offloads(aq_dev_t *aq_dev, if_pkt_info_t pi, aq_tx_desc_t *txd, u32 tx_cmd)
+static void
+aq_setup_offloads(aq_dev_t *aq_dev, if_pkt_info_t pi, aq_tx_desc_t *txd,
+ uint32_t tx_cmd)
{
- AQ_DBG_ENTER();
- txd->cmd |= tx_desc_cmd_fcs;
- txd->cmd |= (pi->ipi_csum_flags & (CSUM_IP|CSUM_TSO)) ? tx_desc_cmd_ipv4 : 0;
- txd->cmd |= (pi->ipi_csum_flags &
- (CSUM_IP_TCP | CSUM_IP6_TCP | CSUM_IP_UDP | CSUM_IP6_UDP)
- ) ? tx_desc_cmd_l4cs : 0;
- txd->cmd |= (pi->ipi_flags & IPI_TX_INTR) ? tx_desc_cmd_wb : 0;
- txd->cmd |= tx_cmd;
- AQ_DBG_EXIT(0);
+ AQ_DBG_ENTER();
+ txd->cmd |= tx_desc_cmd_fcs;
+ txd->cmd |= (pi->ipi_csum_flags & (CSUM_IP|CSUM_TSO)) ?
+ tx_desc_cmd_ipv4 : 0;
+ txd->cmd |= (pi->ipi_csum_flags & (CSUM_IP_TCP | CSUM_IP6_TCP |
+ CSUM_IP_UDP | CSUM_IP6_UDP)) ? tx_desc_cmd_l4cs : 0;
+ txd->cmd |= (pi->ipi_flags & IPI_TX_INTR) ? tx_desc_cmd_wb : 0;
+ txd->cmd |= tx_cmd;
+ AQ_DBG_EXIT(0);
}
-static int aq_ring_tso_setup(aq_dev_t *aq_dev, if_pkt_info_t pi, uint32_t *hdrlen, aq_txc_desc_t *txc)
+static int
+aq_ring_tso_setup(aq_dev_t *aq_dev, if_pkt_info_t pi, uint32_t *hdrlen,
+ aq_txc_desc_t *txc)
{
uint32_t tx_cmd = 0;
@@ -448,7 +464,8 @@
return (tx_cmd);
}
-static int aq_isc_txd_encap(void *arg, if_pkt_info_t pi)
+static int
+aq_isc_txd_encap(void *arg, if_pkt_info_t pi)
{
aq_dev_t *aq_dev = arg;
struct aq_ring *ring;
@@ -477,7 +494,8 @@
AQ_DBG_PRINT("tx_cmd = 0x%x", tx_cmd);
if (tx_cmd) {
- trace_aq_tx_context_descr(ring->index, pidx, (volatile void*)txc);
+ trace_aq_tx_context_descr(ring->index, pidx,
+ (volatile void*)txc);
/* We've consumed the first desc, adjust counters */
pidx = aq_next(pidx, ring->tx_size - 1);
@@ -515,7 +533,8 @@
txd->len = segs[i].ds_len;
txd->pay_len = pay_len;
if (i < pi->ipi_nsegs - 1)
- trace_aq_tx_descr(ring->index, pidx, (volatile void*)txd);
+ trace_aq_tx_descr(ring->index, pidx,
+ (volatile void*)txd);
pidx = aq_next(pidx, ring->tx_size - 1);
@@ -537,7 +556,8 @@
return (0);
}
-static void aq_isc_txd_flush(void *arg, uint16_t txqid, qidx_t pidx)
+static void
+aq_isc_txd_flush(void *arg, uint16_t txqid, qidx_t pidx)
{
aq_dev_t *aq_dev = arg;
struct aq_ring *ring = aq_dev->tx_rings[txqid];
@@ -549,12 +569,14 @@
}
-static inline unsigned int aq_avail_desc(int a, int b, int size)
+static inline unsigned int
+aq_avail_desc(int a, int b, int size)
{
- return (((b >= a)) ? ((size ) - b + a) : (a - b));
+ return (((b >= a)) ? ((size) - b + a) : (a - b));
}
-static int aq_isc_txd_credits_update(void *arg, uint16_t txqid, bool clear)
+static int
+aq_isc_txd_credits_update(void *arg, uint16_t txqid, bool clear)
{
aq_dev_t *aq_dev = arg;
struct aq_ring *ring = aq_dev->tx_rings[txqid];
@@ -567,7 +589,7 @@
AQ_DBG_PRINT("swhead %d hwhead %d", ring->tx_head, head);
if (ring->tx_head == head) {
- avail = 0; //ring->tx_size;
+ avail = 0; // ring->tx_size;
goto done;
}
