Index: sys/dev/aq/.cirrus.yml =================================================================== --- /dev/null +++ sys/dev/aq/.cirrus.yml @@ -0,0 +1,20 @@ +env: + CIRRUS_CLONE_DEPTH: 1 + ARCH: amd64 + +task: + compute_engine_instance: + image_project: freebsd-org-cloud-dev + platform: freebsd + matrix: + - name: freebsd14-amd64 + freebsd_instance: + image_family: freebsd-14-0-snap + - name: freebsd13-amd64 + freebsd_instance: + image: freebsd-13-0-release-amd64 + - name: freebsd12-amd64 + freebsd_instance: + image: freebsd-12-3-release-amd64 + script: + - make Index: sys/dev/aq/.gitignore =================================================================== --- /dev/null +++ sys/dev/aq/.gitignore @@ -0,0 +1,13 @@ +*.o +.depend.* +x86 +machine +*.ko +device_if.h +export_syms +ifdi_if.h +bus_if.h +opt_inet.h +opt_inet6.h +opt_rss.h +pci_if.h Index: sys/dev/aq/LICENSE =================================================================== --- /dev/null +++ sys/dev/aq/LICENSE @@ -0,0 +1,33 @@ +/* + * aQuantia Corporation Network Driver + * Copyright (C) 2019 aQuantia Corporation. All rights reserved + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * (1) Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * (2) Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * + * (3)The name of the author may not be used to endorse or promote + * products derived from this software without specific prior + * written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS + * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE + * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ Index: sys/dev/aq/Makefile =================================================================== --- /dev/null +++ sys/dev/aq/Makefile @@ -0,0 +1,14 @@ +# $FreeBSD$ + +.PATH: ./ + +KMOD= if_atlantic +SRCS= device_if.h bus_if.h pci_if.h ifdi_if.h +SRCS+= opt_inet.h opt_inet6.h opt_rss.h +SRCS+= aq_main.c aq_media.c aq_irq.c +SRCS+= aq_ring.c aq_hw.c aq_hw_llh.c +SRCS+= aq_fw.c aq_fw1x.c aq_fw2x.c aq_dbg.c + +CFLAGS+= -DIFLIB -ferror-limit=1 -I/usr/include + +.include Index: sys/dev/aq/README.md =================================================================== --- /dev/null +++ sys/dev/aq/README.md @@ -0,0 +1,3 @@ +# FreeBSD driver + +Atlantic driver for FreeBSD \ No newline at end of file Index: sys/dev/aq/aq_common.h =================================================================== --- /dev/null +++ sys/dev/aq/aq_common.h @@ -0,0 +1,87 @@ +/* + * aQuantia Corporation Network Driver + * Copyright (C) 2014-2017 aQuantia Corporation. 
All rights reserved + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * (1) Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * (2) Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * + * (3)The name of the author may not be used to endorse or promote + * products derived from this software without specific prior + * written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS + * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE + * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _AQ_COMMON_H_ +#define _AQ_COMMON_H_ + +#include + +#define ETH_MAC_LEN 6 + +/* Types definition */ +#define TRUE 1 +#define FALSE 0 + +#define s8 __int8_t +#define u8 __uint8_t +#define u16 __uint16_t +#define s16 __int16_t +#define u32 __uint32_t +#define u64 __uint64_t +#define s64 __int64_t +#define s32 int +typedef __uint32_t DWORD; + +#define ETIME ETIMEDOUT +#define EOK 0 + +#define BIT(nr) (1UL << (nr)) + +#define usec_delay(x) DELAY(x) + +#ifndef msec_delay +#define msec_delay(x) DELAY(x*1000) +#define msec_delay_irq(x) DELAY(x*1000) +#endif + +#define AQ_HW_WAIT_FOR(_B_, _US_, _N_) \ + do { \ + unsigned int i; \ + for (i = _N_; (!(_B_)) && i; --i) { \ + usec_delay(_US_); \ + } \ + if (!i) { \ + err = -1; \ + } \ + } while (0) + + +#define LODWORD(a) ((DWORD)(a)) +#define LOWORD(a) ((u16)(a)) +#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) + +#define AQ_VER "0.0.5" + +#endif //_AQ_COMMON_H_ + Index: sys/dev/aq/aq_dbg.h =================================================================== --- /dev/null +++ sys/dev/aq/aq_dbg.h @@ -0,0 +1,138 @@ +/** + * aQuantia Corporation Network Driver + * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * (1) Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * (2) Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * + * (3) The name of the author may not be used to endorse or promote + * products derived from this software without specific prior + * written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS + * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE + * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * @file aq_dbg.h + * Debug print macros & definitions. + * @date 2017.12.07 @author roman.agafonov@aquantia.com + */ +#ifndef AQ_DBG_H +#define AQ_DBG_H + +#include +#include +/* +Debug levels: +0 - no debug +1 - important warnings +2 - debug prints +3 - trace function calls +4 - dump descriptor +*/ + +#define AQ_CFG_DEBUG_LVL 0x0 + +#define AQ_DBG_ERROR(string, args...) printf( "atlantic: " string "\n", ##args) + +/* Debug stuff */ +#if AQ_CFG_DEBUG_LVL > 0 +#define AQ_DBG_WARNING(string, args...) printf( "atlantic: " string "\n", ##args) +#else +#define AQ_DBG_WARNING(string, ...) +#endif + +#if AQ_CFG_DEBUG_LVL > 1 +#define AQ_DBG_PRINT(string, args...) printf( "atlantic: " string "\n", ##args) +#else +#define AQ_DBG_PRINT(string, ...) +#endif + +#if AQ_CFG_DEBUG_LVL > 2 +#define AQ_DBG_ENTER() printf( "atlantic: %s() {\n", __func__) +#define AQ_DBG_ENTERA(s, args...) printf( "atlantic: %s(" s ") {\n", __func__, ##args) +#define AQ_DBG_EXIT(err) printf( "atlantic: } %s(), err=%d\n", __func__, err) +#else +#define AQ_DBG_ENTER() +#define AQ_DBG_ENTERA(s, args...) +#define AQ_DBG_EXIT(err) +#endif + +#if AQ_CFG_DEBUG_LVL > 2 +#define AQ_DBG_DUMP_DESC(desc) { \ + volatile u8 *raw = (volatile u8*)(desc); \ + printf( "07-00 %02X%02X%02X%02X %02X%02X%02X%02X 15-08 %02X%02X%02X%02X %02X%02X%02X%02X\n", \ + raw[7], raw[6], raw[5], raw[4], raw[3], raw[2], raw[1], raw[0], \ + raw[15], raw[14], raw[13], raw[12], raw[11], raw[10], raw[9], raw[8]); \ +}\ + +#else +#define AQ_DBG_DUMP_DESC(desc) +#endif + +typedef enum aq_debug_level +{ + lvl_error = LOG_ERR, + lvl_warn = LOG_WARNING, + lvl_trace = LOG_NOTICE, + lvl_detail = LOG_INFO, +} aq_debug_level; + +typedef enum aq_debug_category +{ + dbg_init = 1, + dbg_config = 1 << 1, + dbg_tx = 1 << 2, + dbg_rx = 1 << 3, + dbg_intr = 1 << 4, + dbg_fw = 1 << 5, +} aq_debug_category; + + +#define __FILENAME__ (__builtin_strrchr(__FILE__, '/') ? __builtin_strrchr(__FILE__, '/') + 1 : __FILE__) + +extern const aq_debug_level dbg_level_; +extern const u32 dbg_categories_; + +#define log_base_(_lvl, _fmt, args...) printf( "atlantic: " _fmt "\n", ##args) + +#if AQ_CFG_DEBUG_LVL > 0 +#define trace_base_(_lvl, _cat, _fmt, args...) do { if (dbg_level_ >= _lvl && (_cat & dbg_categories_)) { printf( "atlantic: " _fmt " @%s,%d\n", ##args, __FILENAME__, __LINE__); }} while (0) +#else +#define trace_base_(_lvl, _cat, _fmt, ...) do {} while (0) +#endif // AQ_CFG_DEBUG_LVL > 0 + +#define aq_log_error(_fmt, args...) log_base_(lvl_error, "[!] " _fmt, ##args) +#define aq_log_warn(_fmt, args...) log_base_(lvl_warn, "/!\\ " _fmt, ##args) +#define aq_log(_fmt, args...) log_base_(lvl_trace, _fmt, ##args) +#define aq_log_detail(_fmt, args...) log_base_(lvl_detail, _fmt, ##args) + +#define trace_error(_cat,_fmt, args...) 
trace_base_(lvl_error, _cat, "[!] " _fmt, ##args) +#define trace_warn(_cat, _fmt, args...) trace_base_(lvl_warn, _cat, "/!\\ " _fmt, ##args) +#define trace(_cat, _fmt, args...) trace_base_(lvl_trace, _cat, _fmt, ##args) +#define trace_detail(_cat, _fmt, args...) trace_base_(lvl_detail, _cat, _fmt, ##args) + +void trace_aq_tx_descr(int ring_idx, unsigned int pointer, volatile u64 descr[2]); +void trace_aq_rx_descr(int ring_idx, unsigned int pointer, volatile u64 descr[2]); +void trace_aq_tx_context_descr(int ring_idx, unsigned int pointer, volatile u64 descr[2]); +void DumpHex(const void* data, size_t size); + +#endif // AQ_DBG_H Index: sys/dev/aq/aq_dbg.c =================================================================== --- /dev/null +++ sys/dev/aq/aq_dbg.c @@ -0,0 +1,237 @@ +/** + * aQuantia Corporation Network Driver + * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * (1) Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * (2) Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * + * (3) The name of the author may not be used to endorse or promote + * products derived from this software without specific prior + * written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS + * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE + * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * @file aq_dbg.c + * Debugging stuff. 
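+ * Descriptor-field tracing and hex-dump helpers live here; their bodies compile out at the default AQ_CFG_DEBUG_LVL (0) defined in aq_dbg.h.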
+ * @date 2017.12.13 @author roman.agafonov@aquantia.com + */ + + +#include +__FBSDID("$FreeBSD$"); + +#include +#include "aq_common.h" +#include "aq_dbg.h" + + +const aq_debug_level dbg_level_ = lvl_detail; +const u32 dbg_categories_ = dbg_init | dbg_config | dbg_fw; + + + +#define DESCR_FIELD(DESCR, BIT_BEGIN, BIT_END) \ + ((DESCR >> BIT_END) &\ + (BIT(BIT_BEGIN - BIT_END + 1) -1)) + +#define __field(TYPE, VAR) TYPE VAR; +void trace_aq_tx_descr(int ring_idx, unsigned int pointer, volatile u64 descr[2]) +{ +#if AQ_CFG_DEBUG_LVL > 2 + struct __entry{ + __field(unsigned int, ring_idx) + __field(unsigned int, pointer) + /* Tx Descriptor */ + __field(u64, data_buf_addr) + __field(u32, pay_len) + __field(u8, ct_en) + __field(u8, ct_idx) + __field(u16, rsvd2) + __field(u8, tx_cmd) + __field(u8, eop) + __field(u8, dd) + __field(u16, buf_len) + __field(u8, rsvd1) + __field(u8, des_typ) + } entry; + + entry.ring_idx = ring_idx; + entry.pointer = pointer; + entry.data_buf_addr = descr[0]; + entry.pay_len = DESCR_FIELD(descr[1], 63, 46); + entry.ct_en = DESCR_FIELD(descr[1], 45, 45); + entry.ct_idx = DESCR_FIELD(descr[1], 44, 44); + entry.rsvd2 = DESCR_FIELD(descr[1], 43, 30); + entry.tx_cmd = DESCR_FIELD(descr[1], 29, 22); + entry.eop = DESCR_FIELD(descr[1], 21, 21); + entry.dd = DESCR_FIELD(descr[1], 20, 20); + entry.buf_len = DESCR_FIELD(descr[1], 19, 4); + entry.rsvd1 = DESCR_FIELD(descr[1], 3, 3); + entry.des_typ = DESCR_FIELD(descr[1], 2, 0); + + + aq_log_detail("trace_aq_tx_descr ring=%d descr=%u pay_len=%u ct_en=%u ct_idx=%u rsvd2=0x%x tx_cmd=0x%x eop=%u dd=%u buf_len=%u rsvd1=%u des_typ=0x%x", + entry.ring_idx, entry.pointer, entry.pay_len, + entry.ct_en, entry.ct_idx, entry.rsvd2, + entry.tx_cmd, entry.eop, entry.dd, entry.buf_len, + entry.rsvd1, entry.des_typ); +#endif +} + +void trace_aq_rx_descr(int ring_idx, unsigned int pointer, volatile u64 descr[2]) +{ +#if AQ_CFG_DEBUG_LVL > 2 + u8 dd; + u8 eop; + u8 rx_stat; + u8 rx_estat; + u8 rsc_cnt; + u16 pkt_len; + u16 next_desp; + u16 vlan_tag; + + u8 rss_type; + u8 pkt_type; + u8 rdm_err; + u8 avb_ts; + u8 rsvd; + u8 rx_cntl; + u8 sph; + u16 hdr_len; + u32 rss_hash; + + rss_hash = DESCR_FIELD(descr[0], 63, 32); + hdr_len = DESCR_FIELD(descr[0], 31, 22); + sph = DESCR_FIELD(descr[0], 21, 21); + rx_cntl = DESCR_FIELD(descr[0], 20, 19); + rsvd = DESCR_FIELD(descr[0], 18, 14); + avb_ts = DESCR_FIELD(descr[0], 13, 13); + rdm_err = DESCR_FIELD(descr[0], 12, 12); + pkt_type = DESCR_FIELD(descr[0], 11, 4); + rss_type = DESCR_FIELD(descr[0], 3, 0); + + vlan_tag = DESCR_FIELD(descr[1], 63, 48); + next_desp = DESCR_FIELD(descr[1], 47, 32); + pkt_len = DESCR_FIELD(descr[1], 31, 16); + rsc_cnt = DESCR_FIELD(descr[1], 15, 12); + rx_estat = DESCR_FIELD(descr[1], 11, 6); + rx_stat = DESCR_FIELD(descr[1], 5, 2); + eop = DESCR_FIELD(descr[1], 1, 1); + dd = DESCR_FIELD(descr[1], 0, 0); + + printf("trace_aq_rx_descr ring=%d descr=%u rss_hash=0x%x hdr_len=%u sph=%u rx_cntl=%u rsvd=0x%x avb_ts=%u rdm_err=%u pkt_type=%u rss_type=%u vlan_tag=%u next_desp=%u pkt_len=%u rsc_cnt=%u rx_estat=0x%x rx_stat=0x%x eop=%u dd=%u\n", + ring_idx, pointer, rss_hash, + hdr_len, sph, rx_cntl, + rsvd, avb_ts, rdm_err, + pkt_type, rss_type, vlan_tag, + next_desp, pkt_len, rsc_cnt, + rx_estat, rx_stat, eop, dd); +#endif +} + +void trace_aq_tx_context_descr(int ring_idx, unsigned int pointer, volatile u64 descr[2]) +{ +#if AQ_CFG_DEBUG_LVL > 2 + struct __entry_s{ + __field(unsigned int, ring_idx) + __field(unsigned int, pointer) + /* Tx Context Descriptor */ + __field(u16, out_len) + 
__field(u8, tun_len) + __field(u64, resvd3) + __field(u16, mss_len) + __field(u8, l4_len) + __field(u8, l3_len) + __field(u8, l2_len) + __field(u8, ct_cmd) + __field(u16, vlan_tag) + __field(u8, ct_idx) + __field(u8, des_typ) + } entry; + struct __entry_s *__entry = &entry; + __entry->ring_idx = ring_idx; + __entry->pointer = pointer; + __entry->out_len = DESCR_FIELD(descr[0], 63, 48); + __entry->tun_len = DESCR_FIELD(descr[0], 47, 40); + __entry->resvd3 = DESCR_FIELD(descr[0], 39, 0); + __entry->mss_len = DESCR_FIELD(descr[1], 63, 48); + __entry->l4_len = DESCR_FIELD(descr[1], 47, 40); + __entry->l3_len = DESCR_FIELD(descr[1], 39, 31); + __entry->l2_len = DESCR_FIELD(descr[1], 30, 24); + __entry->ct_cmd = DESCR_FIELD(descr[1], 23, 20); + __entry->vlan_tag = DESCR_FIELD(descr[1], 19, 4); + __entry->ct_idx = DESCR_FIELD(descr[1], 3, 3); + __entry->des_typ = DESCR_FIELD(descr[1], 2, 0); + + printf("trace_aq_tx_context_descr ring=%d descr=%u out_len=%u tun_len=%u resvd3=%lu mss_len=%u l4_len=%u l3_len=%u l2_len=%d ct_cmd=%u vlan_tag=%u ct_idx=%u des_typ=0x%x\n", + __entry->ring_idx, __entry->pointer, __entry->out_len, + __entry->tun_len, __entry->resvd3, __entry->mss_len, + __entry->l4_len, __entry->l3_len, __entry->l2_len, + __entry->ct_cmd, __entry->vlan_tag, __entry->ct_idx, + __entry->des_typ); +#endif +} + +void DumpHex(const void* data, size_t size) { +#if AQ_CFG_DEBUG_LVL > 3 + char ascii[17]; + size_t i, j; + char line[256]; + char buf[256]; + + ascii[16] = '\0'; + line[0] = '\0'; + printf("packet at %p\n", data); + + for (i = 0; i < size; ++i) { + sprintf(buf, "%02X ", ((const unsigned char*)data)[i]); + strcat(line, buf); + if (((const unsigned char*)data)[i] >= ' ' && ((const unsigned char*)data)[i] <= '~') { + ascii[i % 16] = ((const unsigned char*)data)[i]; + } else { + ascii[i % 16] = '.'; + } + if ((i+1) % 8 == 0 || i+1 == size) { + strcat(line, " "); + if ((i+1) % 16 == 0) { + sprintf(buf, "| %s \n", ascii); + strcat(line, buf); + printf("%s", line); + line[0] = '\0'; + } else if (i+1 == size) { + ascii[(i+1) % 16] = '\0'; + if ((i+1) % 16 <= 8) { + strcat(line, " "); + } + for (j = (i+1) % 16; j < 16; ++j) { + strcat(line, " "); + } + sprintf(buf, "| %s \n", ascii); + strcat(line, buf); + printf("%s", line); + line[0] = '\0'; + } + } + } +#endif +} \ No newline at end of file Index: sys/dev/aq/aq_device.h =================================================================== --- /dev/null +++ sys/dev/aq/aq_device.h @@ -0,0 +1,150 @@ +/* + * aQuantia Corporation Network Driver + * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * (1) Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * (2) Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * + * (3)The name of the author may not be used to endorse or promote + * products derived from this software without specific prior + * written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS + * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE + * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _AQ_DEVICE_H_ +#define _AQ_DEVICE_H_ + +#include "aq_hw.h" + +enum aq_media_type { + AQ_MEDIA_TYPE_UNKNOWN = 0, + AQ_MEDIA_TYPE_FIBRE, + AQ_MEDIA_TYPE_TP, +}; + +#define AQ_LINK_UNKNOWN 0x00000000 +#define AQ_LINK_100M 0x00000001 +#define AQ_LINK_1G 0x00000002 +#define AQ_LINK_2G5 0x00000004 +#define AQ_LINK_5G 0x00000008 +#define AQ_LINK_10G 0x00000010 + +#define AQ_LINK_ALL (AQ_LINK_100M | AQ_LINK_1G | AQ_LINK_2G5 | AQ_LINK_5G | \ + AQ_LINK_10G ) + +struct aq_stats_s { + u64 prc; + u64 uprc; + u64 mprc; + u64 bprc; + u64 cprc; + u64 erpr; + u64 dpc; + u64 brc; + u64 ubrc; + u64 mbrc; + u64 bbrc; + + u64 ptc; + u64 uptc; + u64 mptc; + u64 bptc; + u64 erpt; + u64 btc; + u64 ubtc; + u64 mbtc; + u64 bbtc; +}; + +enum aq_dev_state_e { + AQ_DEV_STATE_UNLOAD, + AQ_DEV_STATE_PCI_STOP, + AQ_DEV_STATE_DOWN, + AQ_DEV_STATE_UP, +}; + +struct aq_rx_filters { + unsigned int rule_cnt; + struct aq_rx_filter_vlan vlan_filters[AQ_HW_VLAN_MAX_FILTERS]; + struct aq_rx_filter_l2 etype_filters[AQ_HW_ETYPE_MAX_FILTERS]; +}; + +struct aq_vlan_tag { + SLIST_ENTRY(aq_vlan_tag) next; + uint16_t tag; +}; + +struct aq_dev { + device_t dev; + if_ctx_t ctx; + if_softc_ctx_t scctx; + if_shared_ctx_t sctx; + struct ifmedia * media; + + struct aq_hw hw; + + enum aq_media_type media_type; + uint32_t link_speeds; + uint32_t chip_features; + uint32_t mbox_addr; + uint8_t mac_addr[ETHER_ADDR_LEN]; + uint64_t admin_ticks; + struct if_irq irq; + int msix; + + int mmio_rid; + struct resource * mmio_res; + bus_space_tag_t mmio_tag; + bus_space_handle_t mmio_handle; + bus_size_t mmio_size; + + struct aq_ring *tx_rings[HW_ATL_B0_RINGS_MAX]; + struct aq_ring *rx_rings[HW_ATL_B0_RINGS_MAX]; + uint32_t tx_rings_count; + uint32_t rx_rings_count; + bool linkup; + int media_active; + + struct aq_hw_stats_s last_stats; + struct aq_stats_s curr_stats; + + bitstr_t *vlan_tags; + int mcnt; + + uint8_t rss_key[HW_ATL_RSS_HASHKEY_SIZE]; + uint8_t rss_table[HW_ATL_RSS_INDIRECTION_TABLE_MAX]; +}; + +typedef struct aq_dev aq_dev_t; + +int aq_update_hw_stats(aq_dev_t *aq_dev); +void aq_initmedia(aq_dev_t *aq_dev); +int aq_linkstat_isr(void *arg); +int aq_isr_rx(void *arg); +void aq_mediastatus_update(aq_dev_t *aq_dev, u32 link_speed, const struct aq_hw_fc_info *fc_neg); +void aq_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr); +int aq_mediachange(struct ifnet *ifp); +void aq_if_update_admin_status(if_ctx_t ctx); + +#endif Index: sys/dev/aq/aq_fw.h =================================================================== --- /dev/null +++ sys/dev/aq/aq_fw.h @@ -0,0 +1,76 @@ +/** + * aQuantia Corporation Network Driver + * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * (1) Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. 
+ * + * (2) Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * + * (3) The name of the author may not be used to endorse or promote + * products derived from this software without specific prior + * written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS + * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE + * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#ifndef AQ_FW_H +#define AQ_FW_H + +struct aq_hw; + +typedef enum aq_fw_link_speed +{ + aq_fw_none = 0, + aq_fw_100M = (1 << 0), + aq_fw_1G = (1 << 1), + aq_fw_2G5 = (1 << 2), + aq_fw_5G = (1 << 3), + aq_fw_10G = (1 << 4), +} aq_fw_link_speed_t; + +typedef enum aq_fw_link_fc +{ + aq_fw_fc_none = 0, + aq_fw_fc_ENABLE_RX = BIT(0), + aq_fw_fc_ENABLE_TX = BIT(1), + aq_fw_fc_ENABLE_ALL = aq_fw_fc_ENABLE_RX | aq_fw_fc_ENABLE_TX, +} aq_fw_link_fc_t; + +#define aq_fw_speed_auto (aq_fw_100M | aq_fw_1G | aq_fw_2G5 | aq_fw_5G | aq_fw_10G) + +struct aq_firmware_ops +{ + int (*reset)(struct aq_hw* hal); + + int (*set_mode)(struct aq_hw* hal, enum aq_hw_fw_mpi_state_e mode, aq_fw_link_speed_t speed); + int (*get_mode)(struct aq_hw* hal, enum aq_hw_fw_mpi_state_e* mode, aq_fw_link_speed_t* speed, aq_fw_link_fc_t* fc); + + int (*get_mac_addr)(struct aq_hw* hal, u8* mac_addr); + int (*get_stats)(struct aq_hw* hal, struct aq_hw_stats_s* stats); + + int (*led_control)(struct aq_hw* hal, u32 mode); +}; + + +int aq_fw_reset(struct aq_hw* hw); +int aq_fw_ops_init(struct aq_hw* hw); + +#endif // AQ_FW_H Index: sys/dev/aq/aq_fw.c =================================================================== --- /dev/null +++ sys/dev/aq/aq_fw.c @@ -0,0 +1,352 @@ +/* + * aQuantia Corporation Network Driver + * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * (1) Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * (2) Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * + * (3) The name of the author may not be used to endorse or promote + * products derived from this software without specific prior + * written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS + * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE + * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * @file aq_fw.c + * Firmware-related functions implementation. + * @date 2017.12.07 @author roman.agafonov@aquantia.com + */ + +#include +__FBSDID("$FreeBSD$"); + +#include + +#include "aq_common.h" + +#include "aq_hw.h" +#include "aq_hw_llh.h" +#include "aq_hw_llh_internal.h" + +#include "aq_fw.h" +#include "aq_common.h" + +#include "aq_dbg.h" + + +typedef enum aq_fw_bootloader_mode +{ + boot_mode_unknown = 0, + boot_mode_flb, + boot_mode_rbl_flash, + boot_mode_rbl_host_bootload, +} aq_fw_bootloader_mode; + +#define AQ_CFG_HOST_BOOT_DISABLE 0 +// Timeouts +#define RBL_TIMEOUT_MS 10000 +#define MAC_FW_START_TIMEOUT_MS 10000 +#define FW_LOADER_START_TIMEOUT_MS 10000 + +const u32 NO_RESET_SCRATCHPAD_ADDRESS = 0; +const u32 NO_RESET_SCRATCHPAD_LEN_RES = 1; +const u32 NO_RESET_SCRATCHPAD_RBL_STATUS = 2; +const u32 NO_RESET_SCRATCHPAD_RBL_STATUS_2 = 3; +const u32 WRITE_DATA_COMPLETE = 0x55555555; +const u32 WRITE_DATA_CHUNK_DONE = 0xaaaaaaaa; +const u32 WRITE_DATA_FAIL_WRONG_ADDRESS = 0x66666666; + +const u32 WAIT_WRITE_TIMEOUT = 1; +const u32 WAIT_WRITE_TIMEOUT_COUNT = 1000; + +const u32 RBL_STATUS_SUCCESS = 0xabba; +const u32 RBL_STATUS_FAILURE = 0xbad; +const u32 RBL_STATUS_HOST_BOOT = 0xf1a7; + +const u32 SCRATCHPAD_FW_LOADER_STATUS = (0x40 / sizeof(u32)); + + +extern struct aq_firmware_ops aq_fw1x_ops; +extern struct aq_firmware_ops aq_fw2x_ops; + + +int mac_soft_reset_(struct aq_hw* hw, aq_fw_bootloader_mode* mode); +int mac_soft_reset_flb_(struct aq_hw* hw); +int mac_soft_reset_rbl_(struct aq_hw* hw, aq_fw_bootloader_mode* mode); +int wait_init_mac_firmware_(struct aq_hw* hw); + + +int aq_fw_reset(struct aq_hw* hw) +{ + int ver = AQ_READ_REG(hw, 0x18); + u32 bootExitCode = 0; + int k; + + for (k = 0; k < 1000; ++k) { + u32 flbStatus = reg_glb_daisy_chain_status1_get(hw); + bootExitCode = AQ_READ_REG(hw, 0x388); + if (flbStatus != 0x06000000 || bootExitCode != 0) + break; + } + + if (k == 1000) { + aq_log_error("Neither RBL nor FLB started"); + return (-EBUSY); + } + + hw->rbl_enabled = bootExitCode != 0; + + trace(dbg_init, "RBL enabled = %d", hw->rbl_enabled); + + /* Having FW version 0 is an indicator that cold start + * is in progress. This means two things: + * 1) Driver have to wait for FW/HW to finish boot (500ms giveup) + * 2) Driver may skip reset sequence and save time. + */ + if (hw->fast_start_enabled && !ver) { + int err = wait_init_mac_firmware_(hw); + /* Skip reset as it just completed */ + if (!err) + return (0); + } + + aq_fw_bootloader_mode mode = boot_mode_unknown; + int err = mac_soft_reset_(hw, &mode); + if (err < 0) { + aq_log_error("MAC reset failed: %d", err); + return (err); + } + + switch (mode) { + case boot_mode_flb: + aq_log("FLB> F/W successfully loaded from flash."); + hw->flash_present = true; + return wait_init_mac_firmware_(hw); + + case boot_mode_rbl_flash: + aq_log("RBL> F/W loaded from flash. 
Host Bootload disabled."); + hw->flash_present = true; + return wait_init_mac_firmware_(hw); + + case boot_mode_unknown: + aq_log_error("F/W bootload error: unknown bootloader type"); + return (-ENOTSUP); + + case boot_mode_rbl_host_bootload: +#if AQ_CFG_HOST_BOOT_DISABLE + aq_log_error("RBL> Host Bootload mode: this driver does not support Host Boot"); + return (-ENOTSUP); +#else + trace(dbg_init, "RBL> Host Bootload mode"); + break; +#endif // HOST_BOOT_DISABLE + } + + /* + * #todo: Host Boot + */ + aq_log_error("RBL> F/W Host Bootload not implemented"); + + return (-ENOTSUP); +} + +int aq_fw_ops_init(struct aq_hw* hw) +{ + if (hw->fw_version.raw == 0) + hw->fw_version.raw = AQ_READ_REG(hw, 0x18); + + aq_log("MAC F/W version is %d.%d.%d", + hw->fw_version.major_version, hw->fw_version.minor_version, + hw->fw_version.build_number); + + if (hw->fw_version.major_version == 1) { + trace(dbg_init, "using F/W ops v1.x"); + hw->fw_ops = &aq_fw1x_ops; + return (EOK); + } else if (hw->fw_version.major_version >= 2) { + trace(dbg_init, "using F/W ops v2.x"); + hw->fw_ops = &aq_fw2x_ops; + return (EOK); + } + + aq_log_error("aq_fw_ops_init(): invalid F/W version %#x", hw->fw_version.raw); + return (-ENOTSUP); +} + + +int mac_soft_reset_(struct aq_hw* hw, aq_fw_bootloader_mode* mode /*= nullptr*/) +{ + if (hw->rbl_enabled) { + return mac_soft_reset_rbl_(hw, mode); + } else { + if (mode) + *mode = boot_mode_flb; + + return mac_soft_reset_flb_(hw); + } +} + +int mac_soft_reset_flb_(struct aq_hw* hw) +{ + int k; + + reg_global_ctl2_set(hw, 0x40e1); + // Let Felicity hardware to complete SMBUS transaction before Global software reset. + msec_delay(50); + + /* + * If SPI burst transaction was interrupted(before running the script), global software + * reset may not clear SPI interface. Clean it up manually before global reset. + */ + reg_glb_nvr_provisioning2_set(hw, 0xa0); + reg_glb_nvr_interface1_set(hw, 0x9f); + reg_glb_nvr_interface1_set(hw, 0x809f); + msec_delay(50); + + reg_glb_standard_ctl1_set(hw, (reg_glb_standard_ctl1_get(hw) & ~glb_reg_res_dis_msk) | glb_soft_res_msk); + + // Kickstart. + reg_global_ctl2_set(hw, 0x80e0); + reg_mif_power_gating_enable_control_set(hw, 0); + if (!hw->fast_start_enabled) + reg_glb_general_provisioning9_set(hw, 1); + + /* + * For the case SPI burst transaction was interrupted (by MCP reset above), + * wait until it is completed by hardware. + */ + msec_delay(50); // Sleep for 10 ms. + + /* MAC Kickstart */ + if (!hw->fast_start_enabled) { + reg_global_ctl2_set(hw, 0x180e0); + + u32 flb_status = 0; + int k; + for (k = 0; k < 1000; ++k) { + flb_status = reg_glb_daisy_chain_status1_get(hw) & 0x10; + if (flb_status != 0) + break; + msec_delay(10); // Sleep for 10 ms. + } + + if (flb_status == 0) { + trace_error(dbg_init, "FLB> MAC kickstart failed: timed out"); + return (false); + } + + trace(dbg_init, "FLB> MAC kickstart done, %d ms", k); + /* FW reset */ + reg_global_ctl2_set(hw, 0x80e0); + // Let Felicity hardware complete SMBUS transaction before Global software reset. 
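+ // (The FW reset write above (0x80e0) gets the 50 ms settle delay below before CPU semaphore 0 is set.)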
+ msec_delay(50); + } + reg_glb_cpu_sem_set(hw, 1, 0); + + // PHY Kickstart: #undone + + // Global software reset + rx_rx_reg_res_dis_set(hw, 0); + tx_tx_reg_res_dis_set(hw, 0); + mpi_tx_reg_res_dis_set(hw, 0); + reg_glb_standard_ctl1_set(hw, (reg_glb_standard_ctl1_get(hw) & ~glb_reg_res_dis_msk) | glb_soft_res_msk); + + bool restart_completed = false; + for (k = 0; k < 1000; ++k) { + restart_completed = reg_glb_fw_image_id1_get(hw) != 0; + if (restart_completed) + break; + msec_delay(10); + } + + if (!restart_completed) { + trace_error(dbg_init, "FLB> Global Soft Reset failed"); + return (false); + } + + trace(dbg_init, "FLB> F/W restart: %d ms", k * 10); + return (true); +} + +int mac_soft_reset_rbl_(struct aq_hw* hw, aq_fw_bootloader_mode* mode) +{ + trace(dbg_init, "RBL> MAC reset STARTED!"); + + reg_global_ctl2_set(hw, 0x40e1); + reg_glb_cpu_sem_set(hw, 1, 0); + reg_mif_power_gating_enable_control_set(hw, 0); + + // MAC FW will reload PHY FW if 1E.1000.3 was cleaned - #undone + + reg_glb_cpu_no_reset_scratchpad_set(hw, 0xDEAD, NO_RESET_SCRATCHPAD_RBL_STATUS); + + // Global software reset + rx_rx_reg_res_dis_set(hw, 0); + tx_tx_reg_res_dis_set(hw, 0); + mpi_tx_reg_res_dis_set(hw, 0); + reg_glb_standard_ctl1_set(hw, (reg_glb_standard_ctl1_get(hw) & ~glb_reg_res_dis_msk) | glb_soft_res_msk); + + reg_global_ctl2_set(hw, 0x40e0); + + // Wait for RBL to finish boot process. + u16 rbl_status = 0; + for (int k = 0; k < RBL_TIMEOUT_MS; ++k) { + rbl_status = LOWORD(reg_glb_cpu_no_reset_scratchpad_get(hw, NO_RESET_SCRATCHPAD_RBL_STATUS)); + if (rbl_status != 0 && rbl_status != 0xDEAD) + break; + + msec_delay(1); + } + + if (rbl_status == 0 || rbl_status == 0xDEAD) { + trace_error(dbg_init, "RBL> RBL restart failed: timeout"); + return (-EBUSY); + } + + if (rbl_status == RBL_STATUS_SUCCESS) { + if (mode) + *mode = boot_mode_rbl_flash; + trace(dbg_init, "RBL> reset complete! [Flash]"); + } else if (rbl_status == RBL_STATUS_HOST_BOOT) { + if (mode) + *mode = boot_mode_rbl_host_bootload; + trace(dbg_init, "RBL> reset complete! [Host Bootload]"); + } else { + trace_error(dbg_init, "unknown RBL status 0x%x", rbl_status); + return (-EBUSY); + } + + return (EOK); +} + +int wait_init_mac_firmware_(struct aq_hw* hw) +{ + for (int i = 0; i < MAC_FW_START_TIMEOUT_MS; ++i) { + if ((hw->fw_version.raw = AQ_READ_REG(hw, 0x18)) != 0) + return (EOK); + + msec_delay(1); + } + + trace_error(dbg_init, "timeout waiting for reg 0x18. MAC f/w NOT READY"); + return (-EBUSY); +} Index: sys/dev/aq/aq_fw1x.c =================================================================== --- /dev/null +++ sys/dev/aq/aq_fw1x.c @@ -0,0 +1,313 @@ +/** + * aQuantia Corporation Network Driver + * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * (1) Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * (2) Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * + * (3) The name of the author may not be used to endorse or promote + * products derived from this software without specific prior + * written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS + * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE + * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include +__FBSDID("$FreeBSD$"); + +#include + +#include "aq_common.h" +#include "aq_hw.h" +#include "aq_hw_llh.h" +#include "aq_hw_llh_internal.h" +#include "aq_fw.h" +#include "aq_dbg.h" + + +#define FW1X_MPI_CONTROL_ADR 0x368 +#define FW1X_MPI_STATE_ADR 0x36C + + +typedef enum fw1x_mode { + FW1X_MPI_DEINIT = 0, + FW1X_MPI_RESERVED = 1, + FW1X_MPI_INIT = 2, + FW1X_MPI_POWER = 4, +} fw1x_mode; + +typedef enum aq_fw1x_rate { + FW1X_RATE_10G = 1 << 0, + FW1X_RATE_5G = 1 << 1, + FW1X_RATE_5GSR = 1 << 2, + FW1X_RATE_2G5 = 1 << 3, + FW1X_RATE_1G = 1 << 4, + FW1X_RATE_100M = 1 << 5, + FW1X_RATE_INVALID = 1 << 6, +} aq_fw1x_rate; + +typedef union fw1x_state_reg { + u32 val; + struct { + u8 mode; + u8 reserved1; + u8 speed; + u8 reserved2 : 1; + u8 disableDirtyWake : 1; + u8 reserved3 : 2; + u8 downshift : 4; + }; +} fw1x_state_reg; + +int fw1x_reset(struct aq_hw* hw); + +int fw1x_set_mode(struct aq_hw* hw, enum aq_hw_fw_mpi_state_e mode, aq_fw_link_speed_t speed); +int fw1x_get_mode(struct aq_hw* hw, enum aq_hw_fw_mpi_state_e* mode, aq_fw_link_speed_t* speed, aq_fw_link_fc_t* fc); +int fw1x_get_mac_addr(struct aq_hw* hw, u8* mac_addr); +int fw1x_get_stats(struct aq_hw* hw, struct aq_hw_stats_s* stats); + + +static +fw1x_mode mpi_mode_to_fw1x_(enum aq_hw_fw_mpi_state_e mode) +{ + switch (mode) { + case MPI_DEINIT: + return (FW1X_MPI_DEINIT); + + case MPI_INIT: + return (FW1X_MPI_INIT); + + case MPI_POWER: + return (FW1X_MPI_POWER); + + case MPI_RESET: + return (FW1X_MPI_RESERVED); + } + + /* + * We shouldn't get here. + */ + + return (FW1X_MPI_RESERVED); +} + +static +aq_fw1x_rate link_speed_mask_to_fw1x_(u32 /*aq_fw_link_speed*/ speed) +{ + u32 rate = 0; + if (speed & aq_fw_10G) + rate |= FW1X_RATE_10G; + + if (speed & aq_fw_5G) { + rate |= FW1X_RATE_5G; + rate |= FW1X_RATE_5GSR; + } + + if (speed & aq_fw_2G5) + rate |= FW1X_RATE_2G5; + + if (speed & aq_fw_1G) + rate |= FW1X_RATE_1G; + + if (speed & aq_fw_100M) + rate |= FW1X_RATE_100M; + + return ((aq_fw1x_rate)rate); +} + +static +aq_fw_link_speed_t fw1x_rate_to_link_speed_(aq_fw1x_rate rate) +{ + switch (rate) { + case FW1X_RATE_10G: + return (aq_fw_10G); + case FW1X_RATE_5G: + case FW1X_RATE_5GSR: + return (aq_fw_5G); + case FW1X_RATE_2G5: + return (aq_fw_2G5); + case FW1X_RATE_1G: + return (aq_fw_1G); + case FW1X_RATE_100M: + return (aq_fw_100M); + case FW1X_RATE_INVALID: + return (aq_fw_none); + } + + /* + * We should never get here. + */ + + return (aq_fw_none); +} + +int fw1x_reset(struct aq_hw* hal) +{ + u32 tid0 = ~0u; /*< Initial value of MBOX transactionId. */ + struct aq_hw_fw_mbox mbox; + const int retryCount = 1000; + + for (int i = 0; i < retryCount; ++i) { + // Read the beginning of Statistics structure to capture the Transaction ID. 
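+ // Only the dwords ahead of the stats member are fetched here (version, transaction_id, error); once transaction_id differs from the first sample, the firmware is considered alive.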
+ aq_hw_fw_downld_dwords(hal, hal->mbox_addr, (u32*)&mbox, + (u32)((char*)&mbox.stats - (char*)&mbox) / sizeof(u32)); + + // Successfully read the stats. + if (tid0 == ~0U) { + // We have read the initial value. + tid0 = mbox.transaction_id; + continue; + } else if (mbox.transaction_id != tid0) { + /* + * Compare transaction ID to initial value. + * If it's different means f/w is alive. We're done. + */ + + return (EOK); + } + + /* + * Transaction ID value haven't changed since last time. + * Try reading the stats again. + */ + usec_delay(10); + } + + trace_error(dbg_init, "F/W 1.x reset finalize timeout"); + return (-EBUSY); +} + +int fw1x_set_mode(struct aq_hw* hw, enum aq_hw_fw_mpi_state_e mode, aq_fw_link_speed_t speed) +{ + union fw1x_state_reg state = {0}; + state.mode = mpi_mode_to_fw1x_(mode); + state.speed = link_speed_mask_to_fw1x_(speed); + + trace(dbg_init, "fw1x> set mode %d, rate mask = %#x; raw = %#x", state.mode, state.speed, state.val); + + AQ_WRITE_REG(hw, FW1X_MPI_CONTROL_ADR, state.val); + + return (EOK); +} + +int fw1x_get_mode(struct aq_hw* hw, enum aq_hw_fw_mpi_state_e* mode, aq_fw_link_speed_t* speed, aq_fw_link_fc_t* fc) +{ + union fw1x_state_reg state = { .val = AQ_READ_REG(hw, AQ_HW_MPI_STATE_ADR) }; + + trace(dbg_init, "fw1x> get_mode(): 0x36c -> %x, 0x368 -> %x", state.val, AQ_READ_REG(hw, AQ_HW_MPI_CONTROL_ADR)); + + enum aq_hw_fw_mpi_state_e md = MPI_DEINIT; + + switch (state.mode) { + case FW1X_MPI_DEINIT: + md = MPI_DEINIT; + break; + case FW1X_MPI_RESERVED: + md = MPI_RESET; + break; + case FW1X_MPI_INIT: + md = MPI_INIT; + break; + case FW1X_MPI_POWER: + md = MPI_POWER; + break; + } + + if (mode) + *mode = md; + + if (speed) + *speed = fw1x_rate_to_link_speed_(state.speed); + + *fc = aq_fw_fc_none; + + AQ_DBG_EXIT(EOK); + return (EOK); +} + + +int fw1x_get_mac_addr(struct aq_hw* hw, u8* mac) +{ + int err = -EFAULT; + u32 mac_addr[2]; + + AQ_DBG_ENTER(); + + u32 efuse_shadow_addr = AQ_READ_REG(hw, 0x374); + if (efuse_shadow_addr == 0) { + trace_error(dbg_init, "couldn't read eFUSE Shadow Address"); + AQ_DBG_EXIT(-EFAULT); + return (-EFAULT); + } + + err = aq_hw_fw_downld_dwords(hw, efuse_shadow_addr + (40 * 4), + mac_addr, ARRAY_SIZE(mac_addr)); + if (err < 0) { + mac_addr[0] = 0; + mac_addr[1] = 0; + AQ_DBG_EXIT(err); + return (err); + } + + mac_addr[0] = bswap32(mac_addr[0]); + mac_addr[1] = bswap32(mac_addr[1]); + + memcpy(mac, (u8*)mac_addr, ETH_MAC_LEN); + + trace(dbg_init, "fw1x> eFUSE MAC addr -> %02x-%02x-%02x-%02x-%02x-%02x", + mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]); + + AQ_DBG_EXIT(EOK); + return (EOK); +} + +int fw1x_get_stats(struct aq_hw* hw, struct aq_hw_stats_s* stats) +{ + int err = 0; + + AQ_DBG_ENTER(); + err = aq_hw_fw_downld_dwords(hw, hw->mbox_addr, (u32*)(void*)&hw->mbox, + sizeof hw->mbox / sizeof(u32)); + + if (err >= 0) { + if (stats != &hw->mbox.stats) + memcpy(stats, &hw->mbox.stats, sizeof *stats); + + stats->dpc = reg_rx_dma_stat_counter7get(hw); + } + + AQ_DBG_EXIT(err); + return (err); +} + +struct aq_firmware_ops aq_fw1x_ops = +{ + .reset = fw1x_reset, + + .set_mode = fw1x_set_mode, + .get_mode = fw1x_get_mode, + + .get_mac_addr = fw1x_get_mac_addr, + .get_stats = fw1x_get_stats, +}; + Index: sys/dev/aq/aq_fw2x.c =================================================================== --- /dev/null +++ sys/dev/aq/aq_fw2x.c @@ -0,0 +1,498 @@ +/** + * aQuantia Corporation Network Driver + * Copyright (C) 2014-2017 aQuantia Corporation. 
All rights reserved + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * (1) Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * (2) Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * + * (3) The name of the author may not be used to endorse or promote + * products derived from this software without specific prior + * written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS + * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE + * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * @file aq_fw2x.c + * Firmware v2.x specific functions. + * @date 2017.12.11 @author roman.agafonov@aquantia.com + */ +#include +__FBSDID("$FreeBSD$"); + +#include + +#include "aq_common.h" + + +#include "aq_hw.h" +#include "aq_hw_llh.h" +#include "aq_hw_llh_internal.h" + +#include "aq_fw.h" + +#include "aq_dbg.h" + +typedef enum { + CAPS_LO_10BASET_HD = 0x00, + CAPS_LO_10BASET_FD, + CAPS_LO_100BASETX_HD, + CAPS_LO_100BASET4_HD, + CAPS_LO_100BASET2_HD, + CAPS_LO_100BASETX_FD, + CAPS_LO_100BASET2_FD, + CAPS_LO_1000BASET_HD, + CAPS_LO_1000BASET_FD, + CAPS_LO_2P5GBASET_FD, + CAPS_LO_5GBASET_FD, + CAPS_LO_10GBASET_FD, +} fw2x_caps_lo; + +typedef enum { + CAPS_HI_RESERVED1 = 0x00, + CAPS_HI_10BASET_EEE, + CAPS_HI_RESERVED2, + CAPS_HI_PAUSE, + CAPS_HI_ASYMMETRIC_PAUSE, + CAPS_HI_100BASETX_EEE, + CAPS_HI_RESERVED3, + CAPS_HI_RESERVED4, + CAPS_HI_1000BASET_FD_EEE, + CAPS_HI_2P5GBASET_FD_EEE, + CAPS_HI_5GBASET_FD_EEE, + CAPS_HI_10GBASET_FD_EEE, + CAPS_HI_RESERVED5, + CAPS_HI_RESERVED6, + CAPS_HI_RESERVED7, + CAPS_HI_RESERVED8, + CAPS_HI_RESERVED9, + CAPS_HI_CABLE_DIAG, + CAPS_HI_TEMPERATURE, + CAPS_HI_DOWNSHIFT, + CAPS_HI_PTP_AVB_EN, + CAPS_HI_MEDIA_DETECT, + CAPS_HI_LINK_DROP, + CAPS_HI_SLEEP_PROXY, + CAPS_HI_WOL, + CAPS_HI_MAC_STOP, + CAPS_HI_EXT_LOOPBACK, + CAPS_HI_INT_LOOPBACK, + CAPS_HI_EFUSE_AGENT, + CAPS_HI_WOL_TIMER, + CAPS_HI_STATISTICS, + CAPS_HI_TRANSACTION_ID, +} fw2x_caps_hi; + +typedef enum aq_fw2x_rate +{ + FW2X_RATE_100M = 0x20, + FW2X_RATE_1G = 0x100, + FW2X_RATE_2G5 = 0x200, + FW2X_RATE_5G = 0x400, + FW2X_RATE_10G = 0x800, +} aq_fw2x_rate; + + +typedef struct fw2x_msm_statistics +{ + uint32_t uprc; + uint32_t mprc; + uint32_t bprc; + uint32_t erpt; + uint32_t uptc; + uint32_t mptc; + uint32_t bptc; + uint32_t erpr; + uint32_t mbtc; + uint32_t bbtc; + uint32_t mbrc; + uint32_t bbrc; + uint32_t ubrc; + uint32_t ubtc; + uint32_t ptc; + uint32_t prc; +} fw2x_msm_statistics; + +typedef struct fw2x_phy_cable_diag_data +{ + u32 lane_data[4]; +} fw2x_phy_cable_diag_data; + +typedef struct fw2x_capabilities { + u32 caps_lo; + u32 caps_hi; +} 
fw2x_capabilities; + +typedef struct fw2x_mailbox // struct fwHostInterface +{ + u32 version; + u32 transaction_id; + s32 error; + fw2x_msm_statistics msm; // msmStatistics_t msm; + u16 phy_h_bit; + u16 phy_fault_code; + s16 phy_temperature; + u8 cable_len; + u8 reserved1; + fw2x_phy_cable_diag_data diag_data; + u32 reserved[8]; + + fw2x_capabilities caps; + + /* ... */ +} fw2x_mailbox; + + +// EEE caps +#define FW2X_FW_CAP_EEE_100M (1ULL << (32 + CAPS_HI_100BASETX_EEE)) +#define FW2X_FW_CAP_EEE_1G (1ULL << (32 + CAPS_HI_1000BASET_FD_EEE)) +#define FW2X_FW_CAP_EEE_2G5 (1ULL << (32 + CAPS_HI_2P5GBASET_FD_EEE)) +#define FW2X_FW_CAP_EEE_5G (1ULL << (32 + CAPS_HI_5GBASET_FD_EEE)) +#define FW2X_FW_CAP_EEE_10G (1ULL << (32 + CAPS_HI_10GBASET_FD_EEE)) + +// Flow Control +#define FW2X_FW_CAP_PAUSE (1ULL << (32 + CAPS_HI_PAUSE)) +#define FW2X_FW_CAP_ASYM_PAUSE (1ULL << (32 + CAPS_HI_ASYMMETRIC_PAUSE)) + +// Link Drop +#define FW2X_CAP_LINK_DROP (1ull << (32 + CAPS_HI_LINK_DROP)) + +// MSM Statistics +#define FW2X_CAP_STATISTICS (1ull << (32 + CAPS_HI_STATISTICS)) + + +#define FW2X_RATE_MASK (FW2X_RATE_100M | FW2X_RATE_1G | FW2X_RATE_2G5 | FW2X_RATE_5G | FW2X_RATE_10G) +#define FW2X_EEE_MASK (FW2X_FW_CAP_EEE_100M | FW2X_FW_CAP_EEE_1G | FW2X_FW_CAP_EEE_2G5 | FW2X_FW_CAP_EEE_5G | FW2X_FW_CAP_EEE_10G) + + +#define FW2X_MPI_LED_ADDR 0x31c +#define FW2X_MPI_CONTROL_ADDR 0x368 +#define FW2X_MPI_STATE_ADDR 0x370 + +#define FW2X_FW_MIN_VER_LED 0x03010026U + +#define FW2X_LED_BLINK 0x2U +#define FW2X_LED_DEFAULT 0x0U + +// Firmware v2-3.x specific functions. +//@{ +int fw2x_reset(struct aq_hw* hw); + +int fw2x_set_mode(struct aq_hw* hw, enum aq_hw_fw_mpi_state_e mode, aq_fw_link_speed_t speed); +int fw2x_get_mode(struct aq_hw* hw, enum aq_hw_fw_mpi_state_e* mode, aq_fw_link_speed_t* speed, aq_fw_link_fc_t* fc); + +int fw2x_get_mac_addr(struct aq_hw* hw, u8* mac); +int fw2x_get_stats(struct aq_hw* hw, struct aq_hw_stats_s* stats); +//@} + + + +static u64 read64_(struct aq_hw* hw, u32 addr) +{ + u64 lo = AQ_READ_REG(hw, addr); + u64 hi = AQ_READ_REG(hw, addr + 4); + return (lo | (hi << 32)); +} + +static uint64_t get_mpi_ctrl_(struct aq_hw* hw) +{ + return read64_(hw, FW2X_MPI_CONTROL_ADDR); +} + +static uint64_t get_mpi_state_(struct aq_hw* hw) +{ + return read64_(hw, FW2X_MPI_STATE_ADDR); +} + +static void set_mpi_ctrl_(struct aq_hw* hw, u64 value) +{ + AQ_WRITE_REG(hw, FW2X_MPI_CONTROL_ADDR, (u32)value); + AQ_WRITE_REG(hw, FW2X_MPI_CONTROL_ADDR + 4, (u32)(value >> 32)); +} + + +int fw2x_reset(struct aq_hw* hw) +{ + fw2x_capabilities caps = {0}; + AQ_DBG_ENTER(); + int err = aq_hw_fw_downld_dwords(hw, hw->mbox_addr + offsetof(fw2x_mailbox, caps), (u32*)&caps, sizeof caps/sizeof(u32)); + if (err == EOK) { + hw->fw_caps = caps.caps_lo | ((u64)caps.caps_hi << 32); + trace(dbg_init, "fw2x> F/W capabilities mask = %llx", (unsigned long long)hw->fw_caps); + } else { + trace_error(dbg_init, "fw2x> can't get F/W capabilities mask, error %d", err); + } + + AQ_DBG_EXIT(EOK); + return (EOK); +} + + +static +aq_fw2x_rate link_speed_mask_to_fw2x_(u32 speed) +{ + u32 rate = 0; + + AQ_DBG_ENTER(); + if (speed & aq_fw_10G) + rate |= FW2X_RATE_10G; + + if (speed & aq_fw_5G) + rate |= FW2X_RATE_5G; + + if (speed & aq_fw_2G5) + rate |= FW2X_RATE_2G5; + + if (speed & aq_fw_1G) + rate |= FW2X_RATE_1G; + + if (speed & aq_fw_100M) + rate |= FW2X_RATE_100M; + + AQ_DBG_EXIT(rate); + return ((aq_fw2x_rate)rate); +} + + +int fw2x_set_mode(struct aq_hw* hw, enum aq_hw_fw_mpi_state_e mode, aq_fw_link_speed_t speed) +{ + u64 mpi_ctrl = 
get_mpi_ctrl_(hw); + + AQ_DBG_ENTERA("speed=%d", speed); + switch (mode) { + case MPI_INIT: + mpi_ctrl &= ~FW2X_RATE_MASK; + mpi_ctrl |= link_speed_mask_to_fw2x_(speed); + mpi_ctrl &= ~FW2X_CAP_LINK_DROP; +#if 0 // #todo #flowcontrol #pause #eee + if (pHal->pCfg->eee) + mpi_ctrl |= FW2X_EEE_MASK; +#endif + if (hw->fc.fc_rx) + mpi_ctrl |= FW2X_FW_CAP_PAUSE; + if (hw->fc.fc_tx) + mpi_ctrl |= FW2X_FW_CAP_ASYM_PAUSE; + break; + + case MPI_DEINIT: + mpi_ctrl &= ~(FW2X_RATE_MASK | FW2X_EEE_MASK); + mpi_ctrl &= ~(FW2X_FW_CAP_PAUSE | FW2X_FW_CAP_ASYM_PAUSE); + break; + + default: + trace_error(dbg_init, "fw2x> unknown MPI state %d", mode); + return (-EINVAL); + } + + set_mpi_ctrl_(hw, mpi_ctrl); + AQ_DBG_EXIT(EOK); + return (EOK); +} + +int fw2x_get_mode(struct aq_hw* hw, enum aq_hw_fw_mpi_state_e* mode, aq_fw_link_speed_t* link_speed, aq_fw_link_fc_t* fc) +{ + u64 mpi_state = get_mpi_state_(hw); + u32 rates = mpi_state & FW2X_RATE_MASK; + + // AQ_DBG_ENTER(); + + if (mode) { + u64 mpi_ctrl = get_mpi_ctrl_(hw); + if (mpi_ctrl & FW2X_RATE_MASK) + *mode = MPI_INIT; + else + *mode = MPI_DEINIT; + } + + aq_fw_link_speed_t speed = aq_fw_none; + + if (rates & FW2X_RATE_10G) + speed = aq_fw_10G; + else if (rates & FW2X_RATE_5G) + speed = aq_fw_5G; + else if (rates & FW2X_RATE_2G5) + speed = aq_fw_2G5; + else if (rates & FW2X_RATE_1G) + speed = aq_fw_1G; + else if (rates & FW2X_RATE_100M) + speed = aq_fw_100M; + + if (link_speed) + *link_speed = speed; + + *fc = (mpi_state & (FW2X_FW_CAP_PAUSE | FW2X_FW_CAP_ASYM_PAUSE)) >> (32 + CAPS_HI_PAUSE); + + +// AQ_DBG_EXIT(0); + return (EOK); +} + + +int fw2x_get_mac_addr(struct aq_hw* hw, u8* mac) +{ + int err = -EFAULT; + u32 mac_addr[2]; + + AQ_DBG_ENTER(); + + u32 efuse_shadow_addr = AQ_READ_REG(hw, 0x364); + if (efuse_shadow_addr == 0) { + trace_error(dbg_init, "couldn't read eFUSE Shadow Address"); + AQ_DBG_EXIT(-EFAULT); + return (-EFAULT); + } + + err = aq_hw_fw_downld_dwords(hw, efuse_shadow_addr + (40 * 4), + mac_addr, ARRAY_SIZE(mac_addr)); + if (err < 0) { + mac_addr[0] = 0; + mac_addr[1] = 0; + AQ_DBG_EXIT(err); + return (err); + } + + mac_addr[0] = bswap32(mac_addr[0]); + mac_addr[1] = bswap32(mac_addr[1]); + + memcpy(mac, (u8*)mac_addr, ETH_MAC_LEN); + + AQ_DBG_EXIT(EOK); + return (EOK); +} + +static inline +void fw2x_stats_to_fw_stats_(struct aq_hw_stats_s* dst, const fw2x_msm_statistics* src) +{ + dst->uprc = src->uprc; + dst->mprc = src->mprc; + dst->bprc = src->bprc; + dst->erpt = src->erpt; + dst->uptc = src->uptc; + dst->mptc = src->mptc; + dst->bptc = src->bptc; + dst->erpr = src->erpr; + dst->mbtc = src->mbtc; + dst->bbtc = src->bbtc; + dst->mbrc = src->mbrc; + dst->bbrc = src->bbrc; + dst->ubrc = src->ubrc; + dst->ubtc = src->ubtc; + dst->ptc = src->ptc; + dst->prc = src->prc; +} + + +static bool toggle_mpi_ctrl_and_wait_(struct aq_hw* hw, u64 mask, u32 timeout_ms, u32 try_count) +{ + u64 ctrl = get_mpi_ctrl_(hw); + u64 state = get_mpi_state_(hw); + + // AQ_DBG_ENTER(); + // First, check that control and state values are consistent + if ((ctrl & mask) != (state & mask)) { + trace_warn(dbg_fw, "fw2x> MPI control (%#llx) and state (%#llx) are not consistent for mask %#llx!", + (unsigned long long)ctrl, (unsigned long long)state, (unsigned long long)mask); + AQ_DBG_EXIT(false); + return (false); + } + + // Invert bits (toggle) in control register + ctrl ^= mask; + set_mpi_ctrl_(hw, ctrl); + + // Clear all bits except masked + ctrl &= mask; + + // Wait for FW reflecting change in state register + while (try_count-- != 0) { + if 
((get_mpi_state_(hw) & mask) == ctrl) + { +// AQ_DBG_EXIT(true); + return (true); + } + msec_delay(timeout_ms); + } + + trace_detail(dbg_fw, "f/w2x> timeout while waiting for response in state register for bit %#llx!", (unsigned long long)mask); + // AQ_DBG_EXIT(false); + return (false); +} + + +int fw2x_get_stats(struct aq_hw* hw, struct aq_hw_stats_s* stats) +{ + int err = 0; + fw2x_msm_statistics fw2x_stats = {0}; + +// AQ_DBG_ENTER(); + + if ((hw->fw_caps & FW2X_CAP_STATISTICS) == 0) { + trace_warn(dbg_fw, "fw2x> statistics not supported by F/W"); + return (-ENOTSUP); + } + + // Say to F/W to update the statistics + if (!toggle_mpi_ctrl_and_wait_(hw, FW2X_CAP_STATISTICS, 1, 25)) { + trace_error(dbg_fw, "fw2x> statistics update timeout"); + AQ_DBG_EXIT(-ETIME); + return (-ETIME); + } + + err = aq_hw_fw_downld_dwords(hw, hw->mbox_addr + offsetof(fw2x_mailbox, msm), + (u32*)&fw2x_stats, sizeof fw2x_stats/sizeof(u32)); + + fw2x_stats_to_fw_stats_(stats, &fw2x_stats); + + if (err != EOK) + trace_error(dbg_fw, "fw2x> download statistics data FAILED, error %d", err); + +// AQ_DBG_EXIT(err); + return (err); +} + +static int fw2x_led_control(struct aq_hw* hw, u32 onoff) +{ + int err = 0; + + AQ_DBG_ENTER(); + + aq_hw_fw_version ver_expected = { .raw = FW2X_FW_MIN_VER_LED}; + if (aq_hw_ver_match(&ver_expected, &hw->fw_version)) + AQ_WRITE_REG(hw, FW2X_MPI_LED_ADDR, (onoff)? + ((FW2X_LED_BLINK) | (FW2X_LED_BLINK << 2) | (FW2X_LED_BLINK << 4)): + (FW2X_LED_DEFAULT)); + + AQ_DBG_EXIT(err); + return (err); +} + +struct aq_firmware_ops aq_fw2x_ops = +{ + .reset = fw2x_reset, + + .set_mode = fw2x_set_mode, + .get_mode = fw2x_get_mode, + + .get_mac_addr = fw2x_get_mac_addr, + .get_stats = fw2x_get_stats, + + .led_control = fw2x_led_control, +}; Index: sys/dev/aq/aq_hw.h =================================================================== --- /dev/null +++ sys/dev/aq/aq_hw.h @@ -0,0 +1,356 @@ +/* + * aQuantia Corporation Network Driver + * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * (1) Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * (2) Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * + * (3)The name of the author may not be used to endorse or promote + * products derived from this software without specific prior + * written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS + * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE + * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef _AQ_HW_H_ +#define _AQ_HW_H_ + +#include +#include +#include +#include +#include +#include "aq_common.h" + +#define AQ_WRITE_REG(hw, reg, value) writel(((hw)->hw_addr + (reg)), htole32(value)) + +#define AQ_READ_REG(hw, reg) le32toh(readl((hw)->hw_addr + reg)) + + +#define AQ_WRITE_REG_BIT(hw, reg, msk, shift, value) do { \ + if (msk ^ ~0) { \ + u32 reg_old, reg_new = 0U; \ + reg_old = AQ_READ_REG(hw, reg); \ + reg_new = (reg_old & (~msk)) | (value << shift); \ + if (reg_old != reg_new) \ + AQ_WRITE_REG(hw, reg, reg_new); \ + } else { \ + AQ_WRITE_REG(hw, reg, value); \ + } } while(0) + + +#define AQ_READ_REG_BIT(a, reg, msk, shift) ( \ + ((AQ_READ_REG(a, reg) & msk) >> shift)) + +#define AQ_HW_FLUSH() { (void)AQ_READ_REG(hw, 0x10); } + +#define aq_hw_write_reg_bit AQ_WRITE_REG_BIT + +#define aq_hw_write_reg AQ_WRITE_REG + +/* Statistics */ +struct aq_hw_stats { + u64 crcerrs; +}; + +struct aq_hw_stats_s { + u32 uprc; + u32 mprc; + u32 bprc; + u32 erpt; + u32 uptc; + u32 mptc; + u32 bptc; + u32 erpr; + u32 mbtc; + u32 bbtc; + u32 mbrc; + u32 bbrc; + u32 ubrc; + u32 ubtc; + u32 ptc; + u32 prc; + u32 dpc; + u32 cprc; +} __attribute__((__packed__)); + +union ip_addr { + struct { + u8 addr[16]; + } v6; + struct { + u8 padding[12]; + u8 addr[4]; + } v4; +} __attribute__((__packed__)); + +struct aq_hw_fw_mbox { + u32 version; + u32 transaction_id; + int error; + struct aq_hw_stats_s stats; +} __attribute__((__packed__)); + +typedef struct aq_hw_fw_version { + union { + struct { + u16 build_number; + u8 minor_version; + u8 major_version; + }; + u32 raw; + }; +} aq_hw_fw_version; + +enum aq_hw_irq_type { + aq_irq_invalid = 0, + aq_irq_legacy = 1, + aq_irq_msi = 2, + aq_irq_msix = 3, +}; + +struct aq_hw_fc_info { + bool fc_rx; + bool fc_tx; +}; + +struct aq_hw { + void *aq_dev; + u8 *hw_addr; + u32 regs_size; + + u8 mac_addr[ETH_MAC_LEN]; + + enum aq_hw_irq_type irq_type; + + struct aq_hw_fc_info fc; + u16 link_rate; + + u16 device_id; + u16 subsystem_vendor_id; + u16 subsystem_device_id; + u16 vendor_id; + u8 revision_id; + + /* Interrupt Moderation value. */ + int itr; + + /* Firmware-related stuff. 
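+	 * fw_version is checked against AQ_CFG_FW_MIN_VER_EXPECTED in
+	 * aq_hw_init_ucp() in aq_hw.c.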
*/ + aq_hw_fw_version fw_version; + const struct aq_firmware_ops* fw_ops; + bool rbl_enabled; + bool fast_start_enabled; + bool flash_present; + u32 chip_features; + u64 fw_caps; + + bool lro_enabled; + + u32 mbox_addr; + struct aq_hw_fw_mbox mbox; +}; + +#define aq_hw_s aq_hw + +#define AQ_HW_MAC 0U +#define AQ_HW_MAC_MIN 1U +#define AQ_HW_MAC_MAX 33U + +#define HW_ATL_B0_MIN_RXD 32U +#define HW_ATL_B0_MIN_TXD 32U +#define HW_ATL_B0_MAX_RXD 4096U /* in fact up to 8184, but closest to power of 2 */ +#define HW_ATL_B0_MAX_TXD 4096U /* in fact up to 8184, but closest to power of 2 */ + +#define HW_ATL_B0_MTU_JUMBO 16352U +#define HW_ATL_B0_TSO_SIZE (160*1024) +#define HW_ATL_B0_RINGS_MAX 32U +#define HW_ATL_B0_LRO_RXD_MAX 16U + +#define AQ_HW_FW_SM_RAM 0x2U + +#define AQ_HW_MPI_STATE_MSK 0x00FFU +#define AQ_HW_MPI_STATE_SHIFT 0U + +#define AQ_HW_MPI_CONTROL_ADR 0x0368U +#define AQ_HW_MPI_STATE_ADR 0x036CU + +#define HW_ATL_RSS_INDIRECTION_TABLE_MAX 64U +#define HW_ATL_RSS_HASHKEY_SIZE 40U + +/* PCI core control register */ +#define AQ_HW_PCI_REG_CONTROL_6_ADR 0x1014U +/* tx dma total request limit */ +#define AQ_HW_TX_DMA_TOTAL_REQ_LIMIT_ADR 0x00007b20U + +#define AQ_HW_TXBUF_MAX 160U +#define AQ_HW_RXBUF_MAX 320U + +#define L2_FILTER_ACTION_DISCARD (0x0) +#define L2_FILTER_ACTION_HOST (0x1) + +#define AQ_HW_UCP_0X370_REG (0x370) +#define AQ_HW_CHIP_MIPS 0x00000001U +#define AQ_HW_CHIP_TPO2 0x00000002U +#define AQ_HW_CHIP_RPF2 0x00000004U +#define AQ_HW_CHIP_MPI_AQ 0x00000010U +#define AQ_HW_CHIP_REVISION_A0 0x01000000U +#define AQ_HW_CHIP_REVISION_B0 0x02000000U +#define AQ_HW_CHIP_REVISION_B1 0x04000000U +#define IS_CHIP_FEATURE(HW, _F_) (AQ_HW_CHIP_##_F_ & \ + (HW)->chip_features) + +#define AQ_HW_FW_VER_EXPECTED 0x01050006U + +#define AQ_RX_RSS_TYPE_NONE 0x0 +#define AQ_RX_RSS_TYPE_IPV4 0x2 +#define AQ_RX_RSS_TYPE_IPV6 0x3 +#define AQ_RX_RSS_TYPE_IPV4_TCP 0x4 +#define AQ_RX_RSS_TYPE_IPV6_TCP 0x5 +#define AQ_RX_RSS_TYPE_IPV4_UDP 0x6 +#define AQ_RX_RSS_TYPE_IPV6_UDP 0x7 + +enum hw_atl_rx_action_with_traffic { + HW_ATL_RX_DISCARD, + HW_ATL_RX_HOST, + HW_ATL_RX_MNGMNT, + HW_ATL_RX_HOST_AND_MNGMNT, + HW_ATL_RX_WOL +}; + +struct aq_rx_filter_vlan { + u8 enable; + u8 location; + u16 vlan_id; + u8 queue; +}; + +#define AQ_HW_VLAN_MAX_FILTERS 16U +#define AQ_HW_ETYPE_MAX_FILTERS 16U + +struct aq_rx_filter_l2 { + u8 enable; + s8 queue; + u8 location; + u8 user_priority_en; + u8 user_priority; + u16 ethertype; +}; + +enum hw_atl_rx_ctrl_registers_l2 { + HW_ATL_RX_ENABLE_UNICAST_MNGNT_QUEUE_L2 = BIT(19), + HW_ATL_RX_ENABLE_UNICAST_FLTR_L2 = BIT(31) +}; + +struct aq_rx_filter_l3l4 { + u32 cmd; + u8 location; + u32 ip_dst[4]; + u32 ip_src[4]; + u16 p_dst; + u16 p_src; + bool is_ipv6; +}; + +enum hw_atl_rx_protocol_value_l3l4 { + HW_ATL_RX_TCP, + HW_ATL_RX_UDP, + HW_ATL_RX_SCTP, + HW_ATL_RX_ICMP +}; + +enum hw_atl_rx_ctrl_registers_l3l4 { + HW_ATL_RX_ENABLE_MNGMNT_QUEUE_L3L4 = BIT(22), + HW_ATL_RX_ENABLE_QUEUE_L3L4 = BIT(23), + HW_ATL_RX_ENABLE_ARP_FLTR_L3 = BIT(24), + HW_ATL_RX_ENABLE_CMP_PROT_L4 = BIT(25), + HW_ATL_RX_ENABLE_CMP_DEST_PORT_L4 = BIT(26), + HW_ATL_RX_ENABLE_CMP_SRC_PORT_L4 = BIT(27), + HW_ATL_RX_ENABLE_CMP_DEST_ADDR_L3 = BIT(28), + HW_ATL_RX_ENABLE_CMP_SRC_ADDR_L3 = BIT(29), + HW_ATL_RX_ENABLE_L3_IPv6 = BIT(30), + HW_ATL_RX_ENABLE_FLTR_L3L4 = BIT(31) +}; + +#define HW_ATL_RX_BOFFSET_PROT_FL3L4 0U +#define HW_ATL_RX_BOFFSET_QUEUE_FL3L4 8U +#define HW_ATL_RX_BOFFSET_ACTION_FL3F4 16U + +#define HW_ATL_RX_CNT_REG_ADDR_IPV6 4U + +#define HW_ATL_GET_REG_LOCATION_FL3L4(location) \ + 
((location) - AQ_RX_FIRST_LOC_FL3L4) + +enum aq_hw_fw_mpi_state_e { + MPI_DEINIT = 0, + MPI_RESET = 1, + MPI_INIT = 2, + MPI_POWER = 4, +}; + +int aq_hw_get_mac_permanent(struct aq_hw *hw, u8 *mac); + +int aq_hw_mac_addr_set(struct aq_hw *hw, u8 *mac_addr, u8 index); + +/* link speed in mbps. "0" - no link detected */ +int aq_hw_get_link_state(struct aq_hw *hw, u32 *link_speed, struct aq_hw_fc_info *fc_neg); + +int aq_hw_set_link_speed(struct aq_hw *hw, u32 speed); + +int aq_hw_fw_downld_dwords(struct aq_hw *hw, u32 a, u32 *p, u32 cnt); + +int aq_hw_reset(struct aq_hw *hw); + +int aq_hw_mpi_create(struct aq_hw *hw); + +int aq_hw_mpi_read_stats(struct aq_hw *hw, struct aq_hw_fw_mbox *pmbox); + +int aq_hw_init(struct aq_hw *hw, u8 *mac_addr, u8 adm_irq, bool msix); + +int aq_hw_start(struct aq_hw *hw); + +int aq_hw_interrupt_moderation_set(struct aq_hw *hw); + +int aq_hw_get_fw_version(struct aq_hw *hw, u32 *fw_version); + +int aq_hw_deinit(struct aq_hw *hw); + +int aq_hw_ver_match(const aq_hw_fw_version* ver_expected, const aq_hw_fw_version* ver_actual); + +void aq_hw_set_promisc(struct aq_hw_s *self, bool l2_promisc, bool vlan_promisc, bool mc_promisc); + +int aq_hw_set_power(struct aq_hw *hw, unsigned int power_state); + +int aq_hw_err_from_flags(struct aq_hw *hw); + +int hw_atl_b0_hw_vlan_promisc_set(struct aq_hw_s *self, bool promisc); + +int hw_atl_b0_hw_vlan_set(struct aq_hw_s *self, + struct aq_rx_filter_vlan *aq_vlans); + +int aq_hw_rss_hash_set(struct aq_hw_s *self, u8 rss_key[HW_ATL_RSS_HASHKEY_SIZE]); +int aq_hw_rss_hash_get(struct aq_hw_s *self, u8 rss_key[HW_ATL_RSS_HASHKEY_SIZE]); +int aq_hw_rss_set(struct aq_hw_s *self, u8 rss_table[HW_ATL_RSS_INDIRECTION_TABLE_MAX]); +int aq_hw_udp_rss_enable(struct aq_hw_s *self, bool enable); + +#endif //_AQ_HW_H_ + Index: sys/dev/aq/aq_hw.c =================================================================== --- /dev/null +++ sys/dev/aq/aq_hw.c @@ -0,0 +1,907 @@ +/* + * aQuantia Corporation Network Driver + * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * (1) Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * (2) Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * + * (3)The name of the author may not be used to endorse or promote + * products derived from this software without specific prior + * written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS + * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE + * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include +#include +#include +#include +#include +#include +#include + +#include "aq_hw.h" +#include "aq_dbg.h" +#include "aq_hw_llh.h" +#include "aq_fw.h" + +#define AQ_HW_FW_SM_RAM 0x2U +#define AQ_CFG_FW_MIN_VER_EXPECTED 0x01050006U + + +int aq_hw_err_from_flags(struct aq_hw *hw) +{ + return (0); +} + +static void aq_hw_chip_features_init(struct aq_hw *hw, u32 *p) +{ + u32 chip_features = 0U; + u32 val = reg_glb_mif_id_get(hw); + u32 mif_rev = val & 0xFFU; + + if ((0xFU & mif_rev) == 1U) { + chip_features |= AQ_HW_CHIP_REVISION_A0 | + AQ_HW_CHIP_MPI_AQ | + AQ_HW_CHIP_MIPS; + } else if ((0xFU & mif_rev) == 2U) { + chip_features |= AQ_HW_CHIP_REVISION_B0 | + AQ_HW_CHIP_MPI_AQ | + AQ_HW_CHIP_MIPS | + AQ_HW_CHIP_TPO2 | + AQ_HW_CHIP_RPF2; + } else if ((0xFU & mif_rev) == 0xAU) { + chip_features |= AQ_HW_CHIP_REVISION_B1 | + AQ_HW_CHIP_MPI_AQ | + AQ_HW_CHIP_MIPS | + AQ_HW_CHIP_TPO2 | + AQ_HW_CHIP_RPF2; + } + + *p = chip_features; +} + +int aq_hw_fw_downld_dwords(struct aq_hw *hw, u32 a, u32 *p, u32 cnt) +{ + int err = 0; + +// AQ_DBG_ENTER(); + AQ_HW_WAIT_FOR(reg_glb_cpu_sem_get(hw, + AQ_HW_FW_SM_RAM) == 1U, + 1U, 10000U); + + if (err < 0) { + bool is_locked; + + reg_glb_cpu_sem_set(hw, 1U, AQ_HW_FW_SM_RAM); + is_locked = reg_glb_cpu_sem_get(hw, AQ_HW_FW_SM_RAM); + if (!is_locked) { + err = -ETIME; + goto err_exit; + } + } + + mif_mcp_up_mailbox_addr_set(hw, a); + + for (++cnt; --cnt && !err;) { + mif_mcp_up_mailbox_execute_operation_set(hw, 1); + + if (IS_CHIP_FEATURE(hw, REVISION_B1)) + AQ_HW_WAIT_FOR(a != mif_mcp_up_mailbox_addr_get(hw), 1U, 1000U); + else + AQ_HW_WAIT_FOR(!mif_mcp_up_mailbox_busy_get(hw), 1, 1000U); + + *(p++) = mif_mcp_up_mailbox_data_get(hw); + } + + reg_glb_cpu_sem_set(hw, 1U, AQ_HW_FW_SM_RAM); + +err_exit: +// AQ_DBG_EXIT(err); + return (err); +} + +int aq_hw_ver_match(const aq_hw_fw_version* ver_expected, const aq_hw_fw_version* ver_actual) +{ + AQ_DBG_ENTER(); + + if (ver_actual->major_version >= ver_expected->major_version) + return (true); + if (ver_actual->minor_version >= ver_expected->minor_version) + return (true); + if (ver_actual->build_number >= ver_expected->build_number) + return (true); + + return (false); +} + +static int aq_hw_init_ucp(struct aq_hw *hw) +{ + int err = 0; + AQ_DBG_ENTER(); + + hw->fw_version.raw = 0; + + err = aq_fw_reset(hw); + if (err != EOK) { + aq_log_error("aq_hw_init_ucp(): F/W reset failed, err %d", err); + return (err); + } + + aq_hw_chip_features_init(hw, &hw->chip_features); + err = aq_fw_ops_init(hw); + if (err < 0) { + aq_log_error("could not initialize F/W ops, err %d", err); + return (-1); + } + + if (hw->fw_version.major_version == 1) { + if (!AQ_READ_REG(hw, 0x370)) { + unsigned int rnd = 0; + unsigned int ucp_0x370 = 0; + + rnd = arc4random(); + + ucp_0x370 = 0x02020202 | (0xFEFEFEFE & rnd); + AQ_WRITE_REG(hw, AQ_HW_UCP_0X370_REG, ucp_0x370); + } + + reg_glb_cpu_scratch_scp_set(hw, 0, 25); + } + + /* check 10 times by 1ms */ + AQ_HW_WAIT_FOR((hw->mbox_addr = AQ_READ_REG(hw, 0x360)) != 0, 400U, 20); + + aq_hw_fw_version ver_expected = { .raw = AQ_CFG_FW_MIN_VER_EXPECTED }; + if (!aq_hw_ver_match(&ver_expected, &hw->fw_version)) + aq_log_error("atlantic: aq_hw_init_ucp(), wrong FW version: expected:%x actual:%x", + AQ_CFG_FW_MIN_VER_EXPECTED, hw->fw_version.raw); + + AQ_DBG_EXIT(err); + return (err); +} + +int aq_hw_mpi_create(struct aq_hw *hw) +{ + int err = 0; + + AQ_DBG_ENTER(); + err = aq_hw_init_ucp(hw); + if (err < 0) + goto err_exit; + +err_exit: + AQ_DBG_EXIT(err); + return (err); +} + +int 
aq_hw_mpi_read_stats(struct aq_hw *hw, struct aq_hw_fw_mbox *pmbox) +{ + int err = 0; +// AQ_DBG_ENTER(); + + if (hw->fw_ops && hw->fw_ops->get_stats) { + err = hw->fw_ops->get_stats(hw, &pmbox->stats); + } else { + err = -ENOTSUP; + aq_log_error("get_stats() not supported by F/W"); + } + + if (err == EOK) { + pmbox->stats.dpc = reg_rx_dma_stat_counter7get(hw); + pmbox->stats.cprc = stats_rx_lro_coalesced_pkt_count0_get(hw); + } + +// AQ_DBG_EXIT(err); + return (err); +} + +static int aq_hw_mpi_set(struct aq_hw *hw, + enum aq_hw_fw_mpi_state_e state, u32 speed) +{ + int err = -ENOTSUP; + AQ_DBG_ENTERA("speed %d", speed); + + if (hw->fw_ops && hw->fw_ops->set_mode) { + err = hw->fw_ops->set_mode(hw, state, speed); + } else { + aq_log_error("set_mode() not supported by F/W"); + } + + AQ_DBG_EXIT(err); + return (err); +} + +int aq_hw_set_link_speed(struct aq_hw *hw, u32 speed) +{ + return aq_hw_mpi_set(hw, MPI_INIT, speed); +} + +int aq_hw_get_link_state(struct aq_hw *hw, u32 *link_speed, struct aq_hw_fc_info *fc_neg) +{ + int err = EOK; + + // AQ_DBG_ENTER(); + + enum aq_hw_fw_mpi_state_e mode; + aq_fw_link_speed_t speed = aq_fw_none; + aq_fw_link_fc_t fc; + + if (hw->fw_ops && hw->fw_ops->get_mode) { + err = hw->fw_ops->get_mode(hw, &mode, &speed, &fc); + } else { + aq_log_error("get_mode() not supported by F/W"); + AQ_DBG_EXIT(-ENOTSUP); + return (-ENOTSUP); + } + + if (err < 0) { + aq_log_error("get_mode() failed, err %d", err); + AQ_DBG_EXIT(err); + return (err); + } + *link_speed = 0; + if (mode != MPI_INIT) + return (0); + + switch (speed) { + case aq_fw_10G: + *link_speed = 10000U; + break; + + case aq_fw_5G: + *link_speed = 5000U; + break; + + case aq_fw_2G5: + *link_speed = 2500U; + break; + + case aq_fw_1G: + *link_speed = 1000U; + break; + + case aq_fw_100M: + *link_speed = 100U; + break; + + default: + *link_speed = 0U; + break; + } + + fc_neg->fc_rx = !!(fc & aq_fw_fc_ENABLE_RX); + fc_neg->fc_tx = !!(fc & aq_fw_fc_ENABLE_TX); + + // AQ_DBG_EXIT(0); + return (0); +} + +int aq_hw_get_mac_permanent(struct aq_hw *hw, u8 *mac) +{ + int err = -ENOTSUP; + AQ_DBG_ENTER(); + + if (hw->fw_ops && hw->fw_ops->get_mac_addr) + err = hw->fw_ops->get_mac_addr(hw, mac); + + /* Couldn't get MAC address from HW. Use auto-generated one. 
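+	 * The fallback below triggers when the first octet has the multicast
+	 * (I/G) bit set or the OUI bytes are all zero, i.e. the HW-provided
+	 * address is unusable.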
*/ + if ((mac[0] & 1) || ((mac[0] | mac[1] | mac[2]) == 0)) { + u16 rnd; + u32 h = 0; + u32 l = 0; + + printf("atlantic: HW MAC address %x:%x:%x:%x:%x:%x is multicast or empty MAC", mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]); + printf("atlantic: Use random MAC address"); + + rnd = arc4random(); + + /* chip revision */ + l = 0xE3000000U + | (0xFFFFU & rnd) + | (0x00 << 16); + h = 0x8001300EU; + + mac[5] = (u8)(0xFFU & l); + l >>= 8; + mac[4] = (u8)(0xFFU & l); + l >>= 8; + mac[3] = (u8)(0xFFU & l); + l >>= 8; + mac[2] = (u8)(0xFFU & l); + mac[1] = (u8)(0xFFU & h); + h >>= 8; + mac[0] = (u8)(0xFFU & h); + + err = EOK; + } + + AQ_DBG_EXIT(err); + return (err); +} + +int aq_hw_deinit(struct aq_hw *hw) +{ + AQ_DBG_ENTER(); + aq_hw_mpi_set(hw, MPI_DEINIT, 0); + AQ_DBG_EXIT(0); + return (0); +} + +int aq_hw_set_power(struct aq_hw *hw, unsigned int power_state) +{ + AQ_DBG_ENTER(); + aq_hw_mpi_set(hw, MPI_POWER, 0); + AQ_DBG_EXIT(0); + return (0); +} + + +/* HW NIC functions */ + +int aq_hw_reset(struct aq_hw *hw) +{ + int err = 0; + + AQ_DBG_ENTER(); + + err = aq_fw_reset(hw); + if (err < 0) + goto err_exit; + + itr_irq_reg_res_dis_set(hw, 0); + itr_res_irq_set(hw, 1); + + /* check 10 times by 1ms */ + AQ_HW_WAIT_FOR(itr_res_irq_get(hw) == 0, 1000, 10); + if (err < 0) { + printf("atlantic: IRQ reset failed: %d", err); + goto err_exit; + } + + if (hw->fw_ops && hw->fw_ops->reset) + hw->fw_ops->reset(hw); + + err = aq_hw_err_from_flags(hw); + +err_exit: + AQ_DBG_EXIT(err); + return (err); +} + +static int aq_hw_qos_set(struct aq_hw *hw) +{ + u32 tc = 0U; + u32 buff_size = 0U; + unsigned int i_priority = 0U; + int err = 0; + + AQ_DBG_ENTER(); + /* TPS Descriptor rate init */ + tps_tx_pkt_shed_desc_rate_curr_time_res_set(hw, 0x0U); + tps_tx_pkt_shed_desc_rate_lim_set(hw, 0xA); + + /* TPS VM init */ + tps_tx_pkt_shed_desc_vm_arb_mode_set(hw, 0U); + + /* TPS TC credits init */ + tps_tx_pkt_shed_desc_tc_arb_mode_set(hw, 0U); + tps_tx_pkt_shed_data_arb_mode_set(hw, 0U); + + tps_tx_pkt_shed_tc_data_max_credit_set(hw, 0xFFF, 0U); + tps_tx_pkt_shed_tc_data_weight_set(hw, 0x64, 0U); + tps_tx_pkt_shed_desc_tc_max_credit_set(hw, 0x50, 0U); + tps_tx_pkt_shed_desc_tc_weight_set(hw, 0x1E, 0U); + + /* Tx buf size */ + buff_size = AQ_HW_TXBUF_MAX; + + tpb_tx_pkt_buff_size_per_tc_set(hw, buff_size, tc); + tpb_tx_buff_hi_threshold_per_tc_set(hw, + (buff_size * (1024 / 32U) * 66U) / + 100U, tc); + tpb_tx_buff_lo_threshold_per_tc_set(hw, + (buff_size * (1024 / 32U) * 50U) / + 100U, tc); + + /* QoS Rx buf size per TC */ + tc = 0; + buff_size = AQ_HW_RXBUF_MAX; + + rpb_rx_pkt_buff_size_per_tc_set(hw, buff_size, tc); + rpb_rx_buff_hi_threshold_per_tc_set(hw, + (buff_size * + (1024U / 32U) * 66U) / + 100U, tc); + rpb_rx_buff_lo_threshold_per_tc_set(hw, + (buff_size * + (1024U / 32U) * 50U) / + 100U, tc); + + /* QoS 802.1p priority -> TC mapping */ + for (i_priority = 8U; i_priority--;) + rpf_rpb_user_priority_tc_map_set(hw, i_priority, 0U); + + err = aq_hw_err_from_flags(hw); + AQ_DBG_EXIT(err); + return (err); +} + +static int aq_hw_offload_set(struct aq_hw *hw) +{ + int err = 0; + + AQ_DBG_ENTER(); + /* TX checksums offloads*/ + tpo_ipv4header_crc_offload_en_set(hw, 1); + tpo_tcp_udp_crc_offload_en_set(hw, 1); + if (err < 0) + goto err_exit; + + /* RX checksums offloads*/ + rpo_ipv4header_crc_offload_en_set(hw, 1); + rpo_tcp_udp_crc_offload_en_set(hw, 1); + if (err < 0) + goto err_exit; + + /* LSO offloads*/ + tdm_large_send_offload_en_set(hw, 0xFFFFFFFFU); + if (err < 0) + goto err_exit; + +/* LRO offloads */ + { + 
u32 i = 0; + u32 val = (8U < HW_ATL_B0_LRO_RXD_MAX) ? 0x3U : + ((4U < HW_ATL_B0_LRO_RXD_MAX) ? 0x2U : + ((2U < HW_ATL_B0_LRO_RXD_MAX) ? 0x1U : 0x0)); + + for (i = 0; i < HW_ATL_B0_RINGS_MAX; i++) + rpo_lro_max_num_of_descriptors_set(hw, val, i); + + rpo_lro_time_base_divider_set(hw, 0x61AU); + rpo_lro_inactive_interval_set(hw, 0); + /* the LRO timebase divider is 5 uS (0x61a), + * to get a maximum coalescing interval of 250 uS, + * we need to multiply by 50(0x32) to get + * the default value 250 uS + */ + rpo_lro_max_coalescing_interval_set(hw, 50); + + rpo_lro_qsessions_lim_set(hw, 1U); + + rpo_lro_total_desc_lim_set(hw, 2U); + + rpo_lro_patch_optimization_en_set(hw, 0U); + + rpo_lro_min_pay_of_first_pkt_set(hw, 10U); + + rpo_lro_pkt_lim_set(hw, 1U); + + rpo_lro_en_set(hw, (hw->lro_enabled ? 0xFFFFFFFFU : 0U)); + } + + + err = aq_hw_err_from_flags(hw); + +err_exit: + AQ_DBG_EXIT(err); + return (err); +} + +static int aq_hw_init_tx_path(struct aq_hw *hw) +{ + int err = 0; + + AQ_DBG_ENTER(); + + /* Tx TC/RSS number config */ + tpb_tx_tc_mode_set(hw, 1U); + + thm_lso_tcp_flag_of_first_pkt_set(hw, 0x0FF6U); + thm_lso_tcp_flag_of_middle_pkt_set(hw, 0x0FF6U); + thm_lso_tcp_flag_of_last_pkt_set(hw, 0x0F7FU); + + /* Tx interrupts */ + tdm_tx_desc_wr_wb_irq_en_set(hw, 1U); + + /* misc */ + AQ_WRITE_REG(hw, 0x00007040U, 0x00010000U);//IS_CHIP_FEATURE(TPO2) ? 0x00010000U : 0x00000000U); + tdm_tx_dca_en_set(hw, 0U); + tdm_tx_dca_mode_set(hw, 0U); + + tpb_tx_path_scp_ins_en_set(hw, 1U); + + err = aq_hw_err_from_flags(hw); + AQ_DBG_EXIT(err); + return (err); +} + +static int aq_hw_init_rx_path(struct aq_hw *hw) +{ + //struct aq_nic_cfg_s *cfg = hw->aq_nic_cfg; + unsigned int control_reg_val = 0U; + int i; + int err; + + AQ_DBG_ENTER(); + /* Rx TC/RSS number config */ + rpb_rpf_rx_traf_class_mode_set(hw, 1U); + + /* Rx flow control */ + rpb_rx_flow_ctl_mode_set(hw, 1U); + + /* RSS Ring selection */ + reg_rx_flr_rss_control1set(hw, 0xB3333333U); + + /* Multicast filters */ + for (i = AQ_HW_MAC_MAX; i--;) { + rpfl2_uc_flr_en_set(hw, (i == 0U) ? 
1U : 0U, i); + rpfl2unicast_flr_act_set(hw, 1U, i); + } + + reg_rx_flr_mcst_flr_msk_set(hw, 0x00000000U); + reg_rx_flr_mcst_flr_set(hw, 0x00010FFFU, 0U); + + /* Vlan filters */ + rpf_vlan_outer_etht_set(hw, 0x88A8U); + rpf_vlan_inner_etht_set(hw, 0x8100U); + rpf_vlan_accept_untagged_packets_set(hw, true); + rpf_vlan_untagged_act_set(hw, HW_ATL_RX_HOST); + + rpf_vlan_prom_mode_en_set(hw, 1); + + /* Rx Interrupts */ + rdm_rx_desc_wr_wb_irq_en_set(hw, 1U); + + /* misc */ + control_reg_val = 0x000F0000U; //RPF2 + + /* RSS hash type set for IP/TCP */ + control_reg_val |= 0x1EU; + + AQ_WRITE_REG(hw, 0x00005040U, control_reg_val); + + rpfl2broadcast_en_set(hw, 1U); + rpfl2broadcast_flr_act_set(hw, 1U); + rpfl2broadcast_count_threshold_set(hw, 0xFFFFU & (~0U / 256U)); + + rdm_rx_dca_en_set(hw, 0U); + rdm_rx_dca_mode_set(hw, 0U); + + err = aq_hw_err_from_flags(hw); + AQ_DBG_EXIT(err); + return (err); +} + +int aq_hw_mac_addr_set(struct aq_hw *hw, u8 *mac_addr, u8 index) +{ + int err = 0; + unsigned int h = 0U; + unsigned int l = 0U; + + AQ_DBG_ENTER(); + if (!mac_addr) { + err = -EINVAL; + goto err_exit; + } + h = (mac_addr[0] << 8) | (mac_addr[1]); + l = (mac_addr[2] << 24) | (mac_addr[3] << 16) | + (mac_addr[4] << 8) | mac_addr[5]; + + rpfl2_uc_flr_en_set(hw, 0U, index); + rpfl2unicast_dest_addresslsw_set(hw, l, index); + rpfl2unicast_dest_addressmsw_set(hw, h, index); + rpfl2_uc_flr_en_set(hw, 1U, index); + + err = aq_hw_err_from_flags(hw); + +err_exit: + AQ_DBG_EXIT(err); + return (err); +} + +int aq_hw_init(struct aq_hw *hw, u8 *mac_addr, u8 adm_irq, bool msix) +{ + + int err = 0; + u32 val = 0; + + AQ_DBG_ENTER(); + + /* Force limit MRRS on RDM/TDM to 2K */ + val = AQ_READ_REG(hw, AQ_HW_PCI_REG_CONTROL_6_ADR); + AQ_WRITE_REG(hw, AQ_HW_PCI_REG_CONTROL_6_ADR, (val & ~0x707) | 0x404); + + /* TX DMA total request limit. B0 hardware is not capable to + * handle more than (8K-MRRS) incoming DMA data. + * Value 24 in 256byte units + */ + AQ_WRITE_REG(hw, AQ_HW_TX_DMA_TOTAL_REQ_LIMIT_ADR, 24); + + aq_hw_init_tx_path(hw); + aq_hw_init_rx_path(hw); + + aq_hw_mac_addr_set(hw, mac_addr, AQ_HW_MAC); + + aq_hw_mpi_set(hw, MPI_INIT, hw->link_rate); + + aq_hw_qos_set(hw); + + err = aq_hw_err_from_flags(hw); + if (err < 0) + goto err_exit; + + /* Interrupts */ + //Enable interrupt + itr_irq_status_cor_en_set(hw, 0); //Disable clear-on-read for status + itr_irq_auto_mask_clr_en_set(hw, 1); // Enable auto-mask clear. 
+ if (msix) + itr_irq_mode_set(hw, 0x6); //MSIX + multi vector + else + itr_irq_mode_set(hw, 0x5); //MSI + multi vector + + reg_gen_irq_map_set(hw, 0x80 | adm_irq, 3); + + aq_hw_offload_set(hw); + +err_exit: + AQ_DBG_EXIT(err); + return (err); +} + + +int aq_hw_start(struct aq_hw *hw) +{ + int err; + + AQ_DBG_ENTER(); + tpb_tx_buff_en_set(hw, 1U); + rpb_rx_buff_en_set(hw, 1U); + err = aq_hw_err_from_flags(hw); + AQ_DBG_EXIT(err); + return (err); +} + + +int aq_hw_interrupt_moderation_set(struct aq_hw *hw) +{ + static unsigned int AQ_HW_NIC_timers_table_rx_[][2] = { + {80, 120},//{0x6U, 0x38U},/* 10Gbit */ + {0xCU, 0x70U},/* 5Gbit */ + {0xCU, 0x70U},/* 5Gbit 5GS */ + {0x18U, 0xE0U},/* 2.5Gbit */ + {0x30U, 0x80U},/* 1Gbit */ + {0x4U, 0x50U},/* 100Mbit */ + }; + static unsigned int AQ_HW_NIC_timers_table_tx_[][2] = { + {0x4fU, 0x1ff},//{0xffU, 0xffU}, /* 10Gbit */ + {0x4fU, 0xffU}, /* 5Gbit */ + {0x4fU, 0xffU}, /* 5Gbit 5GS */ + {0x4fU, 0xffU}, /* 2.5Gbit */ + {0x4fU, 0xffU}, /* 1Gbit */ + {0x4fU, 0xffU}, /* 100Mbit */ + }; + + u32 speed_index = 0U; //itr settings for 10 g + u32 itr_rx = 2U; + u32 itr_tx = 2U; + int custom_itr = hw->itr; + int active = custom_itr != 0; + int err; + + + AQ_DBG_ENTER(); + + if (custom_itr == -1) { + itr_rx |= AQ_HW_NIC_timers_table_rx_[speed_index][0] << 0x8U; /* set min timer value */ + itr_rx |= AQ_HW_NIC_timers_table_rx_[speed_index][1] << 0x10U; /* set max timer value */ + + itr_tx |= AQ_HW_NIC_timers_table_tx_[speed_index][0] << 0x8U; /* set min timer value */ + itr_tx |= AQ_HW_NIC_timers_table_tx_[speed_index][1] << 0x10U; /* set max timer value */ + }else{ + if (custom_itr > 0x1FF) + custom_itr = 0x1FF; + + itr_rx |= (custom_itr/2) << 0x8U; /* set min timer value */ + itr_rx |= custom_itr << 0x10U; /* set max timer value */ + + itr_tx |= (custom_itr/2) << 0x8U; /* set min timer value */ + itr_tx |= custom_itr << 0x10U; /* set max timer value */ + } + + tdm_tx_desc_wr_wb_irq_en_set(hw, !active); + tdm_tdm_intr_moder_en_set(hw, active); + rdm_rx_desc_wr_wb_irq_en_set(hw, !active); + rdm_rdm_intr_moder_en_set(hw, active); + + for (int i = HW_ATL_B0_RINGS_MAX; i--;) { + reg_tx_intr_moder_ctrl_set(hw, itr_tx, i); + reg_rx_intr_moder_ctrl_set(hw, itr_rx, i); + } + + err = aq_hw_err_from_flags(hw); + AQ_DBG_EXIT(err); + return (err); +} + +/** + * @brief Set VLAN filter table + * @details Configure VLAN filter table to accept (and assign the queue) traffic + * for the particular vlan ids. 
+ * Note: use this function under vlan promisc mode not to lost the traffic + * + * @param aq_hw_s + * @param aq_rx_filter_vlan VLAN filter configuration + * @return 0 - OK, <0 - error + */ +int hw_atl_b0_hw_vlan_set(struct aq_hw_s *self, + struct aq_rx_filter_vlan *aq_vlans) +{ + int i; + + for (i = 0; i < AQ_HW_VLAN_MAX_FILTERS; i++) { + hw_atl_rpf_vlan_flr_en_set(self, 0U, i); + hw_atl_rpf_vlan_rxq_en_flr_set(self, 0U, i); + if (aq_vlans[i].enable) { + hw_atl_rpf_vlan_id_flr_set(self, + aq_vlans[i].vlan_id, + i); + hw_atl_rpf_vlan_flr_act_set(self, 1U, i); + hw_atl_rpf_vlan_flr_en_set(self, 1U, i); + if (aq_vlans[i].queue != 0xFF) { + hw_atl_rpf_vlan_rxq_flr_set(self, + aq_vlans[i].queue, + i); + hw_atl_rpf_vlan_rxq_en_flr_set(self, 1U, i); + } + } + } + + return aq_hw_err_from_flags(self); +} + +int hw_atl_b0_hw_vlan_promisc_set(struct aq_hw_s *self, bool promisc) +{ + hw_atl_rpf_vlan_prom_mode_en_set(self, promisc); + return aq_hw_err_from_flags(self); +} + + +void aq_hw_set_promisc(struct aq_hw_s *self, bool l2_promisc, bool vlan_promisc, bool mc_promisc) +{ + AQ_DBG_ENTERA("promisc %d, vlan_promisc %d, allmulti %d", l2_promisc, vlan_promisc, mc_promisc); + + rpfl2promiscuous_mode_en_set(self, l2_promisc); + + hw_atl_b0_hw_vlan_promisc_set(self, l2_promisc | vlan_promisc); + + rpfl2_accept_all_mc_packets_set(self, mc_promisc); + rpfl2multicast_flr_en_set(self, mc_promisc, 0); + + AQ_DBG_EXIT(0); +} + +int aq_hw_rss_hash_set(struct aq_hw_s *self, u8 rss_key[HW_ATL_RSS_HASHKEY_SIZE]) +{ + u32 rss_key_dw[HW_ATL_RSS_HASHKEY_SIZE / 4]; + u32 addr = 0U; + u32 i = 0U; + int err = 0; + + AQ_DBG_ENTER(); + + memcpy(rss_key_dw, rss_key, HW_ATL_RSS_HASHKEY_SIZE); + + for (i = 10, addr = 0U; i--; ++addr) { + u32 key_data = bswap32(rss_key_dw[i]); + rpf_rss_key_wr_data_set(self, key_data); + rpf_rss_key_addr_set(self, addr); + rpf_rss_key_wr_en_set(self, 1U); + AQ_HW_WAIT_FOR(rpf_rss_key_wr_en_get(self) == 0, + 1000U, 10U); + if (err < 0) + goto err_exit; + } + + err = aq_hw_err_from_flags(self); + +err_exit: + AQ_DBG_EXIT(err); + return (err); +} + +int aq_hw_rss_hash_get(struct aq_hw_s *self, u8 rss_key[HW_ATL_RSS_HASHKEY_SIZE]) +{ + u32 rss_key_dw[HW_ATL_RSS_HASHKEY_SIZE / 4]; + u32 addr = 0U; + u32 i = 0U; + int err = 0; + + AQ_DBG_ENTER(); + + for (i = 10, addr = 0U; i--; ++addr) { + rpf_rss_key_addr_set(self, addr); + rss_key_dw[i] = bswap32(rpf_rss_key_rd_data_get(self)); + } + memcpy(rss_key, rss_key_dw, HW_ATL_RSS_HASHKEY_SIZE); + + err = aq_hw_err_from_flags(self); + + AQ_DBG_EXIT(err); + return (err); +} + +int aq_hw_rss_set(struct aq_hw_s *self, u8 rss_table[HW_ATL_RSS_INDIRECTION_TABLE_MAX]) +{ + u16 bitary[(HW_ATL_RSS_INDIRECTION_TABLE_MAX * + 3 / 16U)]; + int err = 0; + u32 i = 0U; + + memset(bitary, 0, sizeof(bitary)); + + for (i = HW_ATL_RSS_INDIRECTION_TABLE_MAX; i--;) { + (*(u32 *)(bitary + ((i * 3U) / 16U))) |= + ((rss_table[i]) << ((i * 3U) & 0xFU)); + } + + for (i = ARRAY_SIZE(bitary); i--;) { + rpf_rss_redir_tbl_wr_data_set(self, bitary[i]); + rpf_rss_redir_tbl_addr_set(self, i); + rpf_rss_redir_wr_en_set(self, 1U); + AQ_HW_WAIT_FOR(rpf_rss_redir_wr_en_get(self) == 0, + 1000U, 10U); + if (err < 0) + goto err_exit; + } + + err = aq_hw_err_from_flags(self); + +err_exit: + return (err); +} + +int aq_hw_udp_rss_enable(struct aq_hw_s *self, bool enable) +{ + int err = 0; + if(!enable) { + /* HW bug workaround: + * Disable RSS for UDP using rx flow filter 0. + * HW does not track RSS stream for fragmenged UDP, + * 0x5040 control reg does not work. 
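+	 * The filter programmed below matches UDP by L4 protocol only and
+	 * assigns it to queue 0 with the host action, so UDP traffic is
+	 * steered around RSS.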
+ */ + hw_atl_rpf_l3_l4_enf_set(self, true, 0); + hw_atl_rpf_l4_protf_en_set(self, true, 0); + hw_atl_rpf_l3_l4_rxqf_en_set(self, true, 0); + hw_atl_rpf_l3_l4_actf_set(self, L2_FILTER_ACTION_HOST, 0); + hw_atl_rpf_l3_l4_rxqf_set(self, 0, 0); + hw_atl_rpf_l4_protf_set(self, HW_ATL_RX_UDP, 0); + } else { + hw_atl_rpf_l3_l4_enf_set(self, false, 0); + } + + err = aq_hw_err_from_flags(self); + return (err); + +} Index: sys/dev/aq/aq_hw_llh.h =================================================================== --- /dev/null +++ sys/dev/aq/aq_hw_llh.h @@ -0,0 +1,1176 @@ +/* + * aQuantia Corporation Network Driver + * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * (1) Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * (2) Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * + * (3)The name of the author may not be used to endorse or promote + * products derived from this software without specific prior + * written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS + * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE + * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/* File hw_atl_llh.h: Declarations of bitfield and register access functions for + * Atlantic registers. 
+ */ + +#ifndef HW_ATL_LLH_H +#define HW_ATL_LLH_H + +#include "aq_common.h" + +struct aq_hw; + +/* global */ + + +void reg_glb_fw_image_id1_set(struct aq_hw* hw, u32 value); +u32 reg_glb_fw_image_id1_get(struct aq_hw* hw); + +/* set global microprocessor semaphore */ +void reg_glb_cpu_sem_set(struct aq_hw *aq_hw, u32 sem_value, u32 sem_index); + +/* get global microprocessor semaphore */ +u32 reg_glb_cpu_sem_get(struct aq_hw *aq_hw, u32 sem_index); + +/* +* \brief Get Global Standard Control 1 +* \return GlobalStandardControl1 +*/ +u32 reg_glb_standard_ctl1_get(struct aq_hw* hw); +/* +* \brief Set Global Standard Control 1 +*/ +void reg_glb_standard_ctl1_set(struct aq_hw* hw, u32 glb_standard_ctl1); + +/* +* \brief Set Global Control 2 +*/ +void reg_global_ctl2_set(struct aq_hw* hw, u32 global_ctl2); +/* +* \brief Get Global Control 2 +* \return GlobalControl2 +*/ +u32 reg_global_ctl2_get(struct aq_hw* hw); + + +/* +* \brief Set Global Daisy Chain Status 1 +*/ +void reg_glb_daisy_chain_status1_set(struct aq_hw* hw, u32 glb_daisy_chain_status1); +/* +* \brief Get Global Daisy Chain Status 1 +* \return glb_daisy_chain_status1 +*/ +u32 reg_glb_daisy_chain_status1_get(struct aq_hw* hw); + + +/* +* \brief Set Global General Provisioning 9 +*/ +void reg_glb_general_provisioning9_set(struct aq_hw* hw, u32 value); +/* +* \brief Get Global General Provisioning 9 +* \return GlobalGeneralProvisioning9 +*/ +u32 reg_glb_general_provisioning9_get(struct aq_hw* hw); + +/* +* \brief Set Global NVR Provisioning 2 +*/ +void reg_glb_nvr_provisioning2_set(struct aq_hw* hw, u32 value); +/* +* \brief Get Global NVR Provisioning 2 +* \return GlobalNvrProvisioning2 +*/ +u32 reg_glb_nvr_provisioning2_get(struct aq_hw* hw); + +/* +* \brief Set Global NVR Interface 1 +*/ +void reg_glb_nvr_interface1_set(struct aq_hw* hw, u32 value); +/* +* \brief Get Global NVR Interface 1 +* \return GlobalNvrInterface1 +*/ +u32 reg_glb_nvr_interface1_get(struct aq_hw* hw); + + +/* set global register reset disable */ +void glb_glb_reg_res_dis_set(struct aq_hw *aq_hw, u32 glb_reg_res_dis); + +/* set soft reset */ +void glb_soft_res_set(struct aq_hw *aq_hw, u32 soft_res); + +/* get soft reset */ +u32 glb_soft_res_get(struct aq_hw *aq_hw); + +/* stats */ + +u32 rpb_rx_dma_drop_pkt_cnt_get(struct aq_hw *aq_hw); + +/* get rx dma good octet counter lsw */ +u32 stats_rx_dma_good_octet_counterlsw_get(struct aq_hw *aq_hw); + +/* get rx dma good packet counter lsw */ +u32 stats_rx_dma_good_pkt_counterlsw_get(struct aq_hw *aq_hw); + +/* get tx dma good octet counter lsw */ +u32 stats_tx_dma_good_octet_counterlsw_get(struct aq_hw *aq_hw); + +/* get tx dma good packet counter lsw */ +u32 stats_tx_dma_good_pkt_counterlsw_get(struct aq_hw *aq_hw); + +/* get rx dma good octet counter msw */ +u32 stats_rx_dma_good_octet_countermsw_get(struct aq_hw *aq_hw); + +/* get rx dma good packet counter msw */ +u32 stats_rx_dma_good_pkt_countermsw_get(struct aq_hw *aq_hw); + +/* get tx dma good octet counter msw */ +u32 stats_tx_dma_good_octet_countermsw_get(struct aq_hw *aq_hw); + +/* get tx dma good packet counter msw */ +u32 stats_tx_dma_good_pkt_countermsw_get(struct aq_hw *aq_hw); + +/* get rx lro coalesced packet count lsw */ +u32 stats_rx_lro_coalesced_pkt_count0_get(struct aq_hw *aq_hw); + +/* get msm rx errors counter register */ +u32 reg_mac_msm_rx_errs_cnt_get(struct aq_hw *aq_hw); + +/* get msm rx unicast frames counter register */ +u32 reg_mac_msm_rx_ucst_frm_cnt_get(struct aq_hw *aq_hw); + +/* get msm rx multicast frames counter register */ 
+u32 reg_mac_msm_rx_mcst_frm_cnt_get(struct aq_hw *aq_hw); + +/* get msm rx broadcast frames counter register */ +u32 reg_mac_msm_rx_bcst_frm_cnt_get(struct aq_hw *aq_hw); + +/* get msm rx broadcast octets counter register 1 */ +u32 reg_mac_msm_rx_bcst_octets_counter1get(struct aq_hw *aq_hw); + +/* get msm rx unicast octets counter register 0 */ +u32 reg_mac_msm_rx_ucst_octets_counter0get(struct aq_hw *aq_hw); + +/* get rx dma statistics counter 7 */ +u32 reg_rx_dma_stat_counter7get(struct aq_hw *aq_hw); + +/* get msm tx errors counter register */ +u32 reg_mac_msm_tx_errs_cnt_get(struct aq_hw *aq_hw); + +/* get msm tx unicast frames counter register */ +u32 reg_mac_msm_tx_ucst_frm_cnt_get(struct aq_hw *aq_hw); + +/* get msm tx multicast frames counter register */ +u32 reg_mac_msm_tx_mcst_frm_cnt_get(struct aq_hw *aq_hw); + +/* get msm tx broadcast frames counter register */ +u32 reg_mac_msm_tx_bcst_frm_cnt_get(struct aq_hw *aq_hw); + +/* get msm tx multicast octets counter register 1 */ +u32 reg_mac_msm_tx_mcst_octets_counter1get(struct aq_hw *aq_hw); + +/* get msm tx broadcast octets counter register 1 */ +u32 reg_mac_msm_tx_bcst_octets_counter1get(struct aq_hw *aq_hw); + +/* get msm tx unicast octets counter register 0 */ +u32 reg_mac_msm_tx_ucst_octets_counter0get(struct aq_hw *aq_hw); + +/* get global mif identification */ +u32 reg_glb_mif_id_get(struct aq_hw *aq_hw); + +/** \brief Set Tx Register Reset Disable +* \param txRegisterResetDisable 1 = Disable the S/W reset to MAC-PHY registers, 0 = Enable the S/W reset to MAC-PHY registers +* \note Default value: 0x1 +* \note PORT="pif_mpi_reg_reset_dsbl_i" +*/ +void mpi_tx_reg_res_dis_set(struct aq_hw* hw, u32 mpi_tx_reg_res_dis); +/** \brief Get Tx Register Reset Disable +* \return 1 = Disable the S/W reset to MAC-PHY registers, 0 = Enable the S/W reset to MAC-PHY registers +* \note Default value: 0x1 +* \note PORT="pif_mpi_reg_reset_dsbl_i" +*/ +u32 mpi_tx_reg_res_dis_get(struct aq_hw* hw); + + +/* interrupt */ + +/* set interrupt auto mask lsw */ +void itr_irq_auto_masklsw_set(struct aq_hw *aq_hw, u32 irq_auto_masklsw); + +/* set interrupt mapping enable rx */ +void itr_irq_map_en_rx_set(struct aq_hw *aq_hw, u32 irq_map_en_rx, u32 rx); + +/* set interrupt mapping enable tx */ +void itr_irq_map_en_tx_set(struct aq_hw *aq_hw, u32 irq_map_en_tx, u32 tx); + +/* set interrupt mapping rx */ +void itr_irq_map_rx_set(struct aq_hw *aq_hw, u32 irq_map_rx, u32 rx); + +/* set interrupt mapping tx */ +void itr_irq_map_tx_set(struct aq_hw *aq_hw, u32 irq_map_tx, u32 tx); + +/* set interrupt mask clear lsw */ +void itr_irq_msk_clearlsw_set(struct aq_hw *aq_hw, u32 irq_msk_clearlsw); + +/* set interrupt mask set lsw */ +void itr_irq_msk_setlsw_set(struct aq_hw *aq_hw, u32 irq_msk_setlsw); + +/* set interrupt register reset disable */ +void itr_irq_reg_res_dis_set(struct aq_hw *aq_hw, u32 irq_reg_res_dis); + +/* set interrupt status clear lsw */ +void itr_irq_status_clearlsw_set(struct aq_hw *aq_hw, + u32 irq_status_clearlsw); + +/* get interrupt status lsw */ +u32 itr_irq_statuslsw_get(struct aq_hw *aq_hw); + +/* get reset interrupt */ +u32 itr_res_irq_get(struct aq_hw *aq_hw); + +/* set reset interrupt */ +void itr_res_irq_set(struct aq_hw *aq_hw, u32 res_irq); + +void itr_irq_mode_set(struct aq_hw *aq_hw, u32 irq_mode); + +/* Set Link Interrupt Mapping Enable */ +void itr_link_int_map_en_set(struct aq_hw *aq_hw, u32 link_int_en_map_en); + +/* Get Link Interrupt Mapping Enable */ +u32 itr_link_int_map_en_get(struct aq_hw *aq_hw); + +/* Set Link 
Interrupt Mapping */ +void itr_link_int_map_set(struct aq_hw *aq_hw, u32 link_int_map); + +/* Get Link Interrupt Mapping */ +u32 itr_link_int_map_get(struct aq_hw *aq_hw); + + +/* Set MIF Interrupt Mapping Enable */ +void itr_mif_int_map_en_set(struct aq_hw *aq_hw, u32 mif_int_map_en, u32 mif); + +/* Get MIF Interrupt Mapping Enable */ +u32 itr_mif_int_map_en_get(struct aq_hw *aq_hw, u32 mif); + +/* Set MIF Interrupt Mapping */ +void itr_mif_int_map_set(struct aq_hw *aq_hw, u32 mif_int_map, u32 mif); + +/* Get MIF Interrupt Mapping */ +u32 itr_mif_int_map_get(struct aq_hw *aq_hw, u32 mif); + +void itr_irq_status_cor_en_set(struct aq_hw *aq_hw, u32 irq_status_cor_enable); + +void itr_irq_auto_mask_clr_en_set(struct aq_hw *aq_hw, u32 irq_auto_mask_clr_en); + +/* rdm */ + +/* set cpu id */ +void rdm_cpu_id_set(struct aq_hw *aq_hw, u32 cpuid, u32 dca); + +/* set rx dca enable */ +void rdm_rx_dca_en_set(struct aq_hw *aq_hw, u32 rx_dca_en); + +/* set rx dca mode */ +void rdm_rx_dca_mode_set(struct aq_hw *aq_hw, u32 rx_dca_mode); + +/* set rx descriptor data buffer size */ +void rdm_rx_desc_data_buff_size_set(struct aq_hw *aq_hw, + u32 rx_desc_data_buff_size, + u32 descriptor); + +/* set rx descriptor dca enable */ +void rdm_rx_desc_dca_en_set(struct aq_hw *aq_hw, u32 rx_desc_dca_en, + u32 dca); + +/* set rx descriptor enable */ +void rdm_rx_desc_en_set(struct aq_hw *aq_hw, u32 rx_desc_en, + u32 descriptor); + +/* set rx descriptor header splitting */ +void rdm_rx_desc_head_splitting_set(struct aq_hw *aq_hw, + u32 rx_desc_head_splitting, + u32 descriptor); + +/* get rx descriptor head pointer */ +u32 rdm_rx_desc_head_ptr_get(struct aq_hw *aq_hw, u32 descriptor); + +/* set rx descriptor length */ +void rdm_rx_desc_len_set(struct aq_hw *aq_hw, u32 rx_desc_len, + u32 descriptor); + +/* set rx descriptor write-back interrupt enable */ +void rdm_rx_desc_wr_wb_irq_en_set(struct aq_hw *aq_hw, + u32 rx_desc_wr_wb_irq_en); + +/* set rx header dca enable */ +void rdm_rx_head_dca_en_set(struct aq_hw *aq_hw, u32 rx_head_dca_en, + u32 dca); + +/* set rx payload dca enable */ +void rdm_rx_pld_dca_en_set(struct aq_hw *aq_hw, u32 rx_pld_dca_en, u32 dca); + +/* set rx descriptor header buffer size */ +void rdm_rx_desc_head_buff_size_set(struct aq_hw *aq_hw, + u32 rx_desc_head_buff_size, + u32 descriptor); + +/* set rx descriptor reset */ +void rdm_rx_desc_res_set(struct aq_hw *aq_hw, u32 rx_desc_res, + u32 descriptor); + +/* Set RDM Interrupt Moderation Enable */ +void rdm_rdm_intr_moder_en_set(struct aq_hw *aq_hw, u32 rdm_intr_moder_en); + +/* reg */ + +/* set general interrupt mapping register */ +void reg_gen_irq_map_set(struct aq_hw *aq_hw, u32 gen_intr_map, u32 regidx); + +/* get general interrupt status register */ +u32 reg_gen_irq_status_get(struct aq_hw *aq_hw); + +/* set interrupt global control register */ +void reg_irq_glb_ctl_set(struct aq_hw *aq_hw, u32 intr_glb_ctl); + +/* set interrupt throttle register */ +void reg_irq_thr_set(struct aq_hw *aq_hw, u32 intr_thr, u32 throttle); + +/* set rx dma descriptor base address lsw */ +void reg_rx_dma_desc_base_addresslswset(struct aq_hw *aq_hw, + u32 rx_dma_desc_base_addrlsw, + u32 descriptor); + +/* set rx dma descriptor base address msw */ +void reg_rx_dma_desc_base_addressmswset(struct aq_hw *aq_hw, + u32 rx_dma_desc_base_addrmsw, + u32 descriptor); + +/* get rx dma descriptor status register */ +u32 reg_rx_dma_desc_status_get(struct aq_hw *aq_hw, u32 descriptor); + +/* set rx dma descriptor tail pointer register */ +void 
reg_rx_dma_desc_tail_ptr_set(struct aq_hw *aq_hw, + u32 rx_dma_desc_tail_ptr, + u32 descriptor); +/* get rx dma descriptor tail pointer register */ +u32 reg_rx_dma_desc_tail_ptr_get(struct aq_hw *aq_hw, u32 descriptor); + +/* set rx filter multicast filter mask register */ +void reg_rx_flr_mcst_flr_msk_set(struct aq_hw *aq_hw, + u32 rx_flr_mcst_flr_msk); + +/* set rx filter multicast filter register */ +void reg_rx_flr_mcst_flr_set(struct aq_hw *aq_hw, u32 rx_flr_mcst_flr, + u32 filter); + +/* set rx filter rss control register 1 */ +void reg_rx_flr_rss_control1set(struct aq_hw *aq_hw, + u32 rx_flr_rss_control1); + +/* Set RX Filter Control Register 2 */ +void reg_rx_flr_control2_set(struct aq_hw *aq_hw, u32 rx_flr_control2); + +/* Set RX Interrupt Moderation Control Register */ +void reg_rx_intr_moder_ctrl_set(struct aq_hw *aq_hw, + u32 rx_intr_moderation_ctl, + u32 queue); + +/* set tx dma debug control */ +void reg_tx_dma_debug_ctl_set(struct aq_hw *aq_hw, u32 tx_dma_debug_ctl); + +/* set tx dma descriptor base address lsw */ +void reg_tx_dma_desc_base_addresslswset(struct aq_hw *aq_hw, + u32 tx_dma_desc_base_addrlsw, + u32 descriptor); + +/* set tx dma descriptor base address msw */ +void reg_tx_dma_desc_base_addressmswset(struct aq_hw *aq_hw, + u32 tx_dma_desc_base_addrmsw, + u32 descriptor); + +/* set tx dma descriptor tail pointer register */ +void reg_tx_dma_desc_tail_ptr_set(struct aq_hw *aq_hw, + u32 tx_dma_desc_tail_ptr, + u32 descriptor); + +/* get tx dma descriptor tail pointer register */ +u32 reg_tx_dma_desc_tail_ptr_get(struct aq_hw *aq_hw, u32 descriptor); + +/* Set TX Interrupt Moderation Control Register */ +void reg_tx_intr_moder_ctrl_set(struct aq_hw *aq_hw, + u32 tx_intr_moderation_ctl, + u32 queue); + +/* get global microprocessor scratch pad */ +u32 reg_glb_cpu_scratch_scp_get(struct aq_hw *hw, u32 glb_cpu_scratch_scp_idx); +/* set global microprocessor scratch pad */ +void reg_glb_cpu_scratch_scp_set(struct aq_hw *aq_hw, + u32 glb_cpu_scratch_scp, u32 scratch_scp); + +/* get global microprocessor no reset scratch pad */ +u32 reg_glb_cpu_no_reset_scratchpad_get(struct aq_hw* hw, u32 index); +/* set global microprocessor no reset scratch pad */ +void reg_glb_cpu_no_reset_scratchpad_set(struct aq_hw* aq_hw, u32 value, + u32 index); + +/* rpb */ + +/* set dma system loopback */ +void rpb_dma_sys_lbk_set(struct aq_hw *aq_hw, u32 dma_sys_lbk); + +/* set rx traffic class mode */ +void rpb_rpf_rx_traf_class_mode_set(struct aq_hw *aq_hw, + u32 rx_traf_class_mode); + +/* set rx buffer enable */ +void rpb_rx_buff_en_set(struct aq_hw *aq_hw, u32 rx_buff_en); + +/* set rx buffer high threshold (per tc) */ +void rpb_rx_buff_hi_threshold_per_tc_set(struct aq_hw *aq_hw, + u32 rx_buff_hi_threshold_per_tc, + u32 buffer); + +/* set rx buffer low threshold (per tc) */ +void rpb_rx_buff_lo_threshold_per_tc_set(struct aq_hw *aq_hw, + u32 rx_buff_lo_threshold_per_tc, + u32 buffer); + +/* set rx flow control mode */ +void rpb_rx_flow_ctl_mode_set(struct aq_hw *aq_hw, u32 rx_flow_ctl_mode); + +/* set rx packet buffer size (per tc) */ +void rpb_rx_pkt_buff_size_per_tc_set(struct aq_hw *aq_hw, + u32 rx_pkt_buff_size_per_tc, + u32 buffer); + +/* set rx xoff enable (per tc) */ +void rpb_rx_xoff_en_per_tc_set(struct aq_hw *aq_hw, u32 rx_xoff_en_per_tc, + u32 buffer); + +/* rpf */ + +/* set l2 broadcast count threshold */ +void rpfl2broadcast_count_threshold_set(struct aq_hw *aq_hw, + u32 l2broadcast_count_threshold); + +/* set l2 broadcast enable */ +void rpfl2broadcast_en_set(struct aq_hw 
*aq_hw, u32 l2broadcast_en); + +/* set l2 broadcast filter action */ +void rpfl2broadcast_flr_act_set(struct aq_hw *aq_hw, + u32 l2broadcast_flr_act); + +/* set l2 multicast filter enable */ +void rpfl2multicast_flr_en_set(struct aq_hw *aq_hw, u32 l2multicast_flr_en, + u32 filter); + +/* set l2 promiscuous mode enable */ +void rpfl2promiscuous_mode_en_set(struct aq_hw *aq_hw, + u32 l2promiscuous_mode_en); + +/* set l2 unicast filter action */ +void rpfl2unicast_flr_act_set(struct aq_hw *aq_hw, u32 l2unicast_flr_act, + u32 filter); + +/* set l2 unicast filter enable */ +void rpfl2_uc_flr_en_set(struct aq_hw *aq_hw, u32 l2unicast_flr_en, + u32 filter); + +/* set l2 unicast destination address lsw */ +void rpfl2unicast_dest_addresslsw_set(struct aq_hw *aq_hw, + u32 l2unicast_dest_addresslsw, + u32 filter); + +/* set l2 unicast destination address msw */ +void rpfl2unicast_dest_addressmsw_set(struct aq_hw *aq_hw, + u32 l2unicast_dest_addressmsw, + u32 filter); + +/* Set L2 Accept all Multicast packets */ +void rpfl2_accept_all_mc_packets_set(struct aq_hw *aq_hw, + u32 l2_accept_all_mc_packets); + +/* set user-priority tc mapping */ +void rpf_rpb_user_priority_tc_map_set(struct aq_hw *aq_hw, + u32 user_priority_tc_map, u32 tc); + +/* set rss key address */ +void rpf_rss_key_addr_set(struct aq_hw *aq_hw, u32 rss_key_addr); + +/* set rss key write data */ +void rpf_rss_key_wr_data_set(struct aq_hw *aq_hw, u32 rss_key_wr_data); + +/* get rss key read data */ +u32 rpf_rss_key_rd_data_get(struct aq_hw *aq_hw); + +/* get rss key write enable */ +u32 rpf_rss_key_wr_en_get(struct aq_hw *aq_hw); + +/* set rss key write enable */ +void rpf_rss_key_wr_en_set(struct aq_hw *aq_hw, u32 rss_key_wr_en); + +/* set rss redirection table address */ +void rpf_rss_redir_tbl_addr_set(struct aq_hw *aq_hw, + u32 rss_redir_tbl_addr); + +/* set rss redirection table write data */ +void rpf_rss_redir_tbl_wr_data_set(struct aq_hw *aq_hw, + u32 rss_redir_tbl_wr_data); + +/* get rss redirection write enable */ +u32 rpf_rss_redir_wr_en_get(struct aq_hw *aq_hw); + +/* set rss redirection write enable */ +void rpf_rss_redir_wr_en_set(struct aq_hw *aq_hw, u32 rss_redir_wr_en); + +/* set tpo to rpf system loopback */ +void rpf_tpo_to_rpf_sys_lbk_set(struct aq_hw *aq_hw, + u32 tpo_to_rpf_sys_lbk); + +/* set vlan inner ethertype */ +void hw_atl_rpf_vlan_inner_etht_set(struct aq_hw *aq_hw, u32 vlan_inner_etht); + +/* set vlan outer ethertype */ +void hw_atl_rpf_vlan_outer_etht_set(struct aq_hw *aq_hw, u32 vlan_outer_etht); + +/* set vlan promiscuous mode enable */ +void hw_atl_rpf_vlan_prom_mode_en_set(struct aq_hw *aq_hw, + u32 vlan_prom_mode_en); + +/* Set VLAN untagged action */ +void hw_atl_rpf_vlan_untagged_act_set(struct aq_hw *aq_hw, + u32 vlan_untagged_act); + +/* Set VLAN accept untagged packets */ +void hw_atl_rpf_vlan_accept_untagged_packets_set(struct aq_hw *aq_hw, + u32 vlan_acc_untagged_packets); + +/* Set VLAN filter enable */ +void hw_atl_rpf_vlan_flr_en_set(struct aq_hw *aq_hw, u32 vlan_flr_en, + u32 filter); + +/* Set VLAN Filter Action */ +void hw_atl_rpf_vlan_flr_act_set(struct aq_hw *aq_hw, u32 vlan_filter_act, + u32 filter); + +/* Set VLAN ID Filter */ +void hw_atl_rpf_vlan_id_flr_set(struct aq_hw *aq_hw, u32 vlan_id_flr, + u32 filter); + +/* Set VLAN RX queue assignment enable */ +void hw_atl_rpf_vlan_rxq_en_flr_set(struct aq_hw *aq_hw, u32 vlan_rxq_en, + u32 filter); + +/* Set VLAN RX queue */ +void hw_atl_rpf_vlan_rxq_flr_set(struct aq_hw *aq_hw, u32 vlan_rxq, + u32 filter); + +/* set ethertype filter 
enable */ +void hw_atl_rpf_etht_flr_en_set(struct aq_hw *aq_hw, u32 etht_flr_en, + u32 filter); + +/* set ethertype user-priority enable */ +void hw_atl_rpf_etht_user_priority_en_set(struct aq_hw *aq_hw, + u32 etht_user_priority_en, + u32 filter); + +/* set ethertype rx queue enable */ +void hw_atl_rpf_etht_rx_queue_en_set(struct aq_hw *aq_hw, + u32 etht_rx_queue_en, + u32 filter); + +/* set ethertype rx queue */ +void hw_atl_rpf_etht_rx_queue_set(struct aq_hw *aq_hw, u32 etht_rx_queue, + u32 filter); + +/* set ethertype user-priority */ +void hw_atl_rpf_etht_user_priority_set(struct aq_hw *aq_hw, + u32 etht_user_priority, + u32 filter); + +/* set ethertype management queue */ +void hw_atl_rpf_etht_mgt_queue_set(struct aq_hw *aq_hw, u32 etht_mgt_queue, + u32 filter); + +/* set ethertype filter action */ +void hw_atl_rpf_etht_flr_act_set(struct aq_hw *aq_hw, u32 etht_flr_act, + u32 filter); + +/* set ethertype filter */ +void hw_atl_rpf_etht_flr_set(struct aq_hw *aq_hw, u32 etht_flr, u32 filter); + +/* set L3/L4 filter enable */ +void hw_atl_rpf_l3_l4_enf_set(struct aq_hw *aq_hw, u32 val, u32 filter); + +/* set L3 IPv6 enable */ +void hw_atl_rpf_l3_v6_enf_set(struct aq_hw *aq_hw, u32 val, u32 filter); + +/* set L3 source address enable */ +void hw_atl_rpf_l3_saf_en_set(struct aq_hw *aq_hw, u32 val, u32 filter); + +/* set L3 destination address enable */ +void hw_atl_rpf_l3_daf_en_set(struct aq_hw *aq_hw, u32 val, u32 filter); + +/* set L4 source port enable */ +void hw_atl_rpf_l4_spf_en_set(struct aq_hw *aq_hw, u32 val, u32 filter); + +/* set L4 destination port enable */ +void hw_atl_rpf_l4_dpf_en_set(struct aq_hw *aq_hw, u32 val, u32 filter); + +/* set L4 protocol enable */ +void hw_atl_rpf_l4_protf_en_set(struct aq_hw *aq_hw, u32 val, u32 filter); + +/* set L3 ARP filter enable */ +void hw_atl_rpf_l3_arpf_en_set(struct aq_hw *aq_hw, u32 val, u32 filter); + +/* set L3/L4 rx queue enable */ +void hw_atl_rpf_l3_l4_rxqf_en_set(struct aq_hw *aq_hw, u32 val, u32 filter); + +/* set L3/L4 management queue */ +void hw_atl_rpf_l3_l4_mng_rxqf_set(struct aq_hw *aq_hw, u32 val, u32 filter); + +/* set L3/L4 filter action */ +void hw_atl_rpf_l3_l4_actf_set(struct aq_hw *aq_hw, u32 val, u32 filter); + +/* set L3/L4 rx queue */ +void hw_atl_rpf_l3_l4_rxqf_set(struct aq_hw *aq_hw, u32 val, u32 filter); + +/* set L4 protocol value */ +void hw_atl_rpf_l4_protf_set(struct aq_hw *aq_hw, u32 val, u32 filter); + +/* set L4 source port */ +void hw_atl_rpf_l4_spd_set(struct aq_hw *aq_hw, u32 val, u32 filter); + +/* set L4 destination port */ +void hw_atl_rpf_l4_dpd_set(struct aq_hw *aq_hw, u32 val, u32 filter); + +/* set vlan inner ethertype */ +void rpf_vlan_inner_etht_set(struct aq_hw *aq_hw, u32 vlan_inner_etht); + +/* set vlan outer ethertype */ +void rpf_vlan_outer_etht_set(struct aq_hw *aq_hw, u32 vlan_outer_etht); + +/* set vlan promiscuous mode enable */ +void rpf_vlan_prom_mode_en_set(struct aq_hw *aq_hw, u32 vlan_prom_mode_en); + +/* Set VLAN untagged action */ +void rpf_vlan_untagged_act_set(struct aq_hw *aq_hw, u32 vlan_untagged_act); + +/* Set VLAN accept untagged packets */ +void rpf_vlan_accept_untagged_packets_set(struct aq_hw *aq_hw, + u32 vlan_accept_untagged_packets); + +/* Set VLAN filter enable */ +void rpf_vlan_flr_en_set(struct aq_hw *aq_hw, u32 vlan_flr_en, u32 filter); + +/* Set VLAN Filter Action */ +void rpf_vlan_flr_act_set(struct aq_hw *aq_hw, u32 vlan_filter_act, + u32 filter); + +/* Set VLAN ID Filter */ +void rpf_vlan_id_flr_set(struct aq_hw *aq_hw, u32 vlan_id_flr, u32 filter); + 
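+/*
+ * Illustrative sketch only, not part of the driver sources: the VLAN filter
+ * setters above are meant to be combined per filter slot, mirroring
+ * hw_atl_b0_hw_vlan_set() in aq_hw.c. For example, to admit one VLAN ID to
+ * the host through slot 0 (hw and vlan_id supplied by the caller):
+ *
+ *	rpf_vlan_id_flr_set(hw, vlan_id, 0U);	// match this VLAN ID
+ *	rpf_vlan_flr_act_set(hw, 1U, 0U);	// 1 = forward to host
+ *	rpf_vlan_flr_en_set(hw, 1U, 0U);	// enable filter slot 0
+ */
+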
+/* set ethertype filter enable */ +void rpf_etht_flr_en_set(struct aq_hw *aq_hw, u32 etht_flr_en, u32 filter); + +/* set ethertype user-priority enable */ +void rpf_etht_user_priority_en_set(struct aq_hw *aq_hw, + u32 etht_user_priority_en, u32 filter); + +/* set ethertype rx queue enable */ +void rpf_etht_rx_queue_en_set(struct aq_hw *aq_hw, u32 etht_rx_queue_en, + u32 filter); + +/* set ethertype rx queue */ +void rpf_etht_rx_queue_set(struct aq_hw *aq_hw, u32 etht_rx_queue, + u32 filter); + +/* set ethertype user-priority */ +void rpf_etht_user_priority_set(struct aq_hw *aq_hw, u32 etht_user_priority, + u32 filter); + +/* set ethertype management queue */ +void rpf_etht_mgt_queue_set(struct aq_hw *aq_hw, u32 etht_mgt_queue, + u32 filter); + +/* set ethertype filter action */ +void rpf_etht_flr_act_set(struct aq_hw *aq_hw, u32 etht_flr_act, + u32 filter); + +/* set ethertype filter */ +void rpf_etht_flr_set(struct aq_hw *aq_hw, u32 etht_flr, u32 filter); + +/* set L3/L4 filter enable */ +void hw_atl_rpf_l3_l4_enf_set(struct aq_hw *aq_hw, u32 val, u32 filter); + +/* set L3 IPv6 enable */ +void hw_atl_rpf_l3_v6_enf_set(struct aq_hw *aq_hw, u32 val, u32 filter); + +/* set L3 source address enable */ +void hw_atl_rpf_l3_saf_en_set(struct aq_hw *aq_hw, u32 val, u32 filter); + +/* set L3 destination address enable */ +void hw_atl_rpf_l3_daf_en_set(struct aq_hw *aq_hw, u32 val, u32 filter); + +/* set L4 source port enable */ +void hw_atl_rpf_l4_spf_en_set(struct aq_hw *aq_hw, u32 val, u32 filter); + +/* set L4 destination port enable */ +void hw_atl_rpf_l4_dpf_en_set(struct aq_hw *aq_hw, u32 val, u32 filter); + +/* set L4 protocol enable */ +void hw_atl_rpf_l4_protf_en_set(struct aq_hw *aq_hw, u32 val, u32 filter); + +/* set L3 ARP filter enable */ +void hw_atl_rpf_l3_arpf_en_set(struct aq_hw *aq_hw, u32 val, u32 filter); + +/* set L3/L4 rx queue enable */ +void hw_atl_rpf_l3_l4_rxqf_en_set(struct aq_hw *aq_hw, u32 val, u32 filter); + +/* set L3/L4 management queue */ +void hw_atl_rpf_l3_l4_mng_rxqf_set(struct aq_hw *aq_hw, u32 val, u32 filter); + +/* set L3/L4 filter action */ +void hw_atl_rpf_l3_l4_actf_set(struct aq_hw *aq_hw, u32 val, u32 filter); + +/* set L3/L4 rx queue */ +void hw_atl_rpf_l3_l4_rxqf_set(struct aq_hw *aq_hw, u32 val, u32 filter); + +/* set L4 protocol value */ +void hw_atl_rpf_l4_protf_set(struct aq_hw *aq_hw, u32 val, u32 filter); + +/* set L4 source port */ +void hw_atl_rpf_l4_spd_set(struct aq_hw *aq_hw, u32 val, u32 filter); + +/* set L4 destination port */ +void hw_atl_rpf_l4_dpd_set(struct aq_hw *aq_hw, u32 val, u32 filter); + +/* rpo */ + +/* set ipv4 header checksum offload enable */ +void rpo_ipv4header_crc_offload_en_set(struct aq_hw *aq_hw, + u32 ipv4header_crc_offload_en); + +/* set rx descriptor vlan stripping */ +void rpo_rx_desc_vlan_stripping_set(struct aq_hw *aq_hw, + u32 rx_desc_vlan_stripping, + u32 descriptor); + +/* set tcp/udp checksum offload enable */ +void rpo_tcp_udp_crc_offload_en_set(struct aq_hw *aq_hw, + u32 tcp_udp_crc_offload_en); + +/* Set LRO Patch Optimization Enable. 
*/ +void rpo_lro_patch_optimization_en_set(struct aq_hw *aq_hw, + u32 lro_patch_optimization_en); + +/* Set Large Receive Offload Enable */ +void rpo_lro_en_set(struct aq_hw *aq_hw, u32 lro_en); + +/* Set LRO Q Sessions Limit */ +void rpo_lro_qsessions_lim_set(struct aq_hw *aq_hw, u32 lro_qsessions_lim); + +/* Set LRO Total Descriptor Limit */ +void rpo_lro_total_desc_lim_set(struct aq_hw *aq_hw, u32 lro_total_desc_lim); + +/* Set LRO Min Payload of First Packet */ +void rpo_lro_min_pay_of_first_pkt_set(struct aq_hw *aq_hw, + u32 lro_min_pld_of_first_pkt); + +/* Set LRO Packet Limit */ +void rpo_lro_pkt_lim_set(struct aq_hw *aq_hw, u32 lro_packet_lim); + +/* Set LRO Max Number of Descriptors */ +void rpo_lro_max_num_of_descriptors_set(struct aq_hw *aq_hw, + u32 lro_max_desc_num, u32 lro); + +/* Set LRO Time Base Divider */ +void rpo_lro_time_base_divider_set(struct aq_hw *aq_hw, + u32 lro_time_base_divider); + +/*Set LRO Inactive Interval */ +void rpo_lro_inactive_interval_set(struct aq_hw *aq_hw, + u32 lro_inactive_interval); + +/*Set LRO Max Coalescing Interval */ +void rpo_lro_max_coalescing_interval_set(struct aq_hw *aq_hw, + u32 lro_max_coalescing_interval); + +/* rx */ + +/* set rx register reset disable */ +void rx_rx_reg_res_dis_set(struct aq_hw *aq_hw, u32 rx_reg_res_dis); + +/* tdm */ + +/* set cpu id */ +void tdm_cpu_id_set(struct aq_hw *aq_hw, u32 cpuid, u32 dca); + +/* set large send offload enable */ +void tdm_large_send_offload_en_set(struct aq_hw *aq_hw, + u32 large_send_offload_en); + +/* set tx descriptor enable */ +void tdm_tx_desc_en_set(struct aq_hw *aq_hw, u32 tx_desc_en, u32 descriptor); + +/* set tx dca enable */ +void tdm_tx_dca_en_set(struct aq_hw *aq_hw, u32 tx_dca_en); + +/* set tx dca mode */ +void tdm_tx_dca_mode_set(struct aq_hw *aq_hw, u32 tx_dca_mode); + +/* set tx descriptor dca enable */ +void tdm_tx_desc_dca_en_set(struct aq_hw *aq_hw, u32 tx_desc_dca_en, u32 dca); + +/* get tx descriptor head pointer */ +u32 tdm_tx_desc_head_ptr_get(struct aq_hw *aq_hw, u32 descriptor); + +/* set tx descriptor length */ +void tdm_tx_desc_len_set(struct aq_hw *aq_hw, u32 tx_desc_len, + u32 descriptor); + +/* set tx descriptor write-back interrupt enable */ +void tdm_tx_desc_wr_wb_irq_en_set(struct aq_hw *aq_hw, + u32 tx_desc_wr_wb_irq_en); + +/* set tx descriptor write-back threshold */ +void tdm_tx_desc_wr_wb_threshold_set(struct aq_hw *aq_hw, + u32 tx_desc_wr_wb_threshold, + u32 descriptor); + +/* Set TDM Interrupt Moderation Enable */ +void tdm_tdm_intr_moder_en_set(struct aq_hw *aq_hw, + u32 tdm_irq_moderation_en); +/* thm */ + +/* set lso tcp flag of first packet */ +void thm_lso_tcp_flag_of_first_pkt_set(struct aq_hw *aq_hw, + u32 lso_tcp_flag_of_first_pkt); + +/* set lso tcp flag of last packet */ +void thm_lso_tcp_flag_of_last_pkt_set(struct aq_hw *aq_hw, + u32 lso_tcp_flag_of_last_pkt); + +/* set lso tcp flag of middle packet */ +void thm_lso_tcp_flag_of_middle_pkt_set(struct aq_hw *aq_hw, + u32 lso_tcp_flag_of_middle_pkt); + +/* tpb */ + +/* set tx buffer enable */ +void tpb_tx_buff_en_set(struct aq_hw *aq_hw, u32 tx_buff_en); + +/* set tx tc mode */ +void tpb_tx_tc_mode_set(struct aq_hw *aq_hw, u32 tc_mode); + +/* set tx buffer high threshold (per tc) */ +void tpb_tx_buff_hi_threshold_per_tc_set(struct aq_hw *aq_hw, + u32 tx_buff_hi_threshold_per_tc, + u32 buffer); + +/* set tx buffer low threshold (per tc) */ +void tpb_tx_buff_lo_threshold_per_tc_set(struct aq_hw *aq_hw, + u32 tx_buff_lo_threshold_per_tc, + u32 buffer); + +/* set tx dma system loopback enable 
*/ +void tpb_tx_dma_sys_lbk_en_set(struct aq_hw *aq_hw, u32 tx_dma_sys_lbk_en); + +/* set tx packet buffer size (per tc) */ +void tpb_tx_pkt_buff_size_per_tc_set(struct aq_hw *aq_hw, + u32 tx_pkt_buff_size_per_tc, u32 buffer); + +/* toggle rdm rx dma descriptor cache init */ +void rdm_rx_dma_desc_cache_init_tgl(struct aq_hw *aq_hw); + +/* set tx path pad insert enable */ +void tpb_tx_path_scp_ins_en_set(struct aq_hw *aq_hw, u32 tx_path_scp_ins_en); + +/* tpo */ + +/* set ipv4 header checksum offload enable */ +void tpo_ipv4header_crc_offload_en_set(struct aq_hw *aq_hw, + u32 ipv4header_crc_offload_en); + +/* set tcp/udp checksum offload enable */ +void tpo_tcp_udp_crc_offload_en_set(struct aq_hw *aq_hw, + u32 tcp_udp_crc_offload_en); + +/* set tx pkt system loopback enable */ +void tpo_tx_pkt_sys_lbk_en_set(struct aq_hw *aq_hw, u32 tx_pkt_sys_lbk_en); + +/* tps */ + +/* set tx packet scheduler data arbitration mode */ +void tps_tx_pkt_shed_data_arb_mode_set(struct aq_hw *aq_hw, + u32 tx_pkt_shed_data_arb_mode); + +/* set tx packet scheduler descriptor rate current time reset */ +void tps_tx_pkt_shed_desc_rate_curr_time_res_set(struct aq_hw *aq_hw, + u32 curr_time_res); + +/* set tx packet scheduler descriptor rate limit */ +void tps_tx_pkt_shed_desc_rate_lim_set(struct aq_hw *aq_hw, + u32 tx_pkt_shed_desc_rate_lim); + +/* set tx packet scheduler descriptor tc arbitration mode */ +void tps_tx_pkt_shed_desc_tc_arb_mode_set(struct aq_hw *aq_hw, + u32 tx_pkt_shed_desc_tc_arb_mode); + +/* set tx packet scheduler descriptor tc max credit */ +void tps_tx_pkt_shed_desc_tc_max_credit_set(struct aq_hw *aq_hw, + u32 tx_pkt_shed_desc_tc_max_credit, + u32 tc); + +/* set tx packet scheduler descriptor tc weight */ +void tps_tx_pkt_shed_desc_tc_weight_set(struct aq_hw *aq_hw, + u32 tx_pkt_shed_desc_tc_weight, + u32 tc); + +/* set tx packet scheduler descriptor vm arbitration mode */ +void tps_tx_pkt_shed_desc_vm_arb_mode_set(struct aq_hw *aq_hw, + u32 tx_pkt_shed_desc_vm_arb_mode); + +/* set tx packet scheduler tc data max credit */ +void tps_tx_pkt_shed_tc_data_max_credit_set(struct aq_hw *aq_hw, + u32 tx_pkt_shed_tc_data_max_credit, + u32 tc); + +/* set tx packet scheduler tc data weight */ +void tps_tx_pkt_shed_tc_data_weight_set(struct aq_hw *aq_hw, + u32 tx_pkt_shed_tc_data_weight, + u32 tc); + +/* tx */ + +/* set tx register reset disable */ +void tx_tx_reg_res_dis_set(struct aq_hw *aq_hw, u32 tx_reg_res_dis); + +/* msm */ + +/* get register access status */ +u32 msm_reg_access_status_get(struct aq_hw *aq_hw); + +/* set register address for indirect address */ +void msm_reg_addr_for_indirect_addr_set(struct aq_hw *aq_hw, + u32 reg_addr_for_indirect_addr); + +/* set register read strobe */ +void msm_reg_rd_strobe_set(struct aq_hw *aq_hw, u32 reg_rd_strobe); + +/* get register read data */ +u32 msm_reg_rd_data_get(struct aq_hw *aq_hw); + +/* set register write data */ +void msm_reg_wr_data_set(struct aq_hw *aq_hw, u32 reg_wr_data); + +/* set register write strobe */ +void msm_reg_wr_strobe_set(struct aq_hw *aq_hw, u32 reg_wr_strobe); + +/* pci */ + +/* set pci register reset disable */ +void pci_pci_reg_res_dis_set(struct aq_hw *aq_hw, u32 pci_reg_res_dis); + + +/* +* \brief Set MIF Power Gating Enable Control +*/ +void reg_mif_power_gating_enable_control_set(struct aq_hw* hw, u32 value); +/* +* \brief Get MIF Power Gating Enable Control +* \return MifPowerGatingEnableControl +*/ +u32 reg_mif_power_gating_enable_control_get(struct aq_hw* hw); + +/* get mif up mailbox busy */ +u32 
mif_mcp_up_mailbox_busy_get(struct aq_hw *aq_hw); + +/* set mif up mailbox execute operation */ +void mif_mcp_up_mailbox_execute_operation_set(struct aq_hw* hw, u32 value); + +/* get mif uP mailbox address */ +u32 mif_mcp_up_mailbox_addr_get(struct aq_hw *aq_hw); +/* set mif uP mailbox address */ +void mif_mcp_up_mailbox_addr_set(struct aq_hw *hw, u32 value); + +/* get mif uP mailbox data */ +u32 mif_mcp_up_mailbox_data_get(struct aq_hw *aq_hw); + +/* clear ipv4 filter destination address */ +void hw_atl_rpfl3l4_ipv4_dest_addr_clear(struct aq_hw *aq_hw, u8 location); + +/* clear ipv4 filter source address */ +void hw_atl_rpfl3l4_ipv4_src_addr_clear(struct aq_hw *aq_hw, u8 location); + +/* clear command for filter l3-l4 */ +void hw_atl_rpfl3l4_cmd_clear(struct aq_hw *aq_hw, u8 location); + +/* clear ipv6 filter destination address */ +void hw_atl_rpfl3l4_ipv6_dest_addr_clear(struct aq_hw *aq_hw, u8 location); + +/* clear ipv6 filter source address */ +void hw_atl_rpfl3l4_ipv6_src_addr_clear(struct aq_hw *aq_hw, u8 location); + +/* set ipv4 filter destination address */ +void hw_atl_rpfl3l4_ipv4_dest_addr_set(struct aq_hw *aq_hw, u8 location, + u32 ipv4_dest); + +/* set ipv4 filter source address */ +void hw_atl_rpfl3l4_ipv4_src_addr_set(struct aq_hw *aq_hw, u8 location, + u32 ipv4_src); + +/* set command for filter l3-l4 */ +void hw_atl_rpfl3l4_cmd_set(struct aq_hw *aq_hw, u8 location, u32 cmd); + +/* set ipv6 filter source address */ +void hw_atl_rpfl3l4_ipv6_src_addr_set(struct aq_hw *aq_hw, u8 location, + u32 *ipv6_src); + +/* set ipv6 filter destination address */ +void hw_atl_rpfl3l4_ipv6_dest_addr_set(struct aq_hw *aq_hw, u8 location, + u32 *ipv6_dest); + +/* set vlan inner ethertype */ +void hw_atl_rpf_vlan_inner_etht_set(struct aq_hw *aq_hw, u32 vlan_inner_etht); + +/* set vlan outer ethertype */ +void hw_atl_rpf_vlan_outer_etht_set(struct aq_hw *aq_hw, u32 vlan_outer_etht); + +/* set vlan promiscuous mode enable */ +void hw_atl_rpf_vlan_prom_mode_en_set(struct aq_hw *aq_hw, + u32 vlan_prom_mode_en); + +/* Set VLAN untagged action */ +void hw_atl_rpf_vlan_untagged_act_set(struct aq_hw *aq_hw, + u32 vlan_untagged_act); + +/* Set VLAN accept untagged packets */ +void hw_atl_rpf_vlan_accept_untagged_packets_set(struct aq_hw *aq_hw, + u32 vlan_acc_untagged_packets); + +/* Set VLAN filter enable */ +void hw_atl_rpf_vlan_flr_en_set(struct aq_hw *aq_hw, u32 vlan_flr_en, + u32 filter); + +/* Set VLAN Filter Action */ +void hw_atl_rpf_vlan_flr_act_set(struct aq_hw *aq_hw, u32 vlan_filter_act, + u32 filter); + +/* Set VLAN ID Filter */ +void hw_atl_rpf_vlan_id_flr_set(struct aq_hw *aq_hw, u32 vlan_id_flr, + u32 filter); + +/* Set VLAN RX queue assignment enable */ +void hw_atl_rpf_vlan_rxq_en_flr_set(struct aq_hw *aq_hw, u32 vlan_rxq_en, + u32 filter); + +/* Set VLAN RX queue */ +void hw_atl_rpf_vlan_rxq_flr_set(struct aq_hw *aq_hw, u32 vlan_rxq, + u32 filter); + +/* set ethertype filter enable */ +void hw_atl_rpf_etht_flr_en_set(struct aq_hw *aq_hw, u32 etht_flr_en, + u32 filter); + +/* set ethertype user-priority enable */ +void hw_atl_rpf_etht_user_priority_en_set(struct aq_hw *aq_hw, + u32 etht_user_priority_en, + u32 filter); + +/* set ethertype rx queue enable */ +void hw_atl_rpf_etht_rx_queue_en_set(struct aq_hw *aq_hw, + u32 etht_rx_queue_en, + u32 filter); + +/* set ethertype rx queue */ +void hw_atl_rpf_etht_rx_queue_set(struct aq_hw *aq_hw, u32 etht_rx_queue, + u32 filter); + +/* set ethertype user-priority */ +void hw_atl_rpf_etht_user_priority_set(struct aq_hw *aq_hw, + u32 
etht_user_priority, + u32 filter); + +/* set ethertype management queue */ +void hw_atl_rpf_etht_mgt_queue_set(struct aq_hw *aq_hw, u32 etht_mgt_queue, + u32 filter); + +/* set ethertype filter action */ +void hw_atl_rpf_etht_flr_act_set(struct aq_hw *aq_hw, u32 etht_flr_act, + u32 filter); + +/* set ethertype filter */ +void hw_atl_rpf_etht_flr_set(struct aq_hw *aq_hw, u32 etht_flr, u32 filter); + +/* set L3/L4 filter enable */ +void hw_atl_rpf_l3_l4_enf_set(struct aq_hw *aq_hw, u32 val, u32 filter); + +/* set L3 IPv6 enable */ +void hw_atl_rpf_l3_v6_enf_set(struct aq_hw *aq_hw, u32 val, u32 filter); + +/* set L3 source address enable */ +void hw_atl_rpf_l3_saf_en_set(struct aq_hw *aq_hw, u32 val, u32 filter); + +/* set L3 destination address enable */ +void hw_atl_rpf_l3_daf_en_set(struct aq_hw *aq_hw, u32 val, u32 filter); + +/* set L4 source port enable */ +void hw_atl_rpf_l4_spf_en_set(struct aq_hw *aq_hw, u32 val, u32 filter); + +/* set L4 destination port enable */ +void hw_atl_rpf_l4_dpf_en_set(struct aq_hw *aq_hw, u32 val, u32 filter); + +/* set L4 protocol enable */ +void hw_atl_rpf_l4_protf_en_set(struct aq_hw *aq_hw, u32 val, u32 filter); + +/* set L3 ARP filter enable */ +void hw_atl_rpf_l3_arpf_en_set(struct aq_hw *aq_hw, u32 val, u32 filter); + +/* set L3/L4 rx queue enable */ +void hw_atl_rpf_l3_l4_rxqf_en_set(struct aq_hw *aq_hw, u32 val, u32 filter); + +/* set L3/L4 management queue */ +void hw_atl_rpf_l3_l4_mng_rxqf_set(struct aq_hw *aq_hw, u32 val, u32 filter); + +/* set L3/L4 filter action */ +void hw_atl_rpf_l3_l4_actf_set(struct aq_hw *aq_hw, u32 val, u32 filter); + +/* set L3/L4 rx queue */ +void hw_atl_rpf_l3_l4_rxqf_set(struct aq_hw *aq_hw, u32 val, u32 filter); + +/* set L4 protocol value */ +void hw_atl_rpf_l4_protf_set(struct aq_hw *aq_hw, u32 val, u32 filter); + +/* set L4 source port */ +void hw_atl_rpf_l4_spd_set(struct aq_hw *aq_hw, u32 val, u32 filter); + +/* set L4 destination port */ +void hw_atl_rpf_l4_dpd_set(struct aq_hw *aq_hw, u32 val, u32 filter); + +#endif /* HW_ATL_LLH_H */ Index: sys/dev/aq/aq_hw_llh.c =================================================================== --- /dev/null +++ sys/dev/aq/aq_hw_llh.c @@ -0,0 +1,1986 @@ +/* + * aQuantia Corporation Network Driver + * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * (1) Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * (2) Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * + * (3)The name of the author may not be used to endorse or promote + * products derived from this software without specific prior + * written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS + * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE + * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/* File aq_hw_llh.c: Definitions of bitfield and register access functions for + * Atlantic registers. + */ + +#include "aq_hw.h" +#include "aq_hw_llh.h" +#include "aq_hw_llh_internal.h" + + +/* global */ + +void reg_glb_fw_image_id1_set(struct aq_hw* hw, u32 value) +{ + AQ_WRITE_REG(hw, glb_fw_image_id1_adr, value); +} +u32 reg_glb_fw_image_id1_get(struct aq_hw* hw) +{ + return AQ_READ_REG(hw, glb_fw_image_id1_adr); +} + +void reg_glb_cpu_sem_set(struct aq_hw *aq_hw, u32 sem_value, u32 sem_index) +{ + AQ_WRITE_REG(aq_hw, glb_cpu_sem_adr(sem_index), sem_value); +} + +u32 reg_glb_cpu_sem_get(struct aq_hw *aq_hw, u32 sem_index) +{ + return AQ_READ_REG(aq_hw, glb_cpu_sem_adr(sem_index)); +} + +u32 reg_glb_standard_ctl1_get(struct aq_hw* hw) +{ + return AQ_READ_REG(hw, glb_standard_ctl1_adr); +} +void reg_glb_standard_ctl1_set(struct aq_hw* hw, u32 glb_standard_ctl1) +{ + AQ_WRITE_REG(hw, glb_standard_ctl1_adr, glb_standard_ctl1); +} + +void reg_global_ctl2_set(struct aq_hw* hw, u32 global_ctl2) +{ + AQ_WRITE_REG(hw, glb_ctl2_adr, global_ctl2); +} +u32 reg_global_ctl2_get(struct aq_hw* hw) +{ + return AQ_READ_REG(hw, glb_ctl2_adr); +} + +void reg_glb_daisy_chain_status1_set(struct aq_hw* hw, u32 glb_daisy_chain_status1) +{ + AQ_WRITE_REG(hw, glb_daisy_chain_status1_adr, glb_daisy_chain_status1); +} +u32 reg_glb_daisy_chain_status1_get(struct aq_hw* hw) +{ + return AQ_READ_REG(hw, glb_daisy_chain_status1_adr); +} + +void glb_glb_reg_res_dis_set(struct aq_hw *aq_hw, u32 glb_reg_res_dis) +{ + AQ_WRITE_REG_BIT(aq_hw, glb_reg_res_dis_adr, + glb_reg_res_dis_msk, + glb_reg_res_dis_shift, + glb_reg_res_dis); +} + +void glb_soft_res_set(struct aq_hw *aq_hw, u32 soft_res) +{ + AQ_WRITE_REG_BIT(aq_hw, glb_soft_res_adr, glb_soft_res_msk, + glb_soft_res_shift, soft_res); +} + +u32 glb_soft_res_get(struct aq_hw *aq_hw) +{ + return AQ_READ_REG_BIT(aq_hw, glb_soft_res_adr, + glb_soft_res_msk, + glb_soft_res_shift); +} + +u32 reg_rx_dma_stat_counter7get(struct aq_hw *aq_hw) +{ + return AQ_READ_REG(aq_hw, rx_dma_stat_counter7_adr); +} + +u32 reg_glb_mif_id_get(struct aq_hw *aq_hw) +{ + return AQ_READ_REG(aq_hw, glb_mif_id_adr); +} + + +void mpi_tx_reg_res_dis_set(struct aq_hw* hw, u32 mpi_tx_reg_res_dis) +{ + AQ_WRITE_REG_BIT(hw, mpi_tx_reg_res_dis_adr, + mpi_tx_reg_res_dis_msk, mpi_tx_reg_res_dis_shift, mpi_tx_reg_res_dis); +} +u32 mpi_tx_reg_res_dis_get(struct aq_hw* hw) +{ + return AQ_READ_REG_BIT(hw, mpi_tx_reg_res_dis_adr, + mpi_tx_reg_res_dis_msk, mpi_tx_reg_res_dis_shift); +} + + +/* stats */ +u32 rpb_rx_dma_drop_pkt_cnt_get(struct aq_hw *aq_hw) +{ + return AQ_READ_REG(aq_hw, rpb_rx_dma_drop_pkt_cnt_adr); +} + +u32 stats_rx_dma_good_octet_counterlsw_get(struct aq_hw *aq_hw) +{ + return AQ_READ_REG(aq_hw, stats_rx_dma_good_octet_counterlsw__adr); +} + +u32 stats_rx_dma_good_pkt_counterlsw_get(struct aq_hw *aq_hw) +{ + return AQ_READ_REG(aq_hw, stats_rx_dma_good_pkt_counterlsw__adr); +} + +u32 stats_tx_dma_good_octet_counterlsw_get(struct aq_hw *aq_hw) +{ + return AQ_READ_REG(aq_hw, 
stats_tx_dma_good_octet_counterlsw__adr); +} + +u32 stats_tx_dma_good_pkt_counterlsw_get(struct aq_hw *aq_hw) +{ + return AQ_READ_REG(aq_hw, stats_tx_dma_good_pkt_counterlsw__adr); +} + +u32 stats_rx_dma_good_octet_countermsw_get(struct aq_hw *aq_hw) +{ + return AQ_READ_REG(aq_hw, stats_rx_dma_good_octet_countermsw__adr); +} + +u32 stats_rx_dma_good_pkt_countermsw_get(struct aq_hw *aq_hw) +{ + return AQ_READ_REG(aq_hw, stats_rx_dma_good_pkt_countermsw__adr); +} + +u32 stats_tx_dma_good_octet_countermsw_get(struct aq_hw *aq_hw) +{ + return AQ_READ_REG(aq_hw, stats_tx_dma_good_octet_countermsw__adr); +} + +u32 stats_tx_dma_good_pkt_countermsw_get(struct aq_hw *aq_hw) +{ + return AQ_READ_REG(aq_hw, stats_tx_dma_good_pkt_countermsw__adr); +} + +u32 stats_rx_lro_coalesced_pkt_count0_get(struct aq_hw *aq_hw) +{ + return AQ_READ_REG(aq_hw, stats_rx_lo_coalesced_pkt_count0__addr); +} + +/* interrupt */ +void itr_irq_auto_masklsw_set(struct aq_hw *aq_hw, u32 irq_auto_masklsw) +{ + AQ_WRITE_REG(aq_hw, itr_iamrlsw_adr, irq_auto_masklsw); +} + +void itr_irq_map_en_rx_set(struct aq_hw *aq_hw, u32 irq_map_en_rx, u32 rx) +{ +/* register address for bitfield imr_rx{r}_en */ + static u32 itr_imr_rxren_adr[32] = { + 0x00002100U, 0x00002100U, 0x00002104U, 0x00002104U, + 0x00002108U, 0x00002108U, 0x0000210cU, 0x0000210cU, + 0x00002110U, 0x00002110U, 0x00002114U, 0x00002114U, + 0x00002118U, 0x00002118U, 0x0000211cU, 0x0000211cU, + 0x00002120U, 0x00002120U, 0x00002124U, 0x00002124U, + 0x00002128U, 0x00002128U, 0x0000212cU, 0x0000212cU, + 0x00002130U, 0x00002130U, 0x00002134U, 0x00002134U, + 0x00002138U, 0x00002138U, 0x0000213cU, 0x0000213cU + }; + +/* bitmask for bitfield imr_rx{r}_en */ + static u32 itr_imr_rxren_msk[32] = { + 0x00008000U, 0x00000080U, 0x00008000U, 0x00000080U, + 0x00008000U, 0x00000080U, 0x00008000U, 0x00000080U, + 0x00008000U, 0x00000080U, 0x00008000U, 0x00000080U, + 0x00008000U, 0x00000080U, 0x00008000U, 0x00000080U, + 0x00008000U, 0x00000080U, 0x00008000U, 0x00000080U, + 0x00008000U, 0x00000080U, 0x00008000U, 0x00000080U, + 0x00008000U, 0x00000080U, 0x00008000U, 0x00000080U, + 0x00008000U, 0x00000080U, 0x00008000U, 0x00000080U + }; + +/* lower bit position of bitfield imr_rx{r}_en */ + static u32 itr_imr_rxren_shift[32] = { + 15U, 7U, 15U, 7U, 15U, 7U, 15U, 7U, + 15U, 7U, 15U, 7U, 15U, 7U, 15U, 7U, + 15U, 7U, 15U, 7U, 15U, 7U, 15U, 7U, + 15U, 7U, 15U, 7U, 15U, 7U, 15U, 7U + }; + + AQ_WRITE_REG_BIT(aq_hw, itr_imr_rxren_adr[rx], + itr_imr_rxren_msk[rx], + itr_imr_rxren_shift[rx], + irq_map_en_rx); +} + +void itr_irq_map_en_tx_set(struct aq_hw *aq_hw, u32 irq_map_en_tx, u32 tx) +{ +/* register address for bitfield imr_tx{t}_en */ + static u32 itr_imr_txten_adr[32] = { + 0x00002100U, 0x00002100U, 0x00002104U, 0x00002104U, + 0x00002108U, 0x00002108U, 0x0000210cU, 0x0000210cU, + 0x00002110U, 0x00002110U, 0x00002114U, 0x00002114U, + 0x00002118U, 0x00002118U, 0x0000211cU, 0x0000211cU, + 0x00002120U, 0x00002120U, 0x00002124U, 0x00002124U, + 0x00002128U, 0x00002128U, 0x0000212cU, 0x0000212cU, + 0x00002130U, 0x00002130U, 0x00002134U, 0x00002134U, + 0x00002138U, 0x00002138U, 0x0000213cU, 0x0000213cU + }; + +/* bitmask for bitfield imr_tx{t}_en */ + static u32 itr_imr_txten_msk[32] = { + 0x80000000U, 0x00800000U, 0x80000000U, 0x00800000U, + 0x80000000U, 0x00800000U, 0x80000000U, 0x00800000U, + 0x80000000U, 0x00800000U, 0x80000000U, 0x00800000U, + 0x80000000U, 0x00800000U, 0x80000000U, 0x00800000U, + 0x80000000U, 0x00800000U, 0x80000000U, 0x00800000U, + 0x80000000U, 0x00800000U, 0x80000000U, 
0x00800000U, + 0x80000000U, 0x00800000U, 0x80000000U, 0x00800000U, + 0x80000000U, 0x00800000U, 0x80000000U, 0x00800000U + }; + +/* lower bit position of bitfield imr_tx{t}_en */ + static u32 itr_imr_txten_shift[32] = { + 31U, 23U, 31U, 23U, 31U, 23U, 31U, 23U, + 31U, 23U, 31U, 23U, 31U, 23U, 31U, 23U, + 31U, 23U, 31U, 23U, 31U, 23U, 31U, 23U, + 31U, 23U, 31U, 23U, 31U, 23U, 31U, 23U + }; + + AQ_WRITE_REG_BIT(aq_hw, itr_imr_txten_adr[tx], + itr_imr_txten_msk[tx], + itr_imr_txten_shift[tx], + irq_map_en_tx); +} + +void itr_irq_map_rx_set(struct aq_hw *aq_hw, u32 irq_map_rx, u32 rx) +{ +/* register address for bitfield imr_rx{r}[4:0] */ + static u32 itr_imr_rxr_adr[32] = { + 0x00002100U, 0x00002100U, 0x00002104U, 0x00002104U, + 0x00002108U, 0x00002108U, 0x0000210cU, 0x0000210cU, + 0x00002110U, 0x00002110U, 0x00002114U, 0x00002114U, + 0x00002118U, 0x00002118U, 0x0000211cU, 0x0000211cU, + 0x00002120U, 0x00002120U, 0x00002124U, 0x00002124U, + 0x00002128U, 0x00002128U, 0x0000212cU, 0x0000212cU, + 0x00002130U, 0x00002130U, 0x00002134U, 0x00002134U, + 0x00002138U, 0x00002138U, 0x0000213cU, 0x0000213cU + }; + +/* bitmask for bitfield imr_rx{r}[4:0] */ + static u32 itr_imr_rxr_msk[32] = { + 0x00001f00U, 0x0000001fU, 0x00001f00U, 0x0000001fU, + 0x00001f00U, 0x0000001fU, 0x00001f00U, 0x0000001fU, + 0x00001f00U, 0x0000001fU, 0x00001f00U, 0x0000001fU, + 0x00001f00U, 0x0000001fU, 0x00001f00U, 0x0000001fU, + 0x00001f00U, 0x0000001fU, 0x00001f00U, 0x0000001fU, + 0x00001f00U, 0x0000001fU, 0x00001f00U, 0x0000001fU, + 0x00001f00U, 0x0000001fU, 0x00001f00U, 0x0000001fU, + 0x00001f00U, 0x0000001fU, 0x00001f00U, 0x0000001fU + }; + +/* lower bit position of bitfield imr_rx{r}[4:0] */ + static u32 itr_imr_rxr_shift[32] = { + 8U, 0U, 8U, 0U, 8U, 0U, 8U, 0U, + 8U, 0U, 8U, 0U, 8U, 0U, 8U, 0U, + 8U, 0U, 8U, 0U, 8U, 0U, 8U, 0U, + 8U, 0U, 8U, 0U, 8U, 0U, 8U, 0U + }; + + AQ_WRITE_REG_BIT(aq_hw, itr_imr_rxr_adr[rx], + itr_imr_rxr_msk[rx], + itr_imr_rxr_shift[rx], + irq_map_rx); +} + +void itr_irq_map_tx_set(struct aq_hw *aq_hw, u32 irq_map_tx, u32 tx) +{ +/* register address for bitfield imr_tx{t}[4:0] */ + static u32 itr_imr_txt_adr[32] = { + 0x00002100U, 0x00002100U, 0x00002104U, 0x00002104U, + 0x00002108U, 0x00002108U, 0x0000210cU, 0x0000210cU, + 0x00002110U, 0x00002110U, 0x00002114U, 0x00002114U, + 0x00002118U, 0x00002118U, 0x0000211cU, 0x0000211cU, + 0x00002120U, 0x00002120U, 0x00002124U, 0x00002124U, + 0x00002128U, 0x00002128U, 0x0000212cU, 0x0000212cU, + 0x00002130U, 0x00002130U, 0x00002134U, 0x00002134U, + 0x00002138U, 0x00002138U, 0x0000213cU, 0x0000213cU + }; + +/* bitmask for bitfield imr_tx{t}[4:0] */ + static u32 itr_imr_txt_msk[32] = { + 0x1f000000U, 0x001f0000U, 0x1f000000U, 0x001f0000U, + 0x1f000000U, 0x001f0000U, 0x1f000000U, 0x001f0000U, + 0x1f000000U, 0x001f0000U, 0x1f000000U, 0x001f0000U, + 0x1f000000U, 0x001f0000U, 0x1f000000U, 0x001f0000U, + 0x1f000000U, 0x001f0000U, 0x1f000000U, 0x001f0000U, + 0x1f000000U, 0x001f0000U, 0x1f000000U, 0x001f0000U, + 0x1f000000U, 0x001f0000U, 0x1f000000U, 0x001f0000U, + 0x1f000000U, 0x001f0000U, 0x1f000000U, 0x001f0000U + }; + +/* lower bit position of bitfield imr_tx{t}[4:0] */ + static u32 itr_imr_txt_shift[32] = { + 24U, 16U, 24U, 16U, 24U, 16U, 24U, 16U, + 24U, 16U, 24U, 16U, 24U, 16U, 24U, 16U, + 24U, 16U, 24U, 16U, 24U, 16U, 24U, 16U, + 24U, 16U, 24U, 16U, 24U, 16U, 24U, 16U + }; + + AQ_WRITE_REG_BIT(aq_hw, itr_imr_txt_adr[tx], + itr_imr_txt_msk[tx], + itr_imr_txt_shift[tx], + irq_map_tx); +} + +void itr_irq_msk_clearlsw_set(struct aq_hw *aq_hw, u32 
irq_msk_clearlsw) +{ + AQ_WRITE_REG(aq_hw, itr_imcrlsw_adr, irq_msk_clearlsw); +} + +void itr_irq_msk_setlsw_set(struct aq_hw *aq_hw, u32 irq_msk_setlsw) +{ + AQ_WRITE_REG(aq_hw, itr_imsrlsw_adr, irq_msk_setlsw); +} + +void itr_irq_reg_res_dis_set(struct aq_hw *aq_hw, u32 irq_reg_res_dis) +{ + AQ_WRITE_REG_BIT(aq_hw, itr_reg_res_dsbl_adr, + itr_reg_res_dsbl_msk, + itr_reg_res_dsbl_shift, irq_reg_res_dis); +} + +void itr_irq_status_clearlsw_set(struct aq_hw *aq_hw, + u32 irq_status_clearlsw) +{ + AQ_WRITE_REG(aq_hw, itr_iscrlsw_adr, irq_status_clearlsw); +} + +u32 itr_irq_statuslsw_get(struct aq_hw *aq_hw) +{ + return AQ_READ_REG(aq_hw, itr_isrlsw_adr); +} + +u32 itr_res_irq_get(struct aq_hw *aq_hw) +{ + return AQ_READ_REG_BIT(aq_hw, itr_res_adr, itr_res_msk, + itr_res_shift); +} + +void itr_res_irq_set(struct aq_hw *aq_hw, u32 res_irq) +{ + AQ_WRITE_REG_BIT(aq_hw, itr_res_adr, itr_res_msk, + itr_res_shift, res_irq); +} + +void itr_link_int_map_en_set(struct aq_hw *aq_hw, u32 link_int_en_map_en) +{ + AQ_WRITE_REG_BIT(aq_hw, itrImrLinkEn_ADR, itrImrLinkEn_MSK, itrImrLinkEn_SHIFT, link_int_en_map_en); +} + +u32 itr_link_int_map_en_get(struct aq_hw *aq_hw) +{ + return AQ_READ_REG_BIT(aq_hw, itrImrLinkEn_ADR, itrImrLinkEn_MSK, itrImrLinkEn_SHIFT); +} + +void itr_link_int_map_set(struct aq_hw *aq_hw, u32 link_int_map) +{ + AQ_WRITE_REG_BIT(aq_hw, itrImrLink_ADR, itrImrLink_MSK, itrImrLink_SHIFT, link_int_map); +} + +u32 itr_link_int_map_get(struct aq_hw *aq_hw) +{ + return AQ_READ_REG_BIT(aq_hw, itrImrLink_ADR, itrImrLink_MSK, itrImrLink_SHIFT); +} + +void itr_mif_int_map_en_set(struct aq_hw *aq_hw, u32 mifInterruptMappingEnable, u32 mif) +{ + AQ_WRITE_REG_BIT(aq_hw, itrImrMifMEn_ADR(mif), itrImrMifMEn_MSK(mif), itrImrMifMEn_SHIFT(mif), mifInterruptMappingEnable); +} + +u32 itr_mif_int_map_en_get(struct aq_hw *aq_hw, u32 mif) +{ + return AQ_READ_REG_BIT(aq_hw, itrImrMifMEn_ADR(mif), itrImrMifMEn_MSK(mif), itrImrMifMEn_SHIFT(mif)); +} + +void itr_mif_int_map_set(struct aq_hw *aq_hw, u32 mifInterruptMapping, u32 mif) +{ + AQ_WRITE_REG_BIT(aq_hw, itrImrMifM_ADR(mif), itrImrMifM_MSK(mif), itrImrMifM_SHIFT(mif), mifInterruptMapping); +} + +u32 itr_mif_int_map_get(struct aq_hw *aq_hw, u32 mif) +{ + return AQ_READ_REG_BIT(aq_hw, itrImrMifM_ADR(mif), itrImrMifM_MSK(mif), itrImrMifM_SHIFT(mif)); +} + +void itr_irq_mode_set(struct aq_hw *aq_hw, u32 irq_mode) +{ + AQ_WRITE_REG_BIT(aq_hw, itrIntMode_ADR, itrIntMode_MSK, itrIntMode_SHIFT, irq_mode); +} + +void itr_irq_status_cor_en_set(struct aq_hw *aq_hw, u32 irq_status_cor_en) +{ + AQ_WRITE_REG_BIT(aq_hw, itrIsrCorEn_ADR, itrIsrCorEn_MSK, itrIsrCorEn_SHIFT, irq_status_cor_en); +} + +void itr_irq_auto_mask_clr_en_set(struct aq_hw *aq_hw, u32 irq_auto_mask_clr_en) +{ + AQ_WRITE_REG_BIT(aq_hw, itrIamrClrEn_ADR, itrIamrClrEn_MSK, itrIamrClrEn_SHIFT, irq_auto_mask_clr_en); +} + +/* rdm */ +void rdm_cpu_id_set(struct aq_hw *aq_hw, u32 cpuid, u32 dca) +{ + AQ_WRITE_REG_BIT(aq_hw, rdm_dcadcpuid_adr(dca), + rdm_dcadcpuid_msk, + rdm_dcadcpuid_shift, cpuid); +} + +void rdm_rx_dca_en_set(struct aq_hw *aq_hw, u32 rx_dca_en) +{ + AQ_WRITE_REG_BIT(aq_hw, rdm_dca_en_adr, rdm_dca_en_msk, + rdm_dca_en_shift, rx_dca_en); +} + +void rdm_rx_dca_mode_set(struct aq_hw *aq_hw, u32 rx_dca_mode) +{ + AQ_WRITE_REG_BIT(aq_hw, rdm_dca_mode_adr, rdm_dca_mode_msk, + rdm_dca_mode_shift, rx_dca_mode); +} + +void rdm_rx_desc_data_buff_size_set(struct aq_hw *aq_hw, + u32 rx_desc_data_buff_size, u32 descriptor) +{ + AQ_WRITE_REG_BIT(aq_hw, rdm_descddata_size_adr(descriptor), + 
rdm_descddata_size_msk, + rdm_descddata_size_shift, + rx_desc_data_buff_size); +} + +void rdm_rx_desc_dca_en_set(struct aq_hw *aq_hw, u32 rx_desc_dca_en, u32 dca) +{ + AQ_WRITE_REG_BIT(aq_hw, rdm_dcaddesc_en_adr(dca), + rdm_dcaddesc_en_msk, + rdm_dcaddesc_en_shift, + rx_desc_dca_en); +} + +void rdm_rx_desc_en_set(struct aq_hw *aq_hw, u32 rx_desc_en, u32 descriptor) +{ + AQ_WRITE_REG_BIT(aq_hw, rdm_descden_adr(descriptor), + rdm_descden_msk, + rdm_descden_shift, + rx_desc_en); +} + +void rdm_rx_desc_head_buff_size_set(struct aq_hw *aq_hw, + u32 rx_desc_head_buff_size, u32 descriptor) +{ + AQ_WRITE_REG_BIT(aq_hw, rdm_descdhdr_size_adr(descriptor), + rdm_descdhdr_size_msk, + rdm_descdhdr_size_shift, + rx_desc_head_buff_size); +} + +void rdm_rx_desc_head_splitting_set(struct aq_hw *aq_hw, + u32 rx_desc_head_splitting, u32 descriptor) +{ + AQ_WRITE_REG_BIT(aq_hw, rdm_descdhdr_split_adr(descriptor), + rdm_descdhdr_split_msk, + rdm_descdhdr_split_shift, + rx_desc_head_splitting); +} + +u32 rdm_rx_desc_head_ptr_get(struct aq_hw *aq_hw, u32 descriptor) +{ + return AQ_READ_REG_BIT(aq_hw, rdm_descdhd_adr(descriptor), + rdm_descdhd_msk, rdm_descdhd_shift); +} + +void rdm_rx_desc_len_set(struct aq_hw *aq_hw, u32 rx_desc_len, u32 descriptor) +{ + AQ_WRITE_REG_BIT(aq_hw, rdm_descdlen_adr(descriptor), + rdm_descdlen_msk, rdm_descdlen_shift, + rx_desc_len); +} + +void rdm_rx_desc_res_set(struct aq_hw *aq_hw, u32 rx_desc_res, u32 descriptor) +{ + AQ_WRITE_REG_BIT(aq_hw, rdm_descdreset_adr(descriptor), + rdm_descdreset_msk, rdm_descdreset_shift, + rx_desc_res); +} + +void rdm_rx_desc_wr_wb_irq_en_set(struct aq_hw *aq_hw, + u32 rx_desc_wr_wb_irq_en) +{ + AQ_WRITE_REG_BIT(aq_hw, rdm_int_desc_wrb_en_adr, + rdm_int_desc_wrb_en_msk, + rdm_int_desc_wrb_en_shift, + rx_desc_wr_wb_irq_en); +} + +void rdm_rx_head_dca_en_set(struct aq_hw *aq_hw, u32 rx_head_dca_en, u32 dca) +{ + AQ_WRITE_REG_BIT(aq_hw, rdm_dcadhdr_en_adr(dca), + rdm_dcadhdr_en_msk, + rdm_dcadhdr_en_shift, + rx_head_dca_en); +} + +void rdm_rx_pld_dca_en_set(struct aq_hw *aq_hw, u32 rx_pld_dca_en, u32 dca) +{ + AQ_WRITE_REG_BIT(aq_hw, rdm_dcadpay_en_adr(dca), + rdm_dcadpay_en_msk, rdm_dcadpay_en_shift, + rx_pld_dca_en); +} + +void rdm_rdm_intr_moder_en_set(struct aq_hw *aq_hw, u32 rdm_intr_moder_en) +{ + AQ_WRITE_REG_BIT(aq_hw, rdm_int_rim_en_adr, + rdm_int_rim_en_msk, + rdm_int_rim_en_shift, + rdm_intr_moder_en); +} + +/* reg */ +void reg_gen_irq_map_set(struct aq_hw *aq_hw, u32 gen_intr_map, u32 regidx) +{ + AQ_WRITE_REG(aq_hw, gen_intr_map_adr(regidx), gen_intr_map); +} + +u32 reg_gen_irq_status_get(struct aq_hw *aq_hw) +{ + return AQ_READ_REG(aq_hw, gen_intr_stat_adr); +} + +void reg_irq_glb_ctl_set(struct aq_hw *aq_hw, u32 intr_glb_ctl) +{ + AQ_WRITE_REG(aq_hw, intr_glb_ctl_adr, intr_glb_ctl); +} + +void reg_irq_thr_set(struct aq_hw *aq_hw, u32 intr_thr, u32 throttle) +{ + AQ_WRITE_REG(aq_hw, intr_thr_adr(throttle), intr_thr); +} + +void reg_rx_dma_desc_base_addresslswset(struct aq_hw *aq_hw, + u32 rx_dma_desc_base_addrlsw, + u32 descriptor) +{ + AQ_WRITE_REG(aq_hw, rx_dma_desc_base_addrlsw_adr(descriptor), + rx_dma_desc_base_addrlsw); +} + +void reg_rx_dma_desc_base_addressmswset(struct aq_hw *aq_hw, + u32 rx_dma_desc_base_addrmsw, + u32 descriptor) +{ + AQ_WRITE_REG(aq_hw, rx_dma_desc_base_addrmsw_adr(descriptor), + rx_dma_desc_base_addrmsw); +} + +u32 reg_rx_dma_desc_status_get(struct aq_hw *aq_hw, u32 descriptor) +{ + return AQ_READ_REG(aq_hw, rx_dma_desc_stat_adr(descriptor)); +} + +void reg_rx_dma_desc_tail_ptr_set(struct aq_hw *aq_hw, + u32 
rx_dma_desc_tail_ptr, u32 descriptor) +{ + AQ_WRITE_REG(aq_hw, rx_dma_desc_tail_ptr_adr(descriptor), + rx_dma_desc_tail_ptr); +} + +u32 reg_rx_dma_desc_tail_ptr_get(struct aq_hw *aq_hw, u32 descriptor) +{ + return AQ_READ_REG(aq_hw, rx_dma_desc_tail_ptr_adr(descriptor)); +} + +void reg_rx_flr_mcst_flr_msk_set(struct aq_hw *aq_hw, u32 rx_flr_mcst_flr_msk) +{ + AQ_WRITE_REG(aq_hw, rx_flr_mcst_flr_msk_adr, rx_flr_mcst_flr_msk); +} + +void reg_rx_flr_mcst_flr_set(struct aq_hw *aq_hw, u32 rx_flr_mcst_flr, + u32 filter) +{ + AQ_WRITE_REG(aq_hw, rx_flr_mcst_flr_adr(filter), rx_flr_mcst_flr); +} + +void reg_rx_flr_rss_control1set(struct aq_hw *aq_hw, u32 rx_flr_rss_control1) +{ + AQ_WRITE_REG(aq_hw, rx_flr_rss_control1_adr, rx_flr_rss_control1); +} + +void reg_rx_flr_control2_set(struct aq_hw *aq_hw, u32 rx_filter_control2) +{ + AQ_WRITE_REG(aq_hw, rx_flr_control2_adr, rx_filter_control2); +} + +void reg_rx_intr_moder_ctrl_set(struct aq_hw *aq_hw, + u32 rx_intr_moderation_ctl, + u32 queue) +{ + AQ_WRITE_REG(aq_hw, rx_intr_moderation_ctl_adr(queue), + rx_intr_moderation_ctl); +} + +void reg_tx_dma_debug_ctl_set(struct aq_hw *aq_hw, u32 tx_dma_debug_ctl) +{ + AQ_WRITE_REG(aq_hw, tx_dma_debug_ctl_adr, tx_dma_debug_ctl); +} + +void reg_tx_dma_desc_base_addresslswset(struct aq_hw *aq_hw, + u32 tx_dma_desc_base_addrlsw, + u32 descriptor) +{ + AQ_WRITE_REG(aq_hw, tx_dma_desc_base_addrlsw_adr(descriptor), + tx_dma_desc_base_addrlsw); +} + +void reg_tx_dma_desc_base_addressmswset(struct aq_hw *aq_hw, + u32 tx_dma_desc_base_addrmsw, + u32 descriptor) +{ + AQ_WRITE_REG(aq_hw, tx_dma_desc_base_addrmsw_adr(descriptor), + tx_dma_desc_base_addrmsw); +} + +void reg_tx_dma_desc_tail_ptr_set(struct aq_hw *aq_hw, + u32 tx_dma_desc_tail_ptr, u32 descriptor) +{ + //wmb(); + + AQ_WRITE_REG(aq_hw, tx_dma_desc_tail_ptr_adr(descriptor), + tx_dma_desc_tail_ptr); +} + +u32 reg_tx_dma_desc_tail_ptr_get(struct aq_hw *aq_hw, u32 descriptor) +{ + return AQ_READ_REG(aq_hw, tx_dma_desc_tail_ptr_adr(descriptor)); +} + +void reg_tx_intr_moder_ctrl_set(struct aq_hw *aq_hw, + u32 tx_intr_moderation_ctl, + u32 queue) +{ + AQ_WRITE_REG(aq_hw, tx_intr_moderation_ctl_adr(queue), + tx_intr_moderation_ctl); +} + +/* RPB: rx packet buffer */ +void rpb_dma_sys_lbk_set(struct aq_hw *aq_hw, u32 dma_sys_lbk) +{ + AQ_WRITE_REG_BIT(aq_hw, rpb_dma_sys_lbk_adr, + rpb_dma_sys_lbk_msk, + rpb_dma_sys_lbk_shift, dma_sys_lbk); +} + +void rpb_rpf_rx_traf_class_mode_set(struct aq_hw *aq_hw, + u32 rx_traf_class_mode) +{ + AQ_WRITE_REG_BIT(aq_hw, rpb_rpf_rx_tc_mode_adr, + rpb_rpf_rx_tc_mode_msk, + rpb_rpf_rx_tc_mode_shift, + rx_traf_class_mode); +} + +void rpb_rx_buff_en_set(struct aq_hw *aq_hw, u32 rx_buff_en) +{ + AQ_WRITE_REG_BIT(aq_hw, rpb_rx_buf_en_adr, rpb_rx_buf_en_msk, + rpb_rx_buf_en_shift, rx_buff_en); +} + +void rpb_rx_buff_hi_threshold_per_tc_set(struct aq_hw *aq_hw, + u32 rx_buff_hi_threshold_per_tc, + u32 buffer) +{ + AQ_WRITE_REG_BIT(aq_hw, rpb_rxbhi_thresh_adr(buffer), + rpb_rxbhi_thresh_msk, rpb_rxbhi_thresh_shift, + rx_buff_hi_threshold_per_tc); +} + +void rpb_rx_buff_lo_threshold_per_tc_set(struct aq_hw *aq_hw, + u32 rx_buff_lo_threshold_per_tc, + u32 buffer) +{ + AQ_WRITE_REG_BIT(aq_hw, rpb_rxblo_thresh_adr(buffer), + rpb_rxblo_thresh_msk, + rpb_rxblo_thresh_shift, + rx_buff_lo_threshold_per_tc); +} + +void rpb_rx_flow_ctl_mode_set(struct aq_hw *aq_hw, u32 rx_flow_ctl_mode) +{ + AQ_WRITE_REG_BIT(aq_hw, rpb_rx_fc_mode_adr, + rpb_rx_fc_mode_msk, + rpb_rx_fc_mode_shift, rx_flow_ctl_mode); +} + +void rpb_rx_pkt_buff_size_per_tc_set(struct 
aq_hw *aq_hw, + u32 rx_pkt_buff_size_per_tc, u32 buffer) +{ + AQ_WRITE_REG_BIT(aq_hw, rpb_rxbbuf_size_adr(buffer), + rpb_rxbbuf_size_msk, rpb_rxbbuf_size_shift, + rx_pkt_buff_size_per_tc); +} + +void rpb_rx_xoff_en_per_tc_set(struct aq_hw *aq_hw, u32 rx_xoff_en_per_tc, + u32 buffer) +{ + AQ_WRITE_REG_BIT(aq_hw, rpb_rxbxoff_en_adr(buffer), + rpb_rxbxoff_en_msk, rpb_rxbxoff_en_shift, + rx_xoff_en_per_tc); +} + +/* rpf */ + +void rpfl2broadcast_count_threshold_set(struct aq_hw *aq_hw, + u32 l2broadcast_count_threshold) +{ + AQ_WRITE_REG_BIT(aq_hw, rpfl2bc_thresh_adr, + rpfl2bc_thresh_msk, + rpfl2bc_thresh_shift, + l2broadcast_count_threshold); +} + +void rpfl2broadcast_en_set(struct aq_hw *aq_hw, u32 l2broadcast_en) +{ + AQ_WRITE_REG_BIT(aq_hw, rpfl2bc_en_adr, rpfl2bc_en_msk, + rpfl2bc_en_shift, l2broadcast_en); +} + +void rpfl2broadcast_flr_act_set(struct aq_hw *aq_hw, u32 l2broadcast_flr_act) +{ + AQ_WRITE_REG_BIT(aq_hw, rpfl2bc_act_adr, rpfl2bc_act_msk, + rpfl2bc_act_shift, l2broadcast_flr_act); +} + +void rpfl2multicast_flr_en_set(struct aq_hw *aq_hw, u32 l2multicast_flr_en, + u32 filter) +{ + AQ_WRITE_REG_BIT(aq_hw, rpfl2mc_enf_adr(filter), + rpfl2mc_enf_msk, + rpfl2mc_enf_shift, l2multicast_flr_en); +} + +void rpfl2promiscuous_mode_en_set(struct aq_hw *aq_hw, + u32 l2promiscuous_mode_en) +{ + AQ_WRITE_REG_BIT(aq_hw, rpfl2promis_mode_adr, + rpfl2promis_mode_msk, + rpfl2promis_mode_shift, + l2promiscuous_mode_en); +} + +void rpfl2unicast_flr_act_set(struct aq_hw *aq_hw, u32 l2unicast_flr_act, + u32 filter) +{ + AQ_WRITE_REG_BIT(aq_hw, rpfl2uc_actf_adr(filter), + rpfl2uc_actf_msk, rpfl2uc_actf_shift, + l2unicast_flr_act); +} + +void rpfl2_uc_flr_en_set(struct aq_hw *aq_hw, u32 l2unicast_flr_en, + u32 filter) +{ + AQ_WRITE_REG_BIT(aq_hw, rpfl2uc_enf_adr(filter), + rpfl2uc_enf_msk, + rpfl2uc_enf_shift, l2unicast_flr_en); +} + +void rpfl2unicast_dest_addresslsw_set(struct aq_hw *aq_hw, + u32 l2unicast_dest_addresslsw, + u32 filter) +{ + AQ_WRITE_REG(aq_hw, rpfl2uc_daflsw_adr(filter), + l2unicast_dest_addresslsw); +} + +void rpfl2unicast_dest_addressmsw_set(struct aq_hw *aq_hw, + u32 l2unicast_dest_addressmsw, + u32 filter) +{ + AQ_WRITE_REG_BIT(aq_hw, rpfl2uc_dafmsw_adr(filter), + rpfl2uc_dafmsw_msk, rpfl2uc_dafmsw_shift, + l2unicast_dest_addressmsw); +} + +void rpfl2_accept_all_mc_packets_set(struct aq_hw *aq_hw, + u32 l2_accept_all_mc_packets) +{ + AQ_WRITE_REG_BIT(aq_hw, rpfl2mc_accept_all_adr, + rpfl2mc_accept_all_msk, + rpfl2mc_accept_all_shift, + l2_accept_all_mc_packets); +} + +void rpf_rpb_user_priority_tc_map_set(struct aq_hw *aq_hw, + u32 user_priority_tc_map, u32 tc) +{ +/* register address for bitfield rx_tc_up{t}[2:0] */ + static u32 rpf_rpb_rx_tc_upt_adr[8] = { + 0x000054c4U, 0x000054c4U, 0x000054c4U, 0x000054c4U, + 0x000054c4U, 0x000054c4U, 0x000054c4U, 0x000054c4U + }; + +/* bitmask for bitfield rx_tc_up{t}[2:0] */ + static u32 rpf_rpb_rx_tc_upt_msk[8] = { + 0x00000007U, 0x00000070U, 0x00000700U, 0x00007000U, + 0x00070000U, 0x00700000U, 0x07000000U, 0x70000000U + }; + +/* lower bit position of bitfield rx_tc_up{t}[2:0] */ + static u32 rpf_rpb_rx_tc_upt_shft[8] = { + 0U, 4U, 8U, 12U, 16U, 20U, 24U, 28U + }; + + AQ_WRITE_REG_BIT(aq_hw, rpf_rpb_rx_tc_upt_adr[tc], + rpf_rpb_rx_tc_upt_msk[tc], + rpf_rpb_rx_tc_upt_shft[tc], + user_priority_tc_map); +} + +void rpf_rss_key_addr_set(struct aq_hw *aq_hw, u32 rss_key_addr) +{ + AQ_WRITE_REG_BIT(aq_hw, rpf_rss_key_addr_adr, + rpf_rss_key_addr_msk, + rpf_rss_key_addr_shift, + rss_key_addr); +} + +void rpf_rss_key_wr_data_set(struct aq_hw 
*aq_hw, u32 rss_key_wr_data) +{ + AQ_WRITE_REG(aq_hw, rpf_rss_key_wr_data_adr, + rss_key_wr_data); +} + +u32 rpf_rss_key_rd_data_get(struct aq_hw *aq_hw) +{ + return AQ_READ_REG(aq_hw, rpf_rss_key_rd_data_adr); +} + +u32 rpf_rss_key_wr_en_get(struct aq_hw *aq_hw) +{ + return AQ_READ_REG_BIT(aq_hw, rpf_rss_key_wr_eni_adr, + rpf_rss_key_wr_eni_msk, + rpf_rss_key_wr_eni_shift); +} + +void rpf_rss_key_wr_en_set(struct aq_hw *aq_hw, u32 rss_key_wr_en) +{ + AQ_WRITE_REG_BIT(aq_hw, rpf_rss_key_wr_eni_adr, + rpf_rss_key_wr_eni_msk, + rpf_rss_key_wr_eni_shift, + rss_key_wr_en); +} + +void rpf_rss_redir_tbl_addr_set(struct aq_hw *aq_hw, u32 rss_redir_tbl_addr) +{ + AQ_WRITE_REG_BIT(aq_hw, rpf_rss_redir_addr_adr, + rpf_rss_redir_addr_msk, + rpf_rss_redir_addr_shift, rss_redir_tbl_addr); +} + +void rpf_rss_redir_tbl_wr_data_set(struct aq_hw *aq_hw, + u32 rss_redir_tbl_wr_data) +{ + AQ_WRITE_REG_BIT(aq_hw, rpf_rss_redir_wr_data_adr, + rpf_rss_redir_wr_data_msk, + rpf_rss_redir_wr_data_shift, + rss_redir_tbl_wr_data); +} + +u32 rpf_rss_redir_wr_en_get(struct aq_hw *aq_hw) +{ + return AQ_READ_REG_BIT(aq_hw, rpf_rss_redir_wr_eni_adr, + rpf_rss_redir_wr_eni_msk, + rpf_rss_redir_wr_eni_shift); +} + +void rpf_rss_redir_wr_en_set(struct aq_hw *aq_hw, u32 rss_redir_wr_en) +{ + AQ_WRITE_REG_BIT(aq_hw, rpf_rss_redir_wr_eni_adr, + rpf_rss_redir_wr_eni_msk, + rpf_rss_redir_wr_eni_shift, rss_redir_wr_en); +} + +void rpf_tpo_to_rpf_sys_lbk_set(struct aq_hw *aq_hw, u32 tpo_to_rpf_sys_lbk) +{ + AQ_WRITE_REG_BIT(aq_hw, rpf_tpo_rpf_sys_lbk_adr, + rpf_tpo_rpf_sys_lbk_msk, + rpf_tpo_rpf_sys_lbk_shift, + tpo_to_rpf_sys_lbk); +} + + +void hw_atl_rpf_vlan_inner_etht_set(struct aq_hw_s *aq_hw, u32 vlan_inner_etht) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_VL_INNER_TPID_ADR, + HW_ATL_RPF_VL_INNER_TPID_MSK, + HW_ATL_RPF_VL_INNER_TPID_SHIFT, + vlan_inner_etht); +} + +void hw_atl_rpf_vlan_outer_etht_set(struct aq_hw_s *aq_hw, u32 vlan_outer_etht) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_VL_OUTER_TPID_ADR, + HW_ATL_RPF_VL_OUTER_TPID_MSK, + HW_ATL_RPF_VL_OUTER_TPID_SHIFT, + vlan_outer_etht); +} + +void hw_atl_rpf_vlan_prom_mode_en_set(struct aq_hw_s *aq_hw, + u32 vlan_prom_mode_en) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_VL_PROMIS_MODE_ADR, + HW_ATL_RPF_VL_PROMIS_MODE_MSK, + HW_ATL_RPF_VL_PROMIS_MODE_SHIFT, + vlan_prom_mode_en); +} + +void hw_atl_rpf_vlan_accept_untagged_packets_set(struct aq_hw_s *aq_hw, + u32 vlan_acc_untagged_packets) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_VL_ACCEPT_UNTAGGED_MODE_ADR, + HW_ATL_RPF_VL_ACCEPT_UNTAGGED_MODE_MSK, + HW_ATL_RPF_VL_ACCEPT_UNTAGGED_MODE_SHIFT, + vlan_acc_untagged_packets); +} + +void hw_atl_rpf_vlan_untagged_act_set(struct aq_hw_s *aq_hw, + u32 vlan_untagged_act) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_VL_UNTAGGED_ACT_ADR, + HW_ATL_RPF_VL_UNTAGGED_ACT_MSK, + HW_ATL_RPF_VL_UNTAGGED_ACT_SHIFT, + vlan_untagged_act); +} + +void hw_atl_rpf_vlan_flr_en_set(struct aq_hw_s *aq_hw, u32 vlan_flr_en, + u32 filter) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_VL_EN_F_ADR(filter), + HW_ATL_RPF_VL_EN_F_MSK, + HW_ATL_RPF_VL_EN_F_SHIFT, + vlan_flr_en); +} + +void hw_atl_rpf_vlan_flr_act_set(struct aq_hw_s *aq_hw, u32 vlan_flr_act, + u32 filter) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_VL_ACT_F_ADR(filter), + HW_ATL_RPF_VL_ACT_F_MSK, + HW_ATL_RPF_VL_ACT_F_SHIFT, + vlan_flr_act); +} + +void hw_atl_rpf_vlan_id_flr_set(struct aq_hw_s *aq_hw, u32 vlan_id_flr, + u32 filter) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_VL_ID_F_ADR(filter), + HW_ATL_RPF_VL_ID_F_MSK, + HW_ATL_RPF_VL_ID_F_SHIFT, + 
vlan_id_flr); +} + +void hw_atl_rpf_vlan_rxq_en_flr_set(struct aq_hw_s *aq_hw, u32 vlan_rxq_en, + u32 filter) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_VL_RXQ_EN_F_ADR(filter), + HW_ATL_RPF_VL_RXQ_EN_F_MSK, + HW_ATL_RPF_VL_RXQ_EN_F_SHIFT, + vlan_rxq_en); +} + +void hw_atl_rpf_vlan_rxq_flr_set(struct aq_hw_s *aq_hw, u32 vlan_rxq, + u32 filter) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_VL_RXQ_F_ADR(filter), + HW_ATL_RPF_VL_RXQ_F_MSK, + HW_ATL_RPF_VL_RXQ_F_SHIFT, + vlan_rxq); +}; + +void hw_atl_rpf_etht_flr_en_set(struct aq_hw_s *aq_hw, u32 etht_flr_en, + u32 filter) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_ET_ENF_ADR(filter), + HW_ATL_RPF_ET_ENF_MSK, + HW_ATL_RPF_ET_ENF_SHIFT, etht_flr_en); +} + +void hw_atl_rpf_etht_user_priority_en_set(struct aq_hw_s *aq_hw, + u32 etht_user_priority_en, u32 filter) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_ET_UPFEN_ADR(filter), + HW_ATL_RPF_ET_UPFEN_MSK, HW_ATL_RPF_ET_UPFEN_SHIFT, + etht_user_priority_en); +} + +void hw_atl_rpf_etht_rx_queue_en_set(struct aq_hw_s *aq_hw, + u32 etht_rx_queue_en, + u32 filter) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_ET_RXQFEN_ADR(filter), + HW_ATL_RPF_ET_RXQFEN_MSK, + HW_ATL_RPF_ET_RXQFEN_SHIFT, + etht_rx_queue_en); +} + +void hw_atl_rpf_etht_user_priority_set(struct aq_hw_s *aq_hw, + u32 etht_user_priority, + u32 filter) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_ET_UPF_ADR(filter), + HW_ATL_RPF_ET_UPF_MSK, + HW_ATL_RPF_ET_UPF_SHIFT, etht_user_priority); +} + +void hw_atl_rpf_etht_rx_queue_set(struct aq_hw_s *aq_hw, u32 etht_rx_queue, + u32 filter) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_ET_RXQF_ADR(filter), + HW_ATL_RPF_ET_RXQF_MSK, + HW_ATL_RPF_ET_RXQF_SHIFT, etht_rx_queue); +} + +void hw_atl_rpf_etht_mgt_queue_set(struct aq_hw_s *aq_hw, u32 etht_mgt_queue, + u32 filter) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_ET_MNG_RXQF_ADR(filter), + HW_ATL_RPF_ET_MNG_RXQF_MSK, + HW_ATL_RPF_ET_MNG_RXQF_SHIFT, + etht_mgt_queue); +} + +void hw_atl_rpf_etht_flr_act_set(struct aq_hw_s *aq_hw, u32 etht_flr_act, + u32 filter) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_ET_ACTF_ADR(filter), + HW_ATL_RPF_ET_ACTF_MSK, + HW_ATL_RPF_ET_ACTF_SHIFT, etht_flr_act); +} + +void hw_atl_rpf_etht_flr_set(struct aq_hw_s *aq_hw, u32 etht_flr, u32 filter) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_ET_VALF_ADR(filter), + HW_ATL_RPF_ET_VALF_MSK, + HW_ATL_RPF_ET_VALF_SHIFT, etht_flr); +} + +void hw_atl_rpf_l3_l4_enf_set(struct aq_hw_s *aq_hw, u32 val, u32 filter) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_L3_L4_ENF_ADR(filter), + HW_ATL_RPF_L3_L4_ENF_MSK, + HW_ATL_RPF_L3_L4_ENF_SHIFT, val); +} + +void hw_atl_rpf_l3_v6_enf_set(struct aq_hw_s *aq_hw, u32 val, u32 filter) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_L3_V6_ENF_ADR(filter), + HW_ATL_RPF_L3_V6_ENF_MSK, + HW_ATL_RPF_L3_V6_ENF_SHIFT, val); +} + +void hw_atl_rpf_l3_saf_en_set(struct aq_hw_s *aq_hw, u32 val, u32 filter) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_L3_SAF_EN_ADR(filter), + HW_ATL_RPF_L3_SAF_EN_MSK, + HW_ATL_RPF_L3_SAF_EN_SHIFT, val); +} + +void hw_atl_rpf_l3_daf_en_set(struct aq_hw_s *aq_hw, u32 val, u32 filter) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_L3_DAF_EN_ADR(filter), + HW_ATL_RPF_L3_DAF_EN_MSK, + HW_ATL_RPF_L3_DAF_EN_SHIFT, val); +} + +void hw_atl_rpf_l4_spf_en_set(struct aq_hw_s *aq_hw, u32 val, u32 filter) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_L4_SPF_EN_ADR(filter), + HW_ATL_RPF_L4_SPF_EN_MSK, + HW_ATL_RPF_L4_SPF_EN_SHIFT, val); +} + +void hw_atl_rpf_l4_dpf_en_set(struct aq_hw_s *aq_hw, u32 val, u32 filter) +{ + aq_hw_write_reg_bit(aq_hw, 
HW_ATL_RPF_L4_DPF_EN_ADR(filter), + HW_ATL_RPF_L4_DPF_EN_MSK, + HW_ATL_RPF_L4_DPF_EN_SHIFT, val); +} + +void hw_atl_rpf_l4_protf_en_set(struct aq_hw_s *aq_hw, u32 val, u32 filter) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_L4_PROTF_EN_ADR(filter), + HW_ATL_RPF_L4_PROTF_EN_MSK, + HW_ATL_RPF_L4_PROTF_EN_SHIFT, val); +} + +void hw_atl_rpf_l3_arpf_en_set(struct aq_hw_s *aq_hw, u32 val, u32 filter) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_L3_ARPF_EN_ADR(filter), + HW_ATL_RPF_L3_ARPF_EN_MSK, + HW_ATL_RPF_L3_ARPF_EN_SHIFT, val); +} + +void hw_atl_rpf_l3_l4_rxqf_en_set(struct aq_hw_s *aq_hw, u32 val, u32 filter) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_L3_L4_RXQF_EN_ADR(filter), + HW_ATL_RPF_L3_L4_RXQF_EN_MSK, + HW_ATL_RPF_L3_L4_RXQF_EN_SHIFT, val); +} + +void hw_atl_rpf_l3_l4_mng_rxqf_set(struct aq_hw_s *aq_hw, u32 val, u32 filter) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_L3_L4_MNG_RXQF_ADR(filter), + HW_ATL_RPF_L3_L4_MNG_RXQF_MSK, + HW_ATL_RPF_L3_L4_MNG_RXQF_SHIFT, val); +} + +void hw_atl_rpf_l3_l4_actf_set(struct aq_hw_s *aq_hw, u32 val, u32 filter) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_L3_L4_ACTF_ADR(filter), + HW_ATL_RPF_L3_L4_ACTF_MSK, + HW_ATL_RPF_L3_L4_ACTF_SHIFT, val); +} + +void hw_atl_rpf_l3_l4_rxqf_set(struct aq_hw_s *aq_hw, u32 val, u32 filter) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_L3_L4_RXQF_ADR(filter), + HW_ATL_RPF_L3_L4_RXQF_MSK, + HW_ATL_RPF_L3_L4_RXQF_SHIFT, val); +} + +void hw_atl_rpf_l4_protf_set(struct aq_hw_s *aq_hw, u32 val, u32 filter) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_L4_PROTF_ADR(filter), + HW_ATL_RPF_L4_PROTF_MSK, + HW_ATL_RPF_L4_PROTF_SHIFT, val); +} + +void hw_atl_rpf_l4_spd_set(struct aq_hw_s *aq_hw, u32 val, u32 filter) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_L4_SPD_ADR(filter), + HW_ATL_RPF_L4_SPD_MSK, + HW_ATL_RPF_L4_SPD_SHIFT, val); +} + +void hw_atl_rpf_l4_dpd_set(struct aq_hw_s *aq_hw, u32 val, u32 filter) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_L4_DPD_ADR(filter), + HW_ATL_RPF_L4_DPD_MSK, + HW_ATL_RPF_L4_DPD_SHIFT, val); +} + +void rpf_vlan_inner_etht_set(struct aq_hw *aq_hw, u32 vlan_inner_etht) +{ + AQ_WRITE_REG_BIT(aq_hw, rpf_vl_inner_tpid_adr, + rpf_vl_inner_tpid_msk, + rpf_vl_inner_tpid_shift, + vlan_inner_etht); +} + +void rpf_vlan_outer_etht_set(struct aq_hw *aq_hw, u32 vlan_outer_etht) +{ + AQ_WRITE_REG_BIT(aq_hw, rpf_vl_outer_tpid_adr, + rpf_vl_outer_tpid_msk, + rpf_vl_outer_tpid_shift, + vlan_outer_etht); +} + +void rpf_vlan_prom_mode_en_set(struct aq_hw *aq_hw, u32 vlan_prom_mode_en) +{ + AQ_WRITE_REG_BIT(aq_hw, rpf_vl_promis_mode_adr, + rpf_vl_promis_mode_msk, + rpf_vl_promis_mode_shift, + vlan_prom_mode_en); +} + +void rpf_vlan_accept_untagged_packets_set(struct aq_hw *aq_hw, + u32 vlan_accept_untagged_packets) +{ + AQ_WRITE_REG_BIT(aq_hw, rpf_vl_accept_untagged_mode_adr, + rpf_vl_accept_untagged_mode_msk, + rpf_vl_accept_untagged_mode_shift, + vlan_accept_untagged_packets); +} + +void rpf_vlan_untagged_act_set(struct aq_hw *aq_hw, u32 vlan_untagged_act) +{ + AQ_WRITE_REG_BIT(aq_hw, rpf_vl_untagged_act_adr, + rpf_vl_untagged_act_msk, + rpf_vl_untagged_act_shift, + vlan_untagged_act); +} + +void rpf_vlan_flr_en_set(struct aq_hw *aq_hw, u32 vlan_flr_en, u32 filter) +{ + AQ_WRITE_REG_BIT(aq_hw, rpf_vl_en_f_adr(filter), + rpf_vl_en_f_msk, + rpf_vl_en_f_shift, + vlan_flr_en); +} + +void rpf_vlan_flr_act_set(struct aq_hw *aq_hw, u32 vlan_flr_act, u32 filter) +{ + AQ_WRITE_REG_BIT(aq_hw, rpf_vl_act_f_adr(filter), + rpf_vl_act_f_msk, + rpf_vl_act_f_shift, + vlan_flr_act); +} + +void rpf_vlan_id_flr_set(struct aq_hw *aq_hw, 
u32 vlan_id_flr, u32 filter) +{ + AQ_WRITE_REG_BIT(aq_hw, rpf_vl_id_f_adr(filter), + rpf_vl_id_f_msk, + rpf_vl_id_f_shift, + vlan_id_flr); +} + +void rpf_etht_flr_en_set(struct aq_hw *aq_hw, u32 etht_flr_en, u32 filter) +{ + AQ_WRITE_REG_BIT(aq_hw, rpf_et_enf_adr(filter), + rpf_et_enf_msk, + rpf_et_enf_shift, etht_flr_en); +} + +void rpf_etht_user_priority_en_set(struct aq_hw *aq_hw, + u32 etht_user_priority_en, u32 filter) +{ + AQ_WRITE_REG_BIT(aq_hw, rpf_et_upfen_adr(filter), + rpf_et_upfen_msk, rpf_et_upfen_shift, + etht_user_priority_en); +} + +void rpf_etht_rx_queue_en_set(struct aq_hw *aq_hw, u32 etht_rx_queue_en, + u32 filter) +{ + AQ_WRITE_REG_BIT(aq_hw, rpf_et_rxqfen_adr(filter), + rpf_et_rxqfen_msk, rpf_et_rxqfen_shift, + etht_rx_queue_en); +} + +void rpf_etht_user_priority_set(struct aq_hw *aq_hw, u32 etht_user_priority, + u32 filter) +{ + AQ_WRITE_REG_BIT(aq_hw, rpf_et_upf_adr(filter), + rpf_et_upf_msk, + rpf_et_upf_shift, etht_user_priority); +} + +void rpf_etht_rx_queue_set(struct aq_hw *aq_hw, u32 etht_rx_queue, + u32 filter) +{ + AQ_WRITE_REG_BIT(aq_hw, rpf_et_rxqf_adr(filter), + rpf_et_rxqf_msk, + rpf_et_rxqf_shift, etht_rx_queue); +} + +void rpf_etht_mgt_queue_set(struct aq_hw *aq_hw, u32 etht_mgt_queue, + u32 filter) +{ + AQ_WRITE_REG_BIT(aq_hw, rpf_et_mng_rxqf_adr(filter), + rpf_et_mng_rxqf_msk, rpf_et_mng_rxqf_shift, + etht_mgt_queue); +} + +void rpf_etht_flr_act_set(struct aq_hw *aq_hw, u32 etht_flr_act, u32 filter) +{ + AQ_WRITE_REG_BIT(aq_hw, rpf_et_actf_adr(filter), + rpf_et_actf_msk, + rpf_et_actf_shift, etht_flr_act); +} + +void rpf_etht_flr_set(struct aq_hw *aq_hw, u32 etht_flr, u32 filter) +{ + AQ_WRITE_REG_BIT(aq_hw, rpf_et_valf_adr(filter), + rpf_et_valf_msk, + rpf_et_valf_shift, etht_flr); +} + +/* RPO: rx packet offload */ +void rpo_ipv4header_crc_offload_en_set(struct aq_hw *aq_hw, + u32 ipv4header_crc_offload_en) +{ + AQ_WRITE_REG_BIT(aq_hw, rpo_ipv4chk_en_adr, + rpo_ipv4chk_en_msk, + rpo_ipv4chk_en_shift, + ipv4header_crc_offload_en); +} + +void rpo_rx_desc_vlan_stripping_set(struct aq_hw *aq_hw, + u32 rx_desc_vlan_stripping, u32 descriptor) +{ + AQ_WRITE_REG_BIT(aq_hw, rpo_descdvl_strip_adr(descriptor), + rpo_descdvl_strip_msk, + rpo_descdvl_strip_shift, + rx_desc_vlan_stripping); +} + +void rpo_tcp_udp_crc_offload_en_set(struct aq_hw *aq_hw, + u32 tcp_udp_crc_offload_en) +{ + AQ_WRITE_REG_BIT(aq_hw, rpol4chk_en_adr, rpol4chk_en_msk, + rpol4chk_en_shift, tcp_udp_crc_offload_en); +} + +void rpo_lro_en_set(struct aq_hw *aq_hw, u32 lro_en) +{ + AQ_WRITE_REG(aq_hw, rpo_lro_en_adr, lro_en); +} + +void rpo_lro_patch_optimization_en_set(struct aq_hw *aq_hw, + u32 lro_patch_optimization_en) +{ + AQ_WRITE_REG_BIT(aq_hw, rpo_lro_ptopt_en_adr, + rpo_lro_ptopt_en_msk, + rpo_lro_ptopt_en_shift, + lro_patch_optimization_en); +} + +void rpo_lro_qsessions_lim_set(struct aq_hw *aq_hw, + u32 lro_qsessions_lim) +{ + AQ_WRITE_REG_BIT(aq_hw, rpo_lro_qses_lmt_adr, + rpo_lro_qses_lmt_msk, + rpo_lro_qses_lmt_shift, + lro_qsessions_lim); +} + +void rpo_lro_total_desc_lim_set(struct aq_hw *aq_hw, u32 lro_total_desc_lim) +{ + AQ_WRITE_REG_BIT(aq_hw, rpo_lro_tot_dsc_lmt_adr, + rpo_lro_tot_dsc_lmt_msk, + rpo_lro_tot_dsc_lmt_shift, + lro_total_desc_lim); +} + +void rpo_lro_min_pay_of_first_pkt_set(struct aq_hw *aq_hw, + u32 lro_min_pld_of_first_pkt) +{ + AQ_WRITE_REG_BIT(aq_hw, rpo_lro_pkt_min_adr, + rpo_lro_pkt_min_msk, + rpo_lro_pkt_min_shift, + lro_min_pld_of_first_pkt); +} + +void rpo_lro_pkt_lim_set(struct aq_hw *aq_hw, u32 lro_pkt_lim) +{ + AQ_WRITE_REG(aq_hw, 
rpo_lro_rsc_max_adr, lro_pkt_lim); +} + +void rpo_lro_max_num_of_descriptors_set(struct aq_hw *aq_hw, + u32 lro_max_number_of_descriptors, + u32 lro) +{ +/* Register address for bitfield lro{L}_des_max[1:0] */ + static u32 rpo_lro_ldes_max_adr[32] = { + 0x000055A0U, 0x000055A0U, 0x000055A0U, 0x000055A0U, + 0x000055A0U, 0x000055A0U, 0x000055A0U, 0x000055A0U, + 0x000055A4U, 0x000055A4U, 0x000055A4U, 0x000055A4U, + 0x000055A4U, 0x000055A4U, 0x000055A4U, 0x000055A4U, + 0x000055A8U, 0x000055A8U, 0x000055A8U, 0x000055A8U, + 0x000055A8U, 0x000055A8U, 0x000055A8U, 0x000055A8U, + 0x000055ACU, 0x000055ACU, 0x000055ACU, 0x000055ACU, + 0x000055ACU, 0x000055ACU, 0x000055ACU, 0x000055ACU + }; + +/* Bitmask for bitfield lro{L}_des_max[1:0] */ + static u32 rpo_lro_ldes_max_msk[32] = { + 0x00000003U, 0x00000030U, 0x00000300U, 0x00003000U, + 0x00030000U, 0x00300000U, 0x03000000U, 0x30000000U, + 0x00000003U, 0x00000030U, 0x00000300U, 0x00003000U, + 0x00030000U, 0x00300000U, 0x03000000U, 0x30000000U, + 0x00000003U, 0x00000030U, 0x00000300U, 0x00003000U, + 0x00030000U, 0x00300000U, 0x03000000U, 0x30000000U, + 0x00000003U, 0x00000030U, 0x00000300U, 0x00003000U, + 0x00030000U, 0x00300000U, 0x03000000U, 0x30000000U + }; + +/* Lower bit position of bitfield lro{L}_des_max[1:0] */ + static u32 rpo_lro_ldes_max_shift[32] = { + 0U, 4U, 8U, 12U, 16U, 20U, 24U, 28U, + 0U, 4U, 8U, 12U, 16U, 20U, 24U, 28U, + 0U, 4U, 8U, 12U, 16U, 20U, 24U, 28U, + 0U, 4U, 8U, 12U, 16U, 20U, 24U, 28U + }; + + AQ_WRITE_REG_BIT(aq_hw, rpo_lro_ldes_max_adr[lro], + rpo_lro_ldes_max_msk[lro], + rpo_lro_ldes_max_shift[lro], + lro_max_number_of_descriptors); +} + +void rpo_lro_time_base_divider_set(struct aq_hw *aq_hw, + u32 lro_time_base_divider) +{ + AQ_WRITE_REG_BIT(aq_hw, rpo_lro_tb_div_adr, + rpo_lro_tb_div_msk, + rpo_lro_tb_div_shift, + lro_time_base_divider); +} + +void rpo_lro_inactive_interval_set(struct aq_hw *aq_hw, + u32 lro_inactive_interval) +{ + AQ_WRITE_REG_BIT(aq_hw, rpo_lro_ina_ival_adr, + rpo_lro_ina_ival_msk, + rpo_lro_ina_ival_shift, + lro_inactive_interval); +} + +void rpo_lro_max_coalescing_interval_set(struct aq_hw *aq_hw, + u32 lro_max_coalescing_interval) +{ + AQ_WRITE_REG_BIT(aq_hw, rpo_lro_max_ival_adr, + rpo_lro_max_ival_msk, + rpo_lro_max_ival_shift, + lro_max_coalescing_interval); +} + +/* rx */ +void rx_rx_reg_res_dis_set(struct aq_hw *aq_hw, u32 rx_reg_res_dis) +{ + AQ_WRITE_REG_BIT(aq_hw, rx_reg_res_dsbl_adr, + rx_reg_res_dsbl_msk, + rx_reg_res_dsbl_shift, + rx_reg_res_dis); +} + +/* tdm */ +void tdm_cpu_id_set(struct aq_hw *aq_hw, u32 cpuid, u32 dca) +{ + AQ_WRITE_REG_BIT(aq_hw, tdm_dcadcpuid_adr(dca), + tdm_dcadcpuid_msk, + tdm_dcadcpuid_shift, cpuid); +} + +void tdm_large_send_offload_en_set(struct aq_hw *aq_hw, + u32 large_send_offload_en) +{ + AQ_WRITE_REG(aq_hw, tdm_lso_en_adr, large_send_offload_en); +} + +void tdm_tx_dca_en_set(struct aq_hw *aq_hw, u32 tx_dca_en) +{ + AQ_WRITE_REG_BIT(aq_hw, tdm_dca_en_adr, tdm_dca_en_msk, + tdm_dca_en_shift, tx_dca_en); +} + +void tdm_tx_dca_mode_set(struct aq_hw *aq_hw, u32 tx_dca_mode) +{ + AQ_WRITE_REG_BIT(aq_hw, tdm_dca_mode_adr, tdm_dca_mode_msk, + tdm_dca_mode_shift, tx_dca_mode); +} + +void tdm_tx_desc_dca_en_set(struct aq_hw *aq_hw, u32 tx_desc_dca_en, u32 dca) +{ + AQ_WRITE_REG_BIT(aq_hw, tdm_dcaddesc_en_adr(dca), + tdm_dcaddesc_en_msk, tdm_dcaddesc_en_shift, + tx_desc_dca_en); +} + +void tdm_tx_desc_en_set(struct aq_hw *aq_hw, u32 tx_desc_en, u32 descriptor) +{ + AQ_WRITE_REG_BIT(aq_hw, tdm_descden_adr(descriptor), + tdm_descden_msk, + tdm_descden_shift, + 
tx_desc_en); +} + +u32 tdm_tx_desc_head_ptr_get(struct aq_hw *aq_hw, u32 descriptor) +{ + return AQ_READ_REG_BIT(aq_hw, tdm_descdhd_adr(descriptor), + tdm_descdhd_msk, tdm_descdhd_shift); +} + +void tdm_tx_desc_len_set(struct aq_hw *aq_hw, u32 tx_desc_len, + u32 descriptor) +{ + AQ_WRITE_REG_BIT(aq_hw, tdm_descdlen_adr(descriptor), + tdm_descdlen_msk, + tdm_descdlen_shift, + tx_desc_len); +} + +void tdm_tx_desc_wr_wb_irq_en_set(struct aq_hw *aq_hw, + u32 tx_desc_wr_wb_irq_en) +{ + AQ_WRITE_REG_BIT(aq_hw, tdm_int_desc_wrb_en_adr, + tdm_int_desc_wrb_en_msk, + tdm_int_desc_wrb_en_shift, + tx_desc_wr_wb_irq_en); +} + +void tdm_tx_desc_wr_wb_threshold_set(struct aq_hw *aq_hw, + u32 tx_desc_wr_wb_threshold, + u32 descriptor) +{ + AQ_WRITE_REG_BIT(aq_hw, tdm_descdwrb_thresh_adr(descriptor), + tdm_descdwrb_thresh_msk, + tdm_descdwrb_thresh_shift, + tx_desc_wr_wb_threshold); +} + +void tdm_tdm_intr_moder_en_set(struct aq_hw *aq_hw, + u32 tdm_irq_moderation_en) +{ + AQ_WRITE_REG_BIT(aq_hw, tdm_int_mod_en_adr, + tdm_int_mod_en_msk, + tdm_int_mod_en_shift, + tdm_irq_moderation_en); +} + +/* thm */ +void thm_lso_tcp_flag_of_first_pkt_set(struct aq_hw *aq_hw, + u32 lso_tcp_flag_of_first_pkt) +{ + AQ_WRITE_REG_BIT(aq_hw, thm_lso_tcp_flag_first_adr, + thm_lso_tcp_flag_first_msk, + thm_lso_tcp_flag_first_shift, + lso_tcp_flag_of_first_pkt); +} + +void thm_lso_tcp_flag_of_last_pkt_set(struct aq_hw *aq_hw, + u32 lso_tcp_flag_of_last_pkt) +{ + AQ_WRITE_REG_BIT(aq_hw, thm_lso_tcp_flag_last_adr, + thm_lso_tcp_flag_last_msk, + thm_lso_tcp_flag_last_shift, + lso_tcp_flag_of_last_pkt); +} + +void thm_lso_tcp_flag_of_middle_pkt_set(struct aq_hw *aq_hw, + u32 lso_tcp_flag_of_middle_pkt) +{ + AQ_WRITE_REG_BIT(aq_hw, thm_lso_tcp_flag_mid_adr, + thm_lso_tcp_flag_mid_msk, + thm_lso_tcp_flag_mid_shift, + lso_tcp_flag_of_middle_pkt); +} + +/* TPB: tx packet buffer */ +void tpb_tx_buff_en_set(struct aq_hw *aq_hw, u32 tx_buff_en) +{ + AQ_WRITE_REG_BIT(aq_hw, tpb_tx_buf_en_adr, tpb_tx_buf_en_msk, + tpb_tx_buf_en_shift, tx_buff_en); +} + +void tpb_tx_tc_mode_set(struct aq_hw *aq_hw, u32 tc_mode) +{ + AQ_WRITE_REG_BIT(aq_hw, tpb_tx_tc_mode_adr, tpb_tx_tc_mode_msk, + tpb_tx_tc_mode_shift, tc_mode); +} + +void tpb_tx_buff_hi_threshold_per_tc_set(struct aq_hw *aq_hw, + u32 tx_buff_hi_threshold_per_tc, + u32 buffer) +{ + AQ_WRITE_REG_BIT(aq_hw, tpb_txbhi_thresh_adr(buffer), + tpb_txbhi_thresh_msk, tpb_txbhi_thresh_shift, + tx_buff_hi_threshold_per_tc); +} + +void tpb_tx_buff_lo_threshold_per_tc_set(struct aq_hw *aq_hw, + u32 tx_buff_lo_threshold_per_tc, + u32 buffer) +{ + AQ_WRITE_REG_BIT(aq_hw, tpb_txblo_thresh_adr(buffer), + tpb_txblo_thresh_msk, tpb_txblo_thresh_shift, + tx_buff_lo_threshold_per_tc); +} + +void tpb_tx_dma_sys_lbk_en_set(struct aq_hw *aq_hw, u32 tx_dma_sys_lbk_en) +{ + AQ_WRITE_REG_BIT(aq_hw, tpb_dma_sys_lbk_adr, + tpb_dma_sys_lbk_msk, + tpb_dma_sys_lbk_shift, + tx_dma_sys_lbk_en); +} + +void rdm_rx_dma_desc_cache_init_tgl(struct aq_hw *aq_hw) +{ + AQ_WRITE_REG_BIT(aq_hw, rdm_rx_dma_desc_cache_init_adr, + rdm_rx_dma_desc_cache_init_msk, + rdm_rx_dma_desc_cache_init_shift, + AQ_READ_REG_BIT(aq_hw, rdm_rx_dma_desc_cache_init_adr, + rdm_rx_dma_desc_cache_init_msk, + rdm_rx_dma_desc_cache_init_shift) ^ 1 + ); +} + +void tpb_tx_pkt_buff_size_per_tc_set(struct aq_hw *aq_hw, + u32 tx_pkt_buff_size_per_tc, u32 buffer) +{ + AQ_WRITE_REG_BIT(aq_hw, tpb_txbbuf_size_adr(buffer), + tpb_txbbuf_size_msk, + tpb_txbbuf_size_shift, + tx_pkt_buff_size_per_tc); +} + +void tpb_tx_path_scp_ins_en_set(struct aq_hw *aq_hw, u32 
tx_path_scp_ins_en) +{ + AQ_WRITE_REG_BIT(aq_hw, tpb_tx_scp_ins_en_adr, + tpb_tx_scp_ins_en_msk, + tpb_tx_scp_ins_en_shift, + tx_path_scp_ins_en); +} + +/* TPO: tx packet offload */ +void tpo_ipv4header_crc_offload_en_set(struct aq_hw *aq_hw, + u32 ipv4header_crc_offload_en) +{ + AQ_WRITE_REG_BIT(aq_hw, tpo_ipv4chk_en_adr, + tpo_ipv4chk_en_msk, + tpo_ipv4chk_en_shift, + ipv4header_crc_offload_en); +} + +void tpo_tcp_udp_crc_offload_en_set(struct aq_hw *aq_hw, + u32 tcp_udp_crc_offload_en) +{ + AQ_WRITE_REG_BIT(aq_hw, tpol4chk_en_adr, + tpol4chk_en_msk, + tpol4chk_en_shift, + tcp_udp_crc_offload_en); +} + +void tpo_tx_pkt_sys_lbk_en_set(struct aq_hw *aq_hw, u32 tx_pkt_sys_lbk_en) +{ + AQ_WRITE_REG_BIT(aq_hw, tpo_pkt_sys_lbk_adr, + tpo_pkt_sys_lbk_msk, + tpo_pkt_sys_lbk_shift, + tx_pkt_sys_lbk_en); +} + +/* TPS: tx packet scheduler */ +void tps_tx_pkt_shed_data_arb_mode_set(struct aq_hw *aq_hw, + u32 tx_pkt_shed_data_arb_mode) +{ + AQ_WRITE_REG_BIT(aq_hw, tps_data_tc_arb_mode_adr, + tps_data_tc_arb_mode_msk, + tps_data_tc_arb_mode_shift, + tx_pkt_shed_data_arb_mode); +} + +void tps_tx_pkt_shed_desc_rate_curr_time_res_set(struct aq_hw *aq_hw, + u32 curr_time_res) +{ + AQ_WRITE_REG_BIT(aq_hw, tps_desc_rate_ta_rst_adr, + tps_desc_rate_ta_rst_msk, + tps_desc_rate_ta_rst_shift, + curr_time_res); +} + +void tps_tx_pkt_shed_desc_rate_lim_set(struct aq_hw *aq_hw, + u32 tx_pkt_shed_desc_rate_lim) +{ + AQ_WRITE_REG_BIT(aq_hw, tps_desc_rate_lim_adr, + tps_desc_rate_lim_msk, + tps_desc_rate_lim_shift, + tx_pkt_shed_desc_rate_lim); +} + +void tps_tx_pkt_shed_desc_tc_arb_mode_set(struct aq_hw *aq_hw, + u32 tx_pkt_shed_desc_tc_arb_mode) +{ + AQ_WRITE_REG_BIT(aq_hw, tps_desc_tc_arb_mode_adr, + tps_desc_tc_arb_mode_msk, + tps_desc_tc_arb_mode_shift, + tx_pkt_shed_desc_tc_arb_mode); +} + +void tps_tx_pkt_shed_desc_tc_max_credit_set(struct aq_hw *aq_hw, + u32 tx_pkt_shed_desc_tc_max_credit, + u32 tc) +{ + AQ_WRITE_REG_BIT(aq_hw, tps_desc_tctcredit_max_adr(tc), + tps_desc_tctcredit_max_msk, + tps_desc_tctcredit_max_shift, + tx_pkt_shed_desc_tc_max_credit); +} + +void tps_tx_pkt_shed_desc_tc_weight_set(struct aq_hw *aq_hw, + u32 tx_pkt_shed_desc_tc_weight, u32 tc) +{ + AQ_WRITE_REG_BIT(aq_hw, tps_desc_tctweight_adr(tc), + tps_desc_tctweight_msk, + tps_desc_tctweight_shift, + tx_pkt_shed_desc_tc_weight); +} + +void tps_tx_pkt_shed_desc_vm_arb_mode_set(struct aq_hw *aq_hw, + u32 tx_pkt_shed_desc_vm_arb_mode) +{ + AQ_WRITE_REG_BIT(aq_hw, tps_desc_vm_arb_mode_adr, + tps_desc_vm_arb_mode_msk, + tps_desc_vm_arb_mode_shift, + tx_pkt_shed_desc_vm_arb_mode); +} + +void tps_tx_pkt_shed_tc_data_max_credit_set(struct aq_hw *aq_hw, + u32 tx_pkt_shed_tc_data_max_credit, + u32 tc) +{ + AQ_WRITE_REG_BIT(aq_hw, tps_data_tctcredit_max_adr(tc), + tps_data_tctcredit_max_msk, + tps_data_tctcredit_max_shift, + tx_pkt_shed_tc_data_max_credit); +} + +void tps_tx_pkt_shed_tc_data_weight_set(struct aq_hw *aq_hw, + u32 tx_pkt_shed_tc_data_weight, u32 tc) +{ + AQ_WRITE_REG_BIT(aq_hw, tps_data_tctweight_adr(tc), + tps_data_tctweight_msk, + tps_data_tctweight_shift, + tx_pkt_shed_tc_data_weight); +} + +/* tx */ +void tx_tx_reg_res_dis_set(struct aq_hw *aq_hw, u32 tx_reg_res_dis) +{ + AQ_WRITE_REG_BIT(aq_hw, tx_reg_res_dsbl_adr, + tx_reg_res_dsbl_msk, + tx_reg_res_dsbl_shift, tx_reg_res_dis); +} + +/* msm */ +u32 msm_reg_access_status_get(struct aq_hw *aq_hw) +{ + return AQ_READ_REG_BIT(aq_hw, msm_reg_access_busy_adr, + msm_reg_access_busy_msk, + msm_reg_access_busy_shift); +} + +void msm_reg_addr_for_indirect_addr_set(struct aq_hw *aq_hw, + 
u32 reg_addr_for_indirect_addr) +{ + AQ_WRITE_REG_BIT(aq_hw, msm_reg_addr_adr, + msm_reg_addr_msk, + msm_reg_addr_shift, + reg_addr_for_indirect_addr); +} + +void msm_reg_rd_strobe_set(struct aq_hw *aq_hw, u32 reg_rd_strobe) +{ + AQ_WRITE_REG_BIT(aq_hw, msm_reg_rd_strobe_adr, + msm_reg_rd_strobe_msk, + msm_reg_rd_strobe_shift, + reg_rd_strobe); +} + +u32 msm_reg_rd_data_get(struct aq_hw *aq_hw) +{ + return AQ_READ_REG(aq_hw, msm_reg_rd_data_adr); +} + +void msm_reg_wr_data_set(struct aq_hw *aq_hw, u32 reg_wr_data) +{ + AQ_WRITE_REG(aq_hw, msm_reg_wr_data_adr, reg_wr_data); +} + +void msm_reg_wr_strobe_set(struct aq_hw *aq_hw, u32 reg_wr_strobe) +{ + AQ_WRITE_REG_BIT(aq_hw, msm_reg_wr_strobe_adr, + msm_reg_wr_strobe_msk, + msm_reg_wr_strobe_shift, + reg_wr_strobe); +} + +/* pci */ +void pci_pci_reg_res_dis_set(struct aq_hw *aq_hw, u32 pci_reg_res_dis) +{ + AQ_WRITE_REG_BIT(aq_hw, pci_reg_res_dsbl_adr, + pci_reg_res_dsbl_msk, + pci_reg_res_dsbl_shift, + pci_reg_res_dis); +} + +u32 reg_glb_cpu_scratch_scp_get(struct aq_hw *hw, u32 glb_cpu_scratch_scp_idx) +{ + return AQ_READ_REG(hw, glb_cpu_scratch_scp_adr(glb_cpu_scratch_scp_idx)); +} +void reg_glb_cpu_scratch_scp_set(struct aq_hw *aq_hw, u32 glb_cpu_scratch_scp, + u32 scratch_scp) +{ + AQ_WRITE_REG(aq_hw, glb_cpu_scratch_scp_adr(scratch_scp), + glb_cpu_scratch_scp); +} + +u32 reg_glb_cpu_no_reset_scratchpad_get(struct aq_hw *hw, u32 index) +{ + return AQ_READ_REG(hw, glb_cpu_no_reset_scratchpad_adr(index)); +} +void reg_glb_cpu_no_reset_scratchpad_set(struct aq_hw* hw, u32 value, u32 index) +{ + AQ_WRITE_REG(hw, glb_cpu_no_reset_scratchpad_adr(index), value); +} + +void reg_mif_power_gating_enable_control_set(struct aq_hw* hw, u32 value) +{ + AQ_WRITE_REG(hw, mif_power_gating_enable_control_adr, value); + +} +u32 reg_mif_power_gating_enable_control_get(struct aq_hw* hw) +{ + return AQ_READ_REG(hw, mif_power_gating_enable_control_adr); +} + + +void reg_glb_general_provisioning9_set(struct aq_hw* hw, u32 value) +{ + AQ_WRITE_REG(hw, glb_general_provisioning9_adr, value); +} +u32 reg_glb_general_provisioning9_get(struct aq_hw* hw) +{ + return AQ_READ_REG(hw, glb_general_provisioning9_adr); +} + +void reg_glb_nvr_provisioning2_set(struct aq_hw* hw, u32 value) +{ + AQ_WRITE_REG(hw, glb_nvr_provisioning2_adr, value); +} +u32 reg_glb_nvr_provisioning2_get(struct aq_hw* hw) +{ + return AQ_READ_REG(hw, glb_nvr_provisioning2_adr); +} + +void reg_glb_nvr_interface1_set(struct aq_hw* hw, u32 value) +{ + AQ_WRITE_REG(hw, glb_nvr_interface1_adr, value); +} +u32 reg_glb_nvr_interface1_get(struct aq_hw* hw) +{ + return AQ_READ_REG(hw, glb_nvr_interface1_adr); +} + +/* get mif up mailbox busy */ +u32 mif_mcp_up_mailbox_busy_get(struct aq_hw *hw) +{ + return AQ_READ_REG_BIT(hw, mif_mcp_up_mailbox_busy_adr, + mif_mcp_up_mailbox_busy_msk, + mif_mcp_up_mailbox_busy_shift); +} + +/* set mif up mailbox execute operation */ +void mif_mcp_up_mailbox_execute_operation_set(struct aq_hw* hw, u32 value) +{ + AQ_WRITE_REG_BIT(hw, mif_mcp_up_mailbox_execute_operation_adr, + mif_mcp_up_mailbox_execute_operation_msk, + mif_mcp_up_mailbox_execute_operation_shift, + value); +} +/* get mif uP mailbox address */ +u32 mif_mcp_up_mailbox_addr_get(struct aq_hw *hw) +{ + return AQ_READ_REG(hw, mif_mcp_up_mailbox_addr_adr); +} +/* set mif uP mailbox address */ +void mif_mcp_up_mailbox_addr_set(struct aq_hw *hw, u32 value) +{ + AQ_WRITE_REG(hw, mif_mcp_up_mailbox_addr_adr, value); +} + +/* get mif uP mailbox data */ +u32 mif_mcp_up_mailbox_data_get(struct aq_hw *hw) +{ + return 
AQ_READ_REG(hw, mif_mcp_up_mailbox_data_adr); +} + +void hw_atl_rpfl3l4_ipv4_dest_addr_clear(struct aq_hw_s *aq_hw, u8 location) +{ + aq_hw_write_reg(aq_hw, HW_ATL_RX_GET_ADDR_DESTA_FL3L4(location), 0U); +} + +void hw_atl_rpfl3l4_ipv4_src_addr_clear(struct aq_hw_s *aq_hw, u8 location) +{ + aq_hw_write_reg(aq_hw, HW_ATL_RX_GET_ADDR_SRCA_FL3L4(location), 0U); +} + +void hw_atl_rpfl3l4_cmd_clear(struct aq_hw_s *aq_hw, u8 location) +{ + aq_hw_write_reg(aq_hw, HW_ATL_RX_GET_ADDR_CTRL_FL3L4(location), 0U); +} + +void hw_atl_rpfl3l4_ipv6_dest_addr_clear(struct aq_hw_s *aq_hw, u8 location) +{ + int i; + + for (i = 0; i < 4; ++i) + aq_hw_write_reg(aq_hw, + HW_ATL_RX_GET_ADDR_DESTA_FL3L4(location + i), + 0U); +} + +void hw_atl_rpfl3l4_ipv6_src_addr_clear(struct aq_hw_s *aq_hw, u8 location) +{ + int i; + + for (i = 0; i < 4; ++i) + aq_hw_write_reg(aq_hw, + HW_ATL_RX_GET_ADDR_SRCA_FL3L4(location + i), + 0U); +} + +void hw_atl_rpfl3l4_ipv4_dest_addr_set(struct aq_hw_s *aq_hw, u8 location, + u32 ipv4_dest) +{ + aq_hw_write_reg(aq_hw, HW_ATL_RX_GET_ADDR_DESTA_FL3L4(location), + ipv4_dest); +} + +void hw_atl_rpfl3l4_ipv4_src_addr_set(struct aq_hw_s *aq_hw, u8 location, + u32 ipv4_src) +{ + aq_hw_write_reg(aq_hw, + HW_ATL_RX_GET_ADDR_SRCA_FL3L4(location), + ipv4_src); +} + +void hw_atl_rpfl3l4_cmd_set(struct aq_hw_s *aq_hw, u8 location, u32 cmd) +{ + aq_hw_write_reg(aq_hw, HW_ATL_RX_GET_ADDR_CTRL_FL3L4(location), cmd); +} + +void hw_atl_rpfl3l4_ipv6_src_addr_set(struct aq_hw_s *aq_hw, u8 location, + u32 *ipv6_src) +{ + int i; + + for (i = 0; i < 4; ++i) + aq_hw_write_reg(aq_hw, + HW_ATL_RX_GET_ADDR_SRCA_FL3L4(location + i), + ipv6_src[i]); +} + +void hw_atl_rpfl3l4_ipv6_dest_addr_set(struct aq_hw_s *aq_hw, u8 location, + u32 *ipv6_dest) +{ + int i; + + for (i = 0; i < 4; ++i) + aq_hw_write_reg(aq_hw, + HW_ATL_RX_GET_ADDR_DESTA_FL3L4(location + i), + ipv6_dest[i]); +} Index: sys/dev/aq/aq_hw_llh_internal.h =================================================================== --- /dev/null +++ sys/dev/aq/aq_hw_llh_internal.h @@ -0,0 +1,3335 @@ +/* + * aQuantia Corporation Network Driver + * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * (1) Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * (2) Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * + * (3)The name of the author may not be used to endorse or promote + * products derived from this software without specific prior + * written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS + * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE + * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/* File aq_hw_llh_internal.h: Preprocessor definitions + * for Atlantic registers. + */ + +#ifndef HW_ATL_LLH_INTERNAL_H +#define HW_ATL_LLH_INTERNAL_H + +/* global microprocessor semaphore definitions + * base address: 0x000003a0 + * parameter: semaphore {s} | stride size 0x4 | range [0, 15] + */ +#define glb_cpu_sem_adr(semaphore) (0x000003a0u + (semaphore) * 0x4) +/* register address for bitfield rx dma good octet counter lsw [1f:0] */ +#define stats_rx_dma_good_octet_counterlsw__adr 0x00006808 +/* register address for bitfield rx dma good packet counter lsw [1f:0] */ +#define stats_rx_dma_good_pkt_counterlsw__adr 0x00006800 +/* register address for bitfield tx dma good octet counter lsw [1f:0] */ +#define stats_tx_dma_good_octet_counterlsw__adr 0x00008808 +/* register address for bitfield tx dma good packet counter lsw [1f:0] */ +#define stats_tx_dma_good_pkt_counterlsw__adr 0x00008800 + +/* register address for bitfield rx dma good octet counter msw [3f:20] */ +#define stats_rx_dma_good_octet_countermsw__adr 0x0000680c +/* register address for bitfield rx dma good packet counter msw [3f:20] */ +#define stats_rx_dma_good_pkt_countermsw__adr 0x00006804 +/* register address for bitfield tx dma good octet counter msw [3f:20] */ +#define stats_tx_dma_good_octet_countermsw__adr 0x0000880c +/* register address for bitfield tx dma good packet counter msw [3f:20] */ +#define stats_tx_dma_good_pkt_countermsw__adr 0x00008804 +/* register address for bitfield rx lro coalesced packet count lsw [1f:0] */ +#define stats_rx_lo_coalesced_pkt_count0__addr 0x00006820u + +/* preprocessor definitions for msm rx errors counter register */ +#define mac_msm_rx_errs_cnt_adr 0x00000120u + +/* preprocessor definitions for msm rx unicast frames counter register */ +#define mac_msm_rx_ucst_frm_cnt_adr 0x000000e0u + +/* preprocessor definitions for msm rx multicast frames counter register */ +#define mac_msm_rx_mcst_frm_cnt_adr 0x000000e8u + +/* preprocessor definitions for msm rx broadcast frames counter register */ +#define mac_msm_rx_bcst_frm_cnt_adr 0x000000f0u + +/* preprocessor definitions for msm rx broadcast octets counter register 1 */ +#define mac_msm_rx_bcst_octets_counter1_adr 0x000001b0u + +/* preprocessor definitions for msm rx broadcast octets counter register 2 */ +#define mac_msm_rx_bcst_octets_counter2_adr 0x000001b4u + +/* preprocessor definitions for msm rx unicast octets counter register 0 */ +#define mac_msm_rx_ucst_octets_counter0_adr 0x000001b8u + +/* preprocessor definitions for rx dma statistics counter 7 */ +#define rx_dma_stat_counter7_adr 0x00006818u + +/* preprocessor definitions for msm tx unicast frames counter register */ +#define mac_msm_tx_ucst_frm_cnt_adr 0x00000108u + +/* preprocessor definitions for msm tx multicast frames counter register */ +#define mac_msm_tx_mcst_frm_cnt_adr 0x00000110u + +/*! 
@name Global FW Image Identification 1 Definitions +* +* Preprocessor definitions for Global FW Image Identification 1 +* Address: 0x00000018 +@{*/ +#define glb_fw_image_id1_adr 0x00000018u +/*@}*/ + +/* preprocessor definitions for global mif identification */ +#define glb_mif_id_adr 0x0000001cu + +/* register address for bitfield iamr_lsw[1f:0] */ +#define itr_iamrlsw_adr 0x00002090 +/* register address for bitfield rx dma drop packet counter [1f:0] */ +#define rpb_rx_dma_drop_pkt_cnt_adr 0x00006818 + +/* register address for bitfield imcr_lsw[1f:0] */ +#define itr_imcrlsw_adr 0x00002070 +/* register address for bitfield imsr_lsw[1f:0] */ +#define itr_imsrlsw_adr 0x00002060 +/* register address for bitfield itr_reg_res_dsbl */ +#define itr_reg_res_dsbl_adr 0x00002300 +/* bitmask for bitfield itr_reg_res_dsbl */ +#define itr_reg_res_dsbl_msk 0x20000000 +/* lower bit position of bitfield itr_reg_res_dsbl */ +#define itr_reg_res_dsbl_shift 29 +/* register address for bitfield iscr_lsw[1f:0] */ +#define itr_iscrlsw_adr 0x00002050 +/* register address for bitfield isr_lsw[1f:0] */ +#define itr_isrlsw_adr 0x00002000 +/* register address for bitfield itr_reset */ +#define itr_res_adr 0x00002300 +/* bitmask for bitfield itr_reset */ +#define itr_res_msk 0x80000000 +/* lower bit position of bitfield itr_reset */ +#define itr_res_shift 31 +/* register address for bitfield dca{d}_cpuid[7:0] */ +#define rdm_dcadcpuid_adr(dca) (0x00006100 + (dca) * 0x4) +/* bitmask for bitfield dca{d}_cpuid[7:0] */ +#define rdm_dcadcpuid_msk 0x000000ff +/* lower bit position of bitfield dca{d}_cpuid[7:0] */ +#define rdm_dcadcpuid_shift 0 +/* register address for bitfield dca_en */ +#define rdm_dca_en_adr 0x00006180 + +/*! @name MIF Power Gating Enable Control Definitions +* Preprocessor definitions for MIF Power Gating Enable Control +* Address: 0x000032A8 +@{*/ +#define mif_power_gating_enable_control_adr 0x000032A8u +/*@}*/ + +/*! @name Global General Provisioning 9 Definitions +* +* Preprocessor definitions for Global General Provisioning 9 +* Address: 0x00000520 +@{*/ +#define glb_general_provisioning9_adr 0x00000520u +/*@}*/ + +/*! @name Global NVR Provisioning 2 Definitions +* +* Preprocessor definitions for Global NVR Provisioning 2 +* Address: 0x00000534 +@{*/ +#define glb_nvr_provisioning2_adr 0x00000534u +/*@}*/ + +/*! @name Global NVR Interface 1 Definitions +* +* Preprocessor definitions for Global NVR Interface 1 +* Address: 0x00000100 +@{*/ +#define glb_nvr_interface1_adr 0x00000100u +/*@}*/ + + +/* rx dca_en bitfield definitions + * preprocessor definitions for the bitfield "dca_en". + * port="pif_rdm_dca_en_i" + */ + +/* register address for bitfield dca_en */ +#define rdm_dca_en_adr 0x00006180 +/* bitmask for bitfield dca_en */ +#define rdm_dca_en_msk 0x80000000 +/* inverted bitmask for bitfield dca_en */ +#define rdm_dca_en_mskn 0x7fffffff +/* lower bit position of bitfield dca_en */ +#define rdm_dca_en_shift 31 +/* width of bitfield dca_en */ +#define rdm_dca_en_width 1 +/* default value of bitfield dca_en */ +#define rdm_dca_en_default 0x1 + + +/*! @name MAC_PHY MPI register reset disable Bitfield Definitions +* Preprocessor definitions for the bitfield "MPI register reset disable". +* PORT="pif_mpi_reg_reset_dsbl_i" +@{ */ +/*! \brief Register address for bitfield MPI register reset disable */ +#define mpi_tx_reg_res_dis_adr 0x00004000 +/*! \brief Bitmask for bitfield MPI register reset disable */ +#define mpi_tx_reg_res_dis_msk 0x20000000 +/*! 
\brief Inverted bitmask for bitfield MPI register reset disable */ +#define mpi_tx_reg_res_dis_mskn 0xDFFFFFFF +/*! \brief Lower bit position of bitfield MPI register reset disable */ +#define mpi_tx_reg_res_dis_shift 29 +/*! \brief Width of bitfield MPI register reset disable */ +#define mpi_tx_reg_res_dis_width 1 +/*! \brief Default value of bitfield MPI register reset disable */ +#define mpi_tx_reg_res_dis_default 0x1 +/*@}*/ + + +/* rx dca_mode[3:0] bitfield definitions + * preprocessor definitions for the bitfield "dca_mode[3:0]". + * port="pif_rdm_dca_mode_i[3:0]" + */ + +/* register address for bitfield dca_mode[3:0] */ +#define rdm_dca_mode_adr 0x00006180 +/* bitmask for bitfield dca_mode[3:0] */ +#define rdm_dca_mode_msk 0x0000000f +/* inverted bitmask for bitfield dca_mode[3:0] */ +#define rdm_dca_mode_mskn 0xfffffff0 +/* lower bit position of bitfield dca_mode[3:0] */ +#define rdm_dca_mode_shift 0 +/* width of bitfield dca_mode[3:0] */ +#define rdm_dca_mode_width 4 +/* default value of bitfield dca_mode[3:0] */ +#define rdm_dca_mode_default 0x0 + +/* rx desc{d}_data_size[4:0] bitfield definitions + * preprocessor definitions for the bitfield "desc{d}_data_size[4:0]". + * parameter: descriptor {d} | stride size 0x20 | range [0, 31] + * port="pif_rdm_desc0_data_size_i[4:0]" + */ + +/* register address for bitfield desc{d}_data_size[4:0] */ +#define rdm_descddata_size_adr(descriptor) (0x00005b18 + (descriptor) * 0x20) +/* bitmask for bitfield desc{d}_data_size[4:0] */ +#define rdm_descddata_size_msk 0x0000001f +/* inverted bitmask for bitfield desc{d}_data_size[4:0] */ +#define rdm_descddata_size_mskn 0xffffffe0 +/* lower bit position of bitfield desc{d}_data_size[4:0] */ +#define rdm_descddata_size_shift 0 +/* width of bitfield desc{d}_data_size[4:0] */ +#define rdm_descddata_size_width 5 +/* default value of bitfield desc{d}_data_size[4:0] */ +#define rdm_descddata_size_default 0x0 + +/* rx dca{d}_desc_en bitfield definitions + * preprocessor definitions for the bitfield "dca{d}_desc_en". + * parameter: dca {d} | stride size 0x4 | range [0, 31] + * port="pif_rdm_dca_desc_en_i[0]" + */ + +/* register address for bitfield dca{d}_desc_en */ +#define rdm_dcaddesc_en_adr(dca) (0x00006100 + (dca) * 0x4) +/* bitmask for bitfield dca{d}_desc_en */ +#define rdm_dcaddesc_en_msk 0x80000000 +/* inverted bitmask for bitfield dca{d}_desc_en */ +#define rdm_dcaddesc_en_mskn 0x7fffffff +/* lower bit position of bitfield dca{d}_desc_en */ +#define rdm_dcaddesc_en_shift 31 +/* width of bitfield dca{d}_desc_en */ +#define rdm_dcaddesc_en_width 1 +/* default value of bitfield dca{d}_desc_en */ +#define rdm_dcaddesc_en_default 0x0 + +/* rx desc{d}_en bitfield definitions + * preprocessor definitions for the bitfield "desc{d}_en". + * parameter: descriptor {d} | stride size 0x20 | range [0, 31] + * port="pif_rdm_desc_en_i[0]" + */ + +/* register address for bitfield desc{d}_en */ +#define rdm_descden_adr(descriptor) (0x00005b08 + (descriptor) * 0x20) +/* bitmask for bitfield desc{d}_en */ +#define rdm_descden_msk 0x80000000 +/* inverted bitmask for bitfield desc{d}_en */ +#define rdm_descden_mskn 0x7fffffff +/* lower bit position of bitfield desc{d}_en */ +#define rdm_descden_shift 31 +/* width of bitfield desc{d}_en */ +#define rdm_descden_width 1 +/* default value of bitfield desc{d}_en */ +#define rdm_descden_default 0x0 + +/* rx desc{d}_hdr_size[4:0] bitfield definitions + * preprocessor definitions for the bitfield "desc{d}_hdr_size[4:0]". 
+ * parameter: descriptor {d} | stride size 0x20 | range [0, 31] + * port="pif_rdm_desc0_hdr_size_i[4:0]" + */ + +/* register address for bitfield desc{d}_hdr_size[4:0] */ +#define rdm_descdhdr_size_adr(descriptor) (0x00005b18 + (descriptor) * 0x20) +/* bitmask for bitfield desc{d}_hdr_size[4:0] */ +#define rdm_descdhdr_size_msk 0x00001f00 +/* inverted bitmask for bitfield desc{d}_hdr_size[4:0] */ +#define rdm_descdhdr_size_mskn 0xffffe0ff +/* lower bit position of bitfield desc{d}_hdr_size[4:0] */ +#define rdm_descdhdr_size_shift 8 +/* width of bitfield desc{d}_hdr_size[4:0] */ +#define rdm_descdhdr_size_width 5 +/* default value of bitfield desc{d}_hdr_size[4:0] */ +#define rdm_descdhdr_size_default 0x0 + +/* rx desc{d}_hdr_split bitfield definitions + * preprocessor definitions for the bitfield "desc{d}_hdr_split". + * parameter: descriptor {d} | stride size 0x20 | range [0, 31] + * port="pif_rdm_desc_hdr_split_i[0]" + */ + +/* register address for bitfield desc{d}_hdr_split */ +#define rdm_descdhdr_split_adr(descriptor) (0x00005b08 + (descriptor) * 0x20) +/* bitmask for bitfield desc{d}_hdr_split */ +#define rdm_descdhdr_split_msk 0x10000000 +/* inverted bitmask for bitfield desc{d}_hdr_split */ +#define rdm_descdhdr_split_mskn 0xefffffff +/* lower bit position of bitfield desc{d}_hdr_split */ +#define rdm_descdhdr_split_shift 28 +/* width of bitfield desc{d}_hdr_split */ +#define rdm_descdhdr_split_width 1 +/* default value of bitfield desc{d}_hdr_split */ +#define rdm_descdhdr_split_default 0x0 + +/* rx desc{d}_hd[c:0] bitfield definitions + * preprocessor definitions for the bitfield "desc{d}_hd[c:0]". + * parameter: descriptor {d} | stride size 0x20 | range [0, 31] + * port="rdm_pif_desc0_hd_o[12:0]" + */ + +/* register address for bitfield desc{d}_hd[c:0] */ +#define rdm_descdhd_adr(descriptor) (0x00005b0c + (descriptor) * 0x20) +/* bitmask for bitfield desc{d}_hd[c:0] */ +#define rdm_descdhd_msk 0x00001fff +/* inverted bitmask for bitfield desc{d}_hd[c:0] */ +#define rdm_descdhd_mskn 0xffffe000 +/* lower bit position of bitfield desc{d}_hd[c:0] */ +#define rdm_descdhd_shift 0 +/* width of bitfield desc{d}_hd[c:0] */ +#define rdm_descdhd_width 13 + +/* rx desc{d}_len[9:0] bitfield definitions + * preprocessor definitions for the bitfield "desc{d}_len[9:0]". + * parameter: descriptor {d} | stride size 0x20 | range [0, 31] + * port="pif_rdm_desc0_len_i[9:0]" + */ + +/* register address for bitfield desc{d}_len[9:0] */ +#define rdm_descdlen_adr(descriptor) (0x00005b08 + (descriptor) * 0x20) +/* bitmask for bitfield desc{d}_len[9:0] */ +#define rdm_descdlen_msk 0x00001ff8 +/* inverted bitmask for bitfield desc{d}_len[9:0] */ +#define rdm_descdlen_mskn 0xffffe007 +/* lower bit position of bitfield desc{d}_len[9:0] */ +#define rdm_descdlen_shift 3 +/* width of bitfield desc{d}_len[9:0] */ +#define rdm_descdlen_width 10 +/* default value of bitfield desc{d}_len[9:0] */ +#define rdm_descdlen_default 0x0 + +/* rx desc{d}_reset bitfield definitions + * preprocessor definitions for the bitfield "desc{d}_reset". 
+ * parameter: descriptor {d} | stride size 0x20 | range [0, 31]
+ * port="pif_rdm_q_pf_res_i[0]"
+ */
+
+/* register address for bitfield desc{d}_reset */
+#define rdm_descdreset_adr(descriptor) (0x00005b08 + (descriptor) * 0x20)
+/* bitmask for bitfield desc{d}_reset */
+#define rdm_descdreset_msk 0x02000000
+/* inverted bitmask for bitfield desc{d}_reset */
+#define rdm_descdreset_mskn 0xfdffffff
+/* lower bit position of bitfield desc{d}_reset */
+#define rdm_descdreset_shift 25
+/* width of bitfield desc{d}_reset */
+#define rdm_descdreset_width 1
+/* default value of bitfield desc{d}_reset */
+#define rdm_descdreset_default 0x0
+
+/* rdm_desc_init_i bitfield definitions
+ * preprocessor definitions for the bitfield rdm_desc_init_i.
+ * port="pif_rdm_desc_init_i"
+ */
+
+/* register address for bitfield rdm_desc_init_i */
+#define rdm_rx_dma_desc_cache_init_adr 0x00005a00
+/* bitmask for bitfield rdm_desc_init_i */
+#define rdm_rx_dma_desc_cache_init_msk 0x00000001
+/* inverted bitmask for bitfield rdm_desc_init_i */
+#define rdm_rx_dma_desc_cache_init_mskn 0xfffffffe
+/* lower bit position of bitfield rdm_desc_init_i */
+#define rdm_rx_dma_desc_cache_init_shift 0
+/* width of bitfield rdm_desc_init_i */
+#define rdm_rx_dma_desc_cache_init_width 1
+/* default value of bitfield rdm_desc_init_i */
+#define rdm_rx_dma_desc_cache_init_default 0x0
+
+/* rx int_desc_wrb_en bitfield definitions
+ * preprocessor definitions for the bitfield "int_desc_wrb_en".
+ * port="pif_rdm_int_desc_wrb_en_i"
+ */
+
+/* register address for bitfield int_desc_wrb_en */
+#define rdm_int_desc_wrb_en_adr 0x00005a30
+/* bitmask for bitfield int_desc_wrb_en */
+#define rdm_int_desc_wrb_en_msk 0x00000004
+/* inverted bitmask for bitfield int_desc_wrb_en */
+#define rdm_int_desc_wrb_en_mskn 0xfffffffb
+/* lower bit position of bitfield int_desc_wrb_en */
+#define rdm_int_desc_wrb_en_shift 2
+/* width of bitfield int_desc_wrb_en */
+#define rdm_int_desc_wrb_en_width 1
+/* default value of bitfield int_desc_wrb_en */
+#define rdm_int_desc_wrb_en_default 0x0
+
+/* rx dca{d}_hdr_en bitfield definitions
+ * preprocessor definitions for the bitfield "dca{d}_hdr_en".
+ * parameter: dca {d} | stride size 0x4 | range [0, 31]
+ * port="pif_rdm_dca_hdr_en_i[0]"
+ */
+
+/* register address for bitfield dca{d}_hdr_en */
+#define rdm_dcadhdr_en_adr(dca) (0x00006100 + (dca) * 0x4)
+/* bitmask for bitfield dca{d}_hdr_en */
+#define rdm_dcadhdr_en_msk 0x40000000
+/* inverted bitmask for bitfield dca{d}_hdr_en */
+#define rdm_dcadhdr_en_mskn 0xbfffffff
+/* lower bit position of bitfield dca{d}_hdr_en */
+#define rdm_dcadhdr_en_shift 30
+/* width of bitfield dca{d}_hdr_en */
+#define rdm_dcadhdr_en_width 1
+/* default value of bitfield dca{d}_hdr_en */
+#define rdm_dcadhdr_en_default 0x0
+
+/* rx dca{d}_pay_en bitfield definitions
+ * preprocessor definitions for the bitfield "dca{d}_pay_en".
+ * parameter: dca {d} | stride size 0x4 | range [0, 31] + * port="pif_rdm_dca_pay_en_i[0]" + */ + +/* register address for bitfield dca{d}_pay_en */ +#define rdm_dcadpay_en_adr(dca) (0x00006100 + (dca) * 0x4) +/* bitmask for bitfield dca{d}_pay_en */ +#define rdm_dcadpay_en_msk 0x20000000 +/* inverted bitmask for bitfield dca{d}_pay_en */ +#define rdm_dcadpay_en_mskn 0xdfffffff +/* lower bit position of bitfield dca{d}_pay_en */ +#define rdm_dcadpay_en_shift 29 +/* width of bitfield dca{d}_pay_en */ +#define rdm_dcadpay_en_width 1 +/* default value of bitfield dca{d}_pay_en */ +#define rdm_dcadpay_en_default 0x0 + +/* RX rdm_int_rim_en Bitfield Definitions + * Preprocessor definitions for the bitfield "rdm_int_rim_en". + * PORT="pif_rdm_int_rim_en_i" + */ + +/* Register address for bitfield rdm_int_rim_en */ +#define rdm_int_rim_en_adr 0x00005A30 +/* Bitmask for bitfield rdm_int_rim_en */ +#define rdm_int_rim_en_msk 0x00000008 +/* Inverted bitmask for bitfield rdm_int_rim_en */ +#define rdm_int_rim_en_mskn 0xFFFFFFF7 +/* Lower bit position of bitfield rdm_int_rim_en */ +#define rdm_int_rim_en_shift 3 +/* Width of bitfield rdm_int_rim_en */ +#define rdm_int_rim_en_width 1 +/* Default value of bitfield rdm_int_rim_en */ +#define rdm_int_rim_en_default 0x0 + +/* general interrupt mapping register definitions + * preprocessor definitions for general interrupt mapping register + * base address: 0x00002180 + * parameter: regidx {f} | stride size 0x4 | range [0, 3] + */ +#define gen_intr_map_adr(regidx) (0x00002180u + (regidx) * 0x4) + +/* general interrupt status register definitions + * preprocessor definitions for general interrupt status register + * address: 0x000021A0 + */ + +#define gen_intr_stat_adr 0x000021A4U + +/* interrupt global control register definitions + * preprocessor definitions for interrupt global control register + * address: 0x00002300 + */ +#define intr_glb_ctl_adr 0x00002300u + +/* interrupt throttle register definitions + * preprocessor definitions for interrupt throttle register + * base address: 0x00002800 + * parameter: throttle {t} | stride size 0x4 | range [0, 31] + */ +#define intr_thr_adr(throttle) (0x00002800u + (throttle) * 0x4) + +/* Register address for bitfield imr_link_en */ +#define itrImrLinkEn_ADR 0x00002180 +/* Bitmask for bitfield imr_link_en */ +#define itrImrLinkEn_MSK 0x00008000 +/* Inverted bitmask for bitfield imr_link_en */ +#define itrImrLinkEn_MSKN 0xFFFF7FFF +/* Lower bit position of bitfield imr_link_en */ +#define itrImrLinkEn_SHIFT 15 +/* Width of bitfield imr_link_en */ +#define itrImrLinkEn_WIDTH 1 +/* Default value of bitfield imr_link_en */ +#define itrImrLinkEn_DEFAULT 0x0 + +/* Register address for bitfield imr_link[4:0] */ +#define itrImrLink_ADR 0x00002180 +/* Bitmask for bitfield imr_link[4:0] */ +#define itrImrLink_MSK 0x00001F00 +/* Inverted bitmask for bitfield imr_link[4:0] */ +#define itrImrLink_MSKN 0xFFFFE0FF +/* Lower bit position of bitfield imr_link[4:0] */ +#define itrImrLink_SHIFT 8 +/* Width of bitfield imr_link[4:0] */ +#define itrImrLink_WIDTH 5 +/* Default value of bitfield imr_link[4:0] */ +#define itrImrLink_DEFAULT 0x0 + + +/* INTR imr_mif{M}_en Bitfield Definitions +* Preprocessor definitions for the bitfield "imr_mif{M}_en". +* Parameter: MIF {M} | bit-level stride | range [0, 3] +* PORT="pif_itr_map_mif_en_i[0]" +*/ +/* Register address for bitfield imr_mif{M}_en */ +#define itrImrMifMEn_ADR(MIF) \ + (((MIF) == 0) ? 0x0000218C : \ + (((MIF) == 1) ? 0x0000218C : \ + (((MIF) == 2) ? 
0x0000218C : \ + (((MIF) == 3) ? 0x0000218C : \ + 0)))) +/* Bitmask for bitfield imr_mif{M}_en */ +#define itrImrMifMEn_MSK(MIF) \ + (((MIF) == 0) ? 0x80000000 : \ + (((MIF) == 1) ? 0x00800000 : \ + (((MIF) == 2) ? 0x00008000 : \ + (((MIF) == 3) ? 0x00000080 : \ + 0)))) +/* Inverted bitmask for bitfield imr_mif{M}_en */ +#define itrImrMifMEn_MSKN(MIF) \ + (((MIF) == 0) ? 0x7FFFFFFF : \ + (((MIF) == 1) ? 0xFF7FFFFF : \ + (((MIF) == 2) ? 0xFFFF7FFF : \ + (((MIF) == 3) ? 0xFFFFFF7F : \ + 0)))) +/* Lower bit position of bitfield imr_mif{M}_en */ +#define itrImrMifMEn_SHIFT(MIF) \ + (((MIF) == 0) ? 31 : \ + (((MIF) == 1) ? 23 : \ + (((MIF) == 2) ? 15 : \ + (((MIF) == 3) ? 7 : \ + 0)))) +/* Width of bitfield imr_mif{M}_en */ +#define itrImrMifMEn_WIDTH 1 +/* Default value of bitfield imr_mif{M}_en */ +#define itrImrMifMEn_DEFAULT 0x0 + +/* INTR imr_mif{M}[4:0] Bitfield Definitions +* Preprocessor definitions for the bitfield "imr_mif{M}[4:0]". +* Parameter: MIF {M} | bit-level stride | range [0, 3] +* PORT="pif_itr_map_mif0_i[4:0]" +*/ +/* Register address for bitfield imr_mif{M}[4:0] */ +#define itrImrMifM_ADR(MIF) \ + (((MIF) == 0) ? 0x0000218C : \ + (((MIF) == 1) ? 0x0000218C : \ + (((MIF) == 2) ? 0x0000218C : \ + (((MIF) == 3) ? 0x0000218C : \ + 0)))) +/* Bitmask for bitfield imr_mif{M}[4:0] */ +#define itrImrMifM_MSK(MIF) \ + (((MIF) == 0) ? 0x1F000000 : \ + (((MIF) == 1) ? 0x001F0000 : \ + (((MIF) == 2) ? 0x00001F00 : \ + (((MIF) == 3) ? 0x0000001F : \ + 0)))) +/* Inverted bitmask for bitfield imr_mif{M}[4:0] */ +#define itrImrMifM_MSKN(MIF) \ + (((MIF) == 0) ? 0xE0FFFFFF : \ + (((MIF) == 1) ? 0xFFE0FFFF : \ + (((MIF) == 2) ? 0xFFFFE0FF : \ + (((MIF) == 3) ? 0xFFFFFFE0 : \ + 0)))) +/* Lower bit position of bitfield imr_mif{M}[4:0] */ +#define itrImrMifM_SHIFT(MIF) \ + (((MIF) == 0) ? 24 : \ + (((MIF) == 1) ? 16 : \ + (((MIF) == 2) ? 8 : \ + (((MIF) == 3) ? 
0 : \ + 0)))) +/* Width of bitfield imr_mif{M}[4:0] */ +#define itrImrMifM_WIDTH 5 +/* Default value of bitfield imr_mif{M}[4:0] */ +#define itrImrMifM_DEFAULT 0x0 + + +/* Register address for bitfield int_mode[1:0] */ +#define itrIntMode_ADR 0x00002300 +/* Bitmask for bitfield int_mode[1:0] */ +#define itrIntMode_MSK 0x00000003 +/* Inverted bitmask for bitfield int_mode[1:0] */ +#define itrIntMode_MSKN 0xFFFFFFFC +/* Lower bit position of bitfield int_mode[1:0] */ +#define itrIntMode_SHIFT 0 +/*f Width of bitfield int_mode[1:0] */ +#define itrIntMode_WIDTH 2 +/* Default value of bitfield int_mode[1:0] */ +#define itrIntMode_DEFAULT 0x0 + +/* Register address for bitfield isr_cor_en */ +#define itrIsrCorEn_ADR 0x00002300 +/* Bitmask for bitfield isr_cor_en */ +#define itrIsrCorEn_MSK 0x00000080 +/* Inverted bitmask for bitfield isr_cor_en */ +#define itrIsrCorEn_MSKN 0xFFFFFF7F +/* Lower bit position of bitfield isr_cor_en */ +#define itrIsrCorEn_SHIFT 7 +/* Width of bitfield isr_cor_en */ +#define itrIsrCorEn_WIDTH 1 +/* Default value of bitfield isr_cor_en */ +#define itrIsrCorEn_DEFAULT 0x0 +/*@}*/ + +/* Register address for bitfield iamr_clr_en */ +#define itrIamrClrEn_ADR 0x00002300 +/* Bitmask for bitfield iamr_clr_en */ +#define itrIamrClrEn_MSK 0x00000020 +/* Inverted bitmask for bitfield iamr_clr_en */ +#define itrIamrClrEn_MSKN 0xFFFFFFDF +/* Lower bit position of bitfield iamr_clr_en */ +#define itrIamrClrEn_SHIFT 5 +/* Width of bitfield iamr_clr_en */ +#define itrIamrClrEn_WIDTH 1 +/* Default value of bitfield iamr_clr_en */ +#define itrIamrClrEn_DEFAULT 0x0 + +/* rx dma descriptor base address lsw definitions + * preprocessor definitions for rx dma descriptor base address lsw + * base address: 0x00005b00 + * parameter: descriptor {d} | stride size 0x20 | range [0, 31] + */ +#define rx_dma_desc_base_addrlsw_adr(descriptor) \ +(0x00005b00u + (descriptor) * 0x20) + +/* rx dma descriptor base address msw definitions + * preprocessor definitions for rx dma descriptor base address msw + * base address: 0x00005b04 + * parameter: descriptor {d} | stride size 0x20 | range [0, 31] + */ +#define rx_dma_desc_base_addrmsw_adr(descriptor) \ +(0x00005b04u + (descriptor) * 0x20) + +/* rx dma descriptor status register definitions + * preprocessor definitions for rx dma descriptor status register + * base address: 0x00005b14 + * parameter: descriptor {d} | stride size 0x20 | range [0, 31] + */ +#define rx_dma_desc_stat_adr(descriptor) (0x00005b14u + (descriptor) * 0x20) + +/* rx dma descriptor tail pointer register definitions + * preprocessor definitions for rx dma descriptor tail pointer register + * base address: 0x00005b10 + * parameter: descriptor {d} | stride size 0x20 | range [0, 31] + */ +#define rx_dma_desc_tail_ptr_adr(descriptor) (0x00005b10u + (descriptor) * 0x20) + +/* rx interrupt moderation control register definitions + * Preprocessor definitions for RX Interrupt Moderation Control Register + * Base Address: 0x00005A40 + * Parameter: RIM {R} | stride size 0x4 | range [0, 31] + */ +#define rx_intr_moderation_ctl_adr(rim) (0x00005A40u + (rim) * 0x4) + +/* rx filter multicast filter mask register definitions + * preprocessor definitions for rx filter multicast filter mask register + * address: 0x00005270 + */ +#define rx_flr_mcst_flr_msk_adr 0x00005270u + +/* rx filter multicast filter register definitions + * preprocessor definitions for rx filter multicast filter register + * base address: 0x00005250 + * parameter: filter {f} | stride size 0x4 | range [0, 7] + */ +#define 
rx_flr_mcst_flr_adr(filter) (0x00005250u + (filter) * 0x4) + +/* RX Filter RSS Control Register 1 Definitions + * Preprocessor definitions for RX Filter RSS Control Register 1 + * Address: 0x000054C0 + */ +#define rx_flr_rss_control1_adr 0x000054C0u + +/* RX Filter Control Register 2 Definitions + * Preprocessor definitions for RX Filter Control Register 2 + * Address: 0x00005104 + */ +#define rx_flr_control2_adr 0x00005104u + +/* tx tx dma debug control [1f:0] bitfield definitions + * preprocessor definitions for the bitfield "tx dma debug control [1f:0]". + * port="pif_tdm_debug_cntl_i[31:0]" + */ + +/* register address for bitfield tx dma debug control [1f:0] */ +#define tdm_tx_dma_debug_ctl_adr 0x00008920 +/* bitmask for bitfield tx dma debug control [1f:0] */ +#define tdm_tx_dma_debug_ctl_msk 0xffffffff +/* inverted bitmask for bitfield tx dma debug control [1f:0] */ +#define tdm_tx_dma_debug_ctl_mskn 0x00000000 +/* lower bit position of bitfield tx dma debug control [1f:0] */ +#define tdm_tx_dma_debug_ctl_shift 0 +/* width of bitfield tx dma debug control [1f:0] */ +#define tdm_tx_dma_debug_ctl_width 32 +/* default value of bitfield tx dma debug control [1f:0] */ +#define tdm_tx_dma_debug_ctl_default 0x0 + +/* tx dma descriptor base address lsw definitions + * preprocessor definitions for tx dma descriptor base address lsw + * base address: 0x00007c00 + * parameter: descriptor {d} | stride size 0x40 | range [0, 31] + */ +#define tx_dma_desc_base_addrlsw_adr(descriptor) \ + (0x00007c00u + (descriptor) * 0x40) + +/* tx dma descriptor tail pointer register definitions + * preprocessor definitions for tx dma descriptor tail pointer register + * base address: 0x00007c10 + * parameter: descriptor {d} | stride size 0x40 | range [0, 31] + */ +#define tx_dma_desc_tail_ptr_adr(descriptor) (0x00007c10u + (descriptor) * 0x40) + +/* rx dma_sys_loopback bitfield definitions + * preprocessor definitions for the bitfield "dma_sys_loopback". + * port="pif_rpb_dma_sys_lbk_i" + */ + +/* register address for bitfield dma_sys_loopback */ +#define rpb_dma_sys_lbk_adr 0x00005000 +/* bitmask for bitfield dma_sys_loopback */ +#define rpb_dma_sys_lbk_msk 0x00000040 +/* inverted bitmask for bitfield dma_sys_loopback */ +#define rpb_dma_sys_lbk_mskn 0xffffffbf +/* lower bit position of bitfield dma_sys_loopback */ +#define rpb_dma_sys_lbk_shift 6 +/* width of bitfield dma_sys_loopback */ +#define rpb_dma_sys_lbk_width 1 +/* default value of bitfield dma_sys_loopback */ +#define rpb_dma_sys_lbk_default 0x0 + +/* rx rx_tc_mode bitfield definitions + * preprocessor definitions for the bitfield "rx_tc_mode". + * port="pif_rpb_rx_tc_mode_i,pif_rpf_rx_tc_mode_i" + */ + +/* register address for bitfield rx_tc_mode */ +#define rpb_rpf_rx_tc_mode_adr 0x00005700 +/* bitmask for bitfield rx_tc_mode */ +#define rpb_rpf_rx_tc_mode_msk 0x00000100 +/* inverted bitmask for bitfield rx_tc_mode */ +#define rpb_rpf_rx_tc_mode_mskn 0xfffffeff +/* lower bit position of bitfield rx_tc_mode */ +#define rpb_rpf_rx_tc_mode_shift 8 +/* width of bitfield rx_tc_mode */ +#define rpb_rpf_rx_tc_mode_width 1 +/* default value of bitfield rx_tc_mode */ +#define rpb_rpf_rx_tc_mode_default 0x0 + +/* rx rx_buf_en bitfield definitions + * preprocessor definitions for the bitfield "rx_buf_en". 
+ * port="pif_rpb_rx_buf_en_i" + */ + +/* register address for bitfield rx_buf_en */ +#define rpb_rx_buf_en_adr 0x00005700 +/* bitmask for bitfield rx_buf_en */ +#define rpb_rx_buf_en_msk 0x00000001 +/* inverted bitmask for bitfield rx_buf_en */ +#define rpb_rx_buf_en_mskn 0xfffffffe +/* lower bit position of bitfield rx_buf_en */ +#define rpb_rx_buf_en_shift 0 +/* width of bitfield rx_buf_en */ +#define rpb_rx_buf_en_width 1 +/* default value of bitfield rx_buf_en */ +#define rpb_rx_buf_en_default 0x0 + +/* rx rx{b}_hi_thresh[d:0] bitfield definitions + * preprocessor definitions for the bitfield "rx{b}_hi_thresh[d:0]". + * parameter: buffer {b} | stride size 0x10 | range [0, 7] + * port="pif_rpb_rx0_hi_thresh_i[13:0]" + */ + +/* register address for bitfield rx{b}_hi_thresh[d:0] */ +#define rpb_rxbhi_thresh_adr(buffer) (0x00005714 + (buffer) * 0x10) +/* bitmask for bitfield rx{b}_hi_thresh[d:0] */ +#define rpb_rxbhi_thresh_msk 0x3fff0000 +/* inverted bitmask for bitfield rx{b}_hi_thresh[d:0] */ +#define rpb_rxbhi_thresh_mskn 0xc000ffff +/* lower bit position of bitfield rx{b}_hi_thresh[d:0] */ +#define rpb_rxbhi_thresh_shift 16 +/* width of bitfield rx{b}_hi_thresh[d:0] */ +#define rpb_rxbhi_thresh_width 14 +/* default value of bitfield rx{b}_hi_thresh[d:0] */ +#define rpb_rxbhi_thresh_default 0x0 + +/* rx rx{b}_lo_thresh[d:0] bitfield definitions + * preprocessor definitions for the bitfield "rx{b}_lo_thresh[d:0]". + * parameter: buffer {b} | stride size 0x10 | range [0, 7] + * port="pif_rpb_rx0_lo_thresh_i[13:0]" + */ + +/* register address for bitfield rx{b}_lo_thresh[d:0] */ +#define rpb_rxblo_thresh_adr(buffer) (0x00005714 + (buffer) * 0x10) +/* bitmask for bitfield rx{b}_lo_thresh[d:0] */ +#define rpb_rxblo_thresh_msk 0x00003fff +/* inverted bitmask for bitfield rx{b}_lo_thresh[d:0] */ +#define rpb_rxblo_thresh_mskn 0xffffc000 +/* lower bit position of bitfield rx{b}_lo_thresh[d:0] */ +#define rpb_rxblo_thresh_shift 0 +/* width of bitfield rx{b}_lo_thresh[d:0] */ +#define rpb_rxblo_thresh_width 14 +/* default value of bitfield rx{b}_lo_thresh[d:0] */ +#define rpb_rxblo_thresh_default 0x0 + +/* rx rx_fc_mode[1:0] bitfield definitions + * preprocessor definitions for the bitfield "rx_fc_mode[1:0]". + * port="pif_rpb_rx_fc_mode_i[1:0]" + */ + +/* register address for bitfield rx_fc_mode[1:0] */ +#define rpb_rx_fc_mode_adr 0x00005700 +/* bitmask for bitfield rx_fc_mode[1:0] */ +#define rpb_rx_fc_mode_msk 0x00000030 +/* inverted bitmask for bitfield rx_fc_mode[1:0] */ +#define rpb_rx_fc_mode_mskn 0xffffffcf +/* lower bit position of bitfield rx_fc_mode[1:0] */ +#define rpb_rx_fc_mode_shift 4 +/* width of bitfield rx_fc_mode[1:0] */ +#define rpb_rx_fc_mode_width 2 +/* default value of bitfield rx_fc_mode[1:0] */ +#define rpb_rx_fc_mode_default 0x0 + +/* rx rx{b}_buf_size[8:0] bitfield definitions + * preprocessor definitions for the bitfield "rx{b}_buf_size[8:0]". 
+ * parameter: buffer {b} | stride size 0x10 | range [0, 7] + * port="pif_rpb_rx0_buf_size_i[8:0]" + */ + +/* register address for bitfield rx{b}_buf_size[8:0] */ +#define rpb_rxbbuf_size_adr(buffer) (0x00005710 + (buffer) * 0x10) +/* bitmask for bitfield rx{b}_buf_size[8:0] */ +#define rpb_rxbbuf_size_msk 0x000001ff +/* inverted bitmask for bitfield rx{b}_buf_size[8:0] */ +#define rpb_rxbbuf_size_mskn 0xfffffe00 +/* lower bit position of bitfield rx{b}_buf_size[8:0] */ +#define rpb_rxbbuf_size_shift 0 +/* width of bitfield rx{b}_buf_size[8:0] */ +#define rpb_rxbbuf_size_width 9 +/* default value of bitfield rx{b}_buf_size[8:0] */ +#define rpb_rxbbuf_size_default 0x0 + +/* rx rx{b}_xoff_en bitfield definitions + * preprocessor definitions for the bitfield "rx{b}_xoff_en". + * parameter: buffer {b} | stride size 0x10 | range [0, 7] + * port="pif_rpb_rx_xoff_en_i[0]" + */ + +/* register address for bitfield rx{b}_xoff_en */ +#define rpb_rxbxoff_en_adr(buffer) (0x00005714 + (buffer) * 0x10) +/* bitmask for bitfield rx{b}_xoff_en */ +#define rpb_rxbxoff_en_msk 0x80000000 +/* inverted bitmask for bitfield rx{b}_xoff_en */ +#define rpb_rxbxoff_en_mskn 0x7fffffff +/* lower bit position of bitfield rx{b}_xoff_en */ +#define rpb_rxbxoff_en_shift 31 +/* width of bitfield rx{b}_xoff_en */ +#define rpb_rxbxoff_en_width 1 +/* default value of bitfield rx{b}_xoff_en */ +#define rpb_rxbxoff_en_default 0x0 + +/* rx l2_bc_thresh[f:0] bitfield definitions + * preprocessor definitions for the bitfield "l2_bc_thresh[f:0]". + * port="pif_rpf_l2_bc_thresh_i[15:0]" + */ + +/* register address for bitfield l2_bc_thresh[f:0] */ +#define rpfl2bc_thresh_adr 0x00005100 +/* bitmask for bitfield l2_bc_thresh[f:0] */ +#define rpfl2bc_thresh_msk 0xffff0000 +/* inverted bitmask for bitfield l2_bc_thresh[f:0] */ +#define rpfl2bc_thresh_mskn 0x0000ffff +/* lower bit position of bitfield l2_bc_thresh[f:0] */ +#define rpfl2bc_thresh_shift 16 +/* width of bitfield l2_bc_thresh[f:0] */ +#define rpfl2bc_thresh_width 16 +/* default value of bitfield l2_bc_thresh[f:0] */ +#define rpfl2bc_thresh_default 0x0 + +/* rx l2_bc_en bitfield definitions + * preprocessor definitions for the bitfield "l2_bc_en". + * port="pif_rpf_l2_bc_en_i" + */ + +/* register address for bitfield l2_bc_en */ +#define rpfl2bc_en_adr 0x00005100 +/* bitmask for bitfield l2_bc_en */ +#define rpfl2bc_en_msk 0x00000001 +/* inverted bitmask for bitfield l2_bc_en */ +#define rpfl2bc_en_mskn 0xfffffffe +/* lower bit position of bitfield l2_bc_en */ +#define rpfl2bc_en_shift 0 +/* width of bitfield l2_bc_en */ +#define rpfl2bc_en_width 1 +/* default value of bitfield l2_bc_en */ +#define rpfl2bc_en_default 0x0 + +/* rx l2_bc_act[2:0] bitfield definitions + * preprocessor definitions for the bitfield "l2_bc_act[2:0]". + * port="pif_rpf_l2_bc_act_i[2:0]" + */ + +/* register address for bitfield l2_bc_act[2:0] */ +#define rpfl2bc_act_adr 0x00005100 +/* bitmask for bitfield l2_bc_act[2:0] */ +#define rpfl2bc_act_msk 0x00007000 +/* inverted bitmask for bitfield l2_bc_act[2:0] */ +#define rpfl2bc_act_mskn 0xffff8fff +/* lower bit position of bitfield l2_bc_act[2:0] */ +#define rpfl2bc_act_shift 12 +/* width of bitfield l2_bc_act[2:0] */ +#define rpfl2bc_act_width 3 +/* default value of bitfield l2_bc_act[2:0] */ +#define rpfl2bc_act_default 0x0 + +/* rx l2_mc_en{f} bitfield definitions + * preprocessor definitions for the bitfield "l2_mc_en{f}". 
+ * parameter: filter {f} | stride size 0x4 | range [0, 7] + * port="pif_rpf_l2_mc_en_i[0]" + */ + +/* register address for bitfield l2_mc_en{f} */ +#define rpfl2mc_enf_adr(filter) (0x00005250 + (filter) * 0x4) +/* bitmask for bitfield l2_mc_en{f} */ +#define rpfl2mc_enf_msk 0x80000000 +/* inverted bitmask for bitfield l2_mc_en{f} */ +#define rpfl2mc_enf_mskn 0x7fffffff +/* lower bit position of bitfield l2_mc_en{f} */ +#define rpfl2mc_enf_shift 31 +/* width of bitfield l2_mc_en{f} */ +#define rpfl2mc_enf_width 1 +/* default value of bitfield l2_mc_en{f} */ +#define rpfl2mc_enf_default 0x0 + +/* rx l2_promis_mode bitfield definitions + * preprocessor definitions for the bitfield "l2_promis_mode". + * port="pif_rpf_l2_promis_mode_i" + */ + +/* register address for bitfield l2_promis_mode */ +#define rpfl2promis_mode_adr 0x00005100 +/* bitmask for bitfield l2_promis_mode */ +#define rpfl2promis_mode_msk 0x00000008 +/* inverted bitmask for bitfield l2_promis_mode */ +#define rpfl2promis_mode_mskn 0xfffffff7 +/* lower bit position of bitfield l2_promis_mode */ +#define rpfl2promis_mode_shift 3 +/* width of bitfield l2_promis_mode */ +#define rpfl2promis_mode_width 1 +/* default value of bitfield l2_promis_mode */ +#define rpfl2promis_mode_default 0x0 + +/* rx l2_uc_act{f}[2:0] bitfield definitions + * preprocessor definitions for the bitfield "l2_uc_act{f}[2:0]". + * parameter: filter {f} | stride size 0x8 | range [0, 37] + * port="pif_rpf_l2_uc_act0_i[2:0]" + */ + +/* register address for bitfield l2_uc_act{f}[2:0] */ +#define rpfl2uc_actf_adr(filter) (0x00005114 + (filter) * 0x8) +/* bitmask for bitfield l2_uc_act{f}[2:0] */ +#define rpfl2uc_actf_msk 0x00070000 +/* inverted bitmask for bitfield l2_uc_act{f}[2:0] */ +#define rpfl2uc_actf_mskn 0xfff8ffff +/* lower bit position of bitfield l2_uc_act{f}[2:0] */ +#define rpfl2uc_actf_shift 16 +/* width of bitfield l2_uc_act{f}[2:0] */ +#define rpfl2uc_actf_width 3 +/* default value of bitfield l2_uc_act{f}[2:0] */ +#define rpfl2uc_actf_default 0x0 + +/* rx l2_uc_en{f} bitfield definitions + * preprocessor definitions for the bitfield "l2_uc_en{f}". + * parameter: filter {f} | stride size 0x8 | range [0, 37] + * port="pif_rpf_l2_uc_en_i[0]" + */ + +/* register address for bitfield l2_uc_en{f} */ +#define rpfl2uc_enf_adr(filter) (0x00005114 + (filter) * 0x8) +/* bitmask for bitfield l2_uc_en{f} */ +#define rpfl2uc_enf_msk 0x80000000 +/* inverted bitmask for bitfield l2_uc_en{f} */ +#define rpfl2uc_enf_mskn 0x7fffffff +/* lower bit position of bitfield l2_uc_en{f} */ +#define rpfl2uc_enf_shift 31 +/* width of bitfield l2_uc_en{f} */ +#define rpfl2uc_enf_width 1 +/* default value of bitfield l2_uc_en{f} */ +#define rpfl2uc_enf_default 0x0 + +/* register address for bitfield l2_uc_da{f}_lsw[1f:0] */ +#define rpfl2uc_daflsw_adr(filter) (0x00005110 + (filter) * 0x8) +/* register address for bitfield l2_uc_da{f}_msw[f:0] */ +#define rpfl2uc_dafmsw_adr(filter) (0x00005114 + (filter) * 0x8) +/* bitmask for bitfield l2_uc_da{f}_msw[f:0] */ +#define rpfl2uc_dafmsw_msk 0x0000ffff +/* lower bit position of bitfield l2_uc_da{f}_msw[f:0] */ +#define rpfl2uc_dafmsw_shift 0 + +/* rx l2_mc_accept_all bitfield definitions + * Preprocessor definitions for the bitfield "l2_mc_accept_all". 
+ * PORT="pif_rpf_l2_mc_all_accept_i" + */ + +/* Register address for bitfield l2_mc_accept_all */ +#define rpfl2mc_accept_all_adr 0x00005270 +/* Bitmask for bitfield l2_mc_accept_all */ +#define rpfl2mc_accept_all_msk 0x00004000 +/* Inverted bitmask for bitfield l2_mc_accept_all */ +#define rpfl2mc_accept_all_mskn 0xFFFFBFFF +/* Lower bit position of bitfield l2_mc_accept_all */ +#define rpfl2mc_accept_all_shift 14 +/* Width of bitfield l2_mc_accept_all */ +#define rpfl2mc_accept_all_width 1 +/* Default value of bitfield l2_mc_accept_all */ +#define rpfl2mc_accept_all_default 0x0 + +/* width of bitfield rx_tc_up{t}[2:0] */ +#define rpf_rpb_rx_tc_upt_width 3 +/* default value of bitfield rx_tc_up{t}[2:0] */ +#define rpf_rpb_rx_tc_upt_default 0x0 + +/* rx rss_key_addr[4:0] bitfield definitions + * preprocessor definitions for the bitfield "rss_key_addr[4:0]". + * port="pif_rpf_rss_key_addr_i[4:0]" + */ + +/* register address for bitfield rss_key_addr[4:0] */ +#define rpf_rss_key_addr_adr 0x000054d0 +/* bitmask for bitfield rss_key_addr[4:0] */ +#define rpf_rss_key_addr_msk 0x0000001f +/* inverted bitmask for bitfield rss_key_addr[4:0] */ +#define rpf_rss_key_addr_mskn 0xffffffe0 +/* lower bit position of bitfield rss_key_addr[4:0] */ +#define rpf_rss_key_addr_shift 0 +/* width of bitfield rss_key_addr[4:0] */ +#define rpf_rss_key_addr_width 5 +/* default value of bitfield rss_key_addr[4:0] */ +#define rpf_rss_key_addr_default 0x0 + +/* rx rss_key_wr_data[1f:0] bitfield definitions + * preprocessor definitions for the bitfield "rss_key_wr_data[1f:0]". + * port="pif_rpf_rss_key_wr_data_i[31:0]" + */ + +/* register address for bitfield rss_key_wr_data[1f:0] */ +#define rpf_rss_key_wr_data_adr 0x000054d4 +/* bitmask for bitfield rss_key_wr_data[1f:0] */ +#define rpf_rss_key_wr_data_msk 0xffffffff +/* inverted bitmask for bitfield rss_key_wr_data[1f:0] */ +#define rpf_rss_key_wr_data_mskn 0x00000000 +/* lower bit position of bitfield rss_key_wr_data[1f:0] */ +#define rpf_rss_key_wr_data_shift 0 +/* width of bitfield rss_key_wr_data[1f:0] */ +#define rpf_rss_key_wr_data_width 32 +/* default value of bitfield rss_key_wr_data[1f:0] */ +#define rpf_rss_key_wr_data_default 0x0 + +/* rx rss_key_rd_data[1f:0] bitfield definitions + * preprocessor definitions for the bitfield "rss_key_rd_data[1f:0]". + * port="pif_rpf_rss_key_wr_data_i[31:0]" + */ + +/* register address for bitfield rss_key_rd_data[1f:0] */ +#define rpf_rss_key_rd_data_adr 0x000054d8 +/* bitmask for bitfield rss_key_rd_data[1f:0] */ +#define rpf_rss_key_rd_data_msk 0xffffffff +/* inverted bitmask for bitfield rss_key_rd_data[1f:0] */ +#define rpf_rss_key_rd_data_mskn 0x00000000 +/* lower bit position of bitfield rss_key_rd_data[1f:0] */ +#define rpf_rss_key_rd_data_shift 0 +/* width of bitfield rss_key_rd_data[1f:0] */ +#define rpf_rss_key_rd_data_width 32 +/* default value of bitfield rss_key_rd_data[1f:0] */ +#define rpf_rss_key_rd_data_default 0x0 + +/* rx rss_key_wr_en_i bitfield definitions + * preprocessor definitions for the bitfield "rss_key_wr_en_i". 
+ * port="pif_rpf_rss_key_wr_en_i" + */ + +/* register address for bitfield rss_key_wr_en_i */ +#define rpf_rss_key_wr_eni_adr 0x000054d0 +/* bitmask for bitfield rss_key_wr_en_i */ +#define rpf_rss_key_wr_eni_msk 0x00000020 +/* inverted bitmask for bitfield rss_key_wr_en_i */ +#define rpf_rss_key_wr_eni_mskn 0xffffffdf +/* lower bit position of bitfield rss_key_wr_en_i */ +#define rpf_rss_key_wr_eni_shift 5 +/* width of bitfield rss_key_wr_en_i */ +#define rpf_rss_key_wr_eni_width 1 +/* default value of bitfield rss_key_wr_en_i */ +#define rpf_rss_key_wr_eni_default 0x0 + +/* rx rss_redir_addr[3:0] bitfield definitions + * preprocessor definitions for the bitfield "rss_redir_addr[3:0]". + * port="pif_rpf_rss_redir_addr_i[3:0]" + */ + +/* register address for bitfield rss_redir_addr[3:0] */ +#define rpf_rss_redir_addr_adr 0x000054e0 +/* bitmask for bitfield rss_redir_addr[3:0] */ +#define rpf_rss_redir_addr_msk 0x0000000f +/* inverted bitmask for bitfield rss_redir_addr[3:0] */ +#define rpf_rss_redir_addr_mskn 0xfffffff0 +/* lower bit position of bitfield rss_redir_addr[3:0] */ +#define rpf_rss_redir_addr_shift 0 +/* width of bitfield rss_redir_addr[3:0] */ +#define rpf_rss_redir_addr_width 4 +/* default value of bitfield rss_redir_addr[3:0] */ +#define rpf_rss_redir_addr_default 0x0 + +/* rx rss_redir_wr_data[f:0] bitfield definitions + * preprocessor definitions for the bitfield "rss_redir_wr_data[f:0]". + * port="pif_rpf_rss_redir_wr_data_i[15:0]" + */ + +/* register address for bitfield rss_redir_wr_data[f:0] */ +#define rpf_rss_redir_wr_data_adr 0x000054e4 +/* bitmask for bitfield rss_redir_wr_data[f:0] */ +#define rpf_rss_redir_wr_data_msk 0x0000ffff +/* inverted bitmask for bitfield rss_redir_wr_data[f:0] */ +#define rpf_rss_redir_wr_data_mskn 0xffff0000 +/* lower bit position of bitfield rss_redir_wr_data[f:0] */ +#define rpf_rss_redir_wr_data_shift 0 +/* width of bitfield rss_redir_wr_data[f:0] */ +#define rpf_rss_redir_wr_data_width 16 +/* default value of bitfield rss_redir_wr_data[f:0] */ +#define rpf_rss_redir_wr_data_default 0x0 + +/* rx rss_redir_wr_en_i bitfield definitions + * preprocessor definitions for the bitfield "rss_redir_wr_en_i". + * port="pif_rpf_rss_redir_wr_en_i" + */ + +/* register address for bitfield rss_redir_wr_en_i */ +#define rpf_rss_redir_wr_eni_adr 0x000054e0 +/* bitmask for bitfield rss_redir_wr_en_i */ +#define rpf_rss_redir_wr_eni_msk 0x00000010 +/* inverted bitmask for bitfield rss_redir_wr_en_i */ +#define rpf_rss_redir_wr_eni_mskn 0xffffffef +/* lower bit position of bitfield rss_redir_wr_en_i */ +#define rpf_rss_redir_wr_eni_shift 4 +/* width of bitfield rss_redir_wr_en_i */ +#define rpf_rss_redir_wr_eni_width 1 +/* default value of bitfield rss_redir_wr_en_i */ +#define rpf_rss_redir_wr_eni_default 0x0 + +/* rx tpo_rpf_sys_loopback bitfield definitions + * preprocessor definitions for the bitfield "tpo_rpf_sys_loopback". 
+ * port="pif_rpf_tpo_pkt_sys_lbk_i" + */ + +/* register address for bitfield tpo_rpf_sys_loopback */ +#define rpf_tpo_rpf_sys_lbk_adr 0x00005000 +/* bitmask for bitfield tpo_rpf_sys_loopback */ +#define rpf_tpo_rpf_sys_lbk_msk 0x00000100 +/* inverted bitmask for bitfield tpo_rpf_sys_loopback */ +#define rpf_tpo_rpf_sys_lbk_mskn 0xfffffeff +/* lower bit position of bitfield tpo_rpf_sys_loopback */ +#define rpf_tpo_rpf_sys_lbk_shift 8 +/* width of bitfield tpo_rpf_sys_loopback */ +#define rpf_tpo_rpf_sys_lbk_width 1 +/* default value of bitfield tpo_rpf_sys_loopback */ +#define rpf_tpo_rpf_sys_lbk_default 0x0 + +/* rx vl_inner_tpid[f:0] bitfield definitions + * preprocessor definitions for the bitfield "vl_inner_tpid[f:0]". + * port="pif_rpf_vl_inner_tpid_i[15:0]" + */ + +/* register address for bitfield vl_inner_tpid[f:0] */ +#define rpf_vl_inner_tpid_adr 0x00005284 +/* bitmask for bitfield vl_inner_tpid[f:0] */ +#define rpf_vl_inner_tpid_msk 0x0000ffff +/* inverted bitmask for bitfield vl_inner_tpid[f:0] */ +#define rpf_vl_inner_tpid_mskn 0xffff0000 +/* lower bit position of bitfield vl_inner_tpid[f:0] */ +#define rpf_vl_inner_tpid_shift 0 +/* width of bitfield vl_inner_tpid[f:0] */ +#define rpf_vl_inner_tpid_width 16 +/* default value of bitfield vl_inner_tpid[f:0] */ +#define rpf_vl_inner_tpid_default 0x8100 + +/* rx vl_outer_tpid[f:0] bitfield definitions + * preprocessor definitions for the bitfield "vl_outer_tpid[f:0]". + * port="pif_rpf_vl_outer_tpid_i[15:0]" + */ + +/* register address for bitfield vl_outer_tpid[f:0] */ +#define rpf_vl_outer_tpid_adr 0x00005284 +/* bitmask for bitfield vl_outer_tpid[f:0] */ +#define rpf_vl_outer_tpid_msk 0xffff0000 +/* inverted bitmask for bitfield vl_outer_tpid[f:0] */ +#define rpf_vl_outer_tpid_mskn 0x0000ffff +/* lower bit position of bitfield vl_outer_tpid[f:0] */ +#define rpf_vl_outer_tpid_shift 16 +/* width of bitfield vl_outer_tpid[f:0] */ +#define rpf_vl_outer_tpid_width 16 +/* default value of bitfield vl_outer_tpid[f:0] */ +#define rpf_vl_outer_tpid_default 0x88a8 + +/* rx vl_promis_mode bitfield definitions + * preprocessor definitions for the bitfield "vl_promis_mode". + * port="pif_rpf_vl_promis_mode_i" + */ + +/* register address for bitfield vl_promis_mode */ +#define rpf_vl_promis_mode_adr 0x00005280 +/* bitmask for bitfield vl_promis_mode */ +#define rpf_vl_promis_mode_msk 0x00000002 +/* inverted bitmask for bitfield vl_promis_mode */ +#define rpf_vl_promis_mode_mskn 0xfffffffd +/* lower bit position of bitfield vl_promis_mode */ +#define rpf_vl_promis_mode_shift 1 +/* width of bitfield vl_promis_mode */ +#define rpf_vl_promis_mode_width 1 +/* default value of bitfield vl_promis_mode */ +#define rpf_vl_promis_mode_default 0x0 + +/* RX vl_accept_untagged_mode Bitfield Definitions + * Preprocessor definitions for the bitfield "vl_accept_untagged_mode". 
+ * PORT="pif_rpf_vl_accept_untagged_i" + */ + +/* Register address for bitfield vl_accept_untagged_mode */ +#define rpf_vl_accept_untagged_mode_adr 0x00005280 +/* Bitmask for bitfield vl_accept_untagged_mode */ +#define rpf_vl_accept_untagged_mode_msk 0x00000004 +/* Inverted bitmask for bitfield vl_accept_untagged_mode */ +#define rpf_vl_accept_untagged_mode_mskn 0xFFFFFFFB +/* Lower bit position of bitfield vl_accept_untagged_mode */ +#define rpf_vl_accept_untagged_mode_shift 2 +/* Width of bitfield vl_accept_untagged_mode */ +#define rpf_vl_accept_untagged_mode_width 1 +/* Default value of bitfield vl_accept_untagged_mode */ +#define rpf_vl_accept_untagged_mode_default 0x0 + +/* rX vl_untagged_act[2:0] Bitfield Definitions + * Preprocessor definitions for the bitfield "vl_untagged_act[2:0]". + * PORT="pif_rpf_vl_untagged_act_i[2:0]" + */ + +/* Register address for bitfield vl_untagged_act[2:0] */ +#define rpf_vl_untagged_act_adr 0x00005280 +/* Bitmask for bitfield vl_untagged_act[2:0] */ +#define rpf_vl_untagged_act_msk 0x00000038 +/* Inverted bitmask for bitfield vl_untagged_act[2:0] */ +#define rpf_vl_untagged_act_mskn 0xFFFFFFC7 +/* Lower bit position of bitfield vl_untagged_act[2:0] */ +#define rpf_vl_untagged_act_shift 3 +/* Width of bitfield vl_untagged_act[2:0] */ +#define rpf_vl_untagged_act_width 3 +/* Default value of bitfield vl_untagged_act[2:0] */ +#define rpf_vl_untagged_act_default 0x0 + +/* RX vl_en{F} Bitfield Definitions + * Preprocessor definitions for the bitfield "vl_en{F}". + * Parameter: filter {F} | stride size 0x4 | range [0, 15] + * PORT="pif_rpf_vl_en_i[0]" + */ + +/* Register address for bitfield vl_en{F} */ +#define rpf_vl_en_f_adr(filter) (0x00005290 + (filter) * 0x4) +/* Bitmask for bitfield vl_en{F} */ +#define rpf_vl_en_f_msk 0x80000000 +/* Inverted bitmask for bitfield vl_en{F} */ +#define rpf_vl_en_f_mskn 0x7FFFFFFF +/* Lower bit position of bitfield vl_en{F} */ +#define rpf_vl_en_f_shift 31 +/* Width of bitfield vl_en{F} */ +#define rpf_vl_en_f_width 1 +/* Default value of bitfield vl_en{F} */ +#define rpf_vl_en_f_default 0x0 + +/* RX vl_act{F}[2:0] Bitfield Definitions + * Preprocessor definitions for the bitfield "vl_act{F}[2:0]". + * Parameter: filter {F} | stride size 0x4 | range [0, 15] + * PORT="pif_rpf_vl_act0_i[2:0]" + */ + +/* Register address for bitfield vl_act{F}[2:0] */ +#define rpf_vl_act_f_adr(filter) (0x00005290 + (filter) * 0x4) +/* Bitmask for bitfield vl_act{F}[2:0] */ +#define rpf_vl_act_f_msk 0x00070000 +/* Inverted bitmask for bitfield vl_act{F}[2:0] */ +#define rpf_vl_act_f_mskn 0xFFF8FFFF +/* Lower bit position of bitfield vl_act{F}[2:0] */ +#define rpf_vl_act_f_shift 16 +/* Width of bitfield vl_act{F}[2:0] */ +#define rpf_vl_act_f_width 3 +/* Default value of bitfield vl_act{F}[2:0] */ +#define rpf_vl_act_f_default 0x0 + +/* RX vl_id{F}[B:0] Bitfield Definitions + * Preprocessor definitions for the bitfield "vl_id{F}[B:0]". 
+ * Parameter: filter {F} | stride size 0x4 | range [0, 15] + * PORT="pif_rpf_vl_id0_i[11:0]" + */ + +/* Register address for bitfield vl_id{F}[B:0] */ +#define rpf_vl_id_f_adr(filter) (0x00005290 + (filter) * 0x4) +/* Bitmask for bitfield vl_id{F}[B:0] */ +#define rpf_vl_id_f_msk 0x00000FFF +/* Inverted bitmask for bitfield vl_id{F}[B:0] */ +#define rpf_vl_id_f_mskn 0xFFFFF000 +/* Lower bit position of bitfield vl_id{F}[B:0] */ +#define rpf_vl_id_f_shift 0 +/* Width of bitfield vl_id{F}[B:0] */ +#define rpf_vl_id_f_width 12 +/* Default value of bitfield vl_id{F}[B:0] */ +#define rpf_vl_id_f_default 0x0 + +/* RX et_en{F} Bitfield Definitions + * Preprocessor definitions for the bitfield "et_en{F}". + * Parameter: filter {F} | stride size 0x4 | range [0, 15] + * PORT="pif_rpf_et_en_i[0]" + */ + +/* Register address for bitfield et_en{F} */ +#define rpf_et_en_f_adr(filter) (0x00005300 + (filter) * 0x4) +/* Bitmask for bitfield et_en{F} */ +#define rpf_et_en_f_msk 0x80000000 +/* Inverted bitmask for bitfield et_en{F} */ +#define rpf_et_en_f_mskn 0x7FFFFFFF +/* Lower bit position of bitfield et_en{F} */ +#define rpf_et_en_f_shift 31 +/* Width of bitfield et_en{F} */ +#define rpf_et_en_f_width 1 +/* Default value of bitfield et_en{F} */ +#define rpf_et_en_f_default 0x0 + +/* rx et_en{f} bitfield definitions + * preprocessor definitions for the bitfield "et_en{f}". + * parameter: filter {f} | stride size 0x4 | range [0, 15] + * port="pif_rpf_et_en_i[0]" + */ + +/* register address for bitfield et_en{f} */ +#define rpf_et_enf_adr(filter) (0x00005300 + (filter) * 0x4) +/* bitmask for bitfield et_en{f} */ +#define rpf_et_enf_msk 0x80000000 +/* inverted bitmask for bitfield et_en{f} */ +#define rpf_et_enf_mskn 0x7fffffff +/* lower bit position of bitfield et_en{f} */ +#define rpf_et_enf_shift 31 +/* width of bitfield et_en{f} */ +#define rpf_et_enf_width 1 +/* default value of bitfield et_en{f} */ +#define rpf_et_enf_default 0x0 + +/* rx et_up{f}_en bitfield definitions + * preprocessor definitions for the bitfield "et_up{f}_en". + * parameter: filter {f} | stride size 0x4 | range [0, 15] + * port="pif_rpf_et_up_en_i[0]" + */ + +/* register address for bitfield et_up{f}_en */ +#define rpf_et_upfen_adr(filter) (0x00005300 + (filter) * 0x4) +/* bitmask for bitfield et_up{f}_en */ +#define rpf_et_upfen_msk 0x40000000 +/* inverted bitmask for bitfield et_up{f}_en */ +#define rpf_et_upfen_mskn 0xbfffffff +/* lower bit position of bitfield et_up{f}_en */ +#define rpf_et_upfen_shift 30 +/* width of bitfield et_up{f}_en */ +#define rpf_et_upfen_width 1 +/* default value of bitfield et_up{f}_en */ +#define rpf_et_upfen_default 0x0 + +/* rx et_rxq{f}_en bitfield definitions + * preprocessor definitions for the bitfield "et_rxq{f}_en". + * parameter: filter {f} | stride size 0x4 | range [0, 15] + * port="pif_rpf_et_rxq_en_i[0]" + */ + +/* register address for bitfield et_rxq{f}_en */ +#define rpf_et_rxqfen_adr(filter) (0x00005300 + (filter) * 0x4) +/* bitmask for bitfield et_rxq{f}_en */ +#define rpf_et_rxqfen_msk 0x20000000 +/* inverted bitmask for bitfield et_rxq{f}_en */ +#define rpf_et_rxqfen_mskn 0xdfffffff +/* lower bit position of bitfield et_rxq{f}_en */ +#define rpf_et_rxqfen_shift 29 +/* width of bitfield et_rxq{f}_en */ +#define rpf_et_rxqfen_width 1 +/* default value of bitfield et_rxq{f}_en */ +#define rpf_et_rxqfen_default 0x0 + +/* rx et_up{f}[2:0] bitfield definitions + * preprocessor definitions for the bitfield "et_up{f}[2:0]". 
+ * parameter: filter {f} | stride size 0x4 | range [0, 15] + * port="pif_rpf_et_up0_i[2:0]" + */ + +/* register address for bitfield et_up{f}[2:0] */ +#define rpf_et_upf_adr(filter) (0x00005300 + (filter) * 0x4) +/* bitmask for bitfield et_up{f}[2:0] */ +#define rpf_et_upf_msk 0x1c000000 +/* inverted bitmask for bitfield et_up{f}[2:0] */ +#define rpf_et_upf_mskn 0xe3ffffff +/* lower bit position of bitfield et_up{f}[2:0] */ +#define rpf_et_upf_shift 26 +/* width of bitfield et_up{f}[2:0] */ +#define rpf_et_upf_width 3 +/* default value of bitfield et_up{f}[2:0] */ +#define rpf_et_upf_default 0x0 + +/* rx et_rxq{f}[4:0] bitfield definitions + * preprocessor definitions for the bitfield "et_rxq{f}[4:0]". + * parameter: filter {f} | stride size 0x4 | range [0, 15] + * port="pif_rpf_et_rxq0_i[4:0]" + */ + +/* register address for bitfield et_rxq{f}[4:0] */ +#define rpf_et_rxqf_adr(filter) (0x00005300 + (filter) * 0x4) +/* bitmask for bitfield et_rxq{f}[4:0] */ +#define rpf_et_rxqf_msk 0x01f00000 +/* inverted bitmask for bitfield et_rxq{f}[4:0] */ +#define rpf_et_rxqf_mskn 0xfe0fffff +/* lower bit position of bitfield et_rxq{f}[4:0] */ +#define rpf_et_rxqf_shift 20 +/* width of bitfield et_rxq{f}[4:0] */ +#define rpf_et_rxqf_width 5 +/* default value of bitfield et_rxq{f}[4:0] */ +#define rpf_et_rxqf_default 0x0 + +/* rx et_mng_rxq{f} bitfield definitions + * preprocessor definitions for the bitfield "et_mng_rxq{f}". + * parameter: filter {f} | stride size 0x4 | range [0, 15] + * port="pif_rpf_et_mng_rxq_i[0]" + */ + +/* register address for bitfield et_mng_rxq{f} */ +#define rpf_et_mng_rxqf_adr(filter) (0x00005300 + (filter) * 0x4) +/* bitmask for bitfield et_mng_rxq{f} */ +#define rpf_et_mng_rxqf_msk 0x00080000 +/* inverted bitmask for bitfield et_mng_rxq{f} */ +#define rpf_et_mng_rxqf_mskn 0xfff7ffff +/* lower bit position of bitfield et_mng_rxq{f} */ +#define rpf_et_mng_rxqf_shift 19 +/* width of bitfield et_mng_rxq{f} */ +#define rpf_et_mng_rxqf_width 1 +/* default value of bitfield et_mng_rxq{f} */ +#define rpf_et_mng_rxqf_default 0x0 + +/* rx et_act{f}[2:0] bitfield definitions + * preprocessor definitions for the bitfield "et_act{f}[2:0]". + * parameter: filter {f} | stride size 0x4 | range [0, 15] + * port="pif_rpf_et_act0_i[2:0]" + */ + +/* register address for bitfield et_act{f}[2:0] */ +#define rpf_et_actf_adr(filter) (0x00005300 + (filter) * 0x4) +/* bitmask for bitfield et_act{f}[2:0] */ +#define rpf_et_actf_msk 0x00070000 +/* inverted bitmask for bitfield et_act{f}[2:0] */ +#define rpf_et_actf_mskn 0xfff8ffff +/* lower bit position of bitfield et_act{f}[2:0] */ +#define rpf_et_actf_shift 16 +/* width of bitfield et_act{f}[2:0] */ +#define rpf_et_actf_width 3 +/* default value of bitfield et_act{f}[2:0] */ +#define rpf_et_actf_default 0x0 + +/* rx et_val{f}[f:0] bitfield definitions + * preprocessor definitions for the bitfield "et_val{f}[f:0]". 
+ * parameter: filter {f} | stride size 0x4 | range [0, 15] + * port="pif_rpf_et_val0_i[15:0]" + */ + +/* register address for bitfield et_val{f}[f:0] */ +#define rpf_et_valf_adr(filter) (0x00005300 + (filter) * 0x4) +/* bitmask for bitfield et_val{f}[f:0] */ +#define rpf_et_valf_msk 0x0000ffff +/* inverted bitmask for bitfield et_val{f}[f:0] */ +#define rpf_et_valf_mskn 0xffff0000 +/* lower bit position of bitfield et_val{f}[f:0] */ +#define rpf_et_valf_shift 0 +/* width of bitfield et_val{f}[f:0] */ +#define rpf_et_valf_width 16 +/* default value of bitfield et_val{f}[f:0] */ +#define rpf_et_valf_default 0x0 + +/* rx vl_inner_tpid[f:0] bitfield definitions + * preprocessor definitions for the bitfield "vl_inner_tpid[f:0]". + * port="pif_rpf_vl_inner_tpid_i[15:0]" + */ + +/* register address for bitfield vl_inner_tpid[f:0] */ +#define HW_ATL_RPF_VL_INNER_TPID_ADR 0x00005284 +/* bitmask for bitfield vl_inner_tpid[f:0] */ +#define HW_ATL_RPF_VL_INNER_TPID_MSK 0x0000ffff +/* inverted bitmask for bitfield vl_inner_tpid[f:0] */ +#define HW_ATL_RPF_VL_INNER_TPID_MSKN 0xffff0000 +/* lower bit position of bitfield vl_inner_tpid[f:0] */ +#define HW_ATL_RPF_VL_INNER_TPID_SHIFT 0 +/* width of bitfield vl_inner_tpid[f:0] */ +#define HW_ATL_RPF_VL_INNER_TPID_WIDTH 16 +/* default value of bitfield vl_inner_tpid[f:0] */ +#define HW_ATL_RPF_VL_INNER_TPID_DEFAULT 0x8100 + +/* rx vl_outer_tpid[f:0] bitfield definitions + * preprocessor definitions for the bitfield "vl_outer_tpid[f:0]". + * port="pif_rpf_vl_outer_tpid_i[15:0]" + */ + +/* register address for bitfield vl_outer_tpid[f:0] */ +#define HW_ATL_RPF_VL_OUTER_TPID_ADR 0x00005284 +/* bitmask for bitfield vl_outer_tpid[f:0] */ +#define HW_ATL_RPF_VL_OUTER_TPID_MSK 0xffff0000 +/* inverted bitmask for bitfield vl_outer_tpid[f:0] */ +#define HW_ATL_RPF_VL_OUTER_TPID_MSKN 0x0000ffff +/* lower bit position of bitfield vl_outer_tpid[f:0] */ +#define HW_ATL_RPF_VL_OUTER_TPID_SHIFT 16 +/* width of bitfield vl_outer_tpid[f:0] */ +#define HW_ATL_RPF_VL_OUTER_TPID_WIDTH 16 +/* default value of bitfield vl_outer_tpid[f:0] */ +#define HW_ATL_RPF_VL_OUTER_TPID_DEFAULT 0x88a8 + +/* rx vl_promis_mode bitfield definitions + * preprocessor definitions for the bitfield "vl_promis_mode". + * port="pif_rpf_vl_promis_mode_i" + */ + +/* register address for bitfield vl_promis_mode */ +#define HW_ATL_RPF_VL_PROMIS_MODE_ADR 0x00005280 +/* bitmask for bitfield vl_promis_mode */ +#define HW_ATL_RPF_VL_PROMIS_MODE_MSK 0x00000002 +/* inverted bitmask for bitfield vl_promis_mode */ +#define HW_ATL_RPF_VL_PROMIS_MODE_MSKN 0xfffffffd +/* lower bit position of bitfield vl_promis_mode */ +#define HW_ATL_RPF_VL_PROMIS_MODE_SHIFT 1 +/* width of bitfield vl_promis_mode */ +#define HW_ATL_RPF_VL_PROMIS_MODE_WIDTH 1 +/* default value of bitfield vl_promis_mode */ +#define HW_ATL_RPF_VL_PROMIS_MODE_DEFAULT 0x0 + +/* RX vl_accept_untagged_mode Bitfield Definitions + * Preprocessor definitions for the bitfield "vl_accept_untagged_mode". 
+ * PORT="pif_rpf_vl_accept_untagged_i" + */ + +/* Register address for bitfield vl_accept_untagged_mode */ +#define HW_ATL_RPF_VL_ACCEPT_UNTAGGED_MODE_ADR 0x00005280 +/* Bitmask for bitfield vl_accept_untagged_mode */ +#define HW_ATL_RPF_VL_ACCEPT_UNTAGGED_MODE_MSK 0x00000004 +/* Inverted bitmask for bitfield vl_accept_untagged_mode */ +#define HW_ATL_RPF_VL_ACCEPT_UNTAGGED_MODE_MSKN 0xFFFFFFFB +/* Lower bit position of bitfield vl_accept_untagged_mode */ +#define HW_ATL_RPF_VL_ACCEPT_UNTAGGED_MODE_SHIFT 2 +/* Width of bitfield vl_accept_untagged_mode */ +#define HW_ATL_RPF_VL_ACCEPT_UNTAGGED_MODE_WIDTH 1 +/* Default value of bitfield vl_accept_untagged_mode */ +#define HW_ATL_RPF_VL_ACCEPT_UNTAGGED_MODE_DEFAULT 0x0 + +/* rX vl_untagged_act[2:0] Bitfield Definitions + * Preprocessor definitions for the bitfield "vl_untagged_act[2:0]". + * PORT="pif_rpf_vl_untagged_act_i[2:0]" + */ + +/* Register address for bitfield vl_untagged_act[2:0] */ +#define HW_ATL_RPF_VL_UNTAGGED_ACT_ADR 0x00005280 +/* Bitmask for bitfield vl_untagged_act[2:0] */ +#define HW_ATL_RPF_VL_UNTAGGED_ACT_MSK 0x00000038 +/* Inverted bitmask for bitfield vl_untagged_act[2:0] */ +#define HW_ATL_RPF_VL_UNTAGGED_ACT_MSKN 0xFFFFFFC7 +/* Lower bit position of bitfield vl_untagged_act[2:0] */ +#define HW_ATL_RPF_VL_UNTAGGED_ACT_SHIFT 3 +/* Width of bitfield vl_untagged_act[2:0] */ +#define HW_ATL_RPF_VL_UNTAGGED_ACT_WIDTH 3 +/* Default value of bitfield vl_untagged_act[2:0] */ +#define HW_ATL_RPF_VL_UNTAGGED_ACT_DEFAULT 0x0 + +/* RX vl_en{F} Bitfield Definitions + * Preprocessor definitions for the bitfield "vl_en{F}". + * Parameter: filter {F} | stride size 0x4 | range [0, 15] + * PORT="pif_rpf_vl_en_i[0]" + */ + +/* Register address for bitfield vl_en{F} */ +#define HW_ATL_RPF_VL_EN_F_ADR(filter) (0x00005290 + (filter) * 0x4) +/* Bitmask for bitfield vl_en{F} */ +#define HW_ATL_RPF_VL_EN_F_MSK 0x80000000 +/* Inverted bitmask for bitfield vl_en{F} */ +#define HW_ATL_RPF_VL_EN_F_MSKN 0x7FFFFFFF +/* Lower bit position of bitfield vl_en{F} */ +#define HW_ATL_RPF_VL_EN_F_SHIFT 31 +/* Width of bitfield vl_en{F} */ +#define HW_ATL_RPF_VL_EN_F_WIDTH 1 +/* Default value of bitfield vl_en{F} */ +#define HW_ATL_RPF_VL_EN_F_DEFAULT 0x0 + +/* RX vl_act{F}[2:0] Bitfield Definitions + * Preprocessor definitions for the bitfield "vl_act{F}[2:0]". + * Parameter: filter {F} | stride size 0x4 | range [0, 15] + * PORT="pif_rpf_vl_act0_i[2:0]" + */ + +/* Register address for bitfield vl_act{F}[2:0] */ +#define HW_ATL_RPF_VL_ACT_F_ADR(filter) (0x00005290 + (filter) * 0x4) +/* Bitmask for bitfield vl_act{F}[2:0] */ +#define HW_ATL_RPF_VL_ACT_F_MSK 0x00070000 +/* Inverted bitmask for bitfield vl_act{F}[2:0] */ +#define HW_ATL_RPF_VL_ACT_F_MSKN 0xFFF8FFFF +/* Lower bit position of bitfield vl_act{F}[2:0] */ +#define HW_ATL_RPF_VL_ACT_F_SHIFT 16 +/* Width of bitfield vl_act{F}[2:0] */ +#define HW_ATL_RPF_VL_ACT_F_WIDTH 3 +/* Default value of bitfield vl_act{F}[2:0] */ +#define HW_ATL_RPF_VL_ACT_F_DEFAULT 0x0 + +/* RX vl_id{F}[B:0] Bitfield Definitions + * Preprocessor definitions for the bitfield "vl_id{F}[B:0]". 
+ * Parameter: filter {F} | stride size 0x4 | range [0, 15]
+ * PORT="pif_rpf_vl_id0_i[11:0]"
+ */
+
+/* Register address for bitfield vl_id{F}[B:0] */
+#define HW_ATL_RPF_VL_ID_F_ADR(filter) (0x00005290 + (filter) * 0x4)
+/* Bitmask for bitfield vl_id{F}[B:0] */
+#define HW_ATL_RPF_VL_ID_F_MSK 0x00000FFF
+/* Inverted bitmask for bitfield vl_id{F}[B:0] */
+#define HW_ATL_RPF_VL_ID_F_MSKN 0xFFFFF000
+/* Lower bit position of bitfield vl_id{F}[B:0] */
+#define HW_ATL_RPF_VL_ID_F_SHIFT 0
+/* Width of bitfield vl_id{F}[B:0] */
+#define HW_ATL_RPF_VL_ID_F_WIDTH 12
+/* Default value of bitfield vl_id{F}[B:0] */
+#define HW_ATL_RPF_VL_ID_F_DEFAULT 0x0
+
+/* RX vl_rxq_en{F} Bitfield Definitions
+ * Preprocessor definitions for the bitfield "vl_rxq_en{F}".
+ * Parameter: filter {F} | stride size 0x4 | range [0, 15]
+ * PORT="pif_rpf_vl_rxq_en_i"
+ */
+
+/* Register address for bitfield vl_rxq_en{F} */
+#define HW_ATL_RPF_VL_RXQ_EN_F_ADR(filter) (0x00005290 + (filter) * 0x4)
+/* Bitmask for bitfield vl_rxq_en{F} */
+#define HW_ATL_RPF_VL_RXQ_EN_F_MSK 0x10000000
+/* Inverted bitmask for bitfield vl_rxq_en{F} */
+#define HW_ATL_RPF_VL_RXQ_EN_F_MSKN 0xEFFFFFFF
+/* Lower bit position of bitfield vl_rxq_en{F} */
+#define HW_ATL_RPF_VL_RXQ_EN_F_SHIFT 28
+/* Width of bitfield vl_rxq_en{F} */
+#define HW_ATL_RPF_VL_RXQ_EN_F_WIDTH 1
+/* Default value of bitfield vl_rxq_en{F} */
+#define HW_ATL_RPF_VL_RXQ_EN_F_DEFAULT 0x0
+
+/* RX vl_rxq{F}[4:0] Bitfield Definitions
+ * Preprocessor definitions for the bitfield "vl_rxq{F}[4:0]".
+ * Parameter: filter {F} | stride size 0x4 | range [0, 15]
+ * PORT="pif_rpf_vl_rxq0_i[4:0]"
+ */
+
+/* Register address for bitfield vl_rxq{F}[4:0] */
+#define HW_ATL_RPF_VL_RXQ_F_ADR(filter) (0x00005290 + (filter) * 0x4)
+/* Bitmask for bitfield vl_rxq{F}[4:0] */
+#define HW_ATL_RPF_VL_RXQ_F_MSK 0x01F00000
+/* Inverted bitmask for bitfield vl_rxq{F}[4:0] */
+#define HW_ATL_RPF_VL_RXQ_F_MSKN 0xFE0FFFFF
+/* Lower bit position of bitfield vl_rxq{F}[4:0] */
+#define HW_ATL_RPF_VL_RXQ_F_SHIFT 20
+/* Width of bitfield vl_rxq{F}[4:0] */
+#define HW_ATL_RPF_VL_RXQ_F_WIDTH 5
+/* Default value of bitfield vl_rxq{F}[4:0] */
+#define HW_ATL_RPF_VL_RXQ_F_DEFAULT 0x0
+
+/* rx et_en{f} bitfield definitions
+ * preprocessor definitions for the bitfield "et_en{f}".
+ * parameter: filter {f} | stride size 0x4 | range [0, 15]
+ * port="pif_rpf_et_en_i[0]"
+ */
+
+/* register address for bitfield et_en{f} */
+#define HW_ATL_RPF_ET_ENF_ADR(filter) (0x00005300 + (filter) * 0x4)
+/* bitmask for bitfield et_en{f} */
+#define HW_ATL_RPF_ET_ENF_MSK 0x80000000
+/* inverted bitmask for bitfield et_en{f} */
+#define HW_ATL_RPF_ET_ENF_MSKN 0x7fffffff
+/* lower bit position of bitfield et_en{f} */
+#define HW_ATL_RPF_ET_ENF_SHIFT 31
+/* width of bitfield et_en{f} */
+#define HW_ATL_RPF_ET_ENF_WIDTH 1
+/* default value of bitfield et_en{f} */
+#define HW_ATL_RPF_ET_ENF_DEFAULT 0x0
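+
+/*
+ * Usage sketch (illustrative only, kept out of the build with "#if 0"):
+ * every bitfield above follows the same ADR/MSK/MSKN/SHIFT/WIDTH pattern,
+ * so a field is programmed with a read-modify-write of its 32-bit register.
+ * The example below fills in one VLAN filter entry; aq_example_reg_read(),
+ * aq_example_reg_write() and struct aq_hw stand in for the driver's real
+ * MMIO accessors and device context and are assumptions, not part of this
+ * header.
+ */
+#if 0	/* example only */
+static inline void
+aq_example_set_vlan_filter(struct aq_hw *hw, int filter, u16 vlan_id)
+{
+	u32 val;
+
+	/* All vl_* fields of a filter share one register: 0x5290 + filter * 4. */
+	val = aq_example_reg_read(hw, HW_ATL_RPF_VL_ID_F_ADR(filter));
+	/* Clear vl_id{F}[11:0] with the inverted mask, then insert the new ID. */
+	val &= HW_ATL_RPF_VL_ID_F_MSKN;
+	val |= ((u32)vlan_id << HW_ATL_RPF_VL_ID_F_SHIFT) & HW_ATL_RPF_VL_ID_F_MSK;
+	/* Enable the filter by setting vl_en{F} (bit 31 of the same register). */
+	val |= HW_ATL_RPF_VL_EN_F_MSK;
+	aq_example_reg_write(hw, HW_ATL_RPF_VL_ID_F_ADR(filter), val);
+}
+#endif
+
+/* rx et_up{f}_en bitfield definitions
+ * preprocessor definitions for the bitfield "et_up{f}_en".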
+ * parameter: filter {f} | stride size 0x4 | range [0, 15] + * port="pif_rpf_et_up_en_i[0]" + */ + +/* register address for bitfield et_up{f}_en */ +#define HW_ATL_RPF_ET_UPFEN_ADR(filter) (0x00005300 + (filter) * 0x4) +/* bitmask for bitfield et_up{f}_en */ +#define HW_ATL_RPF_ET_UPFEN_MSK 0x40000000 +/* inverted bitmask for bitfield et_up{f}_en */ +#define HW_ATL_RPF_ET_UPFEN_MSKN 0xbfffffff +/* lower bit position of bitfield et_up{f}_en */ +#define HW_ATL_RPF_ET_UPFEN_SHIFT 30 +/* width of bitfield et_up{f}_en */ +#define HW_ATL_RPF_ET_UPFEN_WIDTH 1 +/* default value of bitfield et_up{f}_en */ +#define HW_ATL_RPF_ET_UPFEN_DEFAULT 0x0 + +/* rx et_rxq{f}_en bitfield definitions + * preprocessor definitions for the bitfield "et_rxq{f}_en". + * parameter: filter {f} | stride size 0x4 | range [0, 15] + * port="pif_rpf_et_rxq_en_i[0]" + */ + +/* register address for bitfield et_rxq{f}_en */ +#define HW_ATL_RPF_ET_RXQFEN_ADR(filter) (0x00005300 + (filter) * 0x4) +/* bitmask for bitfield et_rxq{f}_en */ +#define HW_ATL_RPF_ET_RXQFEN_MSK 0x20000000 +/* inverted bitmask for bitfield et_rxq{f}_en */ +#define HW_ATL_RPF_ET_RXQFEN_MSKN 0xdfffffff +/* lower bit position of bitfield et_rxq{f}_en */ +#define HW_ATL_RPF_ET_RXQFEN_SHIFT 29 +/* width of bitfield et_rxq{f}_en */ +#define HW_ATL_RPF_ET_RXQFEN_WIDTH 1 +/* default value of bitfield et_rxq{f}_en */ +#define HW_ATL_RPF_ET_RXQFEN_DEFAULT 0x0 + +/* rx et_up{f}[2:0] bitfield definitions + * preprocessor definitions for the bitfield "et_up{f}[2:0]". + * parameter: filter {f} | stride size 0x4 | range [0, 15] + * port="pif_rpf_et_up0_i[2:0]" + */ + +/* register address for bitfield et_up{f}[2:0] */ +#define HW_ATL_RPF_ET_UPF_ADR(filter) (0x00005300 + (filter) * 0x4) +/* bitmask for bitfield et_up{f}[2:0] */ +#define HW_ATL_RPF_ET_UPF_MSK 0x1c000000 +/* inverted bitmask for bitfield et_up{f}[2:0] */ +#define HW_ATL_RPF_ET_UPF_MSKN 0xe3ffffff +/* lower bit position of bitfield et_up{f}[2:0] */ +#define HW_ATL_RPF_ET_UPF_SHIFT 26 +/* width of bitfield et_up{f}[2:0] */ +#define HW_ATL_RPF_ET_UPF_WIDTH 3 +/* default value of bitfield et_up{f}[2:0] */ +#define HW_ATL_RPF_ET_UPF_DEFAULT 0x0 + +/* rx et_rxq{f}[4:0] bitfield definitions + * preprocessor definitions for the bitfield "et_rxq{f}[4:0]". + * parameter: filter {f} | stride size 0x4 | range [0, 15] + * port="pif_rpf_et_rxq0_i[4:0]" + */ + +/* register address for bitfield et_rxq{f}[4:0] */ +#define HW_ATL_RPF_ET_RXQF_ADR(filter) (0x00005300 + (filter) * 0x4) +/* bitmask for bitfield et_rxq{f}[4:0] */ +#define HW_ATL_RPF_ET_RXQF_MSK 0x01f00000 +/* inverted bitmask for bitfield et_rxq{f}[4:0] */ +#define HW_ATL_RPF_ET_RXQF_MSKN 0xfe0fffff +/* lower bit position of bitfield et_rxq{f}[4:0] */ +#define HW_ATL_RPF_ET_RXQF_SHIFT 20 +/* width of bitfield et_rxq{f}[4:0] */ +#define HW_ATL_RPF_ET_RXQF_WIDTH 5 +/* default value of bitfield et_rxq{f}[4:0] */ +#define HW_ATL_RPF_ET_RXQF_DEFAULT 0x0 + +/* rx et_mng_rxq{f} bitfield definitions + * preprocessor definitions for the bitfield "et_mng_rxq{f}". 
+ * parameter: filter {f} | stride size 0x4 | range [0, 15] + * port="pif_rpf_et_mng_rxq_i[0]" + */ + +/* register address for bitfield et_mng_rxq{f} */ +#define HW_ATL_RPF_ET_MNG_RXQF_ADR(filter) (0x00005300 + (filter) * 0x4) +/* bitmask for bitfield et_mng_rxq{f} */ +#define HW_ATL_RPF_ET_MNG_RXQF_MSK 0x00080000 +/* inverted bitmask for bitfield et_mng_rxq{f} */ +#define HW_ATL_RPF_ET_MNG_RXQF_MSKN 0xfff7ffff +/* lower bit position of bitfield et_mng_rxq{f} */ +#define HW_ATL_RPF_ET_MNG_RXQF_SHIFT 19 +/* width of bitfield et_mng_rxq{f} */ +#define HW_ATL_RPF_ET_MNG_RXQF_WIDTH 1 +/* default value of bitfield et_mng_rxq{f} */ +#define HW_ATL_RPF_ET_MNG_RXQF_DEFAULT 0x0 + +/* rx et_act{f}[2:0] bitfield definitions + * preprocessor definitions for the bitfield "et_act{f}[2:0]". + * parameter: filter {f} | stride size 0x4 | range [0, 15] + * port="pif_rpf_et_act0_i[2:0]" + */ + +/* register address for bitfield et_act{f}[2:0] */ +#define HW_ATL_RPF_ET_ACTF_ADR(filter) (0x00005300 + (filter) * 0x4) +/* bitmask for bitfield et_act{f}[2:0] */ +#define HW_ATL_RPF_ET_ACTF_MSK 0x00070000 +/* inverted bitmask for bitfield et_act{f}[2:0] */ +#define HW_ATL_RPF_ET_ACTF_MSKN 0xfff8ffff +/* lower bit position of bitfield et_act{f}[2:0] */ +#define HW_ATL_RPF_ET_ACTF_SHIFT 16 +/* width of bitfield et_act{f}[2:0] */ +#define HW_ATL_RPF_ET_ACTF_WIDTH 3 +/* default value of bitfield et_act{f}[2:0] */ +#define HW_ATL_RPF_ET_ACTF_DEFAULT 0x0 + +/* rx et_val{f}[f:0] bitfield definitions + * preprocessor definitions for the bitfield "et_val{f}[f:0]". + * parameter: filter {f} | stride size 0x4 | range [0, 15] + * port="pif_rpf_et_val0_i[15:0]" + */ + +/* register address for bitfield et_val{f}[f:0] */ +#define HW_ATL_RPF_ET_VALF_ADR(filter) (0x00005300 + (filter) * 0x4) +/* bitmask for bitfield et_val{f}[f:0] */ +#define HW_ATL_RPF_ET_VALF_MSK 0x0000ffff +/* inverted bitmask for bitfield et_val{f}[f:0] */ +#define HW_ATL_RPF_ET_VALF_MSKN 0xffff0000 +/* lower bit position of bitfield et_val{f}[f:0] */ +#define HW_ATL_RPF_ET_VALF_SHIFT 0 +/* width of bitfield et_val{f}[f:0] */ +#define HW_ATL_RPF_ET_VALF_WIDTH 16 +/* default value of bitfield et_val{f}[f:0] */ +#define HW_ATL_RPF_ET_VALF_DEFAULT 0x0 + +/* RX l3_l4_en{F} Bitfield Definitions + * Preprocessor definitions for the bitfield "l3_l4_en{F}". + * Parameter: filter {F} | stride size 0x4 | range [0, 7] + * PORT="pif_rpf_l3_l4_en_i[0]" + */ + +/* Register address for bitfield l3_l4_en{F} */ +#define HW_ATL_RPF_L3_L4_ENF_ADR(filter) (0x00005380u + (filter) * 0x4) +/* Bitmask for bitfield l3_l4_en{F} */ +#define HW_ATL_RPF_L3_L4_ENF_MSK 0x80000000u +/* Inverted bitmask for bitfield l3_l4_en{F} */ +#define HW_ATL_RPF_L3_L4_ENF_MSKN 0x7FFFFFFFu +/* Lower bit position of bitfield l3_l4_en{F} */ +#define HW_ATL_RPF_L3_L4_ENF_SHIFT 31 +/* Width of bitfield l3_l4_en{F} */ +#define HW_ATL_RPF_L3_L4_ENF_WIDTH 1 +/* Default value of bitfield l3_l4_en{F} */ +#define HW_ATL_RPF_L3_L4_ENF_DEFAULT 0x0 + +/* RX l3_v6_en{F} Bitfield Definitions + * Preprocessor definitions for the bitfield "l3_v6_en{F}". 
+ * Parameter: filter {F} | stride size 0x4 | range [0, 7] + * PORT="pif_rpf_l3_v6_en_i[0]" + */ +/* Register address for bitfield l3_v6_en{F} */ +#define HW_ATL_RPF_L3_V6_ENF_ADR(filter) (0x00005380u + (filter) * 0x4) +/* Bitmask for bitfield l3_v6_en{F} */ +#define HW_ATL_RPF_L3_V6_ENF_MSK 0x40000000u +/* Inverted bitmask for bitfield l3_v6_en{F} */ +#define HW_ATL_RPF_L3_V6_ENF_MSKN 0xBFFFFFFFu +/* Lower bit position of bitfield l3_v6_en{F} */ +#define HW_ATL_RPF_L3_V6_ENF_SHIFT 30 +/* Width of bitfield l3_v6_en{F} */ +#define HW_ATL_RPF_L3_V6_ENF_WIDTH 1 +/* Default value of bitfield l3_v6_en{F} */ +#define HW_ATL_RPF_L3_V6_ENF_DEFAULT 0x0 + +/* RX l3_sa{F}_en Bitfield Definitions + * Preprocessor definitions for the bitfield "l3_sa{F}_en". + * Parameter: filter {F} | stride size 0x4 | range [0, 7] + * PORT="pif_rpf_l3_sa_en_i[0]" + */ + +/* Register address for bitfield l3_sa{F}_en */ +#define HW_ATL_RPF_L3_SAF_EN_ADR(filter) (0x00005380u + (filter) * 0x4) +/* Bitmask for bitfield l3_sa{F}_en */ +#define HW_ATL_RPF_L3_SAF_EN_MSK 0x20000000u +/* Inverted bitmask for bitfield l3_sa{F}_en */ +#define HW_ATL_RPF_L3_SAF_EN_MSKN 0xDFFFFFFFu +/* Lower bit position of bitfield l3_sa{F}_en */ +#define HW_ATL_RPF_L3_SAF_EN_SHIFT 29 +/* Width of bitfield l3_sa{F}_en */ +#define HW_ATL_RPF_L3_SAF_EN_WIDTH 1 +/* Default value of bitfield l3_sa{F}_en */ +#define HW_ATL_RPF_L3_SAF_EN_DEFAULT 0x0 + +/* RX l3_da{F}_en Bitfield Definitions + * Preprocessor definitions for the bitfield "l3_da{F}_en". + * Parameter: filter {F} | stride size 0x4 | range [0, 7] + * PORT="pif_rpf_l3_da_en_i[0]" + */ + +/* Register address for bitfield l3_da{F}_en */ +#define HW_ATL_RPF_L3_DAF_EN_ADR(filter) (0x00005380u + (filter) * 0x4) +/* Bitmask for bitfield l3_da{F}_en */ +#define HW_ATL_RPF_L3_DAF_EN_MSK 0x10000000u +/* Inverted bitmask for bitfield l3_da{F}_en */ +#define HW_ATL_RPF_L3_DAF_EN_MSKN 0xEFFFFFFFu +/* Lower bit position of bitfield l3_da{F}_en */ +#define HW_ATL_RPF_L3_DAF_EN_SHIFT 28 +/* Width of bitfield l3_da{F}_en */ +#define HW_ATL_RPF_L3_DAF_EN_WIDTH 1 +/* Default value of bitfield l3_da{F}_en */ +#define HW_ATL_RPF_L3_DAF_EN_DEFAULT 0x0 + +/* RX l4_sp{F}_en Bitfield Definitions + * Preprocessor definitions for the bitfield "l4_sp{F}_en". + * Parameter: filter {F} | stride size 0x4 | range [0, 7] + * PORT="pif_rpf_l4_sp_en_i[0]" + */ + +/* Register address for bitfield l4_sp{F}_en */ +#define HW_ATL_RPF_L4_SPF_EN_ADR(filter) (0x00005380u + (filter) * 0x4) +/* Bitmask for bitfield l4_sp{F}_en */ +#define HW_ATL_RPF_L4_SPF_EN_MSK 0x08000000u +/* Inverted bitmask for bitfield l4_sp{F}_en */ +#define HW_ATL_RPF_L4_SPF_EN_MSKN 0xF7FFFFFFu +/* Lower bit position of bitfield l4_sp{F}_en */ +#define HW_ATL_RPF_L4_SPF_EN_SHIFT 27 +/* Width of bitfield l4_sp{F}_en */ +#define HW_ATL_RPF_L4_SPF_EN_WIDTH 1 +/* Default value of bitfield l4_sp{F}_en */ +#define HW_ATL_RPF_L4_SPF_EN_DEFAULT 0x0 + +/* RX l4_dp{F}_en Bitfield Definitions + * Preprocessor definitions for the bitfield "l4_dp{F}_en". 
+ * Parameter: filter {F} | stride size 0x4 | range [0, 7] + * PORT="pif_rpf_l4_dp_en_i[0]" + */ + +/* Register address for bitfield l4_dp{F}_en */ +#define HW_ATL_RPF_L4_DPF_EN_ADR(filter) (0x00005380u + (filter) * 0x4) +/* Bitmask for bitfield l4_dp{F}_en */ +#define HW_ATL_RPF_L4_DPF_EN_MSK 0x04000000u +/* Inverted bitmask for bitfield l4_dp{F}_en */ +#define HW_ATL_RPF_L4_DPF_EN_MSKN 0xFBFFFFFFu +/* Lower bit position of bitfield l4_dp{F}_en */ +#define HW_ATL_RPF_L4_DPF_EN_SHIFT 26 +/* Width of bitfield l4_dp{F}_en */ +#define HW_ATL_RPF_L4_DPF_EN_WIDTH 1 +/* Default value of bitfield l4_dp{F}_en */ +#define HW_ATL_RPF_L4_DPF_EN_DEFAULT 0x0 + +/* RX l4_prot{F}_en Bitfield Definitions + * Preprocessor definitions for the bitfield "l4_prot{F}_en". + * Parameter: filter {F} | stride size 0x4 | range [0, 7] + * PORT="pif_rpf_l4_prot_en_i[0]" + */ + +/* Register address for bitfield l4_prot{F}_en */ +#define HW_ATL_RPF_L4_PROTF_EN_ADR(filter) (0x00005380u + (filter) * 0x4) +/* Bitmask for bitfield l4_prot{F}_en */ +#define HW_ATL_RPF_L4_PROTF_EN_MSK 0x02000000u +/* Inverted bitmask for bitfield l4_prot{F}_en */ +#define HW_ATL_RPF_L4_PROTF_EN_MSKN 0xFDFFFFFFu +/* Lower bit position of bitfield l4_prot{F}_en */ +#define HW_ATL_RPF_L4_PROTF_EN_SHIFT 25 +/* Width of bitfield l4_prot{F}_en */ +#define HW_ATL_RPF_L4_PROTF_EN_WIDTH 1 +/* Default value of bitfield l4_prot{F}_en */ +#define HW_ATL_RPF_L4_PROTF_EN_DEFAULT 0x0 + +/* RX l3_arp{F}_en Bitfield Definitions + * Preprocessor definitions for the bitfield "l3_arp{F}_en". + * Parameter: filter {F} | stride size 0x4 | range [0, 7] + * PORT="pif_rpf_l3_arp_en_i[0]" + */ + +/* Register address for bitfield l3_arp{F}_en */ +#define HW_ATL_RPF_L3_ARPF_EN_ADR(filter) (0x00005380u + (filter) * 0x4) +/* Bitmask for bitfield l3_arp{F}_en */ +#define HW_ATL_RPF_L3_ARPF_EN_MSK 0x01000000u +/* Inverted bitmask for bitfield l3_arp{F}_en */ +#define HW_ATL_RPF_L3_ARPF_EN_MSKN 0xFEFFFFFFu +/* Lower bit position of bitfield l3_arp{F}_en */ +#define HW_ATL_RPF_L3_ARPF_EN_SHIFT 24 +/* Width of bitfield l3_arp{F}_en */ +#define HW_ATL_RPF_L3_ARPF_EN_WIDTH 1 +/* Default value of bitfield l3_arp{F}_en */ +#define HW_ATL_RPF_L3_ARPF_EN_DEFAULT 0x0 + +/* RX l3_l4_rxq{F}_en Bitfield Definitions + * Preprocessor definitions for the bitfield "l3_l4_rxq{F}_en". + * Parameter: filter {F} | stride size 0x4 | range [0, 7] + * PORT="pif_rpf_l3_l4_rxq_en_i[0]" + */ + +/* Register address for bitfield l3_l4_RXq{F}_en */ +#define HW_ATL_RPF_L3_L4_RXQF_EN_ADR(filter) (0x00005380u + (filter) * 0x4) +/* Bitmask for bitfield l3_l4_RXq{F}_en */ +#define HW_ATL_RPF_L3_L4_RXQF_EN_MSK 0x00800000u +/* Inverted bitmask for bitfield l3_l4_RXq{F}_en */ +#define HW_ATL_RPF_L3_L4_RXQF_EN_MSKN 0xFF7FFFFFu +/* Lower bit position of bitfield l3_l4_RXq{F}_en */ +#define HW_ATL_RPF_L3_L4_RXQF_EN_SHIFT 23 +/* Width of bitfield l3_l4_RXq{F}_en */ +#define HW_ATL_RPF_L3_L4_RXQF_EN_WIDTH 1 +/* Default value of bitfield l3_l4_RXq{F}_en */ +#define HW_ATL_RPF_L3_L4_RXQF_EN_DEFAULT 0x0 + +/* RX l3_l4_mng_RXq{F} Bitfield Definitions + * Preprocessor definitions for the bitfield "l3_l4_mng_RXq{F}". 
+ * Parameter: filter {F} | stride size 0x4 | range [0, 7] + * PORT="pif_rpf_l3_l4_mng_rxq_i[0]" + */ + +/* Register address for bitfield l3_l4_mng_rxq{F} */ +#define HW_ATL_RPF_L3_L4_MNG_RXQF_ADR(filter) (0x00005380u + (filter) * 0x4) +/* Bitmask for bitfield l3_l4_mng_rxq{F} */ +#define HW_ATL_RPF_L3_L4_MNG_RXQF_MSK 0x00400000u +/* Inverted bitmask for bitfield l3_l4_mng_rxq{F} */ +#define HW_ATL_RPF_L3_L4_MNG_RXQF_MSKN 0xFFBFFFFFu +/* Lower bit position of bitfield l3_l4_mng_rxq{F} */ +#define HW_ATL_RPF_L3_L4_MNG_RXQF_SHIFT 22 +/* Width of bitfield l3_l4_mng_rxq{F} */ +#define HW_ATL_RPF_L3_L4_MNG_RXQF_WIDTH 1 +/* Default value of bitfield l3_l4_mng_rxq{F} */ +#define HW_ATL_RPF_L3_L4_MNG_RXQF_DEFAULT 0x0 + +/* RX l3_l4_act{F}[2:0] Bitfield Definitions + * Preprocessor definitions for the bitfield "l3_l4_act{F}[2:0]". + * Parameter: filter {F} | stride size 0x4 | range [0, 7] + * PORT="pif_rpf_l3_l4_act0_i[2:0]" + */ + +/* Register address for bitfield l3_l4_act{F}[2:0] */ +#define HW_ATL_RPF_L3_L4_ACTF_ADR(filter) (0x00005380u + (filter) * 0x4) +/* Bitmask for bitfield l3_l4_act{F}[2:0] */ +#define HW_ATL_RPF_L3_L4_ACTF_MSK 0x00070000u +/* Inverted bitmask for bitfield l3_l4_act{F}[2:0] */ +#define HW_ATL_RPF_L3_L4_ACTF_MSKN 0xFFF8FFFFu +/* Lower bit position of bitfield l3_l4_act{F}[2:0] */ +#define HW_ATL_RPF_L3_L4_ACTF_SHIFT 16 +/* Width of bitfield l3_l4_act{F}[2:0] */ +#define HW_ATL_RPF_L3_L4_ACTF_WIDTH 3 +/* Default value of bitfield l3_l4_act{F}[2:0] */ +#define HW_ATL_RPF_L3_L4_ACTF_DEFAULT 0x0 + +/* RX l3_l4_rxq{F}[4:0] Bitfield Definitions + * Preprocessor definitions for the bitfield "l3_l4_rxq{F}[4:0]". + * Parameter: filter {F} | stride size 0x4 | range [0, 7] + * PORT="pif_rpf_l3_l4_rxq0_i[4:0]" + */ + +/* Register address for bitfield l3_l4_rxq{F}[4:0] */ +#define HW_ATL_RPF_L3_L4_RXQF_ADR(filter) (0x00005380u + (filter) * 0x4) +/* Bitmask for bitfield l3_l4_rxq{F}[4:0] */ +#define HW_ATL_RPF_L3_L4_RXQF_MSK 0x00001F00u +/* Inverted bitmask for bitfield l3_l4_rxq{F}[4:0] */ +#define HW_ATL_RPF_L3_L4_RXQF_MSKN 0xFFFFE0FFu +/* Lower bit position of bitfield l3_l4_rxq{F}[4:0] */ +#define HW_ATL_RPF_L3_L4_RXQF_SHIFT 8 +/* Width of bitfield l3_l4_rxq{F}[4:0] */ +#define HW_ATL_RPF_L3_L4_RXQF_WIDTH 5 +/* Default value of bitfield l3_l4_rxq{F}[4:0] */ +#define HW_ATL_RPF_L3_L4_RXQF_DEFAULT 0x0 + +/* RX l4_prot{F}[2:0] Bitfield Definitions + * Preprocessor definitions for the bitfield "l4_prot{F}[2:0]". + * Parameter: filter {F} | stride size 0x4 | range [0, 7] + * PORT="pif_rpf_l4_prot0_i[2:0]" + */ + +/* Register address for bitfield l4_prot{F}[2:0] */ +#define HW_ATL_RPF_L4_PROTF_ADR(filter) (0x00005380u + (filter) * 0x4) +/* Bitmask for bitfield l4_prot{F}[2:0] */ +#define HW_ATL_RPF_L4_PROTF_MSK 0x00000007u +/* Inverted bitmask for bitfield l4_prot{F}[2:0] */ +#define HW_ATL_RPF_L4_PROTF_MSKN 0xFFFFFFF8u +/* Lower bit position of bitfield l4_prot{F}[2:0] */ +#define HW_ATL_RPF_L4_PROTF_SHIFT 0 +/* Width of bitfield l4_prot{F}[2:0] */ +#define HW_ATL_RPF_L4_PROTF_WIDTH 3 +/* Default value of bitfield l4_prot{F}[2:0] */ +#define HW_ATL_RPF_L4_PROTF_DEFAULT 0x0 + +/* RX l4_sp{D}[F:0] Bitfield Definitions + * Preprocessor definitions for the bitfield "l4_sp{D}[F:0]". 
+ * Parameter: srcport {D} | stride size 0x4 | range [0, 7] + * PORT="pif_rpf_l4_sp0_i[15:0]" + */ + +/* Register address for bitfield l4_sp{D}[F:0] */ +#define HW_ATL_RPF_L4_SPD_ADR(srcport) (0x00005400u + (srcport) * 0x4) +/* Bitmask for bitfield l4_sp{D}[F:0] */ +#define HW_ATL_RPF_L4_SPD_MSK 0x0000FFFFu +/* Inverted bitmask for bitfield l4_sp{D}[F:0] */ +#define HW_ATL_RPF_L4_SPD_MSKN 0xFFFF0000u +/* Lower bit position of bitfield l4_sp{D}[F:0] */ +#define HW_ATL_RPF_L4_SPD_SHIFT 0 +/* Width of bitfield l4_sp{D}[F:0] */ +#define HW_ATL_RPF_L4_SPD_WIDTH 16 +/* Default value of bitfield l4_sp{D}[F:0] */ +#define HW_ATL_RPF_L4_SPD_DEFAULT 0x0 + +/* RX l4_dp{D}[F:0] Bitfield Definitions + * Preprocessor definitions for the bitfield "l4_dp{D}[F:0]". + * Parameter: destport {D} | stride size 0x4 | range [0, 7] + * PORT="pif_rpf_l4_dp0_i[15:0]" + */ + +/* Register address for bitfield l4_dp{D}[F:0] */ +#define HW_ATL_RPF_L4_DPD_ADR(destport) (0x00005420u + (destport) * 0x4) +/* Bitmask for bitfield l4_dp{D}[F:0] */ +#define HW_ATL_RPF_L4_DPD_MSK 0x0000FFFFu +/* Inverted bitmask for bitfield l4_dp{D}[F:0] */ +#define HW_ATL_RPF_L4_DPD_MSKN 0xFFFF0000u +/* Lower bit position of bitfield l4_dp{D}[F:0] */ +#define HW_ATL_RPF_L4_DPD_SHIFT 0 +/* Width of bitfield l4_dp{D}[F:0] */ +#define HW_ATL_RPF_L4_DPD_WIDTH 16 +/* Default value of bitfield l4_dp{D}[F:0] */ +#define HW_ATL_RPF_L4_DPD_DEFAULT 0x0 + +/* rx ipv4_chk_en bitfield definitions + * preprocessor definitions for the bitfield "ipv4_chk_en". + * port="pif_rpo_ipv4_chk_en_i" + */ + +/* register address for bitfield ipv4_chk_en */ +#define rpo_ipv4chk_en_adr 0x00005580 +/* bitmask for bitfield ipv4_chk_en */ +#define rpo_ipv4chk_en_msk 0x00000002 +/* inverted bitmask for bitfield ipv4_chk_en */ +#define rpo_ipv4chk_en_mskn 0xfffffffd +/* lower bit position of bitfield ipv4_chk_en */ +#define rpo_ipv4chk_en_shift 1 +/* width of bitfield ipv4_chk_en */ +#define rpo_ipv4chk_en_width 1 +/* default value of bitfield ipv4_chk_en */ +#define rpo_ipv4chk_en_default 0x0 + +/* rx desc{d}_vl_strip bitfield definitions + * preprocessor definitions for the bitfield "desc{d}_vl_strip". + * parameter: descriptor {d} | stride size 0x20 | range [0, 31] + * port="pif_rpo_desc_vl_strip_i[0]" + */ + +/* register address for bitfield desc{d}_vl_strip */ +#define rpo_descdvl_strip_adr(descriptor) (0x00005b08 + (descriptor) * 0x20) +/* bitmask for bitfield desc{d}_vl_strip */ +#define rpo_descdvl_strip_msk 0x20000000 +/* inverted bitmask for bitfield desc{d}_vl_strip */ +#define rpo_descdvl_strip_mskn 0xdfffffff +/* lower bit position of bitfield desc{d}_vl_strip */ +#define rpo_descdvl_strip_shift 29 +/* width of bitfield desc{d}_vl_strip */ +#define rpo_descdvl_strip_width 1 +/* default value of bitfield desc{d}_vl_strip */ +#define rpo_descdvl_strip_default 0x0 + +/* rx l4_chk_en bitfield definitions + * preprocessor definitions for the bitfield "l4_chk_en". + * port="pif_rpo_l4_chk_en_i" + */ + +/* register address for bitfield l4_chk_en */ +#define rpol4chk_en_adr 0x00005580 +/* bitmask for bitfield l4_chk_en */ +#define rpol4chk_en_msk 0x00000001 +/* inverted bitmask for bitfield l4_chk_en */ +#define rpol4chk_en_mskn 0xfffffffe +/* lower bit position of bitfield l4_chk_en */ +#define rpol4chk_en_shift 0 +/* width of bitfield l4_chk_en */ +#define rpol4chk_en_width 1 +/* default value of bitfield l4_chk_en */ +#define rpol4chk_en_default 0x0 + +/* rx reg_res_dsbl bitfield definitions + * preprocessor definitions for the bitfield "reg_res_dsbl". 
+ * port="pif_rx_reg_res_dsbl_i" + */ + +/* register address for bitfield reg_res_dsbl */ +#define rx_reg_res_dsbl_adr 0x00005000 +/* bitmask for bitfield reg_res_dsbl */ +#define rx_reg_res_dsbl_msk 0x20000000 +/* inverted bitmask for bitfield reg_res_dsbl */ +#define rx_reg_res_dsbl_mskn 0xdfffffff +/* lower bit position of bitfield reg_res_dsbl */ +#define rx_reg_res_dsbl_shift 29 +/* width of bitfield reg_res_dsbl */ +#define rx_reg_res_dsbl_width 1 +/* default value of bitfield reg_res_dsbl */ +#define rx_reg_res_dsbl_default 0x1 + +/* tx dca{d}_cpuid[7:0] bitfield definitions + * preprocessor definitions for the bitfield "dca{d}_cpuid[7:0]". + * parameter: dca {d} | stride size 0x4 | range [0, 31] + * port="pif_tdm_dca0_cpuid_i[7:0]" + */ + +/* register address for bitfield dca{d}_cpuid[7:0] */ +#define tdm_dcadcpuid_adr(dca) (0x00008400 + (dca) * 0x4) +/* bitmask for bitfield dca{d}_cpuid[7:0] */ +#define tdm_dcadcpuid_msk 0x000000ff +/* inverted bitmask for bitfield dca{d}_cpuid[7:0] */ +#define tdm_dcadcpuid_mskn 0xffffff00 +/* lower bit position of bitfield dca{d}_cpuid[7:0] */ +#define tdm_dcadcpuid_shift 0 +/* width of bitfield dca{d}_cpuid[7:0] */ +#define tdm_dcadcpuid_width 8 +/* default value of bitfield dca{d}_cpuid[7:0] */ +#define tdm_dcadcpuid_default 0x0 + +/* tx lso_en[1f:0] bitfield definitions + * preprocessor definitions for the bitfield "lso_en[1f:0]". + * port="pif_tdm_lso_en_i[31:0]" + */ + +/* register address for bitfield lso_en[1f:0] */ +#define tdm_lso_en_adr 0x00007810 +/* bitmask for bitfield lso_en[1f:0] */ +#define tdm_lso_en_msk 0xffffffff +/* inverted bitmask for bitfield lso_en[1f:0] */ +#define tdm_lso_en_mskn 0x00000000 +/* lower bit position of bitfield lso_en[1f:0] */ +#define tdm_lso_en_shift 0 +/* width of bitfield lso_en[1f:0] */ +#define tdm_lso_en_width 32 +/* default value of bitfield lso_en[1f:0] */ +#define tdm_lso_en_default 0x0 + +/* tx dca_en bitfield definitions + * preprocessor definitions for the bitfield "dca_en". + * port="pif_tdm_dca_en_i" + */ + +/* register address for bitfield dca_en */ +#define tdm_dca_en_adr 0x00008480 +/* bitmask for bitfield dca_en */ +#define tdm_dca_en_msk 0x80000000 +/* inverted bitmask for bitfield dca_en */ +#define tdm_dca_en_mskn 0x7fffffff +/* lower bit position of bitfield dca_en */ +#define tdm_dca_en_shift 31 +/* width of bitfield dca_en */ +#define tdm_dca_en_width 1 +/* default value of bitfield dca_en */ +#define tdm_dca_en_default 0x1 + +/* tx dca_mode[3:0] bitfield definitions + * preprocessor definitions for the bitfield "dca_mode[3:0]". + * port="pif_tdm_dca_mode_i[3:0]" + */ + +/* register address for bitfield dca_mode[3:0] */ +#define tdm_dca_mode_adr 0x00008480 +/* bitmask for bitfield dca_mode[3:0] */ +#define tdm_dca_mode_msk 0x0000000f +/* inverted bitmask for bitfield dca_mode[3:0] */ +#define tdm_dca_mode_mskn 0xfffffff0 +/* lower bit position of bitfield dca_mode[3:0] */ +#define tdm_dca_mode_shift 0 +/* width of bitfield dca_mode[3:0] */ +#define tdm_dca_mode_width 4 +/* default value of bitfield dca_mode[3:0] */ +#define tdm_dca_mode_default 0x0 + +/* tx dca{d}_desc_en bitfield definitions + * preprocessor definitions for the bitfield "dca{d}_desc_en". 
+ * parameter: dca {d} | stride size 0x4 | range [0, 31] + * port="pif_tdm_dca_desc_en_i[0]" + */ + +/* register address for bitfield dca{d}_desc_en */ +#define tdm_dcaddesc_en_adr(dca) (0x00008400 + (dca) * 0x4) +/* bitmask for bitfield dca{d}_desc_en */ +#define tdm_dcaddesc_en_msk 0x80000000 +/* inverted bitmask for bitfield dca{d}_desc_en */ +#define tdm_dcaddesc_en_mskn 0x7fffffff +/* lower bit position of bitfield dca{d}_desc_en */ +#define tdm_dcaddesc_en_shift 31 +/* width of bitfield dca{d}_desc_en */ +#define tdm_dcaddesc_en_width 1 +/* default value of bitfield dca{d}_desc_en */ +#define tdm_dcaddesc_en_default 0x0 + +/* tx desc{d}_en bitfield definitions + * preprocessor definitions for the bitfield "desc{d}_en". + * parameter: descriptor {d} | stride size 0x40 | range [0, 31] + * port="pif_tdm_desc_en_i[0]" + */ + +/* register address for bitfield desc{d}_en */ +#define tdm_descden_adr(descriptor) (0x00007c08 + (descriptor) * 0x40) +/* bitmask for bitfield desc{d}_en */ +#define tdm_descden_msk 0x80000000 +/* inverted bitmask for bitfield desc{d}_en */ +#define tdm_descden_mskn 0x7fffffff +/* lower bit position of bitfield desc{d}_en */ +#define tdm_descden_shift 31 +/* width of bitfield desc{d}_en */ +#define tdm_descden_width 1 +/* default value of bitfield desc{d}_en */ +#define tdm_descden_default 0x0 + +/* tx desc{d}_hd[c:0] bitfield definitions + * preprocessor definitions for the bitfield "desc{d}_hd[c:0]". + * parameter: descriptor {d} | stride size 0x40 | range [0, 31] + * port="tdm_pif_desc0_hd_o[12:0]" + */ + +/* register address for bitfield desc{d}_hd[c:0] */ +#define tdm_descdhd_adr(descriptor) (0x00007c0c + (descriptor) * 0x40) +/* bitmask for bitfield desc{d}_hd[c:0] */ +#define tdm_descdhd_msk 0x00001fff +/* inverted bitmask for bitfield desc{d}_hd[c:0] */ +#define tdm_descdhd_mskn 0xffffe000 +/* lower bit position of bitfield desc{d}_hd[c:0] */ +#define tdm_descdhd_shift 0 +/* width of bitfield desc{d}_hd[c:0] */ +#define tdm_descdhd_width 13 + +/* tx desc{d}_len[9:0] bitfield definitions + * preprocessor definitions for the bitfield "desc{d}_len[9:0]". + * parameter: descriptor {d} | stride size 0x40 | range [0, 31] + * port="pif_tdm_desc0_len_i[9:0]" + */ + +/* register address for bitfield desc{d}_len[9:0] */ +#define tdm_descdlen_adr(descriptor) (0x00007c08 + (descriptor) * 0x40) +/* bitmask for bitfield desc{d}_len[9:0] */ +#define tdm_descdlen_msk 0x00001ff8 +/* inverted bitmask for bitfield desc{d}_len[9:0] */ +#define tdm_descdlen_mskn 0xffffe007 +/* lower bit position of bitfield desc{d}_len[9:0] */ +#define tdm_descdlen_shift 3 +/* width of bitfield desc{d}_len[9:0] */ +#define tdm_descdlen_width 10 +/* default value of bitfield desc{d}_len[9:0] */ +#define tdm_descdlen_default 0x0 + +/* tx int_desc_wrb_en bitfield definitions + * preprocessor definitions for the bitfield "int_desc_wrb_en". 
+ * port="pif_tdm_int_desc_wrb_en_i" + */ + +/* register address for bitfield int_desc_wrb_en */ +#define tdm_int_desc_wrb_en_adr 0x00007b40 +/* bitmask for bitfield int_desc_wrb_en */ +#define tdm_int_desc_wrb_en_msk 0x00000002 +/* inverted bitmask for bitfield int_desc_wrb_en */ +#define tdm_int_desc_wrb_en_mskn 0xfffffffd +/* lower bit position of bitfield int_desc_wrb_en */ +#define tdm_int_desc_wrb_en_shift 1 +/* width of bitfield int_desc_wrb_en */ +#define tdm_int_desc_wrb_en_width 1 +/* default value of bitfield int_desc_wrb_en */ +#define tdm_int_desc_wrb_en_default 0x0 + +/* tx desc{d}_wrb_thresh[6:0] bitfield definitions + * preprocessor definitions for the bitfield "desc{d}_wrb_thresh[6:0]". + * parameter: descriptor {d} | stride size 0x40 | range [0, 31] + * port="pif_tdm_desc0_wrb_thresh_i[6:0]" + */ + +/* register address for bitfield desc{d}_wrb_thresh[6:0] */ +#define tdm_descdwrb_thresh_adr(descriptor) (0x00007c18 + (descriptor) * 0x40) +/* bitmask for bitfield desc{d}_wrb_thresh[6:0] */ +#define tdm_descdwrb_thresh_msk 0x00007f00 +/* inverted bitmask for bitfield desc{d}_wrb_thresh[6:0] */ +#define tdm_descdwrb_thresh_mskn 0xffff80ff +/* lower bit position of bitfield desc{d}_wrb_thresh[6:0] */ +#define tdm_descdwrb_thresh_shift 8 +/* width of bitfield desc{d}_wrb_thresh[6:0] */ +#define tdm_descdwrb_thresh_width 7 +/* default value of bitfield desc{d}_wrb_thresh[6:0] */ +#define tdm_descdwrb_thresh_default 0x0 + +/* tx lso_tcp_flag_first[b:0] bitfield definitions + * preprocessor definitions for the bitfield "lso_tcp_flag_first[b:0]". + * port="pif_thm_lso_tcp_flag_first_i[11:0]" + */ + +/* register address for bitfield lso_tcp_flag_first[b:0] */ +#define thm_lso_tcp_flag_first_adr 0x00007820 +/* bitmask for bitfield lso_tcp_flag_first[b:0] */ +#define thm_lso_tcp_flag_first_msk 0x00000fff +/* inverted bitmask for bitfield lso_tcp_flag_first[b:0] */ +#define thm_lso_tcp_flag_first_mskn 0xfffff000 +/* lower bit position of bitfield lso_tcp_flag_first[b:0] */ +#define thm_lso_tcp_flag_first_shift 0 +/* width of bitfield lso_tcp_flag_first[b:0] */ +#define thm_lso_tcp_flag_first_width 12 +/* default value of bitfield lso_tcp_flag_first[b:0] */ +#define thm_lso_tcp_flag_first_default 0x0 + +/* tx lso_tcp_flag_last[b:0] bitfield definitions + * preprocessor definitions for the bitfield "lso_tcp_flag_last[b:0]". + * port="pif_thm_lso_tcp_flag_last_i[11:0]" + */ + +/* register address for bitfield lso_tcp_flag_last[b:0] */ +#define thm_lso_tcp_flag_last_adr 0x00007824 +/* bitmask for bitfield lso_tcp_flag_last[b:0] */ +#define thm_lso_tcp_flag_last_msk 0x00000fff +/* inverted bitmask for bitfield lso_tcp_flag_last[b:0] */ +#define thm_lso_tcp_flag_last_mskn 0xfffff000 +/* lower bit position of bitfield lso_tcp_flag_last[b:0] */ +#define thm_lso_tcp_flag_last_shift 0 +/* width of bitfield lso_tcp_flag_last[b:0] */ +#define thm_lso_tcp_flag_last_width 12 +/* default value of bitfield lso_tcp_flag_last[b:0] */ +#define thm_lso_tcp_flag_last_default 0x0 + +/* tx lso_tcp_flag_mid[b:0] bitfield definitions + * preprocessor definitions for the bitfield "lso_tcp_flag_mid[b:0]". 
+ * port="pif_thm_lso_tcp_flag_mid_i[11:0]" + */ + +/* Register address for bitfield lro_rsc_max[1F:0] */ +#define rpo_lro_rsc_max_adr 0x00005598 +/* Bitmask for bitfield lro_rsc_max[1F:0] */ +#define rpo_lro_rsc_max_msk 0xFFFFFFFF +/* Inverted bitmask for bitfield lro_rsc_max[1F:0] */ +#define rpo_lro_rsc_max_mskn 0x00000000 +/* Lower bit position of bitfield lro_rsc_max[1F:0] */ +#define rpo_lro_rsc_max_shift 0 +/* Width of bitfield lro_rsc_max[1F:0] */ +#define rpo_lro_rsc_max_width 32 +/* Default value of bitfield lro_rsc_max[1F:0] */ +#define rpo_lro_rsc_max_default 0x0 + +/* RX lro_en[1F:0] Bitfield Definitions + * Preprocessor definitions for the bitfield "lro_en[1F:0]". + * PORT="pif_rpo_lro_en_i[31:0]" + */ + +/* Register address for bitfield lro_en[1F:0] */ +#define rpo_lro_en_adr 0x00005590 +/* Bitmask for bitfield lro_en[1F:0] */ +#define rpo_lro_en_msk 0xFFFFFFFF +/* Inverted bitmask for bitfield lro_en[1F:0] */ +#define rpo_lro_en_mskn 0x00000000 +/* Lower bit position of bitfield lro_en[1F:0] */ +#define rpo_lro_en_shift 0 +/* Width of bitfield lro_en[1F:0] */ +#define rpo_lro_en_width 32 +/* Default value of bitfield lro_en[1F:0] */ +#define rpo_lro_en_default 0x0 + +/* RX lro_ptopt_en Bitfield Definitions + * Preprocessor definitions for the bitfield "lro_ptopt_en". + * PORT="pif_rpo_lro_ptopt_en_i" + */ + +/* Register address for bitfield lro_ptopt_en */ +#define rpo_lro_ptopt_en_adr 0x00005594 +/* Bitmask for bitfield lro_ptopt_en */ +#define rpo_lro_ptopt_en_msk 0x00008000 +/* Inverted bitmask for bitfield lro_ptopt_en */ +#define rpo_lro_ptopt_en_mskn 0xFFFF7FFF +/* Lower bit position of bitfield lro_ptopt_en */ +#define rpo_lro_ptopt_en_shift 15 +/* Width of bitfield lro_ptopt_en */ +#define rpo_lro_ptopt_en_width 1 +/* Default value of bitfield lro_ptopt_en */ +#define rpo_lro_ptopt_en_defalt 0x1 + +/* RX lro_q_ses_lmt Bitfield Definitions + * Preprocessor definitions for the bitfield "lro_q_ses_lmt". + * PORT="pif_rpo_lro_q_ses_lmt_i[1:0]" + */ + +/* Register address for bitfield lro_q_ses_lmt */ +#define rpo_lro_qses_lmt_adr 0x00005594 +/* Bitmask for bitfield lro_q_ses_lmt */ +#define rpo_lro_qses_lmt_msk 0x00003000 +/* Inverted bitmask for bitfield lro_q_ses_lmt */ +#define rpo_lro_qses_lmt_mskn 0xFFFFCFFF +/* Lower bit position of bitfield lro_q_ses_lmt */ +#define rpo_lro_qses_lmt_shift 12 +/* Width of bitfield lro_q_ses_lmt */ +#define rpo_lro_qses_lmt_width 2 +/* Default value of bitfield lro_q_ses_lmt */ +#define rpo_lro_qses_lmt_default 0x1 + +/* RX lro_tot_dsc_lmt[1:0] Bitfield Definitions + * Preprocessor definitions for the bitfield "lro_tot_dsc_lmt[1:0]". + * PORT="pif_rpo_lro_tot_dsc_lmt_i[1:0]" + */ + +/* Register address for bitfield lro_tot_dsc_lmt[1:0] */ +#define rpo_lro_tot_dsc_lmt_adr 0x00005594 +/* Bitmask for bitfield lro_tot_dsc_lmt[1:0] */ +#define rpo_lro_tot_dsc_lmt_msk 0x00000060 +/* Inverted bitmask for bitfield lro_tot_dsc_lmt[1:0] */ +#define rpo_lro_tot_dsc_lmt_mskn 0xFFFFFF9F +/* Lower bit position of bitfield lro_tot_dsc_lmt[1:0] */ +#define rpo_lro_tot_dsc_lmt_shift 5 +/* Width of bitfield lro_tot_dsc_lmt[1:0] */ +#define rpo_lro_tot_dsc_lmt_width 2 +/* Default value of bitfield lro_tot_dsc_lmt[1:0] */ +#define rpo_lro_tot_dsc_lmt_defalt 0x1 + +/* RX lro_pkt_min[4:0] Bitfield Definitions + * Preprocessor definitions for the bitfield "lro_pkt_min[4:0]". 
+ * PORT="pif_rpo_lro_pkt_min_i[4:0]" + */ + +/* Register address for bitfield lro_pkt_min[4:0] */ +#define rpo_lro_pkt_min_adr 0x00005594 +/* Bitmask for bitfield lro_pkt_min[4:0] */ +#define rpo_lro_pkt_min_msk 0x0000001F +/* Inverted bitmask for bitfield lro_pkt_min[4:0] */ +#define rpo_lro_pkt_min_mskn 0xFFFFFFE0 +/* Lower bit position of bitfield lro_pkt_min[4:0] */ +#define rpo_lro_pkt_min_shift 0 +/* Width of bitfield lro_pkt_min[4:0] */ +#define rpo_lro_pkt_min_width 5 +/* Default value of bitfield lro_pkt_min[4:0] */ +#define rpo_lro_pkt_min_default 0x8 + +/* Width of bitfield lro{L}_des_max[1:0] */ +#define rpo_lro_ldes_max_width 2 +/* Default value of bitfield lro{L}_des_max[1:0] */ +#define rpo_lro_ldes_max_default 0x0 + +/* RX lro_tb_div[11:0] Bitfield Definitions + * Preprocessor definitions for the bitfield "lro_tb_div[11:0]". + * PORT="pif_rpo_lro_tb_div_i[11:0]" + */ + +/* Register address for bitfield lro_tb_div[11:0] */ +#define rpo_lro_tb_div_adr 0x00005620 +/* Bitmask for bitfield lro_tb_div[11:0] */ +#define rpo_lro_tb_div_msk 0xFFF00000 +/* Inverted bitmask for bitfield lro_tb_div[11:0] */ +#define rpo_lro_tb_div_mskn 0x000FFFFF +/* Lower bit position of bitfield lro_tb_div[11:0] */ +#define rpo_lro_tb_div_shift 20 +/* Width of bitfield lro_tb_div[11:0] */ +#define rpo_lro_tb_div_width 12 +/* Default value of bitfield lro_tb_div[11:0] */ +#define rpo_lro_tb_div_default 0xC35 + +/* RX lro_ina_ival[9:0] Bitfield Definitions + * Preprocessor definitions for the bitfield "lro_ina_ival[9:0]". + * PORT="pif_rpo_lro_ina_ival_i[9:0]" + */ + +/* Register address for bitfield lro_ina_ival[9:0] */ +#define rpo_lro_ina_ival_adr 0x00005620 +/* Bitmask for bitfield lro_ina_ival[9:0] */ +#define rpo_lro_ina_ival_msk 0x000FFC00 +/* Inverted bitmask for bitfield lro_ina_ival[9:0] */ +#define rpo_lro_ina_ival_mskn 0xFFF003FF +/* Lower bit position of bitfield lro_ina_ival[9:0] */ +#define rpo_lro_ina_ival_shift 10 +/* Width of bitfield lro_ina_ival[9:0] */ +#define rpo_lro_ina_ival_width 10 +/* Default value of bitfield lro_ina_ival[9:0] */ +#define rpo_lro_ina_ival_default 0xA + +/* RX lro_max_ival[9:0] Bitfield Definitions + * Preprocessor definitions for the bitfield "lro_max_ival[9:0]". + * PORT="pif_rpo_lro_max_ival_i[9:0]" + */ + +/* Register address for bitfield lro_max_ival[9:0] */ +#define rpo_lro_max_ival_adr 0x00005620 +/* Bitmask for bitfield lro_max_ival[9:0] */ +#define rpo_lro_max_ival_msk 0x000003FF +/* Inverted bitmask for bitfield lro_max_ival[9:0] */ +#define rpo_lro_max_ival_mskn 0xFFFFFC00 +/* Lower bit position of bitfield lro_max_ival[9:0] */ +#define rpo_lro_max_ival_shift 0 +/* Width of bitfield lro_max_ival[9:0] */ +#define rpo_lro_max_ival_width 10 +/* Default value of bitfield lro_max_ival[9:0] */ +#define rpo_lro_max_ival_default 0x19 + +/* TX dca{D}_cpuid[7:0] Bitfield Definitions + * Preprocessor definitions for the bitfield "dca{D}_cpuid[7:0]". 
+ * Parameter: DCA {D} | stride size 0x4 | range [0, 31] + * PORT="pif_tdm_dca0_cpuid_i[7:0]" + */ + +/* Register address for bitfield dca{D}_cpuid[7:0] */ +#define tdm_dca_dcpuid_adr(dca) (0x00008400 + (dca) * 0x4) +/* Bitmask for bitfield dca{D}_cpuid[7:0] */ +#define tdm_dca_dcpuid_msk 0x000000FF +/* Inverted bitmask for bitfield dca{D}_cpuid[7:0] */ +#define tdm_dca_dcpuid_mskn 0xFFFFFF00 +/* Lower bit position of bitfield dca{D}_cpuid[7:0] */ +#define tdm_dca_dcpuid_shift 0 +/* Width of bitfield dca{D}_cpuid[7:0] */ +#define tdm_dca_dcpuid_width 8 +/* Default value of bitfield dca{D}_cpuid[7:0] */ +#define tdm_dca_dcpuid_default 0x0 + +/* TX dca{D}_desc_en Bitfield Definitions + * Preprocessor definitions for the bitfield "dca{D}_desc_en". + * Parameter: DCA {D} | stride size 0x4 | range [0, 31] + * PORT="pif_tdm_dca_desc_en_i[0]" + */ + +/* Register address for bitfield dca{D}_desc_en */ +#define tdm_dca_ddesc_en_adr(dca) (0x00008400 + (dca) * 0x4) +/* Bitmask for bitfield dca{D}_desc_en */ +#define tdm_dca_ddesc_en_msk 0x80000000 +/* Inverted bitmask for bitfield dca{D}_desc_en */ +#define tdm_dca_ddesc_en_mskn 0x7FFFFFFF +/* Lower bit position of bitfield dca{D}_desc_en */ +#define tdm_dca_ddesc_en_shift 31 +/* Width of bitfield dca{D}_desc_en */ +#define tdm_dca_ddesc_en_width 1 +/* Default value of bitfield dca{D}_desc_en */ +#define tdm_dca_ddesc_en_default 0x0 + +/* TX desc{D}_en Bitfield Definitions + * Preprocessor definitions for the bitfield "desc{D}_en". + * Parameter: descriptor {D} | stride size 0x40 | range [0, 31] + * PORT="pif_tdm_desc_en_i[0]" + */ + +/* Register address for bitfield desc{D}_en */ +#define tdm_desc_den_adr(descriptor) (0x00007C08 + (descriptor) * 0x40) +/* Bitmask for bitfield desc{D}_en */ +#define tdm_desc_den_msk 0x80000000 +/* Inverted bitmask for bitfield desc{D}_en */ +#define tdm_desc_den_mskn 0x7FFFFFFF +/* Lower bit position of bitfield desc{D}_en */ +#define tdm_desc_den_shift 31 +/* Width of bitfield desc{D}_en */ +#define tdm_desc_den_width 1 +/* Default value of bitfield desc{D}_en */ +#define tdm_desc_den_default 0x0 + +/* TX desc{D}_hd[C:0] Bitfield Definitions + * Preprocessor definitions for the bitfield "desc{D}_hd[C:0]". + * Parameter: descriptor {D} | stride size 0x40 | range [0, 31] + * PORT="tdm_pif_desc0_hd_o[12:0]" + */ + +/* Register address for bitfield desc{D}_hd[C:0] */ +#define tdm_desc_dhd_adr(descriptor) (0x00007C0C + (descriptor) * 0x40) +/* Bitmask for bitfield desc{D}_hd[C:0] */ +#define tdm_desc_dhd_msk 0x00001FFF +/* Inverted bitmask for bitfield desc{D}_hd[C:0] */ +#define tdm_desc_dhd_mskn 0xFFFFE000 +/* Lower bit position of bitfield desc{D}_hd[C:0] */ +#define tdm_desc_dhd_shift 0 +/* Width of bitfield desc{D}_hd[C:0] */ +#define tdm_desc_dhd_width 13 + +/* TX desc{D}_len[9:0] Bitfield Definitions + * Preprocessor definitions for the bitfield "desc{D}_len[9:0]". 
+ * Parameter: descriptor {D} | stride size 0x40 | range [0, 31] + * PORT="pif_tdm_desc0_len_i[9:0]" + */ + +/* Register address for bitfield desc{D}_len[9:0] */ +#define tdm_desc_dlen_adr(descriptor) (0x00007C08 + (descriptor) * 0x40) +/* Bitmask for bitfield desc{D}_len[9:0] */ +#define tdm_desc_dlen_msk 0x00001FF8 +/* Inverted bitmask for bitfield desc{D}_len[9:0] */ +#define tdm_desc_dlen_mskn 0xFFFFE007 +/* Lower bit position of bitfield desc{D}_len[9:0] */ +#define tdm_desc_dlen_shift 3 +/* Width of bitfield desc{D}_len[9:0] */ +#define tdm_desc_dlen_width 10 +/* Default value of bitfield desc{D}_len[9:0] */ +#define tdm_desc_dlen_default 0x0 + +/* TX desc{D}_wrb_thresh[6:0] Bitfield Definitions + * Preprocessor definitions for the bitfield "desc{D}_wrb_thresh[6:0]". + * Parameter: descriptor {D} | stride size 0x40 | range [0, 31] + * PORT="pif_tdm_desc0_wrb_thresh_i[6:0]" + */ + +/* Register address for bitfield desc{D}_wrb_thresh[6:0] */ +#define tdm_desc_dwrb_thresh_adr(descriptor) \ + (0x00007C18 + (descriptor) * 0x40) +/* Bitmask for bitfield desc{D}_wrb_thresh[6:0] */ +#define tdm_desc_dwrb_thresh_msk 0x00007F00 +/* Inverted bitmask for bitfield desc{D}_wrb_thresh[6:0] */ +#define tdm_desc_dwrb_thresh_mskn 0xFFFF80FF +/* Lower bit position of bitfield desc{D}_wrb_thresh[6:0] */ +#define tdm_desc_dwrb_thresh_shift 8 +/* Width of bitfield desc{D}_wrb_thresh[6:0] */ +#define tdm_desc_dwrb_thresh_width 7 +/* Default value of bitfield desc{D}_wrb_thresh[6:0] */ +#define tdm_desc_dwrb_thresh_default 0x0 + +/* TX tdm_int_mod_en Bitfield Definitions + * Preprocessor definitions for the bitfield "tdm_int_mod_en". + * PORT="pif_tdm_int_mod_en_i" + */ + +/* Register address for bitfield tdm_int_mod_en */ +#define tdm_int_mod_en_adr 0x00007B40 +/* Bitmask for bitfield tdm_int_mod_en */ +#define tdm_int_mod_en_msk 0x00000010 +/* Inverted bitmask for bitfield tdm_int_mod_en */ +#define tdm_int_mod_en_mskn 0xFFFFFFEF +/* Lower bit position of bitfield tdm_int_mod_en */ +#define tdm_int_mod_en_shift 4 +/* Width of bitfield tdm_int_mod_en */ +#define tdm_int_mod_en_width 1 +/* Default value of bitfield tdm_int_mod_en */ +#define tdm_int_mod_en_default 0x0 + +/* TX lso_tcp_flag_mid[B:0] Bitfield Definitions + * Preprocessor definitions for the bitfield "lso_tcp_flag_mid[B:0]". + * PORT="pif_thm_lso_tcp_flag_mid_i[11:0]" + */ +/* register address for bitfield lso_tcp_flag_mid[b:0] */ +#define thm_lso_tcp_flag_mid_adr 0x00007820 +/* bitmask for bitfield lso_tcp_flag_mid[b:0] */ +#define thm_lso_tcp_flag_mid_msk 0x0fff0000 +/* inverted bitmask for bitfield lso_tcp_flag_mid[b:0] */ +#define thm_lso_tcp_flag_mid_mskn 0xf000ffff +/* lower bit position of bitfield lso_tcp_flag_mid[b:0] */ +#define thm_lso_tcp_flag_mid_shift 16 +/* width of bitfield lso_tcp_flag_mid[b:0] */ +#define thm_lso_tcp_flag_mid_width 12 +/* default value of bitfield lso_tcp_flag_mid[b:0] */ +#define thm_lso_tcp_flag_mid_default 0x0 + +/* tx tx_buf_en bitfield definitions + * preprocessor definitions for the bitfield "tx_buf_en". 
+ * port="pif_tpb_tx_buf_en_i" + */ + +/* register address for bitfield tx_buf_en */ +#define tpb_tx_buf_en_adr 0x00007900 +/* bitmask for bitfield tx_buf_en */ +#define tpb_tx_buf_en_msk 0x00000001 +/* inverted bitmask for bitfield tx_buf_en */ +#define tpb_tx_buf_en_mskn 0xfffffffe +/* lower bit position of bitfield tx_buf_en */ +#define tpb_tx_buf_en_shift 0 +/* width of bitfield tx_buf_en */ +#define tpb_tx_buf_en_width 1 +/* default value of bitfield tx_buf_en */ +#define tpb_tx_buf_en_default 0x0 + +/* tx tx_tc_mode bitfield definitions + * preprocessor definitions for the bitfield "tx_tc_mode". + * port="pif_tpb_tx_tc_mode_i" + */ + +/* register address for bitfield tx_tc_mode */ +#define tpb_tx_tc_mode_adr 0x00007900 +/* bitmask for bitfield tx_tc_mode */ +#define tpb_tx_tc_mode_msk 0x00000100 +/* inverted bitmask for bitfield tx_tc_mode */ +#define tpb_tx_tc_mode_mskn 0xfffffeff +/* lower bit position of bitfield tx_tc_mode */ +#define tpb_tx_tc_mode_shift 8 +/* width of bitfield tx_tc_mode */ +#define tpb_tx_tc_mode_width 1 +/* default value of bitfield tx_tc_mode */ +#define tpb_tx_tc_mode_default 0x0 + + +/* tx tx{b}_hi_thresh[c:0] bitfield definitions + * preprocessor definitions for the bitfield "tx{b}_hi_thresh[c:0]". + * parameter: buffer {b} | stride size 0x10 | range [0, 7] + * port="pif_tpb_tx0_hi_thresh_i[12:0]" + */ + +/* register address for bitfield tx{b}_hi_thresh[c:0] */ +#define tpb_txbhi_thresh_adr(buffer) (0x00007914 + (buffer) * 0x10) +/* bitmask for bitfield tx{b}_hi_thresh[c:0] */ +#define tpb_txbhi_thresh_msk 0x1fff0000 +/* inverted bitmask for bitfield tx{b}_hi_thresh[c:0] */ +#define tpb_txbhi_thresh_mskn 0xe000ffff +/* lower bit position of bitfield tx{b}_hi_thresh[c:0] */ +#define tpb_txbhi_thresh_shift 16 +/* width of bitfield tx{b}_hi_thresh[c:0] */ +#define tpb_txbhi_thresh_width 13 +/* default value of bitfield tx{b}_hi_thresh[c:0] */ +#define tpb_txbhi_thresh_default 0x0 + +/* tx tx{b}_lo_thresh[c:0] bitfield definitions + * preprocessor definitions for the bitfield "tx{b}_lo_thresh[c:0]". + * parameter: buffer {b} | stride size 0x10 | range [0, 7] + * port="pif_tpb_tx0_lo_thresh_i[12:0]" + */ + +/* register address for bitfield tx{b}_lo_thresh[c:0] */ +#define tpb_txblo_thresh_adr(buffer) (0x00007914 + (buffer) * 0x10) +/* bitmask for bitfield tx{b}_lo_thresh[c:0] */ +#define tpb_txblo_thresh_msk 0x00001fff +/* inverted bitmask for bitfield tx{b}_lo_thresh[c:0] */ +#define tpb_txblo_thresh_mskn 0xffffe000 +/* lower bit position of bitfield tx{b}_lo_thresh[c:0] */ +#define tpb_txblo_thresh_shift 0 +/* width of bitfield tx{b}_lo_thresh[c:0] */ +#define tpb_txblo_thresh_width 13 +/* default value of bitfield tx{b}_lo_thresh[c:0] */ +#define tpb_txblo_thresh_default 0x0 + +/* tx dma_sys_loopback bitfield definitions + * preprocessor definitions for the bitfield "dma_sys_loopback". + * port="pif_tpb_dma_sys_lbk_i" + */ + +/* register address for bitfield dma_sys_loopback */ +#define tpb_dma_sys_lbk_adr 0x00007000 +/* bitmask for bitfield dma_sys_loopback */ +#define tpb_dma_sys_lbk_msk 0x00000040 +/* inverted bitmask for bitfield dma_sys_loopback */ +#define tpb_dma_sys_lbk_mskn 0xffffffbf +/* lower bit position of bitfield dma_sys_loopback */ +#define tpb_dma_sys_lbk_shift 6 +/* width of bitfield dma_sys_loopback */ +#define tpb_dma_sys_lbk_width 1 +/* default value of bitfield dma_sys_loopback */ +#define tpb_dma_sys_lbk_default 0x0 + +/* tx tx{b}_buf_size[7:0] bitfield definitions + * preprocessor definitions for the bitfield "tx{b}_buf_size[7:0]". 
+ * parameter: buffer {b} | stride size 0x10 | range [0, 7] + * port="pif_tpb_tx0_buf_size_i[7:0]" + */ + +/* register address for bitfield tx{b}_buf_size[7:0] */ +#define tpb_txbbuf_size_adr(buffer) (0x00007910 + (buffer) * 0x10) +/* bitmask for bitfield tx{b}_buf_size[7:0] */ +#define tpb_txbbuf_size_msk 0x000000ff +/* inverted bitmask for bitfield tx{b}_buf_size[7:0] */ +#define tpb_txbbuf_size_mskn 0xffffff00 +/* lower bit position of bitfield tx{b}_buf_size[7:0] */ +#define tpb_txbbuf_size_shift 0 +/* width of bitfield tx{b}_buf_size[7:0] */ +#define tpb_txbbuf_size_width 8 +/* default value of bitfield tx{b}_buf_size[7:0] */ +#define tpb_txbbuf_size_default 0x0 + +/* tx tx_scp_ins_en bitfield definitions + * preprocessor definitions for the bitfield "tx_scp_ins_en". + * port="pif_tpb_scp_ins_en_i" + */ + +/* register address for bitfield tx_scp_ins_en */ +#define tpb_tx_scp_ins_en_adr 0x00007900 +/* bitmask for bitfield tx_scp_ins_en */ +#define tpb_tx_scp_ins_en_msk 0x00000004 +/* inverted bitmask for bitfield tx_scp_ins_en */ +#define tpb_tx_scp_ins_en_mskn 0xfffffffb +/* lower bit position of bitfield tx_scp_ins_en */ +#define tpb_tx_scp_ins_en_shift 2 +/* width of bitfield tx_scp_ins_en */ +#define tpb_tx_scp_ins_en_width 1 +/* default value of bitfield tx_scp_ins_en */ +#define tpb_tx_scp_ins_en_default 0x0 + +/* tx ipv4_chk_en bitfield definitions + * preprocessor definitions for the bitfield "ipv4_chk_en". + * port="pif_tpo_ipv4_chk_en_i" + */ + +/* register address for bitfield ipv4_chk_en */ +#define tpo_ipv4chk_en_adr 0x00007800 +/* bitmask for bitfield ipv4_chk_en */ +#define tpo_ipv4chk_en_msk 0x00000002 +/* inverted bitmask for bitfield ipv4_chk_en */ +#define tpo_ipv4chk_en_mskn 0xfffffffd +/* lower bit position of bitfield ipv4_chk_en */ +#define tpo_ipv4chk_en_shift 1 +/* width of bitfield ipv4_chk_en */ +#define tpo_ipv4chk_en_width 1 +/* default value of bitfield ipv4_chk_en */ +#define tpo_ipv4chk_en_default 0x0 + +/* tx l4_chk_en bitfield definitions + * preprocessor definitions for the bitfield "l4_chk_en". + * port="pif_tpo_l4_chk_en_i" + */ + +/* register address for bitfield l4_chk_en */ +#define tpol4chk_en_adr 0x00007800 +/* bitmask for bitfield l4_chk_en */ +#define tpol4chk_en_msk 0x00000001 +/* inverted bitmask for bitfield l4_chk_en */ +#define tpol4chk_en_mskn 0xfffffffe +/* lower bit position of bitfield l4_chk_en */ +#define tpol4chk_en_shift 0 +/* width of bitfield l4_chk_en */ +#define tpol4chk_en_width 1 +/* default value of bitfield l4_chk_en */ +#define tpol4chk_en_default 0x0 + +/* tx pkt_sys_loopback bitfield definitions + * preprocessor definitions for the bitfield "pkt_sys_loopback". + * port="pif_tpo_pkt_sys_lbk_i" + */ + +/* register address for bitfield pkt_sys_loopback */ +#define tpo_pkt_sys_lbk_adr 0x00007000 +/* bitmask for bitfield pkt_sys_loopback */ +#define tpo_pkt_sys_lbk_msk 0x00000080 +/* inverted bitmask for bitfield pkt_sys_loopback */ +#define tpo_pkt_sys_lbk_mskn 0xffffff7f +/* lower bit position of bitfield pkt_sys_loopback */ +#define tpo_pkt_sys_lbk_shift 7 +/* width of bitfield pkt_sys_loopback */ +#define tpo_pkt_sys_lbk_width 1 +/* default value of bitfield pkt_sys_loopback */ +#define tpo_pkt_sys_lbk_default 0x0 + +/* tx data_tc_arb_mode bitfield definitions + * preprocessor definitions for the bitfield "data_tc_arb_mode". 
+ * port="pif_tps_data_tc_arb_mode_i" + */ + +/* register address for bitfield data_tc_arb_mode */ +#define tps_data_tc_arb_mode_adr 0x00007100 +/* bitmask for bitfield data_tc_arb_mode */ +#define tps_data_tc_arb_mode_msk 0x00000001 +/* inverted bitmask for bitfield data_tc_arb_mode */ +#define tps_data_tc_arb_mode_mskn 0xfffffffe +/* lower bit position of bitfield data_tc_arb_mode */ +#define tps_data_tc_arb_mode_shift 0 +/* width of bitfield data_tc_arb_mode */ +#define tps_data_tc_arb_mode_width 1 +/* default value of bitfield data_tc_arb_mode */ +#define tps_data_tc_arb_mode_default 0x0 + +/* tx desc_rate_ta_rst bitfield definitions + * preprocessor definitions for the bitfield "desc_rate_ta_rst". + * port="pif_tps_desc_rate_ta_rst_i" + */ + +/* register address for bitfield desc_rate_ta_rst */ +#define tps_desc_rate_ta_rst_adr 0x00007310 +/* bitmask for bitfield desc_rate_ta_rst */ +#define tps_desc_rate_ta_rst_msk 0x80000000 +/* inverted bitmask for bitfield desc_rate_ta_rst */ +#define tps_desc_rate_ta_rst_mskn 0x7fffffff +/* lower bit position of bitfield desc_rate_ta_rst */ +#define tps_desc_rate_ta_rst_shift 31 +/* width of bitfield desc_rate_ta_rst */ +#define tps_desc_rate_ta_rst_width 1 +/* default value of bitfield desc_rate_ta_rst */ +#define tps_desc_rate_ta_rst_default 0x0 + +/* tx desc_rate_limit[a:0] bitfield definitions + * preprocessor definitions for the bitfield "desc_rate_limit[a:0]". + * port="pif_tps_desc_rate_lim_i[10:0]" + */ + +/* register address for bitfield desc_rate_limit[a:0] */ +#define tps_desc_rate_lim_adr 0x00007310 +/* bitmask for bitfield desc_rate_limit[a:0] */ +#define tps_desc_rate_lim_msk 0x000007ff +/* inverted bitmask for bitfield desc_rate_limit[a:0] */ +#define tps_desc_rate_lim_mskn 0xfffff800 +/* lower bit position of bitfield desc_rate_limit[a:0] */ +#define tps_desc_rate_lim_shift 0 +/* width of bitfield desc_rate_limit[a:0] */ +#define tps_desc_rate_lim_width 11 +/* default value of bitfield desc_rate_limit[a:0] */ +#define tps_desc_rate_lim_default 0x0 + +/* tx desc_tc_arb_mode[1:0] bitfield definitions + * preprocessor definitions for the bitfield "desc_tc_arb_mode[1:0]". + * port="pif_tps_desc_tc_arb_mode_i[1:0]" + */ + +/* register address for bitfield desc_tc_arb_mode[1:0] */ +#define tps_desc_tc_arb_mode_adr 0x00007200 +/* bitmask for bitfield desc_tc_arb_mode[1:0] */ +#define tps_desc_tc_arb_mode_msk 0x00000003 +/* inverted bitmask for bitfield desc_tc_arb_mode[1:0] */ +#define tps_desc_tc_arb_mode_mskn 0xfffffffc +/* lower bit position of bitfield desc_tc_arb_mode[1:0] */ +#define tps_desc_tc_arb_mode_shift 0 +/* width of bitfield desc_tc_arb_mode[1:0] */ +#define tps_desc_tc_arb_mode_width 2 +/* default value of bitfield desc_tc_arb_mode[1:0] */ +#define tps_desc_tc_arb_mode_default 0x0 + +/* tx desc_tc{t}_credit_max[b:0] bitfield definitions + * preprocessor definitions for the bitfield "desc_tc{t}_credit_max[b:0]". 
+ * parameter: tc {t} | stride size 0x4 | range [0, 7] + * port="pif_tps_desc_tc0_credit_max_i[11:0]" + */ + +/* register address for bitfield desc_tc{t}_credit_max[b:0] */ +#define tps_desc_tctcredit_max_adr(tc) (0x00007210 + (tc) * 0x4) +/* bitmask for bitfield desc_tc{t}_credit_max[b:0] */ +#define tps_desc_tctcredit_max_msk 0x0fff0000 +/* inverted bitmask for bitfield desc_tc{t}_credit_max[b:0] */ +#define tps_desc_tctcredit_max_mskn 0xf000ffff +/* lower bit position of bitfield desc_tc{t}_credit_max[b:0] */ +#define tps_desc_tctcredit_max_shift 16 +/* width of bitfield desc_tc{t}_credit_max[b:0] */ +#define tps_desc_tctcredit_max_width 12 +/* default value of bitfield desc_tc{t}_credit_max[b:0] */ +#define tps_desc_tctcredit_max_default 0x0 + +/* tx desc_tc{t}_weight[8:0] bitfield definitions + * preprocessor definitions for the bitfield "desc_tc{t}_weight[8:0]". + * parameter: tc {t} | stride size 0x4 | range [0, 7] + * port="pif_tps_desc_tc0_weight_i[8:0]" + */ + +/* register address for bitfield desc_tc{t}_weight[8:0] */ +#define tps_desc_tctweight_adr(tc) (0x00007210 + (tc) * 0x4) +/* bitmask for bitfield desc_tc{t}_weight[8:0] */ +#define tps_desc_tctweight_msk 0x000001ff +/* inverted bitmask for bitfield desc_tc{t}_weight[8:0] */ +#define tps_desc_tctweight_mskn 0xfffffe00 +/* lower bit position of bitfield desc_tc{t}_weight[8:0] */ +#define tps_desc_tctweight_shift 0 +/* width of bitfield desc_tc{t}_weight[8:0] */ +#define tps_desc_tctweight_width 9 +/* default value of bitfield desc_tc{t}_weight[8:0] */ +#define tps_desc_tctweight_default 0x0 + +/* tx desc_vm_arb_mode bitfield definitions + * preprocessor definitions for the bitfield "desc_vm_arb_mode". + * port="pif_tps_desc_vm_arb_mode_i" + */ + +/* register address for bitfield desc_vm_arb_mode */ +#define tps_desc_vm_arb_mode_adr 0x00007300 +/* bitmask for bitfield desc_vm_arb_mode */ +#define tps_desc_vm_arb_mode_msk 0x00000001 +/* inverted bitmask for bitfield desc_vm_arb_mode */ +#define tps_desc_vm_arb_mode_mskn 0xfffffffe +/* lower bit position of bitfield desc_vm_arb_mode */ +#define tps_desc_vm_arb_mode_shift 0 +/* width of bitfield desc_vm_arb_mode */ +#define tps_desc_vm_arb_mode_width 1 +/* default value of bitfield desc_vm_arb_mode */ +#define tps_desc_vm_arb_mode_default 0x0 + +/* tx data_tc{t}_credit_max[b:0] bitfield definitions + * preprocessor definitions for the bitfield "data_tc{t}_credit_max[b:0]". + * parameter: tc {t} | stride size 0x4 | range [0, 7] + * port="pif_tps_data_tc0_credit_max_i[11:0]" + */ + +/* register address for bitfield data_tc{t}_credit_max[b:0] */ +#define tps_data_tctcredit_max_adr(tc) (0x00007110 + (tc) * 0x4) +/* bitmask for bitfield data_tc{t}_credit_max[b:0] */ +#define tps_data_tctcredit_max_msk 0x0fff0000 +/* inverted bitmask for bitfield data_tc{t}_credit_max[b:0] */ +#define tps_data_tctcredit_max_mskn 0xf000ffff +/* lower bit position of bitfield data_tc{t}_credit_max[b:0] */ +#define tps_data_tctcredit_max_shift 16 +/* width of bitfield data_tc{t}_credit_max[b:0] */ +#define tps_data_tctcredit_max_width 12 +/* default value of bitfield data_tc{t}_credit_max[b:0] */ +#define tps_data_tctcredit_max_default 0x0 + +/* tx data_tc{t}_weight[8:0] bitfield definitions + * preprocessor definitions for the bitfield "data_tc{t}_weight[8:0]". 
+ * parameter: tc {t} | stride size 0x4 | range [0, 7] + * port="pif_tps_data_tc0_weight_i[8:0]" + */ + +/* register address for bitfield data_tc{t}_weight[8:0] */ +#define tps_data_tctweight_adr(tc) (0x00007110 + (tc) * 0x4) +/* bitmask for bitfield data_tc{t}_weight[8:0] */ +#define tps_data_tctweight_msk 0x000001ff +/* inverted bitmask for bitfield data_tc{t}_weight[8:0] */ +#define tps_data_tctweight_mskn 0xfffffe00 +/* lower bit position of bitfield data_tc{t}_weight[8:0] */ +#define tps_data_tctweight_shift 0 +/* width of bitfield data_tc{t}_weight[8:0] */ +#define tps_data_tctweight_width 9 +/* default value of bitfield data_tc{t}_weight[8:0] */ +#define tps_data_tctweight_default 0x0 + +/* tx reg_res_dsbl bitfield definitions + * preprocessor definitions for the bitfield "reg_res_dsbl". + * port="pif_tx_reg_res_dsbl_i" + */ + +/* register address for bitfield reg_res_dsbl */ +#define tx_reg_res_dsbl_adr 0x00007000 +/* bitmask for bitfield reg_res_dsbl */ +#define tx_reg_res_dsbl_msk 0x20000000 +/* inverted bitmask for bitfield reg_res_dsbl */ +#define tx_reg_res_dsbl_mskn 0xdfffffff +/* lower bit position of bitfield reg_res_dsbl */ +#define tx_reg_res_dsbl_shift 29 +/* width of bitfield reg_res_dsbl */ +#define tx_reg_res_dsbl_width 1 +/* default value of bitfield reg_res_dsbl */ +#define tx_reg_res_dsbl_default 0x1 + +/* mac_phy register access busy bitfield definitions + * preprocessor definitions for the bitfield "register access busy". + * port="msm_pif_reg_busy_o" + */ + +/* register address for bitfield register access busy */ +#define msm_reg_access_busy_adr 0x00004400 +/* bitmask for bitfield register access busy */ +#define msm_reg_access_busy_msk 0x00001000 +/* inverted bitmask for bitfield register access busy */ +#define msm_reg_access_busy_mskn 0xffffefff +/* lower bit position of bitfield register access busy */ +#define msm_reg_access_busy_shift 12 +/* width of bitfield register access busy */ +#define msm_reg_access_busy_width 1 + +/* mac_phy msm register address[7:0] bitfield definitions + * preprocessor definitions for the bitfield "msm register address[7:0]". + * port="pif_msm_reg_addr_i[7:0]" + */ + +/* register address for bitfield msm register address[7:0] */ +#define msm_reg_addr_adr 0x00004400 +/* bitmask for bitfield msm register address[7:0] */ +#define msm_reg_addr_msk 0x000000ff +/* inverted bitmask for bitfield msm register address[7:0] */ +#define msm_reg_addr_mskn 0xffffff00 +/* lower bit position of bitfield msm register address[7:0] */ +#define msm_reg_addr_shift 0 +/* width of bitfield msm register address[7:0] */ +#define msm_reg_addr_width 8 +/* default value of bitfield msm register address[7:0] */ +#define msm_reg_addr_default 0x0 + +/* mac_phy register read strobe bitfield definitions + * preprocessor definitions for the bitfield "register read strobe". 
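+ * (the busy, address, strobe and data registers at
+ * 0x00004400-0x00004408 form the indirect MSM register access window;
+ * a read typically writes the address, asserts this strobe, polls the
+ * busy bit and then reads msm_reg_rd_data)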
+ * port="pif_msm_reg_rden_i" + */ + +/* register address for bitfield register read strobe */ +#define msm_reg_rd_strobe_adr 0x00004400 +/* bitmask for bitfield register read strobe */ +#define msm_reg_rd_strobe_msk 0x00000200 +/* inverted bitmask for bitfield register read strobe */ +#define msm_reg_rd_strobe_mskn 0xfffffdff +/* lower bit position of bitfield register read strobe */ +#define msm_reg_rd_strobe_shift 9 +/* width of bitfield register read strobe */ +#define msm_reg_rd_strobe_width 1 +/* default value of bitfield register read strobe */ +#define msm_reg_rd_strobe_default 0x0 + +/* mac_phy msm register read data[31:0] bitfield definitions + * preprocessor definitions for the bitfield "msm register read data[31:0]". + * port="msm_pif_reg_rd_data_o[31:0]" + */ + +/* register address for bitfield msm register read data[31:0] */ +#define msm_reg_rd_data_adr 0x00004408 +/* bitmask for bitfield msm register read data[31:0] */ +#define msm_reg_rd_data_msk 0xffffffff +/* inverted bitmask for bitfield msm register read data[31:0] */ +#define msm_reg_rd_data_mskn 0x00000000 +/* lower bit position of bitfield msm register read data[31:0] */ +#define msm_reg_rd_data_shift 0 +/* width of bitfield msm register read data[31:0] */ +#define msm_reg_rd_data_width 32 + +/* mac_phy msm register write data[31:0] bitfield definitions + * preprocessor definitions for the bitfield "msm register write data[31:0]". + * port="pif_msm_reg_wr_data_i[31:0]" + */ + +/* register address for bitfield msm register write data[31:0] */ +#define msm_reg_wr_data_adr 0x00004404 +/* bitmask for bitfield msm register write data[31:0] */ +#define msm_reg_wr_data_msk 0xffffffff +/* inverted bitmask for bitfield msm register write data[31:0] */ +#define msm_reg_wr_data_mskn 0x00000000 +/* lower bit position of bitfield msm register write data[31:0] */ +#define msm_reg_wr_data_shift 0 +/* width of bitfield msm register write data[31:0] */ +#define msm_reg_wr_data_width 32 +/* default value of bitfield msm register write data[31:0] */ +#define msm_reg_wr_data_default 0x0 + +/* mac_phy register write strobe bitfield definitions + * preprocessor definitions for the bitfield "register write strobe". + * port="pif_msm_reg_wren_i" + */ + +/* register address for bitfield register write strobe */ +#define msm_reg_wr_strobe_adr 0x00004400 +/* bitmask for bitfield register write strobe */ +#define msm_reg_wr_strobe_msk 0x00000100 +/* inverted bitmask for bitfield register write strobe */ +#define msm_reg_wr_strobe_mskn 0xfffffeff +/* lower bit position of bitfield register write strobe */ +#define msm_reg_wr_strobe_shift 8 +/* width of bitfield register write strobe */ +#define msm_reg_wr_strobe_width 1 +/* default value of bitfield register write strobe */ +#define msm_reg_wr_strobe_default 0x0 + +/* mif soft reset bitfield definitions + * preprocessor definitions for the bitfield "soft reset". + * port="pif_glb_res_i" + */ + +/* register address for bitfield soft reset */ +#define glb_soft_res_adr 0x00000000 +/* bitmask for bitfield soft reset */ +#define glb_soft_res_msk 0x00008000 +/* inverted bitmask for bitfield soft reset */ +#define glb_soft_res_mskn 0xffff7fff +/* lower bit position of bitfield soft reset */ +#define glb_soft_res_shift 15 +/* width of bitfield soft reset */ +#define glb_soft_res_width 1 +/* default value of bitfield soft reset */ +#define glb_soft_res_default 0x0 + +/* mif register reset disable bitfield definitions + * preprocessor definitions for the bitfield "register reset disable". 
+ * port="pif_glb_reg_res_dsbl_i" + */ + +/* register address for bitfield register reset disable */ +#define glb_reg_res_dis_adr 0x00000000 +/* bitmask for bitfield register reset disable */ +#define glb_reg_res_dis_msk 0x00004000 +/* inverted bitmask for bitfield register reset disable */ +#define glb_reg_res_dis_mskn 0xffffbfff +/* lower bit position of bitfield register reset disable */ +#define glb_reg_res_dis_shift 14 +/* width of bitfield register reset disable */ +#define glb_reg_res_dis_width 1 +/* default value of bitfield register reset disable */ +#define glb_reg_res_dis_default 0x1 + +/* tx dma debug control definitions */ +#define tx_dma_debug_ctl_adr 0x00008920u + +/* tx dma descriptor base address msw definitions */ +#define tx_dma_desc_base_addrmsw_adr(descriptor) \ + (0x00007c04u + (descriptor) * 0x40) + +/* tx interrupt moderation control register definitions + * Preprocessor definitions for TX Interrupt Moderation Control Register + * Base Address: 0x00008980 + * Parameter: queue {Q} | stride size 0x4 | range [0, 31] + */ + +#define tx_intr_moderation_ctl_adr(queue) (0x00008980u + (queue) * 0x4) + +/* pcie reg_res_dsbl bitfield definitions + * preprocessor definitions for the bitfield "reg_res_dsbl". + * port="pif_pci_reg_res_dsbl_i" + */ + +/* register address for bitfield reg_res_dsbl */ +#define pci_reg_res_dsbl_adr 0x00001000 +/* bitmask for bitfield reg_res_dsbl */ +#define pci_reg_res_dsbl_msk 0x20000000 +/* inverted bitmask for bitfield reg_res_dsbl */ +#define pci_reg_res_dsbl_mskn 0xdfffffff +/* lower bit position of bitfield reg_res_dsbl */ +#define pci_reg_res_dsbl_shift 29 +/* width of bitfield reg_res_dsbl */ +#define pci_reg_res_dsbl_width 1 +/* default value of bitfield reg_res_dsbl */ +#define pci_reg_res_dsbl_default 0x1 + + +/* global microprocessor scratch pad definitions */ +#define glb_cpu_scratch_scp_adr(scratch_scp) (0x00000300u + (scratch_scp) * 0x4) +/* global microprocessor scratch pad definitions */ +#define glb_cpu_no_reset_scratchpad_adr(idx) (0x00000380u + (idx) * 0x4) + +/*! @name Global Standard Control 1 Definitions +* +* Preprocessor definitions for Global Standard Control 1 +* Address: 0x00000000 +@{*/ +#define glb_standard_ctl1_adr 0x00000000u +/*@}*/ + +/*! @name Global Control 2 Definitions +* +* Preprocessor definitions for Global Control 2 +* Address: 0x00000404 +@{*/ +#define glb_ctl2_adr 0x00000404u +/*@}*/ + +/*! 
@name Global Daisy Chain Status 1 Definitions +* +* Preprocessor definitions for Global Daisy Chain Status 1 +* Address: 0x00000704 +@{*/ +#define glb_daisy_chain_status1_adr 0x00000704u +/*@}*/ + +/* mif up mailbox execute operation */ +#define mif_mcp_up_mailbox_execute_operation_adr 0x00000200u +#define mif_mcp_up_mailbox_execute_operation_msk 0x00008000u +#define mif_mcp_up_mailbox_execute_operation_shift 15 + +/* MIF uP Mailbox Busy */ +#define mif_mcp_up_mailbox_busy_adr 0x00000200u +#define mif_mcp_up_mailbox_busy_msk 0x00000100u +#define mif_mcp_up_mailbox_busy_shift 8 + +/* mif uP mailbox address [1f:2] */ +#define mif_mcp_up_mailbox_addr_adr 0x00000208u +/* mif uP mailbox data [1f:0] */ +#define mif_mcp_up_mailbox_data_adr 0x0000020cu + +#define HW_ATL_RX_CTRL_ADDR_BEGIN_FL3L4 0x00005380 +#define HW_ATL_RX_SRCA_ADDR_BEGIN_FL3L4 0x000053B0 +#define HW_ATL_RX_DESTA_ADDR_BEGIN_FL3L4 0x000053D0 + +#define HW_ATL_RX_GET_ADDR_CTRL_FL3L4(location) \ + (HW_ATL_RX_CTRL_ADDR_BEGIN_FL3L4 + ((location) * 0x4)) +#define HW_ATL_RX_GET_ADDR_SRCA_FL3L4(location) \ + (HW_ATL_RX_SRCA_ADDR_BEGIN_FL3L4 + ((location) * 0x4)) +#define HW_ATL_RX_GET_ADDR_DESTA_FL3L4(location) \ + (HW_ATL_RX_DESTA_ADDR_BEGIN_FL3L4 + ((location) * 0x4)) + +#endif /* HW_ATL_LLH_INTERNAL_H */ Index: sys/dev/aq/aq_irq.c =================================================================== --- /dev/null +++ sys/dev/aq/aq_irq.c @@ -0,0 +1,185 @@ +/* + * aQuantia Corporation Network Driver + * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * (1) Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * (2) Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * + * (3)The name of the author may not be used to endorse or promote + * products derived from this software without specific prior + * written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS + * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE + * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + + +#include +__FBSDID("$FreeBSD$"); + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "aq_common.h" +#include "aq_device.h" +#include "aq_ring.h" +#include "aq_dbg.h" +#include "aq_hw.h" +#include "aq_hw_llh.h" + +int aq_update_hw_stats(aq_dev_t *aq_dev) +{ + struct aq_hw *hw = &aq_dev->hw; + struct aq_hw_fw_mbox mbox; + + aq_hw_mpi_read_stats(hw, &mbox); + +#define AQ_SDELTA(_N_) (aq_dev->curr_stats._N_ += \ + mbox.stats._N_ - aq_dev->last_stats._N_) + if (aq_dev->linkup) { + AQ_SDELTA(uprc); + AQ_SDELTA(mprc); + AQ_SDELTA(bprc); + AQ_SDELTA(cprc); + AQ_SDELTA(erpt); + + AQ_SDELTA(uptc); + AQ_SDELTA(mptc); + AQ_SDELTA(bptc); + AQ_SDELTA(erpr); + + AQ_SDELTA(ubrc); + AQ_SDELTA(ubtc); + AQ_SDELTA(mbrc); + AQ_SDELTA(mbtc); + AQ_SDELTA(bbrc); + AQ_SDELTA(bbtc); + + AQ_SDELTA(ptc); + AQ_SDELTA(prc); + + AQ_SDELTA(dpc); + + aq_dev->curr_stats.brc = aq_dev->curr_stats.ubrc + + aq_dev->curr_stats.mbrc + + aq_dev->curr_stats.bbrc; + aq_dev->curr_stats.btc = aq_dev->curr_stats.ubtc + + aq_dev->curr_stats.mbtc + + aq_dev->curr_stats.bbtc; + + } +#undef AQ_SDELTA + + memcpy(&aq_dev->last_stats, &mbox.stats, sizeof(mbox.stats)); + + return (0); +} + + +void aq_if_update_admin_status(if_ctx_t ctx) +{ + aq_dev_t *aq_dev = iflib_get_softc(ctx); + struct aq_hw *hw = &aq_dev->hw; + u32 link_speed; + + // AQ_DBG_ENTER(); + + struct aq_hw_fc_info fc_neg; + aq_hw_get_link_state(hw, &link_speed, &fc_neg); +// AQ_DBG_PRINT(" link_speed=%d aq_dev->linkup=%d", link_speed, aq_dev->linkup); + if (link_speed && !aq_dev->linkup) { /* link was DOWN */ + device_printf(aq_dev->dev, "atlantic: link UP: speed=%d\n", link_speed); + + aq_dev->linkup = 1; + +#if __FreeBSD__ >= 12 + /* Disable TSO if link speed < 1G */ + if (link_speed < 1000 && (iflib_get_softc_ctx(ctx)->isc_capabilities & (IFCAP_TSO4 | IFCAP_TSO6))) { + iflib_get_softc_ctx(ctx)->isc_capabilities &= ~(IFCAP_TSO4 | IFCAP_TSO6); + device_printf(aq_dev->dev, "atlantic: TSO disabled for link speed < 1G"); + }else{ + iflib_get_softc_ctx(ctx)->isc_capabilities |= (IFCAP_TSO4 | IFCAP_TSO6); + } +#endif + /* turn on/off RX Pause in RPB */ + rpb_rx_xoff_en_per_tc_set(hw, fc_neg.fc_rx, 0); + + + iflib_link_state_change(ctx, LINK_STATE_UP, IF_Mbps(link_speed)); + aq_mediastatus_update(aq_dev, link_speed, &fc_neg); + + /* update ITR settings according new link speed */ + aq_hw_interrupt_moderation_set(hw); + } else if (link_speed == 0U && aq_dev->linkup) { /* link was UP */ + device_printf(aq_dev->dev, "atlantic: link DOWN\n"); + + aq_dev->linkup = 0; + + /* turn off RX Pause in RPB */ + rpb_rx_xoff_en_per_tc_set(hw, 0, 0); + + iflib_link_state_change(ctx, LINK_STATE_DOWN, 0); + aq_mediastatus_update(aq_dev, link_speed, &fc_neg); + } + + aq_update_hw_stats(aq_dev); +// AQ_DBG_EXIT(0); +} + +/**************************************************************************/ +/* interrupt service routine (Top half) */ +/**************************************************************************/ +int aq_isr_rx(void *arg) +{ + struct aq_ring *ring = arg; + struct aq_dev *aq_dev = ring->dev; + struct aq_hw *hw = &aq_dev->hw; + + /* clear interrupt status */ + itr_irq_status_clearlsw_set(hw, BIT(ring->msix)); + ring->stats.irq++; + return (FILTER_SCHEDULE_THREAD); +} + +/**************************************************************************/ +/* interrupt service routine (Top half) */ +/**************************************************************************/ +int aq_linkstat_isr(void *arg) +{ + aq_dev_t 
*aq_dev = arg; + struct aq_hw *hw = &aq_dev->hw; + + /* clear interrupt status */ + itr_irq_status_clearlsw_set(hw, aq_dev->msix); + + iflib_admin_intr_deferred(aq_dev->ctx); + + return (FILTER_HANDLED); +} Index: sys/dev/aq/aq_main.c =================================================================== --- /dev/null +++ sys/dev/aq/aq_main.c @@ -0,0 +1,1329 @@ +/* + * aQuantia Corporation Network Driver + * Copyright (C) 2019 aQuantia Corporation. All rights reserved + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * (1) Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * (2) Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * + * (3)The name of the author may not be used to endorse or promote + * products derived from this software without specific prior + * written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS + * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE + * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + + +#include +__FBSDID("$FreeBSD$"); + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "opt_inet.h" +#include "opt_inet6.h" +#include "opt_rss.h" + +#include "ifdi_if.h" + +#include "aq_device.h" +#include "aq_fw.h" +#include "aq_hw.h" +#include "aq_hw_llh.h" +#include "aq_ring.h" +#include "aq_dbg.h" + + +#define AQ_XXX_UNIMPLEMENTED_FUNCTION do { \ + printf("atlantic: unimplemented function: %s@%s:%d\n", __func__, \ + __FILE__, __LINE__); \ +} while (0) + +MALLOC_DEFINE(M_AQ, "aq", "Aquantia"); + +char aq_driver_version[] = AQ_VER; + +#define AQUANTIA_VENDOR_ID 0x1D6A + +#define AQ_DEVICE_ID_0001 0x0001 +#define AQ_DEVICE_ID_D100 0xD100 +#define AQ_DEVICE_ID_D107 0xD107 +#define AQ_DEVICE_ID_D108 0xD108 +#define AQ_DEVICE_ID_D109 0xD109 + +#define AQ_DEVICE_ID_AQC100 0x00B1 +#define AQ_DEVICE_ID_AQC107 0x07B1 +#define AQ_DEVICE_ID_AQC108 0x08B1 +#define AQ_DEVICE_ID_AQC109 0x09B1 +#define AQ_DEVICE_ID_AQC111 0x11B1 +#define AQ_DEVICE_ID_AQC112 0x12B1 + +#define AQ_DEVICE_ID_AQC100S 0x80B1 +#define AQ_DEVICE_ID_AQC107S 0x87B1 +#define AQ_DEVICE_ID_AQC108S 0x88B1 +#define AQ_DEVICE_ID_AQC109S 0x89B1 +#define AQ_DEVICE_ID_AQC111S 0x91B1 +#define AQ_DEVICE_ID_AQC112S 0x92B1 + +static pci_vendor_info_t aq_vendor_info_array[] = { + PVID(AQUANTIA_VENDOR_ID, AQ_DEVICE_ID_0001, "Aquantia AQtion 10Gbit Network Adapter"), + PVID(AQUANTIA_VENDOR_ID, AQ_DEVICE_ID_D107, "Aquantia AQtion 10Gbit Network Adapter"), + PVID(AQUANTIA_VENDOR_ID, AQ_DEVICE_ID_D108, "Aquantia AQtion 5Gbit Network Adapter"), + PVID(AQUANTIA_VENDOR_ID, AQ_DEVICE_ID_D109, "Aquantia AQtion 2.5Gbit Network Adapter"), + + PVID(AQUANTIA_VENDOR_ID, AQ_DEVICE_ID_AQC107, "Aquantia AQtion 10Gbit Network Adapter"), + PVID(AQUANTIA_VENDOR_ID, AQ_DEVICE_ID_AQC108, "Aquantia AQtion 5Gbit Network Adapter"), + PVID(AQUANTIA_VENDOR_ID, AQ_DEVICE_ID_AQC109, "Aquantia AQtion 2.5Gbit Network Adapter"), + PVID(AQUANTIA_VENDOR_ID, AQ_DEVICE_ID_AQC100, "Aquantia AQtion 10Gbit Network Adapter"), + + PVID(AQUANTIA_VENDOR_ID, AQ_DEVICE_ID_AQC107S, "Aquantia AQtion 10Gbit Network Adapter"), + PVID(AQUANTIA_VENDOR_ID, AQ_DEVICE_ID_AQC108S, "Aquantia AQtion 5Gbit Network Adapter"), + PVID(AQUANTIA_VENDOR_ID, AQ_DEVICE_ID_AQC109S, "Aquantia AQtion 2.5Gbit Network Adapter"), + PVID(AQUANTIA_VENDOR_ID, AQ_DEVICE_ID_AQC100S, "Aquantia AQtion 10Gbit Network Adapter"), + + PVID(AQUANTIA_VENDOR_ID, AQ_DEVICE_ID_AQC111, "Aquantia AQtion 5Gbit Network Adapter"), + PVID(AQUANTIA_VENDOR_ID, AQ_DEVICE_ID_AQC112, "Aquantia AQtion 2.5Gbit Network Adapter"), + PVID(AQUANTIA_VENDOR_ID, AQ_DEVICE_ID_AQC111S, "Aquantia AQtion 5Gbit Network Adapter"), + PVID(AQUANTIA_VENDOR_ID, AQ_DEVICE_ID_AQC112S, "Aquantia AQtion 2.5Gbit Network Adapter"), + + PVID_END +}; + + +/* Device setup, teardown, etc */ +static void *aq_register(device_t dev); +static int aq_if_attach_pre(if_ctx_t ctx); +static int aq_if_attach_post(if_ctx_t ctx); +static int aq_if_detach(if_ctx_t ctx); +static int aq_if_shutdown(if_ctx_t ctx); +static int aq_if_suspend(if_ctx_t ctx); +static int aq_if_resume(if_ctx_t ctx); + +/* Soft queue setup and teardown */ +static int aq_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, + uint64_t *paddrs, int ntxqs, int ntxqsets); +static int aq_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, + uint64_t *paddrs, int nrxqs, int nrxqsets); +static void 
aq_if_queues_free(if_ctx_t ctx); + +/* Device configuration */ +static void aq_if_init(if_ctx_t ctx); +static void aq_if_stop(if_ctx_t ctx); +static void aq_if_multi_set(if_ctx_t ctx); +static int aq_if_mtu_set(if_ctx_t ctx, uint32_t mtu); +static void aq_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr); +static int aq_if_media_change(if_ctx_t ctx); +static int aq_if_promisc_set(if_ctx_t ctx, int flags); +static uint64_t aq_if_get_counter(if_ctx_t ctx, ift_counter cnt); +static void aq_if_timer(if_ctx_t ctx, uint16_t qid); +static int aq_if_priv_ioctl(if_ctx_t ctx, u_long command, caddr_t data); +static int aq_hw_capabilities(struct aq_dev *softc); +static void aq_add_stats_sysctls(struct aq_dev *softc); + +/* Interrupt enable / disable */ +static void aq_if_enable_intr(if_ctx_t ctx); +static void aq_if_disable_intr(if_ctx_t ctx); +static int aq_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid); +static int aq_if_msix_intr_assign(if_ctx_t ctx, int msix); + +/* VLAN support */ +static bool aq_is_vlan_promisc_required(struct aq_dev *softc); +static void aq_update_vlan_filters(struct aq_dev *softc); +static void aq_if_vlan_register(if_ctx_t ctx, uint16_t vtag); +static void aq_if_vlan_unregister(if_ctx_t ctx, uint16_t vtag); + +/* Informational/diagnostic */ +static void aq_if_debug(if_ctx_t ctx); +static void aq_if_led_func(if_ctx_t ctx, int onoff); + +static device_method_t aq_methods[] = { + DEVMETHOD(device_register, aq_register), + DEVMETHOD(device_probe, iflib_device_probe), + DEVMETHOD(device_attach, iflib_device_attach), + DEVMETHOD(device_detach, iflib_device_detach), + DEVMETHOD(device_shutdown, iflib_device_shutdown), + DEVMETHOD(device_suspend, iflib_device_suspend), + DEVMETHOD(device_resume, iflib_device_resume), + + DEVMETHOD_END +}; + +static driver_t aq_driver = { + "aq", aq_methods, sizeof(struct aq_dev), +}; + +static devclass_t aq_devclass; +DRIVER_MODULE(atlantic, pci, aq_driver, aq_devclass, 0, 0); + +MODULE_DEPEND(atlantic, pci, 1, 1, 1); +MODULE_DEPEND(atlantic, ether, 1, 1, 1); +MODULE_DEPEND(atlantic, iflib, 1, 1, 1); + +IFLIB_PNP_INFO(pci, atlantic, aq_vendor_info_array); + +static device_method_t aq_if_methods[] = { + /* Device setup, teardown, etc */ + DEVMETHOD(ifdi_attach_pre, aq_if_attach_pre), + DEVMETHOD(ifdi_attach_post, aq_if_attach_post), + DEVMETHOD(ifdi_detach, aq_if_detach), + + DEVMETHOD(ifdi_shutdown, aq_if_shutdown), + DEVMETHOD(ifdi_suspend, aq_if_suspend), + DEVMETHOD(ifdi_resume, aq_if_resume), + + /* Soft queue setup and teardown */ + DEVMETHOD(ifdi_tx_queues_alloc, aq_if_tx_queues_alloc), + DEVMETHOD(ifdi_rx_queues_alloc, aq_if_rx_queues_alloc), + DEVMETHOD(ifdi_queues_free, aq_if_queues_free), + + /* Device configuration */ + DEVMETHOD(ifdi_init, aq_if_init), + DEVMETHOD(ifdi_stop, aq_if_stop), + DEVMETHOD(ifdi_multi_set, aq_if_multi_set), + DEVMETHOD(ifdi_mtu_set, aq_if_mtu_set), + DEVMETHOD(ifdi_media_status, aq_if_media_status), + DEVMETHOD(ifdi_media_change, aq_if_media_change), + DEVMETHOD(ifdi_promisc_set, aq_if_promisc_set), + DEVMETHOD(ifdi_get_counter, aq_if_get_counter), + DEVMETHOD(ifdi_update_admin_status, aq_if_update_admin_status), + DEVMETHOD(ifdi_timer, aq_if_timer), +// DEVMETHOD(ifdi_priv_ioctl, aq_if_priv_ioctl), + + /* Interrupt enable / disable */ + DEVMETHOD(ifdi_intr_enable, aq_if_enable_intr), + DEVMETHOD(ifdi_intr_disable, aq_if_disable_intr), + DEVMETHOD(ifdi_rx_queue_intr_enable, aq_if_rx_queue_intr_enable), + DEVMETHOD(ifdi_tx_queue_intr_enable, aq_if_rx_queue_intr_enable), + 
DEVMETHOD(ifdi_msix_intr_assign, aq_if_msix_intr_assign), + + /* VLAN support */ + DEVMETHOD(ifdi_vlan_register, aq_if_vlan_register), + DEVMETHOD(ifdi_vlan_unregister, aq_if_vlan_unregister), + + /* Informational/diagnostic */ + DEVMETHOD(ifdi_led_func, aq_if_led_func), +// DEVMETHOD(ifdi_debug, aq_if_debug), + + DEVMETHOD_END +}; + +static driver_t aq_if_driver = { + "aq_if", aq_if_methods, sizeof(struct aq_dev) +}; + +static struct if_shared_ctx aq_sctx_init = { + .isc_magic = IFLIB_MAGIC, + .isc_q_align = PAGE_SIZE, + .isc_tx_maxsize = HW_ATL_B0_TSO_SIZE, + .isc_tx_maxsegsize = HW_ATL_B0_MTU_JUMBO, +#if __FreeBSD__ >= 12 + .isc_tso_maxsize = HW_ATL_B0_TSO_SIZE, + .isc_tso_maxsegsize = HW_ATL_B0_MTU_JUMBO, +#endif + .isc_rx_maxsize = HW_ATL_B0_MTU_JUMBO, + .isc_rx_nsegments = 16, + .isc_rx_maxsegsize = PAGE_SIZE, + .isc_nfl = 1, + .isc_nrxqs = 1, + .isc_ntxqs = 1, + .isc_admin_intrcnt = 1, + .isc_vendor_info = aq_vendor_info_array, + .isc_driver_version = aq_driver_version, + .isc_driver = &aq_if_driver, + .isc_flags = IFLIB_NEED_SCRATCH | IFLIB_TSO_INIT_IP | + IFLIB_NEED_ZERO_CSUM, + + .isc_nrxd_min = {HW_ATL_B0_MIN_RXD}, + .isc_ntxd_min = {HW_ATL_B0_MIN_TXD}, + .isc_nrxd_max = {HW_ATL_B0_MAX_RXD}, + .isc_ntxd_max = {HW_ATL_B0_MAX_TXD}, + .isc_nrxd_default = {PAGE_SIZE / sizeof(aq_txc_desc_t) * 4}, + .isc_ntxd_default = {PAGE_SIZE / sizeof(aq_txc_desc_t) * 4}, +}; + +/* + * TUNEABLE PARAMETERS: + */ + +static SYSCTL_NODE(_hw, OID_AUTO, aq, CTLFLAG_RD, 0, "Atlantic driver parameters"); +/* UDP Receive-Side Scaling */ +static int aq_enable_rss_udp = 1; +SYSCTL_INT(_hw_aq, OID_AUTO, enable_rss_udp, CTLFLAG_RDTUN, &aq_enable_rss_udp, 0, + "Enable Receive-Side Scaling (RSS) for UDP"); + + +/* + * Device Methods + */ +static void *aq_register(device_t dev) +{ + return (&aq_sctx_init); +} + +static int aq_if_attach_pre(if_ctx_t ctx) +{ + struct aq_dev *softc; + struct aq_hw *hw; + if_softc_ctx_t scctx; + int rc; + + AQ_DBG_ENTER(); + softc = iflib_get_softc(ctx); + rc = 0; + + softc->ctx = ctx; + softc->dev = iflib_get_dev(ctx); + softc->media = iflib_get_media(ctx); + softc->scctx = iflib_get_softc_ctx(ctx); + softc->sctx = iflib_get_sctx(ctx); + scctx = softc->scctx; + + softc->mmio_rid = PCIR_BAR(0); + softc->mmio_res = bus_alloc_resource_any(softc->dev, SYS_RES_MEMORY, + &softc->mmio_rid, RF_ACTIVE|RF_SHAREABLE); + if (softc->mmio_res == NULL) { + device_printf(softc->dev, + "failed to allocate MMIO resources\n"); + rc = ENXIO; + goto fail; + } + + softc->mmio_tag = rman_get_bustag(softc->mmio_res); + softc->mmio_handle = rman_get_bushandle(softc->mmio_res); + softc->mmio_size = rman_get_size(softc->mmio_res); + softc->hw.hw_addr = (u8*) softc->mmio_handle; + hw = &softc->hw; + hw->link_rate = aq_fw_speed_auto; + hw->itr = -1; + hw->fc.fc_rx = 1; + hw->fc.fc_tx = 1; + softc->linkup = 0U; + + /* Look up ops and caps. 
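+ * aq_hw_mpi_create() initializes the firmware interface (hw->fw_ops,
+ * checked below before use); aq_hw_capabilities() maps the PCI device
+ * ID to the media type and supported link speeds.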
*/ + rc = aq_hw_mpi_create(hw); + if (rc < 0) { + AQ_DBG_ERROR(" %s: aq_hw_mpi_create fail err=%d", __func__, rc); + goto fail; + } + + if (hw->fast_start_enabled) { + if (hw->fw_ops && hw->fw_ops->reset) + hw->fw_ops->reset(hw); + } else + aq_hw_reset(&softc->hw); + aq_hw_capabilities(softc); + + if (aq_hw_get_mac_permanent(hw, hw->mac_addr) < 0) { + AQ_DBG_ERROR("Unable to get mac addr from hw"); + goto fail; + }; + + softc->admin_ticks = 0; + + iflib_set_mac(ctx, hw->mac_addr); +#if __FreeBSD__ < 13 + /* since FreeBSD13 deadlock due to calling iflib_led_func() under CTX_LOCK() */ + iflib_led_create(ctx); +#endif + scctx->isc_tx_csum_flags = CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_TSO; +#if __FreeBSD__ >= 12 + scctx->isc_capabilities = IFCAP_RXCSUM | IFCAP_TXCSUM | IFCAP_HWCSUM | IFCAP_TSO | + IFCAP_JUMBO_MTU | IFCAP_VLAN_HWFILTER | + IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING | + IFCAP_VLAN_HWCSUM; + scctx->isc_capenable = scctx->isc_capabilities; +#else + if_t ifp; + ifp = iflib_get_ifp(ctx); + ifp->if_capenable = IFCAP_RXCSUM | IFCAP_TXCSUM | IFCAP_HWCSUM | IFCAP_TSO | + IFCAP_JUMBO_MTU | IFCAP_VLAN_HWFILTER | + IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING | + IFCAP_VLAN_HWCSUM; +#endif + scctx->isc_tx_nsegments = 31, + scctx->isc_tx_tso_segments_max = 31; + scctx->isc_tx_tso_size_max = HW_ATL_B0_TSO_SIZE - sizeof(struct ether_vlan_header); + scctx->isc_tx_tso_segsize_max = HW_ATL_B0_MTU_JUMBO; + scctx->isc_min_frame_size = 52; + scctx->isc_txrx = &aq_txrx; + + scctx->isc_txqsizes[0] = sizeof(aq_tx_desc_t) * scctx->isc_ntxd[0]; + scctx->isc_rxqsizes[0] = sizeof(aq_rx_desc_t) * scctx->isc_nrxd[0]; + + scctx->isc_ntxqsets_max = HW_ATL_B0_RINGS_MAX; + scctx->isc_nrxqsets_max = HW_ATL_B0_RINGS_MAX; + + /* iflib will map and release this bar */ + scctx->isc_msix_bar = pci_msix_table_bar(softc->dev); + + softc->vlan_tags = bit_alloc(4096, M_AQ, M_NOWAIT); + + AQ_DBG_EXIT(rc); + return (rc); + +fail: + if (softc->mmio_res != NULL) + bus_release_resource(softc->dev, SYS_RES_MEMORY, + softc->mmio_rid, softc->mmio_res); + + AQ_DBG_EXIT(rc); + return (ENXIO); +} + + +static int aq_if_attach_post(if_ctx_t ctx) +{ + struct aq_dev *softc; + int rc; + + AQ_DBG_ENTER(); + + softc = iflib_get_softc(ctx); + rc = 0; + + aq_update_hw_stats(softc); + + aq_initmedia(softc); + + + switch (softc->scctx->isc_intr) { + case IFLIB_INTR_LEGACY: + rc = EOPNOTSUPP; + goto exit; + goto exit; + break; + case IFLIB_INTR_MSI: + break; + case IFLIB_INTR_MSIX: + break; + default: + device_printf(softc->dev, "unknown interrupt mode\n"); + rc = EOPNOTSUPP; + goto exit; + } + + aq_add_stats_sysctls(softc); + /* RSS */ + arc4rand(softc->rss_key, HW_ATL_RSS_HASHKEY_SIZE, 0); + for (int i = ARRAY_SIZE(softc->rss_table); i--;){ + softc->rss_table[i] = i & (softc->rx_rings_count - 1); + } +exit: + AQ_DBG_EXIT(rc); + return (rc); +} + + +static int aq_if_detach(if_ctx_t ctx) +{ + struct aq_dev *softc; + int i; + + AQ_DBG_ENTER(); + softc = iflib_get_softc(ctx); + + aq_hw_deinit(&softc->hw); + + for (i = 0; i < softc->scctx->isc_nrxqsets; i++) + iflib_irq_free(ctx, &softc->rx_rings[i]->irq); + iflib_irq_free(ctx, &softc->irq); + + + if (softc->mmio_res != NULL) + bus_release_resource(softc->dev, SYS_RES_MEMORY, + softc->mmio_rid, softc->mmio_res); + + free(softc->vlan_tags, M_AQ); + + AQ_DBG_EXIT(0); + return (0); +} + +static int aq_if_shutdown(if_ctx_t ctx) +{ + + AQ_DBG_ENTER(); + + AQ_XXX_UNIMPLEMENTED_FUNCTION; + + AQ_DBG_EXIT(0); + return (0); +} + +static int aq_if_suspend(if_ctx_t ctx) +{ + AQ_DBG_ENTER(); + + 
AQ_XXX_UNIMPLEMENTED_FUNCTION; + + AQ_DBG_EXIT(0); + return (0); +} + +static int aq_if_resume(if_ctx_t ctx) +{ + AQ_DBG_ENTER(); + + AQ_XXX_UNIMPLEMENTED_FUNCTION; + + AQ_DBG_EXIT(0); + return (0); +} + +/* Soft queue setup and teardown */ +static int aq_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, + uint64_t *paddrs, int ntxqs, int ntxqsets) +{ + struct aq_dev *softc; + struct aq_ring *ring; + int rc = 0, i; + + AQ_DBG_ENTERA("ntxqs=%d, ntxqsets=%d", ntxqs, ntxqsets); + softc = iflib_get_softc(ctx); + AQ_DBG_PRINT("tx descriptors number %d", softc->scctx->isc_ntxd[0]); + + for (i = 0; i < ntxqsets; i++) { + ring = softc->tx_rings[i] = malloc(sizeof(struct aq_ring), + M_AQ, M_NOWAIT | M_ZERO); + if (!ring){ + rc = ENOMEM; + device_printf(softc->dev, "atlantic: tx_ring malloc fail\n"); + goto fail; + } + ring->tx_descs = (aq_tx_desc_t*)vaddrs[i]; + ring->tx_size = softc->scctx->isc_ntxd[0]; + ring->tx_descs_phys = paddrs[i]; + ring->tx_head = ring->tx_tail = 0; + ring->index = i; + ring->dev = softc; + + softc->tx_rings_count++; + } + + AQ_DBG_EXIT(rc); + return (rc); + +fail: + aq_if_queues_free(ctx); + AQ_DBG_EXIT(rc); + return (rc); +} + +static int aq_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, + uint64_t *paddrs, int nrxqs, int nrxqsets) +{ + struct aq_dev *softc; + struct aq_ring *ring; + int rc = 0, i; + + AQ_DBG_ENTERA("nrxqs=%d, nrxqsets=%d", nrxqs, nrxqsets); + softc = iflib_get_softc(ctx); + + for (i = 0; i < nrxqsets; i++) { + ring = softc->rx_rings[i] = malloc(sizeof(struct aq_ring), + M_AQ, M_NOWAIT | M_ZERO); + if (!ring){ + rc = ENOMEM; + device_printf(softc->dev, "atlantic: rx_ring malloc fail\n"); + goto fail; + } + + ring->rx_descs = (aq_rx_desc_t*)vaddrs[i]; + ring->rx_descs_phys = paddrs[i]; + ring->rx_size = softc->scctx->isc_nrxd[0]; + ring->index = i; + ring->dev = softc; + + switch (MCLBYTES) { + case (4 * 1024): + case (8 * 1024): + case (16 * 1024): + ring->rx_max_frame_size = MCLBYTES; + break; + default: + ring->rx_max_frame_size = 2048; + break; + } + + softc->rx_rings_count++; + } + + AQ_DBG_EXIT(rc); + return (rc); + +fail: + aq_if_queues_free(ctx); + AQ_DBG_EXIT(rc); + return (rc); +} + +static void aq_if_queues_free(if_ctx_t ctx) +{ + struct aq_dev *softc; + int i; + + AQ_DBG_ENTER(); + softc = iflib_get_softc(ctx); + + for (i = 0; i < softc->tx_rings_count; i++) { + if (softc->tx_rings[i]) { + free(softc->tx_rings[i], M_AQ); + softc->tx_rings[i] = NULL; + } + } + softc->tx_rings_count = 0; + for (i = 0; i < softc->rx_rings_count; i++) { + if (softc->rx_rings[i]){ + free(softc->rx_rings[i], M_AQ); + softc->rx_rings[i] = NULL; + } + } + softc->rx_rings_count = 0; + + AQ_DBG_EXIT(0); + return; +} + +/* Device configuration */ +static void aq_if_init(if_ctx_t ctx) +{ + struct aq_dev *softc; + struct aq_hw *hw; + struct ifmediareq ifmr; + int i, err; + + AQ_DBG_ENTER(); + softc = iflib_get_softc(ctx); + hw = &softc->hw; + + err = aq_hw_init(&softc->hw, softc->hw.mac_addr, softc->msix, + softc->scctx->isc_intr == IFLIB_INTR_MSIX); + if (err != EOK) { + device_printf(softc->dev, "atlantic: aq_hw_init: %d", err); + } + + aq_if_media_status(ctx, &ifmr); + + aq_update_vlan_filters(softc); + + for (i = 0; i < softc->tx_rings_count; i++) { + struct aq_ring *ring = softc->tx_rings[i]; + err = aq_ring_tx_init(&softc->hw, ring); + if (err) { + device_printf(softc->dev, "atlantic: aq_ring_tx_init: %d", err); + } + err = aq_ring_tx_start(hw, ring); + if (err != EOK) { + device_printf(softc->dev, "atlantic: aq_ring_tx_start: %d", err); + } + } + for (i = 0; i < 
softc->rx_rings_count; i++) { + struct aq_ring *ring = softc->rx_rings[i]; + err = aq_ring_rx_init(&softc->hw, ring); + if (err) { + device_printf(softc->dev, "atlantic: aq_ring_rx_init: %d", err); + } + err = aq_ring_rx_start(hw, ring); + if (err != EOK) { + device_printf(softc->dev, "atlantic: aq_ring_rx_start: %d", err); + } + aq_if_rx_queue_intr_enable(ctx, i); + } + + aq_hw_start(hw); + aq_if_enable_intr(ctx); + aq_hw_rss_hash_set(&softc->hw, softc->rss_key); + aq_hw_rss_set(&softc->hw, softc->rss_table); + aq_hw_udp_rss_enable(hw, aq_enable_rss_udp); + aq_hw_set_link_speed(hw, hw->link_rate); + + AQ_DBG_EXIT(0); +} + + +static void aq_if_stop(if_ctx_t ctx) +{ + struct aq_dev *softc; + struct aq_hw *hw; + int i; + + AQ_DBG_ENTER(); + + softc = iflib_get_softc(ctx); + hw = &softc->hw; + + /* disable interrupt */ + aq_if_disable_intr(ctx); + + for (i = 0; i < softc->tx_rings_count; i++) { + aq_ring_tx_stop(hw, softc->tx_rings[i]); + softc->tx_rings[i]->tx_head = 0; + softc->tx_rings[i]->tx_tail = 0; + } + for (i = 0; i < softc->rx_rings_count; i++) { + aq_ring_rx_stop(hw, softc->rx_rings[i]); + } + + aq_hw_reset(&softc->hw); + memset(&softc->last_stats, 0, sizeof(softc->last_stats)); + softc->linkup = false; + aq_if_update_admin_status(ctx); + AQ_DBG_EXIT(0); +} + +static uint64_t aq_if_get_counter(if_ctx_t ctx, ift_counter cnt) +{ + struct aq_dev *softc = iflib_get_softc(ctx); + struct ifnet *ifp = iflib_get_ifp(ctx); + + switch (cnt) { + case IFCOUNTER_IERRORS: + return (softc->curr_stats.erpr); + case IFCOUNTER_IQDROPS: + return (softc->curr_stats.dpc); + case IFCOUNTER_OERRORS: + return (softc->curr_stats.erpt); + default: + return (if_get_counter_default(ifp, cnt)); + } +} + +#if __FreeBSD_version >= 1300054 +static u_int aq_mc_filter_apply(void *arg, struct sockaddr_dl *dl, u_int count) +{ + struct aq_dev *softc = arg; + struct aq_hw *hw = &softc->hw; + u8 *mac_addr = NULL; + + if (count == AQ_HW_MAC_MAX) + return (0); + + mac_addr = LLADDR(dl); + aq_hw_mac_addr_set(hw, mac_addr, count + 1); + + aq_log_detail("set %d mc address %6D", count + 1, mac_addr, ":"); + return (1); +} +#else +static int aq_mc_filter_apply(void *arg, struct ifmultiaddr *ifma, int count) +{ + struct aq_dev *softc = arg; + struct aq_hw *hw = &softc->hw; + u8 *mac_addr = NULL; + + if (ifma->ifma_addr->sa_family != AF_LINK) + return (0); + if (count == AQ_HW_MAC_MAX) + return (0); + + mac_addr = LLADDR((struct sockaddr_dl *)ifma->ifma_addr); + aq_hw_mac_addr_set(hw, mac_addr, count + 1); + + aq_log_detail("set %d mc address %6D", count + 1, mac_addr, ":"); + return (1); +} +#endif + +static bool aq_is_mc_promisc_required(struct aq_dev *softc) +{ + return (softc->mcnt >= AQ_HW_MAC_MAX); +} + +static void aq_if_multi_set(if_ctx_t ctx) +{ + struct aq_dev *softc = iflib_get_softc(ctx); + struct ifnet *ifp = iflib_get_ifp(ctx); + struct aq_hw *hw = &softc->hw; + AQ_DBG_ENTER(); +#if __FreeBSD_version >= 1300054 + softc->mcnt = if_llmaddr_count(iflib_get_ifp(ctx)); +#else + softc->mcnt = if_multiaddr_count(iflib_get_ifp(ctx), AQ_HW_MAC_MAX); +#endif + if (softc->mcnt >= AQ_HW_MAC_MAX) + { + aq_hw_set_promisc(hw, !!(ifp->if_flags & IFF_PROMISC), + aq_is_vlan_promisc_required(softc), + !!(ifp->if_flags & IFF_ALLMULTI) || aq_is_mc_promisc_required(softc)); + }else{ +#if __FreeBSD_version >= 1300054 + if_foreach_llmaddr(iflib_get_ifp(ctx), &aq_mc_filter_apply, softc); +#else + if_multi_apply(iflib_get_ifp(ctx), aq_mc_filter_apply, softc); +#endif + } + AQ_DBG_EXIT(0); +} + +static int aq_if_mtu_set(if_ctx_t ctx, 
uint32_t mtu) +{ + int err = 0; + AQ_DBG_ENTER(); + + AQ_DBG_EXIT(err); + return (err); +} + +static void aq_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr) +{ + struct ifnet *ifp; + + AQ_DBG_ENTER(); + + ifp = iflib_get_ifp(ctx); + + aq_mediastatus(ifp, ifmr); + + AQ_DBG_EXIT(0); +} + +static int aq_if_media_change(if_ctx_t ctx) +{ + struct aq_dev *softc = iflib_get_softc(ctx); + struct ifnet *ifp = iflib_get_ifp(ctx); + int rc = 0; + + AQ_DBG_ENTER(); + + /* Not allowd in UP state, since causes unsync of rings */ + if ((ifp->if_flags & IFF_UP)){ + rc = EPERM; + goto exit; + } + + ifp = iflib_get_ifp(softc->ctx); + + rc = aq_mediachange(ifp); + +exit: + AQ_DBG_EXIT(rc); + return (rc); +} + +static int aq_if_promisc_set(if_ctx_t ctx, int flags) +{ + struct aq_dev *softc; + + AQ_DBG_ENTER(); + + softc = iflib_get_softc(ctx); + + aq_hw_set_promisc(&softc->hw, !!(flags & IFF_PROMISC), + aq_is_vlan_promisc_required(softc), + !!(flags & IFF_ALLMULTI) || aq_is_mc_promisc_required(softc)); + + AQ_DBG_EXIT(0); + return (0); +} + +static void aq_if_timer(if_ctx_t ctx, uint16_t qid) +{ + struct aq_dev *softc; + uint64_t ticks_now; + +// AQ_DBG_ENTER(); + + softc = iflib_get_softc(ctx); + ticks_now = ticks; + + /* Schedule aqc_if_update_admin_status() once per sec */ + if (ticks_now - softc->admin_ticks >= hz) { + softc->admin_ticks = ticks_now; + iflib_admin_intr_deferred(ctx); + } + +// AQ_DBG_EXIT(0); + return; + +} + +/* Interrupt enable / disable */ +static void aq_if_enable_intr(if_ctx_t ctx) +{ + struct aq_dev *softc = iflib_get_softc(ctx); + struct aq_hw *hw = &softc->hw; + + AQ_DBG_ENTER(); + + /* Enable interrupts */ + itr_irq_msk_setlsw_set(hw, BIT(softc->msix + 1) - 1); + + AQ_DBG_EXIT(0); +} + +static void aq_if_disable_intr(if_ctx_t ctx) +{ + struct aq_dev *softc = iflib_get_softc(ctx); + struct aq_hw *hw = &softc->hw; + + AQ_DBG_ENTER(); + + /* Disable interrupts */ + itr_irq_msk_clearlsw_set(hw, BIT(softc->msix + 1) - 1); + + AQ_DBG_EXIT(0); +} + +static int aq_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid) +{ + struct aq_dev *softc = iflib_get_softc(ctx); + struct aq_hw *hw = &softc->hw; + + AQ_DBG_ENTER(); + + itr_irq_msk_setlsw_set(hw, BIT(softc->rx_rings[rxqid]->msix)); + + AQ_DBG_EXIT(0); + return (0); +} + +static int aq_if_msix_intr_assign(if_ctx_t ctx, int msix) +{ + struct aq_dev *softc; + int i, vector = 0, rc; + char irq_name[16]; + int rx_vectors; + + AQ_DBG_ENTER(); + softc = iflib_get_softc(ctx); + + for (i = 0; i < softc->rx_rings_count; i++, vector++) { + snprintf(irq_name, sizeof(irq_name), "rxq%d", i); + rc = iflib_irq_alloc_generic(ctx, &softc->rx_rings[i]->irq, + vector + 1, IFLIB_INTR_RX, aq_isr_rx, softc->rx_rings[i], + softc->rx_rings[i]->index, irq_name); + device_printf(softc->dev, "Assign IRQ %u to rx ring %u\n", + vector, softc->rx_rings[i]->index); + + if (rc) { + device_printf(softc->dev, "failed to set up RX handler\n"); + i--; + goto fail; + } + + softc->rx_rings[i]->msix = vector; + } + + rx_vectors = vector; + + for (i = 0; i < softc->tx_rings_count; i++, vector++) { + snprintf(irq_name, sizeof(irq_name), "txq%d", i); + iflib_softirq_alloc_generic(ctx, &softc->rx_rings[i]->irq, IFLIB_INTR_TX, + softc->tx_rings[i], i, irq_name); + + softc->tx_rings[i]->msix = (vector % softc->rx_rings_count); + device_printf(softc->dev, "Assign IRQ %u to tx ring %u\n", + softc->tx_rings[i]->msix, softc->tx_rings[i]->index); + } + + rc = iflib_irq_alloc_generic(ctx, &softc->irq, rx_vectors + 1, + IFLIB_INTR_ADMIN, aq_linkstat_isr, + softc, 0, "aq"); + 
softc->msix = rx_vectors; + device_printf(softc->dev, "Assign IRQ %u to admin proc \n", + rx_vectors); + if (rc) { + device_printf(iflib_get_dev(ctx), "Failed to register admin handler"); + i = softc->rx_rings_count; + goto fail; + } + AQ_DBG_EXIT(0); + return (0); + +fail: + for (; i >= 0; i--) + iflib_irq_free(ctx, &softc->rx_rings[i]->irq); + AQ_DBG_EXIT(rc); + return (rc); +} + +static bool aq_is_vlan_promisc_required(struct aq_dev *softc) +{ + int vlan_tag_count; + + bit_count(softc->vlan_tags, 0, 4096, &vlan_tag_count); + + if (vlan_tag_count <= AQ_HW_VLAN_MAX_FILTERS) + return (false); + else + return (true); + +} + +static void aq_update_vlan_filters(struct aq_dev *softc) +{ + struct aq_rx_filter_vlan aq_vlans[AQ_HW_VLAN_MAX_FILTERS]; + struct aq_hw *hw = &softc->hw; + int bit_pos = 0; + int vlan_tag = -1; + int i; + + hw_atl_b0_hw_vlan_promisc_set(hw, true); + for (i = 0; i < AQ_HW_VLAN_MAX_FILTERS; i++) { + bit_ffs_at(softc->vlan_tags, bit_pos, 4096, &vlan_tag); + if (vlan_tag != -1) { + aq_vlans[i].enable = true; + aq_vlans[i].location = i; + aq_vlans[i].queue = 0xFF; + aq_vlans[i].vlan_id = vlan_tag; + bit_pos = vlan_tag; + } else { + aq_vlans[i].enable = false; + } + } + + hw_atl_b0_hw_vlan_set(hw, aq_vlans); + hw_atl_b0_hw_vlan_promisc_set(hw, aq_is_vlan_promisc_required(softc)); +} + +/* VLAN support */ +static void aq_if_vlan_register(if_ctx_t ctx, uint16_t vtag) +{ + struct aq_dev *softc = iflib_get_softc(ctx); + + AQ_DBG_ENTERA("%d", vtag); + + bit_set(softc->vlan_tags, vtag); + + aq_update_vlan_filters(softc); + + AQ_DBG_EXIT(0); +} + +static void aq_if_vlan_unregister(if_ctx_t ctx, uint16_t vtag) +{ + struct aq_dev *softc = iflib_get_softc(ctx); + + AQ_DBG_ENTERA("%d", vtag); + + bit_clear(softc->vlan_tags, vtag); + + aq_update_vlan_filters(softc); + + AQ_DBG_EXIT(0); +} + +static void aq_if_led_func(if_ctx_t ctx, int onoff) +{ + struct aq_dev *softc = iflib_get_softc(ctx); + struct aq_hw *hw = &softc->hw; + + AQ_DBG_ENTERA("%d", onoff); + if (hw->fw_ops && hw->fw_ops->led_control) + hw->fw_ops->led_control(hw, onoff); + + AQ_DBG_EXIT(0); +} + +static int aq_hw_capabilities(struct aq_dev *softc) +{ + + if (pci_get_vendor(softc->dev) != AQUANTIA_VENDOR_ID) + return (ENXIO); + + switch (pci_get_device(softc->dev)) { + case AQ_DEVICE_ID_D100: + case AQ_DEVICE_ID_AQC100: + case AQ_DEVICE_ID_AQC100S: + softc->media_type = AQ_MEDIA_TYPE_FIBRE; + softc->link_speeds = AQ_LINK_ALL & ~AQ_LINK_10G; + break; + + case AQ_DEVICE_ID_0001: + case AQ_DEVICE_ID_D107: + case AQ_DEVICE_ID_AQC107: + case AQ_DEVICE_ID_AQC107S: + softc->media_type = AQ_MEDIA_TYPE_TP; + softc->link_speeds = AQ_LINK_ALL; + break; + + case AQ_DEVICE_ID_D108: + case AQ_DEVICE_ID_AQC108: + case AQ_DEVICE_ID_AQC108S: + case AQ_DEVICE_ID_AQC111: + case AQ_DEVICE_ID_AQC111S: + softc->media_type = AQ_MEDIA_TYPE_TP; + softc->link_speeds = AQ_LINK_ALL & ~AQ_LINK_10G; + break; + + case AQ_DEVICE_ID_D109: + case AQ_DEVICE_ID_AQC109: + case AQ_DEVICE_ID_AQC109S: + case AQ_DEVICE_ID_AQC112: + case AQ_DEVICE_ID_AQC112S: + softc->media_type = AQ_MEDIA_TYPE_TP; + softc->link_speeds = AQ_LINK_ALL & ~(AQ_LINK_10G | AQ_LINK_5G); + break; + + default: + return (ENXIO); + } + + return (0); +} + +static int aq_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS) +{ + struct aq_dev *softc = (struct aq_dev *)arg1; + device_t dev = softc->dev; + struct sbuf *buf; + int error = 0; + + buf = sbuf_new_for_sysctl(NULL, NULL, 256, req); + if (!buf) { + device_printf(dev, "Could not allocate sbuf for output.\n"); + return (ENOMEM); + } + + /* Print 
out the redirection table */ + sbuf_cat(buf, "\nRSS Indirection table:\n"); + for (int i = 0; i < HW_ATL_RSS_INDIRECTION_TABLE_MAX; i++) { + sbuf_printf(buf, "%d ", softc->rss_table[i]); + if ((i+1) % 10 == 0) + sbuf_printf(buf, "\n"); + } + + sbuf_cat(buf, "\nRSS Key:\n"); + for (int i = 0; i < HW_ATL_RSS_HASHKEY_SIZE; i++) { + sbuf_printf(buf, "0x%02x ", softc->rss_key[i]); + } + sbuf_printf(buf, "\n"); + + error = sbuf_finish(buf); + if (error) + device_printf(dev, "Error finishing sbuf: %d\n", error); + + sbuf_delete(buf); + + return (0); +} + +static int aq_sysctl_print_tx_head(SYSCTL_HANDLER_ARGS) +{ + struct aq_ring *ring = arg1; + int error = 0; + unsigned int val; + + if (!ring) + return (0); + + val = tdm_tx_desc_head_ptr_get(&ring->dev->hw, ring->index); + + error = sysctl_handle_int(oidp, &val, 0, req); + if (error || !req->newptr) + return (error); + + return (0); +} + +static int aq_sysctl_print_tx_tail(SYSCTL_HANDLER_ARGS) +{ + struct aq_ring *ring = arg1; + int error = 0; + unsigned int val; + + if (!ring) + return (0); + + val = reg_tx_dma_desc_tail_ptr_get(&ring->dev->hw, ring->index); + + error = sysctl_handle_int(oidp, &val, 0, req); + if (error || !req->newptr) + return (error); + + return (0); +} + +static int aq_sysctl_print_rx_head(SYSCTL_HANDLER_ARGS) +{ + struct aq_ring *ring = arg1; + int error = 0; + unsigned int val; + + if (!ring) + return (0); + + val = rdm_rx_desc_head_ptr_get(&ring->dev->hw, ring->index); + + error = sysctl_handle_int(oidp, &val, 0, req); + if (error || !req->newptr) + return (error); + + return (0); +} + +static int aq_sysctl_print_rx_tail(SYSCTL_HANDLER_ARGS) +{ + struct aq_ring *ring = arg1; + int error = 0; + unsigned int val; + + if (!ring) + return (0); + + val = reg_rx_dma_desc_tail_ptr_get(&ring->dev->hw, ring->index); + + error = sysctl_handle_int(oidp, &val, 0, req); + if (error || !req->newptr) + return (error); + + return (0); +} + +static void aq_add_stats_sysctls(struct aq_dev *softc) +{ + device_t dev = softc->dev; + struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev); + struct sysctl_oid *tree = device_get_sysctl_tree(dev); + struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree); + struct aq_stats_s *stats = &softc->curr_stats; + struct sysctl_oid *stat_node, *queue_node; + struct sysctl_oid_list *stat_list, *queue_list; + +#define QUEUE_NAME_LEN 32 + char namebuf[QUEUE_NAME_LEN]; + /* RSS configuration */ + SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "print_rss_config", + CTLTYPE_STRING | CTLFLAG_RD, softc, 0, + aq_sysctl_print_rss_config, "A", "Prints RSS Configuration"); + + /* Driver Statistics */ + for (int i = 0; i < softc->tx_rings_count; i++) { + struct aq_ring *ring = softc->tx_rings[i]; + snprintf(namebuf, QUEUE_NAME_LEN, "tx_queue%d", i); + queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf, + CTLFLAG_RD, NULL, "Queue Name"); + queue_list = SYSCTL_CHILDREN(queue_node); + + SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_pkts", + CTLFLAG_RD, &(ring->stats.tx_pkts), "TX Packets"); + SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_bytes", + CTLFLAG_RD, &(ring->stats.tx_bytes), "TX Octets"); + SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_drops", + CTLFLAG_RD, &(ring->stats.tx_drops), "TX Drops"); + SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_queue_full", + CTLFLAG_RD, &(ring->stats.tx_queue_full), "TX Queue Full"); + SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "tx_head", + CTLTYPE_UINT | CTLFLAG_RD, ring, 0, + aq_sysctl_print_tx_head, "IU", "ring head pointer"); + SYSCTL_ADD_PROC(ctx, queue_list, 
OID_AUTO, "tx_tail", + CTLTYPE_UINT | CTLFLAG_RD, ring, 0, + aq_sysctl_print_tx_tail, "IU", "ring tail pointer"); + } + + for (int i = 0; i < softc->rx_rings_count; i++) { + struct aq_ring *ring = softc->rx_rings[i]; + snprintf(namebuf, QUEUE_NAME_LEN, "rx_queue%d", i); + queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf, + CTLFLAG_RD, NULL, "Queue Name"); + queue_list = SYSCTL_CHILDREN(queue_node); + + SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_pkts", + CTLFLAG_RD, &(ring->stats.rx_pkts), "RX Packets"); + SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes", + CTLFLAG_RD, &(ring->stats.rx_bytes), "TX Octets"); + SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "jumbo_pkts", + CTLFLAG_RD, &(ring->stats.jumbo_pkts), "Jumbo Packets"); + SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_err", + CTLFLAG_RD, &(ring->stats.rx_err), "RX Errors"); + SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irq", + CTLFLAG_RD, &(ring->stats.irq), "RX interrupts"); + SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rx_head", + CTLTYPE_UINT | CTLFLAG_RD, ring, 0, + aq_sysctl_print_rx_head, "IU", "ring head pointer"); + SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rx_tail", + CTLTYPE_UINT | CTLFLAG_RD, ring, 0, + aq_sysctl_print_rx_tail, "IU", " ring tail pointer"); + } + + stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac", + CTLFLAG_RD, NULL, "Statistics (read from HW registers)"); + stat_list = SYSCTL_CHILDREN(stat_node); + + SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd", + CTLFLAG_RD, &stats->prc, "Good Packets Received"); + SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "ucast_pkts_rcvd", + CTLFLAG_RD, &stats->uprc, "Unicast Packets Received"); + SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd", + CTLFLAG_RD, &stats->mprc, "Multicast Packets Received"); + SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_rcvd", + CTLFLAG_RD, &stats->bprc, "Broadcast Packets Received"); + SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rsc_pkts_rcvd", + CTLFLAG_RD, &stats->cprc, "Coalesced Packets Received"); + SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "err_pkts_rcvd", + CTLFLAG_RD, &stats->erpr, "Errors of Packet Receive"); + SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "drop_pkts_dma", + CTLFLAG_RD, &stats->dpc, "Dropped Packets in DMA"); + SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd", + CTLFLAG_RD, &stats->brc, "Good Octets Received"); + SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "ucast_octets_rcvd", + CTLFLAG_RD, &stats->ubrc, "Unicast Octets Received"); + SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_octets_rcvd", + CTLFLAG_RD, &stats->mbrc, "Multicast Octets Received"); + SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_octets_rcvd", + CTLFLAG_RD, &stats->bbrc, "Broadcast Octets Received"); + + SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd", + CTLFLAG_RD, &stats->ptc, "Good Packets Transmitted"); + SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "ucast_pkts_txd", + CTLFLAG_RD, &stats->uptc, "Unicast Packets Transmitted"); + SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd", + CTLFLAG_RD, &stats->mptc, "Multicast Packets Transmitted"); + SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd", + CTLFLAG_RD, &stats->bptc, "Broadcast Packets Transmitted"); + + SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "err_pkts_txd", + CTLFLAG_RD, &stats->erpt, "Errors of Packet Transmit"); + SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd", + CTLFLAG_RD, &stats->btc, "Good Octets Transmitted"); + SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, 
"ucast_octets_txd", + CTLFLAG_RD, &stats->ubtc, "Unicast Octets Transmitted"); + SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_octets_txd", + CTLFLAG_RD, &stats->mbtc, "Multicast Octets Transmitted"); + SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_octets_txd", + CTLFLAG_RD, &stats->bbtc, "Broadcast Octets Transmitted"); +} Index: sys/dev/aq/aq_media.c =================================================================== --- /dev/null +++ sys/dev/aq/aq_media.c @@ -0,0 +1,219 @@ +/* + * aQuantia Corporation Network Driver + * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * (1) Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * (2) Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * + * (3)The name of the author may not be used to endorse or promote + * products derived from this software without specific prior + * written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS + * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE + * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include +__FBSDID("$FreeBSD$"); + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "aq_device.h" + +#include "aq_fw.h" +#include "aq_dbg.h" + +#define AQ_HW_SUPPORT_SPEED(softc, s) ((softc)->link_speeds & s) + +void aq_mediastatus_update(aq_dev_t *aq_dev, u32 link_speed, const struct aq_hw_fc_info *fc_neg) +{ + struct aq_hw *hw = &aq_dev->hw; + + aq_dev->media_active = 0; + if (fc_neg->fc_rx) + aq_dev->media_active |= IFM_ETH_RXPAUSE; + if (fc_neg->fc_tx) + aq_dev->media_active |= IFM_ETH_TXPAUSE; + + switch(link_speed) { + case 100: + aq_dev->media_active |= IFM_100_TX | IFM_FDX; + break; + + case 1000: + aq_dev->media_active |= IFM_1000_T | IFM_FDX; + break; + + case 2500: + aq_dev->media_active |= IFM_2500_T | IFM_FDX; + break; + + case 5000: + aq_dev->media_active |= IFM_5000_T | IFM_FDX; + break; + + case 10000: + aq_dev->media_active |= IFM_10G_T | IFM_FDX; + break; + + case 0: + default: + aq_dev->media_active |= IFM_NONE; + break; + } + + if (hw->link_rate == aq_fw_speed_auto) + aq_dev->media_active |= IFM_AUTO; +} + +void aq_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr) +{ + aq_dev_t *aq_dev = iflib_get_softc(ifp->if_softc); + + ifmr->ifm_active = IFM_ETHER; + ifmr->ifm_status = IFM_AVALID; + + if (aq_dev->linkup) + ifmr->ifm_status |= IFM_ACTIVE; + + ifmr->ifm_active |= aq_dev->media_active; +} + +int aq_mediachange(struct ifnet *ifp) +{ + aq_dev_t *aq_dev = iflib_get_softc(ifp->if_softc); + struct aq_hw *hw = &aq_dev->hw; + int old_media_rate = ifp->if_baudrate; + int old_link_speed = hw->link_rate; + struct ifmedia *ifm = iflib_get_media(aq_dev->ctx); + int user_media = IFM_SUBTYPE(ifm->ifm_media); + uint64_t media_rate; + + AQ_DBG_ENTERA("media 0x%x", user_media); + + if (!(ifm->ifm_media & IFM_ETHER)) { + device_printf(aq_dev->dev, "%s(): aq_dev interface - bad media: 0x%X", __FUNCTION__, ifm->ifm_media); + return (0); // should never happen + } + + switch (user_media) { + case IFM_AUTO: // auto-select media + hw->link_rate = aq_fw_speed_auto; + media_rate = -1; + break; + + case IFM_NONE: // disable media + media_rate = 0; + hw->link_rate = 0; + iflib_link_state_change(aq_dev->ctx, LINK_STATE_DOWN, 0); + break; + + case IFM_100_TX: + hw->link_rate = aq_fw_100M; + media_rate = 100 * 1000; + break; + + case IFM_1000_T: + hw->link_rate = aq_fw_1G; + media_rate = 1000 * 1000; + break; + + case IFM_2500_T: + hw->link_rate = aq_fw_2G5; + media_rate = 2500 * 1000; + break; + + case IFM_5000_T: + hw->link_rate = aq_fw_5G; + media_rate = 5000 * 1000; + break; + + case IFM_10G_T: + hw->link_rate = aq_fw_10G; + media_rate = 10000 * 1000; + break; + + default: // should never happen + aq_log_error("unknown media: 0x%X", user_media); + return (0); + } + hw->fc.fc_rx = (ifm->ifm_media & IFM_ETH_RXPAUSE) ? 1 : 0; + hw->fc.fc_tx = (ifm->ifm_media & IFM_ETH_TXPAUSE) ? 
1 : 0; + + /* In down state just remember new link speed */ + if (!(ifp->if_flags & IFF_UP)) + return (0); + + if ((media_rate != old_media_rate) || (hw->link_rate != old_link_speed)) { + // re-initialize hardware with new parameters + aq_hw_set_link_speed(hw, hw->link_rate); + } + + AQ_DBG_EXIT(0); + return (0); +} + +static void aq_add_media_types(aq_dev_t *aq_dev, int media_link_speed) +{ + ifmedia_add(aq_dev->media, IFM_ETHER | media_link_speed | IFM_FDX, 0, NULL); + ifmedia_add(aq_dev->media, IFM_ETHER | media_link_speed | IFM_FDX | + IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE, 0, NULL); + ifmedia_add(aq_dev->media, IFM_ETHER | media_link_speed | IFM_FDX | + IFM_ETH_RXPAUSE, 0, NULL); + ifmedia_add(aq_dev->media, IFM_ETHER | media_link_speed | IFM_FDX | + IFM_ETH_TXPAUSE, 0, NULL); +} +void aq_initmedia(aq_dev_t *aq_dev) +{ + AQ_DBG_ENTER(); + + // ifconfig eth0 none + ifmedia_add(aq_dev->media, IFM_ETHER | IFM_NONE, 0, NULL); + + // ifconfig eth0 auto + aq_add_media_types(aq_dev, IFM_AUTO); + + if (AQ_HW_SUPPORT_SPEED(aq_dev, AQ_LINK_100M)) + aq_add_media_types(aq_dev, IFM_100_TX); + if (AQ_HW_SUPPORT_SPEED(aq_dev, AQ_LINK_1G)) + aq_add_media_types(aq_dev, IFM_1000_T); + if (AQ_HW_SUPPORT_SPEED(aq_dev, AQ_LINK_2G5)) + aq_add_media_types(aq_dev, IFM_2500_T); + if (AQ_HW_SUPPORT_SPEED(aq_dev, AQ_LINK_5G)) + aq_add_media_types(aq_dev, IFM_5000_T); + if (AQ_HW_SUPPORT_SPEED(aq_dev, AQ_LINK_10G)) + aq_add_media_types(aq_dev, IFM_10G_T); + + // link is initially autoselect + ifmedia_set(aq_dev->media, IFM_ETHER | IFM_AUTO | IFM_FDX | IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE); + + AQ_DBG_EXIT(0); +} Index: sys/dev/aq/aq_ring.h =================================================================== --- /dev/null +++ sys/dev/aq/aq_ring.h @@ -0,0 +1,184 @@ +/* + * aQuantia Corporation Network Driver + * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * (1) Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * (2) Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * + * (3)The name of the author may not be used to endorse or promote + * products derived from this software without specific prior + * written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS + * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE + * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef _AQ_RING_H_ +#define _AQ_RING_H_ + +#include "aq_hw.h" + +#define REFILL_THRESHOLD 128 + + +typedef volatile struct { + u32 rss_type:4; + u32 pkt_type:8; + u32 rdm_err:1; + u32 rsvd:6; + u32 rx_cntl:2; + u32 sph:1; + u32 hdr_len:10; + u32 rss_hash; + u16 dd:1; + u16 eop:1; + u16 rx_stat:4; + u16 rx_estat:6; + u16 rsc_cnt:4; + u16 pkt_len; + u16 next_desp; + u16 vlan; +} __attribute__((__packed__)) aq_rx_wb_t; + +typedef volatile struct { + union { + /* HW RX descriptor */ + struct __packed { + u64 buf_addr; + u64 hdr_addr; + } read; + + /* HW RX descriptor writeback */ + aq_rx_wb_t wb; + }; +} __attribute__((__packed__)) aq_rx_desc_t; + +/* Hardware tx descriptor */ +typedef volatile struct { + u64 buf_addr; + + union { + struct { + u32 type:3; + u32 :1; + u32 len:16; + u32 dd:1; + u32 eop:1; + u32 cmd:8; + u32 :14; + u32 ct_idx:1; + u32 ct_en:1; + u32 pay_len:18; + } __attribute__((__packed__)); + u64 flags; + }; +} __attribute__((__packed__)) aq_tx_desc_t; + +enum aq_tx_desc_type { + tx_desc_type_desc = 1, + tx_desc_type_ctx = 2, +}; + +enum aq_tx_desc_cmd { + tx_desc_cmd_vlan = 1, + tx_desc_cmd_fcs = 2, + tx_desc_cmd_ipv4 = 4, + tx_desc_cmd_l4cs = 8, + tx_desc_cmd_lso = 0x10, + tx_desc_cmd_wb = 0x20, +}; + +/* Hardware tx context descriptor */ +typedef volatile union { + struct __packed { + u64 flags1; + u64 flags2; + }; + + struct __packed { + u64 :40; + u32 tun_len:8; + u32 out_len:16; + u32 type:3; + u32 idx:1; + u32 vlan_tag:16; + u32 cmd:4; + u32 l2_len:7; + u32 l3_len:9; + u32 l4_len:8; + u32 mss_len:16; + }; +} __attribute__((__packed__)) aq_txc_desc_t; + +struct aq_ring_stats { + u64 rx_pkts; + u64 rx_bytes; + u64 jumbo_pkts; + u64 rx_err; + u64 irq; + + u64 tx_pkts; + u64 tx_bytes; + u64 tx_drops; + u64 tx_queue_full; +}; + +struct aq_dev; + +struct aq_ring { + struct aq_dev *dev; + int index; + + struct if_irq irq; + int msix; +/* RX */ + qidx_t rx_size; + int rx_max_frame_size; + void *rx_desc_area_ptr; + aq_rx_desc_t *rx_descs; + uint64_t rx_descs_phys; + +/* TX */ + int tx_head, tx_tail; + qidx_t tx_size; + void *tx_desc_area_ptr; + aq_tx_desc_t *tx_descs; + uint64_t tx_descs_phys; + + struct aq_ring_stats stats; +}; + +int aq_ring_rx_init(struct aq_hw *hw, struct aq_ring *ring); +int aq_ring_tx_init(struct aq_hw *hw, struct aq_ring *ring); + +int aq_ring_tx_start(struct aq_hw *hw, struct aq_ring *ring); +int aq_ring_tx_stop(struct aq_hw *hw, struct aq_ring *ring); +int aq_ring_rx_start(struct aq_hw *hw, struct aq_ring *ring); +int aq_ring_rx_stop(struct aq_hw *hw, struct aq_ring *ring); + +int aq_ring_tx_tail_update(struct aq_hw *hw, struct aq_ring *ring, u32 tail); + + +extern struct if_txrx aq_txrx; +int aq_intr(void *arg); + +#endif /* _AQ_RING_H_ */ Index: sys/dev/aq/aq_ring.c =================================================================== --- /dev/null +++ sys/dev/aq/aq_ring.c @@ -0,0 +1,581 @@ +/* + * aQuantia Corporation Network Driver + * Copyright (C) 2014-2018 aQuantia Corporation. All rights reserved + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * (1) Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * (2) Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. 
+ * + * (3)The name of the author may not be used to endorse or promote + * products derived from this software without specific prior + * written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS + * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE + * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include +__FBSDID("$FreeBSD$"); + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "aq_common.h" + +#include "aq_ring.h" +#include "aq_dbg.h" +#include "aq_device.h" +#include "aq_hw.h" +#include "aq_hw_llh.h" + +/* iflib txrx interface prototypes */ +static int aq_isc_txd_encap(void *arg, if_pkt_info_t pi); +static void aq_isc_txd_flush(void *arg, uint16_t txqid, qidx_t pidx); +static int aq_isc_txd_credits_update(void *arg, uint16_t txqid, bool clear); +static void aq_ring_rx_refill(void* arg, if_rxd_update_t iru); +static void aq_isc_rxd_flush(void *arg, uint16_t rxqid, uint8_t flid __unused, qidx_t pidx); +static int aq_isc_rxd_available(void *arg, uint16_t rxqid, qidx_t idx, qidx_t budget); +static int aq_isc_rxd_pkt_get(void *arg, if_rxd_info_t ri); + +struct if_txrx aq_txrx = { + .ift_txd_encap = aq_isc_txd_encap, + .ift_txd_flush = aq_isc_txd_flush, + .ift_txd_credits_update = aq_isc_txd_credits_update, + .ift_rxd_available = aq_isc_rxd_available, + .ift_rxd_pkt_get = aq_isc_rxd_pkt_get, + .ift_rxd_refill = aq_ring_rx_refill, + .ift_rxd_flush = aq_isc_rxd_flush, + .ift_legacy_intr = NULL +}; + + +static inline uint32_t +aq_next(uint32_t i, uint32_t lim) +{ + return (i == lim) ? 
0 : i + 1; +} + +int aq_ring_rx_init(struct aq_hw *hw, struct aq_ring *ring) +/* uint64_t ring_addr, + u32 ring_size, + u32 ring_idx, + u32 interrupt_cause, + u32 cpu_idx) */ +{ + int err; + u32 dma_desc_addr_lsw = (u32)ring->rx_descs_phys & 0xffffffff; + u32 dma_desc_addr_msw = (u32)(ring->rx_descs_phys >> 32); + + AQ_DBG_ENTERA("[%d]", ring->index); + + rdm_rx_desc_en_set(hw, false, ring->index); + + rdm_rx_desc_head_splitting_set(hw, 0U, ring->index); + + reg_rx_dma_desc_base_addresslswset(hw, dma_desc_addr_lsw, ring->index); + + reg_rx_dma_desc_base_addressmswset(hw, dma_desc_addr_msw, ring->index); + + rdm_rx_desc_len_set(hw, ring->rx_size / 8U, ring->index); + + device_printf(ring->dev->dev, "ring %d: __PAGESIZE=%d MCLBYTES=%d hw->max_frame_size=%d\n", + ring->index, PAGE_SIZE, MCLBYTES, ring->rx_max_frame_size); + rdm_rx_desc_data_buff_size_set(hw, ring->rx_max_frame_size / 1024U, ring->index); + + rdm_rx_desc_head_buff_size_set(hw, 0U, ring->index); + rdm_rx_desc_head_splitting_set(hw, 0U, ring->index); + rpo_rx_desc_vlan_stripping_set(hw, 0U, ring->index); + + /* Rx ring set mode */ + + /* Mapping interrupt vector */ + itr_irq_map_rx_set(hw, ring->msix, ring->index); + itr_irq_map_en_rx_set(hw, true, ring->index); + + rdm_cpu_id_set(hw, 0, ring->index); + rdm_rx_desc_dca_en_set(hw, 0U, ring->index); + rdm_rx_head_dca_en_set(hw, 0U, ring->index); + rdm_rx_pld_dca_en_set(hw, 0U, ring->index); + + err = aq_hw_err_from_flags(hw); + AQ_DBG_EXIT(err); + return (err); +} + +int aq_ring_tx_init(struct aq_hw *hw, struct aq_ring *ring) +/* uint64_t ring_addr, + u32 ring_size, + u32 ring_idx, + u32 interrupt_cause, + u32 cpu_idx) */ +{ + int err; + u32 dma_desc_addr_lsw = (u32)ring->tx_descs_phys & 0xffffffff; + u32 dma_desc_addr_msw = (u64)(ring->tx_descs_phys >> 32); + + AQ_DBG_ENTERA("[%d]", ring->index); + + tdm_tx_desc_en_set(hw, 0U, ring->index); + + reg_tx_dma_desc_base_addresslswset(hw, dma_desc_addr_lsw, ring->index); + + reg_tx_dma_desc_base_addressmswset(hw, dma_desc_addr_msw, ring->index); + + tdm_tx_desc_len_set(hw, ring->tx_size / 8U, ring->index); + + aq_ring_tx_tail_update(hw, ring, 0U); + + /* Set Tx threshold */ + tdm_tx_desc_wr_wb_threshold_set(hw, 0U, ring->index); + + /* Mapping interrupt vector */ + itr_irq_map_tx_set(hw, ring->msix, ring->index); + itr_irq_map_en_tx_set(hw, true, ring->index); + + tdm_cpu_id_set(hw, 0, ring->index); + tdm_tx_desc_dca_en_set(hw, 0U, ring->index); + + err = aq_hw_err_from_flags(hw); + AQ_DBG_EXIT(err); + return (err); +} + +int aq_ring_tx_tail_update(struct aq_hw *hw, struct aq_ring *ring, u32 tail) +{ + AQ_DBG_ENTERA("[%d]", ring->index); + reg_tx_dma_desc_tail_ptr_set(hw, tail, ring->index); + AQ_DBG_EXIT(0); + return (0); +} + +int aq_ring_tx_start(struct aq_hw *hw, struct aq_ring *ring) +{ + int err; + + AQ_DBG_ENTERA("[%d]", ring->index); + tdm_tx_desc_en_set(hw, 1U, ring->index); + err = aq_hw_err_from_flags(hw); + AQ_DBG_EXIT(err); + return (err); +} + +int aq_ring_rx_start(struct aq_hw *hw, struct aq_ring *ring) +{ + int err; + + AQ_DBG_ENTERA("[%d]", ring->index); + rdm_rx_desc_en_set(hw, 1U, ring->index); + err = aq_hw_err_from_flags(hw); + AQ_DBG_EXIT(err); + return (err); +} + +int aq_ring_tx_stop(struct aq_hw *hw, struct aq_ring *ring) +{ + int err; + + AQ_DBG_ENTERA("[%d]", ring->index); + tdm_tx_desc_en_set(hw, 0U, ring->index); + err = aq_hw_err_from_flags(hw); + AQ_DBG_EXIT(err); + return (err); +} + +int aq_ring_rx_stop(struct aq_hw *hw, struct aq_ring *ring) +{ + int err; + + AQ_DBG_ENTERA("[%d]", ring->index); + 
rdm_rx_desc_en_set(hw, 0U, ring->index); + /* Invalidate Descriptor Cache to prevent writing to the cached + * descriptors and to the data pointer of those descriptors + */ + rdm_rx_dma_desc_cache_init_tgl(hw); + err = aq_hw_err_from_flags(hw); + AQ_DBG_EXIT(err); + return (err); +} + +static void aq_ring_rx_refill(void* arg, if_rxd_update_t iru) +{ + aq_dev_t *aq_dev = arg; + aq_rx_desc_t *rx_desc; + struct aq_ring *ring; + qidx_t i, pidx; + + AQ_DBG_ENTERA("ring=%d iru_pidx=%d iru_count=%d iru->iru_buf_size=%d", + iru->iru_qsidx, iru->iru_pidx, iru->iru_count, iru->iru_buf_size); + + ring = aq_dev->rx_rings[iru->iru_qsidx]; + pidx = iru->iru_pidx; + + for (i = 0; i < iru->iru_count; i++) { + rx_desc = (aq_rx_desc_t *) &ring->rx_descs[pidx]; + rx_desc->read.buf_addr = htole64(iru->iru_paddrs[i]); + rx_desc->read.hdr_addr = 0; + + pidx=aq_next(pidx, ring->rx_size - 1); + } + + AQ_DBG_EXIT(0); +} + +static void aq_isc_rxd_flush(void *arg, uint16_t rxqid, uint8_t flid __unused, + qidx_t pidx) +{ + aq_dev_t *aq_dev = arg; + struct aq_ring *ring = aq_dev->rx_rings[rxqid]; + + AQ_DBG_ENTERA("[%d] tail=%u", ring->index, pidx); + reg_rx_dma_desc_tail_ptr_set(&aq_dev->hw, pidx, ring->index); + AQ_DBG_EXIT(0); +} + +static int aq_isc_rxd_available(void *arg, uint16_t rxqid, qidx_t idx, qidx_t budget) +{ + aq_dev_t *aq_dev = arg; + struct aq_ring *ring = aq_dev->rx_rings[rxqid]; + aq_rx_desc_t *rx_desc = (aq_rx_desc_t *) ring->rx_descs; + int cnt, i, iter; + + AQ_DBG_ENTERA("[%d] head=%u, budget %d", ring->index, idx, budget); + + for (iter = 0, cnt = 0, i = idx; iter < ring->rx_size && cnt <= budget;) { + trace_aq_rx_descr(ring->index, i, (volatile u64*)&rx_desc[i]); + if (!rx_desc[i].wb.dd) + break; + + if (rx_desc[i].wb.eop) { + iter++; + i = aq_next(i, ring->rx_size - 1); + + cnt++; + } else { + /* LRO/Jumbo: wait for whole packet be in the ring */ + if (rx_desc[i].wb.rsc_cnt) { + i = rx_desc[i].wb.next_desp; + iter++; + continue; + } else { + iter++; + i = aq_next(i, ring->rx_size - 1); + continue; + } + } + } + + AQ_DBG_EXIT(cnt); + return (cnt); +} + +static void aq_rx_set_cso_flags(aq_rx_desc_t *rx_desc, if_rxd_info_t ri) +{ + if ((rx_desc->wb.pkt_type & 0x3) == 0) { //IPv4 + if (rx_desc->wb.rx_cntl & BIT(0)){ // IPv4 csum checked + ri->iri_csum_flags |= CSUM_IP_CHECKED; + if (!(rx_desc->wb.rx_stat & BIT(1))) + ri->iri_csum_flags |= CSUM_IP_VALID; + } + } + if (rx_desc->wb.rx_cntl & BIT(1)) { // TCP/UDP csum checked + ri->iri_csum_flags |= CSUM_L4_CALC; + if (!(rx_desc->wb.rx_stat & BIT(2)) && // L4 csum error + (rx_desc->wb.rx_stat & BIT(3))) { // L4 csum valid + ri->iri_csum_flags |= CSUM_L4_VALID; + ri->iri_csum_data = htons(0xffff); + } + } +} + +static uint8_t bsd_rss_type[16] = { + [AQ_RX_RSS_TYPE_IPV4]=M_HASHTYPE_RSS_IPV4, + [AQ_RX_RSS_TYPE_IPV6]=M_HASHTYPE_RSS_IPV6, + [AQ_RX_RSS_TYPE_IPV4_TCP]=M_HASHTYPE_RSS_TCP_IPV4, + [AQ_RX_RSS_TYPE_IPV6_TCP]=M_HASHTYPE_RSS_TCP_IPV6, + [AQ_RX_RSS_TYPE_IPV4_UDP]=M_HASHTYPE_RSS_UDP_IPV4, + [AQ_RX_RSS_TYPE_IPV6_UDP]=M_HASHTYPE_RSS_UDP_IPV6, +}; + + + +static int aq_isc_rxd_pkt_get(void *arg, if_rxd_info_t ri) +{ + aq_dev_t *aq_dev = arg; + struct aq_ring *ring = aq_dev->rx_rings[ri->iri_qsidx]; + aq_rx_desc_t *rx_desc; + struct ifnet *ifp; + int cidx, rc = 0, i; + size_t len, total_len; + + AQ_DBG_ENTERA("[%d] start=%d", ring->index, ri->iri_cidx); + cidx = ri->iri_cidx; + ifp = iflib_get_ifp(aq_dev->ctx); + i = 0; + + do { + rx_desc = (aq_rx_desc_t *) &ring->rx_descs[cidx]; + + trace_aq_rx_descr(ring->index, cidx, (volatile u64*)rx_desc); + + if 
((rx_desc->wb.rx_stat & BIT(0)) != 0) { + ring->stats.rx_err++; + rc = (EBADMSG); + goto exit; + } + + if (!rx_desc->wb.eop) { + len = ring->rx_max_frame_size; + } else { + total_len = le32toh(rx_desc->wb.pkt_len); + len = total_len & (ring->rx_max_frame_size - 1); + } + ri->iri_frags[i].irf_flid = 0; + ri->iri_frags[i].irf_idx = cidx; + ri->iri_frags[i].irf_len = len; + + /* VLAN tag present (descriptor pkt_type VLAN bits set) */ + if ((rx_desc->wb.pkt_type & 0x60) != 0) { + ri->iri_flags |= M_VLANTAG; + ri->iri_vtag = le32toh(rx_desc->wb.vlan); + } + + i++; + cidx = aq_next(cidx, ring->rx_size - 1); + } while (!rx_desc->wb.eop); + + if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) { + aq_rx_set_cso_flags(rx_desc, ri); + } + ri->iri_rsstype = bsd_rss_type[rx_desc->wb.rss_type & 0xF]; + if (ri->iri_rsstype != M_HASHTYPE_NONE) { + ri->iri_flowid = le32toh(rx_desc->wb.rss_hash); + } + + ri->iri_len = total_len; + ri->iri_nfrags = i; + + ring->stats.rx_bytes += total_len; + ring->stats.rx_pkts++; + +exit: + AQ_DBG_EXIT(rc); + return (rc); +} + +/*****************************************************************************/ +/* */ +/*****************************************************************************/ + +static void aq_setup_offloads(aq_dev_t *aq_dev, if_pkt_info_t pi, aq_tx_desc_t *txd, u32 tx_cmd) +{ + AQ_DBG_ENTER(); + txd->cmd |= tx_desc_cmd_fcs; + txd->cmd |= (pi->ipi_csum_flags & (CSUM_IP|CSUM_TSO)) ? tx_desc_cmd_ipv4 : 0; + txd->cmd |= (pi->ipi_csum_flags & + (CSUM_IP_TCP | CSUM_IP6_TCP | CSUM_IP_UDP | CSUM_IP6_UDP) + ) ? tx_desc_cmd_l4cs : 0; + txd->cmd |= (pi->ipi_flags & IPI_TX_INTR) ? tx_desc_cmd_wb : 0; + txd->cmd |= tx_cmd; + AQ_DBG_EXIT(0); +} + +static int aq_ring_tso_setup(aq_dev_t *aq_dev, if_pkt_info_t pi, uint32_t *hdrlen, aq_txc_desc_t *txc) +{ + uint32_t tx_cmd = 0; + + AQ_DBG_ENTER(); + if (pi->ipi_csum_flags & CSUM_TSO) { + AQ_DBG_PRINT("aq_ring_tso_setup(): TSO enabled"); + tx_cmd |= tx_desc_cmd_lso | tx_desc_cmd_l4cs; + + if (pi->ipi_ipproto != IPPROTO_TCP) { + AQ_DBG_PRINT("aq_ring_tso_setup(): not TCP"); + AQ_DBG_EXIT(0); + return (0); + } + + txc->cmd = 0x4; /* TCP */ + + if (pi->ipi_csum_flags & CSUM_IP6_TCP) + txc->cmd |= 0x2; + + txc->l2_len = pi->ipi_ehdrlen; + txc->l3_len = pi->ipi_ip_hlen; + txc->l4_len = pi->ipi_tcp_hlen; + txc->mss_len = pi->ipi_tso_segsz; + *hdrlen = txc->l2_len + txc->l3_len + txc->l4_len; + } + + // Set VLAN tag + if (pi->ipi_mflags & M_VLANTAG) { + tx_cmd |= tx_desc_cmd_vlan; + txc->vlan_tag = htole16(pi->ipi_vtag); + } + + if (tx_cmd) { + txc->type = tx_desc_type_ctx; + txc->idx = 0; + } + + AQ_DBG_EXIT(tx_cmd); + return (tx_cmd); +} + +static int aq_isc_txd_encap(void *arg, if_pkt_info_t pi) +{ + aq_dev_t *aq_dev = arg; + struct aq_ring *ring; + aq_txc_desc_t *txc; + aq_tx_desc_t *txd = NULL; + bus_dma_segment_t *segs; + qidx_t pidx; + uint32_t hdrlen=0, pay_len; + uint8_t tx_cmd = 0; + int i, desc_count = 0; + + AQ_DBG_ENTERA("[%d] start=%d", pi->ipi_qsidx, pi->ipi_pidx); + ring = aq_dev->tx_rings[pi->ipi_qsidx]; + + segs = pi->ipi_segs; + pidx = pi->ipi_pidx; + txc = (aq_txc_desc_t *)&ring->tx_descs[pidx]; + AQ_DBG_PRINT("txc at 0x%p, txd at 0x%p len %d", txc, txd, pi->ipi_len); + + pay_len = pi->ipi_len; + + txc->flags1 = 0U; + txc->flags2 = 0U; + + tx_cmd = aq_ring_tso_setup(aq_dev, pi, &hdrlen, txc); + AQ_DBG_PRINT("tx_cmd = 0x%x", tx_cmd); + + if (tx_cmd) { + trace_aq_tx_context_descr(ring->index, pidx, (volatile void*)txc); + /* We've consumed the first desc, adjust counters */ + pidx = aq_next(pidx, ring->tx_size - 1); + + txd = &ring->tx_descs[pidx]; + txd->flags = 0U; + } else { + txd = 
(aq_tx_desc_t *)txc; + } + AQ_DBG_PRINT("txc at 0x%p, txd at 0x%p", txc, txd); + + txd->ct_en = !!tx_cmd; + + txd->type = tx_desc_type_desc; + + aq_setup_offloads(aq_dev, pi, txd, tx_cmd); + + if (tx_cmd) { + txd->ct_idx = 0; + } + + pay_len -= hdrlen; + + txd->pay_len = pay_len; + + AQ_DBG_PRINT("num_frag[%d] pay_len[%d]", pi->ipi_nsegs, pay_len); + for (i = 0; i < pi->ipi_nsegs; i++) { + if (desc_count > 0) { + txd = &ring->tx_descs[pidx]; + txd->flags = 0U; + } + + txd->buf_addr = htole64(segs[i].ds_addr); + + txd->type = tx_desc_type_desc; + txd->len = segs[i].ds_len; + txd->pay_len = pay_len; + if (i < pi->ipi_nsegs - 1) + trace_aq_tx_descr(ring->index, pidx, (volatile void*)txd); + + pidx = aq_next(pidx, ring->tx_size - 1); + + desc_count++; + } + // Last descriptor requires EOP and WB + txd->eop = 1U; + + AQ_DBG_DUMP_DESC(txd); + trace_aq_tx_descr(ring->index, pidx, (volatile void*)txd); + ring->tx_tail = pidx; + + ring->stats.tx_pkts++; + ring->stats.tx_bytes += pay_len; + + pi->ipi_new_pidx = pidx; + + AQ_DBG_EXIT(0); + return (0); +} + +static void aq_isc_txd_flush(void *arg, uint16_t txqid, qidx_t pidx) +{ + aq_dev_t *aq_dev = arg; + struct aq_ring *ring = aq_dev->tx_rings[txqid]; + AQ_DBG_ENTERA("[%d] tail=%d", ring->index, pidx); + + // Update the write pointer - submits packet for transmission + aq_ring_tx_tail_update(&aq_dev->hw, ring, pidx); + AQ_DBG_EXIT(0); +} + + +static inline unsigned int aq_avail_desc(int a, int b, int size) +{ + return (((b >= a)) ? ((size ) - b + a) : (a - b)); +} + +static int aq_isc_txd_credits_update(void *arg, uint16_t txqid, bool clear) +{ + aq_dev_t *aq_dev = arg; + struct aq_ring *ring = aq_dev->tx_rings[txqid]; + uint32_t head; + int avail; + + AQ_DBG_ENTERA("[%d] clear=%d", ring->index, clear); + avail = 0; + head = tdm_tx_desc_head_ptr_get(&aq_dev->hw, ring->index); + AQ_DBG_PRINT("swhead %d hwhead %d", ring->tx_head, head); + + if (ring->tx_head == head) { + avail = 0; //ring->tx_size; + goto done; + } + + avail = aq_avail_desc(head, ring->tx_head, ring->tx_size); + if (clear) + ring->tx_head = head; + +done: + AQ_DBG_EXIT(avail); + return (avail); +}